Diffstat (limited to 'drivers/gpu/drm/i915')
41 files changed, 17730 insertions, 11168 deletions
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 5c8e53458edb..0ae6a7c5020f 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -9,6 +9,8 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o \
 	  i915_gem.o \
 	  i915_gem_debug.o \
 	  i915_gem_evict.o \
+	  i915_gem_execbuffer.o \
+	  i915_gem_gtt.o \
 	  i915_gem_tiling.o \
 	  i915_trace_points.o \
 	  intel_display.o \
@@ -26,15 +28,17 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o \
 	  intel_dvo.o \
 	  intel_ringbuffer.o \
 	  intel_overlay.o \
+	  intel_opregion.o \
 	  dvo_ch7xxx.o \
 	  dvo_ch7017.o \
 	  dvo_ivch.o \
 	  dvo_tfp410.o \
 	  dvo_sil164.o
 
-i915-$(CONFIG_ACPI) += i915_opregion.o
 i915-$(CONFIG_COMPAT) += i915_ioc32.o
 
+i915-$(CONFIG_ACPI) += intel_acpi.o
+
 obj-$(CONFIG_DRM_I915) += i915.o
 
 CFLAGS_i915_trace_points.o := -I$(src)
diff --git a/drivers/gpu/drm/i915/dvo_ch7017.c b/drivers/gpu/drm/i915/dvo_ch7017.c
index 14d59804acd7..d3e8c540f778 100644
--- a/drivers/gpu/drm/i915/dvo_ch7017.c
+++ b/drivers/gpu/drm/i915/dvo_ch7017.c
@@ -165,67 +165,44 @@ struct ch7017_priv {
 static void ch7017_dump_regs(struct intel_dvo_device *dvo);
 static void ch7017_dpms(struct intel_dvo_device *dvo, int mode);
 
-static bool ch7017_read(struct intel_dvo_device *dvo, int addr, uint8_t *val)
+static bool ch7017_read(struct intel_dvo_device *dvo, u8 addr, u8 *val)
 {
-	struct i2c_adapter *adapter = dvo->i2c_bus;
-	struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
-	u8 out_buf[2];
-	u8 in_buf[2];
-
 	struct i2c_msg msgs[] = {
 		{
 			.addr = dvo->slave_addr,
 			.flags = 0,
 			.len = 1,
-			.buf = out_buf,
+			.buf = &addr,
 		},
 		{
 			.addr = dvo->slave_addr,
 			.flags = I2C_M_RD,
 			.len = 1,
-			.buf = in_buf,
+			.buf = val,
 		}
 	};
-
-	out_buf[0] = addr;
-	out_buf[1] = 0;
-
-	if (i2c_transfer(&i2cbus->adapter, msgs, 2) == 2) {
-		*val= in_buf[0];
-		return true;
-	};
-
-	return false;
+	return i2c_transfer(dvo->i2c_bus, msgs, 2) == 2;
 }
 
-static bool ch7017_write(struct intel_dvo_device *dvo, int addr, uint8_t val)
+static bool ch7017_write(struct intel_dvo_device *dvo, u8 addr, u8 val)
 {
-	struct i2c_adapter *adapter = dvo->i2c_bus;
-	struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
-	uint8_t out_buf[2];
+	uint8_t buf[2] = { addr, val };
 	struct i2c_msg msg = {
 		.addr = dvo->slave_addr,
 		.flags = 0,
 		.len = 2,
-		.buf = out_buf,
+		.buf = buf,
 	};
-
-	out_buf[0] = addr;
-	out_buf[1] = val;
-
-	if (i2c_transfer(&i2cbus->adapter, &msg, 1) == 1)
-		return true;
-
-	return false;
+	return i2c_transfer(dvo->i2c_bus, &msg, 1) == 1;
 }
 
 /** Probes for a CH7017 on the given bus and slave address. */
 static bool ch7017_init(struct intel_dvo_device *dvo,
 			struct i2c_adapter *adapter)
 {
-	struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
 	struct ch7017_priv *priv;
-	uint8_t val;
+	const char *str;
+	u8 val;
 
 	priv = kzalloc(sizeof(struct ch7017_priv), GFP_KERNEL);
 	if (priv == NULL)
@@ -237,16 +214,27 @@ static bool ch7017_init(struct intel_dvo_device *dvo,
 	if (!ch7017_read(dvo, CH7017_DEVICE_ID, &val))
 		goto fail;
 
-	if (val != CH7017_DEVICE_ID_VALUE &&
-	    val != CH7018_DEVICE_ID_VALUE &&
-	    val != CH7019_DEVICE_ID_VALUE) {
+	switch (val) {
+	case CH7017_DEVICE_ID_VALUE:
+		str = "ch7017";
+		break;
+	case CH7018_DEVICE_ID_VALUE:
+		str = "ch7018";
+		break;
+	case CH7019_DEVICE_ID_VALUE:
+		str = "ch7019";
+		break;
+	default:
 		DRM_DEBUG_KMS("ch701x not detected, got %d: from %s "
-			      "Slave %d.\n",
-			      val, i2cbus->adapter.name,dvo->slave_addr);
+			      "slave %d.\n",
+			      val, adapter->name,dvo->slave_addr);
 		goto fail;
 	}
 
+	DRM_DEBUG_KMS("%s detected on %s, addr %d\n",
+		      str, adapter->name, dvo->slave_addr);
 	return true;
+
 fail:
 	kfree(priv);
 	return false;
@@ -254,7 +242,7 @@ fail:
 
 static enum drm_connector_status ch7017_detect(struct intel_dvo_device *dvo)
 {
-	return connector_status_unknown;
+	return connector_status_connected;
 }
 
 static enum drm_mode_status ch7017_mode_valid(struct intel_dvo_device *dvo,
@@ -368,7 +356,7 @@ static void ch7017_dpms(struct intel_dvo_device *dvo, int mode)
 	}
 
 	/* XXX: Should actually wait for update power status somehow */
-	udelay(20000);
+	msleep(20);
 }
 
 static void ch7017_dump_regs(struct intel_dvo_device *dvo)
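The rewritten ch7017_read()/ch7017_write() above drop the staging buffers entirely: the i2c_msg descriptors point straight at the caller's storage and the transfer collapses into a single return expression. A minimal sketch of the same two-message register-read pattern; read_reg() is a hypothetical stand-alone helper, not part of the patch:

    #include <linux/i2c.h>

    /* Hypothetical helper mirroring the post-patch read path: message 0
     * writes the register address, message 1 reads the value straight
     * into the caller's buffer, so no intermediate copies are needed.
     */
    static bool read_reg(struct i2c_adapter *adapter, u16 slave, u8 addr, u8 *val)
    {
    	struct i2c_msg msgs[] = {
    		{
    			.addr = slave,
    			.flags = 0,		/* write: the register address */
    			.len = 1,
    			.buf = &addr,
    		},
    		{
    			.addr = slave,
    			.flags = I2C_M_RD,	/* read: the register value */
    			.len = 1,
    			.buf = val,
    		},
    	};

    	/* i2c_transfer() returns the number of messages completed. */
    	return i2c_transfer(adapter, msgs, 2) == 2;
    }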
diff --git a/drivers/gpu/drm/i915/dvo_ch7xxx.c b/drivers/gpu/drm/i915/dvo_ch7xxx.c
index 6f1944b24441..7eaa94e4ff06 100644
--- a/drivers/gpu/drm/i915/dvo_ch7xxx.c
+++ b/drivers/gpu/drm/i915/dvo_ch7xxx.c
@@ -113,7 +113,6 @@ static bool ch7xxx_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
 {
 	struct ch7xxx_priv *ch7xxx= dvo->dev_priv;
 	struct i2c_adapter *adapter = dvo->i2c_bus;
-	struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
 	u8 out_buf[2];
 	u8 in_buf[2];
 
@@ -135,14 +134,14 @@ static bool ch7xxx_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
 	out_buf[0] = addr;
 	out_buf[1] = 0;
 
-	if (i2c_transfer(&i2cbus->adapter, msgs, 2) == 2) {
+	if (i2c_transfer(adapter, msgs, 2) == 2) {
 		*ch = in_buf[0];
 		return true;
 	};
 
 	if (!ch7xxx->quiet) {
 		DRM_DEBUG_KMS("Unable to read register 0x%02x from %s:%02x.\n",
-			  addr, i2cbus->adapter.name, dvo->slave_addr);
+			  addr, adapter->name, dvo->slave_addr);
 	}
 	return false;
 }
@@ -152,7 +151,6 @@ static bool ch7xxx_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
 {
 	struct ch7xxx_priv *ch7xxx = dvo->dev_priv;
 	struct i2c_adapter *adapter = dvo->i2c_bus;
-	struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
 	uint8_t out_buf[2];
 	struct i2c_msg msg = {
 		.addr = dvo->slave_addr,
@@ -164,12 +162,12 @@ static bool ch7xxx_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
 	out_buf[0] = addr;
 	out_buf[1] = ch;
 
-	if (i2c_transfer(&i2cbus->adapter, &msg, 1) == 1)
+	if (i2c_transfer(adapter, &msg, 1) == 1)
 		return true;
 
 	if (!ch7xxx->quiet) {
 		DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d.\n",
-			  addr, i2cbus->adapter.name, dvo->slave_addr);
+			  addr, adapter->name, dvo->slave_addr);
 	}
 
 	return false;
diff --git a/drivers/gpu/drm/i915/dvo_ivch.c b/drivers/gpu/drm/i915/dvo_ivch.c
index a2ec3f487202..a12ed9414cc7 100644
--- a/drivers/gpu/drm/i915/dvo_ivch.c
+++ b/drivers/gpu/drm/i915/dvo_ivch.c
@@ -167,7 +167,6 @@ static bool ivch_read(struct intel_dvo_device *dvo, int addr, uint16_t *data)
 {
 	struct ivch_priv *priv = dvo->dev_priv;
 	struct i2c_adapter *adapter = dvo->i2c_bus;
-	struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
 	u8 out_buf[1];
 	u8 in_buf[2];
 
@@ -193,7 +192,7 @@ static bool ivch_read(struct intel_dvo_device *dvo, int addr, uint16_t *data)
 
 	out_buf[0] = addr;
 
-	if (i2c_transfer(&i2cbus->adapter, msgs, 3) == 3) {
+	if (i2c_transfer(adapter, msgs, 3) == 3) {
 		*data = (in_buf[1] << 8) | in_buf[0];
 		return true;
 	};
@@ -201,7 +200,7 @@ static bool ivch_read(struct intel_dvo_device *dvo, int addr, uint16_t *data)
 	if (!priv->quiet) {
 		DRM_DEBUG_KMS("Unable to read register 0x%02x from "
 			  "%s:%02x.\n",
-			  addr, i2cbus->adapter.name, dvo->slave_addr);
+			  addr, adapter->name, dvo->slave_addr);
 	}
 	return false;
 }
@@ -211,7 +210,6 @@ static bool ivch_write(struct intel_dvo_device *dvo, int addr, uint16_t data)
 {
 	struct ivch_priv *priv = dvo->dev_priv;
 	struct i2c_adapter *adapter = dvo->i2c_bus;
-	struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
 	u8 out_buf[3];
 	struct i2c_msg msg = {
 		.addr = dvo->slave_addr,
@@ -224,12 +222,12 @@ static bool ivch_write(struct intel_dvo_device *dvo, int addr, uint16_t data)
 	out_buf[1] = data & 0xff;
 	out_buf[2] = data >> 8;
 
-	if (i2c_transfer(&i2cbus->adapter, &msg, 1) == 1)
+	if (i2c_transfer(adapter, &msg, 1) == 1)
 		return true;
 
 	if (!priv->quiet) {
 		DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d.\n",
-			  addr, i2cbus->adapter.name, dvo->slave_addr);
+			  addr, adapter->name, dvo->slave_addr);
 	}
 
 	return false;
diff --git a/drivers/gpu/drm/i915/dvo_sil164.c b/drivers/gpu/drm/i915/dvo_sil164.c
index 9b8e6765cf26..e4b4091df942 100644
--- a/drivers/gpu/drm/i915/dvo_sil164.c
+++ b/drivers/gpu/drm/i915/dvo_sil164.c
@@ -69,7 +69,6 @@ static bool sil164_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
 {
 	struct sil164_priv *sil = dvo->dev_priv;
 	struct i2c_adapter *adapter = dvo->i2c_bus;
-	struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
 	u8 out_buf[2];
 	u8 in_buf[2];
 
@@ -91,14 +90,14 @@ static bool sil164_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
 	out_buf[0] = addr;
 	out_buf[1] = 0;
 
-	if (i2c_transfer(&i2cbus->adapter, msgs, 2) == 2) {
+	if (i2c_transfer(adapter, msgs, 2) == 2) {
 		*ch = in_buf[0];
 		return true;
 	};
 
 	if (!sil->quiet) {
 		DRM_DEBUG_KMS("Unable to read register 0x%02x from %s:%02x.\n",
-			  addr, i2cbus->adapter.name, dvo->slave_addr);
+			  addr, adapter->name, dvo->slave_addr);
 	}
 	return false;
 }
@@ -107,7 +106,6 @@ static bool sil164_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
 {
 	struct sil164_priv *sil= dvo->dev_priv;
 	struct i2c_adapter *adapter = dvo->i2c_bus;
-	struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
 	uint8_t out_buf[2];
 	struct i2c_msg msg = {
 		.addr = dvo->slave_addr,
@@ -119,12 +117,12 @@ static bool sil164_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
 	out_buf[0] = addr;
 	out_buf[1] = ch;
 
-	if (i2c_transfer(&i2cbus->adapter, &msg, 1) == 1)
+	if (i2c_transfer(adapter, &msg, 1) == 1)
 		return true;
 
 	if (!sil->quiet) {
 		DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d.\n",
-			  addr, i2cbus->adapter.name, dvo->slave_addr);
+			  addr, adapter->name, dvo->slave_addr);
 	}
 
 	return false;
diff --git a/drivers/gpu/drm/i915/dvo_tfp410.c b/drivers/gpu/drm/i915/dvo_tfp410.c
index 56f66426207f..8ab2855bb544 100644
--- a/drivers/gpu/drm/i915/dvo_tfp410.c
+++ b/drivers/gpu/drm/i915/dvo_tfp410.c
@@ -94,7 +94,6 @@ static bool tfp410_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
 {
 	struct tfp410_priv *tfp = dvo->dev_priv;
 	struct i2c_adapter *adapter = dvo->i2c_bus;
-	struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
 	u8 out_buf[2];
 	u8 in_buf[2];
 
@@ -116,14 +115,14 @@ static bool tfp410_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
 	out_buf[0] = addr;
 	out_buf[1] = 0;
 
-	if (i2c_transfer(&i2cbus->adapter, msgs, 2) == 2) {
+	if (i2c_transfer(adapter, msgs, 2) == 2) {
 		*ch = in_buf[0];
 		return true;
 	};
 
 	if (!tfp->quiet) {
 		DRM_DEBUG_KMS("Unable to read register 0x%02x from %s:%02x.\n",
-			  addr, i2cbus->adapter.name, dvo->slave_addr);
+			  addr, adapter->name, dvo->slave_addr);
 	}
 	return false;
 }
@@ -132,7 +131,6 @@ static bool tfp410_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
 {
 	struct tfp410_priv *tfp = dvo->dev_priv;
 	struct i2c_adapter *adapter = dvo->i2c_bus;
-	struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
 	uint8_t out_buf[2];
 	struct i2c_msg msg = {
 		.addr = dvo->slave_addr,
@@ -144,12 +142,12 @@ static bool tfp410_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
 	out_buf[0] = addr;
 	out_buf[1] = ch;
 
-	if (i2c_transfer(&i2cbus->adapter, &msg, 1) == 1)
+	if (i2c_transfer(adapter, &msg, 1) == 1)
 		return true;
 
 	if (!tfp->quiet) {
 		DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d.\n",
-			  addr, i2cbus->adapter.name, dvo->slave_addr);
+			  addr, adapter->name, dvo->slave_addr);
 	}
 
 	return false;
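The ch7xxx, ivch, sil164, and tfp410 hunks above are all the same mechanical fix: the old code used container_of() to climb from dvo->i2c_bus back to its enclosing intel_i2c_chan, then dereferenced the embedded adapter, which is exactly the pointer it started with. A runnable userspace illustration of why the round trip is a no-op; the struct names here are toy stand-ins:

    #include <stddef.h>
    #include <stdio.h>

    /* Same definition the kernel uses, reduced to its offsetof() core. */
    #define container_of(ptr, type, member) \
    	((type *)((char *)(ptr) - offsetof(type, member)))

    struct adapter { int nr; };
    struct chan { int flags; struct adapter adapter; };

    int main(void)
    {
    	struct chan c = { .flags = 1, .adapter = { .nr = 7 } };
    	struct adapter *a = &c.adapter;

    	/* The deleted code effectively computed
    	 * &container_of(a, struct chan, adapter)->adapter, which is 'a'
    	 * again -- hence the simpler i2c_transfer(adapter, ...). */
    	struct chan *back = container_of(a, struct chan, adapter);
    	printf("round trip returns same adapter: %s\n",
    	       &back->adapter == a ? "yes" : "no");
    	return 0;
    }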
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 5e43d7076789..0a893f7400fa 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -32,6 +32,7 @@
 #include "drmP.h"
 #include "drm.h"
 #include "intel_drv.h"
+#include "intel_ringbuffer.h"
 #include "i915_drm.h"
 #include "i915_drv.h"
 
@@ -40,23 +41,64 @@
 
 #if defined(CONFIG_DEBUG_FS)
 
-#define ACTIVE_LIST 1
-#define FLUSHING_LIST 2
-#define INACTIVE_LIST 3
+enum {
+	ACTIVE_LIST,
+	FLUSHING_LIST,
+	INACTIVE_LIST,
+	PINNED_LIST,
+	DEFERRED_FREE_LIST,
+};
+
+static const char *yesno(int v)
+{
+	return v ? "yes" : "no";
+}
+
+static int i915_capabilities(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	const struct intel_device_info *info = INTEL_INFO(dev);
+
+	seq_printf(m, "gen: %d\n", info->gen);
+#define B(x) seq_printf(m, #x ": %s\n", yesno(info->x))
+	B(is_mobile);
+	B(is_i85x);
+	B(is_i915g);
+	B(is_i945gm);
+	B(is_g33);
+	B(need_gfx_hws);
+	B(is_g4x);
+	B(is_pineview);
+	B(is_broadwater);
+	B(is_crestline);
+	B(has_fbc);
+	B(has_pipe_cxsr);
+	B(has_hotplug);
+	B(cursor_needs_physical);
+	B(has_overlay);
+	B(overlay_needs_physical);
+	B(supports_tv);
+	B(has_bsd_ring);
+	B(has_blt_ring);
+#undef B
 
-static const char *get_pin_flag(struct drm_i915_gem_object *obj_priv)
+	return 0;
+}
+
+static const char *get_pin_flag(struct drm_i915_gem_object *obj)
 {
-	if (obj_priv->user_pin_count > 0)
+	if (obj->user_pin_count > 0)
 		return "P";
-	else if (obj_priv->pin_count > 0)
+	else if (obj->pin_count > 0)
 		return "p";
 	else
 		return " ";
 }
 
-static const char *get_tiling_flag(struct drm_i915_gem_object *obj_priv)
+static const char *get_tiling_flag(struct drm_i915_gem_object *obj)
 {
-	switch (obj_priv->tiling_mode) {
+	switch (obj->tiling_mode) {
 	default:
 	case I915_TILING_NONE: return " ";
 	case I915_TILING_X: return "X";
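The new i915_capabilities() entry prints each feature flag under its own field name by letting the preprocessor stringize the argument with #x, so the name is never typed twice. A runnable reduction of the trick; struct info and its fields are illustrative stand-ins for intel_device_info:

    #include <stdio.h>

    struct info { int is_mobile; int has_fbc; };

    static const char *yesno(int v) { return v ? "yes" : "no"; }

    int main(void)
    {
    	struct info i = { .is_mobile = 1, .has_fbc = 0 };
    /* #x expands to the literal field name, e.g. "is_mobile". */
    #define B(x) printf(#x ": %s\n", yesno(i.x))
    	B(is_mobile);
    	B(has_fbc);
    #undef B
    	return 0;
    }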
@@ -64,6 +106,51 @@ static const char *get_tiling_flag(struct drm_i915_gem_object *obj_priv)
 	}
 }
 
+static const char *cache_level_str(int type)
+{
+	switch (type) {
+	case I915_CACHE_NONE: return " uncached";
+	case I915_CACHE_LLC: return " snooped (LLC)";
+	case I915_CACHE_LLC_MLC: return " snooped (LLC+MLC)";
+	default: return "";
+	}
+}
+
+static void
+describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
+{
+	seq_printf(m, "%p: %s%s %8zd %04x %04x %d %d%s%s%s",
+		   &obj->base,
+		   get_pin_flag(obj),
+		   get_tiling_flag(obj),
+		   obj->base.size,
+		   obj->base.read_domains,
+		   obj->base.write_domain,
+		   obj->last_rendering_seqno,
+		   obj->last_fenced_seqno,
+		   cache_level_str(obj->cache_level),
+		   obj->dirty ? " dirty" : "",
+		   obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
+	if (obj->base.name)
+		seq_printf(m, " (name: %d)", obj->base.name);
+	if (obj->fence_reg != I915_FENCE_REG_NONE)
+		seq_printf(m, " (fence: %d)", obj->fence_reg);
+	if (obj->gtt_space != NULL)
+		seq_printf(m, " (gtt offset: %08x, size: %08x)",
+			   obj->gtt_offset, (unsigned int)obj->gtt_space->size);
+	if (obj->pin_mappable || obj->fault_mappable) {
+		char s[3], *t = s;
+		if (obj->pin_mappable)
+			*t++ = 'p';
+		if (obj->fault_mappable)
+			*t++ = 'f';
+		*t = '\0';
+		seq_printf(m, " (%s mappable)", s);
+	}
+	if (obj->ring != NULL)
+		seq_printf(m, " (%s)", obj->ring->name);
+}
+
 static int i915_gem_object_list_info(struct seq_file *m, void *data)
 {
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -71,57 +158,167 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
 	struct list_head *head;
 	struct drm_device *dev = node->minor->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_i915_gem_object *obj_priv;
-	spinlock_t *lock = NULL;
+	struct drm_i915_gem_object *obj;
+	size_t total_obj_size, total_gtt_size;
+	int count, ret;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
 
 	switch (list) {
 	case ACTIVE_LIST:
 		seq_printf(m, "Active:\n");
-		lock = &dev_priv->mm.active_list_lock;
-		head = &dev_priv->render_ring.active_list;
+		head = &dev_priv->mm.active_list;
 		break;
 	case INACTIVE_LIST:
 		seq_printf(m, "Inactive:\n");
 		head = &dev_priv->mm.inactive_list;
 		break;
+	case PINNED_LIST:
+		seq_printf(m, "Pinned:\n");
+		head = &dev_priv->mm.pinned_list;
+		break;
 	case FLUSHING_LIST:
 		seq_printf(m, "Flushing:\n");
 		head = &dev_priv->mm.flushing_list;
 		break;
+	case DEFERRED_FREE_LIST:
+		seq_printf(m, "Deferred free:\n");
+		head = &dev_priv->mm.deferred_free_list;
+		break;
 	default:
-		DRM_INFO("Ooops, unexpected list\n");
-		return 0;
+		mutex_unlock(&dev->struct_mutex);
+		return -EINVAL;
+	}
+
+	total_obj_size = total_gtt_size = count = 0;
+	list_for_each_entry(obj, head, mm_list) {
+		seq_printf(m, "   ");
+		describe_obj(m, obj);
+		seq_printf(m, "\n");
+		total_obj_size += obj->base.size;
+		total_gtt_size += obj->gtt_space->size;
+		count++;
 	}
+	mutex_unlock(&dev->struct_mutex);
 
-	if (lock)
-		spin_lock(lock);
-	list_for_each_entry(obj_priv, head, list)
-	{
-		seq_printf(m, "    %p: %s %8zd %08x %08x %d%s%s",
-			   &obj_priv->base,
-			   get_pin_flag(obj_priv),
-			   obj_priv->base.size,
-			   obj_priv->base.read_domains,
-			   obj_priv->base.write_domain,
-			   obj_priv->last_rendering_seqno,
-			   obj_priv->dirty ? " dirty" : "",
-			   obj_priv->madv == I915_MADV_DONTNEED ? " purgeable" : "");
-
-		if (obj_priv->base.name)
-			seq_printf(m, " (name: %d)", obj_priv->base.name);
-		if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
-			seq_printf(m, " (fence: %d)", obj_priv->fence_reg);
-		if (obj_priv->gtt_space != NULL)
-			seq_printf(m, " (gtt_offset: %08x)", obj_priv->gtt_offset);
+	seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
+		   count, total_obj_size, total_gtt_size);
+	return 0;
+}
 
+#define count_objects(list, member) do { \
+	list_for_each_entry(obj, list, member) { \
+		size += obj->gtt_space->size; \
+		++count; \
+		if (obj->map_and_fenceable) { \
+			mappable_size += obj->gtt_space->size; \
+			++mappable_count; \
+		} \
+	} \
+} while(0)
+
+static int i915_gem_object_info(struct seq_file *m, void* data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 count, mappable_count;
+	size_t size, mappable_size;
+	struct drm_i915_gem_object *obj;
+	int ret;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+
+	seq_printf(m, "%u objects, %zu bytes\n",
+		   dev_priv->mm.object_count,
+		   dev_priv->mm.object_memory);
+
+	size = count = mappable_size = mappable_count = 0;
+	count_objects(&dev_priv->mm.gtt_list, gtt_list);
+	seq_printf(m, "%u [%u] objects, %zu [%zu] bytes in gtt\n",
+		   count, mappable_count, size, mappable_size);
+
+	size = count = mappable_size = mappable_count = 0;
+	count_objects(&dev_priv->mm.active_list, mm_list);
+	count_objects(&dev_priv->mm.flushing_list, mm_list);
+	seq_printf(m, "  %u [%u] active objects, %zu [%zu] bytes\n",
+		   count, mappable_count, size, mappable_size);
+
+	size = count = mappable_size = mappable_count = 0;
+	count_objects(&dev_priv->mm.pinned_list, mm_list);
+	seq_printf(m, "  %u [%u] pinned objects, %zu [%zu] bytes\n",
+		   count, mappable_count, size, mappable_size);
+
+	size = count = mappable_size = mappable_count = 0;
+	count_objects(&dev_priv->mm.inactive_list, mm_list);
+	seq_printf(m, "  %u [%u] inactive objects, %zu [%zu] bytes\n",
+		   count, mappable_count, size, mappable_size);
+
+	size = count = mappable_size = mappable_count = 0;
+	count_objects(&dev_priv->mm.deferred_free_list, mm_list);
+	seq_printf(m, "  %u [%u] freed objects, %zu [%zu] bytes\n",
+		   count, mappable_count, size, mappable_size);
+
+	size = count = mappable_size = mappable_count = 0;
+	list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
+		if (obj->fault_mappable) {
+			size += obj->gtt_space->size;
+			++count;
+		}
+		if (obj->pin_mappable) {
+			mappable_size += obj->gtt_space->size;
+			++mappable_count;
+		}
+	}
+	seq_printf(m, "%u pinned mappable objects, %zu bytes\n",
+		   mappable_count, mappable_size);
+	seq_printf(m, "%u fault mappable objects, %zu bytes\n",
+		   count, size);
+
+	seq_printf(m, "%zu [%zu] gtt total\n",
+		   dev_priv->mm.gtt_total, dev_priv->mm.mappable_gtt_total);
+
+	mutex_unlock(&dev->struct_mutex);
+
+	return 0;
+}
+
+static int i915_gem_gtt_info(struct seq_file *m, void* data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_gem_object *obj;
+	size_t total_obj_size, total_gtt_size;
+	int count, ret;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+
+	total_obj_size = total_gtt_size = count = 0;
+	list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
+		seq_printf(m, "   ");
+		describe_obj(m, obj);
 		seq_printf(m, "\n");
+		total_obj_size += obj->base.size;
+		total_gtt_size += obj->gtt_space->size;
+		count++;
 	}
 
-	if (lock)
-		spin_unlock(lock);
+	mutex_unlock(&dev->struct_mutex);
+
+	seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
+		   count, total_obj_size, total_gtt_size);
+
 	return 0;
 }
 
+
 static int i915_gem_pageflip_info(struct seq_file *m, void *data)
 {
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
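The count_objects() macro introduced above wraps its body in do { ... } while(0) so an invocation followed by a semicolon behaves as a single statement, and it deliberately accumulates into size/count/mappable_size/mappable_count variables taken from the calling scope. A runnable reduction of the idiom over a plain array; struct obj is a stand-in for drm_i915_gem_object:

    #include <stddef.h>
    #include <stdio.h>

    struct obj { size_t size; int mappable; };

    /* do { ... } while (0) keeps the multi-statement body usable as one
     * statement (e.g. as the sole body of an if); the totals come from
     * the caller's scope, just like in the debugfs code.
     */
    #define count_objects(list, n) do { \
    	for (size_t i = 0; i < (n); i++) { \
    		size += (list)[i].size; \
    		++count; \
    		if ((list)[i].mappable) { \
    			mappable_size += (list)[i].size; \
    			++mappable_count; \
    		} \
    	} \
    } while (0)

    int main(void)
    {
    	struct obj active[] = { { 4096, 1 }, { 8192, 0 } };
    	size_t size = 0, mappable_size = 0;
    	unsigned count = 0, mappable_count = 0;

    	count_objects(active, 2);
    	printf("%u [%u] objects, %zu [%zu] bytes\n",
    	       count, mappable_count, size, mappable_size);
    	return 0;
    }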
@@ -130,21 +327,21 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
 	struct intel_crtc *crtc;
 
 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
-		const char *pipe = crtc->pipe ? "B" : "A";
-		const char *plane = crtc->plane ? "B" : "A";
+		const char pipe = pipe_name(crtc->pipe);
+		const char plane = plane_name(crtc->plane);
 		struct intel_unpin_work *work;
 
 		spin_lock_irqsave(&dev->event_lock, flags);
 		work = crtc->unpin_work;
 		if (work == NULL) {
-			seq_printf(m, "No flip due on pipe %s (plane %s)\n",
+			seq_printf(m, "No flip due on pipe %c (plane %c)\n",
 				   pipe, plane);
 		} else {
 			if (!work->pending) {
-				seq_printf(m, "Flip queued on pipe %s (plane %s)\n",
+				seq_printf(m, "Flip queued on pipe %c (plane %c)\n",
 					   pipe, plane);
 			} else {
-				seq_printf(m, "Flip pending (waiting for vsync) on pipe %s (plane %s)\n",
+				seq_printf(m, "Flip pending (waiting for vsync) on pipe %c (plane %c)\n",
 					   pipe, plane);
 			}
 			if (work->enable_stall_check)
@@ -154,14 +351,14 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
 			seq_printf(m, "%d prepares\n", work->pending);
 
 			if (work->old_fb_obj) {
-				struct drm_i915_gem_object *obj_priv = to_intel_bo(work->old_fb_obj);
-				if(obj_priv)
-					seq_printf(m, "Old framebuffer gtt_offset 0x%08x\n", obj_priv->gtt_offset );
+				struct drm_i915_gem_object *obj = work->old_fb_obj;
+				if (obj)
+					seq_printf(m, "Old framebuffer gtt_offset 0x%08x\n", obj->gtt_offset);
 			}
 			if (work->pending_flip_obj) {
-				struct drm_i915_gem_object *obj_priv = to_intel_bo(work->pending_flip_obj);
-				if(obj_priv)
-					seq_printf(m, "New framebuffer gtt_offset 0x%08x\n", obj_priv->gtt_offset );
+				struct drm_i915_gem_object *obj = work->pending_flip_obj;
+				if (obj)
+					seq_printf(m, "New framebuffer gtt_offset 0x%08x\n", obj->gtt_offset);
 			}
 		}
 		spin_unlock_irqrestore(&dev->event_lock, flags);
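The pageflip output above switches from "%s" with hard-coded "A"/"B" strings to "%c" with pipe_name()/plane_name(), which keeps working as more pipes are added. In the i915 headers these are simple 'A' + index conversions; the macro bodies below are assumed from that convention:

    #include <stdio.h>

    /* Sketch of the i915 helpers: map a zero-based pipe/plane index to
     * its letter, so pipe 0 -> 'A', pipe 1 -> 'B', pipe 2 -> 'C', ...
     */
    #define pipe_name(p)  ((p) + 'A')
    #define plane_name(p) ((p) + 'A')

    int main(void)
    {
    	for (int pipe = 0; pipe < 3; pipe++)
    		printf("pipe %c (plane %c)\n", pipe_name(pipe), plane_name(pipe));
    	return 0;
    }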
@@ -176,32 +373,83 @@ static int i915_gem_request_info(struct seq_file *m, void *data)
 	struct drm_device *dev = node->minor->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_gem_request *gem_request;
+	int ret, count;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
 
-	seq_printf(m, "Request:\n");
-	list_for_each_entry(gem_request, &dev_priv->render_ring.request_list,
-			list) {
-		seq_printf(m, "    %d @ %d\n",
-			   gem_request->seqno,
-			   (int) (jiffies - gem_request->emitted_jiffies));
+	count = 0;
+	if (!list_empty(&dev_priv->ring[RCS].request_list)) {
+		seq_printf(m, "Render requests:\n");
+		list_for_each_entry(gem_request,
+				    &dev_priv->ring[RCS].request_list,
+				    list) {
+			seq_printf(m, "    %d @ %d\n",
+				   gem_request->seqno,
+				   (int) (jiffies - gem_request->emitted_jiffies));
+		}
+		count++;
+	}
+	if (!list_empty(&dev_priv->ring[VCS].request_list)) {
+		seq_printf(m, "BSD requests:\n");
+		list_for_each_entry(gem_request,
+				    &dev_priv->ring[VCS].request_list,
+				    list) {
+			seq_printf(m, "    %d @ %d\n",
+				   gem_request->seqno,
+				   (int) (jiffies - gem_request->emitted_jiffies));
+		}
+		count++;
 	}
+	if (!list_empty(&dev_priv->ring[BCS].request_list)) {
+		seq_printf(m, "BLT requests:\n");
+		list_for_each_entry(gem_request,
+				    &dev_priv->ring[BCS].request_list,
+				    list) {
+			seq_printf(m, "    %d @ %d\n",
+				   gem_request->seqno,
+				   (int) (jiffies - gem_request->emitted_jiffies));
+		}
+		count++;
+	}
+	mutex_unlock(&dev->struct_mutex);
+
+	if (count == 0)
+		seq_printf(m, "No requests\n");
+
 	return 0;
 }
 
+static void i915_ring_seqno_info(struct seq_file *m,
+				 struct intel_ring_buffer *ring)
+{
+	if (ring->get_seqno) {
+		seq_printf(m, "Current sequence (%s): %d\n",
+			   ring->name, ring->get_seqno(ring));
+		seq_printf(m, "Waiter sequence (%s): %d\n",
+			   ring->name, ring->waiting_seqno);
+		seq_printf(m, "IRQ sequence (%s): %d\n",
+			   ring->name, ring->irq_seqno);
+	}
+}
+
 static int i915_gem_seqno_info(struct seq_file *m, void *data)
 {
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	int ret, i;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+
+	for (i = 0; i < I915_NUM_RINGS; i++)
+		i915_ring_seqno_info(m, &dev_priv->ring[i]);
+
+	mutex_unlock(&dev->struct_mutex);
 
-	if (dev_priv->render_ring.status_page.page_addr != NULL) {
-		seq_printf(m, "Current sequence: %d\n",
-			   i915_get_gem_seqno(dev, &dev_priv->render_ring));
-	} else {
-		seq_printf(m, "Current sequence: hws uninitialized\n");
-	}
-	seq_printf(m, "Waiter sequence: %d\n",
-		   dev_priv->mm.waiting_gem_seqno);
-	seq_printf(m, "IRQ sequence: %d\n", dev_priv->mm.irq_gem_seqno);
 	return 0;
 }
 
@@ -211,6 +459,11 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	int ret, i, pipe;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
 
 	if (!HAS_PCH_SPLIT(dev)) {
 		seq_printf(m, "Interrupt enable: %08x\n",
@@ -219,10 +472,10 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
 			   I915_READ(IIR));
 		seq_printf(m, "Interrupt mask: %08x\n",
 			   I915_READ(IMR));
-		seq_printf(m, "Pipe A stat: %08x\n",
-			   I915_READ(PIPEASTAT));
-		seq_printf(m, "Pipe B stat: %08x\n",
-			   I915_READ(PIPEBSTAT));
+		for_each_pipe(pipe)
+			seq_printf(m, "Pipe %c stat: %08x\n",
+				   pipe_name(pipe),
+				   I915_READ(PIPESTAT(pipe)));
 	} else {
 		seq_printf(m, "North Display Interrupt enable: %08x\n",
 			   I915_READ(DEIER));
@@ -245,16 +498,16 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
 	}
 	seq_printf(m, "Interrupts received: %d\n",
 		   atomic_read(&dev_priv->irq_received));
-	if (dev_priv->render_ring.status_page.page_addr != NULL) {
-		seq_printf(m, "Current sequence: %d\n",
-			   i915_get_gem_seqno(dev, &dev_priv->render_ring));
-	} else {
-		seq_printf(m, "Current sequence: hws uninitialized\n");
+	for (i = 0; i < I915_NUM_RINGS; i++) {
+		if (IS_GEN6(dev)) {
+			seq_printf(m, "Graphics Interrupt mask (%s): %08x\n",
+				   dev_priv->ring[i].name,
+				   I915_READ_IMR(&dev_priv->ring[i]));
+		}
+		i915_ring_seqno_info(m, &dev_priv->ring[i]);
 	}
-	seq_printf(m, "Waiter sequence: %d\n",
-		   dev_priv->mm.waiting_gem_seqno);
-	seq_printf(m, "IRQ sequence: %d\n",
-		   dev_priv->mm.irq_gem_seqno);
+	mutex_unlock(&dev->struct_mutex);
+
 	return 0;
 }
 
@@ -263,33 +516,26 @@ static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	int i;
+	int i, ret;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
 
 	seq_printf(m, "Reserved fences = %d\n", dev_priv->fence_reg_start);
 	seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
 	for (i = 0; i < dev_priv->num_fence_regs; i++) {
-		struct drm_gem_object *obj = dev_priv->fence_regs[i].obj;
+		struct drm_i915_gem_object *obj = dev_priv->fence_regs[i].obj;
 
-		if (obj == NULL) {
-			seq_printf(m, "Fenced object[%2d] = unused\n", i);
-		} else {
-			struct drm_i915_gem_object *obj_priv;
-
-			obj_priv = to_intel_bo(obj);
-			seq_printf(m, "Fenced object[%2d] = %p: %s "
-				   "%08x %08zx %08x %s %08x %08x %d",
-				   i, obj, get_pin_flag(obj_priv),
-				   obj_priv->gtt_offset,
-				   obj->size, obj_priv->stride,
-				   get_tiling_flag(obj_priv),
-				   obj->read_domains, obj->write_domain,
-				   obj_priv->last_rendering_seqno);
-			if (obj->name)
-				seq_printf(m, " (name: %d)", obj->name);
-			seq_printf(m, "\n");
-		}
+		seq_printf(m, "Fenced object[%2d] = ", i);
+		if (obj == NULL)
+			seq_printf(m, "unused");
+		else
+			describe_obj(m, obj);
+		seq_printf(m, "\n");
 	}
 
+	mutex_unlock(&dev->struct_mutex);
 	return 0;
 }
 
@@ -298,10 +544,12 @@ static int i915_hws_info(struct seq_file *m, void *data)
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct intel_ring_buffer *ring;
+	const volatile u32 __iomem *hws;
 	int i;
-	volatile u32 *hws;
 
-	hws = (volatile u32 *)dev_priv->render_ring.status_page.page_addr;
+	ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
+	hws = (volatile u32 __iomem *)ring->status_page.page_addr;
 	if (hws == NULL)
 		return 0;
 
@@ -313,16 +561,19 @@ static int i915_hws_info(struct seq_file *m, void *data)
 	return 0;
 }
 
-static void i915_dump_pages(struct seq_file *m, struct page **pages, int page_count)
+static void i915_dump_object(struct seq_file *m,
+			     struct io_mapping *mapping,
+			     struct drm_i915_gem_object *obj)
 {
-	int page, i;
-	uint32_t *mem;
+	int page, page_count, i;
 
+	page_count = obj->base.size / PAGE_SIZE;
 	for (page = 0; page < page_count; page++) {
-		mem = kmap_atomic(pages[page], KM_USER0);
+		u32 *mem = io_mapping_map_wc(mapping,
+					     obj->gtt_offset + page * PAGE_SIZE);
 		for (i = 0; i < PAGE_SIZE; i += 4)
 			seq_printf(m, "%08x : %08x\n", i, mem[i / 4]);
-		kunmap_atomic(mem, KM_USER0);
+		io_mapping_unmap(mem);
 	}
 }
 
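The dump helper just above changes access strategy: rather than kmap_atomic() on the object's shmem pages (which the old batchbuffer code had to pin first), it reads the object through the aperture via a write-combining io_mapping. A hedged kernel-style sketch of that access pattern; dump_page() is a hypothetical helper, and 'mapping' would be dev_priv->mm.gtt_mapping with 'gtt_offset' the object's aperture offset:

    #include <linux/io-mapping.h>
    #include <linux/kernel.h>

    /* Map one page of the object through the GTT aperture (WC), dump it
     * as 32-bit words, then drop the temporary mapping. No page pinning
     * or kmap of the backing shmem pages is required.
     */
    static void dump_page(struct io_mapping *mapping, u32 gtt_offset, int page)
    {
    	u32 *mem = io_mapping_map_wc(mapping, gtt_offset + page * PAGE_SIZE);
    	int i;

    	for (i = 0; i < PAGE_SIZE; i += 4)
    		printk(KERN_DEBUG "%08x : %08x\n", i, mem[i / 4]);

    	io_mapping_unmap(mem);
    }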
@@ -331,32 +582,21 @@ static int i915_batchbuffer_info(struct seq_file *m, void *data)
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_gem_object *obj;
-	struct drm_i915_gem_object *obj_priv;
+	struct drm_i915_gem_object *obj;
 	int ret;
 
-	spin_lock(&dev_priv->mm.active_list_lock);
-
-	list_for_each_entry(obj_priv, &dev_priv->render_ring.active_list,
-			list) {
-		obj = &obj_priv->base;
-		if (obj->read_domains & I915_GEM_DOMAIN_COMMAND) {
-			ret = i915_gem_object_get_pages(obj, 0);
-			if (ret) {
-				DRM_ERROR("Failed to get pages: %d\n", ret);
-				spin_unlock(&dev_priv->mm.active_list_lock);
-				return ret;
-			}
-
-			seq_printf(m, "--- gtt_offset = 0x%08x\n", obj_priv->gtt_offset);
-			i915_dump_pages(m, obj_priv->pages, obj->size / PAGE_SIZE);
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
 
-			i915_gem_object_put_pages(obj);
+	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
+		if (obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) {
+			seq_printf(m, "--- gtt_offset = 0x%08x\n", obj->gtt_offset);
+			i915_dump_object(m, dev_priv->mm.gtt_mapping, obj);
 		}
 	}
 
-	spin_unlock(&dev_priv->mm.active_list_lock);
-
+	mutex_unlock(&dev->struct_mutex);
 	return 0;
 }
 
@@ -365,20 +605,26 @@ static int i915_ringbuffer_data(struct seq_file *m, void *data)
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	u8 *virt;
-	uint32_t *ptr, off;
+	struct intel_ring_buffer *ring;
+	int ret;
 
-	if (!dev_priv->render_ring.gem_object) {
-		seq_printf(m, "No ringbuffer setup\n");
-		return 0;
-	}
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
 
-	virt = dev_priv->render_ring.virtual_start;
+	ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
+	if (!ring->obj) {
+		seq_printf(m, "No ringbuffer setup\n");
+	} else {
+		const u8 __iomem *virt = ring->virtual_start;
+		uint32_t off;
 
-	for (off = 0; off < dev_priv->render_ring.size; off += 4) {
-		ptr = (uint32_t *)(virt + off);
-		seq_printf(m, "%08x : %08x\n", off, *ptr);
+		for (off = 0; off < ring->size; off += 4) {
+			uint32_t *ptr = (uint32_t *)(virt + off);
+			seq_printf(m, "%08x : %08x\n", off, *ptr);
+		}
 	}
+	mutex_unlock(&dev->struct_mutex);
 
 	return 0;
 }
@@ -388,19 +634,38 @@ static int i915_ringbuffer_info(struct seq_file *m, void *data)
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	unsigned int head, tail;
+	struct intel_ring_buffer *ring;
 
-	head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
-	tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
+	ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
+	if (ring->size == 0)
+		return 0;
 
-	seq_printf(m, "RingHead : %08x\n", head);
-	seq_printf(m, "RingTail : %08x\n", tail);
-	seq_printf(m, "RingSize : %08lx\n", dev_priv->render_ring.size);
-	seq_printf(m, "Acthd : %08x\n", I915_READ(IS_I965G(dev) ? ACTHD_I965 : ACTHD));
+	seq_printf(m, "Ring %s:\n", ring->name);
+	seq_printf(m, " Head : %08x\n", I915_READ_HEAD(ring) & HEAD_ADDR);
+	seq_printf(m, " Tail : %08x\n", I915_READ_TAIL(ring) & TAIL_ADDR);
+	seq_printf(m, " Size : %08x\n", ring->size);
+	seq_printf(m, " Active : %08x\n", intel_ring_get_active_head(ring));
+	seq_printf(m, " NOPID : %08x\n", I915_READ_NOPID(ring));
+	if (IS_GEN6(dev)) {
+		seq_printf(m, " Sync 0 : %08x\n", I915_READ_SYNC_0(ring));
+		seq_printf(m, " Sync 1 : %08x\n", I915_READ_SYNC_1(ring));
+	}
+	seq_printf(m, " Control : %08x\n", I915_READ_CTL(ring));
+	seq_printf(m, " Start : %08x\n", I915_READ_START(ring));
 
 	return 0;
 }
 
+static const char *ring_str(int ring)
+{
+	switch (ring) {
+	case RING_RENDER: return " render";
+	case RING_BSD: return " bsd";
+	case RING_BLT: return " blt";
+	default: return "";
+	}
+}
+
 static const char *pin_flag(int pinned)
 {
 	if (pinned > 0)
@@ -431,6 +696,37 @@ static const char *purgeable_flag(int purgeable)
 	return purgeable ? " purgeable" : "";
 }
 
+static void print_error_buffers(struct seq_file *m,
+				const char *name,
+				struct drm_i915_error_buffer *err,
+				int count)
+{
+	seq_printf(m, "%s [%d]:\n", name, count);
+
+	while (count--) {
+		seq_printf(m, "  %08x %8u %04x %04x %08x%s%s%s%s%s%s",
+			   err->gtt_offset,
+			   err->size,
+			   err->read_domains,
+			   err->write_domain,
+			   err->seqno,
+			   pin_flag(err->pinned),
+			   tiling_flag(err->tiling),
+			   dirty_flag(err->dirty),
+			   purgeable_flag(err->purgeable),
+			   ring_str(err->ring),
+			   cache_level_str(err->cache_level));
+
+		if (err->name)
+			seq_printf(m, " (name: %d)", err->name);
+		if (err->fence_reg != I915_FENCE_REG_NONE)
+			seq_printf(m, " (fence: %d)", err->fence_reg);
+
+		seq_printf(m, "\n");
+		err++;
+	}
+}
+
 static int i915_error_state(struct seq_file *m, void *unused)
 {
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -452,47 +748,54 @@ static int i915_error_state(struct seq_file *m, void *unused)
 		   error->time.tv_usec);
 	seq_printf(m, "PCI ID: 0x%04x\n", dev->pci_device);
 	seq_printf(m, "EIR: 0x%08x\n", error->eir);
-	seq_printf(m, " PGTBL_ER: 0x%08x\n", error->pgtbl_er);
-	seq_printf(m, " INSTPM: 0x%08x\n", error->instpm);
+	seq_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);
+	if (INTEL_INFO(dev)->gen >= 6) {
+		seq_printf(m, "ERROR: 0x%08x\n", error->error);
+		seq_printf(m, "Blitter command stream:\n");
+		seq_printf(m, " ACTHD: 0x%08x\n", error->bcs_acthd);
+		seq_printf(m, " IPEIR: 0x%08x\n", error->bcs_ipeir);
+		seq_printf(m, " IPEHR: 0x%08x\n", error->bcs_ipehr);
+		seq_printf(m, " INSTDONE: 0x%08x\n", error->bcs_instdone);
+		seq_printf(m, " seqno: 0x%08x\n", error->bcs_seqno);
+		seq_printf(m, "Video (BSD) command stream:\n");
+		seq_printf(m, " ACTHD: 0x%08x\n", error->vcs_acthd);
+		seq_printf(m, " IPEIR: 0x%08x\n", error->vcs_ipeir);
+		seq_printf(m, " IPEHR: 0x%08x\n", error->vcs_ipehr);
+		seq_printf(m, " INSTDONE: 0x%08x\n", error->vcs_instdone);
+		seq_printf(m, " seqno: 0x%08x\n", error->vcs_seqno);
+	}
+	seq_printf(m, "Render command stream:\n");
+	seq_printf(m, " ACTHD: 0x%08x\n", error->acthd);
 	seq_printf(m, " IPEIR: 0x%08x\n", error->ipeir);
 	seq_printf(m, " IPEHR: 0x%08x\n", error->ipehr);
 	seq_printf(m, " INSTDONE: 0x%08x\n", error->instdone);
-	seq_printf(m, " ACTHD: 0x%08x\n", error->acthd);
-	if (IS_I965G(dev)) {
-		seq_printf(m, " INSTPS: 0x%08x\n", error->instps);
+	if (INTEL_INFO(dev)->gen >= 4) {
 		seq_printf(m, " INSTDONE1: 0x%08x\n", error->instdone1);
+		seq_printf(m, " INSTPS: 0x%08x\n", error->instps);
 	}
-	seq_printf(m, "seqno: 0x%08x\n", error->seqno);
-
-	if (error->active_bo_count) {
-		seq_printf(m, "Buffers [%d]:\n", error->active_bo_count);
-
-		for (i = 0; i < error->active_bo_count; i++) {
-			seq_printf(m, "  %08x %8zd %08x %08x %08x%s%s%s%s",
-				   error->active_bo[i].gtt_offset,
-				   error->active_bo[i].size,
-				   error->active_bo[i].read_domains,
-				   error->active_bo[i].write_domain,
-				   error->active_bo[i].seqno,
-				   pin_flag(error->active_bo[i].pinned),
-				   tiling_flag(error->active_bo[i].tiling),
-				   dirty_flag(error->active_bo[i].dirty),
-				   purgeable_flag(error->active_bo[i].purgeable));
-
-			if (error->active_bo[i].name)
-				seq_printf(m, " (name: %d)", error->active_bo[i].name);
-			if (error->active_bo[i].fence_reg != I915_FENCE_REG_NONE)
-				seq_printf(m, " (fence: %d)", error->active_bo[i].fence_reg);
-
-			seq_printf(m, "\n");
-		}
-	}
+	seq_printf(m, " INSTPM: 0x%08x\n", error->instpm);
+	seq_printf(m, " seqno: 0x%08x\n", error->seqno);
+
+	for (i = 0; i < dev_priv->num_fence_regs; i++)
+		seq_printf(m, " fence[%d] = %08llx\n", i, error->fence[i]);
+
+	if (error->active_bo)
+		print_error_buffers(m, "Active",
+				    error->active_bo,
+				    error->active_bo_count);
+
+	if (error->pinned_bo)
+		print_error_buffers(m, "Pinned",
+				    error->pinned_bo,
+				    error->pinned_bo_count);
 
 	for (i = 0; i < ARRAY_SIZE(error->batchbuffer); i++) {
 		if (error->batchbuffer[i]) {
 			struct drm_i915_error_object *obj = error->batchbuffer[i];
 
-			seq_printf(m, "--- gtt_offset = 0x%08x\n", obj->gtt_offset);
+			seq_printf(m, "%s --- gtt_offset = 0x%08x\n",
+				   dev_priv->ring[i].name,
+				   obj->gtt_offset);
 			offset = 0;
 			for (page = 0; page < obj->page_count; page++) {
 				for (elt = 0; elt < PAGE_SIZE/4; elt++) {
@@ -503,15 +806,20 @@ static int i915_error_state(struct seq_file *m, void *unused)
 			}
 		}
 
-	if (error->ringbuffer) {
-		struct drm_i915_error_object *obj = error->ringbuffer;
-
-		seq_printf(m, "--- ringbuffer = 0x%08x\n", obj->gtt_offset);
-		offset = 0;
-		for (page = 0; page < obj->page_count; page++) {
-			for (elt = 0; elt < PAGE_SIZE/4; elt++) {
-				seq_printf(m, "%08x : %08x\n", offset, obj->pages[page][elt]);
-				offset += 4;
+	for (i = 0; i < ARRAY_SIZE(error->ringbuffer); i++) {
+		if (error->ringbuffer[i]) {
+			struct drm_i915_error_object *obj = error->ringbuffer[i];
+			seq_printf(m, "%s --- ringbuffer = 0x%08x\n",
+				   dev_priv->ring[i].name,
+				   obj->gtt_offset);
+			offset = 0;
+			for (page = 0; page < obj->page_count; page++) {
+				for (elt = 0; elt < PAGE_SIZE/4; elt++) {
+					seq_printf(m, "%08x : %08x\n",
+						   offset,
+						   obj->pages[page][elt]);
+					offset += 4;
+				}
 			}
 		}
 	}
@@ -519,6 +827,9 @@ static int i915_error_state(struct seq_file *m, void *unused)
 	if (error->overlay)
 		intel_overlay_print_error_state(m, error->overlay);
 
+	if (error->display)
+		intel_display_print_error_state(m, dev, error->display);
+
 out:
 	spin_unlock_irqrestore(&dev_priv->error_lock, flags);
 
@@ -542,15 +853,82 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
544 | drm_i915_private_t *dev_priv = dev->dev_private; | 855 | drm_i915_private_t *dev_priv = dev->dev_private; |
545 | u16 rgvswctl = I915_READ16(MEMSWCTL); | 856 | int ret; |
546 | u16 rgvstat = I915_READ16(MEMSTAT_ILK); | ||
547 | 857 | ||
548 | seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf); | 858 | if (IS_GEN5(dev)) { |
549 | seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f); | 859 | u16 rgvswctl = I915_READ16(MEMSWCTL); |
550 | seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >> | 860 | u16 rgvstat = I915_READ16(MEMSTAT_ILK); |
551 | MEMSTAT_VID_SHIFT); | 861 | |
552 | seq_printf(m, "Current P-state: %d\n", | 862 | seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf); |
553 | (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT); | 863 | seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f); |
864 | seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >> | ||
865 | MEMSTAT_VID_SHIFT); | ||
866 | seq_printf(m, "Current P-state: %d\n", | ||
867 | (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT); | ||
868 | } else if (IS_GEN6(dev)) { | ||
869 | u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS); | ||
870 | u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS); | ||
871 | u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); | ||
872 | u32 rpstat; | ||
873 | u32 rpupei, rpcurup, rpprevup; | ||
874 | u32 rpdownei, rpcurdown, rpprevdown; | ||
875 | int max_freq; | ||
876 | |||
877 | /* RPSTAT1 is in the GT power well */ | ||
878 | ret = mutex_lock_interruptible(&dev->struct_mutex); | ||
879 | if (ret) | ||
880 | return ret; | ||
881 | |||
882 | gen6_gt_force_wake_get(dev_priv); | ||
883 | |||
884 | rpstat = I915_READ(GEN6_RPSTAT1); | ||
885 | rpupei = I915_READ(GEN6_RP_CUR_UP_EI); | ||
886 | rpcurup = I915_READ(GEN6_RP_CUR_UP); | ||
887 | rpprevup = I915_READ(GEN6_RP_PREV_UP); | ||
888 | rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI); | ||
889 | rpcurdown = I915_READ(GEN6_RP_CUR_DOWN); | ||
890 | rpprevdown = I915_READ(GEN6_RP_PREV_DOWN); | ||
891 | |||
892 | gen6_gt_force_wake_put(dev_priv); | ||
893 | mutex_unlock(&dev->struct_mutex); | ||
894 | |||
895 | seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status); | ||
896 | seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat); | ||
897 | seq_printf(m, "Render p-state ratio: %d\n", | ||
898 | (gt_perf_status & 0xff00) >> 8); | ||
899 | seq_printf(m, "Render p-state VID: %d\n", | ||
900 | gt_perf_status & 0xff); | ||
901 | seq_printf(m, "Render p-state limit: %d\n", | ||
902 | rp_state_limits & 0xff); | ||
903 | seq_printf(m, "CAGF: %dMHz\n", ((rpstat & GEN6_CAGF_MASK) >> | ||
904 | GEN6_CAGF_SHIFT) * 50); | ||
905 | seq_printf(m, "RP CUR UP EI: %dus\n", rpupei & | ||
906 | GEN6_CURICONT_MASK); | ||
907 | seq_printf(m, "RP CUR UP: %dus\n", rpcurup & | ||
908 | GEN6_CURBSYTAVG_MASK); | ||
909 | seq_printf(m, "RP PREV UP: %dus\n", rpprevup & | ||
910 | GEN6_CURBSYTAVG_MASK); | ||
911 | seq_printf(m, "RP CUR DOWN EI: %dus\n", rpdownei & | ||
912 | GEN6_CURIAVG_MASK); | ||
913 | seq_printf(m, "RP CUR DOWN: %dus\n", rpcurdown & | ||
914 | GEN6_CURBSYTAVG_MASK); | ||
915 | seq_printf(m, "RP PREV DOWN: %dus\n", rpprevdown & | ||
916 | GEN6_CURBSYTAVG_MASK); | ||
917 | |||
918 | max_freq = (rp_state_cap & 0xff0000) >> 16; | ||
919 | seq_printf(m, "Lowest (RPN) frequency: %dMHz\n", | ||
920 | max_freq * 50); | ||
921 | |||
922 | max_freq = (rp_state_cap & 0xff00) >> 8; | ||
923 | seq_printf(m, "Nominal (RP1) frequency: %dMHz\n", | ||
924 | max_freq * 50); | ||
925 | |||
926 | max_freq = rp_state_cap & 0xff; | ||
927 | seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n", | ||
928 | max_freq * 50); | ||
929 | } else { | ||
930 | seq_printf(m, "no P-state info available\n"); | ||
931 | } | ||
554 | 932 | ||
555 | return 0; | 933 | return 0; |
556 | } | 934 | } |
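The CAGF line above turns the raw frequency ratio in RPSTAT1 into MHz by multiplying by 50, since each Gen6 ratio step is 50 MHz. A minimal userspace sketch of the same decode; the mask and shift values here are assumptions standing in for the real GEN6_CAGF_MASK/GEN6_CAGF_SHIFT definitions in i915_reg.h:

#include <stdio.h>
#include <stdint.h>

/* Assumed encodings; the authoritative values live in i915_reg.h. */
#define GEN6_CAGF_SHIFT 8
#define GEN6_CAGF_MASK  (0x7f << GEN6_CAGF_SHIFT)

int main(void)
{
        uint32_t rpstat = 0x00001200;   /* example RPSTAT1 readout */
        unsigned int ratio = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;

        /* Each ratio step corresponds to 50 MHz on Gen6. */
        printf("CAGF: %u MHz\n", ratio * 50);   /* 0x12 -> 900 MHz */
        return 0;
}

Note that the handler brackets the RPSTAT1 read itself with gen6_gt_force_wake_get()/put(): the register lives in the GT power well and reads back garbage while the well is asleep.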
@@ -599,7 +977,7 @@ static int i915_drpc_info(struct seq_file *m, void *unused) | |||
599 | struct drm_device *dev = node->minor->dev; | 977 | struct drm_device *dev = node->minor->dev; |
600 | drm_i915_private_t *dev_priv = dev->dev_private; | 978 | drm_i915_private_t *dev_priv = dev->dev_private; |
601 | u32 rgvmodectl = I915_READ(MEMMODECTL); | 979 | u32 rgvmodectl = I915_READ(MEMMODECTL); |
602 | u32 rstdbyctl = I915_READ(MCHBAR_RENDER_STANDBY); | 980 | u32 rstdbyctl = I915_READ(RSTDBYCTL); |
603 | u16 crstandvid = I915_READ16(CRSTANDVID); | 981 | u16 crstandvid = I915_READ16(CRSTANDVID); |
604 | 982 | ||
605 | seq_printf(m, "HD boost: %s\n", (rgvmodectl & MEMMODE_BOOST_EN) ? | 983 | seq_printf(m, "HD boost: %s\n", (rgvmodectl & MEMMODE_BOOST_EN) ? |
@@ -622,6 +1000,30 @@ static int i915_drpc_info(struct seq_file *m, void *unused) | |||
622 | seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f)); | 1000 | seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f)); |
623 | seq_printf(m, "Render standby enabled: %s\n", | 1001 | seq_printf(m, "Render standby enabled: %s\n", |
624 | (rstdbyctl & RCX_SW_EXIT) ? "no" : "yes"); | 1002 | (rstdbyctl & RCX_SW_EXIT) ? "no" : "yes"); |
1003 | seq_printf(m, "Current RS state: "); | ||
1004 | switch (rstdbyctl & RSX_STATUS_MASK) { | ||
1005 | case RSX_STATUS_ON: | ||
1006 | seq_printf(m, "on\n"); | ||
1007 | break; | ||
1008 | case RSX_STATUS_RC1: | ||
1009 | seq_printf(m, "RC1\n"); | ||
1010 | break; | ||
1011 | case RSX_STATUS_RC1E: | ||
1012 | seq_printf(m, "RC1E\n"); | ||
1013 | break; | ||
1014 | case RSX_STATUS_RS1: | ||
1015 | seq_printf(m, "RS1\n"); | ||
1016 | break; | ||
1017 | case RSX_STATUS_RS2: | ||
1018 | seq_printf(m, "RS2 (RC6)\n"); | ||
1019 | break; | ||
1020 | case RSX_STATUS_RS3: | ||
1021 | seq_printf(m, "RC3 (RC6+)\n"); | ||
1022 | break; | ||
1023 | default: | ||
1024 | seq_printf(m, "unknown\n"); | ||
1025 | break; | ||
1026 | } | ||
625 | 1027 | ||
626 | return 0; | 1028 | return 0; |
627 | } | 1029 | } |
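The switch above spells out the RSX_STATUS decode one case at a time. An equivalent table-driven sketch, with a made-up field layout (a 3-bit status at bit 20) standing in for the real RSX_STATUS_* definitions:

#include <stdio.h>
#include <stdint.h>

/* Hypothetical field layout for illustration only. */
#define RSX_STATUS_SHIFT 20
#define RSX_STATUS_MASK  (7u << RSX_STATUS_SHIFT)

static const char *rs_state_name(uint32_t rstdbyctl)
{
        static const char * const names[] = {
                "on", "RC1", "RC1E", "RS1", "RS2 (RC6)", "RS3 (RC6+)",
        };
        unsigned int idx = (rstdbyctl & RSX_STATUS_MASK) >> RSX_STATUS_SHIFT;

        return idx < 6 ? names[idx] : "unknown";
}

int main(void)
{
        printf("Current RS state: %s\n",
               rs_state_name(4u << RSX_STATUS_SHIFT));  /* "RS2 (RC6)" */
        return 0;
}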
@@ -642,6 +1044,9 @@ static int i915_fbc_status(struct seq_file *m, void *unused) | |||
642 | } else { | 1044 | } else { |
643 | seq_printf(m, "FBC disabled: "); | 1045 | seq_printf(m, "FBC disabled: "); |
644 | switch (dev_priv->no_fbc_reason) { | 1046 | switch (dev_priv->no_fbc_reason) { |
1047 | case FBC_NO_OUTPUT: | ||
1048 | seq_printf(m, "no outputs"); | ||
1049 | break; | ||
645 | case FBC_STOLEN_TOO_SMALL: | 1050 | case FBC_STOLEN_TOO_SMALL: |
646 | seq_printf(m, "not enough stolen memory"); | 1051 | seq_printf(m, "not enough stolen memory"); |
647 | break; | 1052 | break; |
@@ -660,6 +1065,9 @@ static int i915_fbc_status(struct seq_file *m, void *unused) | |||
660 | case FBC_MULTIPLE_PIPES: | 1065 | case FBC_MULTIPLE_PIPES: |
661 | seq_printf(m, "multiple pipes are enabled"); | 1066 | seq_printf(m, "multiple pipes are enabled"); |
662 | break; | 1067 | break; |
1068 | case FBC_MODULE_PARAM: | ||
1069 | seq_printf(m, "disabled per module param (default off)"); | ||
1070 | break; | ||
663 | default: | 1071 | default: |
664 | seq_printf(m, "unknown reason"); | 1072 | seq_printf(m, "unknown reason"); |
665 | } | 1073 | } |
@@ -675,15 +1083,17 @@ static int i915_sr_status(struct seq_file *m, void *unused) | |||
675 | drm_i915_private_t *dev_priv = dev->dev_private; | 1083 | drm_i915_private_t *dev_priv = dev->dev_private; |
676 | bool sr_enabled = false; | 1084 | bool sr_enabled = false; |
677 | 1085 | ||
678 | if (IS_I965GM(dev) || IS_I945G(dev) || IS_I945GM(dev)) | 1086 | if (HAS_PCH_SPLIT(dev)) |
1087 | sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN; | ||
1088 | else if (IS_CRESTLINE(dev) || IS_I945G(dev) || IS_I945GM(dev)) | ||
679 | sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN; | 1089 | sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN; |
680 | else if (IS_I915GM(dev)) | 1090 | else if (IS_I915GM(dev)) |
681 | sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN; | 1091 | sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN; |
682 | else if (IS_PINEVIEW(dev)) | 1092 | else if (IS_PINEVIEW(dev)) |
683 | sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN; | 1093 | sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN; |
684 | 1094 | ||
685 | seq_printf(m, "self-refresh: %s\n", sr_enabled ? "enabled" : | 1095 | seq_printf(m, "self-refresh: %s\n", |
686 | "disabled"); | 1096 | sr_enabled ? "enabled" : "disabled"); |
687 | 1097 | ||
688 | return 0; | 1098 | return 0; |
689 | } | 1099 | } |
@@ -694,10 +1104,16 @@ static int i915_emon_status(struct seq_file *m, void *unused) | |||
694 | struct drm_device *dev = node->minor->dev; | 1104 | struct drm_device *dev = node->minor->dev; |
695 | drm_i915_private_t *dev_priv = dev->dev_private; | 1105 | drm_i915_private_t *dev_priv = dev->dev_private; |
696 | unsigned long temp, chipset, gfx; | 1106 | unsigned long temp, chipset, gfx; |
1107 | int ret; | ||
1108 | |||
1109 | ret = mutex_lock_interruptible(&dev->struct_mutex); | ||
1110 | if (ret) | ||
1111 | return ret; | ||
697 | 1112 | ||
698 | temp = i915_mch_val(dev_priv); | 1113 | temp = i915_mch_val(dev_priv); |
699 | chipset = i915_chipset_val(dev_priv); | 1114 | chipset = i915_chipset_val(dev_priv); |
700 | gfx = i915_gfx_val(dev_priv); | 1115 | gfx = i915_gfx_val(dev_priv); |
1116 | mutex_unlock(&dev->struct_mutex); | ||
701 | 1117 | ||
702 | seq_printf(m, "GMCH temp: %ld\n", temp); | 1118 | seq_printf(m, "GMCH temp: %ld\n", temp); |
703 | seq_printf(m, "Chipset power: %ld\n", chipset); | 1119 | seq_printf(m, "Chipset power: %ld\n", chipset); |
@@ -718,6 +1134,108 @@ static int i915_gfxec(struct seq_file *m, void *unused) | |||
718 | return 0; | 1134 | return 0; |
719 | } | 1135 | } |
720 | 1136 | ||
1137 | static int i915_opregion(struct seq_file *m, void *unused) | ||
1138 | { | ||
1139 | struct drm_info_node *node = (struct drm_info_node *) m->private; | ||
1140 | struct drm_device *dev = node->minor->dev; | ||
1141 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
1142 | struct intel_opregion *opregion = &dev_priv->opregion; | ||
1143 | int ret; | ||
1144 | |||
1145 | ret = mutex_lock_interruptible(&dev->struct_mutex); | ||
1146 | if (ret) | ||
1147 | return ret; | ||
1148 | |||
1149 | if (opregion->header) | ||
1150 | seq_write(m, opregion->header, OPREGION_SIZE); | ||
1151 | |||
1152 | mutex_unlock(&dev->struct_mutex); | ||
1153 | |||
1154 | return 0; | ||
1155 | } | ||
1156 | |||
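The new node dumps the raw OpRegion header blob, so from userspace it reads like any other debugfs file. A small sketch of that, assuming debugfs is mounted at /sys/kernel/debug and the device is DRM minor 0:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        char buf[16];
        int fd = open("/sys/kernel/debug/dri/0/i915_opregion", O_RDONLY);

        if (fd < 0 || read(fd, buf, sizeof(buf)) != (ssize_t)sizeof(buf)) {
                perror("i915_opregion");
                return 1;
        }
        /* An ACPI OpRegion begins with the "IntelGraphicsMem" signature. */
        printf("signature: %.16s\n", buf);
        return 0;
}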
1157 | static int i915_gem_framebuffer_info(struct seq_file *m, void *data) | ||
1158 | { | ||
1159 | struct drm_info_node *node = (struct drm_info_node *) m->private; | ||
1160 | struct drm_device *dev = node->minor->dev; | ||
1161 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
1162 | struct intel_fbdev *ifbdev; | ||
1163 | struct intel_framebuffer *fb; | ||
1164 | int ret; | ||
1165 | |||
1166 | ret = mutex_lock_interruptible(&dev->mode_config.mutex); | ||
1167 | if (ret) | ||
1168 | return ret; | ||
1169 | |||
1170 | ifbdev = dev_priv->fbdev; | ||
1171 | fb = to_intel_framebuffer(ifbdev->helper.fb); | ||
1172 | |||
1173 | seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, obj ", | ||
1174 | fb->base.width, | ||
1175 | fb->base.height, | ||
1176 | fb->base.depth, | ||
1177 | fb->base.bits_per_pixel); | ||
1178 | describe_obj(m, fb->obj); | ||
1179 | seq_printf(m, "\n"); | ||
1180 | |||
1181 | list_for_each_entry(fb, &dev->mode_config.fb_list, base.head) { | ||
1182 | if (&fb->base == ifbdev->helper.fb) | ||
1183 | continue; | ||
1184 | |||
1185 | seq_printf(m, "user size: %d x %d, depth %d, %d bpp, obj ", | ||
1186 | fb->base.width, | ||
1187 | fb->base.height, | ||
1188 | fb->base.depth, | ||
1189 | fb->base.bits_per_pixel); | ||
1190 | describe_obj(m, fb->obj); | ||
1191 | seq_printf(m, "\n"); | ||
1192 | } | ||
1193 | |||
1194 | mutex_unlock(&dev->mode_config.mutex); | ||
1195 | |||
1196 | return 0; | ||
1197 | } | ||
1198 | |||
1199 | static int i915_context_status(struct seq_file *m, void *unused) | ||
1200 | { | ||
1201 | struct drm_info_node *node = (struct drm_info_node *) m->private; | ||
1202 | struct drm_device *dev = node->minor->dev; | ||
1203 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
1204 | int ret; | ||
1205 | |||
1206 | ret = mutex_lock_interruptible(&dev->mode_config.mutex); | ||
1207 | if (ret) | ||
1208 | return ret; | ||
1209 | |||
1210 | if (dev_priv->pwrctx) { | ||
1211 | seq_printf(m, "power context "); | ||
1212 | describe_obj(m, dev_priv->pwrctx); | ||
1213 | seq_printf(m, "\n"); | ||
1214 | } | ||
1215 | |||
1216 | if (dev_priv->renderctx) { | ||
1217 | seq_printf(m, "render context "); | ||
1218 | describe_obj(m, dev_priv->renderctx); | ||
1219 | seq_printf(m, "\n"); | ||
1220 | } | ||
1221 | |||
1222 | mutex_unlock(&dev->mode_config.mutex); | ||
1223 | |||
1224 | return 0; | ||
1225 | } | ||
1226 | |||
1227 | static int i915_gen6_forcewake_count_info(struct seq_file *m, void *data) | ||
1228 | { | ||
1229 | struct drm_info_node *node = (struct drm_info_node *) m->private; | ||
1230 | struct drm_device *dev = node->minor->dev; | ||
1231 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1232 | |||
1233 | seq_printf(m, "forcewake count = %d\n", | ||
1234 | atomic_read(&dev_priv->forcewake_count)); | ||
1235 | |||
1236 | return 0; | ||
1237 | } | ||
1238 | |||
721 | static int | 1239 | static int |
722 | i915_wedged_open(struct inode *inode, | 1240 | i915_wedged_open(struct inode *inode, |
723 | struct file *filp) | 1241 | struct file *filp) |
@@ -741,6 +1259,9 @@ i915_wedged_read(struct file *filp, | |||
741 | "wedged : %d\n", | 1259 | "wedged : %d\n", |
742 | atomic_read(&dev_priv->mm.wedged)); | 1260 | atomic_read(&dev_priv->mm.wedged)); |
743 | 1261 | ||
1262 | if (len > sizeof (buf)) | ||
1263 | len = sizeof (buf); | ||
1264 | |||
744 | return simple_read_from_buffer(ubuf, max, ppos, buf, len); | 1265 | return simple_read_from_buffer(ubuf, max, ppos, buf, len); |
745 | } | 1266 | } |
746 | 1267 | ||
@@ -751,7 +1272,6 @@ i915_wedged_write(struct file *filp, | |||
751 | loff_t *ppos) | 1272 | loff_t *ppos) |
752 | { | 1273 | { |
753 | struct drm_device *dev = filp->private_data; | 1274 | struct drm_device *dev = filp->private_data; |
754 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
755 | char buf[20]; | 1275 | char buf[20]; |
756 | int val = 1; | 1276 | int val = 1; |
757 | 1277 | ||
@@ -767,12 +1287,7 @@ i915_wedged_write(struct file *filp, | |||
767 | } | 1287 | } |
768 | 1288 | ||
769 | DRM_INFO("Manually setting wedged to %d\n", val); | 1289 | DRM_INFO("Manually setting wedged to %d\n", val); |
770 | 1290 | i915_handle_error(dev, val); | |
771 | atomic_set(&dev_priv->mm.wedged, val); | ||
772 | if (val) { | ||
773 | DRM_WAKEUP(&dev_priv->irq_queue); | ||
774 | queue_work(dev_priv->wq, &dev_priv->error_work); | ||
775 | } | ||
776 | 1291 | ||
777 | return cnt; | 1292 | return cnt; |
778 | } | 1293 | } |
@@ -782,6 +1297,7 @@ static const struct file_operations i915_wedged_fops = { | |||
782 | .open = i915_wedged_open, | 1297 | .open = i915_wedged_open, |
783 | .read = i915_wedged_read, | 1298 | .read = i915_wedged_read, |
784 | .write = i915_wedged_write, | 1299 | .write = i915_wedged_write, |
1300 | .llseek = default_llseek, | ||
785 | }; | 1301 | }; |
786 | 1302 | ||
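With the write path now funneled through i915_handle_error() (above), writing to the file injects a full error-handling cycle rather than just flipping the wedged flag. A minimal sketch of poking it from userspace, with the usual debugfs path for minor 0 assumed:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        /* Path assumes debugfs at /sys/kernel/debug and DRM minor 0. */
        int fd = open("/sys/kernel/debug/dri/0/i915_wedged", O_WRONLY);

        if (fd < 0) {
                perror("open");
                return 1;
        }
        /* A non-zero value asks the driver to run its error handler. */
        if (write(fd, "1", 1) != 1)
                perror("write");
        close(fd);
        return 0;
}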
787 | /* As the drm_debugfs_init() routines are called before dev->dev_private is | 1303 | /* As the drm_debugfs_init() routines are called before dev->dev_private is |
@@ -822,18 +1338,90 @@ static int i915_wedged_create(struct dentry *root, struct drm_minor *minor) | |||
822 | return drm_add_fake_info_node(minor, ent, &i915_wedged_fops); | 1338 | return drm_add_fake_info_node(minor, ent, &i915_wedged_fops); |
823 | } | 1339 | } |
824 | 1340 | ||
1341 | static int i915_forcewake_open(struct inode *inode, struct file *file) | ||
1342 | { | ||
1343 | struct drm_device *dev = inode->i_private; | ||
1344 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1345 | int ret; | ||
1346 | |||
1347 | if (!IS_GEN6(dev)) | ||
1348 | return 0; | ||
1349 | |||
1350 | ret = mutex_lock_interruptible(&dev->struct_mutex); | ||
1351 | if (ret) | ||
1352 | return ret; | ||
1353 | gen6_gt_force_wake_get(dev_priv); | ||
1354 | mutex_unlock(&dev->struct_mutex); | ||
1355 | |||
1356 | return 0; | ||
1357 | } | ||
1358 | |||
1359 | static int i915_forcewake_release(struct inode *inode, struct file *file) | ||
1360 | { | ||
1361 | struct drm_device *dev = inode->i_private; | ||
1362 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1363 | |||
1364 | if (!IS_GEN6(dev)) | ||
1365 | return 0; | ||
1366 | |||
1367 | /* | ||
1368 | * It's bad that we can potentially hang userspace if struct_mutex gets | ||
1369 | * forever stuck. However, if we cannot acquire this lock it means that | ||
1370 | * almost certainly the driver has hung and is not unloadable. Therefore | ||
1371 | * hanging here is probably a minor inconvenience not to be seen by | ||
1372 | * almost any user. | ||
1373 | */ | ||
1374 | mutex_lock(&dev->struct_mutex); | ||
1375 | gen6_gt_force_wake_put(dev_priv); | ||
1376 | mutex_unlock(&dev->struct_mutex); | ||
1377 | |||
1378 | return 0; | ||
1379 | } | ||
1380 | |||
1381 | static const struct file_operations i915_forcewake_fops = { | ||
1382 | .owner = THIS_MODULE, | ||
1383 | .open = i915_forcewake_open, | ||
1384 | .release = i915_forcewake_release, | ||
1385 | }; | ||
1386 | |||
1387 | static int i915_forcewake_create(struct dentry *root, struct drm_minor *minor) | ||
1388 | { | ||
1389 | struct drm_device *dev = minor->dev; | ||
1390 | struct dentry *ent; | ||
1391 | |||
1392 | ent = debugfs_create_file("i915_forcewake_user", | ||
1393 | S_IRUSR, | ||
1394 | root, dev, | ||
1395 | &i915_forcewake_fops); | ||
1396 | if (IS_ERR(ent)) | ||
1397 | return PTR_ERR(ent); | ||
1398 | |||
1399 | return drm_add_fake_info_node(minor, ent, &i915_forcewake_fops); | ||
1400 | } | ||
1401 | |||
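i915_forcewake_user takes a force-wake reference in open() and drops it in release(), so userspace keeps the GT power well awake simply by holding the file descriptor. A sketch of the intended usage, same path assumptions as before:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        /* open() grabs a force-wake reference on Gen6 hardware. */
        int fd = open("/sys/kernel/debug/dri/0/i915_forcewake_user",
                      O_RDONLY);

        if (fd < 0) {
                perror("open");
                return 1;
        }
        sleep(10);      /* GT registers stay readable while the fd is held */
        close(fd);      /* release() drops the reference */
        return 0;
}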
825 | static struct drm_info_list i915_debugfs_list[] = { | 1402 | static struct drm_info_list i915_debugfs_list[] = { |
1403 | {"i915_capabilities", i915_capabilities, 0}, | ||
1404 | {"i915_gem_objects", i915_gem_object_info, 0}, | ||
1405 | {"i915_gem_gtt", i915_gem_gtt_info, 0}, | ||
826 | {"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST}, | 1406 | {"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST}, |
827 | {"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST}, | 1407 | {"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST}, |
828 | {"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST}, | 1408 | {"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST}, |
1409 | {"i915_gem_pinned", i915_gem_object_list_info, 0, (void *) PINNED_LIST}, | ||
1410 | {"i915_gem_deferred_free", i915_gem_object_list_info, 0, (void *) DEFERRED_FREE_LIST}, | ||
829 | {"i915_gem_pageflip", i915_gem_pageflip_info, 0}, | 1411 | {"i915_gem_pageflip", i915_gem_pageflip_info, 0}, |
830 | {"i915_gem_request", i915_gem_request_info, 0}, | 1412 | {"i915_gem_request", i915_gem_request_info, 0}, |
831 | {"i915_gem_seqno", i915_gem_seqno_info, 0}, | 1413 | {"i915_gem_seqno", i915_gem_seqno_info, 0}, |
832 | {"i915_gem_fence_regs", i915_gem_fence_regs_info, 0}, | 1414 | {"i915_gem_fence_regs", i915_gem_fence_regs_info, 0}, |
833 | {"i915_gem_interrupt", i915_interrupt_info, 0}, | 1415 | {"i915_gem_interrupt", i915_interrupt_info, 0}, |
834 | {"i915_gem_hws", i915_hws_info, 0}, | 1416 | {"i915_gem_hws", i915_hws_info, 0, (void *)RCS}, |
835 | {"i915_ringbuffer_data", i915_ringbuffer_data, 0}, | 1417 | {"i915_gem_hws_blt", i915_hws_info, 0, (void *)BCS}, |
836 | {"i915_ringbuffer_info", i915_ringbuffer_info, 0}, | 1418 | {"i915_gem_hws_bsd", i915_hws_info, 0, (void *)VCS}, |
1419 | {"i915_ringbuffer_data", i915_ringbuffer_data, 0, (void *)RCS}, | ||
1420 | {"i915_ringbuffer_info", i915_ringbuffer_info, 0, (void *)RCS}, | ||
1421 | {"i915_bsd_ringbuffer_data", i915_ringbuffer_data, 0, (void *)VCS}, | ||
1422 | {"i915_bsd_ringbuffer_info", i915_ringbuffer_info, 0, (void *)VCS}, | ||
1423 | {"i915_blt_ringbuffer_data", i915_ringbuffer_data, 0, (void *)BCS}, | ||
1424 | {"i915_blt_ringbuffer_info", i915_ringbuffer_info, 0, (void *)BCS}, | ||
837 | {"i915_batchbuffers", i915_batchbuffer_info, 0}, | 1425 | {"i915_batchbuffers", i915_batchbuffer_info, 0}, |
838 | {"i915_error_state", i915_error_state, 0}, | 1426 | {"i915_error_state", i915_error_state, 0}, |
839 | {"i915_rstdby_delays", i915_rstdby_delays, 0}, | 1427 | {"i915_rstdby_delays", i915_rstdby_delays, 0}, |
@@ -845,6 +1433,10 @@ static struct drm_info_list i915_debugfs_list[] = { | |||
845 | {"i915_gfxec", i915_gfxec, 0}, | 1433 | {"i915_gfxec", i915_gfxec, 0}, |
846 | {"i915_fbc_status", i915_fbc_status, 0}, | 1434 | {"i915_fbc_status", i915_fbc_status, 0}, |
847 | {"i915_sr_status", i915_sr_status, 0}, | 1435 | {"i915_sr_status", i915_sr_status, 0}, |
1436 | {"i915_opregion", i915_opregion, 0}, | ||
1437 | {"i915_gem_framebuffer", i915_gem_framebuffer_info, 0}, | ||
1438 | {"i915_context_status", i915_context_status, 0}, | ||
1439 | {"i915_gen6_forcewake_count", i915_gen6_forcewake_count_info, 0}, | ||
848 | }; | 1440 | }; |
849 | #define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list) | 1441 | #define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list) |
850 | 1442 | ||
@@ -856,6 +1448,10 @@ int i915_debugfs_init(struct drm_minor *minor) | |||
856 | if (ret) | 1448 | if (ret) |
857 | return ret; | 1449 | return ret; |
858 | 1450 | ||
1451 | ret = i915_forcewake_create(minor->debugfs_root, minor); | ||
1452 | if (ret) | ||
1453 | return ret; | ||
1454 | |||
859 | return drm_debugfs_create_files(i915_debugfs_list, | 1455 | return drm_debugfs_create_files(i915_debugfs_list, |
860 | I915_DEBUGFS_ENTRIES, | 1456 | I915_DEBUGFS_ENTRIES, |
861 | minor->debugfs_root, minor); | 1457 | minor->debugfs_root, minor); |
@@ -865,6 +1461,8 @@ void i915_debugfs_cleanup(struct drm_minor *minor) | |||
865 | { | 1461 | { |
866 | drm_debugfs_remove_files(i915_debugfs_list, | 1462 | drm_debugfs_remove_files(i915_debugfs_list, |
867 | I915_DEBUGFS_ENTRIES, minor); | 1463 | I915_DEBUGFS_ENTRIES, minor); |
1464 | drm_debugfs_remove_files((struct drm_info_list *) &i915_forcewake_fops, | ||
1465 | 1, minor); | ||
868 | drm_debugfs_remove_files((struct drm_info_list *) &i915_wedged_fops, | 1466 | drm_debugfs_remove_files((struct drm_info_list *) &i915_wedged_fops, |
869 | 1, minor); | 1467 | 1, minor); |
870 | } | 1468 | } |
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 2dd2c93ebfa3..296fbd66f0e1 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c | |||
@@ -34,14 +34,25 @@ | |||
34 | #include "i915_drm.h" | 34 | #include "i915_drm.h" |
35 | #include "i915_drv.h" | 35 | #include "i915_drv.h" |
36 | #include "i915_trace.h" | 36 | #include "i915_trace.h" |
37 | #include "../../../platform/x86/intel_ips.h" | ||
37 | #include <linux/pci.h> | 38 | #include <linux/pci.h> |
38 | #include <linux/vgaarb.h> | 39 | #include <linux/vgaarb.h> |
39 | #include <linux/acpi.h> | 40 | #include <linux/acpi.h> |
40 | #include <linux/pnp.h> | 41 | #include <linux/pnp.h> |
41 | #include <linux/vga_switcheroo.h> | 42 | #include <linux/vga_switcheroo.h> |
42 | #include <linux/slab.h> | 43 | #include <linux/slab.h> |
44 | #include <acpi/video.h> | ||
43 | 45 | ||
44 | extern int intel_max_stolen; /* from AGP driver */ | 46 | static void i915_write_hws_pga(struct drm_device *dev) |
47 | { | ||
48 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
49 | u32 addr; | ||
50 | |||
51 | addr = dev_priv->status_page_dmah->busaddr; | ||
52 | if (INTEL_INFO(dev)->gen >= 4) | ||
53 | addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0; | ||
54 | I915_WRITE(HWS_PGA, addr); | ||
55 | } | ||
45 | 56 | ||
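On Gen4+ the status-page register also carries bits [35:32] of the bus address in register bits [7:4]; the page is 4 KiB aligned, so those low register bits are otherwise unused. A self-contained check of the packing, using an arbitrary example address:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint64_t busaddr = 0x3c0001000ULL;      /* example 36-bit address */
        uint32_t addr = (uint32_t)busaddr;

        /* Fold address bits [35:32] into register bits [7:4] (Gen4+). */
        addr |= (busaddr >> 28) & 0xf0;
        printf("HWS_PGA = 0x%08x\n", addr);     /* prints 0xc0001030 */
        return 0;
}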
46 | /** | 57 | /** |
47 | * Sets up the hardware status page for devices that need a physical address | 58 | * Sets up the hardware status page for devices that need a physical address |
@@ -50,6 +61,8 @@ extern int intel_max_stolen; /* from AGP driver */ | |||
50 | static int i915_init_phys_hws(struct drm_device *dev) | 61 | static int i915_init_phys_hws(struct drm_device *dev) |
51 | { | 62 | { |
52 | drm_i915_private_t *dev_priv = dev->dev_private; | 63 | drm_i915_private_t *dev_priv = dev->dev_private; |
64 | struct intel_ring_buffer *ring = LP_RING(dev_priv); | ||
65 | |||
53 | /* Program Hardware Status Page */ | 66 | /* Program Hardware Status Page */ |
54 | dev_priv->status_page_dmah = | 67 | dev_priv->status_page_dmah = |
55 | drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE); | 68 | drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE); |
@@ -58,17 +71,13 @@ static int i915_init_phys_hws(struct drm_device *dev) | |||
58 | DRM_ERROR("Can not allocate hardware status page\n"); | 71 | DRM_ERROR("Can not allocate hardware status page\n"); |
59 | return -ENOMEM; | 72 | return -ENOMEM; |
60 | } | 73 | } |
61 | dev_priv->render_ring.status_page.page_addr | 74 | ring->status_page.page_addr = |
62 | = dev_priv->status_page_dmah->vaddr; | 75 | (void __force __iomem *)dev_priv->status_page_dmah->vaddr; |
63 | dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr; | ||
64 | 76 | ||
65 | memset(dev_priv->render_ring.status_page.page_addr, 0, PAGE_SIZE); | 77 | memset_io(ring->status_page.page_addr, 0, PAGE_SIZE); |
66 | 78 | ||
67 | if (IS_I965G(dev)) | 79 | i915_write_hws_pga(dev); |
68 | dev_priv->dma_status_page |= (dev_priv->dma_status_page >> 28) & | ||
69 | 0xf0; | ||
70 | 80 | ||
71 | I915_WRITE(HWS_PGA, dev_priv->dma_status_page); | ||
72 | DRM_DEBUG_DRIVER("Enabled hardware status page\n"); | 81 | DRM_DEBUG_DRIVER("Enabled hardware status page\n"); |
73 | return 0; | 82 | return 0; |
74 | } | 83 | } |
@@ -80,13 +89,15 @@ static int i915_init_phys_hws(struct drm_device *dev) | |||
80 | static void i915_free_hws(struct drm_device *dev) | 89 | static void i915_free_hws(struct drm_device *dev) |
81 | { | 90 | { |
82 | drm_i915_private_t *dev_priv = dev->dev_private; | 91 | drm_i915_private_t *dev_priv = dev->dev_private; |
92 | struct intel_ring_buffer *ring = LP_RING(dev_priv); | ||
93 | |||
83 | if (dev_priv->status_page_dmah) { | 94 | if (dev_priv->status_page_dmah) { |
84 | drm_pci_free(dev, dev_priv->status_page_dmah); | 95 | drm_pci_free(dev, dev_priv->status_page_dmah); |
85 | dev_priv->status_page_dmah = NULL; | 96 | dev_priv->status_page_dmah = NULL; |
86 | } | 97 | } |
87 | 98 | ||
88 | if (dev_priv->render_ring.status_page.gfx_addr) { | 99 | if (ring->status_page.gfx_addr) { |
89 | dev_priv->render_ring.status_page.gfx_addr = 0; | 100 | ring->status_page.gfx_addr = 0; |
90 | drm_core_ioremapfree(&dev_priv->hws_map, dev); | 101 | drm_core_ioremapfree(&dev_priv->hws_map, dev); |
91 | } | 102 | } |
92 | 103 | ||
@@ -98,7 +109,7 @@ void i915_kernel_lost_context(struct drm_device * dev) | |||
98 | { | 109 | { |
99 | drm_i915_private_t *dev_priv = dev->dev_private; | 110 | drm_i915_private_t *dev_priv = dev->dev_private; |
100 | struct drm_i915_master_private *master_priv; | 111 | struct drm_i915_master_private *master_priv; |
101 | struct intel_ring_buffer *ring = &dev_priv->render_ring; | 112 | struct intel_ring_buffer *ring = LP_RING(dev_priv); |
102 | 113 | ||
103 | /* | 114 | /* |
104 | * We should never lose context on the ring with modesetting | 115 | * We should never lose context on the ring with modesetting |
@@ -107,8 +118,8 @@ void i915_kernel_lost_context(struct drm_device * dev) | |||
107 | if (drm_core_check_feature(dev, DRIVER_MODESET)) | 118 | if (drm_core_check_feature(dev, DRIVER_MODESET)) |
108 | return; | 119 | return; |
109 | 120 | ||
110 | ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR; | 121 | ring->head = I915_READ_HEAD(ring) & HEAD_ADDR; |
111 | ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR; | 122 | ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR; |
112 | ring->space = ring->head - (ring->tail + 8); | 123 | ring->space = ring->head - (ring->tail + 8); |
113 | if (ring->space < 0) | 124 | if (ring->space < 0) |
114 | ring->space += ring->size; | 125 | ring->space += ring->size; |
@@ -124,6 +135,8 @@ void i915_kernel_lost_context(struct drm_device * dev) | |||
124 | static int i915_dma_cleanup(struct drm_device * dev) | 135 | static int i915_dma_cleanup(struct drm_device * dev) |
125 | { | 136 | { |
126 | drm_i915_private_t *dev_priv = dev->dev_private; | 137 | drm_i915_private_t *dev_priv = dev->dev_private; |
138 | int i; | ||
139 | |||
127 | /* Make sure interrupts are disabled here because the uninstall ioctl | 140 | /* Make sure interrupts are disabled here because the uninstall ioctl |
128 | * may not have been called from userspace and after dev_private | 141 | * may not have been called from userspace and after dev_private |
129 | * is freed, it's too late. | 142 | * is freed, it's too late. |
@@ -132,9 +145,8 @@ static int i915_dma_cleanup(struct drm_device * dev) | |||
132 | drm_irq_uninstall(dev); | 145 | drm_irq_uninstall(dev); |
133 | 146 | ||
134 | mutex_lock(&dev->struct_mutex); | 147 | mutex_lock(&dev->struct_mutex); |
135 | intel_cleanup_ring_buffer(dev, &dev_priv->render_ring); | 148 | for (i = 0; i < I915_NUM_RINGS; i++) |
136 | if (HAS_BSD(dev)) | 149 | intel_cleanup_ring_buffer(&dev_priv->ring[i]); |
137 | intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring); | ||
138 | mutex_unlock(&dev->struct_mutex); | 150 | mutex_unlock(&dev->struct_mutex); |
139 | 151 | ||
140 | /* Clear the HWS virtual address at teardown */ | 152 | /* Clear the HWS virtual address at teardown */ |
@@ -148,6 +160,7 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init) | |||
148 | { | 160 | { |
149 | drm_i915_private_t *dev_priv = dev->dev_private; | 161 | drm_i915_private_t *dev_priv = dev->dev_private; |
150 | struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; | 162 | struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; |
163 | int ret; | ||
151 | 164 | ||
152 | master_priv->sarea = drm_getsarea(dev); | 165 | master_priv->sarea = drm_getsarea(dev); |
153 | if (master_priv->sarea) { | 166 | if (master_priv->sarea) { |
@@ -158,33 +171,22 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init) | |||
158 | } | 171 | } |
159 | 172 | ||
160 | if (init->ring_size != 0) { | 173 | if (init->ring_size != 0) { |
161 | if (dev_priv->render_ring.gem_object != NULL) { | 174 | if (LP_RING(dev_priv)->obj != NULL) { |
162 | i915_dma_cleanup(dev); | 175 | i915_dma_cleanup(dev); |
163 | DRM_ERROR("Client tried to initialize ringbuffer in " | 176 | DRM_ERROR("Client tried to initialize ringbuffer in " |
164 | "GEM mode\n"); | 177 | "GEM mode\n"); |
165 | return -EINVAL; | 178 | return -EINVAL; |
166 | } | 179 | } |
167 | 180 | ||
168 | dev_priv->render_ring.size = init->ring_size; | 181 | ret = intel_render_ring_init_dri(dev, |
169 | 182 | init->ring_start, | |
170 | dev_priv->render_ring.map.offset = init->ring_start; | 183 | init->ring_size); |
171 | dev_priv->render_ring.map.size = init->ring_size; | 184 | if (ret) { |
172 | dev_priv->render_ring.map.type = 0; | ||
173 | dev_priv->render_ring.map.flags = 0; | ||
174 | dev_priv->render_ring.map.mtrr = 0; | ||
175 | |||
176 | drm_core_ioremap_wc(&dev_priv->render_ring.map, dev); | ||
177 | |||
178 | if (dev_priv->render_ring.map.handle == NULL) { | ||
179 | i915_dma_cleanup(dev); | 185 | i915_dma_cleanup(dev); |
180 | DRM_ERROR("can not ioremap virtual address for" | 186 | return ret; |
181 | " ring buffer\n"); | ||
182 | return -ENOMEM; | ||
183 | } | 187 | } |
184 | } | 188 | } |
185 | 189 | ||
186 | dev_priv->render_ring.virtual_start = dev_priv->render_ring.map.handle; | ||
187 | |||
188 | dev_priv->cpp = init->cpp; | 190 | dev_priv->cpp = init->cpp; |
189 | dev_priv->back_offset = init->back_offset; | 191 | dev_priv->back_offset = init->back_offset; |
190 | dev_priv->front_offset = init->front_offset; | 192 | dev_priv->front_offset = init->front_offset; |
@@ -202,12 +204,10 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init) | |||
202 | static int i915_dma_resume(struct drm_device * dev) | 204 | static int i915_dma_resume(struct drm_device * dev) |
203 | { | 205 | { |
204 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 206 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
207 | struct intel_ring_buffer *ring = LP_RING(dev_priv); | ||
205 | 208 | ||
206 | struct intel_ring_buffer *ring; | ||
207 | DRM_DEBUG_DRIVER("%s\n", __func__); | 209 | DRM_DEBUG_DRIVER("%s\n", __func__); |
208 | 210 | ||
209 | ring = &dev_priv->render_ring; | ||
210 | |||
211 | if (ring->map.handle == NULL) { | 211 | if (ring->map.handle == NULL) { |
212 | DRM_ERROR("can not ioremap virtual address for" | 212 | DRM_ERROR("can not ioremap virtual address for" |
213 | " ring buffer\n"); | 213 | " ring buffer\n"); |
@@ -222,9 +222,9 @@ static int i915_dma_resume(struct drm_device * dev) | |||
222 | DRM_DEBUG_DRIVER("hw status page @ %p\n", | 222 | DRM_DEBUG_DRIVER("hw status page @ %p\n", |
223 | ring->status_page.page_addr); | 223 | ring->status_page.page_addr); |
224 | if (ring->status_page.gfx_addr != 0) | 224 | if (ring->status_page.gfx_addr != 0) |
225 | ring->setup_status_page(dev, ring); | 225 | intel_ring_setup_status_page(ring); |
226 | else | 226 | else |
227 | I915_WRITE(HWS_PGA, dev_priv->dma_status_page); | 227 | i915_write_hws_pga(dev); |
228 | 228 | ||
229 | DRM_DEBUG_DRIVER("Enabled hardware status page\n"); | 229 | DRM_DEBUG_DRIVER("Enabled hardware status page\n"); |
230 | 230 | ||
@@ -264,7 +264,7 @@ static int i915_dma_init(struct drm_device *dev, void *data, | |||
264 | * instruction detected will be given a size of zero, which is a | 264 | * instruction detected will be given a size of zero, which is a |
265 | * signal to abort the rest of the buffer. | 265 | * signal to abort the rest of the buffer. |
266 | */ | 266 | */ |
267 | static int do_validate_cmd(int cmd) | 267 | static int validate_cmd(int cmd) |
268 | { | 268 | { |
269 | switch (((cmd >> 29) & 0x7)) { | 269 | switch (((cmd >> 29) & 0x7)) { |
270 | case 0x0: | 270 | case 0x0: |
@@ -322,40 +322,27 @@ static int do_validate_cmd(int cmd) | |||
322 | return 0; | 322 | return 0; |
323 | } | 323 | } |
324 | 324 | ||
325 | static int validate_cmd(int cmd) | ||
326 | { | ||
327 | int ret = do_validate_cmd(cmd); | ||
328 | |||
329 | /* printk("validate_cmd( %x ): %d\n", cmd, ret); */ | ||
330 | |||
331 | return ret; | ||
332 | } | ||
333 | |||
334 | static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords) | 325 | static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords) |
335 | { | 326 | { |
336 | drm_i915_private_t *dev_priv = dev->dev_private; | 327 | drm_i915_private_t *dev_priv = dev->dev_private; |
337 | int i; | 328 | int i, ret; |
338 | 329 | ||
339 | if ((dwords+1) * sizeof(int) >= dev_priv->render_ring.size - 8) | 330 | if ((dwords+1) * sizeof(int) >= LP_RING(dev_priv)->size - 8) |
340 | return -EINVAL; | 331 | return -EINVAL; |
341 | 332 | ||
342 | BEGIN_LP_RING((dwords+1)&~1); | ||
343 | |||
344 | for (i = 0; i < dwords;) { | 333 | for (i = 0; i < dwords;) { |
345 | int cmd, sz; | 334 | int sz = validate_cmd(buffer[i]); |
346 | 335 | if (sz == 0 || i + sz > dwords) | |
347 | cmd = buffer[i]; | ||
348 | |||
349 | if ((sz = validate_cmd(cmd)) == 0 || i + sz > dwords) | ||
350 | return -EINVAL; | 336 | return -EINVAL; |
351 | 337 | i += sz; | |
352 | OUT_RING(cmd); | ||
353 | |||
354 | while (++i, --sz) { | ||
355 | OUT_RING(buffer[i]); | ||
356 | } | ||
357 | } | 338 | } |
358 | 339 | ||
340 | ret = BEGIN_LP_RING((dwords+1)&~1); | ||
341 | if (ret) | ||
342 | return ret; | ||
343 | |||
344 | for (i = 0; i < dwords; i++) | ||
345 | OUT_RING(buffer[i]); | ||
359 | if (dwords & 1) | 346 | if (dwords & 1) |
360 | OUT_RING(0); | 347 | OUT_RING(0); |
361 | 348 | ||
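The rewrite validates the whole user buffer before reserving any ring space, so a malformed command can no longer abort mid-emission with dwords already queued. A condensed userspace sketch of that two-pass shape, with a toy validate() rule standing in for validate_cmd():

#include <stdio.h>

/* Toy stand-in for validate_cmd(): returns the command length in
 * dwords, or 0 to reject the whole buffer. */
static int validate(int cmd)
{
        return ((unsigned int)cmd & 0xffff0000u) == 0x02000000u ? 2 : 0;
}

static int emit_cmds(const int *buffer, int dwords)
{
        int i;

        /* Pass 1: reject bad input before touching the ring. */
        for (i = 0; i < dwords;) {
                int sz = validate(buffer[i]);

                if (sz == 0 || i + sz > dwords)
                        return -1;      /* -EINVAL in the kernel */
                i += sz;
        }

        /* Pass 2: emission can no longer fail halfway through. */
        for (i = 0; i < dwords; i++)
                printf("OUT_RING(0x%08x)\n", (unsigned int)buffer[i]);
        return 0;
}

int main(void)
{
        int buf[] = { 0x02000001, 0x00c0ffee };

        return emit_cmds(buf, 2) ? 1 : 0;
}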
@@ -366,34 +353,41 @@ static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords) | |||
366 | 353 | ||
367 | int | 354 | int |
368 | i915_emit_box(struct drm_device *dev, | 355 | i915_emit_box(struct drm_device *dev, |
369 | struct drm_clip_rect *boxes, | 356 | struct drm_clip_rect *box, |
370 | int i, int DR1, int DR4) | 357 | int DR1, int DR4) |
371 | { | 358 | { |
372 | struct drm_clip_rect box = boxes[i]; | 359 | struct drm_i915_private *dev_priv = dev->dev_private; |
360 | int ret; | ||
373 | 361 | ||
374 | if (box.y2 <= box.y1 || box.x2 <= box.x1 || box.y2 <= 0 || box.x2 <= 0) { | 362 | if (box->y2 <= box->y1 || box->x2 <= box->x1 || |
363 | box->y2 <= 0 || box->x2 <= 0) { | ||
375 | DRM_ERROR("Bad box %d,%d..%d,%d\n", | 364 | DRM_ERROR("Bad box %d,%d..%d,%d\n", |
376 | box.x1, box.y1, box.x2, box.y2); | 365 | box->x1, box->y1, box->x2, box->y2); |
377 | return -EINVAL; | 366 | return -EINVAL; |
378 | } | 367 | } |
379 | 368 | ||
380 | if (IS_I965G(dev)) { | 369 | if (INTEL_INFO(dev)->gen >= 4) { |
381 | BEGIN_LP_RING(4); | 370 | ret = BEGIN_LP_RING(4); |
371 | if (ret) | ||
372 | return ret; | ||
373 | |||
382 | OUT_RING(GFX_OP_DRAWRECT_INFO_I965); | 374 | OUT_RING(GFX_OP_DRAWRECT_INFO_I965); |
383 | OUT_RING((box.x1 & 0xffff) | (box.y1 << 16)); | 375 | OUT_RING((box->x1 & 0xffff) | (box->y1 << 16)); |
384 | OUT_RING(((box.x2 - 1) & 0xffff) | ((box.y2 - 1) << 16)); | 376 | OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16)); |
385 | OUT_RING(DR4); | 377 | OUT_RING(DR4); |
386 | ADVANCE_LP_RING(); | ||
387 | } else { | 378 | } else { |
388 | BEGIN_LP_RING(6); | 379 | ret = BEGIN_LP_RING(6); |
380 | if (ret) | ||
381 | return ret; | ||
382 | |||
389 | OUT_RING(GFX_OP_DRAWRECT_INFO); | 383 | OUT_RING(GFX_OP_DRAWRECT_INFO); |
390 | OUT_RING(DR1); | 384 | OUT_RING(DR1); |
391 | OUT_RING((box.x1 & 0xffff) | (box.y1 << 16)); | 385 | OUT_RING((box->x1 & 0xffff) | (box->y1 << 16)); |
392 | OUT_RING(((box.x2 - 1) & 0xffff) | ((box.y2 - 1) << 16)); | 386 | OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16)); |
393 | OUT_RING(DR4); | 387 | OUT_RING(DR4); |
394 | OUT_RING(0); | 388 | OUT_RING(0); |
395 | ADVANCE_LP_RING(); | ||
396 | } | 389 | } |
390 | ADVANCE_LP_RING(); | ||
397 | 391 | ||
398 | return 0; | 392 | return 0; |
399 | } | 393 | } |
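DRAWRECT corners are packed two per dword, x in the low 16 bits and y in the high 16, with the max corner stored inclusive (hence the -1). A quick check of the packing used above:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        int x1 = 0, y1 = 0, x2 = 1024, y2 = 768;        /* 1024x768 box */
        uint32_t min = (x1 & 0xffff) | (y1 << 16);
        uint32_t max = ((x2 - 1) & 0xffff) | ((y2 - 1) << 16);

        /* Expect min 0x00000000, max 0x02ff03ff. */
        printf("min 0x%08x max 0x%08x\n", min, max);
        return 0;
}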
@@ -413,12 +407,13 @@ static void i915_emit_breadcrumb(struct drm_device *dev) | |||
413 | if (master_priv->sarea_priv) | 407 | if (master_priv->sarea_priv) |
414 | master_priv->sarea_priv->last_enqueue = dev_priv->counter; | 408 | master_priv->sarea_priv->last_enqueue = dev_priv->counter; |
415 | 409 | ||
416 | BEGIN_LP_RING(4); | 410 | if (BEGIN_LP_RING(4) == 0) { |
417 | OUT_RING(MI_STORE_DWORD_INDEX); | 411 | OUT_RING(MI_STORE_DWORD_INDEX); |
418 | OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT); | 412 | OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT); |
419 | OUT_RING(dev_priv->counter); | 413 | OUT_RING(dev_priv->counter); |
420 | OUT_RING(0); | 414 | OUT_RING(0); |
421 | ADVANCE_LP_RING(); | 415 | ADVANCE_LP_RING(); |
416 | } | ||
422 | } | 417 | } |
423 | 418 | ||
424 | static int i915_dispatch_cmdbuffer(struct drm_device * dev, | 419 | static int i915_dispatch_cmdbuffer(struct drm_device * dev, |
@@ -440,7 +435,7 @@ static int i915_dispatch_cmdbuffer(struct drm_device * dev, | |||
440 | 435 | ||
441 | for (i = 0; i < count; i++) { | 436 | for (i = 0; i < count; i++) { |
442 | if (i < nbox) { | 437 | if (i < nbox) { |
443 | ret = i915_emit_box(dev, cliprects, i, | 438 | ret = i915_emit_box(dev, &cliprects[i], |
444 | cmd->DR1, cmd->DR4); | 439 | cmd->DR1, cmd->DR4); |
445 | if (ret) | 440 | if (ret) |
446 | return ret; | 441 | return ret; |
@@ -459,8 +454,9 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev, | |||
459 | drm_i915_batchbuffer_t * batch, | 454 | drm_i915_batchbuffer_t * batch, |
460 | struct drm_clip_rect *cliprects) | 455 | struct drm_clip_rect *cliprects) |
461 | { | 456 | { |
457 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
462 | int nbox = batch->num_cliprects; | 458 | int nbox = batch->num_cliprects; |
463 | int i = 0, count; | 459 | int i, count, ret; |
464 | 460 | ||
465 | if ((batch->start | batch->used) & 0x7) { | 461 | if ((batch->start | batch->used) & 0x7) { |
466 | DRM_ERROR("alignment"); | 462 | DRM_ERROR("alignment"); |
@@ -470,44 +466,49 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev, | |||
470 | i915_kernel_lost_context(dev); | 466 | i915_kernel_lost_context(dev); |
471 | 467 | ||
472 | count = nbox ? nbox : 1; | 468 | count = nbox ? nbox : 1; |
473 | |||
474 | for (i = 0; i < count; i++) { | 469 | for (i = 0; i < count; i++) { |
475 | if (i < nbox) { | 470 | if (i < nbox) { |
476 | int ret = i915_emit_box(dev, cliprects, i, | 471 | ret = i915_emit_box(dev, &cliprects[i], |
477 | batch->DR1, batch->DR4); | 472 | batch->DR1, batch->DR4); |
478 | if (ret) | 473 | if (ret) |
479 | return ret; | 474 | return ret; |
480 | } | 475 | } |
481 | 476 | ||
482 | if (!IS_I830(dev) && !IS_845G(dev)) { | 477 | if (!IS_I830(dev) && !IS_845G(dev)) { |
483 | BEGIN_LP_RING(2); | 478 | ret = BEGIN_LP_RING(2); |
484 | if (IS_I965G(dev)) { | 479 | if (ret) |
480 | return ret; | ||
481 | |||
482 | if (INTEL_INFO(dev)->gen >= 4) { | ||
485 | OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965); | 483 | OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965); |
486 | OUT_RING(batch->start); | 484 | OUT_RING(batch->start); |
487 | } else { | 485 | } else { |
488 | OUT_RING(MI_BATCH_BUFFER_START | (2 << 6)); | 486 | OUT_RING(MI_BATCH_BUFFER_START | (2 << 6)); |
489 | OUT_RING(batch->start | MI_BATCH_NON_SECURE); | 487 | OUT_RING(batch->start | MI_BATCH_NON_SECURE); |
490 | } | 488 | } |
491 | ADVANCE_LP_RING(); | ||
492 | } else { | 489 | } else { |
493 | BEGIN_LP_RING(4); | 490 | ret = BEGIN_LP_RING(4); |
491 | if (ret) | ||
492 | return ret; | ||
493 | |||
494 | OUT_RING(MI_BATCH_BUFFER); | 494 | OUT_RING(MI_BATCH_BUFFER); |
495 | OUT_RING(batch->start | MI_BATCH_NON_SECURE); | 495 | OUT_RING(batch->start | MI_BATCH_NON_SECURE); |
496 | OUT_RING(batch->start + batch->used - 4); | 496 | OUT_RING(batch->start + batch->used - 4); |
497 | OUT_RING(0); | 497 | OUT_RING(0); |
498 | ADVANCE_LP_RING(); | ||
499 | } | 498 | } |
499 | ADVANCE_LP_RING(); | ||
500 | } | 500 | } |
501 | 501 | ||
502 | 502 | ||
503 | if (IS_G4X(dev) || IS_IRONLAKE(dev)) { | 503 | if (IS_G4X(dev) || IS_GEN5(dev)) { |
504 | BEGIN_LP_RING(2); | 504 | if (BEGIN_LP_RING(2) == 0) { |
505 | OUT_RING(MI_FLUSH | MI_NO_WRITE_FLUSH | MI_INVALIDATE_ISP); | 505 | OUT_RING(MI_FLUSH | MI_NO_WRITE_FLUSH | MI_INVALIDATE_ISP); |
506 | OUT_RING(MI_NOOP); | 506 | OUT_RING(MI_NOOP); |
507 | ADVANCE_LP_RING(); | 507 | ADVANCE_LP_RING(); |
508 | } | ||
508 | } | 509 | } |
509 | i915_emit_breadcrumb(dev); | ||
510 | 510 | ||
511 | i915_emit_breadcrumb(dev); | ||
511 | return 0; | 512 | return 0; |
512 | } | 513 | } |
513 | 514 | ||
@@ -516,6 +517,7 @@ static int i915_dispatch_flip(struct drm_device * dev) | |||
516 | drm_i915_private_t *dev_priv = dev->dev_private; | 517 | drm_i915_private_t *dev_priv = dev->dev_private; |
517 | struct drm_i915_master_private *master_priv = | 518 | struct drm_i915_master_private *master_priv = |
518 | dev->primary->master->driver_priv; | 519 | dev->primary->master->driver_priv; |
520 | int ret; | ||
519 | 521 | ||
520 | if (!master_priv->sarea_priv) | 522 | if (!master_priv->sarea_priv) |
521 | return -EINVAL; | 523 | return -EINVAL; |
@@ -527,12 +529,13 @@ static int i915_dispatch_flip(struct drm_device * dev) | |||
527 | 529 | ||
528 | i915_kernel_lost_context(dev); | 530 | i915_kernel_lost_context(dev); |
529 | 531 | ||
530 | BEGIN_LP_RING(2); | 532 | ret = BEGIN_LP_RING(10); |
533 | if (ret) | ||
534 | return ret; | ||
535 | |||
531 | OUT_RING(MI_FLUSH | MI_READ_FLUSH); | 536 | OUT_RING(MI_FLUSH | MI_READ_FLUSH); |
532 | OUT_RING(0); | 537 | OUT_RING(0); |
533 | ADVANCE_LP_RING(); | ||
534 | 538 | ||
535 | BEGIN_LP_RING(6); | ||
536 | OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP); | 539 | OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP); |
537 | OUT_RING(0); | 540 | OUT_RING(0); |
538 | if (dev_priv->current_page == 0) { | 541 | if (dev_priv->current_page == 0) { |
@@ -543,33 +546,32 @@ static int i915_dispatch_flip(struct drm_device * dev) | |||
543 | dev_priv->current_page = 0; | 546 | dev_priv->current_page = 0; |
544 | } | 547 | } |
545 | OUT_RING(0); | 548 | OUT_RING(0); |
546 | ADVANCE_LP_RING(); | ||
547 | 549 | ||
548 | BEGIN_LP_RING(2); | ||
549 | OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_A_FLIP); | 550 | OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_A_FLIP); |
550 | OUT_RING(0); | 551 | OUT_RING(0); |
552 | |||
551 | ADVANCE_LP_RING(); | 553 | ADVANCE_LP_RING(); |
552 | 554 | ||
553 | master_priv->sarea_priv->last_enqueue = dev_priv->counter++; | 555 | master_priv->sarea_priv->last_enqueue = dev_priv->counter++; |
554 | 556 | ||
555 | BEGIN_LP_RING(4); | 557 | if (BEGIN_LP_RING(4) == 0) { |
556 | OUT_RING(MI_STORE_DWORD_INDEX); | 558 | OUT_RING(MI_STORE_DWORD_INDEX); |
557 | OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT); | 559 | OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT); |
558 | OUT_RING(dev_priv->counter); | 560 | OUT_RING(dev_priv->counter); |
559 | OUT_RING(0); | 561 | OUT_RING(0); |
560 | ADVANCE_LP_RING(); | 562 | ADVANCE_LP_RING(); |
563 | } | ||
561 | 564 | ||
562 | master_priv->sarea_priv->pf_current_page = dev_priv->current_page; | 565 | master_priv->sarea_priv->pf_current_page = dev_priv->current_page; |
563 | return 0; | 566 | return 0; |
564 | } | 567 | } |
565 | 568 | ||
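The flip path previously issued three separate reservations (2 + 6 + 2 dwords); merging them into one BEGIN_LP_RING(10) means a full ring is detected once, up front, instead of leaving a half-emitted flip behind. The dword budget, spelled out:

#include <stdio.h>

int main(void)
{
        int flush = 2;  /* MI_FLUSH + padding */
        int flip = 6;   /* DISPLAYBUFFER_INFO and its operands */
        int wait = 2;   /* MI_WAIT_FOR_EVENT + padding */

        /* One up-front reservation replaces three BEGIN/ADVANCE pairs. */
        printf("BEGIN_LP_RING(%d)\n", flush + flip + wait);     /* 10 */
        return 0;
}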
566 | static int i915_quiescent(struct drm_device * dev) | 569 | static int i915_quiescent(struct drm_device *dev) |
567 | { | 570 | { |
568 | drm_i915_private_t *dev_priv = dev->dev_private; | 571 | struct intel_ring_buffer *ring = LP_RING(dev->dev_private); |
569 | 572 | ||
570 | i915_kernel_lost_context(dev); | 573 | i915_kernel_lost_context(dev); |
571 | return intel_wait_ring_buffer(dev, &dev_priv->render_ring, | 574 | return intel_wait_ring_idle(ring); |
572 | dev_priv->render_ring.size - 8); | ||
573 | } | 575 | } |
574 | 576 | ||
575 | static int i915_flush_ioctl(struct drm_device *dev, void *data, | 577 | static int i915_flush_ioctl(struct drm_device *dev, void *data, |
@@ -765,6 +767,21 @@ static int i915_getparam(struct drm_device *dev, void *data, | |||
765 | case I915_PARAM_HAS_BSD: | 767 | case I915_PARAM_HAS_BSD: |
766 | value = HAS_BSD(dev); | 768 | value = HAS_BSD(dev); |
767 | break; | 769 | break; |
770 | case I915_PARAM_HAS_BLT: | ||
771 | value = HAS_BLT(dev); | ||
772 | break; | ||
773 | case I915_PARAM_HAS_RELAXED_FENCING: | ||
774 | value = 1; | ||
775 | break; | ||
776 | case I915_PARAM_HAS_COHERENT_RINGS: | ||
777 | value = 1; | ||
778 | break; | ||
779 | case I915_PARAM_HAS_EXEC_CONSTANTS: | ||
780 | value = INTEL_INFO(dev)->gen >= 4; | ||
781 | break; | ||
782 | case I915_PARAM_HAS_RELAXED_DELTA: | ||
783 | value = 1; | ||
784 | break; | ||
768 | default: | 785 | default: |
769 | DRM_DEBUG_DRIVER("Unknown parameter %d\n", | 786 | DRM_DEBUG_DRIVER("Unknown parameter %d\n", |
770 | param->param); | 787 | param->param); |
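Userspace discovers these capability bits through the GETPARAM ioctl. A minimal probe for the new BLT-ring parameter, assuming the exported uapi header is installed as <drm/i915_drm.h> and the device node is /dev/dri/card0:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

int main(void)
{
        int fd = open("/dev/dri/card0", O_RDWR);
        int value = 0;
        struct drm_i915_getparam gp = {
                .param = I915_PARAM_HAS_BLT,
                .value = &value,
        };

        if (fd < 0 || ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) < 0) {
                perror("I915_GETPARAM");
                return 1;
        }
        printf("BLT ring: %s\n", value ? "present" : "absent");
        return 0;
}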
@@ -820,7 +837,7 @@ static int i915_set_status_page(struct drm_device *dev, void *data, | |||
820 | { | 837 | { |
821 | drm_i915_private_t *dev_priv = dev->dev_private; | 838 | drm_i915_private_t *dev_priv = dev->dev_private; |
822 | drm_i915_hws_addr_t *hws = data; | 839 | drm_i915_hws_addr_t *hws = data; |
823 | struct intel_ring_buffer *ring = &dev_priv->render_ring; | 840 | struct intel_ring_buffer *ring = LP_RING(dev_priv); |
824 | 841 | ||
825 | if (!I915_NEED_GFX_HWS(dev)) | 842 | if (!I915_NEED_GFX_HWS(dev)) |
826 | return -EINVAL; | 843 | return -EINVAL; |
@@ -853,8 +870,9 @@ static int i915_set_status_page(struct drm_device *dev, void *data, | |||
853 | " G33 hw status page\n"); | 870 | " G33 hw status page\n"); |
854 | return -ENOMEM; | 871 | return -ENOMEM; |
855 | } | 872 | } |
856 | ring->status_page.page_addr = dev_priv->hws_map.handle; | 873 | ring->status_page.page_addr = |
857 | memset(ring->status_page.page_addr, 0, PAGE_SIZE); | 874 | (void __force __iomem *)dev_priv->hws_map.handle; |
875 | memset_io(ring->status_page.page_addr, 0, PAGE_SIZE); | ||
858 | I915_WRITE(HWS_PGA, ring->status_page.gfx_addr); | 876 | I915_WRITE(HWS_PGA, ring->status_page.gfx_addr); |
859 | 877 | ||
860 | DRM_DEBUG_DRIVER("load hws HWS_PGA with gfx mem 0x%x\n", | 878 | DRM_DEBUG_DRIVER("load hws HWS_PGA with gfx mem 0x%x\n", |
@@ -888,12 +906,12 @@ static int | |||
888 | intel_alloc_mchbar_resource(struct drm_device *dev) | 906 | intel_alloc_mchbar_resource(struct drm_device *dev) |
889 | { | 907 | { |
890 | drm_i915_private_t *dev_priv = dev->dev_private; | 908 | drm_i915_private_t *dev_priv = dev->dev_private; |
891 | int reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915; | 909 | int reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915; |
892 | u32 temp_lo, temp_hi = 0; | 910 | u32 temp_lo, temp_hi = 0; |
893 | u64 mchbar_addr; | 911 | u64 mchbar_addr; |
894 | int ret; | 912 | int ret; |
895 | 913 | ||
896 | if (IS_I965G(dev)) | 914 | if (INTEL_INFO(dev)->gen >= 4) |
897 | pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi); | 915 | pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi); |
898 | pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo); | 916 | pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo); |
899 | mchbar_addr = ((u64)temp_hi << 32) | temp_lo; | 917 | mchbar_addr = ((u64)temp_hi << 32) | temp_lo; |
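MCHBAR is a 64-bit BAR read as two 32-bit config dwords; the high half only exists on Gen4+, hence the conditional read before the hi/lo assembly. A quick check of that assembly, with example register values and the enable bit assumed to sit at bit 0:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint32_t temp_lo = 0xfed10001;  /* low dword, enable bit set */
        uint32_t temp_hi = 0x00000001;  /* high dword (Gen4+ only) */
        uint64_t mchbar_addr = ((uint64_t)temp_hi << 32) | temp_lo;

        /* Strip the (assumed) bit-0 enable to recover the base. */
        printf("MCHBAR base 0x%llx\n",
               (unsigned long long)(mchbar_addr & ~1ULL));
        return 0;
}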
@@ -920,7 +938,7 @@ intel_alloc_mchbar_resource(struct drm_device *dev) | |||
920 | return ret; | 938 | return ret; |
921 | } | 939 | } |
922 | 940 | ||
923 | if (IS_I965G(dev)) | 941 | if (INTEL_INFO(dev)->gen >= 4) |
924 | pci_write_config_dword(dev_priv->bridge_dev, reg + 4, | 942 | pci_write_config_dword(dev_priv->bridge_dev, reg + 4, |
925 | upper_32_bits(dev_priv->mch_res.start)); | 943 | upper_32_bits(dev_priv->mch_res.start)); |
926 | 944 | ||
@@ -934,7 +952,7 @@ static void | |||
934 | intel_setup_mchbar(struct drm_device *dev) | 952 | intel_setup_mchbar(struct drm_device *dev) |
935 | { | 953 | { |
936 | drm_i915_private_t *dev_priv = dev->dev_private; | 954 | drm_i915_private_t *dev_priv = dev->dev_private; |
937 | int mchbar_reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915; | 955 | int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915; |
938 | u32 temp; | 956 | u32 temp; |
939 | bool enabled; | 957 | bool enabled; |
940 | 958 | ||
@@ -971,7 +989,7 @@ static void | |||
971 | intel_teardown_mchbar(struct drm_device *dev) | 989 | intel_teardown_mchbar(struct drm_device *dev) |
972 | { | 990 | { |
973 | drm_i915_private_t *dev_priv = dev->dev_private; | 991 | drm_i915_private_t *dev_priv = dev->dev_private; |
974 | int mchbar_reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915; | 992 | int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915; |
975 | u32 temp; | 993 | u32 temp; |
976 | 994 | ||
977 | if (dev_priv->mchbar_need_disable) { | 995 | if (dev_priv->mchbar_need_disable) { |
@@ -990,174 +1008,6 @@ intel_teardown_mchbar(struct drm_device *dev) | |||
990 | release_resource(&dev_priv->mch_res); | 1008 | release_resource(&dev_priv->mch_res); |
991 | } | 1009 | } |
992 | 1010 | ||
993 | /** | ||
994 | * i915_probe_agp - get AGP bootup configuration | ||
995 | * @pdev: PCI device | ||
996 | * @aperture_size: returns AGP aperture configured size | ||
997 | * @preallocated_size: returns size of BIOS preallocated AGP space | ||
998 | * | ||
999 | * Since Intel integrated graphics are UMA, the BIOS has to set aside | ||
1000 | * some RAM for the framebuffer at early boot. This code figures out | ||
1001 | * how much was set aside so we can use it for our own purposes. | ||
1002 | */ | ||
1003 | static int i915_probe_agp(struct drm_device *dev, uint32_t *aperture_size, | ||
1004 | uint32_t *preallocated_size, | ||
1005 | uint32_t *start) | ||
1006 | { | ||
1007 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1008 | u16 tmp = 0; | ||
1009 | unsigned long overhead; | ||
1010 | unsigned long stolen; | ||
1011 | |||
1012 | /* Get the fb aperture size and "stolen" memory amount. */ | ||
1013 | pci_read_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, &tmp); | ||
1014 | |||
1015 | *aperture_size = 1024 * 1024; | ||
1016 | *preallocated_size = 1024 * 1024; | ||
1017 | |||
1018 | switch (dev->pdev->device) { | ||
1019 | case PCI_DEVICE_ID_INTEL_82830_CGC: | ||
1020 | case PCI_DEVICE_ID_INTEL_82845G_IG: | ||
1021 | case PCI_DEVICE_ID_INTEL_82855GM_IG: | ||
1022 | case PCI_DEVICE_ID_INTEL_82865_IG: | ||
1023 | if ((tmp & INTEL_GMCH_MEM_MASK) == INTEL_GMCH_MEM_64M) | ||
1024 | *aperture_size *= 64; | ||
1025 | else | ||
1026 | *aperture_size *= 128; | ||
1027 | break; | ||
1028 | default: | ||
1029 | /* 9xx supports large sizes, just look at the length */ | ||
1030 | *aperture_size = pci_resource_len(dev->pdev, 2); | ||
1031 | break; | ||
1032 | } | ||
1033 | |||
1034 | /* | ||
1035 | * Some of the preallocated space is taken by the GTT | ||
1036 | * and popup. GTT is 1K per MB of aperture size, and popup is 4K. | ||
1037 | */ | ||
1038 | if (IS_G4X(dev) || IS_PINEVIEW(dev) || IS_IRONLAKE(dev) || IS_GEN6(dev)) | ||
1039 | overhead = 4096; | ||
1040 | else | ||
1041 | overhead = (*aperture_size / 1024) + 4096; | ||
1042 | |||
1043 | if (IS_GEN6(dev)) { | ||
1044 | /* SNB has memory control reg at 0x50.w */ | ||
1045 | pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &tmp); | ||
1046 | |||
1047 | switch (tmp & SNB_GMCH_GMS_STOLEN_MASK) { | ||
1048 | case INTEL_855_GMCH_GMS_DISABLED: | ||
1049 | DRM_ERROR("video memory is disabled\n"); | ||
1050 | return -1; | ||
1051 | case SNB_GMCH_GMS_STOLEN_32M: | ||
1052 | stolen = 32 * 1024 * 1024; | ||
1053 | break; | ||
1054 | case SNB_GMCH_GMS_STOLEN_64M: | ||
1055 | stolen = 64 * 1024 * 1024; | ||
1056 | break; | ||
1057 | case SNB_GMCH_GMS_STOLEN_96M: | ||
1058 | stolen = 96 * 1024 * 1024; | ||
1059 | break; | ||
1060 | case SNB_GMCH_GMS_STOLEN_128M: | ||
1061 | stolen = 128 * 1024 * 1024; | ||
1062 | break; | ||
1063 | case SNB_GMCH_GMS_STOLEN_160M: | ||
1064 | stolen = 160 * 1024 * 1024; | ||
1065 | break; | ||
1066 | case SNB_GMCH_GMS_STOLEN_192M: | ||
1067 | stolen = 192 * 1024 * 1024; | ||
1068 | break; | ||
1069 | case SNB_GMCH_GMS_STOLEN_224M: | ||
1070 | stolen = 224 * 1024 * 1024; | ||
1071 | break; | ||
1072 | case SNB_GMCH_GMS_STOLEN_256M: | ||
1073 | stolen = 256 * 1024 * 1024; | ||
1074 | break; | ||
1075 | case SNB_GMCH_GMS_STOLEN_288M: | ||
1076 | stolen = 288 * 1024 * 1024; | ||
1077 | break; | ||
1078 | case SNB_GMCH_GMS_STOLEN_320M: | ||
1079 | stolen = 320 * 1024 * 1024; | ||
1080 | break; | ||
1081 | case SNB_GMCH_GMS_STOLEN_352M: | ||
1082 | stolen = 352 * 1024 * 1024; | ||
1083 | break; | ||
1084 | case SNB_GMCH_GMS_STOLEN_384M: | ||
1085 | stolen = 384 * 1024 * 1024; | ||
1086 | break; | ||
1087 | case SNB_GMCH_GMS_STOLEN_416M: | ||
1088 | stolen = 416 * 1024 * 1024; | ||
1089 | break; | ||
1090 | case SNB_GMCH_GMS_STOLEN_448M: | ||
1091 | stolen = 448 * 1024 * 1024; | ||
1092 | break; | ||
1093 | case SNB_GMCH_GMS_STOLEN_480M: | ||
1094 | stolen = 480 * 1024 * 1024; | ||
1095 | break; | ||
1096 | case SNB_GMCH_GMS_STOLEN_512M: | ||
1097 | stolen = 512 * 1024 * 1024; | ||
1098 | break; | ||
1099 | default: | ||
1100 | DRM_ERROR("unexpected GMCH_GMS value: 0x%02x\n", | ||
1101 | tmp & SNB_GMCH_GMS_STOLEN_MASK); | ||
1102 | return -1; | ||
1103 | } | ||
1104 | } else { | ||
1105 | switch (tmp & INTEL_GMCH_GMS_MASK) { | ||
1106 | case INTEL_855_GMCH_GMS_DISABLED: | ||
1107 | DRM_ERROR("video memory is disabled\n"); | ||
1108 | return -1; | ||
1109 | case INTEL_855_GMCH_GMS_STOLEN_1M: | ||
1110 | stolen = 1 * 1024 * 1024; | ||
1111 | break; | ||
1112 | case INTEL_855_GMCH_GMS_STOLEN_4M: | ||
1113 | stolen = 4 * 1024 * 1024; | ||
1114 | break; | ||
1115 | case INTEL_855_GMCH_GMS_STOLEN_8M: | ||
1116 | stolen = 8 * 1024 * 1024; | ||
1117 | break; | ||
1118 | case INTEL_855_GMCH_GMS_STOLEN_16M: | ||
1119 | stolen = 16 * 1024 * 1024; | ||
1120 | break; | ||
1121 | case INTEL_855_GMCH_GMS_STOLEN_32M: | ||
1122 | stolen = 32 * 1024 * 1024; | ||
1123 | break; | ||
1124 | case INTEL_915G_GMCH_GMS_STOLEN_48M: | ||
1125 | stolen = 48 * 1024 * 1024; | ||
1126 | break; | ||
1127 | case INTEL_915G_GMCH_GMS_STOLEN_64M: | ||
1128 | stolen = 64 * 1024 * 1024; | ||
1129 | break; | ||
1130 | case INTEL_GMCH_GMS_STOLEN_128M: | ||
1131 | stolen = 128 * 1024 * 1024; | ||
1132 | break; | ||
1133 | case INTEL_GMCH_GMS_STOLEN_256M: | ||
1134 | stolen = 256 * 1024 * 1024; | ||
1135 | break; | ||
1136 | case INTEL_GMCH_GMS_STOLEN_96M: | ||
1137 | stolen = 96 * 1024 * 1024; | ||
1138 | break; | ||
1139 | case INTEL_GMCH_GMS_STOLEN_160M: | ||
1140 | stolen = 160 * 1024 * 1024; | ||
1141 | break; | ||
1142 | case INTEL_GMCH_GMS_STOLEN_224M: | ||
1143 | stolen = 224 * 1024 * 1024; | ||
1144 | break; | ||
1145 | case INTEL_GMCH_GMS_STOLEN_352M: | ||
1146 | stolen = 352 * 1024 * 1024; | ||
1147 | break; | ||
1148 | default: | ||
1149 | DRM_ERROR("unexpected GMCH_GMS value: 0x%02x\n", | ||
1150 | tmp & INTEL_GMCH_GMS_MASK); | ||
1151 | return -1; | ||
1152 | } | ||
1153 | } | ||
1154 | |||
1155 | *preallocated_size = stolen - overhead; | ||
1156 | *start = overhead; | ||
1157 | |||
1158 | return 0; | ||
1159 | } | ||
1160 | |||
1161 | #define PTE_ADDRESS_MASK 0xfffff000 | 1011 | #define PTE_ADDRESS_MASK 0xfffff000 |
1162 | #define PTE_ADDRESS_MASK_HIGH 0x000000f0 /* i915+ */ | 1012 | #define PTE_ADDRESS_MASK_HIGH 0x000000f0 /* i915+ */ |
1163 | #define PTE_MAPPING_TYPE_UNCACHED (0 << 1) | 1013 | #define PTE_MAPPING_TYPE_UNCACHED (0 << 1) |
@@ -1167,75 +1017,47 @@ static int i915_probe_agp(struct drm_device *dev, uint32_t *aperture_size, | |||
1167 | #define PTE_VALID (1 << 0) | 1017 | #define PTE_VALID (1 << 0) |
1168 | 1018 | ||
1169 | /** | 1019 | /** |
1170 | * i915_gtt_to_phys - take a GTT address and turn it into a physical one | 1020 | * i915_stolen_to_phys - take an offset into stolen memory and turn it into |
1021 | * a physical one | ||
1171 | * @dev: drm device | 1022 | * @dev: drm device |
1172 | * @gtt_addr: address to translate | 1023 | * @offset: address to translate |
1173 | * | 1024 | * |
1174 | * Some chip functions require allocations from stolen space but need the | 1025 | * Some chip functions require allocations from stolen space and need the |
1175 | * physical address of the memory in question. We use this routine | 1026 | * physical address of the memory in question. |
1176 | * to get a physical address suitable for register programming from a given | ||
1177 | * GTT address. | ||
1178 | */ | 1027 | */ |
1179 | static unsigned long i915_gtt_to_phys(struct drm_device *dev, | 1028 | static unsigned long i915_stolen_to_phys(struct drm_device *dev, u32 offset) |
1180 | unsigned long gtt_addr) | ||
1181 | { | 1029 | { |
1182 | unsigned long *gtt; | 1030 | struct drm_i915_private *dev_priv = dev->dev_private; |
1183 | unsigned long entry, phys; | 1031 | struct pci_dev *pdev = dev_priv->bridge_dev; |
1184 | int gtt_bar = IS_I9XX(dev) ? 0 : 1; | 1032 | u32 base; |
1185 | int gtt_offset, gtt_size; | 1033 | |
1186 | 1034 | #if 0 | |
1187 | if (IS_I965G(dev)) { | 1035 | /* On the machines I have tested the Graphics Base of Stolen Memory |
1188 | if (IS_G4X(dev) || IS_IRONLAKE(dev) || IS_GEN6(dev)) { | 1036 | * is unreliable, so compute the base by subtracting the stolen memory |
1189 | gtt_offset = 2*1024*1024; | 1037 | * from the Top of Low Usable DRAM which is where the BIOS places |
1190 | gtt_size = 2*1024*1024; | 1038 | * the graphics stolen memory. |
1191 | } else { | 1039 | */ |
1192 | gtt_offset = 512*1024; | 1040 | if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) { |
1193 | gtt_size = 512*1024; | 1041 | /* top 32bits are reserved = 0 */ |
1194 | } | 1042 | pci_read_config_dword(pdev, 0xA4, &base); |
1195 | } else { | 1043 | } else { |
1196 | gtt_bar = 3; | 1044 | /* XXX presume 8xx is the same as i915 */ |
1197 | gtt_offset = 0; | 1045 | pci_bus_read_config_dword(pdev->bus, 2, 0x5C, &base); |
1198 | gtt_size = pci_resource_len(dev->pdev, gtt_bar); | 1046 | } |
1199 | } | 1047 | #else |
1200 | 1048 | if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) { | |
1201 | gtt = ioremap_wc(pci_resource_start(dev->pdev, gtt_bar) + gtt_offset, | 1049 | u16 val; |
1202 | gtt_size); | 1050 | pci_read_config_word(pdev, 0xb0, &val); |
1203 | if (!gtt) { | 1051 | base = val >> 4 << 20; |
1204 | DRM_ERROR("ioremap of GTT failed\n"); | 1052 | } else { |
1205 | return 0; | 1053 | u8 val; |
1206 | } | 1054 | pci_read_config_byte(pdev, 0x9c, &val); |
1207 | 1055 | base = val >> 3 << 27; | |
1208 | entry = *(volatile u32 *)(gtt + (gtt_addr / 1024)); | ||
1209 | |||
1210 | DRM_DEBUG_DRIVER("GTT addr: 0x%08lx, PTE: 0x%08lx\n", gtt_addr, entry); | ||
1211 | |||
1212 | /* Mask out these reserved bits on this hardware. */ | ||
1213 | if (!IS_I9XX(dev) || IS_I915G(dev) || IS_I915GM(dev) || | ||
1214 | IS_I945G(dev) || IS_I945GM(dev)) { | ||
1215 | entry &= ~PTE_ADDRESS_MASK_HIGH; | ||
1216 | } | ||
1217 | |||
1218 | /* If it's not a mapping type we know, then bail. */ | ||
1219 | if ((entry & PTE_MAPPING_TYPE_MASK) != PTE_MAPPING_TYPE_UNCACHED && | ||
1220 | (entry & PTE_MAPPING_TYPE_MASK) != PTE_MAPPING_TYPE_CACHED) { | ||
1221 | iounmap(gtt); | ||
1222 | return 0; | ||
1223 | } | ||
1224 | |||
1225 | if (!(entry & PTE_VALID)) { | ||
1226 | DRM_ERROR("bad GTT entry in stolen space\n"); | ||
1227 | iounmap(gtt); | ||
1228 | return 0; | ||
1229 | } | 1056 | } |
1057 | base -= dev_priv->mm.gtt->stolen_size; | ||
1058 | #endif | ||
1230 | 1059 | ||
1231 | iounmap(gtt); | 1060 | return base + offset; |
1232 | |||
1233 | phys =(entry & PTE_ADDRESS_MASK) | | ||
1234 | ((uint64_t)(entry & PTE_ADDRESS_MASK_HIGH) << (32 - 4)); | ||
1235 | |||
1236 | DRM_DEBUG_DRIVER("GTT addr: 0x%08lx, phys addr: 0x%08lx\n", gtt_addr, phys); | ||
1237 | |||
1238 | return phys; | ||
1239 | } | 1061 | } |
1240 | 1062 | ||
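The enabled branch of the new i915_stolen_to_phys() derives the stolen base from the Top of Low Usable DRAM rather than the Graphics Base of Stolen Memory register: as the #if 0 comment explains, the BIOS places the stolen region immediately below TOLUD, so base = TOLUD - stolen_size. A standalone sketch of that arithmetic (the register value and sizes below are made-up examples, not reads from real hardware):

#include <stdint.h>
#include <stdio.h>

/* On gen4+/G33 the 16-bit config word at 0xb0 encodes TOLUD in 1MB
 * units in bits 15:4, hence the "val >> 4 << 20" in the patch. */
static uint32_t stolen_to_phys(uint16_t tolud, uint32_t stolen_size,
			       uint32_t offset)
{
	uint32_t top = (uint32_t)(tolud >> 4) << 20;	/* 1MB granularity */
	uint32_t base = top - stolen_size;	/* stolen sits just below TOLUD */

	return base + offset;
}

int main(void)
{
	/* TOLUD word 0xdff0 -> top of low DRAM at 0xdff00000; with 64MB
	 * stolen the region starts at 0xdbf00000. */
	printf("0x%08x\n", (unsigned)stolen_to_phys(0xdff0, 64u << 20, 0));
	return 0;
}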
1241 | static void i915_warn_stolen(struct drm_device *dev) | 1063 | static void i915_warn_stolen(struct drm_device *dev) |
@@ -1251,54 +1073,35 @@ static void i915_setup_compression(struct drm_device *dev, int size) | |||
1251 | unsigned long cfb_base; | 1073 | unsigned long cfb_base; |
1252 | unsigned long ll_base = 0; | 1074 | unsigned long ll_base = 0; |
1253 | 1075 | ||
1254 | /* Leave 1M for line length buffer & misc. */ | 1076 | compressed_fb = drm_mm_search_free(&dev_priv->mm.stolen, size, 4096, 0); |
1255 | compressed_fb = drm_mm_search_free(&dev_priv->vram, size, 4096, 0); | 1077 | if (compressed_fb) |
1256 | if (!compressed_fb) { | 1078 | compressed_fb = drm_mm_get_block(compressed_fb, size, 4096); |
1257 | dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL; | 1079 | if (!compressed_fb) |
1258 | i915_warn_stolen(dev); | 1080 | goto err; |
1259 | return; | ||
1260 | } | ||
1261 | |||
1262 | compressed_fb = drm_mm_get_block(compressed_fb, size, 4096); | ||
1263 | if (!compressed_fb) { | ||
1264 | i915_warn_stolen(dev); | ||
1265 | dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL; | ||
1266 | return; | ||
1267 | } | ||
1268 | |||
1269 | cfb_base = i915_gtt_to_phys(dev, compressed_fb->start); | ||
1270 | if (!cfb_base) { | ||
1271 | DRM_ERROR("failed to get stolen phys addr, disabling FBC\n"); | ||
1272 | drm_mm_put_block(compressed_fb); | ||
1273 | } | ||
1274 | 1081 | ||
1275 | if (!(IS_GM45(dev) || IS_IRONLAKE_M(dev))) { | 1082 | cfb_base = i915_stolen_to_phys(dev, compressed_fb->start); |
1276 | compressed_llb = drm_mm_search_free(&dev_priv->vram, 4096, | 1083 | if (!cfb_base) |
1277 | 4096, 0); | 1084 | goto err_fb; |
1278 | if (!compressed_llb) { | ||
1279 | i915_warn_stolen(dev); | ||
1280 | return; | ||
1281 | } | ||
1282 | 1085 | ||
1283 | compressed_llb = drm_mm_get_block(compressed_llb, 4096, 4096); | 1086 | if (!(IS_GM45(dev) || HAS_PCH_SPLIT(dev))) { |
1284 | if (!compressed_llb) { | 1087 | compressed_llb = drm_mm_search_free(&dev_priv->mm.stolen, |
1285 | i915_warn_stolen(dev); | 1088 | 4096, 4096, 0); |
1286 | return; | 1089 | if (compressed_llb) |
1287 | } | 1090 | compressed_llb = drm_mm_get_block(compressed_llb, |
1091 | 4096, 4096); | ||
1092 | if (!compressed_llb) | ||
1093 | goto err_fb; | ||
1288 | 1094 | ||
1289 | ll_base = i915_gtt_to_phys(dev, compressed_llb->start); | 1095 | ll_base = i915_stolen_to_phys(dev, compressed_llb->start); |
1290 | if (!ll_base) { | 1096 | if (!ll_base) |
1291 | DRM_ERROR("failed to get stolen phys addr, disabling FBC\n"); | 1097 | goto err_llb; |
1292 | drm_mm_put_block(compressed_fb); | ||
1293 | drm_mm_put_block(compressed_llb); | ||
1294 | } | ||
1295 | } | 1098 | } |
1296 | 1099 | ||
1297 | dev_priv->cfb_size = size; | 1100 | dev_priv->cfb_size = size; |
1298 | 1101 | ||
1299 | intel_disable_fbc(dev); | 1102 | intel_disable_fbc(dev); |
1300 | dev_priv->compressed_fb = compressed_fb; | 1103 | dev_priv->compressed_fb = compressed_fb; |
1301 | if (IS_IRONLAKE_M(dev)) | 1104 | if (HAS_PCH_SPLIT(dev)) |
1302 | I915_WRITE(ILK_DPFC_CB_BASE, compressed_fb->start); | 1105 | I915_WRITE(ILK_DPFC_CB_BASE, compressed_fb->start); |
1303 | else if (IS_GM45(dev)) { | 1106 | else if (IS_GM45(dev)) { |
1304 | I915_WRITE(DPFC_CB_BASE, compressed_fb->start); | 1107 | I915_WRITE(DPFC_CB_BASE, compressed_fb->start); |
@@ -1308,8 +1111,17 @@ static void i915_setup_compression(struct drm_device *dev, int size) | |||
1308 | dev_priv->compressed_llb = compressed_llb; | 1111 | dev_priv->compressed_llb = compressed_llb; |
1309 | } | 1112 | } |
1310 | 1113 | ||
1311 | DRM_DEBUG_KMS("FBC base 0x%08lx, ll base 0x%08lx, size %dM\n", cfb_base, | 1114 | DRM_DEBUG_KMS("FBC base 0x%08lx, ll base 0x%08lx, size %dM\n", |
1312 | ll_base, size >> 20); | 1115 | cfb_base, ll_base, size >> 20); |
1116 | return; | ||
1117 | |||
1118 | err_llb: | ||
1119 | drm_mm_put_block(compressed_llb); | ||
1120 | err_fb: | ||
1121 | drm_mm_put_block(compressed_fb); | ||
1122 | err: | ||
1123 | dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL; | ||
1124 | i915_warn_stolen(dev); | ||
1313 | } | 1125 | } |
1314 | 1126 | ||
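The i915_setup_compression() rewrite above replaces nested early returns with the kernel's usual goto-unwind idiom: each failure jumps to a label that releases only what has been acquired so far, in reverse order, and the labels fall through into each other. A minimal userspace sketch of the pattern (plain allocations stand in for the drm_mm blocks; nothing here is kernel API):

#include <stdlib.h>

static void *fb, *llb;	/* stand-ins for compressed_fb/compressed_llb */

static int setup(void)
{
	fb = malloc(4096);
	if (!fb)
		goto err;

	llb = malloc(4096);
	if (!llb)
		goto err_fb;

	/* success: both resources stay allocated for later use */
	return 0;

err_fb:
	free(fb);
err:
	return -1;
}

int main(void)
{
	return setup() ? 1 : 0;
}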
1315 | static void i915_cleanup_compression(struct drm_device *dev) | 1127 | static void i915_cleanup_compression(struct drm_device *dev) |
@@ -1340,14 +1152,16 @@ static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_ | |||
1340 | pm_message_t pmm = { .event = PM_EVENT_SUSPEND }; | 1152 | pm_message_t pmm = { .event = PM_EVENT_SUSPEND }; |
1341 | if (state == VGA_SWITCHEROO_ON) { | 1153 | if (state == VGA_SWITCHEROO_ON) { |
1342 | printk(KERN_INFO "i915: switched on\n"); | 1154 | printk(KERN_INFO "i915: switched on\n"); |
1155 | dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; | ||
1343 | /* i915 resume handler doesn't set to D0 */ | 1156 | /* i915 resume handler doesn't set to D0 */ |
1344 | pci_set_power_state(dev->pdev, PCI_D0); | 1157 | pci_set_power_state(dev->pdev, PCI_D0); |
1345 | i915_resume(dev); | 1158 | i915_resume(dev); |
1346 | drm_kms_helper_poll_enable(dev); | 1159 | dev->switch_power_state = DRM_SWITCH_POWER_ON; |
1347 | } else { | 1160 | } else { |
1348 | printk(KERN_ERR "i915: switched off\n"); | 1161 | printk(KERN_ERR "i915: switched off\n"); |
1349 | drm_kms_helper_poll_disable(dev); | 1162 | dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; |
1350 | i915_suspend(dev, pmm); | 1163 | i915_suspend(dev, pmm); |
1164 | dev->switch_power_state = DRM_SWITCH_POWER_OFF; | ||
1351 | } | 1165 | } |
1352 | } | 1166 | } |
1353 | 1167 | ||
@@ -1362,26 +1176,20 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev) | |||
1362 | return can_switch; | 1176 | return can_switch; |
1363 | } | 1177 | } |
1364 | 1178 | ||
1365 | static int i915_load_modeset_init(struct drm_device *dev, | 1179 | static int i915_load_gem_init(struct drm_device *dev) |
1366 | unsigned long prealloc_start, | ||
1367 | unsigned long prealloc_size, | ||
1368 | unsigned long agp_size) | ||
1369 | { | 1180 | { |
1370 | struct drm_i915_private *dev_priv = dev->dev_private; | 1181 | struct drm_i915_private *dev_priv = dev->dev_private; |
1371 | int fb_bar = IS_I9XX(dev) ? 2 : 0; | 1182 | unsigned long prealloc_size, gtt_size, mappable_size; |
1372 | int ret = 0; | 1183 | int ret; |
1373 | |||
1374 | dev->mode_config.fb_base = pci_resource_start(dev->pdev, fb_bar) & | ||
1375 | 0xff000000; | ||
1376 | 1184 | ||
1377 | /* Basic memrange allocator for stolen space (aka vram) */ | 1185 | prealloc_size = dev_priv->mm.gtt->stolen_size; |
1378 | drm_mm_init(&dev_priv->vram, 0, prealloc_size); | 1186 | gtt_size = dev_priv->mm.gtt->gtt_total_entries << PAGE_SHIFT; |
1379 | DRM_INFO("set up %ldM of stolen space\n", prealloc_size / (1024*1024)); | 1187 | mappable_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT; |
1380 | 1188 | ||
1381 | /* We're off and running w/KMS */ | 1189 | /* Basic memrange allocator for stolen space */ |
1382 | dev_priv->mm.suspended = 0; | 1190 | drm_mm_init(&dev_priv->mm.stolen, 0, prealloc_size); |
1383 | 1191 | ||
1384 | /* Let GEM Manage from end of prealloc space to end of aperture. | 1192 | /* Let GEM Manage all of the aperture. |
1385 | * | 1193 | * |
1386 | * However, leave one page at the end still bound to the scratch page. | 1194 | * However, leave one page at the end still bound to the scratch page. |
1387 | * There are a number of places where the hardware apparently | 1195 | * There are a number of places where the hardware apparently |
@@ -1390,41 +1198,58 @@ static int i915_load_modeset_init(struct drm_device *dev, | |||
1390 | * at the last page of the aperture. One page should be enough to | 1198 | * at the last page of the aperture. One page should be enough to |
1391 | * keep any prefetching inside of the aperture. | 1199 | * keep any prefetching inside of the aperture. |
1392 | */ | 1200 | */ |
1393 | i915_gem_do_init(dev, prealloc_size, agp_size - 4096); | 1201 | i915_gem_do_init(dev, 0, mappable_size, gtt_size - PAGE_SIZE); |
1394 | 1202 | ||
1395 | mutex_lock(&dev->struct_mutex); | 1203 | mutex_lock(&dev->struct_mutex); |
1396 | ret = i915_gem_init_ringbuffer(dev); | 1204 | ret = i915_gem_init_ringbuffer(dev); |
1397 | mutex_unlock(&dev->struct_mutex); | 1205 | mutex_unlock(&dev->struct_mutex); |
1398 | if (ret) | 1206 | if (ret) |
1399 | goto out; | 1207 | return ret; |
1400 | 1208 | ||
1401 | /* Try to set up FBC with a reasonable compressed buffer size */ | 1209 | /* Try to set up FBC with a reasonable compressed buffer size */ |
1402 | if (I915_HAS_FBC(dev) && i915_powersave) { | 1210 | if (I915_HAS_FBC(dev) && i915_powersave) { |
1403 | int cfb_size; | 1211 | int cfb_size; |
1404 | 1212 | ||
1405 | /* Try to get an 8M buffer... */ | 1213 | /* Leave 1M for line length buffer & misc. */ |
1406 | if (prealloc_size > (9*1024*1024)) | 1214 | |
1407 | cfb_size = 8*1024*1024; | 1215 | /* Try to get a 32M buffer... */ |
1216 | if (prealloc_size > (36*1024*1024)) | ||
1217 | cfb_size = 32*1024*1024; | ||
1408 | else /* fall back to 7/8 of the stolen space */ | 1218 | else /* fall back to 7/8 of the stolen space */ |
1409 | cfb_size = prealloc_size * 7 / 8; | 1219 | cfb_size = prealloc_size * 7 / 8; |
1410 | i915_setup_compression(dev, cfb_size); | 1220 | i915_setup_compression(dev, cfb_size); |
1411 | } | 1221 | } |
1412 | 1222 | ||
1413 | /* Allow hardware batchbuffers unless told otherwise. | 1223 | /* Allow hardware batchbuffers unless told otherwise. */ |
1414 | */ | ||
1415 | dev_priv->allow_batchbuffer = 1; | 1224 | dev_priv->allow_batchbuffer = 1; |
1225 | return 0; | ||
1226 | } | ||
1227 | |||
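Two small calculations in i915_load_gem_init() are worth spelling out: the GTT sizes come from entry counts shifted by PAGE_SHIFT (each GTT entry maps one 4KB page), and the FBC buffer now takes a flat 32MB whenever more than 36MB is stolen, falling back to 7/8 of the stolen space otherwise. A worked example with made-up sizes:

#include <stdio.h>

#define PAGE_SHIFT 12	/* 4KB pages */

int main(void)
{
	unsigned long gtt_total_entries = 512 * 1024;	/* example count */
	unsigned long gtt_size = gtt_total_entries << PAGE_SHIFT;
	unsigned long prealloc_size = 64ul << 20;	/* 64MB stolen */
	unsigned long cfb_size;

	if (prealloc_size > (36ul << 20))
		cfb_size = 32ul << 20;	/* plenty of room: flat 32MB */
	else
		cfb_size = prealloc_size * 7 / 8;

	/* prints: gtt=2048MB cfb=32MB */
	printf("gtt=%luMB cfb=%luMB\n", gtt_size >> 20, cfb_size >> 20);
	return 0;
}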
1228 | static int i915_load_modeset_init(struct drm_device *dev) | ||
1229 | { | ||
1230 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1231 | int ret; | ||
1416 | 1232 | ||
1417 | ret = intel_init_bios(dev); | 1233 | ret = intel_parse_bios(dev); |
1418 | if (ret) | 1234 | if (ret) |
1419 | DRM_INFO("failed to find VBIOS tables\n"); | 1235 | DRM_INFO("failed to find VBIOS tables\n"); |
1420 | 1236 | ||
1421 | /* if we have > 1 VGA cards, then disable the radeon VGA resources */ | 1237 | /* If we have > 1 VGA cards, then we need to arbitrate access |
1238 | * to the common VGA resources. | ||
1239 | * | ||
1240 | * If we are a secondary display controller (!PCI_DISPLAY_CLASS_VGA), | ||
1241 | * then we do not take part in VGA arbitration and the | ||
1242 | * vga_client_register() fails with -ENODEV. | ||
1243 | */ | ||
1422 | ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode); | 1244 | ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode); |
1423 | if (ret) | 1245 | if (ret && ret != -ENODEV) |
1424 | goto cleanup_ringbuffer; | 1246 | goto out; |
1247 | |||
1248 | intel_register_dsm_handler(); | ||
1425 | 1249 | ||
1426 | ret = vga_switcheroo_register_client(dev->pdev, | 1250 | ret = vga_switcheroo_register_client(dev->pdev, |
1427 | i915_switcheroo_set_state, | 1251 | i915_switcheroo_set_state, |
1252 | NULL, | ||
1428 | i915_switcheroo_can_switch); | 1253 | i915_switcheroo_can_switch); |
1429 | if (ret) | 1254 | if (ret) |
1430 | goto cleanup_vga_client; | 1255 | goto cleanup_vga_client; |
@@ -1435,37 +1260,41 @@ static int i915_load_modeset_init(struct drm_device *dev, | |||
1435 | 1260 | ||
1436 | intel_modeset_init(dev); | 1261 | intel_modeset_init(dev); |
1437 | 1262 | ||
1438 | ret = drm_irq_install(dev); | 1263 | ret = i915_load_gem_init(dev); |
1439 | if (ret) | 1264 | if (ret) |
1440 | goto cleanup_vga_switcheroo; | 1265 | goto cleanup_vga_switcheroo; |
1441 | 1266 | ||
1267 | intel_modeset_gem_init(dev); | ||
1268 | |||
1269 | ret = drm_irq_install(dev); | ||
1270 | if (ret) | ||
1271 | goto cleanup_gem; | ||
1272 | |||
1442 | /* Always safe in the mode setting case. */ | 1273 | /* Always safe in the mode setting case. */ |
1443 | /* FIXME: do pre/post-mode set stuff in core KMS code */ | 1274 | /* FIXME: do pre/post-mode set stuff in core KMS code */ |
1444 | dev->vblank_disable_allowed = 1; | 1275 | dev->vblank_disable_allowed = 1; |
1445 | 1276 | ||
1446 | /* | ||
1447 | * Initialize the hardware status page IRQ location. | ||
1448 | */ | ||
1449 | |||
1450 | I915_WRITE(INSTPM, (1 << 5) | (1 << 21)); | ||
1451 | |||
1452 | ret = intel_fbdev_init(dev); | 1277 | ret = intel_fbdev_init(dev); |
1453 | if (ret) | 1278 | if (ret) |
1454 | goto cleanup_irq; | 1279 | goto cleanup_irq; |
1455 | 1280 | ||
1456 | drm_kms_helper_poll_init(dev); | 1281 | drm_kms_helper_poll_init(dev); |
1282 | |||
1283 | /* We're off and running w/KMS */ | ||
1284 | dev_priv->mm.suspended = 0; | ||
1285 | |||
1457 | return 0; | 1286 | return 0; |
1458 | 1287 | ||
1459 | cleanup_irq: | 1288 | cleanup_irq: |
1460 | drm_irq_uninstall(dev); | 1289 | drm_irq_uninstall(dev); |
1290 | cleanup_gem: | ||
1291 | mutex_lock(&dev->struct_mutex); | ||
1292 | i915_gem_cleanup_ringbuffer(dev); | ||
1293 | mutex_unlock(&dev->struct_mutex); | ||
1461 | cleanup_vga_switcheroo: | 1294 | cleanup_vga_switcheroo: |
1462 | vga_switcheroo_unregister_client(dev->pdev); | 1295 | vga_switcheroo_unregister_client(dev->pdev); |
1463 | cleanup_vga_client: | 1296 | cleanup_vga_client: |
1464 | vga_client_register(dev->pdev, NULL, NULL, NULL); | 1297 | vga_client_register(dev->pdev, NULL, NULL, NULL); |
1465 | cleanup_ringbuffer: | ||
1466 | mutex_lock(&dev->struct_mutex); | ||
1467 | i915_gem_cleanup_ringbuffer(dev); | ||
1468 | mutex_unlock(&dev->struct_mutex); | ||
1469 | out: | 1298 | out: |
1470 | return ret; | 1299 | return ret; |
1471 | } | 1300 | } |
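The vga_client_register() change above is the subtle one in this hunk: a secondary display controller is not PCI_CLASS_DISPLAY_VGA, takes no part in VGA arbitration, and gets -ENODEV back, which must not abort the driver load. A sketch of that error filtering (the register function below is a stub standing in for the real call, not kernel API):

#include <errno.h>
#include <stdio.h>

/* Stub standing in for vga_client_register(): secondary controllers
 * are turned away with -ENODEV. */
static int register_vga_client(int is_secondary)
{
	return is_secondary ? -ENODEV : 0;
}

int main(void)
{
	int ret = register_vga_client(1);

	if (ret && ret != -ENODEV) {
		fprintf(stderr, "fatal: %d\n", ret);
		return 1;
	}
	/* -ENODEV: no arbitration for this device, keep loading */
	return 0;
}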
@@ -1601,152 +1430,12 @@ static void i915_ironlake_get_mem_freq(struct drm_device *dev) | |||
1601 | } | 1430 | } |
1602 | } | 1431 | } |
1603 | 1432 | ||
1604 | struct v_table { | 1433 | static const struct cparams { |
1605 | u8 vid; | 1434 | u16 i; |
1606 | unsigned long vd; /* in .1 mil */ | 1435 | u16 t; |
1607 | unsigned long vm; /* in .1 mil */ | 1436 | u16 m; |
1608 | u8 pvid; | 1437 | u16 c; |
1609 | }; | 1438 | } cparams[] = { |
1610 | |||
1611 | static struct v_table v_table[] = { | ||
1612 | { 0, 16125, 15000, 0x7f, }, | ||
1613 | { 1, 16000, 14875, 0x7e, }, | ||
1614 | { 2, 15875, 14750, 0x7d, }, | ||
1615 | { 3, 15750, 14625, 0x7c, }, | ||
1616 | { 4, 15625, 14500, 0x7b, }, | ||
1617 | { 5, 15500, 14375, 0x7a, }, | ||
1618 | { 6, 15375, 14250, 0x79, }, | ||
1619 | { 7, 15250, 14125, 0x78, }, | ||
1620 | { 8, 15125, 14000, 0x77, }, | ||
1621 | { 9, 15000, 13875, 0x76, }, | ||
1622 | { 10, 14875, 13750, 0x75, }, | ||
1623 | { 11, 14750, 13625, 0x74, }, | ||
1624 | { 12, 14625, 13500, 0x73, }, | ||
1625 | { 13, 14500, 13375, 0x72, }, | ||
1626 | { 14, 14375, 13250, 0x71, }, | ||
1627 | { 15, 14250, 13125, 0x70, }, | ||
1628 | { 16, 14125, 13000, 0x6f, }, | ||
1629 | { 17, 14000, 12875, 0x6e, }, | ||
1630 | { 18, 13875, 12750, 0x6d, }, | ||
1631 | { 19, 13750, 12625, 0x6c, }, | ||
1632 | { 20, 13625, 12500, 0x6b, }, | ||
1633 | { 21, 13500, 12375, 0x6a, }, | ||
1634 | { 22, 13375, 12250, 0x69, }, | ||
1635 | { 23, 13250, 12125, 0x68, }, | ||
1636 | { 24, 13125, 12000, 0x67, }, | ||
1637 | { 25, 13000, 11875, 0x66, }, | ||
1638 | { 26, 12875, 11750, 0x65, }, | ||
1639 | { 27, 12750, 11625, 0x64, }, | ||
1640 | { 28, 12625, 11500, 0x63, }, | ||
1641 | { 29, 12500, 11375, 0x62, }, | ||
1642 | { 30, 12375, 11250, 0x61, }, | ||
1643 | { 31, 12250, 11125, 0x60, }, | ||
1644 | { 32, 12125, 11000, 0x5f, }, | ||
1645 | { 33, 12000, 10875, 0x5e, }, | ||
1646 | { 34, 11875, 10750, 0x5d, }, | ||
1647 | { 35, 11750, 10625, 0x5c, }, | ||
1648 | { 36, 11625, 10500, 0x5b, }, | ||
1649 | { 37, 11500, 10375, 0x5a, }, | ||
1650 | { 38, 11375, 10250, 0x59, }, | ||
1651 | { 39, 11250, 10125, 0x58, }, | ||
1652 | { 40, 11125, 10000, 0x57, }, | ||
1653 | { 41, 11000, 9875, 0x56, }, | ||
1654 | { 42, 10875, 9750, 0x55, }, | ||
1655 | { 43, 10750, 9625, 0x54, }, | ||
1656 | { 44, 10625, 9500, 0x53, }, | ||
1657 | { 45, 10500, 9375, 0x52, }, | ||
1658 | { 46, 10375, 9250, 0x51, }, | ||
1659 | { 47, 10250, 9125, 0x50, }, | ||
1660 | { 48, 10125, 9000, 0x4f, }, | ||
1661 | { 49, 10000, 8875, 0x4e, }, | ||
1662 | { 50, 9875, 8750, 0x4d, }, | ||
1663 | { 51, 9750, 8625, 0x4c, }, | ||
1664 | { 52, 9625, 8500, 0x4b, }, | ||
1665 | { 53, 9500, 8375, 0x4a, }, | ||
1666 | { 54, 9375, 8250, 0x49, }, | ||
1667 | { 55, 9250, 8125, 0x48, }, | ||
1668 | { 56, 9125, 8000, 0x47, }, | ||
1669 | { 57, 9000, 7875, 0x46, }, | ||
1670 | { 58, 8875, 7750, 0x45, }, | ||
1671 | { 59, 8750, 7625, 0x44, }, | ||
1672 | { 60, 8625, 7500, 0x43, }, | ||
1673 | { 61, 8500, 7375, 0x42, }, | ||
1674 | { 62, 8375, 7250, 0x41, }, | ||
1675 | { 63, 8250, 7125, 0x40, }, | ||
1676 | { 64, 8125, 7000, 0x3f, }, | ||
1677 | { 65, 8000, 6875, 0x3e, }, | ||
1678 | { 66, 7875, 6750, 0x3d, }, | ||
1679 | { 67, 7750, 6625, 0x3c, }, | ||
1680 | { 68, 7625, 6500, 0x3b, }, | ||
1681 | { 69, 7500, 6375, 0x3a, }, | ||
1682 | { 70, 7375, 6250, 0x39, }, | ||
1683 | { 71, 7250, 6125, 0x38, }, | ||
1684 | { 72, 7125, 6000, 0x37, }, | ||
1685 | { 73, 7000, 5875, 0x36, }, | ||
1686 | { 74, 6875, 5750, 0x35, }, | ||
1687 | { 75, 6750, 5625, 0x34, }, | ||
1688 | { 76, 6625, 5500, 0x33, }, | ||
1689 | { 77, 6500, 5375, 0x32, }, | ||
1690 | { 78, 6375, 5250, 0x31, }, | ||
1691 | { 79, 6250, 5125, 0x30, }, | ||
1692 | { 80, 6125, 5000, 0x2f, }, | ||
1693 | { 81, 6000, 4875, 0x2e, }, | ||
1694 | { 82, 5875, 4750, 0x2d, }, | ||
1695 | { 83, 5750, 4625, 0x2c, }, | ||
1696 | { 84, 5625, 4500, 0x2b, }, | ||
1697 | { 85, 5500, 4375, 0x2a, }, | ||
1698 | { 86, 5375, 4250, 0x29, }, | ||
1699 | { 87, 5250, 4125, 0x28, }, | ||
1700 | { 88, 5125, 4000, 0x27, }, | ||
1701 | { 89, 5000, 3875, 0x26, }, | ||
1702 | { 90, 4875, 3750, 0x25, }, | ||
1703 | { 91, 4750, 3625, 0x24, }, | ||
1704 | { 92, 4625, 3500, 0x23, }, | ||
1705 | { 93, 4500, 3375, 0x22, }, | ||
1706 | { 94, 4375, 3250, 0x21, }, | ||
1707 | { 95, 4250, 3125, 0x20, }, | ||
1708 | { 96, 4125, 3000, 0x1f, }, | ||
1709 | { 97, 4125, 3000, 0x1e, }, | ||
1710 | { 98, 4125, 3000, 0x1d, }, | ||
1711 | { 99, 4125, 3000, 0x1c, }, | ||
1712 | { 100, 4125, 3000, 0x1b, }, | ||
1713 | { 101, 4125, 3000, 0x1a, }, | ||
1714 | { 102, 4125, 3000, 0x19, }, | ||
1715 | { 103, 4125, 3000, 0x18, }, | ||
1716 | { 104, 4125, 3000, 0x17, }, | ||
1717 | { 105, 4125, 3000, 0x16, }, | ||
1718 | { 106, 4125, 3000, 0x15, }, | ||
1719 | { 107, 4125, 3000, 0x14, }, | ||
1720 | { 108, 4125, 3000, 0x13, }, | ||
1721 | { 109, 4125, 3000, 0x12, }, | ||
1722 | { 110, 4125, 3000, 0x11, }, | ||
1723 | { 111, 4125, 3000, 0x10, }, | ||
1724 | { 112, 4125, 3000, 0x0f, }, | ||
1725 | { 113, 4125, 3000, 0x0e, }, | ||
1726 | { 114, 4125, 3000, 0x0d, }, | ||
1727 | { 115, 4125, 3000, 0x0c, }, | ||
1728 | { 116, 4125, 3000, 0x0b, }, | ||
1729 | { 117, 4125, 3000, 0x0a, }, | ||
1730 | { 118, 4125, 3000, 0x09, }, | ||
1731 | { 119, 4125, 3000, 0x08, }, | ||
1732 | { 120, 1125, 0, 0x07, }, | ||
1733 | { 121, 1000, 0, 0x06, }, | ||
1734 | { 122, 875, 0, 0x05, }, | ||
1735 | { 123, 750, 0, 0x04, }, | ||
1736 | { 124, 625, 0, 0x03, }, | ||
1737 | { 125, 500, 0, 0x02, }, | ||
1738 | { 126, 375, 0, 0x01, }, | ||
1739 | { 127, 0, 0, 0x00, }, | ||
1740 | }; | ||
1741 | |||
1742 | struct cparams { | ||
1743 | int i; | ||
1744 | int t; | ||
1745 | int m; | ||
1746 | int c; | ||
1747 | }; | ||
1748 | |||
1749 | static struct cparams cparams[] = { | ||
1750 | { 1, 1333, 301, 28664 }, | 1439 | { 1, 1333, 301, 28664 }, |
1751 | { 1, 1066, 294, 24460 }, | 1440 | { 1, 1066, 294, 24460 }, |
1752 | { 1, 800, 294, 25192 }, | 1441 | { 1, 800, 294, 25192 }, |
@@ -1812,21 +1501,145 @@ unsigned long i915_mch_val(struct drm_i915_private *dev_priv) | |||
1812 | return ((m * x) / 127) - b; | 1501 | return ((m * x) / 127) - b; |
1813 | } | 1502 | } |
1814 | 1503 | ||
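The return statement above is a straight linear fit, val = (m * x) / 127 - b, where m, x and b are decoded from hardware registers earlier in the function (not shown in this hunk). A worked instance of the integer arithmetic with made-up register values:

#include <stdio.h>

int main(void)
{
	unsigned long m = 100;	/* example slope */
	unsigned long x = 64;	/* example raw reading */
	unsigned long b = 10;	/* example intercept */

	/* (100 * 64) / 127 - 10 = 50 - 10 = 40 */
	printf("%lu\n", (m * x) / 127 - b);
	return 0;
}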
1815 | static unsigned long pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid) | 1504 | static u16 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid) |
1816 | { | 1505 | { |
1817 | unsigned long val = 0; | 1506 | static const struct v_table { |
1818 | int i; | 1507 | u16 vd; /* in .1 mil */ |
1819 | 1508 | u16 vm; /* in .1 mil */ | |
1820 | for (i = 0; i < ARRAY_SIZE(v_table); i++) { | 1509 | } v_table[] = { |
1821 | if (v_table[i].pvid == pxvid) { | 1510 | { 0, 0, }, |
1822 | if (IS_MOBILE(dev_priv->dev)) | 1511 | { 375, 0, }, |
1823 | val = v_table[i].vm; | 1512 | { 500, 0, }, |
1824 | else | 1513 | { 625, 0, }, |
1825 | val = v_table[i].vd; | 1514 | { 750, 0, }, |
1826 | } | 1515 | { 875, 0, }, |
1827 | } | 1516 | { 1000, 0, }, |
1828 | 1517 | { 1125, 0, }, | |
1829 | return val; | 1518 | { 4125, 3000, }, |
1519 | { 4125, 3000, }, | ||
1520 | { 4125, 3000, }, | ||
1521 | { 4125, 3000, }, | ||
1522 | { 4125, 3000, }, | ||
1523 | { 4125, 3000, }, | ||
1524 | { 4125, 3000, }, | ||
1525 | { 4125, 3000, }, | ||
1526 | { 4125, 3000, }, | ||
1527 | { 4125, 3000, }, | ||
1528 | { 4125, 3000, }, | ||
1529 | { 4125, 3000, }, | ||
1530 | { 4125, 3000, }, | ||
1531 | { 4125, 3000, }, | ||
1532 | { 4125, 3000, }, | ||
1533 | { 4125, 3000, }, | ||
1534 | { 4125, 3000, }, | ||
1535 | { 4125, 3000, }, | ||
1536 | { 4125, 3000, }, | ||
1537 | { 4125, 3000, }, | ||
1538 | { 4125, 3000, }, | ||
1539 | { 4125, 3000, }, | ||
1540 | { 4125, 3000, }, | ||
1541 | { 4125, 3000, }, | ||
1542 | { 4250, 3125, }, | ||
1543 | { 4375, 3250, }, | ||
1544 | { 4500, 3375, }, | ||
1545 | { 4625, 3500, }, | ||
1546 | { 4750, 3625, }, | ||
1547 | { 4875, 3750, }, | ||
1548 | { 5000, 3875, }, | ||
1549 | { 5125, 4000, }, | ||
1550 | { 5250, 4125, }, | ||
1551 | { 5375, 4250, }, | ||
1552 | { 5500, 4375, }, | ||
1553 | { 5625, 4500, }, | ||
1554 | { 5750, 4625, }, | ||
1555 | { 5875, 4750, }, | ||
1556 | { 6000, 4875, }, | ||
1557 | { 6125, 5000, }, | ||
1558 | { 6250, 5125, }, | ||
1559 | { 6375, 5250, }, | ||
1560 | { 6500, 5375, }, | ||
1561 | { 6625, 5500, }, | ||
1562 | { 6750, 5625, }, | ||
1563 | { 6875, 5750, }, | ||
1564 | { 7000, 5875, }, | ||
1565 | { 7125, 6000, }, | ||
1566 | { 7250, 6125, }, | ||
1567 | { 7375, 6250, }, | ||
1568 | { 7500, 6375, }, | ||
1569 | { 7625, 6500, }, | ||
1570 | { 7750, 6625, }, | ||
1571 | { 7875, 6750, }, | ||
1572 | { 8000, 6875, }, | ||
1573 | { 8125, 7000, }, | ||
1574 | { 8250, 7125, }, | ||
1575 | { 8375, 7250, }, | ||
1576 | { 8500, 7375, }, | ||
1577 | { 8625, 7500, }, | ||
1578 | { 8750, 7625, }, | ||
1579 | { 8875, 7750, }, | ||
1580 | { 9000, 7875, }, | ||
1581 | { 9125, 8000, }, | ||
1582 | { 9250, 8125, }, | ||
1583 | { 9375, 8250, }, | ||
1584 | { 9500, 8375, }, | ||
1585 | { 9625, 8500, }, | ||
1586 | { 9750, 8625, }, | ||
1587 | { 9875, 8750, }, | ||
1588 | { 10000, 8875, }, | ||
1589 | { 10125, 9000, }, | ||
1590 | { 10250, 9125, }, | ||
1591 | { 10375, 9250, }, | ||
1592 | { 10500, 9375, }, | ||
1593 | { 10625, 9500, }, | ||
1594 | { 10750, 9625, }, | ||
1595 | { 10875, 9750, }, | ||
1596 | { 11000, 9875, }, | ||
1597 | { 11125, 10000, }, | ||
1598 | { 11250, 10125, }, | ||
1599 | { 11375, 10250, }, | ||
1600 | { 11500, 10375, }, | ||
1601 | { 11625, 10500, }, | ||
1602 | { 11750, 10625, }, | ||
1603 | { 11875, 10750, }, | ||
1604 | { 12000, 10875, }, | ||
1605 | { 12125, 11000, }, | ||
1606 | { 12250, 11125, }, | ||
1607 | { 12375, 11250, }, | ||
1608 | { 12500, 11375, }, | ||
1609 | { 12625, 11500, }, | ||
1610 | { 12750, 11625, }, | ||
1611 | { 12875, 11750, }, | ||
1612 | { 13000, 11875, }, | ||
1613 | { 13125, 12000, }, | ||
1614 | { 13250, 12125, }, | ||
1615 | { 13375, 12250, }, | ||
1616 | { 13500, 12375, }, | ||
1617 | { 13625, 12500, }, | ||
1618 | { 13750, 12625, }, | ||
1619 | { 13875, 12750, }, | ||
1620 | { 14000, 12875, }, | ||
1621 | { 14125, 13000, }, | ||
1622 | { 14250, 13125, }, | ||
1623 | { 14375, 13250, }, | ||
1624 | { 14500, 13375, }, | ||
1625 | { 14625, 13500, }, | ||
1626 | { 14750, 13625, }, | ||
1627 | { 14875, 13750, }, | ||
1628 | { 15000, 13875, }, | ||
1629 | { 15125, 14000, }, | ||
1630 | { 15250, 14125, }, | ||
1631 | { 15375, 14250, }, | ||
1632 | { 15500, 14375, }, | ||
1633 | { 15625, 14500, }, | ||
1634 | { 15750, 14625, }, | ||
1635 | { 15875, 14750, }, | ||
1636 | { 16000, 14875, }, | ||
1637 | { 16125, 15000, }, | ||
1638 | }; | ||
1639 | if (dev_priv->info->is_mobile) | ||
1640 | return v_table[pxvid].vm; | ||
1641 | else | ||
1642 | return v_table[pxvid].vd; | ||
1830 | } | 1643 | } |
1831 | 1644 | ||
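The pvid_to_extvid() rewrite deserves a second look: the old v_table was searched linearly for a matching .pvid key, but those keys are simply 0..127 in sequence (in reverse of the old vid ordering), so the new table drops the key fields, reverses the rows, and is indexed by pxvid directly. A trimmed sketch of the new shape (two entries shown; the driver carries 128):

#include <stdint.h>
#include <stdio.h>

struct v_entry {
	uint16_t vd;	/* desktop voltage, "in .1 mil" per the driver */
	uint16_t vm;	/* mobile voltage, same units */
};

static const struct v_entry v_table[] = {
	{   0, 0 },	/* pxvid 0 */
	{ 375, 0 },	/* pxvid 1 */
	/* ... 126 more entries in the driver ... */
};

static uint16_t pvid_to_extvid(int is_mobile, uint8_t pxvid)
{
	return is_mobile ? v_table[pxvid].vm : v_table[pxvid].vd;
}

int main(void)
{
	printf("%u\n", pvid_to_extvid(0, 1));	/* prints 375 */
	return 0;
}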
1832 | void i915_update_gfx_val(struct drm_i915_private *dev_priv) | 1645 | void i915_update_gfx_val(struct drm_i915_private *dev_priv) |
@@ -1907,7 +1720,7 @@ static struct drm_i915_private *i915_mch_dev; | |||
1907 | * - dev_priv->fmax | 1720 | * - dev_priv->fmax |
1908 | * - dev_priv->gpu_busy | 1721 | * - dev_priv->gpu_busy |
1909 | */ | 1722 | */ |
1910 | DEFINE_SPINLOCK(mchdev_lock); | 1723 | static DEFINE_SPINLOCK(mchdev_lock); |
1911 | 1724 | ||
1912 | /** | 1725 | /** |
1913 | * i915_read_mch_val - return value for IPS use | 1726 | * i915_read_mch_val - return value for IPS use |
@@ -2047,6 +1860,26 @@ out_unlock: | |||
2047 | EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable); | 1860 | EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable); |
2048 | 1861 | ||
2049 | /** | 1862 | /** |
1863 | * Tells the intel_ips driver that the i915 driver is now loaded, if | ||
1864 | * IPS got loaded first. | ||
1865 | * | ||
1866 | * This awkward dance is so that neither module has to depend on the | ||
1867 | * other in order for IPS to do the appropriate communication of | ||
1868 | * GPU turbo limits to i915. | ||
1869 | */ | ||
1870 | static void | ||
1871 | ips_ping_for_i915_load(void) | ||
1872 | { | ||
1873 | void (*link)(void); | ||
1874 | |||
1875 | link = symbol_get(ips_link_to_i915_driver); | ||
1876 | if (link) { | ||
1877 | link(); | ||
1878 | symbol_put(ips_link_to_i915_driver); | ||
1879 | } | ||
1880 | } | ||
1881 | |||
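symbol_get() above takes a reference on ips_link_to_i915_driver only if intel_ips is already loaded, and symbol_put() drops it, so neither module needs a link-time dependency on the other. A loose userspace analogue of that late binding using dlsym(), where the lookup simply yields NULL when the other side is absent (build with -ldl -rdynamic; the symbol name is only found if something exports it):

#include <dlfcn.h>
#include <stdio.h>

int main(void)
{
	void (*link)(void);
	void *self = dlopen(NULL, RTLD_NOW);	/* the running program */

	link = (void (*)(void))dlsym(self, "ips_link_to_i915_driver");
	if (link)
		link();		/* only ping if the hook actually exists */
	else
		printf("hook not present, nothing to do\n");

	dlclose(self);
	return 0;
}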
1882 | /** | ||
2050 | * i915_driver_load - setup chip and create an initial config | 1883 | * i915_driver_load - setup chip and create an initial config |
2051 | * @dev: DRM device | 1884 | * @dev: DRM device |
2052 | * @flags: startup flags | 1885 | * @flags: startup flags |
@@ -2060,9 +1893,9 @@ EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable); | |||
2060 | int i915_driver_load(struct drm_device *dev, unsigned long flags) | 1893 | int i915_driver_load(struct drm_device *dev, unsigned long flags) |
2061 | { | 1894 | { |
2062 | struct drm_i915_private *dev_priv; | 1895 | struct drm_i915_private *dev_priv; |
2063 | resource_size_t base, size; | ||
2064 | int ret = 0, mmio_bar; | 1896 | int ret = 0, mmio_bar; |
2065 | uint32_t agp_size, prealloc_size, prealloc_start; | 1897 | uint32_t agp_size; |
1898 | |||
2066 | /* i915 has 4 more counters */ | 1899 | /* i915 has 4 more counters */ |
2067 | dev->counters += 4; | 1900 | dev->counters += 4; |
2068 | dev->types[6] = _DRM_STAT_IRQ; | 1901 | dev->types[6] = _DRM_STAT_IRQ; |
@@ -2078,11 +1911,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) | |||
2078 | dev_priv->dev = dev; | 1911 | dev_priv->dev = dev; |
2079 | dev_priv->info = (struct intel_device_info *) flags; | 1912 | dev_priv->info = (struct intel_device_info *) flags; |
2080 | 1913 | ||
2081 | /* Add register map (needed for suspend/resume) */ | ||
2082 | mmio_bar = IS_I9XX(dev) ? 0 : 1; | ||
2083 | base = pci_resource_start(dev->pdev, mmio_bar); | ||
2084 | size = pci_resource_len(dev->pdev, mmio_bar); | ||
2085 | |||
2086 | if (i915_get_bridge_dev(dev)) { | 1914 | if (i915_get_bridge_dev(dev)) { |
2087 | ret = -EIO; | 1915 | ret = -EIO; |
2088 | goto free_priv; | 1916 | goto free_priv; |
@@ -2092,16 +1920,36 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) | |||
2092 | if (IS_GEN2(dev)) | 1920 | if (IS_GEN2(dev)) |
2093 | dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30)); | 1921 | dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30)); |
2094 | 1922 | ||
2095 | dev_priv->regs = ioremap(base, size); | 1923 | /* 965GM sometimes incorrectly writes to hardware status page (HWS) |
1924 | * using 32bit addressing, overwriting memory if HWS is located | ||
1925 | * above 4GB. | ||
1926 | * | ||
1927 | * The documentation also mentions an issue with undefined | ||
1928 | * behaviour if any general state is accessed within a page above 4GB, | ||
1929 | * which also needs to be handled carefully. | ||
1930 | */ | ||
1931 | if (IS_BROADWATER(dev) || IS_CRESTLINE(dev)) | ||
1932 | dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32)); | ||
1933 | |||
1934 | mmio_bar = IS_GEN2(dev) ? 1 : 0; | ||
1935 | dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, 0); | ||
2096 | if (!dev_priv->regs) { | 1936 | if (!dev_priv->regs) { |
2097 | DRM_ERROR("failed to map registers\n"); | 1937 | DRM_ERROR("failed to map registers\n"); |
2098 | ret = -EIO; | 1938 | ret = -EIO; |
2099 | goto put_bridge; | 1939 | goto put_bridge; |
2100 | } | 1940 | } |
2101 | 1941 | ||
1942 | dev_priv->mm.gtt = intel_gtt_get(); | ||
1943 | if (!dev_priv->mm.gtt) { | ||
1944 | DRM_ERROR("Failed to initialize GTT\n"); | ||
1945 | ret = -ENODEV; | ||
1946 | goto out_rmmap; | ||
1947 | } | ||
1948 | |||
1949 | agp_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT; | ||
1950 | |||
2102 | dev_priv->mm.gtt_mapping = | 1951 | dev_priv->mm.gtt_mapping = |
2103 | io_mapping_create_wc(dev->agp->base, | 1952 | io_mapping_create_wc(dev->agp->base, agp_size); |
2104 | dev->agp->agp_info.aper_size * 1024*1024); | ||
2105 | if (dev_priv->mm.gtt_mapping == NULL) { | 1953 | if (dev_priv->mm.gtt_mapping == NULL) { |
2106 | ret = -EIO; | 1954 | ret = -EIO; |
2107 | goto out_rmmap; | 1955 | goto out_rmmap; |
@@ -2113,72 +1961,60 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) | |||
2113 | * MTRR if present. Even if a UC MTRR isn't present. | 1961 | * MTRR if present. Even if a UC MTRR isn't present. |
2114 | */ | 1962 | */ |
2115 | dev_priv->mm.gtt_mtrr = mtrr_add(dev->agp->base, | 1963 | dev_priv->mm.gtt_mtrr = mtrr_add(dev->agp->base, |
2116 | dev->agp->agp_info.aper_size * | 1964 | agp_size, |
2117 | 1024 * 1024, | ||
2118 | MTRR_TYPE_WRCOMB, 1); | 1965 | MTRR_TYPE_WRCOMB, 1); |
2119 | if (dev_priv->mm.gtt_mtrr < 0) { | 1966 | if (dev_priv->mm.gtt_mtrr < 0) { |
2120 | DRM_INFO("MTRR allocation failed. Graphics " | 1967 | DRM_INFO("MTRR allocation failed. Graphics " |
2121 | "performance may suffer.\n"); | 1968 | "performance may suffer.\n"); |
2122 | } | 1969 | } |
2123 | 1970 | ||
2124 | ret = i915_probe_agp(dev, &agp_size, &prealloc_size, &prealloc_start); | 1971 | /* The i915 workqueue is primarily used for batched retirement of |
2125 | if (ret) | 1972 | * requests (and thus managing bo) once the task has been completed |
2126 | goto out_iomapfree; | 1973 | * by the GPU. i915_gem_retire_requests() is called directly when we |
2127 | 1974 | * need high-priority retirement, such as waiting for an explicit | |
2128 | if (prealloc_size > intel_max_stolen) { | 1975 | * bo. |
2129 | DRM_INFO("detected %dM stolen memory, trimming to %dM\n", | 1976 | * |
2130 | prealloc_size >> 20, intel_max_stolen >> 20); | 1977 | * It is also used for periodic low-priority events, such as |
2131 | prealloc_size = intel_max_stolen; | 1978 | * idle-timers and recording error state. |
2132 | } | 1979 | * |
2133 | 1980 | * All tasks on the workqueue are expected to acquire the dev mutex | |
2134 | dev_priv->wq = create_singlethread_workqueue("i915"); | 1981 | * so there is no point in running more than one instance of the |
1982 | * workqueue at any time: max_active = 1 and NON_REENTRANT. | ||
1983 | */ | ||
1984 | dev_priv->wq = alloc_workqueue("i915", | ||
1985 | WQ_UNBOUND | WQ_NON_REENTRANT, | ||
1986 | 1); | ||
2135 | if (dev_priv->wq == NULL) { | 1987 | if (dev_priv->wq == NULL) { |
2136 | DRM_ERROR("Failed to create our workqueue.\n"); | 1988 | DRM_ERROR("Failed to create our workqueue.\n"); |
2137 | ret = -ENOMEM; | 1989 | ret = -ENOMEM; |
2138 | goto out_iomapfree; | 1990 | goto out_mtrrfree; |
2139 | } | 1991 | } |
2140 | 1992 | ||
2141 | /* enable GEM by default */ | 1993 | /* enable GEM by default */ |
2142 | dev_priv->has_gem = 1; | 1994 | dev_priv->has_gem = 1; |
2143 | 1995 | ||
2144 | if (prealloc_size > agp_size * 3 / 4) { | 1996 | intel_irq_init(dev); |
2145 | DRM_ERROR("Detected broken video BIOS with %d/%dkB of video " | ||
2146 | "memory stolen.\n", | ||
2147 | prealloc_size / 1024, agp_size / 1024); | ||
2148 | DRM_ERROR("Disabling GEM. (try reducing stolen memory or " | ||
2149 | "updating the BIOS to fix).\n"); | ||
2150 | dev_priv->has_gem = 0; | ||
2151 | } | ||
2152 | |||
2153 | if (dev_priv->has_gem == 0 && | ||
2154 | drm_core_check_feature(dev, DRIVER_MODESET)) { | ||
2155 | DRM_ERROR("kernel modesetting requires GEM, disabling driver.\n"); | ||
2156 | ret = -ENODEV; | ||
2157 | goto out_iomapfree; | ||
2158 | } | ||
2159 | |||
2160 | dev->driver->get_vblank_counter = i915_get_vblank_counter; | ||
2161 | dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */ | ||
2162 | if (IS_G4X(dev) || IS_IRONLAKE(dev) || IS_GEN6(dev)) { | ||
2163 | dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */ | ||
2164 | dev->driver->get_vblank_counter = gm45_get_vblank_counter; | ||
2165 | } | ||
2166 | 1997 | ||
2167 | /* Try to make sure MCHBAR is enabled before poking at it */ | 1998 | /* Try to make sure MCHBAR is enabled before poking at it */ |
2168 | intel_setup_mchbar(dev); | 1999 | intel_setup_mchbar(dev); |
2000 | intel_setup_gmbus(dev); | ||
2001 | intel_opregion_setup(dev); | ||
2002 | |||
2003 | /* Make sure the bios did its job and set up vital registers */ | ||
2004 | intel_setup_bios(dev); | ||
2169 | 2005 | ||
2170 | i915_gem_load(dev); | 2006 | i915_gem_load(dev); |
2171 | 2007 | ||
2172 | /* Init HWS */ | 2008 | /* Init HWS */ |
2173 | if (!I915_NEED_GFX_HWS(dev)) { | 2009 | if (!I915_NEED_GFX_HWS(dev)) { |
2174 | ret = i915_init_phys_hws(dev); | 2010 | ret = i915_init_phys_hws(dev); |
2175 | if (ret != 0) | 2011 | if (ret) |
2176 | goto out_workqueue_free; | 2012 | goto out_gem_unload; |
2177 | } | 2013 | } |
2178 | 2014 | ||
2179 | if (IS_PINEVIEW(dev)) | 2015 | if (IS_PINEVIEW(dev)) |
2180 | i915_pineview_get_mem_freq(dev); | 2016 | i915_pineview_get_mem_freq(dev); |
2181 | else if (IS_IRONLAKE(dev)) | 2017 | else if (IS_GEN5(dev)) |
2182 | i915_ironlake_get_mem_freq(dev); | 2018 | i915_ironlake_get_mem_freq(dev); |
2183 | 2019 | ||
2184 | /* On the 945G/GM, the chipset reports the MSI capability on the | 2020 | /* On the 945G/GM, the chipset reports the MSI capability on the |
@@ -2195,16 +2031,18 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) | |||
2195 | if (!IS_I945G(dev) && !IS_I945GM(dev)) | 2031 | if (!IS_I945G(dev) && !IS_I945GM(dev)) |
2196 | pci_enable_msi(dev->pdev); | 2032 | pci_enable_msi(dev->pdev); |
2197 | 2033 | ||
2198 | spin_lock_init(&dev_priv->user_irq_lock); | 2034 | spin_lock_init(&dev_priv->irq_lock); |
2199 | spin_lock_init(&dev_priv->error_lock); | 2035 | spin_lock_init(&dev_priv->error_lock); |
2200 | dev_priv->trace_irq_seqno = 0; | 2036 | spin_lock_init(&dev_priv->rps_lock); |
2201 | 2037 | ||
2202 | ret = drm_vblank_init(dev, I915_NUM_PIPE); | 2038 | if (IS_MOBILE(dev) || !IS_GEN2(dev)) |
2039 | dev_priv->num_pipe = 2; | ||
2040 | else | ||
2041 | dev_priv->num_pipe = 1; | ||
2203 | 2042 | ||
2204 | if (ret) { | 2043 | ret = drm_vblank_init(dev, dev_priv->num_pipe); |
2205 | (void) i915_driver_unload(dev); | 2044 | if (ret) |
2206 | return ret; | 2045 | goto out_gem_unload; |
2207 | } | ||
2208 | 2046 | ||
2209 | /* Start out suspended */ | 2047 | /* Start out suspended */ |
2210 | dev_priv->mm.suspended = 1; | 2048 | dev_priv->mm.suspended = 1; |
@@ -2212,16 +2050,16 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) | |||
2212 | intel_detect_pch(dev); | 2050 | intel_detect_pch(dev); |
2213 | 2051 | ||
2214 | if (drm_core_check_feature(dev, DRIVER_MODESET)) { | 2052 | if (drm_core_check_feature(dev, DRIVER_MODESET)) { |
2215 | ret = i915_load_modeset_init(dev, prealloc_start, | 2053 | ret = i915_load_modeset_init(dev); |
2216 | prealloc_size, agp_size); | ||
2217 | if (ret < 0) { | 2054 | if (ret < 0) { |
2218 | DRM_ERROR("failed to init modeset\n"); | 2055 | DRM_ERROR("failed to init modeset\n"); |
2219 | goto out_workqueue_free; | 2056 | goto out_gem_unload; |
2220 | } | 2057 | } |
2221 | } | 2058 | } |
2222 | 2059 | ||
2223 | /* Must be done after probing outputs */ | 2060 | /* Must be done after probing outputs */ |
2224 | intel_opregion_init(dev, 0); | 2061 | intel_opregion_init(dev); |
2062 | acpi_video_register(); | ||
2225 | 2063 | ||
2226 | setup_timer(&dev_priv->hangcheck_timer, i915_hangcheck_elapsed, | 2064 | setup_timer(&dev_priv->hangcheck_timer, i915_hangcheck_elapsed, |
2227 | (unsigned long) dev); | 2065 | (unsigned long) dev); |
@@ -2231,17 +2069,29 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) | |||
2231 | dev_priv->mchdev_lock = &mchdev_lock; | 2069 | dev_priv->mchdev_lock = &mchdev_lock; |
2232 | spin_unlock(&mchdev_lock); | 2070 | spin_unlock(&mchdev_lock); |
2233 | 2071 | ||
2234 | /* XXX Prevent module unload due to memory corruption bugs. */ | 2072 | ips_ping_for_i915_load(); |
2235 | __module_get(THIS_MODULE); | ||
2236 | 2073 | ||
2237 | return 0; | 2074 | return 0; |
2238 | 2075 | ||
2239 | out_workqueue_free: | 2076 | out_gem_unload: |
2077 | if (dev_priv->mm.inactive_shrinker.shrink) | ||
2078 | unregister_shrinker(&dev_priv->mm.inactive_shrinker); | ||
2079 | |||
2080 | if (dev->pdev->msi_enabled) | ||
2081 | pci_disable_msi(dev->pdev); | ||
2082 | |||
2083 | intel_teardown_gmbus(dev); | ||
2084 | intel_teardown_mchbar(dev); | ||
2240 | destroy_workqueue(dev_priv->wq); | 2085 | destroy_workqueue(dev_priv->wq); |
2241 | out_iomapfree: | 2086 | out_mtrrfree: |
2087 | if (dev_priv->mm.gtt_mtrr >= 0) { | ||
2088 | mtrr_del(dev_priv->mm.gtt_mtrr, dev->agp->base, | ||
2089 | dev->agp->agp_info.aper_size * 1024 * 1024); | ||
2090 | dev_priv->mm.gtt_mtrr = -1; | ||
2091 | } | ||
2242 | io_mapping_free(dev_priv->mm.gtt_mapping); | 2092 | io_mapping_free(dev_priv->mm.gtt_mapping); |
2243 | out_rmmap: | 2093 | out_rmmap: |
2244 | iounmap(dev_priv->regs); | 2094 | pci_iounmap(dev->pdev, dev_priv->regs); |
2245 | put_bridge: | 2095 | put_bridge: |
2246 | pci_dev_put(dev_priv->bridge_dev); | 2096 | pci_dev_put(dev_priv->bridge_dev); |
2247 | free_priv: | 2097 | free_priv: |
@@ -2252,15 +2102,23 @@ free_priv: | |||
2252 | int i915_driver_unload(struct drm_device *dev) | 2102 | int i915_driver_unload(struct drm_device *dev) |
2253 | { | 2103 | { |
2254 | struct drm_i915_private *dev_priv = dev->dev_private; | 2104 | struct drm_i915_private *dev_priv = dev->dev_private; |
2255 | 2105 | int ret; | |
2256 | i915_destroy_error_state(dev); | ||
2257 | 2106 | ||
2258 | spin_lock(&mchdev_lock); | 2107 | spin_lock(&mchdev_lock); |
2259 | i915_mch_dev = NULL; | 2108 | i915_mch_dev = NULL; |
2260 | spin_unlock(&mchdev_lock); | 2109 | spin_unlock(&mchdev_lock); |
2261 | 2110 | ||
2262 | destroy_workqueue(dev_priv->wq); | 2111 | if (dev_priv->mm.inactive_shrinker.shrink) |
2263 | del_timer_sync(&dev_priv->hangcheck_timer); | 2112 | unregister_shrinker(&dev_priv->mm.inactive_shrinker); |
2113 | |||
2114 | mutex_lock(&dev->struct_mutex); | ||
2115 | ret = i915_gpu_idle(dev); | ||
2116 | if (ret) | ||
2117 | DRM_ERROR("failed to idle hardware: %d\n", ret); | ||
2118 | mutex_unlock(&dev->struct_mutex); | ||
2119 | |||
2120 | /* Cancel the retire work handler, which should be idle now. */ | ||
2121 | cancel_delayed_work_sync(&dev_priv->mm.retire_work); | ||
2264 | 2122 | ||
2265 | io_mapping_free(dev_priv->mm.gtt_mapping); | 2123 | io_mapping_free(dev_priv->mm.gtt_mapping); |
2266 | if (dev_priv->mm.gtt_mtrr >= 0) { | 2124 | if (dev_priv->mm.gtt_mtrr >= 0) { |
@@ -2269,7 +2127,10 @@ int i915_driver_unload(struct drm_device *dev) | |||
2269 | dev_priv->mm.gtt_mtrr = -1; | 2127 | dev_priv->mm.gtt_mtrr = -1; |
2270 | } | 2128 | } |
2271 | 2129 | ||
2130 | acpi_video_unregister(); | ||
2131 | |||
2272 | if (drm_core_check_feature(dev, DRIVER_MODESET)) { | 2132 | if (drm_core_check_feature(dev, DRIVER_MODESET)) { |
2133 | intel_fbdev_fini(dev); | ||
2273 | intel_modeset_cleanup(dev); | 2134 | intel_modeset_cleanup(dev); |
2274 | 2135 | ||
2275 | /* | 2136 | /* |
@@ -2281,55 +2142,66 @@ int i915_driver_unload(struct drm_device *dev) | |||
2281 | dev_priv->child_dev = NULL; | 2142 | dev_priv->child_dev = NULL; |
2282 | dev_priv->child_dev_num = 0; | 2143 | dev_priv->child_dev_num = 0; |
2283 | } | 2144 | } |
2284 | drm_irq_uninstall(dev); | 2145 | |
2285 | vga_switcheroo_unregister_client(dev->pdev); | 2146 | vga_switcheroo_unregister_client(dev->pdev); |
2286 | vga_client_register(dev->pdev, NULL, NULL, NULL); | 2147 | vga_client_register(dev->pdev, NULL, NULL, NULL); |
2287 | } | 2148 | } |
2288 | 2149 | ||
2150 | /* Free error state after interrupts are fully disabled. */ | ||
2151 | del_timer_sync(&dev_priv->hangcheck_timer); | ||
2152 | cancel_work_sync(&dev_priv->error_work); | ||
2153 | i915_destroy_error_state(dev); | ||
2154 | |||
2289 | if (dev->pdev->msi_enabled) | 2155 | if (dev->pdev->msi_enabled) |
2290 | pci_disable_msi(dev->pdev); | 2156 | pci_disable_msi(dev->pdev); |
2291 | 2157 | ||
2292 | if (dev_priv->regs != NULL) | 2158 | intel_opregion_fini(dev); |
2293 | iounmap(dev_priv->regs); | ||
2294 | |||
2295 | intel_opregion_free(dev, 0); | ||
2296 | 2159 | ||
2297 | if (drm_core_check_feature(dev, DRIVER_MODESET)) { | 2160 | if (drm_core_check_feature(dev, DRIVER_MODESET)) { |
2298 | i915_gem_free_all_phys_object(dev); | 2161 | /* Flush any outstanding unpin_work. */ |
2162 | flush_workqueue(dev_priv->wq); | ||
2299 | 2163 | ||
2300 | mutex_lock(&dev->struct_mutex); | 2164 | mutex_lock(&dev->struct_mutex); |
2165 | i915_gem_free_all_phys_object(dev); | ||
2301 | i915_gem_cleanup_ringbuffer(dev); | 2166 | i915_gem_cleanup_ringbuffer(dev); |
2302 | mutex_unlock(&dev->struct_mutex); | 2167 | mutex_unlock(&dev->struct_mutex); |
2303 | if (I915_HAS_FBC(dev) && i915_powersave) | 2168 | if (I915_HAS_FBC(dev) && i915_powersave) |
2304 | i915_cleanup_compression(dev); | 2169 | i915_cleanup_compression(dev); |
2305 | drm_mm_takedown(&dev_priv->vram); | 2170 | drm_mm_takedown(&dev_priv->mm.stolen); |
2306 | i915_gem_lastclose(dev); | ||
2307 | 2171 | ||
2308 | intel_cleanup_overlay(dev); | 2172 | intel_cleanup_overlay(dev); |
2173 | |||
2174 | if (!I915_NEED_GFX_HWS(dev)) | ||
2175 | i915_free_hws(dev); | ||
2309 | } | 2176 | } |
2310 | 2177 | ||
2178 | if (dev_priv->regs != NULL) | ||
2179 | pci_iounmap(dev->pdev, dev_priv->regs); | ||
2180 | |||
2181 | intel_teardown_gmbus(dev); | ||
2311 | intel_teardown_mchbar(dev); | 2182 | intel_teardown_mchbar(dev); |
2312 | 2183 | ||
2184 | destroy_workqueue(dev_priv->wq); | ||
2185 | |||
2313 | pci_dev_put(dev_priv->bridge_dev); | 2186 | pci_dev_put(dev_priv->bridge_dev); |
2314 | kfree(dev->dev_private); | 2187 | kfree(dev->dev_private); |
2315 | 2188 | ||
2316 | return 0; | 2189 | return 0; |
2317 | } | 2190 | } |
2318 | 2191 | ||
2319 | int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv) | 2192 | int i915_driver_open(struct drm_device *dev, struct drm_file *file) |
2320 | { | 2193 | { |
2321 | struct drm_i915_file_private *i915_file_priv; | 2194 | struct drm_i915_file_private *file_priv; |
2322 | 2195 | ||
2323 | DRM_DEBUG_DRIVER("\n"); | 2196 | DRM_DEBUG_DRIVER("\n"); |
2324 | i915_file_priv = (struct drm_i915_file_private *) | 2197 | file_priv = kmalloc(sizeof(*file_priv), GFP_KERNEL); |
2325 | kmalloc(sizeof(*i915_file_priv), GFP_KERNEL); | 2198 | if (!file_priv) |
2326 | |||
2327 | if (!i915_file_priv) | ||
2328 | return -ENOMEM; | 2199 | return -ENOMEM; |
2329 | 2200 | ||
2330 | file_priv->driver_priv = i915_file_priv; | 2201 | file->driver_priv = file_priv; |
2331 | 2202 | ||
2332 | INIT_LIST_HEAD(&i915_file_priv->mm.request_list); | 2203 | spin_lock_init(&file_priv->mm.lock); |
2204 | INIT_LIST_HEAD(&file_priv->mm.request_list); | ||
2333 | 2205 | ||
2334 | return 0; | 2206 | return 0; |
2335 | } | 2207 | } |
@@ -2351,7 +2223,7 @@ void i915_driver_lastclose(struct drm_device * dev) | |||
2351 | drm_i915_private_t *dev_priv = dev->dev_private; | 2223 | drm_i915_private_t *dev_priv = dev->dev_private; |
2352 | 2224 | ||
2353 | if (!dev_priv || drm_core_check_feature(dev, DRIVER_MODESET)) { | 2225 | if (!dev_priv || drm_core_check_feature(dev, DRIVER_MODESET)) { |
2354 | drm_fb_helper_restore(); | 2226 | intel_fb_restore_mode(dev); |
2355 | vga_switcheroo_process_delayed_switch(); | 2227 | vga_switcheroo_process_delayed_switch(); |
2356 | return; | 2228 | return; |
2357 | } | 2229 | } |
@@ -2372,11 +2244,11 @@ void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv) | |||
2372 | i915_mem_release(dev, file_priv, dev_priv->agp_heap); | 2244 | i915_mem_release(dev, file_priv, dev_priv->agp_heap); |
2373 | } | 2245 | } |
2374 | 2246 | ||
2375 | void i915_driver_postclose(struct drm_device *dev, struct drm_file *file_priv) | 2247 | void i915_driver_postclose(struct drm_device *dev, struct drm_file *file) |
2376 | { | 2248 | { |
2377 | struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv; | 2249 | struct drm_i915_file_private *file_priv = file->driver_priv; |
2378 | 2250 | ||
2379 | kfree(i915_file_priv); | 2251 | kfree(file_priv); |
2380 | } | 2252 | } |
2381 | 2253 | ||
2382 | struct drm_ioctl_desc i915_ioctls[] = { | 2254 | struct drm_ioctl_desc i915_ioctls[] = { |
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 6dbe14cc4f74..eb91e2dd7914 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c | |||
@@ -32,6 +32,7 @@ | |||
32 | #include "drm.h" | 32 | #include "drm.h" |
33 | #include "i915_drm.h" | 33 | #include "i915_drm.h" |
34 | #include "i915_drv.h" | 34 | #include "i915_drv.h" |
35 | #include "intel_drv.h" | ||
35 | 36 | ||
36 | #include <linux/console.h> | 37 | #include <linux/console.h> |
37 | #include "drm_crtc_helper.h" | 38 | #include "drm_crtc_helper.h" |
@@ -42,18 +43,39 @@ module_param_named(modeset, i915_modeset, int, 0400); | |||
42 | unsigned int i915_fbpercrtc = 0; | 43 | unsigned int i915_fbpercrtc = 0; |
43 | module_param_named(fbpercrtc, i915_fbpercrtc, int, 0400); | 44 | module_param_named(fbpercrtc, i915_fbpercrtc, int, 0400); |
44 | 45 | ||
46 | int i915_panel_ignore_lid = 0; | ||
47 | module_param_named(panel_ignore_lid, i915_panel_ignore_lid, int, 0600); | ||
48 | |||
45 | unsigned int i915_powersave = 1; | 49 | unsigned int i915_powersave = 1; |
46 | module_param_named(powersave, i915_powersave, int, 0400); | 50 | module_param_named(powersave, i915_powersave, int, 0600); |
51 | |||
52 | unsigned int i915_semaphores = 0; | ||
53 | module_param_named(semaphores, i915_semaphores, int, 0600); | ||
54 | |||
55 | unsigned int i915_enable_rc6 = 0; | ||
56 | module_param_named(i915_enable_rc6, i915_enable_rc6, int, 0600); | ||
57 | |||
58 | unsigned int i915_enable_fbc = 0; | ||
59 | module_param_named(i915_enable_fbc, i915_enable_fbc, int, 0600); | ||
47 | 60 | ||
48 | unsigned int i915_lvds_downclock = 0; | 61 | unsigned int i915_lvds_downclock = 0; |
49 | module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400); | 62 | module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400); |
50 | 63 | ||
64 | unsigned int i915_panel_use_ssc = 1; | ||
65 | module_param_named(lvds_use_ssc, i915_panel_use_ssc, int, 0600); | ||
66 | |||
67 | int i915_vbt_sdvo_panel_type = -1; | ||
68 | module_param_named(vbt_sdvo_panel_type, i915_vbt_sdvo_panel_type, int, 0600); | ||
69 | |||
70 | static bool i915_try_reset = true; | ||
71 | module_param_named(reset, i915_try_reset, bool, 0600); | ||
72 | |||
51 | static struct drm_driver driver; | 73 | static struct drm_driver driver; |
52 | extern int intel_agp_enabled; | 74 | extern int intel_agp_enabled; |
53 | 75 | ||
54 | #define INTEL_VGA_DEVICE(id, info) { \ | 76 | #define INTEL_VGA_DEVICE(id, info) { \ |
55 | .class = PCI_CLASS_DISPLAY_VGA << 8, \ | 77 | .class = PCI_CLASS_DISPLAY_VGA << 8, \ |
56 | .class_mask = 0xffff00, \ | 78 | .class_mask = 0xff0000, \ |
57 | .vendor = 0x8086, \ | 79 | .vendor = 0x8086, \ |
58 | .device = id, \ | 80 | .device = id, \ |
59 | .subvendor = PCI_ANY_ID, \ | 81 | .subvendor = PCI_ANY_ID, \ |
@@ -61,86 +83,127 @@ extern int intel_agp_enabled; | |||
61 | .driver_data = (unsigned long) info } | 83 | .driver_data = (unsigned long) info } |
62 | 84 | ||
63 | static const struct intel_device_info intel_i830_info = { | 85 | static const struct intel_device_info intel_i830_info = { |
64 | .gen = 2, .is_i8xx = 1, .is_mobile = 1, .cursor_needs_physical = 1, | 86 | .gen = 2, .is_mobile = 1, .cursor_needs_physical = 1, |
87 | .has_overlay = 1, .overlay_needs_physical = 1, | ||
65 | }; | 88 | }; |
66 | 89 | ||
67 | static const struct intel_device_info intel_845g_info = { | 90 | static const struct intel_device_info intel_845g_info = { |
68 | .gen = 2, .is_i8xx = 1, | 91 | .gen = 2, |
92 | .has_overlay = 1, .overlay_needs_physical = 1, | ||
69 | }; | 93 | }; |
70 | 94 | ||
71 | static const struct intel_device_info intel_i85x_info = { | 95 | static const struct intel_device_info intel_i85x_info = { |
72 | .gen = 2, .is_i8xx = 1, .is_i85x = 1, .is_mobile = 1, | 96 | .gen = 2, .is_i85x = 1, .is_mobile = 1, |
73 | .cursor_needs_physical = 1, | 97 | .cursor_needs_physical = 1, |
98 | .has_overlay = 1, .overlay_needs_physical = 1, | ||
74 | }; | 99 | }; |
75 | 100 | ||
76 | static const struct intel_device_info intel_i865g_info = { | 101 | static const struct intel_device_info intel_i865g_info = { |
77 | .gen = 2, .is_i8xx = 1, | 102 | .gen = 2, |
103 | .has_overlay = 1, .overlay_needs_physical = 1, | ||
78 | }; | 104 | }; |
79 | 105 | ||
80 | static const struct intel_device_info intel_i915g_info = { | 106 | static const struct intel_device_info intel_i915g_info = { |
81 | .gen = 3, .is_i915g = 1, .is_i9xx = 1, .cursor_needs_physical = 1, | 107 | .gen = 3, .is_i915g = 1, .cursor_needs_physical = 1, |
108 | .has_overlay = 1, .overlay_needs_physical = 1, | ||
82 | }; | 109 | }; |
83 | static const struct intel_device_info intel_i915gm_info = { | 110 | static const struct intel_device_info intel_i915gm_info = { |
84 | .gen = 3, .is_i9xx = 1, .is_mobile = 1, | 111 | .gen = 3, .is_mobile = 1, |
85 | .cursor_needs_physical = 1, | 112 | .cursor_needs_physical = 1, |
113 | .has_overlay = 1, .overlay_needs_physical = 1, | ||
114 | .supports_tv = 1, | ||
86 | }; | 115 | }; |
87 | static const struct intel_device_info intel_i945g_info = { | 116 | static const struct intel_device_info intel_i945g_info = { |
88 | .gen = 3, .is_i9xx = 1, .has_hotplug = 1, .cursor_needs_physical = 1, | 117 | .gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1, |
118 | .has_overlay = 1, .overlay_needs_physical = 1, | ||
89 | }; | 119 | }; |
90 | static const struct intel_device_info intel_i945gm_info = { | 120 | static const struct intel_device_info intel_i945gm_info = { |
91 | .gen = 3, .is_i945gm = 1, .is_i9xx = 1, .is_mobile = 1, | 121 | .gen = 3, .is_i945gm = 1, .is_mobile = 1, |
92 | .has_hotplug = 1, .cursor_needs_physical = 1, | 122 | .has_hotplug = 1, .cursor_needs_physical = 1, |
123 | .has_overlay = 1, .overlay_needs_physical = 1, | ||
124 | .supports_tv = 1, | ||
93 | }; | 125 | }; |
94 | 126 | ||
95 | static const struct intel_device_info intel_i965g_info = { | 127 | static const struct intel_device_info intel_i965g_info = { |
96 | .gen = 4, .is_broadwater = 1, .is_i965g = 1, .is_i9xx = 1, | 128 | .gen = 4, .is_broadwater = 1, |
97 | .has_hotplug = 1, | 129 | .has_hotplug = 1, |
130 | .has_overlay = 1, | ||
98 | }; | 131 | }; |
99 | 132 | ||
100 | static const struct intel_device_info intel_i965gm_info = { | 133 | static const struct intel_device_info intel_i965gm_info = { |
101 | .gen = 4, .is_crestline = 1, .is_i965g = 1, .is_i965gm = 1, .is_i9xx = 1, | 134 | .gen = 4, .is_crestline = 1, |
102 | .is_mobile = 1, .has_fbc = 1, .has_rc6 = 1, .has_hotplug = 1, | 135 | .is_mobile = 1, .has_fbc = 1, .has_hotplug = 1, |
136 | .has_overlay = 1, | ||
137 | .supports_tv = 1, | ||
103 | }; | 138 | }; |
104 | 139 | ||
105 | static const struct intel_device_info intel_g33_info = { | 140 | static const struct intel_device_info intel_g33_info = { |
106 | .gen = 3, .is_g33 = 1, .is_i9xx = 1, | 141 | .gen = 3, .is_g33 = 1, |
107 | .need_gfx_hws = 1, .has_hotplug = 1, | 142 | .need_gfx_hws = 1, .has_hotplug = 1, |
143 | .has_overlay = 1, | ||
108 | }; | 144 | }; |
109 | 145 | ||
110 | static const struct intel_device_info intel_g45_info = { | 146 | static const struct intel_device_info intel_g45_info = { |
111 | .gen = 4, .is_i965g = 1, .is_g4x = 1, .is_i9xx = 1, .need_gfx_hws = 1, | 147 | .gen = 4, .is_g4x = 1, .need_gfx_hws = 1, |
112 | .has_pipe_cxsr = 1, .has_hotplug = 1, | 148 | .has_pipe_cxsr = 1, .has_hotplug = 1, |
149 | .has_bsd_ring = 1, | ||
113 | }; | 150 | }; |
114 | 151 | ||
115 | static const struct intel_device_info intel_gm45_info = { | 152 | static const struct intel_device_info intel_gm45_info = { |
116 | .gen = 4, .is_i965g = 1, .is_g4x = 1, .is_i9xx = 1, | 153 | .gen = 4, .is_g4x = 1, |
117 | .is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1, | 154 | .is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1, |
118 | .has_pipe_cxsr = 1, .has_hotplug = 1, | 155 | .has_pipe_cxsr = 1, .has_hotplug = 1, |
156 | .supports_tv = 1, | ||
157 | .has_bsd_ring = 1, | ||
119 | }; | 158 | }; |
120 | 159 | ||
121 | static const struct intel_device_info intel_pineview_info = { | 160 | static const struct intel_device_info intel_pineview_info = { |
122 | .gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .is_i9xx = 1, | 161 | .gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, |
123 | .need_gfx_hws = 1, .has_hotplug = 1, | 162 | .need_gfx_hws = 1, .has_hotplug = 1, |
163 | .has_overlay = 1, | ||
124 | }; | 164 | }; |
125 | 165 | ||
126 | static const struct intel_device_info intel_ironlake_d_info = { | 166 | static const struct intel_device_info intel_ironlake_d_info = { |
127 | .gen = 5, .is_ironlake = 1, .is_i965g = 1, .is_i9xx = 1, | 167 | .gen = 5, |
128 | .need_gfx_hws = 1, .has_pipe_cxsr = 1, .has_hotplug = 1, | 168 | .need_gfx_hws = 1, .has_pipe_cxsr = 1, .has_hotplug = 1, |
169 | .has_bsd_ring = 1, | ||
129 | }; | 170 | }; |
130 | 171 | ||
131 | static const struct intel_device_info intel_ironlake_m_info = { | 172 | static const struct intel_device_info intel_ironlake_m_info = { |
132 | .gen = 5, .is_ironlake = 1, .is_mobile = 1, .is_i965g = 1, .is_i9xx = 1, | 173 | .gen = 5, .is_mobile = 1, |
133 | .need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1, .has_hotplug = 1, | 174 | .need_gfx_hws = 1, .has_hotplug = 1, |
175 | .has_fbc = 1, | ||
176 | .has_bsd_ring = 1, | ||
134 | }; | 177 | }; |
135 | 178 | ||
136 | static const struct intel_device_info intel_sandybridge_d_info = { | 179 | static const struct intel_device_info intel_sandybridge_d_info = { |
137 | .gen = 6, .is_i965g = 1, .is_i9xx = 1, | 180 | .gen = 6, |
138 | .need_gfx_hws = 1, .has_hotplug = 1, | 181 | .need_gfx_hws = 1, .has_hotplug = 1, |
182 | .has_bsd_ring = 1, | ||
183 | .has_blt_ring = 1, | ||
139 | }; | 184 | }; |
140 | 185 | ||
141 | static const struct intel_device_info intel_sandybridge_m_info = { | 186 | static const struct intel_device_info intel_sandybridge_m_info = { |
142 | .gen = 6, .is_i965g = 1, .is_mobile = 1, .is_i9xx = 1, | 187 | .gen = 6, .is_mobile = 1, |
143 | .need_gfx_hws = 1, .has_hotplug = 1, | 188 | .need_gfx_hws = 1, .has_hotplug = 1, |
189 | .has_fbc = 1, | ||
190 | .has_bsd_ring = 1, | ||
191 | .has_blt_ring = 1, | ||
192 | }; | ||
193 | |||
194 | static const struct intel_device_info intel_ivybridge_d_info = { | ||
195 | .is_ivybridge = 1, .gen = 7, | ||
196 | .need_gfx_hws = 1, .has_hotplug = 1, | ||
197 | .has_bsd_ring = 1, | ||
198 | .has_blt_ring = 1, | ||
199 | }; | ||
200 | |||
201 | static const struct intel_device_info intel_ivybridge_m_info = { | ||
202 | .is_ivybridge = 1, .gen = 7, .is_mobile = 1, | ||
203 | .need_gfx_hws = 1, .has_hotplug = 1, | ||
204 | .has_fbc = 0, /* FBC is not enabled on Ivybridge mobile yet */ | ||
205 | .has_bsd_ring = 1, | ||
206 | .has_blt_ring = 1, | ||
144 | }; | 207 | }; |
145 | 208 | ||
146 | static const struct pci_device_id pciidlist[] = { /* aka */ | 209 | static const struct pci_device_id pciidlist[] = { /* aka */ |
@@ -182,6 +245,11 @@ static const struct pci_device_id pciidlist[] = { /* aka */ | |||
182 | INTEL_VGA_DEVICE(0x0116, &intel_sandybridge_m_info), | 245 | INTEL_VGA_DEVICE(0x0116, &intel_sandybridge_m_info), |
183 | INTEL_VGA_DEVICE(0x0126, &intel_sandybridge_m_info), | 246 | INTEL_VGA_DEVICE(0x0126, &intel_sandybridge_m_info), |
184 | INTEL_VGA_DEVICE(0x010A, &intel_sandybridge_d_info), | 247 | INTEL_VGA_DEVICE(0x010A, &intel_sandybridge_d_info), |
248 | INTEL_VGA_DEVICE(0x0156, &intel_ivybridge_m_info), /* GT1 mobile */ | ||
249 | INTEL_VGA_DEVICE(0x0166, &intel_ivybridge_m_info), /* GT2 mobile */ | ||
250 | INTEL_VGA_DEVICE(0x0152, &intel_ivybridge_d_info), /* GT1 desktop */ | ||
251 | INTEL_VGA_DEVICE(0x0162, &intel_ivybridge_d_info), /* GT2 desktop */ | ||
252 | INTEL_VGA_DEVICE(0x015a, &intel_ivybridge_d_info), /* GT1 server */ | ||
185 | {0, 0, 0} | 253 | {0, 0, 0} |
186 | }; | 254 | }; |
187 | 255 | ||
@@ -190,7 +258,9 @@ MODULE_DEVICE_TABLE(pci, pciidlist); | |||
190 | #endif | 258 | #endif |
191 | 259 | ||
192 | #define INTEL_PCH_DEVICE_ID_MASK 0xff00 | 260 | #define INTEL_PCH_DEVICE_ID_MASK 0xff00 |
261 | #define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00 | ||
193 | #define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00 | 262 | #define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00 |
263 | #define INTEL_PCH_PPT_DEVICE_ID_TYPE 0x1e00 | ||
194 | 264 | ||
195 | void intel_detect_pch (struct drm_device *dev) | 265 | void intel_detect_pch (struct drm_device *dev) |
196 | { | 266 | { |
@@ -209,19 +279,86 @@ void intel_detect_pch (struct drm_device *dev) | |||
209 | int id; | 279 | int id; |
210 | id = pch->device & INTEL_PCH_DEVICE_ID_MASK; | 280 | id = pch->device & INTEL_PCH_DEVICE_ID_MASK; |
211 | 281 | ||
212 | if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) { | 282 | if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) { |
283 | dev_priv->pch_type = PCH_IBX; | ||
284 | DRM_DEBUG_KMS("Found Ibex Peak PCH\n"); | ||
285 | } else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) { | ||
213 | dev_priv->pch_type = PCH_CPT; | 286 | dev_priv->pch_type = PCH_CPT; |
214 | DRM_DEBUG_KMS("Found CougarPoint PCH\n"); | 287 | DRM_DEBUG_KMS("Found CougarPoint PCH\n"); |
288 | } else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) { | ||
289 | /* PantherPoint is CPT compatible */ | ||
290 | dev_priv->pch_type = PCH_CPT; | ||
291 | DRM_DEBUG_KMS("Found PantherPoint PCH\n"); | ||
215 | } | 292 | } |
216 | } | 293 | } |
217 | pci_dev_put(pch); | 294 | pci_dev_put(pch); |
218 | } | 295 | } |
219 | } | 296 | } |
220 | 297 | ||
298 | static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv) | ||
299 | { | ||
300 | int count; | ||
301 | |||
302 | count = 0; | ||
303 | while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1)) | ||
304 | udelay(10); | ||
305 | |||
306 | I915_WRITE_NOTRACE(FORCEWAKE, 1); | ||
307 | POSTING_READ(FORCEWAKE); | ||
308 | |||
309 | count = 0; | ||
310 | while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1) == 0) | ||
311 | udelay(10); | ||
312 | } | ||
313 | |||
314 | /* | ||
315 | * Generally this is called implicitly by the register read function. However, | ||
316 | * if some sequence requires the GT to not power down then this function should | ||
317 | * be called at the beginning of the sequence followed by a call to | ||
318 | * gen6_gt_force_wake_put() at the end of the sequence. | ||
319 | */ | ||
320 | void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv) | ||
321 | { | ||
322 | WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex)); | ||
323 | |||
324 | /* Forcewake is atomic in case we get in here without the lock */ | ||
325 | if (atomic_add_return(1, &dev_priv->forcewake_count) == 1) | ||
326 | __gen6_gt_force_wake_get(dev_priv); | ||
327 | } | ||
328 | |||
329 | static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv) | ||
330 | { | ||
331 | I915_WRITE_NOTRACE(FORCEWAKE, 0); | ||
332 | POSTING_READ(FORCEWAKE); | ||
333 | } | ||
334 | |||
335 | /* | ||
336 | * see gen6_gt_force_wake_get() | ||
337 | */ | ||
338 | void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv) | ||
339 | { | ||
340 | WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex)); | ||
341 | |||
342 | if (atomic_dec_and_test(&dev_priv->forcewake_count)) | ||
343 | __gen6_gt_force_wake_put(dev_priv); | ||
344 | } | ||
345 | |||
346 | void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv) | ||
347 | { | ||
348 | int loop = 500; | ||
349 | u32 fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES); | ||
350 | while (fifo < 20 && loop--) { | ||
351 | udelay(10); | ||
352 | fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES); | ||
353 | } | ||
354 | } | ||
355 | |||
221 | static int i915_drm_freeze(struct drm_device *dev) | 356 | static int i915_drm_freeze(struct drm_device *dev) |
222 | { | 357 | { |
223 | struct drm_i915_private *dev_priv = dev->dev_private; | 358 | struct drm_i915_private *dev_priv = dev->dev_private; |
224 | 359 | ||
360 | drm_kms_helper_poll_disable(dev); | ||
361 | |||
225 | pci_save_state(dev->pdev); | 362 | pci_save_state(dev->pdev); |
226 | 363 | ||
227 | /* If KMS is active, we do the leavevt stuff here */ | 364 | /* If KMS is active, we do the leavevt stuff here */ |
@@ -237,7 +374,7 @@ static int i915_drm_freeze(struct drm_device *dev) | |||
237 | 374 | ||
238 | i915_save_state(dev); | 375 | i915_save_state(dev); |
239 | 376 | ||
240 | intel_opregion_free(dev, 1); | 377 | intel_opregion_fini(dev); |
241 | 378 | ||
242 | /* Modeset on resume, not lid events */ | 379 | /* Modeset on resume, not lid events */ |
243 | dev_priv->modeset_on_lid = 0; | 380 | dev_priv->modeset_on_lid = 0; |
@@ -258,6 +395,10 @@ int i915_suspend(struct drm_device *dev, pm_message_t state) | |||
258 | if (state.event == PM_EVENT_PRETHAW) | 395 | if (state.event == PM_EVENT_PRETHAW) |
259 | return 0; | 396 | return 0; |
260 | 397 | ||
398 | |||
399 | if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) | ||
400 | return 0; | ||
401 | |||
261 | error = i915_drm_freeze(dev); | 402 | error = i915_drm_freeze(dev); |
262 | if (error) | 403 | if (error) |
263 | return error; | 404 | return error; |
@@ -276,9 +417,14 @@ static int i915_drm_thaw(struct drm_device *dev) | |||
276 | struct drm_i915_private *dev_priv = dev->dev_private; | 417 | struct drm_i915_private *dev_priv = dev->dev_private; |
277 | int error = 0; | 418 | int error = 0; |
278 | 419 | ||
279 | i915_restore_state(dev); | 420 | if (drm_core_check_feature(dev, DRIVER_MODESET)) { |
421 | mutex_lock(&dev->struct_mutex); | ||
422 | i915_gem_restore_gtt_mappings(dev); | ||
423 | mutex_unlock(&dev->struct_mutex); | ||
424 | } | ||
280 | 425 | ||
281 | intel_opregion_init(dev, 1); | 426 | i915_restore_state(dev); |
427 | intel_opregion_setup(dev); | ||
282 | 428 | ||
283 | /* KMS EnterVT equivalent */ | 429 | /* KMS EnterVT equivalent */ |
284 | if (drm_core_check_feature(dev, DRIVER_MODESET)) { | 430 | if (drm_core_check_feature(dev, DRIVER_MODESET)) { |
@@ -288,12 +434,18 @@ static int i915_drm_thaw(struct drm_device *dev) | |||
288 | error = i915_gem_init_ringbuffer(dev); | 434 | error = i915_gem_init_ringbuffer(dev); |
289 | mutex_unlock(&dev->struct_mutex); | 435 | mutex_unlock(&dev->struct_mutex); |
290 | 436 | ||
437 | drm_mode_config_reset(dev); | ||
291 | drm_irq_install(dev); | 438 | drm_irq_install(dev); |
292 | 439 | ||
293 | /* Resume the modeset for every activated CRTC */ | 440 | /* Resume the modeset for every activated CRTC */ |
294 | drm_helper_resume_force_mode(dev); | 441 | drm_helper_resume_force_mode(dev); |
442 | |||
443 | if (IS_IRONLAKE_M(dev)) | ||
444 | ironlake_enable_rc6(dev); | ||
295 | } | 445 | } |
296 | 446 | ||
447 | intel_opregion_init(dev); | ||
448 | |||
297 | dev_priv->modeset_on_lid = 0; | 449 | dev_priv->modeset_on_lid = 0; |
298 | 450 | ||
299 | return error; | 451 | return error; |
@@ -301,12 +453,90 @@ static int i915_drm_thaw(struct drm_device *dev) | |||
301 | 453 | ||
302 | int i915_resume(struct drm_device *dev) | 454 | int i915_resume(struct drm_device *dev) |
303 | { | 455 | { |
456 | int ret; | ||
457 | |||
458 | if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) | ||
459 | return 0; | ||
460 | |||
304 | if (pci_enable_device(dev->pdev)) | 461 | if (pci_enable_device(dev->pdev)) |
305 | return -EIO; | 462 | return -EIO; |
306 | 463 | ||
307 | pci_set_master(dev->pdev); | 464 | pci_set_master(dev->pdev); |
308 | 465 | ||
309 | return i915_drm_thaw(dev); | 466 | ret = i915_drm_thaw(dev); |
467 | if (ret) | ||
468 | return ret; | ||
469 | |||
470 | drm_kms_helper_poll_enable(dev); | ||
471 | return 0; | ||
472 | } | ||
473 | |||
474 | static int i8xx_do_reset(struct drm_device *dev, u8 flags) | ||
475 | { | ||
476 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
477 | |||
478 | if (IS_I85X(dev)) | ||
479 | return -ENODEV; | ||
480 | |||
481 | I915_WRITE(D_STATE, I915_READ(D_STATE) | DSTATE_GFX_RESET_I830); | ||
482 | POSTING_READ(D_STATE); | ||
483 | |||
484 | if (IS_I830(dev) || IS_845G(dev)) { | ||
485 | I915_WRITE(DEBUG_RESET_I830, | ||
486 | DEBUG_RESET_DISPLAY | | ||
487 | DEBUG_RESET_RENDER | | ||
488 | DEBUG_RESET_FULL); | ||
489 | POSTING_READ(DEBUG_RESET_I830); | ||
490 | msleep(1); | ||
491 | |||
492 | I915_WRITE(DEBUG_RESET_I830, 0); | ||
493 | POSTING_READ(DEBUG_RESET_I830); | ||
494 | } | ||
495 | |||
496 | msleep(1); | ||
497 | |||
498 | I915_WRITE(D_STATE, I915_READ(D_STATE) & ~DSTATE_GFX_RESET_I830); | ||
499 | POSTING_READ(D_STATE); | ||
500 | |||
501 | return 0; | ||
502 | } | ||
503 | |||
504 | static int i965_reset_complete(struct drm_device *dev) | ||
505 | { | ||
506 | u8 gdrst; | ||
507 | pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst); | ||
508 | return gdrst & 0x1; | ||
509 | } | ||
510 | |||
511 | static int i965_do_reset(struct drm_device *dev, u8 flags) | ||
512 | { | ||
513 | u8 gdrst; | ||
514 | |||
515 | /* | ||
516 | * Set the domains we want to reset (GRDOM/bits 2 and 3) as | ||
517 | * well as the reset bit (GR/bit 0). Setting the GR bit | ||
518 | * triggers the reset; when done, the hardware will clear it. | ||
519 | */ | ||
520 | pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst); | ||
521 | pci_write_config_byte(dev->pdev, I965_GDRST, gdrst | flags | 0x1); | ||
522 | |||
523 | return wait_for(i965_reset_complete(dev), 500); | ||
524 | } | ||
525 | |||
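The reset helpers poll via the wait_for() macro from intel_drv.h. As a sketch of the semantics assumed here (the real macro differs in detail), it evaluates the condition for up to the given number of milliseconds, returning 0 on success and -ETIMEDOUT otherwise:

	#define example_wait_for(COND, MS) ({				\
		unsigned long timeout__ = jiffies + msecs_to_jiffies(MS); \
		int ret__ = 0;						\
		while (!(COND)) {					\
			if (time_after(jiffies, timeout__)) {		\
				ret__ = -ETIMEDOUT;			\
				break;					\
			}						\
			msleep(1);					\
		}							\
		ret__;							\
	})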
526 | static int ironlake_do_reset(struct drm_device *dev, u8 flags) | ||
527 | { | ||
528 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
529 | u32 gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR); | ||
530 | I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR, gdrst | flags | 0x1); | ||
531 | return wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500); | ||
532 | } | ||
533 | |||
534 | static int gen6_do_reset(struct drm_device *dev, u8 flags) | ||
535 | { | ||
536 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
537 | |||
538 | I915_WRITE(GEN6_GDRST, GEN6_GRDOM_FULL); | ||
539 | return wait_for((I915_READ(GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500); | ||
310 | } | 540 | } |
311 | 541 | ||
312 | /** | 542 | /** |
@@ -325,54 +555,50 @@ int i915_resume(struct drm_device *dev) | |||
325 | * - re-init interrupt state | 555 | * - re-init interrupt state |
326 | * - re-init display | 556 | * - re-init display |
327 | */ | 557 | */ |
328 | int i965_reset(struct drm_device *dev, u8 flags) | 558 | int i915_reset(struct drm_device *dev, u8 flags) |
329 | { | 559 | { |
330 | drm_i915_private_t *dev_priv = dev->dev_private; | 560 | drm_i915_private_t *dev_priv = dev->dev_private; |
331 | unsigned long timeout; | ||
332 | u8 gdrst; | ||
333 | /* | 561 | /* |
334 | * We really should only reset the display subsystem if we actually | 562 | * We really should only reset the display subsystem if we actually |
335 | * need to | 563 | * need to |
336 | */ | 564 | */ |
337 | bool need_display = true; | 565 | bool need_display = true; |
566 | int ret; | ||
338 | 567 | ||
339 | mutex_lock(&dev->struct_mutex); | 568 | if (!i915_try_reset) |
569 | return 0; | ||
340 | 570 | ||
341 | /* | 571 | if (!mutex_trylock(&dev->struct_mutex)) |
342 | * Clear request list | 572 | return -EBUSY; |
343 | */ | 573 | |
344 | i915_gem_retire_requests(dev); | 574 | i915_gem_reset(dev); |
345 | 575 | ||
346 | if (need_display) | 576 | ret = -ENODEV; |
347 | i915_save_display(dev); | 577 | if (get_seconds() - dev_priv->last_gpu_reset < 5) { |
348 | 578 | DRM_ERROR("GPU hanging too fast, declaring wedged!\n"); | |
349 | if (IS_I965G(dev) || IS_G4X(dev)) { | 579 | } else switch (INTEL_INFO(dev)->gen) { |
350 | /* | 580 | case 7: |
351 | * Set the domains we want to reset, then the reset bit (bit 0). | 581 | case 6: |
352 | * Clear the reset bit after a while and wait for hardware status | 582 | ret = gen6_do_reset(dev, flags); |
353 | * bit (bit 1) to be set | 583 | /* If reset with a user forcewake, try to restore */ |
354 | */ | 584 | if (atomic_read(&dev_priv->forcewake_count)) |
355 | pci_read_config_byte(dev->pdev, GDRST, &gdrst); | 585 | __gen6_gt_force_wake_get(dev_priv); |
356 | pci_write_config_byte(dev->pdev, GDRST, gdrst | flags | ((flags == GDRST_FULL) ? 0x1 : 0x0)); | 586 | break; |
357 | udelay(50); | 587 | case 5: |
358 | pci_write_config_byte(dev->pdev, GDRST, gdrst & 0xfe); | 588 | ret = ironlake_do_reset(dev, flags); |
359 | 589 | break; | |
360 | /* ...we don't want to loop forever though, 500ms should be plenty */ | 590 | case 4: |
361 | timeout = jiffies + msecs_to_jiffies(500); | 591 | ret = i965_do_reset(dev, flags); |
362 | do { | 592 | break; |
363 | udelay(100); | 593 | case 2: |
364 | pci_read_config_byte(dev->pdev, GDRST, &gdrst); | 594 | ret = i8xx_do_reset(dev, flags); |
365 | } while ((gdrst & 0x1) && time_after(timeout, jiffies)); | 595 | break; |
366 | 596 | } | |
367 | if (gdrst & 0x1) { | 597 | dev_priv->last_gpu_reset = get_seconds(); |
368 | WARN(true, "i915: Failed to reset chip\n"); | 598 | if (ret) { |
369 | mutex_unlock(&dev->struct_mutex); | 599 | DRM_ERROR("Failed to reset chip.\n"); |
370 | return -EIO; | ||
371 | } | ||
372 | } else { | ||
373 | DRM_ERROR("Error occurred. Don't know how to reset this chip.\n"); | ||
374 | mutex_unlock(&dev->struct_mutex); | 600 | mutex_unlock(&dev->struct_mutex); |
375 | return -ENODEV; | 601 | return ret; |
376 | } | 602 | } |
377 | 603 | ||
378 | /* Ok, now get things going again... */ | 604 | /* Ok, now get things going again... */ |
@@ -391,22 +617,34 @@ int i965_reset(struct drm_device *dev, u8 flags) | |||
391 | */ | 617 | */ |
392 | if (drm_core_check_feature(dev, DRIVER_MODESET) || | 618 | if (drm_core_check_feature(dev, DRIVER_MODESET) || |
393 | !dev_priv->mm.suspended) { | 619 | !dev_priv->mm.suspended) { |
394 | struct intel_ring_buffer *ring = &dev_priv->render_ring; | ||
395 | dev_priv->mm.suspended = 0; | 620 | dev_priv->mm.suspended = 0; |
396 | ring->init(dev, ring); | 621 | |
622 | dev_priv->ring[RCS].init(&dev_priv->ring[RCS]); | ||
623 | if (HAS_BSD(dev)) | ||
624 | dev_priv->ring[VCS].init(&dev_priv->ring[VCS]); | ||
625 | if (HAS_BLT(dev)) | ||
626 | dev_priv->ring[BCS].init(&dev_priv->ring[BCS]); | ||
627 | |||
397 | mutex_unlock(&dev->struct_mutex); | 628 | mutex_unlock(&dev->struct_mutex); |
398 | drm_irq_uninstall(dev); | 629 | drm_irq_uninstall(dev); |
630 | drm_mode_config_reset(dev); | ||
399 | drm_irq_install(dev); | 631 | drm_irq_install(dev); |
400 | mutex_lock(&dev->struct_mutex); | 632 | mutex_lock(&dev->struct_mutex); |
401 | } | 633 | } |
402 | 634 | ||
635 | mutex_unlock(&dev->struct_mutex); | ||
636 | |||
403 | /* | 637 | /* |
404 | * Display needs restore too... | 638 | * Perform a full modeset since, on later generations such as Ironlake, |
639 | * we may need to retrain the display link and cannot just restore the | ||
640 | * register values. | ||
405 | */ | 641 | */ |
406 | if (need_display) | 642 | if (need_display) { |
407 | i915_restore_display(dev); | 643 | mutex_lock(&dev->mode_config.mutex); |
644 | drm_helper_resume_force_mode(dev); | ||
645 | mutex_unlock(&dev->mode_config.mutex); | ||
646 | } | ||
408 | 647 | ||
409 | mutex_unlock(&dev->struct_mutex); | ||
410 | return 0; | 648 | return 0; |
411 | } | 649 | } |
412 | 650 | ||
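A sketch of the expected call site in the hangcheck/error path, which is outside this diff; GRDOM_RENDER names the render-domain reset bits described in i965_do_reset() and is assumed from i915_reg.h:

	if (i915_reset(dev, GRDOM_RENDER))
		DRM_ERROR("GPU reset failed, device left wedged\n");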
@@ -414,6 +652,14 @@ int i965_reset(struct drm_device *dev, u8 flags) | |||
414 | static int __devinit | 652 | static int __devinit |
415 | i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | 653 | i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) |
416 | { | 654 | { |
655 | /* Only bind to function 0 of the device. Early generations | ||
656 | * used function 1 as a placeholder for multi-head. That only | ||
657 | * causes confusion, especially on systems where both | ||
658 | * functions have the same PCI-ID! | ||
659 | */ | ||
660 | if (PCI_FUNC(pdev->devfn)) | ||
661 | return -ENODEV; | ||
662 | |||
417 | return drm_get_pci_dev(pdev, ent, &driver); | 663 | return drm_get_pci_dev(pdev, ent, &driver); |
418 | } | 664 | } |
419 | 665 | ||
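As background for the function-0 gate above: PCI packs slot and function into a single devfn byte, and PCI_FUNC() extracts the low three bits. A sketch using the standard <linux/pci.h> helpers:

	unsigned int f0 = PCI_DEVFN(2, 0);	/* 00:02.0 */
	unsigned int f1 = PCI_DEVFN(2, 1);	/* 00:02.1 */

	PCI_FUNC(f0);	/* 0 -> probe proceeds to drm_get_pci_dev() */
	PCI_FUNC(f1);	/* 1 -> probe returns -ENODEV */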
@@ -436,6 +682,9 @@ static int i915_pm_suspend(struct device *dev) | |||
436 | return -ENODEV; | 682 | return -ENODEV; |
437 | } | 683 | } |
438 | 684 | ||
685 | if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF) | ||
686 | return 0; | ||
687 | |||
439 | error = i915_drm_freeze(drm_dev); | 688 | error = i915_drm_freeze(drm_dev); |
440 | if (error) | 689 | if (error) |
441 | return error; | 690 | return error; |
@@ -517,15 +766,7 @@ static struct drm_driver driver = { | |||
517 | .resume = i915_resume, | 766 | .resume = i915_resume, |
518 | 767 | ||
519 | .device_is_agp = i915_driver_device_is_agp, | 768 | .device_is_agp = i915_driver_device_is_agp, |
520 | .enable_vblank = i915_enable_vblank, | ||
521 | .disable_vblank = i915_disable_vblank, | ||
522 | .irq_preinstall = i915_driver_irq_preinstall, | ||
523 | .irq_postinstall = i915_driver_irq_postinstall, | ||
524 | .irq_uninstall = i915_driver_irq_uninstall, | ||
525 | .irq_handler = i915_driver_irq_handler, | ||
526 | .reclaim_buffers = drm_core_reclaim_buffers, | 769 | .reclaim_buffers = drm_core_reclaim_buffers, |
527 | .get_map_ofs = drm_core_get_map_ofs, | ||
528 | .get_reg_ofs = drm_core_get_reg_ofs, | ||
529 | .master_create = i915_master_create, | 770 | .master_create = i915_master_create, |
530 | .master_destroy = i915_master_destroy, | 771 | .master_destroy = i915_master_destroy, |
531 | #if defined(CONFIG_DEBUG_FS) | 772 | #if defined(CONFIG_DEBUG_FS) |
@@ -535,6 +776,9 @@ static struct drm_driver driver = { | |||
535 | .gem_init_object = i915_gem_init_object, | 776 | .gem_init_object = i915_gem_init_object, |
536 | .gem_free_object = i915_gem_free_object, | 777 | .gem_free_object = i915_gem_free_object, |
537 | .gem_vm_ops = &i915_gem_vm_ops, | 778 | .gem_vm_ops = &i915_gem_vm_ops, |
779 | .dumb_create = i915_gem_dumb_create, | ||
780 | .dumb_map_offset = i915_gem_mmap_gtt, | ||
781 | .dumb_destroy = i915_gem_dumb_destroy, | ||
538 | .ioctls = i915_ioctls, | 782 | .ioctls = i915_ioctls, |
539 | .fops = { | 783 | .fops = { |
540 | .owner = THIS_MODULE, | 784 | .owner = THIS_MODULE, |
@@ -548,14 +792,7 @@ static struct drm_driver driver = { | |||
548 | #ifdef CONFIG_COMPAT | 792 | #ifdef CONFIG_COMPAT |
549 | .compat_ioctl = i915_compat_ioctl, | 793 | .compat_ioctl = i915_compat_ioctl, |
550 | #endif | 794 | #endif |
551 | }, | 795 | .llseek = noop_llseek, |
552 | |||
553 | .pci_driver = { | ||
554 | .name = DRIVER_NAME, | ||
555 | .id_table = pciidlist, | ||
556 | .probe = i915_pci_probe, | ||
557 | .remove = i915_pci_remove, | ||
558 | .driver.pm = &i915_pm_ops, | ||
559 | }, | 796 | }, |
560 | 797 | ||
561 | .name = DRIVER_NAME, | 798 | .name = DRIVER_NAME, |
@@ -566,6 +803,14 @@ static struct drm_driver driver = { | |||
566 | .patchlevel = DRIVER_PATCHLEVEL, | 803 | .patchlevel = DRIVER_PATCHLEVEL, |
567 | }; | 804 | }; |
568 | 805 | ||
806 | static struct pci_driver i915_pci_driver = { | ||
807 | .name = DRIVER_NAME, | ||
808 | .id_table = pciidlist, | ||
809 | .probe = i915_pci_probe, | ||
810 | .remove = i915_pci_remove, | ||
811 | .driver.pm = &i915_pm_ops, | ||
812 | }; | ||
813 | |||
569 | static int __init i915_init(void) | 814 | static int __init i915_init(void) |
570 | { | 815 | { |
571 | if (!intel_agp_enabled) { | 816 | if (!intel_agp_enabled) { |
@@ -575,8 +820,6 @@ static int __init i915_init(void) | |||
575 | 820 | ||
576 | driver.num_ioctls = i915_max_ioctl; | 821 | driver.num_ioctls = i915_max_ioctl; |
577 | 822 | ||
578 | i915_gem_shrinker_init(); | ||
579 | |||
580 | /* | 823 | /* |
581 | * If CONFIG_DRM_I915_KMS is set, default to KMS unless | 824 | * If CONFIG_DRM_I915_KMS is set, default to KMS unless |
582 | * explicitly disabled with the module parameter. | 825 | * explicitly disabled with the module parameter. |
@@ -598,18 +841,15 @@ static int __init i915_init(void) | |||
598 | driver.driver_features &= ~DRIVER_MODESET; | 841 | driver.driver_features &= ~DRIVER_MODESET; |
599 | #endif | 842 | #endif |
600 | 843 | ||
601 | if (!(driver.driver_features & DRIVER_MODESET)) { | 844 | if (!(driver.driver_features & DRIVER_MODESET)) |
602 | driver.suspend = i915_suspend; | 845 | driver.get_vblank_timestamp = NULL; |
603 | driver.resume = i915_resume; | ||
604 | } | ||
605 | 846 | ||
606 | return drm_init(&driver); | 847 | return drm_pci_init(&driver, &i915_pci_driver); |
607 | } | 848 | } |
608 | 849 | ||
609 | static void __exit i915_exit(void) | 850 | static void __exit i915_exit(void) |
610 | { | 851 | { |
611 | i915_gem_shrinker_exit(); | 852 | drm_pci_exit(&driver, &i915_pci_driver); |
612 | drm_exit(&driver); | ||
613 | } | 853 | } |
614 | 854 | ||
615 | module_init(i915_init); | 855 | module_init(i915_init); |
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index af4a263cf257..ce7914c4c044 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h | |||
@@ -34,6 +34,8 @@ | |||
34 | #include "intel_bios.h" | 34 | #include "intel_bios.h" |
35 | #include "intel_ringbuffer.h" | 35 | #include "intel_ringbuffer.h" |
36 | #include <linux/io-mapping.h> | 36 | #include <linux/io-mapping.h> |
37 | #include <linux/i2c.h> | ||
38 | #include <drm/intel-gtt.h> | ||
37 | 39 | ||
38 | /* General customization: | 40 | /* General customization: |
39 | */ | 41 | */ |
@@ -47,17 +49,22 @@ | |||
47 | enum pipe { | 49 | enum pipe { |
48 | PIPE_A = 0, | 50 | PIPE_A = 0, |
49 | PIPE_B, | 51 | PIPE_B, |
52 | PIPE_C, | ||
53 | I915_MAX_PIPES | ||
50 | }; | 54 | }; |
55 | #define pipe_name(p) ((p) + 'A') | ||
51 | 56 | ||
52 | enum plane { | 57 | enum plane { |
53 | PLANE_A = 0, | 58 | PLANE_A = 0, |
54 | PLANE_B, | 59 | PLANE_B, |
60 | PLANE_C, | ||
55 | }; | 61 | }; |
56 | 62 | #define plane_name(p) ((p) + 'A') | |
57 | #define I915_NUM_PIPE 2 | ||
58 | 63 | ||
59 | #define I915_GEM_GPU_DOMAINS (~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT)) | 64 | #define I915_GEM_GPU_DOMAINS (~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT)) |
60 | 65 | ||
66 | #define for_each_pipe(p) for ((p) = 0; (p) < dev_priv->num_pipe; (p)++) | ||
67 | |||
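A small sketch of how the new iterator combines with the name helpers above; note that for_each_pipe() implicitly uses a dev_priv variable from the enclosing scope:

	int pipe;

	for_each_pipe(pipe)	/* walks 0 .. dev_priv->num_pipe - 1 */
		DRM_DEBUG_KMS("pipe %c present\n", pipe_name(pipe));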
61 | /* Interface history: | 68 | /* Interface history: |
62 | * | 69 | * |
63 | * 1.1: Original. | 70 | * 1.1: Original. |
@@ -73,12 +80,7 @@ enum plane { | |||
73 | #define DRIVER_PATCHLEVEL 0 | 80 | #define DRIVER_PATCHLEVEL 0 |
74 | 81 | ||
75 | #define WATCH_COHERENCY 0 | 82 | #define WATCH_COHERENCY 0 |
76 | #define WATCH_BUF 0 | 83 | #define WATCH_LISTS 0 |
77 | #define WATCH_EXEC 0 | ||
78 | #define WATCH_LRU 0 | ||
79 | #define WATCH_RELOC 0 | ||
80 | #define WATCH_INACTIVE 0 | ||
81 | #define WATCH_PWRITE 0 | ||
82 | 84 | ||
83 | #define I915_GEM_PHYS_CURSOR_0 1 | 85 | #define I915_GEM_PHYS_CURSOR_0 1 |
84 | #define I915_GEM_PHYS_CURSOR_1 2 | 86 | #define I915_GEM_PHYS_CURSOR_1 2 |
@@ -89,7 +91,7 @@ struct drm_i915_gem_phys_object { | |||
89 | int id; | 91 | int id; |
90 | struct page **page_list; | 92 | struct page **page_list; |
91 | drm_dma_handle_t *handle; | 93 | drm_dma_handle_t *handle; |
92 | struct drm_gem_object *cur_obj; | 94 | struct drm_i915_gem_object *cur_obj; |
93 | }; | 95 | }; |
94 | 96 | ||
95 | struct mem_block { | 97 | struct mem_block { |
@@ -110,8 +112,10 @@ struct intel_opregion { | |||
110 | struct opregion_acpi *acpi; | 112 | struct opregion_acpi *acpi; |
111 | struct opregion_swsci *swsci; | 113 | struct opregion_swsci *swsci; |
112 | struct opregion_asle *asle; | 114 | struct opregion_asle *asle; |
113 | int enabled; | 115 | void *vbt; |
116 | u32 __iomem *lid_state; | ||
114 | }; | 117 | }; |
118 | #define OPREGION_SIZE (8*1024) | ||
115 | 119 | ||
116 | struct intel_overlay; | 120 | struct intel_overlay; |
117 | struct intel_overlay_error_state; | 121 | struct intel_overlay_error_state; |
@@ -123,53 +127,72 @@ struct drm_i915_master_private { | |||
123 | #define I915_FENCE_REG_NONE -1 | 127 | #define I915_FENCE_REG_NONE -1 |
124 | 128 | ||
125 | struct drm_i915_fence_reg { | 129 | struct drm_i915_fence_reg { |
126 | struct drm_gem_object *obj; | ||
127 | struct list_head lru_list; | 130 | struct list_head lru_list; |
131 | struct drm_i915_gem_object *obj; | ||
132 | uint32_t setup_seqno; | ||
128 | }; | 133 | }; |
129 | 134 | ||
130 | struct sdvo_device_mapping { | 135 | struct sdvo_device_mapping { |
136 | u8 initialized; | ||
131 | u8 dvo_port; | 137 | u8 dvo_port; |
132 | u8 slave_addr; | 138 | u8 slave_addr; |
133 | u8 dvo_wiring; | 139 | u8 dvo_wiring; |
134 | u8 initialized; | 140 | u8 i2c_pin; |
141 | u8 i2c_speed; | ||
135 | u8 ddc_pin; | 142 | u8 ddc_pin; |
136 | }; | 143 | }; |
137 | 144 | ||
145 | struct intel_display_error_state; | ||
146 | |||
138 | struct drm_i915_error_state { | 147 | struct drm_i915_error_state { |
139 | u32 eir; | 148 | u32 eir; |
140 | u32 pgtbl_er; | 149 | u32 pgtbl_er; |
141 | u32 pipeastat; | 150 | u32 pipestat[I915_MAX_PIPES]; |
142 | u32 pipebstat; | ||
143 | u32 ipeir; | 151 | u32 ipeir; |
144 | u32 ipehr; | 152 | u32 ipehr; |
145 | u32 instdone; | 153 | u32 instdone; |
146 | u32 acthd; | 154 | u32 acthd; |
155 | u32 error; /* gen6+ */ | ||
156 | u32 bcs_acthd; /* gen6+ blt engine */ | ||
157 | u32 bcs_ipehr; | ||
158 | u32 bcs_ipeir; | ||
159 | u32 bcs_instdone; | ||
160 | u32 bcs_seqno; | ||
161 | u32 vcs_acthd; /* gen6+ bsd engine */ | ||
162 | u32 vcs_ipehr; | ||
163 | u32 vcs_ipeir; | ||
164 | u32 vcs_instdone; | ||
165 | u32 vcs_seqno; | ||
147 | u32 instpm; | 166 | u32 instpm; |
148 | u32 instps; | 167 | u32 instps; |
149 | u32 instdone1; | 168 | u32 instdone1; |
150 | u32 seqno; | 169 | u32 seqno; |
151 | u64 bbaddr; | 170 | u64 bbaddr; |
171 | u64 fence[16]; | ||
152 | struct timeval time; | 172 | struct timeval time; |
153 | struct drm_i915_error_object { | 173 | struct drm_i915_error_object { |
154 | int page_count; | 174 | int page_count; |
155 | u32 gtt_offset; | 175 | u32 gtt_offset; |
156 | u32 *pages[0]; | 176 | u32 *pages[0]; |
157 | } *ringbuffer, *batchbuffer[2]; | 177 | } *ringbuffer[I915_NUM_RINGS], *batchbuffer[I915_NUM_RINGS]; |
158 | struct drm_i915_error_buffer { | 178 | struct drm_i915_error_buffer { |
159 | size_t size; | 179 | u32 size; |
160 | u32 name; | 180 | u32 name; |
161 | u32 seqno; | 181 | u32 seqno; |
162 | u32 gtt_offset; | 182 | u32 gtt_offset; |
163 | u32 read_domains; | 183 | u32 read_domains; |
164 | u32 write_domain; | 184 | u32 write_domain; |
165 | u32 fence_reg; | 185 | s32 fence_reg:5; |
166 | s32 pinned:2; | 186 | s32 pinned:2; |
167 | u32 tiling:2; | 187 | u32 tiling:2; |
168 | u32 dirty:1; | 188 | u32 dirty:1; |
169 | u32 purgeable:1; | 189 | u32 purgeable:1; |
170 | } *active_bo; | 190 | u32 ring:4; |
171 | u32 active_bo_count; | 191 | u32 cache_level:2; |
192 | } *active_bo, *pinned_bo; | ||
193 | u32 active_bo_count, pinned_bo_count; | ||
172 | struct intel_overlay_error_state *overlay; | 194 | struct intel_overlay_error_state *overlay; |
195 | struct intel_display_error_state *display; | ||
173 | }; | 196 | }; |
174 | 197 | ||
175 | struct drm_i915_display_funcs { | 198 | struct drm_i915_display_funcs { |
@@ -179,48 +202,58 @@ struct drm_i915_display_funcs { | |||
179 | void (*disable_fbc)(struct drm_device *dev); | 202 | void (*disable_fbc)(struct drm_device *dev); |
180 | int (*get_display_clock_speed)(struct drm_device *dev); | 203 | int (*get_display_clock_speed)(struct drm_device *dev); |
181 | int (*get_fifo_size)(struct drm_device *dev, int plane); | 204 | int (*get_fifo_size)(struct drm_device *dev, int plane); |
182 | void (*update_wm)(struct drm_device *dev, int planea_clock, | 205 | void (*update_wm)(struct drm_device *dev); |
183 | int planeb_clock, int sr_hdisplay, int sr_htotal, | 206 | int (*crtc_mode_set)(struct drm_crtc *crtc, |
184 | int pixel_size); | 207 | struct drm_display_mode *mode, |
208 | struct drm_display_mode *adjusted_mode, | ||
209 | int x, int y, | ||
210 | struct drm_framebuffer *old_fb); | ||
211 | void (*fdi_link_train)(struct drm_crtc *crtc); | ||
212 | void (*init_clock_gating)(struct drm_device *dev); | ||
213 | void (*init_pch_clock_gating)(struct drm_device *dev); | ||
214 | int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc, | ||
215 | struct drm_framebuffer *fb, | ||
216 | struct drm_i915_gem_object *obj); | ||
185 | /* clock updates for mode set */ | 217 | /* clock updates for mode set */ |
186 | /* cursor updates */ | 218 | /* cursor updates */ |
187 | /* render clock increase/decrease */ | 219 | /* render clock increase/decrease */ |
188 | /* display clock increase/decrease */ | 220 | /* display clock increase/decrease */ |
189 | /* pll clock increase/decrease */ | 221 | /* pll clock increase/decrease */ |
190 | /* clock gating init */ | ||
191 | }; | 222 | }; |
192 | 223 | ||
193 | struct intel_device_info { | 224 | struct intel_device_info { |
194 | u8 gen; | 225 | u8 gen; |
195 | u8 is_mobile : 1; | 226 | u8 is_mobile : 1; |
196 | u8 is_i8xx : 1; | ||
197 | u8 is_i85x : 1; | 227 | u8 is_i85x : 1; |
198 | u8 is_i915g : 1; | 228 | u8 is_i915g : 1; |
199 | u8 is_i9xx : 1; | ||
200 | u8 is_i945gm : 1; | 229 | u8 is_i945gm : 1; |
201 | u8 is_i965g : 1; | ||
202 | u8 is_i965gm : 1; | ||
203 | u8 is_g33 : 1; | 230 | u8 is_g33 : 1; |
204 | u8 need_gfx_hws : 1; | 231 | u8 need_gfx_hws : 1; |
205 | u8 is_g4x : 1; | 232 | u8 is_g4x : 1; |
206 | u8 is_pineview : 1; | 233 | u8 is_pineview : 1; |
207 | u8 is_broadwater : 1; | 234 | u8 is_broadwater : 1; |
208 | u8 is_crestline : 1; | 235 | u8 is_crestline : 1; |
209 | u8 is_ironlake : 1; | 236 | u8 is_ivybridge : 1; |
210 | u8 has_fbc : 1; | 237 | u8 has_fbc : 1; |
211 | u8 has_rc6 : 1; | ||
212 | u8 has_pipe_cxsr : 1; | 238 | u8 has_pipe_cxsr : 1; |
213 | u8 has_hotplug : 1; | 239 | u8 has_hotplug : 1; |
214 | u8 cursor_needs_physical : 1; | 240 | u8 cursor_needs_physical : 1; |
241 | u8 has_overlay : 1; | ||
242 | u8 overlay_needs_physical : 1; | ||
243 | u8 supports_tv : 1; | ||
244 | u8 has_bsd_ring : 1; | ||
245 | u8 has_blt_ring : 1; | ||
215 | }; | 246 | }; |
216 | 247 | ||
217 | enum no_fbc_reason { | 248 | enum no_fbc_reason { |
249 | FBC_NO_OUTPUT, /* no outputs enabled to compress */ | ||
218 | FBC_STOLEN_TOO_SMALL, /* not enough space to hold compressed buffers */ | 250 | FBC_STOLEN_TOO_SMALL, /* not enough space to hold compressed buffers */ |
219 | FBC_UNSUPPORTED_MODE, /* interlace or doublescanned mode */ | 251 | FBC_UNSUPPORTED_MODE, /* interlace or doublescanned mode */ |
220 | FBC_MODE_TOO_LARGE, /* mode too large for compression */ | 252 | FBC_MODE_TOO_LARGE, /* mode too large for compression */ |
221 | FBC_BAD_PLANE, /* fbc not supported on plane */ | 253 | FBC_BAD_PLANE, /* fbc not supported on plane */ |
222 | FBC_NOT_TILED, /* buffer not tiled */ | 254 | FBC_NOT_TILED, /* buffer not tiled */ |
223 | FBC_MULTIPLE_PIPES, /* more than one pipe active */ | 255 | FBC_MULTIPLE_PIPES, /* more than one pipe active */ |
256 | FBC_MODULE_PARAM, | ||
224 | }; | 257 | }; |
225 | 258 | ||
226 | enum intel_pch { | 259 | enum intel_pch { |
@@ -229,6 +262,7 @@ enum intel_pch { | |||
229 | }; | 262 | }; |
230 | 263 | ||
231 | #define QUIRK_PIPEA_FORCE (1<<0) | 264 | #define QUIRK_PIPEA_FORCE (1<<0) |
265 | #define QUIRK_LVDS_SSC_DISABLE (1<<1) | ||
232 | 266 | ||
233 | struct intel_fbdev; | 267 | struct intel_fbdev; |
234 | 268 | ||
@@ -238,23 +272,25 @@ typedef struct drm_i915_private { | |||
238 | const struct intel_device_info *info; | 272 | const struct intel_device_info *info; |
239 | 273 | ||
240 | int has_gem; | 274 | int has_gem; |
275 | int relative_constants_mode; | ||
241 | 276 | ||
242 | void __iomem *regs; | 277 | void __iomem *regs; |
243 | 278 | ||
279 | struct intel_gmbus { | ||
280 | struct i2c_adapter adapter; | ||
281 | struct i2c_adapter *force_bit; | ||
282 | u32 reg0; | ||
283 | } *gmbus; | ||
284 | |||
244 | struct pci_dev *bridge_dev; | 285 | struct pci_dev *bridge_dev; |
245 | struct intel_ring_buffer render_ring; | 286 | struct intel_ring_buffer ring[I915_NUM_RINGS]; |
246 | struct intel_ring_buffer bsd_ring; | ||
247 | uint32_t next_seqno; | 287 | uint32_t next_seqno; |
248 | 288 | ||
249 | drm_dma_handle_t *status_page_dmah; | 289 | drm_dma_handle_t *status_page_dmah; |
250 | void *seqno_page; | ||
251 | dma_addr_t dma_status_page; | ||
252 | uint32_t counter; | 290 | uint32_t counter; |
253 | unsigned int seqno_gfx_addr; | ||
254 | drm_local_map_t hws_map; | 291 | drm_local_map_t hws_map; |
255 | struct drm_gem_object *seqno_obj; | 292 | struct drm_i915_gem_object *pwrctx; |
256 | struct drm_gem_object *pwrctx; | 293 | struct drm_i915_gem_object *renderctx; |
257 | struct drm_gem_object *renderctx; | ||
258 | 294 | ||
259 | struct resource mch_res; | 295 | struct resource mch_res; |
260 | 296 | ||
@@ -264,21 +300,15 @@ typedef struct drm_i915_private { | |||
264 | int current_page; | 300 | int current_page; |
265 | int page_flipping; | 301 | int page_flipping; |
266 | 302 | ||
267 | wait_queue_head_t irq_queue; | ||
268 | atomic_t irq_received; | 303 | atomic_t irq_received; |
269 | /** Protects user_irq_refcount and irq_mask_reg */ | 304 | |
270 | spinlock_t user_irq_lock; | 305 | /* protects the irq masks */ |
271 | u32 trace_irq_seqno; | 306 | spinlock_t irq_lock; |
272 | /** Cached value of IMR to avoid reads in updating the bitfield */ | 307 | /** Cached value of IMR to avoid reads in updating the bitfield */ |
273 | u32 irq_mask_reg; | ||
274 | u32 pipestat[2]; | 308 | u32 pipestat[2]; |
275 | /** splitted irq regs for graphics and display engine on Ironlake, | 309 | u32 irq_mask; |
276 | irq_mask_reg is still used for display irq. */ | 310 | u32 gt_irq_mask; |
277 | u32 gt_irq_mask_reg; | 311 | u32 pch_irq_mask; |
278 | u32 gt_irq_enable_reg; | ||
279 | u32 de_irq_enable_reg; | ||
280 | u32 pch_irq_mask_reg; | ||
281 | u32 pch_irq_enable_reg; | ||
282 | 312 | ||
283 | u32 hotplug_supported_mask; | 313 | u32 hotplug_supported_mask; |
284 | struct work_struct hotplug_work; | 314 | struct work_struct hotplug_work; |
@@ -289,26 +319,21 @@ typedef struct drm_i915_private { | |||
289 | unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds; | 319 | unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds; |
290 | int vblank_pipe; | 320 | int vblank_pipe; |
291 | int num_pipe; | 321 | int num_pipe; |
292 | u32 flush_rings; | ||
293 | #define FLUSH_RENDER_RING 0x1 | ||
294 | #define FLUSH_BSD_RING 0x2 | ||
295 | 322 | ||
296 | /* For hangcheck timer */ | 323 | /* For hangcheck timer */ |
297 | #define DRM_I915_HANGCHECK_PERIOD 75 /* in jiffies */ | 324 | #define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */ |
298 | struct timer_list hangcheck_timer; | 325 | struct timer_list hangcheck_timer; |
299 | int hangcheck_count; | 326 | int hangcheck_count; |
300 | uint32_t last_acthd; | 327 | uint32_t last_acthd; |
301 | uint32_t last_instdone; | 328 | uint32_t last_instdone; |
302 | uint32_t last_instdone1; | 329 | uint32_t last_instdone1; |
303 | 330 | ||
304 | struct drm_mm vram; | ||
305 | |||
306 | unsigned long cfb_size; | 331 | unsigned long cfb_size; |
307 | unsigned long cfb_pitch; | 332 | unsigned long cfb_pitch; |
333 | unsigned long cfb_offset; | ||
308 | int cfb_fence; | 334 | int cfb_fence; |
309 | int cfb_plane; | 335 | int cfb_plane; |
310 | 336 | int cfb_y; | |
311 | int irq_enabled; | ||
312 | 337 | ||
313 | struct intel_opregion opregion; | 338 | struct intel_opregion opregion; |
314 | 339 | ||
@@ -316,8 +341,8 @@ typedef struct drm_i915_private { | |||
316 | struct intel_overlay *overlay; | 341 | struct intel_overlay *overlay; |
317 | 342 | ||
318 | /* LVDS info */ | 343 | /* LVDS info */ |
319 | int backlight_duty_cycle; /* restore backlight to this value */ | 344 | int backlight_level; /* restore backlight to this value */ |
320 | bool panel_wants_dither; | 345 | bool backlight_enabled; |
321 | struct drm_display_mode *panel_fixed_mode; | 346 | struct drm_display_mode *panel_fixed_mode; |
322 | struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */ | 347 | struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */ |
323 | struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */ | 348 | struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */ |
@@ -328,13 +353,23 @@ typedef struct drm_i915_private { | |||
328 | unsigned int lvds_vbt:1; | 353 | unsigned int lvds_vbt:1; |
329 | unsigned int int_crt_support:1; | 354 | unsigned int int_crt_support:1; |
330 | unsigned int lvds_use_ssc:1; | 355 | unsigned int lvds_use_ssc:1; |
331 | unsigned int edp_support:1; | ||
332 | int lvds_ssc_freq; | 356 | int lvds_ssc_freq; |
333 | int edp_bpp; | 357 | struct { |
358 | int rate; | ||
359 | int lanes; | ||
360 | int preemphasis; | ||
361 | int vswing; | ||
362 | |||
363 | bool initialized; | ||
364 | bool support; | ||
365 | int bpp; | ||
366 | struct edp_power_seq pps; | ||
367 | } edp; | ||
368 | bool no_aux_handshake; | ||
334 | 369 | ||
335 | struct notifier_block lid_notifier; | 370 | struct notifier_block lid_notifier; |
336 | 371 | ||
337 | int crt_ddc_bus; /* 0 = unknown, else GPIO to use for CRT DDC */ | 372 | int crt_ddc_pin; |
338 | struct drm_i915_fence_reg fence_regs[16]; /* assume 965 */ | 373 | struct drm_i915_fence_reg fence_regs[16]; /* assume 965 */ |
339 | int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */ | 374 | int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */ |
340 | int num_fence_regs; /* 8 on pre-965, 16 otherwise */ | 375 | int num_fence_regs; /* 8 on pre-965, 16 otherwise */ |
@@ -344,6 +379,7 @@ typedef struct drm_i915_private { | |||
344 | spinlock_t error_lock; | 379 | spinlock_t error_lock; |
345 | struct drm_i915_error_state *first_error; | 380 | struct drm_i915_error_state *first_error; |
346 | struct work_struct error_work; | 381 | struct work_struct error_work; |
382 | struct completion error_completion; | ||
347 | struct workqueue_struct *wq; | 383 | struct workqueue_struct *wq; |
348 | 384 | ||
349 | /* Display functions */ | 385 | /* Display functions */ |
@@ -507,21 +543,36 @@ typedef struct drm_i915_private { | |||
507 | u32 saveMCHBAR_RENDER_STANDBY; | 543 | u32 saveMCHBAR_RENDER_STANDBY; |
508 | 544 | ||
509 | struct { | 545 | struct { |
546 | /** Bridge to intel-gtt-ko */ | ||
547 | const struct intel_gtt *gtt; | ||
548 | /** Memory allocator for GTT stolen memory */ | ||
549 | struct drm_mm stolen; | ||
550 | /** Memory allocator for GTT */ | ||
510 | struct drm_mm gtt_space; | 551 | struct drm_mm gtt_space; |
552 | /** List of all objects in gtt_space. Used to restore gtt | ||
553 | * mappings on resume */ | ||
554 | struct list_head gtt_list; | ||
555 | |||
556 | /** Usable portion of the GTT for GEM */ | ||
557 | unsigned long gtt_start; | ||
558 | unsigned long gtt_mappable_end; | ||
559 | unsigned long gtt_end; | ||
511 | 560 | ||
512 | struct io_mapping *gtt_mapping; | 561 | struct io_mapping *gtt_mapping; |
513 | int gtt_mtrr; | 562 | int gtt_mtrr; |
514 | 563 | ||
564 | struct shrinker inactive_shrinker; | ||
565 | |||
515 | /** | 566 | /** |
516 | * Membership on list of all loaded devices, used to evict | 567 | * List of objects currently involved in rendering. |
517 | * inactive buffers under memory pressure. | ||
518 | * | 568 | * |
519 | * Modifications should only be done whilst holding the | 569 | * Includes buffers having the contents of their GPU caches |
520 | * shrink_list_lock spinlock. | 570 | * flushed, not necessarily primitives. last_rendering_seqno |
571 | * represents when the rendering involved will be completed. | ||
572 | * | ||
573 | * A reference is held on the buffer while on this list. | ||
521 | */ | 574 | */ |
522 | struct list_head shrink_list; | 575 | struct list_head active_list; |
523 | |||
524 | spinlock_t active_list_lock; | ||
525 | 576 | ||
526 | /** | 577 | /** |
527 | * List of objects which are not in the ringbuffer but which | 578 | * List of objects which are not in the ringbuffer but which |
@@ -535,15 +586,6 @@ typedef struct drm_i915_private { | |||
535 | struct list_head flushing_list; | 586 | struct list_head flushing_list; |
536 | 587 | ||
537 | /** | 588 | /** |
538 | * List of objects currently pending a GPU write flush. | ||
539 | * | ||
540 | * All elements on this list will belong to either the | ||
541 | * active_list or flushing_list, last_rendering_seqno can | ||
542 | * be used to differentiate between the two elements. | ||
543 | */ | ||
544 | struct list_head gpu_write_list; | ||
545 | |||
546 | /** | ||
547 | * LRU list of objects which are not in the ringbuffer and | 589 | * LRU list of objects which are not in the ringbuffer and |
548 | * are ready to unbind, but are still in the GTT. | 590 | * are ready to unbind, but are still in the GTT. |
549 | * | 591 | * |
@@ -555,6 +597,12 @@ typedef struct drm_i915_private { | |||
555 | */ | 597 | */ |
556 | struct list_head inactive_list; | 598 | struct list_head inactive_list; |
557 | 599 | ||
600 | /** | ||
601 | * LRU list of objects which are not in the ringbuffer but | ||
602 | * are still pinned in the GTT. | ||
603 | */ | ||
604 | struct list_head pinned_list; | ||
605 | |||
558 | /** LRU list of objects with fence regs on them. */ | 606 | /** LRU list of objects with fence regs on them. */ |
559 | struct list_head fence_list; | 607 | struct list_head fence_list; |
560 | 608 | ||
@@ -576,14 +624,10 @@ typedef struct drm_i915_private { | |||
576 | struct delayed_work retire_work; | 624 | struct delayed_work retire_work; |
577 | 625 | ||
578 | /** | 626 | /** |
579 | * Waiting sequence number, if any | 627 | * Are we in a non-interruptible section of code like |
580 | */ | 628 | * modesetting? |
581 | uint32_t waiting_gem_seqno; | ||
582 | |||
583 | /** | ||
584 | * Last seq seen at irq time | ||
585 | */ | 629 | */ |
586 | uint32_t irq_gem_seqno; | 630 | bool interruptible; |
587 | 631 | ||
588 | /** | 632 | /** |
589 | * Flag if the X Server, and thus DRM, is not currently in | 633 | * Flag if the X Server, and thus DRM, is not currently in |
@@ -599,7 +643,7 @@ typedef struct drm_i915_private { | |||
599 | * Flag if the hardware appears to be wedged. | 643 | * Flag if the hardware appears to be wedged. |
600 | * | 644 | * |
601 | * This is set when attempts to idle the device timeout. | 645 | * This is set when attempts to idle the device timeout. |
602 | * It prevents command submission from occuring and makes | 646 | * It prevents command submission from occurring and makes |
603 | * every pending request fail | 647 | * every pending request fail |
604 | */ | 648 | */ |
605 | atomic_t wedged; | 649 | atomic_t wedged; |
@@ -611,12 +655,19 @@ typedef struct drm_i915_private { | |||
611 | 655 | ||
612 | /* storage for physical objects */ | 656 | /* storage for physical objects */ |
613 | struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT]; | 657 | struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT]; |
658 | |||
659 | /* accounting, useful for userland debugging */ | ||
660 | size_t gtt_total; | ||
661 | size_t mappable_gtt_total; | ||
662 | size_t object_memory; | ||
663 | u32 object_count; | ||
614 | } mm; | 664 | } mm; |
615 | struct sdvo_device_mapping sdvo_mappings[2]; | 665 | struct sdvo_device_mapping sdvo_mappings[2]; |
616 | /* indicate whether the LVDS_BORDER should be enabled or not */ | 666 | /* indicate whether the LVDS_BORDER should be enabled or not */ |
617 | unsigned int lvds_border_bits; | 667 | unsigned int lvds_border_bits; |
618 | /* Panel fitter placement and size for Ironlake+ */ | 668 | /* Panel fitter placement and size for Ironlake+ */ |
619 | u32 pch_pf_pos, pch_pf_size; | 669 | u32 pch_pf_pos, pch_pf_size; |
670 | int panel_t3, panel_t12; | ||
620 | 671 | ||
621 | struct drm_crtc *plane_to_crtc_mapping[2]; | 672 | struct drm_crtc *plane_to_crtc_mapping[2]; |
622 | struct drm_crtc *pipe_to_crtc_mapping[2]; | 673 | struct drm_crtc *pipe_to_crtc_mapping[2]; |
@@ -626,8 +677,6 @@ typedef struct drm_i915_private { | |||
626 | /* Reclocking support */ | 677 | /* Reclocking support */ |
627 | bool render_reclock_avail; | 678 | bool render_reclock_avail; |
628 | bool lvds_downclock_avail; | 679 | bool lvds_downclock_avail; |
629 | /* indicate whether the LVDS EDID is OK */ | ||
630 | bool lvds_edid_good; | ||
631 | /* indicates the reduced downclock for LVDS*/ | 680 | /* indicates the reduced downclock for LVDS*/ |
632 | int lvds_downclock; | 681 | int lvds_downclock; |
633 | struct work_struct idle_work; | 682 | struct work_struct idle_work; |
@@ -640,20 +689,24 @@ typedef struct drm_i915_private { | |||
640 | 689 | ||
641 | bool mchbar_need_disable; | 690 | bool mchbar_need_disable; |
642 | 691 | ||
692 | struct work_struct rps_work; | ||
693 | spinlock_t rps_lock; | ||
694 | u32 pm_iir; | ||
695 | |||
643 | u8 cur_delay; | 696 | u8 cur_delay; |
644 | u8 min_delay; | 697 | u8 min_delay; |
645 | u8 max_delay; | 698 | u8 max_delay; |
646 | u8 fmax; | 699 | u8 fmax; |
647 | u8 fstart; | 700 | u8 fstart; |
648 | 701 | ||
649 | u64 last_count1; | 702 | u64 last_count1; |
650 | unsigned long last_time1; | 703 | unsigned long last_time1; |
651 | u64 last_count2; | 704 | u64 last_count2; |
652 | struct timespec last_time2; | 705 | struct timespec last_time2; |
653 | unsigned long gfx_power; | 706 | unsigned long gfx_power; |
654 | int c_m; | 707 | int c_m; |
655 | int r_t; | 708 | int r_t; |
656 | u8 corr; | 709 | u8 corr; |
657 | spinlock_t *mchdev_lock; | 710 | spinlock_t *mchdev_lock; |
658 | 711 | ||
659 | enum no_fbc_reason no_fbc_reason; | 712 | enum no_fbc_reason no_fbc_reason; |
@@ -661,23 +714,37 @@ typedef struct drm_i915_private { | |||
661 | struct drm_mm_node *compressed_fb; | 714 | struct drm_mm_node *compressed_fb; |
662 | struct drm_mm_node *compressed_llb; | 715 | struct drm_mm_node *compressed_llb; |
663 | 716 | ||
717 | unsigned long last_gpu_reset; | ||
718 | |||
664 | /* list of fbdevs registered on this device */ | 719 | /* list of fbdevs registered on this device */ |
665 | struct intel_fbdev *fbdev; | 720 | struct intel_fbdev *fbdev; |
721 | |||
722 | struct drm_property *broadcast_rgb_property; | ||
723 | struct drm_property *force_audio_property; | ||
724 | |||
725 | atomic_t forcewake_count; | ||
666 | } drm_i915_private_t; | 726 | } drm_i915_private_t; |
667 | 727 | ||
668 | /** driver private structure attached to each drm_gem_object */ | 728 | enum i915_cache_level { |
729 | I915_CACHE_NONE, | ||
730 | I915_CACHE_LLC, | ||
731 | I915_CACHE_LLC_MLC, /* gen6+ */ | ||
732 | }; | ||
733 | |||
669 | struct drm_i915_gem_object { | 734 | struct drm_i915_gem_object { |
670 | struct drm_gem_object base; | 735 | struct drm_gem_object base; |
671 | 736 | ||
672 | /** Current space allocated to this object in the GTT, if any. */ | 737 | /** Current space allocated to this object in the GTT, if any. */ |
673 | struct drm_mm_node *gtt_space; | 738 | struct drm_mm_node *gtt_space; |
739 | struct list_head gtt_list; | ||
674 | 740 | ||
675 | /** This object's place on the active/flushing/inactive lists */ | 741 | /** This object's place on the active/flushing/inactive lists */ |
676 | struct list_head list; | 742 | struct list_head ring_list; |
743 | struct list_head mm_list; | ||
677 | /** This object's place on GPU write list */ | 744 | /** This object's place on GPU write list */ |
678 | struct list_head gpu_write_list; | 745 | struct list_head gpu_write_list; |
679 | /** This object's place on eviction list */ | 746 | /** This object's place in the batchbuffer or on the eviction list */ |
680 | struct list_head evict_list; | 747 | struct list_head exec_list; |
681 | 748 | ||
682 | /** | 749 | /** |
683 | * This is set if the object is on the active or flushing lists | 750 | * This is set if the object is on the active or flushing lists |
@@ -693,6 +760,12 @@ struct drm_i915_gem_object { | |||
693 | unsigned int dirty : 1; | 760 | unsigned int dirty : 1; |
694 | 761 | ||
695 | /** | 762 | /** |
763 | * This is set if the object has been written to since the last | ||
764 | * GPU flush. | ||
765 | */ | ||
766 | unsigned int pending_gpu_write : 1; | ||
767 | |||
768 | /** | ||
696 | * Fence register bits (if any) for this object. Will be set | 769 | * Fence register bits (if any) for this object. Will be set |
697 | * as needed when mapped into the GTT. | 770 | * as needed when mapped into the GTT. |
698 | * Protected by dev->struct_mutex. | 771 | * Protected by dev->struct_mutex. |
@@ -702,29 +775,15 @@ struct drm_i915_gem_object { | |||
702 | signed int fence_reg : 5; | 775 | signed int fence_reg : 5; |
703 | 776 | ||
704 | /** | 777 | /** |
705 | * Used for checking the object doesn't appear more than once | ||
706 | * in an execbuffer object list. | ||
707 | */ | ||
708 | unsigned int in_execbuffer : 1; | ||
709 | |||
710 | /** | ||
711 | * Advice: are the backing pages purgeable? | 778 | * Advice: are the backing pages purgeable? |
712 | */ | 779 | */ |
713 | unsigned int madv : 2; | 780 | unsigned int madv : 2; |
714 | 781 | ||
715 | /** | 782 | /** |
716 | * Refcount for the pages array. With the current locking scheme, there | ||
717 | * are at most two concurrent users: Binding a bo to the gtt and | ||
718 | * pwrite/pread using physical addresses. So two bits for a maximum | ||
719 | * of two users are enough. | ||
720 | */ | ||
721 | unsigned int pages_refcount : 2; | ||
722 | #define DRM_I915_GEM_OBJECT_MAX_PAGES_REFCOUNT 0x3 | ||
723 | |||
724 | /** | ||
725 | * Current tiling mode for the object. | 783 | * Current tiling mode for the object. |
726 | */ | 784 | */ |
727 | unsigned int tiling_mode : 2; | 785 | unsigned int tiling_mode : 2; |
786 | unsigned int tiling_changed : 1; | ||
728 | 787 | ||
729 | /** How many users have pinned this object in GTT space. The following | 788 | /** How many users have pinned this object in GTT space. The following |
730 | * users can each hold at most one reference: pwrite/pread, pin_ioctl | 789 | * users can each hold at most one reference: pwrite/pread, pin_ioctl |
@@ -738,28 +797,57 @@ struct drm_i915_gem_object { | |||
738 | unsigned int pin_count : 4; | 797 | unsigned int pin_count : 4; |
739 | #define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf | 798 | #define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf |
740 | 799 | ||
741 | /** AGP memory structure for our GTT binding. */ | 800 | /** |
742 | DRM_AGP_MEM *agp_mem; | 801 | * Is the object at the current location in the gtt mappable and |
802 | * fenceable? Used to avoid costly recalculations. | ||
803 | */ | ||
804 | unsigned int map_and_fenceable : 1; | ||
805 | |||
806 | /** | ||
807 | * Whether the current gtt mapping needs to be mappable (and isn't just | ||
808 | * mappable by accident). Track pin and fault separately for a more | ||
809 | * accurate mappable working set. | ||
810 | */ | ||
811 | unsigned int fault_mappable : 1; | ||
812 | unsigned int pin_mappable : 1; | ||
813 | |||
814 | /* | ||
815 | * Is the GPU currently using a fence to access this buffer? | ||
816 | */ | ||
817 | unsigned int pending_fenced_gpu_access:1; | ||
818 | unsigned int fenced_gpu_access:1; | ||
819 | |||
820 | unsigned int cache_level:2; | ||
743 | 821 | ||
744 | struct page **pages; | 822 | struct page **pages; |
745 | 823 | ||
746 | /** | 824 | /** |
747 | * Current offset of the object in GTT space. | 825 | * DMAR support |
748 | * | ||
749 | * This is the same as gtt_space->start | ||
750 | */ | 826 | */ |
751 | uint32_t gtt_offset; | 827 | struct scatterlist *sg_list; |
828 | int num_sg; | ||
752 | 829 | ||
753 | /* Which ring refers to this object */ | 830 | /** |
754 | struct intel_ring_buffer *ring; | 831 | * Used for performing relocations during execbuffer insertion. |
832 | */ | ||
833 | struct hlist_node exec_node; | ||
834 | unsigned long exec_handle; | ||
835 | struct drm_i915_gem_exec_object2 *exec_entry; | ||
755 | 836 | ||
756 | /** | 837 | /** |
757 | * Fake offset for use by mmap(2) | 838 | * Current offset of the object in GTT space. |
839 | * | ||
840 | * This is the same as gtt_space->start | ||
758 | */ | 841 | */ |
759 | uint64_t mmap_offset; | 842 | uint32_t gtt_offset; |
760 | 843 | ||
761 | /** Breadcrumb of last rendering to the buffer. */ | 844 | /** Breadcrumb of last rendering to the buffer. */ |
762 | uint32_t last_rendering_seqno; | 845 | uint32_t last_rendering_seqno; |
846 | struct intel_ring_buffer *ring; | ||
847 | |||
848 | /** Breadcrumb of last fenced GPU access to the buffer. */ | ||
849 | uint32_t last_fenced_seqno; | ||
850 | struct intel_ring_buffer *last_fenced_ring; | ||
763 | 851 | ||
764 | /** Current tiling stride for the object, if it's tiled. */ | 852 | /** Current tiling stride for the object, if it's tiled. */ |
765 | uint32_t stride; | 853 | uint32_t stride; |
@@ -767,8 +855,6 @@ struct drm_i915_gem_object { | |||
767 | /** Record of address bit 17 of each page at last unbind. */ | 855 | /** Record of address bit 17 of each page at last unbind. */ |
768 | unsigned long *bit_17; | 856 | unsigned long *bit_17; |
769 | 857 | ||
770 | /** AGP mapping type (AGP_USER_MEMORY or AGP_USER_CACHED_MEMORY) */ | ||
771 | uint32_t agp_type; | ||
772 | 858 | ||
773 | /** | 859 | /** |
774 | * If present, while GEM_DOMAIN_CPU is in the read domain this array | 860 | * If present, while GEM_DOMAIN_CPU is in the read domain this array |
@@ -816,33 +902,102 @@ struct drm_i915_gem_request { | |||
816 | /** global list entry for this request */ | 902 | /** global list entry for this request */ |
817 | struct list_head list; | 903 | struct list_head list; |
818 | 904 | ||
905 | struct drm_i915_file_private *file_priv; | ||
819 | /** file_priv list entry for this request */ | 906 | /** file_priv list entry for this request */ |
820 | struct list_head client_list; | 907 | struct list_head client_list; |
821 | }; | 908 | }; |
822 | 909 | ||
823 | struct drm_i915_file_private { | 910 | struct drm_i915_file_private { |
824 | struct { | 911 | struct { |
912 | struct spinlock lock; | ||
825 | struct list_head request_list; | 913 | struct list_head request_list; |
826 | } mm; | 914 | } mm; |
827 | }; | 915 | }; |
828 | 916 | ||
829 | enum intel_chip_family { | 917 | #define INTEL_INFO(dev) (((struct drm_i915_private *) (dev)->dev_private)->info) |
830 | CHIP_I8XX = 0x01, | 918 | |
831 | CHIP_I9XX = 0x02, | 919 | #define IS_I830(dev) ((dev)->pci_device == 0x3577) |
832 | CHIP_I915 = 0x04, | 920 | #define IS_845G(dev) ((dev)->pci_device == 0x2562) |
833 | CHIP_I965 = 0x08, | 921 | #define IS_I85X(dev) (INTEL_INFO(dev)->is_i85x) |
834 | }; | 922 | #define IS_I865G(dev) ((dev)->pci_device == 0x2572) |
923 | #define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g) | ||
924 | #define IS_I915GM(dev) ((dev)->pci_device == 0x2592) | ||
925 | #define IS_I945G(dev) ((dev)->pci_device == 0x2772) | ||
926 | #define IS_I945GM(dev) (INTEL_INFO(dev)->is_i945gm) | ||
927 | #define IS_BROADWATER(dev) (INTEL_INFO(dev)->is_broadwater) | ||
928 | #define IS_CRESTLINE(dev) (INTEL_INFO(dev)->is_crestline) | ||
929 | #define IS_GM45(dev) ((dev)->pci_device == 0x2A42) | ||
930 | #define IS_G4X(dev) (INTEL_INFO(dev)->is_g4x) | ||
931 | #define IS_PINEVIEW_G(dev) ((dev)->pci_device == 0xa001) | ||
932 | #define IS_PINEVIEW_M(dev) ((dev)->pci_device == 0xa011) | ||
933 | #define IS_PINEVIEW(dev) (INTEL_INFO(dev)->is_pineview) | ||
934 | #define IS_G33(dev) (INTEL_INFO(dev)->is_g33) | ||
935 | #define IS_IRONLAKE_D(dev) ((dev)->pci_device == 0x0042) | ||
936 | #define IS_IRONLAKE_M(dev) ((dev)->pci_device == 0x0046) | ||
937 | #define IS_IVYBRIDGE(dev) (INTEL_INFO(dev)->is_ivybridge) | ||
938 | #define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile) | ||
939 | |||
940 | /* | ||
941 | * The genX designation typically refers to the render engine, so render | ||
942 | * capability related checks should use IS_GEN, while display and other checks | ||
943 | * have their own (e.g. HAS_PCH_SPLIT for ILK+ display, IS_foo for particular | ||
944 | * chips, etc.). | ||
945 | */ | ||
946 | #define IS_GEN2(dev) (INTEL_INFO(dev)->gen == 2) | ||
947 | #define IS_GEN3(dev) (INTEL_INFO(dev)->gen == 3) | ||
948 | #define IS_GEN4(dev) (INTEL_INFO(dev)->gen == 4) | ||
949 | #define IS_GEN5(dev) (INTEL_INFO(dev)->gen == 5) | ||
950 | #define IS_GEN6(dev) (INTEL_INFO(dev)->gen == 6) | ||
951 | #define IS_GEN7(dev) (INTEL_INFO(dev)->gen == 7) | ||
952 | |||
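To make the convention above concrete, a hypothetical call site would key render-engine behaviour off the generation macros and everything else off the feature or chip macros. An illustrative sketch only, not code from this patch:

/* Illustrative only: gen checks for render-engine capabilities,
 * feature macros (HAS_*, IS_foo) for display and chip quirks. */
static bool example_needs_workaround(struct drm_device *dev)
{
	/* render-engine capability: keyed off the gen */
	if (IS_GEN6(dev) || IS_GEN7(dev))
		return true;
	/* a display-side decision would use HAS_PCH_SPLIT(dev) or an
	 * IS_foo() chip check here instead */
	return false;
}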
953 | #define HAS_BSD(dev) (INTEL_INFO(dev)->has_bsd_ring) | ||
954 | #define HAS_BLT(dev) (INTEL_INFO(dev)->has_blt_ring) | ||
955 | #define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws) | ||
956 | |||
957 | #define HAS_OVERLAY(dev) (INTEL_INFO(dev)->has_overlay) | ||
958 | #define OVERLAY_NEEDS_PHYSICAL(dev) (INTEL_INFO(dev)->overlay_needs_physical) | ||
959 | |||
960 | /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte | ||
961 | * rows, which changed the alignment requirements and fence programming. | ||
962 | */ | ||
963 | #define HAS_128_BYTE_Y_TILING(dev) (!IS_GEN2(dev) && !(IS_I915G(dev) || \ | ||
964 | IS_I915GM(dev))) | ||
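The arithmetic behind that comment: 32 rows of 128 bytes is 32 * 128 = 4096 bytes, so a 945+ Y tile covers exactly one 4 KiB page, which is what the changed alignment and fence rules rely on. A minimal sanity check with hypothetical defines (not from this header):

/* Hypothetical: 945+ Y-tile geometry, one tile per 4 KiB page. */
#define EXAMPLE_Y_TILE_WIDTH	128	/* bytes per row */
#define EXAMPLE_Y_TILE_ROWS	32	/* rows per tile */
/* EXAMPLE_Y_TILE_WIDTH * EXAMPLE_Y_TILE_ROWS == 4096 == PAGE_SIZE */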
965 | #define SUPPORTS_DIGITAL_OUTPUTS(dev) (!IS_GEN2(dev) && !IS_PINEVIEW(dev)) | ||
966 | #define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev) || IS_GEN5(dev)) | ||
967 | #define SUPPORTS_INTEGRATED_DP(dev) (IS_G4X(dev) || IS_GEN5(dev)) | ||
968 | #define SUPPORTS_EDP(dev) (IS_IRONLAKE_M(dev)) | ||
969 | #define SUPPORTS_TV(dev) (INTEL_INFO(dev)->supports_tv) | ||
970 | #define I915_HAS_HOTPLUG(dev) (INTEL_INFO(dev)->has_hotplug) | ||
971 | /* dsparb controlled by hw only */ | ||
972 | #define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IRONLAKE(dev)) | ||
973 | |||
974 | #define HAS_FW_BLC(dev) (INTEL_INFO(dev)->gen > 2) | ||
975 | #define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr) | ||
976 | #define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc) | ||
977 | |||
978 | #define HAS_PCH_SPLIT(dev) (IS_GEN5(dev) || IS_GEN6(dev) || IS_IVYBRIDGE(dev)) | ||
979 | #define HAS_PIPE_CONTROL(dev) (INTEL_INFO(dev)->gen >= 5) | ||
980 | |||
981 | #define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type) | ||
982 | #define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT) | ||
983 | #define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX) | ||
984 | |||
985 | #include "i915_trace.h" | ||
835 | 986 | ||
836 | extern struct drm_ioctl_desc i915_ioctls[]; | 987 | extern struct drm_ioctl_desc i915_ioctls[]; |
837 | extern int i915_max_ioctl; | 988 | extern int i915_max_ioctl; |
838 | extern unsigned int i915_fbpercrtc; | 989 | extern unsigned int i915_fbpercrtc; |
990 | extern int i915_panel_ignore_lid; | ||
839 | extern unsigned int i915_powersave; | 991 | extern unsigned int i915_powersave; |
992 | extern unsigned int i915_semaphores; | ||
840 | extern unsigned int i915_lvds_downclock; | 993 | extern unsigned int i915_lvds_downclock; |
994 | extern unsigned int i915_panel_use_ssc; | ||
995 | extern int i915_vbt_sdvo_panel_type; | ||
996 | extern unsigned int i915_enable_rc6; | ||
997 | extern unsigned int i915_enable_fbc; | ||
841 | 998 | ||
842 | extern int i915_suspend(struct drm_device *dev, pm_message_t state); | 999 | extern int i915_suspend(struct drm_device *dev, pm_message_t state); |
843 | extern int i915_resume(struct drm_device *dev); | 1000 | extern int i915_resume(struct drm_device *dev); |
844 | extern void i915_save_display(struct drm_device *dev); | ||
845 | extern void i915_restore_display(struct drm_device *dev); | ||
846 | extern int i915_master_create(struct drm_device *dev, struct drm_master *master); | 1001 | extern int i915_master_create(struct drm_device *dev, struct drm_master *master); |
847 | extern void i915_master_destroy(struct drm_device *dev, struct drm_master *master); | 1002 | extern void i915_master_destroy(struct drm_device *dev, struct drm_master *master); |
848 | 1003 | ||
@@ -860,9 +1015,9 @@ extern int i915_driver_device_is_agp(struct drm_device * dev); | |||
860 | extern long i915_compat_ioctl(struct file *filp, unsigned int cmd, | 1015 | extern long i915_compat_ioctl(struct file *filp, unsigned int cmd, |
861 | unsigned long arg); | 1016 | unsigned long arg); |
862 | extern int i915_emit_box(struct drm_device *dev, | 1017 | extern int i915_emit_box(struct drm_device *dev, |
863 | struct drm_clip_rect *boxes, | 1018 | struct drm_clip_rect *box, |
864 | int i, int DR1, int DR4); | 1019 | int DR1, int DR4); |
865 | extern int i965_reset(struct drm_device *dev, u8 flags); | 1020 | extern int i915_reset(struct drm_device *dev, u8 flags); |
866 | extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv); | 1021 | extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv); |
867 | extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv); | 1022 | extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv); |
868 | extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv); | 1023 | extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv); |
@@ -871,34 +1026,20 @@ extern void i915_update_gfx_val(struct drm_i915_private *dev_priv); | |||
871 | 1026 | ||
872 | /* i915_irq.c */ | 1027 | /* i915_irq.c */ |
873 | void i915_hangcheck_elapsed(unsigned long data); | 1028 | void i915_hangcheck_elapsed(unsigned long data); |
874 | void i915_destroy_error_state(struct drm_device *dev); | 1029 | void i915_handle_error(struct drm_device *dev, bool wedged); |
875 | extern int i915_irq_emit(struct drm_device *dev, void *data, | 1030 | extern int i915_irq_emit(struct drm_device *dev, void *data, |
876 | struct drm_file *file_priv); | 1031 | struct drm_file *file_priv); |
877 | extern int i915_irq_wait(struct drm_device *dev, void *data, | 1032 | extern int i915_irq_wait(struct drm_device *dev, void *data, |
878 | struct drm_file *file_priv); | 1033 | struct drm_file *file_priv); |
879 | void i915_trace_irq_get(struct drm_device *dev, u32 seqno); | ||
880 | extern void i915_enable_interrupt (struct drm_device *dev); | ||
881 | 1034 | ||
882 | extern irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS); | 1035 | extern void intel_irq_init(struct drm_device *dev); |
883 | extern void i915_driver_irq_preinstall(struct drm_device * dev); | 1036 | |
884 | extern int i915_driver_irq_postinstall(struct drm_device *dev); | ||
885 | extern void i915_driver_irq_uninstall(struct drm_device * dev); | ||
886 | extern int i915_vblank_pipe_set(struct drm_device *dev, void *data, | 1037 | extern int i915_vblank_pipe_set(struct drm_device *dev, void *data, |
887 | struct drm_file *file_priv); | 1038 | struct drm_file *file_priv); |
888 | extern int i915_vblank_pipe_get(struct drm_device *dev, void *data, | 1039 | extern int i915_vblank_pipe_get(struct drm_device *dev, void *data, |
889 | struct drm_file *file_priv); | 1040 | struct drm_file *file_priv); |
890 | extern int i915_enable_vblank(struct drm_device *dev, int crtc); | ||
891 | extern void i915_disable_vblank(struct drm_device *dev, int crtc); | ||
892 | extern u32 i915_get_vblank_counter(struct drm_device *dev, int crtc); | ||
893 | extern u32 gm45_get_vblank_counter(struct drm_device *dev, int crtc); | ||
894 | extern int i915_vblank_swap(struct drm_device *dev, void *data, | 1041 | extern int i915_vblank_swap(struct drm_device *dev, void *data, |
895 | struct drm_file *file_priv); | 1042 | struct drm_file *file_priv); |
896 | extern void i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask); | ||
897 | extern void i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask); | ||
898 | extern void ironlake_enable_graphics_irq(drm_i915_private_t *dev_priv, | ||
899 | u32 mask); | ||
900 | extern void ironlake_disable_graphics_irq(drm_i915_private_t *dev_priv, | ||
901 | u32 mask); | ||
902 | 1043 | ||
903 | void | 1044 | void |
904 | i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask); | 1045 | i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask); |
@@ -908,6 +1049,12 @@ i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask); | |||
908 | 1049 | ||
909 | void intel_enable_asle (struct drm_device *dev); | 1050 | void intel_enable_asle (struct drm_device *dev); |
910 | 1051 | ||
1052 | #ifdef CONFIG_DEBUG_FS | ||
1053 | extern void i915_destroy_error_state(struct drm_device *dev); | ||
1054 | #else | ||
1055 | #define i915_destroy_error_state(x) | ||
1056 | #endif | ||
1057 | |||
911 | 1058 | ||
912 | /* i915_mem.c */ | 1059 | /* i915_mem.c */ |
913 | extern int i915_mem_alloc(struct drm_device *dev, void *data, | 1060 | extern int i915_mem_alloc(struct drm_device *dev, void *data, |
@@ -964,83 +1111,124 @@ int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, | |||
964 | struct drm_file *file_priv); | 1111 | struct drm_file *file_priv); |
965 | void i915_gem_load(struct drm_device *dev); | 1112 | void i915_gem_load(struct drm_device *dev); |
966 | int i915_gem_init_object(struct drm_gem_object *obj); | 1113 | int i915_gem_init_object(struct drm_gem_object *obj); |
967 | struct drm_gem_object * i915_gem_alloc_object(struct drm_device *dev, | 1114 | int __must_check i915_gem_flush_ring(struct intel_ring_buffer *ring, |
968 | size_t size); | 1115 | uint32_t invalidate_domains, |
1116 | uint32_t flush_domains); | ||
1117 | struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev, | ||
1118 | size_t size); | ||
969 | void i915_gem_free_object(struct drm_gem_object *obj); | 1119 | void i915_gem_free_object(struct drm_gem_object *obj); |
970 | int i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment); | 1120 | int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj, |
971 | void i915_gem_object_unpin(struct drm_gem_object *obj); | 1121 | uint32_t alignment, |
972 | int i915_gem_object_unbind(struct drm_gem_object *obj); | 1122 | bool map_and_fenceable); |
973 | void i915_gem_release_mmap(struct drm_gem_object *obj); | 1123 | void i915_gem_object_unpin(struct drm_i915_gem_object *obj); |
1124 | int __must_check i915_gem_object_unbind(struct drm_i915_gem_object *obj); | ||
1125 | void i915_gem_release_mmap(struct drm_i915_gem_object *obj); | ||
974 | void i915_gem_lastclose(struct drm_device *dev); | 1126 | void i915_gem_lastclose(struct drm_device *dev); |
975 | uint32_t i915_get_gem_seqno(struct drm_device *dev, | 1127 | |
976 | struct intel_ring_buffer *ring); | 1128 | int __must_check i915_mutex_lock_interruptible(struct drm_device *dev); |
977 | bool i915_seqno_passed(uint32_t seq1, uint32_t seq2); | 1129 | int __must_check i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj); |
978 | int i915_gem_object_get_fence_reg(struct drm_gem_object *obj); | 1130 | void i915_gem_object_move_to_active(struct drm_i915_gem_object *obj, |
979 | int i915_gem_object_put_fence_reg(struct drm_gem_object *obj); | 1131 | struct intel_ring_buffer *ring, |
1132 | u32 seqno); | ||
1133 | |||
1134 | int i915_gem_dumb_create(struct drm_file *file_priv, | ||
1135 | struct drm_device *dev, | ||
1136 | struct drm_mode_create_dumb *args); | ||
1137 | int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev, | ||
1138 | uint32_t handle, uint64_t *offset); | ||
1139 | int i915_gem_dumb_destroy(struct drm_file *file_priv, struct drm_device *dev, | ||
1140 | uint32_t handle); | ||
1141 | /** | ||
1142 | * Returns true if seq1 is later than (or equal to) seq2. | ||
1143 | */ | ||
1144 | static inline bool | ||
1145 | i915_seqno_passed(uint32_t seq1, uint32_t seq2) | ||
1146 | { | ||
1147 | return (int32_t)(seq1 - seq2) >= 0; | ||
1148 | } | ||
1149 | |||
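The cast to int32_t is what makes the comparison wraparound-safe: the subtraction is interpreted modulo 2^32, so a seqno that has recently wrapped past zero still compares as "after" one issued just before the wrap. A standalone sketch with hypothetical values, in plain userspace C:

#include <assert.h>
#include <stdint.h>

static int seqno_passed(uint32_t seq1, uint32_t seq2)
{
	return (int32_t)(seq1 - seq2) >= 0;
}

int main(void)
{
	assert(seqno_passed(100, 100));	/* equal: passed */
	assert(seqno_passed(101, 100));	/* later: passed */
	assert(!seqno_passed(100, 101));	/* earlier: not passed */
	/* wraparound: 5 was issued after 0xfffffffb, and
	 * (int32_t)(5 - 0xfffffffb) == 10 >= 0 */
	assert(seqno_passed(5, 0xfffffffbu));
	return 0;
}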
1150 | static inline u32 | ||
1151 | i915_gem_next_request_seqno(struct intel_ring_buffer *ring) | ||
1152 | { | ||
1153 | drm_i915_private_t *dev_priv = ring->dev->dev_private; | ||
1154 | return ring->outstanding_lazy_request = dev_priv->next_seqno; | ||
1155 | } | ||
1156 | |||
1157 | int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj, | ||
1158 | struct intel_ring_buffer *pipelined); | ||
1159 | int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj); | ||
1160 | |||
980 | void i915_gem_retire_requests(struct drm_device *dev); | 1161 | void i915_gem_retire_requests(struct drm_device *dev); |
981 | void i915_gem_retire_work_handler(struct work_struct *work); | 1162 | void i915_gem_reset(struct drm_device *dev); |
982 | void i915_gem_clflush_object(struct drm_gem_object *obj); | 1163 | void i915_gem_clflush_object(struct drm_i915_gem_object *obj); |
983 | int i915_gem_object_set_domain(struct drm_gem_object *obj, | 1164 | int __must_check i915_gem_object_set_domain(struct drm_i915_gem_object *obj, |
984 | uint32_t read_domains, | 1165 | uint32_t read_domains, |
985 | uint32_t write_domain); | 1166 | uint32_t write_domain); |
986 | int i915_gem_init_ringbuffer(struct drm_device *dev); | 1167 | int __must_check i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj); |
1168 | int __must_check i915_gem_init_ringbuffer(struct drm_device *dev); | ||
987 | void i915_gem_cleanup_ringbuffer(struct drm_device *dev); | 1169 | void i915_gem_cleanup_ringbuffer(struct drm_device *dev); |
988 | int i915_gem_do_init(struct drm_device *dev, unsigned long start, | 1170 | void i915_gem_do_init(struct drm_device *dev, |
989 | unsigned long end); | 1171 | unsigned long start, |
990 | int i915_gpu_idle(struct drm_device *dev); | 1172 | unsigned long mappable_end, |
991 | int i915_gem_idle(struct drm_device *dev); | 1173 | unsigned long end); |
992 | uint32_t i915_add_request(struct drm_device *dev, | 1174 | int __must_check i915_gpu_idle(struct drm_device *dev); |
993 | struct drm_file *file_priv, | 1175 | int __must_check i915_gem_idle(struct drm_device *dev); |
994 | uint32_t flush_domains, | 1176 | int __must_check i915_add_request(struct intel_ring_buffer *ring, |
995 | struct intel_ring_buffer *ring); | 1177 | struct drm_file *file, |
996 | int i915_do_wait_request(struct drm_device *dev, | 1178 | struct drm_i915_gem_request *request); |
997 | uint32_t seqno, int interruptible, | 1179 | int __must_check i915_wait_request(struct intel_ring_buffer *ring, |
998 | struct intel_ring_buffer *ring); | 1180 | uint32_t seqno); |
999 | int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf); | 1181 | int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf); |
1000 | int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, | 1182 | int __must_check |
1001 | int write); | 1183 | i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, |
1002 | int i915_gem_object_set_to_display_plane(struct drm_gem_object *obj); | 1184 | bool write); |
1185 | int __must_check | ||
1186 | i915_gem_object_set_to_display_plane(struct drm_i915_gem_object *obj, | ||
1187 | struct intel_ring_buffer *pipelined); | ||
1003 | int i915_gem_attach_phys_object(struct drm_device *dev, | 1188 | int i915_gem_attach_phys_object(struct drm_device *dev, |
1004 | struct drm_gem_object *obj, | 1189 | struct drm_i915_gem_object *obj, |
1005 | int id, | 1190 | int id, |
1006 | int align); | 1191 | int align); |
1007 | void i915_gem_detach_phys_object(struct drm_device *dev, | 1192 | void i915_gem_detach_phys_object(struct drm_device *dev, |
1008 | struct drm_gem_object *obj); | 1193 | struct drm_i915_gem_object *obj); |
1009 | void i915_gem_free_all_phys_object(struct drm_device *dev); | 1194 | void i915_gem_free_all_phys_object(struct drm_device *dev); |
1010 | int i915_gem_object_get_pages(struct drm_gem_object *obj, gfp_t gfpmask); | 1195 | void i915_gem_release(struct drm_device *dev, struct drm_file *file); |
1011 | void i915_gem_object_put_pages(struct drm_gem_object *obj); | ||
1012 | void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv); | ||
1013 | int i915_gem_object_flush_write_domain(struct drm_gem_object *obj); | ||
1014 | 1196 | ||
1015 | void i915_gem_shrinker_init(void); | 1197 | uint32_t |
1016 | void i915_gem_shrinker_exit(void); | 1198 | i915_gem_get_unfenced_gtt_alignment(struct drm_device *dev, |
1199 | uint32_t size, | ||
1200 | int tiling_mode); | ||
1201 | |||
1202 | /* i915_gem_gtt.c */ | ||
1203 | void i915_gem_restore_gtt_mappings(struct drm_device *dev); | ||
1204 | int __must_check i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj); | ||
1205 | void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj); | ||
1017 | 1206 | ||
1018 | /* i915_gem_evict.c */ | 1207 | /* i915_gem_evict.c */ |
1019 | int i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignment); | 1208 | int __must_check i915_gem_evict_something(struct drm_device *dev, int min_size, |
1020 | int i915_gem_evict_everything(struct drm_device *dev); | 1209 | unsigned alignment, bool mappable); |
1021 | int i915_gem_evict_inactive(struct drm_device *dev); | 1210 | int __must_check i915_gem_evict_everything(struct drm_device *dev, |
1211 | bool purgeable_only); | ||
1212 | int __must_check i915_gem_evict_inactive(struct drm_device *dev, | ||
1213 | bool purgeable_only); | ||
1022 | 1214 | ||
1023 | /* i915_gem_tiling.c */ | 1215 | /* i915_gem_tiling.c */ |
1024 | void i915_gem_detect_bit_6_swizzle(struct drm_device *dev); | 1216 | void i915_gem_detect_bit_6_swizzle(struct drm_device *dev); |
1025 | void i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj); | 1217 | void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj); |
1026 | void i915_gem_object_save_bit_17_swizzle(struct drm_gem_object *obj); | 1218 | void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj); |
1027 | bool i915_tiling_ok(struct drm_device *dev, int stride, int size, | ||
1028 | int tiling_mode); | ||
1029 | bool i915_gem_object_fence_offset_ok(struct drm_gem_object *obj, | ||
1030 | int tiling_mode); | ||
1031 | 1219 | ||
1032 | /* i915_gem_debug.c */ | 1220 | /* i915_gem_debug.c */ |
1033 | void i915_gem_dump_object(struct drm_gem_object *obj, int len, | 1221 | void i915_gem_dump_object(struct drm_i915_gem_object *obj, int len, |
1034 | const char *where, uint32_t mark); | 1222 | const char *where, uint32_t mark); |
1035 | #if WATCH_INACTIVE | 1223 | #if WATCH_LISTS |
1036 | void i915_verify_inactive(struct drm_device *dev, char *file, int line); | 1224 | int i915_verify_lists(struct drm_device *dev); |
1037 | #else | 1225 | #else |
1038 | #define i915_verify_inactive(dev, file, line) | 1226 | #define i915_verify_lists(dev) 0 |
1039 | #endif | 1227 | #endif |
1040 | void i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle); | 1228 | void i915_gem_object_check_coherency(struct drm_i915_gem_object *obj, |
1041 | void i915_gem_dump_object(struct drm_gem_object *obj, int len, | 1229 | int handle); |
1230 | void i915_gem_dump_object(struct drm_i915_gem_object *obj, int len, | ||
1042 | const char *where, uint32_t mark); | 1231 | const char *where, uint32_t mark); |
1043 | void i915_dump_lru(struct drm_device *dev, const char *where); | ||
1044 | 1232 | ||
1045 | /* i915_debugfs.c */ | 1233 | /* i915_debugfs.c */ |
1046 | int i915_debugfs_init(struct drm_minor *minor); | 1234 | int i915_debugfs_init(struct drm_minor *minor); |
@@ -1054,23 +1242,45 @@ extern int i915_restore_state(struct drm_device *dev); | |||
1054 | extern int i915_save_state(struct drm_device *dev); | 1242 | extern int i915_save_state(struct drm_device *dev); |
1055 | extern int i915_restore_state(struct drm_device *dev); | 1243 | extern int i915_restore_state(struct drm_device *dev); |
1056 | 1244 | ||
1245 | /* intel_i2c.c */ | ||
1246 | extern int intel_setup_gmbus(struct drm_device *dev); | ||
1247 | extern void intel_teardown_gmbus(struct drm_device *dev); | ||
1248 | extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed); | ||
1249 | extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit); | ||
1250 | extern inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter) | ||
1251 | { | ||
1252 | return container_of(adapter, struct intel_gmbus, adapter)->force_bit; | ||
1253 | } | ||
1254 | extern void intel_i2c_reset(struct drm_device *dev); | ||
1255 | |||
1256 | /* intel_opregion.c */ | ||
1257 | extern int intel_opregion_setup(struct drm_device *dev); | ||
1057 | #ifdef CONFIG_ACPI | 1258 | #ifdef CONFIG_ACPI |
1058 | /* i915_opregion.c */ | 1259 | extern void intel_opregion_init(struct drm_device *dev); |
1059 | extern int intel_opregion_init(struct drm_device *dev, int resume); | 1260 | extern void intel_opregion_fini(struct drm_device *dev); |
1060 | extern void intel_opregion_free(struct drm_device *dev, int suspend); | 1261 | extern void intel_opregion_asle_intr(struct drm_device *dev); |
1061 | extern void opregion_asle_intr(struct drm_device *dev); | 1262 | extern void intel_opregion_gse_intr(struct drm_device *dev); |
1062 | extern void ironlake_opregion_gse_intr(struct drm_device *dev); | 1263 | extern void intel_opregion_enable_asle(struct drm_device *dev); |
1063 | extern void opregion_enable_asle(struct drm_device *dev); | ||
1064 | #else | 1264 | #else |
1065 | static inline int intel_opregion_init(struct drm_device *dev, int resume) { return 0; } | 1265 | static inline void intel_opregion_init(struct drm_device *dev) { return; } |
1066 | static inline void intel_opregion_free(struct drm_device *dev, int suspend) { return; } | 1266 | static inline void intel_opregion_fini(struct drm_device *dev) { return; } |
1067 | static inline void opregion_asle_intr(struct drm_device *dev) { return; } | 1267 | static inline void intel_opregion_asle_intr(struct drm_device *dev) { return; } |
1068 | static inline void ironlake_opregion_gse_intr(struct drm_device *dev) { return; } | 1268 | static inline void intel_opregion_gse_intr(struct drm_device *dev) { return; } |
1069 | static inline void opregion_enable_asle(struct drm_device *dev) { return; } | 1269 | static inline void intel_opregion_enable_asle(struct drm_device *dev) { return; } |
1070 | #endif | 1270 | #endif |
1071 | 1271 | ||
1272 | /* intel_acpi.c */ | ||
1273 | #ifdef CONFIG_ACPI | ||
1274 | extern void intel_register_dsm_handler(void); | ||
1275 | extern void intel_unregister_dsm_handler(void); | ||
1276 | #else | ||
1277 | static inline void intel_register_dsm_handler(void) { return; } | ||
1278 | static inline void intel_unregister_dsm_handler(void) { return; } | ||
1279 | #endif /* CONFIG_ACPI */ | ||
1280 | |||
1072 | /* modesetting */ | 1281 | /* modesetting */ |
1073 | extern void intel_modeset_init(struct drm_device *dev); | 1282 | extern void intel_modeset_init(struct drm_device *dev); |
1283 | extern void intel_modeset_gem_init(struct drm_device *dev); | ||
1074 | extern void intel_modeset_cleanup(struct drm_device *dev); | 1284 | extern void intel_modeset_cleanup(struct drm_device *dev); |
1075 | extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state); | 1285 | extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state); |
1076 | extern void i8xx_disable_fbc(struct drm_device *dev); | 1286 | extern void i8xx_disable_fbc(struct drm_device *dev); |
@@ -1080,145 +1290,110 @@ extern void intel_disable_fbc(struct drm_device *dev); | |||
1080 | extern void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval); | 1290 | extern void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval); |
1081 | extern bool intel_fbc_enabled(struct drm_device *dev); | 1291 | extern bool intel_fbc_enabled(struct drm_device *dev); |
1082 | extern bool ironlake_set_drps(struct drm_device *dev, u8 val); | 1292 | extern bool ironlake_set_drps(struct drm_device *dev, u8 val); |
1293 | extern void ironlake_enable_rc6(struct drm_device *dev); | ||
1294 | extern void gen6_set_rps(struct drm_device *dev, u8 val); | ||
1083 | extern void intel_detect_pch (struct drm_device *dev); | 1295 | extern void intel_detect_pch (struct drm_device *dev); |
1084 | extern int intel_trans_dp_port_sel (struct drm_crtc *crtc); | 1296 | extern int intel_trans_dp_port_sel (struct drm_crtc *crtc); |
1085 | 1297 | ||
1086 | /* overlay */ | 1298 | /* overlay */ |
1299 | #ifdef CONFIG_DEBUG_FS | ||
1087 | extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev); | 1300 | extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev); |
1088 | extern void intel_overlay_print_error_state(struct seq_file *m, struct intel_overlay_error_state *error); | 1301 | extern void intel_overlay_print_error_state(struct seq_file *m, struct intel_overlay_error_state *error); |
1089 | 1302 | ||
1090 | /** | 1303 | extern struct intel_display_error_state *intel_display_capture_error_state(struct drm_device *dev); |
1091 | * Lock test for when it's just for synchronization of ring access. | 1304 | extern void intel_display_print_error_state(struct seq_file *m, |
1092 | * | 1305 | struct drm_device *dev, |
1093 | * In that case, we don't need to do it when GEM is initialized as nobody else | 1306 | struct intel_display_error_state *error); |
1094 | * has access to the ring. | 1307 | #endif |
1095 | */ | ||
1096 | #define RING_LOCK_TEST_WITH_RETURN(dev, file_priv) do { \ | ||
1097 | if (((drm_i915_private_t *)dev->dev_private)->render_ring.gem_object \ | ||
1098 | == NULL) \ | ||
1099 | LOCK_TEST_WITH_RETURN(dev, file_priv); \ | ||
1100 | } while (0) | ||
1101 | 1308 | ||
1102 | #define I915_READ(reg) readl(dev_priv->regs + (reg)) | 1309 | #define LP_RING(d) (&((struct drm_i915_private *)(d))->ring[RCS]) |
1103 | #define I915_WRITE(reg, val) writel(val, dev_priv->regs + (reg)) | ||
1104 | #define I915_READ16(reg) readw(dev_priv->regs + (reg)) | ||
1105 | #define I915_WRITE16(reg, val) writel(val, dev_priv->regs + (reg)) | ||
1106 | #define I915_READ8(reg) readb(dev_priv->regs + (reg)) | ||
1107 | #define I915_WRITE8(reg, val) writeb(val, dev_priv->regs + (reg)) | ||
1108 | #define I915_WRITE64(reg, val) writeq(val, dev_priv->regs + (reg)) | ||
1109 | #define I915_READ64(reg) readq(dev_priv->regs + (reg)) | ||
1110 | #define POSTING_READ(reg) (void)I915_READ(reg) | ||
1111 | #define POSTING_READ16(reg) (void)I915_READ16(reg) | ||
1112 | |||
1113 | #define I915_VERBOSE 0 | ||
1114 | |||
1115 | #define BEGIN_LP_RING(n) do { \ | ||
1116 | drm_i915_private_t *dev_priv__ = dev->dev_private; \ | ||
1117 | if (I915_VERBOSE) \ | ||
1118 | DRM_DEBUG(" BEGIN_LP_RING %x\n", (int)(n)); \ | ||
1119 | intel_ring_begin(dev, &dev_priv__->render_ring, (n)); \ | ||
1120 | } while (0) | ||
1121 | 1310 | ||
1311 | #define BEGIN_LP_RING(n) \ | ||
1312 | intel_ring_begin(LP_RING(dev_priv), (n)) | ||
1122 | 1313 | ||
1123 | #define OUT_RING(x) do { \ | 1314 | #define OUT_RING(x) \ |
1124 | drm_i915_private_t *dev_priv__ = dev->dev_private; \ | 1315 | intel_ring_emit(LP_RING(dev_priv), x) |
1125 | if (I915_VERBOSE) \ | ||
1126 | DRM_DEBUG(" OUT_RING %x\n", (int)(x)); \ | ||
1127 | intel_ring_emit(dev, &dev_priv__->render_ring, x); \ | ||
1128 | } while (0) | ||
1129 | 1316 | ||
1130 | #define ADVANCE_LP_RING() do { \ | 1317 | #define ADVANCE_LP_RING() \ |
1131 | drm_i915_private_t *dev_priv__ = dev->dev_private; \ | 1318 | intel_ring_advance(LP_RING(dev_priv)) |
1132 | if (I915_VERBOSE) \ | ||
1133 | DRM_DEBUG("ADVANCE_LP_RING %x\n", \ | ||
1134 | dev_priv__->render_ring.tail); \ | ||
1135 | intel_ring_advance(dev, &dev_priv__->render_ring); \ | ||
1136 | } while(0) | ||
1137 | 1319 | ||
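With the new forms, BEGIN_LP_RING() forwards straight to intel_ring_begin() on the render ring and so returns its error code instead of swallowing it, and callers are expected to check it. A hedged sketch of the resulting call pattern (MI_FLUSH and MI_NOOP are existing opcodes; the surrounding function is hypothetical):

/* Hypothetical caller: dev_priv in scope as the macros require,
 * struct_mutex held. */
static int example_emit_flush(drm_i915_private_t *dev_priv)
{
	int ret;

	ret = BEGIN_LP_RING(2);	/* reserve two dwords on the render ring */
	if (ret)
		return ret;	/* e.g. the ring could not make space */

	OUT_RING(MI_FLUSH);
	OUT_RING(MI_NOOP);
	ADVANCE_LP_RING();	/* commit by advancing the tail pointer */

	return 0;
}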
1138 | /** | 1320 | /** |
1139 | * Reads a dword out of the status page, which is written to from the command | 1321 | * Lock test for when it's just for synchronization of ring access. |
1140 | * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or | ||
1141 | * MI_STORE_DATA_IMM. | ||
1142 | * | ||
1143 | * The following dwords have a reserved meaning: | ||
1144 | * 0x00: ISR copy, updated when an ISR bit not set in the HWSTAM changes. | ||
1145 | * 0x04: ring 0 head pointer | ||
1146 | * 0x05: ring 1 head pointer (915-class) | ||
1147 | * 0x06: ring 2 head pointer (915-class) | ||
1148 | * 0x10-0x1b: Context status DWords (GM45) | ||
1149 | * 0x1f: Last written status offset. (GM45) | ||
1150 | * | 1322 | * |
1151 | * The area from dword 0x20 to 0x3ff is available for driver usage. | 1323 | * In that case, we don't need to do it when GEM is initialized as nobody else |
1324 | * has access to the ring. | ||
1152 | */ | 1325 | */ |
1153 | #define READ_HWSP(dev_priv, reg) (((volatile u32 *)\ | 1326 | #define RING_LOCK_TEST_WITH_RETURN(dev, file) do { \ |
1154 | (dev_priv->render_ring.status_page.page_addr))[reg]) | 1327 | if (LP_RING(dev->dev_private)->obj == NULL) \ |
1155 | #define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX) | 1328 | LOCK_TEST_WITH_RETURN(dev, file); \ |
1156 | #define I915_GEM_HWS_INDEX 0x20 | 1329 | } while (0) |
1157 | #define I915_BREADCRUMB_INDEX 0x21 | ||
1158 | |||
1159 | #define INTEL_INFO(dev) (((struct drm_i915_private *) (dev)->dev_private)->info) | ||
1160 | |||
1161 | #define IS_I830(dev) ((dev)->pci_device == 0x3577) | ||
1162 | #define IS_845G(dev) ((dev)->pci_device == 0x2562) | ||
1163 | #define IS_I85X(dev) (INTEL_INFO(dev)->is_i85x) | ||
1164 | #define IS_I865G(dev) ((dev)->pci_device == 0x2572) | ||
1165 | #define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g) | ||
1166 | #define IS_I915GM(dev) ((dev)->pci_device == 0x2592) | ||
1167 | #define IS_I945G(dev) ((dev)->pci_device == 0x2772) | ||
1168 | #define IS_I945GM(dev) (INTEL_INFO(dev)->is_i945gm) | ||
1169 | #define IS_I965G(dev) (INTEL_INFO(dev)->is_i965g) | ||
1170 | #define IS_I965GM(dev) (INTEL_INFO(dev)->is_i965gm) | ||
1171 | #define IS_BROADWATER(dev) (INTEL_INFO(dev)->is_broadwater) | ||
1172 | #define IS_CRESTLINE(dev) (INTEL_INFO(dev)->is_crestline) | ||
1173 | #define IS_GM45(dev) ((dev)->pci_device == 0x2A42) | ||
1174 | #define IS_G4X(dev) (INTEL_INFO(dev)->is_g4x) | ||
1175 | #define IS_PINEVIEW_G(dev) ((dev)->pci_device == 0xa001) | ||
1176 | #define IS_PINEVIEW_M(dev) ((dev)->pci_device == 0xa011) | ||
1177 | #define IS_PINEVIEW(dev) (INTEL_INFO(dev)->is_pineview) | ||
1178 | #define IS_G33(dev) (INTEL_INFO(dev)->is_g33) | ||
1179 | #define IS_IRONLAKE_D(dev) ((dev)->pci_device == 0x0042) | ||
1180 | #define IS_IRONLAKE_M(dev) ((dev)->pci_device == 0x0046) | ||
1181 | #define IS_IRONLAKE(dev) (INTEL_INFO(dev)->is_ironlake) | ||
1182 | #define IS_I9XX(dev) (INTEL_INFO(dev)->is_i9xx) | ||
1183 | #define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile) | ||
1184 | |||
1185 | #define IS_GEN2(dev) (INTEL_INFO(dev)->gen == 2) | ||
1186 | #define IS_GEN3(dev) (INTEL_INFO(dev)->gen == 3) | ||
1187 | #define IS_GEN4(dev) (INTEL_INFO(dev)->gen == 4) | ||
1188 | #define IS_GEN5(dev) (INTEL_INFO(dev)->gen == 5) | ||
1189 | #define IS_GEN6(dev) (INTEL_INFO(dev)->gen == 6) | ||
1190 | |||
1191 | #define HAS_BSD(dev) (IS_IRONLAKE(dev) || IS_G4X(dev)) | ||
1192 | #define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws) | ||
1193 | 1330 | ||
1194 | /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte | 1331 | /* On SNB, the forcewake bit must be set before reading ring |
1195 | * rows, which changed the alignment requirements and fence programming. | 1332 | * registers, to prevent the GT core from powering down and returning |
1333 | * stale values. | ||
1196 | */ | 1334 | */ |
1197 | #define HAS_128_BYTE_Y_TILING(dev) (IS_I9XX(dev) && !(IS_I915G(dev) || \ | 1335 | void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv); |
1198 | IS_I915GM(dev))) | 1336 | void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv); |
1199 | #define SUPPORTS_DIGITAL_OUTPUTS(dev) (IS_I9XX(dev) && !IS_PINEVIEW(dev)) | 1337 | void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv); |
1200 | #define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev) || IS_IRONLAKE(dev)) | 1338 | |
1201 | #define SUPPORTS_INTEGRATED_DP(dev) (IS_G4X(dev) || IS_IRONLAKE(dev)) | 1339 | /* We give fast paths for the really cool registers */ |
1202 | #define SUPPORTS_EDP(dev) (IS_IRONLAKE_M(dev)) | 1340 | #define NEEDS_FORCE_WAKE(dev_priv, reg) \ |
1203 | #define SUPPORTS_TV(dev) (IS_I9XX(dev) && IS_MOBILE(dev) && \ | 1341 | (((dev_priv)->info->gen >= 6) && \ |
1204 | !IS_IRONLAKE(dev) && !IS_PINEVIEW(dev) && \ | 1342 | ((reg) < 0x40000) && \ |
1205 | !IS_GEN6(dev)) | 1343 | ((reg) != FORCEWAKE)) |
1206 | #define I915_HAS_HOTPLUG(dev) (INTEL_INFO(dev)->has_hotplug) | 1344 | |
1207 | /* dsparb controlled by hw only */ | 1345 | #define __i915_read(x, y) \ |
1208 | #define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IRONLAKE(dev)) | 1346 | static inline u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \ |
1209 | 1347 | u##x val = 0; \ | |
1210 | #define HAS_FW_BLC(dev) (IS_I9XX(dev) || IS_G4X(dev) || IS_IRONLAKE(dev)) | 1348 | if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \ |
1211 | #define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr) | 1349 | gen6_gt_force_wake_get(dev_priv); \ |
1212 | #define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc) | 1350 | val = read##y(dev_priv->regs + reg); \ |
1213 | #define I915_HAS_RC6(dev) (INTEL_INFO(dev)->has_rc6) | 1351 | gen6_gt_force_wake_put(dev_priv); \ |
1214 | 1352 | } else { \ | |
1215 | #define HAS_PCH_SPLIT(dev) (IS_IRONLAKE(dev) || \ | 1353 | val = read##y(dev_priv->regs + reg); \ |
1216 | IS_GEN6(dev)) | 1354 | } \ |
1217 | #define HAS_PIPE_CONTROL(dev) (IS_IRONLAKE(dev) || IS_GEN6(dev)) | 1355 | trace_i915_reg_rw(false, reg, val, sizeof(val)); \ |
1218 | 1356 | return val; \ | |
1219 | #define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type) | 1357 | } |
1220 | #define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT) | 1358 | |
1359 | __i915_read(8, b) | ||
1360 | __i915_read(16, w) | ||
1361 | __i915_read(32, l) | ||
1362 | __i915_read(64, q) | ||
1363 | #undef __i915_read | ||
1364 | |||
1365 | #define __i915_write(x, y) \ | ||
1366 | static inline void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \ | ||
1367 | trace_i915_reg_rw(true, reg, val, sizeof(val)); \ | ||
1368 | if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \ | ||
1369 | __gen6_gt_wait_for_fifo(dev_priv); \ | ||
1370 | } \ | ||
1371 | write##y(val, dev_priv->regs + reg); \ | ||
1372 | } | ||
1373 | __i915_write(8, b) | ||
1374 | __i915_write(16, w) | ||
1375 | __i915_write(32, l) | ||
1376 | __i915_write(64, q) | ||
1377 | #undef __i915_write | ||
1378 | |||
1379 | #define I915_READ8(reg) i915_read8(dev_priv, (reg)) | ||
1380 | #define I915_WRITE8(reg, val) i915_write8(dev_priv, (reg), (val)) | ||
1381 | |||
1382 | #define I915_READ16(reg) i915_read16(dev_priv, (reg)) | ||
1383 | #define I915_WRITE16(reg, val) i915_write16(dev_priv, (reg), (val)) | ||
1384 | #define I915_READ16_NOTRACE(reg) readw(dev_priv->regs + (reg)) | ||
1385 | #define I915_WRITE16_NOTRACE(reg, val) writew(val, dev_priv->regs + (reg)) | ||
1386 | |||
1387 | #define I915_READ(reg) i915_read32(dev_priv, (reg)) | ||
1388 | #define I915_WRITE(reg, val) i915_write32(dev_priv, (reg), (val)) | ||
1389 | #define I915_READ_NOTRACE(reg) readl(dev_priv->regs + (reg)) | ||
1390 | #define I915_WRITE_NOTRACE(reg, val) writel(val, dev_priv->regs + (reg)) | ||
1391 | |||
1392 | #define I915_WRITE64(reg, val) i915_write64(dev_priv, (reg), (val)) | ||
1393 | #define I915_READ64(reg) i915_read64(dev_priv, (reg)) | ||
1394 | |||
1395 | #define POSTING_READ(reg) (void)I915_READ_NOTRACE(reg) | ||
1396 | #define POSTING_READ16(reg) (void)I915_READ16_NOTRACE(reg) | ||
1221 | 1397 | ||
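Taken together, the generated i915_read32()/i915_write32() helpers mean that an ordinary I915_READ of a GT register on gen6 is transparently bracketed by forcewake get/put and traced, while the _NOTRACE variants are plain readl()/writel() with neither. An illustrative sketch (PRB0_HEAD and FORCEWAKE are existing register defines; the helper itself is hypothetical):

/* Hypothetical helper: dev_priv in scope as the accessor macros expect. */
static void example_peek_ring(struct drm_i915_private *dev_priv)
{
	u32 head = I915_READ(PRB0_HEAD);	/* gen6, reg < 0x40000: wrapped
						 * in forcewake get/put, traced */
	u32 raw = I915_READ_NOTRACE(PRB0_HEAD);	/* plain readl(): no trace,
						 * no forcewake */
	u32 fw = I915_READ(FORCEWAKE);		/* excluded by NEEDS_FORCE_WAKE
						 * so the wake path can't recurse */
	(void)head; (void)raw; (void)fw;
}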
1222 | #define PRIMARY_RINGBUFFER_SIZE (128*1024) | ||
1223 | 1398 | ||
1224 | #endif | 1399 | #endif |
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 90b1d6753b9d..a087e1bf0c2f 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c | |||
@@ -31,149 +31,229 @@ | |||
31 | #include "i915_drv.h" | 31 | #include "i915_drv.h" |
32 | #include "i915_trace.h" | 32 | #include "i915_trace.h" |
33 | #include "intel_drv.h" | 33 | #include "intel_drv.h" |
34 | #include <linux/shmem_fs.h> | ||
34 | #include <linux/slab.h> | 35 | #include <linux/slab.h> |
35 | #include <linux/swap.h> | 36 | #include <linux/swap.h> |
36 | #include <linux/pci.h> | 37 | #include <linux/pci.h> |
37 | #include <linux/intel-gtt.h> | 38 | |
38 | 39 | static __must_check int i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj); | |
39 | static uint32_t i915_gem_get_gtt_alignment(struct drm_gem_object *obj); | 40 | static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj); |
40 | static int i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj); | 41 | static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj); |
41 | static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj); | 42 | static __must_check int i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, |
42 | static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj); | 43 | bool write); |
43 | static int i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, | 44 | static __must_check int i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj, |
44 | int write); | 45 | uint64_t offset, |
45 | static int i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj, | 46 | uint64_t size); |
46 | uint64_t offset, | 47 | static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_i915_gem_object *obj); |
47 | uint64_t size); | 48 | static __must_check int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj, |
48 | static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj); | 49 | unsigned alignment, |
49 | static int i915_gem_object_wait_rendering(struct drm_gem_object *obj); | 50 | bool map_and_fenceable); |
50 | static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, | 51 | static void i915_gem_clear_fence_reg(struct drm_device *dev, |
51 | unsigned alignment); | 52 | struct drm_i915_fence_reg *reg); |
52 | static void i915_gem_clear_fence_reg(struct drm_gem_object *obj); | 53 | static int i915_gem_phys_pwrite(struct drm_device *dev, |
53 | static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj, | 54 | struct drm_i915_gem_object *obj, |
54 | struct drm_i915_gem_pwrite *args, | 55 | struct drm_i915_gem_pwrite *args, |
55 | struct drm_file *file_priv); | 56 | struct drm_file *file); |
56 | static void i915_gem_free_object_tail(struct drm_gem_object *obj); | 57 | static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj); |
57 | 58 | ||
58 | static LIST_HEAD(shrink_list); | 59 | static int i915_gem_inactive_shrink(struct shrinker *shrinker, |
59 | static DEFINE_SPINLOCK(shrink_list_lock); | 60 | struct shrink_control *sc); |
60 | 61 | ||
61 | static inline bool | 62 | /* some bookkeeping */ |
62 | i915_gem_object_is_inactive(struct drm_i915_gem_object *obj_priv) | 63 | static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv, |
64 | size_t size) | ||
63 | { | 65 | { |
64 | return obj_priv->gtt_space && | 66 | dev_priv->mm.object_count++; |
65 | !obj_priv->active && | 67 | dev_priv->mm.object_memory += size; |
66 | obj_priv->pin_count == 0; | ||
67 | } | 68 | } |
68 | 69 | ||
69 | int i915_gem_do_init(struct drm_device *dev, unsigned long start, | 70 | static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv, |
70 | unsigned long end) | 71 | size_t size) |
71 | { | 72 | { |
72 | drm_i915_private_t *dev_priv = dev->dev_private; | 73 | dev_priv->mm.object_count--; |
74 | dev_priv->mm.object_memory -= size; | ||
75 | } | ||
73 | 76 | ||
74 | if (start >= end || | 77 | static int |
75 | (start & (PAGE_SIZE - 1)) != 0 || | 78 | i915_gem_wait_for_error(struct drm_device *dev) |
76 | (end & (PAGE_SIZE - 1)) != 0) { | 79 | { |
77 | return -EINVAL; | 80 | struct drm_i915_private *dev_priv = dev->dev_private; |
81 | struct completion *x = &dev_priv->error_completion; | ||
82 | unsigned long flags; | ||
83 | int ret; | ||
84 | |||
85 | if (!atomic_read(&dev_priv->mm.wedged)) | ||
86 | return 0; | ||
87 | |||
88 | ret = wait_for_completion_interruptible(x); | ||
89 | if (ret) | ||
90 | return ret; | ||
91 | |||
92 | if (atomic_read(&dev_priv->mm.wedged)) { | ||
93 | /* GPU is hung, bump the completion count to account for | ||
94 | * the token we just consumed so that we never hit zero and | ||
95 | * end up waiting upon a subsequent completion event that | ||
96 | * will never happen. | ||
97 | */ | ||
98 | spin_lock_irqsave(&x->wait.lock, flags); | ||
99 | x->done++; | ||
100 | spin_unlock_irqrestore(&x->wait.lock, flags); | ||
78 | } | 101 | } |
102 | return 0; | ||
103 | } | ||
79 | 104 | ||
80 | drm_mm_init(&dev_priv->mm.gtt_space, start, | 105 | int i915_mutex_lock_interruptible(struct drm_device *dev) |
81 | end - start); | 106 | { |
107 | int ret; | ||
82 | 108 | ||
83 | dev->gtt_total = (uint32_t) (end - start); | 109 | ret = i915_gem_wait_for_error(dev); |
110 | if (ret) | ||
111 | return ret; | ||
84 | 112 | ||
113 | ret = mutex_lock_interruptible(&dev->struct_mutex); | ||
114 | if (ret) | ||
115 | return ret; | ||
116 | |||
117 | WARN_ON(i915_verify_lists(dev)); | ||
85 | return 0; | 118 | return 0; |
86 | } | 119 | } |
87 | 120 | ||
121 | static inline bool | ||
122 | i915_gem_object_is_inactive(struct drm_i915_gem_object *obj) | ||
123 | { | ||
124 | return obj->gtt_space && !obj->active && obj->pin_count == 0; | ||
125 | } | ||
126 | |||
127 | void i915_gem_do_init(struct drm_device *dev, | ||
128 | unsigned long start, | ||
129 | unsigned long mappable_end, | ||
130 | unsigned long end) | ||
131 | { | ||
132 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
133 | |||
134 | drm_mm_init(&dev_priv->mm.gtt_space, start, end - start); | ||
135 | |||
136 | dev_priv->mm.gtt_start = start; | ||
137 | dev_priv->mm.gtt_mappable_end = mappable_end; | ||
138 | dev_priv->mm.gtt_end = end; | ||
139 | dev_priv->mm.gtt_total = end - start; | ||
140 | dev_priv->mm.mappable_gtt_total = min(end, mappable_end) - start; | ||
141 | |||
142 | /* Take over this portion of the GTT */ | ||
143 | intel_gtt_clear_range(start / PAGE_SIZE, (end-start) / PAGE_SIZE); | ||
144 | } | ||
145 | |||
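A worked example of the bookkeeping above, with hypothetical aperture numbers: on a device whose GTT spans 512 MiB but where only the first 256 MiB is CPU-mappable, the call below leaves gtt_total at 512 MiB and mappable_gtt_total at min(512 MiB, 256 MiB) - 0 = 256 MiB:

/* Hypothetical init values: full GTT 0..512 MiB, CPU window 0..256 MiB. */
i915_gem_do_init(dev, 0, 256 * 1024 * 1024, 512 * 1024 * 1024);
/* => dev_priv->mm.gtt_total          == 512 MiB
 *    dev_priv->mm.mappable_gtt_total == 256 MiB
 *    intel_gtt_clear_range() then scrubs all 512 MiB / PAGE_SIZE entries */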
88 | int | 146 | int |
89 | i915_gem_init_ioctl(struct drm_device *dev, void *data, | 147 | i915_gem_init_ioctl(struct drm_device *dev, void *data, |
90 | struct drm_file *file_priv) | 148 | struct drm_file *file) |
91 | { | 149 | { |
92 | struct drm_i915_gem_init *args = data; | 150 | struct drm_i915_gem_init *args = data; |
93 | int ret; | 151 | |
152 | if (args->gtt_start >= args->gtt_end || | ||
153 | (args->gtt_end | args->gtt_start) & (PAGE_SIZE - 1)) | ||
154 | return -EINVAL; | ||
94 | 155 | ||
95 | mutex_lock(&dev->struct_mutex); | 156 | mutex_lock(&dev->struct_mutex); |
96 | ret = i915_gem_do_init(dev, args->gtt_start, args->gtt_end); | 157 | i915_gem_do_init(dev, args->gtt_start, args->gtt_end, args->gtt_end); |
97 | mutex_unlock(&dev->struct_mutex); | 158 | mutex_unlock(&dev->struct_mutex); |
98 | 159 | ||
99 | return ret; | 160 | return 0; |
100 | } | 161 | } |
101 | 162 | ||
102 | int | 163 | int |
103 | i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, | 164 | i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, |
104 | struct drm_file *file_priv) | 165 | struct drm_file *file) |
105 | { | 166 | { |
167 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
106 | struct drm_i915_gem_get_aperture *args = data; | 168 | struct drm_i915_gem_get_aperture *args = data; |
169 | struct drm_i915_gem_object *obj; | ||
170 | size_t pinned; | ||
107 | 171 | ||
108 | if (!(dev->driver->driver_features & DRIVER_GEM)) | 172 | if (!(dev->driver->driver_features & DRIVER_GEM)) |
109 | return -ENODEV; | 173 | return -ENODEV; |
110 | 174 | ||
111 | args->aper_size = dev->gtt_total; | 175 | pinned = 0; |
112 | args->aper_available_size = (args->aper_size - | 176 | mutex_lock(&dev->struct_mutex); |
113 | atomic_read(&dev->pin_memory)); | 177 | list_for_each_entry(obj, &dev_priv->mm.pinned_list, mm_list) |
178 | pinned += obj->gtt_space->size; | ||
179 | mutex_unlock(&dev->struct_mutex); | ||
180 | |||
181 | args->aper_size = dev_priv->mm.gtt_total; | ||
182 | args->aper_available_size = args->aper_size - pinned; | ||
114 | 183 | ||
115 | return 0; | 184 | return 0; |
116 | } | 185 | } |
117 | 186 | ||
118 | 187 | static int | |
119 | /** | 188 | i915_gem_create(struct drm_file *file, |
120 | * Creates a new mm object and returns a handle to it. | 189 | struct drm_device *dev, |
121 | */ | 190 | uint64_t size, |
122 | int | 191 | uint32_t *handle_p) |
123 | i915_gem_create_ioctl(struct drm_device *dev, void *data, | ||
124 | struct drm_file *file_priv) | ||
125 | { | 192 | { |
126 | struct drm_i915_gem_create *args = data; | 193 | struct drm_i915_gem_object *obj; |
127 | struct drm_gem_object *obj; | ||
128 | int ret; | 194 | int ret; |
129 | u32 handle; | 195 | u32 handle; |
130 | 196 | ||
131 | args->size = roundup(args->size, PAGE_SIZE); | 197 | size = roundup(size, PAGE_SIZE); |
132 | 198 | ||
133 | /* Allocate the new object */ | 199 | /* Allocate the new object */ |
134 | obj = i915_gem_alloc_object(dev, args->size); | 200 | obj = i915_gem_alloc_object(dev, size); |
135 | if (obj == NULL) | 201 | if (obj == NULL) |
136 | return -ENOMEM; | 202 | return -ENOMEM; |
137 | 203 | ||
138 | ret = drm_gem_handle_create(file_priv, obj, &handle); | 204 | ret = drm_gem_handle_create(file, &obj->base, &handle); |
139 | /* drop reference from allocate - handle holds it now */ | ||
140 | drm_gem_object_unreference_unlocked(obj); | ||
141 | if (ret) { | 205 | if (ret) { |
206 | drm_gem_object_release(&obj->base); | ||
207 | i915_gem_info_remove_obj(dev->dev_private, obj->base.size); | ||
208 | kfree(obj); | ||
142 | return ret; | 209 | return ret; |
143 | } | 210 | } |
144 | 211 | ||
145 | args->handle = handle; | 212 | /* drop reference from allocate - handle holds it now */ |
213 | drm_gem_object_unreference(&obj->base); | ||
214 | trace_i915_gem_object_create(obj); | ||
215 | |||
216 | *handle_p = handle; | ||
146 | return 0; | 217 | return 0; |
147 | } | 218 | } |
148 | 219 | ||
149 | static inline int | 220 | int |
150 | fast_shmem_read(struct page **pages, | 221 | i915_gem_dumb_create(struct drm_file *file, |
151 | loff_t page_base, int page_offset, | 222 | struct drm_device *dev, |
152 | char __user *data, | 223 | struct drm_mode_create_dumb *args) |
153 | int length) | ||
154 | { | 224 | { |
155 | char __iomem *vaddr; | 225 | /* have to work out size/pitch and return them */ |
156 | int unwritten; | 226 | args->pitch = ALIGN(args->width * ((args->bpp + 7) / 8), 64); |
157 | 227 | args->size = args->pitch * args->height; | |
158 | vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0); | 228 | return i915_gem_create(file, dev, |
159 | if (vaddr == NULL) | 229 | args->size, &args->handle); |
160 | return -ENOMEM; | 230 | } |
161 | unwritten = __copy_to_user_inatomic(data, vaddr + page_offset, length); | ||
162 | kunmap_atomic(vaddr, KM_USER0); | ||
163 | 231 | ||
164 | if (unwritten) | 232 | int i915_gem_dumb_destroy(struct drm_file *file, |
165 | return -EFAULT; | 233 | struct drm_device *dev, |
234 | uint32_t handle) | ||
235 | { | ||
236 | return drm_gem_handle_delete(file, handle); | ||
237 | } | ||
166 | 238 | ||
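The pitch/size computation in i915_gem_dumb_create() rounds bits-per-pixel up to whole bytes and aligns the stride to 64 bytes. With hypothetical arguments width = 1024, height = 768, bpp = 32: bytes per pixel = (32 + 7) / 8 = 4, pitch = ALIGN(1024 * 4, 64) = 4096, and size = 4096 * 768 = 3145728 bytes (768 pages). A userspace-style sketch of the same arithmetic:

#include <assert.h>
#include <stdint.h>

#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	uint32_t width = 1024, height = 768, bpp = 32;	/* hypothetical */
	uint32_t pitch = ALIGN_UP(width * ((bpp + 7) / 8), 64);
	uint64_t size = (uint64_t)pitch * height;

	assert(pitch == 4096);
	assert(size == 3145728);
	return 0;
}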
167 | return 0; | 239 | /** |
240 | * Creates a new mm object and returns a handle to it. | ||
241 | */ | ||
242 | int | ||
243 | i915_gem_create_ioctl(struct drm_device *dev, void *data, | ||
244 | struct drm_file *file) | ||
245 | { | ||
246 | struct drm_i915_gem_create *args = data; | ||
247 | return i915_gem_create(file, dev, | ||
248 | args->size, &args->handle); | ||
168 | } | 249 | } |
169 | 250 | ||
170 | static int i915_gem_object_needs_bit17_swizzle(struct drm_gem_object *obj) | 251 | static int i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj) |
171 | { | 252 | { |
172 | drm_i915_private_t *dev_priv = obj->dev->dev_private; | 253 | drm_i915_private_t *dev_priv = obj->base.dev->dev_private; |
173 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | ||
174 | 254 | ||
175 | return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 && | 255 | return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 && |
176 | obj_priv->tiling_mode != I915_TILING_NONE; | 256 | obj->tiling_mode != I915_TILING_NONE; |
177 | } | 257 | } |
178 | 258 | ||
179 | static inline void | 259 | static inline void |
@@ -249,88 +329,58 @@ slow_shmem_bit17_copy(struct page *gpu_page, | |||
249 | * fault, it fails so we can fall back to i915_gem_shmem_pread_slow(). | 329 | * fault, it fails so we can fall back to i915_gem_shmem_pread_slow(). |
250 | */ | 330 | */ |
251 | static int | 331 | static int |
252 | i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj, | 332 | i915_gem_shmem_pread_fast(struct drm_device *dev, |
333 | struct drm_i915_gem_object *obj, | ||
253 | struct drm_i915_gem_pread *args, | 334 | struct drm_i915_gem_pread *args, |
254 | struct drm_file *file_priv) | 335 | struct drm_file *file) |
255 | { | 336 | { |
256 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 337 | struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping; |
257 | ssize_t remain; | 338 | ssize_t remain; |
258 | loff_t offset, page_base; | 339 | loff_t offset; |
259 | char __user *user_data; | 340 | char __user *user_data; |
260 | int page_offset, page_length; | 341 | int page_offset, page_length; |
261 | int ret; | ||
262 | 342 | ||
263 | user_data = (char __user *) (uintptr_t) args->data_ptr; | 343 | user_data = (char __user *) (uintptr_t) args->data_ptr; |
264 | remain = args->size; | 344 | remain = args->size; |
265 | 345 | ||
266 | mutex_lock(&dev->struct_mutex); | ||
267 | |||
268 | ret = i915_gem_object_get_pages(obj, 0); | ||
269 | if (ret != 0) | ||
270 | goto fail_unlock; | ||
271 | |||
272 | ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset, | ||
273 | args->size); | ||
274 | if (ret != 0) | ||
275 | goto fail_put_pages; | ||
276 | |||
277 | obj_priv = to_intel_bo(obj); | ||
278 | offset = args->offset; | 346 | offset = args->offset; |
279 | 347 | ||
280 | while (remain > 0) { | 348 | while (remain > 0) { |
349 | struct page *page; | ||
350 | char *vaddr; | ||
351 | int ret; | ||
352 | |||
281 | /* Operation in this page | 353 | /* Operation in this page |
282 | * | 354 | * |
283 | * page_base = page offset within aperture | ||
284 | * page_offset = offset within page | 355 | * page_offset = offset within page |
285 | * page_length = bytes to copy for this page | 356 | * page_length = bytes to copy for this page |
286 | */ | 357 | */ |
287 | page_base = (offset & ~(PAGE_SIZE-1)); | 358 | page_offset = offset_in_page(offset); |
288 | page_offset = offset & (PAGE_SIZE-1); | ||
289 | page_length = remain; | 359 | page_length = remain; |
290 | if ((page_offset + remain) > PAGE_SIZE) | 360 | if ((page_offset + remain) > PAGE_SIZE) |
291 | page_length = PAGE_SIZE - page_offset; | 361 | page_length = PAGE_SIZE - page_offset; |
292 | 362 | ||
293 | ret = fast_shmem_read(obj_priv->pages, | 363 | page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT); |
294 | page_base, page_offset, | 364 | if (IS_ERR(page)) |
295 | user_data, page_length); | 365 | return PTR_ERR(page); |
366 | |||
367 | vaddr = kmap_atomic(page); | ||
368 | ret = __copy_to_user_inatomic(user_data, | ||
369 | vaddr + page_offset, | ||
370 | page_length); | ||
371 | kunmap_atomic(vaddr); | ||
372 | |||
373 | mark_page_accessed(page); | ||
374 | page_cache_release(page); | ||
296 | if (ret) | 375 | if (ret) |
297 | goto fail_put_pages; | 376 | return -EFAULT; |
298 | 377 | ||
299 | remain -= page_length; | 378 | remain -= page_length; |
300 | user_data += page_length; | 379 | user_data += page_length; |
301 | offset += page_length; | 380 | offset += page_length; |
302 | } | 381 | } |
303 | 382 | ||
304 | fail_put_pages: | 383 | return 0; |
305 | i915_gem_object_put_pages(obj); | ||
306 | fail_unlock: | ||
307 | mutex_unlock(&dev->struct_mutex); | ||
308 | |||
309 | return ret; | ||
310 | } | ||
311 | |||
312 | static int | ||
313 | i915_gem_object_get_pages_or_evict(struct drm_gem_object *obj) | ||
314 | { | ||
315 | int ret; | ||
316 | |||
317 | ret = i915_gem_object_get_pages(obj, __GFP_NORETRY | __GFP_NOWARN); | ||
318 | |||
319 | /* If we've insufficient memory to map in the pages, attempt | ||
320 | * to make some space by throwing out some old buffers. | ||
321 | */ | ||
322 | if (ret == -ENOMEM) { | ||
323 | struct drm_device *dev = obj->dev; | ||
324 | |||
325 | ret = i915_gem_evict_something(dev, obj->size, | ||
326 | i915_gem_get_gtt_alignment(obj)); | ||
327 | if (ret) | ||
328 | return ret; | ||
329 | |||
330 | ret = i915_gem_object_get_pages(obj, 0); | ||
331 | } | ||
332 | |||
333 | return ret; | ||
334 | } | 384 | } |
335 | 385 | ||
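The rewritten fast path walks the object one page at a time instead of pre-pinning all backing pages up front, clamping each copy so it never crosses a page boundary. A small runnable model of that clamping, with a worked value (PAGE_SIZE hard-coded to 4K for the demo):

#include <assert.h>
#include <stddef.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

/* Cap a copy at the end of the current page, as the pread/pwrite
 * loops above do. */
static size_t page_chunk(unsigned long offset, size_t remain,
			 unsigned long *page_offset)
{
	*page_offset = offset & ~PAGE_MASK;	/* offset_in_page(offset) */
	if (*page_offset + remain > PAGE_SIZE)
		return PAGE_SIZE - *page_offset;
	return remain;
}

int main(void)
{
	unsigned long po;

	/* offset 0x3456 sits 0x456 bytes into its page, so the first
	 * chunk is PAGE_SIZE - 0x456 bytes; the loop then advances. */
	assert(page_chunk(0x3456, 0x2000, &po) == PAGE_SIZE - 0x456);
	assert(po == 0x456);
	return 0;
}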
336 | /** | 386 | /** |
@@ -340,18 +390,19 @@ i915_gem_object_get_pages_or_evict(struct drm_gem_object *obj) | |||
340 | * and not take page faults. | 390 | * and not take page faults. |
341 | */ | 391 | */ |
342 | static int | 392 | static int |
343 | i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj, | 393 | i915_gem_shmem_pread_slow(struct drm_device *dev, |
394 | struct drm_i915_gem_object *obj, | ||
344 | struct drm_i915_gem_pread *args, | 395 | struct drm_i915_gem_pread *args, |
345 | struct drm_file *file_priv) | 396 | struct drm_file *file) |
346 | { | 397 | { |
347 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 398 | struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping; |
348 | struct mm_struct *mm = current->mm; | 399 | struct mm_struct *mm = current->mm; |
349 | struct page **user_pages; | 400 | struct page **user_pages; |
350 | ssize_t remain; | 401 | ssize_t remain; |
351 | loff_t offset, pinned_pages, i; | 402 | loff_t offset, pinned_pages, i; |
352 | loff_t first_data_page, last_data_page, num_pages; | 403 | loff_t first_data_page, last_data_page, num_pages; |
353 | int shmem_page_index, shmem_page_offset; | 404 | int shmem_page_offset; |
354 | int data_page_index, data_page_offset; | 405 | int data_page_index, data_page_offset; |
355 | int page_length; | 406 | int page_length; |
356 | int ret; | 407 | int ret; |
357 | uint64_t data_ptr = args->data_ptr; | 408 | uint64_t data_ptr = args->data_ptr; |
@@ -367,48 +418,44 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj, | |||
367 | last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE; | 418 | last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE; |
368 | num_pages = last_data_page - first_data_page + 1; | 419 | num_pages = last_data_page - first_data_page + 1; |
369 | 420 | ||
370 | user_pages = drm_calloc_large(num_pages, sizeof(struct page *)); | 421 | user_pages = drm_malloc_ab(num_pages, sizeof(struct page *)); |
371 | if (user_pages == NULL) | 422 | if (user_pages == NULL) |
372 | return -ENOMEM; | 423 | return -ENOMEM; |
373 | 424 | ||
425 | mutex_unlock(&dev->struct_mutex); | ||
374 | down_read(&mm->mmap_sem); | 426 | down_read(&mm->mmap_sem); |
375 | pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr, | 427 | pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr, |
376 | num_pages, 1, 0, user_pages, NULL); | 428 | num_pages, 1, 0, user_pages, NULL); |
377 | up_read(&mm->mmap_sem); | 429 | up_read(&mm->mmap_sem); |
430 | mutex_lock(&dev->struct_mutex); | ||
378 | if (pinned_pages < num_pages) { | 431 | if (pinned_pages < num_pages) { |
379 | ret = -EFAULT; | 432 | ret = -EFAULT; |
380 | goto fail_put_user_pages; | 433 | goto out; |
381 | } | 434 | } |
382 | 435 | ||
383 | do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj); | 436 | ret = i915_gem_object_set_cpu_read_domain_range(obj, |
384 | 437 | args->offset, | |
385 | mutex_lock(&dev->struct_mutex); | 438 | args->size); |
386 | |||
387 | ret = i915_gem_object_get_pages_or_evict(obj); | ||
388 | if (ret) | 439 | if (ret) |
389 | goto fail_unlock; | 440 | goto out; |
390 | 441 | ||
391 | ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset, | 442 | do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj); |
392 | args->size); | ||
393 | if (ret != 0) | ||
394 | goto fail_put_pages; | ||
395 | 443 | ||
396 | obj_priv = to_intel_bo(obj); | ||
397 | offset = args->offset; | 444 | offset = args->offset; |
398 | 445 | ||
399 | while (remain > 0) { | 446 | while (remain > 0) { |
447 | struct page *page; | ||
448 | |||
400 | /* Operation in this page | 449 | /* Operation in this page |
401 | * | 450 | * |
402 | * shmem_page_index = page number within shmem file | ||
403 | * shmem_page_offset = offset within page in shmem file | 451 | * shmem_page_offset = offset within page in shmem file |
404 | * data_page_index = page number in get_user_pages return | 452 | * data_page_index = page number in get_user_pages return |
405 | * data_page_offset = offset within data_page_index page. | 453 | * data_page_offset = offset within data_page_index page. |
406 | * page_length = bytes to copy for this page | 454 | * page_length = bytes to copy for this page |
407 | */ | 455 | */ |
408 | shmem_page_index = offset / PAGE_SIZE; | 456 | shmem_page_offset = offset_in_page(offset); |
409 | shmem_page_offset = offset & ~PAGE_MASK; | ||
410 | data_page_index = data_ptr / PAGE_SIZE - first_data_page; | 457 | data_page_index = data_ptr / PAGE_SIZE - first_data_page; |
411 | data_page_offset = data_ptr & ~PAGE_MASK; | 458 | data_page_offset = offset_in_page(data_ptr); |
412 | 459 | ||
413 | page_length = remain; | 460 | page_length = remain; |
414 | if ((shmem_page_offset + page_length) > PAGE_SIZE) | 461 | if ((shmem_page_offset + page_length) > PAGE_SIZE) |
@@ -416,8 +463,14 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj, | |||
416 | if ((data_page_offset + page_length) > PAGE_SIZE) | 463 | if ((data_page_offset + page_length) > PAGE_SIZE) |
417 | page_length = PAGE_SIZE - data_page_offset; | 464 | page_length = PAGE_SIZE - data_page_offset; |
418 | 465 | ||
466 | page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT); | ||
467 | if (IS_ERR(page)) { | ||
468 | ret = PTR_ERR(page); | ||
469 | goto out; | ||
470 | } | ||
471 | |||
419 | if (do_bit17_swizzling) { | 472 | if (do_bit17_swizzling) { |
420 | slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index], | 473 | slow_shmem_bit17_copy(page, |
421 | shmem_page_offset, | 474 | shmem_page_offset, |
422 | user_pages[data_page_index], | 475 | user_pages[data_page_index], |
423 | data_page_offset, | 476 | data_page_offset, |
@@ -426,23 +479,23 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj, | |||
426 | } else { | 479 | } else { |
427 | slow_shmem_copy(user_pages[data_page_index], | 480 | slow_shmem_copy(user_pages[data_page_index], |
428 | data_page_offset, | 481 | data_page_offset, |
429 | obj_priv->pages[shmem_page_index], | 482 | page, |
430 | shmem_page_offset, | 483 | shmem_page_offset, |
431 | page_length); | 484 | page_length); |
432 | } | 485 | } |
433 | 486 | ||
487 | mark_page_accessed(page); | ||
488 | page_cache_release(page); | ||
489 | |||
434 | remain -= page_length; | 490 | remain -= page_length; |
435 | data_ptr += page_length; | 491 | data_ptr += page_length; |
436 | offset += page_length; | 492 | offset += page_length; |
437 | } | 493 | } |
438 | 494 | ||
439 | fail_put_pages: | 495 | out: |
440 | i915_gem_object_put_pages(obj); | ||
441 | fail_unlock: | ||
442 | mutex_unlock(&dev->struct_mutex); | ||
443 | fail_put_user_pages: | ||
444 | for (i = 0; i < pinned_pages; i++) { | 496 | for (i = 0; i < pinned_pages; i++) { |
445 | SetPageDirty(user_pages[i]); | 497 | SetPageDirty(user_pages[i]); |
498 | mark_page_accessed(user_pages[i]); | ||
446 | page_cache_release(user_pages[i]); | 499 | page_cache_release(user_pages[i]); |
447 | } | 500 | } |
448 | drm_free_large(user_pages); | 501 | drm_free_large(user_pages); |
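Note the locking change in the slow path above: struct_mutex is now dropped around get_user_pages(). Pinning user pages can fault, and a fault on one of the driver's own GTT mmaps re-enters i915_gem_fault(), which takes struct_mutex, so holding the lock across the pin risks a self-deadlock. The pattern, condensed (a sketch with error handling elided, not the verbatim code):

mutex_unlock(&dev->struct_mutex);		/* may fault below */
down_read(&mm->mmap_sem);
pinned_pages = get_user_pages(current, mm, user_addr,
			      num_pages, write, 0, user_pages, NULL);
up_read(&mm->mmap_sem);
mutex_lock(&dev->struct_mutex);			/* revalidate object state */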
@@ -457,42 +510,60 @@ fail_put_user_pages: | |||
457 | */ | 510 | */ |
458 | int | 511 | int |
459 | i915_gem_pread_ioctl(struct drm_device *dev, void *data, | 512 | i915_gem_pread_ioctl(struct drm_device *dev, void *data, |
460 | struct drm_file *file_priv) | 513 | struct drm_file *file) |
461 | { | 514 | { |
462 | struct drm_i915_gem_pread *args = data; | 515 | struct drm_i915_gem_pread *args = data; |
463 | struct drm_gem_object *obj; | 516 | struct drm_i915_gem_object *obj; |
464 | struct drm_i915_gem_object *obj_priv; | 517 | int ret = 0; |
465 | int ret; | ||
466 | |||
467 | obj = drm_gem_object_lookup(dev, file_priv, args->handle); | ||
468 | if (obj == NULL) | ||
469 | return -ENOENT; | ||
470 | obj_priv = to_intel_bo(obj); | ||
471 | 518 | ||
472 | /* Bounds check source. */ | 519 | if (args->size == 0) |
473 | if (args->offset > obj->size || args->size > obj->size - args->offset) { | 520 | return 0; |
474 | ret = -EINVAL; | ||
475 | goto err; | ||
476 | } | ||
477 | 521 | ||
478 | if (!access_ok(VERIFY_WRITE, | 522 | if (!access_ok(VERIFY_WRITE, |
479 | (char __user *)(uintptr_t)args->data_ptr, | 523 | (char __user *)(uintptr_t)args->data_ptr, |
480 | args->size)) { | 524 | args->size)) |
481 | ret = -EFAULT; | 525 | return -EFAULT; |
482 | goto err; | 526 | |
527 | ret = fault_in_pages_writeable((char __user *)(uintptr_t)args->data_ptr, | ||
528 | args->size); | ||
529 | if (ret) | ||
530 | return -EFAULT; | ||
531 | |||
532 | ret = i915_mutex_lock_interruptible(dev); | ||
533 | if (ret) | ||
534 | return ret; | ||
535 | |||
536 | obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); | ||
537 | if (&obj->base == NULL) { | ||
538 | ret = -ENOENT; | ||
539 | goto unlock; | ||
483 | } | 540 | } |
484 | 541 | ||
485 | if (i915_gem_object_needs_bit17_swizzle(obj)) { | 542 | /* Bounds check source. */ |
486 | ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv); | 543 | if (args->offset > obj->base.size || |
487 | } else { | 544 | args->size > obj->base.size - args->offset) { |
488 | ret = i915_gem_shmem_pread_fast(dev, obj, args, file_priv); | 545 | ret = -EINVAL; |
489 | if (ret != 0) | 546 | goto out; |
490 | ret = i915_gem_shmem_pread_slow(dev, obj, args, | ||
491 | file_priv); | ||
492 | } | 547 | } |
493 | 548 | ||
494 | err: | 549 | trace_i915_gem_object_pread(obj, args->offset, args->size); |
495 | drm_gem_object_unreference_unlocked(obj); | 550 | |
551 | ret = i915_gem_object_set_cpu_read_domain_range(obj, | ||
552 | args->offset, | ||
553 | args->size); | ||
554 | if (ret) | ||
555 | goto out; | ||
556 | |||
557 | ret = -EFAULT; | ||
558 | if (!i915_gem_object_needs_bit17_swizzle(obj)) | ||
559 | ret = i915_gem_shmem_pread_fast(dev, obj, args, file); | ||
560 | if (ret == -EFAULT) | ||
561 | ret = i915_gem_shmem_pread_slow(dev, obj, args, file); | ||
562 | |||
563 | out: | ||
564 | drm_gem_object_unreference(&obj->base); | ||
565 | unlock: | ||
566 | mutex_unlock(&dev->struct_mutex); | ||
496 | return ret; | 567 | return ret; |
497 | } | 568 | } |
498 | 569 | ||
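A hypothetical user-space caller of this ioctl, for orientation (gem_pread is an illustrative helper name; the struct and ioctl number are the standard i915 uapi):

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

/* Read len bytes starting at offset out of a GEM object into buf;
 * the kernel picks the fast or slow copy path internally. */
static int gem_pread(int fd, uint32_t handle, uint64_t offset,
		     void *buf, uint64_t len)
{
	struct drm_i915_gem_pread req;

	memset(&req, 0, sizeof(req));
	req.handle   = handle;
	req.offset   = offset;
	req.size     = len;
	req.data_ptr = (uintptr_t)buf;

	return ioctl(fd, DRM_IOCTL_I915_GEM_PREAD, &req);
}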
@@ -509,13 +580,11 @@ fast_user_write(struct io_mapping *mapping, | |||
509 | char *vaddr_atomic; | 580 | char *vaddr_atomic; |
510 | unsigned long unwritten; | 581 | unsigned long unwritten; |
511 | 582 | ||
512 | vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base, KM_USER0); | 583 | vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base); |
513 | unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + page_offset, | 584 | unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + page_offset, |
514 | user_data, length); | 585 | user_data, length); |
515 | io_mapping_unmap_atomic(vaddr_atomic, KM_USER0); | 586 | io_mapping_unmap_atomic(vaddr_atomic); |
516 | if (unwritten) | 587 | return unwritten; |
517 | return -EFAULT; | ||
518 | return 0; | ||
519 | } | 588 | } |
520 | 589 | ||
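fast_user_write() now follows the copy_from_user() convention of returning the number of bytes left uncopied rather than an errno, and the KM_USER0 slot argument disappears with the stackless kmap_atomic API. The caller converts any shortfall into -EFAULT, which in turn triggers the slow-path retry (annotated fragment of the hunk further below):

if (fast_user_write(dev_priv->mm.gtt_mapping, page_base,
		    page_offset, user_data, page_length))
	return -EFAULT;		/* retried via i915_gem_gtt_pwrite_slow() */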
521 | /* Here's the write path which can sleep for | 590 | /* Here's the write path which can sleep for |
@@ -542,59 +611,26 @@ slow_kernel_write(struct io_mapping *mapping, | |||
542 | io_mapping_unmap(dst_vaddr); | 611 | io_mapping_unmap(dst_vaddr); |
543 | } | 612 | } |
544 | 613 | ||
545 | static inline int | ||
546 | fast_shmem_write(struct page **pages, | ||
547 | loff_t page_base, int page_offset, | ||
548 | char __user *data, | ||
549 | int length) | ||
550 | { | ||
551 | char __iomem *vaddr; | ||
552 | unsigned long unwritten; | ||
553 | |||
554 | vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0); | ||
555 | if (vaddr == NULL) | ||
556 | return -ENOMEM; | ||
557 | unwritten = __copy_from_user_inatomic(vaddr + page_offset, data, length); | ||
558 | kunmap_atomic(vaddr, KM_USER0); | ||
559 | |||
560 | if (unwritten) | ||
561 | return -EFAULT; | ||
562 | return 0; | ||
563 | } | ||
564 | |||
565 | /** | 614 | /** |
566 | * This is the fast pwrite path, where we copy the data directly from the | 615 | * This is the fast pwrite path, where we copy the data directly from the |
567 | * user into the GTT, uncached. | 616 | * user into the GTT, uncached. |
568 | */ | 617 | */ |
569 | static int | 618 | static int |
570 | i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj, | 619 | i915_gem_gtt_pwrite_fast(struct drm_device *dev, |
620 | struct drm_i915_gem_object *obj, | ||
571 | struct drm_i915_gem_pwrite *args, | 621 | struct drm_i915_gem_pwrite *args, |
572 | struct drm_file *file_priv) | 622 | struct drm_file *file) |
573 | { | 623 | { |
574 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | ||
575 | drm_i915_private_t *dev_priv = dev->dev_private; | 624 | drm_i915_private_t *dev_priv = dev->dev_private; |
576 | ssize_t remain; | 625 | ssize_t remain; |
577 | loff_t offset, page_base; | 626 | loff_t offset, page_base; |
578 | char __user *user_data; | 627 | char __user *user_data; |
579 | int page_offset, page_length; | 628 | int page_offset, page_length; |
580 | int ret; | ||
581 | 629 | ||
582 | user_data = (char __user *) (uintptr_t) args->data_ptr; | 630 | user_data = (char __user *) (uintptr_t) args->data_ptr; |
583 | remain = args->size; | 631 | remain = args->size; |
584 | 632 | ||
585 | 633 | offset = obj->gtt_offset + args->offset; | |
586 | mutex_lock(&dev->struct_mutex); | ||
587 | ret = i915_gem_object_pin(obj, 0); | ||
588 | if (ret) { | ||
589 | mutex_unlock(&dev->struct_mutex); | ||
590 | return ret; | ||
591 | } | ||
592 | ret = i915_gem_object_set_to_gtt_domain(obj, 1); | ||
593 | if (ret) | ||
594 | goto fail; | ||
595 | |||
596 | obj_priv = to_intel_bo(obj); | ||
597 | offset = obj_priv->gtt_offset + args->offset; | ||
598 | 634 | ||
599 | while (remain > 0) { | 635 | while (remain > 0) { |
600 | /* Operation in this page | 636 | /* Operation in this page |
@@ -603,32 +639,26 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj, | |||
603 | * page_offset = offset within page | 639 | * page_offset = offset within page |
604 | * page_length = bytes to copy for this page | 640 | * page_length = bytes to copy for this page |
605 | */ | 641 | */ |
606 | page_base = (offset & ~(PAGE_SIZE-1)); | 642 | page_base = offset & PAGE_MASK; |
607 | page_offset = offset & (PAGE_SIZE-1); | 643 | page_offset = offset_in_page(offset); |
608 | page_length = remain; | 644 | page_length = remain; |
609 | if ((page_offset + remain) > PAGE_SIZE) | 645 | if ((page_offset + remain) > PAGE_SIZE) |
610 | page_length = PAGE_SIZE - page_offset; | 646 | page_length = PAGE_SIZE - page_offset; |
611 | 647 | ||
612 | ret = fast_user_write (dev_priv->mm.gtt_mapping, page_base, | ||
613 | page_offset, user_data, page_length); | ||
614 | |||
615 | /* If we get a fault while copying data, then (presumably) our | 648 | /* If we get a fault while copying data, then (presumably) our |
616 | * source page isn't available. Return the error and we'll | 649 | * source page isn't available. Return the error and we'll |
617 | * retry in the slow path. | 650 | * retry in the slow path. |
618 | */ | 651 | */ |
619 | if (ret) | 652 | if (fast_user_write(dev_priv->mm.gtt_mapping, page_base, |
620 | goto fail; | 653 | page_offset, user_data, page_length)) |
654 | return -EFAULT; | ||
621 | 655 | ||
622 | remain -= page_length; | 656 | remain -= page_length; |
623 | user_data += page_length; | 657 | user_data += page_length; |
624 | offset += page_length; | 658 | offset += page_length; |
625 | } | 659 | } |
626 | 660 | ||
627 | fail: | 661 | return 0; |
628 | i915_gem_object_unpin(obj); | ||
629 | mutex_unlock(&dev->struct_mutex); | ||
630 | |||
631 | return ret; | ||
632 | } | 662 | } |
633 | 663 | ||
634 | /** | 664 | /** |
@@ -639,11 +669,11 @@ fail: | |||
639 | * than using i915_gem_gtt_pwrite_fast on a G45 (32-bit). | 669 | * than using i915_gem_gtt_pwrite_fast on a G45 (32-bit). |
640 | */ | 670 | */ |
641 | static int | 671 | static int |
642 | i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj, | 672 | i915_gem_gtt_pwrite_slow(struct drm_device *dev, |
673 | struct drm_i915_gem_object *obj, | ||
643 | struct drm_i915_gem_pwrite *args, | 674 | struct drm_i915_gem_pwrite *args, |
644 | struct drm_file *file_priv) | 675 | struct drm_file *file) |
645 | { | 676 | { |
646 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | ||
647 | drm_i915_private_t *dev_priv = dev->dev_private; | 677 | drm_i915_private_t *dev_priv = dev->dev_private; |
648 | ssize_t remain; | 678 | ssize_t remain; |
649 | loff_t gtt_page_base, offset; | 679 | loff_t gtt_page_base, offset; |
@@ -665,30 +695,30 @@ i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj, | |||
665 | last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE; | 695 | last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE; |
666 | num_pages = last_data_page - first_data_page + 1; | 696 | num_pages = last_data_page - first_data_page + 1; |
667 | 697 | ||
668 | user_pages = drm_calloc_large(num_pages, sizeof(struct page *)); | 698 | user_pages = drm_malloc_ab(num_pages, sizeof(struct page *)); |
669 | if (user_pages == NULL) | 699 | if (user_pages == NULL) |
670 | return -ENOMEM; | 700 | return -ENOMEM; |
671 | 701 | ||
702 | mutex_unlock(&dev->struct_mutex); | ||
672 | down_read(&mm->mmap_sem); | 703 | down_read(&mm->mmap_sem); |
673 | pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr, | 704 | pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr, |
674 | num_pages, 0, 0, user_pages, NULL); | 705 | num_pages, 0, 0, user_pages, NULL); |
675 | up_read(&mm->mmap_sem); | 706 | up_read(&mm->mmap_sem); |
707 | mutex_lock(&dev->struct_mutex); | ||
676 | if (pinned_pages < num_pages) { | 708 | if (pinned_pages < num_pages) { |
677 | ret = -EFAULT; | 709 | ret = -EFAULT; |
678 | goto out_unpin_pages; | 710 | goto out_unpin_pages; |
679 | } | 711 | } |
680 | 712 | ||
681 | mutex_lock(&dev->struct_mutex); | 713 | ret = i915_gem_object_set_to_gtt_domain(obj, true); |
682 | ret = i915_gem_object_pin(obj, 0); | ||
683 | if (ret) | 714 | if (ret) |
684 | goto out_unlock; | 715 | goto out_unpin_pages; |
685 | 716 | ||
686 | ret = i915_gem_object_set_to_gtt_domain(obj, 1); | 717 | ret = i915_gem_object_put_fence(obj); |
687 | if (ret) | 718 | if (ret) |
688 | goto out_unpin_object; | 719 | goto out_unpin_pages; |
689 | 720 | ||
690 | obj_priv = to_intel_bo(obj); | 721 | offset = obj->gtt_offset + args->offset; |
691 | offset = obj_priv->gtt_offset + args->offset; | ||
692 | 722 | ||
693 | while (remain > 0) { | 723 | while (remain > 0) { |
694 | /* Operation in this page | 724 | /* Operation in this page |
@@ -700,9 +730,9 @@ i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj, | |||
700 | * page_length = bytes to copy for this page | 730 | * page_length = bytes to copy for this page |
701 | */ | 731 | */ |
702 | gtt_page_base = offset & PAGE_MASK; | 732 | gtt_page_base = offset & PAGE_MASK; |
703 | gtt_page_offset = offset & ~PAGE_MASK; | 733 | gtt_page_offset = offset_in_page(offset); |
704 | data_page_index = data_ptr / PAGE_SIZE - first_data_page; | 734 | data_page_index = data_ptr / PAGE_SIZE - first_data_page; |
705 | data_page_offset = data_ptr & ~PAGE_MASK; | 735 | data_page_offset = offset_in_page(data_ptr); |
706 | 736 | ||
707 | page_length = remain; | 737 | page_length = remain; |
708 | if ((gtt_page_offset + page_length) > PAGE_SIZE) | 738 | if ((gtt_page_offset + page_length) > PAGE_SIZE) |
@@ -721,10 +751,6 @@ i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj, | |||
721 | data_ptr += page_length; | 751 | data_ptr += page_length; |
722 | } | 752 | } |
723 | 753 | ||
724 | out_unpin_object: | ||
725 | i915_gem_object_unpin(obj); | ||
726 | out_unlock: | ||
727 | mutex_unlock(&dev->struct_mutex); | ||
728 | out_unpin_pages: | 754 | out_unpin_pages: |
729 | for (i = 0; i < pinned_pages; i++) | 755 | for (i = 0; i < pinned_pages; i++) |
730 | page_cache_release(user_pages[i]); | 756 | page_cache_release(user_pages[i]); |
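Several hunks in this commit replace open-coded page masks with offset_in_page(). The identities involved, as a runnable check (PAGE_SIZE fixed at 4K and offset_in_page() reproduced locally for the demo):

#include <assert.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))
#define offset_in_page(p) ((unsigned long)(p) & ~PAGE_MASK)

int main(void)
{
	unsigned long offset = 0x12345;

	assert((offset & PAGE_MASK) == 0x12000);	/* page-aligned base */
	assert(offset_in_page(offset) == 0x345);	/* intra-page offset */
	assert(((offset & PAGE_MASK) | offset_in_page(offset)) == offset);
	/* the old spelling computes the same value */
	assert((offset & (PAGE_SIZE - 1)) == offset_in_page(offset));
	return 0;
}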
@@ -738,64 +764,65 @@ out_unpin_pages: | |||
738 | * copy_from_user into the kmapped pages backing the object. | 764 | * copy_from_user into the kmapped pages backing the object. |
739 | */ | 765 | */ |
740 | static int | 766 | static int |
741 | i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj, | 767 | i915_gem_shmem_pwrite_fast(struct drm_device *dev, |
768 | struct drm_i915_gem_object *obj, | ||
742 | struct drm_i915_gem_pwrite *args, | 769 | struct drm_i915_gem_pwrite *args, |
743 | struct drm_file *file_priv) | 770 | struct drm_file *file) |
744 | { | 771 | { |
745 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 772 | struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping; |
746 | ssize_t remain; | 773 | ssize_t remain; |
747 | loff_t offset, page_base; | 774 | loff_t offset; |
748 | char __user *user_data; | 775 | char __user *user_data; |
749 | int page_offset, page_length; | 776 | int page_offset, page_length; |
750 | int ret; | ||
751 | 777 | ||
752 | user_data = (char __user *) (uintptr_t) args->data_ptr; | 778 | user_data = (char __user *) (uintptr_t) args->data_ptr; |
753 | remain = args->size; | 779 | remain = args->size; |
754 | 780 | ||
755 | mutex_lock(&dev->struct_mutex); | ||
756 | |||
757 | ret = i915_gem_object_get_pages(obj, 0); | ||
758 | if (ret != 0) | ||
759 | goto fail_unlock; | ||
760 | |||
761 | ret = i915_gem_object_set_to_cpu_domain(obj, 1); | ||
762 | if (ret != 0) | ||
763 | goto fail_put_pages; | ||
764 | |||
765 | obj_priv = to_intel_bo(obj); | ||
766 | offset = args->offset; | 781 | offset = args->offset; |
767 | obj_priv->dirty = 1; | 782 | obj->dirty = 1; |
768 | 783 | ||
769 | while (remain > 0) { | 784 | while (remain > 0) { |
785 | struct page *page; | ||
786 | char *vaddr; | ||
787 | int ret; | ||
788 | |||
770 | /* Operation in this page | 789 | /* Operation in this page |
771 | * | 790 | * |
772 | * page_base = page offset within aperture | ||
773 | * page_offset = offset within page | 791 | * page_offset = offset within page |
774 | * page_length = bytes to copy for this page | 792 | * page_length = bytes to copy for this page |
775 | */ | 793 | */ |
776 | page_base = (offset & ~(PAGE_SIZE-1)); | 794 | page_offset = offset_in_page(offset); |
777 | page_offset = offset & (PAGE_SIZE-1); | ||
778 | page_length = remain; | 795 | page_length = remain; |
779 | if ((page_offset + remain) > PAGE_SIZE) | 796 | if ((page_offset + remain) > PAGE_SIZE) |
780 | page_length = PAGE_SIZE - page_offset; | 797 | page_length = PAGE_SIZE - page_offset; |
781 | 798 | ||
782 | ret = fast_shmem_write(obj_priv->pages, | 799 | page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT); |
783 | page_base, page_offset, | 800 | if (IS_ERR(page)) |
784 | user_data, page_length); | 801 | return PTR_ERR(page); |
802 | |||
803 | vaddr = kmap_atomic(page, KM_USER0); | ||
804 | ret = __copy_from_user_inatomic(vaddr + page_offset, | ||
805 | user_data, | ||
806 | page_length); | ||
807 | kunmap_atomic(vaddr, KM_USER0); | ||
808 | |||
809 | set_page_dirty(page); | ||
810 | mark_page_accessed(page); | ||
811 | page_cache_release(page); | ||
812 | |||
813 | /* If we get a fault while copying data, then (presumably) our | ||
814 | * source page isn't available. Return the error and we'll | ||
815 | * retry in the slow path. | ||
816 | */ | ||
785 | if (ret) | 817 | if (ret) |
786 | goto fail_put_pages; | 818 | return -EFAULT; |
787 | 819 | ||
788 | remain -= page_length; | 820 | remain -= page_length; |
789 | user_data += page_length; | 821 | user_data += page_length; |
790 | offset += page_length; | 822 | offset += page_length; |
791 | } | 823 | } |
792 | 824 | ||
793 | fail_put_pages: | 825 | return 0; |
794 | i915_gem_object_put_pages(obj); | ||
795 | fail_unlock: | ||
796 | mutex_unlock(&dev->struct_mutex); | ||
797 | |||
798 | return ret; | ||
799 | } | 826 | } |
800 | 827 | ||
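The shmem paths now look up each backing page on demand with shmem_read_mapping_page(), which returns the page with a reference held; every iteration must balance that reference, and writers must additionally dirty the page so the data is not lost when it is written back to swap. The per-page lifecycle, condensed from the hunk above (a simplified sketch, not verbatim):

page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
if (IS_ERR(page))
	return PTR_ERR(page);

vaddr = kmap_atomic(page, KM_USER0);	/* short, non-sleeping mapping */
ret = __copy_from_user_inatomic(vaddr + page_offset, user_data, page_length);
kunmap_atomic(vaddr, KM_USER0);

set_page_dirty(page);			/* keep the write across swap-out */
mark_page_accessed(page);		/* keep the page warm on the LRU */
page_cache_release(page);		/* drop the lookup reference */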
801 | /** | 828 | /** |
@@ -806,17 +833,18 @@ fail_unlock: | |||
806 | * struct_mutex is held. | 833 | * struct_mutex is held. |
807 | */ | 834 | */ |
808 | static int | 835 | static int |
809 | i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj, | 836 | i915_gem_shmem_pwrite_slow(struct drm_device *dev, |
837 | struct drm_i915_gem_object *obj, | ||
810 | struct drm_i915_gem_pwrite *args, | 838 | struct drm_i915_gem_pwrite *args, |
811 | struct drm_file *file_priv) | 839 | struct drm_file *file) |
812 | { | 840 | { |
813 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 841 | struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping; |
814 | struct mm_struct *mm = current->mm; | 842 | struct mm_struct *mm = current->mm; |
815 | struct page **user_pages; | 843 | struct page **user_pages; |
816 | ssize_t remain; | 844 | ssize_t remain; |
817 | loff_t offset, pinned_pages, i; | 845 | loff_t offset, pinned_pages, i; |
818 | loff_t first_data_page, last_data_page, num_pages; | 846 | loff_t first_data_page, last_data_page, num_pages; |
819 | int shmem_page_index, shmem_page_offset; | 847 | int shmem_page_offset; |
820 | int data_page_index, data_page_offset; | 848 | int data_page_index, data_page_offset; |
821 | int page_length; | 849 | int page_length; |
822 | int ret; | 850 | int ret; |
@@ -833,48 +861,43 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj, | |||
833 | last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE; | 861 | last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE; |
834 | num_pages = last_data_page - first_data_page + 1; | 862 | num_pages = last_data_page - first_data_page + 1; |
835 | 863 | ||
836 | user_pages = drm_calloc_large(num_pages, sizeof(struct page *)); | 864 | user_pages = drm_malloc_ab(num_pages, sizeof(struct page *)); |
837 | if (user_pages == NULL) | 865 | if (user_pages == NULL) |
838 | return -ENOMEM; | 866 | return -ENOMEM; |
839 | 867 | ||
868 | mutex_unlock(&dev->struct_mutex); | ||
840 | down_read(&mm->mmap_sem); | 869 | down_read(&mm->mmap_sem); |
841 | pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr, | 870 | pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr, |
842 | num_pages, 0, 0, user_pages, NULL); | 871 | num_pages, 0, 0, user_pages, NULL); |
843 | up_read(&mm->mmap_sem); | 872 | up_read(&mm->mmap_sem); |
873 | mutex_lock(&dev->struct_mutex); | ||
844 | if (pinned_pages < num_pages) { | 874 | if (pinned_pages < num_pages) { |
845 | ret = -EFAULT; | 875 | ret = -EFAULT; |
846 | goto fail_put_user_pages; | 876 | goto out; |
847 | } | 877 | } |
848 | 878 | ||
849 | do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj); | 879 | ret = i915_gem_object_set_to_cpu_domain(obj, 1); |
850 | |||
851 | mutex_lock(&dev->struct_mutex); | ||
852 | |||
853 | ret = i915_gem_object_get_pages_or_evict(obj); | ||
854 | if (ret) | 880 | if (ret) |
855 | goto fail_unlock; | 881 | goto out; |
856 | 882 | ||
857 | ret = i915_gem_object_set_to_cpu_domain(obj, 1); | 883 | do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj); |
858 | if (ret != 0) | ||
859 | goto fail_put_pages; | ||
860 | 884 | ||
861 | obj_priv = to_intel_bo(obj); | ||
862 | offset = args->offset; | 885 | offset = args->offset; |
863 | obj_priv->dirty = 1; | 886 | obj->dirty = 1; |
864 | 887 | ||
865 | while (remain > 0) { | 888 | while (remain > 0) { |
889 | struct page *page; | ||
890 | |||
866 | /* Operation in this page | 891 | /* Operation in this page |
867 | * | 892 | * |
868 | * shmem_page_index = page number within shmem file | ||
869 | * shmem_page_offset = offset within page in shmem file | 893 | * shmem_page_offset = offset within page in shmem file |
870 | * data_page_index = page number in get_user_pages return | 894 | * data_page_index = page number in get_user_pages return |
871 | * data_page_offset = offset within data_page_index page. | 895 | * data_page_offset = offset within data_page_index page. |
872 | * page_length = bytes to copy for this page | 896 | * page_length = bytes to copy for this page |
873 | */ | 897 | */ |
874 | shmem_page_index = offset / PAGE_SIZE; | 898 | shmem_page_offset = offset_in_page(offset); |
875 | shmem_page_offset = offset & ~PAGE_MASK; | ||
876 | data_page_index = data_ptr / PAGE_SIZE - first_data_page; | 899 | data_page_index = data_ptr / PAGE_SIZE - first_data_page; |
877 | data_page_offset = data_ptr & ~PAGE_MASK; | 900 | data_page_offset = offset_in_page(data_ptr); |
878 | 901 | ||
879 | page_length = remain; | 902 | page_length = remain; |
880 | if ((shmem_page_offset + page_length) > PAGE_SIZE) | 903 | if ((shmem_page_offset + page_length) > PAGE_SIZE) |
@@ -882,31 +905,37 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj, | |||
882 | if ((data_page_offset + page_length) > PAGE_SIZE) | 905 | if ((data_page_offset + page_length) > PAGE_SIZE) |
883 | page_length = PAGE_SIZE - data_page_offset; | 906 | page_length = PAGE_SIZE - data_page_offset; |
884 | 907 | ||
908 | page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT); | ||
909 | if (IS_ERR(page)) { | ||
910 | ret = PTR_ERR(page); | ||
911 | goto out; | ||
912 | } | ||
913 | |||
885 | if (do_bit17_swizzling) { | 914 | if (do_bit17_swizzling) { |
886 | slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index], | 915 | slow_shmem_bit17_copy(page, |
887 | shmem_page_offset, | 916 | shmem_page_offset, |
888 | user_pages[data_page_index], | 917 | user_pages[data_page_index], |
889 | data_page_offset, | 918 | data_page_offset, |
890 | page_length, | 919 | page_length, |
891 | 0); | 920 | 0); |
892 | } else { | 921 | } else { |
893 | slow_shmem_copy(obj_priv->pages[shmem_page_index], | 922 | slow_shmem_copy(page, |
894 | shmem_page_offset, | 923 | shmem_page_offset, |
895 | user_pages[data_page_index], | 924 | user_pages[data_page_index], |
896 | data_page_offset, | 925 | data_page_offset, |
897 | page_length); | 926 | page_length); |
898 | } | 927 | } |
899 | 928 | ||
929 | set_page_dirty(page); | ||
930 | mark_page_accessed(page); | ||
931 | page_cache_release(page); | ||
932 | |||
900 | remain -= page_length; | 933 | remain -= page_length; |
901 | data_ptr += page_length; | 934 | data_ptr += page_length; |
902 | offset += page_length; | 935 | offset += page_length; |
903 | } | 936 | } |
904 | 937 | ||
905 | fail_put_pages: | 938 | out: |
906 | i915_gem_object_put_pages(obj); | ||
907 | fail_unlock: | ||
908 | mutex_unlock(&dev->struct_mutex); | ||
909 | fail_put_user_pages: | ||
910 | for (i = 0; i < pinned_pages; i++) | 939 | for (i = 0; i < pinned_pages; i++) |
911 | page_cache_release(user_pages[i]); | 940 | page_cache_release(user_pages[i]); |
912 | drm_free_large(user_pages); | 941 | drm_free_large(user_pages); |
@@ -921,30 +950,43 @@ fail_put_user_pages: | |||
921 | */ | 950 | */ |
922 | int | 951 | int |
923 | i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, | 952 | i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, |
924 | struct drm_file *file_priv) | 953 | struct drm_file *file) |
925 | { | 954 | { |
926 | struct drm_i915_gem_pwrite *args = data; | 955 | struct drm_i915_gem_pwrite *args = data; |
927 | struct drm_gem_object *obj; | 956 | struct drm_i915_gem_object *obj; |
928 | struct drm_i915_gem_object *obj_priv; | 957 | int ret; |
929 | int ret = 0; | ||
930 | 958 | ||
931 | obj = drm_gem_object_lookup(dev, file_priv, args->handle); | 959 | if (args->size == 0) |
932 | if (obj == NULL) | 960 | return 0; |
933 | return -ENOENT; | 961 | |
934 | obj_priv = to_intel_bo(obj); | 962 | if (!access_ok(VERIFY_READ, |
963 | (char __user *)(uintptr_t)args->data_ptr, | ||
964 | args->size)) | ||
965 | return -EFAULT; | ||
966 | |||
967 | ret = fault_in_pages_readable((char __user *)(uintptr_t)args->data_ptr, | ||
968 | args->size); | ||
969 | if (ret) | ||
970 | return -EFAULT; | ||
971 | |||
972 | ret = i915_mutex_lock_interruptible(dev); | ||
973 | if (ret) | ||
974 | return ret; | ||
975 | |||
976 | obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); | ||
977 | if (&obj->base == NULL) { | ||
978 | ret = -ENOENT; | ||
979 | goto unlock; | ||
980 | } | ||
935 | 981 | ||
936 | /* Bounds check destination. */ | 982 | /* Bounds check destination. */ |
937 | if (args->offset > obj->size || args->size > obj->size - args->offset) { | 983 | if (args->offset > obj->base.size || |
984 | args->size > obj->base.size - args->offset) { | ||
938 | ret = -EINVAL; | 985 | ret = -EINVAL; |
939 | goto err; | 986 | goto out; |
940 | } | 987 | } |
941 | 988 | ||
942 | if (!access_ok(VERIFY_READ, | 989 | trace_i915_gem_object_pwrite(obj, args->offset, args->size); |
943 | (char __user *)(uintptr_t)args->data_ptr, | ||
944 | args->size)) { | ||
945 | ret = -EFAULT; | ||
946 | goto err; | ||
947 | } | ||
948 | 990 | ||
949 | /* We can only do the GTT pwrite on untiled buffers, as otherwise | 991 | /* We can only do the GTT pwrite on untiled buffers, as otherwise |
950 | * it would end up going through the fenced access, and we'll get | 992 | * it would end up going through the fenced access, and we'll get |
@@ -952,33 +994,44 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, | |||
952 | * pread/pwrite currently are reading and writing from the CPU | 994 | * pread/pwrite currently are reading and writing from the CPU |
953 | * perspective, requiring manual detiling by the client. | 995 | * perspective, requiring manual detiling by the client. |
954 | */ | 996 | */ |
955 | if (obj_priv->phys_obj) | 997 | if (obj->phys_obj) |
956 | ret = i915_gem_phys_pwrite(dev, obj, args, file_priv); | 998 | ret = i915_gem_phys_pwrite(dev, obj, args, file); |
957 | else if (obj_priv->tiling_mode == I915_TILING_NONE && | 999 | else if (obj->gtt_space && |
958 | dev->gtt_total != 0 && | 1000 | obj->base.write_domain != I915_GEM_DOMAIN_CPU) { |
959 | obj->write_domain != I915_GEM_DOMAIN_CPU) { | 1001 | ret = i915_gem_object_pin(obj, 0, true); |
960 | ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file_priv); | 1002 | if (ret) |
961 | if (ret == -EFAULT) { | 1003 | goto out; |
962 | ret = i915_gem_gtt_pwrite_slow(dev, obj, args, | 1004 | |
963 | file_priv); | 1005 | ret = i915_gem_object_set_to_gtt_domain(obj, true); |
964 | } | 1006 | if (ret) |
965 | } else if (i915_gem_object_needs_bit17_swizzle(obj)) { | 1007 | goto out_unpin; |
966 | ret = i915_gem_shmem_pwrite_slow(dev, obj, args, file_priv); | 1008 | |
1009 | ret = i915_gem_object_put_fence(obj); | ||
1010 | if (ret) | ||
1011 | goto out_unpin; | ||
1012 | |||
1013 | ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file); | ||
1014 | if (ret == -EFAULT) | ||
1015 | ret = i915_gem_gtt_pwrite_slow(dev, obj, args, file); | ||
1016 | |||
1017 | out_unpin: | ||
1018 | i915_gem_object_unpin(obj); | ||
967 | } else { | 1019 | } else { |
968 | ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file_priv); | 1020 | ret = i915_gem_object_set_to_cpu_domain(obj, 1); |
969 | if (ret == -EFAULT) { | 1021 | if (ret) |
970 | ret = i915_gem_shmem_pwrite_slow(dev, obj, args, | 1022 | goto out; |
971 | file_priv); | ||
972 | } | ||
973 | } | ||
974 | 1023 | ||
975 | #if WATCH_PWRITE | 1024 | ret = -EFAULT; |
976 | if (ret) | 1025 | if (!i915_gem_object_needs_bit17_swizzle(obj)) |
977 | DRM_INFO("pwrite failed %d\n", ret); | 1026 | ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file); |
978 | #endif | 1027 | if (ret == -EFAULT) |
1028 | ret = i915_gem_shmem_pwrite_slow(dev, obj, args, file); | ||
1029 | } | ||
979 | 1030 | ||
980 | err: | 1031 | out: |
981 | drm_gem_object_unreference_unlocked(obj); | 1032 | drm_gem_object_unreference(&obj->base); |
1033 | unlock: | ||
1034 | mutex_unlock(&dev->struct_mutex); | ||
982 | return ret; | 1035 | return ret; |
983 | } | 1036 | } |
984 | 1037 | ||
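The ioctl now dispatches between three write backends: contiguous phys objects (legacy cursor/overlay buffers), a pinned write-combined path through the GTT aperture, and the shmem CPU copy. Condensed decision tree (phys_path/gtt_path/shmem_path are illustrative placeholders for the calls in the hunk above):

if (obj->phys_obj)
	ret = phys_path();	/* contiguous physical objects */
else if (obj->gtt_space &&
	 obj->base.write_domain != I915_GEM_DOMAIN_CPU)
	ret = gtt_path();	/* pin + uncached write through the aperture */
else
	ret = shmem_path();	/* CPU copy into the backing pages */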
@@ -988,12 +1041,10 @@ err: | |||
988 | */ | 1041 | */ |
989 | int | 1042 | int |
990 | i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, | 1043 | i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, |
991 | struct drm_file *file_priv) | 1044 | struct drm_file *file) |
992 | { | 1045 | { |
993 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
994 | struct drm_i915_gem_set_domain *args = data; | 1046 | struct drm_i915_gem_set_domain *args = data; |
995 | struct drm_gem_object *obj; | 1047 | struct drm_i915_gem_object *obj; |
996 | struct drm_i915_gem_object *obj_priv; | ||
997 | uint32_t read_domains = args->read_domains; | 1048 | uint32_t read_domains = args->read_domains; |
998 | uint32_t write_domain = args->write_domain; | 1049 | uint32_t write_domain = args->write_domain; |
999 | int ret; | 1050 | int ret; |
@@ -1014,32 +1065,19 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, | |||
1014 | if (write_domain != 0 && read_domains != write_domain) | 1065 | if (write_domain != 0 && read_domains != write_domain) |
1015 | return -EINVAL; | 1066 | return -EINVAL; |
1016 | 1067 | ||
1017 | obj = drm_gem_object_lookup(dev, file_priv, args->handle); | 1068 | ret = i915_mutex_lock_interruptible(dev); |
1018 | if (obj == NULL) | 1069 | if (ret) |
1019 | return -ENOENT; | 1070 | return ret; |
1020 | obj_priv = to_intel_bo(obj); | ||
1021 | |||
1022 | mutex_lock(&dev->struct_mutex); | ||
1023 | 1071 | ||
1024 | intel_mark_busy(dev, obj); | 1072 | obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); |
1073 | if (&obj->base == NULL) { | ||
1074 | ret = -ENOENT; | ||
1075 | goto unlock; | ||
1076 | } | ||
1025 | 1077 | ||
1026 | #if WATCH_BUF | ||
1027 | DRM_INFO("set_domain_ioctl %p(%zd), %08x %08x\n", | ||
1028 | obj, obj->size, read_domains, write_domain); | ||
1029 | #endif | ||
1030 | if (read_domains & I915_GEM_DOMAIN_GTT) { | 1078 | if (read_domains & I915_GEM_DOMAIN_GTT) { |
1031 | ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0); | 1079 | ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0); |
1032 | 1080 | ||
1033 | /* Update the LRU on the fence for the CPU access that's | ||
1034 | * about to occur. | ||
1035 | */ | ||
1036 | if (obj_priv->fence_reg != I915_FENCE_REG_NONE) { | ||
1037 | struct drm_i915_fence_reg *reg = | ||
1038 | &dev_priv->fence_regs[obj_priv->fence_reg]; | ||
1039 | list_move_tail(®->lru_list, | ||
1040 | &dev_priv->mm.fence_list); | ||
1041 | } | ||
1042 | |||
1043 | /* Silently promote "you're not bound, there was nothing to do" | 1081 | /* Silently promote "you're not bound, there was nothing to do" |
1044 | * to success, since the client was just asking us to | 1082 | * to success, since the client was just asking us to |
1045 | * make sure everything was done. | 1083 | * make sure everything was done. |
@@ -1050,12 +1088,8 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, | |||
1050 | ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0); | 1088 | ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0); |
1051 | } | 1089 | } |
1052 | 1090 | ||
1053 | 1091 | drm_gem_object_unreference(&obj->base); | |
1054 | /* Maintain LRU order of "inactive" objects */ | 1092 | unlock: |
1055 | if (ret == 0 && i915_gem_object_is_inactive(obj_priv)) | ||
1056 | list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list); | ||
1057 | |||
1058 | drm_gem_object_unreference(obj); | ||
1059 | mutex_unlock(&dev->struct_mutex); | 1093 | mutex_unlock(&dev->struct_mutex); |
1060 | return ret; | 1094 | return ret; |
1061 | } | 1095 | } |
@@ -1065,34 +1099,31 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, | |||
1065 | */ | 1099 | */ |
1066 | int | 1100 | int |
1067 | i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data, | 1101 | i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data, |
1068 | struct drm_file *file_priv) | 1102 | struct drm_file *file) |
1069 | { | 1103 | { |
1070 | struct drm_i915_gem_sw_finish *args = data; | 1104 | struct drm_i915_gem_sw_finish *args = data; |
1071 | struct drm_gem_object *obj; | 1105 | struct drm_i915_gem_object *obj; |
1072 | struct drm_i915_gem_object *obj_priv; | ||
1073 | int ret = 0; | 1106 | int ret = 0; |
1074 | 1107 | ||
1075 | if (!(dev->driver->driver_features & DRIVER_GEM)) | 1108 | if (!(dev->driver->driver_features & DRIVER_GEM)) |
1076 | return -ENODEV; | 1109 | return -ENODEV; |
1077 | 1110 | ||
1078 | mutex_lock(&dev->struct_mutex); | 1111 | ret = i915_mutex_lock_interruptible(dev); |
1079 | obj = drm_gem_object_lookup(dev, file_priv, args->handle); | 1112 | if (ret) |
1080 | if (obj == NULL) { | 1113 | return ret; |
1081 | mutex_unlock(&dev->struct_mutex); | ||
1082 | return -ENOENT; | ||
1083 | } | ||
1084 | 1114 | ||
1085 | #if WATCH_BUF | 1115 | obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); |
1086 | DRM_INFO("%s: sw_finish %d (%p %zd)\n", | 1116 | if (&obj->base == NULL) { |
1087 | __func__, args->handle, obj, obj->size); | 1117 | ret = -ENOENT; |
1088 | #endif | 1118 | goto unlock; |
1089 | obj_priv = to_intel_bo(obj); | 1119 | } |
1090 | 1120 | ||
1091 | /* Pinned buffers may be scanout, so flush the cache */ | 1121 | /* Pinned buffers may be scanout, so flush the cache */ |
1092 | if (obj_priv->pin_count) | 1122 | if (obj->pin_count) |
1093 | i915_gem_object_flush_cpu_write_domain(obj); | 1123 | i915_gem_object_flush_cpu_write_domain(obj); |
1094 | 1124 | ||
1095 | drm_gem_object_unreference(obj); | 1125 | drm_gem_object_unreference(&obj->base); |
1126 | unlock: | ||
1096 | mutex_unlock(&dev->struct_mutex); | 1127 | mutex_unlock(&dev->struct_mutex); |
1097 | return ret; | 1128 | return ret; |
1098 | } | 1129 | } |
@@ -1106,21 +1137,24 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data, | |||
1106 | */ | 1137 | */ |
1107 | int | 1138 | int |
1108 | i915_gem_mmap_ioctl(struct drm_device *dev, void *data, | 1139 | i915_gem_mmap_ioctl(struct drm_device *dev, void *data, |
1109 | struct drm_file *file_priv) | 1140 | struct drm_file *file) |
1110 | { | 1141 | { |
1142 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1111 | struct drm_i915_gem_mmap *args = data; | 1143 | struct drm_i915_gem_mmap *args = data; |
1112 | struct drm_gem_object *obj; | 1144 | struct drm_gem_object *obj; |
1113 | loff_t offset; | ||
1114 | unsigned long addr; | 1145 | unsigned long addr; |
1115 | 1146 | ||
1116 | if (!(dev->driver->driver_features & DRIVER_GEM)) | 1147 | if (!(dev->driver->driver_features & DRIVER_GEM)) |
1117 | return -ENODEV; | 1148 | return -ENODEV; |
1118 | 1149 | ||
1119 | obj = drm_gem_object_lookup(dev, file_priv, args->handle); | 1150 | obj = drm_gem_object_lookup(dev, file, args->handle); |
1120 | if (obj == NULL) | 1151 | if (obj == NULL) |
1121 | return -ENOENT; | 1152 | return -ENOENT; |
1122 | 1153 | ||
1123 | offset = args->offset; | 1154 | if (obj->size > dev_priv->mm.gtt_mappable_end) { |
1155 | drm_gem_object_unreference_unlocked(obj); | ||
1156 | return -E2BIG; | ||
1157 | } | ||
1124 | 1158 | ||
1125 | down_write(¤t->mm->mmap_sem); | 1159 | down_write(¤t->mm->mmap_sem); |
1126 | addr = do_mmap(obj->filp, 0, args->size, | 1160 | addr = do_mmap(obj->filp, 0, args->size, |
@@ -1154,10 +1188,9 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data, | |||
1154 | */ | 1188 | */ |
1155 | int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | 1189 | int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) |
1156 | { | 1190 | { |
1157 | struct drm_gem_object *obj = vma->vm_private_data; | 1191 | struct drm_i915_gem_object *obj = to_intel_bo(vma->vm_private_data); |
1158 | struct drm_device *dev = obj->dev; | 1192 | struct drm_device *dev = obj->base.dev; |
1159 | drm_i915_private_t *dev_priv = dev->dev_private; | 1193 | drm_i915_private_t *dev_priv = dev->dev_private; |
1160 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | ||
1161 | pgoff_t page_offset; | 1194 | pgoff_t page_offset; |
1162 | unsigned long pfn; | 1195 | unsigned long pfn; |
1163 | int ret = 0; | 1196 | int ret = 0; |
@@ -1167,42 +1200,64 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | |||
1167 | page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >> | 1200 | page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >> |
1168 | PAGE_SHIFT; | 1201 | PAGE_SHIFT; |
1169 | 1202 | ||
1203 | ret = i915_mutex_lock_interruptible(dev); | ||
1204 | if (ret) | ||
1205 | goto out; | ||
1206 | |||
1207 | trace_i915_gem_object_fault(obj, page_offset, true, write); | ||
1208 | |||
1170 | /* Now bind it into the GTT if needed */ | 1209 | /* Now bind it into the GTT if needed */ |
1171 | mutex_lock(&dev->struct_mutex); | 1210 | if (!obj->map_and_fenceable) { |
1172 | if (!obj_priv->gtt_space) { | 1211 | ret = i915_gem_object_unbind(obj); |
1173 | ret = i915_gem_object_bind_to_gtt(obj, 0); | ||
1174 | if (ret) | 1212 | if (ret) |
1175 | goto unlock; | 1213 | goto unlock; |
1176 | 1214 | } | |
1177 | ret = i915_gem_object_set_to_gtt_domain(obj, write); | 1215 | if (!obj->gtt_space) { |
1216 | ret = i915_gem_object_bind_to_gtt(obj, 0, true); | ||
1178 | if (ret) | 1217 | if (ret) |
1179 | goto unlock; | 1218 | goto unlock; |
1180 | } | ||
1181 | 1219 | ||
1182 | /* Need a new fence register? */ | 1220 | ret = i915_gem_object_set_to_gtt_domain(obj, write); |
1183 | if (obj_priv->tiling_mode != I915_TILING_NONE) { | ||
1184 | ret = i915_gem_object_get_fence_reg(obj); | ||
1185 | if (ret) | 1221 | if (ret) |
1186 | goto unlock; | 1222 | goto unlock; |
1187 | } | 1223 | } |
1188 | 1224 | ||
1189 | if (i915_gem_object_is_inactive(obj_priv)) | 1225 | if (obj->tiling_mode == I915_TILING_NONE) |
1190 | list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list); | 1226 | ret = i915_gem_object_put_fence(obj); |
1227 | else | ||
1228 | ret = i915_gem_object_get_fence(obj, NULL); | ||
1229 | if (ret) | ||
1230 | goto unlock; | ||
1231 | |||
1232 | if (i915_gem_object_is_inactive(obj)) | ||
1233 | list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list); | ||
1234 | |||
1235 | obj->fault_mappable = true; | ||
1191 | 1236 | ||
1192 | pfn = ((dev->agp->base + obj_priv->gtt_offset) >> PAGE_SHIFT) + | 1237 | pfn = ((dev->agp->base + obj->gtt_offset) >> PAGE_SHIFT) + |
1193 | page_offset; | 1238 | page_offset; |
1194 | 1239 | ||
1195 | /* Finally, remap it using the new GTT offset */ | 1240 | /* Finally, remap it using the new GTT offset */ |
1196 | ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn); | 1241 | ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn); |
1197 | unlock: | 1242 | unlock: |
1198 | mutex_unlock(&dev->struct_mutex); | 1243 | mutex_unlock(&dev->struct_mutex); |
1199 | 1244 | out: | |
1200 | switch (ret) { | 1245 | switch (ret) { |
1246 | case -EIO: | ||
1247 | case -EAGAIN: | ||
1248 | /* Give the error handler a chance to run and move the | ||
1249 | * objects off the GPU active list. Next time we service the | ||
1250 | * fault, we should be able to transition the page into the | ||
1251 | * GTT without touching the GPU (and so avoid further | ||
1252 | * EIO/EAGAIN). If the GPU is wedged, then there is no issue | ||
1253 | * with coherency, just lost writes. | ||
1254 | */ | ||
1255 | set_need_resched(); | ||
1201 | case 0: | 1256 | case 0: |
1202 | case -ERESTARTSYS: | 1257 | case -ERESTARTSYS: |
1258 | case -EINTR: | ||
1203 | return VM_FAULT_NOPAGE; | 1259 | return VM_FAULT_NOPAGE; |
1204 | case -ENOMEM: | 1260 | case -ENOMEM: |
1205 | case -EAGAIN: | ||
1206 | return VM_FAULT_OOM; | 1261 | return VM_FAULT_OOM; |
1207 | default: | 1262 | default: |
1208 | return VM_FAULT_SIGBUS; | 1263 | return VM_FAULT_SIGBUS; |
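For clarity, the error translation above with the intent of each arm spelled out; the fall-through from -EIO/-EAGAIN into the success arm is deliberate (an annotated restatement, not new logic):

switch (ret) {
case -EIO:
case -EAGAIN:
	set_need_resched();	/* give the error handler a chance to run */
	/* fall through */
case 0:
case -ERESTARTSYS:
case -EINTR:
	return VM_FAULT_NOPAGE;	/* retry the access; the fault recurs if needed */
case -ENOMEM:
	return VM_FAULT_OOM;
default:
	return VM_FAULT_SIGBUS;
}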
@@ -1221,59 +1276,58 @@ unlock: | |||
1221 | * This routine allocates and attaches a fake offset for @obj. | 1276 | * This routine allocates and attaches a fake offset for @obj. |
1222 | */ | 1277 | */ |
1223 | static int | 1278 | static int |
1224 | i915_gem_create_mmap_offset(struct drm_gem_object *obj) | 1279 | i915_gem_create_mmap_offset(struct drm_i915_gem_object *obj) |
1225 | { | 1280 | { |
1226 | struct drm_device *dev = obj->dev; | 1281 | struct drm_device *dev = obj->base.dev; |
1227 | struct drm_gem_mm *mm = dev->mm_private; | 1282 | struct drm_gem_mm *mm = dev->mm_private; |
1228 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | ||
1229 | struct drm_map_list *list; | 1283 | struct drm_map_list *list; |
1230 | struct drm_local_map *map; | 1284 | struct drm_local_map *map; |
1231 | int ret = 0; | 1285 | int ret = 0; |
1232 | 1286 | ||
1233 | /* Set the object up for mmap'ing */ | 1287 | /* Set the object up for mmap'ing */ |
1234 | list = &obj->map_list; | 1288 | list = &obj->base.map_list; |
1235 | list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL); | 1289 | list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL); |
1236 | if (!list->map) | 1290 | if (!list->map) |
1237 | return -ENOMEM; | 1291 | return -ENOMEM; |
1238 | 1292 | ||
1239 | map = list->map; | 1293 | map = list->map; |
1240 | map->type = _DRM_GEM; | 1294 | map->type = _DRM_GEM; |
1241 | map->size = obj->size; | 1295 | map->size = obj->base.size; |
1242 | map->handle = obj; | 1296 | map->handle = obj; |
1243 | 1297 | ||
1244 | /* Get a DRM GEM mmap offset allocated... */ | 1298 | /* Get a DRM GEM mmap offset allocated... */ |
1245 | list->file_offset_node = drm_mm_search_free(&mm->offset_manager, | 1299 | list->file_offset_node = drm_mm_search_free(&mm->offset_manager, |
1246 | obj->size / PAGE_SIZE, 0, 0); | 1300 | obj->base.size / PAGE_SIZE, |
1301 | 0, 0); | ||
1247 | if (!list->file_offset_node) { | 1302 | if (!list->file_offset_node) { |
1248 | DRM_ERROR("failed to allocate offset for bo %d\n", obj->name); | 1303 | DRM_ERROR("failed to allocate offset for bo %d\n", |
1249 | ret = -ENOMEM; | 1304 | obj->base.name); |
1305 | ret = -ENOSPC; | ||
1250 | goto out_free_list; | 1306 | goto out_free_list; |
1251 | } | 1307 | } |
1252 | 1308 | ||
1253 | list->file_offset_node = drm_mm_get_block(list->file_offset_node, | 1309 | list->file_offset_node = drm_mm_get_block(list->file_offset_node, |
1254 | obj->size / PAGE_SIZE, 0); | 1310 | obj->base.size / PAGE_SIZE, |
1311 | 0); | ||
1255 | if (!list->file_offset_node) { | 1312 | if (!list->file_offset_node) { |
1256 | ret = -ENOMEM; | 1313 | ret = -ENOMEM; |
1257 | goto out_free_list; | 1314 | goto out_free_list; |
1258 | } | 1315 | } |
1259 | 1316 | ||
1260 | list->hash.key = list->file_offset_node->start; | 1317 | list->hash.key = list->file_offset_node->start; |
1261 | if (drm_ht_insert_item(&mm->offset_hash, &list->hash)) { | 1318 | ret = drm_ht_insert_item(&mm->offset_hash, &list->hash); |
1319 | if (ret) { | ||
1262 | DRM_ERROR("failed to add to map hash\n"); | 1320 | DRM_ERROR("failed to add to map hash\n"); |
1263 | ret = -ENOMEM; | ||
1264 | goto out_free_mm; | 1321 | goto out_free_mm; |
1265 | } | 1322 | } |
1266 | 1323 | ||
1267 | /* By now we should be all set, any drm_mmap request on the offset | ||
1268 | * below will get to our mmap & fault handler */ | ||
1269 | obj_priv->mmap_offset = ((uint64_t) list->hash.key) << PAGE_SHIFT; | ||
1270 | |||
1271 | return 0; | 1324 | return 0; |
1272 | 1325 | ||
1273 | out_free_mm: | 1326 | out_free_mm: |
1274 | drm_mm_put_block(list->file_offset_node); | 1327 | drm_mm_put_block(list->file_offset_node); |
1275 | out_free_list: | 1328 | out_free_list: |
1276 | kfree(list->map); | 1329 | kfree(list->map); |
1330 | list->map = NULL; | ||
1277 | 1331 | ||
1278 | return ret; | 1332 | return ret; |
1279 | } | 1333 | } |
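The hash key allocated above, shifted left by PAGE_SHIFT, is the "fake offset" user space later hands to mmap() on the DRM fd. A hypothetical user-space counterpart (gem_mmap_gtt is an illustrative helper; the ioctl and struct are standard i915 uapi):

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <drm/i915_drm.h>

/* Ask the kernel for the fake offset created above, then map the
 * object through the DRM fd; faults are served by i915_gem_fault(). */
static void *gem_mmap_gtt(int fd, uint32_t handle, size_t size)
{
	struct drm_i915_gem_mmap_gtt arg;

	memset(&arg, 0, sizeof(arg));
	arg.handle = handle;
	if (ioctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &arg))
		return MAP_FAILED;

	/* arg.offset is hash.key << PAGE_SHIFT from the allocator above */
	return mmap(NULL, size, PROT_READ | PROT_WRITE,
		    MAP_SHARED, fd, arg.offset);
}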
@@ -1293,38 +1347,51 @@ out_free_list: | |||
1293 | * fixup by i915_gem_fault(). | 1347 | * fixup by i915_gem_fault(). |
1294 | */ | 1348 | */ |
1295 | void | 1349 | void |
1296 | i915_gem_release_mmap(struct drm_gem_object *obj) | 1350 | i915_gem_release_mmap(struct drm_i915_gem_object *obj) |
1297 | { | 1351 | { |
1298 | struct drm_device *dev = obj->dev; | 1352 | if (!obj->fault_mappable) |
1299 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 1353 | return; |
1354 | |||
1355 | if (obj->base.dev->dev_mapping) | ||
1356 | unmap_mapping_range(obj->base.dev->dev_mapping, | ||
1357 | (loff_t)obj->base.map_list.hash.key<<PAGE_SHIFT, | ||
1358 | obj->base.size, 1); | ||
1300 | 1359 | ||
1301 | if (dev->dev_mapping) | 1360 | obj->fault_mappable = false; |
1302 | unmap_mapping_range(dev->dev_mapping, | ||
1303 | obj_priv->mmap_offset, obj->size, 1); | ||
1304 | } | 1361 | } |
1305 | 1362 | ||
1306 | static void | 1363 | static void |
1307 | i915_gem_free_mmap_offset(struct drm_gem_object *obj) | 1364 | i915_gem_free_mmap_offset(struct drm_i915_gem_object *obj) |
1308 | { | 1365 | { |
1309 | struct drm_device *dev = obj->dev; | 1366 | struct drm_device *dev = obj->base.dev; |
1310 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | ||
1311 | struct drm_gem_mm *mm = dev->mm_private; | 1367 | struct drm_gem_mm *mm = dev->mm_private; |
1312 | struct drm_map_list *list; | 1368 | struct drm_map_list *list = &obj->base.map_list; |
1313 | 1369 | ||
1314 | list = &obj->map_list; | ||
1315 | drm_ht_remove_item(&mm->offset_hash, &list->hash); | 1370 | drm_ht_remove_item(&mm->offset_hash, &list->hash); |
1371 | drm_mm_put_block(list->file_offset_node); | ||
1372 | kfree(list->map); | ||
1373 | list->map = NULL; | ||
1374 | } | ||
1316 | 1375 | ||
1317 | if (list->file_offset_node) { | 1376 | static uint32_t |
1318 | drm_mm_put_block(list->file_offset_node); | 1377 | i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode) |
1319 | list->file_offset_node = NULL; | 1378 | { |
1320 | } | 1379 | uint32_t gtt_size; |
1321 | 1380 | ||
1322 | if (list->map) { | 1381 | if (INTEL_INFO(dev)->gen >= 4 || |
1323 | kfree(list->map); | 1382 | tiling_mode == I915_TILING_NONE) |
1324 | list->map = NULL; | 1383 | return size; |
1325 | } | 1384 | |
1385 | /* Previous chips need a power-of-two fence region when tiling */ | ||
1386 | if (INTEL_INFO(dev)->gen == 3) | ||
1387 | gtt_size = 1024*1024; | ||
1388 | else | ||
1389 | gtt_size = 512*1024; | ||
1390 | |||
1391 | while (gtt_size < size) | ||
1392 | gtt_size <<= 1; | ||
1326 | 1393 | ||
1327 | obj_priv->mmap_offset = 0; | 1394 | return gtt_size; |
1328 | } | 1395 | } |
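
The gen < 4 sizing loop above rounds a tiled object up to the next power-of-two fence region. A minimal standalone model of that computation (the 1 MiB / 512 KiB bases come straight from the hunk; the example value is illustrative):

    #include <stdint.h>

    /* Start from the minimum fence region (1 MiB on gen3, 512 KiB on
     * earlier chips) and double until the object fits. */
    static uint32_t fence_size(uint32_t size, int gen3)
    {
            uint32_t gtt_size = gen3 ? 1024 * 1024 : 512 * 1024;

            while (gtt_size < size)
                    gtt_size <<= 1;

            return gtt_size;
    }

    /* e.g. a 1.5 MiB tiled object on gen3 needs a 2 MiB fence region:
     * fence_size(1536 * 1024, 1) == 2 * 1024 * 1024 */
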
1329 | 1396 | ||
1330 | /** | 1397 | /** |
@@ -1332,42 +1399,111 @@ i915_gem_free_mmap_offset(struct drm_gem_object *obj) | |||
1332 | * @obj: object to check | 1399 | * @obj: object to check |
1333 | * | 1400 | * |
1334 | * Return the required GTT alignment for an object, taking into account | 1401 | * Return the required GTT alignment for an object, taking into account |
1335 | * potential fence register mapping if needed. | 1402 | * potential fence register mapping. |
1336 | */ | 1403 | */ |
1337 | static uint32_t | 1404 | static uint32_t |
1338 | i915_gem_get_gtt_alignment(struct drm_gem_object *obj) | 1405 | i915_gem_get_gtt_alignment(struct drm_device *dev, |
1406 | uint32_t size, | ||
1407 | int tiling_mode) | ||
1339 | { | 1408 | { |
1340 | struct drm_device *dev = obj->dev; | ||
1341 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | ||
1342 | int start, i; | ||
1343 | |||
1344 | /* | 1409 | /* |
1345 | * Minimum alignment is 4k (GTT page size), but might be greater | 1410 | * Minimum alignment is 4k (GTT page size), but might be greater |
1346 | * if a fence register is needed for the object. | 1411 | * if a fence register is needed for the object. |
1347 | */ | 1412 | */ |
1348 | if (IS_I965G(dev) || obj_priv->tiling_mode == I915_TILING_NONE) | 1413 | if (INTEL_INFO(dev)->gen >= 4 || |
1414 | tiling_mode == I915_TILING_NONE) | ||
1349 | return 4096; | 1415 | return 4096; |
1350 | 1416 | ||
1351 | /* | 1417 | /* |
1352 | * Previous chips need to be aligned to the size of the smallest | 1418 | * Previous chips need to be aligned to the size of the smallest |
1353 | * fence register that can contain the object. | 1419 | * fence register that can contain the object. |
1354 | */ | 1420 | */ |
1355 | if (IS_I9XX(dev)) | 1421 | return i915_gem_get_gtt_size(dev, size, tiling_mode); |
1356 | start = 1024*1024; | 1422 | } |
1357 | else | ||
1358 | start = 512*1024; | ||
1359 | 1423 | ||
1360 | for (i = start; i < obj->size; i <<= 1) | 1424 | /** |
1361 | ; | 1425 | * i915_gem_get_unfenced_gtt_alignment - return required GTT alignment for an |
1426 | * unfenced object | ||
1427 | * @dev: the device | ||
1428 | * @size: size of the object | ||
1429 | * @tiling_mode: tiling mode of the object | ||
1430 | * | ||
1431 | * Return the required GTT alignment for an object, only taking into account | ||
1432 | * unfenced tiled surface requirements. | ||
1433 | */ | ||
1434 | uint32_t | ||
1435 | i915_gem_get_unfenced_gtt_alignment(struct drm_device *dev, | ||
1436 | uint32_t size, | ||
1437 | int tiling_mode) | ||
1438 | { | ||
1439 | /* | ||
1440 | * Minimum alignment is 4k (GTT page size) for sane hw. | ||
1441 | */ | ||
1442 | if (INTEL_INFO(dev)->gen >= 4 || IS_G33(dev) || | ||
1443 | tiling_mode == I915_TILING_NONE) | ||
1444 | return 4096; | ||
1362 | 1445 | ||
1363 | return i; | 1446 | /* Previous hardware however needs to be aligned to a power-of-two |
1447 | * tile height. The simplest method for determining this is to reuse | ||
1448 | * the power-of-two object size. | ||
1449 | */ | ||
1450 | return i915_gem_get_gtt_size(dev, size, tiling_mode); | ||
1451 | } | ||
1452 | |||
1453 | int | ||
1454 | i915_gem_mmap_gtt(struct drm_file *file, | ||
1455 | struct drm_device *dev, | ||
1456 | uint32_t handle, | ||
1457 | uint64_t *offset) | ||
1458 | { | ||
1459 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1460 | struct drm_i915_gem_object *obj; | ||
1461 | int ret; | ||
1462 | |||
1463 | if (!(dev->driver->driver_features & DRIVER_GEM)) | ||
1464 | return -ENODEV; | ||
1465 | |||
1466 | ret = i915_mutex_lock_interruptible(dev); | ||
1467 | if (ret) | ||
1468 | return ret; | ||
1469 | |||
1470 | obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle)); | ||
1471 | if (&obj->base == NULL) { | ||
1472 | ret = -ENOENT; | ||
1473 | goto unlock; | ||
1474 | } | ||
1475 | |||
1476 | if (obj->base.size > dev_priv->mm.gtt_mappable_end) { | ||
1477 | ret = -E2BIG; | ||
1478 | goto unlock; | ||
1479 | } | ||
1480 | |||
1481 | if (obj->madv != I915_MADV_WILLNEED) { | ||
1482 | DRM_ERROR("Attempting to mmap a purgeable buffer\n"); | ||
1483 | ret = -EINVAL; | ||
1484 | goto out; | ||
1485 | } | ||
1486 | |||
1487 | if (!obj->base.map_list.map) { | ||
1488 | ret = i915_gem_create_mmap_offset(obj); | ||
1489 | if (ret) | ||
1490 | goto out; | ||
1491 | } | ||
1492 | |||
1493 | *offset = (u64)obj->base.map_list.hash.key << PAGE_SHIFT; | ||
1494 | |||
1495 | out: | ||
1496 | drm_gem_object_unreference(&obj->base); | ||
1497 | unlock: | ||
1498 | mutex_unlock(&dev->struct_mutex); | ||
1499 | return ret; | ||
1364 | } | 1500 | } |
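
On the userspace side the value written to *offset is only a token, not an address. A hedged sketch of how a client would consume it, assuming the uapi header drm/i915_drm.h and an already-created GEM handle:

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <sys/mman.h>
    #include <drm/i915_drm.h>

    /* The ioctl fills arg.offset with the fake offset minted by
     * i915_gem_mmap_gtt(); mmap() on the DRM fd at that offset lands
     * in drm_gem_mmap(), and the fault handler does the real work. */
    static void *bo_map_gtt(int fd, uint32_t handle, size_t size)
    {
            struct drm_i915_gem_mmap_gtt arg = { .handle = handle };
            void *ptr;

            if (ioctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &arg))
                    return NULL;

            ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
                       fd, (off_t)arg.offset);
            return ptr == MAP_FAILED ? NULL : ptr;
    }
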
1365 | 1501 | ||
1366 | /** | 1502 | /** |
1367 | * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing | 1503 | * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing |
1368 | * @dev: DRM device | 1504 | * @dev: DRM device |
1369 | * @data: GTT mapping ioctl data | 1505 | * @data: GTT mapping ioctl data |
1370 | * @file_priv: GEM object info | 1506 | * @file: GEM object info |
1371 | * | 1507 | * |
1372 | * Simply returns the fake offset to userspace so it can mmap it. | 1508 | * Simply returns the fake offset to userspace so it can mmap it. |
1373 | * The mmap call will end up in drm_gem_mmap(), which will set things | 1509 | * The mmap call will end up in drm_gem_mmap(), which will set things |
@@ -1380,236 +1516,233 @@ i915_gem_get_gtt_alignment(struct drm_gem_object *obj) | |||
1380 | */ | 1516 | */ |
1381 | int | 1517 | int |
1382 | i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data, | 1518 | i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data, |
1383 | struct drm_file *file_priv) | 1519 | struct drm_file *file) |
1384 | { | 1520 | { |
1385 | struct drm_i915_gem_mmap_gtt *args = data; | 1521 | struct drm_i915_gem_mmap_gtt *args = data; |
1386 | struct drm_gem_object *obj; | ||
1387 | struct drm_i915_gem_object *obj_priv; | ||
1388 | int ret; | ||
1389 | 1522 | ||
1390 | if (!(dev->driver->driver_features & DRIVER_GEM)) | 1523 | if (!(dev->driver->driver_features & DRIVER_GEM)) |
1391 | return -ENODEV; | 1524 | return -ENODEV; |
1392 | 1525 | ||
1393 | obj = drm_gem_object_lookup(dev, file_priv, args->handle); | 1526 | return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset); |
1394 | if (obj == NULL) | 1527 | } |
1395 | return -ENOENT; | ||
1396 | |||
1397 | mutex_lock(&dev->struct_mutex); | ||
1398 | 1528 | ||
1399 | obj_priv = to_intel_bo(obj); | ||
1400 | 1529 | ||
1401 | if (obj_priv->madv != I915_MADV_WILLNEED) { | 1530 | static int |
1402 | DRM_ERROR("Attempting to mmap a purgeable buffer\n"); | 1531 | i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj, |
1403 | drm_gem_object_unreference(obj); | 1532 | gfp_t gfpmask) |
1404 | mutex_unlock(&dev->struct_mutex); | 1533 | { |
1405 | return -EINVAL; | 1534 | int page_count, i; |
1406 | } | 1535 | struct address_space *mapping; |
1536 | struct inode *inode; | ||
1537 | struct page *page; | ||
1407 | 1538 | ||
1539 | /* Get the list of pages out of our struct file. They'll be pinned | ||
1540 | * at this point until we release them. | ||
1541 | */ | ||
1542 | page_count = obj->base.size / PAGE_SIZE; | ||
1543 | BUG_ON(obj->pages != NULL); | ||
1544 | obj->pages = drm_malloc_ab(page_count, sizeof(struct page *)); | ||
1545 | if (obj->pages == NULL) | ||
1546 | return -ENOMEM; | ||
1408 | 1547 | ||
1409 | if (!obj_priv->mmap_offset) { | 1548 | inode = obj->base.filp->f_path.dentry->d_inode; |
1410 | ret = i915_gem_create_mmap_offset(obj); | 1549 | mapping = inode->i_mapping; |
1411 | if (ret) { | 1550 | gfpmask |= mapping_gfp_mask(mapping); |
1412 | drm_gem_object_unreference(obj); | ||
1413 | mutex_unlock(&dev->struct_mutex); | ||
1414 | return ret; | ||
1415 | } | ||
1416 | } | ||
1417 | 1551 | ||
1418 | args->offset = obj_priv->mmap_offset; | 1552 | for (i = 0; i < page_count; i++) { |
1553 | page = shmem_read_mapping_page_gfp(mapping, i, gfpmask); | ||
1554 | if (IS_ERR(page)) | ||
1555 | goto err_pages; | ||
1419 | 1556 | ||
1420 | /* | 1557 | obj->pages[i] = page; |
1421 | * Pull it into the GTT so that we have a page list (makes the | ||
1422 | * initial fault faster and any subsequent flushing possible). | ||
1423 | */ | ||
1424 | if (!obj_priv->agp_mem) { | ||
1425 | ret = i915_gem_object_bind_to_gtt(obj, 0); | ||
1426 | if (ret) { | ||
1427 | drm_gem_object_unreference(obj); | ||
1428 | mutex_unlock(&dev->struct_mutex); | ||
1429 | return ret; | ||
1430 | } | ||
1431 | } | 1558 | } |
1432 | 1559 | ||
1433 | drm_gem_object_unreference(obj); | 1560 | if (obj->tiling_mode != I915_TILING_NONE) |
1434 | mutex_unlock(&dev->struct_mutex); | 1561 | i915_gem_object_do_bit_17_swizzle(obj); |
1435 | 1562 | ||
1436 | return 0; | 1563 | return 0; |
1564 | |||
1565 | err_pages: | ||
1566 | while (i--) | ||
1567 | page_cache_release(obj->pages[i]); | ||
1568 | |||
1569 | drm_free_large(obj->pages); | ||
1570 | obj->pages = NULL; | ||
1571 | return PTR_ERR(page); | ||
1437 | } | 1572 | } |
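
The err_pages path uses the acquire-all-or-unwind idiom: on the first failing page, every page already pinned is released in reverse order. The same shape in a self-contained sketch, with malloc standing in for the shmem page lookup:

    #include <stdlib.h>

    static int acquire_all(void **slots, int n)
    {
            int i;

            for (i = 0; i < n; i++) {
                    slots[i] = malloc(64);  /* stands in for the page lookup */
                    if (slots[i] == NULL)
                            goto err;
            }
            return 0;

    err:
            while (i--)                     /* release only what we acquired */
                    free(slots[i]);
            return -1;
    }
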
1438 | 1573 | ||
1439 | void | 1574 | static void |
1440 | i915_gem_object_put_pages(struct drm_gem_object *obj) | 1575 | i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj) |
1441 | { | 1576 | { |
1442 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 1577 | int page_count = obj->base.size / PAGE_SIZE; |
1443 | int page_count = obj->size / PAGE_SIZE; | ||
1444 | int i; | 1578 | int i; |
1445 | 1579 | ||
1446 | BUG_ON(obj_priv->pages_refcount == 0); | 1580 | BUG_ON(obj->madv == __I915_MADV_PURGED); |
1447 | BUG_ON(obj_priv->madv == __I915_MADV_PURGED); | ||
1448 | 1581 | ||
1449 | if (--obj_priv->pages_refcount != 0) | 1582 | if (obj->tiling_mode != I915_TILING_NONE) |
1450 | return; | ||
1451 | |||
1452 | if (obj_priv->tiling_mode != I915_TILING_NONE) | ||
1453 | i915_gem_object_save_bit_17_swizzle(obj); | 1583 | i915_gem_object_save_bit_17_swizzle(obj); |
1454 | 1584 | ||
1455 | if (obj_priv->madv == I915_MADV_DONTNEED) | 1585 | if (obj->madv == I915_MADV_DONTNEED) |
1456 | obj_priv->dirty = 0; | 1586 | obj->dirty = 0; |
1457 | 1587 | ||
1458 | for (i = 0; i < page_count; i++) { | 1588 | for (i = 0; i < page_count; i++) { |
1459 | if (obj_priv->dirty) | 1589 | if (obj->dirty) |
1460 | set_page_dirty(obj_priv->pages[i]); | 1590 | set_page_dirty(obj->pages[i]); |
1461 | 1591 | ||
1462 | if (obj_priv->madv == I915_MADV_WILLNEED) | 1592 | if (obj->madv == I915_MADV_WILLNEED) |
1463 | mark_page_accessed(obj_priv->pages[i]); | 1593 | mark_page_accessed(obj->pages[i]); |
1464 | 1594 | ||
1465 | page_cache_release(obj_priv->pages[i]); | 1595 | page_cache_release(obj->pages[i]); |
1466 | } | 1596 | } |
1467 | obj_priv->dirty = 0; | 1597 | obj->dirty = 0; |
1468 | 1598 | ||
1469 | drm_free_large(obj_priv->pages); | 1599 | drm_free_large(obj->pages); |
1470 | obj_priv->pages = NULL; | 1600 | obj->pages = NULL; |
1471 | } | 1601 | } |
1472 | 1602 | ||
1473 | static void | 1603 | void |
1474 | i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno, | 1604 | i915_gem_object_move_to_active(struct drm_i915_gem_object *obj, |
1475 | struct intel_ring_buffer *ring) | 1605 | struct intel_ring_buffer *ring, |
1606 | u32 seqno) | ||
1476 | { | 1607 | { |
1477 | struct drm_device *dev = obj->dev; | 1608 | struct drm_device *dev = obj->base.dev; |
1478 | drm_i915_private_t *dev_priv = dev->dev_private; | 1609 | struct drm_i915_private *dev_priv = dev->dev_private; |
1479 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 1610 | |
1480 | BUG_ON(ring == NULL); | 1611 | BUG_ON(ring == NULL); |
1481 | obj_priv->ring = ring; | 1612 | obj->ring = ring; |
1482 | 1613 | ||
1483 | /* Add a reference if we're newly entering the active list. */ | 1614 | /* Add a reference if we're newly entering the active list. */ |
1484 | if (!obj_priv->active) { | 1615 | if (!obj->active) { |
1485 | drm_gem_object_reference(obj); | 1616 | drm_gem_object_reference(&obj->base); |
1486 | obj_priv->active = 1; | 1617 | obj->active = 1; |
1487 | } | 1618 | } |
1619 | |||
1488 | /* Move from whatever list we were on to the tail of execution. */ | 1620 | /* Move from whatever list we were on to the tail of execution. */ |
1489 | spin_lock(&dev_priv->mm.active_list_lock); | 1621 | list_move_tail(&obj->mm_list, &dev_priv->mm.active_list); |
1490 | list_move_tail(&obj_priv->list, &ring->active_list); | 1622 | list_move_tail(&obj->ring_list, &ring->active_list); |
1491 | spin_unlock(&dev_priv->mm.active_list_lock); | 1623 | |
1492 | obj_priv->last_rendering_seqno = seqno; | 1624 | obj->last_rendering_seqno = seqno; |
1625 | if (obj->fenced_gpu_access) { | ||
1626 | struct drm_i915_fence_reg *reg; | ||
1627 | |||
1628 | BUG_ON(obj->fence_reg == I915_FENCE_REG_NONE); | ||
1629 | |||
1630 | obj->last_fenced_seqno = seqno; | ||
1631 | obj->last_fenced_ring = ring; | ||
1632 | |||
1633 | reg = &dev_priv->fence_regs[obj->fence_reg]; | ||
1634 | list_move_tail(®->lru_list, &dev_priv->mm.fence_list); | ||
1635 | } | ||
1493 | } | 1636 | } |
1494 | 1637 | ||
1495 | static void | 1638 | static void |
1496 | i915_gem_object_move_to_flushing(struct drm_gem_object *obj) | 1639 | i915_gem_object_move_off_active(struct drm_i915_gem_object *obj) |
1497 | { | 1640 | { |
1498 | struct drm_device *dev = obj->dev; | 1641 | list_del_init(&obj->ring_list); |
1642 | obj->last_rendering_seqno = 0; | ||
1643 | } | ||
1644 | |||
1645 | static void | ||
1646 | i915_gem_object_move_to_flushing(struct drm_i915_gem_object *obj) | ||
1647 | { | ||
1648 | struct drm_device *dev = obj->base.dev; | ||
1499 | drm_i915_private_t *dev_priv = dev->dev_private; | 1649 | drm_i915_private_t *dev_priv = dev->dev_private; |
1500 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | ||
1501 | 1650 | ||
1502 | BUG_ON(!obj_priv->active); | 1651 | BUG_ON(!obj->active); |
1503 | list_move_tail(&obj_priv->list, &dev_priv->mm.flushing_list); | 1652 | list_move_tail(&obj->mm_list, &dev_priv->mm.flushing_list); |
1504 | obj_priv->last_rendering_seqno = 0; | 1653 | |
1654 | i915_gem_object_move_off_active(obj); | ||
1655 | } | ||
1656 | |||
1657 | static void | ||
1658 | i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj) | ||
1659 | { | ||
1660 | struct drm_device *dev = obj->base.dev; | ||
1661 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1662 | |||
1663 | if (obj->pin_count != 0) | ||
1664 | list_move_tail(&obj->mm_list, &dev_priv->mm.pinned_list); | ||
1665 | else | ||
1666 | list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list); | ||
1667 | |||
1668 | BUG_ON(!list_empty(&obj->gpu_write_list)); | ||
1669 | BUG_ON(!obj->active); | ||
1670 | obj->ring = NULL; | ||
1671 | |||
1672 | i915_gem_object_move_off_active(obj); | ||
1673 | obj->fenced_gpu_access = false; | ||
1674 | |||
1675 | obj->active = 0; | ||
1676 | obj->pending_gpu_write = false; | ||
1677 | drm_gem_object_unreference(&obj->base); | ||
1678 | |||
1679 | WARN_ON(i915_verify_lists(dev)); | ||
1505 | } | 1680 | } |
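
Note the reference discipline across the two moves: i915_gem_object_move_to_active() takes exactly one extra GEM reference the first time an object turns active, no matter how many requests touch it afterwards, and i915_gem_object_move_to_inactive() drops it. A minimal model of that invariant:

    struct bo { int refcount; int active; };

    /* Only the first activation takes the reference ... */
    static void mark_active(struct bo *bo)
    {
            if (!bo->active) {
                    bo->refcount++;
                    bo->active = 1;
            }
    }

    /* ... and deactivation drops it, possibly freeing the object. */
    static void mark_inactive(struct bo *bo)
    {
            bo->active = 0;
            bo->refcount--;
    }
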
1506 | 1681 | ||
1507 | /* Immediately discard the backing storage */ | 1682 | /* Immediately discard the backing storage */ |
1508 | static void | 1683 | static void |
1509 | i915_gem_object_truncate(struct drm_gem_object *obj) | 1684 | i915_gem_object_truncate(struct drm_i915_gem_object *obj) |
1510 | { | 1685 | { |
1511 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | ||
1512 | struct inode *inode; | 1686 | struct inode *inode; |
1513 | 1687 | ||
1514 | /* Our goal here is to return as much of the memory as | 1688 | /* Our goal here is to return as much of the memory as |
1515 | * is possible back to the system as we are called from OOM. | 1689 | * is possible back to the system as we are called from OOM. |
1516 | * To do this we must instruct the shmfs to drop all of its | 1690 | * To do this we must instruct the shmfs to drop all of its |
1517 | * backing pages, *now*. Here we mirror the actions taken | 1691 | * backing pages, *now*. |
1518 | * by shmem_delete_inode() to release the backing store. | ||
1519 | */ | 1692 | */ |
1520 | inode = obj->filp->f_path.dentry->d_inode; | 1693 | inode = obj->base.filp->f_path.dentry->d_inode; |
1521 | truncate_inode_pages(inode->i_mapping, 0); | 1694 | shmem_truncate_range(inode, 0, (loff_t)-1); |
1522 | if (inode->i_op->truncate_range) | ||
1523 | inode->i_op->truncate_range(inode, 0, (loff_t)-1); | ||
1524 | 1695 | ||
1525 | obj_priv->madv = __I915_MADV_PURGED; | 1696 | obj->madv = __I915_MADV_PURGED; |
1526 | } | 1697 | } |
1527 | 1698 | ||
1528 | static inline int | 1699 | static inline int |
1529 | i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj_priv) | 1700 | i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj) |
1530 | { | ||
1531 | return obj_priv->madv == I915_MADV_DONTNEED; | ||
1532 | } | ||
1533 | |||
1534 | static void | ||
1535 | i915_gem_object_move_to_inactive(struct drm_gem_object *obj) | ||
1536 | { | 1701 | { |
1537 | struct drm_device *dev = obj->dev; | 1702 | return obj->madv == I915_MADV_DONTNEED; |
1538 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
1539 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | ||
1540 | |||
1541 | i915_verify_inactive(dev, __FILE__, __LINE__); | ||
1542 | if (obj_priv->pin_count != 0) | ||
1543 | list_del_init(&obj_priv->list); | ||
1544 | else | ||
1545 | list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list); | ||
1546 | |||
1547 | BUG_ON(!list_empty(&obj_priv->gpu_write_list)); | ||
1548 | |||
1549 | obj_priv->last_rendering_seqno = 0; | ||
1550 | obj_priv->ring = NULL; | ||
1551 | if (obj_priv->active) { | ||
1552 | obj_priv->active = 0; | ||
1553 | drm_gem_object_unreference(obj); | ||
1554 | } | ||
1555 | i915_verify_inactive(dev, __FILE__, __LINE__); | ||
1556 | } | 1703 | } |
1557 | 1704 | ||
1558 | static void | 1705 | static void |
1559 | i915_gem_process_flushing_list(struct drm_device *dev, | 1706 | i915_gem_process_flushing_list(struct intel_ring_buffer *ring, |
1560 | uint32_t flush_domains, uint32_t seqno, | 1707 | uint32_t flush_domains) |
1561 | struct intel_ring_buffer *ring) | ||
1562 | { | 1708 | { |
1563 | drm_i915_private_t *dev_priv = dev->dev_private; | 1709 | struct drm_i915_gem_object *obj, *next; |
1564 | struct drm_i915_gem_object *obj_priv, *next; | ||
1565 | 1710 | ||
1566 | list_for_each_entry_safe(obj_priv, next, | 1711 | list_for_each_entry_safe(obj, next, |
1567 | &dev_priv->mm.gpu_write_list, | 1712 | &ring->gpu_write_list, |
1568 | gpu_write_list) { | 1713 | gpu_write_list) { |
1569 | struct drm_gem_object *obj = &obj_priv->base; | 1714 | if (obj->base.write_domain & flush_domains) { |
1570 | 1715 | uint32_t old_write_domain = obj->base.write_domain; | |
1571 | if ((obj->write_domain & flush_domains) == | 1716 | |
1572 | obj->write_domain && | 1717 | obj->base.write_domain = 0; |
1573 | obj_priv->ring->ring_flag == ring->ring_flag) { | 1718 | list_del_init(&obj->gpu_write_list); |
1574 | uint32_t old_write_domain = obj->write_domain; | 1719 | i915_gem_object_move_to_active(obj, ring, |
1575 | 1720 | i915_gem_next_request_seqno(ring)); | |
1576 | obj->write_domain = 0; | ||
1577 | list_del_init(&obj_priv->gpu_write_list); | ||
1578 | i915_gem_object_move_to_active(obj, seqno, ring); | ||
1579 | |||
1580 | /* update the fence lru list */ | ||
1581 | if (obj_priv->fence_reg != I915_FENCE_REG_NONE) { | ||
1582 | struct drm_i915_fence_reg *reg = | ||
1583 | &dev_priv->fence_regs[obj_priv->fence_reg]; | ||
1584 | list_move_tail(®->lru_list, | ||
1585 | &dev_priv->mm.fence_list); | ||
1586 | } | ||
1587 | 1721 | ||
1588 | trace_i915_gem_object_change_domain(obj, | 1722 | trace_i915_gem_object_change_domain(obj, |
1589 | obj->read_domains, | 1723 | obj->base.read_domains, |
1590 | old_write_domain); | 1724 | old_write_domain); |
1591 | } | 1725 | } |
1592 | } | 1726 | } |
1593 | } | 1727 | } |
1594 | 1728 | ||
1595 | uint32_t | 1729 | int |
1596 | i915_add_request(struct drm_device *dev, struct drm_file *file_priv, | 1730 | i915_add_request(struct intel_ring_buffer *ring, |
1597 | uint32_t flush_domains, struct intel_ring_buffer *ring) | 1731 | struct drm_file *file, |
1732 | struct drm_i915_gem_request *request) | ||
1598 | { | 1733 | { |
1599 | drm_i915_private_t *dev_priv = dev->dev_private; | 1734 | drm_i915_private_t *dev_priv = ring->dev->dev_private; |
1600 | struct drm_i915_file_private *i915_file_priv = NULL; | ||
1601 | struct drm_i915_gem_request *request; | ||
1602 | uint32_t seqno; | 1735 | uint32_t seqno; |
1603 | int was_empty; | 1736 | int was_empty; |
1737 | int ret; | ||
1604 | 1738 | ||
1605 | if (file_priv != NULL) | 1739 | BUG_ON(request == NULL); |
1606 | i915_file_priv = file_priv->driver_priv; | ||
1607 | 1740 | ||
1608 | request = kzalloc(sizeof(*request), GFP_KERNEL); | 1741 | ret = ring->add_request(ring, &seqno); |
1609 | if (request == NULL) | 1742 | if (ret) |
1610 | return 0; | 1743 | return ret; |
1611 | 1744 | ||
1612 | seqno = ring->add_request(dev, ring, file_priv, flush_domains); | 1745 | trace_i915_gem_request_add(ring, seqno); |
1613 | 1746 | ||
1614 | request->seqno = seqno; | 1747 | request->seqno = seqno; |
1615 | request->ring = ring; | 1748 | request->ring = ring; |
@@ -1617,260 +1750,353 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv, | |||
1617 | was_empty = list_empty(&ring->request_list); | 1750 | was_empty = list_empty(&ring->request_list); |
1618 | list_add_tail(&request->list, &ring->request_list); | 1751 | list_add_tail(&request->list, &ring->request_list); |
1619 | 1752 | ||
1620 | if (i915_file_priv) { | 1753 | if (file) { |
1754 | struct drm_i915_file_private *file_priv = file->driver_priv; | ||
1755 | |||
1756 | spin_lock(&file_priv->mm.lock); | ||
1757 | request->file_priv = file_priv; | ||
1621 | list_add_tail(&request->client_list, | 1758 | list_add_tail(&request->client_list, |
1622 | &i915_file_priv->mm.request_list); | 1759 | &file_priv->mm.request_list); |
1623 | } else { | 1760 | spin_unlock(&file_priv->mm.lock); |
1624 | INIT_LIST_HEAD(&request->client_list); | ||
1625 | } | 1761 | } |
1626 | 1762 | ||
1627 | /* Associate any objects on the flushing list matching the write | 1763 | ring->outstanding_lazy_request = false; |
1628 | * domain we're flushing with our flush. | ||
1629 | */ | ||
1630 | if (flush_domains != 0) | ||
1631 | i915_gem_process_flushing_list(dev, flush_domains, seqno, ring); | ||
1632 | 1764 | ||
1633 | if (!dev_priv->mm.suspended) { | 1765 | if (!dev_priv->mm.suspended) { |
1634 | mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD); | 1766 | mod_timer(&dev_priv->hangcheck_timer, |
1767 | jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)); | ||
1635 | if (was_empty) | 1768 | if (was_empty) |
1636 | queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ); | 1769 | queue_delayed_work(dev_priv->wq, |
1770 | &dev_priv->mm.retire_work, HZ); | ||
1637 | } | 1771 | } |
1638 | return seqno; | 1772 | return 0; |
1639 | } | 1773 | } |
1640 | 1774 | ||
1641 | /** | 1775 | static inline void |
1642 | * Command execution barrier | 1776 | i915_gem_request_remove_from_client(struct drm_i915_gem_request *request) |
1643 | * | ||
1644 | * Ensures that all commands in the ring are finished | ||
1645 | * before signalling the CPU | ||
1646 | */ | ||
1647 | static uint32_t | ||
1648 | i915_retire_commands(struct drm_device *dev, struct intel_ring_buffer *ring) | ||
1649 | { | 1777 | { |
1650 | uint32_t flush_domains = 0; | 1778 | struct drm_i915_file_private *file_priv = request->file_priv; |
1651 | 1779 | ||
1652 | /* The sampler always gets flushed on i965 (sigh) */ | 1780 | if (!file_priv) |
1653 | if (IS_I965G(dev)) | 1781 | return; |
1654 | flush_domains |= I915_GEM_DOMAIN_SAMPLER; | ||
1655 | 1782 | ||
1656 | ring->flush(dev, ring, | 1783 | spin_lock(&file_priv->mm.lock); |
1657 | I915_GEM_DOMAIN_COMMAND, flush_domains); | 1784 | if (request->file_priv) { |
1658 | return flush_domains; | 1785 | list_del(&request->client_list); |
1786 | request->file_priv = NULL; | ||
1787 | } | ||
1788 | spin_unlock(&file_priv->mm.lock); | ||
1659 | } | 1789 | } |
1660 | 1790 | ||
1661 | /** | 1791 | static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv, |
1662 | * Moves buffers associated only with the given active seqno from the active | 1792 | struct intel_ring_buffer *ring) |
1663 | * to inactive list, potentially freeing them. | ||
1664 | */ | ||
1665 | static void | ||
1666 | i915_gem_retire_request(struct drm_device *dev, | ||
1667 | struct drm_i915_gem_request *request) | ||
1668 | { | 1793 | { |
1669 | drm_i915_private_t *dev_priv = dev->dev_private; | 1794 | while (!list_empty(&ring->request_list)) { |
1795 | struct drm_i915_gem_request *request; | ||
1670 | 1796 | ||
1671 | trace_i915_gem_request_retire(dev, request->seqno); | 1797 | request = list_first_entry(&ring->request_list, |
1798 | struct drm_i915_gem_request, | ||
1799 | list); | ||
1672 | 1800 | ||
1673 | /* Move any buffers on the active list that are no longer referenced | 1801 | list_del(&request->list); |
1674 | * by the ringbuffer to the flushing/inactive lists as appropriate. | 1802 | i915_gem_request_remove_from_client(request); |
1675 | */ | 1803 | kfree(request); |
1676 | spin_lock(&dev_priv->mm.active_list_lock); | 1804 | } |
1677 | while (!list_empty(&request->ring->active_list)) { | ||
1678 | struct drm_gem_object *obj; | ||
1679 | struct drm_i915_gem_object *obj_priv; | ||
1680 | |||
1681 | obj_priv = list_first_entry(&request->ring->active_list, | ||
1682 | struct drm_i915_gem_object, | ||
1683 | list); | ||
1684 | obj = &obj_priv->base; | ||
1685 | |||
1686 | /* If the seqno being retired doesn't match the oldest in the | ||
1687 | * list, then the oldest in the list must still be newer than | ||
1688 | * this seqno. | ||
1689 | */ | ||
1690 | if (obj_priv->last_rendering_seqno != request->seqno) | ||
1691 | goto out; | ||
1692 | 1805 | ||
1693 | #if WATCH_LRU | 1806 | while (!list_empty(&ring->active_list)) { |
1694 | DRM_INFO("%s: retire %d moves to inactive list %p\n", | 1807 | struct drm_i915_gem_object *obj; |
1695 | __func__, request->seqno, obj); | ||
1696 | #endif | ||
1697 | 1808 | ||
1698 | if (obj->write_domain != 0) | 1809 | obj = list_first_entry(&ring->active_list, |
1699 | i915_gem_object_move_to_flushing(obj); | 1810 | struct drm_i915_gem_object, |
1700 | else { | 1811 | ring_list); |
1701 | /* Take a reference on the object so it won't be | 1812 | |
1702 | * freed while the spinlock is held. The list | 1813 | obj->base.write_domain = 0; |
1703 | * protection for this spinlock is safe when breaking | 1814 | list_del_init(&obj->gpu_write_list); |
1704 | * the lock like this since the next thing we do | 1815 | i915_gem_object_move_to_inactive(obj); |
1705 | * is just get the head of the list again. | ||
1706 | */ | ||
1707 | drm_gem_object_reference(obj); | ||
1708 | i915_gem_object_move_to_inactive(obj); | ||
1709 | spin_unlock(&dev_priv->mm.active_list_lock); | ||
1710 | drm_gem_object_unreference(obj); | ||
1711 | spin_lock(&dev_priv->mm.active_list_lock); | ||
1712 | } | ||
1713 | } | 1816 | } |
1714 | out: | ||
1715 | spin_unlock(&dev_priv->mm.active_list_lock); | ||
1716 | } | 1817 | } |
1717 | 1818 | ||
1718 | /** | 1819 | static void i915_gem_reset_fences(struct drm_device *dev) |
1719 | * Returns true if seq1 is later than seq2. | ||
1720 | */ | ||
1721 | bool | ||
1722 | i915_seqno_passed(uint32_t seq1, uint32_t seq2) | ||
1723 | { | 1820 | { |
1724 | return (int32_t)(seq1 - seq2) >= 0; | 1821 | struct drm_i915_private *dev_priv = dev->dev_private; |
1822 | int i; | ||
1823 | |||
1824 | for (i = 0; i < 16; i++) { | ||
1825 | struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i]; | ||
1826 | struct drm_i915_gem_object *obj = reg->obj; | ||
1827 | |||
1828 | if (!obj) | ||
1829 | continue; | ||
1830 | |||
1831 | if (obj->tiling_mode) | ||
1832 | i915_gem_release_mmap(obj); | ||
1833 | |||
1834 | reg->obj->fence_reg = I915_FENCE_REG_NONE; | ||
1835 | reg->obj->fenced_gpu_access = false; | ||
1836 | reg->obj->last_fenced_seqno = 0; | ||
1837 | reg->obj->last_fenced_ring = NULL; | ||
1838 | i915_gem_clear_fence_reg(dev, reg); | ||
1839 | } | ||
1725 | } | 1840 | } |
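
The signed-difference trick in i915_seqno_passed() is what makes the 32-bit sequence space safe across wraparound; a standalone illustration:

    #include <stdint.h>

    static int seqno_passed(uint32_t seq1, uint32_t seq2)
    {
            /* Correct as long as the two seqnos are less than 2^31
             * apart, even across the 0xffffffff -> 0 wrap. */
            return (int32_t)(seq1 - seq2) >= 0;
    }

    /* seqno_passed(2, 0xfffffffe) is true: 2 - 0xfffffffe wraps to 4,
     * so the numerically smaller seqno is correctly seen as later. */
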
1726 | 1841 | ||
1727 | uint32_t | 1842 | void i915_gem_reset(struct drm_device *dev) |
1728 | i915_get_gem_seqno(struct drm_device *dev, | ||
1729 | struct intel_ring_buffer *ring) | ||
1730 | { | 1843 | { |
1731 | return ring->get_gem_seqno(dev, ring); | 1844 | struct drm_i915_private *dev_priv = dev->dev_private; |
1845 | struct drm_i915_gem_object *obj; | ||
1846 | int i; | ||
1847 | |||
1848 | for (i = 0; i < I915_NUM_RINGS; i++) | ||
1849 | i915_gem_reset_ring_lists(dev_priv, &dev_priv->ring[i]); | ||
1850 | |||
1851 | /* Remove anything from the flushing lists. The GPU cache is likely | ||
1852 | * to be lost on reset along with the data, so simply move the | ||
1853 | * lost bo to the inactive list. | ||
1854 | */ | ||
1855 | while (!list_empty(&dev_priv->mm.flushing_list)) { | ||
1856 | obj = list_first_entry(&dev_priv->mm.flushing_list, | ||
1857 | struct drm_i915_gem_object, | ||
1858 | mm_list); | ||
1859 | |||
1860 | obj->base.write_domain = 0; | ||
1861 | list_del_init(&obj->gpu_write_list); | ||
1862 | i915_gem_object_move_to_inactive(obj); | ||
1863 | } | ||
1864 | |||
1865 | /* Move everything out of the GPU domains to ensure we do any | ||
1866 | * necessary invalidation upon reuse. | ||
1867 | */ | ||
1868 | list_for_each_entry(obj, | ||
1869 | &dev_priv->mm.inactive_list, | ||
1870 | mm_list) | ||
1871 | { | ||
1872 | obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS; | ||
1873 | } | ||
1874 | |||
1875 | /* The fence registers are invalidated so clear them out */ | ||
1876 | i915_gem_reset_fences(dev); | ||
1732 | } | 1877 | } |
1733 | 1878 | ||
1734 | /** | 1879 | /** |
1735 | * This function clears the request list as sequence numbers are passed. | 1880 | * This function clears the request list as sequence numbers are passed. |
1736 | */ | 1881 | */ |
1737 | static void | 1882 | static void |
1738 | i915_gem_retire_requests_ring(struct drm_device *dev, | 1883 | i915_gem_retire_requests_ring(struct intel_ring_buffer *ring) |
1739 | struct intel_ring_buffer *ring) | ||
1740 | { | 1884 | { |
1741 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
1742 | uint32_t seqno; | 1885 | uint32_t seqno; |
1886 | int i; | ||
1743 | 1887 | ||
1744 | if (!ring->status_page.page_addr | 1888 | if (list_empty(&ring->request_list)) |
1745 | || list_empty(&ring->request_list)) | ||
1746 | return; | 1889 | return; |
1747 | 1890 | ||
1748 | seqno = i915_get_gem_seqno(dev, ring); | 1891 | WARN_ON(i915_verify_lists(ring->dev)); |
1892 | |||
1893 | seqno = ring->get_seqno(ring); | ||
1894 | |||
1895 | for (i = 0; i < ARRAY_SIZE(ring->sync_seqno); i++) | ||
1896 | if (seqno >= ring->sync_seqno[i]) | ||
1897 | ring->sync_seqno[i] = 0; | ||
1749 | 1898 | ||
1750 | while (!list_empty(&ring->request_list)) { | 1899 | while (!list_empty(&ring->request_list)) { |
1751 | struct drm_i915_gem_request *request; | 1900 | struct drm_i915_gem_request *request; |
1752 | uint32_t retiring_seqno; | ||
1753 | 1901 | ||
1754 | request = list_first_entry(&ring->request_list, | 1902 | request = list_first_entry(&ring->request_list, |
1755 | struct drm_i915_gem_request, | 1903 | struct drm_i915_gem_request, |
1756 | list); | 1904 | list); |
1757 | retiring_seqno = request->seqno; | ||
1758 | 1905 | ||
1759 | if (i915_seqno_passed(seqno, retiring_seqno) || | 1906 | if (!i915_seqno_passed(seqno, request->seqno)) |
1760 | atomic_read(&dev_priv->mm.wedged)) { | ||
1761 | i915_gem_retire_request(dev, request); | ||
1762 | |||
1763 | list_del(&request->list); | ||
1764 | list_del(&request->client_list); | ||
1765 | kfree(request); | ||
1766 | } else | ||
1767 | break; | 1907 | break; |
1908 | |||
1909 | trace_i915_gem_request_retire(ring, request->seqno); | ||
1910 | |||
1911 | list_del(&request->list); | ||
1912 | i915_gem_request_remove_from_client(request); | ||
1913 | kfree(request); | ||
1768 | } | 1914 | } |
1769 | 1915 | ||
1770 | if (unlikely (dev_priv->trace_irq_seqno && | 1916 | /* Move any buffers on the active list that are no longer referenced |
1771 | i915_seqno_passed(dev_priv->trace_irq_seqno, seqno))) { | 1917 | * by the ringbuffer to the flushing/inactive lists as appropriate. |
1918 | */ | ||
1919 | while (!list_empty(&ring->active_list)) { | ||
1920 | struct drm_i915_gem_object *obj; | ||
1921 | |||
1922 | obj = list_first_entry(&ring->active_list, | ||
1923 | struct drm_i915_gem_object, | ||
1924 | ring_list); | ||
1925 | |||
1926 | if (!i915_seqno_passed(seqno, obj->last_rendering_seqno)) | ||
1927 | break; | ||
1928 | |||
1929 | if (obj->base.write_domain != 0) | ||
1930 | i915_gem_object_move_to_flushing(obj); | ||
1931 | else | ||
1932 | i915_gem_object_move_to_inactive(obj); | ||
1933 | } | ||
1772 | 1934 | ||
1773 | ring->user_irq_put(dev, ring); | 1935 | if (unlikely(ring->trace_irq_seqno && |
1774 | dev_priv->trace_irq_seqno = 0; | 1936 | i915_seqno_passed(seqno, ring->trace_irq_seqno))) { |
1937 | ring->irq_put(ring); | ||
1938 | ring->trace_irq_seqno = 0; | ||
1775 | } | 1939 | } |
1940 | |||
1941 | WARN_ON(i915_verify_lists(ring->dev)); | ||
1776 | } | 1942 | } |
1777 | 1943 | ||
1778 | void | 1944 | void |
1779 | i915_gem_retire_requests(struct drm_device *dev) | 1945 | i915_gem_retire_requests(struct drm_device *dev) |
1780 | { | 1946 | { |
1781 | drm_i915_private_t *dev_priv = dev->dev_private; | 1947 | drm_i915_private_t *dev_priv = dev->dev_private; |
1948 | int i; | ||
1782 | 1949 | ||
1783 | if (!list_empty(&dev_priv->mm.deferred_free_list)) { | 1950 | if (!list_empty(&dev_priv->mm.deferred_free_list)) { |
1784 | struct drm_i915_gem_object *obj_priv, *tmp; | 1951 | struct drm_i915_gem_object *obj, *next; |
1785 | 1952 | ||
1786 | /* We must be careful that during unbind() we do not | 1953 | /* We must be careful that during unbind() we do not |
1787 | * accidentally infinitely recurse into retire requests. | 1954 | * accidentally infinitely recurse into retire requests. |
1788 | * Currently: | 1955 | * Currently: |
1789 | * retire -> free -> unbind -> wait -> retire_ring | 1956 | * retire -> free -> unbind -> wait -> retire_ring |
1790 | */ | 1957 | */ |
1791 | list_for_each_entry_safe(obj_priv, tmp, | 1958 | list_for_each_entry_safe(obj, next, |
1792 | &dev_priv->mm.deferred_free_list, | 1959 | &dev_priv->mm.deferred_free_list, |
1793 | list) | 1960 | mm_list) |
1794 | i915_gem_free_object_tail(&obj_priv->base); | 1961 | i915_gem_free_object_tail(obj); |
1795 | } | 1962 | } |
1796 | 1963 | ||
1797 | i915_gem_retire_requests_ring(dev, &dev_priv->render_ring); | 1964 | for (i = 0; i < I915_NUM_RINGS; i++) |
1798 | if (HAS_BSD(dev)) | 1965 | i915_gem_retire_requests_ring(&dev_priv->ring[i]); |
1799 | i915_gem_retire_requests_ring(dev, &dev_priv->bsd_ring); | ||
1800 | } | 1966 | } |
1801 | 1967 | ||
1802 | void | 1968 | static void |
1803 | i915_gem_retire_work_handler(struct work_struct *work) | 1969 | i915_gem_retire_work_handler(struct work_struct *work) |
1804 | { | 1970 | { |
1805 | drm_i915_private_t *dev_priv; | 1971 | drm_i915_private_t *dev_priv; |
1806 | struct drm_device *dev; | 1972 | struct drm_device *dev; |
1973 | bool idle; | ||
1974 | int i; | ||
1807 | 1975 | ||
1808 | dev_priv = container_of(work, drm_i915_private_t, | 1976 | dev_priv = container_of(work, drm_i915_private_t, |
1809 | mm.retire_work.work); | 1977 | mm.retire_work.work); |
1810 | dev = dev_priv->dev; | 1978 | dev = dev_priv->dev; |
1811 | 1979 | ||
1812 | mutex_lock(&dev->struct_mutex); | 1980 | /* Come back later if the device is busy... */ |
1981 | if (!mutex_trylock(&dev->struct_mutex)) { | ||
1982 | queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ); | ||
1983 | return; | ||
1984 | } | ||
1985 | |||
1813 | i915_gem_retire_requests(dev); | 1986 | i915_gem_retire_requests(dev); |
1814 | 1987 | ||
1815 | if (!dev_priv->mm.suspended && | 1988 | /* Send a periodic flush down the ring so we don't hold onto GEM |
1816 | (!list_empty(&dev_priv->render_ring.request_list) || | 1989 | * objects indefinitely. |
1817 | (HAS_BSD(dev) && | 1990 | */ |
1818 | !list_empty(&dev_priv->bsd_ring.request_list)))) | 1991 | idle = true; |
1992 | for (i = 0; i < I915_NUM_RINGS; i++) { | ||
1993 | struct intel_ring_buffer *ring = &dev_priv->ring[i]; | ||
1994 | |||
1995 | if (!list_empty(&ring->gpu_write_list)) { | ||
1996 | struct drm_i915_gem_request *request; | ||
1997 | int ret; | ||
1998 | |||
1999 | ret = i915_gem_flush_ring(ring, | ||
2000 | 0, I915_GEM_GPU_DOMAINS); | ||
2001 | request = kzalloc(sizeof(*request), GFP_KERNEL); | ||
2002 | if (ret || request == NULL || | ||
2003 | i915_add_request(ring, NULL, request)) | ||
2004 | kfree(request); | ||
2005 | } | ||
2006 | |||
2007 | idle &= list_empty(&ring->request_list); | ||
2008 | } | ||
2009 | |||
2010 | if (!dev_priv->mm.suspended && !idle) | ||
1819 | queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ); | 2011 | queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ); |
2012 | |||
1820 | mutex_unlock(&dev->struct_mutex); | 2013 | mutex_unlock(&dev->struct_mutex); |
1821 | } | 2014 | } |
1822 | 2015 | ||
2016 | /** | ||
2017 | * Waits for a sequence number to be signaled, and cleans up the | ||
2018 | * request and object lists appropriately for that event. | ||
2019 | */ | ||
1823 | int | 2020 | int |
1824 | i915_do_wait_request(struct drm_device *dev, uint32_t seqno, | 2021 | i915_wait_request(struct intel_ring_buffer *ring, |
1825 | int interruptible, struct intel_ring_buffer *ring) | 2022 | uint32_t seqno) |
1826 | { | 2023 | { |
1827 | drm_i915_private_t *dev_priv = dev->dev_private; | 2024 | drm_i915_private_t *dev_priv = ring->dev->dev_private; |
1828 | u32 ier; | 2025 | u32 ier; |
1829 | int ret = 0; | 2026 | int ret = 0; |
1830 | 2027 | ||
1831 | BUG_ON(seqno == 0); | 2028 | BUG_ON(seqno == 0); |
1832 | 2029 | ||
1833 | if (atomic_read(&dev_priv->mm.wedged)) | 2030 | if (atomic_read(&dev_priv->mm.wedged)) { |
1834 | return -EIO; | 2031 | struct completion *x = &dev_priv->error_completion; |
2032 | bool recovery_complete; | ||
2033 | unsigned long flags; | ||
2034 | |||
2035 | /* Give the error handler a chance to run. */ | ||
2036 | spin_lock_irqsave(&x->wait.lock, flags); | ||
2037 | recovery_complete = x->done > 0; | ||
2038 | spin_unlock_irqrestore(&x->wait.lock, flags); | ||
2039 | |||
2040 | return recovery_complete ? -EIO : -EAGAIN; | ||
2041 | } | ||
1835 | 2042 | ||
1836 | if (!i915_seqno_passed(ring->get_gem_seqno(dev, ring), seqno)) { | 2043 | if (seqno == ring->outstanding_lazy_request) { |
1837 | if (HAS_PCH_SPLIT(dev)) | 2044 | struct drm_i915_gem_request *request; |
2045 | |||
2046 | request = kzalloc(sizeof(*request), GFP_KERNEL); | ||
2047 | if (request == NULL) | ||
2048 | return -ENOMEM; | ||
2049 | |||
2050 | ret = i915_add_request(ring, NULL, request); | ||
2051 | if (ret) { | ||
2052 | kfree(request); | ||
2053 | return ret; | ||
2054 | } | ||
2055 | |||
2056 | seqno = request->seqno; | ||
2057 | } | ||
2058 | |||
2059 | if (!i915_seqno_passed(ring->get_seqno(ring), seqno)) { | ||
2060 | if (HAS_PCH_SPLIT(ring->dev)) | ||
1838 | ier = I915_READ(DEIER) | I915_READ(GTIER); | 2061 | ier = I915_READ(DEIER) | I915_READ(GTIER); |
1839 | else | 2062 | else |
1840 | ier = I915_READ(IER); | 2063 | ier = I915_READ(IER); |
1841 | if (!ier) { | 2064 | if (!ier) { |
1842 | DRM_ERROR("something (likely vbetool) disabled " | 2065 | DRM_ERROR("something (likely vbetool) disabled " |
1843 | "interrupts, re-enabling\n"); | 2066 | "interrupts, re-enabling\n"); |
1844 | i915_driver_irq_preinstall(dev); | 2067 | ring->dev->driver->irq_preinstall(ring->dev); |
1845 | i915_driver_irq_postinstall(dev); | 2068 | ring->dev->driver->irq_postinstall(ring->dev); |
1846 | } | 2069 | } |
1847 | 2070 | ||
1848 | trace_i915_gem_request_wait_begin(dev, seqno); | 2071 | trace_i915_gem_request_wait_begin(ring, seqno); |
1849 | 2072 | ||
1850 | ring->waiting_gem_seqno = seqno; | 2073 | ring->waiting_seqno = seqno; |
1851 | ring->user_irq_get(dev, ring); | 2074 | if (ring->irq_get(ring)) { |
1852 | if (interruptible) | 2075 | if (dev_priv->mm.interruptible) |
1853 | ret = wait_event_interruptible(ring->irq_queue, | 2076 | ret = wait_event_interruptible(ring->irq_queue, |
1854 | i915_seqno_passed( | 2077 | i915_seqno_passed(ring->get_seqno(ring), seqno) |
1855 | ring->get_gem_seqno(dev, ring), seqno) | 2078 | || atomic_read(&dev_priv->mm.wedged)); |
1856 | || atomic_read(&dev_priv->mm.wedged)); | 2079 | else |
1857 | else | 2080 | wait_event(ring->irq_queue, |
1858 | wait_event(ring->irq_queue, | 2081 | i915_seqno_passed(ring->get_seqno(ring), seqno) |
1859 | i915_seqno_passed( | 2082 | || atomic_read(&dev_priv->mm.wedged)); |
1860 | ring->get_gem_seqno(dev, ring), seqno) | ||
1861 | || atomic_read(&dev_priv->mm.wedged)); | ||
1862 | 2083 | ||
1863 | ring->user_irq_put(dev, ring); | 2084 | ring->irq_put(ring); |
1864 | ring->waiting_gem_seqno = 0; | 2085 | } else if (wait_for(i915_seqno_passed(ring->get_seqno(ring), |
2086 | seqno) || | ||
2087 | atomic_read(&dev_priv->mm.wedged), 3000)) | ||
2088 | ret = -EBUSY; | ||
2089 | ring->waiting_seqno = 0; | ||
1865 | 2090 | ||
1866 | trace_i915_gem_request_wait_end(dev, seqno); | 2091 | trace_i915_gem_request_wait_end(ring, seqno); |
1867 | } | 2092 | } |
1868 | if (atomic_read(&dev_priv->mm.wedged)) | 2093 | if (atomic_read(&dev_priv->mm.wedged)) |
1869 | ret = -EIO; | 2094 | ret = -EAGAIN; |
1870 | 2095 | ||
1871 | if (ret && ret != -ERESTARTSYS) | 2096 | if (ret && ret != -ERESTARTSYS) |
1872 | DRM_ERROR("%s returns %d (awaiting %d at %d)\n", | 2097 | DRM_ERROR("%s returns %d (awaiting %d at %d, next %d)\n", |
1873 | __func__, ret, seqno, ring->get_gem_seqno(dev, ring)); | 2098 | __func__, ret, seqno, ring->get_seqno(ring), |
2099 | dev_priv->next_seqno); | ||
1874 | 2100 | ||
1875 | /* Directly dispatch request retiring. While we have the work queue | 2101 | /* Directly dispatch request retiring. While we have the work queue |
1876 | * to handle this, the waiter on a request often wants an associated | 2102 | * to handle this, the waiter on a request often wants an associated |
@@ -1878,67 +2104,31 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, | |||
1878 | * a separate wait queue to handle that. | 2104 | * a separate wait queue to handle that. |
1879 | */ | 2105 | */ |
1880 | if (ret == 0) | 2106 | if (ret == 0) |
1881 | i915_gem_retire_requests_ring(dev, ring); | 2107 | i915_gem_retire_requests_ring(ring); |
1882 | 2108 | ||
1883 | return ret; | 2109 | return ret; |
1884 | } | 2110 | } |
1885 | 2111 | ||
1886 | /** | 2112 | /** |
1887 | * Waits for a sequence number to be signaled, and cleans up the | ||
1888 | * request and object lists appropriately for that event. | ||
1889 | */ | ||
1890 | static int | ||
1891 | i915_wait_request(struct drm_device *dev, uint32_t seqno, | ||
1892 | struct intel_ring_buffer *ring) | ||
1893 | { | ||
1894 | return i915_do_wait_request(dev, seqno, 1, ring); | ||
1895 | } | ||
1896 | |||
1897 | static void | ||
1898 | i915_gem_flush(struct drm_device *dev, | ||
1899 | uint32_t invalidate_domains, | ||
1900 | uint32_t flush_domains) | ||
1901 | { | ||
1902 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
1903 | if (flush_domains & I915_GEM_DOMAIN_CPU) | ||
1904 | drm_agp_chipset_flush(dev); | ||
1905 | dev_priv->render_ring.flush(dev, &dev_priv->render_ring, | ||
1906 | invalidate_domains, | ||
1907 | flush_domains); | ||
1908 | |||
1909 | if (HAS_BSD(dev)) | ||
1910 | dev_priv->bsd_ring.flush(dev, &dev_priv->bsd_ring, | ||
1911 | invalidate_domains, | ||
1912 | flush_domains); | ||
1913 | } | ||
1914 | |||
1915 | /** | ||
1916 | * Ensures that all rendering to the object has completed and the object is | 2113 | * Ensures that all rendering to the object has completed and the object is |
1917 | * safe to unbind from the GTT or access from the CPU. | 2114 | * safe to unbind from the GTT or access from the CPU. |
1918 | */ | 2115 | */ |
1919 | static int | 2116 | int |
1920 | i915_gem_object_wait_rendering(struct drm_gem_object *obj) | 2117 | i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj) |
1921 | { | 2118 | { |
1922 | struct drm_device *dev = obj->dev; | ||
1923 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | ||
1924 | int ret; | 2119 | int ret; |
1925 | 2120 | ||
1926 | /* This function only exists to support waiting for existing rendering, | 2121 | /* This function only exists to support waiting for existing rendering, |
1927 | * not for emitting required flushes. | 2122 | * not for emitting required flushes. |
1928 | */ | 2123 | */ |
1929 | BUG_ON((obj->write_domain & I915_GEM_GPU_DOMAINS) != 0); | 2124 | BUG_ON((obj->base.write_domain & I915_GEM_GPU_DOMAINS) != 0); |
1930 | 2125 | ||
1931 | /* If there is rendering queued on the buffer being evicted, wait for | 2126 | /* If there is rendering queued on the buffer being evicted, wait for |
1932 | * it. | 2127 | * it. |
1933 | */ | 2128 | */ |
1934 | if (obj_priv->active) { | 2129 | if (obj->active) { |
1935 | #if WATCH_BUF | 2130 | ret = i915_wait_request(obj->ring, obj->last_rendering_seqno); |
1936 | DRM_INFO("%s: object %p wait for seqno %08x\n", | 2131 | if (ret) |
1937 | __func__, obj, obj_priv->last_rendering_seqno); | ||
1938 | #endif | ||
1939 | ret = i915_wait_request(dev, | ||
1940 | obj_priv->last_rendering_seqno, obj_priv->ring); | ||
1941 | if (ret != 0) | ||
1942 | return ret; | 2132 | return ret; |
1943 | } | 2133 | } |
1944 | 2134 | ||
@@ -1949,21 +2139,14 @@ i915_gem_object_wait_rendering(struct drm_gem_object *obj) | |||
1949 | * Unbinds an object from the GTT aperture. | 2139 | * Unbinds an object from the GTT aperture. |
1950 | */ | 2140 | */ |
1951 | int | 2141 | int |
1952 | i915_gem_object_unbind(struct drm_gem_object *obj) | 2142 | i915_gem_object_unbind(struct drm_i915_gem_object *obj) |
1953 | { | 2143 | { |
1954 | struct drm_device *dev = obj->dev; | ||
1955 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
1956 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | ||
1957 | int ret = 0; | 2144 | int ret = 0; |
1958 | 2145 | ||
1959 | #if WATCH_BUF | 2146 | if (obj->gtt_space == NULL) |
1960 | DRM_INFO("%s:%d %p\n", __func__, __LINE__, obj); | ||
1961 | DRM_INFO("gtt_space %p\n", obj_priv->gtt_space); | ||
1962 | #endif | ||
1963 | if (obj_priv->gtt_space == NULL) | ||
1964 | return 0; | 2147 | return 0; |
1965 | 2148 | ||
1966 | if (obj_priv->pin_count != 0) { | 2149 | if (obj->pin_count != 0) { |
1967 | DRM_ERROR("Attempting to unbind pinned buffer\n"); | 2150 | DRM_ERROR("Attempting to unbind pinned buffer\n"); |
1968 | return -EINVAL; | 2151 | return -EINVAL; |
1969 | } | 2152 | } |
@@ -1984,319 +2167,383 @@ i915_gem_object_unbind(struct drm_gem_object *obj) | |||
1984 | * should be safe and we need to cleanup or else we might | 2167 | * should be safe and we need to cleanup or else we might |
1985 | * cause memory corruption through use-after-free. | 2168 | * cause memory corruption through use-after-free. |
1986 | */ | 2169 | */ |
2170 | if (ret) { | ||
2171 | i915_gem_clflush_object(obj); | ||
2172 | obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU; | ||
2173 | } | ||
1987 | 2174 | ||
1988 | /* release the fence reg _after_ flushing */ | 2175 | /* release the fence reg _after_ flushing */ |
1989 | if (obj_priv->fence_reg != I915_FENCE_REG_NONE) | 2176 | ret = i915_gem_object_put_fence(obj); |
1990 | i915_gem_clear_fence_reg(obj); | 2177 | if (ret == -ERESTARTSYS) |
2178 | return ret; | ||
1991 | 2179 | ||
1992 | if (obj_priv->agp_mem != NULL) { | 2180 | trace_i915_gem_object_unbind(obj); |
1993 | drm_unbind_agp(obj_priv->agp_mem); | ||
1994 | drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE); | ||
1995 | obj_priv->agp_mem = NULL; | ||
1996 | } | ||
1997 | 2181 | ||
1998 | i915_gem_object_put_pages(obj); | 2182 | i915_gem_gtt_unbind_object(obj); |
1999 | BUG_ON(obj_priv->pages_refcount); | 2183 | i915_gem_object_put_pages_gtt(obj); |
2000 | 2184 | ||
2001 | if (obj_priv->gtt_space) { | 2185 | list_del_init(&obj->gtt_list); |
2002 | atomic_dec(&dev->gtt_count); | 2186 | list_del_init(&obj->mm_list); |
2003 | atomic_sub(obj->size, &dev->gtt_memory); | 2187 | /* Avoid an unnecessary call to unbind on rebind. */ |
2188 | obj->map_and_fenceable = true; | ||
2004 | 2189 | ||
2005 | drm_mm_put_block(obj_priv->gtt_space); | 2190 | drm_mm_put_block(obj->gtt_space); |
2006 | obj_priv->gtt_space = NULL; | 2191 | obj->gtt_space = NULL; |
2007 | } | 2192 | obj->gtt_offset = 0; |
2008 | 2193 | ||
2009 | /* Remove ourselves from the LRU list if present. */ | 2194 | if (i915_gem_object_is_purgeable(obj)) |
2010 | spin_lock(&dev_priv->mm.active_list_lock); | ||
2011 | if (!list_empty(&obj_priv->list)) | ||
2012 | list_del_init(&obj_priv->list); | ||
2013 | spin_unlock(&dev_priv->mm.active_list_lock); | ||
2014 | |||
2015 | if (i915_gem_object_is_purgeable(obj_priv)) | ||
2016 | i915_gem_object_truncate(obj); | 2195 | i915_gem_object_truncate(obj); |
2017 | 2196 | ||
2018 | trace_i915_gem_object_unbind(obj); | ||
2019 | |||
2020 | return ret; | 2197 | return ret; |
2021 | } | 2198 | } |
2022 | 2199 | ||
2023 | int | 2200 | int |
2024 | i915_gpu_idle(struct drm_device *dev) | 2201 | i915_gem_flush_ring(struct intel_ring_buffer *ring, |
2202 | uint32_t invalidate_domains, | ||
2203 | uint32_t flush_domains) | ||
2025 | { | 2204 | { |
2026 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
2027 | bool lists_empty; | ||
2028 | uint32_t seqno1, seqno2; | ||
2029 | int ret; | 2205 | int ret; |
2030 | 2206 | ||
2031 | spin_lock(&dev_priv->mm.active_list_lock); | 2207 | if (((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) == 0) |
2032 | lists_empty = (list_empty(&dev_priv->mm.flushing_list) && | ||
2033 | list_empty(&dev_priv->render_ring.active_list) && | ||
2034 | (!HAS_BSD(dev) || | ||
2035 | list_empty(&dev_priv->bsd_ring.active_list))); | ||
2036 | spin_unlock(&dev_priv->mm.active_list_lock); | ||
2037 | |||
2038 | if (lists_empty) | ||
2039 | return 0; | 2208 | return 0; |
2040 | 2209 | ||
2041 | /* Flush everything onto the inactive list. */ | 2210 | trace_i915_gem_ring_flush(ring, invalidate_domains, flush_domains); |
2042 | i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS); | ||
2043 | seqno1 = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS, | ||
2044 | &dev_priv->render_ring); | ||
2045 | if (seqno1 == 0) | ||
2046 | return -ENOMEM; | ||
2047 | ret = i915_wait_request(dev, seqno1, &dev_priv->render_ring); | ||
2048 | 2211 | ||
2049 | if (HAS_BSD(dev)) { | 2212 | ret = ring->flush(ring, invalidate_domains, flush_domains); |
2050 | seqno2 = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS, | 2213 | if (ret) |
2051 | &dev_priv->bsd_ring); | 2214 | return ret; |
2052 | if (seqno2 == 0) | 2215 | |
2053 | return -ENOMEM; | 2216 | if (flush_domains & I915_GEM_GPU_DOMAINS) |
2217 | i915_gem_process_flushing_list(ring, flush_domains); | ||
2218 | |||
2219 | return 0; | ||
2220 | } | ||
2221 | |||
2222 | static int i915_ring_idle(struct intel_ring_buffer *ring) | ||
2223 | { | ||
2224 | int ret; | ||
2225 | |||
2226 | if (list_empty(&ring->gpu_write_list) && list_empty(&ring->active_list)) | ||
2227 | return 0; | ||
2054 | 2228 | ||
2055 | ret = i915_wait_request(dev, seqno2, &dev_priv->bsd_ring); | 2229 | if (!list_empty(&ring->gpu_write_list)) { |
2230 | ret = i915_gem_flush_ring(ring, | ||
2231 | I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS); | ||
2056 | if (ret) | 2232 | if (ret) |
2057 | return ret; | 2233 | return ret; |
2058 | } | 2234 | } |
2059 | 2235 | ||
2060 | 2236 | return i915_wait_request(ring, i915_gem_next_request_seqno(ring)); | |
2061 | return ret; | ||
2062 | } | 2237 | } |
2063 | 2238 | ||
2064 | int | 2239 | int |
2065 | i915_gem_object_get_pages(struct drm_gem_object *obj, | 2240 | i915_gpu_idle(struct drm_device *dev) |
2066 | gfp_t gfpmask) | ||
2067 | { | 2241 | { |
2068 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 2242 | drm_i915_private_t *dev_priv = dev->dev_private; |
2069 | int page_count, i; | 2243 | bool lists_empty; |
2070 | struct address_space *mapping; | 2244 | int ret, i; |
2071 | struct inode *inode; | ||
2072 | struct page *page; | ||
2073 | |||
2074 | BUG_ON(obj_priv->pages_refcount | ||
2075 | == DRM_I915_GEM_OBJECT_MAX_PAGES_REFCOUNT); | ||
2076 | 2245 | ||
2077 | if (obj_priv->pages_refcount++ != 0) | 2246 | lists_empty = (list_empty(&dev_priv->mm.flushing_list) && |
2247 | list_empty(&dev_priv->mm.active_list)); | ||
2248 | if (lists_empty) | ||
2078 | return 0; | 2249 | return 0; |
2079 | 2250 | ||
2080 | /* Get the list of pages out of our struct file. They'll be pinned | 2251 | /* Flush everything onto the inactive list. */ |
2081 | * at this point until we release them. | 2252 | for (i = 0; i < I915_NUM_RINGS; i++) { |
2082 | */ | 2253 | ret = i915_ring_idle(&dev_priv->ring[i]); |
2083 | page_count = obj->size / PAGE_SIZE; | 2254 | if (ret) |
2084 | BUG_ON(obj_priv->pages != NULL); | 2255 | return ret; |
2085 | obj_priv->pages = drm_calloc_large(page_count, sizeof(struct page *)); | ||
2086 | if (obj_priv->pages == NULL) { | ||
2087 | obj_priv->pages_refcount--; | ||
2088 | return -ENOMEM; | ||
2089 | } | ||
2090 | |||
2091 | inode = obj->filp->f_path.dentry->d_inode; | ||
2092 | mapping = inode->i_mapping; | ||
2093 | for (i = 0; i < page_count; i++) { | ||
2094 | page = read_cache_page_gfp(mapping, i, | ||
2095 | GFP_HIGHUSER | | ||
2096 | __GFP_COLD | | ||
2097 | __GFP_RECLAIMABLE | | ||
2098 | gfpmask); | ||
2099 | if (IS_ERR(page)) | ||
2100 | goto err_pages; | ||
2101 | |||
2102 | obj_priv->pages[i] = page; | ||
2103 | } | 2256 | } |
2104 | 2257 | ||
2105 | if (obj_priv->tiling_mode != I915_TILING_NONE) | ||
2106 | i915_gem_object_do_bit_17_swizzle(obj); | ||
2107 | |||
2108 | return 0; | 2258 | return 0; |
2109 | |||
2110 | err_pages: | ||
2111 | while (i--) | ||
2112 | page_cache_release(obj_priv->pages[i]); | ||
2113 | |||
2114 | drm_free_large(obj_priv->pages); | ||
2115 | obj_priv->pages = NULL; | ||
2116 | obj_priv->pages_refcount--; | ||
2117 | return PTR_ERR(page); | ||
2118 | } | 2259 | } |
2119 | 2260 | ||
2120 | static void sandybridge_write_fence_reg(struct drm_i915_fence_reg *reg) | 2261 | static int sandybridge_write_fence_reg(struct drm_i915_gem_object *obj, |
2262 | struct intel_ring_buffer *pipelined) | ||
2121 | { | 2263 | { |
2122 | struct drm_gem_object *obj = reg->obj; | 2264 | struct drm_device *dev = obj->base.dev; |
2123 | struct drm_device *dev = obj->dev; | ||
2124 | drm_i915_private_t *dev_priv = dev->dev_private; | 2265 | drm_i915_private_t *dev_priv = dev->dev_private; |
2125 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 2266 | u32 size = obj->gtt_space->size; |
2126 | int regnum = obj_priv->fence_reg; | 2267 | int regnum = obj->fence_reg; |
2127 | uint64_t val; | 2268 | uint64_t val; |
2128 | 2269 | ||
2129 | val = (uint64_t)((obj_priv->gtt_offset + obj->size - 4096) & | 2270 | val = (uint64_t)((obj->gtt_offset + size - 4096) & |
2130 | 0xfffff000) << 32; | 2271 | 0xfffff000) << 32; |
2131 | val |= obj_priv->gtt_offset & 0xfffff000; | 2272 | val |= obj->gtt_offset & 0xfffff000; |
2132 | val |= (uint64_t)((obj_priv->stride / 128) - 1) << | 2273 | val |= (uint64_t)((obj->stride / 128) - 1) << |
2133 | SANDYBRIDGE_FENCE_PITCH_SHIFT; | 2274 | SANDYBRIDGE_FENCE_PITCH_SHIFT; |
2134 | 2275 | ||
2135 | if (obj_priv->tiling_mode == I915_TILING_Y) | 2276 | if (obj->tiling_mode == I915_TILING_Y) |
2136 | val |= 1 << I965_FENCE_TILING_Y_SHIFT; | 2277 | val |= 1 << I965_FENCE_TILING_Y_SHIFT; |
2137 | val |= I965_FENCE_REG_VALID; | 2278 | val |= I965_FENCE_REG_VALID; |
2138 | 2279 | ||
2139 | I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (regnum * 8), val); | 2280 | if (pipelined) { |
2281 | int ret = intel_ring_begin(pipelined, 6); | ||
2282 | if (ret) | ||
2283 | return ret; | ||
2284 | |||
2285 | intel_ring_emit(pipelined, MI_NOOP); | ||
2286 | intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(2)); | ||
2287 | intel_ring_emit(pipelined, FENCE_REG_SANDYBRIDGE_0 + regnum*8); | ||
2288 | intel_ring_emit(pipelined, (u32)val); | ||
2289 | intel_ring_emit(pipelined, FENCE_REG_SANDYBRIDGE_0 + regnum*8 + 4); | ||
2290 | intel_ring_emit(pipelined, (u32)(val >> 32)); | ||
2291 | intel_ring_advance(pipelined); | ||
2292 | } else | ||
2293 | I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + regnum * 8, val); | ||
2294 | |||
2295 | return 0; | ||
2140 | } | 2296 | } |
2141 | 2297 | ||
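For reference, a standalone sketch of the 64-bit value packed above. The pitch field position differs by generation (I965_FENCE_PITCH_SHIFT on gen4/5, SANDYBRIDGE_FENCE_PITCH_SHIFT on gen6), so it is a parameter here; the tiling and valid bit positions mirror the i915_reg.h names and are illustrative rather than authoritative:

#include <stdint.h>

#define FENCE_TILING_Y_BIT	(1ull << 1)	/* cf. I965_FENCE_TILING_Y_SHIFT */
#define FENCE_VALID_BIT		(1ull << 0)	/* cf. I965_FENCE_REG_VALID */

static uint64_t pack_fence_gen4plus(uint32_t gtt_offset, uint32_t size,
				    uint32_t stride, int tiling_y,
				    unsigned int pitch_shift)
{
	/* end address (last page of the range) in the upper dword */
	uint64_t val = (uint64_t)((gtt_offset + size - 4096) & 0xfffff000) << 32;
	/* start address in the lower dword */
	val |= gtt_offset & 0xfffff000;
	/* pitch encoded in 128-byte units, minus one */
	val |= (uint64_t)(stride / 128 - 1) << pitch_shift;
	if (tiling_y)
		val |= FENCE_TILING_Y_BIT;
	return val | FENCE_VALID_BIT;
}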
2142 | static void i965_write_fence_reg(struct drm_i915_fence_reg *reg) | 2298 | static int i965_write_fence_reg(struct drm_i915_gem_object *obj, |
2299 | struct intel_ring_buffer *pipelined) | ||
2143 | { | 2300 | { |
2144 | struct drm_gem_object *obj = reg->obj; | 2301 | struct drm_device *dev = obj->base.dev; |
2145 | struct drm_device *dev = obj->dev; | ||
2146 | drm_i915_private_t *dev_priv = dev->dev_private; | 2302 | drm_i915_private_t *dev_priv = dev->dev_private; |
2147 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 2303 | u32 size = obj->gtt_space->size; |
2148 | int regnum = obj_priv->fence_reg; | 2304 | int regnum = obj->fence_reg; |
2149 | uint64_t val; | 2305 | uint64_t val; |
2150 | 2306 | ||
2151 | val = (uint64_t)((obj_priv->gtt_offset + obj->size - 4096) & | 2307 | val = (uint64_t)((obj->gtt_offset + size - 4096) & |
2152 | 0xfffff000) << 32; | 2308 | 0xfffff000) << 32; |
2153 | val |= obj_priv->gtt_offset & 0xfffff000; | 2309 | val |= obj->gtt_offset & 0xfffff000; |
2154 | val |= ((obj_priv->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT; | 2310 | val |= ((obj->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT; |
2155 | if (obj_priv->tiling_mode == I915_TILING_Y) | 2311 | if (obj->tiling_mode == I915_TILING_Y) |
2156 | val |= 1 << I965_FENCE_TILING_Y_SHIFT; | 2312 | val |= 1 << I965_FENCE_TILING_Y_SHIFT; |
2157 | val |= I965_FENCE_REG_VALID; | 2313 | val |= I965_FENCE_REG_VALID; |
2158 | 2314 | ||
2159 | I915_WRITE64(FENCE_REG_965_0 + (regnum * 8), val); | 2315 | if (pipelined) { |
2316 | int ret = intel_ring_begin(pipelined, 6); | ||
2317 | if (ret) | ||
2318 | return ret; | ||
2319 | |||
2320 | intel_ring_emit(pipelined, MI_NOOP); | ||
2321 | intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(2)); | ||
2322 | intel_ring_emit(pipelined, FENCE_REG_965_0 + regnum*8); | ||
2323 | intel_ring_emit(pipelined, (u32)val); | ||
2324 | intel_ring_emit(pipelined, FENCE_REG_965_0 + regnum*8 + 4); | ||
2325 | intel_ring_emit(pipelined, (u32)(val >> 32)); | ||
2326 | intel_ring_advance(pipelined); | ||
2327 | } else | ||
2328 | I915_WRITE64(FENCE_REG_965_0 + regnum * 8, val); | ||
2329 | |||
2330 | return 0; | ||
2160 | } | 2331 | } |
2161 | 2332 | ||
2162 | static void i915_write_fence_reg(struct drm_i915_fence_reg *reg) | 2333 | static int i915_write_fence_reg(struct drm_i915_gem_object *obj, |
2334 | struct intel_ring_buffer *pipelined) | ||
2163 | { | 2335 | { |
2164 | struct drm_gem_object *obj = reg->obj; | 2336 | struct drm_device *dev = obj->base.dev; |
2165 | struct drm_device *dev = obj->dev; | ||
2166 | drm_i915_private_t *dev_priv = dev->dev_private; | 2337 | drm_i915_private_t *dev_priv = dev->dev_private; |
2167 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 2338 | u32 size = obj->gtt_space->size; |
2168 | int regnum = obj_priv->fence_reg; | 2339 | u32 fence_reg, val, pitch_val; |
2169 | int tile_width; | 2340 | int tile_width; |
2170 | uint32_t fence_reg, val; | ||
2171 | uint32_t pitch_val; | ||
2172 | 2341 | ||
2173 | if ((obj_priv->gtt_offset & ~I915_FENCE_START_MASK) || | 2342 | if (WARN((obj->gtt_offset & ~I915_FENCE_START_MASK) || |
2174 | (obj_priv->gtt_offset & (obj->size - 1))) { | 2343 | (size & -size) != size || |
2175 | WARN(1, "%s: object 0x%08x not 1M or size (0x%zx) aligned\n", | 2344 | (obj->gtt_offset & (size - 1)), |
2176 | __func__, obj_priv->gtt_offset, obj->size); | 2345 | "object 0x%08x [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n", |
2177 | return; | 2346 | obj->gtt_offset, obj->map_and_fenceable, size)) |
2178 | } | 2347 | return -EINVAL; |
2179 | 2348 | ||
2180 | if (obj_priv->tiling_mode == I915_TILING_Y && | 2349 | if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev)) |
2181 | HAS_128_BYTE_Y_TILING(dev)) | ||
2182 | tile_width = 128; | 2350 | tile_width = 128; |
2183 | else | 2351 | else |
2184 | tile_width = 512; | 2352 | tile_width = 512; |
2185 | 2353 | ||
2186 | /* Note: pitch better be a power of two tile widths */ | 2354 | /* Note: pitch better be a power of two tile widths */ |
2187 | pitch_val = obj_priv->stride / tile_width; | 2355 | pitch_val = obj->stride / tile_width; |
2188 | pitch_val = ffs(pitch_val) - 1; | 2356 | pitch_val = ffs(pitch_val) - 1; |
2189 | 2357 | ||
2190 | if (obj_priv->tiling_mode == I915_TILING_Y && | 2358 | val = obj->gtt_offset; |
2191 | HAS_128_BYTE_Y_TILING(dev)) | 2359 | if (obj->tiling_mode == I915_TILING_Y) |
2192 | WARN_ON(pitch_val > I830_FENCE_MAX_PITCH_VAL); | ||
2193 | else | ||
2194 | WARN_ON(pitch_val > I915_FENCE_MAX_PITCH_VAL); | ||
2195 | |||
2196 | val = obj_priv->gtt_offset; | ||
2197 | if (obj_priv->tiling_mode == I915_TILING_Y) | ||
2198 | val |= 1 << I830_FENCE_TILING_Y_SHIFT; | 2360 | val |= 1 << I830_FENCE_TILING_Y_SHIFT; |
2199 | val |= I915_FENCE_SIZE_BITS(obj->size); | 2361 | val |= I915_FENCE_SIZE_BITS(size); |
2200 | val |= pitch_val << I830_FENCE_PITCH_SHIFT; | 2362 | val |= pitch_val << I830_FENCE_PITCH_SHIFT; |
2201 | val |= I830_FENCE_REG_VALID; | 2363 | val |= I830_FENCE_REG_VALID; |
2202 | 2364 | ||
2203 | if (regnum < 8) | 2365 | fence_reg = obj->fence_reg; |
2204 | fence_reg = FENCE_REG_830_0 + (regnum * 4); | 2366 | if (fence_reg < 8) |
2367 | fence_reg = FENCE_REG_830_0 + fence_reg * 4; | ||
2205 | else | 2368 | else |
2206 | fence_reg = FENCE_REG_945_8 + ((regnum - 8) * 4); | 2369 | fence_reg = FENCE_REG_945_8 + (fence_reg - 8) * 4; |
2207 | I915_WRITE(fence_reg, val); | 2370 | |
2371 | if (pipelined) { | ||
2372 | int ret = intel_ring_begin(pipelined, 4); | ||
2373 | if (ret) | ||
2374 | return ret; | ||
2375 | |||
2376 | intel_ring_emit(pipelined, MI_NOOP); | ||
2377 | intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(1)); | ||
2378 | intel_ring_emit(pipelined, fence_reg); | ||
2379 | intel_ring_emit(pipelined, val); | ||
2380 | intel_ring_advance(pipelined); | ||
2381 | } else | ||
2382 | I915_WRITE(fence_reg, val); | ||
2383 | |||
2384 | return 0; | ||
2208 | } | 2385 | } |
2209 | 2386 | ||
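The WARN condition above folds three checks into one test: the offset must sit within I915_FENCE_START_MASK, the size must be a power of two, and the offset must be size-aligned. A standalone restatement of those checks and of the pitch encoding (log2 of the stride in tile widths):

#include <stdbool.h>
#include <stdint.h>
#include <strings.h>	/* ffs() */

static bool fence_range_ok(uint32_t offset, uint32_t size, uint32_t start_mask)
{
	return size != 0 &&
	       (size & -size) == size &&	/* power of two */
	       (offset & ~start_mask) == 0 &&	/* within the start mask */
	       (offset & (size - 1)) == 0;	/* size-aligned */
}

/* tile_width: 128 for Y tiling on chips with 128-byte Y tiles, else 512 */
static uint32_t fence_pitch_val(uint32_t stride, uint32_t tile_width)
{
	return ffs(stride / tile_width) - 1;
}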
2210 | static void i830_write_fence_reg(struct drm_i915_fence_reg *reg) | 2387 | static int i830_write_fence_reg(struct drm_i915_gem_object *obj, |
2388 | struct intel_ring_buffer *pipelined) | ||
2211 | { | 2389 | { |
2212 | struct drm_gem_object *obj = reg->obj; | 2390 | struct drm_device *dev = obj->base.dev; |
2213 | struct drm_device *dev = obj->dev; | ||
2214 | drm_i915_private_t *dev_priv = dev->dev_private; | 2391 | drm_i915_private_t *dev_priv = dev->dev_private; |
2215 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 2392 | u32 size = obj->gtt_space->size; |
2216 | int regnum = obj_priv->fence_reg; | 2393 | int regnum = obj->fence_reg; |
2217 | uint32_t val; | 2394 | uint32_t val; |
2218 | uint32_t pitch_val; | 2395 | uint32_t pitch_val; |
2219 | uint32_t fence_size_bits; | ||
2220 | 2396 | ||
2221 | if ((obj_priv->gtt_offset & ~I830_FENCE_START_MASK) || | 2397 | if (WARN((obj->gtt_offset & ~I830_FENCE_START_MASK) || |
2222 | (obj_priv->gtt_offset & (obj->size - 1))) { | 2398 | (size & -size) != size || |
2223 | WARN(1, "%s: object 0x%08x not 512K or size aligned\n", | 2399 | (obj->gtt_offset & (size - 1)), |
2224 | __func__, obj_priv->gtt_offset); | 2400 | "object 0x%08x not 512K or pot-size 0x%08x aligned\n", |
2225 | return; | 2401 | obj->gtt_offset, size)) |
2226 | } | 2402 | return -EINVAL; |
2227 | 2403 | ||
2228 | pitch_val = obj_priv->stride / 128; | 2404 | pitch_val = obj->stride / 128; |
2229 | pitch_val = ffs(pitch_val) - 1; | 2405 | pitch_val = ffs(pitch_val) - 1; |
2230 | WARN_ON(pitch_val > I830_FENCE_MAX_PITCH_VAL); | ||
2231 | 2406 | ||
2232 | val = obj_priv->gtt_offset; | 2407 | val = obj->gtt_offset; |
2233 | if (obj_priv->tiling_mode == I915_TILING_Y) | 2408 | if (obj->tiling_mode == I915_TILING_Y) |
2234 | val |= 1 << I830_FENCE_TILING_Y_SHIFT; | 2409 | val |= 1 << I830_FENCE_TILING_Y_SHIFT; |
2235 | fence_size_bits = I830_FENCE_SIZE_BITS(obj->size); | 2410 | val |= I830_FENCE_SIZE_BITS(size); |
2236 | WARN_ON(fence_size_bits & ~0x00000f00); | ||
2237 | val |= fence_size_bits; | ||
2238 | val |= pitch_val << I830_FENCE_PITCH_SHIFT; | 2411 | val |= pitch_val << I830_FENCE_PITCH_SHIFT; |
2239 | val |= I830_FENCE_REG_VALID; | 2412 | val |= I830_FENCE_REG_VALID; |
2240 | 2413 | ||
2241 | I915_WRITE(FENCE_REG_830_0 + (regnum * 4), val); | 2414 | if (pipelined) { |
2415 | int ret = intel_ring_begin(pipelined, 4); | ||
2416 | if (ret) | ||
2417 | return ret; | ||
2418 | |||
2419 | intel_ring_emit(pipelined, MI_NOOP); | ||
2420 | intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(1)); | ||
2421 | intel_ring_emit(pipelined, FENCE_REG_830_0 + regnum*4); | ||
2422 | intel_ring_emit(pipelined, val); | ||
2423 | intel_ring_advance(pipelined); | ||
2424 | } else | ||
2425 | I915_WRITE(FENCE_REG_830_0 + regnum * 4, val); | ||
2426 | |||
2427 | return 0; | ||
2428 | } | ||
2429 | |||
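I830_FENCE_SIZE_BITS() above encodes the power-of-two object size into the register's size field. A sketch of that encoding, with the macro bodies written from memory of the era's i915_reg.h and therefore illustrative only; the >>19 and >>20 shifts reflect the 512K (gen2) and 1M (gen3) minimum fence sizes:

#include <strings.h>	/* ffs() */

#define I830_FENCE_SIZE_BITS(size)	((ffs((size) >> 19) - 1) << 8)	/* gen2 */
#define I915_FENCE_SIZE_BITS(size)	((ffs((size) >> 20) - 1) << 8)	/* gen3 */

/* e.g. a 1MiB object on gen2: ffs(0x100000 >> 19) - 1 == ffs(2) - 1 == 1,
 * so the size field holds 1 << 8 == 0x100. */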
2430 | static bool ring_passed_seqno(struct intel_ring_buffer *ring, u32 seqno) | ||
2431 | { | ||
2432 | return i915_seqno_passed(ring->get_seqno(ring), seqno); | ||
2433 | } | ||
2434 | |||
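ring_passed_seqno() defers to i915_seqno_passed(), which compares seqnos with wraparound in mind. The idiom, sketched standalone here, treats the unsigned difference as signed:

#include <stdbool.h>
#include <stdint.h>

static bool seqno_passed(uint32_t current_seqno, uint32_t target)
{
	/* true iff current_seqno is at or ahead of target, modulo 2^32 */
	return (int32_t)(current_seqno - target) >= 0;
}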
2435 | static int | ||
2436 | i915_gem_object_flush_fence(struct drm_i915_gem_object *obj, | ||
2437 | struct intel_ring_buffer *pipelined) | ||
2438 | { | ||
2439 | int ret; | ||
2440 | |||
2441 | if (obj->fenced_gpu_access) { | ||
2442 | if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) { | ||
2443 | ret = i915_gem_flush_ring(obj->last_fenced_ring, | ||
2444 | 0, obj->base.write_domain); | ||
2445 | if (ret) | ||
2446 | return ret; | ||
2447 | } | ||
2448 | |||
2449 | obj->fenced_gpu_access = false; | ||
2450 | } | ||
2451 | |||
2452 | if (obj->last_fenced_seqno && pipelined != obj->last_fenced_ring) { | ||
2453 | if (!ring_passed_seqno(obj->last_fenced_ring, | ||
2454 | obj->last_fenced_seqno)) { | ||
2455 | ret = i915_wait_request(obj->last_fenced_ring, | ||
2456 | obj->last_fenced_seqno); | ||
2457 | if (ret) | ||
2458 | return ret; | ||
2459 | } | ||
2460 | |||
2461 | obj->last_fenced_seqno = 0; | ||
2462 | obj->last_fenced_ring = NULL; | ||
2463 | } | ||
2464 | |||
2465 | /* Ensure that all CPU reads are completed before installing a fence | ||
2466 | * and all writes before removing the fence. | ||
2467 | */ | ||
2468 | if (obj->base.read_domains & I915_GEM_DOMAIN_GTT) | ||
2469 | mb(); | ||
2470 | |||
2471 | return 0; | ||
2472 | } | ||
2473 | |||
2474 | int | ||
2475 | i915_gem_object_put_fence(struct drm_i915_gem_object *obj) | ||
2476 | { | ||
2477 | int ret; | ||
2478 | |||
2479 | if (obj->tiling_mode) | ||
2480 | i915_gem_release_mmap(obj); | ||
2481 | |||
2482 | ret = i915_gem_object_flush_fence(obj, NULL); | ||
2483 | if (ret) | ||
2484 | return ret; | ||
2485 | |||
2486 | if (obj->fence_reg != I915_FENCE_REG_NONE) { | ||
2487 | struct drm_i915_private *dev_priv = obj->base.dev->dev_private; | ||
2488 | i915_gem_clear_fence_reg(obj->base.dev, | ||
2489 | &dev_priv->fence_regs[obj->fence_reg]); | ||
2490 | |||
2491 | obj->fence_reg = I915_FENCE_REG_NONE; | ||
2492 | } | ||
2493 | |||
2494 | return 0; | ||
2242 | } | 2495 | } |
2243 | 2496 | ||
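A hypothetical caller of the new get/put pair, to show the intended flow (the helper name is invented for this sketch):

static int cpu_access_tiled(struct drm_i915_gem_object *obj)
{
	/* NULL ring: the fence must be in place before CPU access,
	 * so the register is written via MMIO rather than on a ring */
	int ret = i915_gem_object_get_fence(obj, NULL);
	if (ret)
		return ret;

	/* ... detiled access through the fenced GTT mapping ... */

	return i915_gem_object_put_fence(obj);
}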
2244 | static int i915_find_fence_reg(struct drm_device *dev) | 2497 | static struct drm_i915_fence_reg * |
2498 | i915_find_fence_reg(struct drm_device *dev, | ||
2499 | struct intel_ring_buffer *pipelined) | ||
2245 | { | 2500 | { |
2246 | struct drm_i915_fence_reg *reg = NULL; | ||
2247 | struct drm_i915_gem_object *obj_priv = NULL; | ||
2248 | struct drm_i915_private *dev_priv = dev->dev_private; | 2501 | struct drm_i915_private *dev_priv = dev->dev_private; |
2249 | struct drm_gem_object *obj = NULL; | 2502 | struct drm_i915_fence_reg *reg, *first, *avail; |
2250 | int i, avail, ret; | 2503 | int i; |
2251 | 2504 | ||
2252 | /* First try to find a free reg */ | 2505 | /* First try to find a free reg */ |
2253 | avail = 0; | 2506 | avail = NULL; |
2254 | for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) { | 2507 | for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) { |
2255 | reg = &dev_priv->fence_regs[i]; | 2508 | reg = &dev_priv->fence_regs[i]; |
2256 | if (!reg->obj) | 2509 | if (!reg->obj) |
2257 | return i; | 2510 | return reg; |
2258 | 2511 | ||
2259 | obj_priv = to_intel_bo(reg->obj); | 2512 | if (!reg->obj->pin_count) |
2260 | if (!obj_priv->pin_count) | 2513 | avail = reg; |
2261 | avail++; | ||
2262 | } | 2514 | } |
2263 | 2515 | ||
2264 | if (avail == 0) | 2516 | if (avail == NULL) |
2265 | return -ENOSPC; | 2517 | return NULL; |
2266 | 2518 | ||
2267 | /* None available, try to steal one or wait for a user to finish */ | 2519 | /* None available, try to steal one or wait for a user to finish */ |
2268 | i = I915_FENCE_REG_NONE; | 2520 | avail = first = NULL; |
2269 | list_for_each_entry(reg, &dev_priv->mm.fence_list, | 2521 | list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) { |
2270 | lru_list) { | 2522 | if (reg->obj->pin_count) |
2271 | obj = reg->obj; | ||
2272 | obj_priv = to_intel_bo(obj); | ||
2273 | |||
2274 | if (obj_priv->pin_count) | ||
2275 | continue; | 2523 | continue; |
2276 | 2524 | ||
2277 | /* found one! */ | 2525 | if (first == NULL) |
2278 | i = obj_priv->fence_reg; | 2526 | first = reg; |
2279 | break; | ||
2280 | } | ||
2281 | 2527 | ||
2282 | BUG_ON(i == I915_FENCE_REG_NONE); | 2528 | if (!pipelined || |
2529 | !reg->obj->last_fenced_ring || | ||
2530 | reg->obj->last_fenced_ring == pipelined) { | ||
2531 | avail = reg; | ||
2532 | break; | ||
2533 | } | ||
2534 | } | ||
2283 | 2535 | ||
2284 | /* We only have a reference on obj from the active list. put_fence_reg | 2536 | if (avail == NULL) |
2285 | * might drop that one, causing a use-after-free in it. So hold a | 2537 | avail = first; |
2286 | * private reference to obj like the other callers of put_fence_reg | ||
2287 | * (set_tiling ioctl) do. */ | ||
2288 | drm_gem_object_reference(obj); | ||
2289 | ret = i915_gem_object_put_fence_reg(obj); | ||
2290 | drm_gem_object_unreference(obj); | ||
2291 | if (ret != 0) | ||
2292 | return ret; | ||
2293 | 2538 | ||
2294 | return i; | 2539 | return avail; |
2295 | } | 2540 | } |
2296 | 2541 | ||
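Restated compactly, the selection policy above is: a free register wins outright; failing that, take the least-recently-used unpinned register, preferring one whose last fenced access was on the requesting ring, since stealing that one needs no cross-ring wait. A sketch in the driver's context:

static struct drm_i915_fence_reg *
pick_fence(struct drm_i915_private *dev_priv, struct intel_ring_buffer *pipelined)
{
	struct drm_i915_fence_reg *reg, *first = NULL;
	int i;

	for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++)
		if (!dev_priv->fence_regs[i].obj)
			return &dev_priv->fence_regs[i];	/* free */

	list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
		if (reg->obj->pin_count)
			continue;
		if (!first)
			first = reg;	/* oldest unpinned, as fallback */
		if (!pipelined ||
		    !reg->obj->last_fenced_ring ||
		    reg->obj->last_fenced_ring == pipelined)
			return reg;	/* no cross-ring sync needed */
	}
	return first;	/* NULL if every fence is pinned */
}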
2297 | /** | 2542 | /** |
2298 | * i915_gem_object_get_fence_reg - set up a fence reg for an object | 2543 | * i915_gem_object_get_fence - set up a fence reg for an object |
2299 | * @obj: object to map through a fence reg | 2544 | * @obj: object to map through a fence reg |
2545 | * @pipelined: ring on which to queue the change, or NULL for CPU access | ||
2546 | * (the register update is queued on @pipelined when given, else written via MMIO) | ||
2300 | * | 2547 | * |
2301 | * When mapping objects through the GTT, userspace wants to be able to write | 2548 | * When mapping objects through the GTT, userspace wants to be able to write |
2302 | * to them without having to worry about swizzling if the object is tiled. | 2549 | * to them without having to worry about swizzling if the object is tiled. |
@@ -2308,71 +2555,125 @@ static int i915_find_fence_reg(struct drm_device *dev) | |||
2308 | * and tiling format. | 2555 | * and tiling format. |
2309 | */ | 2556 | */ |
2310 | int | 2557 | int |
2311 | i915_gem_object_get_fence_reg(struct drm_gem_object *obj) | 2558 | i915_gem_object_get_fence(struct drm_i915_gem_object *obj, |
2559 | struct intel_ring_buffer *pipelined) | ||
2312 | { | 2560 | { |
2313 | struct drm_device *dev = obj->dev; | 2561 | struct drm_device *dev = obj->base.dev; |
2314 | struct drm_i915_private *dev_priv = dev->dev_private; | 2562 | struct drm_i915_private *dev_priv = dev->dev_private; |
2315 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 2563 | struct drm_i915_fence_reg *reg; |
2316 | struct drm_i915_fence_reg *reg = NULL; | ||
2317 | int ret; | 2564 | int ret; |
2318 | 2565 | ||
2319 | /* Just update our place in the LRU if our fence is getting used. */ | 2566 | /* XXX disable pipelining. There are bugs. Shocking. */ |
2320 | if (obj_priv->fence_reg != I915_FENCE_REG_NONE) { | 2567 | pipelined = NULL; |
2321 | reg = &dev_priv->fence_regs[obj_priv->fence_reg]; | 2568 | |
2569 | /* Just update our place in the LRU if our fence is getting reused. */ | ||
2570 | if (obj->fence_reg != I915_FENCE_REG_NONE) { | ||
2571 | reg = &dev_priv->fence_regs[obj->fence_reg]; | ||
2322 | list_move_tail(®->lru_list, &dev_priv->mm.fence_list); | 2572 | list_move_tail(®->lru_list, &dev_priv->mm.fence_list); |
2573 | |||
2574 | if (obj->tiling_changed) { | ||
2575 | ret = i915_gem_object_flush_fence(obj, pipelined); | ||
2576 | if (ret) | ||
2577 | return ret; | ||
2578 | |||
2579 | if (!obj->fenced_gpu_access && !obj->last_fenced_seqno) | ||
2580 | pipelined = NULL; | ||
2581 | |||
2582 | if (pipelined) { | ||
2583 | reg->setup_seqno = | ||
2584 | i915_gem_next_request_seqno(pipelined); | ||
2585 | obj->last_fenced_seqno = reg->setup_seqno; | ||
2586 | obj->last_fenced_ring = pipelined; | ||
2587 | } | ||
2588 | |||
2589 | goto update; | ||
2590 | } | ||
2591 | |||
2592 | if (!pipelined) { | ||
2593 | if (reg->setup_seqno) { | ||
2594 | if (!ring_passed_seqno(obj->last_fenced_ring, | ||
2595 | reg->setup_seqno)) { | ||
2596 | ret = i915_wait_request(obj->last_fenced_ring, | ||
2597 | reg->setup_seqno); | ||
2598 | if (ret) | ||
2599 | return ret; | ||
2600 | } | ||
2601 | |||
2602 | reg->setup_seqno = 0; | ||
2603 | } | ||
2604 | } else if (obj->last_fenced_ring && | ||
2605 | obj->last_fenced_ring != pipelined) { | ||
2606 | ret = i915_gem_object_flush_fence(obj, pipelined); | ||
2607 | if (ret) | ||
2608 | return ret; | ||
2609 | } | ||
2610 | |||
2323 | return 0; | 2611 | return 0; |
2324 | } | 2612 | } |
2325 | 2613 | ||
2326 | switch (obj_priv->tiling_mode) { | 2614 | reg = i915_find_fence_reg(dev, pipelined); |
2327 | case I915_TILING_NONE: | 2615 | if (reg == NULL) |
2328 | WARN(1, "allocating a fence for non-tiled object?\n"); | 2616 | return -ENOSPC; |
2329 | break; | ||
2330 | case I915_TILING_X: | ||
2331 | if (!obj_priv->stride) | ||
2332 | return -EINVAL; | ||
2333 | WARN((obj_priv->stride & (512 - 1)), | ||
2334 | "object 0x%08x is X tiled but has non-512B pitch\n", | ||
2335 | obj_priv->gtt_offset); | ||
2336 | break; | ||
2337 | case I915_TILING_Y: | ||
2338 | if (!obj_priv->stride) | ||
2339 | return -EINVAL; | ||
2340 | WARN((obj_priv->stride & (128 - 1)), | ||
2341 | "object 0x%08x is Y tiled but has non-128B pitch\n", | ||
2342 | obj_priv->gtt_offset); | ||
2343 | break; | ||
2344 | } | ||
2345 | 2617 | ||
2346 | ret = i915_find_fence_reg(dev); | 2618 | ret = i915_gem_object_flush_fence(obj, pipelined); |
2347 | if (ret < 0) | 2619 | if (ret) |
2348 | return ret; | 2620 | return ret; |
2349 | 2621 | ||
2350 | obj_priv->fence_reg = ret; | 2622 | if (reg->obj) { |
2351 | reg = &dev_priv->fence_regs[obj_priv->fence_reg]; | 2623 | struct drm_i915_gem_object *old = reg->obj; |
2352 | list_add_tail(®->lru_list, &dev_priv->mm.fence_list); | 2624 | |
2625 | drm_gem_object_reference(&old->base); | ||
2626 | |||
2627 | if (old->tiling_mode) | ||
2628 | i915_gem_release_mmap(old); | ||
2629 | |||
2630 | ret = i915_gem_object_flush_fence(old, pipelined); | ||
2631 | if (ret) { | ||
2632 | drm_gem_object_unreference(&old->base); | ||
2633 | return ret; | ||
2634 | } | ||
2635 | |||
2636 | if (old->last_fenced_seqno == 0 && obj->last_fenced_seqno == 0) | ||
2637 | pipelined = NULL; | ||
2638 | |||
2639 | old->fence_reg = I915_FENCE_REG_NONE; | ||
2640 | old->last_fenced_ring = pipelined; | ||
2641 | old->last_fenced_seqno = | ||
2642 | pipelined ? i915_gem_next_request_seqno(pipelined) : 0; | ||
2643 | |||
2644 | drm_gem_object_unreference(&old->base); | ||
2645 | } else if (obj->last_fenced_seqno == 0) | ||
2646 | pipelined = NULL; | ||
2353 | 2647 | ||
2354 | reg->obj = obj; | 2648 | reg->obj = obj; |
2649 | list_move_tail(®->lru_list, &dev_priv->mm.fence_list); | ||
2650 | obj->fence_reg = reg - dev_priv->fence_regs; | ||
2651 | obj->last_fenced_ring = pipelined; | ||
2355 | 2652 | ||
2653 | reg->setup_seqno = | ||
2654 | pipelined ? i915_gem_next_request_seqno(pipelined) : 0; | ||
2655 | obj->last_fenced_seqno = reg->setup_seqno; | ||
2656 | |||
2657 | update: | ||
2658 | obj->tiling_changed = false; | ||
2356 | switch (INTEL_INFO(dev)->gen) { | 2659 | switch (INTEL_INFO(dev)->gen) { |
2660 | case 7: | ||
2357 | case 6: | 2661 | case 6: |
2358 | sandybridge_write_fence_reg(reg); | 2662 | ret = sandybridge_write_fence_reg(obj, pipelined); |
2359 | break; | 2663 | break; |
2360 | case 5: | 2664 | case 5: |
2361 | case 4: | 2665 | case 4: |
2362 | i965_write_fence_reg(reg); | 2666 | ret = i965_write_fence_reg(obj, pipelined); |
2363 | break; | 2667 | break; |
2364 | case 3: | 2668 | case 3: |
2365 | i915_write_fence_reg(reg); | 2669 | ret = i915_write_fence_reg(obj, pipelined); |
2366 | break; | 2670 | break; |
2367 | case 2: | 2671 | case 2: |
2368 | i830_write_fence_reg(reg); | 2672 | ret = i830_write_fence_reg(obj, pipelined); |
2369 | break; | 2673 | break; |
2370 | } | 2674 | } |
2371 | 2675 | ||
2372 | trace_i915_gem_object_get_fence(obj, obj_priv->fence_reg, | 2676 | return ret; |
2373 | obj_priv->tiling_mode); | ||
2374 | |||
2375 | return 0; | ||
2376 | } | 2677 | } |
2377 | 2678 | ||
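The setup_seqno bookkeeping threaded through the function above has one job: when the fence write is queued on a ring instead of performed by MMIO, remember which request carries it, so a later non-pipelined user knows whether it must wait. A hypothetical helper expressing that readiness test:

static bool fence_ready_for_cpu(struct drm_i915_fence_reg *reg,
				struct drm_i915_gem_object *obj)
{
	/* an MMIO write (setup_seqno == 0) completes immediately; a
	 * pipelined write is visible once its request's seqno passes */
	return reg->setup_seqno == 0 ||
	       ring_passed_seqno(obj->last_fenced_ring, reg->setup_seqno);
}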
2378 | /** | 2679 | /** |
@@ -2380,157 +2681,133 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj) | |||
2380 | * @obj: object to clear | 2681 | * @obj: object to clear |
2381 | * | 2682 | * |
2382 | * Zeroes out the fence register itself and clears out the associated | 2683 | * Zeroes out the fence register itself and clears out the associated |
2383 | * data structures in dev_priv and obj_priv. | 2684 | * data structures in dev_priv and obj. |
2384 | */ | 2685 | */ |
2385 | static void | 2686 | static void |
2386 | i915_gem_clear_fence_reg(struct drm_gem_object *obj) | 2687 | i915_gem_clear_fence_reg(struct drm_device *dev, |
2688 | struct drm_i915_fence_reg *reg) | ||
2387 | { | 2689 | { |
2388 | struct drm_device *dev = obj->dev; | ||
2389 | drm_i915_private_t *dev_priv = dev->dev_private; | 2690 | drm_i915_private_t *dev_priv = dev->dev_private; |
2390 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 2691 | uint32_t fence_reg = reg - dev_priv->fence_regs; |
2391 | struct drm_i915_fence_reg *reg = | ||
2392 | &dev_priv->fence_regs[obj_priv->fence_reg]; | ||
2393 | uint32_t fence_reg; | ||
2394 | 2692 | ||
2395 | switch (INTEL_INFO(dev)->gen) { | 2693 | switch (INTEL_INFO(dev)->gen) { |
2694 | case 7: | ||
2396 | case 6: | 2695 | case 6: |
2397 | I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + | 2696 | I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + fence_reg*8, 0); |
2398 | (obj_priv->fence_reg * 8), 0); | ||
2399 | break; | 2697 | break; |
2400 | case 5: | 2698 | case 5: |
2401 | case 4: | 2699 | case 4: |
2402 | I915_WRITE64(FENCE_REG_965_0 + (obj_priv->fence_reg * 8), 0); | 2700 | I915_WRITE64(FENCE_REG_965_0 + fence_reg*8, 0); |
2403 | break; | 2701 | break; |
2404 | case 3: | 2702 | case 3: |
2405 | if (obj_priv->fence_reg >= 8) | 2703 | if (fence_reg >= 8) |
2406 | fence_reg = FENCE_REG_945_8 + (obj_priv->fence_reg - 8) * 4; | 2704 | fence_reg = FENCE_REG_945_8 + (fence_reg - 8) * 4; |
2407 | else | 2705 | else |
2408 | case 2: | 2706 | case 2: |
2409 | fence_reg = FENCE_REG_830_0 + obj_priv->fence_reg * 4; | 2707 | fence_reg = FENCE_REG_830_0 + fence_reg * 4; |
2410 | 2708 | ||
2411 | I915_WRITE(fence_reg, 0); | 2709 | I915_WRITE(fence_reg, 0); |
2412 | break; | 2710 | break; |
2413 | } | 2711 | } |
2414 | 2712 | ||
2415 | reg->obj = NULL; | ||
2416 | obj_priv->fence_reg = I915_FENCE_REG_NONE; | ||
2417 | list_del_init(®->lru_list); | 2713 | list_del_init(®->lru_list); |
2418 | } | 2714 | reg->obj = NULL; |
2419 | 2715 | reg->setup_seqno = 0; | |
2420 | /** | ||
2421 | * i915_gem_object_put_fence_reg - waits on outstanding fenced access | ||
2422 | * to the buffer to finish, and then resets the fence register. | ||
2423 | * @obj: tiled object holding a fence register. | ||
2424 | * | ||
2425 | * Zeroes out the fence register itself and clears out the associated | ||
2426 | * data structures in dev_priv and obj_priv. | ||
2427 | */ | ||
2428 | int | ||
2429 | i915_gem_object_put_fence_reg(struct drm_gem_object *obj) | ||
2430 | { | ||
2431 | struct drm_device *dev = obj->dev; | ||
2432 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | ||
2433 | |||
2434 | if (obj_priv->fence_reg == I915_FENCE_REG_NONE) | ||
2435 | return 0; | ||
2436 | |||
2437 | /* If we've changed tiling, GTT-mappings of the object | ||
2438 | * need to re-fault to ensure that the correct fence register | ||
2439 | * setup is in place. | ||
2440 | */ | ||
2441 | i915_gem_release_mmap(obj); | ||
2442 | |||
2443 | /* On the i915, GPU access to tiled buffers is via a fence, | ||
2444 | * therefore we must wait for any outstanding access to complete | ||
2445 | * before clearing the fence. | ||
2446 | */ | ||
2447 | if (!IS_I965G(dev)) { | ||
2448 | int ret; | ||
2449 | |||
2450 | ret = i915_gem_object_flush_gpu_write_domain(obj); | ||
2451 | if (ret != 0) | ||
2452 | return ret; | ||
2453 | |||
2454 | ret = i915_gem_object_wait_rendering(obj); | ||
2455 | if (ret != 0) | ||
2456 | return ret; | ||
2457 | } | ||
2458 | |||
2459 | i915_gem_object_flush_gtt_write_domain(obj); | ||
2460 | i915_gem_clear_fence_reg (obj); | ||
2461 | |||
2462 | return 0; | ||
2463 | } | 2716 | } |
2464 | 2717 | ||
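The unusual "else case 2:" fallthrough above shares the 830 register bank between gen2 and the first eight gen3 fences. Written out straightforwardly, it is equivalent to this sketch, using the driver's register base names:

static uint32_t fence_reg_offset_gen2_3(int gen, uint32_t fence_reg)
{
	if (gen == 3 && fence_reg >= 8)
		return FENCE_REG_945_8 + (fence_reg - 8) * 4;
	return FENCE_REG_830_0 + fence_reg * 4;	/* gen2, or gen3 below 8 */
}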
2465 | /** | 2718 | /** |
2466 | * Finds free space in the GTT aperture and binds the object there. | 2719 | * Finds free space in the GTT aperture and binds the object there. |
2467 | */ | 2720 | */ |
2468 | static int | 2721 | static int |
2469 | i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment) | 2722 | i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj, |
2723 | unsigned alignment, | ||
2724 | bool map_and_fenceable) | ||
2470 | { | 2725 | { |
2471 | struct drm_device *dev = obj->dev; | 2726 | struct drm_device *dev = obj->base.dev; |
2472 | drm_i915_private_t *dev_priv = dev->dev_private; | 2727 | drm_i915_private_t *dev_priv = dev->dev_private; |
2473 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | ||
2474 | struct drm_mm_node *free_space; | 2728 | struct drm_mm_node *free_space; |
2475 | gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN; | 2729 | gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN; |
2730 | u32 size, fence_size, fence_alignment, unfenced_alignment; | ||
2731 | bool mappable, fenceable; | ||
2476 | int ret; | 2732 | int ret; |
2477 | 2733 | ||
2478 | if (obj_priv->madv != I915_MADV_WILLNEED) { | 2734 | if (obj->madv != I915_MADV_WILLNEED) { |
2479 | DRM_ERROR("Attempting to bind a purgeable object\n"); | 2735 | DRM_ERROR("Attempting to bind a purgeable object\n"); |
2480 | return -EINVAL; | 2736 | return -EINVAL; |
2481 | } | 2737 | } |
2482 | 2738 | ||
2739 | fence_size = i915_gem_get_gtt_size(dev, | ||
2740 | obj->base.size, | ||
2741 | obj->tiling_mode); | ||
2742 | fence_alignment = i915_gem_get_gtt_alignment(dev, | ||
2743 | obj->base.size, | ||
2744 | obj->tiling_mode); | ||
2745 | unfenced_alignment = | ||
2746 | i915_gem_get_unfenced_gtt_alignment(dev, | ||
2747 | obj->base.size, | ||
2748 | obj->tiling_mode); | ||
2749 | |||
2483 | if (alignment == 0) | 2750 | if (alignment == 0) |
2484 | alignment = i915_gem_get_gtt_alignment(obj); | 2751 | alignment = map_and_fenceable ? fence_alignment : |
2485 | if (alignment & (i915_gem_get_gtt_alignment(obj) - 1)) { | 2752 | unfenced_alignment; |
2753 | if (map_and_fenceable && alignment & (fence_alignment - 1)) { | ||
2486 | DRM_ERROR("Invalid object alignment requested %u\n", alignment); | 2754 | DRM_ERROR("Invalid object alignment requested %u\n", alignment); |
2487 | return -EINVAL; | 2755 | return -EINVAL; |
2488 | } | 2756 | } |
2489 | 2757 | ||
2758 | size = map_and_fenceable ? fence_size : obj->base.size; | ||
2759 | |||
2490 | /* If the object is bigger than the entire aperture, reject it early | 2760 | /* If the object is bigger than the entire aperture, reject it early |
2491 | * before evicting everything in a vain attempt to find space. | 2761 | * before evicting everything in a vain attempt to find space. |
2492 | */ | 2762 | */ |
2493 | if (obj->size > dev->gtt_total) { | 2763 | if (obj->base.size > |
2764 | (map_and_fenceable ? dev_priv->mm.gtt_mappable_end : dev_priv->mm.gtt_total)) { | ||
2494 | DRM_ERROR("Attempting to bind an object larger than the aperture\n"); | 2765 | DRM_ERROR("Attempting to bind an object larger than the aperture\n"); |
2495 | return -E2BIG; | 2766 | return -E2BIG; |
2496 | } | 2767 | } |
2497 | 2768 | ||
2498 | search_free: | 2769 | search_free: |
2499 | free_space = drm_mm_search_free(&dev_priv->mm.gtt_space, | 2770 | if (map_and_fenceable) |
2500 | obj->size, alignment, 0); | 2771 | free_space = |
2772 | drm_mm_search_free_in_range(&dev_priv->mm.gtt_space, | ||
2773 | size, alignment, 0, | ||
2774 | dev_priv->mm.gtt_mappable_end, | ||
2775 | 0); | ||
2776 | else | ||
2777 | free_space = drm_mm_search_free(&dev_priv->mm.gtt_space, | ||
2778 | size, alignment, 0); | ||
2779 | |||
2501 | if (free_space != NULL) { | 2780 | if (free_space != NULL) { |
2502 | obj_priv->gtt_space = drm_mm_get_block(free_space, obj->size, | 2781 | if (map_and_fenceable) |
2503 | alignment); | 2782 | obj->gtt_space = |
2504 | if (obj_priv->gtt_space != NULL) | 2783 | drm_mm_get_block_range_generic(free_space, |
2505 | obj_priv->gtt_offset = obj_priv->gtt_space->start; | 2784 | size, alignment, 0, |
2785 | dev_priv->mm.gtt_mappable_end, | ||
2786 | 0); | ||
2787 | else | ||
2788 | obj->gtt_space = | ||
2789 | drm_mm_get_block(free_space, size, alignment); | ||
2506 | } | 2790 | } |
2507 | if (obj_priv->gtt_space == NULL) { | 2791 | if (obj->gtt_space == NULL) { |
2508 | /* If the gtt is empty and we're still having trouble | 2792 | /* If the gtt is empty and we're still having trouble |
2509 | * fitting our object in, we're out of memory. | 2793 | * fitting our object in, we're out of memory. |
2510 | */ | 2794 | */ |
2511 | #if WATCH_LRU | 2795 | ret = i915_gem_evict_something(dev, size, alignment, |
2512 | DRM_INFO("%s: GTT full, evicting something\n", __func__); | 2796 | map_and_fenceable); |
2513 | #endif | ||
2514 | ret = i915_gem_evict_something(dev, obj->size, alignment); | ||
2515 | if (ret) | 2797 | if (ret) |
2516 | return ret; | 2798 | return ret; |
2517 | 2799 | ||
2518 | goto search_free; | 2800 | goto search_free; |
2519 | } | 2801 | } |
2520 | 2802 | ||
2521 | #if WATCH_BUF | 2803 | ret = i915_gem_object_get_pages_gtt(obj, gfpmask); |
2522 | DRM_INFO("Binding object of size %zd at 0x%08x\n", | ||
2523 | obj->size, obj_priv->gtt_offset); | ||
2524 | #endif | ||
2525 | ret = i915_gem_object_get_pages(obj, gfpmask); | ||
2526 | if (ret) { | 2804 | if (ret) { |
2527 | drm_mm_put_block(obj_priv->gtt_space); | 2805 | drm_mm_put_block(obj->gtt_space); |
2528 | obj_priv->gtt_space = NULL; | 2806 | obj->gtt_space = NULL; |
2529 | 2807 | ||
2530 | if (ret == -ENOMEM) { | 2808 | if (ret == -ENOMEM) { |
2531 | /* first try to clear up some space from the GTT */ | 2809 | /* first try to reclaim some memory by clearing the GTT */ |
2532 | ret = i915_gem_evict_something(dev, obj->size, | 2810 | ret = i915_gem_evict_everything(dev, false); |
2533 | alignment); | ||
2534 | if (ret) { | 2811 | if (ret) { |
2535 | /* now try to shrink everyone else */ | 2812 | /* now try to shrink everyone else */ |
2536 | if (gfpmask) { | 2813 | if (gfpmask) { |
@@ -2538,7 +2815,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment) | |||
2538 | goto search_free; | 2815 | goto search_free; |
2539 | } | 2816 | } |
2540 | 2817 | ||
2541 | return ret; | 2818 | return -ENOMEM; |
2542 | } | 2819 | } |
2543 | 2820 | ||
2544 | goto search_free; | 2821 | goto search_free; |
@@ -2547,144 +2824,126 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment) | |||
2547 | return ret; | 2824 | return ret; |
2548 | } | 2825 | } |
2549 | 2826 | ||
2550 | /* Create an AGP memory structure pointing at our pages, and bind it | 2827 | ret = i915_gem_gtt_bind_object(obj); |
2551 | * into the GTT. | 2828 | if (ret) { |
2552 | */ | 2829 | i915_gem_object_put_pages_gtt(obj); |
2553 | obj_priv->agp_mem = drm_agp_bind_pages(dev, | 2830 | drm_mm_put_block(obj->gtt_space); |
2554 | obj_priv->pages, | 2831 | obj->gtt_space = NULL; |
2555 | obj->size >> PAGE_SHIFT, | 2832 | |
2556 | obj_priv->gtt_offset, | 2833 | if (i915_gem_evict_everything(dev, false)) |
2557 | obj_priv->agp_type); | ||
2558 | if (obj_priv->agp_mem == NULL) { | ||
2559 | i915_gem_object_put_pages(obj); | ||
2560 | drm_mm_put_block(obj_priv->gtt_space); | ||
2561 | obj_priv->gtt_space = NULL; | ||
2562 | |||
2563 | ret = i915_gem_evict_something(dev, obj->size, alignment); | ||
2564 | if (ret) | ||
2565 | return ret; | 2834 | return ret; |
2566 | 2835 | ||
2567 | goto search_free; | 2836 | goto search_free; |
2568 | } | 2837 | } |
2569 | atomic_inc(&dev->gtt_count); | ||
2570 | atomic_add(obj->size, &dev->gtt_memory); | ||
2571 | 2838 | ||
2572 | /* keep track of bounds object by adding it to the inactive list */ | 2839 | list_add_tail(&obj->gtt_list, &dev_priv->mm.gtt_list); |
2573 | list_add_tail(&obj_priv->list, &dev_priv->mm.inactive_list); | 2840 | list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list); |
2574 | 2841 | ||
2575 | /* Assert that the object is not currently in any GPU domain. As it | 2842 | /* Assert that the object is not currently in any GPU domain. As it |
2576 | * wasn't in the GTT, there shouldn't be any way it could have been in | 2843 | * wasn't in the GTT, there shouldn't be any way it could have been in |
2577 | * a GPU cache | 2844 | * a GPU cache |
2578 | */ | 2845 | */ |
2579 | BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS); | 2846 | BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS); |
2580 | BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS); | 2847 | BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS); |
2848 | |||
2849 | obj->gtt_offset = obj->gtt_space->start; | ||
2850 | |||
2851 | fenceable = | ||
2852 | obj->gtt_space->size == fence_size && | ||
2853 | (obj->gtt_space->start & (fence_alignment - 1)) == 0; | ||
2854 | |||
2855 | mappable = | ||
2856 | obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end; | ||
2581 | 2857 | ||
2582 | trace_i915_gem_object_bind(obj, obj_priv->gtt_offset); | 2858 | obj->map_and_fenceable = mappable && fenceable; |
2583 | 2859 | ||
2860 | trace_i915_gem_object_bind(obj, map_and_fenceable); | ||
2584 | return 0; | 2861 | return 0; |
2585 | } | 2862 | } |
2586 | 2863 | ||
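The fenceable/mappable pair computed at the end of bind_to_gtt above decides whether the binding can be used directly through the CPU-visible aperture with a fence. A standalone restatement of the predicate:

#include <stdbool.h>
#include <stdint.h>

static bool binding_map_and_fenceable(uint32_t start, uint32_t node_size,
				      uint32_t obj_size, uint32_t fence_size,
				      uint32_t fence_alignment,
				      uint32_t mappable_end)
{
	bool fenceable = node_size == fence_size &&
			 (start & (fence_alignment - 1)) == 0;
	bool mappable = start + obj_size <= mappable_end;
	return fenceable && mappable;
}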
2587 | void | 2864 | void |
2588 | i915_gem_clflush_object(struct drm_gem_object *obj) | 2865 | i915_gem_clflush_object(struct drm_i915_gem_object *obj) |
2589 | { | 2866 | { |
2590 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | ||
2591 | |||
2592 | /* If we don't have a page list set up, then we're not pinned | 2867 | /* If we don't have a page list set up, then we're not pinned |
2593 | * to GPU, and we can ignore the cache flush because it'll happen | 2868 | * to GPU, and we can ignore the cache flush because it'll happen |
2594 | * again at bind time. | 2869 | * again at bind time. |
2595 | */ | 2870 | */ |
2596 | if (obj_priv->pages == NULL) | 2871 | if (obj->pages == NULL) |
2872 | return; | ||
2873 | |||
2874 | /* If the GPU is snooping the contents of the CPU cache, | ||
2875 | * we do not need to manually clear the CPU cache lines. However, | ||
2876 | * the caches are only snooped when the render cache is | ||
2877 | * flushed/invalidated. As we always have to emit invalidations | ||
2878 | * and flushes when moving into and out of the RENDER domain, correct | ||
2879 | * snooping behaviour occurs naturally as the result of our domain | ||
2880 | * tracking. | ||
2881 | */ | ||
2882 | if (obj->cache_level != I915_CACHE_NONE) | ||
2597 | return; | 2883 | return; |
2598 | 2884 | ||
2599 | trace_i915_gem_object_clflush(obj); | 2885 | trace_i915_gem_object_clflush(obj); |
2600 | 2886 | ||
2601 | drm_clflush_pages(obj_priv->pages, obj->size / PAGE_SIZE); | 2887 | drm_clflush_pages(obj->pages, obj->base.size / PAGE_SIZE); |
2602 | } | 2888 | } |
2603 | 2889 | ||
2604 | /** Flushes any GPU write domain for the object if it's dirty. */ | 2890 | /** Flushes any GPU write domain for the object if it's dirty. */ |
2605 | static int | 2891 | static int |
2606 | i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj) | 2892 | i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj) |
2607 | { | 2893 | { |
2608 | struct drm_device *dev = obj->dev; | 2894 | if ((obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0) |
2609 | uint32_t old_write_domain; | ||
2610 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | ||
2611 | |||
2612 | if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0) | ||
2613 | return 0; | 2895 | return 0; |
2614 | 2896 | ||
2615 | /* Queue the GPU write cache flushing we need. */ | 2897 | /* Queue the GPU write cache flushing we need. */ |
2616 | old_write_domain = obj->write_domain; | 2898 | return i915_gem_flush_ring(obj->ring, 0, obj->base.write_domain); |
2617 | i915_gem_flush(dev, 0, obj->write_domain); | ||
2618 | if (i915_add_request(dev, NULL, obj->write_domain, obj_priv->ring) == 0) | ||
2619 | return -ENOMEM; | ||
2620 | |||
2621 | trace_i915_gem_object_change_domain(obj, | ||
2622 | obj->read_domains, | ||
2623 | old_write_domain); | ||
2624 | return 0; | ||
2625 | } | 2899 | } |
2626 | 2900 | ||
2627 | /** Flushes the GTT write domain for the object if it's dirty. */ | 2901 | /** Flushes the GTT write domain for the object if it's dirty. */ |
2628 | static void | 2902 | static void |
2629 | i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj) | 2903 | i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj) |
2630 | { | 2904 | { |
2631 | uint32_t old_write_domain; | 2905 | uint32_t old_write_domain; |
2632 | 2906 | ||
2633 | if (obj->write_domain != I915_GEM_DOMAIN_GTT) | 2907 | if (obj->base.write_domain != I915_GEM_DOMAIN_GTT) |
2634 | return; | 2908 | return; |
2635 | 2909 | ||
2636 | /* No actual flushing is required for the GTT write domain. Writes | 2910 | /* No actual flushing is required for the GTT write domain. Writes |
2637 | * to it immediately go to main memory as far as we know, so there's | 2911 | * to it immediately go to main memory as far as we know, so there's |
2638 | * no chipset flush. It also doesn't land in render cache. | 2912 | * no chipset flush. It also doesn't land in render cache. |
2913 | * | ||
2914 | * However, we do have to enforce the order so that all writes through | ||
2915 | * the GTT land before any writes to the device, such as updates to | ||
2916 | * the GATT itself. | ||
2639 | */ | 2917 | */ |
2640 | old_write_domain = obj->write_domain; | 2918 | wmb(); |
2641 | obj->write_domain = 0; | 2919 | |
2920 | old_write_domain = obj->base.write_domain; | ||
2921 | obj->base.write_domain = 0; | ||
2642 | 2922 | ||
2643 | trace_i915_gem_object_change_domain(obj, | 2923 | trace_i915_gem_object_change_domain(obj, |
2644 | obj->read_domains, | 2924 | obj->base.read_domains, |
2645 | old_write_domain); | 2925 | old_write_domain); |
2646 | } | 2926 | } |
2647 | 2927 | ||
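The wmb() added above enforces what the extended comment says: writes through the write-combined GTT mapping must drain before any write the device might act on. A hypothetical kernel-style helper (names invented) showing the shape of the hazard:

static void kick_after_wc_writes(u32 __iomem *gtt_slot, u32 __iomem *doorbell)
{
	iowrite32(0x1, gtt_slot);	/* may linger in a WC buffer */
	wmb();				/* drain WC buffers first */
	iowrite32(0x1, doorbell);	/* device may now read the data */
}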
2648 | /** Flushes the CPU write domain for the object if it's dirty. */ | 2928 | /** Flushes the CPU write domain for the object if it's dirty. */ |
2649 | static void | 2929 | static void |
2650 | i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj) | 2930 | i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj) |
2651 | { | 2931 | { |
2652 | struct drm_device *dev = obj->dev; | ||
2653 | uint32_t old_write_domain; | 2932 | uint32_t old_write_domain; |
2654 | 2933 | ||
2655 | if (obj->write_domain != I915_GEM_DOMAIN_CPU) | 2934 | if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) |
2656 | return; | 2935 | return; |
2657 | 2936 | ||
2658 | i915_gem_clflush_object(obj); | 2937 | i915_gem_clflush_object(obj); |
2659 | drm_agp_chipset_flush(dev); | 2938 | intel_gtt_chipset_flush(); |
2660 | old_write_domain = obj->write_domain; | 2939 | old_write_domain = obj->base.write_domain; |
2661 | obj->write_domain = 0; | 2940 | obj->base.write_domain = 0; |
2662 | 2941 | ||
2663 | trace_i915_gem_object_change_domain(obj, | 2942 | trace_i915_gem_object_change_domain(obj, |
2664 | obj->read_domains, | 2943 | obj->base.read_domains, |
2665 | old_write_domain); | 2944 | old_write_domain); |
2666 | } | 2945 | } |
2667 | 2946 | ||
2668 | int | ||
2669 | i915_gem_object_flush_write_domain(struct drm_gem_object *obj) | ||
2670 | { | ||
2671 | int ret = 0; | ||
2672 | |||
2673 | switch (obj->write_domain) { | ||
2674 | case I915_GEM_DOMAIN_GTT: | ||
2675 | i915_gem_object_flush_gtt_write_domain(obj); | ||
2676 | break; | ||
2677 | case I915_GEM_DOMAIN_CPU: | ||
2678 | i915_gem_object_flush_cpu_write_domain(obj); | ||
2679 | break; | ||
2680 | default: | ||
2681 | ret = i915_gem_object_flush_gpu_write_domain(obj); | ||
2682 | break; | ||
2683 | } | ||
2684 | |||
2685 | return ret; | ||
2686 | } | ||
2687 | |||
2688 | /** | 2947 | /** |
2689 | * Moves a single object to the GTT read, and possibly write domain. | 2948 | * Moves a single object to the GTT read, and possibly write domain. |
2690 | * | 2949 | * |
@@ -2692,44 +2951,42 @@ i915_gem_object_flush_write_domain(struct drm_gem_object *obj) | |||
2692 | * flushes to occur. | 2951 | * flushes to occur. |
2693 | */ | 2952 | */ |
2694 | int | 2953 | int |
2695 | i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write) | 2954 | i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write) |
2696 | { | 2955 | { |
2697 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | ||
2698 | uint32_t old_write_domain, old_read_domains; | 2956 | uint32_t old_write_domain, old_read_domains; |
2699 | int ret; | 2957 | int ret; |
2700 | 2958 | ||
2701 | /* Not valid to be called on unbound objects. */ | 2959 | /* Not valid to be called on unbound objects. */ |
2702 | if (obj_priv->gtt_space == NULL) | 2960 | if (obj->gtt_space == NULL) |
2703 | return -EINVAL; | 2961 | return -EINVAL; |
2704 | 2962 | ||
2705 | ret = i915_gem_object_flush_gpu_write_domain(obj); | 2963 | if (obj->base.write_domain == I915_GEM_DOMAIN_GTT) |
2706 | if (ret != 0) | 2964 | return 0; |
2707 | return ret; | ||
2708 | 2965 | ||
2709 | /* Wait on any GPU rendering and flushing to occur. */ | 2966 | ret = i915_gem_object_flush_gpu_write_domain(obj); |
2710 | ret = i915_gem_object_wait_rendering(obj); | 2967 | if (ret) |
2711 | if (ret != 0) | ||
2712 | return ret; | 2968 | return ret; |
2713 | 2969 | ||
2714 | old_write_domain = obj->write_domain; | 2970 | if (obj->pending_gpu_write || write) { |
2715 | old_read_domains = obj->read_domains; | 2971 | ret = i915_gem_object_wait_rendering(obj); |
2716 | 2972 | if (ret) | |
2717 | /* If we're writing through the GTT domain, then CPU and GPU caches | 2973 | return ret; |
2718 | * will need to be invalidated at next use. | 2974 | } |
2719 | */ | ||
2720 | if (write) | ||
2721 | obj->read_domains &= I915_GEM_DOMAIN_GTT; | ||
2722 | 2975 | ||
2723 | i915_gem_object_flush_cpu_write_domain(obj); | 2976 | i915_gem_object_flush_cpu_write_domain(obj); |
2724 | 2977 | ||
2978 | old_write_domain = obj->base.write_domain; | ||
2979 | old_read_domains = obj->base.read_domains; | ||
2980 | |||
2725 | /* It should now be out of any other write domains, and we can update | 2981 | /* It should now be out of any other write domains, and we can update |
2726 | * the domain values for our changes. | 2982 | * the domain values for our changes. |
2727 | */ | 2983 | */ |
2728 | BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0); | 2984 | BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0); |
2729 | obj->read_domains |= I915_GEM_DOMAIN_GTT; | 2985 | obj->base.read_domains |= I915_GEM_DOMAIN_GTT; |
2730 | if (write) { | 2986 | if (write) { |
2731 | obj->write_domain = I915_GEM_DOMAIN_GTT; | 2987 | obj->base.read_domains = I915_GEM_DOMAIN_GTT; |
2732 | obj_priv->dirty = 1; | 2988 | obj->base.write_domain = I915_GEM_DOMAIN_GTT; |
2989 | obj->dirty = 1; | ||
2733 | } | 2990 | } |
2734 | 2991 | ||
2735 | trace_i915_gem_object_change_domain(obj, | 2992 | trace_i915_gem_object_change_domain(obj, |
@@ -2744,55 +3001,57 @@ i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write) | |||
2744 | * wait, as in modesetting process we're not supposed to be interrupted. | 3001 | * wait, as in modesetting process we're not supposed to be interrupted. |
2745 | */ | 3002 | */ |
2746 | int | 3003 | int |
2747 | i915_gem_object_set_to_display_plane(struct drm_gem_object *obj) | 3004 | i915_gem_object_set_to_display_plane(struct drm_i915_gem_object *obj, |
3005 | struct intel_ring_buffer *pipelined) | ||
2748 | { | 3006 | { |
2749 | struct drm_device *dev = obj->dev; | 3007 | uint32_t old_read_domains; |
2750 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | ||
2751 | uint32_t old_write_domain, old_read_domains; | ||
2752 | int ret; | 3008 | int ret; |
2753 | 3009 | ||
2754 | /* Not valid to be called on unbound objects. */ | 3010 | /* Not valid to be called on unbound objects. */ |
2755 | if (obj_priv->gtt_space == NULL) | 3011 | if (obj->gtt_space == NULL) |
2756 | return -EINVAL; | 3012 | return -EINVAL; |
2757 | 3013 | ||
2758 | ret = i915_gem_object_flush_gpu_write_domain(obj); | 3014 | ret = i915_gem_object_flush_gpu_write_domain(obj); |
2759 | if (ret) | 3015 | if (ret) |
2760 | return ret; | 3016 | return ret; |
2761 | 3017 | ||
2762 | /* Wait on any GPU rendering and flushing to occur. */ | 3018 | |
2763 | if (obj_priv->active) { | 3019 | /* Currently, we are always called from a non-interruptible context. */ |
2764 | #if WATCH_BUF | 3020 | if (pipelined != obj->ring) { |
2765 | DRM_INFO("%s: object %p wait for seqno %08x\n", | 3021 | ret = i915_gem_object_wait_rendering(obj); |
2766 | __func__, obj, obj_priv->last_rendering_seqno); | 3022 | if (ret) |
2767 | #endif | ||
2768 | ret = i915_do_wait_request(dev, | ||
2769 | obj_priv->last_rendering_seqno, | ||
2770 | 0, | ||
2771 | obj_priv->ring); | ||
2772 | if (ret != 0) | ||
2773 | return ret; | 3023 | return ret; |
2774 | } | 3024 | } |
2775 | 3025 | ||
2776 | i915_gem_object_flush_cpu_write_domain(obj); | 3026 | i915_gem_object_flush_cpu_write_domain(obj); |
2777 | 3027 | ||
2778 | old_write_domain = obj->write_domain; | 3028 | old_read_domains = obj->base.read_domains; |
2779 | old_read_domains = obj->read_domains; | 3029 | obj->base.read_domains |= I915_GEM_DOMAIN_GTT; |
2780 | |||
2781 | /* It should now be out of any other write domains, and we can update | ||
2782 | * the domain values for our changes. | ||
2783 | */ | ||
2784 | BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0); | ||
2785 | obj->read_domains = I915_GEM_DOMAIN_GTT; | ||
2786 | obj->write_domain = I915_GEM_DOMAIN_GTT; | ||
2787 | obj_priv->dirty = 1; | ||
2788 | 3030 | ||
2789 | trace_i915_gem_object_change_domain(obj, | 3031 | trace_i915_gem_object_change_domain(obj, |
2790 | old_read_domains, | 3032 | old_read_domains, |
2791 | old_write_domain); | 3033 | obj->base.write_domain); |
2792 | 3034 | ||
2793 | return 0; | 3035 | return 0; |
2794 | } | 3036 | } |
2795 | 3037 | ||
3038 | int | ||
3039 | i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj) | ||
3040 | { | ||
3041 | int ret; | ||
3042 | |||
3043 | if (!obj->active) | ||
3044 | return 0; | ||
3045 | |||
3046 | if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) { | ||
3047 | ret = i915_gem_flush_ring(obj->ring, 0, obj->base.write_domain); | ||
3048 | if (ret) | ||
3049 | return ret; | ||
3050 | } | ||
3051 | |||
3052 | return i915_gem_object_wait_rendering(obj); | ||
3053 | } | ||
3054 | |||
2796 | /** | 3055 | /** |
2797 | * Moves a single object to the CPU read, and possibly write domain. | 3056 | * Moves a single object to the CPU read, and possibly write domain. |
2798 | * | 3057 | * |
@@ -2800,18 +3059,20 @@ i915_gem_object_set_to_display_plane(struct drm_gem_object *obj) | |||
2800 | * flushes to occur. | 3059 | * flushes to occur. |
2801 | */ | 3060 | */ |
2802 | static int | 3061 | static int |
2803 | i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write) | 3062 | i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write) |
2804 | { | 3063 | { |
2805 | uint32_t old_write_domain, old_read_domains; | 3064 | uint32_t old_write_domain, old_read_domains; |
2806 | int ret; | 3065 | int ret; |
2807 | 3066 | ||
3067 | if (obj->base.write_domain == I915_GEM_DOMAIN_CPU) | ||
3068 | return 0; | ||
3069 | |||
2808 | ret = i915_gem_object_flush_gpu_write_domain(obj); | 3070 | ret = i915_gem_object_flush_gpu_write_domain(obj); |
2809 | if (ret) | 3071 | if (ret) |
2810 | return ret; | 3072 | return ret; |
2811 | 3073 | ||
2812 | /* Wait on any GPU rendering and flushing to occur. */ | ||
2813 | ret = i915_gem_object_wait_rendering(obj); | 3074 | ret = i915_gem_object_wait_rendering(obj); |
2814 | if (ret != 0) | 3075 | if (ret) |
2815 | return ret; | 3076 | return ret; |
2816 | 3077 | ||
2817 | i915_gem_object_flush_gtt_write_domain(obj); | 3078 | i915_gem_object_flush_gtt_write_domain(obj); |
@@ -2821,27 +3082,27 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write) | |||
2821 | */ | 3082 | */ |
2822 | i915_gem_object_set_to_full_cpu_read_domain(obj); | 3083 | i915_gem_object_set_to_full_cpu_read_domain(obj); |
2823 | 3084 | ||
2824 | old_write_domain = obj->write_domain; | 3085 | old_write_domain = obj->base.write_domain; |
2825 | old_read_domains = obj->read_domains; | 3086 | old_read_domains = obj->base.read_domains; |
2826 | 3087 | ||
2827 | /* Flush the CPU cache if it's still invalid. */ | 3088 | /* Flush the CPU cache if it's still invalid. */ |
2828 | if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) { | 3089 | if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) { |
2829 | i915_gem_clflush_object(obj); | 3090 | i915_gem_clflush_object(obj); |
2830 | 3091 | ||
2831 | obj->read_domains |= I915_GEM_DOMAIN_CPU; | 3092 | obj->base.read_domains |= I915_GEM_DOMAIN_CPU; |
2832 | } | 3093 | } |
2833 | 3094 | ||
2834 | /* It should now be out of any other write domains, and we can update | 3095 | /* It should now be out of any other write domains, and we can update |
2835 | * the domain values for our changes. | 3096 | * the domain values for our changes. |
2836 | */ | 3097 | */ |
2837 | BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0); | 3098 | BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0); |
2838 | 3099 | ||
2839 | /* If we're writing through the CPU, then the GPU read domains will | 3100 | /* If we're writing through the CPU, then the GPU read domains will |
2840 | * need to be invalidated at next use. | 3101 | * need to be invalidated at next use. |
2841 | */ | 3102 | */ |
2842 | if (write) { | 3103 | if (write) { |
2843 | obj->read_domains &= I915_GEM_DOMAIN_CPU; | 3104 | obj->base.read_domains = I915_GEM_DOMAIN_CPU; |
2844 | obj->write_domain = I915_GEM_DOMAIN_CPU; | 3105 | obj->base.write_domain = I915_GEM_DOMAIN_CPU; |
2845 | } | 3106 | } |
2846 | 3107 | ||
2847 | trace_i915_gem_object_change_domain(obj, | 3108 | trace_i915_gem_object_change_domain(obj, |
@@ -2851,205 +3112,6 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write) | |||
2851 | return 0; | 3112 | return 0; |
2852 | } | 3113 | } |
2853 | 3114 | ||
2854 | /* | ||
2855 | * Set the next domain for the specified object. This | ||
2856 | * may not actually perform the necessary flushing/invaliding though, | ||
2857 | * as that may want to be batched with other set_domain operations | ||
2858 | * | ||
2859 | * This is (we hope) the only really tricky part of gem. The goal | ||
2860 | * is fairly simple -- track which caches hold bits of the object | ||
2861 | * and make sure they remain coherent. A few concrete examples may | ||
2862 | * help to explain how it works. For shorthand, we use the notation | ||
2863 | * (read_domains, write_domain), e.g. (CPU, CPU) to indicate a | ||
2864 | * pair of read and write domain masks. | ||
2865 | * | ||
2866 | * Case 1: the batch buffer | ||
2867 | * | ||
2868 | * 1. Allocated | ||
2869 | * 2. Written by CPU | ||
2870 | * 3. Mapped to GTT | ||
2871 | * 4. Read by GPU | ||
2872 | * 5. Unmapped from GTT | ||
2873 | * 6. Freed | ||
2874 | * | ||
2875 | * Let's take these a step at a time | ||
2876 | * | ||
2877 | * 1. Allocated | ||
2878 | * Pages allocated from the kernel may still have | ||
2879 | * cache contents, so we set them to (CPU, CPU) always. | ||
2880 | * 2. Written by CPU (using pwrite) | ||
2881 | * The pwrite function calls set_domain (CPU, CPU) and | ||
2882 | * this function does nothing (as nothing changes) | ||
2883 | * 3. Mapped by GTT | ||
2884 | * This function asserts that the object is not | ||
2885 | * currently in any GPU-based read or write domains | ||
2886 | * 4. Read by GPU | ||
2887 | * i915_gem_execbuffer calls set_domain (COMMAND, 0). | ||
2888 | * As write_domain is zero, this function adds in the | ||
2889 | * current read domains (CPU+COMMAND, 0). | ||
2890 | * flush_domains is set to CPU. | ||
2891 | * invalidate_domains is set to COMMAND | ||
2892 | * clflush is run to get data out of the CPU caches | ||
2893 | * then i915_dev_set_domain calls i915_gem_flush to | ||
2894 | * emit an MI_FLUSH and drm_agp_chipset_flush | ||
2895 | * 5. Unmapped from GTT | ||
2896 | * i915_gem_object_unbind calls set_domain (CPU, CPU) | ||
2897 | * flush_domains and invalidate_domains end up both zero | ||
2898 | * so no flushing/invalidating happens | ||
2899 | * 6. Freed | ||
2900 | * yay, done | ||
2901 | * | ||
2902 | * Case 2: The shared render buffer | ||
2903 | * | ||
2904 | * 1. Allocated | ||
2905 | * 2. Mapped to GTT | ||
2906 | * 3. Read/written by GPU | ||
2907 | * 4. set_domain to (CPU,CPU) | ||
2908 | * 5. Read/written by CPU | ||
2909 | * 6. Read/written by GPU | ||
2910 | * | ||
2911 | * 1. Allocated | ||
2912 | * Same as last example, (CPU, CPU) | ||
2913 | * 2. Mapped to GTT | ||
2914 | * Nothing changes (assertions find that it is not in the GPU) | ||
2915 | * 3. Read/written by GPU | ||
2916 | * execbuffer calls set_domain (RENDER, RENDER) | ||
2917 | * flush_domains gets CPU | ||
2918 | * invalidate_domains gets GPU | ||
2919 | * clflush (obj) | ||
2920 | * MI_FLUSH and drm_agp_chipset_flush | ||
2921 | * 4. set_domain (CPU, CPU) | ||
2922 | * flush_domains gets GPU | ||
2923 | * invalidate_domains gets CPU | ||
2924 | * wait_rendering (obj) to make sure all drawing is complete. | ||
2925 | * This will include an MI_FLUSH to get the data from GPU | ||
2926 | * to memory | ||
2927 | * clflush (obj) to invalidate the CPU cache | ||
2928 | * Another MI_FLUSH in i915_gem_flush (eliminate this somehow?) | ||
2929 | * 5. Read/written by CPU | ||
2930 | * cache lines are loaded and dirtied | ||
2932 | * 6. Read/written by GPU | ||
2932 | * Same as last GPU access | ||
2933 | * | ||
2934 | * Case 3: The constant buffer | ||
2935 | * | ||
2936 | * 1. Allocated | ||
2937 | * 2. Written by CPU | ||
2938 | * 3. Read by GPU | ||
2939 | * 4. Updated (written) by CPU again | ||
2940 | * 5. Read by GPU | ||
2941 | * | ||
2942 | * 1. Allocated | ||
2943 | * (CPU, CPU) | ||
2944 | * 2. Written by CPU | ||
2945 | * (CPU, CPU) | ||
2946 | * 3. Read by GPU | ||
2947 | * (CPU+RENDER, 0) | ||
2948 | * flush_domains = CPU | ||
2949 | * invalidate_domains = RENDER | ||
2950 | * clflush (obj) | ||
2951 | * MI_FLUSH | ||
2952 | * drm_agp_chipset_flush | ||
2953 | * 4. Updated (written) by CPU again | ||
2954 | * (CPU, CPU) | ||
2955 | * flush_domains = 0 (no previous write domain) | ||
2956 | * invalidate_domains = 0 (no new read domains) | ||
2957 | * 5. Read by GPU | ||
2958 | * (CPU+RENDER, 0) | ||
2959 | * flush_domains = CPU | ||
2960 | * invalidate_domains = RENDER | ||
2961 | * clflush (obj) | ||
2962 | * MI_FLUSH | ||
2963 | * drm_agp_chipset_flush | ||
2964 | */ | ||
2965 | static void | ||
2966 | i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj) | ||
2967 | { | ||
2968 | struct drm_device *dev = obj->dev; | ||
2969 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
2970 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | ||
2971 | uint32_t invalidate_domains = 0; | ||
2972 | uint32_t flush_domains = 0; | ||
2973 | uint32_t old_read_domains; | ||
2974 | |||
2975 | BUG_ON(obj->pending_read_domains & I915_GEM_DOMAIN_CPU); | ||
2976 | BUG_ON(obj->pending_write_domain == I915_GEM_DOMAIN_CPU); | ||
2977 | |||
2978 | intel_mark_busy(dev, obj); | ||
2979 | |||
2980 | #if WATCH_BUF | ||
2981 | DRM_INFO("%s: object %p read %08x -> %08x write %08x -> %08x\n", | ||
2982 | __func__, obj, | ||
2983 | obj->read_domains, obj->pending_read_domains, | ||
2984 | obj->write_domain, obj->pending_write_domain); | ||
2985 | #endif | ||
2986 | /* | ||
2987 | * If the object isn't moving to a new write domain, | ||
2988 | * let the object stay in multiple read domains | ||
2989 | */ | ||
2990 | if (obj->pending_write_domain == 0) | ||
2991 | obj->pending_read_domains |= obj->read_domains; | ||
2992 | else | ||
2993 | obj_priv->dirty = 1; | ||
2994 | |||
2995 | /* | ||
2996 | * Flush the current write domain if | ||
2997 | * the new read domains don't match. Invalidate | ||
2998 | * any read domains which differ from the old | ||
2999 | * write domain | ||
3000 | */ | ||
3001 | if (obj->write_domain && | ||
3002 | obj->write_domain != obj->pending_read_domains) { | ||
3003 | flush_domains |= obj->write_domain; | ||
3004 | invalidate_domains |= | ||
3005 | obj->pending_read_domains & ~obj->write_domain; | ||
3006 | } | ||
3007 | /* | ||
3008 | * Invalidate any read caches which may have | ||
3009 | * stale data. That is, any new read domains. | ||
3010 | */ | ||
3011 | invalidate_domains |= obj->pending_read_domains & ~obj->read_domains; | ||
3012 | if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU) { | ||
3013 | #if WATCH_BUF | ||
3014 | DRM_INFO("%s: CPU domain flush %08x invalidate %08x\n", | ||
3015 | __func__, flush_domains, invalidate_domains); | ||
3016 | #endif | ||
3017 | i915_gem_clflush_object(obj); | ||
3018 | } | ||
3019 | |||
3020 | old_read_domains = obj->read_domains; | ||
3021 | |||
3022 | /* The actual obj->write_domain will be updated with | ||
3023 | * pending_write_domain after we emit the accumulated flush for all | ||
3024 | * of our domain changes in execbuffers (which clears objects' | ||
3025 | * write_domains). So if we have a current write domain that we | ||
3026 | * aren't changing, set pending_write_domain to that. | ||
3027 | */ | ||
3028 | if (flush_domains == 0 && obj->pending_write_domain == 0) | ||
3029 | obj->pending_write_domain = obj->write_domain; | ||
3030 | obj->read_domains = obj->pending_read_domains; | ||
3031 | |||
3032 | if (flush_domains & I915_GEM_GPU_DOMAINS) { | ||
3033 | if (obj_priv->ring == &dev_priv->render_ring) | ||
3034 | dev_priv->flush_rings |= FLUSH_RENDER_RING; | ||
3035 | else if (obj_priv->ring == &dev_priv->bsd_ring) | ||
3036 | dev_priv->flush_rings |= FLUSH_BSD_RING; | ||
3037 | } | ||
3038 | |||
3039 | dev->invalidate_domains |= invalidate_domains; | ||
3040 | dev->flush_domains |= flush_domains; | ||
3041 | #if WATCH_BUF | ||
3042 | DRM_INFO("%s: read %08x write %08x invalidate %08x flush %08x\n", | ||
3043 | __func__, | ||
3044 | obj->read_domains, obj->write_domain, | ||
3045 | dev->invalidate_domains, dev->flush_domains); | ||
3046 | #endif | ||
3047 | |||
3048 | trace_i915_gem_object_change_domain(obj, | ||
3049 | old_read_domains, | ||
3050 | obj->write_domain); | ||
3051 | } | ||
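
The Case 3 walkthrough in the comment above can be checked by hand against the function's domain arithmetic. A minimal standalone sketch of that arithmetic, assuming stand-in DOMAIN_* bits rather than the kernel's I915_GEM_DOMAIN_* definitions:

#include <stdint.h>
#include <stdio.h>

#define DOMAIN_CPU    (1u << 0) /* stand-in for I915_GEM_DOMAIN_CPU */
#define DOMAIN_RENDER (1u << 1) /* stand-in for I915_GEM_DOMAIN_RENDER */

int main(void)
{
	/* Case 3, step 3: object is (CPU, CPU); execbuffer asks for
	 * (RENDER, 0). */
	uint32_t read_domains = DOMAIN_CPU, write_domain = DOMAIN_CPU;
	uint32_t pending_read = DOMAIN_RENDER, pending_write = 0;
	uint32_t flush = 0, invalidate = 0;

	/* No new write domain, so the old read domains are kept. */
	if (pending_write == 0)
		pending_read |= read_domains;

	/* Flush the old write domain when the new readers differ. */
	if (write_domain && write_domain != pending_read) {
		flush |= write_domain;
		invalidate |= pending_read & ~write_domain;
	}

	/* Invalidate any newly added read domains. */
	invalidate |= pending_read & ~read_domains;

	/* Prints flush=0x1 (CPU) and invalidate=0x2 (RENDER): the
	 * clflush + MI_FLUSH sequence the walkthrough describes. */
	printf("flush=%#x invalidate=%#x\n",
	       (unsigned)flush, (unsigned)invalidate);
	return 0;
}
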
3052 | |||
3053 | /** | 3115 | /** |
3054 | * Moves the object from a partially CPU read to a full one. | 3116 | * Moves the object from a partially CPU read to a full one. |
3055 | * | 3117 | * |
@@ -3057,30 +3119,28 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj) | |||
3057 | * and doesn't handle transitioning from !(read_domains & I915_GEM_DOMAIN_CPU). | 3119 | * and doesn't handle transitioning from !(read_domains & I915_GEM_DOMAIN_CPU). |
3058 | */ | 3120 | */ |
3059 | static void | 3121 | static void |
3060 | i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj) | 3122 | i915_gem_object_set_to_full_cpu_read_domain(struct drm_i915_gem_object *obj) |
3061 | { | 3123 | { |
3062 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 3124 | if (!obj->page_cpu_valid) |
3063 | |||
3064 | if (!obj_priv->page_cpu_valid) | ||
3065 | return; | 3125 | return; |
3066 | 3126 | ||
3067 | /* If we're partially in the CPU read domain, finish moving it in. | 3127 | /* If we're partially in the CPU read domain, finish moving it in. |
3068 | */ | 3128 | */ |
3069 | if (obj->read_domains & I915_GEM_DOMAIN_CPU) { | 3129 | if (obj->base.read_domains & I915_GEM_DOMAIN_CPU) { |
3070 | int i; | 3130 | int i; |
3071 | 3131 | ||
3072 | for (i = 0; i <= (obj->size - 1) / PAGE_SIZE; i++) { | 3132 | for (i = 0; i <= (obj->base.size - 1) / PAGE_SIZE; i++) { |
3073 | if (obj_priv->page_cpu_valid[i]) | 3133 | if (obj->page_cpu_valid[i]) |
3074 | continue; | 3134 | continue; |
3075 | drm_clflush_pages(obj_priv->pages + i, 1); | 3135 | drm_clflush_pages(obj->pages + i, 1); |
3076 | } | 3136 | } |
3077 | } | 3137 | } |
3078 | 3138 | ||
3079 | /* Free the page_cpu_valid mappings which are now stale, whether | 3139 | /* Free the page_cpu_valid mappings which are now stale, whether |
3080 | * or not we've got I915_GEM_DOMAIN_CPU. | 3140 | * or not we've got I915_GEM_DOMAIN_CPU. |
3081 | */ | 3141 | */ |
3082 | kfree(obj_priv->page_cpu_valid); | 3142 | kfree(obj->page_cpu_valid); |
3083 | obj_priv->page_cpu_valid = NULL; | 3143 | obj->page_cpu_valid = NULL; |
3084 | } | 3144 | } |
3085 | 3145 | ||
3086 | /** | 3146 | /** |
@@ -3096,282 +3156,66 @@ i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj) | |||
3096 | * flushes to occur. | 3156 | * flushes to occur. |
3097 | */ | 3157 | */ |
3098 | static int | 3158 | static int |
3099 | i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj, | 3159 | i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj, |
3100 | uint64_t offset, uint64_t size) | 3160 | uint64_t offset, uint64_t size) |
3101 | { | 3161 | { |
3102 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | ||
3103 | uint32_t old_read_domains; | 3162 | uint32_t old_read_domains; |
3104 | int i, ret; | 3163 | int i, ret; |
3105 | 3164 | ||
3106 | if (offset == 0 && size == obj->size) | 3165 | if (offset == 0 && size == obj->base.size) |
3107 | return i915_gem_object_set_to_cpu_domain(obj, 0); | 3166 | return i915_gem_object_set_to_cpu_domain(obj, 0); |
3108 | 3167 | ||
3109 | ret = i915_gem_object_flush_gpu_write_domain(obj); | 3168 | ret = i915_gem_object_flush_gpu_write_domain(obj); |
3110 | if (ret) | 3169 | if (ret) |
3111 | return ret; | 3170 | return ret; |
3112 | 3171 | ||
3113 | /* Wait on any GPU rendering and flushing to occur. */ | ||
3114 | ret = i915_gem_object_wait_rendering(obj); | 3172 | ret = i915_gem_object_wait_rendering(obj); |
3115 | if (ret != 0) | 3173 | if (ret) |
3116 | return ret; | 3174 | return ret; |
3175 | |||
3117 | i915_gem_object_flush_gtt_write_domain(obj); | 3176 | i915_gem_object_flush_gtt_write_domain(obj); |
3118 | 3177 | ||
3119 | /* If we're already fully in the CPU read domain, we're done. */ | 3178 | /* If we're already fully in the CPU read domain, we're done. */ |
3120 | if (obj_priv->page_cpu_valid == NULL && | 3179 | if (obj->page_cpu_valid == NULL && |
3121 | (obj->read_domains & I915_GEM_DOMAIN_CPU) != 0) | 3180 | (obj->base.read_domains & I915_GEM_DOMAIN_CPU) != 0) |
3122 | return 0; | 3181 | return 0; |
3123 | 3182 | ||
3124 | /* Otherwise, create/clear the per-page CPU read domain flag if we're | 3183 | /* Otherwise, create/clear the per-page CPU read domain flag if we're |
3125 | * newly adding I915_GEM_DOMAIN_CPU | 3184 | * newly adding I915_GEM_DOMAIN_CPU |
3126 | */ | 3185 | */ |
3127 | if (obj_priv->page_cpu_valid == NULL) { | 3186 | if (obj->page_cpu_valid == NULL) { |
3128 | obj_priv->page_cpu_valid = kzalloc(obj->size / PAGE_SIZE, | 3187 | obj->page_cpu_valid = kzalloc(obj->base.size / PAGE_SIZE, |
3129 | GFP_KERNEL); | 3188 | GFP_KERNEL); |
3130 | if (obj_priv->page_cpu_valid == NULL) | 3189 | if (obj->page_cpu_valid == NULL) |
3131 | return -ENOMEM; | 3190 | return -ENOMEM; |
3132 | } else if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) | 3191 | } else if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) |
3133 | memset(obj_priv->page_cpu_valid, 0, obj->size / PAGE_SIZE); | 3192 | memset(obj->page_cpu_valid, 0, obj->base.size / PAGE_SIZE); |
3134 | 3193 | ||
3135 | /* Flush the cache on any pages that are still invalid from the CPU's | 3194 | /* Flush the cache on any pages that are still invalid from the CPU's |
3136 | * perspective. | 3195 | * perspective. |
3137 | */ | 3196 | */ |
3138 | for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE; | 3197 | for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE; |
3139 | i++) { | 3198 | i++) { |
3140 | if (obj_priv->page_cpu_valid[i]) | 3199 | if (obj->page_cpu_valid[i]) |
3141 | continue; | 3200 | continue; |
3142 | 3201 | ||
3143 | drm_clflush_pages(obj_priv->pages + i, 1); | 3202 | drm_clflush_pages(obj->pages + i, 1); |
3144 | 3203 | ||
3145 | obj_priv->page_cpu_valid[i] = 1; | 3204 | obj->page_cpu_valid[i] = 1; |
3146 | } | 3205 | } |
3147 | 3206 | ||
3148 | /* It should now be out of any other write domains, and we can update | 3207 | /* It should now be out of any other write domains, and we can update |
3149 | * the domain values for our changes. | 3208 | * the domain values for our changes. |
3150 | */ | 3209 | */ |
3151 | BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0); | 3210 | BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0); |
3152 | 3211 | ||
3153 | old_read_domains = obj->read_domains; | 3212 | old_read_domains = obj->base.read_domains; |
3154 | obj->read_domains |= I915_GEM_DOMAIN_CPU; | 3213 | obj->base.read_domains |= I915_GEM_DOMAIN_CPU; |
3155 | 3214 | ||
3156 | trace_i915_gem_object_change_domain(obj, | 3215 | trace_i915_gem_object_change_domain(obj, |
3157 | old_read_domains, | 3216 | old_read_domains, |
3158 | obj->write_domain); | 3217 | obj->base.write_domain); |
3159 | |||
3160 | return 0; | ||
3161 | } | ||
3162 | |||
3163 | /** | ||
3164 | * Pin an object to the GTT and evaluate the relocations landing in it. | ||
3165 | */ | ||
3166 | static int | ||
3167 | i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, | ||
3168 | struct drm_file *file_priv, | ||
3169 | struct drm_i915_gem_exec_object2 *entry, | ||
3170 | struct drm_i915_gem_relocation_entry *relocs) | ||
3171 | { | ||
3172 | struct drm_device *dev = obj->dev; | ||
3173 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
3174 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | ||
3175 | int i, ret; | ||
3176 | void __iomem *reloc_page; | ||
3177 | bool need_fence; | ||
3178 | |||
3179 | need_fence = entry->flags & EXEC_OBJECT_NEEDS_FENCE && | ||
3180 | obj_priv->tiling_mode != I915_TILING_NONE; | ||
3181 | |||
3182 | /* Check fence reg constraints and rebind if necessary */ | ||
3183 | if (need_fence && | ||
3184 | !i915_gem_object_fence_offset_ok(obj, | ||
3185 | obj_priv->tiling_mode)) { | ||
3186 | ret = i915_gem_object_unbind(obj); | ||
3187 | if (ret) | ||
3188 | return ret; | ||
3189 | } | ||
3190 | |||
3191 | /* Choose the GTT offset for our buffer and put it there. */ | ||
3192 | ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment); | ||
3193 | if (ret) | ||
3194 | return ret; | ||
3195 | |||
3196 | /* | ||
3197 | * Pre-965 chips need a fence register set up in order to | ||
3198 | * properly handle blits to/from tiled surfaces. | ||
3199 | */ | ||
3200 | if (need_fence) { | ||
3201 | ret = i915_gem_object_get_fence_reg(obj); | ||
3202 | if (ret != 0) { | ||
3203 | i915_gem_object_unpin(obj); | ||
3204 | return ret; | ||
3205 | } | ||
3206 | } | ||
3207 | |||
3208 | entry->offset = obj_priv->gtt_offset; | ||
3209 | |||
3210 | /* Apply the relocations, using the GTT aperture to avoid cache | ||
3211 | * flushing requirements. | ||
3212 | */ | ||
3213 | for (i = 0; i < entry->relocation_count; i++) { | ||
3214 | struct drm_i915_gem_relocation_entry *reloc = &relocs[i]; | ||
3215 | struct drm_gem_object *target_obj; | ||
3216 | struct drm_i915_gem_object *target_obj_priv; | ||
3217 | uint32_t reloc_val, reloc_offset; | ||
3218 | uint32_t __iomem *reloc_entry; | ||
3219 | |||
3220 | target_obj = drm_gem_object_lookup(obj->dev, file_priv, | ||
3221 | reloc->target_handle); | ||
3222 | if (target_obj == NULL) { | ||
3223 | i915_gem_object_unpin(obj); | ||
3224 | return -ENOENT; | ||
3225 | } | ||
3226 | target_obj_priv = to_intel_bo(target_obj); | ||
3227 | |||
3228 | #if WATCH_RELOC | ||
3229 | DRM_INFO("%s: obj %p offset %08x target %d " | ||
3230 | "read %08x write %08x gtt %08x " | ||
3231 | "presumed %08x delta %08x\n", | ||
3232 | __func__, | ||
3233 | obj, | ||
3234 | (int) reloc->offset, | ||
3235 | (int) reloc->target_handle, | ||
3236 | (int) reloc->read_domains, | ||
3237 | (int) reloc->write_domain, | ||
3238 | (int) target_obj_priv->gtt_offset, | ||
3239 | (int) reloc->presumed_offset, | ||
3240 | reloc->delta); | ||
3241 | #endif | ||
3242 | |||
3243 | /* The target buffer should have appeared before us in the | ||
3244 | * exec_object list, so it should have a GTT space bound by now. | ||
3245 | */ | ||
3246 | if (target_obj_priv->gtt_space == NULL) { | ||
3247 | DRM_ERROR("No GTT space found for object %d\n", | ||
3248 | reloc->target_handle); | ||
3249 | drm_gem_object_unreference(target_obj); | ||
3250 | i915_gem_object_unpin(obj); | ||
3251 | return -EINVAL; | ||
3252 | } | ||
3253 | 3218 | ||
3254 | /* Validate that the target is in a valid r/w GPU domain */ | ||
3255 | if (reloc->write_domain & (reloc->write_domain - 1)) { | ||
3256 | DRM_ERROR("reloc with multiple write domains: " | ||
3257 | "obj %p target %d offset %d " | ||
3258 | "read %08x write %08x", | ||
3259 | obj, reloc->target_handle, | ||
3260 | (int) reloc->offset, | ||
3261 | reloc->read_domains, | ||
3262 | reloc->write_domain); | ||
3263 | drm_gem_object_unreference(target_obj); | ||
3264 | i915_gem_object_unpin(obj); | ||
3265 | return -EINVAL; | ||
3266 | } | ||
3267 | if (reloc->write_domain & I915_GEM_DOMAIN_CPU || | ||
3268 | reloc->read_domains & I915_GEM_DOMAIN_CPU) { | ||
3269 | DRM_ERROR("reloc with read/write CPU domains: " | ||
3270 | "obj %p target %d offset %d " | ||
3271 | "read %08x write %08x", | ||
3272 | obj, reloc->target_handle, | ||
3273 | (int) reloc->offset, | ||
3274 | reloc->read_domains, | ||
3275 | reloc->write_domain); | ||
3276 | drm_gem_object_unreference(target_obj); | ||
3277 | i915_gem_object_unpin(obj); | ||
3278 | return -EINVAL; | ||
3279 | } | ||
3280 | if (reloc->write_domain && target_obj->pending_write_domain && | ||
3281 | reloc->write_domain != target_obj->pending_write_domain) { | ||
3282 | DRM_ERROR("Write domain conflict: " | ||
3283 | "obj %p target %d offset %d " | ||
3284 | "new %08x old %08x\n", | ||
3285 | obj, reloc->target_handle, | ||
3286 | (int) reloc->offset, | ||
3287 | reloc->write_domain, | ||
3288 | target_obj->pending_write_domain); | ||
3289 | drm_gem_object_unreference(target_obj); | ||
3290 | i915_gem_object_unpin(obj); | ||
3291 | return -EINVAL; | ||
3292 | } | ||
3293 | |||
3294 | target_obj->pending_read_domains |= reloc->read_domains; | ||
3295 | target_obj->pending_write_domain |= reloc->write_domain; | ||
3296 | |||
3297 | /* If the relocation already has the right value in it, no | ||
3298 | * more work needs to be done. | ||
3299 | */ | ||
3300 | if (target_obj_priv->gtt_offset == reloc->presumed_offset) { | ||
3301 | drm_gem_object_unreference(target_obj); | ||
3302 | continue; | ||
3303 | } | ||
3304 | |||
3305 | /* Check that the relocation address is valid... */ | ||
3306 | if (reloc->offset > obj->size - 4) { | ||
3307 | DRM_ERROR("Relocation beyond object bounds: " | ||
3308 | "obj %p target %d offset %d size %d.\n", | ||
3309 | obj, reloc->target_handle, | ||
3310 | (int) reloc->offset, (int) obj->size); | ||
3311 | drm_gem_object_unreference(target_obj); | ||
3312 | i915_gem_object_unpin(obj); | ||
3313 | return -EINVAL; | ||
3314 | } | ||
3315 | if (reloc->offset & 3) { | ||
3316 | DRM_ERROR("Relocation not 4-byte aligned: " | ||
3317 | "obj %p target %d offset %d.\n", | ||
3318 | obj, reloc->target_handle, | ||
3319 | (int) reloc->offset); | ||
3320 | drm_gem_object_unreference(target_obj); | ||
3321 | i915_gem_object_unpin(obj); | ||
3322 | return -EINVAL; | ||
3323 | } | ||
3324 | |||
3325 | /* and points to somewhere within the target object. */ | ||
3326 | if (reloc->delta >= target_obj->size) { | ||
3327 | DRM_ERROR("Relocation beyond target object bounds: " | ||
3328 | "obj %p target %d delta %d size %d.\n", | ||
3329 | obj, reloc->target_handle, | ||
3330 | (int) reloc->delta, (int) target_obj->size); | ||
3331 | drm_gem_object_unreference(target_obj); | ||
3332 | i915_gem_object_unpin(obj); | ||
3333 | return -EINVAL; | ||
3334 | } | ||
3335 | |||
3336 | ret = i915_gem_object_set_to_gtt_domain(obj, 1); | ||
3337 | if (ret != 0) { | ||
3338 | drm_gem_object_unreference(target_obj); | ||
3339 | i915_gem_object_unpin(obj); | ||
3340 | return -EINVAL; | ||
3341 | } | ||
3342 | |||
3343 | /* Map the page containing the relocation we're going to | ||
3344 | * perform. | ||
3345 | */ | ||
3346 | reloc_offset = obj_priv->gtt_offset + reloc->offset; | ||
3347 | reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping, | ||
3348 | (reloc_offset & | ||
3349 | ~(PAGE_SIZE - 1)), | ||
3350 | KM_USER0); | ||
3351 | reloc_entry = (uint32_t __iomem *)(reloc_page + | ||
3352 | (reloc_offset & (PAGE_SIZE - 1))); | ||
3353 | reloc_val = target_obj_priv->gtt_offset + reloc->delta; | ||
3354 | |||
3355 | #if WATCH_BUF | ||
3356 | DRM_INFO("Applied relocation: %p@0x%08x %08x -> %08x\n", | ||
3357 | obj, (unsigned int) reloc->offset, | ||
3358 | readl(reloc_entry), reloc_val); | ||
3359 | #endif | ||
3360 | writel(reloc_val, reloc_entry); | ||
3361 | io_mapping_unmap_atomic(reloc_page, KM_USER0); | ||
3362 | |||
3363 | /* The updated presumed offset for this entry will be | ||
3364 | * copied back out to the user. | ||
3365 | */ | ||
3366 | reloc->presumed_offset = target_obj_priv->gtt_offset; | ||
3367 | |||
3368 | drm_gem_object_unreference(target_obj); | ||
3369 | } | ||
3370 | |||
3371 | #if WATCH_BUF | ||
3372 | if (0) | ||
3373 | i915_gem_dump_object(obj, 128, __func__, ~0); | ||
3374 | #endif | ||
3375 | return 0; | 3219 | return 0; |
3376 | } | 3220 | } |
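
Both the per-page clflush loop above and the relocation writes in the removed pin-and-relocate path rest on the same page arithmetic: masking with PAGE_SIZE - 1 splits an address into a page base and an in-page offset, and the last page of a byte range is (offset + size - 1) / PAGE_SIZE inclusive. A short sketch, assuming a 4 KiB page size purely for illustration:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096u /* assumed value for the example */

int main(void)
{
	/* A 200-byte range that straddles the page 0/1 boundary. */
	uint64_t offset = 4000, size = 200;
	uint64_t first = offset / PAGE_SIZE;              /* 0 */
	uint64_t last = (offset + size - 1) / PAGE_SIZE;  /* 1 */

	/* Splitting a relocation address into page base and in-page
	 * offset, as the io_mapping code does before writel(). */
	uint32_t reloc_offset = 0x12345678;
	uint32_t page_base = reloc_offset & ~(PAGE_SIZE - 1);
	uint32_t in_page = reloc_offset & (PAGE_SIZE - 1);

	/* Prints: pages 0..1, base=0x12345000 in_page=0x678 */
	printf("pages %llu..%llu, base=%#x in_page=%#x\n",
	       (unsigned long long)first, (unsigned long long)last,
	       (unsigned)page_base, (unsigned)in_page);
	return 0;
}
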
3377 | 3221 | ||
@@ -3386,857 +3230,254 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, | |||
3386 | * relatively low latency when blocking on a particular request to finish. | 3230 | * relatively low latency when blocking on a particular request to finish. |
3387 | */ | 3231 | */ |
3388 | static int | 3232 | static int |
3389 | i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv) | 3233 | i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file) |
3390 | { | 3234 | { |
3391 | struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv; | 3235 | struct drm_i915_private *dev_priv = dev->dev_private; |
3392 | int ret = 0; | 3236 | struct drm_i915_file_private *file_priv = file->driver_priv; |
3393 | unsigned long recent_enough = jiffies - msecs_to_jiffies(20); | 3237 | unsigned long recent_enough = jiffies - msecs_to_jiffies(20); |
3394 | 3238 | struct drm_i915_gem_request *request; | |
3395 | mutex_lock(&dev->struct_mutex); | ||
3396 | while (!list_empty(&i915_file_priv->mm.request_list)) { | ||
3397 | struct drm_i915_gem_request *request; | ||
3398 | |||
3399 | request = list_first_entry(&i915_file_priv->mm.request_list, | ||
3400 | struct drm_i915_gem_request, | ||
3401 | client_list); | ||
3402 | |||
3403 | if (time_after_eq(request->emitted_jiffies, recent_enough)) | ||
3404 | break; | ||
3405 | |||
3406 | ret = i915_wait_request(dev, request->seqno, request->ring); | ||
3407 | if (ret != 0) | ||
3408 | break; | ||
3409 | } | ||
3410 | mutex_unlock(&dev->struct_mutex); | ||
3411 | |||
3412 | return ret; | ||
3413 | } | ||
3414 | |||
3415 | static int | ||
3416 | i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object2 *exec_list, | ||
3417 | uint32_t buffer_count, | ||
3418 | struct drm_i915_gem_relocation_entry **relocs) | ||
3419 | { | ||
3420 | uint32_t reloc_count = 0, reloc_index = 0, i; | ||
3421 | int ret; | ||
3422 | |||
3423 | *relocs = NULL; | ||
3424 | for (i = 0; i < buffer_count; i++) { | ||
3425 | if (reloc_count + exec_list[i].relocation_count < reloc_count) | ||
3426 | return -EINVAL; | ||
3427 | reloc_count += exec_list[i].relocation_count; | ||
3428 | } | ||
3429 | |||
3430 | *relocs = drm_calloc_large(reloc_count, sizeof(**relocs)); | ||
3431 | if (*relocs == NULL) { | ||
3432 | DRM_ERROR("failed to alloc relocs, count %d\n", reloc_count); | ||
3433 | return -ENOMEM; | ||
3434 | } | ||
3435 | |||
3436 | for (i = 0; i < buffer_count; i++) { | ||
3437 | struct drm_i915_gem_relocation_entry __user *user_relocs; | ||
3438 | |||
3439 | user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr; | ||
3440 | |||
3441 | ret = copy_from_user(&(*relocs)[reloc_index], | ||
3442 | user_relocs, | ||
3443 | exec_list[i].relocation_count * | ||
3444 | sizeof(**relocs)); | ||
3445 | if (ret != 0) { | ||
3446 | drm_free_large(*relocs); | ||
3447 | *relocs = NULL; | ||
3448 | return -EFAULT; | ||
3449 | } | ||
3450 | |||
3451 | reloc_index += exec_list[i].relocation_count; | ||
3452 | } | ||
3453 | |||
3454 | return 0; | ||
3455 | } | ||
3456 | |||
3457 | static int | ||
3458 | i915_gem_put_relocs_to_user(struct drm_i915_gem_exec_object2 *exec_list, | ||
3459 | uint32_t buffer_count, | ||
3460 | struct drm_i915_gem_relocation_entry *relocs) | ||
3461 | { | ||
3462 | uint32_t reloc_count = 0, i; | ||
3463 | int ret = 0; | ||
3464 | |||
3465 | if (relocs == NULL) | ||
3466 | return 0; | ||
3467 | |||
3468 | for (i = 0; i < buffer_count; i++) { | ||
3469 | struct drm_i915_gem_relocation_entry __user *user_relocs; | ||
3470 | int unwritten; | ||
3471 | |||
3472 | user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr; | ||
3473 | |||
3474 | unwritten = copy_to_user(user_relocs, | ||
3475 | &relocs[reloc_count], | ||
3476 | exec_list[i].relocation_count * | ||
3477 | sizeof(*relocs)); | ||
3478 | |||
3479 | if (unwritten) { | ||
3480 | ret = -EFAULT; | ||
3481 | goto err; | ||
3482 | } | ||
3483 | |||
3484 | reloc_count += exec_list[i].relocation_count; | ||
3485 | } | ||
3486 | |||
3487 | err: | ||
3488 | drm_free_large(relocs); | ||
3489 | |||
3490 | return ret; | ||
3491 | } | ||
3492 | |||
3493 | static int | ||
3494 | i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec, | ||
3495 | uint64_t exec_offset) | ||
3496 | { | ||
3497 | uint32_t exec_start, exec_len; | ||
3498 | |||
3499 | exec_start = (uint32_t) exec_offset + exec->batch_start_offset; | ||
3500 | exec_len = (uint32_t) exec->batch_len; | ||
3501 | |||
3502 | if ((exec_start | exec_len) & 0x7) | ||
3503 | return -EINVAL; | ||
3504 | |||
3505 | if (!exec_start) | ||
3506 | return -EINVAL; | ||
3507 | |||
3508 | return 0; | ||
3509 | } | ||
3510 | |||
3511 | static int | ||
3512 | i915_gem_wait_for_pending_flip(struct drm_device *dev, | ||
3513 | struct drm_gem_object **object_list, | ||
3514 | int count) | ||
3515 | { | ||
3516 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
3517 | struct drm_i915_gem_object *obj_priv; | ||
3518 | DEFINE_WAIT(wait); | ||
3519 | int i, ret = 0; | ||
3520 | |||
3521 | for (;;) { | ||
3522 | prepare_to_wait(&dev_priv->pending_flip_queue, | ||
3523 | &wait, TASK_INTERRUPTIBLE); | ||
3524 | for (i = 0; i < count; i++) { | ||
3525 | obj_priv = to_intel_bo(object_list[i]); | ||
3526 | if (atomic_read(&obj_priv->pending_flip) > 0) | ||
3527 | break; | ||
3528 | } | ||
3529 | if (i == count) | ||
3530 | break; | ||
3531 | |||
3532 | if (!signal_pending(current)) { | ||
3533 | mutex_unlock(&dev->struct_mutex); | ||
3534 | schedule(); | ||
3535 | mutex_lock(&dev->struct_mutex); | ||
3536 | continue; | ||
3537 | } | ||
3538 | ret = -ERESTARTSYS; | ||
3539 | break; | ||
3540 | } | ||
3541 | finish_wait(&dev_priv->pending_flip_queue, &wait); | ||
3542 | |||
3543 | return ret; | ||
3544 | } | ||
3545 | |||
3546 | |||
3547 | int | ||
3548 | i915_gem_do_execbuffer(struct drm_device *dev, void *data, | ||
3549 | struct drm_file *file_priv, | ||
3550 | struct drm_i915_gem_execbuffer2 *args, | ||
3551 | struct drm_i915_gem_exec_object2 *exec_list) | ||
3552 | { | ||
3553 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
3554 | struct drm_gem_object **object_list = NULL; | ||
3555 | struct drm_gem_object *batch_obj; | ||
3556 | struct drm_i915_gem_object *obj_priv; | ||
3557 | struct drm_clip_rect *cliprects = NULL; | ||
3558 | struct drm_i915_gem_relocation_entry *relocs = NULL; | ||
3559 | int ret = 0, ret2, i, pinned = 0; | ||
3560 | uint64_t exec_offset; | ||
3561 | uint32_t seqno, flush_domains, reloc_index; | ||
3562 | int pin_tries, flips; | ||
3563 | |||
3564 | struct intel_ring_buffer *ring = NULL; | 3239 | struct intel_ring_buffer *ring = NULL; |
3240 | u32 seqno = 0; | ||
3241 | int ret; | ||
3565 | 3242 | ||
3566 | #if WATCH_EXEC | 3243 | if (atomic_read(&dev_priv->mm.wedged)) |
3567 | DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n", | 3244 | return -EIO; |
3568 | (int) args->buffers_ptr, args->buffer_count, args->batch_len); | ||
3569 | #endif | ||
3570 | if (args->flags & I915_EXEC_BSD) { | ||
3571 | if (!HAS_BSD(dev)) { | ||
3572 | DRM_ERROR("execbuf with wrong flag\n"); | ||
3573 | return -EINVAL; | ||
3574 | } | ||
3575 | ring = &dev_priv->bsd_ring; | ||
3576 | } else { | ||
3577 | ring = &dev_priv->render_ring; | ||
3578 | } | ||
3579 | |||
3580 | if (args->buffer_count < 1) { | ||
3581 | DRM_ERROR("execbuf with %d buffers\n", args->buffer_count); | ||
3582 | return -EINVAL; | ||
3583 | } | ||
3584 | object_list = drm_malloc_ab(sizeof(*object_list), args->buffer_count); | ||
3585 | if (object_list == NULL) { | ||
3586 | DRM_ERROR("Failed to allocate object list for %d buffers\n", | ||
3587 | args->buffer_count); | ||
3588 | ret = -ENOMEM; | ||
3589 | goto pre_mutex_err; | ||
3590 | } | ||
3591 | |||
3592 | if (args->num_cliprects != 0) { | ||
3593 | cliprects = kcalloc(args->num_cliprects, sizeof(*cliprects), | ||
3594 | GFP_KERNEL); | ||
3595 | if (cliprects == NULL) { | ||
3596 | ret = -ENOMEM; | ||
3597 | goto pre_mutex_err; | ||
3598 | } | ||
3599 | |||
3600 | ret = copy_from_user(cliprects, | ||
3601 | (struct drm_clip_rect __user *) | ||
3602 | (uintptr_t) args->cliprects_ptr, | ||
3603 | sizeof(*cliprects) * args->num_cliprects); | ||
3604 | if (ret != 0) { | ||
3605 | DRM_ERROR("copy %d cliprects failed: %d\n", | ||
3606 | args->num_cliprects, ret); | ||
3607 | ret = -EFAULT; | ||
3608 | goto pre_mutex_err; | ||
3609 | } | ||
3610 | } | ||
3611 | |||
3612 | ret = i915_gem_get_relocs_from_user(exec_list, args->buffer_count, | ||
3613 | &relocs); | ||
3614 | if (ret != 0) | ||
3615 | goto pre_mutex_err; | ||
3616 | |||
3617 | mutex_lock(&dev->struct_mutex); | ||
3618 | |||
3619 | i915_verify_inactive(dev, __FILE__, __LINE__); | ||
3620 | |||
3621 | if (atomic_read(&dev_priv->mm.wedged)) { | ||
3622 | mutex_unlock(&dev->struct_mutex); | ||
3623 | ret = -EIO; | ||
3624 | goto pre_mutex_err; | ||
3625 | } | ||
3626 | |||
3627 | if (dev_priv->mm.suspended) { | ||
3628 | mutex_unlock(&dev->struct_mutex); | ||
3629 | ret = -EBUSY; | ||
3630 | goto pre_mutex_err; | ||
3631 | } | ||
3632 | |||
3633 | /* Look up object handles */ | ||
3634 | flips = 0; | ||
3635 | for (i = 0; i < args->buffer_count; i++) { | ||
3636 | object_list[i] = drm_gem_object_lookup(dev, file_priv, | ||
3637 | exec_list[i].handle); | ||
3638 | if (object_list[i] == NULL) { | ||
3639 | DRM_ERROR("Invalid object handle %d at index %d\n", | ||
3640 | exec_list[i].handle, i); | ||
3641 | /* prevent error path from reading uninitialized data */ | ||
3642 | args->buffer_count = i + 1; | ||
3643 | ret = -ENOENT; | ||
3644 | goto err; | ||
3645 | } | ||
3646 | |||
3647 | obj_priv = to_intel_bo(object_list[i]); | ||
3648 | if (obj_priv->in_execbuffer) { | ||
3649 | DRM_ERROR("Object %p appears more than once in object list\n", | ||
3650 | object_list[i]); | ||
3651 | /* prevent error path from reading uninitialized data */ | ||
3652 | args->buffer_count = i + 1; | ||
3653 | ret = -EINVAL; | ||
3654 | goto err; | ||
3655 | } | ||
3656 | obj_priv->in_execbuffer = true; | ||
3657 | flips += atomic_read(&obj_priv->pending_flip); | ||
3658 | } | ||
3659 | |||
3660 | if (flips > 0) { | ||
3661 | ret = i915_gem_wait_for_pending_flip(dev, object_list, | ||
3662 | args->buffer_count); | ||
3663 | if (ret) | ||
3664 | goto err; | ||
3665 | } | ||
3666 | 3245 | ||
3667 | /* Pin and relocate */ | 3246 | spin_lock(&file_priv->mm.lock); |
3668 | for (pin_tries = 0; ; pin_tries++) { | 3247 | list_for_each_entry(request, &file_priv->mm.request_list, client_list) { |
3669 | ret = 0; | 3248 | if (time_after_eq(request->emitted_jiffies, recent_enough)) |
3670 | reloc_index = 0; | ||
3671 | |||
3672 | for (i = 0; i < args->buffer_count; i++) { | ||
3673 | object_list[i]->pending_read_domains = 0; | ||
3674 | object_list[i]->pending_write_domain = 0; | ||
3675 | ret = i915_gem_object_pin_and_relocate(object_list[i], | ||
3676 | file_priv, | ||
3677 | &exec_list[i], | ||
3678 | &relocs[reloc_index]); | ||
3679 | if (ret) | ||
3680 | break; | ||
3681 | pinned = i + 1; | ||
3682 | reloc_index += exec_list[i].relocation_count; | ||
3683 | } | ||
3684 | /* success */ | ||
3685 | if (ret == 0) | ||
3686 | break; | 3249 | break; |
3687 | 3250 | ||
3688 | /* error other than GTT full, or we've already tried again */ | 3251 | ring = request->ring; |
3689 | if (ret != -ENOSPC || pin_tries >= 1) { | 3252 | seqno = request->seqno; |
3690 | if (ret != -ERESTARTSYS) { | ||
3691 | unsigned long long total_size = 0; | ||
3692 | int num_fences = 0; | ||
3693 | for (i = 0; i < args->buffer_count; i++) { | ||
3694 | obj_priv = to_intel_bo(object_list[i]); | ||
3695 | |||
3696 | total_size += object_list[i]->size; | ||
3697 | num_fences += | ||
3698 | exec_list[i].flags & EXEC_OBJECT_NEEDS_FENCE && | ||
3699 | obj_priv->tiling_mode != I915_TILING_NONE; | ||
3700 | } | ||
3701 | DRM_ERROR("Failed to pin buffer %d of %d, total %llu bytes, %d fences: %d\n", | ||
3702 | pinned+1, args->buffer_count, | ||
3703 | total_size, num_fences, | ||
3704 | ret); | ||
3705 | DRM_ERROR("%d objects [%d pinned], " | ||
3706 | "%d object bytes [%d pinned], " | ||
3707 | "%d/%d gtt bytes\n", | ||
3708 | atomic_read(&dev->object_count), | ||
3709 | atomic_read(&dev->pin_count), | ||
3710 | atomic_read(&dev->object_memory), | ||
3711 | atomic_read(&dev->pin_memory), | ||
3712 | atomic_read(&dev->gtt_memory), | ||
3713 | dev->gtt_total); | ||
3714 | } | ||
3715 | goto err; | ||
3716 | } | ||
3717 | |||
3718 | /* unpin all of our buffers */ | ||
3719 | for (i = 0; i < pinned; i++) | ||
3720 | i915_gem_object_unpin(object_list[i]); | ||
3721 | pinned = 0; | ||
3722 | |||
3723 | /* evict everyone we can from the aperture */ | ||
3724 | ret = i915_gem_evict_everything(dev); | ||
3725 | if (ret && ret != -ENOSPC) | ||
3726 | goto err; | ||
3727 | } | ||
3728 | |||
3729 | /* Set the pending read domains for the batch buffer to COMMAND */ | ||
3730 | batch_obj = object_list[args->buffer_count-1]; | ||
3731 | if (batch_obj->pending_write_domain) { | ||
3732 | DRM_ERROR("Attempting to use self-modifying batch buffer\n"); | ||
3733 | ret = -EINVAL; | ||
3734 | goto err; | ||
3735 | } | ||
3736 | batch_obj->pending_read_domains |= I915_GEM_DOMAIN_COMMAND; | ||
3737 | |||
3738 | /* Sanity check the batch buffer, prior to moving objects */ | ||
3739 | exec_offset = exec_list[args->buffer_count - 1].offset; | ||
3740 | ret = i915_gem_check_execbuffer(args, exec_offset); | ||
3741 | if (ret != 0) { | ||
3742 | DRM_ERROR("execbuf with invalid offset/length\n"); | ||
3743 | goto err; | ||
3744 | } | ||
3745 | |||
3746 | i915_verify_inactive(dev, __FILE__, __LINE__); | ||
3747 | |||
3748 | /* Zero the global flush/invalidate flags. These | ||
3749 | * will be modified as new domains are computed | ||
3750 | * for each object | ||
3751 | */ | ||
3752 | dev->invalidate_domains = 0; | ||
3753 | dev->flush_domains = 0; | ||
3754 | dev_priv->flush_rings = 0; | ||
3755 | |||
3756 | for (i = 0; i < args->buffer_count; i++) { | ||
3757 | struct drm_gem_object *obj = object_list[i]; | ||
3758 | |||
3759 | /* Compute new gpu domains and update invalidate/flush */ | ||
3760 | i915_gem_object_set_to_gpu_domain(obj); | ||
3761 | } | 3253 | } |
3254 | spin_unlock(&file_priv->mm.lock); | ||
3762 | 3255 | ||
3763 | i915_verify_inactive(dev, __FILE__, __LINE__); | 3256 | if (seqno == 0) |
3764 | 3257 | return 0; | |
3765 | if (dev->invalidate_domains | dev->flush_domains) { | ||
3766 | #if WATCH_EXEC | ||
3767 | DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n", | ||
3768 | __func__, | ||
3769 | dev->invalidate_domains, | ||
3770 | dev->flush_domains); | ||
3771 | #endif | ||
3772 | i915_gem_flush(dev, | ||
3773 | dev->invalidate_domains, | ||
3774 | dev->flush_domains); | ||
3775 | if (dev_priv->flush_rings & FLUSH_RENDER_RING) | ||
3776 | (void)i915_add_request(dev, file_priv, | ||
3777 | dev->flush_domains, | ||
3778 | &dev_priv->render_ring); | ||
3779 | if (dev_priv->flush_rings & FLUSH_BSD_RING) | ||
3780 | (void)i915_add_request(dev, file_priv, | ||
3781 | dev->flush_domains, | ||
3782 | &dev_priv->bsd_ring); | ||
3783 | } | ||
3784 | |||
3785 | for (i = 0; i < args->buffer_count; i++) { | ||
3786 | struct drm_gem_object *obj = object_list[i]; | ||
3787 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | ||
3788 | uint32_t old_write_domain = obj->write_domain; | ||
3789 | |||
3790 | obj->write_domain = obj->pending_write_domain; | ||
3791 | if (obj->write_domain) | ||
3792 | list_move_tail(&obj_priv->gpu_write_list, | ||
3793 | &dev_priv->mm.gpu_write_list); | ||
3794 | else | ||
3795 | list_del_init(&obj_priv->gpu_write_list); | ||
3796 | |||
3797 | trace_i915_gem_object_change_domain(obj, | ||
3798 | obj->read_domains, | ||
3799 | old_write_domain); | ||
3800 | } | ||
3801 | |||
3802 | i915_verify_inactive(dev, __FILE__, __LINE__); | ||
3803 | |||
3804 | #if WATCH_COHERENCY | ||
3805 | for (i = 0; i < args->buffer_count; i++) { | ||
3806 | i915_gem_object_check_coherency(object_list[i], | ||
3807 | exec_list[i].handle); | ||
3808 | } | ||
3809 | #endif | ||
3810 | |||
3811 | #if WATCH_EXEC | ||
3812 | i915_gem_dump_object(batch_obj, | ||
3813 | args->batch_len, | ||
3814 | __func__, | ||
3815 | ~0); | ||
3816 | #endif | ||
3817 | |||
3818 | /* Exec the batchbuffer */ | ||
3819 | ret = ring->dispatch_gem_execbuffer(dev, ring, args, | ||
3820 | cliprects, exec_offset); | ||
3821 | if (ret) { | ||
3822 | DRM_ERROR("dispatch failed %d\n", ret); | ||
3823 | goto err; | ||
3824 | } | ||
3825 | |||
3826 | /* | ||
3827 | * Ensure that the commands in the batch buffer are | ||
3828 | * finished before the interrupt fires | ||
3829 | */ | ||
3830 | flush_domains = i915_retire_commands(dev, ring); | ||
3831 | |||
3832 | i915_verify_inactive(dev, __FILE__, __LINE__); | ||
3833 | |||
3834 | /* | ||
3835 | * Get a seqno representing the execution of the current buffer, | ||
3836 | * which we can wait on. We would like to mitigate these interrupts, | ||
3837 | * likely by only creating seqnos occasionally (so that we have | ||
3838 | * *some* interrupts representing completion of buffers that we can | ||
3839 | * wait on when trying to clear up gtt space). | ||
3840 | */ | ||
3841 | seqno = i915_add_request(dev, file_priv, flush_domains, ring); | ||
3842 | BUG_ON(seqno == 0); | ||
3843 | for (i = 0; i < args->buffer_count; i++) { | ||
3844 | struct drm_gem_object *obj = object_list[i]; | ||
3845 | obj_priv = to_intel_bo(obj); | ||
3846 | |||
3847 | i915_gem_object_move_to_active(obj, seqno, ring); | ||
3848 | #if WATCH_LRU | ||
3849 | DRM_INFO("%s: move to exec list %p\n", __func__, obj); | ||
3850 | #endif | ||
3851 | } | ||
3852 | #if WATCH_LRU | ||
3853 | i915_dump_lru(dev, __func__); | ||
3854 | #endif | ||
3855 | |||
3856 | i915_verify_inactive(dev, __FILE__, __LINE__); | ||
3857 | |||
3858 | err: | ||
3859 | for (i = 0; i < pinned; i++) | ||
3860 | i915_gem_object_unpin(object_list[i]); | ||
3861 | |||
3862 | for (i = 0; i < args->buffer_count; i++) { | ||
3863 | if (object_list[i]) { | ||
3864 | obj_priv = to_intel_bo(object_list[i]); | ||
3865 | obj_priv->in_execbuffer = false; | ||
3866 | } | ||
3867 | drm_gem_object_unreference(object_list[i]); | ||
3868 | } | ||
3869 | |||
3870 | mutex_unlock(&dev->struct_mutex); | ||
3871 | |||
3872 | pre_mutex_err: | ||
3873 | /* Copy the updated relocations out regardless of current error | ||
3874 | * state. Failure to update the relocs would mean that the next | ||
3875 | * time userland calls execbuf, it would do so with presumed offset | ||
3876 | * state that didn't match the actual object state. | ||
3877 | */ | ||
3878 | ret2 = i915_gem_put_relocs_to_user(exec_list, args->buffer_count, | ||
3879 | relocs); | ||
3880 | if (ret2 != 0) { | ||
3881 | DRM_ERROR("Failed to copy relocations back out: %d\n", ret2); | ||
3882 | |||
3883 | if (ret == 0) | ||
3884 | ret = ret2; | ||
3885 | } | ||
3886 | |||
3887 | drm_free_large(object_list); | ||
3888 | kfree(cliprects); | ||
3889 | |||
3890 | return ret; | ||
3891 | } | ||
3892 | |||
3893 | /* | ||
3894 | * Legacy execbuffer just creates an exec2 list from the original exec object | ||
3895 | * list array and passes it to the real function. | ||
3896 | */ | ||
3897 | int | ||
3898 | i915_gem_execbuffer(struct drm_device *dev, void *data, | ||
3899 | struct drm_file *file_priv) | ||
3900 | { | ||
3901 | struct drm_i915_gem_execbuffer *args = data; | ||
3902 | struct drm_i915_gem_execbuffer2 exec2; | ||
3903 | struct drm_i915_gem_exec_object *exec_list = NULL; | ||
3904 | struct drm_i915_gem_exec_object2 *exec2_list = NULL; | ||
3905 | int ret, i; | ||
3906 | |||
3907 | #if WATCH_EXEC | ||
3908 | DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n", | ||
3909 | (int) args->buffers_ptr, args->buffer_count, args->batch_len); | ||
3910 | #endif | ||
3911 | |||
3912 | if (args->buffer_count < 1) { | ||
3913 | DRM_ERROR("execbuf with %d buffers\n", args->buffer_count); | ||
3914 | return -EINVAL; | ||
3915 | } | ||
3916 | |||
3917 | /* Copy in the exec list from userland */ | ||
3918 | exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count); | ||
3919 | exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count); | ||
3920 | if (exec_list == NULL || exec2_list == NULL) { | ||
3921 | DRM_ERROR("Failed to allocate exec list for %d buffers\n", | ||
3922 | args->buffer_count); | ||
3923 | drm_free_large(exec_list); | ||
3924 | drm_free_large(exec2_list); | ||
3925 | return -ENOMEM; | ||
3926 | } | ||
3927 | ret = copy_from_user(exec_list, | ||
3928 | (struct drm_i915_relocation_entry __user *) | ||
3929 | (uintptr_t) args->buffers_ptr, | ||
3930 | sizeof(*exec_list) * args->buffer_count); | ||
3931 | if (ret != 0) { | ||
3932 | DRM_ERROR("copy %d exec entries failed %d\n", | ||
3933 | args->buffer_count, ret); | ||
3934 | drm_free_large(exec_list); | ||
3935 | drm_free_large(exec2_list); | ||
3936 | return -EFAULT; | ||
3937 | } | ||
3938 | 3258 | ||
3939 | for (i = 0; i < args->buffer_count; i++) { | 3259 | ret = 0; |
3940 | exec2_list[i].handle = exec_list[i].handle; | 3260 | if (!i915_seqno_passed(ring->get_seqno(ring), seqno)) { |
3941 | exec2_list[i].relocation_count = exec_list[i].relocation_count; | 3261 | /* And wait for the seqno passing without holding any locks and |
3942 | exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr; | 3262 | * causing extra latency for others. This is safe as the irq |
3943 | exec2_list[i].alignment = exec_list[i].alignment; | 3263 | * generation is designed to be run atomically and so is |
3944 | exec2_list[i].offset = exec_list[i].offset; | 3264 | * lockless. |
3945 | if (!IS_I965G(dev)) | 3265 | */ |
3946 | exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE; | 3266 | if (ring->irq_get(ring)) { |
3947 | else | 3267 | ret = wait_event_interruptible(ring->irq_queue, |
3948 | exec2_list[i].flags = 0; | 3268 | i915_seqno_passed(ring->get_seqno(ring), seqno) |
3949 | } | 3269 | || atomic_read(&dev_priv->mm.wedged)); |
3270 | ring->irq_put(ring); | ||
3950 | 3271 | ||
3951 | exec2.buffers_ptr = args->buffers_ptr; | 3272 | if (ret == 0 && atomic_read(&dev_priv->mm.wedged)) |
3952 | exec2.buffer_count = args->buffer_count; | 3273 | ret = -EIO; |
3953 | exec2.batch_start_offset = args->batch_start_offset; | ||
3954 | exec2.batch_len = args->batch_len; | ||
3955 | exec2.DR1 = args->DR1; | ||
3956 | exec2.DR4 = args->DR4; | ||
3957 | exec2.num_cliprects = args->num_cliprects; | ||
3958 | exec2.cliprects_ptr = args->cliprects_ptr; | ||
3959 | exec2.flags = I915_EXEC_RENDER; | ||
3960 | |||
3961 | ret = i915_gem_do_execbuffer(dev, data, file_priv, &exec2, exec2_list); | ||
3962 | if (!ret) { | ||
3963 | /* Copy the new buffer offsets back to the user's exec list. */ | ||
3964 | for (i = 0; i < args->buffer_count; i++) | ||
3965 | exec_list[i].offset = exec2_list[i].offset; | ||
3966 | /* ... and back out to userspace */ | ||
3967 | ret = copy_to_user((struct drm_i915_relocation_entry __user *) | ||
3968 | (uintptr_t) args->buffers_ptr, | ||
3969 | exec_list, | ||
3970 | sizeof(*exec_list) * args->buffer_count); | ||
3971 | if (ret) { | ||
3972 | ret = -EFAULT; | ||
3973 | DRM_ERROR("failed to copy %d exec entries " | ||
3974 | "back to user (%d)\n", | ||
3975 | args->buffer_count, ret); | ||
3976 | } | 3274 | } |
3977 | } | 3275 | } |
3978 | 3276 | ||
3979 | drm_free_large(exec_list); | 3277 | if (ret == 0) |
3980 | drm_free_large(exec2_list); | 3278 | queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0); |
3981 | return ret; | ||
3982 | } | ||
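
The legacy wrapper above does no execution work of its own: it widens each v1 exec entry into the v2 layout and, on pre-965 hardware, conservatively marks every object as needing a fence register. A sketch of that up-conversion, using hypothetical stand-in structs (the real ones live in i915_drm.h):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define EXEC_OBJECT_NEEDS_FENCE (1u << 0)

struct exec_v1 { uint32_t handle; uint64_t offset; };
struct exec_v2 { uint32_t handle; uint64_t offset; uint32_t flags; };

static struct exec_v2 upconvert(struct exec_v1 e, bool is_i965)
{
	struct exec_v2 e2 = { .handle = e.handle, .offset = e.offset };

	/* Pre-965 chips need a fence register for tiled blits, so the
	 * legacy path requests one for every object to be safe. */
	e2.flags = is_i965 ? 0 : EXEC_OBJECT_NEEDS_FENCE;
	return e2;
}

int main(void)
{
	struct exec_v1 e = { .handle = 7, .offset = 0x10000 };

	printf("flags=%u\n", upconvert(e, false).flags); /* prints 1 */
	return 0;
}
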
3983 | |||
3984 | int | ||
3985 | i915_gem_execbuffer2(struct drm_device *dev, void *data, | ||
3986 | struct drm_file *file_priv) | ||
3987 | { | ||
3988 | struct drm_i915_gem_execbuffer2 *args = data; | ||
3989 | struct drm_i915_gem_exec_object2 *exec2_list = NULL; | ||
3990 | int ret; | ||
3991 | |||
3992 | #if WATCH_EXEC | ||
3993 | DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n", | ||
3994 | (int) args->buffers_ptr, args->buffer_count, args->batch_len); | ||
3995 | #endif | ||
3996 | |||
3997 | if (args->buffer_count < 1) { | ||
3998 | DRM_ERROR("execbuf2 with %d buffers\n", args->buffer_count); | ||
3999 | return -EINVAL; | ||
4000 | } | ||
4001 | |||
4002 | exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count); | ||
4003 | if (exec2_list == NULL) { | ||
4004 | DRM_ERROR("Failed to allocate exec list for %d buffers\n", | ||
4005 | args->buffer_count); | ||
4006 | return -ENOMEM; | ||
4007 | } | ||
4008 | ret = copy_from_user(exec2_list, | ||
4009 | (struct drm_i915_relocation_entry __user *) | ||
4010 | (uintptr_t) args->buffers_ptr, | ||
4011 | sizeof(*exec2_list) * args->buffer_count); | ||
4012 | if (ret != 0) { | ||
4013 | DRM_ERROR("copy %d exec entries failed %d\n", | ||
4014 | args->buffer_count, ret); | ||
4015 | drm_free_large(exec2_list); | ||
4016 | return -EFAULT; | ||
4017 | } | ||
4018 | |||
4019 | ret = i915_gem_do_execbuffer(dev, data, file_priv, args, exec2_list); | ||
4020 | if (!ret) { | ||
4021 | /* Copy the new buffer offsets back to the user's exec list. */ | ||
4022 | ret = copy_to_user((struct drm_i915_relocation_entry __user *) | ||
4023 | (uintptr_t) args->buffers_ptr, | ||
4024 | exec2_list, | ||
4025 | sizeof(*exec2_list) * args->buffer_count); | ||
4026 | if (ret) { | ||
4027 | ret = -EFAULT; | ||
4028 | DRM_ERROR("failed to copy %d exec entries " | ||
4029 | "back to user (%d)\n", | ||
4030 | args->buffer_count, ret); | ||
4031 | } | ||
4032 | } | ||
4033 | 3279 | ||
4034 | drm_free_large(exec2_list); | ||
4035 | return ret; | 3280 | return ret; |
4036 | } | 3281 | } |
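
The rewritten throttle above walks the client's request list and only waits on requests that fall outside a 20 ms window; time_after_eq() implements the jiffies comparison with wraparound-safe signed arithmetic. A userspace sketch of the same window test, using plain milliseconds and made-up timestamps:

#include <stdbool.h>
#include <stdio.h>

/* Mirrors time_after_eq(): true if 'emitted' is at or after the
 * cutoff, robust against counter wraparound via signed subtraction. */
static bool is_recent(unsigned long emitted_ms, unsigned long now_ms)
{
	unsigned long recent_enough = now_ms - 20;
	return (long)(emitted_ms - recent_enough) >= 0;
}

int main(void)
{
	unsigned long now = 1000;

	/* A request 5 ms old is inside the window, so the scan stops
	 * there; one 50 ms old would have to be waited on. */
	printf("5ms old: %d, 50ms old: %d\n",
	       is_recent(now - 5, now), is_recent(now - 50, now));
	return 0;
}
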
4037 | 3282 | ||
4038 | int | 3283 | int |
4039 | i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment) | 3284 | i915_gem_object_pin(struct drm_i915_gem_object *obj, |
3285 | uint32_t alignment, | ||
3286 | bool map_and_fenceable) | ||
4040 | { | 3287 | { |
4041 | struct drm_device *dev = obj->dev; | 3288 | struct drm_device *dev = obj->base.dev; |
4042 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 3289 | struct drm_i915_private *dev_priv = dev->dev_private; |
4043 | int ret; | 3290 | int ret; |
4044 | 3291 | ||
4045 | BUG_ON(obj_priv->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT); | 3292 | BUG_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT); |
4046 | 3293 | WARN_ON(i915_verify_lists(dev)); | |
4047 | i915_verify_inactive(dev, __FILE__, __LINE__); | ||
4048 | 3294 | ||
4049 | if (obj_priv->gtt_space != NULL) { | 3295 | if (obj->gtt_space != NULL) { |
4050 | if (alignment == 0) | 3296 | if ((alignment && obj->gtt_offset & (alignment - 1)) || |
4051 | alignment = i915_gem_get_gtt_alignment(obj); | 3297 | (map_and_fenceable && !obj->map_and_fenceable)) { |
4052 | if (obj_priv->gtt_offset & (alignment - 1)) { | 3298 | WARN(obj->pin_count, |
4053 | WARN(obj_priv->pin_count, | ||
4054 | "bo is already pinned with incorrect alignment:" | 3299 | "bo is already pinned with incorrect alignment:" |
4055 | " offset=%x, req.alignment=%x\n", | 3300 | " offset=%x, req.alignment=%x, req.map_and_fenceable=%d," |
4056 | obj_priv->gtt_offset, alignment); | 3301 | " obj->map_and_fenceable=%d\n", |
3302 | obj->gtt_offset, alignment, | ||
3303 | map_and_fenceable, | ||
3304 | obj->map_and_fenceable); | ||
4057 | ret = i915_gem_object_unbind(obj); | 3305 | ret = i915_gem_object_unbind(obj); |
4058 | if (ret) | 3306 | if (ret) |
4059 | return ret; | 3307 | return ret; |
4060 | } | 3308 | } |
4061 | } | 3309 | } |
4062 | 3310 | ||
4063 | if (obj_priv->gtt_space == NULL) { | 3311 | if (obj->gtt_space == NULL) { |
4064 | ret = i915_gem_object_bind_to_gtt(obj, alignment); | 3312 | ret = i915_gem_object_bind_to_gtt(obj, alignment, |
3313 | map_and_fenceable); | ||
4065 | if (ret) | 3314 | if (ret) |
4066 | return ret; | 3315 | return ret; |
4067 | } | 3316 | } |
4068 | 3317 | ||
4069 | obj_priv->pin_count++; | 3318 | if (obj->pin_count++ == 0) { |
4070 | 3319 | if (!obj->active) | |
4071 | /* If the object is not active and not pending a flush, | 3320 | list_move_tail(&obj->mm_list, |
4072 | * remove it from the inactive list | 3321 | &dev_priv->mm.pinned_list); |
4073 | */ | ||
4074 | if (obj_priv->pin_count == 1) { | ||
4075 | atomic_inc(&dev->pin_count); | ||
4076 | atomic_add(obj->size, &dev->pin_memory); | ||
4077 | if (!obj_priv->active && | ||
4078 | (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0) | ||
4079 | list_del_init(&obj_priv->list); | ||
4080 | } | 3322 | } |
4081 | i915_verify_inactive(dev, __FILE__, __LINE__); | 3323 | obj->pin_mappable |= map_and_fenceable; |
4082 | 3324 | ||
3325 | WARN_ON(i915_verify_lists(dev)); | ||
4083 | return 0; | 3326 | return 0; |
4084 | } | 3327 | } |
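
i915_gem_object_pin() above treats a nonzero (gtt_offset & (alignment - 1)) as a misaligned binding and rebinds the object. The mask test is only valid for power-of-two alignments, which is what the GTT code passes in; a two-case sketch:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t alignment = 4096; /* must be a power of two */
	uint32_t ok = 8192, bad = 8200;

	/* (offset & (alignment - 1)) is the misalignment remainder. */
	printf("%u -> %u, %u -> %u\n",
	       ok, ok & (alignment - 1),    /* 0: stays bound     */
	       bad, bad & (alignment - 1)); /* 8: unbind + rebind */
	return 0;
}
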
4085 | 3328 | ||
4086 | void | 3329 | void |
4087 | i915_gem_object_unpin(struct drm_gem_object *obj) | 3330 | i915_gem_object_unpin(struct drm_i915_gem_object *obj) |
4088 | { | 3331 | { |
4089 | struct drm_device *dev = obj->dev; | 3332 | struct drm_device *dev = obj->base.dev; |
4090 | drm_i915_private_t *dev_priv = dev->dev_private; | 3333 | drm_i915_private_t *dev_priv = dev->dev_private; |
4091 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | ||
4092 | 3334 | ||
4093 | i915_verify_inactive(dev, __FILE__, __LINE__); | 3335 | WARN_ON(i915_verify_lists(dev)); |
4094 | obj_priv->pin_count--; | 3336 | BUG_ON(obj->pin_count == 0); |
4095 | BUG_ON(obj_priv->pin_count < 0); | 3337 | BUG_ON(obj->gtt_space == NULL); |
4096 | BUG_ON(obj_priv->gtt_space == NULL); | ||
4097 | 3338 | ||
4098 | /* If the object is no longer pinned, and is | 3339 | if (--obj->pin_count == 0) { |
4099 | * neither active nor being flushed, then stick it on | 3340 | if (!obj->active) |
4100 | * the inactive list | 3341 | list_move_tail(&obj->mm_list, |
4101 | */ | ||
4102 | if (obj_priv->pin_count == 0) { | ||
4103 | if (!obj_priv->active && | ||
4104 | (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0) | ||
4105 | list_move_tail(&obj_priv->list, | ||
4106 | &dev_priv->mm.inactive_list); | 3342 | &dev_priv->mm.inactive_list); |
4107 | atomic_dec(&dev->pin_count); | 3343 | obj->pin_mappable = false; |
4108 | atomic_sub(obj->size, &dev->pin_memory); | ||
4109 | } | 3344 | } |
4110 | i915_verify_inactive(dev, __FILE__, __LINE__); | 3345 | WARN_ON(i915_verify_lists(dev)); |
4111 | } | 3346 | } |
4112 | 3347 | ||
4113 | int | 3348 | int |
4114 | i915_gem_pin_ioctl(struct drm_device *dev, void *data, | 3349 | i915_gem_pin_ioctl(struct drm_device *dev, void *data, |
4115 | struct drm_file *file_priv) | 3350 | struct drm_file *file) |
4116 | { | 3351 | { |
4117 | struct drm_i915_gem_pin *args = data; | 3352 | struct drm_i915_gem_pin *args = data; |
4118 | struct drm_gem_object *obj; | 3353 | struct drm_i915_gem_object *obj; |
4119 | struct drm_i915_gem_object *obj_priv; | ||
4120 | int ret; | 3354 | int ret; |
4121 | 3355 | ||
4122 | mutex_lock(&dev->struct_mutex); | 3356 | ret = i915_mutex_lock_interruptible(dev); |
3357 | if (ret) | ||
3358 | return ret; | ||
4123 | 3359 | ||
4124 | obj = drm_gem_object_lookup(dev, file_priv, args->handle); | 3360 | obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); |
4125 | if (obj == NULL) { | 3361 | if (&obj->base == NULL) { |
4126 | DRM_ERROR("Bad handle in i915_gem_pin_ioctl(): %d\n", | 3362 | ret = -ENOENT; |
4127 | args->handle); | 3363 | goto unlock; |
4128 | mutex_unlock(&dev->struct_mutex); | ||
4129 | return -ENOENT; | ||
4130 | } | 3364 | } |
4131 | obj_priv = to_intel_bo(obj); | ||
4132 | 3365 | ||
4133 | if (obj_priv->madv != I915_MADV_WILLNEED) { | 3366 | if (obj->madv != I915_MADV_WILLNEED) { |
4134 | DRM_ERROR("Attempting to pin a purgeable buffer\n"); | 3367 | DRM_ERROR("Attempting to pin a purgeable buffer\n"); |
4135 | drm_gem_object_unreference(obj); | 3368 | ret = -EINVAL; |
4136 | mutex_unlock(&dev->struct_mutex); | 3369 | goto out; |
4137 | return -EINVAL; | ||
4138 | } | 3370 | } |
4139 | 3371 | ||
4140 | if (obj_priv->pin_filp != NULL && obj_priv->pin_filp != file_priv) { | 3372 | if (obj->pin_filp != NULL && obj->pin_filp != file) { |
4141 | DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n", | 3373 | DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n", |
4142 | args->handle); | 3374 | args->handle); |
4143 | drm_gem_object_unreference(obj); | 3375 | ret = -EINVAL; |
4144 | mutex_unlock(&dev->struct_mutex); | 3376 | goto out; |
4145 | return -EINVAL; | ||
4146 | } | 3377 | } |
4147 | 3378 | ||
4148 | obj_priv->user_pin_count++; | 3379 | obj->user_pin_count++; |
4149 | obj_priv->pin_filp = file_priv; | 3380 | obj->pin_filp = file; |
4150 | if (obj_priv->user_pin_count == 1) { | 3381 | if (obj->user_pin_count == 1) { |
4151 | ret = i915_gem_object_pin(obj, args->alignment); | 3382 | ret = i915_gem_object_pin(obj, args->alignment, true); |
4152 | if (ret != 0) { | 3383 | if (ret) |
4153 | drm_gem_object_unreference(obj); | 3384 | goto out; |
4154 | mutex_unlock(&dev->struct_mutex); | ||
4155 | return ret; | ||
4156 | } | ||
4157 | } | 3385 | } |
4158 | 3386 | ||
4159 | /* XXX - flush the CPU caches for pinned objects | 3387 | /* XXX - flush the CPU caches for pinned objects |
4160 | * as the X server doesn't manage domains yet | 3388 | * as the X server doesn't manage domains yet |
4161 | */ | 3389 | */ |
4162 | i915_gem_object_flush_cpu_write_domain(obj); | 3390 | i915_gem_object_flush_cpu_write_domain(obj); |
4163 | args->offset = obj_priv->gtt_offset; | 3391 | args->offset = obj->gtt_offset; |
4164 | drm_gem_object_unreference(obj); | 3392 | out: |
3393 | drm_gem_object_unreference(&obj->base); | ||
3394 | unlock: | ||
4165 | mutex_unlock(&dev->struct_mutex); | 3395 | mutex_unlock(&dev->struct_mutex); |
4166 | 3396 | return ret; | |
4167 | return 0; | ||
4168 | } | 3397 | } |
4169 | 3398 | ||
4170 | int | 3399 | int |
4171 | i915_gem_unpin_ioctl(struct drm_device *dev, void *data, | 3400 | i915_gem_unpin_ioctl(struct drm_device *dev, void *data, |
4172 | struct drm_file *file_priv) | 3401 | struct drm_file *file) |
4173 | { | 3402 | { |
4174 | struct drm_i915_gem_pin *args = data; | 3403 | struct drm_i915_gem_pin *args = data; |
4175 | struct drm_gem_object *obj; | 3404 | struct drm_i915_gem_object *obj; |
4176 | struct drm_i915_gem_object *obj_priv; | 3405 | int ret; |
4177 | 3406 | ||
4178 | mutex_lock(&dev->struct_mutex); | 3407 | ret = i915_mutex_lock_interruptible(dev); |
3408 | if (ret) | ||
3409 | return ret; | ||
4179 | 3410 | ||
4180 | obj = drm_gem_object_lookup(dev, file_priv, args->handle); | 3411 | obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); |
4181 | if (obj == NULL) { | 3412 | if (&obj->base == NULL) { |
4182 | DRM_ERROR("Bad handle in i915_gem_unpin_ioctl(): %d\n", | 3413 | ret = -ENOENT; |
4183 | args->handle); | 3414 | goto unlock; |
4184 | mutex_unlock(&dev->struct_mutex); | ||
4185 | return -ENOENT; | ||
4186 | } | 3415 | } |
4187 | 3416 | ||
4188 | obj_priv = to_intel_bo(obj); | 3417 | if (obj->pin_filp != file) { |
4189 | if (obj_priv->pin_filp != file_priv) { | ||
4190 | DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n", | 3418 | DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n", |
4191 | args->handle); | 3419 | args->handle); |
4192 | drm_gem_object_unreference(obj); | 3420 | ret = -EINVAL; |
4193 | mutex_unlock(&dev->struct_mutex); | 3421 | goto out; |
4194 | return -EINVAL; | ||
4195 | } | 3422 | } |
4196 | obj_priv->user_pin_count--; | 3423 | obj->user_pin_count--; |
4197 | if (obj_priv->user_pin_count == 0) { | 3424 | if (obj->user_pin_count == 0) { |
4198 | obj_priv->pin_filp = NULL; | 3425 | obj->pin_filp = NULL; |
4199 | i915_gem_object_unpin(obj); | 3426 | i915_gem_object_unpin(obj); |
4200 | } | 3427 | } |
4201 | 3428 | ||
4202 | drm_gem_object_unreference(obj); | 3429 | out: |
3430 | drm_gem_object_unreference(&obj->base); | ||
3431 | unlock: | ||
4203 | mutex_unlock(&dev->struct_mutex); | 3432 | mutex_unlock(&dev->struct_mutex); |
4204 | return 0; | 3433 | return ret; |
4205 | } | 3434 | } |
4206 | 3435 | ||
4207 | int | 3436 | int |
4208 | i915_gem_busy_ioctl(struct drm_device *dev, void *data, | 3437 | i915_gem_busy_ioctl(struct drm_device *dev, void *data, |
4209 | struct drm_file *file_priv) | 3438 | struct drm_file *file) |
4210 | { | 3439 | { |
4211 | struct drm_i915_gem_busy *args = data; | 3440 | struct drm_i915_gem_busy *args = data; |
4212 | struct drm_gem_object *obj; | 3441 | struct drm_i915_gem_object *obj; |
4213 | struct drm_i915_gem_object *obj_priv; | 3442 | int ret; |
4214 | 3443 | ||
4215 | obj = drm_gem_object_lookup(dev, file_priv, args->handle); | 3444 | ret = i915_mutex_lock_interruptible(dev); |
4216 | if (obj == NULL) { | 3445 | if (ret) |
4217 | DRM_ERROR("Bad handle in i915_gem_busy_ioctl(): %d\n", | 3446 | return ret; |
4218 | args->handle); | ||
4219 | return -ENOENT; | ||
4220 | } | ||
4221 | 3447 | ||
4222 | mutex_lock(&dev->struct_mutex); | 3448 | obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); |
3449 | if (&obj->base == NULL) { | ||
3450 | ret = -ENOENT; | ||
3451 | goto unlock; | ||
3452 | } | ||
4223 | 3453 | ||
4224 | /* Count all active objects as busy, even if they are currently not used | 3454 | /* Count all active objects as busy, even if they are currently not used |
4225 | * by the gpu. Users of this interface expect objects to eventually | 3455 | * by the gpu. Users of this interface expect objects to eventually |
4226 | * become non-busy without any further actions, therefore emit any | 3456 | * become non-busy without any further actions, therefore emit any |
4227 | * necessary flushes here. | 3457 | * necessary flushes here. |
4228 | */ | 3458 | */ |
4229 | obj_priv = to_intel_bo(obj); | 3459 | args->busy = obj->active; |
4230 | args->busy = obj_priv->active; | ||
4231 | if (args->busy) { | 3460 | if (args->busy) { |
4232 | /* Unconditionally flush objects, even when the gpu still uses this | 3461 | /* Unconditionally flush objects, even when the gpu still uses this |
4233 | * object. Userspace calling this function indicates that it wants to | 3462 | * object. Userspace calling this function indicates that it wants to |
4234 | * use this buffer rather sooner than later, so issuing the required | 3463 | * use this buffer rather sooner than later, so issuing the required |
4235 | * flush earlier is beneficial. | 3464 | * flush earlier is beneficial. |
4236 | */ | 3465 | */ |
4237 | if (obj->write_domain) { | 3466 | if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) { |
4238 | i915_gem_flush(dev, 0, obj->write_domain); | 3467 | ret = i915_gem_flush_ring(obj->ring, |
4239 | (void)i915_add_request(dev, file_priv, obj->write_domain, obj_priv->ring); | 3468 | 0, obj->base.write_domain); |
3469 | } else if (obj->ring->outstanding_lazy_request == | ||
3470 | obj->last_rendering_seqno) { | ||
3471 | struct drm_i915_gem_request *request; | ||
3472 | |||
3473 | /* This ring is not being cleared by active usage, | ||
3474 | * so emit a request to do so. | ||
3475 | */ | ||
3476 | request = kzalloc(sizeof(*request), GFP_KERNEL); | ||
3477 | if (request) | ||
3478 | ret = i915_add_request(obj->ring, NULL, request); | ||
3479 | else | ||
3480 | ret = -ENOMEM; | ||
4240 | } | 3481 | } |
4241 | 3482 | ||
4242 | /* Update the active list for the hardware's current position. | 3483 | /* Update the active list for the hardware's current position. |
@@ -4244,14 +3485,15 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data, | |||
4244 | * are actually unmasked, and our working set ends up being | 3485 | * are actually unmasked, and our working set ends up being |
4245 | * larger than required. | 3486 | * larger than required. |
4246 | */ | 3487 | */ |
4247 | i915_gem_retire_requests_ring(dev, obj_priv->ring); | 3488 | i915_gem_retire_requests_ring(obj->ring); |
4248 | 3489 | ||
4249 | args->busy = obj_priv->active; | 3490 | args->busy = obj->active; |
4250 | } | 3491 | } |
4251 | 3492 | ||
4252 | drm_gem_object_unreference(obj); | 3493 | drm_gem_object_unreference(&obj->base); |
3494 | unlock: | ||
4253 | mutex_unlock(&dev->struct_mutex); | 3495 | mutex_unlock(&dev->struct_mutex); |
4254 | return 0; | 3496 | return ret; |
4255 | } | 3497 | } |
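Seen from userspace, the busy query is a single ioctl that doubles as the flush/retire nudge described in the comments above. A hedged example against the standard i915 UAPI (the header path may vary by distribution); fd must be an open DRM device node and handle a valid GEM handle, both placeholders here:

#include <errno.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

/* Returns 1 if the buffer is still busy on the GPU, 0 if idle, -1 on
 * error.  Restarts on EINTR/EAGAIN since the kernel side now takes
 * struct_mutex interruptibly. */
static int gem_busy(int fd, unsigned int handle)
{
	struct drm_i915_gem_busy busy;
	int ret;

	memset(&busy, 0, sizeof(busy));
	busy.handle = handle;
	do {
		ret = ioctl(fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
	} while (ret == -1 && (errno == EINTR || errno == EAGAIN));
	if (ret)
		return -1;
	return busy.busy != 0;
}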
4256 | 3498 | ||
4257 | int | 3499 | int |
@@ -4266,8 +3508,8 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data, | |||
4266 | struct drm_file *file_priv) | 3508 | struct drm_file *file_priv) |
4267 | { | 3509 | { |
4268 | struct drm_i915_gem_madvise *args = data; | 3510 | struct drm_i915_gem_madvise *args = data; |
4269 | struct drm_gem_object *obj; | 3511 | struct drm_i915_gem_object *obj; |
4270 | struct drm_i915_gem_object *obj_priv; | 3512 | int ret; |
4271 | 3513 | ||
4272 | switch (args->madv) { | 3514 | switch (args->madv) { |
4273 | case I915_MADV_DONTNEED: | 3515 | case I915_MADV_DONTNEED: |
@@ -4277,44 +3519,44 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data, | |||
4277 | return -EINVAL; | 3519 | return -EINVAL; |
4278 | } | 3520 | } |
4279 | 3521 | ||
4280 | obj = drm_gem_object_lookup(dev, file_priv, args->handle); | 3522 | ret = i915_mutex_lock_interruptible(dev); |
4281 | if (obj == NULL) { | 3523 | if (ret) |
4282 | DRM_ERROR("Bad handle in i915_gem_madvise_ioctl(): %d\n", | 3524 | return ret; |
4283 | args->handle); | ||
4284 | return -ENOENT; | ||
4285 | } | ||
4286 | |||
4287 | mutex_lock(&dev->struct_mutex); | ||
4288 | obj_priv = to_intel_bo(obj); | ||
4289 | 3525 | ||
4290 | if (obj_priv->pin_count) { | 3526 | obj = to_intel_bo(drm_gem_object_lookup(dev, file_priv, args->handle)); |
4291 | drm_gem_object_unreference(obj); | 3527 | if (&obj->base == NULL) { |
4292 | mutex_unlock(&dev->struct_mutex); | 3528 | ret = -ENOENT; |
3529 | goto unlock; | ||
3530 | } | ||
4293 | 3531 | ||
4294 | DRM_ERROR("Attempted i915_gem_madvise_ioctl() on a pinned object\n"); | 3532 | if (obj->pin_count) { |
4295 | return -EINVAL; | 3533 | ret = -EINVAL; |
3534 | goto out; | ||
4296 | } | 3535 | } |
4297 | 3536 | ||
4298 | if (obj_priv->madv != __I915_MADV_PURGED) | 3537 | if (obj->madv != __I915_MADV_PURGED) |
4299 | obj_priv->madv = args->madv; | 3538 | obj->madv = args->madv; |
4300 | 3539 | ||
4301 | /* if the object is no longer bound, discard its backing storage */ | 3540 | /* if the object is no longer bound, discard its backing storage */ |
4302 | if (i915_gem_object_is_purgeable(obj_priv) && | 3541 | if (i915_gem_object_is_purgeable(obj) && |
4303 | obj_priv->gtt_space == NULL) | 3542 | obj->gtt_space == NULL) |
4304 | i915_gem_object_truncate(obj); | 3543 | i915_gem_object_truncate(obj); |
4305 | 3544 | ||
4306 | args->retained = obj_priv->madv != __I915_MADV_PURGED; | 3545 | args->retained = obj->madv != __I915_MADV_PURGED; |
4307 | 3546 | ||
4308 | drm_gem_object_unreference(obj); | 3547 | out: |
3548 | drm_gem_object_unreference(&obj->base); | ||
3549 | unlock: | ||
4309 | mutex_unlock(&dev->struct_mutex); | 3550 | mutex_unlock(&dev->struct_mutex); |
4310 | 3551 | return ret; | |
4311 | return 0; | ||
4312 | } | 3552 | } |
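The madvise ioctl is what a userspace buffer cache (libdrm's BO reuse pool is the classic consumer) drives around this path: mark DONTNEED when a buffer goes back into the cache, mark WILLNEED on reuse and check retained to learn whether the pages survived. A sketch against the standard UAPI, with fd/handle again as placeholders:

#include <errno.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

/* madv is I915_MADV_DONTNEED or I915_MADV_WILLNEED.  Returns 1 if the
 * backing pages were retained, 0 if they had been purged, -1 on error. */
static int gem_madvise(int fd, unsigned int handle, unsigned int madv)
{
	struct drm_i915_gem_madvise arg;
	int ret;

	memset(&arg, 0, sizeof(arg));
	arg.handle = handle;
	arg.madv = madv;
	do {
		ret = ioctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &arg);
	} while (ret == -1 && errno == EINTR);
	if (ret)
		return -1;
	return arg.retained;
}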
4313 | 3553 | ||
4314 | struct drm_gem_object * i915_gem_alloc_object(struct drm_device *dev, | 3554 | struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev, |
4315 | size_t size) | 3555 | size_t size) |
4316 | { | 3556 | { |
3557 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
4317 | struct drm_i915_gem_object *obj; | 3558 | struct drm_i915_gem_object *obj; |
3559 | struct address_space *mapping; | ||
4318 | 3560 | ||
4319 | obj = kzalloc(sizeof(*obj), GFP_KERNEL); | 3561 | obj = kzalloc(sizeof(*obj), GFP_KERNEL); |
4320 | if (obj == NULL) | 3562 | if (obj == NULL) |
@@ -4325,19 +3567,27 @@ struct drm_gem_object * i915_gem_alloc_object(struct drm_device *dev, | |||
4325 | return NULL; | 3567 | return NULL; |
4326 | } | 3568 | } |
4327 | 3569 | ||
3570 | mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping; | ||
3571 | mapping_set_gfp_mask(mapping, GFP_HIGHUSER | __GFP_RECLAIMABLE); | ||
3572 | |||
3573 | i915_gem_info_add_obj(dev_priv, size); | ||
3574 | |||
4328 | obj->base.write_domain = I915_GEM_DOMAIN_CPU; | 3575 | obj->base.write_domain = I915_GEM_DOMAIN_CPU; |
4329 | obj->base.read_domains = I915_GEM_DOMAIN_CPU; | 3576 | obj->base.read_domains = I915_GEM_DOMAIN_CPU; |
4330 | 3577 | ||
4331 | obj->agp_type = AGP_USER_MEMORY; | 3578 | obj->cache_level = I915_CACHE_NONE; |
4332 | obj->base.driver_private = NULL; | 3579 | obj->base.driver_private = NULL; |
4333 | obj->fence_reg = I915_FENCE_REG_NONE; | 3580 | obj->fence_reg = I915_FENCE_REG_NONE; |
4334 | INIT_LIST_HEAD(&obj->list); | 3581 | INIT_LIST_HEAD(&obj->mm_list); |
3582 | INIT_LIST_HEAD(&obj->gtt_list); | ||
3583 | INIT_LIST_HEAD(&obj->ring_list); | ||
3584 | INIT_LIST_HEAD(&obj->exec_list); | ||
4335 | INIT_LIST_HEAD(&obj->gpu_write_list); | 3585 | INIT_LIST_HEAD(&obj->gpu_write_list); |
4336 | obj->madv = I915_MADV_WILLNEED; | 3586 | obj->madv = I915_MADV_WILLNEED; |
3587 | /* Avoid an unnecessary call to unbind on the first bind. */ | ||
3588 | obj->map_and_fenceable = true; | ||
4337 | 3589 | ||
4338 | trace_i915_gem_object_create(&obj->base); | 3590 | return obj; |
4339 | |||
4340 | return &obj->base; | ||
4341 | } | 3591 | } |
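The backing store behind i915_gem_alloc_object() is a shmem file (hence the i_mapping/gfp-mask fixup above), which is what makes purging possible at all: dropping a purgeable object is just truncating its file. As a loose userspace analogy only — not the kernel path — the lifecycle looks like this (assumes glibc's memfd_create()):

#define _GNU_SOURCE
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	const size_t size = 4096 * 4;
	void *map;
	int fd;

	fd = memfd_create("toy-bo", 0);		/* anonymous shmem file */
	if (fd < 0 || ftruncate(fd, size) < 0)
		return 1;

	map = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (map == MAP_FAILED)
		return 1;

	/* ... CPU access through the mapping ... */

	munmap(map, size);
	if (ftruncate(fd, 0) < 0)	/* analogue of object_truncate(): */
		return 1;		/* the backing pages are simply gone */
	close(fd);
	return 0;
}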
4342 | 3592 | ||
4343 | int i915_gem_init_object(struct drm_gem_object *obj) | 3593 | int i915_gem_init_object(struct drm_gem_object *obj) |
@@ -4347,41 +3597,41 @@ int i915_gem_init_object(struct drm_gem_object *obj) | |||
4347 | return 0; | 3597 | return 0; |
4348 | } | 3598 | } |
4349 | 3599 | ||
4350 | static void i915_gem_free_object_tail(struct drm_gem_object *obj) | 3600 | static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj) |
4351 | { | 3601 | { |
4352 | struct drm_device *dev = obj->dev; | 3602 | struct drm_device *dev = obj->base.dev; |
4353 | drm_i915_private_t *dev_priv = dev->dev_private; | 3603 | drm_i915_private_t *dev_priv = dev->dev_private; |
4354 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | ||
4355 | int ret; | 3604 | int ret; |
4356 | 3605 | ||
4357 | ret = i915_gem_object_unbind(obj); | 3606 | ret = i915_gem_object_unbind(obj); |
4358 | if (ret == -ERESTARTSYS) { | 3607 | if (ret == -ERESTARTSYS) { |
4359 | list_move(&obj_priv->list, | 3608 | list_move(&obj->mm_list, |
4360 | &dev_priv->mm.deferred_free_list); | 3609 | &dev_priv->mm.deferred_free_list); |
4361 | return; | 3610 | return; |
4362 | } | 3611 | } |
4363 | 3612 | ||
4364 | if (obj_priv->mmap_offset) | 3613 | trace_i915_gem_object_destroy(obj); |
3614 | |||
3615 | if (obj->base.map_list.map) | ||
4365 | i915_gem_free_mmap_offset(obj); | 3616 | i915_gem_free_mmap_offset(obj); |
4366 | 3617 | ||
4367 | drm_gem_object_release(obj); | 3618 | drm_gem_object_release(&obj->base); |
3619 | i915_gem_info_remove_obj(dev_priv, obj->base.size); | ||
4368 | 3620 | ||
4369 | kfree(obj_priv->page_cpu_valid); | 3621 | kfree(obj->page_cpu_valid); |
4370 | kfree(obj_priv->bit_17); | 3622 | kfree(obj->bit_17); |
4371 | kfree(obj_priv); | 3623 | kfree(obj); |
4372 | } | 3624 | } |
4373 | 3625 | ||
4374 | void i915_gem_free_object(struct drm_gem_object *obj) | 3626 | void i915_gem_free_object(struct drm_gem_object *gem_obj) |
4375 | { | 3627 | { |
4376 | struct drm_device *dev = obj->dev; | 3628 | struct drm_i915_gem_object *obj = to_intel_bo(gem_obj); |
4377 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 3629 | struct drm_device *dev = obj->base.dev; |
4378 | |||
4379 | trace_i915_gem_object_destroy(obj); | ||
4380 | 3630 | ||
4381 | while (obj_priv->pin_count > 0) | 3631 | while (obj->pin_count > 0) |
4382 | i915_gem_object_unpin(obj); | 3632 | i915_gem_object_unpin(obj); |
4383 | 3633 | ||
4384 | if (obj_priv->phys_obj) | 3634 | if (obj->phys_obj) |
4385 | i915_gem_detach_phys_object(dev, obj); | 3635 | i915_gem_detach_phys_object(dev, obj); |
4386 | 3636 | ||
4387 | i915_gem_free_object_tail(obj); | 3637 | i915_gem_free_object_tail(obj); |
@@ -4395,10 +3645,7 @@ i915_gem_idle(struct drm_device *dev) | |||
4395 | 3645 | ||
4396 | mutex_lock(&dev->struct_mutex); | 3646 | mutex_lock(&dev->struct_mutex); |
4397 | 3647 | ||
4398 | if (dev_priv->mm.suspended || | 3648 | if (dev_priv->mm.suspended) { |
4399 | (dev_priv->render_ring.gem_object == NULL) || | ||
4400 | (HAS_BSD(dev) && | ||
4401 | dev_priv->bsd_ring.gem_object == NULL)) { | ||
4402 | mutex_unlock(&dev->struct_mutex); | 3649 | mutex_unlock(&dev->struct_mutex); |
4403 | return 0; | 3650 | return 0; |
4404 | } | 3651 | } |
@@ -4411,19 +3658,21 @@ i915_gem_idle(struct drm_device *dev) | |||
4411 | 3658 | ||
4412 | /* Under UMS, be paranoid and evict. */ | 3659 | /* Under UMS, be paranoid and evict. */ |
4413 | if (!drm_core_check_feature(dev, DRIVER_MODESET)) { | 3660 | if (!drm_core_check_feature(dev, DRIVER_MODESET)) { |
4414 | ret = i915_gem_evict_inactive(dev); | 3661 | ret = i915_gem_evict_inactive(dev, false); |
4415 | if (ret) { | 3662 | if (ret) { |
4416 | mutex_unlock(&dev->struct_mutex); | 3663 | mutex_unlock(&dev->struct_mutex); |
4417 | return ret; | 3664 | return ret; |
4418 | } | 3665 | } |
4419 | } | 3666 | } |
4420 | 3667 | ||
3668 | i915_gem_reset_fences(dev); | ||
3669 | |||
4421 | /* Hack! Don't let anybody do execbuf while we don't control the chip. | 3670 | /* Hack! Don't let anybody do execbuf while we don't control the chip. |
4422 | * We need to replace this with a semaphore, or something. | 3671 | * We need to replace this with a semaphore, or something. |
4423 | * And not confound mm.suspended! | 3672 | * And not confound mm.suspended! |
4424 | */ | 3673 | */ |
4425 | dev_priv->mm.suspended = 1; | 3674 | dev_priv->mm.suspended = 1; |
4426 | del_timer(&dev_priv->hangcheck_timer); | 3675 | del_timer_sync(&dev_priv->hangcheck_timer); |
4427 | 3676 | ||
4428 | i915_kernel_lost_context(dev); | 3677 | i915_kernel_lost_context(dev); |
4429 | i915_gem_cleanup_ringbuffer(dev); | 3678 | i915_gem_cleanup_ringbuffer(dev); |
@@ -4436,108 +3685,36 @@ i915_gem_idle(struct drm_device *dev) | |||
4436 | return 0; | 3685 | return 0; |
4437 | } | 3686 | } |
4438 | 3687 | ||
4439 | /* | ||
4440 | * 965+ support PIPE_CONTROL commands, which provide finer grained control | ||
4441 | * over cache flushing. | ||
4442 | */ | ||
4443 | static int | ||
4444 | i915_gem_init_pipe_control(struct drm_device *dev) | ||
4445 | { | ||
4446 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
4447 | struct drm_gem_object *obj; | ||
4448 | struct drm_i915_gem_object *obj_priv; | ||
4449 | int ret; | ||
4450 | |||
4451 | obj = i915_gem_alloc_object(dev, 4096); | ||
4452 | if (obj == NULL) { | ||
4453 | DRM_ERROR("Failed to allocate seqno page\n"); | ||
4454 | ret = -ENOMEM; | ||
4455 | goto err; | ||
4456 | } | ||
4457 | obj_priv = to_intel_bo(obj); | ||
4458 | obj_priv->agp_type = AGP_USER_CACHED_MEMORY; | ||
4459 | |||
4460 | ret = i915_gem_object_pin(obj, 4096); | ||
4461 | if (ret) | ||
4462 | goto err_unref; | ||
4463 | |||
4464 | dev_priv->seqno_gfx_addr = obj_priv->gtt_offset; | ||
4465 | dev_priv->seqno_page = kmap(obj_priv->pages[0]); | ||
4466 | if (dev_priv->seqno_page == NULL) | ||
4467 | goto err_unpin; | ||
4468 | |||
4469 | dev_priv->seqno_obj = obj; | ||
4470 | memset(dev_priv->seqno_page, 0, PAGE_SIZE); | ||
4471 | |||
4472 | return 0; | ||
4473 | |||
4474 | err_unpin: | ||
4475 | i915_gem_object_unpin(obj); | ||
4476 | err_unref: | ||
4477 | drm_gem_object_unreference(obj); | ||
4478 | err: | ||
4479 | return ret; | ||
4480 | } | ||
4481 | |||
4482 | |||
4483 | static void | ||
4484 | i915_gem_cleanup_pipe_control(struct drm_device *dev) | ||
4485 | { | ||
4486 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
4487 | struct drm_gem_object *obj; | ||
4488 | struct drm_i915_gem_object *obj_priv; | ||
4489 | |||
4490 | obj = dev_priv->seqno_obj; | ||
4491 | obj_priv = to_intel_bo(obj); | ||
4492 | kunmap(obj_priv->pages[0]); | ||
4493 | i915_gem_object_unpin(obj); | ||
4494 | drm_gem_object_unreference(obj); | ||
4495 | dev_priv->seqno_obj = NULL; | ||
4496 | |||
4497 | dev_priv->seqno_page = NULL; | ||
4498 | } | ||
4499 | |||
4500 | int | 3688 | int |
4501 | i915_gem_init_ringbuffer(struct drm_device *dev) | 3689 | i915_gem_init_ringbuffer(struct drm_device *dev) |
4502 | { | 3690 | { |
4503 | drm_i915_private_t *dev_priv = dev->dev_private; | 3691 | drm_i915_private_t *dev_priv = dev->dev_private; |
4504 | int ret; | 3692 | int ret; |
4505 | 3693 | ||
4506 | dev_priv->render_ring = render_ring; | 3694 | ret = intel_init_render_ring_buffer(dev); |
4507 | |||
4508 | if (!I915_NEED_GFX_HWS(dev)) { | ||
4509 | dev_priv->render_ring.status_page.page_addr | ||
4510 | = dev_priv->status_page_dmah->vaddr; | ||
4511 | memset(dev_priv->render_ring.status_page.page_addr, | ||
4512 | 0, PAGE_SIZE); | ||
4513 | } | ||
4514 | |||
4515 | if (HAS_PIPE_CONTROL(dev)) { | ||
4516 | ret = i915_gem_init_pipe_control(dev); | ||
4517 | if (ret) | ||
4518 | return ret; | ||
4519 | } | ||
4520 | |||
4521 | ret = intel_init_ring_buffer(dev, &dev_priv->render_ring); | ||
4522 | if (ret) | 3695 | if (ret) |
4523 | goto cleanup_pipe_control; | 3696 | return ret; |
4524 | 3697 | ||
4525 | if (HAS_BSD(dev)) { | 3698 | if (HAS_BSD(dev)) { |
4526 | dev_priv->bsd_ring = bsd_ring; | 3699 | ret = intel_init_bsd_ring_buffer(dev); |
4527 | ret = intel_init_ring_buffer(dev, &dev_priv->bsd_ring); | ||
4528 | if (ret) | 3700 | if (ret) |
4529 | goto cleanup_render_ring; | 3701 | goto cleanup_render_ring; |
4530 | } | 3702 | } |
4531 | 3703 | ||
3704 | if (HAS_BLT(dev)) { | ||
3705 | ret = intel_init_blt_ring_buffer(dev); | ||
3706 | if (ret) | ||
3707 | goto cleanup_bsd_ring; | ||
3708 | } | ||
3709 | |||
4532 | dev_priv->next_seqno = 1; | 3710 | dev_priv->next_seqno = 1; |
4533 | 3711 | ||
4534 | return 0; | 3712 | return 0; |
4535 | 3713 | ||
3714 | cleanup_bsd_ring: | ||
3715 | intel_cleanup_ring_buffer(&dev_priv->ring[VCS]); | ||
4536 | cleanup_render_ring: | 3716 | cleanup_render_ring: |
4537 | intel_cleanup_ring_buffer(dev, &dev_priv->render_ring); | 3717 | intel_cleanup_ring_buffer(&dev_priv->ring[RCS]); |
4538 | cleanup_pipe_control: | ||
4539 | if (HAS_PIPE_CONTROL(dev)) | ||
4540 | i915_gem_cleanup_pipe_control(dev); | ||
4541 | return ret; | 3718 | return ret; |
4542 | } | 3719 | } |
4543 | 3720 | ||
@@ -4545,12 +3722,10 @@ void | |||
4545 | i915_gem_cleanup_ringbuffer(struct drm_device *dev) | 3722 | i915_gem_cleanup_ringbuffer(struct drm_device *dev) |
4546 | { | 3723 | { |
4547 | drm_i915_private_t *dev_priv = dev->dev_private; | 3724 | drm_i915_private_t *dev_priv = dev->dev_private; |
3725 | int i; | ||
4548 | 3726 | ||
4549 | intel_cleanup_ring_buffer(dev, &dev_priv->render_ring); | 3727 | for (i = 0; i < I915_NUM_RINGS; i++) |
4550 | if (HAS_BSD(dev)) | 3728 | intel_cleanup_ring_buffer(&dev_priv->ring[i]); |
4551 | intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring); | ||
4552 | if (HAS_PIPE_CONTROL(dev)) | ||
4553 | i915_gem_cleanup_pipe_control(dev); | ||
4554 | } | 3729 | } |
4555 | 3730 | ||
4556 | int | 3731 | int |
@@ -4558,7 +3733,7 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data, | |||
4558 | struct drm_file *file_priv) | 3733 | struct drm_file *file_priv) |
4559 | { | 3734 | { |
4560 | drm_i915_private_t *dev_priv = dev->dev_private; | 3735 | drm_i915_private_t *dev_priv = dev->dev_private; |
4561 | int ret; | 3736 | int ret, i; |
4562 | 3737 | ||
4563 | if (drm_core_check_feature(dev, DRIVER_MODESET)) | 3738 | if (drm_core_check_feature(dev, DRIVER_MODESET)) |
4564 | return 0; | 3739 | return 0; |
@@ -4577,15 +3752,13 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data, | |||
4577 | return ret; | 3752 | return ret; |
4578 | } | 3753 | } |
4579 | 3754 | ||
4580 | spin_lock(&dev_priv->mm.active_list_lock); | 3755 | BUG_ON(!list_empty(&dev_priv->mm.active_list)); |
4581 | BUG_ON(!list_empty(&dev_priv->render_ring.active_list)); | ||
4582 | BUG_ON(HAS_BSD(dev) && !list_empty(&dev_priv->bsd_ring.active_list)); | ||
4583 | spin_unlock(&dev_priv->mm.active_list_lock); | ||
4584 | |||
4585 | BUG_ON(!list_empty(&dev_priv->mm.flushing_list)); | 3756 | BUG_ON(!list_empty(&dev_priv->mm.flushing_list)); |
4586 | BUG_ON(!list_empty(&dev_priv->mm.inactive_list)); | 3757 | BUG_ON(!list_empty(&dev_priv->mm.inactive_list)); |
4587 | BUG_ON(!list_empty(&dev_priv->render_ring.request_list)); | 3758 | for (i = 0; i < I915_NUM_RINGS; i++) { |
4588 | BUG_ON(HAS_BSD(dev) && !list_empty(&dev_priv->bsd_ring.request_list)); | 3759 | BUG_ON(!list_empty(&dev_priv->ring[i].active_list)); |
3760 | BUG_ON(!list_empty(&dev_priv->ring[i].request_list)); | ||
3761 | } | ||
4589 | mutex_unlock(&dev->struct_mutex); | 3762 | mutex_unlock(&dev->struct_mutex); |
4590 | 3763 | ||
4591 | ret = drm_irq_install(dev); | 3764 | ret = drm_irq_install(dev); |
@@ -4627,31 +3800,34 @@ i915_gem_lastclose(struct drm_device *dev) | |||
4627 | DRM_ERROR("failed to idle hardware: %d\n", ret); | 3800 | DRM_ERROR("failed to idle hardware: %d\n", ret); |
4628 | } | 3801 | } |
4629 | 3802 | ||
3803 | static void | ||
3804 | init_ring_lists(struct intel_ring_buffer *ring) | ||
3805 | { | ||
3806 | INIT_LIST_HEAD(&ring->active_list); | ||
3807 | INIT_LIST_HEAD(&ring->request_list); | ||
3808 | INIT_LIST_HEAD(&ring->gpu_write_list); | ||
3809 | } | ||
3810 | |||
4630 | void | 3811 | void |
4631 | i915_gem_load(struct drm_device *dev) | 3812 | i915_gem_load(struct drm_device *dev) |
4632 | { | 3813 | { |
4633 | int i; | 3814 | int i; |
4634 | drm_i915_private_t *dev_priv = dev->dev_private; | 3815 | drm_i915_private_t *dev_priv = dev->dev_private; |
4635 | 3816 | ||
4636 | spin_lock_init(&dev_priv->mm.active_list_lock); | 3817 | INIT_LIST_HEAD(&dev_priv->mm.active_list); |
4637 | INIT_LIST_HEAD(&dev_priv->mm.flushing_list); | 3818 | INIT_LIST_HEAD(&dev_priv->mm.flushing_list); |
4638 | INIT_LIST_HEAD(&dev_priv->mm.gpu_write_list); | ||
4639 | INIT_LIST_HEAD(&dev_priv->mm.inactive_list); | 3819 | INIT_LIST_HEAD(&dev_priv->mm.inactive_list); |
3820 | INIT_LIST_HEAD(&dev_priv->mm.pinned_list); | ||
4640 | INIT_LIST_HEAD(&dev_priv->mm.fence_list); | 3821 | INIT_LIST_HEAD(&dev_priv->mm.fence_list); |
4641 | INIT_LIST_HEAD(&dev_priv->mm.deferred_free_list); | 3822 | INIT_LIST_HEAD(&dev_priv->mm.deferred_free_list); |
4642 | INIT_LIST_HEAD(&dev_priv->render_ring.active_list); | 3823 | INIT_LIST_HEAD(&dev_priv->mm.gtt_list); |
4643 | INIT_LIST_HEAD(&dev_priv->render_ring.request_list); | 3824 | for (i = 0; i < I915_NUM_RINGS; i++) |
4644 | if (HAS_BSD(dev)) { | 3825 | init_ring_lists(&dev_priv->ring[i]); |
4645 | INIT_LIST_HEAD(&dev_priv->bsd_ring.active_list); | ||
4646 | INIT_LIST_HEAD(&dev_priv->bsd_ring.request_list); | ||
4647 | } | ||
4648 | for (i = 0; i < 16; i++) | 3826 | for (i = 0; i < 16; i++) |
4649 | INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list); | 3827 | INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list); |
4650 | INIT_DELAYED_WORK(&dev_priv->mm.retire_work, | 3828 | INIT_DELAYED_WORK(&dev_priv->mm.retire_work, |
4651 | i915_gem_retire_work_handler); | 3829 | i915_gem_retire_work_handler); |
4652 | spin_lock(&shrink_list_lock); | 3830 | init_completion(&dev_priv->error_completion); |
4653 | list_add(&dev_priv->mm.shrink_list, &shrink_list); | ||
4654 | spin_unlock(&shrink_list_lock); | ||
4655 | 3831 | ||
4656 | /* On GEN3 we really need to make sure the ARB C3 LP bit is set */ | 3832 | /* On GEN3 we really need to make sure the ARB C3 LP bit is set */ |
4657 | if (IS_GEN3(dev)) { | 3833 | if (IS_GEN3(dev)) { |
@@ -4663,36 +3839,38 @@ i915_gem_load(struct drm_device *dev) | |||
4663 | } | 3839 | } |
4664 | } | 3840 | } |
4665 | 3841 | ||
3842 | dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL; | ||
3843 | |||
4666 | /* Old X drivers will take 0-2 for front, back, depth buffers */ | 3844 | /* Old X drivers will take 0-2 for front, back, depth buffers */ |
4667 | if (!drm_core_check_feature(dev, DRIVER_MODESET)) | 3845 | if (!drm_core_check_feature(dev, DRIVER_MODESET)) |
4668 | dev_priv->fence_reg_start = 3; | 3846 | dev_priv->fence_reg_start = 3; |
4669 | 3847 | ||
4670 | if (IS_I965G(dev) || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) | 3848 | if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) |
4671 | dev_priv->num_fence_regs = 16; | 3849 | dev_priv->num_fence_regs = 16; |
4672 | else | 3850 | else |
4673 | dev_priv->num_fence_regs = 8; | 3851 | dev_priv->num_fence_regs = 8; |
4674 | 3852 | ||
4675 | /* Initialize fence registers to zero */ | 3853 | /* Initialize fence registers to zero */ |
4676 | if (IS_I965G(dev)) { | 3854 | for (i = 0; i < dev_priv->num_fence_regs; i++) { |
4677 | for (i = 0; i < 16; i++) | 3855 | i915_gem_clear_fence_reg(dev, &dev_priv->fence_regs[i]); |
4678 | I915_WRITE64(FENCE_REG_965_0 + (i * 8), 0); | ||
4679 | } else { | ||
4680 | for (i = 0; i < 8; i++) | ||
4681 | I915_WRITE(FENCE_REG_830_0 + (i * 4), 0); | ||
4682 | if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) | ||
4683 | for (i = 0; i < 8; i++) | ||
4684 | I915_WRITE(FENCE_REG_945_8 + (i * 4), 0); | ||
4685 | } | 3856 | } |
3857 | |||
4686 | i915_gem_detect_bit_6_swizzle(dev); | 3858 | i915_gem_detect_bit_6_swizzle(dev); |
4687 | init_waitqueue_head(&dev_priv->pending_flip_queue); | 3859 | init_waitqueue_head(&dev_priv->pending_flip_queue); |
3860 | |||
3861 | dev_priv->mm.interruptible = true; | ||
3862 | |||
3863 | dev_priv->mm.inactive_shrinker.shrink = i915_gem_inactive_shrink; | ||
3864 | dev_priv->mm.inactive_shrinker.seeks = DEFAULT_SEEKS; | ||
3865 | register_shrinker(&dev_priv->mm.inactive_shrinker); | ||
4688 | } | 3866 | } |
4689 | 3867 | ||
4690 | /* | 3868 | /* |
4691 | * Create a physically contiguous memory object for this object | 3869 | * Create a physically contiguous memory object for this object |
4692 | * e.g. for cursor + overlay regs | 3870 | * e.g. for cursor + overlay regs |
4693 | */ | 3871 | */ |
4694 | int i915_gem_init_phys_object(struct drm_device *dev, | 3872 | static int i915_gem_init_phys_object(struct drm_device *dev, |
4695 | int id, int size, int align) | 3873 | int id, int size, int align) |
4696 | { | 3874 | { |
4697 | drm_i915_private_t *dev_priv = dev->dev_private; | 3875 | drm_i915_private_t *dev_priv = dev->dev_private; |
4698 | struct drm_i915_gem_phys_object *phys_obj; | 3876 | struct drm_i915_gem_phys_object *phys_obj; |
@@ -4724,7 +3902,7 @@ kfree_obj: | |||
4724 | return ret; | 3902 | return ret; |
4725 | } | 3903 | } |
4726 | 3904 | ||
4727 | void i915_gem_free_phys_object(struct drm_device *dev, int id) | 3905 | static void i915_gem_free_phys_object(struct drm_device *dev, int id) |
4728 | { | 3906 | { |
4729 | drm_i915_private_t *dev_priv = dev->dev_private; | 3907 | drm_i915_private_t *dev_priv = dev->dev_private; |
4730 | struct drm_i915_gem_phys_object *phys_obj; | 3908 | struct drm_i915_gem_phys_object *phys_obj; |
@@ -4754,47 +3932,46 @@ void i915_gem_free_all_phys_object(struct drm_device *dev) | |||
4754 | } | 3932 | } |
4755 | 3933 | ||
4756 | void i915_gem_detach_phys_object(struct drm_device *dev, | 3934 | void i915_gem_detach_phys_object(struct drm_device *dev, |
4757 | struct drm_gem_object *obj) | 3935 | struct drm_i915_gem_object *obj) |
4758 | { | 3936 | { |
4759 | struct drm_i915_gem_object *obj_priv; | 3937 | struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping; |
3938 | char *vaddr; | ||
4760 | int i; | 3939 | int i; |
4761 | int ret; | ||
4762 | int page_count; | 3940 | int page_count; |
4763 | 3941 | ||
4764 | obj_priv = to_intel_bo(obj); | 3942 | if (!obj->phys_obj) |
4765 | if (!obj_priv->phys_obj) | ||
4766 | return; | 3943 | return; |
3944 | vaddr = obj->phys_obj->handle->vaddr; | ||
4767 | 3945 | ||
4768 | ret = i915_gem_object_get_pages(obj, 0); | 3946 | page_count = obj->base.size / PAGE_SIZE; |
4769 | if (ret) | ||
4770 | goto out; | ||
4771 | |||
4772 | page_count = obj->size / PAGE_SIZE; | ||
4773 | |||
4774 | for (i = 0; i < page_count; i++) { | 3947 | for (i = 0; i < page_count; i++) { |
4775 | char *dst = kmap_atomic(obj_priv->pages[i], KM_USER0); | 3948 | struct page *page = shmem_read_mapping_page(mapping, i); |
4776 | char *src = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE); | 3949 | if (!IS_ERR(page)) { |
3950 | char *dst = kmap_atomic(page); | ||
3951 | memcpy(dst, vaddr + i*PAGE_SIZE, PAGE_SIZE); | ||
3952 | kunmap_atomic(dst); | ||
4777 | 3953 | ||
4778 | memcpy(dst, src, PAGE_SIZE); | 3954 | drm_clflush_pages(&page, 1); |
4779 | kunmap_atomic(dst, KM_USER0); | 3955 | |
3956 | set_page_dirty(page); | ||
3957 | mark_page_accessed(page); | ||
3958 | page_cache_release(page); | ||
3959 | } | ||
4780 | } | 3960 | } |
4781 | drm_clflush_pages(obj_priv->pages, page_count); | 3961 | intel_gtt_chipset_flush(); |
4782 | drm_agp_chipset_flush(dev); | ||
4783 | 3962 | ||
4784 | i915_gem_object_put_pages(obj); | 3963 | obj->phys_obj->cur_obj = NULL; |
4785 | out: | 3964 | obj->phys_obj = NULL; |
4786 | obj_priv->phys_obj->cur_obj = NULL; | ||
4787 | obj_priv->phys_obj = NULL; | ||
4788 | } | 3965 | } |
4789 | 3966 | ||
4790 | int | 3967 | int |
4791 | i915_gem_attach_phys_object(struct drm_device *dev, | 3968 | i915_gem_attach_phys_object(struct drm_device *dev, |
4792 | struct drm_gem_object *obj, | 3969 | struct drm_i915_gem_object *obj, |
4793 | int id, | 3970 | int id, |
4794 | int align) | 3971 | int align) |
4795 | { | 3972 | { |
3973 | struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping; | ||
4796 | drm_i915_private_t *dev_priv = dev->dev_private; | 3974 | drm_i915_private_t *dev_priv = dev->dev_private; |
4797 | struct drm_i915_gem_object *obj_priv; | ||
4798 | int ret = 0; | 3975 | int ret = 0; |
4799 | int page_count; | 3976 | int page_count; |
4800 | int i; | 3977 | int i; |
@@ -4802,10 +3979,8 @@ i915_gem_attach_phys_object(struct drm_device *dev, | |||
4802 | if (id > I915_MAX_PHYS_OBJECT) | 3979 | if (id > I915_MAX_PHYS_OBJECT) |
4803 | return -EINVAL; | 3980 | return -EINVAL; |
4804 | 3981 | ||
4805 | obj_priv = to_intel_bo(obj); | 3982 | if (obj->phys_obj) { |
4806 | 3983 | if (obj->phys_obj->id == id) | |
4807 | if (obj_priv->phys_obj) { | ||
4808 | if (obj_priv->phys_obj->id == id) | ||
4809 | return 0; | 3984 | return 0; |
4810 | i915_gem_detach_phys_object(dev, obj); | 3985 | i915_gem_detach_phys_object(dev, obj); |
4811 | } | 3986 | } |
@@ -4813,74 +3988,86 @@ i915_gem_attach_phys_object(struct drm_device *dev, | |||
4813 | /* create a new object */ | 3988 | /* create a new object */ |
4814 | if (!dev_priv->mm.phys_objs[id - 1]) { | 3989 | if (!dev_priv->mm.phys_objs[id - 1]) { |
4815 | ret = i915_gem_init_phys_object(dev, id, | 3990 | ret = i915_gem_init_phys_object(dev, id, |
4816 | obj->size, align); | 3991 | obj->base.size, align); |
4817 | if (ret) { | 3992 | if (ret) { |
4818 | DRM_ERROR("failed to init phys object %d size: %zu\n", id, obj->size); | 3993 | DRM_ERROR("failed to init phys object %d size: %zu\n", |
4819 | goto out; | 3994 | id, obj->base.size); |
3995 | return ret; | ||
4820 | } | 3996 | } |
4821 | } | 3997 | } |
4822 | 3998 | ||
4823 | /* bind to the object */ | 3999 | /* bind to the object */ |
4824 | obj_priv->phys_obj = dev_priv->mm.phys_objs[id - 1]; | 4000 | obj->phys_obj = dev_priv->mm.phys_objs[id - 1]; |
4825 | obj_priv->phys_obj->cur_obj = obj; | 4001 | obj->phys_obj->cur_obj = obj; |
4826 | |||
4827 | ret = i915_gem_object_get_pages(obj, 0); | ||
4828 | if (ret) { | ||
4829 | DRM_ERROR("failed to get page list\n"); | ||
4830 | goto out; | ||
4831 | } | ||
4832 | 4002 | ||
4833 | page_count = obj->size / PAGE_SIZE; | 4003 | page_count = obj->base.size / PAGE_SIZE; |
4834 | 4004 | ||
4835 | for (i = 0; i < page_count; i++) { | 4005 | for (i = 0; i < page_count; i++) { |
4836 | char *src = kmap_atomic(obj_priv->pages[i], KM_USER0); | 4006 | struct page *page; |
4837 | char *dst = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE); | 4007 | char *dst, *src; |
4838 | 4008 | ||
4009 | page = shmem_read_mapping_page(mapping, i); | ||
4010 | if (IS_ERR(page)) | ||
4011 | return PTR_ERR(page); | ||
4012 | |||
4013 | src = kmap_atomic(page); | ||
4014 | dst = obj->phys_obj->handle->vaddr + (i * PAGE_SIZE); | ||
4839 | memcpy(dst, src, PAGE_SIZE); | 4015 | memcpy(dst, src, PAGE_SIZE); |
4840 | kunmap_atomic(src, KM_USER0); | 4016 | kunmap_atomic(src); |
4841 | } | ||
4842 | 4017 | ||
4843 | i915_gem_object_put_pages(obj); | 4018 | mark_page_accessed(page); |
4019 | page_cache_release(page); | ||
4020 | } | ||
4844 | 4021 | ||
4845 | return 0; | 4022 | return 0; |
4846 | out: | ||
4847 | return ret; | ||
4848 | } | 4023 | } |
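Both attach and detach reduce to a page-sized copy loop between the shmem backing store and the contiguous buffer, plus a clflush on detach so the CPU caches and the uncached physical object agree. Stripped of the kernel page machinery, the copy pattern is just:

#include <string.h>

#define TOY_PAGE_SIZE 4096

/* attach: gather the per-page backing store into the contiguous vaddr. */
static void contig_from_pages(char *vaddr, char *const pages[], int page_count)
{
	for (int i = 0; i < page_count; i++)
		memcpy(vaddr + (size_t)i * TOY_PAGE_SIZE,
		       pages[i], TOY_PAGE_SIZE);
}

/* detach: scatter the contiguous buffer back out to the pages
 * (the kernel additionally clflushes and dirties each page here). */
static void pages_from_contig(char *const pages[], const char *vaddr,
			      int page_count)
{
	for (int i = 0; i < page_count; i++)
		memcpy(pages[i],
		       vaddr + (size_t)i * TOY_PAGE_SIZE, TOY_PAGE_SIZE);
}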
4849 | 4024 | ||
4850 | static int | 4025 | static int |
4851 | i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj, | 4026 | i915_gem_phys_pwrite(struct drm_device *dev, |
4027 | struct drm_i915_gem_object *obj, | ||
4852 | struct drm_i915_gem_pwrite *args, | 4028 | struct drm_i915_gem_pwrite *args, |
4853 | struct drm_file *file_priv) | 4029 | struct drm_file *file_priv) |
4854 | { | 4030 | { |
4855 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 4031 | void *vaddr = obj->phys_obj->handle->vaddr + args->offset; |
4856 | void *obj_addr; | 4032 | char __user *user_data = (char __user *) (uintptr_t) args->data_ptr; |
4857 | int ret; | ||
4858 | char __user *user_data; | ||
4859 | 4033 | ||
4860 | user_data = (char __user *) (uintptr_t) args->data_ptr; | 4034 | if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) { |
4861 | obj_addr = obj_priv->phys_obj->handle->vaddr + args->offset; | 4035 | unsigned long unwritten; |
4862 | 4036 | ||
4863 | DRM_DEBUG_DRIVER("obj_addr %p, %lld\n", obj_addr, args->size); | 4037 | /* The physical object once assigned is fixed for the lifetime |
4864 | ret = copy_from_user(obj_addr, user_data, args->size); | 4038 | * of the obj, so we can safely drop the lock and continue |
4865 | if (ret) | 4039 | * to access vaddr. |
4866 | return -EFAULT; | 4040 | */ |
4041 | mutex_unlock(&dev->struct_mutex); | ||
4042 | unwritten = copy_from_user(vaddr, user_data, args->size); | ||
4043 | mutex_lock(&dev->struct_mutex); | ||
4044 | if (unwritten) | ||
4045 | return -EFAULT; | ||
4046 | } | ||
4867 | 4047 | ||
4868 | drm_agp_chipset_flush(dev); | 4048 | intel_gtt_chipset_flush(); |
4869 | return 0; | 4049 | return 0; |
4870 | } | 4050 | } |
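The pwrite path first tries the non-faulting atomic copy while still holding struct_mutex; only when that cannot complete does it drop the lock for the faulting slow path, which the comment justifies by the vaddr being fixed for the object's lifetime. The locking shape, sketched with pthreads (copy_fast is a hypothetical stand-in for __copy_from_user_inatomic_nocache()):

#include <pthread.h>
#include <string.h>

static pthread_mutex_t dev_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Returns 0 on success, nonzero if it could not finish without
 * blocking (in the kernel: the user page was not resident). */
static int copy_fast(void *dst, const void *src, size_t n)
{
	memcpy(dst, src, n);	/* the toy version always succeeds */
	return 0;
}

/* Caller holds dev_mutex, mirroring i915_gem_phys_pwrite(). */
static void pwrite_locked(void *vaddr, const void *user, size_t n)
{
	if (copy_fast(vaddr, user, n)) {
		/* Slow path may sleep: drop the lock, copy, retake it.
		 * Safe only because vaddr cannot go away underneath us. */
		pthread_mutex_unlock(&dev_mutex);
		memcpy(vaddr, user, n);
		pthread_mutex_lock(&dev_mutex);
	}
}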
4871 | 4051 | ||
4872 | void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv) | 4052 | void i915_gem_release(struct drm_device *dev, struct drm_file *file) |
4873 | { | 4053 | { |
4874 | struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv; | 4054 | struct drm_i915_file_private *file_priv = file->driver_priv; |
4875 | 4055 | ||
4876 | /* Clean up our request list when the client is going away, so that | 4056 | /* Clean up our request list when the client is going away, so that |
4877 | * later retire_requests won't dereference our soon-to-be-gone | 4057 | * later retire_requests won't dereference our soon-to-be-gone |
4878 | * file_priv. | 4058 | * file_priv. |
4879 | */ | 4059 | */ |
4880 | mutex_lock(&dev->struct_mutex); | 4060 | spin_lock(&file_priv->mm.lock); |
4881 | while (!list_empty(&i915_file_priv->mm.request_list)) | 4061 | while (!list_empty(&file_priv->mm.request_list)) { |
4882 | list_del_init(i915_file_priv->mm.request_list.next); | 4062 | struct drm_i915_gem_request *request; |
4883 | mutex_unlock(&dev->struct_mutex); | 4063 | |
4064 | request = list_first_entry(&file_priv->mm.request_list, | ||
4065 | struct drm_i915_gem_request, | ||
4066 | client_list); | ||
4067 | list_del(&request->client_list); | ||
4068 | request->file_priv = NULL; | ||
4069 | } | ||
4070 | spin_unlock(&file_priv->mm.lock); | ||
4884 | } | 4071 | } |
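Client teardown no longer frees anything: it just severs each request's back-pointer to the dying file under the new per-file spinlock, and normal retirement frees the requests later. The loop is the standard pop-from-head-until-empty idiom, sketched here with a plain singly linked list (the spinlock is assumed to have been pthread_spin_init()ed elsewhere):

#include <pthread.h>
#include <stddef.h>

struct request {
	struct request *next;
	void *file_priv;	/* back-pointer being severed */
};

struct file_priv {
	pthread_spinlock_t lock;
	struct request *head;
};

/* Nothing is freed here -- retirement frees each request once the
 * GPU is done with it; we only make sure it no longer points at us. */
static void release_client(struct file_priv *fp)
{
	pthread_spin_lock(&fp->lock);
	while (fp->head != NULL) {
		struct request *req = fp->head;

		fp->head = req->next;
		req->next = NULL;
		req->file_priv = NULL;
	}
	pthread_spin_unlock(&fp->lock);
}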
4885 | 4072 | ||
4886 | static int | 4073 | static int |
@@ -4889,155 +4076,74 @@ i915_gpu_is_active(struct drm_device *dev) | |||
4889 | drm_i915_private_t *dev_priv = dev->dev_private; | 4076 | drm_i915_private_t *dev_priv = dev->dev_private; |
4890 | int lists_empty; | 4077 | int lists_empty; |
4891 | 4078 | ||
4892 | spin_lock(&dev_priv->mm.active_list_lock); | ||
4893 | lists_empty = list_empty(&dev_priv->mm.flushing_list) && | 4079 | lists_empty = list_empty(&dev_priv->mm.flushing_list) && |
4894 | list_empty(&dev_priv->render_ring.active_list); | 4080 | list_empty(&dev_priv->mm.active_list); |
4895 | if (HAS_BSD(dev)) | ||
4896 | lists_empty &= list_empty(&dev_priv->bsd_ring.active_list); | ||
4897 | spin_unlock(&dev_priv->mm.active_list_lock); | ||
4898 | 4081 | ||
4899 | return !lists_empty; | 4082 | return !lists_empty; |
4900 | } | 4083 | } |
4901 | 4084 | ||
4902 | static int | 4085 | static int |
4903 | i915_gem_shrink(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask) | 4086 | i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc) |
4904 | { | 4087 | { |
4905 | drm_i915_private_t *dev_priv, *next_dev; | 4088 | struct drm_i915_private *dev_priv = |
4906 | struct drm_i915_gem_object *obj_priv, *next_obj; | 4089 | container_of(shrinker, |
4907 | int cnt = 0; | 4090 | struct drm_i915_private, |
4908 | int would_deadlock = 1; | 4091 | mm.inactive_shrinker); |
4092 | struct drm_device *dev = dev_priv->dev; | ||
4093 | struct drm_i915_gem_object *obj, *next; | ||
4094 | int nr_to_scan = sc->nr_to_scan; | ||
4095 | int cnt; | ||
4096 | |||
4097 | if (!mutex_trylock(&dev->struct_mutex)) | ||
4098 | return 0; | ||
4909 | 4099 | ||
4910 | /* "fast-path" to count number of available objects */ | 4100 | /* "fast-path" to count number of available objects */ |
4911 | if (nr_to_scan == 0) { | 4101 | if (nr_to_scan == 0) { |
4912 | spin_lock(&shrink_list_lock); | 4102 | cnt = 0; |
4913 | list_for_each_entry(dev_priv, &shrink_list, mm.shrink_list) { | 4103 | list_for_each_entry(obj, |
4914 | struct drm_device *dev = dev_priv->dev; | 4104 | &dev_priv->mm.inactive_list, |
4915 | 4105 | mm_list) | |
4916 | if (mutex_trylock(&dev->struct_mutex)) { | 4106 | cnt++; |
4917 | list_for_each_entry(obj_priv, | 4107 | mutex_unlock(&dev->struct_mutex); |
4918 | &dev_priv->mm.inactive_list, | 4108 | return cnt / 100 * sysctl_vfs_cache_pressure; |
4919 | list) | ||
4920 | cnt++; | ||
4921 | mutex_unlock(&dev->struct_mutex); | ||
4922 | } | ||
4923 | } | ||
4924 | spin_unlock(&shrink_list_lock); | ||
4925 | |||
4926 | return (cnt / 100) * sysctl_vfs_cache_pressure; | ||
4927 | } | 4109 | } |
4928 | 4110 | ||
4929 | spin_lock(&shrink_list_lock); | ||
4930 | |||
4931 | rescan: | 4111 | rescan: |
4932 | /* first scan for clean buffers */ | 4112 | /* first scan for clean buffers */ |
4933 | list_for_each_entry_safe(dev_priv, next_dev, | 4113 | i915_gem_retire_requests(dev); |
4934 | &shrink_list, mm.shrink_list) { | ||
4935 | struct drm_device *dev = dev_priv->dev; | ||
4936 | |||
4937 | if (! mutex_trylock(&dev->struct_mutex)) | ||
4938 | continue; | ||
4939 | |||
4940 | spin_unlock(&shrink_list_lock); | ||
4941 | i915_gem_retire_requests(dev); | ||
4942 | 4114 | ||
4943 | list_for_each_entry_safe(obj_priv, next_obj, | 4115 | list_for_each_entry_safe(obj, next, |
4944 | &dev_priv->mm.inactive_list, | 4116 | &dev_priv->mm.inactive_list, |
4945 | list) { | 4117 | mm_list) { |
4946 | if (i915_gem_object_is_purgeable(obj_priv)) { | 4118 | if (i915_gem_object_is_purgeable(obj)) { |
4947 | i915_gem_object_unbind(&obj_priv->base); | 4119 | if (i915_gem_object_unbind(obj) == 0 && |
4948 | if (--nr_to_scan <= 0) | 4120 | --nr_to_scan == 0) |
4949 | break; | 4121 | break; |
4950 | } | ||
4951 | } | 4122 | } |
4952 | |||
4953 | spin_lock(&shrink_list_lock); | ||
4954 | mutex_unlock(&dev->struct_mutex); | ||
4955 | |||
4956 | would_deadlock = 0; | ||
4957 | |||
4958 | if (nr_to_scan <= 0) | ||
4959 | break; | ||
4960 | } | 4123 | } |
4961 | 4124 | ||
4962 | /* second pass, evict/count anything still on the inactive list */ | 4125 | /* second pass, evict/count anything still on the inactive list */ |
4963 | list_for_each_entry_safe(dev_priv, next_dev, | 4126 | cnt = 0; |
4964 | &shrink_list, mm.shrink_list) { | 4127 | list_for_each_entry_safe(obj, next, |
4965 | struct drm_device *dev = dev_priv->dev; | 4128 | &dev_priv->mm.inactive_list, |
4966 | 4129 | mm_list) { | |
4967 | if (! mutex_trylock(&dev->struct_mutex)) | 4130 | if (nr_to_scan && |
4968 | continue; | 4131 | i915_gem_object_unbind(obj) == 0) |
4969 | 4132 | nr_to_scan--; | |
4970 | spin_unlock(&shrink_list_lock); | 4133 | else |
4971 | 4134 | cnt++; | |
4972 | list_for_each_entry_safe(obj_priv, next_obj, | ||
4973 | &dev_priv->mm.inactive_list, | ||
4974 | list) { | ||
4975 | if (nr_to_scan > 0) { | ||
4976 | i915_gem_object_unbind(&obj_priv->base); | ||
4977 | nr_to_scan--; | ||
4978 | } else | ||
4979 | cnt++; | ||
4980 | } | ||
4981 | |||
4982 | spin_lock(&shrink_list_lock); | ||
4983 | mutex_unlock(&dev->struct_mutex); | ||
4984 | |||
4985 | would_deadlock = 0; | ||
4986 | } | 4135 | } |
4987 | 4136 | ||
4988 | if (nr_to_scan) { | 4137 | if (nr_to_scan && i915_gpu_is_active(dev)) { |
4989 | int active = 0; | ||
4990 | |||
4991 | /* | 4138 | /* |
4992 | * We are desperate for pages, so as a last resort, wait | 4139 | * We are desperate for pages, so as a last resort, wait |
4993 | * for the GPU to finish and discard whatever we can. | 4140 | * for the GPU to finish and discard whatever we can. |
4994 | * This has a dramatic impact, reducing the number of | 4141 | * This has a dramatic impact, reducing the number of |
4995 | * OOM-killer events whilst running the GPU aggressively. | 4142 | * OOM-killer events whilst running the GPU aggressively. |
4996 | */ | 4143 | */ |
4997 | list_for_each_entry(dev_priv, &shrink_list, mm.shrink_list) { | 4144 | if (i915_gpu_idle(dev) == 0) |
4998 | struct drm_device *dev = dev_priv->dev; | ||
4999 | |||
5000 | if (!mutex_trylock(&dev->struct_mutex)) | ||
5001 | continue; | ||
5002 | |||
5003 | spin_unlock(&shrink_list_lock); | ||
5004 | |||
5005 | if (i915_gpu_is_active(dev)) { | ||
5006 | i915_gpu_idle(dev); | ||
5007 | active++; | ||
5008 | } | ||
5009 | |||
5010 | spin_lock(&shrink_list_lock); | ||
5011 | mutex_unlock(&dev->struct_mutex); | ||
5012 | } | ||
5013 | |||
5014 | if (active) | ||
5015 | goto rescan; | 4145 | goto rescan; |
5016 | } | 4146 | } |
5017 | 4147 | mutex_unlock(&dev->struct_mutex); | |
5018 | spin_unlock(&shrink_list_lock); | 4148 | return cnt / 100 * sysctl_vfs_cache_pressure; |
5019 | |||
5020 | if (would_deadlock) | ||
5021 | return -1; | ||
5022 | else if (cnt > 0) | ||
5023 | return (cnt / 100) * sysctl_vfs_cache_pressure; | ||
5024 | else | ||
5025 | return 0; | ||
5026 | } | ||
5027 | |||
5028 | static struct shrinker shrinker = { | ||
5029 | .shrink = i915_gem_shrink, | ||
5030 | .seeks = DEFAULT_SEEKS, | ||
5031 | }; | ||
5032 | |||
5033 | __init void | ||
5034 | i915_gem_shrinker_init(void) | ||
5035 | { | ||
5036 | register_shrinker(&shrinker); | ||
5037 | } | ||
5038 | |||
5039 | __exit void | ||
5040 | i915_gem_shrinker_exit(void) | ||
5041 | { | ||
5042 | unregister_shrinker(&shrinker); | ||
5043 | } | 4149 | } |
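The per-device shrinker that replaces the global shrink_list keeps the same policy in a much smaller body: trylock and report nothing if struct_mutex is contended (the old would_deadlock dance), treat nr_to_scan == 0 as a pure count, otherwise unbind purgeable buffers first and any inactive buffer second, with a full GPU idle only as a last resort. A toy model of that policy (the /100 * vfs_cache_pressure scaling and the GPU-idle rescan are left out):

#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

struct buf { struct buf *next; bool purgeable; };

static pthread_mutex_t dev_mutex = PTHREAD_MUTEX_INITIALIZER;
static struct buf *inactive_list;

static void unbind(struct buf *b)
{
	(void)b;	/* would release the buffer's GTT space */
}

/* Mirrors the shape of i915_gem_inactive_shrink(). */
static long toy_shrink(long nr_to_scan)
{
	struct buf **p;
	long cnt = 0;

	if (pthread_mutex_trylock(&dev_mutex))
		return 0;	/* contended: report nothing, avoid deadlock */

	if (nr_to_scan == 0) {	/* fast path: just count */
		for (struct buf *b = inactive_list; b; b = b->next)
			cnt++;
		pthread_mutex_unlock(&dev_mutex);
		return cnt;
	}

	/* first pass: only purgeable buffers */
	for (p = &inactive_list; *p && nr_to_scan; ) {
		if ((*p)->purgeable) {
			struct buf *b = *p;
			*p = b->next;
			unbind(b);
			nr_to_scan--;
		} else {
			p = &(*p)->next;
		}
	}

	/* second pass: evict anything left, counting what survives */
	for (p = &inactive_list; *p; ) {
		if (nr_to_scan) {
			struct buf *b = *p;
			*p = b->next;
			unbind(b);
			nr_to_scan--;
		} else {
			p = &(*p)->next;
			cnt++;
		}
	}

	pthread_mutex_unlock(&dev_mutex);
	return cnt;
}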
diff --git a/drivers/gpu/drm/i915/i915_gem_debug.c b/drivers/gpu/drm/i915/i915_gem_debug.c index 80f380b1d951..8da1899bd24f 100644 --- a/drivers/gpu/drm/i915/i915_gem_debug.c +++ b/drivers/gpu/drm/i915/i915_gem_debug.c | |||
@@ -30,125 +30,125 @@ | |||
30 | #include "i915_drm.h" | 30 | #include "i915_drm.h" |
31 | #include "i915_drv.h" | 31 | #include "i915_drv.h" |
32 | 32 | ||
33 | #if WATCH_INACTIVE | 33 | #if WATCH_LISTS |
34 | void | 34 | int |
35 | i915_verify_inactive(struct drm_device *dev, char *file, int line) | 35 | i915_verify_lists(struct drm_device *dev) |
36 | { | 36 | { |
37 | static int warned; | ||
37 | drm_i915_private_t *dev_priv = dev->dev_private; | 38 | drm_i915_private_t *dev_priv = dev->dev_private; |
38 | struct drm_gem_object *obj; | 39 | struct drm_i915_gem_object *obj; |
39 | struct drm_i915_gem_object *obj_priv; | 40 | int err = 0; |
40 | 41 | ||
41 | list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) { | 42 | if (warned) |
42 | obj = &obj_priv->base; | 43 | return 0; |
43 | if (obj_priv->pin_count || obj_priv->active || | 44 | |
44 | (obj->write_domain & ~(I915_GEM_DOMAIN_CPU | | 45 | list_for_each_entry(obj, &dev_priv->render_ring.active_list, list) { |
45 | I915_GEM_DOMAIN_GTT))) | 46 | if (obj->base.dev != dev || |
46 | DRM_ERROR("inactive %p (p %d a %d w %x) %s:%d\n", | 47 | !atomic_read(&obj->base.refcount.refcount)) { |
48 | DRM_ERROR("freed render active %p\n", obj); | ||
49 | err++; | ||
50 | break; | ||
51 | } else if (!obj->active || | ||
52 | (obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0) { | ||
53 | DRM_ERROR("invalid render active %p (a %d r %x)\n", | ||
54 | obj, | ||
55 | obj->active, | ||
56 | obj->base.read_domains); | ||
57 | err++; | ||
58 | } else if (obj->base.write_domain && list_empty(&obj->gpu_write_list)) { | ||
59 | DRM_ERROR("invalid render active %p (w %x, gwl %d)\n", | ||
47 | obj, | 60 | obj, |
48 | obj_priv->pin_count, obj_priv->active, | 61 | obj->base.write_domain, |
49 | obj->write_domain, file, line); | 62 | !list_empty(&obj->gpu_write_list)); |
63 | err++; | ||
64 | } | ||
50 | } | 65 | } |
51 | } | ||
52 | #endif /* WATCH_INACTIVE */ | ||
53 | |||
54 | |||
55 | #if WATCH_BUF | WATCH_EXEC | WATCH_PWRITE | ||
56 | static void | ||
57 | i915_gem_dump_page(struct page *page, uint32_t start, uint32_t end, | ||
58 | uint32_t bias, uint32_t mark) | ||
59 | { | ||
60 | uint32_t *mem = kmap_atomic(page, KM_USER0); | ||
61 | int i; | ||
62 | for (i = start; i < end; i += 4) | ||
63 | DRM_INFO("%08x: %08x%s\n", | ||
64 | (int) (bias + i), mem[i / 4], | ||
65 | (bias + i == mark) ? " ********" : ""); | ||
66 | kunmap_atomic(mem, KM_USER0); | ||
67 | /* give syslog time to catch up */ | ||
68 | msleep(1); | ||
69 | } | ||
70 | |||
71 | void | ||
72 | i915_gem_dump_object(struct drm_gem_object *obj, int len, | ||
73 | const char *where, uint32_t mark) | ||
74 | { | ||
75 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | ||
76 | int page; | ||
77 | 66 | ||
78 | DRM_INFO("%s: object at offset %08x\n", where, obj_priv->gtt_offset); | 67 | list_for_each_entry(obj, &dev_priv->mm.flushing_list, list) { |
79 | for (page = 0; page < (len + PAGE_SIZE-1) / PAGE_SIZE; page++) { | 68 | if (obj->base.dev != dev || |
80 | int page_len, chunk, chunk_len; | 69 | !atomic_read(&obj->base.refcount.refcount)) { |
81 | 70 | DRM_ERROR("freed flushing %p\n", obj); | |
82 | page_len = len - page * PAGE_SIZE; | 71 | err++; |
83 | if (page_len > PAGE_SIZE) | 72 | break; |
84 | page_len = PAGE_SIZE; | 73 | } else if (!obj->active || |
85 | 74 | (obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0 || | |
86 | for (chunk = 0; chunk < page_len; chunk += 128) { | 75 | list_empty(&obj->gpu_write_list)) { |
87 | chunk_len = page_len - chunk; | 76 | DRM_ERROR("invalid flushing %p (a %d w %x gwl %d)\n", |
88 | if (chunk_len > 128) | 77 | obj, |
89 | chunk_len = 128; | 78 | obj->active, |
90 | i915_gem_dump_page(obj_priv->pages[page], | 79 | obj->base.write_domain, |
91 | chunk, chunk + chunk_len, | 80 | !list_empty(&obj->gpu_write_list)); |
92 | obj_priv->gtt_offset + | 81 | err++; |
93 | page * PAGE_SIZE, | ||
94 | mark); | ||
95 | } | 82 | } |
96 | } | 83 | } |
97 | } | ||
98 | #endif | ||
99 | 84 | ||
100 | #if WATCH_LRU | 85 | list_for_each_entry(obj, &dev_priv->mm.gpu_write_list, gpu_write_list) { |
101 | void | 86 | if (obj->base.dev != dev || |
102 | i915_dump_lru(struct drm_device *dev, const char *where) | 87 | !atomic_read(&obj->base.refcount.refcount)) { |
103 | { | 88 | DRM_ERROR("freed gpu write %p\n", obj); |
104 | drm_i915_private_t *dev_priv = dev->dev_private; | 89 | err++; |
105 | struct drm_i915_gem_object *obj_priv; | 90 | break; |
106 | 91 | } else if (!obj->active || | |
107 | DRM_INFO("active list %s {\n", where); | 92 | (obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0) { |
108 | spin_lock(&dev_priv->mm.active_list_lock); | 93 | DRM_ERROR("invalid gpu write %p (a %d w %x)\n", |
109 | list_for_each_entry(obj_priv, &dev_priv->mm.active_list, | 94 | obj, |
110 | list) | 95 | obj->active, |
111 | { | 96 | obj->base.write_domain); |
112 | DRM_INFO(" %p: %08x\n", obj_priv, | 97 | err++; |
113 | obj_priv->last_rendering_seqno); | 98 | } |
114 | } | 99 | } |
115 | spin_unlock(&dev_priv->mm.active_list_lock); | 100 | |
116 | DRM_INFO("}\n"); | 101 | list_for_each_entry(obj, &dev_priv->mm.inactive_list, list) { |
117 | DRM_INFO("flushing list %s {\n", where); | 102 | if (obj->base.dev != dev || |
118 | list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, | 103 | !atomic_read(&obj->base.refcount.refcount)) { |
119 | list) | 104 | DRM_ERROR("freed inactive %p\n", obj); |
120 | { | 105 | err++; |
121 | DRM_INFO(" %p: %08x\n", obj_priv, | 106 | break; |
122 | obj_priv->last_rendering_seqno); | 107 | } else if (obj->pin_count || obj->active || |
108 | (obj->base.write_domain & I915_GEM_GPU_DOMAINS)) { | ||
109 | DRM_ERROR("invalid inactive %p (p %d a %d w %x)\n", | ||
110 | obj, | ||
111 | obj->pin_count, obj->active, | ||
112 | obj->base.write_domain); | ||
113 | err++; | ||
114 | } | ||
123 | } | 115 | } |
124 | DRM_INFO("}\n"); | 116 | |
125 | DRM_INFO("inactive %s {\n", where); | 117 | list_for_each_entry(obj, &dev_priv->mm.pinned_list, list) { |
126 | list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) { | 118 | if (obj->base.dev != dev || |
127 | DRM_INFO(" %p: %08x\n", obj_priv, | 119 | !atomic_read(&obj->base.refcount.refcount)) { |
128 | obj_priv->last_rendering_seqno); | 120 | DRM_ERROR("freed pinned %p\n", obj); |
121 | err++; | ||
122 | break; | ||
123 | } else if (!obj->pin_count || obj->active || | ||
124 | (obj->base.write_domain & I915_GEM_GPU_DOMAINS)) { | ||
125 | DRM_ERROR("invalid pinned %p (p %d a %d w %x)\n", | ||
126 | obj, | ||
127 | obj->pin_count, obj->active, | ||
128 | obj->base.write_domain); | ||
129 | err++; | ||
130 | } | ||
129 | } | 131 | } |
130 | DRM_INFO("}\n"); | ||
131 | } | ||
132 | #endif | ||
133 | 132 | ||
133 | return warned = err; | ||
134 | } | ||
135 | #endif /* WATCH_LISTS */ | ||
134 | 136 | ||
135 | #if WATCH_COHERENCY | 137 | #if WATCH_COHERENCY |
136 | void | 138 | void |
137 | i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle) | 139 | i915_gem_object_check_coherency(struct drm_i915_gem_object *obj, int handle) |
138 | { | 140 | { |
139 | struct drm_device *dev = obj->dev; | 141 | struct drm_device *dev = obj->base.dev; |
140 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | ||
141 | int page; | 142 | int page; |
142 | uint32_t *gtt_mapping; | 143 | uint32_t *gtt_mapping; |
143 | uint32_t *backing_map = NULL; | 144 | uint32_t *backing_map = NULL; |
144 | int bad_count = 0; | 145 | int bad_count = 0; |
145 | 146 | ||
146 | DRM_INFO("%s: checking coherency of object %p@0x%08x (%d, %zdkb):\n", | 147 | DRM_INFO("%s: checking coherency of object %p@0x%08x (%d, %zdkb):\n", |
147 | __func__, obj, obj_priv->gtt_offset, handle, | 148 | __func__, obj, obj->gtt_offset, handle, |
148 | obj->size / 1024); | 149 | obj->size / 1024); |
149 | 150 | ||
150 | gtt_mapping = ioremap(dev->agp->base + obj_priv->gtt_offset, | 151 | gtt_mapping = ioremap(dev->agp->base + obj->gtt_offset, obj->base.size); |
151 | obj->size); | ||
152 | if (gtt_mapping == NULL) { | 152 | if (gtt_mapping == NULL) { |
153 | DRM_ERROR("failed to map GTT space\n"); | 153 | DRM_ERROR("failed to map GTT space\n"); |
154 | return; | 154 | return; |
@@ -157,7 +157,7 @@ i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle) | |||
157 | for (page = 0; page < obj->size / PAGE_SIZE; page++) { | 157 | for (page = 0; page < obj->size / PAGE_SIZE; page++) { |
158 | int i; | 158 | int i; |
159 | 159 | ||
160 | backing_map = kmap_atomic(obj_priv->pages[page], KM_USER0); | 160 | backing_map = kmap_atomic(obj->pages[page], KM_USER0); |
161 | 161 | ||
162 | if (backing_map == NULL) { | 162 | if (backing_map == NULL) { |
163 | DRM_ERROR("failed to map backing page\n"); | 163 | DRM_ERROR("failed to map backing page\n"); |
@@ -172,7 +172,7 @@ i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle) | |||
172 | if (cpuval != gttval) { | 172 | if (cpuval != gttval) { |
173 | DRM_INFO("incoherent CPU vs GPU at 0x%08x: " | 173 | DRM_INFO("incoherent CPU vs GPU at 0x%08x: " |
174 | "0x%08x vs 0x%08x\n", | 174 | "0x%08x vs 0x%08x\n", |
175 | (int)(obj_priv->gtt_offset + | 175 | (int)(obj->gtt_offset + |
176 | page * PAGE_SIZE + i * 4), | 176 | page * PAGE_SIZE + i * 4), |
177 | cpuval, gttval); | 177 | cpuval, gttval); |
178 | if (bad_count++ >= 8) { | 178 | if (bad_count++ >= 8) { |
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c index 5c428fa3e0b3..da05a2692a75 100644 --- a/drivers/gpu/drm/i915/i915_gem_evict.c +++ b/drivers/gpu/drm/i915/i915_gem_evict.c | |||
@@ -30,79 +30,41 @@ | |||
30 | #include "drm.h" | 30 | #include "drm.h" |
31 | #include "i915_drv.h" | 31 | #include "i915_drv.h" |
32 | #include "i915_drm.h" | 32 | #include "i915_drm.h" |
33 | 33 | #include "i915_trace.h" | |
34 | static struct drm_i915_gem_object * | ||
35 | i915_gem_next_active_object(struct drm_device *dev, | ||
36 | struct list_head **render_iter, | ||
37 | struct list_head **bsd_iter) | ||
38 | { | ||
39 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
40 | struct drm_i915_gem_object *render_obj = NULL, *bsd_obj = NULL; | ||
41 | |||
42 | if (*render_iter != &dev_priv->render_ring.active_list) | ||
43 | render_obj = list_entry(*render_iter, | ||
44 | struct drm_i915_gem_object, | ||
45 | list); | ||
46 | |||
47 | if (HAS_BSD(dev)) { | ||
48 | if (*bsd_iter != &dev_priv->bsd_ring.active_list) | ||
49 | bsd_obj = list_entry(*bsd_iter, | ||
50 | struct drm_i915_gem_object, | ||
51 | list); | ||
52 | |||
53 | if (render_obj == NULL) { | ||
54 | *bsd_iter = (*bsd_iter)->next; | ||
55 | return bsd_obj; | ||
56 | } | ||
57 | |||
58 | if (bsd_obj == NULL) { | ||
59 | *render_iter = (*render_iter)->next; | ||
60 | return render_obj; | ||
61 | } | ||
62 | |||
63 | /* XXX can we handle seqno wrapping? */ | ||
64 | if (render_obj->last_rendering_seqno < bsd_obj->last_rendering_seqno) { | ||
65 | *render_iter = (*render_iter)->next; | ||
66 | return render_obj; | ||
67 | } else { | ||
68 | *bsd_iter = (*bsd_iter)->next; | ||
69 | return bsd_obj; | ||
70 | } | ||
71 | } else { | ||
72 | *render_iter = (*render_iter)->next; | ||
73 | return render_obj; | ||
74 | } | ||
75 | } | ||
76 | 34 | ||
77 | static bool | 35 | static bool |
78 | mark_free(struct drm_i915_gem_object *obj_priv, | 36 | mark_free(struct drm_i915_gem_object *obj, struct list_head *unwind) |
79 | struct list_head *unwind) | ||
80 | { | 37 | { |
81 | list_add(&obj_priv->evict_list, unwind); | 38 | list_add(&obj->exec_list, unwind); |
82 | drm_gem_object_reference(&obj_priv->base); | 39 | drm_gem_object_reference(&obj->base); |
83 | return drm_mm_scan_add_block(obj_priv->gtt_space); | 40 | return drm_mm_scan_add_block(obj->gtt_space); |
84 | } | 41 | } |
85 | 42 | ||
86 | #define i915_for_each_active_object(OBJ, R, B) \ | ||
87 | *(R) = dev_priv->render_ring.active_list.next; \ | ||
88 | *(B) = dev_priv->bsd_ring.active_list.next; \ | ||
89 | while (((OBJ) = i915_gem_next_active_object(dev, (R), (B))) != NULL) | ||
90 | |||
91 | int | 43 | int |
92 | i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignment) | 44 | i915_gem_evict_something(struct drm_device *dev, int min_size, |
45 | unsigned alignment, bool mappable) | ||
93 | { | 46 | { |
94 | drm_i915_private_t *dev_priv = dev->dev_private; | 47 | drm_i915_private_t *dev_priv = dev->dev_private; |
95 | struct list_head eviction_list, unwind_list; | 48 | struct list_head eviction_list, unwind_list; |
96 | struct drm_i915_gem_object *obj_priv; | 49 | struct drm_i915_gem_object *obj; |
97 | struct list_head *render_iter, *bsd_iter; | ||
98 | int ret = 0; | 50 | int ret = 0; |
99 | 51 | ||
100 | i915_gem_retire_requests(dev); | 52 | i915_gem_retire_requests(dev); |
101 | 53 | ||
102 | /* Re-check for free space after retiring requests */ | 54 | /* Re-check for free space after retiring requests */ |
103 | if (drm_mm_search_free(&dev_priv->mm.gtt_space, | 55 | if (mappable) { |
104 | min_size, alignment, 0)) | 56 | if (drm_mm_search_free_in_range(&dev_priv->mm.gtt_space, |
105 | return 0; | 57 | min_size, alignment, 0, |
58 | dev_priv->mm.gtt_mappable_end, | ||
59 | 0)) | ||
60 | return 0; | ||
61 | } else { | ||
62 | if (drm_mm_search_free(&dev_priv->mm.gtt_space, | ||
63 | min_size, alignment, 0)) | ||
64 | return 0; | ||
65 | } | ||
66 | |||
67 | trace_i915_gem_evict(dev, min_size, alignment, mappable); | ||
106 | 68 | ||
107 | /* | 69 | /* |
108 | * The goal is to evict objects and amalgamate space in LRU order. | 70 | * The goal is to evict objects and amalgamate space in LRU order. |
@@ -128,45 +90,56 @@ i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignmen | |||
128 | */ | 90 | */ |
129 | 91 | ||
130 | INIT_LIST_HEAD(&unwind_list); | 92 | INIT_LIST_HEAD(&unwind_list); |
131 | drm_mm_init_scan(&dev_priv->mm.gtt_space, min_size, alignment); | 93 | if (mappable) |
94 | drm_mm_init_scan_with_range(&dev_priv->mm.gtt_space, min_size, | ||
95 | alignment, 0, | ||
96 | dev_priv->mm.gtt_mappable_end); | ||
97 | else | ||
98 | drm_mm_init_scan(&dev_priv->mm.gtt_space, min_size, alignment); | ||
132 | 99 | ||
133 | /* First see if there is a large enough contiguous idle region... */ | 100 | /* First see if there is a large enough contiguous idle region... */ |
134 | list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) { | 101 | list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list) { |
135 | if (mark_free(obj_priv, &unwind_list)) | 102 | if (mark_free(obj, &unwind_list)) |
136 | goto found; | 103 | goto found; |
137 | } | 104 | } |
138 | 105 | ||
139 | /* Now merge in the soon-to-be-expired objects... */ | 106 | /* Now merge in the soon-to-be-expired objects... */ |
140 | i915_for_each_active_object(obj_priv, &render_iter, &bsd_iter) { | 107 | list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) { |
141 | /* Does the object require an outstanding flush? */ | 108 | /* Does the object require an outstanding flush? */ |
142 | if (obj_priv->base.write_domain || obj_priv->pin_count) | 109 | if (obj->base.write_domain || obj->pin_count) |
143 | continue; | 110 | continue; |
144 | 111 | ||
145 | if (mark_free(obj_priv, &unwind_list)) | 112 | if (mark_free(obj, &unwind_list)) |
146 | goto found; | 113 | goto found; |
147 | } | 114 | } |
148 | 115 | ||
149 | /* Finally add anything with a pending flush (in order of retirement) */ | 116 | /* Finally add anything with a pending flush (in order of retirement) */ |
150 | list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, list) { | 117 | list_for_each_entry(obj, &dev_priv->mm.flushing_list, mm_list) { |
151 | if (obj_priv->pin_count) | 118 | if (obj->pin_count) |
152 | continue; | 119 | continue; |
153 | 120 | ||
154 | if (mark_free(obj_priv, &unwind_list)) | 121 | if (mark_free(obj, &unwind_list)) |
155 | goto found; | 122 | goto found; |
156 | } | 123 | } |
157 | i915_for_each_active_object(obj_priv, &render_iter, &bsd_iter) { | 124 | list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) { |
158 | if (! obj_priv->base.write_domain || obj_priv->pin_count) | 125 | if (!obj->base.write_domain || obj->pin_count) |
159 | continue; | 126 | continue; |
160 | 127 | ||
161 | if (mark_free(obj_priv, &unwind_list)) | 128 | if (mark_free(obj, &unwind_list)) |
162 | goto found; | 129 | goto found; |
163 | } | 130 | } |
164 | 131 | ||
165 | /* Nothing found, clean up and bail out! */ | 132 | /* Nothing found, clean up and bail out! */ |
166 | list_for_each_entry(obj_priv, &unwind_list, evict_list) { | 133 | while (!list_empty(&unwind_list)) { |
167 | ret = drm_mm_scan_remove_block(obj_priv->gtt_space); | 134 | obj = list_first_entry(&unwind_list, |
135 | struct drm_i915_gem_object, | ||
136 | exec_list); | ||
137 | |||
138 | ret = drm_mm_scan_remove_block(obj->gtt_space); | ||
168 | BUG_ON(ret); | 139 | BUG_ON(ret); |
169 | drm_gem_object_unreference(&obj_priv->base); | 140 | |
141 | list_del_init(&obj->exec_list); | ||
142 | drm_gem_object_unreference(&obj->base); | ||
170 | } | 143 | } |
171 | 144 | ||
172 | /* We expect the caller to unpin, evict all and try again, or give up. | 145 | /* We expect the caller to unpin, evict all and try again, or give up. |
@@ -180,49 +153,47 @@ found: | |||
180 | * temporary list. */ | 153 | * temporary list. */ |
181 | INIT_LIST_HEAD(&eviction_list); | 154 | INIT_LIST_HEAD(&eviction_list); |
182 | while (!list_empty(&unwind_list)) { | 155 | while (!list_empty(&unwind_list)) { |
183 | obj_priv = list_first_entry(&unwind_list, | 156 | obj = list_first_entry(&unwind_list, |
184 | struct drm_i915_gem_object, | 157 | struct drm_i915_gem_object, |
185 | evict_list); | 158 | exec_list); |
186 | if (drm_mm_scan_remove_block(obj_priv->gtt_space)) { | 159 | if (drm_mm_scan_remove_block(obj->gtt_space)) { |
187 | list_move(&obj_priv->evict_list, &eviction_list); | 160 | list_move(&obj->exec_list, &eviction_list); |
188 | continue; | 161 | continue; |
189 | } | 162 | } |
190 | list_del(&obj_priv->evict_list); | 163 | list_del_init(&obj->exec_list); |
191 | drm_gem_object_unreference(&obj_priv->base); | 164 | drm_gem_object_unreference(&obj->base); |
192 | } | 165 | } |
193 | 166 | ||
194 | /* Unbinding will emit any required flushes */ | 167 | /* Unbinding will emit any required flushes */ |
195 | while (!list_empty(&eviction_list)) { | 168 | while (!list_empty(&eviction_list)) { |
196 | obj_priv = list_first_entry(&eviction_list, | 169 | obj = list_first_entry(&eviction_list, |
197 | struct drm_i915_gem_object, | 170 | struct drm_i915_gem_object, |
198 | evict_list); | 171 | exec_list); |
199 | if (ret == 0) | 172 | if (ret == 0) |
200 | ret = i915_gem_object_unbind(&obj_priv->base); | 173 | ret = i915_gem_object_unbind(obj); |
201 | list_del(&obj_priv->evict_list); | 174 | |
202 | drm_gem_object_unreference(&obj_priv->base); | 175 | list_del_init(&obj->exec_list); |
176 | drm_gem_object_unreference(&obj->base); | ||
203 | } | 177 | } |
204 | 178 | ||
205 | return ret; | 179 | return ret; |
206 | } | 180 | } |
207 | 181 | ||
208 | int | 182 | int |
209 | i915_gem_evict_everything(struct drm_device *dev) | 183 | i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only) |
210 | { | 184 | { |
211 | drm_i915_private_t *dev_priv = dev->dev_private; | 185 | drm_i915_private_t *dev_priv = dev->dev_private; |
212 | int ret; | 186 | int ret; |
213 | bool lists_empty; | 187 | bool lists_empty; |
214 | 188 | ||
215 | spin_lock(&dev_priv->mm.active_list_lock); | ||
216 | lists_empty = (list_empty(&dev_priv->mm.inactive_list) && | 189 | lists_empty = (list_empty(&dev_priv->mm.inactive_list) && |
217 | list_empty(&dev_priv->mm.flushing_list) && | 190 | list_empty(&dev_priv->mm.flushing_list) && |
218 | list_empty(&dev_priv->render_ring.active_list) && | 191 | list_empty(&dev_priv->mm.active_list)); |
219 | (!HAS_BSD(dev) | ||
220 | || list_empty(&dev_priv->bsd_ring.active_list))); | ||
221 | spin_unlock(&dev_priv->mm.active_list_lock); | ||
222 | |||
223 | if (lists_empty) | 192 | if (lists_empty) |
224 | return -ENOSPC; | 193 | return -ENOSPC; |
225 | 194 | ||
195 | trace_i915_gem_evict_everything(dev, purgeable_only); | ||
196 | |||
226 | /* Flush everything (on to the inactive lists) and evict */ | 197 | /* Flush everything (on to the inactive lists) and evict */ |
227 | ret = i915_gpu_idle(dev); | 198 | ret = i915_gpu_idle(dev); |
228 | if (ret) | 199 | if (ret) |
@@ -230,40 +201,22 @@ i915_gem_evict_everything(struct drm_device *dev) | |||
230 | 201 | ||
231 | BUG_ON(!list_empty(&dev_priv->mm.flushing_list)); | 202 | BUG_ON(!list_empty(&dev_priv->mm.flushing_list)); |
232 | 203 | ||
233 | ret = i915_gem_evict_inactive(dev); | 204 | return i915_gem_evict_inactive(dev, purgeable_only); |
234 | if (ret) | ||
235 | return ret; | ||
236 | |||
237 | spin_lock(&dev_priv->mm.active_list_lock); | ||
238 | lists_empty = (list_empty(&dev_priv->mm.inactive_list) && | ||
239 | list_empty(&dev_priv->mm.flushing_list) && | ||
240 | list_empty(&dev_priv->render_ring.active_list) && | ||
241 | (!HAS_BSD(dev) | ||
242 | || list_empty(&dev_priv->bsd_ring.active_list))); | ||
243 | spin_unlock(&dev_priv->mm.active_list_lock); | ||
244 | BUG_ON(!lists_empty); | ||
245 | |||
246 | return 0; | ||
247 | } | 205 | } |
248 | 206 | ||
249 | /** Unbinds all inactive objects. */ | 207 | /** Unbinds all inactive objects. */ |
250 | int | 208 | int |
251 | i915_gem_evict_inactive(struct drm_device *dev) | 209 | i915_gem_evict_inactive(struct drm_device *dev, bool purgeable_only) |
252 | { | 210 | { |
253 | drm_i915_private_t *dev_priv = dev->dev_private; | 211 | drm_i915_private_t *dev_priv = dev->dev_private; |
254 | 212 | struct drm_i915_gem_object *obj, *next; | |
255 | while (!list_empty(&dev_priv->mm.inactive_list)) { | 213 | |
256 | struct drm_gem_object *obj; | 214 | list_for_each_entry_safe(obj, next, |
257 | int ret; | 215 | &dev_priv->mm.inactive_list, mm_list) { |
258 | 216 | if (!purgeable_only || obj->madv != I915_MADV_WILLNEED) { | |
259 | obj = &list_first_entry(&dev_priv->mm.inactive_list, | 217 | int ret = i915_gem_object_unbind(obj); |
260 | struct drm_i915_gem_object, | 218 | if (ret) |
261 | list)->base; | 219 | return ret; |
262 | |||
263 | ret = i915_gem_object_unbind(obj); | ||
264 | if (ret != 0) { | ||
265 | DRM_ERROR("Error unbinding object: %d\n", ret); | ||
266 | return ret; | ||
267 | } | 220 | } |
268 | } | 221 | } |
269 | 222 | ||
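The scan above follows drm_mm's iterative protocol. A condensed sketch of that protocol, assuming mark_free() wraps drm_mm_scan_add_block() and inserts at the head of unwind_list, so that popping from the head satisfies drm_mm's reverse-order removal rule:

    drm_mm_init_scan(&dev_priv->mm.gtt_space, min_size, alignment);
    list_for_each_entry(obj, &lru, mm_list) {
        list_add(&obj->exec_list, &unwind_list);
        if (drm_mm_scan_add_block(obj->gtt_space))
            goto found;     /* enough adjacent blocks marked to form a hole */
    }
    /* nothing found: every scanned block must be unmarked, newest first */
    while (!list_empty(&unwind_list)) {
        obj = list_first_entry(&unwind_list,
                               struct drm_i915_gem_object, exec_list);
        drm_mm_scan_remove_block(obj->gtt_space);
        list_del_init(&obj->exec_list);
        drm_gem_object_unreference(&obj->base);
    }

Only the objects for which drm_mm_scan_remove_block() returns true actually overlap the chosen hole and need unbinding; the rest are released untouched, which is exactly what the found: path above does.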
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c new file mode 100644 index 000000000000..4934cf84c320 --- /dev/null +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c | |||
@@ -0,0 +1,1342 @@ | |||
1 | /* | ||
2 | * Copyright © 2008,2010 Intel Corporation | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice (including the next | ||
12 | * paragraph) shall be included in all copies or substantial portions of the | ||
13 | * Software. | ||
14 | * | ||
15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
18 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
19 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
20 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS | ||
21 | * IN THE SOFTWARE. | ||
22 | * | ||
23 | * Authors: | ||
24 | * Eric Anholt <eric@anholt.net> | ||
25 | * Chris Wilson <chris@chris-wilson.co.uk> | ||
26 | * | ||
27 | */ | ||
28 | |||
29 | #include "drmP.h" | ||
30 | #include "drm.h" | ||
31 | #include "i915_drm.h" | ||
32 | #include "i915_drv.h" | ||
33 | #include "i915_trace.h" | ||
34 | #include "intel_drv.h" | ||
35 | |||
36 | struct change_domains { | ||
37 | uint32_t invalidate_domains; | ||
38 | uint32_t flush_domains; | ||
39 | uint32_t flush_rings; | ||
40 | uint32_t flips; | ||
41 | }; | ||
42 | |||
43 | /* | ||
44 | * Set the next domain for the specified object. This | ||
45 | * may not actually perform the necessary flushing/invalidating though, | ||
46 | * as that may want to be batched with other set_domain operations | ||
47 | * | ||
48 | * This is (we hope) the only really tricky part of gem. The goal | ||
49 | * is fairly simple -- track which caches hold bits of the object | ||
50 | * and make sure they remain coherent. A few concrete examples may | ||
51 | * help to explain how it works. For shorthand, we use the notation | ||
52 | * (read_domains, write_domain), e.g. (CPU, CPU) to indicate | ||
53 | * a pair of read and write domain masks. | ||
54 | * | ||
55 | * Case 1: the batch buffer | ||
56 | * | ||
57 | * 1. Allocated | ||
58 | * 2. Written by CPU | ||
59 | * 3. Mapped to GTT | ||
60 | * 4. Read by GPU | ||
61 | * 5. Unmapped from GTT | ||
62 | * 6. Freed | ||
63 | * | ||
64 | * Let's take these one step at a time | ||
65 | * | ||
66 | * 1. Allocated | ||
67 | * Pages allocated from the kernel may still have | ||
68 | * cache contents, so we set them to (CPU, CPU) always. | ||
69 | * 2. Written by CPU (using pwrite) | ||
70 | * The pwrite function calls set_domain (CPU, CPU) and | ||
71 | * this function does nothing (as nothing changes) | ||
72 | * 3. Mapped by GTT | ||
73 | * This function asserts that the object is not | ||
74 | * currently in any GPU-based read or write domains | ||
75 | * 4. Read by GPU | ||
76 | * i915_gem_execbuffer calls set_domain (COMMAND, 0). | ||
77 | * As write_domain is zero, this function adds in the | ||
78 | * current read domains (CPU+COMMAND, 0). | ||
79 | * flush_domains is set to CPU. | ||
80 | * invalidate_domains is set to COMMAND | ||
81 | * clflush is run to get data out of the CPU caches | ||
82 | * then i915_dev_set_domain calls i915_gem_flush to | ||
83 | * emit an MI_FLUSH and drm_agp_chipset_flush | ||
84 | * 5. Unmapped from GTT | ||
85 | * i915_gem_object_unbind calls set_domain (CPU, CPU) | ||
86 | * flush_domains and invalidate_domains end up both zero | ||
87 | * so no flushing/invalidating happens | ||
88 | * 6. Freed | ||
89 | * yay, done | ||
90 | * | ||
91 | * Case 2: The shared render buffer | ||
92 | * | ||
93 | * 1. Allocated | ||
94 | * 2. Mapped to GTT | ||
95 | * 3. Read/written by GPU | ||
96 | * 4. set_domain to (CPU,CPU) | ||
97 | * 5. Read/written by CPU | ||
98 | * 6. Read/written by GPU | ||
99 | * | ||
100 | * 1. Allocated | ||
101 | * Same as last example, (CPU, CPU) | ||
102 | * 2. Mapped to GTT | ||
103 | * Nothing changes (assertions find that it is not in the GPU) | ||
104 | * 3. Read/written by GPU | ||
105 | * execbuffer calls set_domain (RENDER, RENDER) | ||
106 | * flush_domains gets CPU | ||
107 | * invalidate_domains gets GPU | ||
108 | * clflush (obj) | ||
109 | * MI_FLUSH and drm_agp_chipset_flush | ||
110 | * 4. set_domain (CPU, CPU) | ||
111 | * flush_domains gets GPU | ||
112 | * invalidate_domains gets CPU | ||
113 | * wait_rendering (obj) to make sure all drawing is complete. | ||
114 | * This will include an MI_FLUSH to get the data from GPU | ||
115 | * to memory | ||
116 | * clflush (obj) to invalidate the CPU cache | ||
117 | * Another MI_FLUSH in i915_gem_flush (eliminate this somehow?) | ||
118 | * 5. Read/written by CPU | ||
119 | * cache lines are loaded and dirtied | ||
120 | * 6. Read/written by GPU | ||
121 | * Same as last GPU access | ||
122 | * | ||
123 | * Case 3: The constant buffer | ||
124 | * | ||
125 | * 1. Allocated | ||
126 | * 2. Written by CPU | ||
127 | * 3. Read by GPU | ||
128 | * 4. Updated (written) by CPU again | ||
129 | * 5. Read by GPU | ||
130 | * | ||
131 | * 1. Allocated | ||
132 | * (CPU, CPU) | ||
133 | * 2. Written by CPU | ||
134 | * (CPU, CPU) | ||
135 | * 3. Read by GPU | ||
136 | * (CPU+RENDER, 0) | ||
137 | * flush_domains = CPU | ||
138 | * invalidate_domains = RENDER | ||
139 | * clflush (obj) | ||
140 | * MI_FLUSH | ||
141 | * drm_agp_chipset_flush | ||
142 | * 4. Updated (written) by CPU again | ||
143 | * (CPU, CPU) | ||
144 | * flush_domains = 0 (no previous write domain) | ||
145 | * invalidate_domains = 0 (no new read domains) | ||
146 | * 5. Read by GPU | ||
147 | * (CPU+RENDER, 0) | ||
148 | * flush_domains = CPU | ||
149 | * invalidate_domains = RENDER | ||
150 | * clflush (obj) | ||
151 | * MI_FLUSH | ||
152 | * drm_agp_chipset_flush | ||
153 | */ | ||
154 | static void | ||
155 | i915_gem_object_set_to_gpu_domain(struct drm_i915_gem_object *obj, | ||
156 | struct intel_ring_buffer *ring, | ||
157 | struct change_domains *cd) | ||
158 | { | ||
159 | uint32_t invalidate_domains = 0, flush_domains = 0; | ||
160 | |||
161 | /* | ||
162 | * If the object isn't moving to a new write domain, | ||
163 | * let the object stay in multiple read domains | ||
164 | */ | ||
165 | if (obj->base.pending_write_domain == 0) | ||
166 | obj->base.pending_read_domains |= obj->base.read_domains; | ||
167 | |||
168 | /* | ||
169 | * Flush the current write domain if | ||
170 | * the new read domains don't match. Invalidate | ||
171 | * any read domains which differ from the old | ||
172 | * write domain | ||
173 | */ | ||
174 | if (obj->base.write_domain && | ||
175 | (((obj->base.write_domain != obj->base.pending_read_domains || | ||
176 | obj->ring != ring)) || | ||
177 | (obj->fenced_gpu_access && !obj->pending_fenced_gpu_access))) { | ||
178 | flush_domains |= obj->base.write_domain; | ||
179 | invalidate_domains |= | ||
180 | obj->base.pending_read_domains & ~obj->base.write_domain; | ||
181 | } | ||
182 | /* | ||
183 | * Invalidate any read caches which may have | ||
184 | * stale data. That is, any new read domains. | ||
185 | */ | ||
186 | invalidate_domains |= obj->base.pending_read_domains & ~obj->base.read_domains; | ||
187 | if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU) | ||
188 | i915_gem_clflush_object(obj); | ||
189 | |||
190 | if (obj->base.pending_write_domain) | ||
191 | cd->flips |= atomic_read(&obj->pending_flip); | ||
192 | |||
193 | /* The actual obj->write_domain will be updated with | ||
194 | * pending_write_domain after we emit the accumulated flush for all | ||
195 | * of our domain changes in execbuffers (which clears objects' | ||
196 | * write_domains). So if we have a current write domain that we | ||
197 | * aren't changing, set pending_write_domain to that. | ||
198 | */ | ||
199 | if (flush_domains == 0 && obj->base.pending_write_domain == 0) | ||
200 | obj->base.pending_write_domain = obj->base.write_domain; | ||
201 | |||
202 | cd->invalidate_domains |= invalidate_domains; | ||
203 | cd->flush_domains |= flush_domains; | ||
204 | if (flush_domains & I915_GEM_GPU_DOMAINS) | ||
205 | cd->flush_rings |= obj->ring->id; | ||
206 | if (invalidate_domains & I915_GEM_GPU_DOMAINS) | ||
207 | cd->flush_rings |= ring->id; | ||
208 | } | ||
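Tracing Case 1, step 4 through the function above with concrete masks may help. A standalone toy of the domain arithmetic (the bit values are illustrative, not the real I915_GEM_DOMAIN_* constants from i915_drm.h, and the ring/fence terms of the flush test are ignored):

    #include <stdio.h>
    #include <stdint.h>

    #define DOM_CPU     (1u << 0)   /* illustrative values only */
    #define DOM_COMMAND (1u << 1)

    int main(void)
    {
        uint32_t read = DOM_CPU, write = DOM_CPU;         /* after pwrite */
        uint32_t pend_read = DOM_COMMAND, pend_write = 0; /* execbuffer asks (COMMAND, 0) */
        uint32_t flush = 0, invalidate = 0;

        if (pend_write == 0)                  /* no new write domain: keep old reads */
            pend_read |= read;                /* CPU | COMMAND */
        if (write && write != pend_read) {    /* current write domain must be flushed */
            flush |= write;                   /* CPU */
            invalidate |= pend_read & ~write; /* COMMAND */
        }
        invalidate |= pend_read & ~read;      /* new read domains: COMMAND */

        /* CPU appears in flush|invalidate, so a clflush would follow */
        printf("flush=%#x invalidate=%#x\n", flush, invalidate);
        return 0;   /* flush=0x1 invalidate=0x2: CPU flushed, COMMAND invalidated */
    }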
209 | |||
210 | struct eb_objects { | ||
211 | int and; | ||
212 | struct hlist_head buckets[0]; | ||
213 | }; | ||
214 | |||
215 | static struct eb_objects * | ||
216 | eb_create(int size) | ||
217 | { | ||
218 | struct eb_objects *eb; | ||
219 | int count = PAGE_SIZE / sizeof(struct hlist_head) / 2; | ||
220 | while (count > size) | ||
221 | count >>= 1; | ||
222 | eb = kzalloc(count*sizeof(struct hlist_head) + | ||
223 | sizeof(struct eb_objects), | ||
224 | GFP_KERNEL); | ||
225 | if (eb == NULL) | ||
226 | return eb; | ||
227 | |||
228 | eb->and = count - 1; | ||
229 | return eb; | ||
230 | } | ||
231 | |||
232 | static void | ||
233 | eb_reset(struct eb_objects *eb) | ||
234 | { | ||
235 | memset(eb->buckets, 0, (eb->and+1)*sizeof(struct hlist_head)); | ||
236 | } | ||
237 | |||
238 | static void | ||
239 | eb_add_object(struct eb_objects *eb, struct drm_i915_gem_object *obj) | ||
240 | { | ||
241 | hlist_add_head(&obj->exec_node, | ||
242 | &eb->buckets[obj->exec_handle & eb->and]); | ||
243 | } | ||
244 | |||
245 | static struct drm_i915_gem_object * | ||
246 | eb_get_object(struct eb_objects *eb, unsigned long handle) | ||
247 | { | ||
248 | struct hlist_head *head; | ||
249 | struct hlist_node *node; | ||
250 | struct drm_i915_gem_object *obj; | ||
251 | |||
252 | head = &eb->buckets[handle & eb->and]; | ||
253 | hlist_for_each(node, head) { | ||
254 | obj = hlist_entry(node, struct drm_i915_gem_object, exec_node); | ||
255 | if (obj->exec_handle == handle) | ||
256 | return obj; | ||
257 | } | ||
258 | |||
259 | return NULL; | ||
260 | } | ||
261 | |||
262 | static void | ||
263 | eb_destroy(struct eb_objects *eb) | ||
264 | { | ||
265 | kfree(eb); | ||
266 | } | ||
267 | |||
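eb_create() sizes the hash so the bucket array plus header stays within half a page, rounding down to a power of two so that "handle & eb->and" can stand in for a modulo. A standalone check of the arithmetic, assuming 4096-byte pages and 8-byte hlist_heads as on typical 64-bit builds:

    #include <stdio.h>

    int main(void)
    {
        const int page_size = 4096, head_size = 8;
        int size = 100;                        /* e.g. args->buffer_count */
        int count = page_size / head_size / 2; /* 256 */
        while (count > size)
            count >>= 1;                       /* 256 -> 128 -> 64 */
        printf("buckets=%d mask=%#x\n", count, count - 1);
        /* handle 0x1234 hashes to bucket 0x1234 & 0x3f == 0x34 */
        return 0;
    }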
268 | static int | ||
269 | i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj, | ||
270 | struct eb_objects *eb, | ||
271 | struct drm_i915_gem_relocation_entry *reloc) | ||
272 | { | ||
273 | struct drm_device *dev = obj->base.dev; | ||
274 | struct drm_gem_object *target_obj; | ||
275 | uint32_t target_offset; | ||
276 | int ret = -EINVAL; | ||
277 | |||
278 | /* we already hold a reference to all valid objects */ | ||
279 | target_obj = &eb_get_object(eb, reloc->target_handle)->base; | ||
280 | if (unlikely(target_obj == NULL)) | ||
281 | return -ENOENT; | ||
282 | |||
283 | target_offset = to_intel_bo(target_obj)->gtt_offset; | ||
284 | |||
285 | /* The target buffer should have appeared before us in the | ||
286 | * exec_object list, so it should have a GTT space bound by now. | ||
287 | */ | ||
288 | if (unlikely(target_offset == 0)) { | ||
289 | DRM_ERROR("No GTT space found for object %d\n", | ||
290 | reloc->target_handle); | ||
291 | return ret; | ||
292 | } | ||
293 | |||
294 | /* Validate that the target is in a valid r/w GPU domain */ | ||
295 | if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) { | ||
296 | DRM_ERROR("reloc with multiple write domains: " | ||
297 | "obj %p target %d offset %d " | ||
298 | "read %08x write %08x", | ||
299 | obj, reloc->target_handle, | ||
300 | (int) reloc->offset, | ||
301 | reloc->read_domains, | ||
302 | reloc->write_domain); | ||
303 | return ret; | ||
304 | } | ||
305 | if (unlikely((reloc->write_domain | reloc->read_domains) & I915_GEM_DOMAIN_CPU)) { | ||
306 | DRM_ERROR("reloc with read/write CPU domains: " | ||
307 | "obj %p target %d offset %d " | ||
308 | "read %08x write %08x", | ||
309 | obj, reloc->target_handle, | ||
310 | (int) reloc->offset, | ||
311 | reloc->read_domains, | ||
312 | reloc->write_domain); | ||
313 | return ret; | ||
314 | } | ||
315 | if (unlikely(reloc->write_domain && target_obj->pending_write_domain && | ||
316 | reloc->write_domain != target_obj->pending_write_domain)) { | ||
317 | DRM_ERROR("Write domain conflict: " | ||
318 | "obj %p target %d offset %d " | ||
319 | "new %08x old %08x\n", | ||
320 | obj, reloc->target_handle, | ||
321 | (int) reloc->offset, | ||
322 | reloc->write_domain, | ||
323 | target_obj->pending_write_domain); | ||
324 | return ret; | ||
325 | } | ||
326 | |||
327 | target_obj->pending_read_domains |= reloc->read_domains; | ||
328 | target_obj->pending_write_domain |= reloc->write_domain; | ||
329 | |||
330 | /* If the relocation already has the right value in it, no | ||
331 | * more work needs to be done. | ||
332 | */ | ||
333 | if (target_offset == reloc->presumed_offset) | ||
334 | return 0; | ||
335 | |||
336 | /* Check that the relocation address is valid... */ | ||
337 | if (unlikely(reloc->offset > obj->base.size - 4)) { | ||
338 | DRM_ERROR("Relocation beyond object bounds: " | ||
339 | "obj %p target %d offset %d size %d.\n", | ||
340 | obj, reloc->target_handle, | ||
341 | (int) reloc->offset, | ||
342 | (int) obj->base.size); | ||
343 | return ret; | ||
344 | } | ||
345 | if (unlikely(reloc->offset & 3)) { | ||
346 | DRM_ERROR("Relocation not 4-byte aligned: " | ||
347 | "obj %p target %d offset %d.\n", | ||
348 | obj, reloc->target_handle, | ||
349 | (int) reloc->offset); | ||
350 | return ret; | ||
351 | } | ||
352 | |||
353 | reloc->delta += target_offset; | ||
354 | if (obj->base.write_domain == I915_GEM_DOMAIN_CPU) { | ||
355 | uint32_t page_offset = reloc->offset & ~PAGE_MASK; | ||
356 | char *vaddr; | ||
357 | |||
358 | vaddr = kmap_atomic(obj->pages[reloc->offset >> PAGE_SHIFT]); | ||
359 | *(uint32_t *)(vaddr + page_offset) = reloc->delta; | ||
360 | kunmap_atomic(vaddr); | ||
361 | } else { | ||
362 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
363 | uint32_t __iomem *reloc_entry; | ||
364 | void __iomem *reloc_page; | ||
365 | |||
366 | /* We can't wait for rendering with pagefaults disabled */ | ||
367 | if (obj->active && in_atomic()) | ||
368 | return -EFAULT; | ||
369 | |||
370 | ret = i915_gem_object_set_to_gtt_domain(obj, 1); | ||
371 | if (ret) | ||
372 | return ret; | ||
373 | |||
374 | /* Map the page containing the relocation we're going to perform. */ | ||
375 | reloc->offset += obj->gtt_offset; | ||
376 | reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping, | ||
377 | reloc->offset & PAGE_MASK); | ||
378 | reloc_entry = (uint32_t __iomem *) | ||
379 | (reloc_page + (reloc->offset & ~PAGE_MASK)); | ||
380 | iowrite32(reloc->delta, reloc_entry); | ||
381 | io_mapping_unmap_atomic(reloc_page); | ||
382 | } | ||
383 | |||
384 | /* and update the user's relocation entry */ | ||
385 | reloc->presumed_offset = target_offset; | ||
386 | |||
387 | return 0; | ||
388 | } | ||
389 | |||
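In effect, the function patches one 32-bit slot in the object with the target's GTT base plus a delta, and skips the write entirely when userspace guessed the base correctly. A userspace-runnable toy of that core (field names follow struct drm_i915_gem_relocation_entry; the values are made up):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t batch[64] = {0};
        uint64_t target_gtt_offset = 0x10000;  /* where the target was bound */
        struct { uint32_t delta; uint64_t offset, presumed_offset; } r = {
            .delta = 0x80, .offset = 16, .presumed_offset = 0x20000,
        };

        if (target_gtt_offset != r.presumed_offset) {
            batch[r.offset / 4] = target_gtt_offset + r.delta;
            r.presumed_offset = target_gtt_offset; /* copied back to userspace */
        }
        printf("batch[%u] = %#x\n",
               (unsigned)(r.offset / 4), batch[r.offset / 4]);
        return 0;   /* batch[4] = 0x10080 */
    }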
390 | static int | ||
391 | i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj, | ||
392 | struct eb_objects *eb) | ||
393 | { | ||
394 | struct drm_i915_gem_relocation_entry __user *user_relocs; | ||
395 | struct drm_i915_gem_exec_object2 *entry = obj->exec_entry; | ||
396 | int i, ret; | ||
397 | |||
398 | user_relocs = (void __user *)(uintptr_t)entry->relocs_ptr; | ||
399 | for (i = 0; i < entry->relocation_count; i++) { | ||
400 | struct drm_i915_gem_relocation_entry reloc; | ||
401 | |||
402 | if (__copy_from_user_inatomic(&reloc, | ||
403 | user_relocs+i, | ||
404 | sizeof(reloc))) | ||
405 | return -EFAULT; | ||
406 | |||
407 | ret = i915_gem_execbuffer_relocate_entry(obj, eb, &reloc); | ||
408 | if (ret) | ||
409 | return ret; | ||
410 | |||
411 | if (__copy_to_user_inatomic(&user_relocs[i].presumed_offset, | ||
412 | &reloc.presumed_offset, | ||
413 | sizeof(reloc.presumed_offset))) | ||
414 | return -EFAULT; | ||
415 | } | ||
416 | |||
417 | return 0; | ||
418 | } | ||
419 | |||
420 | static int | ||
421 | i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj, | ||
422 | struct eb_objects *eb, | ||
423 | struct drm_i915_gem_relocation_entry *relocs) | ||
424 | { | ||
425 | const struct drm_i915_gem_exec_object2 *entry = obj->exec_entry; | ||
426 | int i, ret; | ||
427 | |||
428 | for (i = 0; i < entry->relocation_count; i++) { | ||
429 | ret = i915_gem_execbuffer_relocate_entry(obj, eb, &relocs[i]); | ||
430 | if (ret) | ||
431 | return ret; | ||
432 | } | ||
433 | |||
434 | return 0; | ||
435 | } | ||
436 | |||
437 | static int | ||
438 | i915_gem_execbuffer_relocate(struct drm_device *dev, | ||
439 | struct eb_objects *eb, | ||
440 | struct list_head *objects) | ||
441 | { | ||
442 | struct drm_i915_gem_object *obj; | ||
443 | int ret = 0; | ||
444 | |||
445 | /* This is the fast path and we cannot handle a pagefault whilst | ||
446 | * holding the struct mutex lest the user pass in the relocations | ||
447 | * contained within a mmapped bo. In such a case the page | ||
448 | * fault handler would call i915_gem_fault() and we would try to | ||
449 | * acquire the struct mutex again. Obviously this is bad and so | ||
450 | * lockdep complains vehemently. | ||
451 | */ | ||
452 | pagefault_disable(); | ||
453 | list_for_each_entry(obj, objects, exec_list) { | ||
454 | ret = i915_gem_execbuffer_relocate_object(obj, eb); | ||
455 | if (ret) | ||
456 | break; | ||
457 | } | ||
458 | pagefault_enable(); | ||
459 | |||
460 | return ret; | ||
461 | } | ||
462 | |||
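The pagefault_disable() above guards against lock recursion, not just latency. If the relocation array itself lives in a mmapped GEM object, a fault during the copy would re-enter the driver while struct_mutex is held; a sketch of the assumed call chain:

    __copy_from_user_inatomic()               /* struct_mutex held */
      -> page fault on the user address
        -> i915_gem_fault()
          -> mutex_lock(&dev->struct_mutex)   /* deadlock */

With pagefaults disabled the copy simply fails with -EFAULT, and the caller drops the lock and retries via the slow path further below.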
463 | static int | ||
464 | i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring, | ||
465 | struct drm_file *file, | ||
466 | struct list_head *objects) | ||
467 | { | ||
468 | struct drm_i915_gem_object *obj; | ||
469 | int ret, retry; | ||
470 | bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4; | ||
471 | struct list_head ordered_objects; | ||
472 | |||
473 | INIT_LIST_HEAD(&ordered_objects); | ||
474 | while (!list_empty(objects)) { | ||
475 | struct drm_i915_gem_exec_object2 *entry; | ||
476 | bool need_fence, need_mappable; | ||
477 | |||
478 | obj = list_first_entry(objects, | ||
479 | struct drm_i915_gem_object, | ||
480 | exec_list); | ||
481 | entry = obj->exec_entry; | ||
482 | |||
483 | need_fence = | ||
484 | has_fenced_gpu_access && | ||
485 | entry->flags & EXEC_OBJECT_NEEDS_FENCE && | ||
486 | obj->tiling_mode != I915_TILING_NONE; | ||
487 | need_mappable = | ||
488 | entry->relocation_count ? true : need_fence; | ||
489 | |||
490 | if (need_mappable) | ||
491 | list_move(&obj->exec_list, &ordered_objects); | ||
492 | else | ||
493 | list_move_tail(&obj->exec_list, &ordered_objects); | ||
494 | |||
495 | obj->base.pending_read_domains = 0; | ||
496 | obj->base.pending_write_domain = 0; | ||
497 | } | ||
498 | list_splice(&ordered_objects, objects); | ||
499 | |||
500 | /* Attempt to pin all of the buffers into the GTT. | ||
501 | * This is done in 3 phases: | ||
502 | * | ||
503 | * 1a. Unbind all objects that do not match the GTT constraints for | ||
504 | * the execbuffer (fenceable, mappable, alignment etc). | ||
505 | * 1b. Increment pin count for already bound objects. | ||
506 | * 2. Bind new objects. | ||
507 | * 3. Decrement pin count. | ||
508 | * | ||
509 | * This avoids unnecessary unbinding of later objects in order to make | ||
510 | * room for the earlier objects *unless* we need to defragment. | ||
511 | */ | ||
512 | retry = 0; | ||
513 | do { | ||
514 | ret = 0; | ||
515 | |||
516 | /* Unbind any ill-fitting objects or pin. */ | ||
517 | list_for_each_entry(obj, objects, exec_list) { | ||
518 | struct drm_i915_gem_exec_object2 *entry = obj->exec_entry; | ||
519 | bool need_fence, need_mappable; | ||
520 | if (!obj->gtt_space) | ||
521 | continue; | ||
522 | |||
523 | need_fence = | ||
524 | has_fenced_gpu_access && | ||
525 | entry->flags & EXEC_OBJECT_NEEDS_FENCE && | ||
526 | obj->tiling_mode != I915_TILING_NONE; | ||
527 | need_mappable = | ||
528 | entry->relocation_count ? true : need_fence; | ||
529 | |||
530 | if ((entry->alignment && obj->gtt_offset & (entry->alignment - 1)) || | ||
531 | (need_mappable && !obj->map_and_fenceable)) | ||
532 | ret = i915_gem_object_unbind(obj); | ||
533 | else | ||
534 | ret = i915_gem_object_pin(obj, | ||
535 | entry->alignment, | ||
536 | need_mappable); | ||
537 | if (ret) | ||
538 | goto err; | ||
541 | } | ||
542 | |||
543 | /* Bind fresh objects */ | ||
544 | list_for_each_entry(obj, objects, exec_list) { | ||
545 | struct drm_i915_gem_exec_object2 *entry = obj->exec_entry; | ||
546 | bool need_fence; | ||
547 | |||
548 | need_fence = | ||
549 | has_fenced_gpu_access && | ||
550 | entry->flags & EXEC_OBJECT_NEEDS_FENCE && | ||
551 | obj->tiling_mode != I915_TILING_NONE; | ||
552 | |||
553 | if (!obj->gtt_space) { | ||
554 | bool need_mappable = | ||
555 | entry->relocation_count ? true : need_fence; | ||
556 | |||
557 | ret = i915_gem_object_pin(obj, | ||
558 | entry->alignment, | ||
559 | need_mappable); | ||
560 | if (ret) | ||
561 | break; | ||
562 | } | ||
563 | |||
564 | if (has_fenced_gpu_access) { | ||
565 | if (need_fence) { | ||
566 | ret = i915_gem_object_get_fence(obj, ring); | ||
567 | if (ret) | ||
568 | break; | ||
569 | } else if (entry->flags & EXEC_OBJECT_NEEDS_FENCE && | ||
570 | obj->tiling_mode == I915_TILING_NONE) { | ||
571 | /* XXX pipelined! */ | ||
572 | ret = i915_gem_object_put_fence(obj); | ||
573 | if (ret) | ||
574 | break; | ||
575 | } | ||
576 | obj->pending_fenced_gpu_access = need_fence; | ||
577 | } | ||
578 | |||
579 | entry->offset = obj->gtt_offset; | ||
580 | } | ||
581 | |||
582 | /* Decrement pin count for bound objects */ | ||
583 | list_for_each_entry(obj, objects, exec_list) { | ||
584 | if (obj->gtt_space) | ||
585 | i915_gem_object_unpin(obj); | ||
586 | } | ||
587 | |||
588 | if (ret != -ENOSPC || retry > 1) | ||
589 | return ret; | ||
590 | |||
591 | /* First attempt, just clear anything that is purgeable. | ||
592 | * Second attempt, clear the entire GTT. | ||
593 | */ | ||
594 | ret = i915_gem_evict_everything(ring->dev, retry == 0); | ||
595 | if (ret) | ||
596 | return ret; | ||
597 | |||
598 | retry++; | ||
599 | } while (1); | ||
600 | |||
601 | err: | ||
602 | obj = list_entry(obj->exec_list.prev, | ||
603 | struct drm_i915_gem_object, | ||
604 | exec_list); | ||
605 | while (objects != &obj->exec_list) { | ||
606 | if (obj->gtt_space) | ||
607 | i915_gem_object_unpin(obj); | ||
608 | |||
609 | obj = list_entry(obj->exec_list.prev, | ||
610 | struct drm_i915_gem_object, | ||
611 | exec_list); | ||
612 | } | ||
613 | |||
614 | return ret; | ||
615 | } | ||
616 | |||
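Stripped of the pin/unbind details, the reservation policy is a two-strike eviction ladder. A skeleton of just that control flow, where try_to_pin_all() and evict() are hypothetical stand-ins (declared but left undefined) for the pin phases and i915_gem_evict_everything():

    #include <errno.h>

    int try_to_pin_all(void);          /* hypothetical: phases 1-3 above */
    int evict(int purgeable_only);     /* hypothetical: evict_everything */

    int reserve_with_eviction(void)
    {
        int retry = 0;
        for (;;) {
            int ret = try_to_pin_all();
            if (ret != -ENOSPC || retry > 1)
                return ret;
            /* first strike: drop purgeable objects; second: clear the GTT */
            ret = evict(retry == 0);
            if (ret)
                return ret;
            retry++;
        }
    }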
617 | static int | ||
618 | i915_gem_execbuffer_relocate_slow(struct drm_device *dev, | ||
619 | struct drm_file *file, | ||
620 | struct intel_ring_buffer *ring, | ||
621 | struct list_head *objects, | ||
622 | struct eb_objects *eb, | ||
623 | struct drm_i915_gem_exec_object2 *exec, | ||
624 | int count) | ||
625 | { | ||
626 | struct drm_i915_gem_relocation_entry *reloc; | ||
627 | struct drm_i915_gem_object *obj; | ||
628 | int *reloc_offset; | ||
629 | int i, total, ret; | ||
630 | |||
631 | /* We may process another execbuffer during the unlock... */ | ||
632 | while (!list_empty(objects)) { | ||
633 | obj = list_first_entry(objects, | ||
634 | struct drm_i915_gem_object, | ||
635 | exec_list); | ||
636 | list_del_init(&obj->exec_list); | ||
637 | drm_gem_object_unreference(&obj->base); | ||
638 | } | ||
639 | |||
640 | mutex_unlock(&dev->struct_mutex); | ||
641 | |||
642 | total = 0; | ||
643 | for (i = 0; i < count; i++) | ||
644 | total += exec[i].relocation_count; | ||
645 | |||
646 | reloc_offset = drm_malloc_ab(count, sizeof(*reloc_offset)); | ||
647 | reloc = drm_malloc_ab(total, sizeof(*reloc)); | ||
648 | if (reloc == NULL || reloc_offset == NULL) { | ||
649 | drm_free_large(reloc); | ||
650 | drm_free_large(reloc_offset); | ||
651 | mutex_lock(&dev->struct_mutex); | ||
652 | return -ENOMEM; | ||
653 | } | ||
654 | |||
655 | total = 0; | ||
656 | for (i = 0; i < count; i++) { | ||
657 | struct drm_i915_gem_relocation_entry __user *user_relocs; | ||
658 | |||
659 | user_relocs = (void __user *)(uintptr_t)exec[i].relocs_ptr; | ||
660 | |||
661 | if (copy_from_user(reloc+total, user_relocs, | ||
662 | exec[i].relocation_count * sizeof(*reloc))) { | ||
663 | ret = -EFAULT; | ||
664 | mutex_lock(&dev->struct_mutex); | ||
665 | goto err; | ||
666 | } | ||
667 | |||
668 | reloc_offset[i] = total; | ||
669 | total += exec[i].relocation_count; | ||
670 | } | ||
671 | |||
672 | ret = i915_mutex_lock_interruptible(dev); | ||
673 | if (ret) { | ||
674 | mutex_lock(&dev->struct_mutex); | ||
675 | goto err; | ||
676 | } | ||
677 | |||
678 | /* reacquire the objects */ | ||
679 | eb_reset(eb); | ||
680 | for (i = 0; i < count; i++) { | ||
681 | obj = to_intel_bo(drm_gem_object_lookup(dev, file, | ||
682 | exec[i].handle)); | ||
683 | if (&obj->base == NULL) { | ||
684 | DRM_ERROR("Invalid object handle %d at index %d\n", | ||
685 | exec[i].handle, i); | ||
686 | ret = -ENOENT; | ||
687 | goto err; | ||
688 | } | ||
689 | |||
690 | list_add_tail(&obj->exec_list, objects); | ||
691 | obj->exec_handle = exec[i].handle; | ||
692 | obj->exec_entry = &exec[i]; | ||
693 | eb_add_object(eb, obj); | ||
694 | } | ||
695 | |||
696 | ret = i915_gem_execbuffer_reserve(ring, file, objects); | ||
697 | if (ret) | ||
698 | goto err; | ||
699 | |||
700 | list_for_each_entry(obj, objects, exec_list) { | ||
701 | int offset = obj->exec_entry - exec; | ||
702 | ret = i915_gem_execbuffer_relocate_object_slow(obj, eb, | ||
703 | reloc + reloc_offset[offset]); | ||
704 | if (ret) | ||
705 | goto err; | ||
706 | } | ||
707 | |||
708 | /* Leave the user relocations as they are; this is the painfully slow path, | ||
709 | * and we want to avoid the complication of dropping the lock whilst | ||
710 | * having buffers reserved in the aperture and so causing spurious | ||
711 | * ENOSPC for random operations. | ||
712 | */ | ||
713 | |||
714 | err: | ||
715 | drm_free_large(reloc); | ||
716 | drm_free_large(reloc_offset); | ||
717 | return ret; | ||
718 | } | ||
719 | |||
720 | static int | ||
721 | i915_gem_execbuffer_flush(struct drm_device *dev, | ||
722 | uint32_t invalidate_domains, | ||
723 | uint32_t flush_domains, | ||
724 | uint32_t flush_rings) | ||
725 | { | ||
726 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
727 | int i, ret; | ||
728 | |||
729 | if (flush_domains & I915_GEM_DOMAIN_CPU) | ||
730 | intel_gtt_chipset_flush(); | ||
731 | |||
732 | if (flush_domains & I915_GEM_DOMAIN_GTT) | ||
733 | wmb(); | ||
734 | |||
735 | if ((flush_domains | invalidate_domains) & I915_GEM_GPU_DOMAINS) { | ||
736 | for (i = 0; i < I915_NUM_RINGS; i++) | ||
737 | if (flush_rings & (1 << i)) { | ||
738 | ret = i915_gem_flush_ring(&dev_priv->ring[i], | ||
739 | invalidate_domains, | ||
740 | flush_domains); | ||
741 | if (ret) | ||
742 | return ret; | ||
743 | } | ||
744 | } | ||
745 | |||
746 | return 0; | ||
747 | } | ||
748 | |||
749 | static int | ||
750 | i915_gem_execbuffer_sync_rings(struct drm_i915_gem_object *obj, | ||
751 | struct intel_ring_buffer *to) | ||
752 | { | ||
753 | struct intel_ring_buffer *from = obj->ring; | ||
754 | u32 seqno; | ||
755 | int ret, idx; | ||
756 | |||
757 | if (from == NULL || to == from) | ||
758 | return 0; | ||
759 | |||
760 | /* XXX gpu semaphores are implicated in various hard hangs on SNB */ | ||
761 | if (INTEL_INFO(obj->base.dev)->gen < 6 || !i915_semaphores) | ||
762 | return i915_gem_object_wait_rendering(obj); | ||
763 | |||
764 | idx = intel_ring_sync_index(from, to); | ||
765 | |||
766 | seqno = obj->last_rendering_seqno; | ||
767 | if (seqno <= from->sync_seqno[idx]) | ||
768 | return 0; | ||
769 | |||
770 | if (seqno == from->outstanding_lazy_request) { | ||
771 | struct drm_i915_gem_request *request; | ||
772 | |||
773 | request = kzalloc(sizeof(*request), GFP_KERNEL); | ||
774 | if (request == NULL) | ||
775 | return -ENOMEM; | ||
776 | |||
777 | ret = i915_add_request(from, NULL, request); | ||
778 | if (ret) { | ||
779 | kfree(request); | ||
780 | return ret; | ||
781 | } | ||
782 | |||
783 | seqno = request->seqno; | ||
784 | } | ||
785 | |||
786 | from->sync_seqno[idx] = seqno; | ||
787 | return intel_ring_sync(to, from, seqno - 1); | ||
788 | } | ||
789 | |||
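The skip test amounts to a per-(from,to) watermark: a new semaphore is only emitted when the object's seqno advances past the last one already waited for. A simplified standalone toy of that bookkeeping:

    #include <stdio.h>
    #include <stdint.h>

    static uint32_t sync_seqno = 100;  /* last seqno 'to' waited for on 'from' */

    static int needs_semaphore(uint32_t seqno)
    {
        if (seqno <= sync_seqno)
            return 0;                  /* an earlier wait already covers us */
        sync_seqno = seqno;
        return 1;
    }

    int main(void)
    {
        printf("%d\n", needs_semaphore(107)); /* 1: emit wait, record 107 */
        printf("%d\n", needs_semaphore(105)); /* 0: covered by the 107 wait */
        return 0;
    }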
790 | static int | ||
791 | i915_gem_execbuffer_wait_for_flips(struct intel_ring_buffer *ring, u32 flips) | ||
792 | { | ||
793 | u32 plane, flip_mask; | ||
794 | int ret; | ||
795 | |||
796 | /* Check for any pending flips. As we only maintain a flip queue depth | ||
797 | * of 1, we can simply insert a WAIT for the next display flip prior | ||
798 | * to executing the batch and avoid stalling the CPU. | ||
799 | */ | ||
800 | |||
801 | for (plane = 0; flips >> plane; plane++) { | ||
802 | if (((flips >> plane) & 1) == 0) | ||
803 | continue; | ||
804 | |||
805 | if (plane) | ||
806 | flip_mask = MI_WAIT_FOR_PLANE_B_FLIP; | ||
807 | else | ||
808 | flip_mask = MI_WAIT_FOR_PLANE_A_FLIP; | ||
809 | |||
810 | ret = intel_ring_begin(ring, 2); | ||
811 | if (ret) | ||
812 | return ret; | ||
813 | |||
814 | intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask); | ||
815 | intel_ring_emit(ring, MI_NOOP); | ||
816 | intel_ring_advance(ring); | ||
817 | } | ||
818 | |||
819 | return 0; | ||
820 | } | ||
821 | |||
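The flips word is consumed bit by bit, one MI_WAIT_FOR_EVENT per pending plane. A standalone decode with the same loop shape, printing instead of emitting ring commands:

    #include <stdio.h>

    int main(void)
    {
        unsigned flips = 0x3;   /* planes A and B both have a flip queued */
        unsigned plane;

        for (plane = 0; flips >> plane; plane++) {
            if (((flips >> plane) & 1) == 0)
                continue;
            printf("wait for plane %c flip\n", plane ? 'B' : 'A');
        }
        return 0;
    }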
822 | |||
823 | static int | ||
824 | i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring, | ||
825 | struct list_head *objects) | ||
826 | { | ||
827 | struct drm_i915_gem_object *obj; | ||
828 | struct change_domains cd; | ||
829 | int ret; | ||
830 | |||
831 | memset(&cd, 0, sizeof(cd)); | ||
832 | list_for_each_entry(obj, objects, exec_list) | ||
833 | i915_gem_object_set_to_gpu_domain(obj, ring, &cd); | ||
834 | |||
835 | if (cd.invalidate_domains | cd.flush_domains) { | ||
836 | ret = i915_gem_execbuffer_flush(ring->dev, | ||
837 | cd.invalidate_domains, | ||
838 | cd.flush_domains, | ||
839 | cd.flush_rings); | ||
840 | if (ret) | ||
841 | return ret; | ||
842 | } | ||
843 | |||
844 | if (cd.flips) { | ||
845 | ret = i915_gem_execbuffer_wait_for_flips(ring, cd.flips); | ||
846 | if (ret) | ||
847 | return ret; | ||
848 | } | ||
849 | |||
850 | list_for_each_entry(obj, objects, exec_list) { | ||
851 | ret = i915_gem_execbuffer_sync_rings(obj, ring); | ||
852 | if (ret) | ||
853 | return ret; | ||
854 | } | ||
855 | |||
856 | return 0; | ||
857 | } | ||
858 | |||
859 | static bool | ||
860 | i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec) | ||
861 | { | ||
862 | return ((exec->batch_start_offset | exec->batch_len) & 0x7) == 0; | ||
863 | } | ||
864 | |||
865 | static int | ||
866 | validate_exec_list(struct drm_i915_gem_exec_object2 *exec, | ||
867 | int count) | ||
868 | { | ||
869 | int i; | ||
870 | |||
871 | for (i = 0; i < count; i++) { | ||
872 | char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr; | ||
873 | int length; /* limited by fault_in_pages_readable() */ | ||
874 | |||
875 | /* First check for malicious input causing overflow */ | ||
876 | if (exec[i].relocation_count > | ||
877 | INT_MAX / sizeof(struct drm_i915_gem_relocation_entry)) | ||
878 | return -EINVAL; | ||
879 | |||
880 | length = exec[i].relocation_count * | ||
881 | sizeof(struct drm_i915_gem_relocation_entry); | ||
882 | if (!access_ok(VERIFY_READ, ptr, length)) | ||
883 | return -EFAULT; | ||
884 | |||
885 | /* we may also need to update the presumed offsets */ | ||
886 | if (!access_ok(VERIFY_WRITE, ptr, length)) | ||
887 | return -EFAULT; | ||
888 | |||
889 | if (fault_in_pages_readable(ptr, length)) | ||
890 | return -EFAULT; | ||
891 | } | ||
892 | |||
893 | return 0; | ||
894 | } | ||
895 | |||
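The INT_MAX guard is what keeps the later length computation meaningful: assuming the struct layout in i915_drm.h, a relocation entry is 32 bytes, so an attacker-chosen count can wrap the product into a small positive length. A standalone demonstration:

    #include <stdio.h>
    #include <limits.h>

    int main(void)
    {
        const unsigned entry = 32;        /* assumed sizeof(relocation entry) */
        unsigned count = 0x08000001;      /* > INT_MAX / 32 */
        int length = count * entry;       /* wraps: 0x100000020 -> 0x20 */

        printf("guard %s, naive length %d\n",
               count > INT_MAX / entry ? "rejects" : "accepts", length);
        return 0;   /* guard rejects, naive length 32 */
    }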
896 | static void | ||
897 | i915_gem_execbuffer_move_to_active(struct list_head *objects, | ||
898 | struct intel_ring_buffer *ring, | ||
899 | u32 seqno) | ||
900 | { | ||
901 | struct drm_i915_gem_object *obj; | ||
902 | |||
903 | list_for_each_entry(obj, objects, exec_list) { | ||
904 | u32 old_read = obj->base.read_domains; | ||
905 | u32 old_write = obj->base.write_domain; | ||
906 | |||
907 | |||
908 | obj->base.read_domains = obj->base.pending_read_domains; | ||
909 | obj->base.write_domain = obj->base.pending_write_domain; | ||
910 | obj->fenced_gpu_access = obj->pending_fenced_gpu_access; | ||
911 | |||
912 | i915_gem_object_move_to_active(obj, ring, seqno); | ||
913 | if (obj->base.write_domain) { | ||
914 | obj->dirty = 1; | ||
915 | obj->pending_gpu_write = true; | ||
916 | list_move_tail(&obj->gpu_write_list, | ||
917 | &ring->gpu_write_list); | ||
918 | intel_mark_busy(ring->dev, obj); | ||
919 | } | ||
920 | |||
921 | trace_i915_gem_object_change_domain(obj, old_read, old_write); | ||
922 | } | ||
923 | } | ||
924 | |||
925 | static void | ||
926 | i915_gem_execbuffer_retire_commands(struct drm_device *dev, | ||
927 | struct drm_file *file, | ||
928 | struct intel_ring_buffer *ring) | ||
929 | { | ||
930 | struct drm_i915_gem_request *request; | ||
931 | u32 invalidate; | ||
932 | |||
933 | /* | ||
934 | * Ensure that the commands in the batch buffer are | ||
935 | * finished before the interrupt fires. | ||
936 | * | ||
937 | * The sampler always gets flushed on i965 (sigh). | ||
938 | */ | ||
939 | invalidate = I915_GEM_DOMAIN_COMMAND; | ||
940 | if (INTEL_INFO(dev)->gen >= 4) | ||
941 | invalidate |= I915_GEM_DOMAIN_SAMPLER; | ||
942 | if (ring->flush(ring, invalidate, 0)) { | ||
943 | i915_gem_next_request_seqno(ring); | ||
944 | return; | ||
945 | } | ||
946 | |||
947 | /* Add a breadcrumb for the completion of the batch buffer */ | ||
948 | request = kzalloc(sizeof(*request), GFP_KERNEL); | ||
949 | if (request == NULL || i915_add_request(ring, file, request)) { | ||
950 | i915_gem_next_request_seqno(ring); | ||
951 | kfree(request); | ||
952 | } | ||
953 | } | ||
954 | |||
955 | static int | ||
956 | i915_gem_do_execbuffer(struct drm_device *dev, void *data, | ||
957 | struct drm_file *file, | ||
958 | struct drm_i915_gem_execbuffer2 *args, | ||
959 | struct drm_i915_gem_exec_object2 *exec) | ||
960 | { | ||
961 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
962 | struct list_head objects; | ||
963 | struct eb_objects *eb; | ||
964 | struct drm_i915_gem_object *batch_obj; | ||
965 | struct drm_clip_rect *cliprects = NULL; | ||
966 | struct intel_ring_buffer *ring; | ||
967 | u32 exec_start, exec_len; | ||
968 | u32 seqno; | ||
969 | int ret, mode, i; | ||
970 | |||
971 | if (!i915_gem_check_execbuffer(args)) { | ||
972 | DRM_ERROR("execbuf with invalid offset/length\n"); | ||
973 | return -EINVAL; | ||
974 | } | ||
975 | |||
976 | ret = validate_exec_list(exec, args->buffer_count); | ||
977 | if (ret) | ||
978 | return ret; | ||
979 | |||
980 | switch (args->flags & I915_EXEC_RING_MASK) { | ||
981 | case I915_EXEC_DEFAULT: | ||
982 | case I915_EXEC_RENDER: | ||
983 | ring = &dev_priv->ring[RCS]; | ||
984 | break; | ||
985 | case I915_EXEC_BSD: | ||
986 | if (!HAS_BSD(dev)) { | ||
987 | DRM_ERROR("execbuf with invalid ring (BSD)\n"); | ||
988 | return -EINVAL; | ||
989 | } | ||
990 | ring = &dev_priv->ring[VCS]; | ||
991 | break; | ||
992 | case I915_EXEC_BLT: | ||
993 | if (!HAS_BLT(dev)) { | ||
994 | DRM_ERROR("execbuf with invalid ring (BLT)\n"); | ||
995 | return -EINVAL; | ||
996 | } | ||
997 | ring = &dev_priv->ring[BCS]; | ||
998 | break; | ||
999 | default: | ||
1000 | DRM_ERROR("execbuf with unknown ring: %d\n", | ||
1001 | (int)(args->flags & I915_EXEC_RING_MASK)); | ||
1002 | return -EINVAL; | ||
1003 | } | ||
1004 | |||
1005 | mode = args->flags & I915_EXEC_CONSTANTS_MASK; | ||
1006 | switch (mode) { | ||
1007 | case I915_EXEC_CONSTANTS_REL_GENERAL: | ||
1008 | case I915_EXEC_CONSTANTS_ABSOLUTE: | ||
1009 | case I915_EXEC_CONSTANTS_REL_SURFACE: | ||
1010 | if (ring == &dev_priv->ring[RCS] && | ||
1011 | mode != dev_priv->relative_constants_mode) { | ||
1012 | if (INTEL_INFO(dev)->gen < 4) | ||
1013 | return -EINVAL; | ||
1014 | |||
1015 | if (INTEL_INFO(dev)->gen > 5 && | ||
1016 | mode == I915_EXEC_CONSTANTS_REL_SURFACE) | ||
1017 | return -EINVAL; | ||
1018 | |||
1019 | ret = intel_ring_begin(ring, 4); | ||
1020 | if (ret) | ||
1021 | return ret; | ||
1022 | |||
1023 | intel_ring_emit(ring, MI_NOOP); | ||
1024 | intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1)); | ||
1025 | intel_ring_emit(ring, INSTPM); | ||
1026 | intel_ring_emit(ring, | ||
1027 | I915_EXEC_CONSTANTS_MASK << 16 | mode); | ||
1028 | intel_ring_advance(ring); | ||
1029 | |||
1030 | dev_priv->relative_constants_mode = mode; | ||
1031 | } | ||
1032 | break; | ||
1033 | default: | ||
1034 | DRM_ERROR("execbuf with unknown constants: %d\n", mode); | ||
1035 | return -EINVAL; | ||
1036 | } | ||
1037 | |||
1038 | if (args->buffer_count < 1) { | ||
1039 | DRM_ERROR("execbuf with %d buffers\n", args->buffer_count); | ||
1040 | return -EINVAL; | ||
1041 | } | ||
1042 | |||
1043 | if (args->num_cliprects != 0) { | ||
1044 | if (ring != &dev_priv->ring[RCS]) { | ||
1045 | DRM_ERROR("clip rectangles are only valid with the render ring\n"); | ||
1046 | return -EINVAL; | ||
1047 | } | ||
1048 | |||
1049 | cliprects = kmalloc(args->num_cliprects * sizeof(*cliprects), | ||
1050 | GFP_KERNEL); | ||
1051 | if (cliprects == NULL) { | ||
1052 | ret = -ENOMEM; | ||
1053 | goto pre_mutex_err; | ||
1054 | } | ||
1055 | |||
1056 | if (copy_from_user(cliprects, | ||
1057 | (struct drm_clip_rect __user *)(uintptr_t) | ||
1058 | args->cliprects_ptr, | ||
1059 | sizeof(*cliprects)*args->num_cliprects)) { | ||
1060 | ret = -EFAULT; | ||
1061 | goto pre_mutex_err; | ||
1062 | } | ||
1063 | } | ||
1064 | |||
1065 | ret = i915_mutex_lock_interruptible(dev); | ||
1066 | if (ret) | ||
1067 | goto pre_mutex_err; | ||
1068 | |||
1069 | if (dev_priv->mm.suspended) { | ||
1070 | mutex_unlock(&dev->struct_mutex); | ||
1071 | ret = -EBUSY; | ||
1072 | goto pre_mutex_err; | ||
1073 | } | ||
1074 | |||
1075 | eb = eb_create(args->buffer_count); | ||
1076 | if (eb == NULL) { | ||
1077 | mutex_unlock(&dev->struct_mutex); | ||
1078 | ret = -ENOMEM; | ||
1079 | goto pre_mutex_err; | ||
1080 | } | ||
1081 | |||
1082 | /* Look up object handles */ | ||
1083 | INIT_LIST_HEAD(&objects); | ||
1084 | for (i = 0; i < args->buffer_count; i++) { | ||
1085 | struct drm_i915_gem_object *obj; | ||
1086 | |||
1087 | obj = to_intel_bo(drm_gem_object_lookup(dev, file, | ||
1088 | exec[i].handle)); | ||
1089 | if (&obj->base == NULL) { | ||
1090 | DRM_ERROR("Invalid object handle %d at index %d\n", | ||
1091 | exec[i].handle, i); | ||
1092 | /* prevent error path from reading uninitialized data */ | ||
1093 | ret = -ENOENT; | ||
1094 | goto err; | ||
1095 | } | ||
1096 | |||
1097 | if (!list_empty(&obj->exec_list)) { | ||
1098 | DRM_ERROR("Object %p [handle %d, index %d] appears more than once in object list\n", | ||
1099 | obj, exec[i].handle, i); | ||
1100 | ret = -EINVAL; | ||
1101 | goto err; | ||
1102 | } | ||
1103 | |||
1104 | list_add_tail(&obj->exec_list, &objects); | ||
1105 | obj->exec_handle = exec[i].handle; | ||
1106 | obj->exec_entry = &exec[i]; | ||
1107 | eb_add_object(eb, obj); | ||
1108 | } | ||
1109 | |||
1110 | /* take note of the batch buffer before we might reorder the lists */ | ||
1111 | batch_obj = list_entry(objects.prev, | ||
1112 | struct drm_i915_gem_object, | ||
1113 | exec_list); | ||
1114 | |||
1115 | /* Move the objects en masse into the GTT, evicting if necessary. */ | ||
1116 | ret = i915_gem_execbuffer_reserve(ring, file, &objects); | ||
1117 | if (ret) | ||
1118 | goto err; | ||
1119 | |||
1120 | /* The objects are in their final locations, apply the relocations. */ | ||
1121 | ret = i915_gem_execbuffer_relocate(dev, eb, &objects); | ||
1122 | if (ret) { | ||
1123 | if (ret == -EFAULT) { | ||
1124 | ret = i915_gem_execbuffer_relocate_slow(dev, file, ring, | ||
1125 | &objects, eb, | ||
1126 | exec, | ||
1127 | args->buffer_count); | ||
1128 | BUG_ON(!mutex_is_locked(&dev->struct_mutex)); | ||
1129 | } | ||
1130 | if (ret) | ||
1131 | goto err; | ||
1132 | } | ||
1133 | |||
1134 | /* Set the pending read domains for the batch buffer to COMMAND */ | ||
1135 | if (batch_obj->base.pending_write_domain) { | ||
1136 | DRM_ERROR("Attempting to use self-modifying batch buffer\n"); | ||
1137 | ret = -EINVAL; | ||
1138 | goto err; | ||
1139 | } | ||
1140 | batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND; | ||
1141 | |||
1142 | ret = i915_gem_execbuffer_move_to_gpu(ring, &objects); | ||
1143 | if (ret) | ||
1144 | goto err; | ||
1145 | |||
1146 | seqno = i915_gem_next_request_seqno(ring); | ||
1147 | for (i = 0; i < ARRAY_SIZE(ring->sync_seqno); i++) { | ||
1148 | if (seqno < ring->sync_seqno[i]) { | ||
1149 | /* The GPU cannot handle its semaphore value wrapping, | ||
1150 | * so every billion or so execbuffers, we need to stall | ||
1151 | * the GPU in order to reset the counters. | ||
1152 | */ | ||
1153 | ret = i915_gpu_idle(dev); | ||
1154 | if (ret) | ||
1155 | goto err; | ||
1156 | |||
1157 | BUG_ON(ring->sync_seqno[i]); | ||
1158 | } | ||
1159 | } | ||
1160 | |||
1161 | trace_i915_gem_ring_dispatch(ring, seqno); | ||
1162 | |||
1163 | exec_start = batch_obj->gtt_offset + args->batch_start_offset; | ||
1164 | exec_len = args->batch_len; | ||
1165 | if (cliprects) { | ||
1166 | for (i = 0; i < args->num_cliprects; i++) { | ||
1167 | ret = i915_emit_box(dev, &cliprects[i], | ||
1168 | args->DR1, args->DR4); | ||
1169 | if (ret) | ||
1170 | goto err; | ||
1171 | |||
1172 | ret = ring->dispatch_execbuffer(ring, | ||
1173 | exec_start, exec_len); | ||
1174 | if (ret) | ||
1175 | goto err; | ||
1176 | } | ||
1177 | } else { | ||
1178 | ret = ring->dispatch_execbuffer(ring, exec_start, exec_len); | ||
1179 | if (ret) | ||
1180 | goto err; | ||
1181 | } | ||
1182 | |||
1183 | i915_gem_execbuffer_move_to_active(&objects, ring, seqno); | ||
1184 | i915_gem_execbuffer_retire_commands(dev, file, ring); | ||
1185 | |||
1186 | err: | ||
1187 | eb_destroy(eb); | ||
1188 | while (!list_empty(&objects)) { | ||
1189 | struct drm_i915_gem_object *obj; | ||
1190 | |||
1191 | obj = list_first_entry(&objects, | ||
1192 | struct drm_i915_gem_object, | ||
1193 | exec_list); | ||
1194 | list_del_init(&obj->exec_list); | ||
1195 | drm_gem_object_unreference(&obj->base); | ||
1196 | } | ||
1197 | |||
1198 | mutex_unlock(&dev->struct_mutex); | ||
1199 | |||
1200 | pre_mutex_err: | ||
1201 | kfree(cliprects); | ||
1202 | return ret; | ||
1203 | } | ||
1204 | |||
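Taken together, the helpers above give i915_gem_do_execbuffer() a fixed shape; the rough order of operations is:

    1. validate the execbuffer args and pick a ring (and constants mode)
    2. look up the handles, building the objects list and the eb hash
    3. reserve: pin every object into the GTT, evicting on -ENOSPC
    4. relocate on the fast path; fall back to relocate_slow on -EFAULT
    5. move_to_gpu: flush/invalidate domains, sync rings, wait for flips
    6. dispatch the batch (once per cliprect on the render ring)
    7. move objects to the active list and emit the retiring request

Any failure unwinds through the shared err: label, which drops the object references and the eb hash before releasing struct_mutex.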
1205 | /* | ||
1206 | * Legacy execbuffer just creates an exec2 list from the original exec object | ||
1207 | * list array and passes it to the real function. | ||
1208 | */ | ||
1209 | int | ||
1210 | i915_gem_execbuffer(struct drm_device *dev, void *data, | ||
1211 | struct drm_file *file) | ||
1212 | { | ||
1213 | struct drm_i915_gem_execbuffer *args = data; | ||
1214 | struct drm_i915_gem_execbuffer2 exec2; | ||
1215 | struct drm_i915_gem_exec_object *exec_list = NULL; | ||
1216 | struct drm_i915_gem_exec_object2 *exec2_list = NULL; | ||
1217 | int ret, i; | ||
1218 | |||
1219 | if (args->buffer_count < 1) { | ||
1220 | DRM_ERROR("execbuf with %d buffers\n", args->buffer_count); | ||
1221 | return -EINVAL; | ||
1222 | } | ||
1223 | |||
1224 | /* Copy in the exec list from userland */ | ||
1225 | exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count); | ||
1226 | exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count); | ||
1227 | if (exec_list == NULL || exec2_list == NULL) { | ||
1228 | DRM_ERROR("Failed to allocate exec list for %d buffers\n", | ||
1229 | args->buffer_count); | ||
1230 | drm_free_large(exec_list); | ||
1231 | drm_free_large(exec2_list); | ||
1232 | return -ENOMEM; | ||
1233 | } | ||
1234 | ret = copy_from_user(exec_list, | ||
1235 | (struct drm_i915_relocation_entry __user *) | ||
1236 | (uintptr_t) args->buffers_ptr, | ||
1237 | sizeof(*exec_list) * args->buffer_count); | ||
1238 | if (ret != 0) { | ||
1239 | DRM_ERROR("copy %d exec entries failed %d\n", | ||
1240 | args->buffer_count, ret); | ||
1241 | drm_free_large(exec_list); | ||
1242 | drm_free_large(exec2_list); | ||
1243 | return -EFAULT; | ||
1244 | } | ||
1245 | |||
1246 | for (i = 0; i < args->buffer_count; i++) { | ||
1247 | exec2_list[i].handle = exec_list[i].handle; | ||
1248 | exec2_list[i].relocation_count = exec_list[i].relocation_count; | ||
1249 | exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr; | ||
1250 | exec2_list[i].alignment = exec_list[i].alignment; | ||
1251 | exec2_list[i].offset = exec_list[i].offset; | ||
1252 | if (INTEL_INFO(dev)->gen < 4) | ||
1253 | exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE; | ||
1254 | else | ||
1255 | exec2_list[i].flags = 0; | ||
1256 | } | ||
1257 | |||
1258 | exec2.buffers_ptr = args->buffers_ptr; | ||
1259 | exec2.buffer_count = args->buffer_count; | ||
1260 | exec2.batch_start_offset = args->batch_start_offset; | ||
1261 | exec2.batch_len = args->batch_len; | ||
1262 | exec2.DR1 = args->DR1; | ||
1263 | exec2.DR4 = args->DR4; | ||
1264 | exec2.num_cliprects = args->num_cliprects; | ||
1265 | exec2.cliprects_ptr = args->cliprects_ptr; | ||
1266 | exec2.flags = I915_EXEC_RENDER; | ||
1267 | |||
1268 | ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list); | ||
1269 | if (!ret) { | ||
1270 | /* Copy the new buffer offsets back to the user's exec list. */ | ||
1271 | for (i = 0; i < args->buffer_count; i++) | ||
1272 | exec_list[i].offset = exec2_list[i].offset; | ||
1273 | /* ... and back out to userspace */ | ||
1274 | ret = copy_to_user((struct drm_i915_relocation_entry __user *) | ||
1275 | (uintptr_t) args->buffers_ptr, | ||
1276 | exec_list, | ||
1277 | sizeof(*exec_list) * args->buffer_count); | ||
1278 | if (ret) { | ||
1279 | ret = -EFAULT; | ||
1280 | DRM_ERROR("failed to copy %d exec entries " | ||
1281 | "back to user (%d)\n", | ||
1282 | args->buffer_count, ret); | ||
1283 | } | ||
1284 | } | ||
1285 | |||
1286 | drm_free_large(exec_list); | ||
1287 | drm_free_large(exec2_list); | ||
1288 | return ret; | ||
1289 | } | ||
1290 | |||
1291 | int | ||
1292 | i915_gem_execbuffer2(struct drm_device *dev, void *data, | ||
1293 | struct drm_file *file) | ||
1294 | { | ||
1295 | struct drm_i915_gem_execbuffer2 *args = data; | ||
1296 | struct drm_i915_gem_exec_object2 *exec2_list = NULL; | ||
1297 | int ret; | ||
1298 | |||
1299 | if (args->buffer_count < 1) { | ||
1300 | DRM_ERROR("execbuf2 with %d buffers\n", args->buffer_count); | ||
1301 | return -EINVAL; | ||
1302 | } | ||
1303 | |||
1304 | exec2_list = kmalloc(sizeof(*exec2_list)*args->buffer_count, | ||
1305 | GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY); | ||
1306 | if (exec2_list == NULL) | ||
1307 | exec2_list = drm_malloc_ab(sizeof(*exec2_list), | ||
1308 | args->buffer_count); | ||
1309 | if (exec2_list == NULL) { | ||
1310 | DRM_ERROR("Failed to allocate exec list for %d buffers\n", | ||
1311 | args->buffer_count); | ||
1312 | return -ENOMEM; | ||
1313 | } | ||
1314 | ret = copy_from_user(exec2_list, | ||
1315 | (struct drm_i915_relocation_entry __user *) | ||
1316 | (uintptr_t) args->buffers_ptr, | ||
1317 | sizeof(*exec2_list) * args->buffer_count); | ||
1318 | if (ret != 0) { | ||
1319 | DRM_ERROR("copy %d exec entries failed %d\n", | ||
1320 | args->buffer_count, ret); | ||
1321 | drm_free_large(exec2_list); | ||
1322 | return -EFAULT; | ||
1323 | } | ||
1324 | |||
1325 | ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list); | ||
1326 | if (!ret) { | ||
1327 | /* Copy the new buffer offsets back to the user's exec list. */ | ||
1328 | ret = copy_to_user((struct drm_i915_relocation_entry __user *) | ||
1329 | (uintptr_t) args->buffers_ptr, | ||
1330 | exec2_list, | ||
1331 | sizeof(*exec2_list) * args->buffer_count); | ||
1332 | if (ret) { | ||
1333 | ret = -EFAULT; | ||
1334 | DRM_ERROR("failed to copy %d exec entries " | ||
1335 | "back to user (%d)\n", | ||
1336 | args->buffer_count, ret); | ||
1337 | } | ||
1338 | } | ||
1339 | |||
1340 | drm_free_large(exec2_list); | ||
1341 | return ret; | ||
1342 | } | ||
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c new file mode 100644 index 000000000000..e46b645773cf --- /dev/null +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c | |||
@@ -0,0 +1,122 @@ | |||
1 | /* | ||
2 | * Copyright © 2010 Daniel Vetter | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice (including the next | ||
12 | * paragraph) shall be included in all copies or substantial portions of the | ||
13 | * Software. | ||
14 | * | ||
15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
18 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
19 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
20 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS | ||
21 | * IN THE SOFTWARE. | ||
22 | * | ||
23 | */ | ||
24 | |||
25 | #include "drmP.h" | ||
26 | #include "drm.h" | ||
27 | #include "i915_drm.h" | ||
28 | #include "i915_drv.h" | ||
29 | #include "i915_trace.h" | ||
30 | #include "intel_drv.h" | ||
31 | |||
32 | /* XXX kill agp_type! */ | ||
33 | static unsigned int cache_level_to_agp_type(struct drm_device *dev, | ||
34 | enum i915_cache_level cache_level) | ||
35 | { | ||
36 | switch (cache_level) { | ||
37 | case I915_CACHE_LLC_MLC: | ||
38 | if (INTEL_INFO(dev)->gen >= 6) | ||
39 | return AGP_USER_CACHED_MEMORY_LLC_MLC; | ||
40 | /* Older chipsets do not have this extra level of CPU | ||
41 | * caching, so fall through and request the PTE simply | ||
42 | * as cached. | ||
43 | */ | ||
44 | case I915_CACHE_LLC: | ||
45 | return AGP_USER_CACHED_MEMORY; | ||
46 | default: | ||
47 | case I915_CACHE_NONE: | ||
48 | return AGP_USER_MEMORY; | ||
49 | } | ||
50 | } | ||
51 | |||
52 | void i915_gem_restore_gtt_mappings(struct drm_device *dev) | ||
53 | { | ||
54 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
55 | struct drm_i915_gem_object *obj; | ||
56 | |||
57 | /* First fill our portion of the GTT with scratch pages */ | ||
58 | intel_gtt_clear_range(dev_priv->mm.gtt_start / PAGE_SIZE, | ||
59 | (dev_priv->mm.gtt_end - dev_priv->mm.gtt_start) / PAGE_SIZE); | ||
60 | |||
61 | list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) { | ||
62 | unsigned int agp_type = | ||
63 | cache_level_to_agp_type(dev, obj->cache_level); | ||
64 | |||
65 | i915_gem_clflush_object(obj); | ||
66 | |||
67 | if (dev_priv->mm.gtt->needs_dmar) { | ||
68 | BUG_ON(!obj->sg_list); | ||
69 | |||
70 | intel_gtt_insert_sg_entries(obj->sg_list, | ||
71 | obj->num_sg, | ||
72 | obj->gtt_space->start >> PAGE_SHIFT, | ||
73 | agp_type); | ||
74 | } else | ||
75 | intel_gtt_insert_pages(obj->gtt_space->start | ||
76 | >> PAGE_SHIFT, | ||
77 | obj->base.size >> PAGE_SHIFT, | ||
78 | obj->pages, | ||
79 | agp_type); | ||
80 | } | ||
81 | |||
82 | intel_gtt_chipset_flush(); | ||
83 | } | ||
84 | |||
85 | int i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj) | ||
86 | { | ||
87 | struct drm_device *dev = obj->base.dev; | ||
88 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
89 | unsigned int agp_type = cache_level_to_agp_type(dev, obj->cache_level); | ||
90 | int ret; | ||
91 | |||
92 | if (dev_priv->mm.gtt->needs_dmar) { | ||
93 | ret = intel_gtt_map_memory(obj->pages, | ||
94 | obj->base.size >> PAGE_SHIFT, | ||
95 | &obj->sg_list, | ||
96 | &obj->num_sg); | ||
97 | if (ret != 0) | ||
98 | return ret; | ||
99 | |||
100 | intel_gtt_insert_sg_entries(obj->sg_list, | ||
101 | obj->num_sg, | ||
102 | obj->gtt_space->start >> PAGE_SHIFT, | ||
103 | agp_type); | ||
104 | } else | ||
105 | intel_gtt_insert_pages(obj->gtt_space->start >> PAGE_SHIFT, | ||
106 | obj->base.size >> PAGE_SHIFT, | ||
107 | obj->pages, | ||
108 | agp_type); | ||
109 | |||
110 | return 0; | ||
111 | } | ||
112 | |||
113 | void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj) | ||
114 | { | ||
115 | intel_gtt_clear_range(obj->gtt_space->start >> PAGE_SHIFT, | ||
116 | obj->base.size >> PAGE_SHIFT); | ||
117 | |||
118 | if (obj->sg_list) { | ||
119 | intel_gtt_unmap_memory(obj->sg_list, obj->num_sg); | ||
120 | obj->sg_list = NULL; | ||
121 | } | ||
122 | } | ||
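
The cache_level_to_agp_type() helper in this new file relies on a deliberate switch fallthrough: LLC+MLC only exists from gen6 on, so older parts degrade to plain cached PTEs. A gcc-compilable sketch of that mapping, with stub constants standing in for the real AGP_USER_* values from the intel-gtt headers:

#include <stdio.h>

enum cache_level { CACHE_NONE, CACHE_LLC, CACHE_LLC_MLC };

#define UC	0 /* stand-in for AGP_USER_MEMORY */
#define CACHED	1 /* stand-in for AGP_USER_CACHED_MEMORY */
#define LLC_MLC	2 /* stand-in for AGP_USER_CACHED_MEMORY_LLC_MLC */

static unsigned int to_pte_type(int gen, enum cache_level level)
{
	switch (level) {
	case CACHE_LLC_MLC:
		if (gen >= 6)
			return LLC_MLC;
		/* older chipsets lack the extra cache level: fall through */
	case CACHE_LLC:
		return CACHED;
	default:
		return UC;
	}
}

int main(void)
{
	printf("gen6 LLC_MLC -> %u\n", to_pte_type(6, CACHE_LLC_MLC)); /* 2 */
	printf("gen4 LLC_MLC -> %u\n", to_pte_type(4, CACHE_LLC_MLC)); /* 1 */
	printf("gen4 NONE    -> %u\n", to_pte_type(4, CACHE_NONE));    /* 0 */
	return 0;
}
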
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c index 710eca70b323..99c4faa59d8f 100644 --- a/drivers/gpu/drm/i915/i915_gem_tiling.c +++ b/drivers/gpu/drm/i915/i915_gem_tiling.c | |||
@@ -92,13 +92,13 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev) | |||
92 | uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN; | 92 | uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN; |
93 | uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN; | 93 | uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN; |
94 | 94 | ||
95 | if (IS_IRONLAKE(dev) || IS_GEN6(dev)) { | 95 | if (INTEL_INFO(dev)->gen >= 5) { |
96 | /* On Ironlake, whatever the DRAM config, the GPU always does | 96 | /* On Ironlake, whatever the DRAM config, the GPU always does |
97 | * the same swizzling setup. | 97 | * the same swizzling setup. |
98 | */ | 98 | */ |
99 | swizzle_x = I915_BIT_6_SWIZZLE_9_10; | 99 | swizzle_x = I915_BIT_6_SWIZZLE_9_10; |
100 | swizzle_y = I915_BIT_6_SWIZZLE_9; | 100 | swizzle_y = I915_BIT_6_SWIZZLE_9; |
101 | } else if (!IS_I9XX(dev)) { | 101 | } else if (IS_GEN2(dev)) { |
102 | /* As far as we know, the 865 doesn't have these bit 6 | 102 | /* As far as we know, the 865 doesn't have these bit 6 |
103 | * swizzling issues. | 103 | * swizzling issues. |
104 | */ | 104 | */ |
@@ -181,7 +181,7 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev) | |||
181 | } | 181 | } |
182 | 182 | ||
183 | /* Check pitch constraints for all chips & tiling formats */ | 183 | /* Check pitch constraints for all chips & tiling formats */ |
184 | bool | 184 | static bool |
185 | i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode) | 185 | i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode) |
186 | { | 186 | { |
187 | int tile_width; | 187 | int tile_width; |
@@ -190,19 +190,19 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode) | |||
190 | if (tiling_mode == I915_TILING_NONE) | 190 | if (tiling_mode == I915_TILING_NONE) |
191 | return true; | 191 | return true; |
192 | 192 | ||
193 | if (!IS_I9XX(dev) || | 193 | if (IS_GEN2(dev) || |
194 | (tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))) | 194 | (tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))) |
195 | tile_width = 128; | 195 | tile_width = 128; |
196 | else | 196 | else |
197 | tile_width = 512; | 197 | tile_width = 512; |
198 | 198 | ||
199 | /* check maximum stride & object size */ | 199 | /* check maximum stride & object size */ |
200 | if (IS_I965G(dev)) { | 200 | if (INTEL_INFO(dev)->gen >= 4) { |
201 | /* i965 stores the end address of the gtt mapping in the fence | 201 | /* i965 stores the end address of the gtt mapping in the fence |
202 | * reg, so don't bother to check the size */ | 202 | * reg, so don't bother to check the size */ |
203 | if (stride / 128 > I965_FENCE_MAX_PITCH_VAL) | 203 | if (stride / 128 > I965_FENCE_MAX_PITCH_VAL) |
204 | return false; | 204 | return false; |
205 | } else if (IS_GEN3(dev) || IS_GEN2(dev)) { | 205 | } else { |
206 | if (stride > 8192) | 206 | if (stride > 8192) |
207 | return false; | 207 | return false; |
208 | 208 | ||
@@ -216,7 +216,7 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode) | |||
216 | } | 216 | } |
217 | 217 | ||
218 | /* 965+ just needs multiples of tile width */ | 218 | /* 965+ just needs multiples of tile width */ |
219 | if (IS_I965G(dev)) { | 219 | if (INTEL_INFO(dev)->gen >= 4) { |
220 | if (stride & (tile_width - 1)) | 220 | if (stride & (tile_width - 1)) |
221 | return false; | 221 | return false; |
222 | return true; | 222 | return true; |
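
The "multiples of tile width" test in this hunk uses the usual power-of-two trick: stride & (tile_width - 1) is zero exactly when stride is a multiple of tile_width, since tile_width is always 128 or 512 here. A quick standalone demo with made-up strides:

#include <stdio.h>

int main(void)
{
	unsigned int tile_width = 512;
	unsigned int strides[] = { 512, 1024, 1536, 1000 };

	for (int i = 0; i < 4; i++)
		printf("stride %4u: %s\n", strides[i],
		       (strides[i] & (tile_width - 1)) ? "rejected" : "ok");
	return 0; /* 512/1024/1536 ok, 1000 rejected */
}
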
@@ -232,30 +232,44 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode) | |||
232 | return true; | 232 | return true; |
233 | } | 233 | } |
234 | 234 | ||
235 | bool | 235 | /* Is the current GTT allocation valid for the change in tiling? */ |
236 | i915_gem_object_fence_offset_ok(struct drm_gem_object *obj, int tiling_mode) | 236 | static bool |
237 | i915_gem_object_fence_ok(struct drm_i915_gem_object *obj, int tiling_mode) | ||
237 | { | 238 | { |
238 | struct drm_device *dev = obj->dev; | 239 | u32 size; |
239 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | ||
240 | 240 | ||
241 | if (obj_priv->gtt_space == NULL) | 241 | if (tiling_mode == I915_TILING_NONE) |
242 | return true; | 242 | return true; |
243 | 243 | ||
244 | if (tiling_mode == I915_TILING_NONE) | 244 | if (INTEL_INFO(obj->base.dev)->gen >= 4) |
245 | return true; | 245 | return true; |
246 | 246 | ||
247 | if (!IS_I965G(dev)) { | 247 | if (INTEL_INFO(obj->base.dev)->gen == 3) { |
248 | if (obj_priv->gtt_offset & (obj->size - 1)) | 248 | if (obj->gtt_offset & ~I915_FENCE_START_MASK) |
249 | return false; | ||
250 | } else { | ||
251 | if (obj->gtt_offset & ~I830_FENCE_START_MASK) | ||
249 | return false; | 252 | return false; |
250 | if (IS_I9XX(dev)) { | ||
251 | if (obj_priv->gtt_offset & ~I915_FENCE_START_MASK) | ||
252 | return false; | ||
253 | } else { | ||
254 | if (obj_priv->gtt_offset & ~I830_FENCE_START_MASK) | ||
255 | return false; | ||
256 | } | ||
257 | } | 253 | } |
258 | 254 | ||
255 | /* | ||
256 | * On older chips the object needs to be aligned to the size of | ||
257 | * the smallest fence register that can contain it. | ||
258 | */ | ||
259 | if (INTEL_INFO(obj->base.dev)->gen == 3) | ||
260 | size = 1024*1024; | ||
261 | else | ||
262 | size = 512*1024; | ||
263 | |||
264 | while (size < obj->base.size) | ||
265 | size <<= 1; | ||
266 | |||
267 | if (obj->gtt_space->size != size) | ||
268 | return false; | ||
269 | |||
270 | if (obj->gtt_offset & (size - 1)) | ||
271 | return false; | ||
272 | |||
259 | return true; | 273 | return true; |
260 | } | 274 | } |
261 | 275 | ||
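
The fence sizing rule added in this hunk is easiest to see with numbers: the GTT region must be the smallest power-of-two fence (starting at 1 MiB on gen3, 512 KiB earlier) that covers the object, and the offset must be aligned to that size. A standalone worked example under those assumptions:

#include <stdio.h>

static unsigned int fence_size(int gen, unsigned int obj_size)
{
	unsigned int size = (gen == 3) ? 1024 * 1024 : 512 * 1024;

	while (size < obj_size)	/* double until the object fits */
		size <<= 1;
	return size;
}

int main(void)
{
	/* A 1.5MiB object on gen3 needs a 2MiB fence region... */
	unsigned int size = fence_size(3, 1536 * 1024);
	unsigned int offset = 0x200000; /* ...and a 2MiB-aligned offset */

	printf("fence size: %u KiB\n", size / 1024);	/* 2048 */
	printf("offset ok:  %s\n",
	       (offset & (size - 1)) ? "no" : "yes");	/* yes */
	return 0;
}
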
@@ -265,26 +279,25 @@ i915_gem_object_fence_offset_ok(struct drm_gem_object *obj, int tiling_mode) | |||
265 | */ | 279 | */ |
266 | int | 280 | int |
267 | i915_gem_set_tiling(struct drm_device *dev, void *data, | 281 | i915_gem_set_tiling(struct drm_device *dev, void *data, |
268 | struct drm_file *file_priv) | 282 | struct drm_file *file) |
269 | { | 283 | { |
270 | struct drm_i915_gem_set_tiling *args = data; | 284 | struct drm_i915_gem_set_tiling *args = data; |
271 | drm_i915_private_t *dev_priv = dev->dev_private; | 285 | drm_i915_private_t *dev_priv = dev->dev_private; |
272 | struct drm_gem_object *obj; | 286 | struct drm_i915_gem_object *obj; |
273 | struct drm_i915_gem_object *obj_priv; | ||
274 | int ret = 0; | 287 | int ret = 0; |
275 | 288 | ||
276 | obj = drm_gem_object_lookup(dev, file_priv, args->handle); | 289 | obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); |
277 | if (obj == NULL) | 290 | if (&obj->base == NULL) |
278 | return -ENOENT; | 291 | return -ENOENT; |
279 | obj_priv = to_intel_bo(obj); | ||
280 | 292 | ||
281 | if (!i915_tiling_ok(dev, args->stride, obj->size, args->tiling_mode)) { | 293 | if (!i915_tiling_ok(dev, |
282 | drm_gem_object_unreference_unlocked(obj); | 294 | args->stride, obj->base.size, args->tiling_mode)) { |
295 | drm_gem_object_unreference_unlocked(&obj->base); | ||
283 | return -EINVAL; | 296 | return -EINVAL; |
284 | } | 297 | } |
285 | 298 | ||
286 | if (obj_priv->pin_count) { | 299 | if (obj->pin_count) { |
287 | drm_gem_object_unreference_unlocked(obj); | 300 | drm_gem_object_unreference_unlocked(&obj->base); |
288 | return -EBUSY; | 301 | return -EBUSY; |
289 | } | 302 | } |
290 | 303 | ||
@@ -318,31 +331,40 @@ i915_gem_set_tiling(struct drm_device *dev, void *data, | |||
318 | } | 331 | } |
319 | 332 | ||
320 | mutex_lock(&dev->struct_mutex); | 333 | mutex_lock(&dev->struct_mutex); |
321 | if (args->tiling_mode != obj_priv->tiling_mode || | 334 | if (args->tiling_mode != obj->tiling_mode || |
322 | args->stride != obj_priv->stride) { | 335 | args->stride != obj->stride) { |
323 | /* We need to rebind the object if its current allocation | 336 | /* We need to rebind the object if its current allocation |
324 | * no longer meets the alignment restrictions for its new | 337 | * no longer meets the alignment restrictions for its new |
325 | * tiling mode. Otherwise we can just leave it alone, but | 338 | * tiling mode. Otherwise we can just leave it alone, but |
326 | * need to ensure that any fence register is cleared. | 339 | * need to ensure that any fence register is cleared. |
327 | */ | 340 | */ |
328 | if (!i915_gem_object_fence_offset_ok(obj, args->tiling_mode)) | 341 | i915_gem_release_mmap(obj); |
329 | ret = i915_gem_object_unbind(obj); | 342 | |
330 | else if (obj_priv->fence_reg != I915_FENCE_REG_NONE) | 343 | obj->map_and_fenceable = |
331 | ret = i915_gem_object_put_fence_reg(obj); | 344 | obj->gtt_space == NULL || |
332 | else | 345 | (obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end && |
333 | i915_gem_release_mmap(obj); | 346 | i915_gem_object_fence_ok(obj, args->tiling_mode)); |
334 | 347 | ||
335 | if (ret != 0) { | 348 | /* Rebind if we need a change of alignment */ |
336 | args->tiling_mode = obj_priv->tiling_mode; | 349 | if (!obj->map_and_fenceable) { |
337 | args->stride = obj_priv->stride; | 350 | u32 unfenced_alignment = |
338 | goto err; | 351 | i915_gem_get_unfenced_gtt_alignment(dev, |
352 | obj->base.size, | ||
353 | args->tiling_mode); | ||
354 | if (obj->gtt_offset & (unfenced_alignment - 1)) | ||
355 | ret = i915_gem_object_unbind(obj); | ||
339 | } | 356 | } |
340 | 357 | ||
341 | obj_priv->tiling_mode = args->tiling_mode; | 358 | if (ret == 0) { |
342 | obj_priv->stride = args->stride; | 359 | obj->tiling_changed = true; |
360 | obj->tiling_mode = args->tiling_mode; | ||
361 | obj->stride = args->stride; | ||
362 | } | ||
343 | } | 363 | } |
344 | err: | 364 | /* we have to maintain this existing ABI... */ |
345 | drm_gem_object_unreference(obj); | 365 | args->stride = obj->stride; |
366 | args->tiling_mode = obj->tiling_mode; | ||
367 | drm_gem_object_unreference(&obj->base); | ||
346 | mutex_unlock(&dev->struct_mutex); | 368 | mutex_unlock(&dev->struct_mutex); |
347 | 369 | ||
348 | return ret; | 370 | return ret; |
@@ -353,22 +375,20 @@ err: | |||
353 | */ | 375 | */ |
354 | int | 376 | int |
355 | i915_gem_get_tiling(struct drm_device *dev, void *data, | 377 | i915_gem_get_tiling(struct drm_device *dev, void *data, |
356 | struct drm_file *file_priv) | 378 | struct drm_file *file) |
357 | { | 379 | { |
358 | struct drm_i915_gem_get_tiling *args = data; | 380 | struct drm_i915_gem_get_tiling *args = data; |
359 | drm_i915_private_t *dev_priv = dev->dev_private; | 381 | drm_i915_private_t *dev_priv = dev->dev_private; |
360 | struct drm_gem_object *obj; | 382 | struct drm_i915_gem_object *obj; |
361 | struct drm_i915_gem_object *obj_priv; | ||
362 | 383 | ||
363 | obj = drm_gem_object_lookup(dev, file_priv, args->handle); | 384 | obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); |
364 | if (obj == NULL) | 385 | if (&obj->base == NULL) |
365 | return -ENOENT; | 386 | return -ENOENT; |
366 | obj_priv = to_intel_bo(obj); | ||
367 | 387 | ||
368 | mutex_lock(&dev->struct_mutex); | 388 | mutex_lock(&dev->struct_mutex); |
369 | 389 | ||
370 | args->tiling_mode = obj_priv->tiling_mode; | 390 | args->tiling_mode = obj->tiling_mode; |
371 | switch (obj_priv->tiling_mode) { | 391 | switch (obj->tiling_mode) { |
372 | case I915_TILING_X: | 392 | case I915_TILING_X: |
373 | args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x; | 393 | args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x; |
374 | break; | 394 | break; |
@@ -388,7 +408,7 @@ i915_gem_get_tiling(struct drm_device *dev, void *data, | |||
388 | if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17) | 408 | if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17) |
389 | args->swizzle_mode = I915_BIT_6_SWIZZLE_9_10; | 409 | args->swizzle_mode = I915_BIT_6_SWIZZLE_9_10; |
390 | 410 | ||
391 | drm_gem_object_unreference(obj); | 411 | drm_gem_object_unreference(&obj->base); |
392 | mutex_unlock(&dev->struct_mutex); | 412 | mutex_unlock(&dev->struct_mutex); |
393 | 413 | ||
394 | return 0; | 414 | return 0; |
@@ -399,16 +419,14 @@ i915_gem_get_tiling(struct drm_device *dev, void *data, | |||
399 | * bit 17 of its physical address and therefore being interpreted differently | 419 | * bit 17 of its physical address and therefore being interpreted differently |
400 | * by the GPU. | 420 | * by the GPU. |
401 | */ | 421 | */ |
402 | static int | 422 | static void |
403 | i915_gem_swizzle_page(struct page *page) | 423 | i915_gem_swizzle_page(struct page *page) |
404 | { | 424 | { |
425 | char temp[64]; | ||
405 | char *vaddr; | 426 | char *vaddr; |
406 | int i; | 427 | int i; |
407 | char temp[64]; | ||
408 | 428 | ||
409 | vaddr = kmap(page); | 429 | vaddr = kmap(page); |
410 | if (vaddr == NULL) | ||
411 | return -ENOMEM; | ||
412 | 430 | ||
413 | for (i = 0; i < PAGE_SIZE; i += 128) { | 431 | for (i = 0; i < PAGE_SIZE; i += 128) { |
414 | memcpy(temp, &vaddr[i], 64); | 432 | memcpy(temp, &vaddr[i], 64); |
@@ -417,55 +435,47 @@ i915_gem_swizzle_page(struct page *page) | |||
417 | } | 435 | } |
418 | 436 | ||
419 | kunmap(page); | 437 | kunmap(page); |
420 | |||
421 | return 0; | ||
422 | } | 438 | } |
423 | 439 | ||
424 | void | 440 | void |
425 | i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj) | 441 | i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj) |
426 | { | 442 | { |
427 | struct drm_device *dev = obj->dev; | 443 | struct drm_device *dev = obj->base.dev; |
428 | drm_i915_private_t *dev_priv = dev->dev_private; | 444 | drm_i915_private_t *dev_priv = dev->dev_private; |
429 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 445 | int page_count = obj->base.size >> PAGE_SHIFT; |
430 | int page_count = obj->size >> PAGE_SHIFT; | ||
431 | int i; | 446 | int i; |
432 | 447 | ||
433 | if (dev_priv->mm.bit_6_swizzle_x != I915_BIT_6_SWIZZLE_9_10_17) | 448 | if (dev_priv->mm.bit_6_swizzle_x != I915_BIT_6_SWIZZLE_9_10_17) |
434 | return; | 449 | return; |
435 | 450 | ||
436 | if (obj_priv->bit_17 == NULL) | 451 | if (obj->bit_17 == NULL) |
437 | return; | 452 | return; |
438 | 453 | ||
439 | for (i = 0; i < page_count; i++) { | 454 | for (i = 0; i < page_count; i++) { |
440 | char new_bit_17 = page_to_phys(obj_priv->pages[i]) >> 17; | 455 | char new_bit_17 = page_to_phys(obj->pages[i]) >> 17; |
441 | if ((new_bit_17 & 0x1) != | 456 | if ((new_bit_17 & 0x1) != |
442 | (test_bit(i, obj_priv->bit_17) != 0)) { | 457 | (test_bit(i, obj->bit_17) != 0)) { |
443 | int ret = i915_gem_swizzle_page(obj_priv->pages[i]); | 458 | i915_gem_swizzle_page(obj->pages[i]); |
444 | if (ret != 0) { | 459 | set_page_dirty(obj->pages[i]); |
445 | DRM_ERROR("Failed to swizzle page\n"); | ||
446 | return; | ||
447 | } | ||
448 | set_page_dirty(obj_priv->pages[i]); | ||
449 | } | 460 | } |
450 | } | 461 | } |
451 | } | 462 | } |
452 | 463 | ||
453 | void | 464 | void |
454 | i915_gem_object_save_bit_17_swizzle(struct drm_gem_object *obj) | 465 | i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj) |
455 | { | 466 | { |
456 | struct drm_device *dev = obj->dev; | 467 | struct drm_device *dev = obj->base.dev; |
457 | drm_i915_private_t *dev_priv = dev->dev_private; | 468 | drm_i915_private_t *dev_priv = dev->dev_private; |
458 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 469 | int page_count = obj->base.size >> PAGE_SHIFT; |
459 | int page_count = obj->size >> PAGE_SHIFT; | ||
460 | int i; | 470 | int i; |
461 | 471 | ||
462 | if (dev_priv->mm.bit_6_swizzle_x != I915_BIT_6_SWIZZLE_9_10_17) | 472 | if (dev_priv->mm.bit_6_swizzle_x != I915_BIT_6_SWIZZLE_9_10_17) |
463 | return; | 473 | return; |
464 | 474 | ||
465 | if (obj_priv->bit_17 == NULL) { | 475 | if (obj->bit_17 == NULL) { |
466 | obj_priv->bit_17 = kmalloc(BITS_TO_LONGS(page_count) * | 476 | obj->bit_17 = kmalloc(BITS_TO_LONGS(page_count) * |
467 | sizeof(long), GFP_KERNEL); | 477 | sizeof(long), GFP_KERNEL); |
468 | if (obj_priv->bit_17 == NULL) { | 478 | if (obj->bit_17 == NULL) { |
469 | DRM_ERROR("Failed to allocate memory for bit 17 " | 479 | DRM_ERROR("Failed to allocate memory for bit 17 " |
470 | "record\n"); | 480 | "record\n"); |
471 | return; | 481 | return; |
@@ -473,9 +483,9 @@ i915_gem_object_save_bit_17_swizzle(struct drm_gem_object *obj) | |||
473 | } | 483 | } |
474 | 484 | ||
475 | for (i = 0; i < page_count; i++) { | 485 | for (i = 0; i < page_count; i++) { |
476 | if (page_to_phys(obj_priv->pages[i]) & (1 << 17)) | 486 | if (page_to_phys(obj->pages[i]) & (1 << 17)) |
477 | __set_bit(i, obj_priv->bit_17); | 487 | __set_bit(i, obj->bit_17); |
478 | else | 488 | else |
479 | __clear_bit(i, obj_priv->bit_17); | 489 | __clear_bit(i, obj->bit_17); |
480 | } | 490 | } |
481 | } | 491 | } |
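
The bit-17 machinery above records, per page, bit 17 of the physical address at the time the pages were last used; if a page comes back from swap at an address where that bit differs, i915_gem_swizzle_page() exchanges the two 64-byte halves of every 128-byte chunk to keep the data consistent with the GPU's bit-6 swizzling. A standalone illustration of that swap (4096 mimics PAGE_SIZE); note the operation is self-inverse, so applying it twice restores the page:

#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096

static void swizzle_page(unsigned char *vaddr)
{
	unsigned char temp[64];

	for (int i = 0; i < PAGE_SIZE; i += 128) {
		memcpy(temp, &vaddr[i], 64);		/* save first half */
		memcpy(&vaddr[i], &vaddr[i + 64], 64);	/* second -> first */
		memcpy(&vaddr[i + 64], temp, 64);	/* saved -> second */
	}
}

int main(void)
{
	static unsigned char page[PAGE_SIZE];

	for (int i = 0; i < PAGE_SIZE; i++)
		page[i] = (unsigned char)i;

	swizzle_page(page);
	printf("page[0] = %u (was 0, now holds old byte 64)\n", page[0]);
	swizzle_page(page); /* self-inverse: page is back to normal */
	printf("page[0] = %u after second swizzle\n", page[0]);
	return 0;
}
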
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 744225ebb4b2..3b03f85ea627 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c | |||
@@ -64,87 +64,37 @@ | |||
64 | #define DRM_I915_VBLANK_PIPE_ALL (DRM_I915_VBLANK_PIPE_A | \ | 64 | #define DRM_I915_VBLANK_PIPE_ALL (DRM_I915_VBLANK_PIPE_A | \ |
65 | DRM_I915_VBLANK_PIPE_B) | 65 | DRM_I915_VBLANK_PIPE_B) |
66 | 66 | ||
67 | void | ||
68 | ironlake_enable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask) | ||
69 | { | ||
70 | if ((dev_priv->gt_irq_mask_reg & mask) != 0) { | ||
71 | dev_priv->gt_irq_mask_reg &= ~mask; | ||
72 | I915_WRITE(GTIMR, dev_priv->gt_irq_mask_reg); | ||
73 | (void) I915_READ(GTIMR); | ||
74 | } | ||
75 | } | ||
76 | |||
77 | void | ||
78 | ironlake_disable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask) | ||
79 | { | ||
80 | if ((dev_priv->gt_irq_mask_reg & mask) != mask) { | ||
81 | dev_priv->gt_irq_mask_reg |= mask; | ||
82 | I915_WRITE(GTIMR, dev_priv->gt_irq_mask_reg); | ||
83 | (void) I915_READ(GTIMR); | ||
84 | } | ||
85 | } | ||
86 | |||
87 | /* For display hotplug interrupt */ | 67 | /* For display hotplug interrupt */ |
88 | void | 68 | static void |
89 | ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask) | 69 | ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask) |
90 | { | 70 | { |
91 | if ((dev_priv->irq_mask_reg & mask) != 0) { | 71 | if ((dev_priv->irq_mask & mask) != 0) { |
92 | dev_priv->irq_mask_reg &= ~mask; | 72 | dev_priv->irq_mask &= ~mask; |
93 | I915_WRITE(DEIMR, dev_priv->irq_mask_reg); | 73 | I915_WRITE(DEIMR, dev_priv->irq_mask); |
94 | (void) I915_READ(DEIMR); | 74 | POSTING_READ(DEIMR); |
95 | } | 75 | } |
96 | } | 76 | } |
97 | 77 | ||
98 | static inline void | 78 | static inline void |
99 | ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask) | 79 | ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask) |
100 | { | 80 | { |
101 | if ((dev_priv->irq_mask_reg & mask) != mask) { | 81 | if ((dev_priv->irq_mask & mask) != mask) { |
102 | dev_priv->irq_mask_reg |= mask; | 82 | dev_priv->irq_mask |= mask; |
103 | I915_WRITE(DEIMR, dev_priv->irq_mask_reg); | 83 | I915_WRITE(DEIMR, dev_priv->irq_mask); |
104 | (void) I915_READ(DEIMR); | 84 | POSTING_READ(DEIMR); |
105 | } | ||
106 | } | ||
107 | |||
108 | void | ||
109 | i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask) | ||
110 | { | ||
111 | if ((dev_priv->irq_mask_reg & mask) != 0) { | ||
112 | dev_priv->irq_mask_reg &= ~mask; | ||
113 | I915_WRITE(IMR, dev_priv->irq_mask_reg); | ||
114 | (void) I915_READ(IMR); | ||
115 | } | 85 | } |
116 | } | 86 | } |
117 | 87 | ||
118 | void | 88 | void |
119 | i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask) | ||
120 | { | ||
121 | if ((dev_priv->irq_mask_reg & mask) != mask) { | ||
122 | dev_priv->irq_mask_reg |= mask; | ||
123 | I915_WRITE(IMR, dev_priv->irq_mask_reg); | ||
124 | (void) I915_READ(IMR); | ||
125 | } | ||
126 | } | ||
127 | |||
128 | static inline u32 | ||
129 | i915_pipestat(int pipe) | ||
130 | { | ||
131 | if (pipe == 0) | ||
132 | return PIPEASTAT; | ||
133 | if (pipe == 1) | ||
134 | return PIPEBSTAT; | ||
135 | BUG(); | ||
136 | } | ||
137 | |||
138 | void | ||
139 | i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask) | 89 | i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask) |
140 | { | 90 | { |
141 | if ((dev_priv->pipestat[pipe] & mask) != mask) { | 91 | if ((dev_priv->pipestat[pipe] & mask) != mask) { |
142 | u32 reg = i915_pipestat(pipe); | 92 | u32 reg = PIPESTAT(pipe); |
143 | 93 | ||
144 | dev_priv->pipestat[pipe] |= mask; | 94 | dev_priv->pipestat[pipe] |= mask; |
145 | /* Enable the interrupt, clear any pending status */ | 95 | /* Enable the interrupt, clear any pending status */ |
146 | I915_WRITE(reg, dev_priv->pipestat[pipe] | (mask >> 16)); | 96 | I915_WRITE(reg, dev_priv->pipestat[pipe] | (mask >> 16)); |
147 | (void) I915_READ(reg); | 97 | POSTING_READ(reg); |
148 | } | 98 | } |
149 | } | 99 | } |
150 | 100 | ||
@@ -152,30 +102,35 @@ void | |||
152 | i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask) | 102 | i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask) |
153 | { | 103 | { |
154 | if ((dev_priv->pipestat[pipe] & mask) != 0) { | 104 | if ((dev_priv->pipestat[pipe] & mask) != 0) { |
155 | u32 reg = i915_pipestat(pipe); | 105 | u32 reg = PIPESTAT(pipe); |
156 | 106 | ||
157 | dev_priv->pipestat[pipe] &= ~mask; | 107 | dev_priv->pipestat[pipe] &= ~mask; |
158 | I915_WRITE(reg, dev_priv->pipestat[pipe]); | 108 | I915_WRITE(reg, dev_priv->pipestat[pipe]); |
159 | (void) I915_READ(reg); | 109 | POSTING_READ(reg); |
160 | } | 110 | } |
161 | } | 111 | } |
162 | 112 | ||
163 | /** | 113 | /** |
164 | * intel_enable_asle - enable ASLE interrupt for OpRegion | 114 | * intel_enable_asle - enable ASLE interrupt for OpRegion |
165 | */ | 115 | */ |
166 | void intel_enable_asle (struct drm_device *dev) | 116 | void intel_enable_asle(struct drm_device *dev) |
167 | { | 117 | { |
168 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 118 | drm_i915_private_t *dev_priv = dev->dev_private; |
119 | unsigned long irqflags; | ||
120 | |||
121 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); | ||
169 | 122 | ||
170 | if (HAS_PCH_SPLIT(dev)) | 123 | if (HAS_PCH_SPLIT(dev)) |
171 | ironlake_enable_display_irq(dev_priv, DE_GSE); | 124 | ironlake_enable_display_irq(dev_priv, DE_GSE); |
172 | else { | 125 | else { |
173 | i915_enable_pipestat(dev_priv, 1, | 126 | i915_enable_pipestat(dev_priv, 1, |
174 | PIPE_LEGACY_BLC_EVENT_ENABLE); | 127 | PIPE_LEGACY_BLC_EVENT_ENABLE); |
175 | if (IS_I965G(dev)) | 128 | if (INTEL_INFO(dev)->gen >= 4) |
176 | i915_enable_pipestat(dev_priv, 0, | 129 | i915_enable_pipestat(dev_priv, 0, |
177 | PIPE_LEGACY_BLC_EVENT_ENABLE); | 130 | PIPE_LEGACY_BLC_EVENT_ENABLE); |
178 | } | 131 | } |
132 | |||
133 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); | ||
179 | } | 134 | } |
180 | 135 | ||
181 | /** | 136 | /** |
@@ -191,66 +146,155 @@ static int | |||
191 | i915_pipe_enabled(struct drm_device *dev, int pipe) | 146 | i915_pipe_enabled(struct drm_device *dev, int pipe) |
192 | { | 147 | { |
193 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 148 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
194 | unsigned long pipeconf = pipe ? PIPEBCONF : PIPEACONF; | 149 | return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE; |
195 | |||
196 | if (I915_READ(pipeconf) & PIPEACONF_ENABLE) | ||
197 | return 1; | ||
198 | |||
199 | return 0; | ||
200 | } | 150 | } |
201 | 151 | ||
202 | /* Called from drm generic code, passed a 'crtc', which | 152 | /* Called from drm generic code, passed a 'crtc', which |
203 | * we use as a pipe index | 153 | * we use as a pipe index |
204 | */ | 154 | */ |
205 | u32 i915_get_vblank_counter(struct drm_device *dev, int pipe) | 155 | static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe) |
206 | { | 156 | { |
207 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 157 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
208 | unsigned long high_frame; | 158 | unsigned long high_frame; |
209 | unsigned long low_frame; | 159 | unsigned long low_frame; |
210 | u32 high1, high2, low, count; | 160 | u32 high1, high2, low; |
211 | |||
212 | high_frame = pipe ? PIPEBFRAMEHIGH : PIPEAFRAMEHIGH; | ||
213 | low_frame = pipe ? PIPEBFRAMEPIXEL : PIPEAFRAMEPIXEL; | ||
214 | 161 | ||
215 | if (!i915_pipe_enabled(dev, pipe)) { | 162 | if (!i915_pipe_enabled(dev, pipe)) { |
216 | DRM_DEBUG_DRIVER("trying to get vblank count for disabled " | 163 | DRM_DEBUG_DRIVER("trying to get vblank count for disabled " |
217 | "pipe %d\n", pipe); | 164 | "pipe %c\n", pipe_name(pipe)); |
218 | return 0; | 165 | return 0; |
219 | } | 166 | } |
220 | 167 | ||
168 | high_frame = PIPEFRAME(pipe); | ||
169 | low_frame = PIPEFRAMEPIXEL(pipe); | ||
170 | |||
221 | /* | 171 | /* |
222 | * High & low register fields aren't synchronized, so make sure | 172 | * High & low register fields aren't synchronized, so make sure |
223 | * we get a low value that's stable across two reads of the high | 173 | * we get a low value that's stable across two reads of the high |
224 | * register. | 174 | * register. |
225 | */ | 175 | */ |
226 | do { | 176 | do { |
227 | high1 = ((I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >> | 177 | high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK; |
228 | PIPE_FRAME_HIGH_SHIFT); | 178 | low = I915_READ(low_frame) & PIPE_FRAME_LOW_MASK; |
229 | low = ((I915_READ(low_frame) & PIPE_FRAME_LOW_MASK) >> | 179 | high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK; |
230 | PIPE_FRAME_LOW_SHIFT); | ||
231 | high2 = ((I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >> | ||
232 | PIPE_FRAME_HIGH_SHIFT); | ||
233 | } while (high1 != high2); | 180 | } while (high1 != high2); |
234 | 181 | ||
235 | count = (high1 << 8) | low; | 182 | high1 >>= PIPE_FRAME_HIGH_SHIFT; |
236 | 183 | low >>= PIPE_FRAME_LOW_SHIFT; | |
237 | return count; | 184 | return (high1 << 8) | low; |
238 | } | 185 | } |
239 | 186 | ||
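
The retry loop in i915_get_vblank_counter() above guards against torn reads: the high and low halves of the frame counter live in separate registers, so the high half is read before and after the low half and the whole read is retried until both high reads agree. A simulated version of that pattern, with a software counter that ticks between "register" reads to force a rollover:

#include <stdio.h>
#include <stdint.h>

static uint32_t frame;	/* stands in for the hardware frame counter */

static uint32_t read_high(void) { return frame >> 8; }
static uint32_t read_low(void)  { frame++; return frame & 0xff; }

static uint32_t stable_frame_count(void)
{
	uint32_t high1, high2, low;

	do {
		high1 = read_high();
		low   = read_low();
		high2 = read_high();
	} while (high1 != high2); /* retry if the high half rolled over */

	return (high1 << 8) | low;
}

int main(void)
{
	frame = 0xff; /* right at a high-half rollover */
	printf("count = 0x%x\n", stable_frame_count()); /* 0x101 */
	return 0;
}
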
240 | u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe) | 187 | static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe) |
241 | { | 188 | { |
242 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 189 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
243 | int reg = pipe ? PIPEB_FRMCOUNT_GM45 : PIPEA_FRMCOUNT_GM45; | 190 | int reg = PIPE_FRMCOUNT_GM45(pipe); |
244 | 191 | ||
245 | if (!i915_pipe_enabled(dev, pipe)) { | 192 | if (!i915_pipe_enabled(dev, pipe)) { |
246 | DRM_DEBUG_DRIVER("trying to get vblank count for disabled " | 193 | DRM_DEBUG_DRIVER("trying to get vblank count for disabled " |
247 | "pipe %d\n", pipe); | 194 | "pipe %c\n", pipe_name(pipe)); |
248 | return 0; | 195 | return 0; |
249 | } | 196 | } |
250 | 197 | ||
251 | return I915_READ(reg); | 198 | return I915_READ(reg); |
252 | } | 199 | } |
253 | 200 | ||
201 | static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe, | ||
202 | int *vpos, int *hpos) | ||
203 | { | ||
204 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | ||
205 | u32 vbl = 0, position = 0; | ||
206 | int vbl_start, vbl_end, htotal, vtotal; | ||
207 | bool in_vbl = true; | ||
208 | int ret = 0; | ||
209 | |||
210 | if (!i915_pipe_enabled(dev, pipe)) { | ||
211 | DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled " | ||
212 | "pipe %c\n", pipe_name(pipe)); | ||
213 | return 0; | ||
214 | } | ||
215 | |||
216 | /* Get vtotal. */ | ||
217 | vtotal = 1 + ((I915_READ(VTOTAL(pipe)) >> 16) & 0x1fff); | ||
218 | |||
219 | if (INTEL_INFO(dev)->gen >= 4) { | ||
220 | /* No obvious pixelcount register. Only query vertical | ||
221 | * scanout position from Display scan line register. | ||
222 | */ | ||
223 | position = I915_READ(PIPEDSL(pipe)); | ||
224 | |||
225 | /* Decode into vertical scanout position. Don't have | ||
226 | * horizontal scanout position. | ||
227 | */ | ||
228 | *vpos = position & 0x1fff; | ||
229 | *hpos = 0; | ||
230 | } else { | ||
231 | /* Have access to pixelcount since start of frame. | ||
232 | * We can split this into vertical and horizontal | ||
233 | * scanout position. | ||
234 | */ | ||
235 | position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT; | ||
236 | |||
237 | htotal = 1 + ((I915_READ(HTOTAL(pipe)) >> 16) & 0x1fff); | ||
238 | *vpos = position / htotal; | ||
239 | *hpos = position - (*vpos * htotal); | ||
240 | } | ||
241 | |||
242 | /* Query vblank area. */ | ||
243 | vbl = I915_READ(VBLANK(pipe)); | ||
244 | |||
245 | /* Test position against vblank region. */ | ||
246 | vbl_start = vbl & 0x1fff; | ||
247 | vbl_end = (vbl >> 16) & 0x1fff; | ||
248 | |||
249 | if ((*vpos < vbl_start) || (*vpos > vbl_end)) | ||
250 | in_vbl = false; | ||
251 | |||
252 | /* Inside "upper part" of vblank area? Apply corrective offset: */ | ||
253 | if (in_vbl && (*vpos >= vbl_start)) | ||
254 | *vpos = *vpos - vtotal; | ||
255 | |||
256 | /* Readouts valid? */ | ||
257 | if (vbl > 0) | ||
258 | ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE; | ||
259 | |||
260 | /* In vblank? */ | ||
261 | if (in_vbl) | ||
262 | ret |= DRM_SCANOUTPOS_INVBL; | ||
263 | |||
264 | return ret; | ||
265 | } | ||
266 | |||
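
The pre-gen4 branch of i915_get_crtc_scanoutpos() above has only a running pixel count since the start of the frame, so the vertical and horizontal positions fall out of a divide and remainder by htotal. A worked example with illustrative numbers rather than real mode timings:

#include <stdio.h>

int main(void)
{
	int htotal = 1344;		/* pixels per line, incl. blanking */
	int position = 3 * 1344 + 500;	/* pixel count since frame start */

	int vpos = position / htotal;		/* 3: full lines scanned */
	int hpos = position - vpos * htotal;	/* 500: pixels into line 3 */

	printf("vpos=%d hpos=%d\n", vpos, hpos);
	return 0;
}
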
267 | static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe, | ||
268 | int *max_error, | ||
269 | struct timeval *vblank_time, | ||
270 | unsigned flags) | ||
271 | { | ||
272 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
273 | struct drm_crtc *crtc; | ||
274 | |||
275 | if (pipe < 0 || pipe >= dev_priv->num_pipe) { | ||
276 | DRM_ERROR("Invalid crtc %d\n", pipe); | ||
277 | return -EINVAL; | ||
278 | } | ||
279 | |||
280 | /* Get drm_crtc to timestamp: */ | ||
281 | crtc = intel_get_crtc_for_pipe(dev, pipe); | ||
282 | if (crtc == NULL) { | ||
283 | DRM_ERROR("Invalid crtc %d\n", pipe); | ||
284 | return -EINVAL; | ||
285 | } | ||
286 | |||
287 | if (!crtc->enabled) { | ||
288 | DRM_DEBUG_KMS("crtc %d is disabled\n", pipe); | ||
289 | return -EBUSY; | ||
290 | } | ||
291 | |||
292 | /* Helper routine in DRM core does all the work: */ | ||
293 | return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error, | ||
294 | vblank_time, flags, | ||
295 | crtc); | ||
296 | } | ||
297 | |||
254 | /* | 298 | /* |
255 | * Handle hotplug events outside the interrupt handler proper. | 299 | * Handle hotplug events outside the interrupt handler proper. |
256 | */ | 300 | */ |
@@ -260,16 +304,14 @@ static void i915_hotplug_work_func(struct work_struct *work) | |||
260 | hotplug_work); | 304 | hotplug_work); |
261 | struct drm_device *dev = dev_priv->dev; | 305 | struct drm_device *dev = dev_priv->dev; |
262 | struct drm_mode_config *mode_config = &dev->mode_config; | 306 | struct drm_mode_config *mode_config = &dev->mode_config; |
263 | struct drm_encoder *encoder; | 307 | struct intel_encoder *encoder; |
264 | 308 | ||
265 | if (mode_config->num_encoder) { | 309 | DRM_DEBUG_KMS("running encoder hotplug functions\n"); |
266 | list_for_each_entry(encoder, &mode_config->encoder_list, head) { | 310 | |
267 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); | 311 | list_for_each_entry(encoder, &mode_config->encoder_list, base.head) |
268 | 312 | if (encoder->hot_plug) | |
269 | if (intel_encoder->hot_plug) | 313 | encoder->hot_plug(encoder); |
270 | (*intel_encoder->hot_plug) (intel_encoder); | 314 | |
271 | } | ||
272 | } | ||
273 | /* Just fire off a uevent and let userspace tell us what to do */ | 315 | /* Just fire off a uevent and let userspace tell us what to do */ |
274 | drm_helper_hpd_irq_event(dev); | 316 | drm_helper_hpd_irq_event(dev); |
275 | } | 317 | } |
@@ -305,24 +347,142 @@ static void i915_handle_rps_change(struct drm_device *dev) | |||
305 | return; | 347 | return; |
306 | } | 348 | } |
307 | 349 | ||
308 | irqreturn_t ironlake_irq_handler(struct drm_device *dev) | 350 | static void notify_ring(struct drm_device *dev, |
351 | struct intel_ring_buffer *ring) | ||
352 | { | ||
353 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
354 | u32 seqno; | ||
355 | |||
356 | if (ring->obj == NULL) | ||
357 | return; | ||
358 | |||
359 | seqno = ring->get_seqno(ring); | ||
360 | trace_i915_gem_request_complete(ring, seqno); | ||
361 | |||
362 | ring->irq_seqno = seqno; | ||
363 | wake_up_all(&ring->irq_queue); | ||
364 | |||
365 | dev_priv->hangcheck_count = 0; | ||
366 | mod_timer(&dev_priv->hangcheck_timer, | ||
367 | jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)); | ||
368 | } | ||
369 | |||
370 | static void gen6_pm_rps_work(struct work_struct *work) | ||
371 | { | ||
372 | drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t, | ||
373 | rps_work); | ||
374 | u8 new_delay = dev_priv->cur_delay; | ||
375 | u32 pm_iir, pm_imr; | ||
376 | |||
377 | spin_lock_irq(&dev_priv->rps_lock); | ||
378 | pm_iir = dev_priv->pm_iir; | ||
379 | dev_priv->pm_iir = 0; | ||
380 | pm_imr = I915_READ(GEN6_PMIMR); | ||
381 | spin_unlock_irq(&dev_priv->rps_lock); | ||
382 | |||
383 | if (!pm_iir) | ||
384 | return; | ||
385 | |||
386 | mutex_lock(&dev_priv->dev->struct_mutex); | ||
387 | if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) { | ||
388 | if (dev_priv->cur_delay != dev_priv->max_delay) | ||
389 | new_delay = dev_priv->cur_delay + 1; | ||
390 | if (new_delay > dev_priv->max_delay) | ||
391 | new_delay = dev_priv->max_delay; | ||
392 | } else if (pm_iir & (GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT)) { | ||
393 | gen6_gt_force_wake_get(dev_priv); | ||
394 | if (dev_priv->cur_delay != dev_priv->min_delay) | ||
395 | new_delay = dev_priv->cur_delay - 1; | ||
396 | if (new_delay < dev_priv->min_delay) { | ||
397 | new_delay = dev_priv->min_delay; | ||
398 | I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, | ||
399 | I915_READ(GEN6_RP_INTERRUPT_LIMITS) | | ||
400 | ((new_delay << 16) & 0x3f0000)); | ||
401 | } else { | ||
402 | /* Make sure we continue to get down interrupts | ||
403 | * until we hit the minimum frequency */ | ||
404 | I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, | ||
405 | I915_READ(GEN6_RP_INTERRUPT_LIMITS) & ~0x3f0000); | ||
406 | } | ||
407 | gen6_gt_force_wake_put(dev_priv); | ||
408 | } | ||
409 | |||
410 | gen6_set_rps(dev_priv->dev, new_delay); | ||
411 | dev_priv->cur_delay = new_delay; | ||
412 | |||
413 | /* | ||
414 | * rps_lock not held here because clearing is non-destructive. There is | ||
415 | * an *extremely* unlikely race with gen6_rps_enable() that is prevented | ||
416 | * by holding struct_mutex for the duration of the write. | ||
417 | */ | ||
418 | I915_WRITE(GEN6_PMIMR, pm_imr & ~pm_iir); | ||
419 | mutex_unlock(&dev_priv->dev->struct_mutex); | ||
420 | } | ||
421 | |||
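
gen6_pm_rps_work() above is one half of a mask-then-defer pattern: the hard IRQ masks the PM bits and stashes the IIR value in dev_priv->pm_iir under rps_lock, and the worker later consumes those bits and unmasks only what it handled. A single-threaded sketch of the hand-off, with the locks and workqueue elided and register I/O simulated by plain variables:

#include <stdio.h>
#include <stdint.h>

static uint32_t pm_imr;		/* interrupt mask register (simulated) */
static uint32_t pm_iir_deferred;	/* bits handed off to the worker */

static void irq_handler(uint32_t pm_iir)
{
	pm_imr |= pm_iir;	/* mask: no repeat until the worker runs */
	pm_iir_deferred |= pm_iir;
	/* queue_work(...) would go here */
}

static void rps_work(void)
{
	uint32_t pm_iir = pm_iir_deferred;

	pm_iir_deferred = 0;
	/* ... adjust the GPU frequency based on pm_iir ... */
	pm_imr &= ~pm_iir;	/* unmask only what was handled */
	printf("handled 0x%x, IMR now 0x%x\n", pm_iir, pm_imr);
}

int main(void)
{
	irq_handler(0x3);	/* say, up + down threshold bits */
	rps_work();
	return 0;
}
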
422 | static void pch_irq_handler(struct drm_device *dev) | ||
423 | { | ||
424 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | ||
425 | u32 pch_iir; | ||
426 | int pipe; | ||
427 | |||
428 | pch_iir = I915_READ(SDEIIR); | ||
429 | |||
430 | if (pch_iir & SDE_AUDIO_POWER_MASK) | ||
431 | DRM_DEBUG_DRIVER("PCH audio power change on port %d\n", | ||
432 | (pch_iir & SDE_AUDIO_POWER_MASK) >> | ||
433 | SDE_AUDIO_POWER_SHIFT); | ||
434 | |||
435 | if (pch_iir & SDE_GMBUS) | ||
436 | DRM_DEBUG_DRIVER("PCH GMBUS interrupt\n"); | ||
437 | |||
438 | if (pch_iir & SDE_AUDIO_HDCP_MASK) | ||
439 | DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n"); | ||
440 | |||
441 | if (pch_iir & SDE_AUDIO_TRANS_MASK) | ||
442 | DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n"); | ||
443 | |||
444 | if (pch_iir & SDE_POISON) | ||
445 | DRM_ERROR("PCH poison interrupt\n"); | ||
446 | |||
447 | if (pch_iir & SDE_FDI_MASK) | ||
448 | for_each_pipe(pipe) | ||
449 | DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", | ||
450 | pipe_name(pipe), | ||
451 | I915_READ(FDI_RX_IIR(pipe))); | ||
452 | |||
453 | if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE)) | ||
454 | DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n"); | ||
455 | |||
456 | if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR)) | ||
457 | DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n"); | ||
458 | |||
459 | if (pch_iir & SDE_TRANSB_FIFO_UNDER) | ||
460 | DRM_DEBUG_DRIVER("PCH transcoder B underrun interrupt\n"); | ||
461 | if (pch_iir & SDE_TRANSA_FIFO_UNDER) | ||
462 | DRM_DEBUG_DRIVER("PCH transcoder A underrun interrupt\n"); | ||
463 | } | ||
464 | |||
465 | static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS) | ||
309 | { | 466 | { |
467 | struct drm_device *dev = (struct drm_device *) arg; | ||
310 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 468 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
311 | int ret = IRQ_NONE; | 469 | int ret = IRQ_NONE; |
312 | u32 de_iir, gt_iir, de_ier, pch_iir; | 470 | u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir; |
313 | struct drm_i915_master_private *master_priv; | 471 | struct drm_i915_master_private *master_priv; |
314 | struct intel_ring_buffer *render_ring = &dev_priv->render_ring; | 472 | |
473 | atomic_inc(&dev_priv->irq_received); | ||
315 | 474 | ||
316 | /* disable master interrupt before clearing iir */ | 475 | /* disable master interrupt before clearing iir */ |
317 | de_ier = I915_READ(DEIER); | 476 | de_ier = I915_READ(DEIER); |
318 | I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL); | 477 | I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL); |
319 | (void)I915_READ(DEIER); | 478 | POSTING_READ(DEIER); |
320 | 479 | ||
321 | de_iir = I915_READ(DEIIR); | 480 | de_iir = I915_READ(DEIIR); |
322 | gt_iir = I915_READ(GTIIR); | 481 | gt_iir = I915_READ(GTIIR); |
323 | pch_iir = I915_READ(SDEIIR); | 482 | pch_iir = I915_READ(SDEIIR); |
483 | pm_iir = I915_READ(GEN6_PMIIR); | ||
324 | 484 | ||
325 | if (de_iir == 0 && gt_iir == 0 && pch_iir == 0) | 485 | if (de_iir == 0 && gt_iir == 0 && pch_iir == 0 && pm_iir == 0) |
326 | goto done; | 486 | goto done; |
327 | 487 | ||
328 | ret = IRQ_HANDLED; | 488 | ret = IRQ_HANDLED; |
@@ -334,29 +494,123 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev) | |||
334 | READ_BREADCRUMB(dev_priv); | 494 | READ_BREADCRUMB(dev_priv); |
335 | } | 495 | } |
336 | 496 | ||
337 | if (gt_iir & GT_PIPE_NOTIFY) { | 497 | if (gt_iir & (GT_USER_INTERRUPT | GT_PIPE_NOTIFY)) |
338 | u32 seqno = render_ring->get_gem_seqno(dev, render_ring); | 498 | notify_ring(dev, &dev_priv->ring[RCS]); |
339 | render_ring->irq_gem_seqno = seqno; | 499 | if (gt_iir & GT_GEN6_BSD_USER_INTERRUPT) |
340 | trace_i915_gem_request_complete(dev, seqno); | 500 | notify_ring(dev, &dev_priv->ring[VCS]); |
341 | DRM_WAKEUP(&dev_priv->render_ring.irq_queue); | 501 | if (gt_iir & GT_BLT_USER_INTERRUPT) |
342 | dev_priv->hangcheck_count = 0; | 502 | notify_ring(dev, &dev_priv->ring[BCS]); |
343 | mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD); | 503 | |
504 | if (de_iir & DE_GSE_IVB) | ||
505 | intel_opregion_gse_intr(dev); | ||
506 | |||
507 | if (de_iir & DE_PLANEA_FLIP_DONE_IVB) { | ||
508 | intel_prepare_page_flip(dev, 0); | ||
509 | intel_finish_page_flip_plane(dev, 0); | ||
510 | } | ||
511 | |||
512 | if (de_iir & DE_PLANEB_FLIP_DONE_IVB) { | ||
513 | intel_prepare_page_flip(dev, 1); | ||
514 | intel_finish_page_flip_plane(dev, 1); | ||
515 | } | ||
516 | |||
517 | if (de_iir & DE_PIPEA_VBLANK_IVB) | ||
518 | drm_handle_vblank(dev, 0); | ||
519 | |||
520 | if (de_iir & DE_PIPEB_VBLANK_IVB) | ||
521 | drm_handle_vblank(dev, 1); | ||
522 | |||
523 | /* check event from PCH */ | ||
524 | if (de_iir & DE_PCH_EVENT_IVB) { | ||
525 | if (pch_iir & SDE_HOTPLUG_MASK_CPT) | ||
526 | queue_work(dev_priv->wq, &dev_priv->hotplug_work); | ||
527 | pch_irq_handler(dev); | ||
344 | } | 528 | } |
345 | if (gt_iir & GT_BSD_USER_INTERRUPT) | ||
346 | DRM_WAKEUP(&dev_priv->bsd_ring.irq_queue); | ||
347 | 529 | ||
530 | if (pm_iir & GEN6_PM_DEFERRED_EVENTS) { | ||
531 | unsigned long flags; | ||
532 | spin_lock_irqsave(&dev_priv->rps_lock, flags); | ||
533 | WARN(dev_priv->pm_iir & pm_iir, "Missed a PM interrupt\n"); | ||
534 | I915_WRITE(GEN6_PMIMR, pm_iir); | ||
535 | dev_priv->pm_iir |= pm_iir; | ||
536 | spin_unlock_irqrestore(&dev_priv->rps_lock, flags); | ||
537 | queue_work(dev_priv->wq, &dev_priv->rps_work); | ||
538 | } | ||
539 | |||
540 | /* should clear the PCH hotplug event before clearing the CPU irq */ | ||
541 | I915_WRITE(SDEIIR, pch_iir); | ||
542 | I915_WRITE(GTIIR, gt_iir); | ||
543 | I915_WRITE(DEIIR, de_iir); | ||
544 | I915_WRITE(GEN6_PMIIR, pm_iir); | ||
545 | |||
546 | done: | ||
547 | I915_WRITE(DEIER, de_ier); | ||
548 | POSTING_READ(DEIER); | ||
549 | |||
550 | return ret; | ||
551 | } | ||
552 | |||
553 | static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS) | ||
554 | { | ||
555 | struct drm_device *dev = (struct drm_device *) arg; | ||
556 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | ||
557 | int ret = IRQ_NONE; | ||
558 | u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir; | ||
559 | u32 hotplug_mask; | ||
560 | struct drm_i915_master_private *master_priv; | ||
561 | u32 bsd_usr_interrupt = GT_BSD_USER_INTERRUPT; | ||
562 | |||
563 | atomic_inc(&dev_priv->irq_received); | ||
564 | |||
565 | if (IS_GEN6(dev)) | ||
566 | bsd_usr_interrupt = GT_GEN6_BSD_USER_INTERRUPT; | ||
567 | |||
568 | /* disable master interrupt before clearing iir */ | ||
569 | de_ier = I915_READ(DEIER); | ||
570 | I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL); | ||
571 | POSTING_READ(DEIER); | ||
572 | |||
573 | de_iir = I915_READ(DEIIR); | ||
574 | gt_iir = I915_READ(GTIIR); | ||
575 | pch_iir = I915_READ(SDEIIR); | ||
576 | pm_iir = I915_READ(GEN6_PMIIR); | ||
577 | |||
578 | if (de_iir == 0 && gt_iir == 0 && pch_iir == 0 && | ||
579 | (!IS_GEN6(dev) || pm_iir == 0)) | ||
580 | goto done; | ||
581 | |||
582 | if (HAS_PCH_CPT(dev)) | ||
583 | hotplug_mask = SDE_HOTPLUG_MASK_CPT; | ||
584 | else | ||
585 | hotplug_mask = SDE_HOTPLUG_MASK; | ||
586 | |||
587 | ret = IRQ_HANDLED; | ||
588 | |||
589 | if (dev->primary->master) { | ||
590 | master_priv = dev->primary->master->driver_priv; | ||
591 | if (master_priv->sarea_priv) | ||
592 | master_priv->sarea_priv->last_dispatch = | ||
593 | READ_BREADCRUMB(dev_priv); | ||
594 | } | ||
595 | |||
596 | if (gt_iir & (GT_USER_INTERRUPT | GT_PIPE_NOTIFY)) | ||
597 | notify_ring(dev, &dev_priv->ring[RCS]); | ||
598 | if (gt_iir & bsd_usr_interrupt) | ||
599 | notify_ring(dev, &dev_priv->ring[VCS]); | ||
600 | if (gt_iir & GT_BLT_USER_INTERRUPT) | ||
601 | notify_ring(dev, &dev_priv->ring[BCS]); | ||
348 | 602 | ||
349 | if (de_iir & DE_GSE) | 603 | if (de_iir & DE_GSE) |
350 | ironlake_opregion_gse_intr(dev); | 604 | intel_opregion_gse_intr(dev); |
351 | 605 | ||
352 | if (de_iir & DE_PLANEA_FLIP_DONE) { | 606 | if (de_iir & DE_PLANEA_FLIP_DONE) { |
353 | intel_prepare_page_flip(dev, 0); | 607 | intel_prepare_page_flip(dev, 0); |
354 | intel_finish_page_flip(dev, 0); | 608 | intel_finish_page_flip_plane(dev, 0); |
355 | } | 609 | } |
356 | 610 | ||
357 | if (de_iir & DE_PLANEB_FLIP_DONE) { | 611 | if (de_iir & DE_PLANEB_FLIP_DONE) { |
358 | intel_prepare_page_flip(dev, 1); | 612 | intel_prepare_page_flip(dev, 1); |
359 | intel_finish_page_flip(dev, 1); | 613 | intel_finish_page_flip_plane(dev, 1); |
360 | } | 614 | } |
361 | 615 | ||
362 | if (de_iir & DE_PIPEA_VBLANK) | 616 | if (de_iir & DE_PIPEA_VBLANK) |
@@ -366,9 +620,10 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev) | |||
366 | drm_handle_vblank(dev, 1); | 620 | drm_handle_vblank(dev, 1); |
367 | 621 | ||
368 | /* check event from PCH */ | 622 | /* check event from PCH */ |
369 | if ((de_iir & DE_PCH_EVENT) && | 623 | if (de_iir & DE_PCH_EVENT) { |
370 | (pch_iir & SDE_HOTPLUG_MASK)) { | 624 | if (pch_iir & hotplug_mask) |
371 | queue_work(dev_priv->wq, &dev_priv->hotplug_work); | 625 | queue_work(dev_priv->wq, &dev_priv->hotplug_work); |
626 | pch_irq_handler(dev); | ||
372 | } | 627 | } |
373 | 628 | ||
374 | if (de_iir & DE_PCU_EVENT) { | 629 | if (de_iir & DE_PCU_EVENT) { |
@@ -376,14 +631,34 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev) | |||
376 | i915_handle_rps_change(dev); | 631 | i915_handle_rps_change(dev); |
377 | } | 632 | } |
378 | 633 | ||
634 | if (IS_GEN6(dev) && pm_iir & GEN6_PM_DEFERRED_EVENTS) { | ||
635 | /* | ||
636 | * IIR bits should never already be set because IMR should | ||
637 | * prevent an interrupt from being shown in IIR. The warning | ||
638 | * flags a case where we've unsafely cleared | ||
639 | * dev_priv->pm_iir. Although missing an interrupt of the same | ||
640 | * type is not a problem, it indicates a problem in the logic. | ||
641 | * | ||
642 | * The mask bit in IMR is cleared by rps_work. | ||
643 | */ | ||
644 | unsigned long flags; | ||
645 | spin_lock_irqsave(&dev_priv->rps_lock, flags); | ||
646 | WARN(dev_priv->pm_iir & pm_iir, "Missed a PM interrupt\n"); | ||
647 | I915_WRITE(GEN6_PMIMR, pm_iir); | ||
648 | dev_priv->pm_iir |= pm_iir; | ||
649 | spin_unlock_irqrestore(&dev_priv->rps_lock, flags); | ||
650 | queue_work(dev_priv->wq, &dev_priv->rps_work); | ||
651 | } | ||
652 | |||
379 | /* should clear the PCH hotplug event before clearing the CPU irq */ | 653 | /* should clear the PCH hotplug event before clearing the CPU irq */ |
380 | I915_WRITE(SDEIIR, pch_iir); | 654 | I915_WRITE(SDEIIR, pch_iir); |
381 | I915_WRITE(GTIIR, gt_iir); | 655 | I915_WRITE(GTIIR, gt_iir); |
382 | I915_WRITE(DEIIR, de_iir); | 656 | I915_WRITE(DEIIR, de_iir); |
657 | I915_WRITE(GEN6_PMIIR, pm_iir); | ||
383 | 658 | ||
384 | done: | 659 | done: |
385 | I915_WRITE(DEIER, de_ier); | 660 | I915_WRITE(DEIER, de_ier); |
386 | (void)I915_READ(DEIER); | 661 | POSTING_READ(DEIER); |
387 | 662 | ||
388 | return ret; | 663 | return ret; |
389 | } | 664 | } |
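
Both handlers above share the same skeleton: gate the master interrupt, snapshot every IIR register, bail with IRQ_NONE if nothing is pending, handle the events, then ack the PCH IIR before the CPU-side IIRs and restore the master bit. A condensed sketch of that flow with register I/O simulated by plain variables:

#include <stdio.h>
#include <stdint.h>

#define DE_MASTER_IRQ_CONTROL 0x80000000u

static uint32_t deier = DE_MASTER_IRQ_CONTROL;
static uint32_t deiir = 0x1, gtiir = 0x2, sdeiir = 0x4;

static int irq_handler(void)
{
	uint32_t saved = deier;

	deier = saved & ~DE_MASTER_IRQ_CONTROL;	/* gate further IRQs */

	uint32_t de = deiir, gt = gtiir, pch = sdeiir; /* snapshot IIRs */
	if (!de && !gt && !pch) {
		deier = saved;
		return 0;	/* IRQ_NONE */
	}

	printf("handling de=0x%x gt=0x%x pch=0x%x\n", de, gt, pch);

	sdeiir = 0;	/* clear the PCH event before the CPU irq */
	gtiir = 0;
	deiir = 0;
	deier = saved;	/* re-enable the master interrupt */
	return 1;	/* IRQ_HANDLED */
}

int main(void)
{
	printf("ret=%d\n", irq_handler());
	return 0;
}
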
@@ -404,47 +679,38 @@ static void i915_error_work_func(struct work_struct *work) | |||
404 | char *reset_event[] = { "RESET=1", NULL }; | 679 | char *reset_event[] = { "RESET=1", NULL }; |
405 | char *reset_done_event[] = { "ERROR=0", NULL }; | 680 | char *reset_done_event[] = { "ERROR=0", NULL }; |
406 | 681 | ||
407 | DRM_DEBUG_DRIVER("generating error event\n"); | ||
408 | kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event); | 682 | kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event); |
409 | 683 | ||
410 | if (atomic_read(&dev_priv->mm.wedged)) { | 684 | if (atomic_read(&dev_priv->mm.wedged)) { |
411 | if (IS_I965G(dev)) { | 685 | DRM_DEBUG_DRIVER("resetting chip\n"); |
412 | DRM_DEBUG_DRIVER("resetting chip\n"); | 686 | kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_event); |
413 | kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_event); | 687 | if (!i915_reset(dev, GRDOM_RENDER)) { |
414 | if (!i965_reset(dev, GDRST_RENDER)) { | 688 | atomic_set(&dev_priv->mm.wedged, 0); |
415 | atomic_set(&dev_priv->mm.wedged, 0); | 689 | kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_done_event); |
416 | kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_done_event); | ||
417 | } | ||
418 | } else { | ||
419 | DRM_DEBUG_DRIVER("reboot required\n"); | ||
420 | } | 690 | } |
691 | complete_all(&dev_priv->error_completion); | ||
421 | } | 692 | } |
422 | } | 693 | } |
423 | 694 | ||
695 | #ifdef CONFIG_DEBUG_FS | ||
424 | static struct drm_i915_error_object * | 696 | static struct drm_i915_error_object * |
425 | i915_error_object_create(struct drm_device *dev, | 697 | i915_error_object_create(struct drm_i915_private *dev_priv, |
426 | struct drm_gem_object *src) | 698 | struct drm_i915_gem_object *src) |
427 | { | 699 | { |
428 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
429 | struct drm_i915_error_object *dst; | 700 | struct drm_i915_error_object *dst; |
430 | struct drm_i915_gem_object *src_priv; | ||
431 | int page, page_count; | 701 | int page, page_count; |
432 | u32 reloc_offset; | 702 | u32 reloc_offset; |
433 | 703 | ||
434 | if (src == NULL) | 704 | if (src == NULL || src->pages == NULL) |
435 | return NULL; | 705 | return NULL; |
436 | 706 | ||
437 | src_priv = to_intel_bo(src); | 707 | page_count = src->base.size / PAGE_SIZE; |
438 | if (src_priv->pages == NULL) | ||
439 | return NULL; | ||
440 | |||
441 | page_count = src->size / PAGE_SIZE; | ||
442 | 708 | ||
443 | dst = kmalloc(sizeof(*dst) + page_count * sizeof (u32 *), GFP_ATOMIC); | 709 | dst = kmalloc(sizeof(*dst) + page_count * sizeof (u32 *), GFP_ATOMIC); |
444 | if (dst == NULL) | 710 | if (dst == NULL) |
445 | return NULL; | 711 | return NULL; |
446 | 712 | ||
447 | reloc_offset = src_priv->gtt_offset; | 713 | reloc_offset = src->gtt_offset; |
448 | for (page = 0; page < page_count; page++) { | 714 | for (page = 0; page < page_count; page++) { |
449 | unsigned long flags; | 715 | unsigned long flags; |
450 | void __iomem *s; | 716 | void __iomem *s; |
@@ -456,10 +722,9 @@ i915_error_object_create(struct drm_device *dev, | |||
456 | 722 | ||
457 | local_irq_save(flags); | 723 | local_irq_save(flags); |
458 | s = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping, | 724 | s = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping, |
459 | reloc_offset, | 725 | reloc_offset); |
460 | KM_IRQ0); | ||
461 | memcpy_fromio(d, s, PAGE_SIZE); | 726 | memcpy_fromio(d, s, PAGE_SIZE); |
462 | io_mapping_unmap_atomic(s, KM_IRQ0); | 727 | io_mapping_unmap_atomic(s); |
463 | local_irq_restore(flags); | 728 | local_irq_restore(flags); |
464 | 729 | ||
465 | dst->pages[page] = d; | 730 | dst->pages[page] = d; |
@@ -467,7 +732,7 @@ i915_error_object_create(struct drm_device *dev, | |||
467 | reloc_offset += PAGE_SIZE; | 732 | reloc_offset += PAGE_SIZE; |
468 | } | 733 | } |
469 | dst->page_count = page_count; | 734 | dst->page_count = page_count; |
470 | dst->gtt_offset = src_priv->gtt_offset; | 735 | dst->gtt_offset = src->gtt_offset; |
471 | 736 | ||
472 | return dst; | 737 | return dst; |
473 | 738 | ||
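
The snapshot loop above copies each page of the source object out of the GTT aperture into an anonymous buffer, so the eventual error dump cannot be overwritten by userspace reusing the object. A standalone sketch of that copy-out, with plain arrays replacing the kmap/io_mapping plumbing:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE 4096

int main(void)
{
	int page_count = 2;
	unsigned char *aperture = calloc(page_count, PAGE_SIZE); /* fake GTT */
	unsigned char **dst = malloc(page_count * sizeof(*dst));

	memset(aperture, 0xab, page_count * PAGE_SIZE);

	for (int page = 0; page < page_count; page++) {
		dst[page] = malloc(PAGE_SIZE);	/* GFP_ATOMIC in the driver */
		/* memcpy_fromio() through an atomic mapping in the real code */
		memcpy(dst[page], aperture + page * PAGE_SIZE, PAGE_SIZE);
	}

	printf("snapshot byte: 0x%02x\n", dst[0][0]); /* 0xab */

	for (int page = 0; page < page_count; page++)
		free(dst[page]);
	free(dst);
	free(aperture);
	return 0;
}
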
@@ -496,61 +761,111 @@ static void | |||
496 | i915_error_state_free(struct drm_device *dev, | 761 | i915_error_state_free(struct drm_device *dev, |
497 | struct drm_i915_error_state *error) | 762 | struct drm_i915_error_state *error) |
498 | { | 763 | { |
499 | i915_error_object_free(error->batchbuffer[0]); | 764 | int i; |
500 | i915_error_object_free(error->batchbuffer[1]); | 765 | |
501 | i915_error_object_free(error->ringbuffer); | 766 | for (i = 0; i < ARRAY_SIZE(error->batchbuffer); i++) |
767 | i915_error_object_free(error->batchbuffer[i]); | ||
768 | |||
769 | for (i = 0; i < ARRAY_SIZE(error->ringbuffer); i++) | ||
770 | i915_error_object_free(error->ringbuffer[i]); | ||
771 | |||
502 | kfree(error->active_bo); | 772 | kfree(error->active_bo); |
503 | kfree(error->overlay); | 773 | kfree(error->overlay); |
504 | kfree(error); | 774 | kfree(error); |
505 | } | 775 | } |
506 | 776 | ||
507 | static u32 | 777 | static u32 capture_bo_list(struct drm_i915_error_buffer *err, |
508 | i915_get_bbaddr(struct drm_device *dev, u32 *ring) | 778 | int count, |
779 | struct list_head *head) | ||
509 | { | 780 | { |
510 | u32 cmd; | 781 | struct drm_i915_gem_object *obj; |
782 | int i = 0; | ||
783 | |||
784 | list_for_each_entry(obj, head, mm_list) { | ||
785 | err->size = obj->base.size; | ||
786 | err->name = obj->base.name; | ||
787 | err->seqno = obj->last_rendering_seqno; | ||
788 | err->gtt_offset = obj->gtt_offset; | ||
789 | err->read_domains = obj->base.read_domains; | ||
790 | err->write_domain = obj->base.write_domain; | ||
791 | err->fence_reg = obj->fence_reg; | ||
792 | err->pinned = 0; | ||
793 | if (obj->pin_count > 0) | ||
794 | err->pinned = 1; | ||
795 | if (obj->user_pin_count > 0) | ||
796 | err->pinned = -1; | ||
797 | err->tiling = obj->tiling_mode; | ||
798 | err->dirty = obj->dirty; | ||
799 | err->purgeable = obj->madv != I915_MADV_WILLNEED; | ||
800 | err->ring = obj->ring ? obj->ring->id : 0; | ||
801 | err->cache_level = obj->cache_level; | ||
802 | |||
803 | if (++i == count) | ||
804 | break; | ||
511 | 805 | ||
512 | if (IS_I830(dev) || IS_845G(dev)) | 806 | err++; |
513 | cmd = MI_BATCH_BUFFER; | 807 | } |
514 | else if (IS_I965G(dev)) | ||
515 | cmd = (MI_BATCH_BUFFER_START | (2 << 6) | | ||
516 | MI_BATCH_NON_SECURE_I965); | ||
517 | else | ||
518 | cmd = (MI_BATCH_BUFFER_START | (2 << 6)); | ||
519 | 808 | ||
520 | return ring[0] == cmd ? ring[1] : 0; | 809 | return i; |
521 | } | 810 | } |
522 | 811 | ||
523 | static u32 | 812 | static void i915_gem_record_fences(struct drm_device *dev, |
524 | i915_ringbuffer_last_batch(struct drm_device *dev) | 813 | struct drm_i915_error_state *error) |
525 | { | 814 | { |
526 | struct drm_i915_private *dev_priv = dev->dev_private; | 815 | struct drm_i915_private *dev_priv = dev->dev_private; |
527 | u32 head, bbaddr; | 816 | int i; |
528 | u32 *ring; | 817 | |
529 | 818 | /* Fences */ | |
530 | /* Locate the current position in the ringbuffer and walk back | 819 | switch (INTEL_INFO(dev)->gen) { |
531 | * to find the most recently dispatched batch buffer. | 820 | case 6: |
532 | */ | 821 | for (i = 0; i < 16; i++) |
533 | bbaddr = 0; | 822 | error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8)); |
534 | head = I915_READ(PRB0_HEAD) & HEAD_ADDR; | 823 | break; |
535 | ring = (u32 *)(dev_priv->render_ring.virtual_start + head); | 824 | case 5: |
825 | case 4: | ||
826 | for (i = 0; i < 16; i++) | ||
827 | error->fence[i] = I915_READ64(FENCE_REG_965_0 + (i * 8)); | ||
828 | break; | ||
829 | case 3: | ||
830 | if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) | ||
831 | for (i = 0; i < 8; i++) | ||
832 | error->fence[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4)); | ||
833 | case 2: | ||
834 | for (i = 0; i < 8; i++) | ||
835 | error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4)); | ||
836 | break; | ||
536 | 837 | ||
537 | while (--ring >= (u32 *)dev_priv->render_ring.virtual_start) { | ||
538 | bbaddr = i915_get_bbaddr(dev, ring); | ||
539 | if (bbaddr) | ||
540 | break; | ||
541 | } | 838 | } |
839 | } | ||
542 | 840 | ||
543 | if (bbaddr == 0) { | 841 | static struct drm_i915_error_object * |
544 | ring = (u32 *)(dev_priv->render_ring.virtual_start | 842 | i915_error_first_batchbuffer(struct drm_i915_private *dev_priv, |
545 | + dev_priv->render_ring.size); | 843 | struct intel_ring_buffer *ring) |
546 | while (--ring >= (u32 *)dev_priv->render_ring.virtual_start) { | 844 | { |
547 | bbaddr = i915_get_bbaddr(dev, ring); | 845 | struct drm_i915_gem_object *obj; |
548 | if (bbaddr) | 846 | u32 seqno; |
549 | break; | 847 | |
550 | } | 848 | if (!ring->get_seqno) |
849 | return NULL; | ||
850 | |||
851 | seqno = ring->get_seqno(ring); | ||
852 | list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) { | ||
853 | if (obj->ring != ring) | ||
854 | continue; | ||
855 | |||
856 | if (i915_seqno_passed(seqno, obj->last_rendering_seqno)) | ||
857 | continue; | ||
858 | |||
859 | if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0) | ||
860 | continue; | ||
861 | |||
862 | /* We need to copy these to an anonymous buffer as the simplest | ||
863 | * method to avoid being overwritten by userspace. | ||
864 | */ | ||
865 | return i915_error_object_create(dev_priv, obj); | ||
551 | } | 866 | } |
552 | 867 | ||
553 | return bbaddr; | 868 | return NULL; |
554 | } | 869 | } |
555 | 870 | ||
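i915_error_first_batchbuffer() picks the first object on the active list that belongs to the hung ring, has not yet been retired past the ring's current seqno, and was read through the command domain. A simplified sketch of that selection, with plain structs standing in for the GEM objects and a wrap-safe comparison mirroring i915_seqno_passed():

    #include <stdbool.h>
    #include <stdio.h>

    #define DOMAIN_COMMAND 0x1

    struct obj { int ring_id; unsigned last_seqno; unsigned read_domains; };

    static bool seqno_passed(unsigned seqno, unsigned target)
    {
            return (int)(seqno - target) >= 0;  /* wrap-safe comparison */
    }

    static const struct obj *first_batch(const struct obj *o, int n,
                                         int ring_id, unsigned hw_seqno)
    {
            for (int i = 0; i < n; i++, o++) {
                    if (o->ring_id != ring_id)
                            continue;
                    if (seqno_passed(hw_seqno, o->last_seqno))
                            continue;           /* already completed */
                    if (!(o->read_domains & DOMAIN_COMMAND))
                            continue;           /* not a batch buffer */
                    return o;                   /* most likely culprit */
            }
            return NULL;
    }

    int main(void)
    {
            struct obj objs[] = {
                    { 0, 10, DOMAIN_COMMAND },  /* retired */
                    { 0, 20, DOMAIN_COMMAND },  /* still pending: picked */
            };

            printf("%s\n", first_batch(objs, 2, 0, 15) == &objs[1] ? "ok" : "no");
            return 0;
    }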
556 | /** | 871 | /** |
@@ -565,12 +880,10 @@ i915_ringbuffer_last_batch(struct drm_device *dev) | |||
565 | static void i915_capture_error_state(struct drm_device *dev) | 880 | static void i915_capture_error_state(struct drm_device *dev) |
566 | { | 881 | { |
567 | struct drm_i915_private *dev_priv = dev->dev_private; | 882 | struct drm_i915_private *dev_priv = dev->dev_private; |
568 | struct drm_i915_gem_object *obj_priv; | 883 | struct drm_i915_gem_object *obj; |
569 | struct drm_i915_error_state *error; | 884 | struct drm_i915_error_state *error; |
570 | struct drm_gem_object *batchbuffer[2]; | ||
571 | unsigned long flags; | 885 | unsigned long flags; |
572 | u32 bbaddr; | 886 | int i, pipe; |
573 | int count; | ||
574 | 887 | ||
575 | spin_lock_irqsave(&dev_priv->error_lock, flags); | 888 | spin_lock_irqsave(&dev_priv->error_lock, flags); |
576 | error = dev_priv->first_error; | 889 | error = dev_priv->first_error; |
@@ -578,25 +891,43 @@ static void i915_capture_error_state(struct drm_device *dev) | |||
578 | if (error) | 891 | if (error) |
579 | return; | 892 | return; |
580 | 893 | ||
894 | /* Account for pipe specific data like PIPE*STAT */ | ||
581 | error = kmalloc(sizeof(*error), GFP_ATOMIC); | 895 | error = kmalloc(sizeof(*error), GFP_ATOMIC); |
582 | if (!error) { | 896 | if (!error) { |
583 | DRM_DEBUG_DRIVER("out of memory, not capturing error state\n"); | 897 | DRM_DEBUG_DRIVER("out of memory, not capturing error state\n"); |
584 | return; | 898 | return; |
585 | } | 899 | } |
586 | 900 | ||
587 | error->seqno = i915_get_gem_seqno(dev, &dev_priv->render_ring); | 901 | DRM_INFO("capturing error event; look for more information in /debug/dri/%d/i915_error_state\n", |
902 | dev->primary->index); | ||
903 | |||
904 | error->seqno = dev_priv->ring[RCS].get_seqno(&dev_priv->ring[RCS]); | ||
588 | error->eir = I915_READ(EIR); | 905 | error->eir = I915_READ(EIR); |
589 | error->pgtbl_er = I915_READ(PGTBL_ER); | 906 | error->pgtbl_er = I915_READ(PGTBL_ER); |
590 | error->pipeastat = I915_READ(PIPEASTAT); | 907 | for_each_pipe(pipe) |
591 | error->pipebstat = I915_READ(PIPEBSTAT); | 908 | error->pipestat[pipe] = I915_READ(PIPESTAT(pipe)); |
592 | error->instpm = I915_READ(INSTPM); | 909 | error->instpm = I915_READ(INSTPM); |
593 | if (!IS_I965G(dev)) { | 910 | error->error = 0; |
594 | error->ipeir = I915_READ(IPEIR); | 911 | if (INTEL_INFO(dev)->gen >= 6) { |
595 | error->ipehr = I915_READ(IPEHR); | 912 | error->error = I915_READ(ERROR_GEN6); |
596 | error->instdone = I915_READ(INSTDONE); | 913 | |
597 | error->acthd = I915_READ(ACTHD); | 914 | error->bcs_acthd = I915_READ(BCS_ACTHD); |
598 | error->bbaddr = 0; | 915 | error->bcs_ipehr = I915_READ(BCS_IPEHR); |
599 | } else { | 916 | error->bcs_ipeir = I915_READ(BCS_IPEIR); |
917 | error->bcs_instdone = I915_READ(BCS_INSTDONE); | ||
918 | error->bcs_seqno = 0; | ||
919 | if (dev_priv->ring[BCS].get_seqno) | ||
920 | error->bcs_seqno = dev_priv->ring[BCS].get_seqno(&dev_priv->ring[BCS]); | ||
921 | |||
922 | error->vcs_acthd = I915_READ(VCS_ACTHD); | ||
923 | error->vcs_ipehr = I915_READ(VCS_IPEHR); | ||
924 | error->vcs_ipeir = I915_READ(VCS_IPEIR); | ||
925 | error->vcs_instdone = I915_READ(VCS_INSTDONE); | ||
926 | error->vcs_seqno = 0; | ||
927 | if (dev_priv->ring[VCS].get_seqno) | ||
928 | error->vcs_seqno = dev_priv->ring[VCS].get_seqno(&dev_priv->ring[VCS]); | ||
929 | } | ||
930 | if (INTEL_INFO(dev)->gen >= 4) { | ||
600 | error->ipeir = I915_READ(IPEIR_I965); | 931 | error->ipeir = I915_READ(IPEIR_I965); |
601 | error->ipehr = I915_READ(IPEHR_I965); | 932 | error->ipehr = I915_READ(IPEHR_I965); |
602 | error->instdone = I915_READ(INSTDONE_I965); | 933 | error->instdone = I915_READ(INSTDONE_I965); |
@@ -604,121 +935,64 @@ static void i915_capture_error_state(struct drm_device *dev) | |||
604 | error->instdone1 = I915_READ(INSTDONE1); | 935 | error->instdone1 = I915_READ(INSTDONE1); |
605 | error->acthd = I915_READ(ACTHD_I965); | 936 | error->acthd = I915_READ(ACTHD_I965); |
606 | error->bbaddr = I915_READ64(BB_ADDR); | 937 | error->bbaddr = I915_READ64(BB_ADDR); |
938 | } else { | ||
939 | error->ipeir = I915_READ(IPEIR); | ||
940 | error->ipehr = I915_READ(IPEHR); | ||
941 | error->instdone = I915_READ(INSTDONE); | ||
942 | error->acthd = I915_READ(ACTHD); | ||
943 | error->bbaddr = 0; | ||
607 | } | 944 | } |
945 | i915_gem_record_fences(dev, error); | ||
608 | 946 | ||
609 | bbaddr = i915_ringbuffer_last_batch(dev); | 947 | /* Record the active batch and ring buffers */ |
610 | 948 | for (i = 0; i < I915_NUM_RINGS; i++) { | |
611 | /* Grab the current batchbuffer, most likely to have crashed. */ | 949 | error->batchbuffer[i] = |
612 | batchbuffer[0] = NULL; | 950 | i915_error_first_batchbuffer(dev_priv, |
613 | batchbuffer[1] = NULL; | 951 | &dev_priv->ring[i]); |
614 | count = 0; | ||
615 | list_for_each_entry(obj_priv, | ||
616 | &dev_priv->render_ring.active_list, list) { | ||
617 | |||
618 | struct drm_gem_object *obj = &obj_priv->base; | ||
619 | |||
620 | if (batchbuffer[0] == NULL && | ||
621 | bbaddr >= obj_priv->gtt_offset && | ||
622 | bbaddr < obj_priv->gtt_offset + obj->size) | ||
623 | batchbuffer[0] = obj; | ||
624 | |||
625 | if (batchbuffer[1] == NULL && | ||
626 | error->acthd >= obj_priv->gtt_offset && | ||
627 | error->acthd < obj_priv->gtt_offset + obj->size) | ||
628 | batchbuffer[1] = obj; | ||
629 | |||
630 | count++; | ||
631 | } | ||
632 | /* Scan the other lists for completeness for those bizarre errors. */ | ||
633 | if (batchbuffer[0] == NULL || batchbuffer[1] == NULL) { | ||
634 | list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, list) { | ||
635 | struct drm_gem_object *obj = &obj_priv->base; | ||
636 | |||
637 | if (batchbuffer[0] == NULL && | ||
638 | bbaddr >= obj_priv->gtt_offset && | ||
639 | bbaddr < obj_priv->gtt_offset + obj->size) | ||
640 | batchbuffer[0] = obj; | ||
641 | |||
642 | if (batchbuffer[1] == NULL && | ||
643 | error->acthd >= obj_priv->gtt_offset && | ||
644 | error->acthd < obj_priv->gtt_offset + obj->size) | ||
645 | batchbuffer[1] = obj; | ||
646 | |||
647 | if (batchbuffer[0] && batchbuffer[1]) | ||
648 | break; | ||
649 | } | ||
650 | } | ||
651 | if (batchbuffer[0] == NULL || batchbuffer[1] == NULL) { | ||
652 | list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) { | ||
653 | struct drm_gem_object *obj = &obj_priv->base; | ||
654 | |||
655 | if (batchbuffer[0] == NULL && | ||
656 | bbaddr >= obj_priv->gtt_offset && | ||
657 | bbaddr < obj_priv->gtt_offset + obj->size) | ||
658 | batchbuffer[0] = obj; | ||
659 | |||
660 | if (batchbuffer[1] == NULL && | ||
661 | error->acthd >= obj_priv->gtt_offset && | ||
662 | error->acthd < obj_priv->gtt_offset + obj->size) | ||
663 | batchbuffer[1] = obj; | ||
664 | 952 | ||
665 | if (batchbuffer[0] && batchbuffer[1]) | 953 | error->ringbuffer[i] = |
666 | break; | 954 | i915_error_object_create(dev_priv, |
667 | } | 955 | dev_priv->ring[i].obj); |
668 | } | 956 | } |
669 | 957 | ||
670 | /* We need to copy these to an anonymous buffer as the simplest | 958 | /* Record buffers on the active and pinned lists. */ |
671 | * method to avoid being overwritten by userspace. | 959 | error->active_bo = NULL;
672 | */ | 960 | error->pinned_bo = NULL; |
673 | error->batchbuffer[0] = i915_error_object_create(dev, batchbuffer[0]); | ||
674 | if (batchbuffer[1] != batchbuffer[0]) | ||
675 | error->batchbuffer[1] = i915_error_object_create(dev, batchbuffer[1]); | ||
676 | else | ||
677 | error->batchbuffer[1] = NULL; | ||
678 | 961 | ||
679 | /* Record the ringbuffer */ | 962 | i = 0; |
680 | error->ringbuffer = i915_error_object_create(dev, | 963 | list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) |
681 | dev_priv->render_ring.gem_object); | 964 | i++; |
965 | error->active_bo_count = i; | ||
966 | list_for_each_entry(obj, &dev_priv->mm.pinned_list, mm_list) | ||
967 | i++; | ||
968 | error->pinned_bo_count = i - error->active_bo_count; | ||
682 | 969 | ||
683 | /* Record buffers on the active list. */ | ||
684 | error->active_bo = NULL; | 970 | error->active_bo = NULL; |
685 | error->active_bo_count = 0; | 971 | error->pinned_bo = NULL; |
686 | 972 | if (i) { | |
687 | if (count) | 973 | error->active_bo = kmalloc(sizeof(*error->active_bo)*i, |
688 | error->active_bo = kmalloc(sizeof(*error->active_bo)*count, | ||
689 | GFP_ATOMIC); | 974 | GFP_ATOMIC); |
690 | 975 | if (error->active_bo) | |
691 | if (error->active_bo) { | 976 | error->pinned_bo = |
692 | int i = 0; | 977 | error->active_bo + error->active_bo_count; |
693 | list_for_each_entry(obj_priv, | ||
694 | &dev_priv->render_ring.active_list, list) { | ||
695 | struct drm_gem_object *obj = &obj_priv->base; | ||
696 | |||
697 | error->active_bo[i].size = obj->size; | ||
698 | error->active_bo[i].name = obj->name; | ||
699 | error->active_bo[i].seqno = obj_priv->last_rendering_seqno; | ||
700 | error->active_bo[i].gtt_offset = obj_priv->gtt_offset; | ||
701 | error->active_bo[i].read_domains = obj->read_domains; | ||
702 | error->active_bo[i].write_domain = obj->write_domain; | ||
703 | error->active_bo[i].fence_reg = obj_priv->fence_reg; | ||
704 | error->active_bo[i].pinned = 0; | ||
705 | if (obj_priv->pin_count > 0) | ||
706 | error->active_bo[i].pinned = 1; | ||
707 | if (obj_priv->user_pin_count > 0) | ||
708 | error->active_bo[i].pinned = -1; | ||
709 | error->active_bo[i].tiling = obj_priv->tiling_mode; | ||
710 | error->active_bo[i].dirty = obj_priv->dirty; | ||
711 | error->active_bo[i].purgeable = obj_priv->madv != I915_MADV_WILLNEED; | ||
712 | |||
713 | if (++i == count) | ||
714 | break; | ||
715 | } | ||
716 | error->active_bo_count = i; | ||
717 | } | 978 | } |
718 | 979 | ||
980 | if (error->active_bo) | ||
981 | error->active_bo_count = | ||
982 | capture_bo_list(error->active_bo, | ||
983 | error->active_bo_count, | ||
984 | &dev_priv->mm.active_list); | ||
985 | |||
986 | if (error->pinned_bo) | ||
987 | error->pinned_bo_count = | ||
988 | capture_bo_list(error->pinned_bo, | ||
989 | error->pinned_bo_count, | ||
990 | &dev_priv->mm.pinned_list); | ||
991 | |||
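The rework above counts the active and pinned lists first, then makes a single GFP_ATOMIC allocation and carves it into two halves, one per list. A small sketch of the count-then-allocate-then-fill pattern, with malloc() standing in for kmalloc() and a trivial list type:

    #include <stdio.h>
    #include <stdlib.h>

    struct node { struct node *next; };

    static int list_len(const struct node *n)
    {
            int i = 0;

            for (; n; n = n->next)
                    i++;
            return i;
    }

    int main(void)
    {
            struct node a2 = { NULL }, a1 = { &a2 }, p1 = { NULL };
            int active = list_len(&a1);     /* pass 1: count both lists */
            int pinned = list_len(&p1);
            int *bo = malloc(sizeof(*bo) * (active + pinned));

            if (bo) {                       /* pass 2: carve and fill */
                    int *active_bo = bo;
                    int *pinned_bo = bo + active;   /* second half */

                    active_bo[0] = pinned_bo[0] = 0;
                    printf("%d active, %d pinned\n", active, pinned);
            }
            free(bo);
            return 0;
    }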
719 | do_gettimeofday(&error->time); | 992 | do_gettimeofday(&error->time); |
720 | 993 | ||
721 | error->overlay = intel_overlay_capture_error_state(dev); | 994 | error->overlay = intel_overlay_capture_error_state(dev); |
995 | error->display = intel_display_capture_error_state(dev); | ||
722 | 996 | ||
723 | spin_lock_irqsave(&dev_priv->error_lock, flags); | 997 | spin_lock_irqsave(&dev_priv->error_lock, flags); |
724 | if (dev_priv->first_error == NULL) { | 998 | if (dev_priv->first_error == NULL) { |
@@ -744,11 +1018,15 @@ void i915_destroy_error_state(struct drm_device *dev) | |||
744 | if (error) | 1018 | if (error) |
745 | i915_error_state_free(dev, error); | 1019 | i915_error_state_free(dev, error); |
746 | } | 1020 | } |
1021 | #else | ||
1022 | #define i915_capture_error_state(x) | ||
1023 | #endif | ||
747 | 1024 | ||
748 | static void i915_report_and_clear_eir(struct drm_device *dev) | 1025 | static void i915_report_and_clear_eir(struct drm_device *dev) |
749 | { | 1026 | { |
750 | struct drm_i915_private *dev_priv = dev->dev_private; | 1027 | struct drm_i915_private *dev_priv = dev->dev_private; |
751 | u32 eir = I915_READ(EIR); | 1028 | u32 eir = I915_READ(EIR); |
1029 | int pipe; | ||
752 | 1030 | ||
753 | if (!eir) | 1031 | if (!eir) |
754 | return; | 1032 | return; |
@@ -773,7 +1051,7 @@ static void i915_report_and_clear_eir(struct drm_device *dev) | |||
773 | printk(KERN_ERR " ACTHD: 0x%08x\n", | 1051 | printk(KERN_ERR " ACTHD: 0x%08x\n", |
774 | I915_READ(ACTHD_I965)); | 1052 | I915_READ(ACTHD_I965)); |
775 | I915_WRITE(IPEIR_I965, ipeir); | 1053 | I915_WRITE(IPEIR_I965, ipeir); |
776 | (void)I915_READ(IPEIR_I965); | 1054 | POSTING_READ(IPEIR_I965); |
777 | } | 1055 | } |
778 | if (eir & GM45_ERROR_PAGE_TABLE) { | 1056 | if (eir & GM45_ERROR_PAGE_TABLE) { |
779 | u32 pgtbl_err = I915_READ(PGTBL_ER); | 1057 | u32 pgtbl_err = I915_READ(PGTBL_ER); |
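Throughout this patch the (void)I915_READ(reg) idiom is replaced by POSTING_READ(reg), which names the intent: read the register back and discard the value so the preceding MMIO write is posted to the device before execution continues. A toy sketch of the idea, with a volatile variable standing in for an ioremapped register:

    #include <stdint.h>
    #include <stdio.h>

    static volatile uint32_t fake_reg;      /* stands in for MMIO space */

    static void posting_read(volatile uint32_t *reg)
    {
            (void)*reg;     /* value discarded; the read itself is the point */
    }

    int main(void)
    {
            fake_reg = 0xeffe;              /* the write we want posted */
            posting_read(&fake_reg);        /* flush it out of write buffers */
            printf("0x%x\n", (unsigned)fake_reg);
            return 0;
    }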
@@ -781,37 +1059,33 @@ static void i915_report_and_clear_eir(struct drm_device *dev) | |||
781 | printk(KERN_ERR " PGTBL_ER: 0x%08x\n", | 1059 | printk(KERN_ERR " PGTBL_ER: 0x%08x\n", |
782 | pgtbl_err); | 1060 | pgtbl_err); |
783 | I915_WRITE(PGTBL_ER, pgtbl_err); | 1061 | I915_WRITE(PGTBL_ER, pgtbl_err); |
784 | (void)I915_READ(PGTBL_ER); | 1062 | POSTING_READ(PGTBL_ER); |
785 | } | 1063 | } |
786 | } | 1064 | } |
787 | 1065 | ||
788 | if (IS_I9XX(dev)) { | 1066 | if (!IS_GEN2(dev)) { |
789 | if (eir & I915_ERROR_PAGE_TABLE) { | 1067 | if (eir & I915_ERROR_PAGE_TABLE) { |
790 | u32 pgtbl_err = I915_READ(PGTBL_ER); | 1068 | u32 pgtbl_err = I915_READ(PGTBL_ER); |
791 | printk(KERN_ERR "page table error\n"); | 1069 | printk(KERN_ERR "page table error\n"); |
792 | printk(KERN_ERR " PGTBL_ER: 0x%08x\n", | 1070 | printk(KERN_ERR " PGTBL_ER: 0x%08x\n", |
793 | pgtbl_err); | 1071 | pgtbl_err); |
794 | I915_WRITE(PGTBL_ER, pgtbl_err); | 1072 | I915_WRITE(PGTBL_ER, pgtbl_err); |
795 | (void)I915_READ(PGTBL_ER); | 1073 | POSTING_READ(PGTBL_ER); |
796 | } | 1074 | } |
797 | } | 1075 | } |
798 | 1076 | ||
799 | if (eir & I915_ERROR_MEMORY_REFRESH) { | 1077 | if (eir & I915_ERROR_MEMORY_REFRESH) { |
800 | u32 pipea_stats = I915_READ(PIPEASTAT); | 1078 | printk(KERN_ERR "memory refresh error:\n"); |
801 | u32 pipeb_stats = I915_READ(PIPEBSTAT); | 1079 | for_each_pipe(pipe) |
802 | 1080 | printk(KERN_ERR "pipe %c stat: 0x%08x\n", | |
803 | printk(KERN_ERR "memory refresh error\n"); | 1081 | pipe_name(pipe), I915_READ(PIPESTAT(pipe))); |
804 | printk(KERN_ERR "PIPEASTAT: 0x%08x\n", | ||
805 | pipea_stats); | ||
806 | printk(KERN_ERR "PIPEBSTAT: 0x%08x\n", | ||
807 | pipeb_stats); | ||
808 | /* pipestat has already been acked */ | 1082 | /* pipestat has already been acked */ |
809 | } | 1083 | } |
810 | if (eir & I915_ERROR_INSTRUCTION) { | 1084 | if (eir & I915_ERROR_INSTRUCTION) { |
811 | printk(KERN_ERR "instruction error\n"); | 1085 | printk(KERN_ERR "instruction error\n"); |
812 | printk(KERN_ERR " INSTPM: 0x%08x\n", | 1086 | printk(KERN_ERR " INSTPM: 0x%08x\n", |
813 | I915_READ(INSTPM)); | 1087 | I915_READ(INSTPM)); |
814 | if (!IS_I965G(dev)) { | 1088 | if (INTEL_INFO(dev)->gen < 4) { |
815 | u32 ipeir = I915_READ(IPEIR); | 1089 | u32 ipeir = I915_READ(IPEIR); |
816 | 1090 | ||
817 | printk(KERN_ERR " IPEIR: 0x%08x\n", | 1091 | printk(KERN_ERR " IPEIR: 0x%08x\n", |
@@ -823,7 +1097,7 @@ static void i915_report_and_clear_eir(struct drm_device *dev) | |||
823 | printk(KERN_ERR " ACTHD: 0x%08x\n", | 1097 | printk(KERN_ERR " ACTHD: 0x%08x\n", |
824 | I915_READ(ACTHD)); | 1098 | I915_READ(ACTHD)); |
825 | I915_WRITE(IPEIR, ipeir); | 1099 | I915_WRITE(IPEIR, ipeir); |
826 | (void)I915_READ(IPEIR); | 1100 | POSTING_READ(IPEIR); |
827 | } else { | 1101 | } else { |
828 | u32 ipeir = I915_READ(IPEIR_I965); | 1102 | u32 ipeir = I915_READ(IPEIR_I965); |
829 | 1103 | ||
@@ -840,12 +1114,12 @@ static void i915_report_and_clear_eir(struct drm_device *dev) | |||
840 | printk(KERN_ERR " ACTHD: 0x%08x\n", | 1114 | printk(KERN_ERR " ACTHD: 0x%08x\n", |
841 | I915_READ(ACTHD_I965)); | 1115 | I915_READ(ACTHD_I965)); |
842 | I915_WRITE(IPEIR_I965, ipeir); | 1116 | I915_WRITE(IPEIR_I965, ipeir); |
843 | (void)I915_READ(IPEIR_I965); | 1117 | POSTING_READ(IPEIR_I965); |
844 | } | 1118 | } |
845 | } | 1119 | } |
846 | 1120 | ||
847 | I915_WRITE(EIR, eir); | 1121 | I915_WRITE(EIR, eir); |
848 | (void)I915_READ(EIR); | 1122 | POSTING_READ(EIR); |
849 | eir = I915_READ(EIR); | 1123 | eir = I915_READ(EIR); |
850 | if (eir) { | 1124 | if (eir) { |
851 | /* | 1125 | /* |
@@ -868,7 +1142,7 @@ static void i915_report_and_clear_eir(struct drm_device *dev) | |||
868 | * so userspace knows something bad happened (should trigger collection | 1142 | * so userspace knows something bad happened (should trigger collection |
869 | * of a ring dump etc.). | 1143 | * of a ring dump etc.). |
870 | */ | 1144 | */ |
871 | static void i915_handle_error(struct drm_device *dev, bool wedged) | 1145 | void i915_handle_error(struct drm_device *dev, bool wedged) |
872 | { | 1146 | { |
873 | struct drm_i915_private *dev_priv = dev->dev_private; | 1147 | struct drm_i915_private *dev_priv = dev->dev_private; |
874 | 1148 | ||
@@ -876,12 +1150,17 @@ static void i915_handle_error(struct drm_device *dev, bool wedged) | |||
876 | i915_report_and_clear_eir(dev); | 1150 | i915_report_and_clear_eir(dev); |
877 | 1151 | ||
878 | if (wedged) { | 1152 | if (wedged) { |
1153 | INIT_COMPLETION(dev_priv->error_completion); | ||
879 | atomic_set(&dev_priv->mm.wedged, 1); | 1154 | atomic_set(&dev_priv->mm.wedged, 1); |
880 | 1155 | ||
881 | /* | 1156 | /* |
882 | * Wake up waiting processes so they don't hang | 1157 | * Wake up waiting processes so they don't hang
883 | */ | 1158 | */ |
884 | DRM_WAKEUP(&dev_priv->render_ring.irq_queue); | 1159 | wake_up_all(&dev_priv->ring[RCS].irq_queue); |
1160 | if (HAS_BSD(dev)) | ||
1161 | wake_up_all(&dev_priv->ring[VCS].irq_queue); | ||
1162 | if (HAS_BLT(dev)) | ||
1163 | wake_up_all(&dev_priv->ring[BCS].irq_queue); | ||
885 | } | 1164 | } |
886 | 1165 | ||
887 | queue_work(dev_priv->wq, &dev_priv->error_work); | 1166 | queue_work(dev_priv->wq, &dev_priv->error_work); |
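On a wedge, the handler now re-initialises error_completion, sets mm.wedged, and wakes every ring's wait queue, so sleepers re-check the flag rather than wait for a seqno that will never arrive. A user-space analogue of that flag-then-broadcast ordering using a condition variable (stand-ins, not the kernel primitives):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int wedged;
    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t irq_queue = PTHREAD_COND_INITIALIZER;

    /* Waiter: sleep until the work completes or the GPU is wedged. */
    static void wait_seqno(int (*done)(void))
    {
            pthread_mutex_lock(&lock);
            while (!done() && !atomic_load(&wedged))
                    pthread_cond_wait(&irq_queue, &lock);
            pthread_mutex_unlock(&lock);
    }

    /* Error path: set wedged first, then wake everyone (cf. wake_up_all). */
    static void handle_error(void)
    {
            atomic_store(&wedged, 1);
            pthread_mutex_lock(&lock);
            pthread_cond_broadcast(&irq_queue);
            pthread_mutex_unlock(&lock);
    }

    static int never_done(void) { return 0; }

    int main(void)
    {
            handle_error();
            wait_seqno(never_done);         /* returns: wedged flag is set */
            printf("woken, wedged=%d\n", atomic_load(&wedged));
            return 0;
    }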
@@ -892,7 +1171,7 @@ static void i915_pageflip_stall_check(struct drm_device *dev, int pipe) | |||
892 | drm_i915_private_t *dev_priv = dev->dev_private; | 1171 | drm_i915_private_t *dev_priv = dev->dev_private; |
893 | struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; | 1172 | struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; |
894 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 1173 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
895 | struct drm_i915_gem_object *obj_priv; | 1174 | struct drm_i915_gem_object *obj; |
896 | struct intel_unpin_work *work; | 1175 | struct intel_unpin_work *work; |
897 | unsigned long flags; | 1176 | unsigned long flags; |
898 | bool stall_detected; | 1177 | bool stall_detected; |
@@ -911,13 +1190,13 @@ static void i915_pageflip_stall_check(struct drm_device *dev, int pipe) | |||
911 | } | 1190 | } |
912 | 1191 | ||
913 | /* Potential stall - if we see that the flip has happened, assume a missed interrupt */ | 1192 | /* Potential stall - if we see that the flip has happened, assume a missed interrupt */ |
914 | obj_priv = to_intel_bo(work->pending_flip_obj); | 1193 | obj = work->pending_flip_obj; |
915 | if(IS_I965G(dev)) { | 1194 | if (INTEL_INFO(dev)->gen >= 4) { |
916 | int dspsurf = intel_crtc->plane == 0 ? DSPASURF : DSPBSURF; | 1195 | int dspsurf = DSPSURF(intel_crtc->plane); |
917 | stall_detected = I915_READ(dspsurf) == obj_priv->gtt_offset; | 1196 | stall_detected = I915_READ(dspsurf) == obj->gtt_offset; |
918 | } else { | 1197 | } else { |
919 | int dspaddr = intel_crtc->plane == 0 ? DSPAADDR : DSPBADDR; | 1198 | int dspaddr = DSPADDR(intel_crtc->plane); |
920 | stall_detected = I915_READ(dspaddr) == (obj_priv->gtt_offset + | 1199 | stall_detected = I915_READ(dspaddr) == (obj->gtt_offset + |
921 | crtc->y * crtc->fb->pitch + | 1200 | crtc->y * crtc->fb->pitch + |
922 | crtc->x * crtc->fb->bits_per_pixel/8); | 1201 | crtc->x * crtc->fb->bits_per_pixel/8); |
923 | } | 1202 | } |
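The pre-gen4 branch of the stall check computes the linear address the display should be scanning out (base plus y times pitch plus x times bytes per pixel) and compares it with the latched DSPADDR value. The arithmetic in isolation, with made-up framebuffer numbers:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t expected_scanout(uint32_t gtt_offset, int x, int y,
                                     int pitch, int bpp)
    {
            return gtt_offset + (uint32_t)y * pitch + (uint32_t)x * bpp / 8;
    }

    int main(void)
    {
            /* 1024x768 XRGB fb at GTT offset 0x100000, panned to (8, 2) */
            printf("0x%x\n",
                   (unsigned)expected_scanout(0x100000, 8, 2, 4096, 32));
            return 0;
    }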
@@ -930,28 +1209,25 @@ static void i915_pageflip_stall_check(struct drm_device *dev, int pipe) | |||
930 | } | 1209 | } |
931 | } | 1210 | } |
932 | 1211 | ||
933 | irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) | 1212 | static irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) |
934 | { | 1213 | { |
935 | struct drm_device *dev = (struct drm_device *) arg; | 1214 | struct drm_device *dev = (struct drm_device *) arg; |
936 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 1215 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
937 | struct drm_i915_master_private *master_priv; | 1216 | struct drm_i915_master_private *master_priv; |
938 | u32 iir, new_iir; | 1217 | u32 iir, new_iir; |
939 | u32 pipea_stats, pipeb_stats; | 1218 | u32 pipe_stats[I915_MAX_PIPES]; |
940 | u32 vblank_status; | 1219 | u32 vblank_status; |
941 | int vblank = 0; | 1220 | int vblank = 0; |
942 | unsigned long irqflags; | 1221 | unsigned long irqflags; |
943 | int irq_received; | 1222 | int irq_received; |
944 | int ret = IRQ_NONE; | 1223 | int ret = IRQ_NONE, pipe; |
945 | struct intel_ring_buffer *render_ring = &dev_priv->render_ring; | 1224 | bool blc_event = false; |
946 | 1225 | ||
947 | atomic_inc(&dev_priv->irq_received); | 1226 | atomic_inc(&dev_priv->irq_received); |
948 | 1227 | ||
949 | if (HAS_PCH_SPLIT(dev)) | ||
950 | return ironlake_irq_handler(dev); | ||
951 | |||
952 | iir = I915_READ(IIR); | 1228 | iir = I915_READ(IIR); |
953 | 1229 | ||
954 | if (IS_I965G(dev)) | 1230 | if (INTEL_INFO(dev)->gen >= 4) |
955 | vblank_status = PIPE_START_VBLANK_INTERRUPT_STATUS; | 1231 | vblank_status = PIPE_START_VBLANK_INTERRUPT_STATUS; |
956 | else | 1232 | else |
957 | vblank_status = PIPE_VBLANK_INTERRUPT_STATUS; | 1233 | vblank_status = PIPE_VBLANK_INTERRUPT_STATUS; |
@@ -964,30 +1240,26 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) | |||
964 | * It doesn't set the bit in iir again, but it still produces | 1240 | * It doesn't set the bit in iir again, but it still produces |
965 | * interrupts (for non-MSI). | 1241 | * interrupts (for non-MSI). |
966 | */ | 1242 | */ |
967 | spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); | 1243 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); |
968 | pipea_stats = I915_READ(PIPEASTAT); | ||
969 | pipeb_stats = I915_READ(PIPEBSTAT); | ||
970 | |||
971 | if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) | 1244 | if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) |
972 | i915_handle_error(dev, false); | 1245 | i915_handle_error(dev, false); |
973 | 1246 | ||
974 | /* | 1247 | for_each_pipe(pipe) { |
975 | * Clear the PIPE(A|B)STAT regs before the IIR | 1248 | int reg = PIPESTAT(pipe); |
976 | */ | 1249 | pipe_stats[pipe] = I915_READ(reg); |
977 | if (pipea_stats & 0x8000ffff) { | 1250 | |
978 | if (pipea_stats & PIPE_FIFO_UNDERRUN_STATUS) | 1251 | /* |
979 | DRM_DEBUG_DRIVER("pipe a underrun\n"); | 1252 | * Clear the PIPE*STAT regs before the IIR |
980 | I915_WRITE(PIPEASTAT, pipea_stats); | 1253 | */ |
981 | irq_received = 1; | 1254 | if (pipe_stats[pipe] & 0x8000ffff) { |
982 | } | 1255 | if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) |
983 | 1256 | DRM_DEBUG_DRIVER("pipe %c underrun\n", | |
984 | if (pipeb_stats & 0x8000ffff) { | 1257 | pipe_name(pipe)); |
985 | if (pipeb_stats & PIPE_FIFO_UNDERRUN_STATUS) | 1258 | I915_WRITE(reg, pipe_stats[pipe]); |
986 | DRM_DEBUG_DRIVER("pipe b underrun\n"); | 1259 | irq_received = 1; |
987 | I915_WRITE(PIPEBSTAT, pipeb_stats); | 1260 | } |
988 | irq_received = 1; | ||
989 | } | 1261 | } |
990 | spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags); | 1262 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); |
991 | 1263 | ||
992 | if (!irq_received) | 1264 | if (!irq_received) |
993 | break; | 1265 | break; |
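The handler now latches and acknowledges each PIPESTAT register in one for_each_pipe() loop instead of open-coding pipes A and B. A sketch of the read-then-write-back acking over a simulated register file (the 0x8000ffff mask is the one used above):

    #include <stdint.h>
    #include <stdio.h>

    #define MAX_PIPES 2

    static uint32_t pipestat_reg[MAX_PIPES] = { 0x80000002, 0x0 };

    int main(void)
    {
            uint32_t pipe_stats[MAX_PIPES];
            int irq_received = 0;

            for (int pipe = 0; pipe < MAX_PIPES; pipe++) {
                    pipe_stats[pipe] = pipestat_reg[pipe];     /* latch */
                    if (pipe_stats[pipe] & 0x8000ffff) {
                            /* on hardware: write the value back to ack;
                             * simulated here by clearing the bits */
                            pipestat_reg[pipe] &= ~0x8000ffffu;
                            irq_received = 1;
                    }
            }
            printf("irq_received=%d stat0=0x%x\n",
                   irq_received, (unsigned)pipe_stats[0]);
            return 0;
    }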
@@ -1019,18 +1291,10 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) | |||
1019 | READ_BREADCRUMB(dev_priv); | 1291 | READ_BREADCRUMB(dev_priv); |
1020 | } | 1292 | } |
1021 | 1293 | ||
1022 | if (iir & I915_USER_INTERRUPT) { | 1294 | if (iir & I915_USER_INTERRUPT) |
1023 | u32 seqno = | 1295 | notify_ring(dev, &dev_priv->ring[RCS]); |
1024 | render_ring->get_gem_seqno(dev, render_ring); | 1296 | if (iir & I915_BSD_USER_INTERRUPT) |
1025 | render_ring->irq_gem_seqno = seqno; | 1297 | notify_ring(dev, &dev_priv->ring[VCS]); |
1026 | trace_i915_gem_request_complete(dev, seqno); | ||
1027 | DRM_WAKEUP(&dev_priv->render_ring.irq_queue); | ||
1028 | dev_priv->hangcheck_count = 0; | ||
1029 | mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD); | ||
1030 | } | ||
1031 | |||
1032 | if (HAS_BSD(dev) && (iir & I915_BSD_USER_INTERRUPT)) | ||
1033 | DRM_WAKEUP(&dev_priv->bsd_ring.irq_queue); | ||
1034 | 1298 | ||
1035 | if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) { | 1299 | if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) { |
1036 | intel_prepare_page_flip(dev, 0); | 1300 | intel_prepare_page_flip(dev, 0); |
@@ -1044,28 +1308,23 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) | |||
1044 | intel_finish_page_flip_plane(dev, 1); | 1308 | intel_finish_page_flip_plane(dev, 1); |
1045 | } | 1309 | } |
1046 | 1310 | ||
1047 | if (pipea_stats & vblank_status) { | 1311 | for_each_pipe(pipe) { |
1048 | vblank++; | 1312 | if (pipe_stats[pipe] & vblank_status && |
1049 | drm_handle_vblank(dev, 0); | 1313 | drm_handle_vblank(dev, pipe)) { |
1050 | if (!dev_priv->flip_pending_is_done) { | 1314 | vblank++; |
1051 | i915_pageflip_stall_check(dev, 0); | 1315 | if (!dev_priv->flip_pending_is_done) { |
1052 | intel_finish_page_flip(dev, 0); | 1316 | i915_pageflip_stall_check(dev, pipe); |
1317 | intel_finish_page_flip(dev, pipe); | ||
1318 | } | ||
1053 | } | 1319 | } |
1054 | } | ||
1055 | 1320 | ||
1056 | if (pipeb_stats & vblank_status) { | 1321 | if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) |
1057 | vblank++; | 1322 | blc_event = true; |
1058 | drm_handle_vblank(dev, 1); | ||
1059 | if (!dev_priv->flip_pending_is_done) { | ||
1060 | i915_pageflip_stall_check(dev, 1); | ||
1061 | intel_finish_page_flip(dev, 1); | ||
1062 | } | ||
1063 | } | 1323 | } |
1064 | 1324 | ||
1065 | if ((pipea_stats & PIPE_LEGACY_BLC_EVENT_STATUS) || | 1325 | |
1066 | (pipeb_stats & PIPE_LEGACY_BLC_EVENT_STATUS) || | 1326 | if (blc_event || (iir & I915_ASLE_INTERRUPT)) |
1067 | (iir & I915_ASLE_INTERRUPT)) | 1327 | intel_opregion_asle_intr(dev); |
1068 | opregion_asle_intr(dev); | ||
1069 | 1328 | ||
1070 | /* With MSI, interrupts are only generated when iir | 1329 | /* With MSI, interrupts are only generated when iir |
1071 | * transitions from zero to nonzero. If another bit got | 1330 | * transitions from zero to nonzero. If another bit got |
@@ -1103,33 +1362,23 @@ static int i915_emit_irq(struct drm_device * dev) | |||
1103 | if (master_priv->sarea_priv) | 1362 | if (master_priv->sarea_priv) |
1104 | master_priv->sarea_priv->last_enqueue = dev_priv->counter; | 1363 | master_priv->sarea_priv->last_enqueue = dev_priv->counter; |
1105 | 1364 | ||
1106 | BEGIN_LP_RING(4); | 1365 | if (BEGIN_LP_RING(4) == 0) { |
1107 | OUT_RING(MI_STORE_DWORD_INDEX); | 1366 | OUT_RING(MI_STORE_DWORD_INDEX); |
1108 | OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT); | 1367 | OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT); |
1109 | OUT_RING(dev_priv->counter); | 1368 | OUT_RING(dev_priv->counter); |
1110 | OUT_RING(MI_USER_INTERRUPT); | 1369 | OUT_RING(MI_USER_INTERRUPT); |
1111 | ADVANCE_LP_RING(); | 1370 | ADVANCE_LP_RING(); |
1371 | } | ||
1112 | 1372 | ||
1113 | return dev_priv->counter; | 1373 | return dev_priv->counter; |
1114 | } | 1374 | } |
1115 | 1375 | ||
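i915_emit_irq() now treats BEGIN_LP_RING() as fallible and only emits its four dwords when ring space was actually reserved. A toy checked-emit sketch over a simplified ring (the values are placeholders for the MI_* commands):

    #include <stdio.h>

    #define RING_DWORDS 8

    static unsigned ring[RING_DWORDS];
    static unsigned tail, space = RING_DWORDS;

    static int begin(unsigned n)    /* 0 on success, like the new macro */
    {
            return n <= space ? 0 : -1;
    }

    static void out_ring(unsigned v)
    {
            ring[tail++ % RING_DWORDS] = v;
            space--;
    }

    int main(void)
    {
            if (begin(4) == 0) {
                    out_ring(0x1);  /* MI_STORE_DWORD_INDEX, say */
                    out_ring(0x2);
                    out_ring(0x3);
                    out_ring(0x4);  /* MI_USER_INTERRUPT, say */
            }
            printf("tail=%u space=%u\n", tail, space);
            return 0;
    }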
1116 | void i915_trace_irq_get(struct drm_device *dev, u32 seqno) | ||
1117 | { | ||
1118 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | ||
1119 | struct intel_ring_buffer *render_ring = &dev_priv->render_ring; | ||
1120 | |||
1121 | if (dev_priv->trace_irq_seqno == 0) | ||
1122 | render_ring->user_irq_get(dev, render_ring); | ||
1123 | |||
1124 | dev_priv->trace_irq_seqno = seqno; | ||
1125 | } | ||
1126 | |||
1127 | static int i915_wait_irq(struct drm_device * dev, int irq_nr) | 1376 | static int i915_wait_irq(struct drm_device * dev, int irq_nr) |
1128 | { | 1377 | { |
1129 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 1378 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
1130 | struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; | 1379 | struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; |
1131 | int ret = 0; | 1380 | int ret = 0; |
1132 | struct intel_ring_buffer *render_ring = &dev_priv->render_ring; | 1381 | struct intel_ring_buffer *ring = LP_RING(dev_priv); |
1133 | 1382 | ||
1134 | DRM_DEBUG_DRIVER("irq_nr=%d breadcrumb=%d\n", irq_nr, | 1383 | DRM_DEBUG_DRIVER("irq_nr=%d breadcrumb=%d\n", irq_nr, |
1135 | READ_BREADCRUMB(dev_priv)); | 1384 | READ_BREADCRUMB(dev_priv)); |
@@ -1143,10 +1392,12 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr) | |||
1143 | if (master_priv->sarea_priv) | 1392 | if (master_priv->sarea_priv) |
1144 | master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT; | 1393 | master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT; |
1145 | 1394 | ||
1146 | render_ring->user_irq_get(dev, render_ring); | 1395 | if (ring->irq_get(ring)) { |
1147 | DRM_WAIT_ON(ret, dev_priv->render_ring.irq_queue, 3 * DRM_HZ, | 1396 | DRM_WAIT_ON(ret, ring->irq_queue, 3 * DRM_HZ, |
1148 | READ_BREADCRUMB(dev_priv) >= irq_nr); | 1397 | READ_BREADCRUMB(dev_priv) >= irq_nr); |
1149 | render_ring->user_irq_put(dev, render_ring); | 1398 | ring->irq_put(ring); |
1399 | } else if (wait_for(READ_BREADCRUMB(dev_priv) >= irq_nr, 3000)) | ||
1400 | ret = -EBUSY; | ||
1150 | 1401 | ||
1151 | if (ret == -EBUSY) { | 1402 | if (ret == -EBUSY) { |
1152 | DRM_ERROR("EBUSY -- rec: %d emitted: %d\n", | 1403 | DRM_ERROR("EBUSY -- rec: %d emitted: %d\n", |
@@ -1165,7 +1416,7 @@ int i915_irq_emit(struct drm_device *dev, void *data, | |||
1165 | drm_i915_irq_emit_t *emit = data; | 1416 | drm_i915_irq_emit_t *emit = data; |
1166 | int result; | 1417 | int result; |
1167 | 1418 | ||
1168 | if (!dev_priv || !dev_priv->render_ring.virtual_start) { | 1419 | if (!dev_priv || !LP_RING(dev_priv)->virtual_start) { |
1169 | DRM_ERROR("called with no initialization\n"); | 1420 | DRM_ERROR("called with no initialization\n"); |
1170 | return -EINVAL; | 1421 | return -EINVAL; |
1171 | } | 1422 | } |
@@ -1203,59 +1454,102 @@ int i915_irq_wait(struct drm_device *dev, void *data, | |||
1203 | /* Called from drm generic code, passed 'crtc' which | 1454 | /* Called from drm generic code, passed 'crtc' which |
1204 | * we use as a pipe index | 1455 | * we use as a pipe index |
1205 | */ | 1456 | */ |
1206 | int i915_enable_vblank(struct drm_device *dev, int pipe) | 1457 | static int i915_enable_vblank(struct drm_device *dev, int pipe) |
1207 | { | 1458 | { |
1208 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 1459 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
1209 | unsigned long irqflags; | 1460 | unsigned long irqflags; |
1210 | int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF; | ||
1211 | u32 pipeconf; | ||
1212 | 1461 | ||
1213 | pipeconf = I915_READ(pipeconf_reg); | 1462 | if (!i915_pipe_enabled(dev, pipe)) |
1214 | if (!(pipeconf & PIPEACONF_ENABLE)) | ||
1215 | return -EINVAL; | 1463 | return -EINVAL; |
1216 | 1464 | ||
1217 | spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); | 1465 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); |
1218 | if (HAS_PCH_SPLIT(dev)) | 1466 | if (INTEL_INFO(dev)->gen >= 4) |
1219 | ironlake_enable_display_irq(dev_priv, (pipe == 0) ? | ||
1220 | DE_PIPEA_VBLANK: DE_PIPEB_VBLANK); | ||
1221 | else if (IS_I965G(dev)) | ||
1222 | i915_enable_pipestat(dev_priv, pipe, | 1467 | i915_enable_pipestat(dev_priv, pipe, |
1223 | PIPE_START_VBLANK_INTERRUPT_ENABLE); | 1468 | PIPE_START_VBLANK_INTERRUPT_ENABLE); |
1224 | else | 1469 | else |
1225 | i915_enable_pipestat(dev_priv, pipe, | 1470 | i915_enable_pipestat(dev_priv, pipe, |
1226 | PIPE_VBLANK_INTERRUPT_ENABLE); | 1471 | PIPE_VBLANK_INTERRUPT_ENABLE); |
1227 | spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags); | 1472 | |
1473 | /* maintain vblank delivery even in deep C-states */ | ||
1474 | if (dev_priv->info->gen == 3) | ||
1475 | I915_WRITE(INSTPM, INSTPM_AGPBUSY_DIS << 16); | ||
1476 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); | ||
1477 | |||
1478 | return 0; | ||
1479 | } | ||
1480 | |||
1481 | static int ironlake_enable_vblank(struct drm_device *dev, int pipe) | ||
1482 | { | ||
1483 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | ||
1484 | unsigned long irqflags; | ||
1485 | |||
1486 | if (!i915_pipe_enabled(dev, pipe)) | ||
1487 | return -EINVAL; | ||
1488 | |||
1489 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); | ||
1490 | ironlake_enable_display_irq(dev_priv, (pipe == 0) ? | ||
1491 | DE_PIPEA_VBLANK: DE_PIPEB_VBLANK); | ||
1492 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); | ||
1493 | |||
1494 | return 0; | ||
1495 | } | ||
1496 | |||
1497 | static int ivybridge_enable_vblank(struct drm_device *dev, int pipe) | ||
1498 | { | ||
1499 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | ||
1500 | unsigned long irqflags; | ||
1501 | |||
1502 | if (!i915_pipe_enabled(dev, pipe)) | ||
1503 | return -EINVAL; | ||
1504 | |||
1505 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); | ||
1506 | ironlake_enable_display_irq(dev_priv, (pipe == 0) ? | ||
1507 | DE_PIPEA_VBLANK_IVB : DE_PIPEB_VBLANK_IVB); | ||
1508 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); | ||
1509 | |||
1228 | return 0; | 1510 | return 0; |
1229 | } | 1511 | } |
1230 | 1512 | ||
1231 | /* Called from drm generic code, passed 'crtc' which | 1513 | /* Called from drm generic code, passed 'crtc' which |
1232 | * we use as a pipe index | 1514 | * we use as a pipe index |
1233 | */ | 1515 | */ |
1234 | void i915_disable_vblank(struct drm_device *dev, int pipe) | 1516 | static void i915_disable_vblank(struct drm_device *dev, int pipe) |
1235 | { | 1517 | { |
1236 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 1518 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
1237 | unsigned long irqflags; | 1519 | unsigned long irqflags; |
1238 | 1520 | ||
1239 | spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); | 1521 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); |
1240 | if (HAS_PCH_SPLIT(dev)) | 1522 | if (dev_priv->info->gen == 3) |
1241 | ironlake_disable_display_irq(dev_priv, (pipe == 0) ? | 1523 | I915_WRITE(INSTPM, |
1242 | DE_PIPEA_VBLANK: DE_PIPEB_VBLANK); | 1524 | INSTPM_AGPBUSY_DIS << 16 | INSTPM_AGPBUSY_DIS); |
1243 | else | 1525 | |
1244 | i915_disable_pipestat(dev_priv, pipe, | 1526 | i915_disable_pipestat(dev_priv, pipe, |
1245 | PIPE_VBLANK_INTERRUPT_ENABLE | | 1527 | PIPE_VBLANK_INTERRUPT_ENABLE | |
1246 | PIPE_START_VBLANK_INTERRUPT_ENABLE); | 1528 | PIPE_START_VBLANK_INTERRUPT_ENABLE); |
1247 | spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags); | 1529 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); |
1248 | } | 1530 | } |
1249 | 1531 | ||
1250 | void i915_enable_interrupt (struct drm_device *dev) | 1532 | static void ironlake_disable_vblank(struct drm_device *dev, int pipe) |
1251 | { | 1533 | { |
1252 | struct drm_i915_private *dev_priv = dev->dev_private; | 1534 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
1535 | unsigned long irqflags; | ||
1253 | 1536 | ||
1254 | if (!HAS_PCH_SPLIT(dev)) | 1537 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); |
1255 | opregion_enable_asle(dev); | 1538 | ironlake_disable_display_irq(dev_priv, (pipe == 0) ? |
1256 | dev_priv->irq_enabled = 1; | 1539 | DE_PIPEA_VBLANK: DE_PIPEB_VBLANK); |
1540 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); | ||
1257 | } | 1541 | } |
1258 | 1542 | ||
1543 | static void ivybridge_disable_vblank(struct drm_device *dev, int pipe) | ||
1544 | { | ||
1545 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | ||
1546 | unsigned long irqflags; | ||
1547 | |||
1548 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); | ||
1549 | ironlake_disable_display_irq(dev_priv, (pipe == 0) ? | ||
1550 | DE_PIPEA_VBLANK_IVB : DE_PIPEB_VBLANK_IVB); | ||
1551 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); | ||
1552 | } | ||
1259 | 1553 | ||
1260 | /* Set the vblank monitor pipe | 1554 | /* Set the vblank monitor pipe |
1261 | */ | 1555 | */ |
@@ -1311,12 +1605,50 @@ int i915_vblank_swap(struct drm_device *dev, void *data, | |||
1311 | return -EINVAL; | 1605 | return -EINVAL; |
1312 | } | 1606 | } |
1313 | 1607 | ||
1314 | struct drm_i915_gem_request * | 1608 | static u32 |
1315 | i915_get_tail_request(struct drm_device *dev) | 1609 | ring_last_seqno(struct intel_ring_buffer *ring) |
1316 | { | 1610 | { |
1317 | drm_i915_private_t *dev_priv = dev->dev_private; | 1611 | return list_entry(ring->request_list.prev, |
1318 | return list_entry(dev_priv->render_ring.request_list.prev, | 1612 | struct drm_i915_gem_request, list)->seqno; |
1319 | struct drm_i915_gem_request, list); | 1613 | } |
1614 | |||
1615 | static bool i915_hangcheck_ring_idle(struct intel_ring_buffer *ring, bool *err) | ||
1616 | { | ||
1617 | if (list_empty(&ring->request_list) || | ||
1618 | i915_seqno_passed(ring->get_seqno(ring), ring_last_seqno(ring))) { | ||
1619 | /* Issue a wake-up to catch stuck h/w. */ | ||
1620 | if (ring->waiting_seqno && waitqueue_active(&ring->irq_queue)) { | ||
1621 | DRM_ERROR("Hangcheck timer elapsed... %s idle [waiting on %d, at %d], missed IRQ?\n", | ||
1622 | ring->name, | ||
1623 | ring->waiting_seqno, | ||
1624 | ring->get_seqno(ring)); | ||
1625 | wake_up_all(&ring->irq_queue); | ||
1626 | *err = true; | ||
1627 | } | ||
1628 | return true; | ||
1629 | } | ||
1630 | return false; | ||
1631 | } | ||
1632 | |||
1633 | static bool kick_ring(struct intel_ring_buffer *ring) | ||
1634 | { | ||
1635 | struct drm_device *dev = ring->dev; | ||
1636 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1637 | u32 tmp = I915_READ_CTL(ring); | ||
1638 | if (tmp & RING_WAIT) { | ||
1639 | DRM_ERROR("Kicking stuck wait on %s\n", | ||
1640 | ring->name); | ||
1641 | I915_WRITE_CTL(ring, tmp); | ||
1642 | return true; | ||
1643 | } | ||
1644 | if (IS_GEN6(dev) && | ||
1645 | (tmp & RING_WAIT_SEMAPHORE)) { | ||
1646 | DRM_ERROR("Kicking stuck semaphore on %s\n", | ||
1647 | ring->name); | ||
1648 | I915_WRITE_CTL(ring, tmp); | ||
1649 | return true; | ||
1650 | } | ||
1651 | return false; | ||
1320 | } | 1652 | } |
1321 | 1653 | ||
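kick_ring() checks whether the ring's control register reports it parked on a WAIT_FOR_EVENT and, if so, rewrites the register to poke the wait bit so the ring resumes without a full GPU reset. A simulated sketch; the bit position matches the era's CTL layout, and the write-back's effect on real hardware is modelled by clearing the bit by hand:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define RING_WAIT (1u << 11)            /* ring CTL wait bit */

    static uint32_t ring_ctl = RING_WAIT;   /* pretend the ring is stuck */

    static bool kick_ring(void)
    {
            uint32_t tmp = ring_ctl;

            if (tmp & RING_WAIT) {
                    ring_ctl = tmp;         /* write-back kicks the wait (hw) */
                    ring_ctl &= ~RING_WAIT; /* simulate the hardware reaction */
                    return true;
            }
            return false;
    }

    int main(void)
    {
            printf("kicked=%d ctl=0x%x\n", kick_ring(), (unsigned)ring_ctl);
            return 0;
    }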
1322 | /** | 1654 | /** |
@@ -1330,12 +1662,19 @@ void i915_hangcheck_elapsed(unsigned long data) | |||
1330 | struct drm_device *dev = (struct drm_device *)data; | 1662 | struct drm_device *dev = (struct drm_device *)data; |
1331 | drm_i915_private_t *dev_priv = dev->dev_private; | 1663 | drm_i915_private_t *dev_priv = dev->dev_private; |
1332 | uint32_t acthd, instdone, instdone1; | 1664 | uint32_t acthd, instdone, instdone1; |
1665 | bool err = false; | ||
1333 | 1666 | ||
1334 | /* No reset support on this chip yet. */ | 1667 | /* If all work is done then ACTHD clearly hasn't advanced. */ |
1335 | if (IS_GEN6(dev)) | 1668 | if (i915_hangcheck_ring_idle(&dev_priv->ring[RCS], &err) && |
1669 | i915_hangcheck_ring_idle(&dev_priv->ring[VCS], &err) && | ||
1670 | i915_hangcheck_ring_idle(&dev_priv->ring[BCS], &err)) { | ||
1671 | dev_priv->hangcheck_count = 0; | ||
1672 | if (err) | ||
1673 | goto repeat; | ||
1336 | return; | 1674 | return; |
1675 | } | ||
1337 | 1676 | ||
1338 | if (!IS_I965G(dev)) { | 1677 | if (INTEL_INFO(dev)->gen < 4) { |
1339 | acthd = I915_READ(ACTHD); | 1678 | acthd = I915_READ(ACTHD); |
1340 | instdone = I915_READ(INSTDONE); | 1679 | instdone = I915_READ(INSTDONE); |
1341 | instdone1 = 0; | 1680 | instdone1 = 0; |
@@ -1345,38 +1684,31 @@ void i915_hangcheck_elapsed(unsigned long data) | |||
1345 | instdone1 = I915_READ(INSTDONE1); | 1684 | instdone1 = I915_READ(INSTDONE1); |
1346 | } | 1685 | } |
1347 | 1686 | ||
1348 | /* If all work is done then ACTHD clearly hasn't advanced. */ | ||
1349 | if (list_empty(&dev_priv->render_ring.request_list) || | ||
1350 | i915_seqno_passed(i915_get_gem_seqno(dev, | ||
1351 | &dev_priv->render_ring), | ||
1352 | i915_get_tail_request(dev)->seqno)) { | ||
1353 | bool missed_wakeup = false; | ||
1354 | |||
1355 | dev_priv->hangcheck_count = 0; | ||
1356 | |||
1357 | /* Issue a wake-up to catch stuck h/w. */ | ||
1358 | if (dev_priv->render_ring.waiting_gem_seqno && | ||
1359 | waitqueue_active(&dev_priv->render_ring.irq_queue)) { | ||
1360 | DRM_WAKEUP(&dev_priv->render_ring.irq_queue); | ||
1361 | missed_wakeup = true; | ||
1362 | } | ||
1363 | |||
1364 | if (dev_priv->bsd_ring.waiting_gem_seqno && | ||
1365 | waitqueue_active(&dev_priv->bsd_ring.irq_queue)) { | ||
1366 | DRM_WAKEUP(&dev_priv->bsd_ring.irq_queue); | ||
1367 | missed_wakeup = true; | ||
1368 | } | ||
1369 | |||
1370 | if (missed_wakeup) | ||
1371 | DRM_ERROR("Hangcheck timer elapsed... GPU idle, missed IRQ.\n"); | ||
1372 | return; | ||
1373 | } | ||
1374 | |||
1375 | if (dev_priv->last_acthd == acthd && | 1687 | if (dev_priv->last_acthd == acthd && |
1376 | dev_priv->last_instdone == instdone && | 1688 | dev_priv->last_instdone == instdone && |
1377 | dev_priv->last_instdone1 == instdone1) { | 1689 | dev_priv->last_instdone1 == instdone1) { |
1378 | if (dev_priv->hangcheck_count++ > 1) { | 1690 | if (dev_priv->hangcheck_count++ > 1) { |
1379 | DRM_ERROR("Hangcheck timer elapsed... GPU hung\n"); | 1691 | DRM_ERROR("Hangcheck timer elapsed... GPU hung\n"); |
1692 | |||
1693 | if (!IS_GEN2(dev)) { | ||
1694 | /* Is the chip hanging on a WAIT_FOR_EVENT? | ||
1695 | * If so we can simply poke the RB_WAIT bit | ||
1696 | * and break the hang. This should work on | ||
1697 | * all but the second generation chipsets. | ||
1698 | */ | ||
1699 | |||
1700 | if (kick_ring(&dev_priv->ring[RCS])) | ||
1701 | goto repeat; | ||
1702 | |||
1703 | if (HAS_BSD(dev) && | ||
1704 | kick_ring(&dev_priv->ring[VCS])) | ||
1705 | goto repeat; | ||
1706 | |||
1707 | if (HAS_BLT(dev) && | ||
1708 | kick_ring(&dev_priv->ring[BCS])) | ||
1709 | goto repeat; | ||
1710 | } | ||
1711 | |||
1380 | i915_handle_error(dev, true); | 1712 | i915_handle_error(dev, true); |
1381 | return; | 1713 | return; |
1382 | } | 1714 | } |
@@ -1388,8 +1720,10 @@ void i915_hangcheck_elapsed(unsigned long data) | |||
1388 | dev_priv->last_instdone1 = instdone1; | 1720 | dev_priv->last_instdone1 = instdone1; |
1389 | } | 1721 | } |
1390 | 1722 | ||
1723 | repeat: | ||
1391 | /* Reset timer in case the chip hangs without another request being added */ | 1724 | /* Reset timer in case the chip hangs without another request being added */
1392 | mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD); | 1725 | mod_timer(&dev_priv->hangcheck_timer, |
1726 | jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)); | ||
1393 | } | 1727 | } |
1394 | 1728 | ||
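The re-arm now passes DRM_I915_HANGCHECK_PERIOD through msecs_to_jiffies() instead of adding it to jiffies raw, since the period is defined in milliseconds. A tiny illustration of the difference, assuming HZ=250 and a 1500 ms period, with a simplified conversion that ignores rounding:

    #include <stdio.h>

    #define HZ 250

    static unsigned msecs_to_jiffies(unsigned ms)
    {
            return ms * HZ / 1000;          /* simplified: no round-up */
    }

    int main(void)
    {
            unsigned period_ms = 1500;

            printf("%u ms -> %u jiffies (not %u)\n",
                   period_ms, msecs_to_jiffies(period_ms), period_ms);
            return 0;
    }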
1395 | /* drm_dma.h hooks | 1729 | /* drm_dma.h hooks |
@@ -1398,23 +1732,41 @@ static void ironlake_irq_preinstall(struct drm_device *dev) | |||
1398 | { | 1732 | { |
1399 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 1733 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
1400 | 1734 | ||
1735 | atomic_set(&dev_priv->irq_received, 0); | ||
1736 | |||
1737 | INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func); | ||
1738 | INIT_WORK(&dev_priv->error_work, i915_error_work_func); | ||
1739 | if (IS_GEN6(dev) || IS_IVYBRIDGE(dev)) | ||
1740 | INIT_WORK(&dev_priv->rps_work, gen6_pm_rps_work); | ||
1741 | |||
1401 | I915_WRITE(HWSTAM, 0xeffe); | 1742 | I915_WRITE(HWSTAM, 0xeffe); |
1743 | if (IS_GEN6(dev) || IS_GEN7(dev)) { | ||
1744 | /* Workaround stalls observed on Sandy Bridge GPUs by | ||
1745 | * making the blitter command streamer generate a | ||
1746 | * write to the Hardware Status Page for | ||
1747 | * MI_USER_INTERRUPT. This appears to serialize the | ||
1748 | * previous seqno write out before the interrupt | ||
1749 | * happens. | ||
1750 | */ | ||
1751 | I915_WRITE(GEN6_BLITTER_HWSTAM, ~GEN6_BLITTER_USER_INTERRUPT); | ||
1752 | I915_WRITE(GEN6_BSD_HWSTAM, ~GEN6_BSD_USER_INTERRUPT); | ||
1753 | } | ||
1402 | 1754 | ||
1403 | /* XXX hotplug from PCH */ | 1755 | /* XXX hotplug from PCH */ |
1404 | 1756 | ||
1405 | I915_WRITE(DEIMR, 0xffffffff); | 1757 | I915_WRITE(DEIMR, 0xffffffff); |
1406 | I915_WRITE(DEIER, 0x0); | 1758 | I915_WRITE(DEIER, 0x0); |
1407 | (void) I915_READ(DEIER); | 1759 | POSTING_READ(DEIER); |
1408 | 1760 | ||
1409 | /* and GT */ | 1761 | /* and GT */ |
1410 | I915_WRITE(GTIMR, 0xffffffff); | 1762 | I915_WRITE(GTIMR, 0xffffffff); |
1411 | I915_WRITE(GTIER, 0x0); | 1763 | I915_WRITE(GTIER, 0x0); |
1412 | (void) I915_READ(GTIER); | 1764 | POSTING_READ(GTIER); |
1413 | 1765 | ||
1414 | /* south display irq */ | 1766 | /* south display irq */ |
1415 | I915_WRITE(SDEIMR, 0xffffffff); | 1767 | I915_WRITE(SDEIMR, 0xffffffff); |
1416 | I915_WRITE(SDEIER, 0x0); | 1768 | I915_WRITE(SDEIER, 0x0); |
1417 | (void) I915_READ(SDEIER); | 1769 | POSTING_READ(SDEIER); |
1418 | } | 1770 | } |
1419 | 1771 | ||
1420 | static int ironlake_irq_postinstall(struct drm_device *dev) | 1772 | static int ironlake_irq_postinstall(struct drm_device *dev) |
@@ -1423,40 +1775,61 @@ static int ironlake_irq_postinstall(struct drm_device *dev) | |||
1423 | /* enable kind of interrupts always enabled */ | 1775 | /* enable kind of interrupts always enabled */ |
1424 | u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | | 1776 | u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | |
1425 | DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE; | 1777 | DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE; |
1426 | u32 render_mask = GT_PIPE_NOTIFY | GT_BSD_USER_INTERRUPT; | 1778 | u32 render_irqs; |
1427 | u32 hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG | | 1779 | u32 hotplug_mask; |
1428 | SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG; | 1780 | |
1781 | DRM_INIT_WAITQUEUE(&dev_priv->ring[RCS].irq_queue); | ||
1782 | if (HAS_BSD(dev)) | ||
1783 | DRM_INIT_WAITQUEUE(&dev_priv->ring[VCS].irq_queue); | ||
1784 | if (HAS_BLT(dev)) | ||
1785 | DRM_INIT_WAITQUEUE(&dev_priv->ring[BCS].irq_queue); | ||
1429 | 1786 | ||
1430 | dev_priv->irq_mask_reg = ~display_mask; | 1787 | dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B; |
1431 | dev_priv->de_irq_enable_reg = display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK; | 1788 | dev_priv->irq_mask = ~display_mask; |
1432 | 1789 | ||
1433 | /* should always be able to generate an irq */ | 1790 | /* should always be able to generate an irq */
1434 | I915_WRITE(DEIIR, I915_READ(DEIIR)); | 1791 | I915_WRITE(DEIIR, I915_READ(DEIIR)); |
1435 | I915_WRITE(DEIMR, dev_priv->irq_mask_reg); | 1792 | I915_WRITE(DEIMR, dev_priv->irq_mask); |
1436 | I915_WRITE(DEIER, dev_priv->de_irq_enable_reg); | 1793 | I915_WRITE(DEIER, display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK); |
1437 | (void) I915_READ(DEIER); | 1794 | POSTING_READ(DEIER); |
1438 | |||
1439 | /* Gen6 only needs render pipe_control now */ | ||
1440 | if (IS_GEN6(dev)) | ||
1441 | render_mask = GT_PIPE_NOTIFY; | ||
1442 | 1795 | ||
1443 | dev_priv->gt_irq_mask_reg = ~render_mask; | 1796 | dev_priv->gt_irq_mask = ~0; |
1444 | dev_priv->gt_irq_enable_reg = render_mask; | ||
1445 | 1797 | ||
1446 | I915_WRITE(GTIIR, I915_READ(GTIIR)); | 1798 | I915_WRITE(GTIIR, I915_READ(GTIIR)); |
1447 | I915_WRITE(GTIMR, dev_priv->gt_irq_mask_reg); | 1799 | I915_WRITE(GTIMR, dev_priv->gt_irq_mask); |
1800 | |||
1448 | if (IS_GEN6(dev)) | 1801 | if (IS_GEN6(dev)) |
1449 | I915_WRITE(GEN6_RENDER_IMR, ~GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT); | 1802 | render_irqs = |
1450 | I915_WRITE(GTIER, dev_priv->gt_irq_enable_reg); | 1803 | GT_USER_INTERRUPT | |
1451 | (void) I915_READ(GTIER); | 1804 | GT_GEN6_BSD_USER_INTERRUPT | |
1805 | GT_BLT_USER_INTERRUPT; | ||
1806 | else | ||
1807 | render_irqs = | ||
1808 | GT_USER_INTERRUPT | | ||
1809 | GT_PIPE_NOTIFY | | ||
1810 | GT_BSD_USER_INTERRUPT; | ||
1811 | I915_WRITE(GTIER, render_irqs); | ||
1812 | POSTING_READ(GTIER); | ||
1813 | |||
1814 | if (HAS_PCH_CPT(dev)) { | ||
1815 | hotplug_mask = (SDE_CRT_HOTPLUG_CPT | | ||
1816 | SDE_PORTB_HOTPLUG_CPT | | ||
1817 | SDE_PORTC_HOTPLUG_CPT | | ||
1818 | SDE_PORTD_HOTPLUG_CPT); | ||
1819 | } else { | ||
1820 | hotplug_mask = (SDE_CRT_HOTPLUG | | ||
1821 | SDE_PORTB_HOTPLUG | | ||
1822 | SDE_PORTC_HOTPLUG | | ||
1823 | SDE_PORTD_HOTPLUG | | ||
1824 | SDE_AUX_MASK); | ||
1825 | } | ||
1452 | 1826 | ||
1453 | dev_priv->pch_irq_mask_reg = ~hotplug_mask; | 1827 | dev_priv->pch_irq_mask = ~hotplug_mask; |
1454 | dev_priv->pch_irq_enable_reg = hotplug_mask; | ||
1455 | 1828 | ||
1456 | I915_WRITE(SDEIIR, I915_READ(SDEIIR)); | 1829 | I915_WRITE(SDEIIR, I915_READ(SDEIIR)); |
1457 | I915_WRITE(SDEIMR, dev_priv->pch_irq_mask_reg); | 1830 | I915_WRITE(SDEIMR, dev_priv->pch_irq_mask); |
1458 | I915_WRITE(SDEIER, dev_priv->pch_irq_enable_reg); | 1831 | I915_WRITE(SDEIER, hotplug_mask); |
1459 | (void) I915_READ(SDEIER); | 1832 | POSTING_READ(SDEIER); |
1460 | 1833 | ||
1461 | if (IS_IRONLAKE_M(dev)) { | 1834 | if (IS_IRONLAKE_M(dev)) { |
1462 | /* Clear & enable PCU event interrupts */ | 1835 | /* Clear & enable PCU event interrupts */ |
@@ -1468,55 +1841,93 @@ static int ironlake_irq_postinstall(struct drm_device *dev) | |||
1468 | return 0; | 1841 | return 0; |
1469 | } | 1842 | } |
1470 | 1843 | ||
1471 | void i915_driver_irq_preinstall(struct drm_device * dev) | 1844 | static int ivybridge_irq_postinstall(struct drm_device *dev) |
1845 | { | ||
1846 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | ||
1847 | /* enable kind of interrupts always enabled */ | ||
1848 | u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | | ||
1849 | DE_PCH_EVENT_IVB | DE_PLANEA_FLIP_DONE_IVB | | ||
1850 | DE_PLANEB_FLIP_DONE_IVB; | ||
1851 | u32 render_irqs; | ||
1852 | u32 hotplug_mask; | ||
1853 | |||
1854 | DRM_INIT_WAITQUEUE(&dev_priv->ring[RCS].irq_queue); | ||
1855 | if (HAS_BSD(dev)) | ||
1856 | DRM_INIT_WAITQUEUE(&dev_priv->ring[VCS].irq_queue); | ||
1857 | if (HAS_BLT(dev)) | ||
1858 | DRM_INIT_WAITQUEUE(&dev_priv->ring[BCS].irq_queue); | ||
1859 | |||
1860 | dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B; | ||
1861 | dev_priv->irq_mask = ~display_mask; | ||
1862 | |||
1863 | /* should always be able to generate an irq */ | ||
1864 | I915_WRITE(DEIIR, I915_READ(DEIIR)); | ||
1865 | I915_WRITE(DEIMR, dev_priv->irq_mask); | ||
1866 | I915_WRITE(DEIER, display_mask | DE_PIPEA_VBLANK_IVB | | ||
1867 | DE_PIPEB_VBLANK_IVB); | ||
1868 | POSTING_READ(DEIER); | ||
1869 | |||
1870 | dev_priv->gt_irq_mask = ~0; | ||
1871 | |||
1872 | I915_WRITE(GTIIR, I915_READ(GTIIR)); | ||
1873 | I915_WRITE(GTIMR, dev_priv->gt_irq_mask); | ||
1874 | |||
1875 | render_irqs = GT_USER_INTERRUPT | GT_GEN6_BSD_USER_INTERRUPT | | ||
1876 | GT_BLT_USER_INTERRUPT; | ||
1877 | I915_WRITE(GTIER, render_irqs); | ||
1878 | POSTING_READ(GTIER); | ||
1879 | |||
1880 | hotplug_mask = (SDE_CRT_HOTPLUG_CPT | | ||
1881 | SDE_PORTB_HOTPLUG_CPT | | ||
1882 | SDE_PORTC_HOTPLUG_CPT | | ||
1883 | SDE_PORTD_HOTPLUG_CPT); | ||
1884 | dev_priv->pch_irq_mask = ~hotplug_mask; | ||
1885 | |||
1886 | I915_WRITE(SDEIIR, I915_READ(SDEIIR)); | ||
1887 | I915_WRITE(SDEIMR, dev_priv->pch_irq_mask); | ||
1888 | I915_WRITE(SDEIER, hotplug_mask); | ||
1889 | POSTING_READ(SDEIER); | ||
1890 | |||
1891 | return 0; | ||
1892 | } | ||
1893 | |||
1894 | static void i915_driver_irq_preinstall(struct drm_device * dev) | ||
1472 | { | 1895 | { |
1473 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 1896 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
1897 | int pipe; | ||
1474 | 1898 | ||
1475 | atomic_set(&dev_priv->irq_received, 0); | 1899 | atomic_set(&dev_priv->irq_received, 0); |
1476 | 1900 | ||
1477 | INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func); | 1901 | INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func); |
1478 | INIT_WORK(&dev_priv->error_work, i915_error_work_func); | 1902 | INIT_WORK(&dev_priv->error_work, i915_error_work_func); |
1479 | 1903 | ||
1480 | if (HAS_PCH_SPLIT(dev)) { | ||
1481 | ironlake_irq_preinstall(dev); | ||
1482 | return; | ||
1483 | } | ||
1484 | |||
1485 | if (I915_HAS_HOTPLUG(dev)) { | 1904 | if (I915_HAS_HOTPLUG(dev)) { |
1486 | I915_WRITE(PORT_HOTPLUG_EN, 0); | 1905 | I915_WRITE(PORT_HOTPLUG_EN, 0); |
1487 | I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); | 1906 | I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); |
1488 | } | 1907 | } |
1489 | 1908 | ||
1490 | I915_WRITE(HWSTAM, 0xeffe); | 1909 | I915_WRITE(HWSTAM, 0xeffe); |
1491 | I915_WRITE(PIPEASTAT, 0); | 1910 | for_each_pipe(pipe) |
1492 | I915_WRITE(PIPEBSTAT, 0); | 1911 | I915_WRITE(PIPESTAT(pipe), 0); |
1493 | I915_WRITE(IMR, 0xffffffff); | 1912 | I915_WRITE(IMR, 0xffffffff); |
1494 | I915_WRITE(IER, 0x0); | 1913 | I915_WRITE(IER, 0x0); |
1495 | (void) I915_READ(IER); | 1914 | POSTING_READ(IER); |
1496 | } | 1915 | } |
1497 | 1916 | ||
1498 | /* | 1917 | /* |
1499 | * Must be called after intel_modeset_init or hotplug interrupts won't be | 1918 | * Must be called after intel_modeset_init or hotplug interrupts won't be |
1500 | * enabled correctly. | 1919 | * enabled correctly. |
1501 | */ | 1920 | */ |
1502 | int i915_driver_irq_postinstall(struct drm_device *dev) | 1921 | static int i915_driver_irq_postinstall(struct drm_device *dev) |
1503 | { | 1922 | { |
1504 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 1923 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
1505 | u32 enable_mask = I915_INTERRUPT_ENABLE_FIX | I915_INTERRUPT_ENABLE_VAR; | 1924 | u32 enable_mask = I915_INTERRUPT_ENABLE_FIX | I915_INTERRUPT_ENABLE_VAR; |
1506 | u32 error_mask; | 1925 | u32 error_mask; |
1507 | 1926 | ||
1508 | DRM_INIT_WAITQUEUE(&dev_priv->render_ring.irq_queue); | ||
1509 | |||
1510 | if (HAS_BSD(dev)) | ||
1511 | DRM_INIT_WAITQUEUE(&dev_priv->bsd_ring.irq_queue); | ||
1512 | |||
1513 | dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B; | 1927 | dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B; |
1514 | 1928 | ||
1515 | if (HAS_PCH_SPLIT(dev)) | ||
1516 | return ironlake_irq_postinstall(dev); | ||
1517 | |||
1518 | /* Unmask the interrupts that we always want on. */ | 1929 | /* Unmask the interrupts that we always want on. */ |
1519 | dev_priv->irq_mask_reg = ~I915_INTERRUPT_ENABLE_FIX; | 1930 | dev_priv->irq_mask = ~I915_INTERRUPT_ENABLE_FIX; |
1520 | 1931 | ||
1521 | dev_priv->pipestat[0] = 0; | 1932 | dev_priv->pipestat[0] = 0; |
1522 | dev_priv->pipestat[1] = 0; | 1933 | dev_priv->pipestat[1] = 0; |
@@ -1525,7 +1936,7 @@ int i915_driver_irq_postinstall(struct drm_device *dev) | |||
1525 | /* Enable in IER... */ | 1936 | /* Enable in IER... */ |
1526 | enable_mask |= I915_DISPLAY_PORT_INTERRUPT; | 1937 | enable_mask |= I915_DISPLAY_PORT_INTERRUPT; |
1527 | /* and unmask in IMR */ | 1938 | /* and unmask in IMR */ |
1528 | dev_priv->irq_mask_reg &= ~I915_DISPLAY_PORT_INTERRUPT; | 1939 | dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT; |
1529 | } | 1940 | } |
1530 | 1941 | ||
1531 | /* | 1942 | /* |
@@ -1543,9 +1954,9 @@ int i915_driver_irq_postinstall(struct drm_device *dev) | |||
1543 | } | 1954 | } |
1544 | I915_WRITE(EMR, error_mask); | 1955 | I915_WRITE(EMR, error_mask); |
1545 | 1956 | ||
1546 | I915_WRITE(IMR, dev_priv->irq_mask_reg); | 1957 | I915_WRITE(IMR, dev_priv->irq_mask); |
1547 | I915_WRITE(IER, enable_mask); | 1958 | I915_WRITE(IER, enable_mask); |
1548 | (void) I915_READ(IER); | 1959 | POSTING_READ(IER); |
1549 | 1960 | ||
1550 | if (I915_HAS_HOTPLUG(dev)) { | 1961 | if (I915_HAS_HOTPLUG(dev)) { |
1551 | u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN); | 1962 | u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN); |
@@ -1578,7 +1989,7 @@ int i915_driver_irq_postinstall(struct drm_device *dev) | |||
1578 | I915_WRITE(PORT_HOTPLUG_EN, hotplug_en); | 1989 | I915_WRITE(PORT_HOTPLUG_EN, hotplug_en); |
1579 | } | 1990 | } |
1580 | 1991 | ||
1581 | opregion_enable_asle(dev); | 1992 | intel_opregion_enable_asle(dev); |
1582 | 1993 | ||
1583 | return 0; | 1994 | return 0; |
1584 | } | 1995 | } |
@@ -1586,6 +1997,12 @@ int i915_driver_irq_postinstall(struct drm_device *dev) | |||
1586 | static void ironlake_irq_uninstall(struct drm_device *dev) | 1997 | static void ironlake_irq_uninstall(struct drm_device *dev) |
1587 | { | 1998 | { |
1588 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 1999 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
2000 | |||
2001 | if (!dev_priv) | ||
2002 | return; | ||
2003 | |||
2004 | dev_priv->vblank_pipe = 0; | ||
2005 | |||
1589 | I915_WRITE(HWSTAM, 0xffffffff); | 2006 | I915_WRITE(HWSTAM, 0xffffffff); |
1590 | 2007 | ||
1591 | I915_WRITE(DEIMR, 0xffffffff); | 2008 | I915_WRITE(DEIMR, 0xffffffff); |
@@ -1597,32 +2014,67 @@ static void ironlake_irq_uninstall(struct drm_device *dev) | |||
1597 | I915_WRITE(GTIIR, I915_READ(GTIIR)); | 2014 | I915_WRITE(GTIIR, I915_READ(GTIIR)); |
1598 | } | 2015 | } |
1599 | 2016 | ||
1600 | void i915_driver_irq_uninstall(struct drm_device * dev) | 2017 | static void i915_driver_irq_uninstall(struct drm_device * dev) |
1601 | { | 2018 | { |
1602 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 2019 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
2020 | int pipe; | ||
1603 | 2021 | ||
1604 | if (!dev_priv) | 2022 | if (!dev_priv) |
1605 | return; | 2023 | return; |
1606 | 2024 | ||
1607 | dev_priv->vblank_pipe = 0; | 2025 | dev_priv->vblank_pipe = 0; |
1608 | 2026 | ||
1609 | if (HAS_PCH_SPLIT(dev)) { | ||
1610 | ironlake_irq_uninstall(dev); | ||
1611 | return; | ||
1612 | } | ||
1613 | |||
1614 | if (I915_HAS_HOTPLUG(dev)) { | 2027 | if (I915_HAS_HOTPLUG(dev)) { |
1615 | I915_WRITE(PORT_HOTPLUG_EN, 0); | 2028 | I915_WRITE(PORT_HOTPLUG_EN, 0); |
1616 | I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); | 2029 | I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); |
1617 | } | 2030 | } |
1618 | 2031 | ||
1619 | I915_WRITE(HWSTAM, 0xffffffff); | 2032 | I915_WRITE(HWSTAM, 0xffffffff); |
1620 | I915_WRITE(PIPEASTAT, 0); | 2033 | for_each_pipe(pipe) |
1621 | I915_WRITE(PIPEBSTAT, 0); | 2034 | I915_WRITE(PIPESTAT(pipe), 0); |
1622 | I915_WRITE(IMR, 0xffffffff); | 2035 | I915_WRITE(IMR, 0xffffffff); |
1623 | I915_WRITE(IER, 0x0); | 2036 | I915_WRITE(IER, 0x0); |
1624 | 2037 | ||
1625 | I915_WRITE(PIPEASTAT, I915_READ(PIPEASTAT) & 0x8000ffff); | 2038 | for_each_pipe(pipe) |
1626 | I915_WRITE(PIPEBSTAT, I915_READ(PIPEBSTAT) & 0x8000ffff); | 2039 | I915_WRITE(PIPESTAT(pipe), |
2040 | I915_READ(PIPESTAT(pipe)) & 0x8000ffff); | ||
1627 | I915_WRITE(IIR, I915_READ(IIR)); | 2041 | I915_WRITE(IIR, I915_READ(IIR)); |
1628 | } | 2042 | } |
2043 | |||
2044 | void intel_irq_init(struct drm_device *dev) | ||
2045 | { | ||
2046 | dev->driver->get_vblank_counter = i915_get_vblank_counter; | ||
2047 | dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */ | ||
2048 | if (IS_G4X(dev) || IS_GEN5(dev) || IS_GEN6(dev) || IS_IVYBRIDGE(dev)) { | ||
2049 | dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */ | ||
2050 | dev->driver->get_vblank_counter = gm45_get_vblank_counter; | ||
2051 | } | ||
2052 | |||
2053 | |||
2054 | dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp; | ||
2055 | dev->driver->get_scanout_position = i915_get_crtc_scanoutpos; | ||
2056 | |||
2057 | if (IS_IVYBRIDGE(dev)) { | ||
2058 | /* Share pre & uninstall handlers with ILK/SNB */ | ||
2059 | dev->driver->irq_handler = ivybridge_irq_handler; | ||
2060 | dev->driver->irq_preinstall = ironlake_irq_preinstall; | ||
2061 | dev->driver->irq_postinstall = ivybridge_irq_postinstall; | ||
2062 | dev->driver->irq_uninstall = ironlake_irq_uninstall; | ||
2063 | dev->driver->enable_vblank = ivybridge_enable_vblank; | ||
2064 | dev->driver->disable_vblank = ivybridge_disable_vblank; | ||
2065 | } else if (HAS_PCH_SPLIT(dev)) { | ||
2066 | dev->driver->irq_handler = ironlake_irq_handler; | ||
2067 | dev->driver->irq_preinstall = ironlake_irq_preinstall; | ||
2068 | dev->driver->irq_postinstall = ironlake_irq_postinstall; | ||
2069 | dev->driver->irq_uninstall = ironlake_irq_uninstall; | ||
2070 | dev->driver->enable_vblank = ironlake_enable_vblank; | ||
2071 | dev->driver->disable_vblank = ironlake_disable_vblank; | ||
2072 | } else { | ||
2073 | dev->driver->irq_preinstall = i915_driver_irq_preinstall; | ||
2074 | dev->driver->irq_postinstall = i915_driver_irq_postinstall; | ||
2075 | dev->driver->irq_uninstall = i915_driver_irq_uninstall; | ||
2076 | dev->driver->irq_handler = i915_driver_irq_handler; | ||
2077 | dev->driver->enable_vblank = i915_enable_vblank; | ||
2078 | dev->driver->disable_vblank = i915_disable_vblank; | ||
2079 | } | ||
2080 | } | ||
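
The new intel_irq_init() gathers what used to be runtime HAS_PCH_SPLIT() branching into a one-time vtable setup. A minimal sketch of the intended call order, assuming the usual DRM load path (example_load is an illustrative name, not the driver's actual load function):

static int example_load(struct drm_device *dev)
{
	/* populate the dev->driver IRQ and vblank hooks for this generation */
	intel_irq_init(dev);

	/* modeset init must come first, per the comment above
	 * i915_driver_irq_postinstall() */
	/* ... intel_modeset_init(dev) ... */

	/* drm_irq_install() then invokes the preinstall/postinstall hooks */
	return drm_irq_install(dev);
}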
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 4f5e15577e89..5d5def756c9e 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h | |||
@@ -25,52 +25,16 @@ | |||
25 | #ifndef _I915_REG_H_ | 25 | #ifndef _I915_REG_H_ |
26 | #define _I915_REG_H_ | 26 | #define _I915_REG_H_ |
27 | 27 | ||
28 | #define _PIPE(pipe, a, b) ((a) + (pipe)*((b)-(a))) | ||
29 | |||
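
_PIPE() works because the A/B register banks sit at a constant stride: pipe 0 yields the A offset and pipe 1 adds (b - a) once. A standalone check, using only two values defined later in this header:

#include <assert.h>

#define _PIPE(pipe, a, b) ((a) + (pipe)*((b)-(a)))
#define _PIPEASTAT 0x70024
#define _PIPEBSTAT 0x71024
#define PIPESTAT(pipe) _PIPE(pipe, _PIPEASTAT, _PIPEBSTAT)

int main(void)
{
	assert(PIPESTAT(0) == 0x70024);	/* pipe A bank */
	assert(PIPESTAT(1) == 0x71024);	/* pipe B bank, one 0x1000 stride up */
	return 0;
}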
28 | /* | 30 | /* |
29 | * The Bridge device's PCI config space has information about the | 31 | * The Bridge device's PCI config space has information about the |
30 | * fb aperture size and the amount of pre-reserved memory. | 32 | * fb aperture size and the amount of pre-reserved memory. |
33 | * This is all handled in the intel-gtt.ko module. i915.ko only | ||
34 | * cares about the vga bit for the vga arbiter. | ||
31 | */ | 35 | */ |
32 | #define INTEL_GMCH_CTRL 0x52 | 36 | #define INTEL_GMCH_CTRL 0x52 |
33 | #define INTEL_GMCH_VGA_DISABLE (1 << 1) | 37 | #define INTEL_GMCH_VGA_DISABLE (1 << 1) |
34 | #define INTEL_GMCH_ENABLED 0x4 | ||
35 | #define INTEL_GMCH_MEM_MASK 0x1 | ||
36 | #define INTEL_GMCH_MEM_64M 0x1 | ||
37 | #define INTEL_GMCH_MEM_128M 0 | ||
38 | |||
39 | #define INTEL_GMCH_GMS_MASK (0xf << 4) | ||
40 | #define INTEL_855_GMCH_GMS_DISABLED (0x0 << 4) | ||
41 | #define INTEL_855_GMCH_GMS_STOLEN_1M (0x1 << 4) | ||
42 | #define INTEL_855_GMCH_GMS_STOLEN_4M (0x2 << 4) | ||
43 | #define INTEL_855_GMCH_GMS_STOLEN_8M (0x3 << 4) | ||
44 | #define INTEL_855_GMCH_GMS_STOLEN_16M (0x4 << 4) | ||
45 | #define INTEL_855_GMCH_GMS_STOLEN_32M (0x5 << 4) | ||
46 | |||
47 | #define INTEL_915G_GMCH_GMS_STOLEN_48M (0x6 << 4) | ||
48 | #define INTEL_915G_GMCH_GMS_STOLEN_64M (0x7 << 4) | ||
49 | #define INTEL_GMCH_GMS_STOLEN_128M (0x8 << 4) | ||
50 | #define INTEL_GMCH_GMS_STOLEN_256M (0x9 << 4) | ||
51 | #define INTEL_GMCH_GMS_STOLEN_96M (0xa << 4) | ||
52 | #define INTEL_GMCH_GMS_STOLEN_160M (0xb << 4) | ||
53 | #define INTEL_GMCH_GMS_STOLEN_224M (0xc << 4) | ||
54 | #define INTEL_GMCH_GMS_STOLEN_352M (0xd << 4) | ||
55 | |||
56 | #define SNB_GMCH_CTRL 0x50 | ||
57 | #define SNB_GMCH_GMS_STOLEN_MASK 0xF8 | ||
58 | #define SNB_GMCH_GMS_STOLEN_32M (1 << 3) | ||
59 | #define SNB_GMCH_GMS_STOLEN_64M (2 << 3) | ||
60 | #define SNB_GMCH_GMS_STOLEN_96M (3 << 3) | ||
61 | #define SNB_GMCH_GMS_STOLEN_128M (4 << 3) | ||
62 | #define SNB_GMCH_GMS_STOLEN_160M (5 << 3) | ||
63 | #define SNB_GMCH_GMS_STOLEN_192M (6 << 3) | ||
64 | #define SNB_GMCH_GMS_STOLEN_224M (7 << 3) | ||
65 | #define SNB_GMCH_GMS_STOLEN_256M (8 << 3) | ||
66 | #define SNB_GMCH_GMS_STOLEN_288M (9 << 3) | ||
67 | #define SNB_GMCH_GMS_STOLEN_320M (0xa << 3) | ||
68 | #define SNB_GMCH_GMS_STOLEN_352M (0xb << 3) | ||
69 | #define SNB_GMCH_GMS_STOLEN_384M (0xc << 3) | ||
70 | #define SNB_GMCH_GMS_STOLEN_416M (0xd << 3) | ||
71 | #define SNB_GMCH_GMS_STOLEN_448M (0xe << 3) | ||
72 | #define SNB_GMCH_GMS_STOLEN_480M (0xf << 3) | ||
73 | #define SNB_GMCH_GMS_STOLEN_512M (0x10 << 3) | ||
74 | 38 | ||
75 | /* PCI config space */ | 39 | /* PCI config space */ |
76 | 40 | ||
@@ -106,10 +70,19 @@ | |||
106 | #define I915_GC_RENDER_CLOCK_200_MHZ (1 << 0) | 70 | #define I915_GC_RENDER_CLOCK_200_MHZ (1 << 0) |
107 | #define I915_GC_RENDER_CLOCK_333_MHZ (4 << 0) | 71 | #define I915_GC_RENDER_CLOCK_333_MHZ (4 << 0) |
108 | #define LBB 0xf4 | 72 | #define LBB 0xf4 |
109 | #define GDRST 0xc0 | 73 | |
110 | #define GDRST_FULL (0<<2) | 74 | /* Graphics reset regs */ |
111 | #define GDRST_RENDER (1<<2) | 75 | #define I965_GDRST 0xc0 /* PCI config register */ |
112 | #define GDRST_MEDIA (3<<2) | 76 | #define ILK_GDSR 0x2ca4 /* MCHBAR offset */ |
77 | #define GRDOM_FULL (0<<2) | ||
78 | #define GRDOM_RENDER (1<<2) | ||
79 | #define GRDOM_MEDIA (3<<2) | ||
80 | |||
81 | #define GEN6_GDRST 0x941c | ||
82 | #define GEN6_GRDOM_FULL (1 << 0) | ||
83 | #define GEN6_GRDOM_RENDER (1 << 1) | ||
84 | #define GEN6_GRDOM_MEDIA (1 << 2) | ||
85 | #define GEN6_GRDOM_BLT (1 << 3) | ||
113 | 86 | ||
114 | /* VGA stuff */ | 87 | /* VGA stuff */ |
115 | 88 | ||
@@ -172,6 +145,8 @@ | |||
172 | #define MI_END_SCENE (1 << 4) /* flush binner and incr scene count */ | 145 | #define MI_END_SCENE (1 << 4) /* flush binner and incr scene count */ |
173 | #define MI_INVALIDATE_ISP (1 << 5) /* invalidate indirect state pointers */ | 146 | #define MI_INVALIDATE_ISP (1 << 5) /* invalidate indirect state pointers */ |
174 | #define MI_BATCH_BUFFER_END MI_INSTR(0x0a, 0) | 147 | #define MI_BATCH_BUFFER_END MI_INSTR(0x0a, 0) |
148 | #define MI_SUSPEND_FLUSH MI_INSTR(0x0b, 0) | ||
149 | #define MI_SUSPEND_FLUSH_EN (1<<0) | ||
175 | #define MI_REPORT_HEAD MI_INSTR(0x07, 0) | 150 | #define MI_REPORT_HEAD MI_INSTR(0x07, 0) |
176 | #define MI_OVERLAY_FLIP MI_INSTR(0x11,0) | 151 | #define MI_OVERLAY_FLIP MI_INSTR(0x11,0) |
177 | #define MI_OVERLAY_CONTINUE (0x0<<21) | 152 | #define MI_OVERLAY_CONTINUE (0x0<<21) |
@@ -186,17 +161,31 @@ | |||
186 | #define MI_MM_SPACE_PHYSICAL (0<<8) | 161 | #define MI_MM_SPACE_PHYSICAL (0<<8) |
187 | #define MI_SAVE_EXT_STATE_EN (1<<3) | 162 | #define MI_SAVE_EXT_STATE_EN (1<<3) |
188 | #define MI_RESTORE_EXT_STATE_EN (1<<2) | 163 | #define MI_RESTORE_EXT_STATE_EN (1<<2) |
164 | #define MI_FORCE_RESTORE (1<<1) | ||
189 | #define MI_RESTORE_INHIBIT (1<<0) | 165 | #define MI_RESTORE_INHIBIT (1<<0) |
190 | #define MI_STORE_DWORD_IMM MI_INSTR(0x20, 1) | 166 | #define MI_STORE_DWORD_IMM MI_INSTR(0x20, 1) |
191 | #define MI_MEM_VIRTUAL (1 << 22) /* 965+ only */ | 167 | #define MI_MEM_VIRTUAL (1 << 22) /* 965+ only */ |
192 | #define MI_STORE_DWORD_INDEX MI_INSTR(0x21, 1) | 168 | #define MI_STORE_DWORD_INDEX MI_INSTR(0x21, 1) |
193 | #define MI_STORE_DWORD_INDEX_SHIFT 2 | 169 | #define MI_STORE_DWORD_INDEX_SHIFT 2 |
194 | #define MI_LOAD_REGISTER_IMM MI_INSTR(0x22, 1) | 170 | /* Official intel docs are somewhat sloppy concerning MI_LOAD_REGISTER_IMM: |
171 | * - Always issue a MI_NOOP _before_ the MI_LOAD_REGISTER_IMM - otherwise hw | ||
172 | * simply ignores the register load under certain conditions. | ||
173 | * - One can actually load arbitrarily many registers: simply issue x | ||
174 | * address/value pairs. Don't overdo it, though; x <= 2^4 must hold! | ||
175 | */ | ||
176 | #define MI_LOAD_REGISTER_IMM(x) MI_INSTR(0x22, 2*x-1) | ||
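
A hedged sketch of how the two rules above combine when emitting a single register load through a ring, assuming the emit helpers from intel_ringbuffer.c with their usual (ring, dword) signatures:

static int example_emit_lri(struct intel_ring_buffer *ring, u32 reg, u32 val)
{
	int ret;

	/* one MI_NOOP, the LRI header, and one address/value pair */
	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_NOOP);			/* required before LRI */
	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));	/* x = 1 pair */
	intel_ring_emit(ring, reg);
	intel_ring_emit(ring, val);
	intel_ring_advance(ring);
	return 0;
}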
177 | #define MI_FLUSH_DW MI_INSTR(0x26, 1) /* for GEN6 */ | ||
178 | #define MI_INVALIDATE_TLB (1<<18) | ||
179 | #define MI_INVALIDATE_BSD (1<<7) | ||
195 | #define MI_BATCH_BUFFER MI_INSTR(0x30, 1) | 180 | #define MI_BATCH_BUFFER MI_INSTR(0x30, 1) |
196 | #define MI_BATCH_NON_SECURE (1) | 181 | #define MI_BATCH_NON_SECURE (1) |
197 | #define MI_BATCH_NON_SECURE_I965 (1<<8) | 182 | #define MI_BATCH_NON_SECURE_I965 (1<<8) |
198 | #define MI_BATCH_BUFFER_START MI_INSTR(0x31, 0) | 183 | #define MI_BATCH_BUFFER_START MI_INSTR(0x31, 0) |
199 | 184 | #define MI_SEMAPHORE_MBOX MI_INSTR(0x16, 1) /* gen6+ */ | |
185 | #define MI_SEMAPHORE_GLOBAL_GTT (1<<22) | ||
186 | #define MI_SEMAPHORE_UPDATE (1<<21) | ||
187 | #define MI_SEMAPHORE_COMPARE (1<<20) | ||
188 | #define MI_SEMAPHORE_REGISTER (1<<18) | ||
200 | /* | 189 | /* |
201 | * 3D instructions used by the kernel | 190 | * 3D instructions used by the kernel |
202 | */ | 191 | */ |
@@ -249,6 +238,16 @@ | |||
249 | #define PIPE_CONTROL_GLOBAL_GTT (1<<2) /* in addr dword */ | 238 | #define PIPE_CONTROL_GLOBAL_GTT (1<<2) /* in addr dword */ |
250 | #define PIPE_CONTROL_STALL_EN (1<<1) /* in addr word, Ironlake+ only */ | 239 | #define PIPE_CONTROL_STALL_EN (1<<1) /* in addr word, Ironlake+ only */ |
251 | 240 | ||
241 | |||
242 | /* | ||
243 | * Reset registers | ||
244 | */ | ||
245 | #define DEBUG_RESET_I830 0x6070 | ||
246 | #define DEBUG_RESET_FULL (1<<7) | ||
247 | #define DEBUG_RESET_RENDER (1<<8) | ||
248 | #define DEBUG_RESET_DISPLAY (1<<9) | ||
249 | |||
250 | |||
252 | /* | 251 | /* |
253 | * Fence registers | 252 | * Fence registers |
254 | */ | 253 | */ |
@@ -279,10 +278,25 @@ | |||
279 | * Instruction and interrupt control regs | 278 | * Instruction and interrupt control regs |
280 | */ | 279 | */ |
281 | #define PGTBL_ER 0x02024 | 280 | #define PGTBL_ER 0x02024 |
282 | #define PRB0_TAIL 0x02030 | 281 | #define RENDER_RING_BASE 0x02000 |
283 | #define PRB0_HEAD 0x02034 | 282 | #define BSD_RING_BASE 0x04000 |
284 | #define PRB0_START 0x02038 | 283 | #define GEN6_BSD_RING_BASE 0x12000 |
285 | #define PRB0_CTL 0x0203c | 284 | #define BLT_RING_BASE 0x22000 |
285 | #define RING_TAIL(base) ((base)+0x30) | ||
286 | #define RING_HEAD(base) ((base)+0x34) | ||
287 | #define RING_START(base) ((base)+0x38) | ||
288 | #define RING_CTL(base) ((base)+0x3c) | ||
289 | #define RING_SYNC_0(base) ((base)+0x40) | ||
290 | #define RING_SYNC_1(base) ((base)+0x44) | ||
291 | #define RING_MAX_IDLE(base) ((base)+0x54) | ||
292 | #define RING_HWS_PGA(base) ((base)+0x80) | ||
293 | #define RING_HWS_PGA_GEN6(base) ((base)+0x2080) | ||
294 | #define RENDER_HWS_PGA_GEN7 (0x04080) | ||
295 | #define BSD_HWS_PGA_GEN7 (0x04180) | ||
296 | #define BLT_HWS_PGA_GEN7 (0x04280) | ||
297 | #define RING_ACTHD(base) ((base)+0x74) | ||
298 | #define RING_NOPID(base) ((base)+0x94) | ||
299 | #define RING_IMR(base) ((base)+0xa8) | ||
286 | #define TAIL_ADDR 0x001FFFF8 | 300 | #define TAIL_ADDR 0x001FFFF8 |
287 | #define HEAD_WRAP_COUNT 0xFFE00000 | 301 | #define HEAD_WRAP_COUNT 0xFFE00000 |
288 | #define HEAD_WRAP_ONE 0x00200000 | 302 | #define HEAD_WRAP_ONE 0x00200000 |
@@ -295,10 +309,19 @@ | |||
295 | #define RING_VALID_MASK 0x00000001 | 309 | #define RING_VALID_MASK 0x00000001 |
296 | #define RING_VALID 0x00000001 | 310 | #define RING_VALID 0x00000001 |
297 | #define RING_INVALID 0x00000000 | 311 | #define RING_INVALID 0x00000000 |
312 | #define RING_WAIT_I8XX (1<<0) /* gen2, PRBx_HEAD */ | ||
313 | #define RING_WAIT (1<<11) /* gen3+, PRBx_CTL */ | ||
314 | #define RING_WAIT_SEMAPHORE (1<<10) /* gen6+ */ | ||
315 | #if 0 | ||
316 | #define PRB0_TAIL 0x02030 | ||
317 | #define PRB0_HEAD 0x02034 | ||
318 | #define PRB0_START 0x02038 | ||
319 | #define PRB0_CTL 0x0203c | ||
298 | #define PRB1_TAIL 0x02040 /* 915+ only */ | 320 | #define PRB1_TAIL 0x02040 /* 915+ only */ |
299 | #define PRB1_HEAD 0x02044 /* 915+ only */ | 321 | #define PRB1_HEAD 0x02044 /* 915+ only */ |
300 | #define PRB1_START 0x02048 /* 915+ only */ | 322 | #define PRB1_START 0x02048 /* 915+ only */ |
301 | #define PRB1_CTL 0x0204c /* 915+ only */ | 323 | #define PRB1_CTL 0x0204c /* 915+ only */ |
324 | #endif | ||
302 | #define IPEIR_I965 0x02064 | 325 | #define IPEIR_I965 0x02064 |
303 | #define IPEHR_I965 0x02068 | 326 | #define IPEHR_I965 0x02068 |
304 | #define INSTDONE_I965 0x0206c | 327 | #define INSTDONE_I965 0x0206c |
@@ -306,7 +329,6 @@ | |||
306 | #define INSTDONE1 0x0207c /* 965+ only */ | 329 | #define INSTDONE1 0x0207c /* 965+ only */ |
307 | #define ACTHD_I965 0x02074 | 330 | #define ACTHD_I965 0x02074 |
308 | #define HWS_PGA 0x02080 | 331 | #define HWS_PGA 0x02080 |
309 | #define HWS_PGA_GEN6 0x04080 | ||
310 | #define HWS_ADDRESS_MASK 0xfffff000 | 332 | #define HWS_ADDRESS_MASK 0xfffff000 |
311 | #define HWS_START_ADDRESS_SHIFT 4 | 333 | #define HWS_START_ADDRESS_SHIFT 4 |
312 | #define PWRCTXA 0x2088 /* 965GM+ only */ | 334 | #define PWRCTXA 0x2088 /* 965GM+ only */ |
@@ -316,11 +338,42 @@ | |||
316 | #define INSTDONE 0x02090 | 338 | #define INSTDONE 0x02090 |
317 | #define NOPID 0x02094 | 339 | #define NOPID 0x02094 |
318 | #define HWSTAM 0x02098 | 340 | #define HWSTAM 0x02098 |
341 | #define VCS_INSTDONE 0x1206C | ||
342 | #define VCS_IPEIR 0x12064 | ||
343 | #define VCS_IPEHR 0x12068 | ||
344 | #define VCS_ACTHD 0x12074 | ||
345 | #define BCS_INSTDONE 0x2206C | ||
346 | #define BCS_IPEIR 0x22064 | ||
347 | #define BCS_IPEHR 0x22068 | ||
348 | #define BCS_ACTHD 0x22074 | ||
349 | |||
350 | #define ERROR_GEN6 0x040a0 | ||
351 | |||
352 | /* GM45+ chicken bits -- debug workaround bits that may be required | ||
353 | * for various sorts of correct behavior. The top 16 bits of each are | ||
354 | * the enables for writing to the corresponding low bit. | ||
355 | */ | ||
356 | #define _3D_CHICKEN 0x02084 | ||
357 | #define _3D_CHICKEN2 0x0208c | ||
358 | /* Disables pipelining of read flushes past the SF-WIZ interface. | ||
359 | * Required on all Ironlake steppings according to the B-Spec, but the | ||
360 | * particular danger of not doing so is not specified. | ||
361 | */ | ||
362 | # define _3D_CHICKEN2_WM_READ_PIPELINED (1 << 14) | ||
363 | #define _3D_CHICKEN3 0x02090 | ||
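
Because the top 16 bits are per-bit write enables, a chicken bit is set by writing the enable and the value together in one store; no read-modify-write is needed. A sketch of the Ironlake workaround write implied by the comment above (dev_priv in scope, as for any I915_WRITE user):

static void example_disable_wm_read_pipelining(struct drm_i915_private *dev_priv)
{
	/* bit 30 is the write enable for bit 14; both go in a single write */
	I915_WRITE(_3D_CHICKEN2,
		   _3D_CHICKEN2_WM_READ_PIPELINED << 16 |
		   _3D_CHICKEN2_WM_READ_PIPELINED);
}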
319 | 364 | ||
320 | #define MI_MODE 0x0209c | 365 | #define MI_MODE 0x0209c |
321 | # define VS_TIMER_DISPATCH (1 << 6) | 366 | # define VS_TIMER_DISPATCH (1 << 6) |
322 | # define MI_FLUSH_ENABLE (1 << 11) | 367 | # define MI_FLUSH_ENABLE (1 << 11) |
323 | 368 | ||
369 | #define GFX_MODE 0x02520 | ||
370 | #define GFX_RUN_LIST_ENABLE (1<<15) | ||
371 | #define GFX_TLB_INVALIDATE_ALWAYS (1<<13) | ||
372 | #define GFX_SURFACE_FAULT_ENABLE (1<<12) | ||
373 | #define GFX_REPLAY_MODE (1<<11) | ||
374 | #define GFX_PSMI_GRANULARITY (1<<10) | ||
375 | #define GFX_PPGTT_ENABLE (1<<9) | ||
376 | |||
324 | #define SCPD0 0x0209c /* 915+ only */ | 377 | #define SCPD0 0x0209c /* 915+ only */ |
325 | #define IER 0x020a0 | 378 | #define IER 0x020a0 |
326 | #define IIR 0x020a4 | 379 | #define IIR 0x020a4 |
@@ -355,9 +408,12 @@ | |||
355 | #define I915_ERROR_INSTRUCTION (1<<0) | 408 | #define I915_ERROR_INSTRUCTION (1<<0) |
356 | #define INSTPM 0x020c0 | 409 | #define INSTPM 0x020c0 |
357 | #define INSTPM_SELF_EN (1<<12) /* 915GM only */ | 410 | #define INSTPM_SELF_EN (1<<12) /* 915GM only */ |
411 | #define INSTPM_AGPBUSY_DIS (1<<11) /* gen3: when disabled, pending interrupts | ||
412 | will not assert AGPBUSY# and will only | ||
413 | be delivered when out of C3. */ | ||
358 | #define ACTHD 0x020c8 | 414 | #define ACTHD 0x020c8 |
359 | #define FW_BLC 0x020d8 | 415 | #define FW_BLC 0x020d8 |
360 | #define FW_BLC2 0x020dc | 416 | #define FW_BLC2 0x020dc |
361 | #define FW_BLC_SELF 0x020e0 /* 915+ only */ | 417 | #define FW_BLC_SELF 0x020e0 /* 915+ only */ |
362 | #define FW_BLC_SELF_EN_MASK (1<<31) | 418 | #define FW_BLC_SELF_EN_MASK (1<<31) |
363 | #define FW_BLC_SELF_FIFO_MASK (1<<16) /* 945 only */ | 419 | #define FW_BLC_SELF_FIFO_MASK (1<<16) /* 945 only */ |
@@ -464,17 +520,22 @@ | |||
464 | #define GEN6_BLITTER_COMMAND_PARSER_MASTER_ERROR (1 << 25) | 520 | #define GEN6_BLITTER_COMMAND_PARSER_MASTER_ERROR (1 << 25) |
465 | #define GEN6_BLITTER_SYNC_STATUS (1 << 24) | 521 | #define GEN6_BLITTER_SYNC_STATUS (1 << 24) |
466 | #define GEN6_BLITTER_USER_INTERRUPT (1 << 22) | 522 | #define GEN6_BLITTER_USER_INTERRUPT (1 << 22) |
467 | /* | ||
468 | * BSD (bit stream decoder instruction and interrupt control register defines | ||
469 | * (G4X and Ironlake only) | ||
470 | */ | ||
471 | 523 | ||
472 | #define BSD_RING_TAIL 0x04030 | 524 | #define GEN6_BLITTER_ECOSKPD 0x221d0 |
473 | #define BSD_RING_HEAD 0x04034 | 525 | #define GEN6_BLITTER_LOCK_SHIFT 16 |
474 | #define BSD_RING_START 0x04038 | 526 | #define GEN6_BLITTER_FBC_NOTIFY (1<<3) |
475 | #define BSD_RING_CTL 0x0403c | 527 | |
476 | #define BSD_RING_ACTHD 0x04074 | 528 | #define GEN6_BSD_SLEEP_PSMI_CONTROL 0x12050 |
477 | #define BSD_HWS_PGA 0x04080 | 529 | #define GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK (1 << 16) |
530 | #define GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_DISABLE (1 << 0) | ||
531 | #define GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE 0 | ||
532 | #define GEN6_BSD_SLEEP_PSMI_CONTROL_IDLE_INDICATOR (1 << 3) | ||
533 | |||
534 | #define GEN6_BSD_HWSTAM 0x12098 | ||
535 | #define GEN6_BSD_IMR 0x120a8 | ||
536 | #define GEN6_BSD_USER_INTERRUPT (1 << 12) | ||
537 | |||
538 | #define GEN6_BSD_RNCID 0x12198 | ||
478 | 539 | ||
479 | /* | 540 | /* |
480 | * Framebuffer compression (915+ only) | 541 | * Framebuffer compression (915+ only) |
@@ -552,6 +613,18 @@ | |||
552 | 613 | ||
553 | #define ILK_DISPLAY_CHICKEN1 0x42000 | 614 | #define ILK_DISPLAY_CHICKEN1 0x42000 |
554 | #define ILK_FBCQ_DIS (1<<22) | 615 | #define ILK_FBCQ_DIS (1<<22) |
616 | #define ILK_PABSTRETCH_DIS (1<<21) | ||
617 | |||
618 | |||
619 | /* | ||
620 | * Framebuffer compression for Sandybridge | ||
621 | * | ||
622 | * The following two registers are of type GTTMMADR | ||
623 | */ | ||
624 | #define SNB_DPFC_CTL_SA 0x100100 | ||
625 | #define SNB_CPU_FENCE_ENABLE (1<<29) | ||
626 | #define DPFC_CPU_FENCE_OFFSET 0x100104 | ||
627 | |||
555 | 628 | ||
556 | /* | 629 | /* |
557 | * GPIO regs | 630 | * GPIO regs |
@@ -579,12 +652,51 @@ | |||
579 | # define GPIO_DATA_VAL_IN (1 << 12) | 652 | # define GPIO_DATA_VAL_IN (1 << 12) |
580 | # define GPIO_DATA_PULLUP_DISABLE (1 << 13) | 653 | # define GPIO_DATA_PULLUP_DISABLE (1 << 13) |
581 | 654 | ||
582 | #define GMBUS0 0x5100 | 655 | #define GMBUS0 0x5100 /* clock/port select */ |
583 | #define GMBUS1 0x5104 | 656 | #define GMBUS_RATE_100KHZ (0<<8) |
584 | #define GMBUS2 0x5108 | 657 | #define GMBUS_RATE_50KHZ (1<<8) |
585 | #define GMBUS3 0x510c | 658 | #define GMBUS_RATE_400KHZ (2<<8) /* reserved on Pineview */ |
586 | #define GMBUS4 0x5110 | 659 | #define GMBUS_RATE_1MHZ (3<<8) /* reserved on Pineview */ |
587 | #define GMBUS5 0x5120 | 660 | #define GMBUS_HOLD_EXT (1<<7) /* 300ns hold time, rsvd on Pineview */ |
661 | #define GMBUS_PORT_DISABLED 0 | ||
662 | #define GMBUS_PORT_SSC 1 | ||
663 | #define GMBUS_PORT_VGADDC 2 | ||
664 | #define GMBUS_PORT_PANEL 3 | ||
665 | #define GMBUS_PORT_DPC 4 /* HDMIC */ | ||
666 | #define GMBUS_PORT_DPB 5 /* SDVO, HDMIB */ | ||
667 | /* 6 reserved */ | ||
668 | #define GMBUS_PORT_DPD 7 /* HDMID */ | ||
669 | #define GMBUS_NUM_PORTS 8 | ||
670 | #define GMBUS1 0x5104 /* command/status */ | ||
671 | #define GMBUS_SW_CLR_INT (1<<31) | ||
672 | #define GMBUS_SW_RDY (1<<30) | ||
673 | #define GMBUS_ENT (1<<29) /* enable timeout */ | ||
674 | #define GMBUS_CYCLE_NONE (0<<25) | ||
675 | #define GMBUS_CYCLE_WAIT (1<<25) | ||
676 | #define GMBUS_CYCLE_INDEX (2<<25) | ||
677 | #define GMBUS_CYCLE_STOP (4<<25) | ||
678 | #define GMBUS_BYTE_COUNT_SHIFT 16 | ||
679 | #define GMBUS_SLAVE_INDEX_SHIFT 8 | ||
680 | #define GMBUS_SLAVE_ADDR_SHIFT 1 | ||
681 | #define GMBUS_SLAVE_READ (1<<0) | ||
682 | #define GMBUS_SLAVE_WRITE (0<<0) | ||
683 | #define GMBUS2 0x5108 /* status */ | ||
684 | #define GMBUS_INUSE (1<<15) | ||
685 | #define GMBUS_HW_WAIT_PHASE (1<<14) | ||
686 | #define GMBUS_STALL_TIMEOUT (1<<13) | ||
687 | #define GMBUS_INT (1<<12) | ||
688 | #define GMBUS_HW_RDY (1<<11) | ||
689 | #define GMBUS_SATOER (1<<10) | ||
690 | #define GMBUS_ACTIVE (1<<9) | ||
691 | #define GMBUS3 0x510c /* data buffer bytes 3-0 */ | ||
692 | #define GMBUS4 0x5110 /* interrupt mask (Pineview+) */ | ||
693 | #define GMBUS_SLAVE_TIMEOUT_EN (1<<4) | ||
694 | #define GMBUS_NAK_EN (1<<3) | ||
695 | #define GMBUS_IDLE_EN (1<<2) | ||
696 | #define GMBUS_HW_WAIT_EN (1<<1) | ||
697 | #define GMBUS_HW_RDY_EN (1<<0) | ||
698 | #define GMBUS5 0x5120 /* byte index */ | ||
699 | #define GMBUS_2BYTE_INDEX_EN (1<<31) | ||
588 | 700 | ||
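
Taken together, the GMBUS registers form a small I2C state machine: select a port and rate in GMBUS0, kick off a cycle in GMBUS1, poll GMBUS2, and move data through GMBUS3. A hedged sketch of a one-byte read (the real, interrupt-driven version lives in intel_i2c.c; timeouts and error handling are elided, dev_priv in scope):

static u8 example_gmbus_read_byte(struct drm_i915_private *dev_priv, u8 addr)
{
	I915_WRITE(GMBUS0, GMBUS_RATE_100KHZ | GMBUS_PORT_VGADDC);
	I915_WRITE(GMBUS1, GMBUS_SW_RDY | GMBUS_CYCLE_WAIT | GMBUS_CYCLE_STOP |
		   (1 << GMBUS_BYTE_COUNT_SHIFT) |
		   (addr << GMBUS_SLAVE_ADDR_SHIFT) | GMBUS_SLAVE_READ);

	/* poll until the byte is latched (GMBUS_SATOER would signal a NAK) */
	while ((I915_READ(GMBUS2) & (GMBUS_SATOER | GMBUS_HW_RDY)) == 0)
		cpu_relax();

	return I915_READ(GMBUS3) & 0xff;	/* data buffer, bytes 3-0 */
}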
589 | /* | 701 | /* |
590 | * Clock control & power management | 702 | * Clock control & power management |
@@ -601,8 +713,9 @@ | |||
601 | #define VGA1_PD_P1_DIV_2 (1 << 13) | 713 | #define VGA1_PD_P1_DIV_2 (1 << 13) |
602 | #define VGA1_PD_P1_SHIFT 8 | 714 | #define VGA1_PD_P1_SHIFT 8 |
603 | #define VGA1_PD_P1_MASK (0x1f << 8) | 715 | #define VGA1_PD_P1_MASK (0x1f << 8) |
604 | #define DPLL_A 0x06014 | 716 | #define _DPLL_A 0x06014 |
605 | #define DPLL_B 0x06018 | 717 | #define _DPLL_B 0x06018 |
718 | #define DPLL(pipe) _PIPE(pipe, _DPLL_A, _DPLL_B) | ||
606 | #define DPLL_VCO_ENABLE (1 << 31) | 719 | #define DPLL_VCO_ENABLE (1 << 31) |
607 | #define DPLL_DVO_HIGH_SPEED (1 << 30) | 720 | #define DPLL_DVO_HIGH_SPEED (1 << 30) |
608 | #define DPLL_SYNCLOCK_ENABLE (1 << 29) | 721 | #define DPLL_SYNCLOCK_ENABLE (1 << 29) |
@@ -633,31 +746,6 @@ | |||
633 | #define LVDS 0x61180 | 746 | #define LVDS 0x61180 |
634 | #define LVDS_ON (1<<31) | 747 | #define LVDS_ON (1<<31) |
635 | 748 | ||
636 | #define ADPA 0x61100 | ||
637 | #define ADPA_DPMS_MASK (~(3<<10)) | ||
638 | #define ADPA_DPMS_ON (0<<10) | ||
639 | #define ADPA_DPMS_SUSPEND (1<<10) | ||
640 | #define ADPA_DPMS_STANDBY (2<<10) | ||
641 | #define ADPA_DPMS_OFF (3<<10) | ||
642 | |||
643 | #define RING_TAIL 0x00 | ||
644 | #define TAIL_ADDR 0x001FFFF8 | ||
645 | #define RING_HEAD 0x04 | ||
646 | #define HEAD_WRAP_COUNT 0xFFE00000 | ||
647 | #define HEAD_WRAP_ONE 0x00200000 | ||
648 | #define HEAD_ADDR 0x001FFFFC | ||
649 | #define RING_START 0x08 | ||
650 | #define START_ADDR 0xFFFFF000 | ||
651 | #define RING_LEN 0x0C | ||
652 | #define RING_NR_PAGES 0x001FF000 | ||
653 | #define RING_REPORT_MASK 0x00000006 | ||
654 | #define RING_REPORT_64K 0x00000002 | ||
655 | #define RING_REPORT_128K 0x00000004 | ||
656 | #define RING_NO_REPORT 0x00000000 | ||
657 | #define RING_VALID_MASK 0x00000001 | ||
658 | #define RING_VALID 0x00000001 | ||
659 | #define RING_INVALID 0x00000000 | ||
660 | |||
661 | /* Scratch pad debug 0 reg: | 749 | /* Scratch pad debug 0 reg: |
662 | */ | 750 | */ |
663 | #define DPLL_FPA01_P1_POST_DIV_MASK_I830 0x001f0000 | 751 | #define DPLL_FPA01_P1_POST_DIV_MASK_I830 0x001f0000 |
@@ -698,7 +786,7 @@ | |||
698 | #define SDVO_MULTIPLIER_MASK 0x000000ff | 786 | #define SDVO_MULTIPLIER_MASK 0x000000ff |
699 | #define SDVO_MULTIPLIER_SHIFT_HIRES 4 | 787 | #define SDVO_MULTIPLIER_SHIFT_HIRES 4 |
700 | #define SDVO_MULTIPLIER_SHIFT_VGA 0 | 788 | #define SDVO_MULTIPLIER_SHIFT_VGA 0 |
701 | #define DPLL_A_MD 0x0601c /* 965+ only */ | 789 | #define _DPLL_A_MD 0x0601c /* 965+ only */ |
702 | /* | 790 | /* |
703 | * UDI pixel divider, controlling how many pixels are stuffed into a packet. | 791 | * UDI pixel divider, controlling how many pixels are stuffed into a packet. |
704 | * | 792 | * |
@@ -735,11 +823,14 @@ | |||
735 | */ | 823 | */ |
736 | #define DPLL_MD_VGA_UDI_MULTIPLIER_MASK 0x0000003f | 824 | #define DPLL_MD_VGA_UDI_MULTIPLIER_MASK 0x0000003f |
737 | #define DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT 0 | 825 | #define DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT 0 |
738 | #define DPLL_B_MD 0x06020 /* 965+ only */ | 826 | #define _DPLL_B_MD 0x06020 /* 965+ only */ |
739 | #define FPA0 0x06040 | 827 | #define DPLL_MD(pipe) _PIPE(pipe, _DPLL_A_MD, _DPLL_B_MD) |
740 | #define FPA1 0x06044 | 828 | #define _FPA0 0x06040 |
741 | #define FPB0 0x06048 | 829 | #define _FPA1 0x06044 |
742 | #define FPB1 0x0604c | 830 | #define _FPB0 0x06048 |
831 | #define _FPB1 0x0604c | ||
832 | #define FP0(pipe) _PIPE(pipe, _FPA0, _FPB0) | ||
833 | #define FP1(pipe) _PIPE(pipe, _FPA1, _FPB1) | ||
743 | #define FP_N_DIV_MASK 0x003f0000 | 834 | #define FP_N_DIV_MASK 0x003f0000 |
744 | #define FP_N_PINEVIEW_DIV_MASK 0x00ff0000 | 835 | #define FP_N_PINEVIEW_DIV_MASK 0x00ff0000 |
745 | #define FP_N_DIV_SHIFT 16 | 836 | #define FP_N_DIV_SHIFT 16 |
@@ -760,6 +851,7 @@ | |||
760 | #define DPLLA_TEST_M_BYPASS (1 << 2) | 851 | #define DPLLA_TEST_M_BYPASS (1 << 2) |
761 | #define DPLLA_INPUT_BUFFER_ENABLE (1 << 0) | 852 | #define DPLLA_INPUT_BUFFER_ENABLE (1 << 0) |
762 | #define D_STATE 0x6104 | 853 | #define D_STATE 0x6104 |
854 | #define DSTATE_GFX_RESET_I830 (1<<6) | ||
763 | #define DSTATE_PLL_D3_OFF (1<<3) | 855 | #define DSTATE_PLL_D3_OFF (1<<3) |
764 | #define DSTATE_GFX_CLOCK_GATING (1<<1) | 856 | #define DSTATE_GFX_CLOCK_GATING (1<<1) |
765 | #define DSTATE_DOT_CLOCK_GATING (1<<0) | 857 | #define DSTATE_DOT_CLOCK_GATING (1<<0) |
@@ -877,8 +969,9 @@ | |||
877 | * Palette regs | 969 | * Palette regs |
878 | */ | 970 | */ |
879 | 971 | ||
880 | #define PALETTE_A 0x0a000 | 972 | #define _PALETTE_A 0x0a000 |
881 | #define PALETTE_B 0x0a800 | 973 | #define _PALETTE_B 0x0a800 |
974 | #define PALETTE(pipe) _PIPE(pipe, _PALETTE_A, _PALETTE_B) | ||
882 | 975 | ||
883 | /* MCH MMIO space */ | 976 | /* MCH MMIO space */ |
884 | 977 | ||
@@ -892,6 +985,8 @@ | |||
892 | */ | 985 | */ |
893 | #define MCHBAR_MIRROR_BASE 0x10000 | 986 | #define MCHBAR_MIRROR_BASE 0x10000 |
894 | 987 | ||
988 | #define MCHBAR_MIRROR_BASE_SNB 0x140000 | ||
989 | |||
895 | /** 915-945 and GM965 MCH register controlling DRAM channel access */ | 990 | /** 915-945 and GM965 MCH register controlling DRAM channel access */ |
896 | #define DCC 0x10200 | 991 | #define DCC 0x10200 |
897 | #define DCC_ADDRESSING_MODE_SINGLE_CHANNEL (0 << 0) | 992 | #define DCC_ADDRESSING_MODE_SINGLE_CHANNEL (0 << 0) |
@@ -926,6 +1021,8 @@ | |||
926 | #define CLKCFG_MEM_800 (3 << 4) | 1021 | #define CLKCFG_MEM_800 (3 << 4) |
927 | #define CLKCFG_MEM_MASK (7 << 4) | 1022 | #define CLKCFG_MEM_MASK (7 << 4) |
928 | 1023 | ||
1024 | #define TSC1 0x11001 | ||
1025 | #define TSE (1<<0) | ||
929 | #define TR1 0x11006 | 1026 | #define TR1 0x11006 |
930 | #define TSFS 0x11020 | 1027 | #define TSFS 0x11020 |
931 | #define TSFS_SLOPE_MASK 0x0000ff00 | 1028 | #define TSFS_SLOPE_MASK 0x0000ff00 |
@@ -1051,9 +1148,50 @@ | |||
1051 | #define RCBMINAVG 0x111a0 | 1148 | #define RCBMINAVG 0x111a0 |
1052 | #define RCUPEI 0x111b0 | 1149 | #define RCUPEI 0x111b0 |
1053 | #define RCDNEI 0x111b4 | 1150 | #define RCDNEI 0x111b4 |
1054 | #define MCHBAR_RENDER_STANDBY 0x111b8 | 1151 | #define RSTDBYCTL 0x111b8 |
1055 | #define RCX_SW_EXIT (1<<23) | 1152 | #define RS1EN (1<<31) |
1056 | #define RSX_STATUS_MASK 0x00700000 | 1153 | #define RS2EN (1<<30) |
1154 | #define RS3EN (1<<29) | ||
1155 | #define D3RS3EN (1<<28) /* Display D3 implies RS3 */ | ||
1156 | #define SWPROMORSX (1<<27) /* RSx promotion timers ignored */ | ||
1157 | #define RCWAKERW (1<<26) /* Resetwarn from PCH causes wakeup */ | ||
1158 | #define DPRSLPVREN (1<<25) /* Fast voltage ramp enable */ | ||
1159 | #define GFXTGHYST (1<<24) /* Hysteresis to allow trunk gating */ | ||
1160 | #define RCX_SW_EXIT (1<<23) /* Leave RSx and prevent re-entry */ | ||
1161 | #define RSX_STATUS_MASK (7<<20) | ||
1162 | #define RSX_STATUS_ON (0<<20) | ||
1163 | #define RSX_STATUS_RC1 (1<<20) | ||
1164 | #define RSX_STATUS_RC1E (2<<20) | ||
1165 | #define RSX_STATUS_RS1 (3<<20) | ||
1166 | #define RSX_STATUS_RS2 (4<<20) /* aka rc6 */ | ||
1167 | #define RSX_STATUS_RSVD (5<<20) /* deep rc6 unsupported on ilk */ | ||
1168 | #define RSX_STATUS_RS3 (6<<20) /* rs3 unsupported on ilk */ | ||
1169 | #define RSX_STATUS_RSVD2 (7<<20) | ||
1170 | #define UWRCRSXE (1<<19) /* wake counter limit prevents rsx */ | ||
1171 | #define RSCRP (1<<18) /* rs requests control on rs1/2 reqs */ | ||
1172 | #define JRSC (1<<17) /* rsx coupled to cpu c-state */ | ||
1173 | #define RS2INC0 (1<<16) /* allow rs2 in cpu c0 */ | ||
1174 | #define RS1CONTSAV_MASK (3<<14) | ||
1175 | #define RS1CONTSAV_NO_RS1 (0<<14) /* rs1 doesn't save/restore context */ | ||
1176 | #define RS1CONTSAV_RSVD (1<<14) | ||
1177 | #define RS1CONTSAV_SAVE_RS1 (2<<14) /* rs1 saves context */ | ||
1178 | #define RS1CONTSAV_FULL_RS1 (3<<14) /* rs1 saves and restores context */ | ||
1179 | #define NORMSLEXLAT_MASK (3<<12) | ||
1180 | #define SLOW_RS123 (0<<12) | ||
1181 | #define SLOW_RS23 (1<<12) | ||
1182 | #define SLOW_RS3 (2<<12) | ||
1183 | #define NORMAL_RS123 (3<<12) | ||
1184 | #define RCMODE_TIMEOUT (1<<11) /* 0 is eval interval method */ | ||
1185 | #define IMPROMOEN (1<<10) /* promo is immediate or delayed until next idle interval (only for timeout method above) */ | ||
1186 | #define RCENTSYNC (1<<9) /* rs coupled to cpu c-state (3/6/7) */ | ||
1187 | #define STATELOCK (1<<7) /* locked to rs_cstate if 0 */ | ||
1188 | #define RS_CSTATE_MASK (3<<4) | ||
1189 | #define RS_CSTATE_C367_RS1 (0<<4) | ||
1190 | #define RS_CSTATE_C36_RS1_C7_RS2 (1<<4) | ||
1191 | #define RS_CSTATE_RSVD (2<<4) | ||
1192 | #define RS_CSTATE_C367_RS2 (3<<4) | ||
1193 | #define REDSAVES (1<<3) /* no context save if was idle during rs0 */ | ||
1194 | #define REDRESTORES (1<<2) /* no restore if was idle during rs0 */ | ||
1057 | #define VIDCTL 0x111c0 | 1195 | #define VIDCTL 0x111c0 |
1058 | #define VIDSTS 0x111c8 | 1196 | #define VIDSTS 0x111c8 |
1059 | #define VIDSTART 0x111cc /* 8 bits */ | 1197 | #define VIDSTART 0x111cc /* 8 bits */ |
@@ -1070,6 +1208,8 @@ | |||
1070 | #define MEMSTAT_SRC_CTL_STDBY 3 | 1208 | #define MEMSTAT_SRC_CTL_STDBY 3 |
1071 | #define RCPREVBSYTUPAVG 0x113b8 | 1209 | #define RCPREVBSYTUPAVG 0x113b8 |
1072 | #define RCPREVBSYTDNAVG 0x113bc | 1210 | #define RCPREVBSYTDNAVG 0x113bc |
1211 | #define PMMISC 0x11214 | ||
1212 | #define MCPPCE_EN (1<<0) /* enable PM_MSG from PCH->MPC */ | ||
1073 | #define SDEW 0x1124c | 1213 | #define SDEW 0x1124c |
1074 | #define CSIEW0 0x11250 | 1214 | #define CSIEW0 0x11250 |
1075 | #define CSIEW1 0x11254 | 1215 | #define CSIEW1 0x11254 |
@@ -1107,6 +1247,10 @@ | |||
1107 | #define DDRMPLL1 0X12c20 | 1247 | #define DDRMPLL1 0X12c20 |
1108 | #define PEG_BAND_GAP_DATA 0x14d68 | 1248 | #define PEG_BAND_GAP_DATA 0x14d68 |
1109 | 1249 | ||
1250 | #define GEN6_GT_PERF_STATUS 0x145948 | ||
1251 | #define GEN6_RP_STATE_LIMITS 0x145994 | ||
1252 | #define GEN6_RP_STATE_CAP 0x145998 | ||
1253 | |||
1110 | /* | 1254 | /* |
1111 | * Logical Context regs | 1255 | * Logical Context regs |
1112 | */ | 1256 | */ |
@@ -1131,24 +1275,32 @@ | |||
1131 | */ | 1275 | */ |
1132 | 1276 | ||
1133 | /* Pipe A timing regs */ | 1277 | /* Pipe A timing regs */ |
1134 | #define HTOTAL_A 0x60000 | 1278 | #define _HTOTAL_A 0x60000 |
1135 | #define HBLANK_A 0x60004 | 1279 | #define _HBLANK_A 0x60004 |
1136 | #define HSYNC_A 0x60008 | 1280 | #define _HSYNC_A 0x60008 |
1137 | #define VTOTAL_A 0x6000c | 1281 | #define _VTOTAL_A 0x6000c |
1138 | #define VBLANK_A 0x60010 | 1282 | #define _VBLANK_A 0x60010 |
1139 | #define VSYNC_A 0x60014 | 1283 | #define _VSYNC_A 0x60014 |
1140 | #define PIPEASRC 0x6001c | 1284 | #define _PIPEASRC 0x6001c |
1141 | #define BCLRPAT_A 0x60020 | 1285 | #define _BCLRPAT_A 0x60020 |
1142 | 1286 | ||
1143 | /* Pipe B timing regs */ | 1287 | /* Pipe B timing regs */ |
1144 | #define HTOTAL_B 0x61000 | 1288 | #define _HTOTAL_B 0x61000 |
1145 | #define HBLANK_B 0x61004 | 1289 | #define _HBLANK_B 0x61004 |
1146 | #define HSYNC_B 0x61008 | 1290 | #define _HSYNC_B 0x61008 |
1147 | #define VTOTAL_B 0x6100c | 1291 | #define _VTOTAL_B 0x6100c |
1148 | #define VBLANK_B 0x61010 | 1292 | #define _VBLANK_B 0x61010 |
1149 | #define VSYNC_B 0x61014 | 1293 | #define _VSYNC_B 0x61014 |
1150 | #define PIPEBSRC 0x6101c | 1294 | #define _PIPEBSRC 0x6101c |
1151 | #define BCLRPAT_B 0x61020 | 1295 | #define _BCLRPAT_B 0x61020 |
1296 | |||
1297 | #define HTOTAL(pipe) _PIPE(pipe, _HTOTAL_A, _HTOTAL_B) | ||
1298 | #define HBLANK(pipe) _PIPE(pipe, _HBLANK_A, _HBLANK_B) | ||
1299 | #define HSYNC(pipe) _PIPE(pipe, _HSYNC_A, _HSYNC_B) | ||
1300 | #define VTOTAL(pipe) _PIPE(pipe, _VTOTAL_A, _VTOTAL_B) | ||
1301 | #define VBLANK(pipe) _PIPE(pipe, _VBLANK_A, _VBLANK_B) | ||
1302 | #define VSYNC(pipe) _PIPE(pipe, _VSYNC_A, _VSYNC_B) | ||
1303 | #define BCLRPAT(pipe) _PIPE(pipe, _BCLRPAT_A, _BCLRPAT_B) | ||
1152 | 1304 | ||
1153 | /* VGA port control */ | 1305 | /* VGA port control */ |
1154 | #define ADPA 0x61100 | 1306 | #define ADPA 0x61100 |
@@ -1173,6 +1325,7 @@ | |||
1173 | #define ADPA_DPMS_STANDBY (2<<10) | 1325 | #define ADPA_DPMS_STANDBY (2<<10) |
1174 | #define ADPA_DPMS_OFF (3<<10) | 1326 | #define ADPA_DPMS_OFF (3<<10) |
1175 | 1327 | ||
1328 | |||
1176 | /* Hotplug control (945+ only) */ | 1329 | /* Hotplug control (945+ only) */ |
1177 | #define PORT_HOTPLUG_EN 0x61110 | 1330 | #define PORT_HOTPLUG_EN 0x61110 |
1178 | #define HDMIB_HOTPLUG_INT_EN (1 << 29) | 1331 | #define HDMIB_HOTPLUG_INT_EN (1 << 29) |
@@ -1241,6 +1394,7 @@ | |||
1241 | #define SDVO_ENCODING_HDMI (0x2 << 10) | 1394 | #define SDVO_ENCODING_HDMI (0x2 << 10) |
1242 | /** Required for HDMI operation */ | 1395 | /** Required for HDMI operation */ |
1243 | #define SDVO_NULL_PACKETS_DURING_VSYNC (1 << 9) | 1396 | #define SDVO_NULL_PACKETS_DURING_VSYNC (1 << 9) |
1397 | #define SDVO_COLOR_RANGE_16_235 (1 << 8) | ||
1244 | #define SDVO_BORDER_ENABLE (1 << 7) | 1398 | #define SDVO_BORDER_ENABLE (1 << 7) |
1245 | #define SDVO_AUDIO_ENABLE (1 << 6) | 1399 | #define SDVO_AUDIO_ENABLE (1 << 6) |
1246 | /** New with 965, default is to be set */ | 1400 | /** New with 965, default is to be set */ |
@@ -1296,8 +1450,13 @@ | |||
1296 | #define LVDS_PORT_EN (1 << 31) | 1450 | #define LVDS_PORT_EN (1 << 31) |
1297 | /* Selects pipe B for LVDS data. Must be set on pre-965. */ | 1451 | /* Selects pipe B for LVDS data. Must be set on pre-965. */ |
1298 | #define LVDS_PIPEB_SELECT (1 << 30) | 1452 | #define LVDS_PIPEB_SELECT (1 << 30) |
1453 | #define LVDS_PIPE_MASK (1 << 30) | ||
1299 | /* LVDS dithering flag on 965/g4x platform */ | 1454 | /* LVDS dithering flag on 965/g4x platform */ |
1300 | #define LVDS_ENABLE_DITHER (1 << 25) | 1455 | #define LVDS_ENABLE_DITHER (1 << 25) |
1456 | /* LVDS sync polarity flags. Set to invert (i.e. negative) */ | ||
1457 | #define LVDS_VSYNC_POLARITY (1 << 21) | ||
1458 | #define LVDS_HSYNC_POLARITY (1 << 20) | ||
1459 | |||
1301 | /* Enable border for unscaled (or aspect-scaled) display */ | 1460 | /* Enable border for unscaled (or aspect-scaled) display */ |
1302 | #define LVDS_BORDER_ENABLE (1 << 15) | 1461 | #define LVDS_BORDER_ENABLE (1 << 15) |
1303 | /* | 1462 | /* |
@@ -1331,6 +1490,25 @@ | |||
1331 | #define LVDS_B0B3_POWER_DOWN (0 << 2) | 1490 | #define LVDS_B0B3_POWER_DOWN (0 << 2) |
1332 | #define LVDS_B0B3_POWER_UP (3 << 2) | 1491 | #define LVDS_B0B3_POWER_UP (3 << 2) |
1333 | 1492 | ||
1493 | #define LVDS_PIPE_ENABLED(V, P) \ | ||
1494 | (((V) & (LVDS_PIPE_MASK | LVDS_PORT_EN)) == ((P) << 30 | LVDS_PORT_EN)) | ||
1495 | |||
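
LVDS_PIPE_ENABLED() folds two tests into one compare: the port must be enabled and its pipe-select bit must match P. A standalone check (the 1u suffixes make the shifts well defined in plain C; the header itself relies on kernel conventions):

#include <assert.h>

#define LVDS_PORT_EN	(1u << 31)
#define LVDS_PIPE_MASK	(1u << 30)
#define LVDS_PIPE_ENABLED(V, P) \
	(((V) & (LVDS_PIPE_MASK | LVDS_PORT_EN)) == ((P) << 30 | LVDS_PORT_EN))

int main(void)
{
	unsigned val = LVDS_PORT_EN | (1u << 30);	/* enabled, pipe B selected */

	assert(LVDS_PIPE_ENABLED(val, 1));		/* on pipe B */
	assert(!LVDS_PIPE_ENABLED(val, 0));		/* not pipe A */
	assert(!LVDS_PIPE_ENABLED(1u << 30, 1));	/* pipe B but port off */
	return 0;
}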
1496 | /* Video Data Island Packet control */ | ||
1497 | #define VIDEO_DIP_DATA 0x61178 | ||
1498 | #define VIDEO_DIP_CTL 0x61170 | ||
1499 | #define VIDEO_DIP_ENABLE (1 << 31) | ||
1500 | #define VIDEO_DIP_PORT_B (1 << 29) | ||
1501 | #define VIDEO_DIP_PORT_C (2 << 29) | ||
1502 | #define VIDEO_DIP_ENABLE_AVI (1 << 21) | ||
1503 | #define VIDEO_DIP_ENABLE_VENDOR (2 << 21) | ||
1504 | #define VIDEO_DIP_ENABLE_SPD (8 << 21) | ||
1505 | #define VIDEO_DIP_SELECT_AVI (0 << 19) | ||
1506 | #define VIDEO_DIP_SELECT_VENDOR (1 << 19) | ||
1507 | #define VIDEO_DIP_SELECT_SPD (3 << 19) | ||
1508 | #define VIDEO_DIP_FREQ_ONCE (0 << 16) | ||
1509 | #define VIDEO_DIP_FREQ_VSYNC (1 << 16) | ||
1510 | #define VIDEO_DIP_FREQ_2VSYNC (2 << 16) | ||
1511 | |||
1334 | /* Panel power sequencing */ | 1512 | /* Panel power sequencing */ |
1335 | #define PP_STATUS 0x61200 | 1513 | #define PP_STATUS 0x61200 |
1336 | #define PP_ON (1 << 31) | 1514 | #define PP_ON (1 << 31) |
@@ -1346,6 +1524,9 @@ | |||
1346 | #define PP_SEQUENCE_ON (1 << 28) | 1524 | #define PP_SEQUENCE_ON (1 << 28) |
1347 | #define PP_SEQUENCE_OFF (2 << 28) | 1525 | #define PP_SEQUENCE_OFF (2 << 28) |
1348 | #define PP_SEQUENCE_MASK 0x30000000 | 1526 | #define PP_SEQUENCE_MASK 0x30000000 |
1527 | #define PP_CYCLE_DELAY_ACTIVE (1 << 27) | ||
1528 | #define PP_SEQUENCE_STATE_ON_IDLE (1 << 3) | ||
1529 | #define PP_SEQUENCE_STATE_MASK 0x0000000f | ||
1349 | #define PP_CONTROL 0x61204 | 1530 | #define PP_CONTROL 0x61204 |
1350 | #define POWER_TARGET_ON (1 << 0) | 1531 | #define POWER_TARGET_ON (1 << 0) |
1351 | #define PP_ON_DELAYS 0x61208 | 1532 | #define PP_ON_DELAYS 0x61208 |
@@ -1481,6 +1662,7 @@ | |||
1481 | # define TV_TEST_MODE_MASK (7 << 0) | 1662 | # define TV_TEST_MODE_MASK (7 << 0) |
1482 | 1663 | ||
1483 | #define TV_DAC 0x68004 | 1664 | #define TV_DAC 0x68004 |
1665 | # define TV_DAC_SAVE 0x00ffff00 | ||
1484 | /** | 1666 | /** |
1485 | * Reports that DAC state change logic has reported change (RO). | 1667 | * Reports that DAC state change logic has reported change (RO). |
1486 | * | 1668 | * |
@@ -1899,6 +2081,10 @@ | |||
1899 | 2081 | ||
1900 | #define DP_PORT_EN (1 << 31) | 2082 | #define DP_PORT_EN (1 << 31) |
1901 | #define DP_PIPEB_SELECT (1 << 30) | 2083 | #define DP_PIPEB_SELECT (1 << 30) |
2084 | #define DP_PIPE_MASK (1 << 30) | ||
2085 | |||
2086 | #define DP_PIPE_ENABLED(V, P) \ | ||
2087 | (((V) & (DP_PIPE_MASK | DP_PORT_EN)) == ((P) << 30 | DP_PORT_EN)) | ||
1902 | 2088 | ||
1903 | /* Link training mode - select a suitable mode for each stage */ | 2089 | /* Link training mode - select a suitable mode for each stage */ |
1904 | #define DP_LINK_TRAIN_PAT_1 (0 << 28) | 2090 | #define DP_LINK_TRAIN_PAT_1 (0 << 28) |
@@ -2041,8 +2227,8 @@ | |||
2041 | * which is after the LUTs, so we want the bytes for our color format. | 2227 | * which is after the LUTs, so we want the bytes for our color format. |
2042 | * For our current usage, this is always 3, one byte for R, G and B. | 2228 | * For our current usage, this is always 3, one byte for R, G and B. |
2043 | */ | 2229 | */ |
2044 | #define PIPEA_GMCH_DATA_M 0x70050 | 2230 | #define _PIPEA_GMCH_DATA_M 0x70050 |
2045 | #define PIPEB_GMCH_DATA_M 0x71050 | 2231 | #define _PIPEB_GMCH_DATA_M 0x71050 |
2046 | 2232 | ||
2047 | /* Transfer unit size for display port - 1, default is 0x3f (for TU size 64) */ | 2233 | /* Transfer unit size for display port - 1, default is 0x3f (for TU size 64) */ |
2048 | #define PIPE_GMCH_DATA_M_TU_SIZE_MASK (0x3f << 25) | 2234 | #define PIPE_GMCH_DATA_M_TU_SIZE_MASK (0x3f << 25) |
@@ -2050,8 +2236,8 @@ | |||
2050 | 2236 | ||
2051 | #define PIPE_GMCH_DATA_M_MASK (0xffffff) | 2237 | #define PIPE_GMCH_DATA_M_MASK (0xffffff) |
2052 | 2238 | ||
2053 | #define PIPEA_GMCH_DATA_N 0x70054 | 2239 | #define _PIPEA_GMCH_DATA_N 0x70054 |
2054 | #define PIPEB_GMCH_DATA_N 0x71054 | 2240 | #define _PIPEB_GMCH_DATA_N 0x71054 |
2055 | #define PIPE_GMCH_DATA_N_MASK (0xffffff) | 2241 | #define PIPE_GMCH_DATA_N_MASK (0xffffff) |
2056 | 2242 | ||
2057 | /* | 2243 | /* |
@@ -2065,40 +2251,51 @@ | |||
2065 | * Attributes and VB-ID. | 2251 | * Attributes and VB-ID. |
2066 | */ | 2252 | */ |
2067 | 2253 | ||
2068 | #define PIPEA_DP_LINK_M 0x70060 | 2254 | #define _PIPEA_DP_LINK_M 0x70060 |
2069 | #define PIPEB_DP_LINK_M 0x71060 | 2255 | #define _PIPEB_DP_LINK_M 0x71060 |
2070 | #define PIPEA_DP_LINK_M_MASK (0xffffff) | 2256 | #define PIPEA_DP_LINK_M_MASK (0xffffff) |
2071 | 2257 | ||
2072 | #define PIPEA_DP_LINK_N 0x70064 | 2258 | #define _PIPEA_DP_LINK_N 0x70064 |
2073 | #define PIPEB_DP_LINK_N 0x71064 | 2259 | #define _PIPEB_DP_LINK_N 0x71064 |
2074 | #define PIPEA_DP_LINK_N_MASK (0xffffff) | 2260 | #define PIPEA_DP_LINK_N_MASK (0xffffff) |
2075 | 2261 | ||
2262 | #define PIPE_GMCH_DATA_M(pipe) _PIPE(pipe, _PIPEA_GMCH_DATA_M, _PIPEB_GMCH_DATA_M) | ||
2263 | #define PIPE_GMCH_DATA_N(pipe) _PIPE(pipe, _PIPEA_GMCH_DATA_N, _PIPEB_GMCH_DATA_N) | ||
2264 | #define PIPE_DP_LINK_M(pipe) _PIPE(pipe, _PIPEA_DP_LINK_M, _PIPEB_DP_LINK_M) | ||
2265 | #define PIPE_DP_LINK_N(pipe) _PIPE(pipe, _PIPEA_DP_LINK_N, _PIPEB_DP_LINK_N) | ||
2266 | |||
2076 | /* Display & cursor control */ | 2267 | /* Display & cursor control */ |
2077 | 2268 | ||
2078 | /* dithering flag on Ironlake */ | ||
2079 | #define PIPE_ENABLE_DITHER (1 << 4) | ||
2080 | #define PIPE_DITHER_TYPE_MASK (3 << 2) | ||
2081 | #define PIPE_DITHER_TYPE_SPATIAL (0 << 2) | ||
2082 | #define PIPE_DITHER_TYPE_ST01 (1 << 2) | ||
2083 | /* Pipe A */ | 2269 | /* Pipe A */ |
2084 | #define PIPEADSL 0x70000 | 2270 | #define _PIPEADSL 0x70000 |
2085 | #define DSL_LINEMASK 0x00000fff | 2271 | #define DSL_LINEMASK 0x00000fff |
2086 | #define PIPEACONF 0x70008 | 2272 | #define _PIPEACONF 0x70008 |
2087 | #define PIPEACONF_ENABLE (1<<31) | 2273 | #define PIPECONF_ENABLE (1<<31) |
2088 | #define PIPEACONF_DISABLE 0 | 2274 | #define PIPECONF_DISABLE 0 |
2089 | #define PIPEACONF_DOUBLE_WIDE (1<<30) | 2275 | #define PIPECONF_DOUBLE_WIDE (1<<30) |
2090 | #define I965_PIPECONF_ACTIVE (1<<30) | 2276 | #define I965_PIPECONF_ACTIVE (1<<30) |
2091 | #define PIPEACONF_SINGLE_WIDE 0 | 2277 | #define PIPECONF_SINGLE_WIDE 0 |
2092 | #define PIPEACONF_PIPE_UNLOCKED 0 | 2278 | #define PIPECONF_PIPE_UNLOCKED 0 |
2093 | #define PIPEACONF_PIPE_LOCKED (1<<25) | 2279 | #define PIPECONF_PIPE_LOCKED (1<<25) |
2094 | #define PIPEACONF_PALETTE 0 | 2280 | #define PIPECONF_PALETTE 0 |
2095 | #define PIPEACONF_GAMMA (1<<24) | 2281 | #define PIPECONF_GAMMA (1<<24) |
2096 | #define PIPECONF_FORCE_BORDER (1<<25) | 2282 | #define PIPECONF_FORCE_BORDER (1<<25) |
2097 | #define PIPECONF_PROGRESSIVE (0 << 21) | 2283 | #define PIPECONF_PROGRESSIVE (0 << 21) |
2098 | #define PIPECONF_INTERLACE_W_FIELD_INDICATION (6 << 21) | 2284 | #define PIPECONF_INTERLACE_W_FIELD_INDICATION (6 << 21) |
2099 | #define PIPECONF_INTERLACE_FIELD_0_ONLY (7 << 21) | 2285 | #define PIPECONF_INTERLACE_FIELD_0_ONLY (7 << 21) |
2100 | #define PIPECONF_CXSR_DOWNCLOCK (1<<16) | 2286 | #define PIPECONF_CXSR_DOWNCLOCK (1<<16) |
2101 | #define PIPEASTAT 0x70024 | 2287 | #define PIPECONF_BPP_MASK (0x000000e0) |
2288 | #define PIPECONF_BPP_8 (0<<5) | ||
2289 | #define PIPECONF_BPP_10 (1<<5) | ||
2290 | #define PIPECONF_BPP_6 (2<<5) | ||
2291 | #define PIPECONF_BPP_12 (3<<5) | ||
2292 | #define PIPECONF_DITHER_EN (1<<4) | ||
2293 | #define PIPECONF_DITHER_TYPE_MASK (0x0000000c) | ||
2294 | #define PIPECONF_DITHER_TYPE_SP (0<<2) | ||
2295 | #define PIPECONF_DITHER_TYPE_ST1 (1<<2) | ||
2296 | #define PIPECONF_DITHER_TYPE_ST2 (2<<2) | ||
2297 | #define PIPECONF_DITHER_TYPE_TEMP (3<<2) | ||
2298 | #define _PIPEASTAT 0x70024 | ||
2102 | #define PIPE_FIFO_UNDERRUN_STATUS (1UL<<31) | 2299 | #define PIPE_FIFO_UNDERRUN_STATUS (1UL<<31) |
2103 | #define PIPE_CRC_ERROR_ENABLE (1UL<<29) | 2300 | #define PIPE_CRC_ERROR_ENABLE (1UL<<29) |
2104 | #define PIPE_CRC_DONE_ENABLE (1UL<<28) | 2301 | #define PIPE_CRC_DONE_ENABLE (1UL<<28) |
@@ -2128,12 +2325,19 @@ | |||
2128 | #define PIPE_START_VBLANK_INTERRUPT_STATUS (1UL<<2) /* 965 or later */ | 2325 | #define PIPE_START_VBLANK_INTERRUPT_STATUS (1UL<<2) /* 965 or later */ |
2129 | #define PIPE_VBLANK_INTERRUPT_STATUS (1UL<<1) | 2326 | #define PIPE_VBLANK_INTERRUPT_STATUS (1UL<<1) |
2130 | #define PIPE_OVERLAY_UPDATED_STATUS (1UL<<0) | 2327 | #define PIPE_OVERLAY_UPDATED_STATUS (1UL<<0) |
2131 | #define PIPE_BPC_MASK (7 << 5) /* Ironlake */ | 2328 | #define PIPE_BPC_MASK (7 << 5) /* Ironlake */ |
2132 | #define PIPE_8BPC (0 << 5) | 2329 | #define PIPE_8BPC (0 << 5) |
2133 | #define PIPE_10BPC (1 << 5) | 2330 | #define PIPE_10BPC (1 << 5) |
2134 | #define PIPE_6BPC (2 << 5) | 2331 | #define PIPE_6BPC (2 << 5) |
2135 | #define PIPE_12BPC (3 << 5) | 2332 | #define PIPE_12BPC (3 << 5) |
2136 | 2333 | ||
2334 | #define PIPESRC(pipe) _PIPE(pipe, _PIPEASRC, _PIPEBSRC) | ||
2335 | #define PIPECONF(pipe) _PIPE(pipe, _PIPEACONF, _PIPEBCONF) | ||
2336 | #define PIPEDSL(pipe) _PIPE(pipe, _PIPEADSL, _PIPEBDSL) | ||
2337 | #define PIPEFRAME(pipe) _PIPE(pipe, _PIPEAFRAMEHIGH, _PIPEBFRAMEHIGH) | ||
2338 | #define PIPEFRAMEPIXEL(pipe) _PIPE(pipe, _PIPEAFRAMEPIXEL, _PIPEBFRAMEPIXEL) | ||
2339 | #define PIPESTAT(pipe) _PIPE(pipe, _PIPEASTAT, _PIPEBSTAT) | ||
2340 | |||
2137 | #define DSPARB 0x70030 | 2341 | #define DSPARB 0x70030 |
2138 | #define DSPARB_CSTART_MASK (0x7f << 7) | 2342 | #define DSPARB_CSTART_MASK (0x7f << 7) |
2139 | #define DSPARB_CSTART_SHIFT 7 | 2343 | #define DSPARB_CSTART_SHIFT 7 |
@@ -2206,8 +2410,8 @@ | |||
2206 | #define WM1_LP_SR_EN (1<<31) | 2410 | #define WM1_LP_SR_EN (1<<31) |
2207 | #define WM1_LP_LATENCY_SHIFT 24 | 2411 | #define WM1_LP_LATENCY_SHIFT 24 |
2208 | #define WM1_LP_LATENCY_MASK (0x7f<<24) | 2412 | #define WM1_LP_LATENCY_MASK (0x7f<<24) |
2209 | #define WM1_LP_FBC_LP1_MASK (0xf<<20) | 2413 | #define WM1_LP_FBC_MASK (0xf<<20) |
2210 | #define WM1_LP_FBC_LP1_SHIFT 20 | 2414 | #define WM1_LP_FBC_SHIFT 20 |
2211 | #define WM1_LP_SR_MASK (0x1ff<<8) | 2415 | #define WM1_LP_SR_MASK (0x1ff<<8) |
2212 | #define WM1_LP_SR_SHIFT 8 | 2416 | #define WM1_LP_SR_SHIFT 8 |
2213 | #define WM1_LP_CURSOR_MASK (0x3f) | 2417 | #define WM1_LP_CURSOR_MASK (0x3f) |
@@ -2220,8 +2424,13 @@ | |||
2220 | 2424 | ||
2221 | /* Memory latency timer register */ | 2425 | /* Memory latency timer register */ |
2222 | #define MLTR_ILK 0x11222 | 2426 | #define MLTR_ILK 0x11222 |
2427 | #define MLTR_WM1_SHIFT 0 | ||
2428 | #define MLTR_WM2_SHIFT 8 | ||
2223 | /* the unit of memory self-refresh latency time is 0.5us */ | 2429 | /* the unit of memory self-refresh latency time is 0.5us */ |
2224 | #define ILK_SRLT_MASK 0x3f | 2430 | #define ILK_SRLT_MASK 0x3f |
2431 | #define ILK_LATENCY(shift) (I915_READ(MLTR_ILK) >> (shift) & ILK_SRLT_MASK) | ||
2432 | #define ILK_READ_WM1_LATENCY() ILK_LATENCY(MLTR_WM1_SHIFT) | ||
2433 | #define ILK_READ_WM2_LATENCY() ILK_LATENCY(MLTR_WM2_SHIFT) | ||
2225 | 2434 | ||
2226 | /* define the fifo size on Ironlake */ | 2435 | /* define the fifo size on Ironlake */ |
2227 | #define ILK_DISPLAY_FIFO 128 | 2436 | #define ILK_DISPLAY_FIFO 128 |
@@ -2240,6 +2449,40 @@ | |||
2240 | 2449 | ||
2241 | #define ILK_FIFO_LINE_SIZE 64 | 2450 | #define ILK_FIFO_LINE_SIZE 64 |
2242 | 2451 | ||
2452 | /* define the WM info on Sandybridge */ | ||
2453 | #define SNB_DISPLAY_FIFO 128 | ||
2454 | #define SNB_DISPLAY_MAXWM 0x7f /* bit 16:22 */ | ||
2455 | #define SNB_DISPLAY_DFTWM 8 | ||
2456 | #define SNB_CURSOR_FIFO 32 | ||
2457 | #define SNB_CURSOR_MAXWM 0x1f /* bit 4:0 */ | ||
2458 | #define SNB_CURSOR_DFTWM 8 | ||
2459 | |||
2460 | #define SNB_DISPLAY_SR_FIFO 512 | ||
2461 | #define SNB_DISPLAY_MAX_SRWM 0x1ff /* bit 16:8 */ | ||
2462 | #define SNB_DISPLAY_DFT_SRWM 0x3f | ||
2463 | #define SNB_CURSOR_SR_FIFO 64 | ||
2464 | #define SNB_CURSOR_MAX_SRWM 0x3f /* bit 5:0 */ | ||
2465 | #define SNB_CURSOR_DFT_SRWM 8 | ||
2466 | |||
2467 | #define SNB_FBC_MAX_SRWM 0xf /* bit 23:20 */ | ||
2468 | |||
2469 | #define SNB_FIFO_LINE_SIZE 64 | ||
2470 | |||
2471 | |||
2472 | /* the register from which all the watermark latency values are read */ | ||
2473 | #define SSKPD 0x5d10 | ||
2474 | #define SSKPD_WM_MASK 0x3f | ||
2475 | #define SSKPD_WM0_SHIFT 0 | ||
2476 | #define SSKPD_WM1_SHIFT 8 | ||
2477 | #define SSKPD_WM2_SHIFT 16 | ||
2478 | #define SSKPD_WM3_SHIFT 24 | ||
2479 | |||
2480 | #define SNB_LATENCY(shift) (I915_READ(MCHBAR_MIRROR_BASE_SNB + SSKPD) >> (shift) & SSKPD_WM_MASK) | ||
2481 | #define SNB_READ_WM0_LATENCY() SNB_LATENCY(SSKPD_WM0_SHIFT) | ||
2482 | #define SNB_READ_WM1_LATENCY() SNB_LATENCY(SSKPD_WM1_SHIFT) | ||
2483 | #define SNB_READ_WM2_LATENCY() SNB_LATENCY(SSKPD_WM2_SHIFT) | ||
2484 | #define SNB_READ_WM3_LATENCY() SNB_LATENCY(SSKPD_WM3_SHIFT) | ||
2485 | |||
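
SSKPD packs four 6-bit latency fields at byte offsets, so SNB_LATENCY() is just a shift and mask. A standalone check with a made-up register value (0x20160412 is illustrative only, not a real readout):

#include <assert.h>

#define SSKPD_WM_MASK	0x3f
#define SSKPD_WM0_SHIFT	0
#define SSKPD_WM1_SHIFT	8
#define SSKPD_WM2_SHIFT	16
#define SSKPD_WM3_SHIFT	24

/* stand-in for I915_READ(MCHBAR_MIRROR_BASE_SNB + SSKPD) */
static unsigned sskpd_value = 0x20160412;
#define SNB_LATENCY(shift) (sskpd_value >> (shift) & SSKPD_WM_MASK)

int main(void)
{
	assert(SNB_LATENCY(SSKPD_WM0_SHIFT) == 0x12);
	assert(SNB_LATENCY(SSKPD_WM1_SHIFT) == 0x04);
	assert(SNB_LATENCY(SSKPD_WM2_SHIFT) == 0x16);
	assert(SNB_LATENCY(SSKPD_WM3_SHIFT) == 0x20);
	return 0;
}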
2243 | /* | 2486 | /* |
2244 | * The two pipe frame counter registers are not synchronized, so | 2487 | * The two pipe frame counter registers are not synchronized, so |
2245 | * reading a stable value is somewhat tricky. The following code | 2488 | * reading a stable value is somewhat tricky. The following code |
@@ -2255,20 +2498,21 @@ | |||
2255 | * } while (high1 != high2); | 2498 | * } while (high1 != high2); |
2256 | * frame = (high1 << 8) | low1; | 2499 | * frame = (high1 << 8) | low1; |
2257 | */ | 2500 | */ |
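
Only the head and tail of that pseudo-code appear in this hunk, so spelled out in full: read the high half, read the low half, re-read the high half, and retry if the high half rolled over in between. A sketch of the loop using this file's masks (the in-tree implementation is i915_get_vblank_counter() in i915_irq.c; dev_priv in scope as usual):

static u32 example_stable_frame_count(struct drm_i915_private *dev_priv, int pipe)
{
	u32 high1, high2, low;

	do {
		high1 = I915_READ(PIPEFRAME(pipe)) & PIPE_FRAME_HIGH_MASK;
		low   = I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_FRAME_LOW_MASK;
		high2 = I915_READ(PIPEFRAME(pipe)) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);		/* high half changed mid-read: retry */

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	low >>= PIPE_FRAME_LOW_SHIFT;
	return (high1 << 8) | low;		/* 16-bit high half, 8-bit low half */
}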
2258 | #define PIPEAFRAMEHIGH 0x70040 | 2501 | #define _PIPEAFRAMEHIGH 0x70040 |
2259 | #define PIPE_FRAME_HIGH_MASK 0x0000ffff | 2502 | #define PIPE_FRAME_HIGH_MASK 0x0000ffff |
2260 | #define PIPE_FRAME_HIGH_SHIFT 0 | 2503 | #define PIPE_FRAME_HIGH_SHIFT 0 |
2261 | #define PIPEAFRAMEPIXEL 0x70044 | 2504 | #define _PIPEAFRAMEPIXEL 0x70044 |
2262 | #define PIPE_FRAME_LOW_MASK 0xff000000 | 2505 | #define PIPE_FRAME_LOW_MASK 0xff000000 |
2263 | #define PIPE_FRAME_LOW_SHIFT 24 | 2506 | #define PIPE_FRAME_LOW_SHIFT 24 |
2264 | #define PIPE_PIXEL_MASK 0x00ffffff | 2507 | #define PIPE_PIXEL_MASK 0x00ffffff |
2265 | #define PIPE_PIXEL_SHIFT 0 | 2508 | #define PIPE_PIXEL_SHIFT 0 |
2266 | /* GM45+ just has to be different */ | 2509 | /* GM45+ just has to be different */ |
2267 | #define PIPEA_FRMCOUNT_GM45 0x70040 | 2510 | #define _PIPEA_FRMCOUNT_GM45 0x70040 |
2268 | #define PIPEA_FLIPCOUNT_GM45 0x70044 | 2511 | #define _PIPEA_FLIPCOUNT_GM45 0x70044 |
2512 | #define PIPE_FRMCOUNT_GM45(pipe) _PIPE(pipe, _PIPEA_FRMCOUNT_GM45, _PIPEB_FRMCOUNT_GM45) | ||
2269 | 2513 | ||
2270 | /* Cursor A & B regs */ | 2514 | /* Cursor A & B regs */ |
2271 | #define CURACNTR 0x70080 | 2515 | #define _CURACNTR 0x70080 |
2272 | /* Old style CUR*CNTR flags (desktop 8xx) */ | 2516 | /* Old style CUR*CNTR flags (desktop 8xx) */ |
2273 | #define CURSOR_ENABLE 0x80000000 | 2517 | #define CURSOR_ENABLE 0x80000000 |
2274 | #define CURSOR_GAMMA_ENABLE 0x40000000 | 2518 | #define CURSOR_GAMMA_ENABLE 0x40000000 |
@@ -2289,19 +2533,23 @@ | |||
2289 | #define MCURSOR_PIPE_A 0x00 | 2533 | #define MCURSOR_PIPE_A 0x00 |
2290 | #define MCURSOR_PIPE_B (1 << 28) | 2534 | #define MCURSOR_PIPE_B (1 << 28) |
2291 | #define MCURSOR_GAMMA_ENABLE (1 << 26) | 2535 | #define MCURSOR_GAMMA_ENABLE (1 << 26) |
2292 | #define CURABASE 0x70084 | 2536 | #define _CURABASE 0x70084 |
2293 | #define CURAPOS 0x70088 | 2537 | #define _CURAPOS 0x70088 |
2294 | #define CURSOR_POS_MASK 0x007FF | 2538 | #define CURSOR_POS_MASK 0x007FF |
2295 | #define CURSOR_POS_SIGN 0x8000 | 2539 | #define CURSOR_POS_SIGN 0x8000 |
2296 | #define CURSOR_X_SHIFT 0 | 2540 | #define CURSOR_X_SHIFT 0 |
2297 | #define CURSOR_Y_SHIFT 16 | 2541 | #define CURSOR_Y_SHIFT 16 |
2298 | #define CURSIZE 0x700a0 | 2542 | #define CURSIZE 0x700a0 |
2299 | #define CURBCNTR 0x700c0 | 2543 | #define _CURBCNTR 0x700c0 |
2300 | #define CURBBASE 0x700c4 | 2544 | #define _CURBBASE 0x700c4 |
2301 | #define CURBPOS 0x700c8 | 2545 | #define _CURBPOS 0x700c8 |
2546 | |||
2547 | #define CURCNTR(pipe) _PIPE(pipe, _CURACNTR, _CURBCNTR) | ||
2548 | #define CURBASE(pipe) _PIPE(pipe, _CURABASE, _CURBBASE) | ||
2549 | #define CURPOS(pipe) _PIPE(pipe, _CURAPOS, _CURBPOS) | ||
2302 | 2550 | ||
2303 | /* Display A control */ | 2551 | /* Display A control */ |
2304 | #define DSPACNTR 0x70180 | 2552 | #define _DSPACNTR 0x70180 |
2305 | #define DISPLAY_PLANE_ENABLE (1<<31) | 2553 | #define DISPLAY_PLANE_ENABLE (1<<31) |
2306 | #define DISPLAY_PLANE_DISABLE 0 | 2554 | #define DISPLAY_PLANE_DISABLE 0 |
2307 | #define DISPPLANE_GAMMA_ENABLE (1<<30) | 2555 | #define DISPPLANE_GAMMA_ENABLE (1<<30) |
@@ -2315,9 +2563,10 @@ | |||
2315 | #define DISPPLANE_32BPP_30BIT_NO_ALPHA (0xa<<26) | 2563 | #define DISPPLANE_32BPP_30BIT_NO_ALPHA (0xa<<26) |
2316 | #define DISPPLANE_STEREO_ENABLE (1<<25) | 2564 | #define DISPPLANE_STEREO_ENABLE (1<<25) |
2317 | #define DISPPLANE_STEREO_DISABLE 0 | 2565 | #define DISPPLANE_STEREO_DISABLE 0 |
2318 | #define DISPPLANE_SEL_PIPE_MASK (1<<24) | 2566 | #define DISPPLANE_SEL_PIPE_SHIFT 24 |
2567 | #define DISPPLANE_SEL_PIPE_MASK (3<<DISPPLANE_SEL_PIPE_SHIFT) | ||
2319 | #define DISPPLANE_SEL_PIPE_A 0 | 2568 | #define DISPPLANE_SEL_PIPE_A 0 |
2320 | #define DISPPLANE_SEL_PIPE_B (1<<24) | 2569 | #define DISPPLANE_SEL_PIPE_B (1<<DISPPLANE_SEL_PIPE_SHIFT) |
2321 | #define DISPPLANE_SRC_KEY_ENABLE (1<<22) | 2570 | #define DISPPLANE_SRC_KEY_ENABLE (1<<22) |
2322 | #define DISPPLANE_SRC_KEY_DISABLE 0 | 2571 | #define DISPPLANE_SRC_KEY_DISABLE 0 |
2323 | #define DISPPLANE_LINE_DOUBLE (1<<20) | 2572 | #define DISPPLANE_LINE_DOUBLE (1<<20) |
@@ -2326,12 +2575,20 @@ | |||
2326 | #define DISPPLANE_STEREO_POLARITY_SECOND (1<<18) | 2575 | #define DISPPLANE_STEREO_POLARITY_SECOND (1<<18) |
2327 | #define DISPPLANE_TRICKLE_FEED_DISABLE (1<<14) /* Ironlake */ | 2576 | #define DISPPLANE_TRICKLE_FEED_DISABLE (1<<14) /* Ironlake */ |
2328 | #define DISPPLANE_TILED (1<<10) | 2577 | #define DISPPLANE_TILED (1<<10) |
2329 | #define DSPAADDR 0x70184 | 2578 | #define _DSPAADDR 0x70184 |
2330 | #define DSPASTRIDE 0x70188 | 2579 | #define _DSPASTRIDE 0x70188 |
2331 | #define DSPAPOS 0x7018C /* reserved */ | 2580 | #define _DSPAPOS 0x7018C /* reserved */ |
2332 | #define DSPASIZE 0x70190 | 2581 | #define _DSPASIZE 0x70190 |
2333 | #define DSPASURF 0x7019C /* 965+ only */ | 2582 | #define _DSPASURF 0x7019C /* 965+ only */ |
2334 | #define DSPATILEOFF 0x701A4 /* 965+ only */ | 2583 | #define _DSPATILEOFF 0x701A4 /* 965+ only */ |
2584 | |||
2585 | #define DSPCNTR(plane) _PIPE(plane, _DSPACNTR, _DSPBCNTR) | ||
2586 | #define DSPADDR(plane) _PIPE(plane, _DSPAADDR, _DSPBADDR) | ||
2587 | #define DSPSTRIDE(plane) _PIPE(plane, _DSPASTRIDE, _DSPBSTRIDE) | ||
2588 | #define DSPPOS(plane) _PIPE(plane, _DSPAPOS, _DSPBPOS) | ||
2589 | #define DSPSIZE(plane) _PIPE(plane, _DSPASIZE, _DSPBSIZE) | ||
2590 | #define DSPSURF(plane) _PIPE(plane, _DSPASURF, _DSPBSURF) | ||
2591 | #define DSPTILEOFF(plane) _PIPE(plane, _DSPATILEOFF, _DSPBTILEOFF) | ||
2335 | 2592 | ||
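Note the SEL_PIPE field above widening from one bit to two (3<<24), which makes room for a third pipe. A hypothetical read-modify-write through the new DSPCNTR(plane) wrapper, assuming the driver's I915_READ/I915_WRITE accessors and a dev_priv in scope; this helper is illustrative, not part of the patch:

    /* retarget a display plane to a given pipe via the 2-bit field */
    static void set_plane_pipe(int plane, int pipe)
    {
        u32 val = I915_READ(DSPCNTR(plane));

        val &= ~DISPPLANE_SEL_PIPE_MASK;
        val |= pipe << DISPPLANE_SEL_PIPE_SHIFT;
        I915_WRITE(DSPCNTR(plane), val);
    }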
2336 | /* VBIOS flags */ | 2593 | /* VBIOS flags */ |
2337 | #define SWF00 0x71410 | 2594 | #define SWF00 0x71410 |
@@ -2349,27 +2606,27 @@ | |||
2349 | #define SWF32 0x7241c | 2606 | #define SWF32 0x7241c |
2350 | 2607 | ||
2351 | /* Pipe B */ | 2608 | /* Pipe B */ |
2352 | #define PIPEBDSL 0x71000 | 2609 | #define _PIPEBDSL 0x71000 |
2353 | #define PIPEBCONF 0x71008 | 2610 | #define _PIPEBCONF 0x71008 |
2354 | #define PIPEBSTAT 0x71024 | 2611 | #define _PIPEBSTAT 0x71024 |
2355 | #define PIPEBFRAMEHIGH 0x71040 | 2612 | #define _PIPEBFRAMEHIGH 0x71040 |
2356 | #define PIPEBFRAMEPIXEL 0x71044 | 2613 | #define _PIPEBFRAMEPIXEL 0x71044 |
2357 | #define PIPEB_FRMCOUNT_GM45 0x71040 | 2614 | #define _PIPEB_FRMCOUNT_GM45 0x71040 |
2358 | #define PIPEB_FLIPCOUNT_GM45 0x71044 | 2615 | #define _PIPEB_FLIPCOUNT_GM45 0x71044 |
2359 | 2616 | ||
2360 | 2617 | ||
2361 | /* Display B control */ | 2618 | /* Display B control */ |
2362 | #define DSPBCNTR 0x71180 | 2619 | #define _DSPBCNTR 0x71180 |
2363 | #define DISPPLANE_ALPHA_TRANS_ENABLE (1<<15) | 2620 | #define DISPPLANE_ALPHA_TRANS_ENABLE (1<<15) |
2364 | #define DISPPLANE_ALPHA_TRANS_DISABLE 0 | 2621 | #define DISPPLANE_ALPHA_TRANS_DISABLE 0 |
2365 | #define DISPPLANE_SPRITE_ABOVE_DISPLAY 0 | 2622 | #define DISPPLANE_SPRITE_ABOVE_DISPLAY 0 |
2366 | #define DISPPLANE_SPRITE_ABOVE_OVERLAY (1) | 2623 | #define DISPPLANE_SPRITE_ABOVE_OVERLAY (1) |
2367 | #define DSPBADDR 0x71184 | 2624 | #define _DSPBADDR 0x71184 |
2368 | #define DSPBSTRIDE 0x71188 | 2625 | #define _DSPBSTRIDE 0x71188 |
2369 | #define DSPBPOS 0x7118C | 2626 | #define _DSPBPOS 0x7118C |
2370 | #define DSPBSIZE 0x71190 | 2627 | #define _DSPBSIZE 0x71190 |
2371 | #define DSPBSURF 0x7119C | 2628 | #define _DSPBSURF 0x7119C |
2372 | #define DSPBTILEOFF 0x711A4 | 2629 | #define _DSPBTILEOFF 0x711A4 |
2373 | 2630 | ||
2374 | /* VBIOS regs */ | 2631 | /* VBIOS regs */ |
2375 | #define VGACNTRL 0x71400 | 2632 | #define VGACNTRL 0x71400 |
@@ -2397,6 +2654,7 @@ | |||
2397 | #define RR_HW_HIGH_POWER_FRAMES_MASK 0xff00 | 2654 | #define RR_HW_HIGH_POWER_FRAMES_MASK 0xff00 |
2398 | 2655 | ||
2399 | #define FDI_PLL_BIOS_0 0x46000 | 2656 | #define FDI_PLL_BIOS_0 0x46000 |
2657 | #define FDI_PLL_FB_CLOCK_MASK 0xff | ||
2400 | #define FDI_PLL_BIOS_1 0x46004 | 2658 | #define FDI_PLL_BIOS_1 0x46004 |
2401 | #define FDI_PLL_BIOS_2 0x46008 | 2659 | #define FDI_PLL_BIOS_2 0x46008 |
2402 | #define DISPLAY_PORT_PLL_BIOS_0 0x4600c | 2660 | #define DISPLAY_PORT_PLL_BIOS_0 0x4600c |
@@ -2404,6 +2662,8 @@ | |||
2404 | #define DISPLAY_PORT_PLL_BIOS_2 0x46014 | 2662 | #define DISPLAY_PORT_PLL_BIOS_2 0x46014 |
2405 | 2663 | ||
2406 | #define PCH_DSPCLK_GATE_D 0x42020 | 2664 | #define PCH_DSPCLK_GATE_D 0x42020 |
2665 | # define DPFCUNIT_CLOCK_GATE_DISABLE (1 << 9) | ||
2666 | # define DPFCRUNIT_CLOCK_GATE_DISABLE (1 << 8) | ||
2407 | # define DPFDUNIT_CLOCK_GATE_DISABLE (1 << 7) | 2667 | # define DPFDUNIT_CLOCK_GATE_DISABLE (1 << 7) |
2408 | # define DPARBUNIT_CLOCK_GATE_DISABLE (1 << 5) | 2668 | # define DPARBUNIT_CLOCK_GATE_DISABLE (1 << 5) |
2409 | 2669 | ||
@@ -2411,73 +2671,89 @@ | |||
2411 | # define MARIUNIT_CLOCK_GATE_DISABLE (1 << 18) | 2671 | # define MARIUNIT_CLOCK_GATE_DISABLE (1 << 18) |
2412 | # define SVSMUNIT_CLOCK_GATE_DISABLE (1 << 1) | 2672 | # define SVSMUNIT_CLOCK_GATE_DISABLE (1 << 1) |
2413 | 2673 | ||
2674 | #define PCH_3DCGDIS1 0x46024 | ||
2675 | # define VFMUNIT_CLOCK_GATE_DISABLE (1 << 11) | ||
2676 | |||
2414 | #define FDI_PLL_FREQ_CTL 0x46030 | 2677 | #define FDI_PLL_FREQ_CTL 0x46030 |
2415 | #define FDI_PLL_FREQ_CHANGE_REQUEST (1<<24) | 2678 | #define FDI_PLL_FREQ_CHANGE_REQUEST (1<<24) |
2416 | #define FDI_PLL_FREQ_LOCK_LIMIT_MASK 0xfff00 | 2679 | #define FDI_PLL_FREQ_LOCK_LIMIT_MASK 0xfff00 |
2417 | #define FDI_PLL_FREQ_DISABLE_COUNT_LIMIT_MASK 0xff | 2680 | #define FDI_PLL_FREQ_DISABLE_COUNT_LIMIT_MASK 0xff |
2418 | 2681 | ||
2419 | 2682 | ||
2420 | #define PIPEA_DATA_M1 0x60030 | 2683 | #define _PIPEA_DATA_M1 0x60030 |
2421 | #define TU_SIZE(x) (((x)-1) << 25) /* default size 64 */ | 2684 | #define TU_SIZE(x) (((x)-1) << 25) /* default size 64 */ |
2422 | #define TU_SIZE_MASK 0x7e000000 | 2685 | #define TU_SIZE_MASK 0x7e000000 |
2423 | #define PIPEA_DATA_M1_OFFSET 0 | 2686 | #define PIPE_DATA_M1_OFFSET 0 |
2424 | #define PIPEA_DATA_N1 0x60034 | 2687 | #define _PIPEA_DATA_N1 0x60034 |
2425 | #define PIPEA_DATA_N1_OFFSET 0 | 2688 | #define PIPE_DATA_N1_OFFSET 0 |
2426 | 2689 | ||
2427 | #define PIPEA_DATA_M2 0x60038 | 2690 | #define _PIPEA_DATA_M2 0x60038 |
2428 | #define PIPEA_DATA_M2_OFFSET 0 | 2691 | #define PIPE_DATA_M2_OFFSET 0 |
2429 | #define PIPEA_DATA_N2 0x6003c | 2692 | #define _PIPEA_DATA_N2 0x6003c |
2430 | #define PIPEA_DATA_N2_OFFSET 0 | 2693 | #define PIPE_DATA_N2_OFFSET 0 |
2431 | 2694 | ||
2432 | #define PIPEA_LINK_M1 0x60040 | 2695 | #define _PIPEA_LINK_M1 0x60040 |
2433 | #define PIPEA_LINK_M1_OFFSET 0 | 2696 | #define PIPE_LINK_M1_OFFSET 0 |
2434 | #define PIPEA_LINK_N1 0x60044 | 2697 | #define _PIPEA_LINK_N1 0x60044 |
2435 | #define PIPEA_LINK_N1_OFFSET 0 | 2698 | #define PIPE_LINK_N1_OFFSET 0 |
2436 | 2699 | ||
2437 | #define PIPEA_LINK_M2 0x60048 | 2700 | #define _PIPEA_LINK_M2 0x60048 |
2438 | #define PIPEA_LINK_M2_OFFSET 0 | 2701 | #define PIPE_LINK_M2_OFFSET 0 |
2439 | #define PIPEA_LINK_N2 0x6004c | 2702 | #define _PIPEA_LINK_N2 0x6004c |
2440 | #define PIPEA_LINK_N2_OFFSET 0 | 2703 | #define PIPE_LINK_N2_OFFSET 0 |
2441 | 2704 | ||
2442 | /* PIPEB timing regs are the same, starting at 0x61000 */ | 2705 | /* PIPEB timing regs are the same, starting at 0x61000 */
2443 | 2706 | ||
2444 | #define PIPEB_DATA_M1 0x61030 | 2707 | #define _PIPEB_DATA_M1 0x61030 |
2445 | #define PIPEB_DATA_M1_OFFSET 0 | 2708 | #define _PIPEB_DATA_N1 0x61034 |
2446 | #define PIPEB_DATA_N1 0x61034 | 2709 | |
2447 | #define PIPEB_DATA_N1_OFFSET 0 | 2710 | #define _PIPEB_DATA_M2 0x61038 |
2711 | #define _PIPEB_DATA_N2 0x6103c | ||
2448 | 2712 | ||
2449 | #define PIPEB_DATA_M2 0x61038 | 2713 | #define _PIPEB_LINK_M1 0x61040 |
2450 | #define PIPEB_DATA_M2_OFFSET 0 | 2714 | #define _PIPEB_LINK_N1 0x61044 |
2451 | #define PIPEB_DATA_N2 0x6103c | ||
2452 | #define PIPEB_DATA_N2_OFFSET 0 | ||
2453 | 2715 | ||
2454 | #define PIPEB_LINK_M1 0x61040 | 2716 | #define _PIPEB_LINK_M2 0x61048 |
2455 | #define PIPEB_LINK_M1_OFFSET 0 | 2717 | #define _PIPEB_LINK_N2 0x6104c |
2456 | #define PIPEB_LINK_N1 0x61044 | ||
2457 | #define PIPEB_LINK_N1_OFFSET 0 | ||
2458 | 2718 | ||
2459 | #define PIPEB_LINK_M2 0x61048 | 2719 | #define PIPE_DATA_M1(pipe) _PIPE(pipe, _PIPEA_DATA_M1, _PIPEB_DATA_M1) |
2460 | #define PIPEB_LINK_M2_OFFSET 0 | 2720 | #define PIPE_DATA_N1(pipe) _PIPE(pipe, _PIPEA_DATA_N1, _PIPEB_DATA_N1) |
2461 | #define PIPEB_LINK_N2 0x6104c | 2721 | #define PIPE_DATA_M2(pipe) _PIPE(pipe, _PIPEA_DATA_M2, _PIPEB_DATA_M2) |
2462 | #define PIPEB_LINK_N2_OFFSET 0 | 2722 | #define PIPE_DATA_N2(pipe) _PIPE(pipe, _PIPEA_DATA_N2, _PIPEB_DATA_N2) |
2723 | #define PIPE_LINK_M1(pipe) _PIPE(pipe, _PIPEA_LINK_M1, _PIPEB_LINK_M1) | ||
2724 | #define PIPE_LINK_N1(pipe) _PIPE(pipe, _PIPEA_LINK_N1, _PIPEB_LINK_N1) | ||
2725 | #define PIPE_LINK_M2(pipe) _PIPE(pipe, _PIPEA_LINK_M2, _PIPEB_LINK_M2) | ||
2726 | #define PIPE_LINK_N2(pipe) _PIPE(pipe, _PIPEA_LINK_N2, _PIPEB_LINK_N2) | ||
2463 | 2727 | ||
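These M/N pairs carry the DisplayPort data/link ratios, and TU_SIZE(x) packs the transfer-unit size minus one into bits 30:25: TU_SIZE(64) is 63<<25 = 0x7e000000, exactly TU_SIZE_MASK, so the default 64-symbol TU fills the whole field. A sketch of programming one pair through the new wrappers; the M/N values below are placeholders for what the DP link computation would produce:

    u32 data_m = TU_SIZE(64) | 0x123456;      /* M value in bits 23:0 */

    I915_WRITE(PIPE_DATA_M1(pipe), data_m);
    I915_WRITE(PIPE_DATA_N1(pipe), 0x234567); /* N value in bits 23:0 */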
2464 | /* CPU panel fitter */ | 2728 | /* CPU panel fitter */ |
2465 | #define PFA_CTL_1 0x68080 | 2729 | /* IVB+ has 3 fitters, 0 is 7x5 capable, the other two only 3x3 */ |
2466 | #define PFB_CTL_1 0x68880 | 2730 | #define _PFA_CTL_1 0x68080 |
2731 | #define _PFB_CTL_1 0x68880 | ||
2467 | #define PF_ENABLE (1<<31) | 2732 | #define PF_ENABLE (1<<31) |
2468 | #define PF_FILTER_MASK (3<<23) | 2733 | #define PF_FILTER_MASK (3<<23) |
2469 | #define PF_FILTER_PROGRAMMED (0<<23) | 2734 | #define PF_FILTER_PROGRAMMED (0<<23) |
2470 | #define PF_FILTER_MED_3x3 (1<<23) | 2735 | #define PF_FILTER_MED_3x3 (1<<23) |
2471 | #define PF_FILTER_EDGE_ENHANCE (2<<23) | 2736 | #define PF_FILTER_EDGE_ENHANCE (2<<23) |
2472 | #define PF_FILTER_EDGE_SOFTEN (3<<23) | 2737 | #define PF_FILTER_EDGE_SOFTEN (3<<23) |
2473 | #define PFA_WIN_SZ 0x68074 | 2738 | #define _PFA_WIN_SZ 0x68074 |
2474 | #define PFB_WIN_SZ 0x68874 | 2739 | #define _PFB_WIN_SZ 0x68874 |
2475 | #define PFA_WIN_POS 0x68070 | 2740 | #define _PFA_WIN_POS 0x68070 |
2476 | #define PFB_WIN_POS 0x68870 | 2741 | #define _PFB_WIN_POS 0x68870 |
2742 | #define _PFA_VSCALE 0x68084 | ||
2743 | #define _PFB_VSCALE 0x68884 | ||
2744 | #define _PFA_HSCALE 0x68090 | ||
2745 | #define _PFB_HSCALE 0x68890 | ||
2746 | |||
2747 | #define PF_CTL(pipe) _PIPE(pipe, _PFA_CTL_1, _PFB_CTL_1) | ||
2748 | #define PF_WIN_SZ(pipe) _PIPE(pipe, _PFA_WIN_SZ, _PFB_WIN_SZ) | ||
2749 | #define PF_WIN_POS(pipe) _PIPE(pipe, _PFA_WIN_POS, _PFB_WIN_POS) | ||
2750 | #define PF_VSCALE(pipe) _PIPE(pipe, _PFA_VSCALE, _PFB_VSCALE) | ||
2751 | #define PF_HSCALE(pipe) _PIPE(pipe, _PFA_HSCALE, _PFB_HSCALE) | ||
2477 | 2752 | ||
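A sketch of how the mode-set path might drive the panel fitter through these per-pipe wrappers, enabling the medium 3x3 filter and scaling to a dst_w x dst_h window at the panel origin; variable names are illustrative and the accessors are the driver's usual I915_WRITE:

    I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
    I915_WRITE(PF_WIN_POS(pipe), 0);                    /* x = y = 0 */
    I915_WRITE(PF_WIN_SZ(pipe), (dst_w << 16) | dst_h); /* w | h */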
2478 | /* legacy palette */ | 2753 | /* legacy palette */ |
2479 | #define LGC_PALETTE_A 0x4a000 | 2754 | #define _LGC_PALETTE_A 0x4a000 |
2480 | #define LGC_PALETTE_B 0x4a800 | 2755 | #define _LGC_PALETTE_B 0x4a800 |
2756 | #define LGC_PALETTE(pipe) _PIPE(pipe, _LGC_PALETTE_A, _LGC_PALETTE_B) | ||
2481 | 2757 | ||
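The legacy palette is 256 consecutive dword entries per pipe, which is why the suspend code below walks it in a loop. A sketch of a gamma load through the new wrapper, where 'lut' is a hypothetical 256-entry table of packed 8:8:8 RGB values:

    int i;

    for (i = 0; i < 256; i++)
        I915_WRITE(LGC_PALETTE(pipe) + i * 4, lut[i]);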
2482 | /* interrupts */ | 2758 | /* interrupts */ |
2483 | #define DE_MASTER_IRQ_CONTROL (1 << 31) | 2759 | #define DE_MASTER_IRQ_CONTROL (1 << 31) |
@@ -2506,6 +2782,19 @@ | |||
2506 | #define DE_PIPEA_VSYNC (1 << 3) | 2782 | #define DE_PIPEA_VSYNC (1 << 3) |
2507 | #define DE_PIPEA_FIFO_UNDERRUN (1 << 0) | 2783 | #define DE_PIPEA_FIFO_UNDERRUN (1 << 0) |
2508 | 2784 | ||
2785 | /* Ivybridge relocates several display engine interrupt bits */ | ||
2786 | #define DE_ERR_DEBUG_IVB (1<<30) | ||
2787 | #define DE_GSE_IVB (1<<29) | ||
2788 | #define DE_PCH_EVENT_IVB (1<<28) | ||
2789 | #define DE_DP_A_HOTPLUG_IVB (1<<27) | ||
2790 | #define DE_AUX_CHANNEL_A_IVB (1<<26) | ||
2791 | #define DE_SPRITEB_FLIP_DONE_IVB (1<<9) | ||
2792 | #define DE_SPRITEA_FLIP_DONE_IVB (1<<4) | ||
2793 | #define DE_PLANEB_FLIP_DONE_IVB (1<<8) | ||
2794 | #define DE_PLANEA_FLIP_DONE_IVB (1<<3) | ||
2795 | #define DE_PIPEB_VBLANK_IVB (1<<5) | ||
2796 | #define DE_PIPEA_VBLANK_IVB (1<<0) | ||
2797 | |||
2509 | #define DEISR 0x44000 | 2798 | #define DEISR 0x44000 |
2510 | #define DEIMR 0x44004 | 2799 | #define DEIMR 0x44004 |
2511 | #define DEIIR 0x44008 | 2800 | #define DEIIR 0x44008 |
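A sketch of an Ivybridge display-engine IRQ setup using the _IVB bit positions above: mask everything not handled in DEIMR, then enable the wanted events in DEIER (0x4400c, just past this hunk). Loosely modeled on the driver's IRQ postinstall path; the exact mask chosen here is illustrative:

    u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
                       DE_PCH_EVENT_IVB |
                       DE_PLANEA_FLIP_DONE_IVB | DE_PLANEB_FLIP_DONE_IVB;

    I915_WRITE(DEIMR, ~display_mask); /* unmask only what we handle */
    I915_WRITE(DEIER, display_mask |
               DE_PIPEA_VBLANK_IVB | DE_PIPEB_VBLANK_IVB);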
@@ -2516,7 +2805,8 @@ | |||
2516 | #define GT_SYNC_STATUS (1 << 2) | 2805 | #define GT_SYNC_STATUS (1 << 2) |
2517 | #define GT_USER_INTERRUPT (1 << 0) | 2806 | #define GT_USER_INTERRUPT (1 << 0) |
2518 | #define GT_BSD_USER_INTERRUPT (1 << 5) | 2807 | #define GT_BSD_USER_INTERRUPT (1 << 5) |
2519 | 2808 | #define GT_GEN6_BSD_USER_INTERRUPT (1 << 12) | |
2809 | #define GT_BLT_USER_INTERRUPT (1 << 22) | ||
2520 | 2810 | ||
2521 | #define GTISR 0x44010 | 2811 | #define GTISR 0x44010 |
2522 | #define GTIMR 0x44014 | 2812 | #define GTIMR 0x44014 |
@@ -2524,10 +2814,22 @@ | |||
2524 | #define GTIER 0x4401c | 2814 | #define GTIER 0x4401c |
2525 | 2815 | ||
2526 | #define ILK_DISPLAY_CHICKEN2 0x42004 | 2816 | #define ILK_DISPLAY_CHICKEN2 0x42004 |
2817 | /* Required on all Ironlake and Sandybridge according to the B-Spec. */ | ||
2818 | #define ILK_ELPIN_409_SELECT (1 << 25) | ||
2527 | #define ILK_DPARB_GATE (1<<22) | 2819 | #define ILK_DPARB_GATE (1<<22) |
2528 | #define ILK_VSDPFD_FULL (1<<21) | 2820 | #define ILK_VSDPFD_FULL (1<<21) |
2821 | #define ILK_DISPLAY_CHICKEN_FUSES 0x42014 | ||
2822 | #define ILK_INTERNAL_GRAPHICS_DISABLE (1<<31) | ||
2823 | #define ILK_INTERNAL_DISPLAY_DISABLE (1<<30) | ||
2824 | #define ILK_DISPLAY_DEBUG_DISABLE (1<<29) | ||
2825 | #define ILK_HDCP_DISABLE (1<<25) | ||
2826 | #define ILK_eDP_A_DISABLE (1<<24) | ||
2827 | #define ILK_DESKTOP (1<<23) | ||
2529 | #define ILK_DSPCLK_GATE 0x42020 | 2828 | #define ILK_DSPCLK_GATE 0x42020 |
2829 | #define IVB_VRHUNIT_CLK_GATE (1<<28) | ||
2530 | #define ILK_DPARB_CLK_GATE (1<<5) | 2830 | #define ILK_DPARB_CLK_GATE (1<<5) |
2831 | #define ILK_DPFD_CLK_GATE (1<<7) | ||
2832 | |||
2531 | /* According to the spec, bits 7/8/9 of 0x42020 must be set to enable FBC */ | 2833 | /* According to the spec, bits 7/8/9 of 0x42020 must be set to enable FBC */
2532 | #define ILK_CLK_FBC (1<<7) | 2834 | #define ILK_CLK_FBC (1<<7) |
2533 | #define ILK_DPFC_DIS1 (1<<8) | 2835 | #define ILK_DPFC_DIS1 (1<<8) |
@@ -2540,17 +2842,50 @@ | |||
2540 | /* PCH */ | 2842 | /* PCH */ |
2541 | 2843 | ||
2542 | /* south display engine interrupt */ | 2844 | /* south display engine interrupt */ |
2845 | #define SDE_AUDIO_POWER_D (1 << 27) | ||
2846 | #define SDE_AUDIO_POWER_C (1 << 26) | ||
2847 | #define SDE_AUDIO_POWER_B (1 << 25) | ||
2848 | #define SDE_AUDIO_POWER_SHIFT (25) | ||
2849 | #define SDE_AUDIO_POWER_MASK (7 << SDE_AUDIO_POWER_SHIFT) | ||
2850 | #define SDE_GMBUS (1 << 24) | ||
2851 | #define SDE_AUDIO_HDCP_TRANSB (1 << 23) | ||
2852 | #define SDE_AUDIO_HDCP_TRANSA (1 << 22) | ||
2853 | #define SDE_AUDIO_HDCP_MASK (3 << 22) | ||
2854 | #define SDE_AUDIO_TRANSB (1 << 21) | ||
2855 | #define SDE_AUDIO_TRANSA (1 << 20) | ||
2856 | #define SDE_AUDIO_TRANS_MASK (3 << 20) | ||
2857 | #define SDE_POISON (1 << 19) | ||
2858 | /* 18 reserved */ | ||
2859 | #define SDE_FDI_RXB (1 << 17) | ||
2860 | #define SDE_FDI_RXA (1 << 16) | ||
2861 | #define SDE_FDI_MASK (3 << 16) | ||
2862 | #define SDE_AUXD (1 << 15) | ||
2863 | #define SDE_AUXC (1 << 14) | ||
2864 | #define SDE_AUXB (1 << 13) | ||
2865 | #define SDE_AUX_MASK (7 << 13) | ||
2866 | /* 12 reserved */ | ||
2543 | #define SDE_CRT_HOTPLUG (1 << 11) | 2867 | #define SDE_CRT_HOTPLUG (1 << 11) |
2544 | #define SDE_PORTD_HOTPLUG (1 << 10) | 2868 | #define SDE_PORTD_HOTPLUG (1 << 10) |
2545 | #define SDE_PORTC_HOTPLUG (1 << 9) | 2869 | #define SDE_PORTC_HOTPLUG (1 << 9) |
2546 | #define SDE_PORTB_HOTPLUG (1 << 8) | 2870 | #define SDE_PORTB_HOTPLUG (1 << 8) |
2547 | #define SDE_SDVOB_HOTPLUG (1 << 6) | 2871 | #define SDE_SDVOB_HOTPLUG (1 << 6) |
2548 | #define SDE_HOTPLUG_MASK (0xf << 8) | 2872 | #define SDE_HOTPLUG_MASK (0xf << 8) |
2873 | #define SDE_TRANSB_CRC_DONE (1 << 5) | ||
2874 | #define SDE_TRANSB_CRC_ERR (1 << 4) | ||
2875 | #define SDE_TRANSB_FIFO_UNDER (1 << 3) | ||
2876 | #define SDE_TRANSA_CRC_DONE (1 << 2) | ||
2877 | #define SDE_TRANSA_CRC_ERR (1 << 1) | ||
2878 | #define SDE_TRANSA_FIFO_UNDER (1 << 0) | ||
2879 | #define SDE_TRANS_MASK (0x3f) | ||
2549 | /* CPT */ | 2880 | /* CPT */ |
2550 | #define SDE_CRT_HOTPLUG_CPT (1 << 19) | 2881 | #define SDE_CRT_HOTPLUG_CPT (1 << 19) |
2551 | #define SDE_PORTD_HOTPLUG_CPT (1 << 23) | 2882 | #define SDE_PORTD_HOTPLUG_CPT (1 << 23) |
2552 | #define SDE_PORTC_HOTPLUG_CPT (1 << 22) | 2883 | #define SDE_PORTC_HOTPLUG_CPT (1 << 22) |
2553 | #define SDE_PORTB_HOTPLUG_CPT (1 << 21) | 2884 | #define SDE_PORTB_HOTPLUG_CPT (1 << 21) |
2885 | #define SDE_HOTPLUG_MASK_CPT (SDE_CRT_HOTPLUG_CPT | \ | ||
2886 | SDE_PORTD_HOTPLUG_CPT | \ | ||
2887 | SDE_PORTC_HOTPLUG_CPT | \ | ||
2888 | SDE_PORTB_HOTPLUG_CPT) | ||
2554 | 2889 | ||
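A sketch of decoding a south display engine interrupt with the masks above, picking the CPT layout when appropriate; HAS_PCH_CPT() is the driver's existing PCH-type test and SDEIIR (0xc4008) follows the SDEISR/SDEIMR pair below:

    u32 pch_iir = I915_READ(SDEIIR);
    u32 hotplug_mask = HAS_PCH_CPT(dev) ? SDE_HOTPLUG_MASK_CPT
                                        : SDE_HOTPLUG_MASK;

    if (pch_iir & hotplug_mask)
        ; /* e.g. queue the hotplug work */
    I915_WRITE(SDEIIR, pch_iir); /* clear the handled bits */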
2555 | #define SDEISR 0xc4000 | 2890 | #define SDEISR 0xc4000 |
2556 | #define SDEIMR 0xc4004 | 2891 | #define SDEIMR 0xc4004 |
@@ -2598,13 +2933,17 @@ | |||
2598 | #define PCH_GMBUS4 0xc5110 | 2933 | #define PCH_GMBUS4 0xc5110 |
2599 | #define PCH_GMBUS5 0xc5120 | 2934 | #define PCH_GMBUS5 0xc5120 |
2600 | 2935 | ||
2601 | #define PCH_DPLL_A 0xc6014 | 2936 | #define _PCH_DPLL_A 0xc6014 |
2602 | #define PCH_DPLL_B 0xc6018 | 2937 | #define _PCH_DPLL_B 0xc6018 |
2938 | #define PCH_DPLL(pipe) _PIPE(pipe, _PCH_DPLL_A, _PCH_DPLL_B) | ||
2603 | 2939 | ||
2604 | #define PCH_FPA0 0xc6040 | 2940 | #define _PCH_FPA0 0xc6040 |
2605 | #define PCH_FPA1 0xc6044 | 2941 | #define FP_CB_TUNE (0x3<<22) |
2606 | #define PCH_FPB0 0xc6048 | 2942 | #define _PCH_FPA1 0xc6044 |
2607 | #define PCH_FPB1 0xc604c | 2943 | #define _PCH_FPB0 0xc6048 |
2944 | #define _PCH_FPB1 0xc604c | ||
2945 | #define PCH_FP0(pipe) _PIPE(pipe, _PCH_FPA0, _PCH_FPB0) | ||
2946 | #define PCH_FP1(pipe) _PIPE(pipe, _PCH_FPA1, _PCH_FPB1) | ||
2608 | 2947 | ||
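A sketch of programming a PCH PLL through the new per-pipe wrappers, in the style of the Ironlake mode-set path. 'fp' and 'dpll' stand for precomputed divider and control words; the values and sequencing details here are illustrative:

    I915_WRITE(PCH_FP0(pipe), fp);
    I915_WRITE(PCH_DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);
    POSTING_READ(PCH_DPLL(pipe));
    udelay(150);                      /* let the PLL settle */
    I915_WRITE(PCH_DPLL(pipe), dpll); /* then turn the VCO on */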
2609 | #define PCH_DPLL_TEST 0xc606c | 2948 | #define PCH_DPLL_TEST 0xc606c |
2610 | 2949 | ||
@@ -2623,6 +2962,7 @@ | |||
2623 | #define DREF_NONSPREAD_SOURCE_MASK (3<<9) | 2962 | #define DREF_NONSPREAD_SOURCE_MASK (3<<9) |
2624 | #define DREF_SUPERSPREAD_SOURCE_DISABLE (0<<7) | 2963 | #define DREF_SUPERSPREAD_SOURCE_DISABLE (0<<7) |
2625 | #define DREF_SUPERSPREAD_SOURCE_ENABLE (2<<7) | 2964 | #define DREF_SUPERSPREAD_SOURCE_ENABLE (2<<7) |
2965 | #define DREF_SUPERSPREAD_SOURCE_MASK (3<<7) | ||
2626 | #define DREF_SSC4_DOWNSPREAD (0<<6) | 2966 | #define DREF_SSC4_DOWNSPREAD (0<<6) |
2627 | #define DREF_SSC4_CENTERSPREAD (1<<6) | 2967 | #define DREF_SSC4_CENTERSPREAD (1<<6) |
2628 | #define DREF_SSC1_DISABLE (0<<1) | 2968 | #define DREF_SSC1_DISABLE (0<<1) |
@@ -2655,52 +2995,69 @@ | |||
2655 | 2995 | ||
2656 | /* transcoder */ | 2996 | /* transcoder */ |
2657 | 2997 | ||
2658 | #define TRANS_HTOTAL_A 0xe0000 | 2998 | #define _TRANS_HTOTAL_A 0xe0000 |
2659 | #define TRANS_HTOTAL_SHIFT 16 | 2999 | #define TRANS_HTOTAL_SHIFT 16 |
2660 | #define TRANS_HACTIVE_SHIFT 0 | 3000 | #define TRANS_HACTIVE_SHIFT 0 |
2661 | #define TRANS_HBLANK_A 0xe0004 | 3001 | #define _TRANS_HBLANK_A 0xe0004 |
2662 | #define TRANS_HBLANK_END_SHIFT 16 | 3002 | #define TRANS_HBLANK_END_SHIFT 16 |
2663 | #define TRANS_HBLANK_START_SHIFT 0 | 3003 | #define TRANS_HBLANK_START_SHIFT 0 |
2664 | #define TRANS_HSYNC_A 0xe0008 | 3004 | #define _TRANS_HSYNC_A 0xe0008 |
2665 | #define TRANS_HSYNC_END_SHIFT 16 | 3005 | #define TRANS_HSYNC_END_SHIFT 16 |
2666 | #define TRANS_HSYNC_START_SHIFT 0 | 3006 | #define TRANS_HSYNC_START_SHIFT 0 |
2667 | #define TRANS_VTOTAL_A 0xe000c | 3007 | #define _TRANS_VTOTAL_A 0xe000c |
2668 | #define TRANS_VTOTAL_SHIFT 16 | 3008 | #define TRANS_VTOTAL_SHIFT 16 |
2669 | #define TRANS_VACTIVE_SHIFT 0 | 3009 | #define TRANS_VACTIVE_SHIFT 0 |
2670 | #define TRANS_VBLANK_A 0xe0010 | 3010 | #define _TRANS_VBLANK_A 0xe0010 |
2671 | #define TRANS_VBLANK_END_SHIFT 16 | 3011 | #define TRANS_VBLANK_END_SHIFT 16 |
2672 | #define TRANS_VBLANK_START_SHIFT 0 | 3012 | #define TRANS_VBLANK_START_SHIFT 0 |
2673 | #define TRANS_VSYNC_A 0xe0014 | 3013 | #define _TRANS_VSYNC_A 0xe0014 |
2674 | #define TRANS_VSYNC_END_SHIFT 16 | 3014 | #define TRANS_VSYNC_END_SHIFT 16 |
2675 | #define TRANS_VSYNC_START_SHIFT 0 | 3015 | #define TRANS_VSYNC_START_SHIFT 0 |
2676 | 3016 | ||
2677 | #define TRANSA_DATA_M1 0xe0030 | 3017 | #define _TRANSA_DATA_M1 0xe0030 |
2678 | #define TRANSA_DATA_N1 0xe0034 | 3018 | #define _TRANSA_DATA_N1 0xe0034 |
2679 | #define TRANSA_DATA_M2 0xe0038 | 3019 | #define _TRANSA_DATA_M2 0xe0038 |
2680 | #define TRANSA_DATA_N2 0xe003c | 3020 | #define _TRANSA_DATA_N2 0xe003c |
2681 | #define TRANSA_DP_LINK_M1 0xe0040 | 3021 | #define _TRANSA_DP_LINK_M1 0xe0040 |
2682 | #define TRANSA_DP_LINK_N1 0xe0044 | 3022 | #define _TRANSA_DP_LINK_N1 0xe0044 |
2683 | #define TRANSA_DP_LINK_M2 0xe0048 | 3023 | #define _TRANSA_DP_LINK_M2 0xe0048 |
2684 | #define TRANSA_DP_LINK_N2 0xe004c | 3024 | #define _TRANSA_DP_LINK_N2 0xe004c |
2685 | 3025 | ||
2686 | #define TRANS_HTOTAL_B 0xe1000 | 3026 | #define _TRANS_HTOTAL_B 0xe1000 |
2687 | #define TRANS_HBLANK_B 0xe1004 | 3027 | #define _TRANS_HBLANK_B 0xe1004 |
2688 | #define TRANS_HSYNC_B 0xe1008 | 3028 | #define _TRANS_HSYNC_B 0xe1008 |
2689 | #define TRANS_VTOTAL_B 0xe100c | 3029 | #define _TRANS_VTOTAL_B 0xe100c |
2690 | #define TRANS_VBLANK_B 0xe1010 | 3030 | #define _TRANS_VBLANK_B 0xe1010 |
2691 | #define TRANS_VSYNC_B 0xe1014 | 3031 | #define _TRANS_VSYNC_B 0xe1014 |
2692 | 3032 | ||
2693 | #define TRANSB_DATA_M1 0xe1030 | 3033 | #define TRANS_HTOTAL(pipe) _PIPE(pipe, _TRANS_HTOTAL_A, _TRANS_HTOTAL_B) |
2694 | #define TRANSB_DATA_N1 0xe1034 | 3034 | #define TRANS_HBLANK(pipe) _PIPE(pipe, _TRANS_HBLANK_A, _TRANS_HBLANK_B) |
2695 | #define TRANSB_DATA_M2 0xe1038 | 3035 | #define TRANS_HSYNC(pipe) _PIPE(pipe, _TRANS_HSYNC_A, _TRANS_HSYNC_B) |
2696 | #define TRANSB_DATA_N2 0xe103c | 3036 | #define TRANS_VTOTAL(pipe) _PIPE(pipe, _TRANS_VTOTAL_A, _TRANS_VTOTAL_B) |
2697 | #define TRANSB_DP_LINK_M1 0xe1040 | 3037 | #define TRANS_VBLANK(pipe) _PIPE(pipe, _TRANS_VBLANK_A, _TRANS_VBLANK_B) |
2698 | #define TRANSB_DP_LINK_N1 0xe1044 | 3038 | #define TRANS_VSYNC(pipe) _PIPE(pipe, _TRANS_VSYNC_A, _TRANS_VSYNC_B) |
2699 | #define TRANSB_DP_LINK_M2 0xe1048 | 3039 | |
2700 | #define TRANSB_DP_LINK_N2 0xe104c | 3040 | #define _TRANSB_DATA_M1 0xe1030 |
2701 | 3041 | #define _TRANSB_DATA_N1 0xe1034 | |
2702 | #define TRANSACONF 0xf0008 | 3042 | #define _TRANSB_DATA_M2 0xe1038 |
2703 | #define TRANSBCONF 0xf1008 | 3043 | #define _TRANSB_DATA_N2 0xe103c |
3044 | #define _TRANSB_DP_LINK_M1 0xe1040 | ||
3045 | #define _TRANSB_DP_LINK_N1 0xe1044 | ||
3046 | #define _TRANSB_DP_LINK_M2 0xe1048 | ||
3047 | #define _TRANSB_DP_LINK_N2 0xe104c | ||
3048 | |||
3049 | #define TRANSDATA_M1(pipe) _PIPE(pipe, _TRANSA_DATA_M1, _TRANSB_DATA_M1) | ||
3050 | #define TRANSDATA_N1(pipe) _PIPE(pipe, _TRANSA_DATA_N1, _TRANSB_DATA_N1) | ||
3051 | #define TRANSDATA_M2(pipe) _PIPE(pipe, _TRANSA_DATA_M2, _TRANSB_DATA_M2) | ||
3052 | #define TRANSDATA_N2(pipe) _PIPE(pipe, _TRANSA_DATA_N2, _TRANSB_DATA_N2) | ||
3053 | #define TRANSDPLINK_M1(pipe) _PIPE(pipe, _TRANSA_DP_LINK_M1, _TRANSB_DP_LINK_M1) | ||
3054 | #define TRANSDPLINK_N1(pipe) _PIPE(pipe, _TRANSA_DP_LINK_N1, _TRANSB_DP_LINK_N1) | ||
3055 | #define TRANSDPLINK_M2(pipe) _PIPE(pipe, _TRANSA_DP_LINK_M2, _TRANSB_DP_LINK_M2) | ||
3056 | #define TRANSDPLINK_N2(pipe) _PIPE(pipe, _TRANSA_DP_LINK_N2, _TRANSB_DP_LINK_N2) | ||
3057 | |||
3058 | #define _TRANSACONF 0xf0008 | ||
3059 | #define _TRANSBCONF 0xf1008 | ||
3060 | #define TRANSCONF(plane) _PIPE(plane, _TRANSACONF, _TRANSBCONF) | ||
2704 | #define TRANS_DISABLE (0<<31) | 3061 | #define TRANS_DISABLE (0<<31) |
2705 | #define TRANS_ENABLE (1<<31) | 3062 | #define TRANS_ENABLE (1<<31) |
2706 | #define TRANS_STATE_MASK (1<<30) | 3063 | #define TRANS_STATE_MASK (1<<30) |
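A sketch of enabling a PCH transcoder through TRANSCONF(pipe) and waiting for the hardware to reflect the change; wait_for() is the driver's polling helper and the 100 ms timeout is illustrative:

    u32 val = I915_READ(TRANSCONF(pipe));

    I915_WRITE(TRANSCONF(pipe), val | TRANS_ENABLE);
    if (wait_for(I915_READ(TRANSCONF(pipe)) & TRANS_STATE_MASK, 100))
        DRM_ERROR("failed to enable transcoder %d\n", pipe);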
@@ -2718,13 +3075,22 @@ | |||
2718 | #define TRANS_6BPC (2<<5) | 3075 | #define TRANS_6BPC (2<<5) |
2719 | #define TRANS_12BPC (3<<5) | 3076 | #define TRANS_12BPC (3<<5) |
2720 | 3077 | ||
2721 | #define FDI_RXA_CHICKEN 0xc200c | 3078 | #define SOUTH_CHICKEN2 0xc2004 |
2722 | #define FDI_RXB_CHICKEN 0xc2010 | 3079 | #define DPLS_EDP_PPS_FIX_DIS (1<<0) |
2723 | #define FDI_RX_PHASE_SYNC_POINTER_ENABLE (1) | 3080 | |
3081 | #define _FDI_RXA_CHICKEN 0xc200c | ||
3082 | #define _FDI_RXB_CHICKEN 0xc2010 | ||
3083 | #define FDI_RX_PHASE_SYNC_POINTER_OVR (1<<1) | ||
3084 | #define FDI_RX_PHASE_SYNC_POINTER_EN (1<<0) | ||
3085 | #define FDI_RX_CHICKEN(pipe) _PIPE(pipe, _FDI_RXA_CHICKEN, _FDI_RXB_CHICKEN) | ||
3086 | |||
3087 | #define SOUTH_DSPCLK_GATE_D 0xc2020 | ||
3088 | #define PCH_DPLSUNIT_CLOCK_GATE_DISABLE (1<<29) | ||
2724 | 3089 | ||
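Splitting the old FDI_RX_PHASE_SYNC_POINTER_ENABLE value into separate OVR (override) and EN bits lets the link-training code take manual control of the phase sync pointer. A hedged read-modify-write sketch using the new wrapper; whether training sets or clears EN depends on the platform, so treat this as shape, not policy:

    u32 temp = I915_READ(FDI_RX_CHICKEN(pipe));

    temp |= FDI_RX_PHASE_SYNC_POINTER_OVR; /* take ownership of the bit */
    temp &= ~FDI_RX_PHASE_SYNC_POINTER_EN; /* ...and park the pointer */
    I915_WRITE(FDI_RX_CHICKEN(pipe), temp);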
2725 | /* CPU: FDI_TX */ | 3090 | /* CPU: FDI_TX */ |
2726 | #define FDI_TXA_CTL 0x60100 | 3091 | #define _FDI_TXA_CTL 0x60100 |
2727 | #define FDI_TXB_CTL 0x61100 | 3092 | #define _FDI_TXB_CTL 0x61100 |
3093 | #define FDI_TX_CTL(pipe) _PIPE(pipe, _FDI_TXA_CTL, _FDI_TXB_CTL) | ||
2728 | #define FDI_TX_DISABLE (0<<31) | 3094 | #define FDI_TX_DISABLE (0<<31) |
2729 | #define FDI_TX_ENABLE (1<<31) | 3095 | #define FDI_TX_ENABLE (1<<31) |
2730 | #define FDI_LINK_TRAIN_PATTERN_1 (0<<28) | 3096 | #define FDI_LINK_TRAIN_PATTERN_1 (0<<28) |
@@ -2759,16 +3125,26 @@ | |||
2759 | #define FDI_TX_ENHANCE_FRAME_ENABLE (1<<18) | 3125 | #define FDI_TX_ENHANCE_FRAME_ENABLE (1<<18) |
2760 | /* Ironlake: hardwired to 1 */ | 3126 | /* Ironlake: hardwired to 1 */ |
2761 | #define FDI_TX_PLL_ENABLE (1<<14) | 3127 | #define FDI_TX_PLL_ENABLE (1<<14) |
3128 | |||
3129 | /* Ivybridge uses different bit positions for link training */ | ||
3130 | #define FDI_LINK_TRAIN_PATTERN_1_IVB (0<<8) | ||
3131 | #define FDI_LINK_TRAIN_PATTERN_2_IVB (1<<8) | ||
3132 | #define FDI_LINK_TRAIN_PATTERN_IDLE_IVB (2<<8) | ||
3133 | #define FDI_LINK_TRAIN_NONE_IVB (3<<8) | ||
3134 | |||
2762 | /* both Tx and Rx */ | 3135 | /* both Tx and Rx */ |
3136 | #define FDI_LINK_TRAIN_AUTO (1<<10) | ||
2763 | #define FDI_SCRAMBLING_ENABLE (0<<7) | 3137 | #define FDI_SCRAMBLING_ENABLE (0<<7) |
2764 | #define FDI_SCRAMBLING_DISABLE (1<<7) | 3138 | #define FDI_SCRAMBLING_DISABLE (1<<7) |
2765 | 3139 | ||
2766 | /* FDI_RX, FDI_X is hard-wired to Transcoder_X */ | 3140 | /* FDI_RX, FDI_X is hard-wired to Transcoder_X */ |
2767 | #define FDI_RXA_CTL 0xf000c | 3141 | #define _FDI_RXA_CTL 0xf000c |
2768 | #define FDI_RXB_CTL 0xf100c | 3142 | #define _FDI_RXB_CTL 0xf100c |
3143 | #define FDI_RX_CTL(pipe) _PIPE(pipe, _FDI_RXA_CTL, _FDI_RXB_CTL) | ||
2769 | #define FDI_RX_ENABLE (1<<31) | 3144 | #define FDI_RX_ENABLE (1<<31) |
2770 | #define FDI_RX_DISABLE (0<<31) | ||
2771 | /* train, dp width same as FDI_TX */ | 3145 | /* train, dp width same as FDI_TX */ |
3146 | #define FDI_FS_ERRC_ENABLE (1<<27) | ||
3147 | #define FDI_FE_ERRC_ENABLE (1<<26) | ||
2772 | #define FDI_DP_PORT_WIDTH_X8 (7<<19) | 3148 | #define FDI_DP_PORT_WIDTH_X8 (7<<19) |
2773 | #define FDI_8BPC (0<<16) | 3149 | #define FDI_8BPC (0<<16) |
2774 | #define FDI_10BPC (1<<16) | 3150 | #define FDI_10BPC (1<<16) |
@@ -2782,8 +3158,7 @@ | |||
2782 | #define FDI_FS_ERR_REPORT_ENABLE (1<<9) | 3158 | #define FDI_FS_ERR_REPORT_ENABLE (1<<9) |
2783 | #define FDI_FE_ERR_REPORT_ENABLE (1<<8) | 3159 | #define FDI_FE_ERR_REPORT_ENABLE (1<<8) |
2784 | #define FDI_RX_ENHANCE_FRAME_ENABLE (1<<6) | 3160 | #define FDI_RX_ENHANCE_FRAME_ENABLE (1<<6) |
2785 | #define FDI_SEL_RAWCLK (0<<4) | 3161 | #define FDI_PCDCLK (1<<4) |
2786 | #define FDI_SEL_PCDCLK (1<<4) | ||
2787 | /* CPT */ | 3162 | /* CPT */ |
2788 | #define FDI_AUTO_TRAINING (1<<10) | 3163 | #define FDI_AUTO_TRAINING (1<<10) |
2789 | #define FDI_LINK_TRAIN_PATTERN_1_CPT (0<<8) | 3164 | #define FDI_LINK_TRAIN_PATTERN_1_CPT (0<<8) |
@@ -2792,12 +3167,15 @@ | |||
2792 | #define FDI_LINK_TRAIN_NORMAL_CPT (3<<8) | 3167 | #define FDI_LINK_TRAIN_NORMAL_CPT (3<<8) |
2793 | #define FDI_LINK_TRAIN_PATTERN_MASK_CPT (3<<8) | 3168 | #define FDI_LINK_TRAIN_PATTERN_MASK_CPT (3<<8) |
2794 | 3169 | ||
2795 | #define FDI_RXA_MISC 0xf0010 | 3170 | #define _FDI_RXA_MISC 0xf0010 |
2796 | #define FDI_RXB_MISC 0xf1010 | 3171 | #define _FDI_RXB_MISC 0xf1010 |
2797 | #define FDI_RXA_TUSIZE1 0xf0030 | 3172 | #define _FDI_RXA_TUSIZE1 0xf0030 |
2798 | #define FDI_RXA_TUSIZE2 0xf0038 | 3173 | #define _FDI_RXA_TUSIZE2 0xf0038 |
2799 | #define FDI_RXB_TUSIZE1 0xf1030 | 3174 | #define _FDI_RXB_TUSIZE1 0xf1030 |
2800 | #define FDI_RXB_TUSIZE2 0xf1038 | 3175 | #define _FDI_RXB_TUSIZE2 0xf1038 |
3176 | #define FDI_RX_MISC(pipe) _PIPE(pipe, _FDI_RXA_MISC, _FDI_RXB_MISC) | ||
3177 | #define FDI_RX_TUSIZE1(pipe) _PIPE(pipe, _FDI_RXA_TUSIZE1, _FDI_RXB_TUSIZE1) | ||
3178 | #define FDI_RX_TUSIZE2(pipe) _PIPE(pipe, _FDI_RXA_TUSIZE2, _FDI_RXB_TUSIZE2) | ||
2801 | 3179 | ||
2802 | /* FDI_RX interrupt register format */ | 3180 | /* FDI_RX interrupt register format */ |
2803 | #define FDI_RX_INTER_LANE_ALIGN (1<<10) | 3181 | #define FDI_RX_INTER_LANE_ALIGN (1<<10) |
@@ -2812,10 +3190,12 @@ | |||
2812 | #define FDI_RX_CROSS_CLOCK_OVERFLOW (1<<1) | 3190 | #define FDI_RX_CROSS_CLOCK_OVERFLOW (1<<1) |
2813 | #define FDI_RX_SYMBOL_QUEUE_OVERFLOW (1<<0) | 3191 | #define FDI_RX_SYMBOL_QUEUE_OVERFLOW (1<<0) |
2814 | 3192 | ||
2815 | #define FDI_RXA_IIR 0xf0014 | 3193 | #define _FDI_RXA_IIR 0xf0014 |
2816 | #define FDI_RXA_IMR 0xf0018 | 3194 | #define _FDI_RXA_IMR 0xf0018 |
2817 | #define FDI_RXB_IIR 0xf1014 | 3195 | #define _FDI_RXB_IIR 0xf1014 |
2818 | #define FDI_RXB_IMR 0xf1018 | 3196 | #define _FDI_RXB_IMR 0xf1018 |
3197 | #define FDI_RX_IIR(pipe) _PIPE(pipe, _FDI_RXA_IIR, _FDI_RXB_IIR) | ||
3198 | #define FDI_RX_IMR(pipe) _PIPE(pipe, _FDI_RXA_IMR, _FDI_RXB_IMR) | ||
2819 | 3199 | ||
2820 | #define FDI_PLL_CTL_1 0xfe000 | 3200 | #define FDI_PLL_CTL_1 0xfe000 |
2821 | #define FDI_PLL_CTL_2 0xfe004 | 3201 | #define FDI_PLL_CTL_2 0xfe004 |
@@ -2845,11 +3225,15 @@ | |||
2845 | #define ADPA_CRT_HOTPLUG_VOLREF_475MV (1<<17) | 3225 | #define ADPA_CRT_HOTPLUG_VOLREF_475MV (1<<17) |
2846 | #define ADPA_CRT_HOTPLUG_FORCE_TRIGGER (1<<16) | 3226 | #define ADPA_CRT_HOTPLUG_FORCE_TRIGGER (1<<16) |
2847 | 3227 | ||
3228 | #define ADPA_PIPE_ENABLED(V, P) \ | ||
3229 | (((V) & (ADPA_TRANS_SELECT_MASK | ADPA_DAC_ENABLE)) == ((P) << 30 | ADPA_DAC_ENABLE)) | ||
3230 | |||
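ADPA_PIPE_ENABLED(V, P) answers "is the CRT DAC enabled and driven by pipe P?" by comparing the transcoder-select field and the enable bit in one test; HDMI_PIPE_ENABLED below follows the same pattern for digital ports. A usage sketch, assuming PCH_ADPA is the PCH CRT control register as elsewhere in this header:

    u32 adpa = I915_READ(PCH_ADPA);

    if (ADPA_PIPE_ENABLED(adpa, pipe))
        ; /* CRT output is live on this pipe */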
2848 | /* or SDVOB */ | 3231 | /* or SDVOB */ |
2849 | #define HDMIB 0xe1140 | 3232 | #define HDMIB 0xe1140 |
2850 | #define PORT_ENABLE (1 << 31) | 3233 | #define PORT_ENABLE (1 << 31) |
2851 | #define TRANSCODER_A (0) | 3234 | #define TRANSCODER_A (0) |
2852 | #define TRANSCODER_B (1 << 30) | 3235 | #define TRANSCODER_B (1 << 30) |
3236 | #define TRANSCODER_MASK (1 << 30) | ||
2853 | #define COLOR_FORMAT_8bpc (0) | 3237 | #define COLOR_FORMAT_8bpc (0) |
2854 | #define COLOR_FORMAT_12bpc (3 << 26) | 3238 | #define COLOR_FORMAT_12bpc (3 << 26) |
2855 | #define SDVOB_HOTPLUG_ENABLE (1 << 23) | 3239 | #define SDVOB_HOTPLUG_ENABLE (1 << 23) |
@@ -2865,6 +3249,9 @@ | |||
2865 | #define HSYNC_ACTIVE_HIGH (1 << 3) | 3249 | #define HSYNC_ACTIVE_HIGH (1 << 3) |
2866 | #define PORT_DETECTED (1 << 2) | 3250 | #define PORT_DETECTED (1 << 2) |
2867 | 3251 | ||
3252 | #define HDMI_PIPE_ENABLED(V, P) \ | ||
3253 | (((V) & (TRANSCODER_MASK | PORT_ENABLE)) == ((P) << 30 | PORT_ENABLE)) | ||
3254 | |||
2868 | /* PCH SDVOB multiplex with HDMIB */ | 3255 | /* PCH SDVOB multiplex with HDMIB */ |
2869 | #define PCH_SDVOB HDMIB | 3256 | #define PCH_SDVOB HDMIB |
2870 | 3257 | ||
@@ -2935,10 +3322,12 @@ | |||
2935 | #define TRANS_DP_CTL_A 0xe0300 | 3322 | #define TRANS_DP_CTL_A 0xe0300 |
2936 | #define TRANS_DP_CTL_B 0xe1300 | 3323 | #define TRANS_DP_CTL_B 0xe1300 |
2937 | #define TRANS_DP_CTL_C 0xe2300 | 3324 | #define TRANS_DP_CTL_C 0xe2300 |
3325 | #define TRANS_DP_CTL(pipe) (TRANS_DP_CTL_A + (pipe) * 0x01000) | ||
2938 | #define TRANS_DP_OUTPUT_ENABLE (1<<31) | 3326 | #define TRANS_DP_OUTPUT_ENABLE (1<<31) |
2939 | #define TRANS_DP_PORT_SEL_B (0<<29) | 3327 | #define TRANS_DP_PORT_SEL_B (0<<29) |
2940 | #define TRANS_DP_PORT_SEL_C (1<<29) | 3328 | #define TRANS_DP_PORT_SEL_C (1<<29) |
2941 | #define TRANS_DP_PORT_SEL_D (2<<29) | 3329 | #define TRANS_DP_PORT_SEL_D (2<<29) |
3330 | #define TRANS_DP_PORT_SEL_NONE (3<<29) | ||
2942 | #define TRANS_DP_PORT_SEL_MASK (3<<29) | 3331 | #define TRANS_DP_PORT_SEL_MASK (3<<29) |
2943 | #define TRANS_DP_AUDIO_ONLY (1<<26) | 3332 | #define TRANS_DP_AUDIO_ONLY (1<<26) |
2944 | #define TRANS_DP_ENH_FRAMING (1<<18) | 3333 | #define TRANS_DP_ENH_FRAMING (1<<18) |
@@ -2946,6 +3335,7 @@ | |||
2946 | #define TRANS_DP_10BPC (1<<9) | 3335 | #define TRANS_DP_10BPC (1<<9) |
2947 | #define TRANS_DP_6BPC (2<<9) | 3336 | #define TRANS_DP_6BPC (2<<9) |
2948 | #define TRANS_DP_12BPC (3<<9) | 3337 | #define TRANS_DP_12BPC (3<<9) |
3338 | #define TRANS_DP_BPC_MASK (3<<9) | ||
2949 | #define TRANS_DP_VSYNC_ACTIVE_HIGH (1<<4) | 3339 | #define TRANS_DP_VSYNC_ACTIVE_HIGH (1<<4) |
2950 | #define TRANS_DP_VSYNC_ACTIVE_LOW 0 | 3340 | #define TRANS_DP_VSYNC_ACTIVE_LOW 0 |
2951 | #define TRANS_DP_HSYNC_ACTIVE_HIGH (1<<3) | 3341 | #define TRANS_DP_HSYNC_ACTIVE_HIGH (1<<3) |
@@ -2959,10 +3349,92 @@ | |||
2959 | #define EDP_LINK_TRAIN_600MV_3_5DB_SNB_A (0x01<<22) | 3349 | #define EDP_LINK_TRAIN_600MV_3_5DB_SNB_A (0x01<<22) |
2960 | #define EDP_LINK_TRAIN_800MV_0DB_SNB_A (0x0<<22) | 3350 | #define EDP_LINK_TRAIN_800MV_0DB_SNB_A (0x0<<22) |
2961 | /* SNB B-stepping */ | 3351 | /* SNB B-stepping */ |
2962 | #define EDP_LINK_TRAIN_400MV_0DB_SNB_B (0x0<<22) | 3352 | #define EDP_LINK_TRAIN_400_600MV_0DB_SNB_B (0x0<<22) |
2963 | #define EDP_LINK_TRAIN_400MV_6DB_SNB_B (0x3a<<22) | 3353 | #define EDP_LINK_TRAIN_400MV_3_5DB_SNB_B (0x1<<22) |
2964 | #define EDP_LINK_TRAIN_600MV_3_5DB_SNB_B (0x39<<22) | 3354 | #define EDP_LINK_TRAIN_400_600MV_6DB_SNB_B (0x3a<<22) |
2965 | #define EDP_LINK_TRAIN_800MV_0DB_SNB_B (0x38<<22) | 3355 | #define EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B (0x39<<22) |
3356 | #define EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B (0x38<<22) | ||
2966 | #define EDP_LINK_TRAIN_VOL_EMP_MASK_SNB (0x3f<<22) | 3357 | #define EDP_LINK_TRAIN_VOL_EMP_MASK_SNB (0x3f<<22) |
2967 | 3358 | ||
3359 | #define FORCEWAKE 0xA18C | ||
3360 | #define FORCEWAKE_ACK 0x130090 | ||
3361 | |||
3362 | #define GT_FIFO_FREE_ENTRIES 0x120008 | ||
3363 | |||
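A sketch of the Sandybridge "forcewake" handshake these two registers implement: write 1 to FORCEWAKE to keep the GT well out of RC6, poll FORCEWAKE_ACK until the hardware confirms, and write 0 to release. Loosely modeled on the driver's force-wake helpers; the timeout is illustrative:

    I915_WRITE(FORCEWAKE, 1);
    POSTING_READ(FORCEWAKE);
    if (wait_for((I915_READ(FORCEWAKE_ACK) & 1), 500))
        DRM_ERROR("force wake ack timed out\n");
    /* ... GT registers may now be accessed safely ... */
    I915_WRITE(FORCEWAKE, 0);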
3364 | #define GEN6_RPNSWREQ 0xA008 | ||
3365 | #define GEN6_TURBO_DISABLE (1<<31) | ||
3366 | #define GEN6_FREQUENCY(x) ((x)<<25) | ||
3367 | #define GEN6_OFFSET(x) ((x)<<19) | ||
3368 | #define GEN6_AGGRESSIVE_TURBO (0<<15) | ||
3369 | #define GEN6_RC_VIDEO_FREQ 0xA00C | ||
3370 | #define GEN6_RC_CONTROL 0xA090 | ||
3371 | #define GEN6_RC_CTL_RC6pp_ENABLE (1<<16) | ||
3372 | #define GEN6_RC_CTL_RC6p_ENABLE (1<<17) | ||
3373 | #define GEN6_RC_CTL_RC6_ENABLE (1<<18) | ||
3374 | #define GEN6_RC_CTL_RC1e_ENABLE (1<<20) | ||
3375 | #define GEN6_RC_CTL_RC7_ENABLE (1<<22) | ||
3376 | #define GEN6_RC_CTL_EI_MODE(x) ((x)<<27) | ||
3377 | #define GEN6_RC_CTL_HW_ENABLE (1<<31) | ||
3378 | #define GEN6_RP_DOWN_TIMEOUT 0xA010 | ||
3379 | #define GEN6_RP_INTERRUPT_LIMITS 0xA014 | ||
3380 | #define GEN6_RPSTAT1 0xA01C | ||
3381 | #define GEN6_CAGF_SHIFT 8 | ||
3382 | #define GEN6_CAGF_MASK (0x7f << GEN6_CAGF_SHIFT) | ||
3383 | #define GEN6_RP_CONTROL 0xA024 | ||
3384 | #define GEN6_RP_MEDIA_TURBO (1<<11) | ||
3385 | #define GEN6_RP_USE_NORMAL_FREQ (1<<9) | ||
3386 | #define GEN6_RP_MEDIA_IS_GFX (1<<8) | ||
3387 | #define GEN6_RP_ENABLE (1<<7) | ||
3388 | #define GEN6_RP_UP_IDLE_MIN (0x1<<3) | ||
3389 | #define GEN6_RP_UP_BUSY_AVG (0x2<<3) | ||
3390 | #define GEN6_RP_UP_BUSY_CONT (0x4<<3) | ||
3391 | #define GEN6_RP_DOWN_IDLE_CONT (0x1<<0) | ||
3392 | #define GEN6_RP_UP_THRESHOLD 0xA02C | ||
3393 | #define GEN6_RP_DOWN_THRESHOLD 0xA030 | ||
3394 | #define GEN6_RP_CUR_UP_EI 0xA050 | ||
3395 | #define GEN6_CURICONT_MASK 0xffffff | ||
3396 | #define GEN6_RP_CUR_UP 0xA054 | ||
3397 | #define GEN6_CURBSYTAVG_MASK 0xffffff | ||
3398 | #define GEN6_RP_PREV_UP 0xA058 | ||
3399 | #define GEN6_RP_CUR_DOWN_EI 0xA05C | ||
3400 | #define GEN6_CURIAVG_MASK 0xffffff | ||
3401 | #define GEN6_RP_CUR_DOWN 0xA060 | ||
3402 | #define GEN6_RP_PREV_DOWN 0xA064 | ||
3403 | #define GEN6_RP_UP_EI 0xA068 | ||
3404 | #define GEN6_RP_DOWN_EI 0xA06C | ||
3405 | #define GEN6_RP_IDLE_HYSTERSIS 0xA070 | ||
3406 | #define GEN6_RC_STATE 0xA094 | ||
3407 | #define GEN6_RC1_WAKE_RATE_LIMIT 0xA098 | ||
3408 | #define GEN6_RC6_WAKE_RATE_LIMIT 0xA09C | ||
3409 | #define GEN6_RC6pp_WAKE_RATE_LIMIT 0xA0A0 | ||
3410 | #define GEN6_RC_EVALUATION_INTERVAL 0xA0A8 | ||
3411 | #define GEN6_RC_IDLE_HYSTERSIS 0xA0AC | ||
3412 | #define GEN6_RC_SLEEP 0xA0B0 | ||
3413 | #define GEN6_RC1e_THRESHOLD 0xA0B4 | ||
3414 | #define GEN6_RC6_THRESHOLD 0xA0B8 | ||
3415 | #define GEN6_RC6p_THRESHOLD 0xA0BC | ||
3416 | #define GEN6_RC6pp_THRESHOLD 0xA0C0 | ||
3417 | #define GEN6_PMINTRMSK 0xA168 | ||
3418 | |||
3419 | #define GEN6_PMISR 0x44020 | ||
3420 | #define GEN6_PMIMR 0x44024 /* rps_lock */ | ||
3421 | #define GEN6_PMIIR 0x44028 | ||
3422 | #define GEN6_PMIER 0x4402C | ||
3423 | #define GEN6_PM_MBOX_EVENT (1<<25) | ||
3424 | #define GEN6_PM_THERMAL_EVENT (1<<24) | ||
3425 | #define GEN6_PM_RP_DOWN_TIMEOUT (1<<6) | ||
3426 | #define GEN6_PM_RP_UP_THRESHOLD (1<<5) | ||
3427 | #define GEN6_PM_RP_DOWN_THRESHOLD (1<<4) | ||
3428 | #define GEN6_PM_RP_UP_EI_EXPIRED (1<<2) | ||
3429 | #define GEN6_PM_RP_DOWN_EI_EXPIRED (1<<1) | ||
3430 | #define GEN6_PM_DEFERRED_EVENTS (GEN6_PM_RP_UP_THRESHOLD | \ | ||
3431 | GEN6_PM_RP_DOWN_THRESHOLD | \ | ||
3432 | GEN6_PM_RP_DOWN_TIMEOUT) | ||
3433 | |||
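A sketch of reading the current actual GT frequency (CAGF) back out of GEN6_RPSTAT1 with the mask and shift above. On Sandybridge the field counts in 50 MHz steps; treat that scaling as an assumption of this example:

    u32 rpstat = I915_READ(GEN6_RPSTAT1);
    u32 cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
    u32 freq_mhz = cagf * 50; /* 50 MHz units on SNB */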
3434 | #define GEN6_PCODE_MAILBOX 0x138124 | ||
3435 | #define GEN6_PCODE_READY (1<<31) | ||
3436 | #define GEN6_READ_OC_PARAMS 0xc | ||
3437 | #define GEN6_PCODE_WRITE_MIN_FREQ_TABLE 0x9 | ||
3438 | #define GEN6_PCODE_DATA 0x138128 | ||
3439 | |||
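A sketch of the PCODE mailbox protocol behind the last four definitions: load the data register, write the command with the READY bit set, and poll until firmware clears READY, at which point the reply sits in the data register. Modeled on the driver's Sandybridge pcode accessors; the timeout value is illustrative:

    u32 data = 0;

    I915_WRITE(GEN6_PCODE_DATA, data);
    I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | GEN6_READ_OC_PARAMS);
    if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, 500))
        DRM_ERROR("pcode mailbox timed out\n");
    data = I915_READ(GEN6_PCODE_DATA); /* firmware's reply */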
2968 | #endif /* _I915_REG_H_ */ | 3440 | #endif /* _I915_REG_H_ */ |
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c index 31f08581e93a..5257cfc34c35 100644 --- a/drivers/gpu/drm/i915/i915_suspend.c +++ b/drivers/gpu/drm/i915/i915_suspend.c | |||
@@ -34,11 +34,10 @@ static bool i915_pipe_enabled(struct drm_device *dev, enum pipe pipe) | |||
34 | struct drm_i915_private *dev_priv = dev->dev_private; | 34 | struct drm_i915_private *dev_priv = dev->dev_private; |
35 | u32 dpll_reg; | 35 | u32 dpll_reg; |
36 | 36 | ||
37 | if (HAS_PCH_SPLIT(dev)) { | 37 | if (HAS_PCH_SPLIT(dev)) |
38 | dpll_reg = (pipe == PIPE_A) ? PCH_DPLL_A: PCH_DPLL_B; | 38 | dpll_reg = (pipe == PIPE_A) ? _PCH_DPLL_A : _PCH_DPLL_B; |
39 | } else { | 39 | else |
40 | dpll_reg = (pipe == PIPE_A) ? DPLL_A: DPLL_B; | 40 | dpll_reg = (pipe == PIPE_A) ? _DPLL_A : _DPLL_B; |
41 | } | ||
42 | 41 | ||
43 | return (I915_READ(dpll_reg) & DPLL_VCO_ENABLE); | 42 | return (I915_READ(dpll_reg) & DPLL_VCO_ENABLE); |
44 | } | 43 | } |
@@ -46,7 +45,7 @@ static bool i915_pipe_enabled(struct drm_device *dev, enum pipe pipe) | |||
46 | static void i915_save_palette(struct drm_device *dev, enum pipe pipe) | 45 | static void i915_save_palette(struct drm_device *dev, enum pipe pipe) |
47 | { | 46 | { |
48 | struct drm_i915_private *dev_priv = dev->dev_private; | 47 | struct drm_i915_private *dev_priv = dev->dev_private; |
49 | unsigned long reg = (pipe == PIPE_A ? PALETTE_A : PALETTE_B); | 48 | unsigned long reg = (pipe == PIPE_A ? _PALETTE_A : _PALETTE_B); |
50 | u32 *array; | 49 | u32 *array; |
51 | int i; | 50 | int i; |
52 | 51 | ||
@@ -54,7 +53,7 @@ static void i915_save_palette(struct drm_device *dev, enum pipe pipe) | |||
54 | return; | 53 | return; |
55 | 54 | ||
56 | if (HAS_PCH_SPLIT(dev)) | 55 | if (HAS_PCH_SPLIT(dev)) |
57 | reg = (pipe == PIPE_A) ? LGC_PALETTE_A : LGC_PALETTE_B; | 56 | reg = (pipe == PIPE_A) ? _LGC_PALETTE_A : _LGC_PALETTE_B; |
58 | 57 | ||
59 | if (pipe == PIPE_A) | 58 | if (pipe == PIPE_A) |
60 | array = dev_priv->save_palette_a; | 59 | array = dev_priv->save_palette_a; |
@@ -68,7 +67,7 @@ static void i915_save_palette(struct drm_device *dev, enum pipe pipe) | |||
68 | static void i915_restore_palette(struct drm_device *dev, enum pipe pipe) | 67 | static void i915_restore_palette(struct drm_device *dev, enum pipe pipe) |
69 | { | 68 | { |
70 | struct drm_i915_private *dev_priv = dev->dev_private; | 69 | struct drm_i915_private *dev_priv = dev->dev_private; |
71 | unsigned long reg = (pipe == PIPE_A ? PALETTE_A : PALETTE_B); | 70 | unsigned long reg = (pipe == PIPE_A ? _PALETTE_A : _PALETTE_B); |
72 | u32 *array; | 71 | u32 *array; |
73 | int i; | 72 | int i; |
74 | 73 | ||
@@ -76,7 +75,7 @@ static void i915_restore_palette(struct drm_device *dev, enum pipe pipe) | |||
76 | return; | 75 | return; |
77 | 76 | ||
78 | if (HAS_PCH_SPLIT(dev)) | 77 | if (HAS_PCH_SPLIT(dev)) |
79 | reg = (pipe == PIPE_A) ? LGC_PALETTE_A : LGC_PALETTE_B; | 78 | reg = (pipe == PIPE_A) ? _LGC_PALETTE_A : _LGC_PALETTE_B; |
80 | 79 | ||
81 | if (pipe == PIPE_A) | 80 | if (pipe == PIPE_A) |
82 | array = dev_priv->save_palette_a; | 81 | array = dev_priv->save_palette_a; |
@@ -235,128 +234,161 @@ static void i915_restore_vga(struct drm_device *dev) | |||
235 | static void i915_save_modeset_reg(struct drm_device *dev) | 234 | static void i915_save_modeset_reg(struct drm_device *dev) |
236 | { | 235 | { |
237 | struct drm_i915_private *dev_priv = dev->dev_private; | 236 | struct drm_i915_private *dev_priv = dev->dev_private; |
237 | int i; | ||
238 | 238 | ||
239 | if (drm_core_check_feature(dev, DRIVER_MODESET)) | 239 | if (drm_core_check_feature(dev, DRIVER_MODESET)) |
240 | return; | 240 | return; |
241 | 241 | ||
242 | /* Cursor state */ | ||
243 | dev_priv->saveCURACNTR = I915_READ(_CURACNTR); | ||
244 | dev_priv->saveCURAPOS = I915_READ(_CURAPOS); | ||
245 | dev_priv->saveCURABASE = I915_READ(_CURABASE); | ||
246 | dev_priv->saveCURBCNTR = I915_READ(_CURBCNTR); | ||
247 | dev_priv->saveCURBPOS = I915_READ(_CURBPOS); | ||
248 | dev_priv->saveCURBBASE = I915_READ(_CURBBASE); | ||
249 | if (IS_GEN2(dev)) | ||
250 | dev_priv->saveCURSIZE = I915_READ(CURSIZE); | ||
251 | |||
242 | if (HAS_PCH_SPLIT(dev)) { | 252 | if (HAS_PCH_SPLIT(dev)) { |
243 | dev_priv->savePCH_DREF_CONTROL = I915_READ(PCH_DREF_CONTROL); | 253 | dev_priv->savePCH_DREF_CONTROL = I915_READ(PCH_DREF_CONTROL); |
244 | dev_priv->saveDISP_ARB_CTL = I915_READ(DISP_ARB_CTL); | 254 | dev_priv->saveDISP_ARB_CTL = I915_READ(DISP_ARB_CTL); |
245 | } | 255 | } |
246 | 256 | ||
247 | /* Pipe & plane A info */ | 257 | /* Pipe & plane A info */ |
248 | dev_priv->savePIPEACONF = I915_READ(PIPEACONF); | 258 | dev_priv->savePIPEACONF = I915_READ(_PIPEACONF); |
249 | dev_priv->savePIPEASRC = I915_READ(PIPEASRC); | 259 | dev_priv->savePIPEASRC = I915_READ(_PIPEASRC); |
250 | if (HAS_PCH_SPLIT(dev)) { | 260 | if (HAS_PCH_SPLIT(dev)) { |
251 | dev_priv->saveFPA0 = I915_READ(PCH_FPA0); | 261 | dev_priv->saveFPA0 = I915_READ(_PCH_FPA0); |
252 | dev_priv->saveFPA1 = I915_READ(PCH_FPA1); | 262 | dev_priv->saveFPA1 = I915_READ(_PCH_FPA1); |
253 | dev_priv->saveDPLL_A = I915_READ(PCH_DPLL_A); | 263 | dev_priv->saveDPLL_A = I915_READ(_PCH_DPLL_A); |
254 | } else { | 264 | } else { |
255 | dev_priv->saveFPA0 = I915_READ(FPA0); | 265 | dev_priv->saveFPA0 = I915_READ(_FPA0); |
256 | dev_priv->saveFPA1 = I915_READ(FPA1); | 266 | dev_priv->saveFPA1 = I915_READ(_FPA1); |
257 | dev_priv->saveDPLL_A = I915_READ(DPLL_A); | 267 | dev_priv->saveDPLL_A = I915_READ(_DPLL_A); |
258 | } | 268 | } |
259 | if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev)) | 269 | if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) |
260 | dev_priv->saveDPLL_A_MD = I915_READ(DPLL_A_MD); | 270 | dev_priv->saveDPLL_A_MD = I915_READ(_DPLL_A_MD); |
261 | dev_priv->saveHTOTAL_A = I915_READ(HTOTAL_A); | 271 | dev_priv->saveHTOTAL_A = I915_READ(_HTOTAL_A); |
262 | dev_priv->saveHBLANK_A = I915_READ(HBLANK_A); | 272 | dev_priv->saveHBLANK_A = I915_READ(_HBLANK_A); |
263 | dev_priv->saveHSYNC_A = I915_READ(HSYNC_A); | 273 | dev_priv->saveHSYNC_A = I915_READ(_HSYNC_A); |
264 | dev_priv->saveVTOTAL_A = I915_READ(VTOTAL_A); | 274 | dev_priv->saveVTOTAL_A = I915_READ(_VTOTAL_A); |
265 | dev_priv->saveVBLANK_A = I915_READ(VBLANK_A); | 275 | dev_priv->saveVBLANK_A = I915_READ(_VBLANK_A); |
266 | dev_priv->saveVSYNC_A = I915_READ(VSYNC_A); | 276 | dev_priv->saveVSYNC_A = I915_READ(_VSYNC_A); |
267 | if (!HAS_PCH_SPLIT(dev)) | 277 | if (!HAS_PCH_SPLIT(dev)) |
268 | dev_priv->saveBCLRPAT_A = I915_READ(BCLRPAT_A); | 278 | dev_priv->saveBCLRPAT_A = I915_READ(_BCLRPAT_A); |
269 | 279 | ||
270 | if (HAS_PCH_SPLIT(dev)) { | 280 | if (HAS_PCH_SPLIT(dev)) { |
271 | dev_priv->savePIPEA_DATA_M1 = I915_READ(PIPEA_DATA_M1); | 281 | dev_priv->savePIPEA_DATA_M1 = I915_READ(_PIPEA_DATA_M1); |
272 | dev_priv->savePIPEA_DATA_N1 = I915_READ(PIPEA_DATA_N1); | 282 | dev_priv->savePIPEA_DATA_N1 = I915_READ(_PIPEA_DATA_N1); |
273 | dev_priv->savePIPEA_LINK_M1 = I915_READ(PIPEA_LINK_M1); | 283 | dev_priv->savePIPEA_LINK_M1 = I915_READ(_PIPEA_LINK_M1); |
274 | dev_priv->savePIPEA_LINK_N1 = I915_READ(PIPEA_LINK_N1); | 284 | dev_priv->savePIPEA_LINK_N1 = I915_READ(_PIPEA_LINK_N1); |
275 | 285 | ||
276 | dev_priv->saveFDI_TXA_CTL = I915_READ(FDI_TXA_CTL); | 286 | dev_priv->saveFDI_TXA_CTL = I915_READ(_FDI_TXA_CTL); |
277 | dev_priv->saveFDI_RXA_CTL = I915_READ(FDI_RXA_CTL); | 287 | dev_priv->saveFDI_RXA_CTL = I915_READ(_FDI_RXA_CTL); |
278 | 288 | ||
279 | dev_priv->savePFA_CTL_1 = I915_READ(PFA_CTL_1); | 289 | dev_priv->savePFA_CTL_1 = I915_READ(_PFA_CTL_1); |
280 | dev_priv->savePFA_WIN_SZ = I915_READ(PFA_WIN_SZ); | 290 | dev_priv->savePFA_WIN_SZ = I915_READ(_PFA_WIN_SZ); |
281 | dev_priv->savePFA_WIN_POS = I915_READ(PFA_WIN_POS); | 291 | dev_priv->savePFA_WIN_POS = I915_READ(_PFA_WIN_POS); |
282 | 292 | ||
283 | dev_priv->saveTRANSACONF = I915_READ(TRANSACONF); | 293 | dev_priv->saveTRANSACONF = I915_READ(_TRANSACONF); |
284 | dev_priv->saveTRANS_HTOTAL_A = I915_READ(TRANS_HTOTAL_A); | 294 | dev_priv->saveTRANS_HTOTAL_A = I915_READ(_TRANS_HTOTAL_A); |
285 | dev_priv->saveTRANS_HBLANK_A = I915_READ(TRANS_HBLANK_A); | 295 | dev_priv->saveTRANS_HBLANK_A = I915_READ(_TRANS_HBLANK_A); |
286 | dev_priv->saveTRANS_HSYNC_A = I915_READ(TRANS_HSYNC_A); | 296 | dev_priv->saveTRANS_HSYNC_A = I915_READ(_TRANS_HSYNC_A); |
287 | dev_priv->saveTRANS_VTOTAL_A = I915_READ(TRANS_VTOTAL_A); | 297 | dev_priv->saveTRANS_VTOTAL_A = I915_READ(_TRANS_VTOTAL_A); |
288 | dev_priv->saveTRANS_VBLANK_A = I915_READ(TRANS_VBLANK_A); | 298 | dev_priv->saveTRANS_VBLANK_A = I915_READ(_TRANS_VBLANK_A); |
289 | dev_priv->saveTRANS_VSYNC_A = I915_READ(TRANS_VSYNC_A); | 299 | dev_priv->saveTRANS_VSYNC_A = I915_READ(_TRANS_VSYNC_A); |
290 | } | 300 | } |
291 | 301 | ||
292 | dev_priv->saveDSPACNTR = I915_READ(DSPACNTR); | 302 | dev_priv->saveDSPACNTR = I915_READ(_DSPACNTR); |
293 | dev_priv->saveDSPASTRIDE = I915_READ(DSPASTRIDE); | 303 | dev_priv->saveDSPASTRIDE = I915_READ(_DSPASTRIDE); |
294 | dev_priv->saveDSPASIZE = I915_READ(DSPASIZE); | 304 | dev_priv->saveDSPASIZE = I915_READ(_DSPASIZE); |
295 | dev_priv->saveDSPAPOS = I915_READ(DSPAPOS); | 305 | dev_priv->saveDSPAPOS = I915_READ(_DSPAPOS); |
296 | dev_priv->saveDSPAADDR = I915_READ(DSPAADDR); | 306 | dev_priv->saveDSPAADDR = I915_READ(_DSPAADDR); |
297 | if (IS_I965G(dev)) { | 307 | if (INTEL_INFO(dev)->gen >= 4) { |
298 | dev_priv->saveDSPASURF = I915_READ(DSPASURF); | 308 | dev_priv->saveDSPASURF = I915_READ(_DSPASURF); |
299 | dev_priv->saveDSPATILEOFF = I915_READ(DSPATILEOFF); | 309 | dev_priv->saveDSPATILEOFF = I915_READ(_DSPATILEOFF); |
300 | } | 310 | } |
301 | i915_save_palette(dev, PIPE_A); | 311 | i915_save_palette(dev, PIPE_A); |
302 | dev_priv->savePIPEASTAT = I915_READ(PIPEASTAT); | 312 | dev_priv->savePIPEASTAT = I915_READ(_PIPEASTAT); |
303 | 313 | ||
304 | /* Pipe & plane B info */ | 314 | /* Pipe & plane B info */ |
305 | dev_priv->savePIPEBCONF = I915_READ(PIPEBCONF); | 315 | dev_priv->savePIPEBCONF = I915_READ(_PIPEBCONF); |
306 | dev_priv->savePIPEBSRC = I915_READ(PIPEBSRC); | 316 | dev_priv->savePIPEBSRC = I915_READ(_PIPEBSRC); |
307 | if (HAS_PCH_SPLIT(dev)) { | 317 | if (HAS_PCH_SPLIT(dev)) { |
308 | dev_priv->saveFPB0 = I915_READ(PCH_FPB0); | 318 | dev_priv->saveFPB0 = I915_READ(_PCH_FPB0); |
309 | dev_priv->saveFPB1 = I915_READ(PCH_FPB1); | 319 | dev_priv->saveFPB1 = I915_READ(_PCH_FPB1); |
310 | dev_priv->saveDPLL_B = I915_READ(PCH_DPLL_B); | 320 | dev_priv->saveDPLL_B = I915_READ(_PCH_DPLL_B); |
311 | } else { | 321 | } else { |
312 | dev_priv->saveFPB0 = I915_READ(FPB0); | 322 | dev_priv->saveFPB0 = I915_READ(_FPB0); |
313 | dev_priv->saveFPB1 = I915_READ(FPB1); | 323 | dev_priv->saveFPB1 = I915_READ(_FPB1); |
314 | dev_priv->saveDPLL_B = I915_READ(DPLL_B); | 324 | dev_priv->saveDPLL_B = I915_READ(_DPLL_B); |
315 | } | 325 | } |
316 | if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev)) | 326 | if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) |
317 | dev_priv->saveDPLL_B_MD = I915_READ(DPLL_B_MD); | 327 | dev_priv->saveDPLL_B_MD = I915_READ(_DPLL_B_MD); |
318 | dev_priv->saveHTOTAL_B = I915_READ(HTOTAL_B); | 328 | dev_priv->saveHTOTAL_B = I915_READ(_HTOTAL_B); |
319 | dev_priv->saveHBLANK_B = I915_READ(HBLANK_B); | 329 | dev_priv->saveHBLANK_B = I915_READ(_HBLANK_B); |
320 | dev_priv->saveHSYNC_B = I915_READ(HSYNC_B); | 330 | dev_priv->saveHSYNC_B = I915_READ(_HSYNC_B); |
321 | dev_priv->saveVTOTAL_B = I915_READ(VTOTAL_B); | 331 | dev_priv->saveVTOTAL_B = I915_READ(_VTOTAL_B); |
322 | dev_priv->saveVBLANK_B = I915_READ(VBLANK_B); | 332 | dev_priv->saveVBLANK_B = I915_READ(_VBLANK_B); |
323 | dev_priv->saveVSYNC_B = I915_READ(VSYNC_B); | 333 | dev_priv->saveVSYNC_B = I915_READ(_VSYNC_B); |
324 | if (!HAS_PCH_SPLIT(dev)) | 334 | if (!HAS_PCH_SPLIT(dev)) |
325 | dev_priv->saveBCLRPAT_B = I915_READ(BCLRPAT_B); | 335 | dev_priv->saveBCLRPAT_B = I915_READ(_BCLRPAT_B); |
326 | 336 | ||
327 | if (HAS_PCH_SPLIT(dev)) { | 337 | if (HAS_PCH_SPLIT(dev)) { |
328 | dev_priv->savePIPEB_DATA_M1 = I915_READ(PIPEB_DATA_M1); | 338 | dev_priv->savePIPEB_DATA_M1 = I915_READ(_PIPEB_DATA_M1); |
329 | dev_priv->savePIPEB_DATA_N1 = I915_READ(PIPEB_DATA_N1); | 339 | dev_priv->savePIPEB_DATA_N1 = I915_READ(_PIPEB_DATA_N1); |
330 | dev_priv->savePIPEB_LINK_M1 = I915_READ(PIPEB_LINK_M1); | 340 | dev_priv->savePIPEB_LINK_M1 = I915_READ(_PIPEB_LINK_M1); |
331 | dev_priv->savePIPEB_LINK_N1 = I915_READ(PIPEB_LINK_N1); | 341 | dev_priv->savePIPEB_LINK_N1 = I915_READ(_PIPEB_LINK_N1); |
332 | 342 | ||
333 | dev_priv->saveFDI_TXB_CTL = I915_READ(FDI_TXB_CTL); | 343 | dev_priv->saveFDI_TXB_CTL = I915_READ(_FDI_TXB_CTL); |
334 | dev_priv->saveFDI_RXB_CTL = I915_READ(FDI_RXB_CTL); | 344 | dev_priv->saveFDI_RXB_CTL = I915_READ(_FDI_RXB_CTL); |
335 | 345 | ||
336 | dev_priv->savePFB_CTL_1 = I915_READ(PFB_CTL_1); | 346 | dev_priv->savePFB_CTL_1 = I915_READ(_PFB_CTL_1); |
337 | dev_priv->savePFB_WIN_SZ = I915_READ(PFB_WIN_SZ); | 347 | dev_priv->savePFB_WIN_SZ = I915_READ(_PFB_WIN_SZ); |
338 | dev_priv->savePFB_WIN_POS = I915_READ(PFB_WIN_POS); | 348 | dev_priv->savePFB_WIN_POS = I915_READ(_PFB_WIN_POS); |
339 | 349 | ||
340 | dev_priv->saveTRANSBCONF = I915_READ(TRANSBCONF); | 350 | dev_priv->saveTRANSBCONF = I915_READ(_TRANSBCONF); |
341 | dev_priv->saveTRANS_HTOTAL_B = I915_READ(TRANS_HTOTAL_B); | 351 | dev_priv->saveTRANS_HTOTAL_B = I915_READ(_TRANS_HTOTAL_B); |
342 | dev_priv->saveTRANS_HBLANK_B = I915_READ(TRANS_HBLANK_B); | 352 | dev_priv->saveTRANS_HBLANK_B = I915_READ(_TRANS_HBLANK_B); |
343 | dev_priv->saveTRANS_HSYNC_B = I915_READ(TRANS_HSYNC_B); | 353 | dev_priv->saveTRANS_HSYNC_B = I915_READ(_TRANS_HSYNC_B); |
344 | dev_priv->saveTRANS_VTOTAL_B = I915_READ(TRANS_VTOTAL_B); | 354 | dev_priv->saveTRANS_VTOTAL_B = I915_READ(_TRANS_VTOTAL_B); |
345 | dev_priv->saveTRANS_VBLANK_B = I915_READ(TRANS_VBLANK_B); | 355 | dev_priv->saveTRANS_VBLANK_B = I915_READ(_TRANS_VBLANK_B); |
346 | dev_priv->saveTRANS_VSYNC_B = I915_READ(TRANS_VSYNC_B); | 356 | dev_priv->saveTRANS_VSYNC_B = I915_READ(_TRANS_VSYNC_B); |
347 | } | 357 | } |
348 | 358 | ||
349 | dev_priv->saveDSPBCNTR = I915_READ(DSPBCNTR); | 359 | dev_priv->saveDSPBCNTR = I915_READ(_DSPBCNTR); |
350 | dev_priv->saveDSPBSTRIDE = I915_READ(DSPBSTRIDE); | 360 | dev_priv->saveDSPBSTRIDE = I915_READ(_DSPBSTRIDE); |
351 | dev_priv->saveDSPBSIZE = I915_READ(DSPBSIZE); | 361 | dev_priv->saveDSPBSIZE = I915_READ(_DSPBSIZE); |
352 | dev_priv->saveDSPBPOS = I915_READ(DSPBPOS); | 362 | dev_priv->saveDSPBPOS = I915_READ(_DSPBPOS); |
353 | dev_priv->saveDSPBADDR = I915_READ(DSPBADDR); | 363 | dev_priv->saveDSPBADDR = I915_READ(_DSPBADDR); |
354 | if (IS_I965GM(dev) || IS_GM45(dev)) { | 364 | if (INTEL_INFO(dev)->gen >= 4) { |
355 | dev_priv->saveDSPBSURF = I915_READ(DSPBSURF); | 365 | dev_priv->saveDSPBSURF = I915_READ(_DSPBSURF); |
356 | dev_priv->saveDSPBTILEOFF = I915_READ(DSPBTILEOFF); | 366 | dev_priv->saveDSPBTILEOFF = I915_READ(_DSPBTILEOFF); |
357 | } | 367 | } |
358 | i915_save_palette(dev, PIPE_B); | 368 | i915_save_palette(dev, PIPE_B); |
359 | dev_priv->savePIPEBSTAT = I915_READ(PIPEBSTAT); | 369 | dev_priv->savePIPEBSTAT = I915_READ(_PIPEBSTAT); |
370 | |||
371 | /* Fences */ | ||
372 | switch (INTEL_INFO(dev)->gen) { | ||
373 | case 6: | ||
374 | for (i = 0; i < 16; i++) | ||
375 | dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8)); | ||
376 | break; | ||
377 | case 5: | ||
378 | case 4: | ||
379 | for (i = 0; i < 16; i++) | ||
380 | dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_965_0 + (i * 8)); | ||
381 | break; | ||
382 | case 3: | ||
383 | if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) | ||
384 | for (i = 0; i < 8; i++) | ||
385 | dev_priv->saveFENCE[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4)); | ||
386 | case 2: | ||
387 | for (i = 0; i < 8; i++) | ||
388 | dev_priv->saveFENCE[i] = I915_READ(FENCE_REG_830_0 + (i * 4)); | ||
389 | break; | ||
390 | } | ||
391 | |||
360 | return; | 392 | return; |
361 | } | 393 | } |
362 | 394 | ||
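Note the save switch's deliberate fall-through from case 3 into case 2: gen3 parts with the extended register file (945G/945GM/G33) save the eight 945-style fences and then drop through to save the eight 830-style ones as well. A hypothetical helper that spells out the fence counts the switch statements (and the matching restore switch below) encode implicitly:

    static int fence_reg_count(struct drm_device *dev)
    {
        if (INTEL_INFO(dev)->gen >= 4)
            return 16; /* 64-bit fences (965 and Sandybridge layouts) */
        if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
            return 16; /* 8 base + 8 extended 32-bit fences */
        return 8;      /* 32-bit 830-style fences only */
    }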
@@ -365,24 +397,47 @@ static void i915_restore_modeset_reg(struct drm_device *dev) | |||
365 | struct drm_i915_private *dev_priv = dev->dev_private; | 397 | struct drm_i915_private *dev_priv = dev->dev_private; |
366 | int dpll_a_reg, fpa0_reg, fpa1_reg; | 398 | int dpll_a_reg, fpa0_reg, fpa1_reg; |
367 | int dpll_b_reg, fpb0_reg, fpb1_reg; | 399 | int dpll_b_reg, fpb0_reg, fpb1_reg; |
400 | int i; | ||
368 | 401 | ||
369 | if (drm_core_check_feature(dev, DRIVER_MODESET)) | 402 | if (drm_core_check_feature(dev, DRIVER_MODESET)) |
370 | return; | 403 | return; |
371 | 404 | ||
405 | /* Fences */ | ||
406 | switch (INTEL_INFO(dev)->gen) { | ||
407 | case 6: | ||
408 | for (i = 0; i < 16; i++) | ||
409 | I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), dev_priv->saveFENCE[i]); | ||
410 | break; | ||
411 | case 5: | ||
412 | case 4: | ||
413 | for (i = 0; i < 16; i++) | ||
414 | I915_WRITE64(FENCE_REG_965_0 + (i * 8), dev_priv->saveFENCE[i]); | ||
415 | break; | ||
416 | case 3: | ||
417 | case 2: | ||
418 | if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) | ||
419 | for (i = 0; i < 8; i++) | ||
420 | I915_WRITE(FENCE_REG_945_8 + (i * 4), dev_priv->saveFENCE[i+8]); | ||
421 | for (i = 0; i < 8; i++) | ||
422 | I915_WRITE(FENCE_REG_830_0 + (i * 4), dev_priv->saveFENCE[i]); | ||
423 | break; | ||
424 | } | ||
425 | |||
426 | |||
372 | if (HAS_PCH_SPLIT(dev)) { | 427 | if (HAS_PCH_SPLIT(dev)) { |
373 | dpll_a_reg = PCH_DPLL_A; | 428 | dpll_a_reg = _PCH_DPLL_A; |
374 | dpll_b_reg = PCH_DPLL_B; | 429 | dpll_b_reg = _PCH_DPLL_B; |
375 | fpa0_reg = PCH_FPA0; | 430 | fpa0_reg = _PCH_FPA0; |
376 | fpb0_reg = PCH_FPB0; | 431 | fpb0_reg = _PCH_FPB0; |
377 | fpa1_reg = PCH_FPA1; | 432 | fpa1_reg = _PCH_FPA1; |
378 | fpb1_reg = PCH_FPB1; | 433 | fpb1_reg = _PCH_FPB1; |
379 | } else { | 434 | } else { |
380 | dpll_a_reg = DPLL_A; | 435 | dpll_a_reg = _DPLL_A; |
381 | dpll_b_reg = DPLL_B; | 436 | dpll_b_reg = _DPLL_B; |
382 | fpa0_reg = FPA0; | 437 | fpa0_reg = _FPA0; |
383 | fpb0_reg = FPB0; | 438 | fpb0_reg = _FPB0; |
384 | fpa1_reg = FPA1; | 439 | fpa1_reg = _FPA1; |
385 | fpb1_reg = FPB1; | 440 | fpb1_reg = _FPB1; |
386 | } | 441 | } |
387 | 442 | ||
388 | if (HAS_PCH_SPLIT(dev)) { | 443 | if (HAS_PCH_SPLIT(dev)) { |
@@ -404,61 +459,61 @@ static void i915_restore_modeset_reg(struct drm_device *dev) | |||
404 | I915_WRITE(dpll_a_reg, dev_priv->saveDPLL_A); | 459 | I915_WRITE(dpll_a_reg, dev_priv->saveDPLL_A); |
405 | POSTING_READ(dpll_a_reg); | 460 | POSTING_READ(dpll_a_reg); |
406 | udelay(150); | 461 | udelay(150); |
407 | if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev)) { | 462 | if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) { |
408 | I915_WRITE(DPLL_A_MD, dev_priv->saveDPLL_A_MD); | 463 | I915_WRITE(_DPLL_A_MD, dev_priv->saveDPLL_A_MD); |
409 | POSTING_READ(DPLL_A_MD); | 464 | POSTING_READ(_DPLL_A_MD); |
410 | } | 465 | } |
411 | udelay(150); | 466 | udelay(150); |
412 | 467 | ||
413 | /* Restore mode */ | 468 | /* Restore mode */ |
414 | I915_WRITE(HTOTAL_A, dev_priv->saveHTOTAL_A); | 469 | I915_WRITE(_HTOTAL_A, dev_priv->saveHTOTAL_A); |
415 | I915_WRITE(HBLANK_A, dev_priv->saveHBLANK_A); | 470 | I915_WRITE(_HBLANK_A, dev_priv->saveHBLANK_A); |
416 | I915_WRITE(HSYNC_A, dev_priv->saveHSYNC_A); | 471 | I915_WRITE(_HSYNC_A, dev_priv->saveHSYNC_A); |
417 | I915_WRITE(VTOTAL_A, dev_priv->saveVTOTAL_A); | 472 | I915_WRITE(_VTOTAL_A, dev_priv->saveVTOTAL_A); |
418 | I915_WRITE(VBLANK_A, dev_priv->saveVBLANK_A); | 473 | I915_WRITE(_VBLANK_A, dev_priv->saveVBLANK_A); |
419 | I915_WRITE(VSYNC_A, dev_priv->saveVSYNC_A); | 474 | I915_WRITE(_VSYNC_A, dev_priv->saveVSYNC_A); |
420 | if (!HAS_PCH_SPLIT(dev)) | 475 | if (!HAS_PCH_SPLIT(dev)) |
421 | I915_WRITE(BCLRPAT_A, dev_priv->saveBCLRPAT_A); | 476 | I915_WRITE(_BCLRPAT_A, dev_priv->saveBCLRPAT_A); |
422 | 477 | ||
423 | if (HAS_PCH_SPLIT(dev)) { | 478 | if (HAS_PCH_SPLIT(dev)) { |
424 | I915_WRITE(PIPEA_DATA_M1, dev_priv->savePIPEA_DATA_M1); | 479 | I915_WRITE(_PIPEA_DATA_M1, dev_priv->savePIPEA_DATA_M1); |
425 | I915_WRITE(PIPEA_DATA_N1, dev_priv->savePIPEA_DATA_N1); | 480 | I915_WRITE(_PIPEA_DATA_N1, dev_priv->savePIPEA_DATA_N1); |
426 | I915_WRITE(PIPEA_LINK_M1, dev_priv->savePIPEA_LINK_M1); | 481 | I915_WRITE(_PIPEA_LINK_M1, dev_priv->savePIPEA_LINK_M1); |
427 | I915_WRITE(PIPEA_LINK_N1, dev_priv->savePIPEA_LINK_N1); | 482 | I915_WRITE(_PIPEA_LINK_N1, dev_priv->savePIPEA_LINK_N1); |
428 | 483 | ||
429 | I915_WRITE(FDI_RXA_CTL, dev_priv->saveFDI_RXA_CTL); | 484 | I915_WRITE(_FDI_RXA_CTL, dev_priv->saveFDI_RXA_CTL); |
430 | I915_WRITE(FDI_TXA_CTL, dev_priv->saveFDI_TXA_CTL); | 485 | I915_WRITE(_FDI_TXA_CTL, dev_priv->saveFDI_TXA_CTL); |
431 | 486 | ||
432 | I915_WRITE(PFA_CTL_1, dev_priv->savePFA_CTL_1); | 487 | I915_WRITE(_PFA_CTL_1, dev_priv->savePFA_CTL_1); |
433 | I915_WRITE(PFA_WIN_SZ, dev_priv->savePFA_WIN_SZ); | 488 | I915_WRITE(_PFA_WIN_SZ, dev_priv->savePFA_WIN_SZ); |
434 | I915_WRITE(PFA_WIN_POS, dev_priv->savePFA_WIN_POS); | 489 | I915_WRITE(_PFA_WIN_POS, dev_priv->savePFA_WIN_POS); |
435 | 490 | ||
436 | I915_WRITE(TRANSACONF, dev_priv->saveTRANSACONF); | 491 | I915_WRITE(_TRANSACONF, dev_priv->saveTRANSACONF); |
437 | I915_WRITE(TRANS_HTOTAL_A, dev_priv->saveTRANS_HTOTAL_A); | 492 | I915_WRITE(_TRANS_HTOTAL_A, dev_priv->saveTRANS_HTOTAL_A); |
438 | I915_WRITE(TRANS_HBLANK_A, dev_priv->saveTRANS_HBLANK_A); | 493 | I915_WRITE(_TRANS_HBLANK_A, dev_priv->saveTRANS_HBLANK_A); |
439 | I915_WRITE(TRANS_HSYNC_A, dev_priv->saveTRANS_HSYNC_A); | 494 | I915_WRITE(_TRANS_HSYNC_A, dev_priv->saveTRANS_HSYNC_A); |
440 | I915_WRITE(TRANS_VTOTAL_A, dev_priv->saveTRANS_VTOTAL_A); | 495 | I915_WRITE(_TRANS_VTOTAL_A, dev_priv->saveTRANS_VTOTAL_A); |
441 | I915_WRITE(TRANS_VBLANK_A, dev_priv->saveTRANS_VBLANK_A); | 496 | I915_WRITE(_TRANS_VBLANK_A, dev_priv->saveTRANS_VBLANK_A); |
442 | I915_WRITE(TRANS_VSYNC_A, dev_priv->saveTRANS_VSYNC_A); | 497 | I915_WRITE(_TRANS_VSYNC_A, dev_priv->saveTRANS_VSYNC_A); |
443 | } | 498 | } |
444 | 499 | ||
445 | /* Restore plane info */ | 500 | /* Restore plane info */ |
446 | I915_WRITE(DSPASIZE, dev_priv->saveDSPASIZE); | 501 | I915_WRITE(_DSPASIZE, dev_priv->saveDSPASIZE); |
447 | I915_WRITE(DSPAPOS, dev_priv->saveDSPAPOS); | 502 | I915_WRITE(_DSPAPOS, dev_priv->saveDSPAPOS); |
448 | I915_WRITE(PIPEASRC, dev_priv->savePIPEASRC); | 503 | I915_WRITE(_PIPEASRC, dev_priv->savePIPEASRC); |
449 | I915_WRITE(DSPAADDR, dev_priv->saveDSPAADDR); | 504 | I915_WRITE(_DSPAADDR, dev_priv->saveDSPAADDR); |
450 | I915_WRITE(DSPASTRIDE, dev_priv->saveDSPASTRIDE); | 505 | I915_WRITE(_DSPASTRIDE, dev_priv->saveDSPASTRIDE); |
451 | if (IS_I965G(dev)) { | 506 | if (INTEL_INFO(dev)->gen >= 4) { |
452 | I915_WRITE(DSPASURF, dev_priv->saveDSPASURF); | 507 | I915_WRITE(_DSPASURF, dev_priv->saveDSPASURF); |
453 | I915_WRITE(DSPATILEOFF, dev_priv->saveDSPATILEOFF); | 508 | I915_WRITE(_DSPATILEOFF, dev_priv->saveDSPATILEOFF); |
454 | } | 509 | } |
455 | 510 | ||
456 | I915_WRITE(PIPEACONF, dev_priv->savePIPEACONF); | 511 | I915_WRITE(_PIPEACONF, dev_priv->savePIPEACONF); |
457 | 512 | ||
458 | i915_restore_palette(dev, PIPE_A); | 513 | i915_restore_palette(dev, PIPE_A); |
459 | /* Enable the plane */ | 514 | /* Enable the plane */ |
460 | I915_WRITE(DSPACNTR, dev_priv->saveDSPACNTR); | 515 | I915_WRITE(_DSPACNTR, dev_priv->saveDSPACNTR); |
461 | I915_WRITE(DSPAADDR, I915_READ(DSPAADDR)); | 516 | I915_WRITE(_DSPAADDR, I915_READ(_DSPAADDR)); |
462 | 517 | ||
463 | /* Pipe & plane B info */ | 518 | /* Pipe & plane B info */ |
464 | if (dev_priv->saveDPLL_B & DPLL_VCO_ENABLE) { | 519 | if (dev_priv->saveDPLL_B & DPLL_VCO_ENABLE) { |
@@ -473,66 +528,76 @@ static void i915_restore_modeset_reg(struct drm_device *dev) | |||
473 | I915_WRITE(dpll_b_reg, dev_priv->saveDPLL_B); | 528 | I915_WRITE(dpll_b_reg, dev_priv->saveDPLL_B); |
474 | POSTING_READ(dpll_b_reg); | 529 | POSTING_READ(dpll_b_reg); |
475 | udelay(150); | 530 | udelay(150); |
476 | if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev)) { | 531 | if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) { |
477 | I915_WRITE(DPLL_B_MD, dev_priv->saveDPLL_B_MD); | 532 | I915_WRITE(_DPLL_B_MD, dev_priv->saveDPLL_B_MD); |
478 | POSTING_READ(DPLL_B_MD); | 533 | POSTING_READ(_DPLL_B_MD); |
479 | } | 534 | } |
480 | udelay(150); | 535 | udelay(150); |
481 | 536 | ||
482 | /* Restore mode */ | 537 | /* Restore mode */ |
483 | I915_WRITE(HTOTAL_B, dev_priv->saveHTOTAL_B); | 538 | I915_WRITE(_HTOTAL_B, dev_priv->saveHTOTAL_B); |
484 | I915_WRITE(HBLANK_B, dev_priv->saveHBLANK_B); | 539 | I915_WRITE(_HBLANK_B, dev_priv->saveHBLANK_B); |
485 | I915_WRITE(HSYNC_B, dev_priv->saveHSYNC_B); | 540 | I915_WRITE(_HSYNC_B, dev_priv->saveHSYNC_B); |
486 | I915_WRITE(VTOTAL_B, dev_priv->saveVTOTAL_B); | 541 | I915_WRITE(_VTOTAL_B, dev_priv->saveVTOTAL_B); |
487 | I915_WRITE(VBLANK_B, dev_priv->saveVBLANK_B); | 542 | I915_WRITE(_VBLANK_B, dev_priv->saveVBLANK_B); |
488 | I915_WRITE(VSYNC_B, dev_priv->saveVSYNC_B); | 543 | I915_WRITE(_VSYNC_B, dev_priv->saveVSYNC_B); |
489 | if (!HAS_PCH_SPLIT(dev)) | 544 | if (!HAS_PCH_SPLIT(dev)) |
490 | I915_WRITE(BCLRPAT_B, dev_priv->saveBCLRPAT_B); | 545 | I915_WRITE(_BCLRPAT_B, dev_priv->saveBCLRPAT_B); |
491 | 546 | ||
492 | if (HAS_PCH_SPLIT(dev)) { | 547 | if (HAS_PCH_SPLIT(dev)) { |
493 | I915_WRITE(PIPEB_DATA_M1, dev_priv->savePIPEB_DATA_M1); | 548 | I915_WRITE(_PIPEB_DATA_M1, dev_priv->savePIPEB_DATA_M1); |
494 | I915_WRITE(PIPEB_DATA_N1, dev_priv->savePIPEB_DATA_N1); | 549 | I915_WRITE(_PIPEB_DATA_N1, dev_priv->savePIPEB_DATA_N1); |
495 | I915_WRITE(PIPEB_LINK_M1, dev_priv->savePIPEB_LINK_M1); | 550 | I915_WRITE(_PIPEB_LINK_M1, dev_priv->savePIPEB_LINK_M1); |
496 | I915_WRITE(PIPEB_LINK_N1, dev_priv->savePIPEB_LINK_N1); | 551 | I915_WRITE(_PIPEB_LINK_N1, dev_priv->savePIPEB_LINK_N1); |
497 | 552 | ||
498 | I915_WRITE(FDI_RXB_CTL, dev_priv->saveFDI_RXB_CTL); | 553 | I915_WRITE(_FDI_RXB_CTL, dev_priv->saveFDI_RXB_CTL); |
499 | I915_WRITE(FDI_TXB_CTL, dev_priv->saveFDI_TXB_CTL); | 554 | I915_WRITE(_FDI_TXB_CTL, dev_priv->saveFDI_TXB_CTL); |
500 | 555 | ||
501 | I915_WRITE(PFB_CTL_1, dev_priv->savePFB_CTL_1); | 556 | I915_WRITE(_PFB_CTL_1, dev_priv->savePFB_CTL_1); |
502 | I915_WRITE(PFB_WIN_SZ, dev_priv->savePFB_WIN_SZ); | 557 | I915_WRITE(_PFB_WIN_SZ, dev_priv->savePFB_WIN_SZ); |
503 | I915_WRITE(PFB_WIN_POS, dev_priv->savePFB_WIN_POS); | 558 | I915_WRITE(_PFB_WIN_POS, dev_priv->savePFB_WIN_POS); |
504 | 559 | ||
505 | I915_WRITE(TRANSBCONF, dev_priv->saveTRANSBCONF); | 560 | I915_WRITE(_TRANSBCONF, dev_priv->saveTRANSBCONF); |
506 | I915_WRITE(TRANS_HTOTAL_B, dev_priv->saveTRANS_HTOTAL_B); | 561 | I915_WRITE(_TRANS_HTOTAL_B, dev_priv->saveTRANS_HTOTAL_B); |
507 | I915_WRITE(TRANS_HBLANK_B, dev_priv->saveTRANS_HBLANK_B); | 562 | I915_WRITE(_TRANS_HBLANK_B, dev_priv->saveTRANS_HBLANK_B); |
508 | I915_WRITE(TRANS_HSYNC_B, dev_priv->saveTRANS_HSYNC_B); | 563 | I915_WRITE(_TRANS_HSYNC_B, dev_priv->saveTRANS_HSYNC_B); |
509 | I915_WRITE(TRANS_VTOTAL_B, dev_priv->saveTRANS_VTOTAL_B); | 564 | I915_WRITE(_TRANS_VTOTAL_B, dev_priv->saveTRANS_VTOTAL_B); |
510 | I915_WRITE(TRANS_VBLANK_B, dev_priv->saveTRANS_VBLANK_B); | 565 | I915_WRITE(_TRANS_VBLANK_B, dev_priv->saveTRANS_VBLANK_B); |
511 | I915_WRITE(TRANS_VSYNC_B, dev_priv->saveTRANS_VSYNC_B); | 566 | I915_WRITE(_TRANS_VSYNC_B, dev_priv->saveTRANS_VSYNC_B); |
512 | } | 567 | } |
513 | 568 | ||
514 | /* Restore plane info */ | 569 | /* Restore plane info */ |
515 | I915_WRITE(DSPBSIZE, dev_priv->saveDSPBSIZE); | 570 | I915_WRITE(_DSPBSIZE, dev_priv->saveDSPBSIZE); |
516 | I915_WRITE(DSPBPOS, dev_priv->saveDSPBPOS); | 571 | I915_WRITE(_DSPBPOS, dev_priv->saveDSPBPOS); |
517 | I915_WRITE(PIPEBSRC, dev_priv->savePIPEBSRC); | 572 | I915_WRITE(_PIPEBSRC, dev_priv->savePIPEBSRC); |
518 | I915_WRITE(DSPBADDR, dev_priv->saveDSPBADDR); | 573 | I915_WRITE(_DSPBADDR, dev_priv->saveDSPBADDR); |
519 | I915_WRITE(DSPBSTRIDE, dev_priv->saveDSPBSTRIDE); | 574 | I915_WRITE(_DSPBSTRIDE, dev_priv->saveDSPBSTRIDE); |
520 | if (IS_I965G(dev)) { | 575 | if (INTEL_INFO(dev)->gen >= 4) { |
521 | I915_WRITE(DSPBSURF, dev_priv->saveDSPBSURF); | 576 | I915_WRITE(_DSPBSURF, dev_priv->saveDSPBSURF); |
522 | I915_WRITE(DSPBTILEOFF, dev_priv->saveDSPBTILEOFF); | 577 | I915_WRITE(_DSPBTILEOFF, dev_priv->saveDSPBTILEOFF); |
523 | } | 578 | } |
524 | 579 | ||
525 | I915_WRITE(PIPEBCONF, dev_priv->savePIPEBCONF); | 580 | I915_WRITE(_PIPEBCONF, dev_priv->savePIPEBCONF); |
526 | 581 | ||
527 | i915_restore_palette(dev, PIPE_B); | 582 | i915_restore_palette(dev, PIPE_B); |
528 | /* Enable the plane */ | 583 | /* Enable the plane */ |
529 | I915_WRITE(DSPBCNTR, dev_priv->saveDSPBCNTR); | 584 | I915_WRITE(_DSPBCNTR, dev_priv->saveDSPBCNTR); |
530 | I915_WRITE(DSPBADDR, I915_READ(DSPBADDR)); | 585 | I915_WRITE(_DSPBADDR, I915_READ(_DSPBADDR)); |
586 | |||
587 | /* Cursor state */ | ||
588 | I915_WRITE(_CURAPOS, dev_priv->saveCURAPOS); | ||
589 | I915_WRITE(_CURACNTR, dev_priv->saveCURACNTR); | ||
590 | I915_WRITE(_CURABASE, dev_priv->saveCURABASE); | ||
591 | I915_WRITE(_CURBPOS, dev_priv->saveCURBPOS); | ||
592 | I915_WRITE(_CURBCNTR, dev_priv->saveCURBCNTR); | ||
593 | I915_WRITE(_CURBBASE, dev_priv->saveCURBBASE); | ||
594 | if (IS_GEN2(dev)) | ||
595 | I915_WRITE(CURSIZE, dev_priv->saveCURSIZE); | ||
531 | 596 | ||
532 | return; | 597 | return; |
533 | } | 598 | } |
534 | 599 | ||
535 | void i915_save_display(struct drm_device *dev) | 600 | static void i915_save_display(struct drm_device *dev) |
536 | { | 601 | { |
537 | struct drm_i915_private *dev_priv = dev->dev_private; | 602 | struct drm_i915_private *dev_priv = dev->dev_private; |
538 | 603 | ||
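The IS_I965G(dev) tests above become INTEL_INFO(dev)->gen >= 4: the hardware generation lives in the per-device capability struct, so a single numeric comparison covers 965-class parts and everything newer. A rough sketch of the shape this assumes (fields beyond ->gen are illustrative):

	struct intel_device_info {
		u8 gen;			/* hardware generation: 2, 3, 4, ... */
		u8 is_mobile:1;		/* feature bits (illustrative) */
	};

	#define INTEL_INFO(dev) \
		(((struct drm_i915_private *)(dev)->dev_private)->info)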
@@ -543,16 +608,6 @@ void i915_save_display(struct drm_device *dev) | |||
543 | /* Don't save them in KMS mode */ | 608 | /* Don't save them in KMS mode */ |
544 | i915_save_modeset_reg(dev); | 609 | i915_save_modeset_reg(dev); |
545 | 610 | ||
546 | /* Cursor state */ | ||
547 | dev_priv->saveCURACNTR = I915_READ(CURACNTR); | ||
548 | dev_priv->saveCURAPOS = I915_READ(CURAPOS); | ||
549 | dev_priv->saveCURABASE = I915_READ(CURABASE); | ||
550 | dev_priv->saveCURBCNTR = I915_READ(CURBCNTR); | ||
551 | dev_priv->saveCURBPOS = I915_READ(CURBPOS); | ||
552 | dev_priv->saveCURBBASE = I915_READ(CURBBASE); | ||
553 | if (!IS_I9XX(dev)) | ||
554 | dev_priv->saveCURSIZE = I915_READ(CURSIZE); | ||
555 | |||
556 | /* CRT state */ | 611 | /* CRT state */ |
557 | if (HAS_PCH_SPLIT(dev)) { | 612 | if (HAS_PCH_SPLIT(dev)) { |
558 | dev_priv->saveADPA = I915_READ(PCH_ADPA); | 613 | dev_priv->saveADPA = I915_READ(PCH_ADPA); |
@@ -573,7 +628,7 @@ void i915_save_display(struct drm_device *dev) | |||
573 | dev_priv->savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS); | 628 | dev_priv->savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS); |
574 | dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL); | 629 | dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL); |
575 | dev_priv->saveBLC_HIST_CTL = I915_READ(BLC_HIST_CTL); | 630 | dev_priv->saveBLC_HIST_CTL = I915_READ(BLC_HIST_CTL); |
576 | if (IS_I965G(dev)) | 631 | if (INTEL_INFO(dev)->gen >= 4) |
577 | dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2); | 632 | dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2); |
578 | if (IS_MOBILE(dev) && !IS_I830(dev)) | 633 | if (IS_MOBILE(dev) && !IS_I830(dev)) |
579 | dev_priv->saveLVDS = I915_READ(LVDS); | 634 | dev_priv->saveLVDS = I915_READ(LVDS); |
@@ -597,14 +652,14 @@ void i915_save_display(struct drm_device *dev) | |||
597 | dev_priv->saveDP_B = I915_READ(DP_B); | 652 | dev_priv->saveDP_B = I915_READ(DP_B); |
598 | dev_priv->saveDP_C = I915_READ(DP_C); | 653 | dev_priv->saveDP_C = I915_READ(DP_C); |
599 | dev_priv->saveDP_D = I915_READ(DP_D); | 654 | dev_priv->saveDP_D = I915_READ(DP_D); |
600 | dev_priv->savePIPEA_GMCH_DATA_M = I915_READ(PIPEA_GMCH_DATA_M); | 655 | dev_priv->savePIPEA_GMCH_DATA_M = I915_READ(_PIPEA_GMCH_DATA_M); |
601 | dev_priv->savePIPEB_GMCH_DATA_M = I915_READ(PIPEB_GMCH_DATA_M); | 656 | dev_priv->savePIPEB_GMCH_DATA_M = I915_READ(_PIPEB_GMCH_DATA_M); |
602 | dev_priv->savePIPEA_GMCH_DATA_N = I915_READ(PIPEA_GMCH_DATA_N); | 657 | dev_priv->savePIPEA_GMCH_DATA_N = I915_READ(_PIPEA_GMCH_DATA_N); |
603 | dev_priv->savePIPEB_GMCH_DATA_N = I915_READ(PIPEB_GMCH_DATA_N); | 658 | dev_priv->savePIPEB_GMCH_DATA_N = I915_READ(_PIPEB_GMCH_DATA_N); |
604 | dev_priv->savePIPEA_DP_LINK_M = I915_READ(PIPEA_DP_LINK_M); | 659 | dev_priv->savePIPEA_DP_LINK_M = I915_READ(_PIPEA_DP_LINK_M); |
605 | dev_priv->savePIPEB_DP_LINK_M = I915_READ(PIPEB_DP_LINK_M); | 660 | dev_priv->savePIPEB_DP_LINK_M = I915_READ(_PIPEB_DP_LINK_M); |
606 | dev_priv->savePIPEA_DP_LINK_N = I915_READ(PIPEA_DP_LINK_N); | 661 | dev_priv->savePIPEA_DP_LINK_N = I915_READ(_PIPEA_DP_LINK_N); |
607 | dev_priv->savePIPEB_DP_LINK_N = I915_READ(PIPEB_DP_LINK_N); | 662 | dev_priv->savePIPEB_DP_LINK_N = I915_READ(_PIPEB_DP_LINK_N); |
608 | } | 663 | } |
609 | /* FIXME: save TV & SDVO state */ | 664 | /* FIXME: save TV & SDVO state */ |
610 | 665 | ||
@@ -634,7 +689,7 @@ void i915_save_display(struct drm_device *dev) | |||
634 | i915_save_vga(dev); | 689 | i915_save_vga(dev); |
635 | } | 690 | } |
636 | 691 | ||
637 | void i915_restore_display(struct drm_device *dev) | 692 | static void i915_restore_display(struct drm_device *dev) |
638 | { | 693 | { |
639 | struct drm_i915_private *dev_priv = dev->dev_private; | 694 | struct drm_i915_private *dev_priv = dev->dev_private; |
640 | 695 | ||
@@ -643,30 +698,20 @@ void i915_restore_display(struct drm_device *dev) | |||
643 | 698 | ||
644 | /* Display port ratios (must be done before clock is set) */ | 699 | /* Display port ratios (must be done before clock is set) */ |
645 | if (SUPPORTS_INTEGRATED_DP(dev)) { | 700 | if (SUPPORTS_INTEGRATED_DP(dev)) { |
646 | I915_WRITE(PIPEA_GMCH_DATA_M, dev_priv->savePIPEA_GMCH_DATA_M); | 701 | I915_WRITE(_PIPEA_GMCH_DATA_M, dev_priv->savePIPEA_GMCH_DATA_M); |
647 | I915_WRITE(PIPEB_GMCH_DATA_M, dev_priv->savePIPEB_GMCH_DATA_M); | 702 | I915_WRITE(_PIPEB_GMCH_DATA_M, dev_priv->savePIPEB_GMCH_DATA_M); |
648 | I915_WRITE(PIPEA_GMCH_DATA_N, dev_priv->savePIPEA_GMCH_DATA_N); | 703 | I915_WRITE(_PIPEA_GMCH_DATA_N, dev_priv->savePIPEA_GMCH_DATA_N); |
649 | I915_WRITE(PIPEB_GMCH_DATA_N, dev_priv->savePIPEB_GMCH_DATA_N); | 704 | I915_WRITE(_PIPEB_GMCH_DATA_N, dev_priv->savePIPEB_GMCH_DATA_N); |
650 | I915_WRITE(PIPEA_DP_LINK_M, dev_priv->savePIPEA_DP_LINK_M); | 705 | I915_WRITE(_PIPEA_DP_LINK_M, dev_priv->savePIPEA_DP_LINK_M); |
651 | I915_WRITE(PIPEB_DP_LINK_M, dev_priv->savePIPEB_DP_LINK_M); | 706 | I915_WRITE(_PIPEB_DP_LINK_M, dev_priv->savePIPEB_DP_LINK_M); |
652 | I915_WRITE(PIPEA_DP_LINK_N, dev_priv->savePIPEA_DP_LINK_N); | 707 | I915_WRITE(_PIPEA_DP_LINK_N, dev_priv->savePIPEA_DP_LINK_N); |
653 | I915_WRITE(PIPEB_DP_LINK_N, dev_priv->savePIPEB_DP_LINK_N); | 708 | I915_WRITE(_PIPEB_DP_LINK_N, dev_priv->savePIPEB_DP_LINK_N); |
654 | } | 709 | } |
655 | 710 | ||
656 | /* This is only meaningful in non-KMS mode */ | 711 | /* This is only meaningful in non-KMS mode */ |
657 | /* Don't restore them in KMS mode */ | 712 | /* Don't restore them in KMS mode */ |
658 | i915_restore_modeset_reg(dev); | 713 | i915_restore_modeset_reg(dev); |
659 | 714 | ||
660 | /* Cursor state */ | ||
661 | I915_WRITE(CURAPOS, dev_priv->saveCURAPOS); | ||
662 | I915_WRITE(CURACNTR, dev_priv->saveCURACNTR); | ||
663 | I915_WRITE(CURABASE, dev_priv->saveCURABASE); | ||
664 | I915_WRITE(CURBPOS, dev_priv->saveCURBPOS); | ||
665 | I915_WRITE(CURBCNTR, dev_priv->saveCURBCNTR); | ||
666 | I915_WRITE(CURBBASE, dev_priv->saveCURBBASE); | ||
667 | if (!IS_I9XX(dev)) | ||
668 | I915_WRITE(CURSIZE, dev_priv->saveCURSIZE); | ||
669 | |||
670 | /* CRT state */ | 715 | /* CRT state */ |
671 | if (HAS_PCH_SPLIT(dev)) | 716 | if (HAS_PCH_SPLIT(dev)) |
672 | I915_WRITE(PCH_ADPA, dev_priv->saveADPA); | 717 | I915_WRITE(PCH_ADPA, dev_priv->saveADPA); |
@@ -674,7 +719,7 @@ void i915_restore_display(struct drm_device *dev) | |||
674 | I915_WRITE(ADPA, dev_priv->saveADPA); | 719 | I915_WRITE(ADPA, dev_priv->saveADPA); |
675 | 720 | ||
676 | /* LVDS state */ | 721 | /* LVDS state */ |
677 | if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev)) | 722 | if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) |
678 | I915_WRITE(BLC_PWM_CTL2, dev_priv->saveBLC_PWM_CTL2); | 723 | I915_WRITE(BLC_PWM_CTL2, dev_priv->saveBLC_PWM_CTL2); |
679 | 724 | ||
680 | if (HAS_PCH_SPLIT(dev)) { | 725 | if (HAS_PCH_SPLIT(dev)) { |
@@ -694,7 +739,7 @@ void i915_restore_display(struct drm_device *dev) | |||
694 | I915_WRITE(PCH_PP_OFF_DELAYS, dev_priv->savePP_OFF_DELAYS); | 739 | I915_WRITE(PCH_PP_OFF_DELAYS, dev_priv->savePP_OFF_DELAYS); |
695 | I915_WRITE(PCH_PP_DIVISOR, dev_priv->savePP_DIVISOR); | 740 | I915_WRITE(PCH_PP_DIVISOR, dev_priv->savePP_DIVISOR); |
696 | I915_WRITE(PCH_PP_CONTROL, dev_priv->savePP_CONTROL); | 741 | I915_WRITE(PCH_PP_CONTROL, dev_priv->savePP_CONTROL); |
697 | I915_WRITE(MCHBAR_RENDER_STANDBY, | 742 | I915_WRITE(RSTDBYCTL, |
698 | dev_priv->saveMCHBAR_RENDER_STANDBY); | 743 | dev_priv->saveMCHBAR_RENDER_STANDBY); |
699 | } else { | 744 | } else { |
700 | I915_WRITE(PFIT_PGM_RATIOS, dev_priv->savePFIT_PGM_RATIOS); | 745 | I915_WRITE(PFIT_PGM_RATIOS, dev_priv->savePFIT_PGM_RATIOS); |
@@ -735,6 +780,7 @@ void i915_restore_display(struct drm_device *dev) | |||
735 | I915_WRITE(CPU_VGACNTRL, dev_priv->saveVGACNTRL); | 780 | I915_WRITE(CPU_VGACNTRL, dev_priv->saveVGACNTRL); |
736 | else | 781 | else |
737 | I915_WRITE(VGACNTRL, dev_priv->saveVGACNTRL); | 782 | I915_WRITE(VGACNTRL, dev_priv->saveVGACNTRL); |
783 | |||
738 | I915_WRITE(VGA0, dev_priv->saveVGA0); | 784 | I915_WRITE(VGA0, dev_priv->saveVGA0); |
739 | I915_WRITE(VGA1, dev_priv->saveVGA1); | 785 | I915_WRITE(VGA1, dev_priv->saveVGA1); |
740 | I915_WRITE(VGA_PD, dev_priv->saveVGA_PD); | 786 | I915_WRITE(VGA_PD, dev_priv->saveVGA_PD); |
@@ -751,6 +797,8 @@ int i915_save_state(struct drm_device *dev) | |||
751 | 797 | ||
752 | pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB); | 798 | pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB); |
753 | 799 | ||
800 | mutex_lock(&dev->struct_mutex); | ||
801 | |||
754 | /* Hardware status page */ | 802 | /* Hardware status page */ |
755 | dev_priv->saveHWS = I915_READ(HWS_PGA); | 803 | dev_priv->saveHWS = I915_READ(HWS_PGA); |
756 | 804 | ||
@@ -762,17 +810,19 @@ int i915_save_state(struct drm_device *dev) | |||
762 | dev_priv->saveDEIMR = I915_READ(DEIMR); | 810 | dev_priv->saveDEIMR = I915_READ(DEIMR); |
763 | dev_priv->saveGTIER = I915_READ(GTIER); | 811 | dev_priv->saveGTIER = I915_READ(GTIER); |
764 | dev_priv->saveGTIMR = I915_READ(GTIMR); | 812 | dev_priv->saveGTIMR = I915_READ(GTIMR); |
765 | dev_priv->saveFDI_RXA_IMR = I915_READ(FDI_RXA_IMR); | 813 | dev_priv->saveFDI_RXA_IMR = I915_READ(_FDI_RXA_IMR); |
766 | dev_priv->saveFDI_RXB_IMR = I915_READ(FDI_RXB_IMR); | 814 | dev_priv->saveFDI_RXB_IMR = I915_READ(_FDI_RXB_IMR); |
767 | dev_priv->saveMCHBAR_RENDER_STANDBY = | 815 | dev_priv->saveMCHBAR_RENDER_STANDBY = |
768 | I915_READ(MCHBAR_RENDER_STANDBY); | 816 | I915_READ(RSTDBYCTL); |
769 | } else { | 817 | } else { |
770 | dev_priv->saveIER = I915_READ(IER); | 818 | dev_priv->saveIER = I915_READ(IER); |
771 | dev_priv->saveIMR = I915_READ(IMR); | 819 | dev_priv->saveIMR = I915_READ(IMR); |
772 | } | 820 | } |
773 | 821 | ||
774 | if (HAS_PCH_SPLIT(dev)) | 822 | if (IS_IRONLAKE_M(dev)) |
775 | ironlake_disable_drps(dev); | 823 | ironlake_disable_drps(dev); |
824 | if (IS_GEN6(dev)) | ||
825 | gen6_disable_rps(dev); | ||
776 | 826 | ||
777 | /* Cache mode state */ | 827 | /* Cache mode state */ |
778 | dev_priv->saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0); | 828 | dev_priv->saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0); |
@@ -788,27 +838,7 @@ int i915_save_state(struct drm_device *dev) | |||
788 | for (i = 0; i < 3; i++) | 838 | for (i = 0; i < 3; i++) |
789 | dev_priv->saveSWF2[i] = I915_READ(SWF30 + (i << 2)); | 839 | dev_priv->saveSWF2[i] = I915_READ(SWF30 + (i << 2)); |
790 | 840 | ||
791 | /* Fences */ | 841 | mutex_unlock(&dev->struct_mutex); |
792 | switch (INTEL_INFO(dev)->gen) { | ||
793 | case 6: | ||
794 | for (i = 0; i < 16; i++) | ||
795 | dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8)); | ||
796 | break; | ||
797 | case 5: | ||
798 | case 4: | ||
799 | for (i = 0; i < 16; i++) | ||
800 | dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_965_0 + (i * 8)); | ||
801 | break; | ||
802 | case 3: | ||
803 | if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) | ||
804 | for (i = 0; i < 8; i++) | ||
805 | dev_priv->saveFENCE[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4)); | ||
806 | case 2: | ||
807 | for (i = 0; i < 8; i++) | ||
808 | dev_priv->saveFENCE[i] = I915_READ(FENCE_REG_830_0 + (i * 4)); | ||
809 | break; | ||
810 | |||
811 | } | ||
812 | 842 | ||
813 | return 0; | 843 | return 0; |
814 | } | 844 | } |
@@ -820,30 +850,11 @@ int i915_restore_state(struct drm_device *dev) | |||
820 | 850 | ||
821 | pci_write_config_byte(dev->pdev, LBB, dev_priv->saveLBB); | 851 | pci_write_config_byte(dev->pdev, LBB, dev_priv->saveLBB); |
822 | 852 | ||
853 | mutex_lock(&dev->struct_mutex); | ||
854 | |||
823 | /* Hardware status page */ | 855 | /* Hardware status page */ |
824 | I915_WRITE(HWS_PGA, dev_priv->saveHWS); | 856 | I915_WRITE(HWS_PGA, dev_priv->saveHWS); |
825 | 857 | ||
826 | /* Fences */ | ||
827 | switch (INTEL_INFO(dev)->gen) { | ||
828 | case 6: | ||
829 | for (i = 0; i < 16; i++) | ||
830 | I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), dev_priv->saveFENCE[i]); | ||
831 | break; | ||
832 | case 5: | ||
833 | case 4: | ||
834 | for (i = 0; i < 16; i++) | ||
835 | I915_WRITE64(FENCE_REG_965_0 + (i * 8), dev_priv->saveFENCE[i]); | ||
836 | break; | ||
837 | case 3: | ||
838 | case 2: | ||
839 | if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) | ||
840 | for (i = 0; i < 8; i++) | ||
841 | I915_WRITE(FENCE_REG_945_8 + (i * 4), dev_priv->saveFENCE[i+8]); | ||
842 | for (i = 0; i < 8; i++) | ||
843 | I915_WRITE(FENCE_REG_830_0 + (i * 4), dev_priv->saveFENCE[i]); | ||
844 | break; | ||
845 | } | ||
846 | |||
847 | i915_restore_display(dev); | 858 | i915_restore_display(dev); |
848 | 859 | ||
849 | /* Interrupt state */ | 860 | /* Interrupt state */ |
@@ -852,18 +863,25 @@ int i915_restore_state(struct drm_device *dev) | |||
852 | I915_WRITE(DEIMR, dev_priv->saveDEIMR); | 863 | I915_WRITE(DEIMR, dev_priv->saveDEIMR); |
853 | I915_WRITE(GTIER, dev_priv->saveGTIER); | 864 | I915_WRITE(GTIER, dev_priv->saveGTIER); |
854 | I915_WRITE(GTIMR, dev_priv->saveGTIMR); | 865 | I915_WRITE(GTIMR, dev_priv->saveGTIMR); |
855 | I915_WRITE(FDI_RXA_IMR, dev_priv->saveFDI_RXA_IMR); | 866 | I915_WRITE(_FDI_RXA_IMR, dev_priv->saveFDI_RXA_IMR); |
856 | I915_WRITE(FDI_RXB_IMR, dev_priv->saveFDI_RXB_IMR); | 867 | I915_WRITE(_FDI_RXB_IMR, dev_priv->saveFDI_RXB_IMR); |
857 | } else { | 868 | } else { |
858 | I915_WRITE (IER, dev_priv->saveIER); | 869 | I915_WRITE(IER, dev_priv->saveIER); |
859 | I915_WRITE (IMR, dev_priv->saveIMR); | 870 | I915_WRITE(IMR, dev_priv->saveIMR); |
860 | } | 871 | } |
872 | mutex_unlock(&dev->struct_mutex); | ||
861 | 873 | ||
862 | /* Clock gating state */ | ||
863 | intel_init_clock_gating(dev); | 874 | intel_init_clock_gating(dev); |
864 | 875 | ||
865 | if (HAS_PCH_SPLIT(dev)) | 876 | if (IS_IRONLAKE_M(dev)) { |
866 | ironlake_enable_drps(dev); | 877 | ironlake_enable_drps(dev); |
878 | intel_init_emon(dev); | ||
879 | } | ||
880 | |||
881 | if (IS_GEN6(dev)) | ||
882 | gen6_enable_rps(dev_priv); | ||
883 | |||
884 | mutex_lock(&dev->struct_mutex); | ||
867 | 885 | ||
868 | /* Cache mode state */ | 886 | /* Cache mode state */ |
869 | I915_WRITE (CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000); | 887 | I915_WRITE (CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000); |
@@ -878,9 +896,9 @@ int i915_restore_state(struct drm_device *dev) | |||
878 | for (i = 0; i < 3; i++) | 896 | for (i = 0; i < 3; i++) |
879 | I915_WRITE(SWF30 + (i << 2), dev_priv->saveSWF2[i]); | 897 | I915_WRITE(SWF30 + (i << 2), dev_priv->saveSWF2[i]); |
880 | 898 | ||
881 | /* I2C state */ | 899 | mutex_unlock(&dev->struct_mutex); |
882 | intel_i2c_reset_gmbus(dev); | 900 | |
901 | intel_i2c_reset(dev); | ||
883 | 902 | ||
884 | return 0; | 903 | return 0; |
885 | } | 904 | } |
886 | |||
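The save/restore paths now take dev->struct_mutex around the raw register accesses but drop it for the power-management helpers, which sleep and take locks of their own (an assumption consistent with the lock shuffling above). Condensed, i915_restore_state() ends up shaped like:

	mutex_lock(&dev->struct_mutex);
	/* restore HWS, display and interrupt registers */
	mutex_unlock(&dev->struct_mutex);

	intel_init_clock_gating(dev);
	if (IS_GEN6(dev))
		gen6_enable_rps(dev_priv);

	mutex_lock(&dev->struct_mutex);
	/* restore cache-mode and scratch (SWF) registers */
	mutex_unlock(&dev->struct_mutex);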
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h index fea97a21cc14..d623fefbfaca 100644 --- a/drivers/gpu/drm/i915/i915_trace.h +++ b/drivers/gpu/drm/i915/i915_trace.h | |||
@@ -6,6 +6,8 @@ | |||
6 | #include <linux/tracepoint.h> | 6 | #include <linux/tracepoint.h> |
7 | 7 | ||
8 | #include <drm/drmP.h> | 8 | #include <drm/drmP.h> |
9 | #include "i915_drv.h" | ||
10 | #include "intel_ringbuffer.h" | ||
9 | 11 | ||
10 | #undef TRACE_SYSTEM | 12 | #undef TRACE_SYSTEM |
11 | #define TRACE_SYSTEM i915 | 13 | #define TRACE_SYSTEM i915 |
@@ -15,97 +17,160 @@ | |||
15 | /* object tracking */ | 17 | /* object tracking */ |
16 | 18 | ||
17 | TRACE_EVENT(i915_gem_object_create, | 19 | TRACE_EVENT(i915_gem_object_create, |
18 | 20 | TP_PROTO(struct drm_i915_gem_object *obj), | |
19 | TP_PROTO(struct drm_gem_object *obj), | ||
20 | |||
21 | TP_ARGS(obj), | 21 | TP_ARGS(obj), |
22 | 22 | ||
23 | TP_STRUCT__entry( | 23 | TP_STRUCT__entry( |
24 | __field(struct drm_gem_object *, obj) | 24 | __field(struct drm_i915_gem_object *, obj) |
25 | __field(u32, size) | 25 | __field(u32, size) |
26 | ), | 26 | ), |
27 | 27 | ||
28 | TP_fast_assign( | 28 | TP_fast_assign( |
29 | __entry->obj = obj; | 29 | __entry->obj = obj; |
30 | __entry->size = obj->size; | 30 | __entry->size = obj->base.size; |
31 | ), | 31 | ), |
32 | 32 | ||
33 | TP_printk("obj=%p, size=%u", __entry->obj, __entry->size) | 33 | TP_printk("obj=%p, size=%u", __entry->obj, __entry->size) |
34 | ); | 34 | ); |
35 | 35 | ||
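Each TRACE_EVENT(name, ...) expands into a trace_name() stub that the driver calls at the point of interest; the TP_fast_assign body only runs while the event is enabled. For the event above, the call site looks like (placement illustrative):

	trace_i915_gem_object_create(obj);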
36 | TRACE_EVENT(i915_gem_object_bind, | 36 | TRACE_EVENT(i915_gem_object_bind, |
37 | TP_PROTO(struct drm_i915_gem_object *obj, bool mappable), | ||
38 | TP_ARGS(obj, mappable), | ||
37 | 39 | ||
38 | TP_PROTO(struct drm_gem_object *obj, u32 gtt_offset), | 40 | TP_STRUCT__entry( |
41 | __field(struct drm_i915_gem_object *, obj) | ||
42 | __field(u32, offset) | ||
43 | __field(u32, size) | ||
44 | __field(bool, mappable) | ||
45 | ), | ||
39 | 46 | ||
40 | TP_ARGS(obj, gtt_offset), | 47 | TP_fast_assign( |
48 | __entry->obj = obj; | ||
49 | __entry->offset = obj->gtt_space->start; | ||
50 | __entry->size = obj->gtt_space->size; | ||
51 | __entry->mappable = mappable; | ||
52 | ), | ||
53 | |||
54 | TP_printk("obj=%p, offset=%08x size=%x%s", | ||
55 | __entry->obj, __entry->offset, __entry->size, | ||
56 | __entry->mappable ? ", mappable" : "") | ||
57 | ); | ||
58 | |||
59 | TRACE_EVENT(i915_gem_object_unbind, | ||
60 | TP_PROTO(struct drm_i915_gem_object *obj), | ||
61 | TP_ARGS(obj), | ||
41 | 62 | ||
42 | TP_STRUCT__entry( | 63 | TP_STRUCT__entry( |
43 | __field(struct drm_gem_object *, obj) | 64 | __field(struct drm_i915_gem_object *, obj) |
44 | __field(u32, gtt_offset) | 65 | __field(u32, offset) |
66 | __field(u32, size) | ||
45 | ), | 67 | ), |
46 | 68 | ||
47 | TP_fast_assign( | 69 | TP_fast_assign( |
48 | __entry->obj = obj; | 70 | __entry->obj = obj; |
49 | __entry->gtt_offset = gtt_offset; | 71 | __entry->offset = obj->gtt_space->start; |
72 | __entry->size = obj->gtt_space->size; | ||
50 | ), | 73 | ), |
51 | 74 | ||
52 | TP_printk("obj=%p, gtt_offset=%08x", | 75 | TP_printk("obj=%p, offset=%08x size=%x", |
53 | __entry->obj, __entry->gtt_offset) | 76 | __entry->obj, __entry->offset, __entry->size) |
54 | ); | 77 | ); |
55 | 78 | ||
56 | TRACE_EVENT(i915_gem_object_change_domain, | 79 | TRACE_EVENT(i915_gem_object_change_domain, |
57 | 80 | TP_PROTO(struct drm_i915_gem_object *obj, u32 old_read, u32 old_write), | |
58 | TP_PROTO(struct drm_gem_object *obj, uint32_t old_read_domains, uint32_t old_write_domain), | 81 | TP_ARGS(obj, old_read, old_write), |
59 | |||
60 | TP_ARGS(obj, old_read_domains, old_write_domain), | ||
61 | 82 | ||
62 | TP_STRUCT__entry( | 83 | TP_STRUCT__entry( |
63 | __field(struct drm_gem_object *, obj) | 84 | __field(struct drm_i915_gem_object *, obj) |
64 | __field(u32, read_domains) | 85 | __field(u32, read_domains) |
65 | __field(u32, write_domain) | 86 | __field(u32, write_domain) |
66 | ), | 87 | ), |
67 | 88 | ||
68 | TP_fast_assign( | 89 | TP_fast_assign( |
69 | __entry->obj = obj; | 90 | __entry->obj = obj; |
70 | __entry->read_domains = obj->read_domains | (old_read_domains << 16); | 91 | __entry->read_domains = obj->base.read_domains | (old_read << 16); |
71 | __entry->write_domain = obj->write_domain | (old_write_domain << 16); | 92 | __entry->write_domain = obj->base.write_domain | (old_write << 16); |
72 | ), | 93 | ), |
73 | 94 | ||
74 | TP_printk("obj=%p, read=%04x, write=%04x", | 95 | TP_printk("obj=%p, read=%02x=>%02x, write=%02x=>%02x", |
75 | __entry->obj, | 96 | __entry->obj, |
76 | __entry->read_domains, __entry->write_domain) | 97 | __entry->read_domains >> 16, |
98 | __entry->read_domains & 0xffff, | ||
99 | __entry->write_domain >> 16, | ||
100 | __entry->write_domain & 0xffff) | ||
77 | ); | 101 | ); |
78 | 102 | ||
79 | TRACE_EVENT(i915_gem_object_get_fence, | 103 | TRACE_EVENT(i915_gem_object_pwrite, |
104 | TP_PROTO(struct drm_i915_gem_object *obj, u32 offset, u32 len), | ||
105 | TP_ARGS(obj, offset, len), | ||
80 | 106 | ||
81 | TP_PROTO(struct drm_gem_object *obj, int fence, int tiling_mode), | 107 | TP_STRUCT__entry( |
108 | __field(struct drm_i915_gem_object *, obj) | ||
109 | __field(u32, offset) | ||
110 | __field(u32, len) | ||
111 | ), | ||
82 | 112 | ||
83 | TP_ARGS(obj, fence, tiling_mode), | 113 | TP_fast_assign( |
114 | __entry->obj = obj; | ||
115 | __entry->offset = offset; | ||
116 | __entry->len = len; | ||
117 | ), | ||
118 | |||
119 | TP_printk("obj=%p, offset=%u, len=%u", | ||
120 | __entry->obj, __entry->offset, __entry->len) | ||
121 | ); | ||
122 | |||
123 | TRACE_EVENT(i915_gem_object_pread, | ||
124 | TP_PROTO(struct drm_i915_gem_object *obj, u32 offset, u32 len), | ||
125 | TP_ARGS(obj, offset, len), | ||
84 | 126 | ||
85 | TP_STRUCT__entry( | 127 | TP_STRUCT__entry( |
86 | __field(struct drm_gem_object *, obj) | 128 | __field(struct drm_i915_gem_object *, obj) |
87 | __field(int, fence) | 129 | __field(u32, offset) |
88 | __field(int, tiling_mode) | 130 | __field(u32, len) |
89 | ), | 131 | ), |
90 | 132 | ||
91 | TP_fast_assign( | 133 | TP_fast_assign( |
92 | __entry->obj = obj; | 134 | __entry->obj = obj; |
93 | __entry->fence = fence; | 135 | __entry->offset = offset; |
94 | __entry->tiling_mode = tiling_mode; | 136 | __entry->len = len; |
95 | ), | 137 | ), |
96 | 138 | ||
97 | TP_printk("obj=%p, fence=%d, tiling=%d", | 139 | TP_printk("obj=%p, offset=%u, len=%u", |
98 | __entry->obj, __entry->fence, __entry->tiling_mode) | 140 | __entry->obj, __entry->offset, __entry->len) |
99 | ); | 141 | ); |
100 | 142 | ||
101 | DECLARE_EVENT_CLASS(i915_gem_object, | 143 | TRACE_EVENT(i915_gem_object_fault, |
144 | TP_PROTO(struct drm_i915_gem_object *obj, u32 index, bool gtt, bool write), | ||
145 | TP_ARGS(obj, index, gtt, write), | ||
146 | |||
147 | TP_STRUCT__entry( | ||
148 | __field(struct drm_i915_gem_object *, obj) | ||
149 | __field(u32, index) | ||
150 | __field(bool, gtt) | ||
151 | __field(bool, write) | ||
152 | ), | ||
153 | |||
154 | TP_fast_assign( | ||
155 | __entry->obj = obj; | ||
156 | __entry->index = index; | ||
157 | __entry->gtt = gtt; | ||
158 | __entry->write = write; | ||
159 | ), | ||
102 | 160 | ||
103 | TP_PROTO(struct drm_gem_object *obj), | 161 | TP_printk("obj=%p, %s index=%u %s", |
162 | __entry->obj, | ||
163 | __entry->gtt ? "GTT" : "CPU", | ||
164 | __entry->index, | ||
165 | __entry->write ? ", writable" : "") | ||
166 | ); | ||
104 | 167 | ||
168 | DECLARE_EVENT_CLASS(i915_gem_object, | ||
169 | TP_PROTO(struct drm_i915_gem_object *obj), | ||
105 | TP_ARGS(obj), | 170 | TP_ARGS(obj), |
106 | 171 | ||
107 | TP_STRUCT__entry( | 172 | TP_STRUCT__entry( |
108 | __field(struct drm_gem_object *, obj) | 173 | __field(struct drm_i915_gem_object *, obj) |
109 | ), | 174 | ), |
110 | 175 | ||
111 | TP_fast_assign( | 176 | TP_fast_assign( |
@@ -116,160 +181,181 @@ DECLARE_EVENT_CLASS(i915_gem_object, | |||
116 | ); | 181 | ); |
117 | 182 | ||
118 | DEFINE_EVENT(i915_gem_object, i915_gem_object_clflush, | 183 | DEFINE_EVENT(i915_gem_object, i915_gem_object_clflush, |
119 | 184 | TP_PROTO(struct drm_i915_gem_object *obj), | |
120 | TP_PROTO(struct drm_gem_object *obj), | 185 | TP_ARGS(obj) |
121 | |||
122 | TP_ARGS(obj) | ||
123 | ); | 186 | ); |
124 | 187 | ||
125 | DEFINE_EVENT(i915_gem_object, i915_gem_object_unbind, | 188 | DEFINE_EVENT(i915_gem_object, i915_gem_object_destroy, |
126 | 189 | TP_PROTO(struct drm_i915_gem_object *obj), | |
127 | TP_PROTO(struct drm_gem_object *obj), | ||
128 | |||
129 | TP_ARGS(obj) | 190 | TP_ARGS(obj) |
130 | ); | 191 | ); |
131 | 192 | ||
132 | DEFINE_EVENT(i915_gem_object, i915_gem_object_destroy, | 193 | TRACE_EVENT(i915_gem_evict, |
194 | TP_PROTO(struct drm_device *dev, u32 size, u32 align, bool mappable), | ||
195 | TP_ARGS(dev, size, align, mappable), | ||
133 | 196 | ||
134 | TP_PROTO(struct drm_gem_object *obj), | 197 | TP_STRUCT__entry( |
198 | __field(u32, dev) | ||
199 | __field(u32, size) | ||
200 | __field(u32, align) | ||
201 | __field(bool, mappable) | ||
202 | ), | ||
135 | 203 | ||
136 | TP_ARGS(obj) | 204 | TP_fast_assign( |
205 | __entry->dev = dev->primary->index; | ||
206 | __entry->size = size; | ||
207 | __entry->align = align; | ||
208 | __entry->mappable = mappable; | ||
209 | ), | ||
210 | |||
211 | TP_printk("dev=%d, size=%d, align=%d %s", | ||
212 | __entry->dev, __entry->size, __entry->align, | ||
213 | __entry->mappable ? ", mappable" : "") | ||
137 | ); | 214 | ); |
138 | 215 | ||
139 | /* batch tracing */ | 216 | TRACE_EVENT(i915_gem_evict_everything, |
217 | TP_PROTO(struct drm_device *dev, bool purgeable), | ||
218 | TP_ARGS(dev, purgeable), | ||
140 | 219 | ||
141 | TRACE_EVENT(i915_gem_request_submit, | 220 | TP_STRUCT__entry( |
221 | __field(u32, dev) | ||
222 | __field(bool, purgeable) | ||
223 | ), | ||
224 | |||
225 | TP_fast_assign( | ||
226 | __entry->dev = dev->primary->index; | ||
227 | __entry->purgeable = purgeable; | ||
228 | ), | ||
142 | 229 | ||
143 | TP_PROTO(struct drm_device *dev, u32 seqno), | 230 | TP_printk("dev=%d%s", |
231 | __entry->dev, | ||
232 | __entry->purgeable ? ", purgeable only" : "") | ||
233 | ); | ||
144 | 234 | ||
145 | TP_ARGS(dev, seqno), | 235 | TRACE_EVENT(i915_gem_ring_dispatch, |
236 | TP_PROTO(struct intel_ring_buffer *ring, u32 seqno), | ||
237 | TP_ARGS(ring, seqno), | ||
146 | 238 | ||
147 | TP_STRUCT__entry( | 239 | TP_STRUCT__entry( |
148 | __field(u32, dev) | 240 | __field(u32, dev) |
241 | __field(u32, ring) | ||
149 | __field(u32, seqno) | 242 | __field(u32, seqno) |
150 | ), | 243 | ), |
151 | 244 | ||
152 | TP_fast_assign( | 245 | TP_fast_assign( |
153 | __entry->dev = dev->primary->index; | 246 | __entry->dev = ring->dev->primary->index; |
247 | __entry->ring = ring->id; | ||
154 | __entry->seqno = seqno; | 248 | __entry->seqno = seqno; |
155 | i915_trace_irq_get(dev, seqno); | 249 | i915_trace_irq_get(ring, seqno); |
156 | ), | 250 | ), |
157 | 251 | ||
158 | TP_printk("dev=%u, seqno=%u", __entry->dev, __entry->seqno) | 252 | TP_printk("dev=%u, ring=%u, seqno=%u", |
253 | __entry->dev, __entry->ring, __entry->seqno) | ||
159 | ); | 254 | ); |
160 | 255 | ||
161 | TRACE_EVENT(i915_gem_request_flush, | 256 | TRACE_EVENT(i915_gem_ring_flush, |
162 | 257 | TP_PROTO(struct intel_ring_buffer *ring, u32 invalidate, u32 flush), | |
163 | TP_PROTO(struct drm_device *dev, u32 seqno, | 258 | TP_ARGS(ring, invalidate, flush), |
164 | u32 flush_domains, u32 invalidate_domains), | ||
165 | |||
166 | TP_ARGS(dev, seqno, flush_domains, invalidate_domains), | ||
167 | 259 | ||
168 | TP_STRUCT__entry( | 260 | TP_STRUCT__entry( |
169 | __field(u32, dev) | 261 | __field(u32, dev) |
170 | __field(u32, seqno) | 262 | __field(u32, ring) |
171 | __field(u32, flush_domains) | 263 | __field(u32, invalidate) |
172 | __field(u32, invalidate_domains) | 264 | __field(u32, flush) |
173 | ), | 265 | ), |
174 | 266 | ||
175 | TP_fast_assign( | 267 | TP_fast_assign( |
176 | __entry->dev = dev->primary->index; | 268 | __entry->dev = ring->dev->primary->index; |
177 | __entry->seqno = seqno; | 269 | __entry->ring = ring->id; |
178 | __entry->flush_domains = flush_domains; | 270 | __entry->invalidate = invalidate; |
179 | __entry->invalidate_domains = invalidate_domains; | 271 | __entry->flush = flush; |
180 | ), | 272 | ), |
181 | 273 | ||
182 | TP_printk("dev=%u, seqno=%u, flush=%04x, invalidate=%04x", | 274 | TP_printk("dev=%u, ring=%x, invalidate=%04x, flush=%04x", |
183 | __entry->dev, __entry->seqno, | 275 | __entry->dev, __entry->ring, |
184 | __entry->flush_domains, __entry->invalidate_domains) | 276 | __entry->invalidate, __entry->flush) |
185 | ); | 277 | ); |
186 | 278 | ||
187 | DECLARE_EVENT_CLASS(i915_gem_request, | 279 | DECLARE_EVENT_CLASS(i915_gem_request, |
188 | 280 | TP_PROTO(struct intel_ring_buffer *ring, u32 seqno), | |
189 | TP_PROTO(struct drm_device *dev, u32 seqno), | 281 | TP_ARGS(ring, seqno), |
190 | |||
191 | TP_ARGS(dev, seqno), | ||
192 | 282 | ||
193 | TP_STRUCT__entry( | 283 | TP_STRUCT__entry( |
194 | __field(u32, dev) | 284 | __field(u32, dev) |
285 | __field(u32, ring) | ||
195 | __field(u32, seqno) | 286 | __field(u32, seqno) |
196 | ), | 287 | ), |
197 | 288 | ||
198 | TP_fast_assign( | 289 | TP_fast_assign( |
199 | __entry->dev = dev->primary->index; | 290 | __entry->dev = ring->dev->primary->index; |
291 | __entry->ring = ring->id; | ||
200 | __entry->seqno = seqno; | 292 | __entry->seqno = seqno; |
201 | ), | 293 | ), |
202 | 294 | ||
203 | TP_printk("dev=%u, seqno=%u", __entry->dev, __entry->seqno) | 295 | TP_printk("dev=%u, ring=%u, seqno=%u", |
296 | __entry->dev, __entry->ring, __entry->seqno) | ||
204 | ); | 297 | ); |
205 | 298 | ||
206 | DEFINE_EVENT(i915_gem_request, i915_gem_request_complete, | 299 | DEFINE_EVENT(i915_gem_request, i915_gem_request_add, |
207 | 300 | TP_PROTO(struct intel_ring_buffer *ring, u32 seqno), | |
208 | TP_PROTO(struct drm_device *dev, u32 seqno), | 301 | TP_ARGS(ring, seqno) |
302 | ); | ||
209 | 303 | ||
210 | TP_ARGS(dev, seqno) | 304 | DEFINE_EVENT(i915_gem_request, i915_gem_request_complete, |
305 | TP_PROTO(struct intel_ring_buffer *ring, u32 seqno), | ||
306 | TP_ARGS(ring, seqno) | ||
211 | ); | 307 | ); |
212 | 308 | ||
213 | DEFINE_EVENT(i915_gem_request, i915_gem_request_retire, | 309 | DEFINE_EVENT(i915_gem_request, i915_gem_request_retire, |
214 | 310 | TP_PROTO(struct intel_ring_buffer *ring, u32 seqno), | |
215 | TP_PROTO(struct drm_device *dev, u32 seqno), | 311 | TP_ARGS(ring, seqno) |
216 | |||
217 | TP_ARGS(dev, seqno) | ||
218 | ); | 312 | ); |
219 | 313 | ||
220 | DEFINE_EVENT(i915_gem_request, i915_gem_request_wait_begin, | 314 | DEFINE_EVENT(i915_gem_request, i915_gem_request_wait_begin, |
221 | 315 | TP_PROTO(struct intel_ring_buffer *ring, u32 seqno), | |
222 | TP_PROTO(struct drm_device *dev, u32 seqno), | 316 | TP_ARGS(ring, seqno) |
223 | |||
224 | TP_ARGS(dev, seqno) | ||
225 | ); | 317 | ); |
226 | 318 | ||
227 | DEFINE_EVENT(i915_gem_request, i915_gem_request_wait_end, | 319 | DEFINE_EVENT(i915_gem_request, i915_gem_request_wait_end, |
228 | 320 | TP_PROTO(struct intel_ring_buffer *ring, u32 seqno), | |
229 | TP_PROTO(struct drm_device *dev, u32 seqno), | 321 | TP_ARGS(ring, seqno) |
230 | |||
231 | TP_ARGS(dev, seqno) | ||
232 | ); | 322 | ); |
233 | 323 | ||
234 | DECLARE_EVENT_CLASS(i915_ring, | 324 | DECLARE_EVENT_CLASS(i915_ring, |
235 | 325 | TP_PROTO(struct intel_ring_buffer *ring), | |
236 | TP_PROTO(struct drm_device *dev), | 326 | TP_ARGS(ring), |
237 | |||
238 | TP_ARGS(dev), | ||
239 | 327 | ||
240 | TP_STRUCT__entry( | 328 | TP_STRUCT__entry( |
241 | __field(u32, dev) | 329 | __field(u32, dev) |
330 | __field(u32, ring) | ||
242 | ), | 331 | ), |
243 | 332 | ||
244 | TP_fast_assign( | 333 | TP_fast_assign( |
245 | __entry->dev = dev->primary->index; | 334 | __entry->dev = ring->dev->primary->index; |
335 | __entry->ring = ring->id; | ||
246 | ), | 336 | ), |
247 | 337 | ||
248 | TP_printk("dev=%u", __entry->dev) | 338 | TP_printk("dev=%u, ring=%u", __entry->dev, __entry->ring) |
249 | ); | 339 | ); |
250 | 340 | ||
251 | DEFINE_EVENT(i915_ring, i915_ring_wait_begin, | 341 | DEFINE_EVENT(i915_ring, i915_ring_wait_begin, |
252 | 342 | TP_PROTO(struct intel_ring_buffer *ring), | |
253 | TP_PROTO(struct drm_device *dev), | 343 | TP_ARGS(ring) |
254 | |||
255 | TP_ARGS(dev) | ||
256 | ); | 344 | ); |
257 | 345 | ||
258 | DEFINE_EVENT(i915_ring, i915_ring_wait_end, | 346 | DEFINE_EVENT(i915_ring, i915_ring_wait_end, |
259 | 347 | TP_PROTO(struct intel_ring_buffer *ring), | |
260 | TP_PROTO(struct drm_device *dev), | 348 | TP_ARGS(ring) |
261 | |||
262 | TP_ARGS(dev) | ||
263 | ); | 349 | ); |
264 | 350 | ||
265 | TRACE_EVENT(i915_flip_request, | 351 | TRACE_EVENT(i915_flip_request, |
266 | TP_PROTO(int plane, struct drm_gem_object *obj), | 352 | TP_PROTO(int plane, struct drm_i915_gem_object *obj), |
267 | 353 | ||
268 | TP_ARGS(plane, obj), | 354 | TP_ARGS(plane, obj), |
269 | 355 | ||
270 | TP_STRUCT__entry( | 356 | TP_STRUCT__entry( |
271 | __field(int, plane) | 357 | __field(int, plane) |
272 | __field(struct drm_gem_object *, obj) | 358 | __field(struct drm_i915_gem_object *, obj) |
273 | ), | 359 | ), |
274 | 360 | ||
275 | TP_fast_assign( | 361 | TP_fast_assign( |
@@ -281,13 +367,13 @@ TRACE_EVENT(i915_flip_request, | |||
281 | ); | 367 | ); |
282 | 368 | ||
283 | TRACE_EVENT(i915_flip_complete, | 369 | TRACE_EVENT(i915_flip_complete, |
284 | TP_PROTO(int plane, struct drm_gem_object *obj), | 370 | TP_PROTO(int plane, struct drm_i915_gem_object *obj), |
285 | 371 | ||
286 | TP_ARGS(plane, obj), | 372 | TP_ARGS(plane, obj), |
287 | 373 | ||
288 | TP_STRUCT__entry( | 374 | TP_STRUCT__entry( |
289 | __field(int, plane) | 375 | __field(int, plane) |
290 | __field(struct drm_gem_object *, obj) | 376 | __field(struct drm_i915_gem_object *, obj) |
291 | ), | 377 | ), |
292 | 378 | ||
293 | TP_fast_assign( | 379 | TP_fast_assign( |
@@ -298,6 +384,32 @@ TRACE_EVENT(i915_flip_complete, | |||
298 | TP_printk("plane=%d, obj=%p", __entry->plane, __entry->obj) | 384 | TP_printk("plane=%d, obj=%p", __entry->plane, __entry->obj) |
299 | ); | 385 | ); |
300 | 386 | ||
387 | TRACE_EVENT(i915_reg_rw, | ||
388 | TP_PROTO(bool write, u32 reg, u64 val, int len), | ||
389 | |||
390 | TP_ARGS(write, reg, val, len), | ||
391 | |||
392 | TP_STRUCT__entry( | ||
393 | __field(u64, val) | ||
394 | __field(u32, reg) | ||
395 | __field(u16, write) | ||
396 | __field(u16, len) | ||
397 | ), | ||
398 | |||
399 | TP_fast_assign( | ||
400 | __entry->val = (u64)val; | ||
401 | __entry->reg = reg; | ||
402 | __entry->write = write; | ||
403 | __entry->len = len; | ||
404 | ), | ||
405 | |||
406 | TP_printk("%s reg=0x%x, len=%d, val=(0x%x, 0x%x)", | ||
407 | __entry->write ? "write" : "read", | ||
408 | __entry->reg, __entry->len, | ||
409 | (u32)(__entry->val & 0xffffffff), | ||
410 | (u32)(__entry->val >> 32)) | ||
411 | ); | ||
412 | |||
301 | #endif /* _I915_TRACE_H_ */ | 413 | #endif /* _I915_TRACE_H_ */ |
302 | 414 | ||
303 | /* This part must be outside protection */ | 415 | /* This part must be outside protection */ |
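The new i915_reg_rw event is meant to be emitted from the MMIO accessors themselves so that every register access can be traced. A minimal sketch of the hook, assuming a write accessor of the usual shape (the exact plumbing is not part of this diff):

	static inline void i915_write32(struct drm_i915_private *dev_priv,
					u32 reg, u32 val)
	{
		trace_i915_reg_rw(true, reg, val, sizeof(val));
		writel(val, dev_priv->regs + reg);
	}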
diff --git a/drivers/gpu/drm/i915/intel_acpi.c b/drivers/gpu/drm/i915/intel_acpi.c new file mode 100644 index 000000000000..2cb8e0b9f1ee --- /dev/null +++ b/drivers/gpu/drm/i915/intel_acpi.c | |||
@@ -0,0 +1,252 @@ | |||
1 | /* | ||
2 | * Intel ACPI functions | ||
3 | * | ||
4 | * _DSM related code stolen from nouveau_acpi.c. | ||
5 | */ | ||
6 | #include <linux/pci.h> | ||
7 | #include <linux/acpi.h> | ||
8 | #include <linux/vga_switcheroo.h> | ||
9 | #include <acpi/acpi_drivers.h> | ||
10 | |||
11 | #include "drmP.h" | ||
12 | |||
13 | #define INTEL_DSM_REVISION_ID 1 /* For Calpella anyway... */ | ||
14 | |||
15 | #define INTEL_DSM_FN_SUPPORTED_FUNCTIONS 0 /* No args */ | ||
16 | #define INTEL_DSM_FN_PLATFORM_MUX_INFO 1 /* No args */ | ||
17 | |||
18 | static struct intel_dsm_priv { | ||
19 | acpi_handle dhandle; | ||
20 | } intel_dsm_priv; | ||
21 | |||
22 | static const u8 intel_dsm_guid[] = { | ||
23 | 0xd3, 0x73, 0xd8, 0x7e, | ||
24 | 0xd0, 0xc2, | ||
25 | 0x4f, 0x4e, | ||
26 | 0xa8, 0x54, | ||
27 | 0x0f, 0x13, 0x17, 0xb0, 0x1c, 0x2c | ||
28 | }; | ||
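	/* note: the 16 bytes above are UUID
	 * 7ed873d3-c2d0-4e4f-a854-0f1317b01c2c in the mixed-endian layout
	 * ACPI expects: the first three fields little-endian, the last
	 * two big-endian */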
29 | |||
30 | static int intel_dsm(acpi_handle handle, int func, int arg) | ||
31 | { | ||
32 | struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL }; | ||
33 | struct acpi_object_list input; | ||
34 | union acpi_object params[4]; | ||
35 | union acpi_object *obj; | ||
36 | u32 result; | ||
37 | int ret = 0; | ||
38 | |||
39 | input.count = 4; | ||
40 | input.pointer = params; | ||
41 | params[0].type = ACPI_TYPE_BUFFER; | ||
42 | params[0].buffer.length = sizeof(intel_dsm_guid); | ||
43 | params[0].buffer.pointer = (char *)intel_dsm_guid; | ||
44 | params[1].type = ACPI_TYPE_INTEGER; | ||
45 | params[1].integer.value = INTEL_DSM_REVISION_ID; | ||
46 | params[2].type = ACPI_TYPE_INTEGER; | ||
47 | params[2].integer.value = func; | ||
48 | params[3].type = ACPI_TYPE_INTEGER; | ||
49 | params[3].integer.value = arg; | ||
50 | |||
51 | ret = acpi_evaluate_object(handle, "_DSM", &input, &output); | ||
52 | if (ret) { | ||
53 | DRM_DEBUG_DRIVER("failed to evaluate _DSM: %d\n", ret); | ||
54 | return ret; | ||
55 | } | ||
56 | |||
57 | obj = (union acpi_object *)output.pointer; | ||
58 | |||
59 | result = 0; | ||
60 | switch (obj->type) { | ||
61 | case ACPI_TYPE_INTEGER: | ||
62 | result = obj->integer.value; | ||
63 | break; | ||
64 | |||
65 | case ACPI_TYPE_BUFFER: | ||
66 | if (obj->buffer.length == 4) { | ||
67 | result = (obj->buffer.pointer[0] | |||
68 | (obj->buffer.pointer[1] << 8) | | ||
69 | (obj->buffer.pointer[2] << 16) | | ||
70 | (obj->buffer.pointer[3] << 24)); | ||
71 | break; | ||
72 | } | ||
73 | default: | ||
74 | ret = -EINVAL; | ||
75 | break; | ||
76 | } | ||
77 | if (result == 0x80000002) | ||
78 | ret = -ENODEV; | ||
79 | |||
80 | kfree(output.pointer); | ||
81 | return ret; | ||
82 | } | ||
83 | |||
84 | static char *intel_dsm_port_name(u8 id) | ||
85 | { | ||
86 | switch (id) { | ||
87 | case 0: | ||
88 | return "Reserved"; | ||
89 | case 1: | ||
90 | return "Analog VGA"; | ||
91 | case 2: | ||
92 | return "LVDS"; | ||
93 | case 3: | ||
94 | return "Reserved"; | ||
95 | case 4: | ||
96 | return "HDMI/DVI_B"; | ||
97 | case 5: | ||
98 | return "HDMI/DVI_C"; | ||
99 | case 6: | ||
100 | return "HDMI/DVI_D"; | ||
101 | case 7: | ||
102 | return "DisplayPort_A"; | ||
103 | case 8: | ||
104 | return "DisplayPort_B"; | ||
105 | case 9: | ||
106 | return "DisplayPort_C"; | ||
107 | case 0xa: | ||
108 | return "DisplayPort_D"; | ||
109 | case 0xb: | ||
110 | case 0xc: | ||
111 | case 0xd: | ||
112 | return "Reserved"; | ||
113 | case 0xe: | ||
114 | return "WiDi"; | ||
115 | default: | ||
116 | return "bad type"; | ||
117 | } | ||
118 | } | ||
119 | |||
120 | static char *intel_dsm_mux_type(u8 type) | ||
121 | { | ||
122 | switch (type) { | ||
123 | case 0: | ||
124 | return "unknown"; | ||
125 | case 1: | ||
126 | return "No MUX, iGPU only"; | ||
127 | case 2: | ||
128 | return "No MUX, dGPU only"; | ||
129 | case 3: | ||
130 | return "MUXed between iGPU and dGPU"; | ||
131 | default: | ||
132 | return "bad type"; | ||
133 | } | ||
134 | } | ||
135 | |||
136 | static void intel_dsm_platform_mux_info(void) | ||
137 | { | ||
138 | struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL }; | ||
139 | struct acpi_object_list input; | ||
140 | union acpi_object params[4]; | ||
141 | union acpi_object *pkg; | ||
142 | int i, ret; | ||
143 | |||
144 | input.count = 4; | ||
145 | input.pointer = params; | ||
146 | params[0].type = ACPI_TYPE_BUFFER; | ||
147 | params[0].buffer.length = sizeof(intel_dsm_guid); | ||
148 | params[0].buffer.pointer = (char *)intel_dsm_guid; | ||
149 | params[1].type = ACPI_TYPE_INTEGER; | ||
150 | params[1].integer.value = INTEL_DSM_REVISION_ID; | ||
151 | params[2].type = ACPI_TYPE_INTEGER; | ||
152 | params[2].integer.value = INTEL_DSM_FN_PLATFORM_MUX_INFO; | ||
153 | params[3].type = ACPI_TYPE_INTEGER; | ||
154 | params[3].integer.value = 0; | ||
155 | |||
156 | ret = acpi_evaluate_object(intel_dsm_priv.dhandle, "_DSM", &input, | ||
157 | &output); | ||
158 | if (ret) { | ||
159 | DRM_DEBUG_DRIVER("failed to evaluate _DSM: %d\n", ret); | ||
160 | goto out; | ||
161 | } | ||
162 | |||
163 | pkg = (union acpi_object *)output.pointer; | ||
164 | |||
165 | if (pkg->type == ACPI_TYPE_PACKAGE) { | ||
166 | union acpi_object *connector_count = &pkg->package.elements[0]; | ||
167 | DRM_DEBUG_DRIVER("MUX info connectors: %lld\n", | ||
168 | (unsigned long long)connector_count->integer.value); | ||
169 | for (i = 1; i < pkg->package.count; i++) { | ||
170 | union acpi_object *obj = &pkg->package.elements[i]; | ||
171 | union acpi_object *connector_id = | ||
172 | &obj->package.elements[0]; | ||
173 | union acpi_object *info = &obj->package.elements[1]; | ||
174 | DRM_DEBUG_DRIVER("Connector id: 0x%016llx\n", | ||
175 | (unsigned long long)connector_id->integer.value); | ||
176 | DRM_DEBUG_DRIVER(" port id: %s\n", | ||
177 | intel_dsm_port_name(info->buffer.pointer[0])); | ||
178 | DRM_DEBUG_DRIVER(" display mux info: %s\n", | ||
179 | intel_dsm_mux_type(info->buffer.pointer[1])); | ||
180 | DRM_DEBUG_DRIVER(" aux/dc mux info: %s\n", | ||
181 | intel_dsm_mux_type(info->buffer.pointer[2])); | ||
182 | DRM_DEBUG_DRIVER(" hpd mux info: %s\n", | ||
183 | intel_dsm_mux_type(info->buffer.pointer[3])); | ||
184 | } | ||
185 | } else { | ||
186 | DRM_ERROR("MUX INFO call failed\n"); | ||
187 | } | ||
188 | |||
189 | out: | ||
190 | kfree(output.pointer); | ||
191 | } | ||
192 | |||
193 | static bool intel_dsm_pci_probe(struct pci_dev *pdev) | ||
194 | { | ||
195 | acpi_handle dhandle, intel_handle; | ||
196 | acpi_status status; | ||
197 | int ret; | ||
198 | |||
199 | dhandle = DEVICE_ACPI_HANDLE(&pdev->dev); | ||
200 | if (!dhandle) | ||
201 | return false; | ||
202 | |||
203 | status = acpi_get_handle(dhandle, "_DSM", &intel_handle); | ||
204 | if (ACPI_FAILURE(status)) { | ||
205 | DRM_DEBUG_KMS("no _DSM method for intel device\n"); | ||
206 | return false; | ||
207 | } | ||
208 | |||
209 | ret = intel_dsm(dhandle, INTEL_DSM_FN_SUPPORTED_FUNCTIONS, 0); | ||
210 | if (ret < 0) { | ||
211 | DRM_ERROR("failed to get supported _DSM functions\n"); | ||
212 | return false; | ||
213 | } | ||
214 | |||
215 | intel_dsm_priv.dhandle = dhandle; | ||
216 | |||
217 | intel_dsm_platform_mux_info(); | ||
218 | return true; | ||
219 | } | ||
220 | |||
221 | static bool intel_dsm_detect(void) | ||
222 | { | ||
223 | char acpi_method_name[255] = { 0 }; | ||
224 | struct acpi_buffer buffer = {sizeof(acpi_method_name), acpi_method_name}; | ||
225 | struct pci_dev *pdev = NULL; | ||
226 | bool has_dsm = false; | ||
227 | int vga_count = 0; | ||
228 | |||
229 | while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) { | ||
230 | vga_count++; | ||
231 | has_dsm |= intel_dsm_pci_probe(pdev); | ||
232 | } | ||
233 | |||
234 | if (vga_count == 2 && has_dsm) { | ||
235 | acpi_get_name(intel_dsm_priv.dhandle, ACPI_FULL_PATHNAME, &buffer); | ||
236 | DRM_DEBUG_DRIVER("VGA switcheroo: detected DSM switching method %s handle\n", | ||
237 | acpi_method_name); | ||
238 | return true; | ||
239 | } | ||
240 | |||
241 | return false; | ||
242 | } | ||
243 | |||
244 | void intel_register_dsm_handler(void) | ||
245 | { | ||
246 | if (!intel_dsm_detect()) | ||
247 | return; | ||
248 | } | ||
249 | |||
250 | void intel_unregister_dsm_handler(void) | ||
251 | { | ||
252 | } | ||
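intel_unregister_dsm_handler() is deliberately empty for now; the pair exists so the driver can bracket detection around its own lifetime. A sketch of the expected call sites (their placement in the load/unload path is an assumption, not shown in this diff):

	/* during driver load */
	intel_register_dsm_handler();

	/* during driver unload */
	intel_unregister_dsm_handler();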
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c index 96f75d7f6633..927442a11925 100644 --- a/drivers/gpu/drm/i915/intel_bios.c +++ b/drivers/gpu/drm/i915/intel_bios.c | |||
@@ -24,6 +24,7 @@ | |||
24 | * Eric Anholt <eric@anholt.net> | 24 | * Eric Anholt <eric@anholt.net> |
25 | * | 25 | * |
26 | */ | 26 | */ |
27 | #include <drm/drm_dp_helper.h> | ||
27 | #include "drmP.h" | 28 | #include "drmP.h" |
28 | #include "drm.h" | 29 | #include "drm.h" |
29 | #include "i915_drm.h" | 30 | #include "i915_drm.h" |
@@ -129,10 +130,6 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv, | |||
129 | int i, temp_downclock; | 130 | int i, temp_downclock; |
130 | struct drm_display_mode *temp_mode; | 131 | struct drm_display_mode *temp_mode; |
131 | 132 | ||
132 | /* Defaults if we can't find VBT info */ | ||
133 | dev_priv->lvds_dither = 0; | ||
134 | dev_priv->lvds_vbt = 0; | ||
135 | |||
136 | lvds_options = find_section(bdb, BDB_LVDS_OPTIONS); | 133 | lvds_options = find_section(bdb, BDB_LVDS_OPTIONS); |
137 | if (!lvds_options) | 134 | if (!lvds_options) |
138 | return; | 135 | return; |
@@ -140,6 +137,7 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv, | |||
140 | dev_priv->lvds_dither = lvds_options->pixel_dither; | 137 | dev_priv->lvds_dither = lvds_options->pixel_dither; |
141 | if (lvds_options->panel_type == 0xff) | 138 | if (lvds_options->panel_type == 0xff) |
142 | return; | 139 | return; |
140 | |||
143 | panel_type = lvds_options->panel_type; | 141 | panel_type = lvds_options->panel_type; |
144 | 142 | ||
145 | lvds_lfp_data = find_section(bdb, BDB_LVDS_LFP_DATA); | 143 | lvds_lfp_data = find_section(bdb, BDB_LVDS_LFP_DATA); |
@@ -169,6 +167,8 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv, | |||
169 | ((unsigned char *)entry + dvo_timing_offset); | 167 | ((unsigned char *)entry + dvo_timing_offset); |
170 | 168 | ||
171 | panel_fixed_mode = kzalloc(sizeof(*panel_fixed_mode), GFP_KERNEL); | 169 | panel_fixed_mode = kzalloc(sizeof(*panel_fixed_mode), GFP_KERNEL); |
170 | if (!panel_fixed_mode) | ||
171 | return; | ||
172 | 172 | ||
173 | fill_detail_timing_data(panel_fixed_mode, dvo_timing); | 173 | fill_detail_timing_data(panel_fixed_mode, dvo_timing); |
174 | 174 | ||
@@ -214,9 +214,9 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv, | |||
214 | i915_lvds_downclock) { | 214 | i915_lvds_downclock) { |
215 | dev_priv->lvds_downclock_avail = 1; | 215 | dev_priv->lvds_downclock_avail = 1; |
216 | dev_priv->lvds_downclock = temp_downclock; | 216 | dev_priv->lvds_downclock = temp_downclock; |
217 | DRM_DEBUG_KMS("LVDS downclock is found in VBT. ", | 217 | DRM_DEBUG_KMS("LVDS downclock is found in VBT. " |
218 | "Normal Clock %dKHz, downclock %dKHz\n", | 218 | "Normal Clock %dKHz, downclock %dKHz\n", |
219 | temp_downclock, panel_fixed_mode->clock); | 219 | temp_downclock, panel_fixed_mode->clock); |
220 | } | 220 | } |
221 | return; | 221 | return; |
222 | } | 222 | } |
@@ -226,31 +226,49 @@ static void | |||
226 | parse_sdvo_panel_data(struct drm_i915_private *dev_priv, | 226 | parse_sdvo_panel_data(struct drm_i915_private *dev_priv, |
227 | struct bdb_header *bdb) | 227 | struct bdb_header *bdb) |
228 | { | 228 | { |
229 | struct bdb_sdvo_lvds_options *sdvo_lvds_options; | ||
230 | struct lvds_dvo_timing *dvo_timing; | 229 | struct lvds_dvo_timing *dvo_timing; |
231 | struct drm_display_mode *panel_fixed_mode; | 230 | struct drm_display_mode *panel_fixed_mode; |
231 | int index; | ||
232 | 232 | ||
233 | dev_priv->sdvo_lvds_vbt_mode = NULL; | 233 | index = i915_vbt_sdvo_panel_type; |
234 | if (index == -1) { | ||
235 | struct bdb_sdvo_lvds_options *sdvo_lvds_options; | ||
234 | 236 | ||
235 | sdvo_lvds_options = find_section(bdb, BDB_SDVO_LVDS_OPTIONS); | 237 | sdvo_lvds_options = find_section(bdb, BDB_SDVO_LVDS_OPTIONS); |
236 | if (!sdvo_lvds_options) | 238 | if (!sdvo_lvds_options) |
237 | return; | 239 | return; |
240 | |||
241 | index = sdvo_lvds_options->panel_type; | ||
242 | } | ||
238 | 243 | ||
239 | dvo_timing = find_section(bdb, BDB_SDVO_PANEL_DTDS); | 244 | dvo_timing = find_section(bdb, BDB_SDVO_PANEL_DTDS); |
240 | if (!dvo_timing) | 245 | if (!dvo_timing) |
241 | return; | 246 | return; |
242 | 247 | ||
243 | panel_fixed_mode = kzalloc(sizeof(*panel_fixed_mode), GFP_KERNEL); | 248 | panel_fixed_mode = kzalloc(sizeof(*panel_fixed_mode), GFP_KERNEL); |
244 | |||
245 | if (!panel_fixed_mode) | 249 | if (!panel_fixed_mode) |
246 | return; | 250 | return; |
247 | 251 | ||
248 | fill_detail_timing_data(panel_fixed_mode, | 252 | fill_detail_timing_data(panel_fixed_mode, dvo_timing + index); |
249 | dvo_timing + sdvo_lvds_options->panel_type); | ||
250 | 253 | ||
251 | dev_priv->sdvo_lvds_vbt_mode = panel_fixed_mode; | 254 | dev_priv->sdvo_lvds_vbt_mode = panel_fixed_mode; |
252 | 255 | ||
253 | return; | 256 | DRM_DEBUG_KMS("Found SDVO panel mode in BIOS VBT tables:\n"); |
257 | drm_mode_debug_printmodeline(panel_fixed_mode); | ||
258 | } | ||
259 | |||
260 | static int intel_bios_ssc_frequency(struct drm_device *dev, | ||
261 | bool alternate) | ||
262 | { | ||
263 | switch (INTEL_INFO(dev)->gen) { | ||
264 | case 2: | ||
265 | return alternate ? 66 : 48; | ||
266 | case 3: | ||
267 | case 4: | ||
268 | return alternate ? 100 : 96; | ||
269 | default: | ||
270 | return alternate ? 100 : 120; | ||
271 | } | ||
254 | } | 272 | } |
255 | 273 | ||
256 | static void | 274 | static void |
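Collapsed into a table, intel_bios_ssc_frequency() returns the SSC reference in MHz:

	gen 2		: 48, or 66 with the alternate bit set
	gen 3 and 4	: 96, or 100 with the alternate bit set
	gen 5 onward	: 120, or 100 with the alternate bit set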
@@ -260,27 +278,13 @@ parse_general_features(struct drm_i915_private *dev_priv, | |||
260 | struct drm_device *dev = dev_priv->dev; | 278 | struct drm_device *dev = dev_priv->dev; |
261 | struct bdb_general_features *general; | 279 | struct bdb_general_features *general; |
262 | 280 | ||
263 | /* Set sensible defaults in case we can't find the general block */ | ||
264 | dev_priv->int_tv_support = 1; | ||
265 | dev_priv->int_crt_support = 1; | ||
266 | |||
267 | general = find_section(bdb, BDB_GENERAL_FEATURES); | 281 | general = find_section(bdb, BDB_GENERAL_FEATURES); |
268 | if (general) { | 282 | if (general) { |
269 | dev_priv->int_tv_support = general->int_tv_support; | 283 | dev_priv->int_tv_support = general->int_tv_support; |
270 | dev_priv->int_crt_support = general->int_crt_support; | 284 | dev_priv->int_crt_support = general->int_crt_support; |
271 | dev_priv->lvds_use_ssc = general->enable_ssc; | 285 | dev_priv->lvds_use_ssc = general->enable_ssc; |
272 | 286 | dev_priv->lvds_ssc_freq = | |
273 | if (dev_priv->lvds_use_ssc) { | 287 | intel_bios_ssc_frequency(dev, general->ssc_freq); |
274 | if (IS_I85X(dev_priv->dev)) | ||
275 | dev_priv->lvds_ssc_freq = | ||
276 | general->ssc_freq ? 66 : 48; | ||
277 | else if (IS_IRONLAKE(dev_priv->dev) || IS_GEN6(dev)) | ||
278 | dev_priv->lvds_ssc_freq = | ||
279 | general->ssc_freq ? 100 : 120; | ||
280 | else | ||
281 | dev_priv->lvds_ssc_freq = | ||
282 | general->ssc_freq ? 100 : 96; | ||
283 | } | ||
284 | } | 288 | } |
285 | } | 289 | } |
286 | 290 | ||
@@ -289,14 +293,6 @@ parse_general_definitions(struct drm_i915_private *dev_priv, | |||
289 | struct bdb_header *bdb) | 293 | struct bdb_header *bdb) |
290 | { | 294 | { |
291 | struct bdb_general_definitions *general; | 295 | struct bdb_general_definitions *general; |
292 | const int crt_bus_map_table[] = { | ||
293 | GPIOB, | ||
294 | GPIOA, | ||
295 | GPIOC, | ||
296 | GPIOD, | ||
297 | GPIOE, | ||
298 | GPIOF, | ||
299 | }; | ||
300 | 296 | ||
301 | general = find_section(bdb, BDB_GENERAL_DEFINITIONS); | 297 | general = find_section(bdb, BDB_GENERAL_DEFINITIONS); |
302 | if (general) { | 298 | if (general) { |
@@ -304,10 +300,8 @@ parse_general_definitions(struct drm_i915_private *dev_priv, | |||
304 | if (block_size >= sizeof(*general)) { | 300 | if (block_size >= sizeof(*general)) { |
305 | int bus_pin = general->crt_ddc_gmbus_pin; | 301 | int bus_pin = general->crt_ddc_gmbus_pin; |
306 | DRM_DEBUG_KMS("crt_ddc_bus_pin: %d\n", bus_pin); | 302 | DRM_DEBUG_KMS("crt_ddc_bus_pin: %d\n", bus_pin); |
307 | if ((bus_pin >= 1) && (bus_pin <= 6)) { | 303 | if (bus_pin >= 1 && bus_pin <= 6) |
308 | dev_priv->crt_ddc_bus = | 304 | dev_priv->crt_ddc_pin = bus_pin; |
309 | crt_bus_map_table[bus_pin-1]; | ||
310 | } | ||
311 | } else { | 305 | } else { |
312 | DRM_DEBUG_KMS("BDB_GD too small (%d). Invalid.\n", | 306 | DRM_DEBUG_KMS("BDB_GD too small (%d). Invalid.\n", |
313 | block_size); | 307 | block_size); |
@@ -317,7 +311,7 @@ parse_general_definitions(struct drm_i915_private *dev_priv, | |||
317 | 311 | ||
318 | static void | 312 | static void |
319 | parse_sdvo_device_mapping(struct drm_i915_private *dev_priv, | 313 | parse_sdvo_device_mapping(struct drm_i915_private *dev_priv, |
320 | struct bdb_header *bdb) | 314 | struct bdb_header *bdb) |
321 | { | 315 | { |
322 | struct sdvo_device_mapping *p_mapping; | 316 | struct sdvo_device_mapping *p_mapping; |
323 | struct bdb_general_definitions *p_defs; | 317 | struct bdb_general_definitions *p_defs; |
@@ -327,7 +321,7 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv, | |||
327 | 321 | ||
328 | p_defs = find_section(bdb, BDB_GENERAL_DEFINITIONS); | 322 | p_defs = find_section(bdb, BDB_GENERAL_DEFINITIONS); |
329 | if (!p_defs) { | 323 | if (!p_defs) { |
330 | DRM_DEBUG_KMS("No general definition block is found\n"); | 324 | DRM_DEBUG_KMS("No general definition block is found, unable to construct sdvo mapping.\n"); |
331 | return; | 325 | return; |
332 | } | 326 | } |
333 | /* Check whether the size of the child device meets the requirements. | 327 | /* Check whether the size of the child device meets the requirements. |
@@ -377,7 +371,16 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv, | |||
377 | p_mapping->slave_addr = p_child->slave_addr; | 371 | p_mapping->slave_addr = p_child->slave_addr; |
378 | p_mapping->dvo_wiring = p_child->dvo_wiring; | 372 | p_mapping->dvo_wiring = p_child->dvo_wiring; |
379 | p_mapping->ddc_pin = p_child->ddc_pin; | 373 | p_mapping->ddc_pin = p_child->ddc_pin; |
374 | p_mapping->i2c_pin = p_child->i2c_pin; | ||
375 | p_mapping->i2c_speed = p_child->i2c_speed; | ||
380 | p_mapping->initialized = 1; | 376 | p_mapping->initialized = 1; |
377 | DRM_DEBUG_KMS("SDVO device: dvo=%x, addr=%x, wiring=%d, ddc_pin=%d, i2c_pin=%d, i2c_speed=%d\n", | ||
378 | p_mapping->dvo_port, | ||
379 | p_mapping->slave_addr, | ||
380 | p_mapping->dvo_wiring, | ||
381 | p_mapping->ddc_pin, | ||
382 | p_mapping->i2c_pin, | ||
383 | p_mapping->i2c_speed); | ||
381 | } else { | 384 | } else { |
382 | DRM_DEBUG_KMS("Maybe one SDVO port is shared by " | 385 | DRM_DEBUG_KMS("Maybe one SDVO port is shared by " |
383 | "two SDVO device.\n"); | 386 | "two SDVO device.\n"); |
@@ -409,14 +412,11 @@ parse_driver_features(struct drm_i915_private *dev_priv, | |||
409 | if (!driver) | 412 | if (!driver) |
410 | return; | 413 | return; |
411 | 414 | ||
412 | if (driver && SUPPORTS_EDP(dev) && | 415 | if (SUPPORTS_EDP(dev) && |
413 | driver->lvds_config == BDB_DRIVER_FEATURE_EDP) { | 416 | driver->lvds_config == BDB_DRIVER_FEATURE_EDP) |
414 | dev_priv->edp_support = 1; | 417 | dev_priv->edp.support = 1; |
415 | } else { | ||
416 | dev_priv->edp_support = 0; | ||
417 | } | ||
418 | 418 | ||
419 | if (driver && driver->dual_frequency) | 419 | if (driver->dual_frequency) |
420 | dev_priv->render_reclock_avail = true; | 420 | dev_priv->render_reclock_avail = true; |
421 | } | 421 | } |
422 | 422 | ||
@@ -424,27 +424,78 @@ static void | |||
424 | parse_edp(struct drm_i915_private *dev_priv, struct bdb_header *bdb) | 424 | parse_edp(struct drm_i915_private *dev_priv, struct bdb_header *bdb) |
425 | { | 425 | { |
426 | struct bdb_edp *edp; | 426 | struct bdb_edp *edp; |
427 | struct edp_power_seq *edp_pps; | ||
428 | struct edp_link_params *edp_link_params; | ||
427 | 429 | ||
428 | edp = find_section(bdb, BDB_EDP); | 430 | edp = find_section(bdb, BDB_EDP); |
429 | if (!edp) { | 431 | if (!edp) { |
430 | if (SUPPORTS_EDP(dev_priv->dev) && dev_priv->edp_support) { | 432 | if (SUPPORTS_EDP(dev_priv->dev) && dev_priv->edp.support) { |
431 | DRM_DEBUG_KMS("No eDP BDB found but eDP panel " | 433 | DRM_DEBUG_KMS("No eDP BDB found but eDP panel " |
432 | "supported, assume 18bpp panel color " | 434 | "supported, assume %dbpp panel color " |
433 | "depth.\n"); | 435 | "depth.\n", |
434 | dev_priv->edp_bpp = 18; | 436 | dev_priv->edp.bpp); |
435 | } | 437 | } |
436 | return; | 438 | return; |
437 | } | 439 | } |
438 | 440 | ||
439 | switch ((edp->color_depth >> (panel_type * 2)) & 3) { | 441 | switch ((edp->color_depth >> (panel_type * 2)) & 3) { |
440 | case EDP_18BPP: | 442 | case EDP_18BPP: |
441 | dev_priv->edp_bpp = 18; | 443 | dev_priv->edp.bpp = 18; |
442 | break; | 444 | break; |
443 | case EDP_24BPP: | 445 | case EDP_24BPP: |
444 | dev_priv->edp_bpp = 24; | 446 | dev_priv->edp.bpp = 24; |
445 | break; | 447 | break; |
446 | case EDP_30BPP: | 448 | case EDP_30BPP: |
447 | dev_priv->edp_bpp = 30; | 449 | dev_priv->edp.bpp = 30; |
450 | break; | ||
451 | } | ||
452 | |||
453 | /* Get the eDP sequencing and link info */ | ||
454 | edp_pps = &edp->power_seqs[panel_type]; | ||
455 | edp_link_params = &edp->link_params[panel_type]; | ||
456 | |||
457 | dev_priv->edp.pps = *edp_pps; | ||
458 | |||
459 | dev_priv->edp.rate = edp_link_params->rate ? DP_LINK_BW_2_7 : | ||
460 | DP_LINK_BW_1_62; | ||
461 | switch (edp_link_params->lanes) { | ||
462 | case 0: | ||
463 | dev_priv->edp.lanes = 1; | ||
464 | break; | ||
465 | case 1: | ||
466 | dev_priv->edp.lanes = 2; | ||
467 | break; | ||
468 | case 3: | ||
469 | default: | ||
470 | dev_priv->edp.lanes = 4; | ||
471 | break; | ||
472 | } | ||
473 | switch (edp_link_params->preemphasis) { | ||
474 | case 0: | ||
475 | dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_0; | ||
476 | break; | ||
477 | case 1: | ||
478 | dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_3_5; | ||
479 | break; | ||
480 | case 2: | ||
481 | dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_6; | ||
482 | break; | ||
483 | case 3: | ||
484 | dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_9_5; | ||
485 | break; | ||
486 | } | ||
487 | switch (edp_link_params->vswing) { | ||
488 | case 0: | ||
489 | dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_400; | ||
490 | break; | ||
491 | case 1: | ||
492 | dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_600; | ||
493 | break; | ||
494 | case 2: | ||
495 | dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_800; | ||
496 | break; | ||
497 | case 3: | ||
498 | dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_1200; | ||
448 | break; | 499 | break; |
449 | } | 500 | } |
450 | } | 501 | } |
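Each of the three switch statements above decodes a 2-bit VBT field into a DP link-training value. The same decode can be read off as lookup tables; a standalone sketch in plain C, where the strings merely name the DP_* macros and the lane table encodes the fact that the unspecified value 2 falls through to the 4-lane default:

#include <stdio.h>

/* Illustrative tables mirroring the parse_edp() switches above. */
static const char *edp_rate[2] = {
	"DP_LINK_BW_1_62", "DP_LINK_BW_2_7"
};
static const int edp_lanes[4] = { 1, 2, 4 /* default */, 4 };
static const char *edp_preemphasis[4] = {
	"DP_TRAIN_PRE_EMPHASIS_0", "DP_TRAIN_PRE_EMPHASIS_3_5",
	"DP_TRAIN_PRE_EMPHASIS_6", "DP_TRAIN_PRE_EMPHASIS_9_5",
};
static const char *edp_vswing[4] = {
	"DP_TRAIN_VOLTAGE_SWING_400", "DP_TRAIN_VOLTAGE_SWING_600",
	"DP_TRAIN_VOLTAGE_SWING_800", "DP_TRAIN_VOLTAGE_SWING_1200",
};

int main(void)
{
	/* e.g. a VBT entry with rate=1, lanes=1, preemphasis=2, vswing=3 */
	printf("rate=%s lanes=%d preemphasis=%s vswing=%s\n",
	       edp_rate[1], edp_lanes[1], edp_preemphasis[2], edp_vswing[3]);
	return 0;
}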
@@ -460,7 +511,7 @@ parse_device_mapping(struct drm_i915_private *dev_priv, | |||
460 | 511 | ||
461 | p_defs = find_section(bdb, BDB_GENERAL_DEFINITIONS); | 512 | p_defs = find_section(bdb, BDB_GENERAL_DEFINITIONS); |
462 | if (!p_defs) { | 513 | if (!p_defs) { |
463 | DRM_DEBUG_KMS("No general definition block is found\n"); | 514 | DRM_DEBUG_KMS("No general definition block is found, no devices defined.\n"); |
464 | return; | 515 | return; |
465 | } | 516 | } |
466 | /* Check whether the size of the child device meets the requirements. | 517 | /* Check whether the size of the child device meets the requirements. |
@@ -513,50 +564,89 @@ parse_device_mapping(struct drm_i915_private *dev_priv, | |||
513 | } | 564 | } |
514 | return; | 565 | return; |
515 | } | 566 | } |
567 | |||
568 | static void | ||
569 | init_vbt_defaults(struct drm_i915_private *dev_priv) | ||
570 | { | ||
571 | struct drm_device *dev = dev_priv->dev; | ||
572 | |||
573 | dev_priv->crt_ddc_pin = GMBUS_PORT_VGADDC; | ||
574 | |||
575 | /* LFP panel data */ | ||
576 | dev_priv->lvds_dither = 1; | ||
577 | dev_priv->lvds_vbt = 0; | ||
578 | |||
579 | /* SDVO panel data */ | ||
580 | dev_priv->sdvo_lvds_vbt_mode = NULL; | ||
581 | |||
582 | /* general features */ | ||
583 | dev_priv->int_tv_support = 1; | ||
584 | dev_priv->int_crt_support = 1; | ||
585 | |||
586 | /* Default to using SSC */ | ||
587 | dev_priv->lvds_use_ssc = 1; | ||
588 | dev_priv->lvds_ssc_freq = intel_bios_ssc_frequency(dev, 1); | ||
589 | DRM_DEBUG("Set default to SSC at %dMHz\n", dev_priv->lvds_ssc_freq); | ||
590 | |||
591 | /* eDP data */ | ||
592 | dev_priv->edp.bpp = 18; | ||
593 | } | ||
594 | |||
516 | /** | 595 | /** |
517 | * intel_init_bios - initialize VBIOS settings & find VBT | 596 | * intel_parse_bios - find VBT and initialize settings from the BIOS |
518 | * @dev: DRM device | 597 | * @dev: DRM device |
519 | * | 598 | * |
520 | * Loads the Video BIOS and checks that the VBT exists. Sets scratch registers | 599 | * Loads the Video BIOS and checks that the VBT exists. Sets scratch registers |
521 | * to appropriate values. | 600 | * to appropriate values. |
522 | * | 601 | * |
523 | * VBT existence is a sanity check that is relied on by other i830_bios.c code. | ||
524 | * Note that it would be better to use a BIOS call to get the VBT, as BIOSes may | ||
525 | * feed an updated VBT back through that, compared to what we'll fetch using | ||
526 | * this method of groping around in the BIOS data. | ||
527 | * | ||
528 | * Returns 0 on success, nonzero on failure. | 602 | * Returns 0 on success, nonzero on failure. |
529 | */ | 603 | */ |
530 | bool | 604 | bool |
531 | intel_init_bios(struct drm_device *dev) | 605 | intel_parse_bios(struct drm_device *dev) |
532 | { | 606 | { |
533 | struct drm_i915_private *dev_priv = dev->dev_private; | 607 | struct drm_i915_private *dev_priv = dev->dev_private; |
534 | struct pci_dev *pdev = dev->pdev; | 608 | struct pci_dev *pdev = dev->pdev; |
535 | struct vbt_header *vbt = NULL; | 609 | struct bdb_header *bdb = NULL; |
536 | struct bdb_header *bdb; | 610 | u8 __iomem *bios = NULL; |
537 | u8 __iomem *bios; | 611 | |
538 | size_t size; | 612 | init_vbt_defaults(dev_priv); |
539 | int i; | 613 | |
540 | 614 | /* XXX Should this validation be moved to intel_opregion.c? */ | |
541 | bios = pci_map_rom(pdev, &size); | 615 | if (dev_priv->opregion.vbt) { |
542 | if (!bios) | 616 | struct vbt_header *vbt = dev_priv->opregion.vbt; |
543 | return -1; | 617 | if (memcmp(vbt->signature, "$VBT", 4) == 0) { |
544 | 618 | DRM_DEBUG_DRIVER("Using VBT from OpRegion: %20s\n", | |
545 | /* Scour memory looking for the VBT signature */ | 619 | vbt->signature); |
546 | for (i = 0; i + 4 < size; i++) { | 620 | bdb = (struct bdb_header *)((char *)vbt + vbt->bdb_offset); |
547 | if (!memcmp(bios + i, "$VBT", 4)) { | 621 | } else |
548 | vbt = (struct vbt_header *)(bios + i); | 622 | dev_priv->opregion.vbt = NULL; |
549 | break; | ||
550 | } | ||
551 | } | 623 | } |
552 | 624 | ||
553 | if (!vbt) { | 625 | if (bdb == NULL) { |
554 | DRM_ERROR("VBT signature missing\n"); | 626 | struct vbt_header *vbt = NULL; |
555 | pci_unmap_rom(pdev, bios); | 627 | size_t size; |
556 | return -1; | 628 | int i; |
557 | } | 629 | |
630 | bios = pci_map_rom(pdev, &size); | ||
631 | if (!bios) | ||
632 | return -1; | ||
633 | |||
634 | /* Scour memory looking for the VBT signature */ | ||
635 | for (i = 0; i + 4 < size; i++) { | ||
636 | if (!memcmp(bios + i, "$VBT", 4)) { | ||
637 | vbt = (struct vbt_header *)(bios + i); | ||
638 | break; | ||
639 | } | ||
640 | } | ||
558 | 641 | ||
559 | bdb = (struct bdb_header *)(bios + i + vbt->bdb_offset); | 642 | if (!vbt) { |
643 | DRM_ERROR("VBT signature missing\n"); | ||
644 | pci_unmap_rom(pdev, bios); | ||
645 | return -1; | ||
646 | } | ||
647 | |||
648 | bdb = (struct bdb_header *)(bios + i + vbt->bdb_offset); | ||
649 | } | ||
560 | 650 | ||
561 | /* Grab useful general definitions */ | 651 | /* Grab useful general definitions */ |
562 | parse_general_features(dev_priv, bdb); | 652 | parse_general_features(dev_priv, bdb); |
@@ -568,7 +658,25 @@ intel_init_bios(struct drm_device *dev) | |||
568 | parse_driver_features(dev_priv, bdb); | 658 | parse_driver_features(dev_priv, bdb); |
569 | parse_edp(dev_priv, bdb); | 659 | parse_edp(dev_priv, bdb); |
570 | 660 | ||
571 | pci_unmap_rom(pdev, bios); | 661 | if (bios) |
662 | pci_unmap_rom(pdev, bios); | ||
572 | 663 | ||
573 | return 0; | 664 | return 0; |
574 | } | 665 | } |
666 | |||
667 | /* Ensure that vital registers have been initialised, even if the BIOS | ||
668 | * is absent or just failing to do its job. | ||
669 | */ | ||
670 | void intel_setup_bios(struct drm_device *dev) | ||
671 | { | ||
672 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
673 | |||
674 | /* Set the Panel Power On/Off timings if uninitialized. */ | ||
675 | if ((I915_READ(PP_ON_DELAYS) == 0) && (I915_READ(PP_OFF_DELAYS) == 0)) { | ||
676 | /* Set T2 to 40ms and T5 to 200ms */ | ||
677 | I915_WRITE(PP_ON_DELAYS, 0x019007d0); | ||
678 | |||
679 | /* Set T3 to 35ms and Tx to 200ms */ | ||
680 | I915_WRITE(PP_OFF_DELAYS, 0x015e07d0); | ||
681 | } | ||
682 | } | ||
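Taken together, the two exported entry points now split BIOS handling: intel_setup_bios() unconditionally repairs the panel power-sequencing registers, while intel_parse_bios() seeds defaults via init_vbt_defaults() and then overrides them from the OpRegion VBT or, failing that, a PCI ROM scan. A hedged sketch of the call ordering a load path might use; the surrounding function and the intel_opregion_setup() placement are assumptions for illustration, not part of this diff:

	intel_setup_bios(dev);		/* fix PP_ON_DELAYS/PP_OFF_DELAYS if the BIOS left them zero */

	intel_opregion_setup(dev);	/* assumed to run first, so opregion.vbt can be preferred */

	if (intel_parse_bios(dev))
		DRM_INFO("failed to find VBIOS tables\n");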
diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h index 4c18514f6f80..5f8e4edcbbb9 100644 --- a/drivers/gpu/drm/i915/intel_bios.h +++ b/drivers/gpu/drm/i915/intel_bios.h | |||
@@ -197,7 +197,8 @@ struct bdb_general_features { | |||
197 | struct child_device_config { | 197 | struct child_device_config { |
198 | u16 handle; | 198 | u16 handle; |
199 | u16 device_type; | 199 | u16 device_type; |
200 | u8 device_id[10]; /* See DEVICE_TYPE_* above */ | 200 | u8 i2c_speed; |
201 | u8 rsvd[9]; | ||
201 | u16 addin_offset; | 202 | u16 addin_offset; |
202 | u8 dvo_port; /* See Device_PORT_* above */ | 203 | u8 dvo_port; /* See Device_PORT_* above */ |
203 | u8 i2c_pin; | 204 | u8 i2c_pin; |
@@ -466,7 +467,8 @@ struct bdb_edp { | |||
466 | struct edp_link_params link_params[16]; | 467 | struct edp_link_params link_params[16]; |
467 | } __attribute__ ((packed)); | 468 | } __attribute__ ((packed)); |
468 | 469 | ||
469 | bool intel_init_bios(struct drm_device *dev); | 470 | void intel_setup_bios(struct drm_device *dev); |
471 | bool intel_parse_bios(struct drm_device *dev); | ||
470 | 472 | ||
471 | /* | 473 | /* |
472 | * Driver<->VBIOS interaction occurs through scratch bits in | 474 | * Driver<->VBIOS interaction occurs through scratch bits in |
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index 197d4f32585a..0979d8877880 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c | |||
@@ -30,10 +30,30 @@ | |||
30 | #include "drm.h" | 30 | #include "drm.h" |
31 | #include "drm_crtc.h" | 31 | #include "drm_crtc.h" |
32 | #include "drm_crtc_helper.h" | 32 | #include "drm_crtc_helper.h" |
33 | #include "drm_edid.h" | ||
33 | #include "intel_drv.h" | 34 | #include "intel_drv.h" |
34 | #include "i915_drm.h" | 35 | #include "i915_drm.h" |
35 | #include "i915_drv.h" | 36 | #include "i915_drv.h" |
36 | 37 | ||
38 | /* Here's the desired hotplug mode */ | ||
39 | #define ADPA_HOTPLUG_BITS (ADPA_CRT_HOTPLUG_PERIOD_128 | \ | ||
40 | ADPA_CRT_HOTPLUG_WARMUP_10MS | \ | ||
41 | ADPA_CRT_HOTPLUG_SAMPLE_4S | \ | ||
42 | ADPA_CRT_HOTPLUG_VOLTAGE_50 | \ | ||
43 | ADPA_CRT_HOTPLUG_VOLREF_325MV | \ | ||
44 | ADPA_CRT_HOTPLUG_ENABLE) | ||
45 | |||
46 | struct intel_crt { | ||
47 | struct intel_encoder base; | ||
48 | bool force_hotplug_required; | ||
49 | }; | ||
50 | |||
51 | static struct intel_crt *intel_attached_crt(struct drm_connector *connector) | ||
52 | { | ||
53 | return container_of(intel_attached_encoder(connector), | ||
54 | struct intel_crt, base); | ||
55 | } | ||
56 | |||
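intel_attached_crt() above is the standard kernel subclassing idiom: struct intel_crt embeds struct intel_encoder as its 'base' member, so container_of() can recover the derived object from the base pointer the DRM core hands back. A freestanding model of the idiom with invented types (the kernel's own container_of() additionally type-checks the pointer):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct base_encoder { int type; };

struct derived_crt {
	struct base_encoder base;	/* embedded base, as in struct intel_crt */
	int force_hotplug_required;
};

int main(void)
{
	struct derived_crt crt = { .base = { .type = 1 } };
	struct base_encoder *enc = &crt.base;	/* what the core stores and returns */

	struct derived_crt *back = container_of(enc, struct derived_crt, base);
	printf("recovered %p, original %p\n", (void *)back, (void *)&crt);
	return 0;
}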
37 | static void intel_crt_dpms(struct drm_encoder *encoder, int mode) | 57 | static void intel_crt_dpms(struct drm_encoder *encoder, int mode) |
38 | { | 58 | { |
39 | struct drm_device *dev = encoder->dev; | 59 | struct drm_device *dev = encoder->dev; |
@@ -79,7 +99,7 @@ static int intel_crt_mode_valid(struct drm_connector *connector, | |||
79 | if (mode->clock < 25000) | 99 | if (mode->clock < 25000) |
80 | return MODE_CLOCK_LOW; | 100 | return MODE_CLOCK_LOW; |
81 | 101 | ||
82 | if (!IS_I9XX(dev)) | 102 | if (IS_GEN2(dev)) |
83 | max_clock = 350000; | 103 | max_clock = 350000; |
84 | else | 104 | else |
85 | max_clock = 400000; | 105 | max_clock = 400000; |
@@ -109,10 +129,7 @@ static void intel_crt_mode_set(struct drm_encoder *encoder, | |||
109 | u32 adpa, dpll_md; | 129 | u32 adpa, dpll_md; |
110 | u32 adpa_reg; | 130 | u32 adpa_reg; |
111 | 131 | ||
112 | if (intel_crtc->pipe == 0) | 132 | dpll_md_reg = DPLL_MD(intel_crtc->pipe); |
113 | dpll_md_reg = DPLL_A_MD; | ||
114 | else | ||
115 | dpll_md_reg = DPLL_B_MD; | ||
116 | 133 | ||
117 | if (HAS_PCH_SPLIT(dev)) | 134 | if (HAS_PCH_SPLIT(dev)) |
118 | adpa_reg = PCH_ADPA; | 135 | adpa_reg = PCH_ADPA; |
@@ -123,13 +140,13 @@ static void intel_crt_mode_set(struct drm_encoder *encoder, | |||
123 | * Disable separate mode multiplier used when cloning SDVO to CRT | 140 | * Disable separate mode multiplier used when cloning SDVO to CRT |
124 | * XXX this needs to be adjusted when we really are cloning | 141 | * XXX this needs to be adjusted when we really are cloning |
125 | */ | 142 | */ |
126 | if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev)) { | 143 | if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) { |
127 | dpll_md = I915_READ(dpll_md_reg); | 144 | dpll_md = I915_READ(dpll_md_reg); |
128 | I915_WRITE(dpll_md_reg, | 145 | I915_WRITE(dpll_md_reg, |
129 | dpll_md & ~DPLL_MD_UDI_MULTIPLIER_MASK); | 146 | dpll_md & ~DPLL_MD_UDI_MULTIPLIER_MASK); |
130 | } | 147 | } |
131 | 148 | ||
132 | adpa = 0; | 149 | adpa = ADPA_HOTPLUG_BITS; |
133 | if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) | 150 | if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) |
134 | adpa |= ADPA_HSYNC_ACTIVE_HIGH; | 151 | adpa |= ADPA_HSYNC_ACTIVE_HIGH; |
135 | if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) | 152 | if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) |
@@ -140,69 +157,60 @@ static void intel_crt_mode_set(struct drm_encoder *encoder, | |||
140 | adpa |= PORT_TRANS_A_SEL_CPT; | 157 | adpa |= PORT_TRANS_A_SEL_CPT; |
141 | else | 158 | else |
142 | adpa |= ADPA_PIPE_A_SELECT; | 159 | adpa |= ADPA_PIPE_A_SELECT; |
143 | if (!HAS_PCH_SPLIT(dev)) | ||
144 | I915_WRITE(BCLRPAT_A, 0); | ||
145 | } else { | 160 | } else { |
146 | if (HAS_PCH_CPT(dev)) | 161 | if (HAS_PCH_CPT(dev)) |
147 | adpa |= PORT_TRANS_B_SEL_CPT; | 162 | adpa |= PORT_TRANS_B_SEL_CPT; |
148 | else | 163 | else |
149 | adpa |= ADPA_PIPE_B_SELECT; | 164 | adpa |= ADPA_PIPE_B_SELECT; |
150 | if (!HAS_PCH_SPLIT(dev)) | ||
151 | I915_WRITE(BCLRPAT_B, 0); | ||
152 | } | 165 | } |
153 | 166 | ||
167 | if (!HAS_PCH_SPLIT(dev)) | ||
168 | I915_WRITE(BCLRPAT(intel_crtc->pipe), 0); | ||
169 | |||
154 | I915_WRITE(adpa_reg, adpa); | 170 | I915_WRITE(adpa_reg, adpa); |
155 | } | 171 | } |
156 | 172 | ||
157 | static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector) | 173 | static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector) |
158 | { | 174 | { |
159 | struct drm_device *dev = connector->dev; | 175 | struct drm_device *dev = connector->dev; |
176 | struct intel_crt *crt = intel_attached_crt(connector); | ||
160 | struct drm_i915_private *dev_priv = dev->dev_private; | 177 | struct drm_i915_private *dev_priv = dev->dev_private; |
161 | u32 adpa, temp; | 178 | u32 adpa; |
162 | bool ret; | 179 | bool ret; |
163 | bool turn_off_dac = false; | ||
164 | 180 | ||
165 | temp = adpa = I915_READ(PCH_ADPA); | 181 | /* The first time through, trigger an explicit detection cycle */ |
182 | if (crt->force_hotplug_required) { | ||
183 | bool turn_off_dac = HAS_PCH_SPLIT(dev); | ||
184 | u32 save_adpa; | ||
166 | 185 | ||
167 | if (HAS_PCH_SPLIT(dev)) | 186 | crt->force_hotplug_required = 0; |
168 | turn_off_dac = true; | 187 | |
169 | 188 | save_adpa = adpa = I915_READ(PCH_ADPA); | |
170 | adpa &= ~ADPA_CRT_HOTPLUG_MASK; | 189 | DRM_DEBUG_KMS("trigger hotplug detect cycle: adpa=0x%x\n", adpa); |
171 | if (turn_off_dac) | 190 | |
172 | adpa &= ~ADPA_DAC_ENABLE; | 191 | adpa |= ADPA_CRT_HOTPLUG_FORCE_TRIGGER; |
173 | 192 | if (turn_off_dac) | |
174 | /* disable HPD first */ | 193 | adpa &= ~ADPA_DAC_ENABLE; |
175 | I915_WRITE(PCH_ADPA, adpa); | 194 | |
176 | (void)I915_READ(PCH_ADPA); | 195 | I915_WRITE(PCH_ADPA, adpa); |
177 | 196 | ||
178 | adpa |= (ADPA_CRT_HOTPLUG_PERIOD_128 | | 197 | if (wait_for((I915_READ(PCH_ADPA) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0, |
179 | ADPA_CRT_HOTPLUG_WARMUP_10MS | | 198 | 1000)) |
180 | ADPA_CRT_HOTPLUG_SAMPLE_4S | | 199 | DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER"); |
181 | ADPA_CRT_HOTPLUG_VOLTAGE_50 | /* default */ | 200 | |
182 | ADPA_CRT_HOTPLUG_VOLREF_325MV | | 201 | if (turn_off_dac) { |
183 | ADPA_CRT_HOTPLUG_ENABLE | | 202 | I915_WRITE(PCH_ADPA, save_adpa); |
184 | ADPA_CRT_HOTPLUG_FORCE_TRIGGER); | 203 | POSTING_READ(PCH_ADPA); |
185 | 204 | } | |
186 | DRM_DEBUG_KMS("pch crt adpa 0x%x", adpa); | ||
187 | I915_WRITE(PCH_ADPA, adpa); | ||
188 | |||
189 | if (wait_for((I915_READ(PCH_ADPA) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0, | ||
190 | 1000, 1)) | ||
191 | DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER"); | ||
192 | |||
193 | if (turn_off_dac) { | ||
194 | I915_WRITE(PCH_ADPA, temp); | ||
195 | (void)I915_READ(PCH_ADPA); | ||
196 | } | 205 | } |
197 | 206 | ||
198 | /* Check the status to see if both blue and green are on now */ | 207 | /* Check the status to see if both blue and green are on now */ |
199 | adpa = I915_READ(PCH_ADPA); | 208 | adpa = I915_READ(PCH_ADPA); |
200 | adpa &= ADPA_CRT_HOTPLUG_MONITOR_MASK; | 209 | if ((adpa & ADPA_CRT_HOTPLUG_MONITOR_MASK) != 0) |
201 | if ((adpa == ADPA_CRT_HOTPLUG_MONITOR_COLOR) || | ||
202 | (adpa == ADPA_CRT_HOTPLUG_MONITOR_MONO)) | ||
203 | ret = true; | 210 | ret = true; |
204 | else | 211 | else |
205 | ret = false; | 212 | ret = false; |
213 | DRM_DEBUG_KMS("ironlake hotplug adpa=0x%x, result %d\n", adpa, ret); | ||
206 | 214 | ||
207 | return ret; | 215 | return ret; |
208 | } | 216 | } |
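The rewrite above turns the expensive FORCE_TRIGGER dance into a one-shot: the cycle only runs while crt->force_hotplug_required is set (armed at init and again from intel_crt_reset() below), after which detection simply samples the monitor-sense bits the hardware latched. A toy model of that arm-once/sample-after pattern, with invented names, purely to show the control flow:

#include <stdbool.h>
#include <stdio.h>

/* Toy model (invented names) of the one-shot detect cycle above. */
struct toy_crt {
	bool force_hotplug_required;	/* armed at init / reset */
	unsigned int monitor_bits;	/* stands in for the latched ADPA sense bits */
};

static bool toy_detect(struct toy_crt *crt, bool monitor_present)
{
	if (crt->force_hotplug_required) {
		crt->force_hotplug_required = false;
		/* the real code forces a trigger and waits for it to clear */
		crt->monitor_bits = monitor_present ? 0x3 : 0;
	}
	return crt->monitor_bits != 0;	/* later calls only sample the latch */
}

int main(void)
{
	struct toy_crt crt = { .force_hotplug_required = true };
	printf("probe 1: %d\n", toy_detect(&crt, true));	/* runs the cycle -> 1 */
	printf("probe 2: %d\n", toy_detect(&crt, false));	/* samples latch -> 1 */
	return 0;
}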
@@ -244,7 +252,7 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector) | |||
244 | /* wait for FORCE_DETECT to go off */ | 252 | /* wait for FORCE_DETECT to go off */ |
245 | if (wait_for((I915_READ(PORT_HOTPLUG_EN) & | 253 | if (wait_for((I915_READ(PORT_HOTPLUG_EN) & |
246 | CRT_HOTPLUG_FORCE_DETECT) == 0, | 254 | CRT_HOTPLUG_FORCE_DETECT) == 0, |
247 | 1000, 1)) | 255 | 1000)) |
248 | DRM_DEBUG_KMS("timed out waiting for FORCE_DETECT to go off"); | 256 | DRM_DEBUG_KMS("timed out waiting for FORCE_DETECT to go off"); |
249 | } | 257 | } |
250 | 258 | ||
@@ -261,25 +269,51 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector) | |||
261 | return ret; | 269 | return ret; |
262 | } | 270 | } |
263 | 271 | ||
264 | static bool intel_crt_detect_ddc(struct drm_encoder *encoder) | 272 | static bool intel_crt_detect_ddc(struct drm_connector *connector) |
265 | { | 273 | { |
266 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); | 274 | struct intel_crt *crt = intel_attached_crt(connector); |
275 | struct drm_i915_private *dev_priv = crt->base.base.dev->dev_private; | ||
267 | 276 | ||
268 | /* CRT should always be at 0, but check anyway */ | 277 | /* CRT should always be at 0, but check anyway */ |
269 | if (intel_encoder->type != INTEL_OUTPUT_ANALOG) | 278 | if (crt->base.type != INTEL_OUTPUT_ANALOG) |
270 | return false; | 279 | return false; |
271 | 280 | ||
272 | return intel_ddc_probe(intel_encoder); | 281 | if (intel_ddc_probe(&crt->base, dev_priv->crt_ddc_pin)) { |
282 | struct edid *edid; | ||
283 | bool is_digital = false; | ||
284 | |||
285 | edid = drm_get_edid(connector, | ||
286 | &dev_priv->gmbus[dev_priv->crt_ddc_pin].adapter); | ||
287 | /* | ||
288 | * This may be a DVI-I connector with a shared DDC | ||
289 | * link between analog and digital outputs, so we | ||
290 | * have to check the EDID input spec of the attached device. | ||
291 | * | ||
292 | * On the other hand, what should we do if it is a broken EDID? | ||
293 | */ | ||
294 | if (edid != NULL) { | ||
295 | is_digital = edid->input & DRM_EDID_INPUT_DIGITAL; | ||
296 | connector->display_info.raw_edid = NULL; | ||
297 | kfree(edid); | ||
298 | } | ||
299 | |||
300 | if (!is_digital) { | ||
301 | DRM_DEBUG_KMS("CRT detected via DDC:0x50 [EDID]\n"); | ||
302 | return true; | ||
303 | } else { | ||
304 | DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [EDID reports a digital panel]\n"); | ||
305 | } | ||
306 | } | ||
307 | |||
308 | return false; | ||
273 | } | 309 | } |
274 | 310 | ||
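The comment in intel_crt_detect_ddc() captures the DVI-I subtlety: the analog and digital halves of the connector can share one DDC bus, so a successful EDID read alone does not prove a VGA monitor; the deciding test is the digital-input bit of the EDID. A minimal standalone sketch of just that check, where struct edid_stub is a stand-in for the real struct edid and the helper name is invented (the DRM_EDID_INPUT_DIGITAL value matches drm_edid.h):

#include <stdbool.h>
#include <stdint.h>

#define DRM_EDID_INPUT_DIGITAL (1 << 7)	/* bit 7 of the EDID input byte */

struct edid_stub {
	uint8_t input;	/* video input definition byte of an EDID block */
};

/* Invented helper: true only if the EDID describes an analog sink. */
static bool edid_says_analog(const struct edid_stub *edid)
{
	if (edid == NULL)
		return false;	/* no EDID at all: cannot confirm a CRT this way */
	return (edid->input & DRM_EDID_INPUT_DIGITAL) == 0;
}

int main(void)
{
	struct edid_stub analog = { .input = 0x00 };
	struct edid_stub digital = { .input = DRM_EDID_INPUT_DIGITAL };

	return (edid_says_analog(&analog) && !edid_says_analog(&digital)) ? 0 : 1;
}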
275 | static enum drm_connector_status | 311 | static enum drm_connector_status |
276 | intel_crt_load_detect(struct drm_crtc *crtc, struct intel_encoder *intel_encoder) | 312 | intel_crt_load_detect(struct intel_crt *crt) |
277 | { | 313 | { |
278 | struct drm_encoder *encoder = &intel_encoder->enc; | 314 | struct drm_device *dev = crt->base.base.dev; |
279 | struct drm_device *dev = encoder->dev; | ||
280 | struct drm_i915_private *dev_priv = dev->dev_private; | 315 | struct drm_i915_private *dev_priv = dev->dev_private; |
281 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 316 | uint32_t pipe = to_intel_crtc(crt->base.base.crtc)->pipe; |
282 | uint32_t pipe = intel_crtc->pipe; | ||
283 | uint32_t save_bclrpat; | 317 | uint32_t save_bclrpat; |
284 | uint32_t save_vtotal; | 318 | uint32_t save_vtotal; |
285 | uint32_t vtotal, vactive; | 319 | uint32_t vtotal, vactive; |
@@ -295,21 +329,14 @@ intel_crt_load_detect(struct drm_crtc *crtc, struct intel_encoder *intel_encoder | |||
295 | uint8_t st00; | 329 | uint8_t st00; |
296 | enum drm_connector_status status; | 330 | enum drm_connector_status status; |
297 | 331 | ||
298 | if (pipe == 0) { | 332 | DRM_DEBUG_KMS("starting load-detect on CRT\n"); |
299 | bclrpat_reg = BCLRPAT_A; | 333 | |
300 | vtotal_reg = VTOTAL_A; | 334 | bclrpat_reg = BCLRPAT(pipe); |
301 | vblank_reg = VBLANK_A; | 335 | vtotal_reg = VTOTAL(pipe); |
302 | vsync_reg = VSYNC_A; | 336 | vblank_reg = VBLANK(pipe); |
303 | pipeconf_reg = PIPEACONF; | 337 | vsync_reg = VSYNC(pipe); |
304 | pipe_dsl_reg = PIPEADSL; | 338 | pipeconf_reg = PIPECONF(pipe); |
305 | } else { | 339 | pipe_dsl_reg = PIPEDSL(pipe); |
306 | bclrpat_reg = BCLRPAT_B; | ||
307 | vtotal_reg = VTOTAL_B; | ||
308 | vblank_reg = VBLANK_B; | ||
309 | vsync_reg = VSYNC_B; | ||
310 | pipeconf_reg = PIPEBCONF; | ||
311 | pipe_dsl_reg = PIPEBDSL; | ||
312 | } | ||
313 | 340 | ||
314 | save_bclrpat = I915_READ(bclrpat_reg); | 341 | save_bclrpat = I915_READ(bclrpat_reg); |
315 | save_vtotal = I915_READ(vtotal_reg); | 342 | save_vtotal = I915_READ(vtotal_reg); |
@@ -324,9 +351,10 @@ intel_crt_load_detect(struct drm_crtc *crtc, struct intel_encoder *intel_encoder | |||
324 | /* Set the border color to purple. */ | 351 | /* Set the border color to purple. */ |
325 | I915_WRITE(bclrpat_reg, 0x500050); | 352 | I915_WRITE(bclrpat_reg, 0x500050); |
326 | 353 | ||
327 | if (IS_I9XX(dev)) { | 354 | if (!IS_GEN2(dev)) { |
328 | uint32_t pipeconf = I915_READ(pipeconf_reg); | 355 | uint32_t pipeconf = I915_READ(pipeconf_reg); |
329 | I915_WRITE(pipeconf_reg, pipeconf | PIPECONF_FORCE_BORDER); | 356 | I915_WRITE(pipeconf_reg, pipeconf | PIPECONF_FORCE_BORDER); |
357 | POSTING_READ(pipeconf_reg); | ||
330 | /* Wait for next Vblank to substitute | 358 | /* Wait for next Vblank to substitute |
331 | * border color for Color info */ | 359 | * border color for Color info */ |
332 | intel_wait_for_vblank(dev, pipe); | 360 | intel_wait_for_vblank(dev, pipe); |
@@ -404,35 +432,41 @@ static enum drm_connector_status | |||
404 | intel_crt_detect(struct drm_connector *connector, bool force) | 432 | intel_crt_detect(struct drm_connector *connector, bool force) |
405 | { | 433 | { |
406 | struct drm_device *dev = connector->dev; | 434 | struct drm_device *dev = connector->dev; |
407 | struct drm_encoder *encoder = intel_attached_encoder(connector); | 435 | struct intel_crt *crt = intel_attached_crt(connector); |
408 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); | ||
409 | struct drm_crtc *crtc; | 436 | struct drm_crtc *crtc; |
410 | int dpms_mode; | ||
411 | enum drm_connector_status status; | 437 | enum drm_connector_status status; |
412 | 438 | ||
413 | if (IS_I9XX(dev) && !IS_I915G(dev) && !IS_I915GM(dev)) { | 439 | if (I915_HAS_HOTPLUG(dev)) { |
414 | if (intel_crt_detect_hotplug(connector)) | 440 | if (intel_crt_detect_hotplug(connector)) { |
441 | DRM_DEBUG_KMS("CRT detected via hotplug\n"); | ||
415 | return connector_status_connected; | 442 | return connector_status_connected; |
416 | else | 443 | } else { |
444 | DRM_DEBUG_KMS("CRT not detected via hotplug\n"); | ||
417 | return connector_status_disconnected; | 445 | return connector_status_disconnected; |
446 | } | ||
418 | } | 447 | } |
419 | 448 | ||
420 | if (intel_crt_detect_ddc(encoder)) | 449 | if (intel_crt_detect_ddc(connector)) |
421 | return connector_status_connected; | 450 | return connector_status_connected; |
422 | 451 | ||
423 | if (!force) | 452 | if (!force) |
424 | return connector->status; | 453 | return connector->status; |
425 | 454 | ||
426 | /* for pre-945g platforms use load detect */ | 455 | /* for pre-945g platforms use load detect */ |
427 | if (encoder->crtc && encoder->crtc->enabled) { | 456 | crtc = crt->base.base.crtc; |
428 | status = intel_crt_load_detect(encoder->crtc, intel_encoder); | 457 | if (crtc && crtc->enabled) { |
458 | status = intel_crt_load_detect(crt); | ||
429 | } else { | 459 | } else { |
430 | crtc = intel_get_load_detect_pipe(intel_encoder, connector, | 460 | struct intel_load_detect_pipe tmp; |
431 | NULL, &dpms_mode); | 461 | |
432 | if (crtc) { | 462 | if (intel_get_load_detect_pipe(&crt->base, connector, NULL, |
433 | status = intel_crt_load_detect(crtc, intel_encoder); | 463 | &tmp)) { |
434 | intel_release_load_detect_pipe(intel_encoder, | 464 | if (intel_crt_detect_ddc(connector)) |
435 | connector, dpms_mode); | 465 | status = connector_status_connected; |
466 | else | ||
467 | status = intel_crt_load_detect(crt); | ||
468 | intel_release_load_detect_pipe(&crt->base, connector, | ||
469 | &tmp); | ||
436 | } else | 470 | } else |
437 | status = connector_status_unknown; | 471 | status = connector_status_unknown; |
438 | } | 472 | } |
@@ -449,32 +483,18 @@ static void intel_crt_destroy(struct drm_connector *connector) | |||
449 | 483 | ||
450 | static int intel_crt_get_modes(struct drm_connector *connector) | 484 | static int intel_crt_get_modes(struct drm_connector *connector) |
451 | { | 485 | { |
452 | int ret; | ||
453 | struct drm_encoder *encoder = intel_attached_encoder(connector); | ||
454 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); | ||
455 | struct i2c_adapter *ddc_bus; | ||
456 | struct drm_device *dev = connector->dev; | 486 | struct drm_device *dev = connector->dev; |
487 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
488 | int ret; | ||
457 | 489 | ||
458 | 490 | ret = intel_ddc_get_modes(connector, | |
459 | ret = intel_ddc_get_modes(connector, intel_encoder->ddc_bus); | 491 | &dev_priv->gmbus[dev_priv->crt_ddc_pin].adapter); |
460 | if (ret || !IS_G4X(dev)) | 492 | if (ret || !IS_G4X(dev)) |
461 | goto end; | 493 | return ret; |
462 | 494 | ||
463 | /* Try to probe digital port for output in DVI-I -> VGA mode. */ | 495 | /* Try to probe digital port for output in DVI-I -> VGA mode. */ |
464 | ddc_bus = intel_i2c_create(connector->dev, GPIOD, "CRTDDC_D"); | 496 | return intel_ddc_get_modes(connector, |
465 | 497 | &dev_priv->gmbus[GMBUS_PORT_DPB].adapter); | |
466 | if (!ddc_bus) { | ||
467 | dev_printk(KERN_ERR, &connector->dev->pdev->dev, | ||
468 | "DDC bus registration failed for CRTDDC_D.\n"); | ||
469 | goto end; | ||
470 | } | ||
471 | /* Try to get modes by GPIOD port */ | ||
472 | ret = intel_ddc_get_modes(connector, ddc_bus); | ||
473 | intel_i2c_destroy(ddc_bus); | ||
474 | |||
475 | end: | ||
476 | return ret; | ||
477 | |||
478 | } | 498 | } |
479 | 499 | ||
480 | static int intel_crt_set_property(struct drm_connector *connector, | 500 | static int intel_crt_set_property(struct drm_connector *connector, |
@@ -484,6 +504,15 @@ static int intel_crt_set_property(struct drm_connector *connector, | |||
484 | return 0; | 504 | return 0; |
485 | } | 505 | } |
486 | 506 | ||
507 | static void intel_crt_reset(struct drm_connector *connector) | ||
508 | { | ||
509 | struct drm_device *dev = connector->dev; | ||
510 | struct intel_crt *crt = intel_attached_crt(connector); | ||
511 | |||
512 | if (HAS_PCH_SPLIT(dev)) | ||
513 | crt->force_hotplug_required = 1; | ||
514 | } | ||
515 | |||
487 | /* | 516 | /* |
488 | * Routines for controlling stuff on the analog port | 517 | * Routines for controlling stuff on the analog port |
489 | */ | 518 | */ |
@@ -497,6 +526,7 @@ static const struct drm_encoder_helper_funcs intel_crt_helper_funcs = { | |||
497 | }; | 526 | }; |
498 | 527 | ||
499 | static const struct drm_connector_funcs intel_crt_connector_funcs = { | 528 | static const struct drm_connector_funcs intel_crt_connector_funcs = { |
529 | .reset = intel_crt_reset, | ||
500 | .dpms = drm_helper_connector_dpms, | 530 | .dpms = drm_helper_connector_dpms, |
501 | .detect = intel_crt_detect, | 531 | .detect = intel_crt_detect, |
502 | .fill_modes = drm_helper_probe_single_connector_modes, | 532 | .fill_modes = drm_helper_probe_single_connector_modes, |
@@ -507,7 +537,7 @@ static const struct drm_connector_funcs intel_crt_connector_funcs = { | |||
507 | static const struct drm_connector_helper_funcs intel_crt_connector_helper_funcs = { | 537 | static const struct drm_connector_helper_funcs intel_crt_connector_helper_funcs = { |
508 | .mode_valid = intel_crt_mode_valid, | 538 | .mode_valid = intel_crt_mode_valid, |
509 | .get_modes = intel_crt_get_modes, | 539 | .get_modes = intel_crt_get_modes, |
510 | .best_encoder = intel_attached_encoder, | 540 | .best_encoder = intel_best_encoder, |
511 | }; | 541 | }; |
512 | 542 | ||
513 | static const struct drm_encoder_funcs intel_crt_enc_funcs = { | 543 | static const struct drm_encoder_funcs intel_crt_enc_funcs = { |
@@ -517,18 +547,17 @@ static const struct drm_encoder_funcs intel_crt_enc_funcs = { | |||
517 | void intel_crt_init(struct drm_device *dev) | 547 | void intel_crt_init(struct drm_device *dev) |
518 | { | 548 | { |
519 | struct drm_connector *connector; | 549 | struct drm_connector *connector; |
520 | struct intel_encoder *intel_encoder; | 550 | struct intel_crt *crt; |
521 | struct intel_connector *intel_connector; | 551 | struct intel_connector *intel_connector; |
522 | struct drm_i915_private *dev_priv = dev->dev_private; | 552 | struct drm_i915_private *dev_priv = dev->dev_private; |
523 | u32 i2c_reg; | ||
524 | 553 | ||
525 | intel_encoder = kzalloc(sizeof(struct intel_encoder), GFP_KERNEL); | 554 | crt = kzalloc(sizeof(struct intel_crt), GFP_KERNEL); |
526 | if (!intel_encoder) | 555 | if (!crt) |
527 | return; | 556 | return; |
528 | 557 | ||
529 | intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL); | 558 | intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL); |
530 | if (!intel_connector) { | 559 | if (!intel_connector) { |
531 | kfree(intel_encoder); | 560 | kfree(crt); |
532 | return; | 561 | return; |
533 | } | 562 | } |
534 | 563 | ||
@@ -536,37 +565,20 @@ void intel_crt_init(struct drm_device *dev) | |||
536 | drm_connector_init(dev, &intel_connector->base, | 565 | drm_connector_init(dev, &intel_connector->base, |
537 | &intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA); | 566 | &intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA); |
538 | 567 | ||
539 | drm_encoder_init(dev, &intel_encoder->enc, &intel_crt_enc_funcs, | 568 | drm_encoder_init(dev, &crt->base.base, &intel_crt_enc_funcs, |
540 | DRM_MODE_ENCODER_DAC); | 569 | DRM_MODE_ENCODER_DAC); |
541 | 570 | ||
542 | drm_mode_connector_attach_encoder(&intel_connector->base, | 571 | intel_connector_attach_encoder(intel_connector, &crt->base); |
543 | &intel_encoder->enc); | ||
544 | 572 | ||
545 | /* Set up the DDC bus. */ | 573 | crt->base.type = INTEL_OUTPUT_ANALOG; |
546 | if (HAS_PCH_SPLIT(dev)) | 574 | crt->base.clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT | |
547 | i2c_reg = PCH_GPIOA; | 575 | 1 << INTEL_ANALOG_CLONE_BIT | |
548 | else { | 576 | 1 << INTEL_SDVO_LVDS_CLONE_BIT); |
549 | i2c_reg = GPIOA; | 577 | crt->base.crtc_mask = (1 << 0) | (1 << 1); |
550 | /* Use VBT information for CRT DDC if available */ | ||
551 | if (dev_priv->crt_ddc_bus != 0) | ||
552 | i2c_reg = dev_priv->crt_ddc_bus; | ||
553 | } | ||
554 | intel_encoder->ddc_bus = intel_i2c_create(dev, i2c_reg, "CRTDDC_A"); | ||
555 | if (!intel_encoder->ddc_bus) { | ||
556 | dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration " | ||
557 | "failed.\n"); | ||
558 | return; | ||
559 | } | ||
560 | |||
561 | intel_encoder->type = INTEL_OUTPUT_ANALOG; | ||
562 | intel_encoder->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) | | ||
563 | (1 << INTEL_ANALOG_CLONE_BIT) | | ||
564 | (1 << INTEL_SDVO_LVDS_CLONE_BIT); | ||
565 | intel_encoder->crtc_mask = (1 << 0) | (1 << 1); | ||
566 | connector->interlace_allowed = 1; | 578 | connector->interlace_allowed = 1; |
567 | connector->doublescan_allowed = 0; | 579 | connector->doublescan_allowed = 0; |
568 | 580 | ||
569 | drm_encoder_helper_add(&intel_encoder->enc, &intel_crt_helper_funcs); | 581 | drm_encoder_helper_add(&crt->base.base, &intel_crt_helper_funcs); |
570 | drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs); | 582 | drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs); |
571 | 583 | ||
572 | drm_sysfs_connector_add(connector); | 584 | drm_sysfs_connector_add(connector); |
@@ -576,5 +588,22 @@ void intel_crt_init(struct drm_device *dev) | |||
576 | else | 588 | else |
577 | connector->polled = DRM_CONNECTOR_POLL_CONNECT; | 589 | connector->polled = DRM_CONNECTOR_POLL_CONNECT; |
578 | 590 | ||
591 | /* | ||
592 | * Configure the automatic hotplug detection stuff | ||
593 | */ | ||
594 | crt->force_hotplug_required = 0; | ||
595 | if (HAS_PCH_SPLIT(dev)) { | ||
596 | u32 adpa; | ||
597 | |||
598 | adpa = I915_READ(PCH_ADPA); | ||
599 | adpa &= ~ADPA_CRT_HOTPLUG_MASK; | ||
600 | adpa |= ADPA_HOTPLUG_BITS; | ||
601 | I915_WRITE(PCH_ADPA, adpa); | ||
602 | POSTING_READ(PCH_ADPA); | ||
603 | |||
604 | DRM_DEBUG_KMS("pch crt adpa set to 0x%x\n", adpa); | ||
605 | crt->force_hotplug_required = 1; | ||
606 | } | ||
607 | |||
579 | dev_priv->hotplug_supported_mask |= CRT_HOTPLUG_INT_STATUS; | 608 | dev_priv->hotplug_supported_mask |= CRT_HOTPLUG_INT_STATUS; |
580 | } | 609 | } |
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 979228594599..0f1c799afea1 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
@@ -43,8 +43,8 @@ | |||
43 | 43 | ||
44 | bool intel_pipe_has_type (struct drm_crtc *crtc, int type); | 44 | bool intel_pipe_has_type (struct drm_crtc *crtc, int type); |
45 | static void intel_update_watermarks(struct drm_device *dev); | 45 | static void intel_update_watermarks(struct drm_device *dev); |
46 | static void intel_increase_pllclock(struct drm_crtc *crtc, bool schedule); | 46 | static void intel_increase_pllclock(struct drm_crtc *crtc); |
47 | static void intel_crtc_update_cursor(struct drm_crtc *crtc); | 47 | static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on); |
48 | 48 | ||
49 | typedef struct { | 49 | typedef struct { |
50 | /* given values */ | 50 | /* given values */ |
@@ -76,255 +76,6 @@ struct intel_limit { | |||
76 | int, int, intel_clock_t *); | 76 | int, int, intel_clock_t *); |
77 | }; | 77 | }; |
78 | 78 | ||
79 | #define I8XX_DOT_MIN 25000 | ||
80 | #define I8XX_DOT_MAX 350000 | ||
81 | #define I8XX_VCO_MIN 930000 | ||
82 | #define I8XX_VCO_MAX 1400000 | ||
83 | #define I8XX_N_MIN 3 | ||
84 | #define I8XX_N_MAX 16 | ||
85 | #define I8XX_M_MIN 96 | ||
86 | #define I8XX_M_MAX 140 | ||
87 | #define I8XX_M1_MIN 18 | ||
88 | #define I8XX_M1_MAX 26 | ||
89 | #define I8XX_M2_MIN 6 | ||
90 | #define I8XX_M2_MAX 16 | ||
91 | #define I8XX_P_MIN 4 | ||
92 | #define I8XX_P_MAX 128 | ||
93 | #define I8XX_P1_MIN 2 | ||
94 | #define I8XX_P1_MAX 33 | ||
95 | #define I8XX_P1_LVDS_MIN 1 | ||
96 | #define I8XX_P1_LVDS_MAX 6 | ||
97 | #define I8XX_P2_SLOW 4 | ||
98 | #define I8XX_P2_FAST 2 | ||
99 | #define I8XX_P2_LVDS_SLOW 14 | ||
100 | #define I8XX_P2_LVDS_FAST 7 | ||
101 | #define I8XX_P2_SLOW_LIMIT 165000 | ||
102 | |||
103 | #define I9XX_DOT_MIN 20000 | ||
104 | #define I9XX_DOT_MAX 400000 | ||
105 | #define I9XX_VCO_MIN 1400000 | ||
106 | #define I9XX_VCO_MAX 2800000 | ||
107 | #define PINEVIEW_VCO_MIN 1700000 | ||
108 | #define PINEVIEW_VCO_MAX 3500000 | ||
109 | #define I9XX_N_MIN 1 | ||
110 | #define I9XX_N_MAX 6 | ||
111 | /* Pineview's Ncounter is a ring counter */ | ||
112 | #define PINEVIEW_N_MIN 3 | ||
113 | #define PINEVIEW_N_MAX 6 | ||
114 | #define I9XX_M_MIN 70 | ||
115 | #define I9XX_M_MAX 120 | ||
116 | #define PINEVIEW_M_MIN 2 | ||
117 | #define PINEVIEW_M_MAX 256 | ||
118 | #define I9XX_M1_MIN 10 | ||
119 | #define I9XX_M1_MAX 22 | ||
120 | #define I9XX_M2_MIN 5 | ||
121 | #define I9XX_M2_MAX 9 | ||
122 | /* Pineview M1 is reserved, and must be 0 */ | ||
123 | #define PINEVIEW_M1_MIN 0 | ||
124 | #define PINEVIEW_M1_MAX 0 | ||
125 | #define PINEVIEW_M2_MIN 0 | ||
126 | #define PINEVIEW_M2_MAX 254 | ||
127 | #define I9XX_P_SDVO_DAC_MIN 5 | ||
128 | #define I9XX_P_SDVO_DAC_MAX 80 | ||
129 | #define I9XX_P_LVDS_MIN 7 | ||
130 | #define I9XX_P_LVDS_MAX 98 | ||
131 | #define PINEVIEW_P_LVDS_MIN 7 | ||
132 | #define PINEVIEW_P_LVDS_MAX 112 | ||
133 | #define I9XX_P1_MIN 1 | ||
134 | #define I9XX_P1_MAX 8 | ||
135 | #define I9XX_P2_SDVO_DAC_SLOW 10 | ||
136 | #define I9XX_P2_SDVO_DAC_FAST 5 | ||
137 | #define I9XX_P2_SDVO_DAC_SLOW_LIMIT 200000 | ||
138 | #define I9XX_P2_LVDS_SLOW 14 | ||
139 | #define I9XX_P2_LVDS_FAST 7 | ||
140 | #define I9XX_P2_LVDS_SLOW_LIMIT 112000 | ||
141 | |||
142 | /*The parameter is for SDVO on G4x platform*/ | ||
143 | #define G4X_DOT_SDVO_MIN 25000 | ||
144 | #define G4X_DOT_SDVO_MAX 270000 | ||
145 | #define G4X_VCO_MIN 1750000 | ||
146 | #define G4X_VCO_MAX 3500000 | ||
147 | #define G4X_N_SDVO_MIN 1 | ||
148 | #define G4X_N_SDVO_MAX 4 | ||
149 | #define G4X_M_SDVO_MIN 104 | ||
150 | #define G4X_M_SDVO_MAX 138 | ||
151 | #define G4X_M1_SDVO_MIN 17 | ||
152 | #define G4X_M1_SDVO_MAX 23 | ||
153 | #define G4X_M2_SDVO_MIN 5 | ||
154 | #define G4X_M2_SDVO_MAX 11 | ||
155 | #define G4X_P_SDVO_MIN 10 | ||
156 | #define G4X_P_SDVO_MAX 30 | ||
157 | #define G4X_P1_SDVO_MIN 1 | ||
158 | #define G4X_P1_SDVO_MAX 3 | ||
159 | #define G4X_P2_SDVO_SLOW 10 | ||
160 | #define G4X_P2_SDVO_FAST 10 | ||
161 | #define G4X_P2_SDVO_LIMIT 270000 | ||
162 | |||
163 | /*The parameter is for HDMI_DAC on G4x platform*/ | ||
164 | #define G4X_DOT_HDMI_DAC_MIN 22000 | ||
165 | #define G4X_DOT_HDMI_DAC_MAX 400000 | ||
166 | #define G4X_N_HDMI_DAC_MIN 1 | ||
167 | #define G4X_N_HDMI_DAC_MAX 4 | ||
168 | #define G4X_M_HDMI_DAC_MIN 104 | ||
169 | #define G4X_M_HDMI_DAC_MAX 138 | ||
170 | #define G4X_M1_HDMI_DAC_MIN 16 | ||
171 | #define G4X_M1_HDMI_DAC_MAX 23 | ||
172 | #define G4X_M2_HDMI_DAC_MIN 5 | ||
173 | #define G4X_M2_HDMI_DAC_MAX 11 | ||
174 | #define G4X_P_HDMI_DAC_MIN 5 | ||
175 | #define G4X_P_HDMI_DAC_MAX 80 | ||
176 | #define G4X_P1_HDMI_DAC_MIN 1 | ||
177 | #define G4X_P1_HDMI_DAC_MAX 8 | ||
178 | #define G4X_P2_HDMI_DAC_SLOW 10 | ||
179 | #define G4X_P2_HDMI_DAC_FAST 5 | ||
180 | #define G4X_P2_HDMI_DAC_LIMIT 165000 | ||
181 | |||
182 | /*The parameter is for SINGLE_CHANNEL_LVDS on G4x platform*/ | ||
183 | #define G4X_DOT_SINGLE_CHANNEL_LVDS_MIN 20000 | ||
184 | #define G4X_DOT_SINGLE_CHANNEL_LVDS_MAX 115000 | ||
185 | #define G4X_N_SINGLE_CHANNEL_LVDS_MIN 1 | ||
186 | #define G4X_N_SINGLE_CHANNEL_LVDS_MAX 3 | ||
187 | #define G4X_M_SINGLE_CHANNEL_LVDS_MIN 104 | ||
188 | #define G4X_M_SINGLE_CHANNEL_LVDS_MAX 138 | ||
189 | #define G4X_M1_SINGLE_CHANNEL_LVDS_MIN 17 | ||
190 | #define G4X_M1_SINGLE_CHANNEL_LVDS_MAX 23 | ||
191 | #define G4X_M2_SINGLE_CHANNEL_LVDS_MIN 5 | ||
192 | #define G4X_M2_SINGLE_CHANNEL_LVDS_MAX 11 | ||
193 | #define G4X_P_SINGLE_CHANNEL_LVDS_MIN 28 | ||
194 | #define G4X_P_SINGLE_CHANNEL_LVDS_MAX 112 | ||
195 | #define G4X_P1_SINGLE_CHANNEL_LVDS_MIN 2 | ||
196 | #define G4X_P1_SINGLE_CHANNEL_LVDS_MAX 8 | ||
197 | #define G4X_P2_SINGLE_CHANNEL_LVDS_SLOW 14 | ||
198 | #define G4X_P2_SINGLE_CHANNEL_LVDS_FAST 14 | ||
199 | #define G4X_P2_SINGLE_CHANNEL_LVDS_LIMIT 0 | ||
200 | |||
201 | /*The parameter is for DUAL_CHANNEL_LVDS on G4x platform*/ | ||
202 | #define G4X_DOT_DUAL_CHANNEL_LVDS_MIN 80000 | ||
203 | #define G4X_DOT_DUAL_CHANNEL_LVDS_MAX 224000 | ||
204 | #define G4X_N_DUAL_CHANNEL_LVDS_MIN 1 | ||
205 | #define G4X_N_DUAL_CHANNEL_LVDS_MAX 3 | ||
206 | #define G4X_M_DUAL_CHANNEL_LVDS_MIN 104 | ||
207 | #define G4X_M_DUAL_CHANNEL_LVDS_MAX 138 | ||
208 | #define G4X_M1_DUAL_CHANNEL_LVDS_MIN 17 | ||
209 | #define G4X_M1_DUAL_CHANNEL_LVDS_MAX 23 | ||
210 | #define G4X_M2_DUAL_CHANNEL_LVDS_MIN 5 | ||
211 | #define G4X_M2_DUAL_CHANNEL_LVDS_MAX 11 | ||
212 | #define G4X_P_DUAL_CHANNEL_LVDS_MIN 14 | ||
213 | #define G4X_P_DUAL_CHANNEL_LVDS_MAX 42 | ||
214 | #define G4X_P1_DUAL_CHANNEL_LVDS_MIN 2 | ||
215 | #define G4X_P1_DUAL_CHANNEL_LVDS_MAX 6 | ||
216 | #define G4X_P2_DUAL_CHANNEL_LVDS_SLOW 7 | ||
217 | #define G4X_P2_DUAL_CHANNEL_LVDS_FAST 7 | ||
218 | #define G4X_P2_DUAL_CHANNEL_LVDS_LIMIT 0 | ||
219 | |||
220 | /*The parameter is for DISPLAY PORT on G4x platform*/ | ||
221 | #define G4X_DOT_DISPLAY_PORT_MIN 161670 | ||
222 | #define G4X_DOT_DISPLAY_PORT_MAX 227000 | ||
223 | #define G4X_N_DISPLAY_PORT_MIN 1 | ||
224 | #define G4X_N_DISPLAY_PORT_MAX 2 | ||
225 | #define G4X_M_DISPLAY_PORT_MIN 97 | ||
226 | #define G4X_M_DISPLAY_PORT_MAX 108 | ||
227 | #define G4X_M1_DISPLAY_PORT_MIN 0x10 | ||
228 | #define G4X_M1_DISPLAY_PORT_MAX 0x12 | ||
229 | #define G4X_M2_DISPLAY_PORT_MIN 0x05 | ||
230 | #define G4X_M2_DISPLAY_PORT_MAX 0x06 | ||
231 | #define G4X_P_DISPLAY_PORT_MIN 10 | ||
232 | #define G4X_P_DISPLAY_PORT_MAX 20 | ||
233 | #define G4X_P1_DISPLAY_PORT_MIN 1 | ||
234 | #define G4X_P1_DISPLAY_PORT_MAX 2 | ||
235 | #define G4X_P2_DISPLAY_PORT_SLOW 10 | ||
236 | #define G4X_P2_DISPLAY_PORT_FAST 10 | ||
237 | #define G4X_P2_DISPLAY_PORT_LIMIT 0 | ||
238 | |||
239 | /* Ironlake / Sandybridge */ | ||
240 | /* as we calculate clock using (register_value + 2) for | ||
241 | N/M1/M2, so here the range value for them is (actual_value-2). | ||
242 | */ | ||
243 | #define IRONLAKE_DOT_MIN 25000 | ||
244 | #define IRONLAKE_DOT_MAX 350000 | ||
245 | #define IRONLAKE_VCO_MIN 1760000 | ||
246 | #define IRONLAKE_VCO_MAX 3510000 | ||
247 | #define IRONLAKE_M1_MIN 12 | ||
248 | #define IRONLAKE_M1_MAX 22 | ||
249 | #define IRONLAKE_M2_MIN 5 | ||
250 | #define IRONLAKE_M2_MAX 9 | ||
251 | #define IRONLAKE_P2_DOT_LIMIT 225000 /* 225Mhz */ | ||
252 | |||
253 | /* We have parameter ranges for different type of outputs. */ | ||
254 | |||
255 | /* DAC & HDMI Refclk 120Mhz */ | ||
256 | #define IRONLAKE_DAC_N_MIN 1 | ||
257 | #define IRONLAKE_DAC_N_MAX 5 | ||
258 | #define IRONLAKE_DAC_M_MIN 79 | ||
259 | #define IRONLAKE_DAC_M_MAX 127 | ||
260 | #define IRONLAKE_DAC_P_MIN 5 | ||
261 | #define IRONLAKE_DAC_P_MAX 80 | ||
262 | #define IRONLAKE_DAC_P1_MIN 1 | ||
263 | #define IRONLAKE_DAC_P1_MAX 8 | ||
264 | #define IRONLAKE_DAC_P2_SLOW 10 | ||
265 | #define IRONLAKE_DAC_P2_FAST 5 | ||
266 | |||
267 | /* LVDS single-channel 120Mhz refclk */ | ||
268 | #define IRONLAKE_LVDS_S_N_MIN 1 | ||
269 | #define IRONLAKE_LVDS_S_N_MAX 3 | ||
270 | #define IRONLAKE_LVDS_S_M_MIN 79 | ||
271 | #define IRONLAKE_LVDS_S_M_MAX 118 | ||
272 | #define IRONLAKE_LVDS_S_P_MIN 28 | ||
273 | #define IRONLAKE_LVDS_S_P_MAX 112 | ||
274 | #define IRONLAKE_LVDS_S_P1_MIN 2 | ||
275 | #define IRONLAKE_LVDS_S_P1_MAX 8 | ||
276 | #define IRONLAKE_LVDS_S_P2_SLOW 14 | ||
277 | #define IRONLAKE_LVDS_S_P2_FAST 14 | ||
278 | |||
279 | /* LVDS dual-channel 120Mhz refclk */ | ||
280 | #define IRONLAKE_LVDS_D_N_MIN 1 | ||
281 | #define IRONLAKE_LVDS_D_N_MAX 3 | ||
282 | #define IRONLAKE_LVDS_D_M_MIN 79 | ||
283 | #define IRONLAKE_LVDS_D_M_MAX 127 | ||
284 | #define IRONLAKE_LVDS_D_P_MIN 14 | ||
285 | #define IRONLAKE_LVDS_D_P_MAX 56 | ||
286 | #define IRONLAKE_LVDS_D_P1_MIN 2 | ||
287 | #define IRONLAKE_LVDS_D_P1_MAX 8 | ||
288 | #define IRONLAKE_LVDS_D_P2_SLOW 7 | ||
289 | #define IRONLAKE_LVDS_D_P2_FAST 7 | ||
290 | |||
291 | /* LVDS single-channel 100Mhz refclk */ | ||
292 | #define IRONLAKE_LVDS_S_SSC_N_MIN 1 | ||
293 | #define IRONLAKE_LVDS_S_SSC_N_MAX 2 | ||
294 | #define IRONLAKE_LVDS_S_SSC_M_MIN 79 | ||
295 | #define IRONLAKE_LVDS_S_SSC_M_MAX 126 | ||
296 | #define IRONLAKE_LVDS_S_SSC_P_MIN 28 | ||
297 | #define IRONLAKE_LVDS_S_SSC_P_MAX 112 | ||
298 | #define IRONLAKE_LVDS_S_SSC_P1_MIN 2 | ||
299 | #define IRONLAKE_LVDS_S_SSC_P1_MAX 8 | ||
300 | #define IRONLAKE_LVDS_S_SSC_P2_SLOW 14 | ||
301 | #define IRONLAKE_LVDS_S_SSC_P2_FAST 14 | ||
302 | |||
303 | /* LVDS dual-channel 100Mhz refclk */ | ||
304 | #define IRONLAKE_LVDS_D_SSC_N_MIN 1 | ||
305 | #define IRONLAKE_LVDS_D_SSC_N_MAX 3 | ||
306 | #define IRONLAKE_LVDS_D_SSC_M_MIN 79 | ||
307 | #define IRONLAKE_LVDS_D_SSC_M_MAX 126 | ||
308 | #define IRONLAKE_LVDS_D_SSC_P_MIN 14 | ||
309 | #define IRONLAKE_LVDS_D_SSC_P_MAX 42 | ||
310 | #define IRONLAKE_LVDS_D_SSC_P1_MIN 2 | ||
311 | #define IRONLAKE_LVDS_D_SSC_P1_MAX 6 | ||
312 | #define IRONLAKE_LVDS_D_SSC_P2_SLOW 7 | ||
313 | #define IRONLAKE_LVDS_D_SSC_P2_FAST 7 | ||
314 | |||
315 | /* DisplayPort */ | ||
316 | #define IRONLAKE_DP_N_MIN 1 | ||
317 | #define IRONLAKE_DP_N_MAX 2 | ||
318 | #define IRONLAKE_DP_M_MIN 81 | ||
319 | #define IRONLAKE_DP_M_MAX 90 | ||
320 | #define IRONLAKE_DP_P_MIN 10 | ||
321 | #define IRONLAKE_DP_P_MAX 20 | ||
322 | #define IRONLAKE_DP_P2_FAST 10 | ||
323 | #define IRONLAKE_DP_P2_SLOW 10 | ||
324 | #define IRONLAKE_DP_P2_LIMIT 0 | ||
325 | #define IRONLAKE_DP_P1_MIN 1 | ||
326 | #define IRONLAKE_DP_P1_MAX 2 | ||
327 | |||
328 | /* FDI */ | 79 | /* FDI */ |
329 | #define IRONLAKE_FDI_FREQ 2700000 /* in kHz for mode->clock */ | 80 | #define IRONLAKE_FDI_FREQ 2700000 /* in kHz for mode->clock */ |
330 | 81 | ||
@@ -342,316 +93,284 @@ static bool | |||
342 | intel_find_pll_ironlake_dp(const intel_limit_t *, struct drm_crtc *crtc, | 93 | intel_find_pll_ironlake_dp(const intel_limit_t *, struct drm_crtc *crtc, |
343 | int target, int refclk, intel_clock_t *best_clock); | 94 | int target, int refclk, intel_clock_t *best_clock); |
344 | 95 | ||
96 | static inline u32 /* units of 100MHz */ | ||
97 | intel_fdi_link_freq(struct drm_device *dev) | ||
98 | { | ||
99 | if (IS_GEN5(dev)) { | ||
100 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
101 | return (I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2; | ||
102 | } else | ||
103 | return 27; | ||
104 | } | ||
105 | |||
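A quick worked example of intel_fdi_link_freq()'s 100MHz unit convention, using an assumed readout: if the FDI_PLL_BIOS_0 fb-clock field were 25 on gen5, the helper would return 25 + 2 = 27, i.e. 27 x 100MHz = 2.7GHz, which is exactly the constant 27 returned on other generations and matches IRONLAKE_FDI_FREQ (2700000, in kHz) above.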
345 | static const intel_limit_t intel_limits_i8xx_dvo = { | 106 | static const intel_limit_t intel_limits_i8xx_dvo = { |
346 | .dot = { .min = I8XX_DOT_MIN, .max = I8XX_DOT_MAX }, | 107 | .dot = { .min = 25000, .max = 350000 }, |
347 | .vco = { .min = I8XX_VCO_MIN, .max = I8XX_VCO_MAX }, | 108 | .vco = { .min = 930000, .max = 1400000 }, |
348 | .n = { .min = I8XX_N_MIN, .max = I8XX_N_MAX }, | 109 | .n = { .min = 3, .max = 16 }, |
349 | .m = { .min = I8XX_M_MIN, .max = I8XX_M_MAX }, | 110 | .m = { .min = 96, .max = 140 }, |
350 | .m1 = { .min = I8XX_M1_MIN, .max = I8XX_M1_MAX }, | 111 | .m1 = { .min = 18, .max = 26 }, |
351 | .m2 = { .min = I8XX_M2_MIN, .max = I8XX_M2_MAX }, | 112 | .m2 = { .min = 6, .max = 16 }, |
352 | .p = { .min = I8XX_P_MIN, .max = I8XX_P_MAX }, | 113 | .p = { .min = 4, .max = 128 }, |
353 | .p1 = { .min = I8XX_P1_MIN, .max = I8XX_P1_MAX }, | 114 | .p1 = { .min = 2, .max = 33 }, |
354 | .p2 = { .dot_limit = I8XX_P2_SLOW_LIMIT, | 115 | .p2 = { .dot_limit = 165000, |
355 | .p2_slow = I8XX_P2_SLOW, .p2_fast = I8XX_P2_FAST }, | 116 | .p2_slow = 4, .p2_fast = 2 }, |
356 | .find_pll = intel_find_best_PLL, | 117 | .find_pll = intel_find_best_PLL, |
357 | }; | 118 | }; |
358 | 119 | ||
359 | static const intel_limit_t intel_limits_i8xx_lvds = { | 120 | static const intel_limit_t intel_limits_i8xx_lvds = { |
360 | .dot = { .min = I8XX_DOT_MIN, .max = I8XX_DOT_MAX }, | 121 | .dot = { .min = 25000, .max = 350000 }, |
361 | .vco = { .min = I8XX_VCO_MIN, .max = I8XX_VCO_MAX }, | 122 | .vco = { .min = 930000, .max = 1400000 }, |
362 | .n = { .min = I8XX_N_MIN, .max = I8XX_N_MAX }, | 123 | .n = { .min = 3, .max = 16 }, |
363 | .m = { .min = I8XX_M_MIN, .max = I8XX_M_MAX }, | 124 | .m = { .min = 96, .max = 140 }, |
364 | .m1 = { .min = I8XX_M1_MIN, .max = I8XX_M1_MAX }, | 125 | .m1 = { .min = 18, .max = 26 }, |
365 | .m2 = { .min = I8XX_M2_MIN, .max = I8XX_M2_MAX }, | 126 | .m2 = { .min = 6, .max = 16 }, |
366 | .p = { .min = I8XX_P_MIN, .max = I8XX_P_MAX }, | 127 | .p = { .min = 4, .max = 128 }, |
367 | .p1 = { .min = I8XX_P1_LVDS_MIN, .max = I8XX_P1_LVDS_MAX }, | 128 | .p1 = { .min = 1, .max = 6 }, |
368 | .p2 = { .dot_limit = I8XX_P2_SLOW_LIMIT, | 129 | .p2 = { .dot_limit = 165000, |
369 | .p2_slow = I8XX_P2_LVDS_SLOW, .p2_fast = I8XX_P2_LVDS_FAST }, | 130 | .p2_slow = 14, .p2_fast = 7 }, |
370 | .find_pll = intel_find_best_PLL, | 131 | .find_pll = intel_find_best_PLL, |
371 | }; | 132 | }; |
372 | 133 | ||
373 | static const intel_limit_t intel_limits_i9xx_sdvo = { | 134 | static const intel_limit_t intel_limits_i9xx_sdvo = { |
374 | .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX }, | 135 | .dot = { .min = 20000, .max = 400000 }, |
375 | .vco = { .min = I9XX_VCO_MIN, .max = I9XX_VCO_MAX }, | 136 | .vco = { .min = 1400000, .max = 2800000 }, |
376 | .n = { .min = I9XX_N_MIN, .max = I9XX_N_MAX }, | 137 | .n = { .min = 1, .max = 6 }, |
377 | .m = { .min = I9XX_M_MIN, .max = I9XX_M_MAX }, | 138 | .m = { .min = 70, .max = 120 }, |
378 | .m1 = { .min = I9XX_M1_MIN, .max = I9XX_M1_MAX }, | 139 | .m1 = { .min = 10, .max = 22 }, |
379 | .m2 = { .min = I9XX_M2_MIN, .max = I9XX_M2_MAX }, | 140 | .m2 = { .min = 5, .max = 9 }, |
380 | .p = { .min = I9XX_P_SDVO_DAC_MIN, .max = I9XX_P_SDVO_DAC_MAX }, | 141 | .p = { .min = 5, .max = 80 }, |
381 | .p1 = { .min = I9XX_P1_MIN, .max = I9XX_P1_MAX }, | 142 | .p1 = { .min = 1, .max = 8 }, |
382 | .p2 = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT, | 143 | .p2 = { .dot_limit = 200000, |
383 | .p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast = I9XX_P2_SDVO_DAC_FAST }, | 144 | .p2_slow = 10, .p2_fast = 5 }, |
384 | .find_pll = intel_find_best_PLL, | 145 | .find_pll = intel_find_best_PLL, |
385 | }; | 146 | }; |
386 | 147 | ||
387 | static const intel_limit_t intel_limits_i9xx_lvds = { | 148 | static const intel_limit_t intel_limits_i9xx_lvds = { |
388 | .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX }, | 149 | .dot = { .min = 20000, .max = 400000 }, |
389 | .vco = { .min = I9XX_VCO_MIN, .max = I9XX_VCO_MAX }, | 150 | .vco = { .min = 1400000, .max = 2800000 }, |
390 | .n = { .min = I9XX_N_MIN, .max = I9XX_N_MAX }, | 151 | .n = { .min = 1, .max = 6 }, |
391 | .m = { .min = I9XX_M_MIN, .max = I9XX_M_MAX }, | 152 | .m = { .min = 70, .max = 120 }, |
392 | .m1 = { .min = I9XX_M1_MIN, .max = I9XX_M1_MAX }, | 153 | .m1 = { .min = 10, .max = 22 }, |
393 | .m2 = { .min = I9XX_M2_MIN, .max = I9XX_M2_MAX }, | 154 | .m2 = { .min = 5, .max = 9 }, |
394 | .p = { .min = I9XX_P_LVDS_MIN, .max = I9XX_P_LVDS_MAX }, | 155 | .p = { .min = 7, .max = 98 }, |
395 | .p1 = { .min = I9XX_P1_MIN, .max = I9XX_P1_MAX }, | 156 | .p1 = { .min = 1, .max = 8 }, |
396 | /* The single-channel range is 25-112Mhz, and dual-channel | 157 | .p2 = { .dot_limit = 112000, |
397 | * is 80-224Mhz. Prefer single channel as much as possible. | 158 | .p2_slow = 14, .p2_fast = 7 }, |
398 | */ | ||
399 | .p2 = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT, | ||
400 | .p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_FAST }, | ||
401 | .find_pll = intel_find_best_PLL, | 159 | .find_pll = intel_find_best_PLL, |
402 | }; | 160 | }; |
403 | 161 | ||
404 | /* below parameter and function is for G4X Chipset Family*/ | 162 | |
405 | static const intel_limit_t intel_limits_g4x_sdvo = { | 163 | static const intel_limit_t intel_limits_g4x_sdvo = { |
406 | .dot = { .min = G4X_DOT_SDVO_MIN, .max = G4X_DOT_SDVO_MAX }, | 164 | .dot = { .min = 25000, .max = 270000 }, |
407 | .vco = { .min = G4X_VCO_MIN, .max = G4X_VCO_MAX}, | 165 | .vco = { .min = 1750000, .max = 3500000}, |
408 | .n = { .min = G4X_N_SDVO_MIN, .max = G4X_N_SDVO_MAX }, | 166 | .n = { .min = 1, .max = 4 }, |
409 | .m = { .min = G4X_M_SDVO_MIN, .max = G4X_M_SDVO_MAX }, | 167 | .m = { .min = 104, .max = 138 }, |
410 | .m1 = { .min = G4X_M1_SDVO_MIN, .max = G4X_M1_SDVO_MAX }, | 168 | .m1 = { .min = 17, .max = 23 }, |
411 | .m2 = { .min = G4X_M2_SDVO_MIN, .max = G4X_M2_SDVO_MAX }, | 169 | .m2 = { .min = 5, .max = 11 }, |
412 | .p = { .min = G4X_P_SDVO_MIN, .max = G4X_P_SDVO_MAX }, | 170 | .p = { .min = 10, .max = 30 }, |
413 | .p1 = { .min = G4X_P1_SDVO_MIN, .max = G4X_P1_SDVO_MAX}, | 171 | .p1 = { .min = 1, .max = 3}, |
414 | .p2 = { .dot_limit = G4X_P2_SDVO_LIMIT, | 172 | .p2 = { .dot_limit = 270000, |
415 | .p2_slow = G4X_P2_SDVO_SLOW, | 173 | .p2_slow = 10, |
416 | .p2_fast = G4X_P2_SDVO_FAST | 174 | .p2_fast = 10 |
417 | }, | 175 | }, |
418 | .find_pll = intel_g4x_find_best_PLL, | 176 | .find_pll = intel_g4x_find_best_PLL, |
419 | }; | 177 | }; |
420 | 178 | ||
421 | static const intel_limit_t intel_limits_g4x_hdmi = { | 179 | static const intel_limit_t intel_limits_g4x_hdmi = { |
422 | .dot = { .min = G4X_DOT_HDMI_DAC_MIN, .max = G4X_DOT_HDMI_DAC_MAX }, | 180 | .dot = { .min = 22000, .max = 400000 }, |
423 | .vco = { .min = G4X_VCO_MIN, .max = G4X_VCO_MAX}, | 181 | .vco = { .min = 1750000, .max = 3500000}, |
424 | .n = { .min = G4X_N_HDMI_DAC_MIN, .max = G4X_N_HDMI_DAC_MAX }, | 182 | .n = { .min = 1, .max = 4 }, |
425 | .m = { .min = G4X_M_HDMI_DAC_MIN, .max = G4X_M_HDMI_DAC_MAX }, | 183 | .m = { .min = 104, .max = 138 }, |
426 | .m1 = { .min = G4X_M1_HDMI_DAC_MIN, .max = G4X_M1_HDMI_DAC_MAX }, | 184 | .m1 = { .min = 16, .max = 23 }, |
427 | .m2 = { .min = G4X_M2_HDMI_DAC_MIN, .max = G4X_M2_HDMI_DAC_MAX }, | 185 | .m2 = { .min = 5, .max = 11 }, |
428 | .p = { .min = G4X_P_HDMI_DAC_MIN, .max = G4X_P_HDMI_DAC_MAX }, | 186 | .p = { .min = 5, .max = 80 }, |
429 | .p1 = { .min = G4X_P1_HDMI_DAC_MIN, .max = G4X_P1_HDMI_DAC_MAX}, | 187 | .p1 = { .min = 1, .max = 8}, |
430 | .p2 = { .dot_limit = G4X_P2_HDMI_DAC_LIMIT, | 188 | .p2 = { .dot_limit = 165000, |
431 | .p2_slow = G4X_P2_HDMI_DAC_SLOW, | 189 | .p2_slow = 10, .p2_fast = 5 }, |
432 | .p2_fast = G4X_P2_HDMI_DAC_FAST | ||
433 | }, | ||
434 | .find_pll = intel_g4x_find_best_PLL, | 190 | .find_pll = intel_g4x_find_best_PLL, |
435 | }; | 191 | }; |
436 | 192 | ||
437 | static const intel_limit_t intel_limits_g4x_single_channel_lvds = { | 193 | static const intel_limit_t intel_limits_g4x_single_channel_lvds = { |
438 | .dot = { .min = G4X_DOT_SINGLE_CHANNEL_LVDS_MIN, | 194 | .dot = { .min = 20000, .max = 115000 }, |
439 | .max = G4X_DOT_SINGLE_CHANNEL_LVDS_MAX }, | 195 | .vco = { .min = 1750000, .max = 3500000 }, |
440 | .vco = { .min = G4X_VCO_MIN, | 196 | .n = { .min = 1, .max = 3 }, |
441 | .max = G4X_VCO_MAX }, | 197 | .m = { .min = 104, .max = 138 }, |
442 | .n = { .min = G4X_N_SINGLE_CHANNEL_LVDS_MIN, | 198 | .m1 = { .min = 17, .max = 23 }, |
443 | .max = G4X_N_SINGLE_CHANNEL_LVDS_MAX }, | 199 | .m2 = { .min = 5, .max = 11 }, |
444 | .m = { .min = G4X_M_SINGLE_CHANNEL_LVDS_MIN, | 200 | .p = { .min = 28, .max = 112 }, |
445 | .max = G4X_M_SINGLE_CHANNEL_LVDS_MAX }, | 201 | .p1 = { .min = 2, .max = 8 }, |
446 | .m1 = { .min = G4X_M1_SINGLE_CHANNEL_LVDS_MIN, | 202 | .p2 = { .dot_limit = 0, |
447 | .max = G4X_M1_SINGLE_CHANNEL_LVDS_MAX }, | 203 | .p2_slow = 14, .p2_fast = 14 |
448 | .m2 = { .min = G4X_M2_SINGLE_CHANNEL_LVDS_MIN, | ||
449 | .max = G4X_M2_SINGLE_CHANNEL_LVDS_MAX }, | ||
450 | .p = { .min = G4X_P_SINGLE_CHANNEL_LVDS_MIN, | ||
451 | .max = G4X_P_SINGLE_CHANNEL_LVDS_MAX }, | ||
452 | .p1 = { .min = G4X_P1_SINGLE_CHANNEL_LVDS_MIN, | ||
453 | .max = G4X_P1_SINGLE_CHANNEL_LVDS_MAX }, | ||
454 | .p2 = { .dot_limit = G4X_P2_SINGLE_CHANNEL_LVDS_LIMIT, | ||
455 | .p2_slow = G4X_P2_SINGLE_CHANNEL_LVDS_SLOW, | ||
456 | .p2_fast = G4X_P2_SINGLE_CHANNEL_LVDS_FAST | ||
457 | }, | 204 | }, |
458 | .find_pll = intel_g4x_find_best_PLL, | 205 | .find_pll = intel_g4x_find_best_PLL, |
459 | }; | 206 | }; |
460 | 207 | ||
461 | static const intel_limit_t intel_limits_g4x_dual_channel_lvds = { | 208 | static const intel_limit_t intel_limits_g4x_dual_channel_lvds = { |
462 | .dot = { .min = G4X_DOT_DUAL_CHANNEL_LVDS_MIN, | 209 | .dot = { .min = 80000, .max = 224000 }, |
463 | .max = G4X_DOT_DUAL_CHANNEL_LVDS_MAX }, | 210 | .vco = { .min = 1750000, .max = 3500000 }, |
464 | .vco = { .min = G4X_VCO_MIN, | 211 | .n = { .min = 1, .max = 3 }, |
465 | .max = G4X_VCO_MAX }, | 212 | .m = { .min = 104, .max = 138 }, |
466 | .n = { .min = G4X_N_DUAL_CHANNEL_LVDS_MIN, | 213 | .m1 = { .min = 17, .max = 23 }, |
467 | .max = G4X_N_DUAL_CHANNEL_LVDS_MAX }, | 214 | .m2 = { .min = 5, .max = 11 }, |
468 | .m = { .min = G4X_M_DUAL_CHANNEL_LVDS_MIN, | 215 | .p = { .min = 14, .max = 42 }, |
469 | .max = G4X_M_DUAL_CHANNEL_LVDS_MAX }, | 216 | .p1 = { .min = 2, .max = 6 }, |
470 | .m1 = { .min = G4X_M1_DUAL_CHANNEL_LVDS_MIN, | 217 | .p2 = { .dot_limit = 0, |
471 | .max = G4X_M1_DUAL_CHANNEL_LVDS_MAX }, | 218 | .p2_slow = 7, .p2_fast = 7 |
472 | .m2 = { .min = G4X_M2_DUAL_CHANNEL_LVDS_MIN, | ||
473 | .max = G4X_M2_DUAL_CHANNEL_LVDS_MAX }, | ||
474 | .p = { .min = G4X_P_DUAL_CHANNEL_LVDS_MIN, | ||
475 | .max = G4X_P_DUAL_CHANNEL_LVDS_MAX }, | ||
476 | .p1 = { .min = G4X_P1_DUAL_CHANNEL_LVDS_MIN, | ||
477 | .max = G4X_P1_DUAL_CHANNEL_LVDS_MAX }, | ||
478 | .p2 = { .dot_limit = G4X_P2_DUAL_CHANNEL_LVDS_LIMIT, | ||
479 | .p2_slow = G4X_P2_DUAL_CHANNEL_LVDS_SLOW, | ||
480 | .p2_fast = G4X_P2_DUAL_CHANNEL_LVDS_FAST | ||
481 | }, | 219 | }, |
482 | .find_pll = intel_g4x_find_best_PLL, | 220 | .find_pll = intel_g4x_find_best_PLL, |
483 | }; | 221 | }; |
484 | 222 | ||
485 | static const intel_limit_t intel_limits_g4x_display_port = { | 223 | static const intel_limit_t intel_limits_g4x_display_port = { |
486 | .dot = { .min = G4X_DOT_DISPLAY_PORT_MIN, | 224 | .dot = { .min = 161670, .max = 227000 }, |
487 | .max = G4X_DOT_DISPLAY_PORT_MAX }, | 225 | .vco = { .min = 1750000, .max = 3500000}, |
488 | .vco = { .min = G4X_VCO_MIN, | 226 | .n = { .min = 1, .max = 2 }, |
489 | .max = G4X_VCO_MAX}, | 227 | .m = { .min = 97, .max = 108 }, |
490 | .n = { .min = G4X_N_DISPLAY_PORT_MIN, | 228 | .m1 = { .min = 0x10, .max = 0x12 }, |
491 | .max = G4X_N_DISPLAY_PORT_MAX }, | 229 | .m2 = { .min = 0x05, .max = 0x06 }, |
492 | .m = { .min = G4X_M_DISPLAY_PORT_MIN, | 230 | .p = { .min = 10, .max = 20 }, |
493 | .max = G4X_M_DISPLAY_PORT_MAX }, | 231 | .p1 = { .min = 1, .max = 2}, |
494 | .m1 = { .min = G4X_M1_DISPLAY_PORT_MIN, | 232 | .p2 = { .dot_limit = 0, |
495 | .max = G4X_M1_DISPLAY_PORT_MAX }, | 233 | .p2_slow = 10, .p2_fast = 10 }, |
496 | .m2 = { .min = G4X_M2_DISPLAY_PORT_MIN, | ||
497 | .max = G4X_M2_DISPLAY_PORT_MAX }, | ||
498 | .p = { .min = G4X_P_DISPLAY_PORT_MIN, | ||
499 | .max = G4X_P_DISPLAY_PORT_MAX }, | ||
500 | .p1 = { .min = G4X_P1_DISPLAY_PORT_MIN, | ||
501 | .max = G4X_P1_DISPLAY_PORT_MAX}, | ||
502 | .p2 = { .dot_limit = G4X_P2_DISPLAY_PORT_LIMIT, | ||
503 | .p2_slow = G4X_P2_DISPLAY_PORT_SLOW, | ||
504 | .p2_fast = G4X_P2_DISPLAY_PORT_FAST }, | ||
505 | .find_pll = intel_find_pll_g4x_dp, | 234 | .find_pll = intel_find_pll_g4x_dp, |
506 | }; | 235 | }; |
507 | 236 | ||
508 | static const intel_limit_t intel_limits_pineview_sdvo = { | 237 | static const intel_limit_t intel_limits_pineview_sdvo = { |
509 | .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX}, | 238 | .dot = { .min = 20000, .max = 400000}, |
510 | .vco = { .min = PINEVIEW_VCO_MIN, .max = PINEVIEW_VCO_MAX }, | 239 | .vco = { .min = 1700000, .max = 3500000 }, |
511 | .n = { .min = PINEVIEW_N_MIN, .max = PINEVIEW_N_MAX }, | 240 | /* Pineview's Ncounter is a ring counter */ |
512 | .m = { .min = PINEVIEW_M_MIN, .max = PINEVIEW_M_MAX }, | 241 | .n = { .min = 3, .max = 6 }, |
513 | .m1 = { .min = PINEVIEW_M1_MIN, .max = PINEVIEW_M1_MAX }, | 242 | .m = { .min = 2, .max = 256 }, |
514 | .m2 = { .min = PINEVIEW_M2_MIN, .max = PINEVIEW_M2_MAX }, | 243 | /* Pineview only has one combined m divider, which we treat as m2. */ |
515 | .p = { .min = I9XX_P_SDVO_DAC_MIN, .max = I9XX_P_SDVO_DAC_MAX }, | 244 | .m1 = { .min = 0, .max = 0 }, |
516 | .p1 = { .min = I9XX_P1_MIN, .max = I9XX_P1_MAX }, | 245 | .m2 = { .min = 0, .max = 254 }, |
517 | .p2 = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT, | 246 | .p = { .min = 5, .max = 80 }, |
518 | .p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast = I9XX_P2_SDVO_DAC_FAST }, | 247 | .p1 = { .min = 1, .max = 8 }, |
248 | .p2 = { .dot_limit = 200000, | ||
249 | .p2_slow = 10, .p2_fast = 5 }, | ||
519 | .find_pll = intel_find_best_PLL, | 250 | .find_pll = intel_find_best_PLL, |
520 | }; | 251 | }; |
521 | 252 | ||
522 | static const intel_limit_t intel_limits_pineview_lvds = { | 253 | static const intel_limit_t intel_limits_pineview_lvds = { |
523 | .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX }, | 254 | .dot = { .min = 20000, .max = 400000 }, |
524 | .vco = { .min = PINEVIEW_VCO_MIN, .max = PINEVIEW_VCO_MAX }, | 255 | .vco = { .min = 1700000, .max = 3500000 }, |
525 | .n = { .min = PINEVIEW_N_MIN, .max = PINEVIEW_N_MAX }, | 256 | .n = { .min = 3, .max = 6 }, |
526 | .m = { .min = PINEVIEW_M_MIN, .max = PINEVIEW_M_MAX }, | 257 | .m = { .min = 2, .max = 256 }, |
527 | .m1 = { .min = PINEVIEW_M1_MIN, .max = PINEVIEW_M1_MAX }, | 258 | .m1 = { .min = 0, .max = 0 }, |
528 | .m2 = { .min = PINEVIEW_M2_MIN, .max = PINEVIEW_M2_MAX }, | 259 | .m2 = { .min = 0, .max = 254 }, |
529 | .p = { .min = PINEVIEW_P_LVDS_MIN, .max = PINEVIEW_P_LVDS_MAX }, | 260 | .p = { .min = 7, .max = 112 }, |
530 | .p1 = { .min = I9XX_P1_MIN, .max = I9XX_P1_MAX }, | 261 | .p1 = { .min = 1, .max = 8 }, |
531 | /* Pineview only supports single-channel mode. */ | 262 | .p2 = { .dot_limit = 112000, |
532 | .p2 = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT, | 263 | .p2_slow = 14, .p2_fast = 14 }, |
533 | .p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_SLOW }, | ||
534 | .find_pll = intel_find_best_PLL, | 264 | .find_pll = intel_find_best_PLL, |
535 | }; | 265 | }; |
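
The zeroed .m1 range above only makes sense with the Pineview clock equation in mind; a hedged sketch of what the pineview clock helper computes (assuming the pineview_clock() helper elsewhere in this file; all clocks in kHz):

    /* Pineview: m2 carries the whole M value, m1 is unused */
    clock->m = clock->m2 + 2;
    clock->p = clock->p1 * clock->p2;
    clock->vco = refclk * clock->m / clock->n;
    clock->dot = clock->vco / clock->p;
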
536 | 266 | ||
267 | /* Ironlake / Sandybridge | ||
268 | * | ||
269 | * We calculate clock using (register_value + 2) for N/M1/M2, so here | ||
270 | * the range value for them is (actual_value - 2). | ||
271 | */ | ||
537 | static const intel_limit_t intel_limits_ironlake_dac = { | 272 | static const intel_limit_t intel_limits_ironlake_dac = { |
538 | .dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX }, | 273 | .dot = { .min = 25000, .max = 350000 }, |
539 | .vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX }, | 274 | .vco = { .min = 1760000, .max = 3510000 }, |
540 | .n = { .min = IRONLAKE_DAC_N_MIN, .max = IRONLAKE_DAC_N_MAX }, | 275 | .n = { .min = 1, .max = 5 }, |
541 | .m = { .min = IRONLAKE_DAC_M_MIN, .max = IRONLAKE_DAC_M_MAX }, | 276 | .m = { .min = 79, .max = 127 }, |
542 | .m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX }, | 277 | .m1 = { .min = 12, .max = 22 }, |
543 | .m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX }, | 278 | .m2 = { .min = 5, .max = 9 }, |
544 | .p = { .min = IRONLAKE_DAC_P_MIN, .max = IRONLAKE_DAC_P_MAX }, | 279 | .p = { .min = 5, .max = 80 }, |
545 | .p1 = { .min = IRONLAKE_DAC_P1_MIN, .max = IRONLAKE_DAC_P1_MAX }, | 280 | .p1 = { .min = 1, .max = 8 }, |
546 | .p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT, | 281 | .p2 = { .dot_limit = 225000, |
547 | .p2_slow = IRONLAKE_DAC_P2_SLOW, | 282 | .p2_slow = 10, .p2_fast = 5 }, |
548 | .p2_fast = IRONLAKE_DAC_P2_FAST }, | ||
549 | .find_pll = intel_g4x_find_best_PLL, | 283 | .find_pll = intel_g4x_find_best_PLL, |
550 | }; | 284 | }; |
551 | 285 | ||
552 | static const intel_limit_t intel_limits_ironlake_single_lvds = { | 286 | static const intel_limit_t intel_limits_ironlake_single_lvds = { |
553 | .dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX }, | 287 | .dot = { .min = 25000, .max = 350000 }, |
554 | .vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX }, | 288 | .vco = { .min = 1760000, .max = 3510000 }, |
555 | .n = { .min = IRONLAKE_LVDS_S_N_MIN, .max = IRONLAKE_LVDS_S_N_MAX }, | 289 | .n = { .min = 1, .max = 3 }, |
556 | .m = { .min = IRONLAKE_LVDS_S_M_MIN, .max = IRONLAKE_LVDS_S_M_MAX }, | 290 | .m = { .min = 79, .max = 118 }, |
557 | .m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX }, | 291 | .m1 = { .min = 12, .max = 22 }, |
558 | .m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX }, | 292 | .m2 = { .min = 5, .max = 9 }, |
559 | .p = { .min = IRONLAKE_LVDS_S_P_MIN, .max = IRONLAKE_LVDS_S_P_MAX }, | 293 | .p = { .min = 28, .max = 112 }, |
560 | .p1 = { .min = IRONLAKE_LVDS_S_P1_MIN, .max = IRONLAKE_LVDS_S_P1_MAX }, | 294 | .p1 = { .min = 2, .max = 8 }, |
561 | .p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT, | 295 | .p2 = { .dot_limit = 225000, |
562 | .p2_slow = IRONLAKE_LVDS_S_P2_SLOW, | 296 | .p2_slow = 14, .p2_fast = 14 }, |
563 | .p2_fast = IRONLAKE_LVDS_S_P2_FAST }, | ||
564 | .find_pll = intel_g4x_find_best_PLL, | 297 | .find_pll = intel_g4x_find_best_PLL, |
565 | }; | 298 | }; |
566 | 299 | ||
567 | static const intel_limit_t intel_limits_ironlake_dual_lvds = { | 300 | static const intel_limit_t intel_limits_ironlake_dual_lvds = { |
568 | .dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX }, | 301 | .dot = { .min = 25000, .max = 350000 }, |
569 | .vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX }, | 302 | .vco = { .min = 1760000, .max = 3510000 }, |
570 | .n = { .min = IRONLAKE_LVDS_D_N_MIN, .max = IRONLAKE_LVDS_D_N_MAX }, | 303 | .n = { .min = 1, .max = 3 }, |
571 | .m = { .min = IRONLAKE_LVDS_D_M_MIN, .max = IRONLAKE_LVDS_D_M_MAX }, | 304 | .m = { .min = 79, .max = 127 }, |
572 | .m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX }, | 305 | .m1 = { .min = 12, .max = 22 }, |
573 | .m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX }, | 306 | .m2 = { .min = 5, .max = 9 }, |
574 | .p = { .min = IRONLAKE_LVDS_D_P_MIN, .max = IRONLAKE_LVDS_D_P_MAX }, | 307 | .p = { .min = 14, .max = 56 }, |
575 | .p1 = { .min = IRONLAKE_LVDS_D_P1_MIN, .max = IRONLAKE_LVDS_D_P1_MAX }, | 308 | .p1 = { .min = 2, .max = 8 }, |
576 | .p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT, | 309 | .p2 = { .dot_limit = 225000, |
577 | .p2_slow = IRONLAKE_LVDS_D_P2_SLOW, | 310 | .p2_slow = 7, .p2_fast = 7 }, |
578 | .p2_fast = IRONLAKE_LVDS_D_P2_FAST }, | ||
579 | .find_pll = intel_g4x_find_best_PLL, | 311 | .find_pll = intel_g4x_find_best_PLL, |
580 | }; | 312 | }; |
581 | 313 | ||
314 | /* LVDS 100MHz refclk limits. */ | ||
582 | static const intel_limit_t intel_limits_ironlake_single_lvds_100m = { | 315 | static const intel_limit_t intel_limits_ironlake_single_lvds_100m = { |
583 | .dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX }, | 316 | .dot = { .min = 25000, .max = 350000 }, |
584 | .vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX }, | 317 | .vco = { .min = 1760000, .max = 3510000 }, |
585 | .n = { .min = IRONLAKE_LVDS_S_SSC_N_MIN, .max = IRONLAKE_LVDS_S_SSC_N_MAX }, | 318 | .n = { .min = 1, .max = 2 }, |
586 | .m = { .min = IRONLAKE_LVDS_S_SSC_M_MIN, .max = IRONLAKE_LVDS_S_SSC_M_MAX }, | 319 | .m = { .min = 79, .max = 126 }, |
587 | .m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX }, | 320 | .m1 = { .min = 12, .max = 22 }, |
588 | .m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX }, | 321 | .m2 = { .min = 5, .max = 9 }, |
589 | .p = { .min = IRONLAKE_LVDS_S_SSC_P_MIN, .max = IRONLAKE_LVDS_S_SSC_P_MAX }, | 322 | .p = { .min = 28, .max = 112 }, |
590 | .p1 = { .min = IRONLAKE_LVDS_S_SSC_P1_MIN,.max = IRONLAKE_LVDS_S_SSC_P1_MAX }, | 323 | .p1 = { .min = 2, .max = 8 }, |
591 | .p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT, | 324 | .p2 = { .dot_limit = 225000, |
592 | .p2_slow = IRONLAKE_LVDS_S_SSC_P2_SLOW, | 325 | .p2_slow = 14, .p2_fast = 14 }, |
593 | .p2_fast = IRONLAKE_LVDS_S_SSC_P2_FAST }, | ||
594 | .find_pll = intel_g4x_find_best_PLL, | 326 | .find_pll = intel_g4x_find_best_PLL, |
595 | }; | 327 | }; |
596 | 328 | ||
597 | static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = { | 329 | static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = { |
598 | .dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX }, | 330 | .dot = { .min = 25000, .max = 350000 }, |
599 | .vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX }, | 331 | .vco = { .min = 1760000, .max = 3510000 }, |
600 | .n = { .min = IRONLAKE_LVDS_D_SSC_N_MIN, .max = IRONLAKE_LVDS_D_SSC_N_MAX }, | 332 | .n = { .min = 1, .max = 3 }, |
601 | .m = { .min = IRONLAKE_LVDS_D_SSC_M_MIN, .max = IRONLAKE_LVDS_D_SSC_M_MAX }, | 333 | .m = { .min = 79, .max = 126 }, |
602 | .m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX }, | 334 | .m1 = { .min = 12, .max = 22 }, |
603 | .m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX }, | 335 | .m2 = { .min = 5, .max = 9 }, |
604 | .p = { .min = IRONLAKE_LVDS_D_SSC_P_MIN, .max = IRONLAKE_LVDS_D_SSC_P_MAX }, | 336 | .p = { .min = 14, .max = 42 }, |
605 | .p1 = { .min = IRONLAKE_LVDS_D_SSC_P1_MIN,.max = IRONLAKE_LVDS_D_SSC_P1_MAX }, | 337 | .p1 = { .min = 2, .max = 6 }, |
606 | .p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT, | 338 | .p2 = { .dot_limit = 225000, |
607 | .p2_slow = IRONLAKE_LVDS_D_SSC_P2_SLOW, | 339 | .p2_slow = 7, .p2_fast = 7 }, |
608 | .p2_fast = IRONLAKE_LVDS_D_SSC_P2_FAST }, | ||
609 | .find_pll = intel_g4x_find_best_PLL, | 340 | .find_pll = intel_g4x_find_best_PLL, |
610 | }; | 341 | }; |
611 | 342 | ||
612 | static const intel_limit_t intel_limits_ironlake_display_port = { | 343 | static const intel_limit_t intel_limits_ironlake_display_port = { |
613 | .dot = { .min = IRONLAKE_DOT_MIN, | 344 | .dot = { .min = 25000, .max = 350000 }, |
614 | .max = IRONLAKE_DOT_MAX }, | 345 | .vco = { .min = 1760000, .max = 3510000}, |
615 | .vco = { .min = IRONLAKE_VCO_MIN, | 346 | .n = { .min = 1, .max = 2 }, |
616 | .max = IRONLAKE_VCO_MAX}, | 347 | .m = { .min = 81, .max = 90 }, |
617 | .n = { .min = IRONLAKE_DP_N_MIN, | 348 | .m1 = { .min = 12, .max = 22 }, |
618 | .max = IRONLAKE_DP_N_MAX }, | 349 | .m2 = { .min = 5, .max = 9 }, |
619 | .m = { .min = IRONLAKE_DP_M_MIN, | 350 | .p = { .min = 10, .max = 20 }, |
620 | .max = IRONLAKE_DP_M_MAX }, | 351 | .p1 = { .min = 1, .max = 2}, |
621 | .m1 = { .min = IRONLAKE_M1_MIN, | 352 | .p2 = { .dot_limit = 0, |
622 | .max = IRONLAKE_M1_MAX }, | 353 | .p2_slow = 10, .p2_fast = 10 }, |
623 | .m2 = { .min = IRONLAKE_M2_MIN, | ||
624 | .max = IRONLAKE_M2_MAX }, | ||
625 | .p = { .min = IRONLAKE_DP_P_MIN, | ||
626 | .max = IRONLAKE_DP_P_MAX }, | ||
627 | .p1 = { .min = IRONLAKE_DP_P1_MIN, | ||
628 | .max = IRONLAKE_DP_P1_MAX}, | ||
629 | .p2 = { .dot_limit = IRONLAKE_DP_P2_LIMIT, | ||
630 | .p2_slow = IRONLAKE_DP_P2_SLOW, | ||
631 | .p2_fast = IRONLAKE_DP_P2_FAST }, | ||
632 | .find_pll = intel_find_pll_ironlake_dp, | 354 | .find_pll = intel_find_pll_ironlake_dp, |
633 | }; | 355 | }; |
634 | 356 | ||
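
The "(register_value + 2)" convention from the comment above is applied when the divider fields are turned into a dot clock; a sketch consistent with the arithmetic visible in intel_find_pll_g4x_dp() below (the vco line is an assumption, since that function leaves vco at 0):

    clock.m = 5 * (clock.m1 + 2) + (clock.m2 + 2);
    clock.p = clock.p1 * clock.p2;
    clock.vco = refclk * clock.m / (clock.n + 2);  /* refclk in kHz */
    clock.dot = clock.vco / clock.p;
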
635 | static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc) | 357 | static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc, |
358 | int refclk) | ||
636 | { | 359 | { |
637 | struct drm_device *dev = crtc->dev; | 360 | struct drm_device *dev = crtc->dev; |
638 | struct drm_i915_private *dev_priv = dev->dev_private; | 361 | struct drm_i915_private *dev_priv = dev->dev_private; |
639 | const intel_limit_t *limit; | 362 | const intel_limit_t *limit; |
640 | int refclk = 120; | ||
641 | 363 | ||
642 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { | 364 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { |
643 | if (dev_priv->lvds_use_ssc && dev_priv->lvds_ssc_freq == 100) | ||
644 | refclk = 100; | ||
645 | |||
646 | if ((I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) == | 365 | if ((I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) == |
647 | LVDS_CLKB_POWER_UP) { | 366 | LVDS_CLKB_POWER_UP) { |
648 | /* LVDS dual channel */ | 367 | /* LVDS dual channel */ |
649 | if (refclk == 100) | 368 | if (refclk == 100000) |
650 | limit = &intel_limits_ironlake_dual_lvds_100m; | 369 | limit = &intel_limits_ironlake_dual_lvds_100m; |
651 | else | 370 | else |
652 | limit = &intel_limits_ironlake_dual_lvds; | 371 | limit = &intel_limits_ironlake_dual_lvds; |
653 | } else { | 372 | } else { |
654 | if (refclk == 100) | 373 | if (refclk == 100000) |
655 | limit = &intel_limits_ironlake_single_lvds_100m; | 374 | limit = &intel_limits_ironlake_single_lvds_100m; |
656 | else | 375 | else |
657 | limit = &intel_limits_ironlake_single_lvds; | 376 | limit = &intel_limits_ironlake_single_lvds; |
@@ -692,25 +411,25 @@ static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc) | |||
692 | return limit; | 411 | return limit; |
693 | } | 412 | } |
694 | 413 | ||
695 | static const intel_limit_t *intel_limit(struct drm_crtc *crtc) | 414 | static const intel_limit_t *intel_limit(struct drm_crtc *crtc, int refclk) |
696 | { | 415 | { |
697 | struct drm_device *dev = crtc->dev; | 416 | struct drm_device *dev = crtc->dev; |
698 | const intel_limit_t *limit; | 417 | const intel_limit_t *limit; |
699 | 418 | ||
700 | if (HAS_PCH_SPLIT(dev)) | 419 | if (HAS_PCH_SPLIT(dev)) |
701 | limit = intel_ironlake_limit(crtc); | 420 | limit = intel_ironlake_limit(crtc, refclk); |
702 | else if (IS_G4X(dev)) { | 421 | else if (IS_G4X(dev)) { |
703 | limit = intel_g4x_limit(crtc); | 422 | limit = intel_g4x_limit(crtc); |
704 | } else if (IS_I9XX(dev) && !IS_PINEVIEW(dev)) { | ||
705 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) | ||
706 | limit = &intel_limits_i9xx_lvds; | ||
707 | else | ||
708 | limit = &intel_limits_i9xx_sdvo; | ||
709 | } else if (IS_PINEVIEW(dev)) { | 423 | } else if (IS_PINEVIEW(dev)) { |
710 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) | 424 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) |
711 | limit = &intel_limits_pineview_lvds; | 425 | limit = &intel_limits_pineview_lvds; |
712 | else | 426 | else |
713 | limit = &intel_limits_pineview_sdvo; | 427 | limit = &intel_limits_pineview_sdvo; |
428 | } else if (!IS_GEN2(dev)) { | ||
429 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) | ||
430 | limit = &intel_limits_i9xx_lvds; | ||
431 | else | ||
432 | limit = &intel_limits_i9xx_sdvo; | ||
714 | } else { | 433 | } else { |
715 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) | 434 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) |
716 | limit = &intel_limits_i8xx_lvds; | 435 | limit = &intel_limits_i8xx_lvds; |
@@ -744,20 +463,17 @@ static void intel_clock(struct drm_device *dev, int refclk, intel_clock_t *clock | |||
744 | /** | 463 | /** |
745 | * Returns whether any output on the specified pipe is of the specified type | 464 | * Returns whether any output on the specified pipe is of the specified type |
746 | */ | 465 | */ |
747 | bool intel_pipe_has_type (struct drm_crtc *crtc, int type) | 466 | bool intel_pipe_has_type(struct drm_crtc *crtc, int type) |
748 | { | 467 | { |
749 | struct drm_device *dev = crtc->dev; | 468 | struct drm_device *dev = crtc->dev; |
750 | struct drm_mode_config *mode_config = &dev->mode_config; | 469 | struct drm_mode_config *mode_config = &dev->mode_config; |
751 | struct drm_encoder *l_entry; | 470 | struct intel_encoder *encoder; |
752 | 471 | ||
753 | list_for_each_entry(l_entry, &mode_config->encoder_list, head) { | 472 | list_for_each_entry(encoder, &mode_config->encoder_list, base.head) |
754 | if (l_entry && l_entry->crtc == crtc) { | 473 | if (encoder->base.crtc == crtc && encoder->type == type) |
755 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(l_entry); | 474 | return true; |
756 | if (intel_encoder->type == type) | 475 | |
757 | return true; | 476 | return false; |
758 | } | ||
759 | } | ||
760 | return false; | ||
761 | } | 477 | } |
762 | 478 | ||
763 | #define INTELPllInvalid(s) do { /* DRM_DEBUG(s); */ return false; } while (0) | 479 | #define INTELPllInvalid(s) do { /* DRM_DEBUG(s); */ return false; } while (0) |
@@ -766,11 +482,10 @@ bool intel_pipe_has_type (struct drm_crtc *crtc, int type) | |||
766 | * the given connectors. | 482 | * the given connectors. |
767 | */ | 483 | */ |
768 | 484 | ||
769 | static bool intel_PLL_is_valid(struct drm_crtc *crtc, intel_clock_t *clock) | 485 | static bool intel_PLL_is_valid(struct drm_device *dev, |
486 | const intel_limit_t *limit, | ||
487 | const intel_clock_t *clock) | ||
770 | { | 488 | { |
771 | const intel_limit_t *limit = intel_limit (crtc); | ||
772 | struct drm_device *dev = crtc->dev; | ||
773 | |||
774 | if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1) | 489 | if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1) |
775 | INTELPllInvalid ("p1 out of range\n"); | 490 | INTELPllInvalid ("p1 out of range\n"); |
776 | if (clock->p < limit->p.min || limit->p.max < clock->p) | 491 | if (clock->p < limit->p.min || limit->p.max < clock->p) |
@@ -842,8 +557,8 @@ intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, | |||
842 | int this_err; | 557 | int this_err; |
843 | 558 | ||
844 | intel_clock(dev, refclk, &clock); | 559 | intel_clock(dev, refclk, &clock); |
845 | 560 | if (!intel_PLL_is_valid(dev, limit, | |
846 | if (!intel_PLL_is_valid(crtc, &clock)) | 561 | &clock)) |
847 | continue; | 562 | continue; |
848 | 563 | ||
849 | this_err = abs(clock.dot - target); | 564 | this_err = abs(clock.dot - target); |
@@ -905,9 +620,11 @@ intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, | |||
905 | int this_err; | 620 | int this_err; |
906 | 621 | ||
907 | intel_clock(dev, refclk, &clock); | 622 | intel_clock(dev, refclk, &clock); |
908 | if (!intel_PLL_is_valid(crtc, &clock)) | 623 | if (!intel_PLL_is_valid(dev, limit, |
624 | &clock)) | ||
909 | continue; | 625 | continue; |
910 | this_err = abs(clock.dot - target) ; | 626 | |
627 | this_err = abs(clock.dot - target); | ||
911 | if (this_err < err_most) { | 628 | if (this_err < err_most) { |
912 | *best_clock = clock; | 629 | *best_clock = clock; |
913 | err_most = this_err; | 630 | err_most = this_err; |
@@ -928,10 +645,6 @@ intel_find_pll_ironlake_dp(const intel_limit_t *limit, struct drm_crtc *crtc, | |||
928 | struct drm_device *dev = crtc->dev; | 645 | struct drm_device *dev = crtc->dev; |
929 | intel_clock_t clock; | 646 | intel_clock_t clock; |
930 | 647 | ||
931 | /* return directly when it is eDP */ | ||
932 | if (HAS_eDP) | ||
933 | return true; | ||
934 | |||
935 | if (target < 200000) { | 648 | if (target < 200000) { |
936 | clock.n = 1; | 649 | clock.n = 1; |
937 | clock.p1 = 2; | 650 | clock.p1 = 2; |
@@ -955,26 +668,26 @@ static bool | |||
955 | intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc, | 668 | intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc, |
956 | int target, int refclk, intel_clock_t *best_clock) | 669 | int target, int refclk, intel_clock_t *best_clock) |
957 | { | 670 | { |
958 | intel_clock_t clock; | 671 | intel_clock_t clock; |
959 | if (target < 200000) { | 672 | if (target < 200000) { |
960 | clock.p1 = 2; | 673 | clock.p1 = 2; |
961 | clock.p2 = 10; | 674 | clock.p2 = 10; |
962 | clock.n = 2; | 675 | clock.n = 2; |
963 | clock.m1 = 23; | 676 | clock.m1 = 23; |
964 | clock.m2 = 8; | 677 | clock.m2 = 8; |
965 | } else { | 678 | } else { |
966 | clock.p1 = 1; | 679 | clock.p1 = 1; |
967 | clock.p2 = 10; | 680 | clock.p2 = 10; |
968 | clock.n = 1; | 681 | clock.n = 1; |
969 | clock.m1 = 14; | 682 | clock.m1 = 14; |
970 | clock.m2 = 2; | 683 | clock.m2 = 2; |
971 | } | 684 | } |
972 | clock.m = 5 * (clock.m1 + 2) + (clock.m2 + 2); | 685 | clock.m = 5 * (clock.m1 + 2) + (clock.m2 + 2); |
973 | clock.p = (clock.p1 * clock.p2); | 686 | clock.p = (clock.p1 * clock.p2); |
974 | clock.dot = 96000 * clock.m / (clock.n + 2) / clock.p; | 687 | clock.dot = 96000 * clock.m / (clock.n + 2) / clock.p; |
975 | clock.vco = 0; | 688 | clock.vco = 0; |
976 | memcpy(best_clock, &clock, sizeof(intel_clock_t)); | 689 | memcpy(best_clock, &clock, sizeof(intel_clock_t)); |
977 | return true; | 690 | return true; |
978 | } | 691 | } |
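
Worked through, the two fixed divider sets above land exactly on the two DisplayPort link symbol clocks: for target < 200000, m = 5*(23+2) + (8+2) = 135 and p = 2*10 = 20, so dot = 96000*135/(2+2)/20 = 162000 kHz (the 1.62 GHz link rate); otherwise m = 5*(14+2) + (2+2) = 84 and p = 1*10 = 10, so dot = 96000*84/(1+2)/10 = 268800 kHz, i.e. the ~270 MHz symbol clock of the 2.7 GHz link.
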
979 | 692 | ||
980 | /** | 693 | /** |
@@ -988,7 +701,7 @@ intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc, | |||
988 | void intel_wait_for_vblank(struct drm_device *dev, int pipe) | 701 | void intel_wait_for_vblank(struct drm_device *dev, int pipe) |
989 | { | 702 | { |
990 | struct drm_i915_private *dev_priv = dev->dev_private; | 703 | struct drm_i915_private *dev_priv = dev->dev_private; |
991 | int pipestat_reg = (pipe == 0 ? PIPEASTAT : PIPEBSTAT); | 704 | int pipestat_reg = PIPESTAT(pipe); |
992 | 705 | ||
993 | /* Clear existing vblank status. Note this will clear any other | 706 | /* Clear existing vblank status. Note this will clear any other |
994 | * sticky status fields as well. | 707 | * sticky status fields as well. |
@@ -1007,9 +720,9 @@ void intel_wait_for_vblank(struct drm_device *dev, int pipe) | |||
1007 | I915_READ(pipestat_reg) | PIPE_VBLANK_INTERRUPT_STATUS); | 720 | I915_READ(pipestat_reg) | PIPE_VBLANK_INTERRUPT_STATUS); |
1008 | 721 | ||
1009 | /* Wait for vblank interrupt bit to set */ | 722 | /* Wait for vblank interrupt bit to set */ |
1010 | if (wait_for((I915_READ(pipestat_reg) & | 723 | if (wait_for(I915_READ(pipestat_reg) & |
1011 | PIPE_VBLANK_INTERRUPT_STATUS), | 724 | PIPE_VBLANK_INTERRUPT_STATUS, |
1012 | 50, 0)) | 725 | 50)) |
1013 | DRM_DEBUG_KMS("vblank wait timed out\n"); | 726 | DRM_DEBUG_KMS("vblank wait timed out\n"); |
1014 | } | 727 | } |
1015 | 728 | ||
@@ -1028,47 +741,664 @@ void intel_wait_for_vblank(struct drm_device *dev, int pipe) | |||
1028 | * Otherwise: | 741 | * Otherwise: |
1029 | * wait for the display line value to settle (it usually | 742 | * wait for the display line value to settle (it usually |
1030 | * ends up stopping at the start of the next frame). | 743 | * ends up stopping at the start of the next frame). |
1031 | * | 744 | * |
1032 | */ | 745 | */ |
1033 | static void intel_wait_for_pipe_off(struct drm_device *dev, int pipe) | 746 | void intel_wait_for_pipe_off(struct drm_device *dev, int pipe) |
1034 | { | 747 | { |
1035 | struct drm_i915_private *dev_priv = dev->dev_private; | 748 | struct drm_i915_private *dev_priv = dev->dev_private; |
1036 | 749 | ||
1037 | if (INTEL_INFO(dev)->gen >= 4) { | 750 | if (INTEL_INFO(dev)->gen >= 4) { |
1038 | int pipeconf_reg = (pipe == 0 ? PIPEACONF : PIPEBCONF); | 751 | int reg = PIPECONF(pipe); |
1039 | 752 | ||
1040 | /* Wait for the Pipe State to go off */ | 753 | /* Wait for the Pipe State to go off */ |
1041 | if (wait_for((I915_READ(pipeconf_reg) & I965_PIPECONF_ACTIVE) == 0, | 754 | if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0, |
1042 | 100, 0)) | 755 | 100)) |
1043 | DRM_DEBUG_KMS("pipe_off wait timed out\n"); | 756 | DRM_DEBUG_KMS("pipe_off wait timed out\n"); |
1044 | } else { | 757 | } else { |
1045 | u32 last_line; | 758 | u32 last_line; |
1046 | int pipedsl_reg = (pipe == 0 ? PIPEADSL : PIPEBDSL); | 759 | int reg = PIPEDSL(pipe); |
1047 | unsigned long timeout = jiffies + msecs_to_jiffies(100); | 760 | unsigned long timeout = jiffies + msecs_to_jiffies(100); |
1048 | 761 | ||
1049 | /* Wait for the display line to settle */ | 762 | /* Wait for the display line to settle */ |
1050 | do { | 763 | do { |
1051 | last_line = I915_READ(pipedsl_reg) & DSL_LINEMASK; | 764 | last_line = I915_READ(reg) & DSL_LINEMASK; |
1052 | mdelay(5); | 765 | mdelay(5); |
1053 | } while (((I915_READ(pipedsl_reg) & DSL_LINEMASK) != last_line) && | 766 | } while (((I915_READ(reg) & DSL_LINEMASK) != last_line) && |
1054 | time_after(timeout, jiffies)); | 767 | time_after(timeout, jiffies)); |
1055 | if (time_after(jiffies, timeout)) | 768 | if (time_after(jiffies, timeout)) |
1056 | DRM_DEBUG_KMS("pipe_off wait timed out\n"); | 769 | DRM_DEBUG_KMS("pipe_off wait timed out\n"); |
1057 | } | 770 | } |
1058 | } | 771 | } |
1059 | 772 | ||
1060 | /* Parameters have changed, update FBC info */ | 773 | static const char *state_string(bool enabled) |
774 | { | ||
775 | return enabled ? "on" : "off"; | ||
776 | } | ||
777 | |||
778 | /* Only for pre-ILK configs */ | ||
779 | static void assert_pll(struct drm_i915_private *dev_priv, | ||
780 | enum pipe pipe, bool state) | ||
781 | { | ||
782 | int reg; | ||
783 | u32 val; | ||
784 | bool cur_state; | ||
785 | |||
786 | reg = DPLL(pipe); | ||
787 | val = I915_READ(reg); | ||
788 | cur_state = !!(val & DPLL_VCO_ENABLE); | ||
789 | WARN(cur_state != state, | ||
790 | "PLL state assertion failure (expected %s, current %s)\n", | ||
791 | state_string(state), state_string(cur_state)); | ||
792 | } | ||
793 | #define assert_pll_enabled(d, p) assert_pll(d, p, true) | ||
794 | #define assert_pll_disabled(d, p) assert_pll(d, p, false) | ||
795 | |||
796 | /* For ILK+ */ | ||
797 | static void assert_pch_pll(struct drm_i915_private *dev_priv, | ||
798 | enum pipe pipe, bool state) | ||
799 | { | ||
800 | int reg; | ||
801 | u32 val; | ||
802 | bool cur_state; | ||
803 | |||
804 | reg = PCH_DPLL(pipe); | ||
805 | val = I915_READ(reg); | ||
806 | cur_state = !!(val & DPLL_VCO_ENABLE); | ||
807 | WARN(cur_state != state, | ||
808 | "PCH PLL state assertion failure (expected %s, current %s)\n", | ||
809 | state_string(state), state_string(cur_state)); | ||
810 | } | ||
811 | #define assert_pch_pll_enabled(d, p) assert_pch_pll(d, p, true) | ||
812 | #define assert_pch_pll_disabled(d, p) assert_pch_pll(d, p, false) | ||
813 | |||
814 | static void assert_fdi_tx(struct drm_i915_private *dev_priv, | ||
815 | enum pipe pipe, bool state) | ||
816 | { | ||
817 | int reg; | ||
818 | u32 val; | ||
819 | bool cur_state; | ||
820 | |||
821 | reg = FDI_TX_CTL(pipe); | ||
822 | val = I915_READ(reg); | ||
823 | cur_state = !!(val & FDI_TX_ENABLE); | ||
824 | WARN(cur_state != state, | ||
825 | "FDI TX state assertion failure (expected %s, current %s)\n", | ||
826 | state_string(state), state_string(cur_state)); | ||
827 | } | ||
828 | #define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true) | ||
829 | #define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false) | ||
830 | |||
831 | static void assert_fdi_rx(struct drm_i915_private *dev_priv, | ||
832 | enum pipe pipe, bool state) | ||
833 | { | ||
834 | int reg; | ||
835 | u32 val; | ||
836 | bool cur_state; | ||
837 | |||
838 | reg = FDI_RX_CTL(pipe); | ||
839 | val = I915_READ(reg); | ||
840 | cur_state = !!(val & FDI_RX_ENABLE); | ||
841 | WARN(cur_state != state, | ||
842 | "FDI RX state assertion failure (expected %s, current %s)\n", | ||
843 | state_string(state), state_string(cur_state)); | ||
844 | } | ||
845 | #define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true) | ||
846 | #define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false) | ||
847 | |||
848 | static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv, | ||
849 | enum pipe pipe) | ||
850 | { | ||
851 | int reg; | ||
852 | u32 val; | ||
853 | |||
854 | /* ILK FDI PLL is always enabled */ | ||
855 | if (dev_priv->info->gen == 5) | ||
856 | return; | ||
857 | |||
858 | reg = FDI_TX_CTL(pipe); | ||
859 | val = I915_READ(reg); | ||
860 | WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n"); | ||
861 | } | ||
862 | |||
863 | static void assert_fdi_rx_pll_enabled(struct drm_i915_private *dev_priv, | ||
864 | enum pipe pipe) | ||
865 | { | ||
866 | int reg; | ||
867 | u32 val; | ||
868 | |||
869 | reg = FDI_RX_CTL(pipe); | ||
870 | val = I915_READ(reg); | ||
871 | WARN(!(val & FDI_RX_PLL_ENABLE), "FDI RX PLL assertion failure, should be active but is disabled\n"); | ||
872 | } | ||
873 | |||
874 | static void assert_panel_unlocked(struct drm_i915_private *dev_priv, | ||
875 | enum pipe pipe) | ||
876 | { | ||
877 | int pp_reg, lvds_reg; | ||
878 | u32 val; | ||
879 | enum pipe panel_pipe = PIPE_A; | ||
880 | bool locked = true; | ||
881 | |||
882 | if (HAS_PCH_SPLIT(dev_priv->dev)) { | ||
883 | pp_reg = PCH_PP_CONTROL; | ||
884 | lvds_reg = PCH_LVDS; | ||
885 | } else { | ||
886 | pp_reg = PP_CONTROL; | ||
887 | lvds_reg = LVDS; | ||
888 | } | ||
889 | |||
890 | val = I915_READ(pp_reg); | ||
891 | if (!(val & PANEL_POWER_ON) || | ||
892 | ((val & PANEL_UNLOCK_REGS) == PANEL_UNLOCK_REGS)) | ||
893 | locked = false; | ||
894 | |||
895 | if (I915_READ(lvds_reg) & LVDS_PIPEB_SELECT) | ||
896 | panel_pipe = PIPE_B; | ||
897 | |||
898 | WARN(panel_pipe == pipe && locked, | ||
899 | "panel assertion failure, pipe %c regs locked\n", | ||
900 | pipe_name(pipe)); | ||
901 | } | ||
902 | |||
903 | static void assert_pipe(struct drm_i915_private *dev_priv, | ||
904 | enum pipe pipe, bool state) | ||
905 | { | ||
906 | int reg; | ||
907 | u32 val; | ||
908 | bool cur_state; | ||
909 | |||
910 | reg = PIPECONF(pipe); | ||
911 | val = I915_READ(reg); | ||
912 | cur_state = !!(val & PIPECONF_ENABLE); | ||
913 | WARN(cur_state != state, | ||
914 | "pipe %c assertion failure (expected %s, current %s)\n", | ||
915 | pipe_name(pipe), state_string(state), state_string(cur_state)); | ||
916 | } | ||
917 | #define assert_pipe_enabled(d, p) assert_pipe(d, p, true) | ||
918 | #define assert_pipe_disabled(d, p) assert_pipe(d, p, false) | ||
919 | |||
920 | static void assert_plane_enabled(struct drm_i915_private *dev_priv, | ||
921 | enum plane plane) | ||
922 | { | ||
923 | int reg; | ||
924 | u32 val; | ||
925 | |||
926 | reg = DSPCNTR(plane); | ||
927 | val = I915_READ(reg); | ||
928 | WARN(!(val & DISPLAY_PLANE_ENABLE), | ||
929 | "plane %c assertion failure, should be active but is disabled\n", | ||
930 | plane_name(plane)); | ||
931 | } | ||
932 | |||
933 | static void assert_planes_disabled(struct drm_i915_private *dev_priv, | ||
934 | enum pipe pipe) | ||
935 | { | ||
936 | int reg, i; | ||
937 | u32 val; | ||
938 | int cur_pipe; | ||
939 | |||
940 | /* Planes are fixed to pipes on ILK+ */ | ||
941 | if (HAS_PCH_SPLIT(dev_priv->dev)) | ||
942 | return; | ||
943 | |||
944 | /* Need to check both planes against the pipe */ | ||
945 | for (i = 0; i < 2; i++) { | ||
946 | reg = DSPCNTR(i); | ||
947 | val = I915_READ(reg); | ||
948 | cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >> | ||
949 | DISPPLANE_SEL_PIPE_SHIFT; | ||
950 | WARN((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe, | ||
951 | "plane %c assertion failure, should be off on pipe %c but is still active\n", | ||
952 | plane_name(i), pipe_name(pipe)); | ||
953 | } | ||
954 | } | ||
955 | |||
956 | static void assert_pch_refclk_enabled(struct drm_i915_private *dev_priv) | ||
957 | { | ||
958 | u32 val; | ||
959 | bool enabled; | ||
960 | |||
961 | val = I915_READ(PCH_DREF_CONTROL); | ||
962 | enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK | | ||
963 | DREF_SUPERSPREAD_SOURCE_MASK)); | ||
964 | WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n"); | ||
965 | } | ||
966 | |||
967 | static void assert_transcoder_disabled(struct drm_i915_private *dev_priv, | ||
968 | enum pipe pipe) | ||
969 | { | ||
970 | int reg; | ||
971 | u32 val; | ||
972 | bool enabled; | ||
973 | |||
974 | reg = TRANSCONF(pipe); | ||
975 | val = I915_READ(reg); | ||
976 | enabled = !!(val & TRANS_ENABLE); | ||
977 | WARN(enabled, | ||
978 | "transcoder assertion failed, should be off on pipe %c but is still active\n", | ||
979 | pipe_name(pipe)); | ||
980 | } | ||
981 | |||
982 | static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv, | ||
983 | enum pipe pipe, int reg) | ||
984 | { | ||
985 | u32 val = I915_READ(reg); | ||
986 | WARN(DP_PIPE_ENABLED(val, pipe), | ||
987 | "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n", | ||
988 | reg, pipe_name(pipe)); | ||
989 | } | ||
990 | |||
991 | static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv, | ||
992 | enum pipe pipe, int reg) | ||
993 | { | ||
994 | u32 val = I915_READ(reg); | ||
995 | WARN(HDMI_PIPE_ENABLED(val, pipe), | ||
996 | "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n", | ||
997 | reg, pipe_name(pipe)); | ||
998 | } | ||
999 | |||
1000 | static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv, | ||
1001 | enum pipe pipe) | ||
1002 | { | ||
1003 | int reg; | ||
1004 | u32 val; | ||
1005 | |||
1006 | assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B); | ||
1007 | assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C); | ||
1008 | assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D); | ||
1009 | |||
1010 | reg = PCH_ADPA; | ||
1011 | val = I915_READ(reg); | ||
1012 | WARN(ADPA_PIPE_ENABLED(val, pipe), | ||
1013 | "PCH VGA enabled on transcoder %c, should be disabled\n", | ||
1014 | pipe_name(pipe)); | ||
1015 | |||
1016 | reg = PCH_LVDS; | ||
1017 | val = I915_READ(reg); | ||
1018 | WARN(LVDS_PIPE_ENABLED(val, pipe), | ||
1019 | "PCH LVDS enabled on transcoder %c, should be disabled\n", | ||
1020 | pipe_name(pipe)); | ||
1021 | |||
1022 | assert_pch_hdmi_disabled(dev_priv, pipe, HDMIB); | ||
1023 | assert_pch_hdmi_disabled(dev_priv, pipe, HDMIC); | ||
1024 | assert_pch_hdmi_disabled(dev_priv, pipe, HDMID); | ||
1025 | } | ||
1026 | |||
1027 | /** | ||
1028 | * intel_enable_pll - enable a PLL | ||
1029 | * @dev_priv: i915 private structure | ||
1030 | * @pipe: pipe PLL to enable | ||
1031 | * | ||
1032 | * Enable @pipe's PLL so we can start pumping pixels from a plane. Check to | ||
1033 | * make sure the PLL reg is writable first though, since the panel write | ||
1034 | * protect mechanism may be enabled. | ||
1035 | * | ||
1036 | * Note! This is for pre-ILK only. | ||
1037 | */ | ||
1038 | static void intel_enable_pll(struct drm_i915_private *dev_priv, enum pipe pipe) | ||
1039 | { | ||
1040 | int reg; | ||
1041 | u32 val; | ||
1042 | |||
1043 | /* No really, not for ILK+ */ | ||
1044 | BUG_ON(dev_priv->info->gen >= 5); | ||
1045 | |||
1046 | /* PLL is protected by panel, make sure we can write it */ | ||
1047 | if (IS_MOBILE(dev_priv->dev) && !IS_I830(dev_priv->dev)) | ||
1048 | assert_panel_unlocked(dev_priv, pipe); | ||
1049 | |||
1050 | reg = DPLL(pipe); | ||
1051 | val = I915_READ(reg); | ||
1052 | val |= DPLL_VCO_ENABLE; | ||
1053 | |||
1054 | /* We do this three times for luck */ | ||
1055 | I915_WRITE(reg, val); | ||
1056 | POSTING_READ(reg); | ||
1057 | udelay(150); /* wait for warmup */ | ||
1058 | I915_WRITE(reg, val); | ||
1059 | POSTING_READ(reg); | ||
1060 | udelay(150); /* wait for warmup */ | ||
1061 | I915_WRITE(reg, val); | ||
1062 | POSTING_READ(reg); | ||
1063 | udelay(150); /* wait for warmup */ | ||
1064 | } | ||
1065 | |||
1066 | /** | ||
1067 | * intel_disable_pll - disable a PLL | ||
1068 | * @dev_priv: i915 private structure | ||
1069 | * @pipe: pipe PLL to disable | ||
1070 | * | ||
1071 | * Disable the PLL for @pipe, making sure the pipe is off first. | ||
1072 | * | ||
1073 | * Note! This is for pre-ILK only. | ||
1074 | */ | ||
1075 | static void intel_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe) | ||
1076 | { | ||
1077 | int reg; | ||
1078 | u32 val; | ||
1079 | |||
1080 | /* Don't disable the pipe A PLL if the pipe A force quirk needs it on */ | ||
1081 | if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE)) | ||
1082 | return; | ||
1083 | |||
1084 | /* Make sure the pipe isn't still relying on us */ | ||
1085 | assert_pipe_disabled(dev_priv, pipe); | ||
1086 | |||
1087 | reg = DPLL(pipe); | ||
1088 | val = I915_READ(reg); | ||
1089 | val &= ~DPLL_VCO_ENABLE; | ||
1090 | I915_WRITE(reg, val); | ||
1091 | POSTING_READ(reg); | ||
1092 | } | ||
1093 | |||
1094 | /** | ||
1095 | * intel_enable_pch_pll - enable PCH PLL | ||
1096 | * @dev_priv: i915 private structure | ||
1097 | * @pipe: pipe PLL to enable | ||
1098 | * | ||
1099 | * The PCH PLL needs to be enabled before the PCH transcoder, since it | ||
1100 | * drives the transcoder clock. | ||
1101 | */ | ||
1102 | static void intel_enable_pch_pll(struct drm_i915_private *dev_priv, | ||
1103 | enum pipe pipe) | ||
1104 | { | ||
1105 | int reg; | ||
1106 | u32 val; | ||
1107 | |||
1108 | /* PCH only available on ILK+ */ | ||
1109 | BUG_ON(dev_priv->info->gen < 5); | ||
1110 | |||
1111 | /* PCH refclock must be enabled first */ | ||
1112 | assert_pch_refclk_enabled(dev_priv); | ||
1113 | |||
1114 | reg = PCH_DPLL(pipe); | ||
1115 | val = I915_READ(reg); | ||
1116 | val |= DPLL_VCO_ENABLE; | ||
1117 | I915_WRITE(reg, val); | ||
1118 | POSTING_READ(reg); | ||
1119 | udelay(200); | ||
1120 | } | ||
1121 | |||
1122 | static void intel_disable_pch_pll(struct drm_i915_private *dev_priv, | ||
1123 | enum pipe pipe) | ||
1124 | { | ||
1125 | int reg; | ||
1126 | u32 val; | ||
1127 | |||
1128 | /* PCH only available on ILK+ */ | ||
1129 | BUG_ON(dev_priv->info->gen < 5); | ||
1130 | |||
1131 | /* Make sure transcoder isn't still depending on us */ | ||
1132 | assert_transcoder_disabled(dev_priv, pipe); | ||
1133 | |||
1134 | reg = PCH_DPLL(pipe); | ||
1135 | val = I915_READ(reg); | ||
1136 | val &= ~DPLL_VCO_ENABLE; | ||
1137 | I915_WRITE(reg, val); | ||
1138 | POSTING_READ(reg); | ||
1139 | udelay(200); | ||
1140 | } | ||
1141 | |||
1142 | static void intel_enable_transcoder(struct drm_i915_private *dev_priv, | ||
1143 | enum pipe pipe) | ||
1144 | { | ||
1145 | int reg; | ||
1146 | u32 val; | ||
1147 | |||
1148 | /* PCH only available on ILK+ */ | ||
1149 | BUG_ON(dev_priv->info->gen < 5); | ||
1150 | |||
1151 | /* Make sure PCH DPLL is enabled */ | ||
1152 | assert_pch_pll_enabled(dev_priv, pipe); | ||
1153 | |||
1154 | /* FDI must be feeding us bits for PCH ports */ | ||
1155 | assert_fdi_tx_enabled(dev_priv, pipe); | ||
1156 | assert_fdi_rx_enabled(dev_priv, pipe); | ||
1157 | |||
1158 | reg = TRANSCONF(pipe); | ||
1159 | val = I915_READ(reg); | ||
1160 | /* | ||
1161 | * Make the BPC in the transcoder consistent with | ||
1162 | * that in the pipeconf reg. | ||
1163 | */ | ||
1164 | val &= ~PIPE_BPC_MASK; | ||
1165 | val |= I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK; | ||
1166 | I915_WRITE(reg, val | TRANS_ENABLE); | ||
1167 | if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100)) | ||
1168 | DRM_ERROR("failed to enable transcoder %d\n", pipe); | ||
1169 | } | ||
1170 | |||
1171 | static void intel_disable_transcoder(struct drm_i915_private *dev_priv, | ||
1172 | enum pipe pipe) | ||
1173 | { | ||
1174 | int reg; | ||
1175 | u32 val; | ||
1176 | |||
1177 | /* FDI relies on the transcoder */ | ||
1178 | assert_fdi_tx_disabled(dev_priv, pipe); | ||
1179 | assert_fdi_rx_disabled(dev_priv, pipe); | ||
1180 | |||
1181 | /* Ports must be off as well */ | ||
1182 | assert_pch_ports_disabled(dev_priv, pipe); | ||
1183 | |||
1184 | reg = TRANSCONF(pipe); | ||
1185 | val = I915_READ(reg); | ||
1186 | val &= ~TRANS_ENABLE; | ||
1187 | I915_WRITE(reg, val); | ||
1188 | /* wait for PCH transcoder off, transcoder state */ | ||
1189 | if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50)) | ||
1190 | DRM_ERROR("failed to disable transcoder\n"); | ||
1191 | } | ||
1192 | |||
1193 | /** | ||
1194 | * intel_enable_pipe - enable a pipe, asserting requirements | ||
1195 | * @dev_priv: i915 private structure | ||
1196 | * @pipe: pipe to enable | ||
1197 | * @pch_port: on ILK+, is this pipe driving a PCH port or not | ||
1198 | * | ||
1199 | * Enable @pipe, making sure that various hardware specific requirements | ||
1200 | * are met, if applicable, e.g. PLL enabled, LVDS pairs enabled, etc. | ||
1201 | * | ||
1202 | * @pipe should be %PIPE_A or %PIPE_B. | ||
1203 | * | ||
1204 | * Will wait until the pipe is actually running (i.e. first vblank) before | ||
1205 | * returning. | ||
1206 | */ | ||
1207 | static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, | ||
1208 | bool pch_port) | ||
1209 | { | ||
1210 | int reg; | ||
1211 | u32 val; | ||
1212 | |||
1213 | /* | ||
1214 | * A pipe without a PLL won't actually be able to drive bits from | ||
1215 | * a plane. On ILK+ the pipe PLLs are integrated, so we don't | ||
1216 | * need the check. | ||
1217 | */ | ||
1218 | if (!HAS_PCH_SPLIT(dev_priv->dev)) | ||
1219 | assert_pll_enabled(dev_priv, pipe); | ||
1220 | else { | ||
1221 | if (pch_port) { | ||
1222 | /* if driving the PCH, we need FDI enabled */ | ||
1223 | assert_fdi_rx_pll_enabled(dev_priv, pipe); | ||
1224 | assert_fdi_tx_pll_enabled(dev_priv, pipe); | ||
1225 | } | ||
1226 | /* FIXME: assert CPU port conditions for SNB+ */ | ||
1227 | } | ||
1228 | |||
1229 | reg = PIPECONF(pipe); | ||
1230 | val = I915_READ(reg); | ||
1231 | if (val & PIPECONF_ENABLE) | ||
1232 | return; | ||
1233 | |||
1234 | I915_WRITE(reg, val | PIPECONF_ENABLE); | ||
1235 | intel_wait_for_vblank(dev_priv->dev, pipe); | ||
1236 | } | ||
1237 | |||
1238 | /** | ||
1239 | * intel_disable_pipe - disable a pipe, asserting requirements | ||
1240 | * @dev_priv: i915 private structure | ||
1241 | * @pipe: pipe to disable | ||
1242 | * | ||
1243 | * Disable @pipe, making sure that various hardware specific requirements | ||
1244 | * are met, if applicable, e.g. plane disabled, panel fitter off, etc. | ||
1245 | * | ||
1246 | * @pipe should be %PIPE_A or %PIPE_B. | ||
1247 | * | ||
1248 | * Will wait until the pipe has shut down before returning. | ||
1249 | */ | ||
1250 | static void intel_disable_pipe(struct drm_i915_private *dev_priv, | ||
1251 | enum pipe pipe) | ||
1252 | { | ||
1253 | int reg; | ||
1254 | u32 val; | ||
1255 | |||
1256 | /* | ||
1257 | * Make sure planes won't keep trying to pump pixels to us, | ||
1258 | * or we might hang the display. | ||
1259 | */ | ||
1260 | assert_planes_disabled(dev_priv, pipe); | ||
1261 | |||
1262 | /* Don't disable pipe A if the pipe A force quirk needs it running */ | ||
1263 | if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE)) | ||
1264 | return; | ||
1265 | |||
1266 | reg = PIPECONF(pipe); | ||
1267 | val = I915_READ(reg); | ||
1268 | if ((val & PIPECONF_ENABLE) == 0) | ||
1269 | return; | ||
1270 | |||
1271 | I915_WRITE(reg, val & ~PIPECONF_ENABLE); | ||
1272 | intel_wait_for_pipe_off(dev_priv->dev, pipe); | ||
1273 | } | ||
1274 | |||
1275 | /** | ||
1276 | * intel_enable_plane - enable a display plane on a given pipe | ||
1277 | * @dev_priv: i915 private structure | ||
1278 | * @plane: plane to enable | ||
1279 | * @pipe: pipe being fed | ||
1280 | * | ||
1281 | * Enable @plane on @pipe, making sure that @pipe is running first. | ||
1282 | */ | ||
1283 | static void intel_enable_plane(struct drm_i915_private *dev_priv, | ||
1284 | enum plane plane, enum pipe pipe) | ||
1285 | { | ||
1286 | int reg; | ||
1287 | u32 val; | ||
1288 | |||
1289 | /* If the pipe isn't enabled, we can't pump pixels and may hang */ | ||
1290 | assert_pipe_enabled(dev_priv, pipe); | ||
1291 | |||
1292 | reg = DSPCNTR(plane); | ||
1293 | val = I915_READ(reg); | ||
1294 | if (val & DISPLAY_PLANE_ENABLE) | ||
1295 | return; | ||
1296 | |||
1297 | I915_WRITE(reg, val | DISPLAY_PLANE_ENABLE); | ||
1298 | intel_wait_for_vblank(dev_priv->dev, pipe); | ||
1299 | } | ||
1300 | |||
1301 | /* | ||
1302 | * Plane regs are double buffered, going from enabled->disabled needs a | ||
1303 | * trigger in order to latch. The display address reg provides this. | ||
1304 | */ | ||
1305 | static void intel_flush_display_plane(struct drm_i915_private *dev_priv, | ||
1306 | enum plane plane) | ||
1307 | { | ||
1308 | u32 reg = DSPADDR(plane); | ||
1309 | I915_WRITE(reg, I915_READ(reg)); | ||
1310 | } | ||
1311 | |||
1312 | /** | ||
1313 | * intel_disable_plane - disable a display plane | ||
1314 | * @dev_priv: i915 private structure | ||
1315 | * @plane: plane to disable | ||
1316 | * @pipe: pipe consuming the data | ||
1317 | * | ||
1318 | * Disable @plane; should be an independent operation. | ||
1319 | */ | ||
1320 | static void intel_disable_plane(struct drm_i915_private *dev_priv, | ||
1321 | enum plane plane, enum pipe pipe) | ||
1322 | { | ||
1323 | int reg; | ||
1324 | u32 val; | ||
1325 | |||
1326 | reg = DSPCNTR(plane); | ||
1327 | val = I915_READ(reg); | ||
1328 | if ((val & DISPLAY_PLANE_ENABLE) == 0) | ||
1329 | return; | ||
1330 | |||
1331 | I915_WRITE(reg, val & ~DISPLAY_PLANE_ENABLE); | ||
1332 | intel_flush_display_plane(dev_priv, plane); | ||
1333 | intel_wait_for_vblank(dev_priv->dev, pipe); | ||
1334 | } | ||
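
The pre-ILK helpers above likewise encode a strict ordering through their asserts, PLL before pipe before plane; a hedged sketch of a mode-set and teardown sequence built from them:

    intel_enable_pll(dev_priv, pipe);          /* pipe needs a running PLL */
    intel_enable_pipe(dev_priv, pipe, false);  /* no PCH port on pre-ILK */
    intel_enable_plane(dev_priv, plane, pipe); /* plane needs a running pipe */

    intel_disable_plane(dev_priv, plane, pipe);
    intel_disable_pipe(dev_priv, pipe);
    intel_disable_pll(dev_priv, pipe);
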
1335 | |||
1336 | static void disable_pch_dp(struct drm_i915_private *dev_priv, | ||
1337 | enum pipe pipe, int reg) | ||
1338 | { | ||
1339 | u32 val = I915_READ(reg); | ||
1340 | if (DP_PIPE_ENABLED(val, pipe)) | ||
1341 | I915_WRITE(reg, val & ~DP_PORT_EN); | ||
1342 | } | ||
1343 | |||
1344 | static void disable_pch_hdmi(struct drm_i915_private *dev_priv, | ||
1345 | enum pipe pipe, int reg) | ||
1346 | { | ||
1347 | u32 val = I915_READ(reg); | ||
1348 | if (HDMI_PIPE_ENABLED(val, pipe)) | ||
1349 | I915_WRITE(reg, val & ~PORT_ENABLE); | ||
1350 | } | ||
1351 | |||
1352 | /* Disable any ports connected to this transcoder */ | ||
1353 | static void intel_disable_pch_ports(struct drm_i915_private *dev_priv, | ||
1354 | enum pipe pipe) | ||
1355 | { | ||
1356 | u32 reg, val; | ||
1357 | |||
1358 | val = I915_READ(PCH_PP_CONTROL); | ||
1359 | I915_WRITE(PCH_PP_CONTROL, val | PANEL_UNLOCK_REGS); | ||
1360 | |||
1361 | disable_pch_dp(dev_priv, pipe, PCH_DP_B); | ||
1362 | disable_pch_dp(dev_priv, pipe, PCH_DP_C); | ||
1363 | disable_pch_dp(dev_priv, pipe, PCH_DP_D); | ||
1364 | |||
1365 | reg = PCH_ADPA; | ||
1366 | val = I915_READ(reg); | ||
1367 | if (ADPA_PIPE_ENABLED(val, pipe)) | ||
1368 | I915_WRITE(reg, val & ~ADPA_DAC_ENABLE); | ||
1369 | |||
1370 | reg = PCH_LVDS; | ||
1371 | val = I915_READ(reg); | ||
1372 | if (LVDS_PIPE_ENABLED(val, pipe)) { | ||
1373 | I915_WRITE(reg, val & ~LVDS_PORT_EN); | ||
1374 | POSTING_READ(reg); | ||
1375 | udelay(100); | ||
1376 | } | ||
1377 | |||
1378 | disable_pch_hdmi(dev_priv, pipe, HDMIB); | ||
1379 | disable_pch_hdmi(dev_priv, pipe, HDMIC); | ||
1380 | disable_pch_hdmi(dev_priv, pipe, HDMID); | ||
1381 | } | ||
1382 | |||
1061 | static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval) | 1383 | static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval) |
1062 | { | 1384 | { |
1063 | struct drm_device *dev = crtc->dev; | 1385 | struct drm_device *dev = crtc->dev; |
1064 | struct drm_i915_private *dev_priv = dev->dev_private; | 1386 | struct drm_i915_private *dev_priv = dev->dev_private; |
1065 | struct drm_framebuffer *fb = crtc->fb; | 1387 | struct drm_framebuffer *fb = crtc->fb; |
1066 | struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); | 1388 | struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); |
1067 | struct drm_i915_gem_object *obj_priv = to_intel_bo(intel_fb->obj); | 1389 | struct drm_i915_gem_object *obj = intel_fb->obj; |
1068 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 1390 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
1069 | int plane, i; | 1391 | int plane, i; |
1070 | u32 fbc_ctl, fbc_ctl2; | 1392 | u32 fbc_ctl, fbc_ctl2; |
1071 | 1393 | ||
1394 | if (fb->pitch == dev_priv->cfb_pitch && | ||
1395 | obj->fence_reg == dev_priv->cfb_fence && | ||
1396 | intel_crtc->plane == dev_priv->cfb_plane && | ||
1397 | I915_READ(FBC_CONTROL) & FBC_CTL_EN) | ||
1398 | return; | ||
1399 | |||
1400 | i8xx_disable_fbc(dev); | ||
1401 | |||
1072 | dev_priv->cfb_pitch = dev_priv->cfb_size / FBC_LL_SIZE; | 1402 | dev_priv->cfb_pitch = dev_priv->cfb_size / FBC_LL_SIZE; |
1073 | 1403 | ||
1074 | if (fb->pitch < dev_priv->cfb_pitch) | 1404 | if (fb->pitch < dev_priv->cfb_pitch) |
@@ -1076,7 +1406,7 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval) | |||
1076 | 1406 | ||
1077 | /* FBC_CTL wants 64B units */ | 1407 | /* FBC_CTL wants 64B units */ |
1078 | dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1; | 1408 | dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1; |
1079 | dev_priv->cfb_fence = obj_priv->fence_reg; | 1409 | dev_priv->cfb_fence = obj->fence_reg; |
1080 | dev_priv->cfb_plane = intel_crtc->plane; | 1410 | dev_priv->cfb_plane = intel_crtc->plane; |
1081 | plane = dev_priv->cfb_plane == 0 ? FBC_CTL_PLANEA : FBC_CTL_PLANEB; | 1411 | plane = dev_priv->cfb_plane == 0 ? FBC_CTL_PLANEA : FBC_CTL_PLANEB; |
1082 | 1412 | ||
@@ -1086,7 +1416,7 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval) | |||
1086 | 1416 | ||
1087 | /* Set it up... */ | 1417 | /* Set it up... */ |
1088 | fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | plane; | 1418 | fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | plane; |
1089 | if (obj_priv->tiling_mode != I915_TILING_NONE) | 1419 | if (obj->tiling_mode != I915_TILING_NONE) |
1090 | fbc_ctl2 |= FBC_CTL_CPU_FENCE; | 1420 | fbc_ctl2 |= FBC_CTL_CPU_FENCE; |
1091 | I915_WRITE(FBC_CONTROL2, fbc_ctl2); | 1421 | I915_WRITE(FBC_CONTROL2, fbc_ctl2); |
1092 | I915_WRITE(FBC_FENCE_OFF, crtc->y); | 1422 | I915_WRITE(FBC_FENCE_OFF, crtc->y); |
@@ -1097,12 +1427,12 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval) | |||
1097 | fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */ | 1427 | fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */ |
1098 | fbc_ctl |= (dev_priv->cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT; | 1428 | fbc_ctl |= (dev_priv->cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT; |
1099 | fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT; | 1429 | fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT; |
1100 | if (obj_priv->tiling_mode != I915_TILING_NONE) | 1430 | if (obj->tiling_mode != I915_TILING_NONE) |
1101 | fbc_ctl |= dev_priv->cfb_fence; | 1431 | fbc_ctl |= dev_priv->cfb_fence; |
1102 | I915_WRITE(FBC_CONTROL, fbc_ctl); | 1432 | I915_WRITE(FBC_CONTROL, fbc_ctl); |
1103 | 1433 | ||
1104 | DRM_DEBUG_KMS("enabled FBC, pitch %ld, yoff %d, plane %d, ", | 1434 | DRM_DEBUG_KMS("enabled FBC, pitch %ld, yoff %d, plane %d, ", |
1105 | dev_priv->cfb_pitch, crtc->y, dev_priv->cfb_plane); | 1435 | dev_priv->cfb_pitch, crtc->y, dev_priv->cfb_plane); |
1106 | } | 1436 | } |
1107 | 1437 | ||
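FBC_CTL's stride field takes the compressed-buffer pitch in 64-byte units, biased by one and masked to eight bits, so a 4096-byte pitch is programmed as 63. The encoding used above, pulled out as a sketch (the helper name is illustrative, not a driver function):

/* FBC_CTL stride encoding from i8xx_enable_fbc(): 64B units, minus
 * one, masked to the 8-bit field as in the diff. */
static inline u32 fbc_ctl_stride(unsigned long pitch_bytes)
{
	return ((pitch_bytes / 64) - 1) & 0xff;
}
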
1108 | void i8xx_disable_fbc(struct drm_device *dev) | 1438 | void i8xx_disable_fbc(struct drm_device *dev) |
@@ -1110,19 +1440,16 @@ void i8xx_disable_fbc(struct drm_device *dev) | |||
1110 | struct drm_i915_private *dev_priv = dev->dev_private; | 1440 | struct drm_i915_private *dev_priv = dev->dev_private; |
1111 | u32 fbc_ctl; | 1441 | u32 fbc_ctl; |
1112 | 1442 | ||
1113 | if (!I915_HAS_FBC(dev)) | ||
1114 | return; | ||
1115 | |||
1116 | if (!(I915_READ(FBC_CONTROL) & FBC_CTL_EN)) | ||
1117 | return; /* Already off, just return */ | ||
1118 | |||
1119 | /* Disable compression */ | 1443 | /* Disable compression */ |
1120 | fbc_ctl = I915_READ(FBC_CONTROL); | 1444 | fbc_ctl = I915_READ(FBC_CONTROL); |
1445 | if ((fbc_ctl & FBC_CTL_EN) == 0) | ||
1446 | return; | ||
1447 | |||
1121 | fbc_ctl &= ~FBC_CTL_EN; | 1448 | fbc_ctl &= ~FBC_CTL_EN; |
1122 | I915_WRITE(FBC_CONTROL, fbc_ctl); | 1449 | I915_WRITE(FBC_CONTROL, fbc_ctl); |
1123 | 1450 | ||
1124 | /* Wait for compressing bit to clear */ | 1451 | /* Wait for compressing bit to clear */ |
1125 | if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10, 0)) { | 1452 | if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10)) { |
1126 | DRM_DEBUG_KMS("FBC idle timed out\n"); | 1453 | DRM_DEBUG_KMS("FBC idle timed out\n"); |
1127 | return; | 1454 | return; |
1128 | } | 1455 | } |
@@ -1143,26 +1470,37 @@ static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval) | |||
1143 | struct drm_i915_private *dev_priv = dev->dev_private; | 1470 | struct drm_i915_private *dev_priv = dev->dev_private; |
1144 | struct drm_framebuffer *fb = crtc->fb; | 1471 | struct drm_framebuffer *fb = crtc->fb; |
1145 | struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); | 1472 | struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); |
1146 | struct drm_i915_gem_object *obj_priv = to_intel_bo(intel_fb->obj); | 1473 | struct drm_i915_gem_object *obj = intel_fb->obj; |
1147 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 1474 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
1148 | int plane = (intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : | 1475 | int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB; |
1149 | DPFC_CTL_PLANEB); | ||
1150 | unsigned long stall_watermark = 200; | 1476 | unsigned long stall_watermark = 200; |
1151 | u32 dpfc_ctl; | 1477 | u32 dpfc_ctl; |
1152 | 1478 | ||
1479 | dpfc_ctl = I915_READ(DPFC_CONTROL); | ||
1480 | if (dpfc_ctl & DPFC_CTL_EN) { | ||
1481 | if (dev_priv->cfb_pitch == dev_priv->cfb_pitch / 64 - 1 && | ||
1482 | dev_priv->cfb_fence == obj->fence_reg && | ||
1483 | dev_priv->cfb_plane == intel_crtc->plane && | ||
1484 | dev_priv->cfb_y == crtc->y) | ||
1485 | return; | ||
1486 | |||
1487 | I915_WRITE(DPFC_CONTROL, dpfc_ctl & ~DPFC_CTL_EN); | ||
1488 | intel_wait_for_vblank(dev, intel_crtc->pipe); | ||
1489 | } | ||
1490 | |||
1153 | dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1; | 1491 | dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1; |
1154 | dev_priv->cfb_fence = obj_priv->fence_reg; | 1492 | dev_priv->cfb_fence = obj->fence_reg; |
1155 | dev_priv->cfb_plane = intel_crtc->plane; | 1493 | dev_priv->cfb_plane = intel_crtc->plane; |
1494 | dev_priv->cfb_y = crtc->y; | ||
1156 | 1495 | ||
1157 | dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X; | 1496 | dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X; |
1158 | if (obj_priv->tiling_mode != I915_TILING_NONE) { | 1497 | if (obj->tiling_mode != I915_TILING_NONE) { |
1159 | dpfc_ctl |= DPFC_CTL_FENCE_EN | dev_priv->cfb_fence; | 1498 | dpfc_ctl |= DPFC_CTL_FENCE_EN | dev_priv->cfb_fence; |
1160 | I915_WRITE(DPFC_CHICKEN, DPFC_HT_MODIFY); | 1499 | I915_WRITE(DPFC_CHICKEN, DPFC_HT_MODIFY); |
1161 | } else { | 1500 | } else { |
1162 | I915_WRITE(DPFC_CHICKEN, ~DPFC_HT_MODIFY); | 1501 | I915_WRITE(DPFC_CHICKEN, ~DPFC_HT_MODIFY); |
1163 | } | 1502 | } |
1164 | 1503 | ||
1165 | I915_WRITE(DPFC_CONTROL, dpfc_ctl); | ||
1166 | I915_WRITE(DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN | | 1504 | I915_WRITE(DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN | |
1167 | (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) | | 1505 | (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) | |
1168 | (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT)); | 1506 | (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT)); |
@@ -1181,10 +1519,12 @@ void g4x_disable_fbc(struct drm_device *dev) | |||
1181 | 1519 | ||
1182 | /* Disable compression */ | 1520 | /* Disable compression */ |
1183 | dpfc_ctl = I915_READ(DPFC_CONTROL); | 1521 | dpfc_ctl = I915_READ(DPFC_CONTROL); |
1184 | dpfc_ctl &= ~DPFC_CTL_EN; | 1522 | if (dpfc_ctl & DPFC_CTL_EN) { |
1185 | I915_WRITE(DPFC_CONTROL, dpfc_ctl); | 1523 | dpfc_ctl &= ~DPFC_CTL_EN; |
1524 | I915_WRITE(DPFC_CONTROL, dpfc_ctl); | ||
1186 | 1525 | ||
1187 | DRM_DEBUG_KMS("disabled FBC\n"); | 1526 | DRM_DEBUG_KMS("disabled FBC\n"); |
1527 | } | ||
1188 | } | 1528 | } |
1189 | 1529 | ||
1190 | static bool g4x_fbc_enabled(struct drm_device *dev) | 1530 | static bool g4x_fbc_enabled(struct drm_device *dev) |
@@ -1194,42 +1534,80 @@ static bool g4x_fbc_enabled(struct drm_device *dev) | |||
1194 | return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN; | 1534 | return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN; |
1195 | } | 1535 | } |
1196 | 1536 | ||
1537 | static void sandybridge_blit_fbc_update(struct drm_device *dev) | ||
1538 | { | ||
1539 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1540 | u32 blt_ecoskpd; | ||
1541 | |||
1542 | /* Make sure blitter notifies FBC of writes */ | ||
1543 | gen6_gt_force_wake_get(dev_priv); | ||
1544 | blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD); | ||
1545 | blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY << | ||
1546 | GEN6_BLITTER_LOCK_SHIFT; | ||
1547 | I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd); | ||
1548 | blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY; | ||
1549 | I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd); | ||
1550 | blt_ecoskpd &= ~(GEN6_BLITTER_FBC_NOTIFY << | ||
1551 | GEN6_BLITTER_LOCK_SHIFT); | ||
1552 | I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd); | ||
1553 | POSTING_READ(GEN6_BLITTER_ECOSKPD); | ||
1554 | gen6_gt_force_wake_put(dev_priv); | ||
1555 | } | ||
1556 | |||
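The three writes to GEN6_BLITTER_ECOSKPD follow the usual GEN6 locked-register protocol: the same bit shifted by GEN6_BLITTER_LOCK_SHIFT acts as a write-enable, so the sequence is unlock, set, relock. A compact sketch of that idiom (the helper is illustrative; the force-wake bracketing done by the function above is omitted):

/* Locked-bit write idiom from sandybridge_blit_fbc_update(). */
static void gen6_locked_bit_set(struct drm_i915_private *dev_priv,
				u32 reg, u32 bit, u32 lock_shift)
{
	u32 val = I915_READ(reg);

	val |= bit << lock_shift;	/* unlock the target bit */
	I915_WRITE(reg, val);
	val |= bit;			/* set it while unlocked */
	I915_WRITE(reg, val);
	val &= ~(bit << lock_shift);	/* relock */
	I915_WRITE(reg, val);
	POSTING_READ(reg);		/* flush before relying on it */
}
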
1197 | static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval) | 1557 | static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval) |
1198 | { | 1558 | { |
1199 | struct drm_device *dev = crtc->dev; | 1559 | struct drm_device *dev = crtc->dev; |
1200 | struct drm_i915_private *dev_priv = dev->dev_private; | 1560 | struct drm_i915_private *dev_priv = dev->dev_private; |
1201 | struct drm_framebuffer *fb = crtc->fb; | 1561 | struct drm_framebuffer *fb = crtc->fb; |
1202 | struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); | 1562 | struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); |
1203 | struct drm_i915_gem_object *obj_priv = to_intel_bo(intel_fb->obj); | 1563 | struct drm_i915_gem_object *obj = intel_fb->obj; |
1204 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 1564 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
1205 | int plane = (intel_crtc->plane == 0) ? DPFC_CTL_PLANEA : | 1565 | int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB; |
1206 | DPFC_CTL_PLANEB; | ||
1207 | unsigned long stall_watermark = 200; | 1566 | unsigned long stall_watermark = 200; |
1208 | u32 dpfc_ctl; | 1567 | u32 dpfc_ctl; |
1209 | 1568 | ||
1569 | dpfc_ctl = I915_READ(ILK_DPFC_CONTROL); | ||
1570 | if (dpfc_ctl & DPFC_CTL_EN) { | ||
1571 | if (dev_priv->cfb_pitch == dev_priv->cfb_pitch / 64 - 1 && | ||
1572 | dev_priv->cfb_fence == obj->fence_reg && | ||
1573 | dev_priv->cfb_plane == intel_crtc->plane && | ||
1574 | dev_priv->cfb_offset == obj->gtt_offset && | ||
1575 | dev_priv->cfb_y == crtc->y) | ||
1576 | return; | ||
1577 | |||
1578 | I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl & ~DPFC_CTL_EN); | ||
1579 | intel_wait_for_vblank(dev, intel_crtc->pipe); | ||
1580 | } | ||
1581 | |||
1210 | dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1; | 1582 | dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1; |
1211 | dev_priv->cfb_fence = obj_priv->fence_reg; | 1583 | dev_priv->cfb_fence = obj->fence_reg; |
1212 | dev_priv->cfb_plane = intel_crtc->plane; | 1584 | dev_priv->cfb_plane = intel_crtc->plane; |
1585 | dev_priv->cfb_offset = obj->gtt_offset; | ||
1586 | dev_priv->cfb_y = crtc->y; | ||
1213 | 1587 | ||
1214 | dpfc_ctl = I915_READ(ILK_DPFC_CONTROL); | ||
1215 | dpfc_ctl &= DPFC_RESERVED; | 1588 | dpfc_ctl &= DPFC_RESERVED; |
1216 | dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X); | 1589 | dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X); |
1217 | if (obj_priv->tiling_mode != I915_TILING_NONE) { | 1590 | if (obj->tiling_mode != I915_TILING_NONE) { |
1218 | dpfc_ctl |= (DPFC_CTL_FENCE_EN | dev_priv->cfb_fence); | 1591 | dpfc_ctl |= (DPFC_CTL_FENCE_EN | dev_priv->cfb_fence); |
1219 | I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY); | 1592 | I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY); |
1220 | } else { | 1593 | } else { |
1221 | I915_WRITE(ILK_DPFC_CHICKEN, ~DPFC_HT_MODIFY); | 1594 | I915_WRITE(ILK_DPFC_CHICKEN, ~DPFC_HT_MODIFY); |
1222 | } | 1595 | } |
1223 | 1596 | ||
1224 | I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl); | ||
1225 | I915_WRITE(ILK_DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN | | 1597 | I915_WRITE(ILK_DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN | |
1226 | (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) | | 1598 | (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) | |
1227 | (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT)); | 1599 | (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT)); |
1228 | I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y); | 1600 | I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y); |
1229 | I915_WRITE(ILK_FBC_RT_BASE, obj_priv->gtt_offset | ILK_FBC_RT_VALID); | 1601 | I915_WRITE(ILK_FBC_RT_BASE, obj->gtt_offset | ILK_FBC_RT_VALID); |
1230 | /* enable it... */ | 1602 | /* enable it... */ |
1231 | I915_WRITE(ILK_DPFC_CONTROL, I915_READ(ILK_DPFC_CONTROL) | | 1603 | I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN); |
1232 | DPFC_CTL_EN); | 1604 | |
1605 | if (IS_GEN6(dev)) { | ||
1606 | I915_WRITE(SNB_DPFC_CTL_SA, | ||
1607 | SNB_CPU_FENCE_ENABLE | dev_priv->cfb_fence); | ||
1608 | I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y); | ||
1609 | sandybridge_blit_fbc_update(dev); | ||
1610 | } | ||
1233 | 1611 | ||
1234 | DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane); | 1612 | DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane); |
1235 | } | 1613 | } |
@@ -1241,10 +1619,12 @@ void ironlake_disable_fbc(struct drm_device *dev) | |||
1241 | 1619 | ||
1242 | /* Disable compression */ | 1620 | /* Disable compression */ |
1243 | dpfc_ctl = I915_READ(ILK_DPFC_CONTROL); | 1621 | dpfc_ctl = I915_READ(ILK_DPFC_CONTROL); |
1244 | dpfc_ctl &= ~DPFC_CTL_EN; | 1622 | if (dpfc_ctl & DPFC_CTL_EN) { |
1245 | I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl); | 1623 | dpfc_ctl &= ~DPFC_CTL_EN; |
1624 | I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl); | ||
1246 | 1625 | ||
1247 | DRM_DEBUG_KMS("disabled FBC\n"); | 1626 | DRM_DEBUG_KMS("disabled FBC\n"); |
1627 | } | ||
1248 | } | 1628 | } |
1249 | 1629 | ||
1250 | static bool ironlake_fbc_enabled(struct drm_device *dev) | 1630 | static bool ironlake_fbc_enabled(struct drm_device *dev) |
@@ -1286,8 +1666,7 @@ void intel_disable_fbc(struct drm_device *dev) | |||
1286 | 1666 | ||
1287 | /** | 1667 | /** |
1288 | * intel_update_fbc - enable/disable FBC as needed | 1668 | * intel_update_fbc - enable/disable FBC as needed |
1289 | * @crtc: CRTC to point the compressor at | 1669 | * @dev: the drm_device |
1290 | * @mode: mode in use | ||
1291 | * | 1670 | * |
1292 | * Set up the framebuffer compression hardware at mode set time. We | 1671 | * Set up the framebuffer compression hardware at mode set time. We |
1293 | * enable it if possible: | 1672 | * enable it if possible: |
@@ -1304,18 +1683,14 @@ void intel_disable_fbc(struct drm_device *dev) | |||
1304 | * | 1683 | * |
1305 | * We need to enable/disable FBC on a global basis. | 1684 | * We need to enable/disable FBC on a global basis. |
1306 | */ | 1685 | */ |
1307 | static void intel_update_fbc(struct drm_crtc *crtc, | 1686 | static void intel_update_fbc(struct drm_device *dev) |
1308 | struct drm_display_mode *mode) | ||
1309 | { | 1687 | { |
1310 | struct drm_device *dev = crtc->dev; | ||
1311 | struct drm_i915_private *dev_priv = dev->dev_private; | 1688 | struct drm_i915_private *dev_priv = dev->dev_private; |
1312 | struct drm_framebuffer *fb = crtc->fb; | 1689 | struct drm_crtc *crtc = NULL, *tmp_crtc; |
1690 | struct intel_crtc *intel_crtc; | ||
1691 | struct drm_framebuffer *fb; | ||
1313 | struct intel_framebuffer *intel_fb; | 1692 | struct intel_framebuffer *intel_fb; |
1314 | struct drm_i915_gem_object *obj_priv; | 1693 | struct drm_i915_gem_object *obj; |
1315 | struct drm_crtc *tmp_crtc; | ||
1316 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
1317 | int plane = intel_crtc->plane; | ||
1318 | int crtcs_enabled = 0; | ||
1319 | 1694 | ||
1320 | DRM_DEBUG_KMS("\n"); | 1695 | DRM_DEBUG_KMS("\n"); |
1321 | 1696 | ||
@@ -1325,12 +1700,6 @@ static void intel_update_fbc(struct drm_crtc *crtc, | |||
1325 | if (!I915_HAS_FBC(dev)) | 1700 | if (!I915_HAS_FBC(dev)) |
1326 | return; | 1701 | return; |
1327 | 1702 | ||
1328 | if (!crtc->fb) | ||
1329 | return; | ||
1330 | |||
1331 | intel_fb = to_intel_framebuffer(fb); | ||
1332 | obj_priv = to_intel_bo(intel_fb->obj); | ||
1333 | |||
1334 | /* | 1703 | /* |
1335 | * If FBC is already on, we just have to verify that we can | 1704 | * If FBC is already on, we just have to verify that we can |
1336 | * keep it that way... | 1705 | * keep it that way... |
@@ -1341,40 +1710,57 @@ static void intel_update_fbc(struct drm_crtc *crtc, | |||
1341 | * - going to an unsupported config (interlace, pixel multiply, etc.) | 1710 | * - going to an unsupported config (interlace, pixel multiply, etc.) |
1342 | */ | 1711 | */ |
1343 | list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) { | 1712 | list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) { |
1344 | if (tmp_crtc->enabled) | 1713 | if (tmp_crtc->enabled && tmp_crtc->fb) { |
1345 | crtcs_enabled++; | 1714 | if (crtc) { |
1715 | DRM_DEBUG_KMS("more than one pipe active, disabling compression\n"); | ||
1716 | dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES; | ||
1717 | goto out_disable; | ||
1718 | } | ||
1719 | crtc = tmp_crtc; | ||
1720 | } | ||
1346 | } | 1721 | } |
1347 | DRM_DEBUG_KMS("%d pipes active\n", crtcs_enabled); | 1722 | |
1348 | if (crtcs_enabled > 1) { | 1723 | if (!crtc || crtc->fb == NULL) { |
1349 | DRM_DEBUG_KMS("more than one pipe active, disabling compression\n"); | 1724 | DRM_DEBUG_KMS("no output, disabling\n"); |
1350 | dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES; | 1725 | dev_priv->no_fbc_reason = FBC_NO_OUTPUT; |
1351 | goto out_disable; | 1726 | goto out_disable; |
1352 | } | 1727 | } |
1353 | if (intel_fb->obj->size > dev_priv->cfb_size) { | 1728 | |
1729 | intel_crtc = to_intel_crtc(crtc); | ||
1730 | fb = crtc->fb; | ||
1731 | intel_fb = to_intel_framebuffer(fb); | ||
1732 | obj = intel_fb->obj; | ||
1733 | |||
1734 | if (!i915_enable_fbc) { | ||
1735 | DRM_DEBUG_KMS("fbc disabled per module param (default off)\n"); | ||
1736 | dev_priv->no_fbc_reason = FBC_MODULE_PARAM; | ||
1737 | goto out_disable; | ||
1738 | } | ||
1739 | if (intel_fb->obj->base.size > dev_priv->cfb_size) { | ||
1354 | DRM_DEBUG_KMS("framebuffer too large, disabling " | 1740 | DRM_DEBUG_KMS("framebuffer too large, disabling " |
1355 | "compression\n"); | 1741 | "compression\n"); |
1356 | dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL; | 1742 | dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL; |
1357 | goto out_disable; | 1743 | goto out_disable; |
1358 | } | 1744 | } |
1359 | if ((mode->flags & DRM_MODE_FLAG_INTERLACE) || | 1745 | if ((crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) || |
1360 | (mode->flags & DRM_MODE_FLAG_DBLSCAN)) { | 1746 | (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)) { |
1361 | DRM_DEBUG_KMS("mode incompatible with compression, " | 1747 | DRM_DEBUG_KMS("mode incompatible with compression, " |
1362 | "disabling\n"); | 1748 | "disabling\n"); |
1363 | dev_priv->no_fbc_reason = FBC_UNSUPPORTED_MODE; | 1749 | dev_priv->no_fbc_reason = FBC_UNSUPPORTED_MODE; |
1364 | goto out_disable; | 1750 | goto out_disable; |
1365 | } | 1751 | } |
1366 | if ((mode->hdisplay > 2048) || | 1752 | if ((crtc->mode.hdisplay > 2048) || |
1367 | (mode->vdisplay > 1536)) { | 1753 | (crtc->mode.vdisplay > 1536)) { |
1368 | DRM_DEBUG_KMS("mode too large for compression, disabling\n"); | 1754 | DRM_DEBUG_KMS("mode too large for compression, disabling\n"); |
1369 | dev_priv->no_fbc_reason = FBC_MODE_TOO_LARGE; | 1755 | dev_priv->no_fbc_reason = FBC_MODE_TOO_LARGE; |
1370 | goto out_disable; | 1756 | goto out_disable; |
1371 | } | 1757 | } |
1372 | if ((IS_I915GM(dev) || IS_I945GM(dev)) && plane != 0) { | 1758 | if ((IS_I915GM(dev) || IS_I945GM(dev)) && intel_crtc->plane != 0) { |
1373 | DRM_DEBUG_KMS("plane not 0, disabling compression\n"); | 1759 | DRM_DEBUG_KMS("plane not 0, disabling compression\n"); |
1374 | dev_priv->no_fbc_reason = FBC_BAD_PLANE; | 1760 | dev_priv->no_fbc_reason = FBC_BAD_PLANE; |
1375 | goto out_disable; | 1761 | goto out_disable; |
1376 | } | 1762 | } |
1377 | if (obj_priv->tiling_mode != I915_TILING_X) { | 1763 | if (obj->tiling_mode != I915_TILING_X) { |
1378 | DRM_DEBUG_KMS("framebuffer not tiled, disabling compression\n"); | 1764 | DRM_DEBUG_KMS("framebuffer not tiled, disabling compression\n"); |
1379 | dev_priv->no_fbc_reason = FBC_NOT_TILED; | 1765 | dev_priv->no_fbc_reason = FBC_NOT_TILED; |
1380 | goto out_disable; | 1766 | goto out_disable; |
@@ -1384,18 +1770,7 @@ static void intel_update_fbc(struct drm_crtc *crtc, | |||
1384 | if (in_dbg_master()) | 1770 | if (in_dbg_master()) |
1385 | goto out_disable; | 1771 | goto out_disable; |
1386 | 1772 | ||
1387 | if (intel_fbc_enabled(dev)) { | 1773 | intel_enable_fbc(crtc, 500); |
1388 | /* We can re-enable it in this case, but need to update pitch */ | ||
1389 | if ((fb->pitch > dev_priv->cfb_pitch) || | ||
1390 | (obj_priv->fence_reg != dev_priv->cfb_fence) || | ||
1391 | (plane != dev_priv->cfb_plane)) | ||
1392 | intel_disable_fbc(dev); | ||
1393 | } | ||
1394 | |||
1395 | /* Now try to turn it back on if possible */ | ||
1396 | if (!intel_fbc_enabled(dev)) | ||
1397 | intel_enable_fbc(crtc, 500); | ||
1398 | |||
1399 | return; | 1774 | return; |
1400 | 1775 | ||
1401 | out_disable: | 1776 | out_disable: |
@@ -1407,17 +1782,19 @@ out_disable: | |||
1407 | } | 1782 | } |
1408 | 1783 | ||
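intel_update_fbc() now chooses the compressor target itself: it walks the crtc list and keeps exactly one enabled crtc that has a framebuffer attached, bailing out if a second one turns up. The core of that selection, reduced to a sketch (the real function additionally records FBC_MULTIPLE_PIPES or FBC_NO_OUTPUT in no_fbc_reason):

/* Single-pipe selection from intel_update_fbc(): FBC can only track
 * one plane, so a second active crtc disqualifies it outright. */
static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
{
	struct drm_crtc *crtc = NULL, *tmp_crtc;

	list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) {
		if (tmp_crtc->enabled && tmp_crtc->fb) {
			if (crtc)
				return NULL;	/* more than one pipe active */
			crtc = tmp_crtc;
		}
	}
	return crtc;
}
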
1409 | int | 1784 | int |
1410 | intel_pin_and_fence_fb_obj(struct drm_device *dev, struct drm_gem_object *obj) | 1785 | intel_pin_and_fence_fb_obj(struct drm_device *dev, |
1786 | struct drm_i915_gem_object *obj, | ||
1787 | struct intel_ring_buffer *pipelined) | ||
1411 | { | 1788 | { |
1412 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 1789 | struct drm_i915_private *dev_priv = dev->dev_private; |
1413 | u32 alignment; | 1790 | u32 alignment; |
1414 | int ret; | 1791 | int ret; |
1415 | 1792 | ||
1416 | switch (obj_priv->tiling_mode) { | 1793 | switch (obj->tiling_mode) { |
1417 | case I915_TILING_NONE: | 1794 | case I915_TILING_NONE: |
1418 | if (IS_BROADWATER(dev) || IS_CRESTLINE(dev)) | 1795 | if (IS_BROADWATER(dev) || IS_CRESTLINE(dev)) |
1419 | alignment = 128 * 1024; | 1796 | alignment = 128 * 1024; |
1420 | else if (IS_I965G(dev)) | 1797 | else if (INTEL_INFO(dev)->gen >= 4) |
1421 | alignment = 4 * 1024; | 1798 | alignment = 4 * 1024; |
1422 | else | 1799 | else |
1423 | alignment = 64 * 1024; | 1800 | alignment = 64 * 1024; |
@@ -1434,46 +1811,50 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev, struct drm_gem_object *obj) | |||
1434 | BUG(); | 1811 | BUG(); |
1435 | } | 1812 | } |
1436 | 1813 | ||
1437 | ret = i915_gem_object_pin(obj, alignment); | 1814 | dev_priv->mm.interruptible = false; |
1438 | if (ret != 0) | 1815 | ret = i915_gem_object_pin(obj, alignment, true); |
1439 | return ret; | 1816 | if (ret) |
1817 | goto err_interruptible; | ||
1818 | |||
1819 | ret = i915_gem_object_set_to_display_plane(obj, pipelined); | ||
1820 | if (ret) | ||
1821 | goto err_unpin; | ||
1440 | 1822 | ||
1441 | /* Install a fence for tiled scan-out. Pre-i965 always needs a | 1823 | /* Install a fence for tiled scan-out. Pre-i965 always needs a |
1442 | * fence, whereas 965+ only requires a fence if using | 1824 | * fence, whereas 965+ only requires a fence if using |
1443 | * framebuffer compression. For simplicity, we always install | 1825 | * framebuffer compression. For simplicity, we always install |
1444 | * a fence as the cost is not that onerous. | 1826 | * a fence as the cost is not that onerous. |
1445 | */ | 1827 | */ |
1446 | if (obj_priv->fence_reg == I915_FENCE_REG_NONE && | 1828 | if (obj->tiling_mode != I915_TILING_NONE) { |
1447 | obj_priv->tiling_mode != I915_TILING_NONE) { | 1829 | ret = i915_gem_object_get_fence(obj, pipelined); |
1448 | ret = i915_gem_object_get_fence_reg(obj); | 1830 | if (ret) |
1449 | if (ret != 0) { | 1831 | goto err_unpin; |
1450 | i915_gem_object_unpin(obj); | ||
1451 | return ret; | ||
1452 | } | ||
1453 | } | 1832 | } |
1454 | 1833 | ||
1834 | dev_priv->mm.interruptible = true; | ||
1455 | return 0; | 1835 | return 0; |
1836 | |||
1837 | err_unpin: | ||
1838 | i915_gem_object_unpin(obj); | ||
1839 | err_interruptible: | ||
1840 | dev_priv->mm.interruptible = true; | ||
1841 | return ret; | ||
1456 | } | 1842 | } |
1457 | 1843 | ||
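The pin alignment for untiled scan-out in this hunk is 128K on Broadwater/Crestline, 4K on gen4 and newer, and 64K on everything older; the tiled cases are elided by the hunk boundary, so they are omitted here as well. Condensed into a sketch (the helper name is illustrative):

/* Linear scan-out alignment rule from intel_pin_and_fence_fb_obj(). */
static u32 linear_scanout_alignment(struct drm_device *dev)
{
	if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
		return 128 * 1024;
	if (INTEL_INFO(dev)->gen >= 4)
		return 4 * 1024;
	return 64 * 1024;
}
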
1458 | /* Assume fb object is pinned & idle & fenced and just update base pointers */ | 1844 | /* Assume fb object is pinned & idle & fenced and just update base pointers */ |
1459 | static int | 1845 | static int |
1460 | intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb, | 1846 | intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb, |
1461 | int x, int y) | 1847 | int x, int y, enum mode_set_atomic state) |
1462 | { | 1848 | { |
1463 | struct drm_device *dev = crtc->dev; | 1849 | struct drm_device *dev = crtc->dev; |
1464 | struct drm_i915_private *dev_priv = dev->dev_private; | 1850 | struct drm_i915_private *dev_priv = dev->dev_private; |
1465 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 1851 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
1466 | struct intel_framebuffer *intel_fb; | 1852 | struct intel_framebuffer *intel_fb; |
1467 | struct drm_i915_gem_object *obj_priv; | 1853 | struct drm_i915_gem_object *obj; |
1468 | struct drm_gem_object *obj; | ||
1469 | int plane = intel_crtc->plane; | 1854 | int plane = intel_crtc->plane; |
1470 | unsigned long Start, Offset; | 1855 | unsigned long Start, Offset; |
1471 | int dspbase = (plane == 0 ? DSPAADDR : DSPBADDR); | ||
1472 | int dspsurf = (plane == 0 ? DSPASURF : DSPBSURF); | ||
1473 | int dspstride = (plane == 0) ? DSPASTRIDE : DSPBSTRIDE; | ||
1474 | int dsptileoff = (plane == 0 ? DSPATILEOFF : DSPBTILEOFF); | ||
1475 | int dspcntr_reg = (plane == 0) ? DSPACNTR : DSPBCNTR; | ||
1476 | u32 dspcntr; | 1856 | u32 dspcntr; |
1857 | u32 reg; | ||
1477 | 1858 | ||
1478 | switch (plane) { | 1859 | switch (plane) { |
1479 | case 0: | 1860 | case 0: |
@@ -1486,9 +1867,9 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb, | |||
1486 | 1867 | ||
1487 | intel_fb = to_intel_framebuffer(fb); | 1868 | intel_fb = to_intel_framebuffer(fb); |
1488 | obj = intel_fb->obj; | 1869 | obj = intel_fb->obj; |
1489 | obj_priv = to_intel_bo(obj); | ||
1490 | 1870 | ||
1491 | dspcntr = I915_READ(dspcntr_reg); | 1871 | reg = DSPCNTR(plane); |
1872 | dspcntr = I915_READ(reg); | ||
1492 | /* Mask out pixel format bits in case we change it */ | 1873 | /* Mask out pixel format bits in case we change it */ |
1493 | dspcntr &= ~DISPPLANE_PIXFORMAT_MASK; | 1874 | dspcntr &= ~DISPPLANE_PIXFORMAT_MASK; |
1494 | switch (fb->bits_per_pixel) { | 1875 | switch (fb->bits_per_pixel) { |
@@ -1509,8 +1890,8 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb, | |||
1509 | DRM_ERROR("Unknown color depth\n"); | 1890 | DRM_ERROR("Unknown color depth\n"); |
1510 | return -EINVAL; | 1891 | return -EINVAL; |
1511 | } | 1892 | } |
1512 | if (IS_I965G(dev)) { | 1893 | if (INTEL_INFO(dev)->gen >= 4) { |
1513 | if (obj_priv->tiling_mode != I915_TILING_NONE) | 1894 | if (obj->tiling_mode != I915_TILING_NONE) |
1514 | dspcntr |= DISPPLANE_TILED; | 1895 | dspcntr |= DISPPLANE_TILED; |
1515 | else | 1896 | else |
1516 | dspcntr &= ~DISPPLANE_TILED; | 1897 | dspcntr &= ~DISPPLANE_TILED; |
@@ -1520,28 +1901,24 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb, | |||
1520 | /* must disable */ | 1901 | /* must disable */ |
1521 | dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE; | 1902 | dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE; |
1522 | 1903 | ||
1523 | I915_WRITE(dspcntr_reg, dspcntr); | 1904 | I915_WRITE(reg, dspcntr); |
1524 | 1905 | ||
1525 | Start = obj_priv->gtt_offset; | 1906 | Start = obj->gtt_offset; |
1526 | Offset = y * fb->pitch + x * (fb->bits_per_pixel / 8); | 1907 | Offset = y * fb->pitch + x * (fb->bits_per_pixel / 8); |
1527 | 1908 | ||
1528 | DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n", | 1909 | DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n", |
1529 | Start, Offset, x, y, fb->pitch); | 1910 | Start, Offset, x, y, fb->pitch); |
1530 | I915_WRITE(dspstride, fb->pitch); | 1911 | I915_WRITE(DSPSTRIDE(plane), fb->pitch); |
1531 | if (IS_I965G(dev)) { | 1912 | if (INTEL_INFO(dev)->gen >= 4) { |
1532 | I915_WRITE(dspsurf, Start); | 1913 | I915_WRITE(DSPSURF(plane), Start); |
1533 | I915_WRITE(dsptileoff, (y << 16) | x); | 1914 | I915_WRITE(DSPTILEOFF(plane), (y << 16) | x); |
1534 | I915_WRITE(dspbase, Offset); | 1915 | I915_WRITE(DSPADDR(plane), Offset); |
1535 | } else { | 1916 | } else |
1536 | I915_WRITE(dspbase, Start + Offset); | 1917 | I915_WRITE(DSPADDR(plane), Start + Offset); |
1537 | } | 1918 | POSTING_READ(reg); |
1538 | POSTING_READ(dspbase); | ||
1539 | |||
1540 | if (IS_I965G(dev) || plane == 0) | ||
1541 | intel_update_fbc(crtc, &crtc->mode); | ||
1542 | 1919 | ||
1543 | intel_wait_for_vblank(dev, intel_crtc->pipe); | 1920 | intel_update_fbc(dev); |
1544 | intel_increase_pllclock(crtc, true); | 1921 | intel_increase_pllclock(crtc); |
1545 | 1922 | ||
1546 | return 0; | 1923 | return 0; |
1547 | } | 1924 | } |
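The base programming splits the scan-out address into the object's GTT start and a byte offset derived from the pan position: Offset = y * pitch + x * bytes-per-pixel. For a 32bpp framebuffer with a 7680-byte pitch panned to (8, 4), that is 4 * 7680 + 8 * 4 = 30752 bytes; gen4+ programs the two separately via DSPSURF and DSPTILEOFF/DSPADDR, while older parts get the summed address. The arithmetic as a sketch:

/* Pan-offset arithmetic from intel_pipe_set_base_atomic(). */
static unsigned long fb_pan_offset(const struct drm_framebuffer *fb,
				   int x, int y)
{
	return y * fb->pitch + x * (fb->bits_per_pixel / 8);
}
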
@@ -1553,11 +1930,6 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, | |||
1553 | struct drm_device *dev = crtc->dev; | 1930 | struct drm_device *dev = crtc->dev; |
1554 | struct drm_i915_master_private *master_priv; | 1931 | struct drm_i915_master_private *master_priv; |
1555 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 1932 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
1556 | struct intel_framebuffer *intel_fb; | ||
1557 | struct drm_i915_gem_object *obj_priv; | ||
1558 | struct drm_gem_object *obj; | ||
1559 | int pipe = intel_crtc->pipe; | ||
1560 | int plane = intel_crtc->plane; | ||
1561 | int ret; | 1933 | int ret; |
1562 | 1934 | ||
1563 | /* no fb bound */ | 1935 | /* no fb bound */ |
@@ -1566,44 +1938,54 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, | |||
1566 | return 0; | 1938 | return 0; |
1567 | } | 1939 | } |
1568 | 1940 | ||
1569 | switch (plane) { | 1941 | switch (intel_crtc->plane) { |
1570 | case 0: | 1942 | case 0: |
1571 | case 1: | 1943 | case 1: |
1572 | break; | 1944 | break; |
1573 | default: | 1945 | default: |
1574 | DRM_ERROR("Can't update plane %d in SAREA\n", plane); | ||
1575 | return -EINVAL; | 1946 | return -EINVAL; |
1576 | } | 1947 | } |
1577 | 1948 | ||
1578 | intel_fb = to_intel_framebuffer(crtc->fb); | ||
1579 | obj = intel_fb->obj; | ||
1580 | obj_priv = to_intel_bo(obj); | ||
1581 | |||
1582 | mutex_lock(&dev->struct_mutex); | 1949 | mutex_lock(&dev->struct_mutex); |
1583 | ret = intel_pin_and_fence_fb_obj(dev, obj); | 1950 | ret = intel_pin_and_fence_fb_obj(dev, |
1951 | to_intel_framebuffer(crtc->fb)->obj, | ||
1952 | NULL); | ||
1584 | if (ret != 0) { | 1953 | if (ret != 0) { |
1585 | mutex_unlock(&dev->struct_mutex); | 1954 | mutex_unlock(&dev->struct_mutex); |
1586 | return ret; | 1955 | return ret; |
1587 | } | 1956 | } |
1588 | 1957 | ||
1589 | ret = i915_gem_object_set_to_display_plane(obj); | 1958 | if (old_fb) { |
1590 | if (ret != 0) { | 1959 | struct drm_i915_private *dev_priv = dev->dev_private; |
1591 | i915_gem_object_unpin(obj); | 1960 | struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj; |
1592 | mutex_unlock(&dev->struct_mutex); | 1961 | |
1593 | return ret; | 1962 | wait_event(dev_priv->pending_flip_queue, |
1963 | atomic_read(&dev_priv->mm.wedged) || | ||
1964 | atomic_read(&obj->pending_flip) == 0); | ||
1965 | |||
1966 | /* Big Hammer, we also need to ensure that any pending | ||
1967 | * MI_WAIT_FOR_EVENT inside a user batch buffer on the | ||
1968 | * current scanout is retired before unpinning the old | ||
1969 | * framebuffer. | ||
1970 | * | ||
1971 | * This should only fail upon a hung GPU, in which case we | ||
1972 | * can safely continue. | ||
1973 | */ | ||
1974 | ret = i915_gem_object_flush_gpu(obj); | ||
1975 | (void) ret; | ||
1594 | } | 1976 | } |
1595 | 1977 | ||
1596 | ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y); | 1978 | ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y, |
1979 | LEAVE_ATOMIC_MODE_SET); | ||
1597 | if (ret) { | 1980 | if (ret) { |
1598 | i915_gem_object_unpin(obj); | 1981 | i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj); |
1599 | mutex_unlock(&dev->struct_mutex); | 1982 | mutex_unlock(&dev->struct_mutex); |
1600 | return ret; | 1983 | return ret; |
1601 | } | 1984 | } |
1602 | 1985 | ||
1603 | if (old_fb) { | 1986 | if (old_fb) { |
1604 | intel_fb = to_intel_framebuffer(old_fb); | 1987 | intel_wait_for_vblank(dev, intel_crtc->pipe); |
1605 | obj_priv = to_intel_bo(intel_fb->obj); | 1988 | i915_gem_object_unpin(to_intel_framebuffer(old_fb)->obj); |
1606 | i915_gem_object_unpin(intel_fb->obj); | ||
1607 | } | 1989 | } |
1608 | 1990 | ||
1609 | mutex_unlock(&dev->struct_mutex); | 1991 | mutex_unlock(&dev->struct_mutex); |
@@ -1615,7 +1997,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, | |||
1615 | if (!master_priv->sarea_priv) | 1997 | if (!master_priv->sarea_priv) |
1616 | return 0; | 1998 | return 0; |
1617 | 1999 | ||
1618 | if (pipe) { | 2000 | if (intel_crtc->pipe) { |
1619 | master_priv->sarea_priv->pipeB_x = x; | 2001 | master_priv->sarea_priv->pipeB_x = x; |
1620 | master_priv->sarea_priv->pipeB_y = y; | 2002 | master_priv->sarea_priv->pipeB_y = y; |
1621 | } else { | 2003 | } else { |
@@ -1626,7 +2008,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, | |||
1626 | return 0; | 2008 | return 0; |
1627 | } | 2009 | } |
1628 | 2010 | ||
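Before the old framebuffer is unpinned, the rewritten path drains every page flip still outstanding against it, giving up only if the GPU is wedged. The wait condition, isolated as a sketch (the queue and counters are the ones named in the diff):

/* Pending-flip drain from intel_pipe_set_base(): sleep until all
 * flips against 'obj' retire or the GPU is declared wedged. */
static void wait_for_pending_flips(struct drm_i915_private *dev_priv,
				   struct drm_i915_gem_object *obj)
{
	wait_event(dev_priv->pending_flip_queue,
		   atomic_read(&dev_priv->mm.wedged) ||
		   atomic_read(&obj->pending_flip) == 0);
}
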
1629 | static void ironlake_set_pll_edp (struct drm_crtc *crtc, int clock) | 2011 | static void ironlake_set_pll_edp(struct drm_crtc *crtc, int clock) |
1630 | { | 2012 | { |
1631 | struct drm_device *dev = crtc->dev; | 2013 | struct drm_device *dev = crtc->dev; |
1632 | struct drm_i915_private *dev_priv = dev->dev_private; | 2014 | struct drm_i915_private *dev_priv = dev->dev_private; |
@@ -1659,9 +2041,51 @@ static void ironlake_set_pll_edp (struct drm_crtc *crtc, int clock) | |||
1659 | } | 2041 | } |
1660 | I915_WRITE(DP_A, dpa_ctl); | 2042 | I915_WRITE(DP_A, dpa_ctl); |
1661 | 2043 | ||
2044 | POSTING_READ(DP_A); | ||
1662 | udelay(500); | 2045 | udelay(500); |
1663 | } | 2046 | } |
1664 | 2047 | ||
2048 | static void intel_fdi_normal_train(struct drm_crtc *crtc) | ||
2049 | { | ||
2050 | struct drm_device *dev = crtc->dev; | ||
2051 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
2052 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
2053 | int pipe = intel_crtc->pipe; | ||
2054 | u32 reg, temp; | ||
2055 | |||
2056 | /* enable normal train */ | ||
2057 | reg = FDI_TX_CTL(pipe); | ||
2058 | temp = I915_READ(reg); | ||
2059 | if (IS_IVYBRIDGE(dev)) { | ||
2060 | temp &= ~FDI_LINK_TRAIN_NONE_IVB; | ||
2061 | temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE; | ||
2062 | } else { | ||
2063 | temp &= ~FDI_LINK_TRAIN_NONE; | ||
2064 | temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE; | ||
2065 | } | ||
2066 | I915_WRITE(reg, temp); | ||
2067 | |||
2068 | reg = FDI_RX_CTL(pipe); | ||
2069 | temp = I915_READ(reg); | ||
2070 | if (HAS_PCH_CPT(dev)) { | ||
2071 | temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; | ||
2072 | temp |= FDI_LINK_TRAIN_NORMAL_CPT; | ||
2073 | } else { | ||
2074 | temp &= ~FDI_LINK_TRAIN_NONE; | ||
2075 | temp |= FDI_LINK_TRAIN_NONE; | ||
2076 | } | ||
2077 | I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE); | ||
2078 | |||
2079 | /* wait one idle pattern time */ | ||
2080 | POSTING_READ(reg); | ||
2081 | udelay(1000); | ||
2082 | |||
2083 | /* IVB wants error correction enabled */ | ||
2084 | if (IS_IVYBRIDGE(dev)) | ||
2085 | I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE | | ||
2086 | FDI_FE_ERRC_ENABLE); | ||
2087 | } | ||
2088 | |||
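The TX and RX writes above are handled separately because CPT PCHs keep the RX training state in a dedicated mask/value field instead of the legacy FDI_LINK_TRAIN_NONE encoding. The selector, pulled out as a sketch (the helper name is illustrative):

/* RX pattern-field handling from intel_fdi_normal_train(): CPT uses
 * its own mask/value pair for the normal-train state. */
static u32 fdi_rx_normal_train_bits(struct drm_device *dev, u32 temp)
{
	if (HAS_PCH_CPT(dev)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_NORMAL_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE;	/* as in the diff: clear, then set */
	}
	return temp;
}
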
1665 | /* The FDI link training functions for ILK/Ibexpeak. */ | 2089 | /* The FDI link training functions for ILK/Ibexpeak. */ |
1666 | static void ironlake_fdi_link_train(struct drm_crtc *crtc) | 2090 | static void ironlake_fdi_link_train(struct drm_crtc *crtc) |
1667 | { | 2091 | { |
@@ -1669,84 +2093,97 @@ static void ironlake_fdi_link_train(struct drm_crtc *crtc) | |||
1669 | struct drm_i915_private *dev_priv = dev->dev_private; | 2093 | struct drm_i915_private *dev_priv = dev->dev_private; |
1670 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 2094 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
1671 | int pipe = intel_crtc->pipe; | 2095 | int pipe = intel_crtc->pipe; |
1672 | int fdi_tx_reg = (pipe == 0) ? FDI_TXA_CTL : FDI_TXB_CTL; | 2096 | int plane = intel_crtc->plane; |
1673 | int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL; | 2097 | u32 reg, temp, tries; |
1674 | int fdi_rx_iir_reg = (pipe == 0) ? FDI_RXA_IIR : FDI_RXB_IIR; | 2098 | |
1675 | int fdi_rx_imr_reg = (pipe == 0) ? FDI_RXA_IMR : FDI_RXB_IMR; | 2099 | /* FDI needs bits from pipe & plane first */ |
1676 | u32 temp, tries = 0; | 2100 | assert_pipe_enabled(dev_priv, pipe); |
2101 | assert_plane_enabled(dev_priv, plane); | ||
1677 | 2102 | ||
1678 | /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit | 2103 | /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit |
1679 | for train result */ | 2104 | for train result */ |
1680 | temp = I915_READ(fdi_rx_imr_reg); | 2105 | reg = FDI_RX_IMR(pipe); |
2106 | temp = I915_READ(reg); | ||
1681 | temp &= ~FDI_RX_SYMBOL_LOCK; | 2107 | temp &= ~FDI_RX_SYMBOL_LOCK; |
1682 | temp &= ~FDI_RX_BIT_LOCK; | 2108 | temp &= ~FDI_RX_BIT_LOCK; |
1683 | I915_WRITE(fdi_rx_imr_reg, temp); | 2109 | I915_WRITE(reg, temp); |
1684 | I915_READ(fdi_rx_imr_reg); | 2110 | I915_READ(reg); |
1685 | udelay(150); | 2111 | udelay(150); |
1686 | 2112 | ||
1687 | /* enable CPU FDI TX and PCH FDI RX */ | 2113 | /* enable CPU FDI TX and PCH FDI RX */ |
1688 | temp = I915_READ(fdi_tx_reg); | 2114 | reg = FDI_TX_CTL(pipe); |
1689 | temp |= FDI_TX_ENABLE; | 2115 | temp = I915_READ(reg); |
1690 | temp &= ~(7 << 19); | 2116 | temp &= ~(7 << 19); |
1691 | temp |= (intel_crtc->fdi_lanes - 1) << 19; | 2117 | temp |= (intel_crtc->fdi_lanes - 1) << 19; |
1692 | temp &= ~FDI_LINK_TRAIN_NONE; | 2118 | temp &= ~FDI_LINK_TRAIN_NONE; |
1693 | temp |= FDI_LINK_TRAIN_PATTERN_1; | 2119 | temp |= FDI_LINK_TRAIN_PATTERN_1; |
1694 | I915_WRITE(fdi_tx_reg, temp); | 2120 | I915_WRITE(reg, temp | FDI_TX_ENABLE); |
1695 | I915_READ(fdi_tx_reg); | ||
1696 | 2121 | ||
1697 | temp = I915_READ(fdi_rx_reg); | 2122 | reg = FDI_RX_CTL(pipe); |
2123 | temp = I915_READ(reg); | ||
1698 | temp &= ~FDI_LINK_TRAIN_NONE; | 2124 | temp &= ~FDI_LINK_TRAIN_NONE; |
1699 | temp |= FDI_LINK_TRAIN_PATTERN_1; | 2125 | temp |= FDI_LINK_TRAIN_PATTERN_1; |
1700 | I915_WRITE(fdi_rx_reg, temp | FDI_RX_ENABLE); | 2126 | I915_WRITE(reg, temp | FDI_RX_ENABLE); |
1701 | I915_READ(fdi_rx_reg); | 2127 | |
2128 | POSTING_READ(reg); | ||
1702 | udelay(150); | 2129 | udelay(150); |
1703 | 2130 | ||
2131 | /* Ironlake workaround, enable clock pointer after FDI enable */ | ||
2132 | if (HAS_PCH_IBX(dev)) { | ||
2133 | I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR); | ||
2134 | I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR | | ||
2135 | FDI_RX_PHASE_SYNC_POINTER_EN); | ||
2136 | } | ||
2137 | |||
2138 | reg = FDI_RX_IIR(pipe); | ||
1704 | for (tries = 0; tries < 5; tries++) { | 2139 | for (tries = 0; tries < 5; tries++) { |
1705 | temp = I915_READ(fdi_rx_iir_reg); | 2140 | temp = I915_READ(reg); |
1706 | DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); | 2141 | DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); |
1707 | 2142 | ||
1708 | if ((temp & FDI_RX_BIT_LOCK)) { | 2143 | if ((temp & FDI_RX_BIT_LOCK)) { |
1709 | DRM_DEBUG_KMS("FDI train 1 done.\n"); | 2144 | DRM_DEBUG_KMS("FDI train 1 done.\n"); |
1710 | I915_WRITE(fdi_rx_iir_reg, | 2145 | I915_WRITE(reg, temp | FDI_RX_BIT_LOCK); |
1711 | temp | FDI_RX_BIT_LOCK); | ||
1712 | break; | 2146 | break; |
1713 | } | 2147 | } |
1714 | } | 2148 | } |
1715 | if (tries == 5) | 2149 | if (tries == 5) |
1716 | DRM_DEBUG_KMS("FDI train 1 fail!\n"); | 2150 | DRM_ERROR("FDI train 1 fail!\n"); |
1717 | 2151 | ||
1718 | /* Train 2 */ | 2152 | /* Train 2 */ |
1719 | temp = I915_READ(fdi_tx_reg); | 2153 | reg = FDI_TX_CTL(pipe); |
2154 | temp = I915_READ(reg); | ||
1720 | temp &= ~FDI_LINK_TRAIN_NONE; | 2155 | temp &= ~FDI_LINK_TRAIN_NONE; |
1721 | temp |= FDI_LINK_TRAIN_PATTERN_2; | 2156 | temp |= FDI_LINK_TRAIN_PATTERN_2; |
1722 | I915_WRITE(fdi_tx_reg, temp); | 2157 | I915_WRITE(reg, temp); |
1723 | 2158 | ||
1724 | temp = I915_READ(fdi_rx_reg); | 2159 | reg = FDI_RX_CTL(pipe); |
2160 | temp = I915_READ(reg); | ||
1725 | temp &= ~FDI_LINK_TRAIN_NONE; | 2161 | temp &= ~FDI_LINK_TRAIN_NONE; |
1726 | temp |= FDI_LINK_TRAIN_PATTERN_2; | 2162 | temp |= FDI_LINK_TRAIN_PATTERN_2; |
1727 | I915_WRITE(fdi_rx_reg, temp); | 2163 | I915_WRITE(reg, temp); |
1728 | udelay(150); | ||
1729 | 2164 | ||
1730 | tries = 0; | 2165 | POSTING_READ(reg); |
2166 | udelay(150); | ||
1731 | 2167 | ||
2168 | reg = FDI_RX_IIR(pipe); | ||
1732 | for (tries = 0; tries < 5; tries++) { | 2169 | for (tries = 0; tries < 5; tries++) { |
1733 | temp = I915_READ(fdi_rx_iir_reg); | 2170 | temp = I915_READ(reg); |
1734 | DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); | 2171 | DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); |
1735 | 2172 | ||
1736 | if (temp & FDI_RX_SYMBOL_LOCK) { | 2173 | if (temp & FDI_RX_SYMBOL_LOCK) { |
1737 | I915_WRITE(fdi_rx_iir_reg, | 2174 | I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK); |
1738 | temp | FDI_RX_SYMBOL_LOCK); | ||
1739 | DRM_DEBUG_KMS("FDI train 2 done.\n"); | 2175 | DRM_DEBUG_KMS("FDI train 2 done.\n"); |
1740 | break; | 2176 | break; |
1741 | } | 2177 | } |
1742 | } | 2178 | } |
1743 | if (tries == 5) | 2179 | if (tries == 5) |
1744 | DRM_DEBUG_KMS("FDI train 2 fail!\n"); | 2180 | DRM_ERROR("FDI train 2 fail!\n"); |
1745 | 2181 | ||
1746 | DRM_DEBUG_KMS("FDI train done\n"); | 2182 | DRM_DEBUG_KMS("FDI train done\n"); |
2183 | |||
1747 | } | 2184 | } |
1748 | 2185 | ||
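Both training phases end with the same poll-and-acknowledge loop: read FDI_RX_IIR up to five times, and when the lock bit appears, write it back to clear the latched status. As a generic sketch (the driver open-codes this per phase; the helper here is illustrative):

/* Train-result poll from ironlake_fdi_link_train(): the lock bit is
 * write-one-to-clear, so writing it back is the acknowledge. */
static bool fdi_wait_for_lock(struct drm_i915_private *dev_priv,
			      u32 reg, u32 lock_bit)
{
	int tries;

	for (tries = 0; tries < 5; tries++) {
		u32 temp = I915_READ(reg);

		if (temp & lock_bit) {
			I915_WRITE(reg, temp | lock_bit);	/* ack */
			return true;
		}
	}
	return false;
}
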
1749 | static int snb_b_fdi_train_param [] = { | 2186 | static const int snb_b_fdi_train_param [] = { |
1750 | FDI_LINK_TRAIN_400MV_0DB_SNB_B, | 2187 | FDI_LINK_TRAIN_400MV_0DB_SNB_B, |
1751 | FDI_LINK_TRAIN_400MV_6DB_SNB_B, | 2188 | FDI_LINK_TRAIN_400MV_6DB_SNB_B, |
1752 | FDI_LINK_TRAIN_600MV_3_5DB_SNB_B, | 2189 | FDI_LINK_TRAIN_600MV_3_5DB_SNB_B, |
@@ -1760,24 +2197,22 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc) | |||
1760 | struct drm_i915_private *dev_priv = dev->dev_private; | 2197 | struct drm_i915_private *dev_priv = dev->dev_private; |
1761 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 2198 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
1762 | int pipe = intel_crtc->pipe; | 2199 | int pipe = intel_crtc->pipe; |
1763 | int fdi_tx_reg = (pipe == 0) ? FDI_TXA_CTL : FDI_TXB_CTL; | 2200 | u32 reg, temp, i; |
1764 | int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL; | ||
1765 | int fdi_rx_iir_reg = (pipe == 0) ? FDI_RXA_IIR : FDI_RXB_IIR; | ||
1766 | int fdi_rx_imr_reg = (pipe == 0) ? FDI_RXA_IMR : FDI_RXB_IMR; | ||
1767 | u32 temp, i; | ||
1768 | 2201 | ||
1769 | /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit | 2202 | /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit |
1770 | for train result */ | 2203 | for train result */ |
1771 | temp = I915_READ(fdi_rx_imr_reg); | 2204 | reg = FDI_RX_IMR(pipe); |
2205 | temp = I915_READ(reg); | ||
1772 | temp &= ~FDI_RX_SYMBOL_LOCK; | 2206 | temp &= ~FDI_RX_SYMBOL_LOCK; |
1773 | temp &= ~FDI_RX_BIT_LOCK; | 2207 | temp &= ~FDI_RX_BIT_LOCK; |
1774 | I915_WRITE(fdi_rx_imr_reg, temp); | 2208 | I915_WRITE(reg, temp); |
1775 | I915_READ(fdi_rx_imr_reg); | 2209 | |
2210 | POSTING_READ(reg); | ||
1776 | udelay(150); | 2211 | udelay(150); |
1777 | 2212 | ||
1778 | /* enable CPU FDI TX and PCH FDI RX */ | 2213 | /* enable CPU FDI TX and PCH FDI RX */ |
1779 | temp = I915_READ(fdi_tx_reg); | 2214 | reg = FDI_TX_CTL(pipe); |
1780 | temp |= FDI_TX_ENABLE; | 2215 | temp = I915_READ(reg); |
1781 | temp &= ~(7 << 19); | 2216 | temp &= ~(7 << 19); |
1782 | temp |= (intel_crtc->fdi_lanes - 1) << 19; | 2217 | temp |= (intel_crtc->fdi_lanes - 1) << 19; |
1783 | temp &= ~FDI_LINK_TRAIN_NONE; | 2218 | temp &= ~FDI_LINK_TRAIN_NONE; |
@@ -1785,10 +2220,10 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc) | |||
1785 | temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; | 2220 | temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; |
1786 | /* SNB-B */ | 2221 | /* SNB-B */ |
1787 | temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B; | 2222 | temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B; |
1788 | I915_WRITE(fdi_tx_reg, temp); | 2223 | I915_WRITE(reg, temp | FDI_TX_ENABLE); |
1789 | I915_READ(fdi_tx_reg); | ||
1790 | 2224 | ||
1791 | temp = I915_READ(fdi_rx_reg); | 2225 | reg = FDI_RX_CTL(pipe); |
2226 | temp = I915_READ(reg); | ||
1792 | if (HAS_PCH_CPT(dev)) { | 2227 | if (HAS_PCH_CPT(dev)) { |
1793 | temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; | 2228 | temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; |
1794 | temp |= FDI_LINK_TRAIN_PATTERN_1_CPT; | 2229 | temp |= FDI_LINK_TRAIN_PATTERN_1_CPT; |
@@ -1796,32 +2231,37 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc) | |||
1796 | temp &= ~FDI_LINK_TRAIN_NONE; | 2231 | temp &= ~FDI_LINK_TRAIN_NONE; |
1797 | temp |= FDI_LINK_TRAIN_PATTERN_1; | 2232 | temp |= FDI_LINK_TRAIN_PATTERN_1; |
1798 | } | 2233 | } |
1799 | I915_WRITE(fdi_rx_reg, temp | FDI_RX_ENABLE); | 2234 | I915_WRITE(reg, temp | FDI_RX_ENABLE); |
1800 | I915_READ(fdi_rx_reg); | 2235 | |
2236 | POSTING_READ(reg); | ||
1801 | udelay(150); | 2237 | udelay(150); |
1802 | 2238 | ||
1803 | for (i = 0; i < 4; i++ ) { | 2239 | for (i = 0; i < 4; i++ ) { |
1804 | temp = I915_READ(fdi_tx_reg); | 2240 | reg = FDI_TX_CTL(pipe); |
2241 | temp = I915_READ(reg); | ||
1805 | temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; | 2242 | temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; |
1806 | temp |= snb_b_fdi_train_param[i]; | 2243 | temp |= snb_b_fdi_train_param[i]; |
1807 | I915_WRITE(fdi_tx_reg, temp); | 2244 | I915_WRITE(reg, temp); |
2245 | |||
2246 | POSTING_READ(reg); | ||
1808 | udelay(500); | 2247 | udelay(500); |
1809 | 2248 | ||
1810 | temp = I915_READ(fdi_rx_iir_reg); | 2249 | reg = FDI_RX_IIR(pipe); |
2250 | temp = I915_READ(reg); | ||
1811 | DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); | 2251 | DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); |
1812 | 2252 | ||
1813 | if (temp & FDI_RX_BIT_LOCK) { | 2253 | if (temp & FDI_RX_BIT_LOCK) { |
1814 | I915_WRITE(fdi_rx_iir_reg, | 2254 | I915_WRITE(reg, temp | FDI_RX_BIT_LOCK); |
1815 | temp | FDI_RX_BIT_LOCK); | ||
1816 | DRM_DEBUG_KMS("FDI train 1 done.\n"); | 2255 | DRM_DEBUG_KMS("FDI train 1 done.\n"); |
1817 | break; | 2256 | break; |
1818 | } | 2257 | } |
1819 | } | 2258 | } |
1820 | if (i == 4) | 2259 | if (i == 4) |
1821 | DRM_DEBUG_KMS("FDI train 1 fail!\n"); | 2260 | DRM_ERROR("FDI train 1 fail!\n"); |
1822 | 2261 | ||
1823 | /* Train 2 */ | 2262 | /* Train 2 */ |
1824 | temp = I915_READ(fdi_tx_reg); | 2263 | reg = FDI_TX_CTL(pipe); |
2264 | temp = I915_READ(reg); | ||
1825 | temp &= ~FDI_LINK_TRAIN_NONE; | 2265 | temp &= ~FDI_LINK_TRAIN_NONE; |
1826 | temp |= FDI_LINK_TRAIN_PATTERN_2; | 2266 | temp |= FDI_LINK_TRAIN_PATTERN_2; |
1827 | if (IS_GEN6(dev)) { | 2267 | if (IS_GEN6(dev)) { |
@@ -1829,9 +2269,10 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc) | |||
1829 | /* SNB-B */ | 2269 | /* SNB-B */ |
1830 | temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B; | 2270 | temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B; |
1831 | } | 2271 | } |
1832 | I915_WRITE(fdi_tx_reg, temp); | 2272 | I915_WRITE(reg, temp); |
1833 | 2273 | ||
1834 | temp = I915_READ(fdi_rx_reg); | 2274 | reg = FDI_RX_CTL(pipe); |
2275 | temp = I915_READ(reg); | ||
1835 | if (HAS_PCH_CPT(dev)) { | 2276 | if (HAS_PCH_CPT(dev)) { |
1836 | temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; | 2277 | temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; |
1837 | temp |= FDI_LINK_TRAIN_PATTERN_2_CPT; | 2278 | temp |= FDI_LINK_TRAIN_PATTERN_2_CPT; |
@@ -1839,445 +2280,544 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc) | |||
1839 | temp &= ~FDI_LINK_TRAIN_NONE; | 2280 | temp &= ~FDI_LINK_TRAIN_NONE; |
1840 | temp |= FDI_LINK_TRAIN_PATTERN_2; | 2281 | temp |= FDI_LINK_TRAIN_PATTERN_2; |
1841 | } | 2282 | } |
1842 | I915_WRITE(fdi_rx_reg, temp); | 2283 | I915_WRITE(reg, temp); |
2284 | |||
2285 | POSTING_READ(reg); | ||
1843 | udelay(150); | 2286 | udelay(150); |
1844 | 2287 | ||
1845 | for (i = 0; i < 4; i++ ) { | 2288 | for (i = 0; i < 4; i++ ) { |
1846 | temp = I915_READ(fdi_tx_reg); | 2289 | reg = FDI_TX_CTL(pipe); |
2290 | temp = I915_READ(reg); | ||
1847 | temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; | 2291 | temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; |
1848 | temp |= snb_b_fdi_train_param[i]; | 2292 | temp |= snb_b_fdi_train_param[i]; |
1849 | I915_WRITE(fdi_tx_reg, temp); | 2293 | I915_WRITE(reg, temp); |
2294 | |||
2295 | POSTING_READ(reg); | ||
1850 | udelay(500); | 2296 | udelay(500); |
1851 | 2297 | ||
1852 | temp = I915_READ(fdi_rx_iir_reg); | 2298 | reg = FDI_RX_IIR(pipe); |
2299 | temp = I915_READ(reg); | ||
1853 | DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); | 2300 | DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); |
1854 | 2301 | ||
1855 | if (temp & FDI_RX_SYMBOL_LOCK) { | 2302 | if (temp & FDI_RX_SYMBOL_LOCK) { |
1856 | I915_WRITE(fdi_rx_iir_reg, | 2303 | I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK); |
1857 | temp | FDI_RX_SYMBOL_LOCK); | ||
1858 | DRM_DEBUG_KMS("FDI train 2 done.\n"); | 2304 | DRM_DEBUG_KMS("FDI train 2 done.\n"); |
1859 | break; | 2305 | break; |
1860 | } | 2306 | } |
1861 | } | 2307 | } |
1862 | if (i == 4) | 2308 | if (i == 4) |
1863 | DRM_DEBUG_KMS("FDI train 2 fail!\n"); | 2309 | DRM_ERROR("FDI train 2 fail!\n"); |
1864 | 2310 | ||
1865 | DRM_DEBUG_KMS("FDI train done.\n"); | 2311 | DRM_DEBUG_KMS("FDI train done.\n"); |
1866 | } | 2312 | } |
1867 | 2313 | ||
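gen6 training additionally retries each phase up to four times, stepping through snb_b_fdi_train_param to raise the voltage swing / pre-emphasis between attempts. The phase-1 retry skeleton as a sketch (register names and the 500 us settle time follow the diff; the IIR acknowledge write is omitted for brevity):

/* SNB vswing/emphasis retry skeleton from gen6_fdi_link_train();
 * returns true once phase-1 bit lock is reported. */
static bool snb_fdi_train_phase1(struct drm_i915_private *dev_priv,
				 enum pipe pipe)
{
	int i;

	for (i = 0; i < 4; i++) {
		u32 temp = I915_READ(FDI_TX_CTL(pipe));

		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];	/* next drive setting */
		I915_WRITE(FDI_TX_CTL(pipe), temp);

		POSTING_READ(FDI_TX_CTL(pipe));
		udelay(500);				/* let the link settle */

		if (I915_READ(FDI_RX_IIR(pipe)) & FDI_RX_BIT_LOCK)
			return true;
	}
	return false;
}
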
1868 | static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) | 2314 | /* Manual link training for Ivy Bridge A0 parts */ |
2315 | static void ivb_manual_fdi_link_train(struct drm_crtc *crtc) | ||
1869 | { | 2316 | { |
1870 | struct drm_device *dev = crtc->dev; | 2317 | struct drm_device *dev = crtc->dev; |
1871 | struct drm_i915_private *dev_priv = dev->dev_private; | 2318 | struct drm_i915_private *dev_priv = dev->dev_private; |
1872 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 2319 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
1873 | int pipe = intel_crtc->pipe; | 2320 | int pipe = intel_crtc->pipe; |
1874 | int plane = intel_crtc->plane; | 2321 | u32 reg, temp, i; |
1875 | int pch_dpll_reg = (pipe == 0) ? PCH_DPLL_A : PCH_DPLL_B; | ||
1876 | int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF; | ||
1877 | int dspcntr_reg = (plane == 0) ? DSPACNTR : DSPBCNTR; | ||
1878 | int dspbase_reg = (plane == 0) ? DSPAADDR : DSPBADDR; | ||
1879 | int fdi_tx_reg = (pipe == 0) ? FDI_TXA_CTL : FDI_TXB_CTL; | ||
1880 | int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL; | ||
1881 | int transconf_reg = (pipe == 0) ? TRANSACONF : TRANSBCONF; | ||
1882 | int cpu_htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B; | ||
1883 | int cpu_hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B; | ||
1884 | int cpu_hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B; | ||
1885 | int cpu_vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B; | ||
1886 | int cpu_vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B; | ||
1887 | int cpu_vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B; | ||
1888 | int trans_htot_reg = (pipe == 0) ? TRANS_HTOTAL_A : TRANS_HTOTAL_B; | ||
1889 | int trans_hblank_reg = (pipe == 0) ? TRANS_HBLANK_A : TRANS_HBLANK_B; | ||
1890 | int trans_hsync_reg = (pipe == 0) ? TRANS_HSYNC_A : TRANS_HSYNC_B; | ||
1891 | int trans_vtot_reg = (pipe == 0) ? TRANS_VTOTAL_A : TRANS_VTOTAL_B; | ||
1892 | int trans_vblank_reg = (pipe == 0) ? TRANS_VBLANK_A : TRANS_VBLANK_B; | ||
1893 | int trans_vsync_reg = (pipe == 0) ? TRANS_VSYNC_A : TRANS_VSYNC_B; | ||
1894 | int trans_dpll_sel = (pipe == 0) ? 0 : 1; | ||
1895 | u32 temp; | ||
1896 | u32 pipe_bpc; | ||
1897 | |||
1898 | temp = I915_READ(pipeconf_reg); | ||
1899 | pipe_bpc = temp & PIPE_BPC_MASK; | ||
1900 | 2322 | ||
1901 | /* XXX: When our outputs are all unaware of DPMS modes other than off | 2323 | /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit |
1902 | * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC. | 2324 | for train result */ |
1903 | */ | 2325 | reg = FDI_RX_IMR(pipe); |
1904 | switch (mode) { | 2326 | temp = I915_READ(reg); |
1905 | case DRM_MODE_DPMS_ON: | 2327 | temp &= ~FDI_RX_SYMBOL_LOCK; |
1906 | case DRM_MODE_DPMS_STANDBY: | 2328 | temp &= ~FDI_RX_BIT_LOCK; |
1907 | case DRM_MODE_DPMS_SUSPEND: | 2329 | I915_WRITE(reg, temp); |
1908 | DRM_DEBUG_KMS("crtc %d/%d dpms on\n", pipe, plane); | ||
1909 | 2330 | ||
1910 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { | 2331 | POSTING_READ(reg); |
1911 | temp = I915_READ(PCH_LVDS); | 2332 | udelay(150); |
1912 | if ((temp & LVDS_PORT_EN) == 0) { | ||
1913 | I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN); | ||
1914 | POSTING_READ(PCH_LVDS); | ||
1915 | } | ||
1916 | } | ||
1917 | 2333 | ||
1918 | if (!HAS_eDP) { | 2334 | /* enable CPU FDI TX and PCH FDI RX */ |
2335 | reg = FDI_TX_CTL(pipe); | ||
2336 | temp = I915_READ(reg); | ||
2337 | temp &= ~(7 << 19); | ||
2338 | temp |= (intel_crtc->fdi_lanes - 1) << 19; | ||
2339 | temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB); | ||
2340 | temp |= FDI_LINK_TRAIN_PATTERN_1_IVB; | ||
2341 | temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; | ||
2342 | temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B; | ||
2343 | I915_WRITE(reg, temp | FDI_TX_ENABLE); | ||
1919 | 2344 | ||
1920 | /* enable PCH FDI RX PLL, wait warmup plus DMI latency */ | 2345 | reg = FDI_RX_CTL(pipe); |
1921 | temp = I915_READ(fdi_rx_reg); | 2346 | temp = I915_READ(reg); |
1922 | /* | 2347 | temp &= ~FDI_LINK_TRAIN_AUTO; |
1923 | * make the BPC in FDI Rx be consistent with that in | 2348 | temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; |
1924 | * pipeconf reg. | 2349 | temp |= FDI_LINK_TRAIN_PATTERN_1_CPT; |
1925 | */ | 2350 | I915_WRITE(reg, temp | FDI_RX_ENABLE); |
1926 | temp &= ~(0x7 << 16); | ||
1927 | temp |= (pipe_bpc << 11); | ||
1928 | temp &= ~(7 << 19); | ||
1929 | temp |= (intel_crtc->fdi_lanes - 1) << 19; | ||
1930 | I915_WRITE(fdi_rx_reg, temp | FDI_RX_PLL_ENABLE); | ||
1931 | I915_READ(fdi_rx_reg); | ||
1932 | udelay(200); | ||
1933 | 2351 | ||
1934 | /* Switch from Rawclk to PCDclk */ | 2352 | POSTING_READ(reg); |
1935 | temp = I915_READ(fdi_rx_reg); | 2353 | udelay(150); |
1936 | I915_WRITE(fdi_rx_reg, temp | FDI_SEL_PCDCLK); | ||
1937 | I915_READ(fdi_rx_reg); | ||
1938 | udelay(200); | ||
1939 | 2354 | ||
1940 | /* Enable CPU FDI TX PLL, always on for Ironlake */ | 2355 | for (i = 0; i < 4; i++ ) { |
1941 | temp = I915_READ(fdi_tx_reg); | 2356 | reg = FDI_TX_CTL(pipe); |
1942 | if ((temp & FDI_TX_PLL_ENABLE) == 0) { | 2357 | temp = I915_READ(reg); |
1943 | I915_WRITE(fdi_tx_reg, temp | FDI_TX_PLL_ENABLE); | 2358 | temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; |
1944 | I915_READ(fdi_tx_reg); | 2359 | temp |= snb_b_fdi_train_param[i]; |
1945 | udelay(100); | 2360 | I915_WRITE(reg, temp); |
1946 | } | ||
1947 | } | ||
1948 | 2361 | ||
1949 | /* Enable panel fitting for LVDS */ | 2362 | POSTING_READ(reg); |
1950 | if (dev_priv->pch_pf_size && | 2363 | udelay(500); |
1951 | (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) | ||
1952 | || HAS_eDP || intel_pch_has_edp(crtc))) { | ||
1953 | /* Force use of hard-coded filter coefficients | ||
1954 | * as some pre-programmed values are broken, | ||
1955 | * e.g. x201. | ||
1956 | */ | ||
1957 | I915_WRITE(pipe ? PFB_CTL_1 : PFA_CTL_1, | ||
1958 | PF_ENABLE | PF_FILTER_MED_3x3); | ||
1959 | I915_WRITE(pipe ? PFB_WIN_POS : PFA_WIN_POS, | ||
1960 | dev_priv->pch_pf_pos); | ||
1961 | I915_WRITE(pipe ? PFB_WIN_SZ : PFA_WIN_SZ, | ||
1962 | dev_priv->pch_pf_size); | ||
1963 | } | ||
1964 | 2364 | ||
1965 | /* Enable CPU pipe */ | 2365 | reg = FDI_RX_IIR(pipe); |
1966 | temp = I915_READ(pipeconf_reg); | 2366 | temp = I915_READ(reg); |
1967 | if ((temp & PIPEACONF_ENABLE) == 0) { | 2367 | DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); |
1968 | I915_WRITE(pipeconf_reg, temp | PIPEACONF_ENABLE); | ||
1969 | I915_READ(pipeconf_reg); | ||
1970 | udelay(100); | ||
1971 | } | ||
1972 | 2368 | ||
1973 | /* configure and enable CPU plane */ | 2369 | if (temp & FDI_RX_BIT_LOCK || |
1974 | temp = I915_READ(dspcntr_reg); | 2370 | (I915_READ(reg) & FDI_RX_BIT_LOCK)) { |
1975 | if ((temp & DISPLAY_PLANE_ENABLE) == 0) { | 2371 | I915_WRITE(reg, temp | FDI_RX_BIT_LOCK); |
1976 | I915_WRITE(dspcntr_reg, temp | DISPLAY_PLANE_ENABLE); | 2372 | DRM_DEBUG_KMS("FDI train 1 done.\n"); |
1977 | /* Flush the plane changes */ | 2373 | break; |
1978 | I915_WRITE(dspbase_reg, I915_READ(dspbase_reg)); | ||
1979 | } | 2374 | } |
2375 | } | ||
2376 | if (i == 4) | ||
2377 | DRM_ERROR("FDI train 1 fail!\n"); | ||
1980 | 2378 | ||
1981 | if (!HAS_eDP) { | 2379 | /* Train 2 */ |
1982 | /* For PCH output, training FDI link */ | 2380 | reg = FDI_TX_CTL(pipe); |
1983 | if (IS_GEN6(dev)) | 2381 | temp = I915_READ(reg); |
1984 | gen6_fdi_link_train(crtc); | 2382 | temp &= ~FDI_LINK_TRAIN_NONE_IVB; |
1985 | else | 2383 | temp |= FDI_LINK_TRAIN_PATTERN_2_IVB; |
1986 | ironlake_fdi_link_train(crtc); | 2384 | temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; |
2385 | temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B; | ||
2386 | I915_WRITE(reg, temp); | ||
1987 | 2387 | ||
1988 | /* enable PCH DPLL */ | 2388 | reg = FDI_RX_CTL(pipe); |
1989 | temp = I915_READ(pch_dpll_reg); | 2389 | temp = I915_READ(reg); |
1990 | if ((temp & DPLL_VCO_ENABLE) == 0) { | 2390 | temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; |
1991 | I915_WRITE(pch_dpll_reg, temp | DPLL_VCO_ENABLE); | 2391 | temp |= FDI_LINK_TRAIN_PATTERN_2_CPT; |
1992 | I915_READ(pch_dpll_reg); | 2392 | I915_WRITE(reg, temp); |
1993 | } | ||
1994 | udelay(200); | ||
1995 | 2393 | ||
1996 | if (HAS_PCH_CPT(dev)) { | 2394 | POSTING_READ(reg); |
1997 | /* Be sure PCH DPLL SEL is set */ | 2395 | udelay(150); |
1998 | temp = I915_READ(PCH_DPLL_SEL); | ||
1999 | if (trans_dpll_sel == 0 && | ||
2000 | (temp & TRANSA_DPLL_ENABLE) == 0) | ||
2001 | temp |= (TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL); | ||
2002 | else if (trans_dpll_sel == 1 && | ||
2003 | (temp & TRANSB_DPLL_ENABLE) == 0) | ||
2004 | temp |= (TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL); | ||
2005 | I915_WRITE(PCH_DPLL_SEL, temp); | ||
2006 | I915_READ(PCH_DPLL_SEL); | ||
2007 | } | ||
2008 | 2396 | ||
2009 | /* set transcoder timing */ | 2397 | for (i = 0; i < 4; i++) {
2010 | I915_WRITE(trans_htot_reg, I915_READ(cpu_htot_reg)); | 2398 | reg = FDI_TX_CTL(pipe); |
2011 | I915_WRITE(trans_hblank_reg, I915_READ(cpu_hblank_reg)); | 2399 | temp = I915_READ(reg); |
2012 | I915_WRITE(trans_hsync_reg, I915_READ(cpu_hsync_reg)); | 2400 | temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; |
2013 | 2401 | temp |= snb_b_fdi_train_param[i]; | |
2014 | I915_WRITE(trans_vtot_reg, I915_READ(cpu_vtot_reg)); | 2402 | I915_WRITE(reg, temp); |
2015 | I915_WRITE(trans_vblank_reg, I915_READ(cpu_vblank_reg)); | ||
2016 | I915_WRITE(trans_vsync_reg, I915_READ(cpu_vsync_reg)); | ||
2017 | |||
2018 | /* enable normal train */ | ||
2019 | temp = I915_READ(fdi_tx_reg); | ||
2020 | temp &= ~FDI_LINK_TRAIN_NONE; | ||
2021 | I915_WRITE(fdi_tx_reg, temp | FDI_LINK_TRAIN_NONE | | ||
2022 | FDI_TX_ENHANCE_FRAME_ENABLE); | ||
2023 | I915_READ(fdi_tx_reg); | ||
2024 | |||
2025 | temp = I915_READ(fdi_rx_reg); | ||
2026 | if (HAS_PCH_CPT(dev)) { | ||
2027 | temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; | ||
2028 | temp |= FDI_LINK_TRAIN_NORMAL_CPT; | ||
2029 | } else { | ||
2030 | temp &= ~FDI_LINK_TRAIN_NONE; | ||
2031 | temp |= FDI_LINK_TRAIN_NONE; | ||
2032 | } | ||
2033 | I915_WRITE(fdi_rx_reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE); | ||
2034 | I915_READ(fdi_rx_reg); | ||
2035 | |||
2036 | /* wait one idle pattern time */ | ||
2037 | udelay(100); | ||
2038 | |||
2039 | /* For PCH DP, enable TRANS_DP_CTL */ | ||
2040 | if (HAS_PCH_CPT(dev) && | ||
2041 | intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) { | ||
2042 | int trans_dp_ctl = (pipe == 0) ? TRANS_DP_CTL_A : TRANS_DP_CTL_B; | ||
2043 | int reg; | ||
2044 | |||
2045 | reg = I915_READ(trans_dp_ctl); | ||
2046 | reg &= ~(TRANS_DP_PORT_SEL_MASK | | ||
2047 | TRANS_DP_SYNC_MASK); | ||
2048 | reg |= (TRANS_DP_OUTPUT_ENABLE | | ||
2049 | TRANS_DP_ENH_FRAMING); | ||
2050 | |||
2051 | if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC) | ||
2052 | reg |= TRANS_DP_HSYNC_ACTIVE_HIGH; | ||
2053 | if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC) | ||
2054 | reg |= TRANS_DP_VSYNC_ACTIVE_HIGH; | ||
2055 | |||
2056 | switch (intel_trans_dp_port_sel(crtc)) { | ||
2057 | case PCH_DP_B: | ||
2058 | reg |= TRANS_DP_PORT_SEL_B; | ||
2059 | break; | ||
2060 | case PCH_DP_C: | ||
2061 | reg |= TRANS_DP_PORT_SEL_C; | ||
2062 | break; | ||
2063 | case PCH_DP_D: | ||
2064 | reg |= TRANS_DP_PORT_SEL_D; | ||
2065 | break; | ||
2066 | default: | ||
2067 | DRM_DEBUG_KMS("Wrong PCH DP port return. Guess port B\n"); | ||
2068 | reg |= TRANS_DP_PORT_SEL_B; | ||
2069 | break; | ||
2070 | } | ||
2071 | 2403 | ||
2072 | I915_WRITE(trans_dp_ctl, reg); | 2404 | POSTING_READ(reg); |
2073 | POSTING_READ(trans_dp_ctl); | 2405 | udelay(500); |
2074 | } | ||
2075 | 2406 | ||
2076 | /* enable PCH transcoder */ | 2407 | reg = FDI_RX_IIR(pipe); |
2077 | temp = I915_READ(transconf_reg); | 2408 | temp = I915_READ(reg); |
2078 | /* | 2409 | DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); |
2079 | * make the BPC in transcoder be consistent with | ||
2080 | * that in pipeconf reg. | ||
2081 | */ | ||
2082 | temp &= ~PIPE_BPC_MASK; | ||
2083 | temp |= pipe_bpc; | ||
2084 | I915_WRITE(transconf_reg, temp | TRANS_ENABLE); | ||
2085 | I915_READ(transconf_reg); | ||
2086 | 2410 | ||
2087 | if (wait_for(I915_READ(transconf_reg) & TRANS_STATE_ENABLE, 100, 1)) | 2411 | if (temp & FDI_RX_SYMBOL_LOCK) { |
2088 | DRM_ERROR("failed to enable transcoder\n"); | 2412 | I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK); |
2413 | DRM_DEBUG_KMS("FDI train 2 done.\n"); | ||
2414 | break; | ||
2089 | } | 2415 | } |
2416 | } | ||
2417 | if (i == 4) | ||
2418 | DRM_ERROR("FDI train 2 fail!\n"); | ||
2090 | 2419 | ||
2091 | intel_crtc_load_lut(crtc); | 2420 | DRM_DEBUG_KMS("FDI train done.\n"); |
2421 | } | ||
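
The training sequence above walks snb_b_fdi_train_param[], rewriting the TX voltage-swing/pre-emphasis field and re-sampling the RX IIR for bit lock after every attempt. A minimal compilable sketch of that retry shape follows; read_reg()/write_reg(), the mask, and the register variables are illustrative stand-ins, not the driver's API:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define VOL_EMP_MASK (0x3u << 22)        /* made-up field layout */
    #define RX_BIT_LOCK  (1u << 8)

    static uint32_t tx_ctl, rx_iir;          /* stand-ins for TX_CTL / RX_IIR */

    static uint32_t read_reg(uint32_t *r)            { return *r; }
    static void write_reg(uint32_t *r, uint32_t v)   { *r = v; }

    /* Four vswing/pre-emphasis combinations, tried in order until the
     * receiver reports bit lock -- the same shape as snb_b_fdi_train_param[]. */
    static const uint32_t train_param[4] = {
        0x0u << 22, 0x1u << 22, 0x2u << 22, 0x3u << 22,
    };

    static bool train_pattern_1(void)
    {
        for (int i = 0; i < 4; i++) {
            uint32_t tmp = read_reg(&tx_ctl);
            tmp &= ~VOL_EMP_MASK;
            tmp |= train_param[i];
            write_reg(&tx_ctl, tmp);

            /* The driver does a posting read plus udelay(500) here
             * before sampling the IIR. */
            if (read_reg(&rx_iir) & RX_BIT_LOCK) {
                write_reg(&rx_iir, RX_BIT_LOCK);   /* ack the sticky bit */
                printf("bit lock with param %d\n", i);
                return true;
            }
        }
        return false;
    }

    int main(void)
    {
        rx_iir = RX_BIT_LOCK;    /* pretend the link locks immediately */
        return train_pattern_1() ? 0 : 1;
    }
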
2092 | 2422 | ||
2093 | intel_update_fbc(crtc, &crtc->mode); | 2423 | static void ironlake_fdi_pll_enable(struct drm_crtc *crtc) |
2094 | break; | 2424 | { |
2425 | struct drm_device *dev = crtc->dev; | ||
2426 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
2427 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
2428 | int pipe = intel_crtc->pipe; | ||
2429 | u32 reg, temp; | ||
2095 | 2430 | ||
2096 | case DRM_MODE_DPMS_OFF: | 2431 | /* Write the TU size bits so error detection works */ |
2097 | DRM_DEBUG_KMS("crtc %d/%d dpms off\n", pipe, plane); | 2432 | I915_WRITE(FDI_RX_TUSIZE1(pipe), |
2433 | I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK); | ||
2098 | 2434 | ||
2099 | drm_vblank_off(dev, pipe); | 2435 | /* enable PCH FDI RX PLL, wait warmup plus DMI latency */ |
2100 | /* Disable display plane */ | 2436 | reg = FDI_RX_CTL(pipe); |
2101 | temp = I915_READ(dspcntr_reg); | 2437 | temp = I915_READ(reg); |
2102 | if ((temp & DISPLAY_PLANE_ENABLE) != 0) { | 2438 | temp &= ~((0x7 << 19) | (0x7 << 16)); |
2103 | I915_WRITE(dspcntr_reg, temp & ~DISPLAY_PLANE_ENABLE); | 2439 | temp |= (intel_crtc->fdi_lanes - 1) << 19; |
2104 | /* Flush the plane changes */ | 2440 | temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11; |
2105 | I915_WRITE(dspbase_reg, I915_READ(dspbase_reg)); | 2441 | I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE); |
2106 | I915_READ(dspbase_reg); | ||
2107 | } | ||
2108 | 2442 | ||
2109 | if (dev_priv->cfb_plane == plane && | 2443 | POSTING_READ(reg); |
2110 | dev_priv->display.disable_fbc) | 2444 | udelay(200); |
2111 | dev_priv->display.disable_fbc(dev); | ||
2112 | 2445 | ||
2113 | /* disable cpu pipe, disable after all planes disabled */ | 2446 | /* Switch from Rawclk to PCDclk */ |
2114 | temp = I915_READ(pipeconf_reg); | 2447 | temp = I915_READ(reg); |
2115 | if ((temp & PIPEACONF_ENABLE) != 0) { | 2448 | I915_WRITE(reg, temp | FDI_PCDCLK); |
2116 | I915_WRITE(pipeconf_reg, temp & ~PIPEACONF_ENABLE); | ||
2117 | 2449 | ||
2118 | /* wait for cpu pipe off, pipe state */ | 2450 | POSTING_READ(reg); |
2119 | if (wait_for((I915_READ(pipeconf_reg) & I965_PIPECONF_ACTIVE) == 0, 50, 1)) | 2451 | udelay(200); |
2120 | DRM_ERROR("failed to turn off cpu pipe\n"); | 2452 | |
2121 | } else | 2453 | /* Enable CPU FDI TX PLL, always on for Ironlake */ |
2122 | DRM_DEBUG_KMS("crtc %d is disabled\n", pipe); | 2454 | reg = FDI_TX_CTL(pipe); |
2455 | temp = I915_READ(reg); | ||
2456 | if ((temp & FDI_TX_PLL_ENABLE) == 0) { | ||
2457 | I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE); | ||
2123 | 2458 | ||
2459 | POSTING_READ(reg); | ||
2124 | udelay(100); | 2460 | udelay(100); |
2461 | } | ||
2462 | } | ||
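
Almost every update in ironlake_fdi_pll_enable() is the same read-modify-write shape, with POSTING_READ() flushing the write over the bus before a fixed delay. A compressed userspace sketch of the idiom, with a made-up bit layout and helper names:

    #include <stdint.h>

    #define FDI_RX_PLL_ENABLE (1u << 13)     /* illustrative bit position */
    #define LANE_SHIFT 19

    static uint32_t fdi_rx_ctl;              /* stand-in for FDI_RX_CTL(pipe) */

    static uint32_t read_reg(uint32_t *r)          { return *r; }
    static void write_reg(uint32_t *r, uint32_t v) { *r = v; }

    /* Read, clear the fields we own, set the new values, write back, then
     * read once more so the write is posted before any udelay(). */
    static void rmw_enable_rx_pll(int lanes)
    {
        uint32_t temp = read_reg(&fdi_rx_ctl);
        temp &= ~(0x7u << LANE_SHIFT);
        temp |= (uint32_t)(lanes - 1) << LANE_SHIFT;
        write_reg(&fdi_rx_ctl, temp | FDI_RX_PLL_ENABLE);

        (void)read_reg(&fdi_rx_ctl);   /* POSTING_READ() equivalent */
        /* udelay(200) in the driver: PLL warmup plus DMI latency */
    }

    int main(void) { rmw_enable_rx_pll(4); return 0; }
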
2463 | |||
2464 | static void ironlake_fdi_disable(struct drm_crtc *crtc) | ||
2465 | { | ||
2466 | struct drm_device *dev = crtc->dev; | ||
2467 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
2468 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
2469 | int pipe = intel_crtc->pipe; | ||
2470 | u32 reg, temp; | ||
2471 | |||
2472 | /* disable CPU FDI tx and PCH FDI rx */ | ||
2473 | reg = FDI_TX_CTL(pipe); | ||
2474 | temp = I915_READ(reg); | ||
2475 | I915_WRITE(reg, temp & ~FDI_TX_ENABLE); | ||
2476 | POSTING_READ(reg); | ||
2477 | |||
2478 | reg = FDI_RX_CTL(pipe); | ||
2479 | temp = I915_READ(reg); | ||
2480 | temp &= ~(0x7 << 16); | ||
2481 | temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11; | ||
2482 | I915_WRITE(reg, temp & ~FDI_RX_ENABLE); | ||
2483 | |||
2484 | POSTING_READ(reg); | ||
2485 | udelay(100); | ||
2486 | |||
2487 | /* Ironlake workaround, disable clock pointer after downing FDI */ | ||
2488 | if (HAS_PCH_IBX(dev)) { | ||
2489 | I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR); | ||
2490 | I915_WRITE(FDI_RX_CHICKEN(pipe), | ||
2491 | I915_READ(FDI_RX_CHICKEN(pipe)) & | ||
2492 | ~FDI_RX_PHASE_SYNC_POINTER_EN); | ||
2493 | } | ||
2494 | |||
2495 | /* still set train pattern 1 */ | ||
2496 | reg = FDI_TX_CTL(pipe); | ||
2497 | temp = I915_READ(reg); | ||
2498 | temp &= ~FDI_LINK_TRAIN_NONE; | ||
2499 | temp |= FDI_LINK_TRAIN_PATTERN_1; | ||
2500 | I915_WRITE(reg, temp); | ||
2501 | |||
2502 | reg = FDI_RX_CTL(pipe); | ||
2503 | temp = I915_READ(reg); | ||
2504 | if (HAS_PCH_CPT(dev)) { | ||
2505 | temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; | ||
2506 | temp |= FDI_LINK_TRAIN_PATTERN_1_CPT; | ||
2507 | } else { | ||
2508 | temp &= ~FDI_LINK_TRAIN_NONE; | ||
2509 | temp |= FDI_LINK_TRAIN_PATTERN_1; | ||
2510 | } | ||
2511 | /* BPC in FDI rx is consistent with that in PIPECONF */ | ||
2512 | temp &= ~(0x07 << 16); | ||
2513 | temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11; | ||
2514 | I915_WRITE(reg, temp); | ||
2125 | 2515 | ||
2126 | /* Disable PF */ | 2516 | POSTING_READ(reg); |
2127 | I915_WRITE(pipe ? PFB_CTL_1 : PFA_CTL_1, 0); | 2517 | udelay(100); |
2128 | I915_WRITE(pipe ? PFB_WIN_SZ : PFA_WIN_SZ, 0); | 2518 | } |
2129 | 2519 | ||
2130 | /* disable CPU FDI tx and PCH FDI rx */ | 2520 | /* |
2131 | temp = I915_READ(fdi_tx_reg); | 2521 | * When we disable a pipe, we need to clear any pending scanline wait events |
2132 | I915_WRITE(fdi_tx_reg, temp & ~FDI_TX_ENABLE); | 2522 | * to avoid hanging the ring, which we assume we are waiting on. |
2133 | I915_READ(fdi_tx_reg); | 2523 | */ |
2524 | static void intel_clear_scanline_wait(struct drm_device *dev) | ||
2525 | { | ||
2526 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
2527 | struct intel_ring_buffer *ring; | ||
2528 | u32 tmp; | ||
2134 | 2529 | ||
2135 | temp = I915_READ(fdi_rx_reg); | 2530 | if (IS_GEN2(dev)) |
2136 | /* BPC in FDI rx is consistent with that in pipeconf */ | 2531 | /* Can't break the hang on i8xx */ |
2137 | temp &= ~(0x07 << 16); | 2532 | return; |
2138 | temp |= (pipe_bpc << 11); | ||
2139 | I915_WRITE(fdi_rx_reg, temp & ~FDI_RX_ENABLE); | ||
2140 | I915_READ(fdi_rx_reg); | ||
2141 | 2533 | ||
2142 | udelay(100); | 2534 | ring = LP_RING(dev_priv); |
2535 | tmp = I915_READ_CTL(ring); | ||
2536 | if (tmp & RING_WAIT) | ||
2537 | I915_WRITE_CTL(ring, tmp); | ||
2538 | } | ||
2143 | 2539 | ||
2144 | /* still set train pattern 1 */ | 2540 | static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc) |
2145 | temp = I915_READ(fdi_tx_reg); | 2541 | { |
2146 | temp &= ~FDI_LINK_TRAIN_NONE; | 2542 | struct drm_i915_gem_object *obj; |
2147 | temp |= FDI_LINK_TRAIN_PATTERN_1; | 2543 | struct drm_i915_private *dev_priv; |
2148 | I915_WRITE(fdi_tx_reg, temp); | ||
2149 | POSTING_READ(fdi_tx_reg); | ||
2150 | 2544 | ||
2151 | temp = I915_READ(fdi_rx_reg); | 2545 | if (crtc->fb == NULL) |
2152 | if (HAS_PCH_CPT(dev)) { | 2546 | return; |
2153 | temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; | ||
2154 | temp |= FDI_LINK_TRAIN_PATTERN_1_CPT; | ||
2155 | } else { | ||
2156 | temp &= ~FDI_LINK_TRAIN_NONE; | ||
2157 | temp |= FDI_LINK_TRAIN_PATTERN_1; | ||
2158 | } | ||
2159 | I915_WRITE(fdi_rx_reg, temp); | ||
2160 | POSTING_READ(fdi_rx_reg); | ||
2161 | 2547 | ||
2162 | udelay(100); | 2548 | obj = to_intel_framebuffer(crtc->fb)->obj; |
2549 | dev_priv = crtc->dev->dev_private; | ||
2550 | wait_event(dev_priv->pending_flip_queue, | ||
2551 | atomic_read(&obj->pending_flip) == 0); | ||
2552 | } | ||
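
intel_crtc_wait_for_pending_flips() blocks until the framebuffer object's atomic pending_flip count drains to zero. A userspace analogue using C11 atomics; where the kernel sleeps on a waitqueue, a sketch can only poll:

    #include <stdatomic.h>
    #include <stdio.h>

    struct fb_object {
        atomic_int pending_flip;   /* incremented when a flip is queued */
    };

    static void wait_for_pending_flips(struct fb_object *obj)
    {
        /* wait_event(pending_flip_queue, pending_flip == 0) in the driver */
        while (atomic_load(&obj->pending_flip) != 0)
            ;
    }

    int main(void)
    {
        struct fb_object obj = { .pending_flip = 0 };
        wait_for_pending_flips(&obj);
        puts("no flips outstanding; safe to disable the pipe");
        return 0;
    }
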
2163 | 2553 | ||
2164 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { | 2554 | static bool intel_crtc_driving_pch(struct drm_crtc *crtc) |
2165 | temp = I915_READ(PCH_LVDS); | 2555 | { |
2166 | I915_WRITE(PCH_LVDS, temp & ~LVDS_PORT_EN); | 2556 | struct drm_device *dev = crtc->dev; |
2167 | I915_READ(PCH_LVDS); | 2557 | struct drm_mode_config *mode_config = &dev->mode_config; |
2168 | udelay(100); | 2558 | struct intel_encoder *encoder; |
2169 | } | ||
2170 | 2559 | ||
2171 | /* disable PCH transcoder */ | 2560 | /* |
2172 | temp = I915_READ(transconf_reg); | 2561 | * If there's a non-PCH eDP on this crtc, it must be DP_A, and that |
2173 | if ((temp & TRANS_ENABLE) != 0) { | 2562 | * must be driven by its own crtc; no sharing is possible. |
2174 | I915_WRITE(transconf_reg, temp & ~TRANS_ENABLE); | 2563 | */ |
2564 | list_for_each_entry(encoder, &mode_config->encoder_list, base.head) { | ||
2565 | if (encoder->base.crtc != crtc) | ||
2566 | continue; | ||
2175 | 2567 | ||
2176 | /* wait for PCH transcoder off, transcoder state */ | 2568 | switch (encoder->type) { |
2177 | if (wait_for((I915_READ(transconf_reg) & TRANS_STATE_ENABLE) == 0, 50, 1)) | 2569 | case INTEL_OUTPUT_EDP: |
2178 | DRM_ERROR("failed to disable transcoder\n"); | 2570 | if (!intel_encoder_is_pch_edp(&encoder->base)) |
2571 | return false; | ||
2572 | continue; | ||
2179 | } | 2573 | } |
2574 | } | ||
2180 | 2575 | ||
2181 | temp = I915_READ(transconf_reg); | 2576 | return true; |
2182 | /* BPC in transcoder is consistent with that in pipeconf */ | 2577 | } |
2183 | temp &= ~PIPE_BPC_MASK; | ||
2184 | temp |= pipe_bpc; | ||
2185 | I915_WRITE(transconf_reg, temp); | ||
2186 | I915_READ(transconf_reg); | ||
2187 | udelay(100); | ||
2188 | 2578 | ||
2189 | if (HAS_PCH_CPT(dev)) { | 2579 | /* |
2190 | /* disable TRANS_DP_CTL */ | 2580 | * Enable PCH resources required for PCH ports: |
2191 | int trans_dp_ctl = (pipe == 0) ? TRANS_DP_CTL_A : TRANS_DP_CTL_B; | 2581 | * - PCH PLLs |
2192 | int reg; | 2582 | * - FDI training & RX/TX |
2583 | * - update transcoder timings | ||
2584 | * - DP transcoding bits | ||
2585 | * - transcoder | ||
2586 | */ | ||
2587 | static void ironlake_pch_enable(struct drm_crtc *crtc) | ||
2588 | { | ||
2589 | struct drm_device *dev = crtc->dev; | ||
2590 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
2591 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
2592 | int pipe = intel_crtc->pipe; | ||
2593 | u32 reg, temp; | ||
2193 | 2594 | ||
2194 | reg = I915_READ(trans_dp_ctl); | 2595 | /* For PCH output, training FDI link */ |
2195 | reg &= ~(TRANS_DP_OUTPUT_ENABLE | TRANS_DP_PORT_SEL_MASK); | 2596 | dev_priv->display.fdi_link_train(crtc); |
2196 | I915_WRITE(trans_dp_ctl, reg); | ||
2197 | POSTING_READ(trans_dp_ctl); | ||
2198 | 2597 | ||
2199 | /* disable DPLL_SEL */ | 2598 | intel_enable_pch_pll(dev_priv, pipe); |
2200 | temp = I915_READ(PCH_DPLL_SEL); | 2599 | |
2201 | if (trans_dpll_sel == 0) | 2600 | if (HAS_PCH_CPT(dev)) { |
2202 | temp &= ~(TRANSA_DPLL_ENABLE | TRANSA_DPLLB_SEL); | 2601 | /* Be sure PCH DPLL SEL is set */ |
2203 | else | 2602 | temp = I915_READ(PCH_DPLL_SEL); |
2204 | temp &= ~(TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL); | 2603 | if (pipe == 0 && (temp & TRANSA_DPLL_ENABLE) == 0) |
2205 | I915_WRITE(PCH_DPLL_SEL, temp); | 2604 | temp |= (TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL); |
2206 | I915_READ(PCH_DPLL_SEL); | 2605 | else if (pipe == 1 && (temp & TRANSB_DPLL_ENABLE) == 0) |
2606 | temp |= (TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL); | ||
2607 | I915_WRITE(PCH_DPLL_SEL, temp); | ||
2608 | } | ||
2207 | 2609 | ||
2610 | /* set transcoder timing, panel must allow it */ | ||
2611 | assert_panel_unlocked(dev_priv, pipe); | ||
2612 | I915_WRITE(TRANS_HTOTAL(pipe), I915_READ(HTOTAL(pipe))); | ||
2613 | I915_WRITE(TRANS_HBLANK(pipe), I915_READ(HBLANK(pipe))); | ||
2614 | I915_WRITE(TRANS_HSYNC(pipe), I915_READ(HSYNC(pipe))); | ||
2615 | |||
2616 | I915_WRITE(TRANS_VTOTAL(pipe), I915_READ(VTOTAL(pipe))); | ||
2617 | I915_WRITE(TRANS_VBLANK(pipe), I915_READ(VBLANK(pipe))); | ||
2618 | I915_WRITE(TRANS_VSYNC(pipe), I915_READ(VSYNC(pipe))); | ||
2619 | |||
2620 | intel_fdi_normal_train(crtc); | ||
2621 | |||
2622 | /* For PCH DP, enable TRANS_DP_CTL */ | ||
2623 | if (HAS_PCH_CPT(dev) && | ||
2624 | intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) { | ||
2625 | reg = TRANS_DP_CTL(pipe); | ||
2626 | temp = I915_READ(reg); | ||
2627 | temp &= ~(TRANS_DP_PORT_SEL_MASK | | ||
2628 | TRANS_DP_SYNC_MASK | | ||
2629 | TRANS_DP_BPC_MASK); | ||
2630 | temp |= (TRANS_DP_OUTPUT_ENABLE | | ||
2631 | TRANS_DP_ENH_FRAMING); | ||
2632 | temp |= TRANS_DP_8BPC; | ||
2633 | |||
2634 | if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC) | ||
2635 | temp |= TRANS_DP_HSYNC_ACTIVE_HIGH; | ||
2636 | if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC) | ||
2637 | temp |= TRANS_DP_VSYNC_ACTIVE_HIGH; | ||
2638 | |||
2639 | switch (intel_trans_dp_port_sel(crtc)) { | ||
2640 | case PCH_DP_B: | ||
2641 | temp |= TRANS_DP_PORT_SEL_B; | ||
2642 | break; | ||
2643 | case PCH_DP_C: | ||
2644 | temp |= TRANS_DP_PORT_SEL_C; | ||
2645 | break; | ||
2646 | case PCH_DP_D: | ||
2647 | temp |= TRANS_DP_PORT_SEL_D; | ||
2648 | break; | ||
2649 | default: | ||
2650 | DRM_DEBUG_KMS("Wrong PCH DP port return. Guess port B\n"); | ||
2651 | temp |= TRANS_DP_PORT_SEL_B; | ||
2652 | break; | ||
2208 | } | 2653 | } |
2209 | 2654 | ||
2210 | /* disable PCH DPLL */ | 2655 | I915_WRITE(reg, temp); |
2211 | temp = I915_READ(pch_dpll_reg); | 2656 | } |
2212 | I915_WRITE(pch_dpll_reg, temp & ~DPLL_VCO_ENABLE); | ||
2213 | I915_READ(pch_dpll_reg); | ||
2214 | |||
2215 | /* Switch from PCDclk to Rawclk */ | ||
2216 | temp = I915_READ(fdi_rx_reg); | ||
2217 | temp &= ~FDI_SEL_PCDCLK; | ||
2218 | I915_WRITE(fdi_rx_reg, temp); | ||
2219 | I915_READ(fdi_rx_reg); | ||
2220 | |||
2221 | /* Disable CPU FDI TX PLL */ | ||
2222 | temp = I915_READ(fdi_tx_reg); | ||
2223 | I915_WRITE(fdi_tx_reg, temp & ~FDI_TX_PLL_ENABLE); | ||
2224 | I915_READ(fdi_tx_reg); | ||
2225 | udelay(100); | ||
2226 | 2657 | ||
2227 | temp = I915_READ(fdi_rx_reg); | 2658 | intel_enable_transcoder(dev_priv, pipe); |
2228 | temp &= ~FDI_RX_PLL_ENABLE; | 2659 | } |
2229 | I915_WRITE(fdi_rx_reg, temp); | ||
2230 | I915_READ(fdi_rx_reg); | ||
2231 | 2660 | ||
2232 | /* Wait for the clocks to turn off. */ | 2661 | static void ironlake_crtc_enable(struct drm_crtc *crtc) |
2233 | udelay(100); | 2662 | { |
2234 | break; | 2663 | struct drm_device *dev = crtc->dev; |
2664 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
2665 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
2666 | int pipe = intel_crtc->pipe; | ||
2667 | int plane = intel_crtc->plane; | ||
2668 | u32 temp; | ||
2669 | bool is_pch_port; | ||
2670 | |||
2671 | if (intel_crtc->active) | ||
2672 | return; | ||
2673 | |||
2674 | intel_crtc->active = true; | ||
2675 | intel_update_watermarks(dev); | ||
2676 | |||
2677 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { | ||
2678 | temp = I915_READ(PCH_LVDS); | ||
2679 | if ((temp & LVDS_PORT_EN) == 0) | ||
2680 | I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN); | ||
2235 | } | 2681 | } |
2682 | |||
2683 | is_pch_port = intel_crtc_driving_pch(crtc); | ||
2684 | |||
2685 | if (is_pch_port) | ||
2686 | ironlake_fdi_pll_enable(crtc); | ||
2687 | else | ||
2688 | ironlake_fdi_disable(crtc); | ||
2689 | |||
2690 | /* Enable panel fitting for LVDS */ | ||
2691 | if (dev_priv->pch_pf_size && | ||
2692 | (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) || HAS_eDP)) { | ||
2693 | /* Force use of hard-coded filter coefficients | ||
2694 | * as some pre-programmed values are broken, | ||
2695 | * e.g. x201. | ||
2696 | */ | ||
2697 | I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3); | ||
2698 | I915_WRITE(PF_WIN_POS(pipe), dev_priv->pch_pf_pos); | ||
2699 | I915_WRITE(PF_WIN_SZ(pipe), dev_priv->pch_pf_size); | ||
2700 | } | ||
2701 | |||
2702 | intel_enable_pipe(dev_priv, pipe, is_pch_port); | ||
2703 | intel_enable_plane(dev_priv, plane, pipe); | ||
2704 | |||
2705 | if (is_pch_port) | ||
2706 | ironlake_pch_enable(crtc); | ||
2707 | |||
2708 | intel_crtc_load_lut(crtc); | ||
2709 | |||
2710 | mutex_lock(&dev->struct_mutex); | ||
2711 | intel_update_fbc(dev); | ||
2712 | mutex_unlock(&dev->struct_mutex); | ||
2713 | |||
2714 | intel_crtc_update_cursor(crtc, true); | ||
2236 | } | 2715 | } |
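
ironlake_crtc_enable() encodes a strict bring-up order: FDI PLL (or FDI off for non-PCH outputs), panel fitter, pipe, plane, and only then the PCH side with its link training. A sketch that makes the dependency chain explicit; every function name here is a hypothetical stand-in:

    #include <stdbool.h>
    #include <stdio.h>

    static void fdi_pll_enable(void) { puts("FDI RX/TX PLLs on"); }
    static void fdi_disable(void)    { puts("FDI off"); }
    static void pipe_enable(void)    { puts("CPU pipe on"); }
    static void plane_enable(void)   { puts("plane on"); }
    static void pch_enable(void)     { puts("FDI trained, PCH transcoder on"); }

    /* Mirrors the shape of ironlake_crtc_enable(): PCH resources are only
     * touched when an encoder on this crtc actually routes through the PCH. */
    static void crtc_enable(bool is_pch_port)
    {
        if (is_pch_port)
            fdi_pll_enable();
        else
            fdi_disable();

        pipe_enable();      /* the pipe must run before the plane scans out */
        plane_enable();

        if (is_pch_port)
            pch_enable();
    }

    int main(void) { crtc_enable(true); return 0; }
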
2237 | 2716 | ||
2238 | static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable) | 2717 | static void ironlake_crtc_disable(struct drm_crtc *crtc) |
2239 | { | 2718 | { |
2240 | struct intel_overlay *overlay; | 2719 | struct drm_device *dev = crtc->dev; |
2241 | int ret; | 2720 | struct drm_i915_private *dev_priv = dev->dev_private; |
2721 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
2722 | int pipe = intel_crtc->pipe; | ||
2723 | int plane = intel_crtc->plane; | ||
2724 | u32 reg, temp; | ||
2242 | 2725 | ||
2243 | if (!enable && intel_crtc->overlay) { | 2726 | if (!intel_crtc->active) |
2244 | overlay = intel_crtc->overlay; | 2727 | return; |
2245 | mutex_lock(&overlay->dev->struct_mutex); | ||
2246 | for (;;) { | ||
2247 | ret = intel_overlay_switch_off(overlay); | ||
2248 | if (ret == 0) | ||
2249 | break; | ||
2250 | 2728 | ||
2251 | ret = intel_overlay_recover_from_interrupt(overlay, 0); | 2729 | intel_crtc_wait_for_pending_flips(crtc); |
2252 | if (ret != 0) { | 2730 | drm_vblank_off(dev, pipe); |
2253 | /* overlay doesn't react anymore. Usually | 2731 | intel_crtc_update_cursor(crtc, false); |
2254 | * results in a black screen and an unkillable | 2732 | |
2255 | * X server. */ | 2733 | intel_disable_plane(dev_priv, plane, pipe); |
2256 | BUG(); | 2734 | |
2257 | overlay->hw_wedged = HW_WEDGED; | 2735 | if (dev_priv->cfb_plane == plane && |
2258 | break; | 2736 | dev_priv->display.disable_fbc) |
2259 | } | 2737 | dev_priv->display.disable_fbc(dev); |
2738 | |||
2739 | intel_disable_pipe(dev_priv, pipe); | ||
2740 | |||
2741 | /* Disable PF */ | ||
2742 | I915_WRITE(PF_CTL(pipe), 0); | ||
2743 | I915_WRITE(PF_WIN_SZ(pipe), 0); | ||
2744 | |||
2745 | ironlake_fdi_disable(crtc); | ||
2746 | |||
2747 | /* This is a horrible layering violation; we should be doing this in | ||
2748 | * the connector/encoder ->prepare instead, but we don't always have | ||
2749 | * enough information there about the config to know whether it will | ||
2750 | * actually be necessary or just cause undesired flicker. | ||
2751 | */ | ||
2752 | intel_disable_pch_ports(dev_priv, pipe); | ||
2753 | |||
2754 | intel_disable_transcoder(dev_priv, pipe); | ||
2755 | |||
2756 | if (HAS_PCH_CPT(dev)) { | ||
2757 | /* disable TRANS_DP_CTL */ | ||
2758 | reg = TRANS_DP_CTL(pipe); | ||
2759 | temp = I915_READ(reg); | ||
2760 | temp &= ~(TRANS_DP_OUTPUT_ENABLE | TRANS_DP_PORT_SEL_MASK); | ||
2761 | temp |= TRANS_DP_PORT_SEL_NONE; | ||
2762 | I915_WRITE(reg, temp); | ||
2763 | |||
2764 | /* disable DPLL_SEL */ | ||
2765 | temp = I915_READ(PCH_DPLL_SEL); | ||
2766 | switch (pipe) { | ||
2767 | case 0: | ||
2768 | temp &= ~(TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL); | ||
2769 | break; | ||
2770 | case 1: | ||
2771 | temp &= ~(TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL); | ||
2772 | break; | ||
2773 | case 2: | ||
2774 | /* FIXME: manage transcoder PLLs? */ | ||
2775 | temp &= ~(TRANSC_DPLL_ENABLE | TRANSC_DPLLB_SEL); | ||
2776 | break; | ||
2777 | default: | ||
2778 | BUG(); /* wtf */ | ||
2260 | } | 2779 | } |
2261 | mutex_unlock(&overlay->dev->struct_mutex); | 2780 | I915_WRITE(PCH_DPLL_SEL, temp); |
2262 | } | 2781 | } |
2263 | /* Let userspace switch the overlay on again. In most cases userspace | ||
2264 | * has to recompute where to put it anyway. */ | ||
2265 | 2782 | ||
2266 | return; | 2783 | /* disable PCH DPLL */ |
2784 | intel_disable_pch_pll(dev_priv, pipe); | ||
2785 | |||
2786 | /* Switch from PCDclk to Rawclk */ | ||
2787 | reg = FDI_RX_CTL(pipe); | ||
2788 | temp = I915_READ(reg); | ||
2789 | I915_WRITE(reg, temp & ~FDI_PCDCLK); | ||
2790 | |||
2791 | /* Disable CPU FDI TX PLL */ | ||
2792 | reg = FDI_TX_CTL(pipe); | ||
2793 | temp = I915_READ(reg); | ||
2794 | I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE); | ||
2795 | |||
2796 | POSTING_READ(reg); | ||
2797 | udelay(100); | ||
2798 | |||
2799 | reg = FDI_RX_CTL(pipe); | ||
2800 | temp = I915_READ(reg); | ||
2801 | I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE); | ||
2802 | |||
2803 | /* Wait for the clocks to turn off. */ | ||
2804 | POSTING_READ(reg); | ||
2805 | udelay(100); | ||
2806 | |||
2807 | intel_crtc->active = false; | ||
2808 | intel_update_watermarks(dev); | ||
2809 | |||
2810 | mutex_lock(&dev->struct_mutex); | ||
2811 | intel_update_fbc(dev); | ||
2812 | intel_clear_scanline_wait(dev); | ||
2813 | mutex_unlock(&dev->struct_mutex); | ||
2267 | } | 2814 | } |
2268 | 2815 | ||
2269 | static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode) | 2816 | static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) |
2270 | { | 2817 | { |
2271 | struct drm_device *dev = crtc->dev; | ||
2272 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
2273 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 2818 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
2274 | int pipe = intel_crtc->pipe; | 2819 | int pipe = intel_crtc->pipe; |
2275 | int plane = intel_crtc->plane; | 2820 | int plane = intel_crtc->plane; |
2276 | int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B; | ||
2277 | int dspcntr_reg = (plane == 0) ? DSPACNTR : DSPBCNTR; | ||
2278 | int dspbase_reg = (plane == 0) ? DSPAADDR : DSPBADDR; | ||
2279 | int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF; | ||
2280 | u32 temp; | ||
2281 | 2821 | ||
2282 | /* XXX: When our outputs are all unaware of DPMS modes other than off | 2822 | /* XXX: When our outputs are all unaware of DPMS modes other than off |
2283 | * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC. | 2823 | * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC. |
@@ -2286,88 +2826,105 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
2286 | case DRM_MODE_DPMS_ON: | 2826 | case DRM_MODE_DPMS_ON: |
2287 | case DRM_MODE_DPMS_STANDBY: | 2827 | case DRM_MODE_DPMS_STANDBY: |
2288 | case DRM_MODE_DPMS_SUSPEND: | 2828 | case DRM_MODE_DPMS_SUSPEND: |
2289 | /* Enable the DPLL */ | 2829 | DRM_DEBUG_KMS("crtc %d/%d dpms on\n", pipe, plane); |
2290 | temp = I915_READ(dpll_reg); | 2830 | ironlake_crtc_enable(crtc); |
2291 | if ((temp & DPLL_VCO_ENABLE) == 0) { | 2831 | break; |
2292 | I915_WRITE(dpll_reg, temp); | ||
2293 | I915_READ(dpll_reg); | ||
2294 | /* Wait for the clocks to stabilize. */ | ||
2295 | udelay(150); | ||
2296 | I915_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE); | ||
2297 | I915_READ(dpll_reg); | ||
2298 | /* Wait for the clocks to stabilize. */ | ||
2299 | udelay(150); | ||
2300 | I915_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE); | ||
2301 | I915_READ(dpll_reg); | ||
2302 | /* Wait for the clocks to stabilize. */ | ||
2303 | udelay(150); | ||
2304 | } | ||
2305 | 2832 | ||
2306 | /* Enable the pipe */ | 2833 | case DRM_MODE_DPMS_OFF: |
2307 | temp = I915_READ(pipeconf_reg); | 2834 | DRM_DEBUG_KMS("crtc %d/%d dpms off\n", pipe, plane); |
2308 | if ((temp & PIPEACONF_ENABLE) == 0) | 2835 | ironlake_crtc_disable(crtc); |
2309 | I915_WRITE(pipeconf_reg, temp | PIPEACONF_ENABLE); | 2836 | break; |
2310 | 2837 | } | |
2311 | /* Enable the plane */ | 2838 | } |
2312 | temp = I915_READ(dspcntr_reg); | 2839 | |
2313 | if ((temp & DISPLAY_PLANE_ENABLE) == 0) { | 2840 | static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable) |
2314 | I915_WRITE(dspcntr_reg, temp | DISPLAY_PLANE_ENABLE); | 2841 | { |
2315 | /* Flush the plane changes */ | 2842 | if (!enable && intel_crtc->overlay) { |
2316 | I915_WRITE(dspbase_reg, I915_READ(dspbase_reg)); | 2843 | struct drm_device *dev = intel_crtc->base.dev; |
2317 | } | 2844 | struct drm_i915_private *dev_priv = dev->dev_private; |
2845 | |||
2846 | mutex_lock(&dev->struct_mutex); | ||
2847 | dev_priv->mm.interruptible = false; | ||
2848 | (void) intel_overlay_switch_off(intel_crtc->overlay); | ||
2849 | dev_priv->mm.interruptible = true; | ||
2850 | mutex_unlock(&dev->struct_mutex); | ||
2851 | } | ||
2318 | 2852 | ||
2319 | intel_crtc_load_lut(crtc); | 2853 | /* Let userspace switch the overlay on again. In most cases userspace |
2854 | * has to recompute where to put it anyway. | ||
2855 | */ | ||
2856 | } | ||
2320 | 2857 | ||
2321 | if ((IS_I965G(dev) || plane == 0)) | 2858 | static void i9xx_crtc_enable(struct drm_crtc *crtc) |
2322 | intel_update_fbc(crtc, &crtc->mode); | 2859 | { |
2860 | struct drm_device *dev = crtc->dev; | ||
2861 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
2862 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
2863 | int pipe = intel_crtc->pipe; | ||
2864 | int plane = intel_crtc->plane; | ||
2323 | 2865 | ||
2324 | /* Give the overlay scaler a chance to enable if it's on this pipe */ | 2866 | if (intel_crtc->active) |
2325 | intel_crtc_dpms_overlay(intel_crtc, true); | 2867 | return; |
2326 | break; | ||
2327 | case DRM_MODE_DPMS_OFF: | ||
2328 | /* Give the overlay scaler a chance to disable if it's on this pipe */ | ||
2329 | intel_crtc_dpms_overlay(intel_crtc, false); | ||
2330 | drm_vblank_off(dev, pipe); | ||
2331 | |||
2332 | if (dev_priv->cfb_plane == plane && | ||
2333 | dev_priv->display.disable_fbc) | ||
2334 | dev_priv->display.disable_fbc(dev); | ||
2335 | |||
2336 | /* Disable display plane */ | ||
2337 | temp = I915_READ(dspcntr_reg); | ||
2338 | if ((temp & DISPLAY_PLANE_ENABLE) != 0) { | ||
2339 | I915_WRITE(dspcntr_reg, temp & ~DISPLAY_PLANE_ENABLE); | ||
2340 | /* Flush the plane changes */ | ||
2341 | I915_WRITE(dspbase_reg, I915_READ(dspbase_reg)); | ||
2342 | I915_READ(dspbase_reg); | ||
2343 | } | ||
2344 | 2868 | ||
2345 | /* Don't disable pipe A or pipe A PLLs if needed */ | 2869 | intel_crtc->active = true; |
2346 | if (pipeconf_reg == PIPEACONF && | 2870 | intel_update_watermarks(dev); |
2347 | (dev_priv->quirks & QUIRK_PIPEA_FORCE)) { | ||
2348 | /* Wait for vblank for the disable to take effect */ | ||
2349 | intel_wait_for_vblank(dev, pipe); | ||
2350 | goto skip_pipe_off; | ||
2351 | } | ||
2352 | 2871 | ||
2353 | /* Next, disable display pipes */ | 2872 | intel_enable_pll(dev_priv, pipe); |
2354 | temp = I915_READ(pipeconf_reg); | 2873 | intel_enable_pipe(dev_priv, pipe, false); |
2355 | if ((temp & PIPEACONF_ENABLE) != 0) { | 2874 | intel_enable_plane(dev_priv, plane, pipe); |
2356 | I915_WRITE(pipeconf_reg, temp & ~PIPEACONF_ENABLE); | ||
2357 | I915_READ(pipeconf_reg); | ||
2358 | } | ||
2359 | 2875 | ||
2360 | /* Wait for the pipe to turn off */ | 2876 | intel_crtc_load_lut(crtc); |
2361 | intel_wait_for_pipe_off(dev, pipe); | 2877 | intel_update_fbc(dev); |
2362 | 2878 | ||
2363 | temp = I915_READ(dpll_reg); | 2879 | /* Give the overlay scaler a chance to enable if it's on this pipe */ |
2364 | if ((temp & DPLL_VCO_ENABLE) != 0) { | 2880 | intel_crtc_dpms_overlay(intel_crtc, true); |
2365 | I915_WRITE(dpll_reg, temp & ~DPLL_VCO_ENABLE); | 2881 | intel_crtc_update_cursor(crtc, true); |
2366 | I915_READ(dpll_reg); | 2882 | } |
2367 | } | 2883 | |
2368 | skip_pipe_off: | 2884 | static void i9xx_crtc_disable(struct drm_crtc *crtc) |
2369 | /* Wait for the clocks to turn off. */ | 2885 | { |
2370 | udelay(150); | 2886 | struct drm_device *dev = crtc->dev; |
2887 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
2888 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
2889 | int pipe = intel_crtc->pipe; | ||
2890 | int plane = intel_crtc->plane; | ||
2891 | |||
2892 | if (!intel_crtc->active) | ||
2893 | return; | ||
2894 | |||
2895 | /* Give the overlay scaler a chance to disable if it's on this pipe */ | ||
2896 | intel_crtc_wait_for_pending_flips(crtc); | ||
2897 | drm_vblank_off(dev, pipe); | ||
2898 | intel_crtc_dpms_overlay(intel_crtc, false); | ||
2899 | intel_crtc_update_cursor(crtc, false); | ||
2900 | |||
2901 | if (dev_priv->cfb_plane == plane && | ||
2902 | dev_priv->display.disable_fbc) | ||
2903 | dev_priv->display.disable_fbc(dev); | ||
2904 | |||
2905 | intel_disable_plane(dev_priv, plane, pipe); | ||
2906 | intel_disable_pipe(dev_priv, pipe); | ||
2907 | intel_disable_pll(dev_priv, pipe); | ||
2908 | |||
2909 | intel_crtc->active = false; | ||
2910 | intel_update_fbc(dev); | ||
2911 | intel_update_watermarks(dev); | ||
2912 | intel_clear_scanline_wait(dev); | ||
2913 | } | ||
2914 | |||
2915 | static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode) | ||
2916 | { | ||
2917 | /* XXX: When our outputs are all unaware of DPMS modes other than off | ||
2918 | * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC. | ||
2919 | */ | ||
2920 | switch (mode) { | ||
2921 | case DRM_MODE_DPMS_ON: | ||
2922 | case DRM_MODE_DPMS_STANDBY: | ||
2923 | case DRM_MODE_DPMS_SUSPEND: | ||
2924 | i9xx_crtc_enable(crtc); | ||
2925 | break; | ||
2926 | case DRM_MODE_DPMS_OFF: | ||
2927 | i9xx_crtc_disable(crtc); | ||
2371 | break; | 2928 | break; |
2372 | } | 2929 | } |
2373 | } | 2930 | } |
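
After the enable/disable split, each dpms callback collapses to a two-way mapping: every on-ish DPMS state enables the crtc, and only OFF disables it. The same collapse as a self-contained sketch:

    #include <stdio.h>

    enum dpms_mode { DPMS_ON, DPMS_STANDBY, DPMS_SUSPEND, DPMS_OFF };

    static void crtc_enable(void)  { puts("enable");  }
    static void crtc_disable(void) { puts("disable"); }

    /* The outputs only understand on/off, so STANDBY and SUSPEND fold
     * into the enable path, exactly as i9xx_crtc_dpms() does. */
    static void crtc_dpms(enum dpms_mode mode)
    {
        switch (mode) {
        case DPMS_ON:
        case DPMS_STANDBY:
        case DPMS_SUSPEND:
            crtc_enable();
            break;
        case DPMS_OFF:
            crtc_disable();
            break;
        }
    }

    int main(void) { crtc_dpms(DPMS_SUSPEND); return 0; }
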
@@ -2388,26 +2945,9 @@ static void intel_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
2388 | return; | 2945 | return; |
2389 | 2946 | ||
2390 | intel_crtc->dpms_mode = mode; | 2947 | intel_crtc->dpms_mode = mode; |
2391 | intel_crtc->cursor_on = mode == DRM_MODE_DPMS_ON; | ||
2392 | |||
2393 | /* When switching on the display, ensure that SR is disabled | ||
2394 | * with multiple pipes prior to enabling to new pipe. | ||
2395 | * | ||
2396 | * When switching off the display, make sure the cursor is | ||
2397 | * properly hidden prior to disabling the pipe. | ||
2398 | */ | ||
2399 | if (mode == DRM_MODE_DPMS_ON) | ||
2400 | intel_update_watermarks(dev); | ||
2401 | else | ||
2402 | intel_crtc_update_cursor(crtc); | ||
2403 | 2948 | ||
2404 | dev_priv->display.dpms(crtc, mode); | 2949 | dev_priv->display.dpms(crtc, mode); |
2405 | 2950 | ||
2406 | if (mode == DRM_MODE_DPMS_ON) | ||
2407 | intel_crtc_update_cursor(crtc); | ||
2408 | else | ||
2409 | intel_update_watermarks(dev); | ||
2410 | |||
2411 | if (!dev->primary->master) | 2951 | if (!dev->primary->master) |
2412 | return; | 2952 | return; |
2413 | 2953 | ||
@@ -2427,21 +2967,51 @@ static void intel_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
2427 | master_priv->sarea_priv->pipeB_h = enabled ? crtc->mode.vdisplay : 0; | 2967 | master_priv->sarea_priv->pipeB_h = enabled ? crtc->mode.vdisplay : 0; |
2428 | break; | 2968 | break; |
2429 | default: | 2969 | default: |
2430 | DRM_ERROR("Can't update pipe %d in SAREA\n", pipe); | 2970 | DRM_ERROR("Can't update pipe %c in SAREA\n", pipe_name(pipe)); |
2431 | break; | 2971 | break; |
2432 | } | 2972 | } |
2433 | } | 2973 | } |
2434 | 2974 | ||
2435 | static void intel_crtc_prepare (struct drm_crtc *crtc) | 2975 | static void intel_crtc_disable(struct drm_crtc *crtc) |
2436 | { | 2976 | { |
2437 | struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; | 2977 | struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; |
2978 | struct drm_device *dev = crtc->dev; | ||
2979 | |||
2438 | crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF); | 2980 | crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF); |
2981 | |||
2982 | if (crtc->fb) { | ||
2983 | mutex_lock(&dev->struct_mutex); | ||
2984 | i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj); | ||
2985 | mutex_unlock(&dev->struct_mutex); | ||
2986 | } | ||
2987 | } | ||
2988 | |||
2989 | /* Prepare for a mode set. | ||
2990 | * | ||
2991 | * Note we could be a lot smarter here. We need to figure out which outputs | ||
2992 | * will be enabled, which disabled (in short, how the config will change) | ||
2993 | * and perform the minimum necessary steps to accomplish that, e.g. updating | ||
2994 | * watermarks, FBC configuration, making sure PLLs are programmed correctly, | ||
2995 | * panel fitting is in the proper state, etc. | ||
2996 | */ | ||
2997 | static void i9xx_crtc_prepare(struct drm_crtc *crtc) | ||
2998 | { | ||
2999 | i9xx_crtc_disable(crtc); | ||
2439 | } | 3000 | } |
2440 | 3001 | ||
2441 | static void intel_crtc_commit (struct drm_crtc *crtc) | 3002 | static void i9xx_crtc_commit(struct drm_crtc *crtc) |
2442 | { | 3003 | { |
2443 | struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; | 3004 | i9xx_crtc_enable(crtc); |
2444 | crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON); | 3005 | } |
3006 | |||
3007 | static void ironlake_crtc_prepare(struct drm_crtc *crtc) | ||
3008 | { | ||
3009 | ironlake_crtc_disable(crtc); | ||
3010 | } | ||
3011 | |||
3012 | static void ironlake_crtc_commit(struct drm_crtc *crtc) | ||
3013 | { | ||
3014 | ironlake_crtc_enable(crtc); | ||
2445 | } | 3015 | } |
2446 | 3016 | ||
2447 | void intel_encoder_prepare (struct drm_encoder *encoder) | 3017 | void intel_encoder_prepare (struct drm_encoder *encoder) |
@@ -2460,13 +3030,7 @@ void intel_encoder_commit (struct drm_encoder *encoder) | |||
2460 | 3030 | ||
2461 | void intel_encoder_destroy(struct drm_encoder *encoder) | 3031 | void intel_encoder_destroy(struct drm_encoder *encoder) |
2462 | { | 3032 | { |
2463 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); | 3033 | struct intel_encoder *intel_encoder = to_intel_encoder(encoder); |
2464 | |||
2465 | if (intel_encoder->ddc_bus) | ||
2466 | intel_i2c_destroy(intel_encoder->ddc_bus); | ||
2467 | |||
2468 | if (intel_encoder->i2c_bus) | ||
2469 | intel_i2c_destroy(intel_encoder->i2c_bus); | ||
2470 | 3034 | ||
2471 | drm_encoder_cleanup(encoder); | 3035 | drm_encoder_cleanup(encoder); |
2472 | kfree(intel_encoder); | 3036 | kfree(intel_encoder); |
@@ -2557,33 +3121,6 @@ static int i830_get_display_clock_speed(struct drm_device *dev) | |||
2557 | return 133000; | 3121 | return 133000; |
2558 | } | 3122 | } |
2559 | 3123 | ||
2560 | /** | ||
2561 | * Return the pipe currently connected to the panel fitter, | ||
2562 | * or -1 if the panel fitter is not present or not in use | ||
2563 | */ | ||
2564 | int intel_panel_fitter_pipe (struct drm_device *dev) | ||
2565 | { | ||
2566 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
2567 | u32 pfit_control; | ||
2568 | |||
2569 | /* i830 doesn't have a panel fitter */ | ||
2570 | if (IS_I830(dev)) | ||
2571 | return -1; | ||
2572 | |||
2573 | pfit_control = I915_READ(PFIT_CONTROL); | ||
2574 | |||
2575 | /* See if the panel fitter is in use */ | ||
2576 | if ((pfit_control & PFIT_ENABLE) == 0) | ||
2577 | return -1; | ||
2578 | |||
2579 | /* 965 can place panel fitter on either pipe */ | ||
2580 | if (IS_I965G(dev)) | ||
2581 | return (pfit_control >> 29) & 0x3; | ||
2582 | |||
2583 | /* older chips can only use pipe 1 */ | ||
2584 | return 1; | ||
2585 | } | ||
2586 | |||
2587 | struct fdi_m_n { | 3124 | struct fdi_m_n { |
2588 | u32 tu; | 3125 | u32 tu; |
2589 | u32 gmch_m; | 3126 | u32 gmch_m; |
@@ -2601,27 +3138,19 @@ fdi_reduce_ratio(u32 *num, u32 *den) | |||
2601 | } | 3138 | } |
2602 | } | 3139 | } |
2603 | 3140 | ||
2604 | #define DATA_N 0x800000 | ||
2605 | #define LINK_N 0x80000 | ||
2606 | |||
2607 | static void | 3141 | static void |
2608 | ironlake_compute_m_n(int bits_per_pixel, int nlanes, int pixel_clock, | 3142 | ironlake_compute_m_n(int bits_per_pixel, int nlanes, int pixel_clock, |
2609 | int link_clock, struct fdi_m_n *m_n) | 3143 | int link_clock, struct fdi_m_n *m_n) |
2610 | { | 3144 | { |
2611 | u64 temp; | ||
2612 | |||
2613 | m_n->tu = 64; /* default size */ | 3145 | m_n->tu = 64; /* default size */ |
2614 | 3146 | ||
2615 | temp = (u64) DATA_N * pixel_clock; | 3147 | /* BUG_ON(pixel_clock > INT_MAX / 36); */ |
2616 | temp = div_u64(temp, link_clock); | 3148 | m_n->gmch_m = bits_per_pixel * pixel_clock; |
2617 | m_n->gmch_m = div_u64(temp * bits_per_pixel, nlanes); | 3149 | m_n->gmch_n = link_clock * nlanes * 8; |
2618 | m_n->gmch_m >>= 3; /* convert to bytes_per_pixel */ | ||
2619 | m_n->gmch_n = DATA_N; | ||
2620 | fdi_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n); | 3150 | fdi_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n); |
2621 | 3151 | ||
2622 | temp = (u64) LINK_N * pixel_clock; | 3152 | m_n->link_m = pixel_clock; |
2623 | m_n->link_m = div_u64(temp, link_clock); | 3153 | m_n->link_n = link_clock; |
2624 | m_n->link_n = LINK_N; | ||
2625 | fdi_reduce_ratio(&m_n->link_m, &m_n->link_n); | 3154 | fdi_reduce_ratio(&m_n->link_m, &m_n->link_n); |
2626 | } | 3155 | } |
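
The rewritten ironlake_compute_m_n() drops the fixed DATA_N/LINK_N scaling and instead forms the exact ratios (bpp * pixel_clock) / (link_clock * lanes * 8) and pixel_clock / link_clock, reducing each by its GCD. A runnable sketch of that arithmetic with assumed figures (24 bpp, 4 lanes, 148500 kHz pixel clock, 270000 kHz link clock); the real fdi_reduce_ratio() also clamps to the register field width, which is omitted here:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t gcd32(uint32_t a, uint32_t b)
    {
        while (b) { uint32_t t = a % b; a = b; b = t; }
        return a;
    }

    static void reduce(uint32_t *num, uint32_t *den)
    {
        uint32_t g = gcd32(*num, *den);
        *num /= g;
        *den /= g;
    }

    int main(void)
    {
        uint32_t bpp = 24, lanes = 4;
        uint32_t pixel_clock = 148500, link_clock = 270000;   /* kHz */

        uint32_t gmch_m = bpp * pixel_clock;        /* 3564000 */
        uint32_t gmch_n = link_clock * lanes * 8;   /* 8640000 */
        reduce(&gmch_m, &gmch_n);

        uint32_t link_m = pixel_clock;
        uint32_t link_n = link_clock;
        reduce(&link_m, &link_n);

        /* prints: gmch 33/80, link 11/20 */
        printf("gmch %u/%u, link %u/%u\n", gmch_m, gmch_n, link_m, link_n);
        return 0;
    }
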
2627 | 3156 | ||
@@ -2635,77 +3164,77 @@ struct intel_watermark_params { | |||
2635 | }; | 3164 | }; |
2636 | 3165 | ||
2637 | /* Pineview has different values for various configs */ | 3166 | /* Pineview has different values for various configs */ |
2638 | static struct intel_watermark_params pineview_display_wm = { | 3167 | static const struct intel_watermark_params pineview_display_wm = { |
2639 | PINEVIEW_DISPLAY_FIFO, | 3168 | PINEVIEW_DISPLAY_FIFO, |
2640 | PINEVIEW_MAX_WM, | 3169 | PINEVIEW_MAX_WM, |
2641 | PINEVIEW_DFT_WM, | 3170 | PINEVIEW_DFT_WM, |
2642 | PINEVIEW_GUARD_WM, | 3171 | PINEVIEW_GUARD_WM, |
2643 | PINEVIEW_FIFO_LINE_SIZE | 3172 | PINEVIEW_FIFO_LINE_SIZE |
2644 | }; | 3173 | }; |
2645 | static struct intel_watermark_params pineview_display_hplloff_wm = { | 3174 | static const struct intel_watermark_params pineview_display_hplloff_wm = { |
2646 | PINEVIEW_DISPLAY_FIFO, | 3175 | PINEVIEW_DISPLAY_FIFO, |
2647 | PINEVIEW_MAX_WM, | 3176 | PINEVIEW_MAX_WM, |
2648 | PINEVIEW_DFT_HPLLOFF_WM, | 3177 | PINEVIEW_DFT_HPLLOFF_WM, |
2649 | PINEVIEW_GUARD_WM, | 3178 | PINEVIEW_GUARD_WM, |
2650 | PINEVIEW_FIFO_LINE_SIZE | 3179 | PINEVIEW_FIFO_LINE_SIZE |
2651 | }; | 3180 | }; |
2652 | static struct intel_watermark_params pineview_cursor_wm = { | 3181 | static const struct intel_watermark_params pineview_cursor_wm = { |
2653 | PINEVIEW_CURSOR_FIFO, | 3182 | PINEVIEW_CURSOR_FIFO, |
2654 | PINEVIEW_CURSOR_MAX_WM, | 3183 | PINEVIEW_CURSOR_MAX_WM, |
2655 | PINEVIEW_CURSOR_DFT_WM, | 3184 | PINEVIEW_CURSOR_DFT_WM, |
2656 | PINEVIEW_CURSOR_GUARD_WM, | 3185 | PINEVIEW_CURSOR_GUARD_WM, |
2657 | PINEVIEW_FIFO_LINE_SIZE, | 3186 | PINEVIEW_FIFO_LINE_SIZE, |
2658 | }; | 3187 | }; |
2659 | static struct intel_watermark_params pineview_cursor_hplloff_wm = { | 3188 | static const struct intel_watermark_params pineview_cursor_hplloff_wm = { |
2660 | PINEVIEW_CURSOR_FIFO, | 3189 | PINEVIEW_CURSOR_FIFO, |
2661 | PINEVIEW_CURSOR_MAX_WM, | 3190 | PINEVIEW_CURSOR_MAX_WM, |
2662 | PINEVIEW_CURSOR_DFT_WM, | 3191 | PINEVIEW_CURSOR_DFT_WM, |
2663 | PINEVIEW_CURSOR_GUARD_WM, | 3192 | PINEVIEW_CURSOR_GUARD_WM, |
2664 | PINEVIEW_FIFO_LINE_SIZE | 3193 | PINEVIEW_FIFO_LINE_SIZE |
2665 | }; | 3194 | }; |
2666 | static struct intel_watermark_params g4x_wm_info = { | 3195 | static const struct intel_watermark_params g4x_wm_info = { |
2667 | G4X_FIFO_SIZE, | 3196 | G4X_FIFO_SIZE, |
2668 | G4X_MAX_WM, | 3197 | G4X_MAX_WM, |
2669 | G4X_MAX_WM, | 3198 | G4X_MAX_WM, |
2670 | 2, | 3199 | 2, |
2671 | G4X_FIFO_LINE_SIZE, | 3200 | G4X_FIFO_LINE_SIZE, |
2672 | }; | 3201 | }; |
2673 | static struct intel_watermark_params g4x_cursor_wm_info = { | 3202 | static const struct intel_watermark_params g4x_cursor_wm_info = { |
2674 | I965_CURSOR_FIFO, | 3203 | I965_CURSOR_FIFO, |
2675 | I965_CURSOR_MAX_WM, | 3204 | I965_CURSOR_MAX_WM, |
2676 | I965_CURSOR_DFT_WM, | 3205 | I965_CURSOR_DFT_WM, |
2677 | 2, | 3206 | 2, |
2678 | G4X_FIFO_LINE_SIZE, | 3207 | G4X_FIFO_LINE_SIZE, |
2679 | }; | 3208 | }; |
2680 | static struct intel_watermark_params i965_cursor_wm_info = { | 3209 | static const struct intel_watermark_params i965_cursor_wm_info = { |
2681 | I965_CURSOR_FIFO, | 3210 | I965_CURSOR_FIFO, |
2682 | I965_CURSOR_MAX_WM, | 3211 | I965_CURSOR_MAX_WM, |
2683 | I965_CURSOR_DFT_WM, | 3212 | I965_CURSOR_DFT_WM, |
2684 | 2, | 3213 | 2, |
2685 | I915_FIFO_LINE_SIZE, | 3214 | I915_FIFO_LINE_SIZE, |
2686 | }; | 3215 | }; |
2687 | static struct intel_watermark_params i945_wm_info = { | 3216 | static const struct intel_watermark_params i945_wm_info = { |
2688 | I945_FIFO_SIZE, | 3217 | I945_FIFO_SIZE, |
2689 | I915_MAX_WM, | 3218 | I915_MAX_WM, |
2690 | 1, | 3219 | 1, |
2691 | 2, | 3220 | 2, |
2692 | I915_FIFO_LINE_SIZE | 3221 | I915_FIFO_LINE_SIZE |
2693 | }; | 3222 | }; |
2694 | static struct intel_watermark_params i915_wm_info = { | 3223 | static const struct intel_watermark_params i915_wm_info = { |
2695 | I915_FIFO_SIZE, | 3224 | I915_FIFO_SIZE, |
2696 | I915_MAX_WM, | 3225 | I915_MAX_WM, |
2697 | 1, | 3226 | 1, |
2698 | 2, | 3227 | 2, |
2699 | I915_FIFO_LINE_SIZE | 3228 | I915_FIFO_LINE_SIZE |
2700 | }; | 3229 | }; |
2701 | static struct intel_watermark_params i855_wm_info = { | 3230 | static const struct intel_watermark_params i855_wm_info = { |
2702 | I855GM_FIFO_SIZE, | 3231 | I855GM_FIFO_SIZE, |
2703 | I915_MAX_WM, | 3232 | I915_MAX_WM, |
2704 | 1, | 3233 | 1, |
2705 | 2, | 3234 | 2, |
2706 | I830_FIFO_LINE_SIZE | 3235 | I830_FIFO_LINE_SIZE |
2707 | }; | 3236 | }; |
2708 | static struct intel_watermark_params i830_wm_info = { | 3237 | static const struct intel_watermark_params i830_wm_info = { |
2709 | I830_FIFO_SIZE, | 3238 | I830_FIFO_SIZE, |
2710 | I915_MAX_WM, | 3239 | I915_MAX_WM, |
2711 | 1, | 3240 | 1, |
@@ -2713,31 +3242,28 @@ static struct intel_watermark_params i830_wm_info = { | |||
2713 | I830_FIFO_LINE_SIZE | 3242 | I830_FIFO_LINE_SIZE |
2714 | }; | 3243 | }; |
2715 | 3244 | ||
2716 | static struct intel_watermark_params ironlake_display_wm_info = { | 3245 | static const struct intel_watermark_params ironlake_display_wm_info = { |
2717 | ILK_DISPLAY_FIFO, | 3246 | ILK_DISPLAY_FIFO, |
2718 | ILK_DISPLAY_MAXWM, | 3247 | ILK_DISPLAY_MAXWM, |
2719 | ILK_DISPLAY_DFTWM, | 3248 | ILK_DISPLAY_DFTWM, |
2720 | 2, | 3249 | 2, |
2721 | ILK_FIFO_LINE_SIZE | 3250 | ILK_FIFO_LINE_SIZE |
2722 | }; | 3251 | }; |
2723 | 3252 | static const struct intel_watermark_params ironlake_cursor_wm_info = { | |
2724 | static struct intel_watermark_params ironlake_cursor_wm_info = { | ||
2725 | ILK_CURSOR_FIFO, | 3253 | ILK_CURSOR_FIFO, |
2726 | ILK_CURSOR_MAXWM, | 3254 | ILK_CURSOR_MAXWM, |
2727 | ILK_CURSOR_DFTWM, | 3255 | ILK_CURSOR_DFTWM, |
2728 | 2, | 3256 | 2, |
2729 | ILK_FIFO_LINE_SIZE | 3257 | ILK_FIFO_LINE_SIZE |
2730 | }; | 3258 | }; |
2731 | 3259 | static const struct intel_watermark_params ironlake_display_srwm_info = { | |
2732 | static struct intel_watermark_params ironlake_display_srwm_info = { | ||
2733 | ILK_DISPLAY_SR_FIFO, | 3260 | ILK_DISPLAY_SR_FIFO, |
2734 | ILK_DISPLAY_MAX_SRWM, | 3261 | ILK_DISPLAY_MAX_SRWM, |
2735 | ILK_DISPLAY_DFT_SRWM, | 3262 | ILK_DISPLAY_DFT_SRWM, |
2736 | 2, | 3263 | 2, |
2737 | ILK_FIFO_LINE_SIZE | 3264 | ILK_FIFO_LINE_SIZE |
2738 | }; | 3265 | }; |
2739 | 3266 | static const struct intel_watermark_params ironlake_cursor_srwm_info = { | |
2740 | static struct intel_watermark_params ironlake_cursor_srwm_info = { | ||
2741 | ILK_CURSOR_SR_FIFO, | 3267 | ILK_CURSOR_SR_FIFO, |
2742 | ILK_CURSOR_MAX_SRWM, | 3268 | ILK_CURSOR_MAX_SRWM, |
2743 | ILK_CURSOR_DFT_SRWM, | 3269 | ILK_CURSOR_DFT_SRWM, |
@@ -2745,6 +3271,36 @@ static struct intel_watermark_params ironlake_cursor_srwm_info = { | |||
2745 | ILK_FIFO_LINE_SIZE | 3271 | ILK_FIFO_LINE_SIZE |
2746 | }; | 3272 | }; |
2747 | 3273 | ||
3274 | static const struct intel_watermark_params sandybridge_display_wm_info = { | ||
3275 | SNB_DISPLAY_FIFO, | ||
3276 | SNB_DISPLAY_MAXWM, | ||
3277 | SNB_DISPLAY_DFTWM, | ||
3278 | 2, | ||
3279 | SNB_FIFO_LINE_SIZE | ||
3280 | }; | ||
3281 | static const struct intel_watermark_params sandybridge_cursor_wm_info = { | ||
3282 | SNB_CURSOR_FIFO, | ||
3283 | SNB_CURSOR_MAXWM, | ||
3284 | SNB_CURSOR_DFTWM, | ||
3285 | 2, | ||
3286 | SNB_FIFO_LINE_SIZE | ||
3287 | }; | ||
3288 | static const struct intel_watermark_params sandybridge_display_srwm_info = { | ||
3289 | SNB_DISPLAY_SR_FIFO, | ||
3290 | SNB_DISPLAY_MAX_SRWM, | ||
3291 | SNB_DISPLAY_DFT_SRWM, | ||
3292 | 2, | ||
3293 | SNB_FIFO_LINE_SIZE | ||
3294 | }; | ||
3295 | static const struct intel_watermark_params sandybridge_cursor_srwm_info = { | ||
3296 | SNB_CURSOR_SR_FIFO, | ||
3297 | SNB_CURSOR_MAX_SRWM, | ||
3298 | SNB_CURSOR_DFT_SRWM, | ||
3299 | 2, | ||
3300 | SNB_FIFO_LINE_SIZE | ||
3301 | }; | ||
3302 | |||
3303 | |||
2748 | /** | 3304 | /** |
2749 | * intel_calculate_wm - calculate watermark level | 3305 | * intel_calculate_wm - calculate watermark level |
2750 | * @clock_in_khz: pixel clock | 3306 | * @clock_in_khz: pixel clock |
@@ -2764,7 +3320,8 @@ static struct intel_watermark_params ironlake_cursor_srwm_info = { | |||
2764 | * will occur, and a display engine hang could result. | 3320 | * will occur, and a display engine hang could result. |
2765 | */ | 3321 | */ |
2766 | static unsigned long intel_calculate_wm(unsigned long clock_in_khz, | 3322 | static unsigned long intel_calculate_wm(unsigned long clock_in_khz, |
2767 | struct intel_watermark_params *wm, | 3323 | const struct intel_watermark_params *wm, |
3324 | int fifo_size, | ||
2768 | int pixel_size, | 3325 | int pixel_size, |
2769 | unsigned long latency_ns) | 3326 | unsigned long latency_ns) |
2770 | { | 3327 | { |
@@ -2780,11 +3337,11 @@ static unsigned long intel_calculate_wm(unsigned long clock_in_khz, | |||
2780 | 1000; | 3337 | 1000; |
2781 | entries_required = DIV_ROUND_UP(entries_required, wm->cacheline_size); | 3338 | entries_required = DIV_ROUND_UP(entries_required, wm->cacheline_size); |
2782 | 3339 | ||
2783 | DRM_DEBUG_KMS("FIFO entries required for mode: %d\n", entries_required); | 3340 | DRM_DEBUG_KMS("FIFO entries required for mode: %ld\n", entries_required); |
2784 | 3341 | ||
2785 | wm_size = wm->fifo_size - (entries_required + wm->guard_size); | 3342 | wm_size = fifo_size - (entries_required + wm->guard_size); |
2786 | 3343 | ||
2787 | DRM_DEBUG_KMS("FIFO watermark level: %d\n", wm_size); | 3344 | DRM_DEBUG_KMS("FIFO watermark level: %ld\n", wm_size); |
2788 | 3345 | ||
2789 | /* Don't promote wm_size to unsigned... */ | 3346 | /* Don't promote wm_size to unsigned... */ |
2790 | if (wm_size > (long)wm->max_wm) | 3347 | if (wm_size > (long)wm->max_wm) |
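
The watermark formula above reduces to: bytes fetched during the latency window, rounded up to whole FIFO cachelines, subtracted together with a guard band from the FIFO size. A worked, runnable version with illustrative numbers (none of the constants correspond to a real platform):

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    static long calculate_wm(unsigned long clock_in_khz, int fifo_size,
                             int cacheline_size, int guard_size,
                             int pixel_size, unsigned long latency_ns)
    {
        /* Bytes the display fetches during the latency window... */
        unsigned long entries = ((clock_in_khz / 1000) * pixel_size *
                                 latency_ns) / 1000;
        /* ...rounded up to whole FIFO cachelines. */
        entries = DIV_ROUND_UP(entries, cacheline_size);

        /* Whatever FIFO is left over is the watermark level. */
        return (long)fifo_size - (long)(entries + guard_size);
    }

    int main(void)
    {
        /* 148.5 MHz dot clock, 4 bytes per pixel, 2 us latency,
         * a 96-line FIFO with 64-byte lines and a 2-line guard band. */
        long wm = calculate_wm(148500, 96, 64, 2, 4, 2000);
        printf("watermark level: %ld\n", wm);   /* 96 - (19 + 2) = 75 */
        return 0;
    }
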
@@ -2902,7 +3459,7 @@ static int i9xx_get_fifo_size(struct drm_device *dev, int plane) | |||
2902 | size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size; | 3459 | size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size; |
2903 | 3460 | ||
2904 | DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb, | 3461 | DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb, |
2905 | plane ? "B" : "A", size); | 3462 | plane ? "B" : "A", size); |
2906 | 3463 | ||
2907 | return size; | 3464 | return size; |
2908 | } | 3465 | } |
@@ -2919,7 +3476,7 @@ static int i85x_get_fifo_size(struct drm_device *dev, int plane) | |||
2919 | size >>= 1; /* Convert to cachelines */ | 3476 | size >>= 1; /* Convert to cachelines */ |
2920 | 3477 | ||
2921 | DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb, | 3478 | DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb, |
2922 | plane ? "B" : "A", size); | 3479 | plane ? "B" : "A", size); |
2923 | 3480 | ||
2924 | return size; | 3481 | return size; |
2925 | } | 3482 | } |
@@ -2934,8 +3491,8 @@ static int i845_get_fifo_size(struct drm_device *dev, int plane) | |||
2934 | size >>= 2; /* Convert to cachelines */ | 3491 | size >>= 2; /* Convert to cachelines */ |
2935 | 3492 | ||
2936 | DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb, | 3493 | DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb, |
2937 | plane ? "B" : "A", | 3494 | plane ? "B" : "A", |
2938 | size); | 3495 | size); |
2939 | 3496 | ||
2940 | return size; | 3497 | return size; |
2941 | } | 3498 | } |
@@ -2950,20 +3507,33 @@ static int i830_get_fifo_size(struct drm_device *dev, int plane) | |||
2950 | size >>= 1; /* Convert to cachelines */ | 3507 | size >>= 1; /* Convert to cachelines */ |
2951 | 3508 | ||
2952 | DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb, | 3509 | DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb, |
2953 | plane ? "B" : "A", size); | 3510 | plane ? "B" : "A", size); |
2954 | 3511 | ||
2955 | return size; | 3512 | return size; |
2956 | } | 3513 | } |
2957 | 3514 | ||
2958 | static void pineview_update_wm(struct drm_device *dev, int planea_clock, | 3515 | static struct drm_crtc *single_enabled_crtc(struct drm_device *dev) |
2959 | int planeb_clock, int sr_hdisplay, int unused, | 3516 | { |
2960 | int pixel_size) | 3517 | struct drm_crtc *crtc, *enabled = NULL; |
3518 | |||
3519 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { | ||
3520 | if (crtc->enabled && crtc->fb) { | ||
3521 | if (enabled) | ||
3522 | return NULL; | ||
3523 | enabled = crtc; | ||
3524 | } | ||
3525 | } | ||
3526 | |||
3527 | return enabled; | ||
3528 | } | ||
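
single_enabled_crtc() is an "exactly one" scan: it bails out with NULL the moment a second lit crtc is found, since the self-refresh watermarks below only make sense with a single active display. The same pattern in isolation over a plain array:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    struct crtc { bool enabled; bool has_fb; };

    /* Returns the single enabled entry, or NULL on zero or more than one. */
    static struct crtc *single_enabled(struct crtc *v, size_t n)
    {
        struct crtc *found = NULL;

        for (size_t i = 0; i < n; i++) {
            if (v[i].enabled && v[i].has_fb) {
                if (found)
                    return NULL;     /* second hit: not single */
                found = &v[i];
            }
        }
        return found;
    }

    int main(void)
    {
        struct crtc crtcs[2] = { { true, true }, { false, false } };
        printf("%s\n", single_enabled(crtcs, 2) ? "one crtc" : "zero or many");
        return 0;
    }
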
3529 | |||
3530 | static void pineview_update_wm(struct drm_device *dev) | ||
2961 | { | 3531 | { |
2962 | struct drm_i915_private *dev_priv = dev->dev_private; | 3532 | struct drm_i915_private *dev_priv = dev->dev_private; |
3533 | struct drm_crtc *crtc; | ||
2963 | const struct cxsr_latency *latency; | 3534 | const struct cxsr_latency *latency; |
2964 | u32 reg; | 3535 | u32 reg; |
2965 | unsigned long wm; | 3536 | unsigned long wm; |
2966 | int sr_clock; | ||
2967 | 3537 | ||
2968 | latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3, | 3538 | latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3, |
2969 | dev_priv->fsb_freq, dev_priv->mem_freq); | 3539 | dev_priv->fsb_freq, dev_priv->mem_freq); |
@@ -2973,11 +3543,14 @@ static void pineview_update_wm(struct drm_device *dev, int planea_clock, | |||
2973 | return; | 3543 | return; |
2974 | } | 3544 | } |
2975 | 3545 | ||
2976 | if (!planea_clock || !planeb_clock) { | 3546 | crtc = single_enabled_crtc(dev); |
2977 | sr_clock = planea_clock ? planea_clock : planeb_clock; | 3547 | if (crtc) { |
3548 | int clock = crtc->mode.clock; | ||
3549 | int pixel_size = crtc->fb->bits_per_pixel / 8; | ||
2978 | 3550 | ||
2979 | /* Display SR */ | 3551 | /* Display SR */ |
2980 | wm = intel_calculate_wm(sr_clock, &pineview_display_wm, | 3552 | wm = intel_calculate_wm(clock, &pineview_display_wm, |
3553 | pineview_display_wm.fifo_size, | ||
2981 | pixel_size, latency->display_sr); | 3554 | pixel_size, latency->display_sr); |
2982 | reg = I915_READ(DSPFW1); | 3555 | reg = I915_READ(DSPFW1); |
2983 | reg &= ~DSPFW_SR_MASK; | 3556 | reg &= ~DSPFW_SR_MASK; |
@@ -2986,7 +3559,8 @@ static void pineview_update_wm(struct drm_device *dev, int planea_clock, | |||
2986 | DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg); | 3559 | DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg); |
2987 | 3560 | ||
2988 | /* cursor SR */ | 3561 | /* cursor SR */ |
2989 | wm = intel_calculate_wm(sr_clock, &pineview_cursor_wm, | 3562 | wm = intel_calculate_wm(clock, &pineview_cursor_wm, |
3563 | pineview_display_wm.fifo_size, | ||
2990 | pixel_size, latency->cursor_sr); | 3564 | pixel_size, latency->cursor_sr); |
2991 | reg = I915_READ(DSPFW3); | 3565 | reg = I915_READ(DSPFW3); |
2992 | reg &= ~DSPFW_CURSOR_SR_MASK; | 3566 | reg &= ~DSPFW_CURSOR_SR_MASK; |
@@ -2994,7 +3568,8 @@ static void pineview_update_wm(struct drm_device *dev, int planea_clock, | |||
2994 | I915_WRITE(DSPFW3, reg); | 3568 | I915_WRITE(DSPFW3, reg); |
2995 | 3569 | ||
2996 | /* Display HPLL off SR */ | 3570 | /* Display HPLL off SR */ |
2997 | wm = intel_calculate_wm(sr_clock, &pineview_display_hplloff_wm, | 3571 | wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm, |
3572 | pineview_display_hplloff_wm.fifo_size, | ||
2998 | pixel_size, latency->display_hpll_disable); | 3573 | pixel_size, latency->display_hpll_disable); |
2999 | reg = I915_READ(DSPFW3); | 3574 | reg = I915_READ(DSPFW3); |
3000 | reg &= ~DSPFW_HPLL_SR_MASK; | 3575 | reg &= ~DSPFW_HPLL_SR_MASK; |
@@ -3002,7 +3577,8 @@ static void pineview_update_wm(struct drm_device *dev, int planea_clock, | |||
3002 | I915_WRITE(DSPFW3, reg); | 3577 | I915_WRITE(DSPFW3, reg); |
3003 | 3578 | ||
3004 | /* cursor HPLL off SR */ | 3579 | /* cursor HPLL off SR */ |
3005 | wm = intel_calculate_wm(sr_clock, &pineview_cursor_hplloff_wm, | 3580 | wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm, |
3581 | pineview_display_hplloff_wm.fifo_size, | ||
3006 | pixel_size, latency->cursor_hpll_disable); | 3582 | pixel_size, latency->cursor_hpll_disable); |
3007 | reg = I915_READ(DSPFW3); | 3583 | reg = I915_READ(DSPFW3); |
3008 | reg &= ~DSPFW_HPLL_CURSOR_MASK; | 3584 | reg &= ~DSPFW_HPLL_CURSOR_MASK; |
@@ -3020,125 +3596,229 @@ static void pineview_update_wm(struct drm_device *dev, int planea_clock, | |||
3020 | } | 3596 | } |
3021 | } | 3597 | } |
3022 | 3598 | ||
3023 | static void g4x_update_wm(struct drm_device *dev, int planea_clock, | 3599 | static bool g4x_compute_wm0(struct drm_device *dev, |
3024 | int planeb_clock, int sr_hdisplay, int sr_htotal, | 3600 | int plane, |
3025 | int pixel_size) | 3601 | const struct intel_watermark_params *display, |
3602 | int display_latency_ns, | ||
3603 | const struct intel_watermark_params *cursor, | ||
3604 | int cursor_latency_ns, | ||
3605 | int *plane_wm, | ||
3606 | int *cursor_wm) | ||
3026 | { | 3607 | { |
3027 | struct drm_i915_private *dev_priv = dev->dev_private; | 3608 | struct drm_crtc *crtc; |
3028 | int total_size, cacheline_size; | 3609 | int htotal, hdisplay, clock, pixel_size; |
3029 | int planea_wm, planeb_wm, cursora_wm, cursorb_wm, cursor_sr; | 3610 | int line_time_us, line_count; |
3030 | struct intel_watermark_params planea_params, planeb_params; | 3611 | int entries, tlb_miss; |
3031 | unsigned long line_time_us; | 3612 | |
3032 | int sr_clock, sr_entries = 0, entries_required; | 3613 | crtc = intel_get_crtc_for_plane(dev, plane); |
3033 | 3614 | if (crtc->fb == NULL || !crtc->enabled) { | |
3034 | /* Create copies of the base settings for each pipe */ | 3615 | *cursor_wm = cursor->guard_size; |
3035 | planea_params = planeb_params = g4x_wm_info; | 3616 | *plane_wm = display->guard_size; |
3617 | return false; | ||
3618 | } | ||
3036 | 3619 | ||
3037 | /* Grab a couple of global values before we overwrite them */ | 3620 | htotal = crtc->mode.htotal; |
3038 | total_size = planea_params.fifo_size; | 3621 | hdisplay = crtc->mode.hdisplay; |
3039 | cacheline_size = planea_params.cacheline_size; | 3622 | clock = crtc->mode.clock; |
3623 | pixel_size = crtc->fb->bits_per_pixel / 8; | ||
3624 | |||
3625 | /* Use the small buffer method to calculate plane watermark */ | ||
3626 | entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000; | ||
3627 | tlb_miss = display->fifo_size * display->cacheline_size - hdisplay * 8; | ||
3628 | if (tlb_miss > 0) | ||
3629 | entries += tlb_miss; | ||
3630 | entries = DIV_ROUND_UP(entries, display->cacheline_size); | ||
3631 | *plane_wm = entries + display->guard_size; | ||
3632 | if (*plane_wm > (int)display->max_wm) | ||
3633 | *plane_wm = display->max_wm; | ||
3634 | |||
3635 | /* Use the large buffer method to calculate cursor watermark */ | ||
3636 | line_time_us = ((htotal * 1000) / clock); | ||
3637 | line_count = (cursor_latency_ns / line_time_us + 1000) / 1000; | ||
3638 | entries = line_count * 64 * pixel_size; | ||
3639 | tlb_miss = cursor->fifo_size * cursor->cacheline_size - hdisplay * 8; | ||
3640 | if (tlb_miss > 0) | ||
3641 | entries += tlb_miss; | ||
3642 | entries = DIV_ROUND_UP(entries, cursor->cacheline_size); | ||
3643 | *cursor_wm = entries + cursor->guard_size; | ||
3644 | if (*cursor_wm > (int)cursor->max_wm) | ||
3645 | *cursor_wm = (int)cursor->max_wm; | ||
3040 | 3646 | ||
3041 | /* | 3647 | return true; |
3042 | * Note: we need to make sure we don't overflow for various clock & | 3648 | } |
3043 | * latency values. | ||
3044 | * clocks go from a few thousand to several hundred thousand. | ||
3045 | * latency is usually a few thousand | ||
3046 | */ | ||
3047 | entries_required = ((planea_clock / 1000) * pixel_size * latency_ns) / | ||
3048 | 1000; | ||
3049 | entries_required = DIV_ROUND_UP(entries_required, G4X_FIFO_LINE_SIZE); | ||
3050 | planea_wm = entries_required + planea_params.guard_size; | ||
3051 | 3649 | ||
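g4x_compute_wm0() above combines the two standard methods: the "small buffer" method for the plane (bytes the display fetches during the latency window) and the "large buffer" method for the cursor (whole 64-pixel cursor lines per window). A standalone sketch of both, using the same formulas as the hunk but omitting the TLB-miss adjustment; the mode and latency values are illustrative:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	int clock = 148500;          /* dot clock, kHz */
	int htotal = 2200, pixel_size = 4;
	int latency_ns = 700;        /* sample WM0 latency */
	int cacheline = 64, guard = 2;

	/* Small buffer method: bytes fetched during the latency window */
	int bytes = ((clock * pixel_size / 1000) * latency_ns) / 1000;
	int plane_wm = DIV_ROUND_UP(bytes, cacheline) + guard;

	/* Large buffer method: whole 64-pixel cursor lines per window */
	int line_time_us = (htotal * 1000) / clock;
	int line_count = (latency_ns / line_time_us + 1000) / 1000;
	int cursor_wm = DIV_ROUND_UP(line_count * 64 * pixel_size, cacheline) + guard;

	printf("plane_wm=%d cursor_wm=%d\n", plane_wm, cursor_wm);  /* 9 and 6 */
	return 0;
}
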
3052 | entries_required = ((planeb_clock / 1000) * pixel_size * latency_ns) / | 3650 | /* |
3053 | 1000; | 3651 | * Check the wm result. |
3054 | entries_required = DIV_ROUND_UP(entries_required, G4X_FIFO_LINE_SIZE); | 3652 | * |
3055 | planeb_wm = entries_required + planeb_params.guard_size; | 3653 | * If any calculated watermark value is larger than the maximum value that |
3654 | * can be programmed into the associated watermark register, that watermark | ||
3655 | * must be disabled. | ||
3656 | */ | ||
3657 | static bool g4x_check_srwm(struct drm_device *dev, | ||
3658 | int display_wm, int cursor_wm, | ||
3659 | const struct intel_watermark_params *display, | ||
3660 | const struct intel_watermark_params *cursor) | ||
3661 | { | ||
3662 | DRM_DEBUG_KMS("SR watermark: display plane %d, cursor %d\n", | ||
3663 | display_wm, cursor_wm); | ||
3056 | 3664 | ||
3057 | cursora_wm = cursorb_wm = 16; | 3665 | if (display_wm > display->max_wm) { |
3058 | cursor_sr = 32; | 3666 | DRM_DEBUG_KMS("display watermark is too large(%d/%ld), disabling\n", |
3667 | display_wm, display->max_wm); | ||
3668 | return false; | ||
3669 | } | ||
3059 | 3670 | ||
3060 | DRM_DEBUG("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm); | 3671 | if (cursor_wm > cursor->max_wm) { |
3672 | DRM_DEBUG_KMS("cursor watermark is too large(%d/%ld), disabling\n", | ||
3673 | cursor_wm, cursor->max_wm); | ||
3674 | return false; | ||
3675 | } | ||
3061 | 3676 | ||
3062 | /* Calc sr entries for one plane configs */ | 3677 | if (!(display_wm || cursor_wm)) { |
3063 | if (sr_hdisplay && (!planea_clock || !planeb_clock)) { | 3678 | DRM_DEBUG_KMS("SR latency is 0, disabling\n"); |
3064 | /* self-refresh has much higher latency */ | 3679 | return false; |
3065 | static const int sr_latency_ns = 12000; | 3680 | } |
3066 | 3681 | ||
3067 | sr_clock = planea_clock ? planea_clock : planeb_clock; | 3682 | return true; |
3068 | line_time_us = ((sr_htotal * 1000) / sr_clock); | 3683 | } |
3069 | 3684 | ||
3070 | /* Use ns/us then divide to preserve precision */ | 3685 | static bool g4x_compute_srwm(struct drm_device *dev, |
3071 | sr_entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) * | 3686 | int plane, |
3072 | pixel_size * sr_hdisplay; | 3687 | int latency_ns, |
3073 | sr_entries = DIV_ROUND_UP(sr_entries, cacheline_size); | 3688 | const struct intel_watermark_params *display, |
3074 | 3689 | const struct intel_watermark_params *cursor, | |
3075 | entries_required = (((sr_latency_ns / line_time_us) + | 3690 | int *display_wm, int *cursor_wm) |
3076 | 1000) / 1000) * pixel_size * 64; | 3691 | { |
3077 | entries_required = DIV_ROUND_UP(entries_required, | 3692 | struct drm_crtc *crtc; |
3078 | g4x_cursor_wm_info.cacheline_size); | 3693 | int hdisplay, htotal, pixel_size, clock; |
3079 | cursor_sr = entries_required + g4x_cursor_wm_info.guard_size; | 3694 | unsigned long line_time_us; |
3080 | 3695 | int line_count, line_size; | |
3081 | if (cursor_sr > g4x_cursor_wm_info.max_wm) | 3696 | int small, large; |
3082 | cursor_sr = g4x_cursor_wm_info.max_wm; | 3697 | int entries; |
3083 | DRM_DEBUG_KMS("self-refresh watermark: display plane %d " | ||
3084 | "cursor %d\n", sr_entries, cursor_sr); | ||
3085 | 3698 | ||
3086 | I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN); | 3699 | if (!latency_ns) { |
3087 | } else { | 3700 | *display_wm = *cursor_wm = 0; |
3088 | /* Turn off self refresh if both pipes are enabled */ | 3701 | return false; |
3089 | I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF) | ||
3090 | & ~FW_BLC_SELF_EN); | ||
3091 | } | 3702 | } |
3092 | 3703 | ||
3093 | DRM_DEBUG("Setting FIFO watermarks - A: %d, B: %d, SR %d\n", | 3704 | crtc = intel_get_crtc_for_plane(dev, plane); |
3094 | planea_wm, planeb_wm, sr_entries); | 3705 | hdisplay = crtc->mode.hdisplay; |
3706 | htotal = crtc->mode.htotal; | ||
3707 | clock = crtc->mode.clock; | ||
3708 | pixel_size = crtc->fb->bits_per_pixel / 8; | ||
3709 | |||
3710 | line_time_us = (htotal * 1000) / clock; | ||
3711 | line_count = (latency_ns / line_time_us + 1000) / 1000; | ||
3712 | line_size = hdisplay * pixel_size; | ||
3095 | 3713 | ||
3096 | planea_wm &= 0x3f; | 3714 | /* Use the minimum of the small and large buffer method for primary */ |
3097 | planeb_wm &= 0x3f; | 3715 | small = ((clock * pixel_size / 1000) * latency_ns) / 1000; |
3716 | large = line_count * line_size; | ||
3717 | |||
3718 | entries = DIV_ROUND_UP(min(small, large), display->cacheline_size); | ||
3719 | *display_wm = entries + display->guard_size; | ||
3720 | |||
3721 | /* calculate the self-refresh watermark for display cursor */ | ||
3722 | entries = line_count * pixel_size * 64; | ||
3723 | entries = DIV_ROUND_UP(entries, cursor->cacheline_size); | ||
3724 | *cursor_wm = entries + cursor->guard_size; | ||
3725 | |||
3726 | return g4x_check_srwm(dev, | ||
3727 | *display_wm, *cursor_wm, | ||
3728 | display, cursor); | ||
3729 | } | ||
3730 | |||
3731 | #define single_plane_enabled(mask) is_power_of_2(mask) | ||
3732 | |||
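The single_plane_enabled() macro is a bit trick: "enabled" is a mask with bit 0 for plane A and bit 1 for plane B, and exactly one bit set means the mask is a power of two. is_power_of_2() comes from linux/log2.h; the equivalent test, checked standalone:

#include <stdio.h>
#include <stdbool.h>

/* Same condition is_power_of_2() evaluates: nonzero with one bit set. */
static bool single_plane_enabled(unsigned int mask)
{
	return mask != 0 && (mask & (mask - 1)) == 0;
}

int main(void)
{
	for (unsigned int mask = 0; mask <= 3; mask++)
		printf("mask=%u -> %s\n", mask,
		       single_plane_enabled(mask) ? "single plane" : "zero or both");
	return 0;
}

ffs(enabled) - 1 then recovers the plane index (0 or 1) of that single bit for the self-refresh computation.
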
3733 | static void g4x_update_wm(struct drm_device *dev) | ||
3734 | { | ||
3735 | static const int sr_latency_ns = 12000; | ||
3736 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
3737 | int planea_wm, planeb_wm, cursora_wm, cursorb_wm; | ||
3738 | int plane_sr, cursor_sr; | ||
3739 | unsigned int enabled = 0; | ||
3740 | |||
3741 | if (g4x_compute_wm0(dev, 0, | ||
3742 | &g4x_wm_info, latency_ns, | ||
3743 | &g4x_cursor_wm_info, latency_ns, | ||
3744 | &planea_wm, &cursora_wm)) | ||
3745 | enabled |= 1; | ||
3746 | |||
3747 | if (g4x_compute_wm0(dev, 1, | ||
3748 | &g4x_wm_info, latency_ns, | ||
3749 | &g4x_cursor_wm_info, latency_ns, | ||
3750 | &planeb_wm, &cursorb_wm)) | ||
3751 | enabled |= 2; | ||
3752 | |||
3753 | plane_sr = cursor_sr = 0; | ||
3754 | if (single_plane_enabled(enabled) && | ||
3755 | g4x_compute_srwm(dev, ffs(enabled) - 1, | ||
3756 | sr_latency_ns, | ||
3757 | &g4x_wm_info, | ||
3758 | &g4x_cursor_wm_info, | ||
3759 | &plane_sr, &cursor_sr)) | ||
3760 | I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN); | ||
3761 | else | ||
3762 | I915_WRITE(FW_BLC_SELF, | ||
3763 | I915_READ(FW_BLC_SELF) & ~FW_BLC_SELF_EN); | ||
3098 | 3764 | ||
3099 | I915_WRITE(DSPFW1, (sr_entries << DSPFW_SR_SHIFT) | | 3765 | DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n", |
3766 | planea_wm, cursora_wm, | ||
3767 | planeb_wm, cursorb_wm, | ||
3768 | plane_sr, cursor_sr); | ||
3769 | |||
3770 | I915_WRITE(DSPFW1, | ||
3771 | (plane_sr << DSPFW_SR_SHIFT) | | ||
3100 | (cursorb_wm << DSPFW_CURSORB_SHIFT) | | 3772 | (cursorb_wm << DSPFW_CURSORB_SHIFT) | |
3101 | (planeb_wm << DSPFW_PLANEB_SHIFT) | planea_wm); | 3773 | (planeb_wm << DSPFW_PLANEB_SHIFT) | |
3102 | I915_WRITE(DSPFW2, (I915_READ(DSPFW2) & DSPFW_CURSORA_MASK) | | 3774 | planea_wm); |
3775 | I915_WRITE(DSPFW2, | ||
3776 | (I915_READ(DSPFW2) & DSPFW_CURSORA_MASK) | | ||
3103 | (cursora_wm << DSPFW_CURSORA_SHIFT)); | 3777 | (cursora_wm << DSPFW_CURSORA_SHIFT)); |
3104 | /* HPLL off in SR has some issues on G4x... disable it */ | 3778 | /* HPLL off in SR has some issues on G4x... disable it */ |
3105 | I915_WRITE(DSPFW3, (I915_READ(DSPFW3) & ~DSPFW_HPLL_SR_EN) | | 3779 | I915_WRITE(DSPFW3, |
3780 | (I915_READ(DSPFW3) & ~DSPFW_HPLL_SR_EN) | | ||
3106 | (cursor_sr << DSPFW_CURSOR_SR_SHIFT)); | 3781 | (cursor_sr << DSPFW_CURSOR_SR_SHIFT)); |
3107 | } | 3782 | } |
3108 | 3783 | ||
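The DSPFW1 write above packs the SR, cursor B, plane B and plane A watermarks into one register by shifting each field into place, while DSPFW2 is a read-modify-write that preserves the cursor A mask region. The packing pattern, with hypothetical shift values standing in for the real DSPFW_* constants (which live in i915_reg.h and are not part of this hunk):

#include <stdio.h>
#include <stdint.h>

/* Hypothetical field positions for illustration only */
#define SR_SHIFT      23
#define CURSORB_SHIFT 16
#define PLANEB_SHIFT   8

int main(void)
{
	uint32_t plane_sr = 12, cursorb_wm = 6, planeb_wm = 9, planea_wm = 9;

	uint32_t dspfw1 = (plane_sr << SR_SHIFT) |
	                  (cursorb_wm << CURSORB_SHIFT) |
	                  (planeb_wm << PLANEB_SHIFT) |
	                  planea_wm;

	printf("DSPFW1 = 0x%08x\n", (unsigned)dspfw1);
	return 0;
}
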
3109 | static void i965_update_wm(struct drm_device *dev, int planea_clock, | 3784 | static void i965_update_wm(struct drm_device *dev) |
3110 | int planeb_clock, int sr_hdisplay, int sr_htotal, | ||
3111 | int pixel_size) | ||
3112 | { | 3785 | { |
3113 | struct drm_i915_private *dev_priv = dev->dev_private; | 3786 | struct drm_i915_private *dev_priv = dev->dev_private; |
3114 | unsigned long line_time_us; | 3787 | struct drm_crtc *crtc; |
3115 | int sr_clock, sr_entries, srwm = 1; | 3788 | int srwm = 1; |
3116 | int cursor_sr = 16; | 3789 | int cursor_sr = 16; |
3117 | 3790 | ||
3118 | /* Calc sr entries for one plane configs */ | 3791 | /* Calc sr entries for one plane configs */ |
3119 | if (sr_hdisplay && (!planea_clock || !planeb_clock)) { | 3792 | crtc = single_enabled_crtc(dev); |
3793 | if (crtc) { | ||
3120 | /* self-refresh has much higher latency */ | 3794 | /* self-refresh has much higher latency */ |
3121 | static const int sr_latency_ns = 12000; | 3795 | static const int sr_latency_ns = 12000; |
3796 | int clock = crtc->mode.clock; | ||
3797 | int htotal = crtc->mode.htotal; | ||
3798 | int hdisplay = crtc->mode.hdisplay; | ||
3799 | int pixel_size = crtc->fb->bits_per_pixel / 8; | ||
3800 | unsigned long line_time_us; | ||
3801 | int entries; | ||
3122 | 3802 | ||
3123 | sr_clock = planea_clock ? planea_clock : planeb_clock; | 3803 | line_time_us = ((htotal * 1000) / clock); |
3124 | line_time_us = ((sr_htotal * 1000) / sr_clock); | ||
3125 | 3804 | ||
3126 | /* Use ns/us then divide to preserve precision */ | 3805 | /* Use ns/us then divide to preserve precision */ |
3127 | sr_entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) * | 3806 | entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) * |
3128 | pixel_size * sr_hdisplay; | 3807 | pixel_size * hdisplay; |
3129 | sr_entries = DIV_ROUND_UP(sr_entries, I915_FIFO_LINE_SIZE); | 3808 | entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE); |
3130 | DRM_DEBUG("self-refresh entries: %d\n", sr_entries); | 3809 | srwm = I965_FIFO_SIZE - entries; |
3131 | srwm = I965_FIFO_SIZE - sr_entries; | ||
3132 | if (srwm < 0) | 3810 | if (srwm < 0) |
3133 | srwm = 1; | 3811 | srwm = 1; |
3134 | srwm &= 0x1ff; | 3812 | srwm &= 0x1ff; |
3813 | DRM_DEBUG_KMS("self-refresh entries: %d, wm: %d\n", | ||
3814 | entries, srwm); | ||
3135 | 3815 | ||
3136 | sr_entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) * | 3816 | entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) * |
3137 | pixel_size * 64; | 3817 | pixel_size * 64; |
3138 | sr_entries = DIV_ROUND_UP(sr_entries, | 3818 | entries = DIV_ROUND_UP(entries, |
3139 | i965_cursor_wm_info.cacheline_size); | 3819 | i965_cursor_wm_info.cacheline_size); |
3140 | cursor_sr = i965_cursor_wm_info.fifo_size - | 3820 | cursor_sr = i965_cursor_wm_info.fifo_size - |
3141 | (sr_entries + i965_cursor_wm_info.guard_size); | 3821 | (entries + i965_cursor_wm_info.guard_size); |
3142 | 3822 | ||
3143 | if (cursor_sr > i965_cursor_wm_info.max_wm) | 3823 | if (cursor_sr > i965_cursor_wm_info.max_wm) |
3144 | cursor_sr = i965_cursor_wm_info.max_wm; | 3824 | cursor_sr = i965_cursor_wm_info.max_wm; |
@@ -3146,11 +3826,11 @@ static void i965_update_wm(struct drm_device *dev, int planea_clock, | |||
3146 | DRM_DEBUG_KMS("self-refresh watermark: display plane %d " | 3826 | DRM_DEBUG_KMS("self-refresh watermark: display plane %d " |
3147 | "cursor %d\n", srwm, cursor_sr); | 3827 | "cursor %d\n", srwm, cursor_sr); |
3148 | 3828 | ||
3149 | if (IS_I965GM(dev)) | 3829 | if (IS_CRESTLINE(dev)) |
3150 | I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN); | 3830 | I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN); |
3151 | } else { | 3831 | } else { |
3152 | /* Turn off self refresh if both pipes are enabled */ | 3832 | /* Turn off self refresh if both pipes are enabled */ |
3153 | if (IS_I965GM(dev)) | 3833 | if (IS_CRESTLINE(dev)) |
3154 | I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF) | 3834 | I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF) |
3155 | & ~FW_BLC_SELF_EN); | 3835 | & ~FW_BLC_SELF_EN); |
3156 | } | 3836 | } |
@@ -3159,46 +3839,56 @@ static void i965_update_wm(struct drm_device *dev, int planea_clock, | |||
3159 | srwm); | 3839 | srwm); |
3160 | 3840 | ||
3161 | /* 965 has limitations... */ | 3841 | /* 965 has limitations... */ |
3162 | I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) | (8 << 16) | (8 << 8) | | 3842 | I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) | |
3163 | (8 << 0)); | 3843 | (8 << 16) | (8 << 8) | (8 << 0)); |
3164 | I915_WRITE(DSPFW2, (8 << 8) | (8 << 0)); | 3844 | I915_WRITE(DSPFW2, (8 << 8) | (8 << 0)); |
3165 | /* update cursor SR watermark */ | 3845 | /* update cursor SR watermark */ |
3166 | I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT)); | 3846 | I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT)); |
3167 | } | 3847 | } |
3168 | 3848 | ||
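The i965 path sizes self-refresh differently from g4x: instead of adding a guard to the entries, it subtracts the drained entries from the FIFO, clamps at 1 and truncates to the 9-bit SRWM field. The same arithmetic with sample mode values (the FIFO and line sizes below are assumptions standing in for I965_FIFO_SIZE and I915_FIFO_LINE_SIZE):

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	int clock = 148500, htotal = 2200, hdisplay = 1920;
	int pixel_size = 4, sr_latency_ns = 12000;
	int fifo_size = 512, line = 64;   /* assumed FIFO and cacheline sizes */

	int line_time_us = (htotal * 1000) / clock;
	int line_count = ((sr_latency_ns / line_time_us) + 1000) / 1000;
	int entries = DIV_ROUND_UP(line_count * pixel_size * hdisplay, line);
	int srwm = fifo_size - entries;

	if (srwm < 0)
		srwm = 1;
	printf("entries=%d srwm=%d\n", entries, srwm & 0x1ff);  /* 120, 392 */
	return 0;
}
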
3169 | static void i9xx_update_wm(struct drm_device *dev, int planea_clock, | 3849 | static void i9xx_update_wm(struct drm_device *dev) |
3170 | int planeb_clock, int sr_hdisplay, int sr_htotal, | ||
3171 | int pixel_size) | ||
3172 | { | 3850 | { |
3173 | struct drm_i915_private *dev_priv = dev->dev_private; | 3851 | struct drm_i915_private *dev_priv = dev->dev_private; |
3852 | const struct intel_watermark_params *wm_info; | ||
3174 | uint32_t fwater_lo; | 3853 | uint32_t fwater_lo; |
3175 | uint32_t fwater_hi; | 3854 | uint32_t fwater_hi; |
3176 | int total_size, cacheline_size, cwm, srwm = 1; | 3855 | int cwm, srwm = 1; |
3856 | int fifo_size; | ||
3177 | int planea_wm, planeb_wm; | 3857 | int planea_wm, planeb_wm; |
3178 | struct intel_watermark_params planea_params, planeb_params; | 3858 | struct drm_crtc *crtc, *enabled = NULL; |
3179 | unsigned long line_time_us; | ||
3180 | int sr_clock, sr_entries = 0; | ||
3181 | 3859 | ||
3182 | /* Create copies of the base settings for each pipe */ | 3860 | if (IS_I945GM(dev)) |
3183 | if (IS_I965GM(dev) || IS_I945GM(dev)) | 3861 | wm_info = &i945_wm_info; |
3184 | planea_params = planeb_params = i945_wm_info; | 3862 | else if (!IS_GEN2(dev)) |
3185 | else if (IS_I9XX(dev)) | 3863 | wm_info = &i915_wm_info; |
3186 | planea_params = planeb_params = i915_wm_info; | ||
3187 | else | 3864 | else |
3188 | planea_params = planeb_params = i855_wm_info; | 3865 | wm_info = &i855_wm_info; |
3189 | 3866 | ||
3190 | /* Grab a couple of global values before we overwrite them */ | 3867 | fifo_size = dev_priv->display.get_fifo_size(dev, 0); |
3191 | total_size = planea_params.fifo_size; | 3868 | crtc = intel_get_crtc_for_plane(dev, 0); |
3192 | cacheline_size = planea_params.cacheline_size; | 3869 | if (crtc->enabled && crtc->fb) { |
3193 | 3870 | planea_wm = intel_calculate_wm(crtc->mode.clock, | |
3194 | /* Update per-plane FIFO sizes */ | 3871 | wm_info, fifo_size, |
3195 | planea_params.fifo_size = dev_priv->display.get_fifo_size(dev, 0); | 3872 | crtc->fb->bits_per_pixel / 8, |
3196 | planeb_params.fifo_size = dev_priv->display.get_fifo_size(dev, 1); | 3873 | latency_ns); |
3874 | enabled = crtc; | ||
3875 | } else | ||
3876 | planea_wm = fifo_size - wm_info->guard_size; | ||
3877 | |||
3878 | fifo_size = dev_priv->display.get_fifo_size(dev, 1); | ||
3879 | crtc = intel_get_crtc_for_plane(dev, 1); | ||
3880 | if (crtc->enabled && crtc->fb) { | ||
3881 | planeb_wm = intel_calculate_wm(crtc->mode.clock, | ||
3882 | wm_info, fifo_size, | ||
3883 | crtc->fb->bits_per_pixel / 8, | ||
3884 | latency_ns); | ||
3885 | if (enabled == NULL) | ||
3886 | enabled = crtc; | ||
3887 | else | ||
3888 | enabled = NULL; | ||
3889 | } else | ||
3890 | planeb_wm = fifo_size - wm_info->guard_size; | ||
3197 | 3891 | ||
3198 | planea_wm = intel_calculate_wm(planea_clock, &planea_params, | ||
3199 | pixel_size, latency_ns); | ||
3200 | planeb_wm = intel_calculate_wm(planeb_clock, &planeb_params, | ||
3201 | pixel_size, latency_ns); | ||
3202 | DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm); | 3892 | DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm); |
3203 | 3893 | ||
3204 | /* | 3894 | /* |
@@ -3206,43 +3896,43 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock, | |||
3206 | */ | 3896 | */ |
3207 | cwm = 2; | 3897 | cwm = 2; |
3208 | 3898 | ||
3899 | /* Play safe and disable self-refresh before adjusting watermarks. */ | ||
3900 | if (IS_I945G(dev) || IS_I945GM(dev)) | ||
3901 | I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | 0); | ||
3902 | else if (IS_I915GM(dev)) | ||
3903 | I915_WRITE(INSTPM, I915_READ(INSTPM) & ~INSTPM_SELF_EN); | ||
3904 | |||
3209 | /* Calc sr entries for one plane configs */ | 3905 | /* Calc sr entries for one plane configs */ |
3210 | if (HAS_FW_BLC(dev) && sr_hdisplay && | 3906 | if (HAS_FW_BLC(dev) && enabled) { |
3211 | (!planea_clock || !planeb_clock)) { | ||
3212 | /* self-refresh has much higher latency */ | 3907 | /* self-refresh has much higher latency */ |
3213 | static const int sr_latency_ns = 6000; | 3908 | static const int sr_latency_ns = 6000; |
3909 | int clock = enabled->mode.clock; | ||
3910 | int htotal = enabled->mode.htotal; | ||
3911 | int hdisplay = enabled->mode.hdisplay; | ||
3912 | int pixel_size = enabled->fb->bits_per_pixel / 8; | ||
3913 | unsigned long line_time_us; | ||
3914 | int entries; | ||
3214 | 3915 | ||
3215 | sr_clock = planea_clock ? planea_clock : planeb_clock; | 3916 | line_time_us = (htotal * 1000) / clock; |
3216 | line_time_us = ((sr_htotal * 1000) / sr_clock); | ||
3217 | 3917 | ||
3218 | /* Use ns/us then divide to preserve precision */ | 3918 | /* Use ns/us then divide to preserve precision */ |
3219 | sr_entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) * | 3919 | entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) * |
3220 | pixel_size * sr_hdisplay; | 3920 | pixel_size * hdisplay; |
3221 | sr_entries = DIV_ROUND_UP(sr_entries, cacheline_size); | 3921 | entries = DIV_ROUND_UP(entries, wm_info->cacheline_size); |
3222 | DRM_DEBUG_KMS("self-refresh entries: %d\n", sr_entries); | 3922 | DRM_DEBUG_KMS("self-refresh entries: %d\n", entries); |
3223 | srwm = total_size - sr_entries; | 3923 | srwm = wm_info->fifo_size - entries; |
3224 | if (srwm < 0) | 3924 | if (srwm < 0) |
3225 | srwm = 1; | 3925 | srwm = 1; |
3226 | 3926 | ||
3227 | if (IS_I945G(dev) || IS_I945GM(dev)) | 3927 | if (IS_I945G(dev) || IS_I945GM(dev)) |
3228 | I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_FIFO_MASK | (srwm & 0xff)); | 3928 | I915_WRITE(FW_BLC_SELF, |
3229 | else if (IS_I915GM(dev)) { | 3929 | FW_BLC_SELF_FIFO_MASK | (srwm & 0xff)); |
3230 | /* 915M has a smaller SRWM field */ | 3930 | else if (IS_I915GM(dev)) |
3231 | I915_WRITE(FW_BLC_SELF, srwm & 0x3f); | 3931 | I915_WRITE(FW_BLC_SELF, srwm & 0x3f); |
3232 | I915_WRITE(INSTPM, I915_READ(INSTPM) | INSTPM_SELF_EN); | ||
3233 | } | ||
3234 | } else { | ||
3235 | /* Turn off self refresh if both pipes are enabled */ | ||
3236 | if (IS_I945G(dev) || IS_I945GM(dev)) { | ||
3237 | I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF) | ||
3238 | & ~FW_BLC_SELF_EN); | ||
3239 | } else if (IS_I915GM(dev)) { | ||
3240 | I915_WRITE(INSTPM, I915_READ(INSTPM) & ~INSTPM_SELF_EN); | ||
3241 | } | ||
3242 | } | 3932 | } |
3243 | 3933 | ||
3244 | DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n", | 3934 | DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n", |
3245 | planea_wm, planeb_wm, cwm, srwm); | 3935 | planea_wm, planeb_wm, cwm, srwm); |
3246 | 3936 | ||
3247 | fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f); | 3937 | fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f); |
3248 | fwater_hi = (cwm & 0x1f); | 3938 | fwater_hi = (cwm & 0x1f); |
@@ -3253,19 +3943,36 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock, | |||
3253 | 3943 | ||
3254 | I915_WRITE(FW_BLC, fwater_lo); | 3944 | I915_WRITE(FW_BLC, fwater_lo); |
3255 | I915_WRITE(FW_BLC2, fwater_hi); | 3945 | I915_WRITE(FW_BLC2, fwater_hi); |
3946 | |||
3947 | if (HAS_FW_BLC(dev)) { | ||
3948 | if (enabled) { | ||
3949 | if (IS_I945G(dev) || IS_I945GM(dev)) | ||
3950 | I915_WRITE(FW_BLC_SELF, | ||
3951 | FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN); | ||
3952 | else if (IS_I915GM(dev)) | ||
3953 | I915_WRITE(INSTPM, I915_READ(INSTPM) | INSTPM_SELF_EN); | ||
3954 | DRM_DEBUG_KMS("memory self refresh enabled\n"); | ||
3955 | } else | ||
3956 | DRM_DEBUG_KMS("memory self refresh disabled\n"); | ||
3957 | } | ||
3256 | } | 3958 | } |
3257 | 3959 | ||
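A behavioural fix in the i9xx hunk is ordering: self-refresh is now disabled before the FW_BLC watermarks are rewritten and only re-enabled afterwards (and only if a single pipe is active), so the hardware never self-refreshes against half-programmed values. The shape of that sequence against a toy register file; register indices and the enable bit are stand-ins, not the real layout:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

enum { FW_BLC, FW_BLC2, FW_BLC_SELF, NREGS };
#define SELF_EN (1u << 15)   /* hypothetical enable bit */

static uint32_t regs[NREGS];

static void wr(int r, uint32_t v)
{
	regs[r] = v;
	printf("reg%d <= 0x%x\n", r, (unsigned)v);
}

static void update_wm(uint32_t lo, uint32_t hi, bool single_pipe)
{
	wr(FW_BLC_SELF, regs[FW_BLC_SELF] & ~SELF_EN);  /* 1. play safe: SR off */
	wr(FW_BLC, lo);                                 /* 2. program watermarks */
	wr(FW_BLC2, hi);
	if (single_pipe)                                /* 3. re-enable SR last */
		wr(FW_BLC_SELF, regs[FW_BLC_SELF] | SELF_EN);
}

int main(void)
{
	update_wm(0x003f003f, 0x2, true);
	return 0;
}
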
3258 | static void i830_update_wm(struct drm_device *dev, int planea_clock, int unused, | 3960 | static void i830_update_wm(struct drm_device *dev) |
3259 | int unused2, int unused3, int pixel_size) | ||
3260 | { | 3961 | { |
3261 | struct drm_i915_private *dev_priv = dev->dev_private; | 3962 | struct drm_i915_private *dev_priv = dev->dev_private; |
3262 | uint32_t fwater_lo = I915_READ(FW_BLC) & ~0xfff; | 3963 | struct drm_crtc *crtc; |
3964 | uint32_t fwater_lo; | ||
3263 | int planea_wm; | 3965 | int planea_wm; |
3264 | 3966 | ||
3265 | i830_wm_info.fifo_size = dev_priv->display.get_fifo_size(dev, 0); | 3967 | crtc = single_enabled_crtc(dev); |
3968 | if (crtc == NULL) | ||
3969 | return; | ||
3266 | 3970 | ||
3267 | planea_wm = intel_calculate_wm(planea_clock, &i830_wm_info, | 3971 | planea_wm = intel_calculate_wm(crtc->mode.clock, &i830_wm_info, |
3268 | pixel_size, latency_ns); | 3972 | dev_priv->display.get_fifo_size(dev, 0), |
3973 | crtc->fb->bits_per_pixel / 8, | ||
3974 | latency_ns); | ||
3975 | fwater_lo = I915_READ(FW_BLC) & ~0xfff; | ||
3269 | fwater_lo |= (3<<8) | planea_wm; | 3976 | fwater_lo |= (3<<8) | planea_wm; |
3270 | 3977 | ||
3271 | DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm); | 3978 | DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm); |
@@ -3276,146 +3983,286 @@ static void i830_update_wm(struct drm_device *dev, int planea_clock, int unused, | |||
3276 | #define ILK_LP0_PLANE_LATENCY 700 | 3983 | #define ILK_LP0_PLANE_LATENCY 700 |
3277 | #define ILK_LP0_CURSOR_LATENCY 1300 | 3984 | #define ILK_LP0_CURSOR_LATENCY 1300 |
3278 | 3985 | ||
3279 | static void ironlake_update_wm(struct drm_device *dev, int planea_clock, | 3986 | /* |
3280 | int planeb_clock, int sr_hdisplay, int sr_htotal, | 3987 | * Check the wm result. |
3281 | int pixel_size) | 3988 | * |
3989 | * If any calculated watermark values is larger than the maximum value that | ||
3990 | * can be programmed into the associated watermark register, that watermark | ||
3991 | * must be disabled. | ||
3992 | */ | ||
3993 | static bool ironlake_check_srwm(struct drm_device *dev, int level, | ||
3994 | int fbc_wm, int display_wm, int cursor_wm, | ||
3995 | const struct intel_watermark_params *display, | ||
3996 | const struct intel_watermark_params *cursor) | ||
3282 | { | 3997 | { |
3283 | struct drm_i915_private *dev_priv = dev->dev_private; | 3998 | struct drm_i915_private *dev_priv = dev->dev_private; |
3284 | int planea_wm, planeb_wm, cursora_wm, cursorb_wm; | 3999 | |
3285 | int sr_wm, cursor_wm; | 4000 | DRM_DEBUG_KMS("watermark %d: display plane %d, fbc lines %d," |
3286 | unsigned long line_time_us; | 4001 | " cursor %d\n", level, display_wm, fbc_wm, cursor_wm); |
3287 | int sr_clock, entries_required; | 4002 | |
3288 | u32 reg_value; | 4003 | if (fbc_wm > SNB_FBC_MAX_SRWM) { |
3289 | int line_count; | 4004 | DRM_DEBUG_KMS("fbc watermark(%d) is too large(%d), disabling wm%d+\n", |
3290 | int planea_htotal = 0, planeb_htotal = 0; | 4005 | fbc_wm, SNB_FBC_MAX_SRWM, level); |
4006 | |||
4007 | /* fbc has its own way to disable FBC WM */ | ||
4008 | I915_WRITE(DISP_ARB_CTL, | ||
4009 | I915_READ(DISP_ARB_CTL) | DISP_FBC_WM_DIS); | ||
4010 | return false; | ||
4011 | } | ||
4012 | |||
4013 | if (display_wm > display->max_wm) { | ||
4014 | DRM_DEBUG_KMS("display watermark(%d) is too large(%d), disabling wm%d+\n", | ||
4015 | display_wm, SNB_DISPLAY_MAX_SRWM, level); | ||
4016 | return false; | ||
4017 | } | ||
4018 | |||
4019 | if (cursor_wm > cursor->max_wm) { | ||
4020 | DRM_DEBUG_KMS("cursor watermark(%d) is too large(%d), disabling wm%d+\n", | ||
4021 | cursor_wm, SNB_CURSOR_MAX_SRWM, level); | ||
4022 | return false; | ||
4023 | } | ||
4024 | |||
4025 | if (!(fbc_wm || display_wm || cursor_wm)) { | ||
4026 | DRM_DEBUG_KMS("latency %d is 0, disabling wm%d+\n", level, level); | ||
4027 | return false; | ||
4028 | } | ||
4029 | |||
4030 | return true; | ||
4031 | } | ||
4032 | |||
4033 | /* | ||
4034 | * Compute watermark values of WM[1-3]. | ||
4035 | */ | ||
4036 | static bool ironlake_compute_srwm(struct drm_device *dev, int level, int plane, | ||
4037 | int latency_ns, | ||
4038 | const struct intel_watermark_params *display, | ||
4039 | const struct intel_watermark_params *cursor, | ||
4040 | int *fbc_wm, int *display_wm, int *cursor_wm) | ||
4041 | { | ||
3291 | struct drm_crtc *crtc; | 4042 | struct drm_crtc *crtc; |
4043 | unsigned long line_time_us; | ||
4044 | int hdisplay, htotal, pixel_size, clock; | ||
4045 | int line_count, line_size; | ||
4046 | int small, large; | ||
4047 | int entries; | ||
3292 | 4048 | ||
3293 | /* Need htotal for all active display plane */ | 4049 | if (!latency_ns) { |
3294 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { | 4050 | *fbc_wm = *display_wm = *cursor_wm = 0; |
3295 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 4051 | return false; |
3296 | if (intel_crtc->dpms_mode == DRM_MODE_DPMS_ON) { | ||
3297 | if (intel_crtc->plane == 0) | ||
3298 | planea_htotal = crtc->mode.htotal; | ||
3299 | else | ||
3300 | planeb_htotal = crtc->mode.htotal; | ||
3301 | } | ||
3302 | } | 4052 | } |
3303 | 4053 | ||
3304 | /* Calculate and update the watermark for plane A */ | 4054 | crtc = intel_get_crtc_for_plane(dev, plane); |
3305 | if (planea_clock) { | 4055 | hdisplay = crtc->mode.hdisplay; |
3306 | entries_required = ((planea_clock / 1000) * pixel_size * | 4056 | htotal = crtc->mode.htotal; |
3307 | ILK_LP0_PLANE_LATENCY) / 1000; | 4057 | clock = crtc->mode.clock; |
3308 | entries_required = DIV_ROUND_UP(entries_required, | 4058 | pixel_size = crtc->fb->bits_per_pixel / 8; |
3309 | ironlake_display_wm_info.cacheline_size); | ||
3310 | planea_wm = entries_required + | ||
3311 | ironlake_display_wm_info.guard_size; | ||
3312 | 4059 | ||
3313 | if (planea_wm > (int)ironlake_display_wm_info.max_wm) | 4060 | line_time_us = (htotal * 1000) / clock; |
3314 | planea_wm = ironlake_display_wm_info.max_wm; | 4061 | line_count = (latency_ns / line_time_us + 1000) / 1000; |
4062 | line_size = hdisplay * pixel_size; | ||
3315 | 4063 | ||
3316 | /* Use the large buffer method to calculate cursor watermark */ | 4064 | /* Use the minimum of the small and large buffer method for primary */ |
3317 | line_time_us = (planea_htotal * 1000) / planea_clock; | 4065 | small = ((clock * pixel_size / 1000) * latency_ns) / 1000; |
4066 | large = line_count * line_size; | ||
3318 | 4067 | ||
3319 | /* Use ns/us then divide to preserve precision */ | 4068 | entries = DIV_ROUND_UP(min(small, large), display->cacheline_size); |
3320 | line_count = (ILK_LP0_CURSOR_LATENCY / line_time_us + 1000) / 1000; | 4069 | *display_wm = entries + display->guard_size; |
3321 | |||
3322 | /* calculate the cursor watermark for cursor A */ | ||
3323 | entries_required = line_count * 64 * pixel_size; | ||
3324 | entries_required = DIV_ROUND_UP(entries_required, | ||
3325 | ironlake_cursor_wm_info.cacheline_size); | ||
3326 | cursora_wm = entries_required + ironlake_cursor_wm_info.guard_size; | ||
3327 | if (cursora_wm > ironlake_cursor_wm_info.max_wm) | ||
3328 | cursora_wm = ironlake_cursor_wm_info.max_wm; | ||
3329 | |||
3330 | reg_value = I915_READ(WM0_PIPEA_ILK); | ||
3331 | reg_value &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK); | ||
3332 | reg_value |= (planea_wm << WM0_PIPE_PLANE_SHIFT) | | ||
3333 | (cursora_wm & WM0_PIPE_CURSOR_MASK); | ||
3334 | I915_WRITE(WM0_PIPEA_ILK, reg_value); | ||
3335 | DRM_DEBUG_KMS("FIFO watermarks For pipe A - plane %d, " | ||
3336 | "cursor: %d\n", planea_wm, cursora_wm); | ||
3337 | } | ||
3338 | /* Calculate and update the watermark for plane B */ | ||
3339 | if (planeb_clock) { | ||
3340 | entries_required = ((planeb_clock / 1000) * pixel_size * | ||
3341 | ILK_LP0_PLANE_LATENCY) / 1000; | ||
3342 | entries_required = DIV_ROUND_UP(entries_required, | ||
3343 | ironlake_display_wm_info.cacheline_size); | ||
3344 | planeb_wm = entries_required + | ||
3345 | ironlake_display_wm_info.guard_size; | ||
3346 | |||
3347 | if (planeb_wm > (int)ironlake_display_wm_info.max_wm) | ||
3348 | planeb_wm = ironlake_display_wm_info.max_wm; | ||
3349 | |||
3350 | /* Use the large buffer method to calculate cursor watermark */ | ||
3351 | line_time_us = (planeb_htotal * 1000) / planeb_clock; | ||
3352 | 4070 | ||
3353 | /* Use ns/us then divide to preserve precision */ | 4071 | /* |
3354 | line_count = (ILK_LP0_CURSOR_LATENCY / line_time_us + 1000) / 1000; | 4072 | * Spec says: |
4073 | * FBC WM = ((Final Primary WM * 64) / number of bytes per line) + 2 | ||
4074 | */ | ||
4075 | *fbc_wm = DIV_ROUND_UP(*display_wm * 64, line_size) + 2; | ||
4076 | |||
4077 | /* calculate the self-refresh watermark for display cursor */ | ||
4078 | entries = line_count * pixel_size * 64; | ||
4079 | entries = DIV_ROUND_UP(entries, cursor->cacheline_size); | ||
4080 | *cursor_wm = entries + cursor->guard_size; | ||
3355 | 4081 | ||
3356 | /* calculate the cursor watermark for cursor B */ | 4082 | return ironlake_check_srwm(dev, level, |
3357 | entries_required = line_count * 64 * pixel_size; | 4083 | *fbc_wm, *display_wm, *cursor_wm, |
3358 | entries_required = DIV_ROUND_UP(entries_required, | 4084 | display, cursor); |
3359 | ironlake_cursor_wm_info.cacheline_size); | 4085 | } |
3360 | cursorb_wm = entries_required + ironlake_cursor_wm_info.guard_size; | ||
3361 | if (cursorb_wm > ironlake_cursor_wm_info.max_wm) | ||
3362 | cursorb_wm = ironlake_cursor_wm_info.max_wm; | ||
3363 | 4086 | ||
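The FBC watermark in ironlake_compute_srwm() follows the spec formula quoted in the comment, FBC WM = ((final primary WM * 64) / bytes per line) + 2, implemented with DIV_ROUND_UP. Worked through with sample numbers:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	int display_wm = 46;
	int line_size = 1920 * 4;   /* hdisplay * pixel_size */

	/* FBC WM = ((Final Primary WM * 64) / number of bytes per line) + 2 */
	int fbc_wm = DIV_ROUND_UP(display_wm * 64, line_size) + 2;

	printf("fbc_wm = %d\n", fbc_wm);  /* 46*64 = 2944, ceil(2944/7680) = 1, +2 = 3 */
	return 0;
}
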
3364 | reg_value = I915_READ(WM0_PIPEB_ILK); | 4087 | static void ironlake_update_wm(struct drm_device *dev) |
3365 | reg_value &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK); | 4088 | { |
3366 | reg_value |= (planeb_wm << WM0_PIPE_PLANE_SHIFT) | | 4089 | struct drm_i915_private *dev_priv = dev->dev_private; |
3367 | (cursorb_wm & WM0_PIPE_CURSOR_MASK); | 4090 | int fbc_wm, plane_wm, cursor_wm; |
3368 | I915_WRITE(WM0_PIPEB_ILK, reg_value); | 4091 | unsigned int enabled; |
3369 | DRM_DEBUG_KMS("FIFO watermarks For pipe B - plane %d, " | 4092 | |
3370 | "cursor: %d\n", planeb_wm, cursorb_wm); | 4093 | enabled = 0; |
4094 | if (g4x_compute_wm0(dev, 0, | ||
4095 | &ironlake_display_wm_info, | ||
4096 | ILK_LP0_PLANE_LATENCY, | ||
4097 | &ironlake_cursor_wm_info, | ||
4098 | ILK_LP0_CURSOR_LATENCY, | ||
4099 | &plane_wm, &cursor_wm)) { | ||
4100 | I915_WRITE(WM0_PIPEA_ILK, | ||
4101 | (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm); | ||
4102 | DRM_DEBUG_KMS("FIFO watermarks For pipe A -" | ||
4103 | " plane %d, cursor: %d\n", | ||
4104 | plane_wm, cursor_wm); | ||
4105 | enabled |= 1; | ||
4106 | } | ||
4107 | |||
4108 | if (g4x_compute_wm0(dev, 1, | ||
4109 | &ironlake_display_wm_info, | ||
4110 | ILK_LP0_PLANE_LATENCY, | ||
4111 | &ironlake_cursor_wm_info, | ||
4112 | ILK_LP0_CURSOR_LATENCY, | ||
4113 | &plane_wm, &cursor_wm)) { | ||
4114 | I915_WRITE(WM0_PIPEB_ILK, | ||
4115 | (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm); | ||
4116 | DRM_DEBUG_KMS("FIFO watermarks For pipe B -" | ||
4117 | " plane %d, cursor: %d\n", | ||
4118 | plane_wm, cursor_wm); | ||
4119 | enabled |= 2; | ||
3371 | } | 4120 | } |
3372 | 4121 | ||
3373 | /* | 4122 | /* |
3374 | * Calculate and update the self-refresh watermark only when one | 4123 | * Calculate and update the self-refresh watermark only when one |
3375 | * display plane is used. | 4124 | * display plane is used. |
3376 | */ | 4125 | */ |
3377 | if (!planea_clock || !planeb_clock) { | 4126 | I915_WRITE(WM3_LP_ILK, 0); |
4127 | I915_WRITE(WM2_LP_ILK, 0); | ||
4128 | I915_WRITE(WM1_LP_ILK, 0); | ||
3378 | 4129 | ||
3379 | /* Read the self-refresh latency. The unit is 0.5us */ | 4130 | if (!single_plane_enabled(enabled)) |
3380 | int ilk_sr_latency = I915_READ(MLTR_ILK) & ILK_SRLT_MASK; | 4131 | return; |
4132 | enabled = ffs(enabled) - 1; | ||
4133 | |||
4134 | /* WM1 */ | ||
4135 | if (!ironlake_compute_srwm(dev, 1, enabled, | ||
4136 | ILK_READ_WM1_LATENCY() * 500, | ||
4137 | &ironlake_display_srwm_info, | ||
4138 | &ironlake_cursor_srwm_info, | ||
4139 | &fbc_wm, &plane_wm, &cursor_wm)) | ||
4140 | return; | ||
3381 | 4141 | ||
3382 | sr_clock = planea_clock ? planea_clock : planeb_clock; | 4142 | I915_WRITE(WM1_LP_ILK, |
3383 | line_time_us = ((sr_htotal * 1000) / sr_clock); | 4143 | WM1_LP_SR_EN | |
4144 | (ILK_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) | | ||
4145 | (fbc_wm << WM1_LP_FBC_SHIFT) | | ||
4146 | (plane_wm << WM1_LP_SR_SHIFT) | | ||
4147 | cursor_wm); | ||
4148 | |||
4149 | /* WM2 */ | ||
4150 | if (!ironlake_compute_srwm(dev, 2, enabled, | ||
4151 | ILK_READ_WM2_LATENCY() * 500, | ||
4152 | &ironlake_display_srwm_info, | ||
4153 | &ironlake_cursor_srwm_info, | ||
4154 | &fbc_wm, &plane_wm, &cursor_wm)) | ||
4155 | return; | ||
3384 | 4156 | ||
3385 | /* Use ns/us then divide to preserve precision */ | 4157 | I915_WRITE(WM2_LP_ILK, |
3386 | line_count = ((ilk_sr_latency * 500) / line_time_us + 1000) | 4158 | WM2_LP_EN | |
3387 | / 1000; | 4159 | (ILK_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) | |
3388 | 4160 | (fbc_wm << WM1_LP_FBC_SHIFT) | | |
3389 | /* calculate the self-refresh watermark for display plane */ | 4161 | (plane_wm << WM1_LP_SR_SHIFT) | |
3390 | entries_required = line_count * sr_hdisplay * pixel_size; | 4162 | cursor_wm); |
3391 | entries_required = DIV_ROUND_UP(entries_required, | ||
3392 | ironlake_display_srwm_info.cacheline_size); | ||
3393 | sr_wm = entries_required + | ||
3394 | ironlake_display_srwm_info.guard_size; | ||
3395 | |||
3396 | /* calculate the self-refresh watermark for display cursor */ | ||
3397 | entries_required = line_count * pixel_size * 64; | ||
3398 | entries_required = DIV_ROUND_UP(entries_required, | ||
3399 | ironlake_cursor_srwm_info.cacheline_size); | ||
3400 | cursor_wm = entries_required + | ||
3401 | ironlake_cursor_srwm_info.guard_size; | ||
3402 | |||
3403 | /* configure watermark and enable self-refresh */ | ||
3404 | reg_value = I915_READ(WM1_LP_ILK); | ||
3405 | reg_value &= ~(WM1_LP_LATENCY_MASK | WM1_LP_SR_MASK | | ||
3406 | WM1_LP_CURSOR_MASK); | ||
3407 | reg_value |= (ilk_sr_latency << WM1_LP_LATENCY_SHIFT) | | ||
3408 | (sr_wm << WM1_LP_SR_SHIFT) | cursor_wm; | ||
3409 | |||
3410 | I915_WRITE(WM1_LP_ILK, reg_value); | ||
3411 | DRM_DEBUG_KMS("self-refresh watermark: display plane %d " | ||
3412 | "cursor %d\n", sr_wm, cursor_wm); | ||
3413 | 4163 | ||
3414 | } else { | 4164 | /* |
3415 | /* Turn off self refresh if both pipes are enabled */ | 4165 | * WM3 is unsupported on ILK, probably because we don't have latency |
3416 | I915_WRITE(WM1_LP_ILK, I915_READ(WM1_LP_ILK) & ~WM1_LP_SR_EN); | 4166 | * data for that power state |
4167 | */ | ||
4168 | } | ||
4169 | |||
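ironlake_compute_srwm() expects its latency in nanoseconds, but the hardware latency fields come in coarser units: the value ILK_READ_WM1_LATENCY() returns is in 0.5 us steps (per the "unit is 0.5us" comment deleted above, hence * 500), while the SNB value used below is in 0.1 us steps (hence * 100). The conversions side by side, with made-up raw field values:

#include <stdio.h>

int main(void)
{
	unsigned int ilk_raw = 4;    /* hypothetical 0.5us-unit latency field */
	unsigned int snb_raw = 20;   /* hypothetical 0.1us-unit latency field */

	printf("ILK WM1 latency = %u ns\n", ilk_raw * 500);  /* 2000 ns */
	printf("SNB WM0 latency = %u ns\n", snb_raw * 100);  /* 2000 ns */
	return 0;
}
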
4170 | static void sandybridge_update_wm(struct drm_device *dev) | ||
4171 | { | ||
4172 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
4173 | int latency = SNB_READ_WM0_LATENCY() * 100; /* In unit 0.1us */ | ||
4174 | int fbc_wm, plane_wm, cursor_wm; | ||
4175 | unsigned int enabled; | ||
4176 | |||
4177 | enabled = 0; | ||
4178 | if (g4x_compute_wm0(dev, 0, | ||
4179 | &sandybridge_display_wm_info, latency, | ||
4180 | &sandybridge_cursor_wm_info, latency, | ||
4181 | &plane_wm, &cursor_wm)) { | ||
4182 | I915_WRITE(WM0_PIPEA_ILK, | ||
4183 | (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm); | ||
4184 | DRM_DEBUG_KMS("FIFO watermarks For pipe A -" | ||
4185 | " plane %d, cursor: %d\n", | ||
4186 | plane_wm, cursor_wm); | ||
4187 | enabled |= 1; | ||
4188 | } | ||
4189 | |||
4190 | if (g4x_compute_wm0(dev, 1, | ||
4191 | &sandybridge_display_wm_info, latency, | ||
4192 | &sandybridge_cursor_wm_info, latency, | ||
4193 | &plane_wm, &cursor_wm)) { | ||
4194 | I915_WRITE(WM0_PIPEB_ILK, | ||
4195 | (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm); | ||
4196 | DRM_DEBUG_KMS("FIFO watermarks For pipe B -" | ||
4197 | " plane %d, cursor: %d\n", | ||
4198 | plane_wm, cursor_wm); | ||
4199 | enabled |= 2; | ||
3417 | } | 4200 | } |
4201 | |||
4202 | /* | ||
4203 | * Calculate and update the self-refresh watermark only when one | ||
4204 | * display plane is used. | ||
4205 | * | ||
4206 | * SNB supports 3 levels of watermarks. | ||
4207 | * | ||
4208 | * WM1/WM2/WM3 watermarks have to be enabled in ascending order, | ||
4209 | * and disabled in descending order. | ||
4210 | * | ||
4211 | */ | ||
4212 | I915_WRITE(WM3_LP_ILK, 0); | ||
4213 | I915_WRITE(WM2_LP_ILK, 0); | ||
4214 | I915_WRITE(WM1_LP_ILK, 0); | ||
4215 | |||
4216 | if (!single_plane_enabled(enabled)) | ||
4217 | return; | ||
4218 | enabled = ffs(enabled) - 1; | ||
4219 | |||
4220 | /* WM1 */ | ||
4221 | if (!ironlake_compute_srwm(dev, 1, enabled, | ||
4222 | SNB_READ_WM1_LATENCY() * 500, | ||
4223 | &sandybridge_display_srwm_info, | ||
4224 | &sandybridge_cursor_srwm_info, | ||
4225 | &fbc_wm, &plane_wm, &cursor_wm)) | ||
4226 | return; | ||
4227 | |||
4228 | I915_WRITE(WM1_LP_ILK, | ||
4229 | WM1_LP_SR_EN | | ||
4230 | (SNB_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) | | ||
4231 | (fbc_wm << WM1_LP_FBC_SHIFT) | | ||
4232 | (plane_wm << WM1_LP_SR_SHIFT) | | ||
4233 | cursor_wm); | ||
4234 | |||
4235 | /* WM2 */ | ||
4236 | if (!ironlake_compute_srwm(dev, 2, enabled, | ||
4237 | SNB_READ_WM2_LATENCY() * 500, | ||
4238 | &sandybridge_display_srwm_info, | ||
4239 | &sandybridge_cursor_srwm_info, | ||
4240 | &fbc_wm, &plane_wm, &cursor_wm)) | ||
4241 | return; | ||
4242 | |||
4243 | I915_WRITE(WM2_LP_ILK, | ||
4244 | WM2_LP_EN | | ||
4245 | (SNB_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) | | ||
4246 | (fbc_wm << WM1_LP_FBC_SHIFT) | | ||
4247 | (plane_wm << WM1_LP_SR_SHIFT) | | ||
4248 | cursor_wm); | ||
4249 | |||
4250 | /* WM3 */ | ||
4251 | if (!ironlake_compute_srwm(dev, 3, enabled, | ||
4252 | SNB_READ_WM3_LATENCY() * 500, | ||
4253 | &sandybridge_display_srwm_info, | ||
4254 | &sandybridge_cursor_srwm_info, | ||
4255 | &fbc_wm, &plane_wm, &cursor_wm)) | ||
4256 | return; | ||
4257 | |||
4258 | I915_WRITE(WM3_LP_ILK, | ||
4259 | WM3_LP_EN | | ||
4260 | (SNB_READ_WM3_LATENCY() << WM1_LP_LATENCY_SHIFT) | | ||
4261 | (fbc_wm << WM1_LP_FBC_SHIFT) | | ||
4262 | (plane_wm << WM1_LP_SR_SHIFT) | | ||
4263 | cursor_wm); | ||
3418 | } | 4264 | } |
4265 | |||
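As the comment in sandybridge_update_wm() notes, the LP watermarks must be enabled in ascending order and disabled in descending order, which is why the function zeroes WM3/WM2/WM1 first and then programs WM1, WM2, WM3, returning early at the first level whose computation fails validation. That control flow, reduced to a sketch with a stand-in validity check:

#include <stdio.h>
#include <stdbool.h>

/* Stand-in: pretend only levels up to 'good' pass the srwm checks */
static bool compute_srwm(int level, int good)
{
	return level <= good;
}

int main(void)
{
	int good = 2;

	for (int level = 3; level >= 1; level--)   /* disable, descending */
		printf("WM%d_LP <= 0\n", level);

	for (int level = 1; level <= 3; level++) { /* enable, ascending */
		if (!compute_srwm(level, good))
			break;                     /* leaves WM3 disabled */
		printf("WM%d_LP <= enabled\n", level);
	}
	return 0;
}
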
3419 | /** | 4266 | /** |
3420 | * intel_update_watermarks - update FIFO watermark values based on current modes | 4267 | * intel_update_watermarks - update FIFO watermark values based on current modes |
3421 | * | 4268 | * |
@@ -3447,117 +4294,56 @@ static void ironlake_update_wm(struct drm_device *dev, int planea_clock, | |||
3447 | * | 4294 | * |
3448 | * We don't use the sprite, so we can ignore that. And on Crestline we have | 4295 | * We don't use the sprite, so we can ignore that. And on Crestline we have |
3449 | * to set the non-SR watermarks to 8. | 4296 | * to set the non-SR watermarks to 8. |
3450 | */ | 4297 | */ |
3451 | static void intel_update_watermarks(struct drm_device *dev) | 4298 | static void intel_update_watermarks(struct drm_device *dev) |
3452 | { | 4299 | { |
3453 | struct drm_i915_private *dev_priv = dev->dev_private; | 4300 | struct drm_i915_private *dev_priv = dev->dev_private; |
3454 | struct drm_crtc *crtc; | ||
3455 | int sr_hdisplay = 0; | ||
3456 | unsigned long planea_clock = 0, planeb_clock = 0, sr_clock = 0; | ||
3457 | int enabled = 0, pixel_size = 0; | ||
3458 | int sr_htotal = 0; | ||
3459 | 4301 | ||
3460 | if (!dev_priv->display.update_wm) | 4302 | if (dev_priv->display.update_wm) |
3461 | return; | 4303 | dev_priv->display.update_wm(dev); |
3462 | 4304 | } | |
3463 | /* Get the clock config from both planes */ | ||
3464 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { | ||
3465 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
3466 | if (intel_crtc->dpms_mode == DRM_MODE_DPMS_ON) { | ||
3467 | enabled++; | ||
3468 | if (intel_crtc->plane == 0) { | ||
3469 | DRM_DEBUG_KMS("plane A (pipe %d) clock: %d\n", | ||
3470 | intel_crtc->pipe, crtc->mode.clock); | ||
3471 | planea_clock = crtc->mode.clock; | ||
3472 | } else { | ||
3473 | DRM_DEBUG_KMS("plane B (pipe %d) clock: %d\n", | ||
3474 | intel_crtc->pipe, crtc->mode.clock); | ||
3475 | planeb_clock = crtc->mode.clock; | ||
3476 | } | ||
3477 | sr_hdisplay = crtc->mode.hdisplay; | ||
3478 | sr_clock = crtc->mode.clock; | ||
3479 | sr_htotal = crtc->mode.htotal; | ||
3480 | if (crtc->fb) | ||
3481 | pixel_size = crtc->fb->bits_per_pixel / 8; | ||
3482 | else | ||
3483 | pixel_size = 4; /* by default */ | ||
3484 | } | ||
3485 | } | ||
3486 | |||
3487 | if (enabled <= 0) | ||
3488 | return; | ||
3489 | 4305 | ||
3490 | dev_priv->display.update_wm(dev, planea_clock, planeb_clock, | 4306 | static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv) |
3491 | sr_hdisplay, sr_htotal, pixel_size); | 4307 | { |
4308 | return dev_priv->lvds_use_ssc && i915_panel_use_ssc | ||
4309 | && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE); | ||
3492 | } | 4310 | } |
3493 | 4311 | ||
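After the refactor, intel_update_watermarks() is nothing but a guarded indirect call: each platform's update_wm implementation now walks the CRTC list itself (via single_enabled_crtc() or intel_get_crtc_for_plane()) instead of being handed pre-digested clocks. A simplified stand-in for that dispatch; the real function table hangs off dev_priv->display and is filled in at driver init, which this sketch only imitates:

#include <stdio.h>

struct drm_device;   /* opaque for the sketch */

struct display_funcs {
	void (*update_wm)(struct drm_device *dev);
};

static void i9xx_update_wm(struct drm_device *dev)     { (void)dev; puts("i9xx wm"); }
static void ironlake_update_wm(struct drm_device *dev) { (void)dev; puts("ilk wm"); }

static void intel_update_watermarks(struct drm_device *dev,
                                    const struct display_funcs *disp)
{
	if (disp->update_wm)
		disp->update_wm(dev);
}

int main(void)
{
	struct display_funcs disp = { .update_wm = ironlake_update_wm };

	intel_update_watermarks(NULL, &disp);
	disp.update_wm = i9xx_update_wm;
	intel_update_watermarks(NULL, &disp);
	return 0;
}
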
3494 | static int intel_crtc_mode_set(struct drm_crtc *crtc, | 4312 | static int i9xx_crtc_mode_set(struct drm_crtc *crtc, |
3495 | struct drm_display_mode *mode, | 4313 | struct drm_display_mode *mode, |
3496 | struct drm_display_mode *adjusted_mode, | 4314 | struct drm_display_mode *adjusted_mode, |
3497 | int x, int y, | 4315 | int x, int y, |
3498 | struct drm_framebuffer *old_fb) | 4316 | struct drm_framebuffer *old_fb) |
3499 | { | 4317 | { |
3500 | struct drm_device *dev = crtc->dev; | 4318 | struct drm_device *dev = crtc->dev; |
3501 | struct drm_i915_private *dev_priv = dev->dev_private; | 4319 | struct drm_i915_private *dev_priv = dev->dev_private; |
3502 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 4320 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
3503 | int pipe = intel_crtc->pipe; | 4321 | int pipe = intel_crtc->pipe; |
3504 | int plane = intel_crtc->plane; | 4322 | int plane = intel_crtc->plane; |
3505 | int fp_reg = (pipe == 0) ? FPA0 : FPB0; | ||
3506 | int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B; | ||
3507 | int dpll_md_reg = (intel_crtc->pipe == 0) ? DPLL_A_MD : DPLL_B_MD; | ||
3508 | int dspcntr_reg = (plane == 0) ? DSPACNTR : DSPBCNTR; | ||
3509 | int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF; | ||
3510 | int htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B; | ||
3511 | int hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B; | ||
3512 | int hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B; | ||
3513 | int vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B; | ||
3514 | int vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B; | ||
3515 | int vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B; | ||
3516 | int dspsize_reg = (plane == 0) ? DSPASIZE : DSPBSIZE; | ||
3517 | int dsppos_reg = (plane == 0) ? DSPAPOS : DSPBPOS; | ||
3518 | int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC; | ||
3519 | int refclk, num_connectors = 0; | 4323 | int refclk, num_connectors = 0; |
3520 | intel_clock_t clock, reduced_clock; | 4324 | intel_clock_t clock, reduced_clock; |
3521 | u32 dpll = 0, fp = 0, fp2 = 0, dspcntr, pipeconf; | 4325 | u32 dpll, fp = 0, fp2 = 0, dspcntr, pipeconf; |
3522 | bool ok, has_reduced_clock = false, is_sdvo = false, is_dvo = false; | 4326 | bool ok, has_reduced_clock = false, is_sdvo = false, is_dvo = false; |
3523 | bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false; | 4327 | bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false; |
3524 | struct intel_encoder *has_edp_encoder = NULL; | ||
3525 | struct drm_mode_config *mode_config = &dev->mode_config; | 4328 | struct drm_mode_config *mode_config = &dev->mode_config; |
3526 | struct drm_encoder *encoder; | 4329 | struct intel_encoder *encoder; |
3527 | const intel_limit_t *limit; | 4330 | const intel_limit_t *limit; |
3528 | int ret; | 4331 | int ret; |
3529 | struct fdi_m_n m_n = {0}; | ||
3530 | int data_m1_reg = (pipe == 0) ? PIPEA_DATA_M1 : PIPEB_DATA_M1; | ||
3531 | int data_n1_reg = (pipe == 0) ? PIPEA_DATA_N1 : PIPEB_DATA_N1; | ||
3532 | int link_m1_reg = (pipe == 0) ? PIPEA_LINK_M1 : PIPEB_LINK_M1; | ||
3533 | int link_n1_reg = (pipe == 0) ? PIPEA_LINK_N1 : PIPEB_LINK_N1; | ||
3534 | int pch_fp_reg = (pipe == 0) ? PCH_FPA0 : PCH_FPB0; | ||
3535 | int pch_dpll_reg = (pipe == 0) ? PCH_DPLL_A : PCH_DPLL_B; | ||
3536 | int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL; | ||
3537 | int fdi_tx_reg = (pipe == 0) ? FDI_TXA_CTL : FDI_TXB_CTL; | ||
3538 | int trans_dpll_sel = (pipe == 0) ? 0 : 1; | ||
3539 | int lvds_reg = LVDS; | ||
3540 | u32 temp; | 4332 | u32 temp; |
3541 | int sdvo_pixel_multiply; | 4333 | u32 lvds_sync = 0; |
3542 | int target_clock; | ||
3543 | |||
3544 | drm_vblank_pre_modeset(dev, pipe); | ||
3545 | |||
3546 | list_for_each_entry(encoder, &mode_config->encoder_list, head) { | ||
3547 | struct intel_encoder *intel_encoder; | ||
3548 | 4334 | ||
3549 | if (encoder->crtc != crtc) | 4335 | list_for_each_entry(encoder, &mode_config->encoder_list, base.head) { |
4336 | if (encoder->base.crtc != crtc) | ||
3550 | continue; | 4337 | continue; |
3551 | 4338 | ||
3552 | intel_encoder = enc_to_intel_encoder(encoder); | 4339 | switch (encoder->type) { |
3553 | switch (intel_encoder->type) { | ||
3554 | case INTEL_OUTPUT_LVDS: | 4340 | case INTEL_OUTPUT_LVDS: |
3555 | is_lvds = true; | 4341 | is_lvds = true; |
3556 | break; | 4342 | break; |
3557 | case INTEL_OUTPUT_SDVO: | 4343 | case INTEL_OUTPUT_SDVO: |
3558 | case INTEL_OUTPUT_HDMI: | 4344 | case INTEL_OUTPUT_HDMI: |
3559 | is_sdvo = true; | 4345 | is_sdvo = true; |
3560 | if (intel_encoder->needs_tv_clock) | 4346 | if (encoder->needs_tv_clock) |
3561 | is_tv = true; | 4347 | is_tv = true; |
3562 | break; | 4348 | break; |
3563 | case INTEL_OUTPUT_DVO: | 4349 | case INTEL_OUTPUT_DVO: |
@@ -3572,48 +4358,41 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
3572 | case INTEL_OUTPUT_DISPLAYPORT: | 4358 | case INTEL_OUTPUT_DISPLAYPORT: |
3573 | is_dp = true; | 4359 | is_dp = true; |
3574 | break; | 4360 | break; |
3575 | case INTEL_OUTPUT_EDP: | ||
3576 | has_edp_encoder = intel_encoder; | ||
3577 | break; | ||
3578 | } | 4361 | } |
3579 | 4362 | ||
3580 | num_connectors++; | 4363 | num_connectors++; |
3581 | } | 4364 | } |
3582 | 4365 | ||
3583 | if (is_lvds && dev_priv->lvds_use_ssc && num_connectors < 2) { | 4366 | if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) { |
3584 | refclk = dev_priv->lvds_ssc_freq * 1000; | 4367 | refclk = dev_priv->lvds_ssc_freq * 1000; |
3585 | DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n", | 4368 | DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n", |
3586 | refclk / 1000); | 4369 | refclk / 1000); |
3587 | } else if (IS_I9XX(dev)) { | 4370 | } else if (!IS_GEN2(dev)) { |
3588 | refclk = 96000; | 4371 | refclk = 96000; |
3589 | if (HAS_PCH_SPLIT(dev)) | ||
3590 | refclk = 120000; /* 120Mhz refclk */ | ||
3591 | } else { | 4372 | } else { |
3592 | refclk = 48000; | 4373 | refclk = 48000; |
3593 | } | 4374 | } |
3594 | |||
3595 | 4375 | ||
3596 | /* | 4376 | /* |
3597 | * Returns a set of divisors for the desired target clock with the given | 4377 | * Returns a set of divisors for the desired target clock with the given |
3598 | * refclk, or FALSE. The returned values represent the clock equation: | 4378 | * refclk, or FALSE. The returned values represent the clock equation: |
3599 | * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2. | 4379 | * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2. |
3600 | */ | 4380 | */ |
3601 | limit = intel_limit(crtc); | 4381 | limit = intel_limit(crtc, refclk); |
3602 | ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, &clock); | 4382 | ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, &clock); |
3603 | if (!ok) { | 4383 | if (!ok) { |
3604 | DRM_ERROR("Couldn't find PLL settings for mode!\n"); | 4384 | DRM_ERROR("Couldn't find PLL settings for mode!\n"); |
3605 | drm_vblank_post_modeset(dev, pipe); | ||
3606 | return -EINVAL; | 4385 | return -EINVAL; |
3607 | } | 4386 | } |
3608 | 4387 | ||
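The divisor comment above can be checked numerically: for a candidate (m1, m2, n, p1, p2), the resulting dot clock is refclk * (5*(m1+2) + (m2+2)) / (n+2) / p1 / p2, everything in kHz. A quick evaluation for one sample divisor set against the 96 MHz reference:

#include <stdio.h>

/* Dot clock per the comment: refclk * (5*(m1+2) + (m2+2)) / (n+2) / p1 / p2 */
static long dpll_dot_khz(long refclk, int m1, int m2, int n, int p1, int p2)
{
	long m = 5L * (m1 + 2) + (m2 + 2);

	return refclk * m / (n + 2) / p1 / p2;
}

int main(void)
{
	/* sample divisors: m1=16 m2=8 n=3 p1=2 p2=10 */
	printf("dot = %ld kHz\n", dpll_dot_khz(96000, 16, 8, 3, 2, 10));  /* 96000 */
	return 0;
}
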
3609 | /* Ensure that the cursor is valid for the new mode before changing... */ | 4388 | /* Ensure that the cursor is valid for the new mode before changing... */ |
3610 | intel_crtc_update_cursor(crtc); | 4389 | intel_crtc_update_cursor(crtc, true); |
3611 | 4390 | ||
3612 | if (is_lvds && dev_priv->lvds_downclock_avail) { | 4391 | if (is_lvds && dev_priv->lvds_downclock_avail) { |
3613 | has_reduced_clock = limit->find_pll(limit, crtc, | 4392 | has_reduced_clock = limit->find_pll(limit, crtc, |
3614 | dev_priv->lvds_downclock, | 4393 | dev_priv->lvds_downclock, |
3615 | refclk, | 4394 | refclk, |
3616 | &reduced_clock); | 4395 | &reduced_clock); |
3617 | if (has_reduced_clock && (clock.p != reduced_clock.p)) { | 4396 | if (has_reduced_clock && (clock.p != reduced_clock.p)) { |
3618 | /* | 4397 | /* |
3619 | * If the different P is found, it means that we can't | 4398 | * If the different P is found, it means that we can't |
@@ -3622,7 +4401,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
3622 | * feature. | 4401 | * feature. |
3623 | */ | 4402 | */ |
3624 | DRM_DEBUG_KMS("Different P is found for " | 4403 | DRM_DEBUG_KMS("Different P is found for " |
3625 | "LVDS clock/downclock\n"); | 4404 | "LVDS clock/downclock\n"); |
3626 | has_reduced_clock = 0; | 4405 | has_reduced_clock = 0; |
3627 | } | 4406 | } |
3628 | } | 4407 | } |
@@ -3630,14 +4409,14 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
3630 | this mirrors vbios setting. */ | 4409 | this mirrors vbios setting. */ |
3631 | if (is_sdvo && is_tv) { | 4410 | if (is_sdvo && is_tv) { |
3632 | if (adjusted_mode->clock >= 100000 | 4411 | if (adjusted_mode->clock >= 100000 |
3633 | && adjusted_mode->clock < 140500) { | 4412 | && adjusted_mode->clock < 140500) { |
3634 | clock.p1 = 2; | 4413 | clock.p1 = 2; |
3635 | clock.p2 = 10; | 4414 | clock.p2 = 10; |
3636 | clock.n = 3; | 4415 | clock.n = 3; |
3637 | clock.m1 = 16; | 4416 | clock.m1 = 16; |
3638 | clock.m2 = 8; | 4417 | clock.m2 = 8; |
3639 | } else if (adjusted_mode->clock >= 140500 | 4418 | } else if (adjusted_mode->clock >= 140500 |
3640 | && adjusted_mode->clock <= 200000) { | 4419 | && adjusted_mode->clock <= 200000) { |
3641 | clock.p1 = 1; | 4420 | clock.p1 = 1; |
3642 | clock.p2 = 10; | 4421 | clock.p2 = 10; |
3643 | clock.n = 6; | 4422 | clock.n = 6; |
@@ -3646,128 +4425,6 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
3646 | } | 4425 | } |
3647 | } | 4426 | } |
3648 | 4427 | ||
3649 | /* FDI link */ | ||
3650 | if (HAS_PCH_SPLIT(dev)) { | ||
3651 | int lane = 0, link_bw, bpp; | ||
3652 | /* eDP doesn't require FDI link, so just set DP M/N | ||
3653 | according to current link config */ | ||
3654 | if (has_edp_encoder) { | ||
3655 | target_clock = mode->clock; | ||
3656 | intel_edp_link_config(has_edp_encoder, | ||
3657 | &lane, &link_bw); | ||
3658 | } else { | ||
3659 | /* DP over FDI requires target mode clock | ||
3660 | instead of link clock */ | ||
3661 | if (is_dp) | ||
3662 | target_clock = mode->clock; | ||
3663 | else | ||
3664 | target_clock = adjusted_mode->clock; | ||
3665 | link_bw = 270000; | ||
3666 | } | ||
3667 | |||
3668 | /* determine panel color depth */ | ||
3669 | temp = I915_READ(pipeconf_reg); | ||
3670 | temp &= ~PIPE_BPC_MASK; | ||
3671 | if (is_lvds) { | ||
3672 | int lvds_reg = I915_READ(PCH_LVDS); | ||
3673 | /* the BPC will be 6 if it is 18-bit LVDS panel */ | ||
3674 | if ((lvds_reg & LVDS_A3_POWER_MASK) == LVDS_A3_POWER_UP) | ||
3675 | temp |= PIPE_8BPC; | ||
3676 | else | ||
3677 | temp |= PIPE_6BPC; | ||
3678 | } else if (has_edp_encoder || (is_dp && intel_pch_has_edp(crtc))) { | ||
3679 | switch (dev_priv->edp_bpp/3) { | ||
3680 | case 8: | ||
3681 | temp |= PIPE_8BPC; | ||
3682 | break; | ||
3683 | case 10: | ||
3684 | temp |= PIPE_10BPC; | ||
3685 | break; | ||
3686 | case 6: | ||
3687 | temp |= PIPE_6BPC; | ||
3688 | break; | ||
3689 | case 12: | ||
3690 | temp |= PIPE_12BPC; | ||
3691 | break; | ||
3692 | } | ||
3693 | } else | ||
3694 | temp |= PIPE_8BPC; | ||
3695 | I915_WRITE(pipeconf_reg, temp); | ||
3696 | I915_READ(pipeconf_reg); | ||
3697 | |||
3698 | switch (temp & PIPE_BPC_MASK) { | ||
3699 | case PIPE_8BPC: | ||
3700 | bpp = 24; | ||
3701 | break; | ||
3702 | case PIPE_10BPC: | ||
3703 | bpp = 30; | ||
3704 | break; | ||
3705 | case PIPE_6BPC: | ||
3706 | bpp = 18; | ||
3707 | break; | ||
3708 | case PIPE_12BPC: | ||
3709 | bpp = 36; | ||
3710 | break; | ||
3711 | default: | ||
3712 | DRM_ERROR("unknown pipe bpc value\n"); | ||
3713 | bpp = 24; | ||
3714 | } | ||
3715 | |||
3716 | if (!lane) { | ||
3717 | /* | ||
3718 | * Account for spread spectrum to avoid | ||
3719 | * oversubscribing the link. Max center spread | ||
3720 | * is 2.5%; use 5% for safety's sake. | ||
3721 | */ | ||
3722 | u32 bps = target_clock * bpp * 21 / 20; | ||
3723 | lane = bps / (link_bw * 8) + 1; | ||
3724 | } | ||
3725 | |||
3726 | intel_crtc->fdi_lanes = lane; | ||
3727 | |||
3728 | ironlake_compute_m_n(bpp, lane, target_clock, link_bw, &m_n); | ||
3729 | } | ||
3730 | |||
3731 | /* Ironlake: try to setup display ref clock before DPLL | ||
3732 | * enabling. This is only under driver's control after | ||
3733 | * PCH B stepping, previous chipset stepping should be | ||
3734 | * ignoring this setting. | ||
3735 | */ | ||
3736 | if (HAS_PCH_SPLIT(dev)) { | ||
3737 | temp = I915_READ(PCH_DREF_CONTROL); | ||
3738 | /* Always enable nonspread source */ | ||
3739 | temp &= ~DREF_NONSPREAD_SOURCE_MASK; | ||
3740 | temp |= DREF_NONSPREAD_SOURCE_ENABLE; | ||
3741 | I915_WRITE(PCH_DREF_CONTROL, temp); | ||
3742 | POSTING_READ(PCH_DREF_CONTROL); | ||
3743 | |||
3744 | temp &= ~DREF_SSC_SOURCE_MASK; | ||
3745 | temp |= DREF_SSC_SOURCE_ENABLE; | ||
3746 | I915_WRITE(PCH_DREF_CONTROL, temp); | ||
3747 | POSTING_READ(PCH_DREF_CONTROL); | ||
3748 | |||
3749 | udelay(200); | ||
3750 | |||
3751 | if (has_edp_encoder) { | ||
3752 | if (dev_priv->lvds_use_ssc) { | ||
3753 | temp |= DREF_SSC1_ENABLE; | ||
3754 | I915_WRITE(PCH_DREF_CONTROL, temp); | ||
3755 | POSTING_READ(PCH_DREF_CONTROL); | ||
3756 | |||
3757 | udelay(200); | ||
3758 | |||
3759 | temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK; | ||
3760 | temp |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD; | ||
3761 | I915_WRITE(PCH_DREF_CONTROL, temp); | ||
3762 | POSTING_READ(PCH_DREF_CONTROL); | ||
3763 | } else { | ||
3764 | temp |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD; | ||
3765 | I915_WRITE(PCH_DREF_CONTROL, temp); | ||
3766 | POSTING_READ(PCH_DREF_CONTROL); | ||
3767 | } | ||
3768 | } | ||
3769 | } | ||
3770 | |||
3771 | if (IS_PINEVIEW(dev)) { | 4428 | if (IS_PINEVIEW(dev)) { |
3772 | fp = (1 << clock.n) << 16 | clock.m1 << 8 | clock.m2; | 4429 | fp = (1 << clock.n) << 16 | clock.m1 << 8 | clock.m2; |
3773 | if (has_reduced_clock) | 4430 | if (has_reduced_clock) |
@@ -3780,21 +4437,20 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
3780 | reduced_clock.m2; | 4437 | reduced_clock.m2; |
3781 | } | 4438 | } |
3782 | 4439 | ||
3783 | if (!HAS_PCH_SPLIT(dev)) | 4440 | dpll = DPLL_VGA_MODE_DIS; |
3784 | dpll = DPLL_VGA_MODE_DIS; | ||
3785 | 4441 | ||
3786 | if (IS_I9XX(dev)) { | 4442 | if (!IS_GEN2(dev)) { |
3787 | if (is_lvds) | 4443 | if (is_lvds) |
3788 | dpll |= DPLLB_MODE_LVDS; | 4444 | dpll |= DPLLB_MODE_LVDS; |
3789 | else | 4445 | else |
3790 | dpll |= DPLLB_MODE_DAC_SERIAL; | 4446 | dpll |= DPLLB_MODE_DAC_SERIAL; |
3791 | if (is_sdvo) { | 4447 | if (is_sdvo) { |
4448 | int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode); | ||
4449 | if (pixel_multiplier > 1) { | ||
4450 | if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) | ||
4451 | dpll |= (pixel_multiplier - 1) << SDVO_MULTIPLIER_SHIFT_HIRES; | ||
4452 | } | ||
3792 | dpll |= DPLL_DVO_HIGH_SPEED; | 4453 | dpll |= DPLL_DVO_HIGH_SPEED; |
3793 | sdvo_pixel_multiply = adjusted_mode->clock / mode->clock; | ||
3794 | if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) | ||
3795 | dpll |= (sdvo_pixel_multiply - 1) << SDVO_MULTIPLIER_SHIFT_HIRES; | ||
3796 | else if (HAS_PCH_SPLIT(dev)) | ||
3797 | dpll |= (sdvo_pixel_multiply - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT; | ||
3798 | } | 4454 | } |
3799 | if (is_dp) | 4455 | if (is_dp) |
3800 | dpll |= DPLL_DVO_HIGH_SPEED; | 4456 | dpll |= DPLL_DVO_HIGH_SPEED; |
@@ -3804,9 +4460,6 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
3804 | dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW; | 4460 | dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW; |
3805 | else { | 4461 | else { |
3806 | dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT; | 4462 | dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT; |
3807 | /* also FPA1 */ | ||
3808 | if (HAS_PCH_SPLIT(dev)) | ||
3809 | dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT; | ||
3810 | if (IS_G4X(dev) && has_reduced_clock) | 4463 | if (IS_G4X(dev) && has_reduced_clock) |
3811 | dpll |= (1 << (reduced_clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT; | 4464 | dpll |= (1 << (reduced_clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT; |
3812 | } | 4465 | } |
@@ -3824,7 +4477,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
3824 | dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14; | 4477 | dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14; |
3825 | break; | 4478 | break; |
3826 | } | 4479 | } |
3827 | if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev)) | 4480 | if (INTEL_INFO(dev)->gen >= 4) |
3828 | dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT); | 4481 | dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT); |
3829 | } else { | 4482 | } else { |
3830 | if (is_lvds) { | 4483 | if (is_lvds) { |
@@ -3845,27 +4498,25 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
3845 | /* XXX: just matching BIOS for now */ | 4498 | /* XXX: just matching BIOS for now */ |
3846 | /* dpll |= PLL_REF_INPUT_TVCLKINBC; */ | 4499 | /* dpll |= PLL_REF_INPUT_TVCLKINBC; */ |
3847 | dpll |= 3; | 4500 | dpll |= 3; |
3848 | else if (is_lvds && dev_priv->lvds_use_ssc && num_connectors < 2) | 4501 | else if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) |
3849 | dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN; | 4502 | dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN; |
3850 | else | 4503 | else |
3851 | dpll |= PLL_REF_INPUT_DREFCLK; | 4504 | dpll |= PLL_REF_INPUT_DREFCLK; |
3852 | 4505 | ||
3853 | /* setup pipeconf */ | 4506 | /* setup pipeconf */ |
3854 | pipeconf = I915_READ(pipeconf_reg); | 4507 | pipeconf = I915_READ(PIPECONF(pipe)); |
3855 | 4508 | ||
3856 | /* Set up the display plane register */ | 4509 | /* Set up the display plane register */ |
3857 | dspcntr = DISPPLANE_GAMMA_ENABLE; | 4510 | dspcntr = DISPPLANE_GAMMA_ENABLE; |
3858 | 4511 | ||
3859 | /* Ironlake's plane is forced to pipe, bit 24 is to | 4512 | /* Ironlake's plane is forced to pipe, bit 24 is to |
3860 | enable color space conversion */ | 4513 | enable color space conversion */ |
3861 | if (!HAS_PCH_SPLIT(dev)) { | 4514 | if (pipe == 0) |
3862 | if (pipe == 0) | 4515 | dspcntr &= ~DISPPLANE_SEL_PIPE_MASK; |
3863 | dspcntr &= ~DISPPLANE_SEL_PIPE_MASK; | 4516 | else |
3864 | else | 4517 | dspcntr |= DISPPLANE_SEL_PIPE_B; |
3865 | dspcntr |= DISPPLANE_SEL_PIPE_B; | ||
3866 | } | ||
3867 | 4518 | ||
3868 | if (pipe == 0 && !IS_I965G(dev)) { | 4519 | if (pipe == 0 && INTEL_INFO(dev)->gen < 4) { |
3869 | /* Enable pixel doubling when the dot clock is > 90% of the (display) | 4520 | /* Enable pixel doubling when the dot clock is > 90% of the (display) |
3870 | * core speed. | 4521 | * core speed. |
3871 | * | 4522 | * |
@@ -3874,51 +4525,536 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
3874 | */ | 4525 | */ |
3875 | if (mode->clock > | 4526 | if (mode->clock > |
3876 | dev_priv->display.get_display_clock_speed(dev) * 9 / 10) | 4527 | dev_priv->display.get_display_clock_speed(dev) * 9 / 10) |
3877 | pipeconf |= PIPEACONF_DOUBLE_WIDE; | 4528 | pipeconf |= PIPECONF_DOUBLE_WIDE; |
3878 | else | 4529 | else |
3879 | pipeconf &= ~PIPEACONF_DOUBLE_WIDE; | 4530 | pipeconf &= ~PIPECONF_DOUBLE_WIDE; |
3880 | } | 4531 | } |
3881 | 4532 | ||
3882 | dspcntr |= DISPLAY_PLANE_ENABLE; | ||
3883 | pipeconf |= PIPEACONF_ENABLE; | ||
3884 | dpll |= DPLL_VCO_ENABLE; | 4533 | dpll |= DPLL_VCO_ENABLE; |
3885 | 4534 | ||
4535 | DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B'); | ||
4536 | drm_mode_debug_printmodeline(mode); | ||
3886 | 4537 | ||
3887 | /* Disable the panel fitter if it was on our pipe */ | 4538 | I915_WRITE(FP0(pipe), fp); |
3888 | if (!HAS_PCH_SPLIT(dev) && intel_panel_fitter_pipe(dev) == pipe) | 4539 | I915_WRITE(DPLL(pipe), dpll & ~DPLL_VCO_ENABLE); |
3889 | I915_WRITE(PFIT_CONTROL, 0); | 4540 | |
4541 | POSTING_READ(DPLL(pipe)); | ||
4542 | udelay(150); | ||
4543 | |||
4544 | /* The LVDS pin pair needs to be on before the DPLLs are enabled. | ||
4545 | * This is an exception to the general rule that mode_set doesn't turn | ||
4546 | * things on. | ||
4547 | */ | ||
4548 | if (is_lvds) { | ||
4549 | temp = I915_READ(LVDS); | ||
4550 | temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP; | ||
4551 | if (pipe == 1) { | ||
4552 | temp |= LVDS_PIPEB_SELECT; | ||
4553 | } else { | ||
4554 | temp &= ~LVDS_PIPEB_SELECT; | ||
4555 | } | ||
4556 | /* set the corresponding LVDS_BORDER bit */ | ||
4557 | temp |= dev_priv->lvds_border_bits; | ||
4558 | /* Set the B0-B3 data pairs corresponding to whether we're going to | ||
4559 | * set the DPLLs for dual-channel mode or not. | ||
4560 | */ | ||
4561 | if (clock.p2 == 7) | ||
4562 | temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP; | ||
4563 | else | ||
4564 | temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP); | ||
4565 | |||
4566 | /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP) | ||
4567 | * appropriately here, but we need to look more thoroughly into how | ||
4568 | * panels behave in the two modes. | ||
4569 | */ | ||
4570 | /* set the dithering flag on LVDS as needed */ | ||
4571 | if (INTEL_INFO(dev)->gen >= 4) { | ||
4572 | if (dev_priv->lvds_dither) | ||
4573 | temp |= LVDS_ENABLE_DITHER; | ||
4574 | else | ||
4575 | temp &= ~LVDS_ENABLE_DITHER; | ||
4576 | } | ||
4577 | if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC) | ||
4578 | lvds_sync |= LVDS_HSYNC_POLARITY; | ||
4579 | if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC) | ||
4580 | lvds_sync |= LVDS_VSYNC_POLARITY; | ||
4581 | if ((temp & (LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY)) | ||
4582 | != lvds_sync) { | ||
4583 | char flags[2] = "-+"; | ||
4584 | DRM_INFO("Changing LVDS panel from " | ||
4585 | "(%chsync, %cvsync) to (%chsync, %cvsync)\n", | ||
4586 | flags[!(temp & LVDS_HSYNC_POLARITY)], | ||
4587 | flags[!(temp & LVDS_VSYNC_POLARITY)], | ||
4588 | flags[!(lvds_sync & LVDS_HSYNC_POLARITY)], | ||
4589 | flags[!(lvds_sync & LVDS_VSYNC_POLARITY)]); | ||
4590 | temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY); | ||
4591 | temp |= lvds_sync; | ||
4592 | } | ||
4593 | I915_WRITE(LVDS, temp); | ||
4594 | } | ||
4595 | |||
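The two-character array above is a compact sign formatter: !(temp & BIT) evaluates to 0 when the polarity bit is set (indexing '-') and to 1 when it is clear (indexing '+'). In miniature, with a made-up bit position:

    #include <stdio.h>

    int main(void)
    {
        char flags[2] = "-+";               /* no NUL terminator needed here */
        unsigned int temp = 0x8, bit = 0x8; /* pretend the polarity bit is bit 3 */

        /* bit set -> index 0 -> '-'; bit clear -> index 1 -> '+' */
        printf("%c\n", flags[!(temp & bit)]); /* prints '-' */
        return 0;
    }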
4596 | if (is_dp) { | ||
4597 | intel_dp_set_m_n(crtc, mode, adjusted_mode); | ||
4598 | } | ||
4599 | |||
4600 | I915_WRITE(DPLL(pipe), dpll); | ||
4601 | |||
4602 | /* Wait for the clocks to stabilize. */ | ||
4603 | POSTING_READ(DPLL(pipe)); | ||
4604 | udelay(150); | ||
4605 | |||
4606 | if (INTEL_INFO(dev)->gen >= 4) { | ||
4607 | temp = 0; | ||
4608 | if (is_sdvo) { | ||
4609 | temp = intel_mode_get_pixel_multiplier(adjusted_mode); | ||
4610 | if (temp > 1) | ||
4611 | temp = (temp - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT; | ||
4612 | else | ||
4613 | temp = 0; | ||
4614 | } | ||
4615 | I915_WRITE(DPLL_MD(pipe), temp); | ||
4616 | } else { | ||
4617 | /* The pixel multiplier can only be updated once the | ||
4618 | * DPLL is enabled and the clocks are stable. | ||
4619 | * | ||
4620 | * So write it again. | ||
4621 | */ | ||
4622 | I915_WRITE(DPLL(pipe), dpll); | ||
4623 | } | ||
4624 | |||
4625 | intel_crtc->lowfreq_avail = false; | ||
4626 | if (is_lvds && has_reduced_clock && i915_powersave) { | ||
4627 | I915_WRITE(FP1(pipe), fp2); | ||
4628 | intel_crtc->lowfreq_avail = true; | ||
4629 | if (HAS_PIPE_CXSR(dev)) { | ||
4630 | DRM_DEBUG_KMS("enabling CxSR downclocking\n"); | ||
4631 | pipeconf |= PIPECONF_CXSR_DOWNCLOCK; | ||
4632 | } | ||
4633 | } else { | ||
4634 | I915_WRITE(FP1(pipe), fp); | ||
4635 | if (HAS_PIPE_CXSR(dev)) { | ||
4636 | DRM_DEBUG_KMS("disabling CxSR downclocking\n"); | ||
4637 | pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK; | ||
4638 | } | ||
4639 | } | ||
4640 | |||
4641 | if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) { | ||
4642 | pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION; | ||
4643 | /* the chip adds 2 halflines automatically */ | ||
4644 | adjusted_mode->crtc_vdisplay -= 1; | ||
4645 | adjusted_mode->crtc_vtotal -= 1; | ||
4646 | adjusted_mode->crtc_vblank_start -= 1; | ||
4647 | adjusted_mode->crtc_vblank_end -= 1; | ||
4648 | adjusted_mode->crtc_vsync_end -= 1; | ||
4649 | adjusted_mode->crtc_vsync_start -= 1; | ||
4650 | } else | ||
4651 | pipeconf &= ~PIPECONF_INTERLACE_W_FIELD_INDICATION; /* progressive */ | ||
4652 | |||
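Because the chip inserts the two extra halflines of an interlaced frame itself, each vertical field value is pulled in by one before the usual minus-one register packing below. A sketch with hypothetical 1080i-style timings:

    #include <stdio.h>

    int main(void)
    {
        int vdisplay = 1080, vtotal = 1125; /* hypothetical 1080i numbers */

        /* halfline adjustment, as in the interlaced branch above */
        vdisplay -= 1;
        vtotal -= 1;

        /* VTOTAL then takes (vdisplay - 1) | ((vtotal - 1) << 16) */
        unsigned int reg = (vdisplay - 1) | ((vtotal - 1) << 16);
        printf("VTOTAL = 0x%08x\n", reg); /* 0x04630436 */
        return 0;
    }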
4653 | I915_WRITE(HTOTAL(pipe), | ||
4654 | (adjusted_mode->crtc_hdisplay - 1) | | ||
4655 | ((adjusted_mode->crtc_htotal - 1) << 16)); | ||
4656 | I915_WRITE(HBLANK(pipe), | ||
4657 | (adjusted_mode->crtc_hblank_start - 1) | | ||
4658 | ((adjusted_mode->crtc_hblank_end - 1) << 16)); | ||
4659 | I915_WRITE(HSYNC(pipe), | ||
4660 | (adjusted_mode->crtc_hsync_start - 1) | | ||
4661 | ((adjusted_mode->crtc_hsync_end - 1) << 16)); | ||
4662 | |||
4663 | I915_WRITE(VTOTAL(pipe), | ||
4664 | (adjusted_mode->crtc_vdisplay - 1) | | ||
4665 | ((adjusted_mode->crtc_vtotal - 1) << 16)); | ||
4666 | I915_WRITE(VBLANK(pipe), | ||
4667 | (adjusted_mode->crtc_vblank_start - 1) | | ||
4668 | ((adjusted_mode->crtc_vblank_end - 1) << 16)); | ||
4669 | I915_WRITE(VSYNC(pipe), | ||
4670 | (adjusted_mode->crtc_vsync_start - 1) | | ||
4671 | ((adjusted_mode->crtc_vsync_end - 1) << 16)); | ||
4672 | |||
4673 | /* pipesrc and dspsize control the size that is scaled from, | ||
4674 | * which should always be the user's requested size. | ||
4675 | */ | ||
4676 | I915_WRITE(DSPSIZE(plane), | ||
4677 | ((mode->vdisplay - 1) << 16) | | ||
4678 | (mode->hdisplay - 1)); | ||
4679 | I915_WRITE(DSPPOS(plane), 0); | ||
4680 | I915_WRITE(PIPESRC(pipe), | ||
4681 | ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1)); | ||
4682 | |||
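One subtlety worth flagging: the two size registers pack width and height in opposite halves. DSPSIZE keeps (height - 1) in the high word, while PIPESRC keeps (width - 1) there. A self-contained check of the packing used above:

    #include <stdio.h>

    int main(void)
    {
        int hdisplay = 1024, vdisplay = 768; /* the user's requested mode */

        unsigned int dspsize = ((vdisplay - 1) << 16) | (hdisplay - 1);
        unsigned int pipesrc = ((hdisplay - 1) << 16) | (vdisplay - 1);

        printf("DSPSIZE = 0x%08x\n", dspsize); /* 0x02ff03ff */
        printf("PIPESRC = 0x%08x\n", pipesrc); /* 0x03ff02ff */
        return 0;
    }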
4683 | I915_WRITE(PIPECONF(pipe), pipeconf); | ||
4684 | POSTING_READ(PIPECONF(pipe)); | ||
4685 | intel_enable_pipe(dev_priv, pipe, false); | ||
4686 | |||
4687 | intel_wait_for_vblank(dev, pipe); | ||
4688 | |||
4689 | I915_WRITE(DSPCNTR(plane), dspcntr); | ||
4690 | POSTING_READ(DSPCNTR(plane)); | ||
4691 | intel_enable_plane(dev_priv, plane, pipe); | ||
4692 | |||
4693 | ret = intel_pipe_set_base(crtc, x, y, old_fb); | ||
4694 | |||
4695 | intel_update_watermarks(dev); | ||
4696 | |||
4697 | return ret; | ||
4698 | } | ||
4699 | |||
4700 | static int ironlake_crtc_mode_set(struct drm_crtc *crtc, | ||
4701 | struct drm_display_mode *mode, | ||
4702 | struct drm_display_mode *adjusted_mode, | ||
4703 | int x, int y, | ||
4704 | struct drm_framebuffer *old_fb) | ||
4705 | { | ||
4706 | struct drm_device *dev = crtc->dev; | ||
4707 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
4708 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
4709 | int pipe = intel_crtc->pipe; | ||
4710 | int plane = intel_crtc->plane; | ||
4711 | int refclk, num_connectors = 0; | ||
4712 | intel_clock_t clock, reduced_clock; | ||
4713 | u32 dpll, fp = 0, fp2 = 0, dspcntr, pipeconf; | ||
4714 | bool ok, has_reduced_clock = false, is_sdvo = false; | ||
4715 | bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false; | ||
4716 | struct intel_encoder *has_edp_encoder = NULL; | ||
4717 | struct drm_mode_config *mode_config = &dev->mode_config; | ||
4718 | struct intel_encoder *encoder; | ||
4719 | const intel_limit_t *limit; | ||
4720 | int ret; | ||
4721 | struct fdi_m_n m_n = {0}; | ||
4722 | u32 temp; | ||
4723 | u32 lvds_sync = 0; | ||
4724 | int target_clock, pixel_multiplier, lane, link_bw, bpp, factor; | ||
4725 | |||
4726 | list_for_each_entry(encoder, &mode_config->encoder_list, base.head) { | ||
4727 | if (encoder->base.crtc != crtc) | ||
4728 | continue; | ||
4729 | |||
4730 | switch (encoder->type) { | ||
4731 | case INTEL_OUTPUT_LVDS: | ||
4732 | is_lvds = true; | ||
4733 | break; | ||
4734 | case INTEL_OUTPUT_SDVO: | ||
4735 | case INTEL_OUTPUT_HDMI: | ||
4736 | is_sdvo = true; | ||
4737 | if (encoder->needs_tv_clock) | ||
4738 | is_tv = true; | ||
4739 | break; | ||
4740 | case INTEL_OUTPUT_TVOUT: | ||
4741 | is_tv = true; | ||
4742 | break; | ||
4743 | case INTEL_OUTPUT_ANALOG: | ||
4744 | is_crt = true; | ||
4745 | break; | ||
4746 | case INTEL_OUTPUT_DISPLAYPORT: | ||
4747 | is_dp = true; | ||
4748 | break; | ||
4749 | case INTEL_OUTPUT_EDP: | ||
4750 | has_edp_encoder = encoder; | ||
4751 | break; | ||
4752 | } | ||
4753 | |||
4754 | num_connectors++; | ||
4755 | } | ||
4756 | |||
4757 | if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) { | ||
4758 | refclk = dev_priv->lvds_ssc_freq * 1000; | ||
4759 | DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n", | ||
4760 | refclk / 1000); | ||
4761 | } else { | ||
4762 | refclk = 96000; | ||
4763 | if (!has_edp_encoder || | ||
4764 | intel_encoder_is_pch_edp(&has_edp_encoder->base)) | ||
4765 | refclk = 120000; /* 120Mhz refclk */ | ||
4766 | } | ||
4767 | |||
4768 | /* | ||
4769 | * Returns a set of divisors for the desired target clock with the given | ||
4770 | * refclk, or FALSE. The returned values represent the clock equation: | ||
4771 | * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2. | ||
4772 | */ | ||
4773 | limit = intel_limit(crtc, refclk); | ||
4774 | ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, &clock); | ||
4775 | if (!ok) { | ||
4776 | DRM_ERROR("Couldn't find PLL settings for mode!\n"); | ||
4777 | return -EINVAL; | ||
4778 | } | ||
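A worked instance of the clock equation in the comment above, as a standalone sketch (the divisor values are hypothetical, not taken from any real limit table):

    #include <stdio.h>

    /* Evaluate dot = refclk * (5*(m1+2) + (m2+2)) / (n+2) / p1 / p2
     * for one illustrative set of divisors.
     */
    int main(void)
    {
        long refclk = 96000;                  /* kHz, the non-SSC reference */
        int m1 = 12, m2 = 8, n = 3, p1 = 2, p2 = 10;
        long dot = refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2;

        printf("dot clock = %ld kHz\n", dot); /* 96000*80/5/2/10 = 76800 */
        return 0;
    }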
4779 | |||
4780 | /* Ensure that the cursor is valid for the new mode before changing... */ | ||
4781 | intel_crtc_update_cursor(crtc, true); | ||
4782 | |||
4783 | if (is_lvds && dev_priv->lvds_downclock_avail) { | ||
4784 | has_reduced_clock = limit->find_pll(limit, crtc, | ||
4785 | dev_priv->lvds_downclock, | ||
4786 | refclk, | ||
4787 | &reduced_clock); | ||
4788 | if (has_reduced_clock && (clock.p != reduced_clock.p)) { | ||
4789 | /* | ||
4790 | * If a different P is found, it means that we can't | ||
4791 | * switch the display clock by using FP0/FP1. | ||
4792 | * In that case we disable the LVDS downclock | ||
4793 | * feature. | ||
4794 | */ | ||
4795 | DRM_DEBUG_KMS("Different P is found for " | ||
4796 | "LVDS clock/downclock\n"); | ||
4797 | has_reduced_clock = 0; | ||
4798 | } | ||
4799 | } | ||
4800 | /* SDVO TV has fixed PLL values that depend on its clock range; | ||
4801 | this mirrors the VBIOS setting. */ | ||
4802 | if (is_sdvo && is_tv) { | ||
4803 | if (adjusted_mode->clock >= 100000 | ||
4804 | && adjusted_mode->clock < 140500) { | ||
4805 | clock.p1 = 2; | ||
4806 | clock.p2 = 10; | ||
4807 | clock.n = 3; | ||
4808 | clock.m1 = 16; | ||
4809 | clock.m2 = 8; | ||
4810 | } else if (adjusted_mode->clock >= 140500 | ||
4811 | && adjusted_mode->clock <= 200000) { | ||
4812 | clock.p1 = 1; | ||
4813 | clock.p2 = 10; | ||
4814 | clock.n = 6; | ||
4815 | clock.m1 = 12; | ||
4816 | clock.m2 = 8; | ||
4817 | } | ||
4818 | } | ||
4819 | |||
4820 | /* FDI link */ | ||
4821 | pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode); | ||
4822 | lane = 0; | ||
4823 | /* CPU eDP doesn't require FDI link, so just set DP M/N | ||
4824 | according to current link config */ | ||
4825 | if (has_edp_encoder && | ||
4826 | !intel_encoder_is_pch_edp(&has_edp_encoder->base)) { | ||
4827 | target_clock = mode->clock; | ||
4828 | intel_edp_link_config(has_edp_encoder, | ||
4829 | &lane, &link_bw); | ||
4830 | } else { | ||
4831 | /* [e]DP over FDI requires target mode clock | ||
4832 | instead of link clock */ | ||
4833 | if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base)) | ||
4834 | target_clock = mode->clock; | ||
4835 | else | ||
4836 | target_clock = adjusted_mode->clock; | ||
4837 | |||
4838 | /* FDI is a binary signal running at ~2.7GHz, encoding | ||
4839 | * each output octet as 10 bits. The actual frequency | ||
4840 | * is stored as a divider into a 100MHz clock, and the | ||
4841 | * mode pixel clock is stored in units of 1KHz. | ||
4842 | * Hence the bw of each lane in terms of the mode signal | ||
4843 | * is: | ||
4844 | */ | ||
4845 | link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10; | ||
4846 | } | ||
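Spelling out the units of the comment above (a hedged sketch: it assumes intel_fdi_link_freq() reports 27, i.e. 2.7 GHz in multiples of 100 MHz, and uses 64-bit intermediates to keep the arithmetic obviously safe):

    #include <stdio.h>

    #define MHz(x) ((long long)(x) * 1000000)
    #define KHz(x) ((long long)(x) * 1000)

    int main(void)
    {
        long long fdi_freq = 27; /* assumed: 2.7 GHz expressed in 100 MHz units */

        /* 8b/10b encoding sends each octet as 10 bits, hence the /10;
         * the result lands in the same 1 kHz units as mode->clock. */
        long long link_bw = fdi_freq * MHz(100) / KHz(1) / 10;

        printf("link_bw = %lld\n", link_bw); /* 270000 */
        return 0;
    }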
4847 | |||
4848 | /* determine panel color depth */ | ||
4849 | temp = I915_READ(PIPECONF(pipe)); | ||
4850 | temp &= ~PIPE_BPC_MASK; | ||
4851 | if (is_lvds) { | ||
4852 | /* the BPC will be 6 if it is an 18-bit LVDS panel */ | ||
4853 | if ((I915_READ(PCH_LVDS) & LVDS_A3_POWER_MASK) == LVDS_A3_POWER_UP) | ||
4854 | temp |= PIPE_8BPC; | ||
4855 | else | ||
4856 | temp |= PIPE_6BPC; | ||
4857 | } else if (has_edp_encoder) { | ||
4858 | switch (dev_priv->edp.bpp/3) { | ||
4859 | case 8: | ||
4860 | temp |= PIPE_8BPC; | ||
4861 | break; | ||
4862 | case 10: | ||
4863 | temp |= PIPE_10BPC; | ||
4864 | break; | ||
4865 | case 6: | ||
4866 | temp |= PIPE_6BPC; | ||
4867 | break; | ||
4868 | case 12: | ||
4869 | temp |= PIPE_12BPC; | ||
4870 | break; | ||
4871 | } | ||
4872 | } else | ||
4873 | temp |= PIPE_8BPC; | ||
4874 | I915_WRITE(PIPECONF(pipe), temp); | ||
4875 | |||
4876 | switch (temp & PIPE_BPC_MASK) { | ||
4877 | case PIPE_8BPC: | ||
4878 | bpp = 24; | ||
4879 | break; | ||
4880 | case PIPE_10BPC: | ||
4881 | bpp = 30; | ||
4882 | break; | ||
4883 | case PIPE_6BPC: | ||
4884 | bpp = 18; | ||
4885 | break; | ||
4886 | case PIPE_12BPC: | ||
4887 | bpp = 36; | ||
4888 | break; | ||
4889 | default: | ||
4890 | DRM_ERROR("unknown pipe bpc value\n"); | ||
4891 | bpp = 24; | ||
4892 | } | ||
4893 | |||
4894 | if (!lane) { | ||
4895 | /* | ||
4896 | * Account for spread spectrum to avoid | ||
4897 | * oversubscribing the link. Max center spread | ||
4898 | * is 2.5%; use 5% for safety's sake. | ||
4899 | */ | ||
4900 | u32 bps = target_clock * bpp * 21 / 20; | ||
4901 | lane = bps / (link_bw * 8) + 1; | ||
4902 | } | ||
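A quick numeric check of the 5% spread-spectrum margin, with hypothetical panel numbers (154 MHz dot clock, 24 bpp) and the 270000 per-lane link_bw derived above:

    #include <stdio.h>

    int main(void)
    {
        unsigned int target_clock = 154000; /* kHz, hypothetical panel */
        unsigned int bpp = 24, link_bw = 270000;

        /* 21/20 is the 5% safety margin from the comment above */
        unsigned int bps = target_clock * bpp * 21 / 20;
        unsigned int lane = bps / (link_bw * 8) + 1;

        printf("bps = %u, lanes = %u\n", bps, lane); /* 3880800, 2 */
        return 0;
    }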
4903 | |||
4904 | intel_crtc->fdi_lanes = lane; | ||
4905 | |||
4906 | if (pixel_multiplier > 1) | ||
4907 | link_bw *= pixel_multiplier; | ||
4908 | ironlake_compute_m_n(bpp, lane, target_clock, link_bw, &m_n); | ||
4909 | |||
4910 | /* Ironlake: try to setup display ref clock before DPLL | ||
4911 | * enabling. This is only under driver's control after | ||
4912 | * PCH B stepping, previous chipset stepping should be | ||
4913 | * ignoring this setting. | ||
4914 | */ | ||
4915 | temp = I915_READ(PCH_DREF_CONTROL); | ||
4916 | /* Always enable nonspread source */ | ||
4917 | temp &= ~DREF_NONSPREAD_SOURCE_MASK; | ||
4918 | temp |= DREF_NONSPREAD_SOURCE_ENABLE; | ||
4919 | temp &= ~DREF_SSC_SOURCE_MASK; | ||
4920 | temp |= DREF_SSC_SOURCE_ENABLE; | ||
4921 | I915_WRITE(PCH_DREF_CONTROL, temp); | ||
4922 | |||
4923 | POSTING_READ(PCH_DREF_CONTROL); | ||
4924 | udelay(200); | ||
4925 | |||
4926 | if (has_edp_encoder) { | ||
4927 | if (intel_panel_use_ssc(dev_priv)) { | ||
4928 | temp |= DREF_SSC1_ENABLE; | ||
4929 | I915_WRITE(PCH_DREF_CONTROL, temp); | ||
4930 | |||
4931 | POSTING_READ(PCH_DREF_CONTROL); | ||
4932 | udelay(200); | ||
4933 | } | ||
4934 | temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK; | ||
4935 | |||
4936 | /* Enable CPU source on CPU attached eDP */ | ||
4937 | if (!intel_encoder_is_pch_edp(&has_edp_encoder->base)) { | ||
4938 | if (intel_panel_use_ssc(dev_priv)) | ||
4939 | temp |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD; | ||
4940 | else | ||
4941 | temp |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD; | ||
4942 | } else { | ||
4943 | /* Enable SSC on PCH eDP if needed */ | ||
4944 | if (intel_panel_use_ssc(dev_priv)) { | ||
4945 | DRM_ERROR("enabling SSC on PCH\n"); | ||
4946 | temp |= DREF_SUPERSPREAD_SOURCE_ENABLE; | ||
4947 | } | ||
4948 | } | ||
4949 | I915_WRITE(PCH_DREF_CONTROL, temp); | ||
4950 | POSTING_READ(PCH_DREF_CONTROL); | ||
4951 | udelay(200); | ||
4952 | } | ||
4953 | |||
4954 | fp = clock.n << 16 | clock.m1 << 8 | clock.m2; | ||
4955 | if (has_reduced_clock) | ||
4956 | fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 | | ||
4957 | reduced_clock.m2; | ||
4958 | |||
4959 | /* Enable autotuning of the PLL clock (if permissible) */ | ||
4960 | factor = 21; | ||
4961 | if (is_lvds) { | ||
4962 | if ((intel_panel_use_ssc(dev_priv) && | ||
4963 | dev_priv->lvds_ssc_freq == 100) || | ||
4964 | (I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP) | ||
4965 | factor = 25; | ||
4966 | } else if (is_sdvo && is_tv) | ||
4967 | factor = 20; | ||
4968 | |||
4969 | if (clock.m1 < factor * clock.n) | ||
4970 | fp |= FP_CB_TUNE; | ||
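The autotune predicate in isolation (factor is 21 by default, 25 for 100 MHz SSC LVDS, 20 for SDVO TV, exactly as selected above; the values fed in here are illustrative only):

    #include <stdbool.h>
    #include <stdio.h>

    static bool pll_wants_autotune(int m1, int n, int factor)
    {
        /* coarse check: small feedback dividers get FP_CB_TUNE */
        return m1 < factor * n;
    }

    int main(void)
    {
        printf("%d\n", pll_wants_autotune(12, 3, 21)); /* 12 < 63 -> 1 */
        return 0;
    }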
4971 | |||
4972 | dpll = 0; | ||
4973 | |||
4974 | if (is_lvds) | ||
4975 | dpll |= DPLLB_MODE_LVDS; | ||
4976 | else | ||
4977 | dpll |= DPLLB_MODE_DAC_SERIAL; | ||
4978 | if (is_sdvo) { | ||
4979 | int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode); | ||
4980 | if (pixel_multiplier > 1) { | ||
4981 | dpll |= (pixel_multiplier - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT; | ||
4982 | } | ||
4983 | dpll |= DPLL_DVO_HIGH_SPEED; | ||
4984 | } | ||
4985 | if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base)) | ||
4986 | dpll |= DPLL_DVO_HIGH_SPEED; | ||
4987 | |||
4988 | /* compute bitmask from p1 value */ | ||
4989 | dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT; | ||
4990 | /* also FPA1 */ | ||
4991 | dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT; | ||
4992 | |||
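The P1 post divider is encoded one-hot rather than as a binary value: divider p1 sets bit (p1 - 1) of the field before it is shifted into place, here for both the FPA0 and FPA1 dividers. Illustrated with a placeholder shift (the real constants live in i915_reg.h):

    #include <stdio.h>

    #define P1_POST_DIV_SHIFT 16 /* placeholder, for illustration only */

    int main(void)
    {
        int p1;

        for (p1 = 1; p1 <= 8; p1++) {
            unsigned int field = (1u << (p1 - 1)) << P1_POST_DIV_SHIFT;
            printf("p1=%d -> 0x%08x\n", p1, field);
        }
        return 0;
    }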
4993 | switch (clock.p2) { | ||
4994 | case 5: | ||
4995 | dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5; | ||
4996 | break; | ||
4997 | case 7: | ||
4998 | dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7; | ||
4999 | break; | ||
5000 | case 10: | ||
5001 | dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10; | ||
5002 | break; | ||
5003 | case 14: | ||
5004 | dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14; | ||
5005 | break; | ||
5006 | } | ||
5007 | |||
5008 | if (is_sdvo && is_tv) | ||
5009 | dpll |= PLL_REF_INPUT_TVCLKINBC; | ||
5010 | else if (is_tv) | ||
5011 | /* XXX: just matching BIOS for now */ | ||
5012 | /* dpll |= PLL_REF_INPUT_TVCLKINBC; */ | ||
5013 | dpll |= 3; | ||
5014 | else if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) | ||
5015 | dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN; | ||
5016 | else | ||
5017 | dpll |= PLL_REF_INPUT_DREFCLK; | ||
5018 | |||
5019 | /* setup pipeconf */ | ||
5020 | pipeconf = I915_READ(PIPECONF(pipe)); | ||
5021 | |||
5022 | /* Set up the display plane register */ | ||
5023 | dspcntr = DISPPLANE_GAMMA_ENABLE; | ||
3890 | 5024 | ||
3891 | DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B'); | 5025 | DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B'); |
3892 | drm_mode_debug_printmodeline(mode); | 5026 | drm_mode_debug_printmodeline(mode); |
3893 | 5027 | ||
3894 | /* assign to Ironlake registers */ | 5028 | /* PCH eDP needs FDI, but CPU eDP does not */ |
3895 | if (HAS_PCH_SPLIT(dev)) { | 5029 | if (!has_edp_encoder || intel_encoder_is_pch_edp(&has_edp_encoder->base)) { |
3896 | fp_reg = pch_fp_reg; | 5030 | I915_WRITE(PCH_FP0(pipe), fp); |
3897 | dpll_reg = pch_dpll_reg; | 5031 | I915_WRITE(PCH_DPLL(pipe), dpll & ~DPLL_VCO_ENABLE); |
3898 | } | ||
3899 | 5032 | ||
3900 | if (!has_edp_encoder) { | 5033 | POSTING_READ(PCH_DPLL(pipe)); |
3901 | I915_WRITE(fp_reg, fp); | ||
3902 | I915_WRITE(dpll_reg, dpll & ~DPLL_VCO_ENABLE); | ||
3903 | I915_READ(dpll_reg); | ||
3904 | udelay(150); | 5034 | udelay(150); |
3905 | } | 5035 | } |
3906 | 5036 | ||
3907 | /* enable transcoder DPLL */ | 5037 | /* enable transcoder DPLL */ |
3908 | if (HAS_PCH_CPT(dev)) { | 5038 | if (HAS_PCH_CPT(dev)) { |
3909 | temp = I915_READ(PCH_DPLL_SEL); | 5039 | temp = I915_READ(PCH_DPLL_SEL); |
3910 | if (trans_dpll_sel == 0) | 5040 | switch (pipe) { |
3911 | temp |= (TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL); | 5041 | case 0: |
3912 | else | 5042 | temp |= TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL; |
3913 | temp |= (TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL); | 5043 | break; |
5044 | case 1: | ||
5045 | temp |= TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL; | ||
5046 | break; | ||
5047 | case 2: | ||
5048 | /* FIXME: manage transcoder PLLs? */ | ||
5049 | temp |= TRANSC_DPLL_ENABLE | TRANSC_DPLLB_SEL; | ||
5050 | break; | ||
5051 | default: | ||
5052 | BUG(); | ||
5053 | } | ||
3914 | I915_WRITE(PCH_DPLL_SEL, temp); | 5054 | I915_WRITE(PCH_DPLL_SEL, temp); |
3915 | I915_READ(PCH_DPLL_SEL); | ||
3916 | udelay(150); | ||
3917 | } | ||
3918 | 5055 | ||
3919 | if (HAS_PCH_SPLIT(dev)) { | 5056 | POSTING_READ(PCH_DPLL_SEL); |
3920 | pipeconf &= ~PIPE_ENABLE_DITHER; | 5057 | udelay(150); |
3921 | pipeconf &= ~PIPE_DITHER_TYPE_MASK; | ||
3922 | } | 5058 | } |
3923 | 5059 | ||
3924 | /* The LVDS pin pair needs to be on before the DPLLs are enabled. | 5060 | /* The LVDS pin pair needs to be on before the DPLLs are enabled. |
@@ -3926,105 +5062,96 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
3926 | * things on. | 5062 | * things on. |
3927 | */ | 5063 | */ |
3928 | if (is_lvds) { | 5064 | if (is_lvds) { |
3929 | u32 lvds; | 5065 | temp = I915_READ(PCH_LVDS); |
3930 | 5066 | temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP; | |
3931 | if (HAS_PCH_SPLIT(dev)) | ||
3932 | lvds_reg = PCH_LVDS; | ||
3933 | |||
3934 | lvds = I915_READ(lvds_reg); | ||
3935 | lvds |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP; | ||
3936 | if (pipe == 1) { | 5067 | if (pipe == 1) { |
3937 | if (HAS_PCH_CPT(dev)) | 5068 | if (HAS_PCH_CPT(dev)) |
3938 | lvds |= PORT_TRANS_B_SEL_CPT; | 5069 | temp |= PORT_TRANS_B_SEL_CPT; |
3939 | else | 5070 | else |
3940 | lvds |= LVDS_PIPEB_SELECT; | 5071 | temp |= LVDS_PIPEB_SELECT; |
3941 | } else { | 5072 | } else { |
3942 | if (HAS_PCH_CPT(dev)) | 5073 | if (HAS_PCH_CPT(dev)) |
3943 | lvds &= ~PORT_TRANS_SEL_MASK; | 5074 | temp &= ~PORT_TRANS_SEL_MASK; |
3944 | else | 5075 | else |
3945 | lvds &= ~LVDS_PIPEB_SELECT; | 5076 | temp &= ~LVDS_PIPEB_SELECT; |
3946 | } | 5077 | } |
3947 | /* set the corresponding LVDS_BORDER bit */ | 5078 | /* set the corresponding LVDS_BORDER bit */ |
3948 | lvds |= dev_priv->lvds_border_bits; | 5079 | temp |= dev_priv->lvds_border_bits; |
3949 | /* Set the B0-B3 data pairs corresponding to whether we're going to | 5080 | /* Set the B0-B3 data pairs corresponding to whether we're going to |
3950 | * set the DPLLs for dual-channel mode or not. | 5081 | * set the DPLLs for dual-channel mode or not. |
3951 | */ | 5082 | */ |
3952 | if (clock.p2 == 7) | 5083 | if (clock.p2 == 7) |
3953 | lvds |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP; | 5084 | temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP; |
3954 | else | 5085 | else |
3955 | lvds &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP); | 5086 | temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP); |
3956 | 5087 | ||
3957 | /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP) | 5088 | /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP) |
3958 | * appropriately here, but we need to look more thoroughly into how | 5089 | * appropriately here, but we need to look more thoroughly into how |
3959 | * panels behave in the two modes. | 5090 | * panels behave in the two modes. |
3960 | */ | 5091 | */ |
3961 | /* set the dithering flag */ | 5092 | if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC) |
3962 | if (IS_I965G(dev)) { | 5093 | lvds_sync |= LVDS_HSYNC_POLARITY; |
3963 | if (dev_priv->lvds_dither) { | 5094 | if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC) |
3964 | if (HAS_PCH_SPLIT(dev)) { | 5095 | lvds_sync |= LVDS_VSYNC_POLARITY; |
3965 | pipeconf |= PIPE_ENABLE_DITHER; | 5096 | if ((temp & (LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY)) |
3966 | pipeconf |= PIPE_DITHER_TYPE_ST01; | 5097 | != lvds_sync) { |
3967 | } else | 5098 | char flags[2] = "-+"; |
3968 | lvds |= LVDS_ENABLE_DITHER; | 5099 | DRM_INFO("Changing LVDS panel from " |
3969 | } else { | 5100 | "(%chsync, %cvsync) to (%chsync, %cvsync)\n", |
3970 | if (!HAS_PCH_SPLIT(dev)) { | 5101 | flags[!(temp & LVDS_HSYNC_POLARITY)], |
3971 | lvds &= ~LVDS_ENABLE_DITHER; | 5102 | flags[!(temp & LVDS_VSYNC_POLARITY)], |
3972 | } | 5103 | flags[!(lvds_sync & LVDS_HSYNC_POLARITY)], |
3973 | } | 5104 | flags[!(lvds_sync & LVDS_VSYNC_POLARITY)]); |
5105 | temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY); | ||
5106 | temp |= lvds_sync; | ||
3974 | } | 5107 | } |
3975 | I915_WRITE(lvds_reg, lvds); | 5108 | I915_WRITE(PCH_LVDS, temp); |
3976 | I915_READ(lvds_reg); | ||
3977 | } | 5109 | } |
3978 | if (is_dp) | 5110 | |
5111 | /* set the dithering flag and clear for anything other than a panel. */ | ||
5112 | pipeconf &= ~PIPECONF_DITHER_EN; | ||
5113 | pipeconf &= ~PIPECONF_DITHER_TYPE_MASK; | ||
5114 | if (dev_priv->lvds_dither && (is_lvds || has_edp_encoder)) { | ||
5115 | pipeconf |= PIPECONF_DITHER_EN; | ||
5116 | pipeconf |= PIPECONF_DITHER_TYPE_ST1; | ||
5117 | } | ||
5118 | |||
5119 | if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base)) { | ||
3979 | intel_dp_set_m_n(crtc, mode, adjusted_mode); | 5120 | intel_dp_set_m_n(crtc, mode, adjusted_mode); |
3980 | else if (HAS_PCH_SPLIT(dev)) { | 5121 | } else { |
3981 | /* For non-DP output, clear any trans DP clock recovery setting.*/ | 5122 | /* For non-DP output, clear any trans DP clock recovery setting.*/ |
3982 | if (pipe == 0) { | 5123 | I915_WRITE(TRANSDATA_M1(pipe), 0); |
3983 | I915_WRITE(TRANSA_DATA_M1, 0); | 5124 | I915_WRITE(TRANSDATA_N1(pipe), 0); |
3984 | I915_WRITE(TRANSA_DATA_N1, 0); | 5125 | I915_WRITE(TRANSDPLINK_M1(pipe), 0); |
3985 | I915_WRITE(TRANSA_DP_LINK_M1, 0); | 5126 | I915_WRITE(TRANSDPLINK_N1(pipe), 0); |
3986 | I915_WRITE(TRANSA_DP_LINK_N1, 0); | ||
3987 | } else { | ||
3988 | I915_WRITE(TRANSB_DATA_M1, 0); | ||
3989 | I915_WRITE(TRANSB_DATA_N1, 0); | ||
3990 | I915_WRITE(TRANSB_DP_LINK_M1, 0); | ||
3991 | I915_WRITE(TRANSB_DP_LINK_N1, 0); | ||
3992 | } | ||
3993 | } | 5127 | } |
3994 | 5128 | ||
3995 | if (!has_edp_encoder) { | 5129 | if (!has_edp_encoder || |
3996 | I915_WRITE(fp_reg, fp); | 5130 | intel_encoder_is_pch_edp(&has_edp_encoder->base)) { |
3997 | I915_WRITE(dpll_reg, dpll); | 5131 | I915_WRITE(PCH_DPLL(pipe), dpll); |
3998 | I915_READ(dpll_reg); | ||
3999 | /* Wait for the clocks to stabilize. */ | ||
4000 | udelay(150); | ||
4001 | 5132 | ||
4002 | if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev)) { | ||
4003 | if (is_sdvo) { | ||
4004 | sdvo_pixel_multiply = adjusted_mode->clock / mode->clock; | ||
4005 | I915_WRITE(dpll_md_reg, (0 << DPLL_MD_UDI_DIVIDER_SHIFT) | | ||
4006 | ((sdvo_pixel_multiply - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT)); | ||
4007 | } else | ||
4008 | I915_WRITE(dpll_md_reg, 0); | ||
4009 | } else { | ||
4010 | /* write it again -- the BIOS does, after all */ | ||
4011 | I915_WRITE(dpll_reg, dpll); | ||
4012 | } | ||
4013 | I915_READ(dpll_reg); | ||
4014 | /* Wait for the clocks to stabilize. */ | 5133 | /* Wait for the clocks to stabilize. */ |
5134 | POSTING_READ(PCH_DPLL(pipe)); | ||
4015 | udelay(150); | 5135 | udelay(150); |
5136 | |||
5137 | /* The pixel multiplier can only be updated once the | ||
5138 | * DPLL is enabled and the clocks are stable. | ||
5139 | * | ||
5140 | * So write it again. | ||
5141 | */ | ||
5142 | I915_WRITE(PCH_DPLL(pipe), dpll); | ||
4016 | } | 5143 | } |
4017 | 5144 | ||
5145 | intel_crtc->lowfreq_avail = false; | ||
4018 | if (is_lvds && has_reduced_clock && i915_powersave) { | 5146 | if (is_lvds && has_reduced_clock && i915_powersave) { |
4019 | I915_WRITE(fp_reg + 4, fp2); | 5147 | I915_WRITE(PCH_FP1(pipe), fp2); |
4020 | intel_crtc->lowfreq_avail = true; | 5148 | intel_crtc->lowfreq_avail = true; |
4021 | if (HAS_PIPE_CXSR(dev)) { | 5149 | if (HAS_PIPE_CXSR(dev)) { |
4022 | DRM_DEBUG_KMS("enabling CxSR downclocking\n"); | 5150 | DRM_DEBUG_KMS("enabling CxSR downclocking\n"); |
4023 | pipeconf |= PIPECONF_CXSR_DOWNCLOCK; | 5151 | pipeconf |= PIPECONF_CXSR_DOWNCLOCK; |
4024 | } | 5152 | } |
4025 | } else { | 5153 | } else { |
4026 | I915_WRITE(fp_reg + 4, fp); | 5154 | I915_WRITE(PCH_FP1(pipe), fp); |
4027 | intel_crtc->lowfreq_avail = false; | ||
4028 | if (HAS_PIPE_CXSR(dev)) { | 5155 | if (HAS_PIPE_CXSR(dev)) { |
4029 | DRM_DEBUG_KMS("disabling CxSR downclocking\n"); | 5156 | DRM_DEBUG_KMS("disabling CxSR downclocking\n"); |
4030 | pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK; | 5157 | pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK; |
@@ -4043,74 +5170,80 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
4043 | } else | 5170 | } else |
4044 | pipeconf &= ~PIPECONF_INTERLACE_W_FIELD_INDICATION; /* progressive */ | 5171 | pipeconf &= ~PIPECONF_INTERLACE_W_FIELD_INDICATION; /* progressive */ |
4045 | 5172 | ||
4046 | I915_WRITE(htot_reg, (adjusted_mode->crtc_hdisplay - 1) | | 5173 | I915_WRITE(HTOTAL(pipe), |
5174 | (adjusted_mode->crtc_hdisplay - 1) | | ||
4047 | ((adjusted_mode->crtc_htotal - 1) << 16)); | 5175 | ((adjusted_mode->crtc_htotal - 1) << 16)); |
4048 | I915_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - 1) | | 5176 | I915_WRITE(HBLANK(pipe), |
5177 | (adjusted_mode->crtc_hblank_start - 1) | | ||
4049 | ((adjusted_mode->crtc_hblank_end - 1) << 16)); | 5178 | ((adjusted_mode->crtc_hblank_end - 1) << 16)); |
4050 | I915_WRITE(hsync_reg, (adjusted_mode->crtc_hsync_start - 1) | | 5179 | I915_WRITE(HSYNC(pipe), |
5180 | (adjusted_mode->crtc_hsync_start - 1) | | ||
4051 | ((adjusted_mode->crtc_hsync_end - 1) << 16)); | 5181 | ((adjusted_mode->crtc_hsync_end - 1) << 16)); |
4052 | I915_WRITE(vtot_reg, (adjusted_mode->crtc_vdisplay - 1) | | 5182 | |
5183 | I915_WRITE(VTOTAL(pipe), | ||
5184 | (adjusted_mode->crtc_vdisplay - 1) | | ||
4053 | ((adjusted_mode->crtc_vtotal - 1) << 16)); | 5185 | ((adjusted_mode->crtc_vtotal - 1) << 16)); |
4054 | I915_WRITE(vblank_reg, (adjusted_mode->crtc_vblank_start - 1) | | 5186 | I915_WRITE(VBLANK(pipe), |
5187 | (adjusted_mode->crtc_vblank_start - 1) | | ||
4055 | ((adjusted_mode->crtc_vblank_end - 1) << 16)); | 5188 | ((adjusted_mode->crtc_vblank_end - 1) << 16)); |
4056 | I915_WRITE(vsync_reg, (adjusted_mode->crtc_vsync_start - 1) | | 5189 | I915_WRITE(VSYNC(pipe), |
5190 | (adjusted_mode->crtc_vsync_start - 1) | | ||
4057 | ((adjusted_mode->crtc_vsync_end - 1) << 16)); | 5191 | ((adjusted_mode->crtc_vsync_end - 1) << 16)); |
4058 | /* pipesrc and dspsize control the size that is scaled from, which should | 5192 | |
5193 | /* pipesrc controls the size that is scaled from, which should | ||
4059 | * always be the user's requested size. | 5194 | * always be the user's requested size. |
4060 | */ | 5195 | */ |
4061 | if (!HAS_PCH_SPLIT(dev)) { | 5196 | I915_WRITE(PIPESRC(pipe), |
4062 | I915_WRITE(dspsize_reg, ((mode->vdisplay - 1) << 16) | | 5197 | ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1)); |
4063 | (mode->hdisplay - 1)); | ||
4064 | I915_WRITE(dsppos_reg, 0); | ||
4065 | } | ||
4066 | I915_WRITE(pipesrc_reg, ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1)); | ||
4067 | |||
4068 | if (HAS_PCH_SPLIT(dev)) { | ||
4069 | I915_WRITE(data_m1_reg, TU_SIZE(m_n.tu) | m_n.gmch_m); | ||
4070 | I915_WRITE(data_n1_reg, TU_SIZE(m_n.tu) | m_n.gmch_n); | ||
4071 | I915_WRITE(link_m1_reg, m_n.link_m); | ||
4072 | I915_WRITE(link_n1_reg, m_n.link_n); | ||
4073 | |||
4074 | if (has_edp_encoder) { | ||
4075 | ironlake_set_pll_edp(crtc, adjusted_mode->clock); | ||
4076 | } else { | ||
4077 | /* enable FDI RX PLL too */ | ||
4078 | temp = I915_READ(fdi_rx_reg); | ||
4079 | I915_WRITE(fdi_rx_reg, temp | FDI_RX_PLL_ENABLE); | ||
4080 | I915_READ(fdi_rx_reg); | ||
4081 | udelay(200); | ||
4082 | 5198 | ||
4083 | /* enable FDI TX PLL too */ | 5199 | I915_WRITE(PIPE_DATA_M1(pipe), TU_SIZE(m_n.tu) | m_n.gmch_m); |
4084 | temp = I915_READ(fdi_tx_reg); | 5200 | I915_WRITE(PIPE_DATA_N1(pipe), m_n.gmch_n); |
4085 | I915_WRITE(fdi_tx_reg, temp | FDI_TX_PLL_ENABLE); | 5201 | I915_WRITE(PIPE_LINK_M1(pipe), m_n.link_m); |
4086 | I915_READ(fdi_tx_reg); | 5202 | I915_WRITE(PIPE_LINK_N1(pipe), m_n.link_n); |
4087 | 5203 | ||
4088 | /* enable FDI RX PCDCLK */ | 5204 | if (has_edp_encoder && |
4089 | temp = I915_READ(fdi_rx_reg); | 5205 | !intel_encoder_is_pch_edp(&has_edp_encoder->base)) { |
4090 | I915_WRITE(fdi_rx_reg, temp | FDI_SEL_PCDCLK); | 5206 | ironlake_set_pll_edp(crtc, adjusted_mode->clock); |
4091 | I915_READ(fdi_rx_reg); | ||
4092 | udelay(200); | ||
4093 | } | ||
4094 | } | 5207 | } |
4095 | 5208 | ||
4096 | I915_WRITE(pipeconf_reg, pipeconf); | 5209 | I915_WRITE(PIPECONF(pipe), pipeconf); |
4097 | I915_READ(pipeconf_reg); | 5210 | POSTING_READ(PIPECONF(pipe)); |
4098 | 5211 | ||
4099 | intel_wait_for_vblank(dev, pipe); | 5212 | intel_wait_for_vblank(dev, pipe); |
4100 | 5213 | ||
4101 | if (IS_IRONLAKE(dev)) { | 5214 | if (IS_GEN5(dev)) { |
4102 | /* enable address swizzle for tiling buffer */ | 5215 | /* enable address swizzle for tiling buffer */ |
4103 | temp = I915_READ(DISP_ARB_CTL); | 5216 | temp = I915_READ(DISP_ARB_CTL); |
4104 | I915_WRITE(DISP_ARB_CTL, temp | DISP_TILE_SURFACE_SWIZZLING); | 5217 | I915_WRITE(DISP_ARB_CTL, temp | DISP_TILE_SURFACE_SWIZZLING); |
4105 | } | 5218 | } |
4106 | 5219 | ||
4107 | I915_WRITE(dspcntr_reg, dspcntr); | 5220 | I915_WRITE(DSPCNTR(plane), dspcntr); |
5221 | POSTING_READ(DSPCNTR(plane)); | ||
4108 | 5222 | ||
4109 | /* Flush the plane changes */ | ||
4110 | ret = intel_pipe_set_base(crtc, x, y, old_fb); | 5223 | ret = intel_pipe_set_base(crtc, x, y, old_fb); |
4111 | 5224 | ||
4112 | intel_update_watermarks(dev); | 5225 | intel_update_watermarks(dev); |
4113 | 5226 | ||
5227 | return ret; | ||
5228 | } | ||
5229 | |||
5230 | static int intel_crtc_mode_set(struct drm_crtc *crtc, | ||
5231 | struct drm_display_mode *mode, | ||
5232 | struct drm_display_mode *adjusted_mode, | ||
5233 | int x, int y, | ||
5234 | struct drm_framebuffer *old_fb) | ||
5235 | { | ||
5236 | struct drm_device *dev = crtc->dev; | ||
5237 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
5238 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
5239 | int pipe = intel_crtc->pipe; | ||
5240 | int ret; | ||
5241 | |||
5242 | drm_vblank_pre_modeset(dev, pipe); | ||
5243 | |||
5244 | ret = dev_priv->display.crtc_mode_set(crtc, mode, adjusted_mode, | ||
5245 | x, y, old_fb); | ||
5246 | |||
4114 | drm_vblank_post_modeset(dev, pipe); | 5247 | drm_vblank_post_modeset(dev, pipe); |
4115 | 5248 | ||
4116 | return ret; | 5249 | return ret; |
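After this refactor the entry point only brackets the vblank counters; the heavy lifting goes through a per-platform hook. A standalone illustration of the dispatch pattern (the real hook is assigned during display init elsewhere in this patch; the names below are stand-ins):

    #include <stdio.h>

    struct display_funcs {
        int (*crtc_mode_set)(void);
    };

    static int i9xx_path(void)     { puts("i9xx mode set");     return 0; }
    static int ironlake_path(void) { puts("ironlake mode set"); return 0; }

    int main(void)
    {
        struct display_funcs display;
        int has_pch_split = 1; /* pretend we probed an Ironlake part */

        display.crtc_mode_set = has_pch_split ? ironlake_path : i9xx_path;
        return display.crtc_mode_set();
    }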
@@ -4122,7 +5255,7 @@ void intel_crtc_load_lut(struct drm_crtc *crtc) | |||
4122 | struct drm_device *dev = crtc->dev; | 5255 | struct drm_device *dev = crtc->dev; |
4123 | struct drm_i915_private *dev_priv = dev->dev_private; | 5256 | struct drm_i915_private *dev_priv = dev->dev_private; |
4124 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 5257 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
4125 | int palreg = (intel_crtc->pipe == 0) ? PALETTE_A : PALETTE_B; | 5258 | int palreg = PALETTE(intel_crtc->pipe); |
4126 | int i; | 5259 | int i; |
4127 | 5260 | ||
4128 | /* The clocks have to be on to load the palette. */ | 5261 | /* The clocks have to be on to load the palette. */ |
@@ -4131,8 +5264,7 @@ void intel_crtc_load_lut(struct drm_crtc *crtc) | |||
4131 | 5264 | ||
4132 | /* use legacy palette for Ironlake */ | 5265 | /* use legacy palette for Ironlake */ |
4133 | if (HAS_PCH_SPLIT(dev)) | 5266 | if (HAS_PCH_SPLIT(dev)) |
4134 | palreg = (intel_crtc->pipe == 0) ? LGC_PALETTE_A : | 5267 | palreg = LGC_PALETTE(intel_crtc->pipe); |
4135 | LGC_PALETTE_B; | ||
4136 | 5268 | ||
4137 | for (i = 0; i < 256; i++) { | 5269 | for (i = 0; i < 256; i++) { |
4138 | I915_WRITE(palreg + 4 * i, | 5270 | I915_WRITE(palreg + 4 * i, |
@@ -4153,12 +5285,12 @@ static void i845_update_cursor(struct drm_crtc *crtc, u32 base) | |||
4153 | if (intel_crtc->cursor_visible == visible) | 5285 | if (intel_crtc->cursor_visible == visible) |
4154 | return; | 5286 | return; |
4155 | 5287 | ||
4156 | cntl = I915_READ(CURACNTR); | 5288 | cntl = I915_READ(_CURACNTR); |
4157 | if (visible) { | 5289 | if (visible) { |
4158 | /* On these chipsets we can only modify the base whilst | 5290 | /* On these chipsets we can only modify the base whilst |
4159 | * the cursor is disabled. | 5291 | * the cursor is disabled. |
4160 | */ | 5292 | */ |
4161 | I915_WRITE(CURABASE, base); | 5293 | I915_WRITE(_CURABASE, base); |
4162 | 5294 | ||
4163 | cntl &= ~(CURSOR_FORMAT_MASK); | 5295 | cntl &= ~(CURSOR_FORMAT_MASK); |
4164 | /* XXX width must be 64, stride 256 => 0x00 << 28 */ | 5296 | /* XXX width must be 64, stride 256 => 0x00 << 28 */ |
@@ -4167,7 +5299,7 @@ static void i845_update_cursor(struct drm_crtc *crtc, u32 base) | |||
4167 | CURSOR_FORMAT_ARGB; | 5299 | CURSOR_FORMAT_ARGB; |
4168 | } else | 5300 | } else |
4169 | cntl &= ~(CURSOR_ENABLE | CURSOR_GAMMA_ENABLE); | 5301 | cntl &= ~(CURSOR_ENABLE | CURSOR_GAMMA_ENABLE); |
4170 | I915_WRITE(CURACNTR, cntl); | 5302 | I915_WRITE(_CURACNTR, cntl); |
4171 | 5303 | ||
4172 | intel_crtc->cursor_visible = visible; | 5304 | intel_crtc->cursor_visible = visible; |
4173 | } | 5305 | } |
@@ -4181,7 +5313,7 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base) | |||
4181 | bool visible = base != 0; | 5313 | bool visible = base != 0; |
4182 | 5314 | ||
4183 | if (intel_crtc->cursor_visible != visible) { | 5315 | if (intel_crtc->cursor_visible != visible) { |
4184 | uint32_t cntl = I915_READ(pipe == 0 ? CURACNTR : CURBCNTR); | 5316 | uint32_t cntl = I915_READ(CURCNTR(pipe)); |
4185 | if (base) { | 5317 | if (base) { |
4186 | cntl &= ~(CURSOR_MODE | MCURSOR_PIPE_SELECT); | 5318 | cntl &= ~(CURSOR_MODE | MCURSOR_PIPE_SELECT); |
4187 | cntl |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE; | 5319 | cntl |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE; |
@@ -4190,16 +5322,17 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base) | |||
4190 | cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE); | 5322 | cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE); |
4191 | cntl |= CURSOR_MODE_DISABLE; | 5323 | cntl |= CURSOR_MODE_DISABLE; |
4192 | } | 5324 | } |
4193 | I915_WRITE(pipe == 0 ? CURACNTR : CURBCNTR, cntl); | 5325 | I915_WRITE(CURCNTR(pipe), cntl); |
4194 | 5326 | ||
4195 | intel_crtc->cursor_visible = visible; | 5327 | intel_crtc->cursor_visible = visible; |
4196 | } | 5328 | } |
4197 | /* and commit changes on next vblank */ | 5329 | /* and commit changes on next vblank */ |
4198 | I915_WRITE(pipe == 0 ? CURABASE : CURBBASE, base); | 5330 | I915_WRITE(CURBASE(pipe), base); |
4199 | } | 5331 | } |
4200 | 5332 | ||
4201 | /* If no part of the cursor is visible on the framebuffer, then the GPU may hang... */ | 5333 | /* If no part of the cursor is visible on the framebuffer, then the GPU may hang... */ |
4202 | static void intel_crtc_update_cursor(struct drm_crtc *crtc) | 5334 | static void intel_crtc_update_cursor(struct drm_crtc *crtc, |
5335 | bool on) | ||
4203 | { | 5336 | { |
4204 | struct drm_device *dev = crtc->dev; | 5337 | struct drm_device *dev = crtc->dev; |
4205 | struct drm_i915_private *dev_priv = dev->dev_private; | 5338 | struct drm_i915_private *dev_priv = dev->dev_private; |
@@ -4212,7 +5345,7 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc) | |||
4212 | 5345 | ||
4213 | pos = 0; | 5346 | pos = 0; |
4214 | 5347 | ||
4215 | if (intel_crtc->cursor_on && crtc->fb) { | 5348 | if (on && crtc->enabled && crtc->fb) { |
4216 | base = intel_crtc->cursor_addr; | 5349 | base = intel_crtc->cursor_addr; |
4217 | if (x > (int) crtc->fb->width) | 5350 | if (x > (int) crtc->fb->width) |
4218 | base = 0; | 5351 | base = 0; |
@@ -4244,7 +5377,7 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc) | |||
4244 | if (!visible && !intel_crtc->cursor_visible) | 5377 | if (!visible && !intel_crtc->cursor_visible) |
4245 | return; | 5378 | return; |
4246 | 5379 | ||
4247 | I915_WRITE(pipe == 0 ? CURAPOS : CURBPOS, pos); | 5380 | I915_WRITE(CURPOS(pipe), pos); |
4248 | if (IS_845G(dev) || IS_I865G(dev)) | 5381 | if (IS_845G(dev) || IS_I865G(dev)) |
4249 | i845_update_cursor(crtc, base); | 5382 | i845_update_cursor(crtc, base); |
4250 | else | 5383 | else |
@@ -4255,15 +5388,14 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc) | |||
4255 | } | 5388 | } |
4256 | 5389 | ||
4257 | static int intel_crtc_cursor_set(struct drm_crtc *crtc, | 5390 | static int intel_crtc_cursor_set(struct drm_crtc *crtc, |
4258 | struct drm_file *file_priv, | 5391 | struct drm_file *file, |
4259 | uint32_t handle, | 5392 | uint32_t handle, |
4260 | uint32_t width, uint32_t height) | 5393 | uint32_t width, uint32_t height) |
4261 | { | 5394 | { |
4262 | struct drm_device *dev = crtc->dev; | 5395 | struct drm_device *dev = crtc->dev; |
4263 | struct drm_i915_private *dev_priv = dev->dev_private; | 5396 | struct drm_i915_private *dev_priv = dev->dev_private; |
4264 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 5397 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
4265 | struct drm_gem_object *bo; | 5398 | struct drm_i915_gem_object *obj; |
4266 | struct drm_i915_gem_object *obj_priv; | ||
4267 | uint32_t addr; | 5399 | uint32_t addr; |
4268 | int ret; | 5400 | int ret; |
4269 | 5401 | ||
@@ -4273,7 +5405,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, | |||
4273 | if (!handle) { | 5405 | if (!handle) { |
4274 | DRM_DEBUG_KMS("cursor off\n"); | 5406 | DRM_DEBUG_KMS("cursor off\n"); |
4275 | addr = 0; | 5407 | addr = 0; |
4276 | bo = NULL; | 5408 | obj = NULL; |
4277 | mutex_lock(&dev->struct_mutex); | 5409 | mutex_lock(&dev->struct_mutex); |
4278 | goto finish; | 5410 | goto finish; |
4279 | } | 5411 | } |
@@ -4284,13 +5416,11 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, | |||
4284 | return -EINVAL; | 5416 | return -EINVAL; |
4285 | } | 5417 | } |
4286 | 5418 | ||
4287 | bo = drm_gem_object_lookup(dev, file_priv, handle); | 5419 | obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle)); |
4288 | if (!bo) | 5420 | if (&obj->base == NULL) |
4289 | return -ENOENT; | 5421 | return -ENOENT; |
4290 | 5422 | ||
4291 | obj_priv = to_intel_bo(bo); | 5423 | if (obj->base.size < width * height * 4) { |
4292 | |||
4293 | if (bo->size < width * height * 4) { | ||
4294 | DRM_ERROR("buffer is to small\n"); | 5424 | DRM_ERROR("buffer is to small\n"); |
4295 | ret = -ENOMEM; | 5425 | ret = -ENOMEM; |
4296 | goto fail; | 5426 | goto fail; |
@@ -4299,60 +5429,72 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, | |||
4299 | /* we only need to pin inside GTT if cursor is non-phy */ | 5429 | /* we only need to pin inside GTT if cursor is non-phy */ |
4300 | mutex_lock(&dev->struct_mutex); | 5430 | mutex_lock(&dev->struct_mutex); |
4301 | if (!dev_priv->info->cursor_needs_physical) { | 5431 | if (!dev_priv->info->cursor_needs_physical) { |
4302 | ret = i915_gem_object_pin(bo, PAGE_SIZE); | 5432 | if (obj->tiling_mode) { |
5433 | DRM_ERROR("cursor cannot be tiled\n"); | ||
5434 | ret = -EINVAL; | ||
5435 | goto fail_locked; | ||
5436 | } | ||
5437 | |||
5438 | ret = i915_gem_object_pin(obj, PAGE_SIZE, true); | ||
4303 | if (ret) { | 5439 | if (ret) { |
4304 | DRM_ERROR("failed to pin cursor bo\n"); | 5440 | DRM_ERROR("failed to pin cursor bo\n"); |
4305 | goto fail_locked; | 5441 | goto fail_locked; |
4306 | } | 5442 | } |
4307 | 5443 | ||
4308 | ret = i915_gem_object_set_to_gtt_domain(bo, 0); | 5444 | ret = i915_gem_object_set_to_gtt_domain(obj, 0); |
5445 | if (ret) { | ||
5446 | DRM_ERROR("failed to move cursor bo into the GTT\n"); | ||
5447 | goto fail_unpin; | ||
5448 | } | ||
5449 | |||
5450 | ret = i915_gem_object_put_fence(obj); | ||
4309 | if (ret) { | 5451 | if (ret) { |
4310 | DRM_ERROR("failed to move cursor bo into the GTT\n"); | 5452 | DRM_ERROR("failed to move cursor bo into the GTT\n"); |
4311 | goto fail_unpin; | 5453 | goto fail_unpin; |
4312 | } | 5454 | } |
4313 | 5455 | ||
4314 | addr = obj_priv->gtt_offset; | 5456 | addr = obj->gtt_offset; |
4315 | } else { | 5457 | } else { |
4316 | int align = IS_I830(dev) ? 16 * 1024 : 256; | 5458 | int align = IS_I830(dev) ? 16 * 1024 : 256; |
4317 | ret = i915_gem_attach_phys_object(dev, bo, | 5459 | ret = i915_gem_attach_phys_object(dev, obj, |
4318 | (intel_crtc->pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1, | 5460 | (intel_crtc->pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1, |
4319 | align); | 5461 | align); |
4320 | if (ret) { | 5462 | if (ret) { |
4321 | DRM_ERROR("failed to attach phys object\n"); | 5463 | DRM_ERROR("failed to attach phys object\n"); |
4322 | goto fail_locked; | 5464 | goto fail_locked; |
4323 | } | 5465 | } |
4324 | addr = obj_priv->phys_obj->handle->busaddr; | 5466 | addr = obj->phys_obj->handle->busaddr; |
4325 | } | 5467 | } |
4326 | 5468 | ||
4327 | if (!IS_I9XX(dev)) | 5469 | if (IS_GEN2(dev)) |
4328 | I915_WRITE(CURSIZE, (height << 12) | width); | 5470 | I915_WRITE(CURSIZE, (height << 12) | width); |
4329 | 5471 | ||
4330 | finish: | 5472 | finish: |
4331 | if (intel_crtc->cursor_bo) { | 5473 | if (intel_crtc->cursor_bo) { |
4332 | if (dev_priv->info->cursor_needs_physical) { | 5474 | if (dev_priv->info->cursor_needs_physical) { |
4333 | if (intel_crtc->cursor_bo != bo) | 5475 | if (intel_crtc->cursor_bo != obj) |
4334 | i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo); | 5476 | i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo); |
4335 | } else | 5477 | } else |
4336 | i915_gem_object_unpin(intel_crtc->cursor_bo); | 5478 | i915_gem_object_unpin(intel_crtc->cursor_bo); |
4337 | drm_gem_object_unreference(intel_crtc->cursor_bo); | 5479 | drm_gem_object_unreference(&intel_crtc->cursor_bo->base); |
4338 | } | 5480 | } |
4339 | 5481 | ||
4340 | mutex_unlock(&dev->struct_mutex); | 5482 | mutex_unlock(&dev->struct_mutex); |
4341 | 5483 | ||
4342 | intel_crtc->cursor_addr = addr; | 5484 | intel_crtc->cursor_addr = addr; |
4343 | intel_crtc->cursor_bo = bo; | 5485 | intel_crtc->cursor_bo = obj; |
4344 | intel_crtc->cursor_width = width; | 5486 | intel_crtc->cursor_width = width; |
4345 | intel_crtc->cursor_height = height; | 5487 | intel_crtc->cursor_height = height; |
4346 | 5488 | ||
4347 | intel_crtc_update_cursor(crtc); | 5489 | intel_crtc_update_cursor(crtc, true); |
4348 | 5490 | ||
4349 | return 0; | 5491 | return 0; |
4350 | fail_unpin: | 5492 | fail_unpin: |
4351 | i915_gem_object_unpin(bo); | 5493 | i915_gem_object_unpin(obj); |
4352 | fail_locked: | 5494 | fail_locked: |
4353 | mutex_unlock(&dev->struct_mutex); | 5495 | mutex_unlock(&dev->struct_mutex); |
4354 | fail: | 5496 | fail: |
4355 | drm_gem_object_unreference_unlocked(bo); | 5497 | drm_gem_object_unreference_unlocked(&obj->base); |
4356 | return ret; | 5498 | return ret; |
4357 | } | 5499 | } |
4358 | 5500 | ||
@@ -4363,7 +5505,7 @@ static int intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y) | |||
4363 | intel_crtc->cursor_x = x; | 5505 | intel_crtc->cursor_x = x; |
4364 | intel_crtc->cursor_y = y; | 5506 | intel_crtc->cursor_y = y; |
4365 | 5507 | ||
4366 | intel_crtc_update_cursor(crtc); | 5508 | intel_crtc_update_cursor(crtc, true); |
4367 | 5509 | ||
4368 | return 0; | 5510 | return 0; |
4369 | } | 5511 | } |
@@ -4424,43 +5566,140 @@ static struct drm_display_mode load_detect_mode = { | |||
4424 | 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), | 5566 | 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), |
4425 | }; | 5567 | }; |
4426 | 5568 | ||
4427 | struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder, | 5569 | static struct drm_framebuffer * |
4428 | struct drm_connector *connector, | 5570 | intel_framebuffer_create(struct drm_device *dev, |
4429 | struct drm_display_mode *mode, | 5571 | struct drm_mode_fb_cmd *mode_cmd, |
4430 | int *dpms_mode) | 5572 | struct drm_i915_gem_object *obj) |
5573 | { | ||
5574 | struct intel_framebuffer *intel_fb; | ||
5575 | int ret; | ||
5576 | |||
5577 | intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL); | ||
5578 | if (!intel_fb) { | ||
5579 | drm_gem_object_unreference_unlocked(&obj->base); | ||
5580 | return ERR_PTR(-ENOMEM); | ||
5581 | } | ||
5582 | |||
5583 | ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj); | ||
5584 | if (ret) { | ||
5585 | drm_gem_object_unreference_unlocked(&obj->base); | ||
5586 | kfree(intel_fb); | ||
5587 | return ERR_PTR(ret); | ||
5588 | } | ||
5589 | |||
5590 | return &intel_fb->base; | ||
5591 | } | ||
5592 | |||
5593 | static u32 | ||
5594 | intel_framebuffer_pitch_for_width(int width, int bpp) | ||
5595 | { | ||
5596 | u32 pitch = DIV_ROUND_UP(width * bpp, 8); | ||
5597 | return ALIGN(pitch, 64); | ||
5598 | } | ||
5599 | |||
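Worked through once: a hypothetical 1366-pixel-wide mode at 32 bpp needs DIV_ROUND_UP(1366 * 32, 8) = 5464 bytes per row, which ALIGN rounds up to 5504, the next multiple of 64:

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
    #define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1)) /* a: power of two */

    int main(void)
    {
        int width = 1366, bpp = 32; /* hypothetical mode */
        unsigned int pitch = ALIGN(DIV_ROUND_UP(width * bpp, 8), 64);

        printf("pitch = %u\n", pitch); /* 5504 */
        return 0;
    }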
5600 | static u32 | ||
5601 | intel_framebuffer_size_for_mode(struct drm_display_mode *mode, int bpp) | ||
5602 | { | ||
5603 | u32 pitch = intel_framebuffer_pitch_for_width(mode->hdisplay, bpp); | ||
5604 | return ALIGN(pitch * mode->vdisplay, PAGE_SIZE); | ||
5605 | } | ||
5606 | |||
5607 | static struct drm_framebuffer * | ||
5608 | intel_framebuffer_create_for_mode(struct drm_device *dev, | ||
5609 | struct drm_display_mode *mode, | ||
5610 | int depth, int bpp) | ||
5611 | { | ||
5612 | struct drm_i915_gem_object *obj; | ||
5613 | struct drm_mode_fb_cmd mode_cmd; | ||
5614 | |||
5615 | obj = i915_gem_alloc_object(dev, | ||
5616 | intel_framebuffer_size_for_mode(mode, bpp)); | ||
5617 | if (obj == NULL) | ||
5618 | return ERR_PTR(-ENOMEM); | ||
5619 | |||
5620 | mode_cmd.width = mode->hdisplay; | ||
5621 | mode_cmd.height = mode->vdisplay; | ||
5622 | mode_cmd.depth = depth; | ||
5623 | mode_cmd.bpp = bpp; | ||
5624 | mode_cmd.pitch = intel_framebuffer_pitch_for_width(mode_cmd.width, bpp); | ||
5625 | |||
5626 | return intel_framebuffer_create(dev, &mode_cmd, obj); | ||
5627 | } | ||
5628 | |||
5629 | static struct drm_framebuffer * | ||
5630 | mode_fits_in_fbdev(struct drm_device *dev, | ||
5631 | struct drm_display_mode *mode) | ||
5632 | { | ||
5633 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
5634 | struct drm_i915_gem_object *obj; | ||
5635 | struct drm_framebuffer *fb; | ||
5636 | |||
5637 | if (dev_priv->fbdev == NULL) | ||
5638 | return NULL; | ||
5639 | |||
5640 | obj = dev_priv->fbdev->ifb.obj; | ||
5641 | if (obj == NULL) | ||
5642 | return NULL; | ||
5643 | |||
5644 | fb = &dev_priv->fbdev->ifb.base; | ||
5645 | if (fb->pitch < intel_framebuffer_pitch_for_width(mode->hdisplay, | ||
5646 | fb->bits_per_pixel)) | ||
5647 | return NULL; | ||
5648 | |||
5649 | if (obj->base.size < mode->vdisplay * fb->pitch) | ||
5650 | return NULL; | ||
5651 | |||
5652 | return fb; | ||
5653 | } | ||
5654 | |||
5655 | bool intel_get_load_detect_pipe(struct intel_encoder *intel_encoder, | ||
5656 | struct drm_connector *connector, | ||
5657 | struct drm_display_mode *mode, | ||
5658 | struct intel_load_detect_pipe *old) | ||
4431 | { | 5659 | { |
4432 | struct intel_crtc *intel_crtc; | 5660 | struct intel_crtc *intel_crtc; |
4433 | struct drm_crtc *possible_crtc; | 5661 | struct drm_crtc *possible_crtc; |
4434 | struct drm_crtc *supported_crtc =NULL; | 5662 | struct drm_encoder *encoder = &intel_encoder->base; |
4435 | struct drm_encoder *encoder = &intel_encoder->enc; | ||
4436 | struct drm_crtc *crtc = NULL; | 5663 | struct drm_crtc *crtc = NULL; |
4437 | struct drm_device *dev = encoder->dev; | 5664 | struct drm_device *dev = encoder->dev; |
4438 | struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private; | 5665 | struct drm_framebuffer *old_fb; |
4439 | struct drm_crtc_helper_funcs *crtc_funcs; | ||
4440 | int i = -1; | 5666 | int i = -1; |
4441 | 5667 | ||
5668 | DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n", | ||
5669 | connector->base.id, drm_get_connector_name(connector), | ||
5670 | encoder->base.id, drm_get_encoder_name(encoder)); | ||
5671 | |||
4442 | /* | 5672 | /* |
4443 | * Algorithm gets a little messy: | 5673 | * Algorithm gets a little messy: |
5674 | * | ||
4444 | * - if the connector already has an assigned crtc, use it (but make | 5675 | * - if the connector already has an assigned crtc, use it (but make |
4445 | * sure it's on first) | 5676 | * sure it's on first) |
5677 | * | ||
4446 | * - try to find the first unused crtc that can drive this connector, | 5678 | * - try to find the first unused crtc that can drive this connector, |
4447 | * and use that if we find one | 5679 | * and use that if we find one |
4448 | * - if there are no unused crtcs available, try to use the first | ||
4449 | * one we found that supports the connector | ||
4450 | */ | 5680 | */ |
4451 | 5681 | ||
4452 | /* See if we already have a CRTC for this connector */ | 5682 | /* See if we already have a CRTC for this connector */ |
4453 | if (encoder->crtc) { | 5683 | if (encoder->crtc) { |
4454 | crtc = encoder->crtc; | 5684 | crtc = encoder->crtc; |
4455 | /* Make sure the crtc and connector are running */ | 5685 | |
4456 | intel_crtc = to_intel_crtc(crtc); | 5686 | intel_crtc = to_intel_crtc(crtc); |
4457 | *dpms_mode = intel_crtc->dpms_mode; | 5687 | old->dpms_mode = intel_crtc->dpms_mode; |
5688 | old->load_detect_temp = false; | ||
5689 | |||
5690 | /* Make sure the crtc and connector are running */ | ||
4458 | if (intel_crtc->dpms_mode != DRM_MODE_DPMS_ON) { | 5691 | if (intel_crtc->dpms_mode != DRM_MODE_DPMS_ON) { |
5692 | struct drm_encoder_helper_funcs *encoder_funcs; | ||
5693 | struct drm_crtc_helper_funcs *crtc_funcs; | ||
5694 | |||
4459 | crtc_funcs = crtc->helper_private; | 5695 | crtc_funcs = crtc->helper_private; |
4460 | crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON); | 5696 | crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON); |
5697 | |||
5698 | encoder_funcs = encoder->helper_private; | ||
4461 | encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON); | 5699 | encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON); |
4462 | } | 5700 | } |
4463 | return crtc; | 5701 | |
5702 | return true; | ||
4464 | } | 5703 | } |
4465 | 5704 | ||
4466 | /* Find an unused one (if possible) */ | 5705 | /* Find an unused one (if possible) */ |
@@ -4472,66 +5711,91 @@ struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder, | |||
4472 | crtc = possible_crtc; | 5711 | crtc = possible_crtc; |
4473 | break; | 5712 | break; |
4474 | } | 5713 | } |
4475 | if (!supported_crtc) | ||
4476 | supported_crtc = possible_crtc; | ||
4477 | } | 5714 | } |
4478 | 5715 | ||
4479 | /* | 5716 | /* |
4480 | * If we didn't find an unused CRTC, don't use any. | 5717 | * If we didn't find an unused CRTC, don't use any. |
4481 | */ | 5718 | */ |
4482 | if (!crtc) { | 5719 | if (!crtc) { |
4483 | return NULL; | 5720 | DRM_DEBUG_KMS("no pipe available for load-detect\n"); |
5721 | return false; | ||
4484 | } | 5722 | } |
4485 | 5723 | ||
4486 | encoder->crtc = crtc; | 5724 | encoder->crtc = crtc; |
4487 | connector->encoder = encoder; | 5725 | connector->encoder = encoder; |
4488 | intel_encoder->load_detect_temp = true; | ||
4489 | 5726 | ||
4490 | intel_crtc = to_intel_crtc(crtc); | 5727 | intel_crtc = to_intel_crtc(crtc); |
4491 | *dpms_mode = intel_crtc->dpms_mode; | 5728 | old->dpms_mode = intel_crtc->dpms_mode; |
5729 | old->load_detect_temp = true; | ||
5730 | old->release_fb = NULL; | ||
4492 | 5731 | ||
4493 | if (!crtc->enabled) { | 5732 | if (!mode) |
4494 | if (!mode) | 5733 | mode = &load_detect_mode; |
4495 | mode = &load_detect_mode; | 5734 | |
4496 | drm_crtc_helper_set_mode(crtc, mode, 0, 0, crtc->fb); | 5735 | old_fb = crtc->fb; |
4497 | } else { | ||
4498 | if (intel_crtc->dpms_mode != DRM_MODE_DPMS_ON) { | ||
4499 | crtc_funcs = crtc->helper_private; | ||
4500 | crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON); | ||
4501 | } | ||
4502 | 5736 | ||
4503 | /* Add this connector to the crtc */ | 5737 | /* We need a framebuffer large enough to accommodate all accesses |
4504 | encoder_funcs->mode_set(encoder, &crtc->mode, &crtc->mode); | 5738 | * that the plane may generate whilst we perform load detection. |
4504 | encoder_funcs->commit(encoder); | 5739 | * We cannot rely on the fbcon either being present (we get called |
5740 | * during its initialisation to detect all boot displays, or it may | ||
5741 | * not even exist) or being large enough to satisfy the | ||
5742 | * requested mode. | ||
5743 | */ | ||
5744 | crtc->fb = mode_fits_in_fbdev(dev, mode); | ||
5745 | if (crtc->fb == NULL) { | ||
5746 | DRM_DEBUG_KMS("creating tmp fb for load-detection\n"); | ||
5747 | crtc->fb = intel_framebuffer_create_for_mode(dev, mode, 24, 32); | ||
5748 | old->release_fb = crtc->fb; | ||
5749 | } else | ||
5750 | DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n"); | ||
5751 | if (IS_ERR(crtc->fb)) { | ||
5752 | DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n"); | ||
5753 | crtc->fb = old_fb; | ||
5754 | return false; | ||
4506 | } | 5755 | } |
5756 | |||
5757 | if (!drm_crtc_helper_set_mode(crtc, mode, 0, 0, old_fb)) { | ||
5758 | DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n"); | ||
5759 | if (old->release_fb) | ||
5760 | old->release_fb->funcs->destroy(old->release_fb); | ||
5761 | crtc->fb = old_fb; | ||
5762 | return false; | ||
5763 | } | ||
5764 | |||
4507 | /* let the connector get through one full cycle before testing */ | 5765 | /* let the connector get through one full cycle before testing */ |
4508 | intel_wait_for_vblank(dev, intel_crtc->pipe); | 5766 | intel_wait_for_vblank(dev, intel_crtc->pipe); |
4509 | 5767 | ||
4510 | return crtc; | 5768 | return true; |
4511 | } | 5769 | } |
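
With the return type now a bool and the borrowed crtc state captured in struct intel_load_detect_pipe, a connector's probe path brackets its hardware poke with the pair of calls. A minimal sketch of the intended usage (the probe helper named here is hypothetical):

	struct intel_load_detect_pipe tmp;
	enum drm_connector_status status = connector_status_disconnected;

	if (intel_get_load_detect_pipe(intel_encoder, connector, NULL, &tmp)) {
		/* a pipe is now scanning out load_detect_mode for us */
		status = probe_connector_hw(connector);	/* hypothetical helper */
		intel_release_load_detect_pipe(intel_encoder, connector, &tmp);
	}
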
4512 | 5770 | ||
4513 | void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder, | 5771 | void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder, |
4514 | struct drm_connector *connector, int dpms_mode) | 5772 | struct drm_connector *connector, |
5773 | struct intel_load_detect_pipe *old) | ||
4515 | { | 5774 | { |
4516 | struct drm_encoder *encoder = &intel_encoder->enc; | 5775 | struct drm_encoder *encoder = &intel_encoder->base; |
4517 | struct drm_device *dev = encoder->dev; | 5776 | struct drm_device *dev = encoder->dev; |
4518 | struct drm_crtc *crtc = encoder->crtc; | 5777 | struct drm_crtc *crtc = encoder->crtc; |
4519 | struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private; | 5778 | struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private; |
4520 | struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; | 5779 | struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; |
4521 | 5780 | ||
4522 | if (intel_encoder->load_detect_temp) { | 5781 | DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n", |
4523 | encoder->crtc = NULL; | 5782 | connector->base.id, drm_get_connector_name(connector), |
5783 | encoder->base.id, drm_get_encoder_name(encoder)); | ||
5784 | |||
5785 | if (old->load_detect_temp) { | ||
4524 | connector->encoder = NULL; | 5786 | connector->encoder = NULL; |
4525 | intel_encoder->load_detect_temp = false; | ||
4526 | crtc->enabled = drm_helper_crtc_in_use(crtc); | ||
4527 | drm_helper_disable_unused_functions(dev); | 5787 | drm_helper_disable_unused_functions(dev); |
5788 | |||
5789 | if (old->release_fb) | ||
5790 | old->release_fb->funcs->destroy(old->release_fb); | ||
5791 | |||
5792 | return; | ||
4528 | } | 5793 | } |
4529 | 5794 | ||
4530 | /* Switch crtc and encoder back off if necessary */ | 5795 | /* Switch crtc and encoder back off if necessary */ |
4531 | if (crtc->enabled && dpms_mode != DRM_MODE_DPMS_ON) { | 5796 | if (old->dpms_mode != DRM_MODE_DPMS_ON) { |
4532 | if (encoder->crtc == crtc) | 5797 | encoder_funcs->dpms(encoder, old->dpms_mode); |
4533 | encoder_funcs->dpms(encoder, dpms_mode); | 5798 | crtc_funcs->dpms(crtc, old->dpms_mode); |
4534 | crtc_funcs->dpms(crtc, dpms_mode); | ||
4535 | } | 5799 | } |
4536 | } | 5800 | } |
4537 | 5801 | ||
@@ -4541,14 +5805,14 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc) | |||
4541 | struct drm_i915_private *dev_priv = dev->dev_private; | 5805 | struct drm_i915_private *dev_priv = dev->dev_private; |
4542 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 5806 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
4543 | int pipe = intel_crtc->pipe; | 5807 | int pipe = intel_crtc->pipe; |
4544 | u32 dpll = I915_READ((pipe == 0) ? DPLL_A : DPLL_B); | 5808 | u32 dpll = I915_READ(DPLL(pipe)); |
4545 | u32 fp; | 5809 | u32 fp; |
4546 | intel_clock_t clock; | 5810 | intel_clock_t clock; |
4547 | 5811 | ||
4548 | if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0) | 5812 | if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0) |
4549 | fp = I915_READ((pipe == 0) ? FPA0 : FPB0); | 5813 | fp = I915_READ(FP0(pipe)); |
4550 | else | 5814 | else |
4551 | fp = I915_READ((pipe == 0) ? FPA1 : FPB1); | 5815 | fp = I915_READ(FP1(pipe)); |
4552 | 5816 | ||
4553 | clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT; | 5817 | clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT; |
4554 | if (IS_PINEVIEW(dev)) { | 5818 | if (IS_PINEVIEW(dev)) { |
@@ -4559,7 +5823,7 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc) | |||
4559 | clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT; | 5823 | clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT; |
4560 | } | 5824 | } |
4561 | 5825 | ||
4562 | if (IS_I9XX(dev)) { | 5826 | if (!IS_GEN2(dev)) { |
4563 | if (IS_PINEVIEW(dev)) | 5827 | if (IS_PINEVIEW(dev)) |
4564 | clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >> | 5828 | clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >> |
4565 | DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW); | 5829 | DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW); |
@@ -4630,10 +5894,10 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev, | |||
4630 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 5894 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
4631 | int pipe = intel_crtc->pipe; | 5895 | int pipe = intel_crtc->pipe; |
4632 | struct drm_display_mode *mode; | 5896 | struct drm_display_mode *mode; |
4633 | int htot = I915_READ((pipe == 0) ? HTOTAL_A : HTOTAL_B); | 5897 | int htot = I915_READ(HTOTAL(pipe)); |
4634 | int hsync = I915_READ((pipe == 0) ? HSYNC_A : HSYNC_B); | 5898 | int hsync = I915_READ(HSYNC(pipe)); |
4635 | int vtot = I915_READ((pipe == 0) ? VTOTAL_A : VTOTAL_B); | 5899 | int vtot = I915_READ(VTOTAL(pipe)); |
4636 | int vsync = I915_READ((pipe == 0) ? VSYNC_A : VSYNC_B); | 5900 | int vsync = I915_READ(VSYNC(pipe)); |
4637 | 5901 | ||
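
These hunks swap the open-coded `(pipe == 0) ? REG_A : REG_B` ternaries for pipe-indexed macros (DPLL, FP0, FP1, HTOTAL, HSYNC, VTOTAL, VSYNC). Assuming the usual i915_reg.h idiom, each macro computes the register offset from the pipe number; a sketch of that pattern (the macro bodies are an assumption, they are not part of this diff):

	/* assumed shape of the per-pipe register macros */
	#define _PIPE(pipe, a, b) ((a) + (pipe) * ((b) - (a)))
	#define DPLL(pipe)	_PIPE(pipe, _DPLL_A, _DPLL_B)
	#define HTOTAL(pipe)	_PIPE(pipe, _HTOTAL_A, _HTOTAL_B)

	u32 dpll = I915_READ(DPLL(pipe));	/* pipe 0 -> DPLL_A, pipe 1 -> DPLL_B */
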
4638 | mode = kzalloc(sizeof(*mode), GFP_KERNEL); | 5902 | mode = kzalloc(sizeof(*mode), GFP_KERNEL); |
4639 | if (!mode) | 5903 | if (!mode) |
@@ -4663,10 +5927,14 @@ static void intel_gpu_idle_timer(unsigned long arg) | |||
4663 | struct drm_device *dev = (struct drm_device *)arg; | 5927 | struct drm_device *dev = (struct drm_device *)arg; |
4664 | drm_i915_private_t *dev_priv = dev->dev_private; | 5928 | drm_i915_private_t *dev_priv = dev->dev_private; |
4665 | 5929 | ||
4666 | DRM_DEBUG_DRIVER("idle timer fired, downclocking\n"); | 5930 | if (!list_empty(&dev_priv->mm.active_list)) { |
5931 | /* Still processing requests, so just re-arm the timer. */ | ||
5932 | mod_timer(&dev_priv->idle_timer, jiffies + | ||
5933 | msecs_to_jiffies(GPU_IDLE_TIMEOUT)); | ||
5934 | return; | ||
5935 | } | ||
4667 | 5936 | ||
4668 | dev_priv->busy = false; | 5937 | dev_priv->busy = false; |
4669 | |||
4670 | queue_work(dev_priv->wq, &dev_priv->idle_work); | 5938 | queue_work(dev_priv->wq, &dev_priv->idle_work); |
4671 | } | 5939 | } |
4672 | 5940 | ||
@@ -4677,22 +5945,28 @@ static void intel_crtc_idle_timer(unsigned long arg) | |||
4677 | struct intel_crtc *intel_crtc = (struct intel_crtc *)arg; | 5945 | struct intel_crtc *intel_crtc = (struct intel_crtc *)arg; |
4678 | struct drm_crtc *crtc = &intel_crtc->base; | 5946 | struct drm_crtc *crtc = &intel_crtc->base; |
4679 | drm_i915_private_t *dev_priv = crtc->dev->dev_private; | 5947 | drm_i915_private_t *dev_priv = crtc->dev->dev_private; |
5948 | struct intel_framebuffer *intel_fb; | ||
4680 | 5949 | ||
4681 | DRM_DEBUG_DRIVER("idle timer fired, downclocking\n"); | 5950 | intel_fb = to_intel_framebuffer(crtc->fb); |
5951 | if (intel_fb && intel_fb->obj->active) { | ||
5952 | /* The framebuffer is still being accessed by the GPU. */ | ||
5953 | mod_timer(&intel_crtc->idle_timer, jiffies + | ||
5954 | msecs_to_jiffies(CRTC_IDLE_TIMEOUT)); | ||
5955 | return; | ||
5956 | } | ||
4682 | 5957 | ||
4683 | intel_crtc->busy = false; | 5958 | intel_crtc->busy = false; |
4684 | |||
4685 | queue_work(dev_priv->wq, &dev_priv->idle_work); | 5959 | queue_work(dev_priv->wq, &dev_priv->idle_work); |
4686 | } | 5960 | } |
4687 | 5961 | ||
4688 | static void intel_increase_pllclock(struct drm_crtc *crtc, bool schedule) | 5962 | static void intel_increase_pllclock(struct drm_crtc *crtc) |
4689 | { | 5963 | { |
4690 | struct drm_device *dev = crtc->dev; | 5964 | struct drm_device *dev = crtc->dev; |
4691 | drm_i915_private_t *dev_priv = dev->dev_private; | 5965 | drm_i915_private_t *dev_priv = dev->dev_private; |
4692 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 5966 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
4693 | int pipe = intel_crtc->pipe; | 5967 | int pipe = intel_crtc->pipe; |
4694 | int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B; | 5968 | int dpll_reg = DPLL(pipe); |
4695 | int dpll = I915_READ(dpll_reg); | 5969 | int dpll; |
4696 | 5970 | ||
4697 | if (HAS_PCH_SPLIT(dev)) | 5971 | if (HAS_PCH_SPLIT(dev)) |
4698 | return; | 5972 | return; |
@@ -4700,17 +5974,18 @@ static void intel_increase_pllclock(struct drm_crtc *crtc, bool schedule) | |||
4700 | if (!dev_priv->lvds_downclock_avail) | 5974 | if (!dev_priv->lvds_downclock_avail) |
4701 | return; | 5975 | return; |
4702 | 5976 | ||
5977 | dpll = I915_READ(dpll_reg); | ||
4703 | if (!HAS_PIPE_CXSR(dev) && (dpll & DISPLAY_RATE_SELECT_FPA1)) { | 5978 | if (!HAS_PIPE_CXSR(dev) && (dpll & DISPLAY_RATE_SELECT_FPA1)) { |
4704 | DRM_DEBUG_DRIVER("upclocking LVDS\n"); | 5979 | DRM_DEBUG_DRIVER("upclocking LVDS\n"); |
4705 | 5980 | ||
4706 | /* Unlock panel regs */ | 5981 | /* Unlock panel regs */ |
4707 | I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) | | 5982 | I915_WRITE(PP_CONTROL, |
4708 | PANEL_UNLOCK_REGS); | 5983 | I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS); |
4709 | 5984 | ||
4710 | dpll &= ~DISPLAY_RATE_SELECT_FPA1; | 5985 | dpll &= ~DISPLAY_RATE_SELECT_FPA1; |
4711 | I915_WRITE(dpll_reg, dpll); | 5986 | I915_WRITE(dpll_reg, dpll); |
4712 | dpll = I915_READ(dpll_reg); | ||
4713 | intel_wait_for_vblank(dev, pipe); | 5987 | intel_wait_for_vblank(dev, pipe); |
5988 | |||
4714 | dpll = I915_READ(dpll_reg); | 5989 | dpll = I915_READ(dpll_reg); |
4715 | if (dpll & DISPLAY_RATE_SELECT_FPA1) | 5990 | if (dpll & DISPLAY_RATE_SELECT_FPA1) |
4716 | DRM_DEBUG_DRIVER("failed to upclock LVDS!\n"); | 5991 | DRM_DEBUG_DRIVER("failed to upclock LVDS!\n"); |
@@ -4720,9 +5995,8 @@ static void intel_increase_pllclock(struct drm_crtc *crtc, bool schedule) | |||
4720 | } | 5995 | } |
4721 | 5996 | ||
4722 | /* Schedule downclock */ | 5997 | /* Schedule downclock */ |
4723 | if (schedule) | 5998 | mod_timer(&intel_crtc->idle_timer, jiffies + |
4724 | mod_timer(&intel_crtc->idle_timer, jiffies + | 5999 | msecs_to_jiffies(CRTC_IDLE_TIMEOUT)); |
4725 | msecs_to_jiffies(CRTC_IDLE_TIMEOUT)); | ||
4726 | } | 6000 | } |
4727 | 6001 | ||
4728 | static void intel_decrease_pllclock(struct drm_crtc *crtc) | 6002 | static void intel_decrease_pllclock(struct drm_crtc *crtc) |
@@ -4731,7 +6005,7 @@ static void intel_decrease_pllclock(struct drm_crtc *crtc) | |||
4731 | drm_i915_private_t *dev_priv = dev->dev_private; | 6005 | drm_i915_private_t *dev_priv = dev->dev_private; |
4732 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 6006 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
4733 | int pipe = intel_crtc->pipe; | 6007 | int pipe = intel_crtc->pipe; |
4734 | int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B; | 6008 | int dpll_reg = DPLL(pipe); |
4735 | int dpll = I915_READ(dpll_reg); | 6009 | int dpll = I915_READ(dpll_reg); |
4736 | 6010 | ||
4737 | if (HAS_PCH_SPLIT(dev)) | 6011 | if (HAS_PCH_SPLIT(dev)) |
@@ -4753,7 +6027,6 @@ static void intel_decrease_pllclock(struct drm_crtc *crtc) | |||
4753 | 6027 | ||
4754 | dpll |= DISPLAY_RATE_SELECT_FPA1; | 6028 | dpll |= DISPLAY_RATE_SELECT_FPA1; |
4755 | I915_WRITE(dpll_reg, dpll); | 6029 | I915_WRITE(dpll_reg, dpll); |
4756 | dpll = I915_READ(dpll_reg); | ||
4757 | intel_wait_for_vblank(dev, pipe); | 6030 | intel_wait_for_vblank(dev, pipe); |
4758 | dpll = I915_READ(dpll_reg); | 6031 | dpll = I915_READ(dpll_reg); |
4759 | if (!(dpll & DISPLAY_RATE_SELECT_FPA1)) | 6032 | if (!(dpll & DISPLAY_RATE_SELECT_FPA1)) |
@@ -4779,7 +6052,6 @@ static void intel_idle_update(struct work_struct *work) | |||
4779 | struct drm_device *dev = dev_priv->dev; | 6052 | struct drm_device *dev = dev_priv->dev; |
4780 | struct drm_crtc *crtc; | 6053 | struct drm_crtc *crtc; |
4781 | struct intel_crtc *intel_crtc; | 6054 | struct intel_crtc *intel_crtc; |
4782 | int enabled = 0; | ||
4783 | 6055 | ||
4784 | if (!i915_powersave) | 6056 | if (!i915_powersave) |
4785 | return; | 6057 | return; |
@@ -4793,16 +6065,11 @@ static void intel_idle_update(struct work_struct *work) | |||
4793 | if (!crtc->fb) | 6065 | if (!crtc->fb) |
4794 | continue; | 6066 | continue; |
4795 | 6067 | ||
4796 | enabled++; | ||
4797 | intel_crtc = to_intel_crtc(crtc); | 6068 | intel_crtc = to_intel_crtc(crtc); |
4798 | if (!intel_crtc->busy) | 6069 | if (!intel_crtc->busy) |
4799 | intel_decrease_pllclock(crtc); | 6070 | intel_decrease_pllclock(crtc); |
4800 | } | 6071 | } |
4801 | 6072 | ||
4802 | if ((enabled == 1) && (IS_I945G(dev) || IS_I945GM(dev))) { | ||
4803 | DRM_DEBUG_DRIVER("enable memory self refresh on 945\n"); | ||
4804 | I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN); | ||
4805 | } | ||
4806 | 6073 | ||
4807 | mutex_unlock(&dev->struct_mutex); | 6074 | mutex_unlock(&dev->struct_mutex); |
4808 | } | 6075 | } |
@@ -4817,7 +6084,7 @@ static void intel_idle_update(struct work_struct *work) | |||
4817 | * buffer), we'll also mark the display as busy, so we know to increase its | 6084 | * buffer), we'll also mark the display as busy, so we know to increase its |
4818 | * clock frequency. | 6085 | * clock frequency. |
4819 | */ | 6086 | */ |
4820 | void intel_mark_busy(struct drm_device *dev, struct drm_gem_object *obj) | 6087 | void intel_mark_busy(struct drm_device *dev, struct drm_i915_gem_object *obj) |
4821 | { | 6088 | { |
4822 | drm_i915_private_t *dev_priv = dev->dev_private; | 6089 | drm_i915_private_t *dev_priv = dev->dev_private; |
4823 | struct drm_crtc *crtc = NULL; | 6090 | struct drm_crtc *crtc = NULL; |
@@ -4827,17 +6094,9 @@ void intel_mark_busy(struct drm_device *dev, struct drm_gem_object *obj) | |||
4827 | if (!drm_core_check_feature(dev, DRIVER_MODESET)) | 6094 | if (!drm_core_check_feature(dev, DRIVER_MODESET)) |
4828 | return; | 6095 | return; |
4829 | 6096 | ||
4830 | if (!dev_priv->busy) { | 6097 | if (!dev_priv->busy) |
4831 | if (IS_I945G(dev) || IS_I945GM(dev)) { | ||
4832 | u32 fw_blc_self; | ||
4833 | |||
4834 | DRM_DEBUG_DRIVER("disable memory self refresh on 945\n"); | ||
4835 | fw_blc_self = I915_READ(FW_BLC_SELF); | ||
4836 | fw_blc_self &= ~FW_BLC_SELF_EN; | ||
4837 | I915_WRITE(FW_BLC_SELF, fw_blc_self | FW_BLC_SELF_EN_MASK); | ||
4838 | } | ||
4839 | dev_priv->busy = true; | 6098 | dev_priv->busy = true; |
4840 | } else | 6099 | else |
4841 | mod_timer(&dev_priv->idle_timer, jiffies + | 6100 | mod_timer(&dev_priv->idle_timer, jiffies + |
4842 | msecs_to_jiffies(GPU_IDLE_TIMEOUT)); | 6101 | msecs_to_jiffies(GPU_IDLE_TIMEOUT)); |
4843 | 6102 | ||
@@ -4849,16 +6108,8 @@ void intel_mark_busy(struct drm_device *dev, struct drm_gem_object *obj) | |||
4849 | intel_fb = to_intel_framebuffer(crtc->fb); | 6108 | intel_fb = to_intel_framebuffer(crtc->fb); |
4850 | if (intel_fb->obj == obj) { | 6109 | if (intel_fb->obj == obj) { |
4851 | if (!intel_crtc->busy) { | 6110 | if (!intel_crtc->busy) { |
4852 | if (IS_I945G(dev) || IS_I945GM(dev)) { | ||
4853 | u32 fw_blc_self; | ||
4854 | |||
4855 | DRM_DEBUG_DRIVER("disable memory self refresh on 945\n"); | ||
4856 | fw_blc_self = I915_READ(FW_BLC_SELF); | ||
4857 | fw_blc_self &= ~FW_BLC_SELF_EN; | ||
4858 | I915_WRITE(FW_BLC_SELF, fw_blc_self | FW_BLC_SELF_EN_MASK); | ||
4859 | } | ||
4860 | /* Non-busy -> busy, upclock */ | 6111 | /* Non-busy -> busy, upclock */ |
4861 | intel_increase_pllclock(crtc, true); | 6112 | intel_increase_pllclock(crtc); |
4862 | intel_crtc->busy = true; | 6113 | intel_crtc->busy = true; |
4863 | } else { | 6114 | } else { |
4864 | /* Busy -> busy, put off timer */ | 6115 | /* Busy -> busy, put off timer */ |
@@ -4872,8 +6123,22 @@ void intel_mark_busy(struct drm_device *dev, struct drm_gem_object *obj) | |||
4872 | static void intel_crtc_destroy(struct drm_crtc *crtc) | 6123 | static void intel_crtc_destroy(struct drm_crtc *crtc) |
4873 | { | 6124 | { |
4874 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 6125 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
6126 | struct drm_device *dev = crtc->dev; | ||
6127 | struct intel_unpin_work *work; | ||
6128 | unsigned long flags; | ||
6129 | |||
6130 | spin_lock_irqsave(&dev->event_lock, flags); | ||
6131 | work = intel_crtc->unpin_work; | ||
6132 | intel_crtc->unpin_work = NULL; | ||
6133 | spin_unlock_irqrestore(&dev->event_lock, flags); | ||
6134 | |||
6135 | if (work) { | ||
6136 | cancel_work_sync(&work->work); | ||
6137 | kfree(work); | ||
6138 | } | ||
4875 | 6139 | ||
4876 | drm_crtc_cleanup(crtc); | 6140 | drm_crtc_cleanup(crtc); |
6141 | |||
4877 | kfree(intel_crtc); | 6142 | kfree(intel_crtc); |
4878 | } | 6143 | } |
4879 | 6144 | ||
@@ -4884,8 +6149,9 @@ static void intel_unpin_work_fn(struct work_struct *__work) | |||
4884 | 6149 | ||
4885 | mutex_lock(&work->dev->struct_mutex); | 6150 | mutex_lock(&work->dev->struct_mutex); |
4886 | i915_gem_object_unpin(work->old_fb_obj); | 6151 | i915_gem_object_unpin(work->old_fb_obj); |
4887 | drm_gem_object_unreference(work->pending_flip_obj); | 6152 | drm_gem_object_unreference(&work->pending_flip_obj->base); |
4888 | drm_gem_object_unreference(work->old_fb_obj); | 6153 | drm_gem_object_unreference(&work->old_fb_obj->base); |
6154 | |||
4889 | mutex_unlock(&work->dev->struct_mutex); | 6155 | mutex_unlock(&work->dev->struct_mutex); |
4890 | kfree(work); | 6156 | kfree(work); |
4891 | } | 6157 | } |
@@ -4896,15 +6162,17 @@ static void do_intel_finish_page_flip(struct drm_device *dev, | |||
4896 | drm_i915_private_t *dev_priv = dev->dev_private; | 6162 | drm_i915_private_t *dev_priv = dev->dev_private; |
4897 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 6163 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
4898 | struct intel_unpin_work *work; | 6164 | struct intel_unpin_work *work; |
4899 | struct drm_i915_gem_object *obj_priv; | 6165 | struct drm_i915_gem_object *obj; |
4900 | struct drm_pending_vblank_event *e; | 6166 | struct drm_pending_vblank_event *e; |
4901 | struct timeval now; | 6167 | struct timeval tnow, tvbl; |
4902 | unsigned long flags; | 6168 | unsigned long flags; |
4903 | 6169 | ||
4904 | /* Ignore early vblank irqs */ | 6170 | /* Ignore early vblank irqs */ |
4905 | if (intel_crtc == NULL) | 6171 | if (intel_crtc == NULL) |
4906 | return; | 6172 | return; |
4907 | 6173 | ||
6174 | do_gettimeofday(&tnow); | ||
6175 | |||
4908 | spin_lock_irqsave(&dev->event_lock, flags); | 6176 | spin_lock_irqsave(&dev->event_lock, flags); |
4909 | work = intel_crtc->unpin_work; | 6177 | work = intel_crtc->unpin_work; |
4910 | if (work == NULL || !work->pending) { | 6178 | if (work == NULL || !work->pending) { |
@@ -4913,27 +6181,49 @@ static void do_intel_finish_page_flip(struct drm_device *dev, | |||
4913 | } | 6181 | } |
4914 | 6182 | ||
4915 | intel_crtc->unpin_work = NULL; | 6183 | intel_crtc->unpin_work = NULL; |
4916 | drm_vblank_put(dev, intel_crtc->pipe); | ||
4917 | 6184 | ||
4918 | if (work->event) { | 6185 | if (work->event) { |
4919 | e = work->event; | 6186 | e = work->event; |
4920 | do_gettimeofday(&now); | 6187 | e->event.sequence = drm_vblank_count_and_time(dev, intel_crtc->pipe, &tvbl); |
4921 | e->event.sequence = drm_vblank_count(dev, intel_crtc->pipe); | 6188 | |
4922 | e->event.tv_sec = now.tv_sec; | 6189 | /* Called before vblank count and timestamps have |
4923 | e->event.tv_usec = now.tv_usec; | 6190 | * been updated for the vblank interval of flip |
6191 | * completion? Need to increment vblank count and | ||
6192 | * add one videorefresh duration to the returned timestamp | ||
6193 | * to account for this. We assume this happened if we | ||
6194 | * get called over 0.9 frame durations after the last | ||
6195 | * timestamped vblank. | ||
6196 | * | ||
6197 | * This calculation cannot be used with vrefresh rates | ||
6198 | * below 5Hz (10Hz to be on the safe side) without | ||
6199 | * promoting to 64-bit integers. | ||
6200 | */ | ||
6201 | if (10 * (timeval_to_ns(&tnow) - timeval_to_ns(&tvbl)) > | ||
6202 | 9 * crtc->framedur_ns) { | ||
6203 | e->event.sequence++; | ||
6204 | tvbl = ns_to_timeval(timeval_to_ns(&tvbl) + | ||
6205 | crtc->framedur_ns); | ||
6206 | } | ||
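
	/* Worked numbers for the 0.9-frame test above, assuming a 60Hz mode
	 * (framedur_ns = 16666667); hand arithmetic, not patch content:
	 *
	 *   delta = 16ms: 10 * 16000000 = 160000000 > 9 * 16666667 = 150000003
	 *                 -> vblank bookkeeping is stale, credit one extra frame
	 *   delta = 14ms: 10 * 14000000 = 140000000 <= 150000003
	 *                 -> the timestamp is trusted as-is
	 *
	 * Keeping the comparison as 10*delta > 9*framedur_ns avoids floating
	 * point while expressing the 0.9 frame-duration threshold exactly.
	 */
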
6207 | |||
6208 | e->event.tv_sec = tvbl.tv_sec; | ||
6209 | e->event.tv_usec = tvbl.tv_usec; | ||
6210 | |||
4924 | list_add_tail(&e->base.link, | 6211 | list_add_tail(&e->base.link, |
4925 | &e->base.file_priv->event_list); | 6212 | &e->base.file_priv->event_list); |
4926 | wake_up_interruptible(&e->base.file_priv->event_wait); | 6213 | wake_up_interruptible(&e->base.file_priv->event_wait); |
4927 | } | 6214 | } |
4928 | 6215 | ||
6216 | drm_vblank_put(dev, intel_crtc->pipe); | ||
6217 | |||
4929 | spin_unlock_irqrestore(&dev->event_lock, flags); | 6218 | spin_unlock_irqrestore(&dev->event_lock, flags); |
4930 | 6219 | ||
4931 | obj_priv = to_intel_bo(work->pending_flip_obj); | 6220 | obj = work->old_fb_obj; |
6221 | |||
6222 | atomic_clear_mask(1 << intel_crtc->plane, | ||
6223 | &obj->pending_flip.counter); | ||
6224 | if (atomic_read(&obj->pending_flip) == 0) | ||
6225 | wake_up(&dev_priv->pending_flip_queue); | ||
4932 | 6226 | ||
4933 | /* Initial scanout buffer will have a 0 pending flip count */ | ||
4934 | if ((atomic_read(&obj_priv->pending_flip) == 0) || | ||
4935 | atomic_dec_and_test(&obj_priv->pending_flip)) | ||
4936 | DRM_WAKEUP(&dev_priv->pending_flip_queue); | ||
4937 | schedule_work(&work->work); | 6227 | schedule_work(&work->work); |
4938 | 6228 | ||
4939 | trace_i915_flip_complete(intel_crtc->plane, work->pending_flip_obj); | 6229 | trace_i915_flip_complete(intel_crtc->plane, work->pending_flip_obj); |
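
pending_flip is now used as a per-plane bitmask rather than a single counter: the queueing side adds (1 << plane) against the outgoing framebuffer object, and the completion path above clears that bit and wakes pending_flip_queue once no plane holds a bit. A sketch of the lifecycle, with the wait side shown for illustration (it lives outside this hunk):

	/* queue: mark the old fb as having a flip outstanding on this plane */
	atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);

	/* complete (above): drop this plane's bit, wake waiters when all clear */
	atomic_clear_mask(1 << intel_crtc->plane, &obj->pending_flip.counter);
	if (atomic_read(&obj->pending_flip) == 0)
		wake_up(&dev_priv->pending_flip_queue);

	/* anyone needing the object idle can then sleep on the queue */
	wait_event(dev_priv->pending_flip_queue,
		   atomic_read(&obj->pending_flip) == 0);
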
@@ -4972,6 +6262,197 @@ void intel_prepare_page_flip(struct drm_device *dev, int plane) | |||
4972 | spin_unlock_irqrestore(&dev->event_lock, flags); | 6262 | spin_unlock_irqrestore(&dev->event_lock, flags); |
4973 | } | 6263 | } |
4974 | 6264 | ||
6265 | static int intel_gen2_queue_flip(struct drm_device *dev, | ||
6266 | struct drm_crtc *crtc, | ||
6267 | struct drm_framebuffer *fb, | ||
6268 | struct drm_i915_gem_object *obj) | ||
6269 | { | ||
6270 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
6271 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
6272 | unsigned long offset; | ||
6273 | u32 flip_mask; | ||
6274 | int ret; | ||
6275 | |||
6276 | ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv)); | ||
6277 | if (ret) | ||
6278 | goto out; | ||
6279 | |||
6280 | /* Offset into the new buffer for cases of shared fbs between CRTCs */ | ||
6281 | offset = crtc->y * fb->pitch + crtc->x * fb->bits_per_pixel/8; | ||
6282 | |||
6283 | ret = BEGIN_LP_RING(6); | ||
6284 | if (ret) | ||
6285 | goto out; | ||
6286 | |||
6287 | /* Can't queue multiple flips, so wait for the previous | ||
6288 | * one to finish before executing the next. | ||
6289 | */ | ||
6290 | if (intel_crtc->plane) | ||
6291 | flip_mask = MI_WAIT_FOR_PLANE_B_FLIP; | ||
6292 | else | ||
6293 | flip_mask = MI_WAIT_FOR_PLANE_A_FLIP; | ||
6294 | OUT_RING(MI_WAIT_FOR_EVENT | flip_mask); | ||
6295 | OUT_RING(MI_NOOP); | ||
6296 | OUT_RING(MI_DISPLAY_FLIP | | ||
6297 | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); | ||
6298 | OUT_RING(fb->pitch); | ||
6299 | OUT_RING(obj->gtt_offset + offset); | ||
6300 | OUT_RING(MI_NOOP); | ||
6301 | ADVANCE_LP_RING(); | ||
6302 | out: | ||
6303 | return ret; | ||
6304 | } | ||
6305 | |||
6306 | static int intel_gen3_queue_flip(struct drm_device *dev, | ||
6307 | struct drm_crtc *crtc, | ||
6308 | struct drm_framebuffer *fb, | ||
6309 | struct drm_i915_gem_object *obj) | ||
6310 | { | ||
6311 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
6312 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
6313 | unsigned long offset; | ||
6314 | u32 flip_mask; | ||
6315 | int ret; | ||
6316 | |||
6317 | ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv)); | ||
6318 | if (ret) | ||
6319 | goto out; | ||
6320 | |||
6321 | /* Offset into the new buffer for cases of shared fbs between CRTCs */ | ||
6322 | offset = crtc->y * fb->pitch + crtc->x * fb->bits_per_pixel/8; | ||
6323 | |||
6324 | ret = BEGIN_LP_RING(6); | ||
6325 | if (ret) | ||
6326 | goto out; | ||
6327 | |||
6328 | if (intel_crtc->plane) | ||
6329 | flip_mask = MI_WAIT_FOR_PLANE_B_FLIP; | ||
6330 | else | ||
6331 | flip_mask = MI_WAIT_FOR_PLANE_A_FLIP; | ||
6332 | OUT_RING(MI_WAIT_FOR_EVENT | flip_mask); | ||
6333 | OUT_RING(MI_NOOP); | ||
6334 | OUT_RING(MI_DISPLAY_FLIP_I915 | | ||
6335 | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); | ||
6336 | OUT_RING(fb->pitch); | ||
6337 | OUT_RING(obj->gtt_offset + offset); | ||
6338 | OUT_RING(MI_NOOP); | ||
6339 | |||
6340 | ADVANCE_LP_RING(); | ||
6341 | out: | ||
6342 | return ret; | ||
6343 | } | ||
6344 | |||
6345 | static int intel_gen4_queue_flip(struct drm_device *dev, | ||
6346 | struct drm_crtc *crtc, | ||
6347 | struct drm_framebuffer *fb, | ||
6348 | struct drm_i915_gem_object *obj) | ||
6349 | { | ||
6350 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
6351 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
6352 | uint32_t pf, pipesrc; | ||
6353 | int ret; | ||
6354 | |||
6355 | ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv)); | ||
6356 | if (ret) | ||
6357 | goto out; | ||
6358 | |||
6359 | ret = BEGIN_LP_RING(4); | ||
6360 | if (ret) | ||
6361 | goto out; | ||
6362 | |||
6363 | /* i965+ uses the linear or tiled offsets from the | ||
6364 | * Display Registers (which do not change across a page-flip) | ||
6365 | * so we need only reprogram the base address. | ||
6366 | */ | ||
6367 | OUT_RING(MI_DISPLAY_FLIP | | ||
6368 | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); | ||
6369 | OUT_RING(fb->pitch); | ||
6370 | OUT_RING(obj->gtt_offset | obj->tiling_mode); | ||
6371 | |||
6372 | /* XXX Enabling the panel-fitter across page-flip is so far | ||
6373 | * untested on non-native modes, so ignore it for now. | ||
6374 | * pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE; | ||
6375 | */ | ||
6376 | pf = 0; | ||
6377 | pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff; | ||
6378 | OUT_RING(pf | pipesrc); | ||
6379 | ADVANCE_LP_RING(); | ||
6380 | out: | ||
6381 | return ret; | ||
6382 | } | ||
6383 | |||
6384 | static int intel_gen6_queue_flip(struct drm_device *dev, | ||
6385 | struct drm_crtc *crtc, | ||
6386 | struct drm_framebuffer *fb, | ||
6387 | struct drm_i915_gem_object *obj) | ||
6388 | { | ||
6389 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
6390 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
6391 | uint32_t pf, pipesrc; | ||
6392 | int ret; | ||
6393 | |||
6394 | ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv)); | ||
6395 | if (ret) | ||
6396 | goto out; | ||
6397 | |||
6398 | ret = BEGIN_LP_RING(4); | ||
6399 | if (ret) | ||
6400 | goto out; | ||
6401 | |||
6402 | OUT_RING(MI_DISPLAY_FLIP | | ||
6403 | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); | ||
6404 | OUT_RING(fb->pitch | obj->tiling_mode); | ||
6405 | OUT_RING(obj->gtt_offset); | ||
6406 | |||
6407 | pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE; | ||
6408 | pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff; | ||
6409 | OUT_RING(pf | pipesrc); | ||
6410 | ADVANCE_LP_RING(); | ||
6411 | out: | ||
6412 | return ret; | ||
6413 | } | ||
6414 | |||
6415 | /* | ||
6416 | * On gen7 we currently use the blit ring because (in early silicon at least) | ||
6417 | * the render ring doesn't give us interrupts for page flip completion, which | ||
6418 | * means clients will hang after the first flip is queued. Fortunately the | ||
6419 | * blit ring generates interrupts properly, so use it instead. | ||
6420 | */ | ||
6421 | static int intel_gen7_queue_flip(struct drm_device *dev, | ||
6422 | struct drm_crtc *crtc, | ||
6423 | struct drm_framebuffer *fb, | ||
6424 | struct drm_i915_gem_object *obj) | ||
6425 | { | ||
6426 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
6427 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
6428 | struct intel_ring_buffer *ring = &dev_priv->ring[BCS]; | ||
6429 | int ret; | ||
6430 | |||
6431 | ret = intel_pin_and_fence_fb_obj(dev, obj, ring); | ||
6432 | if (ret) | ||
6433 | goto out; | ||
6434 | |||
6435 | ret = intel_ring_begin(ring, 4); | ||
6436 | if (ret) | ||
6437 | goto out; | ||
6438 | |||
6439 | intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | (intel_crtc->plane << 19)); | ||
6440 | intel_ring_emit(ring, (fb->pitch | obj->tiling_mode)); | ||
6441 | intel_ring_emit(ring, (obj->gtt_offset)); | ||
6442 | intel_ring_emit(ring, (MI_NOOP)); | ||
6443 | intel_ring_advance(ring); | ||
6444 | out: | ||
6445 | return ret; | ||
6446 | } | ||
6447 | |||
6448 | static int intel_default_queue_flip(struct drm_device *dev, | ||
6449 | struct drm_crtc *crtc, | ||
6450 | struct drm_framebuffer *fb, | ||
6451 | struct drm_i915_gem_object *obj) | ||
6452 | { | ||
6453 | return -ENODEV; | ||
6454 | } | ||
6455 | |||
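The five helpers above implement one queue_flip contract per hardware generation, and intel_crtc_page_flip below dispatches through dev_priv->display.queue_flip instead of an inline switch on the gen. A sketch of how the hook is presumably selected once at init time (the actual assignment sits in a portion of the patch not shown here):

	/* sketch: one-time selection of the flip implementation (assumed) */
	switch (INTEL_INFO(dev)->gen) {
	case 2:
		dev_priv->display.queue_flip = intel_gen2_queue_flip;
		break;
	case 3:
		dev_priv->display.queue_flip = intel_gen3_queue_flip;
		break;
	case 4:
	case 5:
		dev_priv->display.queue_flip = intel_gen4_queue_flip;
		break;
	case 6:
		dev_priv->display.queue_flip = intel_gen6_queue_flip;
		break;
	case 7:
		dev_priv->display.queue_flip = intel_gen7_queue_flip;
		break;
	default:
		dev_priv->display.queue_flip = intel_default_queue_flip;
	}
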
4975 | static int intel_crtc_page_flip(struct drm_crtc *crtc, | 6456 | static int intel_crtc_page_flip(struct drm_crtc *crtc, |
4976 | struct drm_framebuffer *fb, | 6457 | struct drm_framebuffer *fb, |
4977 | struct drm_pending_vblank_event *event) | 6458 | struct drm_pending_vblank_event *event) |
@@ -4979,13 +6460,10 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, | |||
4979 | struct drm_device *dev = crtc->dev; | 6460 | struct drm_device *dev = crtc->dev; |
4980 | struct drm_i915_private *dev_priv = dev->dev_private; | 6461 | struct drm_i915_private *dev_priv = dev->dev_private; |
4981 | struct intel_framebuffer *intel_fb; | 6462 | struct intel_framebuffer *intel_fb; |
4982 | struct drm_i915_gem_object *obj_priv; | 6463 | struct drm_i915_gem_object *obj; |
4983 | struct drm_gem_object *obj; | ||
4984 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 6464 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
4985 | struct intel_unpin_work *work; | 6465 | struct intel_unpin_work *work; |
4986 | unsigned long flags, offset; | 6466 | unsigned long flags; |
4987 | int pipe = intel_crtc->pipe; | ||
4988 | u32 pf, pipesrc; | ||
4989 | int ret; | 6467 | int ret; |
4990 | 6468 | ||
4991 | work = kzalloc(sizeof *work, GFP_KERNEL); | 6469 | work = kzalloc(sizeof *work, GFP_KERNEL); |
@@ -5014,96 +6492,29 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, | |||
5014 | obj = intel_fb->obj; | 6492 | obj = intel_fb->obj; |
5015 | 6493 | ||
5016 | mutex_lock(&dev->struct_mutex); | 6494 | mutex_lock(&dev->struct_mutex); |
5017 | ret = intel_pin_and_fence_fb_obj(dev, obj); | ||
5018 | if (ret) | ||
5019 | goto cleanup_work; | ||
5020 | 6495 | ||
5021 | /* Reference the objects for the scheduled work. */ | 6496 | /* Reference the objects for the scheduled work. */ |
5022 | drm_gem_object_reference(work->old_fb_obj); | 6497 | drm_gem_object_reference(&work->old_fb_obj->base); |
5023 | drm_gem_object_reference(obj); | 6498 | drm_gem_object_reference(&obj->base); |
5024 | 6499 | ||
5025 | crtc->fb = fb; | 6500 | crtc->fb = fb; |
5026 | ret = i915_gem_object_flush_write_domain(obj); | ||
5027 | if (ret) | ||
5028 | goto cleanup_objs; | ||
5029 | 6501 | ||
5030 | ret = drm_vblank_get(dev, intel_crtc->pipe); | 6502 | ret = drm_vblank_get(dev, intel_crtc->pipe); |
5031 | if (ret) | 6503 | if (ret) |
5032 | goto cleanup_objs; | 6504 | goto cleanup_objs; |
5033 | 6505 | ||
5034 | obj_priv = to_intel_bo(obj); | ||
5035 | atomic_inc(&obj_priv->pending_flip); | ||
5036 | work->pending_flip_obj = obj; | 6506 | work->pending_flip_obj = obj; |
5037 | 6507 | ||
5038 | if (IS_GEN3(dev) || IS_GEN2(dev)) { | ||
5039 | u32 flip_mask; | ||
5040 | |||
5041 | if (intel_crtc->plane) | ||
5042 | flip_mask = MI_WAIT_FOR_PLANE_B_FLIP; | ||
5043 | else | ||
5044 | flip_mask = MI_WAIT_FOR_PLANE_A_FLIP; | ||
5045 | |||
5046 | BEGIN_LP_RING(2); | ||
5047 | OUT_RING(MI_WAIT_FOR_EVENT | flip_mask); | ||
5048 | OUT_RING(0); | ||
5049 | ADVANCE_LP_RING(); | ||
5050 | } | ||
5051 | |||
5052 | work->enable_stall_check = true; | 6508 | work->enable_stall_check = true; |
5053 | 6509 | ||
5054 | /* Offset into the new buffer for cases of shared fbs between CRTCs */ | 6510 | /* Block clients from rendering to the new back buffer until |
5055 | offset = crtc->y * fb->pitch + crtc->x * fb->bits_per_pixel/8; | 6511 | * the flip occurs and the object is no longer visible. |
5056 | 6512 | */ | |
5057 | BEGIN_LP_RING(4); | 6513 | atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip); |
5058 | switch(INTEL_INFO(dev)->gen) { | ||
5059 | case 2: | ||
5060 | OUT_RING(MI_DISPLAY_FLIP | | ||
5061 | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); | ||
5062 | OUT_RING(fb->pitch); | ||
5063 | OUT_RING(obj_priv->gtt_offset + offset); | ||
5064 | OUT_RING(MI_NOOP); | ||
5065 | break; | ||
5066 | |||
5067 | case 3: | ||
5068 | OUT_RING(MI_DISPLAY_FLIP_I915 | | ||
5069 | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); | ||
5070 | OUT_RING(fb->pitch); | ||
5071 | OUT_RING(obj_priv->gtt_offset + offset); | ||
5072 | OUT_RING(MI_NOOP); | ||
5073 | break; | ||
5074 | |||
5075 | case 4: | ||
5076 | case 5: | ||
5077 | /* i965+ uses the linear or tiled offsets from the | ||
5078 | * Display Registers (which do not change across a page-flip) | ||
5079 | * so we need only reprogram the base address. | ||
5080 | */ | ||
5081 | OUT_RING(MI_DISPLAY_FLIP | | ||
5082 | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); | ||
5083 | OUT_RING(fb->pitch); | ||
5084 | OUT_RING(obj_priv->gtt_offset | obj_priv->tiling_mode); | ||
5085 | |||
5086 | /* XXX Enabling the panel-fitter across page-flip is so far | ||
5087 | * untested on non-native modes, so ignore it for now. | ||
5088 | * pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE; | ||
5089 | */ | ||
5090 | pf = 0; | ||
5091 | pipesrc = I915_READ(pipe == 0 ? PIPEASRC : PIPEBSRC) & 0x0fff0fff; | ||
5092 | OUT_RING(pf | pipesrc); | ||
5093 | break; | ||
5094 | 6514 | ||
5095 | case 6: | 6515 | ret = dev_priv->display.queue_flip(dev, crtc, fb, obj); |
5096 | OUT_RING(MI_DISPLAY_FLIP | | 6516 | if (ret) |
5097 | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); | 6517 | goto cleanup_pending; |
5098 | OUT_RING(fb->pitch | obj_priv->tiling_mode); | ||
5099 | OUT_RING(obj_priv->gtt_offset); | ||
5100 | |||
5101 | pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE; | ||
5102 | pipesrc = I915_READ(pipe == 0 ? PIPEASRC : PIPEBSRC) & 0x0fff0fff; | ||
5103 | OUT_RING(pf | pipesrc); | ||
5104 | break; | ||
5105 | } | ||
5106 | ADVANCE_LP_RING(); | ||
5107 | 6518 | ||
5108 | mutex_unlock(&dev->struct_mutex); | 6519 | mutex_unlock(&dev->struct_mutex); |
5109 | 6520 | ||
@@ -5111,10 +6522,11 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, | |||
5111 | 6522 | ||
5112 | return 0; | 6523 | return 0; |
5113 | 6524 | ||
6525 | cleanup_pending: | ||
6526 | atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip); | ||
5114 | cleanup_objs: | 6527 | cleanup_objs: |
5115 | drm_gem_object_unreference(work->old_fb_obj); | 6528 | drm_gem_object_unreference(&work->old_fb_obj->base); |
5116 | drm_gem_object_unreference(obj); | 6529 | drm_gem_object_unreference(&obj->base); |
5117 | cleanup_work: | ||
5118 | mutex_unlock(&dev->struct_mutex); | 6530 | mutex_unlock(&dev->struct_mutex); |
5119 | 6531 | ||
5120 | spin_lock_irqsave(&dev->event_lock, flags); | 6532 | spin_lock_irqsave(&dev->event_lock, flags); |
@@ -5126,18 +6538,70 @@ cleanup_work: | |||
5126 | return ret; | 6538 | return ret; |
5127 | } | 6539 | } |
5128 | 6540 | ||
5129 | static const struct drm_crtc_helper_funcs intel_helper_funcs = { | 6541 | static void intel_sanitize_modesetting(struct drm_device *dev, |
6542 | int pipe, int plane) | ||
6543 | { | ||
6544 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
6545 | u32 reg, val; | ||
6546 | |||
6547 | if (HAS_PCH_SPLIT(dev)) | ||
6548 | return; | ||
6549 | |||
6550 | /* Who knows what state these registers were left in by the BIOS or | ||
6551 | * grub? | ||
6552 | * | ||
6553 | * If we leave the registers in a conflicting state (e.g. with the | ||
6554 | * display plane reading from the other pipe than the one we intend | ||
6555 | * to use) then when we attempt to teardown the active mode, we will | ||
6556 | * not disable the pipes and planes in the correct order -- leaving | ||
6557 | * a plane reading from a disabled pipe and possibly leading to | ||
6558 | * undefined behaviour. | ||
6559 | */ | ||
6560 | |||
6561 | reg = DSPCNTR(plane); | ||
6562 | val = I915_READ(reg); | ||
6563 | |||
6564 | if ((val & DISPLAY_PLANE_ENABLE) == 0) | ||
6565 | return; | ||
6566 | if (!!(val & DISPPLANE_SEL_PIPE_MASK) == pipe) | ||
6567 | return; | ||
6568 | |||
6569 | /* This display plane is active and attached to the other CPU pipe. */ | ||
6570 | pipe = !pipe; | ||
6571 | |||
6572 | /* Disable the plane and wait for it to stop reading from the pipe. */ | ||
6573 | intel_disable_plane(dev_priv, plane, pipe); | ||
6574 | intel_disable_pipe(dev_priv, pipe); | ||
6575 | } | ||
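
Two small idioms carry the test above: `!!` collapses the multi-bit DISPPLANE_SEL_PIPE_MASK field to 0 or 1 so it compares directly against the pipe index, and `pipe = !pipe` then names the other pipe so plane and pipe can be shut down in the safe order. A compressed sketch of the same decision:

	u32 val = I915_READ(DSPCNTR(plane));

	if ((val & DISPLAY_PLANE_ENABLE) &&
	    !!(val & DISPPLANE_SEL_PIPE_MASK) != pipe) {
		/* plane scans out the other pipe: quiesce plane before pipe */
		intel_disable_plane(dev_priv, plane, !pipe);
		intel_disable_pipe(dev_priv, !pipe);
	}
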
6576 | |||
6577 | static void intel_crtc_reset(struct drm_crtc *crtc) | ||
6578 | { | ||
6579 | struct drm_device *dev = crtc->dev; | ||
6580 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
6581 | |||
6582 | /* Reset flags back to the 'unknown' status so that they | ||
6583 | * will be correctly set on the initial modeset. | ||
6584 | */ | ||
6585 | intel_crtc->dpms_mode = -1; | ||
6586 | |||
6587 | /* We need to fix up any BIOS configuration that conflicts with | ||
6588 | * our expectations. | ||
6589 | */ | ||
6590 | intel_sanitize_modesetting(dev, intel_crtc->pipe, intel_crtc->plane); | ||
6591 | } | ||
6592 | |||
6593 | static struct drm_crtc_helper_funcs intel_helper_funcs = { | ||
5130 | .dpms = intel_crtc_dpms, | 6594 | .dpms = intel_crtc_dpms, |
5131 | .mode_fixup = intel_crtc_mode_fixup, | 6595 | .mode_fixup = intel_crtc_mode_fixup, |
5132 | .mode_set = intel_crtc_mode_set, | 6596 | .mode_set = intel_crtc_mode_set, |
5133 | .mode_set_base = intel_pipe_set_base, | 6597 | .mode_set_base = intel_pipe_set_base, |
5134 | .mode_set_base_atomic = intel_pipe_set_base_atomic, | 6598 | .mode_set_base_atomic = intel_pipe_set_base_atomic, |
5135 | .prepare = intel_crtc_prepare, | ||
5136 | .commit = intel_crtc_commit, | ||
5137 | .load_lut = intel_crtc_load_lut, | 6599 | .load_lut = intel_crtc_load_lut, |
6600 | .disable = intel_crtc_disable, | ||
5138 | }; | 6601 | }; |
5139 | 6602 | ||
5140 | static const struct drm_crtc_funcs intel_crtc_funcs = { | 6603 | static const struct drm_crtc_funcs intel_crtc_funcs = { |
6604 | .reset = intel_crtc_reset, | ||
5141 | .cursor_set = intel_crtc_cursor_set, | 6605 | .cursor_set = intel_crtc_cursor_set, |
5142 | .cursor_move = intel_crtc_cursor_move, | 6606 | .cursor_move = intel_crtc_cursor_move, |
5143 | .gamma_set = intel_crtc_gamma_set, | 6607 | .gamma_set = intel_crtc_gamma_set, |
@@ -5146,7 +6610,6 @@ static const struct drm_crtc_funcs intel_crtc_funcs = { | |||
5146 | .page_flip = intel_crtc_page_flip, | 6610 | .page_flip = intel_crtc_page_flip, |
5147 | }; | 6611 | }; |
5148 | 6612 | ||
5149 | |||
5150 | static void intel_crtc_init(struct drm_device *dev, int pipe) | 6613 | static void intel_crtc_init(struct drm_device *dev, int pipe) |
5151 | { | 6614 | { |
5152 | drm_i915_private_t *dev_priv = dev->dev_private; | 6615 | drm_i915_private_t *dev_priv = dev->dev_private; |
@@ -5160,8 +6623,6 @@ static void intel_crtc_init(struct drm_device *dev, int pipe) | |||
5160 | drm_crtc_init(dev, &intel_crtc->base, &intel_crtc_funcs); | 6623 | drm_crtc_init(dev, &intel_crtc->base, &intel_crtc_funcs); |
5161 | 6624 | ||
5162 | drm_mode_crtc_set_gamma_size(&intel_crtc->base, 256); | 6625 | drm_mode_crtc_set_gamma_size(&intel_crtc->base, 256); |
5163 | intel_crtc->pipe = pipe; | ||
5164 | intel_crtc->plane = pipe; | ||
5165 | for (i = 0; i < 256; i++) { | 6626 | for (i = 0; i < 256; i++) { |
5166 | intel_crtc->lut_r[i] = i; | 6627 | intel_crtc->lut_r[i] = i; |
5167 | intel_crtc->lut_g[i] = i; | 6628 | intel_crtc->lut_g[i] = i; |
@@ -5171,9 +6632,9 @@ static void intel_crtc_init(struct drm_device *dev, int pipe) | |||
5171 | /* Swap pipes & planes for FBC on pre-965 */ | 6632 | /* Swap pipes & planes for FBC on pre-965 */ |
5172 | intel_crtc->pipe = pipe; | 6633 | intel_crtc->pipe = pipe; |
5173 | intel_crtc->plane = pipe; | 6634 | intel_crtc->plane = pipe; |
5174 | if (IS_MOBILE(dev) && (IS_I9XX(dev) && !IS_I965G(dev))) { | 6635 | if (IS_MOBILE(dev) && IS_GEN3(dev)) { |
5175 | DRM_DEBUG_KMS("swapping pipes & planes for FBC\n"); | 6636 | DRM_DEBUG_KMS("swapping pipes & planes for FBC\n"); |
5176 | intel_crtc->plane = ((pipe == 0) ? 1 : 0); | 6637 | intel_crtc->plane = !pipe; |
5177 | } | 6638 | } |
5178 | 6639 | ||
5179 | BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) || | 6640 | BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) || |
@@ -5181,8 +6642,17 @@ static void intel_crtc_init(struct drm_device *dev, int pipe) | |||
5181 | dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base; | 6642 | dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base; |
5182 | dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base; | 6643 | dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base; |
5183 | 6644 | ||
5184 | intel_crtc->cursor_addr = 0; | 6645 | intel_crtc_reset(&intel_crtc->base); |
5185 | intel_crtc->dpms_mode = -1; | 6646 | intel_crtc->active = true; /* force the pipe off on setup_init_config */ |
6647 | |||
6648 | if (HAS_PCH_SPLIT(dev)) { | ||
6649 | intel_helper_funcs.prepare = ironlake_crtc_prepare; | ||
6650 | intel_helper_funcs.commit = ironlake_crtc_commit; | ||
6651 | } else { | ||
6652 | intel_helper_funcs.prepare = i9xx_crtc_prepare; | ||
6653 | intel_helper_funcs.commit = i9xx_crtc_commit; | ||
6654 | } | ||
6655 | |||
5186 | drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs); | 6656 | drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs); |
5187 | 6657 | ||
5188 | intel_crtc->busy = false; | 6658 | intel_crtc->busy = false; |
@@ -5192,7 +6662,7 @@ static void intel_crtc_init(struct drm_device *dev, int pipe) | |||
5192 | } | 6662 | } |
5193 | 6663 | ||
5194 | int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, | 6664 | int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, |
5195 | struct drm_file *file_priv) | 6665 | struct drm_file *file) |
5196 | { | 6666 | { |
5197 | drm_i915_private_t *dev_priv = dev->dev_private; | 6667 | drm_i915_private_t *dev_priv = dev->dev_private; |
5198 | struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data; | 6668 | struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data; |
@@ -5218,47 +6688,56 @@ int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, | |||
5218 | return 0; | 6688 | return 0; |
5219 | } | 6689 | } |
5220 | 6690 | ||
5221 | struct drm_crtc *intel_get_crtc_from_pipe(struct drm_device *dev, int pipe) | ||
5222 | { | ||
5223 | struct drm_crtc *crtc = NULL; | ||
5224 | |||
5225 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { | ||
5226 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
5227 | if (intel_crtc->pipe == pipe) | ||
5228 | break; | ||
5229 | } | ||
5230 | return crtc; | ||
5231 | } | ||
5232 | |||
5233 | static int intel_encoder_clones(struct drm_device *dev, int type_mask) | 6691 | static int intel_encoder_clones(struct drm_device *dev, int type_mask) |
5234 | { | 6692 | { |
6693 | struct intel_encoder *encoder; | ||
5235 | int index_mask = 0; | 6694 | int index_mask = 0; |
5236 | struct drm_encoder *encoder; | ||
5237 | int entry = 0; | 6695 | int entry = 0; |
5238 | 6696 | ||
5239 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { | 6697 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) { |
5240 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); | 6698 | if (type_mask & encoder->clone_mask) |
5241 | if (type_mask & intel_encoder->clone_mask) | ||
5242 | index_mask |= (1 << entry); | 6699 | index_mask |= (1 << entry); |
5243 | entry++; | 6700 | entry++; |
5244 | } | 6701 | } |
6702 | |||
5245 | return index_mask; | 6703 | return index_mask; |
5246 | } | 6704 | } |
5247 | 6705 | ||
6706 | static bool has_edp_a(struct drm_device *dev) | ||
6707 | { | ||
6708 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
6709 | |||
6710 | if (!IS_MOBILE(dev)) | ||
6711 | return false; | ||
6712 | |||
6713 | if ((I915_READ(DP_A) & DP_DETECTED) == 0) | ||
6714 | return false; | ||
6715 | |||
6716 | if (IS_GEN5(dev) && | ||
6717 | (I915_READ(ILK_DISPLAY_CHICKEN_FUSES) & ILK_eDP_A_DISABLE)) | ||
6718 | return false; | ||
6719 | |||
6720 | return true; | ||
6721 | } | ||
5248 | 6722 | ||
5249 | static void intel_setup_outputs(struct drm_device *dev) | 6723 | static void intel_setup_outputs(struct drm_device *dev) |
5250 | { | 6724 | { |
5251 | struct drm_i915_private *dev_priv = dev->dev_private; | 6725 | struct drm_i915_private *dev_priv = dev->dev_private; |
5252 | struct drm_encoder *encoder; | 6726 | struct intel_encoder *encoder; |
5253 | bool dpd_is_edp = false; | 6727 | bool dpd_is_edp = false; |
6728 | bool has_lvds = false; | ||
5254 | 6729 | ||
5255 | if (IS_MOBILE(dev) && !IS_I830(dev)) | 6730 | if (IS_MOBILE(dev) && !IS_I830(dev)) |
5256 | intel_lvds_init(dev); | 6731 | has_lvds = intel_lvds_init(dev); |
6732 | if (!has_lvds && !HAS_PCH_SPLIT(dev)) { | ||
6733 | /* disable the panel fitter on everything but LVDS */ | ||
6734 | I915_WRITE(PFIT_CONTROL, 0); | ||
6735 | } | ||
5257 | 6736 | ||
5258 | if (HAS_PCH_SPLIT(dev)) { | 6737 | if (HAS_PCH_SPLIT(dev)) { |
5259 | dpd_is_edp = intel_dpd_is_edp(dev); | 6738 | dpd_is_edp = intel_dpd_is_edp(dev); |
5260 | 6739 | ||
5261 | if (IS_MOBILE(dev) && (I915_READ(DP_A) & DP_DETECTED)) | 6740 | if (has_edp_a(dev)) |
5262 | intel_dp_init(dev, DP_A); | 6741 | intel_dp_init(dev, DP_A); |
5263 | 6742 | ||
5264 | if (dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED)) | 6743 | if (dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED)) |
@@ -5338,13 +6817,16 @@ static void intel_setup_outputs(struct drm_device *dev) | |||
5338 | if (SUPPORTS_TV(dev)) | 6817 | if (SUPPORTS_TV(dev)) |
5339 | intel_tv_init(dev); | 6818 | intel_tv_init(dev); |
5340 | 6819 | ||
5341 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { | 6820 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) { |
5342 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); | 6821 | encoder->base.possible_crtcs = encoder->crtc_mask; |
5343 | 6822 | encoder->base.possible_clones = | |
5344 | encoder->possible_crtcs = intel_encoder->crtc_mask; | 6823 | intel_encoder_clones(dev, encoder->clone_mask); |
5345 | encoder->possible_clones = intel_encoder_clones(dev, | ||
5346 | intel_encoder->clone_mask); | ||
5347 | } | 6824 | } |
6825 | |||
6826 | intel_panel_setup_backlight(dev); | ||
6827 | |||
6828 | /* disable all the possible outputs/crtcs before entering KMS mode */ | ||
6829 | drm_helper_disable_unused_functions(dev); | ||
5348 | } | 6830 | } |
5349 | 6831 | ||
5350 | static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb) | 6832 | static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb) |
@@ -5352,19 +6834,19 @@ static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb) | |||
5352 | struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); | 6834 | struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); |
5353 | 6835 | ||
5354 | drm_framebuffer_cleanup(fb); | 6836 | drm_framebuffer_cleanup(fb); |
5355 | drm_gem_object_unreference_unlocked(intel_fb->obj); | 6837 | drm_gem_object_unreference_unlocked(&intel_fb->obj->base); |
5356 | 6838 | ||
5357 | kfree(intel_fb); | 6839 | kfree(intel_fb); |
5358 | } | 6840 | } |
5359 | 6841 | ||
5360 | static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb, | 6842 | static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb, |
5361 | struct drm_file *file_priv, | 6843 | struct drm_file *file, |
5362 | unsigned int *handle) | 6844 | unsigned int *handle) |
5363 | { | 6845 | { |
5364 | struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); | 6846 | struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); |
5365 | struct drm_gem_object *object = intel_fb->obj; | 6847 | struct drm_i915_gem_object *obj = intel_fb->obj; |
5366 | 6848 | ||
5367 | return drm_gem_handle_create(file_priv, object, handle); | 6849 | return drm_gem_handle_create(file, &obj->base, handle); |
5368 | } | 6850 | } |
5369 | 6851 | ||
5370 | static const struct drm_framebuffer_funcs intel_fb_funcs = { | 6852 | static const struct drm_framebuffer_funcs intel_fb_funcs = { |
@@ -5375,10 +6857,26 @@ static const struct drm_framebuffer_funcs intel_fb_funcs = { | |||
5375 | int intel_framebuffer_init(struct drm_device *dev, | 6857 | int intel_framebuffer_init(struct drm_device *dev, |
5376 | struct intel_framebuffer *intel_fb, | 6858 | struct intel_framebuffer *intel_fb, |
5377 | struct drm_mode_fb_cmd *mode_cmd, | 6859 | struct drm_mode_fb_cmd *mode_cmd, |
5378 | struct drm_gem_object *obj) | 6860 | struct drm_i915_gem_object *obj) |
5379 | { | 6861 | { |
5380 | int ret; | 6862 | int ret; |
5381 | 6863 | ||
6864 | if (obj->tiling_mode == I915_TILING_Y) | ||
6865 | return -EINVAL; | ||
6866 | |||
6867 | if (mode_cmd->pitch & 63) | ||
6868 | return -EINVAL; | ||
6869 | |||
6870 | switch (mode_cmd->bpp) { | ||
6871 | case 8: | ||
6872 | case 16: | ||
6873 | case 24: | ||
6874 | case 32: | ||
6875 | break; | ||
6876 | default: | ||
6877 | return -EINVAL; | ||
6878 | } | ||
6879 | |||
5382 | ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs); | 6880 | ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs); |
5383 | if (ret) { | 6881 | if (ret) { |
5384 | DRM_ERROR("framebuffer init failed %d\n", ret); | 6882 | DRM_ERROR("framebuffer init failed %d\n", ret); |
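
intel_framebuffer_init now front-loads the scanout constraints: Y-tiled objects are refused, the pitch must be a multiple of 64 bytes, and only 8/16/24/32 bpp are accepted. A sketch of a mode_cmd that satisfies all three checks (field values are illustrative):

	struct drm_mode_fb_cmd mode_cmd = {
		.width	= 1280,
		.height	= 1024,
		.bpp	= 32,
		.depth	= 24,
		.pitch	= 1280 * 4,	/* 5120 bytes; 5120 & 63 == 0 */
	};
	/* the backing object must be linear or X-tiled, never I915_TILING_Y */
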
@@ -5395,27 +6893,13 @@ intel_user_framebuffer_create(struct drm_device *dev, | |||
5395 | struct drm_file *filp, | 6893 | struct drm_file *filp, |
5396 | struct drm_mode_fb_cmd *mode_cmd) | 6894 | struct drm_mode_fb_cmd *mode_cmd) |
5397 | { | 6895 | { |
5398 | struct drm_gem_object *obj; | 6896 | struct drm_i915_gem_object *obj; |
5399 | struct intel_framebuffer *intel_fb; | ||
5400 | int ret; | ||
5401 | 6897 | ||
5402 | obj = drm_gem_object_lookup(dev, filp, mode_cmd->handle); | 6898 | obj = to_intel_bo(drm_gem_object_lookup(dev, filp, mode_cmd->handle)); |
5403 | if (!obj) | 6899 | if (&obj->base == NULL) |
5404 | return ERR_PTR(-ENOENT); | 6900 | return ERR_PTR(-ENOENT); |
5405 | 6901 | ||
5406 | intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL); | 6902 | return intel_framebuffer_create(dev, mode_cmd, obj); |
5407 | if (!intel_fb) | ||
5408 | return ERR_PTR(-ENOMEM); | ||
5409 | |||
5410 | ret = intel_framebuffer_init(dev, intel_fb, | ||
5411 | mode_cmd, obj); | ||
5412 | if (ret) { | ||
5413 | drm_gem_object_unreference_unlocked(obj); | ||
5414 | kfree(intel_fb); | ||
5415 | return ERR_PTR(ret); | ||
5416 | } | ||
5417 | |||
5418 | return &intel_fb->base; | ||
5419 | } | 6903 | } |
5420 | 6904 | ||
5421 | static const struct drm_mode_config_funcs intel_mode_funcs = { | 6905 | static const struct drm_mode_config_funcs intel_mode_funcs = { |
@@ -5423,20 +6907,21 @@ static const struct drm_mode_config_funcs intel_mode_funcs = { | |||
5423 | .output_poll_changed = intel_fb_output_poll_changed, | 6907 | .output_poll_changed = intel_fb_output_poll_changed, |
5424 | }; | 6908 | }; |
5425 | 6909 | ||
5426 | static struct drm_gem_object * | 6910 | static struct drm_i915_gem_object * |
5427 | intel_alloc_context_page(struct drm_device *dev) | 6911 | intel_alloc_context_page(struct drm_device *dev) |
5428 | { | 6912 | { |
5429 | struct drm_gem_object *ctx; | 6913 | struct drm_i915_gem_object *ctx; |
5430 | int ret; | 6914 | int ret; |
5431 | 6915 | ||
6916 | WARN_ON(!mutex_is_locked(&dev->struct_mutex)); | ||
6917 | |||
5432 | ctx = i915_gem_alloc_object(dev, 4096); | 6918 | ctx = i915_gem_alloc_object(dev, 4096); |
5433 | if (!ctx) { | 6919 | if (!ctx) { |
5434 | DRM_DEBUG("failed to alloc power context, RC6 disabled\n"); | 6920 | DRM_DEBUG("failed to alloc power context, RC6 disabled\n"); |
5435 | return NULL; | 6921 | return NULL; |
5436 | } | 6922 | } |
5437 | 6923 | ||
5438 | mutex_lock(&dev->struct_mutex); | 6924 | ret = i915_gem_object_pin(ctx, 4096, true); |
5439 | ret = i915_gem_object_pin(ctx, 4096); | ||
5440 | if (ret) { | 6925 | if (ret) { |
5441 | DRM_ERROR("failed to pin power context: %d\n", ret); | 6926 | DRM_ERROR("failed to pin power context: %d\n", ret); |
5442 | goto err_unref; | 6927 | goto err_unref; |
@@ -5447,14 +6932,13 @@ intel_alloc_context_page(struct drm_device *dev) | |||
5447 | DRM_ERROR("failed to set-domain on power context: %d\n", ret); | 6932 | DRM_ERROR("failed to set-domain on power context: %d\n", ret); |
5448 | goto err_unpin; | 6933 | goto err_unpin; |
5449 | } | 6934 | } |
5450 | mutex_unlock(&dev->struct_mutex); | ||
5451 | 6935 | ||
5452 | return ctx; | 6936 | return ctx; |
5453 | 6937 | ||
5454 | err_unpin: | 6938 | err_unpin: |
5455 | i915_gem_object_unpin(ctx); | 6939 | i915_gem_object_unpin(ctx); |
5456 | err_unref: | 6940 | err_unref: |
5457 | drm_gem_object_unreference(ctx); | 6941 | drm_gem_object_unreference(&ctx->base); |
5458 | mutex_unlock(&dev->struct_mutex); | 6942 | mutex_unlock(&dev->struct_mutex); |
5459 | return NULL; | 6943 | return NULL; |
5460 | } | 6944 | } |
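[Editor's note] intel_alloc_context_page() now expects the caller to hold struct_mutex (hence the WARN_ON) and unwinds with the usual kernel goto ladder: each acquired resource gets a label, released in reverse order on failure. A generic sketch of the idiom, with hypothetical acquire/release helpers standing in for alloc/pin/set-domain:

	/* Hypothetical acquire/release pairs; not the driver's API. */
	int acquire_a(void), acquire_b(void), acquire_c(void);
	void release_a(void), release_b(void);

	static int setup_object(void)
	{
		int ret;

		ret = acquire_a();		/* e.g. allocate */
		if (ret)
			return ret;

		ret = acquire_b();		/* e.g. pin */
		if (ret)
			goto err_a;

		ret = acquire_c();		/* e.g. set domain */
		if (ret)
			goto err_b;

		return 0;			/* fully constructed */

	err_b:
		release_b();			/* undo in reverse acquisition order */
	err_a:
		release_a();
		return ret;
	}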
@@ -5487,6 +6971,10 @@ void ironlake_enable_drps(struct drm_device *dev) | |||
5487 | u32 rgvmodectl = I915_READ(MEMMODECTL); | 6971 | u32 rgvmodectl = I915_READ(MEMMODECTL); |
5488 | u8 fmax, fmin, fstart, vstart; | 6972 | u8 fmax, fmin, fstart, vstart; |
5489 | 6973 | ||
6974 | /* Enable temp reporting */ | ||
6975 | I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN); | ||
6976 | I915_WRITE16(TSC1, I915_READ(TSC1) | TSE); | ||
6977 | |||
5490 | /* 100ms RC evaluation intervals */ | 6978 | /* 100ms RC evaluation intervals */ |
5491 | I915_WRITE(RCUPEI, 100000); | 6979 | I915_WRITE(RCUPEI, 100000); |
5492 | I915_WRITE(RCDNEI, 100000); | 6980 | I915_WRITE(RCDNEI, 100000); |
@@ -5502,20 +6990,19 @@ void ironlake_enable_drps(struct drm_device *dev) | |||
5502 | fmin = (rgvmodectl & MEMMODE_FMIN_MASK); | 6990 | fmin = (rgvmodectl & MEMMODE_FMIN_MASK); |
5503 | fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >> | 6991 | fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >> |
5504 | MEMMODE_FSTART_SHIFT; | 6992 | MEMMODE_FSTART_SHIFT; |
5505 | fstart = fmax; | ||
5506 | 6993 | ||
5507 | vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >> | 6994 | vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >> |
5508 | PXVFREQ_PX_SHIFT; | 6995 | PXVFREQ_PX_SHIFT; |
5509 | 6996 | ||
5510 | dev_priv->fmax = fstart; /* IPS callback will increase this */ | 6997 | dev_priv->fmax = fmax; /* IPS callback will increase this */ |
5511 | dev_priv->fstart = fstart; | 6998 | dev_priv->fstart = fstart; |
5512 | 6999 | ||
5513 | dev_priv->max_delay = fmax; | 7000 | dev_priv->max_delay = fstart; |
5514 | dev_priv->min_delay = fmin; | 7001 | dev_priv->min_delay = fmin; |
5515 | dev_priv->cur_delay = fstart; | 7002 | dev_priv->cur_delay = fstart; |
5516 | 7003 | ||
5517 | DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n", fmax, fmin, | 7004 | DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n", |
5518 | fstart); | 7005 | fmax, fmin, fstart); |
5519 | 7006 | ||
5520 | I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN); | 7007 | I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN); |
5521 | 7008 | ||
@@ -5529,7 +7016,7 @@ void ironlake_enable_drps(struct drm_device *dev) | |||
5529 | rgvmodectl |= MEMMODE_SWMODE_EN; | 7016 | rgvmodectl |= MEMMODE_SWMODE_EN; |
5530 | I915_WRITE(MEMMODECTL, rgvmodectl); | 7017 | I915_WRITE(MEMMODECTL, rgvmodectl); |
5531 | 7018 | ||
5532 | if (wait_for((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 1, 0)) | 7019 | if (wait_for((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10)) |
5533 | DRM_ERROR("stuck trying to change perf mode\n"); | 7020 | DRM_ERROR("stuck trying to change perf mode\n"); |
5534 | msleep(1); | 7021 | msleep(1); |
5535 | 7022 | ||
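[Editor's note] Note the wait_for() signature change in this hunk: the per-iteration interval argument is gone and the remaining argument is a timeout in milliseconds. A rough userspace analogue of the polling loop, assuming only that the macro spins on the condition until the deadline and re-checks once at timeout:

	#include <stdbool.h>
	#include <time.h>

	/* Rough analogue of the kernel's wait_for(COND, MS); an
	 * approximation, not the macro's actual implementation. */
	static bool wait_for_cond(bool (*cond)(void), long timeout_ms)
	{
		struct timespec start, now;

		clock_gettime(CLOCK_MONOTONIC, &start);
		for (;;) {
			if (cond())
				return true;
			clock_gettime(CLOCK_MONOTONIC, &now);
			if ((now.tv_sec - start.tv_sec) * 1000 +
			    (now.tv_nsec - start.tv_nsec) / 1000000 > timeout_ms)
				return cond();	/* one final check at timeout */
			nanosleep(&(struct timespec){ .tv_nsec = 1000000 }, NULL);	/* ~1 ms */
		}
	}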
@@ -5563,6 +7050,30 @@ void ironlake_disable_drps(struct drm_device *dev) | |||
5563 | 7050 | ||
5564 | } | 7051 | } |
5565 | 7052 | ||
7053 | void gen6_set_rps(struct drm_device *dev, u8 val) | ||
7054 | { | ||
7055 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
7056 | u32 swreq; | ||
7057 | |||
7058 | swreq = (val & 0x3ff) << 25; | ||
7059 | I915_WRITE(GEN6_RPNSWREQ, swreq); | ||
7060 | } | ||
7061 | |||
7062 | void gen6_disable_rps(struct drm_device *dev) | ||
7063 | { | ||
7064 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
7065 | |||
7066 | I915_WRITE(GEN6_RPNSWREQ, 1 << 31); | ||
7067 | I915_WRITE(GEN6_PMINTRMSK, 0xffffffff); | ||
7068 | I915_WRITE(GEN6_PMIER, 0); | ||
7069 | |||
7070 | spin_lock_irq(&dev_priv->rps_lock); | ||
7071 | dev_priv->pm_iir = 0; | ||
7072 | spin_unlock_irq(&dev_priv->rps_lock); | ||
7073 | |||
7074 | I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR)); | ||
7075 | } | ||
7076 | |||
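[Editor's note] gen6_set_rps() packs the requested ratio into bits 31:25 of GEN6_RPNSWREQ, the same field the GEN6_FREQUENCY() macro fills during setup below. A small worked sketch of the encoding; the 50 MHz-per-step scaling is an assumption read off the "pcu_mbox * 50" debug print in gen6_enable_rps():

	#include <stdint.h>
	#include <stdio.h>

	#define GEN6_FREQUENCY(x)	((uint32_t)(x) << 25)	/* ratio field, bits 31:25 */

	int main(void)
	{
		/* Assumption: one ratio step = 50 MHz, as implied by the
		 * "pcu_mbox * 50" debug message in gen6_enable_rps(). */
		unsigned int mhz = 1100;
		unsigned int ratio = mhz / 50;			/* 22 */
		uint32_t swreq = GEN6_FREQUENCY(ratio & 0x3ff);	/* mirrors gen6_set_rps() */

		printf("RPNSWREQ = 0x%08x for %u MHz (ratio %u)\n",
		       (unsigned int)swreq, mhz, ratio);
		return 0;
	}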
5566 | static unsigned long intel_pxfreq(u32 vidfreq) | 7077 | static unsigned long intel_pxfreq(u32 vidfreq) |
5567 | { | 7078 | { |
5568 | unsigned long freq; | 7079 | unsigned long freq; |
@@ -5649,158 +7160,475 @@ void intel_init_emon(struct drm_device *dev) | |||
5649 | dev_priv->corr = (lcfuse & LCFUSE_HIV_MASK); | 7160 | dev_priv->corr = (lcfuse & LCFUSE_HIV_MASK); |
5650 | } | 7161 | } |
5651 | 7162 | ||
5652 | void intel_init_clock_gating(struct drm_device *dev) | 7163 | void gen6_enable_rps(struct drm_i915_private *dev_priv) |
7164 | { | ||
7165 | u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); | ||
7166 | u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS); | ||
7167 | u32 pcu_mbox, rc6_mask = 0; | ||
7168 | int cur_freq, min_freq, max_freq; | ||
7169 | int i; | ||
7170 | |||
7171 | /* Here begins a magic sequence of register writes to enable | ||
7172 | * auto-downclocking. | ||
7173 | * | ||
7174 | * Perhaps there might be some value in exposing these to | ||
7175 | * userspace... | ||
7176 | */ | ||
7177 | I915_WRITE(GEN6_RC_STATE, 0); | ||
7178 | mutex_lock(&dev_priv->dev->struct_mutex); | ||
7179 | gen6_gt_force_wake_get(dev_priv); | ||
7180 | |||
7181 | /* disable the counters and set deterministic thresholds */ | ||
7182 | I915_WRITE(GEN6_RC_CONTROL, 0); | ||
7183 | |||
7184 | I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16); | ||
7185 | I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30); | ||
7186 | I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30); | ||
7187 | I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); | ||
7188 | I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); | ||
7189 | |||
7190 | for (i = 0; i < I915_NUM_RINGS; i++) | ||
7191 | I915_WRITE(RING_MAX_IDLE(dev_priv->ring[i].mmio_base), 10); | ||
7192 | |||
7193 | I915_WRITE(GEN6_RC_SLEEP, 0); | ||
7194 | I915_WRITE(GEN6_RC1e_THRESHOLD, 1000); | ||
7195 | I915_WRITE(GEN6_RC6_THRESHOLD, 50000); | ||
7196 | I915_WRITE(GEN6_RC6p_THRESHOLD, 100000); | ||
7197 | I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */ | ||
7198 | |||
7199 | if (i915_enable_rc6) | ||
7200 | rc6_mask = GEN6_RC_CTL_RC6p_ENABLE | | ||
7201 | GEN6_RC_CTL_RC6_ENABLE; | ||
7202 | |||
7203 | I915_WRITE(GEN6_RC_CONTROL, | ||
7204 | rc6_mask | | ||
7205 | GEN6_RC_CTL_EI_MODE(1) | | ||
7206 | GEN6_RC_CTL_HW_ENABLE); | ||
7207 | |||
7208 | I915_WRITE(GEN6_RPNSWREQ, | ||
7209 | GEN6_FREQUENCY(10) | | ||
7210 | GEN6_OFFSET(0) | | ||
7211 | GEN6_AGGRESSIVE_TURBO); | ||
7212 | I915_WRITE(GEN6_RC_VIDEO_FREQ, | ||
7213 | GEN6_FREQUENCY(12)); | ||
7214 | |||
7215 | I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000); | ||
7216 | I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, | ||
7217 | 18 << 24 | | ||
7218 | 6 << 16); | ||
7219 | I915_WRITE(GEN6_RP_UP_THRESHOLD, 10000); | ||
7220 | I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 1000000); | ||
7221 | I915_WRITE(GEN6_RP_UP_EI, 100000); | ||
7222 | I915_WRITE(GEN6_RP_DOWN_EI, 5000000); | ||
7223 | I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10); | ||
7224 | I915_WRITE(GEN6_RP_CONTROL, | ||
7225 | GEN6_RP_MEDIA_TURBO | | ||
7226 | GEN6_RP_USE_NORMAL_FREQ | | ||
7227 | GEN6_RP_MEDIA_IS_GFX | | ||
7228 | GEN6_RP_ENABLE | | ||
7229 | GEN6_RP_UP_BUSY_AVG | | ||
7230 | GEN6_RP_DOWN_IDLE_CONT); | ||
7231 | |||
7232 | if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, | ||
7233 | 500)) | ||
7234 | DRM_ERROR("timeout waiting for pcode mailbox to become idle\n"); | ||
7235 | |||
7236 | I915_WRITE(GEN6_PCODE_DATA, 0); | ||
7237 | I915_WRITE(GEN6_PCODE_MAILBOX, | ||
7238 | GEN6_PCODE_READY | | ||
7239 | GEN6_PCODE_WRITE_MIN_FREQ_TABLE); | ||
7240 | if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, | ||
7241 | 500)) | ||
7242 | DRM_ERROR("timeout waiting for pcode mailbox to finish\n"); | ||
7243 | |||
7244 | min_freq = (rp_state_cap & 0xff0000) >> 16; | ||
7245 | max_freq = rp_state_cap & 0xff; | ||
7246 | cur_freq = (gt_perf_status & 0xff00) >> 8; | ||
7247 | |||
7248 | /* Check for overclock support */ | ||
7249 | if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, | ||
7250 | 500)) | ||
7251 | DRM_ERROR("timeout waiting for pcode mailbox to become idle\n"); | ||
7252 | I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_READ_OC_PARAMS); | ||
7253 | pcu_mbox = I915_READ(GEN6_PCODE_DATA); | ||
7254 | if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, | ||
7255 | 500)) | ||
7256 | DRM_ERROR("timeout waiting for pcode mailbox to finish\n"); | ||
7257 | if (pcu_mbox & (1<<31)) { /* OC supported */ | ||
7258 | max_freq = pcu_mbox & 0xff; | ||
7259 | DRM_DEBUG_DRIVER("overclocking supported, adjusting frequency max to %dMHz\n", pcu_mbox * 50); | ||
7260 | } | ||
7261 | |||
7262 | /* In units of 100MHz */ | ||
7263 | dev_priv->max_delay = max_freq; | ||
7264 | dev_priv->min_delay = min_freq; | ||
7265 | dev_priv->cur_delay = cur_freq; | ||
7266 | |||
7267 | /* requires MSI enabled */ | ||
7268 | I915_WRITE(GEN6_PMIER, | ||
7269 | GEN6_PM_MBOX_EVENT | | ||
7270 | GEN6_PM_THERMAL_EVENT | | ||
7271 | GEN6_PM_RP_DOWN_TIMEOUT | | ||
7272 | GEN6_PM_RP_UP_THRESHOLD | | ||
7273 | GEN6_PM_RP_DOWN_THRESHOLD | | ||
7274 | GEN6_PM_RP_UP_EI_EXPIRED | | ||
7275 | GEN6_PM_RP_DOWN_EI_EXPIRED); | ||
7276 | spin_lock_irq(&dev_priv->rps_lock); | ||
7277 | WARN_ON(dev_priv->pm_iir != 0); | ||
7278 | I915_WRITE(GEN6_PMIMR, 0); | ||
7279 | spin_unlock_irq(&dev_priv->rps_lock); | ||
7280 | /* enable all PM interrupts */ | ||
7281 | I915_WRITE(GEN6_PMINTRMSK, 0); | ||
7282 | |||
7283 | gen6_gt_force_wake_put(dev_priv); | ||
7284 | mutex_unlock(&dev_priv->dev->struct_mutex); | ||
7285 | } | ||
7286 | |||
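[Editor's note] The PCODE writes in gen6_enable_rps() follow a fixed mailbox handshake: wait for GEN6_PCODE_READY to clear, load the data register, write the command with READY set, then wait for READY to clear again once the firmware has consumed it. A condensed sketch of one transaction (the helper and the MMIO accessors are illustrative, not the driver's API):

	#include <stdbool.h>
	#include <stdint.h>

	#define GEN6_PCODE_READY	(1u << 31)

	/* Hypothetical stand-ins for I915_READ/I915_WRITE and wait_for(). */
	uint32_t mmio_read32(uint32_t reg);
	void mmio_write32(uint32_t reg, uint32_t val);
	bool wait_for_clear(uint32_t reg, uint32_t mask, int timeout_ms);

	/* One PCODE mailbox transaction; returns 0 on success, -1 on timeout. */
	static int pcode_write(uint32_t mailbox_reg, uint32_t data_reg,
			       uint32_t cmd, uint32_t data)
	{
		if (!wait_for_clear(mailbox_reg, GEN6_PCODE_READY, 500))
			return -1;			/* mailbox never went idle */

		mmio_write32(data_reg, data);
		mmio_write32(mailbox_reg, GEN6_PCODE_READY | cmd);

		/* READY clears once the firmware has consumed the command. */
		if (!wait_for_clear(mailbox_reg, GEN6_PCODE_READY, 500))
			return -1;

		return 0;
	}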
7287 | static void ironlake_init_clock_gating(struct drm_device *dev) | ||
5653 | { | 7288 | { |
5654 | struct drm_i915_private *dev_priv = dev->dev_private; | 7289 | struct drm_i915_private *dev_priv = dev->dev_private; |
7290 | uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE; | ||
7291 | |||
7292 | /* Required for FBC */ | ||
7293 | dspclk_gate |= DPFCUNIT_CLOCK_GATE_DISABLE | | ||
7294 | DPFCRUNIT_CLOCK_GATE_DISABLE | | ||
7295 | DPFDUNIT_CLOCK_GATE_DISABLE; | ||
7296 | /* Required for CxSR */ | ||
7297 | dspclk_gate |= DPARBUNIT_CLOCK_GATE_DISABLE; | ||
7298 | |||
7299 | I915_WRITE(PCH_3DCGDIS0, | ||
7300 | MARIUNIT_CLOCK_GATE_DISABLE | | ||
7301 | SVSMUNIT_CLOCK_GATE_DISABLE); | ||
7302 | I915_WRITE(PCH_3DCGDIS1, | ||
7303 | VFMUNIT_CLOCK_GATE_DISABLE); | ||
7304 | |||
7305 | I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate); | ||
5655 | 7306 | ||
5656 | /* | 7307 | /* |
5657 | * Disable clock gating reported to work incorrectly according to the | 7308 | * According to the spec the following bits should be set in |
5658 | * specs, but enable as much else as we can. | 7309 | * order to enable memory self-refresh |
7310 | * The bit 22/21 of 0x42004 | ||
7311 | * The bit 5 of 0x42020 | ||
7312 | * The bit 15 of 0x45000 | ||
5659 | */ | 7313 | */ |
5660 | if (HAS_PCH_SPLIT(dev)) { | 7314 | I915_WRITE(ILK_DISPLAY_CHICKEN2, |
5661 | uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE; | 7315 | (I915_READ(ILK_DISPLAY_CHICKEN2) | |
7316 | ILK_DPARB_GATE | ILK_VSDPFD_FULL)); | ||
7317 | I915_WRITE(ILK_DSPCLK_GATE, | ||
7318 | (I915_READ(ILK_DSPCLK_GATE) | | ||
7319 | ILK_DPARB_CLK_GATE)); | ||
7320 | I915_WRITE(DISP_ARB_CTL, | ||
7321 | (I915_READ(DISP_ARB_CTL) | | ||
7322 | DISP_FBC_WM_DIS)); | ||
7323 | I915_WRITE(WM3_LP_ILK, 0); | ||
7324 | I915_WRITE(WM2_LP_ILK, 0); | ||
7325 | I915_WRITE(WM1_LP_ILK, 0); | ||
5662 | 7326 | ||
5663 | if (IS_IRONLAKE(dev)) { | 7327 | /* |
5664 | /* Required for FBC */ | 7328 | * Based on the document from hardware guys the following bits |
5665 | dspclk_gate |= DPFDUNIT_CLOCK_GATE_DISABLE; | 7329 | * should be set unconditionally in order to enable FBC. |
5666 | /* Required for CxSR */ | 7330 | * The bit 22 of 0x42000 |
5667 | dspclk_gate |= DPARBUNIT_CLOCK_GATE_DISABLE; | 7331 | * The bit 22 of 0x42004 |
7332 | * The bit 7,8,9 of 0x42020. | ||
7333 | */ | ||
7334 | if (IS_IRONLAKE_M(dev)) { | ||
7335 | I915_WRITE(ILK_DISPLAY_CHICKEN1, | ||
7336 | I915_READ(ILK_DISPLAY_CHICKEN1) | | ||
7337 | ILK_FBCQ_DIS); | ||
7338 | I915_WRITE(ILK_DISPLAY_CHICKEN2, | ||
7339 | I915_READ(ILK_DISPLAY_CHICKEN2) | | ||
7340 | ILK_DPARB_GATE); | ||
7341 | I915_WRITE(ILK_DSPCLK_GATE, | ||
7342 | I915_READ(ILK_DSPCLK_GATE) | | ||
7343 | ILK_DPFC_DIS1 | | ||
7344 | ILK_DPFC_DIS2 | | ||
7345 | ILK_CLK_FBC); | ||
7346 | } | ||
7347 | |||
7348 | I915_WRITE(ILK_DISPLAY_CHICKEN2, | ||
7349 | I915_READ(ILK_DISPLAY_CHICKEN2) | | ||
7350 | ILK_ELPIN_409_SELECT); | ||
7351 | I915_WRITE(_3D_CHICKEN2, | ||
7352 | _3D_CHICKEN2_WM_READ_PIPELINED << 16 | | ||
7353 | _3D_CHICKEN2_WM_READ_PIPELINED); | ||
7354 | } | ||
5668 | 7355 | ||
5669 | I915_WRITE(PCH_3DCGDIS0, | 7356 | static void gen6_init_clock_gating(struct drm_device *dev) |
5670 | MARIUNIT_CLOCK_GATE_DISABLE | | 7357 | { |
5671 | SVSMUNIT_CLOCK_GATE_DISABLE); | 7358 | struct drm_i915_private *dev_priv = dev->dev_private; |
5672 | } | 7359 | int pipe; |
7360 | uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE; | ||
5673 | 7361 | ||
5674 | I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate); | 7362 | I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate); |
5675 | 7363 | ||
5676 | /* | 7364 | I915_WRITE(ILK_DISPLAY_CHICKEN2, |
5677 | * According to the spec the following bits should be set in | 7365 | I915_READ(ILK_DISPLAY_CHICKEN2) | |
5678 | * order to enable memory self-refresh | 7366 | ILK_ELPIN_409_SELECT); |
5679 | * The bit 22/21 of 0x42004 | 7367 | |
5680 | * The bit 5 of 0x42020 | 7368 | I915_WRITE(WM3_LP_ILK, 0); |
5681 | * The bit 15 of 0x45000 | 7369 | I915_WRITE(WM2_LP_ILK, 0); |
5682 | */ | 7370 | I915_WRITE(WM1_LP_ILK, 0); |
5683 | if (IS_IRONLAKE(dev)) { | 7371 | |
5684 | I915_WRITE(ILK_DISPLAY_CHICKEN2, | 7372 | /* |
5685 | (I915_READ(ILK_DISPLAY_CHICKEN2) | | 7373 | * According to the spec the following bits should be |
5686 | ILK_DPARB_GATE | ILK_VSDPFD_FULL)); | 7374 | * set in order to enable memory self-refresh and fbc: |
5687 | I915_WRITE(ILK_DSPCLK_GATE, | 7375 | * The bit21 and bit22 of 0x42000 |
5688 | (I915_READ(ILK_DSPCLK_GATE) | | 7376 | * The bit21 and bit22 of 0x42004 |
5689 | ILK_DPARB_CLK_GATE)); | 7377 | * The bit5 and bit7 of 0x42020 |
5690 | I915_WRITE(DISP_ARB_CTL, | 7378 | * The bit14 of 0x70180 |
5691 | (I915_READ(DISP_ARB_CTL) | | 7379 | * The bit14 of 0x71180 |
5692 | DISP_FBC_WM_DIS)); | 7380 | */ |
5693 | I915_WRITE(WM3_LP_ILK, 0); | 7381 | I915_WRITE(ILK_DISPLAY_CHICKEN1, |
5694 | I915_WRITE(WM2_LP_ILK, 0); | 7382 | I915_READ(ILK_DISPLAY_CHICKEN1) | |
5695 | I915_WRITE(WM1_LP_ILK, 0); | 7383 | ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS); |
5696 | } | 7384 | I915_WRITE(ILK_DISPLAY_CHICKEN2, |
5697 | /* | 7385 | I915_READ(ILK_DISPLAY_CHICKEN2) | |
5698 | * Based on the document from hardware guys the following bits | 7386 | ILK_DPARB_GATE | ILK_VSDPFD_FULL); |
5699 | * should be set unconditionally in order to enable FBC. | 7387 | I915_WRITE(ILK_DSPCLK_GATE, |
5700 | * The bit 22 of 0x42000 | 7388 | I915_READ(ILK_DSPCLK_GATE) | |
5701 | * The bit 22 of 0x42004 | 7389 | ILK_DPARB_CLK_GATE | |
5702 | * The bit 7,8,9 of 0x42020. | 7390 | ILK_DPFD_CLK_GATE); |
5703 | */ | 7391 | |
5704 | if (IS_IRONLAKE_M(dev)) { | 7392 | for_each_pipe(pipe) |
5705 | I915_WRITE(ILK_DISPLAY_CHICKEN1, | 7393 | I915_WRITE(DSPCNTR(pipe), |
5706 | I915_READ(ILK_DISPLAY_CHICKEN1) | | 7394 | I915_READ(DSPCNTR(pipe)) | |
5707 | ILK_FBCQ_DIS); | 7395 | DISPPLANE_TRICKLE_FEED_DISABLE); |
5708 | I915_WRITE(ILK_DISPLAY_CHICKEN2, | 7396 | } |
5709 | I915_READ(ILK_DISPLAY_CHICKEN2) | | 7397 | |
5710 | ILK_DPARB_GATE); | 7398 | static void ivybridge_init_clock_gating(struct drm_device *dev) |
5711 | I915_WRITE(ILK_DSPCLK_GATE, | 7399 | { |
5712 | I915_READ(ILK_DSPCLK_GATE) | | 7400 | struct drm_i915_private *dev_priv = dev->dev_private; |
5713 | ILK_DPFC_DIS1 | | 7401 | int pipe; |
5714 | ILK_DPFC_DIS2 | | 7402 | uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE; |
5715 | ILK_CLK_FBC); | 7403 | |
5716 | } | 7404 | I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate); |
7405 | |||
7406 | I915_WRITE(WM3_LP_ILK, 0); | ||
7407 | I915_WRITE(WM2_LP_ILK, 0); | ||
7408 | I915_WRITE(WM1_LP_ILK, 0); | ||
7409 | |||
7410 | I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE); | ||
7411 | |||
7412 | for_each_pipe(pipe) | ||
7413 | I915_WRITE(DSPCNTR(pipe), | ||
7414 | I915_READ(DSPCNTR(pipe)) | | ||
7415 | DISPPLANE_TRICKLE_FEED_DISABLE); | ||
7416 | } | ||
7417 | |||
7418 | static void g4x_init_clock_gating(struct drm_device *dev) | ||
7419 | { | ||
7420 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
7421 | uint32_t dspclk_gate; | ||
7422 | |||
7423 | I915_WRITE(RENCLK_GATE_D1, 0); | ||
7424 | I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE | | ||
7425 | GS_UNIT_CLOCK_GATE_DISABLE | | ||
7426 | CL_UNIT_CLOCK_GATE_DISABLE); | ||
7427 | I915_WRITE(RAMCLK_GATE_D, 0); | ||
7428 | dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE | | ||
7429 | OVRUNIT_CLOCK_GATE_DISABLE | | ||
7430 | OVCUNIT_CLOCK_GATE_DISABLE; | ||
7431 | if (IS_GM45(dev)) | ||
7432 | dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE; | ||
7433 | I915_WRITE(DSPCLK_GATE_D, dspclk_gate); | ||
7434 | } | ||
7435 | |||
7436 | static void crestline_init_clock_gating(struct drm_device *dev) | ||
7437 | { | ||
7438 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
7439 | |||
7440 | I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE); | ||
7441 | I915_WRITE(RENCLK_GATE_D2, 0); | ||
7442 | I915_WRITE(DSPCLK_GATE_D, 0); | ||
7443 | I915_WRITE(RAMCLK_GATE_D, 0); | ||
7444 | I915_WRITE16(DEUC, 0); | ||
7445 | } | ||
7446 | |||
7447 | static void broadwater_init_clock_gating(struct drm_device *dev) | ||
7448 | { | ||
7449 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
7450 | |||
7451 | I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE | | ||
7452 | I965_RCC_CLOCK_GATE_DISABLE | | ||
7453 | I965_RCPB_CLOCK_GATE_DISABLE | | ||
7454 | I965_ISC_CLOCK_GATE_DISABLE | | ||
7455 | I965_FBC_CLOCK_GATE_DISABLE); | ||
7456 | I915_WRITE(RENCLK_GATE_D2, 0); | ||
7457 | } | ||
7458 | |||
7459 | static void gen3_init_clock_gating(struct drm_device *dev) | ||
7460 | { | ||
7461 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
7462 | u32 dstate = I915_READ(D_STATE); | ||
7463 | |||
7464 | dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING | | ||
7465 | DSTATE_DOT_CLOCK_GATING; | ||
7466 | I915_WRITE(D_STATE, dstate); | ||
7467 | } | ||
7468 | |||
7469 | static void i85x_init_clock_gating(struct drm_device *dev) | ||
7470 | { | ||
7471 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
7472 | |||
7473 | I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE); | ||
7474 | } | ||
7475 | |||
7476 | static void i830_init_clock_gating(struct drm_device *dev) | ||
7477 | { | ||
7478 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
7479 | |||
7480 | I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE); | ||
7481 | } | ||
7482 | |||
7483 | static void ibx_init_clock_gating(struct drm_device *dev) | ||
7484 | { | ||
7485 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
7486 | |||
7487 | /* | ||
7488 | * On Ibex Peak and Cougar Point, we need to disable clock | ||
7489 | * gating for the panel power sequencer or it will fail to | ||
7490 | * start up when no ports are active. | ||
7491 | */ | ||
7492 | I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE); | ||
7493 | } | ||
7494 | |||
7495 | static void cpt_init_clock_gating(struct drm_device *dev) | ||
7496 | { | ||
7497 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
7498 | |||
7499 | /* | ||
7500 | * On Ibex Peak and Cougar Point, we need to disable clock | ||
7501 | * gating for the panel power sequencer or it will fail to | ||
7502 | * start up when no ports are active. | ||
7503 | */ | ||
7504 | I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE); | ||
7505 | I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) | | ||
7506 | DPLS_EDP_PPS_FIX_DIS); | ||
7507 | } | ||
7508 | |||
7509 | static void ironlake_teardown_rc6(struct drm_device *dev) | ||
7510 | { | ||
7511 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
7512 | |||
7513 | if (dev_priv->renderctx) { | ||
7514 | i915_gem_object_unpin(dev_priv->renderctx); | ||
7515 | drm_gem_object_unreference(&dev_priv->renderctx->base); | ||
7516 | dev_priv->renderctx = NULL; | ||
7517 | } | ||
7518 | |||
7519 | if (dev_priv->pwrctx) { | ||
7520 | i915_gem_object_unpin(dev_priv->pwrctx); | ||
7521 | drm_gem_object_unreference(&dev_priv->pwrctx->base); | ||
7522 | dev_priv->pwrctx = NULL; | ||
7523 | } | ||
7524 | } | ||
7525 | |||
7526 | static void ironlake_disable_rc6(struct drm_device *dev) | ||
7527 | { | ||
7528 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
7529 | |||
7530 | if (I915_READ(PWRCTXA)) { | ||
7531 | /* Wake the GPU, prevent RC6, then restore RSTDBYCTL */ | ||
7532 | I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) | RCX_SW_EXIT); | ||
7533 | wait_for(((I915_READ(RSTDBYCTL) & RSX_STATUS_MASK) == RSX_STATUS_ON), | ||
7534 | 50); | ||
7535 | |||
7536 | I915_WRITE(PWRCTXA, 0); | ||
7537 | POSTING_READ(PWRCTXA); | ||
7538 | |||
7539 | I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT); | ||
7540 | POSTING_READ(RSTDBYCTL); | ||
7541 | } | ||
7542 | |||
7543 | ironlake_teardown_rc6(dev); | ||
7544 | } | ||
7545 | |||
7546 | static int ironlake_setup_rc6(struct drm_device *dev) | ||
7547 | { | ||
7548 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
7549 | |||
7550 | if (dev_priv->renderctx == NULL) | ||
7551 | dev_priv->renderctx = intel_alloc_context_page(dev); | ||
7552 | if (!dev_priv->renderctx) | ||
7553 | return -ENOMEM; | ||
7554 | |||
7555 | if (dev_priv->pwrctx == NULL) | ||
7556 | dev_priv->pwrctx = intel_alloc_context_page(dev); | ||
7557 | if (!dev_priv->pwrctx) { | ||
7558 | ironlake_teardown_rc6(dev); | ||
7559 | return -ENOMEM; | ||
7560 | } | ||
7561 | |||
7562 | return 0; | ||
7563 | } | ||
7564 | |||
7565 | void ironlake_enable_rc6(struct drm_device *dev) | ||
7566 | { | ||
7567 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
7568 | int ret; | ||
7569 | |||
7570 | /* rc6 disabled by default due to repeated reports of hanging during | ||
7571 | * boot and resume. | ||
7572 | */ | ||
7573 | if (!i915_enable_rc6) | ||
7574 | return; | ||
7575 | |||
7576 | mutex_lock(&dev->struct_mutex); | ||
7577 | ret = ironlake_setup_rc6(dev); | ||
7578 | if (ret) { | ||
7579 | mutex_unlock(&dev->struct_mutex); | ||
5717 | return; | 7580 | return; |
5718 | } else if (IS_G4X(dev)) { | ||
5719 | uint32_t dspclk_gate; | ||
5720 | I915_WRITE(RENCLK_GATE_D1, 0); | ||
5721 | I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE | | ||
5722 | GS_UNIT_CLOCK_GATE_DISABLE | | ||
5723 | CL_UNIT_CLOCK_GATE_DISABLE); | ||
5724 | I915_WRITE(RAMCLK_GATE_D, 0); | ||
5725 | dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE | | ||
5726 | OVRUNIT_CLOCK_GATE_DISABLE | | ||
5727 | OVCUNIT_CLOCK_GATE_DISABLE; | ||
5728 | if (IS_GM45(dev)) | ||
5729 | dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE; | ||
5730 | I915_WRITE(DSPCLK_GATE_D, dspclk_gate); | ||
5731 | } else if (IS_I965GM(dev)) { | ||
5732 | I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE); | ||
5733 | I915_WRITE(RENCLK_GATE_D2, 0); | ||
5734 | I915_WRITE(DSPCLK_GATE_D, 0); | ||
5735 | I915_WRITE(RAMCLK_GATE_D, 0); | ||
5736 | I915_WRITE16(DEUC, 0); | ||
5737 | } else if (IS_I965G(dev)) { | ||
5738 | I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE | | ||
5739 | I965_RCC_CLOCK_GATE_DISABLE | | ||
5740 | I965_RCPB_CLOCK_GATE_DISABLE | | ||
5741 | I965_ISC_CLOCK_GATE_DISABLE | | ||
5742 | I965_FBC_CLOCK_GATE_DISABLE); | ||
5743 | I915_WRITE(RENCLK_GATE_D2, 0); | ||
5744 | } else if (IS_I9XX(dev)) { | ||
5745 | u32 dstate = I915_READ(D_STATE); | ||
5746 | |||
5747 | dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING | | ||
5748 | DSTATE_DOT_CLOCK_GATING; | ||
5749 | I915_WRITE(D_STATE, dstate); | ||
5750 | } else if (IS_I85X(dev) || IS_I865G(dev)) { | ||
5751 | I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE); | ||
5752 | } else if (IS_I830(dev)) { | ||
5753 | I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE); | ||
5754 | } | 7581 | } |
5755 | 7582 | ||
5756 | /* | 7583 | /* |
5757 | * GPU can automatically power down the render unit if given a page | 7584 | * GPU can automatically power down the render unit if given a page |
5758 | * to save state. | 7585 | * to save state. |
5759 | */ | 7586 | */ |
5760 | if (IS_IRONLAKE_M(dev)) { | 7587 | ret = BEGIN_LP_RING(6); |
5761 | if (dev_priv->renderctx == NULL) | 7588 | if (ret) { |
5762 | dev_priv->renderctx = intel_alloc_context_page(dev); | 7589 | ironlake_teardown_rc6(dev); |
5763 | if (dev_priv->renderctx) { | 7590 | mutex_unlock(&dev->struct_mutex); |
5764 | struct drm_i915_gem_object *obj_priv; | 7591 | return; |
5765 | obj_priv = to_intel_bo(dev_priv->renderctx); | ||
5766 | if (obj_priv) { | ||
5767 | BEGIN_LP_RING(4); | ||
5768 | OUT_RING(MI_SET_CONTEXT); | ||
5769 | OUT_RING(obj_priv->gtt_offset | | ||
5770 | MI_MM_SPACE_GTT | | ||
5771 | MI_SAVE_EXT_STATE_EN | | ||
5772 | MI_RESTORE_EXT_STATE_EN | | ||
5773 | MI_RESTORE_INHIBIT); | ||
5774 | OUT_RING(MI_NOOP); | ||
5775 | OUT_RING(MI_FLUSH); | ||
5776 | ADVANCE_LP_RING(); | ||
5777 | } | ||
5778 | } else | ||
5779 | DRM_DEBUG_KMS("Failed to allocate render context." | ||
5780 | "Disable RC6\n"); | ||
5781 | } | 7592 | } |
5782 | 7593 | ||
5783 | if (I915_HAS_RC6(dev) && drm_core_check_feature(dev, DRIVER_MODESET)) { | 7594 | OUT_RING(MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN); |
5784 | struct drm_i915_gem_object *obj_priv = NULL; | 7595 | OUT_RING(MI_SET_CONTEXT); |
7596 | OUT_RING(dev_priv->renderctx->gtt_offset | | ||
7597 | MI_MM_SPACE_GTT | | ||
7598 | MI_SAVE_EXT_STATE_EN | | ||
7599 | MI_RESTORE_EXT_STATE_EN | | ||
7600 | MI_RESTORE_INHIBIT); | ||
7601 | OUT_RING(MI_SUSPEND_FLUSH); | ||
7602 | OUT_RING(MI_NOOP); | ||
7603 | OUT_RING(MI_FLUSH); | ||
7604 | ADVANCE_LP_RING(); | ||
5785 | 7605 | ||
5786 | if (dev_priv->pwrctx) { | 7606 | /* |
5787 | obj_priv = to_intel_bo(dev_priv->pwrctx); | 7607 | * Wait for the command parser to advance past MI_SET_CONTEXT. The HW |
5788 | } else { | 7608 | * does an implicit flush; combined with the MI_FLUSH above, it should be |
5789 | struct drm_gem_object *pwrctx; | 7609 | * safe to assume that renderctx is valid. |
7610 | */ | ||
7611 | ret = intel_wait_ring_idle(LP_RING(dev_priv)); | ||
7612 | if (ret) { | ||
7613 | DRM_ERROR("failed to enable ironlake power power savings\n"); | ||
7614 | ironlake_teardown_rc6(dev); | ||
7615 | mutex_unlock(&dev->struct_mutex); | ||
7616 | return; | ||
7617 | } | ||
5790 | 7618 | ||
5791 | pwrctx = intel_alloc_context_page(dev); | 7619 | I915_WRITE(PWRCTXA, dev_priv->pwrctx->gtt_offset | PWRCTX_EN); |
5792 | if (pwrctx) { | 7620 | I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT); |
5793 | dev_priv->pwrctx = pwrctx; | 7621 | mutex_unlock(&dev->struct_mutex); |
5794 | obj_priv = to_intel_bo(pwrctx); | 7622 | } |
5795 | } | ||
5796 | } | ||
5797 | 7623 | ||
5798 | if (obj_priv) { | 7624 | void intel_init_clock_gating(struct drm_device *dev) |
5799 | I915_WRITE(PWRCTXA, obj_priv->gtt_offset | PWRCTX_EN); | 7625 | { |
5800 | I915_WRITE(MCHBAR_RENDER_STANDBY, | 7626 | struct drm_i915_private *dev_priv = dev->dev_private; |
5801 | I915_READ(MCHBAR_RENDER_STANDBY) & ~RCX_SW_EXIT); | 7627 | |
5802 | } | 7628 | dev_priv->display.init_clock_gating(dev); |
5803 | } | 7629 | |
7630 | if (dev_priv->display.init_pch_clock_gating) | ||
7631 | dev_priv->display.init_pch_clock_gating(dev); | ||
5804 | } | 7632 | } |
5805 | 7633 | ||
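[Editor's note] intel_init_clock_gating() is reduced to dispatching through per-platform callbacks chosen once in intel_init_display(); only PCH platforms install the optional second hook. A sketch of the pattern with hypothetical types:

	struct device;		/* opaque; stands in for struct drm_device */

	struct display_ops {
		void (*init_clock_gating)(struct device *dev);
		void (*init_pch_clock_gating)(struct device *dev);	/* may be NULL */
	};

	static void run_clock_gating(struct device *dev,
				     const struct display_ops *ops)
	{
		ops->init_clock_gating(dev);		/* always installed at init */

		if (ops->init_pch_clock_gating)		/* PCH platforms only */
			ops->init_pch_clock_gating(dev);
	}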
5806 | /* Set up chip specific display functions */ | 7634 | /* Set up chip specific display functions */ |
@@ -5809,13 +7637,16 @@ static void intel_init_display(struct drm_device *dev) | |||
5809 | struct drm_i915_private *dev_priv = dev->dev_private; | 7637 | struct drm_i915_private *dev_priv = dev->dev_private; |
5810 | 7638 | ||
5811 | /* We always want a DPMS function */ | 7639 | /* We always want a DPMS function */ |
5812 | if (HAS_PCH_SPLIT(dev)) | 7640 | if (HAS_PCH_SPLIT(dev)) { |
5813 | dev_priv->display.dpms = ironlake_crtc_dpms; | 7641 | dev_priv->display.dpms = ironlake_crtc_dpms; |
5814 | else | 7642 | dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set; |
7643 | } else { | ||
5815 | dev_priv->display.dpms = i9xx_crtc_dpms; | 7644 | dev_priv->display.dpms = i9xx_crtc_dpms; |
7645 | dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set; | ||
7646 | } | ||
5816 | 7647 | ||
5817 | if (I915_HAS_FBC(dev)) { | 7648 | if (I915_HAS_FBC(dev)) { |
5818 | if (IS_IRONLAKE_M(dev)) { | 7649 | if (HAS_PCH_SPLIT(dev)) { |
5819 | dev_priv->display.fbc_enabled = ironlake_fbc_enabled; | 7650 | dev_priv->display.fbc_enabled = ironlake_fbc_enabled; |
5820 | dev_priv->display.enable_fbc = ironlake_enable_fbc; | 7651 | dev_priv->display.enable_fbc = ironlake_enable_fbc; |
5821 | dev_priv->display.disable_fbc = ironlake_disable_fbc; | 7652 | dev_priv->display.disable_fbc = ironlake_disable_fbc; |
@@ -5823,7 +7654,7 @@ static void intel_init_display(struct drm_device *dev) | |||
5823 | dev_priv->display.fbc_enabled = g4x_fbc_enabled; | 7654 | dev_priv->display.fbc_enabled = g4x_fbc_enabled; |
5824 | dev_priv->display.enable_fbc = g4x_enable_fbc; | 7655 | dev_priv->display.enable_fbc = g4x_enable_fbc; |
5825 | dev_priv->display.disable_fbc = g4x_disable_fbc; | 7656 | dev_priv->display.disable_fbc = g4x_disable_fbc; |
5826 | } else if (IS_I965GM(dev)) { | 7657 | } else if (IS_CRESTLINE(dev)) { |
5827 | dev_priv->display.fbc_enabled = i8xx_fbc_enabled; | 7658 | dev_priv->display.fbc_enabled = i8xx_fbc_enabled; |
5828 | dev_priv->display.enable_fbc = i8xx_enable_fbc; | 7659 | dev_priv->display.enable_fbc = i8xx_enable_fbc; |
5829 | dev_priv->display.disable_fbc = i8xx_disable_fbc; | 7660 | dev_priv->display.disable_fbc = i8xx_disable_fbc; |
@@ -5856,7 +7687,12 @@ static void intel_init_display(struct drm_device *dev) | |||
5856 | 7687 | ||
5857 | /* For FIFO watermark updates */ | 7688 | /* For FIFO watermark updates */ |
5858 | if (HAS_PCH_SPLIT(dev)) { | 7689 | if (HAS_PCH_SPLIT(dev)) { |
5859 | if (IS_IRONLAKE(dev)) { | 7690 | if (HAS_PCH_IBX(dev)) |
7691 | dev_priv->display.init_pch_clock_gating = ibx_init_clock_gating; | ||
7692 | else if (HAS_PCH_CPT(dev)) | ||
7693 | dev_priv->display.init_pch_clock_gating = cpt_init_clock_gating; | ||
7694 | |||
7695 | if (IS_GEN5(dev)) { | ||
5860 | if (I915_READ(MLTR_ILK) & ILK_SRLT_MASK) | 7696 | if (I915_READ(MLTR_ILK) & ILK_SRLT_MASK) |
5861 | dev_priv->display.update_wm = ironlake_update_wm; | 7697 | dev_priv->display.update_wm = ironlake_update_wm; |
5862 | else { | 7698 | else { |
@@ -5864,6 +7700,30 @@ static void intel_init_display(struct drm_device *dev) | |||
5864 | "Disable CxSR\n"); | 7700 | "Disable CxSR\n"); |
5865 | dev_priv->display.update_wm = NULL; | 7701 | dev_priv->display.update_wm = NULL; |
5866 | } | 7702 | } |
7703 | dev_priv->display.fdi_link_train = ironlake_fdi_link_train; | ||
7704 | dev_priv->display.init_clock_gating = ironlake_init_clock_gating; | ||
7705 | } else if (IS_GEN6(dev)) { | ||
7706 | if (SNB_READ_WM0_LATENCY()) { | ||
7707 | dev_priv->display.update_wm = sandybridge_update_wm; | ||
7708 | } else { | ||
7709 | DRM_DEBUG_KMS("Failed to read display plane latency. " | ||
7710 | "Disable CxSR\n"); | ||
7711 | dev_priv->display.update_wm = NULL; | ||
7712 | } | ||
7713 | dev_priv->display.fdi_link_train = gen6_fdi_link_train; | ||
7714 | dev_priv->display.init_clock_gating = gen6_init_clock_gating; | ||
7715 | } else if (IS_IVYBRIDGE(dev)) { | ||
7716 | /* FIXME: detect B0+ stepping and use auto training */ | ||
7717 | dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train; | ||
7718 | if (SNB_READ_WM0_LATENCY()) { | ||
7719 | dev_priv->display.update_wm = sandybridge_update_wm; | ||
7720 | } else { | ||
7721 | DRM_DEBUG_KMS("Failed to read display plane latency. " | ||
7722 | "Disable CxSR\n"); | ||
7723 | dev_priv->display.update_wm = NULL; | ||
7724 | } | ||
7725 | dev_priv->display.init_clock_gating = ivybridge_init_clock_gating; | ||
7726 | |||
5867 | } else | 7727 | } else |
5868 | dev_priv->display.update_wm = NULL; | 7728 | dev_priv->display.update_wm = NULL; |
5869 | } else if (IS_PINEVIEW(dev)) { | 7729 | } else if (IS_PINEVIEW(dev)) { |
@@ -5881,23 +7741,61 @@ static void intel_init_display(struct drm_device *dev) | |||
5881 | dev_priv->display.update_wm = NULL; | 7741 | dev_priv->display.update_wm = NULL; |
5882 | } else | 7742 | } else |
5883 | dev_priv->display.update_wm = pineview_update_wm; | 7743 | dev_priv->display.update_wm = pineview_update_wm; |
5884 | } else if (IS_G4X(dev)) | 7744 | dev_priv->display.init_clock_gating = gen3_init_clock_gating; |
7745 | } else if (IS_G4X(dev)) { | ||
5885 | dev_priv->display.update_wm = g4x_update_wm; | 7746 | dev_priv->display.update_wm = g4x_update_wm; |
5886 | else if (IS_I965G(dev)) | 7747 | dev_priv->display.init_clock_gating = g4x_init_clock_gating; |
7748 | } else if (IS_GEN4(dev)) { | ||
5887 | dev_priv->display.update_wm = i965_update_wm; | 7749 | dev_priv->display.update_wm = i965_update_wm; |
5888 | else if (IS_I9XX(dev)) { | 7750 | if (IS_CRESTLINE(dev)) |
7751 | dev_priv->display.init_clock_gating = crestline_init_clock_gating; | ||
7752 | else if (IS_BROADWATER(dev)) | ||
7753 | dev_priv->display.init_clock_gating = broadwater_init_clock_gating; | ||
7754 | } else if (IS_GEN3(dev)) { | ||
5889 | dev_priv->display.update_wm = i9xx_update_wm; | 7755 | dev_priv->display.update_wm = i9xx_update_wm; |
5890 | dev_priv->display.get_fifo_size = i9xx_get_fifo_size; | 7756 | dev_priv->display.get_fifo_size = i9xx_get_fifo_size; |
7757 | dev_priv->display.init_clock_gating = gen3_init_clock_gating; | ||
7758 | } else if (IS_I865G(dev)) { | ||
7759 | dev_priv->display.update_wm = i830_update_wm; | ||
7760 | dev_priv->display.init_clock_gating = i85x_init_clock_gating; | ||
7761 | dev_priv->display.get_fifo_size = i830_get_fifo_size; | ||
5891 | } else if (IS_I85X(dev)) { | 7762 | } else if (IS_I85X(dev)) { |
5892 | dev_priv->display.update_wm = i9xx_update_wm; | 7763 | dev_priv->display.update_wm = i9xx_update_wm; |
5893 | dev_priv->display.get_fifo_size = i85x_get_fifo_size; | 7764 | dev_priv->display.get_fifo_size = i85x_get_fifo_size; |
7765 | dev_priv->display.init_clock_gating = i85x_init_clock_gating; | ||
5894 | } else { | 7766 | } else { |
5895 | dev_priv->display.update_wm = i830_update_wm; | 7767 | dev_priv->display.update_wm = i830_update_wm; |
7768 | dev_priv->display.init_clock_gating = i830_init_clock_gating; | ||
5896 | if (IS_845G(dev)) | 7769 | if (IS_845G(dev)) |
5897 | dev_priv->display.get_fifo_size = i845_get_fifo_size; | 7770 | dev_priv->display.get_fifo_size = i845_get_fifo_size; |
5898 | else | 7771 | else |
5899 | dev_priv->display.get_fifo_size = i830_get_fifo_size; | 7772 | dev_priv->display.get_fifo_size = i830_get_fifo_size; |
5900 | } | 7773 | } |
7774 | |||
7775 | /* Default just returns -ENODEV to indicate unsupported */ | ||
7776 | dev_priv->display.queue_flip = intel_default_queue_flip; | ||
7777 | |||
7778 | switch (INTEL_INFO(dev)->gen) { | ||
7779 | case 2: | ||
7780 | dev_priv->display.queue_flip = intel_gen2_queue_flip; | ||
7781 | break; | ||
7782 | |||
7783 | case 3: | ||
7784 | dev_priv->display.queue_flip = intel_gen3_queue_flip; | ||
7785 | break; | ||
7786 | |||
7787 | case 4: | ||
7788 | case 5: | ||
7789 | dev_priv->display.queue_flip = intel_gen4_queue_flip; | ||
7790 | break; | ||
7791 | |||
7792 | case 6: | ||
7793 | dev_priv->display.queue_flip = intel_gen6_queue_flip; | ||
7794 | break; | ||
7795 | case 7: | ||
7796 | dev_priv->display.queue_flip = intel_gen7_queue_flip; | ||
7797 | break; | ||
7798 | } | ||
5901 | } | 7799 | } |
5902 | 7800 | ||
5903 | /* | 7801 | /* |
@@ -5913,6 +7811,15 @@ static void quirk_pipea_force (struct drm_device *dev) | |||
5913 | DRM_DEBUG_DRIVER("applying pipe a force quirk\n"); | 7811 | DRM_DEBUG_DRIVER("applying pipe a force quirk\n"); |
5914 | } | 7812 | } |
5915 | 7813 | ||
7814 | /* | ||
7815 | * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason | ||
7816 | */ | ||
7817 | static void quirk_ssc_force_disable(struct drm_device *dev) | ||
7818 | { | ||
7819 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
7820 | dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE; | ||
7821 | } | ||
7822 | |||
5916 | struct intel_quirk { | 7823 | struct intel_quirk { |
5917 | int device; | 7824 | int device; |
5918 | int subsystem_vendor; | 7825 | int subsystem_vendor; |
@@ -5941,6 +7848,9 @@ struct intel_quirk intel_quirks[] = { | |||
5941 | /* 855 & before need to leave pipe A & dpll A up */ | 7848 | /* 855 & before need to leave pipe A & dpll A up */ |
5942 | { 0x3582, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force }, | 7849 | { 0x3582, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force }, |
5943 | { 0x2562, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force }, | 7850 | { 0x2562, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force }, |
7851 | |||
7852 | /* Lenovo U160 cannot use SSC on LVDS */ | ||
7853 | { 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable }, | ||
5944 | }; | 7854 | }; |
5945 | 7855 | ||
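[Editor's note] A quirk entry matches on PCI device ID plus subsystem vendor/device, with PCI_ANY_ID acting as a wildcard; intel_init_quirks() presumably walks this table and runs the hook for every match. A standalone sketch of that matching loop (struct layout taken from the diff; the loop body is an assumption):

	#include <stddef.h>

	#define PCI_ANY_ID	(~0)

	struct intel_quirk {
		int device;
		int subsystem_vendor;
		int subsystem_device;
		void (*hook)(void *dev);	/* drm_device in the driver */
	};

	/* Assumed matching logic: device must match exactly, subsystem
	 * IDs match exactly or via the PCI_ANY_ID wildcard. */
	static void apply_quirks(void *dev, int device, int sub_ven, int sub_dev,
				 const struct intel_quirk *q, size_t n)
	{
		for (size_t i = 0; i < n; i++) {
			if (q[i].device == device &&
			    (q[i].subsystem_vendor == sub_ven ||
			     q[i].subsystem_vendor == PCI_ANY_ID) &&
			    (q[i].subsystem_device == sub_dev ||
			     q[i].subsystem_device == PCI_ANY_ID))
				q[i].hook(dev);
		}
	}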
5946 | static void intel_init_quirks(struct drm_device *dev) | 7856 | static void intel_init_quirks(struct drm_device *dev) |
@@ -5999,27 +7909,18 @@ void intel_modeset_init(struct drm_device *dev) | |||
5999 | 7909 | ||
6000 | intel_init_display(dev); | 7910 | intel_init_display(dev); |
6001 | 7911 | ||
6002 | if (IS_I965G(dev)) { | 7912 | if (IS_GEN2(dev)) { |
6003 | dev->mode_config.max_width = 8192; | 7913 | dev->mode_config.max_width = 2048; |
6004 | dev->mode_config.max_height = 8192; | 7914 | dev->mode_config.max_height = 2048; |
6005 | } else if (IS_I9XX(dev)) { | 7915 | } else if (IS_GEN3(dev)) { |
6006 | dev->mode_config.max_width = 4096; | 7916 | dev->mode_config.max_width = 4096; |
6007 | dev->mode_config.max_height = 4096; | 7917 | dev->mode_config.max_height = 4096; |
6008 | } else { | 7918 | } else { |
6009 | dev->mode_config.max_width = 2048; | 7919 | dev->mode_config.max_width = 8192; |
6010 | dev->mode_config.max_height = 2048; | 7920 | dev->mode_config.max_height = 8192; |
6011 | } | 7921 | } |
7922 | dev->mode_config.fb_base = dev->agp->base; | ||
6012 | 7923 | ||
6013 | /* set memory base */ | ||
6014 | if (IS_I9XX(dev)) | ||
6015 | dev->mode_config.fb_base = pci_resource_start(dev->pdev, 2); | ||
6016 | else | ||
6017 | dev->mode_config.fb_base = pci_resource_start(dev->pdev, 0); | ||
6018 | |||
6019 | if (IS_MOBILE(dev) || IS_I9XX(dev)) | ||
6020 | dev_priv->num_pipe = 2; | ||
6021 | else | ||
6022 | dev_priv->num_pipe = 1; | ||
6023 | DRM_DEBUG_KMS("%d display pipe%s available.\n", | 7924 | DRM_DEBUG_KMS("%d display pipe%s available.\n", |
6024 | dev_priv->num_pipe, dev_priv->num_pipe > 1 ? "s" : ""); | 7925 | dev_priv->num_pipe, dev_priv->num_pipe > 1 ? "s" : ""); |
6025 | 7926 | ||
@@ -6027,21 +7928,29 @@ void intel_modeset_init(struct drm_device *dev) | |||
6027 | intel_crtc_init(dev, i); | 7928 | intel_crtc_init(dev, i); |
6028 | } | 7929 | } |
6029 | 7930 | ||
7931 | /* Just disable it once at startup */ | ||
7932 | i915_disable_vga(dev); | ||
6030 | intel_setup_outputs(dev); | 7933 | intel_setup_outputs(dev); |
6031 | 7934 | ||
6032 | intel_init_clock_gating(dev); | 7935 | intel_init_clock_gating(dev); |
6033 | 7936 | ||
6034 | /* Just disable it once at startup */ | ||
6035 | i915_disable_vga(dev); | ||
6036 | |||
6037 | if (IS_IRONLAKE_M(dev)) { | 7937 | if (IS_IRONLAKE_M(dev)) { |
6038 | ironlake_enable_drps(dev); | 7938 | ironlake_enable_drps(dev); |
6039 | intel_init_emon(dev); | 7939 | intel_init_emon(dev); |
6040 | } | 7940 | } |
6041 | 7941 | ||
7942 | if (IS_GEN6(dev)) | ||
7943 | gen6_enable_rps(dev_priv); | ||
7944 | |||
6042 | INIT_WORK(&dev_priv->idle_work, intel_idle_update); | 7945 | INIT_WORK(&dev_priv->idle_work, intel_idle_update); |
6043 | setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer, | 7946 | setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer, |
6044 | (unsigned long)dev); | 7947 | (unsigned long)dev); |
7948 | } | ||
7949 | |||
7950 | void intel_modeset_gem_init(struct drm_device *dev) | ||
7951 | { | ||
7952 | if (IS_IRONLAKE_M(dev)) | ||
7953 | ironlake_enable_rc6(dev); | ||
6045 | 7954 | ||
6046 | intel_setup_overlay(dev); | 7955 | intel_setup_overlay(dev); |
6047 | } | 7956 | } |
@@ -6052,10 +7961,11 @@ void intel_modeset_cleanup(struct drm_device *dev) | |||
6052 | struct drm_crtc *crtc; | 7961 | struct drm_crtc *crtc; |
6053 | struct intel_crtc *intel_crtc; | 7962 | struct intel_crtc *intel_crtc; |
6054 | 7963 | ||
7964 | drm_kms_helper_poll_fini(dev); | ||
6055 | mutex_lock(&dev->struct_mutex); | 7965 | mutex_lock(&dev->struct_mutex); |
6056 | 7966 | ||
6057 | drm_kms_helper_poll_fini(dev); | 7967 | intel_unregister_dsm_handler(); |
6058 | intel_fbdev_fini(dev); | 7968 | |
6059 | 7969 | ||
6060 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { | 7970 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { |
6061 | /* Skip inactive CRTCs */ | 7971 | /* Skip inactive CRTCs */ |
@@ -6063,67 +7973,52 @@ void intel_modeset_cleanup(struct drm_device *dev) | |||
6063 | continue; | 7973 | continue; |
6064 | 7974 | ||
6065 | intel_crtc = to_intel_crtc(crtc); | 7975 | intel_crtc = to_intel_crtc(crtc); |
6066 | intel_increase_pllclock(crtc, false); | 7976 | intel_increase_pllclock(crtc); |
6067 | del_timer_sync(&intel_crtc->idle_timer); | ||
6068 | } | 7977 | } |
6069 | 7978 | ||
6070 | del_timer_sync(&dev_priv->idle_timer); | ||
6071 | |||
6072 | if (dev_priv->display.disable_fbc) | 7979 | if (dev_priv->display.disable_fbc) |
6073 | dev_priv->display.disable_fbc(dev); | 7980 | dev_priv->display.disable_fbc(dev); |
6074 | 7981 | ||
6075 | if (dev_priv->renderctx) { | ||
6076 | struct drm_i915_gem_object *obj_priv; | ||
6077 | |||
6078 | obj_priv = to_intel_bo(dev_priv->renderctx); | ||
6079 | I915_WRITE(CCID, obj_priv->gtt_offset &~ CCID_EN); | ||
6080 | I915_READ(CCID); | ||
6081 | i915_gem_object_unpin(dev_priv->renderctx); | ||
6082 | drm_gem_object_unreference(dev_priv->renderctx); | ||
6083 | } | ||
6084 | |||
6085 | if (dev_priv->pwrctx) { | ||
6086 | struct drm_i915_gem_object *obj_priv; | ||
6087 | |||
6088 | obj_priv = to_intel_bo(dev_priv->pwrctx); | ||
6089 | I915_WRITE(PWRCTXA, obj_priv->gtt_offset &~ PWRCTX_EN); | ||
6090 | I915_READ(PWRCTXA); | ||
6091 | i915_gem_object_unpin(dev_priv->pwrctx); | ||
6092 | drm_gem_object_unreference(dev_priv->pwrctx); | ||
6093 | } | ||
6094 | |||
6095 | if (IS_IRONLAKE_M(dev)) | 7982 | if (IS_IRONLAKE_M(dev)) |
6096 | ironlake_disable_drps(dev); | 7983 | ironlake_disable_drps(dev); |
7984 | if (IS_GEN6(dev)) | ||
7985 | gen6_disable_rps(dev); | ||
7986 | |||
7987 | if (IS_IRONLAKE_M(dev)) | ||
7988 | ironlake_disable_rc6(dev); | ||
6097 | 7989 | ||
6098 | mutex_unlock(&dev->struct_mutex); | 7990 | mutex_unlock(&dev->struct_mutex); |
6099 | 7991 | ||
7992 | /* Disable the irq before mode object teardown, for the irq might | ||
7993 | * enqueue unpin/hotplug work. */ | ||
7994 | drm_irq_uninstall(dev); | ||
7995 | cancel_work_sync(&dev_priv->hotplug_work); | ||
7996 | |||
7997 | /* Shut off idle work before the crtcs get freed. */ | ||
7998 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { | ||
7999 | intel_crtc = to_intel_crtc(crtc); | ||
8000 | del_timer_sync(&intel_crtc->idle_timer); | ||
8001 | } | ||
8002 | del_timer_sync(&dev_priv->idle_timer); | ||
8003 | cancel_work_sync(&dev_priv->idle_work); | ||
8004 | |||
6100 | drm_mode_config_cleanup(dev); | 8005 | drm_mode_config_cleanup(dev); |
6101 | } | 8006 | } |
6102 | 8007 | ||
6103 | |||
6104 | /* | 8008 | /* |
6105 | * Return which encoder is currently attached for connector. | 8009 | * Return which encoder is currently attached for connector. |
6106 | */ | 8010 | */ |
6107 | struct drm_encoder *intel_attached_encoder (struct drm_connector *connector) | 8011 | struct drm_encoder *intel_best_encoder(struct drm_connector *connector) |
6108 | { | 8012 | { |
6109 | struct drm_mode_object *obj; | 8013 | return &intel_attached_encoder(connector)->base; |
6110 | struct drm_encoder *encoder; | 8014 | } |
6111 | int i; | ||
6112 | |||
6113 | for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) { | ||
6114 | if (connector->encoder_ids[i] == 0) | ||
6115 | break; | ||
6116 | |||
6117 | obj = drm_mode_object_find(connector->dev, | ||
6118 | connector->encoder_ids[i], | ||
6119 | DRM_MODE_OBJECT_ENCODER); | ||
6120 | if (!obj) | ||
6121 | continue; | ||
6122 | 8015 | ||
6123 | encoder = obj_to_encoder(obj); | 8016 | void intel_connector_attach_encoder(struct intel_connector *connector, |
6124 | return encoder; | 8017 | struct intel_encoder *encoder) |
6125 | } | 8018 | { |
6126 | return NULL; | 8019 | connector->encoder = encoder; |
8020 | drm_mode_connector_attach_encoder(&connector->base, | ||
8021 | &encoder->base); | ||
6127 | } | 8022 | } |
6128 | 8023 | ||
6129 | /* | 8024 | /* |
@@ -6142,3 +8037,113 @@ int intel_modeset_vga_set_state(struct drm_device *dev, bool state) | |||
6142 | pci_write_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, gmch_ctrl); | 8037 | pci_write_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, gmch_ctrl); |
6143 | return 0; | 8038 | return 0; |
6144 | } | 8039 | } |
8040 | |||
8041 | #ifdef CONFIG_DEBUG_FS | ||
8042 | #include <linux/seq_file.h> | ||
8043 | |||
8044 | struct intel_display_error_state { | ||
8045 | struct intel_cursor_error_state { | ||
8046 | u32 control; | ||
8047 | u32 position; | ||
8048 | u32 base; | ||
8049 | u32 size; | ||
8050 | } cursor[2]; | ||
8051 | |||
8052 | struct intel_pipe_error_state { | ||
8053 | u32 conf; | ||
8054 | u32 source; | ||
8055 | |||
8056 | u32 htotal; | ||
8057 | u32 hblank; | ||
8058 | u32 hsync; | ||
8059 | u32 vtotal; | ||
8060 | u32 vblank; | ||
8061 | u32 vsync; | ||
8062 | } pipe[2]; | ||
8063 | |||
8064 | struct intel_plane_error_state { | ||
8065 | u32 control; | ||
8066 | u32 stride; | ||
8067 | u32 size; | ||
8068 | u32 pos; | ||
8069 | u32 addr; | ||
8070 | u32 surface; | ||
8071 | u32 tile_offset; | ||
8072 | } plane[2]; | ||
8073 | }; | ||
8074 | |||
8075 | struct intel_display_error_state * | ||
8076 | intel_display_capture_error_state(struct drm_device *dev) | ||
8077 | { | ||
8078 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
8079 | struct intel_display_error_state *error; | ||
8080 | int i; | ||
8081 | |||
8082 | error = kmalloc(sizeof(*error), GFP_ATOMIC); | ||
8083 | if (error == NULL) | ||
8084 | return NULL; | ||
8085 | |||
8086 | for (i = 0; i < 2; i++) { | ||
8087 | error->cursor[i].control = I915_READ(CURCNTR(i)); | ||
8088 | error->cursor[i].position = I915_READ(CURPOS(i)); | ||
8089 | error->cursor[i].base = I915_READ(CURBASE(i)); | ||
8090 | |||
8091 | error->plane[i].control = I915_READ(DSPCNTR(i)); | ||
8092 | error->plane[i].stride = I915_READ(DSPSTRIDE(i)); | ||
8093 | error->plane[i].size = I915_READ(DSPSIZE(i)); | ||
8094 | error->plane[i].pos= I915_READ(DSPPOS(i)); | ||
8095 | error->plane[i].addr = I915_READ(DSPADDR(i)); | ||
8096 | if (INTEL_INFO(dev)->gen >= 4) { | ||
8097 | error->plane[i].surface = I915_READ(DSPSURF(i)); | ||
8098 | error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i)); | ||
8099 | } | ||
8100 | |||
8101 | error->pipe[i].conf = I915_READ(PIPECONF(i)); | ||
8102 | error->pipe[i].source = I915_READ(PIPESRC(i)); | ||
8103 | error->pipe[i].htotal = I915_READ(HTOTAL(i)); | ||
8104 | error->pipe[i].hblank = I915_READ(HBLANK(i)); | ||
8105 | error->pipe[i].hsync = I915_READ(HSYNC(i)); | ||
8106 | error->pipe[i].vtotal = I915_READ(VTOTAL(i)); | ||
8107 | error->pipe[i].vblank = I915_READ(VBLANK(i)); | ||
8108 | error->pipe[i].vsync = I915_READ(VSYNC(i)); | ||
8109 | } | ||
8110 | |||
8111 | return error; | ||
8112 | } | ||
8113 | |||
8114 | void | ||
8115 | intel_display_print_error_state(struct seq_file *m, | ||
8116 | struct drm_device *dev, | ||
8117 | struct intel_display_error_state *error) | ||
8118 | { | ||
8119 | int i; | ||
8120 | |||
8121 | for (i = 0; i < 2; i++) { | ||
8122 | seq_printf(m, "Pipe [%d]:\n", i); | ||
8123 | seq_printf(m, " CONF: %08x\n", error->pipe[i].conf); | ||
8124 | seq_printf(m, " SRC: %08x\n", error->pipe[i].source); | ||
8125 | seq_printf(m, " HTOTAL: %08x\n", error->pipe[i].htotal); | ||
8126 | seq_printf(m, " HBLANK: %08x\n", error->pipe[i].hblank); | ||
8127 | seq_printf(m, " HSYNC: %08x\n", error->pipe[i].hsync); | ||
8128 | seq_printf(m, " VTOTAL: %08x\n", error->pipe[i].vtotal); | ||
8129 | seq_printf(m, " VBLANK: %08x\n", error->pipe[i].vblank); | ||
8130 | seq_printf(m, " VSYNC: %08x\n", error->pipe[i].vsync); | ||
8131 | |||
8132 | seq_printf(m, "Plane [%d]:\n", i); | ||
8133 | seq_printf(m, " CNTR: %08x\n", error->plane[i].control); | ||
8134 | seq_printf(m, " STRIDE: %08x\n", error->plane[i].stride); | ||
8135 | seq_printf(m, " SIZE: %08x\n", error->plane[i].size); | ||
8136 | seq_printf(m, " POS: %08x\n", error->plane[i].pos); | ||
8137 | seq_printf(m, " ADDR: %08x\n", error->plane[i].addr); | ||
8138 | if (INTEL_INFO(dev)->gen >= 4) { | ||
8139 | seq_printf(m, " SURF: %08x\n", error->plane[i].surface); | ||
8140 | seq_printf(m, " TILEOFF: %08x\n", error->plane[i].tile_offset); | ||
8141 | } | ||
8142 | |||
8143 | seq_printf(m, "Cursor [%d]:\n", i); | ||
8144 | seq_printf(m, " CNTR: %08x\n", error->cursor[i].control); | ||
8145 | seq_printf(m, " POS: %08x\n", error->cursor[i].position); | ||
8146 | seq_printf(m, " BASE: %08x\n", error->cursor[i].base); | ||
8147 | } | ||
8148 | } | ||
8149 | #endif | ||
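[Editor's note] The capture half above runs from error context, so it reads the registers raw and allocates with GFP_ATOMIC; formatting is deferred to the seq_file printer invoked from debugfs. A trimmed userspace sketch of the capture/print split for one pipe (the MMIO accessor is a hypothetical stand-in for I915_READ):

	#include <stdint.h>
	#include <stdio.h>
	#include <stdlib.h>

	uint32_t mmio_read32(uint32_t reg);	/* hypothetical stand-in for I915_READ */

	struct pipe_snapshot {
		uint32_t conf, source;
	};

	/* Capture runs from error context in the driver, where sleeping is
	 * forbidden -- hence kmalloc(..., GFP_ATOMIC) there; malloc() stands
	 * in here. */
	static struct pipe_snapshot *capture_pipe(uint32_t conf_reg, uint32_t src_reg)
	{
		struct pipe_snapshot *s = malloc(sizeof(*s));

		if (!s)
			return NULL;
		s->conf = mmio_read32(conf_reg);
		s->source = mmio_read32(src_reg);
		return s;
	}

	/* Printing is deferred to process context (the debugfs seq_file). */
	static void print_pipe(FILE *m, const struct pipe_snapshot *s)
	{
		fprintf(m, "  CONF: %08x\n", (unsigned int)s->conf);
		fprintf(m, "  SRC: %08x\n", (unsigned int)s->source);
	}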
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 9ab8708ac6ba..e2aced6eec4c 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c | |||
@@ -42,30 +42,81 @@ | |||
42 | 42 | ||
43 | #define DP_LINK_CONFIGURATION_SIZE 9 | 43 | #define DP_LINK_CONFIGURATION_SIZE 9 |
44 | 44 | ||
45 | #define IS_eDP(i) ((i)->base.type == INTEL_OUTPUT_EDP) | ||
46 | #define IS_PCH_eDP(i) ((i)->is_pch_edp) | ||
47 | |||
48 | struct intel_dp { | 45 | struct intel_dp { |
49 | struct intel_encoder base; | 46 | struct intel_encoder base; |
50 | uint32_t output_reg; | 47 | uint32_t output_reg; |
51 | uint32_t DP; | 48 | uint32_t DP; |
52 | uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE]; | 49 | uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE]; |
53 | bool has_audio; | 50 | bool has_audio; |
54 | int dpms_mode; | 51 | int force_audio; |
52 | uint32_t color_range; | ||
55 | uint8_t link_bw; | 53 | uint8_t link_bw; |
56 | uint8_t lane_count; | 54 | uint8_t lane_count; |
57 | uint8_t dpcd[4]; | 55 | uint8_t dpcd[4]; |
58 | struct i2c_adapter adapter; | 56 | struct i2c_adapter adapter; |
59 | struct i2c_algo_dp_aux_data algo; | 57 | struct i2c_algo_dp_aux_data algo; |
60 | bool is_pch_edp; | 58 | bool is_pch_edp; |
59 | uint8_t train_set[4]; | ||
60 | uint8_t link_status[DP_LINK_STATUS_SIZE]; | ||
61 | }; | 61 | }; |
62 | 62 | ||
63 | /** | ||
64 | * is_edp - is the given port attached to an eDP panel (either CPU or PCH) | ||
65 | * @intel_dp: DP struct | ||
66 | * | ||
67 | * If a CPU or PCH DP output is attached to an eDP panel, this function | ||
68 | * will return true, and false otherwise. | ||
69 | */ | ||
70 | static bool is_edp(struct intel_dp *intel_dp) | ||
71 | { | ||
72 | return intel_dp->base.type == INTEL_OUTPUT_EDP; | ||
73 | } | ||
74 | |||
75 | /** | ||
76 | * is_pch_edp - is the port on the PCH and attached to an eDP panel? | ||
77 | * @intel_dp: DP struct | ||
78 | * | ||
79 | * Returns true if the given DP struct corresponds to a PCH DP port attached | ||
80 | * to an eDP panel, false otherwise. Helpful for determining whether we | ||
81 | * may need FDI resources for a given DP output or not. | ||
82 | */ | ||
83 | static bool is_pch_edp(struct intel_dp *intel_dp) | ||
84 | { | ||
85 | return intel_dp->is_pch_edp; | ||
86 | } | ||
87 | |||
63 | static struct intel_dp *enc_to_intel_dp(struct drm_encoder *encoder) | 88 | static struct intel_dp *enc_to_intel_dp(struct drm_encoder *encoder) |
64 | { | 89 | { |
65 | return container_of(enc_to_intel_encoder(encoder), struct intel_dp, base); | 90 | return container_of(encoder, struct intel_dp, base.base); |
66 | } | 91 | } |
67 | 92 | ||
68 | static void intel_dp_link_train(struct intel_dp *intel_dp); | 93 | static struct intel_dp *intel_attached_dp(struct drm_connector *connector) |
94 | { | ||
95 | return container_of(intel_attached_encoder(connector), | ||
96 | struct intel_dp, base); | ||
97 | } | ||
98 | |||
99 | /** | ||
100 | * intel_encoder_is_pch_edp - is the given encoder a PCH attached eDP? | ||
101 | * @encoder: DRM encoder | ||
102 | * | ||
103 | * Return true if @encoder corresponds to a PCH attached eDP panel. Needed | ||
104 | * by intel_display.c. | ||
105 | */ | ||
106 | bool intel_encoder_is_pch_edp(struct drm_encoder *encoder) | ||
107 | { | ||
108 | struct intel_dp *intel_dp; | ||
109 | |||
110 | if (!encoder) | ||
111 | return false; | ||
112 | |||
113 | intel_dp = enc_to_intel_dp(encoder); | ||
114 | |||
115 | return is_pch_edp(intel_dp); | ||
116 | } | ||
117 | |||
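[Editor's note] enc_to_intel_dp() and intel_attached_dp() above are container_of() upcasts: given a pointer to the embedded base member, they recover the enclosing intel_dp by subtracting the member's offset. A self-contained illustration of the mechanism with toy types:

	#include <stddef.h>
	#include <stdio.h>

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct encoder { int id; };		/* toy drm_encoder */

	struct dp_port {			/* toy intel_dp */
		struct encoder base;		/* embedded base object */
		int output_reg;
	};

	int main(void)
	{
		struct dp_port dp = { .base = { .id = 1 }, .output_reg = 0x64000 };
		struct encoder *enc = &dp.base;	/* what callbacks receive */

		/* Recover the container from the embedded member. */
		struct dp_port *port = container_of(enc, struct dp_port, base);
		printf("output_reg = 0x%x\n", port->output_reg);
		return 0;
	}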
118 | static void intel_dp_start_link_train(struct intel_dp *intel_dp); | ||
119 | static void intel_dp_complete_link_train(struct intel_dp *intel_dp); | ||
69 | static void intel_dp_link_down(struct intel_dp *intel_dp); | 120 | static void intel_dp_link_down(struct intel_dp *intel_dp); |
70 | 121 | ||
71 | void | 122 | void |
@@ -86,8 +137,8 @@ intel_dp_max_lane_count(struct intel_dp *intel_dp) | |||
86 | { | 137 | { |
87 | int max_lane_count = 4; | 138 | int max_lane_count = 4; |
88 | 139 | ||
89 | if (intel_dp->dpcd[0] >= 0x11) { | 140 | if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) { |
90 | max_lane_count = intel_dp->dpcd[2] & 0x1f; | 141 | max_lane_count = intel_dp->dpcd[DP_MAX_LANE_COUNT] & 0x1f; |
91 | switch (max_lane_count) { | 142 | switch (max_lane_count) { |
92 | case 1: case 2: case 4: | 143 | case 1: case 2: case 4: |
93 | break; | 144 | break; |
@@ -101,7 +152,7 @@ intel_dp_max_lane_count(struct intel_dp *intel_dp) | |||
101 | static int | 152 | static int |
102 | intel_dp_max_link_bw(struct intel_dp *intel_dp) | 153 | intel_dp_max_link_bw(struct intel_dp *intel_dp) |
103 | { | 154 | { |
104 | int max_link_bw = intel_dp->dpcd[1]; | 155 | int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE]; |
105 | 156 | ||
106 | switch (max_link_bw) { | 157 | switch (max_link_bw) { |
107 | case DP_LINK_BW_1_62: | 158 | case DP_LINK_BW_1_62: |
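These two hunks replace raw dpcd[1]/dpcd[2] indexing with named DPCD offsets. A standalone sketch of the same capability parsing, assuming the standard DPCD layout (revision at 0x000, max link rate at 0x001, max lane count in the low five bits of 0x002):

```c
#include <stdint.h>
#include <stdio.h>

/* Assumed DPCD offsets/rate codes, mirroring the constants used above. */
#define DP_DPCD_REV        0x000
#define DP_MAX_LINK_RATE   0x001
#define DP_MAX_LANE_COUNT  0x002
#define DP_LINK_BW_1_62    0x06	/* 1.62 Gbps per lane */
#define DP_LINK_BW_2_7     0x0a	/* 2.70 Gbps per lane */

static int max_lane_count(const uint8_t *dpcd)
{
	int lanes = 4;

	if (dpcd[DP_DPCD_REV] >= 0x11) {
		lanes = dpcd[DP_MAX_LANE_COUNT] & 0x1f;
		switch (lanes) {
		case 1: case 2: case 4:
			break;
		default:
			lanes = 4;	/* bogus value: fall back to the maximum */
		}
	}
	return lanes;
}

static int max_link_bw(const uint8_t *dpcd)
{
	switch (dpcd[DP_MAX_LINK_RATE]) {
	case DP_LINK_BW_1_62:
	case DP_LINK_BW_2_7:
		return dpcd[DP_MAX_LINK_RATE];
	default:
		return DP_LINK_BW_1_62;	/* unknown code: assume the slowest rate */
	}
}

int main(void)
{
	uint8_t dpcd[3] = { 0x11, DP_LINK_BW_2_7, 0x84 };	/* rev 1.1, 2.7G, 4 lanes + caps */

	printf("lanes=%d bw=0x%02x\n", max_lane_count(dpcd), max_link_bw(dpcd));
	return 0;
}
```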
@@ -129,8 +180,8 @@ intel_dp_link_required(struct drm_device *dev, struct intel_dp *intel_dp, int pi | |||
129 | { | 180 | { |
130 | struct drm_i915_private *dev_priv = dev->dev_private; | 181 | struct drm_i915_private *dev_priv = dev->dev_private; |
131 | 182 | ||
132 | if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) | 183 | if (is_edp(intel_dp)) |
133 | return (pixel_clock * dev_priv->edp_bpp) / 8; | 184 | return (pixel_clock * dev_priv->edp.bpp + 7) / 8; |
134 | else | 185 | else |
135 | return pixel_clock * 3; | 186 | return pixel_clock * 3; |
136 | } | 187 | } |
@@ -145,15 +196,13 @@ static int | |||
145 | intel_dp_mode_valid(struct drm_connector *connector, | 196 | intel_dp_mode_valid(struct drm_connector *connector, |
146 | struct drm_display_mode *mode) | 197 | struct drm_display_mode *mode) |
147 | { | 198 | { |
148 | struct drm_encoder *encoder = intel_attached_encoder(connector); | 199 | struct intel_dp *intel_dp = intel_attached_dp(connector); |
149 | struct intel_dp *intel_dp = enc_to_intel_dp(encoder); | ||
150 | struct drm_device *dev = connector->dev; | 200 | struct drm_device *dev = connector->dev; |
151 | struct drm_i915_private *dev_priv = dev->dev_private; | 201 | struct drm_i915_private *dev_priv = dev->dev_private; |
152 | int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_dp)); | 202 | int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_dp)); |
153 | int max_lanes = intel_dp_max_lane_count(intel_dp); | 203 | int max_lanes = intel_dp_max_lane_count(intel_dp); |
154 | 204 | ||
155 | if ((IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) && | 205 | if (is_edp(intel_dp) && dev_priv->panel_fixed_mode) { |
156 | dev_priv->panel_fixed_mode) { | ||
157 | if (mode->hdisplay > dev_priv->panel_fixed_mode->hdisplay) | 206 | if (mode->hdisplay > dev_priv->panel_fixed_mode->hdisplay) |
158 | return MODE_PANEL; | 207 | return MODE_PANEL; |
159 | 208 | ||
@@ -161,9 +210,9 @@ intel_dp_mode_valid(struct drm_connector *connector, | |||
161 | return MODE_PANEL; | 210 | return MODE_PANEL; |
162 | } | 211 | } |
163 | 212 | ||
164 | /* only refuse the mode on non eDP since we have seen some wierd eDP panels | 213 | /* only refuse the mode on non eDP since we have seen some weird eDP panels |
165 | which are outside spec tolerances but somehow work by magic */ | 214 | which are outside spec tolerances but somehow work by magic */ |
166 | if (!IS_eDP(intel_dp) && | 215 | if (!is_edp(intel_dp) && |
167 | (intel_dp_link_required(connector->dev, intel_dp, mode->clock) | 216 | (intel_dp_link_required(connector->dev, intel_dp, mode->clock) |
168 | > intel_dp_max_data_rate(max_link_clock, max_lanes))) | 217 | > intel_dp_max_data_rate(max_link_clock, max_lanes))) |
169 | return MODE_CLOCK_HIGH; | 218 | return MODE_CLOCK_HIGH; |
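The mode_valid path compares what a mode needs against what the link can carry. A hedged standalone model of that arithmetic: (pixel_clock * bpp + 7) / 8 converts kbit/s to kB/s rounding up, and the available rate assumes DP's 8b/10b coding leaves 80% for payload; the helper names are mine:

```c
#include <stdio.h>

/* All clocks in kHz. The rate codes and the 8b/10b factor follow the
 * DP spec; the helpers below are a sketch, not the driver's API. */
static int link_clock_khz(int bw_code)
{
	return bw_code == 0x0a ? 270000 : 162000;	/* 2.7G vs 1.62G per lane */
}

/* Payload the mode needs, in kB/s: kbit/s -> kB/s, rounding up. */
static int link_required(int pixel_clock, int bpp)
{
	return (pixel_clock * bpp + 7) / 8;
}

/* Payload the link can carry, in kB/s: 8b/10b leaves 80% for data. */
static int max_data_rate(int link_clock, int lanes)
{
	return (link_clock * lanes * 8) / 10;
}

int main(void)
{
	int pixel_clock = 148500;	/* 1080p60 */
	int need = link_required(pixel_clock, 24);
	int have = max_data_rate(link_clock_khz(0x0a), 4);

	printf("need %d kB/s, have %d kB/s -> %s\n",
	       need, have, need > have ? "MODE_CLOCK_HIGH" : "MODE_OK");
	return 0;
}
```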
@@ -233,7 +282,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp, | |||
233 | uint8_t *recv, int recv_size) | 282 | uint8_t *recv, int recv_size) |
234 | { | 283 | { |
235 | uint32_t output_reg = intel_dp->output_reg; | 284 | uint32_t output_reg = intel_dp->output_reg; |
236 | struct drm_device *dev = intel_dp->base.enc.dev; | 285 | struct drm_device *dev = intel_dp->base.base.dev; |
237 | struct drm_i915_private *dev_priv = dev->dev_private; | 286 | struct drm_i915_private *dev_priv = dev->dev_private; |
238 | uint32_t ch_ctl = output_reg + 0x10; | 287 | uint32_t ch_ctl = output_reg + 0x10; |
239 | uint32_t ch_data = ch_ctl + 4; | 288 | uint32_t ch_data = ch_ctl + 4; |
@@ -246,8 +295,11 @@ intel_dp_aux_ch(struct intel_dp *intel_dp, | |||
246 | /* The clock divider is based off the hrawclk, | 295 | /* The clock divider is based off the hrawclk, |
247 | * and would like to run at 2MHz. So, take the | 296 | * and would like to run at 2MHz. So, take the |
248 | * hrawclk value and divide by 2 and use that | 297 | * hrawclk value and divide by 2 and use that |
298 | * | ||
299 | * Note that PCH attached eDP panels should use a 125MHz input | ||
300 | * clock divider. | ||
249 | */ | 301 | */ |
250 | if (IS_eDP(intel_dp)) { | 302 | if (is_edp(intel_dp) && !is_pch_edp(intel_dp)) { |
251 | if (IS_GEN6(dev)) | 303 | if (IS_GEN6(dev)) |
252 | aux_clock_divider = 200; /* SNB eDP input clock at 400Mhz */ | 304 | aux_clock_divider = 200; /* SNB eDP input clock at 400Mhz */ |
253 | else | 305 | else |
@@ -425,6 +477,7 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode, | |||
425 | uint16_t address = algo_data->address; | 477 | uint16_t address = algo_data->address; |
426 | uint8_t msg[5]; | 478 | uint8_t msg[5]; |
427 | uint8_t reply[2]; | 479 | uint8_t reply[2]; |
480 | unsigned retry; | ||
428 | int msg_bytes; | 481 | int msg_bytes; |
429 | int reply_bytes; | 482 | int reply_bytes; |
430 | int ret; | 483 | int ret; |
@@ -459,14 +512,33 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode, | |||
459 | break; | 512 | break; |
460 | } | 513 | } |
461 | 514 | ||
462 | for (;;) { | 515 | for (retry = 0; retry < 5; retry++) { |
463 | ret = intel_dp_aux_ch(intel_dp, | 516 | ret = intel_dp_aux_ch(intel_dp, |
464 | msg, msg_bytes, | 517 | msg, msg_bytes, |
465 | reply, reply_bytes); | 518 | reply, reply_bytes); |
466 | if (ret < 0) { | 519 | if (ret < 0) { |
467 | DRM_DEBUG_KMS("aux_ch failed %d\n", ret); | 520 | DRM_DEBUG_KMS("aux_ch failed %d\n", ret); |
468 | return ret; | 521 | return ret; |
469 | } | 522 | } |
523 | |||
524 | switch (reply[0] & AUX_NATIVE_REPLY_MASK) { | ||
525 | case AUX_NATIVE_REPLY_ACK: | ||
526 | /* I2C-over-AUX Reply field is only valid | ||
527 | * when paired with AUX ACK. | ||
528 | */ | ||
529 | break; | ||
530 | case AUX_NATIVE_REPLY_NACK: | ||
531 | DRM_DEBUG_KMS("aux_ch native nack\n"); | ||
532 | return -EREMOTEIO; | ||
533 | case AUX_NATIVE_REPLY_DEFER: | ||
534 | udelay(100); | ||
535 | continue; | ||
536 | default: | ||
537 | DRM_ERROR("aux_ch invalid native reply 0x%02x\n", | ||
538 | reply[0]); | ||
539 | return -EREMOTEIO; | ||
540 | } | ||
541 | |||
470 | switch (reply[0] & AUX_I2C_REPLY_MASK) { | 542 | switch (reply[0] & AUX_I2C_REPLY_MASK) { |
471 | case AUX_I2C_REPLY_ACK: | 543 | case AUX_I2C_REPLY_ACK: |
472 | if (mode == MODE_I2C_READ) { | 544 | if (mode == MODE_I2C_READ) { |
@@ -474,17 +546,20 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode, | |||
474 | } | 546 | } |
475 | return reply_bytes - 1; | 547 | return reply_bytes - 1; |
476 | case AUX_I2C_REPLY_NACK: | 548 | case AUX_I2C_REPLY_NACK: |
477 | DRM_DEBUG_KMS("aux_ch nack\n"); | 549 | DRM_DEBUG_KMS("aux_i2c nack\n"); |
478 | return -EREMOTEIO; | 550 | return -EREMOTEIO; |
479 | case AUX_I2C_REPLY_DEFER: | 551 | case AUX_I2C_REPLY_DEFER: |
480 | DRM_DEBUG_KMS("aux_ch defer\n"); | 552 | DRM_DEBUG_KMS("aux_i2c defer\n"); |
481 | udelay(100); | 553 | udelay(100); |
482 | break; | 554 | break; |
483 | default: | 555 | default: |
484 | DRM_ERROR("aux_ch invalid reply 0x%02x\n", reply[0]); | 556 | DRM_ERROR("aux_i2c invalid reply 0x%02x\n", reply[0]); |
485 | return -EREMOTEIO; | 557 | return -EREMOTEIO; |
486 | } | 558 | } |
487 | } | 559 | } |
560 | |||
561 | DRM_ERROR("too many retries, giving up\n"); | ||
562 | return -EREMOTEIO; | ||
488 | } | 563 | } |
489 | 564 | ||
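The reworked loop above vets the native AUX reply before looking at the I2C-over-AUX reply, and bounds defers at five rounds instead of looping forever. A self-contained sketch of that retry discipline, with a fake transfer standing in for intel_dp_aux_ch():

```c
#include <stdio.h>

#define REPLY_ACK	0x0
#define REPLY_NACK	0x1
#define REPLY_DEFER	0x2

/* Stand-in for the AUX transfer; returns the sink's reply code.
 * This fake sink defers twice before acking, to exercise the loop. */
static int fake_aux_ch(void)
{
	static int calls;
	return ++calls < 3 ? REPLY_DEFER : REPLY_ACK;
}

static int aux_transaction(void)
{
	int retry;

	for (retry = 0; retry < 5; retry++) {
		switch (fake_aux_ch()) {
		case REPLY_ACK:
			return 0;	/* reply payload is only valid on ACK */
		case REPLY_NACK:
			return -1;	/* sink refused: give up at once */
		case REPLY_DEFER:
			/* sink busy: back off briefly and retry */
			continue;
		default:
			return -1;	/* protocol violation */
		}
	}
	return -1;			/* too many defers, give up */
}

int main(void)
{
	printf("aux: %s\n", aux_transaction() == 0 ? "ack" : "failed");
	return 0;
}
```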
490 | static int | 565 | static int |
@@ -519,8 +594,7 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, | |||
519 | int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0; | 594 | int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0; |
520 | static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 }; | 595 | static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 }; |
521 | 596 | ||
522 | if ((IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) && | 597 | if (is_edp(intel_dp) && dev_priv->panel_fixed_mode) { |
523 | dev_priv->panel_fixed_mode) { | ||
524 | intel_fixed_panel_mode(dev_priv->panel_fixed_mode, adjusted_mode); | 598 | intel_fixed_panel_mode(dev_priv->panel_fixed_mode, adjusted_mode); |
525 | intel_pch_panel_fitting(dev, DRM_MODE_SCALE_FULLSCREEN, | 599 | intel_pch_panel_fitting(dev, DRM_MODE_SCALE_FULLSCREEN, |
526 | mode, adjusted_mode); | 600 | mode, adjusted_mode); |
@@ -549,7 +623,7 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, | |||
549 | } | 623 | } |
550 | } | 624 | } |
551 | 625 | ||
552 | if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) { | 626 | if (is_edp(intel_dp)) { |
553 | /* okay we failed just pick the highest */ | 627 | /* okay we failed just pick the highest */ |
554 | intel_dp->lane_count = max_lane_count; | 628 | intel_dp->lane_count = max_lane_count; |
555 | intel_dp->link_bw = bws[max_clock]; | 629 | intel_dp->link_bw = bws[max_clock]; |
@@ -598,25 +672,6 @@ intel_dp_compute_m_n(int bpp, | |||
598 | intel_reduce_ratio(&m_n->link_m, &m_n->link_n); | 672 | intel_reduce_ratio(&m_n->link_m, &m_n->link_n); |
599 | } | 673 | } |
600 | 674 | ||
601 | bool intel_pch_has_edp(struct drm_crtc *crtc) | ||
602 | { | ||
603 | struct drm_device *dev = crtc->dev; | ||
604 | struct drm_mode_config *mode_config = &dev->mode_config; | ||
605 | struct drm_encoder *encoder; | ||
606 | |||
607 | list_for_each_entry(encoder, &mode_config->encoder_list, head) { | ||
608 | struct intel_dp *intel_dp; | ||
609 | |||
610 | if (encoder->crtc != crtc) | ||
611 | continue; | ||
612 | |||
613 | intel_dp = enc_to_intel_dp(encoder); | ||
614 | if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT) | ||
615 | return intel_dp->is_pch_edp; | ||
616 | } | ||
617 | return false; | ||
618 | } | ||
619 | |||
620 | void | 675 | void |
621 | intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, | 676 | intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, |
622 | struct drm_display_mode *adjusted_mode) | 677 | struct drm_display_mode *adjusted_mode) |
@@ -628,6 +683,7 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, | |||
628 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 683 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
629 | int lane_count = 4, bpp = 24; | 684 | int lane_count = 4, bpp = 24; |
630 | struct intel_dp_m_n m_n; | 685 | struct intel_dp_m_n m_n; |
686 | int pipe = intel_crtc->pipe; | ||
631 | 687 | ||
632 | /* | 688 | /* |
633 | * Find the lane count in the intel_encoder private | 689 | * Find the lane count in the intel_encoder private |
@@ -641,8 +697,10 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, | |||
641 | intel_dp = enc_to_intel_dp(encoder); | 697 | intel_dp = enc_to_intel_dp(encoder); |
642 | if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT) { | 698 | if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT) { |
643 | lane_count = intel_dp->lane_count; | 699 | lane_count = intel_dp->lane_count; |
644 | if (IS_PCH_eDP(intel_dp)) | 700 | break; |
645 | bpp = dev_priv->edp_bpp; | 701 | } else if (is_edp(intel_dp)) { |
702 | lane_count = dev_priv->edp.lanes; | ||
703 | bpp = dev_priv->edp.bpp; | ||
646 | break; | 704 | break; |
647 | } | 705 | } |
648 | } | 706 | } |
@@ -656,39 +714,19 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, | |||
656 | mode->clock, adjusted_mode->clock, &m_n); | 714 | mode->clock, adjusted_mode->clock, &m_n); |
657 | 715 | ||
658 | if (HAS_PCH_SPLIT(dev)) { | 716 | if (HAS_PCH_SPLIT(dev)) { |
659 | if (intel_crtc->pipe == 0) { | 717 | I915_WRITE(TRANSDATA_M1(pipe), |
660 | I915_WRITE(TRANSA_DATA_M1, | 718 | ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) | |
661 | ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) | | 719 | m_n.gmch_m); |
662 | m_n.gmch_m); | 720 | I915_WRITE(TRANSDATA_N1(pipe), m_n.gmch_n); |
663 | I915_WRITE(TRANSA_DATA_N1, m_n.gmch_n); | 721 | I915_WRITE(TRANSDPLINK_M1(pipe), m_n.link_m); |
664 | I915_WRITE(TRANSA_DP_LINK_M1, m_n.link_m); | 722 | I915_WRITE(TRANSDPLINK_N1(pipe), m_n.link_n); |
665 | I915_WRITE(TRANSA_DP_LINK_N1, m_n.link_n); | ||
666 | } else { | ||
667 | I915_WRITE(TRANSB_DATA_M1, | ||
668 | ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) | | ||
669 | m_n.gmch_m); | ||
670 | I915_WRITE(TRANSB_DATA_N1, m_n.gmch_n); | ||
671 | I915_WRITE(TRANSB_DP_LINK_M1, m_n.link_m); | ||
672 | I915_WRITE(TRANSB_DP_LINK_N1, m_n.link_n); | ||
673 | } | ||
674 | } else { | 723 | } else { |
675 | if (intel_crtc->pipe == 0) { | 724 | I915_WRITE(PIPE_GMCH_DATA_M(pipe), |
676 | I915_WRITE(PIPEA_GMCH_DATA_M, | 725 | ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) | |
677 | ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) | | 726 | m_n.gmch_m); |
678 | m_n.gmch_m); | 727 | I915_WRITE(PIPE_GMCH_DATA_N(pipe), m_n.gmch_n); |
679 | I915_WRITE(PIPEA_GMCH_DATA_N, | 728 | I915_WRITE(PIPE_DP_LINK_M(pipe), m_n.link_m); |
680 | m_n.gmch_n); | 729 | I915_WRITE(PIPE_DP_LINK_N(pipe), m_n.link_n); |
681 | I915_WRITE(PIPEA_DP_LINK_M, m_n.link_m); | ||
682 | I915_WRITE(PIPEA_DP_LINK_N, m_n.link_n); | ||
683 | } else { | ||
684 | I915_WRITE(PIPEB_GMCH_DATA_M, | ||
685 | ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) | | ||
686 | m_n.gmch_m); | ||
687 | I915_WRITE(PIPEB_GMCH_DATA_N, | ||
688 | m_n.gmch_n); | ||
689 | I915_WRITE(PIPEB_DP_LINK_M, m_n.link_m); | ||
690 | I915_WRITE(PIPEB_DP_LINK_N, m_n.link_n); | ||
691 | } | ||
692 | } | 730 | } |
693 | } | 731 | } |
694 | 732 | ||
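The M/N hunk folds the per-pipe if/else chains into TRANSDATA_M1(pipe)-style macros, which works because the pipe register banks sit at a fixed stride. A sketch of the pattern; the offsets, stride, and TU-size shift below are invented for illustration:

```c
#include <stdint.h>
#include <stdio.h>

/* Illustrative layout: per-pipe register banks 0x1000 apart.
 * Offsets, stride and the TU-size shift are made up for this sketch. */
#define PIPE_STRIDE	0x1000
#define DATA_M1(pipe)	(0x60030 + (pipe) * PIPE_STRIDE)
#define DATA_N1(pipe)	(0x60034 + (pipe) * PIPE_STRIDE)
#define TU_SIZE_SHIFT	25

static void mmio_write(uint32_t reg, uint32_t val)
{
	printf("write 0x%05x <- 0x%08x\n", reg, val);	/* stand-in for I915_WRITE */
}

static void set_m_n(int pipe, uint32_t m, uint32_t n, uint32_t tu)
{
	/* One code path for every pipe, instead of an if/else per pipe. */
	mmio_write(DATA_M1(pipe), ((tu - 1) << TU_SIZE_SHIFT) | m);
	mmio_write(DATA_N1(pipe), n);
}

int main(void)
{
	set_m_n(0, 0x12345, 0x80000, 64);
	set_m_n(1, 0x12345, 0x80000, 64);
	return 0;
}
```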
@@ -698,18 +736,18 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, | |||
698 | { | 736 | { |
699 | struct drm_device *dev = encoder->dev; | 737 | struct drm_device *dev = encoder->dev; |
700 | struct intel_dp *intel_dp = enc_to_intel_dp(encoder); | 738 | struct intel_dp *intel_dp = enc_to_intel_dp(encoder); |
701 | struct drm_crtc *crtc = intel_dp->base.enc.crtc; | 739 | struct drm_crtc *crtc = intel_dp->base.base.crtc; |
702 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 740 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
703 | 741 | ||
704 | intel_dp->DP = (DP_VOLTAGE_0_4 | | 742 | intel_dp->DP = DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0; |
705 | DP_PRE_EMPHASIS_0); | 743 | intel_dp->DP |= intel_dp->color_range; |
706 | 744 | ||
707 | if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) | 745 | if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) |
708 | intel_dp->DP |= DP_SYNC_HS_HIGH; | 746 | intel_dp->DP |= DP_SYNC_HS_HIGH; |
709 | if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) | 747 | if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) |
710 | intel_dp->DP |= DP_SYNC_VS_HIGH; | 748 | intel_dp->DP |= DP_SYNC_VS_HIGH; |
711 | 749 | ||
712 | if (HAS_PCH_CPT(dev) && !IS_eDP(intel_dp)) | 750 | if (HAS_PCH_CPT(dev) && !is_edp(intel_dp)) |
713 | intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT; | 751 | intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT; |
714 | else | 752 | else |
715 | intel_dp->DP |= DP_LINK_TRAIN_OFF; | 753 | intel_dp->DP |= DP_LINK_TRAIN_OFF; |
@@ -735,7 +773,8 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, | |||
735 | /* | 773 | /* |
736 | * Check for DPCD version > 1.1 and enhanced framing support | 774 | * Check for DPCD version > 1.1 and enhanced framing support |
737 | */ | 775 | */ |
738 | if (intel_dp->dpcd[0] >= 0x11 && (intel_dp->dpcd[2] & DP_ENHANCED_FRAME_CAP)) { | 776 | if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 && |
777 | (intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP)) { | ||
739 | intel_dp->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN; | 778 | intel_dp->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN; |
740 | intel_dp->DP |= DP_ENHANCED_FRAMING; | 779 | intel_dp->DP |= DP_ENHANCED_FRAMING; |
741 | } | 780 | } |
@@ -744,7 +783,7 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, | |||
744 | if (intel_crtc->pipe == 1 && !HAS_PCH_CPT(dev)) | 783 | if (intel_crtc->pipe == 1 && !HAS_PCH_CPT(dev)) |
745 | intel_dp->DP |= DP_PIPEB_SELECT; | 784 | intel_dp->DP |= DP_PIPEB_SELECT; |
746 | 785 | ||
747 | if (IS_eDP(intel_dp)) { | 786 | if (is_edp(intel_dp) && !is_pch_edp(intel_dp)) { |
748 | /* don't miss the required PLL setting for eDP */ | 787 | /* don't miss the required PLL setting for eDP */
749 | intel_dp->DP |= DP_PLL_ENABLE; | 788 | intel_dp->DP |= DP_PLL_ENABLE; |
750 | if (adjusted_mode->clock < 200000) | 789 | if (adjusted_mode->clock < 200000) |
@@ -754,13 +793,49 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, | |||
754 | } | 793 | } |
755 | } | 794 | } |
756 | 795 | ||
757 | static void ironlake_edp_panel_on (struct drm_device *dev) | 796 | static void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp) |
797 | { | ||
798 | struct drm_device *dev = intel_dp->base.base.dev; | ||
799 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
800 | u32 pp; | ||
801 | |||
802 | /* | ||
803 | * If the panel wasn't on, make sure there's not a currently | ||
804 | * active PP sequence before enabling AUX VDD. | ||
805 | */ | ||
806 | if (!(I915_READ(PCH_PP_STATUS) & PP_ON)) | ||
807 | msleep(dev_priv->panel_t3); | ||
808 | |||
809 | pp = I915_READ(PCH_PP_CONTROL); | ||
810 | pp |= EDP_FORCE_VDD; | ||
811 | I915_WRITE(PCH_PP_CONTROL, pp); | ||
812 | POSTING_READ(PCH_PP_CONTROL); | ||
813 | } | ||
814 | |||
815 | static void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp) | ||
758 | { | 816 | { |
817 | struct drm_device *dev = intel_dp->base.base.dev; | ||
759 | struct drm_i915_private *dev_priv = dev->dev_private; | 818 | struct drm_i915_private *dev_priv = dev->dev_private; |
760 | u32 pp; | 819 | u32 pp; |
761 | 820 | ||
821 | pp = I915_READ(PCH_PP_CONTROL); | ||
822 | pp &= ~EDP_FORCE_VDD; | ||
823 | I915_WRITE(PCH_PP_CONTROL, pp); | ||
824 | POSTING_READ(PCH_PP_CONTROL); | ||
825 | |||
826 | /* Make sure sequencer is idle before allowing subsequent activity */ | ||
827 | msleep(dev_priv->panel_t12); | ||
828 | } | ||
829 | |||
830 | /* Returns true if the panel was already on when called */ | ||
831 | static bool ironlake_edp_panel_on (struct intel_dp *intel_dp) | ||
832 | { | ||
833 | struct drm_device *dev = intel_dp->base.base.dev; | ||
834 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
835 | u32 pp, idle_on_mask = PP_ON | PP_SEQUENCE_STATE_ON_IDLE; | ||
836 | |||
762 | if (I915_READ(PCH_PP_STATUS) & PP_ON) | 837 | if (I915_READ(PCH_PP_STATUS) & PP_ON) |
763 | return; | 838 | return true; |
764 | 839 | ||
765 | pp = I915_READ(PCH_PP_CONTROL); | 840 | pp = I915_READ(PCH_PP_CONTROL); |
766 | 841 | ||
@@ -771,21 +846,25 @@ static void ironlake_edp_panel_on (struct drm_device *dev) | |||
771 | 846 | ||
772 | pp |= PANEL_UNLOCK_REGS | POWER_TARGET_ON; | 847 | pp |= PANEL_UNLOCK_REGS | POWER_TARGET_ON; |
773 | I915_WRITE(PCH_PP_CONTROL, pp); | 848 | I915_WRITE(PCH_PP_CONTROL, pp); |
849 | POSTING_READ(PCH_PP_CONTROL); | ||
774 | 850 | ||
775 | if (wait_for(I915_READ(PCH_PP_STATUS) & PP_ON, 5000, 10)) | 851 | if (wait_for((I915_READ(PCH_PP_STATUS) & idle_on_mask) == idle_on_mask, |
852 | 5000)) | ||
776 | DRM_ERROR("panel on wait timed out: 0x%08x\n", | 853 | DRM_ERROR("panel on wait timed out: 0x%08x\n", |
777 | I915_READ(PCH_PP_STATUS)); | 854 | I915_READ(PCH_PP_STATUS)); |
778 | 855 | ||
779 | pp &= ~(PANEL_UNLOCK_REGS | EDP_FORCE_VDD); | ||
780 | pp |= PANEL_POWER_RESET; /* restore panel reset bit */ | 856 | pp |= PANEL_POWER_RESET; /* restore panel reset bit */ |
781 | I915_WRITE(PCH_PP_CONTROL, pp); | 857 | I915_WRITE(PCH_PP_CONTROL, pp); |
782 | POSTING_READ(PCH_PP_CONTROL); | 858 | POSTING_READ(PCH_PP_CONTROL); |
859 | |||
860 | return false; | ||
783 | } | 861 | } |
784 | 862 | ||
785 | static void ironlake_edp_panel_off (struct drm_device *dev) | 863 | static void ironlake_edp_panel_off (struct drm_device *dev) |
786 | { | 864 | { |
787 | struct drm_i915_private *dev_priv = dev->dev_private; | 865 | struct drm_i915_private *dev_priv = dev->dev_private; |
788 | u32 pp; | 866 | u32 pp, idle_off_mask = PP_ON | PP_SEQUENCE_MASK | |
867 | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK; | ||
789 | 868 | ||
790 | pp = I915_READ(PCH_PP_CONTROL); | 869 | pp = I915_READ(PCH_PP_CONTROL); |
791 | 870 | ||
@@ -796,13 +875,13 @@ static void ironlake_edp_panel_off (struct drm_device *dev) | |||
796 | 875 | ||
797 | pp &= ~POWER_TARGET_ON; | 876 | pp &= ~POWER_TARGET_ON; |
798 | I915_WRITE(PCH_PP_CONTROL, pp); | 877 | I915_WRITE(PCH_PP_CONTROL, pp); |
878 | POSTING_READ(PCH_PP_CONTROL); | ||
799 | 879 | ||
800 | if (wait_for((I915_READ(PCH_PP_STATUS) & PP_ON) == 0, 5000, 10)) | 880 | if (wait_for((I915_READ(PCH_PP_STATUS) & idle_off_mask) == 0, 5000)) |
801 | DRM_ERROR("panel off wait timed out: 0x%08x\n", | 881 | DRM_ERROR("panel off wait timed out: 0x%08x\n", |
802 | I915_READ(PCH_PP_STATUS)); | 882 | I915_READ(PCH_PP_STATUS)); |
803 | 883 | ||
804 | /* Make sure VDD is enabled so DP AUX will work */ | 884 | pp |= PANEL_POWER_RESET; /* restore panel reset bit */ |
805 | pp |= EDP_FORCE_VDD | PANEL_POWER_RESET; /* restore panel reset bit */ | ||
806 | I915_WRITE(PCH_PP_CONTROL, pp); | 885 | I915_WRITE(PCH_PP_CONTROL, pp); |
807 | POSTING_READ(PCH_PP_CONTROL); | 886 | POSTING_READ(PCH_PP_CONTROL); |
808 | } | 887 | } |
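Both panel power paths now spin on composite idle masks via wait_for(..., 5000). A userspace stand-in for that poll-with-timeout helper, with a fake status bit in place of PCH_PP_STATUS:

```c
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

/* Userspace stand-in for the driver's wait_for(): poll a condition
 * roughly once per millisecond until it holds or the timeout elapses.
 * Returns 0 on success, -1 on timeout. */
static int wait_for_cond(bool (*cond)(void), int timeout_ms)
{
	struct timespec ts = { .tv_nsec = 1000 * 1000 };	/* 1 ms poll interval */

	while (timeout_ms-- > 0) {
		if (cond())
			return 0;
		nanosleep(&ts, NULL);
	}
	return -1;
}

static int fake_status;

static bool panel_idle_and_on(void)
{
	/* Model PP_STATUS reaching (PP_ON | PP_SEQUENCE_STATE_ON_IDLE). */
	return ++fake_status > 3;
}

int main(void)
{
	if (wait_for_cond(panel_idle_and_on, 5000))
		fprintf(stderr, "panel on wait timed out\n");
	else
		printf("panel on and sequencer idle\n");
	return 0;
}
```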
@@ -813,6 +892,13 @@ static void ironlake_edp_backlight_on (struct drm_device *dev) | |||
813 | u32 pp; | 892 | u32 pp; |
814 | 893 | ||
815 | DRM_DEBUG_KMS("\n"); | 894 | DRM_DEBUG_KMS("\n"); |
895 | /* | ||
896 | * If we enable the backlight right away following a panel power | ||
897 | * on, we may see slight flicker as the panel syncs with the eDP | ||
898 | * link. So delay a bit to make sure the image is solid before | ||
899 | * allowing it to appear. | ||
900 | */ | ||
901 | msleep(300); | ||
816 | pp = I915_READ(PCH_PP_CONTROL); | 902 | pp = I915_READ(PCH_PP_CONTROL); |
817 | pp |= EDP_BLC_ENABLE; | 903 | pp |= EDP_BLC_ENABLE; |
818 | I915_WRITE(PCH_PP_CONTROL, pp); | 904 | I915_WRITE(PCH_PP_CONTROL, pp); |
@@ -837,8 +923,10 @@ static void ironlake_edp_pll_on(struct drm_encoder *encoder) | |||
837 | 923 | ||
838 | DRM_DEBUG_KMS("\n"); | 924 | DRM_DEBUG_KMS("\n"); |
839 | dpa_ctl = I915_READ(DP_A); | 925 | dpa_ctl = I915_READ(DP_A); |
840 | dpa_ctl &= ~DP_PLL_ENABLE; | 926 | dpa_ctl |= DP_PLL_ENABLE; |
841 | I915_WRITE(DP_A, dpa_ctl); | 927 | I915_WRITE(DP_A, dpa_ctl); |
928 | POSTING_READ(DP_A); | ||
929 | udelay(200); | ||
842 | } | 930 | } |
843 | 931 | ||
844 | static void ironlake_edp_pll_off(struct drm_encoder *encoder) | 932 | static void ironlake_edp_pll_off(struct drm_encoder *encoder) |
@@ -848,38 +936,79 @@ static void ironlake_edp_pll_off(struct drm_encoder *encoder) | |||
848 | u32 dpa_ctl; | 936 | u32 dpa_ctl; |
849 | 937 | ||
850 | dpa_ctl = I915_READ(DP_A); | 938 | dpa_ctl = I915_READ(DP_A); |
851 | dpa_ctl |= DP_PLL_ENABLE; | 939 | dpa_ctl &= ~DP_PLL_ENABLE; |
852 | I915_WRITE(DP_A, dpa_ctl); | 940 | I915_WRITE(DP_A, dpa_ctl); |
941 | POSTING_READ(DP_A); | ||
853 | udelay(200); | 942 | udelay(200); |
854 | } | 943 | } |
855 | 944 | ||
945 | /* If the sink supports it, try to set the power state appropriately */ | ||
946 | static void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode) | ||
947 | { | ||
948 | int ret, i; | ||
949 | |||
950 | /* Should have a valid DPCD by this point */ | ||
951 | if (intel_dp->dpcd[DP_DPCD_REV] < 0x11) | ||
952 | return; | ||
953 | |||
954 | if (mode != DRM_MODE_DPMS_ON) { | ||
955 | ret = intel_dp_aux_native_write_1(intel_dp, DP_SET_POWER, | ||
956 | DP_SET_POWER_D3); | ||
957 | if (ret != 1) | ||
958 | DRM_DEBUG_DRIVER("failed to write sink power state\n"); | ||
959 | } else { | ||
960 | /* | ||
961 | * When turning on, retry the write a few times, a | ||
962 | * millisecond apart, to give the sink time to wake up. | ||
963 | */ | ||
964 | for (i = 0; i < 3; i++) { | ||
965 | ret = intel_dp_aux_native_write_1(intel_dp, | ||
966 | DP_SET_POWER, | ||
967 | DP_SET_POWER_D0); | ||
968 | if (ret == 1) | ||
969 | break; | ||
970 | msleep(1); | ||
971 | } | ||
972 | } | ||
973 | } | ||
974 | |||
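intel_dp_sink_dpms() above writes the sink's DPCD power register and retries the D0 write while the sink wakes. A self-contained sketch of that bounded retry; the DPCD address follows the DP spec, and the fake write stands in for intel_dp_aux_native_write_1():

```c
#include <stdio.h>
#include <unistd.h>

#define DP_SET_POWER	0x600	/* DPCD sink power state register */
#define DP_SET_POWER_D0	0x1
#define DP_SET_POWER_D3	0x2

/* Stand-in for the one-byte native AUX write; a just-woken sink may
 * not ack immediately, so the first attempt fails here. */
static int fake_native_write(int reg, int val)
{
	static int calls;
	(void)reg; (void)val;
	return ++calls >= 2 ? 1 : 0;	/* 1 == one byte written */
}

static void sink_set_power(int on)
{
	int i;

	if (!on) {
		fake_native_write(DP_SET_POWER, DP_SET_POWER_D3);
		return;
	}
	/* Sinks get ~1 ms to wake; retry the D0 write a few times. */
	for (i = 0; i < 3; i++) {
		if (fake_native_write(DP_SET_POWER, DP_SET_POWER_D0) == 1)
			return;
		usleep(1000);
	}
	fprintf(stderr, "sink refused to wake\n");
}

int main(void)
{
	sink_set_power(1);
	printf("sink in D0\n");
	return 0;
}
```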
856 | static void intel_dp_prepare(struct drm_encoder *encoder) | 975 | static void intel_dp_prepare(struct drm_encoder *encoder) |
857 | { | 976 | { |
858 | struct intel_dp *intel_dp = enc_to_intel_dp(encoder); | 977 | struct intel_dp *intel_dp = enc_to_intel_dp(encoder); |
859 | struct drm_device *dev = encoder->dev; | 978 | struct drm_device *dev = encoder->dev; |
860 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
861 | uint32_t dp_reg = I915_READ(intel_dp->output_reg); | ||
862 | 979 | ||
863 | if (IS_eDP(intel_dp)) { | 980 | /* Wake up the sink first */ |
981 | intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON); | ||
982 | |||
983 | if (is_edp(intel_dp)) { | ||
864 | ironlake_edp_backlight_off(dev); | 984 | ironlake_edp_backlight_off(dev); |
865 | ironlake_edp_panel_on(dev); | 985 | ironlake_edp_panel_off(dev); |
866 | ironlake_edp_pll_on(encoder); | 986 | if (!is_pch_edp(intel_dp)) |
987 | ironlake_edp_pll_on(encoder); | ||
988 | else | ||
989 | ironlake_edp_pll_off(encoder); | ||
867 | } | 990 | } |
868 | if (dp_reg & DP_PORT_EN) | 991 | intel_dp_link_down(intel_dp); |
869 | intel_dp_link_down(intel_dp); | ||
870 | } | 992 | } |
871 | 993 | ||
872 | static void intel_dp_commit(struct drm_encoder *encoder) | 994 | static void intel_dp_commit(struct drm_encoder *encoder) |
873 | { | 995 | { |
874 | struct intel_dp *intel_dp = enc_to_intel_dp(encoder); | 996 | struct intel_dp *intel_dp = enc_to_intel_dp(encoder); |
875 | struct drm_device *dev = encoder->dev; | 997 | struct drm_device *dev = encoder->dev; |
876 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
877 | uint32_t dp_reg = I915_READ(intel_dp->output_reg); | ||
878 | 998 | ||
879 | if (!(dp_reg & DP_PORT_EN)) { | 999 | if (is_edp(intel_dp)) |
880 | intel_dp_link_train(intel_dp); | 1000 | ironlake_edp_panel_vdd_on(intel_dp); |
1001 | |||
1002 | intel_dp_start_link_train(intel_dp); | ||
1003 | |||
1004 | if (is_edp(intel_dp)) { | ||
1005 | ironlake_edp_panel_on(intel_dp); | ||
1006 | ironlake_edp_panel_vdd_off(intel_dp); | ||
881 | } | 1007 | } |
882 | if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) | 1008 | |
1009 | intel_dp_complete_link_train(intel_dp); | ||
1010 | |||
1011 | if (is_edp(intel_dp)) | ||
883 | ironlake_edp_backlight_on(dev); | 1012 | ironlake_edp_backlight_on(dev); |
884 | } | 1013 | } |
885 | 1014 | ||
@@ -892,24 +1021,54 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode) | |||
892 | uint32_t dp_reg = I915_READ(intel_dp->output_reg); | 1021 | uint32_t dp_reg = I915_READ(intel_dp->output_reg); |
893 | 1022 | ||
894 | if (mode != DRM_MODE_DPMS_ON) { | 1023 | if (mode != DRM_MODE_DPMS_ON) { |
895 | if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) { | 1024 | if (is_edp(intel_dp)) |
896 | ironlake_edp_backlight_off(dev); | 1025 | ironlake_edp_backlight_off(dev); |
1026 | intel_dp_sink_dpms(intel_dp, mode); | ||
1027 | intel_dp_link_down(intel_dp); | ||
1028 | if (is_edp(intel_dp)) | ||
897 | ironlake_edp_panel_off(dev); | 1029 | ironlake_edp_panel_off(dev); |
898 | } | 1030 | if (is_edp(intel_dp) && !is_pch_edp(intel_dp)) |
899 | if (dp_reg & DP_PORT_EN) | ||
900 | intel_dp_link_down(intel_dp); | ||
901 | if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) | ||
902 | ironlake_edp_pll_off(encoder); | 1031 | ironlake_edp_pll_off(encoder); |
903 | } else { | 1032 | } else { |
1033 | if (is_edp(intel_dp)) | ||
1034 | ironlake_edp_panel_vdd_on(intel_dp); | ||
1035 | intel_dp_sink_dpms(intel_dp, mode); | ||
904 | if (!(dp_reg & DP_PORT_EN)) { | 1036 | if (!(dp_reg & DP_PORT_EN)) { |
905 | if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) | 1037 | intel_dp_start_link_train(intel_dp); |
906 | ironlake_edp_panel_on(dev); | 1038 | if (is_edp(intel_dp)) { |
907 | intel_dp_link_train(intel_dp); | 1039 | ironlake_edp_panel_on(intel_dp); |
908 | if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) | 1040 | ironlake_edp_panel_vdd_off(intel_dp); |
909 | ironlake_edp_backlight_on(dev); | 1041 | } |
1042 | intel_dp_complete_link_train(intel_dp); | ||
910 | } | 1043 | } |
1044 | if (is_edp(intel_dp)) | ||
1045 | ironlake_edp_backlight_on(dev); | ||
1046 | } | ||
1047 | } | ||
1048 | |||
1049 | /* | ||
1050 | * Native read with retry for link status and receiver capability reads for | ||
1051 | * cases where the sink may still be asleep. | ||
1052 | */ | ||
1053 | static bool | ||
1054 | intel_dp_aux_native_read_retry(struct intel_dp *intel_dp, uint16_t address, | ||
1055 | uint8_t *recv, int recv_bytes) | ||
1056 | { | ||
1057 | int ret, i; | ||
1058 | |||
1059 | /* | ||
1060 | * Sinks are *supposed* to come up within 1ms from an off state, | ||
1061 | * but we're also supposed to retry 3 times per the spec. | ||
1062 | */ | ||
1063 | for (i = 0; i < 3; i++) { | ||
1064 | ret = intel_dp_aux_native_read(intel_dp, address, recv, | ||
1065 | recv_bytes); | ||
1066 | if (ret == recv_bytes) | ||
1067 | return true; | ||
1068 | msleep(1); | ||
911 | } | 1069 | } |
912 | intel_dp->dpms_mode = mode; | 1070 | |
1071 | return false; | ||
913 | } | 1072 | } |
914 | 1073 | ||
915 | /* | 1074 | /* |
@@ -917,17 +1076,12 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode) | |||
917 | * link status information | 1076 | * link status information |
918 | */ | 1077 | */ |
919 | static bool | 1078 | static bool |
920 | intel_dp_get_link_status(struct intel_dp *intel_dp, | 1079 | intel_dp_get_link_status(struct intel_dp *intel_dp) |
921 | uint8_t link_status[DP_LINK_STATUS_SIZE]) | ||
922 | { | 1080 | { |
923 | int ret; | 1081 | return intel_dp_aux_native_read_retry(intel_dp, |
924 | 1082 | DP_LANE0_1_STATUS, | |
925 | ret = intel_dp_aux_native_read(intel_dp, | 1083 | intel_dp->link_status, |
926 | DP_LANE0_1_STATUS, | 1084 | DP_LINK_STATUS_SIZE); |
927 | link_status, DP_LINK_STATUS_SIZE); | ||
928 | if (ret != DP_LINK_STATUS_SIZE) | ||
929 | return false; | ||
930 | return true; | ||
931 | } | 1085 | } |
932 | 1086 | ||
933 | static uint8_t | 1087 | static uint8_t |
@@ -999,18 +1153,15 @@ intel_dp_pre_emphasis_max(uint8_t voltage_swing) | |||
999 | } | 1153 | } |
1000 | 1154 | ||
1001 | static void | 1155 | static void |
1002 | intel_get_adjust_train(struct intel_dp *intel_dp, | 1156 | intel_get_adjust_train(struct intel_dp *intel_dp) |
1003 | uint8_t link_status[DP_LINK_STATUS_SIZE], | ||
1004 | int lane_count, | ||
1005 | uint8_t train_set[4]) | ||
1006 | { | 1157 | { |
1007 | uint8_t v = 0; | 1158 | uint8_t v = 0; |
1008 | uint8_t p = 0; | 1159 | uint8_t p = 0; |
1009 | int lane; | 1160 | int lane; |
1010 | 1161 | ||
1011 | for (lane = 0; lane < lane_count; lane++) { | 1162 | for (lane = 0; lane < intel_dp->lane_count; lane++) { |
1012 | uint8_t this_v = intel_get_adjust_request_voltage(link_status, lane); | 1163 | uint8_t this_v = intel_get_adjust_request_voltage(intel_dp->link_status, lane); |
1013 | uint8_t this_p = intel_get_adjust_request_pre_emphasis(link_status, lane); | 1164 | uint8_t this_p = intel_get_adjust_request_pre_emphasis(intel_dp->link_status, lane); |
1014 | 1165 | ||
1015 | if (this_v > v) | 1166 | if (this_v > v) |
1016 | v = this_v; | 1167 | v = this_v; |
@@ -1025,7 +1176,7 @@ intel_get_adjust_train(struct intel_dp *intel_dp, | |||
1025 | p = intel_dp_pre_emphasis_max(v) | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED; | 1176 | p = intel_dp_pre_emphasis_max(v) | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED; |
1026 | 1177 | ||
1027 | for (lane = 0; lane < 4; lane++) | 1178 | for (lane = 0; lane < 4; lane++) |
1028 | train_set[lane] = v | p; | 1179 | intel_dp->train_set[lane] = v | p; |
1029 | } | 1180 | } |
1030 | 1181 | ||
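intel_get_adjust_train() folds the per-lane adjust requests into a single worst-case drive setting, clamped at the maxima the hardware advertises. A sketch of that reduction; the bit layout mirrors the DPCD TRAINING_LANEx_SET encoding, but the preemph_max() values are illustrative:

```c
#include <stdint.h>
#include <stdio.h>

#define VSWING_MASK		0x3
#define PREEMPH_SHIFT		3
#define MAX_SWING_REACHED	(1 << 2)
#define MAX_PREEMPH_REACHED	(1 << 5)

/* Highest pre-emphasis each swing level supports (sketch values). */
static uint8_t preemph_max(uint8_t v)
{
	return (uint8_t)((3 - v) << PREEMPH_SHIFT);
}

static void adjust_train(const uint8_t *req_v, const uint8_t *req_p,
			 int lanes, uint8_t train_set[4])
{
	uint8_t v = 0, p = 0;
	int lane;

	/* Take the worst-case (largest) request across all active lanes. */
	for (lane = 0; lane < lanes; lane++) {
		if (req_v[lane] > v)
			v = req_v[lane];
		if (req_p[lane] > p)
			p = req_p[lane];
	}

	if (v >= 3)					/* highest swing level */
		v = 3 | MAX_SWING_REACHED;
	if (p >= preemph_max(v & VSWING_MASK))
		p = preemph_max(v & VSWING_MASK) | MAX_PREEMPH_REACHED;

	/* Program the same value on every lane, as the driver does. */
	for (lane = 0; lane < 4; lane++)
		train_set[lane] = v | p;
}

int main(void)
{
	uint8_t rv[4] = { 1, 2, 1, 0 }, rp[4] = { 0, 1 << PREEMPH_SHIFT, 0, 0 };
	uint8_t ts[4];

	adjust_train(rv, rp, 4, ts);
	printf("train_set[0] = 0x%02x\n", ts[0]);
	return 0;
}
```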
1031 | static uint32_t | 1182 | static uint32_t |
@@ -1070,18 +1221,27 @@ intel_dp_signal_levels(uint8_t train_set, int lane_count) | |||
1070 | static uint32_t | 1221 | static uint32_t |
1071 | intel_gen6_edp_signal_levels(uint8_t train_set) | 1222 | intel_gen6_edp_signal_levels(uint8_t train_set) |
1072 | { | 1223 | { |
1073 | switch (train_set & (DP_TRAIN_VOLTAGE_SWING_MASK|DP_TRAIN_PRE_EMPHASIS_MASK)) { | 1224 | int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK | |
1225 | DP_TRAIN_PRE_EMPHASIS_MASK); | ||
1226 | switch (signal_levels) { | ||
1074 | case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0: | 1227 | case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0: |
1075 | return EDP_LINK_TRAIN_400MV_0DB_SNB_B; | 1228 | case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0: |
1229 | return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B; | ||
1230 | case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5: | ||
1231 | return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B; | ||
1076 | case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6: | 1232 | case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6: |
1077 | return EDP_LINK_TRAIN_400MV_6DB_SNB_B; | 1233 | case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6: |
1234 | return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B; | ||
1078 | case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5: | 1235 | case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5: |
1079 | return EDP_LINK_TRAIN_600MV_3_5DB_SNB_B; | 1236 | case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5: |
1237 | return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B; | ||
1080 | case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0: | 1238 | case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0: |
1081 | return EDP_LINK_TRAIN_800MV_0DB_SNB_B; | 1239 | case DP_TRAIN_VOLTAGE_SWING_1200 | DP_TRAIN_PRE_EMPHASIS_0: |
1240 | return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B; | ||
1082 | default: | 1241 | default: |
1083 | DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level\n"); | 1242 | DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:" |
1084 | return EDP_LINK_TRAIN_400MV_0DB_SNB_B; | 1243 | "0x%x\n", signal_levels); |
1244 | return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B; | ||
1085 | } | 1245 | } |
1086 | } | 1246 | } |
1087 | 1247 | ||
@@ -1116,18 +1276,18 @@ intel_clock_recovery_ok(uint8_t link_status[DP_LINK_STATUS_SIZE], int lane_count | |||
1116 | DP_LANE_CHANNEL_EQ_DONE|\ | 1276 | DP_LANE_CHANNEL_EQ_DONE|\ |
1117 | DP_LANE_SYMBOL_LOCKED) | 1277 | DP_LANE_SYMBOL_LOCKED) |
1118 | static bool | 1278 | static bool |
1119 | intel_channel_eq_ok(uint8_t link_status[DP_LINK_STATUS_SIZE], int lane_count) | 1279 | intel_channel_eq_ok(struct intel_dp *intel_dp) |
1120 | { | 1280 | { |
1121 | uint8_t lane_align; | 1281 | uint8_t lane_align; |
1122 | uint8_t lane_status; | 1282 | uint8_t lane_status; |
1123 | int lane; | 1283 | int lane; |
1124 | 1284 | ||
1125 | lane_align = intel_dp_link_status(link_status, | 1285 | lane_align = intel_dp_link_status(intel_dp->link_status, |
1126 | DP_LANE_ALIGN_STATUS_UPDATED); | 1286 | DP_LANE_ALIGN_STATUS_UPDATED); |
1127 | if ((lane_align & DP_INTERLANE_ALIGN_DONE) == 0) | 1287 | if ((lane_align & DP_INTERLANE_ALIGN_DONE) == 0) |
1128 | return false; | 1288 | return false; |
1129 | for (lane = 0; lane < lane_count; lane++) { | 1289 | for (lane = 0; lane < intel_dp->lane_count; lane++) { |
1130 | lane_status = intel_get_lane_status(link_status, lane); | 1290 | lane_status = intel_get_lane_status(intel_dp->link_status, lane); |
1131 | if ((lane_status & CHANNEL_EQ_BITS) != CHANNEL_EQ_BITS) | 1291 | if ((lane_status & CHANNEL_EQ_BITS) != CHANNEL_EQ_BITS) |
1132 | return false; | 1292 | return false; |
1133 | } | 1293 | } |
@@ -1137,10 +1297,9 @@ intel_channel_eq_ok(uint8_t link_status[DP_LINK_STATUS_SIZE], int lane_count) | |||
1137 | static bool | 1297 | static bool |
1138 | intel_dp_set_link_train(struct intel_dp *intel_dp, | 1298 | intel_dp_set_link_train(struct intel_dp *intel_dp, |
1139 | uint32_t dp_reg_value, | 1299 | uint32_t dp_reg_value, |
1140 | uint8_t dp_train_pat, | 1300 | uint8_t dp_train_pat) |
1141 | uint8_t train_set[4]) | ||
1142 | { | 1301 | { |
1143 | struct drm_device *dev = intel_dp->base.enc.dev; | 1302 | struct drm_device *dev = intel_dp->base.base.dev; |
1144 | struct drm_i915_private *dev_priv = dev->dev_private; | 1303 | struct drm_i915_private *dev_priv = dev->dev_private; |
1145 | int ret; | 1304 | int ret; |
1146 | 1305 | ||
@@ -1152,28 +1311,27 @@ intel_dp_set_link_train(struct intel_dp *intel_dp, | |||
1152 | dp_train_pat); | 1311 | dp_train_pat); |
1153 | 1312 | ||
1154 | ret = intel_dp_aux_native_write(intel_dp, | 1313 | ret = intel_dp_aux_native_write(intel_dp, |
1155 | DP_TRAINING_LANE0_SET, train_set, 4); | 1314 | DP_TRAINING_LANE0_SET, |
1315 | intel_dp->train_set, 4); | ||
1156 | if (ret != 4) | 1316 | if (ret != 4) |
1157 | return false; | 1317 | return false; |
1158 | 1318 | ||
1159 | return true; | 1319 | return true; |
1160 | } | 1320 | } |
1161 | 1321 | ||
1322 | /* Enable corresponding port and start training pattern 1 */ | ||
1162 | static void | 1323 | static void |
1163 | intel_dp_link_train(struct intel_dp *intel_dp) | 1324 | intel_dp_start_link_train(struct intel_dp *intel_dp) |
1164 | { | 1325 | { |
1165 | struct drm_device *dev = intel_dp->base.enc.dev; | 1326 | struct drm_device *dev = intel_dp->base.base.dev; |
1166 | struct drm_i915_private *dev_priv = dev->dev_private; | 1327 | struct drm_i915_private *dev_priv = dev->dev_private; |
1167 | uint8_t train_set[4]; | 1328 | struct intel_crtc *intel_crtc = to_intel_crtc(intel_dp->base.base.crtc); |
1168 | uint8_t link_status[DP_LINK_STATUS_SIZE]; | ||
1169 | int i; | 1329 | int i; |
1170 | uint8_t voltage; | 1330 | uint8_t voltage; |
1171 | bool clock_recovery = false; | 1331 | bool clock_recovery = false; |
1172 | bool channel_eq = false; | ||
1173 | int tries; | 1332 | int tries; |
1174 | u32 reg; | 1333 | u32 reg; |
1175 | uint32_t DP = intel_dp->DP; | 1334 | uint32_t DP = intel_dp->DP; |
1176 | struct intel_crtc *intel_crtc = to_intel_crtc(intel_dp->base.enc.crtc); | ||
1177 | 1335 | ||
1178 | /* Enable output, wait for it to become active */ | 1336 | /* Enable output, wait for it to become active */ |
1179 | I915_WRITE(intel_dp->output_reg, intel_dp->DP); | 1337 | I915_WRITE(intel_dp->output_reg, intel_dp->DP); |
@@ -1186,108 +1344,140 @@ intel_dp_link_train(struct intel_dp *intel_dp) | |||
1186 | DP_LINK_CONFIGURATION_SIZE); | 1344 | DP_LINK_CONFIGURATION_SIZE); |
1187 | 1345 | ||
1188 | DP |= DP_PORT_EN; | 1346 | DP |= DP_PORT_EN; |
1189 | if (HAS_PCH_CPT(dev) && !IS_eDP(intel_dp)) | 1347 | if (HAS_PCH_CPT(dev) && !is_edp(intel_dp)) |
1190 | DP &= ~DP_LINK_TRAIN_MASK_CPT; | 1348 | DP &= ~DP_LINK_TRAIN_MASK_CPT; |
1191 | else | 1349 | else |
1192 | DP &= ~DP_LINK_TRAIN_MASK; | 1350 | DP &= ~DP_LINK_TRAIN_MASK; |
1193 | memset(train_set, 0, 4); | 1351 | memset(intel_dp->train_set, 0, 4); |
1194 | voltage = 0xff; | 1352 | voltage = 0xff; |
1195 | tries = 0; | 1353 | tries = 0; |
1196 | clock_recovery = false; | 1354 | clock_recovery = false; |
1197 | for (;;) { | 1355 | for (;;) { |
1198 | /* Use train_set[0] to set the voltage and pre emphasis values */ | 1356 | /* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */ |
1199 | uint32_t signal_levels; | 1357 | uint32_t signal_levels; |
1200 | if (IS_GEN6(dev) && IS_eDP(intel_dp)) { | 1358 | if (IS_GEN6(dev) && is_edp(intel_dp)) { |
1201 | signal_levels = intel_gen6_edp_signal_levels(train_set[0]); | 1359 | signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]); |
1202 | DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels; | 1360 | DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels; |
1203 | } else { | 1361 | } else { |
1204 | signal_levels = intel_dp_signal_levels(train_set[0], intel_dp->lane_count); | 1362 | signal_levels = intel_dp_signal_levels(intel_dp->train_set[0], intel_dp->lane_count); |
1205 | DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; | 1363 | DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; |
1206 | } | 1364 | } |
1207 | 1365 | ||
1208 | if (HAS_PCH_CPT(dev) && !IS_eDP(intel_dp)) | 1366 | if (HAS_PCH_CPT(dev) && !is_edp(intel_dp)) |
1209 | reg = DP | DP_LINK_TRAIN_PAT_1_CPT; | 1367 | reg = DP | DP_LINK_TRAIN_PAT_1_CPT; |
1210 | else | 1368 | else |
1211 | reg = DP | DP_LINK_TRAIN_PAT_1; | 1369 | reg = DP | DP_LINK_TRAIN_PAT_1; |
1212 | 1370 | ||
1213 | if (!intel_dp_set_link_train(intel_dp, reg, | 1371 | if (!intel_dp_set_link_train(intel_dp, reg, |
1214 | DP_TRAINING_PATTERN_1, train_set)) | 1372 | DP_TRAINING_PATTERN_1)) |
1215 | break; | 1373 | break; |
1216 | /* Set training pattern 1 */ | 1374 | /* Set training pattern 1 */ |
1217 | 1375 | ||
1218 | udelay(100); | 1376 | udelay(100); |
1219 | if (!intel_dp_get_link_status(intel_dp, link_status)) | 1377 | if (!intel_dp_get_link_status(intel_dp)) |
1220 | break; | 1378 | break; |
1221 | 1379 | ||
1222 | if (intel_clock_recovery_ok(link_status, intel_dp->lane_count)) { | 1380 | if (intel_clock_recovery_ok(intel_dp->link_status, intel_dp->lane_count)) { |
1223 | clock_recovery = true; | 1381 | clock_recovery = true; |
1224 | break; | 1382 | break; |
1225 | } | 1383 | } |
1226 | 1384 | ||
1227 | /* Check to see if we've tried the max voltage */ | 1385 | /* Check to see if we've tried the max voltage */ |
1228 | for (i = 0; i < intel_dp->lane_count; i++) | 1386 | for (i = 0; i < intel_dp->lane_count; i++) |
1229 | if ((train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0) | 1387 | if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0) |
1230 | break; | 1388 | break; |
1231 | if (i == intel_dp->lane_count) | 1389 | if (i == intel_dp->lane_count) |
1232 | break; | 1390 | break; |
1233 | 1391 | ||
1234 | /* Check to see if we've tried the same voltage 5 times */ | 1392 | /* Check to see if we've tried the same voltage 5 times */ |
1235 | if ((train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) { | 1393 | if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) { |
1236 | ++tries; | 1394 | ++tries; |
1237 | if (tries == 5) | 1395 | if (tries == 5) |
1238 | break; | 1396 | break; |
1239 | } else | 1397 | } else |
1240 | tries = 0; | 1398 | tries = 0; |
1241 | voltage = train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK; | 1399 | voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK; |
1242 | 1400 | ||
1243 | /* Compute new train_set as requested by target */ | 1401 | /* Compute new intel_dp->train_set as requested by target */ |
1244 | intel_get_adjust_train(intel_dp, link_status, intel_dp->lane_count, train_set); | 1402 | intel_get_adjust_train(intel_dp); |
1245 | } | 1403 | } |
1246 | 1404 | ||
1405 | intel_dp->DP = DP; | ||
1406 | } | ||
1407 | |||
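intel_dp_start_link_train() is now the clock-recovery half only: drive pattern 1, poll the sink, and either adopt its adjust request, stop once every lane reports max swing, or give up after five tries at one voltage. A compressed standalone model of that loop, with a fake sink that locks at swing level 2:

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define VSWING_MASK		0x3
#define MAX_SWING_REACHED	(1 << 2)

/* Stand-ins for the AUX/status plumbing; this fake sink reports
 * clock-recovery lock once the swing reaches level 2. */
static bool fake_cr_ok(uint8_t train) { return (train & VSWING_MASK) >= 2; }
static uint8_t fake_adjust_request(uint8_t train)
{
	uint8_t v = (train & VSWING_MASK) + 1;	/* sink asks for one more step */
	return v >= 3 ? (3 | MAX_SWING_REACHED) : v;
}

static bool clock_recovery(int lanes)
{
	uint8_t train_set[4] = { 0 };
	uint8_t voltage = 0xff;
	int tries = 0, i;

	for (;;) {
		/* 1. program swing/pre-emphasis and training pattern 1,
		 *    then give the sink time to lock (elided here) */
		if (fake_cr_ok(train_set[0]))
			return true;

		/* 2. bail out if every lane is already at max swing */
		for (i = 0; i < lanes; i++)
			if (!(train_set[i] & MAX_SWING_REACHED))
				break;
		if (i == lanes)
			return false;

		/* 3. bail out after trying the same voltage five times */
		if ((train_set[0] & VSWING_MASK) == voltage) {
			if (++tries == 5)
				return false;
		} else
			tries = 0;
		voltage = train_set[0] & VSWING_MASK;

		/* 4. adopt the sink's adjustment request and loop */
		for (i = 0; i < lanes; i++)
			train_set[i] = fake_adjust_request(train_set[i]);
	}
}

int main(void)
{
	printf("clock recovery %s\n", clock_recovery(4) ? "ok" : "failed");
	return 0;
}
```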
1408 | static void | ||
1409 | intel_dp_complete_link_train(struct intel_dp *intel_dp) | ||
1410 | { | ||
1411 | struct drm_device *dev = intel_dp->base.base.dev; | ||
1412 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1413 | bool channel_eq = false; | ||
1414 | int tries, cr_tries; | ||
1415 | u32 reg; | ||
1416 | uint32_t DP = intel_dp->DP; | ||
1417 | |||
1247 | /* channel equalization */ | 1418 | /* channel equalization */ |
1248 | tries = 0; | 1419 | tries = 0; |
1420 | cr_tries = 0; | ||
1249 | channel_eq = false; | 1421 | channel_eq = false; |
1250 | for (;;) { | 1422 | for (;;) { |
1251 | /* Use train_set[0] to set the voltage and pre emphasis values */ | 1423 | /* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */ |
1252 | uint32_t signal_levels; | 1424 | uint32_t signal_levels; |
1253 | 1425 | ||
1254 | if (IS_GEN6(dev) && IS_eDP(intel_dp)) { | 1426 | if (cr_tries > 5) { |
1255 | signal_levels = intel_gen6_edp_signal_levels(train_set[0]); | 1427 | DRM_ERROR("failed to train DP, aborting\n"); |
1428 | intel_dp_link_down(intel_dp); | ||
1429 | break; | ||
1430 | } | ||
1431 | |||
1432 | if (IS_GEN6(dev) && is_edp(intel_dp)) { | ||
1433 | signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]); | ||
1256 | DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels; | 1434 | DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels; |
1257 | } else { | 1435 | } else { |
1258 | signal_levels = intel_dp_signal_levels(train_set[0], intel_dp->lane_count); | 1436 | signal_levels = intel_dp_signal_levels(intel_dp->train_set[0], intel_dp->lane_count); |
1259 | DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; | 1437 | DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; |
1260 | } | 1438 | } |
1261 | 1439 | ||
1262 | if (HAS_PCH_CPT(dev) && !IS_eDP(intel_dp)) | 1440 | if (HAS_PCH_CPT(dev) && !is_edp(intel_dp)) |
1263 | reg = DP | DP_LINK_TRAIN_PAT_2_CPT; | 1441 | reg = DP | DP_LINK_TRAIN_PAT_2_CPT; |
1264 | else | 1442 | else |
1265 | reg = DP | DP_LINK_TRAIN_PAT_2; | 1443 | reg = DP | DP_LINK_TRAIN_PAT_2; |
1266 | 1444 | ||
1267 | /* channel eq pattern */ | 1445 | /* channel eq pattern */ |
1268 | if (!intel_dp_set_link_train(intel_dp, reg, | 1446 | if (!intel_dp_set_link_train(intel_dp, reg, |
1269 | DP_TRAINING_PATTERN_2, train_set)) | 1447 | DP_TRAINING_PATTERN_2)) |
1270 | break; | 1448 | break; |
1271 | 1449 | ||
1272 | udelay(400); | 1450 | udelay(400); |
1273 | if (!intel_dp_get_link_status(intel_dp, link_status)) | 1451 | if (!intel_dp_get_link_status(intel_dp)) |
1274 | break; | 1452 | break; |
1275 | 1453 | ||
1276 | if (intel_channel_eq_ok(link_status, intel_dp->lane_count)) { | 1454 | /* Make sure clock is still ok */ |
1455 | if (!intel_clock_recovery_ok(intel_dp->link_status, intel_dp->lane_count)) { | ||
1456 | intel_dp_start_link_train(intel_dp); | ||
1457 | cr_tries++; | ||
1458 | continue; | ||
1459 | } | ||
1460 | |||
1461 | if (intel_channel_eq_ok(intel_dp)) { | ||
1277 | channel_eq = true; | 1462 | channel_eq = true; |
1278 | break; | 1463 | break; |
1279 | } | 1464 | } |
1280 | 1465 | ||
1281 | /* Try 5 times */ | 1466 | /* Try 5 times, then try clock recovery if that fails */ |
1282 | if (tries > 5) | 1467 | if (tries > 5) { |
1283 | break; | 1468 | intel_dp_link_down(intel_dp); |
1469 | intel_dp_start_link_train(intel_dp); | ||
1470 | tries = 0; | ||
1471 | cr_tries++; | ||
1472 | continue; | ||
1473 | } | ||
1284 | 1474 | ||
1285 | /* Compute new train_set as requested by target */ | 1475 | /* Compute new intel_dp->train_set as requested by target */ |
1286 | intel_get_adjust_train(intel_dp, link_status, intel_dp->lane_count, train_set); | 1476 | intel_get_adjust_train(intel_dp); |
1287 | ++tries; | 1477 | ++tries; |
1288 | } | 1478 | } |
1289 | 1479 | ||
1290 | if (HAS_PCH_CPT(dev) && !IS_eDP(intel_dp)) | 1480 | if (HAS_PCH_CPT(dev) && !is_edp(intel_dp)) |
1291 | reg = DP | DP_LINK_TRAIN_OFF_CPT; | 1481 | reg = DP | DP_LINK_TRAIN_OFF_CPT; |
1292 | else | 1482 | else |
1293 | reg = DP | DP_LINK_TRAIN_OFF; | 1483 | reg = DP | DP_LINK_TRAIN_OFF; |
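The equalization half gains two escape hatches: if clock recovery collapses mid-EQ, retrain from scratch, and after five stuck EQ attempts drop the link and rerun clock recovery, aborting once cr_tries passes five. A sketch of that state machine with fake status reads:

```c
#include <stdbool.h>
#include <stdio.h>

/* Fake link: clock recovery stays good, and equalization succeeds
 * on the third pattern-2 iteration. */
static int eq_polls;
static bool fake_cr_still_ok(void) { return true; }
static bool fake_channel_eq_ok(void) { return ++eq_polls >= 3; }
static void restart_clock_recovery(void) { eq_polls = 0; }

static bool complete_link_train(void)
{
	int tries = 0, cr_tries = 0;

	for (;;) {
		if (cr_tries > 5)
			return false;	/* training keeps collapsing: abort */

		/* program training pattern 2, wait, read status (elided) */

		if (!fake_cr_still_ok()) {
			/* clock recovery lost mid-EQ: retrain from scratch */
			restart_clock_recovery();
			cr_tries++;
			continue;
		}
		if (fake_channel_eq_ok())
			return true;

		if (++tries > 5) {
			/* EQ is stuck at this operating point: drop the
			 * link and rerun clock recovery before retrying */
			restart_clock_recovery();
			tries = 0;
			cr_tries++;
			continue;
		}
		/* otherwise adopt the sink's new adjust request and retry */
	}
}

int main(void)
{
	printf("channel eq %s\n", complete_link_train() ? "ok" : "failed");
	return 0;
}
```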
@@ -1301,33 +1491,69 @@ intel_dp_link_train(struct intel_dp *intel_dp) | |||
1301 | static void | 1491 | static void |
1302 | intel_dp_link_down(struct intel_dp *intel_dp) | 1492 | intel_dp_link_down(struct intel_dp *intel_dp) |
1303 | { | 1493 | { |
1304 | struct drm_device *dev = intel_dp->base.enc.dev; | 1494 | struct drm_device *dev = intel_dp->base.base.dev; |
1305 | struct drm_i915_private *dev_priv = dev->dev_private; | 1495 | struct drm_i915_private *dev_priv = dev->dev_private; |
1306 | uint32_t DP = intel_dp->DP; | 1496 | uint32_t DP = intel_dp->DP; |
1307 | 1497 | ||
1498 | if ((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0) | ||
1499 | return; | ||
1500 | |||
1308 | DRM_DEBUG_KMS("\n"); | 1501 | DRM_DEBUG_KMS("\n"); |
1309 | 1502 | ||
1310 | if (IS_eDP(intel_dp)) { | 1503 | if (is_edp(intel_dp)) { |
1311 | DP &= ~DP_PLL_ENABLE; | 1504 | DP &= ~DP_PLL_ENABLE; |
1312 | I915_WRITE(intel_dp->output_reg, DP); | 1505 | I915_WRITE(intel_dp->output_reg, DP); |
1313 | POSTING_READ(intel_dp->output_reg); | 1506 | POSTING_READ(intel_dp->output_reg); |
1314 | udelay(100); | 1507 | udelay(100); |
1315 | } | 1508 | } |
1316 | 1509 | ||
1317 | if (HAS_PCH_CPT(dev) && !IS_eDP(intel_dp)) { | 1510 | if (HAS_PCH_CPT(dev) && !is_edp(intel_dp)) { |
1318 | DP &= ~DP_LINK_TRAIN_MASK_CPT; | 1511 | DP &= ~DP_LINK_TRAIN_MASK_CPT; |
1319 | I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT); | 1512 | I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT); |
1320 | POSTING_READ(intel_dp->output_reg); | ||
1321 | } else { | 1513 | } else { |
1322 | DP &= ~DP_LINK_TRAIN_MASK; | 1514 | DP &= ~DP_LINK_TRAIN_MASK; |
1323 | I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE); | 1515 | I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE); |
1324 | POSTING_READ(intel_dp->output_reg); | ||
1325 | } | 1516 | } |
1517 | POSTING_READ(intel_dp->output_reg); | ||
1326 | 1518 | ||
1327 | udelay(17000); | 1519 | msleep(17); |
1328 | 1520 | ||
1329 | if (IS_eDP(intel_dp)) | 1521 | if (is_edp(intel_dp)) |
1330 | DP |= DP_LINK_TRAIN_OFF; | 1522 | DP |= DP_LINK_TRAIN_OFF; |
1523 | |||
1524 | if (!HAS_PCH_CPT(dev) && | ||
1525 | I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) { | ||
1526 | struct drm_crtc *crtc = intel_dp->base.base.crtc; | ||
1527 | |||
1528 | /* Hardware workaround: leaving our transcoder select | ||
1529 | * set to transcoder B while it's off will prevent the | ||
1530 | * corresponding HDMI output on transcoder A. | ||
1531 | * | ||
1532 | * Combine this with another hardware workaround: | ||
1533 | * transcoder select bit can only be cleared while the | ||
1534 | * port is enabled. | ||
1535 | */ | ||
1536 | DP &= ~DP_PIPEB_SELECT; | ||
1537 | I915_WRITE(intel_dp->output_reg, DP); | ||
1538 | |||
1539 | /* Changes to enable or select take place the vblank | ||
1540 | * after being written. | ||
1541 | */ | ||
1542 | if (crtc == NULL) { | ||
1543 | /* We can arrive here never having been attached | ||
1544 | * to a CRTC, for instance, due to inheriting | ||
1545 | * random state from the BIOS. | ||
1546 | * | ||
1547 | * If the pipe is not running, play safe and | ||
1548 | * wait for the clocks to stabilise before | ||
1549 | * continuing. | ||
1550 | */ | ||
1551 | POSTING_READ(intel_dp->output_reg); | ||
1552 | msleep(50); | ||
1553 | } else | ||
1554 | intel_wait_for_vblank(dev, to_intel_crtc(crtc)->pipe); | ||
1555 | } | ||
1556 | |||
1331 | I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN); | 1557 | I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN); |
1332 | POSTING_READ(intel_dp->output_reg); | 1558 | POSTING_READ(intel_dp->output_reg); |
1333 | } | 1559 | } |
@@ -1344,60 +1570,63 @@ intel_dp_link_down(struct intel_dp *intel_dp) | |||
1344 | static void | 1570 | static void |
1345 | intel_dp_check_link_status(struct intel_dp *intel_dp) | 1571 | intel_dp_check_link_status(struct intel_dp *intel_dp) |
1346 | { | 1572 | { |
1347 | uint8_t link_status[DP_LINK_STATUS_SIZE]; | 1573 | int ret; |
1574 | |||
1575 | if (!intel_dp->base.base.crtc) | ||
1576 | return; | ||
1348 | 1577 | ||
1349 | if (!intel_dp->base.enc.crtc) | 1578 | if (!intel_dp_get_link_status(intel_dp)) { |
1579 | intel_dp_link_down(intel_dp); | ||
1350 | return; | 1580 | return; |
1581 | } | ||
1351 | 1582 | ||
1352 | if (!intel_dp_get_link_status(intel_dp, link_status)) { | 1583 | /* Try to read receiver status if the link appears to be up */ |
1584 | ret = intel_dp_aux_native_read(intel_dp, | ||
1585 | 0x000, intel_dp->dpcd, | ||
1586 | sizeof (intel_dp->dpcd)); | ||
1587 | if (ret != sizeof(intel_dp->dpcd)) { | ||
1353 | intel_dp_link_down(intel_dp); | 1588 | intel_dp_link_down(intel_dp); |
1354 | return; | 1589 | return; |
1355 | } | 1590 | } |
1356 | 1591 | ||
1357 | if (!intel_channel_eq_ok(link_status, intel_dp->lane_count)) | 1592 | if (!intel_channel_eq_ok(intel_dp)) { |
1358 | intel_dp_link_train(intel_dp); | 1593 | intel_dp_start_link_train(intel_dp); |
1594 | intel_dp_complete_link_train(intel_dp); | ||
1595 | } | ||
1359 | } | 1596 | } |
1360 | 1597 | ||
1361 | static enum drm_connector_status | 1598 | static enum drm_connector_status |
1362 | ironlake_dp_detect(struct drm_connector *connector) | 1599 | ironlake_dp_detect(struct intel_dp *intel_dp) |
1363 | { | 1600 | { |
1364 | struct drm_encoder *encoder = intel_attached_encoder(connector); | ||
1365 | struct intel_dp *intel_dp = enc_to_intel_dp(encoder); | ||
1366 | enum drm_connector_status status; | 1601 | enum drm_connector_status status; |
1602 | bool ret; | ||
1367 | 1603 | ||
1368 | status = connector_status_disconnected; | 1604 | /* Can't disconnect eDP, but you can close the lid... */ |
1369 | if (intel_dp_aux_native_read(intel_dp, | 1605 | if (is_edp(intel_dp)) { |
1370 | 0x000, intel_dp->dpcd, | 1606 | status = intel_panel_detect(intel_dp->base.base.dev); |
1371 | sizeof (intel_dp->dpcd)) == sizeof (intel_dp->dpcd)) | 1607 | if (status == connector_status_unknown) |
1372 | { | ||
1373 | if (intel_dp->dpcd[0] != 0) | ||
1374 | status = connector_status_connected; | 1608 | status = connector_status_connected; |
1609 | return status; | ||
1375 | } | 1610 | } |
1611 | |||
1612 | status = connector_status_disconnected; | ||
1613 | ret = intel_dp_aux_native_read_retry(intel_dp, | ||
1614 | 0x000, intel_dp->dpcd, | ||
1615 | sizeof (intel_dp->dpcd)); | ||
1616 | if (ret && intel_dp->dpcd[DP_DPCD_REV] != 0) | ||
1617 | status = connector_status_connected; | ||
1376 | DRM_DEBUG_KMS("DPCD: %hx%hx%hx%hx\n", intel_dp->dpcd[0], | 1618 | DRM_DEBUG_KMS("DPCD: %hx%hx%hx%hx\n", intel_dp->dpcd[0], |
1377 | intel_dp->dpcd[1], intel_dp->dpcd[2], intel_dp->dpcd[3]); | 1619 | intel_dp->dpcd[1], intel_dp->dpcd[2], intel_dp->dpcd[3]); |
1378 | return status; | 1620 | return status; |
1379 | } | 1621 | } |
1380 | 1622 | ||
1381 | /** | ||
1382 | * Uses CRT_HOTPLUG_EN and CRT_HOTPLUG_STAT to detect DP connection. | ||
1383 | * | ||
1384 | * \return true if DP port is connected. | ||
1385 | * \return false if DP port is disconnected. | ||
1386 | */ | ||
1387 | static enum drm_connector_status | 1623 | static enum drm_connector_status |
1388 | intel_dp_detect(struct drm_connector *connector, bool force) | 1624 | g4x_dp_detect(struct intel_dp *intel_dp) |
1389 | { | 1625 | { |
1390 | struct drm_encoder *encoder = intel_attached_encoder(connector); | 1626 | struct drm_device *dev = intel_dp->base.base.dev; |
1391 | struct intel_dp *intel_dp = enc_to_intel_dp(encoder); | ||
1392 | struct drm_device *dev = intel_dp->base.enc.dev; | ||
1393 | struct drm_i915_private *dev_priv = dev->dev_private; | 1627 | struct drm_i915_private *dev_priv = dev->dev_private; |
1394 | uint32_t temp, bit; | ||
1395 | enum drm_connector_status status; | 1628 | enum drm_connector_status status; |
1396 | 1629 | uint32_t temp, bit; | |
1397 | intel_dp->has_audio = false; | ||
1398 | |||
1399 | if (HAS_PCH_SPLIT(dev)) | ||
1400 | return ironlake_dp_detect(connector); | ||
1401 | 1630 | ||
1402 | switch (intel_dp->output_reg) { | 1631 | switch (intel_dp->output_reg) { |
1403 | case DP_B: | 1632 | case DP_B: |
@@ -1419,31 +1648,66 @@ intel_dp_detect(struct drm_connector *connector, bool force) | |||
1419 | return connector_status_disconnected; | 1648 | return connector_status_disconnected; |
1420 | 1649 | ||
1421 | status = connector_status_disconnected; | 1650 | status = connector_status_disconnected; |
1422 | if (intel_dp_aux_native_read(intel_dp, | 1651 | if (intel_dp_aux_native_read(intel_dp, 0x000, intel_dp->dpcd, |
1423 | 0x000, intel_dp->dpcd, | ||
1424 | sizeof (intel_dp->dpcd)) == sizeof (intel_dp->dpcd)) | 1652 | sizeof (intel_dp->dpcd)) == sizeof (intel_dp->dpcd)) |
1425 | { | 1653 | { |
1426 | if (intel_dp->dpcd[0] != 0) | 1654 | if (intel_dp->dpcd[DP_DPCD_REV] != 0) |
1427 | status = connector_status_connected; | 1655 | status = connector_status_connected; |
1428 | } | 1656 | } |
1657 | |||
1429 | return status; | 1658 | return status; |
1430 | } | 1659 | } |
1431 | 1660 | ||
1661 | /** | ||
1662 | * Uses CRT_HOTPLUG_EN and CRT_HOTPLUG_STAT to detect DP connection. | ||
1663 | * | ||
1664 | * \return true if DP port is connected. | ||
1665 | * \return false if DP port is disconnected. | ||
1666 | */ | ||
1667 | static enum drm_connector_status | ||
1668 | intel_dp_detect(struct drm_connector *connector, bool force) | ||
1669 | { | ||
1670 | struct intel_dp *intel_dp = intel_attached_dp(connector); | ||
1671 | struct drm_device *dev = intel_dp->base.base.dev; | ||
1672 | enum drm_connector_status status; | ||
1673 | struct edid *edid = NULL; | ||
1674 | |||
1675 | intel_dp->has_audio = false; | ||
1676 | |||
1677 | if (HAS_PCH_SPLIT(dev)) | ||
1678 | status = ironlake_dp_detect(intel_dp); | ||
1679 | else | ||
1680 | status = g4x_dp_detect(intel_dp); | ||
1681 | if (status != connector_status_connected) | ||
1682 | return status; | ||
1683 | |||
1684 | if (intel_dp->force_audio) { | ||
1685 | intel_dp->has_audio = intel_dp->force_audio > 0; | ||
1686 | } else { | ||
1687 | edid = drm_get_edid(connector, &intel_dp->adapter); | ||
1688 | if (edid) { | ||
1689 | intel_dp->has_audio = drm_detect_monitor_audio(edid); | ||
1690 | connector->display_info.raw_edid = NULL; | ||
1691 | kfree(edid); | ||
1692 | } | ||
1693 | } | ||
1694 | |||
1695 | return connector_status_connected; | ||
1696 | } | ||
1697 | |||
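The detect path resolves audio from a tri-state override: a nonzero force_audio wins, otherwise the monitor's EDID audio flag decides. A minimal model of that resolution, with a stand-in for drm_detect_monitor_audio():

```c
#include <stdbool.h>
#include <stdio.h>

/* Tri-state override, as with the force_audio property:
 *   <0 force off, 0 automatic (use EDID), >0 force on. */
static bool edid_reports_audio(void)
{
	return true;	/* stand-in for drm_detect_monitor_audio(edid) */
}

static bool resolve_has_audio(int force_audio)
{
	if (force_audio)
		return force_audio > 0;
	return edid_reports_audio();
}

int main(void)
{
	printf("auto: %d\n", resolve_has_audio(0));
	printf("off:  %d\n", resolve_has_audio(-1));
	printf("on:   %d\n", resolve_has_audio(1));
	return 0;
}
```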
1432 | static int intel_dp_get_modes(struct drm_connector *connector) | 1698 | static int intel_dp_get_modes(struct drm_connector *connector) |
1433 | { | 1699 | { |
1434 | struct drm_encoder *encoder = intel_attached_encoder(connector); | 1700 | struct intel_dp *intel_dp = intel_attached_dp(connector); |
1435 | struct intel_dp *intel_dp = enc_to_intel_dp(encoder); | 1701 | struct drm_device *dev = intel_dp->base.base.dev; |
1436 | struct drm_device *dev = intel_dp->base.enc.dev; | ||
1437 | struct drm_i915_private *dev_priv = dev->dev_private; | 1702 | struct drm_i915_private *dev_priv = dev->dev_private; |
1438 | int ret; | 1703 | int ret; |
1439 | 1704 | ||
1440 | /* We should parse the EDID data and find out if it has an audio sink | 1705 | /* We should parse the EDID data and find out if it has an audio sink |
1441 | */ | 1706 | */ |
1442 | 1707 | ||
1443 | ret = intel_ddc_get_modes(connector, intel_dp->base.ddc_bus); | 1708 | ret = intel_ddc_get_modes(connector, &intel_dp->adapter); |
1444 | if (ret) { | 1709 | if (ret) { |
1445 | if ((IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) && | 1710 | if (is_edp(intel_dp) && !dev_priv->panel_fixed_mode) { |
1446 | !dev_priv->panel_fixed_mode) { | ||
1447 | struct drm_display_mode *newmode; | 1711 | struct drm_display_mode *newmode; |
1448 | list_for_each_entry(newmode, &connector->probed_modes, | 1712 | list_for_each_entry(newmode, &connector->probed_modes, |
1449 | head) { | 1713 | head) { |
@@ -1459,7 +1723,7 @@ static int intel_dp_get_modes(struct drm_connector *connector) | |||
1459 | } | 1723 | } |
1460 | 1724 | ||
1461 | /* if eDP has no EDID, try to use fixed panel mode from VBT */ | 1725 | /* if eDP has no EDID, try to use fixed panel mode from VBT */ |
1462 | if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) { | 1726 | if (is_edp(intel_dp)) { |
1463 | if (dev_priv->panel_fixed_mode != NULL) { | 1727 | if (dev_priv->panel_fixed_mode != NULL) { |
1464 | struct drm_display_mode *mode; | 1728 | struct drm_display_mode *mode; |
1465 | mode = drm_mode_duplicate(dev, dev_priv->panel_fixed_mode); | 1729 | mode = drm_mode_duplicate(dev, dev_priv->panel_fixed_mode); |
@@ -1470,6 +1734,79 @@ static int intel_dp_get_modes(struct drm_connector *connector) | |||
1470 | return 0; | 1734 | return 0; |
1471 | } | 1735 | } |
1472 | 1736 | ||
1737 | static bool | ||
1738 | intel_dp_detect_audio(struct drm_connector *connector) | ||
1739 | { | ||
1740 | struct intel_dp *intel_dp = intel_attached_dp(connector); | ||
1741 | struct edid *edid; | ||
1742 | bool has_audio = false; | ||
1743 | |||
1744 | edid = drm_get_edid(connector, &intel_dp->adapter); | ||
1745 | if (edid) { | ||
1746 | has_audio = drm_detect_monitor_audio(edid); | ||
1747 | |||
1748 | connector->display_info.raw_edid = NULL; | ||
1749 | kfree(edid); | ||
1750 | } | ||
1751 | |||
1752 | return has_audio; | ||
1753 | } | ||
1754 | |||
1755 | static int | ||
1756 | intel_dp_set_property(struct drm_connector *connector, | ||
1757 | struct drm_property *property, | ||
1758 | uint64_t val) | ||
1759 | { | ||
1760 | struct drm_i915_private *dev_priv = connector->dev->dev_private; | ||
1761 | struct intel_dp *intel_dp = intel_attached_dp(connector); | ||
1762 | int ret; | ||
1763 | |||
1764 | ret = drm_connector_property_set_value(connector, property, val); | ||
1765 | if (ret) | ||
1766 | return ret; | ||
1767 | |||
1768 | if (property == dev_priv->force_audio_property) { | ||
1769 | int i = val; | ||
1770 | bool has_audio; | ||
1771 | |||
1772 | if (i == intel_dp->force_audio) | ||
1773 | return 0; | ||
1774 | |||
1775 | intel_dp->force_audio = i; | ||
1776 | |||
1777 | if (i == 0) | ||
1778 | has_audio = intel_dp_detect_audio(connector); | ||
1779 | else | ||
1780 | has_audio = i > 0; | ||
1781 | |||
1782 | if (has_audio == intel_dp->has_audio) | ||
1783 | return 0; | ||
1784 | |||
1785 | intel_dp->has_audio = has_audio; | ||
1786 | goto done; | ||
1787 | } | ||
1788 | |||
1789 | if (property == dev_priv->broadcast_rgb_property) { | ||
1790 | if (val == !!intel_dp->color_range) | ||
1791 | return 0; | ||
1792 | |||
1793 | intel_dp->color_range = val ? DP_COLOR_RANGE_16_235 : 0; | ||
1794 | goto done; | ||
1795 | } | ||
1796 | |||
1797 | return -EINVAL; | ||
1798 | |||
1799 | done: | ||
1800 | if (intel_dp->base.base.crtc) { | ||
1801 | struct drm_crtc *crtc = intel_dp->base.base.crtc; | ||
1802 | drm_crtc_helper_set_mode(crtc, &crtc->mode, | ||
1803 | crtc->x, crtc->y, | ||
1804 | crtc->fb); | ||
1805 | } | ||
1806 | |||
1807 | return 0; | ||
1808 | } | ||
1809 | |||
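From userspace the new hook is reached through the stock KMS property interface. A sketch using libdrm, where the property id still has to be looked up by name first; the property name ("audio") and the -1/0/1 value range are assumptions matching the tri-state handling above, while drmModeConnectorSetProperty() itself is the standard libdrm call:

#include <xf86drm.h>
#include <xf86drmMode.h>

/* Userspace sketch: force DP audio on via the connector property.
 * prop_id must come from a drmModeGetProperty() name lookup first. */
int force_dp_audio_on(int fd, uint32_t connector_id, uint32_t prop_id)
{
        /* 1 = force on, 0 = auto-detect from EDID, -1 = force off */
        return drmModeConnectorSetProperty(fd, connector_id, prop_id, 1);
}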
1473 | static void | 1810 | static void |
1474 | intel_dp_destroy (struct drm_connector *connector) | 1811 | intel_dp_destroy (struct drm_connector *connector) |
1475 | { | 1812 | { |
@@ -1478,6 +1815,15 @@ intel_dp_destroy (struct drm_connector *connector) | |||
1478 | kfree(connector); | 1815 | kfree(connector); |
1479 | } | 1816 | } |
1480 | 1817 | ||
1818 | static void intel_dp_encoder_destroy(struct drm_encoder *encoder) | ||
1819 | { | ||
1820 | struct intel_dp *intel_dp = enc_to_intel_dp(encoder); | ||
1821 | |||
1822 | i2c_del_adapter(&intel_dp->adapter); | ||
1823 | drm_encoder_cleanup(encoder); | ||
1824 | kfree(intel_dp); | ||
1825 | } | ||
1826 | |||
1481 | static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = { | 1827 | static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = { |
1482 | .dpms = intel_dp_dpms, | 1828 | .dpms = intel_dp_dpms, |
1483 | .mode_fixup = intel_dp_mode_fixup, | 1829 | .mode_fixup = intel_dp_mode_fixup, |
@@ -1490,26 +1836,26 @@ static const struct drm_connector_funcs intel_dp_connector_funcs = { | |||
1490 | .dpms = drm_helper_connector_dpms, | 1836 | .dpms = drm_helper_connector_dpms, |
1491 | .detect = intel_dp_detect, | 1837 | .detect = intel_dp_detect, |
1492 | .fill_modes = drm_helper_probe_single_connector_modes, | 1838 | .fill_modes = drm_helper_probe_single_connector_modes, |
1839 | .set_property = intel_dp_set_property, | ||
1493 | .destroy = intel_dp_destroy, | 1840 | .destroy = intel_dp_destroy, |
1494 | }; | 1841 | }; |
1495 | 1842 | ||
1496 | static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = { | 1843 | static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = { |
1497 | .get_modes = intel_dp_get_modes, | 1844 | .get_modes = intel_dp_get_modes, |
1498 | .mode_valid = intel_dp_mode_valid, | 1845 | .mode_valid = intel_dp_mode_valid, |
1499 | .best_encoder = intel_attached_encoder, | 1846 | .best_encoder = intel_best_encoder, |
1500 | }; | 1847 | }; |
1501 | 1848 | ||
1502 | static const struct drm_encoder_funcs intel_dp_enc_funcs = { | 1849 | static const struct drm_encoder_funcs intel_dp_enc_funcs = { |
1503 | .destroy = intel_encoder_destroy, | 1850 | .destroy = intel_dp_encoder_destroy, |
1504 | }; | 1851 | }; |
1505 | 1852 | ||
1506 | void | 1853 | static void |
1507 | intel_dp_hot_plug(struct intel_encoder *intel_encoder) | 1854 | intel_dp_hot_plug(struct intel_encoder *intel_encoder) |
1508 | { | 1855 | { |
1509 | struct intel_dp *intel_dp = container_of(intel_encoder, struct intel_dp, base); | 1856 | struct intel_dp *intel_dp = container_of(intel_encoder, struct intel_dp, base); |
1510 | 1857 | ||
1511 | if (intel_dp->dpms_mode == DRM_MODE_DPMS_ON) | 1858 | intel_dp_check_link_status(intel_dp); |
1512 | intel_dp_check_link_status(intel_dp); | ||
1513 | } | 1859 | } |
1514 | 1860 | ||
1515 | /* Return which DP Port should be selected for Transcoder DP control */ | 1861 | /* Return which DP Port should be selected for Transcoder DP control */ |
@@ -1554,6 +1900,13 @@ bool intel_dpd_is_edp(struct drm_device *dev) | |||
1554 | return false; | 1900 | return false; |
1555 | } | 1901 | } |
1556 | 1902 | ||
1903 | static void | ||
1904 | intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector) | ||
1905 | { | ||
1906 | intel_attach_force_audio_property(connector); | ||
1907 | intel_attach_broadcast_rgb_property(connector); | ||
1908 | } | ||
1909 | |||
1557 | void | 1910 | void |
1558 | intel_dp_init(struct drm_device *dev, int output_reg) | 1911 | intel_dp_init(struct drm_device *dev, int output_reg) |
1559 | { | 1912 | { |
@@ -1569,6 +1922,8 @@ intel_dp_init(struct drm_device *dev, int output_reg) | |||
1569 | if (!intel_dp) | 1922 | if (!intel_dp) |
1570 | return; | 1923 | return; |
1571 | 1924 | ||
1925 | intel_dp->output_reg = output_reg; | ||
1926 | |||
1572 | intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL); | 1927 | intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL); |
1573 | if (!intel_connector) { | 1928 | if (!intel_connector) { |
1574 | kfree(intel_dp); | 1929 | kfree(intel_dp); |
@@ -1580,7 +1935,7 @@ intel_dp_init(struct drm_device *dev, int output_reg) | |||
1580 | if (intel_dpd_is_edp(dev)) | 1935 | if (intel_dpd_is_edp(dev)) |
1581 | intel_dp->is_pch_edp = true; | 1936 | intel_dp->is_pch_edp = true; |
1582 | 1937 | ||
1583 | if (output_reg == DP_A || IS_PCH_eDP(intel_dp)) { | 1938 | if (output_reg == DP_A || is_pch_edp(intel_dp)) { |
1584 | type = DRM_MODE_CONNECTOR_eDP; | 1939 | type = DRM_MODE_CONNECTOR_eDP; |
1585 | intel_encoder->type = INTEL_OUTPUT_EDP; | 1940 | intel_encoder->type = INTEL_OUTPUT_EDP; |
1586 | } else { | 1941 | } else { |
@@ -1601,23 +1956,18 @@ intel_dp_init(struct drm_device *dev, int output_reg) | |||
1601 | else if (output_reg == DP_D || output_reg == PCH_DP_D) | 1956 | else if (output_reg == DP_D || output_reg == PCH_DP_D) |
1602 | intel_encoder->clone_mask = (1 << INTEL_DP_D_CLONE_BIT); | 1957 | intel_encoder->clone_mask = (1 << INTEL_DP_D_CLONE_BIT); |
1603 | 1958 | ||
1604 | if (IS_eDP(intel_dp)) | 1959 | if (is_edp(intel_dp)) |
1605 | intel_encoder->clone_mask = (1 << INTEL_EDP_CLONE_BIT); | 1960 | intel_encoder->clone_mask = (1 << INTEL_EDP_CLONE_BIT); |
1606 | 1961 | ||
1607 | intel_encoder->crtc_mask = (1 << 0) | (1 << 1); | 1962 | intel_encoder->crtc_mask = (1 << 0) | (1 << 1); |
1608 | connector->interlace_allowed = true; | 1963 | connector->interlace_allowed = true; |
1609 | connector->doublescan_allowed = 0; | 1964 | connector->doublescan_allowed = 0; |
1610 | 1965 | ||
1611 | intel_dp->output_reg = output_reg; | 1966 | drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs, |
1612 | intel_dp->has_audio = false; | ||
1613 | intel_dp->dpms_mode = DRM_MODE_DPMS_ON; | ||
1614 | |||
1615 | drm_encoder_init(dev, &intel_encoder->enc, &intel_dp_enc_funcs, | ||
1616 | DRM_MODE_ENCODER_TMDS); | 1967 | DRM_MODE_ENCODER_TMDS); |
1617 | drm_encoder_helper_add(&intel_encoder->enc, &intel_dp_helper_funcs); | 1968 | drm_encoder_helper_add(&intel_encoder->base, &intel_dp_helper_funcs); |
1618 | 1969 | ||
1619 | drm_mode_connector_attach_encoder(&intel_connector->base, | 1970 | intel_connector_attach_encoder(intel_connector, intel_encoder); |
1620 | &intel_encoder->enc); | ||
1621 | drm_sysfs_connector_add(connector); | 1971 | drm_sysfs_connector_add(connector); |
1622 | 1972 | ||
1623 | /* Set up the DDC bus. */ | 1973 | /* Set up the DDC bus. */ |
@@ -1647,10 +1997,42 @@ intel_dp_init(struct drm_device *dev, int output_reg) | |||
1647 | 1997 | ||
1648 | intel_dp_i2c_init(intel_dp, intel_connector, name); | 1998 | intel_dp_i2c_init(intel_dp, intel_connector, name); |
1649 | 1999 | ||
1650 | intel_encoder->ddc_bus = &intel_dp->adapter; | 2000 | /* Cache some DPCD data in the eDP case */ |
2001 | if (is_edp(intel_dp)) { | ||
2002 | int ret; | ||
2003 | u32 pp_on, pp_div; | ||
2004 | |||
2005 | pp_on = I915_READ(PCH_PP_ON_DELAYS); | ||
2006 | pp_div = I915_READ(PCH_PP_DIVISOR); | ||
2007 | |||
2008 | /* Get T3 & T12 values (note: VESA not bspec terminology) */ | ||
2009 | dev_priv->panel_t3 = (pp_on & 0x1fff0000) >> 16; | ||
2010 | dev_priv->panel_t3 /= 10; /* t3 in 100us units */ | ||
2011 | dev_priv->panel_t12 = pp_div & 0xf; | ||
2012 | dev_priv->panel_t12 *= 100; /* t12 in 100ms units */ | ||
2013 | |||
2014 | ironlake_edp_panel_vdd_on(intel_dp); | ||
2015 | ret = intel_dp_aux_native_read(intel_dp, DP_DPCD_REV, | ||
2016 | intel_dp->dpcd, | ||
2017 | sizeof(intel_dp->dpcd)); | ||
2018 | ironlake_edp_panel_vdd_off(intel_dp); | ||
2019 | if (ret == sizeof(intel_dp->dpcd)) { | ||
2020 | if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) | ||
2021 | dev_priv->no_aux_handshake = | ||
2022 | intel_dp->dpcd[DP_MAX_DOWNSPREAD] & | ||
2023 | DP_NO_AUX_HANDSHAKE_LINK_TRAINING; | ||
2024 | } else { | ||
2025 | /* if this fails, presume the device is a ghost */ | ||
2026 | DRM_INFO("failed to retrieve link info, disabling eDP\n"); | ||
2027 | intel_dp_encoder_destroy(&intel_dp->base.base); | ||
2028 | intel_dp_destroy(&intel_connector->base); | ||
2029 | return; | ||
2030 | } | ||
2031 | } | ||
2032 | |||
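The eDP block above packs two unit conversions worth spelling out: the PP_ON_DELAYS field counts t3 in 100us units (so dividing by 10 yields milliseconds), and the PP_DIVISOR low nibble counts t12 in 100ms units (so multiplying by 100 yields milliseconds). A standalone arithmetic sketch with assumed sample register values, not values read from hardware:

/* Worked example of the delay decoding (sample values only). */
static void example_edp_delays(void)
{
        u32 pp_on  = 2000u << 16;       /* t3 raw: 2000 * 100us = 200ms */
        u32 pp_div = 0x3;               /* t12 raw: 3 * 100ms = 300ms   */

        u32 t3_ms  = ((pp_on & 0x1fff0000) >> 16) / 10; /* == 200 */
        u32 t12_ms = (pp_div & 0xf) * 100;              /* == 300 */

        (void)t3_ms;
        (void)t12_ms;
}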
1651 | intel_encoder->hot_plug = intel_dp_hot_plug; | 2033 | intel_encoder->hot_plug = intel_dp_hot_plug; |
1652 | 2034 | ||
1653 | if (output_reg == DP_A || IS_PCH_eDP(intel_dp)) { | 2035 | if (is_edp(intel_dp)) { |
1654 | /* initialize panel mode from VBT if available for eDP */ | 2036 | /* initialize panel mode from VBT if available for eDP */ |
1655 | if (dev_priv->lfp_lvds_vbt_mode) { | 2037 | if (dev_priv->lfp_lvds_vbt_mode) { |
1656 | dev_priv->panel_fixed_mode = | 2038 | dev_priv->panel_fixed_mode = |
@@ -1662,6 +2044,8 @@ intel_dp_init(struct drm_device *dev, int output_reg) | |||
1662 | } | 2044 | } |
1663 | } | 2045 | } |
1664 | 2046 | ||
2047 | intel_dp_add_properties(intel_dp, connector); | ||
2048 | |||
1665 | /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written | 2049 | /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written |
1666 | * 0xd. Failure to do so will result in spurious interrupts being | 2050 | * 0xd. Failure to do so will result in spurious interrupts being |
1667 | * generated on the port when a cable is not attached. | 2051 | * generated on the port when a cable is not attached. |
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 8828b3ac6414..9ffa61eb4d7e 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h | |||
@@ -26,14 +26,12 @@ | |||
26 | #define __INTEL_DRV_H__ | 26 | #define __INTEL_DRV_H__ |
27 | 27 | ||
28 | #include <linux/i2c.h> | 28 | #include <linux/i2c.h> |
29 | #include <linux/i2c-id.h> | ||
30 | #include <linux/i2c-algo-bit.h> | ||
31 | #include "i915_drv.h" | 29 | #include "i915_drv.h" |
32 | #include "drm_crtc.h" | 30 | #include "drm_crtc.h" |
33 | |||
34 | #include "drm_crtc_helper.h" | 31 | #include "drm_crtc_helper.h" |
32 | #include "drm_fb_helper.h" | ||
35 | 33 | ||
36 | #define wait_for(COND, MS, W) ({ \ | 34 | #define _wait_for(COND, MS, W) ({ \ |
37 | unsigned long timeout__ = jiffies + msecs_to_jiffies(MS); \ | 35 | unsigned long timeout__ = jiffies + msecs_to_jiffies(MS); \ |
38 | int ret__ = 0; \ | 36 | int ret__ = 0; \ |
39 | while (! (COND)) { \ | 37 | while (! (COND)) { \ |
@@ -41,11 +39,24 @@ | |||
41 | ret__ = -ETIMEDOUT; \ | 39 | ret__ = -ETIMEDOUT; \ |
42 | break; \ | 40 | break; \ |
43 | } \ | 41 | } \ |
44 | if (W) msleep(W); \ | 42 | if (W && !(in_atomic() || in_dbg_master())) msleep(W); \ |
45 | } \ | 43 | } \ |
46 | ret__; \ | 44 | ret__; \ |
47 | }) | 45 | }) |
48 | 46 | ||
47 | #define wait_for(COND, MS) _wait_for(COND, MS, 1) | ||
48 | #define wait_for_atomic(COND, MS) _wait_for(COND, MS, 0) | ||
49 | |||
50 | #define MSLEEP(x) do { \ | ||
51 | if (in_dbg_master()) \ | ||
52 | mdelay(x); \ | ||
53 | else \ | ||
54 | msleep(x); \ | ||
55 | } while(0) | ||
56 | |||
57 | #define KHz(x) (1000*x) | ||
58 | #define MHz(x) KHz(1000*x) | ||
59 | |||
49 | /* | 60 | /* |
50 | * Display related stuff | 61 | * Display related stuff |
51 | */ | 62 | */ |
@@ -96,25 +107,39 @@ | |||
96 | #define INTEL_DVO_CHIP_TMDS 2 | 107 | #define INTEL_DVO_CHIP_TMDS 2 |
97 | #define INTEL_DVO_CHIP_TVOUT 4 | 108 | #define INTEL_DVO_CHIP_TVOUT 4 |
98 | 109 | ||
99 | struct intel_i2c_chan { | 110 | /* drm_display_mode->private_flags */ |
100 | struct drm_device *drm_dev; /* for getting at dev. private (mmio etc.) */ | 111 | #define INTEL_MODE_PIXEL_MULTIPLIER_SHIFT (0x0) |
101 | u32 reg; /* GPIO reg */ | 112 | #define INTEL_MODE_PIXEL_MULTIPLIER_MASK (0xf << INTEL_MODE_PIXEL_MULTIPLIER_SHIFT) |
102 | struct i2c_adapter adapter; | 113 | |
103 | struct i2c_algo_bit_data algo; | 114 | static inline void |
104 | }; | 115 | intel_mode_set_pixel_multiplier(struct drm_display_mode *mode, |
116 | int multiplier) | ||
117 | { | ||
118 | mode->clock *= multiplier; | ||
119 | mode->private_flags |= multiplier; | ||
120 | } | ||
121 | |||
122 | static inline int | ||
123 | intel_mode_get_pixel_multiplier(const struct drm_display_mode *mode) | ||
124 | { | ||
125 | return (mode->private_flags & INTEL_MODE_PIXEL_MULTIPLIER_MASK) >> INTEL_MODE_PIXEL_MULTIPLIER_SHIFT; | ||
126 | } | ||
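Because INTEL_MODE_PIXEL_MULTIPLIER_SHIFT is 0, the set helper can OR the multiplier straight into private_flags. A round-trip sketch, with an arbitrary VGA-ish dot clock as the example value:

/* Sketch: encode a 2x SDVO-style pixel multiplier into a mode. */
struct drm_display_mode mode = { .clock = 25175 };      /* kHz, assumed */

intel_mode_set_pixel_multiplier(&mode, 2);
/* mode.clock is now 50350 and
 * intel_mode_get_pixel_multiplier(&mode) returns 2. */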
105 | 127 | ||
106 | struct intel_framebuffer { | 128 | struct intel_framebuffer { |
107 | struct drm_framebuffer base; | 129 | struct drm_framebuffer base; |
108 | struct drm_gem_object *obj; | 130 | struct drm_i915_gem_object *obj; |
109 | }; | 131 | }; |
110 | 132 | ||
133 | struct intel_fbdev { | ||
134 | struct drm_fb_helper helper; | ||
135 | struct intel_framebuffer ifb; | ||
136 | struct list_head fbdev_list; | ||
137 | struct drm_display_mode *our_mode; | ||
138 | }; | ||
111 | 139 | ||
112 | struct intel_encoder { | 140 | struct intel_encoder { |
113 | struct drm_encoder enc; | 141 | struct drm_encoder base; |
114 | int type; | 142 | int type; |
115 | struct i2c_adapter *i2c_bus; | ||
116 | struct i2c_adapter *ddc_bus; | ||
117 | bool load_detect_temp; | ||
118 | bool needs_tv_clock; | 143 | bool needs_tv_clock; |
119 | void (*hot_plug)(struct intel_encoder *); | 144 | void (*hot_plug)(struct intel_encoder *); |
120 | int crtc_mask; | 145 | int crtc_mask; |
@@ -123,32 +148,7 @@ struct intel_encoder { | |||
123 | 148 | ||
124 | struct intel_connector { | 149 | struct intel_connector { |
125 | struct drm_connector base; | 150 | struct drm_connector base; |
126 | }; | 151 | struct intel_encoder *encoder; |
127 | |||
128 | struct intel_crtc; | ||
129 | struct intel_overlay { | ||
130 | struct drm_device *dev; | ||
131 | struct intel_crtc *crtc; | ||
132 | struct drm_i915_gem_object *vid_bo; | ||
133 | struct drm_i915_gem_object *old_vid_bo; | ||
134 | int active; | ||
135 | int pfit_active; | ||
136 | u32 pfit_vscale_ratio; /* shifted-point number, (1<<12) == 1.0 */ | ||
137 | u32 color_key; | ||
138 | u32 brightness, contrast, saturation; | ||
139 | u32 old_xscale, old_yscale; | ||
140 | /* register access */ | ||
141 | u32 flip_addr; | ||
142 | struct drm_i915_gem_object *reg_bo; | ||
143 | void *virt_addr; | ||
144 | /* flip handling */ | ||
145 | uint32_t last_flip_req; | ||
146 | int hw_wedged; | ||
147 | #define HW_WEDGED 1 | ||
148 | #define NEEDS_WAIT_FOR_FLIP 2 | ||
149 | #define RELEASE_OLD_VID 3 | ||
150 | #define SWITCH_OFF_STAGE_1 4 | ||
151 | #define SWITCH_OFF_STAGE_2 5 | ||
152 | }; | 152 | }; |
153 | 153 | ||
154 | struct intel_crtc { | 154 | struct intel_crtc { |
@@ -157,6 +157,7 @@ struct intel_crtc { | |||
157 | enum plane plane; | 157 | enum plane plane; |
158 | u8 lut_r[256], lut_g[256], lut_b[256]; | 158 | u8 lut_r[256], lut_g[256], lut_b[256]; |
159 | int dpms_mode; | 159 | int dpms_mode; |
160 | bool active; /* is the crtc on? independent of the dpms mode */ | ||
160 | bool busy; /* is scanout buffer being updated frequently? */ | 161 | bool busy; /* is scanout buffer being updated frequently? */ |
161 | struct timer_list idle_timer; | 162 | struct timer_list idle_timer; |
162 | bool lowfreq_avail; | 163 | bool lowfreq_avail; |
@@ -164,80 +165,145 @@ struct intel_crtc { | |||
164 | struct intel_unpin_work *unpin_work; | 165 | struct intel_unpin_work *unpin_work; |
165 | int fdi_lanes; | 166 | int fdi_lanes; |
166 | 167 | ||
167 | struct drm_gem_object *cursor_bo; | 168 | struct drm_i915_gem_object *cursor_bo; |
168 | uint32_t cursor_addr; | 169 | uint32_t cursor_addr; |
169 | int16_t cursor_x, cursor_y; | 170 | int16_t cursor_x, cursor_y; |
170 | int16_t cursor_width, cursor_height; | 171 | int16_t cursor_width, cursor_height; |
171 | bool cursor_visible, cursor_on; | 172 | bool cursor_visible; |
172 | }; | 173 | }; |
173 | 174 | ||
174 | #define to_intel_crtc(x) container_of(x, struct intel_crtc, base) | 175 | #define to_intel_crtc(x) container_of(x, struct intel_crtc, base) |
175 | #define to_intel_connector(x) container_of(x, struct intel_connector, base) | 176 | #define to_intel_connector(x) container_of(x, struct intel_connector, base) |
176 | #define enc_to_intel_encoder(x) container_of(x, struct intel_encoder, enc) | 177 | #define to_intel_encoder(x) container_of(x, struct intel_encoder, base) |
177 | #define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base) | 178 | #define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base) |
178 | 179 | ||
180 | #define DIP_TYPE_AVI 0x82 | ||
181 | #define DIP_VERSION_AVI 0x2 | ||
182 | #define DIP_LEN_AVI 13 | ||
183 | |||
184 | struct dip_infoframe { | ||
185 | uint8_t type; /* HB0 */ | ||
186 | uint8_t ver; /* HB1 */ | ||
187 | uint8_t len; /* HB2 - body len, not including checksum */ | ||
188 | uint8_t ecc; /* Header ECC */ | ||
189 | uint8_t checksum; /* PB0 */ | ||
190 | union { | ||
191 | struct { | ||
192 | /* PB1 - Y 6:5, A 4:4, B 3:2, S 1:0 */ | ||
193 | uint8_t Y_A_B_S; | ||
194 | /* PB2 - C 7:6, M 5:4, R 3:0 */ | ||
195 | uint8_t C_M_R; | ||
196 | /* PB3 - ITC 7:7, EC 6:4, Q 3:2, SC 1:0 */ | ||
197 | uint8_t ITC_EC_Q_SC; | ||
198 | /* PB4 - VIC 6:0 */ | ||
199 | uint8_t VIC; | ||
200 | /* PB5 - PR 3:0 */ | ||
201 | uint8_t PR; | ||
202 | /* PB6 to PB13 */ | ||
203 | uint16_t top_bar_end; | ||
204 | uint16_t bottom_bar_start; | ||
205 | uint16_t left_bar_end; | ||
206 | uint16_t right_bar_start; | ||
207 | } avi; | ||
208 | uint8_t payload[27]; | ||
209 | } __attribute__ ((packed)) body; | ||
210 | } __attribute__((packed)); | ||
211 | |||
212 | static inline struct drm_crtc * | ||
213 | intel_get_crtc_for_pipe(struct drm_device *dev, int pipe) | ||
214 | { | ||
215 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
216 | return dev_priv->pipe_to_crtc_mapping[pipe]; | ||
217 | } | ||
218 | |||
219 | static inline struct drm_crtc * | ||
220 | intel_get_crtc_for_plane(struct drm_device *dev, int plane) | ||
221 | { | ||
222 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
223 | return dev_priv->plane_to_crtc_mapping[plane]; | ||
224 | } | ||
225 | |||
179 | struct intel_unpin_work { | 226 | struct intel_unpin_work { |
180 | struct work_struct work; | 227 | struct work_struct work; |
181 | struct drm_device *dev; | 228 | struct drm_device *dev; |
182 | struct drm_gem_object *old_fb_obj; | 229 | struct drm_i915_gem_object *old_fb_obj; |
183 | struct drm_gem_object *pending_flip_obj; | 230 | struct drm_i915_gem_object *pending_flip_obj; |
184 | struct drm_pending_vblank_event *event; | 231 | struct drm_pending_vblank_event *event; |
185 | int pending; | 232 | int pending; |
186 | bool enable_stall_check; | 233 | bool enable_stall_check; |
187 | }; | 234 | }; |
188 | 235 | ||
189 | struct i2c_adapter *intel_i2c_create(struct drm_device *dev, const u32 reg, | ||
190 | const char *name); | ||
191 | void intel_i2c_destroy(struct i2c_adapter *adapter); | ||
192 | int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter); | 236 | int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter); |
193 | extern bool intel_ddc_probe(struct intel_encoder *intel_encoder); | 237 | extern bool intel_ddc_probe(struct intel_encoder *intel_encoder, int ddc_bus); |
194 | void intel_i2c_quirk_set(struct drm_device *dev, bool enable); | 238 | |
195 | void intel_i2c_reset_gmbus(struct drm_device *dev); | 239 | extern void intel_attach_force_audio_property(struct drm_connector *connector); |
240 | extern void intel_attach_broadcast_rgb_property(struct drm_connector *connector); | ||
196 | 241 | ||
197 | extern void intel_crt_init(struct drm_device *dev); | 242 | extern void intel_crt_init(struct drm_device *dev); |
198 | extern void intel_hdmi_init(struct drm_device *dev, int sdvox_reg); | 243 | extern void intel_hdmi_init(struct drm_device *dev, int sdvox_reg); |
244 | void intel_dip_infoframe_csum(struct dip_infoframe *avi_if); | ||
199 | extern bool intel_sdvo_init(struct drm_device *dev, int output_device); | 245 | extern bool intel_sdvo_init(struct drm_device *dev, int output_device); |
200 | extern void intel_dvo_init(struct drm_device *dev); | 246 | extern void intel_dvo_init(struct drm_device *dev); |
201 | extern void intel_tv_init(struct drm_device *dev); | 247 | extern void intel_tv_init(struct drm_device *dev); |
202 | extern void intel_mark_busy(struct drm_device *dev, struct drm_gem_object *obj); | 248 | extern void intel_mark_busy(struct drm_device *dev, |
203 | extern void intel_lvds_init(struct drm_device *dev); | 249 | struct drm_i915_gem_object *obj); |
250 | extern bool intel_lvds_init(struct drm_device *dev); | ||
204 | extern void intel_dp_init(struct drm_device *dev, int dp_reg); | 251 | extern void intel_dp_init(struct drm_device *dev, int dp_reg); |
205 | void | 252 | void |
206 | intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, | 253 | intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, |
207 | struct drm_display_mode *adjusted_mode); | 254 | struct drm_display_mode *adjusted_mode); |
208 | extern bool intel_pch_has_edp(struct drm_crtc *crtc); | ||
209 | extern bool intel_dpd_is_edp(struct drm_device *dev); | 255 | extern bool intel_dpd_is_edp(struct drm_device *dev); |
210 | extern void intel_edp_link_config (struct intel_encoder *, int *, int *); | 256 | extern void intel_edp_link_config (struct intel_encoder *, int *, int *); |
257 | extern bool intel_encoder_is_pch_edp(struct drm_encoder *encoder); | ||
211 | 258 | ||
212 | 259 | /* intel_panel.c */ | |
213 | extern void intel_fixed_panel_mode(struct drm_display_mode *fixed_mode, | 260 | extern void intel_fixed_panel_mode(struct drm_display_mode *fixed_mode, |
214 | struct drm_display_mode *adjusted_mode); | 261 | struct drm_display_mode *adjusted_mode); |
215 | extern void intel_pch_panel_fitting(struct drm_device *dev, | 262 | extern void intel_pch_panel_fitting(struct drm_device *dev, |
216 | int fitting_mode, | 263 | int fitting_mode, |
217 | struct drm_display_mode *mode, | 264 | struct drm_display_mode *mode, |
218 | struct drm_display_mode *adjusted_mode); | 265 | struct drm_display_mode *adjusted_mode); |
266 | extern u32 intel_panel_get_max_backlight(struct drm_device *dev); | ||
267 | extern u32 intel_panel_get_backlight(struct drm_device *dev); | ||
268 | extern void intel_panel_set_backlight(struct drm_device *dev, u32 level); | ||
269 | extern void intel_panel_setup_backlight(struct drm_device *dev); | ||
270 | extern void intel_panel_enable_backlight(struct drm_device *dev); | ||
271 | extern void intel_panel_disable_backlight(struct drm_device *dev); | ||
272 | extern enum drm_connector_status intel_panel_detect(struct drm_device *dev); | ||
219 | 273 | ||
220 | extern int intel_panel_fitter_pipe (struct drm_device *dev); | ||
221 | extern void intel_crtc_load_lut(struct drm_crtc *crtc); | 274 | extern void intel_crtc_load_lut(struct drm_crtc *crtc); |
222 | extern void intel_encoder_prepare (struct drm_encoder *encoder); | 275 | extern void intel_encoder_prepare (struct drm_encoder *encoder); |
223 | extern void intel_encoder_commit (struct drm_encoder *encoder); | 276 | extern void intel_encoder_commit (struct drm_encoder *encoder); |
224 | extern void intel_encoder_destroy(struct drm_encoder *encoder); | 277 | extern void intel_encoder_destroy(struct drm_encoder *encoder); |
225 | 278 | ||
226 | extern struct drm_encoder *intel_attached_encoder(struct drm_connector *connector); | 279 | static inline struct intel_encoder *intel_attached_encoder(struct drm_connector *connector) |
280 | { | ||
281 | return to_intel_connector(connector)->encoder; | ||
282 | } | ||
283 | |||
284 | extern void intel_connector_attach_encoder(struct intel_connector *connector, | ||
285 | struct intel_encoder *encoder); | ||
286 | extern struct drm_encoder *intel_best_encoder(struct drm_connector *connector); | ||
227 | 287 | ||
228 | extern struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev, | 288 | extern struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev, |
229 | struct drm_crtc *crtc); | 289 | struct drm_crtc *crtc); |
230 | int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, | 290 | int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, |
231 | struct drm_file *file_priv); | 291 | struct drm_file *file_priv); |
232 | extern void intel_wait_for_vblank(struct drm_device *dev, int pipe); | 292 | extern void intel_wait_for_vblank(struct drm_device *dev, int pipe); |
233 | extern struct drm_crtc *intel_get_crtc_from_pipe(struct drm_device *dev, int pipe); | 293 | extern void intel_wait_for_pipe_off(struct drm_device *dev, int pipe); |
234 | extern struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder, | 294 | |
235 | struct drm_connector *connector, | 295 | struct intel_load_detect_pipe { |
236 | struct drm_display_mode *mode, | 296 | struct drm_framebuffer *release_fb; |
237 | int *dpms_mode); | 297 | bool load_detect_temp; |
298 | int dpms_mode; | ||
299 | }; | ||
300 | extern bool intel_get_load_detect_pipe(struct intel_encoder *intel_encoder, | ||
301 | struct drm_connector *connector, | ||
302 | struct drm_display_mode *mode, | ||
303 | struct intel_load_detect_pipe *old); | ||
238 | extern void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder, | 304 | extern void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder, |
239 | struct drm_connector *connector, | 305 | struct drm_connector *connector, |
240 | int dpms_mode); | 306 | struct intel_load_detect_pipe *old); |
241 | 307 | ||
242 | extern struct drm_connector* intel_sdvo_find(struct drm_device *dev, int sdvoB); | 308 | extern struct drm_connector* intel_sdvo_find(struct drm_device *dev, int sdvoB); |
243 | extern int intel_sdvo_supports_hotplug(struct drm_connector *connector); | 309 | extern int intel_sdvo_supports_hotplug(struct drm_connector *connector); |
@@ -247,17 +313,21 @@ extern void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, | |||
247 | u16 blue, int regno); | 313 | u16 blue, int regno); |
248 | extern void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green, | 314 | extern void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green, |
249 | u16 *blue, int regno); | 315 | u16 *blue, int regno); |
250 | extern void intel_init_clock_gating(struct drm_device *dev); | 316 | extern void intel_enable_clock_gating(struct drm_device *dev); |
251 | extern void ironlake_enable_drps(struct drm_device *dev); | 317 | extern void ironlake_enable_drps(struct drm_device *dev); |
252 | extern void ironlake_disable_drps(struct drm_device *dev); | 318 | extern void ironlake_disable_drps(struct drm_device *dev); |
319 | extern void gen6_enable_rps(struct drm_i915_private *dev_priv); | ||
320 | extern void gen6_disable_rps(struct drm_device *dev); | ||
321 | extern void intel_init_emon(struct drm_device *dev); | ||
253 | 322 | ||
254 | extern int intel_pin_and_fence_fb_obj(struct drm_device *dev, | 323 | extern int intel_pin_and_fence_fb_obj(struct drm_device *dev, |
255 | struct drm_gem_object *obj); | 324 | struct drm_i915_gem_object *obj, |
325 | struct intel_ring_buffer *pipelined); | ||
256 | 326 | ||
257 | extern int intel_framebuffer_init(struct drm_device *dev, | 327 | extern int intel_framebuffer_init(struct drm_device *dev, |
258 | struct intel_framebuffer *ifb, | 328 | struct intel_framebuffer *ifb, |
259 | struct drm_mode_fb_cmd *mode_cmd, | 329 | struct drm_mode_fb_cmd *mode_cmd, |
260 | struct drm_gem_object *obj); | 330 | struct drm_i915_gem_object *obj); |
261 | extern int intel_fbdev_init(struct drm_device *dev); | 331 | extern int intel_fbdev_init(struct drm_device *dev); |
262 | extern void intel_fbdev_fini(struct drm_device *dev); | 332 | extern void intel_fbdev_fini(struct drm_device *dev); |
263 | 333 | ||
@@ -268,12 +338,13 @@ extern void intel_finish_page_flip_plane(struct drm_device *dev, int plane); | |||
268 | extern void intel_setup_overlay(struct drm_device *dev); | 338 | extern void intel_setup_overlay(struct drm_device *dev); |
269 | extern void intel_cleanup_overlay(struct drm_device *dev); | 339 | extern void intel_cleanup_overlay(struct drm_device *dev); |
270 | extern int intel_overlay_switch_off(struct intel_overlay *overlay); | 340 | extern int intel_overlay_switch_off(struct intel_overlay *overlay); |
271 | extern int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay, | ||
272 | int interruptible); | ||
273 | extern int intel_overlay_put_image(struct drm_device *dev, void *data, | 341 | extern int intel_overlay_put_image(struct drm_device *dev, void *data, |
274 | struct drm_file *file_priv); | 342 | struct drm_file *file_priv); |
275 | extern int intel_overlay_attrs(struct drm_device *dev, void *data, | 343 | extern int intel_overlay_attrs(struct drm_device *dev, void *data, |
276 | struct drm_file *file_priv); | 344 | struct drm_file *file_priv); |
277 | 345 | ||
278 | extern void intel_fb_output_poll_changed(struct drm_device *dev); | 346 | extern void intel_fb_output_poll_changed(struct drm_device *dev); |
347 | extern void intel_fb_restore_mode(struct drm_device *dev); | ||
348 | |||
349 | extern void intel_init_clock_gating(struct drm_device *dev); | ||
279 | #endif /* __INTEL_DRV_H__ */ | 350 | #endif /* __INTEL_DRV_H__ */ |
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c index 7c9ec1472d46..6eda1b51c636 100644 --- a/drivers/gpu/drm/i915/intel_dvo.c +++ b/drivers/gpu/drm/i915/intel_dvo.c | |||
@@ -72,7 +72,7 @@ static const struct intel_dvo_device intel_dvo_devices[] = { | |||
72 | .name = "ch7017", | 72 | .name = "ch7017", |
73 | .dvo_reg = DVOC, | 73 | .dvo_reg = DVOC, |
74 | .slave_addr = 0x75, | 74 | .slave_addr = 0x75, |
75 | .gpio = GPIOE, | 75 | .gpio = GMBUS_PORT_DPB, |
76 | .dev_ops = &ch7017_ops, | 76 | .dev_ops = &ch7017_ops, |
77 | } | 77 | } |
78 | }; | 78 | }; |
@@ -88,7 +88,13 @@ struct intel_dvo { | |||
88 | 88 | ||
89 | static struct intel_dvo *enc_to_intel_dvo(struct drm_encoder *encoder) | 89 | static struct intel_dvo *enc_to_intel_dvo(struct drm_encoder *encoder) |
90 | { | 90 | { |
91 | return container_of(enc_to_intel_encoder(encoder), struct intel_dvo, base); | 91 | return container_of(encoder, struct intel_dvo, base.base); |
92 | } | ||
93 | |||
94 | static struct intel_dvo *intel_attached_dvo(struct drm_connector *connector) | ||
95 | { | ||
96 | return container_of(intel_attached_encoder(connector), | ||
97 | struct intel_dvo, base); | ||
92 | } | 98 | } |
93 | 99 | ||
94 | static void intel_dvo_dpms(struct drm_encoder *encoder, int mode) | 100 | static void intel_dvo_dpms(struct drm_encoder *encoder, int mode) |
@@ -112,8 +118,7 @@ static void intel_dvo_dpms(struct drm_encoder *encoder, int mode) | |||
112 | static int intel_dvo_mode_valid(struct drm_connector *connector, | 118 | static int intel_dvo_mode_valid(struct drm_connector *connector, |
113 | struct drm_display_mode *mode) | 119 | struct drm_display_mode *mode) |
114 | { | 120 | { |
115 | struct drm_encoder *encoder = intel_attached_encoder(connector); | 121 | struct intel_dvo *intel_dvo = intel_attached_dvo(connector); |
116 | struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder); | ||
117 | 122 | ||
118 | if (mode->flags & DRM_MODE_FLAG_DBLSCAN) | 123 | if (mode->flags & DRM_MODE_FLAG_DBLSCAN) |
119 | return MODE_NO_DBLESCAN; | 124 | return MODE_NO_DBLESCAN; |
@@ -173,7 +178,7 @@ static void intel_dvo_mode_set(struct drm_encoder *encoder, | |||
173 | int pipe = intel_crtc->pipe; | 178 | int pipe = intel_crtc->pipe; |
174 | u32 dvo_val; | 179 | u32 dvo_val; |
175 | u32 dvo_reg = intel_dvo->dev.dvo_reg, dvo_srcdim_reg; | 180 | u32 dvo_reg = intel_dvo->dev.dvo_reg, dvo_srcdim_reg; |
176 | int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B; | 181 | int dpll_reg = DPLL(pipe); |
177 | 182 | ||
178 | switch (dvo_reg) { | 183 | switch (dvo_reg) { |
179 | case DVOA: | 184 | case DVOA: |
@@ -224,23 +229,22 @@ static void intel_dvo_mode_set(struct drm_encoder *encoder, | |||
224 | static enum drm_connector_status | 229 | static enum drm_connector_status |
225 | intel_dvo_detect(struct drm_connector *connector, bool force) | 230 | intel_dvo_detect(struct drm_connector *connector, bool force) |
226 | { | 231 | { |
227 | struct drm_encoder *encoder = intel_attached_encoder(connector); | 232 | struct intel_dvo *intel_dvo = intel_attached_dvo(connector); |
228 | struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder); | ||
229 | |||
230 | return intel_dvo->dev.dev_ops->detect(&intel_dvo->dev); | 233 | return intel_dvo->dev.dev_ops->detect(&intel_dvo->dev); |
231 | } | 234 | } |
232 | 235 | ||
233 | static int intel_dvo_get_modes(struct drm_connector *connector) | 236 | static int intel_dvo_get_modes(struct drm_connector *connector) |
234 | { | 237 | { |
235 | struct drm_encoder *encoder = intel_attached_encoder(connector); | 238 | struct intel_dvo *intel_dvo = intel_attached_dvo(connector); |
236 | struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder); | 239 | struct drm_i915_private *dev_priv = connector->dev->dev_private; |
237 | 240 | ||
238 | /* We should probably have an i2c driver get_modes function for those | 241 | /* We should probably have an i2c driver get_modes function for those |
239 | * devices which will have a fixed set of modes determined by the chip | 242 | * devices which will have a fixed set of modes determined by the chip |
240 | * (TV-out, for example), but for now with just TMDS and LVDS, | 243 | * (TV-out, for example), but for now with just TMDS and LVDS, |
241 | * that's not the case. | 244 | * that's not the case. |
242 | */ | 245 | */ |
243 | intel_ddc_get_modes(connector, intel_dvo->base.ddc_bus); | 246 | intel_ddc_get_modes(connector, |
247 | &dev_priv->gmbus[GMBUS_PORT_DPC].adapter); | ||
244 | if (!list_empty(&connector->probed_modes)) | 248 | if (!list_empty(&connector->probed_modes)) |
245 | return 1; | 249 | return 1; |
246 | 250 | ||
@@ -281,7 +285,7 @@ static const struct drm_connector_funcs intel_dvo_connector_funcs = { | |||
281 | static const struct drm_connector_helper_funcs intel_dvo_connector_helper_funcs = { | 285 | static const struct drm_connector_helper_funcs intel_dvo_connector_helper_funcs = { |
282 | .mode_valid = intel_dvo_mode_valid, | 286 | .mode_valid = intel_dvo_mode_valid, |
283 | .get_modes = intel_dvo_get_modes, | 287 | .get_modes = intel_dvo_get_modes, |
284 | .best_encoder = intel_attached_encoder, | 288 | .best_encoder = intel_best_encoder, |
285 | }; | 289 | }; |
286 | 290 | ||
287 | static void intel_dvo_enc_destroy(struct drm_encoder *encoder) | 291 | static void intel_dvo_enc_destroy(struct drm_encoder *encoder) |
@@ -311,8 +315,7 @@ intel_dvo_get_current_mode(struct drm_connector *connector) | |||
311 | { | 315 | { |
312 | struct drm_device *dev = connector->dev; | 316 | struct drm_device *dev = connector->dev; |
313 | struct drm_i915_private *dev_priv = dev->dev_private; | 317 | struct drm_i915_private *dev_priv = dev->dev_private; |
314 | struct drm_encoder *encoder = intel_attached_encoder(connector); | 318 | struct intel_dvo *intel_dvo = intel_attached_dvo(connector); |
315 | struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder); | ||
316 | uint32_t dvo_val = I915_READ(intel_dvo->dev.dvo_reg); | 319 | uint32_t dvo_val = I915_READ(intel_dvo->dev.dvo_reg); |
317 | struct drm_display_mode *mode = NULL; | 320 | struct drm_display_mode *mode = NULL; |
318 | 321 | ||
@@ -323,7 +326,7 @@ intel_dvo_get_current_mode(struct drm_connector *connector) | |||
323 | struct drm_crtc *crtc; | 326 | struct drm_crtc *crtc; |
324 | int pipe = (dvo_val & DVO_PIPE_B_SELECT) ? 1 : 0; | 327 | int pipe = (dvo_val & DVO_PIPE_B_SELECT) ? 1 : 0; |
325 | 328 | ||
326 | crtc = intel_get_crtc_from_pipe(dev, pipe); | 329 | crtc = intel_get_crtc_for_pipe(dev, pipe); |
327 | if (crtc) { | 330 | if (crtc) { |
328 | mode = intel_crtc_mode_get(dev, crtc); | 331 | mode = intel_crtc_mode_get(dev, crtc); |
329 | if (mode) { | 332 | if (mode) { |
@@ -341,11 +344,10 @@ intel_dvo_get_current_mode(struct drm_connector *connector) | |||
341 | 344 | ||
342 | void intel_dvo_init(struct drm_device *dev) | 345 | void intel_dvo_init(struct drm_device *dev) |
343 | { | 346 | { |
347 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
344 | struct intel_encoder *intel_encoder; | 348 | struct intel_encoder *intel_encoder; |
345 | struct intel_dvo *intel_dvo; | 349 | struct intel_dvo *intel_dvo; |
346 | struct intel_connector *intel_connector; | 350 | struct intel_connector *intel_connector; |
347 | struct i2c_adapter *i2cbus = NULL; | ||
348 | int ret = 0; | ||
349 | int i; | 351 | int i; |
350 | int encoder_type = DRM_MODE_ENCODER_NONE; | 352 | int encoder_type = DRM_MODE_ENCODER_NONE; |
351 | 353 | ||
@@ -360,16 +362,14 @@ void intel_dvo_init(struct drm_device *dev) | |||
360 | } | 362 | } |
361 | 363 | ||
362 | intel_encoder = &intel_dvo->base; | 364 | intel_encoder = &intel_dvo->base; |
363 | 365 | drm_encoder_init(dev, &intel_encoder->base, | |
364 | /* Set up the DDC bus */ | 366 | &intel_dvo_enc_funcs, encoder_type); |
365 | intel_encoder->ddc_bus = intel_i2c_create(dev, GPIOD, "DVODDC_D"); | ||
366 | if (!intel_encoder->ddc_bus) | ||
367 | goto free_intel; | ||
368 | 367 | ||
369 | /* Now, try to find a controller */ | 368 | /* Now, try to find a controller */ |
370 | for (i = 0; i < ARRAY_SIZE(intel_dvo_devices); i++) { | 369 | for (i = 0; i < ARRAY_SIZE(intel_dvo_devices); i++) { |
371 | struct drm_connector *connector = &intel_connector->base; | 370 | struct drm_connector *connector = &intel_connector->base; |
372 | const struct intel_dvo_device *dvo = &intel_dvo_devices[i]; | 371 | const struct intel_dvo_device *dvo = &intel_dvo_devices[i]; |
372 | struct i2c_adapter *i2c; | ||
373 | int gpio; | 373 | int gpio; |
374 | 374 | ||
375 | /* Allow the I2C driver info to specify the GPIO to be used in | 375 | /* Allow the I2C driver info to specify the GPIO to be used in |
@@ -379,24 +379,18 @@ void intel_dvo_init(struct drm_device *dev) | |||
379 | if (dvo->gpio != 0) | 379 | if (dvo->gpio != 0) |
380 | gpio = dvo->gpio; | 380 | gpio = dvo->gpio; |
381 | else if (dvo->type == INTEL_DVO_CHIP_LVDS) | 381 | else if (dvo->type == INTEL_DVO_CHIP_LVDS) |
382 | gpio = GPIOB; | 382 | gpio = GMBUS_PORT_SSC; |
383 | else | 383 | else |
384 | gpio = GPIOE; | 384 | gpio = GMBUS_PORT_DPB; |
385 | 385 | ||
386 | /* Set up the I2C bus necessary for the chip we're probing. | 386 | /* Set up the I2C bus necessary for the chip we're probing. |
387 | * It appears that everything is on GPIOE except for panels | 387 | * It appears that everything is on GPIOE except for panels |
388 | * on i830 laptops, which are on GPIOB (DVOA). | 388 | * on i830 laptops, which are on GPIOB (DVOA). |
389 | */ | 389 | */ |
390 | if (i2cbus != NULL) | 390 | i2c = &dev_priv->gmbus[gpio].adapter; |
391 | intel_i2c_destroy(i2cbus); | ||
392 | if (!(i2cbus = intel_i2c_create(dev, gpio, | ||
393 | gpio == GPIOB ? "DVOI2C_B" : "DVOI2C_E"))) { | ||
394 | continue; | ||
395 | } | ||
396 | 391 | ||
397 | intel_dvo->dev = *dvo; | 392 | intel_dvo->dev = *dvo; |
398 | ret = dvo->dev_ops->init(&intel_dvo->dev, i2cbus); | 393 | if (!dvo->dev_ops->init(&intel_dvo->dev, i2c)) |
399 | if (!ret) | ||
400 | continue; | 394 | continue; |
401 | 395 | ||
402 | intel_encoder->type = INTEL_OUTPUT_DVO; | 396 | intel_encoder->type = INTEL_OUTPUT_DVO; |
@@ -427,13 +421,10 @@ void intel_dvo_init(struct drm_device *dev) | |||
427 | connector->interlace_allowed = false; | 421 | connector->interlace_allowed = false; |
428 | connector->doublescan_allowed = false; | 422 | connector->doublescan_allowed = false; |
429 | 423 | ||
430 | drm_encoder_init(dev, &intel_encoder->enc, | 424 | drm_encoder_helper_add(&intel_encoder->base, |
431 | &intel_dvo_enc_funcs, encoder_type); | ||
432 | drm_encoder_helper_add(&intel_encoder->enc, | ||
433 | &intel_dvo_helper_funcs); | 425 | &intel_dvo_helper_funcs); |
434 | 426 | ||
435 | drm_mode_connector_attach_encoder(&intel_connector->base, | 427 | intel_connector_attach_encoder(intel_connector, intel_encoder); |
436 | &intel_encoder->enc); | ||
437 | if (dvo->type == INTEL_DVO_CHIP_LVDS) { | 428 | if (dvo->type == INTEL_DVO_CHIP_LVDS) { |
438 | /* For our LVDS chipsets, we should hopefully be able | 429 | /* For our LVDS chipsets, we should hopefully be able |
439 | * to dig the fixed panel mode out of the BIOS data. | 430 | * to dig the fixed panel mode out of the BIOS data. |
@@ -451,11 +442,7 @@ void intel_dvo_init(struct drm_device *dev) | |||
451 | return; | 442 | return; |
452 | } | 443 | } |
453 | 444 | ||
454 | intel_i2c_destroy(intel_encoder->ddc_bus); | 445 | drm_encoder_cleanup(&intel_encoder->base); |
455 | /* Didn't find a chip, so tear down. */ | ||
456 | if (i2cbus != NULL) | ||
457 | intel_i2c_destroy(i2cbus); | ||
458 | free_intel: | ||
459 | kfree(intel_dvo); | 446 | kfree(intel_dvo); |
460 | kfree(intel_connector); | 447 | kfree(intel_connector); |
461 | } | 448 | } |
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c index b61966c126d3..ec49bae73382 100644 --- a/drivers/gpu/drm/i915/intel_fb.c +++ b/drivers/gpu/drm/i915/intel_fb.c | |||
@@ -44,13 +44,6 @@ | |||
44 | #include "i915_drm.h" | 44 | #include "i915_drm.h" |
45 | #include "i915_drv.h" | 45 | #include "i915_drv.h" |
46 | 46 | ||
47 | struct intel_fbdev { | ||
48 | struct drm_fb_helper helper; | ||
49 | struct intel_framebuffer ifb; | ||
50 | struct list_head fbdev_list; | ||
51 | struct drm_display_mode *our_mode; | ||
52 | }; | ||
53 | |||
54 | static struct fb_ops intelfb_ops = { | 47 | static struct fb_ops intelfb_ops = { |
55 | .owner = THIS_MODULE, | 48 | .owner = THIS_MODULE, |
56 | .fb_check_var = drm_fb_helper_check_var, | 49 | .fb_check_var = drm_fb_helper_check_var, |
@@ -69,13 +62,13 @@ static int intelfb_create(struct intel_fbdev *ifbdev, | |||
69 | struct drm_fb_helper_surface_size *sizes) | 62 | struct drm_fb_helper_surface_size *sizes) |
70 | { | 63 | { |
71 | struct drm_device *dev = ifbdev->helper.dev; | 64 | struct drm_device *dev = ifbdev->helper.dev; |
65 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
72 | struct fb_info *info; | 66 | struct fb_info *info; |
73 | struct drm_framebuffer *fb; | 67 | struct drm_framebuffer *fb; |
74 | struct drm_mode_fb_cmd mode_cmd; | 68 | struct drm_mode_fb_cmd mode_cmd; |
75 | struct drm_gem_object *fbo = NULL; | 69 | struct drm_i915_gem_object *obj; |
76 | struct drm_i915_gem_object *obj_priv; | ||
77 | struct device *device = &dev->pdev->dev; | 70 | struct device *device = &dev->pdev->dev; |
78 | int size, ret, mmio_bar = IS_I9XX(dev) ? 0 : 1; | 71 | int size, ret; |
79 | 72 | ||
80 | /* we don't do packed 24bpp */ | 73 | /* we don't do packed 24bpp */ |
81 | if (sizes->surface_bpp == 24) | 74 | if (sizes->surface_bpp == 24) |
@@ -85,34 +78,27 @@ static int intelfb_create(struct intel_fbdev *ifbdev, | |||
85 | mode_cmd.height = sizes->surface_height; | 78 | mode_cmd.height = sizes->surface_height; |
86 | 79 | ||
87 | mode_cmd.bpp = sizes->surface_bpp; | 80 | mode_cmd.bpp = sizes->surface_bpp; |
88 | mode_cmd.pitch = ALIGN(mode_cmd.width * ((mode_cmd.bpp + 1) / 8), 64); | 81 | mode_cmd.pitch = ALIGN(mode_cmd.width * ((mode_cmd.bpp + 7) / 8), 64); |
89 | mode_cmd.depth = sizes->surface_depth; | 82 | mode_cmd.depth = sizes->surface_depth; |
90 | 83 | ||
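The pitch change in this hunk is a real rounding fix: (bpp + 7) / 8 is a true ceiling of bpp/8, whereas the old (bpp + 1) / 8 only rounded up when bpp % 8 == 7. The common 8/16/32bpp depths happened to agree under both formulas, which is what kept the bug latent. A standalone check, with a hypothetical 10bpp value chosen to expose the difference:

#include <assert.h>

int main(void)
{
        int bpp = 10;                   /* hypothetical packed format */
        assert((bpp + 1) / 8 == 1);     /* old formula: one byte short */
        assert((bpp + 7) / 8 == 2);     /* ceiling division: correct   */
        return 0;
}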
91 | size = mode_cmd.pitch * mode_cmd.height; | 84 | size = mode_cmd.pitch * mode_cmd.height; |
92 | size = ALIGN(size, PAGE_SIZE); | 85 | size = ALIGN(size, PAGE_SIZE); |
93 | fbo = i915_gem_alloc_object(dev, size); | 86 | obj = i915_gem_alloc_object(dev, size); |
94 | if (!fbo) { | 87 | if (!obj) { |
95 | DRM_ERROR("failed to allocate framebuffer\n"); | 88 | DRM_ERROR("failed to allocate framebuffer\n"); |
96 | ret = -ENOMEM; | 89 | ret = -ENOMEM; |
97 | goto out; | 90 | goto out; |
98 | } | 91 | } |
99 | obj_priv = to_intel_bo(fbo); | ||
100 | 92 | ||
101 | mutex_lock(&dev->struct_mutex); | 93 | mutex_lock(&dev->struct_mutex); |
102 | 94 | ||
103 | ret = intel_pin_and_fence_fb_obj(dev, fbo); | 95 | /* Flush everything out, we'll be doing GTT only from now on */ |
96 | ret = intel_pin_and_fence_fb_obj(dev, obj, false); | ||
104 | if (ret) { | 97 | if (ret) { |
105 | DRM_ERROR("failed to pin fb: %d\n", ret); | 98 | DRM_ERROR("failed to pin fb: %d\n", ret); |
106 | goto out_unref; | 99 | goto out_unref; |
107 | } | 100 | } |
108 | 101 | ||
109 | /* Flush everything out, we'll be doing GTT only from now on */ | ||
110 | ret = i915_gem_object_set_to_gtt_domain(fbo, 1); | ||
111 | if (ret) { | ||
112 | DRM_ERROR("failed to bind fb: %d.\n", ret); | ||
113 | goto out_unpin; | ||
114 | } | ||
115 | |||
116 | info = framebuffer_alloc(0, device); | 102 | info = framebuffer_alloc(0, device); |
117 | if (!info) { | 103 | if (!info) { |
118 | ret = -ENOMEM; | 104 | ret = -ENOMEM; |
@@ -121,7 +107,7 @@ static int intelfb_create(struct intel_fbdev *ifbdev, | |||
121 | 107 | ||
122 | info->par = ifbdev; | 108 | info->par = ifbdev; |
123 | 109 | ||
124 | ret = intel_framebuffer_init(dev, &ifbdev->ifb, &mode_cmd, fbo); | 110 | ret = intel_framebuffer_init(dev, &ifbdev->ifb, &mode_cmd, obj); |
125 | if (ret) | 111 | if (ret) |
126 | goto out_unpin; | 112 | goto out_unpin; |
127 | 113 | ||
@@ -135,6 +121,11 @@ static int intelfb_create(struct intel_fbdev *ifbdev, | |||
135 | info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT; | 121 | info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT; |
136 | info->fbops = &intelfb_ops; | 122 | info->fbops = &intelfb_ops; |
137 | 123 | ||
124 | ret = fb_alloc_cmap(&info->cmap, 256, 0); | ||
125 | if (ret) { | ||
126 | ret = -ENOMEM; | ||
127 | goto out_unpin; | ||
128 | } | ||
138 | /* setup aperture base/size for vesafb takeover */ | 129 | /* setup aperture base/size for vesafb takeover */ |
139 | info->apertures = alloc_apertures(1); | 130 | info->apertures = alloc_apertures(1); |
140 | if (!info->apertures) { | 131 | if (!info->apertures) { |
@@ -142,26 +133,17 @@ static int intelfb_create(struct intel_fbdev *ifbdev, | |||
142 | goto out_unpin; | 133 | goto out_unpin; |
143 | } | 134 | } |
144 | info->apertures->ranges[0].base = dev->mode_config.fb_base; | 135 | info->apertures->ranges[0].base = dev->mode_config.fb_base; |
145 | if (IS_I9XX(dev)) | 136 | info->apertures->ranges[0].size = |
146 | info->apertures->ranges[0].size = pci_resource_len(dev->pdev, 2); | 137 | dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT; |
147 | else | ||
148 | info->apertures->ranges[0].size = pci_resource_len(dev->pdev, 0); | ||
149 | 138 | ||
150 | info->fix.smem_start = dev->mode_config.fb_base + obj_priv->gtt_offset; | 139 | info->fix.smem_start = dev->mode_config.fb_base + obj->gtt_offset; |
151 | info->fix.smem_len = size; | 140 | info->fix.smem_len = size; |
152 | 141 | ||
153 | info->screen_base = ioremap_wc(dev->agp->base + obj_priv->gtt_offset, | 142 | info->screen_base = ioremap_wc(dev->agp->base + obj->gtt_offset, size); |
154 | size); | ||
155 | if (!info->screen_base) { | 143 | if (!info->screen_base) { |
156 | ret = -ENOSPC; | 144 | ret = -ENOSPC; |
157 | goto out_unpin; | 145 | goto out_unpin; |
158 | } | 146 | } |
159 | |||
160 | ret = fb_alloc_cmap(&info->cmap, 256, 0); | ||
161 | if (ret) { | ||
162 | ret = -ENOMEM; | ||
163 | goto out_unpin; | ||
164 | } | ||
165 | info->screen_size = size; | 147 | info->screen_size = size; |
166 | 148 | ||
167 | // memset(info->screen_base, 0, size); | 149 | // memset(info->screen_base, 0, size); |
@@ -169,10 +151,6 @@ static int intelfb_create(struct intel_fbdev *ifbdev, | |||
169 | drm_fb_helper_fill_fix(info, fb->pitch, fb->depth); | 151 | drm_fb_helper_fill_fix(info, fb->pitch, fb->depth); |
170 | drm_fb_helper_fill_var(info, &ifbdev->helper, sizes->fb_width, sizes->fb_height); | 152 | drm_fb_helper_fill_var(info, &ifbdev->helper, sizes->fb_width, sizes->fb_height); |
171 | 153 | ||
172 | /* FIXME: we really shouldn't expose mmio space at all */ | ||
173 | info->fix.mmio_start = pci_resource_start(dev->pdev, mmio_bar); | ||
174 | info->fix.mmio_len = pci_resource_len(dev->pdev, mmio_bar); | ||
175 | |||
176 | info->pixmap.size = 64*1024; | 154 | info->pixmap.size = 64*1024; |
177 | info->pixmap.buf_align = 8; | 155 | info->pixmap.buf_align = 8; |
178 | info->pixmap.access_align = 32; | 156 | info->pixmap.access_align = 32; |
@@ -181,7 +159,7 @@ static int intelfb_create(struct intel_fbdev *ifbdev, | |||
181 | 159 | ||
182 | DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08x, bo %p\n", | 160 | DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08x, bo %p\n", |
183 | fb->width, fb->height, | 161 | fb->width, fb->height, |
184 | obj_priv->gtt_offset, fbo); | 162 | obj->gtt_offset, obj); |
185 | 163 | ||
186 | 164 | ||
187 | mutex_unlock(&dev->struct_mutex); | 165 | mutex_unlock(&dev->struct_mutex); |
@@ -189,9 +167,9 @@ static int intelfb_create(struct intel_fbdev *ifbdev, | |||
189 | return 0; | 167 | return 0; |
190 | 168 | ||
191 | out_unpin: | 169 | out_unpin: |
192 | i915_gem_object_unpin(fbo); | 170 | i915_gem_object_unpin(obj); |
193 | out_unref: | 171 | out_unref: |
194 | drm_gem_object_unreference(fbo); | 172 | drm_gem_object_unreference(&obj->base); |
195 | mutex_unlock(&dev->struct_mutex); | 173 | mutex_unlock(&dev->struct_mutex); |
196 | out: | 174 | out: |
197 | return ret; | 175 | return ret; |
@@ -219,8 +197,8 @@ static struct drm_fb_helper_funcs intel_fb_helper_funcs = { | |||
219 | .fb_probe = intel_fb_find_or_create_single, | 197 | .fb_probe = intel_fb_find_or_create_single, |
220 | }; | 198 | }; |
221 | 199 | ||
222 | int intel_fbdev_destroy(struct drm_device *dev, | 200 | static void intel_fbdev_destroy(struct drm_device *dev, |
223 | struct intel_fbdev *ifbdev) | 201 | struct intel_fbdev *ifbdev) |
224 | { | 202 | { |
225 | struct fb_info *info; | 203 | struct fb_info *info; |
226 | struct intel_framebuffer *ifb = &ifbdev->ifb; | 204 | struct intel_framebuffer *ifb = &ifbdev->ifb; |
@@ -238,11 +216,9 @@ int intel_fbdev_destroy(struct drm_device *dev, | |||
238 | 216 | ||
239 | drm_framebuffer_cleanup(&ifb->base); | 217 | drm_framebuffer_cleanup(&ifb->base); |
240 | if (ifb->obj) { | 218 | if (ifb->obj) { |
241 | drm_gem_object_unreference(ifb->obj); | 219 | drm_gem_object_unreference_unlocked(&ifb->obj->base); |
242 | ifb->obj = NULL; | 220 | ifb->obj = NULL; |
243 | } | 221 | } |
244 | |||
245 | return 0; | ||
246 | } | 222 | } |
247 | 223 | ||
248 | int intel_fbdev_init(struct drm_device *dev) | 224 | int intel_fbdev_init(struct drm_device *dev) |
@@ -288,3 +264,13 @@ void intel_fb_output_poll_changed(struct drm_device *dev) | |||
288 | drm_i915_private_t *dev_priv = dev->dev_private; | 264 | drm_i915_private_t *dev_priv = dev->dev_private; |
289 | drm_fb_helper_hotplug_event(&dev_priv->fbdev->helper); | 265 | drm_fb_helper_hotplug_event(&dev_priv->fbdev->helper); |
290 | } | 266 | } |
267 | |||
268 | void intel_fb_restore_mode(struct drm_device *dev) | ||
269 | { | ||
270 | int ret; | ||
271 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
272 | |||
273 | ret = drm_fb_helper_restore_fbdev_mode(&dev_priv->fbdev->helper); | ||
274 | if (ret) | ||
275 | DRM_DEBUG("failed to restore crtc mode\n"); | ||
276 | } | ||
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index 926934a482ec..aa0a8e83142e 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c | |||
@@ -40,12 +40,76 @@ | |||
40 | struct intel_hdmi { | 40 | struct intel_hdmi { |
41 | struct intel_encoder base; | 41 | struct intel_encoder base; |
42 | u32 sdvox_reg; | 42 | u32 sdvox_reg; |
43 | int ddc_bus; | ||
44 | uint32_t color_range; | ||
43 | bool has_hdmi_sink; | 45 | bool has_hdmi_sink; |
46 | bool has_audio; | ||
47 | int force_audio; | ||
44 | }; | 48 | }; |
45 | 49 | ||
46 | static struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder) | 50 | static struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder) |
47 | { | 51 | { |
48 | return container_of(enc_to_intel_encoder(encoder), struct intel_hdmi, base); | 52 | return container_of(encoder, struct intel_hdmi, base.base); |
53 | } | ||
54 | |||
55 | static struct intel_hdmi *intel_attached_hdmi(struct drm_connector *connector) | ||
56 | { | ||
57 | return container_of(intel_attached_encoder(connector), | ||
58 | struct intel_hdmi, base); | ||
59 | } | ||
60 | |||
61 | void intel_dip_infoframe_csum(struct dip_infoframe *avi_if) | ||
62 | { | ||
63 | uint8_t *data = (uint8_t *)avi_if; | ||
64 | uint8_t sum = 0; | ||
65 | unsigned i; | ||
66 | |||
67 | avi_if->checksum = 0; | ||
68 | avi_if->ecc = 0; | ||
69 | |||
70 | for (i = 0; i < sizeof(*avi_if); i++) | ||
71 | sum += data[i]; | ||
72 | |||
73 | avi_if->checksum = 0x100 - sum; | ||
74 | } | ||
75 | |||
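The checksum computed above follows the standard DIP rule: once the checksum byte is filled in, every byte of the infoframe, checksum included, sums to zero modulo 256. A standalone sketch of that arithmetic, assuming an illustrative 17-byte frame rather than the driver's real struct dip_infoframe:

/* DIP checksum rule: after writing the checksum, the byte sum of
 * the whole frame is 0 (mod 256). The 3-byte header and 13-byte
 * payload layout here is illustrative only. */
#include <stdint.h>
#include <stdio.h>

static uint8_t dip_checksum(const uint8_t *frame, size_t len)
{
	uint8_t sum = 0;
	size_t i;

	for (i = 0; i < len; i++)
		sum += frame[i];
	return (uint8_t)(0x100 - sum);	/* same form as the driver's 0x100 - sum */
}

int main(void)
{
	uint8_t frame[17] = { 0x82, 0x02, 0x0d };	/* type, version, length; rest zero */
	uint8_t sum = 0;
	size_t i;

	frame[3] = dip_checksum(frame, sizeof(frame));	/* checksum slot was zero */

	for (i = 0; i < sizeof(frame); i++)
		sum += frame[i];
	printf("byte sum mod 256 = %u\n", sum);		/* prints 0 */
	return 0;
}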
76 | static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder) | ||
77 | { | ||
78 | struct dip_infoframe avi_if = { | ||
79 | .type = DIP_TYPE_AVI, | ||
80 | .ver = DIP_VERSION_AVI, | ||
81 | .len = DIP_LEN_AVI, | ||
82 | }; | ||
83 | uint32_t *data = (uint32_t *)&avi_if; | ||
84 | struct drm_device *dev = encoder->dev; | ||
85 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
86 | struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); | ||
87 | u32 port; | ||
88 | unsigned i; | ||
89 | |||
90 | if (!intel_hdmi->has_hdmi_sink) | ||
91 | return; | ||
92 | |||
93 | /* XXX first guess at handling video port, is this correct? */ ||
94 | if (intel_hdmi->sdvox_reg == SDVOB) | ||
95 | port = VIDEO_DIP_PORT_B; | ||
96 | else if (intel_hdmi->sdvox_reg == SDVOC) | ||
97 | port = VIDEO_DIP_PORT_C; | ||
98 | else | ||
99 | return; | ||
100 | |||
101 | I915_WRITE(VIDEO_DIP_CTL, VIDEO_DIP_ENABLE | port | | ||
102 | VIDEO_DIP_SELECT_AVI | VIDEO_DIP_FREQ_VSYNC); | ||
103 | |||
104 | intel_dip_infoframe_csum(&avi_if); | ||
105 | for (i = 0; i < sizeof(avi_if); i += 4) { | ||
106 | I915_WRITE(VIDEO_DIP_DATA, *data); | ||
107 | data++; | ||
108 | } | ||
109 | |||
110 | I915_WRITE(VIDEO_DIP_CTL, VIDEO_DIP_ENABLE | port | | ||
111 | VIDEO_DIP_SELECT_AVI | VIDEO_DIP_FREQ_VSYNC | | ||
112 | VIDEO_DIP_ENABLE_AVI); | ||
49 | } | 113 | } |
50 | 114 | ||
51 | static void intel_hdmi_mode_set(struct drm_encoder *encoder, | 115 | static void intel_hdmi_mode_set(struct drm_encoder *encoder, |
@@ -60,15 +124,19 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder, | |||
60 | u32 sdvox; | 124 | u32 sdvox; |
61 | 125 | ||
62 | sdvox = SDVO_ENCODING_HDMI | SDVO_BORDER_ENABLE; | 126 | sdvox = SDVO_ENCODING_HDMI | SDVO_BORDER_ENABLE; |
127 | sdvox |= intel_hdmi->color_range; | ||
63 | if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) | 128 | if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) |
64 | sdvox |= SDVO_VSYNC_ACTIVE_HIGH; | 129 | sdvox |= SDVO_VSYNC_ACTIVE_HIGH; |
65 | if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) | 130 | if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) |
66 | sdvox |= SDVO_HSYNC_ACTIVE_HIGH; | 131 | sdvox |= SDVO_HSYNC_ACTIVE_HIGH; |
67 | 132 | ||
68 | if (intel_hdmi->has_hdmi_sink) { | 133 | /* Required on CPT */ |
134 | if (intel_hdmi->has_hdmi_sink && HAS_PCH_CPT(dev)) | ||
135 | sdvox |= HDMI_MODE_SELECT; | ||
136 | |||
137 | if (intel_hdmi->has_audio) { | ||
69 | sdvox |= SDVO_AUDIO_ENABLE; | 138 | sdvox |= SDVO_AUDIO_ENABLE; |
70 | if (HAS_PCH_CPT(dev)) | 139 | sdvox |= SDVO_NULL_PACKETS_DURING_VSYNC; |
71 | sdvox |= HDMI_MODE_SELECT; | ||
72 | } | 140 | } |
73 | 141 | ||
74 | if (intel_crtc->pipe == 1) { | 142 | if (intel_crtc->pipe == 1) { |
@@ -80,6 +148,8 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder, | |||
80 | 148 | ||
81 | I915_WRITE(intel_hdmi->sdvox_reg, sdvox); | 149 | I915_WRITE(intel_hdmi->sdvox_reg, sdvox); |
82 | POSTING_READ(intel_hdmi->sdvox_reg); | 150 | POSTING_READ(intel_hdmi->sdvox_reg); |
151 | |||
152 | intel_hdmi_set_avi_infoframe(encoder); | ||
83 | } | 153 | } |
84 | 154 | ||
85 | static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode) | 155 | static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode) |
@@ -123,7 +193,7 @@ static int intel_hdmi_mode_valid(struct drm_connector *connector, | |||
123 | if (mode->clock > 165000) | 193 | if (mode->clock > 165000) |
124 | return MODE_CLOCK_HIGH; | 194 | return MODE_CLOCK_HIGH; |
125 | if (mode->clock < 20000) | 195 | if (mode->clock < 20000) |
126 | return MODE_CLOCK_HIGH; | 196 | return MODE_CLOCK_LOW; |
127 | 197 | ||
128 | if (mode->flags & DRM_MODE_FLAG_DBLSCAN) | 198 | if (mode->flags & DRM_MODE_FLAG_DBLSCAN) |
129 | return MODE_NO_DBLESCAN; | 199 | return MODE_NO_DBLESCAN; |
@@ -141,36 +211,121 @@ static bool intel_hdmi_mode_fixup(struct drm_encoder *encoder, | |||
141 | static enum drm_connector_status | 211 | static enum drm_connector_status |
142 | intel_hdmi_detect(struct drm_connector *connector, bool force) | 212 | intel_hdmi_detect(struct drm_connector *connector, bool force) |
143 | { | 213 | { |
144 | struct drm_encoder *encoder = intel_attached_encoder(connector); | 214 | struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector); |
145 | struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); | 215 | struct drm_i915_private *dev_priv = connector->dev->dev_private; |
146 | struct edid *edid = NULL; | 216 | struct edid *edid; |
147 | enum drm_connector_status status = connector_status_disconnected; | 217 | enum drm_connector_status status = connector_status_disconnected; |
148 | 218 | ||
149 | intel_hdmi->has_hdmi_sink = false; | 219 | intel_hdmi->has_hdmi_sink = false; |
150 | edid = drm_get_edid(connector, intel_hdmi->base.ddc_bus); | 220 | intel_hdmi->has_audio = false; |
221 | edid = drm_get_edid(connector, | ||
222 | &dev_priv->gmbus[intel_hdmi->ddc_bus].adapter); | ||
151 | 223 | ||
152 | if (edid) { | 224 | if (edid) { |
153 | if (edid->input & DRM_EDID_INPUT_DIGITAL) { | 225 | if (edid->input & DRM_EDID_INPUT_DIGITAL) { |
154 | status = connector_status_connected; | 226 | status = connector_status_connected; |
155 | intel_hdmi->has_hdmi_sink = drm_detect_hdmi_monitor(edid); | 227 | intel_hdmi->has_hdmi_sink = drm_detect_hdmi_monitor(edid); |
228 | intel_hdmi->has_audio = drm_detect_monitor_audio(edid); | ||
156 | } | 229 | } |
157 | connector->display_info.raw_edid = NULL; | 230 | connector->display_info.raw_edid = NULL; |
158 | kfree(edid); | 231 | kfree(edid); |
159 | } | 232 | } |
160 | 233 | ||
234 | if (status == connector_status_connected) { | ||
235 | if (intel_hdmi->force_audio) | ||
236 | intel_hdmi->has_audio = intel_hdmi->force_audio > 0; | ||
237 | } | ||
238 | |||
161 | return status; | 239 | return status; |
162 | } | 240 | } |
163 | 241 | ||
164 | static int intel_hdmi_get_modes(struct drm_connector *connector) | 242 | static int intel_hdmi_get_modes(struct drm_connector *connector) |
165 | { | 243 | { |
166 | struct drm_encoder *encoder = intel_attached_encoder(connector); | 244 | struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector); |
167 | struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); | 245 | struct drm_i915_private *dev_priv = connector->dev->dev_private; |
168 | 246 | ||
169 | /* We should parse the EDID data and find out if it's an HDMI sink so | 247 | /* We should parse the EDID data and find out if it's an HDMI sink so |
170 | * we can send audio to it. | 248 | * we can send audio to it. |
171 | */ | 249 | */ |
172 | 250 | ||
173 | return intel_ddc_get_modes(connector, intel_hdmi->base.ddc_bus); | 251 | return intel_ddc_get_modes(connector, |
252 | &dev_priv->gmbus[intel_hdmi->ddc_bus].adapter); | ||
253 | } | ||
254 | |||
255 | static bool | ||
256 | intel_hdmi_detect_audio(struct drm_connector *connector) | ||
257 | { | ||
258 | struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector); | ||
259 | struct drm_i915_private *dev_priv = connector->dev->dev_private; | ||
260 | struct edid *edid; | ||
261 | bool has_audio = false; | ||
262 | |||
263 | edid = drm_get_edid(connector, | ||
264 | &dev_priv->gmbus[intel_hdmi->ddc_bus].adapter); | ||
265 | if (edid) { | ||
266 | if (edid->input & DRM_EDID_INPUT_DIGITAL) | ||
267 | has_audio = drm_detect_monitor_audio(edid); | ||
268 | |||
269 | connector->display_info.raw_edid = NULL; | ||
270 | kfree(edid); | ||
271 | } | ||
272 | |||
273 | return has_audio; | ||
274 | } | ||
275 | |||
276 | static int | ||
277 | intel_hdmi_set_property(struct drm_connector *connector, | ||
278 | struct drm_property *property, | ||
279 | uint64_t val) | ||
280 | { | ||
281 | struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector); | ||
282 | struct drm_i915_private *dev_priv = connector->dev->dev_private; | ||
283 | int ret; | ||
284 | |||
285 | ret = drm_connector_property_set_value(connector, property, val); | ||
286 | if (ret) | ||
287 | return ret; | ||
288 | |||
289 | if (property == dev_priv->force_audio_property) { | ||
290 | int i = val; | ||
291 | bool has_audio; | ||
292 | |||
293 | if (i == intel_hdmi->force_audio) | ||
294 | return 0; | ||
295 | |||
296 | intel_hdmi->force_audio = i; | ||
297 | |||
298 | if (i == 0) | ||
299 | has_audio = intel_hdmi_detect_audio(connector); | ||
300 | else | ||
301 | has_audio = i > 0; | ||
302 | |||
303 | if (has_audio == intel_hdmi->has_audio) | ||
304 | return 0; | ||
305 | |||
306 | intel_hdmi->has_audio = has_audio; | ||
307 | goto done; | ||
308 | } | ||
309 | |||
310 | if (property == dev_priv->broadcast_rgb_property) { | ||
311 | if (val == !!intel_hdmi->color_range) | ||
312 | return 0; | ||
313 | |||
314 | intel_hdmi->color_range = val ? SDVO_COLOR_RANGE_16_235 : 0; | ||
315 | goto done; | ||
316 | } | ||
317 | |||
318 | return -EINVAL; | ||
319 | |||
320 | done: | ||
321 | if (intel_hdmi->base.base.crtc) { | ||
322 | struct drm_crtc *crtc = intel_hdmi->base.base.crtc; | ||
323 | drm_crtc_helper_set_mode(crtc, &crtc->mode, | ||
324 | crtc->x, crtc->y, | ||
325 | crtc->fb); | ||
326 | } | ||
327 | |||
328 | return 0; | ||
174 | } | 329 | } |
175 | 330 | ||
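The force_audio handling above is a tri-state override: a negative value forces audio off, a positive value forces it on, and zero defers to whatever the EDID reported. A minimal sketch of that resolution, where resolve_audio is a hypothetical helper and not a driver function:

/* Tri-state audio override: <0 off, >0 on, 0 = follow EDID. */
#include <stdbool.h>
#include <stdio.h>

static bool resolve_audio(int force_audio, bool edid_has_audio)
{
	if (force_audio == 0)
		return edid_has_audio;	/* auto: trust the monitor */
	return force_audio > 0;		/* manual override */
}

int main(void)
{
	printf("%d %d %d\n",
	       resolve_audio(-1, true),		/* 0: forced off */
	       resolve_audio(0, true),		/* 1: from EDID */
	       resolve_audio(1, false));	/* 1: forced on */
	return 0;
}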
176 | static void intel_hdmi_destroy(struct drm_connector *connector) | 331 | static void intel_hdmi_destroy(struct drm_connector *connector) |
@@ -192,19 +347,27 @@ static const struct drm_connector_funcs intel_hdmi_connector_funcs = { | |||
192 | .dpms = drm_helper_connector_dpms, | 347 | .dpms = drm_helper_connector_dpms, |
193 | .detect = intel_hdmi_detect, | 348 | .detect = intel_hdmi_detect, |
194 | .fill_modes = drm_helper_probe_single_connector_modes, | 349 | .fill_modes = drm_helper_probe_single_connector_modes, |
350 | .set_property = intel_hdmi_set_property, | ||
195 | .destroy = intel_hdmi_destroy, | 351 | .destroy = intel_hdmi_destroy, |
196 | }; | 352 | }; |
197 | 353 | ||
198 | static const struct drm_connector_helper_funcs intel_hdmi_connector_helper_funcs = { | 354 | static const struct drm_connector_helper_funcs intel_hdmi_connector_helper_funcs = { |
199 | .get_modes = intel_hdmi_get_modes, | 355 | .get_modes = intel_hdmi_get_modes, |
200 | .mode_valid = intel_hdmi_mode_valid, | 356 | .mode_valid = intel_hdmi_mode_valid, |
201 | .best_encoder = intel_attached_encoder, | 357 | .best_encoder = intel_best_encoder, |
202 | }; | 358 | }; |
203 | 359 | ||
204 | static const struct drm_encoder_funcs intel_hdmi_enc_funcs = { | 360 | static const struct drm_encoder_funcs intel_hdmi_enc_funcs = { |
205 | .destroy = intel_encoder_destroy, | 361 | .destroy = intel_encoder_destroy, |
206 | }; | 362 | }; |
207 | 363 | ||
364 | static void | ||
365 | intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *connector) | ||
366 | { | ||
367 | intel_attach_force_audio_property(connector); | ||
368 | intel_attach_broadcast_rgb_property(connector); | ||
369 | } | ||
370 | |||
208 | void intel_hdmi_init(struct drm_device *dev, int sdvox_reg) | 371 | void intel_hdmi_init(struct drm_device *dev, int sdvox_reg) |
209 | { | 372 | { |
210 | struct drm_i915_private *dev_priv = dev->dev_private; | 373 | struct drm_i915_private *dev_priv = dev->dev_private; |
@@ -224,6 +387,9 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg) | |||
224 | } | 387 | } |
225 | 388 | ||
226 | intel_encoder = &intel_hdmi->base; | 389 | intel_encoder = &intel_hdmi->base; |
390 | drm_encoder_init(dev, &intel_encoder->base, &intel_hdmi_enc_funcs, | ||
391 | DRM_MODE_ENCODER_TMDS); | ||
392 | |||
227 | connector = &intel_connector->base; | 393 | connector = &intel_connector->base; |
228 | drm_connector_init(dev, connector, &intel_hdmi_connector_funcs, | 394 | drm_connector_init(dev, connector, &intel_hdmi_connector_funcs, |
229 | DRM_MODE_CONNECTOR_HDMIA); | 395 | DRM_MODE_CONNECTOR_HDMIA); |
@@ -239,39 +405,33 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg) | |||
239 | /* Set up the DDC bus. */ | 405 | /* Set up the DDC bus. */ |
240 | if (sdvox_reg == SDVOB) { | 406 | if (sdvox_reg == SDVOB) { |
241 | intel_encoder->clone_mask = (1 << INTEL_HDMIB_CLONE_BIT); | 407 | intel_encoder->clone_mask = (1 << INTEL_HDMIB_CLONE_BIT); |
242 | intel_encoder->ddc_bus = intel_i2c_create(dev, GPIOE, "HDMIB"); | 408 | intel_hdmi->ddc_bus = GMBUS_PORT_DPB; |
243 | dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS; | 409 | dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS; |
244 | } else if (sdvox_reg == SDVOC) { | 410 | } else if (sdvox_reg == SDVOC) { |
245 | intel_encoder->clone_mask = (1 << INTEL_HDMIC_CLONE_BIT); | 411 | intel_encoder->clone_mask = (1 << INTEL_HDMIC_CLONE_BIT); |
246 | intel_encoder->ddc_bus = intel_i2c_create(dev, GPIOD, "HDMIC"); | 412 | intel_hdmi->ddc_bus = GMBUS_PORT_DPC; |
247 | dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS; | 413 | dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS; |
248 | } else if (sdvox_reg == HDMIB) { | 414 | } else if (sdvox_reg == HDMIB) { |
249 | intel_encoder->clone_mask = (1 << INTEL_HDMID_CLONE_BIT); | 415 | intel_encoder->clone_mask = (1 << INTEL_HDMID_CLONE_BIT); |
250 | intel_encoder->ddc_bus = intel_i2c_create(dev, PCH_GPIOE, | 416 | intel_hdmi->ddc_bus = GMBUS_PORT_DPB; |
251 | "HDMIB"); | ||
252 | dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS; | 417 | dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS; |
253 | } else if (sdvox_reg == HDMIC) { | 418 | } else if (sdvox_reg == HDMIC) { |
254 | intel_encoder->clone_mask = (1 << INTEL_HDMIE_CLONE_BIT); | 419 | intel_encoder->clone_mask = (1 << INTEL_HDMIE_CLONE_BIT); |
255 | intel_encoder->ddc_bus = intel_i2c_create(dev, PCH_GPIOD, | 420 | intel_hdmi->ddc_bus = GMBUS_PORT_DPC; |
256 | "HDMIC"); | ||
257 | dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS; | 421 | dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS; |
258 | } else if (sdvox_reg == HDMID) { | 422 | } else if (sdvox_reg == HDMID) { |
259 | intel_encoder->clone_mask = (1 << INTEL_HDMIF_CLONE_BIT); | 423 | intel_encoder->clone_mask = (1 << INTEL_HDMIF_CLONE_BIT); |
260 | intel_encoder->ddc_bus = intel_i2c_create(dev, PCH_GPIOF, | 424 | intel_hdmi->ddc_bus = GMBUS_PORT_DPD; |
261 | "HDMID"); | ||
262 | dev_priv->hotplug_supported_mask |= HDMID_HOTPLUG_INT_STATUS; | 425 | dev_priv->hotplug_supported_mask |= HDMID_HOTPLUG_INT_STATUS; |
263 | } | 426 | } |
264 | if (!intel_encoder->ddc_bus) | ||
265 | goto err_connector; | ||
266 | 427 | ||
267 | intel_hdmi->sdvox_reg = sdvox_reg; | 428 | intel_hdmi->sdvox_reg = sdvox_reg; |
268 | 429 | ||
269 | drm_encoder_init(dev, &intel_encoder->enc, &intel_hdmi_enc_funcs, | 430 | drm_encoder_helper_add(&intel_encoder->base, &intel_hdmi_helper_funcs); |
270 | DRM_MODE_ENCODER_TMDS); | 431 | |
271 | drm_encoder_helper_add(&intel_encoder->enc, &intel_hdmi_helper_funcs); | 432 | intel_hdmi_add_properties(intel_hdmi, connector); |
272 | 433 | ||
273 | drm_mode_connector_attach_encoder(&intel_connector->base, | 434 | intel_connector_attach_encoder(intel_connector, intel_encoder); |
274 | &intel_encoder->enc); | ||
275 | drm_sysfs_connector_add(connector); | 435 | drm_sysfs_connector_add(connector); |
276 | 436 | ||
277 | /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written | 437 | /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written |
@@ -282,13 +442,4 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg) | |||
282 | u32 temp = I915_READ(PEG_BAND_GAP_DATA); | 442 | u32 temp = I915_READ(PEG_BAND_GAP_DATA); |
283 | I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd); | 443 | I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd); |
284 | } | 444 | } |
285 | |||
286 | return; | ||
287 | |||
288 | err_connector: | ||
289 | drm_connector_cleanup(connector); | ||
290 | kfree(intel_hdmi); | ||
291 | kfree(intel_connector); | ||
292 | |||
293 | return; | ||
294 | } | 445 | } |
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c index c2649c7df14c..d98cee60b602 100644 --- a/drivers/gpu/drm/i915/intel_i2c.c +++ b/drivers/gpu/drm/i915/intel_i2c.c | |||
@@ -1,6 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (c) 2006 Dave Airlie <airlied@linux.ie> | 2 | * Copyright (c) 2006 Dave Airlie <airlied@linux.ie> |
3 | * Copyright © 2006-2008 Intel Corporation | 3 | * Copyright © 2006-2008,2010 Intel Corporation |
4 | * Jesse Barnes <jesse.barnes@intel.com> | 4 | * Jesse Barnes <jesse.barnes@intel.com> |
5 | * | 5 | * |
6 | * Permission is hereby granted, free of charge, to any person obtaining a | 6 | * Permission is hereby granted, free of charge, to any person obtaining a |
@@ -24,10 +24,9 @@ | |||
24 | * | 24 | * |
25 | * Authors: | 25 | * Authors: |
26 | * Eric Anholt <eric@anholt.net> | 26 | * Eric Anholt <eric@anholt.net> |
27 | * Chris Wilson <chris@chris-wilson.co.uk> | ||
27 | */ | 28 | */ |
28 | #include <linux/i2c.h> | 29 | #include <linux/i2c.h> |
29 | #include <linux/slab.h> | ||
30 | #include <linux/i2c-id.h> | ||
31 | #include <linux/i2c-algo-bit.h> | 30 | #include <linux/i2c-algo-bit.h> |
32 | #include "drmP.h" | 31 | #include "drmP.h" |
33 | #include "drm.h" | 32 | #include "drm.h" |
@@ -35,79 +34,107 @@ | |||
35 | #include "i915_drm.h" | 34 | #include "i915_drm.h" |
36 | #include "i915_drv.h" | 35 | #include "i915_drv.h" |
37 | 36 | ||
38 | void intel_i2c_quirk_set(struct drm_device *dev, bool enable) | 37 | /* Intel GPIO access functions */ |
38 | |||
39 | #define I2C_RISEFALL_TIME 20 | ||
40 | |||
41 | static inline struct intel_gmbus * | ||
42 | to_intel_gmbus(struct i2c_adapter *i2c) | ||
43 | { | ||
44 | return container_of(i2c, struct intel_gmbus, adapter); | ||
45 | } | ||
46 | |||
47 | struct intel_gpio { | ||
48 | struct i2c_adapter adapter; | ||
49 | struct i2c_algo_bit_data algo; | ||
50 | struct drm_i915_private *dev_priv; | ||
51 | u32 reg; | ||
52 | }; | ||
53 | |||
54 | void | ||
55 | intel_i2c_reset(struct drm_device *dev) | ||
39 | { | 56 | { |
40 | struct drm_i915_private *dev_priv = dev->dev_private; | 57 | struct drm_i915_private *dev_priv = dev->dev_private; |
58 | if (HAS_PCH_SPLIT(dev)) | ||
59 | I915_WRITE(PCH_GMBUS0, 0); | ||
60 | else | ||
61 | I915_WRITE(GMBUS0, 0); | ||
62 | } | ||
63 | |||
64 | static void intel_i2c_quirk_set(struct drm_i915_private *dev_priv, bool enable) | ||
65 | { | ||
66 | u32 val; | ||
41 | 67 | ||
42 | /* When using bit bashing for I2C, this bit needs to be set to 1 */ | 68 | /* When using bit bashing for I2C, this bit needs to be set to 1 */ |
43 | if (!IS_PINEVIEW(dev)) | 69 | if (!IS_PINEVIEW(dev_priv->dev)) |
44 | return; | 70 | return; |
71 | |||
72 | val = I915_READ(DSPCLK_GATE_D); | ||
45 | if (enable) | 73 | if (enable) |
46 | I915_WRITE(DSPCLK_GATE_D, | 74 | val |= DPCUNIT_CLOCK_GATE_DISABLE; |
47 | I915_READ(DSPCLK_GATE_D) | DPCUNIT_CLOCK_GATE_DISABLE); | ||
48 | else | 75 | else |
49 | I915_WRITE(DSPCLK_GATE_D, | 76 | val &= ~DPCUNIT_CLOCK_GATE_DISABLE; |
50 | I915_READ(DSPCLK_GATE_D) & (~DPCUNIT_CLOCK_GATE_DISABLE)); | 77 | I915_WRITE(DSPCLK_GATE_D, val); |
51 | } | 78 | } |
52 | 79 | ||
53 | /* | 80 | static u32 get_reserved(struct intel_gpio *gpio) |
54 | * Intel GPIO access functions | 81 | { |
55 | */ | 82 | struct drm_i915_private *dev_priv = gpio->dev_priv; |
83 | struct drm_device *dev = dev_priv->dev; | ||
84 | u32 reserved = 0; | ||
56 | 85 | ||
57 | #define I2C_RISEFALL_TIME 20 | 86 | /* On most chips, these bits must be preserved in software. */ |
87 | if (!IS_I830(dev) && !IS_845G(dev)) | ||
88 | reserved = I915_READ_NOTRACE(gpio->reg) & | ||
89 | (GPIO_DATA_PULLUP_DISABLE | | ||
90 | GPIO_CLOCK_PULLUP_DISABLE); | ||
91 | |||
92 | return reserved; | ||
93 | } | ||
58 | 94 | ||
59 | static int get_clock(void *data) | 95 | static int get_clock(void *data) |
60 | { | 96 | { |
61 | struct intel_i2c_chan *chan = data; | 97 | struct intel_gpio *gpio = data; |
62 | struct drm_i915_private *dev_priv = chan->drm_dev->dev_private; | 98 | struct drm_i915_private *dev_priv = gpio->dev_priv; |
63 | u32 val; | 99 | u32 reserved = get_reserved(gpio); |
64 | 100 | I915_WRITE_NOTRACE(gpio->reg, reserved | GPIO_CLOCK_DIR_MASK); | |
65 | val = I915_READ(chan->reg); | 101 | I915_WRITE_NOTRACE(gpio->reg, reserved); |
66 | return ((val & GPIO_CLOCK_VAL_IN) != 0); | 102 | return (I915_READ_NOTRACE(gpio->reg) & GPIO_CLOCK_VAL_IN) != 0; |
67 | } | 103 | } |
68 | 104 | ||
69 | static int get_data(void *data) | 105 | static int get_data(void *data) |
70 | { | 106 | { |
71 | struct intel_i2c_chan *chan = data; | 107 | struct intel_gpio *gpio = data; |
72 | struct drm_i915_private *dev_priv = chan->drm_dev->dev_private; | 108 | struct drm_i915_private *dev_priv = gpio->dev_priv; |
73 | u32 val; | 109 | u32 reserved = get_reserved(gpio); |
74 | 110 | I915_WRITE_NOTRACE(gpio->reg, reserved | GPIO_DATA_DIR_MASK); | |
75 | val = I915_READ(chan->reg); | 111 | I915_WRITE_NOTRACE(gpio->reg, reserved); |
76 | return ((val & GPIO_DATA_VAL_IN) != 0); | 112 | return (I915_READ_NOTRACE(gpio->reg) & GPIO_DATA_VAL_IN) != 0; |
77 | } | 113 | } |
78 | 114 | ||
79 | static void set_clock(void *data, int state_high) | 115 | static void set_clock(void *data, int state_high) |
80 | { | 116 | { |
81 | struct intel_i2c_chan *chan = data; | 117 | struct intel_gpio *gpio = data; |
82 | struct drm_device *dev = chan->drm_dev; | 118 | struct drm_i915_private *dev_priv = gpio->dev_priv; |
83 | struct drm_i915_private *dev_priv = chan->drm_dev->dev_private; | 119 | u32 reserved = get_reserved(gpio); |
84 | u32 reserved = 0, clock_bits; | 120 | u32 clock_bits; |
85 | |||
86 | /* On most chips, these bits must be preserved in software. */ | ||
87 | if (!IS_I830(dev) && !IS_845G(dev)) | ||
88 | reserved = I915_READ(chan->reg) & (GPIO_DATA_PULLUP_DISABLE | | ||
89 | GPIO_CLOCK_PULLUP_DISABLE); | ||
90 | 121 | ||
91 | if (state_high) | 122 | if (state_high) |
92 | clock_bits = GPIO_CLOCK_DIR_IN | GPIO_CLOCK_DIR_MASK; | 123 | clock_bits = GPIO_CLOCK_DIR_IN | GPIO_CLOCK_DIR_MASK; |
93 | else | 124 | else |
94 | clock_bits = GPIO_CLOCK_DIR_OUT | GPIO_CLOCK_DIR_MASK | | 125 | clock_bits = GPIO_CLOCK_DIR_OUT | GPIO_CLOCK_DIR_MASK | |
95 | GPIO_CLOCK_VAL_MASK; | 126 | GPIO_CLOCK_VAL_MASK; |
96 | I915_WRITE(chan->reg, reserved | clock_bits); | 127 | |
97 | udelay(I2C_RISEFALL_TIME); /* wait for the line to change state */ | 128 | I915_WRITE_NOTRACE(gpio->reg, reserved | clock_bits); |
129 | POSTING_READ(gpio->reg); | ||
98 | } | 130 | } |
99 | 131 | ||
100 | static void set_data(void *data, int state_high) | 132 | static void set_data(void *data, int state_high) |
101 | { | 133 | { |
102 | struct intel_i2c_chan *chan = data; | 134 | struct intel_gpio *gpio = data; |
103 | struct drm_device *dev = chan->drm_dev; | 135 | struct drm_i915_private *dev_priv = gpio->dev_priv; |
104 | struct drm_i915_private *dev_priv = chan->drm_dev->dev_private; | 136 | u32 reserved = get_reserved(gpio); |
105 | u32 reserved = 0, data_bits; | 137 | u32 data_bits; |
106 | |||
107 | /* On most chips, these bits must be preserved in software. */ | ||
108 | if (!IS_I830(dev) && !IS_845G(dev)) | ||
109 | reserved = I915_READ(chan->reg) & (GPIO_DATA_PULLUP_DISABLE | | ||
110 | GPIO_CLOCK_PULLUP_DISABLE); | ||
111 | 138 | ||
112 | if (state_high) | 139 | if (state_high) |
113 | data_bits = GPIO_DATA_DIR_IN | GPIO_DATA_DIR_MASK; | 140 | data_bits = GPIO_DATA_DIR_IN | GPIO_DATA_DIR_MASK; |
@@ -115,109 +142,331 @@ static void set_data(void *data, int state_high) | |||
115 | data_bits = GPIO_DATA_DIR_OUT | GPIO_DATA_DIR_MASK | | 142 | data_bits = GPIO_DATA_DIR_OUT | GPIO_DATA_DIR_MASK | |
116 | GPIO_DATA_VAL_MASK; | 143 | GPIO_DATA_VAL_MASK; |
117 | 144 | ||
118 | I915_WRITE(chan->reg, reserved | data_bits); | 145 | I915_WRITE_NOTRACE(gpio->reg, reserved | data_bits); |
119 | udelay(I2C_RISEFALL_TIME); /* wait for the line to change state */ | 146 | POSTING_READ(gpio->reg); |
120 | } | 147 | } |
121 | 148 | ||
122 | /* Clears the GMBUS setup. Our driver doesn't make use of the GMBUS I2C | 149 | static struct i2c_adapter * |
123 | * engine, but if the BIOS leaves it enabled, then that can break our use | 150 | intel_gpio_create(struct drm_i915_private *dev_priv, u32 pin) |
124 | * of the bit-banging I2C interfaces. This is notably the case with the | ||
125 | * Mac Mini in EFI mode. | ||
126 | */ | ||
127 | void | ||
128 | intel_i2c_reset_gmbus(struct drm_device *dev) | ||
129 | { | 151 | { |
130 | struct drm_i915_private *dev_priv = dev->dev_private; | 152 | static const int map_pin_to_reg[] = { |
153 | 0, | ||
154 | GPIOB, | ||
155 | GPIOA, | ||
156 | GPIOC, | ||
157 | GPIOD, | ||
158 | GPIOE, | ||
159 | 0, | ||
160 | GPIOF, | ||
161 | }; | ||
162 | struct intel_gpio *gpio; | ||
131 | 163 | ||
132 | if (HAS_PCH_SPLIT(dev)) { | 164 | if (pin >= ARRAY_SIZE(map_pin_to_reg) || !map_pin_to_reg[pin]) |
133 | I915_WRITE(PCH_GMBUS0, 0); | 165 | return NULL; |
134 | } else { | 166 | |
135 | I915_WRITE(GMBUS0, 0); | 167 | gpio = kzalloc(sizeof(struct intel_gpio), GFP_KERNEL); |
168 | if (gpio == NULL) | ||
169 | return NULL; | ||
170 | |||
171 | gpio->reg = map_pin_to_reg[pin]; | ||
172 | if (HAS_PCH_SPLIT(dev_priv->dev)) | ||
173 | gpio->reg += PCH_GPIOA - GPIOA; | ||
174 | gpio->dev_priv = dev_priv; | ||
175 | |||
176 | snprintf(gpio->adapter.name, sizeof(gpio->adapter.name), | ||
177 | "i915 GPIO%c", "?BACDE?F"[pin]); | ||
178 | gpio->adapter.owner = THIS_MODULE; | ||
179 | gpio->adapter.algo_data = &gpio->algo; | ||
180 | gpio->adapter.dev.parent = &dev_priv->dev->pdev->dev; | ||
181 | gpio->algo.setsda = set_data; | ||
182 | gpio->algo.setscl = set_clock; | ||
183 | gpio->algo.getsda = get_data; | ||
184 | gpio->algo.getscl = get_clock; | ||
185 | gpio->algo.udelay = I2C_RISEFALL_TIME; | ||
186 | gpio->algo.timeout = usecs_to_jiffies(2200); | ||
187 | gpio->algo.data = gpio; | ||
188 | |||
189 | if (i2c_bit_add_bus(&gpio->adapter)) | ||
190 | goto out_free; | ||
191 | |||
192 | return &gpio->adapter; | ||
193 | |||
194 | out_free: | ||
195 | kfree(gpio); | ||
196 | return NULL; | ||
197 | } | ||
198 | |||
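A small idiom worth noting in intel_gpio_create above: "?BACDE?F"[pin] indexes a string literal as a pin-number-to-GPIO-letter lookup table, with '?' marking the two pin values that map_pin_to_reg rejects. A standalone sketch of the idiom:

/* String literal as lookup table: pins 1..7 map to letters,
 * '?' marks the unusable entries (indices 0 and 6). */
#include <stdio.h>

int main(void)
{
	static const char names[] = "?BACDE?F";
	unsigned pin;

	for (pin = 1; pin < sizeof(names) - 1; pin++)
		if (names[pin] != '?')
			printf("pin %u -> GPIO%c\n", pin, names[pin]);
	return 0;
}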
199 | static int | ||
200 | intel_i2c_quirk_xfer(struct drm_i915_private *dev_priv, | ||
201 | struct i2c_adapter *adapter, | ||
202 | struct i2c_msg *msgs, | ||
203 | int num) | ||
204 | { | ||
205 | struct intel_gpio *gpio = container_of(adapter, | ||
206 | struct intel_gpio, | ||
207 | adapter); | ||
208 | int ret; | ||
209 | |||
210 | intel_i2c_reset(dev_priv->dev); | ||
211 | |||
212 | intel_i2c_quirk_set(dev_priv, true); | ||
213 | set_data(gpio, 1); | ||
214 | set_clock(gpio, 1); | ||
215 | udelay(I2C_RISEFALL_TIME); | ||
216 | |||
217 | ret = adapter->algo->master_xfer(adapter, msgs, num); | ||
218 | |||
219 | set_data(gpio, 1); | ||
220 | set_clock(gpio, 1); | ||
221 | intel_i2c_quirk_set(dev_priv, false); | ||
222 | |||
223 | return ret; | ||
224 | } | ||
225 | |||
226 | static int | ||
227 | gmbus_xfer(struct i2c_adapter *adapter, | ||
228 | struct i2c_msg *msgs, | ||
229 | int num) | ||
230 | { | ||
231 | struct intel_gmbus *bus = container_of(adapter, | ||
232 | struct intel_gmbus, | ||
233 | adapter); | ||
234 | struct drm_i915_private *dev_priv = adapter->algo_data; | ||
235 | int i, reg_offset; | ||
236 | |||
237 | if (bus->force_bit) | ||
238 | return intel_i2c_quirk_xfer(dev_priv, | ||
239 | bus->force_bit, msgs, num); | ||
240 | |||
241 | reg_offset = HAS_PCH_SPLIT(dev_priv->dev) ? PCH_GMBUS0 - GMBUS0 : 0; | ||
242 | |||
243 | I915_WRITE(GMBUS0 + reg_offset, bus->reg0); | ||
244 | |||
245 | for (i = 0; i < num; i++) { | ||
246 | u16 len = msgs[i].len; | ||
247 | u8 *buf = msgs[i].buf; | ||
248 | |||
249 | if (msgs[i].flags & I2C_M_RD) { | ||
250 | I915_WRITE(GMBUS1 + reg_offset, | ||
251 | GMBUS_CYCLE_WAIT | (i + 1 == num ? GMBUS_CYCLE_STOP : 0) | | ||
252 | (len << GMBUS_BYTE_COUNT_SHIFT) | | ||
253 | (msgs[i].addr << GMBUS_SLAVE_ADDR_SHIFT) | | ||
254 | GMBUS_SLAVE_READ | GMBUS_SW_RDY); | ||
255 | POSTING_READ(GMBUS2+reg_offset); | ||
256 | do { | ||
257 | u32 val, loop = 0; | ||
258 | |||
259 | if (wait_for(I915_READ(GMBUS2 + reg_offset) & (GMBUS_SATOER | GMBUS_HW_RDY), 50)) | ||
260 | goto timeout; | ||
261 | if (I915_READ(GMBUS2 + reg_offset) & GMBUS_SATOER) | ||
262 | goto clear_err; | ||
263 | |||
264 | val = I915_READ(GMBUS3 + reg_offset); | ||
265 | do { | ||
266 | *buf++ = val & 0xff; | ||
267 | val >>= 8; | ||
268 | } while (--len && ++loop < 4); | ||
269 | } while (len); | ||
270 | } else { | ||
271 | u32 val, loop; | ||
272 | |||
273 | val = loop = 0; | ||
274 | do { | ||
275 | val |= *buf++ << (8 * loop); | ||
276 | } while (--len && ++loop < 4); | ||
277 | |||
278 | I915_WRITE(GMBUS3 + reg_offset, val); | ||
279 | I915_WRITE(GMBUS1 + reg_offset, | ||
280 | (i + 1 == num ? GMBUS_CYCLE_STOP : GMBUS_CYCLE_WAIT) | | ||
281 | (msgs[i].len << GMBUS_BYTE_COUNT_SHIFT) | | ||
282 | (msgs[i].addr << GMBUS_SLAVE_ADDR_SHIFT) | | ||
283 | GMBUS_SLAVE_WRITE | GMBUS_SW_RDY); | ||
284 | POSTING_READ(GMBUS2+reg_offset); | ||
285 | |||
286 | while (len) { | ||
287 | if (wait_for(I915_READ(GMBUS2 + reg_offset) & (GMBUS_SATOER | GMBUS_HW_RDY), 50)) | ||
288 | goto timeout; | ||
289 | if (I915_READ(GMBUS2 + reg_offset) & GMBUS_SATOER) | ||
290 | goto clear_err; | ||
291 | |||
292 | val = loop = 0; | ||
293 | do { | ||
294 | val |= *buf++ << (8 * loop); | ||
295 | } while (--len && ++loop < 4); | ||
296 | |||
297 | I915_WRITE(GMBUS3 + reg_offset, val); | ||
298 | POSTING_READ(GMBUS2+reg_offset); | ||
299 | } | ||
300 | } | ||
301 | |||
302 | if (i + 1 < num && wait_for(I915_READ(GMBUS2 + reg_offset) & (GMBUS_SATOER | GMBUS_HW_WAIT_PHASE), 50)) | ||
303 | goto timeout; | ||
304 | if (I915_READ(GMBUS2 + reg_offset) & GMBUS_SATOER) | ||
305 | goto clear_err; | ||
136 | } | 306 | } |
307 | |||
308 | goto done; | ||
309 | |||
310 | clear_err: | ||
311 | /* Toggle the Software Clear Interrupt bit. This has the effect | ||
312 | * of resetting the GMBUS controller and so clearing the | ||
313 | * BUS_ERROR raised by the slave's NAK. | ||
314 | */ | ||
315 | I915_WRITE(GMBUS1 + reg_offset, GMBUS_SW_CLR_INT); | ||
316 | I915_WRITE(GMBUS1 + reg_offset, 0); | ||
317 | |||
318 | done: | ||
319 | /* Mark the GMBUS interface as disabled. We will re-enable it at the | ||
320 | * start of the next xfer, till then let it sleep. | ||
321 | */ | ||
322 | I915_WRITE(GMBUS0 + reg_offset, 0); | ||
323 | return i; | ||
324 | |||
325 | timeout: | ||
326 | DRM_INFO("GMBUS timed out, falling back to bit banging on pin %d [%s]\n", | ||
327 | bus->reg0 & 0xff, bus->adapter.name); | ||
328 | I915_WRITE(GMBUS0 + reg_offset, 0); | ||
329 | |||
330 | /* Hardware may not support GMBUS over these pins? Try GPIO bitbanging instead. */ | ||
331 | bus->force_bit = intel_gpio_create(dev_priv, bus->reg0 & 0xff); | ||
332 | if (!bus->force_bit) | ||
333 | return -ENOMEM; | ||
334 | |||
335 | return intel_i2c_quirk_xfer(dev_priv, bus->force_bit, msgs, num); | ||
137 | } | 336 | } |
138 | 337 | ||
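GMBUS3 is a single 32-bit data register, so the transfer loop above packs and unpacks up to four message bytes per register access, least-significant byte first. A userspace sketch of the write-side packing; pack4 is a hypothetical helper mirroring the driver's inner do/while, and like the driver loop it assumes a non-zero starting length:

/* Pack up to four bytes into one 32-bit GMBUS3 word, LSB first. */
#include <stdint.h>
#include <stdio.h>

static uint32_t pack4(const uint8_t *buf, uint16_t *len)
{
	uint32_t val = 0, loop = 0;

	do {
		val |= (uint32_t)*buf++ << (8 * loop);
	} while (--*len && ++loop < 4);
	return val;
}

int main(void)
{
	uint8_t msg[] = { 0xde, 0xad, 0xbe, 0xef, 0x42 };
	uint16_t len = sizeof(msg);
	const uint8_t *p = msg;

	while (len) {
		uint16_t before = len;
		uint32_t val = pack4(p, &len);

		p += before - len;
		printf("GMBUS3 <- 0x%08x\n", (unsigned)val);	/* 0xefbeadde, then 0x00000042 */
	}
	return 0;
}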
338 | static u32 gmbus_func(struct i2c_adapter *adapter) | ||
339 | { | ||
340 | struct intel_gmbus *bus = container_of(adapter, | ||
341 | struct intel_gmbus, | ||
342 | adapter); | ||
343 | |||
344 | if (bus->force_bit) | ||
345 | return bus->force_bit->algo->functionality(bus->force_bit); ||
346 | |||
347 | return (I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | | ||
348 | /* I2C_FUNC_10BIT_ADDR | */ | ||
349 | I2C_FUNC_SMBUS_READ_BLOCK_DATA | | ||
350 | I2C_FUNC_SMBUS_BLOCK_PROC_CALL); | ||
351 | } | ||
352 | |||
353 | static const struct i2c_algorithm gmbus_algorithm = { | ||
354 | .master_xfer = gmbus_xfer, | ||
355 | .functionality = gmbus_func | ||
356 | }; | ||
357 | |||
139 | /** | 358 | /** |
140 | * intel_i2c_create - instantiate an Intel i2c bus using the specified GPIO reg | 359 | * intel_gmbus_setup - instantiate all Intel i2c GMBuses |
141 | * @dev: DRM device | 360 | * @dev: DRM device |
142 | * @output: driver specific output device | ||
143 | * @reg: GPIO reg to use | ||
144 | * @name: name for this bus | ||
145 | * @slave_addr: slave address (if fixed) | ||
146 | * | ||
147 | * Creates and registers a new i2c bus with the Linux i2c layer, for use | ||
148 | * in output probing and control (e.g. DDC or SDVO control functions). | ||
149 | * | ||
150 | * Possible values for @reg include: | ||
151 | * %GPIOA | ||
152 | * %GPIOB | ||
153 | * %GPIOC | ||
154 | * %GPIOD | ||
155 | * %GPIOE | ||
156 | * %GPIOF | ||
157 | * %GPIOG | ||
158 | * %GPIOH | ||
159 | * see PRM for details on how these different busses are used. | ||
160 | */ | 361 | */ |
161 | struct i2c_adapter *intel_i2c_create(struct drm_device *dev, const u32 reg, | 362 | int intel_setup_gmbus(struct drm_device *dev) |
162 | const char *name) | ||
163 | { | 363 | { |
164 | struct intel_i2c_chan *chan; | 364 | static const char *names[GMBUS_NUM_PORTS] = { |
365 | "disabled", | ||
366 | "ssc", | ||
367 | "vga", | ||
368 | "panel", | ||
369 | "dpc", | ||
370 | "dpb", | ||
371 | "reserved", | ||
372 | "dpd", | ||
373 | }; | ||
374 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
375 | int ret, i; | ||
165 | 376 | ||
166 | chan = kzalloc(sizeof(struct intel_i2c_chan), GFP_KERNEL); | 377 | dev_priv->gmbus = kcalloc(sizeof(struct intel_gmbus), GMBUS_NUM_PORTS, |
167 | if (!chan) | 378 | GFP_KERNEL); |
168 | goto out_free; | 379 | if (dev_priv->gmbus == NULL) |
380 | return -ENOMEM; | ||
169 | 381 | ||
170 | chan->drm_dev = dev; | 382 | for (i = 0; i < GMBUS_NUM_PORTS; i++) { |
171 | chan->reg = reg; | 383 | struct intel_gmbus *bus = &dev_priv->gmbus[i]; |
172 | snprintf(chan->adapter.name, I2C_NAME_SIZE, "intel drm %s", name); | ||
173 | chan->adapter.owner = THIS_MODULE; | ||
174 | chan->adapter.algo_data = &chan->algo; | ||
175 | chan->adapter.dev.parent = &dev->pdev->dev; | ||
176 | chan->algo.setsda = set_data; | ||
177 | chan->algo.setscl = set_clock; | ||
178 | chan->algo.getsda = get_data; | ||
179 | chan->algo.getscl = get_clock; | ||
180 | chan->algo.udelay = 20; | ||
181 | chan->algo.timeout = usecs_to_jiffies(2200); | ||
182 | chan->algo.data = chan; | ||
183 | |||
184 | i2c_set_adapdata(&chan->adapter, chan); | ||
185 | |||
186 | if(i2c_bit_add_bus(&chan->adapter)) | ||
187 | goto out_free; | ||
188 | 384 | ||
189 | intel_i2c_reset_gmbus(dev); | 385 | bus->adapter.owner = THIS_MODULE; |
386 | bus->adapter.class = I2C_CLASS_DDC; | ||
387 | snprintf(bus->adapter.name, | ||
388 | sizeof(bus->adapter.name), | ||
389 | "i915 gmbus %s", | ||
390 | names[i]); | ||
190 | 391 | ||
191 | /* JJJ: raise SCL and SDA? */ | 392 | bus->adapter.dev.parent = &dev->pdev->dev; |
192 | intel_i2c_quirk_set(dev, true); | 393 | bus->adapter.algo_data = dev_priv; |
193 | set_data(chan, 1); | ||
194 | set_clock(chan, 1); | ||
195 | intel_i2c_quirk_set(dev, false); | ||
196 | udelay(20); | ||
197 | 394 | ||
198 | return &chan->adapter; | 395 | bus->adapter.algo = &gmbus_algorithm; |
396 | ret = i2c_add_adapter(&bus->adapter); | ||
397 | if (ret) | ||
398 | goto err; | ||
199 | 399 | ||
200 | out_free: | 400 | /* By default use a conservative clock rate */ |
201 | kfree(chan); | 401 | bus->reg0 = i | GMBUS_RATE_100KHZ; |
202 | return NULL; | 402 | |
403 | /* XXX force bit banging until GMBUS is fully debugged */ | ||
404 | bus->force_bit = intel_gpio_create(dev_priv, i); | ||
405 | } | ||
406 | |||
407 | intel_i2c_reset(dev_priv->dev); | ||
408 | |||
409 | return 0; | ||
410 | |||
411 | err: | ||
412 | while (i--) { ||
413 | struct intel_gmbus *bus = &dev_priv->gmbus[i]; | ||
414 | i2c_del_adapter(&bus->adapter); | ||
415 | } | ||
416 | kfree(dev_priv->gmbus); | ||
417 | dev_priv->gmbus = NULL; | ||
418 | return ret; | ||
203 | } | 419 | } |
204 | 420 | ||
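The err path in intel_setup_gmbus must unregister every adapter added before the failing index i, i.e. indices [0, i). Note that while (i--) visits i-1 down to 0, whereas while (--i) stops short of index 0 and would leak the first adapter. A standalone sketch of the idiom, with add_adapter/del_adapter as stubs:

/* Error-unwind idiom: on failure at index i, tear down [0, i). */
#include <stdio.h>

#define NUM_PORTS 8

static int add_adapter(int i)  { return i == 5 ? -1 : 0; }	/* fail at 5 */
static void del_adapter(int i) { printf("del %d\n", i); }

int main(void)
{
	int i;

	for (i = 0; i < NUM_PORTS; i++)
		if (add_adapter(i))
			goto err;
	return 0;

err:
	while (i--)		/* prints del 4, del 3, ... del 0 */
		del_adapter(i);
	return 1;
}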
205 | /** | 421 | void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed) |
206 | * intel_i2c_destroy - unregister and free i2c bus resources | 422 | { |
207 | * @output: channel to free | 423 | struct intel_gmbus *bus = to_intel_gmbus(adapter); |
208 | * | 424 | |
209 | * Unregister the adapter from the i2c layer, then free the structure. | 425 | /* speed: |
210 | */ | 426 | * 0x0 = 100 KHz |
211 | void intel_i2c_destroy(struct i2c_adapter *adapter) | 427 | * 0x1 = 50 KHz |
428 | * 0x2 = 400 KHz | ||
429 | * 0x3 = 1000 KHz | ||
430 | */ | ||
431 | bus->reg0 = (bus->reg0 & ~(0x3 << 8)) | (speed << 8); | ||
432 | } | ||
433 | |||
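The speed codes tabulated above occupy bits 9:8 of reg0, with the pin-pair select in the low byte, so changing the clock rate is a masked read-modify-write that leaves the pin selection untouched. A sketch of that bitfield update, with gmbus_set_speed as a hypothetical standalone mirror of the driver's one-liner:

/* reg0 layout assumed from the code above: pin in bits 7:0,
 * 2-bit speed code in bits 9:8. */
#include <stdint.h>
#include <stdio.h>

static uint32_t gmbus_set_speed(uint32_t reg0, uint32_t speed)
{
	return (reg0 & ~(0x3u << 8)) | (speed << 8);
}

int main(void)
{
	uint32_t reg0 = 5;			/* pin 5, speed code 0 (100 KHz) */

	reg0 = gmbus_set_speed(reg0, 2);	/* code 2 = 400 KHz */
	printf("reg0 = 0x%03x, pin = %u\n",
	       (unsigned)reg0, (unsigned)(reg0 & 0xff));	/* 0x205, pin 5 */
	return 0;
}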
434 | void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit) | ||
435 | { | ||
436 | struct intel_gmbus *bus = to_intel_gmbus(adapter); | ||
437 | |||
438 | if (force_bit) { | ||
439 | if (bus->force_bit == NULL) { | ||
440 | struct drm_i915_private *dev_priv = adapter->algo_data; | ||
441 | bus->force_bit = intel_gpio_create(dev_priv, | ||
442 | bus->reg0 & 0xff); | ||
443 | } | ||
444 | } else { | ||
445 | if (bus->force_bit) { | ||
446 | i2c_del_adapter(bus->force_bit); | ||
447 | kfree(bus->force_bit); | ||
448 | bus->force_bit = NULL; | ||
449 | } | ||
450 | } | ||
451 | } | ||
452 | |||
453 | void intel_teardown_gmbus(struct drm_device *dev) | ||
212 | { | 454 | { |
213 | struct intel_i2c_chan *chan; | 455 | struct drm_i915_private *dev_priv = dev->dev_private; |
456 | int i; | ||
214 | 457 | ||
215 | if (!adapter) | 458 | if (dev_priv->gmbus == NULL) |
216 | return; | 459 | return; |
217 | 460 | ||
218 | chan = container_of(adapter, | 461 | for (i = 0; i < GMBUS_NUM_PORTS; i++) { |
219 | struct intel_i2c_chan, | 462 | struct intel_gmbus *bus = &dev_priv->gmbus[i]; |
220 | adapter); | 463 | if (bus->force_bit) { |
221 | i2c_del_adapter(&chan->adapter); | 464 | i2c_del_adapter(bus->force_bit); |
222 | kfree(chan); | 465 | kfree(bus->force_bit); |
466 | } | ||
467 | i2c_del_adapter(&bus->adapter); | ||
468 | } | ||
469 | |||
470 | kfree(dev_priv->gmbus); | ||
471 | dev_priv->gmbus = NULL; | ||
223 | } | 472 | } |
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index 6ec39a86ed06..b28f7bd9f88a 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c | |||
@@ -43,102 +43,110 @@ | |||
43 | /* Private structure for the integrated LVDS support */ | 43 | /* Private structure for the integrated LVDS support */ |
44 | struct intel_lvds { | 44 | struct intel_lvds { |
45 | struct intel_encoder base; | 45 | struct intel_encoder base; |
46 | |||
47 | struct edid *edid; | ||
48 | |||
46 | int fitting_mode; | 49 | int fitting_mode; |
47 | u32 pfit_control; | 50 | u32 pfit_control; |
48 | u32 pfit_pgm_ratios; | 51 | u32 pfit_pgm_ratios; |
52 | bool pfit_dirty; | ||
53 | |||
54 | struct drm_display_mode *fixed_mode; | ||
49 | }; | 55 | }; |
50 | 56 | ||
51 | static struct intel_lvds *enc_to_intel_lvds(struct drm_encoder *encoder) | 57 | static struct intel_lvds *to_intel_lvds(struct drm_encoder *encoder) |
52 | { | 58 | { |
53 | return container_of(enc_to_intel_encoder(encoder), struct intel_lvds, base); | 59 | return container_of(encoder, struct intel_lvds, base.base); |
54 | } | 60 | } |
55 | 61 | ||
56 | /** | 62 | static struct intel_lvds *intel_attached_lvds(struct drm_connector *connector) |
57 | * Sets the backlight level. | ||
58 | * | ||
59 | * \param level backlight level, from 0 to intel_lvds_get_max_backlight(). | ||
60 | */ | ||
61 | static void intel_lvds_set_backlight(struct drm_device *dev, int level) | ||
62 | { | 63 | { |
63 | struct drm_i915_private *dev_priv = dev->dev_private; | 64 | return container_of(intel_attached_encoder(connector), |
64 | u32 blc_pwm_ctl, reg; | 65 | struct intel_lvds, base); |
65 | |||
66 | if (HAS_PCH_SPLIT(dev)) | ||
67 | reg = BLC_PWM_CPU_CTL; | ||
68 | else | ||
69 | reg = BLC_PWM_CTL; | ||
70 | |||
71 | blc_pwm_ctl = I915_READ(reg) & ~BACKLIGHT_DUTY_CYCLE_MASK; | ||
72 | I915_WRITE(reg, (blc_pwm_ctl | | ||
73 | (level << BACKLIGHT_DUTY_CYCLE_SHIFT))); | ||
74 | } | 66 | } |
75 | 67 | ||
76 | /** | 68 | /** |
77 | * Returns the maximum level of the backlight duty cycle field. | 69 | * Sets the power state for the panel. |
78 | */ | 70 | */ |
79 | static u32 intel_lvds_get_max_backlight(struct drm_device *dev) | 71 | static void intel_lvds_enable(struct intel_lvds *intel_lvds) |
80 | { | 72 | { |
73 | struct drm_device *dev = intel_lvds->base.base.dev; | ||
81 | struct drm_i915_private *dev_priv = dev->dev_private; | 74 | struct drm_i915_private *dev_priv = dev->dev_private; |
82 | u32 reg; | 75 | u32 ctl_reg, lvds_reg; |
83 | 76 | ||
84 | if (HAS_PCH_SPLIT(dev)) | 77 | if (HAS_PCH_SPLIT(dev)) { |
85 | reg = BLC_PWM_PCH_CTL2; | 78 | ctl_reg = PCH_PP_CONTROL; |
86 | else | 79 | lvds_reg = PCH_LVDS; |
87 | reg = BLC_PWM_CTL; | 80 | } else { |
81 | ctl_reg = PP_CONTROL; | ||
82 | lvds_reg = LVDS; | ||
83 | } | ||
84 | |||
85 | I915_WRITE(lvds_reg, I915_READ(lvds_reg) | LVDS_PORT_EN); | ||
88 | 86 | ||
89 | return ((I915_READ(reg) & BACKLIGHT_MODULATION_FREQ_MASK) >> | 87 | if (intel_lvds->pfit_dirty) { |
90 | BACKLIGHT_MODULATION_FREQ_SHIFT) * 2; | 88 | /* |
89 | * Enable automatic panel scaling so that non-native modes | ||
90 | * fill the screen. The panel fitter should only be | ||
91 | * adjusted whilst the pipe is disabled, according to | ||
92 | * register description and PRM. | ||
93 | */ | ||
94 | DRM_DEBUG_KMS("applying panel-fitter: %x, %x\n", | ||
95 | intel_lvds->pfit_control, | ||
96 | intel_lvds->pfit_pgm_ratios); | ||
97 | if (wait_for((I915_READ(PP_STATUS) & PP_ON) == 0, 1000)) { | ||
98 | DRM_ERROR("timed out waiting for panel to power off\n"); | ||
99 | } else { | ||
100 | I915_WRITE(PFIT_PGM_RATIOS, intel_lvds->pfit_pgm_ratios); | ||
101 | I915_WRITE(PFIT_CONTROL, intel_lvds->pfit_control); | ||
102 | intel_lvds->pfit_dirty = false; | ||
103 | } | ||
104 | } | ||
105 | |||
106 | I915_WRITE(ctl_reg, I915_READ(ctl_reg) | POWER_TARGET_ON); | ||
107 | POSTING_READ(lvds_reg); | ||
108 | |||
109 | intel_panel_enable_backlight(dev); | ||
91 | } | 110 | } |
92 | 111 | ||
93 | /** | 112 | static void intel_lvds_disable(struct intel_lvds *intel_lvds) |
94 | * Sets the power state for the panel. | ||
95 | */ | ||
96 | static void intel_lvds_set_power(struct drm_device *dev, bool on) | ||
97 | { | 113 | { |
114 | struct drm_device *dev = intel_lvds->base.base.dev; | ||
98 | struct drm_i915_private *dev_priv = dev->dev_private; | 115 | struct drm_i915_private *dev_priv = dev->dev_private; |
99 | u32 ctl_reg, status_reg, lvds_reg; | 116 | u32 ctl_reg, lvds_reg; |
100 | 117 | ||
101 | if (HAS_PCH_SPLIT(dev)) { | 118 | if (HAS_PCH_SPLIT(dev)) { |
102 | ctl_reg = PCH_PP_CONTROL; | 119 | ctl_reg = PCH_PP_CONTROL; |
103 | status_reg = PCH_PP_STATUS; | ||
104 | lvds_reg = PCH_LVDS; | 120 | lvds_reg = PCH_LVDS; |
105 | } else { | 121 | } else { |
106 | ctl_reg = PP_CONTROL; | 122 | ctl_reg = PP_CONTROL; |
107 | status_reg = PP_STATUS; | ||
108 | lvds_reg = LVDS; | 123 | lvds_reg = LVDS; |
109 | } | 124 | } |
110 | 125 | ||
111 | if (on) { | 126 | intel_panel_disable_backlight(dev); |
112 | I915_WRITE(lvds_reg, I915_READ(lvds_reg) | LVDS_PORT_EN); | ||
113 | POSTING_READ(lvds_reg); | ||
114 | 127 | ||
115 | I915_WRITE(ctl_reg, I915_READ(ctl_reg) | | 128 | I915_WRITE(ctl_reg, I915_READ(ctl_reg) & ~POWER_TARGET_ON); |
116 | POWER_TARGET_ON); | ||
117 | if (wait_for(I915_READ(status_reg) & PP_ON, 1000, 0)) | ||
118 | DRM_ERROR("timed out waiting to enable LVDS pipe"); | ||
119 | |||
120 | intel_lvds_set_backlight(dev, dev_priv->backlight_duty_cycle); | ||
121 | } else { | ||
122 | intel_lvds_set_backlight(dev, 0); | ||
123 | 129 | ||
124 | I915_WRITE(ctl_reg, I915_READ(ctl_reg) & | 130 | if (intel_lvds->pfit_control) { |
125 | ~POWER_TARGET_ON); | 131 | if (wait_for((I915_READ(PP_STATUS) & PP_ON) == 0, 1000)) |
126 | if (wait_for((I915_READ(status_reg) & PP_ON) == 0, 1000, 0)) | 132 | DRM_ERROR("timed out waiting for panel to power off\n"); |
127 | DRM_ERROR("timed out waiting for LVDS pipe to turn off"); | ||
128 | 133 | ||
129 | I915_WRITE(lvds_reg, I915_READ(lvds_reg) & ~LVDS_PORT_EN); | 134 | I915_WRITE(PFIT_CONTROL, 0); |
130 | POSTING_READ(lvds_reg); | 135 | intel_lvds->pfit_dirty = true; |
131 | } | 136 | } |
137 | |||
138 | I915_WRITE(lvds_reg, I915_READ(lvds_reg) & ~LVDS_PORT_EN); | ||
139 | POSTING_READ(lvds_reg); | ||
132 | } | 140 | } |
133 | 141 | ||
134 | static void intel_lvds_dpms(struct drm_encoder *encoder, int mode) | 142 | static void intel_lvds_dpms(struct drm_encoder *encoder, int mode) |
135 | { | 143 | { |
136 | struct drm_device *dev = encoder->dev; | 144 | struct intel_lvds *intel_lvds = to_intel_lvds(encoder); |
137 | 145 | ||
138 | if (mode == DRM_MODE_DPMS_ON) | 146 | if (mode == DRM_MODE_DPMS_ON) |
139 | intel_lvds_set_power(dev, true); | 147 | intel_lvds_enable(intel_lvds); |
140 | else | 148 | else |
141 | intel_lvds_set_power(dev, false); | 149 | intel_lvds_disable(intel_lvds); |
142 | 150 | ||
143 | /* XXX: We never power down the LVDS pairs. */ | 151 | /* XXX: We never power down the LVDS pairs. */ |
144 | } | 152 | } |
@@ -146,16 +154,13 @@ static void intel_lvds_dpms(struct drm_encoder *encoder, int mode) | |||
146 | static int intel_lvds_mode_valid(struct drm_connector *connector, | 154 | static int intel_lvds_mode_valid(struct drm_connector *connector, |
147 | struct drm_display_mode *mode) | 155 | struct drm_display_mode *mode) |
148 | { | 156 | { |
149 | struct drm_device *dev = connector->dev; | 157 | struct intel_lvds *intel_lvds = intel_attached_lvds(connector); |
150 | struct drm_i915_private *dev_priv = dev->dev_private; | 158 | struct drm_display_mode *fixed_mode = intel_lvds->fixed_mode; |
151 | struct drm_display_mode *fixed_mode = dev_priv->panel_fixed_mode; | ||
152 | 159 | ||
153 | if (fixed_mode) { | 160 | if (mode->hdisplay > fixed_mode->hdisplay) |
154 | if (mode->hdisplay > fixed_mode->hdisplay) | 161 | return MODE_PANEL; |
155 | return MODE_PANEL; | 162 | if (mode->vdisplay > fixed_mode->vdisplay) |
156 | if (mode->vdisplay > fixed_mode->vdisplay) | 163 | return MODE_PANEL; |
157 | return MODE_PANEL; | ||
158 | } | ||
159 | 164 | ||
160 | return MODE_OK; | 165 | return MODE_OK; |
161 | } | 166 | } |
@@ -223,12 +228,13 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder, | |||
223 | struct drm_device *dev = encoder->dev; | 228 | struct drm_device *dev = encoder->dev; |
224 | struct drm_i915_private *dev_priv = dev->dev_private; | 229 | struct drm_i915_private *dev_priv = dev->dev_private; |
225 | struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); | 230 | struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); |
226 | struct intel_lvds *intel_lvds = enc_to_intel_lvds(encoder); | 231 | struct intel_lvds *intel_lvds = to_intel_lvds(encoder); |
227 | struct drm_encoder *tmp_encoder; | 232 | struct drm_encoder *tmp_encoder; |
228 | u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0; | 233 | u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0; |
234 | int pipe; | ||
229 | 235 | ||
230 | /* Should never happen!! */ | 236 | /* Should never happen!! */ |
231 | if (!IS_I965G(dev) && intel_crtc->pipe == 0) { | 237 | if (INTEL_INFO(dev)->gen < 4 && intel_crtc->pipe == 0) { |
232 | DRM_ERROR("Can't support LVDS on pipe A\n"); | 238 | DRM_ERROR("Can't support LVDS on pipe A\n"); |
233 | return false; | 239 | return false; |
234 | } | 240 | } |
@@ -241,9 +247,6 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder, | |||
241 | return false; | 247 | return false; |
242 | } | 248 | } |
243 | } | 249 | } |
244 | /* If we don't have a panel mode, there is nothing we can do */ | ||
245 | if (dev_priv->panel_fixed_mode == NULL) | ||
246 | return true; | ||
247 | 250 | ||
248 | /* | 251 | /* |
249 | * We have timings from the BIOS for the panel, put them in | 252 | * We have timings from the BIOS for the panel, put them in |
@@ -251,7 +254,7 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder, | |||
251 | * with the panel scaling set up to source from the H/VDisplay | 254 | * with the panel scaling set up to source from the H/VDisplay |
252 | * of the original mode. | 255 | * of the original mode. |
253 | */ | 256 | */ |
254 | intel_fixed_panel_mode(dev_priv->panel_fixed_mode, adjusted_mode); | 257 | intel_fixed_panel_mode(intel_lvds->fixed_mode, adjusted_mode); |
255 | 258 | ||
256 | if (HAS_PCH_SPLIT(dev)) { | 259 | if (HAS_PCH_SPLIT(dev)) { |
257 | intel_pch_panel_fitting(dev, intel_lvds->fitting_mode, | 260 | intel_pch_panel_fitting(dev, intel_lvds->fitting_mode, |
@@ -259,19 +262,13 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder, | |||
259 | return true; | 262 | return true; |
260 | } | 263 | } |
261 | 264 | ||
262 | /* Make sure pre-965s set dither correctly */ | ||
263 | if (!IS_I965G(dev)) { | ||
264 | if (dev_priv->panel_wants_dither || dev_priv->lvds_dither) | ||
265 | pfit_control |= PANEL_8TO6_DITHER_ENABLE; | ||
266 | } | ||
267 | |||
268 | /* Native modes don't need fitting */ | 265 | /* Native modes don't need fitting */ |
269 | if (adjusted_mode->hdisplay == mode->hdisplay && | 266 | if (adjusted_mode->hdisplay == mode->hdisplay && |
270 | adjusted_mode->vdisplay == mode->vdisplay) | 267 | adjusted_mode->vdisplay == mode->vdisplay) |
271 | goto out; | 268 | goto out; |
272 | 269 | ||
273 | /* 965+ wants fuzzy fitting */ | 270 | /* 965+ wants fuzzy fitting */ |
274 | if (IS_I965G(dev)) | 271 | if (INTEL_INFO(dev)->gen >= 4) |
275 | pfit_control |= ((intel_crtc->pipe << PFIT_PIPE_SHIFT) | | 272 | pfit_control |= ((intel_crtc->pipe << PFIT_PIPE_SHIFT) | |
276 | PFIT_FILTER_FUZZY); | 273 | PFIT_FILTER_FUZZY); |
277 | 274 | ||
@@ -281,8 +278,8 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder, | |||
281 | * to register description and PRM. | 278 | * to register description and PRM. |
282 | * Change the value here to see the borders for debugging | 279 | * Change the value here to see the borders for debugging |
283 | */ | 280 | */ |
284 | I915_WRITE(BCLRPAT_A, 0); | 281 | for_each_pipe(pipe) |
285 | I915_WRITE(BCLRPAT_B, 0); | 282 | I915_WRITE(BCLRPAT(pipe), 0); |
286 | 283 | ||
287 | switch (intel_lvds->fitting_mode) { | 284 | switch (intel_lvds->fitting_mode) { |
288 | case DRM_MODE_SCALE_CENTER: | 285 | case DRM_MODE_SCALE_CENTER: |
@@ -297,18 +294,17 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder, | |||
297 | 294 | ||
298 | case DRM_MODE_SCALE_ASPECT: | 295 | case DRM_MODE_SCALE_ASPECT: |
299 | /* Scale but preserve the aspect ratio */ | 296 | /* Scale but preserve the aspect ratio */ |
300 | if (IS_I965G(dev)) { | 297 | if (INTEL_INFO(dev)->gen >= 4) { |
301 | u32 scaled_width = adjusted_mode->hdisplay * mode->vdisplay; | 298 | u32 scaled_width = adjusted_mode->hdisplay * mode->vdisplay; |
302 | u32 scaled_height = mode->hdisplay * adjusted_mode->vdisplay; | 299 | u32 scaled_height = mode->hdisplay * adjusted_mode->vdisplay; |
303 | 300 | ||
304 | pfit_control |= PFIT_ENABLE; | ||
305 | /* 965+ is easy, it does everything in hw */ | 301 | /* 965+ is easy, it does everything in hw */ |
306 | if (scaled_width > scaled_height) | 302 | if (scaled_width > scaled_height) |
307 | pfit_control |= PFIT_SCALING_PILLAR; | 303 | pfit_control |= PFIT_ENABLE | PFIT_SCALING_PILLAR; |
308 | else if (scaled_width < scaled_height) | 304 | else if (scaled_width < scaled_height) |
309 | pfit_control |= PFIT_SCALING_LETTER; | 305 | pfit_control |= PFIT_ENABLE | PFIT_SCALING_LETTER; |
310 | else | 306 | else if (adjusted_mode->hdisplay != mode->hdisplay) |
311 | pfit_control |= PFIT_SCALING_AUTO; | 307 | pfit_control |= PFIT_ENABLE | PFIT_SCALING_AUTO; |
312 | } else { | 308 | } else { |
313 | u32 scaled_width = adjusted_mode->hdisplay * mode->vdisplay; | 309 | u32 scaled_width = adjusted_mode->hdisplay * mode->vdisplay; |
314 | u32 scaled_height = mode->hdisplay * adjusted_mode->vdisplay; | 310 | u32 scaled_height = mode->hdisplay * adjusted_mode->vdisplay; |
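Both the gen4+ and pre-965 aspect paths above compare the panel's aspect ratio with the requested mode's by cross-multiplying (adjusted_mode->hdisplay * mode->vdisplay against mode->hdisplay * adjusted_mode->vdisplay), which sidesteps integer division and its rounding error. A standalone sketch of that decision:

/* Cross-multiplied aspect compare: wider panel => pillarbox,
 * taller panel => letterbox, equal => plain scale. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t panel_w = 1366, panel_h = 768;	/* adjusted (fixed) mode */
	uint32_t mode_w = 1024, mode_h = 768;	/* requested mode */
	uint32_t scaled_width  = panel_w * mode_h;
	uint32_t scaled_height = mode_w * panel_h;

	if (scaled_width > scaled_height)
		puts("pillarbox: fit height, border the sides");
	else if (scaled_width < scaled_height)
		puts("letterbox: fit width, border top and bottom");
	else
		puts("same aspect: plain scaling");
	return 0;
}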
@@ -355,13 +351,17 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder, | |||
355 | * Full scaling, even if it changes the aspect ratio. | 351 | * Full scaling, even if it changes the aspect ratio. |
356 | * Fortunately this is all done for us in hw. | 352 | * Fortunately this is all done for us in hw. |
357 | */ | 353 | */ |
358 | pfit_control |= PFIT_ENABLE; | 354 | if (mode->vdisplay != adjusted_mode->vdisplay || |
359 | if (IS_I965G(dev)) | 355 | mode->hdisplay != adjusted_mode->hdisplay) { |
360 | pfit_control |= PFIT_SCALING_AUTO; | 356 | pfit_control |= PFIT_ENABLE; |
361 | else | 357 | if (INTEL_INFO(dev)->gen >= 4) |
362 | pfit_control |= (VERT_AUTO_SCALE | HORIZ_AUTO_SCALE | | 358 | pfit_control |= PFIT_SCALING_AUTO; |
363 | VERT_INTERP_BILINEAR | | 359 | else |
364 | HORIZ_INTERP_BILINEAR); | 360 | pfit_control |= (VERT_AUTO_SCALE | |
361 | VERT_INTERP_BILINEAR | | ||
362 | HORIZ_AUTO_SCALE | | ||
363 | HORIZ_INTERP_BILINEAR); | ||
364 | } | ||
365 | break; | 365 | break; |
366 | 366 | ||
367 | default: | 367 | default: |
@@ -369,8 +369,22 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder, | |||
369 | } | 369 | } |
370 | 370 | ||
371 | out: | 371 | out: |
372 | intel_lvds->pfit_control = pfit_control; | 372 | /* If not enabling scaling, be consistent and always use 0. */ |
373 | intel_lvds->pfit_pgm_ratios = pfit_pgm_ratios; | 373 | if ((pfit_control & PFIT_ENABLE) == 0) { |
374 | pfit_control = 0; | ||
375 | pfit_pgm_ratios = 0; | ||
376 | } | ||
377 | |||
378 | /* Make sure pre-965 set dither correctly */ | ||
379 | if (INTEL_INFO(dev)->gen < 4 && dev_priv->lvds_dither) | ||
380 | pfit_control |= PANEL_8TO6_DITHER_ENABLE; | ||
381 | |||
382 | if (pfit_control != intel_lvds->pfit_control || | ||
383 | pfit_pgm_ratios != intel_lvds->pfit_pgm_ratios) { | ||
384 | intel_lvds->pfit_control = pfit_control; | ||
385 | intel_lvds->pfit_pgm_ratios = pfit_pgm_ratios; | ||
386 | intel_lvds->pfit_dirty = true; | ||
387 | } | ||
374 | dev_priv->lvds_border_bits = border; | 388 | dev_priv->lvds_border_bits = border; |
375 | 389 | ||
376 | /* | 390 | /* |
@@ -386,56 +400,66 @@ static void intel_lvds_prepare(struct drm_encoder *encoder) | |||
386 | { | 400 | { |
387 | struct drm_device *dev = encoder->dev; | 401 | struct drm_device *dev = encoder->dev; |
388 | struct drm_i915_private *dev_priv = dev->dev_private; | 402 | struct drm_i915_private *dev_priv = dev->dev_private; |
389 | u32 reg; | 403 | struct intel_lvds *intel_lvds = to_intel_lvds(encoder); |
390 | 404 | ||
391 | if (HAS_PCH_SPLIT(dev)) | 405 | /* We try to do the minimum that is necessary in order to unlock |
392 | reg = BLC_PWM_CPU_CTL; | 406 | * the registers for mode setting. |
393 | else | 407 | * |
394 | reg = BLC_PWM_CTL; | 408 | * On Ironlake, this is quite simple as we just set the unlock key |
395 | 409 | * and ignore all subtleties. (This may cause some issues...) | |
396 | dev_priv->saveBLC_PWM_CTL = I915_READ(reg); | 410 | * |
397 | dev_priv->backlight_duty_cycle = (dev_priv->saveBLC_PWM_CTL & | 411 | * Prior to Ironlake, we must disable the pipe if we want to adjust |
398 | BACKLIGHT_DUTY_CYCLE_MASK); | 412 | * the panel fitter. However at all other times we can just reset |
413 | * the registers regardless. | ||
414 | */ | ||
399 | 415 | ||
400 | intel_lvds_set_power(dev, false); | 416 | if (HAS_PCH_SPLIT(dev)) { |
417 | I915_WRITE(PCH_PP_CONTROL, | ||
418 | I915_READ(PCH_PP_CONTROL) | PANEL_UNLOCK_REGS); | ||
419 | } else if (intel_lvds->pfit_dirty) { | ||
420 | I915_WRITE(PP_CONTROL, | ||
421 | (I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS) | ||
422 | & ~POWER_TARGET_ON); | ||
423 | } else { | ||
424 | I915_WRITE(PP_CONTROL, | ||
425 | I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS); | ||
426 | } | ||
401 | } | 427 | } |
402 | 428 | ||
403 | static void intel_lvds_commit( struct drm_encoder *encoder) | 429 | static void intel_lvds_commit(struct drm_encoder *encoder) |
404 | { | 430 | { |
405 | struct drm_device *dev = encoder->dev; | 431 | struct drm_device *dev = encoder->dev; |
406 | struct drm_i915_private *dev_priv = dev->dev_private; | 432 | struct drm_i915_private *dev_priv = dev->dev_private; |
433 | struct intel_lvds *intel_lvds = to_intel_lvds(encoder); | ||
407 | 434 | ||
408 | if (dev_priv->backlight_duty_cycle == 0) | 435 | /* Undo any unlocking done in prepare to prevent accidental |
409 | dev_priv->backlight_duty_cycle = | 436 | * adjustment of the registers. |
410 | intel_lvds_get_max_backlight(dev); | 437 | */ |
438 | if (HAS_PCH_SPLIT(dev)) { | ||
439 | u32 val = I915_READ(PCH_PP_CONTROL); | ||
440 | if ((val & PANEL_UNLOCK_REGS) == PANEL_UNLOCK_REGS) | ||
441 | I915_WRITE(PCH_PP_CONTROL, val & 0x3); | ||
442 | } else { | ||
443 | u32 val = I915_READ(PP_CONTROL); | ||
444 | if ((val & PANEL_UNLOCK_REGS) == PANEL_UNLOCK_REGS) | ||
445 | I915_WRITE(PP_CONTROL, val & 0x3); | ||
446 | } | ||
411 | 447 | ||
412 | intel_lvds_set_power(dev, true); | 448 | /* Always do a full power on as we do not know what state |
449 | * we were left in. | ||
450 | */ | ||
451 | intel_lvds_enable(intel_lvds); | ||
413 | } | 452 | } |
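
The prepare/commit pair above amounts to a small lock protocol around the panel power-sequencer register: prepare writes the unlock key into the high bits of PP_CONTROL (dropping POWER_TARGET_ON first when the panel fitter must change on pre-Ironlake), and commit masks the key back out while preserving the low power-state bits. A standalone sketch of the bit manipulation, assuming PANEL_UNLOCK_REGS is the 0xabcd key in the upper 16 bits (the exact value lives in i915_reg.h):

    #include <stdio.h>

    #define PANEL_UNLOCK_REGS (0xabcdu << 16)   /* assumed key value */

    int main(void)
    {
            unsigned int pp = 0x1;              /* panel on, registers locked */

            pp |= PANEL_UNLOCK_REGS;            /* prepare: unlock for mode setting */
            if ((pp & PANEL_UNLOCK_REGS) == PANEL_UNLOCK_REGS)
                    pp &= 0x3;                  /* commit: drop the key, keep power bits */
            printf("PP_CONTROL ends up as %#x\n", pp);
            return 0;
    }
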
414 | 453 | ||
415 | static void intel_lvds_mode_set(struct drm_encoder *encoder, | 454 | static void intel_lvds_mode_set(struct drm_encoder *encoder, |
416 | struct drm_display_mode *mode, | 455 | struct drm_display_mode *mode, |
417 | struct drm_display_mode *adjusted_mode) | 456 | struct drm_display_mode *adjusted_mode) |
418 | { | 457 | { |
419 | struct drm_device *dev = encoder->dev; | ||
420 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
421 | struct intel_lvds *intel_lvds = enc_to_intel_lvds(encoder); | ||
422 | |||
423 | /* | 458 | /* |
424 | * The LVDS pin pair will already have been turned on in the | 459 | * The LVDS pin pair will already have been turned on in the |
425 | * intel_crtc_mode_set since it has a large impact on the DPLL | 460 | * intel_crtc_mode_set since it has a large impact on the DPLL |
426 | * settings. | 461 | * settings. |
427 | */ | 462 | */ |
428 | |||
429 | if (HAS_PCH_SPLIT(dev)) | ||
430 | return; | ||
431 | |||
432 | /* | ||
433 | * Enable automatic panel scaling so that non-native modes fill the | ||
434 | * screen. Should be enabled before the pipe is enabled, according to | ||
435 | * register description and PRM. | ||
436 | */ | ||
437 | I915_WRITE(PFIT_PGM_RATIOS, intel_lvds->pfit_pgm_ratios); | ||
438 | I915_WRITE(PFIT_CONTROL, intel_lvds->pfit_control); | ||
439 | } | 463 | } |
440 | 464 | ||
441 | /** | 465 | /** |
@@ -449,15 +473,13 @@ static enum drm_connector_status | |||
449 | intel_lvds_detect(struct drm_connector *connector, bool force) | 473 | intel_lvds_detect(struct drm_connector *connector, bool force) |
450 | { | 474 | { |
451 | struct drm_device *dev = connector->dev; | 475 | struct drm_device *dev = connector->dev; |
452 | enum drm_connector_status status = connector_status_connected; | 476 | enum drm_connector_status status; |
453 | 477 | ||
454 | /* ACPI lid methods were generally unreliable in this generation, so | 478 | status = intel_panel_detect(dev); |
455 | * don't even bother. | 479 | if (status != connector_status_unknown) |
456 | */ | 480 | return status; |
457 | if (IS_GEN2(dev) || IS_GEN3(dev)) | ||
458 | return connector_status_connected; | ||
459 | 481 | ||
460 | return status; | 482 | return connector_status_connected; |
461 | } | 483 | } |
462 | 484 | ||
463 | /** | 485 | /** |
@@ -465,38 +487,19 @@ intel_lvds_detect(struct drm_connector *connector, bool force) | |||
465 | */ | 487 | */ |
466 | static int intel_lvds_get_modes(struct drm_connector *connector) | 488 | static int intel_lvds_get_modes(struct drm_connector *connector) |
467 | { | 489 | { |
490 | struct intel_lvds *intel_lvds = intel_attached_lvds(connector); | ||
468 | struct drm_device *dev = connector->dev; | 491 | struct drm_device *dev = connector->dev; |
469 | struct drm_encoder *encoder = intel_attached_encoder(connector); | 492 | struct drm_display_mode *mode; |
470 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); | ||
471 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
472 | int ret = 0; | ||
473 | |||
474 | if (dev_priv->lvds_edid_good) { | ||
475 | ret = intel_ddc_get_modes(connector, intel_encoder->ddc_bus); | ||
476 | |||
477 | if (ret) | ||
478 | return ret; | ||
479 | } | ||
480 | |||
481 | /* Didn't get an EDID, so | ||
482 | * Set wide sync ranges so we get all modes | ||
483 | * handed to valid_mode for checking | ||
484 | */ | ||
485 | connector->display_info.min_vfreq = 0; | ||
486 | connector->display_info.max_vfreq = 200; | ||
487 | connector->display_info.min_hfreq = 0; | ||
488 | connector->display_info.max_hfreq = 200; | ||
489 | |||
490 | if (dev_priv->panel_fixed_mode != NULL) { | ||
491 | struct drm_display_mode *mode; | ||
492 | 493 | ||
493 | mode = drm_mode_duplicate(dev, dev_priv->panel_fixed_mode); | 494 | if (intel_lvds->edid) |
494 | drm_mode_probed_add(connector, mode); | 495 | return drm_add_edid_modes(connector, intel_lvds->edid); |
495 | 496 | ||
496 | return 1; | 497 | mode = drm_mode_duplicate(dev, intel_lvds->fixed_mode); |
497 | } | 498 | if (mode == NULL) |
499 | return 0; | ||
498 | 500 | ||
499 | return 0; | 501 | drm_mode_probed_add(connector, mode); |
502 | return 1; | ||
500 | } | 503 | } |
501 | 504 | ||
502 | static int intel_no_modeset_on_lid_dmi_callback(const struct dmi_system_id *id) | 505 | static int intel_no_modeset_on_lid_dmi_callback(const struct dmi_system_id *id) |
@@ -536,6 +539,9 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val, | |||
536 | struct drm_device *dev = dev_priv->dev; | 539 | struct drm_device *dev = dev_priv->dev; |
537 | struct drm_connector *connector = dev_priv->int_lvds_connector; | 540 | struct drm_connector *connector = dev_priv->int_lvds_connector; |
538 | 541 | ||
542 | if (dev->switch_power_state != DRM_SWITCH_POWER_ON) | ||
543 | return NOTIFY_OK; | ||
544 | |||
539 | /* | 545 | /* |
540 | * check and update the status of LVDS connector after receiving | 546 | * check and update the status of LVDS connector after receiving |
541 | * the LID notification event. | 547 |
@@ -587,18 +593,17 @@ static int intel_lvds_set_property(struct drm_connector *connector, | |||
587 | struct drm_property *property, | 593 | struct drm_property *property, |
588 | uint64_t value) | 594 | uint64_t value) |
589 | { | 595 | { |
596 | struct intel_lvds *intel_lvds = intel_attached_lvds(connector); | ||
590 | struct drm_device *dev = connector->dev; | 597 | struct drm_device *dev = connector->dev; |
591 | 598 | ||
592 | if (property == dev->mode_config.scaling_mode_property && | 599 | if (property == dev->mode_config.scaling_mode_property) { |
593 | connector->encoder) { | 600 | struct drm_crtc *crtc = intel_lvds->base.base.crtc; |
594 | struct drm_crtc *crtc = connector->encoder->crtc; | ||
595 | struct drm_encoder *encoder = connector->encoder; | ||
596 | struct intel_lvds *intel_lvds = enc_to_intel_lvds(encoder); | ||
597 | 601 | ||
598 | if (value == DRM_MODE_SCALE_NONE) { | 602 | if (value == DRM_MODE_SCALE_NONE) { |
599 | DRM_DEBUG_KMS("no scaling not supported\n"); | 603 | DRM_DEBUG_KMS("no scaling not supported\n"); |
600 | return 0; | 604 | return -EINVAL; |
601 | } | 605 | } |
606 | |||
602 | if (intel_lvds->fitting_mode == value) { | 607 | if (intel_lvds->fitting_mode == value) { |
603 | /* the LVDS scaling property is not changed */ | 608 | /* the LVDS scaling property is not changed */ |
604 | return 0; | 609 | return 0; |
@@ -628,7 +633,7 @@ static const struct drm_encoder_helper_funcs intel_lvds_helper_funcs = { | |||
628 | static const struct drm_connector_helper_funcs intel_lvds_connector_helper_funcs = { | 633 | static const struct drm_connector_helper_funcs intel_lvds_connector_helper_funcs = { |
629 | .get_modes = intel_lvds_get_modes, | 634 | .get_modes = intel_lvds_get_modes, |
630 | .mode_valid = intel_lvds_mode_valid, | 635 | .mode_valid = intel_lvds_mode_valid, |
631 | .best_encoder = intel_attached_encoder, | 636 | .best_encoder = intel_best_encoder, |
632 | }; | 637 | }; |
633 | 638 | ||
634 | static const struct drm_connector_funcs intel_lvds_connector_funcs = { | 639 | static const struct drm_connector_funcs intel_lvds_connector_funcs = { |
@@ -701,6 +706,14 @@ static const struct dmi_system_id intel_no_lvds[] = { | |||
701 | }, | 706 | }, |
702 | { | 707 | { |
703 | .callback = intel_no_lvds_dmi_callback, | 708 | .callback = intel_no_lvds_dmi_callback, |
709 | .ident = "AOpen i915GMm-HFS", | ||
710 | .matches = { | ||
711 | DMI_MATCH(DMI_BOARD_VENDOR, "AOpen"), | ||
712 | DMI_MATCH(DMI_BOARD_NAME, "i915GMm-HFS"), | ||
713 | }, | ||
714 | }, | ||
715 | { | ||
716 | .callback = intel_no_lvds_dmi_callback, | ||
704 | .ident = "Aopen i945GTt-VFA", | 717 | .ident = "Aopen i945GTt-VFA", |
705 | .matches = { | 718 | .matches = { |
706 | DMI_MATCH(DMI_PRODUCT_VERSION, "AO00001JW"), | 719 | DMI_MATCH(DMI_PRODUCT_VERSION, "AO00001JW"), |
@@ -714,6 +727,14 @@ static const struct dmi_system_id intel_no_lvds[] = { | |||
714 | DMI_MATCH(DMI_PRODUCT_NAME, "U800"), | 727 | DMI_MATCH(DMI_PRODUCT_NAME, "U800"), |
715 | }, | 728 | }, |
716 | }, | 729 | }, |
730 | { | ||
731 | .callback = intel_no_lvds_dmi_callback, | ||
732 | .ident = "Asus EeeBox PC EB1007", | ||
733 | .matches = { | ||
734 | DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer INC."), | ||
735 | DMI_MATCH(DMI_PRODUCT_NAME, "EB1007"), | ||
736 | }, | ||
737 | }, | ||
717 | 738 | ||
718 | { } /* terminating entry */ | 739 | { } /* terminating entry */ |
719 | }; | 740 | }; |
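
The two new board entries follow the standard DMI quirk pattern: every DMI_MATCH inside one entry must match for its callback to fire, and dmi_check_system() returns how many entries matched. A minimal sketch of the shape, with hypothetical names:

    #include <linux/dmi.h>

    static int example_no_lvds(const struct dmi_system_id *id)
    {
            pr_info("ignoring falsely reported LVDS on %s\n", id->ident);
            return 1;
    }

    static const struct dmi_system_id example_quirks[] = {
            {
                    .callback = example_no_lvds,
                    .ident = "Hypothetical board",
                    .matches = {
                            DMI_MATCH(DMI_BOARD_VENDOR, "Vendor"),
                            DMI_MATCH(DMI_BOARD_NAME, "Board"),
                    },
            },
            { }     /* terminating entry */
    };

    /* usage: if (dmi_check_system(example_quirks)) skip LVDS init */
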
@@ -726,16 +747,14 @@ static const struct dmi_system_id intel_no_lvds[] = { | |||
726 | * Find the reduced downclock for LVDS in EDID. | 747 | * Find the reduced downclock for LVDS in EDID. |
727 | */ | 748 | */ |
728 | static void intel_find_lvds_downclock(struct drm_device *dev, | 749 | static void intel_find_lvds_downclock(struct drm_device *dev, |
729 | struct drm_connector *connector) | 750 | struct drm_display_mode *fixed_mode, |
751 | struct drm_connector *connector) | ||
730 | { | 752 | { |
731 | struct drm_i915_private *dev_priv = dev->dev_private; | 753 | struct drm_i915_private *dev_priv = dev->dev_private; |
732 | struct drm_display_mode *scan, *panel_fixed_mode; | 754 | struct drm_display_mode *scan; |
733 | int temp_downclock; | 755 | int temp_downclock; |
734 | 756 | ||
735 | panel_fixed_mode = dev_priv->panel_fixed_mode; | 757 | temp_downclock = fixed_mode->clock; |
736 | temp_downclock = panel_fixed_mode->clock; | ||
737 | |||
738 | mutex_lock(&dev->mode_config.mutex); | ||
739 | list_for_each_entry(scan, &connector->probed_modes, head) { | 758 | list_for_each_entry(scan, &connector->probed_modes, head) { |
740 | /* | 759 | /* |
741 | * If one mode has the same resolution with the fixed_panel | 760 | * If one mode has the same resolution with the fixed_panel |
@@ -744,14 +763,14 @@ static void intel_find_lvds_downclock(struct drm_device *dev, | |||
744 | * case we can set the different FPx0/1 to dynamically select | 763 | * case we can set the different FPx0/1 to dynamically select |
745 | * between low and high frequency. | 764 | * between low and high frequency. |
746 | */ | 765 | */ |
747 | if (scan->hdisplay == panel_fixed_mode->hdisplay && | 766 | if (scan->hdisplay == fixed_mode->hdisplay && |
748 | scan->hsync_start == panel_fixed_mode->hsync_start && | 767 | scan->hsync_start == fixed_mode->hsync_start && |
749 | scan->hsync_end == panel_fixed_mode->hsync_end && | 768 | scan->hsync_end == fixed_mode->hsync_end && |
750 | scan->htotal == panel_fixed_mode->htotal && | 769 | scan->htotal == fixed_mode->htotal && |
751 | scan->vdisplay == panel_fixed_mode->vdisplay && | 770 | scan->vdisplay == fixed_mode->vdisplay && |
752 | scan->vsync_start == panel_fixed_mode->vsync_start && | 771 | scan->vsync_start == fixed_mode->vsync_start && |
753 | scan->vsync_end == panel_fixed_mode->vsync_end && | 772 | scan->vsync_end == fixed_mode->vsync_end && |
754 | scan->vtotal == panel_fixed_mode->vtotal) { | 773 | scan->vtotal == fixed_mode->vtotal) { |
755 | if (scan->clock < temp_downclock) { | 774 | if (scan->clock < temp_downclock) { |
756 | /* | 775 | /* |
757 | * The downclock is already found. But we | 776 | * The downclock is already found. But we |
@@ -761,17 +780,14 @@ static void intel_find_lvds_downclock(struct drm_device *dev, | |||
761 | } | 780 | } |
762 | } | 781 | } |
763 | } | 782 | } |
764 | mutex_unlock(&dev->mode_config.mutex); | 783 | if (temp_downclock < fixed_mode->clock && i915_lvds_downclock) { |
765 | if (temp_downclock < panel_fixed_mode->clock && | ||
766 | i915_lvds_downclock) { | ||
767 | /* We found the downclock for LVDS. */ | 784 | /* We found the downclock for LVDS. */ |
768 | dev_priv->lvds_downclock_avail = 1; | 785 | dev_priv->lvds_downclock_avail = 1; |
769 | dev_priv->lvds_downclock = temp_downclock; | 786 | dev_priv->lvds_downclock = temp_downclock; |
770 | DRM_DEBUG_KMS("LVDS downclock is found in EDID. " | 787 | DRM_DEBUG_KMS("LVDS downclock is found in EDID. " |
771 | "Normal clock %dKhz, downclock %dKhz\n", | 788 | "Normal clock %dKhz, downclock %dKhz\n", |
772 | panel_fixed_mode->clock, temp_downclock); | 789 | fixed_mode->clock, temp_downclock); |
773 | } | 790 | } |
774 | return; | ||
775 | } | 791 | } |
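
The loop above only accepts a probed mode as a downclock candidate when every timing field except the pixel clock matches the fixed mode, so the two entries describe the same panel timing at different refresh rates. The comparison, isolated as a standalone sketch:

    #include <stdbool.h>

    struct timings {
            int hdisplay, hsync_start, hsync_end, htotal;
            int vdisplay, vsync_start, vsync_end, vtotal;
            int clock;      /* kHz; the one field allowed to differ */
    };

    static bool same_panel_timings(const struct timings *a,
                                   const struct timings *b)
    {
            return a->hdisplay == b->hdisplay &&
                   a->hsync_start == b->hsync_start &&
                   a->hsync_end == b->hsync_end &&
                   a->htotal == b->htotal &&
                   a->vdisplay == b->vdisplay &&
                   a->vsync_start == b->vsync_start &&
                   a->vsync_end == b->vsync_end &&
                   a->vtotal == b->vtotal;
    }

    int main(void)
    {
            struct timings fixed = { 1280, 1328, 1360, 1440,
                                     800, 803, 809, 823, 68900 };
            struct timings low = fixed;
            low.clock = 48800;      /* only the clock differs: a valid candidate */
            return same_panel_timings(&fixed, &low) ? 0 : 1;
    }
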
776 | 792 | ||
777 | /* | 793 | /* |
@@ -780,38 +796,48 @@ static void intel_find_lvds_downclock(struct drm_device *dev, | |||
780 | * If it is present, return 1. | 796 | * If it is present, return 1. |
781 | * If it is not present, return false. | 797 | * If it is not present, return false. |
782 | * If no child dev is parsed from VBT, it assumes that the LVDS is present. | 798 | * If no child dev is parsed from VBT, it assumes that the LVDS is present. |
783 | * Note: The addin_offset should also be checked for LVDS panel. | ||
784 | * Only when it is non-zero, it is assumed that it is present. | ||
785 | */ | 799 | */ |
786 | static int lvds_is_present_in_vbt(struct drm_device *dev) | 800 | static bool lvds_is_present_in_vbt(struct drm_device *dev, |
801 | u8 *i2c_pin) | ||
787 | { | 802 | { |
788 | struct drm_i915_private *dev_priv = dev->dev_private; | 803 | struct drm_i915_private *dev_priv = dev->dev_private; |
789 | struct child_device_config *p_child; | 804 | int i; |
790 | int i, ret; | ||
791 | 805 | ||
792 | if (!dev_priv->child_dev_num) | 806 | if (!dev_priv->child_dev_num) |
793 | return 1; | 807 | return true; |
794 | 808 | ||
795 | ret = 0; | ||
796 | for (i = 0; i < dev_priv->child_dev_num; i++) { | 809 | for (i = 0; i < dev_priv->child_dev_num; i++) { |
797 | p_child = dev_priv->child_dev + i; | 810 | struct child_device_config *child = dev_priv->child_dev + i; |
798 | /* | 811 | |
799 | * If the device type is not LFP, continue. | 812 | /* If the device type is not LFP, continue. |
800 | * If the device type is 0x22, it is also regarded as LFP. | 813 | * We have to check both the new identifiers and the
814 | * old for compatibility with some BIOSes. | ||
801 | */ | 815 | */ |
802 | if (p_child->device_type != DEVICE_TYPE_INT_LFP && | 816 | if (child->device_type != DEVICE_TYPE_INT_LFP && |
803 | p_child->device_type != DEVICE_TYPE_LFP) | 817 | child->device_type != DEVICE_TYPE_LFP) |
804 | continue; | 818 | continue; |
805 | 819 | ||
806 | /* The addin_offset should be checked. Only when it is | 820 | if (child->i2c_pin) |
807 | * non-zero, it is regarded as present. | 821 | *i2c_pin = child->i2c_pin; |
822 | |||
823 | /* However, we cannot trust the BIOS writers to populate | ||
824 | * the VBT correctly. Since LVDS requires additional | ||
825 | * information from AIM blocks, a non-zero addin offset is | ||
826 | * a good indicator that the LVDS is actually present. | ||
808 | */ | 827 | */ |
809 | if (p_child->addin_offset) { | 828 | if (child->addin_offset) |
810 | ret = 1; | 829 | return true; |
811 | break; | 830 | |
812 | } | 831 | /* But even then some BIOS writers perform some black magic |
832 | * and instantiate the device without reference to any | ||
833 | * additional data. Trust that if the VBT was written into | ||
834 | * the OpRegion then they have validated the LVDS's existence. | ||
835 | */ | ||
836 | if (dev_priv->opregion.vbt) | ||
837 | return true; | ||
813 | } | 838 | } |
814 | return ret; | 839 | |
840 | return false; | ||
815 | } | 841 | } |
816 | 842 | ||
817 | /** | 843 | /** |
@@ -821,7 +847,7 @@ static int lvds_is_present_in_vbt(struct drm_device *dev) | |||
821 | * Create the connector, register the LVDS DDC bus, and try to figure out what | 847 | * Create the connector, register the LVDS DDC bus, and try to figure out what |
822 | * modes we can display on the LVDS panel (if present). | 848 | * modes we can display on the LVDS panel (if present). |
823 | */ | 849 | */ |
824 | void intel_lvds_init(struct drm_device *dev) | 850 | bool intel_lvds_init(struct drm_device *dev) |
825 | { | 851 | { |
826 | struct drm_i915_private *dev_priv = dev->dev_private; | 852 | struct drm_i915_private *dev_priv = dev->dev_private; |
827 | struct intel_lvds *intel_lvds; | 853 | struct intel_lvds *intel_lvds; |
@@ -832,52 +858,59 @@ void intel_lvds_init(struct drm_device *dev) | |||
832 | struct drm_display_mode *scan; /* *modes, *bios_mode; */ | 858 | struct drm_display_mode *scan; /* *modes, *bios_mode; */ |
833 | struct drm_crtc *crtc; | 859 | struct drm_crtc *crtc; |
834 | u32 lvds; | 860 | u32 lvds; |
835 | int pipe, gpio = GPIOC; | 861 | int pipe; |
862 | u8 pin; | ||
836 | 863 | ||
837 | /* Skip init on machines we know falsely report LVDS */ | 864 | /* Skip init on machines we know falsely report LVDS */ |
838 | if (dmi_check_system(intel_no_lvds)) | 865 | if (dmi_check_system(intel_no_lvds)) |
839 | return; | 866 | return false; |
840 | 867 | ||
841 | if (!lvds_is_present_in_vbt(dev)) { | 868 | pin = GMBUS_PORT_PANEL; |
869 | if (!lvds_is_present_in_vbt(dev, &pin)) { | ||
842 | DRM_DEBUG_KMS("LVDS is not present in VBT\n"); | 870 | DRM_DEBUG_KMS("LVDS is not present in VBT\n"); |
843 | return; | 871 | return false; |
844 | } | 872 | } |
845 | 873 | ||
846 | if (HAS_PCH_SPLIT(dev)) { | 874 | if (HAS_PCH_SPLIT(dev)) { |
847 | if ((I915_READ(PCH_LVDS) & LVDS_DETECTED) == 0) | 875 | if ((I915_READ(PCH_LVDS) & LVDS_DETECTED) == 0) |
848 | return; | 876 | return false; |
849 | if (dev_priv->edp_support) { | 877 | if (dev_priv->edp.support) { |
850 | DRM_DEBUG_KMS("disable LVDS for eDP support\n"); | 878 | DRM_DEBUG_KMS("disable LVDS for eDP support\n"); |
851 | return; | 879 | return false; |
852 | } | 880 | } |
853 | gpio = PCH_GPIOC; | ||
854 | } | 881 | } |
855 | 882 | ||
856 | intel_lvds = kzalloc(sizeof(struct intel_lvds), GFP_KERNEL); | 883 | intel_lvds = kzalloc(sizeof(struct intel_lvds), GFP_KERNEL); |
857 | if (!intel_lvds) { | 884 | if (!intel_lvds) { |
858 | return; | 885 | return false; |
859 | } | 886 | } |
860 | 887 | ||
861 | intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL); | 888 | intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL); |
862 | if (!intel_connector) { | 889 | if (!intel_connector) { |
863 | kfree(intel_lvds); | 890 | kfree(intel_lvds); |
864 | return; | 891 | return false; |
892 | } | ||
893 | |||
894 | if (!HAS_PCH_SPLIT(dev)) { | ||
895 | intel_lvds->pfit_control = I915_READ(PFIT_CONTROL); | ||
865 | } | 896 | } |
866 | 897 | ||
867 | intel_encoder = &intel_lvds->base; | 898 | intel_encoder = &intel_lvds->base; |
868 | encoder = &intel_encoder->enc; | 899 | encoder = &intel_encoder->base; |
869 | connector = &intel_connector->base; | 900 | connector = &intel_connector->base; |
870 | drm_connector_init(dev, &intel_connector->base, &intel_lvds_connector_funcs, | 901 | drm_connector_init(dev, &intel_connector->base, &intel_lvds_connector_funcs, |
871 | DRM_MODE_CONNECTOR_LVDS); | 902 | DRM_MODE_CONNECTOR_LVDS); |
872 | 903 | ||
873 | drm_encoder_init(dev, &intel_encoder->enc, &intel_lvds_enc_funcs, | 904 | drm_encoder_init(dev, &intel_encoder->base, &intel_lvds_enc_funcs, |
874 | DRM_MODE_ENCODER_LVDS); | 905 | DRM_MODE_ENCODER_LVDS); |
875 | 906 | ||
876 | drm_mode_connector_attach_encoder(&intel_connector->base, &intel_encoder->enc); | 907 | intel_connector_attach_encoder(intel_connector, intel_encoder); |
877 | intel_encoder->type = INTEL_OUTPUT_LVDS; | 908 | intel_encoder->type = INTEL_OUTPUT_LVDS; |
878 | 909 | ||
879 | intel_encoder->clone_mask = (1 << INTEL_LVDS_CLONE_BIT); | 910 | intel_encoder->clone_mask = (1 << INTEL_LVDS_CLONE_BIT); |
880 | intel_encoder->crtc_mask = (1 << 1); | 911 | intel_encoder->crtc_mask = (1 << 1); |
912 | if (INTEL_INFO(dev)->gen >= 5) | ||
913 | intel_encoder->crtc_mask |= (1 << 0); | ||
881 | drm_encoder_helper_add(encoder, &intel_lvds_helper_funcs); | 914 | drm_encoder_helper_add(encoder, &intel_lvds_helper_funcs); |
882 | drm_connector_helper_add(connector, &intel_lvds_connector_helper_funcs); | 915 | drm_connector_helper_add(connector, &intel_lvds_connector_helper_funcs); |
883 | connector->display_info.subpixel_order = SubPixelHorizontalRGB; | 916 | connector->display_info.subpixel_order = SubPixelHorizontalRGB; |
@@ -904,43 +937,50 @@ void intel_lvds_init(struct drm_device *dev) | |||
904 | * if closed, act like it's not there for now | 937 | * if closed, act like it's not there for now |
905 | */ | 938 | */ |
906 | 939 | ||
907 | /* Set up the DDC bus. */ | ||
908 | intel_encoder->ddc_bus = intel_i2c_create(dev, gpio, "LVDSDDC_C"); | ||
909 | if (!intel_encoder->ddc_bus) { | ||
910 | dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration " | ||
911 | "failed.\n"); | ||
912 | goto failed; | ||
913 | } | ||
914 | |||
915 | /* | 940 | /* |
916 | * Attempt to get the fixed panel mode from DDC. Assume that the | 941 | * Attempt to get the fixed panel mode from DDC. Assume that the |
917 | * preferred mode is the right one. | 942 | * preferred mode is the right one. |
918 | */ | 943 | */ |
919 | dev_priv->lvds_edid_good = true; | 944 | intel_lvds->edid = drm_get_edid(connector, |
920 | 945 | &dev_priv->gmbus[pin].adapter); | |
921 | if (!intel_ddc_get_modes(connector, intel_encoder->ddc_bus)) | 946 | if (intel_lvds->edid) { |
922 | dev_priv->lvds_edid_good = false; | 947 | if (drm_add_edid_modes(connector, |
948 | intel_lvds->edid)) { | ||
949 | drm_mode_connector_update_edid_property(connector, | ||
950 | intel_lvds->edid); | ||
951 | } else { | ||
952 | kfree(intel_lvds->edid); | ||
953 | intel_lvds->edid = NULL; | ||
954 | } | ||
955 | } | ||
956 | if (!intel_lvds->edid) { | ||
957 | /* Didn't get an EDID, so | ||
958 | * Set wide sync ranges so we get all modes | ||
959 | * handed to valid_mode for checking | ||
960 | */ | ||
961 | connector->display_info.min_vfreq = 0; | ||
962 | connector->display_info.max_vfreq = 200; | ||
963 | connector->display_info.min_hfreq = 0; | ||
964 | connector->display_info.max_hfreq = 200; | ||
965 | } | ||
923 | 966 | ||
924 | list_for_each_entry(scan, &connector->probed_modes, head) { | 967 | list_for_each_entry(scan, &connector->probed_modes, head) { |
925 | mutex_lock(&dev->mode_config.mutex); | ||
926 | if (scan->type & DRM_MODE_TYPE_PREFERRED) { | 968 | if (scan->type & DRM_MODE_TYPE_PREFERRED) { |
927 | dev_priv->panel_fixed_mode = | 969 | intel_lvds->fixed_mode = |
928 | drm_mode_duplicate(dev, scan); | 970 | drm_mode_duplicate(dev, scan); |
929 | mutex_unlock(&dev->mode_config.mutex); | 971 | intel_find_lvds_downclock(dev, |
930 | intel_find_lvds_downclock(dev, connector); | 972 | intel_lvds->fixed_mode, |
973 | connector); | ||
931 | goto out; | 974 | goto out; |
932 | } | 975 | } |
933 | mutex_unlock(&dev->mode_config.mutex); | ||
934 | } | 976 | } |
935 | 977 | ||
936 | /* Failed to get EDID, what about VBT? */ | 978 | /* Failed to get EDID, what about VBT? */ |
937 | if (dev_priv->lfp_lvds_vbt_mode) { | 979 | if (dev_priv->lfp_lvds_vbt_mode) { |
938 | mutex_lock(&dev->mode_config.mutex); | 980 | intel_lvds->fixed_mode = |
939 | dev_priv->panel_fixed_mode = | ||
940 | drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode); | 981 | drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode); |
941 | mutex_unlock(&dev->mode_config.mutex); | 982 | if (intel_lvds->fixed_mode) { |
942 | if (dev_priv->panel_fixed_mode) { | 983 | intel_lvds->fixed_mode->type |= |
943 | dev_priv->panel_fixed_mode->type |= | ||
944 | DRM_MODE_TYPE_PREFERRED; | 984 | DRM_MODE_TYPE_PREFERRED; |
945 | goto out; | 985 | goto out; |
946 | } | 986 | } |
@@ -958,28 +998,36 @@ void intel_lvds_init(struct drm_device *dev) | |||
958 | 998 | ||
959 | lvds = I915_READ(LVDS); | 999 | lvds = I915_READ(LVDS); |
960 | pipe = (lvds & LVDS_PIPEB_SELECT) ? 1 : 0; | 1000 | pipe = (lvds & LVDS_PIPEB_SELECT) ? 1 : 0; |
961 | crtc = intel_get_crtc_from_pipe(dev, pipe); | 1001 | crtc = intel_get_crtc_for_pipe(dev, pipe); |
962 | 1002 | ||
963 | if (crtc && (lvds & LVDS_PORT_EN)) { | 1003 | if (crtc && (lvds & LVDS_PORT_EN)) { |
964 | dev_priv->panel_fixed_mode = intel_crtc_mode_get(dev, crtc); | 1004 | intel_lvds->fixed_mode = intel_crtc_mode_get(dev, crtc); |
965 | if (dev_priv->panel_fixed_mode) { | 1005 | if (intel_lvds->fixed_mode) { |
966 | dev_priv->panel_fixed_mode->type |= | 1006 | intel_lvds->fixed_mode->type |= |
967 | DRM_MODE_TYPE_PREFERRED; | 1007 | DRM_MODE_TYPE_PREFERRED; |
968 | goto out; | 1008 | goto out; |
969 | } | 1009 | } |
970 | } | 1010 | } |
971 | 1011 | ||
972 | /* If we still don't have a mode after all that, give up. */ | 1012 | /* If we still don't have a mode after all that, give up. */ |
973 | if (!dev_priv->panel_fixed_mode) | 1013 | if (!intel_lvds->fixed_mode) |
974 | goto failed; | 1014 | goto failed; |
975 | 1015 | ||
976 | out: | 1016 | out: |
977 | if (HAS_PCH_SPLIT(dev)) { | 1017 | if (HAS_PCH_SPLIT(dev)) { |
978 | u32 pwm; | 1018 | u32 pwm; |
979 | /* make sure PWM is enabled */ | 1019 | |
1020 | pipe = (I915_READ(PCH_LVDS) & LVDS_PIPEB_SELECT) ? 1 : 0; | ||
1021 | |||
1022 | /* make sure PWM is enabled and locked to the LVDS pipe */ | ||
980 | pwm = I915_READ(BLC_PWM_CPU_CTL2); | 1023 | pwm = I915_READ(BLC_PWM_CPU_CTL2); |
981 | pwm |= (PWM_ENABLE | PWM_PIPE_B); | 1024 | if (pipe == 0 && (pwm & PWM_PIPE_B)) |
982 | I915_WRITE(BLC_PWM_CPU_CTL2, pwm); | 1025 | I915_WRITE(BLC_PWM_CPU_CTL2, pwm & ~PWM_ENABLE); |
1026 | if (pipe) | ||
1027 | pwm |= PWM_PIPE_B; | ||
1028 | else | ||
1029 | pwm &= ~PWM_PIPE_B; | ||
1030 | I915_WRITE(BLC_PWM_CPU_CTL2, pwm | PWM_ENABLE); | ||
983 | 1031 | ||
984 | pwm = I915_READ(BLC_PWM_PCH_CTL1); | 1032 | pwm = I915_READ(BLC_PWM_PCH_CTL1); |
985 | pwm |= PWM_PCH_ENABLE; | 1033 | pwm |= PWM_PCH_ENABLE; |
@@ -993,14 +1041,13 @@ out: | |||
993 | /* keep the LVDS connector */ | 1041 | /* keep the LVDS connector */ |
994 | dev_priv->int_lvds_connector = connector; | 1042 | dev_priv->int_lvds_connector = connector; |
995 | drm_sysfs_connector_add(connector); | 1043 | drm_sysfs_connector_add(connector); |
996 | return; | 1044 | return true; |
997 | 1045 | ||
998 | failed: | 1046 | failed: |
999 | DRM_DEBUG_KMS("No LVDS modes found, disabling.\n"); | 1047 | DRM_DEBUG_KMS("No LVDS modes found, disabling.\n"); |
1000 | if (intel_encoder->ddc_bus) | ||
1001 | intel_i2c_destroy(intel_encoder->ddc_bus); | ||
1002 | drm_connector_cleanup(connector); | 1048 | drm_connector_cleanup(connector); |
1003 | drm_encoder_cleanup(encoder); | 1049 | drm_encoder_cleanup(encoder); |
1004 | kfree(intel_lvds); | 1050 | kfree(intel_lvds); |
1005 | kfree(intel_connector); | 1051 | kfree(intel_connector); |
1052 | return false; | ||
1006 | } | 1053 | } |
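
intel_lvds_init() now discovers the fixed panel mode through a three-step fallback: the preferred mode from EDID read over the panel GMBUS port, then the VBT mode, then whatever mode the BIOS left running on the LVDS pipe. A hypothetical condensation of that chain (error handling and the LVDS_PORT_EN check elided):

    static struct drm_display_mode *
    pick_fixed_mode(struct drm_device *dev, struct drm_connector *connector,
                    struct drm_i915_private *dev_priv, struct drm_crtc *crtc)
    {
            struct drm_display_mode *scan;

            /* 1: preferred mode from the panel's EDID */
            list_for_each_entry(scan, &connector->probed_modes, head)
                    if (scan->type & DRM_MODE_TYPE_PREFERRED)
                            return drm_mode_duplicate(dev, scan);

            /* 2: mode recorded in the video BIOS tables */
            if (dev_priv->lfp_lvds_vbt_mode)
                    return drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode);

            /* 3: mode the BIOS left programmed on the LVDS pipe */
            return intel_crtc_mode_get(dev, crtc);
    }
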
diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_modes.c index 4b1fd3d9c73c..3b26a3ba02dd 100644 --- a/drivers/gpu/drm/i915/intel_modes.c +++ b/drivers/gpu/drm/i915/intel_modes.c | |||
@@ -1,6 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (c) 2007 Dave Airlie <airlied@linux.ie> | 2 | * Copyright (c) 2007 Dave Airlie <airlied@linux.ie> |
3 | * Copyright (c) 2007 Intel Corporation | 3 | * Copyright (c) 2007, 2010 Intel Corporation |
4 | * Jesse Barnes <jesse.barnes@intel.com> | 4 | * Jesse Barnes <jesse.barnes@intel.com> |
5 | * | 5 | * |
6 | * Permission is hereby granted, free of charge, to any person obtaining a | 6 | * Permission is hereby granted, free of charge, to any person obtaining a |
@@ -34,11 +34,11 @@ | |||
34 | * intel_ddc_probe | 34 | * intel_ddc_probe |
35 | * | 35 | * |
36 | */ | 36 | */ |
37 | bool intel_ddc_probe(struct intel_encoder *intel_encoder) | 37 | bool intel_ddc_probe(struct intel_encoder *intel_encoder, int ddc_bus) |
38 | { | 38 | { |
39 | struct drm_i915_private *dev_priv = intel_encoder->base.dev->dev_private; | ||
39 | u8 out_buf[] = { 0x0, 0x0}; | 40 | u8 out_buf[] = { 0x0, 0x0}; |
40 | u8 buf[2]; | 41 | u8 buf[2]; |
41 | int ret; | ||
42 | struct i2c_msg msgs[] = { | 42 | struct i2c_msg msgs[] = { |
43 | { | 43 | { |
44 | .addr = 0x50, | 44 | .addr = 0x50, |
@@ -54,13 +54,7 @@ bool intel_ddc_probe(struct intel_encoder *intel_encoder) | |||
54 | } | 54 | } |
55 | }; | 55 | }; |
56 | 56 | ||
57 | intel_i2c_quirk_set(intel_encoder->enc.dev, true); | 57 | return i2c_transfer(&dev_priv->gmbus[ddc_bus].adapter, msgs, 2) == 2; |
58 | ret = i2c_transfer(intel_encoder->ddc_bus, msgs, 2); | ||
59 | intel_i2c_quirk_set(intel_encoder->enc.dev, false); | ||
60 | if (ret == 2) | ||
61 | return true; | ||
62 | |||
63 | return false; | ||
64 | } | 58 | } |
65 | 59 | ||
66 | /** | 60 | /** |
@@ -76,9 +70,7 @@ int intel_ddc_get_modes(struct drm_connector *connector, | |||
76 | struct edid *edid; | 70 | struct edid *edid; |
77 | int ret = 0; | 71 | int ret = 0; |
78 | 72 | ||
79 | intel_i2c_quirk_set(connector->dev, true); | ||
80 | edid = drm_get_edid(connector, adapter); | 73 | edid = drm_get_edid(connector, adapter); |
81 | intel_i2c_quirk_set(connector->dev, false); | ||
82 | if (edid) { | 74 | if (edid) { |
83 | drm_mode_connector_update_edid_property(connector, edid); | 75 | drm_mode_connector_update_edid_property(connector, edid); |
84 | ret = drm_add_edid_modes(connector, edid); | 76 | ret = drm_add_edid_modes(connector, edid); |
@@ -88,3 +80,63 @@ int intel_ddc_get_modes(struct drm_connector *connector, | |||
88 | 80 | ||
89 | return ret; | 81 | return ret; |
90 | } | 82 | } |
83 | |||
84 | static const char *force_audio_names[] = { | ||
85 | "off", | ||
86 | "auto", | ||
87 | "on", | ||
88 | }; | ||
89 | |||
90 | void | ||
91 | intel_attach_force_audio_property(struct drm_connector *connector) | ||
92 | { | ||
93 | struct drm_device *dev = connector->dev; | ||
94 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
95 | struct drm_property *prop; | ||
96 | int i; | ||
97 | |||
98 | prop = dev_priv->force_audio_property; | ||
99 | if (prop == NULL) { | ||
100 | prop = drm_property_create(dev, DRM_MODE_PROP_ENUM, | ||
101 | "audio", | ||
102 | ARRAY_SIZE(force_audio_names)); | ||
103 | if (prop == NULL) | ||
104 | return; | ||
105 | |||
106 | for (i = 0; i < ARRAY_SIZE(force_audio_names); i++) | ||
107 | drm_property_add_enum(prop, i, i-1, force_audio_names[i]); | ||
108 | |||
109 | dev_priv->force_audio_property = prop; | ||
110 | } | ||
111 | drm_connector_attach_property(connector, prop, 0); | ||
112 | } | ||
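
Note the index-to-value mapping in the loop above: drm_property_add_enum(prop, i, i-1, ...) exposes "off", "auto", "on" as -1, 0 and +1, so the default of 0 attached to the connector means "auto". A standalone illustration of the mapping:

    #include <stdio.h>

    static const char *force_audio_names[] = { "off", "auto", "on" };

    int main(void)
    {
            int i;

            /* table index i maps to property value i - 1 */
            for (i = 0; i < 3; i++)
                    printf("%-4s -> %d\n", force_audio_names[i], i - 1);
            return 0;   /* prints: off -> -1, auto -> 0, on -> 1 */
    }
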
113 | |||
114 | static const char *broadcast_rgb_names[] = { | ||
115 | "Full", | ||
116 | "Limited 16:235", | ||
117 | }; | ||
118 | |||
119 | void | ||
120 | intel_attach_broadcast_rgb_property(struct drm_connector *connector) | ||
121 | { | ||
122 | struct drm_device *dev = connector->dev; | ||
123 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
124 | struct drm_property *prop; | ||
125 | int i; | ||
126 | |||
127 | prop = dev_priv->broadcast_rgb_property; | ||
128 | if (prop == NULL) { | ||
129 | prop = drm_property_create(dev, DRM_MODE_PROP_ENUM, | ||
130 | "Broadcast RGB", | ||
131 | ARRAY_SIZE(broadcast_rgb_names)); | ||
132 | if (prop == NULL) | ||
133 | return; | ||
134 | |||
135 | for (i = 0; i < ARRAY_SIZE(broadcast_rgb_names); i++) | ||
136 | drm_property_add_enum(prop, i, i, broadcast_rgb_names[i]); | ||
137 | |||
138 | dev_priv->broadcast_rgb_property = prop; | ||
139 | } | ||
140 | |||
141 | drm_connector_attach_property(connector, prop, 0); | ||
142 | } | ||
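
The "Limited 16:235" option asks downstream hardware to compress full-range RGB into broadcast levels; the conversion itself happens in the display pipe, not in this helper. The arithmetic being requested, as a standalone sketch:

    #include <stdio.h>

    /* Full-range (0..255) to limited/broadcast range (16..235). */
    static unsigned int full_to_limited(unsigned int x)
    {
            return 16 + x * 219 / 255;
    }

    int main(void)
    {
            printf("%u %u %u\n", full_to_limited(0),
                   full_to_limited(128), full_to_limited(255));  /* 16 125 235 */
            return 0;
    }
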
diff --git a/drivers/gpu/drm/i915/i915_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c index ea5d3fea4b61..d2c710422908 100644 --- a/drivers/gpu/drm/i915/i915_opregion.c +++ b/drivers/gpu/drm/i915/intel_opregion.c | |||
@@ -26,22 +26,24 @@ | |||
26 | */ | 26 | */ |
27 | 27 | ||
28 | #include <linux/acpi.h> | 28 | #include <linux/acpi.h> |
29 | #include <linux/acpi_io.h> | ||
29 | #include <acpi/video.h> | 30 | #include <acpi/video.h> |
30 | 31 | ||
31 | #include "drmP.h" | 32 | #include "drmP.h" |
32 | #include "i915_drm.h" | 33 | #include "i915_drm.h" |
33 | #include "i915_drv.h" | 34 | #include "i915_drv.h" |
35 | #include "intel_drv.h" | ||
34 | 36 | ||
35 | #define PCI_ASLE 0xe4 | 37 | #define PCI_ASLE 0xe4 |
36 | #define PCI_LBPC 0xf4 | ||
37 | #define PCI_ASLS 0xfc | 38 | #define PCI_ASLS 0xfc |
38 | 39 | ||
39 | #define OPREGION_SZ (8*1024) | ||
40 | #define OPREGION_HEADER_OFFSET 0 | 40 | #define OPREGION_HEADER_OFFSET 0 |
41 | #define OPREGION_ACPI_OFFSET 0x100 | 41 | #define OPREGION_ACPI_OFFSET 0x100 |
42 | #define ACPI_CLID 0x01ac /* current lid state indicator */ | ||
43 | #define ACPI_CDCK 0x01b0 /* current docking state indicator */ | ||
42 | #define OPREGION_SWSCI_OFFSET 0x200 | 44 | #define OPREGION_SWSCI_OFFSET 0x200 |
43 | #define OPREGION_ASLE_OFFSET 0x300 | 45 | #define OPREGION_ASLE_OFFSET 0x300 |
44 | #define OPREGION_VBT_OFFSET 0x1000 | 46 | #define OPREGION_VBT_OFFSET 0x400 |
45 | 47 | ||
46 | #define OPREGION_SIGNATURE "IntelGraphicsMem" | 48 | #define OPREGION_SIGNATURE "IntelGraphicsMem" |
47 | #define MBOX_ACPI (1<<0) | 49 | #define MBOX_ACPI (1<<0) |
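
With the VBT offset corrected from 0x1000 to 0x400, the mailboxes sit back-to-back behind the header. A sketch of the pointer math performed over the ioremapped base further down in intel_opregion_setup():

    /* Layout of the mapped OpRegion, per the defines above:
     *   base + 0x000  header ("IntelGraphicsMem" signature, mbox bitmask)
     *   base + 0x100  ACPI mailbox (lid state at +0x1ac, dock at +0x1b0)
     *   base + 0x200  SWSCI mailbox
     *   base + 0x300  ASLE mailbox
     *   base + 0x400  VBT
     */
    opregion->acpi  = base + OPREGION_ACPI_OFFSET;
    opregion->swsci = base + OPREGION_SWSCI_OFFSET;
    opregion->asle  = base + OPREGION_ASLE_OFFSET;
    opregion->vbt   = base + OPREGION_VBT_OFFSET;
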
@@ -143,40 +145,22 @@ struct opregion_asle { | |||
143 | #define ACPI_DIGITAL_OUTPUT (3<<8) | 145 | #define ACPI_DIGITAL_OUTPUT (3<<8) |
144 | #define ACPI_LVDS_OUTPUT (4<<8) | 146 | #define ACPI_LVDS_OUTPUT (4<<8) |
145 | 147 | ||
148 | #ifdef CONFIG_ACPI | ||
146 | static u32 asle_set_backlight(struct drm_device *dev, u32 bclp) | 149 | static u32 asle_set_backlight(struct drm_device *dev, u32 bclp) |
147 | { | 150 | { |
148 | struct drm_i915_private *dev_priv = dev->dev_private; | 151 | struct drm_i915_private *dev_priv = dev->dev_private; |
149 | struct opregion_asle *asle = dev_priv->opregion.asle; | 152 | struct opregion_asle *asle = dev_priv->opregion.asle; |
150 | u32 blc_pwm_ctl, blc_pwm_ctl2; | 153 | u32 max; |
151 | u32 max_backlight, level, shift; | ||
152 | 154 | ||
153 | if (!(bclp & ASLE_BCLP_VALID)) | 155 | if (!(bclp & ASLE_BCLP_VALID)) |
154 | return ASLE_BACKLIGHT_FAILED; | 156 | return ASLE_BACKLIGHT_FAILED; |
155 | 157 | ||
156 | bclp &= ASLE_BCLP_MSK; | 158 | bclp &= ASLE_BCLP_MSK; |
157 | if (bclp < 0 || bclp > 255) | 159 | if (bclp > 255) |
158 | return ASLE_BACKLIGHT_FAILED; | 160 | return ASLE_BACKLIGHT_FAILED; |
159 | 161 | ||
160 | blc_pwm_ctl = I915_READ(BLC_PWM_CTL); | 162 | max = intel_panel_get_max_backlight(dev); |
161 | blc_pwm_ctl2 = I915_READ(BLC_PWM_CTL2); | 163 | intel_panel_set_backlight(dev, bclp * max / 255); |
162 | |||
163 | if (IS_I965G(dev) && (blc_pwm_ctl2 & BLM_COMBINATION_MODE)) | ||
164 | pci_write_config_dword(dev->pdev, PCI_LBPC, bclp); | ||
165 | else { | ||
166 | if (IS_PINEVIEW(dev)) { | ||
167 | blc_pwm_ctl &= ~(BACKLIGHT_DUTY_CYCLE_MASK - 1); | ||
168 | max_backlight = (blc_pwm_ctl & BACKLIGHT_MODULATION_FREQ_MASK) >> | ||
169 | BACKLIGHT_MODULATION_FREQ_SHIFT; | ||
170 | shift = BACKLIGHT_DUTY_CYCLE_SHIFT + 1; | ||
171 | } else { | ||
172 | blc_pwm_ctl &= ~BACKLIGHT_DUTY_CYCLE_MASK; | ||
173 | max_backlight = ((blc_pwm_ctl & BACKLIGHT_MODULATION_FREQ_MASK) >> | ||
174 | BACKLIGHT_MODULATION_FREQ_SHIFT) * 2; | ||
175 | shift = BACKLIGHT_DUTY_CYCLE_SHIFT; | ||
176 | } | ||
177 | level = (bclp * max_backlight) / 255; | ||
178 | I915_WRITE(BLC_PWM_CTL, blc_pwm_ctl | (level << shift)); | ||
179 | } | ||
180 | asle->cblv = (bclp*0x64)/0xff | ASLE_CBLV_VALID; | 164 | asle->cblv = (bclp*0x64)/0xff | ASLE_CBLV_VALID; |
181 | 165 | ||
182 | return 0; | 166 | return 0; |
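
The consolidated asle_set_backlight() boils down to two scalings: the BIOS's 0-255 request is mapped onto the panel's maximum PWM value, and a 0-100 percentage is reported back with the valid bit set. A standalone arithmetic sketch (the max value is illustrative, and ASLE_CBLV_VALID is assumed to be bit 31):

    #include <stdio.h>

    #define ASLE_CBLV_VALID (1u << 31)      /* assumed bit position */

    int main(void)
    {
            unsigned int bclp = 200;        /* BIOS request, 0..255 */
            unsigned int max = 4648;        /* illustrative panel max PWM */
            unsigned int level = bclp * max / 255;                  /* 3645 */
            unsigned int cblv = bclp * 0x64 / 0xff | ASLE_CBLV_VALID;

            printf("pwm level %u, reported %u%%\n",
                   level, cblv & ~ASLE_CBLV_VALID);                 /* 78% */
            return 0;
    }
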
@@ -211,7 +195,7 @@ static u32 asle_set_pfit(struct drm_device *dev, u32 pfit) | |||
211 | return 0; | 195 | return 0; |
212 | } | 196 | } |
213 | 197 | ||
214 | void opregion_asle_intr(struct drm_device *dev) | 198 | void intel_opregion_asle_intr(struct drm_device *dev) |
215 | { | 199 | { |
216 | struct drm_i915_private *dev_priv = dev->dev_private; | 200 | struct drm_i915_private *dev_priv = dev->dev_private; |
217 | struct opregion_asle *asle = dev_priv->opregion.asle; | 201 | struct opregion_asle *asle = dev_priv->opregion.asle; |
@@ -243,37 +227,8 @@ void opregion_asle_intr(struct drm_device *dev) | |||
243 | asle->aslc = asle_stat; | 227 | asle->aslc = asle_stat; |
244 | } | 228 | } |
245 | 229 | ||
246 | static u32 asle_set_backlight_ironlake(struct drm_device *dev, u32 bclp) | 230 | /* Only present on Ironlake+ */ |
247 | { | 231 | void intel_opregion_gse_intr(struct drm_device *dev) |
248 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
249 | struct opregion_asle *asle = dev_priv->opregion.asle; | ||
250 | u32 cpu_pwm_ctl, pch_pwm_ctl2; | ||
251 | u32 max_backlight, level; | ||
252 | |||
253 | if (!(bclp & ASLE_BCLP_VALID)) | ||
254 | return ASLE_BACKLIGHT_FAILED; | ||
255 | |||
256 | bclp &= ASLE_BCLP_MSK; | ||
257 | if (bclp < 0 || bclp > 255) | ||
258 | return ASLE_BACKLIGHT_FAILED; | ||
259 | |||
260 | cpu_pwm_ctl = I915_READ(BLC_PWM_CPU_CTL); | ||
261 | pch_pwm_ctl2 = I915_READ(BLC_PWM_PCH_CTL2); | ||
262 | /* get the max PWM frequency */ | ||
263 | max_backlight = (pch_pwm_ctl2 >> 16) & BACKLIGHT_DUTY_CYCLE_MASK; | ||
264 | /* calculate the expected PMW frequency */ | ||
265 | level = (bclp * max_backlight) / 255; | ||
266 | /* reserve the high 16 bits */ | ||
267 | cpu_pwm_ctl &= ~(BACKLIGHT_DUTY_CYCLE_MASK); | ||
268 | /* write the updated PWM frequency */ | ||
269 | I915_WRITE(BLC_PWM_CPU_CTL, cpu_pwm_ctl | level); | ||
270 | |||
271 | asle->cblv = (bclp*0x64)/0xff | ASLE_CBLV_VALID; | ||
272 | |||
273 | return 0; | ||
274 | } | ||
275 | |||
276 | void ironlake_opregion_gse_intr(struct drm_device *dev) | ||
277 | { | 232 | { |
278 | struct drm_i915_private *dev_priv = dev->dev_private; | 233 | struct drm_i915_private *dev_priv = dev->dev_private; |
279 | struct opregion_asle *asle = dev_priv->opregion.asle; | 234 | struct opregion_asle *asle = dev_priv->opregion.asle; |
@@ -296,7 +251,7 @@ void ironlake_opregion_gse_intr(struct drm_device *dev) | |||
296 | } | 251 | } |
297 | 252 | ||
298 | if (asle_req & ASLE_SET_BACKLIGHT) | 253 | if (asle_req & ASLE_SET_BACKLIGHT) |
299 | asle_stat |= asle_set_backlight_ironlake(dev, asle->bclp); | 254 | asle_stat |= asle_set_backlight(dev, asle->bclp); |
300 | 255 | ||
301 | if (asle_req & ASLE_SET_PFIT) { | 256 | if (asle_req & ASLE_SET_PFIT) { |
302 | DRM_DEBUG_DRIVER("Pfit is not supported\n"); | 257 | DRM_DEBUG_DRIVER("Pfit is not supported\n"); |
@@ -315,20 +270,14 @@ void ironlake_opregion_gse_intr(struct drm_device *dev) | |||
315 | #define ASLE_PFIT_EN (1<<2) | 270 | #define ASLE_PFIT_EN (1<<2) |
316 | #define ASLE_PFMB_EN (1<<3) | 271 | #define ASLE_PFMB_EN (1<<3) |
317 | 272 | ||
318 | void opregion_enable_asle(struct drm_device *dev) | 273 | void intel_opregion_enable_asle(struct drm_device *dev) |
319 | { | 274 | { |
320 | struct drm_i915_private *dev_priv = dev->dev_private; | 275 | struct drm_i915_private *dev_priv = dev->dev_private; |
321 | struct opregion_asle *asle = dev_priv->opregion.asle; | 276 | struct opregion_asle *asle = dev_priv->opregion.asle; |
322 | 277 | ||
323 | if (asle) { | 278 | if (asle) { |
324 | if (IS_MOBILE(dev)) { | 279 | if (IS_MOBILE(dev)) |
325 | unsigned long irqflags; | ||
326 | |||
327 | spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); | ||
328 | intel_enable_asle(dev); | 280 | intel_enable_asle(dev); |
329 | spin_unlock_irqrestore(&dev_priv->user_irq_lock, | ||
330 | irqflags); | ||
331 | } | ||
332 | 281 | ||
333 | asle->tche = ASLE_ALS_EN | ASLE_BLC_EN | ASLE_PFIT_EN | | 282 | asle->tche = ASLE_ALS_EN | ASLE_BLC_EN | ASLE_PFIT_EN | |
334 | ASLE_PFMB_EN; | 283 | ASLE_PFMB_EN; |
@@ -464,7 +413,58 @@ blind_set: | |||
464 | goto end; | 413 | goto end; |
465 | } | 414 | } |
466 | 415 | ||
467 | int intel_opregion_init(struct drm_device *dev, int resume) | 416 | void intel_opregion_init(struct drm_device *dev) |
417 | { | ||
418 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
419 | struct intel_opregion *opregion = &dev_priv->opregion; | ||
420 | |||
421 | if (!opregion->header) | ||
422 | return; | ||
423 | |||
424 | if (opregion->acpi) { | ||
425 | if (drm_core_check_feature(dev, DRIVER_MODESET)) | ||
426 | intel_didl_outputs(dev); | ||
427 | |||
428 | /* Notify BIOS we are ready to handle ACPI video ext notifs. | ||
429 | * Right now, all the events are handled by the ACPI video module. | ||
430 | * We don't actually need to do anything with them. */ | ||
431 | opregion->acpi->csts = 0; | ||
432 | opregion->acpi->drdy = 1; | ||
433 | |||
434 | system_opregion = opregion; | ||
435 | register_acpi_notifier(&intel_opregion_notifier); | ||
436 | } | ||
437 | |||
438 | if (opregion->asle) | ||
439 | intel_opregion_enable_asle(dev); | ||
440 | } | ||
441 | |||
442 | void intel_opregion_fini(struct drm_device *dev) | ||
443 | { | ||
444 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
445 | struct intel_opregion *opregion = &dev_priv->opregion; | ||
446 | |||
447 | if (!opregion->header) | ||
448 | return; | ||
449 | |||
450 | if (opregion->acpi) { | ||
451 | opregion->acpi->drdy = 0; | ||
452 | |||
453 | system_opregion = NULL; | ||
454 | unregister_acpi_notifier(&intel_opregion_notifier); | ||
455 | } | ||
456 | |||
457 | /* just clear all opregion memory pointers now */ | ||
458 | iounmap(opregion->header); | ||
459 | opregion->header = NULL; | ||
460 | opregion->acpi = NULL; | ||
461 | opregion->swsci = NULL; | ||
462 | opregion->asle = NULL; | ||
463 | opregion->vbt = NULL; | ||
464 | } | ||
465 | #endif | ||
466 | |||
467 | int intel_opregion_setup(struct drm_device *dev) | ||
468 | { | 468 | { |
469 | struct drm_i915_private *dev_priv = dev->dev_private; | 469 | struct drm_i915_private *dev_priv = dev->dev_private; |
470 | struct intel_opregion *opregion = &dev_priv->opregion; | 470 | struct intel_opregion *opregion = &dev_priv->opregion; |
@@ -479,29 +479,25 @@ int intel_opregion_init(struct drm_device *dev, int resume) | |||
479 | return -ENOTSUPP; | 479 | return -ENOTSUPP; |
480 | } | 480 | } |
481 | 481 | ||
482 | base = ioremap(asls, OPREGION_SZ); | 482 | base = acpi_os_ioremap(asls, OPREGION_SIZE); |
483 | if (!base) | 483 | if (!base) |
484 | return -ENOMEM; | 484 | return -ENOMEM; |
485 | 485 | ||
486 | opregion->header = base; | 486 | if (memcmp(base, OPREGION_SIGNATURE, 16)) { |
487 | if (memcmp(opregion->header->signature, OPREGION_SIGNATURE, 16)) { | ||
488 | DRM_DEBUG_DRIVER("opregion signature mismatch\n"); | 487 | DRM_DEBUG_DRIVER("opregion signature mismatch\n"); |
489 | err = -EINVAL; | 488 | err = -EINVAL; |
490 | goto err_out; | 489 | goto err_out; |
491 | } | 490 | } |
491 | opregion->header = base; | ||
492 | opregion->vbt = base + OPREGION_VBT_OFFSET; | ||
493 | |||
494 | opregion->lid_state = base + ACPI_CLID; | ||
492 | 495 | ||
493 | mboxes = opregion->header->mboxes; | 496 | mboxes = opregion->header->mboxes; |
494 | if (mboxes & MBOX_ACPI) { | 497 | if (mboxes & MBOX_ACPI) { |
495 | DRM_DEBUG_DRIVER("Public ACPI methods supported\n"); | 498 | DRM_DEBUG_DRIVER("Public ACPI methods supported\n"); |
496 | opregion->acpi = base + OPREGION_ACPI_OFFSET; | 499 | opregion->acpi = base + OPREGION_ACPI_OFFSET; |
497 | if (drm_core_check_feature(dev, DRIVER_MODESET)) | ||
498 | intel_didl_outputs(dev); | ||
499 | } else { | ||
500 | DRM_DEBUG_DRIVER("Public ACPI methods not supported\n"); | ||
501 | err = -ENOTSUPP; | ||
502 | goto err_out; | ||
503 | } | 500 | } |
504 | opregion->enabled = 1; | ||
505 | 501 | ||
506 | if (mboxes & MBOX_SWSCI) { | 502 | if (mboxes & MBOX_SWSCI) { |
507 | DRM_DEBUG_DRIVER("SWSCI supported\n"); | 503 | DRM_DEBUG_DRIVER("SWSCI supported\n"); |
@@ -510,53 +506,11 @@ int intel_opregion_init(struct drm_device *dev, int resume) | |||
510 | if (mboxes & MBOX_ASLE) { | 506 | if (mboxes & MBOX_ASLE) { |
511 | DRM_DEBUG_DRIVER("ASLE supported\n"); | 507 | DRM_DEBUG_DRIVER("ASLE supported\n"); |
512 | opregion->asle = base + OPREGION_ASLE_OFFSET; | 508 | opregion->asle = base + OPREGION_ASLE_OFFSET; |
513 | opregion_enable_asle(dev); | ||
514 | } | 509 | } |
515 | 510 | ||
516 | if (!resume) | ||
517 | acpi_video_register(); | ||
518 | |||
519 | |||
520 | /* Notify BIOS we are ready to handle ACPI video ext notifs. | ||
521 | * Right now, all the events are handled by the ACPI video module. | ||
522 | * We don't actually need to do anything with them. */ | ||
523 | opregion->acpi->csts = 0; | ||
524 | opregion->acpi->drdy = 1; | ||
525 | |||
526 | system_opregion = opregion; | ||
527 | register_acpi_notifier(&intel_opregion_notifier); | ||
528 | |||
529 | return 0; | 511 | return 0; |
530 | 512 | ||
531 | err_out: | 513 | err_out: |
532 | iounmap(opregion->header); | 514 | iounmap(base); |
533 | opregion->header = NULL; | ||
534 | acpi_video_register(); | ||
535 | return err; | 515 | return err; |
536 | } | 516 | } |
537 | |||
538 | void intel_opregion_free(struct drm_device *dev, int suspend) | ||
539 | { | ||
540 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
541 | struct intel_opregion *opregion = &dev_priv->opregion; | ||
542 | |||
543 | if (!opregion->enabled) | ||
544 | return; | ||
545 | |||
546 | if (!suspend) | ||
547 | acpi_video_unregister(); | ||
548 | |||
549 | opregion->acpi->drdy = 0; | ||
550 | |||
551 | system_opregion = NULL; | ||
552 | unregister_acpi_notifier(&intel_opregion_notifier); | ||
553 | |||
554 | /* just clear all opregion memory pointers now */ | ||
555 | iounmap(opregion->header); | ||
556 | opregion->header = NULL; | ||
557 | opregion->acpi = NULL; | ||
558 | opregion->swsci = NULL; | ||
559 | opregion->asle = NULL; | ||
560 | |||
561 | opregion->enabled = 0; | ||
562 | } | ||
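
The old init/free pair, which mixed mapping with runtime state, is now split: intel_opregion_setup() maps and validates the region once at load, while intel_opregion_init() and intel_opregion_fini() bracket runtime use and can be repeated across suspend/resume. A sketch of the intended call ordering from the driver's perspective:

    if (intel_opregion_setup(dev) == 0) {
            intel_opregion_init(dev);       /* after modeset init, and again on resume */
            /* ... normal operation ... */
            intel_opregion_fini(dev);       /* on suspend or unload */
    }
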
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c index 1d306a458be6..9e2959bc91cd 100644 --- a/drivers/gpu/drm/i915/intel_overlay.c +++ b/drivers/gpu/drm/i915/intel_overlay.c | |||
@@ -170,91 +170,180 @@ struct overlay_registers { | |||
170 | u16 RESERVEDG[0x100 / 2 - N_HORIZ_UV_TAPS * N_PHASES]; | 170 | u16 RESERVEDG[0x100 / 2 - N_HORIZ_UV_TAPS * N_PHASES]; |
171 | }; | 171 | }; |
172 | 172 | ||
173 | /* overlay flip addr flag */ | 173 | struct intel_overlay { |
174 | #define OFC_UPDATE 0x1 | 174 | struct drm_device *dev; |
175 | 175 | struct intel_crtc *crtc; | |
176 | #define OVERLAY_NONPHYSICAL(dev) (IS_G33(dev) || IS_I965G(dev)) | 176 | struct drm_i915_gem_object *vid_bo; |
177 | #define OVERLAY_EXISTS(dev) (!IS_G4X(dev) && !IS_IRONLAKE(dev) && !IS_GEN6(dev)) | 177 | struct drm_i915_gem_object *old_vid_bo; |
178 | 178 | int active; | |
179 | int pfit_active; | ||
180 | u32 pfit_vscale_ratio; /* shifted-point number, (1<<12) == 1.0 */ | ||
181 | u32 color_key; | ||
182 | u32 brightness, contrast, saturation; | ||
183 | u32 old_xscale, old_yscale; | ||
184 | /* register access */ | ||
185 | u32 flip_addr; | ||
186 | struct drm_i915_gem_object *reg_bo; | ||
187 | /* flip handling */ | ||
188 | uint32_t last_flip_req; | ||
189 | void (*flip_tail)(struct intel_overlay *); | ||
190 | }; | ||
179 | 191 | ||
180 | static struct overlay_registers *intel_overlay_map_regs_atomic(struct intel_overlay *overlay) | 192 | static struct overlay_registers * |
193 | intel_overlay_map_regs(struct intel_overlay *overlay) | ||
181 | { | 194 | { |
182 | drm_i915_private_t *dev_priv = overlay->dev->dev_private; | 195 | drm_i915_private_t *dev_priv = overlay->dev->dev_private; |
183 | struct overlay_registers *regs; | 196 | struct overlay_registers *regs; |
184 | 197 | ||
185 | /* no recursive mappings */ | 198 | if (OVERLAY_NEEDS_PHYSICAL(overlay->dev)) |
186 | BUG_ON(overlay->virt_addr); | 199 | regs = overlay->reg_bo->phys_obj->handle->vaddr; |
200 | else | ||
201 | regs = io_mapping_map_wc(dev_priv->mm.gtt_mapping, | ||
202 | overlay->reg_bo->gtt_offset); | ||
187 | 203 | ||
188 | if (OVERLAY_NONPHYSICAL(overlay->dev)) { | 204 | return regs; |
189 | regs = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping, | 205 | } |
190 | overlay->reg_bo->gtt_offset, | ||
191 | KM_USER0); | ||
192 | 206 | ||
193 | if (!regs) { | 207 | static void intel_overlay_unmap_regs(struct intel_overlay *overlay, |
194 | DRM_ERROR("failed to map overlay regs in GTT\n"); | 208 | struct overlay_registers *regs) |
195 | return NULL; | 209 | { |
196 | } | 210 | if (!OVERLAY_NEEDS_PHYSICAL(overlay->dev)) |
197 | } else | 211 | io_mapping_unmap(regs); |
198 | regs = overlay->reg_bo->phys_obj->handle->vaddr; | 212 | } |
213 | |||
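
The atomic mapping (and its BUG_ON against recursive use) is gone; callers now hold the returned pointer explicitly and must pair every map with an unmap. Only the GTT path needs the unmap, since the physical-object path hands back a plain kernel pointer. A usage sketch (the OCMD poke is illustrative):

    struct overlay_registers *regs;

    regs = intel_overlay_map_regs(overlay);
    if (regs) {
            regs->OCMD &= ~OVERLAY_ENABLE;          /* illustrative register write */
            intel_overlay_unmap_regs(overlay, regs);
    }
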
214 | static int intel_overlay_do_wait_request(struct intel_overlay *overlay, | ||
215 | struct drm_i915_gem_request *request, | ||
216 | void (*tail)(struct intel_overlay *)) | ||
217 | { | ||
218 | struct drm_device *dev = overlay->dev; | ||
219 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
220 | int ret; | ||
199 | 221 | ||
200 | return overlay->virt_addr = regs; | 222 | BUG_ON(overlay->last_flip_req); |
223 | ret = i915_add_request(LP_RING(dev_priv), NULL, request); | ||
224 | if (ret) { | ||
225 | kfree(request); | ||
226 | return ret; | ||
227 | } | ||
228 | overlay->last_flip_req = request->seqno; | ||
229 | overlay->flip_tail = tail; | ||
230 | ret = i915_wait_request(LP_RING(dev_priv), overlay->last_flip_req); | ||
231 | if (ret) | ||
232 | return ret; | ||
233 | |||
234 | overlay->last_flip_req = 0; | ||
235 | return 0; | ||
201 | } | 236 | } |
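
The helper above also fixes the ownership rule used throughout this rework: i915_add_request() consumes the request on success, so the caller frees it only when queuing fails. The pattern in isolation:

    struct drm_i915_gem_request *request;
    int ret;

    request = kzalloc(sizeof(*request), GFP_KERNEL);
    if (request == NULL)
            return -ENOMEM;

    ret = i915_add_request(LP_RING(dev_priv), NULL, request);
    if (ret) {
            kfree(request);         /* never queued; still ours to free */
            return ret;
    }
    /* queued: the ring owns the request from here on */
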
202 | 237 | ||
203 | static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay) | 238 | /* Workaround for i830 bug where pipe A must be enabled to change control regs */
239 | static int | ||
240 | i830_activate_pipe_a(struct drm_device *dev) | ||
204 | { | 241 | { |
205 | if (OVERLAY_NONPHYSICAL(overlay->dev)) | 242 | drm_i915_private_t *dev_priv = dev->dev_private; |
206 | io_mapping_unmap_atomic(overlay->virt_addr, KM_USER0); | 243 | struct intel_crtc *crtc; |
244 | struct drm_crtc_helper_funcs *crtc_funcs; | ||
245 | struct drm_display_mode vesa_640x480 = { | ||
246 | DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656, | ||
247 | 752, 800, 0, 480, 489, 492, 525, 0, | ||
248 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) | ||
249 | }, *mode; | ||
250 | |||
251 | crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[0]); | ||
252 | if (crtc->dpms_mode == DRM_MODE_DPMS_ON) | ||
253 | return 0; | ||
207 | 254 | ||
208 | overlay->virt_addr = NULL; | 255 | /* most i8xx have pipe A forced on, so don't trust dpms mode */
256 | if (I915_READ(_PIPEACONF) & PIPECONF_ENABLE) | ||
257 | return 0; | ||
209 | 258 | ||
210 | return; | 259 | crtc_funcs = crtc->base.helper_private; |
260 | if (crtc_funcs->dpms == NULL) | ||
261 | return 0; | ||
262 | |||
263 | DRM_DEBUG_DRIVER("Enabling pipe A in order to enable overlay\n"); | ||
264 | |||
265 | mode = drm_mode_duplicate(dev, &vesa_640x480); | ||
266 | drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V); | ||
267 | if (!drm_crtc_helper_set_mode(&crtc->base, mode, | ||
268 | crtc->base.x, crtc->base.y, | ||
269 | crtc->base.fb)) | ||
270 | return 0; | ||
271 | |||
272 | crtc_funcs->dpms(&crtc->base, DRM_MODE_DPMS_ON); | ||
273 | return 1; | ||
274 | } | ||
275 | |||
276 | static void | ||
277 | i830_deactivate_pipe_a(struct drm_device *dev) | ||
278 | { | ||
279 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
280 | struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[0]; | ||
281 | struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; | ||
282 | |||
283 | crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF); | ||
211 | } | 284 | } |
212 | 285 | ||
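
The return convention of i830_activate_pipe_a() tells the caller whether there is anything to undo: negative is an error, 0 means pipe A was already usable, and 1 means it was force-enabled and must be deactivated afterwards. Caller-side sketch (as used by intel_overlay_on() below):

    int pipe_a_quirk = 0;

    if (IS_I830(dev)) {
            pipe_a_quirk = i830_activate_pipe_a(dev);
            if (pipe_a_quirk < 0)
                    return pipe_a_quirk;
    }
    /* ... touch the overlay control registers ... */
    if (pipe_a_quirk)
            i830_deactivate_pipe_a(dev);
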
213 | /* overlay needs to be disabled in OCMD reg */ | 286 | /* overlay needs to be disabled in OCMD reg */
214 | static int intel_overlay_on(struct intel_overlay *overlay) | 287 | static int intel_overlay_on(struct intel_overlay *overlay) |
215 | { | 288 | { |
216 | struct drm_device *dev = overlay->dev; | 289 | struct drm_device *dev = overlay->dev; |
290 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
291 | struct drm_i915_gem_request *request; | ||
292 | int pipe_a_quirk = 0; | ||
217 | int ret; | 293 | int ret; |
218 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
219 | 294 | ||
220 | BUG_ON(overlay->active); | 295 | BUG_ON(overlay->active); |
221 | |||
222 | overlay->active = 1; | 296 | overlay->active = 1; |
223 | overlay->hw_wedged = NEEDS_WAIT_FOR_FLIP; | ||
224 | 297 | ||
225 | BEGIN_LP_RING(4); | 298 | if (IS_I830(dev)) { |
299 | pipe_a_quirk = i830_activate_pipe_a(dev); | ||
300 | if (pipe_a_quirk < 0) | ||
301 | return pipe_a_quirk; | ||
302 | } | ||
303 | |||
304 | request = kzalloc(sizeof(*request), GFP_KERNEL); | ||
305 | if (request == NULL) { | ||
306 | ret = -ENOMEM; | ||
307 | goto out; | ||
308 | } | ||
309 | |||
310 | ret = BEGIN_LP_RING(4); | ||
311 | if (ret) { | ||
312 | kfree(request); | ||
313 | goto out; | ||
314 | } | ||
315 | |||
226 | OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_ON); | 316 | OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_ON); |
227 | OUT_RING(overlay->flip_addr | OFC_UPDATE); | 317 | OUT_RING(overlay->flip_addr | OFC_UPDATE); |
228 | OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); | 318 | OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); |
229 | OUT_RING(MI_NOOP); | 319 | OUT_RING(MI_NOOP); |
230 | ADVANCE_LP_RING(); | 320 | ADVANCE_LP_RING(); |
231 | 321 | ||
232 | overlay->last_flip_req = | 322 | ret = intel_overlay_do_wait_request(overlay, request, NULL); |
233 | i915_add_request(dev, NULL, 0, &dev_priv->render_ring); | 323 | out: |
234 | if (overlay->last_flip_req == 0) | 324 | if (pipe_a_quirk) |
235 | return -ENOMEM; | 325 | i830_deactivate_pipe_a(dev); |
236 | 326 | ||
237 | ret = i915_do_wait_request(dev, | 327 | return ret; |
238 | overlay->last_flip_req, 1, &dev_priv->render_ring); | ||
239 | if (ret != 0) | ||
240 | return ret; | ||
241 | |||
242 | overlay->hw_wedged = 0; | ||
243 | overlay->last_flip_req = 0; | ||
244 | return 0; | ||
245 | } | 328 | } |
246 | 329 | ||
247 | /* overlay needs to be enabled in OCMD reg */ | 330 | /* overlay needs to be enabled in OCMD reg */ |
248 | static void intel_overlay_continue(struct intel_overlay *overlay, | 331 | static int intel_overlay_continue(struct intel_overlay *overlay, |
249 | bool load_polyphase_filter) | 332 | bool load_polyphase_filter) |
250 | { | 333 | { |
251 | struct drm_device *dev = overlay->dev; | 334 | struct drm_device *dev = overlay->dev; |
252 | drm_i915_private_t *dev_priv = dev->dev_private; | 335 | drm_i915_private_t *dev_priv = dev->dev_private; |
336 | struct drm_i915_gem_request *request; | ||
253 | u32 flip_addr = overlay->flip_addr; | 337 | u32 flip_addr = overlay->flip_addr; |
254 | u32 tmp; | 338 | u32 tmp; |
339 | int ret; | ||
255 | 340 | ||
256 | BUG_ON(!overlay->active); | 341 | BUG_ON(!overlay->active); |
257 | 342 | ||
343 | request = kzalloc(sizeof(*request), GFP_KERNEL); | ||
344 | if (request == NULL) | ||
345 | return -ENOMEM; | ||
346 | |||
258 | if (load_polyphase_filter) | 347 | if (load_polyphase_filter) |
259 | flip_addr |= OFC_UPDATE; | 348 | flip_addr |= OFC_UPDATE; |
260 | 349 | ||
@@ -263,226 +352,154 @@ static void intel_overlay_continue(struct intel_overlay *overlay, | |||
263 | if (tmp & (1 << 17)) | 352 | if (tmp & (1 << 17)) |
264 | DRM_DEBUG("overlay underrun, DOVSTA: %x\n", tmp); | 353 | DRM_DEBUG("overlay underrun, DOVSTA: %x\n", tmp); |
265 | 354 | ||
266 | BEGIN_LP_RING(2); | 355 | ret = BEGIN_LP_RING(2); |
356 | if (ret) { | ||
357 | kfree(request); | ||
358 | return ret; | ||
359 | } | ||
267 | OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE); | 360 | OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE); |
268 | OUT_RING(flip_addr); | 361 | OUT_RING(flip_addr); |
269 | ADVANCE_LP_RING(); | 362 | ADVANCE_LP_RING(); |
270 | 363 | ||
271 | overlay->last_flip_req = | 364 | ret = i915_add_request(LP_RING(dev_priv), NULL, request); |
272 | i915_add_request(dev, NULL, 0, &dev_priv->render_ring); | 365 | if (ret) { |
366 | kfree(request); | ||
367 | return ret; | ||
368 | } | ||
369 | |||
370 | overlay->last_flip_req = request->seqno; | ||
371 | return 0; | ||
273 | } | 372 | } |
274 | 373 | ||
275 | static int intel_overlay_wait_flip(struct intel_overlay *overlay) | 374 | static void intel_overlay_release_old_vid_tail(struct intel_overlay *overlay) |
276 | { | 375 | { |
277 | struct drm_device *dev = overlay->dev; | 376 | struct drm_i915_gem_object *obj = overlay->old_vid_bo; |
278 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
279 | int ret; | ||
280 | u32 tmp; | ||
281 | |||
282 | if (overlay->last_flip_req != 0) { | ||
283 | ret = i915_do_wait_request(dev, overlay->last_flip_req, | ||
284 | 1, &dev_priv->render_ring); | ||
285 | if (ret == 0) { | ||
286 | overlay->last_flip_req = 0; | ||
287 | |||
288 | tmp = I915_READ(ISR); | ||
289 | 377 | ||
290 | if (!(tmp & I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT)) | 378 | i915_gem_object_unpin(obj); |
291 | return 0; | 379 | drm_gem_object_unreference(&obj->base); |
292 | } | ||
293 | } | ||
294 | 380 | ||
295 | /* synchronous slowpath */ | 381 | overlay->old_vid_bo = NULL; |
296 | overlay->hw_wedged = RELEASE_OLD_VID; | 382 | } |
297 | 383 | ||
298 | BEGIN_LP_RING(2); | 384 | static void intel_overlay_off_tail(struct intel_overlay *overlay) |
299 | OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); | 385 | { |
300 | OUT_RING(MI_NOOP); | 386 | struct drm_i915_gem_object *obj = overlay->vid_bo; |
301 | ADVANCE_LP_RING(); | ||
302 | 387 | ||
303 | overlay->last_flip_req = | 388 | /* never have the overlay hw on without showing a frame */ |
304 | i915_add_request(dev, NULL, 0, &dev_priv->render_ring); | 389 | BUG_ON(!overlay->vid_bo); |
305 | if (overlay->last_flip_req == 0) | ||
306 | return -ENOMEM; | ||
307 | 390 | ||
308 | ret = i915_do_wait_request(dev, overlay->last_flip_req, | 391 | i915_gem_object_unpin(obj); |
309 | 1, &dev_priv->render_ring); | 392 | drm_gem_object_unreference(&obj->base); |
310 | if (ret != 0) | 393 | overlay->vid_bo = NULL; |
311 | return ret; | ||
312 | 394 | ||
313 | overlay->hw_wedged = 0; | 395 | overlay->crtc->overlay = NULL; |
314 | overlay->last_flip_req = 0; | 396 | overlay->crtc = NULL; |
315 | return 0; | 397 | overlay->active = 0; |
316 | } | 398 | } |
317 | 399 | ||
318 | /* overlay needs to be disabled in OCMD reg */ | 400 | /* overlay needs to be disabled in OCMD reg */ |
319 | static int intel_overlay_off(struct intel_overlay *overlay) | 401 | static int intel_overlay_off(struct intel_overlay *overlay) |
320 | { | 402 | { |
321 | u32 flip_addr = overlay->flip_addr; | ||
322 | struct drm_device *dev = overlay->dev; | 403 | struct drm_device *dev = overlay->dev; |
323 | drm_i915_private_t *dev_priv = dev->dev_private; | 404 | struct drm_i915_private *dev_priv = dev->dev_private; |
405 | u32 flip_addr = overlay->flip_addr; | ||
406 | struct drm_i915_gem_request *request; | ||
324 | int ret; | 407 | int ret; |
325 | 408 | ||
326 | BUG_ON(!overlay->active); | 409 | BUG_ON(!overlay->active); |
327 | 410 | ||
411 | request = kzalloc(sizeof(*request), GFP_KERNEL); | ||
412 | if (request == NULL) | ||
413 | return -ENOMEM; | ||
414 | |||
328 | /* According to intel docs the overlay hw may hang (when switching | 415 | /* According to intel docs the overlay hw may hang (when switching |
329 | * off) without loading the filter coeffs. It is however unclear whether | 416 | * off) without loading the filter coeffs. It is however unclear whether |
330 | * this applies to the disabling of the overlay or to the switching off | 417 | * this applies to the disabling of the overlay or to the switching off |
331 | * of the hw. Do it in both cases */ | 418 | * of the hw. Do it in both cases */ |
332 | flip_addr |= OFC_UPDATE; | 419 | flip_addr |= OFC_UPDATE; |
333 | 420 | ||
421 | ret = BEGIN_LP_RING(6); | ||
422 | if (ret) { | ||
423 | kfree(request); | ||
424 | return ret; | ||
425 | } | ||
334 | /* wait for overlay to go idle */ | 426 | /* wait for overlay to go idle */ |
335 | overlay->hw_wedged = SWITCH_OFF_STAGE_1; | ||
336 | |||
337 | BEGIN_LP_RING(4); | ||
338 | OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE); | 427 | OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE); |
339 | OUT_RING(flip_addr); | 428 | OUT_RING(flip_addr); |
340 | OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); | 429 | OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); |
341 | OUT_RING(MI_NOOP); | ||
342 | ADVANCE_LP_RING(); | ||
343 | |||
344 | overlay->last_flip_req = | ||
345 | i915_add_request(dev, NULL, 0, &dev_priv->render_ring); | ||
346 | if (overlay->last_flip_req == 0) | ||
347 | return -ENOMEM; | ||
348 | |||
349 | ret = i915_do_wait_request(dev, overlay->last_flip_req, | ||
350 | 1, &dev_priv->render_ring); | ||
351 | if (ret != 0) | ||
352 | return ret; | ||
353 | |||
354 | /* turn overlay off */ | 430 | /* turn overlay off */ |
355 | overlay->hw_wedged = SWITCH_OFF_STAGE_2; | 431 | OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_OFF); |
356 | |||
357 | BEGIN_LP_RING(4); | ||
358 | OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_OFF); | ||
359 | OUT_RING(flip_addr); | 432 | OUT_RING(flip_addr); |
360 | OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); | 433 | OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); |
361 | OUT_RING(MI_NOOP); | ||
362 | ADVANCE_LP_RING(); | 434 | ADVANCE_LP_RING(); |
363 | 435 | ||
364 | overlay->last_flip_req = | 436 | return intel_overlay_do_wait_request(overlay, request, |
365 | i915_add_request(dev, NULL, 0, &dev_priv->render_ring); | 437 | intel_overlay_off_tail); |
366 | if (overlay->last_flip_req == 0) | ||
367 | return -ENOMEM; | ||
368 | |||
369 | ret = i915_do_wait_request(dev, overlay->last_flip_req, | ||
370 | 1, &dev_priv->render_ring); | ||
371 | if (ret != 0) | ||
372 | return ret; | ||
373 | |||
374 | overlay->hw_wedged = 0; | ||
375 | overlay->last_flip_req = 0; | ||
376 | return ret; | ||
377 | } | ||
378 | |||
379 | static void intel_overlay_off_tail(struct intel_overlay *overlay) | ||
380 | { | ||
381 | struct drm_gem_object *obj; | ||
382 | |||
383 | /* never have the overlay hw on without showing a frame */ | ||
384 | BUG_ON(!overlay->vid_bo); | ||
385 | obj = &overlay->vid_bo->base; | ||
386 | |||
387 | i915_gem_object_unpin(obj); | ||
388 | drm_gem_object_unreference(obj); | ||
389 | overlay->vid_bo = NULL; | ||
390 | |||
391 | overlay->crtc->overlay = NULL; | ||
392 | overlay->crtc = NULL; | ||
393 | overlay->active = 0; | ||
394 | } | 438 | } |
395 | 439 | ||
396 | /* recover from an interruption due to a signal | 440 | /* recover from an interruption due to a signal |
397 | * We have to be careful not to repeat work forever and make forward progress. */ | 441 | * We have to be careful not to repeat work forever and make forward progress. */ |
398 | int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay, | 442 | static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay) |
399 | int interruptible) | ||
400 | { | 443 | { |
401 | struct drm_device *dev = overlay->dev; | 444 | struct drm_device *dev = overlay->dev; |
402 | struct drm_gem_object *obj; | ||
403 | drm_i915_private_t *dev_priv = dev->dev_private; | 445 | drm_i915_private_t *dev_priv = dev->dev_private; |
404 | u32 flip_addr; | ||
405 | int ret; | 446 | int ret; |
406 | 447 | ||
407 | if (overlay->hw_wedged == HW_WEDGED) | 448 | if (overlay->last_flip_req == 0) |
408 | return -EIO; | 449 | return 0; |
409 | |||
410 | if (overlay->last_flip_req == 0) { | ||
411 | overlay->last_flip_req = | ||
412 | i915_add_request(dev, NULL, 0, &dev_priv->render_ring); | ||
413 | if (overlay->last_flip_req == 0) | ||
414 | return -ENOMEM; | ||
415 | } | ||
416 | 450 | ||
417 | ret = i915_do_wait_request(dev, overlay->last_flip_req, | 451 | ret = i915_wait_request(LP_RING(dev_priv), overlay->last_flip_req); |
418 | interruptible, &dev_priv->render_ring); | 452 | if (ret) |
419 | if (ret != 0) | ||
420 | return ret; | 453 | return ret; |
421 | 454 | ||
422 | switch (overlay->hw_wedged) { | 455 | if (overlay->flip_tail) |
423 | case RELEASE_OLD_VID: | 456 | overlay->flip_tail(overlay); |
424 | obj = &overlay->old_vid_bo->base; | ||
425 | i915_gem_object_unpin(obj); | ||
426 | drm_gem_object_unreference(obj); | ||
427 | overlay->old_vid_bo = NULL; | ||
428 | break; | ||
429 | case SWITCH_OFF_STAGE_1: | ||
430 | flip_addr = overlay->flip_addr; | ||
431 | flip_addr |= OFC_UPDATE; | ||
432 | |||
433 | overlay->hw_wedged = SWITCH_OFF_STAGE_2; | ||
434 | |||
435 | BEGIN_LP_RING(4); | ||
436 | OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_OFF); | ||
437 | OUT_RING(flip_addr); | ||
438 | OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); | ||
439 | OUT_RING(MI_NOOP); | ||
440 | ADVANCE_LP_RING(); | ||
441 | |||
442 | overlay->last_flip_req = i915_add_request(dev, NULL, | ||
443 | 0, &dev_priv->render_ring); | ||
444 | if (overlay->last_flip_req == 0) | ||
445 | return -ENOMEM; | ||
446 | |||
447 | ret = i915_do_wait_request(dev, overlay->last_flip_req, | ||
448 | interruptible, &dev_priv->render_ring); | ||
449 | if (ret != 0) | ||
450 | return ret; | ||
451 | |||
452 | case SWITCH_OFF_STAGE_2: | ||
453 | intel_overlay_off_tail(overlay); | ||
454 | break; | ||
455 | default: | ||
456 | BUG_ON(overlay->hw_wedged != NEEDS_WAIT_FOR_FLIP); | ||
457 | } | ||
458 | 457 | ||
459 | overlay->hw_wedged = 0; | ||
460 | overlay->last_flip_req = 0; | 458 | overlay->last_flip_req = 0; |
461 | return 0; | 459 | return 0; |
462 | } | 460 | } |
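With the hw_wedged state machine gone, recovery reduces to waiting on the single outstanding request and running whichever tail was queued. A compact model of the callback scheme, with stand-in types:

    struct toy_overlay {
            unsigned int last_flip_req;               /* 0 == nothing pending */
            void (*flip_tail)(struct toy_overlay *);  /* queued cleanup step */
    };

    static int toy_wait_request(unsigned int seqno)
    {
            (void)seqno;
            return 0;   /* stand-in for i915_wait_request() */
    }

    static int toy_recover(struct toy_overlay *ov)
    {
            int ret;

            if (ov->last_flip_req == 0)
                    return 0;           /* nothing outstanding */

            ret = toy_wait_request(ov->last_flip_req);
            if (ret)
                    return ret;         /* e.g. interrupted by a signal */

            if (ov->flip_tail)
                    ov->flip_tail(ov);  /* release_old_vid_tail or off_tail */

            ov->last_flip_req = 0;
            return 0;
    }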
463 | 461 | ||
464 | /* Wait for pending overlay flip and release old frame. | 462 | /* Wait for pending overlay flip and release old frame. |
465 | * Needs to be called before the overlay registers are changed | 463 | * Needs to be called before the overlay registers are changed |
466 | * via intel_overlay_(un)map_regs_atomic */ | 464 | * via intel_overlay_(un)map_regs |
465 | */ | ||
467 | static int intel_overlay_release_old_vid(struct intel_overlay *overlay) | 466 | static int intel_overlay_release_old_vid(struct intel_overlay *overlay) |
468 | { | 467 | { |
468 | struct drm_device *dev = overlay->dev; | ||
469 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
469 | int ret; | 470 | int ret; |
470 | struct drm_gem_object *obj; | ||
471 | 471 | ||
472 | /* only wait if there is actually an old frame to release to | 472 | /* Only wait if there is actually an old frame to release to |
473 | * guarantee forward progress */ | 473 | * guarantee forward progress. |
474 | */ | ||
474 | if (!overlay->old_vid_bo) | 475 | if (!overlay->old_vid_bo) |
475 | return 0; | 476 | return 0; |
476 | 477 | ||
477 | ret = intel_overlay_wait_flip(overlay); | 478 | if (I915_READ(ISR) & I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT) { |
478 | if (ret != 0) | 479 | struct drm_i915_gem_request *request; |
479 | return ret; | ||
480 | 480 | ||
481 | obj = &overlay->old_vid_bo->base; | 481 | /* synchronous slowpath */ |
482 | i915_gem_object_unpin(obj); | 482 | request = kzalloc(sizeof(*request), GFP_KERNEL); |
483 | drm_gem_object_unreference(obj); | 483 | if (request == NULL) |
484 | overlay->old_vid_bo = NULL; | 484 | return -ENOMEM; |
485 | |||
486 | ret = BEGIN_LP_RING(2); | ||
487 | if (ret) { | ||
488 | kfree(request); | ||
489 | return ret; | ||
490 | } | ||
491 | |||
492 | OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); | ||
493 | OUT_RING(MI_NOOP); | ||
494 | ADVANCE_LP_RING(); | ||
495 | |||
496 | ret = intel_overlay_do_wait_request(overlay, request, | ||
497 | intel_overlay_release_old_vid_tail); | ||
498 | if (ret) | ||
499 | return ret; | ||
500 | } | ||
485 | 501 | ||
502 | intel_overlay_release_old_vid_tail(overlay); | ||
486 | return 0; | 503 | return 0; |
487 | } | 504 | } |
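So the rewritten release path pays for a MI_WAIT_FOR_EVENT round trip only while the ISR still reports a pending overlay flip. The same decision, modelled standalone with the register read stubbed out:

    #include <stdbool.h>

    #define FLIP_PENDING (1u << 8)  /* stand-in bit, not the real ISR layout */

    static unsigned int read_isr(void) { return 0; } /* I915_READ(ISR) */
    static int sync_slowpath(void)     { return 0; } /* MI_WAIT_FOR_EVENT batch */
    static void release_tail(void)     { }           /* unpin + unref old bo */

    static int release_old_vid(bool have_old_bo)
    {
            if (!have_old_bo)
                    return 0;   /* nothing to release: forward progress */

            if (read_isr() & FLIP_PENDING) {
                    int ret = sync_slowpath();
                    if (ret)
                            return ret;
            }

            release_tail();     /* hw no longer scans out the old frame */
            return 0;
    }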
488 | 505 | ||
@@ -506,65 +523,65 @@ struct put_image_params { | |||
506 | static int packed_depth_bytes(u32 format) | 523 | static int packed_depth_bytes(u32 format) |
507 | { | 524 | { |
508 | switch (format & I915_OVERLAY_DEPTH_MASK) { | 525 | switch (format & I915_OVERLAY_DEPTH_MASK) { |
509 | case I915_OVERLAY_YUV422: | 526 | case I915_OVERLAY_YUV422: |
510 | return 4; | 527 | return 4; |
511 | case I915_OVERLAY_YUV411: | 528 | case I915_OVERLAY_YUV411: |
512 | /* return 6; not implemented */ | 529 | /* return 6; not implemented */ |
513 | default: | 530 | default: |
514 | return -EINVAL; | 531 | return -EINVAL; |
515 | } | 532 | } |
516 | } | 533 | } |
517 | 534 | ||
518 | static int packed_width_bytes(u32 format, short width) | 535 | static int packed_width_bytes(u32 format, short width) |
519 | { | 536 | { |
520 | switch (format & I915_OVERLAY_DEPTH_MASK) { | 537 | switch (format & I915_OVERLAY_DEPTH_MASK) { |
521 | case I915_OVERLAY_YUV422: | 538 | case I915_OVERLAY_YUV422: |
522 | return width << 1; | 539 | return width << 1; |
523 | default: | 540 | default: |
524 | return -EINVAL; | 541 | return -EINVAL; |
525 | } | 542 | } |
526 | } | 543 | } |
527 | 544 | ||
528 | static int uv_hsubsampling(u32 format) | 545 | static int uv_hsubsampling(u32 format) |
529 | { | 546 | { |
530 | switch (format & I915_OVERLAY_DEPTH_MASK) { | 547 | switch (format & I915_OVERLAY_DEPTH_MASK) { |
531 | case I915_OVERLAY_YUV422: | 548 | case I915_OVERLAY_YUV422: |
532 | case I915_OVERLAY_YUV420: | 549 | case I915_OVERLAY_YUV420: |
533 | return 2; | 550 | return 2; |
534 | case I915_OVERLAY_YUV411: | 551 | case I915_OVERLAY_YUV411: |
535 | case I915_OVERLAY_YUV410: | 552 | case I915_OVERLAY_YUV410: |
536 | return 4; | 553 | return 4; |
537 | default: | 554 | default: |
538 | return -EINVAL; | 555 | return -EINVAL; |
539 | } | 556 | } |
540 | } | 557 | } |
541 | 558 | ||
542 | static int uv_vsubsampling(u32 format) | 559 | static int uv_vsubsampling(u32 format) |
543 | { | 560 | { |
544 | switch (format & I915_OVERLAY_DEPTH_MASK) { | 561 | switch (format & I915_OVERLAY_DEPTH_MASK) { |
545 | case I915_OVERLAY_YUV420: | 562 | case I915_OVERLAY_YUV420: |
546 | case I915_OVERLAY_YUV410: | 563 | case I915_OVERLAY_YUV410: |
547 | return 2; | 564 | return 2; |
548 | case I915_OVERLAY_YUV422: | 565 | case I915_OVERLAY_YUV422: |
549 | case I915_OVERLAY_YUV411: | 566 | case I915_OVERLAY_YUV411: |
550 | return 1; | 567 | return 1; |
551 | default: | 568 | default: |
552 | return -EINVAL; | 569 | return -EINVAL; |
553 | } | 570 | } |
554 | } | 571 | } |
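These subsampling factors translate directly into chroma plane sizes; for example, YUV420 (hsub == vsub == 2) yields quarter-size U and V planes:

    #include <stdio.h>

    int main(void)
    {
            /* YUV420: uv_hsubsampling() == 2, uv_vsubsampling() == 2 */
            int w = 320, h = 240, hsub = 2, vsub = 2;

            printf("Y plane:  %d x %d\n", w, h);               /* 320 x 240 */
            printf("U/V each: %d x %d\n", w / hsub, h / vsub); /* 160 x 120 */
            return 0;
    }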
555 | 572 | ||
556 | static u32 calc_swidthsw(struct drm_device *dev, u32 offset, u32 width) | 573 | static u32 calc_swidthsw(struct drm_device *dev, u32 offset, u32 width) |
557 | { | 574 | { |
558 | u32 mask, shift, ret; | 575 | u32 mask, shift, ret; |
559 | if (IS_I9XX(dev)) { | 576 | if (IS_GEN2(dev)) { |
560 | mask = 0x3f; | ||
561 | shift = 6; | ||
562 | } else { | ||
563 | mask = 0x1f; | 577 | mask = 0x1f; |
564 | shift = 5; | 578 | shift = 5; |
579 | } else { | ||
580 | mask = 0x3f; | ||
581 | shift = 6; | ||
565 | } | 582 | } |
566 | ret = ((offset + width + mask) >> shift) - (offset >> shift); | 583 | ret = ((offset + width + mask) >> shift) - (offset >> shift); |
567 | if (IS_I9XX(dev)) | 584 | if (!IS_GEN2(dev)) |
568 | ret <<= 1; | 585 | ret <<= 1; |
569 | ret -= 1; | 586 | ret -= 1; |
570 | return ret << 2; | 587 | return ret << 2; |
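A worked trace of the arithmetic above for a non-gen2 part (mask 0x3f, shift 6, result doubled), with offset 64 and width 320 as sample inputs:

    #include <stdio.h>

    int main(void)
    {
            unsigned offset = 64, width = 320;   /* sample inputs */
            unsigned mask = 0x3f, shift = 6;     /* non-gen2: 64-byte units */
            unsigned ret;

            ret = ((offset + width + mask) >> shift) - (offset >> shift);
            /* (447 >> 6) - (64 >> 6) = 6 - 1 = 5 spans of 64 bytes */
            ret <<= 1;                           /* !IS_GEN2 doubling -> 10 */
            ret -= 1;                            /* -> 9 */
            printf("SWIDTHSW = %u\n", ret << 2); /* 36 */
            return 0;
    }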
@@ -587,7 +604,9 @@ static const u16 y_static_hcoeffs[N_HORIZ_Y_TAPS * N_PHASES] = { | |||
587 | 0x3020, 0xb340, 0x1fb8, 0x34a0, 0xb060, | 604 | 0x3020, 0xb340, 0x1fb8, 0x34a0, 0xb060, |
588 | 0x3020, 0xb240, 0x1fe0, 0x32e0, 0xb040, | 605 | 0x3020, 0xb240, 0x1fe0, 0x32e0, 0xb040, |
589 | 0x3020, 0xb140, 0x1ff8, 0x3160, 0xb020, | 606 | 0x3020, 0xb140, 0x1ff8, 0x3160, 0xb020, |
590 | 0xb000, 0x3000, 0x0800, 0x3000, 0xb000}; | 607 | 0xb000, 0x3000, 0x0800, 0x3000, 0xb000 |
608 | }; | ||
609 | |||
591 | static const u16 uv_static_hcoeffs[N_HORIZ_UV_TAPS * N_PHASES] = { | 610 | static const u16 uv_static_hcoeffs[N_HORIZ_UV_TAPS * N_PHASES] = { |
592 | 0x3000, 0x1800, 0x1800, 0xb000, 0x18d0, 0x2e60, | 611 | 0x3000, 0x1800, 0x1800, 0xb000, 0x18d0, 0x2e60, |
593 | 0xb000, 0x1990, 0x2ce0, 0xb020, 0x1a68, 0x2b40, | 612 | 0xb000, 0x1990, 0x2ce0, 0xb020, 0x1a68, 0x2b40, |
@@ -597,7 +616,8 @@ static const u16 uv_static_hcoeffs[N_HORIZ_UV_TAPS * N_PHASES] = { | |||
597 | 0xb100, 0x1eb8, 0x3620, 0xb100, 0x1f18, 0x34a0, | 616 | 0xb100, 0x1eb8, 0x3620, 0xb100, 0x1f18, 0x34a0, |
598 | 0xb100, 0x1f68, 0x3360, 0xb0e0, 0x1fa8, 0x3240, | 617 | 0xb100, 0x1f68, 0x3360, 0xb0e0, 0x1fa8, 0x3240, |
599 | 0xb0c0, 0x1fe0, 0x3140, 0xb060, 0x1ff0, 0x30a0, | 618 | 0xb0c0, 0x1fe0, 0x3140, 0xb060, 0x1ff0, 0x30a0, |
600 | 0x3000, 0x0800, 0x3000}; | 619 | 0x3000, 0x0800, 0x3000 |
620 | }; | ||
601 | 621 | ||
602 | static void update_polyphase_filter(struct overlay_registers *regs) | 622 | static void update_polyphase_filter(struct overlay_registers *regs) |
603 | { | 623 | { |
@@ -630,29 +650,31 @@ static bool update_scaling_factors(struct intel_overlay *overlay, | |||
630 | yscale = 1 << FP_SHIFT; | 650 | yscale = 1 << FP_SHIFT; |
631 | 651 | ||
632 | /*if (params->format & I915_OVERLAY_YUV_PLANAR) {*/ | 652 | /*if (params->format & I915_OVERLAY_YUV_PLANAR) {*/ |
633 | xscale_UV = xscale/uv_hscale; | 653 | xscale_UV = xscale/uv_hscale; |
634 | yscale_UV = yscale/uv_vscale; | 654 | yscale_UV = yscale/uv_vscale; |
635 | /* make the Y scale to UV scale ratio an exact multiple */ | 655 | /* make the Y scale to UV scale ratio an exact multiple */ |
636 | xscale = xscale_UV * uv_hscale; | 656 | xscale = xscale_UV * uv_hscale; |
637 | yscale = yscale_UV * uv_vscale; | 657 | yscale = yscale_UV * uv_vscale; |
638 | /*} else { | 658 | /*} else { |
639 | xscale_UV = 0; | 659 | xscale_UV = 0; |
640 | yscale_UV = 0; | 660 | yscale_UV = 0; |
641 | }*/ | 661 | }*/ |
642 | 662 | ||
643 | if (xscale != overlay->old_xscale || yscale != overlay->old_yscale) | 663 | if (xscale != overlay->old_xscale || yscale != overlay->old_yscale) |
644 | scale_changed = true; | 664 | scale_changed = true; |
645 | overlay->old_xscale = xscale; | 665 | overlay->old_xscale = xscale; |
646 | overlay->old_yscale = yscale; | 666 | overlay->old_yscale = yscale; |
647 | 667 | ||
648 | regs->YRGBSCALE = ((yscale & FRACT_MASK) << 20) | 668 | regs->YRGBSCALE = (((yscale & FRACT_MASK) << 20) | |
649 | | ((xscale >> FP_SHIFT) << 16) | 669 | ((xscale >> FP_SHIFT) << 16) | |
650 | | ((xscale & FRACT_MASK) << 3); | 670 | ((xscale & FRACT_MASK) << 3)); |
651 | regs->UVSCALE = ((yscale_UV & FRACT_MASK) << 20) | 671 | |
652 | | ((xscale_UV >> FP_SHIFT) << 16) | 672 | regs->UVSCALE = (((yscale_UV & FRACT_MASK) << 20) | |
653 | | ((xscale_UV & FRACT_MASK) << 3); | 673 | ((xscale_UV >> FP_SHIFT) << 16) | |
654 | regs->UVSCALEV = ((yscale >> FP_SHIFT) << 16) | 674 | ((xscale_UV & FRACT_MASK) << 3)); |
655 | | ((yscale_UV >> FP_SHIFT) << 0); | 675 | |
676 | regs->UVSCALEV = ((((yscale >> FP_SHIFT) << 16) | | ||
677 | ((yscale_UV >> FP_SHIFT) << 0))); | ||
656 | 678 | ||
657 | if (scale_changed) | 679 | if (scale_changed) |
658 | update_polyphase_filter(regs); | 680 | update_polyphase_filter(regs); |
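The field packing above leaves 12 bits for each fraction (shifts of 20 and 3), so FP_SHIFT == 12 and FRACT_MASK == 0xfff are assumed in this small illustration of how one scale factor splits into integer and fraction parts:

    #include <stdio.h>

    int main(void)
    {
            unsigned fp_shift = 12, fract_mask = 0xfff;    /* assumed layout */
            unsigned src_w = 640, dst_w = 480;
            unsigned xscale = (src_w << fp_shift) / dst_w; /* ~1.333 -> 0x1555 */

            printf("integer part: %u\n", xscale >> fp_shift);      /* 1 */
            printf("fraction:     0x%03x\n", xscale & fract_mask); /* 0x555 */
            return 0;
    }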
@@ -664,22 +686,28 @@ static void update_colorkey(struct intel_overlay *overlay, | |||
664 | struct overlay_registers *regs) | 686 | struct overlay_registers *regs) |
665 | { | 687 | { |
666 | u32 key = overlay->color_key; | 688 | u32 key = overlay->color_key; |
689 | |||
667 | switch (overlay->crtc->base.fb->bits_per_pixel) { | 690 | switch (overlay->crtc->base.fb->bits_per_pixel) { |
668 | case 8: | 691 | case 8: |
669 | regs->DCLRKV = 0; | 692 | regs->DCLRKV = 0; |
670 | regs->DCLRKM = CLK_RGB8I_MASK | DST_KEY_ENABLE; | 693 | regs->DCLRKM = CLK_RGB8I_MASK | DST_KEY_ENABLE; |
671 | case 16: | 694 | break; |
672 | if (overlay->crtc->base.fb->depth == 15) { | 695 | |
673 | regs->DCLRKV = RGB15_TO_COLORKEY(key); | 696 | case 16: |
674 | regs->DCLRKM = CLK_RGB15_MASK | DST_KEY_ENABLE; | 697 | if (overlay->crtc->base.fb->depth == 15) { |
675 | } else { | 698 | regs->DCLRKV = RGB15_TO_COLORKEY(key); |
676 | regs->DCLRKV = RGB16_TO_COLORKEY(key); | 699 | regs->DCLRKM = CLK_RGB15_MASK | DST_KEY_ENABLE; |
677 | regs->DCLRKM = CLK_RGB16_MASK | DST_KEY_ENABLE; | 700 | } else { |
678 | } | 701 | regs->DCLRKV = RGB16_TO_COLORKEY(key); |
679 | case 24: | 702 | regs->DCLRKM = CLK_RGB16_MASK | DST_KEY_ENABLE; |
680 | case 32: | 703 | } |
681 | regs->DCLRKV = key; | 704 | break; |
682 | regs->DCLRKM = CLK_RGB24_MASK | DST_KEY_ENABLE; | 705 | |
706 | case 24: | ||
707 | case 32: | ||
708 | regs->DCLRKV = key; | ||
709 | regs->DCLRKM = CLK_RGB24_MASK | DST_KEY_ENABLE; | ||
710 | break; | ||
683 | } | 711 | } |
684 | } | 712 | } |
685 | 713 | ||
@@ -689,53 +717,52 @@ static u32 overlay_cmd_reg(struct put_image_params *params) | |||
689 | 717 | ||
690 | if (params->format & I915_OVERLAY_YUV_PLANAR) { | 718 | if (params->format & I915_OVERLAY_YUV_PLANAR) { |
691 | switch (params->format & I915_OVERLAY_DEPTH_MASK) { | 719 | switch (params->format & I915_OVERLAY_DEPTH_MASK) { |
692 | case I915_OVERLAY_YUV422: | 720 | case I915_OVERLAY_YUV422: |
693 | cmd |= OCMD_YUV_422_PLANAR; | 721 | cmd |= OCMD_YUV_422_PLANAR; |
694 | break; | 722 | break; |
695 | case I915_OVERLAY_YUV420: | 723 | case I915_OVERLAY_YUV420: |
696 | cmd |= OCMD_YUV_420_PLANAR; | 724 | cmd |= OCMD_YUV_420_PLANAR; |
697 | break; | 725 | break; |
698 | case I915_OVERLAY_YUV411: | 726 | case I915_OVERLAY_YUV411: |
699 | case I915_OVERLAY_YUV410: | 727 | case I915_OVERLAY_YUV410: |
700 | cmd |= OCMD_YUV_410_PLANAR; | 728 | cmd |= OCMD_YUV_410_PLANAR; |
701 | break; | 729 | break; |
702 | } | 730 | } |
703 | } else { /* YUV packed */ | 731 | } else { /* YUV packed */ |
704 | switch (params->format & I915_OVERLAY_DEPTH_MASK) { | 732 | switch (params->format & I915_OVERLAY_DEPTH_MASK) { |
705 | case I915_OVERLAY_YUV422: | 733 | case I915_OVERLAY_YUV422: |
706 | cmd |= OCMD_YUV_422_PACKED; | 734 | cmd |= OCMD_YUV_422_PACKED; |
707 | break; | 735 | break; |
708 | case I915_OVERLAY_YUV411: | 736 | case I915_OVERLAY_YUV411: |
709 | cmd |= OCMD_YUV_411_PACKED; | 737 | cmd |= OCMD_YUV_411_PACKED; |
710 | break; | 738 | break; |
711 | } | 739 | } |
712 | 740 | ||
713 | switch (params->format & I915_OVERLAY_SWAP_MASK) { | 741 | switch (params->format & I915_OVERLAY_SWAP_MASK) { |
714 | case I915_OVERLAY_NO_SWAP: | 742 | case I915_OVERLAY_NO_SWAP: |
715 | break; | 743 | break; |
716 | case I915_OVERLAY_UV_SWAP: | 744 | case I915_OVERLAY_UV_SWAP: |
717 | cmd |= OCMD_UV_SWAP; | 745 | cmd |= OCMD_UV_SWAP; |
718 | break; | 746 | break; |
719 | case I915_OVERLAY_Y_SWAP: | 747 | case I915_OVERLAY_Y_SWAP: |
720 | cmd |= OCMD_Y_SWAP; | 748 | cmd |= OCMD_Y_SWAP; |
721 | break; | 749 | break; |
722 | case I915_OVERLAY_Y_AND_UV_SWAP: | 750 | case I915_OVERLAY_Y_AND_UV_SWAP: |
723 | cmd |= OCMD_Y_AND_UV_SWAP; | 751 | cmd |= OCMD_Y_AND_UV_SWAP; |
724 | break; | 752 | break; |
725 | } | 753 | } |
726 | } | 754 | } |
727 | 755 | ||
728 | return cmd; | 756 | return cmd; |
729 | } | 757 | } |
730 | 758 | ||
731 | int intel_overlay_do_put_image(struct intel_overlay *overlay, | 759 | static int intel_overlay_do_put_image(struct intel_overlay *overlay, |
732 | struct drm_gem_object *new_bo, | 760 | struct drm_i915_gem_object *new_bo, |
733 | struct put_image_params *params) | 761 | struct put_image_params *params) |
734 | { | 762 | { |
735 | int ret, tmp_width; | 763 | int ret, tmp_width; |
736 | struct overlay_registers *regs; | 764 | struct overlay_registers *regs; |
737 | bool scale_changed = false; | 765 | bool scale_changed = false; |
738 | struct drm_i915_gem_object *bo_priv = to_intel_bo(new_bo); | ||
739 | struct drm_device *dev = overlay->dev; | 766 | struct drm_device *dev = overlay->dev; |
740 | 767 | ||
741 | BUG_ON(!mutex_is_locked(&dev->struct_mutex)); | 768 | BUG_ON(!mutex_is_locked(&dev->struct_mutex)); |
@@ -746,7 +773,7 @@ int intel_overlay_do_put_image(struct intel_overlay *overlay, | |||
746 | if (ret != 0) | 773 | if (ret != 0) |
747 | return ret; | 774 | return ret; |
748 | 775 | ||
749 | ret = i915_gem_object_pin(new_bo, PAGE_SIZE); | 776 | ret = i915_gem_object_pin(new_bo, PAGE_SIZE, true); |
750 | if (ret != 0) | 777 | if (ret != 0) |
751 | return ret; | 778 | return ret; |
752 | 779 | ||
@@ -754,25 +781,29 @@ int intel_overlay_do_put_image(struct intel_overlay *overlay, | |||
754 | if (ret != 0) | 781 | if (ret != 0) |
755 | goto out_unpin; | 782 | goto out_unpin; |
756 | 783 | ||
784 | ret = i915_gem_object_put_fence(new_bo); | ||
785 | if (ret) | ||
786 | goto out_unpin; | ||
787 | |||
757 | if (!overlay->active) { | 788 | if (!overlay->active) { |
758 | regs = intel_overlay_map_regs_atomic(overlay); | 789 | regs = intel_overlay_map_regs(overlay); |
759 | if (!regs) { | 790 | if (!regs) { |
760 | ret = -ENOMEM; | 791 | ret = -ENOMEM; |
761 | goto out_unpin; | 792 | goto out_unpin; |
762 | } | 793 | } |
763 | regs->OCONFIG = OCONF_CC_OUT_8BIT; | 794 | regs->OCONFIG = OCONF_CC_OUT_8BIT; |
764 | if (IS_I965GM(overlay->dev)) | 795 | if (IS_GEN4(overlay->dev)) |
765 | regs->OCONFIG |= OCONF_CSC_MODE_BT709; | 796 | regs->OCONFIG |= OCONF_CSC_MODE_BT709; |
766 | regs->OCONFIG |= overlay->crtc->pipe == 0 ? | 797 | regs->OCONFIG |= overlay->crtc->pipe == 0 ? |
767 | OCONF_PIPE_A : OCONF_PIPE_B; | 798 | OCONF_PIPE_A : OCONF_PIPE_B; |
768 | intel_overlay_unmap_regs_atomic(overlay); | 799 | intel_overlay_unmap_regs(overlay, regs); |
769 | 800 | ||
770 | ret = intel_overlay_on(overlay); | 801 | ret = intel_overlay_on(overlay); |
771 | if (ret != 0) | 802 | if (ret != 0) |
772 | goto out_unpin; | 803 | goto out_unpin; |
773 | } | 804 | } |
774 | 805 | ||
775 | regs = intel_overlay_map_regs_atomic(overlay); | 806 | regs = intel_overlay_map_regs(overlay); |
776 | if (!regs) { | 807 | if (!regs) { |
777 | ret = -ENOMEM; | 808 | ret = -ENOMEM; |
778 | goto out_unpin; | 809 | goto out_unpin; |
@@ -788,9 +819,9 @@ int intel_overlay_do_put_image(struct intel_overlay *overlay, | |||
788 | 819 | ||
789 | regs->SWIDTH = params->src_w; | 820 | regs->SWIDTH = params->src_w; |
790 | regs->SWIDTHSW = calc_swidthsw(overlay->dev, | 821 | regs->SWIDTHSW = calc_swidthsw(overlay->dev, |
791 | params->offset_Y, tmp_width); | 822 | params->offset_Y, tmp_width); |
792 | regs->SHEIGHT = params->src_h; | 823 | regs->SHEIGHT = params->src_h; |
793 | regs->OBUF_0Y = bo_priv->gtt_offset + params->offset_Y; | 824 | regs->OBUF_0Y = new_bo->gtt_offset + params->offset_Y; |
794 | regs->OSTRIDE = params->stride_Y; | 825 | regs->OSTRIDE = params->stride_Y; |
795 | 826 | ||
796 | if (params->format & I915_OVERLAY_YUV_PLANAR) { | 827 | if (params->format & I915_OVERLAY_YUV_PLANAR) { |
@@ -799,13 +830,13 @@ int intel_overlay_do_put_image(struct intel_overlay *overlay, | |||
799 | u32 tmp_U, tmp_V; | 830 | u32 tmp_U, tmp_V; |
800 | regs->SWIDTH |= (params->src_w/uv_hscale) << 16; | 831 | regs->SWIDTH |= (params->src_w/uv_hscale) << 16; |
801 | tmp_U = calc_swidthsw(overlay->dev, params->offset_U, | 832 | tmp_U = calc_swidthsw(overlay->dev, params->offset_U, |
802 | params->src_w/uv_hscale); | 833 | params->src_w/uv_hscale); |
803 | tmp_V = calc_swidthsw(overlay->dev, params->offset_V, | 834 | tmp_V = calc_swidthsw(overlay->dev, params->offset_V, |
804 | params->src_w/uv_hscale); | 835 | params->src_w/uv_hscale); |
805 | regs->SWIDTHSW |= max_t(u32, tmp_U, tmp_V) << 16; | 836 | regs->SWIDTHSW |= max_t(u32, tmp_U, tmp_V) << 16; |
806 | regs->SHEIGHT |= (params->src_h/uv_vscale) << 16; | 837 | regs->SHEIGHT |= (params->src_h/uv_vscale) << 16; |
807 | regs->OBUF_0U = bo_priv->gtt_offset + params->offset_U; | 838 | regs->OBUF_0U = new_bo->gtt_offset + params->offset_U; |
808 | regs->OBUF_0V = bo_priv->gtt_offset + params->offset_V; | 839 | regs->OBUF_0V = new_bo->gtt_offset + params->offset_V; |
809 | regs->OSTRIDE |= params->stride_UV << 16; | 840 | regs->OSTRIDE |= params->stride_UV << 16; |
810 | } | 841 | } |
811 | 842 | ||
@@ -815,12 +846,14 @@ int intel_overlay_do_put_image(struct intel_overlay *overlay, | |||
815 | 846 | ||
816 | regs->OCMD = overlay_cmd_reg(params); | 847 | regs->OCMD = overlay_cmd_reg(params); |
817 | 848 | ||
818 | intel_overlay_unmap_regs_atomic(overlay); | 849 | intel_overlay_unmap_regs(overlay, regs); |
819 | 850 | ||
820 | intel_overlay_continue(overlay, scale_changed); | 851 | ret = intel_overlay_continue(overlay, scale_changed); |
852 | if (ret) | ||
853 | goto out_unpin; | ||
821 | 854 | ||
822 | overlay->old_vid_bo = overlay->vid_bo; | 855 | overlay->old_vid_bo = overlay->vid_bo; |
823 | overlay->vid_bo = to_intel_bo(new_bo); | 856 | overlay->vid_bo = new_bo; |
824 | 857 | ||
825 | return 0; | 858 | return 0; |
826 | 859 | ||
@@ -831,18 +864,16 @@ out_unpin: | |||
831 | 864 | ||
832 | int intel_overlay_switch_off(struct intel_overlay *overlay) | 865 | int intel_overlay_switch_off(struct intel_overlay *overlay) |
833 | { | 866 | { |
834 | int ret; | ||
835 | struct overlay_registers *regs; | 867 | struct overlay_registers *regs; |
836 | struct drm_device *dev = overlay->dev; | 868 | struct drm_device *dev = overlay->dev; |
869 | int ret; | ||
837 | 870 | ||
838 | BUG_ON(!mutex_is_locked(&dev->struct_mutex)); | 871 | BUG_ON(!mutex_is_locked(&dev->struct_mutex)); |
839 | BUG_ON(!mutex_is_locked(&dev->mode_config.mutex)); | 872 | BUG_ON(!mutex_is_locked(&dev->mode_config.mutex)); |
840 | 873 | ||
841 | if (overlay->hw_wedged) { | 874 | ret = intel_overlay_recover_from_interrupt(overlay); |
842 | ret = intel_overlay_recover_from_interrupt(overlay, 1); | 875 | if (ret != 0) |
843 | if (ret != 0) | 876 | return ret; |
844 | return ret; | ||
845 | } | ||
846 | 877 | ||
847 | if (!overlay->active) | 878 | if (!overlay->active) |
848 | return 0; | 879 | return 0; |
@@ -851,33 +882,29 @@ int intel_overlay_switch_off(struct intel_overlay *overlay) | |||
851 | if (ret != 0) | 882 | if (ret != 0) |
852 | return ret; | 883 | return ret; |
853 | 884 | ||
854 | regs = intel_overlay_map_regs_atomic(overlay); | 885 | regs = intel_overlay_map_regs(overlay); |
855 | regs->OCMD = 0; | 886 | regs->OCMD = 0; |
856 | intel_overlay_unmap_regs_atomic(overlay); | 887 | intel_overlay_unmap_regs(overlay, regs); |
857 | 888 | ||
858 | ret = intel_overlay_off(overlay); | 889 | ret = intel_overlay_off(overlay); |
859 | if (ret != 0) | 890 | if (ret != 0) |
860 | return ret; | 891 | return ret; |
861 | 892 | ||
862 | intel_overlay_off_tail(overlay); | 893 | intel_overlay_off_tail(overlay); |
863 | |||
864 | return 0; | 894 | return 0; |
865 | } | 895 | } |
866 | 896 | ||
867 | static int check_overlay_possible_on_crtc(struct intel_overlay *overlay, | 897 | static int check_overlay_possible_on_crtc(struct intel_overlay *overlay, |
868 | struct intel_crtc *crtc) | 898 | struct intel_crtc *crtc) |
869 | { | 899 | { |
870 | drm_i915_private_t *dev_priv = overlay->dev->dev_private; | 900 | drm_i915_private_t *dev_priv = overlay->dev->dev_private; |
871 | u32 pipeconf; | ||
872 | int pipeconf_reg = (crtc->pipe == 0) ? PIPEACONF : PIPEBCONF; | ||
873 | 901 | ||
874 | if (!crtc->base.enabled || crtc->dpms_mode != DRM_MODE_DPMS_ON) | 902 | if (!crtc->active) |
875 | return -EINVAL; | 903 | return -EINVAL; |
876 | 904 | ||
877 | pipeconf = I915_READ(pipeconf_reg); | ||
878 | |||
879 | /* can't use the overlay with double wide pipe */ | 905 | /* can't use the overlay with double wide pipe */ |
880 | if (!IS_I965G(overlay->dev) && pipeconf & PIPEACONF_DOUBLE_WIDE) | 906 | if (INTEL_INFO(overlay->dev)->gen < 4 && |
907 | (I915_READ(PIPECONF(crtc->pipe)) & (PIPECONF_DOUBLE_WIDE | PIPECONF_ENABLE)) != PIPECONF_ENABLE) | ||
881 | return -EINVAL; | 908 | return -EINVAL; |
882 | 909 | ||
883 | return 0; | 910 | return 0; |
@@ -886,20 +913,22 @@ static int check_overlay_possible_on_crtc(struct intel_overlay *overlay, | |||
886 | static void update_pfit_vscale_ratio(struct intel_overlay *overlay) | 913 | static void update_pfit_vscale_ratio(struct intel_overlay *overlay) |
887 | { | 914 | { |
888 | struct drm_device *dev = overlay->dev; | 915 | struct drm_device *dev = overlay->dev; |
889 | drm_i915_private_t *dev_priv = dev->dev_private; | 916 | drm_i915_private_t *dev_priv = dev->dev_private; |
890 | u32 ratio; | ||
891 | u32 pfit_control = I915_READ(PFIT_CONTROL); | 917 | u32 pfit_control = I915_READ(PFIT_CONTROL); |
918 | u32 ratio; | ||
892 | 919 | ||
893 | /* XXX: This is not the same logic as in the xorg driver, but more in | 920 | /* XXX: This is not the same logic as in the xorg driver, but more in |
894 | * line with the intel documentation for the i965 */ | 921 | * line with the intel documentation for the i965 |
895 | if (!IS_I965G(dev) && (pfit_control & VERT_AUTO_SCALE)) { | 922 | */ |
896 | ratio = I915_READ(PFIT_AUTO_RATIOS) >> PFIT_VERT_SCALE_SHIFT; | 923 | if (INTEL_INFO(dev)->gen >= 4) { |
897 | } else { /* on i965 use the PGM reg to read out the autoscaler values */ | 924 | /* on i965 use the PGM reg to read out the autoscaler values */ |
898 | ratio = I915_READ(PFIT_PGM_RATIOS); | 925 | ratio = I915_READ(PFIT_PGM_RATIOS) >> PFIT_VERT_SCALE_SHIFT_965; |
899 | if (IS_I965G(dev)) | 926 | } else { |
900 | ratio >>= PFIT_VERT_SCALE_SHIFT_965; | 927 | if (pfit_control & VERT_AUTO_SCALE) |
928 | ratio = I915_READ(PFIT_AUTO_RATIOS); | ||
901 | else | 929 | else |
902 | ratio >>= PFIT_VERT_SCALE_SHIFT; | 930 | ratio = I915_READ(PFIT_PGM_RATIOS); |
931 | ratio >>= PFIT_VERT_SCALE_SHIFT; | ||
903 | } | 932 | } |
904 | 933 | ||
905 | overlay->pfit_vscale_ratio = ratio; | 934 | overlay->pfit_vscale_ratio = ratio; |
@@ -910,12 +939,10 @@ static int check_overlay_dst(struct intel_overlay *overlay, | |||
910 | { | 939 | { |
911 | struct drm_display_mode *mode = &overlay->crtc->base.mode; | 940 | struct drm_display_mode *mode = &overlay->crtc->base.mode; |
912 | 941 | ||
913 | if ((rec->dst_x < mode->crtc_hdisplay) | 942 | if (rec->dst_x < mode->crtc_hdisplay && |
914 | && (rec->dst_x + rec->dst_width | 943 | rec->dst_x + rec->dst_width <= mode->crtc_hdisplay && |
915 | <= mode->crtc_hdisplay) | 944 | rec->dst_y < mode->crtc_vdisplay && |
916 | && (rec->dst_y < mode->crtc_vdisplay) | 945 | rec->dst_y + rec->dst_height <= mode->crtc_vdisplay) |
917 | && (rec->dst_y + rec->dst_height | ||
918 | <= mode->crtc_vdisplay)) | ||
919 | return 0; | 946 | return 0; |
920 | else | 947 | else |
921 | return -EINVAL; | 948 | return -EINVAL; |
@@ -938,55 +965,61 @@ static int check_overlay_scaling(struct put_image_params *rec) | |||
938 | 965 | ||
939 | static int check_overlay_src(struct drm_device *dev, | 966 | static int check_overlay_src(struct drm_device *dev, |
940 | struct drm_intel_overlay_put_image *rec, | 967 | struct drm_intel_overlay_put_image *rec, |
941 | struct drm_gem_object *new_bo) | 968 | struct drm_i915_gem_object *new_bo) |
942 | { | 969 | { |
943 | u32 stride_mask; | ||
944 | int depth; | ||
945 | int uv_hscale = uv_hsubsampling(rec->flags); | 970 | int uv_hscale = uv_hsubsampling(rec->flags); |
946 | int uv_vscale = uv_vsubsampling(rec->flags); | 971 | int uv_vscale = uv_vsubsampling(rec->flags); |
947 | size_t tmp; | 972 | u32 stride_mask; |
973 | int depth; | ||
974 | u32 tmp; | ||
948 | 975 | ||
949 | /* check src dimensions */ | 976 | /* check src dimensions */ |
950 | if (IS_845G(dev) || IS_I830(dev)) { | 977 | if (IS_845G(dev) || IS_I830(dev)) { |
951 | if (rec->src_height > IMAGE_MAX_HEIGHT_LEGACY | 978 | if (rec->src_height > IMAGE_MAX_HEIGHT_LEGACY || |
952 | || rec->src_width > IMAGE_MAX_WIDTH_LEGACY) | 979 | rec->src_width > IMAGE_MAX_WIDTH_LEGACY) |
953 | return -EINVAL; | 980 | return -EINVAL; |
954 | } else { | 981 | } else { |
955 | if (rec->src_height > IMAGE_MAX_HEIGHT | 982 | if (rec->src_height > IMAGE_MAX_HEIGHT || |
956 | || rec->src_width > IMAGE_MAX_WIDTH) | 983 | rec->src_width > IMAGE_MAX_WIDTH) |
957 | return -EINVAL; | 984 | return -EINVAL; |
958 | } | 985 | } |
986 | |||
959 | /* better safe than sorry, use 4 as the maximal subsampling ratio */ | 987 | /* better safe than sorry, use 4 as the maximal subsampling ratio */ |
960 | if (rec->src_height < N_VERT_Y_TAPS*4 | 988 | if (rec->src_height < N_VERT_Y_TAPS*4 || |
961 | || rec->src_width < N_HORIZ_Y_TAPS*4) | 989 | rec->src_width < N_HORIZ_Y_TAPS*4) |
962 | return -EINVAL; | 990 | return -EINVAL; |
963 | 991 | ||
964 | /* check alignment constraints */ | 992 | /* check alignment constraints */ |
965 | switch (rec->flags & I915_OVERLAY_TYPE_MASK) { | 993 | switch (rec->flags & I915_OVERLAY_TYPE_MASK) { |
966 | case I915_OVERLAY_RGB: | 994 | case I915_OVERLAY_RGB: |
967 | /* not implemented */ | 995 | /* not implemented */ |
996 | return -EINVAL; | ||
997 | |||
998 | case I915_OVERLAY_YUV_PACKED: | ||
999 | if (uv_vscale != 1) | ||
968 | return -EINVAL; | 1000 | return -EINVAL; |
969 | case I915_OVERLAY_YUV_PACKED: | 1001 | |
970 | depth = packed_depth_bytes(rec->flags); | 1002 | depth = packed_depth_bytes(rec->flags); |
971 | if (uv_vscale != 1) | 1003 | if (depth < 0) |
972 | return -EINVAL; | 1004 | return depth; |
973 | if (depth < 0) | 1005 | |
974 | return depth; | 1006 | /* ignore UV planes */ |
975 | /* ignore UV planes */ | 1007 | rec->stride_UV = 0; |
976 | rec->stride_UV = 0; | 1008 | rec->offset_U = 0; |
977 | rec->offset_U = 0; | 1009 | rec->offset_V = 0; |
978 | rec->offset_V = 0; | 1010 | /* check pixel alignment */ |
979 | /* check pixel alignment */ | 1011 | if (rec->offset_Y % depth) |
980 | if (rec->offset_Y % depth) | ||
981 | return -EINVAL; | ||
982 | break; | ||
983 | case I915_OVERLAY_YUV_PLANAR: | ||
984 | if (uv_vscale < 0 || uv_hscale < 0) | ||
985 | return -EINVAL; | ||
986 | /* no offset restrictions for planar formats */ | ||
987 | break; | ||
988 | default: | ||
989 | return -EINVAL; | 1012 | return -EINVAL; |
1013 | break; | ||
1014 | |||
1015 | case I915_OVERLAY_YUV_PLANAR: | ||
1016 | if (uv_vscale < 0 || uv_hscale < 0) | ||
1017 | return -EINVAL; | ||
1018 | /* no offset restrictions for planar formats */ | ||
1019 | break; | ||
1020 | |||
1021 | default: | ||
1022 | return -EINVAL; | ||
990 | } | 1023 | } |
991 | 1024 | ||
992 | if (rec->src_width % uv_hscale) | 1025 | if (rec->src_width % uv_hscale) |
@@ -1000,47 +1033,74 @@ static int check_overlay_src(struct drm_device *dev, | |||
1000 | 1033 | ||
1001 | if (rec->stride_Y & stride_mask || rec->stride_UV & stride_mask) | 1034 | if (rec->stride_Y & stride_mask || rec->stride_UV & stride_mask) |
1002 | return -EINVAL; | 1035 | return -EINVAL; |
1003 | if (IS_I965G(dev) && rec->stride_Y < 512) | 1036 | if (IS_GEN4(dev) && rec->stride_Y < 512) |
1004 | return -EINVAL; | 1037 | return -EINVAL; |
1005 | 1038 | ||
1006 | tmp = (rec->flags & I915_OVERLAY_TYPE_MASK) == I915_OVERLAY_YUV_PLANAR ? | 1039 | tmp = (rec->flags & I915_OVERLAY_TYPE_MASK) == I915_OVERLAY_YUV_PLANAR ? |
1007 | 4 : 8; | 1040 | 4096 : 8192; |
1008 | if (rec->stride_Y > tmp*1024 || rec->stride_UV > 2*1024) | 1041 | if (rec->stride_Y > tmp || rec->stride_UV > 2*1024) |
1009 | return -EINVAL; | 1042 | return -EINVAL; |
1010 | 1043 | ||
1011 | /* check buffer dimensions */ | 1044 | /* check buffer dimensions */ |
1012 | switch (rec->flags & I915_OVERLAY_TYPE_MASK) { | 1045 | switch (rec->flags & I915_OVERLAY_TYPE_MASK) { |
1013 | case I915_OVERLAY_RGB: | 1046 | case I915_OVERLAY_RGB: |
1014 | case I915_OVERLAY_YUV_PACKED: | 1047 | case I915_OVERLAY_YUV_PACKED: |
1015 | /* always 4 Y values per depth pixel */ | 1048 | /* always 4 Y values per depth pixel */ |
1016 | if (packed_width_bytes(rec->flags, rec->src_width) | 1049 | if (packed_width_bytes(rec->flags, rec->src_width) > rec->stride_Y) |
1017 | > rec->stride_Y) | 1050 | return -EINVAL; |
1018 | return -EINVAL; | 1051 | |
1019 | 1052 | tmp = rec->stride_Y*rec->src_height; | |
1020 | tmp = rec->stride_Y*rec->src_height; | 1053 | if (rec->offset_Y + tmp > new_bo->base.size) |
1021 | if (rec->offset_Y + tmp > new_bo->size) | 1054 | return -EINVAL; |
1022 | return -EINVAL; | 1055 | break; |
1023 | break; | 1056 | |
1024 | case I915_OVERLAY_YUV_PLANAR: | 1057 | case I915_OVERLAY_YUV_PLANAR: |
1025 | if (rec->src_width > rec->stride_Y) | 1058 | if (rec->src_width > rec->stride_Y) |
1026 | return -EINVAL; | 1059 | return -EINVAL; |
1027 | if (rec->src_width/uv_hscale > rec->stride_UV) | 1060 | if (rec->src_width/uv_hscale > rec->stride_UV) |
1028 | return -EINVAL; | 1061 | return -EINVAL; |
1029 | 1062 | ||
1030 | tmp = rec->stride_Y*rec->src_height; | 1063 | tmp = rec->stride_Y * rec->src_height; |
1031 | if (rec->offset_Y + tmp > new_bo->size) | 1064 | if (rec->offset_Y + tmp > new_bo->base.size) |
1032 | return -EINVAL; | 1065 | return -EINVAL; |
1033 | tmp = rec->stride_UV*rec->src_height; | 1066 | |
1034 | tmp /= uv_vscale; | 1067 | tmp = rec->stride_UV * (rec->src_height / uv_vscale); |
1035 | if (rec->offset_U + tmp > new_bo->size | 1068 | if (rec->offset_U + tmp > new_bo->base.size || |
1036 | || rec->offset_V + tmp > new_bo->size) | 1069 | rec->offset_V + tmp > new_bo->base.size) |
1037 | return -EINVAL; | 1070 | return -EINVAL; |
1038 | break; | 1071 | break; |
1039 | } | 1072 | } |
1040 | 1073 | ||
1041 | return 0; | 1074 | return 0; |
1042 | } | 1075 | } |
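A worked instance of the planar bounds checks: a 320x240 YUV420 image laid out Y, then U, then V in a single bo needs at least 115200 bytes:

    #include <stdio.h>

    int main(void)
    {
            unsigned stride_Y = 320, stride_UV = 160;
            unsigned src_h = 240, uv_vscale = 2;
            unsigned offset_Y = 0;
            unsigned offset_U = offset_Y + stride_Y * src_h;     /* 76800 */
            unsigned uv_size  = stride_UV * (src_h / uv_vscale); /* 19200 */
            unsigned offset_V = offset_U + uv_size;              /* 96000 */

            printf("bo must be >= %u bytes\n", offset_V + uv_size); /* 115200 */
            return 0;
    }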
1043 | 1076 | ||
1077 | /** | ||
1078 | * Return the pipe currently connected to the panel fitter, | ||
1079 | * or -1 if the panel fitter is not present or not in use | ||
1080 | */ | ||
1081 | static int intel_panel_fitter_pipe(struct drm_device *dev) | ||
1082 | { | ||
1083 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1084 | u32 pfit_control; | ||
1085 | |||
1086 | /* i830 doesn't have a panel fitter */ | ||
1087 | if (IS_I830(dev)) | ||
1088 | return -1; | ||
1089 | |||
1090 | pfit_control = I915_READ(PFIT_CONTROL); | ||
1091 | |||
1092 | /* See if the panel fitter is in use */ | ||
1093 | if ((pfit_control & PFIT_ENABLE) == 0) | ||
1094 | return -1; | ||
1095 | |||
1096 | /* 965 can place panel fitter on either pipe */ | ||
1097 | if (IS_GEN4(dev)) | ||
1098 | return (pfit_control >> 29) & 0x3; | ||
1099 | |||
1100 | /* older chips can only use pipe 1 */ | ||
1101 | return 1; | ||
1102 | } | ||
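This helper feeds the one-line-mode test in intel_overlay_put_image() further down; roughly the following, where the stub value and the 1024 threshold simply mirror that hunk:

    static int panel_fitter_pipe(void) { return 1; }  /* stub for the helper */

    static int needs_pfit_compensation(int crtc_pipe, int hdisplay)
    {
            /* wide mode routed through the fitter => one-line mode */
            return hdisplay > 1024 && panel_fitter_pipe() == crtc_pipe;
    }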
1103 | |||
1044 | int intel_overlay_put_image(struct drm_device *dev, void *data, | 1104 | int intel_overlay_put_image(struct drm_device *dev, void *data, |
1045 | struct drm_file *file_priv) | 1105 | struct drm_file *file_priv) |
1046 | { | 1106 | { |
@@ -1049,7 +1109,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data, | |||
1049 | struct intel_overlay *overlay; | 1109 | struct intel_overlay *overlay; |
1050 | struct drm_mode_object *drmmode_obj; | 1110 | struct drm_mode_object *drmmode_obj; |
1051 | struct intel_crtc *crtc; | 1111 | struct intel_crtc *crtc; |
1052 | struct drm_gem_object *new_bo; | 1112 | struct drm_i915_gem_object *new_bo; |
1053 | struct put_image_params *params; | 1113 | struct put_image_params *params; |
1054 | int ret; | 1114 | int ret; |
1055 | 1115 | ||
@@ -1081,16 +1141,16 @@ int intel_overlay_put_image(struct drm_device *dev, void *data, | |||
1081 | return -ENOMEM; | 1141 | return -ENOMEM; |
1082 | 1142 | ||
1083 | drmmode_obj = drm_mode_object_find(dev, put_image_rec->crtc_id, | 1143 | drmmode_obj = drm_mode_object_find(dev, put_image_rec->crtc_id, |
1084 | DRM_MODE_OBJECT_CRTC); | 1144 | DRM_MODE_OBJECT_CRTC); |
1085 | if (!drmmode_obj) { | 1145 | if (!drmmode_obj) { |
1086 | ret = -ENOENT; | 1146 | ret = -ENOENT; |
1087 | goto out_free; | 1147 | goto out_free; |
1088 | } | 1148 | } |
1089 | crtc = to_intel_crtc(obj_to_crtc(drmmode_obj)); | 1149 | crtc = to_intel_crtc(obj_to_crtc(drmmode_obj)); |
1090 | 1150 | ||
1091 | new_bo = drm_gem_object_lookup(dev, file_priv, | 1151 | new_bo = to_intel_bo(drm_gem_object_lookup(dev, file_priv, |
1092 | put_image_rec->bo_handle); | 1152 | put_image_rec->bo_handle)); |
1093 | if (!new_bo) { | 1153 | if (&new_bo->base == NULL) { |
1094 | ret = -ENOENT; | 1154 | ret = -ENOENT; |
1095 | goto out_free; | 1155 | goto out_free; |
1096 | } | 1156 | } |
@@ -1098,12 +1158,16 @@ int intel_overlay_put_image(struct drm_device *dev, void *data, | |||
1098 | mutex_lock(&dev->mode_config.mutex); | 1158 | mutex_lock(&dev->mode_config.mutex); |
1099 | mutex_lock(&dev->struct_mutex); | 1159 | mutex_lock(&dev->struct_mutex); |
1100 | 1160 | ||
1101 | if (overlay->hw_wedged) { | 1161 | if (new_bo->tiling_mode) { |
1102 | ret = intel_overlay_recover_from_interrupt(overlay, 1); | 1162 | DRM_ERROR("buffer used for overlay image cannot be tiled\n"); |
1103 | if (ret != 0) | 1163 | ret = -EINVAL; |
1104 | goto out_unlock; | 1164 | goto out_unlock; |
1105 | } | 1165 | } |
1106 | 1166 | ||
1167 | ret = intel_overlay_recover_from_interrupt(overlay); | ||
1168 | if (ret != 0) | ||
1169 | goto out_unlock; | ||
1170 | |||
1107 | if (overlay->crtc != crtc) { | 1171 | if (overlay->crtc != crtc) { |
1108 | struct drm_display_mode *mode = &crtc->base.mode; | 1172 | struct drm_display_mode *mode = &crtc->base.mode; |
1109 | ret = intel_overlay_switch_off(overlay); | 1173 | ret = intel_overlay_switch_off(overlay); |
@@ -1117,9 +1181,9 @@ int intel_overlay_put_image(struct drm_device *dev, void *data, | |||
1117 | overlay->crtc = crtc; | 1181 | overlay->crtc = crtc; |
1118 | crtc->overlay = overlay; | 1182 | crtc->overlay = overlay; |
1119 | 1183 | ||
1120 | if (intel_panel_fitter_pipe(dev) == crtc->pipe | 1184 | /* line too wide, i.e. one-line-mode */ |
1121 | /* and line to wide, i.e. one-line-mode */ | 1185 | if (mode->hdisplay > 1024 && |
1122 | && mode->hdisplay > 1024) { | 1186 | intel_panel_fitter_pipe(dev) == crtc->pipe) { |
1123 | overlay->pfit_active = 1; | 1187 | overlay->pfit_active = 1; |
1124 | update_pfit_vscale_ratio(overlay); | 1188 | update_pfit_vscale_ratio(overlay); |
1125 | } else | 1189 | } else |
@@ -1132,10 +1196,10 @@ int intel_overlay_put_image(struct drm_device *dev, void *data, | |||
1132 | 1196 | ||
1133 | if (overlay->pfit_active) { | 1197 | if (overlay->pfit_active) { |
1134 | params->dst_y = ((((u32)put_image_rec->dst_y) << 12) / | 1198 | params->dst_y = ((((u32)put_image_rec->dst_y) << 12) / |
1135 | overlay->pfit_vscale_ratio); | 1199 | overlay->pfit_vscale_ratio); |
1136 | /* shifting right rounds downwards, so add 1 */ | 1200 | /* shifting right rounds downwards, so add 1 */ |
1137 | params->dst_h = ((((u32)put_image_rec->dst_height) << 12) / | 1201 | params->dst_h = ((((u32)put_image_rec->dst_height) << 12) / |
1138 | overlay->pfit_vscale_ratio) + 1; | 1202 | overlay->pfit_vscale_ratio) + 1; |
1139 | } else { | 1203 | } else { |
1140 | params->dst_y = put_image_rec->dst_y; | 1204 | params->dst_y = put_image_rec->dst_y; |
1141 | params->dst_h = put_image_rec->dst_height; | 1205 | params->dst_h = put_image_rec->dst_height; |
@@ -1147,8 +1211,8 @@ int intel_overlay_put_image(struct drm_device *dev, void *data, | |||
1147 | params->src_h = put_image_rec->src_height; | 1211 | params->src_h = put_image_rec->src_height; |
1148 | params->src_scan_w = put_image_rec->src_scan_width; | 1212 | params->src_scan_w = put_image_rec->src_scan_width; |
1149 | params->src_scan_h = put_image_rec->src_scan_height; | 1213 | params->src_scan_h = put_image_rec->src_scan_height; |
1150 | if (params->src_scan_h > params->src_h | 1214 | if (params->src_scan_h > params->src_h || |
1151 | || params->src_scan_w > params->src_w) { | 1215 | params->src_scan_w > params->src_w) { |
1152 | ret = -EINVAL; | 1216 | ret = -EINVAL; |
1153 | goto out_unlock; | 1217 | goto out_unlock; |
1154 | } | 1218 | } |
@@ -1182,7 +1246,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data, | |||
1182 | out_unlock: | 1246 | out_unlock: |
1183 | mutex_unlock(&dev->struct_mutex); | 1247 | mutex_unlock(&dev->struct_mutex); |
1184 | mutex_unlock(&dev->mode_config.mutex); | 1248 | mutex_unlock(&dev->mode_config.mutex); |
1185 | drm_gem_object_unreference_unlocked(new_bo); | 1249 | drm_gem_object_unreference_unlocked(&new_bo->base); |
1186 | out_free: | 1250 | out_free: |
1187 | kfree(params); | 1251 | kfree(params); |
1188 | 1252 | ||
@@ -1204,7 +1268,7 @@ static bool check_gamma_bounds(u32 gamma1, u32 gamma2) | |||
1204 | return false; | 1268 | return false; |
1205 | 1269 | ||
1206 | for (i = 0; i < 3; i++) { | 1270 | for (i = 0; i < 3; i++) { |
1207 | if (((gamma1 >> i * 8) & 0xff) >= ((gamma2 >> i*8) & 0xff)) | 1271 | if (((gamma1 >> i*8) & 0xff) >= ((gamma2 >> i*8) & 0xff)) |
1208 | return false; | 1272 | return false; |
1209 | } | 1273 | } |
1210 | 1274 | ||
@@ -1225,16 +1289,18 @@ static bool check_gamma5_errata(u32 gamma5) | |||
1225 | 1289 | ||
1226 | static int check_gamma(struct drm_intel_overlay_attrs *attrs) | 1290 | static int check_gamma(struct drm_intel_overlay_attrs *attrs) |
1227 | { | 1291 | { |
1228 | if (!check_gamma_bounds(0, attrs->gamma0) | 1292 | if (!check_gamma_bounds(0, attrs->gamma0) || |
1229 | || !check_gamma_bounds(attrs->gamma0, attrs->gamma1) | 1293 | !check_gamma_bounds(attrs->gamma0, attrs->gamma1) || |
1230 | || !check_gamma_bounds(attrs->gamma1, attrs->gamma2) | 1294 | !check_gamma_bounds(attrs->gamma1, attrs->gamma2) || |
1231 | || !check_gamma_bounds(attrs->gamma2, attrs->gamma3) | 1295 | !check_gamma_bounds(attrs->gamma2, attrs->gamma3) || |
1232 | || !check_gamma_bounds(attrs->gamma3, attrs->gamma4) | 1296 | !check_gamma_bounds(attrs->gamma3, attrs->gamma4) || |
1233 | || !check_gamma_bounds(attrs->gamma4, attrs->gamma5) | 1297 | !check_gamma_bounds(attrs->gamma4, attrs->gamma5) || |
1234 | || !check_gamma_bounds(attrs->gamma5, 0x00ffffff)) | 1298 | !check_gamma_bounds(attrs->gamma5, 0x00ffffff)) |
1235 | return -EINVAL; | 1299 | return -EINVAL; |
1300 | |||
1236 | if (!check_gamma5_errata(attrs->gamma5)) | 1301 | if (!check_gamma5_errata(attrs->gamma5)) |
1237 | return -EINVAL; | 1302 | return -EINVAL; |
1303 | |||
1238 | return 0; | 1304 | return 0; |
1239 | } | 1305 | } |
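The bounds checks require every byte-wide channel of the ramp to increase strictly, from above 0 up to below 0x00ffffff; check_gamma5_errata() adds a further constraint not modelled here. A ramp that passes the bounds portion:

    #include <stdio.h>

    static int bounds_ok(unsigned lo, unsigned hi)
    {
            int i;
            for (i = 0; i < 3; i++)
                    if (((lo >> i*8) & 0xff) >= ((hi >> i*8) & 0xff))
                            return 0;  /* each channel must strictly increase */
            return 1;
    }

    int main(void)
    {
            unsigned g[6] = { 0x040404, 0x202020, 0x404040,
                              0x808080, 0xc0c0c0, 0xe0e0e0 };
            int i, ok = bounds_ok(0, g[0]) && bounds_ok(g[5], 0x00ffffff);

            for (i = 0; i < 5; i++)
                    ok = ok && bounds_ok(g[i], g[i + 1]);
            printf("ramp %s\n", ok ? "accepted" : "rejected"); /* accepted */
            return 0;
    }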
1240 | 1306 | ||
@@ -1261,13 +1327,14 @@ int intel_overlay_attrs(struct drm_device *dev, void *data, | |||
1261 | mutex_lock(&dev->mode_config.mutex); | 1327 | mutex_lock(&dev->mode_config.mutex); |
1262 | mutex_lock(&dev->struct_mutex); | 1328 | mutex_lock(&dev->struct_mutex); |
1263 | 1329 | ||
1330 | ret = -EINVAL; | ||
1264 | if (!(attrs->flags & I915_OVERLAY_UPDATE_ATTRS)) { | 1331 | if (!(attrs->flags & I915_OVERLAY_UPDATE_ATTRS)) { |
1265 | attrs->color_key = overlay->color_key; | 1332 | attrs->color_key = overlay->color_key; |
1266 | attrs->brightness = overlay->brightness; | 1333 | attrs->brightness = overlay->brightness; |
1267 | attrs->contrast = overlay->contrast; | 1334 | attrs->contrast = overlay->contrast; |
1268 | attrs->saturation = overlay->saturation; | 1335 | attrs->saturation = overlay->saturation; |
1269 | 1336 | ||
1270 | if (IS_I9XX(dev)) { | 1337 | if (!IS_GEN2(dev)) { |
1271 | attrs->gamma0 = I915_READ(OGAMC0); | 1338 | attrs->gamma0 = I915_READ(OGAMC0); |
1272 | attrs->gamma1 = I915_READ(OGAMC1); | 1339 | attrs->gamma1 = I915_READ(OGAMC1); |
1273 | attrs->gamma2 = I915_READ(OGAMC2); | 1340 | attrs->gamma2 = I915_READ(OGAMC2); |
@@ -1275,29 +1342,20 @@ int intel_overlay_attrs(struct drm_device *dev, void *data, | |||
1275 | attrs->gamma4 = I915_READ(OGAMC4); | 1342 | attrs->gamma4 = I915_READ(OGAMC4); |
1276 | attrs->gamma5 = I915_READ(OGAMC5); | 1343 | attrs->gamma5 = I915_READ(OGAMC5); |
1277 | } | 1344 | } |
1278 | ret = 0; | ||
1279 | } else { | 1345 | } else { |
1280 | overlay->color_key = attrs->color_key; | 1346 | if (attrs->brightness < -128 || attrs->brightness > 127) |
1281 | if (attrs->brightness >= -128 && attrs->brightness <= 127) { | ||
1282 | overlay->brightness = attrs->brightness; | ||
1283 | } else { | ||
1284 | ret = -EINVAL; | ||
1285 | goto out_unlock; | 1347 | goto out_unlock; |
1286 | } | 1348 | if (attrs->contrast > 255) |
1287 | if (attrs->contrast <= 255) { | ||
1288 | overlay->contrast = attrs->contrast; | ||
1289 | } else { | ||
1290 | ret = -EINVAL; | ||
1291 | goto out_unlock; | 1349 | goto out_unlock; |
1292 | } | 1350 | if (attrs->saturation > 1023) |
1293 | if (attrs->saturation <= 1023) { | ||
1294 | overlay->saturation = attrs->saturation; | ||
1295 | } else { | ||
1296 | ret = -EINVAL; | ||
1297 | goto out_unlock; | 1351 | goto out_unlock; |
1298 | } | ||
1299 | 1352 | ||
1300 | regs = intel_overlay_map_regs_atomic(overlay); | 1353 | overlay->color_key = attrs->color_key; |
1354 | overlay->brightness = attrs->brightness; | ||
1355 | overlay->contrast = attrs->contrast; | ||
1356 | overlay->saturation = attrs->saturation; | ||
1357 | |||
1358 | regs = intel_overlay_map_regs(overlay); | ||
1301 | if (!regs) { | 1359 | if (!regs) { |
1302 | ret = -ENOMEM; | 1360 | ret = -ENOMEM; |
1303 | goto out_unlock; | 1361 | goto out_unlock; |
@@ -1305,13 +1363,11 @@ int intel_overlay_attrs(struct drm_device *dev, void *data, | |||
1305 | 1363 | ||
1306 | update_reg_attrs(overlay, regs); | 1364 | update_reg_attrs(overlay, regs); |
1307 | 1365 | ||
1308 | intel_overlay_unmap_regs_atomic(overlay); | 1366 | intel_overlay_unmap_regs(overlay, regs); |
1309 | 1367 | ||
1310 | if (attrs->flags & I915_OVERLAY_UPDATE_GAMMA) { | 1368 | if (attrs->flags & I915_OVERLAY_UPDATE_GAMMA) { |
1311 | if (!IS_I9XX(dev)) { | 1369 | if (IS_GEN2(dev)) |
1312 | ret = -EINVAL; | ||
1313 | goto out_unlock; | 1370 | goto out_unlock; |
1314 | } | ||
1315 | 1371 | ||
1316 | if (overlay->active) { | 1372 | if (overlay->active) { |
1317 | ret = -EBUSY; | 1373 | ret = -EBUSY; |
@@ -1319,7 +1375,7 @@ int intel_overlay_attrs(struct drm_device *dev, void *data, | |||
1319 | } | 1375 | } |
1320 | 1376 | ||
1321 | ret = check_gamma(attrs); | 1377 | ret = check_gamma(attrs); |
1322 | if (ret != 0) | 1378 | if (ret) |
1323 | goto out_unlock; | 1379 | goto out_unlock; |
1324 | 1380 | ||
1325 | I915_WRITE(OGAMC0, attrs->gamma0); | 1381 | I915_WRITE(OGAMC0, attrs->gamma0); |
@@ -1329,9 +1385,9 @@ int intel_overlay_attrs(struct drm_device *dev, void *data, | |||
1329 | I915_WRITE(OGAMC4, attrs->gamma4); | 1385 | I915_WRITE(OGAMC4, attrs->gamma4); |
1330 | I915_WRITE(OGAMC5, attrs->gamma5); | 1386 | I915_WRITE(OGAMC5, attrs->gamma5); |
1331 | } | 1387 | } |
1332 | ret = 0; | ||
1333 | } | 1388 | } |
1334 | 1389 | ||
1390 | ret = 0; | ||
1335 | out_unlock: | 1391 | out_unlock: |
1336 | mutex_unlock(&dev->struct_mutex); | 1392 | mutex_unlock(&dev->struct_mutex); |
1337 | mutex_unlock(&dev->mode_config.mutex); | 1393 | mutex_unlock(&dev->mode_config.mutex); |
@@ -1343,39 +1399,50 @@ void intel_setup_overlay(struct drm_device *dev) | |||
1343 | { | 1399 | { |
1344 | drm_i915_private_t *dev_priv = dev->dev_private; | 1400 | drm_i915_private_t *dev_priv = dev->dev_private; |
1345 | struct intel_overlay *overlay; | 1401 | struct intel_overlay *overlay; |
1346 | struct drm_gem_object *reg_bo; | 1402 | struct drm_i915_gem_object *reg_bo; |
1347 | struct overlay_registers *regs; | 1403 | struct overlay_registers *regs; |
1348 | int ret; | 1404 | int ret; |
1349 | 1405 | ||
1350 | if (!OVERLAY_EXISTS(dev)) | 1406 | if (!HAS_OVERLAY(dev)) |
1351 | return; | 1407 | return; |
1352 | 1408 | ||
1353 | overlay = kzalloc(sizeof(struct intel_overlay), GFP_KERNEL); | 1409 | overlay = kzalloc(sizeof(struct intel_overlay), GFP_KERNEL); |
1354 | if (!overlay) | 1410 | if (!overlay) |
1355 | return; | 1411 | return; |
1412 | |||
1413 | mutex_lock(&dev->struct_mutex); | ||
1414 | if (WARN_ON(dev_priv->overlay)) | ||
1415 | goto out_free; | ||
1416 | |||
1356 | overlay->dev = dev; | 1417 | overlay->dev = dev; |
1357 | 1418 | ||
1358 | reg_bo = i915_gem_alloc_object(dev, PAGE_SIZE); | 1419 | reg_bo = i915_gem_alloc_object(dev, PAGE_SIZE); |
1359 | if (!reg_bo) | 1420 | if (!reg_bo) |
1360 | goto out_free; | 1421 | goto out_free; |
1361 | overlay->reg_bo = to_intel_bo(reg_bo); | 1422 | overlay->reg_bo = reg_bo; |
1362 | 1423 | ||
1363 | if (OVERLAY_NONPHYSICAL(dev)) { | 1424 | if (OVERLAY_NEEDS_PHYSICAL(dev)) { |
1364 | ret = i915_gem_object_pin(reg_bo, PAGE_SIZE); | ||
1365 | if (ret) { | ||
1366 | DRM_ERROR("failed to pin overlay register bo\n"); | ||
1367 | goto out_free_bo; | ||
1368 | } | ||
1369 | overlay->flip_addr = overlay->reg_bo->gtt_offset; | ||
1370 | } else { | ||
1371 | ret = i915_gem_attach_phys_object(dev, reg_bo, | 1425 | ret = i915_gem_attach_phys_object(dev, reg_bo, |
1372 | I915_GEM_PHYS_OVERLAY_REGS, | 1426 | I915_GEM_PHYS_OVERLAY_REGS, |
1373 | 0); | 1427 | PAGE_SIZE); |
1374 | if (ret) { | 1428 | if (ret) { |
1375 | DRM_ERROR("failed to attach phys overlay regs\n"); | 1429 | DRM_ERROR("failed to attach phys overlay regs\n"); |
1376 | goto out_free_bo; | 1430 | goto out_free_bo; |
1377 | } | 1431 | } |
1378 | overlay->flip_addr = overlay->reg_bo->phys_obj->handle->busaddr; | 1432 | overlay->flip_addr = reg_bo->phys_obj->handle->busaddr; |
1433 | } else { | ||
1434 | ret = i915_gem_object_pin(reg_bo, PAGE_SIZE, true); | ||
1435 | if (ret) { | ||
1436 | DRM_ERROR("failed to pin overlay register bo\n"); | ||
1437 | goto out_free_bo; | ||
1438 | } | ||
1439 | overlay->flip_addr = reg_bo->gtt_offset; | ||
1440 | |||
1441 | ret = i915_gem_object_set_to_gtt_domain(reg_bo, true); | ||
1442 | if (ret) { | ||
1443 | DRM_ERROR("failed to move overlay register bo into the GTT\n"); | ||
1444 | goto out_unpin_bo; | ||
1445 | } | ||
1379 | } | 1446 | } |
1380 | 1447 | ||
1381 | /* init all values */ | 1448 | /* init all values */ |
@@ -1384,42 +1451,51 @@ void intel_setup_overlay(struct drm_device *dev) | |||
1384 | overlay->contrast = 75; | 1451 | overlay->contrast = 75; |
1385 | overlay->saturation = 146; | 1452 | overlay->saturation = 146; |
1386 | 1453 | ||
1387 | regs = intel_overlay_map_regs_atomic(overlay); | 1454 | regs = intel_overlay_map_regs(overlay); |
1388 | if (!regs) | 1455 | if (!regs) |
1389 | goto out_free_bo; | 1456 | goto out_unpin_bo; |
1390 | 1457 | ||
1391 | memset(regs, 0, sizeof(struct overlay_registers)); | 1458 | memset(regs, 0, sizeof(struct overlay_registers)); |
1392 | update_polyphase_filter(regs); | 1459 | update_polyphase_filter(regs); |
1393 | |||
1394 | update_reg_attrs(overlay, regs); | 1460 | update_reg_attrs(overlay, regs); |
1395 | 1461 | ||
1396 | intel_overlay_unmap_regs_atomic(overlay); | 1462 | intel_overlay_unmap_regs(overlay, regs); |
1397 | 1463 | ||
1398 | dev_priv->overlay = overlay; | 1464 | dev_priv->overlay = overlay; |
1465 | mutex_unlock(&dev->struct_mutex); | ||
1399 | DRM_INFO("initialized overlay support\n"); | 1466 | DRM_INFO("initialized overlay support\n"); |
1400 | return; | 1467 | return; |
1401 | 1468 | ||
1469 | out_unpin_bo: | ||
1470 | if (!OVERLAY_NEEDS_PHYSICAL(dev)) | ||
1471 | i915_gem_object_unpin(reg_bo); | ||
1402 | out_free_bo: | 1472 | out_free_bo: |
1403 | drm_gem_object_unreference(reg_bo); | 1473 | drm_gem_object_unreference(®_bo->base); |
1404 | out_free: | 1474 | out_free: |
1475 | mutex_unlock(&dev->struct_mutex); | ||
1405 | kfree(overlay); | 1476 | kfree(overlay); |
1406 | return; | 1477 | return; |
1407 | } | 1478 | } |
1408 | 1479 | ||
1409 | void intel_cleanup_overlay(struct drm_device *dev) | 1480 | void intel_cleanup_overlay(struct drm_device *dev) |
1410 | { | 1481 | { |
1411 | drm_i915_private_t *dev_priv = dev->dev_private; | 1482 | drm_i915_private_t *dev_priv = dev->dev_private; |
1412 | 1483 | ||
1413 | if (dev_priv->overlay) { | 1484 | if (!dev_priv->overlay) |
1414 | /* The bo's should be freed by the generic code already. | 1485 | return; |
1415 | * Furthermore modesetting teardown happens beforehand so the | ||
1416 | * hardware should be off already */ | ||
1417 | BUG_ON(dev_priv->overlay->active); | ||
1418 | 1486 | ||
419 | kfree(dev_priv->overlay); | 1487 | /* The bo's should be freed by the generic code already. |
1420 | } | 1488 | * Furthermore modesetting teardown happens beforehand so the |
1489 | * hardware should be off already */ | ||
1490 | BUG_ON(dev_priv->overlay->active); | ||
1491 | |||
1492 | drm_gem_object_unreference_unlocked(&dev_priv->overlay->reg_bo->base); | ||
1493 | kfree(dev_priv->overlay); | ||
1421 | } | 1494 | } |
1422 | 1495 | ||
1496 | #ifdef CONFIG_DEBUG_FS | ||
1497 | #include <linux/seq_file.h> | ||
1498 | |||
1423 | struct intel_overlay_error_state { | 1499 | struct intel_overlay_error_state { |
1424 | struct overlay_registers regs; | 1500 | struct overlay_registers regs; |
1425 | unsigned long base; | 1501 | unsigned long base; |
@@ -1427,6 +1503,29 @@ struct intel_overlay_error_state { | |||
1427 | u32 isr; | 1503 | u32 isr; |
1428 | }; | 1504 | }; |
1429 | 1505 | ||
1506 | static struct overlay_registers * | ||
1507 | intel_overlay_map_regs_atomic(struct intel_overlay *overlay) | ||
1508 | { | ||
1509 | drm_i915_private_t *dev_priv = overlay->dev->dev_private; | ||
1510 | struct overlay_registers *regs; | ||
1511 | |||
1512 | if (OVERLAY_NEEDS_PHYSICAL(overlay->dev)) | ||
1513 | regs = overlay->reg_bo->phys_obj->handle->vaddr; | ||
1514 | else | ||
1515 | regs = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping, | ||
1516 | overlay->reg_bo->gtt_offset); | ||
1517 | |||
1518 | return regs; | ||
1519 | } | ||
1520 | |||
1521 | static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay, | ||
1522 | struct overlay_registers *regs) | ||
1523 | { | ||
1524 | if (!OVERLAY_NEEDS_PHYSICAL(overlay->dev)) | ||
1525 | io_mapping_unmap_atomic(regs); | ||
1526 | } | ||
1527 | |||
1528 | |||
1430 | struct intel_overlay_error_state * | 1529 | struct intel_overlay_error_state * |
1431 | intel_overlay_capture_error_state(struct drm_device *dev) | 1530 | intel_overlay_capture_error_state(struct drm_device *dev) |
1432 | { | 1531 | { |
@@ -1444,17 +1543,17 @@ intel_overlay_capture_error_state(struct drm_device *dev) | |||
1444 | 1543 | ||
1445 | error->dovsta = I915_READ(DOVSTA); | 1544 | error->dovsta = I915_READ(DOVSTA); |
1446 | error->isr = I915_READ(ISR); | 1545 | error->isr = I915_READ(ISR); |
1447 | if (OVERLAY_NONPHYSICAL(overlay->dev)) | 1546 | if (OVERLAY_NEEDS_PHYSICAL(overlay->dev)) |
1448 | error->base = (long) overlay->reg_bo->gtt_offset; | ||
1449 | else | ||
1450 | error->base = (long) overlay->reg_bo->phys_obj->handle->vaddr; | 1547 | error->base = (long) overlay->reg_bo->phys_obj->handle->vaddr; |
1548 | else | ||
1549 | error->base = (long) overlay->reg_bo->gtt_offset; | ||
1451 | 1550 | ||
1452 | regs = intel_overlay_map_regs_atomic(overlay); | 1551 | regs = intel_overlay_map_regs_atomic(overlay); |
1453 | if (!regs) | 1552 | if (!regs) |
1454 | goto err; | 1553 | goto err; |
1455 | 1554 | ||
1456 | memcpy_fromio(&error->regs, regs, sizeof(struct overlay_registers)); | 1555 | memcpy_fromio(&error->regs, regs, sizeof(struct overlay_registers)); |
1457 | intel_overlay_unmap_regs_atomic(overlay); | 1556 | intel_overlay_unmap_regs_atomic(overlay, regs); |
1458 | 1557 | ||
1459 | return error; | 1558 | return error; |
1460 | 1559 | ||
@@ -1515,3 +1614,4 @@ intel_overlay_print_error_state(struct seq_file *m, struct intel_overlay_error_s | |||
1515 | P(UVSCALEV); | 1614 | P(UVSCALEV); |
1516 | #undef P | 1615 | #undef P |
1517 | } | 1616 | } |
1617 | #endif | ||
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c index e7f5299d9d57..a06ff07a4d3b 100644 --- a/drivers/gpu/drm/i915/intel_panel.c +++ b/drivers/gpu/drm/i915/intel_panel.c | |||
@@ -30,6 +30,8 @@ | |||
30 | 30 | ||
31 | #include "intel_drv.h" | 31 | #include "intel_drv.h" |
32 | 32 | ||
33 | #define PCI_LBPC 0xf4 /* legacy/combination backlight modes */ | ||
34 | |||
33 | void | 35 | void |
34 | intel_fixed_panel_mode(struct drm_display_mode *fixed_mode, | 36 | intel_fixed_panel_mode(struct drm_display_mode *fixed_mode, |
35 | struct drm_display_mode *adjusted_mode) | 37 | struct drm_display_mode *adjusted_mode) |
@@ -109,3 +111,197 @@ done: | |||
109 | dev_priv->pch_pf_pos = (x << 16) | y; | 111 | dev_priv->pch_pf_pos = (x << 16) | y; |
110 | dev_priv->pch_pf_size = (width << 16) | height; | 112 | dev_priv->pch_pf_size = (width << 16) | height; |
111 | } | 113 | } |
114 | |||
115 | static int is_backlight_combination_mode(struct drm_device *dev) | ||
116 | { | ||
117 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
118 | |||
119 | if (INTEL_INFO(dev)->gen >= 4) | ||
120 | return I915_READ(BLC_PWM_CTL2) & BLM_COMBINATION_MODE; | ||
121 | |||
122 | if (IS_GEN2(dev)) | ||
123 | return I915_READ(BLC_PWM_CTL) & BLM_LEGACY_MODE; | ||
124 | |||
125 | return 0; | ||
126 | } | ||
127 | |||
128 | static u32 i915_read_blc_pwm_ctl(struct drm_i915_private *dev_priv) | ||
129 | { | ||
130 | u32 val; | ||
131 | |||
132 | /* Restore the CTL value if it was lost, e.g. after a GPU reset */ | ||
133 | |||
134 | if (HAS_PCH_SPLIT(dev_priv->dev)) { | ||
135 | val = I915_READ(BLC_PWM_PCH_CTL2); | ||
136 | if (dev_priv->saveBLC_PWM_CTL2 == 0) { | ||
137 | dev_priv->saveBLC_PWM_CTL2 = val; | ||
138 | } else if (val == 0) { | ||
139 | I915_WRITE(BLC_PWM_PCH_CTL2, | ||
140 | dev_priv->saveBLC_PWM_CTL2); | ||
141 | val = dev_priv->saveBLC_PWM_CTL2; | ||
142 | } | ||
143 | } else { | ||
144 | val = I915_READ(BLC_PWM_CTL); | ||
145 | if (dev_priv->saveBLC_PWM_CTL == 0) { | ||
146 | dev_priv->saveBLC_PWM_CTL = val; | ||
147 | dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2); | ||
148 | } else if (val == 0) { | ||
149 | I915_WRITE(BLC_PWM_CTL, | ||
150 | dev_priv->saveBLC_PWM_CTL); | ||
151 | I915_WRITE(BLC_PWM_CTL2, | ||
152 | dev_priv->saveBLC_PWM_CTL2); | ||
153 | val = dev_priv->saveBLC_PWM_CTL; | ||
154 | } | ||
155 | } | ||
156 | |||
157 | return val; | ||
158 | } | ||
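A minimal sketch of the failure mode this helper guards against, assuming a hypothetical CTL value of 0x12c10000 (illustration only, not part of the patch):

    /* 1st call: BLC_PWM_CTL reads 0x12c10000 -> cached in saveBLC_PWM_CTL.
     * A GPU reset then zeroes the register.
     * 2nd call: the register reads 0, so the cached 0x12c10000 is written
     * back and returned -- callers such as intel_panel_get_max_backlight()
     * never observe a transient zero.
     */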
159 | |||
160 | u32 intel_panel_get_max_backlight(struct drm_device *dev) | ||
161 | { | ||
162 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
163 | u32 max; | ||
164 | |||
165 | max = i915_read_blc_pwm_ctl(dev_priv); | ||
166 | if (max == 0) { | ||
167 | /* XXX add code here to query mode clock or hardware clock | ||
168 | * and program max PWM appropriately. | ||
169 | */ | ||
170 | printk_once(KERN_WARNING "fixme: max PWM is zero.\n"); | ||
171 | return 1; | ||
172 | } | ||
173 | |||
174 | if (HAS_PCH_SPLIT(dev)) { | ||
175 | max >>= 16; | ||
176 | } else { | ||
177 | if (IS_PINEVIEW(dev)) { | ||
178 | max >>= 17; | ||
179 | } else { | ||
180 | max >>= 16; | ||
181 | if (INTEL_INFO(dev)->gen < 4) | ||
182 | max &= ~1; | ||
183 | } | ||
184 | |||
185 | if (is_backlight_combination_mode(dev)) | ||
186 | max *= 0xff; | ||
187 | } | ||
188 | |||
189 | DRM_DEBUG_DRIVER("max backlight PWM = %d\n", max); | ||
190 | return max; | ||
191 | } | ||
192 | |||
193 | u32 intel_panel_get_backlight(struct drm_device *dev) | ||
194 | { | ||
195 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
196 | u32 val; | ||
197 | |||
198 | if (HAS_PCH_SPLIT(dev)) { | ||
199 | val = I915_READ(BLC_PWM_CPU_CTL) & BACKLIGHT_DUTY_CYCLE_MASK; | ||
200 | } else { | ||
201 | val = I915_READ(BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK; | ||
202 | if (IS_PINEVIEW(dev)) | ||
203 | val >>= 1; | ||
204 | |||
205 | if (is_backlight_combination_mode(dev)) { | ||
206 | u8 lbpc; | ||
207 | |||
208 | val &= ~1; | ||
209 | pci_read_config_byte(dev->pdev, PCI_LBPC, &lbpc); | ||
210 | val *= lbpc; | ||
211 | } | ||
212 | } | ||
213 | |||
214 | DRM_DEBUG_DRIVER("get backlight PWM = %d\n", val); | ||
215 | return val; | ||
216 | } | ||
217 | |||
218 | static void intel_pch_panel_set_backlight(struct drm_device *dev, u32 level) | ||
219 | { | ||
220 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
221 | u32 val = I915_READ(BLC_PWM_CPU_CTL) & ~BACKLIGHT_DUTY_CYCLE_MASK; | ||
222 | I915_WRITE(BLC_PWM_CPU_CTL, val | level); | ||
223 | } | ||
224 | |||
225 | void intel_panel_set_backlight(struct drm_device *dev, u32 level) | ||
226 | { | ||
227 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
228 | u32 tmp; | ||
229 | |||
230 | DRM_DEBUG_DRIVER("set backlight PWM = %d\n", level); | ||
231 | |||
232 | if (HAS_PCH_SPLIT(dev)) | ||
233 | return intel_pch_panel_set_backlight(dev, level); | ||
234 | |||
235 | if (is_backlight_combination_mode(dev)) { | ||
236 | u32 max = intel_panel_get_max_backlight(dev); | ||
237 | u8 lbpc; | ||
238 | |||
239 | lbpc = level * 0xfe / max + 1; | ||
240 | level /= lbpc; | ||
241 | pci_write_config_byte(dev->pdev, PCI_LBPC, lbpc); | ||
242 | } | ||
243 | |||
244 | tmp = I915_READ(BLC_PWM_CTL); | ||
245 | if (IS_PINEVIEW(dev)) { | ||
246 | tmp &= ~(BACKLIGHT_DUTY_CYCLE_MASK - 1); | ||
247 | level <<= 1; | ||
248 | } else | ||
249 | tmp &= ~BACKLIGHT_DUTY_CYCLE_MASK; | ||
250 | I915_WRITE(BLC_PWM_CTL, tmp | level); | ||
251 | } | ||
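A worked example of the combination-mode split above, with hypothetical numbers (a requested level of 600 against a max of 1200):

    u32 max = 1200, level = 600;        /* hypothetical values */
    u8 lbpc = level * 0xfe / max + 1;   /* 600 * 254 / 1200 + 1 = 128 */
    level /= lbpc;                      /* 600 / 128 = 4 -> PWM duty field */
    /* lbpc goes to the PCI_LBPC config byte and level to BLC_PWM_CTL;
     * their product (512 here) approximates the request, which is why
     * combination mode is comparatively coarse. */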
252 | |||
253 | void intel_panel_disable_backlight(struct drm_device *dev) | ||
254 | { | ||
255 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
256 | |||
257 | if (dev_priv->backlight_enabled) { | ||
258 | dev_priv->backlight_level = intel_panel_get_backlight(dev); | ||
259 | dev_priv->backlight_enabled = false; | ||
260 | } | ||
261 | |||
262 | intel_panel_set_backlight(dev, 0); | ||
263 | } | ||
264 | |||
265 | void intel_panel_enable_backlight(struct drm_device *dev) | ||
266 | { | ||
267 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
268 | |||
269 | if (dev_priv->backlight_level == 0) | ||
270 | dev_priv->backlight_level = intel_panel_get_max_backlight(dev); | ||
271 | |||
272 | intel_panel_set_backlight(dev, dev_priv->backlight_level); | ||
273 | dev_priv->backlight_enabled = true; | ||
274 | } | ||
275 | |||
276 | void intel_panel_setup_backlight(struct drm_device *dev) | ||
277 | { | ||
278 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
279 | |||
280 | dev_priv->backlight_level = intel_panel_get_backlight(dev); | ||
281 | dev_priv->backlight_enabled = dev_priv->backlight_level != 0; | ||
282 | } | ||
283 | |||
284 | enum drm_connector_status | ||
285 | intel_panel_detect(struct drm_device *dev) | ||
286 | { | ||
287 | #if 0 | ||
288 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
289 | #endif | ||
290 | |||
291 | if (i915_panel_ignore_lid) | ||
292 | return i915_panel_ignore_lid > 0 ? | ||
293 | connector_status_connected : | ||
294 | connector_status_disconnected; | ||
295 | |||
296 | /* opregion lid state on HP 2540p is wrong at boot up, | ||
297 | * appears to be either a BIOS or Linux ACPI fault */ | ||
298 | #if 0 | ||
299 | /* Assume that the BIOS does not lie through the OpRegion... */ | ||
300 | if (dev_priv->opregion.lid_state) | ||
301 | return ioread32(dev_priv->opregion.lid_state) & 0x1 ? | ||
302 | connector_status_connected : | ||
303 | connector_status_disconnected; | ||
304 | #endif | ||
305 | |||
306 | return connector_status_unknown; | ||
307 | } | ||
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index cb3508f78bc3..95c4b1429935 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c | |||
@@ -32,6 +32,15 @@ | |||
32 | #include "i915_drv.h" | 32 | #include "i915_drv.h" |
33 | #include "i915_drm.h" | 33 | #include "i915_drm.h" |
34 | #include "i915_trace.h" | 34 | #include "i915_trace.h" |
35 | #include "intel_drv.h" | ||
36 | |||
37 | static inline int ring_space(struct intel_ring_buffer *ring) | ||
38 | { | ||
39 | int space = (ring->head & HEAD_ADDR) - (ring->tail + 8); | ||
40 | if (space < 0) | ||
41 | space += ring->size; | ||
42 | return space; | ||
43 | } | ||
35 | 44 | ||
36 | static u32 i915_gem_get_seqno(struct drm_device *dev) | 45 | static u32 i915_gem_get_seqno(struct drm_device *dev) |
37 | { | 46 | { |
@@ -47,537 +56,718 @@ static u32 i915_gem_get_seqno(struct drm_device *dev) | |||
47 | return seqno; | 56 | return seqno; |
48 | } | 57 | } |
49 | 58 | ||
50 | static void | 59 | static int |
51 | render_ring_flush(struct drm_device *dev, | 60 | render_ring_flush(struct intel_ring_buffer *ring, |
52 | struct intel_ring_buffer *ring, | 61 | u32 invalidate_domains, |
53 | u32 invalidate_domains, | 62 | u32 flush_domains) |
54 | u32 flush_domains) | ||
55 | { | 63 | { |
56 | drm_i915_private_t *dev_priv = dev->dev_private; | 64 | struct drm_device *dev = ring->dev; |
57 | u32 cmd; | 65 | u32 cmd; |
66 | int ret; | ||
58 | 67 | ||
59 | #if WATCH_EXEC | 68 | /* |
60 | DRM_INFO("%s: invalidate %08x flush %08x\n", __func__, | 69 | * read/write caches: |
61 | invalidate_domains, flush_domains); | 70 | * |
62 | #endif | 71 | * I915_GEM_DOMAIN_RENDER is always invalidated, but is |
63 | 72 | * only flushed if MI_NO_WRITE_FLUSH is unset. On 965, it is | |
64 | trace_i915_gem_request_flush(dev, dev_priv->next_seqno, | 73 | * also flushed at 2d versus 3d pipeline switches. |
65 | invalidate_domains, flush_domains); | 74 | * |
66 | 75 | * read-only caches: | |
67 | if ((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) { | 76 | * |
77 | * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if | ||
78 | * MI_READ_FLUSH is set, and is always flushed on 965. | ||
79 | * | ||
80 | * I915_GEM_DOMAIN_COMMAND may not exist? | ||
81 | * | ||
82 | * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is | ||
83 | * invalidated when MI_EXE_FLUSH is set. | ||
84 | * | ||
85 | * I915_GEM_DOMAIN_VERTEX, which exists on 965, is | ||
86 | * invalidated with every MI_FLUSH. | ||
87 | * | ||
88 | * TLBs: | ||
89 | * | ||
90 | * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND | ||
91 | * and I915_GEM_DOMAIN_CPU in are invalidated at PTE write and | ||
92 | * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER | ||
93 | * are flushed at any MI_FLUSH. | ||
94 | */ | ||
95 | |||
96 | cmd = MI_FLUSH | MI_NO_WRITE_FLUSH; | ||
97 | if ((invalidate_domains|flush_domains) & | ||
98 | I915_GEM_DOMAIN_RENDER) | ||
99 | cmd &= ~MI_NO_WRITE_FLUSH; | ||
100 | if (INTEL_INFO(dev)->gen < 4) { | ||
68 | /* | 101 | /* |
69 | * read/write caches: | 102 | * On the 965, the sampler cache always gets flushed |
70 | * | 103 | * and this bit is reserved. |
71 | * I915_GEM_DOMAIN_RENDER is always invalidated, but is | ||
72 | * only flushed if MI_NO_WRITE_FLUSH is unset. On 965, it is | ||
73 | * also flushed at 2d versus 3d pipeline switches. | ||
74 | * | ||
75 | * read-only caches: | ||
76 | * | ||
77 | * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if | ||
78 | * MI_READ_FLUSH is set, and is always flushed on 965. | ||
79 | * | ||
80 | * I915_GEM_DOMAIN_COMMAND may not exist? | ||
81 | * | ||
82 | * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is | ||
83 | * invalidated when MI_EXE_FLUSH is set. | ||
84 | * | ||
85 | * I915_GEM_DOMAIN_VERTEX, which exists on 965, is | ||
86 | * invalidated with every MI_FLUSH. | ||
87 | * | ||
88 | * TLBs: | ||
89 | * | ||
90 | * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND | ||
91 | * and I915_GEM_DOMAIN_CPU in are invalidated at PTE write and | ||
92 | * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER | ||
93 | * are flushed at any MI_FLUSH. | ||
94 | */ | 104 | */ |
95 | 105 | if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER) | |
96 | cmd = MI_FLUSH | MI_NO_WRITE_FLUSH; | 106 | cmd |= MI_READ_FLUSH; |
97 | if ((invalidate_domains|flush_domains) & | ||
98 | I915_GEM_DOMAIN_RENDER) | ||
99 | cmd &= ~MI_NO_WRITE_FLUSH; | ||
100 | if (!IS_I965G(dev)) { | ||
101 | /* | ||
102 | * On the 965, the sampler cache always gets flushed | ||
103 | * and this bit is reserved. | ||
104 | */ | ||
105 | if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER) | ||
106 | cmd |= MI_READ_FLUSH; | ||
107 | } | ||
108 | if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION) | ||
109 | cmd |= MI_EXE_FLUSH; | ||
110 | |||
111 | #if WATCH_EXEC | ||
112 | DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd); | ||
113 | #endif | ||
114 | intel_ring_begin(dev, ring, 2); | ||
115 | intel_ring_emit(dev, ring, cmd); | ||
116 | intel_ring_emit(dev, ring, MI_NOOP); | ||
117 | intel_ring_advance(dev, ring); | ||
118 | } | 107 | } |
119 | } | 108 | if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION) |
109 | cmd |= MI_EXE_FLUSH; | ||
120 | 110 | ||
121 | static unsigned int render_ring_get_head(struct drm_device *dev, | 111 | if (invalidate_domains & I915_GEM_DOMAIN_COMMAND && |
122 | struct intel_ring_buffer *ring) | 112 | (IS_G4X(dev) || IS_GEN5(dev))) |
123 | { | 113 | cmd |= MI_INVALIDATE_ISP; |
124 | drm_i915_private_t *dev_priv = dev->dev_private; | 114 | |
125 | return I915_READ(PRB0_HEAD) & HEAD_ADDR; | 115 | ret = intel_ring_begin(ring, 2); |
116 | if (ret) | ||
117 | return ret; | ||
118 | |||
119 | intel_ring_emit(ring, cmd); | ||
120 | intel_ring_emit(ring, MI_NOOP); | ||
121 | intel_ring_advance(ring); | ||
122 | |||
123 | return 0; | ||
126 | } | 124 | } |
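A sketch of how the domain bits fold into the flush command on a pre-gen4 part, assuming a hypothetical invalidate mask:

    u32 invalidate = I915_GEM_DOMAIN_RENDER | I915_GEM_DOMAIN_SAMPLER;
    u32 cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
    cmd &= ~MI_NO_WRITE_FLUSH;  /* RENDER requested: let writes flush too */
    cmd |= MI_READ_FLUSH;       /* SAMPLER on gen < 4: flush read caches */
    /* cmd is then emitted with a trailing MI_NOOP so the two dwords
     * keep the ring tail qword-aligned. */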
127 | 125 | ||
128 | static unsigned int render_ring_get_tail(struct drm_device *dev, | 126 | static void ring_write_tail(struct intel_ring_buffer *ring, |
129 | struct intel_ring_buffer *ring) | 127 | u32 value) |
130 | { | 128 | { |
131 | drm_i915_private_t *dev_priv = dev->dev_private; | 129 | drm_i915_private_t *dev_priv = ring->dev->dev_private; |
132 | return I915_READ(PRB0_TAIL) & TAIL_ADDR; | 130 | I915_WRITE_TAIL(ring, value); |
133 | } | 131 | } |
134 | 132 | ||
135 | static unsigned int render_ring_get_active_head(struct drm_device *dev, | 133 | u32 intel_ring_get_active_head(struct intel_ring_buffer *ring) |
136 | struct intel_ring_buffer *ring) | ||
137 | { | 134 | { |
138 | drm_i915_private_t *dev_priv = dev->dev_private; | 135 | drm_i915_private_t *dev_priv = ring->dev->dev_private; |
139 | u32 acthd_reg = IS_I965G(dev) ? ACTHD_I965 : ACTHD; | 136 | u32 acthd_reg = INTEL_INFO(ring->dev)->gen >= 4 ? |
137 | RING_ACTHD(ring->mmio_base) : ACTHD; | ||
140 | 138 | ||
141 | return I915_READ(acthd_reg); | 139 | return I915_READ(acthd_reg); |
142 | } | 140 | } |
143 | 141 | ||
144 | static void render_ring_advance_ring(struct drm_device *dev, | 142 | static int init_ring_common(struct intel_ring_buffer *ring) |
145 | struct intel_ring_buffer *ring) | ||
146 | { | ||
147 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
148 | I915_WRITE(PRB0_TAIL, ring->tail); | ||
149 | } | ||
150 | |||
151 | static int init_ring_common(struct drm_device *dev, | ||
152 | struct intel_ring_buffer *ring) | ||
153 | { | 143 | { |
144 | drm_i915_private_t *dev_priv = ring->dev->dev_private; | ||
145 | struct drm_i915_gem_object *obj = ring->obj; | ||
154 | u32 head; | 146 | u32 head; |
155 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
156 | struct drm_i915_gem_object *obj_priv; | ||
157 | obj_priv = to_intel_bo(ring->gem_object); | ||
158 | 147 | ||
159 | /* Stop the ring if it's running. */ | 148 | /* Stop the ring if it's running. */ |
160 | I915_WRITE(ring->regs.ctl, 0); | 149 | I915_WRITE_CTL(ring, 0); |
161 | I915_WRITE(ring->regs.head, 0); | 150 | I915_WRITE_HEAD(ring, 0); |
162 | I915_WRITE(ring->regs.tail, 0); | 151 | ring->write_tail(ring, 0); |
163 | 152 | ||
164 | /* Initialize the ring. */ | 153 | /* Initialize the ring. */ |
165 | I915_WRITE(ring->regs.start, obj_priv->gtt_offset); | 154 | I915_WRITE_START(ring, obj->gtt_offset); |
166 | head = ring->get_head(dev, ring); | 155 | head = I915_READ_HEAD(ring) & HEAD_ADDR; |
167 | 156 | ||
168 | /* G45 ring initialization fails to reset head to zero */ | 157 | /* G45 ring initialization fails to reset head to zero */ |
169 | if (head != 0) { | 158 | if (head != 0) { |
170 | DRM_ERROR("%s head not reset to zero " | 159 | DRM_DEBUG_KMS("%s head not reset to zero " |
171 | "ctl %08x head %08x tail %08x start %08x\n", | 160 | "ctl %08x head %08x tail %08x start %08x\n", |
172 | ring->name, | 161 | ring->name, |
173 | I915_READ(ring->regs.ctl), | 162 | I915_READ_CTL(ring), |
174 | I915_READ(ring->regs.head), | 163 | I915_READ_HEAD(ring), |
175 | I915_READ(ring->regs.tail), | 164 | I915_READ_TAIL(ring), |
176 | I915_READ(ring->regs.start)); | 165 | I915_READ_START(ring)); |
177 | 166 | ||
178 | I915_WRITE(ring->regs.head, 0); | 167 | I915_WRITE_HEAD(ring, 0); |
179 | 168 | ||
180 | DRM_ERROR("%s head forced to zero " | 169 | if (I915_READ_HEAD(ring) & HEAD_ADDR) { |
181 | "ctl %08x head %08x tail %08x start %08x\n", | 170 | DRM_ERROR("failed to set %s head to zero " |
182 | ring->name, | 171 | "ctl %08x head %08x tail %08x start %08x\n", |
183 | I915_READ(ring->regs.ctl), | 172 | ring->name, |
184 | I915_READ(ring->regs.head), | 173 | I915_READ_CTL(ring), |
185 | I915_READ(ring->regs.tail), | 174 | I915_READ_HEAD(ring), |
186 | I915_READ(ring->regs.start)); | 175 | I915_READ_TAIL(ring), |
176 | I915_READ_START(ring)); | ||
177 | } | ||
187 | } | 178 | } |
188 | 179 | ||
189 | I915_WRITE(ring->regs.ctl, | 180 | I915_WRITE_CTL(ring, |
190 | ((ring->gem_object->size - PAGE_SIZE) & RING_NR_PAGES) | 181 | ((ring->size - PAGE_SIZE) & RING_NR_PAGES) |
191 | | RING_NO_REPORT | RING_VALID); | 182 | | RING_REPORT_64K | RING_VALID); |
192 | 183 | ||
193 | head = I915_READ(ring->regs.head) & HEAD_ADDR; | ||
194 | /* If the head is still not zero, the ring is dead */ | 184 | /* If the head is still not zero, the ring is dead */ |
195 | if (head != 0) { | 185 | if ((I915_READ_CTL(ring) & RING_VALID) == 0 || |
186 | I915_READ_START(ring) != obj->gtt_offset || | ||
187 | (I915_READ_HEAD(ring) & HEAD_ADDR) != 0) { | ||
196 | DRM_ERROR("%s initialization failed " | 188 | DRM_ERROR("%s initialization failed " |
197 | "ctl %08x head %08x tail %08x start %08x\n", | 189 | "ctl %08x head %08x tail %08x start %08x\n", |
198 | ring->name, | 190 | ring->name, |
199 | I915_READ(ring->regs.ctl), | 191 | I915_READ_CTL(ring), |
200 | I915_READ(ring->regs.head), | 192 | I915_READ_HEAD(ring), |
201 | I915_READ(ring->regs.tail), | 193 | I915_READ_TAIL(ring), |
202 | I915_READ(ring->regs.start)); | 194 | I915_READ_START(ring)); |
203 | return -EIO; | 195 | return -EIO; |
204 | } | 196 | } |
205 | 197 | ||
206 | if (!drm_core_check_feature(dev, DRIVER_MODESET)) | 198 | if (!drm_core_check_feature(ring->dev, DRIVER_MODESET)) |
207 | i915_kernel_lost_context(dev); | 199 | i915_kernel_lost_context(ring->dev); |
208 | else { | 200 | else { |
209 | ring->head = ring->get_head(dev, ring); | 201 | ring->head = I915_READ_HEAD(ring); |
210 | ring->tail = ring->get_tail(dev, ring); | 202 | ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR; |
211 | ring->space = ring->head - (ring->tail + 8); | 203 | ring->space = ring_space(ring); |
212 | if (ring->space < 0) | ||
213 | ring->space += ring->size; | ||
214 | } | 204 | } |
205 | |||
215 | return 0; | 206 | return 0; |
216 | } | 207 | } |
217 | 208 | ||
218 | static int init_render_ring(struct drm_device *dev, | 209 | /* |
219 | struct intel_ring_buffer *ring) | 210 | * 965+ support PIPE_CONTROL commands, which provide finer-grained control |
211 | * over cache flushing. | ||
212 | */ | ||
213 | struct pipe_control { | ||
214 | struct drm_i915_gem_object *obj; | ||
215 | volatile u32 *cpu_page; | ||
216 | u32 gtt_offset; | ||
217 | }; | ||
218 | |||
219 | static int | ||
220 | init_pipe_control(struct intel_ring_buffer *ring) | ||
220 | { | 221 | { |
221 | drm_i915_private_t *dev_priv = dev->dev_private; | 222 | struct pipe_control *pc; |
222 | int ret = init_ring_common(dev, ring); | 223 | struct drm_i915_gem_object *obj; |
223 | int mode; | 224 | int ret; |
225 | |||
226 | if (ring->private) | ||
227 | return 0; | ||
228 | |||
229 | pc = kmalloc(sizeof(*pc), GFP_KERNEL); | ||
230 | if (!pc) | ||
231 | return -ENOMEM; | ||
232 | |||
233 | obj = i915_gem_alloc_object(ring->dev, 4096); | ||
234 | if (obj == NULL) { | ||
235 | DRM_ERROR("Failed to allocate seqno page\n"); | ||
236 | ret = -ENOMEM; | ||
237 | goto err; | ||
238 | } | ||
239 | obj->cache_level = I915_CACHE_LLC; | ||
240 | |||
241 | ret = i915_gem_object_pin(obj, 4096, true); | ||
242 | if (ret) | ||
243 | goto err_unref; | ||
244 | |||
245 | pc->gtt_offset = obj->gtt_offset; | ||
246 | pc->cpu_page = kmap(obj->pages[0]); | ||
247 | if (pc->cpu_page == NULL) | ||
248 | goto err_unpin; | ||
249 | |||
250 | pc->obj = obj; | ||
251 | ring->private = pc; | ||
252 | return 0; | ||
253 | |||
254 | err_unpin: | ||
255 | i915_gem_object_unpin(obj); | ||
256 | err_unref: | ||
257 | drm_gem_object_unreference(&obj->base); | ||
258 | err: | ||
259 | kfree(pc); | ||
260 | return ret; | ||
261 | } | ||
262 | |||
263 | static void | ||
264 | cleanup_pipe_control(struct intel_ring_buffer *ring) | ||
265 | { | ||
266 | struct pipe_control *pc = ring->private; | ||
267 | struct drm_i915_gem_object *obj; | ||
268 | |||
269 | if (!ring->private) | ||
270 | return; | ||
271 | |||
272 | obj = pc->obj; | ||
273 | kunmap(obj->pages[0]); | ||
274 | i915_gem_object_unpin(obj); | ||
275 | drm_gem_object_unreference(&obj->base); | ||
276 | |||
277 | kfree(pc); | ||
278 | ring->private = NULL; | ||
279 | } | ||
224 | 280 | ||
225 | if (IS_I9XX(dev) && !IS_GEN3(dev)) { | 281 | static int init_render_ring(struct intel_ring_buffer *ring) |
226 | mode = VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH; | 282 | { |
227 | if (IS_GEN6(dev)) | 283 | struct drm_device *dev = ring->dev; |
284 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
285 | int ret = init_ring_common(ring); | ||
286 | |||
287 | if (INTEL_INFO(dev)->gen > 3) { | ||
288 | int mode = VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH; | ||
289 | if (IS_GEN6(dev) || IS_GEN7(dev)) | ||
228 | mode |= MI_FLUSH_ENABLE << 16 | MI_FLUSH_ENABLE; | 290 | mode |= MI_FLUSH_ENABLE << 16 | MI_FLUSH_ENABLE; |
229 | I915_WRITE(MI_MODE, mode); | 291 | I915_WRITE(MI_MODE, mode); |
230 | } | 292 | } |
293 | |||
294 | if (INTEL_INFO(dev)->gen >= 6) { | ||
295 | } else if (IS_GEN5(dev)) { | ||
296 | ret = init_pipe_control(ring); | ||
297 | if (ret) | ||
298 | return ret; | ||
299 | } | ||
300 | |||
231 | return ret; | 301 | return ret; |
232 | } | 302 | } |
233 | 303 | ||
234 | #define PIPE_CONTROL_FLUSH(addr) \ | 304 | static void render_ring_cleanup(struct intel_ring_buffer *ring) |
305 | { | ||
306 | if (!ring->private) | ||
307 | return; | ||
308 | |||
309 | cleanup_pipe_control(ring); | ||
310 | } | ||
311 | |||
312 | static void | ||
313 | update_semaphore(struct intel_ring_buffer *ring, int i, u32 seqno) | ||
314 | { | ||
315 | struct drm_device *dev = ring->dev; | ||
316 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
317 | int id; | ||
318 | |||
319 | /* | ||
320 | * cs -> 1 = vcs, 0 = bcs, | ||
321 | * vcs -> 1 = bcs, 0 = cs, | ||
322 | * bcs -> 1 = cs, 0 = vcs. | ||
323 | */ | ||
324 | id = ring - dev_priv->ring; | ||
325 | id += 2 - i; | ||
326 | id %= 3; | ||
327 | |||
328 | intel_ring_emit(ring, | ||
329 | MI_SEMAPHORE_MBOX | | ||
330 | MI_SEMAPHORE_REGISTER | | ||
331 | MI_SEMAPHORE_UPDATE); | ||
332 | intel_ring_emit(ring, seqno); | ||
333 | intel_ring_emit(ring, | ||
334 | RING_SYNC_0(dev_priv->ring[id].mmio_base) + 4*i); | ||
335 | } | ||
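The index arithmetic above, unrolled as a worked example for the render ring (dev_priv->ring is laid out [cs, vcs, bcs], so ring - dev_priv->ring == 0):

    /* i = 0: id = (0 + 2 - 0) % 3 = 2 -> bcs mailbox
     * i = 1: id = (0 + 2 - 1) % 3 = 1 -> vcs mailbox
     * matching the "cs -> 1 = vcs, 0 = bcs" line in the comment. */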
336 | |||
337 | static int | ||
338 | gen6_add_request(struct intel_ring_buffer *ring, | ||
339 | u32 *result) | ||
340 | { | ||
341 | u32 seqno; | ||
342 | int ret; | ||
343 | |||
344 | ret = intel_ring_begin(ring, 10); | ||
345 | if (ret) | ||
346 | return ret; | ||
347 | |||
348 | seqno = i915_gem_get_seqno(ring->dev); | ||
349 | update_semaphore(ring, 0, seqno); | ||
350 | update_semaphore(ring, 1, seqno); | ||
351 | |||
352 | intel_ring_emit(ring, MI_STORE_DWORD_INDEX); | ||
353 | intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); | ||
354 | intel_ring_emit(ring, seqno); | ||
355 | intel_ring_emit(ring, MI_USER_INTERRUPT); | ||
356 | intel_ring_advance(ring); | ||
357 | |||
358 | *result = seqno; | ||
359 | return 0; | ||
360 | } | ||
361 | |||
362 | int | ||
363 | intel_ring_sync(struct intel_ring_buffer *ring, | ||
364 | struct intel_ring_buffer *to, | ||
365 | u32 seqno) | ||
366 | { | ||
367 | int ret; | ||
368 | |||
369 | ret = intel_ring_begin(ring, 4); | ||
370 | if (ret) | ||
371 | return ret; | ||
372 | |||
373 | intel_ring_emit(ring, | ||
374 | MI_SEMAPHORE_MBOX | | ||
375 | MI_SEMAPHORE_REGISTER | | ||
376 | intel_ring_sync_index(ring, to) << 17 | | ||
377 | MI_SEMAPHORE_COMPARE); | ||
378 | intel_ring_emit(ring, seqno); | ||
379 | intel_ring_emit(ring, 0); | ||
380 | intel_ring_emit(ring, MI_NOOP); | ||
381 | intel_ring_advance(ring); | ||
382 | |||
383 | return 0; | ||
384 | } | ||
385 | |||
386 | #define PIPE_CONTROL_FLUSH(ring__, addr__) \ | ||
235 | do { \ | 387 | do { \ |
236 | OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | \ | 388 | intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | \ |
237 | PIPE_CONTROL_DEPTH_STALL | 2); \ | 389 | PIPE_CONTROL_DEPTH_STALL | 2); \ |
238 | OUT_RING(addr | PIPE_CONTROL_GLOBAL_GTT); \ | 390 | intel_ring_emit(ring__, (addr__) | PIPE_CONTROL_GLOBAL_GTT); \ |
239 | OUT_RING(0); \ | 391 | intel_ring_emit(ring__, 0); \ |
240 | OUT_RING(0); \ | 392 | intel_ring_emit(ring__, 0); \ |
241 | } while (0) | 393 | } while (0) |
242 | 394 | ||
243 | /** | 395 | static int |
244 | * Creates a new sequence number, emitting a write of it to the status page | 396 | pc_render_add_request(struct intel_ring_buffer *ring, |
245 | * plus an interrupt, which will trigger i915_user_interrupt_handler. | 397 | u32 *result) |
246 | * | ||
247 | * Must be called with struct_lock held. | ||
248 | * | ||
249 | * Returned sequence numbers are nonzero on success. | ||
250 | */ | ||
251 | static u32 | ||
252 | render_ring_add_request(struct drm_device *dev, | ||
253 | struct intel_ring_buffer *ring, | ||
254 | struct drm_file *file_priv, | ||
255 | u32 flush_domains) | ||
256 | { | 398 | { |
257 | drm_i915_private_t *dev_priv = dev->dev_private; | 399 | struct drm_device *dev = ring->dev; |
258 | u32 seqno; | 400 | u32 seqno = i915_gem_get_seqno(dev); |
401 | struct pipe_control *pc = ring->private; | ||
402 | u32 scratch_addr = pc->gtt_offset + 128; | ||
403 | int ret; | ||
259 | 404 | ||
260 | seqno = i915_gem_get_seqno(dev); | 405 | /* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently |
261 | 406 | * incoherent with writes to memory, i.e. completely fubar, | |
262 | if (IS_GEN6(dev)) { | 407 | * so we need to use PIPE_NOTIFY instead. |
263 | BEGIN_LP_RING(6); | 408 | * |
264 | OUT_RING(GFX_OP_PIPE_CONTROL | 3); | 409 | * However, we also need to workaround the qword write |
265 | OUT_RING(PIPE_CONTROL_QW_WRITE | | 410 | * incoherence by flushing the 6 PIPE_NOTIFY buffers out to |
266 | PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_IS_FLUSH | | 411 | * memory before requesting an interrupt. |
267 | PIPE_CONTROL_NOTIFY); | 412 | */ |
268 | OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT); | 413 | ret = intel_ring_begin(ring, 32); |
269 | OUT_RING(seqno); | 414 | if (ret) |
270 | OUT_RING(0); | 415 | return ret; |
271 | OUT_RING(0); | 416 | |
272 | ADVANCE_LP_RING(); | 417 | intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | |
273 | } else if (HAS_PIPE_CONTROL(dev)) { | 418 | PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH); |
274 | u32 scratch_addr = dev_priv->seqno_gfx_addr + 128; | 419 | intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT); |
420 | intel_ring_emit(ring, seqno); | ||
421 | intel_ring_emit(ring, 0); | ||
422 | PIPE_CONTROL_FLUSH(ring, scratch_addr); | ||
423 | scratch_addr += 128; /* write to separate cachelines */ | ||
424 | PIPE_CONTROL_FLUSH(ring, scratch_addr); | ||
425 | scratch_addr += 128; | ||
426 | PIPE_CONTROL_FLUSH(ring, scratch_addr); | ||
427 | scratch_addr += 128; | ||
428 | PIPE_CONTROL_FLUSH(ring, scratch_addr); | ||
429 | scratch_addr += 128; | ||
430 | PIPE_CONTROL_FLUSH(ring, scratch_addr); | ||
431 | scratch_addr += 128; | ||
432 | PIPE_CONTROL_FLUSH(ring, scratch_addr); | ||
433 | intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | | ||
434 | PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH | | ||
435 | PIPE_CONTROL_NOTIFY); | ||
436 | intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT); | ||
437 | intel_ring_emit(ring, seqno); | ||
438 | intel_ring_emit(ring, 0); | ||
439 | intel_ring_advance(ring); | ||
440 | |||
441 | *result = seqno; | ||
442 | return 0; | ||
443 | } | ||
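A worked trace of the scratch writes above, assuming a hypothetical pc->gtt_offset of 0x1000: each PIPE_CONTROL_FLUSH lands 128 bytes (two 64-byte cachelines) beyond the last, touching

    /* 0x1080, 0x1100, 0x1180, 0x1200, 0x1280, 0x1300 */

so none of the six qword writes shares a cacheline with another or with the seqno at pc->gtt_offset.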
275 | 444 | ||
276 | /* | 445 | static int |
277 | * Workaround qword write incoherence by flushing the | 446 | render_ring_add_request(struct intel_ring_buffer *ring, |
278 | * PIPE_NOTIFY buffers out to memory before requesting | 447 | u32 *result) |
279 | * an interrupt. | 448 | { |
280 | */ | 449 | struct drm_device *dev = ring->dev; |
281 | BEGIN_LP_RING(32); | 450 | u32 seqno = i915_gem_get_seqno(dev); |
282 | OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | | 451 | int ret; |
283 | PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH); | ||
284 | OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT); | ||
285 | OUT_RING(seqno); | ||
286 | OUT_RING(0); | ||
287 | PIPE_CONTROL_FLUSH(scratch_addr); | ||
288 | scratch_addr += 128; /* write to separate cachelines */ | ||
289 | PIPE_CONTROL_FLUSH(scratch_addr); | ||
290 | scratch_addr += 128; | ||
291 | PIPE_CONTROL_FLUSH(scratch_addr); | ||
292 | scratch_addr += 128; | ||
293 | PIPE_CONTROL_FLUSH(scratch_addr); | ||
294 | scratch_addr += 128; | ||
295 | PIPE_CONTROL_FLUSH(scratch_addr); | ||
296 | scratch_addr += 128; | ||
297 | PIPE_CONTROL_FLUSH(scratch_addr); | ||
298 | OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | | ||
299 | PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH | | ||
300 | PIPE_CONTROL_NOTIFY); | ||
301 | OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT); | ||
302 | OUT_RING(seqno); | ||
303 | OUT_RING(0); | ||
304 | ADVANCE_LP_RING(); | ||
305 | } else { | ||
306 | BEGIN_LP_RING(4); | ||
307 | OUT_RING(MI_STORE_DWORD_INDEX); | ||
308 | OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); | ||
309 | OUT_RING(seqno); | ||
310 | 452 | ||
311 | OUT_RING(MI_USER_INTERRUPT); | 453 | ret = intel_ring_begin(ring, 4); |
312 | ADVANCE_LP_RING(); | 454 | if (ret) |
313 | } | 455 | return ret; |
314 | return seqno; | 456 | |
457 | intel_ring_emit(ring, MI_STORE_DWORD_INDEX); | ||
458 | intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); | ||
459 | intel_ring_emit(ring, seqno); | ||
460 | intel_ring_emit(ring, MI_USER_INTERRUPT); | ||
461 | intel_ring_advance(ring); | ||
462 | |||
463 | *result = seqno; | ||
464 | return 0; | ||
315 | } | 465 | } |
316 | 466 | ||
317 | static u32 | 467 | static u32 |
318 | render_ring_get_gem_seqno(struct drm_device *dev, | 468 | ring_get_seqno(struct intel_ring_buffer *ring) |
319 | struct intel_ring_buffer *ring) | ||
320 | { | 469 | { |
321 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 470 | return intel_read_status_page(ring, I915_GEM_HWS_INDEX); |
322 | if (HAS_PIPE_CONTROL(dev)) | 471 | } |
323 | return ((volatile u32 *)(dev_priv->seqno_page))[0]; | 472 | |
324 | else | 473 | static u32 |
325 | return intel_read_status_page(ring, I915_GEM_HWS_INDEX); | 474 | pc_render_get_seqno(struct intel_ring_buffer *ring) |
475 | { | ||
476 | struct pipe_control *pc = ring->private; | ||
477 | return pc->cpu_page[0]; | ||
478 | } | ||
479 | |||
480 | static void | ||
481 | ironlake_enable_irq(drm_i915_private_t *dev_priv, u32 mask) | ||
482 | { | ||
483 | dev_priv->gt_irq_mask &= ~mask; | ||
484 | I915_WRITE(GTIMR, dev_priv->gt_irq_mask); | ||
485 | POSTING_READ(GTIMR); | ||
486 | } | ||
487 | |||
488 | static void | ||
489 | ironlake_disable_irq(drm_i915_private_t *dev_priv, u32 mask) | ||
490 | { | ||
491 | dev_priv->gt_irq_mask |= mask; | ||
492 | I915_WRITE(GTIMR, dev_priv->gt_irq_mask); | ||
493 | POSTING_READ(GTIMR); | ||
494 | } | ||
495 | |||
496 | static void | ||
497 | i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask) | ||
498 | { | ||
499 | dev_priv->irq_mask &= ~mask; | ||
500 | I915_WRITE(IMR, dev_priv->irq_mask); | ||
501 | POSTING_READ(IMR); | ||
326 | } | 502 | } |
327 | 503 | ||
328 | static void | 504 | static void |
329 | render_ring_get_user_irq(struct drm_device *dev, | 505 | i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask) |
330 | struct intel_ring_buffer *ring) | 506 | { |
507 | dev_priv->irq_mask |= mask; | ||
508 | I915_WRITE(IMR, dev_priv->irq_mask); | ||
509 | POSTING_READ(IMR); | ||
510 | } | ||
511 | |||
512 | static bool | ||
513 | render_ring_get_irq(struct intel_ring_buffer *ring) | ||
331 | { | 514 | { |
332 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 515 | struct drm_device *dev = ring->dev; |
333 | unsigned long irqflags; | 516 | drm_i915_private_t *dev_priv = dev->dev_private; |
334 | 517 | ||
335 | spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); | 518 | if (!dev->irq_enabled) |
336 | if (dev->irq_enabled && (++ring->user_irq_refcount == 1)) { | 519 | return false; |
520 | |||
521 | spin_lock(&ring->irq_lock); | ||
522 | if (ring->irq_refcount++ == 0) { | ||
337 | if (HAS_PCH_SPLIT(dev)) | 523 | if (HAS_PCH_SPLIT(dev)) |
338 | ironlake_enable_graphics_irq(dev_priv, GT_PIPE_NOTIFY); | 524 | ironlake_enable_irq(dev_priv, |
525 | GT_PIPE_NOTIFY | GT_USER_INTERRUPT); | ||
339 | else | 526 | else |
340 | i915_enable_irq(dev_priv, I915_USER_INTERRUPT); | 527 | i915_enable_irq(dev_priv, I915_USER_INTERRUPT); |
341 | } | 528 | } |
342 | spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags); | 529 | spin_unlock(&ring->irq_lock); |
530 | |||
531 | return true; | ||
343 | } | 532 | } |
344 | 533 | ||
345 | static void | 534 | static void |
346 | render_ring_put_user_irq(struct drm_device *dev, | 535 | render_ring_put_irq(struct intel_ring_buffer *ring) |
347 | struct intel_ring_buffer *ring) | ||
348 | { | 536 | { |
349 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 537 | struct drm_device *dev = ring->dev; |
350 | unsigned long irqflags; | 538 | drm_i915_private_t *dev_priv = dev->dev_private; |
351 | 539 | ||
352 | spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); | 540 | spin_lock(&ring->irq_lock); |
353 | BUG_ON(dev->irq_enabled && ring->user_irq_refcount <= 0); | 541 | if (--ring->irq_refcount == 0) { |
354 | if (dev->irq_enabled && (--ring->user_irq_refcount == 0)) { | ||
355 | if (HAS_PCH_SPLIT(dev)) | 542 | if (HAS_PCH_SPLIT(dev)) |
356 | ironlake_disable_graphics_irq(dev_priv, GT_PIPE_NOTIFY); | 543 | ironlake_disable_irq(dev_priv, |
544 | GT_USER_INTERRUPT | | ||
545 | GT_PIPE_NOTIFY); | ||
357 | else | 546 | else |
358 | i915_disable_irq(dev_priv, I915_USER_INTERRUPT); | 547 | i915_disable_irq(dev_priv, I915_USER_INTERRUPT); |
359 | } | 548 | } |
360 | spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags); | 549 | spin_unlock(&ring->irq_lock); |
361 | } | 550 | } |
362 | 551 | ||
363 | static void render_setup_status_page(struct drm_device *dev, | 552 | void intel_ring_setup_status_page(struct intel_ring_buffer *ring) |
364 | struct intel_ring_buffer *ring) | ||
365 | { | 553 | { |
366 | drm_i915_private_t *dev_priv = dev->dev_private; | 554 | struct drm_device *dev = ring->dev; |
367 | if (IS_GEN6(dev)) { | 555 | drm_i915_private_t *dev_priv = ring->dev->dev_private; |
368 | I915_WRITE(HWS_PGA_GEN6, ring->status_page.gfx_addr); | 556 | u32 mmio = 0; |
369 | I915_READ(HWS_PGA_GEN6); /* posting read */ | 557 | |
558 | /* The ring status page addresses are no longer next to the rest of | ||
559 | * the ring registers as of gen7. | ||
560 | */ | ||
561 | if (IS_GEN7(dev)) { | ||
562 | switch (ring->id) { | ||
563 | case RING_RENDER: | ||
564 | mmio = RENDER_HWS_PGA_GEN7; | ||
565 | break; | ||
566 | case RING_BLT: | ||
567 | mmio = BLT_HWS_PGA_GEN7; | ||
568 | break; | ||
569 | case RING_BSD: | ||
570 | mmio = BSD_HWS_PGA_GEN7; | ||
571 | break; | ||
572 | } | ||
573 | } else if (IS_GEN6(ring->dev)) { | ||
574 | mmio = RING_HWS_PGA_GEN6(ring->mmio_base); | ||
370 | } else { | 575 | } else { |
371 | I915_WRITE(HWS_PGA, ring->status_page.gfx_addr); | 576 | mmio = RING_HWS_PGA(ring->mmio_base); |
372 | I915_READ(HWS_PGA); /* posting read */ | ||
373 | } | 577 | } |
374 | 578 | ||
579 | I915_WRITE(mmio, (u32)ring->status_page.gfx_addr); | ||
580 | POSTING_READ(mmio); | ||
375 | } | 581 | } |
376 | 582 | ||
377 | void | 583 | static int |
378 | bsd_ring_flush(struct drm_device *dev, | 584 | bsd_ring_flush(struct intel_ring_buffer *ring, |
379 | struct intel_ring_buffer *ring, | 585 | u32 invalidate_domains, |
380 | u32 invalidate_domains, | 586 | u32 flush_domains) |
381 | u32 flush_domains) | ||
382 | { | 587 | { |
383 | intel_ring_begin(dev, ring, 2); | 588 | int ret; |
384 | intel_ring_emit(dev, ring, MI_FLUSH); | ||
385 | intel_ring_emit(dev, ring, MI_NOOP); | ||
386 | intel_ring_advance(dev, ring); | ||
387 | } | ||
388 | 589 | ||
389 | static inline unsigned int bsd_ring_get_head(struct drm_device *dev, | 590 | ret = intel_ring_begin(ring, 2); |
390 | struct intel_ring_buffer *ring) | 591 | if (ret) |
391 | { | 592 | return ret; |
392 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
393 | return I915_READ(BSD_RING_HEAD) & HEAD_ADDR; | ||
394 | } | ||
395 | 593 | ||
396 | static inline unsigned int bsd_ring_get_tail(struct drm_device *dev, | 594 | intel_ring_emit(ring, MI_FLUSH); |
397 | struct intel_ring_buffer *ring) | 595 | intel_ring_emit(ring, MI_NOOP); |
398 | { | 596 | intel_ring_advance(ring); |
399 | drm_i915_private_t *dev_priv = dev->dev_private; | 597 | return 0; |
400 | return I915_READ(BSD_RING_TAIL) & TAIL_ADDR; | ||
401 | } | 598 | } |
402 | 599 | ||
403 | static inline unsigned int bsd_ring_get_active_head(struct drm_device *dev, | 600 | static int |
404 | struct intel_ring_buffer *ring) | 601 | ring_add_request(struct intel_ring_buffer *ring, |
602 | u32 *result) | ||
405 | { | 603 | { |
406 | drm_i915_private_t *dev_priv = dev->dev_private; | 604 | u32 seqno; |
407 | return I915_READ(BSD_RING_ACTHD); | 605 | int ret; |
408 | } | ||
409 | 606 | ||
410 | static inline void bsd_ring_advance_ring(struct drm_device *dev, | 607 | ret = intel_ring_begin(ring, 4); |
411 | struct intel_ring_buffer *ring) | 608 | if (ret) |
412 | { | 609 | return ret; |
413 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
414 | I915_WRITE(BSD_RING_TAIL, ring->tail); | ||
415 | } | ||
416 | 610 | ||
417 | static int init_bsd_ring(struct drm_device *dev, | 611 | seqno = i915_gem_get_seqno(ring->dev); |
418 | struct intel_ring_buffer *ring) | 612 | |
419 | { | 613 | intel_ring_emit(ring, MI_STORE_DWORD_INDEX); |
420 | return init_ring_common(dev, ring); | 614 | intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); |
615 | intel_ring_emit(ring, seqno); | ||
616 | intel_ring_emit(ring, MI_USER_INTERRUPT); | ||
617 | intel_ring_advance(ring); | ||
618 | |||
619 | *result = seqno; | ||
620 | return 0; | ||
421 | } | 621 | } |
422 | 622 | ||
423 | static u32 | 623 | static bool |
424 | bsd_ring_add_request(struct drm_device *dev, | 624 | gen6_ring_get_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag) |
425 | struct intel_ring_buffer *ring, | ||
426 | struct drm_file *file_priv, | ||
427 | u32 flush_domains) | ||
428 | { | 625 | { |
429 | u32 seqno; | 626 | struct drm_device *dev = ring->dev; |
430 | 627 | drm_i915_private_t *dev_priv = dev->dev_private; | |
431 | seqno = i915_gem_get_seqno(dev); | ||
432 | 628 | ||
433 | intel_ring_begin(dev, ring, 4); | 629 | if (!dev->irq_enabled) |
434 | intel_ring_emit(dev, ring, MI_STORE_DWORD_INDEX); | 630 | return false; |
435 | intel_ring_emit(dev, ring, | ||
436 | I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); | ||
437 | intel_ring_emit(dev, ring, seqno); | ||
438 | intel_ring_emit(dev, ring, MI_USER_INTERRUPT); | ||
439 | intel_ring_advance(dev, ring); | ||
440 | 631 | ||
441 | DRM_DEBUG_DRIVER("%s %d\n", ring->name, seqno); | 632 | spin_lock(&ring->irq_lock); |
633 | if (ring->irq_refcount++ == 0) { | ||
634 | ring->irq_mask &= ~rflag; | ||
635 | I915_WRITE_IMR(ring, ring->irq_mask); | ||
636 | ironlake_enable_irq(dev_priv, gflag); | ||
637 | } | ||
638 | spin_unlock(&ring->irq_lock); | ||
442 | 639 | ||
443 | return seqno; | 640 | return true; |
444 | } | 641 | } |
445 | 642 | ||
446 | static void bsd_setup_status_page(struct drm_device *dev, | 643 | static void |
447 | struct intel_ring_buffer *ring) | 644 | gen6_ring_put_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag) |
448 | { | 645 | { |
646 | struct drm_device *dev = ring->dev; | ||
449 | drm_i915_private_t *dev_priv = dev->dev_private; | 647 | drm_i915_private_t *dev_priv = dev->dev_private; |
450 | I915_WRITE(BSD_HWS_PGA, ring->status_page.gfx_addr); | 648 | |
451 | I915_READ(BSD_HWS_PGA); | 649 | spin_lock(&ring->irq_lock); |
650 | if (--ring->irq_refcount == 0) { | ||
651 | ring->irq_mask |= rflag; | ||
652 | I915_WRITE_IMR(ring, ring->irq_mask); | ||
653 | ironlake_disable_irq(dev_priv, gflag); | ||
654 | } | ||
655 | spin_unlock(&ring->irq_lock); | ||
452 | } | 656 | } |
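A minimal sketch of the caller pattern these helpers serve, assuming the ring's irq_get/irq_put hooks are wired to the functions above (hypothetical wait loop, not part of the patch):

    /* Only the 0 -> 1 and 1 -> 0 refcount transitions touch the
     * IMR/GTIMR masks; nested get/put pairs from concurrent waiters
     * stay cheap under ring->irq_lock. */
    if (ring->irq_get(ring)) {
            wait_event(ring->irq_queue,
                       i915_seqno_passed(ring->get_seqno(ring), seqno));
            ring->irq_put(ring);
    }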
453 | 657 | ||
454 | static void | 658 | static bool |
455 | bsd_ring_get_user_irq(struct drm_device *dev, | 659 | bsd_ring_get_irq(struct intel_ring_buffer *ring) |
456 | struct intel_ring_buffer *ring) | ||
457 | { | 660 | { |
458 | /* do nothing */ | 661 | struct drm_device *dev = ring->dev; |
662 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
663 | |||
664 | if (!dev->irq_enabled) | ||
665 | return false; | ||
666 | |||
667 | spin_lock(&ring->irq_lock); | ||
668 | if (ring->irq_refcount++ == 0) { | ||
669 | if (IS_G4X(dev)) | ||
670 | i915_enable_irq(dev_priv, I915_BSD_USER_INTERRUPT); | ||
671 | else | ||
672 | ironlake_enable_irq(dev_priv, GT_BSD_USER_INTERRUPT); | ||
673 | } | ||
674 | spin_unlock(&ring->irq_lock); | ||
675 | |||
676 | return true; | ||
459 | } | 677 | } |
460 | static void | 678 | static void |
461 | bsd_ring_put_user_irq(struct drm_device *dev, | 679 | bsd_ring_put_irq(struct intel_ring_buffer *ring) |
462 | struct intel_ring_buffer *ring) | ||
463 | { | 680 | { |
464 | /* do nothing */ | 681 | struct drm_device *dev = ring->dev; |
465 | } | 682 | drm_i915_private_t *dev_priv = dev->dev_private; |
466 | 683 | ||
467 | static u32 | 684 | spin_lock(&ring->irq_lock); |
468 | bsd_ring_get_gem_seqno(struct drm_device *dev, | 685 | if (--ring->irq_refcount == 0) { |
469 | struct intel_ring_buffer *ring) | 686 | if (IS_G4X(dev)) |
470 | { | 687 | i915_disable_irq(dev_priv, I915_BSD_USER_INTERRUPT); |
471 | return intel_read_status_page(ring, I915_GEM_HWS_INDEX); | 688 | else |
689 | ironlake_disable_irq(dev_priv, GT_BSD_USER_INTERRUPT); | ||
690 | } | ||
691 | spin_unlock(&ring->irq_lock); | ||
472 | } | 692 | } |
473 | 693 | ||
474 | static int | 694 | static int |
475 | bsd_ring_dispatch_gem_execbuffer(struct drm_device *dev, | 695 | ring_dispatch_execbuffer(struct intel_ring_buffer *ring, u32 offset, u32 length) |
476 | struct intel_ring_buffer *ring, | 696 | { |
477 | struct drm_i915_gem_execbuffer2 *exec, | 697 | int ret; |
478 | struct drm_clip_rect *cliprects, | 698 | |
479 | uint64_t exec_offset) | 699 | ret = intel_ring_begin(ring, 2); |
480 | { | 700 | if (ret) |
481 | uint32_t exec_start; | 701 | return ret; |
482 | exec_start = (uint32_t) exec_offset + exec->batch_start_offset; | 702 | |
483 | intel_ring_begin(dev, ring, 2); | 703 | intel_ring_emit(ring, |
484 | intel_ring_emit(dev, ring, MI_BATCH_BUFFER_START | | 704 | MI_BATCH_BUFFER_START | (2 << 6) | |
485 | (2 << 6) | MI_BATCH_NON_SECURE_I965); | 705 | MI_BATCH_NON_SECURE_I965); |
486 | intel_ring_emit(dev, ring, exec_start); | 706 | intel_ring_emit(ring, offset); |
487 | intel_ring_advance(dev, ring); | 707 | intel_ring_advance(ring); |
708 | |||
488 | return 0; | 709 | return 0; |
489 | } | 710 | } |
490 | 711 | ||
491 | |||
492 | static int | 712 | static int |
493 | render_ring_dispatch_gem_execbuffer(struct drm_device *dev, | 713 | render_ring_dispatch_execbuffer(struct intel_ring_buffer *ring, |
494 | struct intel_ring_buffer *ring, | 714 | u32 offset, u32 len) |
495 | struct drm_i915_gem_execbuffer2 *exec, | ||
496 | struct drm_clip_rect *cliprects, | ||
497 | uint64_t exec_offset) | ||
498 | { | 715 | { |
499 | drm_i915_private_t *dev_priv = dev->dev_private; | 716 | struct drm_device *dev = ring->dev; |
500 | int nbox = exec->num_cliprects; | 717 | int ret; |
501 | int i = 0, count; | 718 | |
502 | uint32_t exec_start, exec_len; | 719 | if (IS_I830(dev) || IS_845G(dev)) { |
503 | exec_start = (uint32_t) exec_offset + exec->batch_start_offset; | 720 | ret = intel_ring_begin(ring, 4); |
504 | exec_len = (uint32_t) exec->batch_len; | 721 | if (ret) |
505 | 722 | return ret; | |
506 | trace_i915_gem_request_submit(dev, dev_priv->next_seqno + 1); | 723 | |
507 | 724 | intel_ring_emit(ring, MI_BATCH_BUFFER); | |
508 | count = nbox ? nbox : 1; | 725 | intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE); |
509 | 726 | intel_ring_emit(ring, offset + len - 8); | |
510 | for (i = 0; i < count; i++) { | 727 | intel_ring_emit(ring, 0); |
511 | if (i < nbox) { | 728 | } else { |
512 | int ret = i915_emit_box(dev, cliprects, i, | 729 | ret = intel_ring_begin(ring, 2); |
513 | exec->DR1, exec->DR4); | 730 | if (ret) |
514 | if (ret) | 731 | return ret; |
515 | return ret; | ||
516 | } | ||
517 | 732 | ||
518 | if (IS_I830(dev) || IS_845G(dev)) { | 733 | if (INTEL_INFO(dev)->gen >= 4) { |
519 | intel_ring_begin(dev, ring, 4); | 734 | intel_ring_emit(ring, |
520 | intel_ring_emit(dev, ring, MI_BATCH_BUFFER); | 735 | MI_BATCH_BUFFER_START | (2 << 6) | |
521 | intel_ring_emit(dev, ring, | 736 | MI_BATCH_NON_SECURE_I965); |
522 | exec_start | MI_BATCH_NON_SECURE); | 737 | intel_ring_emit(ring, offset); |
523 | intel_ring_emit(dev, ring, exec_start + exec_len - 4); | ||
524 | intel_ring_emit(dev, ring, 0); | ||
525 | } else { | 738 | } else { |
526 | intel_ring_begin(dev, ring, 4); | 739 | intel_ring_emit(ring, |
527 | if (IS_I965G(dev)) { | 740 | MI_BATCH_BUFFER_START | (2 << 6)); |
528 | intel_ring_emit(dev, ring, | 741 | intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE); |
529 | MI_BATCH_BUFFER_START | (2 << 6) | ||
530 | | MI_BATCH_NON_SECURE_I965); | ||
531 | intel_ring_emit(dev, ring, exec_start); | ||
532 | } else { | ||
533 | intel_ring_emit(dev, ring, MI_BATCH_BUFFER_START | ||
534 | | (2 << 6)); | ||
535 | intel_ring_emit(dev, ring, exec_start | | ||
536 | MI_BATCH_NON_SECURE); | ||
537 | } | ||
538 | } | 742 | } |
539 | intel_ring_advance(dev, ring); | ||
540 | } | ||
541 | |||
542 | if (IS_G4X(dev) || IS_IRONLAKE(dev)) { | ||
543 | intel_ring_begin(dev, ring, 2); | ||
544 | intel_ring_emit(dev, ring, MI_FLUSH | | ||
545 | MI_NO_WRITE_FLUSH | | ||
546 | MI_INVALIDATE_ISP ); | ||
547 | intel_ring_emit(dev, ring, MI_NOOP); | ||
548 | intel_ring_advance(dev, ring); | ||
549 | } | 743 | } |
550 | /* XXX breadcrumb */ | 744 | intel_ring_advance(ring); |
551 | 745 | ||
552 | return 0; | 746 | return 0; |
553 | } | 747 | } |
554 | 748 | ||
555 | static void cleanup_status_page(struct drm_device *dev, | 749 | static void cleanup_status_page(struct intel_ring_buffer *ring) |
556 | struct intel_ring_buffer *ring) | ||
557 | { | 750 | { |
558 | drm_i915_private_t *dev_priv = dev->dev_private; | 751 | drm_i915_private_t *dev_priv = ring->dev->dev_private; |
559 | struct drm_gem_object *obj; | 752 | struct drm_i915_gem_object *obj; |
560 | struct drm_i915_gem_object *obj_priv; | ||
561 | 753 | ||
562 | obj = ring->status_page.obj; | 754 | obj = ring->status_page.obj; |
563 | if (obj == NULL) | 755 | if (obj == NULL) |
564 | return; | 756 | return; |
565 | obj_priv = to_intel_bo(obj); | ||
566 | 757 | ||
567 | kunmap(obj_priv->pages[0]); | 758 | kunmap(obj->pages[0]); |
568 | i915_gem_object_unpin(obj); | 759 | i915_gem_object_unpin(obj); |
569 | drm_gem_object_unreference(obj); | 760 | drm_gem_object_unreference(&obj->base); |
570 | ring->status_page.obj = NULL; | 761 | ring->status_page.obj = NULL; |
571 | 762 | ||
572 | memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map)); | 763 | memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map)); |
573 | } | 764 | } |
574 | 765 | ||
575 | static int init_status_page(struct drm_device *dev, | 766 | static int init_status_page(struct intel_ring_buffer *ring) |
576 | struct intel_ring_buffer *ring) | ||
577 | { | 767 | { |
768 | struct drm_device *dev = ring->dev; | ||
578 | drm_i915_private_t *dev_priv = dev->dev_private; | 769 | drm_i915_private_t *dev_priv = dev->dev_private; |
579 | struct drm_gem_object *obj; | 770 | struct drm_i915_gem_object *obj; |
580 | struct drm_i915_gem_object *obj_priv; | ||
581 | int ret; | 771 | int ret; |
582 | 772 | ||
583 | obj = i915_gem_alloc_object(dev, 4096); | 773 | obj = i915_gem_alloc_object(dev, 4096); |
@@ -586,16 +776,15 @@ static int init_status_page(struct drm_device *dev, | |||
586 | ret = -ENOMEM; | 776 | ret = -ENOMEM; |
587 | goto err; | 777 | goto err; |
588 | } | 778 | } |
589 | obj_priv = to_intel_bo(obj); | 779 | obj->cache_level = I915_CACHE_LLC; |
590 | obj_priv->agp_type = AGP_USER_CACHED_MEMORY; | ||
591 | 780 | ||
592 | ret = i915_gem_object_pin(obj, 4096); | 781 | ret = i915_gem_object_pin(obj, 4096, true); |
593 | if (ret != 0) { | 782 | if (ret != 0) { |
594 | goto err_unref; | 783 | goto err_unref; |
595 | } | 784 | } |
596 | 785 | ||
597 | ring->status_page.gfx_addr = obj_priv->gtt_offset; | 786 | ring->status_page.gfx_addr = obj->gtt_offset; |
598 | ring->status_page.page_addr = kmap(obj_priv->pages[0]); | 787 | ring->status_page.page_addr = kmap(obj->pages[0]); |
599 | if (ring->status_page.page_addr == NULL) { | 788 | if (ring->status_page.page_addr == NULL) { |
600 | memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map)); | 789 | memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map)); |
601 | goto err_unpin; | 790 | goto err_unpin; |
@@ -603,7 +792,7 @@ static int init_status_page(struct drm_device *dev, | |||
603 | ring->status_page.obj = obj; | 792 | ring->status_page.obj = obj; |
604 | memset(ring->status_page.page_addr, 0, PAGE_SIZE); | 793 | memset(ring->status_page.page_addr, 0, PAGE_SIZE); |
605 | 794 | ||
606 | ring->setup_status_page(dev, ring); | 795 | intel_ring_setup_status_page(ring); |
607 | DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n", | 796 | DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n", |
608 | ring->name, ring->status_page.gfx_addr); | 797 | ring->name, ring->status_page.gfx_addr); |
609 | 798 | ||
@@ -612,23 +801,28 @@ static int init_status_page(struct drm_device *dev, | |||
612 | err_unpin: | 801 | err_unpin: |
613 | i915_gem_object_unpin(obj); | 802 | i915_gem_object_unpin(obj); |
614 | err_unref: | 803 | err_unref: |
615 | drm_gem_object_unreference(obj); | 804 | drm_gem_object_unreference(&obj->base); |
616 | err: | 805 | err: |
617 | return ret; | 806 | return ret; |
618 | } | 807 | } |
619 | 808 | ||
620 | |||
621 | int intel_init_ring_buffer(struct drm_device *dev, | 809 | int intel_init_ring_buffer(struct drm_device *dev, |
622 | struct intel_ring_buffer *ring) | 810 | struct intel_ring_buffer *ring) |
623 | { | 811 | { |
624 | struct drm_i915_gem_object *obj_priv; | 812 | struct drm_i915_gem_object *obj; |
625 | struct drm_gem_object *obj; | ||
626 | int ret; | 813 | int ret; |
627 | 814 | ||
628 | ring->dev = dev; | 815 | ring->dev = dev; |
816 | INIT_LIST_HEAD(&ring->active_list); | ||
817 | INIT_LIST_HEAD(&ring->request_list); | ||
818 | INIT_LIST_HEAD(&ring->gpu_write_list); | ||
819 | |||
820 | init_waitqueue_head(&ring->irq_queue); | ||
821 | spin_lock_init(&ring->irq_lock); | ||
822 | ring->irq_mask = ~0; | ||
629 | 823 | ||
630 | if (I915_NEED_GFX_HWS(dev)) { | 824 | if (I915_NEED_GFX_HWS(dev)) { |
631 | ret = init_status_page(dev, ring); | 825 | ret = init_status_page(ring); |
632 | if (ret) | 826 | if (ret) |
633 | return ret; | 827 | return ret; |
634 | } | 828 | } |
@@ -640,15 +834,14 @@ int intel_init_ring_buffer(struct drm_device *dev, | |||
640 | goto err_hws; | 834 | goto err_hws; |
641 | } | 835 | } |
642 | 836 | ||
643 | ring->gem_object = obj; | 837 | ring->obj = obj; |
644 | 838 | ||
645 | ret = i915_gem_object_pin(obj, ring->alignment); | 839 | ret = i915_gem_object_pin(obj, PAGE_SIZE, true); |
646 | if (ret) | 840 | if (ret) |
647 | goto err_unref; | 841 | goto err_unref; |
648 | 842 | ||
649 | obj_priv = to_intel_bo(obj); | ||
650 | ring->map.size = ring->size; | 843 | ring->map.size = ring->size; |
651 | ring->map.offset = dev->agp->base + obj_priv->gtt_offset; | 844 | ring->map.offset = dev->agp->base + obj->gtt_offset; |
652 | ring->map.type = 0; | 845 | ring->map.type = 0; |
653 | ring->map.flags = 0; | 846 | ring->map.flags = 0; |
654 | ring->map.mtrr = 0; | 847 | ring->map.mtrr = 0; |
@@ -661,58 +854,68 @@ int intel_init_ring_buffer(struct drm_device *dev, | |||
661 | } | 854 | } |
662 | 855 | ||
663 | ring->virtual_start = ring->map.handle; | 856 | ring->virtual_start = ring->map.handle; |
664 | ret = ring->init(dev, ring); | 857 | ret = ring->init(ring); |
665 | if (ret) | 858 | if (ret) |
666 | goto err_unmap; | 859 | goto err_unmap; |
667 | 860 | ||
668 | if (!drm_core_check_feature(dev, DRIVER_MODESET)) | 861 | /* Workaround an erratum on the i830 which causes a hang if |
669 | i915_kernel_lost_context(dev); | 862 | * the TAIL pointer points to within the last 2 cachelines |
670 | else { | 863 | * of the buffer. |
671 | ring->head = ring->get_head(dev, ring); | 864 | */ |
672 | ring->tail = ring->get_tail(dev, ring); | 865 | ring->effective_size = ring->size; |
673 | ring->space = ring->head - (ring->tail + 8); | 866 | if (IS_I830(ring->dev)) |
674 | if (ring->space < 0) | 867 | ring->effective_size -= 128; |
675 | ring->space += ring->size; | 868 | |
676 | } | 869 | return 0; |
677 | INIT_LIST_HEAD(&ring->active_list); | ||
678 | INIT_LIST_HEAD(&ring->request_list); | ||
679 | return ret; | ||
680 | 870 | ||
681 | err_unmap: | 871 | err_unmap: |
682 | drm_core_ioremapfree(&ring->map, dev); | 872 | drm_core_ioremapfree(&ring->map, dev); |
683 | err_unpin: | 873 | err_unpin: |
684 | i915_gem_object_unpin(obj); | 874 | i915_gem_object_unpin(obj); |
685 | err_unref: | 875 | err_unref: |
686 | drm_gem_object_unreference(obj); | 876 | drm_gem_object_unreference(&obj->base); |
687 | ring->gem_object = NULL; | 877 | ring->obj = NULL; |
688 | err_hws: | 878 | err_hws: |
689 | cleanup_status_page(dev, ring); | 879 | cleanup_status_page(ring); |
690 | return ret; | 880 | return ret; |
691 | } | 881 | } |
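The effective_size trick above is easiest to see as the wrap test it feeds. A minimal sketch (needs_wrap is a name invented here; intel_ring_begin() below open-codes the same check):

    static bool needs_wrap(const struct intel_ring_buffer *ring, int n)
    {
            /* On i830, effective_size = size - 128, so a request that
             * would put TAIL into the last two 64-byte cachelines
             * forces a wrap instead of triggering the hang. */
            return ring->tail + n > ring->effective_size;
    }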
692 | 882 | ||
693 | void intel_cleanup_ring_buffer(struct drm_device *dev, | 883 | void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring) |
694 | struct intel_ring_buffer *ring) | ||
695 | { | 884 | { |
696 | if (ring->gem_object == NULL) | 885 | struct drm_i915_private *dev_priv; |
886 | int ret; | ||
887 | |||
888 | if (ring->obj == NULL) | ||
697 | return; | 889 | return; |
698 | 890 | ||
699 | drm_core_ioremapfree(&ring->map, dev); | 891 | /* Disable the ring buffer. The ring must be idle at this point */ |
892 | dev_priv = ring->dev->dev_private; | ||
893 | ret = intel_wait_ring_idle(ring); | ||
894 | if (ret) | ||
895 | DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n", | ||
896 | ring->name, ret); | ||
897 | |||
898 | I915_WRITE_CTL(ring, 0); | ||
700 | 899 | ||
701 | i915_gem_object_unpin(ring->gem_object); | 900 | drm_core_ioremapfree(&ring->map, ring->dev); |
702 | drm_gem_object_unreference(ring->gem_object); | 901 | |
703 | ring->gem_object = NULL; | 902 | i915_gem_object_unpin(ring->obj); |
704 | cleanup_status_page(dev, ring); | 903 | drm_gem_object_unreference(&ring->obj->base); |
904 | ring->obj = NULL; | ||
905 | |||
906 | if (ring->cleanup) | ||
907 | ring->cleanup(ring); | ||
908 | |||
909 | cleanup_status_page(ring); | ||
705 | } | 910 | } |
706 | 911 | ||
707 | int intel_wrap_ring_buffer(struct drm_device *dev, | 912 | static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring) |
708 | struct intel_ring_buffer *ring) | ||
709 | { | 913 | { |
710 | unsigned int *virt; | 914 | unsigned int *virt; |
711 | int rem; | 915 | int rem = ring->size - ring->tail; |
712 | rem = ring->size - ring->tail; | ||
713 | 916 | ||
714 | if (ring->space < rem) { | 917 | if (ring->space < rem) { |
715 | int ret = intel_wait_ring_buffer(dev, ring, rem); | 918 | int ret = intel_wait_ring_buffer(ring, rem); |
716 | if (ret) | 919 | if (ret) |
717 | return ret; | 920 | return ret; |
718 | } | 921 | } |
@@ -725,25 +928,36 @@ int intel_wrap_ring_buffer(struct drm_device *dev, | |||
725 | } | 928 | } |
726 | 929 | ||
727 | ring->tail = 0; | 930 | ring->tail = 0; |
728 | ring->space = ring->head - 8; | 931 | ring->space = ring_space(ring); |
729 | 932 | ||
730 | return 0; | 933 | return 0; |
731 | } | 934 | } |
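The hunk break above elides the body between the wait and the tail reset; a hedged reconstruction consistent with this driver (the exact dwords written per iteration may differ in the elided lines):

    /* Pad the remainder of the ring with no-ops so the command
     * parser skips cleanly past the wrap point back to offset 0. */
    virt = (unsigned int *)(ring->virtual_start + ring->tail);
    rem /= 4;
    while (rem--)
            *virt++ = MI_NOOP;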
732 | 935 | ||
733 | int intel_wait_ring_buffer(struct drm_device *dev, | 936 | int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n) |
734 | struct intel_ring_buffer *ring, int n) | ||
735 | { | 937 | { |
938 | struct drm_device *dev = ring->dev; | ||
939 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
736 | unsigned long end; | 940 | unsigned long end; |
941 | u32 head; | ||
942 | |||
943 | /* If the reported head position has wrapped or hasn't advanced, | ||
944 | * fall back to the slow and accurate path. | ||
945 | */ | ||
946 | head = intel_read_status_page(ring, 4); | ||
947 | if (head > ring->head) { | ||
948 | ring->head = head; | ||
949 | ring->space = ring_space(ring); | ||
950 | if (ring->space >= n) | ||
951 | return 0; | ||
952 | } | ||
737 | 953 | ||
738 | trace_i915_ring_wait_begin (dev); | 954 | trace_i915_ring_wait_begin(ring); |
739 | end = jiffies + 3 * HZ; | 955 | end = jiffies + 3 * HZ; |
740 | do { | 956 | do { |
741 | ring->head = ring->get_head(dev, ring); | 957 | ring->head = I915_READ_HEAD(ring); |
742 | ring->space = ring->head - (ring->tail + 8); | 958 | ring->space = ring_space(ring); |
743 | if (ring->space < 0) | ||
744 | ring->space += ring->size; | ||
745 | if (ring->space >= n) { | 959 | if (ring->space >= n) { |
746 | trace_i915_ring_wait_end (dev); | 960 | trace_i915_ring_wait_end(ring); |
747 | return 0; | 961 | return 0; |
748 | } | 962 | } |
749 | 963 | ||
@@ -753,116 +967,404 @@ int intel_wait_ring_buffer(struct drm_device *dev, | |||
753 | master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT; | 967 | master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT; |
754 | } | 968 | } |
755 | 969 | ||
756 | yield(); | 970 | msleep(1); |
971 | if (atomic_read(&dev_priv->mm.wedged)) | ||
972 | return -EAGAIN; | ||
757 | } while (!time_after(jiffies, end)); | 973 | } while (!time_after(jiffies, end)); |
758 | trace_i915_ring_wait_end (dev); | 974 | trace_i915_ring_wait_end(ring); |
759 | return -EBUSY; | 975 | return -EBUSY; |
760 | } | 976 | } |
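ring_space(), used throughout this hunk, is defined earlier in the file. A sketch matching the removed open-coded computation (the real helper may additionally mask the head with HEAD_ADDR):

    static int ring_space(struct intel_ring_buffer *ring)
    {
            /* 8 bytes of slack so head and tail never become equal
             * while the ring is full */
            int space = ring->head - (ring->tail + 8);
            if (space < 0)
                    space += ring->size;
            return space;
    }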
761 | 977 | ||
762 | void intel_ring_begin(struct drm_device *dev, | 978 | int intel_ring_begin(struct intel_ring_buffer *ring, |
763 | struct intel_ring_buffer *ring, int num_dwords) | 979 | int num_dwords) |
764 | { | 980 | { |
981 | struct drm_i915_private *dev_priv = ring->dev->dev_private; | ||
765 | int n = 4*num_dwords; | 982 | int n = 4*num_dwords; |
766 | if (unlikely(ring->tail + n > ring->size)) | 983 | int ret; |
767 | intel_wrap_ring_buffer(dev, ring); | ||
768 | if (unlikely(ring->space < n)) | ||
769 | intel_wait_ring_buffer(dev, ring, n); | ||
770 | 984 | ||
771 | ring->space -= n; | 985 | if (unlikely(atomic_read(&dev_priv->mm.wedged))) |
772 | } | 986 | return -EIO; |
773 | 987 | ||
774 | void intel_ring_advance(struct drm_device *dev, | 988 | if (unlikely(ring->tail + n > ring->effective_size)) { |
775 | struct intel_ring_buffer *ring) | 989 | ret = intel_wrap_ring_buffer(ring); |
776 | { | 990 | if (unlikely(ret)) |
777 | ring->tail &= ring->size - 1; | 991 | return ret; |
778 | ring->advance_ring(dev, ring); | 992 | } |
993 | |||
994 | if (unlikely(ring->space < n)) { | ||
995 | ret = intel_wait_ring_buffer(ring, n); | ||
996 | if (unlikely(ret)) | ||
997 | return ret; | ||
998 | } | ||
999 | |||
1000 | ring->space -= n; | ||
1001 | return 0; | ||
779 | } | 1002 | } |
780 | 1003 | ||
781 | void intel_fill_struct(struct drm_device *dev, | 1004 | void intel_ring_advance(struct intel_ring_buffer *ring) |
782 | struct intel_ring_buffer *ring, | ||
783 | void *data, | ||
784 | unsigned int len) | ||
785 | { | 1005 | { |
786 | unsigned int *virt = ring->virtual_start + ring->tail; | ||
787 | BUG_ON((len&~(4-1)) != 0); | ||
788 | intel_ring_begin(dev, ring, len/4); | ||
789 | memcpy(virt, data, len); | ||
790 | ring->tail += len; | ||
791 | ring->tail &= ring->size - 1; | 1006 | ring->tail &= ring->size - 1; |
792 | ring->space -= len; | 1007 | ring->write_tail(ring, ring->tail); |
793 | intel_ring_advance(dev, ring); | ||
794 | } | 1008 | } |
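intel_ring_begin() is now fallible, so every emitter must check it; reserving too few dwords or ignoring -EIO from a wedged GPU corrupts the ring. A usage sketch (emit_flush_example is a name invented here):

    static int emit_flush_example(struct intel_ring_buffer *ring)
    {
            int ret = intel_ring_begin(ring, 2);   /* reserve 2 dwords */
            if (ret)
                    return ret;                    /* e.g. -EIO when wedged */

            intel_ring_emit(ring, MI_FLUSH);
            intel_ring_emit(ring, MI_NOOP);
            intel_ring_advance(ring);              /* publish the new tail */
            return 0;
    }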
795 | 1009 | ||
796 | struct intel_ring_buffer render_ring = { | 1010 | static const struct intel_ring_buffer render_ring = { |
797 | .name = "render ring", | 1011 | .name = "render ring", |
798 | .regs = { | 1012 | .id = RING_RENDER, |
799 | .ctl = PRB0_CTL, | 1013 | .mmio_base = RENDER_RING_BASE, |
800 | .head = PRB0_HEAD, | ||
801 | .tail = PRB0_TAIL, | ||
802 | .start = PRB0_START | ||
803 | }, | ||
804 | .ring_flag = I915_EXEC_RENDER, | ||
805 | .size = 32 * PAGE_SIZE, | 1014 | .size = 32 * PAGE_SIZE, |
806 | .alignment = PAGE_SIZE, | ||
807 | .virtual_start = NULL, | ||
808 | .dev = NULL, | ||
809 | .gem_object = NULL, | ||
810 | .head = 0, | ||
811 | .tail = 0, | ||
812 | .space = 0, | ||
813 | .user_irq_refcount = 0, | ||
814 | .irq_gem_seqno = 0, | ||
815 | .waiting_gem_seqno = 0, | ||
816 | .setup_status_page = render_setup_status_page, | ||
817 | .init = init_render_ring, | 1015 | .init = init_render_ring, |
818 | .get_head = render_ring_get_head, | 1016 | .write_tail = ring_write_tail, |
819 | .get_tail = render_ring_get_tail, | ||
820 | .get_active_head = render_ring_get_active_head, | ||
821 | .advance_ring = render_ring_advance_ring, | ||
822 | .flush = render_ring_flush, | 1017 | .flush = render_ring_flush, |
823 | .add_request = render_ring_add_request, | 1018 | .add_request = render_ring_add_request, |
824 | .get_gem_seqno = render_ring_get_gem_seqno, | 1019 | .get_seqno = ring_get_seqno, |
825 | .user_irq_get = render_ring_get_user_irq, | 1020 | .irq_get = render_ring_get_irq, |
826 | .user_irq_put = render_ring_put_user_irq, | 1021 | .irq_put = render_ring_put_irq, |
827 | .dispatch_gem_execbuffer = render_ring_dispatch_gem_execbuffer, | 1022 | .dispatch_execbuffer = render_ring_dispatch_execbuffer, |
828 | .status_page = {NULL, 0, NULL}, | 1023 | .cleanup = render_ring_cleanup, |
829 | .map = {0,} | ||
830 | }; | 1024 | }; |
831 | 1025 | ||
832 | /* ring buffer for bit-stream decoder */ | 1026 | /* ring buffer for bit-stream decoder */ |
833 | 1027 | ||
834 | struct intel_ring_buffer bsd_ring = { | 1028 | static const struct intel_ring_buffer bsd_ring = { |
835 | .name = "bsd ring", | 1029 | .name = "bsd ring", |
836 | .regs = { | 1030 | .id = RING_BSD, |
837 | .ctl = BSD_RING_CTL, | 1031 | .mmio_base = BSD_RING_BASE, |
838 | .head = BSD_RING_HEAD, | ||
839 | .tail = BSD_RING_TAIL, | ||
840 | .start = BSD_RING_START | ||
841 | }, | ||
842 | .ring_flag = I915_EXEC_BSD, | ||
843 | .size = 32 * PAGE_SIZE, | 1032 | .size = 32 * PAGE_SIZE, |
844 | .alignment = PAGE_SIZE, | 1033 | .init = init_ring_common, |
845 | .virtual_start = NULL, | 1034 | .write_tail = ring_write_tail, |
846 | .dev = NULL, | ||
847 | .gem_object = NULL, | ||
848 | .head = 0, | ||
849 | .tail = 0, | ||
850 | .space = 0, | ||
851 | .user_irq_refcount = 0, | ||
852 | .irq_gem_seqno = 0, | ||
853 | .waiting_gem_seqno = 0, | ||
854 | .setup_status_page = bsd_setup_status_page, | ||
855 | .init = init_bsd_ring, | ||
856 | .get_head = bsd_ring_get_head, | ||
857 | .get_tail = bsd_ring_get_tail, | ||
858 | .get_active_head = bsd_ring_get_active_head, | ||
859 | .advance_ring = bsd_ring_advance_ring, | ||
860 | .flush = bsd_ring_flush, | 1035 | .flush = bsd_ring_flush, |
861 | .add_request = bsd_ring_add_request, | 1036 | .add_request = ring_add_request, |
862 | .get_gem_seqno = bsd_ring_get_gem_seqno, | 1037 | .get_seqno = ring_get_seqno, |
863 | .user_irq_get = bsd_ring_get_user_irq, | 1038 | .irq_get = bsd_ring_get_irq, |
864 | .user_irq_put = bsd_ring_put_user_irq, | 1039 | .irq_put = bsd_ring_put_irq, |
865 | .dispatch_gem_execbuffer = bsd_ring_dispatch_gem_execbuffer, | 1040 | .dispatch_execbuffer = ring_dispatch_execbuffer, |
866 | .status_page = {NULL, 0, NULL}, | 1041 | }; |
867 | .map = {0,} | 1042 | |
1043 | |||
1044 | static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring, | ||
1045 | u32 value) | ||
1046 | { | ||
1047 | drm_i915_private_t *dev_priv = ring->dev->dev_private; | ||
1048 | |||
1049 | /* Every tail move must follow the sequence below */ | ||
1050 | I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL, | ||
1051 | GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK | | ||
1052 | GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_DISABLE); | ||
1053 | I915_WRITE(GEN6_BSD_RNCID, 0x0); | ||
1054 | |||
1055 | if (wait_for((I915_READ(GEN6_BSD_SLEEP_PSMI_CONTROL) & | ||
1056 | GEN6_BSD_SLEEP_PSMI_CONTROL_IDLE_INDICATOR) == 0, | ||
1057 | 50)) | ||
1058 | DRM_ERROR("timed out waiting for IDLE Indicator\n"); | ||
1059 | |||
1060 | I915_WRITE_TAIL(ring, value); | ||
1061 | I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL, | ||
1062 | GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK | | ||
1063 | GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE); | ||
1064 | } | ||
1065 | |||
1066 | static int gen6_ring_flush(struct intel_ring_buffer *ring, | ||
1067 | u32 invalidate, u32 flush) | ||
1068 | { | ||
1069 | uint32_t cmd; | ||
1070 | int ret; | ||
1071 | |||
1072 | ret = intel_ring_begin(ring, 4); | ||
1073 | if (ret) | ||
1074 | return ret; | ||
1075 | |||
1076 | cmd = MI_FLUSH_DW; | ||
1077 | if (invalidate & I915_GEM_GPU_DOMAINS) | ||
1078 | cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD; | ||
1079 | intel_ring_emit(ring, cmd); | ||
1080 | intel_ring_emit(ring, 0); | ||
1081 | intel_ring_emit(ring, 0); | ||
1082 | intel_ring_emit(ring, MI_NOOP); | ||
1083 | intel_ring_advance(ring); | ||
1084 | return 0; | ||
1085 | } | ||
1086 | |||
1087 | static int | ||
1088 | gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring, | ||
1089 | u32 offset, u32 len) | ||
1090 | { | ||
1091 | int ret; | ||
1092 | |||
1093 | ret = intel_ring_begin(ring, 2); | ||
1094 | if (ret) | ||
1095 | return ret; | ||
1096 | |||
1097 | intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965); | ||
1098 | /* bits 0-7 are the length on GEN6+ */ | ||
1099 | intel_ring_emit(ring, offset); | ||
1100 | intel_ring_advance(ring); | ||
1101 | |||
1102 | return 0; | ||
1103 | } | ||
1104 | |||
1105 | static bool | ||
1106 | gen6_render_ring_get_irq(struct intel_ring_buffer *ring) | ||
1107 | { | ||
1108 | return gen6_ring_get_irq(ring, | ||
1109 | GT_USER_INTERRUPT, | ||
1110 | GEN6_RENDER_USER_INTERRUPT); | ||
1111 | } | ||
1112 | |||
1113 | static void | ||
1114 | gen6_render_ring_put_irq(struct intel_ring_buffer *ring) | ||
1115 | { | ||
1116 | return gen6_ring_put_irq(ring, | ||
1117 | GT_USER_INTERRUPT, | ||
1118 | GEN6_RENDER_USER_INTERRUPT); | ||
1119 | } | ||
1120 | |||
1121 | static bool | ||
1122 | gen6_bsd_ring_get_irq(struct intel_ring_buffer *ring) | ||
1123 | { | ||
1124 | return gen6_ring_get_irq(ring, | ||
1125 | GT_GEN6_BSD_USER_INTERRUPT, | ||
1126 | GEN6_BSD_USER_INTERRUPT); | ||
1127 | } | ||
1128 | |||
1129 | static void | ||
1130 | gen6_bsd_ring_put_irq(struct intel_ring_buffer *ring) | ||
1131 | { | ||
1132 | return gen6_ring_put_irq(ring, | ||
1133 | GT_GEN6_BSD_USER_INTERRUPT, | ||
1134 | GEN6_BSD_USER_INTERRUPT); | ||
1135 | } | ||
1136 | |||
1137 | /* ring buffer for Video Codec for Gen6+ */ | ||
1138 | static const struct intel_ring_buffer gen6_bsd_ring = { | ||
1139 | .name = "gen6 bsd ring", | ||
1140 | .id = RING_BSD, | ||
1141 | .mmio_base = GEN6_BSD_RING_BASE, | ||
1142 | .size = 32 * PAGE_SIZE, | ||
1143 | .init = init_ring_common, | ||
1144 | .write_tail = gen6_bsd_ring_write_tail, | ||
1145 | .flush = gen6_ring_flush, | ||
1146 | .add_request = gen6_add_request, | ||
1147 | .get_seqno = ring_get_seqno, | ||
1148 | .irq_get = gen6_bsd_ring_get_irq, | ||
1149 | .irq_put = gen6_bsd_ring_put_irq, | ||
1150 | .dispatch_execbuffer = gen6_ring_dispatch_execbuffer, | ||
1151 | }; | ||
1152 | |||
1153 | /* Blitter support (SandyBridge+) */ | ||
1154 | |||
1155 | static bool | ||
1156 | blt_ring_get_irq(struct intel_ring_buffer *ring) | ||
1157 | { | ||
1158 | return gen6_ring_get_irq(ring, | ||
1159 | GT_BLT_USER_INTERRUPT, | ||
1160 | GEN6_BLITTER_USER_INTERRUPT); | ||
1161 | } | ||
1162 | |||
1163 | static void | ||
1164 | blt_ring_put_irq(struct intel_ring_buffer *ring) | ||
1165 | { | ||
1166 | gen6_ring_put_irq(ring, | ||
1167 | GT_BLT_USER_INTERRUPT, | ||
1168 | GEN6_BLITTER_USER_INTERRUPT); | ||
1169 | } | ||
1170 | |||
1171 | |||
1172 | /* Workaround for some steppings of SNB: | ||
1173 | * each time the BLT engine's ring tail is moved, | ||
1174 | * the first command parsed in the ring | ||
1175 | * must be MI_BATCH_BUFFER_START. | ||
1176 | */ | ||
1177 | #define NEED_BLT_WORKAROUND(dev) \ | ||
1178 | (IS_GEN6(dev) && (dev->pdev->revision < 8)) | ||
1179 | |||
1180 | static inline struct drm_i915_gem_object * | ||
1181 | to_blt_workaround(struct intel_ring_buffer *ring) | ||
1182 | { | ||
1183 | return ring->private; | ||
1184 | } | ||
1185 | |||
1186 | static int blt_ring_init(struct intel_ring_buffer *ring) | ||
1187 | { | ||
1188 | if (NEED_BLT_WORKAROUND(ring->dev)) { | ||
1189 | struct drm_i915_gem_object *obj; | ||
1190 | u32 *ptr; | ||
1191 | int ret; | ||
1192 | |||
1193 | obj = i915_gem_alloc_object(ring->dev, 4096); | ||
1194 | if (obj == NULL) | ||
1195 | return -ENOMEM; | ||
1196 | |||
1197 | ret = i915_gem_object_pin(obj, 4096, true); | ||
1198 | if (ret) { | ||
1199 | drm_gem_object_unreference(&obj->base); | ||
1200 | return ret; | ||
1201 | } | ||
1202 | |||
1203 | ptr = kmap(obj->pages[0]); | ||
1204 | *ptr++ = MI_BATCH_BUFFER_END; | ||
1205 | *ptr++ = MI_NOOP; | ||
1206 | kunmap(obj->pages[0]); | ||
1207 | |||
1208 | ret = i915_gem_object_set_to_gtt_domain(obj, false); | ||
1209 | if (ret) { | ||
1210 | i915_gem_object_unpin(obj); | ||
1211 | drm_gem_object_unreference(&obj->base); | ||
1212 | return ret; | ||
1213 | } | ||
1214 | |||
1215 | ring->private = obj; | ||
1216 | } | ||
1217 | |||
1218 | return init_ring_common(ring); | ||
1219 | } | ||
1220 | |||
1221 | static int blt_ring_begin(struct intel_ring_buffer *ring, | ||
1222 | int num_dwords) | ||
1223 | { | ||
1224 | if (ring->private) { | ||
1225 | int ret = intel_ring_begin(ring, num_dwords+2); | ||
1226 | if (ret) | ||
1227 | return ret; | ||
1228 | |||
1229 | intel_ring_emit(ring, MI_BATCH_BUFFER_START); | ||
1230 | intel_ring_emit(ring, to_blt_workaround(ring)->gtt_offset); | ||
1231 | |||
1232 | return 0; | ||
1233 | } else | ||
1234 | return intel_ring_begin(ring, 4); | ||
1235 | } | ||
1236 | |||
1237 | static int blt_ring_flush(struct intel_ring_buffer *ring, | ||
1238 | u32 invalidate, u32 flush) | ||
1239 | { | ||
1240 | uint32_t cmd; | ||
1241 | int ret; | ||
1242 | |||
1243 | ret = blt_ring_begin(ring, 4); | ||
1244 | if (ret) | ||
1245 | return ret; | ||
1246 | |||
1247 | cmd = MI_FLUSH_DW; | ||
1248 | if (invalidate & I915_GEM_DOMAIN_RENDER) | ||
1249 | cmd |= MI_INVALIDATE_TLB; | ||
1250 | intel_ring_emit(ring, cmd); | ||
1251 | intel_ring_emit(ring, 0); | ||
1252 | intel_ring_emit(ring, 0); | ||
1253 | intel_ring_emit(ring, MI_NOOP); | ||
1254 | intel_ring_advance(ring); | ||
1255 | return 0; | ||
1256 | } | ||
1257 | |||
1258 | static void blt_ring_cleanup(struct intel_ring_buffer *ring) | ||
1259 | { | ||
1260 | if (!ring->private) | ||
1261 | return; | ||
1262 | |||
1263 | i915_gem_object_unpin(ring->private); | ||
1264 | drm_gem_object_unreference(ring->private); | ||
1265 | ring->private = NULL; | ||
1266 | } | ||
1267 | |||
1268 | static const struct intel_ring_buffer gen6_blt_ring = { | ||
1269 | .name = "blt ring", | ||
1270 | .id = RING_BLT, | ||
1271 | .mmio_base = BLT_RING_BASE, | ||
1272 | .size = 32 * PAGE_SIZE, | ||
1273 | .init = blt_ring_init, | ||
1274 | .write_tail = ring_write_tail, | ||
1275 | .flush = blt_ring_flush, | ||
1276 | .add_request = gen6_add_request, | ||
1277 | .get_seqno = ring_get_seqno, | ||
1278 | .irq_get = blt_ring_get_irq, | ||
1279 | .irq_put = blt_ring_put_irq, | ||
1280 | .dispatch_execbuffer = gen6_ring_dispatch_execbuffer, | ||
1281 | .cleanup = blt_ring_cleanup, | ||
868 | }; | 1282 | }; |
1283 | |||
1284 | int intel_init_render_ring_buffer(struct drm_device *dev) | ||
1285 | { | ||
1286 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
1287 | struct intel_ring_buffer *ring = &dev_priv->ring[RCS]; | ||
1288 | |||
1289 | *ring = render_ring; | ||
1290 | if (INTEL_INFO(dev)->gen >= 6) { | ||
1291 | ring->add_request = gen6_add_request; | ||
1292 | ring->irq_get = gen6_render_ring_get_irq; | ||
1293 | ring->irq_put = gen6_render_ring_put_irq; | ||
1294 | } else if (IS_GEN5(dev)) { | ||
1295 | ring->add_request = pc_render_add_request; | ||
1296 | ring->get_seqno = pc_render_get_seqno; | ||
1297 | } | ||
1298 | |||
1299 | if (!I915_NEED_GFX_HWS(dev)) { | ||
1300 | ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr; | ||
1301 | memset(ring->status_page.page_addr, 0, PAGE_SIZE); | ||
1302 | } | ||
1303 | |||
1304 | return intel_init_ring_buffer(dev, ring); | ||
1305 | } | ||
1306 | |||
1307 | int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size) | ||
1308 | { | ||
1309 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
1310 | struct intel_ring_buffer *ring = &dev_priv->ring[RCS]; | ||
1311 | |||
1312 | *ring = render_ring; | ||
1313 | if (INTEL_INFO(dev)->gen >= 6) { | ||
1314 | ring->add_request = gen6_add_request; | ||
1315 | ring->irq_get = gen6_render_ring_get_irq; | ||
1316 | ring->irq_put = gen6_render_ring_put_irq; | ||
1317 | } else if (IS_GEN5(dev)) { | ||
1318 | ring->add_request = pc_render_add_request; | ||
1319 | ring->get_seqno = pc_render_get_seqno; | ||
1320 | } | ||
1321 | |||
1322 | ring->dev = dev; | ||
1323 | INIT_LIST_HEAD(&ring->active_list); | ||
1324 | INIT_LIST_HEAD(&ring->request_list); | ||
1325 | INIT_LIST_HEAD(&ring->gpu_write_list); | ||
1326 | |||
1327 | ring->size = size; | ||
1328 | ring->effective_size = ring->size; | ||
1329 | if (IS_I830(ring->dev)) | ||
1330 | ring->effective_size -= 128; | ||
1331 | |||
1332 | ring->map.offset = start; | ||
1333 | ring->map.size = size; | ||
1334 | ring->map.type = 0; | ||
1335 | ring->map.flags = 0; | ||
1336 | ring->map.mtrr = 0; | ||
1337 | |||
1338 | drm_core_ioremap_wc(&ring->map, dev); | ||
1339 | if (ring->map.handle == NULL) { | ||
1340 | DRM_ERROR("cannot ioremap virtual address for" | ||
1341 | " ring buffer\n"); | ||
1342 | return -ENOMEM; | ||
1343 | } | ||
1344 | |||
1345 | ring->virtual_start = (void __force __iomem *)ring->map.handle; | ||
1346 | return 0; | ||
1347 | } | ||
1348 | |||
1349 | int intel_init_bsd_ring_buffer(struct drm_device *dev) | ||
1350 | { | ||
1351 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
1352 | struct intel_ring_buffer *ring = &dev_priv->ring[VCS]; | ||
1353 | |||
1354 | if (IS_GEN6(dev) || IS_GEN7(dev)) | ||
1355 | *ring = gen6_bsd_ring; | ||
1356 | else | ||
1357 | *ring = bsd_ring; | ||
1358 | |||
1359 | return intel_init_ring_buffer(dev, ring); | ||
1360 | } | ||
1361 | |||
1362 | int intel_init_blt_ring_buffer(struct drm_device *dev) | ||
1363 | { | ||
1364 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
1365 | struct intel_ring_buffer *ring = &dev_priv->ring[BCS]; | ||
1366 | |||
1367 | *ring = gen6_blt_ring; | ||
1368 | |||
1369 | return intel_init_ring_buffer(dev, ring); | ||
1370 | } | ||
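These three init functions are paired at driver load; a hedged sketch mirroring the call sites in i915_gem_init_ringbuffer() (not part of this hunk; unwinding of already-initialised rings on failure is omitted):

    static int init_rings_sketch(struct drm_device *dev)
    {
            int ret;

            ret = intel_init_render_ring_buffer(dev);
            if (ret)
                    return ret;

            if (HAS_BSD(dev)) {
                    ret = intel_init_bsd_ring_buffer(dev);
                    if (ret)
                            return ret;
            }

            if (HAS_BLT(dev)) {
                    ret = intel_init_blt_ring_buffer(dev);
                    if (ret)
                            return ret;
            }

            return 0;
    }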
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index 525e7d3edda8..39ac2b634ae5 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h | |||
@@ -1,69 +1,80 @@ | |||
1 | #ifndef _INTEL_RINGBUFFER_H_ | 1 | #ifndef _INTEL_RINGBUFFER_H_ |
2 | #define _INTEL_RINGBUFFER_H_ | 2 | #define _INTEL_RINGBUFFER_H_ |
3 | 3 | ||
4 | enum { | ||
5 | RCS = 0x0, | ||
6 | VCS, | ||
7 | BCS, | ||
8 | I915_NUM_RINGS, | ||
9 | }; | ||
10 | |||
4 | struct intel_hw_status_page { | 11 | struct intel_hw_status_page { |
5 | void *page_addr; | 12 | u32 __iomem *page_addr; |
6 | unsigned int gfx_addr; | 13 | unsigned int gfx_addr; |
7 | struct drm_gem_object *obj; | 14 | struct drm_i915_gem_object *obj; |
8 | }; | 15 | }; |
9 | 16 | ||
10 | struct drm_i915_gem_execbuffer2; | 17 | #define I915_READ_TAIL(ring) I915_READ(RING_TAIL((ring)->mmio_base)) |
18 | #define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL((ring)->mmio_base), val) | ||
19 | |||
20 | #define I915_READ_START(ring) I915_READ(RING_START((ring)->mmio_base)) | ||
21 | #define I915_WRITE_START(ring, val) I915_WRITE(RING_START((ring)->mmio_base), val) | ||
22 | |||
23 | #define I915_READ_HEAD(ring) I915_READ(RING_HEAD((ring)->mmio_base)) | ||
24 | #define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD((ring)->mmio_base), val) | ||
25 | |||
26 | #define I915_READ_CTL(ring) I915_READ(RING_CTL((ring)->mmio_base)) | ||
27 | #define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL((ring)->mmio_base), val) | ||
28 | |||
29 | #define I915_READ_IMR(ring) I915_READ(RING_IMR((ring)->mmio_base)) | ||
30 | #define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val) | ||
31 | |||
32 | #define I915_READ_NOPID(ring) I915_READ(RING_NOPID((ring)->mmio_base)) | ||
33 | #define I915_READ_SYNC_0(ring) I915_READ(RING_SYNC_0((ring)->mmio_base)) | ||
34 | #define I915_READ_SYNC_1(ring) I915_READ(RING_SYNC_1((ring)->mmio_base)) | ||
35 | |||
11 | struct intel_ring_buffer { | 36 | struct intel_ring_buffer { |
12 | const char *name; | 37 | const char *name; |
13 | struct ring_regs { | 38 | enum intel_ring_id { |
14 | u32 ctl; | 39 | RING_RENDER = 0x1, |
15 | u32 head; | 40 | RING_BSD = 0x2, |
16 | u32 tail; | 41 | RING_BLT = 0x4, |
17 | u32 start; | 42 | } id; |
18 | } regs; | 43 | u32 mmio_base; |
19 | unsigned int ring_flag; | 44 | void __iomem *virtual_start; |
20 | unsigned long size; | ||
21 | unsigned int alignment; | ||
22 | void *virtual_start; | ||
23 | struct drm_device *dev; | 45 | struct drm_device *dev; |
24 | struct drm_gem_object *gem_object; | 46 | struct drm_i915_gem_object *obj; |
25 | 47 | ||
26 | unsigned int head; | 48 | u32 head; |
27 | unsigned int tail; | 49 | u32 tail; |
28 | unsigned int space; | 50 | int space; |
51 | int size; | ||
52 | int effective_size; | ||
29 | struct intel_hw_status_page status_page; | 53 | struct intel_hw_status_page status_page; |
30 | 54 | ||
31 | u32 irq_gem_seqno; /* last seq seen at irq time */ | 55 | spinlock_t irq_lock; |
32 | u32 waiting_gem_seqno; | 56 | u32 irq_refcount; |
33 | int user_irq_refcount; | 57 | u32 irq_mask; |
34 | void (*user_irq_get)(struct drm_device *dev, | 58 | u32 irq_seqno; /* last seq seen at irq time */ |
35 | struct intel_ring_buffer *ring); | 59 | u32 trace_irq_seqno; |
36 | void (*user_irq_put)(struct drm_device *dev, | 60 | u32 waiting_seqno; |
37 | struct intel_ring_buffer *ring); | 61 | u32 sync_seqno[I915_NUM_RINGS-1]; |
38 | void (*setup_status_page)(struct drm_device *dev, | 62 | bool __must_check (*irq_get)(struct intel_ring_buffer *ring); |
39 | struct intel_ring_buffer *ring); | 63 | void (*irq_put)(struct intel_ring_buffer *ring); |
40 | 64 | ||
41 | int (*init)(struct drm_device *dev, | 65 | int (*init)(struct intel_ring_buffer *ring); |
42 | struct intel_ring_buffer *ring); | 66 | |
43 | 67 | void (*write_tail)(struct intel_ring_buffer *ring, | |
44 | unsigned int (*get_head)(struct drm_device *dev, | 68 | u32 value); |
45 | struct intel_ring_buffer *ring); | 69 | int __must_check (*flush)(struct intel_ring_buffer *ring, |
46 | unsigned int (*get_tail)(struct drm_device *dev, | 70 | u32 invalidate_domains, |
47 | struct intel_ring_buffer *ring); | 71 | u32 flush_domains); |
48 | unsigned int (*get_active_head)(struct drm_device *dev, | 72 | int (*add_request)(struct intel_ring_buffer *ring, |
49 | struct intel_ring_buffer *ring); | 73 | u32 *seqno); |
50 | void (*advance_ring)(struct drm_device *dev, | 74 | u32 (*get_seqno)(struct intel_ring_buffer *ring); |
51 | struct intel_ring_buffer *ring); | 75 | int (*dispatch_execbuffer)(struct intel_ring_buffer *ring, |
52 | void (*flush)(struct drm_device *dev, | 76 | u32 offset, u32 length); |
53 | struct intel_ring_buffer *ring, | 77 | void (*cleanup)(struct intel_ring_buffer *ring); |
54 | u32 invalidate_domains, | ||
55 | u32 flush_domains); | ||
56 | u32 (*add_request)(struct drm_device *dev, | ||
57 | struct intel_ring_buffer *ring, | ||
58 | struct drm_file *file_priv, | ||
59 | u32 flush_domains); | ||
60 | u32 (*get_gem_seqno)(struct drm_device *dev, | ||
61 | struct intel_ring_buffer *ring); | ||
62 | int (*dispatch_gem_execbuffer)(struct drm_device *dev, | ||
63 | struct intel_ring_buffer *ring, | ||
64 | struct drm_i915_gem_execbuffer2 *exec, | ||
65 | struct drm_clip_rect *cliprects, | ||
66 | uint64_t exec_offset); | ||
67 | 78 | ||
68 | /** | 79 | /** |
69 | * List of objects currently involved in rendering from the | 80 | * List of objects currently involved in rendering from the |
@@ -83,49 +94,110 @@ struct intel_ring_buffer { | |||
83 | */ | 94 | */ |
84 | struct list_head request_list; | 95 | struct list_head request_list; |
85 | 96 | ||
97 | /** | ||
98 | * List of objects currently pending a GPU write flush. | ||
99 | * | ||
100 | * All elements on this list will belong to either the | ||
101 | * active_list or the flushing_list; last_rendering_seqno can | ||
102 | * be used to differentiate between the two lists. | ||
103 | */ | ||
104 | struct list_head gpu_write_list; | ||
105 | |||
106 | /** | ||
107 | * Do we have any not-yet-emitted requests outstanding? | ||
108 | */ | ||
109 | u32 outstanding_lazy_request; | ||
110 | |||
86 | wait_queue_head_t irq_queue; | 111 | wait_queue_head_t irq_queue; |
87 | drm_local_map_t map; | 112 | drm_local_map_t map; |
113 | |||
114 | void *private; | ||
88 | }; | 115 | }; |
89 | 116 | ||
90 | static inline u32 | 117 | static inline u32 |
118 | intel_ring_sync_index(struct intel_ring_buffer *ring, | ||
119 | struct intel_ring_buffer *other) | ||
120 | { | ||
121 | int idx; | ||
122 | |||
123 | /* | ||
124 | * cs -> 0 = vcs, 1 = bcs | ||
125 | * vcs -> 0 = bcs, 1 = cs | ||
126 | * bcs -> 0 = cs, 1 = vcs | ||
127 | */ | ||
128 | |||
129 | idx = (other - ring) - 1; | ||
130 | if (idx < 0) | ||
131 | idx += I915_NUM_RINGS; | ||
132 | |||
133 | return idx; | ||
134 | } | ||
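A worked example of the mapping in the comment (sync_index_example is a name invented here; the rings sit contiguously in dev_priv->ring[]):

    static void sync_index_example(struct drm_i915_private *dev_priv)
    {
            /* (&ring[BCS] - &ring[RCS]) - 1 = (2 - 0) - 1 = 1, so the
             * render ring's sync_seqno[1] slot pairs with the blitter. */
            u32 idx = intel_ring_sync_index(&dev_priv->ring[RCS],
                                            &dev_priv->ring[BCS]);
            WARN_ON(idx != 1);
    }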
135 | |||
136 | static inline u32 | ||
91 | intel_read_status_page(struct intel_ring_buffer *ring, | 137 | intel_read_status_page(struct intel_ring_buffer *ring, |
92 | int reg) | 138 | int reg) |
139 | { | ||
140 | return ioread32(ring->status_page.page_addr + reg); | ||
141 | } | ||
142 | |||
143 | /** | ||
144 | * Reads a dword out of the status page, which is written to from the command | ||
145 | * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or | ||
146 | * MI_STORE_DATA_IMM. | ||
147 | * | ||
148 | * The following dwords have a reserved meaning: | ||
149 | * 0x00: ISR copy, updated when an ISR bit not set in the HWSTAM changes. | ||
150 | * 0x04: ring 0 head pointer | ||
151 | * 0x05: ring 1 head pointer (915-class) | ||
152 | * 0x06: ring 2 head pointer (915-class) | ||
153 | * 0x10-0x1b: Context status DWords (GM45) | ||
154 | * 0x1f: Last written status offset. (GM45) | ||
155 | * | ||
156 | * The area from dword 0x20 to 0x3ff is available for driver usage. | ||
157 | */ | ||
158 | #define READ_HWSP(dev_priv, reg) intel_read_status_page(LP_RING(dev_priv), reg) | ||
159 | #define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX) | ||
160 | #define I915_GEM_HWS_INDEX 0x20 | ||
161 | #define I915_BREADCRUMB_INDEX 0x21 | ||
162 | |||
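I915_GEM_HWS_INDEX is the slot in the driver-usable area where the command stream stores each request's seqno (via MI_STORE_DWORD_INDEX). A sketch of the CPU side, mirroring ring_get_seqno() elsewhere in this patch:

    static u32 last_completed_seqno(struct intel_ring_buffer *ring)
    {
            /* ioread32 keeps the status-page access uncached */
            return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
    }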
163 | void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring); | ||
164 | |||
165 | int __must_check intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n); | ||
166 | static inline int intel_wait_ring_idle(struct intel_ring_buffer *ring) | ||
93 | { | 167 | { |
94 | u32 *regs = ring->status_page.page_addr; | 168 | return intel_wait_ring_buffer(ring, ring->size - 8); |
95 | return regs[reg]; | ||
96 | } | 169 | } |
97 | 170 | ||
98 | int intel_init_ring_buffer(struct drm_device *dev, | 171 | int __must_check intel_ring_begin(struct intel_ring_buffer *ring, int n); |
99 | struct intel_ring_buffer *ring); | 172 | |
100 | void intel_cleanup_ring_buffer(struct drm_device *dev, | 173 | static inline void intel_ring_emit(struct intel_ring_buffer *ring, |
101 | struct intel_ring_buffer *ring); | 174 | u32 data) |
102 | int intel_wait_ring_buffer(struct drm_device *dev, | ||
103 | struct intel_ring_buffer *ring, int n); | ||
104 | int intel_wrap_ring_buffer(struct drm_device *dev, | ||
105 | struct intel_ring_buffer *ring); | ||
106 | void intel_ring_begin(struct drm_device *dev, | ||
107 | struct intel_ring_buffer *ring, int n); | ||
108 | |||
109 | static inline void intel_ring_emit(struct drm_device *dev, | ||
110 | struct intel_ring_buffer *ring, | ||
111 | unsigned int data) | ||
112 | { | 175 | { |
113 | unsigned int *virt = ring->virtual_start + ring->tail; | 176 | iowrite32(data, ring->virtual_start + ring->tail); |
114 | *virt = data; | ||
115 | ring->tail += 4; | 177 | ring->tail += 4; |
116 | } | 178 | } |
117 | 179 | ||
118 | void intel_fill_struct(struct drm_device *dev, | 180 | void intel_ring_advance(struct intel_ring_buffer *ring); |
119 | struct intel_ring_buffer *ring, | 181 | |
120 | void *data, | 182 | u32 intel_ring_get_seqno(struct intel_ring_buffer *ring); |
121 | unsigned int len); | 183 | int intel_ring_sync(struct intel_ring_buffer *ring, |
122 | void intel_ring_advance(struct drm_device *dev, | 184 | struct intel_ring_buffer *to, |
123 | struct intel_ring_buffer *ring); | 185 | u32 seqno); |
124 | 186 | ||
125 | u32 intel_ring_get_seqno(struct drm_device *dev, | 187 | int intel_init_render_ring_buffer(struct drm_device *dev); |
126 | struct intel_ring_buffer *ring); | 188 | int intel_init_bsd_ring_buffer(struct drm_device *dev); |
189 | int intel_init_blt_ring_buffer(struct drm_device *dev); | ||
190 | |||
191 | u32 intel_ring_get_active_head(struct intel_ring_buffer *ring); | ||
192 | void intel_ring_setup_status_page(struct intel_ring_buffer *ring); | ||
193 | |||
194 | static inline void i915_trace_irq_get(struct intel_ring_buffer *ring, u32 seqno) | ||
195 | { | ||
196 | if (ring->trace_irq_seqno == 0 && ring->irq_get(ring)) | ||
197 | ring->trace_irq_seqno = seqno; | ||
198 | } | ||
127 | 199 | ||
128 | extern struct intel_ring_buffer render_ring; | 200 | /* DRI warts */ |
129 | extern struct intel_ring_buffer bsd_ring; | 201 | int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size); |
130 | 202 | ||
131 | #endif /* _INTEL_RINGBUFFER_H_ */ | 203 | #endif /* _INTEL_RINGBUFFER_H_ */ |
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index ee73e428a84a..30fe554d8936 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c | |||
@@ -46,6 +46,7 @@ | |||
46 | SDVO_TV_MASK) | 46 | SDVO_TV_MASK) |
47 | 47 | ||
48 | #define IS_TV(c) (c->output_flag & SDVO_TV_MASK) | 48 | #define IS_TV(c) (c->output_flag & SDVO_TV_MASK) |
49 | #define IS_TMDS(c) (c->output_flag & SDVO_TMDS_MASK) | ||
49 | #define IS_LVDS(c) (c->output_flag & SDVO_LVDS_MASK) | 50 | #define IS_LVDS(c) (c->output_flag & SDVO_LVDS_MASK) |
50 | #define IS_TV_OR_LVDS(c) (c->output_flag & (SDVO_TV_MASK | SDVO_LVDS_MASK)) | 51 | #define IS_TV_OR_LVDS(c) (c->output_flag & (SDVO_TV_MASK | SDVO_LVDS_MASK)) |
51 | 52 | ||
@@ -65,8 +66,11 @@ static const char *tv_format_names[] = { | |||
65 | struct intel_sdvo { | 66 | struct intel_sdvo { |
66 | struct intel_encoder base; | 67 | struct intel_encoder base; |
67 | 68 | ||
69 | struct i2c_adapter *i2c; | ||
68 | u8 slave_addr; | 70 | u8 slave_addr; |
69 | 71 | ||
72 | struct i2c_adapter ddc; | ||
73 | |||
70 | /* Register for the SDVO device: SDVOB or SDVOC */ | 74 | /* Register for the SDVO device: SDVOB or SDVOC */ |
71 | int sdvo_reg; | 75 | int sdvo_reg; |
72 | 76 | ||
@@ -89,6 +93,12 @@ struct intel_sdvo { | |||
89 | uint16_t attached_output; | 93 | uint16_t attached_output; |
90 | 94 | ||
91 | /** | 95 | /** |
96 | * This is used to select the color range of RGB outputs in HDMI mode. | ||
97 | * It is only valid when using TMDS encoding and 8 bits per color mode. | ||
98 | */ | ||
99 | uint32_t color_range; | ||
100 | |||
101 | /** | ||
92 | * This is set if we're going to treat the device as TV-out. | 102 | * This is set if we're going to treat the device as TV-out. |
93 | * | 103 | * |
94 | * While we have these nice friendly flags for output types that ought | 104 | * While we have these nice friendly flags for output types that ought |
@@ -104,34 +114,25 @@ struct intel_sdvo { | |||
104 | * This is set if we treat the device as HDMI, instead of DVI. | 114 | * This is set if we treat the device as HDMI, instead of DVI. |
105 | */ | 115 | */ |
106 | bool is_hdmi; | 116 | bool is_hdmi; |
117 | bool has_hdmi_monitor; | ||
118 | bool has_hdmi_audio; | ||
107 | 119 | ||
108 | /** | 120 | /** |
109 | * This is set if we detect the sdvo device's output as LVDS. | 121 | * This is set if we detect the sdvo device's output as LVDS and |
122 | * have a valid fixed mode to use with the panel. | ||
110 | */ | 123 | */ |
111 | bool is_lvds; | 124 | bool is_lvds; |
112 | 125 | ||
113 | /** | 126 | /** |
114 | * This is sdvo flags for input timing. | ||
115 | */ | ||
116 | uint8_t sdvo_flags; | ||
117 | |||
118 | /** | ||
119 | * This is the sdvo fixed panel mode pointer | 127 | * This is the sdvo fixed panel mode pointer |
120 | */ | 128 | */ |
121 | struct drm_display_mode *sdvo_lvds_fixed_mode; | 129 | struct drm_display_mode *sdvo_lvds_fixed_mode; |
122 | 130 | ||
123 | /* | ||
124 | * supported encoding mode, used to determine whether HDMI is | ||
125 | * supported | ||
126 | */ | ||
127 | struct intel_sdvo_encode encode; | ||
128 | |||
129 | /* DDC bus used by this SDVO encoder */ | 131 | /* DDC bus used by this SDVO encoder */ |
130 | uint8_t ddc_bus; | 132 | uint8_t ddc_bus; |
131 | 133 | ||
132 | /* Mac mini hack -- use the same DDC as the analog connector */ | 134 | /* Input timings for adjusted_mode */ |
133 | struct i2c_adapter *analog_ddc_bus; | 135 | struct intel_sdvo_dtd input_dtd; |
134 | |||
135 | }; | 136 | }; |
136 | 137 | ||
137 | struct intel_sdvo_connector { | 138 | struct intel_sdvo_connector { |
@@ -140,6 +141,8 @@ struct intel_sdvo_connector { | |||
140 | /* Mark the type of connector */ | 141 | /* Mark the type of connector */ |
141 | uint16_t output_flag; | 142 | uint16_t output_flag; |
142 | 143 | ||
144 | int force_audio; | ||
145 | |||
143 | /* This contains all current supported TV format */ | 146 | /* This contains all current supported TV format */ |
144 | u8 tv_format_supported[TV_FORMAT_NUM]; | 147 | u8 tv_format_supported[TV_FORMAT_NUM]; |
145 | int format_supported_num; | 148 | int format_supported_num; |
@@ -186,9 +189,15 @@ struct intel_sdvo_connector { | |||
186 | u32 cur_dot_crawl, max_dot_crawl; | 189 | u32 cur_dot_crawl, max_dot_crawl; |
187 | }; | 190 | }; |
188 | 191 | ||
189 | static struct intel_sdvo *enc_to_intel_sdvo(struct drm_encoder *encoder) | 192 | static struct intel_sdvo *to_intel_sdvo(struct drm_encoder *encoder) |
193 | { | ||
194 | return container_of(encoder, struct intel_sdvo, base.base); | ||
195 | } | ||
196 | |||
197 | static struct intel_sdvo *intel_attached_sdvo(struct drm_connector *connector) | ||
190 | { | 198 | { |
191 | return container_of(enc_to_intel_encoder(encoder), struct intel_sdvo, base); | 199 | return container_of(intel_attached_encoder(connector), |
200 | struct intel_sdvo, base); | ||
192 | } | 201 | } |
193 | 202 | ||
194 | static struct intel_sdvo_connector *to_intel_sdvo_connector(struct drm_connector *connector) | 203 | static struct intel_sdvo_connector *to_intel_sdvo_connector(struct drm_connector *connector) |
@@ -213,7 +222,7 @@ intel_sdvo_create_enhance_property(struct intel_sdvo *intel_sdvo, | |||
213 | */ | 222 | */ |
214 | static void intel_sdvo_write_sdvox(struct intel_sdvo *intel_sdvo, u32 val) | 223 | static void intel_sdvo_write_sdvox(struct intel_sdvo *intel_sdvo, u32 val) |
215 | { | 224 | { |
216 | struct drm_device *dev = intel_sdvo->base.enc.dev; | 225 | struct drm_device *dev = intel_sdvo->base.base.dev; |
217 | struct drm_i915_private *dev_priv = dev->dev_private; | 226 | struct drm_i915_private *dev_priv = dev->dev_private; |
218 | u32 bval = val, cval = val; | 227 | u32 bval = val, cval = val; |
219 | int i; | 228 | int i; |
@@ -245,49 +254,29 @@ static void intel_sdvo_write_sdvox(struct intel_sdvo *intel_sdvo, u32 val) | |||
245 | 254 | ||
246 | static bool intel_sdvo_read_byte(struct intel_sdvo *intel_sdvo, u8 addr, u8 *ch) | 255 | static bool intel_sdvo_read_byte(struct intel_sdvo *intel_sdvo, u8 addr, u8 *ch) |
247 | { | 256 | { |
248 | u8 out_buf[2] = { addr, 0 }; | ||
249 | u8 buf[2]; | ||
250 | struct i2c_msg msgs[] = { | 257 | struct i2c_msg msgs[] = { |
251 | { | 258 | { |
252 | .addr = intel_sdvo->slave_addr >> 1, | 259 | .addr = intel_sdvo->slave_addr, |
253 | .flags = 0, | 260 | .flags = 0, |
254 | .len = 1, | 261 | .len = 1, |
255 | .buf = out_buf, | 262 | .buf = &addr, |
256 | }, | 263 | }, |
257 | { | 264 | { |
258 | .addr = intel_sdvo->slave_addr >> 1, | 265 | .addr = intel_sdvo->slave_addr, |
259 | .flags = I2C_M_RD, | 266 | .flags = I2C_M_RD, |
260 | .len = 1, | 267 | .len = 1, |
261 | .buf = buf, | 268 | .buf = ch, |
262 | } | 269 | } |
263 | }; | 270 | }; |
264 | int ret; | 271 | int ret; |
265 | 272 | ||
266 | if ((ret = i2c_transfer(intel_sdvo->base.i2c_bus, msgs, 2)) == 2) | 273 | if ((ret = i2c_transfer(intel_sdvo->i2c, msgs, 2)) == 2) |
267 | { | ||
268 | *ch = buf[0]; | ||
269 | return true; | 274 | return true; |
270 | } | ||
271 | 275 | ||
272 | DRM_DEBUG_KMS("i2c transfer returned %d\n", ret); | 276 | DRM_DEBUG_KMS("i2c transfer returned %d\n", ret); |
273 | return false; | 277 | return false; |
274 | } | 278 | } |
275 | 279 | ||
276 | static bool intel_sdvo_write_byte(struct intel_sdvo *intel_sdvo, int addr, u8 ch) | ||
277 | { | ||
278 | u8 out_buf[2] = { addr, ch }; | ||
279 | struct i2c_msg msgs[] = { | ||
280 | { | ||
281 | .addr = intel_sdvo->slave_addr >> 1, | ||
282 | .flags = 0, | ||
283 | .len = 2, | ||
284 | .buf = out_buf, | ||
285 | } | ||
286 | }; | ||
287 | |||
288 | return i2c_transfer(intel_sdvo->base.i2c_bus, msgs, 1) == 1; | ||
289 | } | ||
290 | |||
291 | #define SDVO_CMD_NAME_ENTRY(cmd) {cmd, #cmd} | 280 | #define SDVO_CMD_NAME_ENTRY(cmd) {cmd, #cmd} |
292 | /** Mapping of command numbers to names, for debug output */ | 281 | /** Mapping of command numbers to names, for debug output */ |
293 | static const struct _sdvo_cmd_name { | 282 | static const struct _sdvo_cmd_name { |
@@ -432,22 +421,6 @@ static void intel_sdvo_debug_write(struct intel_sdvo *intel_sdvo, u8 cmd, | |||
432 | DRM_LOG_KMS("\n"); | 421 | DRM_LOG_KMS("\n"); |
433 | } | 422 | } |
434 | 423 | ||
435 | static bool intel_sdvo_write_cmd(struct intel_sdvo *intel_sdvo, u8 cmd, | ||
436 | const void *args, int args_len) | ||
437 | { | ||
438 | int i; | ||
439 | |||
440 | intel_sdvo_debug_write(intel_sdvo, cmd, args, args_len); | ||
441 | |||
442 | for (i = 0; i < args_len; i++) { | ||
443 | if (!intel_sdvo_write_byte(intel_sdvo, SDVO_I2C_ARG_0 - i, | ||
444 | ((u8*)args)[i])) | ||
445 | return false; | ||
446 | } | ||
447 | |||
448 | return intel_sdvo_write_byte(intel_sdvo, SDVO_I2C_OPCODE, cmd); | ||
449 | } | ||
450 | |||
451 | static const char *cmd_status_names[] = { | 424 | static const char *cmd_status_names[] = { |
452 | "Power on", | 425 | "Power on", |
453 | "Success", | 426 | "Success", |
@@ -458,54 +431,108 @@ static const char *cmd_status_names[] = { | |||
458 | "Scaling not supported" | 431 | "Scaling not supported" |
459 | }; | 432 | }; |
460 | 433 | ||
461 | static void intel_sdvo_debug_response(struct intel_sdvo *intel_sdvo, | 434 | static bool intel_sdvo_write_cmd(struct intel_sdvo *intel_sdvo, u8 cmd, |
462 | void *response, int response_len, | 435 | const void *args, int args_len) |
463 | u8 status) | ||
464 | { | 436 | { |
465 | int i; | 437 | u8 buf[args_len*2 + 2], status; |
438 | struct i2c_msg msgs[args_len + 3]; | ||
439 | int i, ret; | ||
466 | 440 | ||
467 | DRM_DEBUG_KMS("%s: R: ", SDVO_NAME(intel_sdvo)); | 441 | intel_sdvo_debug_write(intel_sdvo, cmd, args, args_len); |
468 | for (i = 0; i < response_len; i++) | 442 | |
469 | DRM_LOG_KMS("%02X ", ((u8 *)response)[i]); | 443 | for (i = 0; i < args_len; i++) { |
470 | for (; i < 8; i++) | 444 | msgs[i].addr = intel_sdvo->slave_addr; |
471 | DRM_LOG_KMS(" "); | 445 | msgs[i].flags = 0; |
472 | if (status <= SDVO_CMD_STATUS_SCALING_NOT_SUPP) | 446 | msgs[i].len = 2; |
473 | DRM_LOG_KMS("(%s)", cmd_status_names[status]); | 447 | msgs[i].buf = buf + 2*i; |
474 | else | 448 | buf[2*i + 0] = SDVO_I2C_ARG_0 - i; |
475 | DRM_LOG_KMS("(??? %d)", status); | 449 | buf[2*i + 1] = ((u8*)args)[i]; |
476 | DRM_LOG_KMS("\n"); | 450 | } |
451 | msgs[i].addr = intel_sdvo->slave_addr; | ||
452 | msgs[i].flags = 0; | ||
453 | msgs[i].len = 2; | ||
454 | msgs[i].buf = buf + 2*i; | ||
455 | buf[2*i + 0] = SDVO_I2C_OPCODE; | ||
456 | buf[2*i + 1] = cmd; | ||
457 | |||
458 | /* the following two are to read the response */ | ||
459 | status = SDVO_I2C_CMD_STATUS; | ||
460 | msgs[i+1].addr = intel_sdvo->slave_addr; | ||
461 | msgs[i+1].flags = 0; | ||
462 | msgs[i+1].len = 1; | ||
463 | msgs[i+1].buf = &status; | ||
464 | |||
465 | msgs[i+2].addr = intel_sdvo->slave_addr; | ||
466 | msgs[i+2].flags = I2C_M_RD; | ||
467 | msgs[i+2].len = 1; | ||
468 | msgs[i+2].buf = &status; | ||
469 | |||
470 | ret = i2c_transfer(intel_sdvo->i2c, msgs, i+3); | ||
471 | if (ret < 0) { | ||
472 | DRM_DEBUG_KMS("I2c transfer returned %d\n", ret); | ||
473 | return false; | ||
474 | } | ||
475 | if (ret != i+3) { | ||
476 | /* failure in I2C transfer */ | ||
477 | DRM_DEBUG_KMS("I2c transfer returned %d/%d\n", ret, i+3); | ||
478 | return false; | ||
479 | } | ||
480 | |||
481 | return true; | ||
477 | } | 482 | } |
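The rewritten intel_sdvo_write_cmd() packs the argument writes, the opcode write, and the status poll into one i2c_transfer(), so no other bus traffic (in particular a DDC bus switch) can interleave. A usage sketch (sdvo_write_one_arg_sketch is a name invented here):

    static bool sdvo_write_one_arg_sketch(struct intel_sdvo *intel_sdvo,
                                          u8 cmd, u8 arg)
    {
            /* arg byte, opcode byte, and status poll all ride in the
             * same atomic transfer */
            return intel_sdvo_write_cmd(intel_sdvo, cmd, &arg, 1);
    }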
478 | 483 | ||
479 | static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo, | 484 | static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo, |
480 | void *response, int response_len) | 485 | void *response, int response_len) |
481 | { | 486 | { |
482 | int i; | 487 | u8 retry = 5; |
483 | u8 status; | 488 | u8 status; |
484 | u8 retry = 50; | 489 | int i; |
485 | 490 | ||
486 | while (retry--) { | 491 | DRM_DEBUG_KMS("%s: R: ", SDVO_NAME(intel_sdvo)); |
487 | /* Read the command response */ | ||
488 | for (i = 0; i < response_len; i++) { | ||
489 | if (!intel_sdvo_read_byte(intel_sdvo, | ||
490 | SDVO_I2C_RETURN_0 + i, | ||
491 | &((u8 *)response)[i])) | ||
492 | return false; | ||
493 | } | ||
494 | 492 | ||
495 | /* read the return status */ | 493 | /* |
496 | if (!intel_sdvo_read_byte(intel_sdvo, SDVO_I2C_CMD_STATUS, | 494 | * The documentation states that all commands will be |
495 | * processed within 15µs, and that we need only poll | ||
496 | * the status byte a maximum of 3 times in order for the | ||
497 | * command to be complete. | ||
498 | * | ||
499 | * Check 5 times in case the hardware failed to read the docs. | ||
500 | */ | ||
501 | if (!intel_sdvo_read_byte(intel_sdvo, | ||
502 | SDVO_I2C_CMD_STATUS, | ||
503 | &status)) | ||
504 | goto log_fail; | ||
505 | |||
506 | while (status == SDVO_CMD_STATUS_PENDING && retry--) { | ||
507 | udelay(15); | ||
508 | if (!intel_sdvo_read_byte(intel_sdvo, | ||
509 | SDVO_I2C_CMD_STATUS, | ||
497 | &status)) | 510 | &status)) |
498 | return false; | 511 | goto log_fail; |
512 | } | ||
499 | 513 | ||
500 | intel_sdvo_debug_response(intel_sdvo, response, response_len, | 514 | if (status <= SDVO_CMD_STATUS_SCALING_NOT_SUPP) |
501 | status); | 515 | DRM_LOG_KMS("(%s)", cmd_status_names[status]); |
502 | if (status != SDVO_CMD_STATUS_PENDING) | 516 | else |
503 | break; | 517 | DRM_LOG_KMS("(??? %d)", status); |
504 | 518 | ||
505 | mdelay(50); | 519 | if (status != SDVO_CMD_STATUS_SUCCESS) |
520 | goto log_fail; | ||
521 | |||
522 | /* Read the command response */ | ||
523 | for (i = 0; i < response_len; i++) { | ||
524 | if (!intel_sdvo_read_byte(intel_sdvo, | ||
525 | SDVO_I2C_RETURN_0 + i, | ||
526 | &((u8 *)response)[i])) | ||
527 | goto log_fail; | ||
528 | DRM_LOG_KMS(" %02X", ((u8 *)response)[i]); | ||
506 | } | 529 | } |
530 | DRM_LOG_KMS("\n"); | ||
531 | return true; | ||
507 | 532 | ||
508 | return status == SDVO_CMD_STATUS_SUCCESS; | 533 | log_fail: |
534 | DRM_LOG_KMS("... failed\n"); | ||
535 | return false; | ||
509 | } | 536 | } |
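Write and read pair up as command/response; a hedged sketch matching intel_sdvo_get_value() as defined elsewhere in this file:

    static bool sdvo_get_value_sketch(struct intel_sdvo *intel_sdvo,
                                      u8 cmd, void *value, int len)
    {
            return intel_sdvo_write_cmd(intel_sdvo, cmd, NULL, 0) &&
                    intel_sdvo_read_response(intel_sdvo, value, len);
    }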
510 | 537 | ||
511 | static int intel_sdvo_get_pixel_multiplier(struct drm_display_mode *mode) | 538 | static int intel_sdvo_get_pixel_multiplier(struct drm_display_mode *mode) |
@@ -518,63 +545,13 @@ static int intel_sdvo_get_pixel_multiplier(struct drm_display_mode *mode) | |||
518 | return 4; | 545 | return 4; |
519 | } | 546 | } |
520 | 547 | ||
521 | /** | 548 | static bool intel_sdvo_set_control_bus_switch(struct intel_sdvo *intel_sdvo, |
522 | * Try to read the response after issuing the DDC switch command. But it | 549 | u8 ddc_bus) |
523 | * is noted that we must do the action of reading response and issuing DDC | ||
524 | * switch command in one I2C transaction. Otherwise when we try to start | ||
525 | * another I2C transaction after issuing the DDC bus switch, it will be | ||
526 | * switched to the internal SDVO register. | ||
527 | */ | ||
528 | static void intel_sdvo_set_control_bus_switch(struct intel_sdvo *intel_sdvo, | ||
529 | u8 target) | ||
530 | { | 550 | { |
531 | u8 out_buf[2], cmd_buf[2], ret_value[2], ret; | 551 | /* This must be the immediately preceding write before the i2c xfer */ |
532 | struct i2c_msg msgs[] = { | 552 | return intel_sdvo_write_cmd(intel_sdvo, |
533 | { | 553 | SDVO_CMD_SET_CONTROL_BUS_SWITCH, |
534 | .addr = intel_sdvo->slave_addr >> 1, | 554 | &ddc_bus, 1); |
535 | .flags = 0, | ||
536 | .len = 2, | ||
537 | .buf = out_buf, | ||
538 | }, | ||
539 | /* the following two are to read the response */ | ||
540 | { | ||
541 | .addr = intel_sdvo->slave_addr >> 1, | ||
542 | .flags = 0, | ||
543 | .len = 1, | ||
544 | .buf = cmd_buf, | ||
545 | }, | ||
546 | { | ||
547 | .addr = intel_sdvo->slave_addr >> 1, | ||
548 | .flags = I2C_M_RD, | ||
549 | .len = 1, | ||
550 | .buf = ret_value, | ||
551 | }, | ||
552 | }; | ||
553 | |||
554 | intel_sdvo_debug_write(intel_sdvo, SDVO_CMD_SET_CONTROL_BUS_SWITCH, | ||
555 | &target, 1); | ||
556 | /* write the DDC switch command argument */ | ||
557 | intel_sdvo_write_byte(intel_sdvo, SDVO_I2C_ARG_0, target); | ||
558 | |||
559 | out_buf[0] = SDVO_I2C_OPCODE; | ||
560 | out_buf[1] = SDVO_CMD_SET_CONTROL_BUS_SWITCH; | ||
561 | cmd_buf[0] = SDVO_I2C_CMD_STATUS; | ||
562 | cmd_buf[1] = 0; | ||
563 | ret_value[0] = 0; | ||
564 | ret_value[1] = 0; | ||
565 | |||
566 | ret = i2c_transfer(intel_sdvo->base.i2c_bus, msgs, 3); | ||
567 | if (ret != 3) { | ||
568 | /* failure in I2C transfer */ | ||
569 | DRM_DEBUG_KMS("I2c transfer returned %d\n", ret); | ||
570 | return; | ||
571 | } | ||
572 | if (ret_value[0] != SDVO_CMD_STATUS_SUCCESS) { | ||
573 | DRM_DEBUG_KMS("DDC switch command returns response %d\n", | ||
574 | ret_value[0]); | ||
575 | return; | ||
576 | } | ||
577 | return; | ||
578 | } | 555 | } |
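The comment's constraint is enforced by the new DDC proxy adapter (set up in this patch's i2c init code, not shown here): the bus switch is issued immediately before the forwarded transaction. A hedged sketch (sdvo_ddc_xfer_sketch is a name invented here):

    static int sdvo_ddc_xfer_sketch(struct intel_sdvo *sdvo,
                                    struct i2c_msg *msgs, int num)
    {
            /* the switch must be the last write before the DDC traffic */
            if (!intel_sdvo_set_control_bus_switch(sdvo, sdvo->ddc_bus))
                    return -EIO;
            return i2c_transfer(sdvo->i2c, msgs, num);
    }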
579 | 556 | ||
580 | static bool intel_sdvo_set_value(struct intel_sdvo *intel_sdvo, u8 cmd, const void *data, int len) | 557 | static bool intel_sdvo_set_value(struct intel_sdvo *intel_sdvo, u8 cmd, const void *data, int len) |
@@ -612,6 +589,7 @@ static bool intel_sdvo_get_trained_inputs(struct intel_sdvo *intel_sdvo, bool *i | |||
612 | { | 589 | { |
613 | struct intel_sdvo_get_trained_inputs_response response; | 590 | struct intel_sdvo_get_trained_inputs_response response; |
614 | 591 | ||
592 | BUILD_BUG_ON(sizeof(response) != 1); | ||
615 | if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_TRAINED_INPUTS, | 593 | if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_TRAINED_INPUTS, |
616 | &response, sizeof(response))) | 594 | &response, sizeof(response))) |
617 | return false; | 595 | return false; |
@@ -659,6 +637,7 @@ static bool intel_sdvo_get_input_pixel_clock_range(struct intel_sdvo *intel_sdvo | |||
659 | { | 637 | { |
660 | struct intel_sdvo_pixel_clock_range clocks; | 638 | struct intel_sdvo_pixel_clock_range clocks; |
661 | 639 | ||
640 | BUILD_BUG_ON(sizeof(clocks) != 4); | ||
662 | if (!intel_sdvo_get_value(intel_sdvo, | 641 | if (!intel_sdvo_get_value(intel_sdvo, |
663 | SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE, | 642 | SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE, |
664 | &clocks, sizeof(clocks))) | 643 | &clocks, sizeof(clocks))) |
@@ -726,6 +705,8 @@ intel_sdvo_create_preferred_input_timing(struct intel_sdvo *intel_sdvo, | |||
726 | static bool intel_sdvo_get_preferred_input_timing(struct intel_sdvo *intel_sdvo, | 705 | static bool intel_sdvo_get_preferred_input_timing(struct intel_sdvo *intel_sdvo, |
727 | struct intel_sdvo_dtd *dtd) | 706 | struct intel_sdvo_dtd *dtd) |
728 | { | 707 | { |
708 | BUILD_BUG_ON(sizeof(dtd->part1) != 8); | ||
709 | BUILD_BUG_ON(sizeof(dtd->part2) != 8); | ||
729 | return intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1, | 710 | return intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1, |
730 | &dtd->part1, sizeof(dtd->part1)) && | 711 | &dtd->part1, sizeof(dtd->part1)) && |
731 | intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2, | 712 | intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2, |
@@ -819,17 +800,14 @@ static void intel_sdvo_get_mode_from_dtd(struct drm_display_mode * mode, | |||
819 | mode->flags |= DRM_MODE_FLAG_PVSYNC; | 800 | mode->flags |= DRM_MODE_FLAG_PVSYNC; |
820 | } | 801 | } |
821 | 802 | ||
822 | static bool intel_sdvo_get_supp_encode(struct intel_sdvo *intel_sdvo, | 803 | static bool intel_sdvo_check_supp_encode(struct intel_sdvo *intel_sdvo) |
823 | struct intel_sdvo_encode *encode) | ||
824 | { | 804 | { |
825 | if (intel_sdvo_get_value(intel_sdvo, | 805 | struct intel_sdvo_encode encode; |
826 | SDVO_CMD_GET_SUPP_ENCODE, | ||
827 | encode, sizeof(*encode))) | ||
828 | return true; | ||
829 | 806 | ||
830 | /* non-support means DVI */ | 807 | BUILD_BUG_ON(sizeof(encode) != 2); |
831 | memset(encode, 0, sizeof(*encode)); | 808 | return intel_sdvo_get_value(intel_sdvo, |
832 | return false; | 809 | SDVO_CMD_GET_SUPP_ENCODE, |
810 | &encode, sizeof(encode)); | ||
833 | } | 811 | } |
834 | 812 | ||
835 | static bool intel_sdvo_set_encode(struct intel_sdvo *intel_sdvo, | 813 | static bool intel_sdvo_set_encode(struct intel_sdvo *intel_sdvo, |
@@ -874,115 +852,36 @@ static void intel_sdvo_dump_hdmi_buf(struct intel_sdvo *intel_sdvo) | |||
874 | } | 852 | } |
875 | #endif | 853 | #endif |
876 | 854 | ||
877 | static bool intel_sdvo_set_hdmi_buf(struct intel_sdvo *intel_sdvo, | 855 | static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo) |
878 | int index, | ||
879 | uint8_t *data, int8_t size, uint8_t tx_rate) | ||
880 | { | ||
881 | uint8_t set_buf_index[2]; | ||
882 | |||
883 | set_buf_index[0] = index; | ||
884 | set_buf_index[1] = 0; | ||
885 | |||
886 | if (!intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_HBUF_INDEX, | ||
887 | set_buf_index, 2)) | ||
888 | return false; | ||
889 | |||
890 | for (; size > 0; size -= 8) { | ||
891 | if (!intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_HBUF_DATA, data, 8)) | ||
892 | return false; | ||
893 | |||
894 | data += 8; | ||
895 | } | ||
896 | |||
897 | return intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_HBUF_TXRATE, &tx_rate, 1); | ||
898 | } | ||
899 | |||
900 | static uint8_t intel_sdvo_calc_hbuf_csum(uint8_t *data, uint8_t size) | ||
901 | { | ||
902 | uint8_t csum = 0; | ||
903 | int i; | ||
904 | |||
905 | for (i = 0; i < size; i++) | ||
906 | csum += data[i]; | ||
907 | |||
908 | return 0x100 - csum; | ||
909 | } | ||
910 | |||
911 | #define DIP_TYPE_AVI 0x82 | ||
912 | #define DIP_VERSION_AVI 0x2 | ||
913 | #define DIP_LEN_AVI 13 | ||
914 | |||
915 | struct dip_infoframe { | ||
916 | uint8_t type; | ||
917 | uint8_t version; | ||
918 | uint8_t len; | ||
919 | uint8_t checksum; | ||
920 | union { | ||
921 | struct { | ||
922 | /* Packet Byte #1 */ | ||
923 | uint8_t S:2; | ||
924 | uint8_t B:2; | ||
925 | uint8_t A:1; | ||
926 | uint8_t Y:2; | ||
927 | uint8_t rsvd1:1; | ||
928 | /* Packet Byte #2 */ | ||
929 | uint8_t R:4; | ||
930 | uint8_t M:2; | ||
931 | uint8_t C:2; | ||
932 | /* Packet Byte #3 */ | ||
933 | uint8_t SC:2; | ||
934 | uint8_t Q:2; | ||
935 | uint8_t EC:3; | ||
936 | uint8_t ITC:1; | ||
937 | /* Packet Byte #4 */ | ||
938 | uint8_t VIC:7; | ||
939 | uint8_t rsvd2:1; | ||
940 | /* Packet Byte #5 */ | ||
941 | uint8_t PR:4; | ||
942 | uint8_t rsvd3:4; | ||
943 | /* Packet Byte #6~13 */ | ||
944 | uint16_t top_bar_end; | ||
945 | uint16_t bottom_bar_start; | ||
946 | uint16_t left_bar_end; | ||
947 | uint16_t right_bar_start; | ||
948 | } avi; | ||
949 | struct { | ||
950 | /* Packet Byte #1 */ | ||
951 | uint8_t channel_count:3; | ||
952 | uint8_t rsvd1:1; | ||
953 | uint8_t coding_type:4; | ||
954 | /* Packet Byte #2 */ | ||
955 | uint8_t sample_size:2; /* SS0, SS1 */ | ||
956 | uint8_t sample_frequency:3; | ||
957 | uint8_t rsvd2:3; | ||
958 | /* Packet Byte #3 */ | ||
959 | uint8_t coding_type_private:5; | ||
960 | uint8_t rsvd3:3; | ||
961 | /* Packet Byte #4 */ | ||
962 | uint8_t channel_allocation; | ||
963 | /* Packet Byte #5 */ | ||
964 | uint8_t rsvd4:3; | ||
965 | uint8_t level_shift:4; | ||
966 | uint8_t downmix_inhibit:1; | ||
967 | } audio; | ||
968 | uint8_t payload[28]; | ||
969 | } __attribute__ ((packed)) u; | ||
970 | } __attribute__((packed)); | ||
971 | |||
972 | static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo, | ||
973 | struct drm_display_mode * mode) | ||
974 | { | 856 | { |
975 | struct dip_infoframe avi_if = { | 857 | struct dip_infoframe avi_if = { |
976 | .type = DIP_TYPE_AVI, | 858 | .type = DIP_TYPE_AVI, |
977 | .version = DIP_VERSION_AVI, | 859 | .ver = DIP_VERSION_AVI, |
978 | .len = DIP_LEN_AVI, | 860 | .len = DIP_LEN_AVI, |
979 | }; | 861 | }; |
862 | uint8_t tx_rate = SDVO_HBUF_TX_VSYNC; | ||
863 | uint8_t set_buf_index[2] = { 1, 0 }; | ||
864 | uint64_t *data = (uint64_t *)&avi_if; | ||
865 | unsigned i; | ||
866 | |||
867 | intel_dip_infoframe_csum(&avi_if); | ||
868 | |||
869 | if (!intel_sdvo_set_value(intel_sdvo, | ||
870 | SDVO_CMD_SET_HBUF_INDEX, | ||
871 | set_buf_index, 2)) | ||
872 | return false; | ||
980 | 873 | ||
981 | avi_if.checksum = intel_sdvo_calc_hbuf_csum((uint8_t *)&avi_if, | 874 | for (i = 0; i < sizeof(avi_if); i += 8) { |
982 | 4 + avi_if.len); | 875 | if (!intel_sdvo_set_value(intel_sdvo, |
983 | return intel_sdvo_set_hdmi_buf(intel_sdvo, 1, (uint8_t *)&avi_if, | 876 | SDVO_CMD_SET_HBUF_DATA, |
984 | 4 + avi_if.len, | 877 | data, 8)) |
985 | SDVO_HBUF_TX_VSYNC); | 878 | return false; |
879 | data++; | ||
880 | } | ||
881 | |||
882 | return intel_sdvo_set_value(intel_sdvo, | ||
883 | SDVO_CMD_SET_HBUF_TXRATE, | ||
884 | &tx_rate, 1); | ||
986 | } | 885 | } |
987 | 886 | ||
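The removed intel_sdvo_calc_hbuf_csum() and the local struct dip_infoframe move to shared code, but the DIP checksum rule is unchanged: all header and payload bytes, checksum included, must sum to zero modulo 256, which is what intel_dip_infoframe_csum() is assumed to compute. A minimal sketch of the calculation, matching the deleted helper:

	#include <stdint.h>
	#include <stddef.h>

	/* Sum every byte of the infoframe and take the two's complement,
	 * so the bytes including the checksum sum to 0 (mod 256). */
	static uint8_t dip_csum(const uint8_t *data, size_t size)
	{
		uint8_t sum = 0;
		size_t i;

		for (i = 0; i < size; i++)
			sum += data[i];
		return 0x100 - sum;	/* == (uint8_t)-sum */
	}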
988 | static bool intel_sdvo_set_tv_format(struct intel_sdvo *intel_sdvo) | 887 | static bool intel_sdvo_set_tv_format(struct intel_sdvo *intel_sdvo) |
@@ -1022,8 +921,6 @@ intel_sdvo_set_input_timings_for_mode(struct intel_sdvo *intel_sdvo, | |||
1022 | struct drm_display_mode *mode, | 921 | struct drm_display_mode *mode, |
1023 | struct drm_display_mode *adjusted_mode) | 922 | struct drm_display_mode *adjusted_mode) |
1024 | { | 923 | { |
1025 | struct intel_sdvo_dtd input_dtd; | ||
1026 | |||
1027 | /* Reset the input timing to the screen. Assume always input 0. */ | 924 | /* Reset the input timing to the screen. Assume always input 0. */ |
1028 | if (!intel_sdvo_set_target_input(intel_sdvo)) | 925 | if (!intel_sdvo_set_target_input(intel_sdvo)) |
1029 | return false; | 926 | return false; |
@@ -1035,14 +932,12 @@ intel_sdvo_set_input_timings_for_mode(struct intel_sdvo *intel_sdvo, | |||
1035 | return false; | 932 | return false; |
1036 | 933 | ||
1037 | if (!intel_sdvo_get_preferred_input_timing(intel_sdvo, | 934 | if (!intel_sdvo_get_preferred_input_timing(intel_sdvo, |
1038 | &input_dtd)) | 935 | &intel_sdvo->input_dtd)) |
1039 | return false; | 936 | return false; |
1040 | 937 | ||
1041 | intel_sdvo_get_mode_from_dtd(adjusted_mode, &input_dtd); | 938 | intel_sdvo_get_mode_from_dtd(adjusted_mode, &intel_sdvo->input_dtd); |
1042 | intel_sdvo->sdvo_flags = input_dtd.part2.sdvo_flags; | ||
1043 | 939 | ||
1044 | drm_mode_set_crtcinfo(adjusted_mode, 0); | 940 | drm_mode_set_crtcinfo(adjusted_mode, 0); |
1045 | mode->clock = adjusted_mode->clock; | ||
1046 | return true; | 941 | return true; |
1047 | } | 942 | } |
1048 | 943 | ||
@@ -1050,7 +945,8 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder, | |||
1050 | struct drm_display_mode *mode, | 945 | struct drm_display_mode *mode, |
1051 | struct drm_display_mode *adjusted_mode) | 946 | struct drm_display_mode *adjusted_mode) |
1052 | { | 947 | { |
1053 | struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder); | 948 | struct intel_sdvo *intel_sdvo = to_intel_sdvo(encoder); |
949 | int multiplier; | ||
1054 | 950 | ||
1055 | /* We need to construct preferred input timings based on our | 951 | /* We need to construct preferred input timings based on our |
1056 | * output timings. To do that, we have to set the output | 952 | * output timings. To do that, we have to set the output |
@@ -1065,10 +961,8 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder, | |||
1065 | mode, | 961 | mode, |
1066 | adjusted_mode); | 962 | adjusted_mode); |
1067 | } else if (intel_sdvo->is_lvds) { | 963 | } else if (intel_sdvo->is_lvds) { |
1068 | drm_mode_set_crtcinfo(intel_sdvo->sdvo_lvds_fixed_mode, 0); | ||
1069 | |||
1070 | if (!intel_sdvo_set_output_timings_from_mode(intel_sdvo, | 964 | if (!intel_sdvo_set_output_timings_from_mode(intel_sdvo, |
1071 | intel_sdvo->sdvo_lvds_fixed_mode)) | 965 | intel_sdvo->sdvo_lvds_fixed_mode)) |
1072 | return false; | 966 | return false; |
1073 | 967 | ||
1074 | (void) intel_sdvo_set_input_timings_for_mode(intel_sdvo, | 968 | (void) intel_sdvo_set_input_timings_for_mode(intel_sdvo, |
@@ -1077,9 +971,10 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder, | |||
1077 | } | 971 | } |
1078 | 972 | ||
1079 | /* Make the CRTC code factor in the SDVO pixel multiplier. The | 973 | /* Make the CRTC code factor in the SDVO pixel multiplier. The |
1080 | * SDVO device will be told of the multiplier during mode_set. | 974 | * SDVO device will factor out the multiplier during mode_set. |
1081 | */ | 975 | */ |
1082 | adjusted_mode->clock *= intel_sdvo_get_pixel_multiplier(mode); | 976 | multiplier = intel_sdvo_get_pixel_multiplier(adjusted_mode); |
977 | intel_mode_set_pixel_multiplier(adjusted_mode, multiplier); | ||
1083 | 978 | ||
1084 | return true; | 979 | return true; |
1085 | } | 980 | } |
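Rather than silently multiplying adjusted_mode->clock (and clobbering mode->clock), the fixup now records the multiplier on the adjusted mode, and mode_set reads it back with intel_mode_get_pixel_multiplier(). A sketch of what such a helper pair plausibly looks like, assuming the multiplier is stashed in the mode's driver-private flags (struct and macro names here are stand-ins, not quotes of intel_drv.h):

	/* Minimal stand-ins for the drm_display_mode fields used here. */
	struct display_mode {
		int clock;		/* dot clock in kHz */
		int private_flags;	/* driver-private scratch bits */
	};

	#define PIXEL_MULTIPLIER_MASK 0xf

	static void mode_set_pixel_multiplier(struct display_mode *mode, int mult)
	{
		mode->clock *= mult;	/* the CRTC code sees the scaled clock */
		mode->private_flags |= mult & PIXEL_MULTIPLIER_MASK;
	}

	static int mode_get_pixel_multiplier(const struct display_mode *mode)
	{
		int mult = mode->private_flags & PIXEL_MULTIPLIER_MASK;
		return mult ? mult : 1;	/* unset means 1x */
	}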
@@ -1092,11 +987,12 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder, | |||
1092 | struct drm_i915_private *dev_priv = dev->dev_private; | 987 | struct drm_i915_private *dev_priv = dev->dev_private; |
1093 | struct drm_crtc *crtc = encoder->crtc; | 988 | struct drm_crtc *crtc = encoder->crtc; |
1094 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 989 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
1095 | struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder); | 990 | struct intel_sdvo *intel_sdvo = to_intel_sdvo(encoder); |
1096 | u32 sdvox = 0; | 991 | u32 sdvox; |
1097 | int sdvo_pixel_multiply, rate; | ||
1098 | struct intel_sdvo_in_out_map in_out; | 992 | struct intel_sdvo_in_out_map in_out; |
1099 | struct intel_sdvo_dtd input_dtd; | 993 | struct intel_sdvo_dtd input_dtd; |
994 | int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode); | ||
995 | int rate; | ||
1100 | 996 | ||
1101 | if (!mode) | 997 | if (!mode) |
1102 | return; | 998 | return; |
@@ -1114,28 +1010,23 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder, | |||
1114 | SDVO_CMD_SET_IN_OUT_MAP, | 1010 | SDVO_CMD_SET_IN_OUT_MAP, |
1115 | &in_out, sizeof(in_out)); | 1011 | &in_out, sizeof(in_out)); |
1116 | 1012 | ||
1117 | if (intel_sdvo->is_hdmi) { | 1013 | /* Set the output timings to the screen */ |
1118 | if (!intel_sdvo_set_avi_infoframe(intel_sdvo, mode)) | 1014 | if (!intel_sdvo_set_target_output(intel_sdvo, |
1119 | return; | 1015 | intel_sdvo->attached_output)) |
1120 | 1016 | return; | |
1121 | sdvox |= SDVO_AUDIO_ENABLE; | ||
1122 | } | ||
1123 | 1017 | ||
1124 | /* We have tried to get input timing in mode_fixup, and filled into | 1018 | /* We have tried to get input timing in mode_fixup, and filled into |
1125 | adjusted_mode */ | 1019 | * adjusted_mode. |
1126 | intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode); | ||
1127 | if (intel_sdvo->is_tv || intel_sdvo->is_lvds) | ||
1128 | input_dtd.part2.sdvo_flags = intel_sdvo->sdvo_flags; | ||
1129 | |||
1130 | /* If it's a TV, we already set the output timing in mode_fixup. | ||
1131 | * Otherwise, the output timing is equal to the input timing. | ||
1132 | */ | 1020 | */ |
1133 | if (!intel_sdvo->is_tv && !intel_sdvo->is_lvds) { | 1021 | if (intel_sdvo->is_tv || intel_sdvo->is_lvds) { |
1022 | input_dtd = intel_sdvo->input_dtd; | ||
1023 | } else { | ||
1134 | /* Set the output timing to the screen */ | 1024 | /* Set the output timing to the screen */ |
1135 | if (!intel_sdvo_set_target_output(intel_sdvo, | 1025 | if (!intel_sdvo_set_target_output(intel_sdvo, |
1136 | intel_sdvo->attached_output)) | 1026 | intel_sdvo->attached_output)) |
1137 | return; | 1027 | return; |
1138 | 1028 | ||
1029 | intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode); | ||
1139 | (void) intel_sdvo_set_output_timing(intel_sdvo, &input_dtd); | 1030 | (void) intel_sdvo_set_output_timing(intel_sdvo, &input_dtd); |
1140 | } | 1031 | } |
1141 | 1032 | ||
@@ -1143,31 +1034,22 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder, | |||
1143 | if (!intel_sdvo_set_target_input(intel_sdvo)) | 1034 | if (!intel_sdvo_set_target_input(intel_sdvo)) |
1144 | return; | 1035 | return; |
1145 | 1036 | ||
1146 | if (intel_sdvo->is_tv) { | 1037 | if (intel_sdvo->has_hdmi_monitor) { |
1147 | if (!intel_sdvo_set_tv_format(intel_sdvo)) | 1038 | intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_HDMI); |
1148 | return; | 1039 | intel_sdvo_set_colorimetry(intel_sdvo, |
1149 | } | 1040 | SDVO_COLORIMETRY_RGB256); |
1041 | intel_sdvo_set_avi_infoframe(intel_sdvo); | ||
1042 | } else | ||
1043 | intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_DVI); | ||
1150 | 1044 | ||
1151 | /* We would like to use intel_sdvo_create_preferred_input_timing() to | 1045 | if (intel_sdvo->is_tv && |
1152 | * provide the device with a timing it can support, if it supports that | 1046 | !intel_sdvo_set_tv_format(intel_sdvo)) |
1153 | * feature. However, presumably we would need to adjust the CRTC to | 1047 | return; |
1154 | * output the preferred timing, and we don't support that currently. | ||
1155 | */ | ||
1156 | #if 0 | ||
1157 | success = intel_sdvo_create_preferred_input_timing(encoder, clock, | ||
1158 | width, height); | ||
1159 | if (success) { | ||
1160 | struct intel_sdvo_dtd *input_dtd; | ||
1161 | 1048 | ||
1162 | intel_sdvo_get_preferred_input_timing(encoder, &input_dtd); | ||
1163 | intel_sdvo_set_input_timing(encoder, &input_dtd); | ||
1164 | } | ||
1165 | #else | ||
1166 | (void) intel_sdvo_set_input_timing(intel_sdvo, &input_dtd); | 1049 | (void) intel_sdvo_set_input_timing(intel_sdvo, &input_dtd); |
1167 | #endif | ||
1168 | 1050 | ||
1169 | sdvo_pixel_multiply = intel_sdvo_get_pixel_multiplier(mode); | 1051 | switch (pixel_multiplier) { |
1170 | switch (sdvo_pixel_multiply) { | 1052 | default: |
1171 | case 1: rate = SDVO_CLOCK_RATE_MULT_1X; break; | 1053 | case 1: rate = SDVO_CLOCK_RATE_MULT_1X; break; |
1172 | case 2: rate = SDVO_CLOCK_RATE_MULT_2X; break; | 1054 | case 2: rate = SDVO_CLOCK_RATE_MULT_2X; break; |
1173 | case 4: rate = SDVO_CLOCK_RATE_MULT_4X; break; | 1055 | case 4: rate = SDVO_CLOCK_RATE_MULT_4X; break; |
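For reference, the multiplier fed into this switch is derived from the dot clock: SDVO needs its pixel bus inside a fixed frequency window, so slow modes are transmitted with repeated pixels. A sketch of the assumed derivation behind intel_sdvo_get_pixel_multiplier():

	/* SDVO wants its pixel bus between roughly 100 and 200 MHz, so slow
	 * modes are sent with each pixel repeated; thresholds as assumed here. */
	static int sdvo_pixel_multiplier(int dotclock_khz)
	{
		if (dotclock_khz >= 100000)
			return 1;	/* 100-200 MHz: send as-is */
		else if (dotclock_khz >= 50000)
			return 2;	/* 50-100 MHz: 2x repetition */
		else
			return 4;	/* below 50 MHz: 4x repetition */
	}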
@@ -1176,14 +1058,18 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder, | |||
1176 | return; | 1058 | return; |
1177 | 1059 | ||
1178 | /* Set the SDVO control regs. */ | 1060 | /* Set the SDVO control regs. */ |
1179 | if (IS_I965G(dev)) { | 1061 | if (INTEL_INFO(dev)->gen >= 4) { |
1180 | sdvox |= SDVO_BORDER_ENABLE; | 1062 | sdvox = 0; |
1063 | if (intel_sdvo->is_hdmi) | ||
1064 | sdvox |= intel_sdvo->color_range; | ||
1065 | if (INTEL_INFO(dev)->gen < 5) | ||
1066 | sdvox |= SDVO_BORDER_ENABLE; | ||
1181 | if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) | 1067 | if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) |
1182 | sdvox |= SDVO_VSYNC_ACTIVE_HIGH; | 1068 | sdvox |= SDVO_VSYNC_ACTIVE_HIGH; |
1183 | if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) | 1069 | if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) |
1184 | sdvox |= SDVO_HSYNC_ACTIVE_HIGH; | 1070 | sdvox |= SDVO_HSYNC_ACTIVE_HIGH; |
1185 | } else { | 1071 | } else { |
1186 | sdvox |= I915_READ(intel_sdvo->sdvo_reg); | 1072 | sdvox = I915_READ(intel_sdvo->sdvo_reg); |
1187 | switch (intel_sdvo->sdvo_reg) { | 1073 | switch (intel_sdvo->sdvo_reg) { |
1188 | case SDVOB: | 1074 | case SDVOB: |
1189 | sdvox &= SDVOB_PRESERVE_MASK; | 1075 | sdvox &= SDVOB_PRESERVE_MASK; |
@@ -1196,16 +1082,19 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder, | |||
1196 | } | 1082 | } |
1197 | if (intel_crtc->pipe == 1) | 1083 | if (intel_crtc->pipe == 1) |
1198 | sdvox |= SDVO_PIPE_B_SELECT; | 1084 | sdvox |= SDVO_PIPE_B_SELECT; |
1085 | if (intel_sdvo->has_hdmi_audio) | ||
1086 | sdvox |= SDVO_AUDIO_ENABLE; | ||
1199 | 1087 | ||
1200 | if (IS_I965G(dev)) { | 1088 | if (INTEL_INFO(dev)->gen >= 4) { |
1201 | /* done in crtc_mode_set as the dpll_md reg must be written early */ | 1089 | /* done in crtc_mode_set as the dpll_md reg must be written early */ |
1202 | } else if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) { | 1090 | } else if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) { |
1203 | /* done in crtc_mode_set as it lives inside the dpll register */ | 1091 | /* done in crtc_mode_set as it lives inside the dpll register */ |
1204 | } else { | 1092 | } else { |
1205 | sdvox |= (sdvo_pixel_multiply - 1) << SDVO_PORT_MULTIPLY_SHIFT; | 1093 | sdvox |= (pixel_multiplier - 1) << SDVO_PORT_MULTIPLY_SHIFT; |
1206 | } | 1094 | } |
1207 | 1095 | ||
1208 | if (intel_sdvo->sdvo_flags & SDVO_NEED_TO_STALL) | 1096 | if (input_dtd.part2.sdvo_flags & SDVO_NEED_TO_STALL && |
1097 | INTEL_INFO(dev)->gen < 5) | ||
1209 | sdvox |= SDVO_STALL_SELECT; | 1098 | sdvox |= SDVO_STALL_SELECT; |
1210 | intel_sdvo_write_sdvox(intel_sdvo, sdvox); | 1099 | intel_sdvo_write_sdvox(intel_sdvo, sdvox); |
1211 | } | 1100 | } |
@@ -1214,7 +1103,7 @@ static void intel_sdvo_dpms(struct drm_encoder *encoder, int mode) | |||
1214 | { | 1103 | { |
1215 | struct drm_device *dev = encoder->dev; | 1104 | struct drm_device *dev = encoder->dev; |
1216 | struct drm_i915_private *dev_priv = dev->dev_private; | 1105 | struct drm_i915_private *dev_priv = dev->dev_private; |
1217 | struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder); | 1106 | struct intel_sdvo *intel_sdvo = to_intel_sdvo(encoder); |
1218 | struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); | 1107 | struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); |
1219 | u32 temp; | 1108 | u32 temp; |
1220 | 1109 | ||
@@ -1260,8 +1149,7 @@ static void intel_sdvo_dpms(struct drm_encoder *encoder, int mode) | |||
1260 | static int intel_sdvo_mode_valid(struct drm_connector *connector, | 1149 | static int intel_sdvo_mode_valid(struct drm_connector *connector, |
1261 | struct drm_display_mode *mode) | 1150 | struct drm_display_mode *mode) |
1262 | { | 1151 | { |
1263 | struct drm_encoder *encoder = intel_attached_encoder(connector); | 1152 | struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector); |
1264 | struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder); | ||
1265 | 1153 | ||
1266 | if (mode->flags & DRM_MODE_FLAG_DBLSCAN) | 1154 | if (mode->flags & DRM_MODE_FLAG_DBLSCAN) |
1267 | return MODE_NO_DBLESCAN; | 1155 | return MODE_NO_DBLESCAN; |
@@ -1285,7 +1173,39 @@ static int intel_sdvo_mode_valid(struct drm_connector *connector, | |||
1285 | 1173 | ||
1286 | static bool intel_sdvo_get_capabilities(struct intel_sdvo *intel_sdvo, struct intel_sdvo_caps *caps) | 1174 | static bool intel_sdvo_get_capabilities(struct intel_sdvo *intel_sdvo, struct intel_sdvo_caps *caps) |
1287 | { | 1175 | { |
1288 | return intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_DEVICE_CAPS, caps, sizeof(*caps)); | 1176 | BUILD_BUG_ON(sizeof(*caps) != 8); |
1177 | if (!intel_sdvo_get_value(intel_sdvo, | ||
1178 | SDVO_CMD_GET_DEVICE_CAPS, | ||
1179 | caps, sizeof(*caps))) | ||
1180 | return false; | ||
1181 | |||
1182 | DRM_DEBUG_KMS("SDVO capabilities:\n" | ||
1183 | " vendor_id: %d\n" | ||
1184 | " device_id: %d\n" | ||
1185 | " device_rev_id: %d\n" | ||
1186 | " sdvo_version_major: %d\n" | ||
1187 | " sdvo_version_minor: %d\n" | ||
1188 | " sdvo_inputs_mask: %d\n" | ||
1189 | " smooth_scaling: %d\n" | ||
1190 | " sharp_scaling: %d\n" | ||
1191 | " up_scaling: %d\n" | ||
1192 | " down_scaling: %d\n" | ||
1193 | " stall_support: %d\n" | ||
1194 | " output_flags: %d\n", | ||
1195 | caps->vendor_id, | ||
1196 | caps->device_id, | ||
1197 | caps->device_rev_id, | ||
1198 | caps->sdvo_version_major, | ||
1199 | caps->sdvo_version_minor, | ||
1200 | caps->sdvo_inputs_mask, | ||
1201 | caps->smooth_scaling, | ||
1202 | caps->sharp_scaling, | ||
1203 | caps->up_scaling, | ||
1204 | caps->down_scaling, | ||
1205 | caps->stall_support, | ||
1206 | caps->output_flags); | ||
1207 | |||
1208 | return true; | ||
1289 | } | 1209 | } |
1290 | 1210 | ||
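The new BUILD_BUG_ON pins struct intel_sdvo_caps at the device's fixed eight-byte reply. Reconstructing the layout from the fields the debug message prints gives roughly the following (a sketch, not a quote of intel_sdvo_regs.h; bitfield ordering is ABI dependent):

	#include <stdint.h>

	/* Eight-byte capabilities reply: five plain bytes, one byte of
	 * bitfields, and a 16-bit output mask. */
	struct sdvo_caps {
		uint8_t vendor_id;
		uint8_t device_id;
		uint8_t device_rev_id;
		uint8_t sdvo_version_major;
		uint8_t sdvo_version_minor;
		unsigned int sdvo_inputs_mask:2;
		unsigned int smooth_scaling:1;
		unsigned int sharp_scaling:1;
		unsigned int up_scaling:1;
		unsigned int down_scaling:1;
		unsigned int stall_support:1;
		unsigned int pad:1;
		uint16_t output_flags;
	} __attribute__((packed));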
1291 | /* Unused */ | 1211 |
@@ -1360,128 +1280,85 @@ void intel_sdvo_set_hotplug(struct drm_connector *connector, int on) | |||
1360 | static bool | 1280 | static bool |
1361 | intel_sdvo_multifunc_encoder(struct intel_sdvo *intel_sdvo) | 1281 | intel_sdvo_multifunc_encoder(struct intel_sdvo *intel_sdvo) |
1362 | { | 1282 | { |
1363 | int caps = 0; | 1283 | /* Is there more than one type of output? */ |
1364 | 1284 | int caps = intel_sdvo->caps.output_flags & 0xf; | |
1365 | if (intel_sdvo->caps.output_flags & | 1285 | return caps & (caps - 1);
1366 | (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1)) | ||
1367 | caps++; | ||
1368 | if (intel_sdvo->caps.output_flags & | ||
1369 | (SDVO_OUTPUT_RGB0 | SDVO_OUTPUT_RGB1)) | ||
1370 | caps++; | ||
1371 | if (intel_sdvo->caps.output_flags & | ||
1372 | (SDVO_OUTPUT_SVID0 | SDVO_OUTPUT_SVID1)) | ||
1373 | caps++; | ||
1374 | if (intel_sdvo->caps.output_flags & | ||
1375 | (SDVO_OUTPUT_CVBS0 | SDVO_OUTPUT_CVBS1)) | ||
1376 | caps++; | ||
1377 | if (intel_sdvo->caps.output_flags & | ||
1378 | (SDVO_OUTPUT_YPRPB0 | SDVO_OUTPUT_YPRPB1)) | ||
1379 | caps++; | ||
1380 | |||
1381 | if (intel_sdvo->caps.output_flags & | ||
1382 | (SDVO_OUTPUT_SCART0 | SDVO_OUTPUT_SCART1)) | ||
1383 | caps++; | ||
1384 | |||
1385 | if (intel_sdvo->caps.output_flags & | ||
1386 | (SDVO_OUTPUT_LVDS0 | SDVO_OUTPUT_LVDS1)) | ||
1387 | caps++; | ||
1388 | |||
1389 | return (caps > 1); | ||
1390 | } | 1286 | } |
1391 | 1287 | ||
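The rewrite collapses seven explicit flag tests into one bit trick: after masking output_flags, caps & (caps - 1) clears the lowest set bit and is therefore non-zero exactly when two or more output-type bits remain. (The superficially similar caps & -caps isolates the lowest set bit instead, so it is non-zero whenever any output is present and would not answer the comment's question.) A standalone sketch:

	#include <stdbool.h>
	#include <stdint.h>

	/* x & (x - 1) clears the lowest set bit, so the result is non-zero
	 * exactly when at least two bits are set. */
	static bool more_than_one_bit(uint16_t x)
	{
		return (x & (x - 1)) != 0;
	}

	/* more_than_one_bit(0x0) == false
	 * more_than_one_bit(0x4) == false   (one output type)
	 * more_than_one_bit(0x5) == true    (multifunction encoder) */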
1392 | static struct drm_connector * | 1288 | static struct edid * |
1393 | intel_find_analog_connector(struct drm_device *dev) | 1289 | intel_sdvo_get_edid(struct drm_connector *connector) |
1394 | { | 1290 | { |
1395 | struct drm_connector *connector; | 1291 | struct intel_sdvo *sdvo = intel_attached_sdvo(connector); |
1396 | struct drm_encoder *encoder; | 1292 | return drm_get_edid(connector, &sdvo->ddc); |
1397 | struct intel_sdvo *intel_sdvo; | ||
1398 | |||
1399 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { | ||
1400 | intel_sdvo = enc_to_intel_sdvo(encoder); | ||
1401 | if (intel_sdvo->base.type == INTEL_OUTPUT_ANALOG) { | ||
1402 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | ||
1403 | if (encoder == intel_attached_encoder(connector)) | ||
1404 | return connector; | ||
1405 | } | ||
1406 | } | ||
1407 | } | ||
1408 | return NULL; | ||
1409 | } | 1293 | } |
1410 | 1294 | ||
1411 | static int | 1295 | /* Mac mini hack -- use the same DDC as the analog connector */ |
1412 | intel_analog_is_connected(struct drm_device *dev) | 1296 | static struct edid * |
1297 | intel_sdvo_get_analog_edid(struct drm_connector *connector) | ||
1413 | { | 1298 | { |
1414 | struct drm_connector *analog_connector; | 1299 | struct drm_i915_private *dev_priv = connector->dev->dev_private; |
1415 | |||
1416 | analog_connector = intel_find_analog_connector(dev); | ||
1417 | if (!analog_connector) | ||
1418 | return false; | ||
1419 | |||
1420 | if (analog_connector->funcs->detect(analog_connector, false) == | ||
1421 | connector_status_disconnected) | ||
1422 | return false; | ||
1423 | 1300 | ||
1424 | return true; | 1301 | return drm_get_edid(connector, |
1302 | &dev_priv->gmbus[dev_priv->crt_ddc_pin].adapter); | ||
1425 | } | 1303 | } |
1426 | 1304 | ||
1427 | enum drm_connector_status | 1305 | enum drm_connector_status |
1428 | intel_sdvo_hdmi_sink_detect(struct drm_connector *connector) | 1306 | intel_sdvo_hdmi_sink_detect(struct drm_connector *connector) |
1429 | { | 1307 | { |
1430 | struct drm_encoder *encoder = intel_attached_encoder(connector); | 1308 | struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector); |
1431 | struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder); | 1309 | enum drm_connector_status status; |
1432 | struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector); | 1310 | struct edid *edid; |
1433 | enum drm_connector_status status = connector_status_connected; | ||
1434 | struct edid *edid = NULL; | ||
1435 | 1311 | ||
1436 | edid = drm_get_edid(connector, intel_sdvo->base.ddc_bus); | 1312 | edid = intel_sdvo_get_edid(connector); |
1437 | 1313 | ||
1438 | /* This is only applied to SDVO cards with multiple outputs */ | ||
1439 | if (edid == NULL && intel_sdvo_multifunc_encoder(intel_sdvo)) { | 1314 | if (edid == NULL && intel_sdvo_multifunc_encoder(intel_sdvo)) { |
1440 | uint8_t saved_ddc, temp_ddc; | 1315 | u8 ddc, saved_ddc = intel_sdvo->ddc_bus; |
1441 | saved_ddc = intel_sdvo->ddc_bus; | 1316 | |
1442 | temp_ddc = intel_sdvo->ddc_bus >> 1; | ||
1443 | /* | 1317 | /* |
1444 | * Don't use bus 1 as the argument of the DDC bus switch to get | 1318 | * Don't use bus 1 as the argument of the DDC bus switch to get
1445 | * the EDID; that bus is reserved for the SDVO SPD ROM. | 1319 | * the EDID; that bus is reserved for the SDVO SPD ROM.
1446 | */ | 1320 | */ |
1447 | while(temp_ddc > 1) { | 1321 | for (ddc = intel_sdvo->ddc_bus >> 1; ddc > 1; ddc >>= 1) { |
1448 | intel_sdvo->ddc_bus = temp_ddc; | 1322 | intel_sdvo->ddc_bus = ddc; |
1449 | edid = drm_get_edid(connector, intel_sdvo->base.ddc_bus); | 1323 | edid = intel_sdvo_get_edid(connector); |
1450 | if (edid) { | 1324 | if (edid) |
1451 | /* | ||
1452 | * When we can get the EDID, maybe it is the | ||
1453 | * correct DDC bus. Update it. | ||
1454 | */ | ||
1455 | intel_sdvo->ddc_bus = temp_ddc; | ||
1456 | break; | 1325 | break; |
1457 | } | ||
1458 | temp_ddc >>= 1; | ||
1459 | } | 1326 | } |
1327 | /* | ||
1328 | * If we found the EDID on the other bus, | ||
1329 | * assume that is the correct DDC bus. | ||
1330 | */ | ||
1460 | if (edid == NULL) | 1331 | if (edid == NULL) |
1461 | intel_sdvo->ddc_bus = saved_ddc; | 1332 | intel_sdvo->ddc_bus = saved_ddc; |
1462 | } | 1333 | } |
1463 | /* when there is no edid and no monitor is connected with VGA | 1334 | |
1464 | * port, try to use the CRT ddc to read the EDID for DVI-connector | 1335 | /* |
1336 | * When there is no edid and no monitor is connected with VGA | ||
1337 | * port, try to use the CRT ddc to read the EDID for DVI-connector. | ||
1465 | */ | 1338 | */ |
1466 | if (edid == NULL && intel_sdvo->analog_ddc_bus && | 1339 | if (edid == NULL) |
1467 | !intel_analog_is_connected(connector->dev)) | 1340 | edid = intel_sdvo_get_analog_edid(connector); |
1468 | edid = drm_get_edid(connector, intel_sdvo->analog_ddc_bus); | ||
1469 | 1341 | ||
1342 | status = connector_status_unknown; | ||
1470 | if (edid != NULL) { | 1343 | if (edid != NULL) { |
1471 | bool is_digital = !!(edid->input & DRM_EDID_INPUT_DIGITAL); | ||
1472 | bool need_digital = !!(intel_sdvo_connector->output_flag & SDVO_TMDS_MASK); | ||
1473 | |||
1474 | /* DDC bus is shared, match EDID to connector type */ | 1344 | /* DDC bus is shared, match EDID to connector type */ |
1475 | if (is_digital && need_digital) | 1345 | if (edid->input & DRM_EDID_INPUT_DIGITAL) { |
1476 | intel_sdvo->is_hdmi = drm_detect_hdmi_monitor(edid); | 1346 | status = connector_status_connected; |
1477 | else if (is_digital != need_digital) | 1347 | if (intel_sdvo->is_hdmi) { |
1348 | intel_sdvo->has_hdmi_monitor = drm_detect_hdmi_monitor(edid); | ||
1349 | intel_sdvo->has_hdmi_audio = drm_detect_monitor_audio(edid); | ||
1350 | } | ||
1351 | } else | ||
1478 | status = connector_status_disconnected; | 1352 | status = connector_status_disconnected; |
1479 | |||
1480 | connector->display_info.raw_edid = NULL; | 1353 | connector->display_info.raw_edid = NULL; |
1481 | } else | 1354 | kfree(edid); |
1482 | status = connector_status_disconnected; | 1355 | } |
1483 | 1356 | ||
1484 | kfree(edid); | 1357 | if (status == connector_status_connected) { |
1358 | struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector); | ||
1359 | if (intel_sdvo_connector->force_audio) | ||
1360 | intel_sdvo->has_hdmi_audio = intel_sdvo_connector->force_audio > 0; | ||
1361 | } | ||
1485 | 1362 | ||
1486 | return status; | 1363 | return status; |
1487 | } | 1364 | } |
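ddc_bus is a one-hot mask naming which DDC channel the SDVO control-bus switch routes to, so the rewritten loop simply walks the mask downwards one bus at a time, skipping bus 1. A standalone sketch of the probe order, with the bus-switch-plus-EDID-read step abstracted into a caller-supplied callback:

	#include <stdbool.h>
	#include <stdint.h>

	/* Walk a one-hot DDC mask downwards (e.g. 0x10 -> 0x08 -> 0x04 -> 0x02),
	 * skipping bus 1, which the SDVO device reserves for its SPD ROM.
	 * try_edid switches the control bus and attempts an EDID read.
	 * Returns the first bus that answered, or the original mask if none did. */
	static uint8_t probe_ddc_buses(uint8_t saved_ddc, bool (*try_edid)(uint8_t))
	{
		uint8_t ddc;

		for (ddc = saved_ddc >> 1; ddc > 1; ddc >>= 1)
			if (try_edid(ddc))
				return ddc;
		return saved_ddc;
	}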
@@ -1490,34 +1367,55 @@ static enum drm_connector_status | |||
1490 | intel_sdvo_detect(struct drm_connector *connector, bool force) | 1367 | intel_sdvo_detect(struct drm_connector *connector, bool force) |
1491 | { | 1368 | { |
1492 | uint16_t response; | 1369 | uint16_t response; |
1493 | struct drm_encoder *encoder = intel_attached_encoder(connector); | 1370 | struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector); |
1494 | struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder); | ||
1495 | struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector); | 1371 | struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector); |
1496 | enum drm_connector_status ret; | 1372 | enum drm_connector_status ret; |
1497 | 1373 | ||
1498 | if (!intel_sdvo_write_cmd(intel_sdvo, | 1374 | if (!intel_sdvo_write_cmd(intel_sdvo, |
1499 | SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0)) | 1375 | SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0)) |
1500 | return connector_status_unknown; | 1376 | return connector_status_unknown; |
1501 | if (intel_sdvo->is_tv) { | 1377 | |
1502 | /* add 30ms delay when the output type is SDVO-TV */ | 1378 | /* add 30ms delay when the output type might be TV */ |
1379 | if (intel_sdvo->caps.output_flags & | ||
1380 | (SDVO_OUTPUT_SVID0 | SDVO_OUTPUT_CVBS0)) | ||
1503 | mdelay(30); | 1381 | mdelay(30); |
1504 | } | 1382 | |
1505 | if (!intel_sdvo_read_response(intel_sdvo, &response, 2)) | 1383 | if (!intel_sdvo_read_response(intel_sdvo, &response, 2)) |
1506 | return connector_status_unknown; | 1384 | return connector_status_unknown; |
1507 | 1385 | ||
1508 | DRM_DEBUG_KMS("SDVO response %d %d\n", response & 0xff, response >> 8); | 1386 | DRM_DEBUG_KMS("SDVO response %d %d [%x]\n", |
1387 | response & 0xff, response >> 8, | ||
1388 | intel_sdvo_connector->output_flag); | ||
1509 | 1389 | ||
1510 | if (response == 0) | 1390 | if (response == 0) |
1511 | return connector_status_disconnected; | 1391 | return connector_status_disconnected; |
1512 | 1392 | ||
1513 | intel_sdvo->attached_output = response; | 1393 | intel_sdvo->attached_output = response; |
1514 | 1394 | ||
1395 | intel_sdvo->has_hdmi_monitor = false; | ||
1396 | intel_sdvo->has_hdmi_audio = false; | ||
1397 | |||
1515 | if ((intel_sdvo_connector->output_flag & response) == 0) | 1398 | if ((intel_sdvo_connector->output_flag & response) == 0) |
1516 | ret = connector_status_disconnected; | 1399 | ret = connector_status_disconnected; |
1517 | else if (response & SDVO_TMDS_MASK) | 1400 | else if (IS_TMDS(intel_sdvo_connector)) |
1518 | ret = intel_sdvo_hdmi_sink_detect(connector); | 1401 | ret = intel_sdvo_hdmi_sink_detect(connector); |
1519 | else | 1402 | else { |
1520 | ret = connector_status_connected; | 1403 | struct edid *edid; |
1404 | |||
1405 | /* if we have an edid check it matches the connection */ | ||
1406 | edid = intel_sdvo_get_edid(connector); | ||
1407 | if (edid == NULL) | ||
1408 | edid = intel_sdvo_get_analog_edid(connector); | ||
1409 | if (edid != NULL) { | ||
1410 | if (edid->input & DRM_EDID_INPUT_DIGITAL) | ||
1411 | ret = connector_status_disconnected; | ||
1412 | else | ||
1413 | ret = connector_status_connected; | ||
1414 | connector->display_info.raw_edid = NULL; | ||
1415 | kfree(edid); | ||
1416 | } else | ||
1417 | ret = connector_status_connected; | ||
1418 | } | ||
1521 | 1419 | ||
1522 | /* May need to update the encoder flags, e.g. the TV clock, for SDVO TV etc. */ | 1420 |
1523 | if (ret == connector_status_connected) { | 1421 | if (ret == connector_status_connected) { |
@@ -1538,12 +1436,10 @@ intel_sdvo_detect(struct drm_connector *connector, bool force) | |||
1538 | 1436 | ||
1539 | static void intel_sdvo_get_ddc_modes(struct drm_connector *connector) | 1437 | static void intel_sdvo_get_ddc_modes(struct drm_connector *connector) |
1540 | { | 1438 | { |
1541 | struct drm_encoder *encoder = intel_attached_encoder(connector); | 1439 | struct edid *edid; |
1542 | struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder); | ||
1543 | int num_modes; | ||
1544 | 1440 | ||
1545 | /* set the bus switch and get the modes */ | 1441 | /* set the bus switch and get the modes */ |
1546 | num_modes = intel_ddc_get_modes(connector, intel_sdvo->base.ddc_bus); | 1442 | edid = intel_sdvo_get_edid(connector); |
1547 | 1443 | ||
1548 | /* | 1444 | /* |
1549 | * Mac mini hack. On this device, the DVI-I connector shares one DDC | 1445 | * Mac mini hack. On this device, the DVI-I connector shares one DDC |
@@ -1551,12 +1447,21 @@ static void intel_sdvo_get_ddc_modes(struct drm_connector *connector) | |||
1551 | * DDC fails, check to see if the analog output is disconnected, in | 1447 | * DDC fails, check to see if the analog output is disconnected, in |
1552 | * which case we'll look there for the digital DDC data. | 1448 | * which case we'll look there for the digital DDC data. |
1553 | */ | 1449 | */ |
1554 | if (num_modes == 0 && | 1450 | if (edid == NULL) |
1555 | intel_sdvo->analog_ddc_bus && | 1451 | edid = intel_sdvo_get_analog_edid(connector); |
1556 | !intel_analog_is_connected(connector->dev)) { | 1452 | |
1557 | /* Switch to the analog ddc bus and try that | 1453 | if (edid != NULL) { |
1558 | */ | 1454 | struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector); |
1559 | (void) intel_ddc_get_modes(connector, intel_sdvo->analog_ddc_bus); | 1455 | bool monitor_is_digital = !!(edid->input & DRM_EDID_INPUT_DIGITAL); |
1456 | bool connector_is_digital = !!IS_TMDS(intel_sdvo_connector); | ||
1457 | |||
1458 | if (connector_is_digital == monitor_is_digital) { | ||
1459 | drm_mode_connector_update_edid_property(connector, edid); | ||
1460 | drm_add_edid_modes(connector, edid); | ||
1461 | } | ||
1462 | |||
1463 | connector->display_info.raw_edid = NULL; | ||
1464 | kfree(edid); | ||
1560 | } | 1465 | } |
1561 | } | 1466 | } |
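IS_TMDS(), used in detect above and again here, is not shown in this patch; given how it is used it can only test the connector's output flag against the TMDS (DVI/HDMI) outputs, along these lines (an assumption with illustrative bit positions, not a quote of the driver header):

	/* Assumed definition: a connector is digital iff its output flag
	 * selects one of the TMDS outputs. */
	#define SDVO_OUTPUT_TMDS0 (1 << 0)	/* bit positions illustrative */
	#define SDVO_OUTPUT_TMDS1 (1 << 8)
	#define SDVO_TMDS_MASK    (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1)
	#define IS_TMDS(c)        ((c)->output_flag & SDVO_TMDS_MASK)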
1562 | 1467 | ||
@@ -1565,7 +1470,7 @@ static void intel_sdvo_get_ddc_modes(struct drm_connector *connector) | |||
1565 | * Note! This is in reply order (see loop in get_tv_modes). | 1470 | * Note! This is in reply order (see loop in get_tv_modes). |
1566 | * XXX: all 60Hz refresh? | 1471 | * XXX: all 60Hz refresh? |
1567 | */ | 1472 | */ |
1568 | struct drm_display_mode sdvo_tv_modes[] = { | 1473 | static const struct drm_display_mode sdvo_tv_modes[] = { |
1569 | { DRM_MODE("320x200", DRM_MODE_TYPE_DRIVER, 5815, 320, 321, 384, | 1474 | { DRM_MODE("320x200", DRM_MODE_TYPE_DRIVER, 5815, 320, 321, 384, |
1570 | 416, 0, 200, 201, 232, 233, 0, | 1475 | 416, 0, 200, 201, 232, 233, 0, |
1571 | DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, | 1476 | DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, |
@@ -1627,8 +1532,7 @@ struct drm_display_mode sdvo_tv_modes[] = { | |||
1627 | 1532 | ||
1628 | static void intel_sdvo_get_tv_modes(struct drm_connector *connector) | 1533 | static void intel_sdvo_get_tv_modes(struct drm_connector *connector) |
1629 | { | 1534 | { |
1630 | struct drm_encoder *encoder = intel_attached_encoder(connector); | 1535 | struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector); |
1631 | struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder); | ||
1632 | struct intel_sdvo_sdtv_resolution_request tv_res; | 1536 | struct intel_sdvo_sdtv_resolution_request tv_res; |
1633 | uint32_t reply = 0, format_map = 0; | 1537 | uint32_t reply = 0, format_map = 0; |
1634 | int i; | 1538 | int i; |
@@ -1644,7 +1548,8 @@ static void intel_sdvo_get_tv_modes(struct drm_connector *connector) | |||
1644 | return; | 1548 | return; |
1645 | 1549 | ||
1646 | BUILD_BUG_ON(sizeof(tv_res) != 3); | 1550 | BUILD_BUG_ON(sizeof(tv_res) != 3); |
1647 | if (!intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT, | 1551 | if (!intel_sdvo_write_cmd(intel_sdvo, |
1552 | SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT, | ||
1648 | &tv_res, sizeof(tv_res))) | 1553 | &tv_res, sizeof(tv_res))) |
1649 | return; | 1554 | return; |
1650 | if (!intel_sdvo_read_response(intel_sdvo, &reply, 3)) | 1555 | if (!intel_sdvo_read_response(intel_sdvo, &reply, 3)) |
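tv_res is a packed three-byte request carrying the active TV-format bit, which is what the BUILD_BUG_ON above guards. A sketch of how the request is presumably filled from the 32-bit format map before this call (the bounded copy and the reliance on the hardware's little-endian layout are assumptions modeled on the surrounding driver code):

	#include <stdint.h>
	#include <string.h>

	struct sdtv_resolution_request {	/* stand-in: 3 bytes on the wire */
		uint8_t data[3];
	};

	/* Put the active TV-format bit into the low bytes of the request;
	 * the copy is bounded by the smaller of the two objects. */
	static void fill_tv_res(struct sdtv_resolution_request *tv_res,
				unsigned int tv_format_index)
	{
		uint32_t format_map = 1u << tv_format_index;
		size_t n = sizeof(format_map) < sizeof(*tv_res) ?
			   sizeof(format_map) : sizeof(*tv_res);

		memset(tv_res, 0, sizeof(*tv_res));
		memcpy(tv_res, &format_map, n);
	}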
@@ -1662,8 +1567,7 @@ static void intel_sdvo_get_tv_modes(struct drm_connector *connector) | |||
1662 | 1567 | ||
1663 | static void intel_sdvo_get_lvds_modes(struct drm_connector *connector) | 1568 | static void intel_sdvo_get_lvds_modes(struct drm_connector *connector) |
1664 | { | 1569 | { |
1665 | struct drm_encoder *encoder = intel_attached_encoder(connector); | 1570 | struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector); |
1666 | struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder); | ||
1667 | struct drm_i915_private *dev_priv = connector->dev->dev_private; | 1571 | struct drm_i915_private *dev_priv = connector->dev->dev_private; |
1668 | struct drm_display_mode *newmode; | 1572 | struct drm_display_mode *newmode; |
1669 | 1573 | ||
@@ -1672,7 +1576,7 @@ static void intel_sdvo_get_lvds_modes(struct drm_connector *connector) | |||
1672 | * Assume that the preferred modes are | 1576 | * Assume that the preferred modes are |
1673 | * arranged in priority order. | 1577 | * arranged in priority order. |
1674 | */ | 1578 | */ |
1675 | intel_ddc_get_modes(connector, intel_sdvo->base.ddc_bus); | 1579 | intel_ddc_get_modes(connector, intel_sdvo->i2c); |
1676 | if (list_empty(&connector->probed_modes) == false) | 1580 | if (list_empty(&connector->probed_modes) == false) |
1677 | goto end; | 1581 | goto end; |
1678 | 1582 | ||
@@ -1693,6 +1597,10 @@ end: | |||
1693 | if (newmode->type & DRM_MODE_TYPE_PREFERRED) { | 1597 | if (newmode->type & DRM_MODE_TYPE_PREFERRED) { |
1694 | intel_sdvo->sdvo_lvds_fixed_mode = | 1598 | intel_sdvo->sdvo_lvds_fixed_mode = |
1695 | drm_mode_duplicate(connector->dev, newmode); | 1599 | drm_mode_duplicate(connector->dev, newmode); |
1600 | |||
1601 | drm_mode_set_crtcinfo(intel_sdvo->sdvo_lvds_fixed_mode, | ||
1602 | 0); | ||
1603 | |||
1696 | intel_sdvo->is_lvds = true; | 1604 | intel_sdvo->is_lvds = true; |
1697 | break; | 1605 | break; |
1698 | } | 1606 | } |
@@ -1770,14 +1678,30 @@ static void intel_sdvo_destroy(struct drm_connector *connector) | |||
1770 | kfree(connector); | 1678 | kfree(connector); |
1771 | } | 1679 | } |
1772 | 1680 | ||
1681 | static bool intel_sdvo_detect_hdmi_audio(struct drm_connector *connector) | ||
1682 | { | ||
1683 | struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector); | ||
1684 | struct edid *edid; | ||
1685 | bool has_audio = false; | ||
1686 | |||
1687 | if (!intel_sdvo->is_hdmi) | ||
1688 | return false; | ||
1689 | |||
1690 | edid = intel_sdvo_get_edid(connector); | ||
1691 | if (edid != NULL && edid->input & DRM_EDID_INPUT_DIGITAL) | ||
1692 | has_audio = drm_detect_monitor_audio(edid); | ||
1693 | kfree(edid); | ||
1694 | return has_audio; | ||
1695 | } | ||
1696 | |||
1773 | static int | 1697 | static int |
1774 | intel_sdvo_set_property(struct drm_connector *connector, | 1698 | intel_sdvo_set_property(struct drm_connector *connector, |
1775 | struct drm_property *property, | 1699 | struct drm_property *property, |
1776 | uint64_t val) | 1700 | uint64_t val) |
1777 | { | 1701 | { |
1778 | struct drm_encoder *encoder = intel_attached_encoder(connector); | 1702 | struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector); |
1779 | struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder); | ||
1780 | struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector); | 1703 | struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector); |
1704 | struct drm_i915_private *dev_priv = connector->dev->dev_private; | ||
1781 | uint16_t temp_value; | 1705 | uint16_t temp_value; |
1782 | uint8_t cmd; | 1706 | uint8_t cmd; |
1783 | int ret; | 1707 | int ret; |
@@ -1786,6 +1710,35 @@ intel_sdvo_set_property(struct drm_connector *connector, | |||
1786 | if (ret) | 1710 | if (ret) |
1787 | return ret; | 1711 | return ret; |
1788 | 1712 | ||
1713 | if (property == dev_priv->force_audio_property) { | ||
1714 | int i = val; | ||
1715 | bool has_audio; | ||
1716 | |||
1717 | if (i == intel_sdvo_connector->force_audio) | ||
1718 | return 0; | ||
1719 | |||
1720 | intel_sdvo_connector->force_audio = i; | ||
1721 | |||
1722 | if (i == 0) | ||
1723 | has_audio = intel_sdvo_detect_hdmi_audio(connector); | ||
1724 | else | ||
1725 | has_audio = i > 0; | ||
1726 | |||
1727 | if (has_audio == intel_sdvo->has_hdmi_audio) | ||
1728 | return 0; | ||
1729 | |||
1730 | intel_sdvo->has_hdmi_audio = has_audio; | ||
1731 | goto done; | ||
1732 | } | ||
1733 | |||
1734 | if (property == dev_priv->broadcast_rgb_property) { | ||
1735 | if (val == !!intel_sdvo->color_range) | ||
1736 | return 0; | ||
1737 | |||
1738 | intel_sdvo->color_range = val ? SDVO_COLOR_RANGE_16_235 : 0; | ||
1739 | goto done; | ||
1740 | } | ||
1741 | |||
1789 | #define CHECK_PROPERTY(name, NAME) \ | 1742 | #define CHECK_PROPERTY(name, NAME) \ |
1790 | if (intel_sdvo_connector->name == property) { \ | 1743 | if (intel_sdvo_connector->name == property) { \ |
1791 | if (intel_sdvo_connector->cur_##name == temp_value) return 0; \ | 1744 | if (intel_sdvo_connector->cur_##name == temp_value) return 0; \ |
@@ -1879,9 +1832,8 @@ set_value: | |||
1879 | 1832 | ||
1880 | 1833 | ||
1881 | done: | 1834 | done: |
1882 | if (encoder->crtc) { | 1835 | if (intel_sdvo->base.base.crtc) { |
1883 | struct drm_crtc *crtc = encoder->crtc; | 1836 | struct drm_crtc *crtc = intel_sdvo->base.base.crtc; |
1884 | |||
1885 | drm_crtc_helper_set_mode(crtc, &crtc->mode, crtc->x, | 1837 | drm_crtc_helper_set_mode(crtc, &crtc->mode, crtc->x, |
1886 | crtc->y, crtc->fb); | 1838 | crtc->y, crtc->fb); |
1887 | } | 1839 | } |
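force_audio is a tri-state user override layered on top of EDID detection: negative forces HDMI audio off, zero means auto-detect via intel_sdvo_detect_hdmi_audio(), positive forces it on. The branch above reduces to the following, shown standalone for clarity:

	#include <stdbool.h>

	/* Resolve the effective HDMI-audio state from the user property and
	 * the EDID probe; mirrors the force_audio branch in set_property. */
	static bool resolve_hdmi_audio(int force_audio, bool edid_has_audio)
	{
		if (force_audio == 0)		/* auto: trust the monitor */
			return edid_has_audio;
		return force_audio > 0;		/* user forced on/off */
	}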
@@ -1909,20 +1861,18 @@ static const struct drm_connector_funcs intel_sdvo_connector_funcs = { | |||
1909 | static const struct drm_connector_helper_funcs intel_sdvo_connector_helper_funcs = { | 1861 | static const struct drm_connector_helper_funcs intel_sdvo_connector_helper_funcs = { |
1910 | .get_modes = intel_sdvo_get_modes, | 1862 | .get_modes = intel_sdvo_get_modes, |
1911 | .mode_valid = intel_sdvo_mode_valid, | 1863 | .mode_valid = intel_sdvo_mode_valid, |
1912 | .best_encoder = intel_attached_encoder, | 1864 | .best_encoder = intel_best_encoder, |
1913 | }; | 1865 | }; |
1914 | 1866 | ||
1915 | static void intel_sdvo_enc_destroy(struct drm_encoder *encoder) | 1867 | static void intel_sdvo_enc_destroy(struct drm_encoder *encoder) |
1916 | { | 1868 | { |
1917 | struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder); | 1869 | struct intel_sdvo *intel_sdvo = to_intel_sdvo(encoder); |
1918 | |||
1919 | if (intel_sdvo->analog_ddc_bus) | ||
1920 | intel_i2c_destroy(intel_sdvo->analog_ddc_bus); | ||
1921 | 1870 | ||
1922 | if (intel_sdvo->sdvo_lvds_fixed_mode != NULL) | 1871 | if (intel_sdvo->sdvo_lvds_fixed_mode != NULL) |
1923 | drm_mode_destroy(encoder->dev, | 1872 | drm_mode_destroy(encoder->dev, |
1924 | intel_sdvo->sdvo_lvds_fixed_mode); | 1873 | intel_sdvo->sdvo_lvds_fixed_mode); |
1925 | 1874 | ||
1875 | i2c_del_adapter(&intel_sdvo->ddc); | ||
1926 | intel_encoder_destroy(encoder); | 1876 | intel_encoder_destroy(encoder); |
1927 | } | 1877 | } |
1928 | 1878 | ||
@@ -1990,54 +1940,39 @@ intel_sdvo_select_ddc_bus(struct drm_i915_private *dev_priv, | |||
1990 | intel_sdvo_guess_ddc_bus(sdvo); | 1940 | intel_sdvo_guess_ddc_bus(sdvo); |
1991 | } | 1941 | } |
1992 | 1942 | ||
1993 | static bool | 1943 | static void |
1994 | intel_sdvo_get_digital_encoding_mode(struct intel_sdvo *intel_sdvo, int device) | 1944 | intel_sdvo_select_i2c_bus(struct drm_i915_private *dev_priv, |
1945 | struct intel_sdvo *sdvo, u32 reg) | ||
1995 | { | 1946 | { |
1996 | return intel_sdvo_set_target_output(intel_sdvo, | 1947 | struct sdvo_device_mapping *mapping; |
1997 | device == 0 ? SDVO_OUTPUT_TMDS0 : SDVO_OUTPUT_TMDS1) && | 1948 | u8 pin, speed; |
1998 | intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_ENCODE, | ||
1999 | &intel_sdvo->is_hdmi, 1); | ||
2000 | } | ||
2001 | 1949 | ||
2002 | static struct intel_sdvo * | 1950 | if (IS_SDVOB(reg)) |
2003 | intel_sdvo_chan_to_intel_sdvo(struct intel_i2c_chan *chan) | 1951 | mapping = &dev_priv->sdvo_mappings[0]; |
2004 | { | 1952 | else |
2005 | struct drm_device *dev = chan->drm_dev; | 1953 | mapping = &dev_priv->sdvo_mappings[1]; |
2006 | struct drm_encoder *encoder; | ||
2007 | 1954 | ||
2008 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { | 1955 | pin = GMBUS_PORT_DPB; |
2009 | struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder); | 1956 | speed = GMBUS_RATE_1MHZ >> 8; |
2010 | if (intel_sdvo->base.ddc_bus == &chan->adapter) | 1957 | if (mapping->initialized) { |
2011 | return intel_sdvo; | 1958 | pin = mapping->i2c_pin; |
1959 | speed = mapping->i2c_speed; | ||
2012 | } | 1960 | } |
2013 | 1961 | ||
2014 | return NULL; | 1962 | if (pin < GMBUS_NUM_PORTS) { |
1963 | sdvo->i2c = &dev_priv->gmbus[pin].adapter; | ||
1964 | intel_gmbus_set_speed(sdvo->i2c, speed); | ||
1965 | intel_gmbus_force_bit(sdvo->i2c, true); | ||
1966 | } else | ||
1967 | sdvo->i2c = &dev_priv->gmbus[GMBUS_PORT_DPB].adapter; | ||
2015 | } | 1968 | } |
2016 | 1969 | ||
2017 | static int intel_sdvo_master_xfer(struct i2c_adapter *i2c_adap, | 1970 | static bool |
2018 | struct i2c_msg msgs[], int num) | 1971 | intel_sdvo_is_hdmi_connector(struct intel_sdvo *intel_sdvo, int device) |
2019 | { | 1972 | { |
2020 | struct intel_sdvo *intel_sdvo; | 1973 | return intel_sdvo_check_supp_encode(intel_sdvo); |
2021 | struct i2c_algo_bit_data *algo_data; | ||
2022 | const struct i2c_algorithm *algo; | ||
2023 | |||
2024 | algo_data = (struct i2c_algo_bit_data *)i2c_adap->algo_data; | ||
2025 | intel_sdvo = | ||
2026 | intel_sdvo_chan_to_intel_sdvo((struct intel_i2c_chan *) | ||
2027 | (algo_data->data)); | ||
2028 | if (intel_sdvo == NULL) | ||
2029 | return -EINVAL; | ||
2030 | |||
2031 | algo = intel_sdvo->base.i2c_bus->algo; | ||
2032 | |||
2033 | intel_sdvo_set_control_bus_switch(intel_sdvo, intel_sdvo->ddc_bus); | ||
2034 | return algo->master_xfer(i2c_adap, msgs, num); | ||
2035 | } | 1974 | } |
2036 | 1975 | ||
2037 | static struct i2c_algorithm intel_sdvo_i2c_bit_algo = { | ||
2038 | .master_xfer = intel_sdvo_master_xfer, | ||
2039 | }; | ||
2040 | |||
2041 | static u8 | 1976 | static u8 |
2042 | intel_sdvo_get_slave_addr(struct drm_device *dev, int sdvo_reg) | 1977 | intel_sdvo_get_slave_addr(struct drm_device *dev, int sdvo_reg) |
2043 | { | 1978 | { |
@@ -2076,26 +2011,39 @@ intel_sdvo_get_slave_addr(struct drm_device *dev, int sdvo_reg) | |||
2076 | } | 2011 | } |
2077 | 2012 | ||
2078 | static void | 2013 | static void |
2079 | intel_sdvo_connector_init(struct drm_encoder *encoder, | 2014 | intel_sdvo_connector_init(struct intel_sdvo_connector *connector, |
2080 | struct drm_connector *connector) | 2015 | struct intel_sdvo *encoder) |
2081 | { | 2016 | { |
2082 | drm_connector_init(encoder->dev, connector, &intel_sdvo_connector_funcs, | 2017 | drm_connector_init(encoder->base.base.dev, |
2083 | connector->connector_type); | 2018 | &connector->base.base, |
2019 | &intel_sdvo_connector_funcs, | ||
2020 | connector->base.base.connector_type); | ||
2084 | 2021 | ||
2085 | drm_connector_helper_add(connector, &intel_sdvo_connector_helper_funcs); | 2022 | drm_connector_helper_add(&connector->base.base, |
2023 | &intel_sdvo_connector_helper_funcs); | ||
2086 | 2024 | ||
2087 | connector->interlace_allowed = 0; | 2025 | connector->base.base.interlace_allowed = 0; |
2088 | connector->doublescan_allowed = 0; | 2026 | connector->base.base.doublescan_allowed = 0; |
2089 | connector->display_info.subpixel_order = SubPixelHorizontalRGB; | 2027 | connector->base.base.display_info.subpixel_order = SubPixelHorizontalRGB; |
2090 | 2028 | ||
2091 | drm_mode_connector_attach_encoder(connector, encoder); | 2029 | intel_connector_attach_encoder(&connector->base, &encoder->base); |
2092 | drm_sysfs_connector_add(connector); | 2030 | drm_sysfs_connector_add(&connector->base.base); |
2031 | } | ||
2032 | |||
2033 | static void | ||
2034 | intel_sdvo_add_hdmi_properties(struct intel_sdvo_connector *connector) | ||
2035 | { | ||
2036 | struct drm_device *dev = connector->base.base.dev; | ||
2037 | |||
2038 | intel_attach_force_audio_property(&connector->base.base); | ||
2039 | if (INTEL_INFO(dev)->gen >= 4 && IS_MOBILE(dev)) | ||
2040 | intel_attach_broadcast_rgb_property(&connector->base.base); | ||
2093 | } | 2041 | } |
2094 | 2042 | ||
2095 | static bool | 2043 | static bool |
2096 | intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device) | 2044 | intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device) |
2097 | { | 2045 | { |
2098 | struct drm_encoder *encoder = &intel_sdvo->base.enc; | 2046 | struct drm_encoder *encoder = &intel_sdvo->base.base; |
2099 | struct drm_connector *connector; | 2047 | struct drm_connector *connector; |
2100 | struct intel_connector *intel_connector; | 2048 | struct intel_connector *intel_connector; |
2101 | struct intel_sdvo_connector *intel_sdvo_connector; | 2049 | struct intel_sdvo_connector *intel_sdvo_connector; |
@@ -2118,19 +2066,16 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device) | |||
2118 | encoder->encoder_type = DRM_MODE_ENCODER_TMDS; | 2066 | encoder->encoder_type = DRM_MODE_ENCODER_TMDS; |
2119 | connector->connector_type = DRM_MODE_CONNECTOR_DVID; | 2067 | connector->connector_type = DRM_MODE_CONNECTOR_DVID; |
2120 | 2068 | ||
2121 | if (intel_sdvo_get_supp_encode(intel_sdvo, &intel_sdvo->encode) | 2069 | if (intel_sdvo_is_hdmi_connector(intel_sdvo, device)) { |
2122 | && intel_sdvo_get_digital_encoding_mode(intel_sdvo, device) | ||
2123 | && intel_sdvo->is_hdmi) { | ||
2124 | /* enable hdmi encoding mode if supported */ | ||
2125 | intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_HDMI); | ||
2126 | intel_sdvo_set_colorimetry(intel_sdvo, | ||
2127 | SDVO_COLORIMETRY_RGB256); | ||
2128 | connector->connector_type = DRM_MODE_CONNECTOR_HDMIA; | 2070 | connector->connector_type = DRM_MODE_CONNECTOR_HDMIA; |
2071 | intel_sdvo->is_hdmi = true; | ||
2129 | } | 2072 | } |
2130 | intel_sdvo->base.clone_mask = ((1 << INTEL_SDVO_NON_TV_CLONE_BIT) | | 2073 | intel_sdvo->base.clone_mask = ((1 << INTEL_SDVO_NON_TV_CLONE_BIT) | |
2131 | (1 << INTEL_ANALOG_CLONE_BIT)); | 2074 | (1 << INTEL_ANALOG_CLONE_BIT)); |
2132 | 2075 | ||
2133 | intel_sdvo_connector_init(encoder, connector); | 2076 | intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo); |
2077 | if (intel_sdvo->is_hdmi) | ||
2078 | intel_sdvo_add_hdmi_properties(intel_sdvo_connector); | ||
2134 | 2079 | ||
2135 | return true; | 2080 | return true; |
2136 | } | 2081 | } |
@@ -2138,36 +2083,36 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device) | |||
2138 | static bool | 2083 | static bool |
2139 | intel_sdvo_tv_init(struct intel_sdvo *intel_sdvo, int type) | 2084 | intel_sdvo_tv_init(struct intel_sdvo *intel_sdvo, int type) |
2140 | { | 2085 | { |
2141 | struct drm_encoder *encoder = &intel_sdvo->base.enc; | 2086 | struct drm_encoder *encoder = &intel_sdvo->base.base; |
2142 | struct drm_connector *connector; | 2087 | struct drm_connector *connector; |
2143 | struct intel_connector *intel_connector; | 2088 | struct intel_connector *intel_connector; |
2144 | struct intel_sdvo_connector *intel_sdvo_connector; | 2089 | struct intel_sdvo_connector *intel_sdvo_connector; |
2145 | 2090 | ||
2146 | intel_sdvo_connector = kzalloc(sizeof(struct intel_sdvo_connector), GFP_KERNEL); | 2091 | intel_sdvo_connector = kzalloc(sizeof(struct intel_sdvo_connector), GFP_KERNEL); |
2147 | if (!intel_sdvo_connector) | 2092 | if (!intel_sdvo_connector) |
2148 | return false; | 2093 | return false; |
2149 | 2094 | ||
2150 | intel_connector = &intel_sdvo_connector->base; | 2095 | intel_connector = &intel_sdvo_connector->base; |
2151 | connector = &intel_connector->base; | 2096 | connector = &intel_connector->base; |
2152 | encoder->encoder_type = DRM_MODE_ENCODER_TVDAC; | 2097 | encoder->encoder_type = DRM_MODE_ENCODER_TVDAC; |
2153 | connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO; | 2098 | connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO; |
2154 | 2099 | ||
2155 | intel_sdvo->controlled_output |= type; | 2100 | intel_sdvo->controlled_output |= type; |
2156 | intel_sdvo_connector->output_flag = type; | 2101 | intel_sdvo_connector->output_flag = type; |
2157 | 2102 | ||
2158 | intel_sdvo->is_tv = true; | 2103 | intel_sdvo->is_tv = true; |
2159 | intel_sdvo->base.needs_tv_clock = true; | 2104 | intel_sdvo->base.needs_tv_clock = true; |
2160 | intel_sdvo->base.clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT; | 2105 | intel_sdvo->base.clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT; |
2161 | 2106 | ||
2162 | intel_sdvo_connector_init(encoder, connector); | 2107 | intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo); |
2163 | 2108 | ||
2164 | if (!intel_sdvo_tv_create_property(intel_sdvo, intel_sdvo_connector, type)) | 2109 | if (!intel_sdvo_tv_create_property(intel_sdvo, intel_sdvo_connector, type)) |
2165 | goto err; | 2110 | goto err; |
2166 | 2111 | ||
2167 | if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector)) | 2112 | if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector)) |
2168 | goto err; | 2113 | goto err; |
2169 | 2114 | ||
2170 | return true; | 2115 | return true; |
2171 | 2116 | ||
2172 | err: | 2117 | err: |
2173 | intel_sdvo_destroy(connector); | 2118 | intel_sdvo_destroy(connector); |
@@ -2177,43 +2122,44 @@ err: | |||
2177 | static bool | 2122 | static bool |
2178 | intel_sdvo_analog_init(struct intel_sdvo *intel_sdvo, int device) | 2123 | intel_sdvo_analog_init(struct intel_sdvo *intel_sdvo, int device) |
2179 | { | 2124 | { |
2180 | struct drm_encoder *encoder = &intel_sdvo->base.enc; | 2125 | struct drm_encoder *encoder = &intel_sdvo->base.base; |
2181 | struct drm_connector *connector; | 2126 | struct drm_connector *connector; |
2182 | struct intel_connector *intel_connector; | 2127 | struct intel_connector *intel_connector; |
2183 | struct intel_sdvo_connector *intel_sdvo_connector; | 2128 | struct intel_sdvo_connector *intel_sdvo_connector; |
2184 | 2129 | ||
2185 | intel_sdvo_connector = kzalloc(sizeof(struct intel_sdvo_connector), GFP_KERNEL); | 2130 | intel_sdvo_connector = kzalloc(sizeof(struct intel_sdvo_connector), GFP_KERNEL); |
2186 | if (!intel_sdvo_connector) | 2131 | if (!intel_sdvo_connector) |
2187 | return false; | 2132 | return false; |
2188 | 2133 | ||
2189 | intel_connector = &intel_sdvo_connector->base; | 2134 | intel_connector = &intel_sdvo_connector->base; |
2190 | connector = &intel_connector->base; | 2135 | connector = &intel_connector->base; |
2191 | connector->polled = DRM_CONNECTOR_POLL_CONNECT; | 2136 | connector->polled = DRM_CONNECTOR_POLL_CONNECT; |
2192 | encoder->encoder_type = DRM_MODE_ENCODER_DAC; | 2137 | encoder->encoder_type = DRM_MODE_ENCODER_DAC; |
2193 | connector->connector_type = DRM_MODE_CONNECTOR_VGA; | 2138 | connector->connector_type = DRM_MODE_CONNECTOR_VGA; |
2194 | 2139 | ||
2195 | if (device == 0) { | 2140 | if (device == 0) { |
2196 | intel_sdvo->controlled_output |= SDVO_OUTPUT_RGB0; | 2141 | intel_sdvo->controlled_output |= SDVO_OUTPUT_RGB0; |
2197 | intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB0; | 2142 | intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB0; |
2198 | } else if (device == 1) { | 2143 | } else if (device == 1) { |
2199 | intel_sdvo->controlled_output |= SDVO_OUTPUT_RGB1; | 2144 | intel_sdvo->controlled_output |= SDVO_OUTPUT_RGB1; |
2200 | intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB1; | 2145 | intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB1; |
2201 | } | 2146 | } |
2202 | 2147 | ||
2203 | intel_sdvo->base.clone_mask = ((1 << INTEL_SDVO_NON_TV_CLONE_BIT) | | 2148 | intel_sdvo->base.clone_mask = ((1 << INTEL_SDVO_NON_TV_CLONE_BIT) | |
2204 | (1 << INTEL_ANALOG_CLONE_BIT)); | 2149 | (1 << INTEL_ANALOG_CLONE_BIT)); |
2205 | 2150 | ||
2206 | intel_sdvo_connector_init(encoder, connector); | 2151 | intel_sdvo_connector_init(intel_sdvo_connector, |
2207 | return true; | 2152 | intel_sdvo); |
2153 | return true; | ||
2208 | } | 2154 | } |
2209 | 2155 | ||
2210 | static bool | 2156 | static bool |
2211 | intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device) | 2157 | intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device) |
2212 | { | 2158 | { |
2213 | struct drm_encoder *encoder = &intel_sdvo->base.enc; | 2159 | struct drm_encoder *encoder = &intel_sdvo->base.base; |
2214 | struct drm_connector *connector; | 2160 | struct drm_connector *connector; |
2215 | struct intel_connector *intel_connector; | 2161 | struct intel_connector *intel_connector; |
2216 | struct intel_sdvo_connector *intel_sdvo_connector; | 2162 | struct intel_sdvo_connector *intel_sdvo_connector; |
2217 | 2163 | ||
2218 | intel_sdvo_connector = kzalloc(sizeof(struct intel_sdvo_connector), GFP_KERNEL); | 2164 | intel_sdvo_connector = kzalloc(sizeof(struct intel_sdvo_connector), GFP_KERNEL); |
2219 | if (!intel_sdvo_connector) | 2165 | if (!intel_sdvo_connector) |
@@ -2221,22 +2167,22 @@ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device) | |||
2221 | 2167 | ||
2222 | intel_connector = &intel_sdvo_connector->base; | 2168 | intel_connector = &intel_sdvo_connector->base; |
2223 | connector = &intel_connector->base; | 2169 | connector = &intel_connector->base; |
2224 | encoder->encoder_type = DRM_MODE_ENCODER_LVDS; | 2170 | encoder->encoder_type = DRM_MODE_ENCODER_LVDS; |
2225 | connector->connector_type = DRM_MODE_CONNECTOR_LVDS; | 2171 | connector->connector_type = DRM_MODE_CONNECTOR_LVDS; |
2226 | 2172 | ||
2227 | if (device == 0) { | 2173 | if (device == 0) { |
2228 | intel_sdvo->controlled_output |= SDVO_OUTPUT_LVDS0; | 2174 | intel_sdvo->controlled_output |= SDVO_OUTPUT_LVDS0; |
2229 | intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS0; | 2175 | intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS0; |
2230 | } else if (device == 1) { | 2176 | } else if (device == 1) { |
2231 | intel_sdvo->controlled_output |= SDVO_OUTPUT_LVDS1; | 2177 | intel_sdvo->controlled_output |= SDVO_OUTPUT_LVDS1; |
2232 | intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS1; | 2178 | intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS1; |
2233 | } | 2179 | } |
2234 | 2180 | ||
2235 | intel_sdvo->base.clone_mask = ((1 << INTEL_ANALOG_CLONE_BIT) | | 2181 | intel_sdvo->base.clone_mask = ((1 << INTEL_ANALOG_CLONE_BIT) | |
2236 | (1 << INTEL_SDVO_LVDS_CLONE_BIT)); | 2182 | (1 << INTEL_SDVO_LVDS_CLONE_BIT)); |
2237 | 2183 | ||
2238 | intel_sdvo_connector_init(encoder, connector); | 2184 | intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo); |
2239 | if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector)) | 2185 | if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector)) |
2240 | goto err; | 2186 | goto err; |
2241 | 2187 | ||
2242 | return true; | 2188 | return true; |
@@ -2307,13 +2253,14 @@ static bool intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo, | |||
2307 | struct intel_sdvo_connector *intel_sdvo_connector, | 2253 | struct intel_sdvo_connector *intel_sdvo_connector, |
2308 | int type) | 2254 | int type) |
2309 | { | 2255 | { |
2310 | struct drm_device *dev = intel_sdvo->base.enc.dev; | 2256 | struct drm_device *dev = intel_sdvo->base.base.dev; |
2311 | struct intel_sdvo_tv_format format; | 2257 | struct intel_sdvo_tv_format format; |
2312 | uint32_t format_map, i; | 2258 | uint32_t format_map, i; |
2313 | 2259 | ||
2314 | if (!intel_sdvo_set_target_output(intel_sdvo, type)) | 2260 | if (!intel_sdvo_set_target_output(intel_sdvo, type)) |
2315 | return false; | 2261 | return false; |
2316 | 2262 | ||
2263 | BUILD_BUG_ON(sizeof(format) != 6); | ||
2317 | if (!intel_sdvo_get_value(intel_sdvo, | 2264 | if (!intel_sdvo_get_value(intel_sdvo, |
2318 | SDVO_CMD_GET_SUPPORTED_TV_FORMATS, | 2265 | SDVO_CMD_GET_SUPPORTED_TV_FORMATS, |
2319 | &format, sizeof(format))) | 2266 | &format, sizeof(format))) |
@@ -2373,7 +2320,7 @@ intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo, | |||
2373 | struct intel_sdvo_connector *intel_sdvo_connector, | 2320 | struct intel_sdvo_connector *intel_sdvo_connector, |
2374 | struct intel_sdvo_enhancements_reply enhancements) | 2321 | struct intel_sdvo_enhancements_reply enhancements) |
2375 | { | 2322 | { |
2376 | struct drm_device *dev = intel_sdvo->base.enc.dev; | 2323 | struct drm_device *dev = intel_sdvo->base.base.dev; |
2377 | struct drm_connector *connector = &intel_sdvo_connector->base.base; | 2324 | struct drm_connector *connector = &intel_sdvo_connector->base.base; |
2378 | uint16_t response, data_value[2]; | 2325 | uint16_t response, data_value[2]; |
2379 | 2326 | ||
@@ -2502,7 +2449,7 @@ intel_sdvo_create_enhance_property_lvds(struct intel_sdvo *intel_sdvo, | |||
2502 | struct intel_sdvo_connector *intel_sdvo_connector, | 2449 | struct intel_sdvo_connector *intel_sdvo_connector, |
2503 | struct intel_sdvo_enhancements_reply enhancements) | 2450 | struct intel_sdvo_enhancements_reply enhancements) |
2504 | { | 2451 | { |
2505 | struct drm_device *dev = intel_sdvo->base.enc.dev; | 2452 | struct drm_device *dev = intel_sdvo->base.base.dev; |
2506 | struct drm_connector *connector = &intel_sdvo_connector->base.base; | 2453 | struct drm_connector *connector = &intel_sdvo_connector->base.base; |
2507 | uint16_t response, data_value[2]; | 2454 | uint16_t response, data_value[2]; |
2508 | 2455 | ||
@@ -2520,6 +2467,8 @@ static bool intel_sdvo_create_enhance_property(struct intel_sdvo *intel_sdvo, | |||
2520 | uint16_t response; | 2467 | uint16_t response; |
2521 | } enhancements; | 2468 | } enhancements; |
2522 | 2469 | ||
2470 | BUILD_BUG_ON(sizeof(enhancements) != 2); | ||
2471 | |||
2523 | enhancements.response = 0; | 2472 | enhancements.response = 0; |
2524 | intel_sdvo_get_value(intel_sdvo, | 2473 | intel_sdvo_get_value(intel_sdvo, |
2525 | SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS, | 2474 | SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS, |
@@ -2535,7 +2484,43 @@ static bool intel_sdvo_create_enhance_property(struct intel_sdvo *intel_sdvo, | |||
2535 | return intel_sdvo_create_enhance_property_lvds(intel_sdvo, intel_sdvo_connector, enhancements.reply); | 2484 | return intel_sdvo_create_enhance_property_lvds(intel_sdvo, intel_sdvo_connector, enhancements.reply); |
2536 | else | 2485 | else |
2537 | return true; | 2486 | return true; |
2487 | } | ||
2538 | 2488 | ||
2489 | static int intel_sdvo_ddc_proxy_xfer(struct i2c_adapter *adapter, | ||
2490 | struct i2c_msg *msgs, | ||
2491 | int num) | ||
2492 | { | ||
2493 | struct intel_sdvo *sdvo = adapter->algo_data; | ||
2494 | |||
2495 | if (!intel_sdvo_set_control_bus_switch(sdvo, sdvo->ddc_bus)) | ||
2496 | return -EIO; | ||
2497 | |||
2498 | return sdvo->i2c->algo->master_xfer(sdvo->i2c, msgs, num); | ||
2499 | } | ||
2500 | |||
2501 | static u32 intel_sdvo_ddc_proxy_func(struct i2c_adapter *adapter) | ||
2502 | { | ||
2503 | struct intel_sdvo *sdvo = adapter->algo_data; | ||
2504 | return sdvo->i2c->algo->functionality(sdvo->i2c); | ||
2505 | } | ||
2506 | |||
2507 | static const struct i2c_algorithm intel_sdvo_ddc_proxy = { | ||
2508 | .master_xfer = intel_sdvo_ddc_proxy_xfer, | ||
2509 | .functionality = intel_sdvo_ddc_proxy_func | ||
2510 | }; | ||
2511 | |||
2512 | static bool | ||
2513 | intel_sdvo_init_ddc_proxy(struct intel_sdvo *sdvo, | ||
2514 | struct drm_device *dev) | ||
2515 | { | ||
2516 | sdvo->ddc.owner = THIS_MODULE; | ||
2517 | sdvo->ddc.class = I2C_CLASS_DDC; | ||
2518 | snprintf(sdvo->ddc.name, I2C_NAME_SIZE, "SDVO DDC proxy"); | ||
2519 | sdvo->ddc.dev.parent = &dev->pdev->dev; | ||
2520 | sdvo->ddc.algo_data = sdvo; | ||
2521 | sdvo->ddc.algo = &intel_sdvo_ddc_proxy; | ||
2522 | |||
2523 | return i2c_add_adapter(&sdvo->ddc) == 0; | ||
2539 | } | 2524 | } |
2540 | 2525 | ||
2541 | bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg) | 2526 | bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg) |
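
The new DDC proxy replaces the old trick of patching the DDC adapter's algo pointer: intel_sdvo_init_ddc_proxy() registers a second struct i2c_adapter whose master_xfer first issues the SDVO control-bus switch and then hands the unmodified message list to the underlying adapter. A minimal sketch of that forwarding-adapter pattern, with hypothetical names (the select hook stands in for intel_sdvo_set_control_bus_switch()):

    #include <linux/i2c.h>

    struct ddc_fwd {
            struct i2c_adapter proxy;            /* registered via i2c_add_adapter() */
            struct i2c_adapter *real;            /* the underlying GMBUS adapter */
            bool (*select)(struct ddc_fwd *fwd); /* e.g. the DDC bus switch */
    };

    static int ddc_fwd_xfer(struct i2c_adapter *adapter,
                            struct i2c_msg *msgs, int num)
    {
            struct ddc_fwd *fwd = adapter->algo_data;

            if (!fwd->select(fwd))
                    return -EIO;
            /* Forward straight to the real bus, as the proxy above does. */
            return fwd->real->algo->master_xfer(fwd->real, msgs, num);
    }

    static u32 ddc_fwd_func(struct i2c_adapter *adapter)
    {
            struct ddc_fwd *fwd = adapter->algo_data;

            return i2c_get_functionality(fwd->real);
    }

    static const struct i2c_algorithm ddc_fwd_algo = {
            .master_xfer   = ddc_fwd_xfer,
            .functionality = ddc_fwd_func,
    };
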
@@ -2543,95 +2528,64 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg) | |||
2543 | struct drm_i915_private *dev_priv = dev->dev_private; | 2528 | struct drm_i915_private *dev_priv = dev->dev_private; |
2544 | struct intel_encoder *intel_encoder; | 2529 | struct intel_encoder *intel_encoder; |
2545 | struct intel_sdvo *intel_sdvo; | 2530 | struct intel_sdvo *intel_sdvo; |
2546 | u8 ch[0x40]; | ||
2547 | int i; | 2531 | int i; |
2548 | u32 i2c_reg, ddc_reg, analog_ddc_reg; | ||
2549 | 2532 | ||
2550 | intel_sdvo = kzalloc(sizeof(struct intel_sdvo), GFP_KERNEL); | 2533 | intel_sdvo = kzalloc(sizeof(struct intel_sdvo), GFP_KERNEL); |
2551 | if (!intel_sdvo) | 2534 | if (!intel_sdvo) |
2552 | return false; | 2535 | return false; |
2553 | 2536 | ||
2554 | intel_sdvo->sdvo_reg = sdvo_reg; | 2537 | intel_sdvo->sdvo_reg = sdvo_reg; |
2538 | intel_sdvo->slave_addr = intel_sdvo_get_slave_addr(dev, sdvo_reg) >> 1; | ||
2539 | intel_sdvo_select_i2c_bus(dev_priv, intel_sdvo, sdvo_reg); | ||
2540 | if (!intel_sdvo_init_ddc_proxy(intel_sdvo, dev)) { | ||
2541 | kfree(intel_sdvo); | ||
2542 | return false; | ||
2543 | } | ||
2555 | 2544 | ||
2545 | /* encoder type will be decided later */ | ||
2556 | intel_encoder = &intel_sdvo->base; | 2546 | intel_encoder = &intel_sdvo->base; |
2557 | intel_encoder->type = INTEL_OUTPUT_SDVO; | 2547 | intel_encoder->type = INTEL_OUTPUT_SDVO; |
2558 | 2548 | drm_encoder_init(dev, &intel_encoder->base, &intel_sdvo_enc_funcs, 0); | |
2559 | if (HAS_PCH_SPLIT(dev)) { | ||
2560 | i2c_reg = PCH_GPIOE; | ||
2561 | ddc_reg = PCH_GPIOE; | ||
2562 | analog_ddc_reg = PCH_GPIOA; | ||
2563 | } else { | ||
2564 | i2c_reg = GPIOE; | ||
2565 | ddc_reg = GPIOE; | ||
2566 | analog_ddc_reg = GPIOA; | ||
2567 | } | ||
2568 | |||
2569 | /* setup the DDC bus. */ | ||
2570 | if (IS_SDVOB(sdvo_reg)) | ||
2571 | intel_encoder->i2c_bus = intel_i2c_create(dev, i2c_reg, "SDVOCTRL_E for SDVOB"); | ||
2572 | else | ||
2573 | intel_encoder->i2c_bus = intel_i2c_create(dev, i2c_reg, "SDVOCTRL_E for SDVOC"); | ||
2574 | |||
2575 | if (!intel_encoder->i2c_bus) | ||
2576 | goto err_inteloutput; | ||
2577 | |||
2578 | intel_sdvo->slave_addr = intel_sdvo_get_slave_addr(dev, sdvo_reg); | ||
2579 | |||
2580 | /* Save the bit-banging i2c functionality for use by the DDC wrapper */ | ||
2581 | intel_sdvo_i2c_bit_algo.functionality = intel_encoder->i2c_bus->algo->functionality; | ||
2582 | 2549 | ||
2583 | /* Read the regs to test if we can talk to the device */ | 2550 | /* Read the regs to test if we can talk to the device */ |
2584 | for (i = 0; i < 0x40; i++) { | 2551 | for (i = 0; i < 0x40; i++) { |
2585 | if (!intel_sdvo_read_byte(intel_sdvo, i, &ch[i])) { | 2552 | u8 byte; |
2553 | |||
2554 | if (!intel_sdvo_read_byte(intel_sdvo, i, &byte)) { | ||
2586 | DRM_DEBUG_KMS("No SDVO device found on SDVO%c\n", | 2555 | DRM_DEBUG_KMS("No SDVO device found on SDVO%c\n", |
2587 | IS_SDVOB(sdvo_reg) ? 'B' : 'C'); | 2556 | IS_SDVOB(sdvo_reg) ? 'B' : 'C'); |
2588 | goto err_i2c; | 2557 | goto err; |
2589 | } | 2558 | } |
2590 | } | 2559 | } |
2591 | 2560 | ||
2592 | /* setup the DDC bus. */ | 2561 | if (IS_SDVOB(sdvo_reg)) |
2593 | if (IS_SDVOB(sdvo_reg)) { | ||
2594 | intel_encoder->ddc_bus = intel_i2c_create(dev, ddc_reg, "SDVOB DDC BUS"); | ||
2595 | intel_sdvo->analog_ddc_bus = intel_i2c_create(dev, analog_ddc_reg, | ||
2596 | "SDVOB/VGA DDC BUS"); | ||
2597 | dev_priv->hotplug_supported_mask |= SDVOB_HOTPLUG_INT_STATUS; | 2562 | dev_priv->hotplug_supported_mask |= SDVOB_HOTPLUG_INT_STATUS; |
2598 | } else { | 2563 | else |
2599 | intel_encoder->ddc_bus = intel_i2c_create(dev, ddc_reg, "SDVOC DDC BUS"); | ||
2600 | intel_sdvo->analog_ddc_bus = intel_i2c_create(dev, analog_ddc_reg, | ||
2601 | "SDVOC/VGA DDC BUS"); | ||
2602 | dev_priv->hotplug_supported_mask |= SDVOC_HOTPLUG_INT_STATUS; | 2564 | dev_priv->hotplug_supported_mask |= SDVOC_HOTPLUG_INT_STATUS; |
2603 | } | ||
2604 | if (intel_encoder->ddc_bus == NULL || intel_sdvo->analog_ddc_bus == NULL) | ||
2605 | goto err_i2c; | ||
2606 | 2565 | ||
2607 | /* Wrap with our custom algo which switches to DDC mode */ | 2566 | drm_encoder_helper_add(&intel_encoder->base, &intel_sdvo_helper_funcs); |
2608 | intel_encoder->ddc_bus->algo = &intel_sdvo_i2c_bit_algo; | ||
2609 | |||
2610 | /* encoder type will be decided later */ | ||
2611 | drm_encoder_init(dev, &intel_encoder->enc, &intel_sdvo_enc_funcs, 0); | ||
2612 | drm_encoder_helper_add(&intel_encoder->enc, &intel_sdvo_helper_funcs); | ||
2613 | 2567 | ||
2614 | /* In default case sdvo lvds is false */ | 2568 | /* In default case sdvo lvds is false */ |
2615 | if (!intel_sdvo_get_capabilities(intel_sdvo, &intel_sdvo->caps)) | 2569 | if (!intel_sdvo_get_capabilities(intel_sdvo, &intel_sdvo->caps)) |
2616 | goto err_enc; | 2570 | goto err; |
2617 | 2571 | ||
2618 | if (intel_sdvo_output_setup(intel_sdvo, | 2572 | if (intel_sdvo_output_setup(intel_sdvo, |
2619 | intel_sdvo->caps.output_flags) != true) { | 2573 | intel_sdvo->caps.output_flags) != true) { |
2620 | DRM_DEBUG_KMS("SDVO output failed to setup on SDVO%c\n", | 2574 | DRM_DEBUG_KMS("SDVO output failed to setup on SDVO%c\n", |
2621 | IS_SDVOB(sdvo_reg) ? 'B' : 'C'); | 2575 | IS_SDVOB(sdvo_reg) ? 'B' : 'C'); |
2622 | goto err_enc; | 2576 | goto err; |
2623 | } | 2577 | } |
2624 | 2578 | ||
2625 | intel_sdvo_select_ddc_bus(dev_priv, intel_sdvo, sdvo_reg); | 2579 | intel_sdvo_select_ddc_bus(dev_priv, intel_sdvo, sdvo_reg); |
2626 | 2580 | ||
2627 | /* Set the input timing to the screen. Assume always input 0. */ | 2581 | /* Set the input timing to the screen. Assume always input 0. */ |
2628 | if (!intel_sdvo_set_target_input(intel_sdvo)) | 2582 | if (!intel_sdvo_set_target_input(intel_sdvo)) |
2629 | goto err_enc; | 2583 | goto err; |
2630 | 2584 | ||
2631 | if (!intel_sdvo_get_input_pixel_clock_range(intel_sdvo, | 2585 | if (!intel_sdvo_get_input_pixel_clock_range(intel_sdvo, |
2632 | &intel_sdvo->pixel_clock_min, | 2586 | &intel_sdvo->pixel_clock_min, |
2633 | &intel_sdvo->pixel_clock_max)) | 2587 | &intel_sdvo->pixel_clock_max)) |
2634 | goto err_enc; | 2588 | goto err; |
2635 | 2589 | ||
2636 | DRM_DEBUG_KMS("%s device VID/DID: %02X:%02X.%02X, " | 2590 | DRM_DEBUG_KMS("%s device VID/DID: %02X:%02X.%02X, " |
2637 | "clock range %dMHz - %dMHz, " | 2591 | "clock range %dMHz - %dMHz, " |
@@ -2651,16 +2605,9 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg) | |||
2651 | (SDVO_OUTPUT_TMDS1 | SDVO_OUTPUT_RGB1) ? 'Y' : 'N'); | 2605 | (SDVO_OUTPUT_TMDS1 | SDVO_OUTPUT_RGB1) ? 'Y' : 'N'); |
2652 | return true; | 2606 | return true; |
2653 | 2607 | ||
2654 | err_enc: | 2608 | err: |
2655 | drm_encoder_cleanup(&intel_encoder->enc); | 2609 | drm_encoder_cleanup(&intel_encoder->base); |
2656 | err_i2c: | 2610 | i2c_del_adapter(&intel_sdvo->ddc); |
2657 | if (intel_sdvo->analog_ddc_bus != NULL) | ||
2658 | intel_i2c_destroy(intel_sdvo->analog_ddc_bus); | ||
2659 | if (intel_encoder->ddc_bus != NULL) | ||
2660 | intel_i2c_destroy(intel_encoder->ddc_bus); | ||
2661 | if (intel_encoder->i2c_bus != NULL) | ||
2662 | intel_i2c_destroy(intel_encoder->i2c_bus); | ||
2663 | err_inteloutput: | ||
2664 | kfree(intel_sdvo); | 2611 | kfree(intel_sdvo); |
2665 | 2612 | ||
2666 | return false; | 2613 | return false; |
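
The rewritten intel_sdvo_init() also collapses the old err_enc/err_i2c/err_inteloutput ladder into a single err: exit; that works because everything unwound there (the encoder and the DDC proxy adapter) is initialized before the first point that can jump to it. A self-contained sketch of the same single-label unwind, with made-up helpers:

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct widget { int id; };

    /* Hypothetical stand-ins for the probe/registration steps. */
    static bool probe_hardware(struct widget *w)  { w->id = 42; return true; }
    static bool register_widget(struct widget *w) { (void)w; return true; }

    static bool widget_init(struct widget **out)
    {
            struct widget *w = calloc(1, sizeof(*w));

            if (!w)
                    return false;

            if (!probe_hardware(w))
                    goto err;
            if (!register_widget(w))
                    goto err;

            *out = w;
            return true;

    err:
            /* Every failure past allocation funnels through one label. */
            free(w);
            return false;
    }

    int main(void)
    {
            struct widget *w;

            if (widget_init(&w)) {
                    printf("widget %d up\n", w->id);
                    free(w);
            }
            return 0;
    }
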
diff --git a/drivers/gpu/drm/i915/intel_sdvo_regs.h b/drivers/gpu/drm/i915/intel_sdvo_regs.h index a386b022e538..4f4e23bc2d16 100644 --- a/drivers/gpu/drm/i915/intel_sdvo_regs.h +++ b/drivers/gpu/drm/i915/intel_sdvo_regs.h | |||
@@ -230,7 +230,7 @@ struct intel_sdvo_set_target_input_args { | |||
230 | } __attribute__((packed)); | 230 | } __attribute__((packed)); |
231 | 231 | ||
232 | /** | 232 | /** |
233 | * Takes a struct intel_sdvo_output_flags of which outputs are targetted by | 233 | * Takes a struct intel_sdvo_output_flags of which outputs are targeted by |
234 | * future output commands. | 234 | * future output commands. |
235 | * | 235 | * |
236 | * Affected commands include SET_OUTPUT_TIMINGS_PART[12], | 236 | * Affected commands include SET_OUTPUT_TIMINGS_PART[12], |
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c index 4a117e318a73..113e4e7264cd 100644 --- a/drivers/gpu/drm/i915/intel_tv.c +++ b/drivers/gpu/drm/i915/intel_tv.c | |||
@@ -48,7 +48,7 @@ struct intel_tv { | |||
48 | struct intel_encoder base; | 48 | struct intel_encoder base; |
49 | 49 | ||
50 | int type; | 50 | int type; |
51 | char *tv_format; | 51 | const char *tv_format; |
52 | int margin[4]; | 52 | int margin[4]; |
53 | u32 save_TV_H_CTL_1; | 53 | u32 save_TV_H_CTL_1; |
54 | u32 save_TV_H_CTL_2; | 54 | u32 save_TV_H_CTL_2; |
@@ -350,7 +350,7 @@ static const struct video_levels component_levels = { | |||
350 | 350 | ||
351 | 351 | ||
352 | struct tv_mode { | 352 | struct tv_mode { |
353 | char *name; | 353 | const char *name; |
354 | int clock; | 354 | int clock; |
355 | int refresh; /* in millihertz (for precision) */ | 355 | int refresh; /* in millihertz (for precision) */ |
356 | u32 oversample; | 356 | u32 oversample; |
@@ -900,7 +900,14 @@ static const struct tv_mode tv_modes[] = { | |||
900 | 900 | ||
901 | static struct intel_tv *enc_to_intel_tv(struct drm_encoder *encoder) | 901 | static struct intel_tv *enc_to_intel_tv(struct drm_encoder *encoder) |
902 | { | 902 | { |
903 | return container_of(enc_to_intel_encoder(encoder), struct intel_tv, base); | 903 | return container_of(encoder, struct intel_tv, base.base); |
904 | } | ||
905 | |||
906 | static struct intel_tv *intel_attached_tv(struct drm_connector *connector) | ||
907 | { | ||
908 | return container_of(intel_attached_encoder(connector), | ||
909 | struct intel_tv, | ||
910 | base); | ||
904 | } | 911 | } |
905 | 912 | ||
906 | static void | 913 | static void |
@@ -922,7 +929,7 @@ intel_tv_dpms(struct drm_encoder *encoder, int mode) | |||
922 | } | 929 | } |
923 | 930 | ||
924 | static const struct tv_mode * | 931 | static const struct tv_mode * |
925 | intel_tv_mode_lookup (char *tv_format) | 932 | intel_tv_mode_lookup(const char *tv_format) |
926 | { | 933 | { |
927 | int i; | 934 | int i; |
928 | 935 | ||
@@ -936,22 +943,23 @@ intel_tv_mode_lookup (char *tv_format) | |||
936 | } | 943 | } |
937 | 944 | ||
938 | static const struct tv_mode * | 945 | static const struct tv_mode * |
939 | intel_tv_mode_find (struct intel_tv *intel_tv) | 946 | intel_tv_mode_find(struct intel_tv *intel_tv) |
940 | { | 947 | { |
941 | return intel_tv_mode_lookup(intel_tv->tv_format); | 948 | return intel_tv_mode_lookup(intel_tv->tv_format); |
942 | } | 949 | } |
943 | 950 | ||
944 | static enum drm_mode_status | 951 | static enum drm_mode_status |
945 | intel_tv_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) | 952 | intel_tv_mode_valid(struct drm_connector *connector, |
953 | struct drm_display_mode *mode) | ||
946 | { | 954 | { |
947 | struct drm_encoder *encoder = intel_attached_encoder(connector); | 955 | struct intel_tv *intel_tv = intel_attached_tv(connector); |
948 | struct intel_tv *intel_tv = enc_to_intel_tv(encoder); | ||
949 | const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv); | 956 | const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv); |
950 | 957 | ||
951 | /* Ensure TV refresh is close to desired refresh */ | 958 | /* Ensure TV refresh is close to desired refresh */ |
952 | if (tv_mode && abs(tv_mode->refresh - drm_mode_vrefresh(mode) * 1000) | 959 | if (tv_mode && abs(tv_mode->refresh - drm_mode_vrefresh(mode) * 1000) |
953 | < 1000) | 960 | < 1000) |
954 | return MODE_OK; | 961 | return MODE_OK; |
962 | |||
955 | return MODE_CLOCK_RANGE; | 963 | return MODE_CLOCK_RANGE; |
956 | } | 964 | } |
957 | 965 | ||
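
The tolerance check above compares refresh rates in millihertz: the tv_mode table stores refresh in mHz for precision, drm_mode_vrefresh() returns whole hertz, and a delta under 1000 mHz (1 Hz) is accepted. A compilable illustration with made-up numbers:

    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
            int tv_refresh_mhz = 59940;   /* illustrative ~59.94 Hz entry */
            int mode_vrefresh_hz = 60;    /* candidate display mode */

            if (abs(tv_refresh_mhz - mode_vrefresh_hz * 1000) < 1000)
                    printf("MODE_OK\n");          /* only 60 mHz apart */
            else
                    printf("MODE_CLOCK_RANGE\n");
            return 0;
    }
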
@@ -998,6 +1006,7 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, | |||
998 | const struct video_levels *video_levels; | 1006 | const struct video_levels *video_levels; |
999 | const struct color_conversion *color_conversion; | 1007 | const struct color_conversion *color_conversion; |
1000 | bool burst_ena; | 1008 | bool burst_ena; |
1009 | int pipe = intel_crtc->pipe; | ||
1001 | 1010 | ||
1002 | if (!tv_mode) | 1011 | if (!tv_mode) |
1003 | return; /* can't happen (mode_prepare prevents this) */ | 1012 | return; /* can't happen (mode_prepare prevents this) */ |
@@ -1131,7 +1140,7 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, | |||
1131 | color_conversion->av); | 1140 | color_conversion->av); |
1132 | } | 1141 | } |
1133 | 1142 | ||
1134 | if (IS_I965G(dev)) | 1143 | if (INTEL_INFO(dev)->gen >= 4) |
1135 | I915_WRITE(TV_CLR_KNOBS, 0x00404000); | 1144 | I915_WRITE(TV_CLR_KNOBS, 0x00404000); |
1136 | else | 1145 | else |
1137 | I915_WRITE(TV_CLR_KNOBS, 0x00606000); | 1146 | I915_WRITE(TV_CLR_KNOBS, 0x00606000); |
@@ -1141,14 +1150,11 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, | |||
1141 | ((video_levels->black << TV_BLACK_LEVEL_SHIFT) | | 1150 | ((video_levels->black << TV_BLACK_LEVEL_SHIFT) | |
1142 | (video_levels->blank << TV_BLANK_LEVEL_SHIFT))); | 1151 | (video_levels->blank << TV_BLANK_LEVEL_SHIFT))); |
1143 | { | 1152 | { |
1144 | int pipeconf_reg = (intel_crtc->pipe == 0) ? | 1153 | int pipeconf_reg = PIPECONF(pipe); |
1145 | PIPEACONF : PIPEBCONF; | 1154 | int dspcntr_reg = DSPCNTR(intel_crtc->plane); |
1146 | int dspcntr_reg = (intel_crtc->plane == 0) ? | ||
1147 | DSPACNTR : DSPBCNTR; | ||
1148 | int pipeconf = I915_READ(pipeconf_reg); | 1155 | int pipeconf = I915_READ(pipeconf_reg); |
1149 | int dspcntr = I915_READ(dspcntr_reg); | 1156 | int dspcntr = I915_READ(dspcntr_reg); |
1150 | int dspbase_reg = (intel_crtc->plane == 0) ? | 1157 | int dspbase_reg = DSPADDR(intel_crtc->plane); |
1151 | DSPAADDR : DSPBADDR; | ||
1152 | int xpos = 0x0, ypos = 0x0; | 1158 | int xpos = 0x0, ypos = 0x0; |
1153 | unsigned int xsize, ysize; | 1159 | unsigned int xsize, ysize; |
1154 | /* Pipe must be off here */ | 1160 | /* Pipe must be off here */ |
@@ -1157,12 +1163,12 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, | |||
1157 | I915_WRITE(dspbase_reg, I915_READ(dspbase_reg)); | 1163 | I915_WRITE(dspbase_reg, I915_READ(dspbase_reg)); |
1158 | 1164 | ||
1159 | /* Wait for vblank for the disable to take effect */ | 1165 | /* Wait for vblank for the disable to take effect */ |
1160 | if (!IS_I9XX(dev)) | 1166 | if (IS_GEN2(dev)) |
1161 | intel_wait_for_vblank(dev, intel_crtc->pipe); | 1167 | intel_wait_for_vblank(dev, intel_crtc->pipe); |
1162 | 1168 | ||
1163 | I915_WRITE(pipeconf_reg, pipeconf & ~PIPEACONF_ENABLE); | 1169 | I915_WRITE(pipeconf_reg, pipeconf & ~PIPECONF_ENABLE); |
1164 | /* Wait for vblank for the disable to take effect. */ | 1170 | /* Wait for vblank for the disable to take effect. */ |
1165 | intel_wait_for_vblank(dev, intel_crtc->pipe); | 1171 | intel_wait_for_pipe_off(dev, intel_crtc->pipe); |
1166 | 1172 | ||
1167 | /* Filter ctl must be set before TV_WIN_SIZE */ | 1173 | /* Filter ctl must be set before TV_WIN_SIZE */ |
1168 | I915_WRITE(TV_FILTER_CTL_1, TV_AUTO_SCALE); | 1174 | I915_WRITE(TV_FILTER_CTL_1, TV_AUTO_SCALE); |
@@ -1196,7 +1202,7 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, | |||
1196 | I915_WRITE(TV_V_LUMA_0 + (i<<2), tv_mode->filter_table[j++]); | 1202 | I915_WRITE(TV_V_LUMA_0 + (i<<2), tv_mode->filter_table[j++]); |
1197 | for (i = 0; i < 43; i++) | 1203 | for (i = 0; i < 43; i++) |
1198 | I915_WRITE(TV_V_CHROMA_0 + (i<<2), tv_mode->filter_table[j++]); | 1204 | I915_WRITE(TV_V_CHROMA_0 + (i<<2), tv_mode->filter_table[j++]); |
1199 | I915_WRITE(TV_DAC, 0); | 1205 | I915_WRITE(TV_DAC, I915_READ(TV_DAC) & TV_DAC_SAVE); |
1200 | I915_WRITE(TV_CTL, tv_ctl); | 1206 | I915_WRITE(TV_CTL, tv_ctl); |
1201 | } | 1207 | } |
1202 | 1208 | ||
@@ -1226,37 +1232,34 @@ static const struct drm_display_mode reported_modes[] = { | |||
1226 | * \return false if TV is disconnected. | 1232 | * \return false if TV is disconnected. |
1227 | */ | 1233 | */ |
1228 | static int | 1234 | static int |
1229 | intel_tv_detect_type (struct intel_tv *intel_tv) | 1235 | intel_tv_detect_type (struct intel_tv *intel_tv, |
1236 | struct drm_connector *connector) | ||
1230 | { | 1237 | { |
1231 | struct drm_encoder *encoder = &intel_tv->base.enc; | 1238 | struct drm_encoder *encoder = &intel_tv->base.base; |
1232 | struct drm_device *dev = encoder->dev; | 1239 | struct drm_device *dev = encoder->dev; |
1233 | struct drm_i915_private *dev_priv = dev->dev_private; | 1240 | struct drm_i915_private *dev_priv = dev->dev_private; |
1234 | unsigned long irqflags; | 1241 | unsigned long irqflags; |
1235 | u32 tv_ctl, save_tv_ctl; | 1242 | u32 tv_ctl, save_tv_ctl; |
1236 | u32 tv_dac, save_tv_dac; | 1243 | u32 tv_dac, save_tv_dac; |
1237 | int type = DRM_MODE_CONNECTOR_Unknown; | 1244 | int type; |
1238 | |||
1239 | tv_dac = I915_READ(TV_DAC); | ||
1240 | 1245 | ||
1241 | /* Disable TV interrupts around load detect or we'll recurse */ | 1246 | /* Disable TV interrupts around load detect or we'll recurse */ |
1242 | spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); | 1247 | if (connector->polled & DRM_CONNECTOR_POLL_HPD) { |
1243 | i915_disable_pipestat(dev_priv, 0, PIPE_HOTPLUG_INTERRUPT_ENABLE | | 1248 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); |
1244 | PIPE_HOTPLUG_TV_INTERRUPT_ENABLE); | 1249 | i915_disable_pipestat(dev_priv, 0, |
1245 | spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags); | 1250 | PIPE_HOTPLUG_INTERRUPT_ENABLE | |
1251 | PIPE_HOTPLUG_TV_INTERRUPT_ENABLE); | ||
1252 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); | ||
1253 | } | ||
1246 | 1254 | ||
1247 | /* | 1255 | save_tv_dac = tv_dac = I915_READ(TV_DAC); |
1248 | * Detect TV by polling) | 1256 | save_tv_ctl = tv_ctl = I915_READ(TV_CTL); |
1249 | */ | 1257 | |
1250 | save_tv_dac = tv_dac; | 1258 | /* Poll for TV detection */ |
1251 | tv_ctl = I915_READ(TV_CTL); | 1259 | tv_ctl &= ~(TV_ENC_ENABLE | TV_TEST_MODE_MASK); |
1252 | save_tv_ctl = tv_ctl; | ||
1253 | tv_ctl &= ~TV_ENC_ENABLE; | ||
1254 | tv_ctl &= ~TV_TEST_MODE_MASK; | ||
1255 | tv_ctl |= TV_TEST_MODE_MONITOR_DETECT; | 1260 | tv_ctl |= TV_TEST_MODE_MONITOR_DETECT; |
1256 | tv_dac &= ~TVDAC_SENSE_MASK; | 1261 | |
1257 | tv_dac &= ~DAC_A_MASK; | 1262 | tv_dac &= ~(TVDAC_SENSE_MASK | DAC_A_MASK | DAC_B_MASK | DAC_C_MASK); |
1258 | tv_dac &= ~DAC_B_MASK; | ||
1259 | tv_dac &= ~DAC_C_MASK; | ||
1260 | tv_dac |= (TVDAC_STATE_CHG_EN | | 1263 | tv_dac |= (TVDAC_STATE_CHG_EN | |
1261 | TVDAC_A_SENSE_CTL | | 1264 | TVDAC_A_SENSE_CTL | |
1262 | TVDAC_B_SENSE_CTL | | 1265 | TVDAC_B_SENSE_CTL | |
@@ -1265,42 +1268,48 @@ intel_tv_detect_type (struct intel_tv *intel_tv) | |||
1265 | DAC_A_0_7_V | | 1268 | DAC_A_0_7_V | |
1266 | DAC_B_0_7_V | | 1269 | DAC_B_0_7_V | |
1267 | DAC_C_0_7_V); | 1270 | DAC_C_0_7_V); |
1271 | |||
1268 | I915_WRITE(TV_CTL, tv_ctl); | 1272 | I915_WRITE(TV_CTL, tv_ctl); |
1269 | I915_WRITE(TV_DAC, tv_dac); | 1273 | I915_WRITE(TV_DAC, tv_dac); |
1270 | POSTING_READ(TV_DAC); | 1274 | POSTING_READ(TV_DAC); |
1271 | msleep(20); | ||
1272 | 1275 | ||
1273 | tv_dac = I915_READ(TV_DAC); | 1276 | intel_wait_for_vblank(intel_tv->base.base.dev, |
1274 | I915_WRITE(TV_DAC, save_tv_dac); | 1277 | to_intel_crtc(intel_tv->base.base.crtc)->pipe); |
1275 | I915_WRITE(TV_CTL, save_tv_ctl); | ||
1276 | POSTING_READ(TV_CTL); | ||
1277 | msleep(20); | ||
1278 | 1278 | ||
1279 | /* | 1279 | type = -1; |
1280 | * A B C | 1280 | if (wait_for((tv_dac = I915_READ(TV_DAC)) & TVDAC_STATE_CHG, 20) == 0) { |
1281 | * 0 1 1 Composite | 1281 | DRM_DEBUG_KMS("TV detected: %x, %x\n", tv_ctl, tv_dac); |
1282 | * 1 0 X svideo | 1282 | /* |
1283 | * 0 0 0 Component | 1283 | * A B C |
1284 | */ | 1284 | * 0 1 1 Composite |
1285 | if ((tv_dac & TVDAC_SENSE_MASK) == (TVDAC_B_SENSE | TVDAC_C_SENSE)) { | 1285 | * 1 0 X svideo |
1286 | DRM_DEBUG_KMS("Detected Composite TV connection\n"); | 1286 | * 0 0 0 Component |
1287 | type = DRM_MODE_CONNECTOR_Composite; | 1287 | */ |
1288 | } else if ((tv_dac & (TVDAC_A_SENSE|TVDAC_B_SENSE)) == TVDAC_A_SENSE) { | 1288 | if ((tv_dac & TVDAC_SENSE_MASK) == (TVDAC_B_SENSE | TVDAC_C_SENSE)) { |
1289 | DRM_DEBUG_KMS("Detected S-Video TV connection\n"); | 1289 | DRM_DEBUG_KMS("Detected Composite TV connection\n"); |
1290 | type = DRM_MODE_CONNECTOR_SVIDEO; | 1290 | type = DRM_MODE_CONNECTOR_Composite; |
1291 | } else if ((tv_dac & TVDAC_SENSE_MASK) == 0) { | 1291 | } else if ((tv_dac & (TVDAC_A_SENSE|TVDAC_B_SENSE)) == TVDAC_A_SENSE) { |
1292 | DRM_DEBUG_KMS("Detected Component TV connection\n"); | 1292 | DRM_DEBUG_KMS("Detected S-Video TV connection\n"); |
1293 | type = DRM_MODE_CONNECTOR_Component; | 1293 | type = DRM_MODE_CONNECTOR_SVIDEO; |
1294 | } else { | 1294 | } else if ((tv_dac & TVDAC_SENSE_MASK) == 0) { |
1295 | DRM_DEBUG_KMS("No TV connection detected\n"); | 1295 | DRM_DEBUG_KMS("Detected Component TV connection\n"); |
1296 | type = -1; | 1296 | type = DRM_MODE_CONNECTOR_Component; |
1297 | } else { | ||
1298 | DRM_DEBUG_KMS("Unrecognised TV connection\n"); | ||
1299 | } | ||
1297 | } | 1300 | } |
1298 | 1301 | ||
1302 | I915_WRITE(TV_DAC, save_tv_dac & ~TVDAC_STATE_CHG_EN); | ||
1303 | I915_WRITE(TV_CTL, save_tv_ctl); | ||
1304 | |||
1299 | /* Restore interrupt config */ | 1305 | /* Restore interrupt config */ |
1300 | spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); | 1306 | if (connector->polled & DRM_CONNECTOR_POLL_HPD) { |
1301 | i915_enable_pipestat(dev_priv, 0, PIPE_HOTPLUG_INTERRUPT_ENABLE | | 1307 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); |
1302 | PIPE_HOTPLUG_TV_INTERRUPT_ENABLE); | 1308 | i915_enable_pipestat(dev_priv, 0, |
1303 | spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags); | 1309 | PIPE_HOTPLUG_INTERRUPT_ENABLE | |
1310 | PIPE_HOTPLUG_TV_INTERRUPT_ENABLE); | ||
1311 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); | ||
1312 | } | ||
1304 | 1313 | ||
1305 | return type; | 1314 | return type; |
1306 | } | 1315 | } |
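
The reworked detection also swaps the blind msleep(20) for wait_for(), polling TV_DAC for TVDAC_STATE_CHG with a 20 ms timeout before decoding the sense bits against the A/B/C truth table in the comment. A userspace-compilable sketch of that decode; the bit positions are illustrative stand-ins, not the real i915 register layout:

    #include <stdio.h>

    #define SENSE_A (1u << 2)
    #define SENSE_B (1u << 1)
    #define SENSE_C (1u << 0)
    #define SENSE_MASK (SENSE_A | SENSE_B | SENSE_C)

    static const char *decode_tv_sense(unsigned int sense)
    {
            sense &= SENSE_MASK;
            if (sense == (SENSE_B | SENSE_C))
                    return "Composite";   /* A=0 B=1 C=1 */
            if ((sense & (SENSE_A | SENSE_B)) == SENSE_A)
                    return "S-Video";     /* A=1 B=0 C=X */
            if (sense == 0)
                    return "Component";   /* A=0 B=0 C=0 */
            return "unrecognised";
    }

    int main(void)
    {
            unsigned int s;

            for (s = 0; s <= SENSE_MASK; s++)
                    printf("A=%u B=%u C=%u -> %s\n",
                           !!(s & SENSE_A), !!(s & SENSE_B), !!(s & SENSE_C),
                           decode_tv_sense(s));
            return 0;
    }
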
@@ -1311,8 +1320,7 @@ intel_tv_detect_type (struct intel_tv *intel_tv) | |||
1311 | */ | 1320 | */ |
1312 | static void intel_tv_find_better_format(struct drm_connector *connector) | 1321 | static void intel_tv_find_better_format(struct drm_connector *connector) |
1313 | { | 1322 | { |
1314 | struct drm_encoder *encoder = intel_attached_encoder(connector); | 1323 | struct intel_tv *intel_tv = intel_attached_tv(connector); |
1315 | struct intel_tv *intel_tv = enc_to_intel_tv(encoder); | ||
1316 | const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv); | 1324 | const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv); |
1317 | int i; | 1325 | int i; |
1318 | 1326 | ||
@@ -1344,25 +1352,23 @@ static enum drm_connector_status | |||
1344 | intel_tv_detect(struct drm_connector *connector, bool force) | 1352 | intel_tv_detect(struct drm_connector *connector, bool force) |
1345 | { | 1353 | { |
1346 | struct drm_display_mode mode; | 1354 | struct drm_display_mode mode; |
1347 | struct drm_encoder *encoder = intel_attached_encoder(connector); | 1355 | struct intel_tv *intel_tv = intel_attached_tv(connector); |
1348 | struct intel_tv *intel_tv = enc_to_intel_tv(encoder); | ||
1349 | int type; | 1356 | int type; |
1350 | 1357 | ||
1351 | mode = reported_modes[0]; | 1358 | mode = reported_modes[0]; |
1352 | drm_mode_set_crtcinfo(&mode, CRTC_INTERLACE_HALVE_V); | 1359 | drm_mode_set_crtcinfo(&mode, CRTC_INTERLACE_HALVE_V); |
1353 | 1360 | ||
1354 | if (encoder->crtc && encoder->crtc->enabled) { | 1361 | if (intel_tv->base.base.crtc && intel_tv->base.base.crtc->enabled) { |
1355 | type = intel_tv_detect_type(intel_tv); | 1362 | type = intel_tv_detect_type(intel_tv, connector); |
1356 | } else if (force) { | 1363 | } else if (force) { |
1357 | struct drm_crtc *crtc; | 1364 | struct intel_load_detect_pipe tmp; |
1358 | int dpms_mode; | 1365 | |
1359 | 1366 | if (intel_get_load_detect_pipe(&intel_tv->base, connector, | |
1360 | crtc = intel_get_load_detect_pipe(&intel_tv->base, connector, | 1367 | &mode, &tmp)) { |
1361 | &mode, &dpms_mode); | 1368 | type = intel_tv_detect_type(intel_tv, connector); |
1362 | if (crtc) { | 1369 | intel_release_load_detect_pipe(&intel_tv->base, |
1363 | type = intel_tv_detect_type(intel_tv); | 1370 | connector, |
1364 | intel_release_load_detect_pipe(&intel_tv->base, connector, | 1371 | &tmp); |
1365 | dpms_mode); | ||
1366 | } else | 1372 | } else |
1367 | return connector_status_unknown; | 1373 | return connector_status_unknown; |
1368 | } else | 1374 | } else |
@@ -1371,15 +1377,16 @@ intel_tv_detect(struct drm_connector *connector, bool force) | |||
1371 | if (type < 0) | 1377 | if (type < 0) |
1372 | return connector_status_disconnected; | 1378 | return connector_status_disconnected; |
1373 | 1379 | ||
1380 | intel_tv->type = type; | ||
1374 | intel_tv_find_better_format(connector); | 1381 | intel_tv_find_better_format(connector); |
1382 | |||
1375 | return connector_status_connected; | 1383 | return connector_status_connected; |
1376 | } | 1384 | } |
1377 | 1385 | ||
1378 | static struct input_res { | 1386 | static const struct input_res { |
1379 | char *name; | 1387 | const char *name; |
1380 | int w, h; | 1388 | int w, h; |
1381 | } input_res_table[] = | 1389 | } input_res_table[] = { |
1382 | { | ||
1383 | {"640x480", 640, 480}, | 1390 | {"640x480", 640, 480}, |
1384 | {"800x600", 800, 600}, | 1391 | {"800x600", 800, 600}, |
1385 | {"1024x768", 1024, 768}, | 1392 | {"1024x768", 1024, 768}, |
@@ -1396,8 +1403,7 @@ static void | |||
1396 | intel_tv_chose_preferred_modes(struct drm_connector *connector, | 1403 | intel_tv_chose_preferred_modes(struct drm_connector *connector, |
1397 | struct drm_display_mode *mode_ptr) | 1404 | struct drm_display_mode *mode_ptr) |
1398 | { | 1405 | { |
1399 | struct drm_encoder *encoder = intel_attached_encoder(connector); | 1406 | struct intel_tv *intel_tv = intel_attached_tv(connector); |
1400 | struct intel_tv *intel_tv = enc_to_intel_tv(encoder); | ||
1401 | const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv); | 1407 | const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv); |
1402 | 1408 | ||
1403 | if (tv_mode->nbr_end < 480 && mode_ptr->vdisplay == 480) | 1409 | if (tv_mode->nbr_end < 480 && mode_ptr->vdisplay == 480) |
@@ -1422,15 +1428,14 @@ static int | |||
1422 | intel_tv_get_modes(struct drm_connector *connector) | 1428 | intel_tv_get_modes(struct drm_connector *connector) |
1423 | { | 1429 | { |
1424 | struct drm_display_mode *mode_ptr; | 1430 | struct drm_display_mode *mode_ptr; |
1425 | struct drm_encoder *encoder = intel_attached_encoder(connector); | 1431 | struct intel_tv *intel_tv = intel_attached_tv(connector); |
1426 | struct intel_tv *intel_tv = enc_to_intel_tv(encoder); | ||
1427 | const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv); | 1432 | const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv); |
1428 | int j, count = 0; | 1433 | int j, count = 0; |
1429 | u64 tmp; | 1434 | u64 tmp; |
1430 | 1435 | ||
1431 | for (j = 0; j < ARRAY_SIZE(input_res_table); | 1436 | for (j = 0; j < ARRAY_SIZE(input_res_table); |
1432 | j++) { | 1437 | j++) { |
1433 | struct input_res *input = &input_res_table[j]; | 1438 | const struct input_res *input = &input_res_table[j]; |
1434 | unsigned int hactive_s = input->w; | 1439 | unsigned int hactive_s = input->w; |
1435 | unsigned int vactive_s = input->h; | 1440 | unsigned int vactive_s = input->h; |
1436 | 1441 | ||
@@ -1488,9 +1493,8 @@ intel_tv_set_property(struct drm_connector *connector, struct drm_property *prop | |||
1488 | uint64_t val) | 1493 | uint64_t val) |
1489 | { | 1494 | { |
1490 | struct drm_device *dev = connector->dev; | 1495 | struct drm_device *dev = connector->dev; |
1491 | struct drm_encoder *encoder = intel_attached_encoder(connector); | 1496 | struct intel_tv *intel_tv = intel_attached_tv(connector); |
1492 | struct intel_tv *intel_tv = enc_to_intel_tv(encoder); | 1497 | struct drm_crtc *crtc = intel_tv->base.base.crtc; |
1493 | struct drm_crtc *crtc = encoder->crtc; | ||
1494 | int ret = 0; | 1498 | int ret = 0; |
1495 | bool changed = false; | 1499 | bool changed = false; |
1496 | 1500 | ||
@@ -1555,7 +1559,7 @@ static const struct drm_connector_funcs intel_tv_connector_funcs = { | |||
1555 | static const struct drm_connector_helper_funcs intel_tv_connector_helper_funcs = { | 1559 | static const struct drm_connector_helper_funcs intel_tv_connector_helper_funcs = { |
1556 | .mode_valid = intel_tv_mode_valid, | 1560 | .mode_valid = intel_tv_mode_valid, |
1557 | .get_modes = intel_tv_get_modes, | 1561 | .get_modes = intel_tv_get_modes, |
1558 | .best_encoder = intel_attached_encoder, | 1562 | .best_encoder = intel_best_encoder, |
1559 | }; | 1563 | }; |
1560 | 1564 | ||
1561 | static const struct drm_encoder_funcs intel_tv_enc_funcs = { | 1565 | static const struct drm_encoder_funcs intel_tv_enc_funcs = { |
@@ -1607,7 +1611,7 @@ intel_tv_init(struct drm_device *dev) | |||
1607 | struct intel_encoder *intel_encoder; | 1611 | struct intel_encoder *intel_encoder; |
1608 | struct intel_connector *intel_connector; | 1612 | struct intel_connector *intel_connector; |
1609 | u32 tv_dac_on, tv_dac_off, save_tv_dac; | 1613 | u32 tv_dac_on, tv_dac_off, save_tv_dac; |
1610 | char **tv_format_names; | 1614 | char *tv_format_names[ARRAY_SIZE(tv_modes)]; |
1611 | int i, initial_mode = 0; | 1615 | int i, initial_mode = 0; |
1612 | 1616 | ||
1613 | if ((I915_READ(TV_CTL) & TV_FUSE_STATE_MASK) == TV_FUSE_STATE_DISABLED) | 1617 | if ((I915_READ(TV_CTL) & TV_FUSE_STATE_MASK) == TV_FUSE_STATE_DISABLED) |
@@ -1658,18 +1662,29 @@ intel_tv_init(struct drm_device *dev) | |||
1658 | intel_encoder = &intel_tv->base; | 1662 | intel_encoder = &intel_tv->base; |
1659 | connector = &intel_connector->base; | 1663 | connector = &intel_connector->base; |
1660 | 1664 | ||
1665 | /* The documentation, for the older chipsets at least, recommends | ||
1666 | * using a polling method rather than hotplug detection for TVs. | ||
1667 | * This is because, in order to perform hotplug detection, the PLLs | ||
1668 | * for the TV must be kept alive, increasing power drain and starving | ||
1669 | * bandwidth from other encoders. Notably, it causes | ||
1670 | * pipe underruns on Crestline when this encoder is supposedly idle. | ||
1671 | * | ||
1672 | * More recent chipsets favour HDMI rather than integrated S-Video. | ||
1673 | */ | ||
1674 | connector->polled = DRM_CONNECTOR_POLL_CONNECT; | ||
1675 | |||
1661 | drm_connector_init(dev, connector, &intel_tv_connector_funcs, | 1676 | drm_connector_init(dev, connector, &intel_tv_connector_funcs, |
1662 | DRM_MODE_CONNECTOR_SVIDEO); | 1677 | DRM_MODE_CONNECTOR_SVIDEO); |
1663 | 1678 | ||
1664 | drm_encoder_init(dev, &intel_encoder->enc, &intel_tv_enc_funcs, | 1679 | drm_encoder_init(dev, &intel_encoder->base, &intel_tv_enc_funcs, |
1665 | DRM_MODE_ENCODER_TVDAC); | 1680 | DRM_MODE_ENCODER_TVDAC); |
1666 | 1681 | ||
1667 | drm_mode_connector_attach_encoder(&intel_connector->base, &intel_encoder->enc); | 1682 | intel_connector_attach_encoder(intel_connector, intel_encoder); |
1668 | intel_encoder->type = INTEL_OUTPUT_TVOUT; | 1683 | intel_encoder->type = INTEL_OUTPUT_TVOUT; |
1669 | intel_encoder->crtc_mask = (1 << 0) | (1 << 1); | 1684 | intel_encoder->crtc_mask = (1 << 0) | (1 << 1); |
1670 | intel_encoder->clone_mask = (1 << INTEL_TV_CLONE_BIT); | 1685 | intel_encoder->clone_mask = (1 << INTEL_TV_CLONE_BIT); |
1671 | intel_encoder->enc.possible_crtcs = ((1 << 0) | (1 << 1)); | 1686 | intel_encoder->base.possible_crtcs = ((1 << 0) | (1 << 1)); |
1672 | intel_encoder->enc.possible_clones = (1 << INTEL_OUTPUT_TVOUT); | 1687 | intel_encoder->base.possible_clones = (1 << INTEL_OUTPUT_TVOUT); |
1673 | intel_tv->type = DRM_MODE_CONNECTOR_Unknown; | 1688 | intel_tv->type = DRM_MODE_CONNECTOR_Unknown; |
1674 | 1689 | ||
1675 | /* BIOS margin values */ | 1690 | /* BIOS margin values */ |
@@ -1678,21 +1693,19 @@ intel_tv_init(struct drm_device *dev) | |||
1678 | intel_tv->margin[TV_MARGIN_RIGHT] = 46; | 1693 | intel_tv->margin[TV_MARGIN_RIGHT] = 46; |
1679 | intel_tv->margin[TV_MARGIN_BOTTOM] = 37; | 1694 | intel_tv->margin[TV_MARGIN_BOTTOM] = 37; |
1680 | 1695 | ||
1681 | intel_tv->tv_format = kstrdup(tv_modes[initial_mode].name, GFP_KERNEL); | 1696 | intel_tv->tv_format = tv_modes[initial_mode].name; |
1682 | 1697 | ||
1683 | drm_encoder_helper_add(&intel_encoder->enc, &intel_tv_helper_funcs); | 1698 | drm_encoder_helper_add(&intel_encoder->base, &intel_tv_helper_funcs); |
1684 | drm_connector_helper_add(connector, &intel_tv_connector_helper_funcs); | 1699 | drm_connector_helper_add(connector, &intel_tv_connector_helper_funcs); |
1685 | connector->interlace_allowed = false; | 1700 | connector->interlace_allowed = false; |
1686 | connector->doublescan_allowed = false; | 1701 | connector->doublescan_allowed = false; |
1687 | 1702 | ||
1688 | /* Create TV properties then attach current values */ | 1703 | /* Create TV properties then attach current values */ |
1689 | tv_format_names = kmalloc(sizeof(char *) * ARRAY_SIZE(tv_modes), | ||
1690 | GFP_KERNEL); | ||
1691 | if (!tv_format_names) | ||
1692 | goto out; | ||
1693 | for (i = 0; i < ARRAY_SIZE(tv_modes); i++) | 1704 | for (i = 0; i < ARRAY_SIZE(tv_modes); i++) |
1694 | tv_format_names[i] = tv_modes[i].name; | 1705 | tv_format_names[i] = (char *)tv_modes[i].name; |
1695 | drm_mode_create_tv_properties(dev, ARRAY_SIZE(tv_modes), tv_format_names); | 1706 | drm_mode_create_tv_properties(dev, |
1707 | ARRAY_SIZE(tv_modes), | ||
1708 | tv_format_names); | ||
1696 | 1709 | ||
1697 | drm_connector_attach_property(connector, dev->mode_config.tv_mode_property, | 1710 | drm_connector_attach_property(connector, dev->mode_config.tv_mode_property, |
1698 | initial_mode); | 1711 | initial_mode); |
@@ -1708,6 +1721,5 @@ intel_tv_init(struct drm_device *dev) | |||
1708 | drm_connector_attach_property(connector, | 1721 | drm_connector_attach_property(connector, |
1709 | dev->mode_config.tv_bottom_margin_property, | 1722 | dev->mode_config.tv_bottom_margin_property, |
1710 | intel_tv->margin[TV_MARGIN_BOTTOM]); | 1723 | intel_tv->margin[TV_MARGIN_BOTTOM]); |
1711 | out: | ||
1712 | drm_sysfs_connector_add(connector); | 1724 | drm_sysfs_connector_add(connector); |
1713 | } | 1725 | } |