author    Linus Torvalds <torvalds@linux-foundation.org>  2010-10-26 21:57:59 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2010-10-26 21:57:59 -0400
commit    c48c43e422c1404fd72c57d1d21a6f6d01e18900 (patch)
tree      48e5d3828b4f5479361986535f71a1ae44e4f3c1 /drivers/gpu/drm/i915
parent    520045db940a381d2bee1c1b2179f7921b40fb10 (diff)
parent    135cba0dc399fdd47bd3ae305c1db75fcd77243f (diff)
Merge branch 'drm-core-next' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6
* 'drm-core-next' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6: (476 commits)
  vmwgfx: Implement a proper GMR eviction mechanism
  drm/radeon/kms: fix r6xx/7xx 1D tiling CS checker v2
  drm/radeon/kms: properly compute group_size on 6xx/7xx
  drm/radeon/kms: fix 2D tile height alignment in the r600 CS checker
  drm/radeon/kms/evergreen: set the clear state to the blit state
  drm/radeon/kms: don't poll dac load detect.
  gpu: Add Intel GMA500(Poulsbo) Stub Driver
  drm/radeon/kms: MC vram map needs to be >= pci aperture size
  drm/radeon/kms: implement display watermark support for evergreen
  drm/radeon/kms/evergreen: add some additional safe regs v2
  drm/radeon/r600: fix tiling issues in CS checker.
  drm/i915: Move gpu_write_list to per-ring
  drm/i915: Invalidate the to-ring, flush the old-ring when updating domains
  drm/i915/ringbuffer: Write the value passed in to the tail register
  agp/intel: Restore valid PTE bit for Sandybridge after bdd3072
  drm/i915: Fix flushing regression from 9af90d19f
  drm/i915/sdvo: Remove unused encoding member
  i915: enable AVI infoframe for intel_hdmi.c [v4]
  drm/i915: Fix current fb blocking for page flip
  drm/i915: IS_IRONLAKE is synonymous with gen == 5
  ...

Fix up conflicts in
 - drivers/gpu/drm/i915/{i915_gem.c, i915/intel_overlay.c}: due to the new
   simplified stack-based kmap_atomic() interface
 - drivers/gpu/drm/vmwgfx/vmwgfx_drv.c: added .llseek entry due to BKL
   removal cleanups.
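For reference, the "stack-based kmap_atomic() interface" blamed for the i915
conflicts is the kernel-wide change that dropped the explicit KM_* slot
argument from kmap_atomic()/kunmap_atomic(). A minimal before/after sketch,
not taken from this diff:

	/* old interface: caller names a fixed atomic slot */
	void *vaddr = kmap_atomic(page, KM_USER0);
	memcpy(dst, vaddr, PAGE_SIZE);
	kunmap_atomic(vaddr, KM_USER0);

	/* new interface: slots are implicit and nest like a stack */
	void *vaddr = kmap_atomic(page);
	memcpy(dst, vaddr, PAGE_SIZE);
	kunmap_atomic(vaddr);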
Diffstat (limited to 'drivers/gpu/drm/i915')
-rw-r--r--  drivers/gpu/drm/i915/Makefile                                          4
-rw-r--r--  drivers/gpu/drm/i915/dvo_ch7017.c                                     66
-rw-r--r--  drivers/gpu/drm/i915/dvo_ch7xxx.c                                     10
-rw-r--r--  drivers/gpu/drm/i915/dvo_ivch.c                                       10
-rw-r--r--  drivers/gpu/drm/i915/dvo_sil164.c                                     10
-rw-r--r--  drivers/gpu/drm/i915/dvo_tfp410.c                                     10
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c                                  336
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c                                      360
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c                                      214
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h                                      271
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c                                     2209
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_debug.c                                148
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_evict.c                                 72
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_tiling.c                                54
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c                                      259
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h                                      335
-rw-r--r--  drivers/gpu/drm/i915/i915_suspend.c                                   28
-rw-r--r--  drivers/gpu/drm/i915/intel_acpi.c                                    286
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.c                                    234
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.h                                      6
-rw-r--r--  drivers/gpu/drm/i915/intel_crt.c                                     127
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c                                2357
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c                                      658
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h                                     160
-rw-r--r--  drivers/gpu/drm/i915/intel_dvo.c                                      69
-rw-r--r--  drivers/gpu/drm/i915/intel_fb.c                                       29
-rw-r--r--  drivers/gpu/drm/i915/intel_hdmi.c                                    193
-rw-r--r--  drivers/gpu/drm/i915/intel_i2c.c                                     484
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c                                    435
-rw-r--r--  drivers/gpu/drm/i915/intel_modes.c                                    16
-rw-r--r--  drivers/gpu/drm/i915/intel_opregion.c (renamed from drivers/gpu/drm/i915/i915_opregion.c)  181
-rw-r--r--  drivers/gpu/drm/i915/intel_overlay.c                                1000
-rw-r--r--  drivers/gpu/drm/i915/intel_panel.c                                   109
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c                              457
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.h                               81
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c                                   1076
-rw-r--r--  drivers/gpu/drm/i915/intel_tv.c                                      165
37 files changed, 7022 insertions, 5497 deletions
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 5c8e53458edb..fdc833d5cc7b 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -26,15 +26,17 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o \
 	  intel_dvo.o \
 	  intel_ringbuffer.o \
 	  intel_overlay.o \
+	  intel_opregion.o \
 	  dvo_ch7xxx.o \
 	  dvo_ch7017.o \
 	  dvo_ivch.o \
 	  dvo_tfp410.o \
 	  dvo_sil164.o
 
-i915-$(CONFIG_ACPI) += i915_opregion.o
 i915-$(CONFIG_COMPAT) += i915_ioc32.o
 
+i915-$(CONFIG_ACPI) += intel_acpi.o
+
 obj-$(CONFIG_DRM_I915) += i915.o
 
 CFLAGS_i915_trace_points.o := -I$(src)
diff --git a/drivers/gpu/drm/i915/dvo_ch7017.c b/drivers/gpu/drm/i915/dvo_ch7017.c
index 14d59804acd7..af70337567ce 100644
--- a/drivers/gpu/drm/i915/dvo_ch7017.c
+++ b/drivers/gpu/drm/i915/dvo_ch7017.c
@@ -165,67 +165,44 @@ struct ch7017_priv {
 static void ch7017_dump_regs(struct intel_dvo_device *dvo);
 static void ch7017_dpms(struct intel_dvo_device *dvo, int mode);
 
-static bool ch7017_read(struct intel_dvo_device *dvo, int addr, uint8_t *val)
+static bool ch7017_read(struct intel_dvo_device *dvo, u8 addr, u8 *val)
 {
-	struct i2c_adapter *adapter = dvo->i2c_bus;
-	struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
-	u8 out_buf[2];
-	u8 in_buf[2];
-
 	struct i2c_msg msgs[] = {
 		{
 			.addr = dvo->slave_addr,
 			.flags = 0,
 			.len = 1,
-			.buf = out_buf,
+			.buf = &addr,
 		},
 		{
 			.addr = dvo->slave_addr,
 			.flags = I2C_M_RD,
 			.len = 1,
-			.buf = in_buf,
+			.buf = val,
 		}
 	};
-
-	out_buf[0] = addr;
-	out_buf[1] = 0;
-
-	if (i2c_transfer(&i2cbus->adapter, msgs, 2) == 2) {
-		*val= in_buf[0];
-		return true;
-	};
-
-	return false;
+	return i2c_transfer(dvo->i2c_bus, msgs, 2) == 2;
 }
 
-static bool ch7017_write(struct intel_dvo_device *dvo, int addr, uint8_t val)
+static bool ch7017_write(struct intel_dvo_device *dvo, u8 addr, u8 val)
 {
-	struct i2c_adapter *adapter = dvo->i2c_bus;
-	struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
-	uint8_t out_buf[2];
+	uint8_t buf[2] = { addr, val };
 	struct i2c_msg msg = {
 		.addr = dvo->slave_addr,
 		.flags = 0,
 		.len = 2,
-		.buf = out_buf,
+		.buf = buf,
 	};
-
-	out_buf[0] = addr;
-	out_buf[1] = val;
-
-	if (i2c_transfer(&i2cbus->adapter, &msg, 1) == 1)
-		return true;
-
-	return false;
+	return i2c_transfer(dvo->i2c_bus, &msg, 1) == 1;
 }
 
 /** Probes for a CH7017 on the given bus and slave address. */
 static bool ch7017_init(struct intel_dvo_device *dvo,
 			struct i2c_adapter *adapter)
 {
-	struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
 	struct ch7017_priv *priv;
-	uint8_t val;
+	const char *str;
+	u8 val;
 
 	priv = kzalloc(sizeof(struct ch7017_priv), GFP_KERNEL);
 	if (priv == NULL)
@@ -237,16 +214,27 @@ static bool ch7017_init(struct intel_dvo_device *dvo,
 	if (!ch7017_read(dvo, CH7017_DEVICE_ID, &val))
 		goto fail;
 
-	if (val != CH7017_DEVICE_ID_VALUE &&
-	    val != CH7018_DEVICE_ID_VALUE &&
-	    val != CH7019_DEVICE_ID_VALUE) {
+	switch (val) {
+	case CH7017_DEVICE_ID_VALUE:
+		str = "ch7017";
+		break;
+	case CH7018_DEVICE_ID_VALUE:
+		str = "ch7018";
+		break;
+	case CH7019_DEVICE_ID_VALUE:
+		str = "ch7019";
+		break;
+	default:
 		DRM_DEBUG_KMS("ch701x not detected, got %d: from %s "
-			      "Slave %d.\n",
-			      val, i2cbus->adapter.name, dvo->slave_addr);
+			      "slave %d.\n",
+			      val, adapter->name, dvo->slave_addr);
 		goto fail;
 	}
 
+	DRM_DEBUG_KMS("%s detected on %s, addr %d\n",
+		      str, adapter->name, dvo->slave_addr);
 	return true;
+
 fail:
 	kfree(priv);
 	return false;
@@ -368,7 +356,7 @@ static void ch7017_dpms(struct intel_dvo_device *dvo, int mode)
 	}
 
 	/* XXX: Should actually wait for update power status somehow */
-	udelay(20000);
+	msleep(20);
 }
 
 static void ch7017_dump_regs(struct intel_dvo_device *dvo)
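Net effect of the ch7017 changes above: the intel_i2c_chan container_of()
indirection is gone, dvo->i2c_bus is handed straight to i2c_transfer(), and
the i2c_msg buffers now alias the caller's storage. A sketch of the resulting
read helper (condensed from the + side of the hunk, assuming the kernel's
struct i2c_msg / i2c_transfer() API):

	static bool ch7017_read(struct intel_dvo_device *dvo, u8 addr, u8 *val)
	{
		struct i2c_msg msgs[] = {
			{ .addr = dvo->slave_addr, .flags = 0,        .len = 1, .buf = &addr },
			{ .addr = dvo->slave_addr, .flags = I2C_M_RD, .len = 1, .buf = val },
		};
		/* success means both messages were transferred */
		return i2c_transfer(dvo->i2c_bus, msgs, 2) == 2;
	}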
diff --git a/drivers/gpu/drm/i915/dvo_ch7xxx.c b/drivers/gpu/drm/i915/dvo_ch7xxx.c
index 6f1944b24441..7eaa94e4ff06 100644
--- a/drivers/gpu/drm/i915/dvo_ch7xxx.c
+++ b/drivers/gpu/drm/i915/dvo_ch7xxx.c
@@ -113,7 +113,6 @@ static bool ch7xxx_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
 {
 	struct ch7xxx_priv *ch7xxx= dvo->dev_priv;
 	struct i2c_adapter *adapter = dvo->i2c_bus;
-	struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
 	u8 out_buf[2];
 	u8 in_buf[2];
 
@@ -135,14 +134,14 @@ static bool ch7xxx_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
 	out_buf[0] = addr;
 	out_buf[1] = 0;
 
-	if (i2c_transfer(&i2cbus->adapter, msgs, 2) == 2) {
+	if (i2c_transfer(adapter, msgs, 2) == 2) {
 		*ch = in_buf[0];
 		return true;
 	};
 
 	if (!ch7xxx->quiet) {
 		DRM_DEBUG_KMS("Unable to read register 0x%02x from %s:%02x.\n",
-			      addr, i2cbus->adapter.name, dvo->slave_addr);
+			      addr, adapter->name, dvo->slave_addr);
 	}
 	return false;
 }
@@ -152,7 +151,6 @@ static bool ch7xxx_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
 {
 	struct ch7xxx_priv *ch7xxx = dvo->dev_priv;
 	struct i2c_adapter *adapter = dvo->i2c_bus;
-	struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
 	uint8_t out_buf[2];
 	struct i2c_msg msg = {
 		.addr = dvo->slave_addr,
@@ -164,12 +162,12 @@ static bool ch7xxx_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
 	out_buf[0] = addr;
 	out_buf[1] = ch;
 
-	if (i2c_transfer(&i2cbus->adapter, &msg, 1) == 1)
+	if (i2c_transfer(adapter, &msg, 1) == 1)
 		return true;
 
 	if (!ch7xxx->quiet) {
 		DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d.\n",
-			      addr, i2cbus->adapter.name, dvo->slave_addr);
+			      addr, adapter->name, dvo->slave_addr);
 	}
 
 	return false;
diff --git a/drivers/gpu/drm/i915/dvo_ivch.c b/drivers/gpu/drm/i915/dvo_ivch.c
index a2ec3f487202..a12ed9414cc7 100644
--- a/drivers/gpu/drm/i915/dvo_ivch.c
+++ b/drivers/gpu/drm/i915/dvo_ivch.c
@@ -167,7 +167,6 @@ static bool ivch_read(struct intel_dvo_device *dvo, int addr, uint16_t *data)
 {
 	struct ivch_priv *priv = dvo->dev_priv;
 	struct i2c_adapter *adapter = dvo->i2c_bus;
-	struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
 	u8 out_buf[1];
 	u8 in_buf[2];
 
@@ -193,7 +192,7 @@ static bool ivch_read(struct intel_dvo_device *dvo, int addr, uint16_t *data)
 
 	out_buf[0] = addr;
 
-	if (i2c_transfer(&i2cbus->adapter, msgs, 3) == 3) {
+	if (i2c_transfer(adapter, msgs, 3) == 3) {
 		*data = (in_buf[1] << 8) | in_buf[0];
 		return true;
 	};
@@ -201,7 +200,7 @@ static bool ivch_read(struct intel_dvo_device *dvo, int addr, uint16_t *data)
 	if (!priv->quiet) {
 		DRM_DEBUG_KMS("Unable to read register 0x%02x from "
 			      "%s:%02x.\n",
-			      addr, i2cbus->adapter.name, dvo->slave_addr);
+			      addr, adapter->name, dvo->slave_addr);
 	}
 	return false;
 }
@@ -211,7 +210,6 @@ static bool ivch_write(struct intel_dvo_device *dvo, int addr, uint16_t data)
 {
 	struct ivch_priv *priv = dvo->dev_priv;
 	struct i2c_adapter *adapter = dvo->i2c_bus;
-	struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
 	u8 out_buf[3];
 	struct i2c_msg msg = {
 		.addr = dvo->slave_addr,
@@ -224,12 +222,12 @@ static bool ivch_write(struct intel_dvo_device *dvo, int addr, uint16_t data)
 	out_buf[1] = data & 0xff;
 	out_buf[2] = data >> 8;
 
-	if (i2c_transfer(&i2cbus->adapter, &msg, 1) == 1)
+	if (i2c_transfer(adapter, &msg, 1) == 1)
 		return true;
 
 	if (!priv->quiet) {
 		DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d.\n",
-			      addr, i2cbus->adapter.name, dvo->slave_addr);
+			      addr, adapter->name, dvo->slave_addr);
 	}
 
 	return false;
diff --git a/drivers/gpu/drm/i915/dvo_sil164.c b/drivers/gpu/drm/i915/dvo_sil164.c
index 9b8e6765cf26..e4b4091df942 100644
--- a/drivers/gpu/drm/i915/dvo_sil164.c
+++ b/drivers/gpu/drm/i915/dvo_sil164.c
@@ -69,7 +69,6 @@ static bool sil164_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
 {
 	struct sil164_priv *sil = dvo->dev_priv;
 	struct i2c_adapter *adapter = dvo->i2c_bus;
-	struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
 	u8 out_buf[2];
 	u8 in_buf[2];
 
@@ -91,14 +90,14 @@ static bool sil164_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
 	out_buf[0] = addr;
 	out_buf[1] = 0;
 
-	if (i2c_transfer(&i2cbus->adapter, msgs, 2) == 2) {
+	if (i2c_transfer(adapter, msgs, 2) == 2) {
 		*ch = in_buf[0];
 		return true;
 	};
 
 	if (!sil->quiet) {
 		DRM_DEBUG_KMS("Unable to read register 0x%02x from %s:%02x.\n",
-			      addr, i2cbus->adapter.name, dvo->slave_addr);
+			      addr, adapter->name, dvo->slave_addr);
 	}
 	return false;
 }
@@ -107,7 +106,6 @@ static bool sil164_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
 {
 	struct sil164_priv *sil= dvo->dev_priv;
 	struct i2c_adapter *adapter = dvo->i2c_bus;
-	struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
 	uint8_t out_buf[2];
 	struct i2c_msg msg = {
 		.addr = dvo->slave_addr,
@@ -119,12 +117,12 @@ static bool sil164_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
 	out_buf[0] = addr;
 	out_buf[1] = ch;
 
-	if (i2c_transfer(&i2cbus->adapter, &msg, 1) == 1)
+	if (i2c_transfer(adapter, &msg, 1) == 1)
 		return true;
 
 	if (!sil->quiet) {
 		DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d.\n",
-			      addr, i2cbus->adapter.name, dvo->slave_addr);
+			      addr, adapter->name, dvo->slave_addr);
 	}
 
 	return false;
diff --git a/drivers/gpu/drm/i915/dvo_tfp410.c b/drivers/gpu/drm/i915/dvo_tfp410.c
index 56f66426207f..8ab2855bb544 100644
--- a/drivers/gpu/drm/i915/dvo_tfp410.c
+++ b/drivers/gpu/drm/i915/dvo_tfp410.c
@@ -94,7 +94,6 @@ static bool tfp410_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
 {
 	struct tfp410_priv *tfp = dvo->dev_priv;
 	struct i2c_adapter *adapter = dvo->i2c_bus;
-	struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
 	u8 out_buf[2];
 	u8 in_buf[2];
 
@@ -116,14 +115,14 @@ static bool tfp410_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
 	out_buf[0] = addr;
 	out_buf[1] = 0;
 
-	if (i2c_transfer(&i2cbus->adapter, msgs, 2) == 2) {
+	if (i2c_transfer(adapter, msgs, 2) == 2) {
 		*ch = in_buf[0];
 		return true;
 	};
 
 	if (!tfp->quiet) {
 		DRM_DEBUG_KMS("Unable to read register 0x%02x from %s:%02x.\n",
-			      addr, i2cbus->adapter.name, dvo->slave_addr);
+			      addr, adapter->name, dvo->slave_addr);
 	}
 	return false;
 }
@@ -132,7 +131,6 @@ static bool tfp410_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
 {
 	struct tfp410_priv *tfp = dvo->dev_priv;
 	struct i2c_adapter *adapter = dvo->i2c_bus;
-	struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
 	uint8_t out_buf[2];
 	struct i2c_msg msg = {
 		.addr = dvo->slave_addr,
@@ -144,12 +142,12 @@ static bool tfp410_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
 	out_buf[0] = addr;
 	out_buf[1] = ch;
 
-	if (i2c_transfer(&i2cbus->adapter, &msg, 1) == 1)
+	if (i2c_transfer(adapter, &msg, 1) == 1)
 		return true;
 
 	if (!tfp->quiet) {
 		DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d.\n",
-			      addr, i2cbus->adapter.name, dvo->slave_addr);
+			      addr, adapter->name, dvo->slave_addr);
 	}
 
 	return false;
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 048149748fdc..1f4f3ceb63c7 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -40,9 +40,51 @@
 
 #if defined(CONFIG_DEBUG_FS)
 
-#define ACTIVE_LIST	1
-#define FLUSHING_LIST	2
-#define INACTIVE_LIST	3
+enum {
+	ACTIVE_LIST,
+	FLUSHING_LIST,
+	INACTIVE_LIST,
+	PINNED_LIST,
+	DEFERRED_FREE_LIST,
+};
+
+static const char *yesno(int v)
+{
+	return v ? "yes" : "no";
+}
+
+static int i915_capabilities(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	const struct intel_device_info *info = INTEL_INFO(dev);
+
+	seq_printf(m, "gen: %d\n", info->gen);
+#define B(x) seq_printf(m, #x ": %s\n", yesno(info->x))
+	B(is_mobile);
+	B(is_i85x);
+	B(is_i915g);
+	B(is_i945gm);
+	B(is_g33);
+	B(need_gfx_hws);
+	B(is_g4x);
+	B(is_pineview);
+	B(is_broadwater);
+	B(is_crestline);
+	B(has_fbc);
+	B(has_rc6);
+	B(has_pipe_cxsr);
+	B(has_hotplug);
+	B(cursor_needs_physical);
+	B(has_overlay);
+	B(overlay_needs_physical);
+	B(supports_tv);
+	B(has_bsd_ring);
+	B(has_blt_ring);
+#undef B
+
+	return 0;
+}
 
 static const char *get_pin_flag(struct drm_i915_gem_object *obj_priv)
 {
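The B(x) macro in i915_capabilities() leans on preprocessor stringification:
#x turns the field name into the printed label, so each capability line reads
"name: yes/no" without restating the name. A self-contained illustration of
the same trick (hypothetical struct, not kernel code):

	#include <stdio.h>

	struct caps { int has_fbc, has_bsd_ring; };
	#define B(x) printf(#x ": %s\n", c.x ? "yes" : "no")

	int main(void)
	{
		struct caps c = { .has_fbc = 1, .has_bsd_ring = 0 };
		B(has_fbc);		/* prints "has_fbc: yes" */
		B(has_bsd_ring);	/* prints "has_bsd_ring: no" */
		return 0;
	}
	#undef B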
@@ -64,6 +106,29 @@ static const char *get_tiling_flag(struct drm_i915_gem_object *obj_priv)
 	}
 }
 
+static void
+describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
+{
+	seq_printf(m, "%p: %s%s %8zd %08x %08x %d%s%s",
+		   &obj->base,
+		   get_pin_flag(obj),
+		   get_tiling_flag(obj),
+		   obj->base.size,
+		   obj->base.read_domains,
+		   obj->base.write_domain,
+		   obj->last_rendering_seqno,
+		   obj->dirty ? " dirty" : "",
+		   obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
+	if (obj->base.name)
+		seq_printf(m, " (name: %d)", obj->base.name);
+	if (obj->fence_reg != I915_FENCE_REG_NONE)
+		seq_printf(m, " (fence: %d)", obj->fence_reg);
+	if (obj->gtt_space != NULL)
+		seq_printf(m, " (gtt_offset: %08x)", obj->gtt_offset);
+	if (obj->ring != NULL)
+		seq_printf(m, " (%s)", obj->ring->name);
+}
+
 static int i915_gem_object_list_info(struct seq_file *m, void *data)
 {
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -72,56 +137,80 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
 	struct drm_device *dev = node->minor->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj_priv;
-	spinlock_t *lock = NULL;
+	size_t total_obj_size, total_gtt_size;
+	int count, ret;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
 
 	switch (list) {
 	case ACTIVE_LIST:
 		seq_printf(m, "Active:\n");
-		lock = &dev_priv->mm.active_list_lock;
-		head = &dev_priv->render_ring.active_list;
+		head = &dev_priv->mm.active_list;
 		break;
 	case INACTIVE_LIST:
 		seq_printf(m, "Inactive:\n");
 		head = &dev_priv->mm.inactive_list;
 		break;
+	case PINNED_LIST:
+		seq_printf(m, "Pinned:\n");
+		head = &dev_priv->mm.pinned_list;
+		break;
 	case FLUSHING_LIST:
 		seq_printf(m, "Flushing:\n");
 		head = &dev_priv->mm.flushing_list;
 		break;
+	case DEFERRED_FREE_LIST:
+		seq_printf(m, "Deferred free:\n");
+		head = &dev_priv->mm.deferred_free_list;
+		break;
 	default:
-		DRM_INFO("Ooops, unexpected list\n");
-		return 0;
+		mutex_unlock(&dev->struct_mutex);
+		return -EINVAL;
 	}
 
-	if (lock)
-		spin_lock(lock);
-	list_for_each_entry(obj_priv, head, list)
-	{
-		seq_printf(m, " %p: %s %8zd %08x %08x %d%s%s",
-			   &obj_priv->base,
-			   get_pin_flag(obj_priv),
-			   obj_priv->base.size,
-			   obj_priv->base.read_domains,
-			   obj_priv->base.write_domain,
-			   obj_priv->last_rendering_seqno,
-			   obj_priv->dirty ? " dirty" : "",
-			   obj_priv->madv == I915_MADV_DONTNEED ? " purgeable" : "");
-
-		if (obj_priv->base.name)
-			seq_printf(m, " (name: %d)", obj_priv->base.name);
-		if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
-			seq_printf(m, " (fence: %d)", obj_priv->fence_reg);
-		if (obj_priv->gtt_space != NULL)
-			seq_printf(m, " (gtt_offset: %08x)", obj_priv->gtt_offset);
-
+	total_obj_size = total_gtt_size = count = 0;
+	list_for_each_entry(obj_priv, head, mm_list) {
+		seq_printf(m, " ");
+		describe_obj(m, obj_priv);
 		seq_printf(m, "\n");
+		total_obj_size += obj_priv->base.size;
+		total_gtt_size += obj_priv->gtt_space->size;
+		count++;
 	}
+	mutex_unlock(&dev->struct_mutex);
 
-	if (lock)
-		spin_unlock(lock);
+	seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
+		   count, total_obj_size, total_gtt_size);
 	return 0;
 }
 
+static int i915_gem_object_info(struct seq_file *m, void* data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+
+	seq_printf(m, "%u objects\n", dev_priv->mm.object_count);
+	seq_printf(m, "%zu object bytes\n", dev_priv->mm.object_memory);
+	seq_printf(m, "%u pinned\n", dev_priv->mm.pin_count);
+	seq_printf(m, "%zu pin bytes\n", dev_priv->mm.pin_memory);
+	seq_printf(m, "%u objects in gtt\n", dev_priv->mm.gtt_count);
+	seq_printf(m, "%zu gtt bytes\n", dev_priv->mm.gtt_memory);
+	seq_printf(m, "%zu gtt total\n", dev_priv->mm.gtt_total);
+
+	mutex_unlock(&dev->struct_mutex);
+
+	return 0;
+}
+
+
 static int i915_gem_pageflip_info(struct seq_file *m, void *data)
 {
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -176,6 +265,11 @@ static int i915_gem_request_info(struct seq_file *m, void *data)
 	struct drm_device *dev = node->minor->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_gem_request *gem_request;
+	int ret;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
 
 	seq_printf(m, "Request:\n");
 	list_for_each_entry(gem_request, &dev_priv->render_ring.request_list,
@@ -184,6 +278,8 @@ static int i915_gem_request_info(struct seq_file *m, void *data)
 			   gem_request->seqno,
 			   (int) (jiffies - gem_request->emitted_jiffies));
 	}
+	mutex_unlock(&dev->struct_mutex);
+
 	return 0;
 }
 
@@ -192,16 +288,24 @@ static int i915_gem_seqno_info(struct seq_file *m, void *data)
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	int ret;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
 
 	if (dev_priv->render_ring.status_page.page_addr != NULL) {
 		seq_printf(m, "Current sequence: %d\n",
-			   i915_get_gem_seqno(dev, &dev_priv->render_ring));
+			   dev_priv->render_ring.get_seqno(dev, &dev_priv->render_ring));
 	} else {
 		seq_printf(m, "Current sequence: hws uninitialized\n");
 	}
 	seq_printf(m, "Waiter sequence: %d\n",
 		   dev_priv->mm.waiting_gem_seqno);
 	seq_printf(m, "IRQ sequence: %d\n", dev_priv->mm.irq_gem_seqno);
+
+	mutex_unlock(&dev->struct_mutex);
+
 	return 0;
 }
 
@@ -211,6 +315,11 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	int ret;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
 
 	if (!HAS_PCH_SPLIT(dev)) {
 		seq_printf(m, "Interrupt enable: %08x\n",
@@ -247,7 +356,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
 		   atomic_read(&dev_priv->irq_received));
 	if (dev_priv->render_ring.status_page.page_addr != NULL) {
 		seq_printf(m, "Current sequence: %d\n",
-			   i915_get_gem_seqno(dev, &dev_priv->render_ring));
+			   dev_priv->render_ring.get_seqno(dev, &dev_priv->render_ring));
 	} else {
 		seq_printf(m, "Current sequence: hws uninitialized\n");
 	}
@@ -255,6 +364,8 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
 		   dev_priv->mm.waiting_gem_seqno);
 	seq_printf(m, "IRQ sequence: %d\n",
 		   dev_priv->mm.irq_gem_seqno);
+	mutex_unlock(&dev->struct_mutex);
+
 	return 0;
 }
 
@@ -263,7 +374,11 @@ static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	int i;
+	int i, ret;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
 
 	seq_printf(m, "Reserved fences = %d\n", dev_priv->fence_reg_start);
 	seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
@@ -289,6 +404,7 @@ static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
 			seq_printf(m, "\n");
 		}
 	}
+	mutex_unlock(&dev->struct_mutex);
 
 	return 0;
 }
@@ -313,16 +429,19 @@ static int i915_hws_info(struct seq_file *m, void *data)
 	return 0;
 }
 
-static void i915_dump_pages(struct seq_file *m, struct page **pages, int page_count)
+static void i915_dump_object(struct seq_file *m,
+			     struct io_mapping *mapping,
+			     struct drm_i915_gem_object *obj_priv)
 {
-	int page, i;
-	uint32_t *mem;
+	int page, page_count, i;
 
+	page_count = obj_priv->base.size / PAGE_SIZE;
 	for (page = 0; page < page_count; page++) {
-		mem = kmap_atomic(pages[page], KM_USER0);
+		u32 *mem = io_mapping_map_wc(mapping,
+					     obj_priv->gtt_offset + page * PAGE_SIZE);
 		for (i = 0; i < PAGE_SIZE; i += 4)
 			seq_printf(m, "%08x : %08x\n", i, mem[i / 4]);
-		kunmap_atomic(mem, KM_USER0);
+		io_mapping_unmap(mem);
 	}
 }
 
@@ -335,27 +454,20 @@ static int i915_batchbuffer_info(struct seq_file *m, void *data)
 	struct drm_i915_gem_object *obj_priv;
 	int ret;
 
-	spin_lock(&dev_priv->mm.active_list_lock);
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
 
-	list_for_each_entry(obj_priv, &dev_priv->render_ring.active_list,
-			list) {
+	list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) {
 		obj = &obj_priv->base;
 		if (obj->read_domains & I915_GEM_DOMAIN_COMMAND) {
-			ret = i915_gem_object_get_pages(obj, 0);
-			if (ret) {
-				DRM_ERROR("Failed to get pages: %d\n", ret);
-				spin_unlock(&dev_priv->mm.active_list_lock);
-				return ret;
-			}
-
-			seq_printf(m, "--- gtt_offset = 0x%08x\n", obj_priv->gtt_offset);
-			i915_dump_pages(m, obj_priv->pages, obj->size / PAGE_SIZE);
-
-			i915_gem_object_put_pages(obj);
+			seq_printf(m, "--- gtt_offset = 0x%08x\n",
+				   obj_priv->gtt_offset);
+			i915_dump_object(m, dev_priv->mm.gtt_mapping, obj_priv);
 		}
 	}
 
-	spin_unlock(&dev_priv->mm.active_list_lock);
+	mutex_unlock(&dev->struct_mutex);
 
 	return 0;
 }
@@ -365,20 +477,24 @@ static int i915_ringbuffer_data(struct seq_file *m, void *data)
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	u8 *virt;
-	uint32_t *ptr, off;
+	int ret;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
 
 	if (!dev_priv->render_ring.gem_object) {
 		seq_printf(m, "No ringbuffer setup\n");
-		return 0;
-	}
-
-	virt = dev_priv->render_ring.virtual_start;
+	} else {
+		u8 *virt = dev_priv->render_ring.virtual_start;
+		uint32_t off;
 
-	for (off = 0; off < dev_priv->render_ring.size; off += 4) {
-		ptr = (uint32_t *)(virt + off);
-		seq_printf(m, "%08x : %08x\n", off, *ptr);
+		for (off = 0; off < dev_priv->render_ring.size; off += 4) {
+			uint32_t *ptr = (uint32_t *)(virt + off);
+			seq_printf(m, "%08x : %08x\n", off, *ptr);
+		}
 	}
+	mutex_unlock(&dev->struct_mutex);
 
 	return 0;
 }
@@ -396,7 +512,7 @@ static int i915_ringbuffer_info(struct seq_file *m, void *data)
 	seq_printf(m, "RingHead : %08x\n", head);
 	seq_printf(m, "RingTail : %08x\n", tail);
 	seq_printf(m, "RingSize : %08lx\n", dev_priv->render_ring.size);
-	seq_printf(m, "Acthd : %08x\n", I915_READ(IS_I965G(dev) ? ACTHD_I965 : ACTHD));
+	seq_printf(m, "Acthd : %08x\n", I915_READ(INTEL_INFO(dev)->gen >= 4 ? ACTHD_I965 : ACTHD));
 
 	return 0;
 }
@@ -458,7 +574,7 @@ static int i915_error_state(struct seq_file *m, void *unused)
 	seq_printf(m, "  IPEHR: 0x%08x\n", error->ipehr);
 	seq_printf(m, "  INSTDONE: 0x%08x\n", error->instdone);
 	seq_printf(m, "  ACTHD: 0x%08x\n", error->acthd);
-	if (IS_I965G(dev)) {
+	if (INTEL_INFO(dev)->gen >= 4) {
 		seq_printf(m, "  INSTPS: 0x%08x\n", error->instps);
 		seq_printf(m, "  INSTDONE1: 0x%08x\n", error->instdone1);
 	}
@@ -642,6 +758,9 @@ static int i915_fbc_status(struct seq_file *m, void *unused)
 	} else {
 		seq_printf(m, "FBC disabled: ");
 		switch (dev_priv->no_fbc_reason) {
+		case FBC_NO_OUTPUT:
+			seq_printf(m, "no outputs");
+			break;
 		case FBC_STOLEN_TOO_SMALL:
 			seq_printf(m, "not enough stolen memory");
 			break;
@@ -675,15 +794,17 @@ static int i915_sr_status(struct seq_file *m, void *unused)
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	bool sr_enabled = false;
 
-	if (IS_I965GM(dev) || IS_I945G(dev) || IS_I945GM(dev))
+	if (IS_GEN5(dev))
+		sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
+	else if (IS_CRESTLINE(dev) || IS_I945G(dev) || IS_I945GM(dev))
 		sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
 	else if (IS_I915GM(dev))
 		sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
 	else if (IS_PINEVIEW(dev))
 		sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
 
-	seq_printf(m, "self-refresh: %s\n", sr_enabled ? "enabled" :
-		   "disabled");
+	seq_printf(m, "self-refresh: %s\n",
+		   sr_enabled ? "enabled" : "disabled");
 
 	return 0;
 }
@@ -694,10 +815,16 @@ static int i915_emon_status(struct seq_file *m, void *unused)
 	struct drm_device *dev = node->minor->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	unsigned long temp, chipset, gfx;
+	int ret;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
 
 	temp = i915_mch_val(dev_priv);
 	chipset = i915_chipset_val(dev_priv);
 	gfx = i915_gfx_val(dev_priv);
+	mutex_unlock(&dev->struct_mutex);
 
 	seq_printf(m, "GMCH temp: %ld\n", temp);
 	seq_printf(m, "Chipset power: %ld\n", chipset);
@@ -718,6 +845,68 @@ static int i915_gfxec(struct seq_file *m, void *unused)
 	return 0;
 }
 
+static int i915_opregion(struct seq_file *m, void *unused)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct intel_opregion *opregion = &dev_priv->opregion;
+	int ret;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+
+	if (opregion->header)
+		seq_write(m, opregion->header, OPREGION_SIZE);
+
+	mutex_unlock(&dev->struct_mutex);
+
+	return 0;
+}
+
+static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct intel_fbdev *ifbdev;
+	struct intel_framebuffer *fb;
+	int ret;
+
+	ret = mutex_lock_interruptible(&dev->mode_config.mutex);
+	if (ret)
+		return ret;
+
+	ifbdev = dev_priv->fbdev;
+	fb = to_intel_framebuffer(ifbdev->helper.fb);
+
+	seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, obj ",
+		   fb->base.width,
+		   fb->base.height,
+		   fb->base.depth,
+		   fb->base.bits_per_pixel);
+	describe_obj(m, to_intel_bo(fb->obj));
+	seq_printf(m, "\n");
+
+	list_for_each_entry(fb, &dev->mode_config.fb_list, base.head) {
+		if (&fb->base == ifbdev->helper.fb)
+			continue;
+
+		seq_printf(m, "user size: %d x %d, depth %d, %d bpp, obj ",
+			   fb->base.width,
+			   fb->base.height,
+			   fb->base.depth,
+			   fb->base.bits_per_pixel);
+		describe_obj(m, to_intel_bo(fb->obj));
+		seq_printf(m, "\n");
+	}
+
+	mutex_unlock(&dev->mode_config.mutex);
+
+	return 0;
+}
+
 static int
 i915_wedged_open(struct inode *inode,
 		 struct file *filp)
@@ -741,6 +930,9 @@ i915_wedged_read(struct file *filp,
741 "wedged : %d\n", 930 "wedged : %d\n",
742 atomic_read(&dev_priv->mm.wedged)); 931 atomic_read(&dev_priv->mm.wedged));
743 932
933 if (len > sizeof (buf))
934 len = sizeof (buf);
935
744 return simple_read_from_buffer(ubuf, max, ppos, buf, len); 936 return simple_read_from_buffer(ubuf, max, ppos, buf, len);
745} 937}
746 938
@@ -770,7 +962,7 @@ i915_wedged_write(struct file *filp,
 
 	atomic_set(&dev_priv->mm.wedged, val);
 	if (val) {
-		DRM_WAKEUP(&dev_priv->irq_queue);
+		wake_up_all(&dev_priv->irq_queue);
 		queue_work(dev_priv->wq, &dev_priv->error_work);
 	}
 
@@ -824,9 +1016,13 @@ static int i915_wedged_create(struct dentry *root, struct drm_minor *minor)
 }
 
 static struct drm_info_list i915_debugfs_list[] = {
+	{"i915_capabilities", i915_capabilities, 0, 0},
+	{"i915_gem_objects", i915_gem_object_info, 0},
 	{"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
 	{"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST},
 	{"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
+	{"i915_gem_pinned", i915_gem_object_list_info, 0, (void *) PINNED_LIST},
+	{"i915_gem_deferred_free", i915_gem_object_list_info, 0, (void *) DEFERRED_FREE_LIST},
 	{"i915_gem_pageflip", i915_gem_pageflip_info, 0},
 	{"i915_gem_request", i915_gem_request_info, 0},
 	{"i915_gem_seqno", i915_gem_seqno_info, 0},
@@ -846,6 +1042,8 @@ static struct drm_info_list i915_debugfs_list[] = {
846 {"i915_gfxec", i915_gfxec, 0}, 1042 {"i915_gfxec", i915_gfxec, 0},
847 {"i915_fbc_status", i915_fbc_status, 0}, 1043 {"i915_fbc_status", i915_fbc_status, 0},
848 {"i915_sr_status", i915_sr_status, 0}, 1044 {"i915_sr_status", i915_sr_status, 0},
1045 {"i915_opregion", i915_opregion, 0},
1046 {"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
849}; 1047};
850#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list) 1048#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
851 1049
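A pattern worth noting across the i915_debugfs.c hunks above: every reader now
takes dev->struct_mutex via mutex_lock_interruptible() and returns the error
instead of walking GEM state unlocked. The common shape of a converted handler
(a sketch; i915_example_info is a made-up name):

	static int i915_example_info(struct seq_file *m, void *data)
	{
		struct drm_info_node *node = (struct drm_info_node *) m->private;
		struct drm_device *dev = node->minor->dev;
		int ret;

		/* returns nonzero (e.g. -ERESTARTSYS) if interrupted by a signal */
		ret = mutex_lock_interruptible(&dev->struct_mutex);
		if (ret)
			return ret;

		/* ... seq_printf() GEM state while the lock is held ... */

		mutex_unlock(&dev->struct_mutex);
		return 0;
	}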
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 2dd2c93ebfa3..7a26f4dd21ae 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -40,8 +40,7 @@
 #include <linux/pnp.h>
 #include <linux/vga_switcheroo.h>
 #include <linux/slab.h>
-
-extern int intel_max_stolen; /* from AGP driver */
+#include <acpi/video.h>
 
 /**
  * Sets up the hardware status page for devices that need a physical address
@@ -64,7 +63,7 @@ static int i915_init_phys_hws(struct drm_device *dev)
 
 	memset(dev_priv->render_ring.status_page.page_addr, 0, PAGE_SIZE);
 
-	if (IS_I965G(dev))
+	if (INTEL_INFO(dev)->gen >= 4)
 		dev_priv->dma_status_page |= (dev_priv->dma_status_page >> 28) &
 						0xf0;
 
@@ -133,8 +132,8 @@ static int i915_dma_cleanup(struct drm_device * dev)
 
 	mutex_lock(&dev->struct_mutex);
 	intel_cleanup_ring_buffer(dev, &dev_priv->render_ring);
-	if (HAS_BSD(dev))
-		intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring);
+	intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring);
+	intel_cleanup_ring_buffer(dev, &dev_priv->blt_ring);
 	mutex_unlock(&dev->struct_mutex);
 
 	/* Clear the HWS virtual address at teardown */
@@ -222,7 +221,7 @@ static int i915_dma_resume(struct drm_device * dev)
 	DRM_DEBUG_DRIVER("hw status page @ %p\n",
 				ring->status_page.page_addr);
 	if (ring->status_page.gfx_addr != 0)
-		ring->setup_status_page(dev, ring);
+		intel_ring_setup_status_page(dev, ring);
 	else
 		I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
 
@@ -377,7 +376,7 @@ i915_emit_box(struct drm_device *dev,
 		return -EINVAL;
 	}
 
-	if (IS_I965G(dev)) {
+	if (INTEL_INFO(dev)->gen >= 4) {
 		BEGIN_LP_RING(4);
 		OUT_RING(GFX_OP_DRAWRECT_INFO_I965);
 		OUT_RING((box.x1 & 0xffff) | (box.y1 << 16));
@@ -481,7 +480,7 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev,
 
 	if (!IS_I830(dev) && !IS_845G(dev)) {
 		BEGIN_LP_RING(2);
-		if (IS_I965G(dev)) {
+		if (INTEL_INFO(dev)->gen >= 4) {
 			OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965);
 			OUT_RING(batch->start);
 		} else {
@@ -500,7 +499,7 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev,
 	}
 
 
-	if (IS_G4X(dev) || IS_IRONLAKE(dev)) {
+	if (IS_G4X(dev) || IS_GEN5(dev)) {
 		BEGIN_LP_RING(2);
 		OUT_RING(MI_FLUSH | MI_NO_WRITE_FLUSH | MI_INVALIDATE_ISP);
 		OUT_RING(MI_NOOP);
@@ -765,6 +764,9 @@ static int i915_getparam(struct drm_device *dev, void *data,
 	case I915_PARAM_HAS_BSD:
 		value = HAS_BSD(dev);
 		break;
+	case I915_PARAM_HAS_BLT:
+		value = HAS_BLT(dev);
+		break;
 	default:
 		DRM_DEBUG_DRIVER("Unknown parameter %d\n",
 				 param->param);
@@ -888,12 +890,12 @@ static int
 intel_alloc_mchbar_resource(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	int reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915;
+	int reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
 	u32 temp_lo, temp_hi = 0;
 	u64 mchbar_addr;
 	int ret;
 
-	if (IS_I965G(dev))
+	if (INTEL_INFO(dev)->gen >= 4)
 		pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
 	pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo);
 	mchbar_addr = ((u64)temp_hi << 32) | temp_lo;
@@ -920,7 +922,7 @@ intel_alloc_mchbar_resource(struct drm_device *dev)
 		return ret;
 	}
 
-	if (IS_I965G(dev))
+	if (INTEL_INFO(dev)->gen >= 4)
 		pci_write_config_dword(dev_priv->bridge_dev, reg + 4,
 				       upper_32_bits(dev_priv->mch_res.start));
 
@@ -934,7 +936,7 @@ static void
 intel_setup_mchbar(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	int mchbar_reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915;
+	int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
 	u32 temp;
 	bool enabled;
 
@@ -971,7 +973,7 @@ static void
 intel_teardown_mchbar(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	int mchbar_reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915;
+	int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
 	u32 temp;
 
 	if (dev_priv->mchbar_need_disable) {
@@ -990,174 +992,6 @@ intel_teardown_mchbar(struct drm_device *dev)
 	release_resource(&dev_priv->mch_res);
 }
 
-/**
- * i915_probe_agp - get AGP bootup configuration
- * @pdev: PCI device
- * @aperture_size: returns AGP aperture configured size
- * @preallocated_size: returns size of BIOS preallocated AGP space
- *
- * Since Intel integrated graphics are UMA, the BIOS has to set aside
- * some RAM for the framebuffer at early boot. This code figures out
- * how much was set aside so we can use it for our own purposes.
- */
-static int i915_probe_agp(struct drm_device *dev, uint32_t *aperture_size,
-			  uint32_t *preallocated_size,
-			  uint32_t *start)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	u16 tmp = 0;
-	unsigned long overhead;
-	unsigned long stolen;
-
-	/* Get the fb aperture size and "stolen" memory amount. */
-	pci_read_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, &tmp);
-
-	*aperture_size = 1024 * 1024;
-	*preallocated_size = 1024 * 1024;
-
-	switch (dev->pdev->device) {
-	case PCI_DEVICE_ID_INTEL_82830_CGC:
-	case PCI_DEVICE_ID_INTEL_82845G_IG:
-	case PCI_DEVICE_ID_INTEL_82855GM_IG:
-	case PCI_DEVICE_ID_INTEL_82865_IG:
-		if ((tmp & INTEL_GMCH_MEM_MASK) == INTEL_GMCH_MEM_64M)
-			*aperture_size *= 64;
-		else
-			*aperture_size *= 128;
-		break;
-	default:
-		/* 9xx supports large sizes, just look at the length */
-		*aperture_size = pci_resource_len(dev->pdev, 2);
-		break;
-	}
-
-	/*
-	 * Some of the preallocated space is taken by the GTT
-	 * and popup. GTT is 1K per MB of aperture size, and popup is 4K.
-	 */
-	if (IS_G4X(dev) || IS_PINEVIEW(dev) || IS_IRONLAKE(dev) || IS_GEN6(dev))
-		overhead = 4096;
-	else
-		overhead = (*aperture_size / 1024) + 4096;
-
-	if (IS_GEN6(dev)) {
-		/* SNB has memory control reg at 0x50.w */
-		pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &tmp);
-
-		switch (tmp & SNB_GMCH_GMS_STOLEN_MASK) {
-		case INTEL_855_GMCH_GMS_DISABLED:
-			DRM_ERROR("video memory is disabled\n");
-			return -1;
-		case SNB_GMCH_GMS_STOLEN_32M:
-			stolen = 32 * 1024 * 1024;
-			break;
-		case SNB_GMCH_GMS_STOLEN_64M:
-			stolen = 64 * 1024 * 1024;
-			break;
-		case SNB_GMCH_GMS_STOLEN_96M:
-			stolen = 96 * 1024 * 1024;
-			break;
-		case SNB_GMCH_GMS_STOLEN_128M:
-			stolen = 128 * 1024 * 1024;
-			break;
-		case SNB_GMCH_GMS_STOLEN_160M:
-			stolen = 160 * 1024 * 1024;
-			break;
-		case SNB_GMCH_GMS_STOLEN_192M:
-			stolen = 192 * 1024 * 1024;
-			break;
-		case SNB_GMCH_GMS_STOLEN_224M:
-			stolen = 224 * 1024 * 1024;
-			break;
-		case SNB_GMCH_GMS_STOLEN_256M:
-			stolen = 256 * 1024 * 1024;
-			break;
-		case SNB_GMCH_GMS_STOLEN_288M:
-			stolen = 288 * 1024 * 1024;
-			break;
-		case SNB_GMCH_GMS_STOLEN_320M:
-			stolen = 320 * 1024 * 1024;
-			break;
-		case SNB_GMCH_GMS_STOLEN_352M:
-			stolen = 352 * 1024 * 1024;
-			break;
-		case SNB_GMCH_GMS_STOLEN_384M:
-			stolen = 384 * 1024 * 1024;
-			break;
-		case SNB_GMCH_GMS_STOLEN_416M:
-			stolen = 416 * 1024 * 1024;
-			break;
-		case SNB_GMCH_GMS_STOLEN_448M:
-			stolen = 448 * 1024 * 1024;
-			break;
-		case SNB_GMCH_GMS_STOLEN_480M:
-			stolen = 480 * 1024 * 1024;
-			break;
-		case SNB_GMCH_GMS_STOLEN_512M:
-			stolen = 512 * 1024 * 1024;
-			break;
-		default:
-			DRM_ERROR("unexpected GMCH_GMS value: 0x%02x\n",
-				  tmp & SNB_GMCH_GMS_STOLEN_MASK);
-			return -1;
-		}
-	} else {
-		switch (tmp & INTEL_GMCH_GMS_MASK) {
-		case INTEL_855_GMCH_GMS_DISABLED:
-			DRM_ERROR("video memory is disabled\n");
-			return -1;
-		case INTEL_855_GMCH_GMS_STOLEN_1M:
-			stolen = 1 * 1024 * 1024;
-			break;
-		case INTEL_855_GMCH_GMS_STOLEN_4M:
-			stolen = 4 * 1024 * 1024;
-			break;
-		case INTEL_855_GMCH_GMS_STOLEN_8M:
-			stolen = 8 * 1024 * 1024;
-			break;
-		case INTEL_855_GMCH_GMS_STOLEN_16M:
-			stolen = 16 * 1024 * 1024;
-			break;
-		case INTEL_855_GMCH_GMS_STOLEN_32M:
-			stolen = 32 * 1024 * 1024;
-			break;
-		case INTEL_915G_GMCH_GMS_STOLEN_48M:
-			stolen = 48 * 1024 * 1024;
-			break;
-		case INTEL_915G_GMCH_GMS_STOLEN_64M:
-			stolen = 64 * 1024 * 1024;
-			break;
-		case INTEL_GMCH_GMS_STOLEN_128M:
-			stolen = 128 * 1024 * 1024;
-			break;
-		case INTEL_GMCH_GMS_STOLEN_256M:
-			stolen = 256 * 1024 * 1024;
-			break;
-		case INTEL_GMCH_GMS_STOLEN_96M:
-			stolen = 96 * 1024 * 1024;
-			break;
-		case INTEL_GMCH_GMS_STOLEN_160M:
-			stolen = 160 * 1024 * 1024;
-			break;
-		case INTEL_GMCH_GMS_STOLEN_224M:
-			stolen = 224 * 1024 * 1024;
-			break;
-		case INTEL_GMCH_GMS_STOLEN_352M:
-			stolen = 352 * 1024 * 1024;
-			break;
-		default:
-			DRM_ERROR("unexpected GMCH_GMS value: 0x%02x\n",
-				  tmp & INTEL_GMCH_GMS_MASK);
-			return -1;
-		}
-	}
-
-	*preallocated_size = stolen - overhead;
-	*start = overhead;
-
-	return 0;
-}
-
 #define PTE_ADDRESS_MASK		0xfffff000
 #define PTE_ADDRESS_MASK_HIGH		0x000000f0 /* i915+ */
 #define PTE_MAPPING_TYPE_UNCACHED	(0 << 1)
@@ -1181,11 +1015,11 @@ static unsigned long i915_gtt_to_phys(struct drm_device *dev,
 {
 	unsigned long *gtt;
 	unsigned long entry, phys;
-	int gtt_bar = IS_I9XX(dev) ? 0 : 1;
+	int gtt_bar = IS_GEN2(dev) ? 1 : 0;
 	int gtt_offset, gtt_size;
 
-	if (IS_I965G(dev)) {
-		if (IS_G4X(dev) || IS_IRONLAKE(dev) || IS_GEN6(dev)) {
+	if (INTEL_INFO(dev)->gen >= 4) {
+		if (IS_G4X(dev) || INTEL_INFO(dev)->gen > 4) {
 			gtt_offset = 2*1024*1024;
 			gtt_size = 2*1024*1024;
 		} else {
@@ -1210,10 +1044,8 @@ static unsigned long i915_gtt_to_phys(struct drm_device *dev,
1210 DRM_DEBUG_DRIVER("GTT addr: 0x%08lx, PTE: 0x%08lx\n", gtt_addr, entry); 1044 DRM_DEBUG_DRIVER("GTT addr: 0x%08lx, PTE: 0x%08lx\n", gtt_addr, entry);
1211 1045
1212 /* Mask out these reserved bits on this hardware. */ 1046 /* Mask out these reserved bits on this hardware. */
1213 if (!IS_I9XX(dev) || IS_I915G(dev) || IS_I915GM(dev) || 1047 if (INTEL_INFO(dev)->gen < 4 && !IS_G33(dev))
1214 IS_I945G(dev) || IS_I945GM(dev)) {
1215 entry &= ~PTE_ADDRESS_MASK_HIGH; 1048 entry &= ~PTE_ADDRESS_MASK_HIGH;
1216 }
1217 1049
1218 /* If it's not a mapping type we know, then bail. */ 1050 /* If it's not a mapping type we know, then bail. */
1219 if ((entry & PTE_MAPPING_TYPE_MASK) != PTE_MAPPING_TYPE_UNCACHED && 1051 if ((entry & PTE_MAPPING_TYPE_MASK) != PTE_MAPPING_TYPE_UNCACHED &&
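The masking above strips PTE_ADDRESS_MASK_HIGH where the hardware treats bits 7:4 as reserved. On chipsets where those bits are valid, a GTT entry packs the physical address as bits 31:12 plus bits 35:32 stored in bits 7:4. A standalone sketch of that decode, with the bit layout assumed from the masks above rather than taken from chipset documentation:

    #include <stdint.h>

    #define PTE_ADDRESS_MASK        0xfffff000u
    #define PTE_ADDRESS_MASK_HIGH   0x000000f0u /* i915+ */

    /* Assumed layout: bits 7:4 carry physical address bits 35:32,
     * so shifting left by 28 moves bit 4 up to bit 32. */
    static uint64_t pte_to_phys(uint32_t entry)
    {
            uint64_t phys = entry & PTE_ADDRESS_MASK;
            phys |= (uint64_t)(entry & PTE_ADDRESS_MASK_HIGH) << 28;
            return phys;
    }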
@@ -1252,7 +1084,7 @@ static void i915_setup_compression(struct drm_device *dev, int size)
1252 unsigned long ll_base = 0; 1084 unsigned long ll_base = 0;
1253 1085
1254 /* Leave 1M for line length buffer & misc. */ 1086 /* Leave 1M for line length buffer & misc. */
1255 compressed_fb = drm_mm_search_free(&dev_priv->vram, size, 4096, 0); 1087 compressed_fb = drm_mm_search_free(&dev_priv->mm.vram, size, 4096, 0);
1256 if (!compressed_fb) { 1088 if (!compressed_fb) {
1257 dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL; 1089 dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
1258 i915_warn_stolen(dev); 1090 i915_warn_stolen(dev);
@@ -1273,7 +1105,7 @@ static void i915_setup_compression(struct drm_device *dev, int size)
1273 } 1105 }
1274 1106
1275 if (!(IS_GM45(dev) || IS_IRONLAKE_M(dev))) { 1107 if (!(IS_GM45(dev) || IS_IRONLAKE_M(dev))) {
1276 compressed_llb = drm_mm_search_free(&dev_priv->vram, 4096, 1108 compressed_llb = drm_mm_search_free(&dev_priv->mm.vram, 4096,
1277 4096, 0); 1109 4096, 0);
1278 if (!compressed_llb) { 1110 if (!compressed_llb) {
1279 i915_warn_stolen(dev); 1111 i915_warn_stolen(dev);
@@ -1343,10 +1175,8 @@ static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_
1343 /* i915 resume handler doesn't set to D0 */ 1175 /* i915 resume handler doesn't set to D0 */
1344 pci_set_power_state(dev->pdev, PCI_D0); 1176 pci_set_power_state(dev->pdev, PCI_D0);
1345 i915_resume(dev); 1177 i915_resume(dev);
1346 drm_kms_helper_poll_enable(dev);
1347 } else { 1178 } else {
1348 printk(KERN_ERR "i915: switched off\n"); 1179 printk(KERN_ERR "i915: switched off\n");
1349 drm_kms_helper_poll_disable(dev);
1350 i915_suspend(dev, pmm); 1180 i915_suspend(dev, pmm);
1351 } 1181 }
1352} 1182}
@@ -1363,23 +1193,14 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
1363} 1193}
1364 1194
1365static int i915_load_modeset_init(struct drm_device *dev, 1195static int i915_load_modeset_init(struct drm_device *dev,
1366 unsigned long prealloc_start,
1367 unsigned long prealloc_size, 1196 unsigned long prealloc_size,
1368 unsigned long agp_size) 1197 unsigned long agp_size)
1369{ 1198{
1370 struct drm_i915_private *dev_priv = dev->dev_private; 1199 struct drm_i915_private *dev_priv = dev->dev_private;
1371 int fb_bar = IS_I9XX(dev) ? 2 : 0;
1372 int ret = 0; 1200 int ret = 0;
1373 1201
1374 dev->mode_config.fb_base = pci_resource_start(dev->pdev, fb_bar) & 1202 /* Basic memrange allocator for stolen space (aka mm.vram) */
1375 0xff000000; 1203 drm_mm_init(&dev_priv->mm.vram, 0, prealloc_size);
1376
1377 /* Basic memrange allocator for stolen space (aka vram) */
1378 drm_mm_init(&dev_priv->vram, 0, prealloc_size);
1379 DRM_INFO("set up %ldM of stolen space\n", prealloc_size / (1024*1024));
1380
1381 /* We're off and running w/KMS */
1382 dev_priv->mm.suspended = 0;
1383 1204
1384 /* Let GEM Manage from end of prealloc space to end of aperture. 1205 /* Let GEM Manage from end of prealloc space to end of aperture.
1385 * 1206 *
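The stolen-space allocator moves from dev_priv->vram to dev_priv->mm.vram but remains a drm_mm range manager initialized over [0, prealloc_size). A sketch of the era's drm_mm calling pattern; drm_mm_get_block() and drm_mm_put_block() are assumed from the contemporaneous API rather than shown in these hunks:

    /* Requires the kernel's drm_mm.h. Carve a 4KiB-aligned block out
     * of a managed range, mirroring i915_setup_compression() above. */
    static struct drm_mm_node *carve(struct drm_mm *mm, unsigned long size)
    {
            struct drm_mm_node *hole;

            hole = drm_mm_search_free(mm, size, 4096, 0); /* find a hole */
            if (!hole)
                    return NULL;
            return drm_mm_get_block(hole, size, 4096);    /* claim it */
    }

Usage would be drm_mm_init(&mm, 0, prealloc_size) at load, carve() per allocation, drm_mm_put_block() to free, and drm_mm_takedown(&mm) at teardown.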
@@ -1414,7 +1235,7 @@ static int i915_load_modeset_init(struct drm_device *dev,
1414 */ 1235 */
1415 dev_priv->allow_batchbuffer = 1; 1236 dev_priv->allow_batchbuffer = 1;
1416 1237
1417 ret = intel_init_bios(dev); 1238 ret = intel_parse_bios(dev);
1418 if (ret) 1239 if (ret)
1419 DRM_INFO("failed to find VBIOS tables\n"); 1240 DRM_INFO("failed to find VBIOS tables\n");
1420 1241
@@ -1423,6 +1244,8 @@ static int i915_load_modeset_init(struct drm_device *dev,
1423 if (ret) 1244 if (ret)
1424 goto cleanup_ringbuffer; 1245 goto cleanup_ringbuffer;
1425 1246
1247 intel_register_dsm_handler();
1248
1426 ret = vga_switcheroo_register_client(dev->pdev, 1249 ret = vga_switcheroo_register_client(dev->pdev,
1427 i915_switcheroo_set_state, 1250 i915_switcheroo_set_state,
1428 i915_switcheroo_can_switch); 1251 i915_switcheroo_can_switch);
@@ -1443,17 +1266,15 @@ static int i915_load_modeset_init(struct drm_device *dev,
1443 /* FIXME: do pre/post-mode set stuff in core KMS code */ 1266 /* FIXME: do pre/post-mode set stuff in core KMS code */
1444 dev->vblank_disable_allowed = 1; 1267 dev->vblank_disable_allowed = 1;
1445 1268
1446 /*
1447 * Initialize the hardware status page IRQ location.
1448 */
1449
1450 I915_WRITE(INSTPM, (1 << 5) | (1 << 21));
1451
1452 ret = intel_fbdev_init(dev); 1269 ret = intel_fbdev_init(dev);
1453 if (ret) 1270 if (ret)
1454 goto cleanup_irq; 1271 goto cleanup_irq;
1455 1272
1456 drm_kms_helper_poll_init(dev); 1273 drm_kms_helper_poll_init(dev);
1274
1275 /* We're off and running w/KMS */
1276 dev_priv->mm.suspended = 0;
1277
1457 return 0; 1278 return 0;
1458 1279
1459cleanup_irq: 1280cleanup_irq:
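The cleanup_ringbuffer and cleanup_irq labels above follow the standard kernel unwind idiom: each init step that can fail jumps to a label that tears down everything set up so far, in reverse order. A minimal sketch with hypothetical step names:

    static int load_example(void)
    {
            int ret;

            ret = step_a();         /* hypothetical init steps */
            if (ret)
                    return ret;

            ret = step_b();
            if (ret)
                    goto cleanup_a;

            ret = step_c();
            if (ret)
                    goto cleanup_b;

            return 0;

    cleanup_b:
            undo_b();               /* unwind in reverse order */
    cleanup_a:
            undo_a();
            return ret;
    }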
@@ -1907,7 +1728,7 @@ static struct drm_i915_private *i915_mch_dev;
1907 * - dev_priv->fmax 1728 * - dev_priv->fmax
1908 * - dev_priv->gpu_busy 1729 * - dev_priv->gpu_busy
1909 */ 1730 */
1910DEFINE_SPINLOCK(mchdev_lock); 1731static DEFINE_SPINLOCK(mchdev_lock);
1911 1732
1912/** 1733/**
1913 * i915_read_mch_val - return value for IPS use 1734 * i915_read_mch_val - return value for IPS use
@@ -2062,7 +1883,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
2062 struct drm_i915_private *dev_priv; 1883 struct drm_i915_private *dev_priv;
2063 resource_size_t base, size; 1884 resource_size_t base, size;
2064 int ret = 0, mmio_bar; 1885 int ret = 0, mmio_bar;
2065 uint32_t agp_size, prealloc_size, prealloc_start; 1886 uint32_t agp_size, prealloc_size;
2066 /* i915 has 4 more counters */ 1887 /* i915 has 4 more counters */
2067 dev->counters += 4; 1888 dev->counters += 4;
2068 dev->types[6] = _DRM_STAT_IRQ; 1889 dev->types[6] = _DRM_STAT_IRQ;
@@ -2079,7 +1900,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
2079 dev_priv->info = (struct intel_device_info *) flags; 1900 dev_priv->info = (struct intel_device_info *) flags;
2080 1901
2081 /* Add register map (needed for suspend/resume) */ 1902 /* Add register map (needed for suspend/resume) */
2082 mmio_bar = IS_I9XX(dev) ? 0 : 1; 1903 mmio_bar = IS_GEN2(dev) ? 1 : 0;
2083 base = pci_resource_start(dev->pdev, mmio_bar); 1904 base = pci_resource_start(dev->pdev, mmio_bar);
2084 size = pci_resource_len(dev->pdev, mmio_bar); 1905 size = pci_resource_len(dev->pdev, mmio_bar);
2085 1906
@@ -2121,17 +1942,32 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
2121 "performance may suffer.\n"); 1942 "performance may suffer.\n");
2122 } 1943 }
2123 1944
2124 ret = i915_probe_agp(dev, &agp_size, &prealloc_size, &prealloc_start); 1945 dev_priv->mm.gtt = intel_gtt_get();
2125 if (ret) 1946 if (!dev_priv->mm.gtt) {
1947 DRM_ERROR("Failed to initialize GTT\n");
1948 ret = -ENODEV;
2126 goto out_iomapfree; 1949 goto out_iomapfree;
2127
2128 if (prealloc_size > intel_max_stolen) {
2129 DRM_INFO("detected %dM stolen memory, trimming to %dM\n",
2130 prealloc_size >> 20, intel_max_stolen >> 20);
2131 prealloc_size = intel_max_stolen;
2132 } 1950 }
2133 1951
2134 dev_priv->wq = create_singlethread_workqueue("i915"); 1952 prealloc_size = dev_priv->mm.gtt->gtt_stolen_entries << PAGE_SHIFT;
1953 agp_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
1954
1955 /* The i915 workqueue is primarily used for batched retirement of
1956 * requests (and thus managing bo) once the task has been completed
1957 * by the GPU. i915_gem_retire_requests() is called directly when we
1958 * need high-priority retirement, such as waiting for an explicit
1959 * bo.
1960 *
1961 * It is also used for periodic low-priority events, such as
1962 * idle-timers and hangcheck.
1963 *
1964 * All tasks on the workqueue are expected to acquire the dev mutex
1965 * so there is no point in running more than one instance of the
1966 * workqueue at any time: max_active = 1 and NON_REENTRANT.
1967 */
1968 dev_priv->wq = alloc_workqueue("i915",
1969 WQ_UNBOUND | WQ_NON_REENTRANT,
1970 1);
2135 if (dev_priv->wq == NULL) { 1971 if (dev_priv->wq == NULL) {
2136 DRM_ERROR("Failed to create our workqueue.\n"); 1972 DRM_ERROR("Failed to create our workqueue.\n");
2137 ret = -ENOMEM; 1973 ret = -ENOMEM;
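The hunk above replaces create_singlethread_workqueue() with alloc_workqueue() so the ordering requirement is stated explicitly: every work item takes the dev mutex, so max_active = 1 plus WQ_NON_REENTRANT gives the needed serialization. A sketch of the pattern with the 2.6.36-era flags (WQ_NON_REENTRANT was later removed once its behavior became the default):

    #include <linux/workqueue.h>

    static struct workqueue_struct *example_wq;

    static int example_setup(void)
    {
            /* Strictly serialized: at most one item runs at a time. */
            example_wq = alloc_workqueue("example",
                                         WQ_UNBOUND | WQ_NON_REENTRANT,
                                         1);
            if (!example_wq)
                    return -ENOMEM;
            return 0;
    }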
@@ -2159,13 +1995,18 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
2159 1995
2160 dev->driver->get_vblank_counter = i915_get_vblank_counter; 1996 dev->driver->get_vblank_counter = i915_get_vblank_counter;
2161 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */ 1997 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
2162 if (IS_G4X(dev) || IS_IRONLAKE(dev) || IS_GEN6(dev)) { 1998 if (IS_G4X(dev) || IS_GEN5(dev) || IS_GEN6(dev)) {
2163 dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */ 1999 dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
2164 dev->driver->get_vblank_counter = gm45_get_vblank_counter; 2000 dev->driver->get_vblank_counter = gm45_get_vblank_counter;
2165 } 2001 }
2166 2002
2167 /* Try to make sure MCHBAR is enabled before poking at it */ 2003 /* Try to make sure MCHBAR is enabled before poking at it */
2168 intel_setup_mchbar(dev); 2004 intel_setup_mchbar(dev);
2005 intel_setup_gmbus(dev);
2006 intel_opregion_setup(dev);
2007
2008 /* Make sure the bios did its job and set up vital registers */
2009 intel_setup_bios(dev);
2169 2010
2170 i915_gem_load(dev); 2011 i915_gem_load(dev);
2171 2012
@@ -2178,7 +2019,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
2178 2019
2179 if (IS_PINEVIEW(dev)) 2020 if (IS_PINEVIEW(dev))
2180 i915_pineview_get_mem_freq(dev); 2021 i915_pineview_get_mem_freq(dev);
2181 else if (IS_IRONLAKE(dev)) 2022 else if (IS_GEN5(dev))
2182 i915_ironlake_get_mem_freq(dev); 2023 i915_ironlake_get_mem_freq(dev);
2183 2024
2184 /* On the 945G/GM, the chipset reports the MSI capability on the 2025 /* On the 945G/GM, the chipset reports the MSI capability on the
@@ -2212,8 +2053,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
2212 intel_detect_pch(dev); 2053 intel_detect_pch(dev);
2213 2054
2214 if (drm_core_check_feature(dev, DRIVER_MODESET)) { 2055 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
2215 ret = i915_load_modeset_init(dev, prealloc_start, 2056 ret = i915_load_modeset_init(dev, prealloc_size, agp_size);
2216 prealloc_size, agp_size);
2217 if (ret < 0) { 2057 if (ret < 0) {
2218 DRM_ERROR("failed to init modeset\n"); 2058 DRM_ERROR("failed to init modeset\n");
2219 goto out_workqueue_free; 2059 goto out_workqueue_free;
@@ -2221,7 +2061,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
2221 } 2061 }
2222 2062
2223 /* Must be done after probing outputs */ 2063 /* Must be done after probing outputs */
2224 intel_opregion_init(dev, 0); 2064 intel_opregion_init(dev);
2065 acpi_video_register();
2225 2066
2226 setup_timer(&dev_priv->hangcheck_timer, i915_hangcheck_elapsed, 2067 setup_timer(&dev_priv->hangcheck_timer, i915_hangcheck_elapsed,
2227 (unsigned long) dev); 2068 (unsigned long) dev);
@@ -2231,9 +2072,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
2231 dev_priv->mchdev_lock = &mchdev_lock; 2072 dev_priv->mchdev_lock = &mchdev_lock;
2232 spin_unlock(&mchdev_lock); 2073 spin_unlock(&mchdev_lock);
2233 2074
2234 /* XXX Prevent module unload due to memory corruption bugs. */
2235 __module_get(THIS_MODULE);
2236
2237 return 0; 2075 return 0;
2238 2076
2239out_workqueue_free: 2077out_workqueue_free:
@@ -2252,15 +2090,20 @@ free_priv:
2252int i915_driver_unload(struct drm_device *dev) 2090int i915_driver_unload(struct drm_device *dev)
2253{ 2091{
2254 struct drm_i915_private *dev_priv = dev->dev_private; 2092 struct drm_i915_private *dev_priv = dev->dev_private;
2255 2093 int ret;
2256 i915_destroy_error_state(dev);
2257 2094
2258 spin_lock(&mchdev_lock); 2095 spin_lock(&mchdev_lock);
2259 i915_mch_dev = NULL; 2096 i915_mch_dev = NULL;
2260 spin_unlock(&mchdev_lock); 2097 spin_unlock(&mchdev_lock);
2261 2098
2262 destroy_workqueue(dev_priv->wq); 2099 mutex_lock(&dev->struct_mutex);
2263 del_timer_sync(&dev_priv->hangcheck_timer); 2100 ret = i915_gpu_idle(dev);
2101 if (ret)
2102 DRM_ERROR("failed to idle hardware: %d\n", ret);
2103 mutex_unlock(&dev->struct_mutex);
2104
2105 /* Cancel the retire work handler, which should be idle now. */
2106 cancel_delayed_work_sync(&dev_priv->mm.retire_work);
2264 2107
2265 io_mapping_free(dev_priv->mm.gtt_mapping); 2108 io_mapping_free(dev_priv->mm.gtt_mapping);
2266 if (dev_priv->mm.gtt_mtrr >= 0) { 2109 if (dev_priv->mm.gtt_mtrr >= 0) {
@@ -2269,7 +2112,10 @@ int i915_driver_unload(struct drm_device *dev)
2269 dev_priv->mm.gtt_mtrr = -1; 2112 dev_priv->mm.gtt_mtrr = -1;
2270 } 2113 }
2271 2114
2115 acpi_video_unregister();
2116
2272 if (drm_core_check_feature(dev, DRIVER_MODESET)) { 2117 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
2118 intel_fbdev_fini(dev);
2273 intel_modeset_cleanup(dev); 2119 intel_modeset_cleanup(dev);
2274 2120
2275 /* 2121 /*
@@ -2281,20 +2127,25 @@ int i915_driver_unload(struct drm_device *dev)
2281 dev_priv->child_dev = NULL; 2127 dev_priv->child_dev = NULL;
2282 dev_priv->child_dev_num = 0; 2128 dev_priv->child_dev_num = 0;
2283 } 2129 }
2284 drm_irq_uninstall(dev); 2130
2285 vga_switcheroo_unregister_client(dev->pdev); 2131 vga_switcheroo_unregister_client(dev->pdev);
2286 vga_client_register(dev->pdev, NULL, NULL, NULL); 2132 vga_client_register(dev->pdev, NULL, NULL, NULL);
2287 } 2133 }
2288 2134
2135 /* Free error state after interrupts are fully disabled. */
2136 del_timer_sync(&dev_priv->hangcheck_timer);
2137 cancel_work_sync(&dev_priv->error_work);
2138 i915_destroy_error_state(dev);
2139
2289 if (dev->pdev->msi_enabled) 2140 if (dev->pdev->msi_enabled)
2290 pci_disable_msi(dev->pdev); 2141 pci_disable_msi(dev->pdev);
2291 2142
2292 if (dev_priv->regs != NULL) 2143 intel_opregion_fini(dev);
2293 iounmap(dev_priv->regs);
2294
2295 intel_opregion_free(dev, 0);
2296 2144
2297 if (drm_core_check_feature(dev, DRIVER_MODESET)) { 2145 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
2146 /* Flush any outstanding unpin_work. */
2147 flush_workqueue(dev_priv->wq);
2148
2298 i915_gem_free_all_phys_object(dev); 2149 i915_gem_free_all_phys_object(dev);
2299 2150
2300 mutex_lock(&dev->struct_mutex); 2151 mutex_lock(&dev->struct_mutex);
@@ -2302,34 +2153,41 @@ int i915_driver_unload(struct drm_device *dev)
2302 mutex_unlock(&dev->struct_mutex); 2153 mutex_unlock(&dev->struct_mutex);
2303 if (I915_HAS_FBC(dev) && i915_powersave) 2154 if (I915_HAS_FBC(dev) && i915_powersave)
2304 i915_cleanup_compression(dev); 2155 i915_cleanup_compression(dev);
2305 drm_mm_takedown(&dev_priv->vram); 2156 drm_mm_takedown(&dev_priv->mm.vram);
2306 i915_gem_lastclose(dev);
2307 2157
2308 intel_cleanup_overlay(dev); 2158 intel_cleanup_overlay(dev);
2159
2160 if (!I915_NEED_GFX_HWS(dev))
2161 i915_free_hws(dev);
2309 } 2162 }
2310 2163
2164 if (dev_priv->regs != NULL)
2165 iounmap(dev_priv->regs);
2166
2167 intel_teardown_gmbus(dev);
2311 intel_teardown_mchbar(dev); 2168 intel_teardown_mchbar(dev);
2312 2169
2170 destroy_workqueue(dev_priv->wq);
2171
2313 pci_dev_put(dev_priv->bridge_dev); 2172 pci_dev_put(dev_priv->bridge_dev);
2314 kfree(dev->dev_private); 2173 kfree(dev->dev_private);
2315 2174
2316 return 0; 2175 return 0;
2317} 2176}
2318 2177
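The reordered unload path above quiesces every producer of deferred work before the workqueue itself is destroyed: idle the GPU, cancel the retire handler, kill the hangcheck timer and error work, flush, and only then call destroy_workqueue(). A sketch of that ordering with hypothetical names:

    /* Stop producers first, then drain consumers, then free. */
    static void example_teardown(struct example_dev *ex)
    {
            example_stop_hw(ex);                    /* no new work generated */
            del_timer_sync(&ex->timer);             /* timers can requeue work */
            cancel_delayed_work_sync(&ex->retire);  /* wait for in-flight items */
            flush_workqueue(ex->wq);                /* drain anything left */
            destroy_workqueue(ex->wq);              /* now safe to tear down */
    }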
2319int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv) 2178int i915_driver_open(struct drm_device *dev, struct drm_file *file)
2320{ 2179{
2321 struct drm_i915_file_private *i915_file_priv; 2180 struct drm_i915_file_private *file_priv;
2322 2181
2323 DRM_DEBUG_DRIVER("\n"); 2182 DRM_DEBUG_DRIVER("\n");
2324 i915_file_priv = (struct drm_i915_file_private *) 2183 file_priv = kmalloc(sizeof(*file_priv), GFP_KERNEL);
2325 kmalloc(sizeof(*i915_file_priv), GFP_KERNEL); 2184 if (!file_priv)
2326
2327 if (!i915_file_priv)
2328 return -ENOMEM; 2185 return -ENOMEM;
2329 2186
2330 file_priv->driver_priv = i915_file_priv; 2187 file->driver_priv = file_priv;
2331 2188
2332 INIT_LIST_HEAD(&i915_file_priv->mm.request_list); 2189 spin_lock_init(&file_priv->mm.lock);
2190 INIT_LIST_HEAD(&file_priv->mm.request_list);
2333 2191
2334 return 0; 2192 return 0;
2335} 2193}
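i915_driver_open now initializes a spinlock next to the per-file request list, so the list can be updated without holding the global struct_mutex. The same pattern, reduced to a self-contained sketch:

    #include <linux/slab.h>
    #include <linux/spinlock.h>
    #include <linux/list.h>

    struct example_file_priv {
            spinlock_t lock;                /* guards request_list */
            struct list_head request_list;
    };

    static int example_open(void **driver_priv)
    {
            struct example_file_priv *fp;

            fp = kmalloc(sizeof(*fp), GFP_KERNEL);
            if (!fp)
                    return -ENOMEM;

            spin_lock_init(&fp->lock);
            INIT_LIST_HEAD(&fp->request_list);
            *driver_priv = fp;
            return 0;
    }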
@@ -2372,11 +2230,11 @@ void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
2372 i915_mem_release(dev, file_priv, dev_priv->agp_heap); 2230 i915_mem_release(dev, file_priv, dev_priv->agp_heap);
2373} 2231}
2374 2232
2375void i915_driver_postclose(struct drm_device *dev, struct drm_file *file_priv) 2233void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
2376{ 2234{
2377 struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv; 2235 struct drm_i915_file_private *file_priv = file->driver_priv;
2378 2236
2379 kfree(i915_file_priv); 2237 kfree(file_priv);
2380} 2238}
2381 2239
2382struct drm_ioctl_desc i915_ioctls[] = { 2240struct drm_ioctl_desc i915_ioctls[] = {
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 895ab896e336..3467dd420760 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -32,6 +32,7 @@
32#include "drm.h" 32#include "drm.h"
33#include "i915_drm.h" 33#include "i915_drm.h"
34#include "i915_drv.h" 34#include "i915_drv.h"
35#include "intel_drv.h"
35 36
36#include <linux/console.h> 37#include <linux/console.h>
37#include "drm_crtc_helper.h" 38#include "drm_crtc_helper.h"
@@ -61,86 +62,110 @@ extern int intel_agp_enabled;
61 .driver_data = (unsigned long) info } 62 .driver_data = (unsigned long) info }
62 63
63static const struct intel_device_info intel_i830_info = { 64static const struct intel_device_info intel_i830_info = {
64 .gen = 2, .is_i8xx = 1, .is_mobile = 1, .cursor_needs_physical = 1, 65 .gen = 2, .is_mobile = 1, .cursor_needs_physical = 1,
66 .has_overlay = 1, .overlay_needs_physical = 1,
65}; 67};
66 68
67static const struct intel_device_info intel_845g_info = { 69static const struct intel_device_info intel_845g_info = {
68 .gen = 2, .is_i8xx = 1, 70 .gen = 2,
71 .has_overlay = 1, .overlay_needs_physical = 1,
69}; 72};
70 73
71static const struct intel_device_info intel_i85x_info = { 74static const struct intel_device_info intel_i85x_info = {
72 .gen = 2, .is_i8xx = 1, .is_i85x = 1, .is_mobile = 1, 75 .gen = 2, .is_i85x = 1, .is_mobile = 1,
73 .cursor_needs_physical = 1, 76 .cursor_needs_physical = 1,
77 .has_overlay = 1, .overlay_needs_physical = 1,
74}; 78};
75 79
76static const struct intel_device_info intel_i865g_info = { 80static const struct intel_device_info intel_i865g_info = {
77 .gen = 2, .is_i8xx = 1, 81 .gen = 2,
82 .has_overlay = 1, .overlay_needs_physical = 1,
78}; 83};
79 84
80static const struct intel_device_info intel_i915g_info = { 85static const struct intel_device_info intel_i915g_info = {
81 .gen = 3, .is_i915g = 1, .is_i9xx = 1, .cursor_needs_physical = 1, 86 .gen = 3, .is_i915g = 1, .cursor_needs_physical = 1,
87 .has_overlay = 1, .overlay_needs_physical = 1,
82}; 88};
83static const struct intel_device_info intel_i915gm_info = { 89static const struct intel_device_info intel_i915gm_info = {
84 .gen = 3, .is_i9xx = 1, .is_mobile = 1, 90 .gen = 3, .is_mobile = 1,
85 .cursor_needs_physical = 1, 91 .cursor_needs_physical = 1,
92 .has_overlay = 1, .overlay_needs_physical = 1,
93 .supports_tv = 1,
86}; 94};
87static const struct intel_device_info intel_i945g_info = { 95static const struct intel_device_info intel_i945g_info = {
88 .gen = 3, .is_i9xx = 1, .has_hotplug = 1, .cursor_needs_physical = 1, 96 .gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1,
97 .has_overlay = 1, .overlay_needs_physical = 1,
89}; 98};
90static const struct intel_device_info intel_i945gm_info = { 99static const struct intel_device_info intel_i945gm_info = {
91 .gen = 3, .is_i945gm = 1, .is_i9xx = 1, .is_mobile = 1, 100 .gen = 3, .is_i945gm = 1, .is_mobile = 1,
92 .has_hotplug = 1, .cursor_needs_physical = 1, 101 .has_hotplug = 1, .cursor_needs_physical = 1,
102 .has_overlay = 1, .overlay_needs_physical = 1,
103 .supports_tv = 1,
93}; 104};
94 105
95static const struct intel_device_info intel_i965g_info = { 106static const struct intel_device_info intel_i965g_info = {
96 .gen = 4, .is_broadwater = 1, .is_i965g = 1, .is_i9xx = 1, 107 .gen = 4, .is_broadwater = 1,
97 .has_hotplug = 1, 108 .has_hotplug = 1,
109 .has_overlay = 1,
98}; 110};
99 111
100static const struct intel_device_info intel_i965gm_info = { 112static const struct intel_device_info intel_i965gm_info = {
101 .gen = 4, .is_crestline = 1, .is_i965g = 1, .is_i965gm = 1, .is_i9xx = 1, 113 .gen = 4, .is_crestline = 1,
102 .is_mobile = 1, .has_fbc = 1, .has_rc6 = 1, .has_hotplug = 1, 114 .is_mobile = 1, .has_fbc = 1, .has_rc6 = 1, .has_hotplug = 1,
115 .has_overlay = 1,
116 .supports_tv = 1,
103}; 117};
104 118
105static const struct intel_device_info intel_g33_info = { 119static const struct intel_device_info intel_g33_info = {
106 .gen = 3, .is_g33 = 1, .is_i9xx = 1, 120 .gen = 3, .is_g33 = 1,
107 .need_gfx_hws = 1, .has_hotplug = 1, 121 .need_gfx_hws = 1, .has_hotplug = 1,
122 .has_overlay = 1,
108}; 123};
109 124
110static const struct intel_device_info intel_g45_info = { 125static const struct intel_device_info intel_g45_info = {
111 .gen = 4, .is_i965g = 1, .is_g4x = 1, .is_i9xx = 1, .need_gfx_hws = 1, 126 .gen = 4, .is_g4x = 1, .need_gfx_hws = 1,
112 .has_pipe_cxsr = 1, .has_hotplug = 1, 127 .has_pipe_cxsr = 1, .has_hotplug = 1,
128 .has_bsd_ring = 1,
113}; 129};
114 130
115static const struct intel_device_info intel_gm45_info = { 131static const struct intel_device_info intel_gm45_info = {
116 .gen = 4, .is_i965g = 1, .is_g4x = 1, .is_i9xx = 1, 132 .gen = 4, .is_g4x = 1,
117 .is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1, 133 .is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1,
118 .has_pipe_cxsr = 1, .has_hotplug = 1, 134 .has_pipe_cxsr = 1, .has_hotplug = 1,
135 .supports_tv = 1,
136 .has_bsd_ring = 1,
119}; 137};
120 138
121static const struct intel_device_info intel_pineview_info = { 139static const struct intel_device_info intel_pineview_info = {
122 .gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .is_i9xx = 1, 140 .gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1,
123 .need_gfx_hws = 1, .has_hotplug = 1, 141 .need_gfx_hws = 1, .has_hotplug = 1,
142 .has_overlay = 1,
124}; 143};
125 144
126static const struct intel_device_info intel_ironlake_d_info = { 145static const struct intel_device_info intel_ironlake_d_info = {
127 .gen = 5, .is_ironlake = 1, .is_i965g = 1, .is_i9xx = 1, 146 .gen = 5,
128 .need_gfx_hws = 1, .has_pipe_cxsr = 1, .has_hotplug = 1, 147 .need_gfx_hws = 1, .has_pipe_cxsr = 1, .has_hotplug = 1,
148 .has_bsd_ring = 1,
129}; 149};
130 150
131static const struct intel_device_info intel_ironlake_m_info = { 151static const struct intel_device_info intel_ironlake_m_info = {
132 .gen = 5, .is_ironlake = 1, .is_mobile = 1, .is_i965g = 1, .is_i9xx = 1, 152 .gen = 5, .is_mobile = 1,
133 .need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1, .has_hotplug = 1, 153 .need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1, .has_hotplug = 1,
154 .has_bsd_ring = 1,
134}; 155};
135 156
136static const struct intel_device_info intel_sandybridge_d_info = { 157static const struct intel_device_info intel_sandybridge_d_info = {
137 .gen = 6, .is_i965g = 1, .is_i9xx = 1, 158 .gen = 6,
138 .need_gfx_hws = 1, .has_hotplug = 1, 159 .need_gfx_hws = 1, .has_hotplug = 1,
160 .has_bsd_ring = 1,
161 .has_blt_ring = 1,
139}; 162};
140 163
141static const struct intel_device_info intel_sandybridge_m_info = { 164static const struct intel_device_info intel_sandybridge_m_info = {
142 .gen = 6, .is_i965g = 1, .is_mobile = 1, .is_i9xx = 1, 165 .gen = 6, .is_mobile = 1,
143 .need_gfx_hws = 1, .has_hotplug = 1, 166 .need_gfx_hws = 1, .has_hotplug = 1,
167 .has_bsd_ring = 1,
168 .has_blt_ring = 1,
144}; 169};
145 170
146static const struct pci_device_id pciidlist[] = { /* aka */ 171static const struct pci_device_id pciidlist[] = { /* aka */
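The slimmed-down intel_device_info tables above reach the driver through the PCI id table: the .driver_data = (unsigned long) info initializer visible at the top of this hunk stores the info pointer per device id, and probe code casts it back. A sketch of that plumbing, with illustrative names and an illustrative device id:

    #include <linux/pci.h>

    struct example_device_info { unsigned char gen; };

    static const struct example_device_info example_info = { .gen = 4 };

    static const struct pci_device_id example_ids[] = {
            { PCI_VDEVICE(INTEL, 0x2e22), (unsigned long)&example_info },
            { }
    };

    static int example_probe(struct pci_dev *pdev,
                             const struct pci_device_id *id)
    {
            const struct example_device_info *info =
                    (const struct example_device_info *)id->driver_data;

            return info->gen >= 4 ? 0 : -ENODEV;
    }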
@@ -237,7 +262,7 @@ static int i915_drm_freeze(struct drm_device *dev)
237 262
238 i915_save_state(dev); 263 i915_save_state(dev);
239 264
240 intel_opregion_free(dev, 1); 265 intel_opregion_fini(dev);
241 266
242 /* Modeset on resume, not lid events */ 267 /* Modeset on resume, not lid events */
243 dev_priv->modeset_on_lid = 0; 268 dev_priv->modeset_on_lid = 0;
@@ -258,6 +283,8 @@ int i915_suspend(struct drm_device *dev, pm_message_t state)
258 if (state.event == PM_EVENT_PRETHAW) 283 if (state.event == PM_EVENT_PRETHAW)
259 return 0; 284 return 0;
260 285
286 drm_kms_helper_poll_disable(dev);
287
261 error = i915_drm_freeze(dev); 288 error = i915_drm_freeze(dev);
262 if (error) 289 if (error)
263 return error; 290 return error;
@@ -277,8 +304,7 @@ static int i915_drm_thaw(struct drm_device *dev)
277 int error = 0; 304 int error = 0;
278 305
279 i915_restore_state(dev); 306 i915_restore_state(dev);
280 307 intel_opregion_setup(dev);
281 intel_opregion_init(dev, 1);
282 308
283 /* KMS EnterVT equivalent */ 309 /* KMS EnterVT equivalent */
284 if (drm_core_check_feature(dev, DRIVER_MODESET)) { 310 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
@@ -294,6 +320,8 @@ static int i915_drm_thaw(struct drm_device *dev)
294 drm_helper_resume_force_mode(dev); 320 drm_helper_resume_force_mode(dev);
295 } 321 }
296 322
323 intel_opregion_init(dev);
324
297 dev_priv->modeset_on_lid = 0; 325 dev_priv->modeset_on_lid = 0;
298 326
299 return error; 327 return error;
@@ -301,12 +329,79 @@ static int i915_drm_thaw(struct drm_device *dev)
301 329
302int i915_resume(struct drm_device *dev) 330int i915_resume(struct drm_device *dev)
303{ 331{
332 int ret;
333
304 if (pci_enable_device(dev->pdev)) 334 if (pci_enable_device(dev->pdev))
305 return -EIO; 335 return -EIO;
306 336
307 pci_set_master(dev->pdev); 337 pci_set_master(dev->pdev);
308 338
309 return i915_drm_thaw(dev); 339 ret = i915_drm_thaw(dev);
340 if (ret)
341 return ret;
342
343 drm_kms_helper_poll_enable(dev);
344 return 0;
345}
346
347static int i8xx_do_reset(struct drm_device *dev, u8 flags)
348{
349 struct drm_i915_private *dev_priv = dev->dev_private;
350
351 if (IS_I85X(dev))
352 return -ENODEV;
353
354 I915_WRITE(D_STATE, I915_READ(D_STATE) | DSTATE_GFX_RESET_I830);
355 POSTING_READ(D_STATE);
356
357 if (IS_I830(dev) || IS_845G(dev)) {
358 I915_WRITE(DEBUG_RESET_I830,
359 DEBUG_RESET_DISPLAY |
360 DEBUG_RESET_RENDER |
361 DEBUG_RESET_FULL);
362 POSTING_READ(DEBUG_RESET_I830);
363 msleep(1);
364
365 I915_WRITE(DEBUG_RESET_I830, 0);
366 POSTING_READ(DEBUG_RESET_I830);
367 }
368
369 msleep(1);
370
371 I915_WRITE(D_STATE, I915_READ(D_STATE) & ~DSTATE_GFX_RESET_I830);
372 POSTING_READ(D_STATE);
373
374 return 0;
375}
376
377static int i965_reset_complete(struct drm_device *dev)
378{
379 u8 gdrst;
380 pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
381 return gdrst & 0x1;
382}
383
384static int i965_do_reset(struct drm_device *dev, u8 flags)
385{
386 u8 gdrst;
387
388 /*
389 * Set the domains we want to reset (GRDOM/bits 2 and 3) as
390 * well as the reset bit (GR/bit 0). Setting the GR bit
391 * triggers the reset; when done, the hardware will clear it.
392 */
393 pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
394 pci_write_config_byte(dev->pdev, I965_GDRST, gdrst | flags | 0x1);
395
396 return wait_for(i965_reset_complete(dev), 500);
397}
398
399static int ironlake_do_reset(struct drm_device *dev, u8 flags)
400{
401 struct drm_i915_private *dev_priv = dev->dev_private;
402 u32 gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
403 I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR, gdrst | flags | 0x1);
404 return wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500);
310} 405}
311 406
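Per the comment in i965_do_reset() above, setting the GR bit in the GDRST config byte triggers the reset and the hardware clears it when done; the driver polls with its wait_for(condition, timeout-in-ms) macro. A standalone sketch of that handshake, with a plain loop standing in for wait_for() and an illustrative register offset:

    #include <linux/pci.h>
    #include <linux/delay.h>

    #define EXAMPLE_GDRST 0xc0      /* illustrative; the real offset is I965_GDRST */

    static int example_gdrst_reset(struct pci_dev *pdev, u8 flags)
    {
            unsigned int ms;
            u8 gdrst;

            /* Set the GRDOM domain bits plus the GR "go" bit. */
            pci_read_config_byte(pdev, EXAMPLE_GDRST, &gdrst);
            pci_write_config_byte(pdev, EXAMPLE_GDRST, gdrst | flags | 0x1);

            /* Poll up to 500ms for the hardware to clear GR. */
            for (ms = 0; ms < 500; ms++) {
                    pci_read_config_byte(pdev, EXAMPLE_GDRST, &gdrst);
                    if ((gdrst & 0x1) == 0)
                            return 0;
                    msleep(1);
            }
            return -ETIMEDOUT;
    }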
312/** 407/**
@@ -325,54 +420,39 @@ int i915_resume(struct drm_device *dev)
325 * - re-init interrupt state 420 * - re-init interrupt state
326 * - re-init display 421 * - re-init display
327 */ 422 */
328int i965_reset(struct drm_device *dev, u8 flags) 423int i915_reset(struct drm_device *dev, u8 flags)
329{ 424{
330 drm_i915_private_t *dev_priv = dev->dev_private; 425 drm_i915_private_t *dev_priv = dev->dev_private;
331 unsigned long timeout;
332 u8 gdrst;
333 /* 426 /*
334 * We really should only reset the display subsystem if we actually 427 * We really should only reset the display subsystem if we actually
335 * need to 428 * need to
336 */ 429 */
337 bool need_display = true; 430 bool need_display = true;
431 int ret;
338 432
339 mutex_lock(&dev->struct_mutex); 433 mutex_lock(&dev->struct_mutex);
340 434
341 /* 435 i915_gem_reset(dev);
342 * Clear request list 436
343 */ 437 ret = -ENODEV;
344 i915_gem_retire_requests(dev); 438 if (get_seconds() - dev_priv->last_gpu_reset < 5) {
345 439 DRM_ERROR("GPU hanging too fast, declaring wedged!\n");
346 if (need_display) 440 } else switch (INTEL_INFO(dev)->gen) {
347 i915_save_display(dev); 441 case 5:
348 442 ret = ironlake_do_reset(dev, flags);
349 if (IS_I965G(dev) || IS_G4X(dev)) { 443 break;
350 /* 444 case 4:
351 * Set the domains we want to reset, then the reset bit (bit 0). 445 ret = i965_do_reset(dev, flags);
352 * Clear the reset bit after a while and wait for hardware status 446 break;
353 * bit (bit 1) to be set 447 case 2:
354 */ 448 ret = i8xx_do_reset(dev, flags);
355 pci_read_config_byte(dev->pdev, GDRST, &gdrst); 449 break;
356 pci_write_config_byte(dev->pdev, GDRST, gdrst | flags | ((flags == GDRST_FULL) ? 0x1 : 0x0)); 450 }
357 udelay(50); 451 dev_priv->last_gpu_reset = get_seconds();
358 pci_write_config_byte(dev->pdev, GDRST, gdrst & 0xfe); 452 if (ret) {
359 453 DRM_ERROR("Failed to reset chip.\n");
360 /* ...we don't want to loop forever though, 500ms should be plenty */
361 timeout = jiffies + msecs_to_jiffies(500);
362 do {
363 udelay(100);
364 pci_read_config_byte(dev->pdev, GDRST, &gdrst);
365 } while ((gdrst & 0x1) && time_after(timeout, jiffies));
366
367 if (gdrst & 0x1) {
368 WARN(true, "i915: Failed to reset chip\n");
369 mutex_unlock(&dev->struct_mutex);
370 return -EIO;
371 }
372 } else {
373 DRM_ERROR("Error occurred. Don't know how to reset this chip.\n");
374 mutex_unlock(&dev->struct_mutex); 454 mutex_unlock(&dev->struct_mutex);
375 return -ENODEV; 455 return ret;
376 } 456 }
377 457
378 /* Ok, now get things going again... */ 458 /* Ok, now get things going again... */
@@ -400,13 +480,19 @@ int i965_reset(struct drm_device *dev, u8 flags)
400 mutex_lock(&dev->struct_mutex); 480 mutex_lock(&dev->struct_mutex);
401 } 481 }
402 482
483 mutex_unlock(&dev->struct_mutex);
484
403 /* 485 /*
404 * Display needs restore too... 486 * Perform a full modeset as on later generations, e.g. Ironlake, we may
487 * need to retrain the display link and cannot just restore the register
488 * values.
405 */ 489 */
406 if (need_display) 490 if (need_display) {
407 i915_restore_display(dev); 491 mutex_lock(&dev->mode_config.mutex);
492 drm_helper_resume_force_mode(dev);
493 mutex_unlock(&dev->mode_config.mutex);
494 }
408 495
409 mutex_unlock(&dev->struct_mutex);
410 return 0; 496 return 0;
411} 497}
412 498
@@ -524,8 +610,6 @@ static struct drm_driver driver = {
524 .irq_uninstall = i915_driver_irq_uninstall, 610 .irq_uninstall = i915_driver_irq_uninstall,
525 .irq_handler = i915_driver_irq_handler, 611 .irq_handler = i915_driver_irq_handler,
526 .reclaim_buffers = drm_core_reclaim_buffers, 612 .reclaim_buffers = drm_core_reclaim_buffers,
527 .get_map_ofs = drm_core_get_map_ofs,
528 .get_reg_ofs = drm_core_get_reg_ofs,
529 .master_create = i915_master_create, 613 .master_create = i915_master_create,
530 .master_destroy = i915_master_destroy, 614 .master_destroy = i915_master_destroy,
531#if defined(CONFIG_DEBUG_FS) 615#if defined(CONFIG_DEBUG_FS)
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index af4a263cf257..2c2c19b6285e 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -34,6 +34,8 @@
34#include "intel_bios.h" 34#include "intel_bios.h"
35#include "intel_ringbuffer.h" 35#include "intel_ringbuffer.h"
36#include <linux/io-mapping.h> 36#include <linux/io-mapping.h>
37#include <linux/i2c.h>
38#include <drm/intel-gtt.h>
37 39
38/* General customization: 40/* General customization:
39 */ 41 */
@@ -73,11 +75,9 @@ enum plane {
73#define DRIVER_PATCHLEVEL 0 75#define DRIVER_PATCHLEVEL 0
74 76
75#define WATCH_COHERENCY 0 77#define WATCH_COHERENCY 0
76#define WATCH_BUF 0
77#define WATCH_EXEC 0 78#define WATCH_EXEC 0
78#define WATCH_LRU 0
79#define WATCH_RELOC 0 79#define WATCH_RELOC 0
80#define WATCH_INACTIVE 0 80#define WATCH_LISTS 0
81#define WATCH_PWRITE 0 81#define WATCH_PWRITE 0
82 82
83#define I915_GEM_PHYS_CURSOR_0 1 83#define I915_GEM_PHYS_CURSOR_0 1
@@ -110,8 +110,9 @@ struct intel_opregion {
110 struct opregion_acpi *acpi; 110 struct opregion_acpi *acpi;
111 struct opregion_swsci *swsci; 111 struct opregion_swsci *swsci;
112 struct opregion_asle *asle; 112 struct opregion_asle *asle;
113 int enabled; 113 void *vbt;
114}; 114};
115#define OPREGION_SIZE (8*1024)
115 116
116struct intel_overlay; 117struct intel_overlay;
117struct intel_overlay_error_state; 118struct intel_overlay_error_state;
@@ -125,13 +126,16 @@ struct drm_i915_master_private {
125struct drm_i915_fence_reg { 126struct drm_i915_fence_reg {
126 struct drm_gem_object *obj; 127 struct drm_gem_object *obj;
127 struct list_head lru_list; 128 struct list_head lru_list;
129 bool gpu;
128}; 130};
129 131
130struct sdvo_device_mapping { 132struct sdvo_device_mapping {
133 u8 initialized;
131 u8 dvo_port; 134 u8 dvo_port;
132 u8 slave_addr; 135 u8 slave_addr;
133 u8 dvo_wiring; 136 u8 dvo_wiring;
134 u8 initialized; 137 u8 i2c_pin;
138 u8 i2c_speed;
135 u8 ddc_pin; 139 u8 ddc_pin;
136}; 140};
137 141
@@ -193,28 +197,29 @@ struct drm_i915_display_funcs {
193struct intel_device_info { 197struct intel_device_info {
194 u8 gen; 198 u8 gen;
195 u8 is_mobile : 1; 199 u8 is_mobile : 1;
196 u8 is_i8xx : 1;
197 u8 is_i85x : 1; 200 u8 is_i85x : 1;
198 u8 is_i915g : 1; 201 u8 is_i915g : 1;
199 u8 is_i9xx : 1;
200 u8 is_i945gm : 1; 202 u8 is_i945gm : 1;
201 u8 is_i965g : 1;
202 u8 is_i965gm : 1;
203 u8 is_g33 : 1; 203 u8 is_g33 : 1;
204 u8 need_gfx_hws : 1; 204 u8 need_gfx_hws : 1;
205 u8 is_g4x : 1; 205 u8 is_g4x : 1;
206 u8 is_pineview : 1; 206 u8 is_pineview : 1;
207 u8 is_broadwater : 1; 207 u8 is_broadwater : 1;
208 u8 is_crestline : 1; 208 u8 is_crestline : 1;
209 u8 is_ironlake : 1;
210 u8 has_fbc : 1; 209 u8 has_fbc : 1;
211 u8 has_rc6 : 1; 210 u8 has_rc6 : 1;
212 u8 has_pipe_cxsr : 1; 211 u8 has_pipe_cxsr : 1;
213 u8 has_hotplug : 1; 212 u8 has_hotplug : 1;
214 u8 cursor_needs_physical : 1; 213 u8 cursor_needs_physical : 1;
214 u8 has_overlay : 1;
215 u8 overlay_needs_physical : 1;
216 u8 supports_tv : 1;
217 u8 has_bsd_ring : 1;
218 u8 has_blt_ring : 1;
215}; 219};
216 220
217enum no_fbc_reason { 221enum no_fbc_reason {
222 FBC_NO_OUTPUT, /* no outputs enabled to compress */
218 FBC_STOLEN_TOO_SMALL, /* not enough space to hold compressed buffers */ 223 FBC_STOLEN_TOO_SMALL, /* not enough space to hold compressed buffers */
219 FBC_UNSUPPORTED_MODE, /* interlace or doublescanned mode */ 224 FBC_UNSUPPORTED_MODE, /* interlace or doublescanned mode */
220 FBC_MODE_TOO_LARGE, /* mode too large for compression */ 225 FBC_MODE_TOO_LARGE, /* mode too large for compression */
@@ -241,9 +246,16 @@ typedef struct drm_i915_private {
241 246
242 void __iomem *regs; 247 void __iomem *regs;
243 248
249 struct intel_gmbus {
250 struct i2c_adapter adapter;
251 struct i2c_adapter *force_bit;
252 u32 reg0;
253 } *gmbus;
254
244 struct pci_dev *bridge_dev; 255 struct pci_dev *bridge_dev;
245 struct intel_ring_buffer render_ring; 256 struct intel_ring_buffer render_ring;
246 struct intel_ring_buffer bsd_ring; 257 struct intel_ring_buffer bsd_ring;
258 struct intel_ring_buffer blt_ring;
247 uint32_t next_seqno; 259 uint32_t next_seqno;
248 260
249 drm_dma_handle_t *status_page_dmah; 261 drm_dma_handle_t *status_page_dmah;
@@ -263,6 +275,9 @@ typedef struct drm_i915_private {
263 int front_offset; 275 int front_offset;
264 int current_page; 276 int current_page;
265 int page_flipping; 277 int page_flipping;
278#define I915_DEBUG_READ (1<<0)
279#define I915_DEBUG_WRITE (1<<1)
280 unsigned long debug_flags;
266 281
267 wait_queue_head_t irq_queue; 282 wait_queue_head_t irq_queue;
268 atomic_t irq_received; 283 atomic_t irq_received;
@@ -289,24 +304,21 @@ typedef struct drm_i915_private {
289 unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds; 304 unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds;
290 int vblank_pipe; 305 int vblank_pipe;
291 int num_pipe; 306 int num_pipe;
292 u32 flush_rings;
293#define FLUSH_RENDER_RING 0x1
294#define FLUSH_BSD_RING 0x2
295 307
296 /* For hangcheck timer */ 308 /* For hangcheck timer */
297#define DRM_I915_HANGCHECK_PERIOD 75 /* in jiffies */ 309#define DRM_I915_HANGCHECK_PERIOD 250 /* in ms */
298 struct timer_list hangcheck_timer; 310 struct timer_list hangcheck_timer;
299 int hangcheck_count; 311 int hangcheck_count;
300 uint32_t last_acthd; 312 uint32_t last_acthd;
301 uint32_t last_instdone; 313 uint32_t last_instdone;
302 uint32_t last_instdone1; 314 uint32_t last_instdone1;
303 315
304 struct drm_mm vram;
305
306 unsigned long cfb_size; 316 unsigned long cfb_size;
307 unsigned long cfb_pitch; 317 unsigned long cfb_pitch;
318 unsigned long cfb_offset;
308 int cfb_fence; 319 int cfb_fence;
309 int cfb_plane; 320 int cfb_plane;
321 int cfb_y;
310 322
311 int irq_enabled; 323 int irq_enabled;
312 324
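DRM_I915_HANGCHECK_PERIOD changes from a raw jiffies count (75, whose wall-clock length depends on CONFIG_HZ) to milliseconds. The arming site is not part of this diff, but presumably converts at each use, along the lines of:

    /* Assumed arming site: converting at mod_timer() time makes the
     * interval independent of CONFIG_HZ. */
    mod_timer(&dev_priv->hangcheck_timer,
              jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));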
@@ -316,8 +328,7 @@ typedef struct drm_i915_private {
316 struct intel_overlay *overlay; 328 struct intel_overlay *overlay;
317 329
318 /* LVDS info */ 330 /* LVDS info */
319 int backlight_duty_cycle; /* restore backlight to this value */ 331 int backlight_level; /* restore backlight to this value */
320 bool panel_wants_dither;
321 struct drm_display_mode *panel_fixed_mode; 332 struct drm_display_mode *panel_fixed_mode;
322 struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */ 333 struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
323 struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */ 334 struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
@@ -328,13 +339,23 @@ typedef struct drm_i915_private {
328 unsigned int lvds_vbt:1; 339 unsigned int lvds_vbt:1;
329 unsigned int int_crt_support:1; 340 unsigned int int_crt_support:1;
330 unsigned int lvds_use_ssc:1; 341 unsigned int lvds_use_ssc:1;
331 unsigned int edp_support:1;
332 int lvds_ssc_freq; 342 int lvds_ssc_freq;
333 int edp_bpp; 343 struct {
344 int rate;
345 int lanes;
346 int preemphasis;
347 int vswing;
348
349 bool initialized;
350 bool support;
351 int bpp;
352 struct edp_power_seq pps;
353 } edp;
354 bool no_aux_handshake;
334 355
335 struct notifier_block lid_notifier; 356 struct notifier_block lid_notifier;
336 357
337 int crt_ddc_bus; /* 0 = unknown, else GPIO to use for CRT DDC */ 358 int crt_ddc_pin;
338 struct drm_i915_fence_reg fence_regs[16]; /* assume 965 */ 359 struct drm_i915_fence_reg fence_regs[16]; /* assume 965 */
339 int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */ 360 int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
340 int num_fence_regs; /* 8 on pre-965, 16 otherwise */ 361 int num_fence_regs; /* 8 on pre-965, 16 otherwise */
@@ -344,6 +365,7 @@ typedef struct drm_i915_private {
344 spinlock_t error_lock; 365 spinlock_t error_lock;
345 struct drm_i915_error_state *first_error; 366 struct drm_i915_error_state *first_error;
346 struct work_struct error_work; 367 struct work_struct error_work;
368 struct completion error_completion;
347 struct workqueue_struct *wq; 369 struct workqueue_struct *wq;
348 370
349 /* Display functions */ 371 /* Display functions */
@@ -507,6 +529,11 @@ typedef struct drm_i915_private {
507 u32 saveMCHBAR_RENDER_STANDBY; 529 u32 saveMCHBAR_RENDER_STANDBY;
508 530
509 struct { 531 struct {
532 /** Bridge to intel-gtt-ko */
533 struct intel_gtt *gtt;
534 /** Memory allocator for GTT stolen memory */
535 struct drm_mm vram;
536 /** Memory allocator for GTT */
510 struct drm_mm gtt_space; 537 struct drm_mm gtt_space;
511 538
512 struct io_mapping *gtt_mapping; 539 struct io_mapping *gtt_mapping;
@@ -521,7 +548,16 @@ typedef struct drm_i915_private {
521 */ 548 */
522 struct list_head shrink_list; 549 struct list_head shrink_list;
523 550
524 spinlock_t active_list_lock; 551 /**
552 * List of objects currently involved in rendering.
553 *
554 * Includes buffers having the contents of their GPU caches
555 * flushed, not necessarily primitives. last_rendering_seqno
556 * represents when the rendering involved will be completed.
557 *
558 * A reference is held on the buffer while on this list.
559 */
560 struct list_head active_list;
525 561
526 /** 562 /**
527 * List of objects which are not in the ringbuffer but which 563 * List of objects which are not in the ringbuffer but which
@@ -535,15 +571,6 @@ typedef struct drm_i915_private {
535 struct list_head flushing_list; 571 struct list_head flushing_list;
536 572
537 /** 573 /**
538 * List of objects currently pending a GPU write flush.
539 *
540 * All elements on this list will belong to either the
541 * active_list or flushing_list, last_rendering_seqno can
542 * be used to differentiate between the two elements.
543 */
544 struct list_head gpu_write_list;
545
546 /**
547 * LRU list of objects which are not in the ringbuffer and 574 * LRU list of objects which are not in the ringbuffer and
548 * are ready to unbind, but are still in the GTT. 575 * are ready to unbind, but are still in the GTT.
549 * 576 *
@@ -555,6 +582,12 @@ typedef struct drm_i915_private {
555 */ 582 */
556 struct list_head inactive_list; 583 struct list_head inactive_list;
557 584
585 /**
586 * LRU list of objects which are not in the ringbuffer but
587 * are still pinned in the GTT.
588 */
589 struct list_head pinned_list;
590
558 /** LRU list of objects with fence regs on them. */ 591 /** LRU list of objects with fence regs on them. */
559 struct list_head fence_list; 592 struct list_head fence_list;
560 593
@@ -611,6 +644,17 @@ typedef struct drm_i915_private {
611 644
612 /* storage for physical objects */ 645 /* storage for physical objects */
613 struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT]; 646 struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT];
647
648 uint32_t flush_rings;
649
650 /* accounting, useful for userland debugging */
651 size_t object_memory;
652 size_t pin_memory;
653 size_t gtt_memory;
654 size_t gtt_total;
655 u32 object_count;
656 u32 pin_count;
657 u32 gtt_count;
614 } mm; 658 } mm;
615 struct sdvo_device_mapping sdvo_mappings[2]; 659 struct sdvo_device_mapping sdvo_mappings[2];
616 /* indicate whether the LVDS_BORDER should be enabled or not */ 660 /* indicate whether the LVDS_BORDER should be enabled or not */
@@ -626,8 +670,6 @@ typedef struct drm_i915_private {
626 /* Reclocking support */ 670 /* Reclocking support */
627 bool render_reclock_avail; 671 bool render_reclock_avail;
628 bool lvds_downclock_avail; 672 bool lvds_downclock_avail;
629 /* indicate whether the LVDS EDID is OK */
630 bool lvds_edid_good;
631 /* indicates the reduced downclock for LVDS*/ 673 /* indicates the reduced downclock for LVDS*/
632 int lvds_downclock; 674 int lvds_downclock;
633 struct work_struct idle_work; 675 struct work_struct idle_work;
@@ -661,6 +703,8 @@ typedef struct drm_i915_private {
661 struct drm_mm_node *compressed_fb; 703 struct drm_mm_node *compressed_fb;
662 struct drm_mm_node *compressed_llb; 704 struct drm_mm_node *compressed_llb;
663 705
706 unsigned long last_gpu_reset;
707
664 /* list of fbdev register on this device */ 708 /* list of fbdev register on this device */
665 struct intel_fbdev *fbdev; 709 struct intel_fbdev *fbdev;
666} drm_i915_private_t; 710} drm_i915_private_t;
@@ -673,7 +717,8 @@ struct drm_i915_gem_object {
673 struct drm_mm_node *gtt_space; 717 struct drm_mm_node *gtt_space;
674 718
675 /** This object's place on the active/flushing/inactive lists */ 719 /** This object's place on the active/flushing/inactive lists */
676 struct list_head list; 720 struct list_head ring_list;
721 struct list_head mm_list;
677 /** This object's place on GPU write list */ 722 /** This object's place on GPU write list */
678 struct list_head gpu_write_list; 723 struct list_head gpu_write_list;
679 /** This object's place on eviction list */ 724 /** This object's place on eviction list */
@@ -816,12 +861,14 @@ struct drm_i915_gem_request {
816 /** global list entry for this request */ 861 /** global list entry for this request */
817 struct list_head list; 862 struct list_head list;
818 863
864 struct drm_i915_file_private *file_priv;
819 /** file_priv list entry for this request */ 865 /** file_priv list entry for this request */
820 struct list_head client_list; 866 struct list_head client_list;
821}; 867};
822 868
823struct drm_i915_file_private { 869struct drm_i915_file_private {
824 struct { 870 struct {
871 struct spinlock lock;
825 struct list_head request_list; 872 struct list_head request_list;
826 } mm; 873 } mm;
827}; 874};
@@ -862,7 +909,7 @@ extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
862extern int i915_emit_box(struct drm_device *dev, 909extern int i915_emit_box(struct drm_device *dev,
863 struct drm_clip_rect *boxes, 910 struct drm_clip_rect *boxes,
864 int i, int DR1, int DR4); 911 int i, int DR1, int DR4);
865extern int i965_reset(struct drm_device *dev, u8 flags); 912extern int i915_reset(struct drm_device *dev, u8 flags);
866extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv); 913extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
867extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv); 914extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
868extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv); 915extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);
@@ -871,7 +918,6 @@ extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);
871 918
872/* i915_irq.c */ 919/* i915_irq.c */
873void i915_hangcheck_elapsed(unsigned long data); 920void i915_hangcheck_elapsed(unsigned long data);
874void i915_destroy_error_state(struct drm_device *dev);
875extern int i915_irq_emit(struct drm_device *dev, void *data, 921extern int i915_irq_emit(struct drm_device *dev, void *data,
876 struct drm_file *file_priv); 922 struct drm_file *file_priv);
877extern int i915_irq_wait(struct drm_device *dev, void *data, 923extern int i915_irq_wait(struct drm_device *dev, void *data,
@@ -908,6 +954,12 @@ i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
908 954
909void intel_enable_asle (struct drm_device *dev); 955void intel_enable_asle (struct drm_device *dev);
910 956
957#ifdef CONFIG_DEBUG_FS
958extern void i915_destroy_error_state(struct drm_device *dev);
959#else
960#define i915_destroy_error_state(x)
961#endif
962
911 963
912/* i915_mem.c */ 964/* i915_mem.c */
913extern int i915_mem_alloc(struct drm_device *dev, void *data, 965extern int i915_mem_alloc(struct drm_device *dev, void *data,
@@ -922,6 +974,7 @@ extern void i915_mem_takedown(struct mem_block **heap);
922extern void i915_mem_release(struct drm_device * dev, 974extern void i915_mem_release(struct drm_device * dev,
923 struct drm_file *file_priv, struct mem_block *heap); 975 struct drm_file *file_priv, struct mem_block *heap);
924/* i915_gem.c */ 976/* i915_gem.c */
977int i915_gem_check_is_wedged(struct drm_device *dev);
925int i915_gem_init_ioctl(struct drm_device *dev, void *data, 978int i915_gem_init_ioctl(struct drm_device *dev, void *data,
926 struct drm_file *file_priv); 979 struct drm_file *file_priv);
927int i915_gem_create_ioctl(struct drm_device *dev, void *data, 980int i915_gem_create_ioctl(struct drm_device *dev, void *data,
@@ -972,13 +1025,22 @@ void i915_gem_object_unpin(struct drm_gem_object *obj);
972int i915_gem_object_unbind(struct drm_gem_object *obj); 1025int i915_gem_object_unbind(struct drm_gem_object *obj);
973void i915_gem_release_mmap(struct drm_gem_object *obj); 1026void i915_gem_release_mmap(struct drm_gem_object *obj);
974void i915_gem_lastclose(struct drm_device *dev); 1027void i915_gem_lastclose(struct drm_device *dev);
975uint32_t i915_get_gem_seqno(struct drm_device *dev, 1028
976 struct intel_ring_buffer *ring); 1029/**
977bool i915_seqno_passed(uint32_t seq1, uint32_t seq2); 1030 * Returns true if seq1 is later than seq2.
978int i915_gem_object_get_fence_reg(struct drm_gem_object *obj); 1031 */
979int i915_gem_object_put_fence_reg(struct drm_gem_object *obj); 1032static inline bool
1033i915_seqno_passed(uint32_t seq1, uint32_t seq2)
1034{
1035 return (int32_t)(seq1 - seq2) >= 0;
1036}
1037
1038int i915_gem_object_get_fence_reg(struct drm_gem_object *obj,
1039 bool interruptible);
1040int i915_gem_object_put_fence_reg(struct drm_gem_object *obj,
1041 bool interruptible);
980void i915_gem_retire_requests(struct drm_device *dev); 1042void i915_gem_retire_requests(struct drm_device *dev);
981void i915_gem_retire_work_handler(struct work_struct *work); 1043void i915_gem_reset(struct drm_device *dev);
982void i915_gem_clflush_object(struct drm_gem_object *obj); 1044void i915_gem_clflush_object(struct drm_gem_object *obj);
983int i915_gem_object_set_domain(struct drm_gem_object *obj, 1045int i915_gem_object_set_domain(struct drm_gem_object *obj,
984 uint32_t read_domains, 1046 uint32_t read_domains,
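i915_seqno_passed() moves into the header as a static inline. The signed-subtraction trick makes it safe across 32-bit wraparound: as long as the two seqnos are within 2^31 of each other, (int32_t)(seq1 - seq2) is non-negative exactly when seq1 is at or after seq2 (despite the comment, equality also passes). A self-contained check:

    #include <stdint.h>
    #include <assert.h>

    static inline int seqno_passed(uint32_t seq1, uint32_t seq2)
    {
            return (int32_t)(seq1 - seq2) >= 0;
    }

    int main(void)
    {
            assert(seqno_passed(5, 3));             /* plain ordering */
            assert(!seqno_passed(3, 5));
            assert(seqno_passed(2, 0xfffffffeu));   /* across the wrap */
            assert(!seqno_passed(0xfffffffeu, 2));
            return 0;
    }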
@@ -990,16 +1052,18 @@ int i915_gem_do_init(struct drm_device *dev, unsigned long start,
990int i915_gpu_idle(struct drm_device *dev); 1052int i915_gpu_idle(struct drm_device *dev);
991int i915_gem_idle(struct drm_device *dev); 1053int i915_gem_idle(struct drm_device *dev);
992uint32_t i915_add_request(struct drm_device *dev, 1054uint32_t i915_add_request(struct drm_device *dev,
993 struct drm_file *file_priv, 1055 struct drm_file *file_priv,
994 uint32_t flush_domains, 1056 struct drm_i915_gem_request *request,
995 struct intel_ring_buffer *ring); 1057 struct intel_ring_buffer *ring);
996int i915_do_wait_request(struct drm_device *dev, 1058int i915_do_wait_request(struct drm_device *dev,
997 uint32_t seqno, int interruptible, 1059 uint32_t seqno,
998 struct intel_ring_buffer *ring); 1060 bool interruptible,
1061 struct intel_ring_buffer *ring);
999int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf); 1062int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
1000int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, 1063int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj,
1001 int write); 1064 int write);
1002int i915_gem_object_set_to_display_plane(struct drm_gem_object *obj); 1065int i915_gem_object_set_to_display_plane(struct drm_gem_object *obj,
1066 bool pipelined);
1003int i915_gem_attach_phys_object(struct drm_device *dev, 1067int i915_gem_attach_phys_object(struct drm_device *dev,
1004 struct drm_gem_object *obj, 1068 struct drm_gem_object *obj,
1005 int id, 1069 int id,
@@ -1007,10 +1071,7 @@ int i915_gem_attach_phys_object(struct drm_device *dev,
1007void i915_gem_detach_phys_object(struct drm_device *dev, 1071void i915_gem_detach_phys_object(struct drm_device *dev,
1008 struct drm_gem_object *obj); 1072 struct drm_gem_object *obj);
1009void i915_gem_free_all_phys_object(struct drm_device *dev); 1073void i915_gem_free_all_phys_object(struct drm_device *dev);
1010int i915_gem_object_get_pages(struct drm_gem_object *obj, gfp_t gfpmask);
1011void i915_gem_object_put_pages(struct drm_gem_object *obj);
1012void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv); 1074void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv);
1013int i915_gem_object_flush_write_domain(struct drm_gem_object *obj);
1014 1075
1015void i915_gem_shrinker_init(void); 1076void i915_gem_shrinker_init(void);
1016void i915_gem_shrinker_exit(void); 1077void i915_gem_shrinker_exit(void);
@@ -1032,15 +1093,14 @@ bool i915_gem_object_fence_offset_ok(struct drm_gem_object *obj,
1032/* i915_gem_debug.c */ 1093/* i915_gem_debug.c */
1033void i915_gem_dump_object(struct drm_gem_object *obj, int len, 1094void i915_gem_dump_object(struct drm_gem_object *obj, int len,
1034 const char *where, uint32_t mark); 1095 const char *where, uint32_t mark);
1035#if WATCH_INACTIVE 1096#if WATCH_LISTS
1036void i915_verify_inactive(struct drm_device *dev, char *file, int line); 1097int i915_verify_lists(struct drm_device *dev);
1037#else 1098#else
1038#define i915_verify_inactive(dev, file, line) 1099#define i915_verify_lists(dev) 0
1039#endif 1100#endif
1040void i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle); 1101void i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle);
1041void i915_gem_dump_object(struct drm_gem_object *obj, int len, 1102void i915_gem_dump_object(struct drm_gem_object *obj, int len,
1042 const char *where, uint32_t mark); 1103 const char *where, uint32_t mark);
1043void i915_dump_lru(struct drm_device *dev, const char *where);
1044 1104
1045/* i915_debugfs.c */ 1105/* i915_debugfs.c */
1046int i915_debugfs_init(struct drm_minor *minor); 1106int i915_debugfs_init(struct drm_minor *minor);
@@ -1054,21 +1114,42 @@ extern int i915_restore_state(struct drm_device *dev);
1054extern int i915_save_state(struct drm_device *dev); 1114extern int i915_save_state(struct drm_device *dev);
1055extern int i915_restore_state(struct drm_device *dev); 1115extern int i915_restore_state(struct drm_device *dev);
1056 1116
1117/* intel_i2c.c */
1118extern int intel_setup_gmbus(struct drm_device *dev);
1119extern void intel_teardown_gmbus(struct drm_device *dev);
1120extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
1121extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
1122extern inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
1123{
1124 return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
1125}
1126extern void intel_i2c_reset(struct drm_device *dev);
1127
1128/* intel_opregion.c */
1129extern int intel_opregion_setup(struct drm_device *dev);
1057#ifdef CONFIG_ACPI 1130#ifdef CONFIG_ACPI
1058/* i915_opregion.c */ 1131extern void intel_opregion_init(struct drm_device *dev);
1059extern int intel_opregion_init(struct drm_device *dev, int resume); 1132extern void intel_opregion_fini(struct drm_device *dev);
1060extern void intel_opregion_free(struct drm_device *dev, int suspend); 1133extern void intel_opregion_asle_intr(struct drm_device *dev);
1061extern void opregion_asle_intr(struct drm_device *dev); 1134extern void intel_opregion_gse_intr(struct drm_device *dev);
1062extern void ironlake_opregion_gse_intr(struct drm_device *dev); 1135extern void intel_opregion_enable_asle(struct drm_device *dev);
1063extern void opregion_enable_asle(struct drm_device *dev);
1064#else 1136#else
1065static inline int intel_opregion_init(struct drm_device *dev, int resume) { return 0; } 1137static inline void intel_opregion_init(struct drm_device *dev) { return; }
1066static inline void intel_opregion_free(struct drm_device *dev, int suspend) { return; } 1138static inline void intel_opregion_fini(struct drm_device *dev) { return; }
1067static inline void opregion_asle_intr(struct drm_device *dev) { return; } 1139static inline void intel_opregion_asle_intr(struct drm_device *dev) { return; }
1068static inline void ironlake_opregion_gse_intr(struct drm_device *dev) { return; } 1140static inline void intel_opregion_gse_intr(struct drm_device *dev) { return; }
1069static inline void opregion_enable_asle(struct drm_device *dev) { return; } 1141static inline void intel_opregion_enable_asle(struct drm_device *dev) { return; }
1070#endif 1142#endif
1071 1143
1144/* intel_acpi.c */
1145#ifdef CONFIG_ACPI
1146extern void intel_register_dsm_handler(void);
1147extern void intel_unregister_dsm_handler(void);
1148#else
1149static inline void intel_register_dsm_handler(void) { return; }
1150static inline void intel_unregister_dsm_handler(void) { return; }
1151#endif /* CONFIG_ACPI */
1152
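Both CONFIG_ACPI blocks above use the standard kernel stub idiom: when the feature is compiled out, static inline no-ops keep every call site free of #ifdefs while still type-checking the arguments. In sketch form:

    #ifdef CONFIG_EXAMPLE_FEATURE
    extern void example_feature_register(void);
    extern void example_feature_unregister(void);
    #else
    static inline void example_feature_register(void) { }
    static inline void example_feature_unregister(void) { }
    #endif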
1072/* modesetting */ 1153/* modesetting */
1073extern void intel_modeset_init(struct drm_device *dev); 1154extern void intel_modeset_init(struct drm_device *dev);
1074extern void intel_modeset_cleanup(struct drm_device *dev); 1155extern void intel_modeset_cleanup(struct drm_device *dev);
@@ -1084,8 +1165,10 @@ extern void intel_detect_pch (struct drm_device *dev);
1084extern int intel_trans_dp_port_sel (struct drm_crtc *crtc); 1165extern int intel_trans_dp_port_sel (struct drm_crtc *crtc);
1085 1166
1086/* overlay */ 1167/* overlay */
1168#ifdef CONFIG_DEBUG_FS
1087extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev); 1169extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev);
1088extern void intel_overlay_print_error_state(struct seq_file *m, struct intel_overlay_error_state *error); 1170extern void intel_overlay_print_error_state(struct seq_file *m, struct intel_overlay_error_state *error);
1171#endif
1089 1172
1090/** 1173/**
1091 * Lock test for when it's just for synchronization of ring access. 1174 * Lock test for when it's just for synchronization of ring access.
@@ -1099,8 +1182,26 @@ extern void intel_overlay_print_error_state(struct seq_file *m, struct intel_ove
1099 LOCK_TEST_WITH_RETURN(dev, file_priv); \ 1182 LOCK_TEST_WITH_RETURN(dev, file_priv); \
1100} while (0) 1183} while (0)
1101 1184
1102#define I915_READ(reg) readl(dev_priv->regs + (reg)) 1185static inline u32 i915_read(struct drm_i915_private *dev_priv, u32 reg)
1103#define I915_WRITE(reg, val) writel(val, dev_priv->regs + (reg)) 1186{
1187 u32 val;
1188
1189 val = readl(dev_priv->regs + reg);
1190 if (dev_priv->debug_flags & I915_DEBUG_READ)
1191 printk(KERN_ERR "read 0x%08x from 0x%08x\n", val, reg);
1192 return val;
1193}
1194
1195static inline void i915_write(struct drm_i915_private *dev_priv, u32 reg,
1196 u32 val)
1197{
1198 writel(val, dev_priv->regs + reg);
1199 if (dev_priv->debug_flags & I915_DEBUG_WRITE)
1200 printk(KERN_ERR "wrote 0x%08x to 0x%08x\n", val, reg);
1201}
1202
1203#define I915_READ(reg) i915_read(dev_priv, (reg))
1204#define I915_WRITE(reg, val) i915_write(dev_priv, (reg), (val))
1104#define I915_READ16(reg) readw(dev_priv->regs + (reg)) 1205#define I915_READ16(reg) readw(dev_priv->regs + (reg))
1105#define I915_WRITE16(reg, val) writel(val, dev_priv->regs + (reg)) 1206#define I915_WRITE16(reg, val) writel(val, dev_priv->regs + (reg))
1106#define I915_READ8(reg) readb(dev_priv->regs + (reg)) 1207#define I915_READ8(reg) readb(dev_priv->regs + (reg))
@@ -1110,6 +1211,11 @@ extern void intel_overlay_print_error_state(struct seq_file *m, struct intel_ove
1110#define POSTING_READ(reg) (void)I915_READ(reg) 1211#define POSTING_READ(reg) (void)I915_READ(reg)
1111#define POSTING_READ16(reg) (void)I915_READ16(reg) 1212#define POSTING_READ16(reg) (void)I915_READ16(reg)
1112 1213
1214#define I915_DEBUG_ENABLE_IO() (dev_priv->debug_flags |= I915_DEBUG_READ | \
1215 I915_DEBUG_WRITE)
1216#define I915_DEBUG_DISABLE_IO() (dev_priv->debug_flags &= ~(I915_DEBUG_READ | \
1217 I915_DEBUG_WRITE))
1218
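
The new i915_read()/i915_write() wrappers gate a trace printk on per-device debug flags, and I915_DEBUG_ENABLE_IO()/I915_DEBUG_DISABLE_IO() flip both bits at once. A rough userspace sketch of the same flag-gated accessor pattern — struct dev, the DBG_* names and the backing array are invented for illustration; the kernel code uses readl()/writel() on an ioremapped MMIO window:

    #include <stdint.h>
    #include <stdio.h>

    #define DBG_READ  (1u << 0)
    #define DBG_WRITE (1u << 1)

    struct dev {
            uint32_t regs[256];             /* stand-in for the MMIO window */
            unsigned int debug_flags;
    };

    static uint32_t reg_read(struct dev *d, uint32_t reg)
    {
            uint32_t val = d->regs[reg / 4];

            if (d->debug_flags & DBG_READ)
                    fprintf(stderr, "read 0x%08x from 0x%08x\n", val, reg);
            return val;
    }

    static void reg_write(struct dev *d, uint32_t reg, uint32_t val)
    {
            d->regs[reg / 4] = val;
            if (d->debug_flags & DBG_WRITE)
                    fprintf(stderr, "wrote 0x%08x to 0x%08x\n", val, reg);
    }

    int main(void)
    {
            struct dev d = { .debug_flags = DBG_READ | DBG_WRITE };

            reg_write(&d, 0x10, 0xdeadbeef);
            return reg_read(&d, 0x10) != 0xdeadbeef;
    }
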
1113#define I915_VERBOSE 0 1219#define I915_VERBOSE 0
1114 1220
1115#define BEGIN_LP_RING(n) do { \ 1221#define BEGIN_LP_RING(n) do { \
@@ -1166,8 +1272,6 @@ extern void intel_overlay_print_error_state(struct seq_file *m, struct intel_ove
1166#define IS_I915GM(dev) ((dev)->pci_device == 0x2592) 1272#define IS_I915GM(dev) ((dev)->pci_device == 0x2592)
1167#define IS_I945G(dev) ((dev)->pci_device == 0x2772) 1273#define IS_I945G(dev) ((dev)->pci_device == 0x2772)
1168#define IS_I945GM(dev) (INTEL_INFO(dev)->is_i945gm) 1274#define IS_I945GM(dev) (INTEL_INFO(dev)->is_i945gm)
1169#define IS_I965G(dev) (INTEL_INFO(dev)->is_i965g)
1170#define IS_I965GM(dev) (INTEL_INFO(dev)->is_i965gm)
1171#define IS_BROADWATER(dev) (INTEL_INFO(dev)->is_broadwater) 1275#define IS_BROADWATER(dev) (INTEL_INFO(dev)->is_broadwater)
1172#define IS_CRESTLINE(dev) (INTEL_INFO(dev)->is_crestline) 1276#define IS_CRESTLINE(dev) (INTEL_INFO(dev)->is_crestline)
1173#define IS_GM45(dev) ((dev)->pci_device == 0x2A42) 1277#define IS_GM45(dev) ((dev)->pci_device == 0x2A42)
@@ -1178,8 +1282,6 @@ extern void intel_overlay_print_error_state(struct seq_file *m, struct intel_ove
1178#define IS_G33(dev) (INTEL_INFO(dev)->is_g33) 1282#define IS_G33(dev) (INTEL_INFO(dev)->is_g33)
1179#define IS_IRONLAKE_D(dev) ((dev)->pci_device == 0x0042) 1283#define IS_IRONLAKE_D(dev) ((dev)->pci_device == 0x0042)
1180#define IS_IRONLAKE_M(dev) ((dev)->pci_device == 0x0046) 1284#define IS_IRONLAKE_M(dev) ((dev)->pci_device == 0x0046)
1181#define IS_IRONLAKE(dev) (INTEL_INFO(dev)->is_ironlake)
1182#define IS_I9XX(dev) (INTEL_INFO(dev)->is_i9xx)
1183#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile) 1285#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile)
1184 1286
1185#define IS_GEN2(dev) (INTEL_INFO(dev)->gen == 2) 1287#define IS_GEN2(dev) (INTEL_INFO(dev)->gen == 2)
@@ -1188,33 +1290,34 @@ extern void intel_overlay_print_error_state(struct seq_file *m, struct intel_ove
1188#define IS_GEN5(dev) (INTEL_INFO(dev)->gen == 5) 1290#define IS_GEN5(dev) (INTEL_INFO(dev)->gen == 5)
1189#define IS_GEN6(dev) (INTEL_INFO(dev)->gen == 6) 1291#define IS_GEN6(dev) (INTEL_INFO(dev)->gen == 6)
1190 1292
1191#define HAS_BSD(dev) (IS_IRONLAKE(dev) || IS_G4X(dev)) 1293#define HAS_BSD(dev) (INTEL_INFO(dev)->has_bsd_ring)
1294#define HAS_BLT(dev) (INTEL_INFO(dev)->has_blt_ring)
1192#define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws) 1295#define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)
1193 1296
1297#define HAS_OVERLAY(dev) (INTEL_INFO(dev)->has_overlay)
1298#define OVERLAY_NEEDS_PHYSICAL(dev) (INTEL_INFO(dev)->overlay_needs_physical)
1299
1194/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte 1300/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
1195 * rows, which changed the alignment requirements and fence programming. 1301 * rows, which changed the alignment requirements and fence programming.
1196 */ 1302 */
1197#define HAS_128_BYTE_Y_TILING(dev) (IS_I9XX(dev) && !(IS_I915G(dev) || \ 1303#define HAS_128_BYTE_Y_TILING(dev) (!IS_GEN2(dev) && !(IS_I915G(dev) || \
1198 IS_I915GM(dev))) 1304 IS_I915GM(dev)))
1199#define SUPPORTS_DIGITAL_OUTPUTS(dev) (IS_I9XX(dev) && !IS_PINEVIEW(dev)) 1305#define SUPPORTS_DIGITAL_OUTPUTS(dev) (!IS_GEN2(dev) && !IS_PINEVIEW(dev))
1200#define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev) || IS_IRONLAKE(dev)) 1306#define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev) || IS_GEN5(dev))
1201#define SUPPORTS_INTEGRATED_DP(dev) (IS_G4X(dev) || IS_IRONLAKE(dev)) 1307#define SUPPORTS_INTEGRATED_DP(dev) (IS_G4X(dev) || IS_GEN5(dev))
1202#define SUPPORTS_EDP(dev) (IS_IRONLAKE_M(dev)) 1308#define SUPPORTS_EDP(dev) (IS_IRONLAKE_M(dev))
1203#define SUPPORTS_TV(dev) (IS_I9XX(dev) && IS_MOBILE(dev) && \ 1309#define SUPPORTS_TV(dev) (INTEL_INFO(dev)->supports_tv)
1204 !IS_IRONLAKE(dev) && !IS_PINEVIEW(dev) && \
1205 !IS_GEN6(dev))
1206#define I915_HAS_HOTPLUG(dev) (INTEL_INFO(dev)->has_hotplug) 1310#define I915_HAS_HOTPLUG(dev) (INTEL_INFO(dev)->has_hotplug)
1207/* dsparb controlled by hw only */ 1311/* dsparb controlled by hw only */
1208#define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IRONLAKE(dev)) 1312#define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IRONLAKE(dev))
1209 1313
1210#define HAS_FW_BLC(dev) (IS_I9XX(dev) || IS_G4X(dev) || IS_IRONLAKE(dev)) 1314#define HAS_FW_BLC(dev) (INTEL_INFO(dev)->gen > 2)
1211#define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr) 1315#define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr)
1212#define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc) 1316#define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)
1213#define I915_HAS_RC6(dev) (INTEL_INFO(dev)->has_rc6) 1317#define I915_HAS_RC6(dev) (INTEL_INFO(dev)->has_rc6)
1214 1318
1215#define HAS_PCH_SPLIT(dev) (IS_IRONLAKE(dev) || \ 1319#define HAS_PCH_SPLIT(dev) (IS_GEN5(dev) || IS_GEN6(dev))
1216 IS_GEN6(dev)) 1320#define HAS_PIPE_CONTROL(dev) (IS_GEN5(dev) || IS_GEN6(dev))
1217#define HAS_PIPE_CONTROL(dev) (IS_IRONLAKE(dev) || IS_GEN6(dev))
1218 1321
1219#define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type) 1322#define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type)
1220#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT) 1323#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
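
Several macros in this hunk (HAS_BSD, HAS_BLT, SUPPORTS_TV, HAS_FW_BLC, HAS_128_BYTE_Y_TILING, ...) move from open-coded PCI-ID or platform checks to the per-device intel_device_info table reached through INTEL_INFO(dev), keying off either a feature bit or the gen number. A compilable sketch of the table-driven approach — field names are the ones visible above, but the struct is abridged; the real table carries many more flags:

    struct intel_device_info {
            unsigned int gen;
            unsigned int has_bsd_ring : 1;
            unsigned int has_blt_ring : 1;
            unsigned int supports_tv  : 1;
    };

    /* one static descriptor per platform, matched at PCI probe time */
    static const struct intel_device_info ironlake_d_info = {
            .gen = 5,
            .has_bsd_ring = 1,
    };

    #define HAS_BSD(info)   ((info)->has_bsd_ring)
    #define HAS_BLT(info)   ((info)->has_blt_ring)
    #define IS_GEN5(info)   ((info)->gen == 5)

    int main(void)
    {
            return !(HAS_BSD(&ironlake_d_info) && IS_GEN5(&ironlake_d_info));
    }
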
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index eb6c473c6d1b..8eb8453208b5 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -37,7 +37,9 @@
37#include <linux/intel-gtt.h> 37#include <linux/intel-gtt.h>
38 38
39static uint32_t i915_gem_get_gtt_alignment(struct drm_gem_object *obj); 39static uint32_t i915_gem_get_gtt_alignment(struct drm_gem_object *obj);
40static int i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj); 40
41static int i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj,
42 bool pipelined);
41static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj); 43static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj);
42static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj); 44static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj);
43static int i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, 45static int i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj,
@@ -46,7 +48,8 @@ static int i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
46 uint64_t offset, 48 uint64_t offset,
47 uint64_t size); 49 uint64_t size);
48static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj); 50static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj);
49static int i915_gem_object_wait_rendering(struct drm_gem_object *obj); 51static int i915_gem_object_wait_rendering(struct drm_gem_object *obj,
52 bool interruptible);
50static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, 53static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
51 unsigned alignment); 54 unsigned alignment);
52static void i915_gem_clear_fence_reg(struct drm_gem_object *obj); 55static void i915_gem_clear_fence_reg(struct drm_gem_object *obj);
@@ -55,9 +58,111 @@ static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *o
55 struct drm_file *file_priv); 58 struct drm_file *file_priv);
56static void i915_gem_free_object_tail(struct drm_gem_object *obj); 59static void i915_gem_free_object_tail(struct drm_gem_object *obj);
57 60
61static int
62i915_gem_object_get_pages(struct drm_gem_object *obj,
63 gfp_t gfpmask);
64
65static void
66i915_gem_object_put_pages(struct drm_gem_object *obj);
67
58static LIST_HEAD(shrink_list); 68static LIST_HEAD(shrink_list);
59static DEFINE_SPINLOCK(shrink_list_lock); 69static DEFINE_SPINLOCK(shrink_list_lock);
60 70
71/* bookkeeping helpers for the object/GTT/pin memory accounting */
72static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
73 size_t size)
74{
75 dev_priv->mm.object_count++;
76 dev_priv->mm.object_memory += size;
77}
78
79static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
80 size_t size)
81{
82 dev_priv->mm.object_count--;
83 dev_priv->mm.object_memory -= size;
84}
85
86static void i915_gem_info_add_gtt(struct drm_i915_private *dev_priv,
87 size_t size)
88{
89 dev_priv->mm.gtt_count++;
90 dev_priv->mm.gtt_memory += size;
91}
92
93static void i915_gem_info_remove_gtt(struct drm_i915_private *dev_priv,
94 size_t size)
95{
96 dev_priv->mm.gtt_count--;
97 dev_priv->mm.gtt_memory -= size;
98}
99
100static void i915_gem_info_add_pin(struct drm_i915_private *dev_priv,
101 size_t size)
102{
103 dev_priv->mm.pin_count++;
104 dev_priv->mm.pin_memory += size;
105}
106
107static void i915_gem_info_remove_pin(struct drm_i915_private *dev_priv,
108 size_t size)
109{
110 dev_priv->mm.pin_count--;
111 dev_priv->mm.pin_memory -= size;
112}
113
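
These add/remove pairs keep a count and a byte total in lockstep for each pool (objects, GTT-bound, pinned); i915_gem_get_aperture_ioctl() below then reads pin_memory under struct_mutex instead of the old atomic counter. The minimal shape of the pattern, with generic names rather than the driver's:

    #include <stddef.h>

    struct mm_stats {
            size_t count;
            size_t bytes;
    };

    /* always call these in matched pairs, under the lock that guards
     * the stats, so count and bytes can never drift apart */
    static void stats_add(struct mm_stats *s, size_t size)
    {
            s->count++;
            s->bytes += size;
    }

    static void stats_remove(struct mm_stats *s, size_t size)
    {
            s->count--;
            s->bytes -= size;
    }
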
114int
115i915_gem_check_is_wedged(struct drm_device *dev)
116{
117 struct drm_i915_private *dev_priv = dev->dev_private;
118 struct completion *x = &dev_priv->error_completion;
119 unsigned long flags;
120 int ret;
121
122 if (!atomic_read(&dev_priv->mm.wedged))
123 return 0;
124
125 ret = wait_for_completion_interruptible(x);
126 if (ret)
127 return ret;
128
129 /* Success: the reset completed and the GPU is usable again */
130 if (!atomic_read(&dev_priv->mm.wedged))
131 return 0;
132
133 /* GPU is hung, bump the completion count to account for
134 * the token we just consumed so that we never hit zero and
135 * end up waiting upon a subsequent completion event that
136 * will never happen.
137 */
138 spin_lock_irqsave(&x->wait.lock, flags);
139 x->done++;
140 spin_unlock_irqrestore(&x->wait.lock, flags);
141 return -EIO;
142}
143
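
i915_gem_check_is_wedged() consumes one completion token while waiting for a GPU reset and, if the device is still wedged afterwards, puts the token back (by bumping x->done under the waitqueue lock) so later waiters are not stuck behind a completion that will never arrive. A userspace analogue using a POSIX semaphore in place of struct completion — hypothetical names, compile with -pthread:

    #include <errno.h>
    #include <semaphore.h>
    #include <stdatomic.h>

    static sem_t reset_done;        /* posted once per completed reset */
    static atomic_int wedged;       /* nonzero while the GPU is hung */

    static int check_is_wedged(void)
    {
            if (!atomic_load(&wedged))
                    return 0;

            if (sem_wait(&reset_done))  /* interruptible wait in the kernel */
                    return -EINTR;

            if (!atomic_load(&wedged))
                    return 0;           /* the reset brought the GPU back */

            /* Still hung: return the token we consumed so the next
             * waiter does not block forever. */
            sem_post(&reset_done);
            return -EIO;
    }
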
144static int i915_mutex_lock_interruptible(struct drm_device *dev)
145{
146 struct drm_i915_private *dev_priv = dev->dev_private;
147 int ret;
148
149 ret = i915_gem_check_is_wedged(dev);
150 if (ret)
151 return ret;
152
153 ret = mutex_lock_interruptible(&dev->struct_mutex);
154 if (ret)
155 return ret;
156
157 if (atomic_read(&dev_priv->mm.wedged)) {
158 mutex_unlock(&dev->struct_mutex);
159 return -EAGAIN;
160 }
161
162 WARN_ON(i915_verify_lists(dev));
163 return 0;
164}
165
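
i915_mutex_lock_interruptible() checks the wedged state, takes struct_mutex interruptibly, then re-checks under the lock, because a hang can be declared while the caller sleeps waiting for the mutex. The same check/lock/recheck shape with pthreads, reusing the declarations from the check_is_wedged() sketch above (illustrative only):

    #include <pthread.h>

    static pthread_mutex_t dev_lock = PTHREAD_MUTEX_INITIALIZER;

    static int lock_interruptible(void)
    {
            int ret = check_is_wedged();
            if (ret)
                    return ret;

            pthread_mutex_lock(&dev_lock);

            /* the state may have changed while we slept on the lock */
            if (atomic_load(&wedged)) {
                    pthread_mutex_unlock(&dev_lock);
                    return -EAGAIN;
            }
            return 0;
    }
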
61static inline bool 166static inline bool
62i915_gem_object_is_inactive(struct drm_i915_gem_object *obj_priv) 167i915_gem_object_is_inactive(struct drm_i915_gem_object *obj_priv)
63{ 168{
@@ -66,7 +171,8 @@ i915_gem_object_is_inactive(struct drm_i915_gem_object *obj_priv)
66 obj_priv->pin_count == 0; 171 obj_priv->pin_count == 0;
67} 172}
68 173
69int i915_gem_do_init(struct drm_device *dev, unsigned long start, 174int i915_gem_do_init(struct drm_device *dev,
175 unsigned long start,
70 unsigned long end) 176 unsigned long end)
71{ 177{
72 drm_i915_private_t *dev_priv = dev->dev_private; 178 drm_i915_private_t *dev_priv = dev->dev_private;
@@ -80,7 +186,7 @@ int i915_gem_do_init(struct drm_device *dev, unsigned long start,
80 drm_mm_init(&dev_priv->mm.gtt_space, start, 186 drm_mm_init(&dev_priv->mm.gtt_space, start,
81 end - start); 187 end - start);
82 188
83 dev->gtt_total = (uint32_t) (end - start); 189 dev_priv->mm.gtt_total = end - start;
84 190
85 return 0; 191 return 0;
86} 192}
@@ -103,14 +209,16 @@ int
103i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, 209i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
104 struct drm_file *file_priv) 210 struct drm_file *file_priv)
105{ 211{
212 struct drm_i915_private *dev_priv = dev->dev_private;
106 struct drm_i915_gem_get_aperture *args = data; 213 struct drm_i915_gem_get_aperture *args = data;
107 214
108 if (!(dev->driver->driver_features & DRIVER_GEM)) 215 if (!(dev->driver->driver_features & DRIVER_GEM))
109 return -ENODEV; 216 return -ENODEV;
110 217
111 args->aper_size = dev->gtt_total; 218 mutex_lock(&dev->struct_mutex);
112 args->aper_available_size = (args->aper_size - 219 args->aper_size = dev_priv->mm.gtt_total;
113 atomic_read(&dev->pin_memory)); 220 args->aper_available_size = args->aper_size - dev_priv->mm.pin_memory;
221 mutex_unlock(&dev->struct_mutex);
114 222
115 return 0; 223 return 0;
116} 224}
@@ -136,12 +244,17 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data,
136 return -ENOMEM; 244 return -ENOMEM;
137 245
138 ret = drm_gem_handle_create(file_priv, obj, &handle); 246 ret = drm_gem_handle_create(file_priv, obj, &handle);
139 /* drop reference from allocate - handle holds it now */
140 drm_gem_object_unreference_unlocked(obj);
141 if (ret) { 247 if (ret) {
248 drm_gem_object_release(obj);
249 i915_gem_info_remove_obj(dev->dev_private, obj->size);
250 kfree(obj);
142 return ret; 251 return ret;
143 } 252 }
144 253
254 /* drop reference from allocate - handle holds it now */
255 drm_gem_object_unreference(obj);
256 trace_i915_gem_object_create(obj);
257
145 args->handle = handle; 258 args->handle = handle;
146 return 0; 259 return 0;
147} 260}
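
The reworked error path reflects a reference-ownership rule: creation returns one reference, publishing the object under a userspace handle takes another, and the creation reference is dropped once the handle owns it; on failure the never-published object is torn down by hand. A simplified standalone sketch — the helper names are invented, and the real failure path uses drm_gem_object_release() plus kfree():

    #include <stdlib.h>

    struct obj { int refcount; };

    static struct obj *object_create(void)
    {
            struct obj *o = calloc(1, sizeof(*o));
            if (o)
                    o->refcount = 1;        /* creation reference */
            return o;
    }

    static void object_unref(struct obj *o)
    {
            if (--o->refcount == 0)
                    free(o);
    }

    static int handle_create(struct obj *o)
    {
            o->refcount++;                  /* the handle's own reference */
            return 0;                       /* may fail in the real code */
    }

    static int create_object(void)
    {
            struct obj *o = object_create();
            if (!o)
                    return -1;

            if (handle_create(o) != 0) {
                    object_unref(o);        /* never published: tear down */
                    return -1;
            }

            object_unref(o);                /* the handle now owns the object */
            return 0;
    }
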
@@ -152,19 +265,14 @@ fast_shmem_read(struct page **pages,
152 char __user *data, 265 char __user *data,
153 int length) 266 int length)
154{ 267{
155 char __iomem *vaddr; 268 char *vaddr;
156 int unwritten; 269 int ret;
157 270
158 vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT]); 271 vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT]);
159 if (vaddr == NULL) 272 ret = __copy_to_user_inatomic(data, vaddr + page_offset, length);
160 return -ENOMEM;
161 unwritten = __copy_to_user_inatomic(data, vaddr + page_offset, length);
162 kunmap_atomic(vaddr); 273 kunmap_atomic(vaddr);
163 274
164 if (unwritten) 275 return ret;
165 return -EFAULT;
166
167 return 0;
168} 276}
169 277
170static int i915_gem_object_needs_bit17_swizzle(struct drm_gem_object *obj) 278static int i915_gem_object_needs_bit17_swizzle(struct drm_gem_object *obj)
@@ -258,22 +366,10 @@ i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj,
258 loff_t offset, page_base; 366 loff_t offset, page_base;
259 char __user *user_data; 367 char __user *user_data;
260 int page_offset, page_length; 368 int page_offset, page_length;
261 int ret;
262 369
263 user_data = (char __user *) (uintptr_t) args->data_ptr; 370 user_data = (char __user *) (uintptr_t) args->data_ptr;
264 remain = args->size; 371 remain = args->size;
265 372
266 mutex_lock(&dev->struct_mutex);
267
268 ret = i915_gem_object_get_pages(obj, 0);
269 if (ret != 0)
270 goto fail_unlock;
271
272 ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
273 args->size);
274 if (ret != 0)
275 goto fail_put_pages;
276
277 obj_priv = to_intel_bo(obj); 373 obj_priv = to_intel_bo(obj);
278 offset = args->offset; 374 offset = args->offset;
279 375
@@ -290,23 +386,17 @@ i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj,
290 if ((page_offset + remain) > PAGE_SIZE) 386 if ((page_offset + remain) > PAGE_SIZE)
291 page_length = PAGE_SIZE - page_offset; 387 page_length = PAGE_SIZE - page_offset;
292 388
293 ret = fast_shmem_read(obj_priv->pages, 389 if (fast_shmem_read(obj_priv->pages,
294 page_base, page_offset, 390 page_base, page_offset,
295 user_data, page_length); 391 user_data, page_length))
296 if (ret) 392 return -EFAULT;
297 goto fail_put_pages;
298 393
299 remain -= page_length; 394 remain -= page_length;
300 user_data += page_length; 395 user_data += page_length;
301 offset += page_length; 396 offset += page_length;
302 } 397 }
303 398
304fail_put_pages: 399 return 0;
305 i915_gem_object_put_pages(obj);
306fail_unlock:
307 mutex_unlock(&dev->struct_mutex);
308
309 return ret;
310} 400}
311 401
312static int 402static int
@@ -367,31 +457,28 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
367 last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE; 457 last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
368 num_pages = last_data_page - first_data_page + 1; 458 num_pages = last_data_page - first_data_page + 1;
369 459
370 user_pages = drm_calloc_large(num_pages, sizeof(struct page *)); 460 user_pages = drm_malloc_ab(num_pages, sizeof(struct page *));
371 if (user_pages == NULL) 461 if (user_pages == NULL)
372 return -ENOMEM; 462 return -ENOMEM;
373 463
464 mutex_unlock(&dev->struct_mutex);
374 down_read(&mm->mmap_sem); 465 down_read(&mm->mmap_sem);
375 pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr, 466 pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
376 num_pages, 1, 0, user_pages, NULL); 467 num_pages, 1, 0, user_pages, NULL);
377 up_read(&mm->mmap_sem); 468 up_read(&mm->mmap_sem);
469 mutex_lock(&dev->struct_mutex);
378 if (pinned_pages < num_pages) { 470 if (pinned_pages < num_pages) {
379 ret = -EFAULT; 471 ret = -EFAULT;
380 goto fail_put_user_pages; 472 goto out;
381 } 473 }
382 474
383 do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj); 475 ret = i915_gem_object_set_cpu_read_domain_range(obj,
384 476 args->offset,
385 mutex_lock(&dev->struct_mutex); 477 args->size);
386
387 ret = i915_gem_object_get_pages_or_evict(obj);
388 if (ret) 478 if (ret)
389 goto fail_unlock; 479 goto out;
390 480
391 ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset, 481 do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
392 args->size);
393 if (ret != 0)
394 goto fail_put_pages;
395 482
396 obj_priv = to_intel_bo(obj); 483 obj_priv = to_intel_bo(obj);
397 offset = args->offset; 484 offset = args->offset;
@@ -436,11 +523,7 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
436 offset += page_length; 523 offset += page_length;
437 } 524 }
438 525
439fail_put_pages: 526out:
440 i915_gem_object_put_pages(obj);
441fail_unlock:
442 mutex_unlock(&dev->struct_mutex);
443fail_put_user_pages:
444 for (i = 0; i < pinned_pages; i++) { 527 for (i = 0; i < pinned_pages; i++) {
445 SetPageDirty(user_pages[i]); 528 SetPageDirty(user_pages[i]);
446 page_cache_release(user_pages[i]); 529 page_cache_release(user_pages[i]);
@@ -462,37 +545,64 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
462 struct drm_i915_gem_pread *args = data; 545 struct drm_i915_gem_pread *args = data;
463 struct drm_gem_object *obj; 546 struct drm_gem_object *obj;
464 struct drm_i915_gem_object *obj_priv; 547 struct drm_i915_gem_object *obj_priv;
465 int ret; 548 int ret = 0;
549
550 ret = i915_mutex_lock_interruptible(dev);
551 if (ret)
552 return ret;
466 553
467 obj = drm_gem_object_lookup(dev, file_priv, args->handle); 554 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
468 if (obj == NULL) 555 if (obj == NULL) {
469 return -ENOENT; 556 ret = -ENOENT;
557 goto unlock;
558 }
470 obj_priv = to_intel_bo(obj); 559 obj_priv = to_intel_bo(obj);
471 560
472 /* Bounds check source. */ 561 /* Bounds check source. */
473 if (args->offset > obj->size || args->size > obj->size - args->offset) { 562 if (args->offset > obj->size || args->size > obj->size - args->offset) {
474 ret = -EINVAL; 563 ret = -EINVAL;
475 goto err; 564 goto out;
476 } 565 }
477 566
567 if (args->size == 0)
568 goto out;
569
478 if (!access_ok(VERIFY_WRITE, 570 if (!access_ok(VERIFY_WRITE,
479 (char __user *)(uintptr_t)args->data_ptr, 571 (char __user *)(uintptr_t)args->data_ptr,
480 args->size)) { 572 args->size)) {
481 ret = -EFAULT; 573 ret = -EFAULT;
482 goto err; 574 goto out;
483 } 575 }
484 576
485 if (i915_gem_object_needs_bit17_swizzle(obj)) { 577 ret = fault_in_pages_writeable((char __user *)(uintptr_t)args->data_ptr,
486 ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv); 578 args->size);
487 } else { 579 if (ret) {
488 ret = i915_gem_shmem_pread_fast(dev, obj, args, file_priv); 580 ret = -EFAULT;
489 if (ret != 0) 581 goto out;
490 ret = i915_gem_shmem_pread_slow(dev, obj, args,
491 file_priv);
492 } 582 }
493 583
494err: 584 ret = i915_gem_object_get_pages_or_evict(obj);
495 drm_gem_object_unreference_unlocked(obj); 585 if (ret)
586 goto out;
587
588 ret = i915_gem_object_set_cpu_read_domain_range(obj,
589 args->offset,
590 args->size);
591 if (ret)
592 goto out_put;
593
594 ret = -EFAULT;
595 if (!i915_gem_object_needs_bit17_swizzle(obj))
596 ret = i915_gem_shmem_pread_fast(dev, obj, args, file_priv);
597 if (ret == -EFAULT)
598 ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv);
599
600out_put:
601 i915_gem_object_put_pages(obj);
602out:
603 drm_gem_object_unreference(obj);
604unlock:
605 mutex_unlock(&dev->struct_mutex);
496 return ret; 606 return ret;
497} 607}
498 608
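
Two idioms in the rewritten pread path are worth noting: the bounds check never computes offset + size (which can wrap) and instead compares against the space remaining, and the copy tries the non-faulting fast path first, falling back to the sleeping slow path only on -EFAULT. A self-contained version of the overflow-safe bounds check:

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    /* true iff [offset, offset + size) lies inside an object of
     * obj_size bytes; offset + size is never computed, so the test
     * cannot overflow */
    static bool range_ok(uint64_t offset, uint64_t size, uint64_t obj_size)
    {
            return offset <= obj_size && size <= obj_size - offset;
    }

    int main(void)
    {
            assert(range_ok(0, 4096, 4096));
            assert(!range_ok(4096, 1, 4096));
            assert(!range_ok(1, UINT64_MAX, 4096)); /* would wrap if summed */
            return 0;
    }
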
@@ -513,9 +623,7 @@ fast_user_write(struct io_mapping *mapping,
513 unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + page_offset, 623 unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + page_offset,
514 user_data, length); 624 user_data, length);
515 io_mapping_unmap_atomic(vaddr_atomic); 625 io_mapping_unmap_atomic(vaddr_atomic);
516 if (unwritten) 626 return unwritten;
517 return -EFAULT;
518 return 0;
519} 627}
520 628
521/* Here's the write path which can sleep for 629/* Here's the write path which can sleep for
@@ -548,18 +656,14 @@ fast_shmem_write(struct page **pages,
548 char __user *data, 656 char __user *data,
549 int length) 657 int length)
550{ 658{
551 char __iomem *vaddr; 659 char *vaddr;
552 unsigned long unwritten; 660 int ret;
553 661
554 vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT]); 662 vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT]);
555 if (vaddr == NULL) 663 ret = __copy_from_user_inatomic(vaddr + page_offset, data, length);
556 return -ENOMEM;
557 unwritten = __copy_from_user_inatomic(vaddr + page_offset, data, length);
558 kunmap_atomic(vaddr); 664 kunmap_atomic(vaddr);
559 665
560 if (unwritten) 666 return ret;
561 return -EFAULT;
562 return 0;
563} 667}
564 668
565/** 669/**
@@ -577,22 +681,10 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
577 loff_t offset, page_base; 681 loff_t offset, page_base;
578 char __user *user_data; 682 char __user *user_data;
579 int page_offset, page_length; 683 int page_offset, page_length;
580 int ret;
581 684
582 user_data = (char __user *) (uintptr_t) args->data_ptr; 685 user_data = (char __user *) (uintptr_t) args->data_ptr;
583 remain = args->size; 686 remain = args->size;
584 687
585
586 mutex_lock(&dev->struct_mutex);
587 ret = i915_gem_object_pin(obj, 0);
588 if (ret) {
589 mutex_unlock(&dev->struct_mutex);
590 return ret;
591 }
592 ret = i915_gem_object_set_to_gtt_domain(obj, 1);
593 if (ret)
594 goto fail;
595
596 obj_priv = to_intel_bo(obj); 688 obj_priv = to_intel_bo(obj);
597 offset = obj_priv->gtt_offset + args->offset; 689 offset = obj_priv->gtt_offset + args->offset;
598 690
@@ -609,26 +701,21 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
609 if ((page_offset + remain) > PAGE_SIZE) 701 if ((page_offset + remain) > PAGE_SIZE)
610 page_length = PAGE_SIZE - page_offset; 702 page_length = PAGE_SIZE - page_offset;
611 703
612 ret = fast_user_write (dev_priv->mm.gtt_mapping, page_base,
613 page_offset, user_data, page_length);
614
615 /* If we get a fault while copying data, then (presumably) our 704 /* If we get a fault while copying data, then (presumably) our
616 * source page isn't available. Return the error and we'll 705 * source page isn't available. Return the error and we'll
617 * retry in the slow path. 706 * retry in the slow path.
618 */ 707 */
619 if (ret) 708 if (fast_user_write(dev_priv->mm.gtt_mapping, page_base,
620 goto fail; 709 page_offset, user_data, page_length))
710
711 return -EFAULT;
621 712
622 remain -= page_length; 713 remain -= page_length;
623 user_data += page_length; 714 user_data += page_length;
624 offset += page_length; 715 offset += page_length;
625 } 716 }
626 717
627fail: 718 return 0;
628 i915_gem_object_unpin(obj);
629 mutex_unlock(&dev->struct_mutex);
630
631 return ret;
632} 719}
633 720
634/** 721/**
@@ -665,27 +752,24 @@ i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
665 last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE; 752 last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
666 num_pages = last_data_page - first_data_page + 1; 753 num_pages = last_data_page - first_data_page + 1;
667 754
668 user_pages = drm_calloc_large(num_pages, sizeof(struct page *)); 755 user_pages = drm_malloc_ab(num_pages, sizeof(struct page *));
669 if (user_pages == NULL) 756 if (user_pages == NULL)
670 return -ENOMEM; 757 return -ENOMEM;
671 758
759 mutex_unlock(&dev->struct_mutex);
672 down_read(&mm->mmap_sem); 760 down_read(&mm->mmap_sem);
673 pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr, 761 pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
674 num_pages, 0, 0, user_pages, NULL); 762 num_pages, 0, 0, user_pages, NULL);
675 up_read(&mm->mmap_sem); 763 up_read(&mm->mmap_sem);
764 mutex_lock(&dev->struct_mutex);
676 if (pinned_pages < num_pages) { 765 if (pinned_pages < num_pages) {
677 ret = -EFAULT; 766 ret = -EFAULT;
678 goto out_unpin_pages; 767 goto out_unpin_pages;
679 } 768 }
680 769
681 mutex_lock(&dev->struct_mutex);
682 ret = i915_gem_object_pin(obj, 0);
683 if (ret)
684 goto out_unlock;
685
686 ret = i915_gem_object_set_to_gtt_domain(obj, 1); 770 ret = i915_gem_object_set_to_gtt_domain(obj, 1);
687 if (ret) 771 if (ret)
688 goto out_unpin_object; 772 goto out_unpin_pages;
689 773
690 obj_priv = to_intel_bo(obj); 774 obj_priv = to_intel_bo(obj);
691 offset = obj_priv->gtt_offset + args->offset; 775 offset = obj_priv->gtt_offset + args->offset;
@@ -721,10 +805,6 @@ i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
721 data_ptr += page_length; 805 data_ptr += page_length;
722 } 806 }
723 807
724out_unpin_object:
725 i915_gem_object_unpin(obj);
726out_unlock:
727 mutex_unlock(&dev->struct_mutex);
728out_unpin_pages: 808out_unpin_pages:
729 for (i = 0; i < pinned_pages; i++) 809 for (i = 0; i < pinned_pages; i++)
730 page_cache_release(user_pages[i]); 810 page_cache_release(user_pages[i]);
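
Here and in the shmem slow paths, the rework drops struct_mutex before down_read(&mm->mmap_sem)/get_user_pages() and retakes it afterwards: pinning user pages can fault and sleep, and holding the device mutex across that both risks lock inversion with the fault handler and stalls every other GEM caller. The shape of the idiom with pthreads — pin_user_buffer() is a made-up stand-in for get_user_pages():

    #include <pthread.h>

    /* hypothetical: pins n user pages into pages[]; may fault and sleep */
    int pin_user_buffer(void *buf, int n, void **pages);

    static int pin_with_lock_dropped(pthread_mutex_t *lock,
                                     void *buf, int n, void **pages)
    {
            int pinned;

            pthread_mutex_unlock(lock);     /* don't hold it across a fault */
            pinned = pin_user_buffer(buf, n, pages);
            pthread_mutex_lock(lock);       /* revalidate state after this */

            return pinned < n ? -1 : 0;
    }
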
@@ -747,21 +827,10 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
747 loff_t offset, page_base; 827 loff_t offset, page_base;
748 char __user *user_data; 828 char __user *user_data;
749 int page_offset, page_length; 829 int page_offset, page_length;
750 int ret;
751 830
752 user_data = (char __user *) (uintptr_t) args->data_ptr; 831 user_data = (char __user *) (uintptr_t) args->data_ptr;
753 remain = args->size; 832 remain = args->size;
754 833
755 mutex_lock(&dev->struct_mutex);
756
757 ret = i915_gem_object_get_pages(obj, 0);
758 if (ret != 0)
759 goto fail_unlock;
760
761 ret = i915_gem_object_set_to_cpu_domain(obj, 1);
762 if (ret != 0)
763 goto fail_put_pages;
764
765 obj_priv = to_intel_bo(obj); 834 obj_priv = to_intel_bo(obj);
766 offset = args->offset; 835 offset = args->offset;
767 obj_priv->dirty = 1; 836 obj_priv->dirty = 1;
@@ -779,23 +848,17 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
779 if ((page_offset + remain) > PAGE_SIZE) 848 if ((page_offset + remain) > PAGE_SIZE)
780 page_length = PAGE_SIZE - page_offset; 849 page_length = PAGE_SIZE - page_offset;
781 850
782 ret = fast_shmem_write(obj_priv->pages, 851 if (fast_shmem_write(obj_priv->pages,
783 page_base, page_offset, 852 page_base, page_offset,
784 user_data, page_length); 853 user_data, page_length))
785 if (ret) 854 return -EFAULT;
786 goto fail_put_pages;
787 855
788 remain -= page_length; 856 remain -= page_length;
789 user_data += page_length; 857 user_data += page_length;
790 offset += page_length; 858 offset += page_length;
791 } 859 }
792 860
793fail_put_pages: 861 return 0;
794 i915_gem_object_put_pages(obj);
795fail_unlock:
796 mutex_unlock(&dev->struct_mutex);
797
798 return ret;
799} 862}
800 863
801/** 864/**
@@ -833,30 +896,26 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
833 last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE; 896 last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
834 num_pages = last_data_page - first_data_page + 1; 897 num_pages = last_data_page - first_data_page + 1;
835 898
836 user_pages = drm_calloc_large(num_pages, sizeof(struct page *)); 899 user_pages = drm_malloc_ab(num_pages, sizeof(struct page *));
837 if (user_pages == NULL) 900 if (user_pages == NULL)
838 return -ENOMEM; 901 return -ENOMEM;
839 902
903 mutex_unlock(&dev->struct_mutex);
840 down_read(&mm->mmap_sem); 904 down_read(&mm->mmap_sem);
841 pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr, 905 pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
842 num_pages, 0, 0, user_pages, NULL); 906 num_pages, 0, 0, user_pages, NULL);
843 up_read(&mm->mmap_sem); 907 up_read(&mm->mmap_sem);
908 mutex_lock(&dev->struct_mutex);
844 if (pinned_pages < num_pages) { 909 if (pinned_pages < num_pages) {
845 ret = -EFAULT; 910 ret = -EFAULT;
846 goto fail_put_user_pages; 911 goto out;
847 } 912 }
848 913
849 do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj); 914 ret = i915_gem_object_set_to_cpu_domain(obj, 1);
850
851 mutex_lock(&dev->struct_mutex);
852
853 ret = i915_gem_object_get_pages_or_evict(obj);
854 if (ret) 915 if (ret)
855 goto fail_unlock; 916 goto out;
856 917
857 ret = i915_gem_object_set_to_cpu_domain(obj, 1); 918 do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
858 if (ret != 0)
859 goto fail_put_pages;
860 919
861 obj_priv = to_intel_bo(obj); 920 obj_priv = to_intel_bo(obj);
862 offset = args->offset; 921 offset = args->offset;
@@ -902,11 +961,7 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
902 offset += page_length; 961 offset += page_length;
903 } 962 }
904 963
905fail_put_pages: 964out:
906 i915_gem_object_put_pages(obj);
907fail_unlock:
908 mutex_unlock(&dev->struct_mutex);
909fail_put_user_pages:
910 for (i = 0; i < pinned_pages; i++) 965 for (i = 0; i < pinned_pages; i++)
911 page_cache_release(user_pages[i]); 966 page_cache_release(user_pages[i]);
912 drm_free_large(user_pages); 967 drm_free_large(user_pages);
@@ -921,29 +976,46 @@ fail_put_user_pages:
921 */ 976 */
922int 977int
923i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, 978i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
924 struct drm_file *file_priv) 979 struct drm_file *file)
925{ 980{
926 struct drm_i915_gem_pwrite *args = data; 981 struct drm_i915_gem_pwrite *args = data;
927 struct drm_gem_object *obj; 982 struct drm_gem_object *obj;
928 struct drm_i915_gem_object *obj_priv; 983 struct drm_i915_gem_object *obj_priv;
929 int ret = 0; 984 int ret = 0;
930 985
931 obj = drm_gem_object_lookup(dev, file_priv, args->handle); 986 ret = i915_mutex_lock_interruptible(dev);
932 if (obj == NULL) 987 if (ret)
933 return -ENOENT; 988 return ret;
989
990 obj = drm_gem_object_lookup(dev, file, args->handle);
991 if (obj == NULL) {
992 ret = -ENOENT;
993 goto unlock;
994 }
934 obj_priv = to_intel_bo(obj); 995 obj_priv = to_intel_bo(obj);
935 996
997
936 /* Bounds check destination. */ 998 /* Bounds check destination. */
937 if (args->offset > obj->size || args->size > obj->size - args->offset) { 999 if (args->offset > obj->size || args->size > obj->size - args->offset) {
938 ret = -EINVAL; 1000 ret = -EINVAL;
939 goto err; 1001 goto out;
940 } 1002 }
941 1003
1004 if (args->size == 0)
1005 goto out;
1006
942 if (!access_ok(VERIFY_READ, 1007 if (!access_ok(VERIFY_READ,
943 (char __user *)(uintptr_t)args->data_ptr, 1008 (char __user *)(uintptr_t)args->data_ptr,
944 args->size)) { 1009 args->size)) {
945 ret = -EFAULT; 1010 ret = -EFAULT;
946 goto err; 1011 goto out;
1012 }
1013
1014 ret = fault_in_pages_readable((char __user *)(uintptr_t)args->data_ptr,
1015 args->size);
1016 if (ret) {
1017 ret = -EFAULT;
1018 goto out;
947 } 1019 }
948 1020
949 /* We can only do the GTT pwrite on untiled buffers, as otherwise 1021 /* We can only do the GTT pwrite on untiled buffers, as otherwise
@@ -953,32 +1025,47 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
953 * perspective, requiring manual detiling by the client. 1025 * perspective, requiring manual detiling by the client.
954 */ 1026 */
955 if (obj_priv->phys_obj) 1027 if (obj_priv->phys_obj)
956 ret = i915_gem_phys_pwrite(dev, obj, args, file_priv); 1028 ret = i915_gem_phys_pwrite(dev, obj, args, file);
957 else if (obj_priv->tiling_mode == I915_TILING_NONE && 1029 else if (obj_priv->tiling_mode == I915_TILING_NONE &&
958 dev->gtt_total != 0 && 1030 obj_priv->gtt_space &&
959 obj->write_domain != I915_GEM_DOMAIN_CPU) { 1031 obj->write_domain != I915_GEM_DOMAIN_CPU) {
960 ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file_priv); 1032 ret = i915_gem_object_pin(obj, 0);
961 if (ret == -EFAULT) { 1033 if (ret)
962 ret = i915_gem_gtt_pwrite_slow(dev, obj, args, 1034 goto out;
963 file_priv); 1035
964 } 1036 ret = i915_gem_object_set_to_gtt_domain(obj, 1);
965 } else if (i915_gem_object_needs_bit17_swizzle(obj)) { 1037 if (ret)
966 ret = i915_gem_shmem_pwrite_slow(dev, obj, args, file_priv); 1038 goto out_unpin;
1039
1040 ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
1041 if (ret == -EFAULT)
1042 ret = i915_gem_gtt_pwrite_slow(dev, obj, args, file);
1043
1044out_unpin:
1045 i915_gem_object_unpin(obj);
967 } else { 1046 } else {
968 ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file_priv); 1047 ret = i915_gem_object_get_pages_or_evict(obj);
969 if (ret == -EFAULT) { 1048 if (ret)
970 ret = i915_gem_shmem_pwrite_slow(dev, obj, args, 1049 goto out;
971 file_priv);
972 }
973 }
974 1050
975#if WATCH_PWRITE 1051 ret = i915_gem_object_set_to_cpu_domain(obj, 1);
976 if (ret) 1052 if (ret)
977 DRM_INFO("pwrite failed %d\n", ret); 1053 goto out_put;
978#endif
979 1054
980err: 1055 ret = -EFAULT;
981 drm_gem_object_unreference_unlocked(obj); 1056 if (!i915_gem_object_needs_bit17_swizzle(obj))
1057 ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file);
1058 if (ret == -EFAULT)
1059 ret = i915_gem_shmem_pwrite_slow(dev, obj, args, file);
1060
1061out_put:
1062 i915_gem_object_put_pages(obj);
1063 }
1064
1065out:
1066 drm_gem_object_unreference(obj);
1067unlock:
1068 mutex_unlock(&dev->struct_mutex);
982 return ret; 1069 return ret;
983} 1070}
984 1071
@@ -1014,19 +1101,19 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
1014 if (write_domain != 0 && read_domains != write_domain) 1101 if (write_domain != 0 && read_domains != write_domain)
1015 return -EINVAL; 1102 return -EINVAL;
1016 1103
1104 ret = i915_mutex_lock_interruptible(dev);
1105 if (ret)
1106 return ret;
1107
1017 obj = drm_gem_object_lookup(dev, file_priv, args->handle); 1108 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
1018 if (obj == NULL) 1109 if (obj == NULL) {
1019 return -ENOENT; 1110 ret = -ENOENT;
1111 goto unlock;
1112 }
1020 obj_priv = to_intel_bo(obj); 1113 obj_priv = to_intel_bo(obj);
1021 1114
1022 mutex_lock(&dev->struct_mutex);
1023
1024 intel_mark_busy(dev, obj); 1115 intel_mark_busy(dev, obj);
1025 1116
1026#if WATCH_BUF
1027 DRM_INFO("set_domain_ioctl %p(%zd), %08x %08x\n",
1028 obj, obj->size, read_domains, write_domain);
1029#endif
1030 if (read_domains & I915_GEM_DOMAIN_GTT) { 1117 if (read_domains & I915_GEM_DOMAIN_GTT) {
1031 ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0); 1118 ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
1032 1119
@@ -1050,12 +1137,12 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
1050 ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0); 1137 ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
1051 } 1138 }
1052 1139
1053
1054 /* Maintain LRU order of "inactive" objects */ 1140 /* Maintain LRU order of "inactive" objects */
1055 if (ret == 0 && i915_gem_object_is_inactive(obj_priv)) 1141 if (ret == 0 && i915_gem_object_is_inactive(obj_priv))
1056 list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list); 1142 list_move_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list);
1057 1143
1058 drm_gem_object_unreference(obj); 1144 drm_gem_object_unreference(obj);
1145unlock:
1059 mutex_unlock(&dev->struct_mutex); 1146 mutex_unlock(&dev->struct_mutex);
1060 return ret; 1147 return ret;
1061} 1148}
@@ -1069,30 +1156,27 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
1069{ 1156{
1070 struct drm_i915_gem_sw_finish *args = data; 1157 struct drm_i915_gem_sw_finish *args = data;
1071 struct drm_gem_object *obj; 1158 struct drm_gem_object *obj;
1072 struct drm_i915_gem_object *obj_priv;
1073 int ret = 0; 1159 int ret = 0;
1074 1160
1075 if (!(dev->driver->driver_features & DRIVER_GEM)) 1161 if (!(dev->driver->driver_features & DRIVER_GEM))
1076 return -ENODEV; 1162 return -ENODEV;
1077 1163
1078 mutex_lock(&dev->struct_mutex); 1164 ret = i915_mutex_lock_interruptible(dev);
1165 if (ret)
1166 return ret;
1167
1079 obj = drm_gem_object_lookup(dev, file_priv, args->handle); 1168 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
1080 if (obj == NULL) { 1169 if (obj == NULL) {
1081 mutex_unlock(&dev->struct_mutex); 1170 ret = -ENOENT;
1082 return -ENOENT; 1171 goto unlock;
1083 } 1172 }
1084 1173
1085#if WATCH_BUF
1086 DRM_INFO("%s: sw_finish %d (%p %zd)\n",
1087 __func__, args->handle, obj, obj->size);
1088#endif
1089 obj_priv = to_intel_bo(obj);
1090
1091 /* Pinned buffers may be scanout, so flush the cache */ 1174 /* Pinned buffers may be scanout, so flush the cache */
1092 if (obj_priv->pin_count) 1175 if (to_intel_bo(obj)->pin_count)
1093 i915_gem_object_flush_cpu_write_domain(obj); 1176 i915_gem_object_flush_cpu_write_domain(obj);
1094 1177
1095 drm_gem_object_unreference(obj); 1178 drm_gem_object_unreference(obj);
1179unlock:
1096 mutex_unlock(&dev->struct_mutex); 1180 mutex_unlock(&dev->struct_mutex);
1097 return ret; 1181 return ret;
1098} 1182}
@@ -1181,13 +1265,13 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1181 1265
1182 /* Need a new fence register? */ 1266 /* Need a new fence register? */
1183 if (obj_priv->tiling_mode != I915_TILING_NONE) { 1267 if (obj_priv->tiling_mode != I915_TILING_NONE) {
1184 ret = i915_gem_object_get_fence_reg(obj); 1268 ret = i915_gem_object_get_fence_reg(obj, true);
1185 if (ret) 1269 if (ret)
1186 goto unlock; 1270 goto unlock;
1187 } 1271 }
1188 1272
1189 if (i915_gem_object_is_inactive(obj_priv)) 1273 if (i915_gem_object_is_inactive(obj_priv))
1190 list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list); 1274 list_move_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list);
1191 1275
1192 pfn = ((dev->agp->base + obj_priv->gtt_offset) >> PAGE_SHIFT) + 1276 pfn = ((dev->agp->base + obj_priv->gtt_offset) >> PAGE_SHIFT) +
1193 page_offset; 1277 page_offset;
@@ -1246,7 +1330,7 @@ i915_gem_create_mmap_offset(struct drm_gem_object *obj)
1246 obj->size / PAGE_SIZE, 0, 0); 1330 obj->size / PAGE_SIZE, 0, 0);
1247 if (!list->file_offset_node) { 1331 if (!list->file_offset_node) {
1248 DRM_ERROR("failed to allocate offset for bo %d\n", obj->name); 1332 DRM_ERROR("failed to allocate offset for bo %d\n", obj->name);
1249 ret = -ENOMEM; 1333 ret = -ENOSPC;
1250 goto out_free_list; 1334 goto out_free_list;
1251 } 1335 }
1252 1336
@@ -1258,9 +1342,9 @@ i915_gem_create_mmap_offset(struct drm_gem_object *obj)
1258 } 1342 }
1259 1343
1260 list->hash.key = list->file_offset_node->start; 1344 list->hash.key = list->file_offset_node->start;
1261 if (drm_ht_insert_item(&mm->offset_hash, &list->hash)) { 1345 ret = drm_ht_insert_item(&mm->offset_hash, &list->hash);
1346 if (ret) {
1262 DRM_ERROR("failed to add to map hash\n"); 1347 DRM_ERROR("failed to add to map hash\n");
1263 ret = -ENOMEM;
1264 goto out_free_mm; 1348 goto out_free_mm;
1265 } 1349 }
1266 1350
@@ -1345,14 +1429,14 @@ i915_gem_get_gtt_alignment(struct drm_gem_object *obj)
1345 * Minimum alignment is 4k (GTT page size), but might be greater 1429 * Minimum alignment is 4k (GTT page size), but might be greater
1346 * if a fence register is needed for the object. 1430 * if a fence register is needed for the object.
1347 */ 1431 */
1348 if (IS_I965G(dev) || obj_priv->tiling_mode == I915_TILING_NONE) 1432 if (INTEL_INFO(dev)->gen >= 4 || obj_priv->tiling_mode == I915_TILING_NONE)
1349 return 4096; 1433 return 4096;
1350 1434
1351 /* 1435 /*
1352 * Previous chips need to be aligned to the size of the smallest 1436 * Previous chips need to be aligned to the size of the smallest
1353 * fence register that can contain the object. 1437 * fence register that can contain the object.
1354 */ 1438 */
1355 if (IS_I9XX(dev)) 1439 if (INTEL_INFO(dev)->gen == 3)
1356 start = 1024*1024; 1440 start = 1024*1024;
1357 else 1441 else
1358 start = 512*1024; 1442 start = 512*1024;
@@ -1390,29 +1474,27 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
1390 if (!(dev->driver->driver_features & DRIVER_GEM)) 1474 if (!(dev->driver->driver_features & DRIVER_GEM))
1391 return -ENODEV; 1475 return -ENODEV;
1392 1476
1393 obj = drm_gem_object_lookup(dev, file_priv, args->handle); 1477 ret = i915_mutex_lock_interruptible(dev);
1394 if (obj == NULL) 1478 if (ret)
1395 return -ENOENT; 1479 return ret;
1396
1397 mutex_lock(&dev->struct_mutex);
1398 1480
1481 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
1482 if (obj == NULL) {
1483 ret = -ENOENT;
1484 goto unlock;
1485 }
1399 obj_priv = to_intel_bo(obj); 1486 obj_priv = to_intel_bo(obj);
1400 1487
1401 if (obj_priv->madv != I915_MADV_WILLNEED) { 1488 if (obj_priv->madv != I915_MADV_WILLNEED) {
1402 DRM_ERROR("Attempting to mmap a purgeable buffer\n"); 1489 DRM_ERROR("Attempting to mmap a purgeable buffer\n");
1403 drm_gem_object_unreference(obj); 1490 ret = -EINVAL;
1404 mutex_unlock(&dev->struct_mutex); 1491 goto out;
1405 return -EINVAL;
1406 } 1492 }
1407 1493
1408
1409 if (!obj_priv->mmap_offset) { 1494 if (!obj_priv->mmap_offset) {
1410 ret = i915_gem_create_mmap_offset(obj); 1495 ret = i915_gem_create_mmap_offset(obj);
1411 if (ret) { 1496 if (ret)
1412 drm_gem_object_unreference(obj); 1497 goto out;
1413 mutex_unlock(&dev->struct_mutex);
1414 return ret;
1415 }
1416 } 1498 }
1417 1499
1418 args->offset = obj_priv->mmap_offset; 1500 args->offset = obj_priv->mmap_offset;
@@ -1423,20 +1505,18 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
1423 */ 1505 */
1424 if (!obj_priv->agp_mem) { 1506 if (!obj_priv->agp_mem) {
1425 ret = i915_gem_object_bind_to_gtt(obj, 0); 1507 ret = i915_gem_object_bind_to_gtt(obj, 0);
1426 if (ret) { 1508 if (ret)
1427 drm_gem_object_unreference(obj); 1509 goto out;
1428 mutex_unlock(&dev->struct_mutex);
1429 return ret;
1430 }
1431 } 1510 }
1432 1511
1512out:
1433 drm_gem_object_unreference(obj); 1513 drm_gem_object_unreference(obj);
1514unlock:
1434 mutex_unlock(&dev->struct_mutex); 1515 mutex_unlock(&dev->struct_mutex);
1435 1516 return ret;
1436 return 0;
1437} 1517}
1438 1518
1439void 1519static void
1440i915_gem_object_put_pages(struct drm_gem_object *obj) 1520i915_gem_object_put_pages(struct drm_gem_object *obj)
1441{ 1521{
1442 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); 1522 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
@@ -1470,13 +1550,25 @@ i915_gem_object_put_pages(struct drm_gem_object *obj)
1470 obj_priv->pages = NULL; 1550 obj_priv->pages = NULL;
1471} 1551}
1472 1552
1553static uint32_t
1554i915_gem_next_request_seqno(struct drm_device *dev,
1555 struct intel_ring_buffer *ring)
1556{
1557 drm_i915_private_t *dev_priv = dev->dev_private;
1558
1559 ring->outstanding_lazy_request = true;
1560 return dev_priv->next_seqno;
1561}
1562
1473static void 1563static void
1474i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno, 1564i915_gem_object_move_to_active(struct drm_gem_object *obj,
1475 struct intel_ring_buffer *ring) 1565 struct intel_ring_buffer *ring)
1476{ 1566{
1477 struct drm_device *dev = obj->dev; 1567 struct drm_device *dev = obj->dev;
1478 drm_i915_private_t *dev_priv = dev->dev_private; 1568 struct drm_i915_private *dev_priv = dev->dev_private;
1479 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); 1569 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
1570 uint32_t seqno = i915_gem_next_request_seqno(dev, ring);
1571
1480 BUG_ON(ring == NULL); 1572 BUG_ON(ring == NULL);
1481 obj_priv->ring = ring; 1573 obj_priv->ring = ring;
1482 1574
@@ -1485,10 +1577,10 @@ i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno,
1485 drm_gem_object_reference(obj); 1577 drm_gem_object_reference(obj);
1486 obj_priv->active = 1; 1578 obj_priv->active = 1;
1487 } 1579 }
1580
1488 /* Move from whatever list we were on to the tail of execution. */ 1581 /* Move from whatever list we were on to the tail of execution. */
1489 spin_lock(&dev_priv->mm.active_list_lock); 1582 list_move_tail(&obj_priv->mm_list, &dev_priv->mm.active_list);
1490 list_move_tail(&obj_priv->list, &ring->active_list); 1583 list_move_tail(&obj_priv->ring_list, &ring->active_list);
1491 spin_unlock(&dev_priv->mm.active_list_lock);
1492 obj_priv->last_rendering_seqno = seqno; 1584 obj_priv->last_rendering_seqno = seqno;
1493} 1585}
1494 1586
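
The split of the old obj_priv->list into mm_list (global active/flushing/inactive state) and ring_list (per-ring execution order) means one object sits on two lists at once through two embedded nodes, which is why move-to-active now does two list_move_tail() calls. A minimal intrusive-list sketch of that layout, with the list helpers reimplemented so the example stands alone:

    struct list_head { struct list_head *prev, *next; };

    static void list_del_entry(struct list_head *e)
    {
            e->prev->next = e->next;
            e->next->prev = e->prev;
    }

    static void list_add_tail(struct list_head *e, struct list_head *h)
    {
            e->prev = h->prev;
            e->next = h;
            h->prev->next = e;
            h->prev = e;
    }

    static void list_move_tail(struct list_head *e, struct list_head *h)
    {
            list_del_entry(e);
            list_add_tail(e, h);
    }

    /* one object, two independent list memberships */
    struct gem_object {
            struct list_head mm_list;    /* active/flushing/inactive */
            struct list_head ring_list;  /* order of execution on a ring */
    };
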
@@ -1500,7 +1592,8 @@ i915_gem_object_move_to_flushing(struct drm_gem_object *obj)
1500 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); 1592 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
1501 1593
1502 BUG_ON(!obj_priv->active); 1594 BUG_ON(!obj_priv->active);
1503 list_move_tail(&obj_priv->list, &dev_priv->mm.flushing_list); 1595 list_move_tail(&obj_priv->mm_list, &dev_priv->mm.flushing_list);
1596 list_del_init(&obj_priv->ring_list);
1504 obj_priv->last_rendering_seqno = 0; 1597 obj_priv->last_rendering_seqno = 0;
1505} 1598}
1506 1599
@@ -1538,11 +1631,11 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
1538 drm_i915_private_t *dev_priv = dev->dev_private; 1631 drm_i915_private_t *dev_priv = dev->dev_private;
1539 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); 1632 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
1540 1633
1541 i915_verify_inactive(dev, __FILE__, __LINE__);
1542 if (obj_priv->pin_count != 0) 1634 if (obj_priv->pin_count != 0)
1543 list_del_init(&obj_priv->list); 1635 list_move_tail(&obj_priv->mm_list, &dev_priv->mm.pinned_list);
1544 else 1636 else
1545 list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list); 1637 list_move_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list);
1638 list_del_init(&obj_priv->ring_list);
1546 1639
1547 BUG_ON(!list_empty(&obj_priv->gpu_write_list)); 1640 BUG_ON(!list_empty(&obj_priv->gpu_write_list));
1548 1641
@@ -1552,30 +1645,28 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
1552 obj_priv->active = 0; 1645 obj_priv->active = 0;
1553 drm_gem_object_unreference(obj); 1646 drm_gem_object_unreference(obj);
1554 } 1647 }
1555 i915_verify_inactive(dev, __FILE__, __LINE__); 1648 WARN_ON(i915_verify_lists(dev));
1556} 1649}
1557 1650
1558static void 1651static void
1559i915_gem_process_flushing_list(struct drm_device *dev, 1652i915_gem_process_flushing_list(struct drm_device *dev,
1560 uint32_t flush_domains, uint32_t seqno, 1653 uint32_t flush_domains,
1561 struct intel_ring_buffer *ring) 1654 struct intel_ring_buffer *ring)
1562{ 1655{
1563 drm_i915_private_t *dev_priv = dev->dev_private; 1656 drm_i915_private_t *dev_priv = dev->dev_private;
1564 struct drm_i915_gem_object *obj_priv, *next; 1657 struct drm_i915_gem_object *obj_priv, *next;
1565 1658
1566 list_for_each_entry_safe(obj_priv, next, 1659 list_for_each_entry_safe(obj_priv, next,
1567 &dev_priv->mm.gpu_write_list, 1660 &ring->gpu_write_list,
1568 gpu_write_list) { 1661 gpu_write_list) {
1569 struct drm_gem_object *obj = &obj_priv->base; 1662 struct drm_gem_object *obj = &obj_priv->base;
1570 1663
1571 if ((obj->write_domain & flush_domains) == 1664 if (obj->write_domain & flush_domains) {
1572 obj->write_domain &&
1573 obj_priv->ring->ring_flag == ring->ring_flag) {
1574 uint32_t old_write_domain = obj->write_domain; 1665 uint32_t old_write_domain = obj->write_domain;
1575 1666
1576 obj->write_domain = 0; 1667 obj->write_domain = 0;
1577 list_del_init(&obj_priv->gpu_write_list); 1668 list_del_init(&obj_priv->gpu_write_list);
1578 i915_gem_object_move_to_active(obj, seqno, ring); 1669 i915_gem_object_move_to_active(obj, ring);
1579 1670
1580 /* update the fence lru list */ 1671 /* update the fence lru list */
1581 if (obj_priv->fence_reg != I915_FENCE_REG_NONE) { 1672 if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
@@ -1593,23 +1684,27 @@ i915_gem_process_flushing_list(struct drm_device *dev,
1593} 1684}
1594 1685
1595uint32_t 1686uint32_t
1596i915_add_request(struct drm_device *dev, struct drm_file *file_priv, 1687i915_add_request(struct drm_device *dev,
1597 uint32_t flush_domains, struct intel_ring_buffer *ring) 1688 struct drm_file *file,
1689 struct drm_i915_gem_request *request,
1690 struct intel_ring_buffer *ring)
1598{ 1691{
1599 drm_i915_private_t *dev_priv = dev->dev_private; 1692 drm_i915_private_t *dev_priv = dev->dev_private;
1600 struct drm_i915_file_private *i915_file_priv = NULL; 1693 struct drm_i915_file_private *file_priv = NULL;
1601 struct drm_i915_gem_request *request;
1602 uint32_t seqno; 1694 uint32_t seqno;
1603 int was_empty; 1695 int was_empty;
1604 1696
1605 if (file_priv != NULL) 1697 if (file != NULL)
1606 i915_file_priv = file_priv->driver_priv; 1698 file_priv = file->driver_priv;
1607 1699
1608 request = kzalloc(sizeof(*request), GFP_KERNEL); 1700 if (request == NULL) {
1609 if (request == NULL) 1701 request = kzalloc(sizeof(*request), GFP_KERNEL);
1610 return 0; 1702 if (request == NULL)
1703 return 0;
1704 }
1611 1705
1612 seqno = ring->add_request(dev, ring, file_priv, flush_domains); 1706 seqno = ring->add_request(dev, ring, 0);
1707 ring->outstanding_lazy_request = false;
1613 1708
1614 request->seqno = seqno; 1709 request->seqno = seqno;
1615 request->ring = ring; 1710 request->ring = ring;
@@ -1617,23 +1712,20 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
1617 was_empty = list_empty(&ring->request_list); 1712 was_empty = list_empty(&ring->request_list);
1618 list_add_tail(&request->list, &ring->request_list); 1713 list_add_tail(&request->list, &ring->request_list);
1619 1714
1620 if (i915_file_priv) { 1715 if (file_priv) {
1716 spin_lock(&file_priv->mm.lock);
1717 request->file_priv = file_priv;
1621 list_add_tail(&request->client_list, 1718 list_add_tail(&request->client_list,
1622 &i915_file_priv->mm.request_list); 1719 &file_priv->mm.request_list);
1623 } else { 1720 spin_unlock(&file_priv->mm.lock);
1624 INIT_LIST_HEAD(&request->client_list);
1625 } 1721 }
1626 1722
1627 /* Associate any objects on the flushing list matching the write
1628 * domain we're flushing with our flush.
1629 */
1630 if (flush_domains != 0)
1631 i915_gem_process_flushing_list(dev, flush_domains, seqno, ring);
1632
1633 if (!dev_priv->mm.suspended) { 1723 if (!dev_priv->mm.suspended) {
1634 mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD); 1724 mod_timer(&dev_priv->hangcheck_timer,
1725 jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
1635 if (was_empty) 1726 if (was_empty)
1636 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ); 1727 queue_delayed_work(dev_priv->wq,
1728 &dev_priv->mm.retire_work, HZ);
1637 } 1729 }
1638 return seqno; 1730 return seqno;
1639} 1731}
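
Note the quiet fix in this hunk: the old code armed the hangcheck timer with jiffies + DRM_I915_HANGCHECK_PERIOD, treating what the new conversion implies is a millisecond constant as a raw jiffy count, so the effective timeout scaled with HZ. A toy illustration of why the msecs_to_jiffies() conversion matters — the HZ value and rounding here are simplified:

    #include <stdio.h>

    #define HZ 250  /* example tick rate; kernels commonly use 100-1000 */

    static unsigned long msecs_to_jiffies(unsigned int ms)
    {
            return ((unsigned long)ms * HZ + 999) / 1000;   /* round up */
    }

    int main(void)
    {
            /* at HZ=250, a 1500 ms period is 375 jiffies -- passing the
             * raw 1500 would have waited 6 seconds instead of 1.5 */
            printf("%lu\n", msecs_to_jiffies(1500));
            return 0;
    }
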
@@ -1644,91 +1736,105 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
  * Ensures that all commands in the ring are finished
  * before signalling the CPU
  */
-static uint32_t
+static void
 i915_retire_commands(struct drm_device *dev, struct intel_ring_buffer *ring)
 {
 	uint32_t flush_domains = 0;
 
 	/* The sampler always gets flushed on i965 (sigh) */
-	if (IS_I965G(dev))
+	if (INTEL_INFO(dev)->gen >= 4)
 		flush_domains |= I915_GEM_DOMAIN_SAMPLER;
 
 	ring->flush(dev, ring,
 		    I915_GEM_DOMAIN_COMMAND, flush_domains);
-	return flush_domains;
 }
 
-/**
- * Moves buffers associated only with the given active seqno from the active
- * to inactive list, potentially freeing them.
- */
-static void
-i915_gem_retire_request(struct drm_device *dev,
-			struct drm_i915_gem_request *request)
+static inline void
+i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
 {
-	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_file_private *file_priv = request->file_priv;
 
-	trace_i915_gem_request_retire(dev, request->seqno);
+	if (!file_priv)
+		return;
 
-	/* Move any buffers on the active list that are no longer referenced
-	 * by the ringbuffer to the flushing/inactive lists as appropriate.
-	 */
-	spin_lock(&dev_priv->mm.active_list_lock);
-	while (!list_empty(&request->ring->active_list)) {
-		struct drm_gem_object *obj;
-		struct drm_i915_gem_object *obj_priv;
+	spin_lock(&file_priv->mm.lock);
+	list_del(&request->client_list);
+	request->file_priv = NULL;
+	spin_unlock(&file_priv->mm.lock);
+}
 
-		obj_priv = list_first_entry(&request->ring->active_list,
-					    struct drm_i915_gem_object,
-					    list);
-		obj = &obj_priv->base;
+static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
+				      struct intel_ring_buffer *ring)
+{
+	while (!list_empty(&ring->request_list)) {
+		struct drm_i915_gem_request *request;
 
-		/* If the seqno being retired doesn't match the oldest in the
-		 * list, then the oldest in the list must still be newer than
-		 * this seqno.
-		 */
-		if (obj_priv->last_rendering_seqno != request->seqno)
-			goto out;
+		request = list_first_entry(&ring->request_list,
+					   struct drm_i915_gem_request,
+					   list);
 
-#if WATCH_LRU
-		DRM_INFO("%s: retire %d moves to inactive list %p\n",
-			 __func__, request->seqno, obj);
-#endif
+		list_del(&request->list);
+		i915_gem_request_remove_from_client(request);
+		kfree(request);
+	}
 
-		if (obj->write_domain != 0)
-			i915_gem_object_move_to_flushing(obj);
-		else {
-			/* Take a reference on the object so it won't be
-			 * freed while the spinlock is held.  The list
-			 * protection for this spinlock is safe when breaking
-			 * the lock like this since the next thing we do
-			 * is just get the head of the list again.
-			 */
-			drm_gem_object_reference(obj);
-			i915_gem_object_move_to_inactive(obj);
-			spin_unlock(&dev_priv->mm.active_list_lock);
-			drm_gem_object_unreference(obj);
-			spin_lock(&dev_priv->mm.active_list_lock);
-		}
+	while (!list_empty(&ring->active_list)) {
+		struct drm_i915_gem_object *obj_priv;
+
+		obj_priv = list_first_entry(&ring->active_list,
+					    struct drm_i915_gem_object,
+					    ring_list);
+
+		obj_priv->base.write_domain = 0;
+		list_del_init(&obj_priv->gpu_write_list);
+		i915_gem_object_move_to_inactive(&obj_priv->base);
 	}
-out:
-	spin_unlock(&dev_priv->mm.active_list_lock);
 }
 
-/**
- * Returns true if seq1 is later than seq2.
- */
-bool
-i915_seqno_passed(uint32_t seq1, uint32_t seq2)
+void i915_gem_reset(struct drm_device *dev)
 {
-	return (int32_t)(seq1 - seq2) >= 0;
-}
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_gem_object *obj_priv;
+	int i;
 
-uint32_t
-i915_get_gem_seqno(struct drm_device *dev,
-		   struct intel_ring_buffer *ring)
-{
-	return ring->get_gem_seqno(dev, ring);
+	i915_gem_reset_ring_lists(dev_priv, &dev_priv->render_ring);
+	i915_gem_reset_ring_lists(dev_priv, &dev_priv->bsd_ring);
+	i915_gem_reset_ring_lists(dev_priv, &dev_priv->blt_ring);
+
+	/* Remove anything from the flushing lists. The GPU cache is likely
+	 * to be lost on reset along with the data, so simply move the
+	 * lost bo to the inactive list.
+	 */
+	while (!list_empty(&dev_priv->mm.flushing_list)) {
+		obj_priv = list_first_entry(&dev_priv->mm.flushing_list,
+					    struct drm_i915_gem_object,
+					    mm_list);
+
+		obj_priv->base.write_domain = 0;
+		list_del_init(&obj_priv->gpu_write_list);
+		i915_gem_object_move_to_inactive(&obj_priv->base);
+	}
+
+	/* Move everything out of the GPU domains to ensure we do any
+	 * necessary invalidation upon reuse.
+	 */
+	list_for_each_entry(obj_priv,
+			    &dev_priv->mm.inactive_list,
+			    mm_list)
+	{
+		obj_priv->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
+	}
+
+	/* The fence registers are invalidated so clear them out */
+	for (i = 0; i < 16; i++) {
+		struct drm_i915_fence_reg *reg;
+
+		reg = &dev_priv->fence_regs[i];
+		if (!reg->obj)
+			continue;
+
+		i915_gem_clear_fence_reg(reg->obj);
+	}
 }
 
 /**
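The i915_seqno_passed() helper removed above (it survives elsewhere in the file) is the standard wraparound-safe sequence comparison: unsigned subtraction followed by a signed test stays correct across the 32-bit wrap as long as the two seqnos are less than 2^31 apart. A minimal standalone sketch of the same trick:

#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

/* mirrors i915_seqno_passed(): "has seq1 reached or passed seq2?" */
static bool seqno_passed(uint32_t seq1, uint32_t seq2)
{
	return (int32_t)(seq1 - seq2) >= 0;
}

int main(void)
{
	printf("%d\n", seqno_passed(5, 3));              /* 1: already passed */
	printf("%d\n", seqno_passed(3, 5));              /* 0: not yet */
	printf("%d\n", seqno_passed(2, UINT32_MAX - 1)); /* 1: survives the wrap */
	return 0;
}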
@@ -1741,38 +1847,58 @@ i915_gem_retire_requests_ring(struct drm_device *dev,
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	uint32_t seqno;
 
-	if (!ring->status_page.page_addr
-	    || list_empty(&ring->request_list))
+	if (!ring->status_page.page_addr ||
+	    list_empty(&ring->request_list))
 		return;
 
-	seqno = i915_get_gem_seqno(dev, ring);
+	WARN_ON(i915_verify_lists(dev));
 
+	seqno = ring->get_seqno(dev, ring);
 	while (!list_empty(&ring->request_list)) {
 		struct drm_i915_gem_request *request;
-		uint32_t retiring_seqno;
 
 		request = list_first_entry(&ring->request_list,
 					   struct drm_i915_gem_request,
 					   list);
-		retiring_seqno = request->seqno;
 
-		if (i915_seqno_passed(seqno, retiring_seqno) ||
-		    atomic_read(&dev_priv->mm.wedged)) {
-			i915_gem_retire_request(dev, request);
+		if (!i915_seqno_passed(seqno, request->seqno))
+			break;
+
+		trace_i915_gem_request_retire(dev, request->seqno);
+
+		list_del(&request->list);
+		i915_gem_request_remove_from_client(request);
+		kfree(request);
+	}
+
+	/* Move any buffers on the active list that are no longer referenced
+	 * by the ringbuffer to the flushing/inactive lists as appropriate.
+	 */
+	while (!list_empty(&ring->active_list)) {
+		struct drm_gem_object *obj;
+		struct drm_i915_gem_object *obj_priv;
+
+		obj_priv = list_first_entry(&ring->active_list,
+					    struct drm_i915_gem_object,
+					    ring_list);
 
-			list_del(&request->list);
-			list_del(&request->client_list);
-			kfree(request);
-		} else
+		if (!i915_seqno_passed(seqno, obj_priv->last_rendering_seqno))
 			break;
+
+		obj = &obj_priv->base;
+		if (obj->write_domain != 0)
+			i915_gem_object_move_to_flushing(obj);
+		else
+			i915_gem_object_move_to_inactive(obj);
 	}
 
 	if (unlikely (dev_priv->trace_irq_seqno &&
 		      i915_seqno_passed(dev_priv->trace_irq_seqno, seqno))) {
-
 		ring->user_irq_put(dev, ring);
 		dev_priv->trace_irq_seqno = 0;
 	}
+
+	WARN_ON(i915_verify_lists(dev));
 }
 
 void
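Both retire loops above rely on requests sitting on the list in submission order: scan from the head and stop at the first entry whose seqno the hardware has not yet reported. A minimal sketch of that walk, using an array in place of the driver's linked list:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct request { uint32_t seqno; };

static bool seqno_passed(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) >= 0;
}

int main(void)
{
	struct request queue[] = { {10}, {11}, {12}, {13} };
	uint32_t hw_seqno = 11;	/* what the status page reports */
	size_t i, n = sizeof(queue) / sizeof(queue[0]);

	for (i = 0; i < n; i++) {
		if (!seqno_passed(hw_seqno, queue[i].seqno))
			break;	/* oldest incomplete request: stop */
		printf("retire request %u\n", queue[i].seqno);
	}
	printf("%zu request(s) still pending\n", n - i);
	return 0;
}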
@@ -1790,16 +1916,16 @@ i915_gem_retire_requests(struct drm_device *dev)
 	 */
 	list_for_each_entry_safe(obj_priv, tmp,
 				 &dev_priv->mm.deferred_free_list,
-				 list)
+				 mm_list)
 		i915_gem_free_object_tail(&obj_priv->base);
 	}
 
 	i915_gem_retire_requests_ring(dev, &dev_priv->render_ring);
-	if (HAS_BSD(dev))
-		i915_gem_retire_requests_ring(dev, &dev_priv->bsd_ring);
+	i915_gem_retire_requests_ring(dev, &dev_priv->bsd_ring);
+	i915_gem_retire_requests_ring(dev, &dev_priv->blt_ring);
 }
 
-void
+static void
 i915_gem_retire_work_handler(struct work_struct *work)
 {
 	drm_i915_private_t *dev_priv;
@@ -1809,20 +1935,25 @@ i915_gem_retire_work_handler(struct work_struct *work)
 			    mm.retire_work.work);
 	dev = dev_priv->dev;
 
-	mutex_lock(&dev->struct_mutex);
+	/* Come back later if the device is busy... */
+	if (!mutex_trylock(&dev->struct_mutex)) {
+		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
+		return;
+	}
+
 	i915_gem_retire_requests(dev);
 
 	if (!dev_priv->mm.suspended &&
 	    (!list_empty(&dev_priv->render_ring.request_list) ||
-	     (HAS_BSD(dev) &&
-	      !list_empty(&dev_priv->bsd_ring.request_list))))
+	     !list_empty(&dev_priv->bsd_ring.request_list) ||
+	     !list_empty(&dev_priv->blt_ring.request_list)))
 		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
 	mutex_unlock(&dev->struct_mutex);
 }
 
 int
 i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
-		     int interruptible, struct intel_ring_buffer *ring)
+		     bool interruptible, struct intel_ring_buffer *ring)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	u32 ier;
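The retire worker above switches from mutex_lock() to mutex_trylock(): a periodic housekeeping task should not stall the shared workqueue on a contended lock, so it bails out and reschedules itself instead. A minimal userspace sketch of the same idiom with pthreads (compile with -lpthread); reschedule() stands in for queue_delayed_work():

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static void reschedule(void)
{
	puts("busy, trying again next tick");
}

static void retire_tick(void)
{
	if (pthread_mutex_trylock(&lock) != 0) {
		reschedule();	/* come back later if the device is busy */
		return;
	}
	puts("retiring completed requests");
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	retire_tick();			/* uncontended: does the work */
	pthread_mutex_lock(&lock);
	retire_tick();			/* contended: requeues itself */
	pthread_mutex_unlock(&lock);
	return 0;
}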
@@ -1831,9 +1962,16 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
 	BUG_ON(seqno == 0);
 
 	if (atomic_read(&dev_priv->mm.wedged))
-		return -EIO;
+		return -EAGAIN;
 
-	if (!i915_seqno_passed(ring->get_gem_seqno(dev, ring), seqno)) {
+	if (ring->outstanding_lazy_request) {
+		seqno = i915_add_request(dev, NULL, NULL, ring);
+		if (seqno == 0)
+			return -ENOMEM;
+	}
+	BUG_ON(seqno == dev_priv->next_seqno);
+
+	if (!i915_seqno_passed(ring->get_seqno(dev, ring), seqno)) {
 		if (HAS_PCH_SPLIT(dev))
 			ier = I915_READ(DEIER) | I915_READ(GTIER);
 		else
@@ -1852,12 +1990,12 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
 		if (interruptible)
 			ret = wait_event_interruptible(ring->irq_queue,
 				i915_seqno_passed(
-					ring->get_gem_seqno(dev, ring), seqno)
+					ring->get_seqno(dev, ring), seqno)
 				|| atomic_read(&dev_priv->mm.wedged));
 		else
 			wait_event(ring->irq_queue,
 				i915_seqno_passed(
-					ring->get_gem_seqno(dev, ring), seqno)
+					ring->get_seqno(dev, ring), seqno)
 				|| atomic_read(&dev_priv->mm.wedged));
 
 		ring->user_irq_put(dev, ring);
@@ -1866,11 +2004,12 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
 		trace_i915_gem_request_wait_end(dev, seqno);
 	}
 	if (atomic_read(&dev_priv->mm.wedged))
-		ret = -EIO;
+		ret = -EAGAIN;
 
 	if (ret && ret != -ERESTARTSYS)
-		DRM_ERROR("%s returns %d (awaiting %d at %d)\n",
-			  __func__, ret, seqno, ring->get_gem_seqno(dev, ring));
+		DRM_ERROR("%s returns %d (awaiting %d at %d, next %d)\n",
+			  __func__, ret, seqno, ring->get_seqno(dev, ring),
+			  dev_priv->next_seqno);
 
 	/* Directly dispatch request retiring.  While we have the work queue
 	 * to handle this, the waiter on a request often wants an associated
@@ -1889,27 +2028,48 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
 	 */
 static int
 i915_wait_request(struct drm_device *dev, uint32_t seqno,
 		  struct intel_ring_buffer *ring)
 {
 	return i915_do_wait_request(dev, seqno, 1, ring);
 }
 
 static void
+i915_gem_flush_ring(struct drm_device *dev,
+		    struct drm_file *file_priv,
+		    struct intel_ring_buffer *ring,
+		    uint32_t invalidate_domains,
+		    uint32_t flush_domains)
+{
+	ring->flush(dev, ring, invalidate_domains, flush_domains);
+	i915_gem_process_flushing_list(dev, flush_domains, ring);
+}
+
+static void
 i915_gem_flush(struct drm_device *dev,
+	       struct drm_file *file_priv,
 	       uint32_t invalidate_domains,
-	       uint32_t flush_domains)
+	       uint32_t flush_domains,
+	       uint32_t flush_rings)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
+
 	if (flush_domains & I915_GEM_DOMAIN_CPU)
 		drm_agp_chipset_flush(dev);
-	dev_priv->render_ring.flush(dev, &dev_priv->render_ring,
-			invalidate_domains,
-			flush_domains);
-
-	if (HAS_BSD(dev))
-		dev_priv->bsd_ring.flush(dev, &dev_priv->bsd_ring,
-				invalidate_domains,
-				flush_domains);
+
+	if ((flush_domains | invalidate_domains) & I915_GEM_GPU_DOMAINS) {
+		if (flush_rings & RING_RENDER)
+			i915_gem_flush_ring(dev, file_priv,
+					    &dev_priv->render_ring,
+					    invalidate_domains, flush_domains);
+		if (flush_rings & RING_BSD)
+			i915_gem_flush_ring(dev, file_priv,
+					    &dev_priv->bsd_ring,
+					    invalidate_domains, flush_domains);
+		if (flush_rings & RING_BLT)
+			i915_gem_flush_ring(dev, file_priv,
+					    &dev_priv->blt_ring,
+					    invalidate_domains, flush_domains);
+	}
 }
 
 /**
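i915_gem_flush() above now dispatches on a flush_rings bitmask so that only rings which actually own dirty buffers get flushed. A minimal sketch of the dispatch pattern; the RING_* values and names here are illustrative stand-ins for the driver's ring id flags:

#include <stdint.h>
#include <stdio.h>

enum { RING_RENDER = 0x1, RING_BSD = 0x2, RING_BLT = 0x4 };

static void flush_ring(const char *name)
{
	printf("flushing %s ring\n", name);
}

static void flush(uint32_t flush_rings)
{
	if (flush_rings & RING_RENDER)
		flush_ring("render");
	if (flush_rings & RING_BSD)
		flush_ring("bsd");
	if (flush_rings & RING_BLT)
		flush_ring("blt");
}

int main(void)
{
	flush(RING_RENDER | RING_BLT);	/* skips the idle BSD ring */
	return 0;
}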
@@ -1917,7 +2077,8 @@ i915_gem_flush(struct drm_device *dev,
  * safe to unbind from the GTT or access from the CPU.
  */
 static int
-i915_gem_object_wait_rendering(struct drm_gem_object *obj)
+i915_gem_object_wait_rendering(struct drm_gem_object *obj,
+			       bool interruptible)
 {
 	struct drm_device *dev = obj->dev;
 	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
@@ -1932,13 +2093,11 @@ i915_gem_object_wait_rendering(struct drm_gem_object *obj)
 	 * it.
 	 */
 	if (obj_priv->active) {
-#if WATCH_BUF
-		DRM_INFO("%s: object %p wait for seqno %08x\n",
-			  __func__, obj, obj_priv->last_rendering_seqno);
-#endif
-		ret = i915_wait_request(dev,
-				obj_priv->last_rendering_seqno, obj_priv->ring);
-		if (ret != 0)
+		ret = i915_do_wait_request(dev,
+					   obj_priv->last_rendering_seqno,
+					   interruptible,
+					   obj_priv->ring);
+		if (ret)
 			return ret;
 	}
 
@@ -1952,14 +2111,10 @@ int
 i915_gem_object_unbind(struct drm_gem_object *obj)
 {
 	struct drm_device *dev = obj->dev;
-	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 	int ret = 0;
 
-#if WATCH_BUF
-	DRM_INFO("%s:%d %p\n", __func__, __LINE__, obj);
-	DRM_INFO("gtt_space %p\n", obj_priv->gtt_space);
-#endif
 	if (obj_priv->gtt_space == NULL)
 		return 0;
 
@@ -1984,33 +2139,27 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
 	 * should be safe and we need to cleanup or else we might
 	 * cause memory corruption through use-after-free.
 	 */
+	if (ret) {
+		i915_gem_clflush_object(obj);
+		obj->read_domains = obj->write_domain = I915_GEM_DOMAIN_CPU;
+	}
 
 	/* release the fence reg _after_ flushing */
 	if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
 		i915_gem_clear_fence_reg(obj);
 
-	if (obj_priv->agp_mem != NULL) {
-		drm_unbind_agp(obj_priv->agp_mem);
-		drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE);
-		obj_priv->agp_mem = NULL;
-	}
+	drm_unbind_agp(obj_priv->agp_mem);
+	drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE);
 
 	i915_gem_object_put_pages(obj);
 	BUG_ON(obj_priv->pages_refcount);
 
-	if (obj_priv->gtt_space) {
-		atomic_dec(&dev->gtt_count);
-		atomic_sub(obj->size, &dev->gtt_memory);
-
-		drm_mm_put_block(obj_priv->gtt_space);
-		obj_priv->gtt_space = NULL;
-	}
+	i915_gem_info_remove_gtt(dev_priv, obj->size);
+	list_del_init(&obj_priv->mm_list);
 
-	/* Remove ourselves from the LRU list if present. */
-	spin_lock(&dev_priv->mm.active_list_lock);
-	if (!list_empty(&obj_priv->list))
-		list_del_init(&obj_priv->list);
-	spin_unlock(&dev_priv->mm.active_list_lock);
+	drm_mm_put_block(obj_priv->gtt_space);
+	obj_priv->gtt_space = NULL;
+	obj_priv->gtt_offset = 0;
 
 	if (i915_gem_object_is_purgeable(obj_priv))
 		i915_gem_object_truncate(obj);
@@ -2020,48 +2169,50 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
 	return ret;
 }
 
+static int i915_ring_idle(struct drm_device *dev,
+			  struct intel_ring_buffer *ring)
+{
+	if (list_empty(&ring->gpu_write_list))
+		return 0;
+
+	i915_gem_flush_ring(dev, NULL, ring,
+			    I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+	return i915_wait_request(dev,
+				 i915_gem_next_request_seqno(dev, ring),
+				 ring);
+}
+
 int
 i915_gpu_idle(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	bool lists_empty;
-	uint32_t seqno1, seqno2;
 	int ret;
 
-	spin_lock(&dev_priv->mm.active_list_lock);
 	lists_empty = (list_empty(&dev_priv->mm.flushing_list) &&
 		       list_empty(&dev_priv->render_ring.active_list) &&
-		       (!HAS_BSD(dev) ||
-			list_empty(&dev_priv->bsd_ring.active_list)));
-	spin_unlock(&dev_priv->mm.active_list_lock);
-
+		       list_empty(&dev_priv->bsd_ring.active_list) &&
+		       list_empty(&dev_priv->blt_ring.active_list));
 	if (lists_empty)
 		return 0;
 
 	/* Flush everything onto the inactive list. */
-	i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
-	seqno1 = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS,
-				  &dev_priv->render_ring);
-	if (seqno1 == 0)
-		return -ENOMEM;
-	ret = i915_wait_request(dev, seqno1, &dev_priv->render_ring);
-
-	if (HAS_BSD(dev)) {
-		seqno2 = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS,
-					  &dev_priv->bsd_ring);
-		if (seqno2 == 0)
-			return -ENOMEM;
+	ret = i915_ring_idle(dev, &dev_priv->render_ring);
+	if (ret)
+		return ret;
 
-		ret = i915_wait_request(dev, seqno2, &dev_priv->bsd_ring);
-		if (ret)
-			return ret;
-	}
+	ret = i915_ring_idle(dev, &dev_priv->bsd_ring);
+	if (ret)
+		return ret;
 
+	ret = i915_ring_idle(dev, &dev_priv->blt_ring);
+	if (ret)
+		return ret;
 
-	return ret;
+	return 0;
 }
 
-int
+static int
 i915_gem_object_get_pages(struct drm_gem_object *obj,
 			  gfp_t gfpmask)
 {
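The new i915_ring_idle() above factors out a per-ring "flush everything, then wait for the request that flush completes with" step, which i915_gpu_idle() now simply runs over all three rings. A minimal sketch of that sequencing, with illustrative types standing in for the driver's structures:

#include <stdint.h>
#include <stdio.h>

struct ring {
	const char *name;
	int dirty;		/* stands in for a non-empty gpu_write_list */
	uint32_t next_seqno;	/* seqno the flush request would complete with */
};

static int ring_idle(struct ring *ring)
{
	if (!ring->dirty)
		return 0;	/* nothing to flush, nothing to wait on */

	printf("%s: flush, then wait for seqno %u\n",
	       ring->name, ring->next_seqno);
	ring->dirty = 0;
	return 0;
}

int main(void)
{
	struct ring rings[] = {
		{ "render", 1, 101 }, { "bsd", 0, 0 }, { "blt", 1, 17 },
	};
	unsigned i;

	for (i = 0; i < sizeof(rings) / sizeof(rings[0]); i++)
		if (ring_idle(&rings[i]))
			return 1;	/* stop at the first error, as i915_gpu_idle does */
	return 0;
}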
@@ -2241,7 +2392,8 @@ static void i830_write_fence_reg(struct drm_i915_fence_reg *reg)
 	I915_WRITE(FENCE_REG_830_0 + (regnum * 4), val);
 }
 
-static int i915_find_fence_reg(struct drm_device *dev)
+static int i915_find_fence_reg(struct drm_device *dev,
+			       bool interruptible)
 {
 	struct drm_i915_fence_reg *reg = NULL;
 	struct drm_i915_gem_object *obj_priv = NULL;
@@ -2286,7 +2438,7 @@ static int i915_find_fence_reg(struct drm_device *dev)
 	 * private reference to obj like the other callers of put_fence_reg
 	 * (set_tiling ioctl) do. */
 	drm_gem_object_reference(obj);
-	ret = i915_gem_object_put_fence_reg(obj);
+	ret = i915_gem_object_put_fence_reg(obj, interruptible);
 	drm_gem_object_unreference(obj);
 	if (ret != 0)
 		return ret;
@@ -2308,7 +2460,8 @@ static int i915_find_fence_reg(struct drm_device *dev)
  * and tiling format.
  */
 int
-i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
+i915_gem_object_get_fence_reg(struct drm_gem_object *obj,
+			      bool interruptible)
 {
 	struct drm_device *dev = obj->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -2343,7 +2496,7 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
 		break;
 	}
 
-	ret = i915_find_fence_reg(dev);
+	ret = i915_find_fence_reg(dev, interruptible);
 	if (ret < 0)
 		return ret;
 
@@ -2421,15 +2574,19 @@ i915_gem_clear_fence_reg(struct drm_gem_object *obj)
  * i915_gem_object_put_fence_reg - waits on outstanding fenced access
  * to the buffer to finish, and then resets the fence register.
  * @obj: tiled object holding a fence register.
+ * @bool: whether the wait upon the fence is interruptible
  *
  * Zeroes out the fence register itself and clears out the associated
  * data structures in dev_priv and obj_priv.
  */
 int
-i915_gem_object_put_fence_reg(struct drm_gem_object *obj)
+i915_gem_object_put_fence_reg(struct drm_gem_object *obj,
+			      bool interruptible)
 {
 	struct drm_device *dev = obj->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+	struct drm_i915_fence_reg *reg;
 
 	if (obj_priv->fence_reg == I915_FENCE_REG_NONE)
 		return 0;
@@ -2444,20 +2601,23 @@ i915_gem_object_put_fence_reg(struct drm_gem_object *obj)
 	 * therefore we must wait for any outstanding access to complete
 	 * before clearing the fence.
 	 */
-	if (!IS_I965G(dev)) {
+	reg = &dev_priv->fence_regs[obj_priv->fence_reg];
+	if (reg->gpu) {
 		int ret;
 
-		ret = i915_gem_object_flush_gpu_write_domain(obj);
-		if (ret != 0)
+		ret = i915_gem_object_flush_gpu_write_domain(obj, true);
+		if (ret)
 			return ret;
 
-		ret = i915_gem_object_wait_rendering(obj);
-		if (ret != 0)
+		ret = i915_gem_object_wait_rendering(obj, interruptible);
+		if (ret)
 			return ret;
+
+		reg->gpu = false;
 	}
 
 	i915_gem_object_flush_gtt_write_domain(obj);
-	i915_gem_clear_fence_reg (obj);
+	i915_gem_clear_fence_reg(obj);
 
 	return 0;
 }
@@ -2490,7 +2650,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
 	/* If the object is bigger than the entire aperture, reject it early
 	 * before evicting everything in a vain attempt to find space.
 	 */
-	if (obj->size > dev->gtt_total) {
+	if (obj->size > dev_priv->mm.gtt_total) {
 		DRM_ERROR("Attempting to bind an object larger than the aperture\n");
 		return -E2BIG;
 	}
@@ -2498,19 +2658,13 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
  search_free:
 	free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
 					obj->size, alignment, 0);
-	if (free_space != NULL) {
+	if (free_space != NULL)
 		obj_priv->gtt_space = drm_mm_get_block(free_space, obj->size,
 						       alignment);
-		if (obj_priv->gtt_space != NULL)
-			obj_priv->gtt_offset = obj_priv->gtt_space->start;
-	}
 	if (obj_priv->gtt_space == NULL) {
 		/* If the gtt is empty and we're still having trouble
 		 * fitting our object in, we're out of memory.
 		 */
-#if WATCH_LRU
-		DRM_INFO("%s: GTT full, evicting something\n", __func__);
-#endif
 		ret = i915_gem_evict_something(dev, obj->size, alignment);
 		if (ret)
 			return ret;
@@ -2518,10 +2672,6 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
 		goto search_free;
 	}
 
-#if WATCH_BUF
-	DRM_INFO("Binding object of size %zd at 0x%08x\n",
-		 obj->size, obj_priv->gtt_offset);
-#endif
 	ret = i915_gem_object_get_pages(obj, gfpmask);
 	if (ret) {
 		drm_mm_put_block(obj_priv->gtt_space);
@@ -2553,7 +2703,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
 	obj_priv->agp_mem = drm_agp_bind_pages(dev,
 					       obj_priv->pages,
 					       obj->size >> PAGE_SHIFT,
-					       obj_priv->gtt_offset,
+					       obj_priv->gtt_space->start,
 					       obj_priv->agp_type);
 	if (obj_priv->agp_mem == NULL) {
 		i915_gem_object_put_pages(obj);
@@ -2566,11 +2716,10 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
 
 		goto search_free;
 	}
-	atomic_inc(&dev->gtt_count);
-	atomic_add(obj->size, &dev->gtt_memory);
 
 	/* keep track of bounds object by adding it to the inactive list */
-	list_add_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
+	list_add_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list);
+	i915_gem_info_add_gtt(dev_priv, obj->size);
 
 	/* Assert that the object is not currently in any GPU domain. As it
 	 * wasn't in the GTT, there shouldn't be any way it could have been in
@@ -2579,6 +2728,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
 	BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS);
 	BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS);
 
+	obj_priv->gtt_offset = obj_priv->gtt_space->start;
 	trace_i915_gem_object_bind(obj, obj_priv->gtt_offset);
 
 	return 0;
@@ -2603,25 +2753,30 @@ i915_gem_clflush_object(struct drm_gem_object *obj)
 
 /** Flushes any GPU write domain for the object if it's dirty. */
 static int
-i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
+i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj,
+				       bool pipelined)
 {
 	struct drm_device *dev = obj->dev;
 	uint32_t old_write_domain;
-	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 
 	if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
 		return 0;
 
 	/* Queue the GPU write cache flushing we need. */
 	old_write_domain = obj->write_domain;
-	i915_gem_flush(dev, 0, obj->write_domain);
-	if (i915_add_request(dev, NULL, obj->write_domain, obj_priv->ring) == 0)
-		return -ENOMEM;
+	i915_gem_flush_ring(dev, NULL,
+			    to_intel_bo(obj)->ring,
+			    0, obj->write_domain);
+	BUG_ON(obj->write_domain);
 
 	trace_i915_gem_object_change_domain(obj,
 					    obj->read_domains,
 					    old_write_domain);
-	return 0;
+
+	if (pipelined)
+		return 0;
+
+	return i915_gem_object_wait_rendering(obj, true);
 }
 
 /** Flushes the GTT write domain for the object if it's dirty. */
@@ -2665,26 +2820,6 @@ i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj)
 					    old_write_domain);
 }
 
-int
-i915_gem_object_flush_write_domain(struct drm_gem_object *obj)
-{
-	int ret = 0;
-
-	switch (obj->write_domain) {
-	case I915_GEM_DOMAIN_GTT:
-		i915_gem_object_flush_gtt_write_domain(obj);
-		break;
-	case I915_GEM_DOMAIN_CPU:
-		i915_gem_object_flush_cpu_write_domain(obj);
-		break;
-	default:
-		ret = i915_gem_object_flush_gpu_write_domain(obj);
-		break;
-	}
-
-	return ret;
-}
-
 /**
  * Moves a single object to the GTT read, and possibly write domain.
  *
@@ -2702,32 +2837,28 @@ i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
 	if (obj_priv->gtt_space == NULL)
 		return -EINVAL;
 
-	ret = i915_gem_object_flush_gpu_write_domain(obj);
+	ret = i915_gem_object_flush_gpu_write_domain(obj, false);
 	if (ret != 0)
 		return ret;
 
-	/* Wait on any GPU rendering and flushing to occur. */
-	ret = i915_gem_object_wait_rendering(obj);
-	if (ret != 0)
-		return ret;
+	i915_gem_object_flush_cpu_write_domain(obj);
+
+	if (write) {
+		ret = i915_gem_object_wait_rendering(obj, true);
+		if (ret)
+			return ret;
+	}
 
 	old_write_domain = obj->write_domain;
 	old_read_domains = obj->read_domains;
 
-	/* If we're writing through the GTT domain, then CPU and GPU caches
-	 * will need to be invalidated at next use.
-	 */
-	if (write)
-		obj->read_domains &= I915_GEM_DOMAIN_GTT;
-
-	i915_gem_object_flush_cpu_write_domain(obj);
-
 	/* It should now be out of any other write domains, and we can update
 	 * the domain values for our changes.
 	 */
 	BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
 	obj->read_domains |= I915_GEM_DOMAIN_GTT;
 	if (write) {
+		obj->read_domains = I915_GEM_DOMAIN_GTT;
 		obj->write_domain = I915_GEM_DOMAIN_GTT;
 		obj_priv->dirty = 1;
 	}
@@ -2744,51 +2875,36 @@ i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
  * wait, as in modesetting process we're not supposed to be interrupted.
  */
 int
-i915_gem_object_set_to_display_plane(struct drm_gem_object *obj)
+i915_gem_object_set_to_display_plane(struct drm_gem_object *obj,
+				     bool pipelined)
 {
-	struct drm_device *dev = obj->dev;
 	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
-	uint32_t old_write_domain, old_read_domains;
+	uint32_t old_read_domains;
 	int ret;
 
 	/* Not valid to be called on unbound objects. */
 	if (obj_priv->gtt_space == NULL)
 		return -EINVAL;
 
-	ret = i915_gem_object_flush_gpu_write_domain(obj);
+	ret = i915_gem_object_flush_gpu_write_domain(obj, true);
 	if (ret)
 		return ret;
 
-	/* Wait on any GPU rendering and flushing to occur. */
-	if (obj_priv->active) {
-#if WATCH_BUF
-		DRM_INFO("%s: object %p wait for seqno %08x\n",
-			  __func__, obj, obj_priv->last_rendering_seqno);
-#endif
-		ret = i915_do_wait_request(dev,
-					   obj_priv->last_rendering_seqno,
-					   0,
-					   obj_priv->ring);
-		if (ret != 0)
+	/* Currently, we are always called from an non-interruptible context. */
+	if (!pipelined) {
+		ret = i915_gem_object_wait_rendering(obj, false);
+		if (ret)
 			return ret;
 	}
 
 	i915_gem_object_flush_cpu_write_domain(obj);
 
-	old_write_domain = obj->write_domain;
 	old_read_domains = obj->read_domains;
-
-	/* It should now be out of any other write domains, and we can update
-	 * the domain values for our changes.
-	 */
-	BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
-	obj->read_domains = I915_GEM_DOMAIN_GTT;
-	obj->write_domain = I915_GEM_DOMAIN_GTT;
-	obj_priv->dirty = 1;
+	obj->read_domains |= I915_GEM_DOMAIN_GTT;
 
 	trace_i915_gem_object_change_domain(obj,
 					    old_read_domains,
-					    old_write_domain);
+					    obj->write_domain);
 
 	return 0;
 }
@@ -2805,12 +2921,7 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
 	uint32_t old_write_domain, old_read_domains;
 	int ret;
 
-	ret = i915_gem_object_flush_gpu_write_domain(obj);
-	if (ret)
-		return ret;
-
-	/* Wait on any GPU rendering and flushing to occur. */
-	ret = i915_gem_object_wait_rendering(obj);
+	ret = i915_gem_object_flush_gpu_write_domain(obj, false);
 	if (ret != 0)
 		return ret;
 
@@ -2821,6 +2932,12 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
 	 */
 	i915_gem_object_set_to_full_cpu_read_domain(obj);
 
+	if (write) {
+		ret = i915_gem_object_wait_rendering(obj, true);
+		if (ret)
+			return ret;
+	}
+
 	old_write_domain = obj->write_domain;
 	old_read_domains = obj->read_domains;
 
@@ -2840,7 +2957,7 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
 	 * need to be invalidated at next use.
 	 */
 	if (write) {
-		obj->read_domains &= I915_GEM_DOMAIN_CPU;
+		obj->read_domains = I915_GEM_DOMAIN_CPU;
 		obj->write_domain = I915_GEM_DOMAIN_CPU;
 	}
 
@@ -2963,26 +3080,18 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
  *		drm_agp_chipset_flush
  */
 static void
-i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
+i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
+				  struct intel_ring_buffer *ring)
 {
 	struct drm_device *dev = obj->dev;
-	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 	uint32_t invalidate_domains = 0;
 	uint32_t flush_domains = 0;
 	uint32_t old_read_domains;
 
-	BUG_ON(obj->pending_read_domains & I915_GEM_DOMAIN_CPU);
-	BUG_ON(obj->pending_write_domain == I915_GEM_DOMAIN_CPU);
-
 	intel_mark_busy(dev, obj);
 
-#if WATCH_BUF
-	DRM_INFO("%s: object %p read %08x -> %08x write %08x -> %08x\n",
-		 __func__, obj,
-		 obj->read_domains, obj->pending_read_domains,
-		 obj->write_domain, obj->pending_write_domain);
-#endif
 	/*
 	 * If the object isn't moving to a new write domain,
 	 * let the object stay in multiple read domains
@@ -3009,13 +3118,8 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
 	 * stale data. That is, any new read domains.
 	 */
 	invalidate_domains |= obj->pending_read_domains & ~obj->read_domains;
-	if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU) {
-#if WATCH_BUF
-		DRM_INFO("%s: CPU domain flush %08x invalidate %08x\n",
-			 __func__, flush_domains, invalidate_domains);
-#endif
+	if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU)
 		i915_gem_clflush_object(obj);
-	}
 
 	old_read_domains = obj->read_domains;
 
@@ -3029,21 +3133,12 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
 		obj->pending_write_domain = obj->write_domain;
 	obj->read_domains = obj->pending_read_domains;
 
-	if (flush_domains & I915_GEM_GPU_DOMAINS) {
-		if (obj_priv->ring == &dev_priv->render_ring)
-			dev_priv->flush_rings |= FLUSH_RENDER_RING;
-		else if (obj_priv->ring == &dev_priv->bsd_ring)
-			dev_priv->flush_rings |= FLUSH_BSD_RING;
-	}
-
 	dev->invalidate_domains |= invalidate_domains;
 	dev->flush_domains |= flush_domains;
-#if WATCH_BUF
-	DRM_INFO("%s: read %08x write %08x invalidate %08x flush %08x\n",
-		 __func__,
-		 obj->read_domains, obj->write_domain,
-		 dev->invalidate_domains, dev->flush_domains);
-#endif
+	if (flush_domains & I915_GEM_GPU_DOMAINS)
+		dev_priv->mm.flush_rings |= obj_priv->ring->id;
+	if (invalidate_domains & I915_GEM_GPU_DOMAINS)
+		dev_priv->mm.flush_rings |= ring->id;
 
 	trace_i915_gem_object_change_domain(obj,
 					    old_read_domains,
@@ -3106,12 +3201,7 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
 	if (offset == 0 && size == obj->size)
 		return i915_gem_object_set_to_cpu_domain(obj, 0);
 
-	ret = i915_gem_object_flush_gpu_write_domain(obj);
-	if (ret)
-		return ret;
-
-	/* Wait on any GPU rendering and flushing to occur. */
-	ret = i915_gem_object_wait_rendering(obj);
+	ret = i915_gem_object_flush_gpu_write_domain(obj, false);
 	if (ret != 0)
 		return ret;
 	i915_gem_object_flush_gtt_write_domain(obj);
@@ -3164,66 +3254,42 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
  * Pin an object to the GTT and evaluate the relocations landing in it.
  */
 static int
-i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
-				 struct drm_file *file_priv,
-				 struct drm_i915_gem_exec_object2 *entry,
-				 struct drm_i915_gem_relocation_entry *relocs)
+i915_gem_execbuffer_relocate(struct drm_i915_gem_object *obj,
+			     struct drm_file *file_priv,
+			     struct drm_i915_gem_exec_object2 *entry)
 {
-	struct drm_device *dev = obj->dev;
+	struct drm_device *dev = obj->base.dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
-	int i, ret;
-	void __iomem *reloc_page;
-	bool need_fence;
-
-	need_fence = entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
-		     obj_priv->tiling_mode != I915_TILING_NONE;
-
-	/* Check fence reg constraints and rebind if necessary */
-	if (need_fence &&
-	    !i915_gem_object_fence_offset_ok(obj,
-					     obj_priv->tiling_mode)) {
-		ret = i915_gem_object_unbind(obj);
-		if (ret)
-			return ret;
-	}
+	struct drm_i915_gem_relocation_entry __user *user_relocs;
+	struct drm_gem_object *target_obj = NULL;
+	uint32_t target_handle = 0;
+	int i, ret = 0;
 
-	/* Choose the GTT offset for our buffer and put it there. */
-	ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment);
-	if (ret)
-		return ret;
+	user_relocs = (void __user *)(uintptr_t)entry->relocs_ptr;
+	for (i = 0; i < entry->relocation_count; i++) {
+		struct drm_i915_gem_relocation_entry reloc;
+		uint32_t target_offset;
 
-	/*
-	 * Pre-965 chips need a fence register set up in order to
-	 * properly handle blits to/from tiled surfaces.
-	 */
-	if (need_fence) {
-		ret = i915_gem_object_get_fence_reg(obj);
-		if (ret != 0) {
-			i915_gem_object_unpin(obj);
-			return ret;
+		if (__copy_from_user_inatomic(&reloc,
+					      user_relocs+i,
+					      sizeof(reloc))) {
+			ret = -EFAULT;
+			break;
 		}
-	}
 
-	entry->offset = obj_priv->gtt_offset;
+		if (reloc.target_handle != target_handle) {
+			drm_gem_object_unreference(target_obj);
 
-	/* Apply the relocations, using the GTT aperture to avoid cache
-	 * flushing requirements.
-	 */
-	for (i = 0; i < entry->relocation_count; i++) {
-		struct drm_i915_gem_relocation_entry *reloc= &relocs[i];
-		struct drm_gem_object *target_obj;
-		struct drm_i915_gem_object *target_obj_priv;
-		uint32_t reloc_val, reloc_offset;
-		uint32_t __iomem *reloc_entry;
-
-		target_obj = drm_gem_object_lookup(obj->dev, file_priv,
-						   reloc->target_handle);
-		if (target_obj == NULL) {
-			i915_gem_object_unpin(obj);
-			return -ENOENT;
+			target_obj = drm_gem_object_lookup(dev, file_priv,
+							   reloc.target_handle);
+			if (target_obj == NULL) {
+				ret = -ENOENT;
+				break;
+			}
+
+			target_handle = reloc.target_handle;
 		}
-		target_obj_priv = to_intel_bo(target_obj);
+		target_offset = to_intel_bo(target_obj)->gtt_offset;
 
 #if WATCH_RELOC
 		DRM_INFO("%s: obj %p offset %08x target %d "
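Independent of the two write paths the new code chooses between (kmap_atomic for CPU-domain objects, a GTT iowrite otherwise), each relocation entry does the same thing: overwrite the 32-bit word at reloc.offset inside the batch with the target object's GTT address plus reloc.delta. A minimal standalone sketch of that patching step, with illustrative types:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct reloc { uint32_t offset; uint32_t delta; };

static void apply_reloc(uint8_t *batch, const struct reloc *r,
			uint32_t target_gtt_offset)
{
	uint32_t value = target_gtt_offset + r->delta;

	memcpy(batch + r->offset, &value, sizeof(value));
}

int main(void)
{
	uint8_t batch[16] = {0};
	struct reloc r = { .offset = 4, .delta = 0x100 };
	uint32_t patched;

	apply_reloc(batch, &r, 0x00a00000);	/* pretend bind address */
	memcpy(&patched, batch + r.offset, sizeof(patched));
	printf("slot now reads 0x%08x\n", patched);	/* 0x00a00100 */
	return 0;
}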
@@ -3231,267 +3297,266 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
3231 "presumed %08x delta %08x\n", 3297 "presumed %08x delta %08x\n",
3232 __func__, 3298 __func__,
3233 obj, 3299 obj,
3234 (int) reloc->offset, 3300 (int) reloc.offset,
3235 (int) reloc->target_handle, 3301 (int) reloc.target_handle,
3236 (int) reloc->read_domains, 3302 (int) reloc.read_domains,
3237 (int) reloc->write_domain, 3303 (int) reloc.write_domain,
3238 (int) target_obj_priv->gtt_offset, 3304 (int) target_offset,
3239 (int) reloc->presumed_offset, 3305 (int) reloc.presumed_offset,
3240 reloc->delta); 3306 reloc.delta);
3241#endif 3307#endif
3242 3308
3243 /* The target buffer should have appeared before us in the 3309 /* The target buffer should have appeared before us in the
3244 * exec_object list, so it should have a GTT space bound by now. 3310 * exec_object list, so it should have a GTT space bound by now.
3245 */ 3311 */
3246 if (target_obj_priv->gtt_space == NULL) { 3312 if (target_offset == 0) {
3247 DRM_ERROR("No GTT space found for object %d\n", 3313 DRM_ERROR("No GTT space found for object %d\n",
3248 reloc->target_handle); 3314 reloc.target_handle);
3249 drm_gem_object_unreference(target_obj); 3315 ret = -EINVAL;
3250 i915_gem_object_unpin(obj); 3316 break;
3251 return -EINVAL;
3252 } 3317 }
3253 3318
3254 /* Validate that the target is in a valid r/w GPU domain */ 3319 /* Validate that the target is in a valid r/w GPU domain */
3255 if (reloc->write_domain & (reloc->write_domain - 1)) { 3320 if (reloc.write_domain & (reloc.write_domain - 1)) {
3256 DRM_ERROR("reloc with multiple write domains: " 3321 DRM_ERROR("reloc with multiple write domains: "
3257 "obj %p target %d offset %d " 3322 "obj %p target %d offset %d "
3258 "read %08x write %08x", 3323 "read %08x write %08x",
3259 obj, reloc->target_handle, 3324 obj, reloc.target_handle,
3260 (int) reloc->offset, 3325 (int) reloc.offset,
3261 reloc->read_domains, 3326 reloc.read_domains,
3262 reloc->write_domain); 3327 reloc.write_domain);
3263 drm_gem_object_unreference(target_obj); 3328 ret = -EINVAL;
3264 i915_gem_object_unpin(obj); 3329 break;
3265 return -EINVAL;
3266 } 3330 }
3267 if (reloc->write_domain & I915_GEM_DOMAIN_CPU || 3331 if (reloc.write_domain & I915_GEM_DOMAIN_CPU ||
3268 reloc->read_domains & I915_GEM_DOMAIN_CPU) { 3332 reloc.read_domains & I915_GEM_DOMAIN_CPU) {
3269 DRM_ERROR("reloc with read/write CPU domains: " 3333 DRM_ERROR("reloc with read/write CPU domains: "
3270 "obj %p target %d offset %d " 3334 "obj %p target %d offset %d "
3271 "read %08x write %08x", 3335 "read %08x write %08x",
3272 obj, reloc->target_handle, 3336 obj, reloc.target_handle,
3273 (int) reloc->offset, 3337 (int) reloc.offset,
3274 reloc->read_domains, 3338 reloc.read_domains,
3275 reloc->write_domain); 3339 reloc.write_domain);
3276 drm_gem_object_unreference(target_obj); 3340 ret = -EINVAL;
3277 i915_gem_object_unpin(obj); 3341 break;
3278 return -EINVAL;
3279 } 3342 }
3280 if (reloc->write_domain && target_obj->pending_write_domain && 3343 if (reloc.write_domain && target_obj->pending_write_domain &&
3281 reloc->write_domain != target_obj->pending_write_domain) { 3344 reloc.write_domain != target_obj->pending_write_domain) {
3282 DRM_ERROR("Write domain conflict: " 3345 DRM_ERROR("Write domain conflict: "
3283 "obj %p target %d offset %d " 3346 "obj %p target %d offset %d "
3284 "new %08x old %08x\n", 3347 "new %08x old %08x\n",
3285 obj, reloc->target_handle, 3348 obj, reloc.target_handle,
3286 (int) reloc->offset, 3349 (int) reloc.offset,
3287 reloc->write_domain, 3350 reloc.write_domain,
3288 target_obj->pending_write_domain); 3351 target_obj->pending_write_domain);
3289 drm_gem_object_unreference(target_obj); 3352 ret = -EINVAL;
3290 i915_gem_object_unpin(obj); 3353 break;
3291 return -EINVAL;
3292 } 3354 }
3293 3355
3294 target_obj->pending_read_domains |= reloc->read_domains; 3356 target_obj->pending_read_domains |= reloc.read_domains;
3295 target_obj->pending_write_domain |= reloc->write_domain; 3357 target_obj->pending_write_domain |= reloc.write_domain;
3296 3358
3297 /* If the relocation already has the right value in it, no 3359 /* If the relocation already has the right value in it, no
3298 * more work needs to be done. 3360 * more work needs to be done.
3299 */ 3361 */
3300 if (target_obj_priv->gtt_offset == reloc->presumed_offset) { 3362 if (target_offset == reloc.presumed_offset)
3301 drm_gem_object_unreference(target_obj);
3302 continue; 3363 continue;
3303 }
3304 3364
3305 /* Check that the relocation address is valid... */ 3365 /* Check that the relocation address is valid... */
3306 if (reloc->offset > obj->size - 4) { 3366 if (reloc.offset > obj->base.size - 4) {
3307 DRM_ERROR("Relocation beyond object bounds: " 3367 DRM_ERROR("Relocation beyond object bounds: "
3308 "obj %p target %d offset %d size %d.\n", 3368 "obj %p target %d offset %d size %d.\n",
3309 obj, reloc->target_handle, 3369 obj, reloc.target_handle,
3310 (int) reloc->offset, (int) obj->size); 3370 (int) reloc.offset, (int) obj->base.size);
3311 drm_gem_object_unreference(target_obj); 3371 ret = -EINVAL;
3312 i915_gem_object_unpin(obj); 3372 break;
3313 return -EINVAL;
3314 } 3373 }
3315 if (reloc->offset & 3) { 3374 if (reloc.offset & 3) {
3316 DRM_ERROR("Relocation not 4-byte aligned: " 3375 DRM_ERROR("Relocation not 4-byte aligned: "
3317 "obj %p target %d offset %d.\n", 3376 "obj %p target %d offset %d.\n",
3318 obj, reloc->target_handle, 3377 obj, reloc.target_handle,
3319 (int) reloc->offset); 3378 (int) reloc.offset);
3320 drm_gem_object_unreference(target_obj); 3379 ret = -EINVAL;
3321 i915_gem_object_unpin(obj); 3380 break;
3322 return -EINVAL;
3323 } 3381 }
3324 3382
3325 /* and points to somewhere within the target object. */ 3383 /* and points to somewhere within the target object. */
3326 if (reloc->delta >= target_obj->size) { 3384 if (reloc.delta >= target_obj->size) {
3327 DRM_ERROR("Relocation beyond target object bounds: " 3385 DRM_ERROR("Relocation beyond target object bounds: "
3328 "obj %p target %d delta %d size %d.\n", 3386 "obj %p target %d delta %d size %d.\n",
3329 obj, reloc->target_handle, 3387 obj, reloc.target_handle,
3330 (int) reloc->delta, (int) target_obj->size); 3388 (int) reloc.delta, (int) target_obj->size);
3331 drm_gem_object_unreference(target_obj); 3389 ret = -EINVAL;
3332 i915_gem_object_unpin(obj); 3390 break;
3333 return -EINVAL;
3334 }
3335
3336 ret = i915_gem_object_set_to_gtt_domain(obj, 1);
3337 if (ret != 0) {
3338 drm_gem_object_unreference(target_obj);
3339 i915_gem_object_unpin(obj);
3340 return -EINVAL;
3341 } 3391 }
3342 3392
3343 /* Map the page containing the relocation we're going to 3393 reloc.delta += target_offset;
3344 * perform. 3394 if (obj->base.write_domain == I915_GEM_DOMAIN_CPU) {
3345 */ 3395 uint32_t page_offset = reloc.offset & ~PAGE_MASK;
3346 reloc_offset = obj_priv->gtt_offset + reloc->offset; 3396 char *vaddr;
3347 reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
3348 (reloc_offset &
3349 ~(PAGE_SIZE - 1)));
3350 reloc_entry = (uint32_t __iomem *)(reloc_page +
3351 (reloc_offset & (PAGE_SIZE - 1)));
3352 reloc_val = target_obj_priv->gtt_offset + reloc->delta;
3353
3354#if WATCH_BUF
3355 DRM_INFO("Applied relocation: %p@0x%08x %08x -> %08x\n",
3356 obj, (unsigned int) reloc->offset,
3357 readl(reloc_entry), reloc_val);
3358#endif
3359 writel(reloc_val, reloc_entry);
3360 io_mapping_unmap_atomic(reloc_page);
3361
3362 /* The updated presumed offset for this entry will be
3363 * copied back out to the user.
3364 */
3365 reloc->presumed_offset = target_obj_priv->gtt_offset;
3366
3367 drm_gem_object_unreference(target_obj);
3368 }
3369
3370#if WATCH_BUF
3371 if (0)
3372 i915_gem_dump_object(obj, 128, __func__, ~0);
3373#endif
3374 return 0;
3375}
3376 3397
3377/* Throttle our rendering by waiting until the ring has completed our requests 3398 vaddr = kmap_atomic(obj->pages[reloc.offset >> PAGE_SHIFT]);
3378 * emitted over 20 msec ago. 3399 *(uint32_t *)(vaddr + page_offset) = reloc.delta;
3379 * 3400 kunmap_atomic(vaddr);
3380 * Note that if we were to use the current jiffies each time around the loop, 3401 } else {
3381 * we wouldn't escape the function with any frames outstanding if the time to 3402 uint32_t __iomem *reloc_entry;
3382 * render a frame was over 20ms. 3403 void __iomem *reloc_page;
3383 *
3384 * This should get us reasonable parallelism between CPU and GPU but also
- * relatively low latency when blocking on a particular request to finish.
- */
-static int
-i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv)
-{
-	struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
-	int ret = 0;
-	unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
-
-	mutex_lock(&dev->struct_mutex);
-	while (!list_empty(&i915_file_priv->mm.request_list)) {
-		struct drm_i915_gem_request *request;
 
-		request = list_first_entry(&i915_file_priv->mm.request_list,
-					   struct drm_i915_gem_request,
-					   client_list);
+		ret = i915_gem_object_set_to_gtt_domain(&obj->base, 1);
+		if (ret)
+			break;
 
-		if (time_after_eq(request->emitted_jiffies, recent_enough))
-			break;
+		/* Map the page containing the relocation we're going to perform. */
+		reloc.offset += obj->gtt_offset;
+		reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
+						      reloc.offset & PAGE_MASK);
+		reloc_entry = (uint32_t __iomem *)
+			(reloc_page + (reloc.offset & ~PAGE_MASK));
+		iowrite32(reloc.delta, reloc_entry);
+		io_mapping_unmap_atomic(reloc_page);
+	}
 
-		ret = i915_wait_request(dev, request->seqno, request->ring);
-		if (ret != 0)
-			break;
+		/* and update the user's relocation entry */
+		reloc.presumed_offset = target_offset;
+		if (__copy_to_user_inatomic(&user_relocs[i].presumed_offset,
+					    &reloc.presumed_offset,
+					    sizeof(reloc.presumed_offset))) {
+			ret = -EFAULT;
+			break;
+		}
 	}
-	mutex_unlock(&dev->struct_mutex);
 
+	drm_gem_object_unreference(target_obj);
 	return ret;
 }
 
 static int
-i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object2 *exec_list,
-			      uint32_t buffer_count,
-			      struct drm_i915_gem_relocation_entry **relocs)
+i915_gem_execbuffer_pin(struct drm_device *dev,
+			struct drm_file *file,
+			struct drm_gem_object **object_list,
+			struct drm_i915_gem_exec_object2 *exec_list,
+			int count)
 {
-	uint32_t reloc_count = 0, reloc_index = 0, i;
-	int ret;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret, i, retry;
 
-	*relocs = NULL;
-	for (i = 0; i < buffer_count; i++) {
-		if (reloc_count + exec_list[i].relocation_count < reloc_count)
-			return -EINVAL;
-		reloc_count += exec_list[i].relocation_count;
-	}
+	/* attempt to pin all of the buffers into the GTT */
+	for (retry = 0; retry < 2; retry++) {
+		ret = 0;
+		for (i = 0; i < count; i++) {
+			struct drm_i915_gem_exec_object2 *entry = &exec_list[i];
+			struct drm_i915_gem_object *obj = to_intel_bo(object_list[i]);
+			bool need_fence =
+				entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
+				obj->tiling_mode != I915_TILING_NONE;
+
+			/* Check fence reg constraints and rebind if necessary */
+			if (need_fence &&
+			    !i915_gem_object_fence_offset_ok(&obj->base,
+							     obj->tiling_mode)) {
+				ret = i915_gem_object_unbind(&obj->base);
+				if (ret)
+					break;
+			}
 
-	*relocs = drm_calloc_large(reloc_count, sizeof(**relocs));
-	if (*relocs == NULL) {
-		DRM_ERROR("failed to alloc relocs, count %d\n", reloc_count);
-		return -ENOMEM;
-	}
+			ret = i915_gem_object_pin(&obj->base, entry->alignment);
+			if (ret)
+				break;
 
-	for (i = 0; i < buffer_count; i++) {
-		struct drm_i915_gem_relocation_entry __user *user_relocs;
+			/*
+			 * Pre-965 chips need a fence register set up in order
+			 * to properly handle blits to/from tiled surfaces.
+			 */
+			if (need_fence) {
+				ret = i915_gem_object_get_fence_reg(&obj->base, true);
+				if (ret) {
+					i915_gem_object_unpin(&obj->base);
+					break;
+				}
 
-		user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr;
+				dev_priv->fence_regs[obj->fence_reg].gpu = true;
+			}
 
-		ret = copy_from_user(&(*relocs)[reloc_index],
-				     user_relocs,
-				     exec_list[i].relocation_count *
-				     sizeof(**relocs));
-		if (ret != 0) {
-			drm_free_large(*relocs);
-			*relocs = NULL;
-			return -EFAULT;
+			entry->offset = obj->gtt_offset;
 		}
 
-		reloc_index += exec_list[i].relocation_count;
+		while (i--)
+			i915_gem_object_unpin(object_list[i]);
+
+		if (ret == 0)
+			break;
+
+		if (ret != -ENOSPC || retry)
+			return ret;
+
+		ret = i915_gem_evict_everything(dev);
+		if (ret)
+			return ret;
 	}
 
 	return 0;
 }
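The new i915_gem_execbuffer_pin() above is a two-pass strategy: try to pin every buffer, unwind, and if the only failure was a full aperture (-ENOSPC), evict everything once and retry. A minimal userspace sketch of the same retry-then-evict shape, with hypothetical pin_object()/unpin_object()/evict_everything() helpers standing in for the GEM calls (in the real driver, unpinning after success still leaves the buffers bound in the GTT):

    #include <errno.h>

    int pin_object(int idx);        /* hypothetical: 0 or -ENOSPC on full GTT */
    void unpin_object(int idx);
    int evict_everything(void);

    static int pin_all(int count)
    {
            int ret, i, retry;

            for (retry = 0; retry < 2; retry++) {
                    ret = 0;
                    for (i = 0; i < count; i++) {
                            ret = pin_object(i);
                            if (ret)
                                    break;
                    }

                    /* unwind whatever was pinned this pass */
                    while (i--)
                            unpin_object(i);

                    if (ret == 0)
                            break;
                    if (ret != -ENOSPC || retry) /* only -ENOSPC earns a retry */
                            return ret;

                    ret = evict_everything();
                    if (ret)
                            return ret;
            }
            return 0;
    }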
 
+/* Throttle our rendering by waiting until the ring has completed our requests
+ * emitted over 20 msec ago.
+ *
+ * Note that if we were to use the current jiffies each time around the loop,
+ * we wouldn't escape the function with any frames outstanding if the time to
+ * render a frame was over 20ms.
+ *
+ * This should get us reasonable parallelism between CPU and GPU but also
+ * relatively low latency when blocking on a particular request to finish.
+ */
 static int
-i915_gem_put_relocs_to_user(struct drm_i915_gem_exec_object2 *exec_list,
-			    uint32_t buffer_count,
-			    struct drm_i915_gem_relocation_entry *relocs)
+i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
 {
-	uint32_t reloc_count = 0, i;
-	int ret = 0;
-
-	if (relocs == NULL)
-		return 0;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_file_private *file_priv = file->driver_priv;
+	unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
+	struct drm_i915_gem_request *request;
+	struct intel_ring_buffer *ring = NULL;
+	u32 seqno = 0;
+	int ret;
 
-	for (i = 0; i < buffer_count; i++) {
-		struct drm_i915_gem_relocation_entry __user *user_relocs;
-		int unwritten;
+	spin_lock(&file_priv->mm.lock);
+	list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
+		if (time_after_eq(request->emitted_jiffies, recent_enough))
+			break;
 
-		user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr;
+		ring = request->ring;
+		seqno = request->seqno;
+	}
+	spin_unlock(&file_priv->mm.lock);
 
-		unwritten = copy_to_user(user_relocs,
-					 &relocs[reloc_count],
-					 exec_list[i].relocation_count *
-					 sizeof(*relocs));
+	if (seqno == 0)
+		return 0;
 
-		if (unwritten) {
-			ret = -EFAULT;
-			goto err;
-		}
+	ret = 0;
+	if (!i915_seqno_passed(ring->get_seqno(dev, ring), seqno)) {
+		/* And wait for the seqno passing without holding any locks and
+		 * causing extra latency for others. This is safe as the irq
+		 * generation is designed to be run atomically and so is
+		 * lockless.
+		 */
+		ring->user_irq_get(dev, ring);
+		ret = wait_event_interruptible(ring->irq_queue,
+					       i915_seqno_passed(ring->get_seqno(dev, ring), seqno)
+					       || atomic_read(&dev_priv->mm.wedged));
+		ring->user_irq_put(dev, ring);
 
-		reloc_count += exec_list[i].relocation_count;
+		if (ret == 0 && atomic_read(&dev_priv->mm.wedged))
+			ret = -EIO;
 	}
 
-err:
-	drm_free_large(relocs);
+	if (ret == 0)
+		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
 
 	return ret;
 }
 
 static int
-i915_gem_check_execbuffer (struct drm_i915_gem_execbuffer2 *exec,
-			   uint64_t exec_offset)
+i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec,
+			  uint64_t exec_offset)
 {
 	uint32_t exec_start, exec_len;
 
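The rewritten throttle scans the client's oldest-first request list and picks the newest request that is already more than 20 ms old; only that seqno is waited on, outside any lock. The windowing logic can be modelled in plain C; a small sketch with a millisecond counter standing in for jiffies:

    #include <stddef.h>

    struct request { unsigned long emitted_ms; struct request *next; };

    /* Pick the newest request emitted more than 20 ms ago from an
     * oldest-first list; NULL means nothing is old enough and the
     * caller need not block at all. */
    static struct request *pick_throttle_point(struct request *head,
                                               unsigned long now_ms)
    {
            unsigned long recent_enough = now_ms - 20;
            struct request *victim = NULL;

            for (; head != NULL; head = head->next) {
                    if (head->emitted_ms >= recent_enough)
                            break;  /* emitted within the last 20 ms */
                    victim = head;
            }
            return victim;          /* caller waits for this one's seqno */
    }

Waiting on the newest sufficiently old request, rather than every old request in turn, keeps at most 20 ms of work queued while taking a single wakeup.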
@@ -3508,44 +3573,32 @@ i915_gem_check_execbuffer (struct drm_i915_gem_execbuffer2 *exec,
 }
 
 static int
-i915_gem_wait_for_pending_flip(struct drm_device *dev,
-			       struct drm_gem_object **object_list,
-			       int count)
+validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
+		   int count)
 {
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_i915_gem_object *obj_priv;
-	DEFINE_WAIT(wait);
-	int i, ret = 0;
+	int i;
 
-	for (;;) {
-		prepare_to_wait(&dev_priv->pending_flip_queue,
-				&wait, TASK_INTERRUPTIBLE);
-		for (i = 0; i < count; i++) {
-			obj_priv = to_intel_bo(object_list[i]);
-			if (atomic_read(&obj_priv->pending_flip) > 0)
-				break;
-		}
-		if (i == count)
-			break;
+	for (i = 0; i < count; i++) {
+		char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr;
+		size_t length = exec[i].relocation_count * sizeof(struct drm_i915_gem_relocation_entry);
 
-		if (!signal_pending(current)) {
-			mutex_unlock(&dev->struct_mutex);
-			schedule();
-			mutex_lock(&dev->struct_mutex);
-			continue;
-		}
-		ret = -ERESTARTSYS;
-		break;
+		if (!access_ok(VERIFY_READ, ptr, length))
+			return -EFAULT;
+
+		/* we may also need to update the presumed offsets */
+		if (!access_ok(VERIFY_WRITE, ptr, length))
+			return -EFAULT;
+
+		if (fault_in_pages_readable(ptr, length))
+			return -EFAULT;
 	}
-	finish_wait(&dev_priv->pending_flip_queue, &wait);
 
-	return ret;
+	return 0;
 }
 
-
-int
+static int
 i915_gem_do_execbuffer(struct drm_device *dev, void *data,
-		       struct drm_file *file_priv,
+		       struct drm_file *file,
 		       struct drm_i915_gem_execbuffer2 *args,
 		       struct drm_i915_gem_exec_object2 *exec_list)
 {
@@ -3554,26 +3607,47 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	struct drm_gem_object *batch_obj;
 	struct drm_i915_gem_object *obj_priv;
 	struct drm_clip_rect *cliprects = NULL;
-	struct drm_i915_gem_relocation_entry *relocs = NULL;
-	int ret = 0, ret2, i, pinned = 0;
+	struct drm_i915_gem_request *request = NULL;
+	int ret, i, flips;
 	uint64_t exec_offset;
-	uint32_t seqno, flush_domains, reloc_index;
-	int pin_tries, flips;
 
 	struct intel_ring_buffer *ring = NULL;
 
+	ret = i915_gem_check_is_wedged(dev);
+	if (ret)
+		return ret;
+
+	ret = validate_exec_list(exec_list, args->buffer_count);
+	if (ret)
+		return ret;
+
 #if WATCH_EXEC
 	DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
 		 (int) args->buffers_ptr, args->buffer_count, args->batch_len);
 #endif
-	if (args->flags & I915_EXEC_BSD) {
+	switch (args->flags & I915_EXEC_RING_MASK) {
+	case I915_EXEC_DEFAULT:
+	case I915_EXEC_RENDER:
+		ring = &dev_priv->render_ring;
+		break;
+	case I915_EXEC_BSD:
 		if (!HAS_BSD(dev)) {
-			DRM_ERROR("execbuf with wrong flag\n");
+			DRM_ERROR("execbuf with invalid ring (BSD)\n");
 			return -EINVAL;
 		}
 		ring = &dev_priv->bsd_ring;
-	} else {
-		ring = &dev_priv->render_ring;
+		break;
+	case I915_EXEC_BLT:
+		if (!HAS_BLT(dev)) {
+			DRM_ERROR("execbuf with invalid ring (BLT)\n");
+			return -EINVAL;
+		}
+		ring = &dev_priv->blt_ring;
+		break;
+	default:
+		DRM_ERROR("execbuf with unknown ring: %d\n",
+			  (int)(args->flags & I915_EXEC_RING_MASK));
+		return -EINVAL;
 	}
 
 	if (args->buffer_count < 1) {
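The old boolean I915_EXEC_BSD flag becomes a multi-way ring selector so the new BLT ring fits in. The dispatch shape in isolation, as a compilable sketch (the enum values are local stand-ins mirroring the I915_EXEC_* selector bits, not the uapi header):

    #include <errno.h>

    enum { RING_MASK = 0x7, RING_DEFAULT = 0, RING_RENDER = 1,
           RING_BSD = 2, RING_BLT = 3 };

    /* Map the low flag bits to a ring, rejecting rings the hardware
     * lacks and any selector value the driver does not know about. */
    static int select_ring(unsigned int flags, int has_bsd, int has_blt)
    {
            switch (flags & RING_MASK) {
            case RING_DEFAULT:
            case RING_RENDER:
                    return RING_RENDER;
            case RING_BSD:
                    return has_bsd ? RING_BSD : -EINVAL;
            case RING_BLT:
                    return has_blt ? RING_BLT : -EINVAL;
            default:
                    return -EINVAL; /* unknown ring requested */
            }
    }

Rejecting unknown selector values (rather than silently defaulting) keeps the remaining flag bits available for future rings.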
@@ -3608,20 +3682,15 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 		}
 	}
 
-	ret = i915_gem_get_relocs_from_user(exec_list, args->buffer_count,
-					    &relocs);
-	if (ret != 0)
+	request = kzalloc(sizeof(*request), GFP_KERNEL);
+	if (request == NULL) {
+		ret = -ENOMEM;
 		goto pre_mutex_err;
+	}
 
-	mutex_lock(&dev->struct_mutex);
-
-	i915_verify_inactive(dev, __FILE__, __LINE__);
-
-	if (atomic_read(&dev_priv->mm.wedged)) {
-		mutex_unlock(&dev->struct_mutex);
-		ret = -EIO;
+	ret = i915_mutex_lock_interruptible(dev);
+	if (ret)
 		goto pre_mutex_err;
-	}
 
 	if (dev_priv->mm.suspended) {
 		mutex_unlock(&dev->struct_mutex);
@@ -3630,9 +3699,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	}
 
 	/* Look up object handles */
-	flips = 0;
 	for (i = 0; i < args->buffer_count; i++) {
-		object_list[i] = drm_gem_object_lookup(dev, file_priv,
+		object_list[i] = drm_gem_object_lookup(dev, file,
 						       exec_list[i].handle);
 		if (object_list[i] == NULL) {
 			DRM_ERROR("Invalid object handle %d at index %d\n",
@@ -3653,75 +3721,22 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 			goto err;
 		}
 		obj_priv->in_execbuffer = true;
-		flips += atomic_read(&obj_priv->pending_flip);
 	}
 
-	if (flips > 0) {
-		ret = i915_gem_wait_for_pending_flip(dev, object_list,
-						     args->buffer_count);
-		if (ret)
-			goto err;
-	}
-
-	/* Pin and relocate */
-	for (pin_tries = 0; ; pin_tries++) {
-		ret = 0;
-		reloc_index = 0;
-
-		for (i = 0; i < args->buffer_count; i++) {
-			object_list[i]->pending_read_domains = 0;
-			object_list[i]->pending_write_domain = 0;
-			ret = i915_gem_object_pin_and_relocate(object_list[i],
-							       file_priv,
-							       &exec_list[i],
-							       &relocs[reloc_index]);
-			if (ret)
-				break;
-			pinned = i + 1;
-			reloc_index += exec_list[i].relocation_count;
-		}
-		/* success */
-		if (ret == 0)
-			break;
-
-		/* error other than GTT full, or we've already tried again */
-		if (ret != -ENOSPC || pin_tries >= 1) {
-			if (ret != -ERESTARTSYS) {
-				unsigned long long total_size = 0;
-				int num_fences = 0;
-				for (i = 0; i < args->buffer_count; i++) {
-					obj_priv = to_intel_bo(object_list[i]);
-
-					total_size += object_list[i]->size;
-					num_fences +=
-						exec_list[i].flags & EXEC_OBJECT_NEEDS_FENCE &&
-						obj_priv->tiling_mode != I915_TILING_NONE;
-				}
-				DRM_ERROR("Failed to pin buffer %d of %d, total %llu bytes, %d fences: %d\n",
-					  pinned+1, args->buffer_count,
-					  total_size, num_fences,
-					  ret);
-				DRM_ERROR("%d objects [%d pinned], "
-					  "%d object bytes [%d pinned], "
-					  "%d/%d gtt bytes\n",
-					  atomic_read(&dev->object_count),
-					  atomic_read(&dev->pin_count),
-					  atomic_read(&dev->object_memory),
-					  atomic_read(&dev->pin_memory),
-					  atomic_read(&dev->gtt_memory),
-					  dev->gtt_total);
-			}
-			goto err;
-		}
-
-		/* unpin all of our buffers */
-		for (i = 0; i < pinned; i++)
-			i915_gem_object_unpin(object_list[i]);
-		pinned = 0;
+	/* Move the objects en-masse into the GTT, evicting if necessary. */
+	ret = i915_gem_execbuffer_pin(dev, file,
+				      object_list, exec_list,
+				      args->buffer_count);
+	if (ret)
+		goto err;
 
-		/* evict everyone we can from the aperture */
-		ret = i915_gem_evict_everything(dev);
-		if (ret && ret != -ENOSPC)
+	/* The objects are in their final locations, apply the relocations. */
+	for (i = 0; i < args->buffer_count; i++) {
+		struct drm_i915_gem_object *obj = to_intel_bo(object_list[i]);
+		obj->base.pending_read_domains = 0;
+		obj->base.pending_write_domain = 0;
+		ret = i915_gem_execbuffer_relocate(obj, file, &exec_list[i]);
+		if (ret)
 			goto err;
 	}
 
@@ -3734,33 +3749,29 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	}
 	batch_obj->pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
 
-	/* Sanity check the batch buffer, prior to moving objects */
-	exec_offset = exec_list[args->buffer_count - 1].offset;
-	ret = i915_gem_check_execbuffer (args, exec_offset);
+	/* Sanity check the batch buffer */
+	exec_offset = to_intel_bo(batch_obj)->gtt_offset;
+	ret = i915_gem_check_execbuffer(args, exec_offset);
 	if (ret != 0) {
 		DRM_ERROR("execbuf with invalid offset/length\n");
 		goto err;
 	}
 
-	i915_verify_inactive(dev, __FILE__, __LINE__);
-
 	/* Zero the global flush/invalidate flags. These
 	 * will be modified as new domains are computed
 	 * for each object
 	 */
 	dev->invalidate_domains = 0;
 	dev->flush_domains = 0;
-	dev_priv->flush_rings = 0;
+	dev_priv->mm.flush_rings = 0;
 
 	for (i = 0; i < args->buffer_count; i++) {
 		struct drm_gem_object *obj = object_list[i];
 
 		/* Compute new gpu domains and update invalidate/flush */
-		i915_gem_object_set_to_gpu_domain(obj);
+		i915_gem_object_set_to_gpu_domain(obj, ring);
 	}
 
-	i915_verify_inactive(dev, __FILE__, __LINE__);
-
 	if (dev->invalidate_domains | dev->flush_domains) {
 #if WATCH_EXEC
 		DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
@@ -3768,38 +3779,21 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 			 dev->invalidate_domains,
 			 dev->flush_domains);
 #endif
-		i915_gem_flush(dev,
+		i915_gem_flush(dev, file,
 			       dev->invalidate_domains,
-			       dev->flush_domains);
-		if (dev_priv->flush_rings & FLUSH_RENDER_RING)
-			(void)i915_add_request(dev, file_priv,
-					       dev->flush_domains,
-					       &dev_priv->render_ring);
-		if (dev_priv->flush_rings & FLUSH_BSD_RING)
-			(void)i915_add_request(dev, file_priv,
-					       dev->flush_domains,
-					       &dev_priv->bsd_ring);
+			       dev->flush_domains,
+			       dev_priv->mm.flush_rings);
 	}
 
 	for (i = 0; i < args->buffer_count; i++) {
 		struct drm_gem_object *obj = object_list[i];
-		struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 		uint32_t old_write_domain = obj->write_domain;
-
 		obj->write_domain = obj->pending_write_domain;
-		if (obj->write_domain)
-			list_move_tail(&obj_priv->gpu_write_list,
-				       &dev_priv->mm.gpu_write_list);
-		else
-			list_del_init(&obj_priv->gpu_write_list);
-
 		trace_i915_gem_object_change_domain(obj,
 						    obj->read_domains,
 						    old_write_domain);
 	}
 
-	i915_verify_inactive(dev, __FILE__, __LINE__);
-
 #if WATCH_COHERENCY
 	for (i = 0; i < args->buffer_count; i++) {
 		i915_gem_object_check_coherency(object_list[i],
@@ -3814,9 +3808,38 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 					       ~0);
 #endif
 
+	/* Check for any pending flips. As we only maintain a flip queue depth
+	 * of 1, we can simply insert a WAIT for the next display flip prior
+	 * to executing the batch and avoid stalling the CPU.
+	 */
+	flips = 0;
+	for (i = 0; i < args->buffer_count; i++) {
+		if (object_list[i]->write_domain)
+			flips |= atomic_read(&to_intel_bo(object_list[i])->pending_flip);
+	}
+	if (flips) {
+		int plane, flip_mask;
+
+		for (plane = 0; flips >> plane; plane++) {
+			if (((flips >> plane) & 1) == 0)
+				continue;
+
+			if (plane)
+				flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
+			else
+				flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
+
+			intel_ring_begin(dev, ring, 2);
+			intel_ring_emit(dev, ring,
+					MI_WAIT_FOR_EVENT | flip_mask);
+			intel_ring_emit(dev, ring, MI_NOOP);
+			intel_ring_advance(dev, ring);
+		}
+	}
+
 	/* Exec the batchbuffer */
 	ret = ring->dispatch_gem_execbuffer(dev, ring, args,
 					    cliprects, exec_offset);
 	if (ret) {
 		DRM_ERROR("dispatch failed %d\n", ret);
 		goto err;
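The hunk above replaces a CPU-side sleep on pending flips with a GPU-side MI_WAIT_FOR_EVENT per flipping plane, so the batch itself stalls instead of the submitting thread. The per-plane mask selection, reduced to a pure function (the constants are stand-ins for the MI_WAIT_FOR_PLANE_* bits):

    enum { WAIT_PLANE_A = 1 << 0, WAIT_PLANE_B = 1 << 1 };

    /* Given a bitmask of planes with pending flips, accumulate the wait
     * masks the loop above would emit, one WAIT per set bit. */
    static unsigned int flip_wait_masks(unsigned int flips)
    {
            unsigned int plane, masks = 0;

            for (plane = 0; flips >> plane; plane++) {
                    if (((flips >> plane) & 1) == 0)
                            continue;
                    masks |= plane ? WAIT_PLANE_B : WAIT_PLANE_A;
            }
            return masks;
    }

This only works because the driver keeps the flip queue depth at 1, as the comment in the hunk notes: one WAIT per plane is guaranteed to cover every outstanding flip.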
@@ -3826,38 +3849,21 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	 * Ensure that the commands in the batch buffer are
 	 * finished before the interrupt fires
 	 */
-	flush_domains = i915_retire_commands(dev, ring);
-
-	i915_verify_inactive(dev, __FILE__, __LINE__);
+	i915_retire_commands(dev, ring);
 
-	/*
-	 * Get a seqno representing the execution of the current buffer,
-	 * which we can wait on.  We would like to mitigate these interrupts,
-	 * likely by only creating seqnos occasionally (so that we have
-	 * *some* interrupts representing completion of buffers that we can
-	 * wait on when trying to clear up gtt space).
-	 */
-	seqno = i915_add_request(dev, file_priv, flush_domains, ring);
-	BUG_ON(seqno == 0);
 	for (i = 0; i < args->buffer_count; i++) {
 		struct drm_gem_object *obj = object_list[i];
-		obj_priv = to_intel_bo(obj);
 
-		i915_gem_object_move_to_active(obj, seqno, ring);
-#if WATCH_LRU
-		DRM_INFO("%s: move to exec list %p\n", __func__, obj);
-#endif
+		i915_gem_object_move_to_active(obj, ring);
+		if (obj->write_domain)
+			list_move_tail(&to_intel_bo(obj)->gpu_write_list,
+				       &ring->gpu_write_list);
 	}
-#if WATCH_LRU
-	i915_dump_lru(dev, __func__);
-#endif
 
-	i915_verify_inactive(dev, __FILE__, __LINE__);
+	i915_add_request(dev, file, request, ring);
+	request = NULL;
 
 err:
-	for (i = 0; i < pinned; i++)
-		i915_gem_object_unpin(object_list[i]);
-
 	for (i = 0; i < args->buffer_count; i++) {
 		if (object_list[i]) {
 			obj_priv = to_intel_bo(object_list[i]);
@@ -3869,22 +3875,9 @@ err:
 	mutex_unlock(&dev->struct_mutex);
 
 pre_mutex_err:
-	/* Copy the updated relocations out regardless of current error
-	 * state. Failure to update the relocs would mean that the next
-	 * time userland calls execbuf, it would do so with presumed offset
-	 * state that didn't match the actual object state.
-	 */
-	ret2 = i915_gem_put_relocs_to_user(exec_list, args->buffer_count,
-					   relocs);
-	if (ret2 != 0) {
-		DRM_ERROR("Failed to copy relocations back out: %d\n", ret2);
-
-		if (ret == 0)
-			ret = ret2;
-	}
-
 	drm_free_large(object_list);
 	kfree(cliprects);
+	kfree(request);
 
 	return ret;
 }
@@ -3941,7 +3934,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
 		exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
 		exec2_list[i].alignment = exec_list[i].alignment;
 		exec2_list[i].offset = exec_list[i].offset;
-		if (!IS_I965G(dev))
+		if (INTEL_INFO(dev)->gen < 4)
 			exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
 		else
 			exec2_list[i].flags = 0;
@@ -4038,12 +4031,12 @@ int
 i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
 {
 	struct drm_device *dev = obj->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 	int ret;
 
 	BUG_ON(obj_priv->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT);
-
-	i915_verify_inactive(dev, __FILE__, __LINE__);
+	WARN_ON(i915_verify_lists(dev));
 
 	if (obj_priv->gtt_space != NULL) {
 		if (alignment == 0)
@@ -4071,14 +4064,13 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
 	 * remove it from the inactive list
 	 */
 	if (obj_priv->pin_count == 1) {
-		atomic_inc(&dev->pin_count);
-		atomic_add(obj->size, &dev->pin_memory);
-		if (!obj_priv->active &&
-		    (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
-			list_del_init(&obj_priv->list);
+		i915_gem_info_add_pin(dev_priv, obj->size);
+		if (!obj_priv->active)
+			list_move_tail(&obj_priv->mm_list,
+				       &dev_priv->mm.pinned_list);
 	}
-	i915_verify_inactive(dev, __FILE__, __LINE__);
 
+	WARN_ON(i915_verify_lists(dev));
 	return 0;
 }
 
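The pin/unpin pair now does its accounting on the first pin and last unpin only, and parks inactive pinned objects on a dedicated list. The counter transitions in isolation, as a toy model (counters stand in for the i915_gem_info_add_pin()/i915_gem_info_remove_pin() bookkeeping; list moves are elided):

    struct toy_obj { int pin_count; unsigned long size; };
    static unsigned long pinned_bytes;

    static void toy_pin(struct toy_obj *obj)
    {
            if (++obj->pin_count == 1)
                    pinned_bytes += obj->size;  /* first pin charges */
    }

    static void toy_unpin(struct toy_obj *obj)
    {
            if (--obj->pin_count == 0)
                    pinned_bytes -= obj->size;  /* last unpin refunds */
    }

Keying the accounting to the 0-to-1 and 1-to-0 transitions makes nested pins free and keeps the totals consistent no matter how callers interleave.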
@@ -4089,7 +4081,7 @@ i915_gem_object_unpin(struct drm_gem_object *obj)
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 
-	i915_verify_inactive(dev, __FILE__, __LINE__);
+	WARN_ON(i915_verify_lists(dev));
 	obj_priv->pin_count--;
 	BUG_ON(obj_priv->pin_count < 0);
 	BUG_ON(obj_priv->gtt_space == NULL);
@@ -4099,14 +4091,12 @@ i915_gem_object_unpin(struct drm_gem_object *obj)
 	 * the inactive list
 	 */
 	if (obj_priv->pin_count == 0) {
-		if (!obj_priv->active &&
-		    (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
-			list_move_tail(&obj_priv->list,
+		if (!obj_priv->active)
+			list_move_tail(&obj_priv->mm_list,
 				       &dev_priv->mm.inactive_list);
-		atomic_dec(&dev->pin_count);
-		atomic_sub(obj->size, &dev->pin_memory);
+		i915_gem_info_remove_pin(dev_priv, obj->size);
 	}
-	i915_verify_inactive(dev, __FILE__, __LINE__);
+	WARN_ON(i915_verify_lists(dev));
 }
 
 int
@@ -4118,41 +4108,36 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
 	struct drm_i915_gem_object *obj_priv;
 	int ret;
 
-	mutex_lock(&dev->struct_mutex);
+	ret = i915_mutex_lock_interruptible(dev);
+	if (ret)
+		return ret;
 
 	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
 	if (obj == NULL) {
-		DRM_ERROR("Bad handle in i915_gem_pin_ioctl(): %d\n",
-			  args->handle);
-		mutex_unlock(&dev->struct_mutex);
-		return -ENOENT;
+		ret = -ENOENT;
+		goto unlock;
 	}
 	obj_priv = to_intel_bo(obj);
 
 	if (obj_priv->madv != I915_MADV_WILLNEED) {
 		DRM_ERROR("Attempting to pin a purgeable buffer\n");
-		drm_gem_object_unreference(obj);
-		mutex_unlock(&dev->struct_mutex);
-		return -EINVAL;
+		ret = -EINVAL;
+		goto out;
 	}
 
 	if (obj_priv->pin_filp != NULL && obj_priv->pin_filp != file_priv) {
 		DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
 			  args->handle);
-		drm_gem_object_unreference(obj);
-		mutex_unlock(&dev->struct_mutex);
-		return -EINVAL;
+		ret = -EINVAL;
+		goto out;
 	}
 
 	obj_priv->user_pin_count++;
 	obj_priv->pin_filp = file_priv;
 	if (obj_priv->user_pin_count == 1) {
 		ret = i915_gem_object_pin(obj, args->alignment);
-		if (ret != 0) {
-			drm_gem_object_unreference(obj);
-			mutex_unlock(&dev->struct_mutex);
-			return ret;
-		}
+		if (ret)
+			goto out;
 	}
 
 	/* XXX - flush the CPU caches for pinned objects
@@ -4160,10 +4145,11 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
 	 */
 	i915_gem_object_flush_cpu_write_domain(obj);
 	args->offset = obj_priv->gtt_offset;
+out:
 	drm_gem_object_unreference(obj);
+unlock:
 	mutex_unlock(&dev->struct_mutex);
-
-	return 0;
+	return ret;
 }
 
 int
@@ -4173,24 +4159,24 @@ i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
 	struct drm_i915_gem_pin *args = data;
 	struct drm_gem_object *obj;
 	struct drm_i915_gem_object *obj_priv;
+	int ret;
 
-	mutex_lock(&dev->struct_mutex);
+	ret = i915_mutex_lock_interruptible(dev);
+	if (ret)
+		return ret;
 
 	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
 	if (obj == NULL) {
-		DRM_ERROR("Bad handle in i915_gem_unpin_ioctl(): %d\n",
-			  args->handle);
-		mutex_unlock(&dev->struct_mutex);
-		return -ENOENT;
+		ret = -ENOENT;
+		goto unlock;
 	}
-
 	obj_priv = to_intel_bo(obj);
+
 	if (obj_priv->pin_filp != file_priv) {
 		DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n",
 			  args->handle);
-		drm_gem_object_unreference(obj);
-		mutex_unlock(&dev->struct_mutex);
-		return -EINVAL;
+		ret = -EINVAL;
+		goto out;
 	}
 	obj_priv->user_pin_count--;
 	if (obj_priv->user_pin_count == 0) {
@@ -4198,9 +4184,11 @@ i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
 		i915_gem_object_unpin(obj);
 	}
 
+out:
 	drm_gem_object_unreference(obj);
+unlock:
 	mutex_unlock(&dev->struct_mutex);
-	return 0;
+	return ret;
 }
 
 int
@@ -4210,22 +4198,24 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
 	struct drm_i915_gem_busy *args = data;
 	struct drm_gem_object *obj;
 	struct drm_i915_gem_object *obj_priv;
+	int ret;
+
+	ret = i915_mutex_lock_interruptible(dev);
+	if (ret)
+		return ret;
 
 	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
 	if (obj == NULL) {
-		DRM_ERROR("Bad handle in i915_gem_busy_ioctl(): %d\n",
-			  args->handle);
-		return -ENOENT;
+		ret = -ENOENT;
+		goto unlock;
 	}
-
-	mutex_lock(&dev->struct_mutex);
+	obj_priv = to_intel_bo(obj);
 
 	/* Count all active objects as busy, even if they are currently not used
 	 * by the gpu. Users of this interface expect objects to eventually
 	 * become non-busy without any further actions, therefore emit any
 	 * necessary flushes here.
 	 */
-	obj_priv = to_intel_bo(obj);
 	args->busy = obj_priv->active;
 	if (args->busy) {
 		/* Unconditionally flush objects, even when the gpu still uses this
@@ -4233,10 +4223,10 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
 		 * use this buffer rather sooner than later, so issuing the required
 		 * flush earlier is beneficial.
 		 */
-		if (obj->write_domain) {
-			i915_gem_flush(dev, 0, obj->write_domain);
-			(void)i915_add_request(dev, file_priv, obj->write_domain, obj_priv->ring);
-		}
+		if (obj->write_domain & I915_GEM_GPU_DOMAINS)
+			i915_gem_flush_ring(dev, file_priv,
+					    obj_priv->ring,
+					    0, obj->write_domain);
 
 		/* Update the active list for the hardware's current position.
 		 * Otherwise this only updates on a delayed timer or when irqs
@@ -4249,8 +4239,9 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
 	}
 
 	drm_gem_object_unreference(obj);
+unlock:
 	mutex_unlock(&dev->struct_mutex);
-	return 0;
+	return ret;
 }
 
 int
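Across the pin, unpin, busy and madvise hunks, the ioctls converge on one error-handling shape: take the lock interruptibly, look up the handle, then funnel every exit through out:/unlock: so the reference and mutex are dropped exactly once. A compilable sketch of that shape with hypothetical lock/lookup helpers standing in for i915_mutex_lock_interruptible() and drm_gem_object_lookup():

    #include <errno.h>
    #include <stddef.h>

    int trylock_interruptible(void);    /* hypothetical stand-ins */
    void *lookup(unsigned int handle);
    void put(void *obj);
    void unlock(void);

    static int ioctl_shape(unsigned int handle)
    {
            void *obj;
            int ret;

            ret = trylock_interruptible();
            if (ret)
                    return ret;

            obj = lookup(handle);
            if (obj == NULL) {
                    ret = -ENOENT;
                    goto unlock;    /* no reference taken yet */
            }

            if (handle == 0) {      /* stand-in for a per-ioctl check */
                    ret = -EINVAL;
                    goto out;
            }
            ret = 0;                /* the ioctl-specific work */
    out:
            put(obj);               /* drop the lookup reference */
    unlock:
            unlock();
            return ret;
    }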
@@ -4267,6 +4258,7 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
 	struct drm_i915_gem_madvise *args = data;
 	struct drm_gem_object *obj;
 	struct drm_i915_gem_object *obj_priv;
+	int ret;
 
 	switch (args->madv) {
 	case I915_MADV_DONTNEED:
@@ -4276,22 +4268,20 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
 		return -EINVAL;
 	}
 
+	ret = i915_mutex_lock_interruptible(dev);
+	if (ret)
+		return ret;
+
 	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
 	if (obj == NULL) {
-		DRM_ERROR("Bad handle in i915_gem_madvise_ioctl(): %d\n",
-			  args->handle);
-		return -ENOENT;
+		ret = -ENOENT;
+		goto unlock;
 	}
-
-	mutex_lock(&dev->struct_mutex);
 	obj_priv = to_intel_bo(obj);
 
 	if (obj_priv->pin_count) {
-		drm_gem_object_unreference(obj);
-		mutex_unlock(&dev->struct_mutex);
-
-		DRM_ERROR("Attempted i915_gem_madvise_ioctl() on a pinned object\n");
-		return -EINVAL;
+		ret = -EINVAL;
+		goto out;
 	}
 
 	if (obj_priv->madv != __I915_MADV_PURGED)
@@ -4304,15 +4294,17 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
 
 	args->retained = obj_priv->madv != __I915_MADV_PURGED;
 
+out:
 	drm_gem_object_unreference(obj);
+unlock:
 	mutex_unlock(&dev->struct_mutex);
-
-	return 0;
+	return ret;
 }
 
 struct drm_gem_object * i915_gem_alloc_object(struct drm_device *dev,
 					      size_t size)
 {
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj;
 
 	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
@@ -4324,18 +4316,19 @@ struct drm_gem_object * i915_gem_alloc_object(struct drm_device *dev,
 		return NULL;
 	}
 
+	i915_gem_info_add_obj(dev_priv, size);
+
 	obj->base.write_domain = I915_GEM_DOMAIN_CPU;
 	obj->base.read_domains = I915_GEM_DOMAIN_CPU;
 
 	obj->agp_type = AGP_USER_MEMORY;
 	obj->base.driver_private = NULL;
 	obj->fence_reg = I915_FENCE_REG_NONE;
-	INIT_LIST_HEAD(&obj->list);
+	INIT_LIST_HEAD(&obj->mm_list);
+	INIT_LIST_HEAD(&obj->ring_list);
 	INIT_LIST_HEAD(&obj->gpu_write_list);
 	obj->madv = I915_MADV_WILLNEED;
 
-	trace_i915_gem_object_create(&obj->base);
-
 	return &obj->base;
 }
 
@@ -4355,7 +4348,7 @@ static void i915_gem_free_object_tail(struct drm_gem_object *obj)
 
 	ret = i915_gem_object_unbind(obj);
 	if (ret == -ERESTARTSYS) {
-		list_move(&obj_priv->list,
+		list_move(&obj_priv->mm_list,
 			  &dev_priv->mm.deferred_free_list);
 		return;
 	}
@@ -4364,6 +4357,7 @@ static void i915_gem_free_object_tail(struct drm_gem_object *obj)
 	i915_gem_free_mmap_offset(obj);
 
 	drm_gem_object_release(obj);
+	i915_gem_info_remove_obj(dev_priv, obj->size);
 
 	kfree(obj_priv->page_cpu_valid);
 	kfree(obj_priv->bit_17);
@@ -4394,10 +4388,7 @@ i915_gem_idle(struct drm_device *dev)
 
 	mutex_lock(&dev->struct_mutex);
 
-	if (dev_priv->mm.suspended ||
-	    (dev_priv->render_ring.gem_object == NULL) ||
-	    (HAS_BSD(dev) &&
-	     dev_priv->bsd_ring.gem_object == NULL)) {
+	if (dev_priv->mm.suspended) {
 		mutex_unlock(&dev->struct_mutex);
 		return 0;
 	}
@@ -4422,7 +4413,7 @@ i915_gem_idle(struct drm_device *dev)
 	 * And not confound mm.suspended!
 	 */
 	dev_priv->mm.suspended = 1;
-	del_timer(&dev_priv->hangcheck_timer);
+	del_timer_sync(&dev_priv->hangcheck_timer);
 
 	i915_kernel_lost_context(dev);
 	i915_gem_cleanup_ringbuffer(dev);
@@ -4502,36 +4493,34 @@ i915_gem_init_ringbuffer(struct drm_device *dev)
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	int ret;
 
-	dev_priv->render_ring = render_ring;
-
-	if (!I915_NEED_GFX_HWS(dev)) {
-		dev_priv->render_ring.status_page.page_addr
-			= dev_priv->status_page_dmah->vaddr;
-		memset(dev_priv->render_ring.status_page.page_addr,
-		       0, PAGE_SIZE);
-	}
-
 	if (HAS_PIPE_CONTROL(dev)) {
 		ret = i915_gem_init_pipe_control(dev);
 		if (ret)
 			return ret;
 	}
 
-	ret = intel_init_ring_buffer(dev, &dev_priv->render_ring);
+	ret = intel_init_render_ring_buffer(dev);
 	if (ret)
 		goto cleanup_pipe_control;
 
 	if (HAS_BSD(dev)) {
-		dev_priv->bsd_ring = bsd_ring;
-		ret = intel_init_ring_buffer(dev, &dev_priv->bsd_ring);
+		ret = intel_init_bsd_ring_buffer(dev);
 		if (ret)
 			goto cleanup_render_ring;
 	}
 
+	if (HAS_BLT(dev)) {
+		ret = intel_init_blt_ring_buffer(dev);
+		if (ret)
+			goto cleanup_bsd_ring;
+	}
+
 	dev_priv->next_seqno = 1;
 
 	return 0;
 
+cleanup_bsd_ring:
+	intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring);
 cleanup_render_ring:
 	intel_cleanup_ring_buffer(dev, &dev_priv->render_ring);
 cleanup_pipe_control:
@@ -4546,8 +4535,8 @@ i915_gem_cleanup_ringbuffer(struct drm_device *dev)
 	drm_i915_private_t *dev_priv = dev->dev_private;
 
 	intel_cleanup_ring_buffer(dev, &dev_priv->render_ring);
-	if (HAS_BSD(dev))
-		intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring);
+	intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring);
+	intel_cleanup_ring_buffer(dev, &dev_priv->blt_ring);
 	if (HAS_PIPE_CONTROL(dev))
 		i915_gem_cleanup_pipe_control(dev);
 }
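With a third ring, the init path's error unwind grows another rung; each label tears down exactly what was built before the failing step. A condensed sketch of the same goto ladder with hypothetical init/fini pairs (as in the driver, the cleanup helpers are assumed to tolerate a ring that was never initialized):

    int init_render(void);      /* hypothetical init/fini pairs */
    int init_bsd(void);
    int init_blt(void);
    void fini_render(void);
    void fini_bsd(void);

    static int init_rings(int has_bsd, int has_blt)
    {
            int ret = init_render();
            if (ret)
                    return ret;

            if (has_bsd) {
                    ret = init_bsd();
                    if (ret)
                            goto cleanup_render;
            }

            if (has_blt) {
                    ret = init_blt();
                    if (ret)
                            goto cleanup_bsd;
            }
            return 0;

    cleanup_bsd:
            fini_bsd();
    cleanup_render:
            fini_render();
            return ret;
    }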
@@ -4576,15 +4565,15 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
 		return ret;
 	}
 
-	spin_lock(&dev_priv->mm.active_list_lock);
+	BUG_ON(!list_empty(&dev_priv->mm.active_list));
 	BUG_ON(!list_empty(&dev_priv->render_ring.active_list));
-	BUG_ON(HAS_BSD(dev) && !list_empty(&dev_priv->bsd_ring.active_list));
-	spin_unlock(&dev_priv->mm.active_list_lock);
-
+	BUG_ON(!list_empty(&dev_priv->bsd_ring.active_list));
+	BUG_ON(!list_empty(&dev_priv->blt_ring.active_list));
 	BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
 	BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
 	BUG_ON(!list_empty(&dev_priv->render_ring.request_list));
-	BUG_ON(HAS_BSD(dev) && !list_empty(&dev_priv->bsd_ring.request_list));
+	BUG_ON(!list_empty(&dev_priv->bsd_ring.request_list));
+	BUG_ON(!list_empty(&dev_priv->blt_ring.request_list));
 	mutex_unlock(&dev->struct_mutex);
 
 	ret = drm_irq_install(dev);
@@ -4626,28 +4615,34 @@ i915_gem_lastclose(struct drm_device *dev)
 		DRM_ERROR("failed to idle hardware: %d\n", ret);
 }
 
+static void
+init_ring_lists(struct intel_ring_buffer *ring)
+{
+	INIT_LIST_HEAD(&ring->active_list);
+	INIT_LIST_HEAD(&ring->request_list);
+	INIT_LIST_HEAD(&ring->gpu_write_list);
+}
+
 void
 i915_gem_load(struct drm_device *dev)
 {
 	int i;
 	drm_i915_private_t *dev_priv = dev->dev_private;
 
-	spin_lock_init(&dev_priv->mm.active_list_lock);
+	INIT_LIST_HEAD(&dev_priv->mm.active_list);
 	INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
-	INIT_LIST_HEAD(&dev_priv->mm.gpu_write_list);
 	INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
+	INIT_LIST_HEAD(&dev_priv->mm.pinned_list);
 	INIT_LIST_HEAD(&dev_priv->mm.fence_list);
 	INIT_LIST_HEAD(&dev_priv->mm.deferred_free_list);
-	INIT_LIST_HEAD(&dev_priv->render_ring.active_list);
-	INIT_LIST_HEAD(&dev_priv->render_ring.request_list);
-	if (HAS_BSD(dev)) {
-		INIT_LIST_HEAD(&dev_priv->bsd_ring.active_list);
-		INIT_LIST_HEAD(&dev_priv->bsd_ring.request_list);
-	}
+	init_ring_lists(&dev_priv->render_ring);
+	init_ring_lists(&dev_priv->bsd_ring);
+	init_ring_lists(&dev_priv->blt_ring);
 	for (i = 0; i < 16; i++)
 		INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
 	INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
 			  i915_gem_retire_work_handler);
+	init_completion(&dev_priv->error_completion);
 	spin_lock(&shrink_list_lock);
 	list_add(&dev_priv->mm.shrink_list, &shrink_list);
 	spin_unlock(&shrink_list_lock);
@@ -4666,21 +4661,30 @@ i915_gem_load(struct drm_device *dev)
 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
 		dev_priv->fence_reg_start = 3;
 
-	if (IS_I965G(dev) || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
+	if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
 		dev_priv->num_fence_regs = 16;
 	else
 		dev_priv->num_fence_regs = 8;
 
 	/* Initialize fence registers to zero */
-	if (IS_I965G(dev)) {
+	switch (INTEL_INFO(dev)->gen) {
+	case 6:
+		for (i = 0; i < 16; i++)
+			I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), 0);
+		break;
+	case 5:
+	case 4:
 		for (i = 0; i < 16; i++)
 			I915_WRITE64(FENCE_REG_965_0 + (i * 8), 0);
-	} else {
-		for (i = 0; i < 8; i++)
-			I915_WRITE(FENCE_REG_830_0 + (i * 4), 0);
+		break;
+	case 3:
 		if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
 			for (i = 0; i < 8; i++)
 				I915_WRITE(FENCE_REG_945_8 + (i * 4), 0);
+	case 2:
+		for (i = 0; i < 8; i++)
+			I915_WRITE(FENCE_REG_830_0 + (i * 4), 0);
+		break;
 	}
 	i915_gem_detect_bit_6_swizzle(dev);
 	init_waitqueue_head(&dev_priv->pending_flip_queue);
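The generation switch above relies on case fallthrough: gen 3 clears the extra 945-style bank and then falls into the gen 2 path for the base eight registers. A condensed model that counts register writes per generation (it collapses the distinct Sandybridge/965 64-bit banks into one count, so treat it as a sketch of the control flow only):

    static int fence_writes_for_gen(int gen, int is_945_class)
    {
            int writes = 0;

            switch (gen) {
            case 6:                     /* FENCE_REG_SANDYBRIDGE_0 bank */
            case 5:
            case 4:                     /* FENCE_REG_965_0 bank */
                    writes = 16;        /* sixteen 64-bit registers */
                    break;
            case 3:
                    if (is_945_class)
                            writes += 8; /* extra FENCE_REG_945_8 bank */
                    /* fall through: gen 3 also clears the gen 2 bank */
            case 2:
                    writes += 8;        /* FENCE_REG_830_0 bank */
                    break;
            }
            return writes;
    }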
@@ -4690,8 +4694,8 @@ i915_gem_load(struct drm_device *dev)
  * Create a physically contiguous memory object for this object
  * e.g. for cursor + overlay regs
  */
-int i915_gem_init_phys_object(struct drm_device *dev,
-			      int id, int size, int align)
+static int i915_gem_init_phys_object(struct drm_device *dev,
+				     int id, int size, int align)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_gem_phys_object *phys_obj;
@@ -4723,7 +4727,7 @@ kfree_obj:
 	return ret;
 }
 
-void i915_gem_free_phys_object(struct drm_device *dev, int id)
+static void i915_gem_free_phys_object(struct drm_device *dev, int id)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_gem_phys_object *phys_obj;
@@ -4868,18 +4872,25 @@ i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
 	return 0;
 }
 
-void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv)
+void i915_gem_release(struct drm_device *dev, struct drm_file *file)
 {
-	struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
+	struct drm_i915_file_private *file_priv = file->driver_priv;
 
 	/* Clean up our request list when the client is going away, so that
 	 * later retire_requests won't dereference our soon-to-be-gone
 	 * file_priv.
 	 */
-	mutex_lock(&dev->struct_mutex);
-	while (!list_empty(&i915_file_priv->mm.request_list))
-		list_del_init(i915_file_priv->mm.request_list.next);
-	mutex_unlock(&dev->struct_mutex);
+	spin_lock(&file_priv->mm.lock);
+	while (!list_empty(&file_priv->mm.request_list)) {
+		struct drm_i915_gem_request *request;
+
+		request = list_first_entry(&file_priv->mm.request_list,
+					   struct drm_i915_gem_request,
+					   client_list);
+		list_del(&request->client_list);
+		request->file_priv = NULL;
+	}
+	spin_unlock(&file_priv->mm.lock);
 }
 
 static int
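The reworked release path detaches each request and clears its back-pointer under the per-file spinlock, so a concurrent retire sees an orphaned request rather than a dangling file. The same pattern in self-contained C, with a singly linked list and a pthread mutex standing in for the kernel list and spinlock:

    #include <pthread.h>
    #include <stddef.h>

    struct request { struct request *next; void *file_priv; };

    struct client {
            pthread_mutex_t lock;
            struct request *requests;   /* oldest first */
    };

    static void client_release(struct client *c)
    {
            pthread_mutex_lock(&c->lock);
            while (c->requests != NULL) {
                    struct request *request = c->requests;
                    c->requests = request->next;   /* detach from client */
                    request->file_priv = NULL;     /* retire sees an orphan */
            }
            pthread_mutex_unlock(&c->lock);
    }

Clearing file_priv rather than freeing the request matters: the request stays alive on its ring list and is freed later by the normal retire machinery.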
@@ -4888,12 +4899,10 @@ i915_gpu_is_active(struct drm_device *dev)
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	int lists_empty;
 
-	spin_lock(&dev_priv->mm.active_list_lock);
 	lists_empty = list_empty(&dev_priv->mm.flushing_list) &&
-		      list_empty(&dev_priv->render_ring.active_list);
-	if (HAS_BSD(dev))
-		lists_empty &= list_empty(&dev_priv->bsd_ring.active_list);
-	spin_unlock(&dev_priv->mm.active_list_lock);
+		      list_empty(&dev_priv->render_ring.active_list) &&
+		      list_empty(&dev_priv->bsd_ring.active_list) &&
+		      list_empty(&dev_priv->blt_ring.active_list);
 
 	return !lists_empty;
 }
@@ -4915,7 +4924,7 @@ i915_gem_shrink(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask)
 	if (mutex_trylock(&dev->struct_mutex)) {
 		list_for_each_entry(obj_priv,
 				    &dev_priv->mm.inactive_list,
-				    list)
+				    mm_list)
 			cnt++;
 		mutex_unlock(&dev->struct_mutex);
 	}
@@ -4941,7 +4950,7 @@ rescan:
 
 		list_for_each_entry_safe(obj_priv, next_obj,
 					 &dev_priv->mm.inactive_list,
-					 list) {
+					 mm_list) {
 			if (i915_gem_object_is_purgeable(obj_priv)) {
 				i915_gem_object_unbind(&obj_priv->base);
 				if (--nr_to_scan <= 0)
@@ -4970,7 +4979,7 @@ rescan:
 
 		list_for_each_entry_safe(obj_priv, next_obj,
 					 &dev_priv->mm.inactive_list,
-					 list) {
+					 mm_list) {
 			if (nr_to_scan > 0) {
 				i915_gem_object_unbind(&obj_priv->base);
 				nr_to_scan--;
diff --git a/drivers/gpu/drm/i915/i915_gem_debug.c b/drivers/gpu/drm/i915/i915_gem_debug.c
index 80f380b1d951..48644b840a8d 100644
--- a/drivers/gpu/drm/i915/i915_gem_debug.c
+++ b/drivers/gpu/drm/i915/i915_gem_debug.c
@@ -30,29 +30,112 @@
 #include "i915_drm.h"
 #include "i915_drv.h"
 
-#if WATCH_INACTIVE
-void
-i915_verify_inactive(struct drm_device *dev, char *file, int line)
+#if WATCH_LISTS
+int
+i915_verify_lists(struct drm_device *dev)
 {
+	static int warned;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_gem_object *obj;
-	struct drm_i915_gem_object *obj_priv;
+	struct drm_i915_gem_object *obj;
+	int err = 0;
 
-	list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
-		obj = &obj_priv->base;
-		if (obj_priv->pin_count || obj_priv->active ||
-		    (obj->write_domain & ~(I915_GEM_DOMAIN_CPU |
-					   I915_GEM_DOMAIN_GTT)))
-			DRM_ERROR("inactive %p (p %d a %d w %x)  %s:%d\n",
-				  obj,
-				  obj_priv->pin_count, obj_priv->active,
-				  obj->write_domain, file, line);
+	if (warned)
+		return 0;
+
+	list_for_each_entry(obj, &dev_priv->render_ring.active_list, list) {
+		if (obj->base.dev != dev ||
+		    !atomic_read(&obj->base.refcount.refcount)) {
+			DRM_ERROR("freed render active %p\n", obj);
+			err++;
+			break;
+		} else if (!obj->active ||
+			   (obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0) {
+			DRM_ERROR("invalid render active %p (a %d r %x)\n",
+				  obj,
+				  obj->active,
+				  obj->base.read_domains);
+			err++;
+		} else if (obj->base.write_domain && list_empty(&obj->gpu_write_list)) {
+			DRM_ERROR("invalid render active %p (w %x, gwl %d)\n",
+				  obj,
+				  obj->base.write_domain,
+				  !list_empty(&obj->gpu_write_list));
+			err++;
+		}
+	}
+
+	list_for_each_entry(obj, &dev_priv->mm.flushing_list, list) {
+		if (obj->base.dev != dev ||
+		    !atomic_read(&obj->base.refcount.refcount)) {
+			DRM_ERROR("freed flushing %p\n", obj);
+			err++;
+			break;
+		} else if (!obj->active ||
+			   (obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0 ||
+			   list_empty(&obj->gpu_write_list)) {
+			DRM_ERROR("invalid flushing %p (a %d w %x gwl %d)\n",
+				  obj,
+				  obj->active,
+				  obj->base.write_domain,
+				  !list_empty(&obj->gpu_write_list));
+			err++;
+		}
+	}
+
+	list_for_each_entry(obj, &dev_priv->mm.gpu_write_list, gpu_write_list) {
+		if (obj->base.dev != dev ||
+		    !atomic_read(&obj->base.refcount.refcount)) {
+			DRM_ERROR("freed gpu write %p\n", obj);
+			err++;
+			break;
+		} else if (!obj->active ||
+			   (obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0) {
+			DRM_ERROR("invalid gpu write %p (a %d w %x)\n",
+				  obj,
+				  obj->active,
+				  obj->base.write_domain);
+			err++;
+		}
+	}
+
+	list_for_each_entry(obj, &dev_priv->mm.inactive_list, list) {
+		if (obj->base.dev != dev ||
+		    !atomic_read(&obj->base.refcount.refcount)) {
+			DRM_ERROR("freed inactive %p\n", obj);
+			err++;
+			break;
+		} else if (obj->pin_count || obj->active ||
+			   (obj->base.write_domain & I915_GEM_GPU_DOMAINS)) {
+			DRM_ERROR("invalid inactive %p (p %d a %d w %x)\n",
+				  obj,
+				  obj->pin_count, obj->active,
+				  obj->base.write_domain);
+			err++;
+		}
 	}
+
+	list_for_each_entry(obj, &dev_priv->mm.pinned_list, list) {
+		if (obj->base.dev != dev ||
+		    !atomic_read(&obj->base.refcount.refcount)) {
+			DRM_ERROR("freed pinned %p\n", obj);
+			err++;
+			break;
+		} else if (!obj->pin_count || obj->active ||
+			   (obj->base.write_domain & I915_GEM_GPU_DOMAINS)) {
+			DRM_ERROR("invalid pinned %p (p %d a %d w %x)\n",
+				  obj,
+				  obj->pin_count, obj->active,
+				  obj->base.write_domain);
+			err++;
+		}
+	}
+
+	return warned = err;
 }
 #endif /* WATCH_INACTIVE */
 
 
-#if WATCH_BUF | WATCH_EXEC | WATCH_PWRITE
+#if WATCH_EXEC | WATCH_PWRITE
 static void
 i915_gem_dump_page(struct page *page, uint32_t start, uint32_t end,
 		   uint32_t bias, uint32_t mark)
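i915_verify_lists() turns each list's membership rules into checkable invariants, reporting once (via the static "warned") and returning a count the callers feed to WARN_ON(). One of those invariants extracted as a pure predicate, in self-contained C (the domain mask is a stand-in for I915_GEM_GPU_DOMAINS):

    #include <stdio.h>

    struct gem_obj { int active; unsigned int read_domains; };
    #define GPU_DOMAINS 0x3e    /* stand-in for I915_GEM_GPU_DOMAINS */

    /* An object on a ring's active list must be marked active and must
     * still hold GPU read domains; anything else is list corruption. */
    static int check_render_active(const struct gem_obj *obj)
    {
            if (!obj->active || (obj->read_domains & GPU_DOMAINS) == 0) {
                    fprintf(stderr, "invalid render active %p (a %d r %x)\n",
                            (const void *)obj, obj->active, obj->read_domains);
                    return 1;   /* counts toward err */
            }
            return 0;
    }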
@@ -97,41 +180,6 @@ i915_gem_dump_object(struct drm_gem_object *obj, int len,
 }
 #endif
 
-#if WATCH_LRU
-void
-i915_dump_lru(struct drm_device *dev, const char *where)
-{
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_i915_gem_object *obj_priv;
-
-	DRM_INFO("active list %s {\n", where);
-	spin_lock(&dev_priv->mm.active_list_lock);
-	list_for_each_entry(obj_priv, &dev_priv->mm.active_list,
-			    list)
-	{
-		DRM_INFO("    %p: %08x\n", obj_priv,
-			 obj_priv->last_rendering_seqno);
-	}
-	spin_unlock(&dev_priv->mm.active_list_lock);
-	DRM_INFO("}\n");
-	DRM_INFO("flushing list %s {\n", where);
-	list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list,
-			    list)
-	{
-		DRM_INFO("    %p: %08x\n", obj_priv,
-			 obj_priv->last_rendering_seqno);
-	}
-	DRM_INFO("}\n");
-	DRM_INFO("inactive %s {\n", where);
-	list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
-		DRM_INFO("    %p: %08x\n", obj_priv,
-			 obj_priv->last_rendering_seqno);
-	}
-	DRM_INFO("}\n");
-}
-#endif
-
-
 #if WATCH_COHERENCY
 void
 i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle)
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 5c428fa3e0b3..43a4013f53fa 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -31,49 +31,6 @@
 #include "i915_drv.h"
 #include "i915_drm.h"
 
-static struct drm_i915_gem_object *
-i915_gem_next_active_object(struct drm_device *dev,
-			    struct list_head **render_iter,
-			    struct list_head **bsd_iter)
-{
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_i915_gem_object *render_obj = NULL, *bsd_obj = NULL;
-
-	if (*render_iter != &dev_priv->render_ring.active_list)
-		render_obj = list_entry(*render_iter,
-					struct drm_i915_gem_object,
-					list);
-
-	if (HAS_BSD(dev)) {
-		if (*bsd_iter != &dev_priv->bsd_ring.active_list)
-			bsd_obj = list_entry(*bsd_iter,
-					     struct drm_i915_gem_object,
-					     list);
-
-		if (render_obj == NULL) {
-			*bsd_iter = (*bsd_iter)->next;
-			return bsd_obj;
-		}
-
-		if (bsd_obj == NULL) {
-			*render_iter = (*render_iter)->next;
-			return render_obj;
-		}
-
-		/* XXX can we handle seqno wrapping? */
-		if (render_obj->last_rendering_seqno < bsd_obj->last_rendering_seqno) {
-			*render_iter = (*render_iter)->next;
-			return render_obj;
-		} else {
-			*bsd_iter = (*bsd_iter)->next;
-			return bsd_obj;
-		}
-	} else {
-		*render_iter = (*render_iter)->next;
-		return render_obj;
-	}
-}
-
 static bool
 mark_free(struct drm_i915_gem_object *obj_priv,
 	  struct list_head *unwind)
@@ -83,18 +40,12 @@ mark_free(struct drm_i915_gem_object *obj_priv,
83 return drm_mm_scan_add_block(obj_priv->gtt_space); 40 return drm_mm_scan_add_block(obj_priv->gtt_space);
84} 41}
85 42
86#define i915_for_each_active_object(OBJ, R, B) \
87 *(R) = dev_priv->render_ring.active_list.next; \
88 *(B) = dev_priv->bsd_ring.active_list.next; \
89 while (((OBJ) = i915_gem_next_active_object(dev, (R), (B))) != NULL)
90
91int 43int
92i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignment) 44i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignment)
93{ 45{
94 drm_i915_private_t *dev_priv = dev->dev_private; 46 drm_i915_private_t *dev_priv = dev->dev_private;
95 struct list_head eviction_list, unwind_list; 47 struct list_head eviction_list, unwind_list;
96 struct drm_i915_gem_object *obj_priv; 48 struct drm_i915_gem_object *obj_priv;
97 struct list_head *render_iter, *bsd_iter;
98 int ret = 0; 49 int ret = 0;
99 50
100 i915_gem_retire_requests(dev); 51 i915_gem_retire_requests(dev);
@@ -131,13 +82,13 @@ i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignmen
131 drm_mm_init_scan(&dev_priv->mm.gtt_space, min_size, alignment); 82 drm_mm_init_scan(&dev_priv->mm.gtt_space, min_size, alignment);
132 83
133 /* First see if there is a large enough contiguous idle region... */ 84 /* First see if there is a large enough contiguous idle region... */
134 list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) { 85 list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, mm_list) {
135 if (mark_free(obj_priv, &unwind_list)) 86 if (mark_free(obj_priv, &unwind_list))
136 goto found; 87 goto found;
137 } 88 }
138 89
139 /* Now merge in the soon-to-be-expired objects... */ 90 /* Now merge in the soon-to-be-expired objects... */
140 i915_for_each_active_object(obj_priv, &render_iter, &bsd_iter) { 91 list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) {
141 /* Does the object require an outstanding flush? */ 92 /* Does the object require an outstanding flush? */
142 if (obj_priv->base.write_domain || obj_priv->pin_count) 93 if (obj_priv->base.write_domain || obj_priv->pin_count)
143 continue; 94 continue;
@@ -147,14 +98,14 @@ i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignmen
147 } 98 }
148 99
149 /* Finally add anything with a pending flush (in order of retirement) */ 100 /* Finally add anything with a pending flush (in order of retirement) */
150 list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, list) { 101 list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, mm_list) {
151 if (obj_priv->pin_count) 102 if (obj_priv->pin_count)
152 continue; 103 continue;
153 104
154 if (mark_free(obj_priv, &unwind_list)) 105 if (mark_free(obj_priv, &unwind_list))
155 goto found; 106 goto found;
156 } 107 }
157 i915_for_each_active_object(obj_priv, &render_iter, &bsd_iter) { 108 list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) {
158 if (! obj_priv->base.write_domain || obj_priv->pin_count) 109 if (! obj_priv->base.write_domain || obj_priv->pin_count)
159 continue; 110 continue;
160 111
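The scan above stages eviction candidates cheapest-first: idle inactive buffers, then active buffers needing no flush, then the flushing list, and finally active buffers with outstanding GPU writes; each candidate is fed to the drm_mm scanner until a hole of the requested size and alignment appears. The control flow, condensed into a standalone sketch (scan_add() is a hypothetical stand-in for drm_mm_scan_add_block()):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    struct bo { uint32_t write_domain; int pin_count; bool active, flushing; };

    /* Stub standing in for drm_mm_scan_add_block(): returns true once
     * enough blocks have been staged to satisfy the request. */
    static bool scan_add(struct bo *bo) { (void)bo; return false; }

    static struct bo *stage_victims(struct bo *bos, size_t n)
    {
        for (int pass = 0; pass < 4; pass++) {
            for (size_t i = 0; i < n; i++) {
                struct bo *bo = &bos[i];
                bool want;

                if (bo->pin_count)      /* pinned buffers are untouchable */
                    continue;
                want = (pass == 0 && !bo->active && !bo->flushing) ||
                       (pass == 1 && bo->active && !bo->write_domain) ||
                       (pass == 2 && bo->flushing) ||
                       (pass == 3 && bo->active && bo->write_domain);
                if (want && scan_add(bo))
                    return bo;          /* enough space staged */
            }
        }
        return NULL;
    }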
@@ -212,14 +163,11 @@ i915_gem_evict_everything(struct drm_device *dev)
212 int ret; 163 int ret;
213 bool lists_empty; 164 bool lists_empty;
214 165
215 spin_lock(&dev_priv->mm.active_list_lock);
216 lists_empty = (list_empty(&dev_priv->mm.inactive_list) && 166 lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
217 list_empty(&dev_priv->mm.flushing_list) && 167 list_empty(&dev_priv->mm.flushing_list) &&
218 list_empty(&dev_priv->render_ring.active_list) && 168 list_empty(&dev_priv->render_ring.active_list) &&
219 (!HAS_BSD(dev) 169 list_empty(&dev_priv->bsd_ring.active_list) &&
220 || list_empty(&dev_priv->bsd_ring.active_list))); 170 list_empty(&dev_priv->blt_ring.active_list));
221 spin_unlock(&dev_priv->mm.active_list_lock);
222
223 if (lists_empty) 171 if (lists_empty)
224 return -ENOSPC; 172 return -ENOSPC;
225 173
@@ -234,13 +182,11 @@ i915_gem_evict_everything(struct drm_device *dev)
234 if (ret) 182 if (ret)
235 return ret; 183 return ret;
236 184
237 spin_lock(&dev_priv->mm.active_list_lock);
238 lists_empty = (list_empty(&dev_priv->mm.inactive_list) && 185 lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
239 list_empty(&dev_priv->mm.flushing_list) && 186 list_empty(&dev_priv->mm.flushing_list) &&
240 list_empty(&dev_priv->render_ring.active_list) && 187 list_empty(&dev_priv->render_ring.active_list) &&
241 (!HAS_BSD(dev) 188 list_empty(&dev_priv->bsd_ring.active_list) &&
242 || list_empty(&dev_priv->bsd_ring.active_list))); 189 list_empty(&dev_priv->blt_ring.active_list));
243 spin_unlock(&dev_priv->mm.active_list_lock);
244 BUG_ON(!lists_empty); 190 BUG_ON(!lists_empty);
245 191
246 return 0; 192 return 0;
@@ -258,7 +204,7 @@ i915_gem_evict_inactive(struct drm_device *dev)
258 204
259 obj = &list_first_entry(&dev_priv->mm.inactive_list, 205 obj = &list_first_entry(&dev_priv->mm.inactive_list,
260 struct drm_i915_gem_object, 206 struct drm_i915_gem_object,
261 list)->base; 207 mm_list)->base;
262 208
263 ret = i915_gem_object_unbind(obj); 209 ret = i915_gem_object_unbind(obj);
264 if (ret != 0) { 210 if (ret != 0) {
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 710eca70b323..af352de70be1 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -92,13 +92,13 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
92 uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN; 92 uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
93 uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN; 93 uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
94 94
95 if (IS_IRONLAKE(dev) || IS_GEN6(dev)) { 95 if (IS_GEN5(dev) || IS_GEN6(dev)) {
96	 /* On Ironlake, whatever the DRAM config, the GPU always 96	 /* On Ironlake, whatever the DRAM config, the GPU always
97	 * does the same swizzling setup. 97	 * does the same swizzling setup.
98 */ 98 */
99 swizzle_x = I915_BIT_6_SWIZZLE_9_10; 99 swizzle_x = I915_BIT_6_SWIZZLE_9_10;
100 swizzle_y = I915_BIT_6_SWIZZLE_9; 100 swizzle_y = I915_BIT_6_SWIZZLE_9;
101 } else if (!IS_I9XX(dev)) { 101 } else if (IS_GEN2(dev)) {
102 /* As far as we know, the 865 doesn't have these bit 6 102 /* As far as we know, the 865 doesn't have these bit 6
103 * swizzling issues. 103 * swizzling issues.
104 */ 104 */
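IS_GEN5 replaces IS_IRONLAKE here because generation is what the swizzle logic actually depends on: on gen5/6 the GPU applies one fixed swizzle regardless of DRAM configuration. Mode I915_BIT_6_SWIZZLE_9_10 means address bit 6 is XORed with bits 9 and 10 (mode _9 uses bit 9 alone), so the CPU-side address fixup reduces to a single XOR; as a sketch:

    #include <stdint.h>

    /* Fold bits 9 and 10 into bit 6, per I915_BIT_6_SWIZZLE_9_10. */
    static uint32_t swizzle_addr_9_10(uint32_t addr)
    {
        uint32_t bit9  = (addr >> 9)  & 1;
        uint32_t bit10 = (addr >> 10) & 1;

        return addr ^ ((bit9 ^ bit10) << 6);
    }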
@@ -190,19 +190,19 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
190 if (tiling_mode == I915_TILING_NONE) 190 if (tiling_mode == I915_TILING_NONE)
191 return true; 191 return true;
192 192
193 if (!IS_I9XX(dev) || 193 if (IS_GEN2(dev) ||
194 (tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))) 194 (tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev)))
195 tile_width = 128; 195 tile_width = 128;
196 else 196 else
197 tile_width = 512; 197 tile_width = 512;
198 198
199 /* check maximum stride & object size */ 199 /* check maximum stride & object size */
200 if (IS_I965G(dev)) { 200 if (INTEL_INFO(dev)->gen >= 4) {
201 /* i965 stores the end address of the gtt mapping in the fence 201 /* i965 stores the end address of the gtt mapping in the fence
202 * reg, so dont bother to check the size */ 202 * reg, so dont bother to check the size */
203 if (stride / 128 > I965_FENCE_MAX_PITCH_VAL) 203 if (stride / 128 > I965_FENCE_MAX_PITCH_VAL)
204 return false; 204 return false;
205 } else if (IS_GEN3(dev) || IS_GEN2(dev)) { 205 } else {
206 if (stride > 8192) 206 if (stride > 8192)
207 return false; 207 return false;
208 208
@@ -216,7 +216,7 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
216 } 216 }
217 217
218 /* 965+ just needs multiples of tile width */ 218 /* 965+ just needs multiples of tile width */
219 if (IS_I965G(dev)) { 219 if (INTEL_INFO(dev)->gen >= 4) {
220 if (stride & (tile_width - 1)) 220 if (stride & (tile_width - 1))
221 return false; 221 return false;
222 return true; 222 return true;
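The stride rules split cleanly at gen4, which is what the INTEL_INFO(dev)->gen >= 4 tests make explicit: 965+ fence registers store an end address, so any stride that is a multiple of the tile width works (up to the pitch-field limit), whereas pre-965 fences encode the pitch as a power of two capped at 8 KiB. Restated as a standalone check (a sketch; the 8192 cap is the one visible above, the power-of-two rule follows from the log2 pitch encoding of older fences, and tile_width is assumed to be a power of two):

    #include <stdbool.h>
    #include <stdint.h>

    static bool fence_stride_ok(int gen, uint32_t stride, uint32_t tile_width)
    {
        if (stride == 0)
            return false;
        if (gen >= 4)   /* just a multiple of the tile width */
            return (stride & (tile_width - 1)) == 0;
        /* gen2/3: power of two, at most 8K */
        return stride <= 8192 && (stride & (stride - 1)) == 0;
    }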
@@ -244,16 +244,18 @@ i915_gem_object_fence_offset_ok(struct drm_gem_object *obj, int tiling_mode)
244 if (tiling_mode == I915_TILING_NONE) 244 if (tiling_mode == I915_TILING_NONE)
245 return true; 245 return true;
246 246
247 if (!IS_I965G(dev)) { 247 if (INTEL_INFO(dev)->gen >= 4)
248 if (obj_priv->gtt_offset & (obj->size - 1)) 248 return true;
249
250 if (obj_priv->gtt_offset & (obj->size - 1))
251 return false;
252
253 if (IS_GEN3(dev)) {
254 if (obj_priv->gtt_offset & ~I915_FENCE_START_MASK)
255 return false;
256 } else {
257 if (obj_priv->gtt_offset & ~I830_FENCE_START_MASK)
249 return false; 258 return false;
250 if (IS_I9XX(dev)) {
251 if (obj_priv->gtt_offset & ~I915_FENCE_START_MASK)
252 return false;
253 } else {
254 if (obj_priv->gtt_offset & ~I830_FENCE_START_MASK)
255 return false;
256 }
257 } 259 }
258 260
259 return true; 261 return true;
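The restructured check also reads better top-down: gen4+ returns true immediately, and everything older must additionally sit at a naturally aligned offset. offset & (size - 1) is the usual power-of-two alignment test, valid here because pre-965 fenced objects are power-of-two sized; a minimal illustration:

    #include <stdbool.h>
    #include <stdint.h>

    /* offset is a multiple of size; size must be a power of two */
    static bool naturally_aligned(uint32_t offset, uint32_t size)
    {
        return (offset & (size - 1)) == 0;
    }

    /* naturally_aligned(0x30000, 0x10000) -> true
     * naturally_aligned(0x38000, 0x10000) -> false */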
@@ -271,7 +273,11 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
271 drm_i915_private_t *dev_priv = dev->dev_private; 273 drm_i915_private_t *dev_priv = dev->dev_private;
272 struct drm_gem_object *obj; 274 struct drm_gem_object *obj;
273 struct drm_i915_gem_object *obj_priv; 275 struct drm_i915_gem_object *obj_priv;
274 int ret = 0; 276 int ret;
277
278 ret = i915_gem_check_is_wedged(dev);
279 if (ret)
280 return ret;
275 281
276 obj = drm_gem_object_lookup(dev, file_priv, args->handle); 282 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
277 if (obj == NULL) 283 if (obj == NULL)
@@ -328,7 +334,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
328 if (!i915_gem_object_fence_offset_ok(obj, args->tiling_mode)) 334 if (!i915_gem_object_fence_offset_ok(obj, args->tiling_mode))
329 ret = i915_gem_object_unbind(obj); 335 ret = i915_gem_object_unbind(obj);
330 else if (obj_priv->fence_reg != I915_FENCE_REG_NONE) 336 else if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
331 ret = i915_gem_object_put_fence_reg(obj); 337 ret = i915_gem_object_put_fence_reg(obj, true);
332 else 338 else
333 i915_gem_release_mmap(obj); 339 i915_gem_release_mmap(obj);
334 340
@@ -399,16 +405,14 @@ i915_gem_get_tiling(struct drm_device *dev, void *data,
399 * bit 17 of its physical address and therefore being interpreted differently 405 * bit 17 of its physical address and therefore being interpreted differently
400 * by the GPU. 406 * by the GPU.
401 */ 407 */
402static int 408static void
403i915_gem_swizzle_page(struct page *page) 409i915_gem_swizzle_page(struct page *page)
404{ 410{
411 char temp[64];
405 char *vaddr; 412 char *vaddr;
406 int i; 413 int i;
407 char temp[64];
408 414
409 vaddr = kmap(page); 415 vaddr = kmap(page);
410 if (vaddr == NULL)
411 return -ENOMEM;
412 416
413 for (i = 0; i < PAGE_SIZE; i += 128) { 417 for (i = 0; i < PAGE_SIZE; i += 128) {
414 memcpy(temp, &vaddr[i], 64); 418 memcpy(temp, &vaddr[i], 64);
@@ -417,8 +421,6 @@ i915_gem_swizzle_page(struct page *page)
417 } 421 }
418 422
419 kunmap(page); 423 kunmap(page);
420
421 return 0;
422} 424}
423 425
424void 426void
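i915_gem_swizzle_page() can lose its error path because kmap() always returns a usable mapping (it may sleep, but it cannot fail), so the int return was dead weight. The body swaps the two 64-byte halves of every 128-byte chunk, undoing the bit-17 component of the swizzle; a standalone rendering of the same loop, easy to exercise in isolation:

    #include <stdint.h>
    #include <string.h>

    #define PAGE_SIZE 4096

    /* Exchange the 64-byte halves of each 128-byte chunk in a page. */
    static void swap_bit17_halves(uint8_t *vaddr)
    {
        uint8_t temp[64];

        for (int i = 0; i < PAGE_SIZE; i += 128) {
            memcpy(temp, vaddr + i, 64);
            memcpy(vaddr + i, vaddr + i + 64, 64);
            memcpy(vaddr + i + 64, temp, 64);
        }
    }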
@@ -440,11 +442,7 @@ i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj)
440 char new_bit_17 = page_to_phys(obj_priv->pages[i]) >> 17; 442 char new_bit_17 = page_to_phys(obj_priv->pages[i]) >> 17;
441 if ((new_bit_17 & 0x1) != 443 if ((new_bit_17 & 0x1) !=
442 (test_bit(i, obj_priv->bit_17) != 0)) { 444 (test_bit(i, obj_priv->bit_17) != 0)) {
443 int ret = i915_gem_swizzle_page(obj_priv->pages[i]); 445 i915_gem_swizzle_page(obj_priv->pages[i]);
444 if (ret != 0) {
445 DRM_ERROR("Failed to swizzle page\n");
446 return;
447 }
448 set_page_dirty(obj_priv->pages[i]); 446 set_page_dirty(obj_priv->pages[i]);
449 } 447 }
450 } 448 }
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index b80010f0c4c9..729fd0c91d7b 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -85,7 +85,7 @@ ironlake_disable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
85} 85}
86 86
87/* For display hotplug interrupt */ 87/* For display hotplug interrupt */
88void 88static void
89ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask) 89ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
90{ 90{
91 if ((dev_priv->irq_mask_reg & mask) != 0) { 91 if ((dev_priv->irq_mask_reg & mask) != 0) {
@@ -172,7 +172,7 @@ void intel_enable_asle (struct drm_device *dev)
172 else { 172 else {
173 i915_enable_pipestat(dev_priv, 1, 173 i915_enable_pipestat(dev_priv, 1,
174 PIPE_LEGACY_BLC_EVENT_ENABLE); 174 PIPE_LEGACY_BLC_EVENT_ENABLE);
175 if (IS_I965G(dev)) 175 if (INTEL_INFO(dev)->gen >= 4)
176 i915_enable_pipestat(dev_priv, 0, 176 i915_enable_pipestat(dev_priv, 0,
177 PIPE_LEGACY_BLC_EVENT_ENABLE); 177 PIPE_LEGACY_BLC_EVENT_ENABLE);
178 } 178 }
@@ -191,12 +191,7 @@ static int
191i915_pipe_enabled(struct drm_device *dev, int pipe) 191i915_pipe_enabled(struct drm_device *dev, int pipe)
192{ 192{
193 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 193 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
194 unsigned long pipeconf = pipe ? PIPEBCONF : PIPEACONF; 194 return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;
195
196 if (I915_READ(pipeconf) & PIPEACONF_ENABLE)
197 return 1;
198
199 return 0;
200} 195}
201 196
202/* Called from drm generic code, passed a 'crtc', which 197/* Called from drm generic code, passed a 'crtc', which
@@ -207,10 +202,7 @@ u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
207 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 202 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
208 unsigned long high_frame; 203 unsigned long high_frame;
209 unsigned long low_frame; 204 unsigned long low_frame;
210 u32 high1, high2, low, count; 205 u32 high1, high2, low;
211
212 high_frame = pipe ? PIPEBFRAMEHIGH : PIPEAFRAMEHIGH;
213 low_frame = pipe ? PIPEBFRAMEPIXEL : PIPEAFRAMEPIXEL;
214 206
215 if (!i915_pipe_enabled(dev, pipe)) { 207 if (!i915_pipe_enabled(dev, pipe)) {
216 DRM_DEBUG_DRIVER("trying to get vblank count for disabled " 208 DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
@@ -218,23 +210,23 @@ u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
218 return 0; 210 return 0;
219 } 211 }
220 212
213 high_frame = pipe ? PIPEBFRAMEHIGH : PIPEAFRAMEHIGH;
214 low_frame = pipe ? PIPEBFRAMEPIXEL : PIPEAFRAMEPIXEL;
215
221 /* 216 /*
222 * High & low register fields aren't synchronized, so make sure 217 * High & low register fields aren't synchronized, so make sure
223 * we get a low value that's stable across two reads of the high 218 * we get a low value that's stable across two reads of the high
224 * register. 219 * register.
225 */ 220 */
226 do { 221 do {
227 high1 = ((I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >> 222 high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
228 PIPE_FRAME_HIGH_SHIFT); 223 low = I915_READ(low_frame) & PIPE_FRAME_LOW_MASK;
229 low = ((I915_READ(low_frame) & PIPE_FRAME_LOW_MASK) >> 224 high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
230 PIPE_FRAME_LOW_SHIFT);
231 high2 = ((I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
232 PIPE_FRAME_HIGH_SHIFT);
233 } while (high1 != high2); 225 } while (high1 != high2);
234 226
235 count = (high1 << 8) | low; 227 high1 >>= PIPE_FRAME_HIGH_SHIFT;
236 228 low >>= PIPE_FRAME_LOW_SHIFT;
237 return count; 229 return (high1 << 8) | low;
238} 230}
239 231
240u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe) 232u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
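Deferring the shifts until after the loop lets the retry compare raw register samples. The loop itself is the classic stable read of a counter split across two registers: sample high, low, high again, and retry if the high half moved, since that means the low half rolled over mid-read. The same pattern with a stand-in register accessor (mask values illustrative, mirroring PIPE_FRAME_HIGH_MASK and PIPE_FRAME_LOW_MASK):

    #include <stdint.h>

    /* Stub standing in for a platform readl()/I915_READ(). */
    static uint32_t mmio_read(uint32_t reg) { (void)reg; return 0; }

    #define FRAME_HIGH_MASK 0x0000ffffu
    #define FRAME_LOW_MASK  0xff000000u   /* low frame bits live at 31:24 */

    static uint32_t read_split_counter(uint32_t hi_reg, uint32_t lo_reg)
    {
        uint32_t high1, high2, low;

        do {
            high1 = mmio_read(hi_reg) & FRAME_HIGH_MASK;
            low   = mmio_read(lo_reg) & FRAME_LOW_MASK;
            high2 = mmio_read(hi_reg) & FRAME_HIGH_MASK;
        } while (high1 != high2);      /* low rolled over: resample */

        return (high1 << 8) | (low >> 24);
    }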
@@ -260,16 +252,12 @@ static void i915_hotplug_work_func(struct work_struct *work)
260 hotplug_work); 252 hotplug_work);
261 struct drm_device *dev = dev_priv->dev; 253 struct drm_device *dev = dev_priv->dev;
262 struct drm_mode_config *mode_config = &dev->mode_config; 254 struct drm_mode_config *mode_config = &dev->mode_config;
263 struct drm_encoder *encoder; 255 struct intel_encoder *encoder;
264 256
265 if (mode_config->num_encoder) { 257 list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
266 list_for_each_entry(encoder, &mode_config->encoder_list, head) { 258 if (encoder->hot_plug)
267 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); 259 encoder->hot_plug(encoder);
268 260
269 if (intel_encoder->hot_plug)
270 (*intel_encoder->hot_plug) (intel_encoder);
271 }
272 }
273 /* Just fire off a uevent and let userspace tell us what to do */ 261 /* Just fire off a uevent and let userspace tell us what to do */
274 drm_helper_hpd_irq_event(dev); 262 drm_helper_hpd_irq_event(dev);
275} 263}
@@ -305,13 +293,30 @@ static void i915_handle_rps_change(struct drm_device *dev)
305 return; 293 return;
306} 294}
307 295
308irqreturn_t ironlake_irq_handler(struct drm_device *dev) 296static void notify_ring(struct drm_device *dev,
297 struct intel_ring_buffer *ring)
298{
299 struct drm_i915_private *dev_priv = dev->dev_private;
300 u32 seqno = ring->get_seqno(dev, ring);
301 ring->irq_gem_seqno = seqno;
302 trace_i915_gem_request_complete(dev, seqno);
303 wake_up_all(&ring->irq_queue);
304 dev_priv->hangcheck_count = 0;
305 mod_timer(&dev_priv->hangcheck_timer,
306 jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
307}
308
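notify_ring() folds the steps every user-interrupt source used to open-code (sample the ring's seqno, trace it, wake sleepers, re-arm the hangcheck timer) into one helper, so wiring up the new BLT ring costs a single call per interrupt bit. Taken one step further, the bit tests in the handler below could become a table walk; a sketch of that shape (types and names hypothetical, not the driver's):

    #include <stdint.h>

    struct ring { int id; };

    /* Stand-in: would wake waiters and kick the hangcheck timer. */
    static void notify_ring(struct ring *r) { (void)r; }

    struct irq_source { uint32_t bit; struct ring *ring; };

    static void dispatch_gt_irq(uint32_t gt_iir,
                                const struct irq_source *src, int n)
    {
        for (int i = 0; i < n; i++)
            if (gt_iir & src[i].bit)
                notify_ring(src[i].ring);
    }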
309static irqreturn_t ironlake_irq_handler(struct drm_device *dev)
309{ 310{
310 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 311 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
311 int ret = IRQ_NONE; 312 int ret = IRQ_NONE;
312 u32 de_iir, gt_iir, de_ier, pch_iir; 313 u32 de_iir, gt_iir, de_ier, pch_iir;
314 u32 hotplug_mask;
313 struct drm_i915_master_private *master_priv; 315 struct drm_i915_master_private *master_priv;
314 struct intel_ring_buffer *render_ring = &dev_priv->render_ring; 316 u32 bsd_usr_interrupt = GT_BSD_USER_INTERRUPT;
317
318 if (IS_GEN6(dev))
319 bsd_usr_interrupt = GT_GEN6_BSD_USER_INTERRUPT;
315 320
316 /* disable master interrupt before clearing iir */ 321 /* disable master interrupt before clearing iir */
317 de_ier = I915_READ(DEIER); 322 de_ier = I915_READ(DEIER);
@@ -325,6 +330,11 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev)
325 if (de_iir == 0 && gt_iir == 0 && pch_iir == 0) 330 if (de_iir == 0 && gt_iir == 0 && pch_iir == 0)
326 goto done; 331 goto done;
327 332
333 if (HAS_PCH_CPT(dev))
334 hotplug_mask = SDE_HOTPLUG_MASK_CPT;
335 else
336 hotplug_mask = SDE_HOTPLUG_MASK;
337
328 ret = IRQ_HANDLED; 338 ret = IRQ_HANDLED;
329 339
330 if (dev->primary->master) { 340 if (dev->primary->master) {
@@ -334,29 +344,24 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev)
334 READ_BREADCRUMB(dev_priv); 344 READ_BREADCRUMB(dev_priv);
335 } 345 }
336 346
337 if (gt_iir & GT_PIPE_NOTIFY) { 347 if (gt_iir & GT_PIPE_NOTIFY)
338 u32 seqno = render_ring->get_gem_seqno(dev, render_ring); 348 notify_ring(dev, &dev_priv->render_ring);
339 render_ring->irq_gem_seqno = seqno; 349 if (gt_iir & bsd_usr_interrupt)
340 trace_i915_gem_request_complete(dev, seqno); 350 notify_ring(dev, &dev_priv->bsd_ring);
341 DRM_WAKEUP(&dev_priv->render_ring.irq_queue); 351 if (HAS_BLT(dev) && gt_iir & GT_BLT_USER_INTERRUPT)
342 dev_priv->hangcheck_count = 0; 352 notify_ring(dev, &dev_priv->blt_ring);
343 mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
344 }
345 if (gt_iir & GT_BSD_USER_INTERRUPT)
346 DRM_WAKEUP(&dev_priv->bsd_ring.irq_queue);
347
348 353
349 if (de_iir & DE_GSE) 354 if (de_iir & DE_GSE)
350 ironlake_opregion_gse_intr(dev); 355 intel_opregion_gse_intr(dev);
351 356
352 if (de_iir & DE_PLANEA_FLIP_DONE) { 357 if (de_iir & DE_PLANEA_FLIP_DONE) {
353 intel_prepare_page_flip(dev, 0); 358 intel_prepare_page_flip(dev, 0);
354 intel_finish_page_flip(dev, 0); 359 intel_finish_page_flip_plane(dev, 0);
355 } 360 }
356 361
357 if (de_iir & DE_PLANEB_FLIP_DONE) { 362 if (de_iir & DE_PLANEB_FLIP_DONE) {
358 intel_prepare_page_flip(dev, 1); 363 intel_prepare_page_flip(dev, 1);
359 intel_finish_page_flip(dev, 1); 364 intel_finish_page_flip_plane(dev, 1);
360 } 365 }
361 366
362 if (de_iir & DE_PIPEA_VBLANK) 367 if (de_iir & DE_PIPEA_VBLANK)
@@ -366,10 +371,8 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev)
366 drm_handle_vblank(dev, 1); 371 drm_handle_vblank(dev, 1);
367 372
368 /* check event from PCH */ 373 /* check event from PCH */
369 if ((de_iir & DE_PCH_EVENT) && 374 if ((de_iir & DE_PCH_EVENT) && (pch_iir & hotplug_mask))
370 (pch_iir & SDE_HOTPLUG_MASK)) {
371 queue_work(dev_priv->wq, &dev_priv->hotplug_work); 375 queue_work(dev_priv->wq, &dev_priv->hotplug_work);
372 }
373 376
374 if (de_iir & DE_PCU_EVENT) { 377 if (de_iir & DE_PCU_EVENT) {
375 I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS)); 378 I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));
@@ -404,23 +407,20 @@ static void i915_error_work_func(struct work_struct *work)
404 char *reset_event[] = { "RESET=1", NULL }; 407 char *reset_event[] = { "RESET=1", NULL };
405 char *reset_done_event[] = { "ERROR=0", NULL }; 408 char *reset_done_event[] = { "ERROR=0", NULL };
406 409
407 DRM_DEBUG_DRIVER("generating error event\n");
408 kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event); 410 kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event);
409 411
410 if (atomic_read(&dev_priv->mm.wedged)) { 412 if (atomic_read(&dev_priv->mm.wedged)) {
411 if (IS_I965G(dev)) { 413 DRM_DEBUG_DRIVER("resetting chip\n");
412 DRM_DEBUG_DRIVER("resetting chip\n"); 414 kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_event);
413 kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_event); 415 if (!i915_reset(dev, GRDOM_RENDER)) {
414 if (!i965_reset(dev, GDRST_RENDER)) { 416 atomic_set(&dev_priv->mm.wedged, 0);
415 atomic_set(&dev_priv->mm.wedged, 0); 417 kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_done_event);
416 kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_done_event);
417 }
418 } else {
419 DRM_DEBUG_DRIVER("reboot required\n");
420 } 418 }
419 complete_all(&dev_priv->error_completion);
421 } 420 }
422} 421}
423 422
423#ifdef CONFIG_DEBUG_FS
424static struct drm_i915_error_object * 424static struct drm_i915_error_object *
425i915_error_object_create(struct drm_device *dev, 425i915_error_object_create(struct drm_device *dev,
426 struct drm_gem_object *src) 426 struct drm_gem_object *src)
@@ -510,7 +510,7 @@ i915_get_bbaddr(struct drm_device *dev, u32 *ring)
510 510
511 if (IS_I830(dev) || IS_845G(dev)) 511 if (IS_I830(dev) || IS_845G(dev))
512 cmd = MI_BATCH_BUFFER; 512 cmd = MI_BATCH_BUFFER;
513 else if (IS_I965G(dev)) 513 else if (INTEL_INFO(dev)->gen >= 4)
514 cmd = (MI_BATCH_BUFFER_START | (2 << 6) | 514 cmd = (MI_BATCH_BUFFER_START | (2 << 6) |
515 MI_BATCH_NON_SECURE_I965); 515 MI_BATCH_NON_SECURE_I965);
516 else 516 else
@@ -583,13 +583,16 @@ static void i915_capture_error_state(struct drm_device *dev)
583 return; 583 return;
584 } 584 }
585 585
586 error->seqno = i915_get_gem_seqno(dev, &dev_priv->render_ring); 586 DRM_DEBUG_DRIVER("generating error event\n");
587
588 error->seqno =
589 dev_priv->render_ring.get_seqno(dev, &dev_priv->render_ring);
587 error->eir = I915_READ(EIR); 590 error->eir = I915_READ(EIR);
588 error->pgtbl_er = I915_READ(PGTBL_ER); 591 error->pgtbl_er = I915_READ(PGTBL_ER);
589 error->pipeastat = I915_READ(PIPEASTAT); 592 error->pipeastat = I915_READ(PIPEASTAT);
590 error->pipebstat = I915_READ(PIPEBSTAT); 593 error->pipebstat = I915_READ(PIPEBSTAT);
591 error->instpm = I915_READ(INSTPM); 594 error->instpm = I915_READ(INSTPM);
592 if (!IS_I965G(dev)) { 595 if (INTEL_INFO(dev)->gen < 4) {
593 error->ipeir = I915_READ(IPEIR); 596 error->ipeir = I915_READ(IPEIR);
594 error->ipehr = I915_READ(IPEHR); 597 error->ipehr = I915_READ(IPEHR);
595 error->instdone = I915_READ(INSTDONE); 598 error->instdone = I915_READ(INSTDONE);
@@ -611,9 +614,7 @@ static void i915_capture_error_state(struct drm_device *dev)
611 batchbuffer[0] = NULL; 614 batchbuffer[0] = NULL;
612 batchbuffer[1] = NULL; 615 batchbuffer[1] = NULL;
613 count = 0; 616 count = 0;
614 list_for_each_entry(obj_priv, 617 list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) {
615 &dev_priv->render_ring.active_list, list) {
616
617 struct drm_gem_object *obj = &obj_priv->base; 618 struct drm_gem_object *obj = &obj_priv->base;
618 619
619 if (batchbuffer[0] == NULL && 620 if (batchbuffer[0] == NULL &&
@@ -630,7 +631,7 @@ static void i915_capture_error_state(struct drm_device *dev)
630 } 631 }
631 /* Scan the other lists for completeness for those bizarre errors. */ 632 /* Scan the other lists for completeness for those bizarre errors. */
632 if (batchbuffer[0] == NULL || batchbuffer[1] == NULL) { 633 if (batchbuffer[0] == NULL || batchbuffer[1] == NULL) {
633 list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, list) { 634 list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, mm_list) {
634 struct drm_gem_object *obj = &obj_priv->base; 635 struct drm_gem_object *obj = &obj_priv->base;
635 636
636 if (batchbuffer[0] == NULL && 637 if (batchbuffer[0] == NULL &&
@@ -648,7 +649,7 @@ static void i915_capture_error_state(struct drm_device *dev)
648 } 649 }
649 } 650 }
650 if (batchbuffer[0] == NULL || batchbuffer[1] == NULL) { 651 if (batchbuffer[0] == NULL || batchbuffer[1] == NULL) {
651 list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) { 652 list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, mm_list) {
652 struct drm_gem_object *obj = &obj_priv->base; 653 struct drm_gem_object *obj = &obj_priv->base;
653 654
654 if (batchbuffer[0] == NULL && 655 if (batchbuffer[0] == NULL &&
@@ -667,7 +668,7 @@ static void i915_capture_error_state(struct drm_device *dev)
667 } 668 }
668 669
669 /* We need to copy these to an anonymous buffer as the simplest 670 /* We need to copy these to an anonymous buffer as the simplest
670 * method to avoid being overwritten by userpace. 671 * method to avoid being overwritten by userspace.
671 */ 672 */
672 error->batchbuffer[0] = i915_error_object_create(dev, batchbuffer[0]); 673 error->batchbuffer[0] = i915_error_object_create(dev, batchbuffer[0]);
673 if (batchbuffer[1] != batchbuffer[0]) 674 if (batchbuffer[1] != batchbuffer[0])
@@ -689,8 +690,7 @@ static void i915_capture_error_state(struct drm_device *dev)
689 690
690 if (error->active_bo) { 691 if (error->active_bo) {
691 int i = 0; 692 int i = 0;
692 list_for_each_entry(obj_priv, 693 list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) {
693 &dev_priv->render_ring.active_list, list) {
694 struct drm_gem_object *obj = &obj_priv->base; 694 struct drm_gem_object *obj = &obj_priv->base;
695 695
696 error->active_bo[i].size = obj->size; 696 error->active_bo[i].size = obj->size;
@@ -743,6 +743,9 @@ void i915_destroy_error_state(struct drm_device *dev)
743 if (error) 743 if (error)
744 i915_error_state_free(dev, error); 744 i915_error_state_free(dev, error);
745} 745}
746#else
747#define i915_capture_error_state(x)
748#endif
746 749
747static void i915_report_and_clear_eir(struct drm_device *dev) 750static void i915_report_and_clear_eir(struct drm_device *dev)
748{ 751{
@@ -784,7 +787,7 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
784 } 787 }
785 } 788 }
786 789
787 if (IS_I9XX(dev)) { 790 if (!IS_GEN2(dev)) {
788 if (eir & I915_ERROR_PAGE_TABLE) { 791 if (eir & I915_ERROR_PAGE_TABLE) {
789 u32 pgtbl_err = I915_READ(PGTBL_ER); 792 u32 pgtbl_err = I915_READ(PGTBL_ER);
790 printk(KERN_ERR "page table error\n"); 793 printk(KERN_ERR "page table error\n");
@@ -810,7 +813,7 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
810 printk(KERN_ERR "instruction error\n"); 813 printk(KERN_ERR "instruction error\n");
811 printk(KERN_ERR " INSTPM: 0x%08x\n", 814 printk(KERN_ERR " INSTPM: 0x%08x\n",
812 I915_READ(INSTPM)); 815 I915_READ(INSTPM));
813 if (!IS_I965G(dev)) { 816 if (INTEL_INFO(dev)->gen < 4) {
814 u32 ipeir = I915_READ(IPEIR); 817 u32 ipeir = I915_READ(IPEIR);
815 818
816 printk(KERN_ERR " IPEIR: 0x%08x\n", 819 printk(KERN_ERR " IPEIR: 0x%08x\n",
@@ -875,12 +878,17 @@ static void i915_handle_error(struct drm_device *dev, bool wedged)
875 i915_report_and_clear_eir(dev); 878 i915_report_and_clear_eir(dev);
876 879
877 if (wedged) { 880 if (wedged) {
881 INIT_COMPLETION(dev_priv->error_completion);
878 atomic_set(&dev_priv->mm.wedged, 1); 882 atomic_set(&dev_priv->mm.wedged, 1);
879 883
880 /* 884 /*
881 * Wakeup waiting processes so they don't hang 885 * Wakeup waiting processes so they don't hang
882 */ 886 */
883 DRM_WAKEUP(&dev_priv->render_ring.irq_queue); 887 wake_up_all(&dev_priv->render_ring.irq_queue);
888 if (HAS_BSD(dev))
889 wake_up_all(&dev_priv->bsd_ring.irq_queue);
890 if (HAS_BLT(dev))
891 wake_up_all(&dev_priv->blt_ring.irq_queue);
884 } 892 }
885 893
886 queue_work(dev_priv->wq, &dev_priv->error_work); 894 queue_work(dev_priv->wq, &dev_priv->error_work);
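error_completion gives waiters a race-free view of a reset in flight: i915_handle_error() re-initialises the completion before it sets the wedged flag, and the error worker calls complete_all() once the GPU is back, so any thread that observed wedged != 0 can sleep on the completion instead of polling. The waiting side, as i915_gem_check_is_wedged() is used in the set_tiling ioctl earlier, looks roughly like this kernel-context sketch (not the driver's exact helper):

    #include <linux/atomic.h>
    #include <linux/completion.h>
    #include <linux/errno.h>

    /* wedged is dev_priv->mm.wedged, done is dev_priv->error_completion */
    static int wait_for_gpu_reset(atomic_t *wedged, struct completion *done)
    {
        int ret;

        if (!atomic_read(wedged))
            return 0;

        /* A reset is pending; the error worker signals this
         * completion via complete_all() when it finishes. */
        ret = wait_for_completion_interruptible(done);
        if (ret)
            return ret;

        /* Still wedged after the reset ran: terminally broken. */
        return atomic_read(wedged) ? -EIO : 0;
    }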
@@ -911,7 +919,7 @@ static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
911 919
912 /* Potential stall - if we see that the flip has happened, assume a missed interrupt */ 920 /* Potential stall - if we see that the flip has happened, assume a missed interrupt */
913 obj_priv = to_intel_bo(work->pending_flip_obj); 921 obj_priv = to_intel_bo(work->pending_flip_obj);
914 if(IS_I965G(dev)) { 922 if (INTEL_INFO(dev)->gen >= 4) {
915 int dspsurf = intel_crtc->plane == 0 ? DSPASURF : DSPBSURF; 923 int dspsurf = intel_crtc->plane == 0 ? DSPASURF : DSPBSURF;
916 stall_detected = I915_READ(dspsurf) == obj_priv->gtt_offset; 924 stall_detected = I915_READ(dspsurf) == obj_priv->gtt_offset;
917 } else { 925 } else {
@@ -941,7 +949,6 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
941 unsigned long irqflags; 949 unsigned long irqflags;
942 int irq_received; 950 int irq_received;
943 int ret = IRQ_NONE; 951 int ret = IRQ_NONE;
944 struct intel_ring_buffer *render_ring = &dev_priv->render_ring;
945 952
946 atomic_inc(&dev_priv->irq_received); 953 atomic_inc(&dev_priv->irq_received);
947 954
@@ -950,7 +957,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
950 957
951 iir = I915_READ(IIR); 958 iir = I915_READ(IIR);
952 959
953 if (IS_I965G(dev)) 960 if (INTEL_INFO(dev)->gen >= 4)
954 vblank_status = PIPE_START_VBLANK_INTERRUPT_STATUS; 961 vblank_status = PIPE_START_VBLANK_INTERRUPT_STATUS;
955 else 962 else
956 vblank_status = PIPE_VBLANK_INTERRUPT_STATUS; 963 vblank_status = PIPE_VBLANK_INTERRUPT_STATUS;
@@ -1018,18 +1025,10 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
1018 READ_BREADCRUMB(dev_priv); 1025 READ_BREADCRUMB(dev_priv);
1019 } 1026 }
1020 1027
1021 if (iir & I915_USER_INTERRUPT) { 1028 if (iir & I915_USER_INTERRUPT)
1022 u32 seqno = 1029 notify_ring(dev, &dev_priv->render_ring);
1023 render_ring->get_gem_seqno(dev, render_ring);
1024 render_ring->irq_gem_seqno = seqno;
1025 trace_i915_gem_request_complete(dev, seqno);
1026 DRM_WAKEUP(&dev_priv->render_ring.irq_queue);
1027 dev_priv->hangcheck_count = 0;
1028 mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
1029 }
1030
1031 if (HAS_BSD(dev) && (iir & I915_BSD_USER_INTERRUPT)) 1030 if (HAS_BSD(dev) && (iir & I915_BSD_USER_INTERRUPT))
1032 DRM_WAKEUP(&dev_priv->bsd_ring.irq_queue); 1031 notify_ring(dev, &dev_priv->bsd_ring);
1033 1032
1034 if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) { 1033 if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) {
1035 intel_prepare_page_flip(dev, 0); 1034 intel_prepare_page_flip(dev, 0);
@@ -1064,7 +1063,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
1064 if ((pipea_stats & PIPE_LEGACY_BLC_EVENT_STATUS) || 1063 if ((pipea_stats & PIPE_LEGACY_BLC_EVENT_STATUS) ||
1065 (pipeb_stats & PIPE_LEGACY_BLC_EVENT_STATUS) || 1064 (pipeb_stats & PIPE_LEGACY_BLC_EVENT_STATUS) ||
1066 (iir & I915_ASLE_INTERRUPT)) 1065 (iir & I915_ASLE_INTERRUPT))
1067 opregion_asle_intr(dev); 1066 intel_opregion_asle_intr(dev);
1068 1067
1069 /* With MSI, interrupts are only generated when iir 1068 /* With MSI, interrupts are only generated when iir
1070 * transitions from zero to nonzero. If another bit got 1069 * transitions from zero to nonzero. If another bit got
@@ -1206,18 +1205,15 @@ int i915_enable_vblank(struct drm_device *dev, int pipe)
1206{ 1205{
1207 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1206 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1208 unsigned long irqflags; 1207 unsigned long irqflags;
1209 int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
1210 u32 pipeconf;
1211 1208
1212 pipeconf = I915_READ(pipeconf_reg); 1209 if (!i915_pipe_enabled(dev, pipe))
1213 if (!(pipeconf & PIPEACONF_ENABLE))
1214 return -EINVAL; 1210 return -EINVAL;
1215 1211
1216 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); 1212 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
1217 if (HAS_PCH_SPLIT(dev)) 1213 if (HAS_PCH_SPLIT(dev))
1218 ironlake_enable_display_irq(dev_priv, (pipe == 0) ? 1214 ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
1219 DE_PIPEA_VBLANK: DE_PIPEB_VBLANK); 1215 DE_PIPEA_VBLANK: DE_PIPEB_VBLANK);
1220 else if (IS_I965G(dev)) 1216 else if (INTEL_INFO(dev)->gen >= 4)
1221 i915_enable_pipestat(dev_priv, pipe, 1217 i915_enable_pipestat(dev_priv, pipe,
1222 PIPE_START_VBLANK_INTERRUPT_ENABLE); 1218 PIPE_START_VBLANK_INTERRUPT_ENABLE);
1223 else 1219 else
@@ -1251,7 +1247,7 @@ void i915_enable_interrupt (struct drm_device *dev)
1251 struct drm_i915_private *dev_priv = dev->dev_private; 1247 struct drm_i915_private *dev_priv = dev->dev_private;
1252 1248
1253 if (!HAS_PCH_SPLIT(dev)) 1249 if (!HAS_PCH_SPLIT(dev))
1254 opregion_enable_asle(dev); 1250 intel_opregion_enable_asle(dev);
1255 dev_priv->irq_enabled = 1; 1251 dev_priv->irq_enabled = 1;
1256} 1252}
1257 1253
@@ -1310,7 +1306,7 @@ int i915_vblank_swap(struct drm_device *dev, void *data,
1310 return -EINVAL; 1306 return -EINVAL;
1311} 1307}
1312 1308
1313struct drm_i915_gem_request * 1309static struct drm_i915_gem_request *
1314i915_get_tail_request(struct drm_device *dev) 1310i915_get_tail_request(struct drm_device *dev)
1315{ 1311{
1316 drm_i915_private_t *dev_priv = dev->dev_private; 1312 drm_i915_private_t *dev_priv = dev->dev_private;
@@ -1330,11 +1326,7 @@ void i915_hangcheck_elapsed(unsigned long data)
1330 drm_i915_private_t *dev_priv = dev->dev_private; 1326 drm_i915_private_t *dev_priv = dev->dev_private;
1331 uint32_t acthd, instdone, instdone1; 1327 uint32_t acthd, instdone, instdone1;
1332 1328
1333 /* No reset support on this chip yet. */ 1329 if (INTEL_INFO(dev)->gen < 4) {
1334 if (IS_GEN6(dev))
1335 return;
1336
1337 if (!IS_I965G(dev)) {
1338 acthd = I915_READ(ACTHD); 1330 acthd = I915_READ(ACTHD);
1339 instdone = I915_READ(INSTDONE); 1331 instdone = I915_READ(INSTDONE);
1340 instdone1 = 0; 1332 instdone1 = 0;
@@ -1346,9 +1338,8 @@ void i915_hangcheck_elapsed(unsigned long data)
1346 1338
1347 /* If all work is done then ACTHD clearly hasn't advanced. */ 1339 /* If all work is done then ACTHD clearly hasn't advanced. */
1348 if (list_empty(&dev_priv->render_ring.request_list) || 1340 if (list_empty(&dev_priv->render_ring.request_list) ||
1349 i915_seqno_passed(i915_get_gem_seqno(dev, 1341 i915_seqno_passed(dev_priv->render_ring.get_seqno(dev, &dev_priv->render_ring),
1350 &dev_priv->render_ring), 1342 i915_get_tail_request(dev)->seqno)) {
1351 i915_get_tail_request(dev)->seqno)) {
1352 bool missed_wakeup = false; 1343 bool missed_wakeup = false;
1353 1344
1354 dev_priv->hangcheck_count = 0; 1345 dev_priv->hangcheck_count = 0;
@@ -1356,13 +1347,19 @@ void i915_hangcheck_elapsed(unsigned long data)
1356 /* Issue a wake-up to catch stuck h/w. */ 1347 /* Issue a wake-up to catch stuck h/w. */
1357 if (dev_priv->render_ring.waiting_gem_seqno && 1348 if (dev_priv->render_ring.waiting_gem_seqno &&
1358 waitqueue_active(&dev_priv->render_ring.irq_queue)) { 1349 waitqueue_active(&dev_priv->render_ring.irq_queue)) {
1359 DRM_WAKEUP(&dev_priv->render_ring.irq_queue); 1350 wake_up_all(&dev_priv->render_ring.irq_queue);
1360 missed_wakeup = true; 1351 missed_wakeup = true;
1361 } 1352 }
1362 1353
1363 if (dev_priv->bsd_ring.waiting_gem_seqno && 1354 if (dev_priv->bsd_ring.waiting_gem_seqno &&
1364 waitqueue_active(&dev_priv->bsd_ring.irq_queue)) { 1355 waitqueue_active(&dev_priv->bsd_ring.irq_queue)) {
1365 DRM_WAKEUP(&dev_priv->bsd_ring.irq_queue); 1356 wake_up_all(&dev_priv->bsd_ring.irq_queue);
1357 missed_wakeup = true;
1358 }
1359
1360 if (dev_priv->blt_ring.waiting_gem_seqno &&
1361 waitqueue_active(&dev_priv->blt_ring.irq_queue)) {
1362 wake_up_all(&dev_priv->blt_ring.irq_queue);
1366 missed_wakeup = true; 1363 missed_wakeup = true;
1367 } 1364 }
1368 1365
@@ -1376,6 +1373,21 @@ void i915_hangcheck_elapsed(unsigned long data)
1376 dev_priv->last_instdone1 == instdone1) { 1373 dev_priv->last_instdone1 == instdone1) {
1377 if (dev_priv->hangcheck_count++ > 1) { 1374 if (dev_priv->hangcheck_count++ > 1) {
1378 DRM_ERROR("Hangcheck timer elapsed... GPU hung\n"); 1375 DRM_ERROR("Hangcheck timer elapsed... GPU hung\n");
1376
1377 if (!IS_GEN2(dev)) {
1378 /* Is the chip hanging on a WAIT_FOR_EVENT?
1379 * If so we can simply poke the RB_WAIT bit
1380 * and break the hang. This should work on
1381 * all but the second generation chipsets.
1382 */
1383 u32 tmp = I915_READ(PRB0_CTL);
1384 if (tmp & RING_WAIT) {
1385 I915_WRITE(PRB0_CTL, tmp);
1386 POSTING_READ(PRB0_CTL);
1387 goto out;
1388 }
1389 }
1390
1379 i915_handle_error(dev, true); 1391 i915_handle_error(dev, true);
1380 return; 1392 return;
1381 } 1393 }
@@ -1387,8 +1399,10 @@ void i915_hangcheck_elapsed(unsigned long data)
1387 dev_priv->last_instdone1 = instdone1; 1399 dev_priv->last_instdone1 = instdone1;
1388 } 1400 }
1389 1401
1402out:
1390 /* Reset timer case chip hangs without another request being added */ 1403 /* Reset timer case chip hangs without another request being added */
1391 mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD); 1404 mod_timer(&dev_priv->hangcheck_timer,
1405 jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
1392} 1406}
1393 1407
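Note the unit handling folded into the re-arm above: DRM_I915_HANGCHECK_PERIOD is now treated as a millisecond value and converted with msecs_to_jiffies(), because adding a millisecond count directly to jiffies would make the real interval depend on CONFIG_HZ (1500 added raw is 1.5 s only at HZ=1000, but 6 s at HZ=250):

    /* HZ-dependent: treats a millisecond constant as a jiffy count */
    mod_timer(&timer, jiffies + DRM_I915_HANGCHECK_PERIOD);

    /* HZ-independent: convert milliseconds to jiffies explicitly */
    mod_timer(&timer, jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));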
1394/* drm_dma.h hooks 1408/* drm_dma.h hooks
@@ -1423,8 +1437,7 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
1423 u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | 1437 u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
1424 DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE; 1438 DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE;
1425 u32 render_mask = GT_PIPE_NOTIFY | GT_BSD_USER_INTERRUPT; 1439 u32 render_mask = GT_PIPE_NOTIFY | GT_BSD_USER_INTERRUPT;
1426 u32 hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG | 1440 u32 hotplug_mask;
1427 SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG;
1428 1441
1429 dev_priv->irq_mask_reg = ~display_mask; 1442 dev_priv->irq_mask_reg = ~display_mask;
1430 dev_priv->de_irq_enable_reg = display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK; 1443 dev_priv->de_irq_enable_reg = display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK;
@@ -1435,20 +1448,35 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
1435 I915_WRITE(DEIER, dev_priv->de_irq_enable_reg); 1448 I915_WRITE(DEIER, dev_priv->de_irq_enable_reg);
1436 (void) I915_READ(DEIER); 1449 (void) I915_READ(DEIER);
1437 1450
1438 /* Gen6 only needs render pipe_control now */ 1451 if (IS_GEN6(dev)) {
1439 if (IS_GEN6(dev)) 1452 render_mask =
1440 render_mask = GT_PIPE_NOTIFY; 1453 GT_PIPE_NOTIFY |
1454 GT_GEN6_BSD_USER_INTERRUPT |
1455 GT_BLT_USER_INTERRUPT;
1456 }
1441 1457
1442 dev_priv->gt_irq_mask_reg = ~render_mask; 1458 dev_priv->gt_irq_mask_reg = ~render_mask;
1443 dev_priv->gt_irq_enable_reg = render_mask; 1459 dev_priv->gt_irq_enable_reg = render_mask;
1444 1460
1445 I915_WRITE(GTIIR, I915_READ(GTIIR)); 1461 I915_WRITE(GTIIR, I915_READ(GTIIR));
1446 I915_WRITE(GTIMR, dev_priv->gt_irq_mask_reg); 1462 I915_WRITE(GTIMR, dev_priv->gt_irq_mask_reg);
1447 if (IS_GEN6(dev)) 1463 if (IS_GEN6(dev)) {
1448 I915_WRITE(GEN6_RENDER_IMR, ~GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT); 1464 I915_WRITE(GEN6_RENDER_IMR, ~GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT);
1465 I915_WRITE(GEN6_BSD_IMR, ~GEN6_BSD_IMR_USER_INTERRUPT);
1466 I915_WRITE(GEN6_BLITTER_IMR, ~GEN6_BLITTER_USER_INTERRUPT);
1467 }
1468
1449 I915_WRITE(GTIER, dev_priv->gt_irq_enable_reg); 1469 I915_WRITE(GTIER, dev_priv->gt_irq_enable_reg);
1450 (void) I915_READ(GTIER); 1470 (void) I915_READ(GTIER);
1451 1471
1472 if (HAS_PCH_CPT(dev)) {
1473 hotplug_mask = SDE_CRT_HOTPLUG_CPT | SDE_PORTB_HOTPLUG_CPT |
1474 SDE_PORTC_HOTPLUG_CPT | SDE_PORTD_HOTPLUG_CPT ;
1475 } else {
1476 hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG |
1477 SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG;
1478 }
1479
1452 dev_priv->pch_irq_mask_reg = ~hotplug_mask; 1480 dev_priv->pch_irq_mask_reg = ~hotplug_mask;
1453 dev_priv->pch_irq_enable_reg = hotplug_mask; 1481 dev_priv->pch_irq_enable_reg = hotplug_mask;
1454 1482
@@ -1505,9 +1533,10 @@ int i915_driver_irq_postinstall(struct drm_device *dev)
1505 u32 error_mask; 1533 u32 error_mask;
1506 1534
1507 DRM_INIT_WAITQUEUE(&dev_priv->render_ring.irq_queue); 1535 DRM_INIT_WAITQUEUE(&dev_priv->render_ring.irq_queue);
1508
1509 if (HAS_BSD(dev)) 1536 if (HAS_BSD(dev))
1510 DRM_INIT_WAITQUEUE(&dev_priv->bsd_ring.irq_queue); 1537 DRM_INIT_WAITQUEUE(&dev_priv->bsd_ring.irq_queue);
1538 if (HAS_BLT(dev))
1539 DRM_INIT_WAITQUEUE(&dev_priv->blt_ring.irq_queue);
1511 1540
1512 dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B; 1541 dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
1513 1542
@@ -1577,7 +1606,7 @@ int i915_driver_irq_postinstall(struct drm_device *dev)
1577 I915_WRITE(PORT_HOTPLUG_EN, hotplug_en); 1606 I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
1578 } 1607 }
1579 1608
1580 opregion_enable_asle(dev); 1609 intel_opregion_enable_asle(dev);
1581 1610
1582 return 0; 1611 return 0;
1583} 1612}
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 4f5e15577e89..25ed911a3112 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -25,52 +25,16 @@
25#ifndef _I915_REG_H_ 25#ifndef _I915_REG_H_
26#define _I915_REG_H_ 26#define _I915_REG_H_
27 27
28#define _PIPE(pipe, a, b) ((a) + (pipe)*((b)-(a)))
29
28/* 30/*
29 * The Bridge device's PCI config space has information about the 31 * The Bridge device's PCI config space has information about the
30 * fb aperture size and the amount of pre-reserved memory. 32 * fb aperture size and the amount of pre-reserved memory.
33 * This is all handled in the intel-gtt.ko module. i915.ko only
34 * cares about the vga bit for the vga arbiter.
31 */ 35 */
32#define INTEL_GMCH_CTRL 0x52 36#define INTEL_GMCH_CTRL 0x52
33#define INTEL_GMCH_VGA_DISABLE (1 << 1) 37#define INTEL_GMCH_VGA_DISABLE (1 << 1)
34#define INTEL_GMCH_ENABLED 0x4
35#define INTEL_GMCH_MEM_MASK 0x1
36#define INTEL_GMCH_MEM_64M 0x1
37#define INTEL_GMCH_MEM_128M 0
38
39#define INTEL_GMCH_GMS_MASK (0xf << 4)
40#define INTEL_855_GMCH_GMS_DISABLED (0x0 << 4)
41#define INTEL_855_GMCH_GMS_STOLEN_1M (0x1 << 4)
42#define INTEL_855_GMCH_GMS_STOLEN_4M (0x2 << 4)
43#define INTEL_855_GMCH_GMS_STOLEN_8M (0x3 << 4)
44#define INTEL_855_GMCH_GMS_STOLEN_16M (0x4 << 4)
45#define INTEL_855_GMCH_GMS_STOLEN_32M (0x5 << 4)
46
47#define INTEL_915G_GMCH_GMS_STOLEN_48M (0x6 << 4)
48#define INTEL_915G_GMCH_GMS_STOLEN_64M (0x7 << 4)
49#define INTEL_GMCH_GMS_STOLEN_128M (0x8 << 4)
50#define INTEL_GMCH_GMS_STOLEN_256M (0x9 << 4)
51#define INTEL_GMCH_GMS_STOLEN_96M (0xa << 4)
52#define INTEL_GMCH_GMS_STOLEN_160M (0xb << 4)
53#define INTEL_GMCH_GMS_STOLEN_224M (0xc << 4)
54#define INTEL_GMCH_GMS_STOLEN_352M (0xd << 4)
55
56#define SNB_GMCH_CTRL 0x50
57#define SNB_GMCH_GMS_STOLEN_MASK 0xF8
58#define SNB_GMCH_GMS_STOLEN_32M (1 << 3)
59#define SNB_GMCH_GMS_STOLEN_64M (2 << 3)
60#define SNB_GMCH_GMS_STOLEN_96M (3 << 3)
61#define SNB_GMCH_GMS_STOLEN_128M (4 << 3)
62#define SNB_GMCH_GMS_STOLEN_160M (5 << 3)
63#define SNB_GMCH_GMS_STOLEN_192M (6 << 3)
64#define SNB_GMCH_GMS_STOLEN_224M (7 << 3)
65#define SNB_GMCH_GMS_STOLEN_256M (8 << 3)
66#define SNB_GMCH_GMS_STOLEN_288M (9 << 3)
67#define SNB_GMCH_GMS_STOLEN_320M (0xa << 3)
68#define SNB_GMCH_GMS_STOLEN_352M (0xb << 3)
69#define SNB_GMCH_GMS_STOLEN_384M (0xc << 3)
70#define SNB_GMCH_GMS_STOLEN_416M (0xd << 3)
71#define SNB_GMCH_GMS_STOLEN_448M (0xe << 3)
72#define SNB_GMCH_GMS_STOLEN_480M (0xf << 3)
73#define SNB_GMCH_GMS_STOLEN_512M (0x10 << 3)
74 38
75/* PCI config space */ 39/* PCI config space */
76 40
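_PIPE() maps a pipe index onto a register address by linear extrapolation from the pipe-A and pipe-B addresses: pipe 0 yields a, pipe 1 yields b, and the arithmetic generalises to any further pipes that keep the same spacing. Worked through with the DPLL registers this diff parameterises below (DPLL_A = 0x06014, DPLL_B = 0x06018):

    #define _PIPE(pipe, a, b) ((a) + (pipe)*((b)-(a)))

    #define DPLL_A 0x06014
    #define DPLL_B 0x06018
    #define DPLL(pipe) _PIPE(pipe, DPLL_A, DPLL_B)

    /* DPLL(0) = 0x06014 + 0 * 0x4 = 0x06014
     * DPLL(1) = 0x06014 + 1 * 0x4 = 0x06018 */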
@@ -106,10 +70,13 @@
106#define I915_GC_RENDER_CLOCK_200_MHZ (1 << 0) 70#define I915_GC_RENDER_CLOCK_200_MHZ (1 << 0)
107#define I915_GC_RENDER_CLOCK_333_MHZ (4 << 0) 71#define I915_GC_RENDER_CLOCK_333_MHZ (4 << 0)
108#define LBB 0xf4 72#define LBB 0xf4
109#define GDRST 0xc0 73
110#define GDRST_FULL (0<<2) 74/* Graphics reset regs */
111#define GDRST_RENDER (1<<2) 75#define I965_GDRST 0xc0 /* PCI config register */
112#define GDRST_MEDIA (3<<2) 76#define ILK_GDSR 0x2ca4 /* MCHBAR offset */
77#define GRDOM_FULL (0<<2)
78#define GRDOM_RENDER (1<<2)
79#define GRDOM_MEDIA (3<<2)
113 80
114/* VGA stuff */ 81/* VGA stuff */
115 82
@@ -192,11 +159,11 @@
192#define MI_STORE_DWORD_INDEX MI_INSTR(0x21, 1) 159#define MI_STORE_DWORD_INDEX MI_INSTR(0x21, 1)
193#define MI_STORE_DWORD_INDEX_SHIFT 2 160#define MI_STORE_DWORD_INDEX_SHIFT 2
194#define MI_LOAD_REGISTER_IMM MI_INSTR(0x22, 1) 161#define MI_LOAD_REGISTER_IMM MI_INSTR(0x22, 1)
162#define MI_FLUSH_DW MI_INSTR(0x26, 2) /* for GEN6 */
195#define MI_BATCH_BUFFER MI_INSTR(0x30, 1) 163#define MI_BATCH_BUFFER MI_INSTR(0x30, 1)
196#define MI_BATCH_NON_SECURE (1) 164#define MI_BATCH_NON_SECURE (1)
197#define MI_BATCH_NON_SECURE_I965 (1<<8) 165#define MI_BATCH_NON_SECURE_I965 (1<<8)
198#define MI_BATCH_BUFFER_START MI_INSTR(0x31, 0) 166#define MI_BATCH_BUFFER_START MI_INSTR(0x31, 0)
199
200/* 167/*
201 * 3D instructions used by the kernel 168 * 3D instructions used by the kernel
202 */ 169 */
@@ -249,6 +216,16 @@
249#define PIPE_CONTROL_GLOBAL_GTT (1<<2) /* in addr dword */ 216#define PIPE_CONTROL_GLOBAL_GTT (1<<2) /* in addr dword */
250#define PIPE_CONTROL_STALL_EN (1<<1) /* in addr word, Ironlake+ only */ 217#define PIPE_CONTROL_STALL_EN (1<<1) /* in addr word, Ironlake+ only */
251 218
219
220/*
221 * Reset registers
222 */
223#define DEBUG_RESET_I830 0x6070
224#define DEBUG_RESET_FULL (1<<7)
225#define DEBUG_RESET_RENDER (1<<8)
226#define DEBUG_RESET_DISPLAY (1<<9)
227
228
252/* 229/*
253 * Fence registers 230 * Fence registers
254 */ 231 */
@@ -283,6 +260,17 @@
283#define PRB0_HEAD 0x02034 260#define PRB0_HEAD 0x02034
284#define PRB0_START 0x02038 261#define PRB0_START 0x02038
285#define PRB0_CTL 0x0203c 262#define PRB0_CTL 0x0203c
263#define RENDER_RING_BASE 0x02000
264#define BSD_RING_BASE 0x04000
265#define GEN6_BSD_RING_BASE 0x12000
266#define BLT_RING_BASE 0x22000
267#define RING_TAIL(base) ((base)+0x30)
268#define RING_HEAD(base) ((base)+0x34)
269#define RING_START(base) ((base)+0x38)
270#define RING_CTL(base) ((base)+0x3c)
271#define RING_HWS_PGA(base) ((base)+0x80)
272#define RING_HWS_PGA_GEN6(base) ((base)+0x2080)
273#define RING_ACTHD(base) ((base)+0x74)
286#define TAIL_ADDR 0x001FFFF8 274#define TAIL_ADDR 0x001FFFF8
287#define HEAD_WRAP_COUNT 0xFFE00000 275#define HEAD_WRAP_COUNT 0xFFE00000
288#define HEAD_WRAP_ONE 0x00200000 276#define HEAD_WRAP_ONE 0x00200000
@@ -295,6 +283,8 @@
295#define RING_VALID_MASK 0x00000001 283#define RING_VALID_MASK 0x00000001
296#define RING_VALID 0x00000001 284#define RING_VALID 0x00000001
297#define RING_INVALID 0x00000000 285#define RING_INVALID 0x00000000
286#define RING_WAIT_I8XX (1<<0) /* gen2, PRBx_HEAD */
287#define RING_WAIT (1<<11) /* gen3+, PRBx_CTL */
298#define PRB1_TAIL 0x02040 /* 915+ only */ 288#define PRB1_TAIL 0x02040 /* 915+ only */
299#define PRB1_HEAD 0x02044 /* 915+ only */ 289#define PRB1_HEAD 0x02044 /* 915+ only */
300#define PRB1_START 0x02048 /* 915+ only */ 290#define PRB1_START 0x02048 /* 915+ only */
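Because every ring exposes the same register block at a fixed offset from its base, the per-ring absolute names (BSD_RING_TAIL and friends, deleted further down) collapse into the base-relative accessors defined above. For example:

    #define RENDER_RING_BASE 0x02000
    #define BLT_RING_BASE    0x22000
    #define RING_TAIL(base) ((base)+0x30)

    /* RING_TAIL(RENDER_RING_BASE) = 0x02030, i.e. the legacy PRB0_TAIL
     * RING_TAIL(BLT_RING_BASE)    = 0x22030 */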
@@ -306,7 +296,6 @@
306#define INSTDONE1 0x0207c /* 965+ only */ 296#define INSTDONE1 0x0207c /* 965+ only */
307#define ACTHD_I965 0x02074 297#define ACTHD_I965 0x02074
308#define HWS_PGA 0x02080 298#define HWS_PGA 0x02080
309#define HWS_PGA_GEN6 0x04080
310#define HWS_ADDRESS_MASK 0xfffff000 299#define HWS_ADDRESS_MASK 0xfffff000
311#define HWS_START_ADDRESS_SHIFT 4 300#define HWS_START_ADDRESS_SHIFT 4
312#define PWRCTXA 0x2088 /* 965GM+ only */ 301#define PWRCTXA 0x2088 /* 965GM+ only */
@@ -464,17 +453,17 @@
464#define GEN6_BLITTER_COMMAND_PARSER_MASTER_ERROR (1 << 25) 453#define GEN6_BLITTER_COMMAND_PARSER_MASTER_ERROR (1 << 25)
465#define GEN6_BLITTER_SYNC_STATUS (1 << 24) 454#define GEN6_BLITTER_SYNC_STATUS (1 << 24)
466#define GEN6_BLITTER_USER_INTERRUPT (1 << 22) 455#define GEN6_BLITTER_USER_INTERRUPT (1 << 22)
467/*
468 * BSD (bit stream decoder instruction and interrupt control register defines
469 * (G4X and Ironlake only)
470 */
471 456
472#define BSD_RING_TAIL 0x04030 457#define GEN6_BSD_SLEEP_PSMI_CONTROL 0x12050
473#define BSD_RING_HEAD 0x04034 458#define GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK (1 << 16)
474#define BSD_RING_START 0x04038 459#define GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_DISABLE (1 << 0)
475#define BSD_RING_CTL 0x0403c 460#define GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE 0
476#define BSD_RING_ACTHD 0x04074 461#define GEN6_BSD_SLEEP_PSMI_CONTROL_IDLE_INDICATOR (1 << 3)
477#define BSD_HWS_PGA 0x04080 462
463#define GEN6_BSD_IMR 0x120a8
464#define GEN6_BSD_IMR_USER_INTERRUPT (1 << 12)
465
466#define GEN6_BSD_RNCID 0x12198
478 467
479/* 468/*
480 * Framebuffer compression (915+ only) 469 * Framebuffer compression (915+ only)
@@ -579,12 +568,51 @@
579# define GPIO_DATA_VAL_IN (1 << 12) 568# define GPIO_DATA_VAL_IN (1 << 12)
580# define GPIO_DATA_PULLUP_DISABLE (1 << 13) 569# define GPIO_DATA_PULLUP_DISABLE (1 << 13)
581 570
582#define GMBUS0 0x5100 571#define GMBUS0 0x5100 /* clock/port select */
583#define GMBUS1 0x5104 572#define GMBUS_RATE_100KHZ (0<<8)
584#define GMBUS2 0x5108 573#define GMBUS_RATE_50KHZ (1<<8)
585#define GMBUS3 0x510c 574#define GMBUS_RATE_400KHZ (2<<8) /* reserved on Pineview */
586#define GMBUS4 0x5110 575#define GMBUS_RATE_1MHZ (3<<8) /* reserved on Pineview */
587#define GMBUS5 0x5120 576#define GMBUS_HOLD_EXT (1<<7) /* 300ns hold time, rsvd on Pineview */
577#define GMBUS_PORT_DISABLED 0
578#define GMBUS_PORT_SSC 1
579#define GMBUS_PORT_VGADDC 2
580#define GMBUS_PORT_PANEL 3
581#define GMBUS_PORT_DPC 4 /* HDMIC */
582#define GMBUS_PORT_DPB 5 /* SDVO, HDMIB */
583 /* 6 reserved */
584#define GMBUS_PORT_DPD 7 /* HDMID */
585#define GMBUS_NUM_PORTS 8
586#define GMBUS1 0x5104 /* command/status */
587#define GMBUS_SW_CLR_INT (1<<31)
588#define GMBUS_SW_RDY (1<<30)
589#define GMBUS_ENT (1<<29) /* enable timeout */
590#define GMBUS_CYCLE_NONE (0<<25)
591#define GMBUS_CYCLE_WAIT (1<<25)
592#define GMBUS_CYCLE_INDEX (2<<25)
593#define GMBUS_CYCLE_STOP (4<<25)
594#define GMBUS_BYTE_COUNT_SHIFT 16
595#define GMBUS_SLAVE_INDEX_SHIFT 8
596#define GMBUS_SLAVE_ADDR_SHIFT 1
597#define GMBUS_SLAVE_READ (1<<0)
598#define GMBUS_SLAVE_WRITE (0<<0)
599#define GMBUS2 0x5108 /* status */
600#define GMBUS_INUSE (1<<15)
601#define GMBUS_HW_WAIT_PHASE (1<<14)
602#define GMBUS_STALL_TIMEOUT (1<<13)
603#define GMBUS_INT (1<<12)
604#define GMBUS_HW_RDY (1<<11)
605#define GMBUS_SATOER (1<<10)
606#define GMBUS_ACTIVE (1<<9)
607#define GMBUS3 0x510c /* data buffer bytes 3-0 */
608#define GMBUS4 0x5110 /* interrupt mask (Pineview+) */
609#define GMBUS_SLAVE_TIMEOUT_EN (1<<4)
610#define GMBUS_NAK_EN (1<<3)
611#define GMBUS_IDLE_EN (1<<2)
612#define GMBUS_HW_WAIT_EN (1<<1)
613#define GMBUS_HW_RDY_EN (1<<0)
614#define GMBUS5 0x5120 /* byte index */
615#define GMBUS_2BYTE_INDEX_EN (1<<31)
588 616
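The GMBUS1 fields compose into a single command word: cycle type, transfer length, 7-bit slave address, and direction. A two-byte read from the DDC slave at 0x50, using a WAIT cycle, would be encoded as follows (an illustrative composition of the definitions above, not code from this series):

    #include <stdint.h>

    #define GMBUS_SW_RDY           (1u << 30)
    #define GMBUS_CYCLE_WAIT       (1u << 25)
    #define GMBUS_BYTE_COUNT_SHIFT 16
    #define GMBUS_SLAVE_ADDR_SHIFT 1
    #define GMBUS_SLAVE_READ       (1u << 0)

    static const uint32_t gmbus1_cmd =
        GMBUS_SW_RDY | GMBUS_CYCLE_WAIT |
        (2u    << GMBUS_BYTE_COUNT_SHIFT) |   /* two bytes */
        (0x50u << GMBUS_SLAVE_ADDR_SHIFT) |   /* DDC address */
        GMBUS_SLAVE_READ;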
589/* 617/*
590 * Clock control & power management 618 * Clock control & power management
@@ -603,6 +631,7 @@
603#define VGA1_PD_P1_MASK (0x1f << 8) 631#define VGA1_PD_P1_MASK (0x1f << 8)
604#define DPLL_A 0x06014 632#define DPLL_A 0x06014
605#define DPLL_B 0x06018 633#define DPLL_B 0x06018
634#define DPLL(pipe) _PIPE(pipe, DPLL_A, DPLL_B)
606#define DPLL_VCO_ENABLE (1 << 31) 635#define DPLL_VCO_ENABLE (1 << 31)
607#define DPLL_DVO_HIGH_SPEED (1 << 30) 636#define DPLL_DVO_HIGH_SPEED (1 << 30)
608#define DPLL_SYNCLOCK_ENABLE (1 << 29) 637#define DPLL_SYNCLOCK_ENABLE (1 << 29)
@@ -633,31 +662,6 @@
633#define LVDS 0x61180 662#define LVDS 0x61180
634#define LVDS_ON (1<<31) 663#define LVDS_ON (1<<31)
635 664
636#define ADPA 0x61100
637#define ADPA_DPMS_MASK (~(3<<10))
638#define ADPA_DPMS_ON (0<<10)
639#define ADPA_DPMS_SUSPEND (1<<10)
640#define ADPA_DPMS_STANDBY (2<<10)
641#define ADPA_DPMS_OFF (3<<10)
642
643#define RING_TAIL 0x00
644#define TAIL_ADDR 0x001FFFF8
645#define RING_HEAD 0x04
646#define HEAD_WRAP_COUNT 0xFFE00000
647#define HEAD_WRAP_ONE 0x00200000
648#define HEAD_ADDR 0x001FFFFC
649#define RING_START 0x08
650#define START_ADDR 0xFFFFF000
651#define RING_LEN 0x0C
652#define RING_NR_PAGES 0x001FF000
653#define RING_REPORT_MASK 0x00000006
654#define RING_REPORT_64K 0x00000002
655#define RING_REPORT_128K 0x00000004
656#define RING_NO_REPORT 0x00000000
657#define RING_VALID_MASK 0x00000001
658#define RING_VALID 0x00000001
659#define RING_INVALID 0x00000000
660
661/* Scratch pad debug 0 reg: 665/* Scratch pad debug 0 reg:
662 */ 666 */
663#define DPLL_FPA01_P1_POST_DIV_MASK_I830 0x001f0000 667#define DPLL_FPA01_P1_POST_DIV_MASK_I830 0x001f0000
@@ -736,10 +740,13 @@
736#define DPLL_MD_VGA_UDI_MULTIPLIER_MASK 0x0000003f 740#define DPLL_MD_VGA_UDI_MULTIPLIER_MASK 0x0000003f
737#define DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT 0 741#define DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT 0
738#define DPLL_B_MD 0x06020 /* 965+ only */ 742#define DPLL_B_MD 0x06020 /* 965+ only */
743#define DPLL_MD(pipe) _PIPE(pipe, DPLL_A_MD, DPLL_B_MD)
739#define FPA0 0x06040 744#define FPA0 0x06040
740#define FPA1 0x06044 745#define FPA1 0x06044
741#define FPB0 0x06048 746#define FPB0 0x06048
742#define FPB1 0x0604c 747#define FPB1 0x0604c
748#define FP0(pipe) _PIPE(pipe, FPA0, FPB0)
749#define FP1(pipe) _PIPE(pipe, FPA1, FPB1)
743#define FP_N_DIV_MASK 0x003f0000 750#define FP_N_DIV_MASK 0x003f0000
744#define FP_N_PINEVIEW_DIV_MASK 0x00ff0000 751#define FP_N_PINEVIEW_DIV_MASK 0x00ff0000
745#define FP_N_DIV_SHIFT 16 752#define FP_N_DIV_SHIFT 16
@@ -760,6 +767,7 @@
760#define DPLLA_TEST_M_BYPASS (1 << 2) 767#define DPLLA_TEST_M_BYPASS (1 << 2)
761#define DPLLA_INPUT_BUFFER_ENABLE (1 << 0) 768#define DPLLA_INPUT_BUFFER_ENABLE (1 << 0)
762#define D_STATE 0x6104 769#define D_STATE 0x6104
770#define DSTATE_GFX_RESET_I830 (1<<6)
763#define DSTATE_PLL_D3_OFF (1<<3) 771#define DSTATE_PLL_D3_OFF (1<<3)
764#define DSTATE_GFX_CLOCK_GATING (1<<1) 772#define DSTATE_GFX_CLOCK_GATING (1<<1)
765#define DSTATE_DOT_CLOCK_GATING (1<<0) 773#define DSTATE_DOT_CLOCK_GATING (1<<0)
@@ -926,6 +934,8 @@
926#define CLKCFG_MEM_800 (3 << 4) 934#define CLKCFG_MEM_800 (3 << 4)
927#define CLKCFG_MEM_MASK (7 << 4) 935#define CLKCFG_MEM_MASK (7 << 4)
928 936
937#define TSC1 0x11001
938#define TSE (1<<0)
929#define TR1 0x11006 939#define TR1 0x11006
930#define TSFS 0x11020 940#define TSFS 0x11020
931#define TSFS_SLOPE_MASK 0x0000ff00 941#define TSFS_SLOPE_MASK 0x0000ff00
@@ -1070,6 +1080,8 @@
1070#define MEMSTAT_SRC_CTL_STDBY 3 1080#define MEMSTAT_SRC_CTL_STDBY 3
1071#define RCPREVBSYTUPAVG 0x113b8 1081#define RCPREVBSYTUPAVG 0x113b8
1072#define RCPREVBSYTDNAVG 0x113bc 1082#define RCPREVBSYTDNAVG 0x113bc
1083#define PMMISC 0x11214
1084#define MCPPCE_EN (1<<0) /* enable PM_MSG from PCH->MPC */
1073#define SDEW 0x1124c 1085#define SDEW 0x1124c
1074#define CSIEW0 0x11250 1086#define CSIEW0 0x11250
1075#define CSIEW1 0x11254 1087#define CSIEW1 0x11254
@@ -1150,6 +1162,15 @@
1150#define PIPEBSRC 0x6101c 1162#define PIPEBSRC 0x6101c
1151#define BCLRPAT_B 0x61020 1163#define BCLRPAT_B 0x61020
1152 1164
1165#define HTOTAL(pipe) _PIPE(pipe, HTOTAL_A, HTOTAL_B)
1166#define HBLANK(pipe) _PIPE(pipe, HBLANK_A, HBLANK_B)
1167#define HSYNC(pipe) _PIPE(pipe, HSYNC_A, HSYNC_B)
1168#define VTOTAL(pipe) _PIPE(pipe, VTOTAL_A, VTOTAL_B)
1169#define VBLANK(pipe) _PIPE(pipe, VBLANK_A, VBLANK_B)
1170#define VSYNC(pipe) _PIPE(pipe, VSYNC_A, VSYNC_B)
1171#define PIPESRC(pipe) _PIPE(pipe, PIPEASRC, PIPEBSRC)
1172#define BCLRPAT(pipe) _PIPE(pipe, BCLRPAT_A, BCLRPAT_B)
1173
1153/* VGA port control */ 1174/* VGA port control */
1154#define ADPA 0x61100 1175#define ADPA 0x61100
1155#define ADPA_DAC_ENABLE (1<<31) 1176#define ADPA_DAC_ENABLE (1<<31)
@@ -1173,6 +1194,7 @@
1173#define ADPA_DPMS_STANDBY (2<<10) 1194#define ADPA_DPMS_STANDBY (2<<10)
1174#define ADPA_DPMS_OFF (3<<10) 1195#define ADPA_DPMS_OFF (3<<10)
1175 1196
1197
1176/* Hotplug control (945+ only) */ 1198/* Hotplug control (945+ only) */
1177#define PORT_HOTPLUG_EN 0x61110 1199#define PORT_HOTPLUG_EN 0x61110
1178#define HDMIB_HOTPLUG_INT_EN (1 << 29) 1200#define HDMIB_HOTPLUG_INT_EN (1 << 29)
@@ -1331,6 +1353,22 @@
1331#define LVDS_B0B3_POWER_DOWN (0 << 2) 1353#define LVDS_B0B3_POWER_DOWN (0 << 2)
1332#define LVDS_B0B3_POWER_UP (3 << 2) 1354#define LVDS_B0B3_POWER_UP (3 << 2)
1333 1355
1356/* Video Data Island Packet control */
1357#define VIDEO_DIP_DATA 0x61178
1358#define VIDEO_DIP_CTL 0x61170
1359#define VIDEO_DIP_ENABLE (1 << 31)
1360#define VIDEO_DIP_PORT_B (1 << 29)
1361#define VIDEO_DIP_PORT_C (2 << 29)
1362#define VIDEO_DIP_ENABLE_AVI (1 << 21)
1363#define VIDEO_DIP_ENABLE_VENDOR (2 << 21)
1364#define VIDEO_DIP_ENABLE_SPD (8 << 21)
1365#define VIDEO_DIP_SELECT_AVI (0 << 19)
1366#define VIDEO_DIP_SELECT_VENDOR (1 << 19)
1367#define VIDEO_DIP_SELECT_SPD (3 << 19)
1368#define VIDEO_DIP_FREQ_ONCE (0 << 16)
1369#define VIDEO_DIP_FREQ_VSYNC (1 << 16)
1370#define VIDEO_DIP_FREQ_2VSYNC (2 << 16)
1371
1334/* Panel power sequencing */ 1372/* Panel power sequencing */
1335#define PP_STATUS 0x61200 1373#define PP_STATUS 0x61200
1336#define PP_ON (1 << 31) 1374#define PP_ON (1 << 31)
@@ -1346,6 +1384,9 @@
1346#define PP_SEQUENCE_ON (1 << 28) 1384#define PP_SEQUENCE_ON (1 << 28)
1347#define PP_SEQUENCE_OFF (2 << 28) 1385#define PP_SEQUENCE_OFF (2 << 28)
1348#define PP_SEQUENCE_MASK 0x30000000 1386#define PP_SEQUENCE_MASK 0x30000000
1387#define PP_CYCLE_DELAY_ACTIVE (1 << 27)
1388#define PP_SEQUENCE_STATE_ON_IDLE (1 << 3)
1389#define PP_SEQUENCE_STATE_MASK 0x0000000f
1349#define PP_CONTROL 0x61204 1390#define PP_CONTROL 0x61204
1350#define POWER_TARGET_ON (1 << 0) 1391#define POWER_TARGET_ON (1 << 0)
1351#define PP_ON_DELAYS 0x61208 1392#define PP_ON_DELAYS 0x61208
@@ -1481,6 +1522,7 @@
1481# define TV_TEST_MODE_MASK (7 << 0) 1522# define TV_TEST_MODE_MASK (7 << 0)
1482 1523
1483#define TV_DAC 0x68004 1524#define TV_DAC 0x68004
1525# define TV_DAC_SAVE 0x00ffff00
1484/** 1526/**
1485 * Reports that DAC state change logic has reported change (RO). 1527 * Reports that DAC state change logic has reported change (RO).
1486 * 1528 *
@@ -2075,29 +2117,35 @@
2075 2117
2076/* Display & cursor control */ 2118/* Display & cursor control */
2077 2119
2078/* dithering flag on Ironlake */
2079#define PIPE_ENABLE_DITHER (1 << 4)
2080#define PIPE_DITHER_TYPE_MASK (3 << 2)
2081#define PIPE_DITHER_TYPE_SPATIAL (0 << 2)
2082#define PIPE_DITHER_TYPE_ST01 (1 << 2)
2083/* Pipe A */ 2120/* Pipe A */
2084#define PIPEADSL 0x70000 2121#define PIPEADSL 0x70000
2085#define DSL_LINEMASK 0x00000fff 2122#define DSL_LINEMASK 0x00000fff
2086#define PIPEACONF 0x70008 2123#define PIPEACONF 0x70008
2087#define PIPEACONF_ENABLE (1<<31) 2124#define PIPECONF_ENABLE (1<<31)
2088#define PIPEACONF_DISABLE 0 2125#define PIPECONF_DISABLE 0
2089#define PIPEACONF_DOUBLE_WIDE (1<<30) 2126#define PIPECONF_DOUBLE_WIDE (1<<30)
2090#define I965_PIPECONF_ACTIVE (1<<30) 2127#define I965_PIPECONF_ACTIVE (1<<30)
2091#define PIPEACONF_SINGLE_WIDE 0 2128#define PIPECONF_SINGLE_WIDE 0
2092#define PIPEACONF_PIPE_UNLOCKED 0 2129#define PIPECONF_PIPE_UNLOCKED 0
2093#define PIPEACONF_PIPE_LOCKED (1<<25) 2130#define PIPECONF_PIPE_LOCKED (1<<25)
2094#define PIPEACONF_PALETTE 0 2131#define PIPECONF_PALETTE 0
2095#define PIPEACONF_GAMMA (1<<24) 2132#define PIPECONF_GAMMA (1<<24)
2096#define PIPECONF_FORCE_BORDER (1<<25) 2133#define PIPECONF_FORCE_BORDER (1<<25)
2097#define PIPECONF_PROGRESSIVE (0 << 21) 2134#define PIPECONF_PROGRESSIVE (0 << 21)
2098#define PIPECONF_INTERLACE_W_FIELD_INDICATION (6 << 21) 2135#define PIPECONF_INTERLACE_W_FIELD_INDICATION (6 << 21)
2099#define PIPECONF_INTERLACE_FIELD_0_ONLY (7 << 21) 2136#define PIPECONF_INTERLACE_FIELD_0_ONLY (7 << 21)
2100#define PIPECONF_CXSR_DOWNCLOCK (1<<16) 2137#define PIPECONF_CXSR_DOWNCLOCK (1<<16)
2138#define PIPECONF_BPP_MASK (0x000000e0)
2139#define PIPECONF_BPP_8 (0<<5)
2140#define PIPECONF_BPP_10 (1<<5)
2141#define PIPECONF_BPP_6 (2<<5)
2142#define PIPECONF_BPP_12 (3<<5)
2143#define PIPECONF_DITHER_EN (1<<4)
2144#define PIPECONF_DITHER_TYPE_MASK (0x0000000c)
2145#define PIPECONF_DITHER_TYPE_SP (0<<2)
2146#define PIPECONF_DITHER_TYPE_ST1 (1<<2)
2147#define PIPECONF_DITHER_TYPE_ST2 (2<<2)
2148#define PIPECONF_DITHER_TYPE_TEMP (3<<2)
2101#define PIPEASTAT 0x70024 2149#define PIPEASTAT 0x70024
2102#define PIPE_FIFO_UNDERRUN_STATUS (1UL<<31) 2150#define PIPE_FIFO_UNDERRUN_STATUS (1UL<<31)
2103#define PIPE_CRC_ERROR_ENABLE (1UL<<29) 2151#define PIPE_CRC_ERROR_ENABLE (1UL<<29)
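With the BPP and dither fields now expressed as PIPECONF bits, a caller can program both in one read-modify-write. A hedged sketch, assuming the I915_READ/I915_WRITE accessors and a pipe variable from the surrounding driver context (PIPECONF(pipe) itself is added in a later hunk below):

	u32 val = I915_READ(PIPECONF(pipe));
	val &= ~(PIPECONF_BPP_MASK | PIPECONF_DITHER_TYPE_MASK);
	val |= PIPECONF_BPP_6 | PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP;
	I915_WRITE(PIPECONF(pipe), val);	/* 6 bpc with spatial dithering */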
@@ -2128,12 +2176,15 @@
2128#define PIPE_START_VBLANK_INTERRUPT_STATUS (1UL<<2) /* 965 or later */ 2176#define PIPE_START_VBLANK_INTERRUPT_STATUS (1UL<<2) /* 965 or later */
2129#define PIPE_VBLANK_INTERRUPT_STATUS (1UL<<1) 2177#define PIPE_VBLANK_INTERRUPT_STATUS (1UL<<1)
2130#define PIPE_OVERLAY_UPDATED_STATUS (1UL<<0) 2178#define PIPE_OVERLAY_UPDATED_STATUS (1UL<<0)
2131#define PIPE_BPC_MASK (7 << 5) /* Ironlake */ 2179#define PIPE_BPC_MASK (7 << 5) /* Ironlake */
2132#define PIPE_8BPC (0 << 5) 2180#define PIPE_8BPC (0 << 5)
2133#define PIPE_10BPC (1 << 5) 2181#define PIPE_10BPC (1 << 5)
2134#define PIPE_6BPC (2 << 5) 2182#define PIPE_6BPC (2 << 5)
2135#define PIPE_12BPC (3 << 5) 2183#define PIPE_12BPC (3 << 5)
2136 2184
2185#define PIPECONF(pipe) _PIPE(pipe, PIPEACONF, PIPEBCONF)
2186#define PIPEDSL(pipe) _PIPE(pipe, PIPEADSL, PIPEBDSL)
2187
2137#define DSPARB 0x70030 2188#define DSPARB 0x70030
2138#define DSPARB_CSTART_MASK (0x7f << 7) 2189#define DSPARB_CSTART_MASK (0x7f << 7)
2139#define DSPARB_CSTART_SHIFT 7 2190#define DSPARB_CSTART_SHIFT 7
@@ -2206,8 +2257,8 @@
2206#define WM1_LP_SR_EN (1<<31) 2257#define WM1_LP_SR_EN (1<<31)
2207#define WM1_LP_LATENCY_SHIFT 24 2258#define WM1_LP_LATENCY_SHIFT 24
2208#define WM1_LP_LATENCY_MASK (0x7f<<24) 2259#define WM1_LP_LATENCY_MASK (0x7f<<24)
2209#define WM1_LP_FBC_LP1_MASK (0xf<<20) 2260#define WM1_LP_FBC_MASK (0xf<<20)
2210#define WM1_LP_FBC_LP1_SHIFT 20 2261#define WM1_LP_FBC_SHIFT 20
2211#define WM1_LP_SR_MASK (0x1ff<<8) 2262#define WM1_LP_SR_MASK (0x1ff<<8)
2212#define WM1_LP_SR_SHIFT 8 2263#define WM1_LP_SR_SHIFT 8
2213#define WM1_LP_CURSOR_MASK (0x3f) 2264#define WM1_LP_CURSOR_MASK (0x3f)
@@ -2333,6 +2384,14 @@
2333#define DSPASURF 0x7019C /* 965+ only */ 2384#define DSPASURF 0x7019C /* 965+ only */
2334#define DSPATILEOFF 0x701A4 /* 965+ only */ 2385#define DSPATILEOFF 0x701A4 /* 965+ only */
2335 2386
2387#define DSPCNTR(plane) _PIPE(plane, DSPACNTR, DSPBCNTR)
2388#define DSPADDR(plane) _PIPE(plane, DSPAADDR, DSPBADDR)
2389#define DSPSTRIDE(plane) _PIPE(plane, DSPASTRIDE, DSPBSTRIDE)
2390#define DSPPOS(plane) _PIPE(plane, DSPAPOS, DSPBPOS)
2391#define DSPSIZE(plane) _PIPE(plane, DSPASIZE, DSPBSIZE)
2392#define DSPSURF(plane) _PIPE(plane, DSPASURF, DSPBSURF)
2393#define DSPTILEOFF(plane) _PIPE(plane, DSPATILEOFF, DSPBTILEOFF)
2394
2336/* VBIOS flags */ 2395/* VBIOS flags */
2337#define SWF00 0x71410 2396#define SWF00 0x71410
2338#define SWF01 0x71414 2397#define SWF01 0x71414
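The plane-parameterized forms let save/restore paths iterate instead of duplicating A/B branches. A sketch under the assumption of a two-element save array (the array and field names are hypothetical):

	int plane;
	for (plane = 0; plane < 2; plane++) {
		save[plane].cntr   = I915_READ(DSPCNTR(plane));
		save[plane].stride = I915_READ(DSPSTRIDE(plane));
		save[plane].surf   = I915_READ(DSPSURF(plane));	/* 965+ only */
	}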
@@ -2397,6 +2456,7 @@
2397#define RR_HW_HIGH_POWER_FRAMES_MASK 0xff00 2456#define RR_HW_HIGH_POWER_FRAMES_MASK 0xff00
2398 2457
2399#define FDI_PLL_BIOS_0 0x46000 2458#define FDI_PLL_BIOS_0 0x46000
2459#define FDI_PLL_FB_CLOCK_MASK 0xff
2400#define FDI_PLL_BIOS_1 0x46004 2460#define FDI_PLL_BIOS_1 0x46004
2401#define FDI_PLL_BIOS_2 0x46008 2461#define FDI_PLL_BIOS_2 0x46008
2402#define DISPLAY_PORT_PLL_BIOS_0 0x4600c 2462#define DISPLAY_PORT_PLL_BIOS_0 0x4600c
@@ -2420,46 +2480,47 @@
2420#define PIPEA_DATA_M1 0x60030 2480#define PIPEA_DATA_M1 0x60030
2421#define TU_SIZE(x) (((x)-1) << 25) /* default size 64 */ 2481#define TU_SIZE(x) (((x)-1) << 25) /* default size 64 */
2422#define TU_SIZE_MASK 0x7e000000 2482#define TU_SIZE_MASK 0x7e000000
2423#define PIPEA_DATA_M1_OFFSET 0 2483#define PIPE_DATA_M1_OFFSET 0
2424#define PIPEA_DATA_N1 0x60034 2484#define PIPEA_DATA_N1 0x60034
2425#define PIPEA_DATA_N1_OFFSET 0 2485#define PIPE_DATA_N1_OFFSET 0
2426 2486
2427#define PIPEA_DATA_M2 0x60038 2487#define PIPEA_DATA_M2 0x60038
2428#define PIPEA_DATA_M2_OFFSET 0 2488#define PIPE_DATA_M2_OFFSET 0
2429#define PIPEA_DATA_N2 0x6003c 2489#define PIPEA_DATA_N2 0x6003c
2430#define PIPEA_DATA_N2_OFFSET 0 2490#define PIPE_DATA_N2_OFFSET 0
2431 2491
2432#define PIPEA_LINK_M1 0x60040 2492#define PIPEA_LINK_M1 0x60040
2433#define PIPEA_LINK_M1_OFFSET 0 2493#define PIPE_LINK_M1_OFFSET 0
2434#define PIPEA_LINK_N1 0x60044 2494#define PIPEA_LINK_N1 0x60044
2435#define PIPEA_LINK_N1_OFFSET 0 2495#define PIPE_LINK_N1_OFFSET 0
2436 2496
2437#define PIPEA_LINK_M2 0x60048 2497#define PIPEA_LINK_M2 0x60048
2438#define PIPEA_LINK_M2_OFFSET 0 2498#define PIPE_LINK_M2_OFFSET 0
2439#define PIPEA_LINK_N2 0x6004c 2499#define PIPEA_LINK_N2 0x6004c
2440#define PIPEA_LINK_N2_OFFSET 0 2500#define PIPE_LINK_N2_OFFSET 0
2441 2501
2442/* PIPEB timing regs are same start from 0x61000 */ 2502/* PIPEB timing regs are same start from 0x61000 */
2443 2503
2444#define PIPEB_DATA_M1 0x61030 2504#define PIPEB_DATA_M1 0x61030
2445#define PIPEB_DATA_M1_OFFSET 0
2446#define PIPEB_DATA_N1 0x61034 2505#define PIPEB_DATA_N1 0x61034
2447#define PIPEB_DATA_N1_OFFSET 0
2448 2506
2449#define PIPEB_DATA_M2 0x61038 2507#define PIPEB_DATA_M2 0x61038
2450#define PIPEB_DATA_M2_OFFSET 0
2451#define PIPEB_DATA_N2 0x6103c 2508#define PIPEB_DATA_N2 0x6103c
2452#define PIPEB_DATA_N2_OFFSET 0
2453 2509
2454#define PIPEB_LINK_M1 0x61040 2510#define PIPEB_LINK_M1 0x61040
2455#define PIPEB_LINK_M1_OFFSET 0
2456#define PIPEB_LINK_N1 0x61044 2511#define PIPEB_LINK_N1 0x61044
2457#define PIPEB_LINK_N1_OFFSET 0
2458 2512
2459#define PIPEB_LINK_M2 0x61048 2513#define PIPEB_LINK_M2 0x61048
2460#define PIPEB_LINK_M2_OFFSET 0
2461#define PIPEB_LINK_N2 0x6104c 2514#define PIPEB_LINK_N2 0x6104c
2462#define PIPEB_LINK_N2_OFFSET 0 2515
2516#define PIPE_DATA_M1(pipe) _PIPE(pipe, PIPEA_DATA_M1, PIPEB_DATA_M1)
2517#define PIPE_DATA_N1(pipe) _PIPE(pipe, PIPEA_DATA_N1, PIPEB_DATA_N1)
2518#define PIPE_DATA_M2(pipe) _PIPE(pipe, PIPEA_DATA_M2, PIPEB_DATA_M2)
2519#define PIPE_DATA_N2(pipe) _PIPE(pipe, PIPEA_DATA_N2, PIPEB_DATA_N2)
2520#define PIPE_LINK_M1(pipe) _PIPE(pipe, PIPEA_LINK_M1, PIPEB_LINK_M1)
2521#define PIPE_LINK_N1(pipe) _PIPE(pipe, PIPEA_LINK_N1, PIPEB_LINK_N1)
2522#define PIPE_LINK_M2(pipe) _PIPE(pipe, PIPEA_LINK_M2, PIPEB_LINK_M2)
2523#define PIPE_LINK_N2(pipe) _PIPE(pipe, PIPEA_LINK_N2, PIPEB_LINK_N2)
2463 2524
2464/* CPU panel fitter */ 2525/* CPU panel fitter */
2465#define PFA_CTL_1 0x68080 2526#define PFA_CTL_1 0x68080
@@ -2516,7 +2577,8 @@
2516#define GT_SYNC_STATUS (1 << 2) 2577#define GT_SYNC_STATUS (1 << 2)
2517#define GT_USER_INTERRUPT (1 << 0) 2578#define GT_USER_INTERRUPT (1 << 0)
2518#define GT_BSD_USER_INTERRUPT (1 << 5) 2579#define GT_BSD_USER_INTERRUPT (1 << 5)
2519 2580#define GT_GEN6_BSD_USER_INTERRUPT (1 << 12)
2581#define GT_BLT_USER_INTERRUPT (1 << 22)
2520 2582
2521#define GTISR 0x44010 2583#define GTISR 0x44010
2522#define GTIMR 0x44014 2584#define GTIMR 0x44014
@@ -2551,6 +2613,10 @@
2551#define SDE_PORTD_HOTPLUG_CPT (1 << 23) 2613#define SDE_PORTD_HOTPLUG_CPT (1 << 23)
2552#define SDE_PORTC_HOTPLUG_CPT (1 << 22) 2614#define SDE_PORTC_HOTPLUG_CPT (1 << 22)
2553#define SDE_PORTB_HOTPLUG_CPT (1 << 21) 2615#define SDE_PORTB_HOTPLUG_CPT (1 << 21)
2616#define SDE_HOTPLUG_MASK_CPT (SDE_CRT_HOTPLUG_CPT | \
2617 SDE_PORTD_HOTPLUG_CPT | \
2618 SDE_PORTC_HOTPLUG_CPT | \
2619 SDE_PORTB_HOTPLUG_CPT)
2554 2620
2555#define SDEISR 0xc4000 2621#define SDEISR 0xc4000
2556#define SDEIMR 0xc4004 2622#define SDEIMR 0xc4004
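A plausible consumer of the new aggregate mask, sketched rather than taken from this patch (the SDEIIR register and the hotplug work item are assumed from the IRQ code):

	u32 pch_iir = I915_READ(SDEIIR);
	if (pch_iir & SDE_HOTPLUG_MASK_CPT)	/* any CPT port changed state */
		queue_work(dev_priv->wq, &dev_priv->hotplug_work);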
@@ -2600,11 +2666,14 @@
2600 2666
2601#define PCH_DPLL_A 0xc6014 2667#define PCH_DPLL_A 0xc6014
2602#define PCH_DPLL_B 0xc6018 2668#define PCH_DPLL_B 0xc6018
2669#define PCH_DPLL(pipe) _PIPE(pipe, PCH_DPLL_A, PCH_DPLL_B)
2603 2670
2604#define PCH_FPA0 0xc6040 2671#define PCH_FPA0 0xc6040
2605#define PCH_FPA1 0xc6044 2672#define PCH_FPA1 0xc6044
2606#define PCH_FPB0 0xc6048 2673#define PCH_FPB0 0xc6048
2607#define PCH_FPB1 0xc604c 2674#define PCH_FPB1 0xc604c
2675#define PCH_FP0(pipe) _PIPE(pipe, PCH_FPA0, PCH_FPB0)
2676#define PCH_FP1(pipe) _PIPE(pipe, PCH_FPA1, PCH_FPB1)
2608 2677
2609#define PCH_DPLL_TEST 0xc606c 2678#define PCH_DPLL_TEST 0xc606c
2610 2679
@@ -2690,6 +2759,13 @@
2690#define TRANS_VBLANK_B 0xe1010 2759#define TRANS_VBLANK_B 0xe1010
2691#define TRANS_VSYNC_B 0xe1014 2760#define TRANS_VSYNC_B 0xe1014
2692 2761
2762#define TRANS_HTOTAL(pipe) _PIPE(pipe, TRANS_HTOTAL_A, TRANS_HTOTAL_B)
2763#define TRANS_HBLANK(pipe) _PIPE(pipe, TRANS_HBLANK_A, TRANS_HBLANK_B)
2764#define TRANS_HSYNC(pipe) _PIPE(pipe, TRANS_HSYNC_A, TRANS_HSYNC_B)
2765#define TRANS_VTOTAL(pipe) _PIPE(pipe, TRANS_VTOTAL_A, TRANS_VTOTAL_B)
2766#define TRANS_VBLANK(pipe) _PIPE(pipe, TRANS_VBLANK_A, TRANS_VBLANK_B)
2767#define TRANS_VSYNC(pipe) _PIPE(pipe, TRANS_VSYNC_A, TRANS_VSYNC_B)
2768
2693#define TRANSB_DATA_M1 0xe1030 2769#define TRANSB_DATA_M1 0xe1030
2694#define TRANSB_DATA_N1 0xe1034 2770#define TRANSB_DATA_N1 0xe1034
2695#define TRANSB_DATA_M2 0xe1038 2771#define TRANSB_DATA_M2 0xe1038
@@ -2701,6 +2777,7 @@
2701 2777
2702#define TRANSACONF 0xf0008 2778#define TRANSACONF 0xf0008
2703#define TRANSBCONF 0xf1008 2779#define TRANSBCONF 0xf1008
2780#define TRANSCONF(plane) _PIPE(plane, TRANSACONF, TRANSBCONF)
2704#define TRANS_DISABLE (0<<31) 2781#define TRANS_DISABLE (0<<31)
2705#define TRANS_ENABLE (1<<31) 2782#define TRANS_ENABLE (1<<31)
2706#define TRANS_STATE_MASK (1<<30) 2783#define TRANS_STATE_MASK (1<<30)
@@ -2721,10 +2798,15 @@
2721#define FDI_RXA_CHICKEN 0xc200c 2798#define FDI_RXA_CHICKEN 0xc200c
2722#define FDI_RXB_CHICKEN 0xc2010 2799#define FDI_RXB_CHICKEN 0xc2010
2723#define FDI_RX_PHASE_SYNC_POINTER_ENABLE (1) 2800#define FDI_RX_PHASE_SYNC_POINTER_ENABLE (1)
2801#define FDI_RX_CHICKEN(pipe) _PIPE(pipe, FDI_RXA_CHICKEN, FDI_RXB_CHICKEN)
2802
2803#define SOUTH_DSPCLK_GATE_D 0xc2020
2804#define PCH_DPLSUNIT_CLOCK_GATE_DISABLE (1<<29)
2724 2805
2725/* CPU: FDI_TX */ 2806/* CPU: FDI_TX */
2726#define FDI_TXA_CTL 0x60100 2807#define FDI_TXA_CTL 0x60100
2727#define FDI_TXB_CTL 0x61100 2808#define FDI_TXB_CTL 0x61100
2809#define FDI_TX_CTL(pipe) _PIPE(pipe, FDI_TXA_CTL, FDI_TXB_CTL)
2728#define FDI_TX_DISABLE (0<<31) 2810#define FDI_TX_DISABLE (0<<31)
2729#define FDI_TX_ENABLE (1<<31) 2811#define FDI_TX_ENABLE (1<<31)
2730#define FDI_LINK_TRAIN_PATTERN_1 (0<<28) 2812#define FDI_LINK_TRAIN_PATTERN_1 (0<<28)
@@ -2766,8 +2848,8 @@
2766/* FDI_RX, FDI_X is hard-wired to Transcoder_X */ 2848/* FDI_RX, FDI_X is hard-wired to Transcoder_X */
2767#define FDI_RXA_CTL 0xf000c 2849#define FDI_RXA_CTL 0xf000c
2768#define FDI_RXB_CTL 0xf100c 2850#define FDI_RXB_CTL 0xf100c
2851#define FDI_RX_CTL(pipe) _PIPE(pipe, FDI_RXA_CTL, FDI_RXB_CTL)
2769#define FDI_RX_ENABLE (1<<31) 2852#define FDI_RX_ENABLE (1<<31)
2770#define FDI_RX_DISABLE (0<<31)
2771/* train, dp width same as FDI_TX */ 2853/* train, dp width same as FDI_TX */
2772#define FDI_DP_PORT_WIDTH_X8 (7<<19) 2854#define FDI_DP_PORT_WIDTH_X8 (7<<19)
2773#define FDI_8BPC (0<<16) 2855#define FDI_8BPC (0<<16)
@@ -2782,8 +2864,7 @@
2782#define FDI_FS_ERR_REPORT_ENABLE (1<<9) 2864#define FDI_FS_ERR_REPORT_ENABLE (1<<9)
2783#define FDI_FE_ERR_REPORT_ENABLE (1<<8) 2865#define FDI_FE_ERR_REPORT_ENABLE (1<<8)
2784#define FDI_RX_ENHANCE_FRAME_ENABLE (1<<6) 2866#define FDI_RX_ENHANCE_FRAME_ENABLE (1<<6)
2785#define FDI_SEL_RAWCLK (0<<4) 2867#define FDI_PCDCLK (1<<4)
2786#define FDI_SEL_PCDCLK (1<<4)
2787/* CPT */ 2868/* CPT */
2788#define FDI_AUTO_TRAINING (1<<10) 2869#define FDI_AUTO_TRAINING (1<<10)
2789#define FDI_LINK_TRAIN_PATTERN_1_CPT (0<<8) 2870#define FDI_LINK_TRAIN_PATTERN_1_CPT (0<<8)
@@ -2798,6 +2879,9 @@
2798#define FDI_RXA_TUSIZE2 0xf0038 2879#define FDI_RXA_TUSIZE2 0xf0038
2799#define FDI_RXB_TUSIZE1 0xf1030 2880#define FDI_RXB_TUSIZE1 0xf1030
2800#define FDI_RXB_TUSIZE2 0xf1038 2881#define FDI_RXB_TUSIZE2 0xf1038
2882#define FDI_RX_MISC(pipe) _PIPE(pipe, FDI_RXA_MISC, FDI_RXB_MISC)
2883#define FDI_RX_TUSIZE1(pipe) _PIPE(pipe, FDI_RXA_TUSIZE1, FDI_RXB_TUSIZE1)
2884#define FDI_RX_TUSIZE2(pipe) _PIPE(pipe, FDI_RXA_TUSIZE2, FDI_RXB_TUSIZE2)
2801 2885
2802/* FDI_RX interrupt register format */ 2886/* FDI_RX interrupt register format */
2803#define FDI_RX_INTER_LANE_ALIGN (1<<10) 2887#define FDI_RX_INTER_LANE_ALIGN (1<<10)
@@ -2816,6 +2900,8 @@
2816#define FDI_RXA_IMR 0xf0018 2900#define FDI_RXA_IMR 0xf0018
2817#define FDI_RXB_IIR 0xf1014 2901#define FDI_RXB_IIR 0xf1014
2818#define FDI_RXB_IMR 0xf1018 2902#define FDI_RXB_IMR 0xf1018
2903#define FDI_RX_IIR(pipe) _PIPE(pipe, FDI_RXA_IIR, FDI_RXB_IIR)
2904#define FDI_RX_IMR(pipe) _PIPE(pipe, FDI_RXA_IMR, FDI_RXB_IMR)
2819 2905
2820#define FDI_PLL_CTL_1 0xfe000 2906#define FDI_PLL_CTL_1 0xfe000
2821#define FDI_PLL_CTL_2 0xfe004 2907#define FDI_PLL_CTL_2 0xfe004
@@ -2935,6 +3021,7 @@
2935#define TRANS_DP_CTL_A 0xe0300 3021#define TRANS_DP_CTL_A 0xe0300
2936#define TRANS_DP_CTL_B 0xe1300 3022#define TRANS_DP_CTL_B 0xe1300
2937#define TRANS_DP_CTL_C 0xe2300 3023#define TRANS_DP_CTL_C 0xe2300
3024#define TRANS_DP_CTL(pipe) (TRANS_DP_CTL_A + (pipe) * 0x01000)
2938#define TRANS_DP_OUTPUT_ENABLE (1<<31) 3025#define TRANS_DP_OUTPUT_ENABLE (1<<31)
2939#define TRANS_DP_PORT_SEL_B (0<<29) 3026#define TRANS_DP_PORT_SEL_B (0<<29)
2940#define TRANS_DP_PORT_SEL_C (1<<29) 3027#define TRANS_DP_PORT_SEL_C (1<<29)
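For reference, the explicit arithmetic in TRANS_DP_CTL() is consistent with the _PIPE() style used elsewhere in this patch:

	/* TRANS_DP_CTL_B - TRANS_DP_CTL_A == 0x1000, so TRANS_DP_CTL(0) == 0xe0300
	 * and TRANS_DP_CTL(1) == 0xe1300; unlike the two-register _PIPE() form it
	 * also extends to pipe C: TRANS_DP_CTL(2) == 0xe2300. */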
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 31f08581e93a..989c19d2d959 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -256,7 +256,7 @@ static void i915_save_modeset_reg(struct drm_device *dev)
256 dev_priv->saveFPA1 = I915_READ(FPA1); 256 dev_priv->saveFPA1 = I915_READ(FPA1);
257 dev_priv->saveDPLL_A = I915_READ(DPLL_A); 257 dev_priv->saveDPLL_A = I915_READ(DPLL_A);
258 } 258 }
259 if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev)) 259 if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
260 dev_priv->saveDPLL_A_MD = I915_READ(DPLL_A_MD); 260 dev_priv->saveDPLL_A_MD = I915_READ(DPLL_A_MD);
261 dev_priv->saveHTOTAL_A = I915_READ(HTOTAL_A); 261 dev_priv->saveHTOTAL_A = I915_READ(HTOTAL_A);
262 dev_priv->saveHBLANK_A = I915_READ(HBLANK_A); 262 dev_priv->saveHBLANK_A = I915_READ(HBLANK_A);
@@ -294,7 +294,7 @@ static void i915_save_modeset_reg(struct drm_device *dev)
294 dev_priv->saveDSPASIZE = I915_READ(DSPASIZE); 294 dev_priv->saveDSPASIZE = I915_READ(DSPASIZE);
295 dev_priv->saveDSPAPOS = I915_READ(DSPAPOS); 295 dev_priv->saveDSPAPOS = I915_READ(DSPAPOS);
296 dev_priv->saveDSPAADDR = I915_READ(DSPAADDR); 296 dev_priv->saveDSPAADDR = I915_READ(DSPAADDR);
297 if (IS_I965G(dev)) { 297 if (INTEL_INFO(dev)->gen >= 4) {
298 dev_priv->saveDSPASURF = I915_READ(DSPASURF); 298 dev_priv->saveDSPASURF = I915_READ(DSPASURF);
299 dev_priv->saveDSPATILEOFF = I915_READ(DSPATILEOFF); 299 dev_priv->saveDSPATILEOFF = I915_READ(DSPATILEOFF);
300 } 300 }
@@ -313,7 +313,7 @@ static void i915_save_modeset_reg(struct drm_device *dev)
313 dev_priv->saveFPB1 = I915_READ(FPB1); 313 dev_priv->saveFPB1 = I915_READ(FPB1);
314 dev_priv->saveDPLL_B = I915_READ(DPLL_B); 314 dev_priv->saveDPLL_B = I915_READ(DPLL_B);
315 } 315 }
316 if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev)) 316 if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
317 dev_priv->saveDPLL_B_MD = I915_READ(DPLL_B_MD); 317 dev_priv->saveDPLL_B_MD = I915_READ(DPLL_B_MD);
318 dev_priv->saveHTOTAL_B = I915_READ(HTOTAL_B); 318 dev_priv->saveHTOTAL_B = I915_READ(HTOTAL_B);
319 dev_priv->saveHBLANK_B = I915_READ(HBLANK_B); 319 dev_priv->saveHBLANK_B = I915_READ(HBLANK_B);
@@ -351,7 +351,7 @@ static void i915_save_modeset_reg(struct drm_device *dev)
351 dev_priv->saveDSPBSIZE = I915_READ(DSPBSIZE); 351 dev_priv->saveDSPBSIZE = I915_READ(DSPBSIZE);
352 dev_priv->saveDSPBPOS = I915_READ(DSPBPOS); 352 dev_priv->saveDSPBPOS = I915_READ(DSPBPOS);
353 dev_priv->saveDSPBADDR = I915_READ(DSPBADDR); 353 dev_priv->saveDSPBADDR = I915_READ(DSPBADDR);
354 if (IS_I965GM(dev) || IS_GM45(dev)) { 354 if (INTEL_INFO(dev)->gen >= 4) {
355 dev_priv->saveDSPBSURF = I915_READ(DSPBSURF); 355 dev_priv->saveDSPBSURF = I915_READ(DSPBSURF);
356 dev_priv->saveDSPBTILEOFF = I915_READ(DSPBTILEOFF); 356 dev_priv->saveDSPBTILEOFF = I915_READ(DSPBTILEOFF);
357 } 357 }
@@ -404,7 +404,7 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
404 I915_WRITE(dpll_a_reg, dev_priv->saveDPLL_A); 404 I915_WRITE(dpll_a_reg, dev_priv->saveDPLL_A);
405 POSTING_READ(dpll_a_reg); 405 POSTING_READ(dpll_a_reg);
406 udelay(150); 406 udelay(150);
407 if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev)) { 407 if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) {
408 I915_WRITE(DPLL_A_MD, dev_priv->saveDPLL_A_MD); 408 I915_WRITE(DPLL_A_MD, dev_priv->saveDPLL_A_MD);
409 POSTING_READ(DPLL_A_MD); 409 POSTING_READ(DPLL_A_MD);
410 } 410 }
@@ -448,7 +448,7 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
448 I915_WRITE(PIPEASRC, dev_priv->savePIPEASRC); 448 I915_WRITE(PIPEASRC, dev_priv->savePIPEASRC);
449 I915_WRITE(DSPAADDR, dev_priv->saveDSPAADDR); 449 I915_WRITE(DSPAADDR, dev_priv->saveDSPAADDR);
450 I915_WRITE(DSPASTRIDE, dev_priv->saveDSPASTRIDE); 450 I915_WRITE(DSPASTRIDE, dev_priv->saveDSPASTRIDE);
451 if (IS_I965G(dev)) { 451 if (INTEL_INFO(dev)->gen >= 4) {
452 I915_WRITE(DSPASURF, dev_priv->saveDSPASURF); 452 I915_WRITE(DSPASURF, dev_priv->saveDSPASURF);
453 I915_WRITE(DSPATILEOFF, dev_priv->saveDSPATILEOFF); 453 I915_WRITE(DSPATILEOFF, dev_priv->saveDSPATILEOFF);
454 } 454 }
@@ -473,7 +473,7 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
473 I915_WRITE(dpll_b_reg, dev_priv->saveDPLL_B); 473 I915_WRITE(dpll_b_reg, dev_priv->saveDPLL_B);
474 POSTING_READ(dpll_b_reg); 474 POSTING_READ(dpll_b_reg);
475 udelay(150); 475 udelay(150);
476 if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev)) { 476 if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) {
477 I915_WRITE(DPLL_B_MD, dev_priv->saveDPLL_B_MD); 477 I915_WRITE(DPLL_B_MD, dev_priv->saveDPLL_B_MD);
478 POSTING_READ(DPLL_B_MD); 478 POSTING_READ(DPLL_B_MD);
479 } 479 }
@@ -517,7 +517,7 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
517 I915_WRITE(PIPEBSRC, dev_priv->savePIPEBSRC); 517 I915_WRITE(PIPEBSRC, dev_priv->savePIPEBSRC);
518 I915_WRITE(DSPBADDR, dev_priv->saveDSPBADDR); 518 I915_WRITE(DSPBADDR, dev_priv->saveDSPBADDR);
519 I915_WRITE(DSPBSTRIDE, dev_priv->saveDSPBSTRIDE); 519 I915_WRITE(DSPBSTRIDE, dev_priv->saveDSPBSTRIDE);
520 if (IS_I965G(dev)) { 520 if (INTEL_INFO(dev)->gen >= 4) {
521 I915_WRITE(DSPBSURF, dev_priv->saveDSPBSURF); 521 I915_WRITE(DSPBSURF, dev_priv->saveDSPBSURF);
522 I915_WRITE(DSPBTILEOFF, dev_priv->saveDSPBTILEOFF); 522 I915_WRITE(DSPBTILEOFF, dev_priv->saveDSPBTILEOFF);
523 } 523 }
@@ -550,7 +550,7 @@ void i915_save_display(struct drm_device *dev)
550 dev_priv->saveCURBCNTR = I915_READ(CURBCNTR); 550 dev_priv->saveCURBCNTR = I915_READ(CURBCNTR);
551 dev_priv->saveCURBPOS = I915_READ(CURBPOS); 551 dev_priv->saveCURBPOS = I915_READ(CURBPOS);
552 dev_priv->saveCURBBASE = I915_READ(CURBBASE); 552 dev_priv->saveCURBBASE = I915_READ(CURBBASE);
553 if (!IS_I9XX(dev)) 553 if (IS_GEN2(dev))
554 dev_priv->saveCURSIZE = I915_READ(CURSIZE); 554 dev_priv->saveCURSIZE = I915_READ(CURSIZE);
555 555
556 /* CRT state */ 556 /* CRT state */
@@ -573,7 +573,7 @@ void i915_save_display(struct drm_device *dev)
573 dev_priv->savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS); 573 dev_priv->savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS);
574 dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL); 574 dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL);
575 dev_priv->saveBLC_HIST_CTL = I915_READ(BLC_HIST_CTL); 575 dev_priv->saveBLC_HIST_CTL = I915_READ(BLC_HIST_CTL);
576 if (IS_I965G(dev)) 576 if (INTEL_INFO(dev)->gen >= 4)
577 dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2); 577 dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2);
578 if (IS_MOBILE(dev) && !IS_I830(dev)) 578 if (IS_MOBILE(dev) && !IS_I830(dev))
579 dev_priv->saveLVDS = I915_READ(LVDS); 579 dev_priv->saveLVDS = I915_READ(LVDS);
@@ -664,7 +664,7 @@ void i915_restore_display(struct drm_device *dev)
664 I915_WRITE(CURBPOS, dev_priv->saveCURBPOS); 664 I915_WRITE(CURBPOS, dev_priv->saveCURBPOS);
665 I915_WRITE(CURBCNTR, dev_priv->saveCURBCNTR); 665 I915_WRITE(CURBCNTR, dev_priv->saveCURBCNTR);
666 I915_WRITE(CURBBASE, dev_priv->saveCURBBASE); 666 I915_WRITE(CURBBASE, dev_priv->saveCURBBASE);
667 if (!IS_I9XX(dev)) 667 if (IS_GEN2(dev))
668 I915_WRITE(CURSIZE, dev_priv->saveCURSIZE); 668 I915_WRITE(CURSIZE, dev_priv->saveCURSIZE);
669 669
670 /* CRT state */ 670 /* CRT state */
@@ -674,7 +674,7 @@ void i915_restore_display(struct drm_device *dev)
674 I915_WRITE(ADPA, dev_priv->saveADPA); 674 I915_WRITE(ADPA, dev_priv->saveADPA);
675 675
676 /* LVDS state */ 676 /* LVDS state */
677 if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev)) 677 if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
678 I915_WRITE(BLC_PWM_CTL2, dev_priv->saveBLC_PWM_CTL2); 678 I915_WRITE(BLC_PWM_CTL2, dev_priv->saveBLC_PWM_CTL2);
679 679
680 if (HAS_PCH_SPLIT(dev)) { 680 if (HAS_PCH_SPLIT(dev)) {
@@ -878,9 +878,7 @@ int i915_restore_state(struct drm_device *dev)
878 for (i = 0; i < 3; i++) 878 for (i = 0; i < 3; i++)
879 I915_WRITE(SWF30 + (i << 2), dev_priv->saveSWF2[i]); 879 I915_WRITE(SWF30 + (i << 2), dev_priv->saveSWF2[i]);
880 880
881 /* I2C state */ 881 intel_i2c_reset(dev);
882 intel_i2c_reset_gmbus(dev);
883 882
884 return 0; 883 return 0;
885} 884}
886
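The recurring replacement in this file swaps per-chip checks for a generation field. A sketch of the macros this relies on, assuming the intel_device_info layout from i915_drv.h in this series (not shown here):

	#define IS_GEN2(dev)	(INTEL_INFO(dev)->gen == 2)
	#define IS_GEN5(dev)	(INTEL_INFO(dev)->gen == 5)
	/* so "INTEL_INFO(dev)->gen >= 4" covers 965-class hardware and everything
	 * newer, where IS_I965G()/IS_I965GM()/IS_GM45() each named a single chip. */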
diff --git a/drivers/gpu/drm/i915/intel_acpi.c b/drivers/gpu/drm/i915/intel_acpi.c
new file mode 100644
index 000000000000..65c88f9ba12c
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_acpi.c
@@ -0,0 +1,286 @@
1/*
2 * Intel ACPI functions
3 *
4 * _DSM related code stolen from nouveau_acpi.c.
5 */
6#include <linux/pci.h>
7#include <linux/acpi.h>
8#include <linux/vga_switcheroo.h>
9#include <acpi/acpi_drivers.h>
10
11#include "drmP.h"
12
13#define INTEL_DSM_REVISION_ID 1 /* For Calpella anyway... */
14
15#define INTEL_DSM_FN_SUPPORTED_FUNCTIONS 0 /* No args */
16#define INTEL_DSM_FN_PLATFORM_MUX_INFO 1 /* No args */
17
18static struct intel_dsm_priv {
19 acpi_handle dhandle;
20} intel_dsm_priv;
21
22static const u8 intel_dsm_guid[] = {
23 0xd3, 0x73, 0xd8, 0x7e,
24 0xd0, 0xc2,
25 0x4f, 0x4e,
26 0xa8, 0x54,
27 0x0f, 0x13, 0x17, 0xb0, 0x1c, 0x2c
28};
29
30static int intel_dsm(acpi_handle handle, int func, int arg)
31{
32 struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
33 struct acpi_object_list input;
34 union acpi_object params[4];
35 union acpi_object *obj;
36 u32 result;
37 int ret = 0;
38
39 input.count = 4;
40 input.pointer = params;
41 params[0].type = ACPI_TYPE_BUFFER;
42 params[0].buffer.length = sizeof(intel_dsm_guid);
43 params[0].buffer.pointer = (char *)intel_dsm_guid;
44 params[1].type = ACPI_TYPE_INTEGER;
45 params[1].integer.value = INTEL_DSM_REVISION_ID;
46 params[2].type = ACPI_TYPE_INTEGER;
47 params[2].integer.value = func;
48 params[3].type = ACPI_TYPE_INTEGER;
49 params[3].integer.value = arg;
50
51 ret = acpi_evaluate_object(handle, "_DSM", &input, &output);
52 if (ret) {
53 DRM_DEBUG_DRIVER("failed to evaluate _DSM: %d\n", ret);
54 return ret;
55 }
56
57 obj = (union acpi_object *)output.pointer;
58
59 result = 0;
60 switch (obj->type) {
61 case ACPI_TYPE_INTEGER:
62 result = obj->integer.value;
63 break;
64
65 case ACPI_TYPE_BUFFER:
66 if (obj->buffer.length == 4) {
67 result = (obj->buffer.pointer[0] |
68 (obj->buffer.pointer[1] << 8) |
69 (obj->buffer.pointer[2] << 16) |
70 (obj->buffer.pointer[3] << 24));
71 break;
72 }
73 default:
74 ret = -EINVAL;
75 break;
76 }
77 if (result == 0x80000002)
78 ret = -ENODEV;
79
80 kfree(output.pointer);
81 return ret;
82}
83
84static char *intel_dsm_port_name(u8 id)
85{
86 switch (id) {
87 case 0:
88 return "Reserved";
89 case 1:
90 return "Analog VGA";
91 case 2:
92 return "LVDS";
93 case 3:
94 return "Reserved";
95 case 4:
96 return "HDMI/DVI_B";
97 case 5:
98 return "HDMI/DVI_C";
99 case 6:
100 return "HDMI/DVI_D";
101 case 7:
102 return "DisplayPort_A";
103 case 8:
104 return "DisplayPort_B";
105 case 9:
106 return "DisplayPort_C";
107 case 0xa:
108 return "DisplayPort_D";
109 case 0xb:
110 case 0xc:
111 case 0xd:
112 return "Reserved";
113 case 0xe:
114 return "WiDi";
115 default:
116 return "bad type";
117 }
118}
119
120static char *intel_dsm_mux_type(u8 type)
121{
122 switch (type) {
123 case 0:
124 return "unknown";
125 case 1:
126 return "No MUX, iGPU only";
127 case 2:
128 return "No MUX, dGPU only";
129 case 3:
130 return "MUXed between iGPU and dGPU";
131 default:
132 return "bad type";
133 }
134}
135
136static void intel_dsm_platform_mux_info(void)
137{
138 struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
139 struct acpi_object_list input;
140 union acpi_object params[4];
141 union acpi_object *pkg;
142 int i, ret;
143
144 input.count = 4;
145 input.pointer = params;
146 params[0].type = ACPI_TYPE_BUFFER;
147 params[0].buffer.length = sizeof(intel_dsm_guid);
148 params[0].buffer.pointer = (char *)intel_dsm_guid;
149 params[1].type = ACPI_TYPE_INTEGER;
150 params[1].integer.value = INTEL_DSM_REVISION_ID;
151 params[2].type = ACPI_TYPE_INTEGER;
152 params[2].integer.value = INTEL_DSM_FN_PLATFORM_MUX_INFO;
153 params[3].type = ACPI_TYPE_INTEGER;
154 params[3].integer.value = 0;
155
156 ret = acpi_evaluate_object(intel_dsm_priv.dhandle, "_DSM", &input,
157 &output);
158 if (ret) {
159 DRM_DEBUG_DRIVER("failed to evaluate _DSM: %d\n", ret);
160 goto out;
161 }
162
163 pkg = (union acpi_object *)output.pointer;
164
165 if (pkg->type == ACPI_TYPE_PACKAGE) {
166 union acpi_object *connector_count = &pkg->package.elements[0];
167 DRM_DEBUG_DRIVER("MUX info connectors: %lld\n",
168 (unsigned long long)connector_count->integer.value);
169 for (i = 1; i < pkg->package.count; i++) {
170 union acpi_object *obj = &pkg->package.elements[i];
171 union acpi_object *connector_id =
172 &obj->package.elements[0];
173 union acpi_object *info = &obj->package.elements[1];
174 DRM_DEBUG_DRIVER("Connector id: 0x%016llx\n",
175 (unsigned long long)connector_id->integer.value);
176 DRM_DEBUG_DRIVER(" port id: %s\n",
177 intel_dsm_port_name(info->buffer.pointer[0]));
178 DRM_DEBUG_DRIVER(" display mux info: %s\n",
179 intel_dsm_mux_type(info->buffer.pointer[1]));
180 DRM_DEBUG_DRIVER(" aux/dc mux info: %s\n",
181 intel_dsm_mux_type(info->buffer.pointer[2]));
182 DRM_DEBUG_DRIVER(" hpd mux info: %s\n",
183 intel_dsm_mux_type(info->buffer.pointer[3]));
184 }
185 } else {
186 DRM_ERROR("MUX INFO call failed\n");
187 }
188
189out:
190 kfree(output.pointer);
191}
192
193static int intel_dsm_switchto(enum vga_switcheroo_client_id id)
194{
195 return 0;
196}
197
198static int intel_dsm_power_state(enum vga_switcheroo_client_id id,
199 enum vga_switcheroo_state state)
200{
201 return 0;
202}
203
204static int intel_dsm_init(void)
205{
206 return 0;
207}
208
209static int intel_dsm_get_client_id(struct pci_dev *pdev)
210{
211 if (intel_dsm_priv.dhandle == DEVICE_ACPI_HANDLE(&pdev->dev))
212 return VGA_SWITCHEROO_IGD;
213 else
214 return VGA_SWITCHEROO_DIS;
215}
216
217static struct vga_switcheroo_handler intel_dsm_handler = {
218 .switchto = intel_dsm_switchto,
219 .power_state = intel_dsm_power_state,
220 .init = intel_dsm_init,
221 .get_client_id = intel_dsm_get_client_id,
222};
223
224static bool intel_dsm_pci_probe(struct pci_dev *pdev)
225{
226 acpi_handle dhandle, intel_handle;
227 acpi_status status;
228 int ret;
229
230 dhandle = DEVICE_ACPI_HANDLE(&pdev->dev);
231 if (!dhandle)
232 return false;
233
234 status = acpi_get_handle(dhandle, "_DSM", &intel_handle);
235 if (ACPI_FAILURE(status)) {
236 DRM_DEBUG_KMS("no _DSM method for intel device\n");
237 return false;
238 }
239
240 ret = intel_dsm(dhandle, INTEL_DSM_FN_SUPPORTED_FUNCTIONS, 0);
241 if (ret < 0) {
242 DRM_ERROR("failed to get supported _DSM functions\n");
243 return false;
244 }
245
246 intel_dsm_priv.dhandle = dhandle;
247
248 intel_dsm_platform_mux_info();
249 return true;
250}
251
252static bool intel_dsm_detect(void)
253{
254 char acpi_method_name[255] = { 0 };
255 struct acpi_buffer buffer = {sizeof(acpi_method_name), acpi_method_name};
256 struct pci_dev *pdev = NULL;
257 bool has_dsm = false;
258 int vga_count = 0;
259
260 while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
261 vga_count++;
262 has_dsm |= intel_dsm_pci_probe(pdev);
263 }
264
265 if (vga_count == 2 && has_dsm) {
266 acpi_get_name(intel_dsm_priv.dhandle, ACPI_FULL_PATHNAME, &buffer);
267 DRM_DEBUG_DRIVER("VGA switcheroo: detected DSM switching method %s handle\n",
268 acpi_method_name);
269 return true;
270 }
271
272 return false;
273}
274
275void intel_register_dsm_handler(void)
276{
277 if (!intel_dsm_detect())
278 return;
279
280 vga_switcheroo_register_handler(&intel_dsm_handler);
281}
282
283void intel_unregister_dsm_handler(void)
284{
285 vga_switcheroo_unregister_handler();
286}
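A hypothetical helper, not in the patch, that makes the byte order of intel_dsm_guid explicit; ACPI stores the first three UUID fields little-endian, the remainder as written:

	#include <stdio.h>
	#include <stdint.h>

	static void print_dsm_uuid(const uint8_t g[16])
	{
		int i;
		printf("%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-",
		       g[3], g[2], g[1], g[0], g[5], g[4], g[7], g[6], g[8], g[9]);
		for (i = 10; i < 16; i++)
			printf("%02x", g[i]);
		printf("\n");	/* prints 7ed873d3-c2d0-4e4f-a854-0f1317b01c2c */
	}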
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 96f75d7f6633..b0b1200ed650 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -24,6 +24,7 @@
24 * Eric Anholt <eric@anholt.net> 24 * Eric Anholt <eric@anholt.net>
25 * 25 *
26 */ 26 */
27#include <drm/drm_dp_helper.h>
27#include "drmP.h" 28#include "drmP.h"
28#include "drm.h" 29#include "drm.h"
29#include "i915_drm.h" 30#include "i915_drm.h"
@@ -129,10 +130,6 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
129 int i, temp_downclock; 130 int i, temp_downclock;
130 struct drm_display_mode *temp_mode; 131 struct drm_display_mode *temp_mode;
131 132
132 /* Defaults if we can't find VBT info */
133 dev_priv->lvds_dither = 0;
134 dev_priv->lvds_vbt = 0;
135
136 lvds_options = find_section(bdb, BDB_LVDS_OPTIONS); 133 lvds_options = find_section(bdb, BDB_LVDS_OPTIONS);
137 if (!lvds_options) 134 if (!lvds_options)
138 return; 135 return;
@@ -140,6 +137,7 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
140 dev_priv->lvds_dither = lvds_options->pixel_dither; 137 dev_priv->lvds_dither = lvds_options->pixel_dither;
141 if (lvds_options->panel_type == 0xff) 138 if (lvds_options->panel_type == 0xff)
142 return; 139 return;
140
143 panel_type = lvds_options->panel_type; 141 panel_type = lvds_options->panel_type;
144 142
145 lvds_lfp_data = find_section(bdb, BDB_LVDS_LFP_DATA); 143 lvds_lfp_data = find_section(bdb, BDB_LVDS_LFP_DATA);
@@ -169,6 +167,8 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
169 ((unsigned char *)entry + dvo_timing_offset); 167 ((unsigned char *)entry + dvo_timing_offset);
170 168
171 panel_fixed_mode = kzalloc(sizeof(*panel_fixed_mode), GFP_KERNEL); 169 panel_fixed_mode = kzalloc(sizeof(*panel_fixed_mode), GFP_KERNEL);
170 if (!panel_fixed_mode)
171 return;
172 172
173 fill_detail_timing_data(panel_fixed_mode, dvo_timing); 173 fill_detail_timing_data(panel_fixed_mode, dvo_timing);
174 174
@@ -230,8 +230,6 @@ parse_sdvo_panel_data(struct drm_i915_private *dev_priv,
230 struct lvds_dvo_timing *dvo_timing; 230 struct lvds_dvo_timing *dvo_timing;
231 struct drm_display_mode *panel_fixed_mode; 231 struct drm_display_mode *panel_fixed_mode;
232 232
233 dev_priv->sdvo_lvds_vbt_mode = NULL;
234
235 sdvo_lvds_options = find_section(bdb, BDB_SDVO_LVDS_OPTIONS); 233 sdvo_lvds_options = find_section(bdb, BDB_SDVO_LVDS_OPTIONS);
236 if (!sdvo_lvds_options) 234 if (!sdvo_lvds_options)
237 return; 235 return;
@@ -260,10 +258,6 @@ parse_general_features(struct drm_i915_private *dev_priv,
260 struct drm_device *dev = dev_priv->dev; 258 struct drm_device *dev = dev_priv->dev;
261 struct bdb_general_features *general; 259 struct bdb_general_features *general;
262 260
263 /* Set sensible defaults in case we can't find the general block */
264 dev_priv->int_tv_support = 1;
265 dev_priv->int_crt_support = 1;
266
267 general = find_section(bdb, BDB_GENERAL_FEATURES); 261 general = find_section(bdb, BDB_GENERAL_FEATURES);
268 if (general) { 262 if (general) {
269 dev_priv->int_tv_support = general->int_tv_support; 263 dev_priv->int_tv_support = general->int_tv_support;
@@ -271,10 +265,10 @@ parse_general_features(struct drm_i915_private *dev_priv,
271 dev_priv->lvds_use_ssc = general->enable_ssc; 265 dev_priv->lvds_use_ssc = general->enable_ssc;
272 266
273 if (dev_priv->lvds_use_ssc) { 267 if (dev_priv->lvds_use_ssc) {
274 if (IS_I85X(dev_priv->dev)) 268 if (IS_I85X(dev))
275 dev_priv->lvds_ssc_freq = 269 dev_priv->lvds_ssc_freq =
276 general->ssc_freq ? 66 : 48; 270 general->ssc_freq ? 66 : 48;
277 else if (IS_IRONLAKE(dev_priv->dev) || IS_GEN6(dev)) 271 else if (IS_GEN5(dev) || IS_GEN6(dev))
278 dev_priv->lvds_ssc_freq = 272 dev_priv->lvds_ssc_freq =
279 general->ssc_freq ? 100 : 120; 273 general->ssc_freq ? 100 : 120;
280 else 274 else
@@ -289,14 +283,6 @@ parse_general_definitions(struct drm_i915_private *dev_priv,
289 struct bdb_header *bdb) 283 struct bdb_header *bdb)
290{ 284{
291 struct bdb_general_definitions *general; 285 struct bdb_general_definitions *general;
292 const int crt_bus_map_table[] = {
293 GPIOB,
294 GPIOA,
295 GPIOC,
296 GPIOD,
297 GPIOE,
298 GPIOF,
299 };
300 286
301 general = find_section(bdb, BDB_GENERAL_DEFINITIONS); 287 general = find_section(bdb, BDB_GENERAL_DEFINITIONS);
302 if (general) { 288 if (general) {
@@ -304,10 +290,8 @@ parse_general_definitions(struct drm_i915_private *dev_priv,
304 if (block_size >= sizeof(*general)) { 290 if (block_size >= sizeof(*general)) {
305 int bus_pin = general->crt_ddc_gmbus_pin; 291 int bus_pin = general->crt_ddc_gmbus_pin;
306 DRM_DEBUG_KMS("crt_ddc_bus_pin: %d\n", bus_pin); 292 DRM_DEBUG_KMS("crt_ddc_bus_pin: %d\n", bus_pin);
307 if ((bus_pin >= 1) && (bus_pin <= 6)) { 293 if (bus_pin >= 1 && bus_pin <= 6)
308 dev_priv->crt_ddc_bus = 294 dev_priv->crt_ddc_pin = bus_pin;
309 crt_bus_map_table[bus_pin-1];
310 }
311 } else { 295 } else {
312 DRM_DEBUG_KMS("BDB_GD too small (%d). Invalid.\n", 296 DRM_DEBUG_KMS("BDB_GD too small (%d). Invalid.\n",
313 block_size); 297 block_size);
@@ -317,7 +301,7 @@ parse_general_definitions(struct drm_i915_private *dev_priv,
317 301
318static void 302static void
319parse_sdvo_device_mapping(struct drm_i915_private *dev_priv, 303parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
320 struct bdb_header *bdb) 304 struct bdb_header *bdb)
321{ 305{
322 struct sdvo_device_mapping *p_mapping; 306 struct sdvo_device_mapping *p_mapping;
323 struct bdb_general_definitions *p_defs; 307 struct bdb_general_definitions *p_defs;
@@ -327,7 +311,7 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
327 311
328 p_defs = find_section(bdb, BDB_GENERAL_DEFINITIONS); 312 p_defs = find_section(bdb, BDB_GENERAL_DEFINITIONS);
329 if (!p_defs) { 313 if (!p_defs) {
330 DRM_DEBUG_KMS("No general definition block is found\n"); 314 DRM_DEBUG_KMS("No general definition block is found, unable to construct sdvo mapping.\n");
331 return; 315 return;
332 } 316 }
333 /* judge whether the size of child device meets the requirements. 317 /* judge whether the size of child device meets the requirements.
@@ -377,7 +361,16 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
377 p_mapping->slave_addr = p_child->slave_addr; 361 p_mapping->slave_addr = p_child->slave_addr;
378 p_mapping->dvo_wiring = p_child->dvo_wiring; 362 p_mapping->dvo_wiring = p_child->dvo_wiring;
379 p_mapping->ddc_pin = p_child->ddc_pin; 363 p_mapping->ddc_pin = p_child->ddc_pin;
364 p_mapping->i2c_pin = p_child->i2c_pin;
365 p_mapping->i2c_speed = p_child->i2c_speed;
380 p_mapping->initialized = 1; 366 p_mapping->initialized = 1;
367 DRM_DEBUG_KMS("SDVO device: dvo=%x, addr=%x, wiring=%d, ddc_pin=%d, i2c_pin=%d, i2c_speed=%d\n",
368 p_mapping->dvo_port,
369 p_mapping->slave_addr,
370 p_mapping->dvo_wiring,
371 p_mapping->ddc_pin,
372 p_mapping->i2c_pin,
373 p_mapping->i2c_speed);
381 } else { 374 } else {
382 DRM_DEBUG_KMS("Maybe one SDVO port is shared by " 375 DRM_DEBUG_KMS("Maybe one SDVO port is shared by "
383 "two SDVO device.\n"); 376 "two SDVO device.\n");
@@ -409,14 +402,11 @@ parse_driver_features(struct drm_i915_private *dev_priv,
409 if (!driver) 402 if (!driver)
410 return; 403 return;
411 404
412 if (driver && SUPPORTS_EDP(dev) && 405 if (SUPPORTS_EDP(dev) &&
413 driver->lvds_config == BDB_DRIVER_FEATURE_EDP) { 406 driver->lvds_config == BDB_DRIVER_FEATURE_EDP)
414 dev_priv->edp_support = 1; 407 dev_priv->edp.support = 1;
415 } else {
416 dev_priv->edp_support = 0;
417 }
418 408
419 if (driver && driver->dual_frequency) 409 if (driver->dual_frequency)
420 dev_priv->render_reclock_avail = true; 410 dev_priv->render_reclock_avail = true;
421} 411}
422 412
@@ -424,27 +414,78 @@ static void
424parse_edp(struct drm_i915_private *dev_priv, struct bdb_header *bdb) 414parse_edp(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
425{ 415{
426 struct bdb_edp *edp; 416 struct bdb_edp *edp;
417 struct edp_power_seq *edp_pps;
418 struct edp_link_params *edp_link_params;
427 419
428 edp = find_section(bdb, BDB_EDP); 420 edp = find_section(bdb, BDB_EDP);
429 if (!edp) { 421 if (!edp) {
430 if (SUPPORTS_EDP(dev_priv->dev) && dev_priv->edp_support) { 422 if (SUPPORTS_EDP(dev_priv->dev) && dev_priv->edp.support) {
431 DRM_DEBUG_KMS("No eDP BDB found but eDP panel " 423 DRM_DEBUG_KMS("No eDP BDB found but eDP panel "
432 "supported, assume 18bpp panel color " 424 "supported, assume %dbpp panel color "
433 "depth.\n"); 425 "depth.\n",
434 dev_priv->edp_bpp = 18; 426 dev_priv->edp.bpp);
435 } 427 }
436 return; 428 return;
437 } 429 }
438 430
439 switch ((edp->color_depth >> (panel_type * 2)) & 3) { 431 switch ((edp->color_depth >> (panel_type * 2)) & 3) {
440 case EDP_18BPP: 432 case EDP_18BPP:
441 dev_priv->edp_bpp = 18; 433 dev_priv->edp.bpp = 18;
442 break; 434 break;
443 case EDP_24BPP: 435 case EDP_24BPP:
444 dev_priv->edp_bpp = 24; 436 dev_priv->edp.bpp = 24;
445 break; 437 break;
446 case EDP_30BPP: 438 case EDP_30BPP:
447 dev_priv->edp_bpp = 30; 439 dev_priv->edp.bpp = 30;
440 break;
441 }
442
443 /* Get the eDP sequencing and link info */
444 edp_pps = &edp->power_seqs[panel_type];
445 edp_link_params = &edp->link_params[panel_type];
446
447 dev_priv->edp.pps = *edp_pps;
448
449 dev_priv->edp.rate = edp_link_params->rate ? DP_LINK_BW_2_7 :
450 DP_LINK_BW_1_62;
451 switch (edp_link_params->lanes) {
452 case 0:
453 dev_priv->edp.lanes = 1;
454 break;
455 case 1:
456 dev_priv->edp.lanes = 2;
457 break;
458 case 3:
459 default:
460 dev_priv->edp.lanes = 4;
461 break;
462 }
463 switch (edp_link_params->preemphasis) {
464 case 0:
465 dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_0;
466 break;
467 case 1:
468 dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_3_5;
469 break;
470 case 2:
471 dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_6;
472 break;
473 case 3:
474 dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_9_5;
475 break;
476 }
477 switch (edp_link_params->vswing) {
478 case 0:
479 dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_400;
480 break;
481 case 1:
482 dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_600;
483 break;
484 case 2:
485 dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_800;
486 break;
487 case 3:
488 dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_1200;
448 break; 489 break;
449 } 490 }
450} 491}
@@ -460,7 +501,7 @@ parse_device_mapping(struct drm_i915_private *dev_priv,
460 501
461 p_defs = find_section(bdb, BDB_GENERAL_DEFINITIONS); 502 p_defs = find_section(bdb, BDB_GENERAL_DEFINITIONS);
462 if (!p_defs) { 503 if (!p_defs) {
463 DRM_DEBUG_KMS("No general definition block is found\n"); 504 DRM_DEBUG_KMS("No general definition block is found, no devices defined.\n");
464 return; 505 return;
465 } 506 }
466 /* judge whether the size of child device meets the requirements. 507 /* judge whether the size of child device meets the requirements.
@@ -513,50 +554,83 @@ parse_device_mapping(struct drm_i915_private *dev_priv,
513 } 554 }
514 return; 555 return;
515} 556}
557
558static void
559init_vbt_defaults(struct drm_i915_private *dev_priv)
560{
561 dev_priv->crt_ddc_pin = GMBUS_PORT_VGADDC;
562
563 /* LFP panel data */
564 dev_priv->lvds_dither = 1;
565 dev_priv->lvds_vbt = 0;
566
567 /* SDVO panel data */
568 dev_priv->sdvo_lvds_vbt_mode = NULL;
569
570 /* general features */
571 dev_priv->int_tv_support = 1;
572 dev_priv->int_crt_support = 1;
573 dev_priv->lvds_use_ssc = 0;
574
575 /* eDP data */
576 dev_priv->edp.bpp = 18;
577}
578
516/** 579/**
517 * intel_init_bios - initialize VBIOS settings & find VBT 580 * intel_parse_bios - find VBT and initialize settings from the BIOS
518 * @dev: DRM device 581 * @dev: DRM device
519 * 582 *
520 * Loads the Video BIOS and checks that the VBT exists. Sets scratch registers 583 * Loads the Video BIOS and checks that the VBT exists. Sets scratch registers
521 * to appropriate values. 584 * to appropriate values.
522 * 585 *
523 * VBT existence is a sanity check that is relied on by other i830_bios.c code.
524 * Note that it would be better to use a BIOS call to get the VBT, as BIOSes may
525 * feed an updated VBT back through that, compared to what we'll fetch using
526 * this method of groping around in the BIOS data.
527 *
528 * Returns 0 on success, nonzero on failure. 586 * Returns 0 on success, nonzero on failure.
529 */ 587 */
530bool 588bool
531intel_init_bios(struct drm_device *dev) 589intel_parse_bios(struct drm_device *dev)
532{ 590{
533 struct drm_i915_private *dev_priv = dev->dev_private; 591 struct drm_i915_private *dev_priv = dev->dev_private;
534 struct pci_dev *pdev = dev->pdev; 592 struct pci_dev *pdev = dev->pdev;
535 struct vbt_header *vbt = NULL; 593 struct bdb_header *bdb = NULL;
536 struct bdb_header *bdb; 594 u8 __iomem *bios = NULL;
537 u8 __iomem *bios; 595
538 size_t size; 596 init_vbt_defaults(dev_priv);
539 int i; 597
540 598 /* XXX Should this validation be moved to intel_opregion.c? */
541 bios = pci_map_rom(pdev, &size); 599 if (dev_priv->opregion.vbt) {
542 if (!bios) 600 struct vbt_header *vbt = dev_priv->opregion.vbt;
543 return -1; 601 if (memcmp(vbt->signature, "$VBT", 4) == 0) {
544 602 DRM_DEBUG_DRIVER("Using VBT from OpRegion: %20s\n",
545 /* Scour memory looking for the VBT signature */ 603 vbt->signature);
546 for (i = 0; i + 4 < size; i++) { 604 bdb = (struct bdb_header *)((char *)vbt + vbt->bdb_offset);
547 if (!memcmp(bios + i, "$VBT", 4)) { 605 } else
548 vbt = (struct vbt_header *)(bios + i); 606 dev_priv->opregion.vbt = NULL;
549 break;
550 }
551 } 607 }
552 608
553 if (!vbt) { 609 if (bdb == NULL) {
554 DRM_ERROR("VBT signature missing\n"); 610 struct vbt_header *vbt = NULL;
555 pci_unmap_rom(pdev, bios); 611 size_t size;
556 return -1; 612 int i;
557 }
558 613
559 bdb = (struct bdb_header *)(bios + i + vbt->bdb_offset); 614 bios = pci_map_rom(pdev, &size);
615 if (!bios)
616 return -1;
617
618 /* Scour memory looking for the VBT signature */
619 for (i = 0; i + 4 < size; i++) {
620 if (!memcmp(bios + i, "$VBT", 4)) {
621 vbt = (struct vbt_header *)(bios + i);
622 break;
623 }
624 }
625
626 if (!vbt) {
627 DRM_ERROR("VBT signature missing\n");
628 pci_unmap_rom(pdev, bios);
629 return -1;
630 }
631
632 bdb = (struct bdb_header *)(bios + i + vbt->bdb_offset);
633 }
560 634
561 /* Grab useful general definitions */ 635 /* Grab useful general definitions */
562 parse_general_features(dev_priv, bdb); 636 parse_general_features(dev_priv, bdb);
@@ -568,7 +642,25 @@ intel_init_bios(struct drm_device *dev)
568 parse_driver_features(dev_priv, bdb); 642 parse_driver_features(dev_priv, bdb);
569 parse_edp(dev_priv, bdb); 643 parse_edp(dev_priv, bdb);
570 644
571 pci_unmap_rom(pdev, bios); 645 if (bios)
646 pci_unmap_rom(pdev, bios);
572 647
573 return 0; 648 return 0;
574} 649}
650
651/* Ensure that vital registers have been initialised, even if the BIOS
652 * is absent or just failing to do its job.
653 */
654void intel_setup_bios(struct drm_device *dev)
655{
656 struct drm_i915_private *dev_priv = dev->dev_private;
657
658 /* Set the Panel Power On/Off timings if uninitialized. */
659 if ((I915_READ(PP_ON_DELAYS) == 0) && (I915_READ(PP_OFF_DELAYS) == 0)) {
660 /* Set T2 to 40ms and T5 to 200ms */
661 I915_WRITE(PP_ON_DELAYS, 0x019007d0);
662
663 /* Set T3 to 35ms and Tx to 200ms */
664 I915_WRITE(PP_OFF_DELAYS, 0x015e07d0);
665 }
666}
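Decoding the two magic constants in intel_setup_bios(), on the assumption that each register packs two delays as (high half, low half) in units of 100 microseconds, consistent with the comments:

	/* Hypothetical helper to make the encoding visible: */
	#define PP_DELAY(ms_hi, ms_lo)	((((ms_hi) * 10) << 16) | ((ms_lo) * 10))
	/* PP_DELAY(40, 200) == 0x019007d0  (T2 = 40 ms, T5 = 200 ms)
	 * PP_DELAY(35, 200) == 0x015e07d0  (T3 = 35 ms, Tx = 200 ms) */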
diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h
index 4c18514f6f80..5f8e4edcbbb9 100644
--- a/drivers/gpu/drm/i915/intel_bios.h
+++ b/drivers/gpu/drm/i915/intel_bios.h
@@ -197,7 +197,8 @@ struct bdb_general_features {
197struct child_device_config { 197struct child_device_config {
198 u16 handle; 198 u16 handle;
199 u16 device_type; 199 u16 device_type;
200 u8 device_id[10]; /* See DEVICE_TYPE_* above */ 200 u8 i2c_speed;
201 u8 rsvd[9];
201 u16 addin_offset; 202 u16 addin_offset;
202 u8 dvo_port; /* See Device_PORT_* above */ 203 u8 dvo_port; /* See Device_PORT_* above */
203 u8 i2c_pin; 204 u8 i2c_pin;
@@ -466,7 +467,8 @@ struct bdb_edp {
466 struct edp_link_params link_params[16]; 467 struct edp_link_params link_params[16];
467} __attribute__ ((packed)); 468} __attribute__ ((packed));
468 469
469bool intel_init_bios(struct drm_device *dev); 470void intel_setup_bios(struct drm_device *dev);
471bool intel_parse_bios(struct drm_device *dev);
470 472
471/* 473/*
472 * Driver<->VBIOS interaction occurs through scratch bits in 474 * Driver<->VBIOS interaction occurs through scratch bits in
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 197d4f32585a..c55c77043357 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -79,7 +79,7 @@ static int intel_crt_mode_valid(struct drm_connector *connector,
79 if (mode->clock < 25000) 79 if (mode->clock < 25000)
80 return MODE_CLOCK_LOW; 80 return MODE_CLOCK_LOW;
81 81
82 if (!IS_I9XX(dev)) 82 if (IS_GEN2(dev))
83 max_clock = 350000; 83 max_clock = 350000;
84 else 84 else
85 max_clock = 400000; 85 max_clock = 400000;
@@ -123,7 +123,7 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
123 * Disable separate mode multiplier used when cloning SDVO to CRT 123 * Disable separate mode multiplier used when cloning SDVO to CRT
124 * XXX this needs to be adjusted when we really are cloning 124 * XXX this needs to be adjusted when we really are cloning
125 */ 125 */
126 if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev)) { 126 if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) {
127 dpll_md = I915_READ(dpll_md_reg); 127 dpll_md = I915_READ(dpll_md_reg);
128 I915_WRITE(dpll_md_reg, 128 I915_WRITE(dpll_md_reg,
129 dpll_md & ~DPLL_MD_UDI_MULTIPLIER_MASK); 129 dpll_md & ~DPLL_MD_UDI_MULTIPLIER_MASK);
@@ -187,11 +187,12 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
187 I915_WRITE(PCH_ADPA, adpa); 187 I915_WRITE(PCH_ADPA, adpa);
188 188
189 if (wait_for((I915_READ(PCH_ADPA) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0, 189 if (wait_for((I915_READ(PCH_ADPA) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0,
190 1000, 1)) 190 1000))
191 DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER"); 191 DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER");
192 192
193 if (turn_off_dac) { 193 if (turn_off_dac) {
194 I915_WRITE(PCH_ADPA, temp); 194 /* Make sure hotplug is enabled */
195 I915_WRITE(PCH_ADPA, temp | ADPA_CRT_HOTPLUG_ENABLE);
195 (void)I915_READ(PCH_ADPA); 196 (void)I915_READ(PCH_ADPA);
196 } 197 }
197 198
@@ -244,7 +245,7 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
244 /* wait for FORCE_DETECT to go off */ 245 /* wait for FORCE_DETECT to go off */
245 if (wait_for((I915_READ(PORT_HOTPLUG_EN) & 246 if (wait_for((I915_READ(PORT_HOTPLUG_EN) &
246 CRT_HOTPLUG_FORCE_DETECT) == 0, 247 CRT_HOTPLUG_FORCE_DETECT) == 0,
247 1000, 1)) 248 1000))
248 DRM_DEBUG_KMS("timed out waiting for FORCE_DETECT to go off"); 249 DRM_DEBUG_KMS("timed out waiting for FORCE_DETECT to go off");
249 } 250 }
250 251
@@ -261,21 +262,47 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
261 return ret; 262 return ret;
262} 263}
263 264
265static bool intel_crt_ddc_probe(struct drm_i915_private *dev_priv, int ddc_bus)
266{
267 u8 buf;
268 struct i2c_msg msgs[] = {
269 {
270 .addr = 0xA0,
271 .flags = 0,
272 .len = 1,
273 .buf = &buf,
274 },
275 };
276 /* DDC monitor detect: Does it ACK a write to 0xA0? */
277 return i2c_transfer(&dev_priv->gmbus[ddc_bus].adapter, msgs, 1) == 1;
278}
279
264static bool intel_crt_detect_ddc(struct drm_encoder *encoder) 280static bool intel_crt_detect_ddc(struct drm_encoder *encoder)
265{ 281{
266 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); 282 struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
283 struct drm_i915_private *dev_priv = encoder->dev->dev_private;
267 284
268 /* CRT should always be at 0, but check anyway */ 285 /* CRT should always be at 0, but check anyway */
269 if (intel_encoder->type != INTEL_OUTPUT_ANALOG) 286 if (intel_encoder->type != INTEL_OUTPUT_ANALOG)
270 return false; 287 return false;
271 288
272 return intel_ddc_probe(intel_encoder); 289 if (intel_crt_ddc_probe(dev_priv, dev_priv->crt_ddc_pin)) {
290 DRM_DEBUG_KMS("CRT detected via DDC:0xa0\n");
291 return true;
292 }
293
294 if (intel_ddc_probe(intel_encoder, dev_priv->crt_ddc_pin)) {
295 DRM_DEBUG_KMS("CRT detected via DDC:0x50 [EDID]\n");
296 return true;
297 }
298
299 return false;
273} 300}
274 301
275static enum drm_connector_status 302static enum drm_connector_status
276intel_crt_load_detect(struct drm_crtc *crtc, struct intel_encoder *intel_encoder) 303intel_crt_load_detect(struct drm_crtc *crtc, struct intel_encoder *intel_encoder)
277{ 304{
278 struct drm_encoder *encoder = &intel_encoder->enc; 305 struct drm_encoder *encoder = &intel_encoder->base;
279 struct drm_device *dev = encoder->dev; 306 struct drm_device *dev = encoder->dev;
280 struct drm_i915_private *dev_priv = dev->dev_private; 307 struct drm_i915_private *dev_priv = dev->dev_private;
281 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 308 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -295,6 +322,8 @@ intel_crt_load_detect(struct drm_crtc *crtc, struct intel_encoder *intel_encoder
295 uint8_t st00; 322 uint8_t st00;
296 enum drm_connector_status status; 323 enum drm_connector_status status;
297 324
325 DRM_DEBUG_KMS("starting load-detect on CRT\n");
326
298 if (pipe == 0) { 327 if (pipe == 0) {
299 bclrpat_reg = BCLRPAT_A; 328 bclrpat_reg = BCLRPAT_A;
300 vtotal_reg = VTOTAL_A; 329 vtotal_reg = VTOTAL_A;
@@ -324,9 +353,10 @@ intel_crt_load_detect(struct drm_crtc *crtc, struct intel_encoder *intel_encoder
324 /* Set the border color to purple. */ 353 /* Set the border color to purple. */
325 I915_WRITE(bclrpat_reg, 0x500050); 354 I915_WRITE(bclrpat_reg, 0x500050);
326 355
327 if (IS_I9XX(dev)) { 356 if (!IS_GEN2(dev)) {
328 uint32_t pipeconf = I915_READ(pipeconf_reg); 357 uint32_t pipeconf = I915_READ(pipeconf_reg);
329 I915_WRITE(pipeconf_reg, pipeconf | PIPECONF_FORCE_BORDER); 358 I915_WRITE(pipeconf_reg, pipeconf | PIPECONF_FORCE_BORDER);
359 POSTING_READ(pipeconf_reg);
330 /* Wait for next Vblank to substitute 360 /* Wait for next Vblank to substitute
331 * border color for Color info */ 361 * border color for Color info */
332 intel_wait_for_vblank(dev, pipe); 362 intel_wait_for_vblank(dev, pipe);
@@ -404,34 +434,37 @@ static enum drm_connector_status
 intel_crt_detect(struct drm_connector *connector, bool force)
 {
 	struct drm_device *dev = connector->dev;
-	struct drm_encoder *encoder = intel_attached_encoder(connector);
-	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+	struct intel_encoder *encoder = intel_attached_encoder(connector);
 	struct drm_crtc *crtc;
 	int dpms_mode;
 	enum drm_connector_status status;

-	if (IS_I9XX(dev) && !IS_I915G(dev) && !IS_I915GM(dev)) {
-		if (intel_crt_detect_hotplug(connector))
+	if (I915_HAS_HOTPLUG(dev)) {
+		if (intel_crt_detect_hotplug(connector)) {
+			DRM_DEBUG_KMS("CRT detected via hotplug\n");
 			return connector_status_connected;
-		else
+		} else
 			return connector_status_disconnected;
 	}

-	if (intel_crt_detect_ddc(encoder))
+	if (intel_crt_detect_ddc(&encoder->base))
 		return connector_status_connected;

 	if (!force)
 		return connector->status;

 	/* for pre-945g platforms use load detect */
-	if (encoder->crtc && encoder->crtc->enabled) {
-		status = intel_crt_load_detect(encoder->crtc, intel_encoder);
+	if (encoder->base.crtc && encoder->base.crtc->enabled) {
+		status = intel_crt_load_detect(encoder->base.crtc, encoder);
 	} else {
-		crtc = intel_get_load_detect_pipe(intel_encoder, connector,
+		crtc = intel_get_load_detect_pipe(encoder, connector,
 						  NULL, &dpms_mode);
 		if (crtc) {
-			status = intel_crt_load_detect(crtc, intel_encoder);
-			intel_release_load_detect_pipe(intel_encoder,
+			if (intel_crt_detect_ddc(&encoder->base))
+				status = connector_status_connected;
+			else
+				status = intel_crt_load_detect(crtc, encoder);
+			intel_release_load_detect_pipe(encoder,
 						       connector, dpms_mode);
 		} else
 			status = connector_status_unknown;
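
Note: after this change the probe order is hotplug sense (if the hardware
has it), then an EDID probe over DDC, then the intrusive load-detect, with
a DDC re-check once a pipe has been borrowed. A condensed sketch under
those assumptions; crt_hotplug_sense(), crt_ddc_probe() and
crt_load_detect_probe() are invented stand-ins for the intel_crt_* helpers:

	bool crt_hotplug_sense(struct drm_connector *c);
	bool crt_ddc_probe(struct drm_connector *c);
	enum drm_connector_status crt_load_detect_probe(struct drm_connector *c);

	static enum drm_connector_status
	crt_detect_sketch(struct drm_connector *c, bool has_hotplug, bool force)
	{
		if (has_hotplug)	/* cheapest: ask the hardware */
			return crt_hotplug_sense(c) ?
				connector_status_connected :
				connector_status_disconnected;
		if (crt_ddc_probe(c))	/* next: EDID over DDC */
			return connector_status_connected;
		if (!force)		/* don't steal a pipe unless forced */
			return c->status;
		return crt_load_detect_probe(c);	/* most intrusive */
	}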
@@ -449,32 +482,18 @@ static void intel_crt_destroy(struct drm_connector *connector)

 static int intel_crt_get_modes(struct drm_connector *connector)
 {
-	int ret;
-	struct drm_encoder *encoder = intel_attached_encoder(connector);
-	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
-	struct i2c_adapter *ddc_bus;
 	struct drm_device *dev = connector->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret;

-
-	ret = intel_ddc_get_modes(connector, intel_encoder->ddc_bus);
+	ret = intel_ddc_get_modes(connector,
+				  &dev_priv->gmbus[dev_priv->crt_ddc_pin].adapter);
 	if (ret || !IS_G4X(dev))
-		goto end;
+		return ret;

 	/* Try to probe digital port for output in DVI-I -> VGA mode. */
-	ddc_bus = intel_i2c_create(connector->dev, GPIOD, "CRTDDC_D");
-
-	if (!ddc_bus) {
-		dev_printk(KERN_ERR, &connector->dev->pdev->dev,
-			   "DDC bus registration failed for CRTDDC_D.\n");
-		goto end;
-	}
-	/* Try to get modes by GPIOD port */
-	ret = intel_ddc_get_modes(connector, ddc_bus);
-	intel_i2c_destroy(ddc_bus);
-
-end:
-	return ret;
-
+	return intel_ddc_get_modes(connector,
+				   &dev_priv->gmbus[GMBUS_PORT_DPB].adapter);
 }

 static int intel_crt_set_property(struct drm_connector *connector,
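
Note: the rewrite drops the per-probe intel_i2c_create()/destroy() pair and
instead indexes the gmbus adapter table that is populated once at driver
init. A sketch of the lookup, assuming that table and the pin constants
above (the helper itself is invented):

	static struct i2c_adapter *
	crt_ddc_adapter(struct drm_i915_private *dev_priv, bool dvi_i_fallback)
	{
		int pin = dvi_i_fallback ? GMBUS_PORT_DPB
					 : dev_priv->crt_ddc_pin;
		return &dev_priv->gmbus[pin].adapter;
	}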
@@ -507,7 +526,7 @@ static const struct drm_connector_funcs intel_crt_connector_funcs = {
 static const struct drm_connector_helper_funcs intel_crt_connector_helper_funcs = {
 	.mode_valid = intel_crt_mode_valid,
 	.get_modes = intel_crt_get_modes,
-	.best_encoder = intel_attached_encoder,
+	.best_encoder = intel_best_encoder,
 };

 static const struct drm_encoder_funcs intel_crt_enc_funcs = {
@@ -520,7 +539,6 @@ void intel_crt_init(struct drm_device *dev)
 	struct intel_encoder *intel_encoder;
 	struct intel_connector *intel_connector;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 i2c_reg;

 	intel_encoder = kzalloc(sizeof(struct intel_encoder), GFP_KERNEL);
 	if (!intel_encoder)
@@ -536,27 +554,10 @@ void intel_crt_init(struct drm_device *dev)
 	drm_connector_init(dev, &intel_connector->base,
 			   &intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA);

-	drm_encoder_init(dev, &intel_encoder->enc, &intel_crt_enc_funcs,
+	drm_encoder_init(dev, &intel_encoder->base, &intel_crt_enc_funcs,
 			 DRM_MODE_ENCODER_DAC);

-	drm_mode_connector_attach_encoder(&intel_connector->base,
-					  &intel_encoder->enc);
-
-	/* Set up the DDC bus. */
-	if (HAS_PCH_SPLIT(dev))
-		i2c_reg = PCH_GPIOA;
-	else {
-		i2c_reg = GPIOA;
-		/* Use VBT information for CRT DDC if available */
-		if (dev_priv->crt_ddc_bus != 0)
-			i2c_reg = dev_priv->crt_ddc_bus;
-	}
-	intel_encoder->ddc_bus = intel_i2c_create(dev, i2c_reg, "CRTDDC_A");
-	if (!intel_encoder->ddc_bus) {
-		dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration "
-			   "failed.\n");
-		return;
-	}
+	intel_connector_attach_encoder(intel_connector, intel_encoder);

 	intel_encoder->type = INTEL_OUTPUT_ANALOG;
 	intel_encoder->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
@@ -566,7 +567,7 @@ void intel_crt_init(struct drm_device *dev)
 	connector->interlace_allowed = 1;
 	connector->doublescan_allowed = 0;

-	drm_encoder_helper_add(&intel_encoder->enc, &intel_crt_helper_funcs);
+	drm_encoder_helper_add(&intel_encoder->base, &intel_crt_helper_funcs);
 	drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs);

 	drm_sysfs_connector_add(connector);
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 979228594599..990f065374b2 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -43,8 +43,8 @@

 bool intel_pipe_has_type (struct drm_crtc *crtc, int type);
 static void intel_update_watermarks(struct drm_device *dev);
-static void intel_increase_pllclock(struct drm_crtc *crtc, bool schedule);
-static void intel_crtc_update_cursor(struct drm_crtc *crtc);
+static void intel_increase_pllclock(struct drm_crtc *crtc);
+static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);

 typedef struct {
 	/* given values */
@@ -342,6 +342,16 @@ static bool
 intel_find_pll_ironlake_dp(const intel_limit_t *, struct drm_crtc *crtc,
 			   int target, int refclk, intel_clock_t *best_clock);

+static inline u32 /* units of 100MHz */
+intel_fdi_link_freq(struct drm_device *dev)
+{
+	if (IS_GEN5(dev)) {
+		struct drm_i915_private *dev_priv = dev->dev_private;
+		return (I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2;
+	} else
+		return 27;
+}
+
 static const intel_limit_t intel_limits_i8xx_dvo = {
 	.dot = { .min = I8XX_DOT_MIN, .max = I8XX_DOT_MAX },
 	.vco = { .min = I8XX_VCO_MIN, .max = I8XX_VCO_MAX },
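
Note: intel_fdi_link_freq() reports the FDI symbol clock in units of
100 MHz: 27 (2.7 GHz) everywhere except gen5, where the BIOS-programmed
feedback divider is read back and 2 is added. A self-contained worked
example of what that figure buys, assuming FDI's 8b/10b coding (values
illustrative):

	#include <stdio.h>

	int main(void)
	{
		unsigned long long link_100mhz = 27, lanes = 4, bpp = 24;
		/* 8b/10b: 8 payload bits per 10-bit symbol per lane */
		unsigned long long bits_per_sec =
			link_100mhz * 100000000ULL * lanes * 8 / 10;
		printf("max dotclock ~%llu kHz at %llu bpp\n",
		       bits_per_sec / bpp / 1000, bpp);	/* ~360000 */
		return 0;
	}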
@@ -701,16 +711,16 @@ static const intel_limit_t *intel_limit(struct drm_crtc *crtc)
 		limit = intel_ironlake_limit(crtc);
 	else if (IS_G4X(dev)) {
 		limit = intel_g4x_limit(crtc);
-	} else if (IS_I9XX(dev) && !IS_PINEVIEW(dev)) {
-		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
-			limit = &intel_limits_i9xx_lvds;
-		else
-			limit = &intel_limits_i9xx_sdvo;
 	} else if (IS_PINEVIEW(dev)) {
 		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
 			limit = &intel_limits_pineview_lvds;
 		else
 			limit = &intel_limits_pineview_sdvo;
+	} else if (!IS_GEN2(dev)) {
+		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
+			limit = &intel_limits_i9xx_lvds;
+		else
+			limit = &intel_limits_i9xx_sdvo;
 	} else {
 		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
 			limit = &intel_limits_i8xx_lvds;
@@ -744,20 +754,17 @@ static void intel_clock(struct drm_device *dev, int refclk, intel_clock_t *clock
 /**
  * Returns whether any output on the specified pipe is of the specified type
  */
-bool intel_pipe_has_type (struct drm_crtc *crtc, int type)
+bool intel_pipe_has_type(struct drm_crtc *crtc, int type)
 {
 	struct drm_device *dev = crtc->dev;
 	struct drm_mode_config *mode_config = &dev->mode_config;
-	struct drm_encoder *l_entry;
+	struct intel_encoder *encoder;
+
+	list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
+		if (encoder->base.crtc == crtc && encoder->type == type)
+			return true;

-	list_for_each_entry(l_entry, &mode_config->encoder_list, head) {
-		if (l_entry && l_entry->crtc == crtc) {
-			struct intel_encoder *intel_encoder = enc_to_intel_encoder(l_entry);
-			if (intel_encoder->type == type)
-				return true;
-		}
-	}
-	return false;
+	return false;
 }
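
Note: the rewritten loop iterates intel_encoder objects directly by naming
the embedded member, `base.head`, in list_for_each_entry(). That works
because the list macros only need the offset from the containing struct to
the list_head, and that offset may reach through a nested member. A toy
user-space model of the mechanism:

	#include <stddef.h>
	#include <stdio.h>

	struct base { struct { void *prev, *next; } head; };
	struct encoder { struct base base; int type; };

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	int main(void)
	{
		struct encoder e = { .type = 7 };
		struct encoder *back =
			container_of(&e.base.head, struct encoder, base.head);
		printf("%d\n", back->type);	/* prints 7 */
		return 0;
	}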
@@ -928,10 +935,6 @@ intel_find_pll_ironlake_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
 	struct drm_device *dev = crtc->dev;
 	intel_clock_t clock;

-	/* return directly when it is eDP */
-	if (HAS_eDP)
-		return true;
-
 	if (target < 200000) {
 		clock.n = 1;
 		clock.p1 = 2;
@@ -955,26 +958,26 @@ static bool
 intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
 		      int target, int refclk, intel_clock_t *best_clock)
 {
 	intel_clock_t clock;
 	if (target < 200000) {
 		clock.p1 = 2;
 		clock.p2 = 10;
 		clock.n = 2;
 		clock.m1 = 23;
 		clock.m2 = 8;
 	} else {
 		clock.p1 = 1;
 		clock.p2 = 10;
 		clock.n = 1;
 		clock.m1 = 14;
 		clock.m2 = 2;
 	}
 	clock.m = 5 * (clock.m1 + 2) + (clock.m2 + 2);
 	clock.p = (clock.p1 * clock.p2);
 	clock.dot = 96000 * clock.m / (clock.n + 2) / clock.p;
 	clock.vco = 0;
 	memcpy(best_clock, &clock, sizeof(intel_clock_t));
 	return true;
 }

 /**
@@ -1007,9 +1010,9 @@ void intel_wait_for_vblank(struct drm_device *dev, int pipe)
 		   I915_READ(pipestat_reg) | PIPE_VBLANK_INTERRUPT_STATUS);

 	/* Wait for vblank interrupt bit to set */
-	if (wait_for((I915_READ(pipestat_reg) &
-		      PIPE_VBLANK_INTERRUPT_STATUS),
-		     50, 0))
+	if (wait_for(I915_READ(pipestat_reg) &
+		     PIPE_VBLANK_INTERRUPT_STATUS,
+		     50))
 		DRM_DEBUG_KMS("vblank wait timed out\n");
 }

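
Note: the wait_for() calls throughout this patch lose their trailing flags
argument; the macro now takes just (condition, timeout-in-ms). A rough
sketch of such a poll-with-timeout macro, assuming jiffies timekeeping (the
driver's real wait_for() differs in detail):

	#define WAIT_FOR_SKETCH(COND, MS) ({				\
		unsigned long end__ = jiffies + msecs_to_jiffies(MS);	\
		int ret__ = 0;						\
		while (!(COND)) {					\
			if (time_after(jiffies, end__)) {		\
				ret__ = -ETIMEDOUT;			\
				break;					\
			}						\
			msleep(1);					\
		}							\
		ret__;							\
	})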
@@ -1028,36 +1031,35 @@ void intel_wait_for_vblank(struct drm_device *dev, int pipe)
  * Otherwise:
  *   wait for the display line value to settle (it usually
  *   ends up stopping at the start of the next frame).
  *
  */
-static void intel_wait_for_pipe_off(struct drm_device *dev, int pipe)
+void intel_wait_for_pipe_off(struct drm_device *dev, int pipe)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;

 	if (INTEL_INFO(dev)->gen >= 4) {
-		int pipeconf_reg = (pipe == 0 ? PIPEACONF : PIPEBCONF);
+		int reg = PIPECONF(pipe);

 		/* Wait for the Pipe State to go off */
-		if (wait_for((I915_READ(pipeconf_reg) & I965_PIPECONF_ACTIVE) == 0,
-			     100, 0))
+		if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0,
+			     100))
 			DRM_DEBUG_KMS("pipe_off wait timed out\n");
 	} else {
 		u32 last_line;
-		int pipedsl_reg = (pipe == 0 ? PIPEADSL : PIPEBDSL);
+		int reg = PIPEDSL(pipe);
 		unsigned long timeout = jiffies + msecs_to_jiffies(100);

 		/* Wait for the display line to settle */
 		do {
-			last_line = I915_READ(pipedsl_reg) & DSL_LINEMASK;
+			last_line = I915_READ(reg) & DSL_LINEMASK;
 			mdelay(5);
-		} while (((I915_READ(pipedsl_reg) & DSL_LINEMASK) != last_line) &&
+		} while (((I915_READ(reg) & DSL_LINEMASK) != last_line) &&
 			 time_after(timeout, jiffies));
 		if (time_after(jiffies, timeout))
 			DRM_DEBUG_KMS("pipe_off wait timed out\n");
 	}
 }

-/* Parameters have changed, update FBC info */
 static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
 {
 	struct drm_device *dev = crtc->dev;
@@ -1069,6 +1071,14 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
 	int plane, i;
 	u32 fbc_ctl, fbc_ctl2;

+	if (fb->pitch == dev_priv->cfb_pitch &&
+	    obj_priv->fence_reg == dev_priv->cfb_fence &&
+	    intel_crtc->plane == dev_priv->cfb_plane &&
+	    I915_READ(FBC_CONTROL) & FBC_CTL_EN)
+		return;
+
+	i8xx_disable_fbc(dev);
+
 	dev_priv->cfb_pitch = dev_priv->cfb_size / FBC_LL_SIZE;

 	if (fb->pitch < dev_priv->cfb_pitch)
@@ -1102,7 +1112,7 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
 	I915_WRITE(FBC_CONTROL, fbc_ctl);

 	DRM_DEBUG_KMS("enabled FBC, pitch %ld, yoff %d, plane %d, ",
 		      dev_priv->cfb_pitch, crtc->y, dev_priv->cfb_plane);
 }

 void i8xx_disable_fbc(struct drm_device *dev)
@@ -1110,19 +1120,16 @@ void i8xx_disable_fbc(struct drm_device *dev)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 fbc_ctl;

-	if (!I915_HAS_FBC(dev))
-		return;
-
-	if (!(I915_READ(FBC_CONTROL) & FBC_CTL_EN))
-		return;	/* Already off, just return */
-
 	/* Disable compression */
 	fbc_ctl = I915_READ(FBC_CONTROL);
+	if ((fbc_ctl & FBC_CTL_EN) == 0)
+		return;
+
 	fbc_ctl &= ~FBC_CTL_EN;
 	I915_WRITE(FBC_CONTROL, fbc_ctl);

 	/* Wait for compressing bit to clear */
-	if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10, 0)) {
+	if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10)) {
 		DRM_DEBUG_KMS("FBC idle timed out\n");
 		return;
 	}
@@ -1145,14 +1152,27 @@ static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
 	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
 	struct drm_i915_gem_object *obj_priv = to_intel_bo(intel_fb->obj);
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	int plane = (intel_crtc->plane == 0 ? DPFC_CTL_PLANEA :
-		     DPFC_CTL_PLANEB);
+	int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
 	unsigned long stall_watermark = 200;
 	u32 dpfc_ctl;

+	dpfc_ctl = I915_READ(DPFC_CONTROL);
+	if (dpfc_ctl & DPFC_CTL_EN) {
+		if (dev_priv->cfb_pitch == dev_priv->cfb_pitch / 64 - 1 &&
+		    dev_priv->cfb_fence == obj_priv->fence_reg &&
+		    dev_priv->cfb_plane == intel_crtc->plane &&
+		    dev_priv->cfb_y == crtc->y)
+			return;
+
+		I915_WRITE(DPFC_CONTROL, dpfc_ctl & ~DPFC_CTL_EN);
+		POSTING_READ(DPFC_CONTROL);
+		intel_wait_for_vblank(dev, intel_crtc->pipe);
+	}
+
 	dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1;
 	dev_priv->cfb_fence = obj_priv->fence_reg;
 	dev_priv->cfb_plane = intel_crtc->plane;
+	dev_priv->cfb_y = crtc->y;

 	dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X;
 	if (obj_priv->tiling_mode != I915_TILING_NONE) {
@@ -1162,7 +1182,6 @@ static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
 		I915_WRITE(DPFC_CHICKEN, ~DPFC_HT_MODIFY);
 	}

-	I915_WRITE(DPFC_CONTROL, dpfc_ctl);
 	I915_WRITE(DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
 		   (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
 		   (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
@@ -1181,10 +1200,12 @@ void g4x_disable_fbc(struct drm_device *dev)

 	/* Disable compression */
 	dpfc_ctl = I915_READ(DPFC_CONTROL);
-	dpfc_ctl &= ~DPFC_CTL_EN;
-	I915_WRITE(DPFC_CONTROL, dpfc_ctl);
+	if (dpfc_ctl & DPFC_CTL_EN) {
+		dpfc_ctl &= ~DPFC_CTL_EN;
+		I915_WRITE(DPFC_CONTROL, dpfc_ctl);

 		DRM_DEBUG_KMS("disabled FBC\n");
+	}
 }

 static bool g4x_fbc_enabled(struct drm_device *dev)
@@ -1202,16 +1223,30 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
 	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
 	struct drm_i915_gem_object *obj_priv = to_intel_bo(intel_fb->obj);
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	int plane = (intel_crtc->plane == 0) ? DPFC_CTL_PLANEA :
-					       DPFC_CTL_PLANEB;
+	int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
 	unsigned long stall_watermark = 200;
 	u32 dpfc_ctl;

+	dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
+	if (dpfc_ctl & DPFC_CTL_EN) {
+		if (dev_priv->cfb_pitch == dev_priv->cfb_pitch / 64 - 1 &&
+		    dev_priv->cfb_fence == obj_priv->fence_reg &&
+		    dev_priv->cfb_plane == intel_crtc->plane &&
+		    dev_priv->cfb_offset == obj_priv->gtt_offset &&
+		    dev_priv->cfb_y == crtc->y)
+			return;
+
+		I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl & ~DPFC_CTL_EN);
+		POSTING_READ(ILK_DPFC_CONTROL);
+		intel_wait_for_vblank(dev, intel_crtc->pipe);
+	}
+
 	dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1;
 	dev_priv->cfb_fence = obj_priv->fence_reg;
 	dev_priv->cfb_plane = intel_crtc->plane;
+	dev_priv->cfb_offset = obj_priv->gtt_offset;
+	dev_priv->cfb_y = crtc->y;

-	dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
 	dpfc_ctl &= DPFC_RESERVED;
 	dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X);
 	if (obj_priv->tiling_mode != I915_TILING_NONE) {
@@ -1221,15 +1256,13 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
 		I915_WRITE(ILK_DPFC_CHICKEN, ~DPFC_HT_MODIFY);
 	}

-	I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
 	I915_WRITE(ILK_DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
 		   (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
 		   (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
 	I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
 	I915_WRITE(ILK_FBC_RT_BASE, obj_priv->gtt_offset | ILK_FBC_RT_VALID);
 	/* enable it... */
-	I915_WRITE(ILK_DPFC_CONTROL, I915_READ(ILK_DPFC_CONTROL) |
-		   DPFC_CTL_EN);
+	I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);

 	DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
 }
@@ -1241,10 +1274,12 @@ void ironlake_disable_fbc(struct drm_device *dev)

 	/* Disable compression */
 	dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
-	dpfc_ctl &= ~DPFC_CTL_EN;
-	I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
+	if (dpfc_ctl & DPFC_CTL_EN) {
+		dpfc_ctl &= ~DPFC_CTL_EN;
+		I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);

 		DRM_DEBUG_KMS("disabled FBC\n");
+	}
 }

 static bool ironlake_fbc_enabled(struct drm_device *dev)
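
Note: both the g4x and ironlake enable paths now early-out when the
compressor is already running with identical parameters, and otherwise
disable it and wait a vblank before reprogramming. A self-contained model
of that comparison (types and helper invented):

	#include <stdbool.h>

	struct fbc_params { unsigned pitch, fence, plane, y; };

	static bool fbc_needs_update(const struct fbc_params *cached,
				     const struct fbc_params *next,
				     bool enabled)
	{
		if (!enabled)
			return true;		/* off: must program */
		return cached->pitch != next->pitch ||
		       cached->fence != next->fence ||
		       cached->plane != next->plane ||
		       cached->y     != next->y;	/* any change: redo */
	}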
@@ -1286,8 +1321,7 @@ void intel_disable_fbc(struct drm_device *dev)

 /**
  * intel_update_fbc - enable/disable FBC as needed
- * @crtc: CRTC to point the compressor at
- * @mode: mode in use
+ * @dev: the drm_device
  *
  * Set up the framebuffer compression hardware at mode set time.  We
  * enable it if possible:
@@ -1304,18 +1338,14 @@ void intel_disable_fbc(struct drm_device *dev)
  *
  * We need to enable/disable FBC on a global basis.
  */
-static void intel_update_fbc(struct drm_crtc *crtc,
-			     struct drm_display_mode *mode)
+static void intel_update_fbc(struct drm_device *dev)
 {
-	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_framebuffer *fb = crtc->fb;
+	struct drm_crtc *crtc = NULL, *tmp_crtc;
+	struct intel_crtc *intel_crtc;
+	struct drm_framebuffer *fb;
 	struct intel_framebuffer *intel_fb;
 	struct drm_i915_gem_object *obj_priv;
-	struct drm_crtc *tmp_crtc;
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	int plane = intel_crtc->plane;
-	int crtcs_enabled = 0;

 	DRM_DEBUG_KMS("\n");

@@ -1325,12 +1355,6 @@ static void intel_update_fbc(struct drm_crtc *crtc,
 	if (!I915_HAS_FBC(dev))
 		return;

-	if (!crtc->fb)
-		return;
-
-	intel_fb = to_intel_framebuffer(fb);
-	obj_priv = to_intel_bo(intel_fb->obj);
-
 	/*
 	 * If FBC is already on, we just have to verify that we can
 	 * keep it that way...
@@ -1341,35 +1365,47 @@ static void intel_update_fbc(struct drm_crtc *crtc,
 	 * - going to an unsupported config (interlace, pixel multiply, etc.)
 	 */
 	list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) {
-		if (tmp_crtc->enabled)
-			crtcs_enabled++;
+		if (tmp_crtc->enabled) {
+			if (crtc) {
+				DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
+				dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES;
+				goto out_disable;
+			}
+			crtc = tmp_crtc;
+		}
 	}
-	DRM_DEBUG_KMS("%d pipes active\n", crtcs_enabled);
-	if (crtcs_enabled > 1) {
-		DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
-		dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES;
+
+	if (!crtc || crtc->fb == NULL) {
+		DRM_DEBUG_KMS("no output, disabling\n");
+		dev_priv->no_fbc_reason = FBC_NO_OUTPUT;
 		goto out_disable;
 	}
+
+	intel_crtc = to_intel_crtc(crtc);
+	fb = crtc->fb;
+	intel_fb = to_intel_framebuffer(fb);
+	obj_priv = to_intel_bo(intel_fb->obj);
+
 	if (intel_fb->obj->size > dev_priv->cfb_size) {
 		DRM_DEBUG_KMS("framebuffer too large, disabling "
 			      "compression\n");
 		dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
 		goto out_disable;
 	}
-	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
-	    (mode->flags & DRM_MODE_FLAG_DBLSCAN)) {
+	if ((crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) ||
+	    (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)) {
 		DRM_DEBUG_KMS("mode incompatible with compression, "
 			      "disabling\n");
 		dev_priv->no_fbc_reason = FBC_UNSUPPORTED_MODE;
 		goto out_disable;
 	}
-	if ((mode->hdisplay > 2048) ||
-	    (mode->vdisplay > 1536)) {
+	if ((crtc->mode.hdisplay > 2048) ||
+	    (crtc->mode.vdisplay > 1536)) {
 		DRM_DEBUG_KMS("mode too large for compression, disabling\n");
 		dev_priv->no_fbc_reason = FBC_MODE_TOO_LARGE;
 		goto out_disable;
 	}
-	if ((IS_I915GM(dev) || IS_I945GM(dev)) && plane != 0) {
+	if ((IS_I915GM(dev) || IS_I945GM(dev)) && intel_crtc->plane != 0) {
 		DRM_DEBUG_KMS("plane not 0, disabling compression\n");
 		dev_priv->no_fbc_reason = FBC_BAD_PLANE;
 		goto out_disable;
@@ -1384,18 +1420,7 @@ static void intel_update_fbc(struct drm_crtc *crtc,
 	if (in_dbg_master())
 		goto out_disable;

-	if (intel_fbc_enabled(dev)) {
-		/* We can re-enable it in this case, but need to update pitch */
-		if ((fb->pitch > dev_priv->cfb_pitch) ||
-		    (obj_priv->fence_reg != dev_priv->cfb_fence) ||
-		    (plane != dev_priv->cfb_plane))
-			intel_disable_fbc(dev);
-	}
-
-	/* Now try to turn it back on if possible */
-	if (!intel_fbc_enabled(dev))
-		intel_enable_fbc(crtc, 500);
-
+	intel_enable_fbc(crtc, 500);
 	return;

 out_disable:
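
Note: intel_update_fbc() now finds the single enabled CRTC itself instead
of taking one as an argument, and bails out the moment a second enabled
pipe is seen, since this hardware can compress only one pipe. A toy model
of that scan (list types invented; the driver walks
dev->mode_config.crtc_list):

	struct toy_crtc { int enabled; struct toy_crtc *next; };

	static struct toy_crtc *single_enabled_crtc(struct toy_crtc *head)
	{
		struct toy_crtc *found = NULL;
		for (; head; head = head->next) {
			if (!head->enabled)
				continue;
			if (found)
				return NULL;	/* >1 pipe: FBC stays off */
			found = head;
		}
		return found;			/* NULL: no output at all */
	}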
@@ -1407,7 +1432,9 @@ out_disable:
 }

 int
-intel_pin_and_fence_fb_obj(struct drm_device *dev, struct drm_gem_object *obj)
+intel_pin_and_fence_fb_obj(struct drm_device *dev,
+			   struct drm_gem_object *obj,
+			   bool pipelined)
 {
 	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 	u32 alignment;
@@ -1417,7 +1444,7 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev, struct drm_gem_object *obj)
 	case I915_TILING_NONE:
 		if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
 			alignment = 128 * 1024;
-		else if (IS_I965G(dev))
+		else if (INTEL_INFO(dev)->gen >= 4)
 			alignment = 4 * 1024;
 		else
 			alignment = 64 * 1024;
@@ -1435,9 +1462,13 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev, struct drm_gem_object *obj)
 	}

 	ret = i915_gem_object_pin(obj, alignment);
-	if (ret != 0)
+	if (ret)
 		return ret;

+	ret = i915_gem_object_set_to_display_plane(obj, pipelined);
+	if (ret)
+		goto err_unpin;
+
 	/* Install a fence for tiled scan-out. Pre-i965 always needs a
 	 * fence, whereas 965+ only requires a fence if using
 	 * framebuffer compression.  For simplicity, we always install
@@ -1445,20 +1476,22 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev, struct drm_gem_object *obj)
 	 */
 	if (obj_priv->fence_reg == I915_FENCE_REG_NONE &&
 	    obj_priv->tiling_mode != I915_TILING_NONE) {
-		ret = i915_gem_object_get_fence_reg(obj);
-		if (ret != 0) {
-			i915_gem_object_unpin(obj);
-			return ret;
-		}
+		ret = i915_gem_object_get_fence_reg(obj, false);
+		if (ret)
+			goto err_unpin;
 	}

 	return 0;
+
+err_unpin:
+	i915_gem_object_unpin(obj);
+	return ret;
 }

 /* Assume fb object is pinned & idle & fenced and just update base pointers */
 static int
 intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
-			   int x, int y)
+			   int x, int y, enum mode_set_atomic state)
 {
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
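
Note: the pin-and-fence path is converted to the kernel's usual goto-based
unwind: every failure after the pin jumps to err_unpin, so exactly what was
acquired is released. The generic shape, with placeholder resources:

	struct res;
	int get(struct res *r);
	void put(struct res *r);

	static int acquire_pair(struct res *a, struct res *b)
	{
		int ret = get(a);
		if (ret)
			return ret;	/* nothing held yet */
		ret = get(b);
		if (ret)
			goto err_put_a;	/* unwind in reverse order */
		return 0;

	err_put_a:
		put(a);
		return ret;
	}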
@@ -1468,12 +1501,8 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
 	struct drm_gem_object *obj;
 	int plane = intel_crtc->plane;
 	unsigned long Start, Offset;
-	int dspbase = (plane == 0 ? DSPAADDR : DSPBADDR);
-	int dspsurf = (plane == 0 ? DSPASURF : DSPBSURF);
-	int dspstride = (plane == 0) ? DSPASTRIDE : DSPBSTRIDE;
-	int dsptileoff = (plane == 0 ? DSPATILEOFF : DSPBTILEOFF);
-	int dspcntr_reg = (plane == 0) ? DSPACNTR : DSPBCNTR;
 	u32 dspcntr;
+	u32 reg;

 	switch (plane) {
 	case 0:
@@ -1488,7 +1517,8 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
 	obj = intel_fb->obj;
 	obj_priv = to_intel_bo(obj);

-	dspcntr = I915_READ(dspcntr_reg);
+	reg = DSPCNTR(plane);
+	dspcntr = I915_READ(reg);
 	/* Mask out pixel format bits in case we change it */
 	dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
 	switch (fb->bits_per_pixel) {
@@ -1509,7 +1539,7 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
 		DRM_ERROR("Unknown color depth\n");
 		return -EINVAL;
 	}
-	if (IS_I965G(dev)) {
+	if (INTEL_INFO(dev)->gen >= 4) {
 		if (obj_priv->tiling_mode != I915_TILING_NONE)
 			dspcntr |= DISPPLANE_TILED;
 		else
@@ -1520,28 +1550,24 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
 		/* must disable */
 		dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;

-	I915_WRITE(dspcntr_reg, dspcntr);
+	I915_WRITE(reg, dspcntr);

 	Start = obj_priv->gtt_offset;
 	Offset = y * fb->pitch + x * (fb->bits_per_pixel / 8);

 	DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
 		      Start, Offset, x, y, fb->pitch);
-	I915_WRITE(dspstride, fb->pitch);
-	if (IS_I965G(dev)) {
-		I915_WRITE(dspsurf, Start);
-		I915_WRITE(dsptileoff, (y << 16) | x);
-		I915_WRITE(dspbase, Offset);
-	} else {
-		I915_WRITE(dspbase, Start + Offset);
-	}
-	POSTING_READ(dspbase);
-
-	if (IS_I965G(dev) || plane == 0)
-		intel_update_fbc(crtc, &crtc->mode);
+	I915_WRITE(DSPSTRIDE(plane), fb->pitch);
+	if (INTEL_INFO(dev)->gen >= 4) {
+		I915_WRITE(DSPSURF(plane), Start);
+		I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
+		I915_WRITE(DSPADDR(plane), Offset);
+	} else
+		I915_WRITE(DSPADDR(plane), Start + Offset);
+	POSTING_READ(reg);

-	intel_wait_for_vblank(dev, intel_crtc->pipe);
-	intel_increase_pllclock(crtc, true);
+	intel_update_fbc(dev);
+	intel_increase_pllclock(crtc);

 	return 0;
 }
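
Note: the DSPCNTR(plane)/DSPADDR(plane)-style macros replace the per-plane
ternaries because plane B's registers sit at a fixed stride above plane
A's. A sketch of the convention; the offsets here are illustrative, not
guaranteed to match i915_reg.h:

	#define SKETCH_DSPCNTR(plane)	(0x70180 + (plane) * 0x1000)
	#define SKETCH_DSPADDR(plane)	(0x70184 + (plane) * 0x1000)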
@@ -1553,11 +1579,6 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_master_private *master_priv;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	struct intel_framebuffer *intel_fb;
-	struct drm_i915_gem_object *obj_priv;
-	struct drm_gem_object *obj;
-	int pipe = intel_crtc->pipe;
-	int plane = intel_crtc->plane;
 	int ret;

 	/* no fb bound */
@@ -1566,45 +1587,42 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
 		return 0;
 	}

-	switch (plane) {
+	switch (intel_crtc->plane) {
 	case 0:
 	case 1:
 		break;
 	default:
-		DRM_ERROR("Can't update plane %d in SAREA\n", plane);
 		return -EINVAL;
 	}

-	intel_fb = to_intel_framebuffer(crtc->fb);
-	obj = intel_fb->obj;
-	obj_priv = to_intel_bo(obj);
-
 	mutex_lock(&dev->struct_mutex);
-	ret = intel_pin_and_fence_fb_obj(dev, obj);
+	ret = intel_pin_and_fence_fb_obj(dev,
+					 to_intel_framebuffer(crtc->fb)->obj,
+					 false);
 	if (ret != 0) {
 		mutex_unlock(&dev->struct_mutex);
 		return ret;
 	}

-	ret = i915_gem_object_set_to_display_plane(obj);
-	if (ret != 0) {
-		i915_gem_object_unpin(obj);
-		mutex_unlock(&dev->struct_mutex);
-		return ret;
+	if (old_fb) {
+		struct drm_i915_private *dev_priv = dev->dev_private;
+		struct drm_gem_object *obj = to_intel_framebuffer(old_fb)->obj;
+		struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+
+		wait_event(dev_priv->pending_flip_queue,
+			   atomic_read(&obj_priv->pending_flip) == 0);
 	}

-	ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y);
+	ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y,
+					 LEAVE_ATOMIC_MODE_SET);
 	if (ret) {
-		i915_gem_object_unpin(obj);
+		i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj);
 		mutex_unlock(&dev->struct_mutex);
 		return ret;
 	}

-	if (old_fb) {
-		intel_fb = to_intel_framebuffer(old_fb);
-		obj_priv = to_intel_bo(intel_fb->obj);
-		i915_gem_object_unpin(intel_fb->obj);
-	}
+	if (old_fb)
+		i915_gem_object_unpin(to_intel_framebuffer(old_fb)->obj);

 	mutex_unlock(&dev->struct_mutex);

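
Note: before reusing the old framebuffer's storage, the setter now sleeps
until every page flip still referencing it has retired, via the
pending_flip counter and wait queue seen in the hunk above. The completion
side presumably pairs with it like this (a sketch, not a quote of the IRQ
handler):

	/* on flip-complete: drop the reference and wake any waiter */
	if (atomic_dec_and_test(&obj_priv->pending_flip))
		wake_up(&dev_priv->pending_flip_queue);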
@@ -1615,7 +1633,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
 	if (!master_priv->sarea_priv)
 		return 0;

-	if (pipe) {
+	if (intel_crtc->pipe) {
 		master_priv->sarea_priv->pipeB_x = x;
 		master_priv->sarea_priv->pipeB_y = y;
 	} else {
@@ -1626,7 +1644,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
 	return 0;
 }

-static void ironlake_set_pll_edp (struct drm_crtc *crtc, int clock)
+static void ironlake_set_pll_edp(struct drm_crtc *crtc, int clock)
 {
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1659,6 +1677,7 @@ static void ironlake_set_pll_edp (struct drm_crtc *crtc, int clock)
 	}
 	I915_WRITE(DP_A, dpa_ctl);

+	POSTING_READ(DP_A);
 	udelay(500);
 }

@@ -1669,84 +1688,109 @@ static void ironlake_fdi_link_train(struct drm_crtc *crtc)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	int pipe = intel_crtc->pipe;
-	int fdi_tx_reg = (pipe == 0) ? FDI_TXA_CTL : FDI_TXB_CTL;
-	int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL;
-	int fdi_rx_iir_reg = (pipe == 0) ? FDI_RXA_IIR : FDI_RXB_IIR;
-	int fdi_rx_imr_reg = (pipe == 0) ? FDI_RXA_IMR : FDI_RXB_IMR;
-	u32 temp, tries = 0;
+	u32 reg, temp, tries;

 	/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
 	   for train result */
-	temp = I915_READ(fdi_rx_imr_reg);
+	reg = FDI_RX_IMR(pipe);
+	temp = I915_READ(reg);
 	temp &= ~FDI_RX_SYMBOL_LOCK;
 	temp &= ~FDI_RX_BIT_LOCK;
-	I915_WRITE(fdi_rx_imr_reg, temp);
-	I915_READ(fdi_rx_imr_reg);
+	I915_WRITE(reg, temp);
+	I915_READ(reg);
 	udelay(150);

 	/* enable CPU FDI TX and PCH FDI RX */
-	temp = I915_READ(fdi_tx_reg);
-	temp |= FDI_TX_ENABLE;
+	reg = FDI_TX_CTL(pipe);
+	temp = I915_READ(reg);
 	temp &= ~(7 << 19);
 	temp |= (intel_crtc->fdi_lanes - 1) << 19;
 	temp &= ~FDI_LINK_TRAIN_NONE;
 	temp |= FDI_LINK_TRAIN_PATTERN_1;
-	I915_WRITE(fdi_tx_reg, temp);
-	I915_READ(fdi_tx_reg);
+	I915_WRITE(reg, temp | FDI_TX_ENABLE);

-	temp = I915_READ(fdi_rx_reg);
+	reg = FDI_RX_CTL(pipe);
+	temp = I915_READ(reg);
 	temp &= ~FDI_LINK_TRAIN_NONE;
 	temp |= FDI_LINK_TRAIN_PATTERN_1;
-	I915_WRITE(fdi_rx_reg, temp | FDI_RX_ENABLE);
-	I915_READ(fdi_rx_reg);
+	I915_WRITE(reg, temp | FDI_RX_ENABLE);
+
+	POSTING_READ(reg);
 	udelay(150);

+	/* Ironlake workaround, enable clock pointer after FDI enable*/
+	I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_ENABLE);
+
+	reg = FDI_RX_IIR(pipe);
 	for (tries = 0; tries < 5; tries++) {
-		temp = I915_READ(fdi_rx_iir_reg);
+		temp = I915_READ(reg);
 		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

 		if ((temp & FDI_RX_BIT_LOCK)) {
 			DRM_DEBUG_KMS("FDI train 1 done.\n");
-			I915_WRITE(fdi_rx_iir_reg,
-				   temp | FDI_RX_BIT_LOCK);
+			I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
 			break;
 		}
 	}
 	if (tries == 5)
-		DRM_DEBUG_KMS("FDI train 1 fail!\n");
+		DRM_ERROR("FDI train 1 fail!\n");

 	/* Train 2 */
-	temp = I915_READ(fdi_tx_reg);
+	reg = FDI_TX_CTL(pipe);
+	temp = I915_READ(reg);
 	temp &= ~FDI_LINK_TRAIN_NONE;
 	temp |= FDI_LINK_TRAIN_PATTERN_2;
-	I915_WRITE(fdi_tx_reg, temp);
+	I915_WRITE(reg, temp);

-	temp = I915_READ(fdi_rx_reg);
+	reg = FDI_RX_CTL(pipe);
+	temp = I915_READ(reg);
 	temp &= ~FDI_LINK_TRAIN_NONE;
 	temp |= FDI_LINK_TRAIN_PATTERN_2;
-	I915_WRITE(fdi_rx_reg, temp);
-	udelay(150);
+	I915_WRITE(reg, temp);

-	tries = 0;
+	POSTING_READ(reg);
+	udelay(150);

+	reg = FDI_RX_IIR(pipe);
 	for (tries = 0; tries < 5; tries++) {
-		temp = I915_READ(fdi_rx_iir_reg);
+		temp = I915_READ(reg);
 		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

 		if (temp & FDI_RX_SYMBOL_LOCK) {
-			I915_WRITE(fdi_rx_iir_reg,
-				   temp | FDI_RX_SYMBOL_LOCK);
+			I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
 			DRM_DEBUG_KMS("FDI train 2 done.\n");
 			break;
 		}
 	}
 	if (tries == 5)
-		DRM_DEBUG_KMS("FDI train 2 fail!\n");
+		DRM_ERROR("FDI train 2 fail!\n");

 	DRM_DEBUG_KMS("FDI train done\n");
+
+	/* enable normal train */
+	reg = FDI_TX_CTL(pipe);
+	temp = I915_READ(reg);
+	temp &= ~FDI_LINK_TRAIN_NONE;
+	temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
+	I915_WRITE(reg, temp);
+
+	reg = FDI_RX_CTL(pipe);
+	temp = I915_READ(reg);
+	if (HAS_PCH_CPT(dev)) {
+		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+		temp |= FDI_LINK_TRAIN_NORMAL_CPT;
+	} else {
+		temp &= ~FDI_LINK_TRAIN_NONE;
+		temp |= FDI_LINK_TRAIN_NONE;
+	}
+	I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
+
+	/* wait one idle pattern time */
+	POSTING_READ(reg);
+	udelay(1000);
 }

-static int snb_b_fdi_train_param [] = {
+static const int const snb_b_fdi_train_param [] = {
 	FDI_LINK_TRAIN_400MV_0DB_SNB_B,
 	FDI_LINK_TRAIN_400MV_6DB_SNB_B,
 	FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
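
Note: training now ends by switching both ends to the normal idle/frame
pattern with enhanced framing, instead of leaving that to the caller. The
whole handshake condenses to: pattern 1 until RX reports bit lock, pattern
2 until symbol lock, then normal train. A sketch with invented helpers:

	void set_patterns(int pattern);		/* program TX and RX */
	bool poll_rx_iir(unsigned lock_bit, int tries);
	void enable_normal_train(void);

	static void fdi_train_sketch(void)
	{
		set_patterns(1);
		if (!poll_rx_iir(FDI_RX_BIT_LOCK, 5))
			return;			/* train 1 failed */
		set_patterns(2);
		if (!poll_rx_iir(FDI_RX_SYMBOL_LOCK, 5))
			return;			/* train 2 failed */
		enable_normal_train();		/* enhanced framing */
	}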
@@ -1760,24 +1804,22 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	int pipe = intel_crtc->pipe;
-	int fdi_tx_reg = (pipe == 0) ? FDI_TXA_CTL : FDI_TXB_CTL;
-	int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL;
-	int fdi_rx_iir_reg = (pipe == 0) ? FDI_RXA_IIR : FDI_RXB_IIR;
-	int fdi_rx_imr_reg = (pipe == 0) ? FDI_RXA_IMR : FDI_RXB_IMR;
-	u32 temp, i;
+	u32 reg, temp, i;

 	/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
 	   for train result */
-	temp = I915_READ(fdi_rx_imr_reg);
+	reg = FDI_RX_IMR(pipe);
+	temp = I915_READ(reg);
 	temp &= ~FDI_RX_SYMBOL_LOCK;
 	temp &= ~FDI_RX_BIT_LOCK;
-	I915_WRITE(fdi_rx_imr_reg, temp);
-	I915_READ(fdi_rx_imr_reg);
+	I915_WRITE(reg, temp);
+
+	POSTING_READ(reg);
 	udelay(150);

 	/* enable CPU FDI TX and PCH FDI RX */
-	temp = I915_READ(fdi_tx_reg);
-	temp |= FDI_TX_ENABLE;
+	reg = FDI_TX_CTL(pipe);
+	temp = I915_READ(reg);
 	temp &= ~(7 << 19);
 	temp |= (intel_crtc->fdi_lanes - 1) << 19;
 	temp &= ~FDI_LINK_TRAIN_NONE;
@@ -1785,10 +1827,10 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
 	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
 	/* SNB-B */
 	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
-	I915_WRITE(fdi_tx_reg, temp);
-	I915_READ(fdi_tx_reg);
+	I915_WRITE(reg, temp | FDI_TX_ENABLE);

-	temp = I915_READ(fdi_rx_reg);
+	reg = FDI_RX_CTL(pipe);
+	temp = I915_READ(reg);
 	if (HAS_PCH_CPT(dev)) {
 		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
 		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
@@ -1796,32 +1838,37 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
 		temp &= ~FDI_LINK_TRAIN_NONE;
 		temp |= FDI_LINK_TRAIN_PATTERN_1;
 	}
-	I915_WRITE(fdi_rx_reg, temp | FDI_RX_ENABLE);
-	I915_READ(fdi_rx_reg);
+	I915_WRITE(reg, temp | FDI_RX_ENABLE);
+
+	POSTING_READ(reg);
 	udelay(150);

 	for (i = 0; i < 4; i++ ) {
-		temp = I915_READ(fdi_tx_reg);
+		reg = FDI_TX_CTL(pipe);
+		temp = I915_READ(reg);
 		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
 		temp |= snb_b_fdi_train_param[i];
-		I915_WRITE(fdi_tx_reg, temp);
+		I915_WRITE(reg, temp);
+
+		POSTING_READ(reg);
 		udelay(500);

-		temp = I915_READ(fdi_rx_iir_reg);
+		reg = FDI_RX_IIR(pipe);
+		temp = I915_READ(reg);
 		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

 		if (temp & FDI_RX_BIT_LOCK) {
-			I915_WRITE(fdi_rx_iir_reg,
-				   temp | FDI_RX_BIT_LOCK);
+			I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
 			DRM_DEBUG_KMS("FDI train 1 done.\n");
 			break;
 		}
 	}
 	if (i == 4)
-		DRM_DEBUG_KMS("FDI train 1 fail!\n");
+		DRM_ERROR("FDI train 1 fail!\n");

 	/* Train 2 */
-	temp = I915_READ(fdi_tx_reg);
+	reg = FDI_TX_CTL(pipe);
+	temp = I915_READ(reg);
 	temp &= ~FDI_LINK_TRAIN_NONE;
 	temp |= FDI_LINK_TRAIN_PATTERN_2;
 	if (IS_GEN6(dev)) {
@@ -1829,9 +1876,10 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
 		/* SNB-B */
 		temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
 	}
-	I915_WRITE(fdi_tx_reg, temp);
+	I915_WRITE(reg, temp);

-	temp = I915_READ(fdi_rx_reg);
+	reg = FDI_RX_CTL(pipe);
+	temp = I915_READ(reg);
 	if (HAS_PCH_CPT(dev)) {
 		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
 		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
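
Note: unlike Ironlake, the gen6 trainer retries each pattern with up to
four TX voltage-swing/pre-emphasis pairs from snb_b_fdi_train_param[]
before declaring failure. The loop's shape, with an invented programming
helper:

	int i;
	for (i = 0; i < 4; i++) {
		set_vswing_preemph(snb_b_fdi_train_param[i]);	/* hypothetical */
		udelay(500);
		if (I915_READ(FDI_RX_IIR(pipe)) & FDI_RX_BIT_LOCK)
			break;
	}
	if (i == 4)
		DRM_ERROR("FDI train 1 fail!\n");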
@@ -1839,535 +1887,593 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
 		temp &= ~FDI_LINK_TRAIN_NONE;
 		temp |= FDI_LINK_TRAIN_PATTERN_2;
 	}
-	I915_WRITE(fdi_rx_reg, temp);
+	I915_WRITE(reg, temp);
+
+	POSTING_READ(reg);
 	udelay(150);

 	for (i = 0; i < 4; i++ ) {
-		temp = I915_READ(fdi_tx_reg);
+		reg = FDI_TX_CTL(pipe);
+		temp = I915_READ(reg);
 		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
 		temp |= snb_b_fdi_train_param[i];
-		I915_WRITE(fdi_tx_reg, temp);
+		I915_WRITE(reg, temp);
+
+		POSTING_READ(reg);
 		udelay(500);

-		temp = I915_READ(fdi_rx_iir_reg);
+		reg = FDI_RX_IIR(pipe);
+		temp = I915_READ(reg);
 		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

 		if (temp & FDI_RX_SYMBOL_LOCK) {
-			I915_WRITE(fdi_rx_iir_reg,
-				   temp | FDI_RX_SYMBOL_LOCK);
+			I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
 			DRM_DEBUG_KMS("FDI train 2 done.\n");
 			break;
 		}
 	}
 	if (i == 4)
-		DRM_DEBUG_KMS("FDI train 2 fail!\n");
+		DRM_ERROR("FDI train 2 fail!\n");

 	DRM_DEBUG_KMS("FDI train done.\n");
 }

-static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
+static void ironlake_fdi_enable(struct drm_crtc *crtc)
 {
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	int pipe = intel_crtc->pipe;
-	int plane = intel_crtc->plane;
-	int pch_dpll_reg = (pipe == 0) ? PCH_DPLL_A : PCH_DPLL_B;
-	int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
-	int dspcntr_reg = (plane == 0) ? DSPACNTR : DSPBCNTR;
-	int dspbase_reg = (plane == 0) ? DSPAADDR : DSPBADDR;
-	int fdi_tx_reg = (pipe == 0) ? FDI_TXA_CTL : FDI_TXB_CTL;
-	int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL;
-	int transconf_reg = (pipe == 0) ? TRANSACONF : TRANSBCONF;
-	int cpu_htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
-	int cpu_hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B;
-	int cpu_hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B;
-	int cpu_vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B;
-	int cpu_vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B;
-	int cpu_vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B;
-	int trans_htot_reg = (pipe == 0) ? TRANS_HTOTAL_A : TRANS_HTOTAL_B;
-	int trans_hblank_reg = (pipe == 0) ? TRANS_HBLANK_A : TRANS_HBLANK_B;
-	int trans_hsync_reg = (pipe == 0) ? TRANS_HSYNC_A : TRANS_HSYNC_B;
-	int trans_vtot_reg = (pipe == 0) ? TRANS_VTOTAL_A : TRANS_VTOTAL_B;
-	int trans_vblank_reg = (pipe == 0) ? TRANS_VBLANK_A : TRANS_VBLANK_B;
-	int trans_vsync_reg = (pipe == 0) ? TRANS_VSYNC_A : TRANS_VSYNC_B;
-	int trans_dpll_sel = (pipe == 0) ? 0 : 1;
-	u32 temp;
-	u32 pipe_bpc;
-
-	temp = I915_READ(pipeconf_reg);
-	pipe_bpc = temp & PIPE_BPC_MASK;
+	u32 reg, temp;

-	/* XXX: When our outputs are all unaware of DPMS modes other than off
-	 * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
-	 */
-	switch (mode) {
-	case DRM_MODE_DPMS_ON:
-	case DRM_MODE_DPMS_STANDBY:
-	case DRM_MODE_DPMS_SUSPEND:
-		DRM_DEBUG_KMS("crtc %d/%d dpms on\n", pipe, plane);
+	/* Write the TU size bits so error detection works */
+	I915_WRITE(FDI_RX_TUSIZE1(pipe),
+		   I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);

-		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
-			temp = I915_READ(PCH_LVDS);
-			if ((temp & LVDS_PORT_EN) == 0) {
-				I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN);
-				POSTING_READ(PCH_LVDS);
-			}
-		}
+	/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
+	reg = FDI_RX_CTL(pipe);
+	temp = I915_READ(reg);
+	temp &= ~((0x7 << 19) | (0x7 << 16));
+	temp |= (intel_crtc->fdi_lanes - 1) << 19;
+	temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
+	I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);

-		if (!HAS_eDP) {
+	POSTING_READ(reg);
+	udelay(200);

-			/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
-			temp = I915_READ(fdi_rx_reg);
-			/*
-			 * make the BPC in FDI Rx be consistent with that in
-			 * pipeconf reg.
-			 */
-			temp &= ~(0x7 << 16);
-			temp |= (pipe_bpc << 11);
-			temp &= ~(7 << 19);
-			temp |= (intel_crtc->fdi_lanes - 1) << 19;
-			I915_WRITE(fdi_rx_reg, temp | FDI_RX_PLL_ENABLE);
-			I915_READ(fdi_rx_reg);
-			udelay(200);
+	/* Switch from Rawclk to PCDclk */
+	temp = I915_READ(reg);
+	I915_WRITE(reg, temp | FDI_PCDCLK);

-			/* Switch from Rawclk to PCDclk */
-			temp = I915_READ(fdi_rx_reg);
-			I915_WRITE(fdi_rx_reg, temp | FDI_SEL_PCDCLK);
-			I915_READ(fdi_rx_reg);
-			udelay(200);
+	POSTING_READ(reg);
+	udelay(200);

 	/* Enable CPU FDI TX PLL, always on for Ironlake */
-			temp = I915_READ(fdi_tx_reg);
-			if ((temp & FDI_TX_PLL_ENABLE) == 0) {
-				I915_WRITE(fdi_tx_reg, temp | FDI_TX_PLL_ENABLE);
-				I915_READ(fdi_tx_reg);
-				udelay(100);
-			}
-		}
+	reg = FDI_TX_CTL(pipe);
+	temp = I915_READ(reg);
+	if ((temp & FDI_TX_PLL_ENABLE) == 0) {
+		I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);

-		/* Enable panel fitting for LVDS */
-		if (dev_priv->pch_pf_size &&
-		    (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)
-		    || HAS_eDP || intel_pch_has_edp(crtc))) {
-			/* Force use of hard-coded filter coefficients
-			 * as some pre-programmed values are broken,
-			 * e.g. x201.
-			 */
-			I915_WRITE(pipe ? PFB_CTL_1 : PFA_CTL_1,
-				   PF_ENABLE | PF_FILTER_MED_3x3);
-			I915_WRITE(pipe ? PFB_WIN_POS : PFA_WIN_POS,
-				   dev_priv->pch_pf_pos);
-			I915_WRITE(pipe ? PFB_WIN_SZ : PFA_WIN_SZ,
-				   dev_priv->pch_pf_size);
-		}
+		POSTING_READ(reg);
+		udelay(100);
+	}
+}

-		/* Enable CPU pipe */
-		temp = I915_READ(pipeconf_reg);
-		if ((temp & PIPEACONF_ENABLE) == 0) {
-			I915_WRITE(pipeconf_reg, temp | PIPEACONF_ENABLE);
-			I915_READ(pipeconf_reg);
-			udelay(100);
-		}
+static void intel_flush_display_plane(struct drm_device *dev,
+				      int plane)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 reg = DSPADDR(plane);
+	I915_WRITE(reg, I915_READ(reg));
+}

-		/* configure and enable CPU plane */
-		temp = I915_READ(dspcntr_reg);
-		if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
-			I915_WRITE(dspcntr_reg, temp | DISPLAY_PLANE_ENABLE);
-			/* Flush the plane changes */
-			I915_WRITE(dspbase_reg, I915_READ(dspbase_reg));
-		}
+/*
+ * When we disable a pipe, we need to clear any pending scanline wait events
+ * to avoid hanging the ring, which we assume we are waiting on.
+ */
+static void intel_clear_scanline_wait(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 tmp;

-		if (!HAS_eDP) {
-			/* For PCH output, training FDI link */
-			if (IS_GEN6(dev))
-				gen6_fdi_link_train(crtc);
-			else
-				ironlake_fdi_link_train(crtc);
+	if (IS_GEN2(dev))
+		/* Can't break the hang on i8xx */
+		return;

-			/* enable PCH DPLL */
-			temp = I915_READ(pch_dpll_reg);
-			if ((temp & DPLL_VCO_ENABLE) == 0) {
-				I915_WRITE(pch_dpll_reg, temp | DPLL_VCO_ENABLE);
-				I915_READ(pch_dpll_reg);
-			}
-			udelay(200);
+	tmp = I915_READ(PRB0_CTL);
+	if (tmp & RING_WAIT) {
+		I915_WRITE(PRB0_CTL, tmp);
+		POSTING_READ(PRB0_CTL);
+	}
+}

-			if (HAS_PCH_CPT(dev)) {
-				/* Be sure PCH DPLL SEL is set */
-				temp = I915_READ(PCH_DPLL_SEL);
-				if (trans_dpll_sel == 0 &&
-				    (temp & TRANSA_DPLL_ENABLE) == 0)
-					temp |= (TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL);
-				else if (trans_dpll_sel == 1 &&
-					 (temp & TRANSB_DPLL_ENABLE) == 0)
-					temp |= (TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
-				I915_WRITE(PCH_DPLL_SEL, temp);
-				I915_READ(PCH_DPLL_SEL);
-			}
+static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
+{
+	struct drm_i915_gem_object *obj_priv;
+	struct drm_i915_private *dev_priv;

-			/* set transcoder timing */
-			I915_WRITE(trans_htot_reg, I915_READ(cpu_htot_reg));
-			I915_WRITE(trans_hblank_reg, I915_READ(cpu_hblank_reg));
-			I915_WRITE(trans_hsync_reg, I915_READ(cpu_hsync_reg));
-
-			I915_WRITE(trans_vtot_reg, I915_READ(cpu_vtot_reg));
-			I915_WRITE(trans_vblank_reg, I915_READ(cpu_vblank_reg));
-			I915_WRITE(trans_vsync_reg, I915_READ(cpu_vsync_reg));
-
-			/* enable normal train */
-			temp = I915_READ(fdi_tx_reg);
-			temp &= ~FDI_LINK_TRAIN_NONE;
-			I915_WRITE(fdi_tx_reg, temp | FDI_LINK_TRAIN_NONE |
-				   FDI_TX_ENHANCE_FRAME_ENABLE);
-			I915_READ(fdi_tx_reg);
-
-			temp = I915_READ(fdi_rx_reg);
-			if (HAS_PCH_CPT(dev)) {
-				temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
-				temp |= FDI_LINK_TRAIN_NORMAL_CPT;
-			} else {
-				temp &= ~FDI_LINK_TRAIN_NONE;
-				temp |= FDI_LINK_TRAIN_NONE;
-			}
-			I915_WRITE(fdi_rx_reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
+	if (crtc->fb == NULL)
+		return;
2034 I915_READ(fdi_rx_reg);
2035 1997
2036 /* wait one idle pattern time */ 1998 obj_priv = to_intel_bo(to_intel_framebuffer(crtc->fb)->obj);
2037 udelay(100); 1999 dev_priv = crtc->dev->dev_private;
2000 wait_event(dev_priv->pending_flip_queue,
2001 atomic_read(&obj_priv->pending_flip) == 0);
2002}
2038 2003
2039 /* For PCH DP, enable TRANS_DP_CTL */ 2004static void ironlake_crtc_enable(struct drm_crtc *crtc)
2040 if (HAS_PCH_CPT(dev) && 2005{
2041 intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) { 2006 struct drm_device *dev = crtc->dev;
2042 int trans_dp_ctl = (pipe == 0) ? TRANS_DP_CTL_A : TRANS_DP_CTL_B; 2007 struct drm_i915_private *dev_priv = dev->dev_private;
2043 int reg; 2008 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2044 2009 int pipe = intel_crtc->pipe;
2045 reg = I915_READ(trans_dp_ctl); 2010 int plane = intel_crtc->plane;
2046 reg &= ~(TRANS_DP_PORT_SEL_MASK | 2011 u32 reg, temp;
2047 TRANS_DP_SYNC_MASK);
2048 reg |= (TRANS_DP_OUTPUT_ENABLE |
2049 TRANS_DP_ENH_FRAMING);
2050
2051 if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC)
2052 reg |= TRANS_DP_HSYNC_ACTIVE_HIGH;
2053 if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC)
2054 reg |= TRANS_DP_VSYNC_ACTIVE_HIGH;
2055
2056 switch (intel_trans_dp_port_sel(crtc)) {
2057 case PCH_DP_B:
2058 reg |= TRANS_DP_PORT_SEL_B;
2059 break;
2060 case PCH_DP_C:
2061 reg |= TRANS_DP_PORT_SEL_C;
2062 break;
2063 case PCH_DP_D:
2064 reg |= TRANS_DP_PORT_SEL_D;
2065 break;
2066 default:
2067 DRM_DEBUG_KMS("Wrong PCH DP port return. Guess port B\n");
2068 reg |= TRANS_DP_PORT_SEL_B;
2069 break;
2070 }
2071 2012
2072 I915_WRITE(trans_dp_ctl, reg); 2013 if (intel_crtc->active)
2073 POSTING_READ(trans_dp_ctl); 2014 return;
2074 }
2075 2015
2076 /* enable PCH transcoder */ 2016 intel_crtc->active = true;
2077 temp = I915_READ(transconf_reg); 2017 intel_update_watermarks(dev);
2078 /*
2079 * make the BPC in transcoder be consistent with
2080 * that in pipeconf reg.
2081 */
2082 temp &= ~PIPE_BPC_MASK;
2083 temp |= pipe_bpc;
2084 I915_WRITE(transconf_reg, temp | TRANS_ENABLE);
2085 I915_READ(transconf_reg);
2086 2018
2087 if (wait_for(I915_READ(transconf_reg) & TRANS_STATE_ENABLE, 100, 1)) 2019 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
2088 DRM_ERROR("failed to enable transcoder\n"); 2020 temp = I915_READ(PCH_LVDS);
2089 } 2021 if ((temp & LVDS_PORT_EN) == 0)
2022 I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN);
2023 }
2090 2024
2091 intel_crtc_load_lut(crtc); 2025 ironlake_fdi_enable(crtc);
2092 2026
2093 intel_update_fbc(crtc, &crtc->mode); 2027 /* Enable panel fitting for LVDS */
2094 break; 2028 if (dev_priv->pch_pf_size &&
2029 (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) || HAS_eDP)) {
2030 /* Force use of hard-coded filter coefficients
2031 * as some pre-programmed values are broken,
2032 * e.g. x201.
2033 */
2034 I915_WRITE(pipe ? PFB_CTL_1 : PFA_CTL_1,
2035 PF_ENABLE | PF_FILTER_MED_3x3);
2036 I915_WRITE(pipe ? PFB_WIN_POS : PFA_WIN_POS,
2037 dev_priv->pch_pf_pos);
2038 I915_WRITE(pipe ? PFB_WIN_SZ : PFA_WIN_SZ,
2039 dev_priv->pch_pf_size);
2040 }
2041
2042 /* Enable CPU pipe */
2043 reg = PIPECONF(pipe);
2044 temp = I915_READ(reg);
2045 if ((temp & PIPECONF_ENABLE) == 0) {
2046 I915_WRITE(reg, temp | PIPECONF_ENABLE);
2047 POSTING_READ(reg);
2048 intel_wait_for_vblank(dev, intel_crtc->pipe);
2049 }
2050
2051 /* configure and enable CPU plane */
2052 reg = DSPCNTR(plane);
2053 temp = I915_READ(reg);
2054 if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
2055 I915_WRITE(reg, temp | DISPLAY_PLANE_ENABLE);
2056 intel_flush_display_plane(dev, plane);
2057 }
2058
2059 /* For PCH output, training FDI link */
2060 if (IS_GEN6(dev))
2061 gen6_fdi_link_train(crtc);
2062 else
2063 ironlake_fdi_link_train(crtc);
2064
2065 /* enable PCH DPLL */
2066 reg = PCH_DPLL(pipe);
2067 temp = I915_READ(reg);
2068 if ((temp & DPLL_VCO_ENABLE) == 0) {
2069 I915_WRITE(reg, temp | DPLL_VCO_ENABLE);
2070 POSTING_READ(reg);
2071 udelay(200);
2072 }
2095 2073
2096 case DRM_MODE_DPMS_OFF: 2074 if (HAS_PCH_CPT(dev)) {
2097 DRM_DEBUG_KMS("crtc %d/%d dpms off\n", pipe, plane); 2075 /* Be sure PCH DPLL SEL is set */
2076 temp = I915_READ(PCH_DPLL_SEL);
2077 if (pipe == 0 && (temp & TRANSA_DPLL_ENABLE) == 0)
2078 temp |= (TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL);
2079 else if (pipe == 1 && (temp & TRANSB_DPLL_ENABLE) == 0)
2080 temp |= (TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
2081 I915_WRITE(PCH_DPLL_SEL, temp);
2082 }
2098 2083
2099 drm_vblank_off(dev, pipe); 2084 /* set transcoder timing */
2100 /* Disable display plane */ 2085 I915_WRITE(TRANS_HTOTAL(pipe), I915_READ(HTOTAL(pipe)));
2101 temp = I915_READ(dspcntr_reg); 2086 I915_WRITE(TRANS_HBLANK(pipe), I915_READ(HBLANK(pipe)));
2102 if ((temp & DISPLAY_PLANE_ENABLE) != 0) { 2087 I915_WRITE(TRANS_HSYNC(pipe), I915_READ(HSYNC(pipe)));
2103 I915_WRITE(dspcntr_reg, temp & ~DISPLAY_PLANE_ENABLE); 2088
2104 /* Flush the plane changes */ 2089 I915_WRITE(TRANS_VTOTAL(pipe), I915_READ(VTOTAL(pipe)));
2105 I915_WRITE(dspbase_reg, I915_READ(dspbase_reg)); 2090 I915_WRITE(TRANS_VBLANK(pipe), I915_READ(VBLANK(pipe)));
2106 I915_READ(dspbase_reg); 2091 I915_WRITE(TRANS_VSYNC(pipe), I915_READ(VSYNC(pipe)));
2092
2093 /* For PCH DP, enable TRANS_DP_CTL */
2094 if (HAS_PCH_CPT(dev) &&
2095 intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
2096 reg = TRANS_DP_CTL(pipe);
2097 temp = I915_READ(reg);
2098 temp &= ~(TRANS_DP_PORT_SEL_MASK |
2099 TRANS_DP_SYNC_MASK);
2100 temp |= (TRANS_DP_OUTPUT_ENABLE |
2101 TRANS_DP_ENH_FRAMING);
2102
2103 if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC)
2104 temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
2105 if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC)
2106 temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;
2107
2108 switch (intel_trans_dp_port_sel(crtc)) {
2109 case PCH_DP_B:
2110 temp |= TRANS_DP_PORT_SEL_B;
2111 break;
2112 case PCH_DP_C:
2113 temp |= TRANS_DP_PORT_SEL_C;
2114 break;
2115 case PCH_DP_D:
2116 temp |= TRANS_DP_PORT_SEL_D;
2117 break;
2118 default:
2119 DRM_DEBUG_KMS("Wrong PCH DP port return. Guess port B\n");
2120 temp |= TRANS_DP_PORT_SEL_B;
2121 break;
2107 } 2122 }
2108 2123
2109 if (dev_priv->cfb_plane == plane && 2124 I915_WRITE(reg, temp);
2110 dev_priv->display.disable_fbc) 2125 }
2111 dev_priv->display.disable_fbc(dev);
2112 2126
2113 /* disable cpu pipe, disable after all planes disabled */ 2127 /* enable PCH transcoder */
2114 temp = I915_READ(pipeconf_reg); 2128 reg = TRANSCONF(pipe);
2115 if ((temp & PIPEACONF_ENABLE) != 0) { 2129 temp = I915_READ(reg);
2116 I915_WRITE(pipeconf_reg, temp & ~PIPEACONF_ENABLE); 2130 /*
2131 * make the BPC in transcoder be consistent with
2132 * that in pipeconf reg.
2133 */
2134 temp &= ~PIPE_BPC_MASK;
2135 temp |= I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK;
2136 I915_WRITE(reg, temp | TRANS_ENABLE);
2137 if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100))
2138 DRM_ERROR("failed to enable transcoder %d\n", pipe);
2117 2139
2118 /* wait for cpu pipe off, pipe state */ 2140 intel_crtc_load_lut(crtc);
2119 if (wait_for((I915_READ(pipeconf_reg) & I965_PIPECONF_ACTIVE) == 0, 50, 1)) 2141 intel_update_fbc(dev);
2120 DRM_ERROR("failed to turn off cpu pipe\n"); 2142 intel_crtc_update_cursor(crtc, true);
2121 } else 2143}
2122 DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
2123 2144
2124 udelay(100); 2145static void ironlake_crtc_disable(struct drm_crtc *crtc)
2146{
2147 struct drm_device *dev = crtc->dev;
2148 struct drm_i915_private *dev_priv = dev->dev_private;
2149 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2150 int pipe = intel_crtc->pipe;
2151 int plane = intel_crtc->plane;
2152 u32 reg, temp;
2153
2154 if (!intel_crtc->active)
2155 return;
2125 2156
2126 /* Disable PF */ 2157 intel_crtc_wait_for_pending_flips(crtc);
2127 I915_WRITE(pipe ? PFB_CTL_1 : PFA_CTL_1, 0); 2158 drm_vblank_off(dev, pipe);
2128 I915_WRITE(pipe ? PFB_WIN_SZ : PFA_WIN_SZ, 0); 2159 intel_crtc_update_cursor(crtc, false);
2129 2160
2130 /* disable CPU FDI tx and PCH FDI rx */ 2161 /* Disable display plane */
2131 temp = I915_READ(fdi_tx_reg); 2162 reg = DSPCNTR(plane);
2132 I915_WRITE(fdi_tx_reg, temp & ~FDI_TX_ENABLE); 2163 temp = I915_READ(reg);
2133 I915_READ(fdi_tx_reg); 2164 if (temp & DISPLAY_PLANE_ENABLE) {
2165 I915_WRITE(reg, temp & ~DISPLAY_PLANE_ENABLE);
2166 intel_flush_display_plane(dev, plane);
2167 }
2134 2168
2135 temp = I915_READ(fdi_rx_reg); 2169 if (dev_priv->cfb_plane == plane &&
2136 /* BPC in FDI rx is consistent with that in pipeconf */ 2170 dev_priv->display.disable_fbc)
2137 temp &= ~(0x07 << 16); 2171 dev_priv->display.disable_fbc(dev);
2138 temp |= (pipe_bpc << 11);
2139 I915_WRITE(fdi_rx_reg, temp & ~FDI_RX_ENABLE);
2140 I915_READ(fdi_rx_reg);
2141 2172
2142 udelay(100); 2173 /* disable cpu pipe, disable after all planes disabled */
2174 reg = PIPECONF(pipe);
2175 temp = I915_READ(reg);
2176 if (temp & PIPECONF_ENABLE) {
2177 I915_WRITE(reg, temp & ~PIPECONF_ENABLE);
2178 POSTING_READ(reg);
2179 /* wait for cpu pipe off, pipe state */
2180 intel_wait_for_pipe_off(dev, intel_crtc->pipe);
2181 }
2182
2183 /* Disable PF */
2184 I915_WRITE(pipe ? PFB_CTL_1 : PFA_CTL_1, 0);
2185 I915_WRITE(pipe ? PFB_WIN_SZ : PFA_WIN_SZ, 0);
2186
2187 /* disable CPU FDI tx and PCH FDI rx */
2188 reg = FDI_TX_CTL(pipe);
2189 temp = I915_READ(reg);
2190 I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
2191 POSTING_READ(reg);
2192
2193 reg = FDI_RX_CTL(pipe);
2194 temp = I915_READ(reg);
2195 temp &= ~(0x7 << 16);
2196 temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
2197 I915_WRITE(reg, temp & ~FDI_RX_ENABLE);
2198
2199 POSTING_READ(reg);
2200 udelay(100);
2201
2202 /* Ironlake workaround, disable clock pointer after downing FDI */
2203 I915_WRITE(FDI_RX_CHICKEN(pipe),
2204 I915_READ(FDI_RX_CHICKEN(pipe) &
2205 ~FDI_RX_PHASE_SYNC_POINTER_ENABLE));
2206
2207 /* still set train pattern 1 */
2208 reg = FDI_TX_CTL(pipe);
2209 temp = I915_READ(reg);
2210 temp &= ~FDI_LINK_TRAIN_NONE;
2211 temp |= FDI_LINK_TRAIN_PATTERN_1;
2212 I915_WRITE(reg, temp);
2143 2213
2144 /* still set train pattern 1 */ 2214 reg = FDI_RX_CTL(pipe);
2145 temp = I915_READ(fdi_tx_reg); 2215 temp = I915_READ(reg);
2216 if (HAS_PCH_CPT(dev)) {
2217 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2218 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
2219 } else {
2146 temp &= ~FDI_LINK_TRAIN_NONE; 2220 temp &= ~FDI_LINK_TRAIN_NONE;
2147 temp |= FDI_LINK_TRAIN_PATTERN_1; 2221 temp |= FDI_LINK_TRAIN_PATTERN_1;
2148 I915_WRITE(fdi_tx_reg, temp); 2222 }
2149 POSTING_READ(fdi_tx_reg); 2223 /* BPC in FDI rx is consistent with that in PIPECONF */
2150 2224 temp &= ~(0x07 << 16);
2151 temp = I915_READ(fdi_rx_reg); 2225 temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
2152 if (HAS_PCH_CPT(dev)) { 2226 I915_WRITE(reg, temp);
2153 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2154 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
2155 } else {
2156 temp &= ~FDI_LINK_TRAIN_NONE;
2157 temp |= FDI_LINK_TRAIN_PATTERN_1;
2158 }
2159 I915_WRITE(fdi_rx_reg, temp);
2160 POSTING_READ(fdi_rx_reg);
2161 2227
2162 udelay(100); 2228 POSTING_READ(reg);
2229 udelay(100);
2163 2230
2164 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { 2231 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
2165 temp = I915_READ(PCH_LVDS); 2232 temp = I915_READ(PCH_LVDS);
2233 if (temp & LVDS_PORT_EN) {
2166 I915_WRITE(PCH_LVDS, temp & ~LVDS_PORT_EN); 2234 I915_WRITE(PCH_LVDS, temp & ~LVDS_PORT_EN);
2167 I915_READ(PCH_LVDS); 2235 POSTING_READ(PCH_LVDS);
2168 udelay(100); 2236 udelay(100);
2169 } 2237 }
2238 }
2170 2239
2171 /* disable PCH transcoder */ 2240 /* disable PCH transcoder */
2172 temp = I915_READ(transconf_reg); 2241 reg = TRANSCONF(plane);
2173 if ((temp & TRANS_ENABLE) != 0) { 2242 temp = I915_READ(reg);
2174 I915_WRITE(transconf_reg, temp & ~TRANS_ENABLE); 2243 if (temp & TRANS_ENABLE) {
2244 I915_WRITE(reg, temp & ~TRANS_ENABLE);
2245 /* wait for PCH transcoder off, transcoder state */
2246 if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50))
2247 DRM_ERROR("failed to disable transcoder\n");
2248 }
2175 2249
2176 /* wait for PCH transcoder off, transcoder state */ 2250 if (HAS_PCH_CPT(dev)) {
2177 if (wait_for((I915_READ(transconf_reg) & TRANS_STATE_ENABLE) == 0, 50, 1)) 2251 /* disable TRANS_DP_CTL */
2178 DRM_ERROR("failed to disable transcoder\n"); 2252 reg = TRANS_DP_CTL(pipe);
2179 } 2253 temp = I915_READ(reg);
2254 temp &= ~(TRANS_DP_OUTPUT_ENABLE | TRANS_DP_PORT_SEL_MASK);
2255 I915_WRITE(reg, temp);
2180 2256
2181 temp = I915_READ(transconf_reg); 2257 /* disable DPLL_SEL */
2182 /* BPC in transcoder is consistent with that in pipeconf */ 2258 temp = I915_READ(PCH_DPLL_SEL);
2183 temp &= ~PIPE_BPC_MASK; 2259 if (pipe == 0)
2184 temp |= pipe_bpc; 2260 temp &= ~(TRANSA_DPLL_ENABLE | TRANSA_DPLLB_SEL);
2185 I915_WRITE(transconf_reg, temp); 2261 else
2186 I915_READ(transconf_reg); 2262 temp &= ~(TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
2187 udelay(100); 2263 I915_WRITE(PCH_DPLL_SEL, temp);
2264 }
2188 2265
2189 if (HAS_PCH_CPT(dev)) { 2266 /* disable PCH DPLL */
2190 /* disable TRANS_DP_CTL */ 2267 reg = PCH_DPLL(pipe);
2191 int trans_dp_ctl = (pipe == 0) ? TRANS_DP_CTL_A : TRANS_DP_CTL_B; 2268 temp = I915_READ(reg);
2192 int reg; 2269 I915_WRITE(reg, temp & ~DPLL_VCO_ENABLE);
2193 2270
2194 reg = I915_READ(trans_dp_ctl); 2271 /* Switch from PCDclk to Rawclk */
2195 reg &= ~(TRANS_DP_OUTPUT_ENABLE | TRANS_DP_PORT_SEL_MASK); 2272 reg = FDI_RX_CTL(pipe);
2196 I915_WRITE(trans_dp_ctl, reg); 2273 temp = I915_READ(reg);
2197 POSTING_READ(trans_dp_ctl); 2274 I915_WRITE(reg, temp & ~FDI_PCDCLK);
2198 2275
2199 /* disable DPLL_SEL */ 2276 /* Disable CPU FDI TX PLL */
2200 temp = I915_READ(PCH_DPLL_SEL); 2277 reg = FDI_TX_CTL(pipe);
2201 if (trans_dpll_sel == 0) 2278 temp = I915_READ(reg);
2202 temp &= ~(TRANSA_DPLL_ENABLE | TRANSA_DPLLB_SEL); 2279 I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);
2203 else
2204 temp &= ~(TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
2205 I915_WRITE(PCH_DPLL_SEL, temp);
2206 I915_READ(PCH_DPLL_SEL);
2207 2280
2208 } 2281 POSTING_READ(reg);
2282 udelay(100);
2209 2283
2210 /* disable PCH DPLL */ 2284 reg = FDI_RX_CTL(pipe);
2211 temp = I915_READ(pch_dpll_reg); 2285 temp = I915_READ(reg);
2212 I915_WRITE(pch_dpll_reg, temp & ~DPLL_VCO_ENABLE); 2286 I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);
2213 I915_READ(pch_dpll_reg);
2214
2215 /* Switch from PCDclk to Rawclk */
2216 temp = I915_READ(fdi_rx_reg);
2217 temp &= ~FDI_SEL_PCDCLK;
2218 I915_WRITE(fdi_rx_reg, temp);
2219 I915_READ(fdi_rx_reg);
2220
2221 /* Disable CPU FDI TX PLL */
2222 temp = I915_READ(fdi_tx_reg);
2223 I915_WRITE(fdi_tx_reg, temp & ~FDI_TX_PLL_ENABLE);
2224 I915_READ(fdi_tx_reg);
2225 udelay(100);
2226 2287
2227 temp = I915_READ(fdi_rx_reg); 2288 /* Wait for the clocks to turn off. */
2228 temp &= ~FDI_RX_PLL_ENABLE; 2289 POSTING_READ(reg);
2229 I915_WRITE(fdi_rx_reg, temp); 2290 udelay(100);
2230 I915_READ(fdi_rx_reg);
2231 2291
2232 /* Wait for the clocks to turn off. */ 2292 intel_crtc->active = false;
2233 udelay(100); 2293 intel_update_watermarks(dev);
2294 intel_update_fbc(dev);
2295 intel_clear_scanline_wait(dev);
2296}
2297
2298static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
2299{
2300 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2301 int pipe = intel_crtc->pipe;
2302 int plane = intel_crtc->plane;
2303
2304 /* XXX: When our outputs are all unaware of DPMS modes other than off
2305 * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
2306 */
2307 switch (mode) {
2308 case DRM_MODE_DPMS_ON:
2309 case DRM_MODE_DPMS_STANDBY:
2310 case DRM_MODE_DPMS_SUSPEND:
2311 DRM_DEBUG_KMS("crtc %d/%d dpms on\n", pipe, plane);
2312 ironlake_crtc_enable(crtc);
2313 break;
2314
2315 case DRM_MODE_DPMS_OFF:
2316 DRM_DEBUG_KMS("crtc %d/%d dpms off\n", pipe, plane);
2317 ironlake_crtc_disable(crtc);
2234 break; 2318 break;
2235 } 2319 }
2236} 2320}
2237 2321
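Aside: the converted enable path relies on the two-argument wait_for() poll macro (the old code still passed a third argument) plus POSTING_READ() to flush a write before delaying. As a rough illustration only — not the driver's code — here is a self-contained userspace model of such a poll-with-timeout helper; poll_register() is a hypothetical stand-in for an I915_READ() of a status register:

    #include <stdbool.h>
    #include <stdio.h>
    #include <time.h>

    /* Hypothetical stand-in for reading a status register: reports the
     * "enabled" bit only after the third poll, to exercise the loop. */
    static unsigned int poll_register(void)
    {
    	static int calls;
    	return ++calls >= 3 ? 0x40000000 : 0;
    }

    /* Minimal model of a wait_for(COND, MS)-style helper: re-evaluate the
     * condition until it holds or the timeout expires. */
    static bool wait_for_bit(unsigned int mask, long timeout_ms)
    {
    	struct timespec start, now;
    	clock_gettime(CLOCK_MONOTONIC, &start);
    	do {
    		if (poll_register() & mask)
    			return true;	/* e.g. TRANS_STATE_ENABLE came up */
    		clock_gettime(CLOCK_MONOTONIC, &now);
    	} while ((now.tv_sec - start.tv_sec) * 1000 +
    		 (now.tv_nsec - start.tv_nsec) / 1000000 < timeout_ms);
    	return false;	/* timed out: the caller logs DRM_ERROR */
    }

    int main(void)
    {
    	printf("transcoder %s\n",
    	       wait_for_bit(0x40000000, 100) ? "enabled" : "timed out");
    	return 0;
    }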
 static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable)
 {
-	struct intel_overlay *overlay;
-	int ret;
-
-	if (!enable && intel_crtc->overlay) {
-		overlay = intel_crtc->overlay;
-		mutex_lock(&overlay->dev->struct_mutex);
-		for (;;) {
-			ret = intel_overlay_switch_off(overlay);
-			if (ret == 0)
-				break;
-
-			ret = intel_overlay_recover_from_interrupt(overlay, 0);
-			if (ret != 0) {
-				/* overlay doesn't react anymore. Usually
-				 * results in a black screen and an unkillable
-				 * X server. */
-				BUG();
-				overlay->hw_wedged = HW_WEDGED;
-				break;
-			}
-		}
-		mutex_unlock(&overlay->dev->struct_mutex);
-	}
-	/* Let userspace switch the overlay on again. In most cases userspace
-	 * has to recompute where to put it anyway. */
-
-	return;
+	if (!enable && intel_crtc->overlay) {
+		struct drm_device *dev = intel_crtc->base.dev;
+
+		mutex_lock(&dev->struct_mutex);
+		(void) intel_overlay_switch_off(intel_crtc->overlay, false);
+		mutex_unlock(&dev->struct_mutex);
+	}
+
+	/* Let userspace switch the overlay on again. In most cases userspace
+	 * has to recompute where to put it anyway.
+	 */
 }
 
-static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
-{
-	struct drm_device *dev = crtc->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	int pipe = intel_crtc->pipe;
-	int plane = intel_crtc->plane;
-	int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
-	int dspcntr_reg = (plane == 0) ? DSPACNTR : DSPBCNTR;
-	int dspbase_reg = (plane == 0) ? DSPAADDR : DSPBADDR;
-	int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
-	u32 temp;
-
-	/* XXX: When our outputs are all unaware of DPMS modes other than off
-	 * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
-	 */
-	switch (mode) {
-	case DRM_MODE_DPMS_ON:
-	case DRM_MODE_DPMS_STANDBY:
-	case DRM_MODE_DPMS_SUSPEND:
-		/* Enable the DPLL */
-		temp = I915_READ(dpll_reg);
-		if ((temp & DPLL_VCO_ENABLE) == 0) {
-			I915_WRITE(dpll_reg, temp);
-			I915_READ(dpll_reg);
-			/* Wait for the clocks to stabilize. */
-			udelay(150);
-			I915_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
-			I915_READ(dpll_reg);
-			/* Wait for the clocks to stabilize. */
-			udelay(150);
-			I915_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
-			I915_READ(dpll_reg);
-			/* Wait for the clocks to stabilize. */
-			udelay(150);
-		}
-
-		/* Enable the pipe */
-		temp = I915_READ(pipeconf_reg);
-		if ((temp & PIPEACONF_ENABLE) == 0)
-			I915_WRITE(pipeconf_reg, temp | PIPEACONF_ENABLE);
-
-		/* Enable the plane */
-		temp = I915_READ(dspcntr_reg);
-		if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
-			I915_WRITE(dspcntr_reg, temp | DISPLAY_PLANE_ENABLE);
-			/* Flush the plane changes */
-			I915_WRITE(dspbase_reg, I915_READ(dspbase_reg));
-		}
-
-		intel_crtc_load_lut(crtc);
-
-		if ((IS_I965G(dev) || plane == 0))
-			intel_update_fbc(crtc, &crtc->mode);
-
-		/* Give the overlay scaler a chance to enable if it's on this pipe */
-		intel_crtc_dpms_overlay(intel_crtc, true);
-		break;
-	case DRM_MODE_DPMS_OFF:
-		/* Give the overlay scaler a chance to disable if it's on this pipe */
-		intel_crtc_dpms_overlay(intel_crtc, false);
-		drm_vblank_off(dev, pipe);
-
-		if (dev_priv->cfb_plane == plane &&
-		    dev_priv->display.disable_fbc)
-			dev_priv->display.disable_fbc(dev);
-
-		/* Disable display plane */
-		temp = I915_READ(dspcntr_reg);
-		if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
-			I915_WRITE(dspcntr_reg, temp & ~DISPLAY_PLANE_ENABLE);
-			/* Flush the plane changes */
-			I915_WRITE(dspbase_reg, I915_READ(dspbase_reg));
-			I915_READ(dspbase_reg);
-		}
-
-		/* Don't disable pipe A or pipe A PLLs if needed */
-		if (pipeconf_reg == PIPEACONF &&
-		    (dev_priv->quirks & QUIRK_PIPEA_FORCE)) {
-			/* Wait for vblank for the disable to take effect */
-			intel_wait_for_vblank(dev, pipe);
-			goto skip_pipe_off;
-		}
-
-		/* Next, disable display pipes */
-		temp = I915_READ(pipeconf_reg);
-		if ((temp & PIPEACONF_ENABLE) != 0) {
-			I915_WRITE(pipeconf_reg, temp & ~PIPEACONF_ENABLE);
-			I915_READ(pipeconf_reg);
-		}
-
-		/* Wait for the pipe to turn off */
-		intel_wait_for_pipe_off(dev, pipe);
-
-		temp = I915_READ(dpll_reg);
-		if ((temp & DPLL_VCO_ENABLE) != 0) {
-			I915_WRITE(dpll_reg, temp & ~DPLL_VCO_ENABLE);
-			I915_READ(dpll_reg);
-		}
-	skip_pipe_off:
-		/* Wait for the clocks to turn off. */
-		udelay(150);
+static void i9xx_crtc_enable(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	int pipe = intel_crtc->pipe;
+	int plane = intel_crtc->plane;
+	u32 reg, temp;
+
+	if (intel_crtc->active)
+		return;
+
+	intel_crtc->active = true;
+	intel_update_watermarks(dev);
+
+	/* Enable the DPLL */
+	reg = DPLL(pipe);
+	temp = I915_READ(reg);
+	if ((temp & DPLL_VCO_ENABLE) == 0) {
+		I915_WRITE(reg, temp);
+
+		/* Wait for the clocks to stabilize. */
+		POSTING_READ(reg);
+		udelay(150);
+
+		I915_WRITE(reg, temp | DPLL_VCO_ENABLE);
+
+		/* Wait for the clocks to stabilize. */
+		POSTING_READ(reg);
+		udelay(150);
+
+		I915_WRITE(reg, temp | DPLL_VCO_ENABLE);
+
+		/* Wait for the clocks to stabilize. */
+		POSTING_READ(reg);
+		udelay(150);
+	}
+
+	/* Enable the pipe */
+	reg = PIPECONF(pipe);
+	temp = I915_READ(reg);
+	if ((temp & PIPECONF_ENABLE) == 0)
+		I915_WRITE(reg, temp | PIPECONF_ENABLE);
+
+	/* Enable the plane */
+	reg = DSPCNTR(plane);
+	temp = I915_READ(reg);
+	if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
+		I915_WRITE(reg, temp | DISPLAY_PLANE_ENABLE);
+		intel_flush_display_plane(dev, plane);
+	}
+
+	intel_crtc_load_lut(crtc);
+	intel_update_fbc(dev);
+
+	/* Give the overlay scaler a chance to enable if it's on this pipe */
+	intel_crtc_dpms_overlay(intel_crtc, true);
+	intel_crtc_update_cursor(crtc, true);
+}
+
+static void i9xx_crtc_disable(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	int pipe = intel_crtc->pipe;
+	int plane = intel_crtc->plane;
+	u32 reg, temp;
+
+	if (!intel_crtc->active)
+		return;
+
+	/* Give the overlay scaler a chance to disable if it's on this pipe */
+	intel_crtc_wait_for_pending_flips(crtc);
+	drm_vblank_off(dev, pipe);
+	intel_crtc_dpms_overlay(intel_crtc, false);
+	intel_crtc_update_cursor(crtc, false);
+
+	if (dev_priv->cfb_plane == plane &&
+	    dev_priv->display.disable_fbc)
+		dev_priv->display.disable_fbc(dev);
+
+	/* Disable display plane */
+	reg = DSPCNTR(plane);
+	temp = I915_READ(reg);
+	if (temp & DISPLAY_PLANE_ENABLE) {
+		I915_WRITE(reg, temp & ~DISPLAY_PLANE_ENABLE);
+		/* Flush the plane changes */
+		intel_flush_display_plane(dev, plane);
+
+		/* Wait for vblank for the disable to take effect */
+		if (IS_GEN2(dev))
+			intel_wait_for_vblank(dev, pipe);
+	}
+
+	/* Don't disable pipe A or pipe A PLLs if needed */
+	if (pipe == 0 && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
+		goto done;
+
+	/* Next, disable display pipes */
+	reg = PIPECONF(pipe);
+	temp = I915_READ(reg);
+	if (temp & PIPECONF_ENABLE) {
+		I915_WRITE(reg, temp & ~PIPECONF_ENABLE);
+
+		/* Wait for the pipe to turn off */
+		POSTING_READ(reg);
+		intel_wait_for_pipe_off(dev, pipe);
+	}
+
+	reg = DPLL(pipe);
+	temp = I915_READ(reg);
+	if (temp & DPLL_VCO_ENABLE) {
+		I915_WRITE(reg, temp & ~DPLL_VCO_ENABLE);
+
+		/* Wait for the clocks to turn off. */
+		POSTING_READ(reg);
+		udelay(150);
+	}
+
+done:
+	intel_crtc->active = false;
+	intel_update_fbc(dev);
+	intel_update_watermarks(dev);
+	intel_clear_scanline_wait(dev);
+}
+
+static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+	/* XXX: When our outputs are all unaware of DPMS modes other than off
+	 * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
+	 */
+	switch (mode) {
+	case DRM_MODE_DPMS_ON:
+	case DRM_MODE_DPMS_STANDBY:
+	case DRM_MODE_DPMS_SUSPEND:
+		i9xx_crtc_enable(crtc);
+		break;
+	case DRM_MODE_DPMS_OFF:
+		i9xx_crtc_disable(crtc);
 		break;
 	}
 }
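Aside: the core of this refactor is that enable and disable become idempotent, guarded by the new intel_crtc->active flag, so dpms() can route every mode to one of them unconditionally. A standalone sketch of that pattern (names here are illustrative, not the driver's):

    #include <stdbool.h>
    #include <stdio.h>

    struct model_crtc {
    	bool active;
    };

    static void model_crtc_enable(struct model_crtc *c)
    {
    	if (c->active)
    		return;		/* already running: no-op */
    	c->active = true;
    	/* bookkeeping first (watermarks), then PLL -> pipe -> plane */
    	printf("bring-up\n");
    }

    static void model_crtc_disable(struct model_crtc *c)
    {
    	if (!c->active)
    		return;		/* already off: no-op */
    	/* tear down in reverse: plane -> pipe -> PLL, then bookkeeping */
    	printf("tear-down\n");
    	c->active = false;
    }

    int main(void)
    {
    	struct model_crtc crtc = { .active = false };
    	model_crtc_enable(&crtc);
    	model_crtc_enable(&crtc);	/* second call is a no-op */
    	model_crtc_disable(&crtc);
    	return 0;
    }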
@@ -2388,26 +2494,9 @@ static void intel_crtc_dpms(struct drm_crtc *crtc, int mode)
 		return;
 
 	intel_crtc->dpms_mode = mode;
-	intel_crtc->cursor_on = mode == DRM_MODE_DPMS_ON;
-
-	/* When switching on the display, ensure that SR is disabled
-	 * with multiple pipes prior to enabling to new pipe.
-	 *
-	 * When switching off the display, make sure the cursor is
-	 * properly hidden prior to disabling the pipe.
-	 */
-	if (mode == DRM_MODE_DPMS_ON)
-		intel_update_watermarks(dev);
-	else
-		intel_crtc_update_cursor(crtc);
 
 	dev_priv->display.dpms(crtc, mode);
 
-	if (mode == DRM_MODE_DPMS_ON)
-		intel_crtc_update_cursor(crtc);
-	else
-		intel_update_watermarks(dev);
-
 	if (!dev->primary->master)
 		return;
 
@@ -2432,16 +2521,46 @@ static void intel_crtc_dpms(struct drm_crtc *crtc, int mode)
 	}
 }
 
-static void intel_crtc_prepare (struct drm_crtc *crtc)
+static void intel_crtc_disable(struct drm_crtc *crtc)
 {
 	struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+	struct drm_device *dev = crtc->dev;
+
 	crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
+
+	if (crtc->fb) {
+		mutex_lock(&dev->struct_mutex);
+		i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj);
+		mutex_unlock(&dev->struct_mutex);
+	}
 }
 
-static void intel_crtc_commit (struct drm_crtc *crtc)
+/* Prepare for a mode set.
+ *
+ * Note we could be a lot smarter here.  We need to figure out which outputs
+ * will be enabled, which disabled (in short, how the config will changes)
+ * and perform the minimum necessary steps to accomplish that, e.g. updating
+ * watermarks, FBC configuration, making sure PLLs are programmed correctly,
+ * panel fitting is in the proper state, etc.
+ */
+static void i9xx_crtc_prepare(struct drm_crtc *crtc)
 {
-	struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
-	crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
+	i9xx_crtc_disable(crtc);
+}
+
+static void i9xx_crtc_commit(struct drm_crtc *crtc)
+{
+	i9xx_crtc_enable(crtc);
+}
+
+static void ironlake_crtc_prepare(struct drm_crtc *crtc)
+{
+	ironlake_crtc_disable(crtc);
+}
+
+static void ironlake_crtc_commit(struct drm_crtc *crtc)
+{
+	ironlake_crtc_enable(crtc);
 }
 
 void intel_encoder_prepare (struct drm_encoder *encoder)
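Aside: the new per-platform prepare/commit hooks map straight onto the full disable/enable routines instead of bouncing through dpms(). A simplified, self-contained model of that wiring — with stand-in types rather than the real DRM structures — might look like:

    #include <stdio.h>

    struct crtc { int id; };

    struct crtc_helper_funcs {
    	void (*prepare)(struct crtc *);	/* called before a mode set */
    	void (*commit)(struct crtc *);	/* called after a mode set */
    };

    static void platform_crtc_disable(struct crtc *c) { printf("off %d\n", c->id); }
    static void platform_crtc_enable(struct crtc *c)  { printf("on %d\n", c->id); }

    static const struct crtc_helper_funcs helper_funcs = {
    	.prepare = platform_crtc_disable,	/* mode set starts from "off" */
    	.commit  = platform_crtc_enable,	/* and lights the pipe back up */
    };

    int main(void)
    {
    	struct crtc c = { .id = 0 };
    	helper_funcs.prepare(&c);
    	/* ... a real helper would program the new mode here ... */
    	helper_funcs.commit(&c);
    	return 0;
    }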
@@ -2460,13 +2579,7 @@ void intel_encoder_commit (struct drm_encoder *encoder)
 
 void intel_encoder_destroy(struct drm_encoder *encoder)
 {
-	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
-
-	if (intel_encoder->ddc_bus)
-		intel_i2c_destroy(intel_encoder->ddc_bus);
-
-	if (intel_encoder->i2c_bus)
-		intel_i2c_destroy(intel_encoder->i2c_bus);
+	struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
 
 	drm_encoder_cleanup(encoder);
 	kfree(intel_encoder);
@@ -2557,33 +2670,6 @@ static int i830_get_display_clock_speed(struct drm_device *dev)
 	return 133000;
 }
 
-/**
- * Return the pipe currently connected to the panel fitter,
- * or -1 if the panel fitter is not present or not in use
- */
-int intel_panel_fitter_pipe (struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 pfit_control;
-
-	/* i830 doesn't have a panel fitter */
-	if (IS_I830(dev))
-		return -1;
-
-	pfit_control = I915_READ(PFIT_CONTROL);
-
-	/* See if the panel fitter is in use */
-	if ((pfit_control & PFIT_ENABLE) == 0)
-		return -1;
-
-	/* 965 can place panel fitter on either pipe */
-	if (IS_I965G(dev))
-		return (pfit_control >> 29) & 0x3;
-
-	/* older chips can only use pipe 1 */
-	return 1;
-}
-
 struct fdi_m_n {
 	u32 tu;
 	u32 gmch_m;
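Aside: for reference, the bitfield logic of the helper removed above, as a standalone sketch. The PFIT_ENABLE bit position (bit 31) is assumed from the i915_reg.h of this era; the pipe field in bits 30:29 comes from the deleted code itself:

    #include <stdio.h>
    #include <stdint.h>

    #define PFIT_ENABLE (1u << 31)	/* assumed register layout */

    /* Decode which pipe owns the panel fitter, or -1 if idle. */
    static int panel_fitter_pipe(uint32_t pfit_control, int is_i965)
    {
    	if ((pfit_control & PFIT_ENABLE) == 0)
    		return -1;			/* fitter not in use */
    	if (is_i965)
    		return (pfit_control >> 29) & 0x3; /* 965: either pipe */
    	return 1;				/* older chips: pipe B only */
    }

    int main(void)
    {
    	/* hypothetical value: enabled, pipe field = 1 */
    	printf("%d\n", panel_fitter_pipe(PFIT_ENABLE | (1u << 29), 1));
    	return 0;
    }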
@@ -2902,7 +2988,7 @@ static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
 	size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;
 
 	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
-			plane ? "B" : "A", size);
+		      plane ? "B" : "A", size);
 
 	return size;
 }
@@ -2919,7 +3005,7 @@ static int i85x_get_fifo_size(struct drm_device *dev, int plane)
 	size >>= 1; /* Convert to cachelines */
 
 	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
-			plane ? "B" : "A", size);
+		      plane ? "B" : "A", size);
 
 	return size;
 }
@@ -2934,8 +3020,8 @@ static int i845_get_fifo_size(struct drm_device *dev, int plane)
 	size >>= 2; /* Convert to cachelines */
 
 	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
-			plane ? "B" : "A",
-			size);
+		      plane ? "B" : "A",
+		      size);
 
 	return size;
 }
@@ -2950,14 +3036,14 @@ static int i830_get_fifo_size(struct drm_device *dev, int plane)
 	size >>= 1; /* Convert to cachelines */
 
 	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
-			plane ? "B" : "A", size);
+		      plane ? "B" : "A", size);
 
 	return size;
 }
 
 static void pineview_update_wm(struct drm_device *dev, int planea_clock,
-				int planeb_clock, int sr_hdisplay, int unused,
-				int pixel_size)
+			       int planeb_clock, int sr_hdisplay, int unused,
+			       int pixel_size)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	const struct cxsr_latency *latency;
@@ -3069,13 +3155,13 @@ static void g4x_update_wm(struct drm_device *dev, int planea_clock,
 
 	/* Use ns/us then divide to preserve precision */
 	sr_entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
-			pixel_size * sr_hdisplay;
+		pixel_size * sr_hdisplay;
 	sr_entries = DIV_ROUND_UP(sr_entries, cacheline_size);
 
 	entries_required = (((sr_latency_ns / line_time_us) +
-				1000) / 1000) * pixel_size * 64;
+			     1000) / 1000) * pixel_size * 64;
 	entries_required = DIV_ROUND_UP(entries_required,
-			g4x_cursor_wm_info.cacheline_size);
+					g4x_cursor_wm_info.cacheline_size);
 	cursor_sr = entries_required + g4x_cursor_wm_info.guard_size;
 
 	if (cursor_sr > g4x_cursor_wm_info.max_wm)
@@ -3087,7 +3173,7 @@ static void g4x_update_wm(struct drm_device *dev, int planea_clock,
 	} else {
 		/* Turn off self refresh if both pipes are enabled */
 		I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
-				& ~FW_BLC_SELF_EN);
+			   & ~FW_BLC_SELF_EN);
 	}
 
 	DRM_DEBUG("Setting FIFO watermarks - A: %d, B: %d, SR %d\n",
@@ -3125,7 +3211,7 @@ static void i965_update_wm(struct drm_device *dev, int planea_clock,
 
 		/* Use ns/us then divide to preserve precision */
 		sr_entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
-				pixel_size * sr_hdisplay;
+			pixel_size * sr_hdisplay;
 		sr_entries = DIV_ROUND_UP(sr_entries, I915_FIFO_LINE_SIZE);
 		DRM_DEBUG("self-refresh entries: %d\n", sr_entries);
 		srwm = I965_FIFO_SIZE - sr_entries;
@@ -3134,11 +3220,11 @@ static void i965_update_wm(struct drm_device *dev, int planea_clock,
 		srwm &= 0x1ff;
 
 		sr_entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
-				pixel_size * 64;
+			pixel_size * 64;
 		sr_entries = DIV_ROUND_UP(sr_entries,
-				i965_cursor_wm_info.cacheline_size);
+					  i965_cursor_wm_info.cacheline_size);
 		cursor_sr = i965_cursor_wm_info.fifo_size -
-				(sr_entries + i965_cursor_wm_info.guard_size);
+			    (sr_entries + i965_cursor_wm_info.guard_size);
 
 		if (cursor_sr > i965_cursor_wm_info.max_wm)
 			cursor_sr = i965_cursor_wm_info.max_wm;
@@ -3146,11 +3232,11 @@ static void i965_update_wm(struct drm_device *dev, int planea_clock,
 		DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
 			      "cursor %d\n", srwm, cursor_sr);
 
-		if (IS_I965GM(dev))
+		if (IS_CRESTLINE(dev))
 			I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
 	} else {
 		/* Turn off self refresh if both pipes are enabled */
-		if (IS_I965GM(dev))
+		if (IS_CRESTLINE(dev))
 			I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
 				   & ~FW_BLC_SELF_EN);
 	}
@@ -3180,9 +3266,9 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock,
 	int sr_clock, sr_entries = 0;
 
 	/* Create copies of the base settings for each pipe */
-	if (IS_I965GM(dev) || IS_I945GM(dev))
+	if (IS_CRESTLINE(dev) || IS_I945GM(dev))
 		planea_params = planeb_params = i945_wm_info;
-	else if (IS_I9XX(dev))
+	else if (!IS_GEN2(dev))
 		planea_params = planeb_params = i915_wm_info;
 	else
 		planea_params = planeb_params = i855_wm_info;
@@ -3217,7 +3303,7 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock,
 
 		/* Use ns/us then divide to preserve precision */
 		sr_entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
-				pixel_size * sr_hdisplay;
+			pixel_size * sr_hdisplay;
 		sr_entries = DIV_ROUND_UP(sr_entries, cacheline_size);
 		DRM_DEBUG_KMS("self-refresh entries: %d\n", sr_entries);
 		srwm = total_size - sr_entries;
@@ -3242,7 +3328,7 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock,
 	}
 
 	DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
-			planea_wm, planeb_wm, cwm, srwm);
+		      planea_wm, planeb_wm, cwm, srwm);
 
 	fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
 	fwater_hi = (cwm & 0x1f);
@@ -3276,146 +3362,130 @@ static void i830_update_wm(struct drm_device *dev, int planea_clock, int unused,
 #define ILK_LP0_PLANE_LATENCY	700
 #define ILK_LP0_CURSOR_LATENCY	1300
 
-static void ironlake_update_wm(struct drm_device *dev, int planea_clock,
-			       int planeb_clock, int sr_hdisplay, int sr_htotal,
-			       int pixel_size)
+static bool ironlake_compute_wm0(struct drm_device *dev,
+				 int pipe,
+				 int *plane_wm,
+				 int *cursor_wm)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
-	int sr_wm, cursor_wm;
-	unsigned long line_time_us;
-	int sr_clock, entries_required;
-	u32 reg_value;
-	int line_count;
-	int planea_htotal = 0, planeb_htotal = 0;
 	struct drm_crtc *crtc;
+	int htotal, hdisplay, clock, pixel_size = 0;
+	int line_time_us, line_count, entries;
 
-	/* Need htotal for all active display plane */
-	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-		if (intel_crtc->dpms_mode == DRM_MODE_DPMS_ON) {
-			if (intel_crtc->plane == 0)
-				planea_htotal = crtc->mode.htotal;
-			else
-				planeb_htotal = crtc->mode.htotal;
-		}
-	}
-
-	/* Calculate and update the watermark for plane A */
-	if (planea_clock) {
-		entries_required = ((planea_clock / 1000) * pixel_size *
-				     ILK_LP0_PLANE_LATENCY) / 1000;
-		entries_required = DIV_ROUND_UP(entries_required,
-						ironlake_display_wm_info.cacheline_size);
-		planea_wm = entries_required +
-			    ironlake_display_wm_info.guard_size;
-
-		if (planea_wm > (int)ironlake_display_wm_info.max_wm)
-			planea_wm = ironlake_display_wm_info.max_wm;
-
-		/* Use the large buffer method to calculate cursor watermark */
-		line_time_us = (planea_htotal * 1000) / planea_clock;
-
-		/* Use ns/us then divide to preserve precision */
-		line_count = (ILK_LP0_CURSOR_LATENCY / line_time_us + 1000) / 1000;
-
-		/* calculate the cursor watermark for cursor A */
-		entries_required = line_count * 64 * pixel_size;
-		entries_required = DIV_ROUND_UP(entries_required,
-						ironlake_cursor_wm_info.cacheline_size);
-		cursora_wm = entries_required + ironlake_cursor_wm_info.guard_size;
-		if (cursora_wm > ironlake_cursor_wm_info.max_wm)
-			cursora_wm = ironlake_cursor_wm_info.max_wm;
-
-		reg_value = I915_READ(WM0_PIPEA_ILK);
-		reg_value &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
-		reg_value |= (planea_wm << WM0_PIPE_PLANE_SHIFT) |
-			     (cursora_wm & WM0_PIPE_CURSOR_MASK);
-		I915_WRITE(WM0_PIPEA_ILK, reg_value);
-		DRM_DEBUG_KMS("FIFO watermarks For pipe A - plane %d, "
-			      "cursor: %d\n", planea_wm, cursora_wm);
-	}
-	/* Calculate and update the watermark for plane B */
-	if (planeb_clock) {
-		entries_required = ((planeb_clock / 1000) * pixel_size *
-				     ILK_LP0_PLANE_LATENCY) / 1000;
-		entries_required = DIV_ROUND_UP(entries_required,
-						ironlake_display_wm_info.cacheline_size);
-		planeb_wm = entries_required +
-			    ironlake_display_wm_info.guard_size;
-
-		if (planeb_wm > (int)ironlake_display_wm_info.max_wm)
-			planeb_wm = ironlake_display_wm_info.max_wm;
-
-		/* Use the large buffer method to calculate cursor watermark */
-		line_time_us = (planeb_htotal * 1000) / planeb_clock;
-
-		/* Use ns/us then divide to preserve precision */
-		line_count = (ILK_LP0_CURSOR_LATENCY / line_time_us + 1000) / 1000;
-
-		/* calculate the cursor watermark for cursor B */
-		entries_required = line_count * 64 * pixel_size;
-		entries_required = DIV_ROUND_UP(entries_required,
-						ironlake_cursor_wm_info.cacheline_size);
-		cursorb_wm = entries_required + ironlake_cursor_wm_info.guard_size;
-		if (cursorb_wm > ironlake_cursor_wm_info.max_wm)
-			cursorb_wm = ironlake_cursor_wm_info.max_wm;
-
-		reg_value = I915_READ(WM0_PIPEB_ILK);
-		reg_value &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
-		reg_value |= (planeb_wm << WM0_PIPE_PLANE_SHIFT) |
-			     (cursorb_wm & WM0_PIPE_CURSOR_MASK);
-		I915_WRITE(WM0_PIPEB_ILK, reg_value);
-		DRM_DEBUG_KMS("FIFO watermarks For pipe B - plane %d, "
-			      "cursor: %d\n", planeb_wm, cursorb_wm);
+	crtc = intel_get_crtc_for_pipe(dev, pipe);
+	if (crtc->fb == NULL || !crtc->enabled)
+		return false;
+
+	htotal = crtc->mode.htotal;
+	hdisplay = crtc->mode.hdisplay;
+	clock = crtc->mode.clock;
+	pixel_size = crtc->fb->bits_per_pixel / 8;
+
+	/* Use the small buffer method to calculate plane watermark */
+	entries = ((clock * pixel_size / 1000) * ILK_LP0_PLANE_LATENCY) / 1000;
+	entries = DIV_ROUND_UP(entries,
+			       ironlake_display_wm_info.cacheline_size);
+	*plane_wm = entries + ironlake_display_wm_info.guard_size;
+	if (*plane_wm > (int)ironlake_display_wm_info.max_wm)
+		*plane_wm = ironlake_display_wm_info.max_wm;
+
+	/* Use the large buffer method to calculate cursor watermark */
+	line_time_us = ((htotal * 1000) / clock);
+	line_count = (ILK_LP0_CURSOR_LATENCY / line_time_us + 1000) / 1000;
+	entries = line_count * 64 * pixel_size;
+	entries = DIV_ROUND_UP(entries,
+			       ironlake_cursor_wm_info.cacheline_size);
+	*cursor_wm = entries + ironlake_cursor_wm_info.guard_size;
+	if (*cursor_wm > ironlake_cursor_wm_info.max_wm)
+		*cursor_wm = ironlake_cursor_wm_info.max_wm;
+
+	return true;
+}
+
+static void ironlake_update_wm(struct drm_device *dev,
+			       int planea_clock, int planeb_clock,
+			       int sr_hdisplay, int sr_htotal,
+			       int pixel_size)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int plane_wm, cursor_wm, enabled;
+	int tmp;
+
+	enabled = 0;
+	if (ironlake_compute_wm0(dev, 0, &plane_wm, &cursor_wm)) {
+		I915_WRITE(WM0_PIPEA_ILK,
+			   (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
+		DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
+			      " plane %d, " "cursor: %d\n",
+			      plane_wm, cursor_wm);
+		enabled++;
+	}
+
+	if (ironlake_compute_wm0(dev, 1, &plane_wm, &cursor_wm)) {
+		I915_WRITE(WM0_PIPEB_ILK,
+			   (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
+		DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
+			      " plane %d, cursor: %d\n",
+			      plane_wm, cursor_wm);
+		enabled++;
 	}
 
 	/*
 	 * Calculate and update the self-refresh watermark only when one
 	 * display plane is used.
 	 */
-	if (!planea_clock || !planeb_clock) {
-
+	tmp = 0;
+	if (enabled == 1 && /* XXX disabled due to buggy implmentation? */ 0) {
+		unsigned long line_time_us;
+		int small, large, plane_fbc;
+		int sr_clock, entries;
+		int line_count, line_size;
 		/* Read the self-refresh latency. The unit is 0.5us */
 		int ilk_sr_latency = I915_READ(MLTR_ILK) & ILK_SRLT_MASK;
 
 		sr_clock = planea_clock ? planea_clock : planeb_clock;
-		line_time_us = ((sr_htotal * 1000) / sr_clock);
+		line_time_us = (sr_htotal * 1000) / sr_clock;
 
 		/* Use ns/us then divide to preserve precision */
 		line_count = ((ilk_sr_latency * 500) / line_time_us + 1000)
 			/ 1000;
+		line_size = sr_hdisplay * pixel_size;
 
-		/* calculate the self-refresh watermark for display plane */
-		entries_required = line_count * sr_hdisplay * pixel_size;
-		entries_required = DIV_ROUND_UP(entries_required,
-						ironlake_display_srwm_info.cacheline_size);
-		sr_wm = entries_required +
-			ironlake_display_srwm_info.guard_size;
-
-		/* calculate the self-refresh watermark for display cursor */
-		entries_required = line_count * pixel_size * 64;
-		entries_required = DIV_ROUND_UP(entries_required,
-						ironlake_cursor_srwm_info.cacheline_size);
-		cursor_wm = entries_required +
-			    ironlake_cursor_srwm_info.guard_size;
-
-		/* configure watermark and enable self-refresh */
-		reg_value = I915_READ(WM1_LP_ILK);
-		reg_value &= ~(WM1_LP_LATENCY_MASK | WM1_LP_SR_MASK |
-			       WM1_LP_CURSOR_MASK);
-		reg_value |= (ilk_sr_latency << WM1_LP_LATENCY_SHIFT) |
-			     (sr_wm << WM1_LP_SR_SHIFT) | cursor_wm;
-
-		I915_WRITE(WM1_LP_ILK, reg_value);
-		DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
-			      "cursor %d\n", sr_wm, cursor_wm);
-
-	} else {
-		/* Turn off self refresh if both pipes are enabled */
-		I915_WRITE(WM1_LP_ILK, I915_READ(WM1_LP_ILK) & ~WM1_LP_SR_EN);
-	}
+		/* Use the minimum of the small and large buffer method for primary */
+		small = ((sr_clock * pixel_size / 1000) * (ilk_sr_latency * 500)) / 1000;
+		large = line_count * line_size;
+
+		entries = DIV_ROUND_UP(min(small, large),
+				       ironlake_display_srwm_info.cacheline_size);
+
+		plane_fbc = entries * 64;
+		plane_fbc = DIV_ROUND_UP(plane_fbc, line_size);
+
+		plane_wm = entries + ironlake_display_srwm_info.guard_size;
+		if (plane_wm > (int)ironlake_display_srwm_info.max_wm)
+			plane_wm = ironlake_display_srwm_info.max_wm;
+
+		/* calculate the self-refresh watermark for display cursor */
+		entries = line_count * pixel_size * 64;
+		entries = DIV_ROUND_UP(entries,
+				       ironlake_cursor_srwm_info.cacheline_size);
+
+		cursor_wm = entries + ironlake_cursor_srwm_info.guard_size;
+		if (cursor_wm > (int)ironlake_cursor_srwm_info.max_wm)
+			cursor_wm = ironlake_cursor_srwm_info.max_wm;
+
+		/* configure watermark and enable self-refresh */
+		tmp = (WM1_LP_SR_EN |
+		       (ilk_sr_latency << WM1_LP_LATENCY_SHIFT) |
+		       (plane_fbc << WM1_LP_FBC_SHIFT) |
+		       (plane_wm << WM1_LP_SR_SHIFT) |
+		       cursor_wm);
+		DRM_DEBUG_KMS("self-refresh watermark: display plane %d, fbc lines %d,"
+			      " cursor %d\n", plane_wm, plane_fbc, cursor_wm);
+	}
+	I915_WRITE(WM1_LP_ILK, tmp);
+	/* XXX setup WM2 and WM3 */
 }
+
 /**
  * intel_update_watermarks - update FIFO watermark values based on current modes
  *
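Aside: the LP0 plane watermark in ironlake_compute_wm0() above is just "FIFO entries needed to ride out the latency window, rounded up to cachelines, plus a guard". A standalone model of that arithmetic; the guard, cacheline, and max values are hypothetical parameters here, not the driver's tables:

    #include <stdio.h>

    #define ILK_LP0_PLANE_LATENCY 700	/* ns, as in the diff */

    /* Mirrors the small-buffer method: bytes fetched during the latency,
     * converted to cachelines (rounded up), plus a guard, clamped. */
    static int plane_wm(int clock_khz, int pixel_size,
    		    int cacheline, int guard, int max_wm)
    {
    	int entries = ((clock_khz * pixel_size / 1000) *
    		       ILK_LP0_PLANE_LATENCY) / 1000;
    	entries = (entries + cacheline - 1) / cacheline; /* DIV_ROUND_UP */
    	int wm = entries + guard;
    	return wm > max_wm ? max_wm : wm;
    }

    int main(void)
    {
    	/* hypothetical numbers: 148.5 MHz dot clock, 32bpp, 64-byte lines */
    	printf("plane wm = %d\n", plane_wm(148500, 4, 64, 2, 127));
    	return 0;
    }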
@@ -3447,7 +3517,7 @@ static void ironlake_update_wm(struct drm_device *dev, int planea_clock,
  *
  * We don't use the sprite, so we can ignore that.  And on Crestline we have
  * to set the non-SR watermarks to 8.
- */
+ */
 static void intel_update_watermarks(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3463,15 +3533,15 @@ static void intel_update_watermarks(struct drm_device *dev)
 	/* Get the clock config from both planes */
 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
 		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-		if (intel_crtc->dpms_mode == DRM_MODE_DPMS_ON) {
+		if (intel_crtc->active) {
 			enabled++;
 			if (intel_crtc->plane == 0) {
 				DRM_DEBUG_KMS("plane A (pipe %d) clock: %d\n",
-						intel_crtc->pipe, crtc->mode.clock);
+					      intel_crtc->pipe, crtc->mode.clock);
 				planea_clock = crtc->mode.clock;
 			} else {
 				DRM_DEBUG_KMS("plane B (pipe %d) clock: %d\n",
-						intel_crtc->pipe, crtc->mode.clock);
+					      intel_crtc->pipe, crtc->mode.clock);
 				planeb_clock = crtc->mode.clock;
 			}
 			sr_hdisplay = crtc->mode.hdisplay;
@@ -3502,62 +3572,35 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	int pipe = intel_crtc->pipe;
 	int plane = intel_crtc->plane;
-	int fp_reg = (pipe == 0) ? FPA0 : FPB0;
-	int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
-	int dpll_md_reg = (intel_crtc->pipe == 0) ? DPLL_A_MD : DPLL_B_MD;
-	int dspcntr_reg = (plane == 0) ? DSPACNTR : DSPBCNTR;
-	int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
-	int htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
-	int hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B;
-	int hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B;
-	int vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B;
-	int vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B;
-	int vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B;
-	int dspsize_reg = (plane == 0) ? DSPASIZE : DSPBSIZE;
-	int dsppos_reg = (plane == 0) ? DSPAPOS : DSPBPOS;
-	int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC;
+	u32 fp_reg, dpll_reg;
 	int refclk, num_connectors = 0;
 	intel_clock_t clock, reduced_clock;
-	u32 dpll = 0, fp = 0, fp2 = 0, dspcntr, pipeconf;
+	u32 dpll, fp = 0, fp2 = 0, dspcntr, pipeconf;
 	bool ok, has_reduced_clock = false, is_sdvo = false, is_dvo = false;
 	bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
 	struct intel_encoder *has_edp_encoder = NULL;
 	struct drm_mode_config *mode_config = &dev->mode_config;
-	struct drm_encoder *encoder;
+	struct intel_encoder *encoder;
 	const intel_limit_t *limit;
 	int ret;
 	struct fdi_m_n m_n = {0};
-	int data_m1_reg = (pipe == 0) ? PIPEA_DATA_M1 : PIPEB_DATA_M1;
-	int data_n1_reg = (pipe == 0) ? PIPEA_DATA_N1 : PIPEB_DATA_N1;
-	int link_m1_reg = (pipe == 0) ? PIPEA_LINK_M1 : PIPEB_LINK_M1;
-	int link_n1_reg = (pipe == 0) ? PIPEA_LINK_N1 : PIPEB_LINK_N1;
-	int pch_fp_reg = (pipe == 0) ? PCH_FPA0 : PCH_FPB0;
-	int pch_dpll_reg = (pipe == 0) ? PCH_DPLL_A : PCH_DPLL_B;
-	int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL;
-	int fdi_tx_reg = (pipe == 0) ? FDI_TXA_CTL : FDI_TXB_CTL;
-	int trans_dpll_sel = (pipe == 0) ? 0 : 1;
-	int lvds_reg = LVDS;
-	u32 temp;
-	int sdvo_pixel_multiply;
+	u32 reg, temp;
 	int target_clock;
 
 	drm_vblank_pre_modeset(dev, pipe);
 
-	list_for_each_entry(encoder, &mode_config->encoder_list, head) {
-		struct intel_encoder *intel_encoder;
-
-		if (encoder->crtc != crtc)
+	list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
+		if (encoder->base.crtc != crtc)
 			continue;
 
-		intel_encoder = enc_to_intel_encoder(encoder);
-		switch (intel_encoder->type) {
+		switch (encoder->type) {
 		case INTEL_OUTPUT_LVDS:
 			is_lvds = true;
 			break;
 		case INTEL_OUTPUT_SDVO:
 		case INTEL_OUTPUT_HDMI:
 			is_sdvo = true;
-			if (intel_encoder->needs_tv_clock)
+			if (encoder->needs_tv_clock)
 				is_tv = true;
 			break;
 		case INTEL_OUTPUT_DVO:
@@ -3573,7 +3616,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3573 is_dp = true; 3616 is_dp = true;
3574 break; 3617 break;
3575 case INTEL_OUTPUT_EDP: 3618 case INTEL_OUTPUT_EDP:
3576 has_edp_encoder = intel_encoder; 3619 has_edp_encoder = encoder;
3577 break; 3620 break;
3578 } 3621 }
3579 3622
@@ -3583,15 +3626,15 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3583 if (is_lvds && dev_priv->lvds_use_ssc && num_connectors < 2) { 3626 if (is_lvds && dev_priv->lvds_use_ssc && num_connectors < 2) {
3584 refclk = dev_priv->lvds_ssc_freq * 1000; 3627 refclk = dev_priv->lvds_ssc_freq * 1000;
3585 DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n", 3628 DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
3586 refclk / 1000); 3629 refclk / 1000);
3587 } else if (IS_I9XX(dev)) { 3630 } else if (!IS_GEN2(dev)) {
3588 refclk = 96000; 3631 refclk = 96000;
3589 if (HAS_PCH_SPLIT(dev)) 3632 if (HAS_PCH_SPLIT(dev) &&
3633 (!has_edp_encoder || intel_encoder_is_pch_edp(&has_edp_encoder->base)))
3590 refclk = 120000; /* 120Mhz refclk */ 3634 refclk = 120000; /* 120Mhz refclk */
3591 } else { 3635 } else {
3592 refclk = 48000; 3636 refclk = 48000;
3593 } 3637 }
3594
3595 3638
3596 /* 3639 /*
3597 * Returns a set of divisors for the desired target clock with the given 3640 * Returns a set of divisors for the desired target clock with the given
@@ -3607,13 +3650,13 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3607 } 3650 }
3608 3651
3609 /* Ensure that the cursor is valid for the new mode before changing... */ 3652 /* Ensure that the cursor is valid for the new mode before changing... */
3610 intel_crtc_update_cursor(crtc); 3653 intel_crtc_update_cursor(crtc, true);
3611 3654
3612 if (is_lvds && dev_priv->lvds_downclock_avail) { 3655 if (is_lvds && dev_priv->lvds_downclock_avail) {
3613 has_reduced_clock = limit->find_pll(limit, crtc, 3656 has_reduced_clock = limit->find_pll(limit, crtc,
3614 dev_priv->lvds_downclock, 3657 dev_priv->lvds_downclock,
3615 refclk, 3658 refclk,
3616 &reduced_clock); 3659 &reduced_clock);
3617 if (has_reduced_clock && (clock.p != reduced_clock.p)) { 3660 if (has_reduced_clock && (clock.p != reduced_clock.p)) {
3618 /* 3661 /*
3619 * If the different P is found, it means that we can't 3662 * If the different P is found, it means that we can't
@@ -3622,7 +3665,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3622 * feature. 3665 * feature.
3623 */ 3666 */
3624 DRM_DEBUG_KMS("Different P is found for " 3667 DRM_DEBUG_KMS("Different P is found for "
3625 "LVDS clock/downclock\n"); 3668 "LVDS clock/downclock\n");
3626 has_reduced_clock = 0; 3669 has_reduced_clock = 0;
3627 } 3670 }
3628 } 3671 }
@@ -3630,14 +3673,14 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3630 this mirrors vbios setting. */ 3673 this mirrors vbios setting. */
3631 if (is_sdvo && is_tv) { 3674 if (is_sdvo && is_tv) {
3632 if (adjusted_mode->clock >= 100000 3675 if (adjusted_mode->clock >= 100000
3633 && adjusted_mode->clock < 140500) { 3676 && adjusted_mode->clock < 140500) {
3634 clock.p1 = 2; 3677 clock.p1 = 2;
3635 clock.p2 = 10; 3678 clock.p2 = 10;
3636 clock.n = 3; 3679 clock.n = 3;
3637 clock.m1 = 16; 3680 clock.m1 = 16;
3638 clock.m2 = 8; 3681 clock.m2 = 8;
3639 } else if (adjusted_mode->clock >= 140500 3682 } else if (adjusted_mode->clock >= 140500
3640 && adjusted_mode->clock <= 200000) { 3683 && adjusted_mode->clock <= 200000) {
3641 clock.p1 = 1; 3684 clock.p1 = 1;
3642 clock.p2 = 10; 3685 clock.p2 = 10;
3643 clock.n = 6; 3686 clock.n = 6;
@@ -3649,34 +3692,41 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3649 /* FDI link */ 3692 /* FDI link */
3650 if (HAS_PCH_SPLIT(dev)) { 3693 if (HAS_PCH_SPLIT(dev)) {
3651 int lane = 0, link_bw, bpp; 3694 int lane = 0, link_bw, bpp;
3652 /* eDP doesn't require FDI link, so just set DP M/N 3695 /* CPU eDP doesn't require FDI link, so just set DP M/N
3653 according to current link config */ 3696 according to current link config */
3654 if (has_edp_encoder) { 3697 if (has_edp_encoder && !intel_encoder_is_pch_edp(&encoder->base)) {
3655 target_clock = mode->clock; 3698 target_clock = mode->clock;
3656 intel_edp_link_config(has_edp_encoder, 3699 intel_edp_link_config(has_edp_encoder,
3657 &lane, &link_bw); 3700 &lane, &link_bw);
3658 } else { 3701 } else {
3659 /* DP over FDI requires target mode clock 3702 /* [e]DP over FDI requires target mode clock
3660 instead of link clock */ 3703 instead of link clock */
3661 if (is_dp) 3704 if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base))
3662 target_clock = mode->clock; 3705 target_clock = mode->clock;
3663 else 3706 else
3664 target_clock = adjusted_mode->clock; 3707 target_clock = adjusted_mode->clock;
3665 link_bw = 270000; 3708
3709 /* FDI is a binary signal running at ~2.7GHz, encoding
3710 * each output octet as 10 bits. The actual frequency
3711 * is stored as a divider into a 100MHz clock, and the
3712 * mode pixel clock is stored in units of 1KHz.
3713 * Hence the bw of each lane in terms of the mode signal
3714 * is:
3715 */
3716 link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10;
3666 } 3717 }
3667 3718
3668 /* determine panel color depth */ 3719 /* determine panel color depth */
3669 temp = I915_READ(pipeconf_reg); 3720 temp = I915_READ(PIPECONF(pipe));
3670 temp &= ~PIPE_BPC_MASK; 3721 temp &= ~PIPE_BPC_MASK;
3671 if (is_lvds) { 3722 if (is_lvds) {
3672 int lvds_reg = I915_READ(PCH_LVDS);
3673 /* the BPC will be 6 if it is 18-bit LVDS panel */ 3723 /* the BPC will be 6 if it is 18-bit LVDS panel */
3674 if ((lvds_reg & LVDS_A3_POWER_MASK) == LVDS_A3_POWER_UP) 3724 if ((I915_READ(PCH_LVDS) & LVDS_A3_POWER_MASK) == LVDS_A3_POWER_UP)
3675 temp |= PIPE_8BPC; 3725 temp |= PIPE_8BPC;
3676 else 3726 else
3677 temp |= PIPE_6BPC; 3727 temp |= PIPE_6BPC;
3678 } else if (has_edp_encoder || (is_dp && intel_pch_has_edp(crtc))) { 3728 } else if (has_edp_encoder) {
3679 switch (dev_priv->edp_bpp/3) { 3729 switch (dev_priv->edp.bpp/3) {
3680 case 8: 3730 case 8:
3681 temp |= PIPE_8BPC; 3731 temp |= PIPE_8BPC;
3682 break; 3732 break;
@@ -3692,8 +3742,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3692 } 3742 }
3693 } else 3743 } else
3694 temp |= PIPE_8BPC; 3744 temp |= PIPE_8BPC;
3695 I915_WRITE(pipeconf_reg, temp); 3745 I915_WRITE(PIPECONF(pipe), temp);
3696 I915_READ(pipeconf_reg);
3697 3746
3698 switch (temp & PIPE_BPC_MASK) { 3747 switch (temp & PIPE_BPC_MASK) {
3699 case PIPE_8BPC: 3748 case PIPE_8BPC:
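
The new comment above replaces the magic constant 270000 with a derivation. For readers checking the arithmetic, here is a standalone sketch of the same calculation; the divider value 27 is an illustrative assumption (the driver reads the real one from the FDI PLL via intel_fdi_link_freq()):

    #include <stdio.h>

    int main(void)
    {
        /* divider stored against a 100 MHz reference: 27 * 100 MHz = 2.7 GHz */
        unsigned long fdi_divider = 27;
        unsigned long bit_rate_khz = fdi_divider * 100000UL;

        /* 8b/10b encoding: each output octet costs 10 bits on the wire */
        unsigned long link_bw = bit_rate_khz / 10;

        /* prints 270000, the per-lane bandwidth in the same 1 kHz units
         * as the mode pixel clock -- exactly the old hard-coded value */
        printf("link_bw = %lu\n", link_bw);
        return 0;
    }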
@@ -3738,33 +3787,39 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
 		/* Always enable nonspread source */
 		temp &= ~DREF_NONSPREAD_SOURCE_MASK;
 		temp |= DREF_NONSPREAD_SOURCE_ENABLE;
-		I915_WRITE(PCH_DREF_CONTROL, temp);
-		POSTING_READ(PCH_DREF_CONTROL);
-
 		temp &= ~DREF_SSC_SOURCE_MASK;
 		temp |= DREF_SSC_SOURCE_ENABLE;
 		I915_WRITE(PCH_DREF_CONTROL, temp);
-		POSTING_READ(PCH_DREF_CONTROL);

+		POSTING_READ(PCH_DREF_CONTROL);
 		udelay(200);

 		if (has_edp_encoder) {
 			if (dev_priv->lvds_use_ssc) {
 				temp |= DREF_SSC1_ENABLE;
 				I915_WRITE(PCH_DREF_CONTROL, temp);
-				POSTING_READ(PCH_DREF_CONTROL);

-				udelay(200);
-
-				temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
-				temp |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
-				I915_WRITE(PCH_DREF_CONTROL, temp);
 				POSTING_READ(PCH_DREF_CONTROL);
+				udelay(200);
+			}
+			temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
+
+			/* Enable CPU source on CPU attached eDP */
+			if (!intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
+				if (dev_priv->lvds_use_ssc)
+					temp |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
+				else
+					temp |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
 			} else {
-				temp |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
-				I915_WRITE(PCH_DREF_CONTROL, temp);
-				POSTING_READ(PCH_DREF_CONTROL);
+				/* Enable SSC on PCH eDP if needed */
+				if (dev_priv->lvds_use_ssc) {
+					DRM_ERROR("enabling SSC on PCH\n");
+					temp |= DREF_SUPERSPREAD_SOURCE_ENABLE;
+				}
 			}
+			I915_WRITE(PCH_DREF_CONTROL, temp);
+			POSTING_READ(PCH_DREF_CONTROL);
+			udelay(200);
 		}
 	}

@@ -3780,23 +3835,26 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
 			reduced_clock.m2;
 	}

+	dpll = 0;
 	if (!HAS_PCH_SPLIT(dev))
 		dpll = DPLL_VGA_MODE_DIS;

-	if (IS_I9XX(dev)) {
+	if (!IS_GEN2(dev)) {
 		if (is_lvds)
 			dpll |= DPLLB_MODE_LVDS;
 		else
 			dpll |= DPLLB_MODE_DAC_SERIAL;
 		if (is_sdvo) {
+			int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
+			if (pixel_multiplier > 1) {
+				if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
+					dpll |= (pixel_multiplier - 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
+				else if (HAS_PCH_SPLIT(dev))
+					dpll |= (pixel_multiplier - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
+			}
 			dpll |= DPLL_DVO_HIGH_SPEED;
-			sdvo_pixel_multiply = adjusted_mode->clock / mode->clock;
-			if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
-				dpll |= (sdvo_pixel_multiply - 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
-			else if (HAS_PCH_SPLIT(dev))
-				dpll |= (sdvo_pixel_multiply - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
 		}
-		if (is_dp)
+		if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base))
 			dpll |= DPLL_DVO_HIGH_SPEED;

 		/* compute bitmask from p1 value */
@@ -3780,23 +3835,26 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3780 reduced_clock.m2; 3835 reduced_clock.m2;
3781 } 3836 }
3782 3837
3838 dpll = 0;
3783 if (!HAS_PCH_SPLIT(dev)) 3839 if (!HAS_PCH_SPLIT(dev))
3784 dpll = DPLL_VGA_MODE_DIS; 3840 dpll = DPLL_VGA_MODE_DIS;
3785 3841
3786 if (IS_I9XX(dev)) { 3842 if (!IS_GEN2(dev)) {
3787 if (is_lvds) 3843 if (is_lvds)
3788 dpll |= DPLLB_MODE_LVDS; 3844 dpll |= DPLLB_MODE_LVDS;
3789 else 3845 else
3790 dpll |= DPLLB_MODE_DAC_SERIAL; 3846 dpll |= DPLLB_MODE_DAC_SERIAL;
3791 if (is_sdvo) { 3847 if (is_sdvo) {
3848 int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
3849 if (pixel_multiplier > 1) {
3850 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
3851 dpll |= (pixel_multiplier - 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
3852 else if (HAS_PCH_SPLIT(dev))
3853 dpll |= (pixel_multiplier - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
3854 }
3792 dpll |= DPLL_DVO_HIGH_SPEED; 3855 dpll |= DPLL_DVO_HIGH_SPEED;
3793 sdvo_pixel_multiply = adjusted_mode->clock / mode->clock;
3794 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
3795 dpll |= (sdvo_pixel_multiply - 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
3796 else if (HAS_PCH_SPLIT(dev))
3797 dpll |= (sdvo_pixel_multiply - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
3798 } 3856 }
3799 if (is_dp) 3857 if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base))
3800 dpll |= DPLL_DVO_HIGH_SPEED; 3858 dpll |= DPLL_DVO_HIGH_SPEED;
3801 3859
3802 /* compute bitmask from p1 value */ 3860 /* compute bitmask from p1 value */
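
The pixel-multiplier block above encodes the multiplier minus one into a DPLL bitfield, so a multiplier of 1 programs as all-zero bits and needs no write at all. A minimal sketch of that encoding, assuming the shift value 4 that SDVO_MULTIPLIER_SHIFT_HIRES carries in i915_reg.h of this era:

    #include <stdint.h>

    #define SDVO_MULTIPLIER_SHIFT_HIRES 4  /* assumed; see i915_reg.h */

    /* Returns the DPLL bits for a given SDVO pixel multiplier. */
    static uint32_t sdvo_multiplier_bits(int pixel_multiplier)
    {
        if (pixel_multiplier <= 1)
            return 0;  /* multiplier 1 encodes as 0: nothing to set */
        return (uint32_t)(pixel_multiplier - 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
    }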
@@ -3824,7 +3882,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
 			dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
 			break;
 		}
-		if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev))
+		if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
 			dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
 	} else {
 		if (is_lvds) {
@@ -3851,7 +3909,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
 		dpll |= PLL_REF_INPUT_DREFCLK;

 	/* setup pipeconf */
-	pipeconf = I915_READ(pipeconf_reg);
+	pipeconf = I915_READ(PIPECONF(pipe));

 	/* Set up the display plane register */
 	dspcntr = DISPPLANE_GAMMA_ENABLE;
@@ -3865,7 +3923,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
 			dspcntr |= DISPPLANE_SEL_PIPE_B;
 	}

-	if (pipe == 0 && !IS_I965G(dev)) {
+	if (pipe == 0 && INTEL_INFO(dev)->gen < 4) {
 		/* Enable pixel doubling when the dot clock is > 90% of the (display)
 		 * core speed.
 		 *
@@ -3874,51 +3932,47 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
 		 */
 		if (mode->clock >
 		    dev_priv->display.get_display_clock_speed(dev) * 9 / 10)
-			pipeconf |= PIPEACONF_DOUBLE_WIDE;
+			pipeconf |= PIPECONF_DOUBLE_WIDE;
 		else
-			pipeconf &= ~PIPEACONF_DOUBLE_WIDE;
+			pipeconf &= ~PIPECONF_DOUBLE_WIDE;
 	}

 	dspcntr |= DISPLAY_PLANE_ENABLE;
-	pipeconf |= PIPEACONF_ENABLE;
+	pipeconf |= PIPECONF_ENABLE;
 	dpll |= DPLL_VCO_ENABLE;

-
-	/* Disable the panel fitter if it was on our pipe */
-	if (!HAS_PCH_SPLIT(dev) && intel_panel_fitter_pipe(dev) == pipe)
-		I915_WRITE(PFIT_CONTROL, 0);
-
 	DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
 	drm_mode_debug_printmodeline(mode);

 	/* assign to Ironlake registers */
 	if (HAS_PCH_SPLIT(dev)) {
-		fp_reg = pch_fp_reg;
-		dpll_reg = pch_dpll_reg;
+		fp_reg = PCH_FP0(pipe);
+		dpll_reg = PCH_DPLL(pipe);
+	} else {
+		fp_reg = FP0(pipe);
+		dpll_reg = DPLL(pipe);
 	}

-	if (!has_edp_encoder) {
+	/* PCH eDP needs FDI, but CPU eDP does not */
+	if (!has_edp_encoder || intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
 		I915_WRITE(fp_reg, fp);
 		I915_WRITE(dpll_reg, dpll & ~DPLL_VCO_ENABLE);
-		I915_READ(dpll_reg);
+
+		POSTING_READ(dpll_reg);
 		udelay(150);
 	}

 	/* enable transcoder DPLL */
 	if (HAS_PCH_CPT(dev)) {
 		temp = I915_READ(PCH_DPLL_SEL);
-		if (trans_dpll_sel == 0)
-			temp |= (TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL);
+		if (pipe == 0)
+			temp |= TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL;
 		else
-			temp |= (TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
+			temp |= TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL;
 		I915_WRITE(PCH_DPLL_SEL, temp);
-		I915_READ(PCH_DPLL_SEL);
-		udelay(150);
-	}

-	if (HAS_PCH_SPLIT(dev)) {
-		pipeconf &= ~PIPE_ENABLE_DITHER;
-		pipeconf &= ~PIPE_DITHER_TYPE_MASK;
+		POSTING_READ(PCH_DPLL_SEL);
+		udelay(150);
 	}

 	/* The LVDS pin pair needs to be on before the DPLLs are enabled.
@@ -3926,58 +3980,60 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
 	 * things on.
 	 */
 	if (is_lvds) {
-		u32 lvds;
-
+		reg = LVDS;
 		if (HAS_PCH_SPLIT(dev))
-			lvds_reg = PCH_LVDS;
+			reg = PCH_LVDS;

-		lvds = I915_READ(lvds_reg);
-		lvds |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
+		temp = I915_READ(reg);
+		temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
 		if (pipe == 1) {
 			if (HAS_PCH_CPT(dev))
-				lvds |= PORT_TRANS_B_SEL_CPT;
+				temp |= PORT_TRANS_B_SEL_CPT;
 			else
-				lvds |= LVDS_PIPEB_SELECT;
+				temp |= LVDS_PIPEB_SELECT;
 		} else {
 			if (HAS_PCH_CPT(dev))
-				lvds &= ~PORT_TRANS_SEL_MASK;
+				temp &= ~PORT_TRANS_SEL_MASK;
 			else
-				lvds &= ~LVDS_PIPEB_SELECT;
+				temp &= ~LVDS_PIPEB_SELECT;
 		}
 		/* set the corresponsding LVDS_BORDER bit */
-		lvds |= dev_priv->lvds_border_bits;
+		temp |= dev_priv->lvds_border_bits;
 		/* Set the B0-B3 data pairs corresponding to whether we're going to
 		 * set the DPLLs for dual-channel mode or not.
 		 */
 		if (clock.p2 == 7)
-			lvds |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
+			temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
 		else
-			lvds &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
+			temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);

 		/* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
 		 * appropriately here, but we need to look more thoroughly into how
 		 * panels behave in the two modes.
 		 */
-		/* set the dithering flag */
-		if (IS_I965G(dev)) {
-			if (dev_priv->lvds_dither) {
-				if (HAS_PCH_SPLIT(dev)) {
-					pipeconf |= PIPE_ENABLE_DITHER;
-					pipeconf |= PIPE_DITHER_TYPE_ST01;
-				} else
-					lvds |= LVDS_ENABLE_DITHER;
-			} else {
-				if (!HAS_PCH_SPLIT(dev)) {
-					lvds &= ~LVDS_ENABLE_DITHER;
-				}
-			}
+		/* set the dithering flag on non-PCH LVDS as needed */
+		if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) {
+			if (dev_priv->lvds_dither)
+				temp |= LVDS_ENABLE_DITHER;
+			else
+				temp &= ~LVDS_ENABLE_DITHER;
 		}
-		I915_WRITE(lvds_reg, lvds);
-		I915_READ(lvds_reg);
+		I915_WRITE(reg, temp);
 	}
-	if (is_dp)
+
+	/* set the dithering flag and clear for anything other than a panel. */
+	if (HAS_PCH_SPLIT(dev)) {
+		pipeconf &= ~PIPECONF_DITHER_EN;
+		pipeconf &= ~PIPECONF_DITHER_TYPE_MASK;
+		if (dev_priv->lvds_dither && (is_lvds || has_edp_encoder)) {
+			pipeconf |= PIPECONF_DITHER_EN;
+			pipeconf |= PIPECONF_DITHER_TYPE_ST1;
+		}
+	}
+
+	if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
 		intel_dp_set_m_n(crtc, mode, adjusted_mode);
-	else if (HAS_PCH_SPLIT(dev)) {
+	} else if (HAS_PCH_SPLIT(dev)) {
 		/* For non-DP output, clear any trans DP clock recovery setting.*/
 		if (pipe == 0) {
 			I915_WRITE(TRANSA_DATA_M1, 0);
@@ -3992,29 +4048,35 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
 		}
 	}

-	if (!has_edp_encoder) {
+	if (!has_edp_encoder || intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
 		I915_WRITE(fp_reg, fp);
 		I915_WRITE(dpll_reg, dpll);
-		I915_READ(dpll_reg);
+
 		/* Wait for the clocks to stabilize. */
+		POSTING_READ(dpll_reg);
 		udelay(150);

-		if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev)) {
+		if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) {
+			temp = 0;
 			if (is_sdvo) {
-				sdvo_pixel_multiply = adjusted_mode->clock / mode->clock;
-				I915_WRITE(dpll_md_reg, (0 << DPLL_MD_UDI_DIVIDER_SHIFT) |
-					   ((sdvo_pixel_multiply - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT));
-			} else
-				I915_WRITE(dpll_md_reg, 0);
+				temp = intel_mode_get_pixel_multiplier(adjusted_mode);
+				if (temp > 1)
+					temp = (temp - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
+				else
+					temp = 0;
+			}
+			I915_WRITE(DPLL_MD(pipe), temp);
 		} else {
 			/* write it again -- the BIOS does, after all */
 			I915_WRITE(dpll_reg, dpll);
 		}
-		I915_READ(dpll_reg);
+
 		/* Wait for the clocks to stabilize. */
+		POSTING_READ(dpll_reg);
 		udelay(150);
 	}

+	intel_crtc->lowfreq_avail = false;
 	if (is_lvds && has_reduced_clock && i915_powersave) {
 		I915_WRITE(fp_reg + 4, fp2);
 		intel_crtc->lowfreq_avail = true;
@@ -4024,7 +4086,6 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
 		}
 	} else {
 		I915_WRITE(fp_reg + 4, fp);
-		intel_crtc->lowfreq_avail = false;
 		if (HAS_PIPE_CXSR(dev)) {
 			DRM_DEBUG_KMS("disabling CxSR downclocking\n");
 			pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK;
@@ -4043,70 +4104,62 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
 	} else
 		pipeconf &= ~PIPECONF_INTERLACE_W_FIELD_INDICATION; /* progressive */

-	I915_WRITE(htot_reg, (adjusted_mode->crtc_hdisplay - 1) |
+	I915_WRITE(HTOTAL(pipe),
+		   (adjusted_mode->crtc_hdisplay - 1) |
 		   ((adjusted_mode->crtc_htotal - 1) << 16));
-	I915_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - 1) |
+	I915_WRITE(HBLANK(pipe),
+		   (adjusted_mode->crtc_hblank_start - 1) |
 		   ((adjusted_mode->crtc_hblank_end - 1) << 16));
-	I915_WRITE(hsync_reg, (adjusted_mode->crtc_hsync_start - 1) |
+	I915_WRITE(HSYNC(pipe),
+		   (adjusted_mode->crtc_hsync_start - 1) |
 		   ((adjusted_mode->crtc_hsync_end - 1) << 16));
-	I915_WRITE(vtot_reg, (adjusted_mode->crtc_vdisplay - 1) |
+
+	I915_WRITE(VTOTAL(pipe),
+		   (adjusted_mode->crtc_vdisplay - 1) |
 		   ((adjusted_mode->crtc_vtotal - 1) << 16));
-	I915_WRITE(vblank_reg, (adjusted_mode->crtc_vblank_start - 1) |
+	I915_WRITE(VBLANK(pipe),
+		   (adjusted_mode->crtc_vblank_start - 1) |
 		   ((adjusted_mode->crtc_vblank_end - 1) << 16));
-	I915_WRITE(vsync_reg, (adjusted_mode->crtc_vsync_start - 1) |
+	I915_WRITE(VSYNC(pipe),
+		   (adjusted_mode->crtc_vsync_start - 1) |
 		   ((adjusted_mode->crtc_vsync_end - 1) << 16));
-	/* pipesrc and dspsize control the size that is scaled from, which should
-	 * always be the user's requested size.
+
+	/* pipesrc and dspsize control the size that is scaled from,
+	 * which should always be the user's requested size.
 	 */
 	if (!HAS_PCH_SPLIT(dev)) {
-		I915_WRITE(dspsize_reg, ((mode->vdisplay - 1) << 16) |
-			   (mode->hdisplay - 1));
-		I915_WRITE(dsppos_reg, 0);
+		I915_WRITE(DSPSIZE(plane),
+			   ((mode->vdisplay - 1) << 16) |
+			   (mode->hdisplay - 1));
+		I915_WRITE(DSPPOS(plane), 0);
 	}
-	I915_WRITE(pipesrc_reg, ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
+	I915_WRITE(PIPESRC(pipe),
+		   ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));

 	if (HAS_PCH_SPLIT(dev)) {
-		I915_WRITE(data_m1_reg, TU_SIZE(m_n.tu) | m_n.gmch_m);
-		I915_WRITE(data_n1_reg, TU_SIZE(m_n.tu) | m_n.gmch_n);
-		I915_WRITE(link_m1_reg, m_n.link_m);
-		I915_WRITE(link_n1_reg, m_n.link_n);
+		I915_WRITE(PIPE_DATA_M1(pipe), TU_SIZE(m_n.tu) | m_n.gmch_m);
+		I915_WRITE(PIPE_DATA_N1(pipe), m_n.gmch_n);
+		I915_WRITE(PIPE_LINK_M1(pipe), m_n.link_m);
+		I915_WRITE(PIPE_LINK_N1(pipe), m_n.link_n);

-		if (has_edp_encoder) {
+		if (has_edp_encoder && !intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
 			ironlake_set_pll_edp(crtc, adjusted_mode->clock);
-		} else {
-			/* enable FDI RX PLL too */
-			temp = I915_READ(fdi_rx_reg);
-			I915_WRITE(fdi_rx_reg, temp | FDI_RX_PLL_ENABLE);
-			I915_READ(fdi_rx_reg);
-			udelay(200);
-
-			/* enable FDI TX PLL too */
-			temp = I915_READ(fdi_tx_reg);
-			I915_WRITE(fdi_tx_reg, temp | FDI_TX_PLL_ENABLE);
-			I915_READ(fdi_tx_reg);
-
-			/* enable FDI RX PCDCLK */
-			temp = I915_READ(fdi_rx_reg);
-			I915_WRITE(fdi_rx_reg, temp | FDI_SEL_PCDCLK);
-			I915_READ(fdi_rx_reg);
-			udelay(200);
 		}
 	}

-	I915_WRITE(pipeconf_reg, pipeconf);
-	I915_READ(pipeconf_reg);
+	I915_WRITE(PIPECONF(pipe), pipeconf);
+	POSTING_READ(PIPECONF(pipe));

 	intel_wait_for_vblank(dev, pipe);

-	if (IS_IRONLAKE(dev)) {
+	if (IS_GEN5(dev)) {
 		/* enable address swizzle for tiling buffer */
 		temp = I915_READ(DISP_ARB_CTL);
 		I915_WRITE(DISP_ARB_CTL, temp | DISP_TILE_SURFACE_SWIZZLING);
 	}

-	I915_WRITE(dspcntr_reg, dspcntr);
+	I915_WRITE(DSPCNTR(plane), dspcntr);

-	/* Flush the plane changes */
 	ret = intel_pipe_set_base(crtc, x, y, old_fb);

 	intel_update_watermarks(dev);
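
All the HTOTAL/HBLANK/HSYNC (and V*) writes above share one register layout: the earlier event goes in the low 16 bits and the later one in the high 16 bits, each stored minus one because the hardware counts from zero. A small sketch of the packing:

    #include <stdint.h>
    #include <stdio.h>

    /* Pack a (start, end) pair the way the CRTC timing registers expect. */
    static uint32_t pack_timing(uint32_t start, uint32_t end)
    {
        return (start - 1) | ((end - 1) << 16);
    }

    int main(void)
    {
        /* a 1024-wide mode with htotal 1344: 1343 << 16 | 1023 */
        printf("HTOTAL = %#010x\n", pack_timing(1024, 1344)); /* 0x053f03ff */
        return 0;
    }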
@@ -4199,7 +4252,8 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
 }

 /* If no-part of the cursor is visible on the framebuffer, then the GPU may hang... */
-static void intel_crtc_update_cursor(struct drm_crtc *crtc)
+static void intel_crtc_update_cursor(struct drm_crtc *crtc,
+				     bool on)
 {
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -4212,7 +4266,7 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc)

 	pos = 0;

-	if (intel_crtc->cursor_on && crtc->fb) {
+	if (on && crtc->enabled && crtc->fb) {
 		base = intel_crtc->cursor_addr;
 		if (x > (int) crtc->fb->width)
 			base = 0;
@@ -4324,7 +4378,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
 		addr = obj_priv->phys_obj->handle->busaddr;
 	}

-	if (!IS_I9XX(dev))
+	if (IS_GEN2(dev))
 		I915_WRITE(CURSIZE, (height << 12) | width);

  finish:
@@ -4344,7 +4398,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
 	intel_crtc->cursor_width = width;
 	intel_crtc->cursor_height = height;

-	intel_crtc_update_cursor(crtc);
+	intel_crtc_update_cursor(crtc, true);

 	return 0;
 fail_unpin:
@@ -4363,7 +4417,7 @@ static int intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
 	intel_crtc->cursor_x = x;
 	intel_crtc->cursor_y = y;

-	intel_crtc_update_cursor(crtc);
+	intel_crtc_update_cursor(crtc, true);

 	return 0;
 }
@@ -4432,7 +4486,7 @@ struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
 	struct intel_crtc *intel_crtc;
 	struct drm_crtc *possible_crtc;
 	struct drm_crtc *supported_crtc =NULL;
-	struct drm_encoder *encoder = &intel_encoder->enc;
+	struct drm_encoder *encoder = &intel_encoder->base;
 	struct drm_crtc *crtc = NULL;
 	struct drm_device *dev = encoder->dev;
 	struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
@@ -4513,7 +4567,7 @@ struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
 void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder,
 				    struct drm_connector *connector, int dpms_mode)
 {
-	struct drm_encoder *encoder = &intel_encoder->enc;
+	struct drm_encoder *encoder = &intel_encoder->base;
 	struct drm_device *dev = encoder->dev;
 	struct drm_crtc *crtc = encoder->crtc;
 	struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
@@ -4559,7 +4613,7 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
 		clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
 	}

-	if (IS_I9XX(dev)) {
+	if (!IS_GEN2(dev)) {
 		if (IS_PINEVIEW(dev))
 			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
 				       DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
@@ -4663,8 +4717,6 @@ static void intel_gpu_idle_timer(unsigned long arg)
 	struct drm_device *dev = (struct drm_device *)arg;
 	drm_i915_private_t *dev_priv = dev->dev_private;

-	DRM_DEBUG_DRIVER("idle timer fired, downclocking\n");
-
 	dev_priv->busy = false;

 	queue_work(dev_priv->wq, &dev_priv->idle_work);
@@ -4678,14 +4730,12 @@ static void intel_crtc_idle_timer(unsigned long arg)
 	struct drm_crtc *crtc = &intel_crtc->base;
 	drm_i915_private_t *dev_priv = crtc->dev->dev_private;

-	DRM_DEBUG_DRIVER("idle timer fired, downclocking\n");
-
 	intel_crtc->busy = false;

 	queue_work(dev_priv->wq, &dev_priv->idle_work);
 }

-static void intel_increase_pllclock(struct drm_crtc *crtc, bool schedule)
+static void intel_increase_pllclock(struct drm_crtc *crtc)
 {
 	struct drm_device *dev = crtc->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
@@ -4720,9 +4770,8 @@ static void intel_increase_pllclock(struct drm_crtc *crtc, bool schedule)
 	}

 	/* Schedule downclock */
-	if (schedule)
-		mod_timer(&intel_crtc->idle_timer, jiffies +
-			  msecs_to_jiffies(CRTC_IDLE_TIMEOUT));
+	mod_timer(&intel_crtc->idle_timer, jiffies +
+		  msecs_to_jiffies(CRTC_IDLE_TIMEOUT));
 }

 static void intel_decrease_pllclock(struct drm_crtc *crtc)
@@ -4858,7 +4907,7 @@ void intel_mark_busy(struct drm_device *dev, struct drm_gem_object *obj)
 			I915_WRITE(FW_BLC_SELF, fw_blc_self | FW_BLC_SELF_EN_MASK);
 		}
 		/* Non-busy -> busy, upclock */
-		intel_increase_pllclock(crtc, true);
+		intel_increase_pllclock(crtc);
 		intel_crtc->busy = true;
 	} else {
 		/* Busy -> busy, put off timer */
@@ -4872,8 +4921,22 @@ void intel_mark_busy(struct drm_device *dev, struct drm_gem_object *obj)
 static void intel_crtc_destroy(struct drm_crtc *crtc)
 {
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	struct intel_unpin_work *work;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->event_lock, flags);
+	work = intel_crtc->unpin_work;
+	intel_crtc->unpin_work = NULL;
+	spin_unlock_irqrestore(&dev->event_lock, flags);
+
+	if (work) {
+		cancel_work_sync(&work->work);
+		kfree(work);
+	}

 	drm_crtc_cleanup(crtc);
+
 	kfree(intel_crtc);
 }

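
The new teardown detaches the pending unpin work while holding the event lock and only then cancels and frees it, so the interrupt-time flip handler can never race against a half-destroyed CRTC. The generic shape of that pattern, with illustrative names:

    #include <linux/slab.h>
    #include <linux/spinlock.h>
    #include <linux/workqueue.h>

    struct flip_holder {
        spinlock_t lock;
        struct work_struct *work;  /* set/cleared under lock */
    };

    static void flip_holder_destroy(struct flip_holder *h)
    {
        struct work_struct *work;
        unsigned long flags;

        /* steal the pointer under the lock so the IRQ path sees NULL */
        spin_lock_irqsave(&h->lock, flags);
        work = h->work;
        h->work = NULL;
        spin_unlock_irqrestore(&h->lock, flags);

        /* wait for any in-flight execution outside the lock, then free */
        if (work) {
            cancel_work_sync(work);
            kfree(work);
        }
    }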
@@ -4928,12 +4991,11 @@ static void do_intel_finish_page_flip(struct drm_device *dev,

 	spin_unlock_irqrestore(&dev->event_lock, flags);

-	obj_priv = to_intel_bo(work->pending_flip_obj);
-
-	/* Initial scanout buffer will have a 0 pending flip count */
-	if ((atomic_read(&obj_priv->pending_flip) == 0) ||
-	    atomic_dec_and_test(&obj_priv->pending_flip))
-		DRM_WAKEUP(&dev_priv->pending_flip_queue);
+	obj_priv = to_intel_bo(work->old_fb_obj);
+	atomic_clear_mask(1 << intel_crtc->plane,
+			  &obj_priv->pending_flip.counter);
+	if (atomic_read(&obj_priv->pending_flip) == 0)
+		wake_up(&dev_priv->pending_flip_queue);
 	schedule_work(&work->work);

 	trace_i915_flip_complete(intel_crtc->plane, work->pending_flip_obj);
@@ -5014,7 +5076,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 	obj = intel_fb->obj;

 	mutex_lock(&dev->struct_mutex);
-	ret = intel_pin_and_fence_fb_obj(dev, obj);
+	ret = intel_pin_and_fence_fb_obj(dev, obj, true);
 	if (ret)
 		goto cleanup_work;

@@ -5023,29 +5085,33 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 	drm_gem_object_reference(obj);

 	crtc->fb = fb;
-	ret = i915_gem_object_flush_write_domain(obj);
-	if (ret)
-		goto cleanup_objs;

 	ret = drm_vblank_get(dev, intel_crtc->pipe);
 	if (ret)
 		goto cleanup_objs;

-	obj_priv = to_intel_bo(obj);
-	atomic_inc(&obj_priv->pending_flip);
+	/* Block clients from rendering to the new back buffer until
+	 * the flip occurs and the object is no longer visible.
+	 */
+	atomic_add(1 << intel_crtc->plane,
+		   &to_intel_bo(work->old_fb_obj)->pending_flip);
+
 	work->pending_flip_obj = obj;
+	obj_priv = to_intel_bo(obj);

 	if (IS_GEN3(dev) || IS_GEN2(dev)) {
 		u32 flip_mask;

+		/* Can't queue multiple flips, so wait for the previous
+		 * one to finish before executing the next.
+		 */
+		BEGIN_LP_RING(2);
 		if (intel_crtc->plane)
 			flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
 		else
 			flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
-
-		BEGIN_LP_RING(2);
 		OUT_RING(MI_WAIT_FOR_EVENT | flip_mask);
-		OUT_RING(0);
+		OUT_RING(MI_NOOP);
 		ADVANCE_LP_RING();
 	}

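
Together with the completion hunk earlier, the flip path now keeps one pending-flip bit per plane in a single atomic: queuing a flip adds 1 << plane, completion clears that bit, and waiters are only woken once the counter is fully zero. A compact sketch (kernels of this vintage expose atomic_clear_mask() taking the raw counter, which is why the driver passes &...pending_flip.counter):

    #include <linux/atomic.h>

    static atomic_t pending_flip = ATOMIC_INIT(0);

    static void queue_flip(int plane)
    {
        atomic_add(1 << plane, &pending_flip);  /* mark plane busy */
    }

    /* Returns true when no plane has a flip outstanding any more. */
    static int complete_flip(int plane)
    {
        atomic_clear_mask(1 << plane, &pending_flip.counter);
        return atomic_read(&pending_flip) == 0;
    }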
@@ -5126,15 +5192,14 @@ cleanup_work:
 	return ret;
 }

-static const struct drm_crtc_helper_funcs intel_helper_funcs = {
+static struct drm_crtc_helper_funcs intel_helper_funcs = {
 	.dpms = intel_crtc_dpms,
 	.mode_fixup = intel_crtc_mode_fixup,
 	.mode_set = intel_crtc_mode_set,
 	.mode_set_base = intel_pipe_set_base,
 	.mode_set_base_atomic = intel_pipe_set_base_atomic,
-	.prepare = intel_crtc_prepare,
-	.commit = intel_crtc_commit,
 	.load_lut = intel_crtc_load_lut,
+	.disable = intel_crtc_disable,
 };

 static const struct drm_crtc_funcs intel_crtc_funcs = {
@@ -5160,8 +5225,6 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
 	drm_crtc_init(dev, &intel_crtc->base, &intel_crtc_funcs);

 	drm_mode_crtc_set_gamma_size(&intel_crtc->base, 256);
-	intel_crtc->pipe = pipe;
-	intel_crtc->plane = pipe;
 	for (i = 0; i < 256; i++) {
 		intel_crtc->lut_r[i] = i;
 		intel_crtc->lut_g[i] = i;
@@ -5171,9 +5234,9 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
 	/* Swap pipes & planes for FBC on pre-965 */
 	intel_crtc->pipe = pipe;
 	intel_crtc->plane = pipe;
-	if (IS_MOBILE(dev) && (IS_I9XX(dev) && !IS_I965G(dev))) {
+	if (IS_MOBILE(dev) && IS_GEN3(dev)) {
 		DRM_DEBUG_KMS("swapping pipes & planes for FBC\n");
-		intel_crtc->plane = ((pipe == 0) ? 1 : 0);
+		intel_crtc->plane = !pipe;
 	}

 	BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
@@ -5183,6 +5246,16 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)

 	intel_crtc->cursor_addr = 0;
 	intel_crtc->dpms_mode = -1;
+	intel_crtc->active = true; /* force the pipe off on setup_init_config */
+
+	if (HAS_PCH_SPLIT(dev)) {
+		intel_helper_funcs.prepare = ironlake_crtc_prepare;
+		intel_helper_funcs.commit = ironlake_crtc_commit;
+	} else {
+		intel_helper_funcs.prepare = i9xx_crtc_prepare;
+		intel_helper_funcs.commit = i9xx_crtc_commit;
+	}
+
 	drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);

 	intel_crtc->busy = false;
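
This also explains why an earlier hunk dropped the const from intel_helper_funcs: the shared ops table now has its prepare/commit slots filled in at CRTC init time, based on whether the device has a PCH. A reduced sketch of the pattern (names are illustrative stubs, not driver symbols):

    #include <stdio.h>

    struct crtc_helper_ops {
        void (*prepare)(void);
        void (*commit)(void);
    };

    static void ironlake_prepare(void) { puts("ironlake prepare"); }
    static void ironlake_commit(void)  { puts("ironlake commit"); }
    static void i9xx_prepare(void)     { puts("i9xx prepare"); }
    static void i9xx_commit(void)      { puts("i9xx commit"); }

    /* Deliberately not const: patched once, at init, before first use. */
    static struct crtc_helper_ops helper_ops;

    static void crtc_init(int has_pch_split)
    {
        if (has_pch_split) {
            helper_ops.prepare = ironlake_prepare;
            helper_ops.commit = ironlake_commit;
        } else {
            helper_ops.prepare = i9xx_prepare;
            helper_ops.commit = i9xx_commit;
        }
    }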
@@ -5218,38 +5291,25 @@ int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
 	return 0;
 }

-struct drm_crtc *intel_get_crtc_from_pipe(struct drm_device *dev, int pipe)
-{
-	struct drm_crtc *crtc = NULL;
-
-	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-		if (intel_crtc->pipe == pipe)
-			break;
-	}
-	return crtc;
-}
-
 static int intel_encoder_clones(struct drm_device *dev, int type_mask)
 {
+	struct intel_encoder *encoder;
 	int index_mask = 0;
-	struct drm_encoder *encoder;
 	int entry = 0;

-	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
-		struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
-		if (type_mask & intel_encoder->clone_mask)
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
+		if (type_mask & encoder->clone_mask)
 			index_mask |= (1 << entry);
 		entry++;
 	}
+
 	return index_mask;
 }

-
 static void intel_setup_outputs(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_encoder *encoder;
+	struct intel_encoder *encoder;
 	bool dpd_is_edp = false;

 	if (IS_MOBILE(dev) && !IS_I830(dev))
@@ -5338,12 +5398,10 @@ static void intel_setup_outputs(struct drm_device *dev)
 	if (SUPPORTS_TV(dev))
 		intel_tv_init(dev);

-	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
-		struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
-
-		encoder->possible_crtcs = intel_encoder->crtc_mask;
-		encoder->possible_clones = intel_encoder_clones(dev,
-								intel_encoder->clone_mask);
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
+		encoder->base.possible_crtcs = encoder->crtc_mask;
+		encoder->base.possible_clones =
+			intel_encoder_clones(dev, encoder->clone_mask);
 	}
 }

@@ -5377,8 +5435,25 @@ int intel_framebuffer_init(struct drm_device *dev,
 			   struct drm_mode_fb_cmd *mode_cmd,
 			   struct drm_gem_object *obj)
 {
+	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 	int ret;

+	if (obj_priv->tiling_mode == I915_TILING_Y)
+		return -EINVAL;
+
+	if (mode_cmd->pitch & 63)
+		return -EINVAL;
+
+	switch (mode_cmd->bpp) {
+	case 8:
+	case 16:
+	case 24:
+	case 32:
+		break;
+	default:
+		return -EINVAL;
+	}
+
 	ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs);
 	if (ret) {
 		DRM_ERROR("framebuffer init failed %d\n", ret);
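
The new up-front checks reject anything the display engine cannot scan out before drm_framebuffer_init() runs: Y-tiled objects, pitches that are not a multiple of 64 bytes, and pixel sizes other than 8/16/24/32 bpp. A condensed, standalone form of the predicate:

    #include <errno.h>
    #include <stdbool.h>

    struct fb_request {
        unsigned int pitch;  /* bytes per scanline */
        unsigned int bpp;    /* bits per pixel */
        bool y_tiled;
    };

    static int validate_scanout(const struct fb_request *req)
    {
        if (req->y_tiled)
            return -EINVAL;      /* scanout cannot use Y tiling here */
        if (req->pitch & 63)
            return -EINVAL;      /* pitch must be 64-byte aligned */
        switch (req->bpp) {
        case 8: case 16: case 24: case 32:
            return 0;
        default:
            return -EINVAL;      /* unsupported pixel size */
        }
    }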
@@ -5487,6 +5562,10 @@ void ironlake_enable_drps(struct drm_device *dev)
 	u32 rgvmodectl = I915_READ(MEMMODECTL);
 	u8 fmax, fmin, fstart, vstart;

+	/* Enable temp reporting */
+	I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN);
+	I915_WRITE16(TSC1, I915_READ(TSC1) | TSE);
+
 	/* 100ms RC evaluation intervals */
 	I915_WRITE(RCUPEI, 100000);
 	I915_WRITE(RCDNEI, 100000);
@@ -5529,7 +5608,7 @@ void ironlake_enable_drps(struct drm_device *dev)
 	rgvmodectl |= MEMMODE_SWMODE_EN;
 	I915_WRITE(MEMMODECTL, rgvmodectl);

-	if (wait_for((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 1, 0))
+	if (wait_for((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10))
 		DRM_ERROR("stuck trying to change perf mode\n");
 	msleep(1);

@@ -5660,7 +5739,7 @@ void intel_init_clock_gating(struct drm_device *dev)
 	if (HAS_PCH_SPLIT(dev)) {
 		uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;

-		if (IS_IRONLAKE(dev)) {
+		if (IS_GEN5(dev)) {
 			/* Required for FBC */
 			dspclk_gate |= DPFDUNIT_CLOCK_GATE_DISABLE;
 			/* Required for CxSR */
@@ -5674,13 +5753,20 @@ void intel_init_clock_gating(struct drm_device *dev)
 		I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);

 		/*
+		 * On Ibex Peak and Cougar Point, we need to disable clock
+		 * gating for the panel power sequencer or it will fail to
+		 * start up when no ports are active.
+		 */
+		I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
+
+		/*
 		 * According to the spec the following bits should be set in
 		 * order to enable memory self-refresh
 		 * The bit 22/21 of 0x42004
 		 * The bit 5 of 0x42020
 		 * The bit 15 of 0x45000
 		 */
-		if (IS_IRONLAKE(dev)) {
+		if (IS_GEN5(dev)) {
 			I915_WRITE(ILK_DISPLAY_CHICKEN2,
 				   (I915_READ(ILK_DISPLAY_CHICKEN2) |
 				    ILK_DPARB_GATE | ILK_VSDPFD_FULL));
@@ -5728,20 +5814,20 @@ void intel_init_clock_gating(struct drm_device *dev)
 		if (IS_GM45(dev))
 			dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
 		I915_WRITE(DSPCLK_GATE_D, dspclk_gate);
-	} else if (IS_I965GM(dev)) {
+	} else if (IS_CRESTLINE(dev)) {
 		I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
 		I915_WRITE(RENCLK_GATE_D2, 0);
 		I915_WRITE(DSPCLK_GATE_D, 0);
 		I915_WRITE(RAMCLK_GATE_D, 0);
 		I915_WRITE16(DEUC, 0);
-	} else if (IS_I965G(dev)) {
+	} else if (IS_BROADWATER(dev)) {
 		I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
 			   I965_RCC_CLOCK_GATE_DISABLE |
 			   I965_RCPB_CLOCK_GATE_DISABLE |
 			   I965_ISC_CLOCK_GATE_DISABLE |
 			   I965_FBC_CLOCK_GATE_DISABLE);
 		I915_WRITE(RENCLK_GATE_D2, 0);
-	} else if (IS_I9XX(dev)) {
+	} else if (IS_GEN3(dev)) {
 		u32 dstate = I915_READ(D_STATE);

 		dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
@@ -5823,7 +5909,7 @@ static void intel_init_display(struct drm_device *dev)
 			dev_priv->display.fbc_enabled = g4x_fbc_enabled;
 			dev_priv->display.enable_fbc = g4x_enable_fbc;
 			dev_priv->display.disable_fbc = g4x_disable_fbc;
-		} else if (IS_I965GM(dev)) {
+		} else if (IS_CRESTLINE(dev)) {
 			dev_priv->display.fbc_enabled = i8xx_fbc_enabled;
 			dev_priv->display.enable_fbc = i8xx_enable_fbc;
 			dev_priv->display.disable_fbc = i8xx_disable_fbc;
@@ -5856,7 +5942,7 @@ static void intel_init_display(struct drm_device *dev)

 	/* For FIFO watermark updates */
 	if (HAS_PCH_SPLIT(dev)) {
-		if (IS_IRONLAKE(dev)) {
+		if (IS_GEN5(dev)) {
 			if (I915_READ(MLTR_ILK) & ILK_SRLT_MASK)
 				dev_priv->display.update_wm = ironlake_update_wm;
 			else {
@@ -5883,9 +5969,9 @@ static void intel_init_display(struct drm_device *dev)
 		dev_priv->display.update_wm = pineview_update_wm;
 	} else if (IS_G4X(dev))
 		dev_priv->display.update_wm = g4x_update_wm;
-	else if (IS_I965G(dev))
+	else if (IS_GEN4(dev))
 		dev_priv->display.update_wm = i965_update_wm;
-	else if (IS_I9XX(dev)) {
+	else if (IS_GEN3(dev)) {
 		dev_priv->display.update_wm = i9xx_update_wm;
 		dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
 	} else if (IS_I85X(dev)) {
@@ -5999,24 +6085,24 @@ void intel_modeset_init(struct drm_device *dev)

 	intel_init_display(dev);

-	if (IS_I965G(dev)) {
-		dev->mode_config.max_width = 8192;
-		dev->mode_config.max_height = 8192;
-	} else if (IS_I9XX(dev)) {
+	if (IS_GEN2(dev)) {
+		dev->mode_config.max_width = 2048;
+		dev->mode_config.max_height = 2048;
+	} else if (IS_GEN3(dev)) {
 		dev->mode_config.max_width = 4096;
 		dev->mode_config.max_height = 4096;
 	} else {
-		dev->mode_config.max_width = 2048;
-		dev->mode_config.max_height = 2048;
+		dev->mode_config.max_width = 8192;
+		dev->mode_config.max_height = 8192;
 	}

 	/* set memory base */
-	if (IS_I9XX(dev))
-		dev->mode_config.fb_base = pci_resource_start(dev->pdev, 2);
-	else
+	if (IS_GEN2(dev))
 		dev->mode_config.fb_base = pci_resource_start(dev->pdev, 0);
+	else
+		dev->mode_config.fb_base = pci_resource_start(dev->pdev, 2);

-	if (IS_MOBILE(dev) || IS_I9XX(dev))
+	if (IS_MOBILE(dev) || !IS_GEN2(dev))
 		dev_priv->num_pipe = 2;
 	else
 		dev_priv->num_pipe = 1;
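
The rewritten branches keep the same per-generation scanout caps as before, just keyed off the generation number instead of chip-name macros: 2048x2048 on gen2, 4096x4096 on gen3, and 8192x8192 from gen4 on. As a lookup:

    /* Maximum framebuffer dimension per display generation. */
    static int max_fb_dim(int gen)
    {
        if (gen == 2)
            return 2048;
        if (gen == 3)
            return 4096;
        return 8192;  /* gen4 and newer */
    }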
@@ -6052,10 +6138,11 @@ void intel_modeset_cleanup(struct drm_device *dev)
 	struct drm_crtc *crtc;
 	struct intel_crtc *intel_crtc;

+	drm_kms_helper_poll_fini(dev);
 	mutex_lock(&dev->struct_mutex);

-	drm_kms_helper_poll_fini(dev);
-	intel_fbdev_fini(dev);
+	intel_unregister_dsm_handler();
+

 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
 		/* Skip inactive CRTCs */
@@ -6063,12 +6150,9 @@ void intel_modeset_cleanup(struct drm_device *dev)
 			continue;

 		intel_crtc = to_intel_crtc(crtc);
-		intel_increase_pllclock(crtc, false);
-		del_timer_sync(&intel_crtc->idle_timer);
+		intel_increase_pllclock(crtc);
 	}

-	del_timer_sync(&dev_priv->idle_timer);
-
 	if (dev_priv->display.disable_fbc)
 		dev_priv->display.disable_fbc(dev);

@@ -6097,33 +6181,36 @@ void intel_modeset_cleanup(struct drm_device *dev)

 	mutex_unlock(&dev->struct_mutex);

+	/* Disable the irq before mode object teardown, for the irq might
+	 * enqueue unpin/hotplug work. */
+	drm_irq_uninstall(dev);
+	cancel_work_sync(&dev_priv->hotplug_work);
+
+	/* Shut off idle work before the crtcs get freed. */
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+		intel_crtc = to_intel_crtc(crtc);
+		del_timer_sync(&intel_crtc->idle_timer);
+	}
+	del_timer_sync(&dev_priv->idle_timer);
+	cancel_work_sync(&dev_priv->idle_work);
+
 	drm_mode_config_cleanup(dev);
 }

-
 /*
  * Return which encoder is currently attached for connector.
  */
-struct drm_encoder *intel_attached_encoder (struct drm_connector *connector)
+struct drm_encoder *intel_best_encoder(struct drm_connector *connector)
 {
-	struct drm_mode_object *obj;
-	struct drm_encoder *encoder;
-	int i;
-
-	for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
-		if (connector->encoder_ids[i] == 0)
-			break;
-
-		obj = drm_mode_object_find(connector->dev,
-					   connector->encoder_ids[i],
-					   DRM_MODE_OBJECT_ENCODER);
-		if (!obj)
-			continue;
+	return &intel_attached_encoder(connector)->base;
+}

-		encoder = obj_to_encoder(obj);
-		return encoder;
-	}
-	return NULL;
+void intel_connector_attach_encoder(struct intel_connector *connector,
+				    struct intel_encoder *encoder)
+{
+	connector->encoder = encoder;
+	drm_mode_connector_attach_encoder(&connector->base,
+					  &encoder->base);
 }

 /*
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 9ab8708ac6ba..891f4f1d63b1 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -42,15 +42,13 @@

 #define DP_LINK_CONFIGURATION_SIZE 9

-#define IS_eDP(i) ((i)->base.type == INTEL_OUTPUT_EDP)
-#define IS_PCH_eDP(i) ((i)->is_pch_edp)
-
 struct intel_dp {
 	struct intel_encoder base;
 	uint32_t output_reg;
 	uint32_t DP;
 	uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE];
 	bool has_audio;
+	int force_audio;
 	int dpms_mode;
 	uint8_t link_bw;
 	uint8_t lane_count;
@@ -58,14 +56,69 @@ struct intel_dp {
 	struct i2c_adapter adapter;
 	struct i2c_algo_dp_aux_data algo;
 	bool is_pch_edp;
+	uint8_t train_set[4];
+	uint8_t link_status[DP_LINK_STATUS_SIZE];
+
+	struct drm_property *force_audio_property;
 };

+/**
+ * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
+ * @intel_dp: DP struct
+ *
+ * If a CPU or PCH DP output is attached to an eDP panel, this function
+ * will return true, and false otherwise.
+ */
+static bool is_edp(struct intel_dp *intel_dp)
+{
+	return intel_dp->base.type == INTEL_OUTPUT_EDP;
+}
+
+/**
+ * is_pch_edp - is the port on the PCH and attached to an eDP panel?
+ * @intel_dp: DP struct
+ *
+ * Returns true if the given DP struct corresponds to a PCH DP port attached
+ * to an eDP panel, false otherwise. Helpful for determining whether we
+ * may need FDI resources for a given DP output or not.
+ */
+static bool is_pch_edp(struct intel_dp *intel_dp)
+{
+	return intel_dp->is_pch_edp;
+}
+
 static struct intel_dp *enc_to_intel_dp(struct drm_encoder *encoder)
 {
-	return container_of(enc_to_intel_encoder(encoder), struct intel_dp, base);
+	return container_of(encoder, struct intel_dp, base.base);
+}
+
+static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
+{
+	return container_of(intel_attached_encoder(connector),
+			    struct intel_dp, base);
+}
+
+/**
+ * intel_encoder_is_pch_edp - is the given encoder a PCH attached eDP?
+ * @encoder: DRM encoder
+ *
+ * Return true if @encoder corresponds to a PCH attached eDP panel. Needed
+ * by intel_display.c.
+ */
+bool intel_encoder_is_pch_edp(struct drm_encoder *encoder)
+{
+	struct intel_dp *intel_dp;
+
+	if (!encoder)
+		return false;
+
+	intel_dp = enc_to_intel_dp(encoder);
+
+	return is_pch_edp(intel_dp);
 }

-static void intel_dp_link_train(struct intel_dp *intel_dp);
+static void intel_dp_start_link_train(struct intel_dp *intel_dp);
+static void intel_dp_complete_link_train(struct intel_dp *intel_dp);
 static void intel_dp_link_down(struct intel_dp *intel_dp);

 void
@@ -129,8 +182,8 @@ intel_dp_link_required(struct drm_device *dev, struct intel_dp *intel_dp, int pi
129{ 182{
130 struct drm_i915_private *dev_priv = dev->dev_private; 183 struct drm_i915_private *dev_priv = dev->dev_private;
131 184
132 if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) 185 if (is_edp(intel_dp))
133 return (pixel_clock * dev_priv->edp_bpp) / 8; 186 return (pixel_clock * dev_priv->edp.bpp + 7) / 8;
134 else 187 else
135 return pixel_clock * 3; 188 return pixel_clock * 3;
136} 189}
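
[Editor's note on the arithmetic change above: the old eDP path truncated (pixel_clock * edp_bpp) / 8, while the new expression rounds the bit requirement up to whole bytes. A standalone sketch of the idiom; the helper name is illustrative, not from the patch:]

    #include <stdint.h>

    /* ceil(bits / 8) without floating point: adding 7 makes any
     * nonzero remainder bump the quotient by one byte. */
    static uint32_t bits_to_bytes(uint32_t bits)
    {
            return (bits + 7) / 8;
    }
    /* e.g. bits_to_bytes(9) == 2, where plain 9 / 8 would truncate to 1. */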
@@ -145,15 +198,13 @@ static int
145intel_dp_mode_valid(struct drm_connector *connector, 198intel_dp_mode_valid(struct drm_connector *connector,
146 struct drm_display_mode *mode) 199 struct drm_display_mode *mode)
147{ 200{
148 struct drm_encoder *encoder = intel_attached_encoder(connector); 201 struct intel_dp *intel_dp = intel_attached_dp(connector);
149 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
150 struct drm_device *dev = connector->dev; 202 struct drm_device *dev = connector->dev;
151 struct drm_i915_private *dev_priv = dev->dev_private; 203 struct drm_i915_private *dev_priv = dev->dev_private;
152 int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_dp)); 204 int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_dp));
153 int max_lanes = intel_dp_max_lane_count(intel_dp); 205 int max_lanes = intel_dp_max_lane_count(intel_dp);
154 206
155 if ((IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) && 207 if (is_edp(intel_dp) && dev_priv->panel_fixed_mode) {
156 dev_priv->panel_fixed_mode) {
157 if (mode->hdisplay > dev_priv->panel_fixed_mode->hdisplay) 208 if (mode->hdisplay > dev_priv->panel_fixed_mode->hdisplay)
158 return MODE_PANEL; 209 return MODE_PANEL;
159 210
@@ -163,7 +214,7 @@ intel_dp_mode_valid(struct drm_connector *connector,
163 214
 164 /* only refuse the mode on non-eDP since we have seen some weird eDP panels 215 /* only refuse the mode on non-eDP since we have seen some weird eDP panels
165 which are outside spec tolerances but somehow work by magic */ 216 which are outside spec tolerances but somehow work by magic */
166 if (!IS_eDP(intel_dp) && 217 if (!is_edp(intel_dp) &&
167 (intel_dp_link_required(connector->dev, intel_dp, mode->clock) 218 (intel_dp_link_required(connector->dev, intel_dp, mode->clock)
168 > intel_dp_max_data_rate(max_link_clock, max_lanes))) 219 > intel_dp_max_data_rate(max_link_clock, max_lanes)))
169 return MODE_CLOCK_HIGH; 220 return MODE_CLOCK_HIGH;
@@ -233,7 +284,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
233 uint8_t *recv, int recv_size) 284 uint8_t *recv, int recv_size)
234{ 285{
235 uint32_t output_reg = intel_dp->output_reg; 286 uint32_t output_reg = intel_dp->output_reg;
236 struct drm_device *dev = intel_dp->base.enc.dev; 287 struct drm_device *dev = intel_dp->base.base.dev;
237 struct drm_i915_private *dev_priv = dev->dev_private; 288 struct drm_i915_private *dev_priv = dev->dev_private;
238 uint32_t ch_ctl = output_reg + 0x10; 289 uint32_t ch_ctl = output_reg + 0x10;
239 uint32_t ch_data = ch_ctl + 4; 290 uint32_t ch_data = ch_ctl + 4;
@@ -246,8 +297,11 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
 246 /* The clock divider is based on the hrawclk, 297 /* The clock divider is based on the hrawclk,
247 * and would like to run at 2MHz. So, take the 298 * and would like to run at 2MHz. So, take the
248 * hrawclk value and divide by 2 and use that 299 * hrawclk value and divide by 2 and use that
300 *
301 * Note that PCH attached eDP panels should use a 125MHz input
302 * clock divider.
249 */ 303 */
250 if (IS_eDP(intel_dp)) { 304 if (is_edp(intel_dp) && !is_pch_edp(intel_dp)) {
251 if (IS_GEN6(dev)) 305 if (IS_GEN6(dev))
 252 aux_clock_divider = 200; /* SNB eDP input clock at 400MHz */ 306 aux_clock_divider = 200; /* SNB eDP input clock at 400MHz */
253 else 307 else
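
[To make the divider comment above concrete, the relationship it describes in standalone form; the helper name and the kHz unit choice are assumptions, not from the patch:]

    #include <stdint.h>

    /* The AUX channel wants to run at ~2MHz, so the divider is whatever
     * reduces the input clock to that: a 125MHz PCH eDP input gives
     * 125000 / 2000 = 62, and the hard-coded 200 above matches the
     * 400MHz SNB eDP input. */
    static uint32_t aux_clock_divider(uint32_t input_clock_khz)
    {
            return input_clock_khz / 2000;
    }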
@@ -519,8 +573,7 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
519 int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0; 573 int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0;
520 static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 }; 574 static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 };
521 575
522 if ((IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) && 576 if (is_edp(intel_dp) && dev_priv->panel_fixed_mode) {
523 dev_priv->panel_fixed_mode) {
524 intel_fixed_panel_mode(dev_priv->panel_fixed_mode, adjusted_mode); 577 intel_fixed_panel_mode(dev_priv->panel_fixed_mode, adjusted_mode);
525 intel_pch_panel_fitting(dev, DRM_MODE_SCALE_FULLSCREEN, 578 intel_pch_panel_fitting(dev, DRM_MODE_SCALE_FULLSCREEN,
526 mode, adjusted_mode); 579 mode, adjusted_mode);
@@ -531,6 +584,17 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
531 mode->clock = dev_priv->panel_fixed_mode->clock; 584 mode->clock = dev_priv->panel_fixed_mode->clock;
532 } 585 }
533 586
587 /* Just use VBT values for eDP */
588 if (is_edp(intel_dp)) {
589 intel_dp->lane_count = dev_priv->edp.lanes;
590 intel_dp->link_bw = dev_priv->edp.rate;
591 adjusted_mode->clock = intel_dp_link_clock(intel_dp->link_bw);
592 DRM_DEBUG_KMS("eDP link bw %02x lane count %d clock %d\n",
593 intel_dp->link_bw, intel_dp->lane_count,
594 adjusted_mode->clock);
595 return true;
596 }
597
534 for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) { 598 for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
535 for (clock = 0; clock <= max_clock; clock++) { 599 for (clock = 0; clock <= max_clock; clock++) {
536 int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count); 600 int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count);
@@ -549,19 +613,6 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
549 } 613 }
550 } 614 }
551 615
552 if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) {
553 /* okay we failed just pick the highest */
554 intel_dp->lane_count = max_lane_count;
555 intel_dp->link_bw = bws[max_clock];
556 adjusted_mode->clock = intel_dp_link_clock(intel_dp->link_bw);
557 DRM_DEBUG_KMS("Force picking display port link bw %02x lane "
558 "count %d clock %d\n",
559 intel_dp->link_bw, intel_dp->lane_count,
560 adjusted_mode->clock);
561
562 return true;
563 }
564
565 return false; 616 return false;
566} 617}
567 618
@@ -598,25 +649,6 @@ intel_dp_compute_m_n(int bpp,
598 intel_reduce_ratio(&m_n->link_m, &m_n->link_n); 649 intel_reduce_ratio(&m_n->link_m, &m_n->link_n);
599} 650}
600 651
601bool intel_pch_has_edp(struct drm_crtc *crtc)
602{
603 struct drm_device *dev = crtc->dev;
604 struct drm_mode_config *mode_config = &dev->mode_config;
605 struct drm_encoder *encoder;
606
607 list_for_each_entry(encoder, &mode_config->encoder_list, head) {
608 struct intel_dp *intel_dp;
609
610 if (encoder->crtc != crtc)
611 continue;
612
613 intel_dp = enc_to_intel_dp(encoder);
614 if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT)
615 return intel_dp->is_pch_edp;
616 }
617 return false;
618}
619
620void 652void
621intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, 653intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
622 struct drm_display_mode *adjusted_mode) 654 struct drm_display_mode *adjusted_mode)
@@ -641,8 +673,10 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
641 intel_dp = enc_to_intel_dp(encoder); 673 intel_dp = enc_to_intel_dp(encoder);
642 if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT) { 674 if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT) {
643 lane_count = intel_dp->lane_count; 675 lane_count = intel_dp->lane_count;
644 if (IS_PCH_eDP(intel_dp)) 676 break;
645 bpp = dev_priv->edp_bpp; 677 } else if (is_edp(intel_dp)) {
678 lane_count = dev_priv->edp.lanes;
679 bpp = dev_priv->edp.bpp;
646 break; 680 break;
647 } 681 }
648 } 682 }
@@ -698,7 +732,7 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
698{ 732{
699 struct drm_device *dev = encoder->dev; 733 struct drm_device *dev = encoder->dev;
700 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 734 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
701 struct drm_crtc *crtc = intel_dp->base.enc.crtc; 735 struct drm_crtc *crtc = intel_dp->base.base.crtc;
702 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 736 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
703 737
704 intel_dp->DP = (DP_VOLTAGE_0_4 | 738 intel_dp->DP = (DP_VOLTAGE_0_4 |
@@ -709,7 +743,7 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
709 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) 743 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
710 intel_dp->DP |= DP_SYNC_VS_HIGH; 744 intel_dp->DP |= DP_SYNC_VS_HIGH;
711 745
712 if (HAS_PCH_CPT(dev) && !IS_eDP(intel_dp)) 746 if (HAS_PCH_CPT(dev) && !is_edp(intel_dp))
713 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT; 747 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
714 else 748 else
715 intel_dp->DP |= DP_LINK_TRAIN_OFF; 749 intel_dp->DP |= DP_LINK_TRAIN_OFF;
@@ -744,7 +778,7 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
744 if (intel_crtc->pipe == 1 && !HAS_PCH_CPT(dev)) 778 if (intel_crtc->pipe == 1 && !HAS_PCH_CPT(dev))
745 intel_dp->DP |= DP_PIPEB_SELECT; 779 intel_dp->DP |= DP_PIPEB_SELECT;
746 780
747 if (IS_eDP(intel_dp)) { 781 if (is_edp(intel_dp) && !is_pch_edp(intel_dp)) {
 748 /* don't omit the settings required for eDP */ 782 /* don't omit the settings required for eDP */
749 intel_dp->DP |= DP_PLL_ENABLE; 783 intel_dp->DP |= DP_PLL_ENABLE;
750 if (adjusted_mode->clock < 200000) 784 if (adjusted_mode->clock < 200000)
@@ -754,13 +788,15 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
754 } 788 }
755} 789}
756 790
757static void ironlake_edp_panel_on (struct drm_device *dev) 791/* Returns true if the panel was already on when called */
792static bool ironlake_edp_panel_on (struct intel_dp *intel_dp)
758{ 793{
794 struct drm_device *dev = intel_dp->base.base.dev;
759 struct drm_i915_private *dev_priv = dev->dev_private; 795 struct drm_i915_private *dev_priv = dev->dev_private;
760 u32 pp; 796 u32 pp, idle_on_mask = PP_ON | PP_SEQUENCE_STATE_ON_IDLE;
761 797
762 if (I915_READ(PCH_PP_STATUS) & PP_ON) 798 if (I915_READ(PCH_PP_STATUS) & PP_ON)
763 return; 799 return true;
764 800
765 pp = I915_READ(PCH_PP_CONTROL); 801 pp = I915_READ(PCH_PP_CONTROL);
766 802
@@ -771,21 +807,30 @@ static void ironlake_edp_panel_on (struct drm_device *dev)
771 807
772 pp |= PANEL_UNLOCK_REGS | POWER_TARGET_ON; 808 pp |= PANEL_UNLOCK_REGS | POWER_TARGET_ON;
773 I915_WRITE(PCH_PP_CONTROL, pp); 809 I915_WRITE(PCH_PP_CONTROL, pp);
810 POSTING_READ(PCH_PP_CONTROL);
811
812 /* Ouch. We need to wait here for some panels, like Dell e6510
 813 * https://bugs.freedesktop.org/show_bug.cgi?id=29278
814 */
815 msleep(300);
774 816
775 if (wait_for(I915_READ(PCH_PP_STATUS) & PP_ON, 5000, 10)) 817 if (wait_for((I915_READ(PCH_PP_STATUS) & idle_on_mask) == idle_on_mask,
818 5000))
776 DRM_ERROR("panel on wait timed out: 0x%08x\n", 819 DRM_ERROR("panel on wait timed out: 0x%08x\n",
777 I915_READ(PCH_PP_STATUS)); 820 I915_READ(PCH_PP_STATUS));
778 821
779 pp &= ~(PANEL_UNLOCK_REGS | EDP_FORCE_VDD);
780 pp |= PANEL_POWER_RESET; /* restore panel reset bit */ 822 pp |= PANEL_POWER_RESET; /* restore panel reset bit */
781 I915_WRITE(PCH_PP_CONTROL, pp); 823 I915_WRITE(PCH_PP_CONTROL, pp);
782 POSTING_READ(PCH_PP_CONTROL); 824 POSTING_READ(PCH_PP_CONTROL);
825
826 return false;
783} 827}
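
[The reworked wait above requires two conditions before declaring the panel on: the PP_ON bit and the power sequencer reaching its on-idle state. A standalone restatement of that check; the bit values are illustrative stand-ins, not copied from i915_reg.h:]

    #include <stdbool.h>
    #include <stdint.h>

    #define PP_ON                     (1u << 31) /* illustrative */
    #define PP_SEQUENCE_STATE_ON_IDLE (0x8u)     /* illustrative */

    static bool panel_fully_on(uint32_t pp_status)
    {
            const uint32_t idle_on_mask = PP_ON | PP_SEQUENCE_STATE_ON_IDLE;

            return (pp_status & idle_on_mask) == idle_on_mask;
    }

[The new bool return is what lets intel_dp_init(), later in this patch, restore the panel's previous power state after its one-off DPCD read.]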
784 828
785static void ironlake_edp_panel_off (struct drm_device *dev) 829static void ironlake_edp_panel_off (struct drm_device *dev)
786{ 830{
787 struct drm_i915_private *dev_priv = dev->dev_private; 831 struct drm_i915_private *dev_priv = dev->dev_private;
788 u32 pp; 832 u32 pp, idle_off_mask = PP_ON | PP_SEQUENCE_MASK |
833 PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK;
789 834
790 pp = I915_READ(PCH_PP_CONTROL); 835 pp = I915_READ(PCH_PP_CONTROL);
791 836
@@ -796,15 +841,20 @@ static void ironlake_edp_panel_off (struct drm_device *dev)
796 841
797 pp &= ~POWER_TARGET_ON; 842 pp &= ~POWER_TARGET_ON;
798 I915_WRITE(PCH_PP_CONTROL, pp); 843 I915_WRITE(PCH_PP_CONTROL, pp);
844 POSTING_READ(PCH_PP_CONTROL);
799 845
800 if (wait_for((I915_READ(PCH_PP_STATUS) & PP_ON) == 0, 5000, 10)) 846 if (wait_for((I915_READ(PCH_PP_STATUS) & idle_off_mask) == 0, 5000))
801 DRM_ERROR("panel off wait timed out: 0x%08x\n", 847 DRM_ERROR("panel off wait timed out: 0x%08x\n",
802 I915_READ(PCH_PP_STATUS)); 848 I915_READ(PCH_PP_STATUS));
803 849
804 /* Make sure VDD is enabled so DP AUX will work */ 850 pp |= PANEL_POWER_RESET; /* restore panel reset bit */
805 pp |= EDP_FORCE_VDD | PANEL_POWER_RESET; /* restore panel reset bit */
806 I915_WRITE(PCH_PP_CONTROL, pp); 851 I915_WRITE(PCH_PP_CONTROL, pp);
807 POSTING_READ(PCH_PP_CONTROL); 852 POSTING_READ(PCH_PP_CONTROL);
853
854 /* Ouch. We need to wait here for some panels, like Dell e6510
855 * https://bugs.freedesktop.org/show_bug.cgi?id=29278i
856 */
857 msleep(300);
808} 858}
809 859
810static void ironlake_edp_backlight_on (struct drm_device *dev) 860static void ironlake_edp_backlight_on (struct drm_device *dev)
@@ -813,6 +863,13 @@ static void ironlake_edp_backlight_on (struct drm_device *dev)
813 u32 pp; 863 u32 pp;
814 864
815 DRM_DEBUG_KMS("\n"); 865 DRM_DEBUG_KMS("\n");
866 /*
867 * If we enable the backlight right away following a panel power
868 * on, we may see slight flicker as the panel syncs with the eDP
869 * link. So delay a bit to make sure the image is solid before
870 * allowing it to appear.
871 */
872 msleep(300);
816 pp = I915_READ(PCH_PP_CONTROL); 873 pp = I915_READ(PCH_PP_CONTROL);
817 pp |= EDP_BLC_ENABLE; 874 pp |= EDP_BLC_ENABLE;
818 I915_WRITE(PCH_PP_CONTROL, pp); 875 I915_WRITE(PCH_PP_CONTROL, pp);
@@ -837,8 +894,10 @@ static void ironlake_edp_pll_on(struct drm_encoder *encoder)
837 894
838 DRM_DEBUG_KMS("\n"); 895 DRM_DEBUG_KMS("\n");
839 dpa_ctl = I915_READ(DP_A); 896 dpa_ctl = I915_READ(DP_A);
840 dpa_ctl &= ~DP_PLL_ENABLE; 897 dpa_ctl |= DP_PLL_ENABLE;
841 I915_WRITE(DP_A, dpa_ctl); 898 I915_WRITE(DP_A, dpa_ctl);
899 POSTING_READ(DP_A);
900 udelay(200);
842} 901}
843 902
844static void ironlake_edp_pll_off(struct drm_encoder *encoder) 903static void ironlake_edp_pll_off(struct drm_encoder *encoder)
@@ -848,8 +907,9 @@ static void ironlake_edp_pll_off(struct drm_encoder *encoder)
848 u32 dpa_ctl; 907 u32 dpa_ctl;
849 908
850 dpa_ctl = I915_READ(DP_A); 909 dpa_ctl = I915_READ(DP_A);
851 dpa_ctl |= DP_PLL_ENABLE; 910 dpa_ctl &= ~DP_PLL_ENABLE;
852 I915_WRITE(DP_A, dpa_ctl); 911 I915_WRITE(DP_A, dpa_ctl);
912 POSTING_READ(DP_A);
853 udelay(200); 913 udelay(200);
854} 914}
855 915
@@ -857,29 +917,31 @@ static void intel_dp_prepare(struct drm_encoder *encoder)
857{ 917{
858 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 918 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
859 struct drm_device *dev = encoder->dev; 919 struct drm_device *dev = encoder->dev;
860 struct drm_i915_private *dev_priv = dev->dev_private;
861 uint32_t dp_reg = I915_READ(intel_dp->output_reg);
862 920
863 if (IS_eDP(intel_dp)) { 921 if (is_edp(intel_dp)) {
864 ironlake_edp_backlight_off(dev); 922 ironlake_edp_backlight_off(dev);
865 ironlake_edp_panel_on(dev); 923 ironlake_edp_panel_on(intel_dp);
866 ironlake_edp_pll_on(encoder); 924 if (!is_pch_edp(intel_dp))
925 ironlake_edp_pll_on(encoder);
926 else
927 ironlake_edp_pll_off(encoder);
867 } 928 }
868 if (dp_reg & DP_PORT_EN) 929 intel_dp_link_down(intel_dp);
869 intel_dp_link_down(intel_dp);
870} 930}
871 931
872static void intel_dp_commit(struct drm_encoder *encoder) 932static void intel_dp_commit(struct drm_encoder *encoder)
873{ 933{
874 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 934 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
875 struct drm_device *dev = encoder->dev; 935 struct drm_device *dev = encoder->dev;
876 struct drm_i915_private *dev_priv = dev->dev_private;
877 uint32_t dp_reg = I915_READ(intel_dp->output_reg);
878 936
879 if (!(dp_reg & DP_PORT_EN)) { 937 intel_dp_start_link_train(intel_dp);
880 intel_dp_link_train(intel_dp); 938
881 } 939 if (is_edp(intel_dp))
882 if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) 940 ironlake_edp_panel_on(intel_dp);
941
942 intel_dp_complete_link_train(intel_dp);
943
944 if (is_edp(intel_dp))
883 ironlake_edp_backlight_on(dev); 945 ironlake_edp_backlight_on(dev);
884} 946}
885 947
@@ -892,22 +954,22 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode)
892 uint32_t dp_reg = I915_READ(intel_dp->output_reg); 954 uint32_t dp_reg = I915_READ(intel_dp->output_reg);
893 955
894 if (mode != DRM_MODE_DPMS_ON) { 956 if (mode != DRM_MODE_DPMS_ON) {
895 if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) { 957 if (is_edp(intel_dp))
896 ironlake_edp_backlight_off(dev); 958 ironlake_edp_backlight_off(dev);
959 intel_dp_link_down(intel_dp);
960 if (is_edp(intel_dp))
897 ironlake_edp_panel_off(dev); 961 ironlake_edp_panel_off(dev);
898 } 962 if (is_edp(intel_dp) && !is_pch_edp(intel_dp))
899 if (dp_reg & DP_PORT_EN)
900 intel_dp_link_down(intel_dp);
901 if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp))
902 ironlake_edp_pll_off(encoder); 963 ironlake_edp_pll_off(encoder);
903 } else { 964 } else {
965 if (is_edp(intel_dp))
966 ironlake_edp_panel_on(intel_dp);
904 if (!(dp_reg & DP_PORT_EN)) { 967 if (!(dp_reg & DP_PORT_EN)) {
905 if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) 968 intel_dp_start_link_train(intel_dp);
906 ironlake_edp_panel_on(dev); 969 intel_dp_complete_link_train(intel_dp);
907 intel_dp_link_train(intel_dp);
908 if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp))
909 ironlake_edp_backlight_on(dev);
910 } 970 }
971 if (is_edp(intel_dp))
972 ironlake_edp_backlight_on(dev);
911 } 973 }
912 intel_dp->dpms_mode = mode; 974 intel_dp->dpms_mode = mode;
913} 975}
@@ -917,14 +979,13 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode)
917 * link status information 979 * link status information
918 */ 980 */
919static bool 981static bool
920intel_dp_get_link_status(struct intel_dp *intel_dp, 982intel_dp_get_link_status(struct intel_dp *intel_dp)
921 uint8_t link_status[DP_LINK_STATUS_SIZE])
922{ 983{
923 int ret; 984 int ret;
924 985
925 ret = intel_dp_aux_native_read(intel_dp, 986 ret = intel_dp_aux_native_read(intel_dp,
926 DP_LANE0_1_STATUS, 987 DP_LANE0_1_STATUS,
927 link_status, DP_LINK_STATUS_SIZE); 988 intel_dp->link_status, DP_LINK_STATUS_SIZE);
928 if (ret != DP_LINK_STATUS_SIZE) 989 if (ret != DP_LINK_STATUS_SIZE)
929 return false; 990 return false;
930 return true; 991 return true;
@@ -999,18 +1060,15 @@ intel_dp_pre_emphasis_max(uint8_t voltage_swing)
999} 1060}
1000 1061
1001static void 1062static void
1002intel_get_adjust_train(struct intel_dp *intel_dp, 1063intel_get_adjust_train(struct intel_dp *intel_dp)
1003 uint8_t link_status[DP_LINK_STATUS_SIZE],
1004 int lane_count,
1005 uint8_t train_set[4])
1006{ 1064{
1007 uint8_t v = 0; 1065 uint8_t v = 0;
1008 uint8_t p = 0; 1066 uint8_t p = 0;
1009 int lane; 1067 int lane;
1010 1068
1011 for (lane = 0; lane < lane_count; lane++) { 1069 for (lane = 0; lane < intel_dp->lane_count; lane++) {
1012 uint8_t this_v = intel_get_adjust_request_voltage(link_status, lane); 1070 uint8_t this_v = intel_get_adjust_request_voltage(intel_dp->link_status, lane);
1013 uint8_t this_p = intel_get_adjust_request_pre_emphasis(link_status, lane); 1071 uint8_t this_p = intel_get_adjust_request_pre_emphasis(intel_dp->link_status, lane);
1014 1072
1015 if (this_v > v) 1073 if (this_v > v)
1016 v = this_v; 1074 v = this_v;
@@ -1025,15 +1083,25 @@ intel_get_adjust_train(struct intel_dp *intel_dp,
1025 p = intel_dp_pre_emphasis_max(v) | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED; 1083 p = intel_dp_pre_emphasis_max(v) | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
1026 1084
1027 for (lane = 0; lane < 4; lane++) 1085 for (lane = 0; lane < 4; lane++)
1028 train_set[lane] = v | p; 1086 intel_dp->train_set[lane] = v | p;
1029} 1087}
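
[The reduction above, stated as a pure function: take the largest voltage swing and pre-emphasis requested on any active lane, then program that pair on all four lanes. A sketch assuming the per-lane requests were already extracted from link_status; the in-tree loop additionally caps both values and sets the DP_TRAIN_MAX_*_REACHED flags, omitted here:]

    #include <stdint.h>

    static void reduce_adjust_requests(const uint8_t *req_v,
                                       const uint8_t *req_p,
                                       int lane_count,
                                       uint8_t train_set[4])
    {
            uint8_t v = 0, p = 0;
            int lane;

            for (lane = 0; lane < lane_count; lane++) {
                    if (req_v[lane] > v)
                            v = req_v[lane];
                    if (req_p[lane] > p)
                            p = req_p[lane];
            }
            for (lane = 0; lane < 4; lane++)
                    train_set[lane] = v | p;
    }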
1030 1088
1031static uint32_t 1089static uint32_t
1032intel_dp_signal_levels(uint8_t train_set, int lane_count) 1090intel_dp_signal_levels(struct intel_dp *intel_dp)
1033{ 1091{
1034 uint32_t signal_levels = 0; 1092 struct drm_device *dev = intel_dp->base.base.dev;
1093 struct drm_i915_private *dev_priv = dev->dev_private;
1094 uint32_t signal_levels = 0;
1095 u8 train_set = intel_dp->train_set[0];
1096 u32 vswing = train_set & DP_TRAIN_VOLTAGE_SWING_MASK;
1097 u32 preemphasis = train_set & DP_TRAIN_PRE_EMPHASIS_MASK;
1098
1099 if (is_edp(intel_dp)) {
1100 vswing = dev_priv->edp.vswing;
1101 preemphasis = dev_priv->edp.preemphasis;
1102 }
1035 1103
1036 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { 1104 switch (vswing) {
1037 case DP_TRAIN_VOLTAGE_SWING_400: 1105 case DP_TRAIN_VOLTAGE_SWING_400:
1038 default: 1106 default:
1039 signal_levels |= DP_VOLTAGE_0_4; 1107 signal_levels |= DP_VOLTAGE_0_4;
@@ -1048,7 +1116,7 @@ intel_dp_signal_levels(uint8_t train_set, int lane_count)
1048 signal_levels |= DP_VOLTAGE_1_2; 1116 signal_levels |= DP_VOLTAGE_1_2;
1049 break; 1117 break;
1050 } 1118 }
1051 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) { 1119 switch (preemphasis) {
1052 case DP_TRAIN_PRE_EMPHASIS_0: 1120 case DP_TRAIN_PRE_EMPHASIS_0:
1053 default: 1121 default:
1054 signal_levels |= DP_PRE_EMPHASIS_0; 1122 signal_levels |= DP_PRE_EMPHASIS_0;
@@ -1116,18 +1184,18 @@ intel_clock_recovery_ok(uint8_t link_status[DP_LINK_STATUS_SIZE], int lane_count
1116 DP_LANE_CHANNEL_EQ_DONE|\ 1184 DP_LANE_CHANNEL_EQ_DONE|\
1117 DP_LANE_SYMBOL_LOCKED) 1185 DP_LANE_SYMBOL_LOCKED)
1118static bool 1186static bool
1119intel_channel_eq_ok(uint8_t link_status[DP_LINK_STATUS_SIZE], int lane_count) 1187intel_channel_eq_ok(struct intel_dp *intel_dp)
1120{ 1188{
1121 uint8_t lane_align; 1189 uint8_t lane_align;
1122 uint8_t lane_status; 1190 uint8_t lane_status;
1123 int lane; 1191 int lane;
1124 1192
1125 lane_align = intel_dp_link_status(link_status, 1193 lane_align = intel_dp_link_status(intel_dp->link_status,
1126 DP_LANE_ALIGN_STATUS_UPDATED); 1194 DP_LANE_ALIGN_STATUS_UPDATED);
1127 if ((lane_align & DP_INTERLANE_ALIGN_DONE) == 0) 1195 if ((lane_align & DP_INTERLANE_ALIGN_DONE) == 0)
1128 return false; 1196 return false;
1129 for (lane = 0; lane < lane_count; lane++) { 1197 for (lane = 0; lane < intel_dp->lane_count; lane++) {
1130 lane_status = intel_get_lane_status(link_status, lane); 1198 lane_status = intel_get_lane_status(intel_dp->link_status, lane);
1131 if ((lane_status & CHANNEL_EQ_BITS) != CHANNEL_EQ_BITS) 1199 if ((lane_status & CHANNEL_EQ_BITS) != CHANNEL_EQ_BITS)
1132 return false; 1200 return false;
1133 } 1201 }
@@ -1135,159 +1203,194 @@ intel_channel_eq_ok(uint8_t link_status[DP_LINK_STATUS_SIZE], int lane_count)
1135} 1203}
1136 1204
1137static bool 1205static bool
1206intel_dp_aux_handshake_required(struct intel_dp *intel_dp)
1207{
1208 struct drm_device *dev = intel_dp->base.base.dev;
1209 struct drm_i915_private *dev_priv = dev->dev_private;
1210
1211 if (is_edp(intel_dp) && dev_priv->no_aux_handshake)
1212 return false;
1213
1214 return true;
1215}
1216
1217static bool
1138intel_dp_set_link_train(struct intel_dp *intel_dp, 1218intel_dp_set_link_train(struct intel_dp *intel_dp,
1139 uint32_t dp_reg_value, 1219 uint32_t dp_reg_value,
1140 uint8_t dp_train_pat, 1220 uint8_t dp_train_pat)
1141 uint8_t train_set[4])
1142{ 1221{
1143 struct drm_device *dev = intel_dp->base.enc.dev; 1222 struct drm_device *dev = intel_dp->base.base.dev;
1144 struct drm_i915_private *dev_priv = dev->dev_private; 1223 struct drm_i915_private *dev_priv = dev->dev_private;
1145 int ret; 1224 int ret;
1146 1225
1147 I915_WRITE(intel_dp->output_reg, dp_reg_value); 1226 I915_WRITE(intel_dp->output_reg, dp_reg_value);
1148 POSTING_READ(intel_dp->output_reg); 1227 POSTING_READ(intel_dp->output_reg);
1149 1228
1229 if (!intel_dp_aux_handshake_required(intel_dp))
1230 return true;
1231
1150 intel_dp_aux_native_write_1(intel_dp, 1232 intel_dp_aux_native_write_1(intel_dp,
1151 DP_TRAINING_PATTERN_SET, 1233 DP_TRAINING_PATTERN_SET,
1152 dp_train_pat); 1234 dp_train_pat);
1153 1235
1154 ret = intel_dp_aux_native_write(intel_dp, 1236 ret = intel_dp_aux_native_write(intel_dp,
1155 DP_TRAINING_LANE0_SET, train_set, 4); 1237 DP_TRAINING_LANE0_SET,
1238 intel_dp->train_set, 4);
1156 if (ret != 4) 1239 if (ret != 4)
1157 return false; 1240 return false;
1158 1241
1159 return true; 1242 return true;
1160} 1243}
1161 1244
1245/* Enable corresponding port and start training pattern 1 */
1162static void 1246static void
1163intel_dp_link_train(struct intel_dp *intel_dp) 1247intel_dp_start_link_train(struct intel_dp *intel_dp)
1164{ 1248{
1165 struct drm_device *dev = intel_dp->base.enc.dev; 1249 struct drm_device *dev = intel_dp->base.base.dev;
1166 struct drm_i915_private *dev_priv = dev->dev_private; 1250 struct drm_i915_private *dev_priv = dev->dev_private;
1167 uint8_t train_set[4]; 1251 struct intel_crtc *intel_crtc = to_intel_crtc(intel_dp->base.base.crtc);
1168 uint8_t link_status[DP_LINK_STATUS_SIZE];
1169 int i; 1252 int i;
1170 uint8_t voltage; 1253 uint8_t voltage;
1171 bool clock_recovery = false; 1254 bool clock_recovery = false;
1172 bool channel_eq = false;
1173 int tries; 1255 int tries;
1174 u32 reg; 1256 u32 reg;
1175 uint32_t DP = intel_dp->DP; 1257 uint32_t DP = intel_dp->DP;
1176 struct intel_crtc *intel_crtc = to_intel_crtc(intel_dp->base.enc.crtc);
1177 1258
1178 /* Enable output, wait for it to become active */ 1259 /* Enable output, wait for it to become active */
1179 I915_WRITE(intel_dp->output_reg, intel_dp->DP); 1260 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
1180 POSTING_READ(intel_dp->output_reg); 1261 POSTING_READ(intel_dp->output_reg);
1181 intel_wait_for_vblank(dev, intel_crtc->pipe); 1262 intel_wait_for_vblank(dev, intel_crtc->pipe);
1182 1263
1183 /* Write the link configuration data */ 1264 if (intel_dp_aux_handshake_required(intel_dp))
1184 intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET, 1265 /* Write the link configuration data */
1185 intel_dp->link_configuration, 1266 intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET,
1186 DP_LINK_CONFIGURATION_SIZE); 1267 intel_dp->link_configuration,
1268 DP_LINK_CONFIGURATION_SIZE);
1187 1269
1188 DP |= DP_PORT_EN; 1270 DP |= DP_PORT_EN;
1189 if (HAS_PCH_CPT(dev) && !IS_eDP(intel_dp)) 1271 if (HAS_PCH_CPT(dev) && !is_edp(intel_dp))
1190 DP &= ~DP_LINK_TRAIN_MASK_CPT; 1272 DP &= ~DP_LINK_TRAIN_MASK_CPT;
1191 else 1273 else
1192 DP &= ~DP_LINK_TRAIN_MASK; 1274 DP &= ~DP_LINK_TRAIN_MASK;
1193 memset(train_set, 0, 4); 1275 memset(intel_dp->train_set, 0, 4);
1194 voltage = 0xff; 1276 voltage = 0xff;
1195 tries = 0; 1277 tries = 0;
1196 clock_recovery = false; 1278 clock_recovery = false;
1197 for (;;) { 1279 for (;;) {
1198 /* Use train_set[0] to set the voltage and pre emphasis values */ 1280 /* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */
1199 uint32_t signal_levels; 1281 uint32_t signal_levels;
1200 if (IS_GEN6(dev) && IS_eDP(intel_dp)) { 1282 if (IS_GEN6(dev) && is_edp(intel_dp)) {
1201 signal_levels = intel_gen6_edp_signal_levels(train_set[0]); 1283 signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]);
1202 DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels; 1284 DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
1203 } else { 1285 } else {
1204 signal_levels = intel_dp_signal_levels(train_set[0], intel_dp->lane_count); 1286 signal_levels = intel_dp_signal_levels(intel_dp);
1205 DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; 1287 DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
1206 } 1288 }
1207 1289
1208 if (HAS_PCH_CPT(dev) && !IS_eDP(intel_dp)) 1290 if (HAS_PCH_CPT(dev) && !is_edp(intel_dp))
1209 reg = DP | DP_LINK_TRAIN_PAT_1_CPT; 1291 reg = DP | DP_LINK_TRAIN_PAT_1_CPT;
1210 else 1292 else
1211 reg = DP | DP_LINK_TRAIN_PAT_1; 1293 reg = DP | DP_LINK_TRAIN_PAT_1;
1212 1294
1213 if (!intel_dp_set_link_train(intel_dp, reg, 1295 if (!intel_dp_set_link_train(intel_dp, reg,
1214 DP_TRAINING_PATTERN_1, train_set)) 1296 DP_TRAINING_PATTERN_1))
1215 break; 1297 break;
1216 /* Set training pattern 1 */ 1298 /* Set training pattern 1 */
1217 1299
1218 udelay(100); 1300 udelay(500);
1219 if (!intel_dp_get_link_status(intel_dp, link_status)) 1301 if (intel_dp_aux_handshake_required(intel_dp)) {
1220 break; 1302 break;
1303 } else {
1304 if (!intel_dp_get_link_status(intel_dp))
1305 break;
1221 1306
1222 if (intel_clock_recovery_ok(link_status, intel_dp->lane_count)) { 1307 if (intel_clock_recovery_ok(intel_dp->link_status, intel_dp->lane_count)) {
1223 clock_recovery = true; 1308 clock_recovery = true;
1224 break;
1225 }
1226
1227 /* Check to see if we've tried the max voltage */
1228 for (i = 0; i < intel_dp->lane_count; i++)
1229 if ((train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
1230 break; 1309 break;
1231 if (i == intel_dp->lane_count) 1310 }
1232 break;
1233 1311
1234 /* Check to see if we've tried the same voltage 5 times */ 1312 /* Check to see if we've tried the max voltage */
1235 if ((train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) { 1313 for (i = 0; i < intel_dp->lane_count; i++)
1236 ++tries; 1314 if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
1237 if (tries == 5) 1315 break;
1316 if (i == intel_dp->lane_count)
1238 break; 1317 break;
1239 } else
1240 tries = 0;
1241 voltage = train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
1242 1318
1243 /* Compute new train_set as requested by target */ 1319 /* Check to see if we've tried the same voltage 5 times */
1244 intel_get_adjust_train(intel_dp, link_status, intel_dp->lane_count, train_set); 1320 if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
1321 ++tries;
1322 if (tries == 5)
1323 break;
1324 } else
1325 tries = 0;
1326 voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
1327
1328 /* Compute new intel_dp->train_set as requested by target */
1329 intel_get_adjust_train(intel_dp);
1330 }
1245 } 1331 }
1246 1332
1333 intel_dp->DP = DP;
1334}
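
[The exit conditions of the clock-recovery loop above, restated standalone. The DP_TRAIN_* values mirror drm_dp_helper.h; the helper itself is illustrative, not part of the patch:]

    #include <stdbool.h>
    #include <stdint.h>

    #define DP_TRAIN_VOLTAGE_SWING_MASK 0x3
    #define DP_TRAIN_MAX_SWING_REACHED  (1 << 2)

    static bool keep_training(const uint8_t train_set[4], int lane_count,
                              uint8_t *last_voltage, int *tries)
    {
            int i;

            for (i = 0; i < lane_count; i++)
                    if (!(train_set[i] & DP_TRAIN_MAX_SWING_REACHED))
                            break;
            if (i == lane_count)
                    return false;   /* every lane already at max swing */

            if ((train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == *last_voltage) {
                    if (++*tries == 5)
                            return false;   /* same voltage tried 5 times */
            } else {
                    *tries = 0;
            }
            *last_voltage = train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
            return true;
    }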
1335
1336static void
1337intel_dp_complete_link_train(struct intel_dp *intel_dp)
1338{
1339 struct drm_device *dev = intel_dp->base.base.dev;
1340 struct drm_i915_private *dev_priv = dev->dev_private;
1341 bool channel_eq = false;
1342 int tries;
1343 u32 reg;
1344 uint32_t DP = intel_dp->DP;
1345
1247 /* channel equalization */ 1346 /* channel equalization */
1248 tries = 0; 1347 tries = 0;
1249 channel_eq = false; 1348 channel_eq = false;
1250 for (;;) { 1349 for (;;) {
1251 /* Use train_set[0] to set the voltage and pre emphasis values */ 1350 /* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */
1252 uint32_t signal_levels; 1351 uint32_t signal_levels;
1253 1352
1254 if (IS_GEN6(dev) && IS_eDP(intel_dp)) { 1353 if (IS_GEN6(dev) && is_edp(intel_dp)) {
1255 signal_levels = intel_gen6_edp_signal_levels(train_set[0]); 1354 signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]);
1256 DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels; 1355 DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
1257 } else { 1356 } else {
1258 signal_levels = intel_dp_signal_levels(train_set[0], intel_dp->lane_count); 1357 signal_levels = intel_dp_signal_levels(intel_dp);
1259 DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; 1358 DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
1260 } 1359 }
1261 1360
1262 if (HAS_PCH_CPT(dev) && !IS_eDP(intel_dp)) 1361 if (HAS_PCH_CPT(dev) && !is_edp(intel_dp))
1263 reg = DP | DP_LINK_TRAIN_PAT_2_CPT; 1362 reg = DP | DP_LINK_TRAIN_PAT_2_CPT;
1264 else 1363 else
1265 reg = DP | DP_LINK_TRAIN_PAT_2; 1364 reg = DP | DP_LINK_TRAIN_PAT_2;
1266 1365
1267 /* channel eq pattern */ 1366 /* channel eq pattern */
1268 if (!intel_dp_set_link_train(intel_dp, reg, 1367 if (!intel_dp_set_link_train(intel_dp, reg,
1269 DP_TRAINING_PATTERN_2, train_set)) 1368 DP_TRAINING_PATTERN_2))
1270 break; 1369 break;
1271 1370
1272 udelay(400); 1371 udelay(500);
1273 if (!intel_dp_get_link_status(intel_dp, link_status))
1274 break;
1275 1372
1276 if (intel_channel_eq_ok(link_status, intel_dp->lane_count)) { 1373 if (!intel_dp_aux_handshake_required(intel_dp)) {
1277 channel_eq = true;
1278 break; 1374 break;
1279 } 1375 } else {
1376 if (!intel_dp_get_link_status(intel_dp))
1377 break;
1280 1378
1281 /* Try 5 times */ 1379 if (intel_channel_eq_ok(intel_dp)) {
1282 if (tries > 5) 1380 channel_eq = true;
1283 break; 1381 break;
1382 }
1284 1383
1285 /* Compute new train_set as requested by target */ 1384 /* Try 5 times */
1286 intel_get_adjust_train(intel_dp, link_status, intel_dp->lane_count, train_set); 1385 if (tries > 5)
1287 ++tries; 1386 break;
1288 }
1289 1387
1290 if (HAS_PCH_CPT(dev) && !IS_eDP(intel_dp)) 1388 /* Compute new intel_dp->train_set as requested by target */
1389 intel_get_adjust_train(intel_dp);
1390 ++tries;
1391 }
1392 }
1393 if (HAS_PCH_CPT(dev) && !is_edp(intel_dp))
1291 reg = DP | DP_LINK_TRAIN_OFF_CPT; 1394 reg = DP | DP_LINK_TRAIN_OFF_CPT;
1292 else 1395 else
1293 reg = DP | DP_LINK_TRAIN_OFF; 1396 reg = DP | DP_LINK_TRAIN_OFF;
@@ -1301,32 +1404,31 @@ intel_dp_link_train(struct intel_dp *intel_dp)
1301static void 1404static void
1302intel_dp_link_down(struct intel_dp *intel_dp) 1405intel_dp_link_down(struct intel_dp *intel_dp)
1303{ 1406{
1304 struct drm_device *dev = intel_dp->base.enc.dev; 1407 struct drm_device *dev = intel_dp->base.base.dev;
1305 struct drm_i915_private *dev_priv = dev->dev_private; 1408 struct drm_i915_private *dev_priv = dev->dev_private;
1306 uint32_t DP = intel_dp->DP; 1409 uint32_t DP = intel_dp->DP;
1307 1410
1308 DRM_DEBUG_KMS("\n"); 1411 DRM_DEBUG_KMS("\n");
1309 1412
1310 if (IS_eDP(intel_dp)) { 1413 if (is_edp(intel_dp)) {
1311 DP &= ~DP_PLL_ENABLE; 1414 DP &= ~DP_PLL_ENABLE;
1312 I915_WRITE(intel_dp->output_reg, DP); 1415 I915_WRITE(intel_dp->output_reg, DP);
1313 POSTING_READ(intel_dp->output_reg); 1416 POSTING_READ(intel_dp->output_reg);
1314 udelay(100); 1417 udelay(100);
1315 } 1418 }
1316 1419
1317 if (HAS_PCH_CPT(dev) && !IS_eDP(intel_dp)) { 1420 if (HAS_PCH_CPT(dev) && !is_edp(intel_dp)) {
1318 DP &= ~DP_LINK_TRAIN_MASK_CPT; 1421 DP &= ~DP_LINK_TRAIN_MASK_CPT;
1319 I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT); 1422 I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
1320 POSTING_READ(intel_dp->output_reg);
1321 } else { 1423 } else {
1322 DP &= ~DP_LINK_TRAIN_MASK; 1424 DP &= ~DP_LINK_TRAIN_MASK;
1323 I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE); 1425 I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE);
1324 POSTING_READ(intel_dp->output_reg);
1325 } 1426 }
1427 POSTING_READ(intel_dp->output_reg);
1326 1428
1327 udelay(17000); 1429 msleep(17);
1328 1430
1329 if (IS_eDP(intel_dp)) 1431 if (is_edp(intel_dp))
1330 DP |= DP_LINK_TRAIN_OFF; 1432 DP |= DP_LINK_TRAIN_OFF;
1331 I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN); 1433 I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
1332 POSTING_READ(intel_dp->output_reg); 1434 POSTING_READ(intel_dp->output_reg);
@@ -1344,32 +1446,34 @@ intel_dp_link_down(struct intel_dp *intel_dp)
1344static void 1446static void
1345intel_dp_check_link_status(struct intel_dp *intel_dp) 1447intel_dp_check_link_status(struct intel_dp *intel_dp)
1346{ 1448{
1347 uint8_t link_status[DP_LINK_STATUS_SIZE]; 1449 if (!intel_dp->base.base.crtc)
1348
1349 if (!intel_dp->base.enc.crtc)
1350 return; 1450 return;
1351 1451
1352 if (!intel_dp_get_link_status(intel_dp, link_status)) { 1452 if (!intel_dp_get_link_status(intel_dp)) {
1353 intel_dp_link_down(intel_dp); 1453 intel_dp_link_down(intel_dp);
1354 return; 1454 return;
1355 } 1455 }
1356 1456
1357 if (!intel_channel_eq_ok(link_status, intel_dp->lane_count)) 1457 if (!intel_channel_eq_ok(intel_dp)) {
1358 intel_dp_link_train(intel_dp); 1458 intel_dp_start_link_train(intel_dp);
1459 intel_dp_complete_link_train(intel_dp);
1460 }
1359} 1461}
1360 1462
1361static enum drm_connector_status 1463static enum drm_connector_status
1362ironlake_dp_detect(struct drm_connector *connector) 1464ironlake_dp_detect(struct intel_dp *intel_dp)
1363{ 1465{
1364 struct drm_encoder *encoder = intel_attached_encoder(connector);
1365 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
1366 enum drm_connector_status status; 1466 enum drm_connector_status status;
1367 1467
1468 /* Can't disconnect eDP */
1469 if (is_edp(intel_dp))
1470 return connector_status_connected;
1471
1368 status = connector_status_disconnected; 1472 status = connector_status_disconnected;
1369 if (intel_dp_aux_native_read(intel_dp, 1473 if (intel_dp_aux_native_read(intel_dp,
1370 0x000, intel_dp->dpcd, 1474 0x000, intel_dp->dpcd,
1371 sizeof (intel_dp->dpcd)) == sizeof (intel_dp->dpcd)) 1475 sizeof (intel_dp->dpcd))
1372 { 1476 == sizeof(intel_dp->dpcd)) {
1373 if (intel_dp->dpcd[0] != 0) 1477 if (intel_dp->dpcd[0] != 0)
1374 status = connector_status_connected; 1478 status = connector_status_connected;
1375 } 1479 }
@@ -1378,26 +1482,13 @@ ironlake_dp_detect(struct drm_connector *connector)
1378 return status; 1482 return status;
1379} 1483}
1380 1484
1381/**
1382 * Uses CRT_HOTPLUG_EN and CRT_HOTPLUG_STAT to detect DP connection.
1383 *
1384 * \return true if DP port is connected.
1385 * \return false if DP port is disconnected.
1386 */
1387static enum drm_connector_status 1485static enum drm_connector_status
1388intel_dp_detect(struct drm_connector *connector, bool force) 1486g4x_dp_detect(struct intel_dp *intel_dp)
1389{ 1487{
1390 struct drm_encoder *encoder = intel_attached_encoder(connector); 1488 struct drm_device *dev = intel_dp->base.base.dev;
1391 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
1392 struct drm_device *dev = intel_dp->base.enc.dev;
1393 struct drm_i915_private *dev_priv = dev->dev_private; 1489 struct drm_i915_private *dev_priv = dev->dev_private;
1394 uint32_t temp, bit;
1395 enum drm_connector_status status; 1490 enum drm_connector_status status;
1396 1491 uint32_t temp, bit;
1397 intel_dp->has_audio = false;
1398
1399 if (HAS_PCH_SPLIT(dev))
1400 return ironlake_dp_detect(connector);
1401 1492
1402 switch (intel_dp->output_reg) { 1493 switch (intel_dp->output_reg) {
1403 case DP_B: 1494 case DP_B:
@@ -1419,31 +1510,66 @@ intel_dp_detect(struct drm_connector *connector, bool force)
1419 return connector_status_disconnected; 1510 return connector_status_disconnected;
1420 1511
1421 status = connector_status_disconnected; 1512 status = connector_status_disconnected;
1422 if (intel_dp_aux_native_read(intel_dp, 1513 if (intel_dp_aux_native_read(intel_dp, 0x000, intel_dp->dpcd,
1423 0x000, intel_dp->dpcd,
1424 sizeof (intel_dp->dpcd)) == sizeof (intel_dp->dpcd)) 1514 sizeof (intel_dp->dpcd)) == sizeof (intel_dp->dpcd))
1425 { 1515 {
1426 if (intel_dp->dpcd[0] != 0) 1516 if (intel_dp->dpcd[0] != 0)
1427 status = connector_status_connected; 1517 status = connector_status_connected;
1428 } 1518 }
1429 return status; 1519
 1520 return status;
1521}
1522
1523/**
 1524 * Detects a DP connection: via DPCD probing on PCH platforms, otherwise
 1525 * via CRT_HOTPLUG_EN/CRT_HOTPLUG_STAT plus a DPCD probe.
 1526 *
 1527 * \return the drm_connector_status of the DP port.
1528 */
1529static enum drm_connector_status
1530intel_dp_detect(struct drm_connector *connector, bool force)
1531{
1532 struct intel_dp *intel_dp = intel_attached_dp(connector);
1533 struct drm_device *dev = intel_dp->base.base.dev;
1534 enum drm_connector_status status;
1535 struct edid *edid = NULL;
1536
1537 intel_dp->has_audio = false;
1538
1539 if (HAS_PCH_SPLIT(dev))
1540 status = ironlake_dp_detect(intel_dp);
1541 else
1542 status = g4x_dp_detect(intel_dp);
1543 if (status != connector_status_connected)
1544 return status;
1545
1546 if (intel_dp->force_audio) {
1547 intel_dp->has_audio = intel_dp->force_audio > 0;
1548 } else {
1549 edid = drm_get_edid(connector, &intel_dp->adapter);
1550 if (edid) {
1551 intel_dp->has_audio = drm_detect_monitor_audio(edid);
1552 connector->display_info.raw_edid = NULL;
1553 kfree(edid);
1554 }
1555 }
1556
1557 return connector_status_connected;
1430} 1558}
1431 1559
1432static int intel_dp_get_modes(struct drm_connector *connector) 1560static int intel_dp_get_modes(struct drm_connector *connector)
1433{ 1561{
1434 struct drm_encoder *encoder = intel_attached_encoder(connector); 1562 struct intel_dp *intel_dp = intel_attached_dp(connector);
1435 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 1563 struct drm_device *dev = intel_dp->base.base.dev;
1436 struct drm_device *dev = intel_dp->base.enc.dev;
1437 struct drm_i915_private *dev_priv = dev->dev_private; 1564 struct drm_i915_private *dev_priv = dev->dev_private;
1438 int ret; 1565 int ret;
1439 1566
1440 /* We should parse the EDID data and find out if it has an audio sink 1567 /* We should parse the EDID data and find out if it has an audio sink
1441 */ 1568 */
1442 1569
1443 ret = intel_ddc_get_modes(connector, intel_dp->base.ddc_bus); 1570 ret = intel_ddc_get_modes(connector, &intel_dp->adapter);
1444 if (ret) { 1571 if (ret) {
1445 if ((IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) && 1572 if (is_edp(intel_dp) && !dev_priv->panel_fixed_mode) {
1446 !dev_priv->panel_fixed_mode) {
1447 struct drm_display_mode *newmode; 1573 struct drm_display_mode *newmode;
1448 list_for_each_entry(newmode, &connector->probed_modes, 1574 list_for_each_entry(newmode, &connector->probed_modes,
1449 head) { 1575 head) {
@@ -1459,7 +1585,7 @@ static int intel_dp_get_modes(struct drm_connector *connector)
1459 } 1585 }
1460 1586
1461 /* if eDP has no EDID, try to use fixed panel mode from VBT */ 1587 /* if eDP has no EDID, try to use fixed panel mode from VBT */
1462 if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) { 1588 if (is_edp(intel_dp)) {
1463 if (dev_priv->panel_fixed_mode != NULL) { 1589 if (dev_priv->panel_fixed_mode != NULL) {
1464 struct drm_display_mode *mode; 1590 struct drm_display_mode *mode;
1465 mode = drm_mode_duplicate(dev, dev_priv->panel_fixed_mode); 1591 mode = drm_mode_duplicate(dev, dev_priv->panel_fixed_mode);
@@ -1470,6 +1596,46 @@ static int intel_dp_get_modes(struct drm_connector *connector)
1470 return 0; 1596 return 0;
1471} 1597}
1472 1598
1599static int
1600intel_dp_set_property(struct drm_connector *connector,
1601 struct drm_property *property,
1602 uint64_t val)
1603{
1604 struct intel_dp *intel_dp = intel_attached_dp(connector);
1605 int ret;
1606
1607 ret = drm_connector_property_set_value(connector, property, val);
1608 if (ret)
1609 return ret;
1610
1611 if (property == intel_dp->force_audio_property) {
1612 if (val == intel_dp->force_audio)
1613 return 0;
1614
1615 intel_dp->force_audio = val;
1616
1617 if (val > 0 && intel_dp->has_audio)
1618 return 0;
1619 if (val < 0 && !intel_dp->has_audio)
1620 return 0;
1621
1622 intel_dp->has_audio = val > 0;
1623 goto done;
1624 }
1625
1626 return -EINVAL;
1627
1628done:
1629 if (intel_dp->base.base.crtc) {
1630 struct drm_crtc *crtc = intel_dp->base.base.crtc;
1631 drm_crtc_helper_set_mode(crtc, &crtc->mode,
1632 crtc->x, crtc->y,
1633 crtc->fb);
1634 }
1635
1636 return 0;
1637}
1638
1473static void 1639static void
1474intel_dp_destroy (struct drm_connector *connector) 1640intel_dp_destroy (struct drm_connector *connector)
1475{ 1641{
@@ -1478,6 +1644,15 @@ intel_dp_destroy (struct drm_connector *connector)
1478 kfree(connector); 1644 kfree(connector);
1479} 1645}
1480 1646
1647static void intel_dp_encoder_destroy(struct drm_encoder *encoder)
1648{
1649 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
1650
1651 i2c_del_adapter(&intel_dp->adapter);
1652 drm_encoder_cleanup(encoder);
1653 kfree(intel_dp);
1654}
1655
1481static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = { 1656static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = {
1482 .dpms = intel_dp_dpms, 1657 .dpms = intel_dp_dpms,
1483 .mode_fixup = intel_dp_mode_fixup, 1658 .mode_fixup = intel_dp_mode_fixup,
@@ -1490,20 +1665,21 @@ static const struct drm_connector_funcs intel_dp_connector_funcs = {
1490 .dpms = drm_helper_connector_dpms, 1665 .dpms = drm_helper_connector_dpms,
1491 .detect = intel_dp_detect, 1666 .detect = intel_dp_detect,
1492 .fill_modes = drm_helper_probe_single_connector_modes, 1667 .fill_modes = drm_helper_probe_single_connector_modes,
1668 .set_property = intel_dp_set_property,
1493 .destroy = intel_dp_destroy, 1669 .destroy = intel_dp_destroy,
1494}; 1670};
1495 1671
1496static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = { 1672static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
1497 .get_modes = intel_dp_get_modes, 1673 .get_modes = intel_dp_get_modes,
1498 .mode_valid = intel_dp_mode_valid, 1674 .mode_valid = intel_dp_mode_valid,
1499 .best_encoder = intel_attached_encoder, 1675 .best_encoder = intel_best_encoder,
1500}; 1676};
1501 1677
1502static const struct drm_encoder_funcs intel_dp_enc_funcs = { 1678static const struct drm_encoder_funcs intel_dp_enc_funcs = {
1503 .destroy = intel_encoder_destroy, 1679 .destroy = intel_dp_encoder_destroy,
1504}; 1680};
1505 1681
1506void 1682static void
1507intel_dp_hot_plug(struct intel_encoder *intel_encoder) 1683intel_dp_hot_plug(struct intel_encoder *intel_encoder)
1508{ 1684{
1509 struct intel_dp *intel_dp = container_of(intel_encoder, struct intel_dp, base); 1685 struct intel_dp *intel_dp = container_of(intel_encoder, struct intel_dp, base);
@@ -1554,6 +1730,20 @@ bool intel_dpd_is_edp(struct drm_device *dev)
1554 return false; 1730 return false;
1555} 1731}
1556 1732
1733static void
1734intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
1735{
1736 struct drm_device *dev = connector->dev;
1737
1738 intel_dp->force_audio_property =
1739 drm_property_create(dev, DRM_MODE_PROP_RANGE, "force_audio", 2);
1740 if (intel_dp->force_audio_property) {
1741 intel_dp->force_audio_property->values[0] = -1;
1742 intel_dp->force_audio_property->values[1] = 1;
1743 drm_connector_attach_property(connector, intel_dp->force_audio_property, 0);
1744 }
1745}
1746
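[Read together with intel_dp_detect() and intel_dp_set_property() above, the three values of this range property mean (a summary of the existing logic, not new behavior):]

    force_audio = -1   audio forced off, EDID ignored
    force_audio =  0   default: probe the sink's EDID for audio support
    force_audio = +1   audio forced on, EDID ignored
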
1557void 1747void
1558intel_dp_init(struct drm_device *dev, int output_reg) 1748intel_dp_init(struct drm_device *dev, int output_reg)
1559{ 1749{
@@ -1580,7 +1770,7 @@ intel_dp_init(struct drm_device *dev, int output_reg)
1580 if (intel_dpd_is_edp(dev)) 1770 if (intel_dpd_is_edp(dev))
1581 intel_dp->is_pch_edp = true; 1771 intel_dp->is_pch_edp = true;
1582 1772
1583 if (output_reg == DP_A || IS_PCH_eDP(intel_dp)) { 1773 if (output_reg == DP_A || is_pch_edp(intel_dp)) {
1584 type = DRM_MODE_CONNECTOR_eDP; 1774 type = DRM_MODE_CONNECTOR_eDP;
1585 intel_encoder->type = INTEL_OUTPUT_EDP; 1775 intel_encoder->type = INTEL_OUTPUT_EDP;
1586 } else { 1776 } else {
@@ -1601,7 +1791,7 @@ intel_dp_init(struct drm_device *dev, int output_reg)
1601 else if (output_reg == DP_D || output_reg == PCH_DP_D) 1791 else if (output_reg == DP_D || output_reg == PCH_DP_D)
1602 intel_encoder->clone_mask = (1 << INTEL_DP_D_CLONE_BIT); 1792 intel_encoder->clone_mask = (1 << INTEL_DP_D_CLONE_BIT);
1603 1793
1604 if (IS_eDP(intel_dp)) 1794 if (is_edp(intel_dp))
1605 intel_encoder->clone_mask = (1 << INTEL_EDP_CLONE_BIT); 1795 intel_encoder->clone_mask = (1 << INTEL_EDP_CLONE_BIT);
1606 1796
1607 intel_encoder->crtc_mask = (1 << 0) | (1 << 1); 1797 intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
@@ -1612,12 +1802,11 @@ intel_dp_init(struct drm_device *dev, int output_reg)
1612 intel_dp->has_audio = false; 1802 intel_dp->has_audio = false;
1613 intel_dp->dpms_mode = DRM_MODE_DPMS_ON; 1803 intel_dp->dpms_mode = DRM_MODE_DPMS_ON;
1614 1804
1615 drm_encoder_init(dev, &intel_encoder->enc, &intel_dp_enc_funcs, 1805 drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
1616 DRM_MODE_ENCODER_TMDS); 1806 DRM_MODE_ENCODER_TMDS);
1617 drm_encoder_helper_add(&intel_encoder->enc, &intel_dp_helper_funcs); 1807 drm_encoder_helper_add(&intel_encoder->base, &intel_dp_helper_funcs);
1618 1808
1619 drm_mode_connector_attach_encoder(&intel_connector->base, 1809 intel_connector_attach_encoder(intel_connector, intel_encoder);
1620 &intel_encoder->enc);
1621 drm_sysfs_connector_add(connector); 1810 drm_sysfs_connector_add(connector);
1622 1811
1623 /* Set up the DDC bus. */ 1812 /* Set up the DDC bus. */
@@ -1647,10 +1836,29 @@ intel_dp_init(struct drm_device *dev, int output_reg)
1647 1836
1648 intel_dp_i2c_init(intel_dp, intel_connector, name); 1837 intel_dp_i2c_init(intel_dp, intel_connector, name);
1649 1838
1650 intel_encoder->ddc_bus = &intel_dp->adapter; 1839 /* Cache some DPCD data in the eDP case */
1840 if (is_edp(intel_dp)) {
1841 int ret;
1842 bool was_on;
1843
1844 was_on = ironlake_edp_panel_on(intel_dp);
1845 ret = intel_dp_aux_native_read(intel_dp, DP_DPCD_REV,
1846 intel_dp->dpcd,
1847 sizeof(intel_dp->dpcd));
1848 if (ret == sizeof(intel_dp->dpcd)) {
1849 if (intel_dp->dpcd[0] >= 0x11)
1850 dev_priv->no_aux_handshake = intel_dp->dpcd[3] &
1851 DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
1852 } else {
1853 DRM_ERROR("failed to retrieve link info\n");
1854 }
1855 if (!was_on)
1856 ironlake_edp_panel_off(dev);
1857 }
1858
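[What the cache block above extracts, as a standalone predicate. The bit position follows the DisplayPort spec's MAX_DOWNSPREAD register (DPCD byte 0x003); treat the literal value here as an assumption:]

    #include <stdbool.h>
    #include <stdint.h>

    #define DP_NO_AUX_HANDSHAKE_LINK_TRAINING (1 << 6) /* DPCD 0x003, bit 6 */

    static bool sink_skips_aux_handshake(const uint8_t dpcd[4])
    {
            if (dpcd[0] < 0x11)     /* needs DPCD revision 1.1+ */
                    return false;
            return dpcd[3] & DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
    }

[intel_dp_aux_handshake_required(), earlier in this patch, consults the cached result so that eDP panels advertising the bit skip the DPCD writes during link training.]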
1651 intel_encoder->hot_plug = intel_dp_hot_plug; 1859 intel_encoder->hot_plug = intel_dp_hot_plug;
1652 1860
1653 if (output_reg == DP_A || IS_PCH_eDP(intel_dp)) { 1861 if (is_edp(intel_dp)) {
1654 /* initialize panel mode from VBT if available for eDP */ 1862 /* initialize panel mode from VBT if available for eDP */
1655 if (dev_priv->lfp_lvds_vbt_mode) { 1863 if (dev_priv->lfp_lvds_vbt_mode) {
1656 dev_priv->panel_fixed_mode = 1864 dev_priv->panel_fixed_mode =
@@ -1662,6 +1870,8 @@ intel_dp_init(struct drm_device *dev, int output_reg)
1662 } 1870 }
1663 } 1871 }
1664 1872
1873 intel_dp_add_properties(intel_dp, connector);
1874
1665 /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written 1875 /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
1666 * 0xd. Failure to do so will result in spurious interrupts being 1876 * 0xd. Failure to do so will result in spurious interrupts being
1667 * generated on the port when a cable is not attached. 1877 * generated on the port when a cable is not attached.
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 8828b3ac6414..9af9f86a8765 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -26,14 +26,12 @@
26#define __INTEL_DRV_H__ 26#define __INTEL_DRV_H__
27 27
28#include <linux/i2c.h> 28#include <linux/i2c.h>
29#include <linux/i2c-id.h>
30#include <linux/i2c-algo-bit.h>
31#include "i915_drv.h" 29#include "i915_drv.h"
32#include "drm_crtc.h" 30#include "drm_crtc.h"
33
34#include "drm_crtc_helper.h" 31#include "drm_crtc_helper.h"
32#include "drm_fb_helper.h"
35 33
36#define wait_for(COND, MS, W) ({ \ 34#define _wait_for(COND, MS, W) ({ \
37 unsigned long timeout__ = jiffies + msecs_to_jiffies(MS); \ 35 unsigned long timeout__ = jiffies + msecs_to_jiffies(MS); \
38 int ret__ = 0; \ 36 int ret__ = 0; \
39 while (! (COND)) { \ 37 while (! (COND)) { \
@@ -41,11 +39,24 @@
41 ret__ = -ETIMEDOUT; \ 39 ret__ = -ETIMEDOUT; \
42 break; \ 40 break; \
43 } \ 41 } \
44 if (W) msleep(W); \ 42 if (W && !in_dbg_master()) msleep(W); \
45 } \ 43 } \
46 ret__; \ 44 ret__; \
47}) 45})
48 46
47#define wait_for(COND, MS) _wait_for(COND, MS, 1)
48#define wait_for_atomic(COND, MS) _wait_for(COND, MS, 0)
49
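[Callers now pass only the condition and the timeout: the 1ms poll interval is baked into wait_for(), wait_for_atomic() spins instead, and neither sleeps while the kernel debugger owns the CPU. Usage as it appears earlier in this patch:]

    if (wait_for((I915_READ(PCH_PP_STATUS) & idle_off_mask) == 0, 5000))
            DRM_ERROR("panel off wait timed out: 0x%08x\n",
                      I915_READ(PCH_PP_STATUS));
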
50#define MSLEEP(x) do { \
51 if (in_dbg_master()) \
52 mdelay(x); \
53 else \
54 msleep(x); \
55} while(0)
56
57#define KHz(x) (1000*x)
58#define MHz(x) KHz(1000*x)
59
49/* 60/*
50 * Display related stuff 61 * Display related stuff
51 */ 62 */
@@ -96,24 +107,39 @@
96#define INTEL_DVO_CHIP_TMDS 2 107#define INTEL_DVO_CHIP_TMDS 2
97#define INTEL_DVO_CHIP_TVOUT 4 108#define INTEL_DVO_CHIP_TVOUT 4
98 109
99struct intel_i2c_chan { 110/* drm_display_mode->private_flags */
100 struct drm_device *drm_dev; /* for getting at dev. private (mmio etc.) */ 111#define INTEL_MODE_PIXEL_MULTIPLIER_SHIFT (0x0)
101 u32 reg; /* GPIO reg */ 112#define INTEL_MODE_PIXEL_MULTIPLIER_MASK (0xf << INTEL_MODE_PIXEL_MULTIPLIER_SHIFT)
102 struct i2c_adapter adapter; 113
103 struct i2c_algo_bit_data algo; 114static inline void
104}; 115intel_mode_set_pixel_multiplier(struct drm_display_mode *mode,
116 int multiplier)
117{
118 mode->clock *= multiplier;
119 mode->private_flags |= multiplier;
120}
121
122static inline int
123intel_mode_get_pixel_multiplier(const struct drm_display_mode *mode)
124{
125 return (mode->private_flags & INTEL_MODE_PIXEL_MULTIPLIER_MASK) >> INTEL_MODE_PIXEL_MULTIPLIER_SHIFT;
126}
105 127
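[A round-trip of the two helpers above, assuming private_flags starts out clear. Note the setter ORs the raw value in, so it depends on the shift being 0 and on the field not having been set before:]

    /* mode->clock == 50000, mode->private_flags == 0 */
    intel_mode_set_pixel_multiplier(mode, 2);
    /* now mode->clock == 100000 and
     * intel_mode_get_pixel_multiplier(mode) == 2 */
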
106struct intel_framebuffer { 128struct intel_framebuffer {
107 struct drm_framebuffer base; 129 struct drm_framebuffer base;
108 struct drm_gem_object *obj; 130 struct drm_gem_object *obj;
109}; 131};
110 132
133struct intel_fbdev {
134 struct drm_fb_helper helper;
135 struct intel_framebuffer ifb;
136 struct list_head fbdev_list;
137 struct drm_display_mode *our_mode;
138};
111 139
112struct intel_encoder { 140struct intel_encoder {
113 struct drm_encoder enc; 141 struct drm_encoder base;
114 int type; 142 int type;
115 struct i2c_adapter *i2c_bus;
116 struct i2c_adapter *ddc_bus;
117 bool load_detect_temp; 143 bool load_detect_temp;
118 bool needs_tv_clock; 144 bool needs_tv_clock;
119 void (*hot_plug)(struct intel_encoder *); 145 void (*hot_plug)(struct intel_encoder *);
@@ -123,32 +149,7 @@ struct intel_encoder {
123 149
124struct intel_connector { 150struct intel_connector {
125 struct drm_connector base; 151 struct drm_connector base;
126}; 152 struct intel_encoder *encoder;
127
128struct intel_crtc;
129struct intel_overlay {
130 struct drm_device *dev;
131 struct intel_crtc *crtc;
132 struct drm_i915_gem_object *vid_bo;
133 struct drm_i915_gem_object *old_vid_bo;
134 int active;
135 int pfit_active;
136 u32 pfit_vscale_ratio; /* shifted-point number, (1<<12) == 1.0 */
137 u32 color_key;
138 u32 brightness, contrast, saturation;
139 u32 old_xscale, old_yscale;
140 /* register access */
141 u32 flip_addr;
142 struct drm_i915_gem_object *reg_bo;
143 void *virt_addr;
144 /* flip handling */
145 uint32_t last_flip_req;
146 int hw_wedged;
147#define HW_WEDGED 1
148#define NEEDS_WAIT_FOR_FLIP 2
149#define RELEASE_OLD_VID 3
150#define SWITCH_OFF_STAGE_1 4
151#define SWITCH_OFF_STAGE_2 5
152}; 153};
153 154
154struct intel_crtc { 155struct intel_crtc {
@@ -157,6 +158,7 @@ struct intel_crtc {
157 enum plane plane; 158 enum plane plane;
158 u8 lut_r[256], lut_g[256], lut_b[256]; 159 u8 lut_r[256], lut_g[256], lut_b[256];
159 int dpms_mode; 160 int dpms_mode;
161 bool active; /* is the crtc on? independent of the dpms mode */
160 bool busy; /* is scanout buffer being updated frequently? */ 162 bool busy; /* is scanout buffer being updated frequently? */
161 struct timer_list idle_timer; 163 struct timer_list idle_timer;
162 bool lowfreq_avail; 164 bool lowfreq_avail;
@@ -168,14 +170,53 @@ struct intel_crtc {
168 uint32_t cursor_addr; 170 uint32_t cursor_addr;
169 int16_t cursor_x, cursor_y; 171 int16_t cursor_x, cursor_y;
170 int16_t cursor_width, cursor_height; 172 int16_t cursor_width, cursor_height;
171 bool cursor_visible, cursor_on; 173 bool cursor_visible;
172}; 174};
173 175
174#define to_intel_crtc(x) container_of(x, struct intel_crtc, base) 176#define to_intel_crtc(x) container_of(x, struct intel_crtc, base)
175#define to_intel_connector(x) container_of(x, struct intel_connector, base) 177#define to_intel_connector(x) container_of(x, struct intel_connector, base)
176#define enc_to_intel_encoder(x) container_of(x, struct intel_encoder, enc) 178#define to_intel_encoder(x) container_of(x, struct intel_encoder, base)
177#define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base) 179#define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base)
178 180
181#define DIP_TYPE_AVI 0x82
182#define DIP_VERSION_AVI 0x2
183#define DIP_LEN_AVI 13
184
185struct dip_infoframe {
186 uint8_t type; /* HB0 */
187 uint8_t ver; /* HB1 */
188 uint8_t len; /* HB2 - body len, not including checksum */
189 uint8_t ecc; /* Header ECC */
190 uint8_t checksum; /* PB0 */
191 union {
192 struct {
193 /* PB1 - Y 6:5, A 4:4, B 3:2, S 1:0 */
194 uint8_t Y_A_B_S;
195 /* PB2 - C 7:6, M 5:4, R 3:0 */
196 uint8_t C_M_R;
197 /* PB3 - ITC 7:7, EC 6:4, Q 3:2, SC 1:0 */
198 uint8_t ITC_EC_Q_SC;
199 /* PB4 - VIC 6:0 */
200 uint8_t VIC;
201 /* PB5 - PR 3:0 */
202 uint8_t PR;
203 /* PB6 to PB13 */
204 uint16_t top_bar_end;
205 uint16_t bottom_bar_start;
206 uint16_t left_bar_end;
207 uint16_t right_bar_start;
208 } avi;
209 uint8_t payload[27];
210 } __attribute__ ((packed)) body;
211} __attribute__((packed));
212
213static inline struct drm_crtc *
214intel_get_crtc_for_pipe(struct drm_device *dev, int pipe)
215{
216 struct drm_i915_private *dev_priv = dev->dev_private;
217 return dev_priv->pipe_to_crtc_mapping[pipe];
218}
219
179struct intel_unpin_work { 220struct intel_unpin_work {
180 struct work_struct work; 221 struct work_struct work;
181 struct drm_device *dev; 222 struct drm_device *dev;
@@ -186,16 +227,12 @@ struct intel_unpin_work {
186 bool enable_stall_check; 227 bool enable_stall_check;
187}; 228};
188 229
189struct i2c_adapter *intel_i2c_create(struct drm_device *dev, const u32 reg,
190 const char *name);
191void intel_i2c_destroy(struct i2c_adapter *adapter);
192int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter); 230int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter);
193extern bool intel_ddc_probe(struct intel_encoder *intel_encoder); 231extern bool intel_ddc_probe(struct intel_encoder *intel_encoder, int ddc_bus);
194void intel_i2c_quirk_set(struct drm_device *dev, bool enable);
195void intel_i2c_reset_gmbus(struct drm_device *dev);
196 232
197extern void intel_crt_init(struct drm_device *dev); 233extern void intel_crt_init(struct drm_device *dev);
198extern void intel_hdmi_init(struct drm_device *dev, int sdvox_reg); 234extern void intel_hdmi_init(struct drm_device *dev, int sdvox_reg);
235void intel_dip_infoframe_csum(struct dip_infoframe *avi_if);
199extern bool intel_sdvo_init(struct drm_device *dev, int output_device); 236extern bool intel_sdvo_init(struct drm_device *dev, int output_device);
200extern void intel_dvo_init(struct drm_device *dev); 237extern void intel_dvo_init(struct drm_device *dev);
201extern void intel_tv_init(struct drm_device *dev); 238extern void intel_tv_init(struct drm_device *dev);
@@ -205,32 +242,41 @@ extern void intel_dp_init(struct drm_device *dev, int dp_reg);
205void 242void
206intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, 243intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
207 struct drm_display_mode *adjusted_mode); 244 struct drm_display_mode *adjusted_mode);
208extern bool intel_pch_has_edp(struct drm_crtc *crtc);
209extern bool intel_dpd_is_edp(struct drm_device *dev); 245extern bool intel_dpd_is_edp(struct drm_device *dev);
210extern void intel_edp_link_config (struct intel_encoder *, int *, int *); 246extern void intel_edp_link_config (struct intel_encoder *, int *, int *);
247extern bool intel_encoder_is_pch_edp(struct drm_encoder *encoder);
211 248
212 249/* intel_panel.c */
213extern void intel_fixed_panel_mode(struct drm_display_mode *fixed_mode, 250extern void intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
214 struct drm_display_mode *adjusted_mode); 251 struct drm_display_mode *adjusted_mode);
215extern void intel_pch_panel_fitting(struct drm_device *dev, 252extern void intel_pch_panel_fitting(struct drm_device *dev,
216 int fitting_mode, 253 int fitting_mode,
217 struct drm_display_mode *mode, 254 struct drm_display_mode *mode,
218 struct drm_display_mode *adjusted_mode); 255 struct drm_display_mode *adjusted_mode);
256extern u32 intel_panel_get_max_backlight(struct drm_device *dev);
257extern u32 intel_panel_get_backlight(struct drm_device *dev);
258extern void intel_panel_set_backlight(struct drm_device *dev, u32 level);
219 259
220extern int intel_panel_fitter_pipe (struct drm_device *dev);
221extern void intel_crtc_load_lut(struct drm_crtc *crtc); 260extern void intel_crtc_load_lut(struct drm_crtc *crtc);
222extern void intel_encoder_prepare (struct drm_encoder *encoder); 261extern void intel_encoder_prepare (struct drm_encoder *encoder);
223extern void intel_encoder_commit (struct drm_encoder *encoder); 262extern void intel_encoder_commit (struct drm_encoder *encoder);
224extern void intel_encoder_destroy(struct drm_encoder *encoder); 263extern void intel_encoder_destroy(struct drm_encoder *encoder);
225 264
226extern struct drm_encoder *intel_attached_encoder(struct drm_connector *connector); 265static inline struct intel_encoder *intel_attached_encoder(struct drm_connector *connector)
266{
267 return to_intel_connector(connector)->encoder;
268}
269
270extern void intel_connector_attach_encoder(struct intel_connector *connector,
271 struct intel_encoder *encoder);
272extern struct drm_encoder *intel_best_encoder(struct drm_connector *connector);
227 273
228extern struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev, 274extern struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
229 struct drm_crtc *crtc); 275 struct drm_crtc *crtc);
230int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, 276int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
231 struct drm_file *file_priv); 277 struct drm_file *file_priv);
232extern void intel_wait_for_vblank(struct drm_device *dev, int pipe); 278extern void intel_wait_for_vblank(struct drm_device *dev, int pipe);
233extern struct drm_crtc *intel_get_crtc_from_pipe(struct drm_device *dev, int pipe); 279extern void intel_wait_for_pipe_off(struct drm_device *dev, int pipe);
234extern struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder, 280extern struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
235 struct drm_connector *connector, 281 struct drm_connector *connector,
236 struct drm_display_mode *mode, 282 struct drm_display_mode *mode,
@@ -252,7 +298,8 @@ extern void ironlake_enable_drps(struct drm_device *dev);
252extern void ironlake_disable_drps(struct drm_device *dev); 298extern void ironlake_disable_drps(struct drm_device *dev);
253 299
254extern int intel_pin_and_fence_fb_obj(struct drm_device *dev, 300extern int intel_pin_and_fence_fb_obj(struct drm_device *dev,
255 struct drm_gem_object *obj); 301 struct drm_gem_object *obj,
302 bool pipelined);
256 303
257extern int intel_framebuffer_init(struct drm_device *dev, 304extern int intel_framebuffer_init(struct drm_device *dev,
258 struct intel_framebuffer *ifb, 305 struct intel_framebuffer *ifb,
@@ -267,9 +314,8 @@ extern void intel_finish_page_flip_plane(struct drm_device *dev, int plane);
267 314
268extern void intel_setup_overlay(struct drm_device *dev); 315extern void intel_setup_overlay(struct drm_device *dev);
269extern void intel_cleanup_overlay(struct drm_device *dev); 316extern void intel_cleanup_overlay(struct drm_device *dev);
270extern int intel_overlay_switch_off(struct intel_overlay *overlay); 317extern int intel_overlay_switch_off(struct intel_overlay *overlay,
271extern int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay, 318 bool interruptible);
272 int interruptible);
273extern int intel_overlay_put_image(struct drm_device *dev, void *data, 319extern int intel_overlay_put_image(struct drm_device *dev, void *data,
274 struct drm_file *file_priv); 320 struct drm_file *file_priv);
275extern int intel_overlay_attrs(struct drm_device *dev, void *data, 321extern int intel_overlay_attrs(struct drm_device *dev, void *data,
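
The intel_drv.h hunks above replace the encoder's old `enc` member and per-encoder `i2c_bus`/`ddc_bus` pointers with an embedded `base` encoder plus an explicit connector-to-encoder link. A minimal sketch of the accessor idiom this enables; `struct my_output` and the two helpers are hypothetical, for illustration only:

	/* Hypothetical output type embedding the new intel_encoder base */
	struct my_output {
		struct intel_encoder base;
	};

	static struct my_output *enc_to_my_output(struct drm_encoder *encoder)
	{
		/* base.base is the drm_encoder embedded in the intel_encoder,
		 * so one container_of unwinds both levels at once */
		return container_of(encoder, struct my_output, base.base);
	}

	static struct my_output *attached_my_output(struct drm_connector *connector)
	{
		/* connectors now record their intel_encoder directly, so no
		 * drm_encoder round trip is needed */
		return container_of(intel_attached_encoder(connector),
				    struct my_output, base);
	}

The same pattern appears verbatim in the intel_dvo.c and intel_hdmi.c hunks below (enc_to_intel_dvo(), intel_attached_hdmi(), and friends).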
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index 7c9ec1472d46..ea373283c93b 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -72,7 +72,7 @@ static const struct intel_dvo_device intel_dvo_devices[] = {
72 .name = "ch7017", 72 .name = "ch7017",
73 .dvo_reg = DVOC, 73 .dvo_reg = DVOC,
74 .slave_addr = 0x75, 74 .slave_addr = 0x75,
75 .gpio = GPIOE, 75 .gpio = GMBUS_PORT_DPB,
76 .dev_ops = &ch7017_ops, 76 .dev_ops = &ch7017_ops,
77 } 77 }
78}; 78};
@@ -88,7 +88,13 @@ struct intel_dvo {
88 88
89static struct intel_dvo *enc_to_intel_dvo(struct drm_encoder *encoder) 89static struct intel_dvo *enc_to_intel_dvo(struct drm_encoder *encoder)
90{ 90{
91 return container_of(enc_to_intel_encoder(encoder), struct intel_dvo, base); 91 return container_of(encoder, struct intel_dvo, base.base);
92}
93
94static struct intel_dvo *intel_attached_dvo(struct drm_connector *connector)
95{
96 return container_of(intel_attached_encoder(connector),
97 struct intel_dvo, base);
92} 98}
93 99
94static void intel_dvo_dpms(struct drm_encoder *encoder, int mode) 100static void intel_dvo_dpms(struct drm_encoder *encoder, int mode)
@@ -112,8 +118,7 @@ static void intel_dvo_dpms(struct drm_encoder *encoder, int mode)
112static int intel_dvo_mode_valid(struct drm_connector *connector, 118static int intel_dvo_mode_valid(struct drm_connector *connector,
113 struct drm_display_mode *mode) 119 struct drm_display_mode *mode)
114{ 120{
115 struct drm_encoder *encoder = intel_attached_encoder(connector); 121 struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
116 struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder);
117 122
118 if (mode->flags & DRM_MODE_FLAG_DBLSCAN) 123 if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
119 return MODE_NO_DBLESCAN; 124 return MODE_NO_DBLESCAN;
@@ -224,23 +229,22 @@ static void intel_dvo_mode_set(struct drm_encoder *encoder,
224static enum drm_connector_status 229static enum drm_connector_status
225intel_dvo_detect(struct drm_connector *connector, bool force) 230intel_dvo_detect(struct drm_connector *connector, bool force)
226{ 231{
227 struct drm_encoder *encoder = intel_attached_encoder(connector); 232 struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
228 struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder);
229
230 return intel_dvo->dev.dev_ops->detect(&intel_dvo->dev); 233 return intel_dvo->dev.dev_ops->detect(&intel_dvo->dev);
231} 234}
232 235
233static int intel_dvo_get_modes(struct drm_connector *connector) 236static int intel_dvo_get_modes(struct drm_connector *connector)
234{ 237{
235 struct drm_encoder *encoder = intel_attached_encoder(connector); 238 struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
236 struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder); 239 struct drm_i915_private *dev_priv = connector->dev->dev_private;
237 240
238 /* We should probably have an i2c driver get_modes function for those 241 /* We should probably have an i2c driver get_modes function for those
239 * devices which will have a fixed set of modes determined by the chip 242 * devices which will have a fixed set of modes determined by the chip
240 * (TV-out, for example), but for now with just TMDS and LVDS, 243 * (TV-out, for example), but for now with just TMDS and LVDS,
241 * that's not the case. 244 * that's not the case.
242 */ 245 */
243 intel_ddc_get_modes(connector, intel_dvo->base.ddc_bus); 246 intel_ddc_get_modes(connector,
247 &dev_priv->gmbus[GMBUS_PORT_DPC].adapter);
244 if (!list_empty(&connector->probed_modes)) 248 if (!list_empty(&connector->probed_modes))
245 return 1; 249 return 1;
246 250
@@ -281,7 +285,7 @@ static const struct drm_connector_funcs intel_dvo_connector_funcs = {
281static const struct drm_connector_helper_funcs intel_dvo_connector_helper_funcs = { 285static const struct drm_connector_helper_funcs intel_dvo_connector_helper_funcs = {
282 .mode_valid = intel_dvo_mode_valid, 286 .mode_valid = intel_dvo_mode_valid,
283 .get_modes = intel_dvo_get_modes, 287 .get_modes = intel_dvo_get_modes,
284 .best_encoder = intel_attached_encoder, 288 .best_encoder = intel_best_encoder,
285}; 289};
286 290
287static void intel_dvo_enc_destroy(struct drm_encoder *encoder) 291static void intel_dvo_enc_destroy(struct drm_encoder *encoder)
@@ -311,8 +315,7 @@ intel_dvo_get_current_mode(struct drm_connector *connector)
311{ 315{
312 struct drm_device *dev = connector->dev; 316 struct drm_device *dev = connector->dev;
313 struct drm_i915_private *dev_priv = dev->dev_private; 317 struct drm_i915_private *dev_priv = dev->dev_private;
314 struct drm_encoder *encoder = intel_attached_encoder(connector); 318 struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
315 struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder);
316 uint32_t dvo_val = I915_READ(intel_dvo->dev.dvo_reg); 319 uint32_t dvo_val = I915_READ(intel_dvo->dev.dvo_reg);
317 struct drm_display_mode *mode = NULL; 320 struct drm_display_mode *mode = NULL;
318 321
@@ -323,7 +326,7 @@ intel_dvo_get_current_mode(struct drm_connector *connector)
323 struct drm_crtc *crtc; 326 struct drm_crtc *crtc;
324 int pipe = (dvo_val & DVO_PIPE_B_SELECT) ? 1 : 0; 327 int pipe = (dvo_val & DVO_PIPE_B_SELECT) ? 1 : 0;
325 328
326 crtc = intel_get_crtc_from_pipe(dev, pipe); 329 crtc = intel_get_crtc_for_pipe(dev, pipe);
327 if (crtc) { 330 if (crtc) {
328 mode = intel_crtc_mode_get(dev, crtc); 331 mode = intel_crtc_mode_get(dev, crtc);
329 if (mode) { 332 if (mode) {
@@ -341,11 +344,10 @@ intel_dvo_get_current_mode(struct drm_connector *connector)
341 344
342void intel_dvo_init(struct drm_device *dev) 345void intel_dvo_init(struct drm_device *dev)
343{ 346{
347 struct drm_i915_private *dev_priv = dev->dev_private;
344 struct intel_encoder *intel_encoder; 348 struct intel_encoder *intel_encoder;
345 struct intel_dvo *intel_dvo; 349 struct intel_dvo *intel_dvo;
346 struct intel_connector *intel_connector; 350 struct intel_connector *intel_connector;
347 struct i2c_adapter *i2cbus = NULL;
348 int ret = 0;
349 int i; 351 int i;
350 int encoder_type = DRM_MODE_ENCODER_NONE; 352 int encoder_type = DRM_MODE_ENCODER_NONE;
351 353
@@ -360,16 +362,14 @@ void intel_dvo_init(struct drm_device *dev)
360 } 362 }
361 363
362 intel_encoder = &intel_dvo->base; 364 intel_encoder = &intel_dvo->base;
363 365 drm_encoder_init(dev, &intel_encoder->base,
364 /* Set up the DDC bus */ 366 &intel_dvo_enc_funcs, encoder_type);
365 intel_encoder->ddc_bus = intel_i2c_create(dev, GPIOD, "DVODDC_D");
366 if (!intel_encoder->ddc_bus)
367 goto free_intel;
368 367
369 /* Now, try to find a controller */ 368 /* Now, try to find a controller */
370 for (i = 0; i < ARRAY_SIZE(intel_dvo_devices); i++) { 369 for (i = 0; i < ARRAY_SIZE(intel_dvo_devices); i++) {
371 struct drm_connector *connector = &intel_connector->base; 370 struct drm_connector *connector = &intel_connector->base;
372 const struct intel_dvo_device *dvo = &intel_dvo_devices[i]; 371 const struct intel_dvo_device *dvo = &intel_dvo_devices[i];
372 struct i2c_adapter *i2c;
373 int gpio; 373 int gpio;
374 374
375 /* Allow the I2C driver info to specify the GPIO to be used in 375 /* Allow the I2C driver info to specify the GPIO to be used in
@@ -379,24 +379,18 @@ void intel_dvo_init(struct drm_device *dev)
379 if (dvo->gpio != 0) 379 if (dvo->gpio != 0)
380 gpio = dvo->gpio; 380 gpio = dvo->gpio;
381 else if (dvo->type == INTEL_DVO_CHIP_LVDS) 381 else if (dvo->type == INTEL_DVO_CHIP_LVDS)
382 gpio = GPIOB; 382 gpio = GMBUS_PORT_SSC;
383 else 383 else
384 gpio = GPIOE; 384 gpio = GMBUS_PORT_DPB;
385 385
386 /* Set up the I2C bus necessary for the chip we're probing. 386 /* Set up the I2C bus necessary for the chip we're probing.
387 * It appears that everything is on GPIOE except for panels 387 * It appears that everything is on GPIOE except for panels
388 * on i830 laptops, which are on GPIOB (DVOA). 388 * on i830 laptops, which are on GPIOB (DVOA).
389 */ 389 */
390 if (i2cbus != NULL) 390 i2c = &dev_priv->gmbus[gpio].adapter;
391 intel_i2c_destroy(i2cbus);
392 if (!(i2cbus = intel_i2c_create(dev, gpio,
393 gpio == GPIOB ? "DVOI2C_B" : "DVOI2C_E"))) {
394 continue;
395 }
396 391
397 intel_dvo->dev = *dvo; 392 intel_dvo->dev = *dvo;
398 ret = dvo->dev_ops->init(&intel_dvo->dev, i2cbus); 393 if (!dvo->dev_ops->init(&intel_dvo->dev, i2c))
399 if (!ret)
400 continue; 394 continue;
401 395
402 intel_encoder->type = INTEL_OUTPUT_DVO; 396 intel_encoder->type = INTEL_OUTPUT_DVO;
@@ -427,13 +421,10 @@ void intel_dvo_init(struct drm_device *dev)
427 connector->interlace_allowed = false; 421 connector->interlace_allowed = false;
428 connector->doublescan_allowed = false; 422 connector->doublescan_allowed = false;
429 423
430 drm_encoder_init(dev, &intel_encoder->enc, 424 drm_encoder_helper_add(&intel_encoder->base,
431 &intel_dvo_enc_funcs, encoder_type);
432 drm_encoder_helper_add(&intel_encoder->enc,
433 &intel_dvo_helper_funcs); 425 &intel_dvo_helper_funcs);
434 426
435 drm_mode_connector_attach_encoder(&intel_connector->base, 427 intel_connector_attach_encoder(intel_connector, intel_encoder);
436 &intel_encoder->enc);
437 if (dvo->type == INTEL_DVO_CHIP_LVDS) { 428 if (dvo->type == INTEL_DVO_CHIP_LVDS) {
438 /* For our LVDS chipsets, we should hopefully be able 429 /* For our LVDS chipsets, we should hopefully be able
439 * to dig the fixed panel mode out of the BIOS data. 430 * to dig the fixed panel mode out of the BIOS data.
@@ -451,11 +442,7 @@ void intel_dvo_init(struct drm_device *dev)
451 return; 442 return;
452 } 443 }
453 444
454 intel_i2c_destroy(intel_encoder->ddc_bus); 445 drm_encoder_cleanup(&intel_encoder->base);
455 /* Didn't find a chip, so tear down. */
456 if (i2cbus != NULL)
457 intel_i2c_destroy(i2cbus);
458free_intel:
459 kfree(intel_dvo); 446 kfree(intel_dvo);
460 kfree(intel_connector); 447 kfree(intel_connector);
461} 448}
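
Note how the probe loop above now borrows a long-lived adapter from dev_priv->gmbus[] instead of creating and destroying a bit-banged bus for each candidate. A minimal sketch of the new flow; the helper name is made up and error handling is elided:

	/* Hypothetical helper: probe one DVO controller on a shared GMBUS
	 * port. The adapter is owned by dev_priv->gmbus[], so there is
	 * nothing to tear down when the probe fails. */
	static bool probe_dvo_on_port(struct drm_i915_private *dev_priv,
				      struct intel_dvo_device *dvo, int port)
	{
		struct i2c_adapter *i2c = &dev_priv->gmbus[port].adapter;

		return dvo->dev_ops->init(dvo, i2c);
	}

This is why the old free_intel/teardown labels disappear: a failed probe now costs nothing but a `continue`.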
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
index b61966c126d3..af2a1dddc28e 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -44,13 +44,6 @@
44#include "i915_drm.h" 44#include "i915_drm.h"
45#include "i915_drv.h" 45#include "i915_drv.h"
46 46
47struct intel_fbdev {
48 struct drm_fb_helper helper;
49 struct intel_framebuffer ifb;
50 struct list_head fbdev_list;
51 struct drm_display_mode *our_mode;
52};
53
54static struct fb_ops intelfb_ops = { 47static struct fb_ops intelfb_ops = {
55 .owner = THIS_MODULE, 48 .owner = THIS_MODULE,
56 .fb_check_var = drm_fb_helper_check_var, 49 .fb_check_var = drm_fb_helper_check_var,
@@ -75,7 +68,7 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
75 struct drm_gem_object *fbo = NULL; 68 struct drm_gem_object *fbo = NULL;
76 struct drm_i915_gem_object *obj_priv; 69 struct drm_i915_gem_object *obj_priv;
77 struct device *device = &dev->pdev->dev; 70 struct device *device = &dev->pdev->dev;
78 int size, ret, mmio_bar = IS_I9XX(dev) ? 0 : 1; 71 int size, ret, mmio_bar = IS_GEN2(dev) ? 1 : 0;
79 72
80 /* we don't do packed 24bpp */ 73 /* we don't do packed 24bpp */
81 if (sizes->surface_bpp == 24) 74 if (sizes->surface_bpp == 24)
@@ -100,19 +93,13 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
100 93
101 mutex_lock(&dev->struct_mutex); 94 mutex_lock(&dev->struct_mutex);
102 95
103 ret = intel_pin_and_fence_fb_obj(dev, fbo); 96 /* Flush everything out, we'll be doing GTT only from now on */
97 ret = intel_pin_and_fence_fb_obj(dev, fbo, false);
104 if (ret) { 98 if (ret) {
105 DRM_ERROR("failed to pin fb: %d\n", ret); 99 DRM_ERROR("failed to pin fb: %d\n", ret);
106 goto out_unref; 100 goto out_unref;
107 } 101 }
108 102
109 /* Flush everything out, we'll be doing GTT only from now on */
110 ret = i915_gem_object_set_to_gtt_domain(fbo, 1);
111 if (ret) {
112 DRM_ERROR("failed to bind fb: %d.\n", ret);
113 goto out_unpin;
114 }
115
116 info = framebuffer_alloc(0, device); 103 info = framebuffer_alloc(0, device);
117 if (!info) { 104 if (!info) {
118 ret = -ENOMEM; 105 ret = -ENOMEM;
@@ -142,7 +129,7 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
142 goto out_unpin; 129 goto out_unpin;
143 } 130 }
144 info->apertures->ranges[0].base = dev->mode_config.fb_base; 131 info->apertures->ranges[0].base = dev->mode_config.fb_base;
145 if (IS_I9XX(dev)) 132 if (!IS_GEN2(dev))
146 info->apertures->ranges[0].size = pci_resource_len(dev->pdev, 2); 133 info->apertures->ranges[0].size = pci_resource_len(dev->pdev, 2);
147 else 134 else
148 info->apertures->ranges[0].size = pci_resource_len(dev->pdev, 0); 135 info->apertures->ranges[0].size = pci_resource_len(dev->pdev, 0);
@@ -219,8 +206,8 @@ static struct drm_fb_helper_funcs intel_fb_helper_funcs = {
219 .fb_probe = intel_fb_find_or_create_single, 206 .fb_probe = intel_fb_find_or_create_single,
220}; 207};
221 208
222int intel_fbdev_destroy(struct drm_device *dev, 209static void intel_fbdev_destroy(struct drm_device *dev,
223 struct intel_fbdev *ifbdev) 210 struct intel_fbdev *ifbdev)
224{ 211{
225 struct fb_info *info; 212 struct fb_info *info;
226 struct intel_framebuffer *ifb = &ifbdev->ifb; 213 struct intel_framebuffer *ifb = &ifbdev->ifb;
@@ -238,11 +225,9 @@ int intel_fbdev_destroy(struct drm_device *dev,
238 225
239 drm_framebuffer_cleanup(&ifb->base); 226 drm_framebuffer_cleanup(&ifb->base);
240 if (ifb->obj) { 227 if (ifb->obj) {
241 drm_gem_object_unreference(ifb->obj); 228 drm_gem_object_unreference_unlocked(ifb->obj);
242 ifb->obj = NULL; 229 ifb->obj = NULL;
243 } 230 }
244
245 return 0;
246} 231}
247 232
248int intel_fbdev_init(struct drm_device *dev) 233int intel_fbdev_init(struct drm_device *dev)
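
The intelfb_create() hunk drops the separate i915_gem_object_set_to_gtt_domain() call because intel_pin_and_fence_fb_obj() now performs the flush itself, steered by its new `pipelined` argument. A sketch of the two intended call patterns, as I read the new signature (the second call site is illustrative, not from this file):

	/* fbdev/modeset path: CPU and GTT access follow immediately,
	 * so flush synchronously while pinning */
	ret = intel_pin_and_fence_fb_obj(dev, fbo, false);

	/* page-flip path: the flip is queued on a ring, so the flush
	 * can be pipelined behind it instead of waited on here */
	ret = intel_pin_and_fence_fb_obj(dev, obj, true);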
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 926934a482ec..0d0273e7b029 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -40,12 +40,76 @@
40struct intel_hdmi { 40struct intel_hdmi {
41 struct intel_encoder base; 41 struct intel_encoder base;
42 u32 sdvox_reg; 42 u32 sdvox_reg;
43 int ddc_bus;
43 bool has_hdmi_sink; 44 bool has_hdmi_sink;
45 bool has_audio;
46 int force_audio;
47 struct drm_property *force_audio_property;
44}; 48};
45 49
46static struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder) 50static struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder)
47{ 51{
48 return container_of(enc_to_intel_encoder(encoder), struct intel_hdmi, base); 52 return container_of(encoder, struct intel_hdmi, base.base);
53}
54
55static struct intel_hdmi *intel_attached_hdmi(struct drm_connector *connector)
56{
57 return container_of(intel_attached_encoder(connector),
58 struct intel_hdmi, base);
59}
60
61void intel_dip_infoframe_csum(struct dip_infoframe *avi_if)
62{
63 uint8_t *data = (uint8_t *)avi_if;
64 uint8_t sum = 0;
65 unsigned i;
66
67 avi_if->checksum = 0;
68 avi_if->ecc = 0;
69
70 for (i = 0; i < sizeof(*avi_if); i++)
71 sum += data[i];
72
73 avi_if->checksum = 0x100 - sum;
74}
75
76static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder)
77{
78 struct dip_infoframe avi_if = {
79 .type = DIP_TYPE_AVI,
80 .ver = DIP_VERSION_AVI,
81 .len = DIP_LEN_AVI,
82 };
83 uint32_t *data = (uint32_t *)&avi_if;
84 struct drm_device *dev = encoder->dev;
85 struct drm_i915_private *dev_priv = dev->dev_private;
86 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
87 u32 port;
88 unsigned i;
89
90 if (!intel_hdmi->has_hdmi_sink)
91 return;
92
 93 /* XXX first guess at handling video port, is this correct? */
94 if (intel_hdmi->sdvox_reg == SDVOB)
95 port = VIDEO_DIP_PORT_B;
96 else if (intel_hdmi->sdvox_reg == SDVOC)
97 port = VIDEO_DIP_PORT_C;
98 else
99 return;
100
101 I915_WRITE(VIDEO_DIP_CTL, VIDEO_DIP_ENABLE | port |
102 VIDEO_DIP_SELECT_AVI | VIDEO_DIP_FREQ_VSYNC);
103
104 intel_dip_infoframe_csum(&avi_if);
105 for (i = 0; i < sizeof(avi_if); i += 4) {
106 I915_WRITE(VIDEO_DIP_DATA, *data);
107 data++;
108 }
109
110 I915_WRITE(VIDEO_DIP_CTL, VIDEO_DIP_ENABLE | port |
111 VIDEO_DIP_SELECT_AVI | VIDEO_DIP_FREQ_VSYNC |
112 VIDEO_DIP_ENABLE_AVI);
49} 113}
50 114
51static void intel_hdmi_mode_set(struct drm_encoder *encoder, 115static void intel_hdmi_mode_set(struct drm_encoder *encoder,
@@ -65,10 +129,13 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder,
65 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) 129 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
66 sdvox |= SDVO_HSYNC_ACTIVE_HIGH; 130 sdvox |= SDVO_HSYNC_ACTIVE_HIGH;
67 131
68 if (intel_hdmi->has_hdmi_sink) { 132 /* Required on CPT */
133 if (intel_hdmi->has_hdmi_sink && HAS_PCH_CPT(dev))
134 sdvox |= HDMI_MODE_SELECT;
135
136 if (intel_hdmi->has_audio) {
69 sdvox |= SDVO_AUDIO_ENABLE; 137 sdvox |= SDVO_AUDIO_ENABLE;
70 if (HAS_PCH_CPT(dev)) 138 sdvox |= SDVO_NULL_PACKETS_DURING_VSYNC;
71 sdvox |= HDMI_MODE_SELECT;
72 } 139 }
73 140
74 if (intel_crtc->pipe == 1) { 141 if (intel_crtc->pipe == 1) {
@@ -80,6 +147,8 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder,
80 147
81 I915_WRITE(intel_hdmi->sdvox_reg, sdvox); 148 I915_WRITE(intel_hdmi->sdvox_reg, sdvox);
82 POSTING_READ(intel_hdmi->sdvox_reg); 149 POSTING_READ(intel_hdmi->sdvox_reg);
150
151 intel_hdmi_set_avi_infoframe(encoder);
83} 152}
84 153
85static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode) 154static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode)
@@ -141,36 +210,85 @@ static bool intel_hdmi_mode_fixup(struct drm_encoder *encoder,
141static enum drm_connector_status 210static enum drm_connector_status
142intel_hdmi_detect(struct drm_connector *connector, bool force) 211intel_hdmi_detect(struct drm_connector *connector, bool force)
143{ 212{
144 struct drm_encoder *encoder = intel_attached_encoder(connector); 213 struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
145 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); 214 struct drm_i915_private *dev_priv = connector->dev->dev_private;
146 struct edid *edid = NULL; 215 struct edid *edid;
147 enum drm_connector_status status = connector_status_disconnected; 216 enum drm_connector_status status = connector_status_disconnected;
148 217
149 intel_hdmi->has_hdmi_sink = false; 218 intel_hdmi->has_hdmi_sink = false;
150 edid = drm_get_edid(connector, intel_hdmi->base.ddc_bus); 219 intel_hdmi->has_audio = false;
220 edid = drm_get_edid(connector,
221 &dev_priv->gmbus[intel_hdmi->ddc_bus].adapter);
151 222
152 if (edid) { 223 if (edid) {
153 if (edid->input & DRM_EDID_INPUT_DIGITAL) { 224 if (edid->input & DRM_EDID_INPUT_DIGITAL) {
154 status = connector_status_connected; 225 status = connector_status_connected;
155 intel_hdmi->has_hdmi_sink = drm_detect_hdmi_monitor(edid); 226 intel_hdmi->has_hdmi_sink = drm_detect_hdmi_monitor(edid);
227 intel_hdmi->has_audio = drm_detect_monitor_audio(edid);
156 } 228 }
157 connector->display_info.raw_edid = NULL; 229 connector->display_info.raw_edid = NULL;
158 kfree(edid); 230 kfree(edid);
159 } 231 }
160 232
233 if (status == connector_status_connected) {
234 if (intel_hdmi->force_audio)
235 intel_hdmi->has_audio = intel_hdmi->force_audio > 0;
236 }
237
161 return status; 238 return status;
162} 239}
163 240
164static int intel_hdmi_get_modes(struct drm_connector *connector) 241static int intel_hdmi_get_modes(struct drm_connector *connector)
165{ 242{
166 struct drm_encoder *encoder = intel_attached_encoder(connector); 243 struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
167 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); 244 struct drm_i915_private *dev_priv = connector->dev->dev_private;
168 245
169 /* We should parse the EDID data and find out if it's an HDMI sink so 246 /* We should parse the EDID data and find out if it's an HDMI sink so
170 * we can send audio to it. 247 * we can send audio to it.
171 */ 248 */
172 249
173 return intel_ddc_get_modes(connector, intel_hdmi->base.ddc_bus); 250 return intel_ddc_get_modes(connector,
251 &dev_priv->gmbus[intel_hdmi->ddc_bus].adapter);
252}
253
254static int
255intel_hdmi_set_property(struct drm_connector *connector,
256 struct drm_property *property,
257 uint64_t val)
258{
259 struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
260 int ret;
261
262 ret = drm_connector_property_set_value(connector, property, val);
263 if (ret)
264 return ret;
265
266 if (property == intel_hdmi->force_audio_property) {
267 if (val == intel_hdmi->force_audio)
268 return 0;
269
270 intel_hdmi->force_audio = val;
271
272 if (val > 0 && intel_hdmi->has_audio)
273 return 0;
274 if (val < 0 && !intel_hdmi->has_audio)
275 return 0;
276
277 intel_hdmi->has_audio = val > 0;
278 goto done;
279 }
280
281 return -EINVAL;
282
283done:
284 if (intel_hdmi->base.base.crtc) {
285 struct drm_crtc *crtc = intel_hdmi->base.base.crtc;
286 drm_crtc_helper_set_mode(crtc, &crtc->mode,
287 crtc->x, crtc->y,
288 crtc->fb);
289 }
290
291 return 0;
174} 292}
175 293
176static void intel_hdmi_destroy(struct drm_connector *connector) 294static void intel_hdmi_destroy(struct drm_connector *connector)
@@ -192,19 +310,34 @@ static const struct drm_connector_funcs intel_hdmi_connector_funcs = {
192 .dpms = drm_helper_connector_dpms, 310 .dpms = drm_helper_connector_dpms,
193 .detect = intel_hdmi_detect, 311 .detect = intel_hdmi_detect,
194 .fill_modes = drm_helper_probe_single_connector_modes, 312 .fill_modes = drm_helper_probe_single_connector_modes,
313 .set_property = intel_hdmi_set_property,
195 .destroy = intel_hdmi_destroy, 314 .destroy = intel_hdmi_destroy,
196}; 315};
197 316
198static const struct drm_connector_helper_funcs intel_hdmi_connector_helper_funcs = { 317static const struct drm_connector_helper_funcs intel_hdmi_connector_helper_funcs = {
199 .get_modes = intel_hdmi_get_modes, 318 .get_modes = intel_hdmi_get_modes,
200 .mode_valid = intel_hdmi_mode_valid, 319 .mode_valid = intel_hdmi_mode_valid,
201 .best_encoder = intel_attached_encoder, 320 .best_encoder = intel_best_encoder,
202}; 321};
203 322
204static const struct drm_encoder_funcs intel_hdmi_enc_funcs = { 323static const struct drm_encoder_funcs intel_hdmi_enc_funcs = {
205 .destroy = intel_encoder_destroy, 324 .destroy = intel_encoder_destroy,
206}; 325};
207 326
327static void
328intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *connector)
329{
330 struct drm_device *dev = connector->dev;
331
332 intel_hdmi->force_audio_property =
333 drm_property_create(dev, DRM_MODE_PROP_RANGE, "force_audio", 2);
334 if (intel_hdmi->force_audio_property) {
335 intel_hdmi->force_audio_property->values[0] = -1;
336 intel_hdmi->force_audio_property->values[1] = 1;
337 drm_connector_attach_property(connector, intel_hdmi->force_audio_property, 0);
338 }
339}
340
208void intel_hdmi_init(struct drm_device *dev, int sdvox_reg) 341void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
209{ 342{
210 struct drm_i915_private *dev_priv = dev->dev_private; 343 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -224,6 +357,9 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
224 } 357 }
225 358
226 intel_encoder = &intel_hdmi->base; 359 intel_encoder = &intel_hdmi->base;
360 drm_encoder_init(dev, &intel_encoder->base, &intel_hdmi_enc_funcs,
361 DRM_MODE_ENCODER_TMDS);
362
227 connector = &intel_connector->base; 363 connector = &intel_connector->base;
228 drm_connector_init(dev, connector, &intel_hdmi_connector_funcs, 364 drm_connector_init(dev, connector, &intel_hdmi_connector_funcs,
229 DRM_MODE_CONNECTOR_HDMIA); 365 DRM_MODE_CONNECTOR_HDMIA);
@@ -239,39 +375,33 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
239 /* Set up the DDC bus. */ 375 /* Set up the DDC bus. */
240 if (sdvox_reg == SDVOB) { 376 if (sdvox_reg == SDVOB) {
241 intel_encoder->clone_mask = (1 << INTEL_HDMIB_CLONE_BIT); 377 intel_encoder->clone_mask = (1 << INTEL_HDMIB_CLONE_BIT);
242 intel_encoder->ddc_bus = intel_i2c_create(dev, GPIOE, "HDMIB"); 378 intel_hdmi->ddc_bus = GMBUS_PORT_DPB;
243 dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS; 379 dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS;
244 } else if (sdvox_reg == SDVOC) { 380 } else if (sdvox_reg == SDVOC) {
245 intel_encoder->clone_mask = (1 << INTEL_HDMIC_CLONE_BIT); 381 intel_encoder->clone_mask = (1 << INTEL_HDMIC_CLONE_BIT);
246 intel_encoder->ddc_bus = intel_i2c_create(dev, GPIOD, "HDMIC"); 382 intel_hdmi->ddc_bus = GMBUS_PORT_DPC;
247 dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS; 383 dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS;
248 } else if (sdvox_reg == HDMIB) { 384 } else if (sdvox_reg == HDMIB) {
249 intel_encoder->clone_mask = (1 << INTEL_HDMID_CLONE_BIT); 385 intel_encoder->clone_mask = (1 << INTEL_HDMID_CLONE_BIT);
250 intel_encoder->ddc_bus = intel_i2c_create(dev, PCH_GPIOE, 386 intel_hdmi->ddc_bus = GMBUS_PORT_DPB;
251 "HDMIB");
252 dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS; 387 dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS;
253 } else if (sdvox_reg == HDMIC) { 388 } else if (sdvox_reg == HDMIC) {
254 intel_encoder->clone_mask = (1 << INTEL_HDMIE_CLONE_BIT); 389 intel_encoder->clone_mask = (1 << INTEL_HDMIE_CLONE_BIT);
255 intel_encoder->ddc_bus = intel_i2c_create(dev, PCH_GPIOD, 390 intel_hdmi->ddc_bus = GMBUS_PORT_DPC;
256 "HDMIC");
257 dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS; 391 dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS;
258 } else if (sdvox_reg == HDMID) { 392 } else if (sdvox_reg == HDMID) {
259 intel_encoder->clone_mask = (1 << INTEL_HDMIF_CLONE_BIT); 393 intel_encoder->clone_mask = (1 << INTEL_HDMIF_CLONE_BIT);
260 intel_encoder->ddc_bus = intel_i2c_create(dev, PCH_GPIOF, 394 intel_hdmi->ddc_bus = GMBUS_PORT_DPD;
261 "HDMID");
262 dev_priv->hotplug_supported_mask |= HDMID_HOTPLUG_INT_STATUS; 395 dev_priv->hotplug_supported_mask |= HDMID_HOTPLUG_INT_STATUS;
263 } 396 }
264 if (!intel_encoder->ddc_bus)
265 goto err_connector;
266 397
267 intel_hdmi->sdvox_reg = sdvox_reg; 398 intel_hdmi->sdvox_reg = sdvox_reg;
268 399
269 drm_encoder_init(dev, &intel_encoder->enc, &intel_hdmi_enc_funcs, 400 drm_encoder_helper_add(&intel_encoder->base, &intel_hdmi_helper_funcs);
270 DRM_MODE_ENCODER_TMDS); 401
271 drm_encoder_helper_add(&intel_encoder->enc, &intel_hdmi_helper_funcs); 402 intel_hdmi_add_properties(intel_hdmi, connector);
272 403
273 drm_mode_connector_attach_encoder(&intel_connector->base, 404 intel_connector_attach_encoder(intel_connector, intel_encoder);
274 &intel_encoder->enc);
275 drm_sysfs_connector_add(connector); 405 drm_sysfs_connector_add(connector);
276 406
277 /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written 407 /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
@@ -282,13 +412,4 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
282 u32 temp = I915_READ(PEG_BAND_GAP_DATA); 412 u32 temp = I915_READ(PEG_BAND_GAP_DATA);
283 I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd); 413 I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
284 } 414 }
285
286 return;
287
288err_connector:
289 drm_connector_cleanup(connector);
290 kfree(intel_hdmi);
291 kfree(intel_connector);
292
293 return;
294} 415}
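
intel_dip_infoframe_csum() above picks the checksum so that every byte of the frame, header included, sums to zero modulo 256, which is the HDMI InfoFrame rule. A standalone userspace sketch that mirrors the kernel routine; the struct is a flattened stand-in for dip_infoframe, illustration only:

	#include <stdint.h>
	#include <stdio.h>

	struct avi_frame {
		uint8_t type, ver, len, ecc, checksum, payload[27];
	};

	static void csum(struct avi_frame *f)
	{
		uint8_t *p = (uint8_t *)f, sum = 0;
		unsigned i;

		f->checksum = 0;
		f->ecc = 0;
		for (i = 0; i < sizeof(*f); i++)	/* whole struct, like the kernel loop */
			sum += p[i];
		f->checksum = 0x100 - sum;
	}

	int main(void)
	{
		struct avi_frame f = { .type = 0x82, .ver = 0x2, .len = 13 };
		uint8_t *p = (uint8_t *)&f, sum = 0;
		unsigned i;

		csum(&f);				/* here: 0x100 - 0x91 = 0x6f */
		for (i = 0; i < sizeof(f); i++)
			sum += p[i];
		printf("checksum=0x%02x, total mod 256=0x%02x\n", f.checksum, sum);
		return 0;				/* total prints as 0x00 */
	}

Note the kernel loop sums sizeof(*avi_if), i.e. the full 27-byte payload union, even though only DIP_LEN_AVI bytes carry data; the zero padding keeps that harmless.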
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index c2649c7df14c..2be4f728ed0c 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * Copyright (c) 2006 Dave Airlie <airlied@linux.ie> 2 * Copyright (c) 2006 Dave Airlie <airlied@linux.ie>
3 * Copyright © 2006-2008 Intel Corporation 3 * Copyright © 2006-2008,2010 Intel Corporation
4 * Jesse Barnes <jesse.barnes@intel.com> 4 * Jesse Barnes <jesse.barnes@intel.com>
5 * 5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a 6 * Permission is hereby granted, free of charge, to any person obtaining a
@@ -24,10 +24,9 @@
24 * 24 *
25 * Authors: 25 * Authors:
26 * Eric Anholt <eric@anholt.net> 26 * Eric Anholt <eric@anholt.net>
27 * Chris Wilson <chris@chris-wilson.co.uk>
27 */ 28 */
28#include <linux/i2c.h> 29#include <linux/i2c.h>
29#include <linux/slab.h>
30#include <linux/i2c-id.h>
31#include <linux/i2c-algo-bit.h> 30#include <linux/i2c-algo-bit.h>
32#include "drmP.h" 31#include "drmP.h"
33#include "drm.h" 32#include "drm.h"
@@ -35,79 +34,106 @@
35#include "i915_drm.h" 34#include "i915_drm.h"
36#include "i915_drv.h" 35#include "i915_drv.h"
37 36
38void intel_i2c_quirk_set(struct drm_device *dev, bool enable) 37/* Intel GPIO access functions */
38
39#define I2C_RISEFALL_TIME 20
40
41static inline struct intel_gmbus *
42to_intel_gmbus(struct i2c_adapter *i2c)
43{
44 return container_of(i2c, struct intel_gmbus, adapter);
45}
46
47struct intel_gpio {
48 struct i2c_adapter adapter;
49 struct i2c_algo_bit_data algo;
50 struct drm_i915_private *dev_priv;
51 u32 reg;
52};
53
54void
55intel_i2c_reset(struct drm_device *dev)
39{ 56{
40 struct drm_i915_private *dev_priv = dev->dev_private; 57 struct drm_i915_private *dev_priv = dev->dev_private;
58 if (HAS_PCH_SPLIT(dev))
59 I915_WRITE(PCH_GMBUS0, 0);
60 else
61 I915_WRITE(GMBUS0, 0);
62}
63
64static void intel_i2c_quirk_set(struct drm_i915_private *dev_priv, bool enable)
65{
66 u32 val;
41 67
42 /* When using bit bashing for I2C, this bit needs to be set to 1 */ 68 /* When using bit bashing for I2C, this bit needs to be set to 1 */
43 if (!IS_PINEVIEW(dev)) 69 if (!IS_PINEVIEW(dev_priv->dev))
44 return; 70 return;
71
72 val = I915_READ(DSPCLK_GATE_D);
45 if (enable) 73 if (enable)
46 I915_WRITE(DSPCLK_GATE_D, 74 val |= DPCUNIT_CLOCK_GATE_DISABLE;
47 I915_READ(DSPCLK_GATE_D) | DPCUNIT_CLOCK_GATE_DISABLE);
48 else 75 else
49 I915_WRITE(DSPCLK_GATE_D, 76 val &= ~DPCUNIT_CLOCK_GATE_DISABLE;
50 I915_READ(DSPCLK_GATE_D) & (~DPCUNIT_CLOCK_GATE_DISABLE)); 77 I915_WRITE(DSPCLK_GATE_D, val);
51} 78}
52 79
53/* 80static u32 get_reserved(struct intel_gpio *gpio)
54 * Intel GPIO access functions 81{
55 */ 82 struct drm_i915_private *dev_priv = gpio->dev_priv;
83 struct drm_device *dev = dev_priv->dev;
84 u32 reserved = 0;
56 85
57#define I2C_RISEFALL_TIME 20 86 /* On most chips, these bits must be preserved in software. */
87 if (!IS_I830(dev) && !IS_845G(dev))
88 reserved = I915_READ(gpio->reg) & (GPIO_DATA_PULLUP_DISABLE |
89 GPIO_CLOCK_PULLUP_DISABLE);
90
91 return reserved;
92}
58 93
59static int get_clock(void *data) 94static int get_clock(void *data)
60{ 95{
61 struct intel_i2c_chan *chan = data; 96 struct intel_gpio *gpio = data;
62 struct drm_i915_private *dev_priv = chan->drm_dev->dev_private; 97 struct drm_i915_private *dev_priv = gpio->dev_priv;
63 u32 val; 98 u32 reserved = get_reserved(gpio);
64 99 I915_WRITE(gpio->reg, reserved | GPIO_CLOCK_DIR_MASK);
65 val = I915_READ(chan->reg); 100 I915_WRITE(gpio->reg, reserved);
66 return ((val & GPIO_CLOCK_VAL_IN) != 0); 101 return (I915_READ(gpio->reg) & GPIO_CLOCK_VAL_IN) != 0;
67} 102}
68 103
69static int get_data(void *data) 104static int get_data(void *data)
70{ 105{
71 struct intel_i2c_chan *chan = data; 106 struct intel_gpio *gpio = data;
72 struct drm_i915_private *dev_priv = chan->drm_dev->dev_private; 107 struct drm_i915_private *dev_priv = gpio->dev_priv;
73 u32 val; 108 u32 reserved = get_reserved(gpio);
74 109 I915_WRITE(gpio->reg, reserved | GPIO_DATA_DIR_MASK);
75 val = I915_READ(chan->reg); 110 I915_WRITE(gpio->reg, reserved);
76 return ((val & GPIO_DATA_VAL_IN) != 0); 111 return (I915_READ(gpio->reg) & GPIO_DATA_VAL_IN) != 0;
77} 112}
78 113
79static void set_clock(void *data, int state_high) 114static void set_clock(void *data, int state_high)
80{ 115{
81 struct intel_i2c_chan *chan = data; 116 struct intel_gpio *gpio = data;
82 struct drm_device *dev = chan->drm_dev; 117 struct drm_i915_private *dev_priv = gpio->dev_priv;
83 struct drm_i915_private *dev_priv = chan->drm_dev->dev_private; 118 u32 reserved = get_reserved(gpio);
84 u32 reserved = 0, clock_bits; 119 u32 clock_bits;
85
86 /* On most chips, these bits must be preserved in software. */
87 if (!IS_I830(dev) && !IS_845G(dev))
88 reserved = I915_READ(chan->reg) & (GPIO_DATA_PULLUP_DISABLE |
89 GPIO_CLOCK_PULLUP_DISABLE);
90 120
91 if (state_high) 121 if (state_high)
92 clock_bits = GPIO_CLOCK_DIR_IN | GPIO_CLOCK_DIR_MASK; 122 clock_bits = GPIO_CLOCK_DIR_IN | GPIO_CLOCK_DIR_MASK;
93 else 123 else
94 clock_bits = GPIO_CLOCK_DIR_OUT | GPIO_CLOCK_DIR_MASK | 124 clock_bits = GPIO_CLOCK_DIR_OUT | GPIO_CLOCK_DIR_MASK |
95 GPIO_CLOCK_VAL_MASK; 125 GPIO_CLOCK_VAL_MASK;
96 I915_WRITE(chan->reg, reserved | clock_bits); 126
97 udelay(I2C_RISEFALL_TIME); /* wait for the line to change state */ 127 I915_WRITE(gpio->reg, reserved | clock_bits);
128 POSTING_READ(gpio->reg);
98} 129}
99 130
100static void set_data(void *data, int state_high) 131static void set_data(void *data, int state_high)
101{ 132{
102 struct intel_i2c_chan *chan = data; 133 struct intel_gpio *gpio = data;
103 struct drm_device *dev = chan->drm_dev; 134 struct drm_i915_private *dev_priv = gpio->dev_priv;
104 struct drm_i915_private *dev_priv = chan->drm_dev->dev_private; 135 u32 reserved = get_reserved(gpio);
105 u32 reserved = 0, data_bits; 136 u32 data_bits;
106
107 /* On most chips, these bits must be preserved in software. */
108 if (!IS_I830(dev) && !IS_845G(dev))
109 reserved = I915_READ(chan->reg) & (GPIO_DATA_PULLUP_DISABLE |
110 GPIO_CLOCK_PULLUP_DISABLE);
111 137
112 if (state_high) 138 if (state_high)
113 data_bits = GPIO_DATA_DIR_IN | GPIO_DATA_DIR_MASK; 139 data_bits = GPIO_DATA_DIR_IN | GPIO_DATA_DIR_MASK;
@@ -115,109 +141,313 @@ static void set_data(void *data, int state_high)
115 data_bits = GPIO_DATA_DIR_OUT | GPIO_DATA_DIR_MASK | 141 data_bits = GPIO_DATA_DIR_OUT | GPIO_DATA_DIR_MASK |
116 GPIO_DATA_VAL_MASK; 142 GPIO_DATA_VAL_MASK;
117 143
118 I915_WRITE(chan->reg, reserved | data_bits); 144 I915_WRITE(gpio->reg, reserved | data_bits);
119 udelay(I2C_RISEFALL_TIME); /* wait for the line to change state */ 145 POSTING_READ(gpio->reg);
120} 146}
121 147
122/* Clears the GMBUS setup. Our driver doesn't make use of the GMBUS I2C 148static struct i2c_adapter *
123 * engine, but if the BIOS leaves it enabled, then that can break our use 149intel_gpio_create(struct drm_i915_private *dev_priv, u32 pin)
124 * of the bit-banging I2C interfaces. This is notably the case with the
125 * Mac Mini in EFI mode.
126 */
127void
128intel_i2c_reset_gmbus(struct drm_device *dev)
129{ 150{
130 struct drm_i915_private *dev_priv = dev->dev_private; 151 static const int map_pin_to_reg[] = {
152 0,
153 GPIOB,
154 GPIOA,
155 GPIOC,
156 GPIOD,
157 GPIOE,
158 0,
159 GPIOF,
160 };
161 struct intel_gpio *gpio;
131 162
132 if (HAS_PCH_SPLIT(dev)) { 163 if (pin < 1 || pin > 7)
133 I915_WRITE(PCH_GMBUS0, 0); 164 return NULL;
134 } else { 165
135 I915_WRITE(GMBUS0, 0); 166 gpio = kzalloc(sizeof(struct intel_gpio), GFP_KERNEL);
167 if (gpio == NULL)
168 return NULL;
169
170 gpio->reg = map_pin_to_reg[pin];
171 if (HAS_PCH_SPLIT(dev_priv->dev))
172 gpio->reg += PCH_GPIOA - GPIOA;
173 gpio->dev_priv = dev_priv;
174
175 snprintf(gpio->adapter.name, I2C_NAME_SIZE, "GPIO%c", "?BACDEF?"[pin]);
176 gpio->adapter.owner = THIS_MODULE;
177 gpio->adapter.algo_data = &gpio->algo;
178 gpio->adapter.dev.parent = &dev_priv->dev->pdev->dev;
179 gpio->algo.setsda = set_data;
180 gpio->algo.setscl = set_clock;
181 gpio->algo.getsda = get_data;
182 gpio->algo.getscl = get_clock;
183 gpio->algo.udelay = I2C_RISEFALL_TIME;
184 gpio->algo.timeout = usecs_to_jiffies(2200);
185 gpio->algo.data = gpio;
186
187 if (i2c_bit_add_bus(&gpio->adapter))
188 goto out_free;
189
190 return &gpio->adapter;
191
192out_free:
193 kfree(gpio);
194 return NULL;
195}
196
197static int
198intel_i2c_quirk_xfer(struct drm_i915_private *dev_priv,
199 struct i2c_adapter *adapter,
200 struct i2c_msg *msgs,
201 int num)
202{
203 struct intel_gpio *gpio = container_of(adapter,
204 struct intel_gpio,
205 adapter);
206 int ret;
207
208 intel_i2c_reset(dev_priv->dev);
209
210 intel_i2c_quirk_set(dev_priv, true);
211 set_data(gpio, 1);
212 set_clock(gpio, 1);
213 udelay(I2C_RISEFALL_TIME);
214
215 ret = adapter->algo->master_xfer(adapter, msgs, num);
216
217 set_data(gpio, 1);
218 set_clock(gpio, 1);
219 intel_i2c_quirk_set(dev_priv, false);
220
221 return ret;
222}
223
224static int
225gmbus_xfer(struct i2c_adapter *adapter,
226 struct i2c_msg *msgs,
227 int num)
228{
229 struct intel_gmbus *bus = container_of(adapter,
230 struct intel_gmbus,
231 adapter);
232 struct drm_i915_private *dev_priv = adapter->algo_data;
233 int i, reg_offset;
234
235 if (bus->force_bit)
236 return intel_i2c_quirk_xfer(dev_priv,
237 bus->force_bit, msgs, num);
238
239 reg_offset = HAS_PCH_SPLIT(dev_priv->dev) ? PCH_GMBUS0 - GMBUS0 : 0;
240
241 I915_WRITE(GMBUS0 + reg_offset, bus->reg0);
242
243 for (i = 0; i < num; i++) {
244 u16 len = msgs[i].len;
245 u8 *buf = msgs[i].buf;
246
247 if (msgs[i].flags & I2C_M_RD) {
248 I915_WRITE(GMBUS1 + reg_offset,
249 GMBUS_CYCLE_WAIT | (i + 1 == num ? GMBUS_CYCLE_STOP : 0) |
250 (len << GMBUS_BYTE_COUNT_SHIFT) |
251 (msgs[i].addr << GMBUS_SLAVE_ADDR_SHIFT) |
252 GMBUS_SLAVE_READ | GMBUS_SW_RDY);
253 POSTING_READ(GMBUS2+reg_offset);
254 do {
255 u32 val, loop = 0;
256
257 if (wait_for(I915_READ(GMBUS2 + reg_offset) & (GMBUS_SATOER | GMBUS_HW_RDY), 50))
258 goto timeout;
259 if (I915_READ(GMBUS2 + reg_offset) & GMBUS_SATOER)
260 return 0;
261
262 val = I915_READ(GMBUS3 + reg_offset);
263 do {
264 *buf++ = val & 0xff;
265 val >>= 8;
266 } while (--len && ++loop < 4);
267 } while (len);
268 } else {
269 u32 val, loop;
270
271 val = loop = 0;
272 do {
273 val |= *buf++ << (8 * loop);
274 } while (--len && ++loop < 4);
275
276 I915_WRITE(GMBUS3 + reg_offset, val);
277 I915_WRITE(GMBUS1 + reg_offset,
278 (i + 1 == num ? GMBUS_CYCLE_STOP : GMBUS_CYCLE_WAIT) |
279 (msgs[i].len << GMBUS_BYTE_COUNT_SHIFT) |
280 (msgs[i].addr << GMBUS_SLAVE_ADDR_SHIFT) |
281 GMBUS_SLAVE_WRITE | GMBUS_SW_RDY);
282 POSTING_READ(GMBUS2+reg_offset);
283
284 while (len) {
285 if (wait_for(I915_READ(GMBUS2 + reg_offset) & (GMBUS_SATOER | GMBUS_HW_RDY), 50))
286 goto timeout;
287 if (I915_READ(GMBUS2 + reg_offset) & GMBUS_SATOER)
288 return 0;
289
290 val = loop = 0;
291 do {
292 val |= *buf++ << (8 * loop);
293 } while (--len && ++loop < 4);
294
295 I915_WRITE(GMBUS3 + reg_offset, val);
296 POSTING_READ(GMBUS2+reg_offset);
297 }
298 }
299
300 if (i + 1 < num && wait_for(I915_READ(GMBUS2 + reg_offset) & (GMBUS_SATOER | GMBUS_HW_WAIT_PHASE), 50))
301 goto timeout;
302 if (I915_READ(GMBUS2 + reg_offset) & GMBUS_SATOER)
303 return 0;
136 } 304 }
305
306 return num;
307
308timeout:
309 DRM_INFO("GMBUS timed out, falling back to bit banging on pin %d [%s]\n",
310 bus->reg0 & 0xff, bus->adapter.name);
 311 /* Hardware may not support GMBUS over these pins; try GPIO bit-banging instead. */
312 bus->force_bit = intel_gpio_create(dev_priv, bus->reg0 & 0xff);
313 if (!bus->force_bit)
314 return -ENOMEM;
315
316 return intel_i2c_quirk_xfer(dev_priv, bus->force_bit, msgs, num);
137} 317}
138 318
319static u32 gmbus_func(struct i2c_adapter *adapter)
320{
321 struct intel_gmbus *bus = container_of(adapter,
322 struct intel_gmbus,
323 adapter);
324
325 if (bus->force_bit)
326 bus->force_bit->algo->functionality(bus->force_bit);
327
328 return (I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
329 /* I2C_FUNC_10BIT_ADDR | */
330 I2C_FUNC_SMBUS_READ_BLOCK_DATA |
331 I2C_FUNC_SMBUS_BLOCK_PROC_CALL);
332}
333
334static const struct i2c_algorithm gmbus_algorithm = {
335 .master_xfer = gmbus_xfer,
336 .functionality = gmbus_func
337};
338
139/** 339/**
140 * intel_i2c_create - instantiate an Intel i2c bus using the specified GPIO reg 340 * intel_gmbus_setup - instantiate all Intel i2c GMBuses
141 * @dev: DRM device 341 * @dev: DRM device
142 * @output: driver specific output device
143 * @reg: GPIO reg to use
144 * @name: name for this bus
145 * @slave_addr: slave address (if fixed)
146 *
147 * Creates and registers a new i2c bus with the Linux i2c layer, for use
148 * in output probing and control (e.g. DDC or SDVO control functions).
149 *
150 * Possible values for @reg include:
151 * %GPIOA
152 * %GPIOB
153 * %GPIOC
154 * %GPIOD
155 * %GPIOE
156 * %GPIOF
157 * %GPIOG
158 * %GPIOH
159 * see PRM for details on how these different busses are used.
160 */ 342 */
161struct i2c_adapter *intel_i2c_create(struct drm_device *dev, const u32 reg, 343int intel_setup_gmbus(struct drm_device *dev)
162 const char *name)
163{ 344{
164 struct intel_i2c_chan *chan; 345 static const char *names[GMBUS_NUM_PORTS] = {
346 "disabled",
347 "ssc",
348 "vga",
349 "panel",
350 "dpc",
351 "dpb",
352 "reserved"
353 "dpd",
354 };
355 struct drm_i915_private *dev_priv = dev->dev_private;
356 int ret, i;
165 357
166 chan = kzalloc(sizeof(struct intel_i2c_chan), GFP_KERNEL); 358 dev_priv->gmbus = kcalloc(sizeof(struct intel_gmbus), GMBUS_NUM_PORTS,
167 if (!chan) 359 GFP_KERNEL);
168 goto out_free; 360 if (dev_priv->gmbus == NULL)
361 return -ENOMEM;
169 362
170 chan->drm_dev = dev; 363 for (i = 0; i < GMBUS_NUM_PORTS; i++) {
171 chan->reg = reg; 364 struct intel_gmbus *bus = &dev_priv->gmbus[i];
172 snprintf(chan->adapter.name, I2C_NAME_SIZE, "intel drm %s", name);
173 chan->adapter.owner = THIS_MODULE;
174 chan->adapter.algo_data = &chan->algo;
175 chan->adapter.dev.parent = &dev->pdev->dev;
176 chan->algo.setsda = set_data;
177 chan->algo.setscl = set_clock;
178 chan->algo.getsda = get_data;
179 chan->algo.getscl = get_clock;
180 chan->algo.udelay = 20;
181 chan->algo.timeout = usecs_to_jiffies(2200);
182 chan->algo.data = chan;
183
184 i2c_set_adapdata(&chan->adapter, chan);
185
186 if(i2c_bit_add_bus(&chan->adapter))
187 goto out_free;
188 365
189 intel_i2c_reset_gmbus(dev); 366 bus->adapter.owner = THIS_MODULE;
367 bus->adapter.class = I2C_CLASS_DDC;
368 snprintf(bus->adapter.name,
369 I2C_NAME_SIZE,
370 "gmbus %s",
371 names[i]);
190 372
191 /* JJJ: raise SCL and SDA? */ 373 bus->adapter.dev.parent = &dev->pdev->dev;
192 intel_i2c_quirk_set(dev, true); 374 bus->adapter.algo_data = dev_priv;
193 set_data(chan, 1);
194 set_clock(chan, 1);
195 intel_i2c_quirk_set(dev, false);
196 udelay(20);
197 375
198 return &chan->adapter; 376 bus->adapter.algo = &gmbus_algorithm;
377 ret = i2c_add_adapter(&bus->adapter);
378 if (ret)
379 goto err;
199 380
200out_free: 381 /* By default use a conservative clock rate */
201 kfree(chan); 382 bus->reg0 = i | GMBUS_RATE_100KHZ;
202 return NULL; 383
384 /* XXX force bit banging until GMBUS is fully debugged */
385 bus->force_bit = intel_gpio_create(dev_priv, i);
386 }
387
388 intel_i2c_reset(dev_priv->dev);
389
390 return 0;
391
392err:
 393 while (i--) {
394 struct intel_gmbus *bus = &dev_priv->gmbus[i];
395 i2c_del_adapter(&bus->adapter);
396 }
397 kfree(dev_priv->gmbus);
398 dev_priv->gmbus = NULL;
399 return ret;
203} 400}
204 401
205/** 402void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed)
206 * intel_i2c_destroy - unregister and free i2c bus resources 403{
207 * @output: channel to free 404 struct intel_gmbus *bus = to_intel_gmbus(adapter);
208 * 405
209 * Unregister the adapter from the i2c layer, then free the structure. 406 /* speed:
210 */ 407 * 0x0 = 100 KHz
211void intel_i2c_destroy(struct i2c_adapter *adapter) 408 * 0x1 = 50 KHz
409 * 0x2 = 400 KHz
 410 * 0x3 = 1000 KHz
411 */
412 bus->reg0 = (bus->reg0 & ~(0x3 << 8)) | (speed << 8);
413}
414
415void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit)
416{
417 struct intel_gmbus *bus = to_intel_gmbus(adapter);
418
419 if (force_bit) {
420 if (bus->force_bit == NULL) {
421 struct drm_i915_private *dev_priv = adapter->algo_data;
422 bus->force_bit = intel_gpio_create(dev_priv,
423 bus->reg0 & 0xff);
424 }
425 } else {
426 if (bus->force_bit) {
427 i2c_del_adapter(bus->force_bit);
428 kfree(bus->force_bit);
429 bus->force_bit = NULL;
430 }
431 }
432}
433
434void intel_teardown_gmbus(struct drm_device *dev)
212{ 435{
213 struct intel_i2c_chan *chan; 436 struct drm_i915_private *dev_priv = dev->dev_private;
437 int i;
214 438
215 if (!adapter) 439 if (dev_priv->gmbus == NULL)
216 return; 440 return;
217 441
218 chan = container_of(adapter, 442 for (i = 0; i < GMBUS_NUM_PORTS; i++) {
219 struct intel_i2c_chan, 443 struct intel_gmbus *bus = &dev_priv->gmbus[i];
220 adapter); 444 if (bus->force_bit) {
221 i2c_del_adapter(&chan->adapter); 445 i2c_del_adapter(bus->force_bit);
222 kfree(chan); 446 kfree(bus->force_bit);
447 }
448 i2c_del_adapter(&bus->adapter);
449 }
450
451 kfree(dev_priv->gmbus);
452 dev_priv->gmbus = NULL;
223} 453}
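
gmbus_xfer() above moves message bytes through GMBUS3 four at a time, least-significant byte first, in both directions. A standalone sketch of just that pack/drain arithmetic, with the register replaced by a local `val` (illustration only, not kernel code):

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		uint8_t msg[6] = { 0x11, 0x22, 0x33, 0x44, 0x55, 0x66 };
		uint8_t out[6];
		uint8_t *buf = msg, *dst = out;
		uint16_t wlen = sizeof(msg), rlen = sizeof(msg);

		while (wlen) {
			uint32_t val = 0, loop = 0;

			/* write side: pack up to 4 bytes, LSB first,
			 * exactly as the GMBUS3 fill loop above */
			do {
				val |= (uint32_t)*buf++ << (8 * loop);
			} while (--wlen && ++loop < 4);

			/* read side: drain the same 32-bit register image */
			loop = 0;
			do {
				*dst++ = val & 0xff;
				val >>= 8;
			} while (--rlen && ++loop < 4);
		}

		/* 6 bytes make one full word (0x44332211) plus a half word */
		printf("round trip %s\n",
		       memcmp(msg, out, sizeof(msg)) ? "FAILED" : "ok");
		return 0;
	}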
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 6ec39a86ed06..f1a649990ea9 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -43,102 +43,76 @@
43/* Private structure for the integrated LVDS support */ 43/* Private structure for the integrated LVDS support */
44struct intel_lvds { 44struct intel_lvds {
45 struct intel_encoder base; 45 struct intel_encoder base;
46
47 struct edid *edid;
48
46 int fitting_mode; 49 int fitting_mode;
47 u32 pfit_control; 50 u32 pfit_control;
48 u32 pfit_pgm_ratios; 51 u32 pfit_pgm_ratios;
52 bool pfit_dirty;
53
54 struct drm_display_mode *fixed_mode;
49}; 55};
50 56
51static struct intel_lvds *enc_to_intel_lvds(struct drm_encoder *encoder) 57static struct intel_lvds *to_intel_lvds(struct drm_encoder *encoder)
52{ 58{
53 return container_of(enc_to_intel_encoder(encoder), struct intel_lvds, base); 59 return container_of(encoder, struct intel_lvds, base.base);
54}
55
56/**
57 * Sets the backlight level.
58 *
59 * \param level backlight level, from 0 to intel_lvds_get_max_backlight().
60 */
61static void intel_lvds_set_backlight(struct drm_device *dev, int level)
62{
63 struct drm_i915_private *dev_priv = dev->dev_private;
64 u32 blc_pwm_ctl, reg;
65
66 if (HAS_PCH_SPLIT(dev))
67 reg = BLC_PWM_CPU_CTL;
68 else
69 reg = BLC_PWM_CTL;
70
71 blc_pwm_ctl = I915_READ(reg) & ~BACKLIGHT_DUTY_CYCLE_MASK;
72 I915_WRITE(reg, (blc_pwm_ctl |
73 (level << BACKLIGHT_DUTY_CYCLE_SHIFT)));
74} 60}
75 61
76/** 62static struct intel_lvds *intel_attached_lvds(struct drm_connector *connector)
77 * Returns the maximum level of the backlight duty cycle field.
78 */
79static u32 intel_lvds_get_max_backlight(struct drm_device *dev)
80{ 63{
81 struct drm_i915_private *dev_priv = dev->dev_private; 64 return container_of(intel_attached_encoder(connector),
82 u32 reg; 65 struct intel_lvds, base);
83
84 if (HAS_PCH_SPLIT(dev))
85 reg = BLC_PWM_PCH_CTL2;
86 else
87 reg = BLC_PWM_CTL;
88
89 return ((I915_READ(reg) & BACKLIGHT_MODULATION_FREQ_MASK) >>
90 BACKLIGHT_MODULATION_FREQ_SHIFT) * 2;
91} 66}
92 67
93/** 68/**
94 * Sets the power state for the panel. 69 * Sets the power state for the panel.
95 */ 70 */
96static void intel_lvds_set_power(struct drm_device *dev, bool on) 71static void intel_lvds_set_power(struct intel_lvds *intel_lvds, bool on)
97{ 72{
73 struct drm_device *dev = intel_lvds->base.base.dev;
98 struct drm_i915_private *dev_priv = dev->dev_private; 74 struct drm_i915_private *dev_priv = dev->dev_private;
99 u32 ctl_reg, status_reg, lvds_reg; 75 u32 ctl_reg, lvds_reg;
100 76
101 if (HAS_PCH_SPLIT(dev)) { 77 if (HAS_PCH_SPLIT(dev)) {
102 ctl_reg = PCH_PP_CONTROL; 78 ctl_reg = PCH_PP_CONTROL;
103 status_reg = PCH_PP_STATUS;
104 lvds_reg = PCH_LVDS; 79 lvds_reg = PCH_LVDS;
105 } else { 80 } else {
106 ctl_reg = PP_CONTROL; 81 ctl_reg = PP_CONTROL;
107 status_reg = PP_STATUS;
108 lvds_reg = LVDS; 82 lvds_reg = LVDS;
109 } 83 }
110 84
111 if (on) { 85 if (on) {
112 I915_WRITE(lvds_reg, I915_READ(lvds_reg) | LVDS_PORT_EN); 86 I915_WRITE(lvds_reg, I915_READ(lvds_reg) | LVDS_PORT_EN);
113 POSTING_READ(lvds_reg); 87 I915_WRITE(ctl_reg, I915_READ(ctl_reg) | POWER_TARGET_ON);
114 88 intel_panel_set_backlight(dev, dev_priv->backlight_level);
115 I915_WRITE(ctl_reg, I915_READ(ctl_reg) |
116 POWER_TARGET_ON);
117 if (wait_for(I915_READ(status_reg) & PP_ON, 1000, 0))
118 DRM_ERROR("timed out waiting to enable LVDS pipe");
119
120 intel_lvds_set_backlight(dev, dev_priv->backlight_duty_cycle);
121 } else { 89 } else {
122 intel_lvds_set_backlight(dev, 0); 90 dev_priv->backlight_level = intel_panel_get_backlight(dev);
91
92 intel_panel_set_backlight(dev, 0);
93 I915_WRITE(ctl_reg, I915_READ(ctl_reg) & ~POWER_TARGET_ON);
123 94
124 I915_WRITE(ctl_reg, I915_READ(ctl_reg) & 95 if (intel_lvds->pfit_control) {
125 ~POWER_TARGET_ON); 96 if (wait_for((I915_READ(PP_STATUS) & PP_ON) == 0, 1000))
126 if (wait_for((I915_READ(status_reg) & PP_ON) == 0, 1000, 0)) 97 DRM_ERROR("timed out waiting for panel to power off\n");
127 DRM_ERROR("timed out waiting for LVDS pipe to turn off"); 98 I915_WRITE(PFIT_CONTROL, 0);
99 intel_lvds->pfit_control = 0;
100 intel_lvds->pfit_dirty = false;
101 }
128 102
129 I915_WRITE(lvds_reg, I915_READ(lvds_reg) & ~LVDS_PORT_EN); 103 I915_WRITE(lvds_reg, I915_READ(lvds_reg) & ~LVDS_PORT_EN);
130 POSTING_READ(lvds_reg);
131 } 104 }
105 POSTING_READ(lvds_reg);
132} 106}
133 107
134static void intel_lvds_dpms(struct drm_encoder *encoder, int mode) 108static void intel_lvds_dpms(struct drm_encoder *encoder, int mode)
135{ 109{
136 struct drm_device *dev = encoder->dev; 110 struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
137 111
138 if (mode == DRM_MODE_DPMS_ON) 112 if (mode == DRM_MODE_DPMS_ON)
139 intel_lvds_set_power(dev, true); 113 intel_lvds_set_power(intel_lvds, true);
140 else 114 else
141 intel_lvds_set_power(dev, false); 115 intel_lvds_set_power(intel_lvds, false);
142 116
143 /* XXX: We never power down the LVDS pairs. */ 117 /* XXX: We never power down the LVDS pairs. */
144} 118}
@@ -146,16 +120,13 @@ static void intel_lvds_dpms(struct drm_encoder *encoder, int mode)
146static int intel_lvds_mode_valid(struct drm_connector *connector, 120static int intel_lvds_mode_valid(struct drm_connector *connector,
147 struct drm_display_mode *mode) 121 struct drm_display_mode *mode)
148{ 122{
149 struct drm_device *dev = connector->dev; 123 struct intel_lvds *intel_lvds = intel_attached_lvds(connector);
150 struct drm_i915_private *dev_priv = dev->dev_private; 124 struct drm_display_mode *fixed_mode = intel_lvds->fixed_mode;
151 struct drm_display_mode *fixed_mode = dev_priv->panel_fixed_mode;
152 125
153 if (fixed_mode) { 126 if (mode->hdisplay > fixed_mode->hdisplay)
154 if (mode->hdisplay > fixed_mode->hdisplay) 127 return MODE_PANEL;
155 return MODE_PANEL; 128 if (mode->vdisplay > fixed_mode->vdisplay)
156 if (mode->vdisplay > fixed_mode->vdisplay) 129 return MODE_PANEL;
157 return MODE_PANEL;
158 }
159 130
160 return MODE_OK; 131 return MODE_OK;
161} 132}
@@ -223,12 +194,12 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
223 struct drm_device *dev = encoder->dev; 194 struct drm_device *dev = encoder->dev;
224 struct drm_i915_private *dev_priv = dev->dev_private; 195 struct drm_i915_private *dev_priv = dev->dev_private;
225 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); 196 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
226 struct intel_lvds *intel_lvds = enc_to_intel_lvds(encoder); 197 struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
227 struct drm_encoder *tmp_encoder; 198 struct drm_encoder *tmp_encoder;
228 u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0; 199 u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0;
229 200
230 /* Should never happen!! */ 201 /* Should never happen!! */
231 if (!IS_I965G(dev) && intel_crtc->pipe == 0) { 202 if (INTEL_INFO(dev)->gen < 4 && intel_crtc->pipe == 0) {
232 DRM_ERROR("Can't support LVDS on pipe A\n"); 203 DRM_ERROR("Can't support LVDS on pipe A\n");
233 return false; 204 return false;
234 } 205 }
@@ -241,9 +212,6 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
241 return false; 212 return false;
242 } 213 }
243 } 214 }
244 /* If we don't have a panel mode, there is nothing we can do */
245 if (dev_priv->panel_fixed_mode == NULL)
246 return true;
247 215
248 /* 216 /*
249 * We have timings from the BIOS for the panel, put them in 217 * We have timings from the BIOS for the panel, put them in
@@ -251,7 +219,7 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
251 * with the panel scaling set up to source from the H/VDisplay 219 * with the panel scaling set up to source from the H/VDisplay
252 * of the original mode. 220 * of the original mode.
253 */ 221 */
254 intel_fixed_panel_mode(dev_priv->panel_fixed_mode, adjusted_mode); 222 intel_fixed_panel_mode(intel_lvds->fixed_mode, adjusted_mode);
255 223
256 if (HAS_PCH_SPLIT(dev)) { 224 if (HAS_PCH_SPLIT(dev)) {
257 intel_pch_panel_fitting(dev, intel_lvds->fitting_mode, 225 intel_pch_panel_fitting(dev, intel_lvds->fitting_mode,
@@ -260,8 +228,8 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
260 } 228 }
261 229
262 /* Make sure pre-965s set dither correctly */ 230 /* Make sure pre-965s set dither correctly */
263 if (!IS_I965G(dev)) { 231 if (INTEL_INFO(dev)->gen < 4) {
264 if (dev_priv->panel_wants_dither || dev_priv->lvds_dither) 232 if (dev_priv->lvds_dither)
265 pfit_control |= PANEL_8TO6_DITHER_ENABLE; 233 pfit_control |= PANEL_8TO6_DITHER_ENABLE;
266 } 234 }
267 235
@@ -271,7 +239,7 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
271 goto out; 239 goto out;
272 240
273 /* 965+ wants fuzzy fitting */ 241 /* 965+ wants fuzzy fitting */
274 if (IS_I965G(dev)) 242 if (INTEL_INFO(dev)->gen >= 4)
275 pfit_control |= ((intel_crtc->pipe << PFIT_PIPE_SHIFT) | 243 pfit_control |= ((intel_crtc->pipe << PFIT_PIPE_SHIFT) |
276 PFIT_FILTER_FUZZY); 244 PFIT_FILTER_FUZZY);
277 245
@@ -297,7 +265,7 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
297 265
298 case DRM_MODE_SCALE_ASPECT: 266 case DRM_MODE_SCALE_ASPECT:
299 /* Scale but preserve the aspect ratio */ 267 /* Scale but preserve the aspect ratio */
300 if (IS_I965G(dev)) { 268 if (INTEL_INFO(dev)->gen >= 4) {
301 u32 scaled_width = adjusted_mode->hdisplay * mode->vdisplay; 269 u32 scaled_width = adjusted_mode->hdisplay * mode->vdisplay;
302 u32 scaled_height = mode->hdisplay * adjusted_mode->vdisplay; 270 u32 scaled_height = mode->hdisplay * adjusted_mode->vdisplay;
303 271
@@ -356,7 +324,7 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
356 * Fortunately this is all done for us in hw. 324 * Fortunately this is all done for us in hw.
357 */ 325 */
358 pfit_control |= PFIT_ENABLE; 326 pfit_control |= PFIT_ENABLE;
359 if (IS_I965G(dev)) 327 if (INTEL_INFO(dev)->gen >= 4)
360 pfit_control |= PFIT_SCALING_AUTO; 328 pfit_control |= PFIT_SCALING_AUTO;
361 else 329 else
362 pfit_control |= (VERT_AUTO_SCALE | HORIZ_AUTO_SCALE | 330 pfit_control |= (VERT_AUTO_SCALE | HORIZ_AUTO_SCALE |
@@ -369,8 +337,12 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
369 } 337 }
370 338
371out: 339out:
372 intel_lvds->pfit_control = pfit_control; 340 if (pfit_control != intel_lvds->pfit_control ||
373 intel_lvds->pfit_pgm_ratios = pfit_pgm_ratios; 341 pfit_pgm_ratios != intel_lvds->pfit_pgm_ratios) {
342 intel_lvds->pfit_control = pfit_control;
343 intel_lvds->pfit_pgm_ratios = pfit_pgm_ratios;
344 intel_lvds->pfit_dirty = true;
345 }
374 dev_priv->lvds_border_bits = border; 346 dev_priv->lvds_border_bits = border;
375 347
376 /* 348 /*
@@ -386,30 +358,60 @@ static void intel_lvds_prepare(struct drm_encoder *encoder)
386{ 358{
387 struct drm_device *dev = encoder->dev; 359 struct drm_device *dev = encoder->dev;
388 struct drm_i915_private *dev_priv = dev->dev_private; 360 struct drm_i915_private *dev_priv = dev->dev_private;
389 u32 reg; 361 struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
390 362
391 if (HAS_PCH_SPLIT(dev)) 363 dev_priv->backlight_level = intel_panel_get_backlight(dev);
392 reg = BLC_PWM_CPU_CTL; 364
393 else 365 /* We try to do the minimum that is necessary in order to unlock
394 reg = BLC_PWM_CTL; 366 * the registers for mode setting.
395 367 *
396 dev_priv->saveBLC_PWM_CTL = I915_READ(reg); 368 * On Ironlake, this is quite simple as we just set the unlock key
397 dev_priv->backlight_duty_cycle = (dev_priv->saveBLC_PWM_CTL & 369 * and ignore all subtleties. (This may cause some issues...)
398 BACKLIGHT_DUTY_CYCLE_MASK); 370 *
371 * Prior to Ironlake, we must disable the pipe if we want to adjust
372 * the panel fitter. However at all other times we can just reset
373 * the registers regardless.
374 */
399 375
400 intel_lvds_set_power(dev, false); 376 if (HAS_PCH_SPLIT(dev)) {
377 I915_WRITE(PCH_PP_CONTROL,
378 I915_READ(PCH_PP_CONTROL) | PANEL_UNLOCK_REGS);
379 } else if (intel_lvds->pfit_dirty) {
380 I915_WRITE(PP_CONTROL,
381 (I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS)
382 & ~POWER_TARGET_ON);
383 } else {
384 I915_WRITE(PP_CONTROL,
385 I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS);
386 }
401} 387}
402 388
403static void intel_lvds_commit( struct drm_encoder *encoder) 389static void intel_lvds_commit(struct drm_encoder *encoder)
404{ 390{
405 struct drm_device *dev = encoder->dev; 391 struct drm_device *dev = encoder->dev;
406 struct drm_i915_private *dev_priv = dev->dev_private; 392 struct drm_i915_private *dev_priv = dev->dev_private;
393 struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
407 394
408 if (dev_priv->backlight_duty_cycle == 0) 395 if (dev_priv->backlight_level == 0)
409 dev_priv->backlight_duty_cycle = 396 dev_priv->backlight_level = intel_panel_get_max_backlight(dev);
410 intel_lvds_get_max_backlight(dev); 397
398 /* Undo any unlocking done in prepare to prevent accidental
399 * adjustment of the registers.
400 */
401 if (HAS_PCH_SPLIT(dev)) {
402 u32 val = I915_READ(PCH_PP_CONTROL);
403 if ((val & PANEL_UNLOCK_REGS) == PANEL_UNLOCK_REGS)
404 I915_WRITE(PCH_PP_CONTROL, val & 0x3);
405 } else {
406 u32 val = I915_READ(PP_CONTROL);
407 if ((val & PANEL_UNLOCK_REGS) == PANEL_UNLOCK_REGS)
408 I915_WRITE(PP_CONTROL, val & 0x3);
409 }
411 410
412 intel_lvds_set_power(dev, true); 411 /* Always do a full power on as we do not know what state
412 * we were left in.
413 */
414 intel_lvds_set_power(intel_lvds, true);
413} 415}
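
prepare() above unlocks the panel power-sequencer registers and commit() writes the lock back once the modeset is done, keeping only the low power bits. A condensed sketch of that unlock, modify, relock sequence over a fake register; the numeric key value is an assumption, only the (val & PANEL_UNLOCK_REGS) == PANEL_UNLOCK_REGS test and the & 0x3 relock come from the diff:

#include <stdio.h>

/* Fake PP_CONTROL register; the unlock key value is an assumption. */
#define PANEL_UNLOCK_REGS 0xabcd0000u
#define POWER_TARGET_ON   0x1u

static unsigned int pp_control;

static void panel_unlock(void)
{
	pp_control |= PANEL_UNLOCK_REGS; /* writes now take effect */
}

static void panel_relock(void)
{
	/* only relock if we really hold the unlock key */
	if ((pp_control & PANEL_UNLOCK_REGS) == PANEL_UNLOCK_REGS)
		pp_control &= 0x3; /* keep just the power bits */
}

int main(void)
{
	panel_unlock();
	pp_control |= POWER_TARGET_ON;
	panel_relock();
	printf("PP_CONTROL = %#x\n", pp_control);
	return 0;
}
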
414 416
415static void intel_lvds_mode_set(struct drm_encoder *encoder, 417static void intel_lvds_mode_set(struct drm_encoder *encoder,
@@ -418,7 +420,7 @@ static void intel_lvds_mode_set(struct drm_encoder *encoder,
418{ 420{
419 struct drm_device *dev = encoder->dev; 421 struct drm_device *dev = encoder->dev;
420 struct drm_i915_private *dev_priv = dev->dev_private; 422 struct drm_i915_private *dev_priv = dev->dev_private;
421 struct intel_lvds *intel_lvds = enc_to_intel_lvds(encoder); 423 struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
422 424
423 /* 425 /*
424 * The LVDS pin pair will already have been turned on in the 426 * The LVDS pin pair will already have been turned on in the
@@ -429,13 +431,23 @@ static void intel_lvds_mode_set(struct drm_encoder *encoder,
429 if (HAS_PCH_SPLIT(dev)) 431 if (HAS_PCH_SPLIT(dev))
430 return; 432 return;
431 433
434 if (!intel_lvds->pfit_dirty)
435 return;
436
432 /* 437 /*
433 * Enable automatic panel scaling so that non-native modes fill the 438 * Enable automatic panel scaling so that non-native modes fill the
434 * screen. Should be enabled before the pipe is enabled, according to 439 * screen. Should be enabled before the pipe is enabled, according to
435 * register description and PRM. 440 * register description and PRM.
436 */ 441 */
442 DRM_DEBUG_KMS("applying panel-fitter: %x, %x\n",
443 intel_lvds->pfit_control,
444 intel_lvds->pfit_pgm_ratios);
445 if (wait_for((I915_READ(PP_STATUS) & PP_ON) == 0, 1000))
446 DRM_ERROR("timed out waiting for panel to power off\n");
447
437 I915_WRITE(PFIT_PGM_RATIOS, intel_lvds->pfit_pgm_ratios); 448 I915_WRITE(PFIT_PGM_RATIOS, intel_lvds->pfit_pgm_ratios);
438 I915_WRITE(PFIT_CONTROL, intel_lvds->pfit_control); 449 I915_WRITE(PFIT_CONTROL, intel_lvds->pfit_control);
450 intel_lvds->pfit_dirty = false;
439} 451}
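
Note how the panel fitter is now driven through a dirty flag: mode_fixup() stages new pfit values, and mode_set() pushes them to the hardware only when they actually changed, because applying them costs a full panel power cycle. The same caching pattern in miniature, with the register writes replaced by printf:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct pfit_state {
	uint32_t control, ratios;
	bool dirty;
};

static void pfit_stage(struct pfit_state *s, uint32_t control, uint32_t ratios)
{
	if (control != s->control || ratios != s->ratios) {
		s->control = control;
		s->ratios = ratios;
		s->dirty = true;
	}
}

static void pfit_commit(struct pfit_state *s)
{
	if (!s->dirty)
		return; /* skip the expensive panel power cycle */
	printf("write PFIT_PGM_RATIOS=%#x PFIT_CONTROL=%#x\n",
	       (unsigned)s->ratios, (unsigned)s->control);
	s->dirty = false;
}

int main(void)
{
	struct pfit_state s = { 0 };
	pfit_stage(&s, 0x80000000u, 0x10001000u);
	pfit_commit(&s);
	pfit_commit(&s); /* no-op: nothing changed since last commit */
	return 0;
}
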
440 452
441/** 453/**
@@ -465,38 +477,22 @@ intel_lvds_detect(struct drm_connector *connector, bool force)
465 */ 477 */
466static int intel_lvds_get_modes(struct drm_connector *connector) 478static int intel_lvds_get_modes(struct drm_connector *connector)
467{ 479{
480 struct intel_lvds *intel_lvds = intel_attached_lvds(connector);
468 struct drm_device *dev = connector->dev; 481 struct drm_device *dev = connector->dev;
469 struct drm_encoder *encoder = intel_attached_encoder(connector); 482 struct drm_display_mode *mode;
470 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
471 struct drm_i915_private *dev_priv = dev->dev_private;
472 int ret = 0;
473
474 if (dev_priv->lvds_edid_good) {
475 ret = intel_ddc_get_modes(connector, intel_encoder->ddc_bus);
476 483
477 if (ret) 484 if (intel_lvds->edid) {
478 return ret; 485 drm_mode_connector_update_edid_property(connector,
486 intel_lvds->edid);
487 return drm_add_edid_modes(connector, intel_lvds->edid);
479 } 488 }
480 489
481 /* Didn't get an EDID, so 490 mode = drm_mode_duplicate(dev, intel_lvds->fixed_mode);
482 * set wide sync ranges so we get all modes 491 if (mode == 0)

483 * handed to valid_mode for checking 492 return 0;
484 */
485 connector->display_info.min_vfreq = 0;
486 connector->display_info.max_vfreq = 200;
487 connector->display_info.min_hfreq = 0;
488 connector->display_info.max_hfreq = 200;
489
490 if (dev_priv->panel_fixed_mode != NULL) {
491 struct drm_display_mode *mode;
492
493 mode = drm_mode_duplicate(dev, dev_priv->panel_fixed_mode);
494 drm_mode_probed_add(connector, mode);
495
496 return 1;
497 }
498 493
499 return 0; 494 drm_mode_probed_add(connector, mode);
495 return 1;
500} 496}
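
Since an LVDS panel cannot be hot-swapped, the EDID is now read once in intel_lvds_init() and cached in intel_lvds->edid; get_modes() replays the cache instead of hitting the DDC bus on every probe. The caching idea reduced to standalone C (the blob contents and helper names are stand-ins, not DRM calls):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct panel {
	unsigned char *edid; /* NULL if the probe failed at init */
	size_t edid_len;
};

static unsigned char *probe_edid_once(size_t *len)
{
	/* stand-in for the single drm_get_edid() call at init time */
	static const unsigned char blob[8] = { 0x00, 0xff, 0xff, 0xff,
					       0xff, 0xff, 0xff, 0x00 };
	unsigned char *copy = malloc(sizeof(blob));
	if (copy)
		memcpy(copy, blob, sizeof(blob));
	*len = sizeof(blob);
	return copy;
}

static int get_modes(const struct panel *p)
{
	if (p->edid) {
		printf("serving %zu-byte cached EDID\n", p->edid_len);
		return 1;
	}
	printf("no EDID: falling back to the fixed mode\n");
	return 1;
}

int main(void)
{
	struct panel p = { 0 };
	p.edid = probe_edid_once(&p.edid_len); /* done once, at init */
	get_modes(&p); /* no DDC traffic from here on */
	get_modes(&p);
	free(p.edid);
	return 0;
}
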
501 497
502static int intel_no_modeset_on_lid_dmi_callback(const struct dmi_system_id *id) 498static int intel_no_modeset_on_lid_dmi_callback(const struct dmi_system_id *id)
@@ -587,18 +583,17 @@ static int intel_lvds_set_property(struct drm_connector *connector,
587 struct drm_property *property, 583 struct drm_property *property,
588 uint64_t value) 584 uint64_t value)
589{ 585{
586 struct intel_lvds *intel_lvds = intel_attached_lvds(connector);
590 struct drm_device *dev = connector->dev; 587 struct drm_device *dev = connector->dev;
591 588
592 if (property == dev->mode_config.scaling_mode_property && 589 if (property == dev->mode_config.scaling_mode_property) {
593 connector->encoder) { 590 struct drm_crtc *crtc = intel_lvds->base.base.crtc;
594 struct drm_crtc *crtc = connector->encoder->crtc;
595 struct drm_encoder *encoder = connector->encoder;
596 struct intel_lvds *intel_lvds = enc_to_intel_lvds(encoder);
597 591
598 if (value == DRM_MODE_SCALE_NONE) { 592 if (value == DRM_MODE_SCALE_NONE) {
599 DRM_DEBUG_KMS("no scaling not supported\n"); 593 DRM_DEBUG_KMS("no scaling not supported\n");
600 return 0; 594 return -EINVAL;
601 } 595 }
596
602 if (intel_lvds->fitting_mode == value) { 597 if (intel_lvds->fitting_mode == value) {
603 /* the LVDS scaling property is not changed */ 598 /* the LVDS scaling property is not changed */
604 return 0; 599 return 0;
@@ -628,7 +623,7 @@ static const struct drm_encoder_helper_funcs intel_lvds_helper_funcs = {
628static const struct drm_connector_helper_funcs intel_lvds_connector_helper_funcs = { 623static const struct drm_connector_helper_funcs intel_lvds_connector_helper_funcs = {
629 .get_modes = intel_lvds_get_modes, 624 .get_modes = intel_lvds_get_modes,
630 .mode_valid = intel_lvds_mode_valid, 625 .mode_valid = intel_lvds_mode_valid,
631 .best_encoder = intel_attached_encoder, 626 .best_encoder = intel_best_encoder,
632}; 627};
633 628
634static const struct drm_connector_funcs intel_lvds_connector_funcs = { 629static const struct drm_connector_funcs intel_lvds_connector_funcs = {
@@ -726,16 +721,14 @@ static const struct dmi_system_id intel_no_lvds[] = {
726 * Find the reduced downclock for LVDS in EDID. 721 * Find the reduced downclock for LVDS in EDID.
727 */ 722 */
728static void intel_find_lvds_downclock(struct drm_device *dev, 723static void intel_find_lvds_downclock(struct drm_device *dev,
729 struct drm_connector *connector) 724 struct drm_display_mode *fixed_mode,
725 struct drm_connector *connector)
730{ 726{
731 struct drm_i915_private *dev_priv = dev->dev_private; 727 struct drm_i915_private *dev_priv = dev->dev_private;
732 struct drm_display_mode *scan, *panel_fixed_mode; 728 struct drm_display_mode *scan;
733 int temp_downclock; 729 int temp_downclock;
734 730
735 panel_fixed_mode = dev_priv->panel_fixed_mode; 731 temp_downclock = fixed_mode->clock;
736 temp_downclock = panel_fixed_mode->clock;
737
738 mutex_lock(&dev->mode_config.mutex);
739 list_for_each_entry(scan, &connector->probed_modes, head) { 732 list_for_each_entry(scan, &connector->probed_modes, head) {
740 /* 733 /*
741 * If one mode has the same resolution with the fixed_panel 734 * If one mode has the same resolution with the fixed_panel
@@ -744,14 +737,14 @@ static void intel_find_lvds_downclock(struct drm_device *dev,
744 * case we can set the different FPx0/1 to dynamically select 737 * case we can set the different FPx0/1 to dynamically select
745 * between low and high frequency. 738 * between low and high frequency.
746 */ 739 */
747 if (scan->hdisplay == panel_fixed_mode->hdisplay && 740 if (scan->hdisplay == fixed_mode->hdisplay &&
748 scan->hsync_start == panel_fixed_mode->hsync_start && 741 scan->hsync_start == fixed_mode->hsync_start &&
749 scan->hsync_end == panel_fixed_mode->hsync_end && 742 scan->hsync_end == fixed_mode->hsync_end &&
750 scan->htotal == panel_fixed_mode->htotal && 743 scan->htotal == fixed_mode->htotal &&
751 scan->vdisplay == panel_fixed_mode->vdisplay && 744 scan->vdisplay == fixed_mode->vdisplay &&
752 scan->vsync_start == panel_fixed_mode->vsync_start && 745 scan->vsync_start == fixed_mode->vsync_start &&
753 scan->vsync_end == panel_fixed_mode->vsync_end && 746 scan->vsync_end == fixed_mode->vsync_end &&
754 scan->vtotal == panel_fixed_mode->vtotal) { 747 scan->vtotal == fixed_mode->vtotal) {
755 if (scan->clock < temp_downclock) { 748 if (scan->clock < temp_downclock) {
756 /* 749 /*
757 * The downclock is already found. But we 750 * The downclock is already found. But we
@@ -761,17 +754,14 @@ static void intel_find_lvds_downclock(struct drm_device *dev,
761 } 754 }
762 } 755 }
763 } 756 }
764 mutex_unlock(&dev->mode_config.mutex); 757 if (temp_downclock < fixed_mode->clock && i915_lvds_downclock) {
765 if (temp_downclock < panel_fixed_mode->clock &&
766 i915_lvds_downclock) {
767 /* We found the downclock for LVDS. */ 758 /* We found the downclock for LVDS. */
768 dev_priv->lvds_downclock_avail = 1; 759 dev_priv->lvds_downclock_avail = 1;
769 dev_priv->lvds_downclock = temp_downclock; 760 dev_priv->lvds_downclock = temp_downclock;
770 DRM_DEBUG_KMS("LVDS downclock is found in EDID. " 761 DRM_DEBUG_KMS("LVDS downclock is found in EDID. "
771 "Normal clock %dKhz, downclock %dKhz\n", 762 "Normal clock %dKhz, downclock %dKhz\n",
772 panel_fixed_mode->clock, temp_downclock); 763 fixed_mode->clock, temp_downclock);
773 } 764 }
774 return;
775} 765}
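
The scan above accepts a probed mode as a downclock candidate only when every timing field matches the fixed mode, so switching clocks never changes the raster. A self-contained sketch of that selection (only four of the eight compared timing fields are shown, and the mode data is invented):

#include <stdio.h>

struct mode {
	int clock; /* kHz */
	int hdisplay, htotal, vdisplay, vtotal;
};

static int same_timings(const struct mode *a, const struct mode *b)
{
	return a->hdisplay == b->hdisplay && a->htotal == b->htotal &&
	       a->vdisplay == b->vdisplay && a->vtotal == b->vtotal;
}

int main(void)
{
	const struct mode fixed = { 68940, 1280, 1408, 800, 816 };
	const struct mode probed[] = {
		{ 68940, 1280, 1408, 800, 816 },
		{ 48960, 1280, 1408, 800, 816 }, /* same raster, lower clock */
		{ 65000, 1024, 1344, 768, 806 }, /* different panel timings */
	};
	int downclock = fixed.clock;

	for (unsigned int i = 0; i < sizeof(probed) / sizeof(probed[0]); i++)
		if (same_timings(&probed[i], &fixed) &&
		    probed[i].clock < downclock)
			downclock = probed[i].clock;

	if (downclock < fixed.clock)
		printf("Normal clock %dkHz, downclock %dkHz\n",
		       fixed.clock, downclock);
	return 0;
}
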
776 766
777/* 767/*
@@ -780,38 +770,67 @@ static void intel_find_lvds_downclock(struct drm_device *dev,
780 * If it is present, return 1. 770 * If it is present, return 1.
781 * If it is not present, return false. 771 * If it is not present, return false.
782 * If no child dev is parsed from VBT, it assumes that the LVDS is present. 772 * If no child dev is parsed from VBT, it assumes that the LVDS is present.
783 * Note: The addin_offset should also be checked for LVDS panel.
784 * Only when it is non-zero, it is assumed that it is present.
785 */ 773 */
786static int lvds_is_present_in_vbt(struct drm_device *dev) 774static bool lvds_is_present_in_vbt(struct drm_device *dev,
775 u8 *i2c_pin)
787{ 776{
788 struct drm_i915_private *dev_priv = dev->dev_private; 777 struct drm_i915_private *dev_priv = dev->dev_private;
789 struct child_device_config *p_child; 778 int i;
790 int i, ret;
791 779
792 if (!dev_priv->child_dev_num) 780 if (!dev_priv->child_dev_num)
793 return 1; 781 return true;
794 782
795 ret = 0;
796 for (i = 0; i < dev_priv->child_dev_num; i++) { 783 for (i = 0; i < dev_priv->child_dev_num; i++) {
797 p_child = dev_priv->child_dev + i; 784 struct child_device_config *child = dev_priv->child_dev + i;
798 /* 785
799 * If the device type is not LFP, continue. 786 /* If the device type is not LFP, continue.
800 * If the device type is 0x22, it is also regarded as LFP. 787 * We have to check both the new identifiers as well as the
788 * old for compatibility with some BIOSes.
801 */ 789 */
802 if (p_child->device_type != DEVICE_TYPE_INT_LFP && 790 if (child->device_type != DEVICE_TYPE_INT_LFP &&
803 p_child->device_type != DEVICE_TYPE_LFP) 791 child->device_type != DEVICE_TYPE_LFP)
804 continue; 792 continue;
805 793
806 /* The addin_offset should be checked. Only when it is 794 if (child->i2c_pin)
807 * non-zero, it is regarded as present. 795 *i2c_pin = child->i2c_pin;
796
797 /* However, we cannot trust the BIOS writers to populate
798 * the VBT correctly. Since LVDS requires additional
799 * information from AIM blocks, a non-zero addin offset is
800 * a good indicator that the LVDS is actually present.
808 */ 801 */
809 if (p_child->addin_offset) { 802 if (child->addin_offset)
810 ret = 1; 803 return true;
811 break; 804
812 } 805 /* But even then some BIOS writers perform some black magic
806 * and instantiate the device without reference to any
807 * additional data. Trust that if the VBT was written into
808 * the OpRegion then they have validated the LVDS's existence.
809 */
810 if (dev_priv->opregion.vbt)
811 return true;
813 } 812 }
814 return ret; 813
814 return false;
815}
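
The VBT walk now also harvests the panel's i2c pin and, as the comments explain, accepts either a non-zero addin_offset or an OpRegion-supplied VBT as evidence that the panel is real. That decision logic as a standalone sketch; the device-type codes here are placeholders, not the real VBT values:

#include <stdbool.h>
#include <stdio.h>

#define DEVICE_TYPE_INT_LFP 0x1022 /* placeholder value */
#define DEVICE_TYPE_LFP     0x001e /* placeholder value */

struct child_device {
	unsigned short device_type;
	unsigned short addin_offset;
	unsigned char i2c_pin;
};

static bool lvds_present(const struct child_device *kids, int n,
			 bool have_opregion_vbt, unsigned char *pin)
{
	if (n == 0)
		return true; /* no child devices parsed: assume present */

	for (int i = 0; i < n; i++) {
		if (kids[i].device_type != DEVICE_TYPE_INT_LFP &&
		    kids[i].device_type != DEVICE_TYPE_LFP)
			continue;
		if (kids[i].i2c_pin)
			*pin = kids[i].i2c_pin;
		if (kids[i].addin_offset || have_opregion_vbt)
			return true;
	}
	return false;
}

int main(void)
{
	struct child_device kids[] = { { DEVICE_TYPE_INT_LFP, 0x40, 3 } };
	unsigned char pin = 0;

	printf("LVDS %s, pin %u\n",
	       lvds_present(kids, 1, false, &pin) ? "present" : "absent", pin);
	return 0;
}
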
816
817static bool intel_lvds_ddc_probe(struct drm_device *dev, u8 pin)
818{
819 struct drm_i915_private *dev_priv = dev->dev_private;
820 u8 buf = 0;
821 struct i2c_msg msgs[] = {
822 {
823 .addr = 0xA0,
824 .flags = 0,
825 .len = 1,
826 .buf = &buf,
827 },
828 };
829 struct i2c_adapter *i2c = &dev_priv->gmbus[pin].adapter;
830 /* XXX this only appears to work when using GMBUS */
831 if (intel_gmbus_is_forced_bit(i2c))
832 return true;
833 return i2c_transfer(i2c, msgs, 1) == 1;
815} 834}
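
intel_lvds_ddc_probe() above pokes the panel's EDID EEPROM (8-bit address 0xA0, i.e. 7-bit 0x50) with a single one-byte transfer and treats any ACK as presence. A roughly equivalent check from userspace through i2c-dev, assuming bus /dev/i2c-0 and doing a read where the kernel version issues a write:

#include <fcntl.h>
#include <linux/i2c.h>
#include <linux/i2c-dev.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/dev/i2c-0", O_RDWR); /* bus number is an assumption */
	if (fd < 0) {
		perror("open");
		return 1;
	}

	unsigned char buf = 0;
	struct i2c_msg msg = {
		.addr  = 0x50, /* 7-bit EDID EEPROM address (0xA0 >> 1) */
		.flags = I2C_M_RD,
		.len   = 1,
		.buf   = &buf,
	};
	struct i2c_rdwr_ioctl_data xfer = { .msgs = &msg, .nmsgs = 1 };

	int present = ioctl(fd, I2C_RDWR, &xfer) == 1;
	printf("EDID device %s\n", present ? "responded" : "did not respond");
	close(fd);
	return !present;
}
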
816 835
817/** 836/**
@@ -832,13 +851,15 @@ void intel_lvds_init(struct drm_device *dev)
832 struct drm_display_mode *scan; /* *modes, *bios_mode; */ 851 struct drm_display_mode *scan; /* *modes, *bios_mode; */
833 struct drm_crtc *crtc; 852 struct drm_crtc *crtc;
834 u32 lvds; 853 u32 lvds;
835 int pipe, gpio = GPIOC; 854 int pipe;
855 u8 pin;
836 856
837 /* Skip init on machines we know falsely report LVDS */ 857 /* Skip init on machines we know falsely report LVDS */
838 if (dmi_check_system(intel_no_lvds)) 858 if (dmi_check_system(intel_no_lvds))
839 return; 859 return;
840 860
841 if (!lvds_is_present_in_vbt(dev)) { 861 pin = GMBUS_PORT_PANEL;
862 if (!lvds_is_present_in_vbt(dev, &pin)) {
842 DRM_DEBUG_KMS("LVDS is not present in VBT\n"); 863 DRM_DEBUG_KMS("LVDS is not present in VBT\n");
843 return; 864 return;
844 } 865 }
@@ -846,11 +867,15 @@ void intel_lvds_init(struct drm_device *dev)
846 if (HAS_PCH_SPLIT(dev)) { 867 if (HAS_PCH_SPLIT(dev)) {
847 if ((I915_READ(PCH_LVDS) & LVDS_DETECTED) == 0) 868 if ((I915_READ(PCH_LVDS) & LVDS_DETECTED) == 0)
848 return; 869 return;
849 if (dev_priv->edp_support) { 870 if (dev_priv->edp.support) {
850 DRM_DEBUG_KMS("disable LVDS for eDP support\n"); 871 DRM_DEBUG_KMS("disable LVDS for eDP support\n");
851 return; 872 return;
852 } 873 }
853 gpio = PCH_GPIOC; 874 }
875
876 if (!intel_lvds_ddc_probe(dev, pin)) {
877 DRM_DEBUG_KMS("LVDS did not respond to DDC probe\n");
878 return;
854 } 879 }
855 880
856 intel_lvds = kzalloc(sizeof(struct intel_lvds), GFP_KERNEL); 881 intel_lvds = kzalloc(sizeof(struct intel_lvds), GFP_KERNEL);
@@ -864,16 +889,20 @@ void intel_lvds_init(struct drm_device *dev)
864 return; 889 return;
865 } 890 }
866 891
892 if (!HAS_PCH_SPLIT(dev)) {
893 intel_lvds->pfit_control = I915_READ(PFIT_CONTROL);
894 }
895
867 intel_encoder = &intel_lvds->base; 896 intel_encoder = &intel_lvds->base;
868 encoder = &intel_encoder->enc; 897 encoder = &intel_encoder->base;
869 connector = &intel_connector->base; 898 connector = &intel_connector->base;
870 drm_connector_init(dev, &intel_connector->base, &intel_lvds_connector_funcs, 899 drm_connector_init(dev, &intel_connector->base, &intel_lvds_connector_funcs,
871 DRM_MODE_CONNECTOR_LVDS); 900 DRM_MODE_CONNECTOR_LVDS);
872 901
873 drm_encoder_init(dev, &intel_encoder->enc, &intel_lvds_enc_funcs, 902 drm_encoder_init(dev, &intel_encoder->base, &intel_lvds_enc_funcs,
874 DRM_MODE_ENCODER_LVDS); 903 DRM_MODE_ENCODER_LVDS);
875 904
876 drm_mode_connector_attach_encoder(&intel_connector->base, &intel_encoder->enc); 905 intel_connector_attach_encoder(intel_connector, intel_encoder);
877 intel_encoder->type = INTEL_OUTPUT_LVDS; 906 intel_encoder->type = INTEL_OUTPUT_LVDS;
878 907
879 intel_encoder->clone_mask = (1 << INTEL_LVDS_CLONE_BIT); 908 intel_encoder->clone_mask = (1 << INTEL_LVDS_CLONE_BIT);
@@ -904,43 +933,41 @@ void intel_lvds_init(struct drm_device *dev)
904 * if closed, act like it's not there for now 933 * if closed, act like it's not there for now
905 */ 934 */
906 935
907 /* Set up the DDC bus. */
908 intel_encoder->ddc_bus = intel_i2c_create(dev, gpio, "LVDSDDC_C");
909 if (!intel_encoder->ddc_bus) {
910 dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration "
911 "failed.\n");
912 goto failed;
913 }
914
915 /* 936 /*
916 * Attempt to get the fixed panel mode from DDC. Assume that the 937 * Attempt to get the fixed panel mode from DDC. Assume that the
917 * preferred mode is the right one. 938 * preferred mode is the right one.
918 */ 939 */
919 dev_priv->lvds_edid_good = true; 940 intel_lvds->edid = drm_get_edid(connector,
941 &dev_priv->gmbus[pin].adapter);
920 942
921 if (!intel_ddc_get_modes(connector, intel_encoder->ddc_bus)) 943 if (!intel_lvds->edid) {
922 dev_priv->lvds_edid_good = false; 944 /* Didn't get an EDID, so
945 * set wide sync ranges so we get all modes
946 * handed to valid_mode for checking
947 */
948 connector->display_info.min_vfreq = 0;
949 connector->display_info.max_vfreq = 200;
950 connector->display_info.min_hfreq = 0;
951 connector->display_info.max_hfreq = 200;
952 }
923 953
924 list_for_each_entry(scan, &connector->probed_modes, head) { 954 list_for_each_entry(scan, &connector->probed_modes, head) {
925 mutex_lock(&dev->mode_config.mutex);
926 if (scan->type & DRM_MODE_TYPE_PREFERRED) { 955 if (scan->type & DRM_MODE_TYPE_PREFERRED) {
927 dev_priv->panel_fixed_mode = 956 intel_lvds->fixed_mode =
928 drm_mode_duplicate(dev, scan); 957 drm_mode_duplicate(dev, scan);
929 mutex_unlock(&dev->mode_config.mutex); 958 intel_find_lvds_downclock(dev,
930 intel_find_lvds_downclock(dev, connector); 959 intel_lvds->fixed_mode,
960 connector);
931 goto out; 961 goto out;
932 } 962 }
933 mutex_unlock(&dev->mode_config.mutex);
934 } 963 }
935 964
936 /* Failed to get EDID, what about VBT? */ 965 /* Failed to get EDID, what about VBT? */
937 if (dev_priv->lfp_lvds_vbt_mode) { 966 if (dev_priv->lfp_lvds_vbt_mode) {
938 mutex_lock(&dev->mode_config.mutex); 967 intel_lvds->fixed_mode =
939 dev_priv->panel_fixed_mode =
940 drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode); 968 drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode);
941 mutex_unlock(&dev->mode_config.mutex); 969 if (intel_lvds->fixed_mode) {
942 if (dev_priv->panel_fixed_mode) { 970 intel_lvds->fixed_mode->type |=
943 dev_priv->panel_fixed_mode->type |=
944 DRM_MODE_TYPE_PREFERRED; 971 DRM_MODE_TYPE_PREFERRED;
945 goto out; 972 goto out;
946 } 973 }
@@ -958,19 +985,19 @@ void intel_lvds_init(struct drm_device *dev)
958 985
959 lvds = I915_READ(LVDS); 986 lvds = I915_READ(LVDS);
960 pipe = (lvds & LVDS_PIPEB_SELECT) ? 1 : 0; 987 pipe = (lvds & LVDS_PIPEB_SELECT) ? 1 : 0;
961 crtc = intel_get_crtc_from_pipe(dev, pipe); 988 crtc = intel_get_crtc_for_pipe(dev, pipe);
962 989
963 if (crtc && (lvds & LVDS_PORT_EN)) { 990 if (crtc && (lvds & LVDS_PORT_EN)) {
964 dev_priv->panel_fixed_mode = intel_crtc_mode_get(dev, crtc); 991 intel_lvds->fixed_mode = intel_crtc_mode_get(dev, crtc);
965 if (dev_priv->panel_fixed_mode) { 992 if (intel_lvds->fixed_mode) {
966 dev_priv->panel_fixed_mode->type |= 993 intel_lvds->fixed_mode->type |=
967 DRM_MODE_TYPE_PREFERRED; 994 DRM_MODE_TYPE_PREFERRED;
968 goto out; 995 goto out;
969 } 996 }
970 } 997 }
971 998
972 /* If we still don't have a mode after all that, give up. */ 999 /* If we still don't have a mode after all that, give up. */
973 if (!dev_priv->panel_fixed_mode) 1000 if (!intel_lvds->fixed_mode)
974 goto failed; 1001 goto failed;
975 1002
976out: 1003out:
@@ -997,8 +1024,6 @@ out:
997 1024
998failed: 1025failed:
999 DRM_DEBUG_KMS("No LVDS modes found, disabling.\n"); 1026 DRM_DEBUG_KMS("No LVDS modes found, disabling.\n");
1000 if (intel_encoder->ddc_bus)
1001 intel_i2c_destroy(intel_encoder->ddc_bus);
1002 drm_connector_cleanup(connector); 1027 drm_connector_cleanup(connector);
1003 drm_encoder_cleanup(encoder); 1028 drm_encoder_cleanup(encoder);
1004 kfree(intel_lvds); 1029 kfree(intel_lvds);
diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_modes.c
index 4b1fd3d9c73c..f70b7cf32bff 100644
--- a/drivers/gpu/drm/i915/intel_modes.c
+++ b/drivers/gpu/drm/i915/intel_modes.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * Copyright (c) 2007 Dave Airlie <airlied@linux.ie> 2 * Copyright (c) 2007 Dave Airlie <airlied@linux.ie>
3 * Copyright (c) 2007 Intel Corporation 3 * Copyright (c) 2007, 2010 Intel Corporation
4 * Jesse Barnes <jesse.barnes@intel.com> 4 * Jesse Barnes <jesse.barnes@intel.com>
5 * 5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a 6 * Permission is hereby granted, free of charge, to any person obtaining a
@@ -34,11 +34,11 @@
34 * intel_ddc_probe 34 * intel_ddc_probe
35 * 35 *
36 */ 36 */
37bool intel_ddc_probe(struct intel_encoder *intel_encoder) 37bool intel_ddc_probe(struct intel_encoder *intel_encoder, int ddc_bus)
38{ 38{
39 struct drm_i915_private *dev_priv = intel_encoder->base.dev->dev_private;
39 u8 out_buf[] = { 0x0, 0x0}; 40 u8 out_buf[] = { 0x0, 0x0};
40 u8 buf[2]; 41 u8 buf[2];
41 int ret;
42 struct i2c_msg msgs[] = { 42 struct i2c_msg msgs[] = {
43 { 43 {
44 .addr = 0x50, 44 .addr = 0x50,
@@ -54,13 +54,7 @@ bool intel_ddc_probe(struct intel_encoder *intel_encoder)
54 } 54 }
55 }; 55 };
56 56
57 intel_i2c_quirk_set(intel_encoder->enc.dev, true); 57 return i2c_transfer(&dev_priv->gmbus[ddc_bus].adapter, msgs, 2) == 2;
58 ret = i2c_transfer(intel_encoder->ddc_bus, msgs, 2);
59 intel_i2c_quirk_set(intel_encoder->enc.dev, false);
60 if (ret == 2)
61 return true;
62
63 return false;
64} 58}
65 59
66/** 60/**
@@ -76,9 +70,7 @@ int intel_ddc_get_modes(struct drm_connector *connector,
76 struct edid *edid; 70 struct edid *edid;
77 int ret = 0; 71 int ret = 0;
78 72
79 intel_i2c_quirk_set(connector->dev, true);
80 edid = drm_get_edid(connector, adapter); 73 edid = drm_get_edid(connector, adapter);
81 intel_i2c_quirk_set(connector->dev, false);
82 if (edid) { 74 if (edid) {
83 drm_mode_connector_update_edid_property(connector, edid); 75 drm_mode_connector_update_edid_property(connector, edid);
84 ret = drm_add_edid_modes(connector, edid); 76 ret = drm_add_edid_modes(connector, edid);
diff --git a/drivers/gpu/drm/i915/i915_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index ea5d3fea4b61..917c7dc3cd6b 100644
--- a/drivers/gpu/drm/i915/i915_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -31,17 +31,16 @@
31#include "drmP.h" 31#include "drmP.h"
32#include "i915_drm.h" 32#include "i915_drm.h"
33#include "i915_drv.h" 33#include "i915_drv.h"
34#include "intel_drv.h"
34 35
35#define PCI_ASLE 0xe4 36#define PCI_ASLE 0xe4
36#define PCI_LBPC 0xf4
37#define PCI_ASLS 0xfc 37#define PCI_ASLS 0xfc
38 38
39#define OPREGION_SZ (8*1024)
40#define OPREGION_HEADER_OFFSET 0 39#define OPREGION_HEADER_OFFSET 0
41#define OPREGION_ACPI_OFFSET 0x100 40#define OPREGION_ACPI_OFFSET 0x100
42#define OPREGION_SWSCI_OFFSET 0x200 41#define OPREGION_SWSCI_OFFSET 0x200
43#define OPREGION_ASLE_OFFSET 0x300 42#define OPREGION_ASLE_OFFSET 0x300
44#define OPREGION_VBT_OFFSET 0x1000 43#define OPREGION_VBT_OFFSET 0x400
45 44
46#define OPREGION_SIGNATURE "IntelGraphicsMem" 45#define OPREGION_SIGNATURE "IntelGraphicsMem"
47#define MBOX_ACPI (1<<0) 46#define MBOX_ACPI (1<<0)
@@ -143,40 +142,22 @@ struct opregion_asle {
143#define ACPI_DIGITAL_OUTPUT (3<<8) 142#define ACPI_DIGITAL_OUTPUT (3<<8)
144#define ACPI_LVDS_OUTPUT (4<<8) 143#define ACPI_LVDS_OUTPUT (4<<8)
145 144
145#ifdef CONFIG_ACPI
146static u32 asle_set_backlight(struct drm_device *dev, u32 bclp) 146static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
147{ 147{
148 struct drm_i915_private *dev_priv = dev->dev_private; 148 struct drm_i915_private *dev_priv = dev->dev_private;
149 struct opregion_asle *asle = dev_priv->opregion.asle; 149 struct opregion_asle *asle = dev_priv->opregion.asle;
150 u32 blc_pwm_ctl, blc_pwm_ctl2; 150 u32 max;
151 u32 max_backlight, level, shift;
152 151
153 if (!(bclp & ASLE_BCLP_VALID)) 152 if (!(bclp & ASLE_BCLP_VALID))
154 return ASLE_BACKLIGHT_FAILED; 153 return ASLE_BACKLIGHT_FAILED;
155 154
156 bclp &= ASLE_BCLP_MSK; 155 bclp &= ASLE_BCLP_MSK;
157 if (bclp < 0 || bclp > 255) 156 if (bclp > 255)
158 return ASLE_BACKLIGHT_FAILED; 157 return ASLE_BACKLIGHT_FAILED;
159 158
160 blc_pwm_ctl = I915_READ(BLC_PWM_CTL); 159 max = intel_panel_get_max_backlight(dev);
161 blc_pwm_ctl2 = I915_READ(BLC_PWM_CTL2); 160 intel_panel_set_backlight(dev, bclp * max / 255);
162
163 if (IS_I965G(dev) && (blc_pwm_ctl2 & BLM_COMBINATION_MODE))
164 pci_write_config_dword(dev->pdev, PCI_LBPC, bclp);
165 else {
166 if (IS_PINEVIEW(dev)) {
167 blc_pwm_ctl &= ~(BACKLIGHT_DUTY_CYCLE_MASK - 1);
168 max_backlight = (blc_pwm_ctl & BACKLIGHT_MODULATION_FREQ_MASK) >>
169 BACKLIGHT_MODULATION_FREQ_SHIFT;
170 shift = BACKLIGHT_DUTY_CYCLE_SHIFT + 1;
171 } else {
172 blc_pwm_ctl &= ~BACKLIGHT_DUTY_CYCLE_MASK;
173 max_backlight = ((blc_pwm_ctl & BACKLIGHT_MODULATION_FREQ_MASK) >>
174 BACKLIGHT_MODULATION_FREQ_SHIFT) * 2;
175 shift = BACKLIGHT_DUTY_CYCLE_SHIFT;
176 }
177 level = (bclp * max_backlight) / 255;
178 I915_WRITE(BLC_PWM_CTL, blc_pwm_ctl | (level << shift));
179 }
180 asle->cblv = (bclp*0x64)/0xff | ASLE_CBLV_VALID; 161 asle->cblv = (bclp*0x64)/0xff | ASLE_CBLV_VALID;
181 162
182 return 0; 163 return 0;
@@ -211,7 +192,7 @@ static u32 asle_set_pfit(struct drm_device *dev, u32 pfit)
211 return 0; 192 return 0;
212} 193}
213 194
214void opregion_asle_intr(struct drm_device *dev) 195void intel_opregion_asle_intr(struct drm_device *dev)
215{ 196{
216 struct drm_i915_private *dev_priv = dev->dev_private; 197 struct drm_i915_private *dev_priv = dev->dev_private;
217 struct opregion_asle *asle = dev_priv->opregion.asle; 198 struct opregion_asle *asle = dev_priv->opregion.asle;
@@ -243,37 +224,8 @@ void opregion_asle_intr(struct drm_device *dev)
243 asle->aslc = asle_stat; 224 asle->aslc = asle_stat;
244} 225}
245 226
246static u32 asle_set_backlight_ironlake(struct drm_device *dev, u32 bclp) 227/* Only present on Ironlake+ */
247{ 228void intel_opregion_gse_intr(struct drm_device *dev)
248 struct drm_i915_private *dev_priv = dev->dev_private;
249 struct opregion_asle *asle = dev_priv->opregion.asle;
250 u32 cpu_pwm_ctl, pch_pwm_ctl2;
251 u32 max_backlight, level;
252
253 if (!(bclp & ASLE_BCLP_VALID))
254 return ASLE_BACKLIGHT_FAILED;
255
256 bclp &= ASLE_BCLP_MSK;
257 if (bclp < 0 || bclp > 255)
258 return ASLE_BACKLIGHT_FAILED;
259
260 cpu_pwm_ctl = I915_READ(BLC_PWM_CPU_CTL);
261 pch_pwm_ctl2 = I915_READ(BLC_PWM_PCH_CTL2);
262 /* get the max PWM frequency */
263 max_backlight = (pch_pwm_ctl2 >> 16) & BACKLIGHT_DUTY_CYCLE_MASK;
264 /* calculate the expected PWM frequency */
265 level = (bclp * max_backlight) / 255;
266 /* preserve the high 16 bits */
267 cpu_pwm_ctl &= ~(BACKLIGHT_DUTY_CYCLE_MASK);
268 /* write the updated PWM frequency */
269 I915_WRITE(BLC_PWM_CPU_CTL, cpu_pwm_ctl | level);
270
271 asle->cblv = (bclp*0x64)/0xff | ASLE_CBLV_VALID;
272
273 return 0;
274}
275
276void ironlake_opregion_gse_intr(struct drm_device *dev)
277{ 229{
278 struct drm_i915_private *dev_priv = dev->dev_private; 230 struct drm_i915_private *dev_priv = dev->dev_private;
279 struct opregion_asle *asle = dev_priv->opregion.asle; 231 struct opregion_asle *asle = dev_priv->opregion.asle;
@@ -296,7 +248,7 @@ void ironlake_opregion_gse_intr(struct drm_device *dev)
296 } 248 }
297 249
298 if (asle_req & ASLE_SET_BACKLIGHT) 250 if (asle_req & ASLE_SET_BACKLIGHT)
299 asle_stat |= asle_set_backlight_ironlake(dev, asle->bclp); 251 asle_stat |= asle_set_backlight(dev, asle->bclp);
300 252
301 if (asle_req & ASLE_SET_PFIT) { 253 if (asle_req & ASLE_SET_PFIT) {
302 DRM_DEBUG_DRIVER("Pfit is not supported\n"); 254 DRM_DEBUG_DRIVER("Pfit is not supported\n");
@@ -315,7 +267,7 @@ void ironlake_opregion_gse_intr(struct drm_device *dev)
315#define ASLE_PFIT_EN (1<<2) 267#define ASLE_PFIT_EN (1<<2)
316#define ASLE_PFMB_EN (1<<3) 268#define ASLE_PFMB_EN (1<<3)
317 269
318void opregion_enable_asle(struct drm_device *dev) 270void intel_opregion_enable_asle(struct drm_device *dev)
319{ 271{
320 struct drm_i915_private *dev_priv = dev->dev_private; 272 struct drm_i915_private *dev_priv = dev->dev_private;
321 struct opregion_asle *asle = dev_priv->opregion.asle; 273 struct opregion_asle *asle = dev_priv->opregion.asle;
@@ -464,7 +416,58 @@ blind_set:
464 goto end; 416 goto end;
465} 417}
466 418
467int intel_opregion_init(struct drm_device *dev, int resume) 419void intel_opregion_init(struct drm_device *dev)
420{
421 struct drm_i915_private *dev_priv = dev->dev_private;
422 struct intel_opregion *opregion = &dev_priv->opregion;
423
424 if (!opregion->header)
425 return;
426
427 if (opregion->acpi) {
428 if (drm_core_check_feature(dev, DRIVER_MODESET))
429 intel_didl_outputs(dev);
430
431 /* Notify BIOS we are ready to handle ACPI video ext notifs.
432 * Right now, all the events are handled by the ACPI video module.
433 * We don't actually need to do anything with them. */
434 opregion->acpi->csts = 0;
435 opregion->acpi->drdy = 1;
436
437 system_opregion = opregion;
438 register_acpi_notifier(&intel_opregion_notifier);
439 }
440
441 if (opregion->asle)
442 intel_opregion_enable_asle(dev);
443}
444
445void intel_opregion_fini(struct drm_device *dev)
446{
447 struct drm_i915_private *dev_priv = dev->dev_private;
448 struct intel_opregion *opregion = &dev_priv->opregion;
449
450 if (!opregion->header)
451 return;
452
453 if (opregion->acpi) {
454 opregion->acpi->drdy = 0;
455
456 system_opregion = NULL;
457 unregister_acpi_notifier(&intel_opregion_notifier);
458 }
459
460 /* just clear all opregion memory pointers now */
461 iounmap(opregion->header);
462 opregion->header = NULL;
463 opregion->acpi = NULL;
464 opregion->swsci = NULL;
465 opregion->asle = NULL;
466 opregion->vbt = NULL;
467}
468#endif
469
470int intel_opregion_setup(struct drm_device *dev)
468{ 471{
469 struct drm_i915_private *dev_priv = dev->dev_private; 472 struct drm_i915_private *dev_priv = dev->dev_private;
470 struct intel_opregion *opregion = &dev_priv->opregion; 473 struct intel_opregion *opregion = &dev_priv->opregion;
@@ -479,29 +482,23 @@ int intel_opregion_init(struct drm_device *dev, int resume)
479 return -ENOTSUPP; 482 return -ENOTSUPP;
480 } 483 }
481 484
482 base = ioremap(asls, OPREGION_SZ); 485 base = ioremap(asls, OPREGION_SIZE);
483 if (!base) 486 if (!base)
484 return -ENOMEM; 487 return -ENOMEM;
485 488
486 opregion->header = base; 489 if (memcmp(base, OPREGION_SIGNATURE, 16)) {
487 if (memcmp(opregion->header->signature, OPREGION_SIGNATURE, 16)) {
488 DRM_DEBUG_DRIVER("opregion signature mismatch\n"); 490 DRM_DEBUG_DRIVER("opregion signature mismatch\n");
489 err = -EINVAL; 491 err = -EINVAL;
490 goto err_out; 492 goto err_out;
491 } 493 }
494 opregion->header = base;
495 opregion->vbt = base + OPREGION_VBT_OFFSET;
492 496
493 mboxes = opregion->header->mboxes; 497 mboxes = opregion->header->mboxes;
494 if (mboxes & MBOX_ACPI) { 498 if (mboxes & MBOX_ACPI) {
495 DRM_DEBUG_DRIVER("Public ACPI methods supported\n"); 499 DRM_DEBUG_DRIVER("Public ACPI methods supported\n");
496 opregion->acpi = base + OPREGION_ACPI_OFFSET; 500 opregion->acpi = base + OPREGION_ACPI_OFFSET;
497 if (drm_core_check_feature(dev, DRIVER_MODESET))
498 intel_didl_outputs(dev);
499 } else {
500 DRM_DEBUG_DRIVER("Public ACPI methods not supported\n");
501 err = -ENOTSUPP;
502 goto err_out;
503 } 501 }
504 opregion->enabled = 1;
505 502
506 if (mboxes & MBOX_SWSCI) { 503 if (mboxes & MBOX_SWSCI) {
507 DRM_DEBUG_DRIVER("SWSCI supported\n"); 504 DRM_DEBUG_DRIVER("SWSCI supported\n");
@@ -510,53 +507,11 @@ int intel_opregion_init(struct drm_device *dev, int resume)
510 if (mboxes & MBOX_ASLE) { 507 if (mboxes & MBOX_ASLE) {
511 DRM_DEBUG_DRIVER("ASLE supported\n"); 508 DRM_DEBUG_DRIVER("ASLE supported\n");
512 opregion->asle = base + OPREGION_ASLE_OFFSET; 509 opregion->asle = base + OPREGION_ASLE_OFFSET;
513 opregion_enable_asle(dev);
514 } 510 }
515 511
516 if (!resume)
517 acpi_video_register();
518
519
520 /* Notify BIOS we are ready to handle ACPI video ext notifs.
521 * Right now, all the events are handled by the ACPI video module.
522 * We don't actually need to do anything with them. */
523 opregion->acpi->csts = 0;
524 opregion->acpi->drdy = 1;
525
526 system_opregion = opregion;
527 register_acpi_notifier(&intel_opregion_notifier);
528
529 return 0; 512 return 0;
530 513
531err_out: 514err_out:
532 iounmap(opregion->header); 515 iounmap(opregion->header);
533 opregion->header = NULL;
534 acpi_video_register();
535 return err; 516 return err;
536} 517}
537
538void intel_opregion_free(struct drm_device *dev, int suspend)
539{
540 struct drm_i915_private *dev_priv = dev->dev_private;
541 struct intel_opregion *opregion = &dev_priv->opregion;
542
543 if (!opregion->enabled)
544 return;
545
546 if (!suspend)
547 acpi_video_unregister();
548
549 opregion->acpi->drdy = 0;
550
551 system_opregion = NULL;
552 unregister_acpi_notifier(&intel_opregion_notifier);
553
554 /* just clear all opregion memory pointers now */
555 iounmap(opregion->header);
556 opregion->header = NULL;
557 opregion->acpi = NULL;
558 opregion->swsci = NULL;
559 opregion->asle = NULL;
560
561 opregion->enabled = 0;
562}
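
intel_opregion_setup() is now a pure parser: map the region, verify the 16-byte signature, then hand out section pointers per mailbox bit, leaving ACPI registration to intel_opregion_init(). The parse replayed over an in-memory image; the mboxes field offset and the SWSCI/ASLE bit values are assumptions, while the signature and section offsets come from the defines above:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define OPREGION_SIGNATURE "IntelGraphicsMem"
#define OPREGION_ACPI_OFFSET  0x100
#define OPREGION_SWSCI_OFFSET 0x200
#define OPREGION_ASLE_OFFSET  0x300
#define OPREGION_VBT_OFFSET   0x400
#define MBOX_ACPI  (1 << 0)
#define MBOX_SWSCI (1 << 1) /* bit position assumed */
#define MBOX_ASLE  (1 << 2) /* bit position assumed */

int main(void)
{
	static uint8_t image[0x500];

	memcpy(image, OPREGION_SIGNATURE, 16);
	image[16] = MBOX_ACPI | MBOX_ASLE; /* mboxes field: offset assumed */

	if (memcmp(image, OPREGION_SIGNATURE, 16) != 0) {
		fprintf(stderr, "opregion signature mismatch\n");
		return 1;
	}

	uint32_t mboxes = image[16];
	const void *acpi  = (mboxes & MBOX_ACPI)  ? image + OPREGION_ACPI_OFFSET  : NULL;
	const void *swsci = (mboxes & MBOX_SWSCI) ? image + OPREGION_SWSCI_OFFSET : NULL;
	const void *asle  = (mboxes & MBOX_ASLE)  ? image + OPREGION_ASLE_OFFSET  : NULL;
	const void *vbt   = image + OPREGION_VBT_OFFSET; /* always mapped now */

	printf("acpi=%d swsci=%d asle=%d vbt=%d\n",
	       acpi != NULL, swsci != NULL, asle != NULL, vbt != NULL);
	return 0;
}
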
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 3264bbd47e65..afb96d25219a 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -170,56 +170,143 @@ struct overlay_registers {
170 u16 RESERVEDG[0x100 / 2 - N_HORIZ_UV_TAPS * N_PHASES]; 170 u16 RESERVEDG[0x100 / 2 - N_HORIZ_UV_TAPS * N_PHASES];
171}; 171};
172 172
173/* overlay flip addr flag */ 173struct intel_overlay {
174#define OFC_UPDATE 0x1 174 struct drm_device *dev;
175 175 struct intel_crtc *crtc;
176#define OVERLAY_NONPHYSICAL(dev) (IS_G33(dev) || IS_I965G(dev)) 176 struct drm_i915_gem_object *vid_bo;
177#define OVERLAY_EXISTS(dev) (!IS_G4X(dev) && !IS_IRONLAKE(dev) && !IS_GEN6(dev)) 177 struct drm_i915_gem_object *old_vid_bo;
178 178 int active;
179 int pfit_active;
180 u32 pfit_vscale_ratio; /* shifted-point number, (1<<12) == 1.0 */
181 u32 color_key;
182 u32 brightness, contrast, saturation;
183 u32 old_xscale, old_yscale;
184 /* register access */
185 u32 flip_addr;
186 struct drm_i915_gem_object *reg_bo;
187 /* flip handling */
188 uint32_t last_flip_req;
189 void (*flip_tail)(struct intel_overlay *);
190};
179 191
180static struct overlay_registers *intel_overlay_map_regs_atomic(struct intel_overlay *overlay) 192static struct overlay_registers *
193intel_overlay_map_regs(struct intel_overlay *overlay)
181{ 194{
182 drm_i915_private_t *dev_priv = overlay->dev->dev_private; 195 drm_i915_private_t *dev_priv = overlay->dev->dev_private;
183 struct overlay_registers *regs; 196 struct overlay_registers *regs;
184 197
185 /* no recursive mappings */ 198 if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
186 BUG_ON(overlay->virt_addr); 199 regs = overlay->reg_bo->phys_obj->handle->vaddr;
200 else
201 regs = io_mapping_map_wc(dev_priv->mm.gtt_mapping,
202 overlay->reg_bo->gtt_offset);
187 203
188 if (OVERLAY_NONPHYSICAL(overlay->dev)) { 204 return regs;
189 regs = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping, 205}
190 overlay->reg_bo->gtt_offset);
191 206
192 if (!regs) { 207static void intel_overlay_unmap_regs(struct intel_overlay *overlay,
193 DRM_ERROR("failed to map overlay regs in GTT\n"); 208 struct overlay_registers *regs)
194 return NULL; 209{
195 } 210 if (!OVERLAY_NEEDS_PHYSICAL(overlay->dev))
196 } else 211 io_mapping_unmap(regs);
197 regs = overlay->reg_bo->phys_obj->handle->vaddr; 212}
213
214static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
215 struct drm_i915_gem_request *request,
216 bool interruptible,
217 void (*tail)(struct intel_overlay *))
218{
219 struct drm_device *dev = overlay->dev;
220 drm_i915_private_t *dev_priv = dev->dev_private;
221 int ret;
222
223 BUG_ON(overlay->last_flip_req);
224 overlay->last_flip_req =
225 i915_add_request(dev, NULL, request, &dev_priv->render_ring);
226 if (overlay->last_flip_req == 0)
227 return -ENOMEM;
198 228
199 return overlay->virt_addr = regs; 229 overlay->flip_tail = tail;
230 ret = i915_do_wait_request(dev,
231 overlay->last_flip_req, true,
232 &dev_priv->render_ring);
233 if (ret)
234 return ret;
235
236 overlay->last_flip_req = 0;
237 return 0;
200} 238}
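
Most of the removed hw_wedged state machine collapses into intel_overlay_do_wait_request(): emit a request, wait for it, and remember a flip_tail callback so an interrupted wait can be finished later by the recovery path. The control flow in miniature, with the GPU replaced by a sequence counter:

#include <stdio.h>

struct overlay;
typedef void (*tail_fn)(struct overlay *);

struct overlay {
	unsigned last_flip_req; /* 0 means no flip outstanding */
	tail_fn flip_tail;
};

static unsigned submit_request(void)
{
	static unsigned seqno;
	return ++seqno; /* stand-in for i915_add_request() */
}

static int wait_request(unsigned seqno)
{
	printf("waiting for request %u\n", seqno);
	return 0; /* stand-in for i915_do_wait_request() */
}

static int do_wait_request(struct overlay *ov, tail_fn tail)
{
	ov->last_flip_req = submit_request();
	ov->flip_tail = tail;
	if (wait_request(ov->last_flip_req))
		return -1; /* interrupted: recovery runs flip_tail later */
	ov->last_flip_req = 0;
	return 0;
}

static void off_tail(struct overlay *ov)
{
	printf("releasing frame, overlay %p now idle\n", (void *)ov);
}

int main(void)
{
	struct overlay ov = { 0 };
	if (do_wait_request(&ov, off_tail) == 0 && ov.flip_tail)
		ov.flip_tail(&ov); /* run the deferred completion */
	return 0;
}
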
201 239
202static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay) 240/* Workaround for i830 bug where pipe a must be enable to change control regs */
241static int
242i830_activate_pipe_a(struct drm_device *dev)
203{ 243{
204 if (OVERLAY_NONPHYSICAL(overlay->dev)) 244 drm_i915_private_t *dev_priv = dev->dev_private;
205 io_mapping_unmap_atomic(overlay->virt_addr); 245 struct intel_crtc *crtc;
246 struct drm_crtc_helper_funcs *crtc_funcs;
247 struct drm_display_mode vesa_640x480 = {
248 DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
249 752, 800, 0, 480, 489, 492, 525, 0,
250 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC)
251 }, *mode;
252
253 crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[0]);
254 if (crtc->dpms_mode == DRM_MODE_DPMS_ON)
255 return 0;
206 256
207 overlay->virt_addr = NULL; 257 /* most i8xx have pipe a forced on, so don't trust dpms mode */
258 if (I915_READ(PIPEACONF) & PIPECONF_ENABLE)
259 return 0;
208 260
209 return; 261 crtc_funcs = crtc->base.helper_private;
262 if (crtc_funcs->dpms == NULL)
263 return 0;
264
265 DRM_DEBUG_DRIVER("Enabling pipe A in order to enable overlay\n");
266
267 mode = drm_mode_duplicate(dev, &vesa_640x480);
268 drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
269 if(!drm_crtc_helper_set_mode(&crtc->base, mode,
270 crtc->base.x, crtc->base.y,
271 crtc->base.fb))
272 return 0;
273
274 crtc_funcs->dpms(&crtc->base, DRM_MODE_DPMS_ON);
275 return 1;
276}
277
278static void
279i830_deactivate_pipe_a(struct drm_device *dev)
280{
281 drm_i915_private_t *dev_priv = dev->dev_private;
282 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[0];
283 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
284
285 crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
210} 286}
211 287
212/* overlay needs to be disabled in OCMD reg */ 288/* overlay needs to be disabled in OCMD reg */
213static int intel_overlay_on(struct intel_overlay *overlay) 289static int intel_overlay_on(struct intel_overlay *overlay)
214{ 290{
215 struct drm_device *dev = overlay->dev; 291 struct drm_device *dev = overlay->dev;
292 struct drm_i915_gem_request *request;
293 int pipe_a_quirk = 0;
216 int ret; 294 int ret;
217 drm_i915_private_t *dev_priv = dev->dev_private;
218 295
219 BUG_ON(overlay->active); 296 BUG_ON(overlay->active);
220
221 overlay->active = 1; 297 overlay->active = 1;
222 overlay->hw_wedged = NEEDS_WAIT_FOR_FLIP; 298
299 if (IS_I830(dev)) {
300 pipe_a_quirk = i830_activate_pipe_a(dev);
301 if (pipe_a_quirk < 0)
302 return pipe_a_quirk;
303 }
304
305 request = kzalloc(sizeof(*request), GFP_KERNEL);
306 if (request == NULL) {
307 ret = -ENOMEM;
308 goto out;
309 }
223 310
224 BEGIN_LP_RING(4); 311 BEGIN_LP_RING(4);
225 OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_ON); 312 OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_ON);
@@ -228,32 +315,30 @@ static int intel_overlay_on(struct intel_overlay *overlay)
228 OUT_RING(MI_NOOP); 315 OUT_RING(MI_NOOP);
229 ADVANCE_LP_RING(); 316 ADVANCE_LP_RING();
230 317
231 overlay->last_flip_req = 318 ret = intel_overlay_do_wait_request(overlay, request, true, NULL);
232 i915_add_request(dev, NULL, 0, &dev_priv->render_ring); 319out:
233 if (overlay->last_flip_req == 0) 320 if (pipe_a_quirk)
234 return -ENOMEM; 321 i830_deactivate_pipe_a(dev);
235
236 ret = i915_do_wait_request(dev,
237 overlay->last_flip_req, 1, &dev_priv->render_ring);
238 if (ret != 0)
239 return ret;
240 322
241 overlay->hw_wedged = 0; 323 return ret;
242 overlay->last_flip_req = 0;
243 return 0;
244} 324}
245 325
246/* overlay needs to be enabled in OCMD reg */ 326/* overlay needs to be enabled in OCMD reg */
247static void intel_overlay_continue(struct intel_overlay *overlay, 327static int intel_overlay_continue(struct intel_overlay *overlay,
248 bool load_polyphase_filter) 328 bool load_polyphase_filter)
249{ 329{
250 struct drm_device *dev = overlay->dev; 330 struct drm_device *dev = overlay->dev;
251 drm_i915_private_t *dev_priv = dev->dev_private; 331 drm_i915_private_t *dev_priv = dev->dev_private;
332 struct drm_i915_gem_request *request;
252 u32 flip_addr = overlay->flip_addr; 333 u32 flip_addr = overlay->flip_addr;
253 u32 tmp; 334 u32 tmp;
254 335
255 BUG_ON(!overlay->active); 336 BUG_ON(!overlay->active);
256 337
338 request = kzalloc(sizeof(*request), GFP_KERNEL);
339 if (request == NULL)
340 return -ENOMEM;
341
257 if (load_polyphase_filter) 342 if (load_polyphase_filter)
258 flip_addr |= OFC_UPDATE; 343 flip_addr |= OFC_UPDATE;
259 344
@@ -268,220 +353,132 @@ static void intel_overlay_continue(struct intel_overlay *overlay,
268 ADVANCE_LP_RING(); 353 ADVANCE_LP_RING();
269 354
270 overlay->last_flip_req = 355 overlay->last_flip_req =
271 i915_add_request(dev, NULL, 0, &dev_priv->render_ring); 356 i915_add_request(dev, NULL, request, &dev_priv->render_ring);
357 return 0;
272} 358}
273 359
274static int intel_overlay_wait_flip(struct intel_overlay *overlay) 360static void intel_overlay_release_old_vid_tail(struct intel_overlay *overlay)
275{ 361{
276 struct drm_device *dev = overlay->dev; 362 struct drm_gem_object *obj = &overlay->old_vid_bo->base;
277 drm_i915_private_t *dev_priv = dev->dev_private;
278 int ret;
279 u32 tmp;
280
281 if (overlay->last_flip_req != 0) {
282 ret = i915_do_wait_request(dev, overlay->last_flip_req,
283 1, &dev_priv->render_ring);
284 if (ret == 0) {
285 overlay->last_flip_req = 0;
286
287 tmp = I915_READ(ISR);
288 363
289 if (!(tmp & I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT)) 364 i915_gem_object_unpin(obj);
290 return 0; 365 drm_gem_object_unreference(obj);
291 }
292 }
293 366
294 /* synchronous slowpath */ 367 overlay->old_vid_bo = NULL;
295 overlay->hw_wedged = RELEASE_OLD_VID; 368}
296 369
297 BEGIN_LP_RING(2); 370static void intel_overlay_off_tail(struct intel_overlay *overlay)
298 OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); 371{
299 OUT_RING(MI_NOOP); 372 struct drm_gem_object *obj;
300 ADVANCE_LP_RING();
301 373
302 overlay->last_flip_req = 374 /* never have the overlay hw on without showing a frame */
303 i915_add_request(dev, NULL, 0, &dev_priv->render_ring); 375 BUG_ON(!overlay->vid_bo);
304 if (overlay->last_flip_req == 0) 376 obj = &overlay->vid_bo->base;
305 return -ENOMEM;
306 377
307 ret = i915_do_wait_request(dev, overlay->last_flip_req, 378 i915_gem_object_unpin(obj);
308 1, &dev_priv->render_ring); 379 drm_gem_object_unreference(obj);
309 if (ret != 0) 380 overlay->vid_bo = NULL;
310 return ret;
311 381
312 overlay->hw_wedged = 0; 382 overlay->crtc->overlay = NULL;
313 overlay->last_flip_req = 0; 383 overlay->crtc = NULL;
314 return 0; 384 overlay->active = 0;
315} 385}
316 386
317/* overlay needs to be disabled in OCMD reg */ 387/* overlay needs to be disabled in OCMD reg */
318static int intel_overlay_off(struct intel_overlay *overlay) 388static int intel_overlay_off(struct intel_overlay *overlay,
389 bool interruptible)
319{ 390{
320 u32 flip_addr = overlay->flip_addr;
321 struct drm_device *dev = overlay->dev; 391 struct drm_device *dev = overlay->dev;
322 drm_i915_private_t *dev_priv = dev->dev_private; 392 u32 flip_addr = overlay->flip_addr;
323 int ret; 393 struct drm_i915_gem_request *request;
324 394
325 BUG_ON(!overlay->active); 395 BUG_ON(!overlay->active);
326 396
397 request = kzalloc(sizeof(*request), GFP_KERNEL);
398 if (request == NULL)
399 return -ENOMEM;
400
327 /* According to intel docs the overlay hw may hang (when switching 401 /* According to intel docs the overlay hw may hang (when switching
328 * off) without loading the filter coeffs. It is however unclear whether 402 * off) without loading the filter coeffs. It is however unclear whether
329 * this applies to the disabling of the overlay or to the switching off 403 * this applies to the disabling of the overlay or to the switching off
330 * of the hw. Do it in both cases */ 404 * of the hw. Do it in both cases */
331 flip_addr |= OFC_UPDATE; 405 flip_addr |= OFC_UPDATE;
332 406
407 BEGIN_LP_RING(6);
333 /* wait for overlay to go idle */ 408 /* wait for overlay to go idle */
334 overlay->hw_wedged = SWITCH_OFF_STAGE_1;
335
336 BEGIN_LP_RING(4);
337 OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE); 409 OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
338 OUT_RING(flip_addr); 410 OUT_RING(flip_addr);
339 OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); 411 OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
340 OUT_RING(MI_NOOP);
341 ADVANCE_LP_RING();
342
343 overlay->last_flip_req =
344 i915_add_request(dev, NULL, 0, &dev_priv->render_ring);
345 if (overlay->last_flip_req == 0)
346 return -ENOMEM;
347
348 ret = i915_do_wait_request(dev, overlay->last_flip_req,
349 1, &dev_priv->render_ring);
350 if (ret != 0)
351 return ret;
352
353 /* turn overlay off */ 412 /* turn overlay off */
354 overlay->hw_wedged = SWITCH_OFF_STAGE_2; 413 OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
355
356 BEGIN_LP_RING(4);
357 OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
358 OUT_RING(flip_addr); 414 OUT_RING(flip_addr);
359 OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); 415 OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
360 OUT_RING(MI_NOOP);
361 ADVANCE_LP_RING(); 416 ADVANCE_LP_RING();
362 417
363 overlay->last_flip_req = 418 return intel_overlay_do_wait_request(overlay, request, interruptible,
364 i915_add_request(dev, NULL, 0, &dev_priv->render_ring); 419 intel_overlay_off_tail);
365 if (overlay->last_flip_req == 0)
366 return -ENOMEM;
367
368 ret = i915_do_wait_request(dev, overlay->last_flip_req,
369 1, &dev_priv->render_ring);
370 if (ret != 0)
371 return ret;
372
373 overlay->hw_wedged = 0;
374 overlay->last_flip_req = 0;
375 return ret;
376}
377
378static void intel_overlay_off_tail(struct intel_overlay *overlay)
379{
380 struct drm_gem_object *obj;
381
382 /* never have the overlay hw on without showing a frame */
383 BUG_ON(!overlay->vid_bo);
384 obj = &overlay->vid_bo->base;
385
386 i915_gem_object_unpin(obj);
387 drm_gem_object_unreference(obj);
388 overlay->vid_bo = NULL;
389
390 overlay->crtc->overlay = NULL;
391 overlay->crtc = NULL;
392 overlay->active = 0;
393} 420}
394 421
395/* recover from an interruption due to a signal 422/* recover from an interruption due to a signal
396 * We have to be careful not to repeat work forever and make forward progress. */ 423 * We have to be careful not to repeat work forever and make forward progress. */
397int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay, 424static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay,
398 int interruptible) 425 bool interruptible)
399{ 426{
400 struct drm_device *dev = overlay->dev; 427 struct drm_device *dev = overlay->dev;
401 struct drm_gem_object *obj;
402 drm_i915_private_t *dev_priv = dev->dev_private; 428 drm_i915_private_t *dev_priv = dev->dev_private;
403 u32 flip_addr;
404 int ret; 429 int ret;
405 430
406 if (overlay->hw_wedged == HW_WEDGED) 431 if (overlay->last_flip_req == 0)
407 return -EIO; 432 return 0;
408
409 if (overlay->last_flip_req == 0) {
410 overlay->last_flip_req =
411 i915_add_request(dev, NULL, 0, &dev_priv->render_ring);
412 if (overlay->last_flip_req == 0)
413 return -ENOMEM;
414 }
415 433
416 ret = i915_do_wait_request(dev, overlay->last_flip_req, 434 ret = i915_do_wait_request(dev, overlay->last_flip_req,
417 interruptible, &dev_priv->render_ring); 435 interruptible, &dev_priv->render_ring);
418 if (ret != 0) 436 if (ret)
419 return ret; 437 return ret;
420 438
421 switch (overlay->hw_wedged) { 439 if (overlay->flip_tail)
422 case RELEASE_OLD_VID: 440 overlay->flip_tail(overlay);
423 obj = &overlay->old_vid_bo->base;
424 i915_gem_object_unpin(obj);
425 drm_gem_object_unreference(obj);
426 overlay->old_vid_bo = NULL;
427 break;
428 case SWITCH_OFF_STAGE_1:
429 flip_addr = overlay->flip_addr;
430 flip_addr |= OFC_UPDATE;
431
432 overlay->hw_wedged = SWITCH_OFF_STAGE_2;
433
434 BEGIN_LP_RING(4);
435 OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
436 OUT_RING(flip_addr);
437 OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
438 OUT_RING(MI_NOOP);
439 ADVANCE_LP_RING();
440
441 overlay->last_flip_req = i915_add_request(dev, NULL,
442 0, &dev_priv->render_ring);
443 if (overlay->last_flip_req == 0)
444 return -ENOMEM;
445
446 ret = i915_do_wait_request(dev, overlay->last_flip_req,
447 interruptible, &dev_priv->render_ring);
448 if (ret != 0)
449 return ret;
450
451 case SWITCH_OFF_STAGE_2:
452 intel_overlay_off_tail(overlay);
453 break;
454 default:
455 BUG_ON(overlay->hw_wedged != NEEDS_WAIT_FOR_FLIP);
456 }
457 441
458 overlay->hw_wedged = 0;
459 overlay->last_flip_req = 0; 442 overlay->last_flip_req = 0;
460 return 0; 443 return 0;
461} 444}
462 445
463/* Wait for pending overlay flip and release old frame. 446/* Wait for pending overlay flip and release old frame.
464 * Needs to be called before the overlay registers are changed 447 * Needs to be called before the overlay registers are changed
465 * via intel_overlay_(un)map_regs_atomic */ 448 * via intel_overlay_(un)map_regs
449 */
466static int intel_overlay_release_old_vid(struct intel_overlay *overlay) 450static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
467{ 451{
452 struct drm_device *dev = overlay->dev;
453 drm_i915_private_t *dev_priv = dev->dev_private;
468 int ret; 454 int ret;
469 struct drm_gem_object *obj;
470 455
471 /* only wait if there is actually an old frame to release to 456 /* Only wait if there is actually an old frame to release to
472 * guarantee forward progress */ 457 * guarantee forward progress.
458 */
473 if (!overlay->old_vid_bo) 459 if (!overlay->old_vid_bo)
474 return 0; 460 return 0;
475 461
476 ret = intel_overlay_wait_flip(overlay); 462 if (I915_READ(ISR) & I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT) {
477 if (ret != 0) 463 struct drm_i915_gem_request *request;
478 return ret;
479 464
480 obj = &overlay->old_vid_bo->base; 465 /* synchronous slowpath */
481 i915_gem_object_unpin(obj); 466 request = kzalloc(sizeof(*request), GFP_KERNEL);
482 drm_gem_object_unreference(obj); 467 if (request == NULL)
483 overlay->old_vid_bo = NULL; 468 return -ENOMEM;
484 469
470 BEGIN_LP_RING(2);
471 OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
472 OUT_RING(MI_NOOP);
473 ADVANCE_LP_RING();
474
475 ret = intel_overlay_do_wait_request(overlay, request, true,
476 intel_overlay_release_old_vid_tail);
477 if (ret)
478 return ret;
479 }
480
481 intel_overlay_release_old_vid_tail(overlay);
485 return 0; 482 return 0;
486} 483}
487 484
@@ -505,65 +502,65 @@ struct put_image_params {
505static int packed_depth_bytes(u32 format) 502static int packed_depth_bytes(u32 format)
506{ 503{
507 switch (format & I915_OVERLAY_DEPTH_MASK) { 504 switch (format & I915_OVERLAY_DEPTH_MASK) {
508 case I915_OVERLAY_YUV422: 505 case I915_OVERLAY_YUV422:
509 return 4; 506 return 4;
510 case I915_OVERLAY_YUV411: 507 case I915_OVERLAY_YUV411:
511 /* return 6; not implemented */ 508 /* return 6; not implemented */
512 default: 509 default:
513 return -EINVAL; 510 return -EINVAL;
514 } 511 }
515} 512}
516 513
517static int packed_width_bytes(u32 format, short width) 514static int packed_width_bytes(u32 format, short width)
518{ 515{
519 switch (format & I915_OVERLAY_DEPTH_MASK) { 516 switch (format & I915_OVERLAY_DEPTH_MASK) {
520 case I915_OVERLAY_YUV422: 517 case I915_OVERLAY_YUV422:
521 return width << 1; 518 return width << 1;
522 default: 519 default:
523 return -EINVAL; 520 return -EINVAL;
524 } 521 }
525} 522}
526 523
527static int uv_hsubsampling(u32 format) 524static int uv_hsubsampling(u32 format)
528{ 525{
529 switch (format & I915_OVERLAY_DEPTH_MASK) { 526 switch (format & I915_OVERLAY_DEPTH_MASK) {
530 case I915_OVERLAY_YUV422: 527 case I915_OVERLAY_YUV422:
531 case I915_OVERLAY_YUV420: 528 case I915_OVERLAY_YUV420:
532 return 2; 529 return 2;
533 case I915_OVERLAY_YUV411: 530 case I915_OVERLAY_YUV411:
534 case I915_OVERLAY_YUV410: 531 case I915_OVERLAY_YUV410:
535 return 4; 532 return 4;
536 default: 533 default:
537 return -EINVAL; 534 return -EINVAL;
538 } 535 }
539} 536}
540 537
541static int uv_vsubsampling(u32 format) 538static int uv_vsubsampling(u32 format)
542{ 539{
543 switch (format & I915_OVERLAY_DEPTH_MASK) { 540 switch (format & I915_OVERLAY_DEPTH_MASK) {
544 case I915_OVERLAY_YUV420: 541 case I915_OVERLAY_YUV420:
545 case I915_OVERLAY_YUV410: 542 case I915_OVERLAY_YUV410:
546 return 2; 543 return 2;
547 case I915_OVERLAY_YUV422: 544 case I915_OVERLAY_YUV422:
548 case I915_OVERLAY_YUV411: 545 case I915_OVERLAY_YUV411:
549 return 1; 546 return 1;
550 default: 547 default:
551 return -EINVAL; 548 return -EINVAL;
552 } 549 }
553} 550}
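The four helpers above encode each format's chroma subsampling: how many luma samples share one chroma sample horizontally and vertically (2x2 for YUV420, 4x1 for YUV411, and so on), which is what the buffer-size checks further down divide by. A standalone illustration of the resulting plane sizes; the enum tags are hypothetical stand-ins for the I915_OVERLAY_* depth bits, not the uapi values.

#include <stdio.h>

/* Hypothetical format tags mirroring the I915_OVERLAY_YUV* depths. */
enum fmt { YUV422, YUV420, YUV411, YUV410 };

static int uv_hsub(enum fmt f)
{
	switch (f) {
	case YUV422: case YUV420: return 2;
	case YUV411: case YUV410: return 4;
	}
	return -1;
}

static int uv_vsub(enum fmt f)
{
	switch (f) {
	case YUV420: case YUV410: return 2;
	case YUV422: case YUV411: return 1;
	}
	return -1;
}

int main(void)
{
	unsigned int w = 640, h = 480;
	enum fmt f = YUV420;

	/* Planar layout: a full-size Y plane plus two subsampled
	 * chroma planes. */
	unsigned int y_bytes = w * h;
	unsigned int uv_bytes = (w / uv_hsub(f)) * (h / uv_vsub(f));

	printf("Y: %u bytes, U and V: %u bytes each\n", y_bytes, uv_bytes);
	return 0;
}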
554 551
555static u32 calc_swidthsw(struct drm_device *dev, u32 offset, u32 width) 552static u32 calc_swidthsw(struct drm_device *dev, u32 offset, u32 width)
556{ 553{
557 u32 mask, shift, ret; 554 u32 mask, shift, ret;
558 if (IS_I9XX(dev)) { 555 if (IS_GEN2(dev)) {
559 mask = 0x3f;
560 shift = 6;
561 } else {
562 mask = 0x1f; 556 mask = 0x1f;
563 shift = 5; 557 shift = 5;
558 } else {
559 mask = 0x3f;
560 shift = 6;
564 } 561 }
565 ret = ((offset + width + mask) >> shift) - (offset >> shift); 562 ret = ((offset + width + mask) >> shift) - (offset >> shift);
566 if (IS_I9XX(dev)) 563 if (!IS_GEN2(dev))
567 ret <<= 1; 564 ret <<= 1;
568	ret -= 1;				565	ret -= 1;
569 return ret << 2; 566 return ret << 2;
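calc_swidthsw() counts how many fetch units a possibly unaligned row touches: 32-byte units on gen2 and 64-byte units on later parts (hence the mask/shift swap above), with the gen4+ doubling and the trailing -1 / << 2 matching the register encoding. A self-contained copy of the arithmetic for experimenting with offsets and widths; a sketch that mirrors the function above, not a replacement for it.

#include <stdio.h>

static unsigned int calc_swidthsw(int gen2, unsigned int offset,
				  unsigned int width)
{
	unsigned int mask = gen2 ? 0x1f : 0x3f;   /* 32- vs 64-byte units */
	unsigned int shift = gen2 ? 5 : 6;
	unsigned int ret;

	/* Units spanned by [offset, offset + width). */
	ret = ((offset + width + mask) >> shift) - (offset >> shift);
	if (!gen2)
		ret <<= 1;
	ret -= 1;
	return ret << 2;
}

int main(void)
{
	/* A 720-byte-wide Y row starting 16 bytes into a fetch unit. */
	printf("gen2:  0x%x\n", calc_swidthsw(1, 16, 720));
	printf("gen4+: 0x%x\n", calc_swidthsw(0, 16, 720));
	return 0;
}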
@@ -586,7 +583,9 @@ static const u16 y_static_hcoeffs[N_HORIZ_Y_TAPS * N_PHASES] = {
586 0x3020, 0xb340, 0x1fb8, 0x34a0, 0xb060, 583 0x3020, 0xb340, 0x1fb8, 0x34a0, 0xb060,
587 0x3020, 0xb240, 0x1fe0, 0x32e0, 0xb040, 584 0x3020, 0xb240, 0x1fe0, 0x32e0, 0xb040,
588 0x3020, 0xb140, 0x1ff8, 0x3160, 0xb020, 585 0x3020, 0xb140, 0x1ff8, 0x3160, 0xb020,
589 0xb000, 0x3000, 0x0800, 0x3000, 0xb000}; 586 0xb000, 0x3000, 0x0800, 0x3000, 0xb000
587};
588
590static const u16 uv_static_hcoeffs[N_HORIZ_UV_TAPS * N_PHASES] = { 589static const u16 uv_static_hcoeffs[N_HORIZ_UV_TAPS * N_PHASES] = {
591 0x3000, 0x1800, 0x1800, 0xb000, 0x18d0, 0x2e60, 590 0x3000, 0x1800, 0x1800, 0xb000, 0x18d0, 0x2e60,
592 0xb000, 0x1990, 0x2ce0, 0xb020, 0x1a68, 0x2b40, 591 0xb000, 0x1990, 0x2ce0, 0xb020, 0x1a68, 0x2b40,
@@ -596,7 +595,8 @@ static const u16 uv_static_hcoeffs[N_HORIZ_UV_TAPS * N_PHASES] = {
596 0xb100, 0x1eb8, 0x3620, 0xb100, 0x1f18, 0x34a0, 595 0xb100, 0x1eb8, 0x3620, 0xb100, 0x1f18, 0x34a0,
597 0xb100, 0x1f68, 0x3360, 0xb0e0, 0x1fa8, 0x3240, 596 0xb100, 0x1f68, 0x3360, 0xb0e0, 0x1fa8, 0x3240,
598 0xb0c0, 0x1fe0, 0x3140, 0xb060, 0x1ff0, 0x30a0, 597 0xb0c0, 0x1fe0, 0x3140, 0xb060, 0x1ff0, 0x30a0,
599 0x3000, 0x0800, 0x3000}; 598 0x3000, 0x0800, 0x3000
599};
600 600
601static void update_polyphase_filter(struct overlay_registers *regs) 601static void update_polyphase_filter(struct overlay_registers *regs)
602{ 602{
@@ -629,29 +629,31 @@ static bool update_scaling_factors(struct intel_overlay *overlay,
629 yscale = 1 << FP_SHIFT; 629 yscale = 1 << FP_SHIFT;
630 630
631 /*if (params->format & I915_OVERLAY_YUV_PLANAR) {*/ 631 /*if (params->format & I915_OVERLAY_YUV_PLANAR) {*/
632 xscale_UV = xscale/uv_hscale; 632 xscale_UV = xscale/uv_hscale;
633 yscale_UV = yscale/uv_vscale; 633 yscale_UV = yscale/uv_vscale;
634 /* make the Y scale to UV scale ratio an exact multiple */ 634 /* make the Y scale to UV scale ratio an exact multiple */
635 xscale = xscale_UV * uv_hscale; 635 xscale = xscale_UV * uv_hscale;
636 yscale = yscale_UV * uv_vscale; 636 yscale = yscale_UV * uv_vscale;
637 /*} else { 637 /*} else {
638 xscale_UV = 0; 638 xscale_UV = 0;
639 yscale_UV = 0; 639 yscale_UV = 0;
640 }*/ 640 }*/
641 641
642 if (xscale != overlay->old_xscale || yscale != overlay->old_yscale) 642 if (xscale != overlay->old_xscale || yscale != overlay->old_yscale)
643 scale_changed = true; 643 scale_changed = true;
644 overlay->old_xscale = xscale; 644 overlay->old_xscale = xscale;
645 overlay->old_yscale = yscale; 645 overlay->old_yscale = yscale;
646 646
647 regs->YRGBSCALE = ((yscale & FRACT_MASK) << 20) 647 regs->YRGBSCALE = (((yscale & FRACT_MASK) << 20) |
648 | ((xscale >> FP_SHIFT) << 16) 648 ((xscale >> FP_SHIFT) << 16) |
649 | ((xscale & FRACT_MASK) << 3); 649 ((xscale & FRACT_MASK) << 3));
650 regs->UVSCALE = ((yscale_UV & FRACT_MASK) << 20) 650
651 | ((xscale_UV >> FP_SHIFT) << 16) 651 regs->UVSCALE = (((yscale_UV & FRACT_MASK) << 20) |
652 | ((xscale_UV & FRACT_MASK) << 3); 652 ((xscale_UV >> FP_SHIFT) << 16) |
653 regs->UVSCALEV = ((yscale >> FP_SHIFT) << 16) 653 ((xscale_UV & FRACT_MASK) << 3));
654 | ((yscale_UV >> FP_SHIFT) << 0); 654
655 regs->UVSCALEV = ((((yscale >> FP_SHIFT) << 16) |
656 ((yscale_UV >> FP_SHIFT) << 0)));
655 657
656 if (scale_changed) 658 if (scale_changed)
657 update_polyphase_filter(regs); 659 update_polyphase_filter(regs);
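The scale factors are fixed-point ratios split into the integer and fraction fields that the packing above writes out, and the UV scales are derived by dividing the Y scale and multiplying back so the Y:UV ratio stays an exact multiple. A small demonstration of the scheme; FP_SHIFT = 12 and a 12-bit FRACT_MASK are assumptions for illustration, inferred from the field widths in the packing, not quoted from the driver headers.

#include <stdio.h>

#define FP_SHIFT   12     /* assumed integer/fraction split */
#define FRACT_MASK 0xfff

int main(void)
{
	unsigned int src_w = 720, dst_w = 400;

	/* Downscale ratio in fixed point. */
	unsigned int xscale = ((src_w - 1) << FP_SHIFT) / dst_w;

	printf("xscale = 0x%x (integer %u, fraction 0x%03x)\n",
	       xscale, xscale >> FP_SHIFT, xscale & FRACT_MASK);

	/* Snap the Y scale to an exact multiple of the UV scale, as the
	 * code above does, so the chroma scaler never drifts from luma. */
	unsigned int uv_hscale = 2;  /* YUV422/420 horizontal subsampling */
	unsigned int xscale_UV = xscale / uv_hscale;
	xscale = xscale_UV * uv_hscale;

	printf("xscale_UV = 0x%x, xscale snapped to 0x%x\n",
	       xscale_UV, xscale);
	return 0;
}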
@@ -663,22 +665,28 @@ static void update_colorkey(struct intel_overlay *overlay,
663 struct overlay_registers *regs) 665 struct overlay_registers *regs)
664{ 666{
665 u32 key = overlay->color_key; 667 u32 key = overlay->color_key;
668
666 switch (overlay->crtc->base.fb->bits_per_pixel) { 669 switch (overlay->crtc->base.fb->bits_per_pixel) {
667 case 8: 670 case 8:
668 regs->DCLRKV = 0; 671 regs->DCLRKV = 0;
669 regs->DCLRKM = CLK_RGB8I_MASK | DST_KEY_ENABLE; 672 regs->DCLRKM = CLK_RGB8I_MASK | DST_KEY_ENABLE;
670 case 16: 673 break;
671 if (overlay->crtc->base.fb->depth == 15) { 674
672 regs->DCLRKV = RGB15_TO_COLORKEY(key); 675 case 16:
673 regs->DCLRKM = CLK_RGB15_MASK | DST_KEY_ENABLE; 676 if (overlay->crtc->base.fb->depth == 15) {
674 } else { 677 regs->DCLRKV = RGB15_TO_COLORKEY(key);
675 regs->DCLRKV = RGB16_TO_COLORKEY(key); 678 regs->DCLRKM = CLK_RGB15_MASK | DST_KEY_ENABLE;
676 regs->DCLRKM = CLK_RGB16_MASK | DST_KEY_ENABLE; 679 } else {
677 } 680 regs->DCLRKV = RGB16_TO_COLORKEY(key);
678 case 24: 681 regs->DCLRKM = CLK_RGB16_MASK | DST_KEY_ENABLE;
679 case 32: 682 }
680 regs->DCLRKV = key; 683 break;
681 regs->DCLRKM = CLK_RGB24_MASK | DST_KEY_ENABLE; 684
685 case 24:
686 case 32:
687 regs->DCLRKV = key;
688 regs->DCLRKM = CLK_RGB24_MASK | DST_KEY_ENABLE;
689 break;
682 } 690 }
683} 691}
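The reordered switch now ends every depth with a break; the old version fell through, so the 8- and 16-bpp settings were clobbered by the 24/32-bpp assignments below them. The 15/16-bpp cases also expand the framebuffer-format key into the register's wider layout; the sketch below shows one plausible 5:6:5 expansion, hedged because the real RGB16_TO_COLORKEY macro may place the bits differently.

#include <stdio.h>
#include <stdint.h>

/* One plausible RGB16 -> 8:8:8 expansion (illustrative only). */
static uint32_t rgb565_to_key(uint16_t c)
{
	uint32_t r = (c >> 11) & 0x1f;
	uint32_t g = (c >> 5) & 0x3f;
	uint32_t b = c & 0x1f;

	/* Replicate the top bits so 0x1f widens to 0xff, not 0xf8. */
	r = (r << 3) | (r >> 2);
	g = (g << 2) | (g >> 4);
	b = (b << 3) | (b >> 2);
	return (r << 16) | (g << 8) | b;
}

int main(void)
{
	printf("0xf800 -> 0x%06x\n", rgb565_to_key(0xf800)); /* pure red */
	printf("0x07e0 -> 0x%06x\n", rgb565_to_key(0x07e0)); /* pure green */
	return 0;
}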
684 692
@@ -688,48 +696,48 @@ static u32 overlay_cmd_reg(struct put_image_params *params)
688 696
689 if (params->format & I915_OVERLAY_YUV_PLANAR) { 697 if (params->format & I915_OVERLAY_YUV_PLANAR) {
690 switch (params->format & I915_OVERLAY_DEPTH_MASK) { 698 switch (params->format & I915_OVERLAY_DEPTH_MASK) {
691 case I915_OVERLAY_YUV422: 699 case I915_OVERLAY_YUV422:
692 cmd |= OCMD_YUV_422_PLANAR; 700 cmd |= OCMD_YUV_422_PLANAR;
693 break; 701 break;
694 case I915_OVERLAY_YUV420: 702 case I915_OVERLAY_YUV420:
695 cmd |= OCMD_YUV_420_PLANAR; 703 cmd |= OCMD_YUV_420_PLANAR;
696 break; 704 break;
697 case I915_OVERLAY_YUV411: 705 case I915_OVERLAY_YUV411:
698 case I915_OVERLAY_YUV410: 706 case I915_OVERLAY_YUV410:
699 cmd |= OCMD_YUV_410_PLANAR; 707 cmd |= OCMD_YUV_410_PLANAR;
700 break; 708 break;
701 } 709 }
702 } else { /* YUV packed */ 710 } else { /* YUV packed */
703 switch (params->format & I915_OVERLAY_DEPTH_MASK) { 711 switch (params->format & I915_OVERLAY_DEPTH_MASK) {
704 case I915_OVERLAY_YUV422: 712 case I915_OVERLAY_YUV422:
705 cmd |= OCMD_YUV_422_PACKED; 713 cmd |= OCMD_YUV_422_PACKED;
706 break; 714 break;
707 case I915_OVERLAY_YUV411: 715 case I915_OVERLAY_YUV411:
708 cmd |= OCMD_YUV_411_PACKED; 716 cmd |= OCMD_YUV_411_PACKED;
709 break; 717 break;
710 } 718 }
711 719
712 switch (params->format & I915_OVERLAY_SWAP_MASK) { 720 switch (params->format & I915_OVERLAY_SWAP_MASK) {
713 case I915_OVERLAY_NO_SWAP: 721 case I915_OVERLAY_NO_SWAP:
714 break; 722 break;
715 case I915_OVERLAY_UV_SWAP: 723 case I915_OVERLAY_UV_SWAP:
716 cmd |= OCMD_UV_SWAP; 724 cmd |= OCMD_UV_SWAP;
717 break; 725 break;
718 case I915_OVERLAY_Y_SWAP: 726 case I915_OVERLAY_Y_SWAP:
719 cmd |= OCMD_Y_SWAP; 727 cmd |= OCMD_Y_SWAP;
720 break; 728 break;
721 case I915_OVERLAY_Y_AND_UV_SWAP: 729 case I915_OVERLAY_Y_AND_UV_SWAP:
722 cmd |= OCMD_Y_AND_UV_SWAP; 730 cmd |= OCMD_Y_AND_UV_SWAP;
723 break; 731 break;
724 } 732 }
725 } 733 }
726 734
727 return cmd; 735 return cmd;
728} 736}
729 737
730int intel_overlay_do_put_image(struct intel_overlay *overlay, 738static int intel_overlay_do_put_image(struct intel_overlay *overlay,
731 struct drm_gem_object *new_bo, 739 struct drm_gem_object *new_bo,
732 struct put_image_params *params) 740 struct put_image_params *params)
733{ 741{
734 int ret, tmp_width; 742 int ret, tmp_width;
735 struct overlay_registers *regs; 743 struct overlay_registers *regs;
@@ -754,24 +762,24 @@ int intel_overlay_do_put_image(struct intel_overlay *overlay,
754 goto out_unpin; 762 goto out_unpin;
755 763
756 if (!overlay->active) { 764 if (!overlay->active) {
757 regs = intel_overlay_map_regs_atomic(overlay); 765 regs = intel_overlay_map_regs(overlay);
758 if (!regs) { 766 if (!regs) {
759 ret = -ENOMEM; 767 ret = -ENOMEM;
760 goto out_unpin; 768 goto out_unpin;
761 } 769 }
762 regs->OCONFIG = OCONF_CC_OUT_8BIT; 770 regs->OCONFIG = OCONF_CC_OUT_8BIT;
763 if (IS_I965GM(overlay->dev)) 771 if (IS_GEN4(overlay->dev))
764 regs->OCONFIG |= OCONF_CSC_MODE_BT709; 772 regs->OCONFIG |= OCONF_CSC_MODE_BT709;
765 regs->OCONFIG |= overlay->crtc->pipe == 0 ? 773 regs->OCONFIG |= overlay->crtc->pipe == 0 ?
766 OCONF_PIPE_A : OCONF_PIPE_B; 774 OCONF_PIPE_A : OCONF_PIPE_B;
767 intel_overlay_unmap_regs_atomic(overlay); 775 intel_overlay_unmap_regs(overlay, regs);
768 776
769 ret = intel_overlay_on(overlay); 777 ret = intel_overlay_on(overlay);
770 if (ret != 0) 778 if (ret != 0)
771 goto out_unpin; 779 goto out_unpin;
772 } 780 }
773 781
774 regs = intel_overlay_map_regs_atomic(overlay); 782 regs = intel_overlay_map_regs(overlay);
775 if (!regs) { 783 if (!regs) {
776 ret = -ENOMEM; 784 ret = -ENOMEM;
777 goto out_unpin; 785 goto out_unpin;
@@ -787,7 +795,7 @@ int intel_overlay_do_put_image(struct intel_overlay *overlay,
787 795
788 regs->SWIDTH = params->src_w; 796 regs->SWIDTH = params->src_w;
789 regs->SWIDTHSW = calc_swidthsw(overlay->dev, 797 regs->SWIDTHSW = calc_swidthsw(overlay->dev,
790 params->offset_Y, tmp_width); 798 params->offset_Y, tmp_width);
791 regs->SHEIGHT = params->src_h; 799 regs->SHEIGHT = params->src_h;
792 regs->OBUF_0Y = bo_priv->gtt_offset + params->offset_Y; 800 regs->OBUF_0Y = bo_priv->gtt_offset + params->offset_Y;
793 regs->OSTRIDE = params->stride_Y; 801 regs->OSTRIDE = params->stride_Y;
@@ -798,9 +806,9 @@ int intel_overlay_do_put_image(struct intel_overlay *overlay,
798 u32 tmp_U, tmp_V; 806 u32 tmp_U, tmp_V;
799 regs->SWIDTH |= (params->src_w/uv_hscale) << 16; 807 regs->SWIDTH |= (params->src_w/uv_hscale) << 16;
800 tmp_U = calc_swidthsw(overlay->dev, params->offset_U, 808 tmp_U = calc_swidthsw(overlay->dev, params->offset_U,
801 params->src_w/uv_hscale); 809 params->src_w/uv_hscale);
802 tmp_V = calc_swidthsw(overlay->dev, params->offset_V, 810 tmp_V = calc_swidthsw(overlay->dev, params->offset_V,
803 params->src_w/uv_hscale); 811 params->src_w/uv_hscale);
804 regs->SWIDTHSW |= max_t(u32, tmp_U, tmp_V) << 16; 812 regs->SWIDTHSW |= max_t(u32, tmp_U, tmp_V) << 16;
805 regs->SHEIGHT |= (params->src_h/uv_vscale) << 16; 813 regs->SHEIGHT |= (params->src_h/uv_vscale) << 16;
806 regs->OBUF_0U = bo_priv->gtt_offset + params->offset_U; 814 regs->OBUF_0U = bo_priv->gtt_offset + params->offset_U;
@@ -814,9 +822,11 @@ int intel_overlay_do_put_image(struct intel_overlay *overlay,
814 822
815 regs->OCMD = overlay_cmd_reg(params); 823 regs->OCMD = overlay_cmd_reg(params);
816 824
817 intel_overlay_unmap_regs_atomic(overlay); 825 intel_overlay_unmap_regs(overlay, regs);
818 826
819 intel_overlay_continue(overlay, scale_changed); 827 ret = intel_overlay_continue(overlay, scale_changed);
828 if (ret)
829 goto out_unpin;
820 830
821 overlay->old_vid_bo = overlay->vid_bo; 831 overlay->old_vid_bo = overlay->vid_bo;
822 overlay->vid_bo = to_intel_bo(new_bo); 832 overlay->vid_bo = to_intel_bo(new_bo);
@@ -828,20 +838,19 @@ out_unpin:
828 return ret; 838 return ret;
829} 839}
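The tail of do_put_image() is a double-buffer handoff: the frame being displaced becomes old_vid_bo and stays pinned until the flip away from it is known to have completed (released later via intel_overlay_release_old_vid()). The pointer shuffle in isolation, with mock buffers:

#include <stdio.h>

struct buf { const char *name; };

int main(void)
{
	struct buf a = { "frame A" }, b = { "frame B" };
	struct buf *vid = &a, *old, *incoming = &b;

	/* Keep the displaced frame alive until the hardware has
	 * demonstrably flipped away from it. */
	old = vid;
	vid = incoming;

	printf("scanning out %s, holding %s until the flip completes\n",
	       vid->name, old->name);
	return 0;
}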
830 840
831int intel_overlay_switch_off(struct intel_overlay *overlay) 841int intel_overlay_switch_off(struct intel_overlay *overlay,
842 bool interruptible)
832{ 843{
833 int ret;
834 struct overlay_registers *regs; 844 struct overlay_registers *regs;
835 struct drm_device *dev = overlay->dev; 845 struct drm_device *dev = overlay->dev;
846 int ret;
836 847
837 BUG_ON(!mutex_is_locked(&dev->struct_mutex)); 848 BUG_ON(!mutex_is_locked(&dev->struct_mutex));
838 BUG_ON(!mutex_is_locked(&dev->mode_config.mutex)); 849 BUG_ON(!mutex_is_locked(&dev->mode_config.mutex));
839 850
840 if (overlay->hw_wedged) { 851 ret = intel_overlay_recover_from_interrupt(overlay, interruptible);
841 ret = intel_overlay_recover_from_interrupt(overlay, 1); 852 if (ret != 0)
842 if (ret != 0) 853 return ret;
843 return ret;
844 }
845 854
846 if (!overlay->active) 855 if (!overlay->active)
847 return 0; 856 return 0;
@@ -850,33 +859,29 @@ int intel_overlay_switch_off(struct intel_overlay *overlay)
850 if (ret != 0) 859 if (ret != 0)
851 return ret; 860 return ret;
852 861
853 regs = intel_overlay_map_regs_atomic(overlay); 862 regs = intel_overlay_map_regs(overlay);
854 regs->OCMD = 0; 863 regs->OCMD = 0;
855 intel_overlay_unmap_regs_atomic(overlay); 864 intel_overlay_unmap_regs(overlay, regs);
856 865
857 ret = intel_overlay_off(overlay); 866 ret = intel_overlay_off(overlay, interruptible);
858 if (ret != 0) 867 if (ret != 0)
859 return ret; 868 return ret;
860 869
861 intel_overlay_off_tail(overlay); 870 intel_overlay_off_tail(overlay);
862
863 return 0; 871 return 0;
864} 872}
865 873
866static int check_overlay_possible_on_crtc(struct intel_overlay *overlay, 874static int check_overlay_possible_on_crtc(struct intel_overlay *overlay,
867 struct intel_crtc *crtc) 875 struct intel_crtc *crtc)
868{ 876{
869 drm_i915_private_t *dev_priv = overlay->dev->dev_private; 877 drm_i915_private_t *dev_priv = overlay->dev->dev_private;
870 u32 pipeconf;
871 int pipeconf_reg = (crtc->pipe == 0) ? PIPEACONF : PIPEBCONF;
872 878
873 if (!crtc->base.enabled || crtc->dpms_mode != DRM_MODE_DPMS_ON) 879 if (!crtc->active)
874 return -EINVAL; 880 return -EINVAL;
875 881
876 pipeconf = I915_READ(pipeconf_reg);
877
878 /* can't use the overlay with double wide pipe */ 882 /* can't use the overlay with double wide pipe */
879 if (!IS_I965G(overlay->dev) && pipeconf & PIPEACONF_DOUBLE_WIDE) 883 if (INTEL_INFO(overlay->dev)->gen < 4 &&
884 (I915_READ(PIPECONF(crtc->pipe)) & (PIPECONF_DOUBLE_WIDE | PIPECONF_ENABLE)) != PIPECONF_ENABLE)
880 return -EINVAL; 885 return -EINVAL;
881 886
882 return 0; 887 return 0;
@@ -885,20 +890,22 @@ static int check_overlay_possible_on_crtc(struct intel_overlay *overlay,
885static void update_pfit_vscale_ratio(struct intel_overlay *overlay) 890static void update_pfit_vscale_ratio(struct intel_overlay *overlay)
886{ 891{
887 struct drm_device *dev = overlay->dev; 892 struct drm_device *dev = overlay->dev;
888 drm_i915_private_t *dev_priv = dev->dev_private; 893 drm_i915_private_t *dev_priv = dev->dev_private;
889 u32 ratio;
890 u32 pfit_control = I915_READ(PFIT_CONTROL); 894 u32 pfit_control = I915_READ(PFIT_CONTROL);
895 u32 ratio;
891 896
892 /* XXX: This is not the same logic as in the xorg driver, but more in 897 /* XXX: This is not the same logic as in the xorg driver, but more in
893 * line with the intel documentation for the i965 */ 898 * line with the intel documentation for the i965
894 if (!IS_I965G(dev) && (pfit_control & VERT_AUTO_SCALE)) { 899 */
895 ratio = I915_READ(PFIT_AUTO_RATIOS) >> PFIT_VERT_SCALE_SHIFT; 900 if (INTEL_INFO(dev)->gen >= 4) {
896 } else { /* on i965 use the PGM reg to read out the autoscaler values */ 901 /* on i965 use the PGM reg to read out the autoscaler values */
897 ratio = I915_READ(PFIT_PGM_RATIOS); 902 ratio = I915_READ(PFIT_PGM_RATIOS) >> PFIT_VERT_SCALE_SHIFT_965;
898 if (IS_I965G(dev)) 903 } else {
899 ratio >>= PFIT_VERT_SCALE_SHIFT_965; 904 if (pfit_control & VERT_AUTO_SCALE)
905 ratio = I915_READ(PFIT_AUTO_RATIOS);
900 else 906 else
901 ratio >>= PFIT_VERT_SCALE_SHIFT; 907 ratio = I915_READ(PFIT_PGM_RATIOS);
908 ratio >>= PFIT_VERT_SCALE_SHIFT;
902 } 909 }
903 910
904 overlay->pfit_vscale_ratio = ratio; 911 overlay->pfit_vscale_ratio = ratio;
@@ -909,12 +916,10 @@ static int check_overlay_dst(struct intel_overlay *overlay,
909{ 916{
910 struct drm_display_mode *mode = &overlay->crtc->base.mode; 917 struct drm_display_mode *mode = &overlay->crtc->base.mode;
911 918
912 if ((rec->dst_x < mode->crtc_hdisplay) 919 if (rec->dst_x < mode->crtc_hdisplay &&
913 && (rec->dst_x + rec->dst_width 920 rec->dst_x + rec->dst_width <= mode->crtc_hdisplay &&
914 <= mode->crtc_hdisplay) 921 rec->dst_y < mode->crtc_vdisplay &&
915 && (rec->dst_y < mode->crtc_vdisplay) 922 rec->dst_y + rec->dst_height <= mode->crtc_vdisplay)
916 && (rec->dst_y + rec->dst_height
917 <= mode->crtc_vdisplay))
918 return 0; 923 return 0;
919 else 924 else
920 return -EINVAL; 925 return -EINVAL;
@@ -939,53 +944,57 @@ static int check_overlay_src(struct drm_device *dev,
939 struct drm_intel_overlay_put_image *rec, 944 struct drm_intel_overlay_put_image *rec,
940 struct drm_gem_object *new_bo) 945 struct drm_gem_object *new_bo)
941{ 946{
942 u32 stride_mask;
943 int depth;
944 int uv_hscale = uv_hsubsampling(rec->flags); 947 int uv_hscale = uv_hsubsampling(rec->flags);
945 int uv_vscale = uv_vsubsampling(rec->flags); 948 int uv_vscale = uv_vsubsampling(rec->flags);
946 size_t tmp; 949 u32 stride_mask, depth, tmp;
947 950
948 /* check src dimensions */ 951 /* check src dimensions */
949 if (IS_845G(dev) || IS_I830(dev)) { 952 if (IS_845G(dev) || IS_I830(dev)) {
950 if (rec->src_height > IMAGE_MAX_HEIGHT_LEGACY 953 if (rec->src_height > IMAGE_MAX_HEIGHT_LEGACY ||
951 || rec->src_width > IMAGE_MAX_WIDTH_LEGACY) 954 rec->src_width > IMAGE_MAX_WIDTH_LEGACY)
952 return -EINVAL; 955 return -EINVAL;
953 } else { 956 } else {
954 if (rec->src_height > IMAGE_MAX_HEIGHT 957 if (rec->src_height > IMAGE_MAX_HEIGHT ||
955 || rec->src_width > IMAGE_MAX_WIDTH) 958 rec->src_width > IMAGE_MAX_WIDTH)
956 return -EINVAL; 959 return -EINVAL;
957 } 960 }
961
958 /* better safe than sorry, use 4 as the maximal subsampling ratio */ 962 /* better safe than sorry, use 4 as the maximal subsampling ratio */
959 if (rec->src_height < N_VERT_Y_TAPS*4 963 if (rec->src_height < N_VERT_Y_TAPS*4 ||
960 || rec->src_width < N_HORIZ_Y_TAPS*4) 964 rec->src_width < N_HORIZ_Y_TAPS*4)
961 return -EINVAL; 965 return -EINVAL;
962 966
963 /* check alignment constraints */ 967 /* check alignment constraints */
964 switch (rec->flags & I915_OVERLAY_TYPE_MASK) { 968 switch (rec->flags & I915_OVERLAY_TYPE_MASK) {
965 case I915_OVERLAY_RGB: 969 case I915_OVERLAY_RGB:
966 /* not implemented */ 970 /* not implemented */
971 return -EINVAL;
972
973 case I915_OVERLAY_YUV_PACKED:
974 if (uv_vscale != 1)
967 return -EINVAL; 975 return -EINVAL;
968 case I915_OVERLAY_YUV_PACKED: 976
969 depth = packed_depth_bytes(rec->flags); 977 depth = packed_depth_bytes(rec->flags);
970 if (uv_vscale != 1) 978 if (depth < 0)
971 return -EINVAL; 979 return depth;
972 if (depth < 0) 980
973 return depth; 981 /* ignore UV planes */
974 /* ignore UV planes */ 982 rec->stride_UV = 0;
975 rec->stride_UV = 0; 983 rec->offset_U = 0;
976 rec->offset_U = 0; 984 rec->offset_V = 0;
977 rec->offset_V = 0; 985 /* check pixel alignment */
978 /* check pixel alignment */ 986 if (rec->offset_Y % depth)
979 if (rec->offset_Y % depth) 987 return -EINVAL;
980 return -EINVAL; 988 break;
981 break; 989
982 case I915_OVERLAY_YUV_PLANAR: 990 case I915_OVERLAY_YUV_PLANAR:
983 if (uv_vscale < 0 || uv_hscale < 0) 991 if (uv_vscale < 0 || uv_hscale < 0)
984 return -EINVAL;
985 /* no offset restrictions for planar formats */
986 break;
987 default:
988 return -EINVAL; 992 return -EINVAL;
993 /* no offset restrictions for planar formats */
994 break;
995
996 default:
997 return -EINVAL;
989 } 998 }
990 999
991 if (rec->src_width % uv_hscale) 1000 if (rec->src_width % uv_hscale)
@@ -999,47 +1008,74 @@ static int check_overlay_src(struct drm_device *dev,
999 1008
1000 if (rec->stride_Y & stride_mask || rec->stride_UV & stride_mask) 1009 if (rec->stride_Y & stride_mask || rec->stride_UV & stride_mask)
1001 return -EINVAL; 1010 return -EINVAL;
1002 if (IS_I965G(dev) && rec->stride_Y < 512) 1011 if (IS_GEN4(dev) && rec->stride_Y < 512)
1003 return -EINVAL; 1012 return -EINVAL;
1004 1013
1005 tmp = (rec->flags & I915_OVERLAY_TYPE_MASK) == I915_OVERLAY_YUV_PLANAR ? 1014 tmp = (rec->flags & I915_OVERLAY_TYPE_MASK) == I915_OVERLAY_YUV_PLANAR ?
1006 4 : 8; 1015 4096 : 8192;
1007 if (rec->stride_Y > tmp*1024 || rec->stride_UV > 2*1024) 1016 if (rec->stride_Y > tmp || rec->stride_UV > 2*1024)
1008 return -EINVAL; 1017 return -EINVAL;
1009 1018
1010 /* check buffer dimensions */ 1019 /* check buffer dimensions */
1011 switch (rec->flags & I915_OVERLAY_TYPE_MASK) { 1020 switch (rec->flags & I915_OVERLAY_TYPE_MASK) {
1012 case I915_OVERLAY_RGB: 1021 case I915_OVERLAY_RGB:
1013 case I915_OVERLAY_YUV_PACKED: 1022 case I915_OVERLAY_YUV_PACKED:
1014 /* always 4 Y values per depth pixel */ 1023 /* always 4 Y values per depth pixel */
1015 if (packed_width_bytes(rec->flags, rec->src_width) 1024 if (packed_width_bytes(rec->flags, rec->src_width) > rec->stride_Y)
1016 > rec->stride_Y) 1025 return -EINVAL;
1017 return -EINVAL; 1026
1018 1027 tmp = rec->stride_Y*rec->src_height;
1019 tmp = rec->stride_Y*rec->src_height; 1028 if (rec->offset_Y + tmp > new_bo->size)
1020 if (rec->offset_Y + tmp > new_bo->size) 1029 return -EINVAL;
1021 return -EINVAL; 1030 break;
1022 break; 1031
1023 case I915_OVERLAY_YUV_PLANAR: 1032 case I915_OVERLAY_YUV_PLANAR:
1024 if (rec->src_width > rec->stride_Y) 1033 if (rec->src_width > rec->stride_Y)
1025 return -EINVAL; 1034 return -EINVAL;
1026 if (rec->src_width/uv_hscale > rec->stride_UV) 1035 if (rec->src_width/uv_hscale > rec->stride_UV)
1027 return -EINVAL; 1036 return -EINVAL;
1028 1037
1029 tmp = rec->stride_Y*rec->src_height; 1038 tmp = rec->stride_Y * rec->src_height;
1030 if (rec->offset_Y + tmp > new_bo->size) 1039 if (rec->offset_Y + tmp > new_bo->size)
1031 return -EINVAL; 1040 return -EINVAL;
1032 tmp = rec->stride_UV*rec->src_height; 1041
1033 tmp /= uv_vscale; 1042 tmp = rec->stride_UV * (rec->src_height / uv_vscale);
1034 if (rec->offset_U + tmp > new_bo->size 1043 if (rec->offset_U + tmp > new_bo->size ||
1035 || rec->offset_V + tmp > new_bo->size) 1044 rec->offset_V + tmp > new_bo->size)
1036 return -EINVAL; 1045 return -EINVAL;
1037 break; 1046 break;
1038 } 1047 }
1039 1048
1040 return 0; 1049 return 0;
1041} 1050}
1042 1051
1052/**
1053 * Return the pipe currently connected to the panel fitter,
1054 * or -1 if the panel fitter is not present or not in use
1055 */
1056static int intel_panel_fitter_pipe(struct drm_device *dev)
1057{
1058 struct drm_i915_private *dev_priv = dev->dev_private;
1059 u32 pfit_control;
1060
1061 /* i830 doesn't have a panel fitter */
1062 if (IS_I830(dev))
1063 return -1;
1064
1065 pfit_control = I915_READ(PFIT_CONTROL);
1066
1067 /* See if the panel fitter is in use */
1068 if ((pfit_control & PFIT_ENABLE) == 0)
1069 return -1;
1070
1071 /* 965 can place panel fitter on either pipe */
1072 if (IS_GEN4(dev))
1073 return (pfit_control >> 29) & 0x3;
1074
1075 /* older chips can only use pipe 1 */
1076 return 1;
1077}
1078
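intel_panel_fitter_pipe() is new in this file: it decodes PFIT_CONTROL into the pipe the fitter is driving, because the one-line-mode special case below only applies when the fitter sits on the overlay's pipe. A sketch of the decode; the bit positions (enable in the top bit, pipe select at bits 30:29 on gen4) are assumptions matching the shifts in the code above.

#include <stdio.h>
#include <stdint.h>

#define PFIT_ENABLE     (1u << 31)  /* assumed bit positions; the real */
#define PFIT_PIPE_SHIFT 29          /* definitions live in i915_reg.h  */

static int panel_fitter_pipe(uint32_t pfit_control, int is_gen4)
{
	if ((pfit_control & PFIT_ENABLE) == 0)
		return -1;              /* fitter present but idle */
	if (is_gen4)
		return (pfit_control >> PFIT_PIPE_SHIFT) & 0x3;
	return 1;                       /* older parts: pipe B only */
}

int main(void)
{
	printf("%d\n", panel_fitter_pipe(0, 1));                        /* -1 */
	printf("%d\n", panel_fitter_pipe(PFIT_ENABLE | (1u << 29), 1)); /*  1 */
	printf("%d\n", panel_fitter_pipe(PFIT_ENABLE, 0));              /*  1 */
	return 0;
}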
1043int intel_overlay_put_image(struct drm_device *dev, void *data, 1079int intel_overlay_put_image(struct drm_device *dev, void *data,
1044 struct drm_file *file_priv) 1080 struct drm_file *file_priv)
1045{ 1081{
@@ -1067,7 +1103,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
1067 mutex_lock(&dev->mode_config.mutex); 1103 mutex_lock(&dev->mode_config.mutex);
1068 mutex_lock(&dev->struct_mutex); 1104 mutex_lock(&dev->struct_mutex);
1069 1105
1070 ret = intel_overlay_switch_off(overlay); 1106 ret = intel_overlay_switch_off(overlay, true);
1071 1107
1072 mutex_unlock(&dev->struct_mutex); 1108 mutex_unlock(&dev->struct_mutex);
1073 mutex_unlock(&dev->mode_config.mutex); 1109 mutex_unlock(&dev->mode_config.mutex);
@@ -1080,7 +1116,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
1080 return -ENOMEM; 1116 return -ENOMEM;
1081 1117
1082 drmmode_obj = drm_mode_object_find(dev, put_image_rec->crtc_id, 1118 drmmode_obj = drm_mode_object_find(dev, put_image_rec->crtc_id,
1083 DRM_MODE_OBJECT_CRTC); 1119 DRM_MODE_OBJECT_CRTC);
1084 if (!drmmode_obj) { 1120 if (!drmmode_obj) {
1085 ret = -ENOENT; 1121 ret = -ENOENT;
1086 goto out_free; 1122 goto out_free;
@@ -1088,7 +1124,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
1088 crtc = to_intel_crtc(obj_to_crtc(drmmode_obj)); 1124 crtc = to_intel_crtc(obj_to_crtc(drmmode_obj));
1089 1125
1090 new_bo = drm_gem_object_lookup(dev, file_priv, 1126 new_bo = drm_gem_object_lookup(dev, file_priv,
1091 put_image_rec->bo_handle); 1127 put_image_rec->bo_handle);
1092 if (!new_bo) { 1128 if (!new_bo) {
1093 ret = -ENOENT; 1129 ret = -ENOENT;
1094 goto out_free; 1130 goto out_free;
@@ -1097,15 +1133,13 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
1097 mutex_lock(&dev->mode_config.mutex); 1133 mutex_lock(&dev->mode_config.mutex);
1098 mutex_lock(&dev->struct_mutex); 1134 mutex_lock(&dev->struct_mutex);
1099 1135
1100 if (overlay->hw_wedged) { 1136 ret = intel_overlay_recover_from_interrupt(overlay, true);
1101 ret = intel_overlay_recover_from_interrupt(overlay, 1); 1137 if (ret != 0)
1102 if (ret != 0) 1138 goto out_unlock;
1103 goto out_unlock;
1104 }
1105 1139
1106 if (overlay->crtc != crtc) { 1140 if (overlay->crtc != crtc) {
1107 struct drm_display_mode *mode = &crtc->base.mode; 1141 struct drm_display_mode *mode = &crtc->base.mode;
1108 ret = intel_overlay_switch_off(overlay); 1142 ret = intel_overlay_switch_off(overlay, true);
1109 if (ret != 0) 1143 if (ret != 0)
1110 goto out_unlock; 1144 goto out_unlock;
1111 1145
@@ -1116,9 +1150,9 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
1116 overlay->crtc = crtc; 1150 overlay->crtc = crtc;
1117 crtc->overlay = overlay; 1151 crtc->overlay = overlay;
1118 1152
1119 if (intel_panel_fitter_pipe(dev) == crtc->pipe 1153 /* line too wide, i.e. one-line-mode */
1120 /* and line to wide, i.e. one-line-mode */ 1154 if (mode->hdisplay > 1024 &&
1121 && mode->hdisplay > 1024) { 1155 intel_panel_fitter_pipe(dev) == crtc->pipe) {
1122 overlay->pfit_active = 1; 1156 overlay->pfit_active = 1;
1123 update_pfit_vscale_ratio(overlay); 1157 update_pfit_vscale_ratio(overlay);
1124 } else 1158 } else
@@ -1131,10 +1165,10 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
1131 1165
1132 if (overlay->pfit_active) { 1166 if (overlay->pfit_active) {
1133 params->dst_y = ((((u32)put_image_rec->dst_y) << 12) / 1167 params->dst_y = ((((u32)put_image_rec->dst_y) << 12) /
1134 overlay->pfit_vscale_ratio); 1168 overlay->pfit_vscale_ratio);
1135 /* shifting right rounds downwards, so add 1 */ 1169 /* shifting right rounds downwards, so add 1 */
1136 params->dst_h = ((((u32)put_image_rec->dst_height) << 12) / 1170 params->dst_h = ((((u32)put_image_rec->dst_height) << 12) /
1137 overlay->pfit_vscale_ratio) + 1; 1171 overlay->pfit_vscale_ratio) + 1;
1138 } else { 1172 } else {
1139 params->dst_y = put_image_rec->dst_y; 1173 params->dst_y = put_image_rec->dst_y;
1140 params->dst_h = put_image_rec->dst_height; 1174 params->dst_h = put_image_rec->dst_height;
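With the panel fitter active, the requested rectangle is pre-divided by the fitter's vertical ratio (a .12 fixed-point value, hence the << 12) so it lands where the caller asked after the fitter stretches it; the + 1 compensates for the division rounding down. Worked through with a hypothetical 1.5x stretch:

#include <stdio.h>

int main(void)
{
	/* .12 fixed point: 0x1800 would mean a 1.5x vertical stretch
	 * (an assumed sample value, not read from hardware). */
	unsigned int pfit_vscale_ratio = 0x1800;
	unsigned int dst_y = 300, dst_h = 240;

	unsigned int y = (dst_y << 12) / pfit_vscale_ratio;
	/* shifting right rounds downwards, so add 1 */
	unsigned int h = (dst_h << 12) / pfit_vscale_ratio + 1;

	/* Prints: dst_y 300 -> 200, dst_h 240 -> 161 */
	printf("dst_y %u -> %u, dst_h %u -> %u\n", dst_y, y, dst_h, h);
	return 0;
}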
@@ -1146,8 +1180,8 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
1146 params->src_h = put_image_rec->src_height; 1180 params->src_h = put_image_rec->src_height;
1147 params->src_scan_w = put_image_rec->src_scan_width; 1181 params->src_scan_w = put_image_rec->src_scan_width;
1148 params->src_scan_h = put_image_rec->src_scan_height; 1182 params->src_scan_h = put_image_rec->src_scan_height;
1149 if (params->src_scan_h > params->src_h 1183 if (params->src_scan_h > params->src_h ||
1150 || params->src_scan_w > params->src_w) { 1184 params->src_scan_w > params->src_w) {
1151 ret = -EINVAL; 1185 ret = -EINVAL;
1152 goto out_unlock; 1186 goto out_unlock;
1153 } 1187 }
@@ -1203,7 +1237,7 @@ static bool check_gamma_bounds(u32 gamma1, u32 gamma2)
1203 return false; 1237 return false;
1204 1238
1205 for (i = 0; i < 3; i++) { 1239 for (i = 0; i < 3; i++) {
1206 if (((gamma1 >> i * 8) & 0xff) >= ((gamma2 >> i*8) & 0xff)) 1240 if (((gamma1 >> i*8) & 0xff) >= ((gamma2 >> i*8) & 0xff))
1207 return false; 1241 return false;
1208 } 1242 }
1209 1243
@@ -1224,16 +1258,18 @@ static bool check_gamma5_errata(u32 gamma5)
1224 1258
1225static int check_gamma(struct drm_intel_overlay_attrs *attrs) 1259static int check_gamma(struct drm_intel_overlay_attrs *attrs)
1226{ 1260{
1227 if (!check_gamma_bounds(0, attrs->gamma0) 1261 if (!check_gamma_bounds(0, attrs->gamma0) ||
1228 || !check_gamma_bounds(attrs->gamma0, attrs->gamma1) 1262 !check_gamma_bounds(attrs->gamma0, attrs->gamma1) ||
1229 || !check_gamma_bounds(attrs->gamma1, attrs->gamma2) 1263 !check_gamma_bounds(attrs->gamma1, attrs->gamma2) ||
1230 || !check_gamma_bounds(attrs->gamma2, attrs->gamma3) 1264 !check_gamma_bounds(attrs->gamma2, attrs->gamma3) ||
1231 || !check_gamma_bounds(attrs->gamma3, attrs->gamma4) 1265 !check_gamma_bounds(attrs->gamma3, attrs->gamma4) ||
1232 || !check_gamma_bounds(attrs->gamma4, attrs->gamma5) 1266 !check_gamma_bounds(attrs->gamma4, attrs->gamma5) ||
1233 || !check_gamma_bounds(attrs->gamma5, 0x00ffffff)) 1267 !check_gamma_bounds(attrs->gamma5, 0x00ffffff))
1234 return -EINVAL; 1268 return -EINVAL;
1269
1235 if (!check_gamma5_errata(attrs->gamma5)) 1270 if (!check_gamma5_errata(attrs->gamma5))
1236 return -EINVAL; 1271 return -EINVAL;
1272
1237 return 0; 1273 return 0;
1238} 1274}
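check_gamma() chains check_gamma_bounds() across the six gamma points plus the 0 and 0x00ffffff endpoints, so each of the three per-channel bytes must form a strictly increasing ramp. The core comparison in isolation, mirroring the rewritten shift expression above:

#include <stdio.h>
#include <stdint.h>

/* 1 if the three channel bytes strictly increase from g1 to g2. */
static int gamma_bounds_ok(uint32_t g1, uint32_t g2)
{
	int i;

	if ((g1 | g2) & 0xff000000)     /* top byte must stay clear */
		return 0;
	for (i = 0; i < 3; i++) {
		if (((g1 >> i*8) & 0xff) >= ((g2 >> i*8) & 0xff))
			return 0;
	}
	return 1;
}

int main(void)
{
	printf("%d\n", gamma_bounds_ok(0x00101010, 0x00202020)); /* 1: rising */
	printf("%d\n", gamma_bounds_ok(0x00202020, 0x00202020)); /* 0: equal  */
	return 0;
}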
1239 1275
@@ -1260,13 +1296,14 @@ int intel_overlay_attrs(struct drm_device *dev, void *data,
1260 mutex_lock(&dev->mode_config.mutex); 1296 mutex_lock(&dev->mode_config.mutex);
1261 mutex_lock(&dev->struct_mutex); 1297 mutex_lock(&dev->struct_mutex);
1262 1298
1299 ret = -EINVAL;
1263 if (!(attrs->flags & I915_OVERLAY_UPDATE_ATTRS)) { 1300 if (!(attrs->flags & I915_OVERLAY_UPDATE_ATTRS)) {
1264 attrs->color_key = overlay->color_key; 1301 attrs->color_key = overlay->color_key;
1265 attrs->brightness = overlay->brightness; 1302 attrs->brightness = overlay->brightness;
1266 attrs->contrast = overlay->contrast; 1303 attrs->contrast = overlay->contrast;
1267 attrs->saturation = overlay->saturation; 1304 attrs->saturation = overlay->saturation;
1268 1305
1269 if (IS_I9XX(dev)) { 1306 if (!IS_GEN2(dev)) {
1270 attrs->gamma0 = I915_READ(OGAMC0); 1307 attrs->gamma0 = I915_READ(OGAMC0);
1271 attrs->gamma1 = I915_READ(OGAMC1); 1308 attrs->gamma1 = I915_READ(OGAMC1);
1272 attrs->gamma2 = I915_READ(OGAMC2); 1309 attrs->gamma2 = I915_READ(OGAMC2);
@@ -1274,29 +1311,20 @@ int intel_overlay_attrs(struct drm_device *dev, void *data,
1274 attrs->gamma4 = I915_READ(OGAMC4); 1311 attrs->gamma4 = I915_READ(OGAMC4);
1275 attrs->gamma5 = I915_READ(OGAMC5); 1312 attrs->gamma5 = I915_READ(OGAMC5);
1276 } 1313 }
1277 ret = 0;
1278 } else { 1314 } else {
1279 overlay->color_key = attrs->color_key; 1315 if (attrs->brightness < -128 || attrs->brightness > 127)
1280 if (attrs->brightness >= -128 && attrs->brightness <= 127) {
1281 overlay->brightness = attrs->brightness;
1282 } else {
1283 ret = -EINVAL;
1284 goto out_unlock; 1316 goto out_unlock;
1285 } 1317 if (attrs->contrast > 255)
1286 if (attrs->contrast <= 255) {
1287 overlay->contrast = attrs->contrast;
1288 } else {
1289 ret = -EINVAL;
1290 goto out_unlock; 1318 goto out_unlock;
1291 } 1319 if (attrs->saturation > 1023)
1292 if (attrs->saturation <= 1023) {
1293 overlay->saturation = attrs->saturation;
1294 } else {
1295 ret = -EINVAL;
1296 goto out_unlock; 1320 goto out_unlock;
1297 }
1298 1321
1299 regs = intel_overlay_map_regs_atomic(overlay); 1322 overlay->color_key = attrs->color_key;
1323 overlay->brightness = attrs->brightness;
1324 overlay->contrast = attrs->contrast;
1325 overlay->saturation = attrs->saturation;
1326
1327 regs = intel_overlay_map_regs(overlay);
1300 if (!regs) { 1328 if (!regs) {
1301 ret = -ENOMEM; 1329 ret = -ENOMEM;
1302 goto out_unlock; 1330 goto out_unlock;
@@ -1304,13 +1332,11 @@ int intel_overlay_attrs(struct drm_device *dev, void *data,
1304 1332
1305 update_reg_attrs(overlay, regs); 1333 update_reg_attrs(overlay, regs);
1306 1334
1307 intel_overlay_unmap_regs_atomic(overlay); 1335 intel_overlay_unmap_regs(overlay, regs);
1308 1336
1309 if (attrs->flags & I915_OVERLAY_UPDATE_GAMMA) { 1337 if (attrs->flags & I915_OVERLAY_UPDATE_GAMMA) {
1310 if (!IS_I9XX(dev)) { 1338 if (IS_GEN2(dev))
1311 ret = -EINVAL;
1312 goto out_unlock; 1339 goto out_unlock;
1313 }
1314 1340
1315 if (overlay->active) { 1341 if (overlay->active) {
1316 ret = -EBUSY; 1342 ret = -EBUSY;
@@ -1318,7 +1344,7 @@ int intel_overlay_attrs(struct drm_device *dev, void *data,
1318 } 1344 }
1319 1345
1320 ret = check_gamma(attrs); 1346 ret = check_gamma(attrs);
1321 if (ret != 0) 1347 if (ret)
1322 goto out_unlock; 1348 goto out_unlock;
1323 1349
1324 I915_WRITE(OGAMC0, attrs->gamma0); 1350 I915_WRITE(OGAMC0, attrs->gamma0);
@@ -1328,9 +1354,9 @@ int intel_overlay_attrs(struct drm_device *dev, void *data,
1328 I915_WRITE(OGAMC4, attrs->gamma4); 1354 I915_WRITE(OGAMC4, attrs->gamma4);
1329 I915_WRITE(OGAMC5, attrs->gamma5); 1355 I915_WRITE(OGAMC5, attrs->gamma5);
1330 } 1356 }
1331 ret = 0;
1332 } 1357 }
1333 1358
1359 ret = 0;
1334out_unlock: 1360out_unlock:
1335 mutex_unlock(&dev->struct_mutex); 1361 mutex_unlock(&dev->struct_mutex);
1336 mutex_unlock(&dev->mode_config.mutex); 1362 mutex_unlock(&dev->mode_config.mutex);
@@ -1346,7 +1372,7 @@ void intel_setup_overlay(struct drm_device *dev)
1346 struct overlay_registers *regs; 1372 struct overlay_registers *regs;
1347 int ret; 1373 int ret;
1348 1374
1349 if (!OVERLAY_EXISTS(dev)) 1375 if (!HAS_OVERLAY(dev))
1350 return; 1376 return;
1351 1377
1352 overlay = kzalloc(sizeof(struct intel_overlay), GFP_KERNEL); 1378 overlay = kzalloc(sizeof(struct intel_overlay), GFP_KERNEL);
@@ -1359,22 +1385,28 @@ void intel_setup_overlay(struct drm_device *dev)
1359 goto out_free; 1385 goto out_free;
1360 overlay->reg_bo = to_intel_bo(reg_bo); 1386 overlay->reg_bo = to_intel_bo(reg_bo);
1361 1387
1362 if (OVERLAY_NONPHYSICAL(dev)) { 1388 if (OVERLAY_NEEDS_PHYSICAL(dev)) {
1363 ret = i915_gem_object_pin(reg_bo, PAGE_SIZE);
1364 if (ret) {
1365 DRM_ERROR("failed to pin overlay register bo\n");
1366 goto out_free_bo;
1367 }
1368 overlay->flip_addr = overlay->reg_bo->gtt_offset;
1369 } else {
1370 ret = i915_gem_attach_phys_object(dev, reg_bo, 1389 ret = i915_gem_attach_phys_object(dev, reg_bo,
1371 I915_GEM_PHYS_OVERLAY_REGS, 1390 I915_GEM_PHYS_OVERLAY_REGS,
1372 0); 1391 PAGE_SIZE);
1373 if (ret) { 1392 if (ret) {
1374 DRM_ERROR("failed to attach phys overlay regs\n"); 1393 DRM_ERROR("failed to attach phys overlay regs\n");
1375 goto out_free_bo; 1394 goto out_free_bo;
1376 } 1395 }
1377 overlay->flip_addr = overlay->reg_bo->phys_obj->handle->busaddr; 1396 overlay->flip_addr = overlay->reg_bo->phys_obj->handle->busaddr;
1397 } else {
1398 ret = i915_gem_object_pin(reg_bo, PAGE_SIZE);
1399 if (ret) {
1400 DRM_ERROR("failed to pin overlay register bo\n");
1401 goto out_free_bo;
1402 }
1403 overlay->flip_addr = overlay->reg_bo->gtt_offset;
1404
1405 ret = i915_gem_object_set_to_gtt_domain(reg_bo, true);
1406 if (ret) {
1407 DRM_ERROR("failed to move overlay register bo into the GTT\n");
1408 goto out_unpin_bo;
1409 }
1378 } 1410 }
1379 1411
1380 /* init all values */ 1412 /* init all values */
@@ -1383,21 +1415,22 @@ void intel_setup_overlay(struct drm_device *dev)
1383 overlay->contrast = 75; 1415 overlay->contrast = 75;
1384 overlay->saturation = 146; 1416 overlay->saturation = 146;
1385 1417
1386 regs = intel_overlay_map_regs_atomic(overlay); 1418 regs = intel_overlay_map_regs(overlay);
1387 if (!regs) 1419 if (!regs)
1388 goto out_free_bo; 1420 goto out_free_bo;
1389 1421
1390 memset(regs, 0, sizeof(struct overlay_registers)); 1422 memset(regs, 0, sizeof(struct overlay_registers));
1391 update_polyphase_filter(regs); 1423 update_polyphase_filter(regs);
1392
1393 update_reg_attrs(overlay, regs); 1424 update_reg_attrs(overlay, regs);
1394 1425
1395 intel_overlay_unmap_regs_atomic(overlay); 1426 intel_overlay_unmap_regs(overlay, regs);
1396 1427
1397 dev_priv->overlay = overlay; 1428 dev_priv->overlay = overlay;
1398 DRM_INFO("initialized overlay support\n"); 1429 DRM_INFO("initialized overlay support\n");
1399 return; 1430 return;
1400 1431
1432out_unpin_bo:
1433 i915_gem_object_unpin(reg_bo);
1401out_free_bo: 1434out_free_bo:
1402 drm_gem_object_unreference(reg_bo); 1435 drm_gem_object_unreference(reg_bo);
1403out_free: 1436out_free:
@@ -1407,18 +1440,23 @@ out_free:
1407 1440
1408void intel_cleanup_overlay(struct drm_device *dev) 1441void intel_cleanup_overlay(struct drm_device *dev)
1409{ 1442{
1410 drm_i915_private_t *dev_priv = dev->dev_private; 1443 drm_i915_private_t *dev_priv = dev->dev_private;
1411 1444
1412 if (dev_priv->overlay) { 1445 if (!dev_priv->overlay)
1413 /* The bos should be freed by the generic code already. 1446 return;
1414 * Furthermore modesetting teardown happens beforehand so the
1415 * hardware should be off already */
1416 BUG_ON(dev_priv->overlay->active);
1417 1447
1418 kfree(dev_priv->overlay); 1448 /* The bos should be freed by the generic code already.
1419 } 1449 * Furthermore modesetting teardown happens beforehand so the
1450 * hardware should be off already */
1451 BUG_ON(dev_priv->overlay->active);
1452
1453 drm_gem_object_unreference_unlocked(&dev_priv->overlay->reg_bo->base);
1454 kfree(dev_priv->overlay);
1420} 1455}
1421 1456
1457#ifdef CONFIG_DEBUG_FS
1458#include <linux/seq_file.h>
1459
1422struct intel_overlay_error_state { 1460struct intel_overlay_error_state {
1423 struct overlay_registers regs; 1461 struct overlay_registers regs;
1424 unsigned long base; 1462 unsigned long base;
@@ -1426,6 +1464,29 @@ struct intel_overlay_error_state {
1426 u32 isr; 1464 u32 isr;
1427}; 1465};
1428 1466
1467static struct overlay_registers *
1468intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
1469{
1470 drm_i915_private_t *dev_priv = overlay->dev->dev_private;
1471 struct overlay_registers *regs;
1472
1473 if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
1474 regs = overlay->reg_bo->phys_obj->handle->vaddr;
1475 else
1476 regs = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
1477 overlay->reg_bo->gtt_offset);
1478
1479 return regs;
1480}
1481
1482static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay,
1483 struct overlay_registers *regs)
1484{
1485 if (!OVERLAY_NEEDS_PHYSICAL(overlay->dev))
1486 io_mapping_unmap_atomic(regs);
1487}
1488
1489
1429struct intel_overlay_error_state * 1490struct intel_overlay_error_state *
1430intel_overlay_capture_error_state(struct drm_device *dev) 1491intel_overlay_capture_error_state(struct drm_device *dev)
1431{ 1492{
@@ -1443,17 +1504,17 @@ intel_overlay_capture_error_state(struct drm_device *dev)
1443 1504
1444 error->dovsta = I915_READ(DOVSTA); 1505 error->dovsta = I915_READ(DOVSTA);
1445 error->isr = I915_READ(ISR); 1506 error->isr = I915_READ(ISR);
1446 if (OVERLAY_NONPHYSICAL(overlay->dev)) 1507 if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
1447 error->base = (long) overlay->reg_bo->gtt_offset;
1448 else
1449 error->base = (long) overlay->reg_bo->phys_obj->handle->vaddr; 1508 error->base = (long) overlay->reg_bo->phys_obj->handle->vaddr;
1509 else
1510 error->base = (long) overlay->reg_bo->gtt_offset;
1450 1511
1451 regs = intel_overlay_map_regs_atomic(overlay); 1512 regs = intel_overlay_map_regs_atomic(overlay);
1452 if (!regs) 1513 if (!regs)
1453 goto err; 1514 goto err;
1454 1515
1455 memcpy_fromio(&error->regs, regs, sizeof(struct overlay_registers)); 1516 memcpy_fromio(&error->regs, regs, sizeof(struct overlay_registers));
1456 intel_overlay_unmap_regs_atomic(overlay); 1517 intel_overlay_unmap_regs_atomic(overlay, regs);
1457 1518
1458 return error; 1519 return error;
1459 1520
@@ -1514,3 +1575,4 @@ intel_overlay_print_error_state(struct seq_file *m, struct intel_overlay_error_s
1514 P(UVSCALEV); 1575 P(UVSCALEV);
1515#undef P 1576#undef P
1516} 1577}
1578#endif
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index e7f5299d9d57..92ff8f385278 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -30,6 +30,8 @@
30 30
31#include "intel_drv.h" 31#include "intel_drv.h"
32 32
33#define PCI_LBPC 0xf4 /* legacy/combination backlight modes */
34
33void 35void
34intel_fixed_panel_mode(struct drm_display_mode *fixed_mode, 36intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
35 struct drm_display_mode *adjusted_mode) 37 struct drm_display_mode *adjusted_mode)
@@ -109,3 +111,110 @@ done:
109 dev_priv->pch_pf_pos = (x << 16) | y; 111 dev_priv->pch_pf_pos = (x << 16) | y;
110 dev_priv->pch_pf_size = (width << 16) | height; 112 dev_priv->pch_pf_size = (width << 16) | height;
111} 113}
114
115static int is_backlight_combination_mode(struct drm_device *dev)
116{
117 struct drm_i915_private *dev_priv = dev->dev_private;
118
119 if (INTEL_INFO(dev)->gen >= 4)
120 return I915_READ(BLC_PWM_CTL2) & BLM_COMBINATION_MODE;
121
122 if (IS_GEN2(dev))
123 return I915_READ(BLC_PWM_CTL) & BLM_LEGACY_MODE;
124
125 return 0;
126}
127
128u32 intel_panel_get_max_backlight(struct drm_device *dev)
129{
130 struct drm_i915_private *dev_priv = dev->dev_private;
131 u32 max;
132
133 if (HAS_PCH_SPLIT(dev)) {
134 max = I915_READ(BLC_PWM_PCH_CTL2) >> 16;
135 } else {
136 max = I915_READ(BLC_PWM_CTL);
137 if (IS_PINEVIEW(dev)) {
138 max >>= 17;
139 } else {
140 max >>= 16;
141 if (INTEL_INFO(dev)->gen < 4)
142 max &= ~1;
143 }
144
145 if (is_backlight_combination_mode(dev))
146 max *= 0xff;
147 }
148
149 if (max == 0) {
150 /* XXX add code here to query mode clock or hardware clock
151 * and program max PWM appropriately.
152 */
153 DRM_ERROR("fixme: max PWM is zero.\n");
154 max = 1;
155 }
156
157 DRM_DEBUG_DRIVER("max backlight PWM = %d\n", max);
158 return max;
159}
160
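On non-PCH hardware the maximum PWM count is decoded from the high half of BLC_PWM_CTL: Pineview shifts one bit further, pre-gen4 parts reserve the low bit of the field, and combination mode widens the range by 0xff because a legacy byte also scales the duty cycle. The field decode in isolation (a sketch of the branch above, not a substitute for the function):

#include <stdio.h>
#include <stdint.h>

static uint32_t max_backlight(uint32_t blc_pwm_ctl, int pineview, int gen)
{
	uint32_t max = blc_pwm_ctl;

	if (pineview) {
		max >>= 17;
	} else {
		max >>= 16;
		if (gen < 4)
			max &= ~1u;     /* low bit is reserved pre-gen4 */
	}
	return max;
}

int main(void)
{
	printf("0x%x\n", max_backlight(0x12355678, 0, 3)); /* 0x1234 */
	printf("0x%x\n", max_backlight(0x12355678, 1, 3)); /* 0x91a  */
	return 0;
}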
161u32 intel_panel_get_backlight(struct drm_device *dev)
162{
163 struct drm_i915_private *dev_priv = dev->dev_private;
164 u32 val;
165
166 if (HAS_PCH_SPLIT(dev)) {
167 val = I915_READ(BLC_PWM_CPU_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
168 } else {
169 val = I915_READ(BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
170 if (IS_PINEVIEW(dev))
171 val >>= 1;
172
173		if (is_backlight_combination_mode(dev)) {
174 u8 lbpc;
175
176 val &= ~1;
177 pci_read_config_byte(dev->pdev, PCI_LBPC, &lbpc);
178 val *= lbpc;
179 val >>= 1;
180 }
181 }
182
183 DRM_DEBUG_DRIVER("get backlight PWM = %d\n", val);
184 return val;
185}
186
187static void intel_pch_panel_set_backlight(struct drm_device *dev, u32 level)
188{
189 struct drm_i915_private *dev_priv = dev->dev_private;
190 u32 val = I915_READ(BLC_PWM_CPU_CTL) & ~BACKLIGHT_DUTY_CYCLE_MASK;
191 I915_WRITE(BLC_PWM_CPU_CTL, val | level);
192}
193
194void intel_panel_set_backlight(struct drm_device *dev, u32 level)
195{
196 struct drm_i915_private *dev_priv = dev->dev_private;
197 u32 tmp;
198
199 DRM_DEBUG_DRIVER("set backlight PWM = %d\n", level);
200
201 if (HAS_PCH_SPLIT(dev))
202 return intel_pch_panel_set_backlight(dev, level);
203
204	if (is_backlight_combination_mode(dev)) {
205 u32 max = intel_panel_get_max_backlight(dev);
206		u8 lbpc;
207
208		lbpc = level * 0xfe / max + 1;
209		level /= lbpc;
210		pci_write_config_byte(dev->pdev, PCI_LBPC, lbpc);
211 }
212
213 tmp = I915_READ(BLC_PWM_CTL);
214 if (IS_PINEVIEW(dev)) {
215 tmp &= ~(BACKLIGHT_DUTY_CYCLE_MASK - 1);
216 level <<= 1;
217 } else
218 tmp &= ~BACKLIGHT_DUTY_CYCLE_MASK;
219 I915_WRITE(BLC_PWM_CTL, tmp | level);
220}
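In combination mode a requested level is factored into two controls: a coarse legacy brightness byte written to the PCI_LBPC config register and a residual PWM duty cycle, chosen so their product approximates the request. The arithmetic on its own, with made-up max/level values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t max = 0x1234;   /* as returned by the max-backlight query */
	uint32_t level = 0x0800; /* requested brightness */

	uint8_t lbpc = level * 0xfe / max + 1;
	uint32_t duty = level / lbpc;

	/* Prints: lbpc=112 duty=18 (product 2016 of 2048 requested) */
	printf("lbpc=%u duty=%u (product %u of %u requested)\n",
	       lbpc, duty, duty * lbpc, level);
	return 0;
}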
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index cb3508f78bc3..09f2dc353ae2 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -32,6 +32,7 @@
32#include "i915_drv.h" 32#include "i915_drv.h"
33#include "i915_drm.h" 33#include "i915_drm.h"
34#include "i915_trace.h" 34#include "i915_trace.h"
35#include "intel_drv.h"
35 36
36static u32 i915_gem_get_seqno(struct drm_device *dev) 37static u32 i915_gem_get_seqno(struct drm_device *dev)
37{ 38{
@@ -49,9 +50,9 @@ static u32 i915_gem_get_seqno(struct drm_device *dev)
49 50
50static void 51static void
51render_ring_flush(struct drm_device *dev, 52render_ring_flush(struct drm_device *dev,
52 struct intel_ring_buffer *ring, 53 struct intel_ring_buffer *ring,
53 u32 invalidate_domains, 54 u32 invalidate_domains,
54 u32 flush_domains) 55 u32 flush_domains)
55{ 56{
56 drm_i915_private_t *dev_priv = dev->dev_private; 57 drm_i915_private_t *dev_priv = dev->dev_private;
57 u32 cmd; 58 u32 cmd;
@@ -97,7 +98,7 @@ render_ring_flush(struct drm_device *dev,
97 if ((invalidate_domains|flush_domains) & 98 if ((invalidate_domains|flush_domains) &
98 I915_GEM_DOMAIN_RENDER) 99 I915_GEM_DOMAIN_RENDER)
99 cmd &= ~MI_NO_WRITE_FLUSH; 100 cmd &= ~MI_NO_WRITE_FLUSH;
100 if (!IS_I965G(dev)) { 101 if (INTEL_INFO(dev)->gen < 4) {
101 /* 102 /*
102 * On the 965, the sampler cache always gets flushed 103 * On the 965, the sampler cache always gets flushed
103 * and this bit is reserved. 104 * and this bit is reserved.
@@ -118,38 +119,26 @@ render_ring_flush(struct drm_device *dev,
118 } 119 }
119} 120}
120 121
121static unsigned int render_ring_get_head(struct drm_device *dev, 122static void ring_write_tail(struct drm_device *dev,
122 struct intel_ring_buffer *ring) 123 struct intel_ring_buffer *ring,
123{ 124 u32 value)
124 drm_i915_private_t *dev_priv = dev->dev_private;
125 return I915_READ(PRB0_HEAD) & HEAD_ADDR;
126}
127
128static unsigned int render_ring_get_tail(struct drm_device *dev,
129 struct intel_ring_buffer *ring)
130{ 125{
131 drm_i915_private_t *dev_priv = dev->dev_private; 126 drm_i915_private_t *dev_priv = dev->dev_private;
132 return I915_READ(PRB0_TAIL) & TAIL_ADDR; 127 I915_WRITE_TAIL(ring, value);
133} 128}
134 129
135static unsigned int render_ring_get_active_head(struct drm_device *dev, 130u32 intel_ring_get_active_head(struct drm_device *dev,
136 struct intel_ring_buffer *ring) 131 struct intel_ring_buffer *ring)
137{ 132{
138 drm_i915_private_t *dev_priv = dev->dev_private; 133 drm_i915_private_t *dev_priv = dev->dev_private;
139 u32 acthd_reg = IS_I965G(dev) ? ACTHD_I965 : ACTHD; 134 u32 acthd_reg = INTEL_INFO(dev)->gen >= 4 ?
135 RING_ACTHD(ring->mmio_base) : ACTHD;
140 136
141 return I915_READ(acthd_reg); 137 return I915_READ(acthd_reg);
142} 138}
143 139
144static void render_ring_advance_ring(struct drm_device *dev,
145 struct intel_ring_buffer *ring)
146{
147 drm_i915_private_t *dev_priv = dev->dev_private;
148 I915_WRITE(PRB0_TAIL, ring->tail);
149}
150
151static int init_ring_common(struct drm_device *dev, 140static int init_ring_common(struct drm_device *dev,
152 struct intel_ring_buffer *ring) 141 struct intel_ring_buffer *ring)
153{ 142{
154 u32 head; 143 u32 head;
155 drm_i915_private_t *dev_priv = dev->dev_private; 144 drm_i915_private_t *dev_priv = dev->dev_private;
@@ -157,57 +146,57 @@ static int init_ring_common(struct drm_device *dev,
157 obj_priv = to_intel_bo(ring->gem_object); 146 obj_priv = to_intel_bo(ring->gem_object);
158 147
159 /* Stop the ring if it's running. */ 148 /* Stop the ring if it's running. */
160 I915_WRITE(ring->regs.ctl, 0); 149 I915_WRITE_CTL(ring, 0);
161 I915_WRITE(ring->regs.head, 0); 150 I915_WRITE_HEAD(ring, 0);
162 I915_WRITE(ring->regs.tail, 0); 151 ring->write_tail(dev, ring, 0);
163 152
164 /* Initialize the ring. */ 153 /* Initialize the ring. */
165 I915_WRITE(ring->regs.start, obj_priv->gtt_offset); 154 I915_WRITE_START(ring, obj_priv->gtt_offset);
166 head = ring->get_head(dev, ring); 155 head = I915_READ_HEAD(ring) & HEAD_ADDR;
167 156
168 /* G45 ring initialization fails to reset head to zero */ 157 /* G45 ring initialization fails to reset head to zero */
169 if (head != 0) { 158 if (head != 0) {
170 DRM_ERROR("%s head not reset to zero " 159 DRM_ERROR("%s head not reset to zero "
171 "ctl %08x head %08x tail %08x start %08x\n", 160 "ctl %08x head %08x tail %08x start %08x\n",
172 ring->name, 161 ring->name,
173 I915_READ(ring->regs.ctl), 162 I915_READ_CTL(ring),
174 I915_READ(ring->regs.head), 163 I915_READ_HEAD(ring),
175 I915_READ(ring->regs.tail), 164 I915_READ_TAIL(ring),
176 I915_READ(ring->regs.start)); 165 I915_READ_START(ring));
177 166
178 I915_WRITE(ring->regs.head, 0); 167 I915_WRITE_HEAD(ring, 0);
179 168
180 DRM_ERROR("%s head forced to zero " 169 DRM_ERROR("%s head forced to zero "
181 "ctl %08x head %08x tail %08x start %08x\n", 170 "ctl %08x head %08x tail %08x start %08x\n",
182 ring->name, 171 ring->name,
183 I915_READ(ring->regs.ctl), 172 I915_READ_CTL(ring),
184 I915_READ(ring->regs.head), 173 I915_READ_HEAD(ring),
185 I915_READ(ring->regs.tail), 174 I915_READ_TAIL(ring),
186 I915_READ(ring->regs.start)); 175 I915_READ_START(ring));
187 } 176 }
188 177
189 I915_WRITE(ring->regs.ctl, 178 I915_WRITE_CTL(ring,
190 ((ring->gem_object->size - PAGE_SIZE) & RING_NR_PAGES) 179 ((ring->gem_object->size - PAGE_SIZE) & RING_NR_PAGES)
191 | RING_NO_REPORT | RING_VALID); 180 | RING_NO_REPORT | RING_VALID);
192 181
193 head = I915_READ(ring->regs.head) & HEAD_ADDR; 182 head = I915_READ_HEAD(ring) & HEAD_ADDR;
194 /* If the head is still not zero, the ring is dead */ 183 /* If the head is still not zero, the ring is dead */
195 if (head != 0) { 184 if (head != 0) {
196 DRM_ERROR("%s initialization failed " 185 DRM_ERROR("%s initialization failed "
197 "ctl %08x head %08x tail %08x start %08x\n", 186 "ctl %08x head %08x tail %08x start %08x\n",
198 ring->name, 187 ring->name,
199 I915_READ(ring->regs.ctl), 188 I915_READ_CTL(ring),
200 I915_READ(ring->regs.head), 189 I915_READ_HEAD(ring),
201 I915_READ(ring->regs.tail), 190 I915_READ_TAIL(ring),
202 I915_READ(ring->regs.start)); 191 I915_READ_START(ring));
203 return -EIO; 192 return -EIO;
204 } 193 }
205 194
206 if (!drm_core_check_feature(dev, DRIVER_MODESET)) 195 if (!drm_core_check_feature(dev, DRIVER_MODESET))
207 i915_kernel_lost_context(dev); 196 i915_kernel_lost_context(dev);
208 else { 197 else {
209 ring->head = ring->get_head(dev, ring); 198 ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
210 ring->tail = ring->get_tail(dev, ring); 199 ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
211 ring->space = ring->head - (ring->tail + 8); 200 ring->space = ring->head - (ring->tail + 8);
212 if (ring->space < 0) 201 if (ring->space < 0)
213 ring->space += ring->size; 202 ring->space += ring->size;
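The modeset branch then rebuilds the software view of the ring: free space is the gap from the tail back around to the head, with an 8-byte guard so the tail never catches the head exactly; a negative result just means the gap wraps past the end of the buffer. The computation in isolation:

#include <stdio.h>

static int ring_space(int head, int tail, int size)
{
	int space = head - (tail + 8);  /* 8-byte guard before head */

	if (space < 0)
		space += size;          /* gap wraps around the ring */
	return space;
}

int main(void)
{
	printf("%d\n", ring_space(0x100, 0x080, 4096)); /* head ahead: 120  */
	printf("%d\n", ring_space(0x080, 0x100, 4096)); /* wrapped:    3960 */
	return 0;
}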
@@ -216,13 +205,13 @@ static int init_ring_common(struct drm_device *dev,
216} 205}
217 206
218static int init_render_ring(struct drm_device *dev, 207static int init_render_ring(struct drm_device *dev,
219 struct intel_ring_buffer *ring) 208 struct intel_ring_buffer *ring)
220{ 209{
221 drm_i915_private_t *dev_priv = dev->dev_private; 210 drm_i915_private_t *dev_priv = dev->dev_private;
222 int ret = init_ring_common(dev, ring); 211 int ret = init_ring_common(dev, ring);
223 int mode; 212 int mode;
224 213
225 if (IS_I9XX(dev) && !IS_GEN3(dev)) { 214 if (INTEL_INFO(dev)->gen > 3) {
226 mode = VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH; 215 mode = VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH;
227 if (IS_GEN6(dev)) 216 if (IS_GEN6(dev))
228 mode |= MI_FLUSH_ENABLE << 16 | MI_FLUSH_ENABLE; 217 mode |= MI_FLUSH_ENABLE << 16 | MI_FLUSH_ENABLE;
@@ -250,9 +239,8 @@ do { \
250 */ 239 */
251static u32 240static u32
252render_ring_add_request(struct drm_device *dev, 241render_ring_add_request(struct drm_device *dev,
253 struct intel_ring_buffer *ring, 242 struct intel_ring_buffer *ring,
254 struct drm_file *file_priv, 243 u32 flush_domains)
255 u32 flush_domains)
256{ 244{
257 drm_i915_private_t *dev_priv = dev->dev_private; 245 drm_i915_private_t *dev_priv = dev->dev_private;
258 u32 seqno; 246 u32 seqno;
@@ -315,8 +303,8 @@ render_ring_add_request(struct drm_device *dev,
315} 303}
316 304
317static u32 305static u32
318render_ring_get_gem_seqno(struct drm_device *dev, 306render_ring_get_seqno(struct drm_device *dev,
319 struct intel_ring_buffer *ring) 307 struct intel_ring_buffer *ring)
320{ 308{
321 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 309 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
322 if (HAS_PIPE_CONTROL(dev)) 310 if (HAS_PIPE_CONTROL(dev))
@@ -327,7 +315,7 @@ render_ring_get_gem_seqno(struct drm_device *dev,
327 315
328static void 316static void
329render_ring_get_user_irq(struct drm_device *dev, 317render_ring_get_user_irq(struct drm_device *dev,
330 struct intel_ring_buffer *ring) 318 struct intel_ring_buffer *ring)
331{ 319{
332 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 320 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
333 unsigned long irqflags; 321 unsigned long irqflags;
@@ -344,7 +332,7 @@ render_ring_get_user_irq(struct drm_device *dev,
344 332
345static void 333static void
346render_ring_put_user_irq(struct drm_device *dev, 334render_ring_put_user_irq(struct drm_device *dev,
347 struct intel_ring_buffer *ring) 335 struct intel_ring_buffer *ring)
348{ 336{
349 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 337 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
350 unsigned long irqflags; 338 unsigned long irqflags;
@@ -360,21 +348,23 @@ render_ring_put_user_irq(struct drm_device *dev,
360 spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags); 348 spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
361} 349}
362 350
363static void render_setup_status_page(struct drm_device *dev, 351void intel_ring_setup_status_page(struct drm_device *dev,
364 struct intel_ring_buffer *ring) 352 struct intel_ring_buffer *ring)
365{ 353{
366 drm_i915_private_t *dev_priv = dev->dev_private; 354 drm_i915_private_t *dev_priv = dev->dev_private;
367 if (IS_GEN6(dev)) { 355 if (IS_GEN6(dev)) {
368 I915_WRITE(HWS_PGA_GEN6, ring->status_page.gfx_addr); 356 I915_WRITE(RING_HWS_PGA_GEN6(ring->mmio_base),
369 I915_READ(HWS_PGA_GEN6); /* posting read */ 357 ring->status_page.gfx_addr);
358 I915_READ(RING_HWS_PGA_GEN6(ring->mmio_base)); /* posting read */
370 } else { 359 } else {
371 I915_WRITE(HWS_PGA, ring->status_page.gfx_addr); 360 I915_WRITE(RING_HWS_PGA(ring->mmio_base),
372 I915_READ(HWS_PGA); /* posting read */ 361 ring->status_page.gfx_addr);
362 I915_READ(RING_HWS_PGA(ring->mmio_base)); /* posting read */
373 } 363 }
374 364
375} 365}
376 366
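Two things are worth noting in the rewritten setup above. First, the per-ring register macros are plain offsets from mmio_base, which is what lets one exported function serve every ring. Second, the I915_READ after each I915_WRITE is a posting read: it forces the preceding MMIO write out to the device before execution continues.

        /* From this series' i915_reg.h (hedged; shown for orientation): */
        #define RING_HWS_PGA(base)      ((base) + 0x80)
        #define RING_HWS_PGA_GEN6(base) ((base) + 0x2080)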
377void 367static void
378bsd_ring_flush(struct drm_device *dev, 368bsd_ring_flush(struct drm_device *dev,
379 struct intel_ring_buffer *ring, 369 struct intel_ring_buffer *ring,
380 u32 invalidate_domains, 370 u32 invalidate_domains,
@@ -386,45 +376,16 @@ bsd_ring_flush(struct drm_device *dev,
386 intel_ring_advance(dev, ring); 376 intel_ring_advance(dev, ring);
387} 377}
388 378
389static inline unsigned int bsd_ring_get_head(struct drm_device *dev,
390 struct intel_ring_buffer *ring)
391{
392 drm_i915_private_t *dev_priv = dev->dev_private;
393 return I915_READ(BSD_RING_HEAD) & HEAD_ADDR;
394}
395
396static inline unsigned int bsd_ring_get_tail(struct drm_device *dev,
397 struct intel_ring_buffer *ring)
398{
399 drm_i915_private_t *dev_priv = dev->dev_private;
400 return I915_READ(BSD_RING_TAIL) & TAIL_ADDR;
401}
402
403static inline unsigned int bsd_ring_get_active_head(struct drm_device *dev,
404 struct intel_ring_buffer *ring)
405{
406 drm_i915_private_t *dev_priv = dev->dev_private;
407 return I915_READ(BSD_RING_ACTHD);
408}
409
410static inline void bsd_ring_advance_ring(struct drm_device *dev,
411 struct intel_ring_buffer *ring)
412{
413 drm_i915_private_t *dev_priv = dev->dev_private;
414 I915_WRITE(BSD_RING_TAIL, ring->tail);
415}
416
417static int init_bsd_ring(struct drm_device *dev, 379static int init_bsd_ring(struct drm_device *dev,
418 struct intel_ring_buffer *ring) 380 struct intel_ring_buffer *ring)
419{ 381{
420 return init_ring_common(dev, ring); 382 return init_ring_common(dev, ring);
421} 383}
422 384
423static u32 385static u32
424bsd_ring_add_request(struct drm_device *dev, 386ring_add_request(struct drm_device *dev,
425 struct intel_ring_buffer *ring, 387 struct intel_ring_buffer *ring,
426 struct drm_file *file_priv, 388 u32 flush_domains)
427 u32 flush_domains)
428{ 389{
429 u32 seqno; 390 u32 seqno;
430 391
@@ -443,40 +404,32 @@ bsd_ring_add_request(struct drm_device *dev,
443 return seqno; 404 return seqno;
444} 405}
445 406
446static void bsd_setup_status_page(struct drm_device *dev,
447 struct intel_ring_buffer *ring)
448{
449 drm_i915_private_t *dev_priv = dev->dev_private;
450 I915_WRITE(BSD_HWS_PGA, ring->status_page.gfx_addr);
451 I915_READ(BSD_HWS_PGA);
452}
453
454static void 407static void
455bsd_ring_get_user_irq(struct drm_device *dev, 408bsd_ring_get_user_irq(struct drm_device *dev,
456 struct intel_ring_buffer *ring) 409 struct intel_ring_buffer *ring)
457{ 410{
458 /* do nothing */ 411 /* do nothing */
459} 412}
460static void 413static void
461bsd_ring_put_user_irq(struct drm_device *dev, 414bsd_ring_put_user_irq(struct drm_device *dev,
462 struct intel_ring_buffer *ring) 415 struct intel_ring_buffer *ring)
463{ 416{
464 /* do nothing */ 417 /* do nothing */
465} 418}
466 419
467static u32 420static u32
468bsd_ring_get_gem_seqno(struct drm_device *dev, 421ring_status_page_get_seqno(struct drm_device *dev,
469 struct intel_ring_buffer *ring) 422 struct intel_ring_buffer *ring)
470{ 423{
471 return intel_read_status_page(ring, I915_GEM_HWS_INDEX); 424 return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
472} 425}
473 426
474static int 427static int
475bsd_ring_dispatch_gem_execbuffer(struct drm_device *dev, 428ring_dispatch_gem_execbuffer(struct drm_device *dev,
476 struct intel_ring_buffer *ring, 429 struct intel_ring_buffer *ring,
477 struct drm_i915_gem_execbuffer2 *exec, 430 struct drm_i915_gem_execbuffer2 *exec,
478 struct drm_clip_rect *cliprects, 431 struct drm_clip_rect *cliprects,
479 uint64_t exec_offset) 432 uint64_t exec_offset)
480{ 433{
481 uint32_t exec_start; 434 uint32_t exec_start;
482 exec_start = (uint32_t) exec_offset + exec->batch_start_offset; 435 exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
@@ -488,13 +441,12 @@ bsd_ring_dispatch_gem_execbuffer(struct drm_device *dev,
488 return 0; 441 return 0;
489} 442}
490 443
491
492static int 444static int
493render_ring_dispatch_gem_execbuffer(struct drm_device *dev, 445render_ring_dispatch_gem_execbuffer(struct drm_device *dev,
494 struct intel_ring_buffer *ring, 446 struct intel_ring_buffer *ring,
495 struct drm_i915_gem_execbuffer2 *exec, 447 struct drm_i915_gem_execbuffer2 *exec,
496 struct drm_clip_rect *cliprects, 448 struct drm_clip_rect *cliprects,
497 uint64_t exec_offset) 449 uint64_t exec_offset)
498{ 450{
499 drm_i915_private_t *dev_priv = dev->dev_private; 451 drm_i915_private_t *dev_priv = dev->dev_private;
500 int nbox = exec->num_cliprects; 452 int nbox = exec->num_cliprects;
@@ -523,8 +475,8 @@ render_ring_dispatch_gem_execbuffer(struct drm_device *dev,
523 intel_ring_emit(dev, ring, exec_start + exec_len - 4); 475 intel_ring_emit(dev, ring, exec_start + exec_len - 4);
524 intel_ring_emit(dev, ring, 0); 476 intel_ring_emit(dev, ring, 0);
525 } else { 477 } else {
526 intel_ring_begin(dev, ring, 4); 478 intel_ring_begin(dev, ring, 2);
527 if (IS_I965G(dev)) { 479 if (INTEL_INFO(dev)->gen >= 4) {
528 intel_ring_emit(dev, ring, 480 intel_ring_emit(dev, ring,
529 MI_BATCH_BUFFER_START | (2 << 6) 481 MI_BATCH_BUFFER_START | (2 << 6)
530 | MI_BATCH_NON_SECURE_I965); 482 | MI_BATCH_NON_SECURE_I965);
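The intel_ring_begin change from 4 to 2 matches the number of dwords actually emitted; the old over-reservation was harmless but could stall waiting for ring space it never used. The gen4+ branch, restated as a sketch (exec_start is computed earlier in this function):

        intel_ring_begin(dev, ring, 2);
        intel_ring_emit(dev, ring,
                        MI_BATCH_BUFFER_START | (2 << 6)
                        | MI_BATCH_NON_SECURE_I965);
        intel_ring_emit(dev, ring, exec_start); /* batch address */
        intel_ring_advance(dev, ring);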
@@ -539,7 +491,7 @@ render_ring_dispatch_gem_execbuffer(struct drm_device *dev,
539 intel_ring_advance(dev, ring); 491 intel_ring_advance(dev, ring);
540 } 492 }
541 493
542 if (IS_G4X(dev) || IS_IRONLAKE(dev)) { 494 if (IS_G4X(dev) || IS_GEN5(dev)) {
543 intel_ring_begin(dev, ring, 2); 495 intel_ring_begin(dev, ring, 2);
544 intel_ring_emit(dev, ring, MI_FLUSH | 496 intel_ring_emit(dev, ring, MI_FLUSH |
545 MI_NO_WRITE_FLUSH | 497 MI_NO_WRITE_FLUSH |
@@ -553,7 +505,7 @@ render_ring_dispatch_gem_execbuffer(struct drm_device *dev,
553} 505}
554 506
555static void cleanup_status_page(struct drm_device *dev, 507static void cleanup_status_page(struct drm_device *dev,
556 struct intel_ring_buffer *ring) 508 struct intel_ring_buffer *ring)
557{ 509{
558 drm_i915_private_t *dev_priv = dev->dev_private; 510 drm_i915_private_t *dev_priv = dev->dev_private;
559 struct drm_gem_object *obj; 511 struct drm_gem_object *obj;
@@ -573,7 +525,7 @@ static void cleanup_status_page(struct drm_device *dev,
573} 525}
574 526
575static int init_status_page(struct drm_device *dev, 527static int init_status_page(struct drm_device *dev,
576 struct intel_ring_buffer *ring) 528 struct intel_ring_buffer *ring)
577{ 529{
578 drm_i915_private_t *dev_priv = dev->dev_private; 530 drm_i915_private_t *dev_priv = dev->dev_private;
579 struct drm_gem_object *obj; 531 struct drm_gem_object *obj;
@@ -603,7 +555,7 @@ static int init_status_page(struct drm_device *dev,
603 ring->status_page.obj = obj; 555 ring->status_page.obj = obj;
604 memset(ring->status_page.page_addr, 0, PAGE_SIZE); 556 memset(ring->status_page.page_addr, 0, PAGE_SIZE);
605 557
606 ring->setup_status_page(dev, ring); 558 intel_ring_setup_status_page(dev, ring);
607 DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n", 559 DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
608 ring->name, ring->status_page.gfx_addr); 560 ring->name, ring->status_page.gfx_addr);
609 561
@@ -617,15 +569,18 @@ err:
617 return ret; 569 return ret;
618} 570}
619 571
620
621int intel_init_ring_buffer(struct drm_device *dev, 572int intel_init_ring_buffer(struct drm_device *dev,
622 struct intel_ring_buffer *ring) 573 struct intel_ring_buffer *ring)
623{ 574{
575 struct drm_i915_private *dev_priv = dev->dev_private;
624 struct drm_i915_gem_object *obj_priv; 576 struct drm_i915_gem_object *obj_priv;
625 struct drm_gem_object *obj; 577 struct drm_gem_object *obj;
626 int ret; 578 int ret;
627 579
628 ring->dev = dev; 580 ring->dev = dev;
581 INIT_LIST_HEAD(&ring->active_list);
582 INIT_LIST_HEAD(&ring->request_list);
583 INIT_LIST_HEAD(&ring->gpu_write_list);
629 584
630 if (I915_NEED_GFX_HWS(dev)) { 585 if (I915_NEED_GFX_HWS(dev)) {
631 ret = init_status_page(dev, ring); 586 ret = init_status_page(dev, ring);
@@ -642,7 +597,7 @@ int intel_init_ring_buffer(struct drm_device *dev,
642 597
643 ring->gem_object = obj; 598 ring->gem_object = obj;
644 599
645 ret = i915_gem_object_pin(obj, ring->alignment); 600 ret = i915_gem_object_pin(obj, PAGE_SIZE);
646 if (ret) 601 if (ret)
647 goto err_unref; 602 goto err_unref;
648 603
@@ -668,14 +623,12 @@ int intel_init_ring_buffer(struct drm_device *dev,
668 if (!drm_core_check_feature(dev, DRIVER_MODESET)) 623 if (!drm_core_check_feature(dev, DRIVER_MODESET))
669 i915_kernel_lost_context(dev); 624 i915_kernel_lost_context(dev);
670 else { 625 else {
671 ring->head = ring->get_head(dev, ring); 626 ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
672 ring->tail = ring->get_tail(dev, ring); 627 ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
673 ring->space = ring->head - (ring->tail + 8); 628 ring->space = ring->head - (ring->tail + 8);
674 if (ring->space < 0) 629 if (ring->space < 0)
675 ring->space += ring->size; 630 ring->space += ring->size;
676 } 631 }
677 INIT_LIST_HEAD(&ring->active_list);
678 INIT_LIST_HEAD(&ring->request_list);
679 return ret; 632 return ret;
680 633
681err_unmap: 634err_unmap:
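The head/tail/space bookkeeping above is the standard circular-buffer computation. As a standalone sketch: head and tail are byte offsets into the ring, and the 8-byte cushion keeps tail from ever catching head exactly, which would be indistinguishable from an empty ring.

        static int ring_space_left(unsigned int head, unsigned int tail,
                                   unsigned int size)
        {
                int space = head - (tail + 8);

                if (space < 0)
                        space += size;  /* head has wrapped around */
                return space;
        }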
@@ -691,7 +644,7 @@ err_hws:
691} 644}
692 645
693void intel_cleanup_ring_buffer(struct drm_device *dev, 646void intel_cleanup_ring_buffer(struct drm_device *dev,
694 struct intel_ring_buffer *ring) 647 struct intel_ring_buffer *ring)
695{ 648{
696 if (ring->gem_object == NULL) 649 if (ring->gem_object == NULL)
697 return; 650 return;
@@ -704,8 +657,8 @@ void intel_cleanup_ring_buffer(struct drm_device *dev,
704 cleanup_status_page(dev, ring); 657 cleanup_status_page(dev, ring);
705} 658}
706 659
707int intel_wrap_ring_buffer(struct drm_device *dev, 660static int intel_wrap_ring_buffer(struct drm_device *dev,
708 struct intel_ring_buffer *ring) 661 struct intel_ring_buffer *ring)
709{ 662{
710 unsigned int *virt; 663 unsigned int *virt;
711 int rem; 664 int rem;
@@ -731,14 +684,15 @@ int intel_wrap_ring_buffer(struct drm_device *dev,
731} 684}
732 685
733int intel_wait_ring_buffer(struct drm_device *dev, 686int intel_wait_ring_buffer(struct drm_device *dev,
734 struct intel_ring_buffer *ring, int n) 687 struct intel_ring_buffer *ring, int n)
735{ 688{
736 unsigned long end; 689 unsigned long end;
690 drm_i915_private_t *dev_priv = dev->dev_private;
737 691
738 trace_i915_ring_wait_begin (dev); 692 trace_i915_ring_wait_begin (dev);
739 end = jiffies + 3 * HZ; 693 end = jiffies + 3 * HZ;
740 do { 694 do {
741 ring->head = ring->get_head(dev, ring); 695 ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
742 ring->space = ring->head - (ring->tail + 8); 696 ring->space = ring->head - (ring->tail + 8);
743 if (ring->space < 0) 697 if (ring->space < 0)
744 ring->space += ring->size; 698 ring->space += ring->size;
@@ -753,14 +707,15 @@ int intel_wait_ring_buffer(struct drm_device *dev,
753 master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT; 707 master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
754 } 708 }
755 709
756 yield(); 710 msleep(1);
757 } while (!time_after(jiffies, end)); 711 } while (!time_after(jiffies, end));
758 trace_i915_ring_wait_end (dev); 712 trace_i915_ring_wait_end (dev);
759 return -EBUSY; 713 return -EBUSY;
760} 714}
761 715
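Note the yield() above becoming msleep(1): a bounded sleep instead of a scheduler hint while polling for ring space. intel_ring_begin/emit/advance form the emit contract used throughout this file: begin() guarantees space for the requested dwords (wrapping the ring or waiting, via the two functions above), emit() writes them, and advance() publishes the new tail through the ring's write_tail hook. A minimal two-dword example:

        intel_ring_begin(dev, ring, 2);
        intel_ring_emit(dev, ring, MI_FLUSH);
        intel_ring_emit(dev, ring, MI_NOOP);
        intel_ring_advance(dev, ring);  /* tail write reaches the HW here */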
762void intel_ring_begin(struct drm_device *dev, 716void intel_ring_begin(struct drm_device *dev,
763 struct intel_ring_buffer *ring, int num_dwords) 717 struct intel_ring_buffer *ring,
718 int num_dwords)
764{ 719{
765 int n = 4*num_dwords; 720 int n = 4*num_dwords;
766 if (unlikely(ring->tail + n > ring->size)) 721 if (unlikely(ring->tail + n > ring->size))
@@ -772,97 +727,181 @@ void intel_ring_begin(struct drm_device *dev,
772} 727}
773 728
774void intel_ring_advance(struct drm_device *dev, 729void intel_ring_advance(struct drm_device *dev,
775 struct intel_ring_buffer *ring) 730 struct intel_ring_buffer *ring)
776{ 731{
777 ring->tail &= ring->size - 1; 732 ring->tail &= ring->size - 1;
778 ring->advance_ring(dev, ring); 733 ring->write_tail(dev, ring, ring->tail);
779}
780
781void intel_fill_struct(struct drm_device *dev,
782 struct intel_ring_buffer *ring,
783 void *data,
784 unsigned int len)
785{
786 unsigned int *virt = ring->virtual_start + ring->tail;
787 BUG_ON((len&~(4-1)) != 0);
788 intel_ring_begin(dev, ring, len/4);
789 memcpy(virt, data, len);
790 ring->tail += len;
791 ring->tail &= ring->size - 1;
792 ring->space -= len;
793 intel_ring_advance(dev, ring);
794} 734}
795 735
796struct intel_ring_buffer render_ring = { 736static const struct intel_ring_buffer render_ring = {
797 .name = "render ring", 737 .name = "render ring",
798 .regs = { 738 .id = RING_RENDER,
799 .ctl = PRB0_CTL, 739 .mmio_base = RENDER_RING_BASE,
800 .head = PRB0_HEAD,
801 .tail = PRB0_TAIL,
802 .start = PRB0_START
803 },
804 .ring_flag = I915_EXEC_RENDER,
805 .size = 32 * PAGE_SIZE, 740 .size = 32 * PAGE_SIZE,
806 .alignment = PAGE_SIZE,
807 .virtual_start = NULL,
808 .dev = NULL,
809 .gem_object = NULL,
810 .head = 0,
811 .tail = 0,
812 .space = 0,
813 .user_irq_refcount = 0,
814 .irq_gem_seqno = 0,
815 .waiting_gem_seqno = 0,
816 .setup_status_page = render_setup_status_page,
817 .init = init_render_ring, 741 .init = init_render_ring,
818 .get_head = render_ring_get_head, 742 .write_tail = ring_write_tail,
819 .get_tail = render_ring_get_tail,
820 .get_active_head = render_ring_get_active_head,
821 .advance_ring = render_ring_advance_ring,
822 .flush = render_ring_flush, 743 .flush = render_ring_flush,
823 .add_request = render_ring_add_request, 744 .add_request = render_ring_add_request,
824 .get_gem_seqno = render_ring_get_gem_seqno, 745 .get_seqno = render_ring_get_seqno,
825 .user_irq_get = render_ring_get_user_irq, 746 .user_irq_get = render_ring_get_user_irq,
826 .user_irq_put = render_ring_put_user_irq, 747 .user_irq_put = render_ring_put_user_irq,
827 .dispatch_gem_execbuffer = render_ring_dispatch_gem_execbuffer, 748 .dispatch_gem_execbuffer = render_ring_dispatch_gem_execbuffer,
828 .status_page = {NULL, 0, NULL},
829 .map = {0,}
830}; 749};
831 750
832/* ring buffer for bit-stream decoder */ 751/* ring buffer for bit-stream decoder */
833 752
834struct intel_ring_buffer bsd_ring = { 753static const struct intel_ring_buffer bsd_ring = {
835 .name = "bsd ring", 754 .name = "bsd ring",
836 .regs = { 755 .id = RING_BSD,
837 .ctl = BSD_RING_CTL, 756 .mmio_base = BSD_RING_BASE,
838 .head = BSD_RING_HEAD,
839 .tail = BSD_RING_TAIL,
840 .start = BSD_RING_START
841 },
842 .ring_flag = I915_EXEC_BSD,
843 .size = 32 * PAGE_SIZE, 757 .size = 32 * PAGE_SIZE,
844 .alignment = PAGE_SIZE,
845 .virtual_start = NULL,
846 .dev = NULL,
847 .gem_object = NULL,
848 .head = 0,
849 .tail = 0,
850 .space = 0,
851 .user_irq_refcount = 0,
852 .irq_gem_seqno = 0,
853 .waiting_gem_seqno = 0,
854 .setup_status_page = bsd_setup_status_page,
855 .init = init_bsd_ring, 758 .init = init_bsd_ring,
856 .get_head = bsd_ring_get_head, 759 .write_tail = ring_write_tail,
857 .get_tail = bsd_ring_get_tail,
858 .get_active_head = bsd_ring_get_active_head,
859 .advance_ring = bsd_ring_advance_ring,
860 .flush = bsd_ring_flush, 760 .flush = bsd_ring_flush,
861 .add_request = bsd_ring_add_request, 761 .add_request = ring_add_request,
862 .get_gem_seqno = bsd_ring_get_gem_seqno, 762 .get_seqno = ring_status_page_get_seqno,
863 .user_irq_get = bsd_ring_get_user_irq, 763 .user_irq_get = bsd_ring_get_user_irq,
864 .user_irq_put = bsd_ring_put_user_irq, 764 .user_irq_put = bsd_ring_put_user_irq,
865 .dispatch_gem_execbuffer = bsd_ring_dispatch_gem_execbuffer, 765 .dispatch_gem_execbuffer = ring_dispatch_gem_execbuffer,
866 .status_page = {NULL, 0, NULL},
867 .map = {0,}
868}; 766};
767
768
769static void gen6_bsd_ring_write_tail(struct drm_device *dev,
770 struct intel_ring_buffer *ring,
771 u32 value)
772{
773 drm_i915_private_t *dev_priv = dev->dev_private;
774
775 /* Every tail move must follow the sequence below */
776 I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
777 GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK |
778 GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_DISABLE);
779 I915_WRITE(GEN6_BSD_RNCID, 0x0);
780
781 if (wait_for((I915_READ(GEN6_BSD_SLEEP_PSMI_CONTROL) &
782 GEN6_BSD_SLEEP_PSMI_CONTROL_IDLE_INDICATOR) == 0,
783 50))
784 DRM_ERROR("timed out waiting for IDLE Indicator\n");
785
786 I915_WRITE_TAIL(ring, value);
787 I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
788 GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK |
789 GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE);
790}
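The sequence above parks the BSD power-management state machine around every tail move: disable the sleep message, poll until the idle indicator reads clear, write the tail, re-enable. wait_for() is an i915 polling macro that evaluates its condition until it holds or the timeout (in milliseconds) expires, returning non-zero on timeout; a hedged, generic sketch of the same poll (helper name hypothetical):

        static int gen6_bsd_poll_idle(drm_i915_private_t *dev_priv,
                                      unsigned int timeout_ms)
        {
                unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);

                while (I915_READ(GEN6_BSD_SLEEP_PSMI_CONTROL) &
                       GEN6_BSD_SLEEP_PSMI_CONTROL_IDLE_INDICATOR) {
                        if (time_after(jiffies, timeout))
                                return -ETIMEDOUT;
                        msleep(1);
                }
                return 0;
        }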
791
792static void gen6_ring_flush(struct drm_device *dev,
793 struct intel_ring_buffer *ring,
794 u32 invalidate_domains,
795 u32 flush_domains)
796{
797 intel_ring_begin(dev, ring, 4);
798 intel_ring_emit(dev, ring, MI_FLUSH_DW);
799 intel_ring_emit(dev, ring, 0);
800 intel_ring_emit(dev, ring, 0);
801 intel_ring_emit(dev, ring, 0);
802 intel_ring_advance(dev, ring);
803}
804
805static int
806gen6_ring_dispatch_gem_execbuffer(struct drm_device *dev,
807 struct intel_ring_buffer *ring,
808 struct drm_i915_gem_execbuffer2 *exec,
809 struct drm_clip_rect *cliprects,
810 uint64_t exec_offset)
811{
812 uint32_t exec_start;
813
814 exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
815
816 intel_ring_begin(dev, ring, 2);
817 intel_ring_emit(dev, ring,
818 MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965);
819 /* bit0-7 is the length on GEN6+ */
820 intel_ring_emit(dev, ring, exec_start);
821 intel_ring_advance(dev, ring);
822
823 return 0;
824}
825
826/* ring buffer for Video Codec for Gen6+ */
827static const struct intel_ring_buffer gen6_bsd_ring = {
828 .name = "gen6 bsd ring",
829 .id = RING_BSD,
830 .mmio_base = GEN6_BSD_RING_BASE,
831 .size = 32 * PAGE_SIZE,
832 .init = init_bsd_ring,
833 .write_tail = gen6_bsd_ring_write_tail,
834 .flush = gen6_ring_flush,
835 .add_request = ring_add_request,
836 .get_seqno = ring_status_page_get_seqno,
837 .user_irq_get = bsd_ring_get_user_irq,
838 .user_irq_put = bsd_ring_put_user_irq,
839 .dispatch_gem_execbuffer = gen6_ring_dispatch_gem_execbuffer,
840};
841
842/* Blitter support (SandyBridge+) */
843
844static void
845blt_ring_get_user_irq(struct drm_device *dev,
846 struct intel_ring_buffer *ring)
847{
848 /* do nothing */
849}
850static void
851blt_ring_put_user_irq(struct drm_device *dev,
852 struct intel_ring_buffer *ring)
853{
854 /* do nothing */
855}
856
857static const struct intel_ring_buffer gen6_blt_ring = {
858 .name = "blt ring",
859 .id = RING_BLT,
860 .mmio_base = BLT_RING_BASE,
861 .size = 32 * PAGE_SIZE,
862 .init = init_ring_common,
863 .write_tail = ring_write_tail,
864 .flush = gen6_ring_flush,
865 .add_request = ring_add_request,
866 .get_seqno = ring_status_page_get_seqno,
867 .user_irq_get = blt_ring_get_user_irq,
868 .user_irq_put = blt_ring_put_user_irq,
869 .dispatch_gem_execbuffer = gen6_ring_dispatch_gem_execbuffer,
870};
871
872int intel_init_render_ring_buffer(struct drm_device *dev)
873{
874 drm_i915_private_t *dev_priv = dev->dev_private;
875
876 dev_priv->render_ring = render_ring;
877
878 if (!I915_NEED_GFX_HWS(dev)) {
879 dev_priv->render_ring.status_page.page_addr
880 = dev_priv->status_page_dmah->vaddr;
881 memset(dev_priv->render_ring.status_page.page_addr,
882 0, PAGE_SIZE);
883 }
884
885 return intel_init_ring_buffer(dev, &dev_priv->render_ring);
886}
887
888int intel_init_bsd_ring_buffer(struct drm_device *dev)
889{
890 drm_i915_private_t *dev_priv = dev->dev_private;
891
892 if (IS_GEN6(dev))
893 dev_priv->bsd_ring = gen6_bsd_ring;
894 else
895 dev_priv->bsd_ring = bsd_ring;
896
897 return intel_init_ring_buffer(dev, &dev_priv->bsd_ring);
898}
899
900int intel_init_blt_ring_buffer(struct drm_device *dev)
901{
902 drm_i915_private_t *dev_priv = dev->dev_private;
903
904 dev_priv->blt_ring = gen6_blt_ring;
905
906 return intel_init_ring_buffer(dev, &dev_priv->blt_ring);
907}
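With the ring templates now static and const, these three functions are the only way rings come into existence. A sketch of the call sites as a driver would use them (error handling elided; HAS_BSD/HAS_BLT are the feature-test macros this series uses elsewhere):

        intel_init_render_ring_buffer(dev);
        if (HAS_BSD(dev))
                intel_init_bsd_ring_buffer(dev);        /* picks gen6 vs legacy */
        if (HAS_BLT(dev))
                intel_init_blt_ring_buffer(dev);        /* SandyBridge+ only */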
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 525e7d3edda8..a05aff0e5764 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -7,25 +7,32 @@ struct intel_hw_status_page {
7 struct drm_gem_object *obj; 7 struct drm_gem_object *obj;
8}; 8};
9 9
10#define I915_READ_TAIL(ring) I915_READ(RING_TAIL(ring->mmio_base))
11#define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL(ring->mmio_base), val)
12#define I915_READ_START(ring) I915_READ(RING_START(ring->mmio_base))
13#define I915_WRITE_START(ring, val) I915_WRITE(RING_START(ring->mmio_base), val)
14#define I915_READ_HEAD(ring) I915_READ(RING_HEAD(ring->mmio_base))
15#define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD(ring->mmio_base), val)
16#define I915_READ_CTL(ring) I915_READ(RING_CTL(ring->mmio_base))
17#define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL(ring->mmio_base), val)
18
10struct drm_i915_gem_execbuffer2; 19struct drm_i915_gem_execbuffer2;
11struct intel_ring_buffer { 20struct intel_ring_buffer {
12 const char *name; 21 const char *name;
13 struct ring_regs { 22 enum intel_ring_id {
14 u32 ctl; 23 RING_RENDER = 0x1,
15 u32 head; 24 RING_BSD = 0x2,
16 u32 tail; 25 RING_BLT = 0x4,
17 u32 start; 26 } id;
18 } regs; 27 u32 mmio_base;
19 unsigned int ring_flag;
20 unsigned long size; 28 unsigned long size;
21 unsigned int alignment;
22 void *virtual_start; 29 void *virtual_start;
23 struct drm_device *dev; 30 struct drm_device *dev;
24 struct drm_gem_object *gem_object; 31 struct drm_gem_object *gem_object;
25 32
26 unsigned int head; 33 unsigned int head;
27 unsigned int tail; 34 unsigned int tail;
28 unsigned int space; 35 int space;
29 struct intel_hw_status_page status_page; 36 struct intel_hw_status_page status_page;
30 37
31 u32 irq_gem_seqno; /* last seq seen at irq time */ 38
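The new accessor macros above make ring registers a function of mmio_base rather than a per-ring #define. Example use (dev_priv must be in scope for I915_READ, as everywhere in the driver):

        u32 head = I915_READ_HEAD(&dev_priv->render_ring) & HEAD_ADDR;
        u32 tail = I915_READ_TAIL(&dev_priv->render_ring) & TAIL_ADDR;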
@@ -35,30 +42,22 @@ struct intel_ring_buffer {
35 struct intel_ring_buffer *ring); 42 struct intel_ring_buffer *ring);
36 void (*user_irq_put)(struct drm_device *dev, 43 void (*user_irq_put)(struct drm_device *dev,
37 struct intel_ring_buffer *ring); 44 struct intel_ring_buffer *ring);
38 void (*setup_status_page)(struct drm_device *dev,
39 struct intel_ring_buffer *ring);
40 45
41 int (*init)(struct drm_device *dev, 46 int (*init)(struct drm_device *dev,
42 struct intel_ring_buffer *ring); 47 struct intel_ring_buffer *ring);
43 48
44 unsigned int (*get_head)(struct drm_device *dev, 49 void (*write_tail)(struct drm_device *dev,
45 struct intel_ring_buffer *ring); 50 struct intel_ring_buffer *ring,
46 unsigned int (*get_tail)(struct drm_device *dev, 51 u32 value);
47 struct intel_ring_buffer *ring);
48 unsigned int (*get_active_head)(struct drm_device *dev,
49 struct intel_ring_buffer *ring);
50 void (*advance_ring)(struct drm_device *dev,
51 struct intel_ring_buffer *ring);
52 void (*flush)(struct drm_device *dev, 52 void (*flush)(struct drm_device *dev,
53 struct intel_ring_buffer *ring, 53 struct intel_ring_buffer *ring,
54 u32 invalidate_domains, 54 u32 invalidate_domains,
55 u32 flush_domains); 55 u32 flush_domains);
56 u32 (*add_request)(struct drm_device *dev, 56 u32 (*add_request)(struct drm_device *dev,
57 struct intel_ring_buffer *ring, 57 struct intel_ring_buffer *ring,
58 struct drm_file *file_priv,
59 u32 flush_domains); 58 u32 flush_domains);
60 u32 (*get_gem_seqno)(struct drm_device *dev, 59 u32 (*get_seqno)(struct drm_device *dev,
61 struct intel_ring_buffer *ring); 60 struct intel_ring_buffer *ring);
62 int (*dispatch_gem_execbuffer)(struct drm_device *dev, 61 int (*dispatch_gem_execbuffer)(struct drm_device *dev,
63 struct intel_ring_buffer *ring, 62 struct intel_ring_buffer *ring,
64 struct drm_i915_gem_execbuffer2 *exec, 63 struct drm_i915_gem_execbuffer2 *exec,
@@ -83,6 +82,20 @@ struct intel_ring_buffer {
83 */ 82 */
84 struct list_head request_list; 83 struct list_head request_list;
85 84
85 /**
86 * List of objects currently pending a GPU write flush.
87 *
88 * All elements on this list will belong to either the
89 * active_list or flushing_list, last_rendering_seqno can
90 * be used to differentiate between the two elements.
91 */
92 struct list_head gpu_write_list;
93
94 /**
95 * Do we have some not yet emitted requests outstanding?
96 */
97 bool outstanding_lazy_request;
98
86 wait_queue_head_t irq_queue; 99 wait_queue_head_t irq_queue;
87 drm_local_map_t map; 100 drm_local_map_t map;
88}; 101};
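For reference, the seqno read used by ring_status_page_get_seqno is just an index into the CPU-visible status page; roughly the existing inline in this header:

        static inline u32
        intel_read_status_page(struct intel_ring_buffer *ring, int reg)
        {
                u32 *regs = ring->status_page.page_addr;
                return regs[reg];
        }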
@@ -96,15 +109,13 @@ intel_read_status_page(struct intel_ring_buffer *ring,
96} 109}
97 110
98int intel_init_ring_buffer(struct drm_device *dev, 111int intel_init_ring_buffer(struct drm_device *dev,
99 struct intel_ring_buffer *ring); 112 struct intel_ring_buffer *ring);
100void intel_cleanup_ring_buffer(struct drm_device *dev, 113void intel_cleanup_ring_buffer(struct drm_device *dev,
101 struct intel_ring_buffer *ring); 114 struct intel_ring_buffer *ring);
102int intel_wait_ring_buffer(struct drm_device *dev, 115int intel_wait_ring_buffer(struct drm_device *dev,
103 struct intel_ring_buffer *ring, int n); 116 struct intel_ring_buffer *ring, int n);
104int intel_wrap_ring_buffer(struct drm_device *dev,
105 struct intel_ring_buffer *ring);
106void intel_ring_begin(struct drm_device *dev, 117void intel_ring_begin(struct drm_device *dev,
107 struct intel_ring_buffer *ring, int n); 118 struct intel_ring_buffer *ring, int n);
108 119
109static inline void intel_ring_emit(struct drm_device *dev, 120static inline void intel_ring_emit(struct drm_device *dev,
110 struct intel_ring_buffer *ring, 121 struct intel_ring_buffer *ring,
@@ -115,17 +126,19 @@ static inline void intel_ring_emit(struct drm_device *dev,
115 ring->tail += 4; 126 ring->tail += 4;
116} 127}
117 128
118void intel_fill_struct(struct drm_device *dev,
119 struct intel_ring_buffer *ring,
120 void *data,
121 unsigned int len);
122void intel_ring_advance(struct drm_device *dev, 129void intel_ring_advance(struct drm_device *dev,
123 struct intel_ring_buffer *ring); 130 struct intel_ring_buffer *ring);
124 131
125u32 intel_ring_get_seqno(struct drm_device *dev, 132u32 intel_ring_get_seqno(struct drm_device *dev,
126 struct intel_ring_buffer *ring); 133 struct intel_ring_buffer *ring);
127 134
128extern struct intel_ring_buffer render_ring; 135int intel_init_render_ring_buffer(struct drm_device *dev);
129extern struct intel_ring_buffer bsd_ring; 136int intel_init_bsd_ring_buffer(struct drm_device *dev);
137int intel_init_blt_ring_buffer(struct drm_device *dev);
138
139u32 intel_ring_get_active_head(struct drm_device *dev,
140 struct intel_ring_buffer *ring);
141void intel_ring_setup_status_page(struct drm_device *dev,
142 struct intel_ring_buffer *ring);
130 143
131#endif /* _INTEL_RINGBUFFER_H_ */ 144#endif /* _INTEL_RINGBUFFER_H_ */
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index ee73e428a84a..de158b76bcd5 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -65,8 +65,11 @@ static const char *tv_format_names[] = {
65struct intel_sdvo { 65struct intel_sdvo {
66 struct intel_encoder base; 66 struct intel_encoder base;
67 67
68 struct i2c_adapter *i2c;
68 u8 slave_addr; 69 u8 slave_addr;
69 70
71 struct i2c_adapter ddc;
72
70 /* Register for the SDVO device: SDVOB or SDVOC */ 73 /* Register for the SDVO device: SDVOB or SDVOC */
71 int sdvo_reg; 74 int sdvo_reg;
72 75
@@ -104,34 +107,24 @@ struct intel_sdvo {
104 * This is set if we treat the device as HDMI, instead of DVI. 107 * This is set if we treat the device as HDMI, instead of DVI.
105 */ 108 */
106 bool is_hdmi; 109 bool is_hdmi;
110 bool has_audio;
107 111
108 /** 112 /**
109 * This is set if we detect output of sdvo device as LVDS. 113 * This is set if we detect output of sdvo device as LVDS and
114 * have a valid fixed mode to use with the panel.
110 */ 115 */
111 bool is_lvds; 116 bool is_lvds;
112 117
113 /** 118 /**
114 * This is sdvo flags for input timing.
115 */
116 uint8_t sdvo_flags;
117
118 /**
119 * This is the sdvo fixed panel mode pointer 119
120 */ 120 */
121 struct drm_display_mode *sdvo_lvds_fixed_mode; 121 struct drm_display_mode *sdvo_lvds_fixed_mode;
122 122
123 /*
124 * supported encoding mode, used to determine whether HDMI is
125 * supported
126 */
127 struct intel_sdvo_encode encode;
128
129 /* DDC bus used by this SDVO encoder */ 123 /* DDC bus used by this SDVO encoder */
130 uint8_t ddc_bus; 124 uint8_t ddc_bus;
131 125
132 /* Mac mini hack -- use the same DDC as the analog connector */ 126 /* Input timings for adjusted_mode */
133 struct i2c_adapter *analog_ddc_bus; 127 struct intel_sdvo_dtd input_dtd;
134
135}; 128};
136 129
137struct intel_sdvo_connector { 130struct intel_sdvo_connector {
@@ -140,11 +133,15 @@ struct intel_sdvo_connector {
140 /* Mark the type of connector */ 133 /* Mark the type of connector */
141 uint16_t output_flag; 134 uint16_t output_flag;
142 135
136 int force_audio;
137
143 /* This contains all current supported TV format */ 138 /* This contains all current supported TV format */
144 u8 tv_format_supported[TV_FORMAT_NUM]; 139 u8 tv_format_supported[TV_FORMAT_NUM];
145 int format_supported_num; 140 int format_supported_num;
146 struct drm_property *tv_format; 141 struct drm_property *tv_format;
147 142
143 struct drm_property *force_audio_property;
144
148 /* add the property for the SDVO-TV */ 145 /* add the property for the SDVO-TV */
149 struct drm_property *left; 146 struct drm_property *left;
150 struct drm_property *right; 147 struct drm_property *right;
@@ -186,9 +183,15 @@ struct intel_sdvo_connector {
186 u32 cur_dot_crawl, max_dot_crawl; 183 u32 cur_dot_crawl, max_dot_crawl;
187}; 184};
188 185
189static struct intel_sdvo *enc_to_intel_sdvo(struct drm_encoder *encoder) 186static struct intel_sdvo *to_intel_sdvo(struct drm_encoder *encoder)
187{
188 return container_of(encoder, struct intel_sdvo, base.base);
189}
190
191static struct intel_sdvo *intel_attached_sdvo(struct drm_connector *connector)
190{ 192{
191 return container_of(enc_to_intel_encoder(encoder), struct intel_sdvo, base); 193 return container_of(intel_attached_encoder(connector),
194 struct intel_sdvo, base);
192} 195}
193 196
194static struct intel_sdvo_connector *to_intel_sdvo_connector(struct drm_connector *connector) 197static struct intel_sdvo_connector *to_intel_sdvo_connector(struct drm_connector *connector)
@@ -213,7 +216,7 @@ intel_sdvo_create_enhance_property(struct intel_sdvo *intel_sdvo,
213 */ 216 */
214static void intel_sdvo_write_sdvox(struct intel_sdvo *intel_sdvo, u32 val) 217static void intel_sdvo_write_sdvox(struct intel_sdvo *intel_sdvo, u32 val)
215{ 218{
216 struct drm_device *dev = intel_sdvo->base.enc.dev; 219 struct drm_device *dev = intel_sdvo->base.base.dev;
217 struct drm_i915_private *dev_priv = dev->dev_private; 220 struct drm_i915_private *dev_priv = dev->dev_private;
218 u32 bval = val, cval = val; 221 u32 bval = val, cval = val;
219 int i; 222 int i;
@@ -245,49 +248,29 @@ static void intel_sdvo_write_sdvox(struct intel_sdvo *intel_sdvo, u32 val)
245 248
246static bool intel_sdvo_read_byte(struct intel_sdvo *intel_sdvo, u8 addr, u8 *ch) 249static bool intel_sdvo_read_byte(struct intel_sdvo *intel_sdvo, u8 addr, u8 *ch)
247{ 250{
248 u8 out_buf[2] = { addr, 0 };
249 u8 buf[2];
250 struct i2c_msg msgs[] = { 251 struct i2c_msg msgs[] = {
251 { 252 {
252 .addr = intel_sdvo->slave_addr >> 1, 253 .addr = intel_sdvo->slave_addr,
253 .flags = 0, 254 .flags = 0,
254 .len = 1, 255 .len = 1,
255 .buf = out_buf, 256 .buf = &addr,
256 }, 257 },
257 { 258 {
258 .addr = intel_sdvo->slave_addr >> 1, 259 .addr = intel_sdvo->slave_addr,
259 .flags = I2C_M_RD, 260 .flags = I2C_M_RD,
260 .len = 1, 261 .len = 1,
261 .buf = buf, 262 .buf = ch,
262 } 263 }
263 }; 264 };
264 int ret; 265 int ret;
265 266
266 if ((ret = i2c_transfer(intel_sdvo->base.i2c_bus, msgs, 2)) == 2) 267 if ((ret = i2c_transfer(intel_sdvo->i2c, msgs, 2)) == 2)
267 {
268 *ch = buf[0];
269 return true; 268 return true;
270 }
271 269
272 DRM_DEBUG_KMS("i2c transfer returned %d\n", ret); 270 DRM_DEBUG_KMS("i2c transfer returned %d\n", ret);
273 return false; 271 return false;
274} 272}
275 273
276static bool intel_sdvo_write_byte(struct intel_sdvo *intel_sdvo, int addr, u8 ch)
277{
278 u8 out_buf[2] = { addr, ch };
279 struct i2c_msg msgs[] = {
280 {
281 .addr = intel_sdvo->slave_addr >> 1,
282 .flags = 0,
283 .len = 2,
284 .buf = out_buf,
285 }
286 };
287
288 return i2c_transfer(intel_sdvo->base.i2c_bus, msgs, 1) == 1;
289}
290
291#define SDVO_CMD_NAME_ENTRY(cmd) {cmd, #cmd} 274#define SDVO_CMD_NAME_ENTRY(cmd) {cmd, #cmd}
292/** Mapping of command numbers to names, for debug output */ 275/** Mapping of command numbers to names, for debug output */
293static const struct _sdvo_cmd_name { 276static const struct _sdvo_cmd_name {
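The simplified intel_sdvo_read_byte above is the canonical write-then-read i2c access: one message selects the register, a second with I2C_M_RD reads it back within the same combined transaction. Note i2c_msg.addr takes the 7-bit address, which is why the '>> 1' at every call site can go once slave_addr is stored pre-shifted. As a standalone sketch (names are placeholders):

        static bool sdvo_reg_read(struct i2c_adapter *adapter, u16 addr7,
                                  u8 reg, u8 *val)
        {
                struct i2c_msg msgs[] = {
                        { .addr = addr7, .flags = 0,        .len = 1, .buf = &reg },
                        { .addr = addr7, .flags = I2C_M_RD, .len = 1, .buf = val },
                };

                return i2c_transfer(adapter, msgs, 2) == 2;
        }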
@@ -432,22 +415,6 @@ static void intel_sdvo_debug_write(struct intel_sdvo *intel_sdvo, u8 cmd,
432 DRM_LOG_KMS("\n"); 415 DRM_LOG_KMS("\n");
433} 416}
434 417
435static bool intel_sdvo_write_cmd(struct intel_sdvo *intel_sdvo, u8 cmd,
436 const void *args, int args_len)
437{
438 int i;
439
440 intel_sdvo_debug_write(intel_sdvo, cmd, args, args_len);
441
442 for (i = 0; i < args_len; i++) {
443 if (!intel_sdvo_write_byte(intel_sdvo, SDVO_I2C_ARG_0 - i,
444 ((u8*)args)[i]))
445 return false;
446 }
447
448 return intel_sdvo_write_byte(intel_sdvo, SDVO_I2C_OPCODE, cmd);
449}
450
451static const char *cmd_status_names[] = { 418static const char *cmd_status_names[] = {
452 "Power on", 419 "Power on",
453 "Success", 420 "Success",
@@ -458,54 +425,115 @@ static const char *cmd_status_names[] = {
458 "Scaling not supported" 425 "Scaling not supported"
459}; 426};
460 427
461static void intel_sdvo_debug_response(struct intel_sdvo *intel_sdvo, 428static bool intel_sdvo_write_cmd(struct intel_sdvo *intel_sdvo, u8 cmd,
462 void *response, int response_len, 429 const void *args, int args_len)
463 u8 status)
464{ 430{
465 int i; 431 u8 buf[args_len*2 + 2], status;
432 struct i2c_msg msgs[args_len + 3];
433 int i, ret;
466 434
467 DRM_DEBUG_KMS("%s: R: ", SDVO_NAME(intel_sdvo)); 435 intel_sdvo_debug_write(intel_sdvo, cmd, args, args_len);
468 for (i = 0; i < response_len; i++) 436
469 DRM_LOG_KMS("%02X ", ((u8 *)response)[i]); 437 for (i = 0; i < args_len; i++) {
470 for (; i < 8; i++) 438 msgs[i].addr = intel_sdvo->slave_addr;
471 DRM_LOG_KMS(" "); 439 msgs[i].flags = 0;
472 if (status <= SDVO_CMD_STATUS_SCALING_NOT_SUPP) 440 msgs[i].len = 2;
473 DRM_LOG_KMS("(%s)", cmd_status_names[status]); 441 msgs[i].buf = buf + 2 *i;
474 else 442 buf[2*i + 0] = SDVO_I2C_ARG_0 - i;
475 DRM_LOG_KMS("(??? %d)", status); 443 buf[2*i + 1] = ((u8*)args)[i];
476 DRM_LOG_KMS("\n"); 444 }
445 msgs[i].addr = intel_sdvo->slave_addr;
446 msgs[i].flags = 0;
447 msgs[i].len = 2;
448 msgs[i].buf = buf + 2*i;
449 buf[2*i + 0] = SDVO_I2C_OPCODE;
450 buf[2*i + 1] = cmd;
451
452 /* the following two are to read the response */
453 status = SDVO_I2C_CMD_STATUS;
454 msgs[i+1].addr = intel_sdvo->slave_addr;
455 msgs[i+1].flags = 0;
456 msgs[i+1].len = 1;
457 msgs[i+1].buf = &status;
458
459 msgs[i+2].addr = intel_sdvo->slave_addr;
460 msgs[i+2].flags = I2C_M_RD;
461 msgs[i+2].len = 1;
462 msgs[i+2].buf = &status;
463
464 ret = i2c_transfer(intel_sdvo->i2c, msgs, i+3);
465 if (ret < 0) {
466 DRM_DEBUG_KMS("I2c transfer returned %d\n", ret);
467 return false;
468 }
469 if (ret != i+3) {
470 /* failure in I2C transfer */
471 DRM_DEBUG_KMS("I2c transfer returned %d/%d\n", ret, i+3);
472 return false;
473 }
474
475 i = 3;
476 while (status == SDVO_CMD_STATUS_PENDING && i--) {
477 if (!intel_sdvo_read_byte(intel_sdvo,
478 SDVO_I2C_CMD_STATUS,
479 &status))
480 return false;
481 }
482 if (status != SDVO_CMD_STATUS_SUCCESS) {
483 DRM_DEBUG_KMS("command returns response %s [%d]\n",
484 status <= SDVO_CMD_STATUS_SCALING_NOT_SUPP ? cmd_status_names[status] : "???",
485 status);
486 return false;
487 }
488
489 return true;
477} 490}
478 491
479static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo, 492static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo,
480 void *response, int response_len) 493 void *response, int response_len)
481{ 494{
482 int i; 495 u8 retry = 5;
483 u8 status; 496 u8 status;
484 u8 retry = 50; 497 int i;
485
486 while (retry--) {
487 /* Read the command response */
488 for (i = 0; i < response_len; i++) {
489 if (!intel_sdvo_read_byte(intel_sdvo,
490 SDVO_I2C_RETURN_0 + i,
491 &((u8 *)response)[i]))
492 return false;
493 }
494 498
495 /* read the return status */ 499 /*
496 if (!intel_sdvo_read_byte(intel_sdvo, SDVO_I2C_CMD_STATUS, 500 * The documentation states that all commands will be
501 * processed within 15µs, and that we need only poll
502 * the status byte a maximum of 3 times in order for the
503 * command to be complete.
504 *
505 * Check 5 times in case the hardware failed to read the docs.
506 */
507 do {
508 if (!intel_sdvo_read_byte(intel_sdvo,
509 SDVO_I2C_CMD_STATUS,
497 &status)) 510 &status))
498 return false; 511 return false;
512 } while (status == SDVO_CMD_STATUS_PENDING && --retry);
499 513
500 intel_sdvo_debug_response(intel_sdvo, response, response_len, 514 DRM_DEBUG_KMS("%s: R: ", SDVO_NAME(intel_sdvo));
501 status); 515 if (status <= SDVO_CMD_STATUS_SCALING_NOT_SUPP)
502 if (status != SDVO_CMD_STATUS_PENDING) 516 DRM_LOG_KMS("(%s)", cmd_status_names[status]);
503 break; 517 else
518 DRM_LOG_KMS("(??? %d)", status);
504 519
505 mdelay(50); 520 if (status != SDVO_CMD_STATUS_SUCCESS)
521 goto log_fail;
522
523 /* Read the command response */
524 for (i = 0; i < response_len; i++) {
525 if (!intel_sdvo_read_byte(intel_sdvo,
526 SDVO_I2C_RETURN_0 + i,
527 &((u8 *)response)[i]))
528 goto log_fail;
529 DRM_LOG_KMS(" %02X", ((u8 *)response)[i]);
506 } 530 }
531 DRM_LOG_KMS("\n");
532 return true;
507 533
508 return status == SDVO_CMD_STATUS_SUCCESS; 534log_fail:
535 DRM_LOG_KMS("\n");
536 return false;
509} 537}
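The rewritten intel_sdvo_write_cmd above batches the argument writes, the opcode write, and the first status read into a single i2c_transfer, which both cuts bus turnarounds and guarantees no other traffic lands mid-command. The message layout for a two-argument command, annotated:

        /* i2c_msg layout built by intel_sdvo_write_cmd for args_len == 2:
         *   msgs[0]: write { SDVO_I2C_ARG_0,     args[0] }
         *   msgs[1]: write { SDVO_I2C_ARG_0 - 1, args[1] }
         *   msgs[2]: write { SDVO_I2C_OPCODE,    cmd     }
         *   msgs[3]: write { SDVO_I2C_CMD_STATUS }        <- select status reg
         *   msgs[4]: read  -> status                      <- combined read-back
         * buf[] and msgs[] are C99 variable-length arrays sized from
         * args_len, which stays small for SDVO commands. */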
510 538
511static int intel_sdvo_get_pixel_multiplier(struct drm_display_mode *mode) 539static int intel_sdvo_get_pixel_multiplier(struct drm_display_mode *mode)
@@ -518,71 +546,17 @@ static int intel_sdvo_get_pixel_multiplier(struct drm_display_mode *mode)
518 return 4; 546 return 4;
519} 547}
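The rule in intel_sdvo_get_pixel_multiplier (top of this hunk) keeps the SDVO link clock in its legal range by multiplying slow dot clocks up. Restated as a sketch, with the clock in kHz:

        static int sdvo_pixel_multiplier(int clock_khz)
        {
                if (clock_khz >= 100000)
                        return 1;
                else if (clock_khz >= 50000)
                        return 2;
                else
                        return 4;
        }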
520 548
521/** 549static bool intel_sdvo_set_control_bus_switch(struct intel_sdvo *intel_sdvo,
522 * Try to read the response after issuing the DDC switch command. But it 550 u8 ddc_bus)
523 * is noted that we must do the action of reading response and issuing DDC
524 * switch command in one I2C transaction. Otherwise when we try to start
525 * another I2C transaction after issuing the DDC bus switch, it will be
526 * switched to the internal SDVO register.
527 */
528static void intel_sdvo_set_control_bus_switch(struct intel_sdvo *intel_sdvo,
529 u8 target)
530{ 551{
531 u8 out_buf[2], cmd_buf[2], ret_value[2], ret; 552 return intel_sdvo_write_cmd(intel_sdvo,
532 struct i2c_msg msgs[] = { 553 SDVO_CMD_SET_CONTROL_BUS_SWITCH,
533 { 554 &ddc_bus, 1);
534 .addr = intel_sdvo->slave_addr >> 1,
535 .flags = 0,
536 .len = 2,
537 .buf = out_buf,
538 },
539 /* the following two are to read the response */
540 {
541 .addr = intel_sdvo->slave_addr >> 1,
542 .flags = 0,
543 .len = 1,
544 .buf = cmd_buf,
545 },
546 {
547 .addr = intel_sdvo->slave_addr >> 1,
548 .flags = I2C_M_RD,
549 .len = 1,
550 .buf = ret_value,
551 },
552 };
553
554 intel_sdvo_debug_write(intel_sdvo, SDVO_CMD_SET_CONTROL_BUS_SWITCH,
555 &target, 1);
556 /* write the DDC switch command argument */
557 intel_sdvo_write_byte(intel_sdvo, SDVO_I2C_ARG_0, target);
558
559 out_buf[0] = SDVO_I2C_OPCODE;
560 out_buf[1] = SDVO_CMD_SET_CONTROL_BUS_SWITCH;
561 cmd_buf[0] = SDVO_I2C_CMD_STATUS;
562 cmd_buf[1] = 0;
563 ret_value[0] = 0;
564 ret_value[1] = 0;
565
566 ret = i2c_transfer(intel_sdvo->base.i2c_bus, msgs, 3);
567 if (ret != 3) {
568 /* failure in I2C transfer */
569 DRM_DEBUG_KMS("I2c transfer returned %d\n", ret);
570 return;
571 }
572 if (ret_value[0] != SDVO_CMD_STATUS_SUCCESS) {
573 DRM_DEBUG_KMS("DDC switch command returns response %d\n",
574 ret_value[0]);
575 return;
576 }
577 return;
578} 555}
579 556
580static bool intel_sdvo_set_value(struct intel_sdvo *intel_sdvo, u8 cmd, const void *data, int len) 557static bool intel_sdvo_set_value(struct intel_sdvo *intel_sdvo, u8 cmd, const void *data, int len)
581{ 558{
582 if (!intel_sdvo_write_cmd(intel_sdvo, cmd, data, len)) 559 return intel_sdvo_write_cmd(intel_sdvo, cmd, data, len);
583 return false;
584
585 return intel_sdvo_read_response(intel_sdvo, NULL, 0);
586} 560}
587 561
588static bool 562static bool
@@ -819,17 +793,13 @@ static void intel_sdvo_get_mode_from_dtd(struct drm_display_mode * mode,
819 mode->flags |= DRM_MODE_FLAG_PVSYNC; 793 mode->flags |= DRM_MODE_FLAG_PVSYNC;
820} 794}
821 795
822static bool intel_sdvo_get_supp_encode(struct intel_sdvo *intel_sdvo, 796static bool intel_sdvo_check_supp_encode(struct intel_sdvo *intel_sdvo)
823 struct intel_sdvo_encode *encode)
824{ 797{
825 if (intel_sdvo_get_value(intel_sdvo, 798 struct intel_sdvo_encode encode;
826 SDVO_CMD_GET_SUPP_ENCODE,
827 encode, sizeof(*encode)))
828 return true;
829 799
830 /* non-support means DVI */ 800 return intel_sdvo_get_value(intel_sdvo,
831 memset(encode, 0, sizeof(*encode)); 801 SDVO_CMD_GET_SUPP_ENCODE,
832 return false; 802 &encode, sizeof(encode));
833} 803}
834 804
835static bool intel_sdvo_set_encode(struct intel_sdvo *intel_sdvo, 805static bool intel_sdvo_set_encode(struct intel_sdvo *intel_sdvo,
@@ -874,115 +844,33 @@ static void intel_sdvo_dump_hdmi_buf(struct intel_sdvo *intel_sdvo)
874} 844}
875#endif 845#endif
876 846
877static bool intel_sdvo_set_hdmi_buf(struct intel_sdvo *intel_sdvo, 847static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo)
878 int index,
879 uint8_t *data, int8_t size, uint8_t tx_rate)
880{
881 uint8_t set_buf_index[2];
882
883 set_buf_index[0] = index;
884 set_buf_index[1] = 0;
885
886 if (!intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_HBUF_INDEX,
887 set_buf_index, 2))
888 return false;
889
890 for (; size > 0; size -= 8) {
891 if (!intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_HBUF_DATA, data, 8))
892 return false;
893
894 data += 8;
895 }
896
897 return intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_HBUF_TXRATE, &tx_rate, 1);
898}
899
900static uint8_t intel_sdvo_calc_hbuf_csum(uint8_t *data, uint8_t size)
901{
902 uint8_t csum = 0;
903 int i;
904
905 for (i = 0; i < size; i++)
906 csum += data[i];
907
908 return 0x100 - csum;
909}
910
911#define DIP_TYPE_AVI 0x82
912#define DIP_VERSION_AVI 0x2
913#define DIP_LEN_AVI 13
914
915struct dip_infoframe {
916 uint8_t type;
917 uint8_t version;
918 uint8_t len;
919 uint8_t checksum;
920 union {
921 struct {
922 /* Packet Byte #1 */
923 uint8_t S:2;
924 uint8_t B:2;
925 uint8_t A:1;
926 uint8_t Y:2;
927 uint8_t rsvd1:1;
928 /* Packet Byte #2 */
929 uint8_t R:4;
930 uint8_t M:2;
931 uint8_t C:2;
932 /* Packet Byte #3 */
933 uint8_t SC:2;
934 uint8_t Q:2;
935 uint8_t EC:3;
936 uint8_t ITC:1;
937 /* Packet Byte #4 */
938 uint8_t VIC:7;
939 uint8_t rsvd2:1;
940 /* Packet Byte #5 */
941 uint8_t PR:4;
942 uint8_t rsvd3:4;
943 /* Packet Byte #6~13 */
944 uint16_t top_bar_end;
945 uint16_t bottom_bar_start;
946 uint16_t left_bar_end;
947 uint16_t right_bar_start;
948 } avi;
949 struct {
950 /* Packet Byte #1 */
951 uint8_t channel_count:3;
952 uint8_t rsvd1:1;
953 uint8_t coding_type:4;
954 /* Packet Byte #2 */
955 uint8_t sample_size:2; /* SS0, SS1 */
956 uint8_t sample_frequency:3;
957 uint8_t rsvd2:3;
958 /* Packet Byte #3 */
959 uint8_t coding_type_private:5;
960 uint8_t rsvd3:3;
961 /* Packet Byte #4 */
962 uint8_t channel_allocation;
963 /* Packet Byte #5 */
964 uint8_t rsvd4:3;
965 uint8_t level_shift:4;
966 uint8_t downmix_inhibit:1;
967 } audio;
968 uint8_t payload[28];
969 } __attribute__ ((packed)) u;
970} __attribute__((packed));
971
972static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo,
973 struct drm_display_mode * mode)
974{ 848{
975 struct dip_infoframe avi_if = { 849 struct dip_infoframe avi_if = {
976 .type = DIP_TYPE_AVI, 850 .type = DIP_TYPE_AVI,
977 .version = DIP_VERSION_AVI, 851 .ver = DIP_VERSION_AVI,
978 .len = DIP_LEN_AVI, 852 .len = DIP_LEN_AVI,
979 }; 853 };
854 uint8_t tx_rate = SDVO_HBUF_TX_VSYNC;
855 uint8_t set_buf_index[2] = { 1, 0 };
856 uint64_t *data = (uint64_t *)&avi_if;
857 unsigned i;
858
859 intel_dip_infoframe_csum(&avi_if);
860
861 if (!intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_HBUF_INDEX,
862 set_buf_index, 2))
863 return false;
980 864
981 avi_if.checksum = intel_sdvo_calc_hbuf_csum((uint8_t *)&avi_if, 865 for (i = 0; i < sizeof(avi_if); i += 8) {
982 4 + avi_if.len); 866 if (!intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_HBUF_DATA,
983 return intel_sdvo_set_hdmi_buf(intel_sdvo, 1, (uint8_t *)&avi_if, 867 data, 8))
984 4 + avi_if.len, 868 return false;
985 SDVO_HBUF_TX_VSYNC); 869 data++;
870 }
871
872 return intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_HBUF_TXRATE,
873 &tx_rate, 1);
986} 874}
987 875
988static bool intel_sdvo_set_tv_format(struct intel_sdvo *intel_sdvo) 876static bool intel_sdvo_set_tv_format(struct intel_sdvo *intel_sdvo)
@@ -1022,8 +910,6 @@ intel_sdvo_set_input_timings_for_mode(struct intel_sdvo *intel_sdvo,
1022 struct drm_display_mode *mode, 910 struct drm_display_mode *mode,
1023 struct drm_display_mode *adjusted_mode) 911 struct drm_display_mode *adjusted_mode)
1024{ 912{
1025 struct intel_sdvo_dtd input_dtd;
1026
1027 /* Reset the input timing to the screen. Assume always input 0. */ 913 /* Reset the input timing to the screen. Assume always input 0. */
1028 if (!intel_sdvo_set_target_input(intel_sdvo)) 914 if (!intel_sdvo_set_target_input(intel_sdvo))
1029 return false; 915 return false;
@@ -1035,14 +921,12 @@ intel_sdvo_set_input_timings_for_mode(struct intel_sdvo *intel_sdvo,
1035 return false; 921 return false;
1036 922
1037 if (!intel_sdvo_get_preferred_input_timing(intel_sdvo, 923 if (!intel_sdvo_get_preferred_input_timing(intel_sdvo,
1038 &input_dtd)) 924 &intel_sdvo->input_dtd))
1039 return false; 925 return false;
1040 926
1041 intel_sdvo_get_mode_from_dtd(adjusted_mode, &input_dtd); 927 intel_sdvo_get_mode_from_dtd(adjusted_mode, &intel_sdvo->input_dtd);
1042 intel_sdvo->sdvo_flags = input_dtd.part2.sdvo_flags;
1043 928
1044 drm_mode_set_crtcinfo(adjusted_mode, 0); 929 drm_mode_set_crtcinfo(adjusted_mode, 0);
1045 mode->clock = adjusted_mode->clock;
1046 return true; 930 return true;
1047} 931}
1048 932
@@ -1050,7 +934,8 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder,
1050 struct drm_display_mode *mode, 934 struct drm_display_mode *mode,
1051 struct drm_display_mode *adjusted_mode) 935 struct drm_display_mode *adjusted_mode)
1052{ 936{
1053 struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder); 937 struct intel_sdvo *intel_sdvo = to_intel_sdvo(encoder);
938 int multiplier;
1054 939
1055 /* We need to construct preferred input timings based on our 940 /* We need to construct preferred input timings based on our
1056 * output timings. To do that, we have to set the output 941 * output timings. To do that, we have to set the output
@@ -1065,10 +950,8 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder,
1065 mode, 950 mode,
1066 adjusted_mode); 951 adjusted_mode);
1067 } else if (intel_sdvo->is_lvds) { 952 } else if (intel_sdvo->is_lvds) {
1068 drm_mode_set_crtcinfo(intel_sdvo->sdvo_lvds_fixed_mode, 0);
1069
1070 if (!intel_sdvo_set_output_timings_from_mode(intel_sdvo, 953 if (!intel_sdvo_set_output_timings_from_mode(intel_sdvo,
1071 intel_sdvo->sdvo_lvds_fixed_mode)) 954 intel_sdvo->sdvo_lvds_fixed_mode))
1072 return false; 955 return false;
1073 956
1074 (void) intel_sdvo_set_input_timings_for_mode(intel_sdvo, 957 (void) intel_sdvo_set_input_timings_for_mode(intel_sdvo,
@@ -1077,9 +960,10 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder,
1077 } 960 }
1078 961
1079 /* Make the CRTC code factor in the SDVO pixel multiplier. The 962 /* Make the CRTC code factor in the SDVO pixel multiplier. The
1080 * SDVO device will be told of the multiplier during mode_set. 963 * SDVO device will factor out the multiplier during mode_set.
1081 */ 964 */
1082 adjusted_mode->clock *= intel_sdvo_get_pixel_multiplier(mode); 965 multiplier = intel_sdvo_get_pixel_multiplier(adjusted_mode);
966 intel_mode_set_pixel_multiplier(adjusted_mode, multiplier);
1083 967
1084 return true; 968 return true;
1085} 969}
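Instead of multiplying adjusted_mode->clock in place and having later code divide it back out, the multiplier now rides along in the adjusted mode via the new helpers and is recovered in mode_set below. A hedged sketch of the round-trip; where the helpers stash the value is their implementation detail:

        intel_mode_set_pixel_multiplier(adjusted_mode, multiplier);
        /* later, in intel_sdvo_mode_set(): */
        pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);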
@@ -1092,11 +976,12 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
1092 struct drm_i915_private *dev_priv = dev->dev_private; 976 struct drm_i915_private *dev_priv = dev->dev_private;
1093 struct drm_crtc *crtc = encoder->crtc; 977 struct drm_crtc *crtc = encoder->crtc;
1094 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 978 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1095 struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder); 979 struct intel_sdvo *intel_sdvo = to_intel_sdvo(encoder);
1096 u32 sdvox = 0; 980 u32 sdvox;
1097 int sdvo_pixel_multiply, rate;
1098 struct intel_sdvo_in_out_map in_out; 981 struct intel_sdvo_in_out_map in_out;
1099 struct intel_sdvo_dtd input_dtd; 982 struct intel_sdvo_dtd input_dtd;
983 int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
984 int rate;
1100 985
1101 if (!mode) 986 if (!mode)
1102 return; 987 return;
@@ -1114,28 +999,23 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
1114 SDVO_CMD_SET_IN_OUT_MAP, 999 SDVO_CMD_SET_IN_OUT_MAP,
1115 &in_out, sizeof(in_out)); 1000 &in_out, sizeof(in_out));
1116 1001
1117 if (intel_sdvo->is_hdmi) { 1002 /* Set the output timings to the screen */
1118 if (!intel_sdvo_set_avi_infoframe(intel_sdvo, mode)) 1003 if (!intel_sdvo_set_target_output(intel_sdvo,
1119 return; 1004 intel_sdvo->attached_output))
1120 1005 return;
1121 sdvox |= SDVO_AUDIO_ENABLE;
1122 }
1123 1006
1124 /* We have tried to get input timing in mode_fixup, and filled into 1007 /* We have tried to get input timing in mode_fixup, and filled into
1125 adjusted_mode */ 1008 * adjusted_mode.
1126 intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode);
1127 if (intel_sdvo->is_tv || intel_sdvo->is_lvds)
1128 input_dtd.part2.sdvo_flags = intel_sdvo->sdvo_flags;
1129
1130 /* If it's a TV, we already set the output timing in mode_fixup.
1131 * Otherwise, the output timing is equal to the input timing.
1132 */ 1009 */
1133 if (!intel_sdvo->is_tv && !intel_sdvo->is_lvds) { 1010 if (intel_sdvo->is_tv || intel_sdvo->is_lvds) {
1011 input_dtd = intel_sdvo->input_dtd;
1012 } else {
1134 /* Set the output timing to the screen */ 1013 /* Set the output timing to the screen */
1135 if (!intel_sdvo_set_target_output(intel_sdvo, 1014 if (!intel_sdvo_set_target_output(intel_sdvo,
1136 intel_sdvo->attached_output)) 1015 intel_sdvo->attached_output))
1137 return; 1016 return;
1138 1017
1018 intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode);
1139 (void) intel_sdvo_set_output_timing(intel_sdvo, &input_dtd); 1019 (void) intel_sdvo_set_output_timing(intel_sdvo, &input_dtd);
1140 } 1020 }
1141 1021
@@ -1143,31 +1023,18 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
1143 if (!intel_sdvo_set_target_input(intel_sdvo)) 1023 if (!intel_sdvo_set_target_input(intel_sdvo))
1144 return; 1024 return;
1145 1025
1146 if (intel_sdvo->is_tv) { 1026 if (intel_sdvo->is_hdmi &&
1147 if (!intel_sdvo_set_tv_format(intel_sdvo)) 1027 !intel_sdvo_set_avi_infoframe(intel_sdvo))
1148 return; 1028 return;
1149 }
1150 1029
1151 /* We would like to use intel_sdvo_create_preferred_input_timing() to 1030 if (intel_sdvo->is_tv &&
1152 * provide the device with a timing it can support, if it supports that 1031 !intel_sdvo_set_tv_format(intel_sdvo))
1153 * feature. However, presumably we would need to adjust the CRTC to 1032 return;
1154 * output the preferred timing, and we don't support that currently.
1155 */
1156#if 0
1157 success = intel_sdvo_create_preferred_input_timing(encoder, clock,
1158 width, height);
1159 if (success) {
1160 struct intel_sdvo_dtd *input_dtd;
1161 1033
1162 intel_sdvo_get_preferred_input_timing(encoder, &input_dtd);
1163 intel_sdvo_set_input_timing(encoder, &input_dtd);
1164 }
1165#else
1166 (void) intel_sdvo_set_input_timing(intel_sdvo, &input_dtd); 1034 (void) intel_sdvo_set_input_timing(intel_sdvo, &input_dtd);
1167#endif
1168 1035
1169 sdvo_pixel_multiply = intel_sdvo_get_pixel_multiplier(mode); 1036 switch (pixel_multiplier) {
1170 switch (sdvo_pixel_multiply) { 1037 default:
1171 case 1: rate = SDVO_CLOCK_RATE_MULT_1X; break; 1038 case 1: rate = SDVO_CLOCK_RATE_MULT_1X; break;
1172 case 2: rate = SDVO_CLOCK_RATE_MULT_2X; break; 1039 case 2: rate = SDVO_CLOCK_RATE_MULT_2X; break;
1173 case 4: rate = SDVO_CLOCK_RATE_MULT_4X; break; 1040 case 4: rate = SDVO_CLOCK_RATE_MULT_4X; break;
@@ -1176,14 +1043,14 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
1176 return; 1043 return;
1177 1044
1178 /* Set the SDVO control regs. */ 1045 /* Set the SDVO control regs. */
1179 if (IS_I965G(dev)) { 1046 if (INTEL_INFO(dev)->gen >= 4) {
1180 sdvox |= SDVO_BORDER_ENABLE; 1047 sdvox = SDVO_BORDER_ENABLE;
1181 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) 1048 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
1182 sdvox |= SDVO_VSYNC_ACTIVE_HIGH; 1049 sdvox |= SDVO_VSYNC_ACTIVE_HIGH;
1183 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) 1050 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
1184 sdvox |= SDVO_HSYNC_ACTIVE_HIGH; 1051 sdvox |= SDVO_HSYNC_ACTIVE_HIGH;
1185 } else { 1052 } else {
1186 sdvox |= I915_READ(intel_sdvo->sdvo_reg); 1053 sdvox = I915_READ(intel_sdvo->sdvo_reg);
1187 switch (intel_sdvo->sdvo_reg) { 1054 switch (intel_sdvo->sdvo_reg) {
1188 case SDVOB: 1055 case SDVOB:
1189 sdvox &= SDVOB_PRESERVE_MASK; 1056 sdvox &= SDVOB_PRESERVE_MASK;
@@ -1196,16 +1063,18 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
1196 } 1063 }
1197 if (intel_crtc->pipe == 1) 1064 if (intel_crtc->pipe == 1)
1198 sdvox |= SDVO_PIPE_B_SELECT; 1065 sdvox |= SDVO_PIPE_B_SELECT;
1066 if (intel_sdvo->has_audio)
1067 sdvox |= SDVO_AUDIO_ENABLE;
1199 1068
1200 if (IS_I965G(dev)) { 1069 if (INTEL_INFO(dev)->gen >= 4) {
1201 /* done in crtc_mode_set as the dpll_md reg must be written early */ 1070 /* done in crtc_mode_set as the dpll_md reg must be written early */
1202 } else if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) { 1071 } else if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
1203 /* done in crtc_mode_set as it lives inside the dpll register */ 1072 /* done in crtc_mode_set as it lives inside the dpll register */
1204 } else { 1073 } else {
1205 sdvox |= (sdvo_pixel_multiply - 1) << SDVO_PORT_MULTIPLY_SHIFT; 1074 sdvox |= (pixel_multiplier - 1) << SDVO_PORT_MULTIPLY_SHIFT;
1206 } 1075 }
1207 1076
1208 if (intel_sdvo->sdvo_flags & SDVO_NEED_TO_STALL) 1077 if (input_dtd.part2.sdvo_flags & SDVO_NEED_TO_STALL)
1209 sdvox |= SDVO_STALL_SELECT; 1078 sdvox |= SDVO_STALL_SELECT;
1210 intel_sdvo_write_sdvox(intel_sdvo, sdvox); 1079 intel_sdvo_write_sdvox(intel_sdvo, sdvox);
1211} 1080}
@@ -1214,7 +1083,7 @@ static void intel_sdvo_dpms(struct drm_encoder *encoder, int mode)
1214{ 1083{
1215 struct drm_device *dev = encoder->dev; 1084 struct drm_device *dev = encoder->dev;
1216 struct drm_i915_private *dev_priv = dev->dev_private; 1085 struct drm_i915_private *dev_priv = dev->dev_private;
1217 struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder); 1086 struct intel_sdvo *intel_sdvo = to_intel_sdvo(encoder);
1218 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); 1087 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
1219 u32 temp; 1088 u32 temp;
1220 1089
@@ -1260,8 +1129,7 @@ static void intel_sdvo_dpms(struct drm_encoder *encoder, int mode)
1260static int intel_sdvo_mode_valid(struct drm_connector *connector, 1129static int intel_sdvo_mode_valid(struct drm_connector *connector,
1261 struct drm_display_mode *mode) 1130 struct drm_display_mode *mode)
1262{ 1131{
1263 struct drm_encoder *encoder = intel_attached_encoder(connector); 1132 struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
1264 struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
1265 1133
1266 if (mode->flags & DRM_MODE_FLAG_DBLSCAN) 1134 if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
1267 return MODE_NO_DBLESCAN; 1135 return MODE_NO_DBLESCAN;
@@ -1285,7 +1153,38 @@ static int intel_sdvo_mode_valid(struct drm_connector *connector,
 
 static bool intel_sdvo_get_capabilities(struct intel_sdvo *intel_sdvo, struct intel_sdvo_caps *caps)
 {
-	return intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_DEVICE_CAPS, caps, sizeof(*caps));
+	if (!intel_sdvo_get_value(intel_sdvo,
+				  SDVO_CMD_GET_DEVICE_CAPS,
+				  caps, sizeof(*caps)))
+		return false;
+
+	DRM_DEBUG_KMS("SDVO capabilities:\n"
+		      "  vendor_id: %d\n"
+		      "  device_id: %d\n"
+		      "  device_rev_id: %d\n"
+		      "  sdvo_version_major: %d\n"
+		      "  sdvo_version_minor: %d\n"
+		      "  sdvo_inputs_mask: %d\n"
+		      "  smooth_scaling: %d\n"
+		      "  sharp_scaling: %d\n"
+		      "  up_scaling: %d\n"
+		      "  down_scaling: %d\n"
+		      "  stall_support: %d\n"
+		      "  output_flags: %d\n",
+		      caps->vendor_id,
+		      caps->device_id,
+		      caps->device_rev_id,
+		      caps->sdvo_version_major,
+		      caps->sdvo_version_minor,
+		      caps->sdvo_inputs_mask,
+		      caps->smooth_scaling,
+		      caps->sharp_scaling,
+		      caps->up_scaling,
+		      caps->down_scaling,
+		      caps->stall_support,
+		      caps->output_flags);
+
+	return true;
 }
 
 /* No use! */
@@ -1389,22 +1288,33 @@ intel_sdvo_multifunc_encoder(struct intel_sdvo *intel_sdvo)
 	return (caps > 1);
 }
 
+static struct edid *
+intel_sdvo_get_edid(struct drm_connector *connector)
+{
+	struct intel_sdvo *sdvo = intel_attached_sdvo(connector);
+	return drm_get_edid(connector, &sdvo->ddc);
+}
+
 static struct drm_connector *
 intel_find_analog_connector(struct drm_device *dev)
 {
 	struct drm_connector *connector;
-	struct drm_encoder *encoder;
-	struct intel_sdvo *intel_sdvo;
-
-	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
-		intel_sdvo = enc_to_intel_sdvo(encoder);
-		if (intel_sdvo->base.type == INTEL_OUTPUT_ANALOG) {
-			list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-				if (encoder == intel_attached_encoder(connector))
+	struct intel_sdvo *encoder;
+
+	list_for_each_entry(encoder,
+			    &dev->mode_config.encoder_list,
+			    base.base.head) {
+		if (encoder->base.type == INTEL_OUTPUT_ANALOG) {
+			list_for_each_entry(connector,
+					    &dev->mode_config.connector_list,
+					    head) {
+				if (&encoder->base ==
+				    intel_attached_encoder(connector))
 					return connector;
 			}
 		}
 	}
+
 	return NULL;
 }
 
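
The rewritten loop walks the encoder list through the embedded `base.base.head` member, so `list_for_each_entry` hands back the containing `intel_sdvo` directly instead of requiring an explicit cast helper. A standalone sketch of that intrusive-list pattern, using simplified stand-in types rather than the real DRM structures:

    /* intrusive_list.c: recovering a container from a nested list member. */
    #include <stdio.h>
    #include <stddef.h>

    struct list_head { struct list_head *next; };

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct drm_encoder   { struct list_head head; };
    struct intel_encoder { struct drm_encoder base; int type; };
    struct intel_sdvo    { struct intel_encoder base; };

    int main(void)
    {
            struct intel_sdvo a = { .base.type = 1 }, b = { .base.type = 2 };
            struct list_head list = { &a.base.base.head };

            a.base.base.head.next = &b.base.base.head;
            b.base.base.head.next = &list;

            /* walk the list through the doubly-nested member, as
             * list_for_each_entry(encoder, ..., base.base.head) does */
            for (struct list_head *pos = list.next; pos != &list; pos = pos->next) {
                    struct intel_sdvo *sdvo =
                            container_of(pos, struct intel_sdvo, base.base.head);
                    printf("encoder type %d\n", sdvo->base.type);
            }
            return 0;
    }
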
@@ -1424,64 +1334,72 @@ intel_analog_is_connected(struct drm_device *dev)
 	return true;
 }
 
+/* Mac mini hack -- use the same DDC as the analog connector */
+static struct edid *
+intel_sdvo_get_analog_edid(struct drm_connector *connector)
+{
+	struct drm_i915_private *dev_priv = connector->dev->dev_private;
+
+	if (!intel_analog_is_connected(connector->dev))
+		return NULL;
+
+	return drm_get_edid(connector, &dev_priv->gmbus[dev_priv->crt_ddc_pin].adapter);
+}
+
 enum drm_connector_status
 intel_sdvo_hdmi_sink_detect(struct drm_connector *connector)
 {
-	struct drm_encoder *encoder = intel_attached_encoder(connector);
-	struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
-	struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
-	enum drm_connector_status status = connector_status_connected;
-	struct edid *edid = NULL;
+	struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
+	enum drm_connector_status status;
+	struct edid *edid;
 
-	edid = drm_get_edid(connector, intel_sdvo->base.ddc_bus);
+	edid = intel_sdvo_get_edid(connector);
 
-	/* This is only applied to SDVO cards with multiple outputs */
 	if (edid == NULL && intel_sdvo_multifunc_encoder(intel_sdvo)) {
-		uint8_t saved_ddc, temp_ddc;
-		saved_ddc = intel_sdvo->ddc_bus;
-		temp_ddc = intel_sdvo->ddc_bus >> 1;
+		u8 ddc, saved_ddc = intel_sdvo->ddc_bus;
+
 		/*
 		 * Don't use the 1 as the argument of DDC bus switch to get
 		 * the EDID. It is used for SDVO SPD ROM.
 		 */
-		while(temp_ddc > 1) {
-			intel_sdvo->ddc_bus = temp_ddc;
-			edid = drm_get_edid(connector, intel_sdvo->base.ddc_bus);
-			if (edid) {
-				/*
-				 * When we can get the EDID, maybe it is the
-				 * correct DDC bus. Update it.
-				 */
-				intel_sdvo->ddc_bus = temp_ddc;
+		for (ddc = intel_sdvo->ddc_bus >> 1; ddc > 1; ddc >>= 1) {
+			intel_sdvo->ddc_bus = ddc;
+			edid = intel_sdvo_get_edid(connector);
+			if (edid)
 				break;
-			}
-			temp_ddc >>= 1;
 		}
+		/*
+		 * If we found the EDID on the other bus,
+		 * assume that is the correct DDC bus.
+		 */
 		if (edid == NULL)
 			intel_sdvo->ddc_bus = saved_ddc;
 	}
-	/* when there is no edid and no monitor is connected with VGA
-	 * port, try to use the CRT ddc to read the EDID for DVI-connector
+
+	/*
+	 * When there is no edid and no monitor is connected with VGA
+	 * port, try to use the CRT ddc to read the EDID for DVI-connector.
 	 */
-	if (edid == NULL && intel_sdvo->analog_ddc_bus &&
-	    !intel_analog_is_connected(connector->dev))
-		edid = drm_get_edid(connector, intel_sdvo->analog_ddc_bus);
+	if (edid == NULL)
+		edid = intel_sdvo_get_analog_edid(connector);
 
+	status = connector_status_unknown;
 	if (edid != NULL) {
-		bool is_digital = !!(edid->input & DRM_EDID_INPUT_DIGITAL);
-		bool need_digital = !!(intel_sdvo_connector->output_flag & SDVO_TMDS_MASK);
-
 		/* DDC bus is shared, match EDID to connector type */
-		if (is_digital && need_digital)
+		if (edid->input & DRM_EDID_INPUT_DIGITAL) {
+			status = connector_status_connected;
 			intel_sdvo->is_hdmi = drm_detect_hdmi_monitor(edid);
-		else if (is_digital != need_digital)
-			status = connector_status_disconnected;
-
+			intel_sdvo->has_audio = drm_detect_monitor_audio(edid);
+		}
 		connector->display_info.raw_edid = NULL;
-	} else
-		status = connector_status_disconnected;
-
-	kfree(edid);
+		kfree(edid);
+	}
 
+	if (status == connector_status_connected) {
+		struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
+		if (intel_sdvo_connector->force_audio)
+			intel_sdvo->has_audio = intel_sdvo_connector->force_audio > 0;
+	}
 
 	return status;
 }
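
The multifunction fallback above walks the one-hot DDC selector downward, halving it each pass and stopping before 1, which is reserved for the SDVO SPD ROM. A minimal sketch of that scan, with a hypothetical probe() standing in for intel_sdvo_get_edid():

    /* ddc_scan.c: walk a one-hot DDC bus selector downward, skipping 1. */
    #include <stdio.h>

    static int probe(unsigned bus)
    {
            return bus == 0x02;     /* pretend only bus 2 answers */
    }

    int main(void)
    {
            unsigned saved = 0x10, bus, found = 0;

            for (bus = saved >> 1; bus > 1; bus >>= 1)
                    if (probe(bus)) {
                            found = bus;
                            break;
                    }

            printf("EDID on bus %#x\n", found ? found : saved);
            return 0;
    }
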
@@ -1490,13 +1408,12 @@ static enum drm_connector_status
 intel_sdvo_detect(struct drm_connector *connector, bool force)
 {
 	uint16_t response;
-	struct drm_encoder *encoder = intel_attached_encoder(connector);
-	struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
+	struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
 	struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
 	enum drm_connector_status ret;
 
 	if (!intel_sdvo_write_cmd(intel_sdvo,
 				  SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0))
 		return connector_status_unknown;
 	if (intel_sdvo->is_tv) {
 		/* add 30ms delay when the output type is SDVO-TV */
@@ -1505,7 +1422,9 @@ intel_sdvo_detect(struct drm_connector *connector, bool force)
 	if (!intel_sdvo_read_response(intel_sdvo, &response, 2))
 		return connector_status_unknown;
 
-	DRM_DEBUG_KMS("SDVO response %d %d\n", response & 0xff, response >> 8);
+	DRM_DEBUG_KMS("SDVO response %d %d [%x]\n",
+		      response & 0xff, response >> 8,
+		      intel_sdvo_connector->output_flag);
 
 	if (response == 0)
 		return connector_status_disconnected;
@@ -1538,12 +1457,10 @@ intel_sdvo_detect(struct drm_connector *connector, bool force)
 
 static void intel_sdvo_get_ddc_modes(struct drm_connector *connector)
 {
-	struct drm_encoder *encoder = intel_attached_encoder(connector);
-	struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
-	int num_modes;
+	struct edid *edid;
 
 	/* set the bus switch and get the modes */
-	num_modes = intel_ddc_get_modes(connector, intel_sdvo->base.ddc_bus);
+	edid = intel_sdvo_get_edid(connector);
 
 	/*
 	 * Mac mini hack. On this device, the DVI-I connector shares one DDC
@@ -1551,12 +1468,14 @@ static void intel_sdvo_get_ddc_modes(struct drm_connector *connector)
 	 * DDC fails, check to see if the analog output is disconnected, in
 	 * which case we'll look there for the digital DDC data.
 	 */
-	if (num_modes == 0 &&
-	    intel_sdvo->analog_ddc_bus &&
-	    !intel_analog_is_connected(connector->dev)) {
-		/* Switch to the analog ddc bus and try that
-		 */
-		(void) intel_ddc_get_modes(connector, intel_sdvo->analog_ddc_bus);
+	if (edid == NULL)
+		edid = intel_sdvo_get_analog_edid(connector);
+
+	if (edid != NULL) {
+		drm_mode_connector_update_edid_property(connector, edid);
+		drm_add_edid_modes(connector, edid);
+		connector->display_info.raw_edid = NULL;
+		kfree(edid);
 	}
 }
 
@@ -1627,8 +1546,7 @@ struct drm_display_mode sdvo_tv_modes[] = {
 
 static void intel_sdvo_get_tv_modes(struct drm_connector *connector)
 {
-	struct drm_encoder *encoder = intel_attached_encoder(connector);
-	struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
+	struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
 	struct intel_sdvo_sdtv_resolution_request tv_res;
 	uint32_t reply = 0, format_map = 0;
 	int i;
@@ -1644,7 +1562,8 @@ static void intel_sdvo_get_tv_modes(struct drm_connector *connector)
 		return;
 
 	BUILD_BUG_ON(sizeof(tv_res) != 3);
-	if (!intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT,
+	if (!intel_sdvo_write_cmd(intel_sdvo,
+				  SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT,
 				  &tv_res, sizeof(tv_res)))
 		return;
 	if (!intel_sdvo_read_response(intel_sdvo, &reply, 3))
@@ -1662,8 +1581,7 @@ static void intel_sdvo_get_tv_modes(struct drm_connector *connector)
 
 static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
 {
-	struct drm_encoder *encoder = intel_attached_encoder(connector);
-	struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
+	struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
 	struct drm_i915_private *dev_priv = connector->dev->dev_private;
 	struct drm_display_mode *newmode;
 
@@ -1672,7 +1590,7 @@ static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
 	 * Assume that the preferred modes are
 	 * arranged in priority order.
 	 */
-	intel_ddc_get_modes(connector, intel_sdvo->base.ddc_bus);
+	intel_ddc_get_modes(connector, intel_sdvo->i2c);
 	if (list_empty(&connector->probed_modes) == false)
 		goto end;
 
@@ -1693,6 +1611,10 @@ end:
 		if (newmode->type & DRM_MODE_TYPE_PREFERRED) {
 			intel_sdvo->sdvo_lvds_fixed_mode =
 				drm_mode_duplicate(connector->dev, newmode);
+
+			drm_mode_set_crtcinfo(intel_sdvo->sdvo_lvds_fixed_mode,
+					      0);
+
 			intel_sdvo->is_lvds = true;
 			break;
 		}
@@ -1775,8 +1697,7 @@ intel_sdvo_set_property(struct drm_connector *connector,
 			struct drm_property *property,
 			uint64_t val)
 {
-	struct drm_encoder *encoder = intel_attached_encoder(connector);
-	struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
+	struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
 	struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
 	uint16_t temp_value;
 	uint8_t cmd;
@@ -1786,6 +1707,21 @@ intel_sdvo_set_property(struct drm_connector *connector,
 	if (ret)
 		return ret;
 
+	if (property == intel_sdvo_connector->force_audio_property) {
+		if (val == intel_sdvo_connector->force_audio)
+			return 0;
+
+		intel_sdvo_connector->force_audio = val;
+
+		if (val > 0 && intel_sdvo->has_audio)
+			return 0;
+		if (val < 0 && !intel_sdvo->has_audio)
+			return 0;
+
+		intel_sdvo->has_audio = val > 0;
+		goto done;
+	}
+
 #define CHECK_PROPERTY(name, NAME) \
 	if (intel_sdvo_connector->name == property) { \
 		if (intel_sdvo_connector->cur_##name == temp_value) return 0; \
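
The force_audio property handled above is a tri-state: -1 forces audio off, 0 leaves it to EDID detection, and +1 forces it on; the `goto done` path only triggers a full mode set when the effective state actually flips. A sketch of that decision, assuming plain ints and bools in place of the connector fields:

    /* force_audio.c: tri-state audio override (sketch). */
    #include <stdio.h>
    #include <stdbool.h>

    /* returns true if a mode set is needed because audio state changed */
    static bool set_force_audio(int *force_audio, bool *has_audio, int val)
    {
            if (val == *force_audio)
                    return false;           /* property unchanged */

            *force_audio = val;

            /* forcing in the direction already resolved: nothing to do */
            if (val > 0 && *has_audio)
                    return false;
            if (val < 0 && !*has_audio)
                    return false;

            *has_audio = val > 0;
            return true;                    /* state flipped: re-modeset */
    }

    int main(void)
    {
            int force = 0;
            bool audio = false;
            printf("modeset needed: %d\n", set_force_audio(&force, &audio, 1));
            return 0;
    }
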
@@ -1879,9 +1815,8 @@ set_value:
 
 
 done:
-	if (encoder->crtc) {
-		struct drm_crtc *crtc = encoder->crtc;
-
+	if (intel_sdvo->base.base.crtc) {
+		struct drm_crtc *crtc = intel_sdvo->base.base.crtc;
 		drm_crtc_helper_set_mode(crtc, &crtc->mode, crtc->x,
 					 crtc->y, crtc->fb);
 	}
@@ -1909,20 +1844,18 @@ static const struct drm_connector_funcs intel_sdvo_connector_funcs = {
 static const struct drm_connector_helper_funcs intel_sdvo_connector_helper_funcs = {
 	.get_modes = intel_sdvo_get_modes,
 	.mode_valid = intel_sdvo_mode_valid,
-	.best_encoder = intel_attached_encoder,
+	.best_encoder = intel_best_encoder,
 };
 
 static void intel_sdvo_enc_destroy(struct drm_encoder *encoder)
 {
-	struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
-
-	if (intel_sdvo->analog_ddc_bus)
-		intel_i2c_destroy(intel_sdvo->analog_ddc_bus);
+	struct intel_sdvo *intel_sdvo = to_intel_sdvo(encoder);
 
 	if (intel_sdvo->sdvo_lvds_fixed_mode != NULL)
 		drm_mode_destroy(encoder->dev,
 				 intel_sdvo->sdvo_lvds_fixed_mode);
 
+	i2c_del_adapter(&intel_sdvo->ddc);
 	intel_encoder_destroy(encoder);
 }
 
@@ -1990,53 +1923,48 @@ intel_sdvo_select_ddc_bus(struct drm_i915_private *dev_priv,
 		intel_sdvo_guess_ddc_bus(sdvo);
 }
 
-static bool
-intel_sdvo_get_digital_encoding_mode(struct intel_sdvo *intel_sdvo, int device)
+static void
+intel_sdvo_select_i2c_bus(struct drm_i915_private *dev_priv,
+			  struct intel_sdvo *sdvo, u32 reg)
 {
-	return intel_sdvo_set_target_output(intel_sdvo,
-					    device == 0 ? SDVO_OUTPUT_TMDS0 : SDVO_OUTPUT_TMDS1) &&
-		intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_ENCODE,
-				     &intel_sdvo->is_hdmi, 1);
-}
+	struct sdvo_device_mapping *mapping;
+	u8 pin, speed;
 
-static struct intel_sdvo *
-intel_sdvo_chan_to_intel_sdvo(struct intel_i2c_chan *chan)
-{
-	struct drm_device *dev = chan->drm_dev;
-	struct drm_encoder *encoder;
+	if (IS_SDVOB(reg))
+		mapping = &dev_priv->sdvo_mappings[0];
+	else
+		mapping = &dev_priv->sdvo_mappings[1];
 
-	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
-		struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
-		if (intel_sdvo->base.ddc_bus == &chan->adapter)
-			return intel_sdvo;
+	pin = GMBUS_PORT_DPB;
+	speed = GMBUS_RATE_1MHZ >> 8;
+	if (mapping->initialized) {
+		pin = mapping->i2c_pin;
+		speed = mapping->i2c_speed;
 	}
 
-	return NULL;
+	sdvo->i2c = &dev_priv->gmbus[pin].adapter;
+	intel_gmbus_set_speed(sdvo->i2c, speed);
+	intel_gmbus_force_bit(sdvo->i2c, true);
 }
 
-static int intel_sdvo_master_xfer(struct i2c_adapter *i2c_adap,
-				  struct i2c_msg msgs[], int num)
+static bool
+intel_sdvo_is_hdmi_connector(struct intel_sdvo *intel_sdvo, int device)
 {
-	struct intel_sdvo *intel_sdvo;
-	struct i2c_algo_bit_data *algo_data;
-	const struct i2c_algorithm *algo;
+	int is_hdmi;
 
-	algo_data = (struct i2c_algo_bit_data *)i2c_adap->algo_data;
-	intel_sdvo =
-		intel_sdvo_chan_to_intel_sdvo((struct intel_i2c_chan *)
-					      (algo_data->data));
-	if (intel_sdvo == NULL)
-		return -EINVAL;
+	if (!intel_sdvo_check_supp_encode(intel_sdvo))
+		return false;
 
-	algo = intel_sdvo->base.i2c_bus->algo;
+	if (!intel_sdvo_set_target_output(intel_sdvo,
+					  device == 0 ? SDVO_OUTPUT_TMDS0 : SDVO_OUTPUT_TMDS1))
+		return false;
 
-	intel_sdvo_set_control_bus_switch(intel_sdvo, intel_sdvo->ddc_bus);
-	return algo->master_xfer(i2c_adap, msgs, num);
+	is_hdmi = 0;
+	if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_ENCODE, &is_hdmi, 1))
+		return false;
+
+	return !!is_hdmi;
 }
 
-static struct i2c_algorithm intel_sdvo_i2c_bit_algo = {
-	.master_xfer	= intel_sdvo_master_xfer,
-};
-
 static u8
 intel_sdvo_get_slave_addr(struct drm_device *dev, int sdvo_reg)
@@ -2076,26 +2004,44 @@ intel_sdvo_get_slave_addr(struct drm_device *dev, int sdvo_reg)
 }
 
 static void
-intel_sdvo_connector_init(struct drm_encoder *encoder,
-			  struct drm_connector *connector)
+intel_sdvo_connector_init(struct intel_sdvo_connector *connector,
+			  struct intel_sdvo *encoder)
 {
-	drm_connector_init(encoder->dev, connector, &intel_sdvo_connector_funcs,
-			   connector->connector_type);
-
-	drm_connector_helper_add(connector, &intel_sdvo_connector_helper_funcs);
-
-	connector->interlace_allowed = 0;
-	connector->doublescan_allowed = 0;
-	connector->display_info.subpixel_order = SubPixelHorizontalRGB;
-
-	drm_mode_connector_attach_encoder(connector, encoder);
-	drm_sysfs_connector_add(connector);
+	drm_connector_init(encoder->base.base.dev,
+			   &connector->base.base,
+			   &intel_sdvo_connector_funcs,
+			   connector->base.base.connector_type);
+
+	drm_connector_helper_add(&connector->base.base,
+				 &intel_sdvo_connector_helper_funcs);
+
+	connector->base.base.interlace_allowed = 0;
+	connector->base.base.doublescan_allowed = 0;
+	connector->base.base.display_info.subpixel_order = SubPixelHorizontalRGB;
+
+	intel_connector_attach_encoder(&connector->base, &encoder->base);
+	drm_sysfs_connector_add(&connector->base.base);
+}
+
+static void
+intel_sdvo_add_hdmi_properties(struct intel_sdvo_connector *connector)
+{
+	struct drm_device *dev = connector->base.base.dev;
+
+	connector->force_audio_property =
+		drm_property_create(dev, DRM_MODE_PROP_RANGE, "force_audio", 2);
+	if (connector->force_audio_property) {
+		connector->force_audio_property->values[0] = -1;
+		connector->force_audio_property->values[1] = 1;
+		drm_connector_attach_property(&connector->base.base,
+					      connector->force_audio_property, 0);
+	}
 }
 
 static bool
 intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
 {
-	struct drm_encoder *encoder = &intel_sdvo->base.enc;
+	struct drm_encoder *encoder = &intel_sdvo->base.base;
 	struct drm_connector *connector;
 	struct intel_connector *intel_connector;
 	struct intel_sdvo_connector *intel_sdvo_connector;
@@ -2118,19 +2064,20 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
 	encoder->encoder_type = DRM_MODE_ENCODER_TMDS;
 	connector->connector_type = DRM_MODE_CONNECTOR_DVID;
 
-	if (intel_sdvo_get_supp_encode(intel_sdvo, &intel_sdvo->encode)
-	    && intel_sdvo_get_digital_encoding_mode(intel_sdvo, device)
-	    && intel_sdvo->is_hdmi) {
+	if (intel_sdvo_is_hdmi_connector(intel_sdvo, device)) {
 		/* enable hdmi encoding mode if supported */
 		intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_HDMI);
 		intel_sdvo_set_colorimetry(intel_sdvo,
 					   SDVO_COLORIMETRY_RGB256);
 		connector->connector_type = DRM_MODE_CONNECTOR_HDMIA;
+		intel_sdvo->is_hdmi = true;
 	}
 	intel_sdvo->base.clone_mask = ((1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
 				       (1 << INTEL_ANALOG_CLONE_BIT));
 
-	intel_sdvo_connector_init(encoder, connector);
+	intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo);
+
+	intel_sdvo_add_hdmi_properties(intel_sdvo_connector);
 
 	return true;
 }
@@ -2138,36 +2085,36 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
 static bool
 intel_sdvo_tv_init(struct intel_sdvo *intel_sdvo, int type)
 {
-	struct drm_encoder *encoder = &intel_sdvo->base.enc;
+	struct drm_encoder *encoder = &intel_sdvo->base.base;
 	struct drm_connector *connector;
 	struct intel_connector *intel_connector;
 	struct intel_sdvo_connector *intel_sdvo_connector;
 
 	intel_sdvo_connector = kzalloc(sizeof(struct intel_sdvo_connector), GFP_KERNEL);
 	if (!intel_sdvo_connector)
 		return false;
 
 	intel_connector = &intel_sdvo_connector->base;
 	connector = &intel_connector->base;
 	encoder->encoder_type = DRM_MODE_ENCODER_TVDAC;
 	connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO;
 
 	intel_sdvo->controlled_output |= type;
 	intel_sdvo_connector->output_flag = type;
 
 	intel_sdvo->is_tv = true;
 	intel_sdvo->base.needs_tv_clock = true;
 	intel_sdvo->base.clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT;
 
-	intel_sdvo_connector_init(encoder, connector);
+	intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo);
 
 	if (!intel_sdvo_tv_create_property(intel_sdvo, intel_sdvo_connector, type))
 		goto err;
 
 	if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector))
 		goto err;
 
 	return true;
 
 err:
 	intel_sdvo_destroy(connector);
@@ -2177,43 +2124,44 @@ err:
 static bool
 intel_sdvo_analog_init(struct intel_sdvo *intel_sdvo, int device)
 {
-	struct drm_encoder *encoder = &intel_sdvo->base.enc;
+	struct drm_encoder *encoder = &intel_sdvo->base.base;
 	struct drm_connector *connector;
 	struct intel_connector *intel_connector;
 	struct intel_sdvo_connector *intel_sdvo_connector;
 
 	intel_sdvo_connector = kzalloc(sizeof(struct intel_sdvo_connector), GFP_KERNEL);
 	if (!intel_sdvo_connector)
 		return false;
 
 	intel_connector = &intel_sdvo_connector->base;
 	connector = &intel_connector->base;
 	connector->polled = DRM_CONNECTOR_POLL_CONNECT;
 	encoder->encoder_type = DRM_MODE_ENCODER_DAC;
 	connector->connector_type = DRM_MODE_CONNECTOR_VGA;
 
 	if (device == 0) {
 		intel_sdvo->controlled_output |= SDVO_OUTPUT_RGB0;
 		intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB0;
 	} else if (device == 1) {
 		intel_sdvo->controlled_output |= SDVO_OUTPUT_RGB1;
 		intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB1;
 	}
 
 	intel_sdvo->base.clone_mask = ((1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
 				       (1 << INTEL_ANALOG_CLONE_BIT));
 
-	intel_sdvo_connector_init(encoder, connector);
-	return true;
+	intel_sdvo_connector_init(intel_sdvo_connector,
+				  intel_sdvo);
+	return true;
 }
 
 static bool
 intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device)
 {
-	struct drm_encoder *encoder = &intel_sdvo->base.enc;
+	struct drm_encoder *encoder = &intel_sdvo->base.base;
 	struct drm_connector *connector;
 	struct intel_connector *intel_connector;
 	struct intel_sdvo_connector *intel_sdvo_connector;
 
 	intel_sdvo_connector = kzalloc(sizeof(struct intel_sdvo_connector), GFP_KERNEL);
 	if (!intel_sdvo_connector)
@@ -2221,22 +2169,22 @@ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device)
 
 	intel_connector = &intel_sdvo_connector->base;
 	connector = &intel_connector->base;
 	encoder->encoder_type = DRM_MODE_ENCODER_LVDS;
 	connector->connector_type = DRM_MODE_CONNECTOR_LVDS;
 
 	if (device == 0) {
 		intel_sdvo->controlled_output |= SDVO_OUTPUT_LVDS0;
 		intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS0;
 	} else if (device == 1) {
 		intel_sdvo->controlled_output |= SDVO_OUTPUT_LVDS1;
 		intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS1;
 	}
 
 	intel_sdvo->base.clone_mask = ((1 << INTEL_ANALOG_CLONE_BIT) |
 				       (1 << INTEL_SDVO_LVDS_CLONE_BIT));
 
-	intel_sdvo_connector_init(encoder, connector);
+	intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo);
 	if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector))
 		goto err;
 
 	return true;
@@ -2307,7 +2255,7 @@ static bool intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo,
 					  struct intel_sdvo_connector *intel_sdvo_connector,
 					  int type)
 {
-	struct drm_device *dev = intel_sdvo->base.enc.dev;
+	struct drm_device *dev = intel_sdvo->base.base.dev;
 	struct intel_sdvo_tv_format format;
 	uint32_t format_map, i;
 
@@ -2373,7 +2321,7 @@ intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo,
 				      struct intel_sdvo_connector *intel_sdvo_connector,
 				      struct intel_sdvo_enhancements_reply enhancements)
 {
-	struct drm_device *dev = intel_sdvo->base.enc.dev;
+	struct drm_device *dev = intel_sdvo->base.base.dev;
 	struct drm_connector *connector = &intel_sdvo_connector->base.base;
 	uint16_t response, data_value[2];
 
@@ -2502,7 +2450,7 @@ intel_sdvo_create_enhance_property_lvds(struct intel_sdvo *intel_sdvo,
 					struct intel_sdvo_connector *intel_sdvo_connector,
 					struct intel_sdvo_enhancements_reply enhancements)
 {
-	struct drm_device *dev = intel_sdvo->base.enc.dev;
+	struct drm_device *dev = intel_sdvo->base.base.dev;
 	struct drm_connector *connector = &intel_sdvo_connector->base.base;
 	uint16_t response, data_value[2];
 
@@ -2535,7 +2483,43 @@ static bool intel_sdvo_create_enhance_property(struct intel_sdvo *intel_sdvo,
 		return intel_sdvo_create_enhance_property_lvds(intel_sdvo, intel_sdvo_connector, enhancements.reply);
 	else
 		return true;
+}
+
+static int intel_sdvo_ddc_proxy_xfer(struct i2c_adapter *adapter,
+				     struct i2c_msg *msgs,
+				     int num)
+{
+	struct intel_sdvo *sdvo = adapter->algo_data;
 
+	if (!intel_sdvo_set_control_bus_switch(sdvo, sdvo->ddc_bus))
+		return -EIO;
+
+	return sdvo->i2c->algo->master_xfer(sdvo->i2c, msgs, num);
+}
+
+static u32 intel_sdvo_ddc_proxy_func(struct i2c_adapter *adapter)
+{
+	struct intel_sdvo *sdvo = adapter->algo_data;
+	return sdvo->i2c->algo->functionality(sdvo->i2c);
+}
+
+static const struct i2c_algorithm intel_sdvo_ddc_proxy = {
+	.master_xfer	= intel_sdvo_ddc_proxy_xfer,
+	.functionality	= intel_sdvo_ddc_proxy_func
+};
+
+static bool
+intel_sdvo_init_ddc_proxy(struct intel_sdvo *sdvo,
+			  struct drm_device *dev)
+{
+	sdvo->ddc.owner = THIS_MODULE;
+	sdvo->ddc.class = I2C_CLASS_DDC;
+	snprintf(sdvo->ddc.name, I2C_NAME_SIZE, "SDVO DDC proxy");
+	sdvo->ddc.dev.parent = &dev->pdev->dev;
+	sdvo->ddc.algo_data = sdvo;
+	sdvo->ddc.algo = &intel_sdvo_ddc_proxy;
+
+	return i2c_add_adapter(&sdvo->ddc) == 0;
 }
 
 bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
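
The hunk above registers a second i2c_adapter whose master_xfer first issues the SDVO control-bus-switch command and only then forwards the message batch to the underlying GMBUS adapter. A user-space sketch of that delegation pattern, with stand-in adapter and message types in place of the real i2c core:

    /* ddc_proxy.c: an adapter that selects a downstream bus, then delegates. */
    #include <stdio.h>

    struct msg { const char *payload; };

    struct adapter {
            int (*xfer)(struct adapter *, struct msg *, int);
            void *algo_data;
    };

    struct sdvo {
            struct adapter *i2c;    /* real GMBUS adapter */
            int ddc_bus;            /* downstream bus to select */
    };

    static int gmbus_xfer(struct adapter *a, struct msg *m, int n)
    {
            for (int i = 0; i < n; i++)
                    printf("gmbus: %s\n", m[i].payload);
            return n;
    }

    static int proxy_xfer(struct adapter *a, struct msg *m, int n)
    {
            struct sdvo *s = a->algo_data;

            printf("switch control bus to %d\n", s->ddc_bus); /* bus switch */
            return s->i2c->xfer(s->i2c, m, n);                /* delegate */
    }

    int main(void)
    {
            struct adapter gmbus = { gmbus_xfer, NULL };
            struct sdvo s = { &gmbus, 2 };
            struct adapter ddc = { proxy_xfer, &s };
            struct msg m = { "read EDID block" };

            ddc.xfer(&ddc, &m, 1);
            return 0;
    }
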
@@ -2543,95 +2527,66 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_encoder *intel_encoder;
 	struct intel_sdvo *intel_sdvo;
-	u8 ch[0x40];
 	int i;
-	u32 i2c_reg, ddc_reg, analog_ddc_reg;
 
 	intel_sdvo = kzalloc(sizeof(struct intel_sdvo), GFP_KERNEL);
 	if (!intel_sdvo)
 		return false;
 
+	if (!intel_sdvo_init_ddc_proxy(intel_sdvo, dev)) {
+		kfree(intel_sdvo);
+		return false;
+	}
+
 	intel_sdvo->sdvo_reg = sdvo_reg;
 
 	intel_encoder = &intel_sdvo->base;
 	intel_encoder->type = INTEL_OUTPUT_SDVO;
+	/* encoder type will be decided later */
+	drm_encoder_init(dev, &intel_encoder->base, &intel_sdvo_enc_funcs, 0);
 
-	if (HAS_PCH_SPLIT(dev)) {
-		i2c_reg = PCH_GPIOE;
-		ddc_reg = PCH_GPIOE;
-		analog_ddc_reg = PCH_GPIOA;
-	} else {
-		i2c_reg = GPIOE;
-		ddc_reg = GPIOE;
-		analog_ddc_reg = GPIOA;
-	}
-
-	/* setup the DDC bus. */
-	if (IS_SDVOB(sdvo_reg))
-		intel_encoder->i2c_bus = intel_i2c_create(dev, i2c_reg, "SDVOCTRL_E for SDVOB");
-	else
-		intel_encoder->i2c_bus = intel_i2c_create(dev, i2c_reg, "SDVOCTRL_E for SDVOC");
-
-	if (!intel_encoder->i2c_bus)
-		goto err_inteloutput;
-
-	intel_sdvo->slave_addr = intel_sdvo_get_slave_addr(dev, sdvo_reg);
-
-	/* Save the bit-banging i2c functionality for use by the DDC wrapper */
-	intel_sdvo_i2c_bit_algo.functionality = intel_encoder->i2c_bus->algo->functionality;
+	intel_sdvo->slave_addr = intel_sdvo_get_slave_addr(dev, sdvo_reg) >> 1;
+	intel_sdvo_select_i2c_bus(dev_priv, intel_sdvo, sdvo_reg);
 
 	/* Read the regs to test if we can talk to the device */
 	for (i = 0; i < 0x40; i++) {
-		if (!intel_sdvo_read_byte(intel_sdvo, i, &ch[i])) {
+		u8 byte;
+
+		if (!intel_sdvo_read_byte(intel_sdvo, i, &byte)) {
 			DRM_DEBUG_KMS("No SDVO device found on SDVO%c\n",
 				      IS_SDVOB(sdvo_reg) ? 'B' : 'C');
-			goto err_i2c;
+			goto err;
 		}
 	}
 
-	/* setup the DDC bus. */
-	if (IS_SDVOB(sdvo_reg)) {
-		intel_encoder->ddc_bus = intel_i2c_create(dev, ddc_reg, "SDVOB DDC BUS");
-		intel_sdvo->analog_ddc_bus = intel_i2c_create(dev, analog_ddc_reg,
-							      "SDVOB/VGA DDC BUS");
+	if (IS_SDVOB(sdvo_reg))
 		dev_priv->hotplug_supported_mask |= SDVOB_HOTPLUG_INT_STATUS;
-	} else {
-		intel_encoder->ddc_bus = intel_i2c_create(dev, ddc_reg, "SDVOC DDC BUS");
-		intel_sdvo->analog_ddc_bus = intel_i2c_create(dev, analog_ddc_reg,
-							      "SDVOC/VGA DDC BUS");
+	else
 		dev_priv->hotplug_supported_mask |= SDVOC_HOTPLUG_INT_STATUS;
-	}
-	if (intel_encoder->ddc_bus == NULL || intel_sdvo->analog_ddc_bus == NULL)
-		goto err_i2c;
 
-	/* Wrap with our custom algo which switches to DDC mode */
-	intel_encoder->ddc_bus->algo = &intel_sdvo_i2c_bit_algo;
-
-	/* encoder type will be decided later */
-	drm_encoder_init(dev, &intel_encoder->enc, &intel_sdvo_enc_funcs, 0);
-	drm_encoder_helper_add(&intel_encoder->enc, &intel_sdvo_helper_funcs);
+	drm_encoder_helper_add(&intel_encoder->base, &intel_sdvo_helper_funcs);
 
 	/* In default case sdvo lvds is false */
 	if (!intel_sdvo_get_capabilities(intel_sdvo, &intel_sdvo->caps))
-		goto err_enc;
+		goto err;
 
 	if (intel_sdvo_output_setup(intel_sdvo,
 				    intel_sdvo->caps.output_flags) != true) {
 		DRM_DEBUG_KMS("SDVO output failed to setup on SDVO%c\n",
 			      IS_SDVOB(sdvo_reg) ? 'B' : 'C');
-		goto err_enc;
+		goto err;
 	}
 
 	intel_sdvo_select_ddc_bus(dev_priv, intel_sdvo, sdvo_reg);
 
 	/* Set the input timing to the screen. Assume always input 0. */
 	if (!intel_sdvo_set_target_input(intel_sdvo))
-		goto err_enc;
+		goto err;
 
 	if (!intel_sdvo_get_input_pixel_clock_range(intel_sdvo,
 						    &intel_sdvo->pixel_clock_min,
 						    &intel_sdvo->pixel_clock_max))
-		goto err_enc;
+		goto err;
 
 	DRM_DEBUG_KMS("%s device VID/DID: %02X:%02X.%02X, "
 		      "clock range %dMHz - %dMHz, "
@@ -2651,16 +2606,9 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
 		      (SDVO_OUTPUT_TMDS1 | SDVO_OUTPUT_RGB1) ? 'Y' : 'N');
 	return true;
 
-err_enc:
-	drm_encoder_cleanup(&intel_encoder->enc);
-err_i2c:
-	if (intel_sdvo->analog_ddc_bus != NULL)
-		intel_i2c_destroy(intel_sdvo->analog_ddc_bus);
-	if (intel_encoder->ddc_bus != NULL)
-		intel_i2c_destroy(intel_encoder->ddc_bus);
-	if (intel_encoder->i2c_bus != NULL)
-		intel_i2c_destroy(intel_encoder->i2c_bus);
-err_inteloutput:
+err:
+	drm_encoder_cleanup(&intel_encoder->base);
+	i2c_del_adapter(&intel_sdvo->ddc);
 	kfree(intel_sdvo);
 
 	return false;
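
With the DDC proxy registered before anything else can fail, the old three-label unwind collapses into a single err: path that always has the same two resources to release. A sketch of the idiom, using malloc/free as stand-ins for the real encoder and adapter teardown:

    /* unwind.c: single-label error unwind (sketch). */
    #include <stdio.h>
    #include <stdlib.h>
    #include <stdbool.h>

    static bool init_device(void)
    {
            void *ddc_proxy, *encoder;

            ddc_proxy = malloc(64);         /* acquired first ... */
            if (!ddc_proxy)
                    return false;           /* ... nothing to unwind yet */

            encoder = malloc(64);
            if (!encoder) {
                    free(ddc_proxy);
                    return false;
            }

            if (0 /* pretend a later probe step failed */)
                    goto err;

            printf("initialised\n");
            free(encoder);                  /* demo only: release and exit */
            free(ddc_proxy);
            return true;

    err:    /* every failure past this point has both resources live */
            free(encoder);
            free(ddc_proxy);
            return false;
    }

    int main(void)
    {
            return init_device() ? 0 : 1;
    }
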
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 4a117e318a73..2f7681989316 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -48,7 +48,7 @@ struct intel_tv {
 	struct intel_encoder base;
 
 	int type;
-	char *tv_format;
+	const char *tv_format;
 	int margin[4];
 	u32 save_TV_H_CTL_1;
 	u32 save_TV_H_CTL_2;
@@ -350,7 +350,7 @@ static const struct video_levels component_levels = {
 
 
 struct tv_mode {
-	char *name;
+	const char *name;
 	int clock;
 	int refresh;		/* in millihertz (for precision) */
 	u32 oversample;
@@ -900,7 +900,14 @@ static const struct tv_mode tv_modes[] = {
 
 static struct intel_tv *enc_to_intel_tv(struct drm_encoder *encoder)
 {
-	return container_of(enc_to_intel_encoder(encoder), struct intel_tv, base);
+	return container_of(encoder, struct intel_tv, base.base);
+}
+
+static struct intel_tv *intel_attached_tv(struct drm_connector *connector)
+{
+	return container_of(intel_attached_encoder(connector),
+			    struct intel_tv,
+			    base);
 }
 
 static void
@@ -922,7 +929,7 @@ intel_tv_dpms(struct drm_encoder *encoder, int mode)
 }
 
 static const struct tv_mode *
-intel_tv_mode_lookup (char *tv_format)
+intel_tv_mode_lookup(const char *tv_format)
 {
 	int i;
 
@@ -936,22 +943,23 @@ intel_tv_mode_lookup (char *tv_format)
 }
 
 static const struct tv_mode *
-intel_tv_mode_find (struct intel_tv *intel_tv)
+intel_tv_mode_find(struct intel_tv *intel_tv)
 {
 	return intel_tv_mode_lookup(intel_tv->tv_format);
 }
 
 static enum drm_mode_status
-intel_tv_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode)
+intel_tv_mode_valid(struct drm_connector *connector,
+		    struct drm_display_mode *mode)
 {
-	struct drm_encoder *encoder = intel_attached_encoder(connector);
-	struct intel_tv *intel_tv = enc_to_intel_tv(encoder);
+	struct intel_tv *intel_tv = intel_attached_tv(connector);
 	const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
 
 	/* Ensure TV refresh is close to desired refresh */
 	if (tv_mode && abs(tv_mode->refresh - drm_mode_vrefresh(mode) * 1000)
 				< 1000)
 		return MODE_OK;
+
 	return MODE_CLOCK_RANGE;
 }
 
@@ -1131,7 +1139,7 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
 				  color_conversion->av);
 	}
 
-	if (IS_I965G(dev))
+	if (INTEL_INFO(dev)->gen >= 4)
 		I915_WRITE(TV_CLR_KNOBS, 0x00404000);
 	else
 		I915_WRITE(TV_CLR_KNOBS, 0x00606000);
@@ -1157,12 +1165,12 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
 	I915_WRITE(dspbase_reg, I915_READ(dspbase_reg));
 
 	/* Wait for vblank for the disable to take effect */
-	if (!IS_I9XX(dev))
+	if (IS_GEN2(dev))
 		intel_wait_for_vblank(dev, intel_crtc->pipe);
 
-	I915_WRITE(pipeconf_reg, pipeconf & ~PIPEACONF_ENABLE);
+	I915_WRITE(pipeconf_reg, pipeconf & ~PIPECONF_ENABLE);
 	/* Wait for vblank for the disable to take effect. */
-	intel_wait_for_vblank(dev, intel_crtc->pipe);
+	intel_wait_for_pipe_off(dev, intel_crtc->pipe);
 
 	/* Filter ctl must be set before TV_WIN_SIZE */
 	I915_WRITE(TV_FILTER_CTL_1, TV_AUTO_SCALE);
@@ -1196,7 +1204,7 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
 		I915_WRITE(TV_V_LUMA_0 + (i<<2), tv_mode->filter_table[j++]);
 	for (i = 0; i < 43; i++)
 		I915_WRITE(TV_V_CHROMA_0 + (i<<2), tv_mode->filter_table[j++]);
-	I915_WRITE(TV_DAC, 0);
+	I915_WRITE(TV_DAC, I915_READ(TV_DAC) & TV_DAC_SAVE);
 	I915_WRITE(TV_CTL, tv_ctl);
 }
 
@@ -1228,15 +1236,13 @@ static const struct drm_display_mode reported_modes[] = {
 static int
 intel_tv_detect_type (struct intel_tv *intel_tv)
 {
-	struct drm_encoder *encoder = &intel_tv->base.enc;
+	struct drm_encoder *encoder = &intel_tv->base.base;
 	struct drm_device *dev = encoder->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	unsigned long irqflags;
 	u32 tv_ctl, save_tv_ctl;
 	u32 tv_dac, save_tv_dac;
-	int type = DRM_MODE_CONNECTOR_Unknown;
-
-	tv_dac = I915_READ(TV_DAC);
+	int type;
 
 	/* Disable TV interrupts around load detect or we'll recurse */
 	spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
@@ -1244,19 +1250,14 @@ intel_tv_detect_type (struct intel_tv *intel_tv)
 			     PIPE_HOTPLUG_TV_INTERRUPT_ENABLE);
 	spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
 
-	/*
-	 * Detect TV by polling)
-	 */
-	save_tv_dac = tv_dac;
-	tv_ctl = I915_READ(TV_CTL);
-	save_tv_ctl = tv_ctl;
-	tv_ctl &= ~TV_ENC_ENABLE;
-	tv_ctl &= ~TV_TEST_MODE_MASK;
+	save_tv_dac = tv_dac = I915_READ(TV_DAC);
+	save_tv_ctl = tv_ctl = I915_READ(TV_CTL);
+
+	/* Poll for TV detection */
+	tv_ctl &= ~(TV_ENC_ENABLE | TV_TEST_MODE_MASK);
 	tv_ctl |= TV_TEST_MODE_MONITOR_DETECT;
-	tv_dac &= ~TVDAC_SENSE_MASK;
-	tv_dac &= ~DAC_A_MASK;
-	tv_dac &= ~DAC_B_MASK;
-	tv_dac &= ~DAC_C_MASK;
+
+	tv_dac &= ~(TVDAC_SENSE_MASK | DAC_A_MASK | DAC_B_MASK | DAC_C_MASK);
 	tv_dac |= (TVDAC_STATE_CHG_EN |
 		   TVDAC_A_SENSE_CTL |
 		   TVDAC_B_SENSE_CTL |
@@ -1265,37 +1266,40 @@ intel_tv_detect_type (struct intel_tv *intel_tv)
 		   DAC_A_0_7_V |
 		   DAC_B_0_7_V |
 		   DAC_C_0_7_V);
+
 	I915_WRITE(TV_CTL, tv_ctl);
 	I915_WRITE(TV_DAC, tv_dac);
 	POSTING_READ(TV_DAC);
-	msleep(20);
 
-	tv_dac = I915_READ(TV_DAC);
-	I915_WRITE(TV_DAC, save_tv_dac);
-	I915_WRITE(TV_CTL, save_tv_ctl);
-	POSTING_READ(TV_CTL);
-	msleep(20);
+	intel_wait_for_vblank(intel_tv->base.base.dev,
+			      to_intel_crtc(intel_tv->base.base.crtc)->pipe);
 
-	/*
-	 *  A B C
-	 *  0 1 1 Composite
-	 *  1 0 X svideo
-	 *  0 0 0 Component
-	 */
-	if ((tv_dac & TVDAC_SENSE_MASK) == (TVDAC_B_SENSE | TVDAC_C_SENSE)) {
-		DRM_DEBUG_KMS("Detected Composite TV connection\n");
-		type = DRM_MODE_CONNECTOR_Composite;
-	} else if ((tv_dac & (TVDAC_A_SENSE|TVDAC_B_SENSE)) == TVDAC_A_SENSE) {
-		DRM_DEBUG_KMS("Detected S-Video TV connection\n");
-		type = DRM_MODE_CONNECTOR_SVIDEO;
-	} else if ((tv_dac & TVDAC_SENSE_MASK) == 0) {
-		DRM_DEBUG_KMS("Detected Component TV connection\n");
-		type = DRM_MODE_CONNECTOR_Component;
-	} else {
-		DRM_DEBUG_KMS("No TV connection detected\n");
-		type = -1;
+	type = -1;
+	if (wait_for((tv_dac = I915_READ(TV_DAC)) & TVDAC_STATE_CHG, 20) == 0) {
+		DRM_DEBUG_KMS("TV detected: %x, %x\n", tv_ctl, tv_dac);
+		/*
+		 *  A B C
+		 *  0 1 1 Composite
+		 *  1 0 X svideo
+		 *  0 0 0 Component
+		 */
+		if ((tv_dac & TVDAC_SENSE_MASK) == (TVDAC_B_SENSE | TVDAC_C_SENSE)) {
+			DRM_DEBUG_KMS("Detected Composite TV connection\n");
+			type = DRM_MODE_CONNECTOR_Composite;
+		} else if ((tv_dac & (TVDAC_A_SENSE|TVDAC_B_SENSE)) == TVDAC_A_SENSE) {
+			DRM_DEBUG_KMS("Detected S-Video TV connection\n");
+			type = DRM_MODE_CONNECTOR_SVIDEO;
+		} else if ((tv_dac & TVDAC_SENSE_MASK) == 0) {
+			DRM_DEBUG_KMS("Detected Component TV connection\n");
+			type = DRM_MODE_CONNECTOR_Component;
+		} else {
+			DRM_DEBUG_KMS("Unrecognised TV connection\n");
+		}
 	}
 
+	I915_WRITE(TV_DAC, save_tv_dac & ~TVDAC_STATE_CHG_EN);
+	I915_WRITE(TV_CTL, save_tv_ctl);
+
 	/* Restore interrupt config */
 	spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
 	i915_enable_pipestat(dev_priv, 0, PIPE_HOTPLUG_INTERRUPT_ENABLE |
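
The load-detect decode keys off which DAC legs report a load, per the A/B/C truth table carried in the comment. A sketch of the same decode, assuming hypothetical one-bit sense flags in place of the real TV_DAC masks from i915_reg.h:

    /* dac_sense.c: decode TV DAC load-sense bits (sketch). */
    #include <stdio.h>

    #define SENSE_A 0x1
    #define SENSE_B 0x2
    #define SENSE_C 0x4
    #define SENSE_MASK (SENSE_A | SENSE_B | SENSE_C)

    static const char *decode(unsigned dac)
    {
            if ((dac & SENSE_MASK) == (SENSE_B | SENSE_C))
                    return "Composite";     /* A=0 B=1 C=1 */
            if ((dac & (SENSE_A | SENSE_B)) == SENSE_A)
                    return "S-Video";       /* A=1 B=0 C=X */
            if ((dac & SENSE_MASK) == 0)
                    return "Component";     /* A=0 B=0 C=0 */
            return "unrecognised";
    }

    int main(void)
    {
            printf("%s\n", decode(SENSE_B | SENSE_C));
            return 0;
    }
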
@@ -1311,8 +1315,7 @@ intel_tv_detect_type (struct intel_tv *intel_tv)
  */
 static void intel_tv_find_better_format(struct drm_connector *connector)
 {
-	struct drm_encoder *encoder = intel_attached_encoder(connector);
-	struct intel_tv *intel_tv = enc_to_intel_tv(encoder);
+	struct intel_tv *intel_tv = intel_attached_tv(connector);
 	const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
 	int i;
 
@@ -1344,14 +1347,13 @@ static enum drm_connector_status
 intel_tv_detect(struct drm_connector *connector, bool force)
 {
 	struct drm_display_mode mode;
-	struct drm_encoder *encoder = intel_attached_encoder(connector);
-	struct intel_tv *intel_tv = enc_to_intel_tv(encoder);
+	struct intel_tv *intel_tv = intel_attached_tv(connector);
 	int type;
 
 	mode = reported_modes[0];
 	drm_mode_set_crtcinfo(&mode, CRTC_INTERLACE_HALVE_V);
 
-	if (encoder->crtc && encoder->crtc->enabled) {
+	if (intel_tv->base.base.crtc && intel_tv->base.base.crtc->enabled) {
 		type = intel_tv_detect_type(intel_tv);
 	} else if (force) {
 		struct drm_crtc *crtc;
@@ -1375,11 +1377,10 @@ intel_tv_detect(struct drm_connector *connector, bool force)
 	return connector_status_connected;
 }
 
-static struct input_res {
-	char *name;
+static const struct input_res {
+	const char *name;
 	int w, h;
-} input_res_table[] =
-{
+} input_res_table[] = {
 	{"640x480", 640, 480},
 	{"800x600", 800, 600},
 	{"1024x768", 1024, 768},
@@ -1396,8 +1397,7 @@ static void
 intel_tv_chose_preferred_modes(struct drm_connector *connector,
 			       struct drm_display_mode *mode_ptr)
 {
-	struct drm_encoder *encoder = intel_attached_encoder(connector);
-	struct intel_tv *intel_tv = enc_to_intel_tv(encoder);
+	struct intel_tv *intel_tv = intel_attached_tv(connector);
 	const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
 
 	if (tv_mode->nbr_end < 480 && mode_ptr->vdisplay == 480)
@@ -1422,15 +1422,14 @@ static int
 intel_tv_get_modes(struct drm_connector *connector)
 {
 	struct drm_display_mode *mode_ptr;
-	struct drm_encoder *encoder = intel_attached_encoder(connector);
-	struct intel_tv *intel_tv = enc_to_intel_tv(encoder);
+	struct intel_tv *intel_tv = intel_attached_tv(connector);
 	const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
 	int j, count = 0;
 	u64 tmp;
 
 	for (j = 0; j < ARRAY_SIZE(input_res_table);
 	     j++) {
-		struct input_res *input = &input_res_table[j];
+		const struct input_res *input = &input_res_table[j];
 		unsigned int hactive_s = input->w;
 		unsigned int vactive_s = input->h;
 
@@ -1488,9 +1487,8 @@ intel_tv_set_property(struct drm_connector *connector, struct drm_property *prop
 		      uint64_t val)
 {
 	struct drm_device *dev = connector->dev;
-	struct drm_encoder *encoder = intel_attached_encoder(connector);
-	struct intel_tv *intel_tv = enc_to_intel_tv(encoder);
-	struct drm_crtc *crtc = encoder->crtc;
+	struct intel_tv *intel_tv = intel_attached_tv(connector);
+	struct drm_crtc *crtc = intel_tv->base.base.crtc;
 	int ret = 0;
 	bool changed = false;
 
@@ -1555,7 +1553,7 @@ static const struct drm_connector_funcs intel_tv_connector_funcs = {
 static const struct drm_connector_helper_funcs intel_tv_connector_helper_funcs = {
 	.mode_valid = intel_tv_mode_valid,
 	.get_modes = intel_tv_get_modes,
-	.best_encoder = intel_attached_encoder,
+	.best_encoder = intel_best_encoder,
 };
 
 static const struct drm_encoder_funcs intel_tv_enc_funcs = {
@@ -1607,7 +1605,7 @@ intel_tv_init(struct drm_device *dev)
 	struct intel_encoder *intel_encoder;
 	struct intel_connector *intel_connector;
 	u32 tv_dac_on, tv_dac_off, save_tv_dac;
-	char **tv_format_names;
+	char *tv_format_names[ARRAY_SIZE(tv_modes)];
 	int i, initial_mode = 0;
 
 	if ((I915_READ(TV_CTL) & TV_FUSE_STATE_MASK) == TV_FUSE_STATE_DISABLED)
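Because ARRAY_SIZE() is a compile-time element count, tv_format_names becomes an ordinary automatic array, which is what allows the kmalloc() and its goto-out failure path to be deleted in the later hunks on this page. The kernel macro in <linux/kernel.h> also folds in an array-type check; stripped of that, it is just:

	#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))

	char *tv_format_names[ARRAY_SIZE(tv_modes)];	/* size fixed at build time */

The cost is a small, fixed amount of stack in intel_tv_init(), which runs once at load time.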
@@ -1661,15 +1659,15 @@ intel_tv_init(struct drm_device *dev)
 	drm_connector_init(dev, connector, &intel_tv_connector_funcs,
 			   DRM_MODE_CONNECTOR_SVIDEO);
 
-	drm_encoder_init(dev, &intel_encoder->enc, &intel_tv_enc_funcs,
+	drm_encoder_init(dev, &intel_encoder->base, &intel_tv_enc_funcs,
 			 DRM_MODE_ENCODER_TVDAC);
 
-	drm_mode_connector_attach_encoder(&intel_connector->base, &intel_encoder->enc);
+	intel_connector_attach_encoder(intel_connector, intel_encoder);
 	intel_encoder->type = INTEL_OUTPUT_TVOUT;
 	intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
 	intel_encoder->clone_mask = (1 << INTEL_TV_CLONE_BIT);
-	intel_encoder->enc.possible_crtcs = ((1 << 0) | (1 << 1));
-	intel_encoder->enc.possible_clones = (1 << INTEL_OUTPUT_TVOUT);
+	intel_encoder->base.possible_crtcs = ((1 << 0) | (1 << 1));
+	intel_encoder->base.possible_clones = (1 << INTEL_OUTPUT_TVOUT);
 	intel_tv->type = DRM_MODE_CONNECTOR_Unknown;
 
 	/* BIOS margin values */
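Besides the enc-to-base rename, the open-coded drm_mode_connector_attach_encoder() call gives way to an i915-level wrapper. A minimal sketch, assuming the helper (defined in intel_display.c, not in this file) records the intel_encoder on the intel_connector before delegating to the DRM core:

	void intel_connector_attach_encoder(struct intel_connector *connector,
					    struct intel_encoder *encoder)
	{
		connector->encoder = encoder;	/* cache for later lookups */
		drm_mode_connector_attach_encoder(&connector->base,
						  &encoder->base);
	}

Caching the encoder on the connector is presumably what makes the intel_attached_tv() lookup above cheap.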
@@ -1678,21 +1676,19 @@ intel_tv_init(struct drm_device *dev)
 	intel_tv->margin[TV_MARGIN_RIGHT] = 46;
 	intel_tv->margin[TV_MARGIN_BOTTOM] = 37;
 
-	intel_tv->tv_format = kstrdup(tv_modes[initial_mode].name, GFP_KERNEL);
+	intel_tv->tv_format = tv_modes[initial_mode].name;
 
-	drm_encoder_helper_add(&intel_encoder->enc, &intel_tv_helper_funcs);
+	drm_encoder_helper_add(&intel_encoder->base, &intel_tv_helper_funcs);
 	drm_connector_helper_add(connector, &intel_tv_connector_helper_funcs);
 	connector->interlace_allowed = false;
 	connector->doublescan_allowed = false;
 
 	/* Create TV properties then attach current values */
-	tv_format_names = kmalloc(sizeof(char *) * ARRAY_SIZE(tv_modes),
-				  GFP_KERNEL);
-	if (!tv_format_names)
-		goto out;
 	for (i = 0; i < ARRAY_SIZE(tv_modes); i++)
-		tv_format_names[i] = tv_modes[i].name;
-	drm_mode_create_tv_properties(dev, ARRAY_SIZE(tv_modes), tv_format_names);
+		tv_format_names[i] = (char *)tv_modes[i].name;
+	drm_mode_create_tv_properties(dev,
+				      ARRAY_SIZE(tv_modes),
+				      tv_format_names);
 
 	drm_connector_attach_property(connector, dev->mode_config.tv_mode_property,
 				      initial_mode);
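With tv_format now aliasing an entry in the static tv_modes[] table rather than a kstrdup() copy, the string lives as long as the module: nothing to kfree() on teardown and no allocation that can fail here. The (char *) cast in the loop exists only because drm_mode_create_tv_properties() takes plain char ** while the table names are const. A hedged sketch of that relationship (names abbreviated; the real table carries full timing data):

	static const struct tv_mode {
		const char *name;
		/* timing fields elided */
	} tv_modes[] = {
		{ .name = "NTSC-M" },
		{ .name = "PAL" },
	};

	/* in intel_tv_init(): point at the table entry -- no copy, no kfree() */
	intel_tv->tv_format = tv_modes[initial_mode].name;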
@@ -1708,6 +1704,5 @@ intel_tv_init(struct drm_device *dev)
 	drm_connector_attach_property(connector,
 				      dev->mode_config.tv_bottom_margin_property,
 				      intel_tv->margin[TV_MARGIN_BOTTOM]);
-out:
 	drm_sysfs_connector_add(connector);
 }