author    Dave Airlie <airlied@redhat.com>    2010-10-05 20:11:56 -0400
committer Dave Airlie <airlied@redhat.com>    2010-10-05 20:11:56 -0400
commit    9a170caed6fce89da77852575a7eee7dbadee332 (patch)
tree      489082522869cb382a2dc464ccbd474846693a37 /drivers/gpu/drm/i915
parent    45ff46c54a31bf8924b61e3e3411654410a3b5c3 (diff)
parent    7b4f3990a22fbe800945f12001bc30db374d0af5 (diff)
Merge remote branch 'intel/drm-intel-next' of ../drm-next into drm-core-next
* 'intel/drm-intel-next' of ../drm-next: (266 commits)
  drm/i915: Avoid circular locking from intel_fbdev_fini()
  drm/i915: mark display port DPMS state as 'ON' when enabling output
  drm/i915: Skip pread/pwrite if size to copy is 0.
  drm/i915: avoid struct mutex output_poll mutex lock loop on unload
  drm/i915: Rephrase pwrite bounds checking to avoid any potential overflow
  drm/i915: Sanity check pread/pwrite
  drm/i915: Use pipe state to tell when pipe is off
  drm/i915: vblank status not valid while training display port
  drivers/gpu/drm/i915/i915_gem.c: Add missing error handling code
  drm/i915: Don't mask the return code whilst relocating.
  drm/i915: If the GPU hangs twice within 5 seconds, declare it wedged.
  drm/i915: Only print 'generating error event' if we actually are
  drm/i915: Try to reset gen2 devices.
  drm/i915: Clear fence registers on GPU reset
  drm/i915: Force the domain to CPU on unbinding whilst wedged.
  drm: Move the GTT accounting to i915
  drm/i915: Fix refleak during eviction.
  i915: Added function to initialize VBT settings
  drm/i915: Remove redundant deletion of obj->gpu_write_list
  drm/i915: Make get/put pages static
  ...
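Much of the dvo_*.c churn in the diff below is one mechanical cleanup, repeated across every DVO transmitter driver: the old code recovered the driver-private intel_i2c_chan wrapper via container_of() only to reach the embedded adapter, even though dvo->i2c_bus already is the struct i2c_adapter that i2c_transfer() expects. A minimal sketch of the resulting pattern (the helper name dvo_read_reg is illustrative, not a function in the driver):

    #include <linux/i2c.h>

    /* Write the register index, then read one byte back, talking to the
     * i2c adapter directly -- no container_of() detour. */
    static bool dvo_read_reg(struct i2c_adapter *adapter, u16 slave_addr,
                             u8 addr, u8 *val)
    {
        struct i2c_msg msgs[] = {
            {
                .addr = slave_addr,
                .flags = 0,
                .len = 1,
                .buf = &addr,   /* register index */
            },
            {
                .addr = slave_addr,
                .flags = I2C_M_RD,
                .len = 1,
                .buf = val,     /* register value lands here */
            }
        };
        return i2c_transfer(adapter, msgs, 2) == 2;
    }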
Diffstat (limited to 'drivers/gpu/drm/i915')
-rw-r--r--  drivers/gpu/drm/i915/Makefile | 2
-rw-r--r--  drivers/gpu/drm/i915/dvo_ch7017.c | 66
-rw-r--r--  drivers/gpu/drm/i915/dvo_ch7xxx.c | 10
-rw-r--r--  drivers/gpu/drm/i915/dvo_ivch.c | 10
-rw-r--r--  drivers/gpu/drm/i915/dvo_sil164.c | 10
-rw-r--r--  drivers/gpu/drm/i915/dvo_tfp410.c | 10
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c | 340
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c | 327
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 210
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 226
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 1156
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_debug.c | 148
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_evict.c | 50
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_tiling.c | 52
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 183
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 298
-rw-r--r--  drivers/gpu/drm/i915/i915_suspend.c | 28
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.c | 171
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.h | 3
-rw-r--r--  drivers/gpu/drm/i915/intel_crt.c | 124
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 2303
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c | 272
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h | 126
-rw-r--r--  drivers/gpu/drm/i915/intel_dvo.c | 69
-rw-r--r--  drivers/gpu/drm/i915/intel_fb.c | 29
-rw-r--r--  drivers/gpu/drm/i915/intel_hdmi.c | 61
-rw-r--r--  drivers/gpu/drm/i915/intel_i2c.c | 483
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c | 435
-rw-r--r--  drivers/gpu/drm/i915/intel_modes.c | 16
-rw-r--r--  drivers/gpu/drm/i915/intel_opregion.c (renamed from drivers/gpu/drm/i915/i915_opregion.c) | 181
-rw-r--r--  drivers/gpu/drm/i915/intel_overlay.c | 1006
-rw-r--r--  drivers/gpu/drm/i915/intel_panel.c | 109
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c | 393
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.h | 66
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c | 894
-rw-r--r--  drivers/gpu/drm/i915/intel_tv.c | 165
36 files changed, 5515 insertions, 4517 deletions
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 5c8e53458edb..f6e98dd416c9 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -26,13 +26,13 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o \
 	  intel_dvo.o \
 	  intel_ringbuffer.o \
 	  intel_overlay.o \
+	  intel_opregion.o \
 	  dvo_ch7xxx.o \
 	  dvo_ch7017.o \
 	  dvo_ivch.o \
 	  dvo_tfp410.o \
 	  dvo_sil164.o
 
-i915-$(CONFIG_ACPI) += i915_opregion.o
 i915-$(CONFIG_COMPAT) += i915_ioc32.o
 
 obj-$(CONFIG_DRM_I915) += i915.o
diff --git a/drivers/gpu/drm/i915/dvo_ch7017.c b/drivers/gpu/drm/i915/dvo_ch7017.c
index 14d59804acd7..af70337567ce 100644
--- a/drivers/gpu/drm/i915/dvo_ch7017.c
+++ b/drivers/gpu/drm/i915/dvo_ch7017.c
@@ -165,67 +165,44 @@ struct ch7017_priv {
 static void ch7017_dump_regs(struct intel_dvo_device *dvo);
 static void ch7017_dpms(struct intel_dvo_device *dvo, int mode);
 
-static bool ch7017_read(struct intel_dvo_device *dvo, int addr, uint8_t *val)
+static bool ch7017_read(struct intel_dvo_device *dvo, u8 addr, u8 *val)
 {
-	struct i2c_adapter *adapter = dvo->i2c_bus;
-	struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
-	u8 out_buf[2];
-	u8 in_buf[2];
-
 	struct i2c_msg msgs[] = {
 		{
 			.addr = dvo->slave_addr,
 			.flags = 0,
 			.len = 1,
-			.buf = out_buf,
+			.buf = &addr,
 		},
 		{
 			.addr = dvo->slave_addr,
 			.flags = I2C_M_RD,
 			.len = 1,
-			.buf = in_buf,
+			.buf = val,
 		}
 	};
-
-	out_buf[0] = addr;
-	out_buf[1] = 0;
-
-	if (i2c_transfer(&i2cbus->adapter, msgs, 2) == 2) {
-		*val= in_buf[0];
-		return true;
-	};
-
-	return false;
+	return i2c_transfer(dvo->i2c_bus, msgs, 2) == 2;
 }
 
-static bool ch7017_write(struct intel_dvo_device *dvo, int addr, uint8_t val)
+static bool ch7017_write(struct intel_dvo_device *dvo, u8 addr, u8 val)
 {
-	struct i2c_adapter *adapter = dvo->i2c_bus;
-	struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
-	uint8_t out_buf[2];
+	uint8_t buf[2] = { addr, val };
 	struct i2c_msg msg = {
 		.addr = dvo->slave_addr,
 		.flags = 0,
 		.len = 2,
-		.buf = out_buf,
+		.buf = buf,
 	};
-
-	out_buf[0] = addr;
-	out_buf[1] = val;
-
-	if (i2c_transfer(&i2cbus->adapter, &msg, 1) == 1)
-		return true;
-
-	return false;
+	return i2c_transfer(dvo->i2c_bus, &msg, 1) == 1;
 }
 
 /** Probes for a CH7017 on the given bus and slave address. */
 static bool ch7017_init(struct intel_dvo_device *dvo,
 			struct i2c_adapter *adapter)
 {
-	struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
 	struct ch7017_priv *priv;
-	uint8_t val;
+	const char *str;
+	u8 val;
 
 	priv = kzalloc(sizeof(struct ch7017_priv), GFP_KERNEL);
 	if (priv == NULL)
@@ -237,16 +214,27 @@ static bool ch7017_init(struct intel_dvo_device *dvo,
 	if (!ch7017_read(dvo, CH7017_DEVICE_ID, &val))
 		goto fail;
 
-	if (val != CH7017_DEVICE_ID_VALUE &&
-	    val != CH7018_DEVICE_ID_VALUE &&
-	    val != CH7019_DEVICE_ID_VALUE) {
+	switch (val) {
+	case CH7017_DEVICE_ID_VALUE:
+		str = "ch7017";
+		break;
+	case CH7018_DEVICE_ID_VALUE:
+		str = "ch7018";
+		break;
+	case CH7019_DEVICE_ID_VALUE:
+		str = "ch7019";
+		break;
+	default:
 		DRM_DEBUG_KMS("ch701x not detected, got %d: from %s "
-			      "Slave %d.\n",
-			      val, i2cbus->adapter.name,dvo->slave_addr);
+			      "slave %d.\n",
+			      val, adapter->name,dvo->slave_addr);
 		goto fail;
 	}
 
+	DRM_DEBUG_KMS("%s detected on %s, addr %d\n",
+		      str, adapter->name, dvo->slave_addr);
 	return true;
+
 fail:
 	kfree(priv);
 	return false;
@@ -368,7 +356,7 @@ static void ch7017_dpms(struct intel_dvo_device *dvo, int mode)
 	}
 
 	/* XXX: Should actually wait for update power status somehow */
-	udelay(20000);
+	msleep(20);
 }
 
 static void ch7017_dump_regs(struct intel_dvo_device *dvo)
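One fix above deserves a note: ch7017_dpms() used to busy-wait with udelay(20000). udelay() spins the CPU and is meant for microsecond-scale delays, so a 20 ms spin wastes a core and can exceed per-architecture udelay limits; msleep() sleeps instead. A sketch of the convention (the function is illustrative; the thresholds are the usual kernel guidance, not from this commit):

    #include <linux/delay.h>

    static void example_waits(void)
    {
        udelay(20);     /* busy-wait: fine for ~20us */
        msleep(20);     /* sleeps: the right tool for ~20ms waits,
                         * as ch7017_dpms() now does */
    }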
diff --git a/drivers/gpu/drm/i915/dvo_ch7xxx.c b/drivers/gpu/drm/i915/dvo_ch7xxx.c
index 6f1944b24441..7eaa94e4ff06 100644
--- a/drivers/gpu/drm/i915/dvo_ch7xxx.c
+++ b/drivers/gpu/drm/i915/dvo_ch7xxx.c
@@ -113,7 +113,6 @@ static bool ch7xxx_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
 {
 	struct ch7xxx_priv *ch7xxx= dvo->dev_priv;
 	struct i2c_adapter *adapter = dvo->i2c_bus;
-	struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
 	u8 out_buf[2];
 	u8 in_buf[2];
 
@@ -135,14 +134,14 @@ static bool ch7xxx_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
 	out_buf[0] = addr;
 	out_buf[1] = 0;
 
-	if (i2c_transfer(&i2cbus->adapter, msgs, 2) == 2) {
+	if (i2c_transfer(adapter, msgs, 2) == 2) {
 		*ch = in_buf[0];
 		return true;
 	};
 
 	if (!ch7xxx->quiet) {
 		DRM_DEBUG_KMS("Unable to read register 0x%02x from %s:%02x.\n",
-			  addr, i2cbus->adapter.name, dvo->slave_addr);
+			  addr, adapter->name, dvo->slave_addr);
 	}
 	return false;
 }
@@ -152,7 +151,6 @@ static bool ch7xxx_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
 {
 	struct ch7xxx_priv *ch7xxx = dvo->dev_priv;
 	struct i2c_adapter *adapter = dvo->i2c_bus;
-	struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
 	uint8_t out_buf[2];
 	struct i2c_msg msg = {
 		.addr = dvo->slave_addr,
@@ -164,12 +162,12 @@ static bool ch7xxx_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
 	out_buf[0] = addr;
 	out_buf[1] = ch;
 
-	if (i2c_transfer(&i2cbus->adapter, &msg, 1) == 1)
+	if (i2c_transfer(adapter, &msg, 1) == 1)
 		return true;
 
 	if (!ch7xxx->quiet) {
 		DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d.\n",
-			  addr, i2cbus->adapter.name, dvo->slave_addr);
+			  addr, adapter->name, dvo->slave_addr);
 	}
 
 	return false;
diff --git a/drivers/gpu/drm/i915/dvo_ivch.c b/drivers/gpu/drm/i915/dvo_ivch.c
index a2ec3f487202..a12ed9414cc7 100644
--- a/drivers/gpu/drm/i915/dvo_ivch.c
+++ b/drivers/gpu/drm/i915/dvo_ivch.c
@@ -167,7 +167,6 @@ static bool ivch_read(struct intel_dvo_device *dvo, int addr, uint16_t *data)
 {
 	struct ivch_priv *priv = dvo->dev_priv;
 	struct i2c_adapter *adapter = dvo->i2c_bus;
-	struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
 	u8 out_buf[1];
 	u8 in_buf[2];
 
@@ -193,7 +192,7 @@ static bool ivch_read(struct intel_dvo_device *dvo, int addr, uint16_t *data)
 
 	out_buf[0] = addr;
 
-	if (i2c_transfer(&i2cbus->adapter, msgs, 3) == 3) {
+	if (i2c_transfer(adapter, msgs, 3) == 3) {
 		*data = (in_buf[1] << 8) | in_buf[0];
 		return true;
 	};
@@ -201,7 +200,7 @@ static bool ivch_read(struct intel_dvo_device *dvo, int addr, uint16_t *data)
 	if (!priv->quiet) {
 		DRM_DEBUG_KMS("Unable to read register 0x%02x from "
 			  "%s:%02x.\n",
-			  addr, i2cbus->adapter.name, dvo->slave_addr);
+			  addr, adapter->name, dvo->slave_addr);
 	}
 	return false;
 }
@@ -211,7 +210,6 @@ static bool ivch_write(struct intel_dvo_device *dvo, int addr, uint16_t data)
 {
 	struct ivch_priv *priv = dvo->dev_priv;
 	struct i2c_adapter *adapter = dvo->i2c_bus;
-	struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
 	u8 out_buf[3];
 	struct i2c_msg msg = {
 		.addr = dvo->slave_addr,
@@ -224,12 +222,12 @@ static bool ivch_write(struct intel_dvo_device *dvo, int addr, uint16_t data)
 	out_buf[1] = data & 0xff;
 	out_buf[2] = data >> 8;
 
-	if (i2c_transfer(&i2cbus->adapter, &msg, 1) == 1)
+	if (i2c_transfer(adapter, &msg, 1) == 1)
 		return true;
 
 	if (!priv->quiet) {
 		DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d.\n",
-			  addr, i2cbus->adapter.name, dvo->slave_addr);
+			  addr, adapter->name, dvo->slave_addr);
 	}
 
 	return false;
diff --git a/drivers/gpu/drm/i915/dvo_sil164.c b/drivers/gpu/drm/i915/dvo_sil164.c
index 9b8e6765cf26..e4b4091df942 100644
--- a/drivers/gpu/drm/i915/dvo_sil164.c
+++ b/drivers/gpu/drm/i915/dvo_sil164.c
@@ -69,7 +69,6 @@ static bool sil164_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
 {
 	struct sil164_priv *sil = dvo->dev_priv;
 	struct i2c_adapter *adapter = dvo->i2c_bus;
-	struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
 	u8 out_buf[2];
 	u8 in_buf[2];
 
@@ -91,14 +90,14 @@ static bool sil164_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
 	out_buf[0] = addr;
 	out_buf[1] = 0;
 
-	if (i2c_transfer(&i2cbus->adapter, msgs, 2) == 2) {
+	if (i2c_transfer(adapter, msgs, 2) == 2) {
 		*ch = in_buf[0];
 		return true;
 	};
 
 	if (!sil->quiet) {
 		DRM_DEBUG_KMS("Unable to read register 0x%02x from %s:%02x.\n",
-			  addr, i2cbus->adapter.name, dvo->slave_addr);
+			  addr, adapter->name, dvo->slave_addr);
 	}
 	return false;
 }
@@ -107,7 +106,6 @@ static bool sil164_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
 {
 	struct sil164_priv *sil= dvo->dev_priv;
 	struct i2c_adapter *adapter = dvo->i2c_bus;
-	struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
 	uint8_t out_buf[2];
 	struct i2c_msg msg = {
 		.addr = dvo->slave_addr,
@@ -119,12 +117,12 @@ static bool sil164_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
 	out_buf[0] = addr;
 	out_buf[1] = ch;
 
-	if (i2c_transfer(&i2cbus->adapter, &msg, 1) == 1)
+	if (i2c_transfer(adapter, &msg, 1) == 1)
 		return true;
 
 	if (!sil->quiet) {
 		DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d.\n",
-			  addr, i2cbus->adapter.name, dvo->slave_addr);
+			  addr, adapter->name, dvo->slave_addr);
 	}
 
 	return false;
diff --git a/drivers/gpu/drm/i915/dvo_tfp410.c b/drivers/gpu/drm/i915/dvo_tfp410.c
index 56f66426207f..8ab2855bb544 100644
--- a/drivers/gpu/drm/i915/dvo_tfp410.c
+++ b/drivers/gpu/drm/i915/dvo_tfp410.c
@@ -94,7 +94,6 @@ static bool tfp410_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
 {
 	struct tfp410_priv *tfp = dvo->dev_priv;
 	struct i2c_adapter *adapter = dvo->i2c_bus;
-	struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
 	u8 out_buf[2];
 	u8 in_buf[2];
 
@@ -116,14 +115,14 @@ static bool tfp410_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
 	out_buf[0] = addr;
 	out_buf[1] = 0;
 
-	if (i2c_transfer(&i2cbus->adapter, msgs, 2) == 2) {
+	if (i2c_transfer(adapter, msgs, 2) == 2) {
 		*ch = in_buf[0];
 		return true;
 	};
 
 	if (!tfp->quiet) {
 		DRM_DEBUG_KMS("Unable to read register 0x%02x from %s:%02x.\n",
-			  addr, i2cbus->adapter.name, dvo->slave_addr);
+			  addr, adapter->name, dvo->slave_addr);
 	}
 	return false;
 }
@@ -132,7 +131,6 @@ static bool tfp410_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
 {
 	struct tfp410_priv *tfp = dvo->dev_priv;
 	struct i2c_adapter *adapter = dvo->i2c_bus;
-	struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
 	uint8_t out_buf[2];
 	struct i2c_msg msg = {
 		.addr = dvo->slave_addr,
@@ -144,12 +142,12 @@ static bool tfp410_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
 	out_buf[0] = addr;
 	out_buf[1] = ch;
 
-	if (i2c_transfer(&i2cbus->adapter, &msg, 1) == 1)
+	if (i2c_transfer(adapter, &msg, 1) == 1)
 		return true;
 
 	if (!tfp->quiet) {
 		DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d.\n",
-			  addr, i2cbus->adapter.name, dvo->slave_addr);
+			  addr, adapter->name, dvo->slave_addr);
 	}
 
 	return false;
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 5e43d7076789..d598070fb279 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -40,9 +40,51 @@
 
 #if defined(CONFIG_DEBUG_FS)
 
-#define ACTIVE_LIST	1
-#define FLUSHING_LIST	2
-#define INACTIVE_LIST	3
+enum {
+	RENDER_LIST,
+	BSD_LIST,
+	FLUSHING_LIST,
+	INACTIVE_LIST,
+	PINNED_LIST,
+	DEFERRED_FREE_LIST,
+};
+
+static const char *yesno(int v)
+{
+	return v ? "yes" : "no";
+}
+
+static int i915_capabilities(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	const struct intel_device_info *info = INTEL_INFO(dev);
+
+	seq_printf(m, "gen: %d\n", info->gen);
+#define B(x) seq_printf(m, #x ": %s\n", yesno(info->x))
+	B(is_mobile);
+	B(is_i85x);
+	B(is_i915g);
+	B(is_i945gm);
+	B(is_g33);
+	B(need_gfx_hws);
+	B(is_g4x);
+	B(is_pineview);
+	B(is_broadwater);
+	B(is_crestline);
+	B(is_ironlake);
+	B(has_fbc);
+	B(has_rc6);
+	B(has_pipe_cxsr);
+	B(has_hotplug);
+	B(cursor_needs_physical);
+	B(has_overlay);
+	B(overlay_needs_physical);
+	B(supports_tv);
+#undef B
+
+	return 0;
+}
 
 static const char *get_pin_flag(struct drm_i915_gem_object *obj_priv)
 {
@@ -64,6 +106,27 @@ static const char *get_tiling_flag(struct drm_i915_gem_object *obj_priv)
 	}
 }
 
+static void
+describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
+{
+	seq_printf(m, "%p: %s%s %8zd %08x %08x %d%s%s",
+		   &obj->base,
+		   get_pin_flag(obj),
+		   get_tiling_flag(obj),
+		   obj->base.size,
+		   obj->base.read_domains,
+		   obj->base.write_domain,
+		   obj->last_rendering_seqno,
+		   obj->dirty ? " dirty" : "",
+		   obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
+	if (obj->base.name)
+		seq_printf(m, " (name: %d)", obj->base.name);
+	if (obj->fence_reg != I915_FENCE_REG_NONE)
+		seq_printf(m, " (fence: %d)", obj->fence_reg);
+	if (obj->gtt_space != NULL)
+		seq_printf(m, " (gtt_offset: %08x)", obj->gtt_offset);
+}
+
 static int i915_gem_object_list_info(struct seq_file *m, void *data)
 {
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -72,56 +135,84 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
 	struct drm_device *dev = node->minor->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj_priv;
-	spinlock_t *lock = NULL;
+	size_t total_obj_size, total_gtt_size;
+	int count, ret;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
 
 	switch (list) {
-	case ACTIVE_LIST:
-		seq_printf(m, "Active:\n");
-		lock = &dev_priv->mm.active_list_lock;
+	case RENDER_LIST:
+		seq_printf(m, "Render:\n");
 		head = &dev_priv->render_ring.active_list;
 		break;
+	case BSD_LIST:
+		seq_printf(m, "BSD:\n");
+		head = &dev_priv->bsd_ring.active_list;
+		break;
 	case INACTIVE_LIST:
 		seq_printf(m, "Inactive:\n");
 		head = &dev_priv->mm.inactive_list;
 		break;
+	case PINNED_LIST:
+		seq_printf(m, "Pinned:\n");
+		head = &dev_priv->mm.pinned_list;
+		break;
 	case FLUSHING_LIST:
 		seq_printf(m, "Flushing:\n");
 		head = &dev_priv->mm.flushing_list;
 		break;
+	case DEFERRED_FREE_LIST:
+		seq_printf(m, "Deferred free:\n");
+		head = &dev_priv->mm.deferred_free_list;
+		break;
 	default:
-		DRM_INFO("Ooops, unexpected list\n");
-		return 0;
+		mutex_unlock(&dev->struct_mutex);
+		return -EINVAL;
 	}
 
-	if (lock)
-		spin_lock(lock);
-	list_for_each_entry(obj_priv, head, list)
-	{
-		seq_printf(m, "    %p: %s %8zd %08x %08x %d%s%s",
-			   &obj_priv->base,
-			   get_pin_flag(obj_priv),
-			   obj_priv->base.size,
-			   obj_priv->base.read_domains,
-			   obj_priv->base.write_domain,
-			   obj_priv->last_rendering_seqno,
-			   obj_priv->dirty ? " dirty" : "",
-			   obj_priv->madv == I915_MADV_DONTNEED ? " purgeable" : "");
-
-		if (obj_priv->base.name)
-			seq_printf(m, " (name: %d)", obj_priv->base.name);
-		if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
-			seq_printf(m, " (fence: %d)", obj_priv->fence_reg);
-		if (obj_priv->gtt_space != NULL)
-			seq_printf(m, " (gtt_offset: %08x)", obj_priv->gtt_offset);
-
+	total_obj_size = total_gtt_size = count = 0;
+	list_for_each_entry(obj_priv, head, list) {
+		seq_printf(m, "   ");
+		describe_obj(m, obj_priv);
 		seq_printf(m, "\n");
+		total_obj_size += obj_priv->base.size;
+		total_gtt_size += obj_priv->gtt_space->size;
+		count++;
 	}
+	mutex_unlock(&dev->struct_mutex);
 
-	if (lock)
-		spin_unlock(lock);
+	seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
+		   count, total_obj_size, total_gtt_size);
 	return 0;
 }
 
+static int i915_gem_object_info(struct seq_file *m, void* data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+
+	seq_printf(m, "%u objects\n", dev_priv->mm.object_count);
+	seq_printf(m, "%zu object bytes\n", dev_priv->mm.object_memory);
+	seq_printf(m, "%u pinned\n", dev_priv->mm.pin_count);
+	seq_printf(m, "%zu pin bytes\n", dev_priv->mm.pin_memory);
+	seq_printf(m, "%u objects in gtt\n", dev_priv->mm.gtt_count);
+	seq_printf(m, "%zu gtt bytes\n", dev_priv->mm.gtt_memory);
+	seq_printf(m, "%zu gtt total\n", dev_priv->mm.gtt_total);
+
+	mutex_unlock(&dev->struct_mutex);
+
+	return 0;
+}
+
+
 static int i915_gem_pageflip_info(struct seq_file *m, void *data)
 {
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -176,6 +267,11 @@ static int i915_gem_request_info(struct seq_file *m, void *data)
 	struct drm_device *dev = node->minor->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_gem_request *gem_request;
+	int ret;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
 
 	seq_printf(m, "Request:\n");
 	list_for_each_entry(gem_request, &dev_priv->render_ring.request_list,
@@ -184,6 +280,8 @@ static int i915_gem_request_info(struct seq_file *m, void *data)
 			   gem_request->seqno,
 			   (int) (jiffies - gem_request->emitted_jiffies));
 	}
+	mutex_unlock(&dev->struct_mutex);
+
 	return 0;
 }
 
@@ -192,16 +290,24 @@ static int i915_gem_seqno_info(struct seq_file *m, void *data)
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	int ret;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
 
 	if (dev_priv->render_ring.status_page.page_addr != NULL) {
 		seq_printf(m, "Current sequence: %d\n",
-			   i915_get_gem_seqno(dev, &dev_priv->render_ring));
+			   dev_priv->render_ring.get_seqno(dev, &dev_priv->render_ring));
 	} else {
 		seq_printf(m, "Current sequence: hws uninitialized\n");
 	}
 	seq_printf(m, "Waiter sequence: %d\n",
 		   dev_priv->mm.waiting_gem_seqno);
 	seq_printf(m, "IRQ sequence: %d\n", dev_priv->mm.irq_gem_seqno);
+
+	mutex_unlock(&dev->struct_mutex);
+
 	return 0;
 }
 
@@ -211,6 +317,11 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	int ret;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
 
 	if (!HAS_PCH_SPLIT(dev)) {
 		seq_printf(m, "Interrupt enable: %08x\n",
@@ -247,7 +358,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
 		   atomic_read(&dev_priv->irq_received));
 	if (dev_priv->render_ring.status_page.page_addr != NULL) {
 		seq_printf(m, "Current sequence: %d\n",
-			   i915_get_gem_seqno(dev, &dev_priv->render_ring));
+			   dev_priv->render_ring.get_seqno(dev, &dev_priv->render_ring));
 	} else {
 		seq_printf(m, "Current sequence: hws uninitialized\n");
 	}
@@ -255,6 +366,8 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
 		   dev_priv->mm.waiting_gem_seqno);
 	seq_printf(m, "IRQ sequence: %d\n",
 		   dev_priv->mm.irq_gem_seqno);
+	mutex_unlock(&dev->struct_mutex);
+
 	return 0;
 }
 
@@ -263,7 +376,11 @@ static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	int i;
+	int i, ret;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
 
 	seq_printf(m, "Reserved fences = %d\n", dev_priv->fence_reg_start);
 	seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
@@ -289,6 +406,7 @@ static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
 			seq_printf(m, "\n");
 		}
 	}
+	mutex_unlock(&dev->struct_mutex);
 
 	return 0;
 }
@@ -313,16 +431,19 @@ static int i915_hws_info(struct seq_file *m, void *data)
 	return 0;
 }
 
-static void i915_dump_pages(struct seq_file *m, struct page **pages, int page_count)
+static void i915_dump_object(struct seq_file *m,
+			     struct io_mapping *mapping,
+			     struct drm_i915_gem_object *obj_priv)
 {
-	int page, i;
-	uint32_t *mem;
+	int page, page_count, i;
 
+	page_count = obj_priv->base.size / PAGE_SIZE;
 	for (page = 0; page < page_count; page++) {
-		mem = kmap_atomic(pages[page], KM_USER0);
+		u32 *mem = io_mapping_map_wc(mapping,
+					     obj_priv->gtt_offset + page * PAGE_SIZE);
 		for (i = 0; i < PAGE_SIZE; i += 4)
 			seq_printf(m, "%08x : %08x\n", i, mem[i / 4]);
-		kunmap_atomic(mem, KM_USER0);
+		io_mapping_unmap(mem);
 	}
 }
 
@@ -335,27 +456,21 @@ static int i915_batchbuffer_info(struct seq_file *m, void *data)
 	struct drm_i915_gem_object *obj_priv;
 	int ret;
 
-	spin_lock(&dev_priv->mm.active_list_lock);
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
 
 	list_for_each_entry(obj_priv, &dev_priv->render_ring.active_list,
 			list) {
 		obj = &obj_priv->base;
 		if (obj->read_domains & I915_GEM_DOMAIN_COMMAND) {
-			ret = i915_gem_object_get_pages(obj, 0);
-			if (ret) {
-				DRM_ERROR("Failed to get pages: %d\n", ret);
-				spin_unlock(&dev_priv->mm.active_list_lock);
-				return ret;
-			}
-
-			seq_printf(m, "--- gtt_offset = 0x%08x\n", obj_priv->gtt_offset);
-			i915_dump_pages(m, obj_priv->pages, obj->size / PAGE_SIZE);
-
-			i915_gem_object_put_pages(obj);
+			seq_printf(m, "--- gtt_offset = 0x%08x\n",
+				   obj_priv->gtt_offset);
+			i915_dump_object(m, dev_priv->mm.gtt_mapping, obj_priv);
 		}
 	}
 
-	spin_unlock(&dev_priv->mm.active_list_lock);
+	mutex_unlock(&dev->struct_mutex);
 
 	return 0;
 }
@@ -365,20 +480,24 @@ static int i915_ringbuffer_data(struct seq_file *m, void *data)
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	u8 *virt;
-	uint32_t *ptr, off;
+	int ret;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
 
 	if (!dev_priv->render_ring.gem_object) {
 		seq_printf(m, "No ringbuffer setup\n");
-		return 0;
-	}
-
-	virt = dev_priv->render_ring.virtual_start;
+	} else {
+		u8 *virt = dev_priv->render_ring.virtual_start;
+		uint32_t off;
 
-	for (off = 0; off < dev_priv->render_ring.size; off += 4) {
-		ptr = (uint32_t *)(virt + off);
-		seq_printf(m, "%08x : %08x\n", off, *ptr);
+		for (off = 0; off < dev_priv->render_ring.size; off += 4) {
+			uint32_t *ptr = (uint32_t *)(virt + off);
+			seq_printf(m, "%08x : %08x\n", off, *ptr);
+		}
 	}
+	mutex_unlock(&dev->struct_mutex);
 
 	return 0;
 }
@@ -396,7 +515,7 @@ static int i915_ringbuffer_info(struct seq_file *m, void *data)
 	seq_printf(m, "RingHead : %08x\n", head);
 	seq_printf(m, "RingTail : %08x\n", tail);
 	seq_printf(m, "RingSize : %08lx\n", dev_priv->render_ring.size);
-	seq_printf(m, "Acthd : %08x\n", I915_READ(IS_I965G(dev) ? ACTHD_I965 : ACTHD));
+	seq_printf(m, "Acthd : %08x\n", I915_READ(INTEL_INFO(dev)->gen >= 4 ? ACTHD_I965 : ACTHD));
 
 	return 0;
 }
@@ -458,7 +577,7 @@ static int i915_error_state(struct seq_file *m, void *unused)
 	seq_printf(m, "  IPEHR: 0x%08x\n", error->ipehr);
 	seq_printf(m, "  INSTDONE: 0x%08x\n", error->instdone);
 	seq_printf(m, "  ACTHD: 0x%08x\n", error->acthd);
-	if (IS_I965G(dev)) {
+	if (INTEL_INFO(dev)->gen >= 4) {
 		seq_printf(m, "  INSTPS: 0x%08x\n", error->instps);
 		seq_printf(m, "  INSTDONE1: 0x%08x\n", error->instdone1);
 	}
@@ -642,6 +761,9 @@ static int i915_fbc_status(struct seq_file *m, void *unused)
 	} else {
 		seq_printf(m, "FBC disabled: ");
 		switch (dev_priv->no_fbc_reason) {
+		case FBC_NO_OUTPUT:
+			seq_printf(m, "no outputs");
+			break;
 		case FBC_STOLEN_TOO_SMALL:
 			seq_printf(m, "not enough stolen memory");
 			break;
@@ -675,15 +797,17 @@ static int i915_sr_status(struct seq_file *m, void *unused)
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	bool sr_enabled = false;
 
-	if (IS_I965GM(dev) || IS_I945G(dev) || IS_I945GM(dev))
+	if (IS_IRONLAKE(dev))
+		sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
+	else if (IS_CRESTLINE(dev) || IS_I945G(dev) || IS_I945GM(dev))
 		sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
 	else if (IS_I915GM(dev))
 		sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
 	else if (IS_PINEVIEW(dev))
 		sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
 
-	seq_printf(m, "self-refresh: %s\n", sr_enabled ? "enabled" :
-		   "disabled");
+	seq_printf(m, "self-refresh: %s\n",
+		   sr_enabled ? "enabled" : "disabled");
 
 	return 0;
 }
@@ -694,10 +818,16 @@ static int i915_emon_status(struct seq_file *m, void *unused)
 	struct drm_device *dev = node->minor->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	unsigned long temp, chipset, gfx;
+	int ret;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
 
 	temp = i915_mch_val(dev_priv);
 	chipset = i915_chipset_val(dev_priv);
 	gfx = i915_gfx_val(dev_priv);
+	mutex_unlock(&dev->struct_mutex);
 
 	seq_printf(m, "GMCH temp: %ld\n", temp);
 	seq_printf(m, "Chipset power: %ld\n", chipset);
@@ -718,6 +848,68 @@ static int i915_gfxec(struct seq_file *m, void *unused)
 	return 0;
 }
 
+static int i915_opregion(struct seq_file *m, void *unused)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct intel_opregion *opregion = &dev_priv->opregion;
+	int ret;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+
+	if (opregion->header)
+		seq_write(m, opregion->header, OPREGION_SIZE);
+
+	mutex_unlock(&dev->struct_mutex);
+
+	return 0;
+}
+
+static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct intel_fbdev *ifbdev;
+	struct intel_framebuffer *fb;
+	int ret;
+
+	ret = mutex_lock_interruptible(&dev->mode_config.mutex);
+	if (ret)
+		return ret;
+
+	ifbdev = dev_priv->fbdev;
+	fb = to_intel_framebuffer(ifbdev->helper.fb);
+
+	seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, obj ",
+		   fb->base.width,
+		   fb->base.height,
+		   fb->base.depth,
+		   fb->base.bits_per_pixel);
+	describe_obj(m, to_intel_bo(fb->obj));
+	seq_printf(m, "\n");
+
+	list_for_each_entry(fb, &dev->mode_config.fb_list, base.head) {
+		if (&fb->base == ifbdev->helper.fb)
+			continue;
+
+		seq_printf(m, "user size: %d x %d, depth %d, %d bpp, obj ",
+			   fb->base.width,
+			   fb->base.height,
+			   fb->base.depth,
+			   fb->base.bits_per_pixel);
+		describe_obj(m, to_intel_bo(fb->obj));
+		seq_printf(m, "\n");
+	}
+
+	mutex_unlock(&dev->mode_config.mutex);
+
+	return 0;
+}
+
 static int
 i915_wedged_open(struct inode *inode,
 		 struct file *filp)
@@ -741,6 +933,9 @@ i915_wedged_read(struct file *filp,
741 "wedged : %d\n", 933 "wedged : %d\n",
742 atomic_read(&dev_priv->mm.wedged)); 934 atomic_read(&dev_priv->mm.wedged));
743 935
936 if (len > sizeof (buf))
937 len = sizeof (buf);
938
744 return simple_read_from_buffer(ubuf, max, ppos, buf, len); 939 return simple_read_from_buffer(ubuf, max, ppos, buf, len);
745} 940}
746 941
@@ -770,7 +965,7 @@ i915_wedged_write(struct file *filp,
770 965
771 atomic_set(&dev_priv->mm.wedged, val); 966 atomic_set(&dev_priv->mm.wedged, val);
772 if (val) { 967 if (val) {
773 DRM_WAKEUP(&dev_priv->irq_queue); 968 wake_up_all(&dev_priv->irq_queue);
774 queue_work(dev_priv->wq, &dev_priv->error_work); 969 queue_work(dev_priv->wq, &dev_priv->error_work);
775 } 970 }
776 971
@@ -823,9 +1018,14 @@ static int i915_wedged_create(struct dentry *root, struct drm_minor *minor)
 }
 
 static struct drm_info_list i915_debugfs_list[] = {
-	{"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
+	{"i915_capabilities", i915_capabilities, 0, 0},
+	{"i915_gem_objects", i915_gem_object_info, 0},
+	{"i915_gem_render_active", i915_gem_object_list_info, 0, (void *) RENDER_LIST},
+	{"i915_gem_bsd_active", i915_gem_object_list_info, 0, (void *) BSD_LIST},
 	{"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST},
 	{"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
+	{"i915_gem_pinned", i915_gem_object_list_info, 0, (void *) PINNED_LIST},
+	{"i915_gem_deferred_free", i915_gem_object_list_info, 0, (void *) DEFERRED_FREE_LIST},
 	{"i915_gem_pageflip", i915_gem_pageflip_info, 0},
 	{"i915_gem_request", i915_gem_request_info, 0},
 	{"i915_gem_seqno", i915_gem_seqno_info, 0},
@@ -845,6 +1045,8 @@ static struct drm_info_list i915_debugfs_list[] = {
 	{"i915_gfxec", i915_gfxec, 0},
 	{"i915_fbc_status", i915_fbc_status, 0},
 	{"i915_sr_status", i915_sr_status, 0},
+	{"i915_opregion", i915_opregion, 0},
+	{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
 };
 #define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
 
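Each debugfs entry added above follows the same seq_file shape: recover the drm_device from the drm_info_node stashed in m->private, take struct_mutex with mutex_lock_interruptible() so a reader can be killed if a wedged GPU holds the lock, print, unlock. A minimal sketch (the entry name and printed field are illustrative, not part of the commit):

    static int i915_example_info(struct seq_file *m, void *data)
    {
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        int ret;

        /* Interruptible: don't leave the reader unkillable if the
         * GPU wedges while holding struct_mutex. */
        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
            return ret;

        seq_printf(m, "suspended: %d\n", dev_priv->mm.suspended);

        mutex_unlock(&dev->struct_mutex);
        return 0;
    }

Such a function would then be listed in i915_debugfs_list alongside the entries registered above.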
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 9d67b4853030..726c3736082f 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -40,8 +40,7 @@
 #include <linux/pnp.h>
 #include <linux/vga_switcheroo.h>
 #include <linux/slab.h>
-
-extern int intel_max_stolen; /* from AGP driver */
+#include <acpi/video.h>
 
 /**
  * Sets up the hardware status page for devices that need a physical address
@@ -64,7 +63,7 @@ static int i915_init_phys_hws(struct drm_device *dev)
 
 	memset(dev_priv->render_ring.status_page.page_addr, 0, PAGE_SIZE);
 
-	if (IS_I965G(dev))
+	if (INTEL_INFO(dev)->gen >= 4)
 		dev_priv->dma_status_page |= (dev_priv->dma_status_page >> 28) &
 					     0xf0;
 
@@ -222,7 +221,7 @@ static int i915_dma_resume(struct drm_device * dev)
 	DRM_DEBUG_DRIVER("hw status page @ %p\n",
 				ring->status_page.page_addr);
 	if (ring->status_page.gfx_addr != 0)
-		ring->setup_status_page(dev, ring);
+		intel_ring_setup_status_page(dev, ring);
 	else
 		I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
 
@@ -377,7 +376,7 @@ i915_emit_box(struct drm_device *dev,
 		return -EINVAL;
 	}
 
-	if (IS_I965G(dev)) {
+	if (INTEL_INFO(dev)->gen >= 4) {
 		BEGIN_LP_RING(4);
 		OUT_RING(GFX_OP_DRAWRECT_INFO_I965);
 		OUT_RING((box.x1 & 0xffff) | (box.y1 << 16));
@@ -481,7 +480,7 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev,
 
 	if (!IS_I830(dev) && !IS_845G(dev)) {
 		BEGIN_LP_RING(2);
-		if (IS_I965G(dev)) {
+		if (INTEL_INFO(dev)->gen >= 4) {
 			OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965);
 			OUT_RING(batch->start);
 		} else {
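The IS_I965G() conversions running through this file replace scattered per-chipset macros with a single generation number kept in the per-device capability struct -- the same struct the new i915_capabilities debugfs entry dumps. A sketch of the idea (the field set shown is illustrative; only gen and the INTEL_INFO() accessor are taken from the diff):

    struct intel_device_info {
        u8 gen;                 /* hardware generation: 2, 3, 4, ... */
        u8 is_mobile:1;
        u8 has_fbc:1;
        u8 has_hotplug:1;
    };

    #define INTEL_INFO(dev) (((drm_i915_private_t *) (dev)->dev_private)->info)

    /* One range check replaces a pile of chipset tests: */
    static int mchbar_reg_for(struct drm_device *dev)
    {
        return INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
    }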
@@ -888,12 +887,12 @@ static int
 intel_alloc_mchbar_resource(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	int reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915;
+	int reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
 	u32 temp_lo, temp_hi = 0;
 	u64 mchbar_addr;
 	int ret;
 
-	if (IS_I965G(dev))
+	if (INTEL_INFO(dev)->gen >= 4)
 		pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
 	pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo);
 	mchbar_addr = ((u64)temp_hi << 32) | temp_lo;
@@ -920,7 +919,7 @@ intel_alloc_mchbar_resource(struct drm_device *dev)
 		return ret;
 	}
 
-	if (IS_I965G(dev))
+	if (INTEL_INFO(dev)->gen >= 4)
 		pci_write_config_dword(dev_priv->bridge_dev, reg + 4,
 				       upper_32_bits(dev_priv->mch_res.start));
 
@@ -934,7 +933,7 @@ static void
 intel_setup_mchbar(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	int mchbar_reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915;
+	int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
 	u32 temp;
 	bool enabled;
 
@@ -971,7 +970,7 @@ static void
 intel_teardown_mchbar(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	int mchbar_reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915;
+	int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
 	u32 temp;
 
 	if (dev_priv->mchbar_need_disable) {
@@ -990,174 +989,6 @@ intel_teardown_mchbar(struct drm_device *dev)
 	release_resource(&dev_priv->mch_res);
 }
 
-/**
- * i915_probe_agp - get AGP bootup configuration
- * @pdev: PCI device
- * @aperture_size: returns AGP aperture configured size
- * @preallocated_size: returns size of BIOS preallocated AGP space
- *
- * Since Intel integrated graphics are UMA, the BIOS has to set aside
- * some RAM for the framebuffer at early boot. This code figures out
- * how much was set aside so we can use it for our own purposes.
- */
-static int i915_probe_agp(struct drm_device *dev, uint32_t *aperture_size,
-			  uint32_t *preallocated_size,
-			  uint32_t *start)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	u16 tmp = 0;
-	unsigned long overhead;
-	unsigned long stolen;
-
-	/* Get the fb aperture size and "stolen" memory amount. */
-	pci_read_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, &tmp);
-
-	*aperture_size = 1024 * 1024;
-	*preallocated_size = 1024 * 1024;
-
-	switch (dev->pdev->device) {
-	case PCI_DEVICE_ID_INTEL_82830_CGC:
-	case PCI_DEVICE_ID_INTEL_82845G_IG:
-	case PCI_DEVICE_ID_INTEL_82855GM_IG:
-	case PCI_DEVICE_ID_INTEL_82865_IG:
-		if ((tmp & INTEL_GMCH_MEM_MASK) == INTEL_GMCH_MEM_64M)
-			*aperture_size *= 64;
-		else
-			*aperture_size *= 128;
-		break;
-	default:
-		/* 9xx supports large sizes, just look at the length */
-		*aperture_size = pci_resource_len(dev->pdev, 2);
-		break;
-	}
-
-	/*
-	 * Some of the preallocated space is taken by the GTT
-	 * and popup. GTT is 1K per MB of aperture size, and popup is 4K.
-	 */
-	if (IS_G4X(dev) || IS_PINEVIEW(dev) || IS_IRONLAKE(dev) || IS_GEN6(dev))
-		overhead = 4096;
-	else
-		overhead = (*aperture_size / 1024) + 4096;
-
-	if (IS_GEN6(dev)) {
-		/* SNB has memory control reg at 0x50.w */
-		pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &tmp);
-
-		switch (tmp & SNB_GMCH_GMS_STOLEN_MASK) {
-		case INTEL_855_GMCH_GMS_DISABLED:
-			DRM_ERROR("video memory is disabled\n");
-			return -1;
-		case SNB_GMCH_GMS_STOLEN_32M:
-			stolen = 32 * 1024 * 1024;
-			break;
-		case SNB_GMCH_GMS_STOLEN_64M:
-			stolen = 64 * 1024 * 1024;
-			break;
-		case SNB_GMCH_GMS_STOLEN_96M:
-			stolen = 96 * 1024 * 1024;
-			break;
-		case SNB_GMCH_GMS_STOLEN_128M:
-			stolen = 128 * 1024 * 1024;
-			break;
-		case SNB_GMCH_GMS_STOLEN_160M:
-			stolen = 160 * 1024 * 1024;
-			break;
-		case SNB_GMCH_GMS_STOLEN_192M:
-			stolen = 192 * 1024 * 1024;
-			break;
-		case SNB_GMCH_GMS_STOLEN_224M:
-			stolen = 224 * 1024 * 1024;
-			break;
-		case SNB_GMCH_GMS_STOLEN_256M:
-			stolen = 256 * 1024 * 1024;
-			break;
-		case SNB_GMCH_GMS_STOLEN_288M:
-			stolen = 288 * 1024 * 1024;
-			break;
-		case SNB_GMCH_GMS_STOLEN_320M:
-			stolen = 320 * 1024 * 1024;
-			break;
-		case SNB_GMCH_GMS_STOLEN_352M:
-			stolen = 352 * 1024 * 1024;
-			break;
-		case SNB_GMCH_GMS_STOLEN_384M:
-			stolen = 384 * 1024 * 1024;
-			break;
-		case SNB_GMCH_GMS_STOLEN_416M:
-			stolen = 416 * 1024 * 1024;
-			break;
-		case SNB_GMCH_GMS_STOLEN_448M:
-			stolen = 448 * 1024 * 1024;
-			break;
-		case SNB_GMCH_GMS_STOLEN_480M:
-			stolen = 480 * 1024 * 1024;
-			break;
-		case SNB_GMCH_GMS_STOLEN_512M:
-			stolen = 512 * 1024 * 1024;
-			break;
-		default:
-			DRM_ERROR("unexpected GMCH_GMS value: 0x%02x\n",
-				  tmp & SNB_GMCH_GMS_STOLEN_MASK);
-			return -1;
-		}
-	} else {
-		switch (tmp & INTEL_GMCH_GMS_MASK) {
-		case INTEL_855_GMCH_GMS_DISABLED:
-			DRM_ERROR("video memory is disabled\n");
-			return -1;
-		case INTEL_855_GMCH_GMS_STOLEN_1M:
-			stolen = 1 * 1024 * 1024;
-			break;
-		case INTEL_855_GMCH_GMS_STOLEN_4M:
-			stolen = 4 * 1024 * 1024;
-			break;
-		case INTEL_855_GMCH_GMS_STOLEN_8M:
-			stolen = 8 * 1024 * 1024;
-			break;
-		case INTEL_855_GMCH_GMS_STOLEN_16M:
-			stolen = 16 * 1024 * 1024;
-			break;
-		case INTEL_855_GMCH_GMS_STOLEN_32M:
-			stolen = 32 * 1024 * 1024;
-			break;
-		case INTEL_915G_GMCH_GMS_STOLEN_48M:
-			stolen = 48 * 1024 * 1024;
-			break;
-		case INTEL_915G_GMCH_GMS_STOLEN_64M:
-			stolen = 64 * 1024 * 1024;
-			break;
-		case INTEL_GMCH_GMS_STOLEN_128M:
-			stolen = 128 * 1024 * 1024;
-			break;
-		case INTEL_GMCH_GMS_STOLEN_256M:
-			stolen = 256 * 1024 * 1024;
-			break;
-		case INTEL_GMCH_GMS_STOLEN_96M:
-			stolen = 96 * 1024 * 1024;
-			break;
-		case INTEL_GMCH_GMS_STOLEN_160M:
-			stolen = 160 * 1024 * 1024;
-			break;
-		case INTEL_GMCH_GMS_STOLEN_224M:
-			stolen = 224 * 1024 * 1024;
-			break;
-		case INTEL_GMCH_GMS_STOLEN_352M:
-			stolen = 352 * 1024 * 1024;
-			break;
-		default:
-			DRM_ERROR("unexpected GMCH_GMS value: 0x%02x\n",
-				  tmp & INTEL_GMCH_GMS_MASK);
-			return -1;
-		}
-	}
-
-	*preallocated_size = stolen - overhead;
-	*start = overhead;
-
-	return 0;
-}
-
 #define PTE_ADDRESS_MASK		0xfffff000
 #define PTE_ADDRESS_MASK_HIGH		0x000000f0 /* i915+ */
 #define PTE_MAPPING_TYPE_UNCACHED	(0 << 1)
@@ -1181,11 +1012,11 @@ static unsigned long i915_gtt_to_phys(struct drm_device *dev,
 {
 	unsigned long *gtt;
 	unsigned long entry, phys;
-	int gtt_bar = IS_I9XX(dev) ? 0 : 1;
+	int gtt_bar = IS_GEN2(dev) ? 1 : 0;
 	int gtt_offset, gtt_size;
 
-	if (IS_I965G(dev)) {
-		if (IS_G4X(dev) || IS_IRONLAKE(dev) || IS_GEN6(dev)) {
+	if (INTEL_INFO(dev)->gen >= 4) {
+		if (IS_G4X(dev) || INTEL_INFO(dev)->gen > 4) {
 			gtt_offset = 2*1024*1024;
 			gtt_size = 2*1024*1024;
 		} else {
@@ -1210,10 +1041,8 @@ static unsigned long i915_gtt_to_phys(struct drm_device *dev,
 	DRM_DEBUG_DRIVER("GTT addr: 0x%08lx, PTE: 0x%08lx\n", gtt_addr, entry);
 
 	/* Mask out these reserved bits on this hardware. */
-	if (!IS_I9XX(dev) || IS_I915G(dev) || IS_I915GM(dev) ||
-	    IS_I945G(dev) || IS_I945GM(dev)) {
+	if (INTEL_INFO(dev)->gen < 4 && !IS_G33(dev))
 		entry &= ~PTE_ADDRESS_MASK_HIGH;
-	}
 
 	/* If it's not a mapping type we know, then bail. */
 	if ((entry & PTE_MAPPING_TYPE_MASK) != PTE_MAPPING_TYPE_UNCACHED &&
@@ -1252,7 +1081,7 @@ static void i915_setup_compression(struct drm_device *dev, int size)
 	unsigned long ll_base = 0;
 
 	/* Leave 1M for line length buffer & misc. */
-	compressed_fb = drm_mm_search_free(&dev_priv->vram, size, 4096, 0);
+	compressed_fb = drm_mm_search_free(&dev_priv->mm.vram, size, 4096, 0);
 	if (!compressed_fb) {
 		dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
 		i915_warn_stolen(dev);
@@ -1273,7 +1102,7 @@ static void i915_setup_compression(struct drm_device *dev, int size)
 	}
 
 	if (!(IS_GM45(dev) || IS_IRONLAKE_M(dev))) {
-		compressed_llb = drm_mm_search_free(&dev_priv->vram, 4096,
+		compressed_llb = drm_mm_search_free(&dev_priv->mm.vram, 4096,
 						    4096, 0);
 		if (!compressed_llb) {
 			i915_warn_stolen(dev);
@@ -1343,10 +1172,8 @@ static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_
 		/* i915 resume handler doesn't set to D0 */
 		pci_set_power_state(dev->pdev, PCI_D0);
 		i915_resume(dev);
-		drm_kms_helper_poll_enable(dev);
 	} else {
 		printk(KERN_ERR "i915: switched off\n");
-		drm_kms_helper_poll_disable(dev);
 		i915_suspend(dev, pmm);
 	}
 }
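/*
 * Aside (grounded in this series): the poll enable/disable calls are not
 * dropped outright -- drm_kms_helper_poll_disable() and
 * drm_kms_helper_poll_enable() move into i915_suspend() and i915_resume()
 * (see the i915_drv.c hunks below), so this switcheroo path still reaches
 * them through the suspend/resume calls it already makes.
 */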
@@ -1363,20 +1190,14 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
 }
 
 static int i915_load_modeset_init(struct drm_device *dev,
-				  unsigned long prealloc_start,
 				  unsigned long prealloc_size,
 				  unsigned long agp_size)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	int fb_bar = IS_I9XX(dev) ? 2 : 0;
 	int ret = 0;
 
-	dev->mode_config.fb_base = pci_resource_start(dev->pdev, fb_bar) &
-		0xff000000;
-
-	/* Basic memrange allocator for stolen space (aka vram) */
-	drm_mm_init(&dev_priv->vram, 0, prealloc_size);
-	DRM_INFO("set up %ldM of stolen space\n", prealloc_size / (1024*1024));
+	/* Basic memrange allocator for stolen space (aka mm.vram) */
+	drm_mm_init(&dev_priv->mm.vram, 0, prealloc_size);
 
 	/* We're off and running w/KMS */
 	dev_priv->mm.suspended = 0;
@@ -1443,12 +1264,6 @@ static int i915_load_modeset_init(struct drm_device *dev,
 	/* FIXME: do pre/post-mode set stuff in core KMS code */
 	dev->vblank_disable_allowed = 1;
 
-	/*
-	 * Initialize the hardware status page IRQ location.
-	 */
-
-	I915_WRITE(INSTPM, (1 << 5) | (1 << 21));
-
 	ret = intel_fbdev_init(dev);
 	if (ret)
 		goto cleanup_irq;
@@ -1787,9 +1602,9 @@ unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
 		}
 	}
 
-	div_u64(diff, diff1);
+	diff = div_u64(diff, diff1);
 	ret = ((m * diff) + c);
-	div_u64(ret, 10);
+	ret = div_u64(ret, 10);
 
 	dev_priv->last_count1 = total_count;
 	dev_priv->last_time1 = now;
@@ -1858,7 +1673,7 @@ void i915_update_gfx_val(struct drm_i915_private *dev_priv)
 
 	/* More magic constants... */
 	diff = diff * 1181;
-	div_u64(diff, diffms * 10);
+	diff = div_u64(diff, diffms * 10);
 	dev_priv->gfx_power = diff;
 }
 
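/*
 * Aside (not part of this patch): the three fixes above correct a silent
 * bug -- div_u64() returns the quotient rather than dividing in place, so
 * the old calls threw the result away. The kernel math64 API is
 *
 *	u64 div_u64(u64 dividend, u32 divisor);
 *
 * and the corrected pattern, as a self-contained sketch:
 */
#include <linux/math64.h>

static u64 scaled_average_sketch(u64 diff, u32 diff1, u64 m, u64 c)
{
	u64 ret;

	diff = div_u64(diff, diff1);	/* capture the quotient */
	ret = (m * diff) + c;
	return div_u64(ret, 10);
}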
@@ -1907,7 +1722,7 @@ static struct drm_i915_private *i915_mch_dev;
  * - dev_priv->fmax
  * - dev_priv->gpu_busy
  */
-DEFINE_SPINLOCK(mchdev_lock);
+static DEFINE_SPINLOCK(mchdev_lock);
 
 /**
  * i915_read_mch_val - return value for IPS use
@@ -2062,7 +1877,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 	struct drm_i915_private *dev_priv;
 	resource_size_t base, size;
 	int ret = 0, mmio_bar;
-	uint32_t agp_size, prealloc_size, prealloc_start;
+	uint32_t agp_size, prealloc_size;
 	/* i915 has 4 more counters */
 	dev->counters += 4;
 	dev->types[6] = _DRM_STAT_IRQ;
@@ -2079,7 +1894,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 	dev_priv->info = (struct intel_device_info *) flags;
 
 	/* Add register map (needed for suspend/resume) */
-	mmio_bar = IS_I9XX(dev) ? 0 : 1;
+	mmio_bar = IS_GEN2(dev) ? 1 : 0;
 	base = pci_resource_start(dev->pdev, mmio_bar);
 	size = pci_resource_len(dev->pdev, mmio_bar);
 
@@ -2121,17 +1936,32 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
2121 "performance may suffer.\n"); 1936 "performance may suffer.\n");
2122 } 1937 }
2123 1938
2124 ret = i915_probe_agp(dev, &agp_size, &prealloc_size, &prealloc_start); 1939 dev_priv->mm.gtt = intel_gtt_get();
2125 if (ret) 1940 if (!dev_priv->mm.gtt) {
1941 DRM_ERROR("Failed to initialize GTT\n");
1942 ret = -ENODEV;
2126 goto out_iomapfree; 1943 goto out_iomapfree;
2127
2128 if (prealloc_size > intel_max_stolen) {
2129 DRM_INFO("detected %dM stolen memory, trimming to %dM\n",
2130 prealloc_size >> 20, intel_max_stolen >> 20);
2131 prealloc_size = intel_max_stolen;
2132 } 1944 }
2133 1945
2134 dev_priv->wq = create_singlethread_workqueue("i915"); 1946 prealloc_size = dev_priv->mm.gtt->gtt_stolen_entries << PAGE_SHIFT;
1947 agp_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
1948
1949 /* The i915 workqueue is primarily used for batched retirement of
1950 * requests (and thus managing bo) once the task has been completed
1951 * by the GPU. i915_gem_retire_requests() is called directly when we
1952 * need high-priority retirement, such as waiting for an explicit
1953 * bo.
1954 *
1955 * It is also used for periodic low-priority events, such as
1956 * idle-timers and hangcheck.
1957 *
1958 * All tasks on the workqueue are expected to acquire the dev mutex
1959 * so there is no point in running more than one instance of the
1960 * workqueue at any time: max_active = 1 and NON_REENTRANT.
1961 */
1962 dev_priv->wq = alloc_workqueue("i915",
1963 WQ_UNBOUND | WQ_NON_REENTRANT,
1964 1);
2135 if (dev_priv->wq == NULL) { 1965 if (dev_priv->wq == NULL) {
2136 DRM_ERROR("Failed to create our workqueue.\n"); 1966 DRM_ERROR("Failed to create our workqueue.\n");
2137 ret = -ENOMEM; 1967 ret = -ENOMEM;
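/*
 * Aside (not part of this patch): alloc_workqueue("i915", WQ_UNBOUND |
 * WQ_NON_REENTRANT, 1) preserves the single-threaded semantics of the old
 * create_singlethread_workqueue() -- max_active = 1 means at most one work
 * item executes at a time -- while WQ_UNBOUND stops the work from being
 * pinned to the submitting CPU. Queuing is unchanged; e.g. the retire
 * handler would still be kicked off with something like:
 *
 *	queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
 */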
@@ -2166,6 +1996,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 
 	/* Try to make sure MCHBAR is enabled before poking at it */
 	intel_setup_mchbar(dev);
+	intel_setup_gmbus(dev);
+	intel_opregion_setup(dev);
 
 	i915_gem_load(dev);
 
@@ -2212,8 +2044,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 	intel_detect_pch(dev);
 
 	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
-		ret = i915_load_modeset_init(dev, prealloc_start,
-					     prealloc_size, agp_size);
+		ret = i915_load_modeset_init(dev, prealloc_size, agp_size);
 		if (ret < 0) {
 			DRM_ERROR("failed to init modeset\n");
 			goto out_workqueue_free;
@@ -2221,7 +2052,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 	}
 
 	/* Must be done after probing outputs */
-	intel_opregion_init(dev, 0);
+	intel_opregion_init(dev);
+	acpi_video_register();
 
 	setup_timer(&dev_priv->hangcheck_timer, i915_hangcheck_elapsed,
 		    (unsigned long) dev);
@@ -2249,15 +2081,20 @@ free_priv:
 int i915_driver_unload(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	i915_destroy_error_state(dev);
+	int ret;
 
 	spin_lock(&mchdev_lock);
 	i915_mch_dev = NULL;
 	spin_unlock(&mchdev_lock);
 
-	destroy_workqueue(dev_priv->wq);
-	del_timer_sync(&dev_priv->hangcheck_timer);
+	mutex_lock(&dev->struct_mutex);
+	ret = i915_gpu_idle(dev);
+	if (ret)
+		DRM_ERROR("failed to idle hardware: %d\n", ret);
+	mutex_unlock(&dev->struct_mutex);
+
+	/* Cancel the retire work handler, which should be idle now. */
+	cancel_delayed_work_sync(&dev_priv->mm.retire_work);
 
 	io_mapping_free(dev_priv->mm.gtt_mapping);
 	if (dev_priv->mm.gtt_mtrr >= 0) {
@@ -2266,7 +2103,10 @@ int i915_driver_unload(struct drm_device *dev)
 		dev_priv->mm.gtt_mtrr = -1;
 	}
 
+	acpi_video_unregister();
+
 	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+		intel_fbdev_fini(dev);
 		intel_modeset_cleanup(dev);
 
 		/*
@@ -2278,20 +2118,28 @@ int i915_driver_unload(struct drm_device *dev)
 		dev_priv->child_dev = NULL;
 		dev_priv->child_dev_num = 0;
 		}
-		drm_irq_uninstall(dev);
+
 		vga_switcheroo_unregister_client(dev->pdev);
 		vga_client_register(dev->pdev, NULL, NULL, NULL);
 	}
 
+	/* Free error state after interrupts are fully disabled. */
+	del_timer_sync(&dev_priv->hangcheck_timer);
+	cancel_work_sync(&dev_priv->error_work);
+	i915_destroy_error_state(dev);
+
 	if (dev->pdev->msi_enabled)
 		pci_disable_msi(dev->pdev);
 
 	if (dev_priv->regs != NULL)
 		iounmap(dev_priv->regs);
 
-	intel_opregion_free(dev, 0);
+	intel_opregion_fini(dev);
 
 	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+		/* Flush any outstanding unpin_work. */
+		flush_workqueue(dev_priv->wq);
+
 		i915_gem_free_all_phys_object(dev);
 
 		mutex_lock(&dev->struct_mutex);
@@ -2299,34 +2147,35 @@ int i915_driver_unload(struct drm_device *dev)
 		mutex_unlock(&dev->struct_mutex);
 		if (I915_HAS_FBC(dev) && i915_powersave)
 			i915_cleanup_compression(dev);
-		drm_mm_takedown(&dev_priv->vram);
-		i915_gem_lastclose(dev);
+		drm_mm_takedown(&dev_priv->mm.vram);
 
 		intel_cleanup_overlay(dev);
 	}
 
+	intel_teardown_gmbus(dev);
 	intel_teardown_mchbar(dev);
 
+	destroy_workqueue(dev_priv->wq);
+
 	pci_dev_put(dev_priv->bridge_dev);
 	kfree(dev->dev_private);
 
 	return 0;
 }
 
-int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv)
+int i915_driver_open(struct drm_device *dev, struct drm_file *file)
 {
-	struct drm_i915_file_private *i915_file_priv;
+	struct drm_i915_file_private *file_priv;
 
 	DRM_DEBUG_DRIVER("\n");
-	i915_file_priv = (struct drm_i915_file_private *)
-	    kmalloc(sizeof(*i915_file_priv), GFP_KERNEL);
-
-	if (!i915_file_priv)
+	file_priv = kmalloc(sizeof(*file_priv), GFP_KERNEL);
+	if (!file_priv)
 		return -ENOMEM;
 
-	file_priv->driver_priv = i915_file_priv;
+	file->driver_priv = file_priv;
 
-	INIT_LIST_HEAD(&i915_file_priv->mm.request_list);
+	spin_lock_init(&file_priv->mm.lock);
+	INIT_LIST_HEAD(&file_priv->mm.request_list);
 
 	return 0;
 }
@@ -2369,11 +2218,11 @@ void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
 	i915_mem_release(dev, file_priv, dev_priv->agp_heap);
 }
 
-void i915_driver_postclose(struct drm_device *dev, struct drm_file *file_priv)
+void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
 {
-	struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
+	struct drm_i915_file_private *file_priv = file->driver_priv;
 
-	kfree(i915_file_priv);
+	kfree(file_priv);
 }
 
 struct drm_ioctl_desc i915_ioctls[] = {
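/*
 * Aside (not part of this patch): the new file_priv->mm.lock is a
 * per-client spinlock for mm.request_list, so request bookkeeping no
 * longer rides on dev->struct_mutex. A consumer would be expected to
 * bracket list walks with it, roughly:
 *
 *	spin_lock(&file_priv->mm.lock);
 *	list_for_each_entry(request, &file_priv->mm.request_list, client_list)
 *		...;
 *	spin_unlock(&file_priv->mm.lock);
 */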
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 9ed9d23caf14..c3decb2fef4b 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -32,6 +32,7 @@
32#include "drm.h" 32#include "drm.h"
33#include "i915_drm.h" 33#include "i915_drm.h"
34#include "i915_drv.h" 34#include "i915_drv.h"
35#include "intel_drv.h"
35 36
36#include <linux/console.h> 37#include <linux/console.h>
37#include "drm_crtc_helper.h" 38#include "drm_crtc_helper.h"
@@ -61,86 +62,108 @@ extern int intel_agp_enabled;
 	.driver_data = (unsigned long) info }
 
 static const struct intel_device_info intel_i830_info = {
-	.gen = 2, .is_i8xx = 1, .is_mobile = 1, .cursor_needs_physical = 1,
+	.gen = 2, .is_mobile = 1, .cursor_needs_physical = 1,
+	.has_overlay = 1, .overlay_needs_physical = 1,
 };
 
 static const struct intel_device_info intel_845g_info = {
-	.gen = 2, .is_i8xx = 1,
+	.gen = 2,
+	.has_overlay = 1, .overlay_needs_physical = 1,
 };
 
 static const struct intel_device_info intel_i85x_info = {
-	.gen = 2, .is_i8xx = 1, .is_i85x = 1, .is_mobile = 1,
+	.gen = 2, .is_i85x = 1, .is_mobile = 1,
 	.cursor_needs_physical = 1,
+	.has_overlay = 1, .overlay_needs_physical = 1,
 };
 
 static const struct intel_device_info intel_i865g_info = {
-	.gen = 2, .is_i8xx = 1,
+	.gen = 2,
+	.has_overlay = 1, .overlay_needs_physical = 1,
 };
 
 static const struct intel_device_info intel_i915g_info = {
-	.gen = 3, .is_i915g = 1, .is_i9xx = 1, .cursor_needs_physical = 1,
+	.gen = 3, .is_i915g = 1, .cursor_needs_physical = 1,
+	.has_overlay = 1, .overlay_needs_physical = 1,
 };
 static const struct intel_device_info intel_i915gm_info = {
-	.gen = 3, .is_i9xx = 1, .is_mobile = 1,
+	.gen = 3, .is_mobile = 1,
 	.cursor_needs_physical = 1,
+	.has_overlay = 1, .overlay_needs_physical = 1,
+	.supports_tv = 1,
 };
 static const struct intel_device_info intel_i945g_info = {
-	.gen = 3, .is_i9xx = 1, .has_hotplug = 1, .cursor_needs_physical = 1,
+	.gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1,
+	.has_overlay = 1, .overlay_needs_physical = 1,
 };
 static const struct intel_device_info intel_i945gm_info = {
-	.gen = 3, .is_i945gm = 1, .is_i9xx = 1, .is_mobile = 1,
+	.gen = 3, .is_i945gm = 1, .is_mobile = 1,
 	.has_hotplug = 1, .cursor_needs_physical = 1,
+	.has_overlay = 1, .overlay_needs_physical = 1,
+	.supports_tv = 1,
 };
 
 static const struct intel_device_info intel_i965g_info = {
-	.gen = 4, .is_broadwater = 1, .is_i965g = 1, .is_i9xx = 1,
+	.gen = 4, .is_broadwater = 1,
 	.has_hotplug = 1,
+	.has_overlay = 1,
 };
 
 static const struct intel_device_info intel_i965gm_info = {
-	.gen = 4, .is_crestline = 1, .is_i965g = 1, .is_i965gm = 1, .is_i9xx = 1,
+	.gen = 4, .is_crestline = 1,
 	.is_mobile = 1, .has_fbc = 1, .has_rc6 = 1, .has_hotplug = 1,
+	.has_overlay = 1,
+	.supports_tv = 1,
 };
 
 static const struct intel_device_info intel_g33_info = {
-	.gen = 3, .is_g33 = 1, .is_i9xx = 1,
+	.gen = 3, .is_g33 = 1,
 	.need_gfx_hws = 1, .has_hotplug = 1,
+	.has_overlay = 1,
 };
 
 static const struct intel_device_info intel_g45_info = {
-	.gen = 4, .is_i965g = 1, .is_g4x = 1, .is_i9xx = 1, .need_gfx_hws = 1,
+	.gen = 4, .is_g4x = 1, .need_gfx_hws = 1,
 	.has_pipe_cxsr = 1, .has_hotplug = 1,
+	.has_bsd_ring = 1,
 };
 
 static const struct intel_device_info intel_gm45_info = {
-	.gen = 4, .is_i965g = 1, .is_g4x = 1, .is_i9xx = 1,
+	.gen = 4, .is_g4x = 1,
 	.is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1,
 	.has_pipe_cxsr = 1, .has_hotplug = 1,
+	.supports_tv = 1,
+	.has_bsd_ring = 1,
 };
 
 static const struct intel_device_info intel_pineview_info = {
-	.gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .is_i9xx = 1,
+	.gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1,
 	.need_gfx_hws = 1, .has_hotplug = 1,
+	.has_overlay = 1,
 };
 
 static const struct intel_device_info intel_ironlake_d_info = {
-	.gen = 5, .is_ironlake = 1, .is_i965g = 1, .is_i9xx = 1,
+	.gen = 5, .is_ironlake = 1,
 	.need_gfx_hws = 1, .has_pipe_cxsr = 1, .has_hotplug = 1,
+	.has_bsd_ring = 1,
 };
 
 static const struct intel_device_info intel_ironlake_m_info = {
-	.gen = 5, .is_ironlake = 1, .is_mobile = 1, .is_i965g = 1, .is_i9xx = 1,
+	.gen = 5, .is_ironlake = 1, .is_mobile = 1,
 	.need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1, .has_hotplug = 1,
+	.has_bsd_ring = 1,
 };
 
 static const struct intel_device_info intel_sandybridge_d_info = {
-	.gen = 6, .is_i965g = 1, .is_i9xx = 1,
+	.gen = 6,
 	.need_gfx_hws = 1, .has_hotplug = 1,
+	.has_bsd_ring = 1,
 };
 
 static const struct intel_device_info intel_sandybridge_m_info = {
-	.gen = 6, .is_i965g = 1, .is_mobile = 1, .is_i9xx = 1,
+	.gen = 6, .is_mobile = 1,
 	.need_gfx_hws = 1, .has_hotplug = 1,
+	.has_bsd_ring = 1,
 };
 
 static const struct pci_device_id pciidlist[] = {		/* aka */
@@ -237,7 +260,7 @@ static int i915_drm_freeze(struct drm_device *dev)
237 260
238 i915_save_state(dev); 261 i915_save_state(dev);
239 262
240 intel_opregion_free(dev, 1); 263 intel_opregion_fini(dev);
241 264
242 /* Modeset on resume, not lid events */ 265 /* Modeset on resume, not lid events */
243 dev_priv->modeset_on_lid = 0; 266 dev_priv->modeset_on_lid = 0;
@@ -258,6 +281,8 @@ int i915_suspend(struct drm_device *dev, pm_message_t state)
258 if (state.event == PM_EVENT_PRETHAW) 281 if (state.event == PM_EVENT_PRETHAW)
259 return 0; 282 return 0;
260 283
284 drm_kms_helper_poll_disable(dev);
285
261 error = i915_drm_freeze(dev); 286 error = i915_drm_freeze(dev);
262 if (error) 287 if (error)
263 return error; 288 return error;
@@ -277,8 +302,7 @@ static int i915_drm_thaw(struct drm_device *dev)
 	int error = 0;
 
 	i915_restore_state(dev);
-
-	intel_opregion_init(dev, 1);
+	intel_opregion_setup(dev);
 
 	/* KMS EnterVT equivalent */
 	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
@@ -294,6 +318,8 @@ static int i915_drm_thaw(struct drm_device *dev)
 		drm_helper_resume_force_mode(dev);
 	}
 
+	intel_opregion_init(dev);
+
 	dev_priv->modeset_on_lid = 0;
 
 	return error;
@@ -301,12 +327,79 @@ static int i915_drm_thaw(struct drm_device *dev)
 
 int i915_resume(struct drm_device *dev)
 {
+	int ret;
+
 	if (pci_enable_device(dev->pdev))
 		return -EIO;
 
 	pci_set_master(dev->pdev);
 
-	return i915_drm_thaw(dev);
+	ret = i915_drm_thaw(dev);
+	if (ret)
+		return ret;
+
+	drm_kms_helper_poll_enable(dev);
+	return 0;
+}
+
+static int i8xx_do_reset(struct drm_device *dev, u8 flags)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (IS_I85X(dev))
+		return -ENODEV;
+
+	I915_WRITE(D_STATE, I915_READ(D_STATE) | DSTATE_GFX_RESET_I830);
+	POSTING_READ(D_STATE);
+
+	if (IS_I830(dev) || IS_845G(dev)) {
+		I915_WRITE(DEBUG_RESET_I830,
+			   DEBUG_RESET_DISPLAY |
+			   DEBUG_RESET_RENDER |
+			   DEBUG_RESET_FULL);
+		POSTING_READ(DEBUG_RESET_I830);
+		msleep(1);
+
+		I915_WRITE(DEBUG_RESET_I830, 0);
+		POSTING_READ(DEBUG_RESET_I830);
+	}
+
+	msleep(1);
+
+	I915_WRITE(D_STATE, I915_READ(D_STATE) & ~DSTATE_GFX_RESET_I830);
+	POSTING_READ(D_STATE);
+
+	return 0;
+}
+
+static int i965_reset_complete(struct drm_device *dev)
+{
+	u8 gdrst;
+	pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
+	return gdrst & 0x1;
+}
+
+static int i965_do_reset(struct drm_device *dev, u8 flags)
+{
+	u8 gdrst;
+
+	/*
+	 * Set the domains we want to reset (GRDOM/bits 2 and 3) as
+	 * well as the reset bit (GR/bit 0).  Setting the GR bit
+	 * triggers the reset; when done, the hardware will clear it.
+	 */
+	pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
+	pci_write_config_byte(dev->pdev, I965_GDRST, gdrst | flags | 0x1);
+
+	return wait_for(i965_reset_complete(dev), 500);
+}
+
+static int ironlake_do_reset(struct drm_device *dev, u8 flags)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
+	I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR, gdrst | flags | 0x1);
+	return wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500);
 }
 
 /**
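/*
 * Aside (not part of this patch): wait_for(COND, MS), as used by the
 * reset helpers above, is a polling macro from intel_drv.h; roughly
 * equivalent to the following sketch (exact backoff details may differ):
 */
#define wait_for_sketch(COND, MS) ({					\
	unsigned long timeout__ = jiffies + msecs_to_jiffies(MS);	\
	int ret__ = 0;							\
	while (!(COND)) {						\
		if (time_after(jiffies, timeout__)) {			\
			ret__ = -ETIMEDOUT;				\
			break;						\
		}							\
		msleep(1);						\
	}								\
	ret__;								\
})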
@@ -325,54 +418,39 @@ int i915_resume(struct drm_device *dev)
  * - re-init interrupt state
  * - re-init display
  */
-int i965_reset(struct drm_device *dev, u8 flags)
+int i915_reset(struct drm_device *dev, u8 flags)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	unsigned long timeout;
-	u8 gdrst;
 	/*
 	 * We really should only reset the display subsystem if we actually
 	 * need to
 	 */
 	bool need_display = true;
+	int ret;
 
 	mutex_lock(&dev->struct_mutex);
 
-	/*
-	 * Clear request list
-	 */
-	i915_gem_retire_requests(dev);
-
-	if (need_display)
-		i915_save_display(dev);
-
-	if (IS_I965G(dev) || IS_G4X(dev)) {
-		/*
-		 * Set the domains we want to reset, then the reset bit (bit 0).
-		 * Clear the reset bit after a while and wait for hardware status
-		 * bit (bit 1) to be set
-		 */
-		pci_read_config_byte(dev->pdev, GDRST, &gdrst);
-		pci_write_config_byte(dev->pdev, GDRST, gdrst | flags | ((flags == GDRST_FULL) ? 0x1 : 0x0));
-		udelay(50);
-		pci_write_config_byte(dev->pdev, GDRST, gdrst & 0xfe);
-
-		/* ...we don't want to loop forever though, 500ms should be plenty */
-		timeout = jiffies + msecs_to_jiffies(500);
-		do {
-			udelay(100);
-			pci_read_config_byte(dev->pdev, GDRST, &gdrst);
-		} while ((gdrst & 0x1) && time_after(timeout, jiffies));
-
-		if (gdrst & 0x1) {
-			WARN(true, "i915: Failed to reset chip\n");
-			mutex_unlock(&dev->struct_mutex);
-			return -EIO;
-		}
-	} else {
-		DRM_ERROR("Error occurred. Don't know how to reset this chip.\n");
+	i915_gem_reset(dev);
+
+	ret = -ENODEV;
+	if (get_seconds() - dev_priv->last_gpu_reset < 5) {
+		DRM_ERROR("GPU hanging too fast, declaring wedged!\n");
+	} else switch (INTEL_INFO(dev)->gen) {
+	case 5:
+		ret = ironlake_do_reset(dev, flags);
+		break;
+	case 4:
+		ret = i965_do_reset(dev, flags);
+		break;
+	case 2:
+		ret = i8xx_do_reset(dev, flags);
+		break;
+	}
+	dev_priv->last_gpu_reset = get_seconds();
+	if (ret) {
+		DRM_ERROR("Failed to reset chip.\n");
 		mutex_unlock(&dev->struct_mutex);
-		return -ENODEV;
+		return ret;
 	}
 
 	/* Ok, now get things going again... */
@@ -400,13 +478,19 @@ int i965_reset(struct drm_device *dev, u8 flags)
 		mutex_lock(&dev->struct_mutex);
 	}
 
+	mutex_unlock(&dev->struct_mutex);
+
 	/*
-	 * Display needs restore too...
+	 * Perform a full modeset as on later generations, e.g. Ironlake, we may
+	 * need to retrain the display link and cannot just restore the register
+	 * values.
 	 */
-	if (need_display)
-		i915_restore_display(dev);
+	if (need_display) {
+		mutex_lock(&dev->mode_config.mutex);
+		drm_helper_resume_force_mode(dev);
+		mutex_unlock(&dev->mode_config.mutex);
+	}
 
-	mutex_unlock(&dev->struct_mutex);
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index af4a263cf257..73ad8bff2c2a 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -34,6 +34,8 @@
34#include "intel_bios.h" 34#include "intel_bios.h"
35#include "intel_ringbuffer.h" 35#include "intel_ringbuffer.h"
36#include <linux/io-mapping.h> 36#include <linux/io-mapping.h>
37#include <linux/i2c.h>
38#include <drm/intel-gtt.h>
37 39
38/* General customization: 40/* General customization:
39 */ 41 */
@@ -73,11 +75,9 @@ enum plane {
 #define DRIVER_PATCHLEVEL	0
 
 #define WATCH_COHERENCY	0
-#define WATCH_BUF	0
 #define WATCH_EXEC	0
-#define WATCH_LRU	0
 #define WATCH_RELOC	0
-#define WATCH_INACTIVE	0
+#define WATCH_LISTS	0
 #define WATCH_PWRITE	0
 
 #define I915_GEM_PHYS_CURSOR_0 1
@@ -110,8 +110,9 @@ struct intel_opregion {
 	struct opregion_acpi *acpi;
 	struct opregion_swsci *swsci;
 	struct opregion_asle *asle;
-	int enabled;
+	void *vbt;
 };
+#define OPREGION_SIZE            (8*1024)
 
 struct intel_overlay;
 struct intel_overlay_error_state;
@@ -125,13 +126,16 @@ struct drm_i915_master_private {
 struct drm_i915_fence_reg {
 	struct drm_gem_object *obj;
 	struct list_head lru_list;
+	bool gpu;
 };
 
 struct sdvo_device_mapping {
+	u8 initialized;
 	u8 dvo_port;
 	u8 slave_addr;
 	u8 dvo_wiring;
-	u8 initialized;
+	u8 i2c_pin;
+	u8 i2c_speed;
 	u8 ddc_pin;
 };
 
@@ -193,13 +197,9 @@ struct drm_i915_display_funcs {
 struct intel_device_info {
 	u8 gen;
 	u8 is_mobile : 1;
-	u8 is_i8xx : 1;
 	u8 is_i85x : 1;
 	u8 is_i915g : 1;
-	u8 is_i9xx : 1;
 	u8 is_i945gm : 1;
-	u8 is_i965g : 1;
-	u8 is_i965gm : 1;
 	u8 is_g33 : 1;
 	u8 need_gfx_hws : 1;
 	u8 is_g4x : 1;
@@ -212,9 +212,14 @@ struct intel_device_info {
 	u8 has_pipe_cxsr : 1;
 	u8 has_hotplug : 1;
 	u8 cursor_needs_physical : 1;
+	u8 has_overlay : 1;
+	u8 overlay_needs_physical : 1;
+	u8 supports_tv : 1;
+	u8 has_bsd_ring : 1;
 };
 
 enum no_fbc_reason {
+	FBC_NO_OUTPUT, /* no outputs enabled to compress */
 	FBC_STOLEN_TOO_SMALL, /* not enough space to hold compressed buffers */
 	FBC_UNSUPPORTED_MODE, /* interlace or doublescanned mode */
 	FBC_MODE_TOO_LARGE, /* mode too large for compression */
@@ -241,6 +246,12 @@ typedef struct drm_i915_private {
 
 	void __iomem *regs;
 
+	struct intel_gmbus {
+		struct i2c_adapter adapter;
+		struct i2c_adapter *force_bit;
+		u32 reg0;
+	} *gmbus;
+
 	struct pci_dev *bridge_dev;
 	struct intel_ring_buffer render_ring;
 	struct intel_ring_buffer bsd_ring;
@@ -263,6 +274,9 @@ typedef struct drm_i915_private {
 	int front_offset;
 	int current_page;
 	int page_flipping;
+#define I915_DEBUG_READ (1<<0)
+#define I915_DEBUG_WRITE (1<<1)
+	unsigned long debug_flags;
 
 	wait_queue_head_t irq_queue;
 	atomic_t irq_received;
@@ -289,24 +303,21 @@ typedef struct drm_i915_private {
 	unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds;
 	int vblank_pipe;
 	int num_pipe;
-	u32 flush_rings;
-#define FLUSH_RENDER_RING	0x1
-#define FLUSH_BSD_RING		0x2
 
 	/* For hangcheck timer */
-#define DRM_I915_HANGCHECK_PERIOD 75 /* in jiffies */
+#define DRM_I915_HANGCHECK_PERIOD 250 /* in ms */
 	struct timer_list hangcheck_timer;
 	int hangcheck_count;
 	uint32_t last_acthd;
 	uint32_t last_instdone;
 	uint32_t last_instdone1;
 
-	struct drm_mm vram;
-
 	unsigned long cfb_size;
 	unsigned long cfb_pitch;
+	unsigned long cfb_offset;
 	int cfb_fence;
 	int cfb_plane;
+	int cfb_y;
 
 	int irq_enabled;
 
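/*
 * Aside (not part of this patch): DRM_I915_HANGCHECK_PERIOD changes units
 * here, from jiffies to milliseconds, so the timer is presumably now armed
 * with a conversion along the lines of:
 *
 *	mod_timer(&dev_priv->hangcheck_timer,
 *		  jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
 */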
@@ -316,8 +327,7 @@ typedef struct drm_i915_private {
 	struct intel_overlay *overlay;
 
 	/* LVDS info */
-	int backlight_duty_cycle;  /* restore backlight to this value */
-	bool panel_wants_dither;
+	int backlight_level;  /* restore backlight to this value */
 	struct drm_display_mode *panel_fixed_mode;
 	struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
 	struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
@@ -328,13 +338,22 @@ typedef struct drm_i915_private {
 	unsigned int lvds_vbt:1;
 	unsigned int int_crt_support:1;
 	unsigned int lvds_use_ssc:1;
-	unsigned int edp_support:1;
 	int lvds_ssc_freq;
-	int edp_bpp;
+
+	struct {
+		u8 rate:4;
+		u8 lanes:4;
+		u8 preemphasis:4;
+		u8 vswing:4;
+
+		u8 initialized:1;
+		u8 support:1;
+		u8 bpp:6;
+	} edp;
 
 	struct notifier_block lid_notifier;
 
-	int crt_ddc_bus; /* 0 = unknown, else GPIO to use for CRT DDC */
+	int crt_ddc_pin;
 	struct drm_i915_fence_reg fence_regs[16]; /* assume 965 */
 	int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
 	int num_fence_regs; /* 8 on pre-965, 16 otherwise */
@@ -344,6 +363,7 @@ typedef struct drm_i915_private {
 	spinlock_t error_lock;
 	struct drm_i915_error_state *first_error;
 	struct work_struct error_work;
+	struct completion error_completion;
 	struct workqueue_struct *wq;
 
 	/* Display functions */
@@ -507,6 +527,11 @@ typedef struct drm_i915_private {
 	u32 saveMCHBAR_RENDER_STANDBY;
 
 	struct {
+		/** Bridge to intel-gtt-ko */
+		struct intel_gtt *gtt;
+		/** Memory allocator for GTT stolen memory */
+		struct drm_mm vram;
+		/** Memory allocator for GTT */
 		struct drm_mm gtt_space;
 
 		struct io_mapping *gtt_mapping;
@@ -521,8 +546,6 @@ typedef struct drm_i915_private {
 		 */
 		struct list_head shrink_list;
 
-		spinlock_t active_list_lock;
-
 		/**
 		 * List of objects which are not in the ringbuffer but which
 		 * still have a write_domain which needs to be flushed before
@@ -555,6 +578,12 @@ typedef struct drm_i915_private {
 		 */
 		struct list_head inactive_list;
 
+		/**
+		 * LRU list of objects which are not in the ringbuffer but
+		 * are still pinned in the GTT.
+		 */
+		struct list_head pinned_list;
+
 		/** LRU list of objects with fence regs on them. */
 		struct list_head fence_list;
 
@@ -611,6 +640,17 @@ typedef struct drm_i915_private {
 
 		/* storage for physical objects */
 		struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT];
+
+		uint32_t flush_rings;
+
+		/* accounting, useful for userland debugging */
+		size_t object_memory;
+		size_t pin_memory;
+		size_t gtt_memory;
+		size_t gtt_total;
+		u32 object_count;
+		u32 pin_count;
+		u32 gtt_count;
 	} mm;
 	struct sdvo_device_mapping sdvo_mappings[2];
 	/* indicate whether the LVDS_BORDER should be enabled or not */
@@ -626,8 +666,6 @@ typedef struct drm_i915_private {
 	/* Reclocking support */
 	bool render_reclock_avail;
 	bool lvds_downclock_avail;
-	/* indicate whether the LVDS EDID is OK */
-	bool lvds_edid_good;
 	/* indicates the reduced downclock for LVDS*/
 	int lvds_downclock;
 	struct work_struct idle_work;
@@ -661,6 +699,8 @@ typedef struct drm_i915_private {
 	struct drm_mm_node *compressed_fb;
 	struct drm_mm_node *compressed_llb;
 
+	unsigned long last_gpu_reset;
+
 	/* list of fbdev register on this device */
 	struct intel_fbdev *fbdev;
 } drm_i915_private_t;
@@ -816,12 +856,14 @@ struct drm_i915_gem_request {
 	/** global list entry for this request */
 	struct list_head list;
 
+	struct drm_i915_file_private *file_priv;
 	/** file_priv list entry for this request */
 	struct list_head client_list;
 };
 
 struct drm_i915_file_private {
 	struct {
+		struct spinlock lock;
 		struct list_head request_list;
 	} mm;
 };
@@ -862,7 +904,7 @@ extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
 extern int i915_emit_box(struct drm_device *dev,
 			 struct drm_clip_rect *boxes,
 			 int i, int DR1, int DR4);
-extern int i965_reset(struct drm_device *dev, u8 flags);
+extern int i915_reset(struct drm_device *dev, u8 flags);
 extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
 extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
 extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);
@@ -871,7 +913,6 @@ extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);
 
 /* i915_irq.c */
 void i915_hangcheck_elapsed(unsigned long data);
-void i915_destroy_error_state(struct drm_device *dev);
 extern int i915_irq_emit(struct drm_device *dev, void *data,
 			 struct drm_file *file_priv);
 extern int i915_irq_wait(struct drm_device *dev, void *data,
@@ -908,6 +949,12 @@ i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
 
 void intel_enable_asle (struct drm_device *dev);
 
+#ifdef CONFIG_DEBUG_FS
+extern void i915_destroy_error_state(struct drm_device *dev);
+#else
+#define i915_destroy_error_state(x)
+#endif
+
 
 /* i915_mem.c */
 extern int i915_mem_alloc(struct drm_device *dev, void *data,
@@ -922,6 +969,7 @@ extern void i915_mem_takedown(struct mem_block **heap);
 extern void i915_mem_release(struct drm_device * dev,
 			     struct drm_file *file_priv, struct mem_block *heap);
 /* i915_gem.c */
+int i915_gem_check_is_wedged(struct drm_device *dev);
 int i915_gem_init_ioctl(struct drm_device *dev, void *data,
 			struct drm_file *file_priv);
 int i915_gem_create_ioctl(struct drm_device *dev, void *data,
@@ -972,13 +1020,22 @@ void i915_gem_object_unpin(struct drm_gem_object *obj);
 int i915_gem_object_unbind(struct drm_gem_object *obj);
 void i915_gem_release_mmap(struct drm_gem_object *obj);
 void i915_gem_lastclose(struct drm_device *dev);
-uint32_t i915_get_gem_seqno(struct drm_device *dev,
-			    struct intel_ring_buffer *ring);
-bool i915_seqno_passed(uint32_t seq1, uint32_t seq2);
-int i915_gem_object_get_fence_reg(struct drm_gem_object *obj);
-int i915_gem_object_put_fence_reg(struct drm_gem_object *obj);
+
+/**
+ * Returns true if seq1 is later than seq2.
+ */
+static inline bool
+i915_seqno_passed(uint32_t seq1, uint32_t seq2)
+{
+	return (int32_t)(seq1 - seq2) >= 0;
+}
+
+int i915_gem_object_get_fence_reg(struct drm_gem_object *obj,
+				  bool interruptible);
+int i915_gem_object_put_fence_reg(struct drm_gem_object *obj,
+				  bool interruptible);
 void i915_gem_retire_requests(struct drm_device *dev);
-void i915_gem_retire_work_handler(struct work_struct *work);
+void i915_gem_reset(struct drm_device *dev);
 void i915_gem_clflush_object(struct drm_gem_object *obj);
 int i915_gem_object_set_domain(struct drm_gem_object *obj,
 			       uint32_t read_domains,
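/*
 * Aside (not part of this patch): making i915_seqno_passed() a signed
 * 32-bit subtraction keeps the comparison correct across wraparound of
 * the hardware sequence number. A self-contained userspace demonstration:
 */
#include <assert.h>
#include <stdint.h>

static int seqno_passed(uint32_t seq1, uint32_t seq2)
{
	return (int32_t)(seq1 - seq2) >= 0;	/* wrap-safe ordering */
}

int main(void)
{
	assert(seqno_passed(2, 1));			/* ordinary case */
	assert(seqno_passed(0x00000002, 0xfffffffe));	/* across the wrap */
	assert(!seqno_passed(0xfffffffe, 0x00000002));
	return 0;
}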
@@ -990,16 +1047,18 @@ int i915_gem_do_init(struct drm_device *dev, unsigned long start,
 int i915_gpu_idle(struct drm_device *dev);
 int i915_gem_idle(struct drm_device *dev);
 uint32_t i915_add_request(struct drm_device *dev,
 			  struct drm_file *file_priv,
-			  uint32_t flush_domains,
+			  struct drm_i915_gem_request *request,
 			  struct intel_ring_buffer *ring);
 int i915_do_wait_request(struct drm_device *dev,
-			 uint32_t seqno, int interruptible,
+			 uint32_t seqno,
+			 bool interruptible,
 			 struct intel_ring_buffer *ring);
 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
 int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj,
 				      int write);
-int i915_gem_object_set_to_display_plane(struct drm_gem_object *obj);
+int i915_gem_object_set_to_display_plane(struct drm_gem_object *obj,
+					 bool pipelined);
 int i915_gem_attach_phys_object(struct drm_device *dev,
 				struct drm_gem_object *obj,
 				int id,
@@ -1007,10 +1066,7 @@ int i915_gem_attach_phys_object(struct drm_device *dev,
 void i915_gem_detach_phys_object(struct drm_device *dev,
 				 struct drm_gem_object *obj);
 void i915_gem_free_all_phys_object(struct drm_device *dev);
-int i915_gem_object_get_pages(struct drm_gem_object *obj, gfp_t gfpmask);
-void i915_gem_object_put_pages(struct drm_gem_object *obj);
 void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv);
-int i915_gem_object_flush_write_domain(struct drm_gem_object *obj);
 
 void i915_gem_shrinker_init(void);
 void i915_gem_shrinker_exit(void);
@@ -1032,15 +1088,14 @@ bool i915_gem_object_fence_offset_ok(struct drm_gem_object *obj,
 /* i915_gem_debug.c */
 void i915_gem_dump_object(struct drm_gem_object *obj, int len,
 			  const char *where, uint32_t mark);
-#if WATCH_INACTIVE
-void i915_verify_inactive(struct drm_device *dev, char *file, int line);
+#if WATCH_LISTS
+int i915_verify_lists(struct drm_device *dev);
 #else
-#define i915_verify_inactive(dev, file, line)
+#define i915_verify_lists(dev) 0
 #endif
 void i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle);
 void i915_gem_dump_object(struct drm_gem_object *obj, int len,
 			  const char *where, uint32_t mark);
-void i915_dump_lru(struct drm_device *dev, const char *where);
 
 /* i915_debugfs.c */
 int i915_debugfs_init(struct drm_minor *minor);
@@ -1054,19 +1109,31 @@ extern int i915_restore_state(struct drm_device *dev);
 extern int i915_save_state(struct drm_device *dev);
 extern int i915_restore_state(struct drm_device *dev);
 
+/* intel_i2c.c */
+extern int intel_setup_gmbus(struct drm_device *dev);
+extern void intel_teardown_gmbus(struct drm_device *dev);
+extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
+extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
+extern inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
+{
+	return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
+}
+extern void intel_i2c_reset(struct drm_device *dev);
+
+/* intel_opregion.c */
+extern int intel_opregion_setup(struct drm_device *dev);
 #ifdef CONFIG_ACPI
-/* i915_opregion.c */
-extern int intel_opregion_init(struct drm_device *dev, int resume);
-extern void intel_opregion_free(struct drm_device *dev, int suspend);
-extern void opregion_asle_intr(struct drm_device *dev);
-extern void ironlake_opregion_gse_intr(struct drm_device *dev);
-extern void opregion_enable_asle(struct drm_device *dev);
+extern void intel_opregion_init(struct drm_device *dev);
+extern void intel_opregion_fini(struct drm_device *dev);
+extern void intel_opregion_asle_intr(struct drm_device *dev);
+extern void intel_opregion_gse_intr(struct drm_device *dev);
+extern void intel_opregion_enable_asle(struct drm_device *dev);
 #else
-static inline int intel_opregion_init(struct drm_device *dev, int resume) { return 0; }
-static inline void intel_opregion_free(struct drm_device *dev, int suspend) { return; }
-static inline void opregion_asle_intr(struct drm_device *dev) { return; }
-static inline void ironlake_opregion_gse_intr(struct drm_device *dev) { return; }
-static inline void opregion_enable_asle(struct drm_device *dev) { return; }
+static inline void intel_opregion_init(struct drm_device *dev) { return; }
+static inline void intel_opregion_fini(struct drm_device *dev) { return; }
+static inline void intel_opregion_asle_intr(struct drm_device *dev) { return; }
+static inline void intel_opregion_gse_intr(struct drm_device *dev) { return; }
+static inline void intel_opregion_enable_asle(struct drm_device *dev) { return; }
 #endif
 
 /* modesetting */
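/*
 * Aside (not part of this patch): intel_gmbus_is_forced_bit() above relies
 * on container_of() to map the embedded i2c_adapter back to its enclosing
 * struct intel_gmbus. A self-contained userspace illustration of the
 * pattern (names here are hypothetical stand-ins):
 */
#include <assert.h>
#include <stddef.h>

#define container_of_sketch(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct adapter { int id; };
struct gmbus { int force_bit; struct adapter adapter; };

int main(void)
{
	struct gmbus bus = { .force_bit = 1 };
	struct adapter *a = &bus.adapter;

	/* recover the enclosing object from a pointer to its member */
	assert(container_of_sketch(a, struct gmbus, adapter) == &bus);
	return 0;
}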
@@ -1084,8 +1151,10 @@ extern void intel_detect_pch (struct drm_device *dev);
 extern int intel_trans_dp_port_sel (struct drm_crtc *crtc);
 
 /* overlay */
+#ifdef CONFIG_DEBUG_FS
 extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev);
 extern void intel_overlay_print_error_state(struct seq_file *m, struct intel_overlay_error_state *error);
+#endif
 
 /**
  * Lock test for when it's just for synchronization of ring access.
@@ -1099,8 +1168,26 @@ extern void intel_overlay_print_error_state(struct seq_file *m, struct intel_ove
 	LOCK_TEST_WITH_RETURN(dev, file_priv);				\
 } while (0)
 
-#define I915_READ(reg)          readl(dev_priv->regs + (reg))
-#define I915_WRITE(reg, val)     writel(val, dev_priv->regs + (reg))
+static inline u32 i915_read(struct drm_i915_private *dev_priv, u32 reg)
+{
+	u32 val;
+
+	val = readl(dev_priv->regs + reg);
+	if (dev_priv->debug_flags & I915_DEBUG_READ)
+		printk(KERN_ERR "read 0x%08x from 0x%08x\n", val, reg);
+	return val;
+}
+
+static inline void i915_write(struct drm_i915_private *dev_priv, u32 reg,
+			      u32 val)
+{
+	writel(val, dev_priv->regs + reg);
+	if (dev_priv->debug_flags & I915_DEBUG_WRITE)
+		printk(KERN_ERR "wrote 0x%08x to 0x%08x\n", val, reg);
+}
+
+#define I915_READ(reg) i915_read(dev_priv, (reg))
+#define I915_WRITE(reg, val) i915_write(dev_priv, (reg), (val))
 #define I915_READ16(reg)	readw(dev_priv->regs + (reg))
 #define I915_WRITE16(reg, val)	writel(val, dev_priv->regs + (reg))
 #define I915_READ8(reg)		readb(dev_priv->regs + (reg))
@@ -1110,6 +1197,11 @@ extern void intel_overlay_print_error_state(struct seq_file *m, struct intel_ove
 #define POSTING_READ(reg)	(void)I915_READ(reg)
 #define POSTING_READ16(reg)	(void)I915_READ16(reg)
 
+#define I915_DEBUG_ENABLE_IO() (dev_priv->debug_flags |= I915_DEBUG_READ | \
+				I915_DEBUG_WRITE)
+#define I915_DEBUG_DISABLE_IO() (dev_priv->debug_flags &= ~(I915_DEBUG_READ | \
+				 I915_DEBUG_WRITE))
+
 #define I915_VERBOSE 0
 
 #define BEGIN_LP_RING(n) do { \
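/*
 * Aside (not part of this patch): with the i915_read/i915_write hooks and
 * the flag macros above, a suspect register sequence can be traced at
 * runtime by bracketing it -- illustrative only, variables assumed in scope:
 */
	I915_DEBUG_ENABLE_IO();
	I915_WRITE(INSTPM, val);	/* logged: "wrote 0x... to 0x..." */
	val = I915_READ(INSTPM);	/* logged: "read 0x... from 0x..." */
	I915_DEBUG_DISABLE_IO();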
@@ -1166,8 +1258,6 @@ extern void intel_overlay_print_error_state(struct seq_file *m, struct intel_ove
 #define IS_I915GM(dev)	((dev)->pci_device == 0x2592)
 #define IS_I945G(dev)	((dev)->pci_device == 0x2772)
 #define IS_I945GM(dev)	(INTEL_INFO(dev)->is_i945gm)
-#define IS_I965G(dev)	(INTEL_INFO(dev)->is_i965g)
-#define IS_I965GM(dev)	(INTEL_INFO(dev)->is_i965gm)
 #define IS_BROADWATER(dev)	(INTEL_INFO(dev)->is_broadwater)
 #define IS_CRESTLINE(dev)	(INTEL_INFO(dev)->is_crestline)
 #define IS_GM45(dev)	((dev)->pci_device == 0x2A42)
@@ -1179,7 +1269,6 @@ extern void intel_overlay_print_error_state(struct seq_file *m, struct intel_ove
 #define IS_IRONLAKE_D(dev)	((dev)->pci_device == 0x0042)
 #define IS_IRONLAKE_M(dev)	((dev)->pci_device == 0x0046)
 #define IS_IRONLAKE(dev)	(INTEL_INFO(dev)->is_ironlake)
-#define IS_I9XX(dev)	(INTEL_INFO(dev)->is_i9xx)
 #define IS_MOBILE(dev)	(INTEL_INFO(dev)->is_mobile)
 
 #define IS_GEN2(dev)	(INTEL_INFO(dev)->gen == 2)
@@ -1188,26 +1277,27 @@ extern void intel_overlay_print_error_state(struct seq_file *m, struct intel_ove
 #define IS_GEN5(dev)	(INTEL_INFO(dev)->gen == 5)
 #define IS_GEN6(dev)	(INTEL_INFO(dev)->gen == 6)
 
-#define HAS_BSD(dev)            (IS_IRONLAKE(dev) || IS_G4X(dev))
+#define HAS_BSD(dev)            (INTEL_INFO(dev)->has_bsd_ring)
 #define I915_NEED_GFX_HWS(dev)	(INTEL_INFO(dev)->need_gfx_hws)
 
+#define HAS_OVERLAY(dev)		(INTEL_INFO(dev)->has_overlay)
+#define OVERLAY_NEEDS_PHYSICAL(dev)	(INTEL_INFO(dev)->overlay_needs_physical)
+
 /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
  * rows, which changed the alignment requirements and fence programming.
  */
-#define HAS_128_BYTE_Y_TILING(dev) (IS_I9XX(dev) && !(IS_I915G(dev) || \
+#define HAS_128_BYTE_Y_TILING(dev) (!IS_GEN2(dev) && !(IS_I915G(dev) || \
 						      IS_I915GM(dev)))
-#define SUPPORTS_DIGITAL_OUTPUTS(dev)	(IS_I9XX(dev) && !IS_PINEVIEW(dev))
+#define SUPPORTS_DIGITAL_OUTPUTS(dev)	(!IS_GEN2(dev) && !IS_PINEVIEW(dev))
 #define SUPPORTS_INTEGRATED_HDMI(dev)	(IS_G4X(dev) || IS_IRONLAKE(dev))
 #define SUPPORTS_INTEGRATED_DP(dev)	(IS_G4X(dev) || IS_IRONLAKE(dev))
 #define SUPPORTS_EDP(dev)		(IS_IRONLAKE_M(dev))
-#define SUPPORTS_TV(dev)		(IS_I9XX(dev) && IS_MOBILE(dev) && \
-					!IS_IRONLAKE(dev) && !IS_PINEVIEW(dev) && \
-					!IS_GEN6(dev))
+#define SUPPORTS_TV(dev)		(INTEL_INFO(dev)->supports_tv)
 #define I915_HAS_HOTPLUG(dev)	(INTEL_INFO(dev)->has_hotplug)
 /* dsparb controlled by hw only */
 #define DSPARB_HWCONTROL(dev)	(IS_G4X(dev) || IS_IRONLAKE(dev))
 
-#define HAS_FW_BLC(dev)	(IS_I9XX(dev) || IS_G4X(dev) || IS_IRONLAKE(dev))
+#define HAS_FW_BLC(dev)	(INTEL_INFO(dev)->gen > 2)
 #define HAS_PIPE_CXSR(dev)	(INTEL_INFO(dev)->has_pipe_cxsr)
 #define I915_HAS_FBC(dev)	(INTEL_INFO(dev)->has_fbc)
 #define I915_HAS_RC6(dev)	(INTEL_INFO(dev)->has_rc6)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index cf4ffbee1c00..29e97c075421 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -37,7 +37,9 @@
 #include <linux/intel-gtt.h>
 
 static uint32_t i915_gem_get_gtt_alignment(struct drm_gem_object *obj);
-static int i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj);
+
+static int i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj,
+						  bool pipelined);
 static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj);
 static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj);
 static int i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj,
@@ -46,7 +48,8 @@ static int i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
 						     uint64_t offset,
 						     uint64_t size);
 static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj);
-static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
+static int i915_gem_object_wait_rendering(struct drm_gem_object *obj,
+					  bool interruptible);
 static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
 				       unsigned alignment);
 static void i915_gem_clear_fence_reg(struct drm_gem_object *obj);
@@ -55,9 +58,111 @@ static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *o
 				struct drm_file *file_priv);
 static void i915_gem_free_object_tail(struct drm_gem_object *obj);
 
+static int
+i915_gem_object_get_pages(struct drm_gem_object *obj,
+			  gfp_t gfpmask);
+
+static void
+i915_gem_object_put_pages(struct drm_gem_object *obj);
+
 static LIST_HEAD(shrink_list);
 static DEFINE_SPINLOCK(shrink_list_lock);
 
+/* some bookkeeping */
+static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
+				  size_t size)
+{
+	dev_priv->mm.object_count++;
+	dev_priv->mm.object_memory += size;
+}
+
+static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
+				     size_t size)
+{
+	dev_priv->mm.object_count--;
+	dev_priv->mm.object_memory -= size;
+}
+
+static void i915_gem_info_add_gtt(struct drm_i915_private *dev_priv,
+				  size_t size)
+{
+	dev_priv->mm.gtt_count++;
+	dev_priv->mm.gtt_memory += size;
+}
+
+static void i915_gem_info_remove_gtt(struct drm_i915_private *dev_priv,
+				     size_t size)
+{
+	dev_priv->mm.gtt_count--;
+	dev_priv->mm.gtt_memory -= size;
+}
+
+static void i915_gem_info_add_pin(struct drm_i915_private *dev_priv,
+				  size_t size)
+{
+	dev_priv->mm.pin_count++;
+	dev_priv->mm.pin_memory += size;
+}
+
+static void i915_gem_info_remove_pin(struct drm_i915_private *dev_priv,
+				     size_t size)
+{
+	dev_priv->mm.pin_count--;
+	dev_priv->mm.pin_memory -= size;
+}
+
+int
+i915_gem_check_is_wedged(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct completion *x = &dev_priv->error_completion;
+	unsigned long flags;
+	int ret;
+
+	if (!atomic_read(&dev_priv->mm.wedged))
+		return 0;
+
+	ret = wait_for_completion_interruptible(x);
+	if (ret)
+		return ret;
+
+	/* Success, we reset the GPU! */
+	if (!atomic_read(&dev_priv->mm.wedged))
+		return 0;
+
+	/* GPU is hung, bump the completion count to account for
+	 * the token we just consumed so that we never hit zero and
+	 * end up waiting upon a subsequent completion event that
+	 * will never happen.
+	 */
+	spin_lock_irqsave(&x->wait.lock, flags);
+	x->done++;
+	spin_unlock_irqrestore(&x->wait.lock, flags);
+	return -EIO;
+}
+
+static int i915_mutex_lock_interruptible(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret;
+
+	ret = i915_gem_check_is_wedged(dev);
+	if (ret)
+		return ret;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+
+	if (atomic_read(&dev_priv->mm.wedged)) {
+		mutex_unlock(&dev->struct_mutex);
+		return -EAGAIN;
+	}
+
+	WARN_ON(i915_verify_lists(dev));
+	return 0;
+}
+
 static inline bool
 i915_gem_object_is_inactive(struct drm_i915_gem_object *obj_priv)
 {
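[Editor's note] The completion bump in i915_gem_check_is_wedged() above is the subtle part of this hunk. A waiter sleeps on error_completion until error handling finishes; on wake-up it has consumed one completion token, so if the GPU is still wedged it must return the token (x->done++) or a later waiter could block forever on a count of zero. A hedged sketch of the reset-side counterpart, which is not part of this diff:

	/* Hypothetical reset-side pairing for i915_gem_check_is_wedged():
	 * once error handling completes, clear the wedged flag if the reset
	 * worked and wake every waiter.  Names follow the hunk above; the
	 * real handler lives elsewhere in this series. */
	static void example_error_work_done(struct drm_i915_private *dev_priv,
					    bool reset_ok)
	{
		if (reset_ok)
			atomic_set(&dev_priv->mm.wedged, 0);
		complete_all(&dev_priv->error_completion);
	}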
@@ -66,7 +171,8 @@ i915_gem_object_is_inactive(struct drm_i915_gem_object *obj_priv)
 		obj_priv->pin_count == 0;
 }
 
-int i915_gem_do_init(struct drm_device *dev, unsigned long start,
+int i915_gem_do_init(struct drm_device *dev,
+		     unsigned long start,
 		     unsigned long end)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
@@ -80,7 +186,7 @@ int i915_gem_do_init(struct drm_device *dev, unsigned long start,
 	drm_mm_init(&dev_priv->mm.gtt_space, start,
 		    end - start);
 
-	dev->gtt_total = (uint32_t) (end - start);
+	dev_priv->mm.gtt_total = end - start;
 
 	return 0;
 }
@@ -103,14 +209,16 @@ int
 i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
 			    struct drm_file *file_priv)
 {
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_gem_get_aperture *args = data;
 
 	if (!(dev->driver->driver_features & DRIVER_GEM))
 		return -ENODEV;
 
-	args->aper_size = dev->gtt_total;
-	args->aper_available_size = (args->aper_size -
-				     atomic_read(&dev->pin_memory));
+	mutex_lock(&dev->struct_mutex);
+	args->aper_size = dev_priv->mm.gtt_total;
+	args->aper_available_size = args->aper_size - dev_priv->mm.pin_memory;
+	mutex_unlock(&dev->struct_mutex);
 
 	return 0;
 }
@@ -265,7 +373,9 @@ i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj,
 	user_data = (char __user *) (uintptr_t) args->data_ptr;
 	remain = args->size;
 
-	mutex_lock(&dev->struct_mutex);
+	ret = i915_mutex_lock_interruptible(dev);
+	if (ret)
+		return ret;
 
 	ret = i915_gem_object_get_pages(obj, 0);
 	if (ret != 0)
@@ -384,7 +494,9 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
 
 	do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
 
-	mutex_lock(&dev->struct_mutex);
+	ret = i915_mutex_lock_interruptible(dev);
+	if (ret)
+		goto fail_put_user_pages;
 
 	ret = i915_gem_object_get_pages_or_evict(obj);
 	if (ret)
@@ -464,21 +576,27 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
 	struct drm_i915_gem_pread *args = data;
 	struct drm_gem_object *obj;
 	struct drm_i915_gem_object *obj_priv;
-	int ret;
+	int ret = 0;
 
 	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
 	if (obj == NULL)
 		return -ENOENT;
 	obj_priv = to_intel_bo(obj);
 
-	/* Bounds check source.
-	 *
-	 * XXX: This could use review for overflow issues...
-	 */
-	if (args->offset > obj->size || args->size > obj->size ||
-	    args->offset + args->size > obj->size) {
-		drm_gem_object_unreference_unlocked(obj);
-		return -EINVAL;
+	/* Bounds check source. */
+	if (args->offset > obj->size || args->size > obj->size - args->offset) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (args->size == 0)
+		goto out;
+
+	if (!access_ok(VERIFY_WRITE,
+		       (char __user *)(uintptr_t)args->data_ptr,
+		       args->size)) {
+		ret = -EFAULT;
+		goto out;
 	}
 
 	if (i915_gem_object_needs_bit17_swizzle(obj)) {
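[Editor's note] The rewritten bounds check settles the old "XXX: overflow issues" worry: args->offset + args->size can wrap in 64 bits and slip past a naive comparison, while testing args->size > obj->size - args->offset only after args->offset > obj->size has been excluded keeps the subtraction safe. A standalone illustration of the idiom:

	/* Overflow-safe form of the pread/pwrite bounds check above:
	 * accept (offset, size) only if [offset, offset + size) fits in an
	 * object of obj_size bytes, without ever computing offset + size. */
	static bool range_ok(uint64_t offset, uint64_t size, uint64_t obj_size)
	{
		if (offset > obj_size)	/* also guards the subtraction below */
			return false;
		return size <= obj_size - offset;
	}
	/* e.g. offset = 1, size = UINT64_MAX: offset + size wraps to 0 and
	 * would pass a naive sum check; size > obj_size - 1 rejects it. */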
@@ -490,8 +608,8 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
 				       file_priv);
 	}
 
+out:
 	drm_gem_object_unreference_unlocked(obj);
-
 	return ret;
 }
 
@@ -580,11 +698,11 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
 
 	user_data = (char __user *) (uintptr_t) args->data_ptr;
 	remain = args->size;
-	if (!access_ok(VERIFY_READ, user_data, remain))
-		return -EFAULT;
 
+	ret = i915_mutex_lock_interruptible(dev);
+	if (ret)
+		return ret;
 
-	mutex_lock(&dev->struct_mutex);
 	ret = i915_gem_object_pin(obj, 0);
 	if (ret) {
 		mutex_unlock(&dev->struct_mutex);
@@ -679,7 +797,10 @@ i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
 		goto out_unpin_pages;
 	}
 
-	mutex_lock(&dev->struct_mutex);
+	ret = i915_mutex_lock_interruptible(dev);
+	if (ret)
+		goto out_unpin_pages;
+
 	ret = i915_gem_object_pin(obj, 0);
 	if (ret)
 		goto out_unlock;
@@ -753,7 +874,9 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
 	user_data = (char __user *) (uintptr_t) args->data_ptr;
 	remain = args->size;
 
-	mutex_lock(&dev->struct_mutex);
+	ret = i915_mutex_lock_interruptible(dev);
+	if (ret)
+		return ret;
 
 	ret = i915_gem_object_get_pages(obj, 0);
 	if (ret != 0)
@@ -849,7 +972,9 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
 
 	do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
 
-	mutex_lock(&dev->struct_mutex);
+	ret = i915_mutex_lock_interruptible(dev);
+	if (ret)
+		goto fail_put_user_pages;
 
 	ret = i915_gem_object_get_pages_or_evict(obj);
 	if (ret)
@@ -934,14 +1059,20 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
 		return -ENOENT;
 	obj_priv = to_intel_bo(obj);
 
-	/* Bounds check destination.
-	 *
-	 * XXX: This could use review for overflow issues...
-	 */
-	if (args->offset > obj->size || args->size > obj->size ||
-	    args->offset + args->size > obj->size) {
-		drm_gem_object_unreference_unlocked(obj);
-		return -EINVAL;
+	/* Bounds check destination. */
+	if (args->offset > obj->size || args->size > obj->size - args->offset) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (args->size == 0)
+		goto out;
+
+	if (!access_ok(VERIFY_READ,
+		       (char __user *)(uintptr_t)args->data_ptr,
+		       args->size)) {
+		ret = -EFAULT;
+		goto out;
 	}
 
 	/* We can only do the GTT pwrite on untiled buffers, as otherwise
@@ -953,7 +1084,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
 	if (obj_priv->phys_obj)
 		ret = i915_gem_phys_pwrite(dev, obj, args, file_priv);
 	else if (obj_priv->tiling_mode == I915_TILING_NONE &&
-		 dev->gtt_total != 0 &&
+		 obj_priv->gtt_space &&
 		 obj->write_domain != I915_GEM_DOMAIN_CPU) {
 		ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file_priv);
 		if (ret == -EFAULT) {
@@ -975,8 +1106,8 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
 		DRM_INFO("pwrite failed %d\n", ret);
 #endif
 
+out:
 	drm_gem_object_unreference_unlocked(obj);
-
 	return ret;
 }
 
@@ -1017,14 +1148,14 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
 		return -ENOENT;
 	obj_priv = to_intel_bo(obj);
 
-	mutex_lock(&dev->struct_mutex);
+	ret = i915_mutex_lock_interruptible(dev);
+	if (ret) {
+		drm_gem_object_unreference_unlocked(obj);
+		return ret;
+	}
 
 	intel_mark_busy(dev, obj);
 
-#if WATCH_BUF
-	DRM_INFO("set_domain_ioctl %p(%zd), %08x %08x\n",
-		 obj, obj->size, read_domains, write_domain);
-#endif
 	if (read_domains & I915_GEM_DOMAIN_GTT) {
 		ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
 
@@ -1048,7 +1179,6 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
 		ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
 	}
 
-
 	/* Maintain LRU order of "inactive" objects */
 	if (ret == 0 && i915_gem_object_is_inactive(obj_priv))
 		list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
@@ -1067,27 +1197,23 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
 {
 	struct drm_i915_gem_sw_finish *args = data;
 	struct drm_gem_object *obj;
-	struct drm_i915_gem_object *obj_priv;
 	int ret = 0;
 
 	if (!(dev->driver->driver_features & DRIVER_GEM))
 		return -ENODEV;
 
-	mutex_lock(&dev->struct_mutex);
 	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
-	if (obj == NULL) {
-		mutex_unlock(&dev->struct_mutex);
+	if (obj == NULL)
 		return -ENOENT;
-	}
 
-#if WATCH_BUF
-	DRM_INFO("%s: sw_finish %d (%p %zd)\n",
-		 __func__, args->handle, obj, obj->size);
-#endif
-	obj_priv = to_intel_bo(obj);
+	ret = i915_mutex_lock_interruptible(dev);
+	if (ret) {
+		drm_gem_object_unreference_unlocked(obj);
+		return ret;
+	}
 
 	/* Pinned buffers may be scanout, so flush the cache */
-	if (obj_priv->pin_count)
+	if (to_intel_bo(obj)->pin_count)
 		i915_gem_object_flush_cpu_write_domain(obj);
 
 	drm_gem_object_unreference(obj);
@@ -1179,7 +1305,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 
 	/* Need a new fence register? */
 	if (obj_priv->tiling_mode != I915_TILING_NONE) {
-		ret = i915_gem_object_get_fence_reg(obj);
+		ret = i915_gem_object_get_fence_reg(obj, true);
 		if (ret)
 			goto unlock;
 	}
@@ -1244,7 +1370,7 @@ i915_gem_create_mmap_offset(struct drm_gem_object *obj)
 						    obj->size / PAGE_SIZE, 0, 0);
 	if (!list->file_offset_node) {
 		DRM_ERROR("failed to allocate offset for bo %d\n", obj->name);
-		ret = -ENOMEM;
+		ret = -ENOSPC;
 		goto out_free_list;
 	}
 
@@ -1256,9 +1382,9 @@
 	}
 
 	list->hash.key = list->file_offset_node->start;
-	if (drm_ht_insert_item(&mm->offset_hash, &list->hash)) {
+	ret = drm_ht_insert_item(&mm->offset_hash, &list->hash);
+	if (ret) {
 		DRM_ERROR("failed to add to map hash\n");
-		ret = -ENOMEM;
 		goto out_free_mm;
 	}
 
@@ -1343,14 +1469,14 @@ i915_gem_get_gtt_alignment(struct drm_gem_object *obj)
 	 * Minimum alignment is 4k (GTT page size), but might be greater
 	 * if a fence register is needed for the object.
 	 */
-	if (IS_I965G(dev) || obj_priv->tiling_mode == I915_TILING_NONE)
+	if (INTEL_INFO(dev)->gen >= 4 || obj_priv->tiling_mode == I915_TILING_NONE)
 		return 4096;
 
 	/*
 	 * Previous chips need to be aligned to the size of the smallest
 	 * fence register that can contain the object.
 	 */
-	if (IS_I9XX(dev))
+	if (INTEL_INFO(dev)->gen == 3)
 		start = 1024*1024;
 	else
 		start = 512*1024;
@@ -1392,7 +1518,11 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
 	if (obj == NULL)
 		return -ENOENT;
 
-	mutex_lock(&dev->struct_mutex);
+	ret = i915_mutex_lock_interruptible(dev);
+	if (ret) {
+		drm_gem_object_unreference_unlocked(obj);
+		return ret;
+	}
 
 	obj_priv = to_intel_bo(obj);
 
@@ -1434,7 +1564,7 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
 	return 0;
 }
 
-void
+static void
 i915_gem_object_put_pages(struct drm_gem_object *obj)
 {
 	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
@@ -1468,13 +1598,24 @@ i915_gem_object_put_pages(struct drm_gem_object *obj)
 	obj_priv->pages = NULL;
 }
 
+static uint32_t
+i915_gem_next_request_seqno(struct drm_device *dev,
+			    struct intel_ring_buffer *ring)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+
+	ring->outstanding_lazy_request = true;
+	return dev_priv->next_seqno;
+}
+
 static void
-i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno,
+i915_gem_object_move_to_active(struct drm_gem_object *obj,
 			       struct intel_ring_buffer *ring)
 {
 	struct drm_device *dev = obj->dev;
-	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+	uint32_t seqno = i915_gem_next_request_seqno(dev, ring);
+
 	BUG_ON(ring == NULL);
 	obj_priv->ring = ring;
 
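[Editor's note] i915_gem_next_request_seqno() above introduces the lazy-request scheme used through the rest of this diff: activating an object merely reserves dev_priv->next_seqno and marks ring->outstanding_lazy_request; the request is emitted only when someone actually needs it (see i915_add_request() and i915_do_wait_request() below). A condensed sketch of the flow, using the names from these hunks:

	/* Condensed sketch of the lazy-request flow; the real logic is split
	 * across i915_gem_next_request_seqno(), i915_add_request() and
	 * i915_do_wait_request() in this diff. */
	static int example_wait_lazy(struct drm_device *dev,
				     struct intel_ring_buffer *ring,
				     uint32_t seqno)
	{
		/* The seqno was only reserved earlier via
		 * i915_gem_next_request_seqno(); emit it now, on demand. */
		if (ring->outstanding_lazy_request) {
			seqno = i915_add_request(dev, NULL, NULL, ring);
			if (seqno == 0)
				return -ENOMEM;
		}
		return i915_do_wait_request(dev, seqno, true, ring);
	}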
@@ -1483,10 +1624,9 @@ i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno,
 		drm_gem_object_reference(obj);
 		obj_priv->active = 1;
 	}
+
 	/* Move from whatever list we were on to the tail of execution. */
-	spin_lock(&dev_priv->mm.active_list_lock);
 	list_move_tail(&obj_priv->list, &ring->active_list);
-	spin_unlock(&dev_priv->mm.active_list_lock);
 	obj_priv->last_rendering_seqno = seqno;
 }
 
@@ -1536,9 +1676,8 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 
-	i915_verify_inactive(dev, __FILE__, __LINE__);
 	if (obj_priv->pin_count != 0)
-		list_del_init(&obj_priv->list);
+		list_move_tail(&obj_priv->list, &dev_priv->mm.pinned_list);
 	else
 		list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
 
@@ -1550,12 +1689,12 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
 		obj_priv->active = 0;
 		drm_gem_object_unreference(obj);
 	}
-	i915_verify_inactive(dev, __FILE__, __LINE__);
+	WARN_ON(i915_verify_lists(dev));
 }
 
 static void
 i915_gem_process_flushing_list(struct drm_device *dev,
-			       uint32_t flush_domains, uint32_t seqno,
+			       uint32_t flush_domains,
 			       struct intel_ring_buffer *ring)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
@@ -1566,14 +1705,13 @@ i915_gem_process_flushing_list(struct drm_device *dev,
 			    gpu_write_list) {
 		struct drm_gem_object *obj = &obj_priv->base;
 
-		if ((obj->write_domain & flush_domains) ==
-		    obj->write_domain &&
-		    obj_priv->ring->ring_flag == ring->ring_flag) {
+		if (obj->write_domain & flush_domains &&
+		    obj_priv->ring == ring) {
 			uint32_t old_write_domain = obj->write_domain;
 
 			obj->write_domain = 0;
 			list_del_init(&obj_priv->gpu_write_list);
-			i915_gem_object_move_to_active(obj, seqno, ring);
+			i915_gem_object_move_to_active(obj, ring);
 
 			/* update the fence lru list */
 			if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
@@ -1591,23 +1729,27 @@ i915_gem_process_flushing_list(struct drm_device *dev,
 }
 
 uint32_t
-i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
-		 uint32_t flush_domains, struct intel_ring_buffer *ring)
+i915_add_request(struct drm_device *dev,
+		 struct drm_file *file,
+		 struct drm_i915_gem_request *request,
+		 struct intel_ring_buffer *ring)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_i915_file_private *i915_file_priv = NULL;
-	struct drm_i915_gem_request *request;
+	struct drm_i915_file_private *file_priv = NULL;
 	uint32_t seqno;
 	int was_empty;
 
-	if (file_priv != NULL)
-		i915_file_priv = file_priv->driver_priv;
+	if (file != NULL)
+		file_priv = file->driver_priv;
 
-	request = kzalloc(sizeof(*request), GFP_KERNEL);
-	if (request == NULL)
-		return 0;
+	if (request == NULL) {
+		request = kzalloc(sizeof(*request), GFP_KERNEL);
+		if (request == NULL)
+			return 0;
+	}
 
-	seqno = ring->add_request(dev, ring, file_priv, flush_domains);
+	seqno = ring->add_request(dev, ring, 0);
+	ring->outstanding_lazy_request = false;
 
 	request->seqno = seqno;
 	request->ring = ring;
@@ -1615,23 +1757,20 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
 	was_empty = list_empty(&ring->request_list);
 	list_add_tail(&request->list, &ring->request_list);
 
-	if (i915_file_priv) {
+	if (file_priv) {
+		spin_lock(&file_priv->mm.lock);
+		request->file_priv = file_priv;
 		list_add_tail(&request->client_list,
-			      &i915_file_priv->mm.request_list);
-	} else {
-		INIT_LIST_HEAD(&request->client_list);
+			      &file_priv->mm.request_list);
+		spin_unlock(&file_priv->mm.lock);
 	}
 
-	/* Associate any objects on the flushing list matching the write
-	 * domain we're flushing with our flush.
-	 */
-	if (flush_domains != 0)
-		i915_gem_process_flushing_list(dev, flush_domains, seqno, ring);
-
 	if (!dev_priv->mm.suspended) {
-		mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
+		mod_timer(&dev_priv->hangcheck_timer,
+			  jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
 		if (was_empty)
-			queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
+			queue_delayed_work(dev_priv->wq,
+					   &dev_priv->mm.retire_work, HZ);
 	}
 	return seqno;
 }
@@ -1642,91 +1781,105 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
  * Ensures that all commands in the ring are finished
  * before signalling the CPU
  */
-static uint32_t
+static void
 i915_retire_commands(struct drm_device *dev, struct intel_ring_buffer *ring)
 {
 	uint32_t flush_domains = 0;
 
 	/* The sampler always gets flushed on i965 (sigh) */
-	if (IS_I965G(dev))
+	if (INTEL_INFO(dev)->gen >= 4)
 		flush_domains |= I915_GEM_DOMAIN_SAMPLER;
 
 	ring->flush(dev, ring,
 		    I915_GEM_DOMAIN_COMMAND, flush_domains);
-	return flush_domains;
 }
 
-/**
- * Moves buffers associated only with the given active seqno from the active
- * to inactive list, potentially freeing them.
- */
-static void
-i915_gem_retire_request(struct drm_device *dev,
-			struct drm_i915_gem_request *request)
-{
-	drm_i915_private_t *dev_priv = dev->dev_private;
-
-	trace_i915_gem_request_retire(dev, request->seqno);
-
-	/* Move any buffers on the active list that are no longer referenced
-	 * by the ringbuffer to the flushing/inactive lists as appropriate.
-	 */
-	spin_lock(&dev_priv->mm.active_list_lock);
-	while (!list_empty(&request->ring->active_list)) {
-		struct drm_gem_object *obj;
-		struct drm_i915_gem_object *obj_priv;
-
-		obj_priv = list_first_entry(&request->ring->active_list,
-					    struct drm_i915_gem_object,
-					    list);
-		obj = &obj_priv->base;
-
-		/* If the seqno being retired doesn't match the oldest in the
-		 * list, then the oldest in the list must still be newer than
-		 * this seqno.
-		 */
-		if (obj_priv->last_rendering_seqno != request->seqno)
-			goto out;
-
-#if WATCH_LRU
-		DRM_INFO("%s: retire %d moves to inactive list %p\n",
-			 __func__, request->seqno, obj);
-#endif
-
-		if (obj->write_domain != 0)
-			i915_gem_object_move_to_flushing(obj);
-		else {
-			/* Take a reference on the object so it won't be
-			 * freed while the spinlock is held.  The list
-			 * protection for this spinlock is safe when breaking
-			 * the lock like this since the next thing we do
-			 * is just get the head of the list again.
-			 */
-			drm_gem_object_reference(obj);
-			i915_gem_object_move_to_inactive(obj);
-			spin_unlock(&dev_priv->mm.active_list_lock);
-			drm_gem_object_unreference(obj);
-			spin_lock(&dev_priv->mm.active_list_lock);
-		}
-	}
-out:
-	spin_unlock(&dev_priv->mm.active_list_lock);
-}
-
-/**
- * Returns true if seq1 is later than seq2.
- */
-bool
-i915_seqno_passed(uint32_t seq1, uint32_t seq2)
-{
-	return (int32_t)(seq1 - seq2) >= 0;
-}
-
-uint32_t
-i915_get_gem_seqno(struct drm_device *dev,
-		   struct intel_ring_buffer *ring)
-{
-	return ring->get_gem_seqno(dev, ring);
+static inline void
+i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
+{
+	struct drm_i915_file_private *file_priv = request->file_priv;
+
+	if (!file_priv)
+		return;
+
+	spin_lock(&file_priv->mm.lock);
+	list_del(&request->client_list);
+	request->file_priv = NULL;
+	spin_unlock(&file_priv->mm.lock);
+}
+
+static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
+				      struct intel_ring_buffer *ring)
+{
+	while (!list_empty(&ring->request_list)) {
+		struct drm_i915_gem_request *request;
+
+		request = list_first_entry(&ring->request_list,
+					   struct drm_i915_gem_request,
+					   list);
+
+		list_del(&request->list);
+		i915_gem_request_remove_from_client(request);
+		kfree(request);
+	}
+
+	while (!list_empty(&ring->active_list)) {
+		struct drm_i915_gem_object *obj_priv;
+
+		obj_priv = list_first_entry(&ring->active_list,
+					    struct drm_i915_gem_object,
+					    list);
+
+		obj_priv->base.write_domain = 0;
+		list_del_init(&obj_priv->gpu_write_list);
+		i915_gem_object_move_to_inactive(&obj_priv->base);
+	}
+}
+
+void i915_gem_reset(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_gem_object *obj_priv;
+	int i;
+
+	i915_gem_reset_ring_lists(dev_priv, &dev_priv->render_ring);
+	if (HAS_BSD(dev))
+		i915_gem_reset_ring_lists(dev_priv, &dev_priv->bsd_ring);
+
+	/* Remove anything from the flushing lists. The GPU cache is likely
+	 * to be lost on reset along with the data, so simply move the
+	 * lost bo to the inactive list.
+	 */
+	while (!list_empty(&dev_priv->mm.flushing_list)) {
+		obj_priv = list_first_entry(&dev_priv->mm.flushing_list,
+					    struct drm_i915_gem_object,
+					    list);
+
+		obj_priv->base.write_domain = 0;
+		list_del_init(&obj_priv->gpu_write_list);
+		i915_gem_object_move_to_inactive(&obj_priv->base);
+	}
+
+	/* Move everything out of the GPU domains to ensure we do any
+	 * necessary invalidation upon reuse.
+	 */
+	list_for_each_entry(obj_priv,
+			    &dev_priv->mm.inactive_list,
+			    list)
+	{
+		obj_priv->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
+	}
+
+	/* The fence registers are invalidated so clear them out */
+	for (i = 0; i < 16; i++) {
+		struct drm_i915_fence_reg *reg;
+
+		reg = &dev_priv->fence_regs[i];
+		if (!reg->obj)
+			continue;
+
+		i915_gem_clear_fence_reg(reg->obj);
+	}
 }
 
 /**
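[Editor's note] i915_seqno_passed() and i915_get_gem_seqno() leave this file here (callers switch to ring->get_seqno()), but the wraparound-safe comparison that the retirement loops below rely on is worth spelling out:

	/* "seq1 is at or after seq2" for 32-bit seqnos, tolerant of wrap.
	 * Casting the unsigned difference to int32_t keeps the test correct
	 * across the 0xffffffff -> 0 rollover:
	 *   seq1 = 0x00000002, seq2 = 0xfffffffe:
	 *   (int32_t)(seq1 - seq2) = (int32_t)0x00000004 >= 0 -> passed,
	 * even though seq1 < seq2 as plain unsigned values. */
	static inline bool example_seqno_passed(uint32_t seq1, uint32_t seq2)
	{
		return (int32_t)(seq1 - seq2) >= 0;
	}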
@@ -1739,38 +1892,58 @@ i915_gem_retire_requests_ring(struct drm_device *dev,
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	uint32_t seqno;
 
-	if (!ring->status_page.page_addr
-	    || list_empty(&ring->request_list))
+	if (!ring->status_page.page_addr ||
+	    list_empty(&ring->request_list))
 		return;
 
-	seqno = i915_get_gem_seqno(dev, ring);
+	WARN_ON(i915_verify_lists(dev));
 
+	seqno = ring->get_seqno(dev, ring);
 	while (!list_empty(&ring->request_list)) {
 		struct drm_i915_gem_request *request;
-		uint32_t retiring_seqno;
 
 		request = list_first_entry(&ring->request_list,
 					   struct drm_i915_gem_request,
 					   list);
-		retiring_seqno = request->seqno;
 
-		if (i915_seqno_passed(seqno, retiring_seqno) ||
-		    atomic_read(&dev_priv->mm.wedged)) {
-			i915_gem_retire_request(dev, request);
+		if (!i915_seqno_passed(seqno, request->seqno))
+			break;
+
+		trace_i915_gem_request_retire(dev, request->seqno);
+
+		list_del(&request->list);
+		i915_gem_request_remove_from_client(request);
+		kfree(request);
+	}
+
+	/* Move any buffers on the active list that are no longer referenced
+	 * by the ringbuffer to the flushing/inactive lists as appropriate.
+	 */
+	while (!list_empty(&ring->active_list)) {
+		struct drm_gem_object *obj;
+		struct drm_i915_gem_object *obj_priv;
+
+		obj_priv = list_first_entry(&ring->active_list,
+					    struct drm_i915_gem_object,
+					    list);
 
-			list_del(&request->list);
-			list_del(&request->client_list);
-			kfree(request);
-		} else
+		if (!i915_seqno_passed(seqno, obj_priv->last_rendering_seqno))
 			break;
+
+		obj = &obj_priv->base;
+		if (obj->write_domain != 0)
+			i915_gem_object_move_to_flushing(obj);
+		else
+			i915_gem_object_move_to_inactive(obj);
 	}
 
 	if (unlikely (dev_priv->trace_irq_seqno &&
 		      i915_seqno_passed(dev_priv->trace_irq_seqno, seqno))) {
-
 		ring->user_irq_put(dev, ring);
 		dev_priv->trace_irq_seqno = 0;
 	}
+
+	WARN_ON(i915_verify_lists(dev));
 }
 
 void
@@ -1797,7 +1970,7 @@ i915_gem_retire_requests(struct drm_device *dev)
 		i915_gem_retire_requests_ring(dev, &dev_priv->bsd_ring);
 }
 
-void
+static void
 i915_gem_retire_work_handler(struct work_struct *work)
 {
 	drm_i915_private_t *dev_priv;
@@ -1807,7 +1980,12 @@ i915_gem_retire_work_handler(struct work_struct *work)
 			     mm.retire_work.work);
 	dev = dev_priv->dev;
 
-	mutex_lock(&dev->struct_mutex);
+	/* Come back later if the device is busy... */
+	if (!mutex_trylock(&dev->struct_mutex)) {
+		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
+		return;
+	}
+
 	i915_gem_retire_requests(dev);
 
 	if (!dev_priv->mm.suspended &&
@@ -1820,7 +1998,7 @@ i915_gem_retire_work_handler(struct work_struct *work)
 
 int
 i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
-		     int interruptible, struct intel_ring_buffer *ring)
+		     bool interruptible, struct intel_ring_buffer *ring)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	u32 ier;
@@ -1829,9 +2007,16 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
 	BUG_ON(seqno == 0);
 
 	if (atomic_read(&dev_priv->mm.wedged))
-		return -EIO;
+		return -EAGAIN;
+
+	if (ring->outstanding_lazy_request) {
+		seqno = i915_add_request(dev, NULL, NULL, ring);
+		if (seqno == 0)
+			return -ENOMEM;
+	}
+	BUG_ON(seqno == dev_priv->next_seqno);
 
-	if (!i915_seqno_passed(ring->get_gem_seqno(dev, ring), seqno)) {
+	if (!i915_seqno_passed(ring->get_seqno(dev, ring), seqno)) {
 		if (HAS_PCH_SPLIT(dev))
 			ier = I915_READ(DEIER) | I915_READ(GTIER);
 		else
@@ -1850,12 +2035,12 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
 		if (interruptible)
 			ret = wait_event_interruptible(ring->irq_queue,
 				i915_seqno_passed(
-					ring->get_gem_seqno(dev, ring), seqno)
+					ring->get_seqno(dev, ring), seqno)
 				|| atomic_read(&dev_priv->mm.wedged));
 		else
 			wait_event(ring->irq_queue,
 				i915_seqno_passed(
-					ring->get_gem_seqno(dev, ring), seqno)
+					ring->get_seqno(dev, ring), seqno)
 				|| atomic_read(&dev_priv->mm.wedged));
 
 		ring->user_irq_put(dev, ring);
@@ -1864,11 +2049,12 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
 		trace_i915_gem_request_wait_end(dev, seqno);
 	}
 	if (atomic_read(&dev_priv->mm.wedged))
-		ret = -EIO;
+		ret = -EAGAIN;
 
 	if (ret && ret != -ERESTARTSYS)
-		DRM_ERROR("%s returns %d (awaiting %d at %d)\n",
-			  __func__, ret, seqno, ring->get_gem_seqno(dev, ring));
+		DRM_ERROR("%s returns %d (awaiting %d at %d, next %d)\n",
+			  __func__, ret, seqno, ring->get_seqno(dev, ring),
+			  dev_priv->next_seqno);
 
 	/* Directly dispatch request retiring. While we have the work queue
 	 * to handle this, the waiter on a request often wants an associated
@@ -1887,27 +2073,44 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
  */
 static int
 i915_wait_request(struct drm_device *dev, uint32_t seqno,
		  struct intel_ring_buffer *ring)
 {
 	return i915_do_wait_request(dev, seqno, 1, ring);
 }
 
 static void
+i915_gem_flush_ring(struct drm_device *dev,
+		    struct drm_file *file_priv,
+		    struct intel_ring_buffer *ring,
+		    uint32_t invalidate_domains,
+		    uint32_t flush_domains)
+{
+	ring->flush(dev, ring, invalidate_domains, flush_domains);
+	i915_gem_process_flushing_list(dev, flush_domains, ring);
+}
+
+static void
 i915_gem_flush(struct drm_device *dev,
+	       struct drm_file *file_priv,
 	       uint32_t invalidate_domains,
-	       uint32_t flush_domains)
+	       uint32_t flush_domains,
+	       uint32_t flush_rings)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
+
 	if (flush_domains & I915_GEM_DOMAIN_CPU)
 		drm_agp_chipset_flush(dev);
-	dev_priv->render_ring.flush(dev, &dev_priv->render_ring,
-			invalidate_domains,
-			flush_domains);
 
-	if (HAS_BSD(dev))
-		dev_priv->bsd_ring.flush(dev, &dev_priv->bsd_ring,
-				invalidate_domains,
-				flush_domains);
+	if ((flush_domains | invalidate_domains) & I915_GEM_GPU_DOMAINS) {
+		if (flush_rings & RING_RENDER)
+			i915_gem_flush_ring(dev, file_priv,
+					    &dev_priv->render_ring,
+					    invalidate_domains, flush_domains);
+		if (flush_rings & RING_BSD)
+			i915_gem_flush_ring(dev, file_priv,
+					    &dev_priv->bsd_ring,
+					    invalidate_domains, flush_domains);
+	}
 }
 
 /**
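[Editor's note] i915_gem_flush() now takes a flush_rings bitmask so a GPU-domain flush touches only the rings that own dirty objects, instead of unconditionally flushing both render and BSD. A hedged caller sketch (RING_RENDER and RING_BSD are assumed to be single-bit flags defined elsewhere in this series):

	/* Hypothetical caller of the reworked i915_gem_flush(): collect the
	 * rings that own the write domains being flushed, then flush only
	 * those.  RING_RENDER/RING_BSD come from elsewhere in the series. */
	static void example_flush_dirty(struct drm_device *dev,
					struct drm_file *file_priv,
					uint32_t invalidate, uint32_t flush,
					bool render_dirty, bool bsd_dirty)
	{
		uint32_t flush_rings = 0;

		if (render_dirty)
			flush_rings |= RING_RENDER;
		if (bsd_dirty)
			flush_rings |= RING_BSD;
		i915_gem_flush(dev, file_priv, invalidate, flush, flush_rings);
	}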
@@ -1915,7 +2118,8 @@
  * safe to unbind from the GTT or access from the CPU.
  */
 static int
-i915_gem_object_wait_rendering(struct drm_gem_object *obj)
+i915_gem_object_wait_rendering(struct drm_gem_object *obj,
+			       bool interruptible)
 {
 	struct drm_device *dev = obj->dev;
 	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
@@ -1930,13 +2134,11 @@ i915_gem_object_wait_rendering(struct drm_gem_object *obj)
 	 * it.
 	 */
 	if (obj_priv->active) {
-#if WATCH_BUF
-		DRM_INFO("%s: object %p wait for seqno %08x\n",
-			  __func__, obj, obj_priv->last_rendering_seqno);
-#endif
-		ret = i915_wait_request(dev,
-				obj_priv->last_rendering_seqno, obj_priv->ring);
-		if (ret != 0)
+		ret = i915_do_wait_request(dev,
+					   obj_priv->last_rendering_seqno,
+					   interruptible,
+					   obj_priv->ring);
+		if (ret)
 			return ret;
 	}
 
@@ -1950,14 +2152,10 @@ int
 i915_gem_object_unbind(struct drm_gem_object *obj)
 {
 	struct drm_device *dev = obj->dev;
-	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 	int ret = 0;
 
-#if WATCH_BUF
-	DRM_INFO("%s:%d %p\n", __func__, __LINE__, obj);
-	DRM_INFO("gtt_space %p\n", obj_priv->gtt_space);
-#endif
 	if (obj_priv->gtt_space == NULL)
 		return 0;
 
@@ -1982,33 +2180,26 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
 	 * should be safe and we need to cleanup or else we might
 	 * cause memory corruption through use-after-free.
 	 */
+	if (ret) {
+		i915_gem_clflush_object(obj);
+		obj->read_domains = obj->write_domain = I915_GEM_DOMAIN_CPU;
+	}
 
 	/* release the fence reg _after_ flushing */
 	if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
 		i915_gem_clear_fence_reg(obj);
 
-	if (obj_priv->agp_mem != NULL) {
-		drm_unbind_agp(obj_priv->agp_mem);
-		drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE);
-		obj_priv->agp_mem = NULL;
-	}
+	drm_unbind_agp(obj_priv->agp_mem);
+	drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE);
 
 	i915_gem_object_put_pages(obj);
 	BUG_ON(obj_priv->pages_refcount);
 
-	if (obj_priv->gtt_space) {
-		atomic_dec(&dev->gtt_count);
-		atomic_sub(obj->size, &dev->gtt_memory);
+	i915_gem_info_remove_gtt(dev_priv, obj->size);
+	list_del_init(&obj_priv->list);
 
-		drm_mm_put_block(obj_priv->gtt_space);
-		obj_priv->gtt_space = NULL;
-	}
-
-	/* Remove ourselves from the LRU list if present. */
-	spin_lock(&dev_priv->mm.active_list_lock);
-	if (!list_empty(&obj_priv->list))
-		list_del_init(&obj_priv->list);
-	spin_unlock(&dev_priv->mm.active_list_lock);
+	drm_mm_put_block(obj_priv->gtt_space);
+	obj_priv->gtt_space = NULL;
 
 	if (i915_gem_object_is_purgeable(obj_priv))
 		i915_gem_object_truncate(obj);
@@ -2018,48 +2209,45 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
 	return ret;
 }
 
+static int i915_ring_idle(struct drm_device *dev,
+			  struct intel_ring_buffer *ring)
+{
+	i915_gem_flush_ring(dev, NULL, ring,
+			    I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+	return i915_wait_request(dev,
+				 i915_gem_next_request_seqno(dev, ring),
+				 ring);
+}
+
 int
 i915_gpu_idle(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	bool lists_empty;
-	uint32_t seqno1, seqno2;
 	int ret;
 
-	spin_lock(&dev_priv->mm.active_list_lock);
 	lists_empty = (list_empty(&dev_priv->mm.flushing_list) &&
 		       list_empty(&dev_priv->render_ring.active_list) &&
 		       (!HAS_BSD(dev) ||
			list_empty(&dev_priv->bsd_ring.active_list)));
-	spin_unlock(&dev_priv->mm.active_list_lock);
-
 	if (lists_empty)
 		return 0;
 
 	/* Flush everything onto the inactive list. */
-	i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
-	seqno1 = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS,
-				  &dev_priv->render_ring);
-	if (seqno1 == 0)
-		return -ENOMEM;
-	ret = i915_wait_request(dev, seqno1, &dev_priv->render_ring);
+	ret = i915_ring_idle(dev, &dev_priv->render_ring);
+	if (ret)
+		return ret;
 
 	if (HAS_BSD(dev)) {
-		seqno2 = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS,
-					  &dev_priv->bsd_ring);
-		if (seqno2 == 0)
-			return -ENOMEM;
-
-		ret = i915_wait_request(dev, seqno2, &dev_priv->bsd_ring);
+		ret = i915_ring_idle(dev, &dev_priv->bsd_ring);
 		if (ret)
 			return ret;
 	}
 
-
-	return ret;
+	return 0;
 }
 
-int
+static int
 i915_gem_object_get_pages(struct drm_gem_object *obj,
			  gfp_t gfpmask)
 {
@@ -2239,7 +2427,8 @@ static void i830_write_fence_reg(struct drm_i915_fence_reg *reg)
 	I915_WRITE(FENCE_REG_830_0 + (regnum * 4), val);
 }
 
-static int i915_find_fence_reg(struct drm_device *dev)
+static int i915_find_fence_reg(struct drm_device *dev,
+			       bool interruptible)
 {
 	struct drm_i915_fence_reg *reg = NULL;
 	struct drm_i915_gem_object *obj_priv = NULL;
@@ -2284,7 +2473,7 @@ static int i915_find_fence_reg(struct drm_device *dev)
 	 * private reference to obj like the other callers of put_fence_reg
 	 * (set_tiling ioctl) do. */
 	drm_gem_object_reference(obj);
-	ret = i915_gem_object_put_fence_reg(obj);
+	ret = i915_gem_object_put_fence_reg(obj, interruptible);
 	drm_gem_object_unreference(obj);
 	if (ret != 0)
 		return ret;
@@ -2306,7 +2495,8 @@ static int i915_find_fence_reg(struct drm_device *dev)
  * and tiling format.
  */
 int
-i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
+i915_gem_object_get_fence_reg(struct drm_gem_object *obj,
+			      bool interruptible)
 {
 	struct drm_device *dev = obj->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -2341,7 +2531,7 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
 		break;
 	}
 
-	ret = i915_find_fence_reg(dev);
+	ret = i915_find_fence_reg(dev, interruptible);
 	if (ret < 0)
 		return ret;
 
@@ -2400,7 +2590,7 @@ i915_gem_clear_fence_reg(struct drm_gem_object *obj)
 		I915_WRITE64(FENCE_REG_965_0 + (obj_priv->fence_reg * 8), 0);
 		break;
 	case 3:
-		if (obj_priv->fence_reg > 8)
+		if (obj_priv->fence_reg >= 8)
 			fence_reg = FENCE_REG_945_8 + (obj_priv->fence_reg - 8) * 4;
 		else
 	case 2:
@@ -2419,15 +2609,19 @@ i915_gem_clear_fence_reg(struct drm_gem_object *obj)
  * i915_gem_object_put_fence_reg - waits on outstanding fenced access
  * to the buffer to finish, and then resets the fence register.
  * @obj: tiled object holding a fence register.
+ * @bool: whether the wait upon the fence is interruptible
  *
  * Zeroes out the fence register itself and clears out the associated
 * data structures in dev_priv and obj_priv.
 */
 int
-i915_gem_object_put_fence_reg(struct drm_gem_object *obj)
+i915_gem_object_put_fence_reg(struct drm_gem_object *obj,
+			      bool interruptible)
 {
 	struct drm_device *dev = obj->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+	struct drm_i915_fence_reg *reg;
 
 	if (obj_priv->fence_reg == I915_FENCE_REG_NONE)
 		return 0;
@@ -2442,20 +2636,23 @@ i915_gem_object_put_fence_reg(struct drm_gem_object *obj)
 	 * therefore we must wait for any outstanding access to complete
 	 * before clearing the fence.
 	 */
-	if (!IS_I965G(dev)) {
+	reg = &dev_priv->fence_regs[obj_priv->fence_reg];
+	if (reg->gpu) {
 		int ret;
 
-		ret = i915_gem_object_flush_gpu_write_domain(obj);
-		if (ret != 0)
+		ret = i915_gem_object_flush_gpu_write_domain(obj, true);
+		if (ret)
 			return ret;
 
-		ret = i915_gem_object_wait_rendering(obj);
-		if (ret != 0)
+		ret = i915_gem_object_wait_rendering(obj, interruptible);
+		if (ret)
 			return ret;
+
+		reg->gpu = false;
 	}
 
 	i915_gem_object_flush_gtt_write_domain(obj);
-	i915_gem_clear_fence_reg (obj);
+	i915_gem_clear_fence_reg(obj);
 
 	return 0;
 }
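[Editor's note] put_fence_reg now waits only when the fence has actually been used by the GPU (reg->gpu), rather than pessimistically on every pre-965 chip; the flag is cleared above once the wait completes. A hedged sketch of the set side, which is not part of this hunk:

	/* Hypothetical counterpart to the hunk above: whichever path hands a
	 * fenced object to the GPU records that fact, so put_fence_reg() can
	 * skip the flush/wait for fences only ever touched by the CPU.  The
	 * real set-site lives elsewhere in this series. */
	static void example_mark_fence_busy(struct drm_i915_private *dev_priv,
					    struct drm_i915_gem_object *obj_priv)
	{
		if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
			dev_priv->fence_regs[obj_priv->fence_reg].gpu = true;
	}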
@@ -2488,7 +2685,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
2488 /* If the object is bigger than the entire aperture, reject it early 2685 /* If the object is bigger than the entire aperture, reject it early
2489 * before evicting everything in a vain attempt to find space. 2686 * before evicting everything in a vain attempt to find space.
2490 */ 2687 */
2491 if (obj->size > dev->gtt_total) { 2688 if (obj->size > dev_priv->mm.gtt_total) {
2492 DRM_ERROR("Attempting to bind an object larger than the aperture\n"); 2689 DRM_ERROR("Attempting to bind an object larger than the aperture\n");
2493 return -E2BIG; 2690 return -E2BIG;
2494 } 2691 }
@@ -2506,9 +2703,6 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
2506 /* If the gtt is empty and we're still having trouble 2703 /* If the gtt is empty and we're still having trouble
2507 * fitting our object in, we're out of memory. 2704 * fitting our object in, we're out of memory.
2508 */ 2705 */
2509#if WATCH_LRU
2510 DRM_INFO("%s: GTT full, evicting something\n", __func__);
2511#endif
2512 ret = i915_gem_evict_something(dev, obj->size, alignment); 2706 ret = i915_gem_evict_something(dev, obj->size, alignment);
2513 if (ret) 2707 if (ret)
2514 return ret; 2708 return ret;
@@ -2516,10 +2710,6 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
2516 goto search_free; 2710 goto search_free;
2517 } 2711 }
2518 2712
2519#if WATCH_BUF
2520 DRM_INFO("Binding object of size %zd at 0x%08x\n",
2521 obj->size, obj_priv->gtt_offset);
2522#endif
2523 ret = i915_gem_object_get_pages(obj, gfpmask); 2713 ret = i915_gem_object_get_pages(obj, gfpmask);
2524 if (ret) { 2714 if (ret) {
2525 drm_mm_put_block(obj_priv->gtt_space); 2715 drm_mm_put_block(obj_priv->gtt_space);
@@ -2564,11 +2754,10 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
2564 2754
2565 goto search_free; 2755 goto search_free;
2566 } 2756 }
2567 atomic_inc(&dev->gtt_count);
2568 atomic_add(obj->size, &dev->gtt_memory);
2569 2757
2570 /* keep track of bounds object by adding it to the inactive list */ 2758 /* keep track of bounds object by adding it to the inactive list */
2571 list_add_tail(&obj_priv->list, &dev_priv->mm.inactive_list); 2759 list_add_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
2760 i915_gem_info_add_gtt(dev_priv, obj->size);
2572 2761
2573 /* Assert that the object is not currently in any GPU domain. As it 2762 /* Assert that the object is not currently in any GPU domain. As it
2574 * wasn't in the GTT, there shouldn't be any way it could have been in 2763 * wasn't in the GTT, there shouldn't be any way it could have been in
@@ -2601,25 +2790,30 @@ i915_gem_clflush_object(struct drm_gem_object *obj)
 
 /** Flushes any GPU write domain for the object if it's dirty. */
 static int
-i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
+i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj,
+				       bool pipelined)
 {
 	struct drm_device *dev = obj->dev;
 	uint32_t old_write_domain;
-	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 
 	if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
 		return 0;
 
 	/* Queue the GPU write cache flushing we need. */
 	old_write_domain = obj->write_domain;
-	i915_gem_flush(dev, 0, obj->write_domain);
-	if (i915_add_request(dev, NULL, obj->write_domain, obj_priv->ring) == 0)
-		return -ENOMEM;
+	i915_gem_flush_ring(dev, NULL,
+			    to_intel_bo(obj)->ring,
+			    0, obj->write_domain);
+	BUG_ON(obj->write_domain);
 
 	trace_i915_gem_object_change_domain(obj,
 					    obj->read_domains,
 					    old_write_domain);
-	return 0;
+
+	if (pipelined)
+		return 0;
+
+	return i915_gem_object_wait_rendering(obj, true);
 }
 
 /** Flushes the GTT write domain for the object if it's dirty. */
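[Editorial note] With the new pipelined flag, the caller now decides whether the queued GPU flush must also be waited on. A minimal sketch of the two styles of call site this series introduces (identifiers as in the patch; not additional patch content):

	/* Blocking path, e.g. moving to the GTT domain: flush, then wait. */
	ret = i915_gem_object_flush_gpu_write_domain(obj, false);

	/* Pipelined path, e.g. scanout setup: queue the flush on the
	 * object's ring and return without waiting for rendering.
	 */
	ret = i915_gem_object_flush_gpu_write_domain(obj, true);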
@@ -2663,26 +2857,6 @@ i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj)
 					    old_write_domain);
 }
 
-int
-i915_gem_object_flush_write_domain(struct drm_gem_object *obj)
-{
-	int ret = 0;
-
-	switch (obj->write_domain) {
-	case I915_GEM_DOMAIN_GTT:
-		i915_gem_object_flush_gtt_write_domain(obj);
-		break;
-	case I915_GEM_DOMAIN_CPU:
-		i915_gem_object_flush_cpu_write_domain(obj);
-		break;
-	default:
-		ret = i915_gem_object_flush_gpu_write_domain(obj);
-		break;
-	}
-
-	return ret;
-}
-
 /**
  * Moves a single object to the GTT read, and possibly write domain.
  *
@@ -2700,32 +2874,28 @@ i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
 	if (obj_priv->gtt_space == NULL)
 		return -EINVAL;
 
-	ret = i915_gem_object_flush_gpu_write_domain(obj);
+	ret = i915_gem_object_flush_gpu_write_domain(obj, false);
 	if (ret != 0)
 		return ret;
 
-	/* Wait on any GPU rendering and flushing to occur. */
-	ret = i915_gem_object_wait_rendering(obj);
-	if (ret != 0)
-		return ret;
+	i915_gem_object_flush_cpu_write_domain(obj);
+
+	if (write) {
+		ret = i915_gem_object_wait_rendering(obj, true);
+		if (ret)
+			return ret;
+	}
 
 	old_write_domain = obj->write_domain;
 	old_read_domains = obj->read_domains;
 
-	/* If we're writing through the GTT domain, then CPU and GPU caches
-	 * will need to be invalidated at next use.
-	 */
-	if (write)
-		obj->read_domains &= I915_GEM_DOMAIN_GTT;
-
-	i915_gem_object_flush_cpu_write_domain(obj);
-
 	/* It should now be out of any other write domains, and we can update
 	 * the domain values for our changes.
 	 */
 	BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
 	obj->read_domains |= I915_GEM_DOMAIN_GTT;
 	if (write) {
+		obj->read_domains = I915_GEM_DOMAIN_GTT;
 		obj->write_domain = I915_GEM_DOMAIN_GTT;
 		obj_priv->dirty = 1;
 	}
@@ -2742,51 +2912,36 @@ i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
  * wait, as in modesetting process we're not supposed to be interrupted.
  */
 int
-i915_gem_object_set_to_display_plane(struct drm_gem_object *obj)
+i915_gem_object_set_to_display_plane(struct drm_gem_object *obj,
+				     bool pipelined)
 {
-	struct drm_device *dev = obj->dev;
 	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
-	uint32_t old_write_domain, old_read_domains;
+	uint32_t old_read_domains;
 	int ret;
 
 	/* Not valid to be called on unbound objects. */
 	if (obj_priv->gtt_space == NULL)
 		return -EINVAL;
 
-	ret = i915_gem_object_flush_gpu_write_domain(obj);
+	ret = i915_gem_object_flush_gpu_write_domain(obj, true);
 	if (ret)
 		return ret;
 
-	/* Wait on any GPU rendering and flushing to occur. */
-	if (obj_priv->active) {
-#if WATCH_BUF
-		DRM_INFO("%s: object %p wait for seqno %08x\n",
-			  __func__, obj, obj_priv->last_rendering_seqno);
-#endif
-		ret = i915_do_wait_request(dev,
-					   obj_priv->last_rendering_seqno,
-					   0,
-					   obj_priv->ring);
-		if (ret != 0)
+	/* Currently, we are always called from an non-interruptible context. */
+	if (!pipelined) {
+		ret = i915_gem_object_wait_rendering(obj, false);
+		if (ret)
 			return ret;
 	}
 
 	i915_gem_object_flush_cpu_write_domain(obj);
 
-	old_write_domain = obj->write_domain;
 	old_read_domains = obj->read_domains;
-
-	/* It should now be out of any other write domains, and we can update
-	 * the domain values for our changes.
-	 */
-	BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
-	obj->read_domains = I915_GEM_DOMAIN_GTT;
-	obj->write_domain = I915_GEM_DOMAIN_GTT;
-	obj_priv->dirty = 1;
+	obj->read_domains |= I915_GEM_DOMAIN_GTT;
 
 	trace_i915_gem_object_change_domain(obj,
 					    old_read_domains,
-					    old_write_domain);
+					    obj->write_domain);
 
 	return 0;
 }
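[Editorial note] A hypothetical caller of the reworked helper, e.g. a pageflip path; with pipelined set, the flush is queued on the object's ring and the call returns without blocking, while modesetting passes false and waits for rendering:

	ret = i915_gem_object_set_to_display_plane(obj, true /* pipelined */);
	if (ret)
		return ret;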
@@ -2803,12 +2958,7 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
 	uint32_t old_write_domain, old_read_domains;
 	int ret;
 
-	ret = i915_gem_object_flush_gpu_write_domain(obj);
-	if (ret)
-		return ret;
-
-	/* Wait on any GPU rendering and flushing to occur. */
-	ret = i915_gem_object_wait_rendering(obj);
+	ret = i915_gem_object_flush_gpu_write_domain(obj, false);
 	if (ret != 0)
 		return ret;
 
@@ -2819,6 +2969,12 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
 	 */
 	i915_gem_object_set_to_full_cpu_read_domain(obj);
 
+	if (write) {
+		ret = i915_gem_object_wait_rendering(obj, true);
+		if (ret)
+			return ret;
+	}
+
 	old_write_domain = obj->write_domain;
 	old_read_domains = obj->read_domains;
 
@@ -2838,7 +2994,7 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
 	 * need to be invalidated at next use.
 	 */
 	if (write) {
-		obj->read_domains &= I915_GEM_DOMAIN_CPU;
+		obj->read_domains = I915_GEM_DOMAIN_CPU;
 		obj->write_domain = I915_GEM_DOMAIN_CPU;
 	}
 
@@ -2964,7 +3120,7 @@ static void
 i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
 {
 	struct drm_device *dev = obj->dev;
-	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 	uint32_t invalidate_domains = 0;
 	uint32_t flush_domains = 0;
@@ -2975,12 +3131,6 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
 
 	intel_mark_busy(dev, obj);
 
-#if WATCH_BUF
-	DRM_INFO("%s: object %p read %08x -> %08x write %08x -> %08x\n",
-		 __func__, obj,
-		 obj->read_domains, obj->pending_read_domains,
-		 obj->write_domain, obj->pending_write_domain);
-#endif
 	/*
 	 * If the object isn't moving to a new write domain,
 	 * let the object stay in multiple read domains
@@ -3007,13 +3157,8 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
 	 * stale data. That is, any new read domains.
 	 */
 	invalidate_domains |= obj->pending_read_domains & ~obj->read_domains;
-	if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU) {
-#if WATCH_BUF
-		DRM_INFO("%s: CPU domain flush %08x invalidate %08x\n",
-			 __func__, flush_domains, invalidate_domains);
-#endif
+	if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU)
 		i915_gem_clflush_object(obj);
-	}
 
 	old_read_domains = obj->read_domains;
 
@@ -3027,21 +3172,10 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
 	obj->pending_write_domain = obj->write_domain;
 	obj->read_domains = obj->pending_read_domains;
 
-	if (flush_domains & I915_GEM_GPU_DOMAINS) {
-		if (obj_priv->ring == &dev_priv->render_ring)
-			dev_priv->flush_rings |= FLUSH_RENDER_RING;
-		else if (obj_priv->ring == &dev_priv->bsd_ring)
-			dev_priv->flush_rings |= FLUSH_BSD_RING;
-	}
-
 	dev->invalidate_domains |= invalidate_domains;
 	dev->flush_domains |= flush_domains;
-#if WATCH_BUF
-	DRM_INFO("%s: read %08x write %08x invalidate %08x flush %08x\n",
-		 __func__,
-		 obj->read_domains, obj->write_domain,
-		 dev->invalidate_domains, dev->flush_domains);
-#endif
+	if (obj_priv->ring)
+		dev_priv->mm.flush_rings |= obj_priv->ring->id;
 
 	trace_i915_gem_object_change_domain(obj,
 					    old_read_domains,
@@ -3104,12 +3238,7 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
 	if (offset == 0 && size == obj->size)
 		return i915_gem_object_set_to_cpu_domain(obj, 0);
 
-	ret = i915_gem_object_flush_gpu_write_domain(obj);
-	if (ret)
-		return ret;
-
-	/* Wait on any GPU rendering and flushing to occur. */
-	ret = i915_gem_object_wait_rendering(obj);
+	ret = i915_gem_object_flush_gpu_write_domain(obj, false);
 	if (ret != 0)
 		return ret;
 	i915_gem_object_flush_gtt_write_domain(obj);
@@ -3196,11 +3325,13 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
 	 * properly handle blits to/from tiled surfaces.
 	 */
 	if (need_fence) {
-		ret = i915_gem_object_get_fence_reg(obj);
+		ret = i915_gem_object_get_fence_reg(obj, true);
 		if (ret != 0) {
 			i915_gem_object_unpin(obj);
 			return ret;
 		}
+
+		dev_priv->fence_regs[obj_priv->fence_reg].gpu = true;
 	}
 
 	entry->offset = obj_priv->gtt_offset;
@@ -3258,6 +3389,8 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
 				  (int) reloc->offset,
 				  reloc->read_domains,
 				  reloc->write_domain);
+			drm_gem_object_unreference(target_obj);
+			i915_gem_object_unpin(obj);
 			return -EINVAL;
 		}
 		if (reloc->write_domain & I915_GEM_DOMAIN_CPU ||
@@ -3333,7 +3466,7 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
 		if (ret != 0) {
 			drm_gem_object_unreference(target_obj);
 			i915_gem_object_unpin(obj);
-			return -EINVAL;
+			return ret;
 		}
 
 		/* Map the page containing the relocation we're going to
@@ -3348,11 +3481,6 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
 						      (reloc_offset & (PAGE_SIZE - 1)));
 		reloc_val = target_obj_priv->gtt_offset + reloc->delta;
 
-#if WATCH_BUF
-		DRM_INFO("Applied relocation: %p@0x%08x %08x -> %08x\n",
-			  obj, (unsigned int) reloc->offset,
-			  readl(reloc_entry), reloc_val);
-#endif
 		writel(reloc_val, reloc_entry);
 		io_mapping_unmap_atomic(reloc_page, KM_USER0);
 
@@ -3364,10 +3492,6 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
 		drm_gem_object_unreference(target_obj);
 	}
 
-#if WATCH_BUF
-	if (0)
-		i915_gem_dump_object(obj, 128, __func__, ~0);
-#endif
 	return 0;
 }
 
@@ -3382,28 +3506,48 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
  * relatively low latency when blocking on a particular request to finish.
  */
 static int
-i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv)
+i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
 {
-	struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
-	int ret = 0;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_file_private *file_priv = file->driver_priv;
 	unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
+	struct drm_i915_gem_request *request;
+	struct intel_ring_buffer *ring = NULL;
+	u32 seqno = 0;
+	int ret;
 
-	mutex_lock(&dev->struct_mutex);
-	while (!list_empty(&i915_file_priv->mm.request_list)) {
-		struct drm_i915_gem_request *request;
-
-		request = list_first_entry(&i915_file_priv->mm.request_list,
-					   struct drm_i915_gem_request,
-					   client_list);
-
+	spin_lock(&file_priv->mm.lock);
+	list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
 		if (time_after_eq(request->emitted_jiffies, recent_enough))
 			break;
 
-		ret = i915_wait_request(dev, request->seqno, request->ring);
-		if (ret != 0)
-			break;
+		ring = request->ring;
+		seqno = request->seqno;
 	}
-	mutex_unlock(&dev->struct_mutex);
+	spin_unlock(&file_priv->mm.lock);
+
+	if (seqno == 0)
+		return 0;
+
+	ret = 0;
+	if (!i915_seqno_passed(ring->get_seqno(dev, ring), seqno)) {
+		/* And wait for the seqno passing without holding any locks and
+		 * causing extra latency for others. This is safe as the irq
+		 * generation is designed to be run atomically and so is
+		 * lockless.
+		 */
+		ring->user_irq_get(dev, ring);
+		ret = wait_event_interruptible(ring->irq_queue,
+					       i915_seqno_passed(ring->get_seqno(dev, ring), seqno)
+					       || atomic_read(&dev_priv->mm.wedged));
+		ring->user_irq_put(dev, ring);
+
+		if (ret == 0 && atomic_read(&dev_priv->mm.wedged))
+			ret = -EIO;
+	}
+
+	if (ret == 0)
+		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
 
 	return ret;
 }
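[Editorial note] The throttle now records only the target ring and seqno under the new per-file spinlock and drops every lock before sleeping. The skeleton of that lockless wait, with the wedged-GPU handling made explicit (identifiers from the patch; a sketch, not additional patch content):

	ring->user_irq_get(dev, ring);
	ret = wait_event_interruptible(ring->irq_queue,
				       i915_seqno_passed(ring->get_seqno(dev, ring), seqno) ||
				       atomic_read(&dev_priv->mm.wedged));
	ring->user_irq_put(dev, ring);

	/* The sleep can end for two reasons: the seqno passed (ret == 0,
	 * not wedged) or the GPU was declared wedged, which is reported
	 * to the caller as -EIO rather than a spurious success.
	 */
	if (ret == 0 && atomic_read(&dev_priv->mm.wedged))
		ret = -EIO;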
@@ -3539,8 +3683,7 @@ i915_gem_wait_for_pending_flip(struct drm_device *dev,
 	return ret;
 }
 
-
-int
+static int
 i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 		       struct drm_file *file_priv,
 		       struct drm_i915_gem_execbuffer2 *args,
@@ -3552,13 +3695,18 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	struct drm_i915_gem_object *obj_priv;
 	struct drm_clip_rect *cliprects = NULL;
 	struct drm_i915_gem_relocation_entry *relocs = NULL;
-	int ret = 0, ret2, i, pinned = 0;
+	struct drm_i915_gem_request *request = NULL;
+	int ret, ret2, i, pinned = 0;
 	uint64_t exec_offset;
-	uint32_t seqno, flush_domains, reloc_index;
+	uint32_t reloc_index;
 	int pin_tries, flips;
 
 	struct intel_ring_buffer *ring = NULL;
 
+	ret = i915_gem_check_is_wedged(dev);
+	if (ret)
+		return ret;
+
 #if WATCH_EXEC
 	DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
 		  (int) args->buffers_ptr, args->buffer_count, args->batch_len);
@@ -3605,20 +3753,20 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 		}
 	}
 
+	request = kzalloc(sizeof(*request), GFP_KERNEL);
+	if (request == NULL) {
+		ret = -ENOMEM;
+		goto pre_mutex_err;
+	}
+
 	ret = i915_gem_get_relocs_from_user(exec_list, args->buffer_count,
 					    &relocs);
 	if (ret != 0)
 		goto pre_mutex_err;
 
-	mutex_lock(&dev->struct_mutex);
-
-	i915_verify_inactive(dev, __FILE__, __LINE__);
-
-	if (atomic_read(&dev_priv->mm.wedged)) {
-		mutex_unlock(&dev->struct_mutex);
-		ret = -EIO;
+	ret = i915_mutex_lock_interruptible(dev);
+	if (ret)
 		goto pre_mutex_err;
-	}
 
 	if (dev_priv->mm.suspended) {
 		mutex_unlock(&dev->struct_mutex);
@@ -3698,15 +3846,16 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 				  pinned+1, args->buffer_count,
 				  total_size, num_fences,
 				  ret);
-			DRM_ERROR("%d objects [%d pinned], "
-				  "%d object bytes [%d pinned], "
-				  "%d/%d gtt bytes\n",
-				  atomic_read(&dev->object_count),
-				  atomic_read(&dev->pin_count),
-				  atomic_read(&dev->object_memory),
-				  atomic_read(&dev->pin_memory),
-				  atomic_read(&dev->gtt_memory),
-				  dev->gtt_total);
+			DRM_ERROR("%u objects [%u pinned, %u GTT], "
+				  "%zu object bytes [%zu pinned], "
+				  "%zu/%zu gtt bytes\n",
+				  dev_priv->mm.object_count,
+				  dev_priv->mm.pin_count,
+				  dev_priv->mm.gtt_count,
+				  dev_priv->mm.object_memory,
+				  dev_priv->mm.pin_memory,
+				  dev_priv->mm.gtt_memory,
+				  dev_priv->mm.gtt_total);
 		}
 		goto err;
 	}
@@ -3739,15 +3888,13 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 		goto err;
 	}
 
-	i915_verify_inactive(dev, __FILE__, __LINE__);
-
 	/* Zero the global flush/invalidate flags. These
 	 * will be modified as new domains are computed
 	 * for each object
 	 */
 	dev->invalidate_domains = 0;
 	dev->flush_domains = 0;
-	dev_priv->flush_rings = 0;
+	dev_priv->mm.flush_rings = 0;
 
 	for (i = 0; i < args->buffer_count; i++) {
 		struct drm_gem_object *obj = object_list[i];
@@ -3756,8 +3903,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 		i915_gem_object_set_to_gpu_domain(obj);
 	}
 
-	i915_verify_inactive(dev, __FILE__, __LINE__);
-
 	if (dev->invalidate_domains | dev->flush_domains) {
 #if WATCH_EXEC
 		DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
@@ -3765,17 +3910,10 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 			 dev->invalidate_domains,
 			 dev->flush_domains);
 #endif
-		i915_gem_flush(dev,
+		i915_gem_flush(dev, file_priv,
 			       dev->invalidate_domains,
-			       dev->flush_domains);
-		if (dev_priv->flush_rings & FLUSH_RENDER_RING)
-			(void)i915_add_request(dev, file_priv,
-					       dev->flush_domains,
-					       &dev_priv->render_ring);
-		if (dev_priv->flush_rings & FLUSH_BSD_RING)
-			(void)i915_add_request(dev, file_priv,
-					       dev->flush_domains,
-					       &dev_priv->bsd_ring);
+			       dev->flush_domains,
+			       dev_priv->mm.flush_rings);
 	}
 
 	for (i = 0; i < args->buffer_count; i++) {
@@ -3787,16 +3925,12 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 		if (obj->write_domain)
 			list_move_tail(&obj_priv->gpu_write_list,
 				       &dev_priv->mm.gpu_write_list);
-		else
-			list_del_init(&obj_priv->gpu_write_list);
 
 		trace_i915_gem_object_change_domain(obj,
 						    obj->read_domains,
 						    old_write_domain);
 	}
 
-	i915_verify_inactive(dev, __FILE__, __LINE__);
-
 #if WATCH_COHERENCY
 	for (i = 0; i < args->buffer_count; i++) {
 		i915_gem_object_check_coherency(object_list[i],
@@ -3823,33 +3957,17 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	 * Ensure that the commands in the batch buffer are
 	 * finished before the interrupt fires
 	 */
-	flush_domains = i915_retire_commands(dev, ring);
-
-	i915_verify_inactive(dev, __FILE__, __LINE__);
+	i915_retire_commands(dev, ring);
 
-	/*
-	 * Get a seqno representing the execution of the current buffer,
-	 * which we can wait on.  We would like to mitigate these interrupts,
-	 * likely by only creating seqnos occasionally (so that we have
-	 * *some* interrupts representing completion of buffers that we can
-	 * wait on when trying to clear up gtt space).
-	 */
-	seqno = i915_add_request(dev, file_priv, flush_domains, ring);
-	BUG_ON(seqno == 0);
 	for (i = 0; i < args->buffer_count; i++) {
 		struct drm_gem_object *obj = object_list[i];
 		obj_priv = to_intel_bo(obj);
 
-		i915_gem_object_move_to_active(obj, seqno, ring);
-#if WATCH_LRU
-		DRM_INFO("%s: move to exec list %p\n", __func__, obj);
-#endif
+		i915_gem_object_move_to_active(obj, ring);
 	}
-#if WATCH_LRU
-	i915_dump_lru(dev, __func__);
-#endif
 
-	i915_verify_inactive(dev, __FILE__, __LINE__);
+	i915_add_request(dev, file_priv, request, ring);
+	request = NULL;
 
 err:
 	for (i = 0; i < pinned; i++)
@@ -3882,6 +4000,7 @@ pre_mutex_err:
 
 	drm_free_large(object_list);
 	kfree(cliprects);
+	kfree(request);
 
 	return ret;
 }
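[Editorial note] The request-ownership pattern these execbuffer hunks introduce, in miniature: allocate the request up front so submission cannot fail at the final step, hand it to i915_add_request() and clear the local pointer, and let the common cleanup free it only when it was never handed off (kfree(NULL) is a no-op). A condensed sketch using only identifiers from the patch:

	request = kzalloc(sizeof(*request), GFP_KERNEL);
	if (request == NULL)
		return -ENOMEM;
	/* ... pin, relocate and dispatch the batch ... */
	i915_add_request(dev, file_priv, request, ring);
	request = NULL;		/* now owned by the ring's request list */
	/* ... */
	kfree(request);		/* no-op when the request was handed off */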
@@ -3938,7 +4057,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
 		exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
 		exec2_list[i].alignment = exec_list[i].alignment;
 		exec2_list[i].offset = exec_list[i].offset;
-		if (!IS_I965G(dev))
+		if (INTEL_INFO(dev)->gen < 4)
 			exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
 		else
 			exec2_list[i].flags = 0;
@@ -4035,12 +4154,12 @@ int
 i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
 {
 	struct drm_device *dev = obj->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 	int ret;
 
 	BUG_ON(obj_priv->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT);
-
-	i915_verify_inactive(dev, __FILE__, __LINE__);
+	WARN_ON(i915_verify_lists(dev));
 
 	if (obj_priv->gtt_space != NULL) {
 		if (alignment == 0)
@@ -4068,14 +4187,13 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
 	 * remove it from the inactive list
 	 */
 	if (obj_priv->pin_count == 1) {
-		atomic_inc(&dev->pin_count);
-		atomic_add(obj->size, &dev->pin_memory);
-		if (!obj_priv->active &&
-		    (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
-			list_del_init(&obj_priv->list);
+		i915_gem_info_add_pin(dev_priv, obj->size);
+		if (!obj_priv->active)
+			list_move_tail(&obj_priv->list,
+				       &dev_priv->mm.pinned_list);
 	}
-	i915_verify_inactive(dev, __FILE__, __LINE__);
 
+	WARN_ON(i915_verify_lists(dev));
 	return 0;
 }
 
@@ -4086,7 +4204,7 @@ i915_gem_object_unpin(struct drm_gem_object *obj)
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 
-	i915_verify_inactive(dev, __FILE__, __LINE__);
+	WARN_ON(i915_verify_lists(dev));
 	obj_priv->pin_count--;
 	BUG_ON(obj_priv->pin_count < 0);
 	BUG_ON(obj_priv->gtt_space == NULL);
@@ -4096,14 +4214,12 @@ i915_gem_object_unpin(struct drm_gem_object *obj)
 	 * the inactive list
 	 */
 	if (obj_priv->pin_count == 0) {
-		if (!obj_priv->active &&
-		    (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
+		if (!obj_priv->active)
 			list_move_tail(&obj_priv->list,
 				       &dev_priv->mm.inactive_list);
-		atomic_dec(&dev->pin_count);
-		atomic_sub(obj->size, &dev->pin_memory);
+		i915_gem_info_remove_pin(dev_priv, obj->size);
 	}
-	i915_verify_inactive(dev, __FILE__, __LINE__);
+	WARN_ON(i915_verify_lists(dev));
 }
 
 int
@@ -4115,17 +4231,20 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
 	struct drm_i915_gem_object *obj_priv;
 	int ret;
 
-	mutex_lock(&dev->struct_mutex);
-
 	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
 	if (obj == NULL) {
 		DRM_ERROR("Bad handle in i915_gem_pin_ioctl(): %d\n",
 			  args->handle);
-		mutex_unlock(&dev->struct_mutex);
 		return -ENOENT;
 	}
 	obj_priv = to_intel_bo(obj);
 
+	ret = i915_mutex_lock_interruptible(dev);
+	if (ret) {
+		drm_gem_object_unreference_unlocked(obj);
+		return ret;
+	}
+
 	if (obj_priv->madv != I915_MADV_WILLNEED) {
 		DRM_ERROR("Attempting to pin a purgeable buffer\n");
 		drm_gem_object_unreference(obj);
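[Editorial note] The pin, unpin, busy and madvise ioctls below all converge on the same prologue after this series: look the object up before taking any lock, then take struct_mutex interruptibly, dropping the reference without the lock on failure. The shared shape (identifiers from the patch):

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -ENOENT;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret) {
		drm_gem_object_unreference_unlocked(obj);
		return ret;
	}
	/* ... ioctl body under struct_mutex ... */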
@@ -4170,18 +4289,23 @@ i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
 	struct drm_i915_gem_pin *args = data;
 	struct drm_gem_object *obj;
 	struct drm_i915_gem_object *obj_priv;
-
-	mutex_lock(&dev->struct_mutex);
+	int ret;
 
 	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
 	if (obj == NULL) {
 		DRM_ERROR("Bad handle in i915_gem_unpin_ioctl(): %d\n",
 			  args->handle);
-		mutex_unlock(&dev->struct_mutex);
 		return -ENOENT;
 	}
 
 	obj_priv = to_intel_bo(obj);
+
+	ret = i915_mutex_lock_interruptible(dev);
+	if (ret) {
+		drm_gem_object_unreference_unlocked(obj);
+		return ret;
+	}
+
 	if (obj_priv->pin_filp != file_priv) {
 		DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n",
 			  args->handle);
@@ -4207,6 +4331,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
 	struct drm_i915_gem_busy *args = data;
 	struct drm_gem_object *obj;
 	struct drm_i915_gem_object *obj_priv;
+	int ret;
 
 	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
 	if (obj == NULL) {
@@ -4215,7 +4340,11 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
 		return -ENOENT;
 	}
 
-	mutex_lock(&dev->struct_mutex);
+	ret = i915_mutex_lock_interruptible(dev);
+	if (ret) {
+		drm_gem_object_unreference_unlocked(obj);
+		return ret;
+	}
 
 	/* Count all active objects as busy, even if they are currently not used
 	 * by the gpu. Users of this interface expect objects to eventually
@@ -4230,10 +4359,10 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
 	 * use this buffer rather sooner than later, so issuing the required
 	 * flush earlier is beneficial.
 	 */
-	if (obj->write_domain) {
-		i915_gem_flush(dev, 0, obj->write_domain);
-		(void)i915_add_request(dev, file_priv, obj->write_domain, obj_priv->ring);
-	}
+	if (obj->write_domain & I915_GEM_GPU_DOMAINS)
+		i915_gem_flush_ring(dev, file_priv,
+				    obj_priv->ring,
+				    0, obj->write_domain);
 
 	/* Update the active list for the hardware's current position.
 	 * Otherwise this only updates on a delayed timer or when irqs
@@ -4264,6 +4393,7 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
 	struct drm_i915_gem_madvise *args = data;
 	struct drm_gem_object *obj;
 	struct drm_i915_gem_object *obj_priv;
+	int ret;
 
 	switch (args->madv) {
 	case I915_MADV_DONTNEED:
@@ -4279,10 +4409,14 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
 			  args->handle);
 		return -ENOENT;
 	}
-
-	mutex_lock(&dev->struct_mutex);
 	obj_priv = to_intel_bo(obj);
 
+	ret = i915_mutex_lock_interruptible(dev);
+	if (ret) {
+		drm_gem_object_unreference_unlocked(obj);
+		return ret;
+	}
+
 	if (obj_priv->pin_count) {
 		drm_gem_object_unreference(obj);
 		mutex_unlock(&dev->struct_mutex);
@@ -4310,6 +4444,7 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
 struct drm_gem_object * i915_gem_alloc_object(struct drm_device *dev,
 					      size_t size)
 {
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj;
 
 	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
@@ -4321,6 +4456,8 @@ struct drm_gem_object * i915_gem_alloc_object(struct drm_device *dev,
 		return NULL;
 	}
 
+	i915_gem_info_add_obj(dev_priv, size);
+
 	obj->base.write_domain = I915_GEM_DOMAIN_CPU;
 	obj->base.read_domains = I915_GEM_DOMAIN_CPU;
 
@@ -4361,6 +4498,7 @@ static void i915_gem_free_object_tail(struct drm_gem_object *obj)
 	i915_gem_free_mmap_offset(obj);
 
 	drm_gem_object_release(obj);
+	i915_gem_info_remove_obj(dev_priv, obj->size);
 
 	kfree(obj_priv->page_cpu_valid);
 	kfree(obj_priv->bit_17);
@@ -4419,7 +4557,7 @@ i915_gem_idle(struct drm_device *dev)
 	 * And not confound mm.suspended!
 	 */
 	dev_priv->mm.suspended = 1;
-	del_timer(&dev_priv->hangcheck_timer);
+	del_timer_sync(&dev_priv->hangcheck_timer);
 
 	i915_kernel_lost_context(dev);
 	i915_gem_cleanup_ringbuffer(dev);
@@ -4499,28 +4637,18 @@ i915_gem_init_ringbuffer(struct drm_device *dev)
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	int ret;
 
-	dev_priv->render_ring = render_ring;
-
-	if (!I915_NEED_GFX_HWS(dev)) {
-		dev_priv->render_ring.status_page.page_addr
-			= dev_priv->status_page_dmah->vaddr;
-		memset(dev_priv->render_ring.status_page.page_addr,
-		       0, PAGE_SIZE);
-	}
-
 	if (HAS_PIPE_CONTROL(dev)) {
 		ret = i915_gem_init_pipe_control(dev);
 		if (ret)
 			return ret;
 	}
 
-	ret = intel_init_ring_buffer(dev, &dev_priv->render_ring);
+	ret = intel_init_render_ring_buffer(dev);
 	if (ret)
 		goto cleanup_pipe_control;
 
 	if (HAS_BSD(dev)) {
-		dev_priv->bsd_ring = bsd_ring;
-		ret = intel_init_ring_buffer(dev, &dev_priv->bsd_ring);
+		ret = intel_init_bsd_ring_buffer(dev);
 		if (ret)
 			goto cleanup_render_ring;
 	}
@@ -4573,11 +4701,8 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
 		return ret;
 	}
 
-	spin_lock(&dev_priv->mm.active_list_lock);
 	BUG_ON(!list_empty(&dev_priv->render_ring.active_list));
 	BUG_ON(HAS_BSD(dev) && !list_empty(&dev_priv->bsd_ring.active_list));
-	spin_unlock(&dev_priv->mm.active_list_lock);
-
 	BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
 	BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
 	BUG_ON(!list_empty(&dev_priv->render_ring.request_list));
@@ -4629,10 +4754,10 @@ i915_gem_load(struct drm_device *dev)
 	int i;
 	drm_i915_private_t *dev_priv = dev->dev_private;
 
-	spin_lock_init(&dev_priv->mm.active_list_lock);
 	INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
 	INIT_LIST_HEAD(&dev_priv->mm.gpu_write_list);
 	INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
+	INIT_LIST_HEAD(&dev_priv->mm.pinned_list);
 	INIT_LIST_HEAD(&dev_priv->mm.fence_list);
 	INIT_LIST_HEAD(&dev_priv->mm.deferred_free_list);
 	INIT_LIST_HEAD(&dev_priv->render_ring.active_list);
@@ -4645,6 +4770,7 @@ i915_gem_load(struct drm_device *dev)
 		INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
 	INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
 			  i915_gem_retire_work_handler);
+	init_completion(&dev_priv->error_completion);
 	spin_lock(&shrink_list_lock);
 	list_add(&dev_priv->mm.shrink_list, &shrink_list);
 	spin_unlock(&shrink_list_lock);
@@ -4663,21 +4789,30 @@ i915_gem_load(struct drm_device *dev)
 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
 		dev_priv->fence_reg_start = 3;
 
-	if (IS_I965G(dev) || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
+	if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
 		dev_priv->num_fence_regs = 16;
 	else
 		dev_priv->num_fence_regs = 8;
 
 	/* Initialize fence registers to zero */
-	if (IS_I965G(dev)) {
-		for (i = 0; i < 16; i++)
-			I915_WRITE64(FENCE_REG_965_0 + (i * 8), 0);
-	} else {
-		for (i = 0; i < 8; i++)
-			I915_WRITE(FENCE_REG_830_0 + (i * 4), 0);
-		if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
-			for (i = 0; i < 8; i++)
-				I915_WRITE(FENCE_REG_945_8 + (i * 4), 0);
-	}
+	switch (INTEL_INFO(dev)->gen) {
+	case 6:
+		for (i = 0; i < 16; i++)
+			I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), 0);
+		break;
+	case 5:
+	case 4:
+		for (i = 0; i < 16; i++)
+			I915_WRITE64(FENCE_REG_965_0 + (i * 8), 0);
+		break;
+	case 3:
+		if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
+			for (i = 0; i < 8; i++)
+				I915_WRITE(FENCE_REG_945_8 + (i * 4), 0);
+	case 2:
+		for (i = 0; i < 8; i++)
+			I915_WRITE(FENCE_REG_830_0 + (i * 4), 0);
+		break;
+	}
 	i915_gem_detect_bit_6_swizzle(dev);
 	init_waitqueue_head(&dev_priv->pending_flip_queue);
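[Editorial note] In the switch above, case 3 deliberately has no break: 945G/945GM/G33 clear their eight extra fence registers and then fall through into the common gen2 loop for the low eight. Condensed for emphasis (same registers as the patch):

	case 3:
		if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
			for (i = 0; i < 8; i++)
				I915_WRITE(FENCE_REG_945_8 + (i * 4), 0);
		/* fall through: gen3 also clears the 830-style registers */
	case 2:
		for (i = 0; i < 8; i++)
			I915_WRITE(FENCE_REG_830_0 + (i * 4), 0);
		break;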
@@ -4687,8 +4822,8 @@ i915_gem_load(struct drm_device *dev)
  * Create a physically contiguous memory object for this object
  * e.g. for cursor + overlay regs
  */
-int i915_gem_init_phys_object(struct drm_device *dev,
-			      int id, int size, int align)
+static int i915_gem_init_phys_object(struct drm_device *dev,
+				     int id, int size, int align)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_gem_phys_object *phys_obj;
@@ -4720,7 +4855,7 @@ kfree_obj:
 	return ret;
 }
 
-void i915_gem_free_phys_object(struct drm_device *dev, int id)
+static void i915_gem_free_phys_object(struct drm_device *dev, int id)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_gem_phys_object *phys_obj;
@@ -4865,18 +5000,25 @@ i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
 	return 0;
 }
 
-void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv)
+void i915_gem_release(struct drm_device *dev, struct drm_file *file)
 {
-	struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
+	struct drm_i915_file_private *file_priv = file->driver_priv;
 
 	/* Clean up our request list when the client is going away, so that
 	 * later retire_requests won't dereference our soon-to-be-gone
 	 * file_priv.
 	 */
-	mutex_lock(&dev->struct_mutex);
-	while (!list_empty(&i915_file_priv->mm.request_list))
-		list_del_init(i915_file_priv->mm.request_list.next);
-	mutex_unlock(&dev->struct_mutex);
+	spin_lock(&file_priv->mm.lock);
+	while (!list_empty(&file_priv->mm.request_list)) {
+		struct drm_i915_gem_request *request;
+
+		request = list_first_entry(&file_priv->mm.request_list,
+					   struct drm_i915_gem_request,
+					   client_list);
+		list_del(&request->client_list);
+		request->file_priv = NULL;
+	}
+	spin_unlock(&file_priv->mm.lock);
 }
 
 static int
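[Editorial note] i915_gem_release() no longer unlinks requests outright: each request stays queued on its ring's request_list for normal retirement, and only the per-file linkage is severed under the new per-file spinlock. The invariant the detach preserves, annotated:

	list_del(&request->client_list);	/* gone from the closing file's view */
	request->file_priv = NULL;		/* retirement skips per-client accounting
						 * instead of chasing a freed file_priv */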
@@ -4885,12 +5027,10 @@ i915_gpu_is_active(struct drm_device *dev)
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	int lists_empty;
 
-	spin_lock(&dev_priv->mm.active_list_lock);
 	lists_empty = list_empty(&dev_priv->mm.flushing_list) &&
 		      list_empty(&dev_priv->render_ring.active_list);
 	if (HAS_BSD(dev))
 		lists_empty &= list_empty(&dev_priv->bsd_ring.active_list);
-	spin_unlock(&dev_priv->mm.active_list_lock);
 
 	return !lists_empty;
 }
diff --git a/drivers/gpu/drm/i915/i915_gem_debug.c b/drivers/gpu/drm/i915/i915_gem_debug.c
index 80f380b1d951..48644b840a8d 100644
--- a/drivers/gpu/drm/i915/i915_gem_debug.c
+++ b/drivers/gpu/drm/i915/i915_gem_debug.c
@@ -30,29 +30,112 @@
 #include "i915_drm.h"
 #include "i915_drv.h"
 
-#if WATCH_INACTIVE
-void
-i915_verify_inactive(struct drm_device *dev, char *file, int line)
-{
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_gem_object *obj;
-	struct drm_i915_gem_object *obj_priv;
-
-	list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
-		obj = &obj_priv->base;
-		if (obj_priv->pin_count || obj_priv->active ||
-		    (obj->write_domain & ~(I915_GEM_DOMAIN_CPU |
-					   I915_GEM_DOMAIN_GTT)))
-			DRM_ERROR("inactive %p (p %d a %d w %x) %s:%d\n",
-				  obj,
-				  obj_priv->pin_count, obj_priv->active,
-				  obj->write_domain, file, line);
-	}
-}
+#if WATCH_LISTS
+int
+i915_verify_lists(struct drm_device *dev)
+{
+	static int warned;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_gem_object *obj;
+	int err = 0;
+
+	if (warned)
+		return 0;
+
+	list_for_each_entry(obj, &dev_priv->render_ring.active_list, list) {
+		if (obj->base.dev != dev ||
+		    !atomic_read(&obj->base.refcount.refcount)) {
+			DRM_ERROR("freed render active %p\n", obj);
+			err++;
+			break;
+		} else if (!obj->active ||
+			   (obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0) {
+			DRM_ERROR("invalid render active %p (a %d r %x)\n",
+				  obj,
+				  obj->active,
+				  obj->base.read_domains);
+			err++;
+		} else if (obj->base.write_domain && list_empty(&obj->gpu_write_list)) {
+			DRM_ERROR("invalid render active %p (w %x, gwl %d)\n",
+				  obj,
+				  obj->base.write_domain,
+				  !list_empty(&obj->gpu_write_list));
+			err++;
+		}
+	}
+
+	list_for_each_entry(obj, &dev_priv->mm.flushing_list, list) {
+		if (obj->base.dev != dev ||
+		    !atomic_read(&obj->base.refcount.refcount)) {
+			DRM_ERROR("freed flushing %p\n", obj);
+			err++;
+			break;
+		} else if (!obj->active ||
+			   (obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0 ||
+			   list_empty(&obj->gpu_write_list)) {
+			DRM_ERROR("invalid flushing %p (a %d w %x gwl %d)\n",
+				  obj,
+				  obj->active,
+				  obj->base.write_domain,
+				  !list_empty(&obj->gpu_write_list));
+			err++;
+		}
+	}
+
+	list_for_each_entry(obj, &dev_priv->mm.gpu_write_list, gpu_write_list) {
+		if (obj->base.dev != dev ||
+		    !atomic_read(&obj->base.refcount.refcount)) {
+			DRM_ERROR("freed gpu write %p\n", obj);
+			err++;
+			break;
+		} else if (!obj->active ||
+			   (obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0) {
+			DRM_ERROR("invalid gpu write %p (a %d w %x)\n",
+				  obj,
+				  obj->active,
+				  obj->base.write_domain);
+			err++;
+		}
+	}
+
+	list_for_each_entry(obj, &dev_priv->mm.inactive_list, list) {
+		if (obj->base.dev != dev ||
+		    !atomic_read(&obj->base.refcount.refcount)) {
+			DRM_ERROR("freed inactive %p\n", obj);
+			err++;
+			break;
+		} else if (obj->pin_count || obj->active ||
+			   (obj->base.write_domain & I915_GEM_GPU_DOMAINS)) {
+			DRM_ERROR("invalid inactive %p (p %d a %d w %x)\n",
+				  obj,
+				  obj->pin_count, obj->active,
+				  obj->base.write_domain);
+			err++;
+		}
+	}
+
+	list_for_each_entry(obj, &dev_priv->mm.pinned_list, list) {
+		if (obj->base.dev != dev ||
+		    !atomic_read(&obj->base.refcount.refcount)) {
+			DRM_ERROR("freed pinned %p\n", obj);
+			err++;
+			break;
+		} else if (!obj->pin_count || obj->active ||
+			   (obj->base.write_domain & I915_GEM_GPU_DOMAINS)) {
+			DRM_ERROR("invalid pinned %p (p %d a %d w %x)\n",
+				  obj,
+				  obj->pin_count, obj->active,
+				  obj->base.write_domain);
+			err++;
+		}
+	}
+
+	return warned = err;
+}
 #endif /* WATCH_INACTIVE */
 
 
-#if WATCH_BUF | WATCH_EXEC | WATCH_PWRITE
+#if WATCH_EXEC | WATCH_PWRITE
 static void
 i915_gem_dump_page(struct page *page, uint32_t start, uint32_t end,
 		   uint32_t bias, uint32_t mark)
@@ -97,41 +180,6 @@ i915_gem_dump_object(struct drm_gem_object *obj, int len,
 }
 #endif
 
-#if WATCH_LRU
-void
-i915_dump_lru(struct drm_device *dev, const char *where)
-{
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_i915_gem_object *obj_priv;
-
-	DRM_INFO("active list %s {\n", where);
-	spin_lock(&dev_priv->mm.active_list_lock);
-	list_for_each_entry(obj_priv, &dev_priv->mm.active_list,
-			    list)
-	{
-		DRM_INFO("    %p: %08x\n", obj_priv,
-			 obj_priv->last_rendering_seqno);
-	}
-	spin_unlock(&dev_priv->mm.active_list_lock);
-	DRM_INFO("}\n");
-	DRM_INFO("flushing list %s {\n", where);
-	list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list,
-			    list)
-	{
-		DRM_INFO("    %p: %08x\n", obj_priv,
-			 obj_priv->last_rendering_seqno);
-	}
-	DRM_INFO("}\n");
-	DRM_INFO("inactive %s {\n", where);
-	list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
-		DRM_INFO("    %p: %08x\n", obj_priv,
-			 obj_priv->last_rendering_seqno);
-	}
-	DRM_INFO("}\n");
-}
-#endif
-
-
 #if WATCH_COHERENCY
 void
 i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle)
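[Editorial note] The checker that replaces i915_verify_inactive() latches after its first failure via the static warned flag, so a corrupted list reports once instead of flooding the log, and it returns a count so callers can assert at the call site, as the i915_gem.c hunks above do. The caller-side idiom, for reference:

	/* Presumably compiled out (or a constant 0) when WATCH_LISTS is unset. */
	WARN_ON(i915_verify_lists(dev));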
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index e85246ef691c..3d7fbf32bb18 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -93,7 +93,7 @@ i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignmen
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct list_head eviction_list, unwind_list;
-	struct drm_i915_gem_object *obj_priv, *tmp_obj_priv;
+	struct drm_i915_gem_object *obj_priv;
 	struct list_head *render_iter, *bsd_iter;
 	int ret = 0;
 
@@ -175,39 +175,34 @@ i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignmen
 	return -ENOSPC;
 
 found:
+	/* drm_mm doesn't allow any other other operations while
+	 * scanning, therefore store to be evicted objects on a
+	 * temporary list. */
 	INIT_LIST_HEAD(&eviction_list);
-	list_for_each_entry_safe(obj_priv, tmp_obj_priv,
-				 &unwind_list, evict_list) {
+	while (!list_empty(&unwind_list)) {
+		obj_priv = list_first_entry(&unwind_list,
+					    struct drm_i915_gem_object,
+					    evict_list);
 		if (drm_mm_scan_remove_block(obj_priv->gtt_space)) {
-			/* drm_mm doesn't allow any other other operations while
-			 * scanning, therefore store to be evicted objects on a
-			 * temporary list. */
 			list_move(&obj_priv->evict_list, &eviction_list);
-		} else
-			drm_gem_object_unreference(&obj_priv->base);
+			continue;
+		}
+		list_del(&obj_priv->evict_list);
+		drm_gem_object_unreference(&obj_priv->base);
 	}
 
 	/* Unbinding will emit any required flushes */
-	list_for_each_entry_safe(obj_priv, tmp_obj_priv,
-				 &eviction_list, evict_list) {
-#if WATCH_LRU
-		DRM_INFO("%s: evicting %p\n", __func__, &obj_priv->base);
-#endif
-		ret = i915_gem_object_unbind(&obj_priv->base);
-		if (ret)
-			return ret;
-
-		drm_gem_object_unreference(&obj_priv->base);
+	while (!list_empty(&eviction_list)) {
+		obj_priv = list_first_entry(&eviction_list,
+					    struct drm_i915_gem_object,
+					    evict_list);
+		if (ret == 0)
+			ret = i915_gem_object_unbind(&obj_priv->base);
+		list_del(&obj_priv->evict_list);
+		drm_gem_object_unreference(&obj_priv->base);
 	}
 
-	/* The just created free hole should be on the top of the free stack
-	 * maintained by drm_mm, so this BUG_ON actually executes in O(1).
-	 * Furthermore all accessed data has just recently been used, so it
-	 * should be really fast, too. */
-	BUG_ON(!drm_mm_search_free(&dev_priv->mm.gtt_space, min_size,
-				   alignment, 0));
-
-	return 0;
+	return ret;
 }
 
 int
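[Editorial note] Eviction is now strictly two-phase: while the drm_mm scan is in progress no other drm_mm operation is legal, so candidates are parked on eviction_list as the scan is unwound and only unbound afterwards. Note that the second loop keeps draining even after an unbind error, so every reference taken during the scan is dropped; in outline:

	while (!list_empty(&eviction_list)) {
		obj_priv = list_first_entry(&eviction_list,
					    struct drm_i915_gem_object,
					    evict_list);
		if (ret == 0)
			ret = i915_gem_object_unbind(&obj_priv->base);	/* first error wins */
		list_del(&obj_priv->evict_list);
		drm_gem_object_unreference(&obj_priv->base);	/* always drop the scan ref */
	}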
@@ -217,14 +212,11 @@ i915_gem_evict_everything(struct drm_device *dev)
 	int ret;
 	bool lists_empty;
 
-	spin_lock(&dev_priv->mm.active_list_lock);
 	lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
 		       list_empty(&dev_priv->mm.flushing_list) &&
 		       list_empty(&dev_priv->render_ring.active_list) &&
 		       (!HAS_BSD(dev)
 			|| list_empty(&dev_priv->bsd_ring.active_list)));
-	spin_unlock(&dev_priv->mm.active_list_lock);
-
 	if (lists_empty)
 		return -ENOSPC;
 
@@ -239,13 +231,11 @@ i915_gem_evict_everything(struct drm_device *dev)
 	if (ret)
 		return ret;
 
-	spin_lock(&dev_priv->mm.active_list_lock);
 	lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
 		       list_empty(&dev_priv->mm.flushing_list) &&
 		       list_empty(&dev_priv->render_ring.active_list) &&
 		       (!HAS_BSD(dev)
 			|| list_empty(&dev_priv->bsd_ring.active_list)));
-	spin_unlock(&dev_priv->mm.active_list_lock);
 	BUG_ON(!lists_empty);
 
 	return 0;
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 710eca70b323..8c9ffc4768ee 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -98,7 +98,7 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
 	 */
 		swizzle_x = I915_BIT_6_SWIZZLE_9_10;
 		swizzle_y = I915_BIT_6_SWIZZLE_9;
-	} else if (!IS_I9XX(dev)) {
+	} else if (IS_GEN2(dev)) {
 		/* As far as we know, the 865 doesn't have these bit 6
 		 * swizzling issues.
 		 */
@@ -190,19 +190,19 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
 	if (tiling_mode == I915_TILING_NONE)
 		return true;
 
-	if (!IS_I9XX(dev) ||
+	if (IS_GEN2(dev) ||
 	    (tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev)))
 		tile_width = 128;
 	else
 		tile_width = 512;
 
 	/* check maximum stride & object size */
-	if (IS_I965G(dev)) {
+	if (INTEL_INFO(dev)->gen >= 4) {
 		/* i965 stores the end address of the gtt mapping in the fence
 		 * reg, so dont bother to check the size */
 		if (stride / 128 > I965_FENCE_MAX_PITCH_VAL)
 			return false;
-	} else if (IS_GEN3(dev) || IS_GEN2(dev)) {
+	} else {
 		if (stride > 8192)
 			return false;
 
@@ -216,7 +216,7 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
216 } 216 }
217 217
218 /* 965+ just needs multiples of tile width */ 218 /* 965+ just needs multiples of tile width */
219 if (IS_I965G(dev)) { 219 if (INTEL_INFO(dev)->gen >= 4) {
220 if (stride & (tile_width - 1)) 220 if (stride & (tile_width - 1))
221 return false; 221 return false;
222 return true; 222 return true;
@@ -244,16 +244,18 @@ i915_gem_object_fence_offset_ok(struct drm_gem_object *obj, int tiling_mode)
244 if (tiling_mode == I915_TILING_NONE) 244 if (tiling_mode == I915_TILING_NONE)
245 return true; 245 return true;
246 246
247 if (!IS_I965G(dev)) { 247 if (INTEL_INFO(dev)->gen >= 4)
248 if (obj_priv->gtt_offset & (obj->size - 1)) 248 return true;
249
250 if (obj_priv->gtt_offset & (obj->size - 1))
251 return false;
252
253 if (IS_GEN3(dev)) {
254 if (obj_priv->gtt_offset & ~I915_FENCE_START_MASK)
255 return false;
256 } else {
257 if (obj_priv->gtt_offset & ~I830_FENCE_START_MASK)
249 return false; 258 return false;
250 if (IS_I9XX(dev)) {
251 if (obj_priv->gtt_offset & ~I915_FENCE_START_MASK)
252 return false;
253 } else {
254 if (obj_priv->gtt_offset & ~I830_FENCE_START_MASK)
255 return false;
256 }
257 } 259 }
258 260
259 return true; 261 return true;
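
The chipset tests above now key off a single generation number rather than per-device IS_*() macros; a standalone sketch of the idiom (illustrative names and field layout, not the driver's definitions):

#include <stdbool.h>
#include <stdint.h>

struct device_info {
        uint8_t gen;    /* 2 = i8xx, 3 = i915/i945-class, 4 = i965 and later */
};

static bool fence_offset_ok(const struct device_info *info,
                            uint32_t gtt_offset, uint32_t size)
{
        if (info->gen >= 4)     /* i965+ imposes no natural-alignment rule */
                return true;
        return (gtt_offset & (size - 1)) == 0;
}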
@@ -271,7 +273,11 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
271 drm_i915_private_t *dev_priv = dev->dev_private; 273 drm_i915_private_t *dev_priv = dev->dev_private;
272 struct drm_gem_object *obj; 274 struct drm_gem_object *obj;
273 struct drm_i915_gem_object *obj_priv; 275 struct drm_i915_gem_object *obj_priv;
274 int ret = 0; 276 int ret;
277
278 ret = i915_gem_check_is_wedged(dev);
279 if (ret)
280 return ret;
275 281
276 obj = drm_gem_object_lookup(dev, file_priv, args->handle); 282 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
277 if (obj == NULL) 283 if (obj == NULL)
@@ -328,7 +334,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
328 if (!i915_gem_object_fence_offset_ok(obj, args->tiling_mode)) 334 if (!i915_gem_object_fence_offset_ok(obj, args->tiling_mode))
329 ret = i915_gem_object_unbind(obj); 335 ret = i915_gem_object_unbind(obj);
330 else if (obj_priv->fence_reg != I915_FENCE_REG_NONE) 336 else if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
331 ret = i915_gem_object_put_fence_reg(obj); 337 ret = i915_gem_object_put_fence_reg(obj, true);
332 else 338 else
333 i915_gem_release_mmap(obj); 339 i915_gem_release_mmap(obj);
334 340
@@ -399,16 +405,14 @@ i915_gem_get_tiling(struct drm_device *dev, void *data,
399 * bit 17 of its physical address and therefore being interpreted differently 405 * bit 17 of its physical address and therefore being interpreted differently
400 * by the GPU. 406 * by the GPU.
401 */ 407 */
402static int 408static void
403i915_gem_swizzle_page(struct page *page) 409i915_gem_swizzle_page(struct page *page)
404{ 410{
411 char temp[64];
405 char *vaddr; 412 char *vaddr;
406 int i; 413 int i;
407 char temp[64];
408 414
409 vaddr = kmap(page); 415 vaddr = kmap(page);
410 if (vaddr == NULL)
411 return -ENOMEM;
412 416
413 for (i = 0; i < PAGE_SIZE; i += 128) { 417 for (i = 0; i < PAGE_SIZE; i += 128) {
414 memcpy(temp, &vaddr[i], 64); 418 memcpy(temp, &vaddr[i], 64);
@@ -417,8 +421,6 @@ i915_gem_swizzle_page(struct page *page)
417 } 421 }
418 422
419 kunmap(page); 423 kunmap(page);
420
421 return 0;
422} 424}
423 425
424void 426void
@@ -440,11 +442,7 @@ i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj)
440 char new_bit_17 = page_to_phys(obj_priv->pages[i]) >> 17; 442 char new_bit_17 = page_to_phys(obj_priv->pages[i]) >> 17;
441 if ((new_bit_17 & 0x1) != 443 if ((new_bit_17 & 0x1) !=
442 (test_bit(i, obj_priv->bit_17) != 0)) { 444 (test_bit(i, obj_priv->bit_17) != 0)) {
443 int ret = i915_gem_swizzle_page(obj_priv->pages[i]); 445 i915_gem_swizzle_page(obj_priv->pages[i]);
444 if (ret != 0) {
445 DRM_ERROR("Failed to swizzle page\n");
446 return;
447 }
448 set_page_dirty(obj_priv->pages[i]); 446 set_page_dirty(obj_priv->pages[i]);
449 } 447 }
450 } 448 }
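
For context, the 128-byte loop in i915_gem_swizzle_page() exchanges the two 64-byte halves of each span through the on-stack temp buffer; the elided body presumably completes along these lines (sketch):

        memcpy(temp, &vaddr[i], 64);
        memcpy(&vaddr[i], &vaddr[i + 64], 64);
        memcpy(&vaddr[i + 64], temp, 64);

Since kmap() cannot fail, dropping the NULL check and the int return, as done above, loses nothing.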
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 744225ebb4b2..64c07c24e300 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -85,7 +85,7 @@ ironlake_disable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
85} 85}
86 86
87/* For display hotplug interrupt */ 87/* For display hotplug interrupt */
88void 88static void
89ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask) 89ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
90{ 90{
91 if ((dev_priv->irq_mask_reg & mask) != 0) { 91 if ((dev_priv->irq_mask_reg & mask) != 0) {
@@ -172,7 +172,7 @@ void intel_enable_asle (struct drm_device *dev)
172 else { 172 else {
173 i915_enable_pipestat(dev_priv, 1, 173 i915_enable_pipestat(dev_priv, 1,
174 PIPE_LEGACY_BLC_EVENT_ENABLE); 174 PIPE_LEGACY_BLC_EVENT_ENABLE);
175 if (IS_I965G(dev)) 175 if (INTEL_INFO(dev)->gen >= 4)
176 i915_enable_pipestat(dev_priv, 0, 176 i915_enable_pipestat(dev_priv, 0,
177 PIPE_LEGACY_BLC_EVENT_ENABLE); 177 PIPE_LEGACY_BLC_EVENT_ENABLE);
178 } 178 }
@@ -191,12 +191,7 @@ static int
191i915_pipe_enabled(struct drm_device *dev, int pipe) 191i915_pipe_enabled(struct drm_device *dev, int pipe)
192{ 192{
193 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 193 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
194 unsigned long pipeconf = pipe ? PIPEBCONF : PIPEACONF; 194 return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;
195
196 if (I915_READ(pipeconf) & PIPEACONF_ENABLE)
197 return 1;
198
199 return 0;
200} 195}
201 196
202/* Called from drm generic code, passed a 'crtc', which 197/* Called from drm generic code, passed a 'crtc', which
@@ -207,10 +202,7 @@ u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
207 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 202 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
208 unsigned long high_frame; 203 unsigned long high_frame;
209 unsigned long low_frame; 204 unsigned long low_frame;
210 u32 high1, high2, low, count; 205 u32 high1, high2, low;
211
212 high_frame = pipe ? PIPEBFRAMEHIGH : PIPEAFRAMEHIGH;
213 low_frame = pipe ? PIPEBFRAMEPIXEL : PIPEAFRAMEPIXEL;
214 206
215 if (!i915_pipe_enabled(dev, pipe)) { 207 if (!i915_pipe_enabled(dev, pipe)) {
216 DRM_DEBUG_DRIVER("trying to get vblank count for disabled " 208 DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
@@ -218,23 +210,23 @@ u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
218 return 0; 210 return 0;
219 } 211 }
220 212
213 high_frame = pipe ? PIPEBFRAMEHIGH : PIPEAFRAMEHIGH;
214 low_frame = pipe ? PIPEBFRAMEPIXEL : PIPEAFRAMEPIXEL;
215
221 /* 216 /*
222 * High & low register fields aren't synchronized, so make sure 217 * High & low register fields aren't synchronized, so make sure
223 * we get a low value that's stable across two reads of the high 218 * we get a low value that's stable across two reads of the high
224 * register. 219 * register.
225 */ 220 */
226 do { 221 do {
227 high1 = ((I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >> 222 high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
228 PIPE_FRAME_HIGH_SHIFT); 223 low = I915_READ(low_frame) & PIPE_FRAME_LOW_MASK;
229 low = ((I915_READ(low_frame) & PIPE_FRAME_LOW_MASK) >> 224 high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
230 PIPE_FRAME_LOW_SHIFT);
231 high2 = ((I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
232 PIPE_FRAME_HIGH_SHIFT);
233 } while (high1 != high2); 225 } while (high1 != high2);
234 226
235 count = (high1 << 8) | low; 227 high1 >>= PIPE_FRAME_HIGH_SHIFT;
236 228 low >>= PIPE_FRAME_LOW_SHIFT;
237 return count; 229 return (high1 << 8) | low;
238} 230}
239 231
240u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe) 232u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
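
The retry loop above is the general stable-read pattern for a counter split across unsynchronized high/low registers; a self-contained sketch with generic names (volatile pointers standing in for MMIO reads):

#include <stdint.h>

static uint32_t read_split_counter(const volatile uint32_t *high,
                                   const volatile uint32_t *low)
{
        uint32_t h1, h2, l;

        do {
                h1 = *high;
                l  = *low;
                h2 = *high;     /* re-read: detect low wrapping into high */
        } while (h1 != h2);

        return (h1 << 8) | l;   /* 8-bit low field, as in the frame counter */
}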
@@ -260,16 +252,12 @@ static void i915_hotplug_work_func(struct work_struct *work)
260 hotplug_work); 252 hotplug_work);
261 struct drm_device *dev = dev_priv->dev; 253 struct drm_device *dev = dev_priv->dev;
262 struct drm_mode_config *mode_config = &dev->mode_config; 254 struct drm_mode_config *mode_config = &dev->mode_config;
263 struct drm_encoder *encoder; 255 struct intel_encoder *encoder;
264 256
265 if (mode_config->num_encoder) { 257 list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
266 list_for_each_entry(encoder, &mode_config->encoder_list, head) { 258 if (encoder->hot_plug)
267 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); 259 encoder->hot_plug(encoder);
268 260
269 if (intel_encoder->hot_plug)
270 (*intel_encoder->hot_plug) (intel_encoder);
271 }
272 }
273 /* Just fire off a uevent and let userspace tell us what to do */ 261 /* Just fire off a uevent and let userspace tell us what to do */
274 drm_helper_hpd_irq_event(dev); 262 drm_helper_hpd_irq_event(dev);
275} 263}
@@ -305,13 +293,17 @@ static void i915_handle_rps_change(struct drm_device *dev)
305 return; 293 return;
306} 294}
307 295
308irqreturn_t ironlake_irq_handler(struct drm_device *dev) 296static irqreturn_t ironlake_irq_handler(struct drm_device *dev)
309{ 297{
310 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 298 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
311 int ret = IRQ_NONE; 299 int ret = IRQ_NONE;
312 u32 de_iir, gt_iir, de_ier, pch_iir; 300 u32 de_iir, gt_iir, de_ier, pch_iir;
313 struct drm_i915_master_private *master_priv; 301 struct drm_i915_master_private *master_priv;
314 struct intel_ring_buffer *render_ring = &dev_priv->render_ring; 302 struct intel_ring_buffer *render_ring = &dev_priv->render_ring;
303 u32 bsd_usr_interrupt = GT_BSD_USER_INTERRUPT;
304
305 if (IS_GEN6(dev))
306 bsd_usr_interrupt = GT_GEN6_BSD_USER_INTERRUPT;
315 307
316 /* disable master interrupt before clearing iir */ 308 /* disable master interrupt before clearing iir */
317 de_ier = I915_READ(DEIER); 309 de_ier = I915_READ(DEIER);
@@ -335,28 +327,28 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev)
335 } 327 }
336 328
337 if (gt_iir & GT_PIPE_NOTIFY) { 329 if (gt_iir & GT_PIPE_NOTIFY) {
338 u32 seqno = render_ring->get_gem_seqno(dev, render_ring); 330 u32 seqno = render_ring->get_seqno(dev, render_ring);
339 render_ring->irq_gem_seqno = seqno; 331 render_ring->irq_gem_seqno = seqno;
340 trace_i915_gem_request_complete(dev, seqno); 332 trace_i915_gem_request_complete(dev, seqno);
341 DRM_WAKEUP(&dev_priv->render_ring.irq_queue); 333 wake_up_all(&dev_priv->render_ring.irq_queue);
342 dev_priv->hangcheck_count = 0; 334 dev_priv->hangcheck_count = 0;
343 mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD); 335 mod_timer(&dev_priv->hangcheck_timer,
336 jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
344 } 337 }
345 if (gt_iir & GT_BSD_USER_INTERRUPT) 338 if (gt_iir & bsd_usr_interrupt)
346 DRM_WAKEUP(&dev_priv->bsd_ring.irq_queue); 339 wake_up_all(&dev_priv->bsd_ring.irq_queue);
347
348 340
349 if (de_iir & DE_GSE) 341 if (de_iir & DE_GSE)
350 ironlake_opregion_gse_intr(dev); 342 intel_opregion_gse_intr(dev);
351 343
352 if (de_iir & DE_PLANEA_FLIP_DONE) { 344 if (de_iir & DE_PLANEA_FLIP_DONE) {
353 intel_prepare_page_flip(dev, 0); 345 intel_prepare_page_flip(dev, 0);
354 intel_finish_page_flip(dev, 0); 346 intel_finish_page_flip_plane(dev, 0);
355 } 347 }
356 348
357 if (de_iir & DE_PLANEB_FLIP_DONE) { 349 if (de_iir & DE_PLANEB_FLIP_DONE) {
358 intel_prepare_page_flip(dev, 1); 350 intel_prepare_page_flip(dev, 1);
359 intel_finish_page_flip(dev, 1); 351 intel_finish_page_flip_plane(dev, 1);
360 } 352 }
361 353
362 if (de_iir & DE_PIPEA_VBLANK) 354 if (de_iir & DE_PIPEA_VBLANK)
@@ -404,23 +396,20 @@ static void i915_error_work_func(struct work_struct *work)
404 char *reset_event[] = { "RESET=1", NULL }; 396 char *reset_event[] = { "RESET=1", NULL };
405 char *reset_done_event[] = { "ERROR=0", NULL }; 397 char *reset_done_event[] = { "ERROR=0", NULL };
406 398
407 DRM_DEBUG_DRIVER("generating error event\n");
408 kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event); 399 kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event);
409 400
410 if (atomic_read(&dev_priv->mm.wedged)) { 401 if (atomic_read(&dev_priv->mm.wedged)) {
411 if (IS_I965G(dev)) { 402 DRM_DEBUG_DRIVER("resetting chip\n");
412 DRM_DEBUG_DRIVER("resetting chip\n"); 403 kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_event);
413 kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_event); 404 if (!i915_reset(dev, GRDOM_RENDER)) {
414 if (!i965_reset(dev, GDRST_RENDER)) { 405 atomic_set(&dev_priv->mm.wedged, 0);
415 atomic_set(&dev_priv->mm.wedged, 0); 406 kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_done_event);
416 kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_done_event);
417 }
418 } else {
419 DRM_DEBUG_DRIVER("reboot required\n");
420 } 407 }
408 complete_all(&dev_priv->error_completion);
421 } 409 }
422} 410}
423 411
412#ifdef CONFIG_DEBUG_FS
424static struct drm_i915_error_object * 413static struct drm_i915_error_object *
425i915_error_object_create(struct drm_device *dev, 414i915_error_object_create(struct drm_device *dev,
426 struct drm_gem_object *src) 415 struct drm_gem_object *src)
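
The error_completion introduced above pairs with the i915_gem_check_is_wedged() call added in the set_tiling path: an ioctl that finds the GPU wedged can sleep on the completion until this work function finishes the reset and calls complete_all(). A simplified sketch of the waiter side (not the driver's exact code):

        if (atomic_read(&dev_priv->mm.wedged)) {
                /* released by complete_all(&dev_priv->error_completion)
                 * once the reset in i915_error_work_func() succeeds */
                ret = wait_for_completion_interruptible(
                                &dev_priv->error_completion);
                if (ret)
                        return ret;
        }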
@@ -511,7 +500,7 @@ i915_get_bbaddr(struct drm_device *dev, u32 *ring)
511 500
512 if (IS_I830(dev) || IS_845G(dev)) 501 if (IS_I830(dev) || IS_845G(dev))
513 cmd = MI_BATCH_BUFFER; 502 cmd = MI_BATCH_BUFFER;
514 else if (IS_I965G(dev)) 503 else if (INTEL_INFO(dev)->gen >= 4)
515 cmd = (MI_BATCH_BUFFER_START | (2 << 6) | 504 cmd = (MI_BATCH_BUFFER_START | (2 << 6) |
516 MI_BATCH_NON_SECURE_I965); 505 MI_BATCH_NON_SECURE_I965);
517 else 506 else
@@ -584,13 +573,16 @@ static void i915_capture_error_state(struct drm_device *dev)
584 return; 573 return;
585 } 574 }
586 575
587 error->seqno = i915_get_gem_seqno(dev, &dev_priv->render_ring); 576 DRM_DEBUG_DRIVER("generating error event\n");
577
578 error->seqno =
579 dev_priv->render_ring.get_seqno(dev, &dev_priv->render_ring);
588 error->eir = I915_READ(EIR); 580 error->eir = I915_READ(EIR);
589 error->pgtbl_er = I915_READ(PGTBL_ER); 581 error->pgtbl_er = I915_READ(PGTBL_ER);
590 error->pipeastat = I915_READ(PIPEASTAT); 582 error->pipeastat = I915_READ(PIPEASTAT);
591 error->pipebstat = I915_READ(PIPEBSTAT); 583 error->pipebstat = I915_READ(PIPEBSTAT);
592 error->instpm = I915_READ(INSTPM); 584 error->instpm = I915_READ(INSTPM);
593 if (!IS_I965G(dev)) { 585 if (INTEL_INFO(dev)->gen < 4) {
594 error->ipeir = I915_READ(IPEIR); 586 error->ipeir = I915_READ(IPEIR);
595 error->ipehr = I915_READ(IPEHR); 587 error->ipehr = I915_READ(IPEHR);
596 error->instdone = I915_READ(INSTDONE); 588 error->instdone = I915_READ(INSTDONE);
@@ -744,6 +736,9 @@ void i915_destroy_error_state(struct drm_device *dev)
744 if (error) 736 if (error)
745 i915_error_state_free(dev, error); 737 i915_error_state_free(dev, error);
746} 738}
739#else
740#define i915_capture_error_state(x)
741#endif
747 742
748static void i915_report_and_clear_eir(struct drm_device *dev) 743static void i915_report_and_clear_eir(struct drm_device *dev)
749{ 744{
@@ -785,7 +780,7 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
785 } 780 }
786 } 781 }
787 782
788 if (IS_I9XX(dev)) { 783 if (!IS_GEN2(dev)) {
789 if (eir & I915_ERROR_PAGE_TABLE) { 784 if (eir & I915_ERROR_PAGE_TABLE) {
790 u32 pgtbl_err = I915_READ(PGTBL_ER); 785 u32 pgtbl_err = I915_READ(PGTBL_ER);
791 printk(KERN_ERR "page table error\n"); 786 printk(KERN_ERR "page table error\n");
@@ -811,7 +806,7 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
811 printk(KERN_ERR "instruction error\n"); 806 printk(KERN_ERR "instruction error\n");
812 printk(KERN_ERR " INSTPM: 0x%08x\n", 807 printk(KERN_ERR " INSTPM: 0x%08x\n",
813 I915_READ(INSTPM)); 808 I915_READ(INSTPM));
814 if (!IS_I965G(dev)) { 809 if (INTEL_INFO(dev)->gen < 4) {
815 u32 ipeir = I915_READ(IPEIR); 810 u32 ipeir = I915_READ(IPEIR);
816 811
817 printk(KERN_ERR " IPEIR: 0x%08x\n", 812 printk(KERN_ERR " IPEIR: 0x%08x\n",
@@ -876,12 +871,15 @@ static void i915_handle_error(struct drm_device *dev, bool wedged)
876 i915_report_and_clear_eir(dev); 871 i915_report_and_clear_eir(dev);
877 872
878 if (wedged) { 873 if (wedged) {
874 INIT_COMPLETION(dev_priv->error_completion);
879 atomic_set(&dev_priv->mm.wedged, 1); 875 atomic_set(&dev_priv->mm.wedged, 1);
880 876
881 /* 877 /*
882 * Wakeup waiting processes so they don't hang 878 * Wakeup waiting processes so they don't hang
883 */ 879 */
884 DRM_WAKEUP(&dev_priv->render_ring.irq_queue); 880 wake_up_all(&dev_priv->render_ring.irq_queue);
881 if (HAS_BSD(dev))
882 wake_up_all(&dev_priv->bsd_ring.irq_queue);
885 } 883 }
886 884
887 queue_work(dev_priv->wq, &dev_priv->error_work); 885 queue_work(dev_priv->wq, &dev_priv->error_work);
@@ -912,7 +910,7 @@ static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
912 910
913 /* Potential stall - if we see that the flip has happened, assume a missed interrupt */ 911 /* Potential stall - if we see that the flip has happened, assume a missed interrupt */
914 obj_priv = to_intel_bo(work->pending_flip_obj); 912 obj_priv = to_intel_bo(work->pending_flip_obj);
915 if(IS_I965G(dev)) { 913 if (INTEL_INFO(dev)->gen >= 4) {
916 int dspsurf = intel_crtc->plane == 0 ? DSPASURF : DSPBSURF; 914 int dspsurf = intel_crtc->plane == 0 ? DSPASURF : DSPBSURF;
917 stall_detected = I915_READ(dspsurf) == obj_priv->gtt_offset; 915 stall_detected = I915_READ(dspsurf) == obj_priv->gtt_offset;
918 } else { 916 } else {
@@ -951,7 +949,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
951 949
952 iir = I915_READ(IIR); 950 iir = I915_READ(IIR);
953 951
954 if (IS_I965G(dev)) 952 if (INTEL_INFO(dev)->gen >= 4)
955 vblank_status = PIPE_START_VBLANK_INTERRUPT_STATUS; 953 vblank_status = PIPE_START_VBLANK_INTERRUPT_STATUS;
956 else 954 else
957 vblank_status = PIPE_VBLANK_INTERRUPT_STATUS; 955 vblank_status = PIPE_VBLANK_INTERRUPT_STATUS;
@@ -1020,17 +1018,17 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
1020 } 1018 }
1021 1019
1022 if (iir & I915_USER_INTERRUPT) { 1020 if (iir & I915_USER_INTERRUPT) {
1023 u32 seqno = 1021 u32 seqno = render_ring->get_seqno(dev, render_ring);
1024 render_ring->get_gem_seqno(dev, render_ring);
1025 render_ring->irq_gem_seqno = seqno; 1022 render_ring->irq_gem_seqno = seqno;
1026 trace_i915_gem_request_complete(dev, seqno); 1023 trace_i915_gem_request_complete(dev, seqno);
1027 DRM_WAKEUP(&dev_priv->render_ring.irq_queue); 1024 wake_up_all(&dev_priv->render_ring.irq_queue);
1028 dev_priv->hangcheck_count = 0; 1025 dev_priv->hangcheck_count = 0;
1029 mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD); 1026 mod_timer(&dev_priv->hangcheck_timer,
1027 jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
1030 } 1028 }
1031 1029
1032 if (HAS_BSD(dev) && (iir & I915_BSD_USER_INTERRUPT)) 1030 if (HAS_BSD(dev) && (iir & I915_BSD_USER_INTERRUPT))
1033 DRM_WAKEUP(&dev_priv->bsd_ring.irq_queue); 1031 wake_up_all(&dev_priv->bsd_ring.irq_queue);
1034 1032
1035 if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) { 1033 if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) {
1036 intel_prepare_page_flip(dev, 0); 1034 intel_prepare_page_flip(dev, 0);
@@ -1065,7 +1063,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
1065 if ((pipea_stats & PIPE_LEGACY_BLC_EVENT_STATUS) || 1063 if ((pipea_stats & PIPE_LEGACY_BLC_EVENT_STATUS) ||
1066 (pipeb_stats & PIPE_LEGACY_BLC_EVENT_STATUS) || 1064 (pipeb_stats & PIPE_LEGACY_BLC_EVENT_STATUS) ||
1067 (iir & I915_ASLE_INTERRUPT)) 1065 (iir & I915_ASLE_INTERRUPT))
1068 opregion_asle_intr(dev); 1066 intel_opregion_asle_intr(dev);
1069 1067
1070 /* With MSI, interrupts are only generated when iir 1068 /* With MSI, interrupts are only generated when iir
1071 * transitions from zero to nonzero. If another bit got 1069 * transitions from zero to nonzero. If another bit got
@@ -1207,18 +1205,15 @@ int i915_enable_vblank(struct drm_device *dev, int pipe)
1207{ 1205{
1208 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1206 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1209 unsigned long irqflags; 1207 unsigned long irqflags;
1210 int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
1211 u32 pipeconf;
1212 1208
1213 pipeconf = I915_READ(pipeconf_reg); 1209 if (!i915_pipe_enabled(dev, pipe))
1214 if (!(pipeconf & PIPEACONF_ENABLE))
1215 return -EINVAL; 1210 return -EINVAL;
1216 1211
1217 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); 1212 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
1218 if (HAS_PCH_SPLIT(dev)) 1213 if (HAS_PCH_SPLIT(dev))
1219 ironlake_enable_display_irq(dev_priv, (pipe == 0) ? 1214 ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
1220 DE_PIPEA_VBLANK: DE_PIPEB_VBLANK); 1215 DE_PIPEA_VBLANK: DE_PIPEB_VBLANK);
1221 else if (IS_I965G(dev)) 1216 else if (INTEL_INFO(dev)->gen >= 4)
1222 i915_enable_pipestat(dev_priv, pipe, 1217 i915_enable_pipestat(dev_priv, pipe,
1223 PIPE_START_VBLANK_INTERRUPT_ENABLE); 1218 PIPE_START_VBLANK_INTERRUPT_ENABLE);
1224 else 1219 else
@@ -1252,7 +1247,7 @@ void i915_enable_interrupt (struct drm_device *dev)
1252 struct drm_i915_private *dev_priv = dev->dev_private; 1247 struct drm_i915_private *dev_priv = dev->dev_private;
1253 1248
1254 if (!HAS_PCH_SPLIT(dev)) 1249 if (!HAS_PCH_SPLIT(dev))
1255 opregion_enable_asle(dev); 1250 intel_opregion_enable_asle(dev);
1256 dev_priv->irq_enabled = 1; 1251 dev_priv->irq_enabled = 1;
1257} 1252}
1258 1253
@@ -1311,7 +1306,7 @@ int i915_vblank_swap(struct drm_device *dev, void *data,
1311 return -EINVAL; 1306 return -EINVAL;
1312} 1307}
1313 1308
1314struct drm_i915_gem_request * 1309static struct drm_i915_gem_request *
1315i915_get_tail_request(struct drm_device *dev) 1310i915_get_tail_request(struct drm_device *dev)
1316{ 1311{
1317 drm_i915_private_t *dev_priv = dev->dev_private; 1312 drm_i915_private_t *dev_priv = dev->dev_private;
@@ -1331,11 +1326,7 @@ void i915_hangcheck_elapsed(unsigned long data)
1331 drm_i915_private_t *dev_priv = dev->dev_private; 1326 drm_i915_private_t *dev_priv = dev->dev_private;
1332 uint32_t acthd, instdone, instdone1; 1327 uint32_t acthd, instdone, instdone1;
1333 1328
1334 /* No reset support on this chip yet. */ 1329 if (INTEL_INFO(dev)->gen < 4) {
1335 if (IS_GEN6(dev))
1336 return;
1337
1338 if (!IS_I965G(dev)) {
1339 acthd = I915_READ(ACTHD); 1330 acthd = I915_READ(ACTHD);
1340 instdone = I915_READ(INSTDONE); 1331 instdone = I915_READ(INSTDONE);
1341 instdone1 = 0; 1332 instdone1 = 0;
@@ -1347,9 +1338,8 @@ void i915_hangcheck_elapsed(unsigned long data)
1347 1338
1348 /* If all work is done then ACTHD clearly hasn't advanced. */ 1339 /* If all work is done then ACTHD clearly hasn't advanced. */
1349 if (list_empty(&dev_priv->render_ring.request_list) || 1340 if (list_empty(&dev_priv->render_ring.request_list) ||
1350 i915_seqno_passed(i915_get_gem_seqno(dev, 1341 i915_seqno_passed(dev_priv->render_ring.get_seqno(dev, &dev_priv->render_ring),
1351 &dev_priv->render_ring), 1342 i915_get_tail_request(dev)->seqno)) {
1352 i915_get_tail_request(dev)->seqno)) {
1353 bool missed_wakeup = false; 1343 bool missed_wakeup = false;
1354 1344
1355 dev_priv->hangcheck_count = 0; 1345 dev_priv->hangcheck_count = 0;
@@ -1357,13 +1347,13 @@ void i915_hangcheck_elapsed(unsigned long data)
1357 /* Issue a wake-up to catch stuck h/w. */ 1347 /* Issue a wake-up to catch stuck h/w. */
1358 if (dev_priv->render_ring.waiting_gem_seqno && 1348 if (dev_priv->render_ring.waiting_gem_seqno &&
1359 waitqueue_active(&dev_priv->render_ring.irq_queue)) { 1349 waitqueue_active(&dev_priv->render_ring.irq_queue)) {
1360 DRM_WAKEUP(&dev_priv->render_ring.irq_queue); 1350 wake_up_all(&dev_priv->render_ring.irq_queue);
1361 missed_wakeup = true; 1351 missed_wakeup = true;
1362 } 1352 }
1363 1353
1364 if (dev_priv->bsd_ring.waiting_gem_seqno && 1354 if (dev_priv->bsd_ring.waiting_gem_seqno &&
1365 waitqueue_active(&dev_priv->bsd_ring.irq_queue)) { 1355 waitqueue_active(&dev_priv->bsd_ring.irq_queue)) {
1366 DRM_WAKEUP(&dev_priv->bsd_ring.irq_queue); 1356 wake_up_all(&dev_priv->bsd_ring.irq_queue);
1367 missed_wakeup = true; 1357 missed_wakeup = true;
1368 } 1358 }
1369 1359
@@ -1377,6 +1367,21 @@ void i915_hangcheck_elapsed(unsigned long data)
1377 dev_priv->last_instdone1 == instdone1) { 1367 dev_priv->last_instdone1 == instdone1) {
1378 if (dev_priv->hangcheck_count++ > 1) { 1368 if (dev_priv->hangcheck_count++ > 1) {
1379 DRM_ERROR("Hangcheck timer elapsed... GPU hung\n"); 1369 DRM_ERROR("Hangcheck timer elapsed... GPU hung\n");
1370
1371 if (!IS_GEN2(dev)) {
1372 /* Is the chip hanging on a WAIT_FOR_EVENT?
1373 * If so we can simply poke the RB_WAIT bit
1374 * and break the hang. This should work on
1375 * all but the second generation chipsets.
1376 */
1377 u32 tmp = I915_READ(PRB0_CTL);
1378 if (tmp & RING_WAIT) {
1379 I915_WRITE(PRB0_CTL, tmp);
1380 POSTING_READ(PRB0_CTL);
1381 goto out;
1382 }
1383 }
1384
1380 i915_handle_error(dev, true); 1385 i915_handle_error(dev, true);
1381 return; 1386 return;
1382 } 1387 }
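
The I915_WRITE/POSTING_READ pairing above is the standard way to flush a posted MMIO write before acting on its result; in generic form (sketch):

        writel(val, reg);
        (void)readl(reg);       /* read-back forces the write out to the device */

As the comment notes, writing the control register back with RING_WAIT still set clears the wait condition, letting the ring resume without a full reset.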
@@ -1388,8 +1393,10 @@ void i915_hangcheck_elapsed(unsigned long data)
1388 dev_priv->last_instdone1 = instdone1; 1393 dev_priv->last_instdone1 = instdone1;
1389 } 1394 }
1390 1395
1396out:
 1391 /* Reset timer in case chip hangs without another request being added */ 1397 /* Reset timer in case chip hangs without another request being added */
1392 mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD); 1398 mod_timer(&dev_priv->hangcheck_timer,
1399 jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
1393} 1400}
1394 1401
1395/* drm_dma.h hooks 1402/* drm_dma.h hooks
@@ -1436,17 +1443,19 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
1436 I915_WRITE(DEIER, dev_priv->de_irq_enable_reg); 1443 I915_WRITE(DEIER, dev_priv->de_irq_enable_reg);
1437 (void) I915_READ(DEIER); 1444 (void) I915_READ(DEIER);
1438 1445
1439 /* Gen6 only needs render pipe_control now */
1440 if (IS_GEN6(dev)) 1446 if (IS_GEN6(dev))
1441 render_mask = GT_PIPE_NOTIFY; 1447 render_mask = GT_PIPE_NOTIFY | GT_GEN6_BSD_USER_INTERRUPT;
1442 1448
1443 dev_priv->gt_irq_mask_reg = ~render_mask; 1449 dev_priv->gt_irq_mask_reg = ~render_mask;
1444 dev_priv->gt_irq_enable_reg = render_mask; 1450 dev_priv->gt_irq_enable_reg = render_mask;
1445 1451
1446 I915_WRITE(GTIIR, I915_READ(GTIIR)); 1452 I915_WRITE(GTIIR, I915_READ(GTIIR));
1447 I915_WRITE(GTIMR, dev_priv->gt_irq_mask_reg); 1453 I915_WRITE(GTIMR, dev_priv->gt_irq_mask_reg);
1448 if (IS_GEN6(dev)) 1454 if (IS_GEN6(dev)) {
1449 I915_WRITE(GEN6_RENDER_IMR, ~GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT); 1455 I915_WRITE(GEN6_RENDER_IMR, ~GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT);
1456 I915_WRITE(GEN6_BSD_IMR, ~GEN6_BSD_IMR_USER_INTERRUPT);
1457 }
1458
1450 I915_WRITE(GTIER, dev_priv->gt_irq_enable_reg); 1459 I915_WRITE(GTIER, dev_priv->gt_irq_enable_reg);
1451 (void) I915_READ(GTIER); 1460 (void) I915_READ(GTIER);
1452 1461
@@ -1578,7 +1587,7 @@ int i915_driver_irq_postinstall(struct drm_device *dev)
1578 I915_WRITE(PORT_HOTPLUG_EN, hotplug_en); 1587 I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
1579 } 1588 }
1580 1589
1581 opregion_enable_asle(dev); 1590 intel_opregion_enable_asle(dev);
1582 1591
1583 return 0; 1592 return 0;
1584} 1593}
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 4f5e15577e89..d02de212e6ad 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -25,52 +25,16 @@
25#ifndef _I915_REG_H_ 25#ifndef _I915_REG_H_
26#define _I915_REG_H_ 26#define _I915_REG_H_
27 27
28#define _PIPE(pipe, a, b) ((a) + (pipe)*((b)-(a)))
29
28/* 30/*
29 * The Bridge device's PCI config space has information about the 31 * The Bridge device's PCI config space has information about the
30 * fb aperture size and the amount of pre-reserved memory. 32 * fb aperture size and the amount of pre-reserved memory.
33 * This is all handled in the intel-gtt.ko module. i915.ko only
 34 * cares about the vga bit for the vga arbiter.
31 */ 35 */
32#define INTEL_GMCH_CTRL 0x52 36#define INTEL_GMCH_CTRL 0x52
33#define INTEL_GMCH_VGA_DISABLE (1 << 1) 37#define INTEL_GMCH_VGA_DISABLE (1 << 1)
34#define INTEL_GMCH_ENABLED 0x4
35#define INTEL_GMCH_MEM_MASK 0x1
36#define INTEL_GMCH_MEM_64M 0x1
37#define INTEL_GMCH_MEM_128M 0
38
39#define INTEL_GMCH_GMS_MASK (0xf << 4)
40#define INTEL_855_GMCH_GMS_DISABLED (0x0 << 4)
41#define INTEL_855_GMCH_GMS_STOLEN_1M (0x1 << 4)
42#define INTEL_855_GMCH_GMS_STOLEN_4M (0x2 << 4)
43#define INTEL_855_GMCH_GMS_STOLEN_8M (0x3 << 4)
44#define INTEL_855_GMCH_GMS_STOLEN_16M (0x4 << 4)
45#define INTEL_855_GMCH_GMS_STOLEN_32M (0x5 << 4)
46
47#define INTEL_915G_GMCH_GMS_STOLEN_48M (0x6 << 4)
48#define INTEL_915G_GMCH_GMS_STOLEN_64M (0x7 << 4)
49#define INTEL_GMCH_GMS_STOLEN_128M (0x8 << 4)
50#define INTEL_GMCH_GMS_STOLEN_256M (0x9 << 4)
51#define INTEL_GMCH_GMS_STOLEN_96M (0xa << 4)
52#define INTEL_GMCH_GMS_STOLEN_160M (0xb << 4)
53#define INTEL_GMCH_GMS_STOLEN_224M (0xc << 4)
54#define INTEL_GMCH_GMS_STOLEN_352M (0xd << 4)
55
56#define SNB_GMCH_CTRL 0x50
57#define SNB_GMCH_GMS_STOLEN_MASK 0xF8
58#define SNB_GMCH_GMS_STOLEN_32M (1 << 3)
59#define SNB_GMCH_GMS_STOLEN_64M (2 << 3)
60#define SNB_GMCH_GMS_STOLEN_96M (3 << 3)
61#define SNB_GMCH_GMS_STOLEN_128M (4 << 3)
62#define SNB_GMCH_GMS_STOLEN_160M (5 << 3)
63#define SNB_GMCH_GMS_STOLEN_192M (6 << 3)
64#define SNB_GMCH_GMS_STOLEN_224M (7 << 3)
65#define SNB_GMCH_GMS_STOLEN_256M (8 << 3)
66#define SNB_GMCH_GMS_STOLEN_288M (9 << 3)
67#define SNB_GMCH_GMS_STOLEN_320M (0xa << 3)
68#define SNB_GMCH_GMS_STOLEN_352M (0xb << 3)
69#define SNB_GMCH_GMS_STOLEN_384M (0xc << 3)
70#define SNB_GMCH_GMS_STOLEN_416M (0xd << 3)
71#define SNB_GMCH_GMS_STOLEN_448M (0xe << 3)
72#define SNB_GMCH_GMS_STOLEN_480M (0xf << 3)
73#define SNB_GMCH_GMS_STOLEN_512M (0x10 << 3)
74 38
75/* PCI config space */ 39/* PCI config space */
76 40
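
_PIPE() derives the pipe/plane B register from the A register plus the A-to-B stride, so pipe 0 selects the first address and pipe 1 the second; a compile-time check of the expansion using the DPLL values defined later in this file:

#define _PIPE(pipe, a, b) ((a) + (pipe)*((b)-(a)))
#define DPLL_A 0x06014
#define DPLL_B 0x06018
#define DPLL(pipe) _PIPE(pipe, DPLL_A, DPLL_B)

_Static_assert(DPLL(0) == DPLL_A, "pipe 0 selects the A register");
_Static_assert(DPLL(1) == DPLL_B, "pipe 1 adds the A-to-B stride");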
@@ -106,10 +70,13 @@
106#define I915_GC_RENDER_CLOCK_200_MHZ (1 << 0) 70#define I915_GC_RENDER_CLOCK_200_MHZ (1 << 0)
107#define I915_GC_RENDER_CLOCK_333_MHZ (4 << 0) 71#define I915_GC_RENDER_CLOCK_333_MHZ (4 << 0)
108#define LBB 0xf4 72#define LBB 0xf4
109#define GDRST 0xc0 73
110#define GDRST_FULL (0<<2) 74/* Graphics reset regs */
111#define GDRST_RENDER (1<<2) 75#define I965_GDRST 0xc0 /* PCI config register */
112#define GDRST_MEDIA (3<<2) 76#define ILK_GDSR 0x2ca4 /* MCHBAR offset */
77#define GRDOM_FULL (0<<2)
78#define GRDOM_RENDER (1<<2)
79#define GRDOM_MEDIA (3<<2)
113 80
114/* VGA stuff */ 81/* VGA stuff */
115 82
@@ -192,11 +159,11 @@
192#define MI_STORE_DWORD_INDEX MI_INSTR(0x21, 1) 159#define MI_STORE_DWORD_INDEX MI_INSTR(0x21, 1)
193#define MI_STORE_DWORD_INDEX_SHIFT 2 160#define MI_STORE_DWORD_INDEX_SHIFT 2
194#define MI_LOAD_REGISTER_IMM MI_INSTR(0x22, 1) 161#define MI_LOAD_REGISTER_IMM MI_INSTR(0x22, 1)
162#define MI_FLUSH_DW MI_INSTR(0x26, 2) /* for GEN6 */
195#define MI_BATCH_BUFFER MI_INSTR(0x30, 1) 163#define MI_BATCH_BUFFER MI_INSTR(0x30, 1)
196#define MI_BATCH_NON_SECURE (1) 164#define MI_BATCH_NON_SECURE (1)
197#define MI_BATCH_NON_SECURE_I965 (1<<8) 165#define MI_BATCH_NON_SECURE_I965 (1<<8)
198#define MI_BATCH_BUFFER_START MI_INSTR(0x31, 0) 166#define MI_BATCH_BUFFER_START MI_INSTR(0x31, 0)
199
200/* 167/*
201 * 3D instructions used by the kernel 168 * 3D instructions used by the kernel
202 */ 169 */
@@ -249,6 +216,16 @@
249#define PIPE_CONTROL_GLOBAL_GTT (1<<2) /* in addr dword */ 216#define PIPE_CONTROL_GLOBAL_GTT (1<<2) /* in addr dword */
250#define PIPE_CONTROL_STALL_EN (1<<1) /* in addr word, Ironlake+ only */ 217#define PIPE_CONTROL_STALL_EN (1<<1) /* in addr word, Ironlake+ only */
251 218
219
220/*
221 * Reset registers
222 */
223#define DEBUG_RESET_I830 0x6070
224#define DEBUG_RESET_FULL (1<<7)
225#define DEBUG_RESET_RENDER (1<<8)
226#define DEBUG_RESET_DISPLAY (1<<9)
227
228
252/* 229/*
253 * Fence registers 230 * Fence registers
254 */ 231 */
@@ -283,6 +260,16 @@
283#define PRB0_HEAD 0x02034 260#define PRB0_HEAD 0x02034
284#define PRB0_START 0x02038 261#define PRB0_START 0x02038
285#define PRB0_CTL 0x0203c 262#define PRB0_CTL 0x0203c
263#define RENDER_RING_BASE 0x02000
264#define BSD_RING_BASE 0x04000
265#define GEN6_BSD_RING_BASE 0x12000
266#define RING_TAIL(base) ((base)+0x30)
267#define RING_HEAD(base) ((base)+0x34)
268#define RING_START(base) ((base)+0x38)
269#define RING_CTL(base) ((base)+0x3c)
270#define RING_HWS_PGA(base) ((base)+0x80)
271#define RING_HWS_PGA_GEN6(base) ((base)+0x2080)
272#define RING_ACTHD(base) ((base)+0x74)
286#define TAIL_ADDR 0x001FFFF8 273#define TAIL_ADDR 0x001FFFF8
287#define HEAD_WRAP_COUNT 0xFFE00000 274#define HEAD_WRAP_COUNT 0xFFE00000
288#define HEAD_WRAP_ONE 0x00200000 275#define HEAD_WRAP_ONE 0x00200000
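
The parameterized ring registers reproduce the legacy fixed addresses when given the render ring base, which can be checked at compile time from the values in this hunk (sketch):

#define RENDER_RING_BASE 0x02000
#define RING_HEAD(base) ((base)+0x34)
#define RING_CTL(base)  ((base)+0x3c)
#define PRB0_HEAD 0x02034
#define PRB0_CTL  0x0203c

_Static_assert(RING_HEAD(RENDER_RING_BASE) == PRB0_HEAD,
               "render ring head aliases PRB0_HEAD");
_Static_assert(RING_CTL(RENDER_RING_BASE) == PRB0_CTL,
               "render ring control aliases PRB0_CTL");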
@@ -295,6 +282,8 @@
295#define RING_VALID_MASK 0x00000001 282#define RING_VALID_MASK 0x00000001
296#define RING_VALID 0x00000001 283#define RING_VALID 0x00000001
297#define RING_INVALID 0x00000000 284#define RING_INVALID 0x00000000
285#define RING_WAIT_I8XX (1<<0) /* gen2, PRBx_HEAD */
286#define RING_WAIT (1<<11) /* gen3+, PRBx_CTL */
298#define PRB1_TAIL 0x02040 /* 915+ only */ 287#define PRB1_TAIL 0x02040 /* 915+ only */
299#define PRB1_HEAD 0x02044 /* 915+ only */ 288#define PRB1_HEAD 0x02044 /* 915+ only */
300#define PRB1_START 0x02048 /* 915+ only */ 289#define PRB1_START 0x02048 /* 915+ only */
@@ -306,7 +295,6 @@
306#define INSTDONE1 0x0207c /* 965+ only */ 295#define INSTDONE1 0x0207c /* 965+ only */
307#define ACTHD_I965 0x02074 296#define ACTHD_I965 0x02074
308#define HWS_PGA 0x02080 297#define HWS_PGA 0x02080
309#define HWS_PGA_GEN6 0x04080
310#define HWS_ADDRESS_MASK 0xfffff000 298#define HWS_ADDRESS_MASK 0xfffff000
311#define HWS_START_ADDRESS_SHIFT 4 299#define HWS_START_ADDRESS_SHIFT 4
312#define PWRCTXA 0x2088 /* 965GM+ only */ 300#define PWRCTXA 0x2088 /* 965GM+ only */
@@ -464,17 +452,17 @@
464#define GEN6_BLITTER_COMMAND_PARSER_MASTER_ERROR (1 << 25) 452#define GEN6_BLITTER_COMMAND_PARSER_MASTER_ERROR (1 << 25)
465#define GEN6_BLITTER_SYNC_STATUS (1 << 24) 453#define GEN6_BLITTER_SYNC_STATUS (1 << 24)
466#define GEN6_BLITTER_USER_INTERRUPT (1 << 22) 454#define GEN6_BLITTER_USER_INTERRUPT (1 << 22)
467/*
468 * BSD (bit stream decoder instruction and interrupt control register defines
469 * (G4X and Ironlake only)
470 */
471 455
472#define BSD_RING_TAIL 0x04030 456#define GEN6_BSD_SLEEP_PSMI_CONTROL 0x12050
473#define BSD_RING_HEAD 0x04034 457#define GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK (1 << 16)
474#define BSD_RING_START 0x04038 458#define GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_DISABLE (1 << 0)
475#define BSD_RING_CTL 0x0403c 459#define GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE 0
476#define BSD_RING_ACTHD 0x04074 460#define GEN6_BSD_SLEEP_PSMI_CONTROL_IDLE_INDICATOR (1 << 3)
477#define BSD_HWS_PGA 0x04080 461
462#define GEN6_BSD_IMR 0x120a8
463#define GEN6_BSD_IMR_USER_INTERRUPT (1 << 12)
464
465#define GEN6_BSD_RNCID 0x12198
478 466
479/* 467/*
480 * Framebuffer compression (915+ only) 468 * Framebuffer compression (915+ only)
@@ -579,12 +567,51 @@
579# define GPIO_DATA_VAL_IN (1 << 12) 567# define GPIO_DATA_VAL_IN (1 << 12)
580# define GPIO_DATA_PULLUP_DISABLE (1 << 13) 568# define GPIO_DATA_PULLUP_DISABLE (1 << 13)
581 569
582#define GMBUS0 0x5100 570#define GMBUS0 0x5100 /* clock/port select */
583#define GMBUS1 0x5104 571#define GMBUS_RATE_100KHZ (0<<8)
584#define GMBUS2 0x5108 572#define GMBUS_RATE_50KHZ (1<<8)
585#define GMBUS3 0x510c 573#define GMBUS_RATE_400KHZ (2<<8) /* reserved on Pineview */
586#define GMBUS4 0x5110 574#define GMBUS_RATE_1MHZ (3<<8) /* reserved on Pineview */
587#define GMBUS5 0x5120 575#define GMBUS_HOLD_EXT (1<<7) /* 300ns hold time, rsvd on Pineview */
576#define GMBUS_PORT_DISABLED 0
577#define GMBUS_PORT_SSC 1
578#define GMBUS_PORT_VGADDC 2
579#define GMBUS_PORT_PANEL 3
580#define GMBUS_PORT_DPC 4 /* HDMIC */
581#define GMBUS_PORT_DPB 5 /* SDVO, HDMIB */
582 /* 6 reserved */
583#define GMBUS_PORT_DPD 7 /* HDMID */
584#define GMBUS_NUM_PORTS 8
585#define GMBUS1 0x5104 /* command/status */
586#define GMBUS_SW_CLR_INT (1<<31)
587#define GMBUS_SW_RDY (1<<30)
588#define GMBUS_ENT (1<<29) /* enable timeout */
589#define GMBUS_CYCLE_NONE (0<<25)
590#define GMBUS_CYCLE_WAIT (1<<25)
591#define GMBUS_CYCLE_INDEX (2<<25)
592#define GMBUS_CYCLE_STOP (4<<25)
593#define GMBUS_BYTE_COUNT_SHIFT 16
594#define GMBUS_SLAVE_INDEX_SHIFT 8
595#define GMBUS_SLAVE_ADDR_SHIFT 1
596#define GMBUS_SLAVE_READ (1<<0)
597#define GMBUS_SLAVE_WRITE (0<<0)
598#define GMBUS2 0x5108 /* status */
599#define GMBUS_INUSE (1<<15)
600#define GMBUS_HW_WAIT_PHASE (1<<14)
601#define GMBUS_STALL_TIMEOUT (1<<13)
602#define GMBUS_INT (1<<12)
603#define GMBUS_HW_RDY (1<<11)
604#define GMBUS_SATOER (1<<10)
605#define GMBUS_ACTIVE (1<<9)
606#define GMBUS3 0x510c /* data buffer bytes 3-0 */
607#define GMBUS4 0x5110 /* interrupt mask (Pineview+) */
608#define GMBUS_SLAVE_TIMEOUT_EN (1<<4)
609#define GMBUS_NAK_EN (1<<3)
610#define GMBUS_IDLE_EN (1<<2)
611#define GMBUS_HW_WAIT_EN (1<<1)
612#define GMBUS_HW_RDY_EN (1<<0)
613#define GMBUS5 0x5120 /* byte index */
614#define GMBUS_2BYTE_INDEX_EN (1<<31)
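
Taken together, the GMBUS1 fields describe one bus transaction; a hypothetical helper (a sketch built from the defines above, not driver code) composing a wait-cycle read command:

#include <stdint.h>

#define GMBUS_SW_RDY            (1u << 30)
#define GMBUS_CYCLE_WAIT        (1u << 25)
#define GMBUS_CYCLE_STOP        (4u << 25)
#define GMBUS_BYTE_COUNT_SHIFT  16
#define GMBUS_SLAVE_ADDR_SHIFT  1
#define GMBUS_SLAVE_READ        (1u << 0)

static uint32_t gmbus1_read_cmd(uint8_t slave_addr, uint16_t byte_count)
{
        return GMBUS_SW_RDY |                   /* start immediately */
               GMBUS_CYCLE_WAIT | GMBUS_CYCLE_STOP |
               ((uint32_t)byte_count << GMBUS_BYTE_COUNT_SHIFT) |
               ((uint32_t)slave_addr << GMBUS_SLAVE_ADDR_SHIFT) |
               GMBUS_SLAVE_READ;
}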
588 615
589/* 616/*
590 * Clock control & power management 617 * Clock control & power management
@@ -603,6 +630,7 @@
603#define VGA1_PD_P1_MASK (0x1f << 8) 630#define VGA1_PD_P1_MASK (0x1f << 8)
604#define DPLL_A 0x06014 631#define DPLL_A 0x06014
605#define DPLL_B 0x06018 632#define DPLL_B 0x06018
633#define DPLL(pipe) _PIPE(pipe, DPLL_A, DPLL_B)
606#define DPLL_VCO_ENABLE (1 << 31) 634#define DPLL_VCO_ENABLE (1 << 31)
607#define DPLL_DVO_HIGH_SPEED (1 << 30) 635#define DPLL_DVO_HIGH_SPEED (1 << 30)
608#define DPLL_SYNCLOCK_ENABLE (1 << 29) 636#define DPLL_SYNCLOCK_ENABLE (1 << 29)
@@ -640,24 +668,6 @@
640#define ADPA_DPMS_STANDBY (2<<10) 668#define ADPA_DPMS_STANDBY (2<<10)
641#define ADPA_DPMS_OFF (3<<10) 669#define ADPA_DPMS_OFF (3<<10)
642 670
643#define RING_TAIL 0x00
644#define TAIL_ADDR 0x001FFFF8
645#define RING_HEAD 0x04
646#define HEAD_WRAP_COUNT 0xFFE00000
647#define HEAD_WRAP_ONE 0x00200000
648#define HEAD_ADDR 0x001FFFFC
649#define RING_START 0x08
650#define START_ADDR 0xFFFFF000
651#define RING_LEN 0x0C
652#define RING_NR_PAGES 0x001FF000
653#define RING_REPORT_MASK 0x00000006
654#define RING_REPORT_64K 0x00000002
655#define RING_REPORT_128K 0x00000004
656#define RING_NO_REPORT 0x00000000
657#define RING_VALID_MASK 0x00000001
658#define RING_VALID 0x00000001
659#define RING_INVALID 0x00000000
660
661/* Scratch pad debug 0 reg: 671/* Scratch pad debug 0 reg:
662 */ 672 */
663#define DPLL_FPA01_P1_POST_DIV_MASK_I830 0x001f0000 673#define DPLL_FPA01_P1_POST_DIV_MASK_I830 0x001f0000
@@ -736,10 +746,13 @@
736#define DPLL_MD_VGA_UDI_MULTIPLIER_MASK 0x0000003f 746#define DPLL_MD_VGA_UDI_MULTIPLIER_MASK 0x0000003f
737#define DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT 0 747#define DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT 0
738#define DPLL_B_MD 0x06020 /* 965+ only */ 748#define DPLL_B_MD 0x06020 /* 965+ only */
749#define DPLL_MD(pipe) _PIPE(pipe, DPLL_A_MD, DPLL_B_MD)
739#define FPA0 0x06040 750#define FPA0 0x06040
740#define FPA1 0x06044 751#define FPA1 0x06044
741#define FPB0 0x06048 752#define FPB0 0x06048
742#define FPB1 0x0604c 753#define FPB1 0x0604c
754#define FP0(pipe) _PIPE(pipe, FPA0, FPB0)
755#define FP1(pipe) _PIPE(pipe, FPA1, FPB1)
743#define FP_N_DIV_MASK 0x003f0000 756#define FP_N_DIV_MASK 0x003f0000
744#define FP_N_PINEVIEW_DIV_MASK 0x00ff0000 757#define FP_N_PINEVIEW_DIV_MASK 0x00ff0000
745#define FP_N_DIV_SHIFT 16 758#define FP_N_DIV_SHIFT 16
@@ -760,6 +773,7 @@
760#define DPLLA_TEST_M_BYPASS (1 << 2) 773#define DPLLA_TEST_M_BYPASS (1 << 2)
761#define DPLLA_INPUT_BUFFER_ENABLE (1 << 0) 774#define DPLLA_INPUT_BUFFER_ENABLE (1 << 0)
762#define D_STATE 0x6104 775#define D_STATE 0x6104
776#define DSTATE_GFX_RESET_I830 (1<<6)
763#define DSTATE_PLL_D3_OFF (1<<3) 777#define DSTATE_PLL_D3_OFF (1<<3)
764#define DSTATE_GFX_CLOCK_GATING (1<<1) 778#define DSTATE_GFX_CLOCK_GATING (1<<1)
765#define DSTATE_DOT_CLOCK_GATING (1<<0) 779#define DSTATE_DOT_CLOCK_GATING (1<<0)
@@ -926,6 +940,8 @@
926#define CLKCFG_MEM_800 (3 << 4) 940#define CLKCFG_MEM_800 (3 << 4)
927#define CLKCFG_MEM_MASK (7 << 4) 941#define CLKCFG_MEM_MASK (7 << 4)
928 942
943#define TSC1 0x11001
944#define TSE (1<<0)
929#define TR1 0x11006 945#define TR1 0x11006
930#define TSFS 0x11020 946#define TSFS 0x11020
931#define TSFS_SLOPE_MASK 0x0000ff00 947#define TSFS_SLOPE_MASK 0x0000ff00
@@ -1070,6 +1086,8 @@
1070#define MEMSTAT_SRC_CTL_STDBY 3 1086#define MEMSTAT_SRC_CTL_STDBY 3
1071#define RCPREVBSYTUPAVG 0x113b8 1087#define RCPREVBSYTUPAVG 0x113b8
1072#define RCPREVBSYTDNAVG 0x113bc 1088#define RCPREVBSYTDNAVG 0x113bc
1089#define PMMISC 0x11214
1090#define MCPPCE_EN (1<<0) /* enable PM_MSG from PCH->MPC */
1073#define SDEW 0x1124c 1091#define SDEW 0x1124c
1074#define CSIEW0 0x11250 1092#define CSIEW0 0x11250
1075#define CSIEW1 0x11254 1093#define CSIEW1 0x11254
@@ -1150,6 +1168,15 @@
1150#define PIPEBSRC 0x6101c 1168#define PIPEBSRC 0x6101c
1151#define BCLRPAT_B 0x61020 1169#define BCLRPAT_B 0x61020
1152 1170
1171#define HTOTAL(pipe) _PIPE(pipe, HTOTAL_A, HTOTAL_B)
1172#define HBLANK(pipe) _PIPE(pipe, HBLANK_A, HBLANK_B)
1173#define HSYNC(pipe) _PIPE(pipe, HSYNC_A, HSYNC_B)
1174#define VTOTAL(pipe) _PIPE(pipe, VTOTAL_A, VTOTAL_B)
1175#define VBLANK(pipe) _PIPE(pipe, VBLANK_A, VBLANK_B)
1176#define VSYNC(pipe) _PIPE(pipe, VSYNC_A, VSYNC_B)
1177#define PIPESRC(pipe) _PIPE(pipe, PIPEASRC, PIPEBSRC)
1178#define BCLRPAT(pipe) _PIPE(pipe, BCLRPAT_A, BCLRPAT_B)
1179
1153/* VGA port control */ 1180/* VGA port control */
1154#define ADPA 0x61100 1181#define ADPA 0x61100
1155#define ADPA_DAC_ENABLE (1<<31) 1182#define ADPA_DAC_ENABLE (1<<31)
@@ -1481,6 +1508,7 @@
1481# define TV_TEST_MODE_MASK (7 << 0) 1508# define TV_TEST_MODE_MASK (7 << 0)
1482 1509
1483#define TV_DAC 0x68004 1510#define TV_DAC 0x68004
1511# define TV_DAC_SAVE 0x00ffff00
1484/** 1512/**
1485 * Reports that DAC state change logic has reported change (RO). 1513 * Reports that DAC state change logic has reported change (RO).
1486 * 1514 *
@@ -2075,29 +2103,35 @@
2075 2103
2076/* Display & cursor control */ 2104/* Display & cursor control */
2077 2105
2078/* dithering flag on Ironlake */
2079#define PIPE_ENABLE_DITHER (1 << 4)
2080#define PIPE_DITHER_TYPE_MASK (3 << 2)
2081#define PIPE_DITHER_TYPE_SPATIAL (0 << 2)
2082#define PIPE_DITHER_TYPE_ST01 (1 << 2)
2083/* Pipe A */ 2106/* Pipe A */
2084#define PIPEADSL 0x70000 2107#define PIPEADSL 0x70000
2085#define DSL_LINEMASK 0x00000fff 2108#define DSL_LINEMASK 0x00000fff
2086#define PIPEACONF 0x70008 2109#define PIPEACONF 0x70008
2087#define PIPEACONF_ENABLE (1<<31) 2110#define PIPECONF_ENABLE (1<<31)
2088#define PIPEACONF_DISABLE 0 2111#define PIPECONF_DISABLE 0
2089#define PIPEACONF_DOUBLE_WIDE (1<<30) 2112#define PIPECONF_DOUBLE_WIDE (1<<30)
2090#define I965_PIPECONF_ACTIVE (1<<30) 2113#define I965_PIPECONF_ACTIVE (1<<30)
2091#define PIPEACONF_SINGLE_WIDE 0 2114#define PIPECONF_SINGLE_WIDE 0
2092#define PIPEACONF_PIPE_UNLOCKED 0 2115#define PIPECONF_PIPE_UNLOCKED 0
2093#define PIPEACONF_PIPE_LOCKED (1<<25) 2116#define PIPECONF_PIPE_LOCKED (1<<25)
2094#define PIPEACONF_PALETTE 0 2117#define PIPECONF_PALETTE 0
2095#define PIPEACONF_GAMMA (1<<24) 2118#define PIPECONF_GAMMA (1<<24)
2096#define PIPECONF_FORCE_BORDER (1<<25) 2119#define PIPECONF_FORCE_BORDER (1<<25)
2097#define PIPECONF_PROGRESSIVE (0 << 21) 2120#define PIPECONF_PROGRESSIVE (0 << 21)
2098#define PIPECONF_INTERLACE_W_FIELD_INDICATION (6 << 21) 2121#define PIPECONF_INTERLACE_W_FIELD_INDICATION (6 << 21)
2099#define PIPECONF_INTERLACE_FIELD_0_ONLY (7 << 21) 2122#define PIPECONF_INTERLACE_FIELD_0_ONLY (7 << 21)
2100#define PIPECONF_CXSR_DOWNCLOCK (1<<16) 2123#define PIPECONF_CXSR_DOWNCLOCK (1<<16)
2124#define PIPECONF_BPP_MASK (0x000000e0)
2125#define PIPECONF_BPP_8 (0<<5)
2126#define PIPECONF_BPP_10 (1<<5)
2127#define PIPECONF_BPP_6 (2<<5)
2128#define PIPECONF_BPP_12 (3<<5)
2129#define PIPECONF_DITHER_EN (1<<4)
2130#define PIPECONF_DITHER_TYPE_MASK (0x0000000c)
2131#define PIPECONF_DITHER_TYPE_SP (0<<2)
2132#define PIPECONF_DITHER_TYPE_ST1 (1<<2)
2133#define PIPECONF_DITHER_TYPE_ST2 (2<<2)
2134#define PIPECONF_DITHER_TYPE_TEMP (3<<2)
2101#define PIPEASTAT 0x70024 2135#define PIPEASTAT 0x70024
2102#define PIPE_FIFO_UNDERRUN_STATUS (1UL<<31) 2136#define PIPE_FIFO_UNDERRUN_STATUS (1UL<<31)
2103#define PIPE_CRC_ERROR_ENABLE (1UL<<29) 2137#define PIPE_CRC_ERROR_ENABLE (1UL<<29)
@@ -2128,12 +2162,15 @@
2128#define PIPE_START_VBLANK_INTERRUPT_STATUS (1UL<<2) /* 965 or later */ 2162#define PIPE_START_VBLANK_INTERRUPT_STATUS (1UL<<2) /* 965 or later */
2129#define PIPE_VBLANK_INTERRUPT_STATUS (1UL<<1) 2163#define PIPE_VBLANK_INTERRUPT_STATUS (1UL<<1)
2130#define PIPE_OVERLAY_UPDATED_STATUS (1UL<<0) 2164#define PIPE_OVERLAY_UPDATED_STATUS (1UL<<0)
2131#define PIPE_BPC_MASK (7 << 5) /* Ironlake */ 2165#define PIPE_BPC_MASK (7 << 5) /* Ironlake */
2132#define PIPE_8BPC (0 << 5) 2166#define PIPE_8BPC (0 << 5)
2133#define PIPE_10BPC (1 << 5) 2167#define PIPE_10BPC (1 << 5)
2134#define PIPE_6BPC (2 << 5) 2168#define PIPE_6BPC (2 << 5)
2135#define PIPE_12BPC (3 << 5) 2169#define PIPE_12BPC (3 << 5)
2136 2170
2171#define PIPECONF(pipe) _PIPE(pipe, PIPEACONF, PIPEBCONF)
2172#define PIPEDSL(pipe) _PIPE(pipe, PIPEADSL, PIPEBDSL)
2173
2137#define DSPARB 0x70030 2174#define DSPARB 0x70030
2138#define DSPARB_CSTART_MASK (0x7f << 7) 2175#define DSPARB_CSTART_MASK (0x7f << 7)
2139#define DSPARB_CSTART_SHIFT 7 2176#define DSPARB_CSTART_SHIFT 7
@@ -2206,8 +2243,8 @@
2206#define WM1_LP_SR_EN (1<<31) 2243#define WM1_LP_SR_EN (1<<31)
2207#define WM1_LP_LATENCY_SHIFT 24 2244#define WM1_LP_LATENCY_SHIFT 24
2208#define WM1_LP_LATENCY_MASK (0x7f<<24) 2245#define WM1_LP_LATENCY_MASK (0x7f<<24)
2209#define WM1_LP_FBC_LP1_MASK (0xf<<20) 2246#define WM1_LP_FBC_MASK (0xf<<20)
2210#define WM1_LP_FBC_LP1_SHIFT 20 2247#define WM1_LP_FBC_SHIFT 20
2211#define WM1_LP_SR_MASK (0x1ff<<8) 2248#define WM1_LP_SR_MASK (0x1ff<<8)
2212#define WM1_LP_SR_SHIFT 8 2249#define WM1_LP_SR_SHIFT 8
2213#define WM1_LP_CURSOR_MASK (0x3f) 2250#define WM1_LP_CURSOR_MASK (0x3f)
@@ -2333,6 +2370,14 @@
2333#define DSPASURF 0x7019C /* 965+ only */ 2370#define DSPASURF 0x7019C /* 965+ only */
2334#define DSPATILEOFF 0x701A4 /* 965+ only */ 2371#define DSPATILEOFF 0x701A4 /* 965+ only */
2335 2372
2373#define DSPCNTR(plane) _PIPE(plane, DSPACNTR, DSPBCNTR)
2374#define DSPADDR(plane) _PIPE(plane, DSPAADDR, DSPBADDR)
2375#define DSPSTRIDE(plane) _PIPE(plane, DSPASTRIDE, DSPBSTRIDE)
2376#define DSPPOS(plane) _PIPE(plane, DSPAPOS, DSPBPOS)
2377#define DSPSIZE(plane) _PIPE(plane, DSPASIZE, DSPBSIZE)
2378#define DSPSURF(plane) _PIPE(plane, DSPASURF, DSPBSURF)
2379#define DSPTILEOFF(plane) _PIPE(plane, DSPATILEOFF, DSPBTILEOFF)
2380
2336/* VBIOS flags */ 2381/* VBIOS flags */
2337#define SWF00 0x71410 2382#define SWF00 0x71410
2338#define SWF01 0x71414 2383#define SWF01 0x71414
@@ -2397,6 +2442,7 @@
2397#define RR_HW_HIGH_POWER_FRAMES_MASK 0xff00 2442#define RR_HW_HIGH_POWER_FRAMES_MASK 0xff00
2398 2443
2399#define FDI_PLL_BIOS_0 0x46000 2444#define FDI_PLL_BIOS_0 0x46000
2445#define FDI_PLL_FB_CLOCK_MASK 0xff
2400#define FDI_PLL_BIOS_1 0x46004 2446#define FDI_PLL_BIOS_1 0x46004
2401#define FDI_PLL_BIOS_2 0x46008 2447#define FDI_PLL_BIOS_2 0x46008
2402#define DISPLAY_PORT_PLL_BIOS_0 0x4600c 2448#define DISPLAY_PORT_PLL_BIOS_0 0x4600c
@@ -2420,46 +2466,47 @@
2420#define PIPEA_DATA_M1 0x60030 2466#define PIPEA_DATA_M1 0x60030
2421#define TU_SIZE(x) (((x)-1) << 25) /* default size 64 */ 2467#define TU_SIZE(x) (((x)-1) << 25) /* default size 64 */
2422#define TU_SIZE_MASK 0x7e000000 2468#define TU_SIZE_MASK 0x7e000000
2423#define PIPEA_DATA_M1_OFFSET 0 2469#define PIPE_DATA_M1_OFFSET 0
2424#define PIPEA_DATA_N1 0x60034 2470#define PIPEA_DATA_N1 0x60034
2425#define PIPEA_DATA_N1_OFFSET 0 2471#define PIPE_DATA_N1_OFFSET 0
2426 2472
2427#define PIPEA_DATA_M2 0x60038 2473#define PIPEA_DATA_M2 0x60038
2428#define PIPEA_DATA_M2_OFFSET 0 2474#define PIPE_DATA_M2_OFFSET 0
2429#define PIPEA_DATA_N2 0x6003c 2475#define PIPEA_DATA_N2 0x6003c
2430#define PIPEA_DATA_N2_OFFSET 0 2476#define PIPE_DATA_N2_OFFSET 0
2431 2477
2432#define PIPEA_LINK_M1 0x60040 2478#define PIPEA_LINK_M1 0x60040
2433#define PIPEA_LINK_M1_OFFSET 0 2479#define PIPE_LINK_M1_OFFSET 0
2434#define PIPEA_LINK_N1 0x60044 2480#define PIPEA_LINK_N1 0x60044
2435#define PIPEA_LINK_N1_OFFSET 0 2481#define PIPE_LINK_N1_OFFSET 0
2436 2482
2437#define PIPEA_LINK_M2 0x60048 2483#define PIPEA_LINK_M2 0x60048
2438#define PIPEA_LINK_M2_OFFSET 0 2484#define PIPE_LINK_M2_OFFSET 0
2439#define PIPEA_LINK_N2 0x6004c 2485#define PIPEA_LINK_N2 0x6004c
2440#define PIPEA_LINK_N2_OFFSET 0 2486#define PIPE_LINK_N2_OFFSET 0
2441 2487
2442/* PIPEB timing regs are same start from 0x61000 */ 2488/* PIPEB timing regs are same start from 0x61000 */
2443 2489
2444#define PIPEB_DATA_M1 0x61030 2490#define PIPEB_DATA_M1 0x61030
2445#define PIPEB_DATA_M1_OFFSET 0
2446#define PIPEB_DATA_N1 0x61034 2491#define PIPEB_DATA_N1 0x61034
2447#define PIPEB_DATA_N1_OFFSET 0
2448 2492
2449#define PIPEB_DATA_M2 0x61038 2493#define PIPEB_DATA_M2 0x61038
2450#define PIPEB_DATA_M2_OFFSET 0
2451#define PIPEB_DATA_N2 0x6103c 2494#define PIPEB_DATA_N2 0x6103c
2452#define PIPEB_DATA_N2_OFFSET 0
2453 2495
2454#define PIPEB_LINK_M1 0x61040 2496#define PIPEB_LINK_M1 0x61040
2455#define PIPEB_LINK_M1_OFFSET 0
2456#define PIPEB_LINK_N1 0x61044 2497#define PIPEB_LINK_N1 0x61044
2457#define PIPEB_LINK_N1_OFFSET 0
2458 2498
2459#define PIPEB_LINK_M2 0x61048 2499#define PIPEB_LINK_M2 0x61048
2460#define PIPEB_LINK_M2_OFFSET 0
2461#define PIPEB_LINK_N2 0x6104c 2500#define PIPEB_LINK_N2 0x6104c
2462#define PIPEB_LINK_N2_OFFSET 0 2501
2502#define PIPE_DATA_M1(pipe) _PIPE(pipe, PIPEA_DATA_M1, PIPEB_DATA_M1)
2503#define PIPE_DATA_N1(pipe) _PIPE(pipe, PIPEA_DATA_N1, PIPEB_DATA_N1)
2504#define PIPE_DATA_M2(pipe) _PIPE(pipe, PIPEA_DATA_M2, PIPEB_DATA_M2)
2505#define PIPE_DATA_N2(pipe) _PIPE(pipe, PIPEA_DATA_N2, PIPEB_DATA_N2)
2506#define PIPE_LINK_M1(pipe) _PIPE(pipe, PIPEA_LINK_M1, PIPEB_LINK_M1)
2507#define PIPE_LINK_N1(pipe) _PIPE(pipe, PIPEA_LINK_N1, PIPEB_LINK_N1)
2508#define PIPE_LINK_M2(pipe) _PIPE(pipe, PIPEA_LINK_M2, PIPEB_LINK_M2)
2509#define PIPE_LINK_N2(pipe) _PIPE(pipe, PIPEA_LINK_N2, PIPEB_LINK_N2)
2463 2510
2464/* CPU panel fitter */ 2511/* CPU panel fitter */
2465#define PFA_CTL_1 0x68080 2512#define PFA_CTL_1 0x68080
@@ -2516,7 +2563,7 @@
2516#define GT_SYNC_STATUS (1 << 2) 2563#define GT_SYNC_STATUS (1 << 2)
2517#define GT_USER_INTERRUPT (1 << 0) 2564#define GT_USER_INTERRUPT (1 << 0)
2518#define GT_BSD_USER_INTERRUPT (1 << 5) 2565#define GT_BSD_USER_INTERRUPT (1 << 5)
2519 2566#define GT_GEN6_BSD_USER_INTERRUPT (1 << 12)
2520 2567
2521#define GTISR 0x44010 2568#define GTISR 0x44010
2522#define GTIMR 0x44014 2569#define GTIMR 0x44014
@@ -2600,11 +2647,14 @@
2600 2647
2601#define PCH_DPLL_A 0xc6014 2648#define PCH_DPLL_A 0xc6014
2602#define PCH_DPLL_B 0xc6018 2649#define PCH_DPLL_B 0xc6018
2650#define PCH_DPLL(pipe) _PIPE(pipe, PCH_DPLL_A, PCH_DPLL_B)
2603 2651
2604#define PCH_FPA0 0xc6040 2652#define PCH_FPA0 0xc6040
2605#define PCH_FPA1 0xc6044 2653#define PCH_FPA1 0xc6044
2606#define PCH_FPB0 0xc6048 2654#define PCH_FPB0 0xc6048
2607#define PCH_FPB1 0xc604c 2655#define PCH_FPB1 0xc604c
2656#define PCH_FP0(pipe) _PIPE(pipe, PCH_FPA0, PCH_FPB0)
2657#define PCH_FP1(pipe) _PIPE(pipe, PCH_FPA1, PCH_FPB1)
2608 2658
2609#define PCH_DPLL_TEST 0xc606c 2659#define PCH_DPLL_TEST 0xc606c
2610 2660
@@ -2690,6 +2740,13 @@
2690#define TRANS_VBLANK_B 0xe1010 2740#define TRANS_VBLANK_B 0xe1010
2691#define TRANS_VSYNC_B 0xe1014 2741#define TRANS_VSYNC_B 0xe1014
2692 2742
2743#define TRANS_HTOTAL(pipe) _PIPE(pipe, TRANS_HTOTAL_A, TRANS_HTOTAL_B)
2744#define TRANS_HBLANK(pipe) _PIPE(pipe, TRANS_HBLANK_A, TRANS_HBLANK_B)
2745#define TRANS_HSYNC(pipe) _PIPE(pipe, TRANS_HSYNC_A, TRANS_HSYNC_B)
2746#define TRANS_VTOTAL(pipe) _PIPE(pipe, TRANS_VTOTAL_A, TRANS_VTOTAL_B)
2747#define TRANS_VBLANK(pipe) _PIPE(pipe, TRANS_VBLANK_A, TRANS_VBLANK_B)
2748#define TRANS_VSYNC(pipe) _PIPE(pipe, TRANS_VSYNC_A, TRANS_VSYNC_B)
2749
2693#define TRANSB_DATA_M1 0xe1030 2750#define TRANSB_DATA_M1 0xe1030
2694#define TRANSB_DATA_N1 0xe1034 2751#define TRANSB_DATA_N1 0xe1034
2695#define TRANSB_DATA_M2 0xe1038 2752#define TRANSB_DATA_M2 0xe1038
@@ -2701,6 +2758,7 @@
2701 2758
2702#define TRANSACONF 0xf0008 2759#define TRANSACONF 0xf0008
2703#define TRANSBCONF 0xf1008 2760#define TRANSBCONF 0xf1008
2761#define TRANSCONF(plane) _PIPE(plane, TRANSACONF, TRANSBCONF)
2704#define TRANS_DISABLE (0<<31) 2762#define TRANS_DISABLE (0<<31)
2705#define TRANS_ENABLE (1<<31) 2763#define TRANS_ENABLE (1<<31)
2706#define TRANS_STATE_MASK (1<<30) 2764#define TRANS_STATE_MASK (1<<30)
@@ -2725,6 +2783,7 @@
2725/* CPU: FDI_TX */ 2783/* CPU: FDI_TX */
2726#define FDI_TXA_CTL 0x60100 2784#define FDI_TXA_CTL 0x60100
2727#define FDI_TXB_CTL 0x61100 2785#define FDI_TXB_CTL 0x61100
2786#define FDI_TX_CTL(pipe) _PIPE(pipe, FDI_TXA_CTL, FDI_TXB_CTL)
2728#define FDI_TX_DISABLE (0<<31) 2787#define FDI_TX_DISABLE (0<<31)
2729#define FDI_TX_ENABLE (1<<31) 2788#define FDI_TX_ENABLE (1<<31)
2730#define FDI_LINK_TRAIN_PATTERN_1 (0<<28) 2789#define FDI_LINK_TRAIN_PATTERN_1 (0<<28)
@@ -2766,8 +2825,8 @@
2766/* FDI_RX, FDI_X is hard-wired to Transcoder_X */ 2825/* FDI_RX, FDI_X is hard-wired to Transcoder_X */
2767#define FDI_RXA_CTL 0xf000c 2826#define FDI_RXA_CTL 0xf000c
2768#define FDI_RXB_CTL 0xf100c 2827#define FDI_RXB_CTL 0xf100c
2828#define FDI_RX_CTL(pipe) _PIPE(pipe, FDI_RXA_CTL, FDI_RXB_CTL)
2769#define FDI_RX_ENABLE (1<<31) 2829#define FDI_RX_ENABLE (1<<31)
2770#define FDI_RX_DISABLE (0<<31)
2771/* train, dp width same as FDI_TX */ 2830/* train, dp width same as FDI_TX */
2772#define FDI_DP_PORT_WIDTH_X8 (7<<19) 2831#define FDI_DP_PORT_WIDTH_X8 (7<<19)
2773#define FDI_8BPC (0<<16) 2832#define FDI_8BPC (0<<16)
@@ -2782,8 +2841,7 @@
2782#define FDI_FS_ERR_REPORT_ENABLE (1<<9) 2841#define FDI_FS_ERR_REPORT_ENABLE (1<<9)
2783#define FDI_FE_ERR_REPORT_ENABLE (1<<8) 2842#define FDI_FE_ERR_REPORT_ENABLE (1<<8)
2784#define FDI_RX_ENHANCE_FRAME_ENABLE (1<<6) 2843#define FDI_RX_ENHANCE_FRAME_ENABLE (1<<6)
2785#define FDI_SEL_RAWCLK (0<<4) 2844#define FDI_PCDCLK (1<<4)
2786#define FDI_SEL_PCDCLK (1<<4)
2787/* CPT */ 2845/* CPT */
2788#define FDI_AUTO_TRAINING (1<<10) 2846#define FDI_AUTO_TRAINING (1<<10)
2789#define FDI_LINK_TRAIN_PATTERN_1_CPT (0<<8) 2847#define FDI_LINK_TRAIN_PATTERN_1_CPT (0<<8)
@@ -2798,6 +2856,9 @@
2798#define FDI_RXA_TUSIZE2 0xf0038 2856#define FDI_RXA_TUSIZE2 0xf0038
2799#define FDI_RXB_TUSIZE1 0xf1030 2857#define FDI_RXB_TUSIZE1 0xf1030
2800#define FDI_RXB_TUSIZE2 0xf1038 2858#define FDI_RXB_TUSIZE2 0xf1038
2859#define FDI_RX_MISC(pipe) _PIPE(pipe, FDI_RXA_MISC, FDI_RXB_MISC)
2860#define FDI_RX_TUSIZE1(pipe) _PIPE(pipe, FDI_RXA_TUSIZE1, FDI_RXB_TUSIZE1)
2861#define FDI_RX_TUSIZE2(pipe) _PIPE(pipe, FDI_RXA_TUSIZE2, FDI_RXB_TUSIZE2)
2801 2862
2802/* FDI_RX interrupt register format */ 2863/* FDI_RX interrupt register format */
2803#define FDI_RX_INTER_LANE_ALIGN (1<<10) 2864#define FDI_RX_INTER_LANE_ALIGN (1<<10)
@@ -2816,6 +2877,8 @@
2816#define FDI_RXA_IMR 0xf0018 2877#define FDI_RXA_IMR 0xf0018
2817#define FDI_RXB_IIR 0xf1014 2878#define FDI_RXB_IIR 0xf1014
2818#define FDI_RXB_IMR 0xf1018 2879#define FDI_RXB_IMR 0xf1018
2880#define FDI_RX_IIR(pipe) _PIPE(pipe, FDI_RXA_IIR, FDI_RXB_IIR)
2881#define FDI_RX_IMR(pipe) _PIPE(pipe, FDI_RXA_IMR, FDI_RXB_IMR)
2819 2882
2820#define FDI_PLL_CTL_1 0xfe000 2883#define FDI_PLL_CTL_1 0xfe000
2821#define FDI_PLL_CTL_2 0xfe004 2884#define FDI_PLL_CTL_2 0xfe004
@@ -2935,6 +2998,7 @@
2935#define TRANS_DP_CTL_A 0xe0300 2998#define TRANS_DP_CTL_A 0xe0300
2936#define TRANS_DP_CTL_B 0xe1300 2999#define TRANS_DP_CTL_B 0xe1300
2937#define TRANS_DP_CTL_C 0xe2300 3000#define TRANS_DP_CTL_C 0xe2300
3001#define TRANS_DP_CTL(pipe) (TRANS_DP_CTL_A + (pipe) * 0x01000)
2938#define TRANS_DP_OUTPUT_ENABLE (1<<31) 3002#define TRANS_DP_OUTPUT_ENABLE (1<<31)
2939#define TRANS_DP_PORT_SEL_B (0<<29) 3003#define TRANS_DP_PORT_SEL_B (0<<29)
2940#define TRANS_DP_PORT_SEL_C (1<<29) 3004#define TRANS_DP_PORT_SEL_C (1<<29)
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 31f08581e93a..989c19d2d959 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -256,7 +256,7 @@ static void i915_save_modeset_reg(struct drm_device *dev)
256 dev_priv->saveFPA1 = I915_READ(FPA1); 256 dev_priv->saveFPA1 = I915_READ(FPA1);
257 dev_priv->saveDPLL_A = I915_READ(DPLL_A); 257 dev_priv->saveDPLL_A = I915_READ(DPLL_A);
258 } 258 }
259 if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev)) 259 if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
260 dev_priv->saveDPLL_A_MD = I915_READ(DPLL_A_MD); 260 dev_priv->saveDPLL_A_MD = I915_READ(DPLL_A_MD);
261 dev_priv->saveHTOTAL_A = I915_READ(HTOTAL_A); 261 dev_priv->saveHTOTAL_A = I915_READ(HTOTAL_A);
262 dev_priv->saveHBLANK_A = I915_READ(HBLANK_A); 262 dev_priv->saveHBLANK_A = I915_READ(HBLANK_A);
@@ -294,7 +294,7 @@ static void i915_save_modeset_reg(struct drm_device *dev)
294 dev_priv->saveDSPASIZE = I915_READ(DSPASIZE); 294 dev_priv->saveDSPASIZE = I915_READ(DSPASIZE);
295 dev_priv->saveDSPAPOS = I915_READ(DSPAPOS); 295 dev_priv->saveDSPAPOS = I915_READ(DSPAPOS);
296 dev_priv->saveDSPAADDR = I915_READ(DSPAADDR); 296 dev_priv->saveDSPAADDR = I915_READ(DSPAADDR);
297 if (IS_I965G(dev)) { 297 if (INTEL_INFO(dev)->gen >= 4) {
298 dev_priv->saveDSPASURF = I915_READ(DSPASURF); 298 dev_priv->saveDSPASURF = I915_READ(DSPASURF);
299 dev_priv->saveDSPATILEOFF = I915_READ(DSPATILEOFF); 299 dev_priv->saveDSPATILEOFF = I915_READ(DSPATILEOFF);
300 } 300 }
@@ -313,7 +313,7 @@ static void i915_save_modeset_reg(struct drm_device *dev)
313 dev_priv->saveFPB1 = I915_READ(FPB1); 313 dev_priv->saveFPB1 = I915_READ(FPB1);
314 dev_priv->saveDPLL_B = I915_READ(DPLL_B); 314 dev_priv->saveDPLL_B = I915_READ(DPLL_B);
315 } 315 }
316 if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev)) 316 if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
317 dev_priv->saveDPLL_B_MD = I915_READ(DPLL_B_MD); 317 dev_priv->saveDPLL_B_MD = I915_READ(DPLL_B_MD);
318 dev_priv->saveHTOTAL_B = I915_READ(HTOTAL_B); 318 dev_priv->saveHTOTAL_B = I915_READ(HTOTAL_B);
319 dev_priv->saveHBLANK_B = I915_READ(HBLANK_B); 319 dev_priv->saveHBLANK_B = I915_READ(HBLANK_B);
@@ -351,7 +351,7 @@ static void i915_save_modeset_reg(struct drm_device *dev)
351 dev_priv->saveDSPBSIZE = I915_READ(DSPBSIZE); 351 dev_priv->saveDSPBSIZE = I915_READ(DSPBSIZE);
352 dev_priv->saveDSPBPOS = I915_READ(DSPBPOS); 352 dev_priv->saveDSPBPOS = I915_READ(DSPBPOS);
353 dev_priv->saveDSPBADDR = I915_READ(DSPBADDR); 353 dev_priv->saveDSPBADDR = I915_READ(DSPBADDR);
354 if (IS_I965GM(dev) || IS_GM45(dev)) { 354 if (INTEL_INFO(dev)->gen >= 4) {
355 dev_priv->saveDSPBSURF = I915_READ(DSPBSURF); 355 dev_priv->saveDSPBSURF = I915_READ(DSPBSURF);
356 dev_priv->saveDSPBTILEOFF = I915_READ(DSPBTILEOFF); 356 dev_priv->saveDSPBTILEOFF = I915_READ(DSPBTILEOFF);
357 } 357 }
@@ -404,7 +404,7 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
404 I915_WRITE(dpll_a_reg, dev_priv->saveDPLL_A); 404 I915_WRITE(dpll_a_reg, dev_priv->saveDPLL_A);
405 POSTING_READ(dpll_a_reg); 405 POSTING_READ(dpll_a_reg);
406 udelay(150); 406 udelay(150);
407 if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev)) { 407 if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) {
408 I915_WRITE(DPLL_A_MD, dev_priv->saveDPLL_A_MD); 408 I915_WRITE(DPLL_A_MD, dev_priv->saveDPLL_A_MD);
409 POSTING_READ(DPLL_A_MD); 409 POSTING_READ(DPLL_A_MD);
410 } 410 }
@@ -448,7 +448,7 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
448 I915_WRITE(PIPEASRC, dev_priv->savePIPEASRC); 448 I915_WRITE(PIPEASRC, dev_priv->savePIPEASRC);
449 I915_WRITE(DSPAADDR, dev_priv->saveDSPAADDR); 449 I915_WRITE(DSPAADDR, dev_priv->saveDSPAADDR);
450 I915_WRITE(DSPASTRIDE, dev_priv->saveDSPASTRIDE); 450 I915_WRITE(DSPASTRIDE, dev_priv->saveDSPASTRIDE);
451 if (IS_I965G(dev)) { 451 if (INTEL_INFO(dev)->gen >= 4) {
452 I915_WRITE(DSPASURF, dev_priv->saveDSPASURF); 452 I915_WRITE(DSPASURF, dev_priv->saveDSPASURF);
453 I915_WRITE(DSPATILEOFF, dev_priv->saveDSPATILEOFF); 453 I915_WRITE(DSPATILEOFF, dev_priv->saveDSPATILEOFF);
454 } 454 }
@@ -473,7 +473,7 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
473 I915_WRITE(dpll_b_reg, dev_priv->saveDPLL_B); 473 I915_WRITE(dpll_b_reg, dev_priv->saveDPLL_B);
474 POSTING_READ(dpll_b_reg); 474 POSTING_READ(dpll_b_reg);
475 udelay(150); 475 udelay(150);
476 if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev)) { 476 if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) {
477 I915_WRITE(DPLL_B_MD, dev_priv->saveDPLL_B_MD); 477 I915_WRITE(DPLL_B_MD, dev_priv->saveDPLL_B_MD);
478 POSTING_READ(DPLL_B_MD); 478 POSTING_READ(DPLL_B_MD);
479 } 479 }
@@ -517,7 +517,7 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
517 I915_WRITE(PIPEBSRC, dev_priv->savePIPEBSRC); 517 I915_WRITE(PIPEBSRC, dev_priv->savePIPEBSRC);
518 I915_WRITE(DSPBADDR, dev_priv->saveDSPBADDR); 518 I915_WRITE(DSPBADDR, dev_priv->saveDSPBADDR);
519 I915_WRITE(DSPBSTRIDE, dev_priv->saveDSPBSTRIDE); 519 I915_WRITE(DSPBSTRIDE, dev_priv->saveDSPBSTRIDE);
520 if (IS_I965G(dev)) { 520 if (INTEL_INFO(dev)->gen >= 4) {
521 I915_WRITE(DSPBSURF, dev_priv->saveDSPBSURF); 521 I915_WRITE(DSPBSURF, dev_priv->saveDSPBSURF);
522 I915_WRITE(DSPBTILEOFF, dev_priv->saveDSPBTILEOFF); 522 I915_WRITE(DSPBTILEOFF, dev_priv->saveDSPBTILEOFF);
523 } 523 }
@@ -550,7 +550,7 @@ void i915_save_display(struct drm_device *dev)
550 dev_priv->saveCURBCNTR = I915_READ(CURBCNTR); 550 dev_priv->saveCURBCNTR = I915_READ(CURBCNTR);
551 dev_priv->saveCURBPOS = I915_READ(CURBPOS); 551 dev_priv->saveCURBPOS = I915_READ(CURBPOS);
552 dev_priv->saveCURBBASE = I915_READ(CURBBASE); 552 dev_priv->saveCURBBASE = I915_READ(CURBBASE);
553 if (!IS_I9XX(dev)) 553 if (IS_GEN2(dev))
554 dev_priv->saveCURSIZE = I915_READ(CURSIZE); 554 dev_priv->saveCURSIZE = I915_READ(CURSIZE);
555 555
556 /* CRT state */ 556 /* CRT state */
@@ -573,7 +573,7 @@ void i915_save_display(struct drm_device *dev)
573 dev_priv->savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS); 573 dev_priv->savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS);
574 dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL); 574 dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL);
575 dev_priv->saveBLC_HIST_CTL = I915_READ(BLC_HIST_CTL); 575 dev_priv->saveBLC_HIST_CTL = I915_READ(BLC_HIST_CTL);
576 if (IS_I965G(dev)) 576 if (INTEL_INFO(dev)->gen >= 4)
577 dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2); 577 dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2);
578 if (IS_MOBILE(dev) && !IS_I830(dev)) 578 if (IS_MOBILE(dev) && !IS_I830(dev))
579 dev_priv->saveLVDS = I915_READ(LVDS); 579 dev_priv->saveLVDS = I915_READ(LVDS);
@@ -664,7 +664,7 @@ void i915_restore_display(struct drm_device *dev)
664 I915_WRITE(CURBPOS, dev_priv->saveCURBPOS); 664 I915_WRITE(CURBPOS, dev_priv->saveCURBPOS);
665 I915_WRITE(CURBCNTR, dev_priv->saveCURBCNTR); 665 I915_WRITE(CURBCNTR, dev_priv->saveCURBCNTR);
666 I915_WRITE(CURBBASE, dev_priv->saveCURBBASE); 666 I915_WRITE(CURBBASE, dev_priv->saveCURBBASE);
667 if (!IS_I9XX(dev)) 667 if (IS_GEN2(dev))
668 I915_WRITE(CURSIZE, dev_priv->saveCURSIZE); 668 I915_WRITE(CURSIZE, dev_priv->saveCURSIZE);
669 669
670 /* CRT state */ 670 /* CRT state */
@@ -674,7 +674,7 @@ void i915_restore_display(struct drm_device *dev)
674 I915_WRITE(ADPA, dev_priv->saveADPA); 674 I915_WRITE(ADPA, dev_priv->saveADPA);
675 675
676 /* LVDS state */ 676 /* LVDS state */
677 if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev)) 677 if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
678 I915_WRITE(BLC_PWM_CTL2, dev_priv->saveBLC_PWM_CTL2); 678 I915_WRITE(BLC_PWM_CTL2, dev_priv->saveBLC_PWM_CTL2);
679 679
680 if (HAS_PCH_SPLIT(dev)) { 680 if (HAS_PCH_SPLIT(dev)) {
@@ -878,9 +878,7 @@ int i915_restore_state(struct drm_device *dev)
878 for (i = 0; i < 3; i++) 878 for (i = 0; i < 3; i++)
879 I915_WRITE(SWF30 + (i << 2), dev_priv->saveSWF2[i]); 879 I915_WRITE(SWF30 + (i << 2), dev_priv->saveSWF2[i]);
880 880
881 /* I2C state */ 881 intel_i2c_reset(dev);
882 intel_i2c_reset_gmbus(dev);
883 882
884 return 0; 883 return 0;
885} 884}
886
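The suspend/resume conversions above swap chipset-name checks (IS_I965G, IS_I965GM, IS_GM45) for generation comparisons, so future parts with gen >= 4 pick up the right save/restore paths without further edits. A sketch of the pattern, assuming a per-device info structure populated at probe time, as the driver does elsewhere:

/* Device-family metadata attached to each PCI ID (sketch; the
 * driver's real struct carries many more feature flags). */
struct intel_device_info {
	u8 gen;		/* hardware generation: 2, 3, 4, ... */
};

/* Before: per-chipset checks accumulate,
 *	if (IS_I965G(dev) || IS_I965GM(dev) || IS_GM45(dev)) ...
 * After: one comparison covers current and future hardware,
 *	if (INTEL_INFO(dev)->gen >= 4) ...
 */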
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 96f75d7f6633..b1f73ac0f3fd 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -129,10 +129,6 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
129 int i, temp_downclock; 129 int i, temp_downclock;
130 struct drm_display_mode *temp_mode; 130 struct drm_display_mode *temp_mode;
131 131
132 /* Defaults if we can't find VBT info */
133 dev_priv->lvds_dither = 0;
134 dev_priv->lvds_vbt = 0;
135
136 lvds_options = find_section(bdb, BDB_LVDS_OPTIONS); 132 lvds_options = find_section(bdb, BDB_LVDS_OPTIONS);
137 if (!lvds_options) 133 if (!lvds_options)
138 return; 134 return;
@@ -140,6 +136,7 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
140 dev_priv->lvds_dither = lvds_options->pixel_dither; 136 dev_priv->lvds_dither = lvds_options->pixel_dither;
141 if (lvds_options->panel_type == 0xff) 137 if (lvds_options->panel_type == 0xff)
142 return; 138 return;
139
143 panel_type = lvds_options->panel_type; 140 panel_type = lvds_options->panel_type;
144 141
145 lvds_lfp_data = find_section(bdb, BDB_LVDS_LFP_DATA); 142 lvds_lfp_data = find_section(bdb, BDB_LVDS_LFP_DATA);
@@ -169,6 +166,8 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
169 ((unsigned char *)entry + dvo_timing_offset); 166 ((unsigned char *)entry + dvo_timing_offset);
170 167
171 panel_fixed_mode = kzalloc(sizeof(*panel_fixed_mode), GFP_KERNEL); 168 panel_fixed_mode = kzalloc(sizeof(*panel_fixed_mode), GFP_KERNEL);
169 if (!panel_fixed_mode)
170 return;
172 171
173 fill_detail_timing_data(panel_fixed_mode, dvo_timing); 172 fill_detail_timing_data(panel_fixed_mode, dvo_timing);
174 173
@@ -230,8 +229,6 @@ parse_sdvo_panel_data(struct drm_i915_private *dev_priv,
230 struct lvds_dvo_timing *dvo_timing; 229 struct lvds_dvo_timing *dvo_timing;
231 struct drm_display_mode *panel_fixed_mode; 230 struct drm_display_mode *panel_fixed_mode;
232 231
233 dev_priv->sdvo_lvds_vbt_mode = NULL;
234
235 sdvo_lvds_options = find_section(bdb, BDB_SDVO_LVDS_OPTIONS); 232 sdvo_lvds_options = find_section(bdb, BDB_SDVO_LVDS_OPTIONS);
236 if (!sdvo_lvds_options) 233 if (!sdvo_lvds_options)
237 return; 234 return;
@@ -260,10 +257,6 @@ parse_general_features(struct drm_i915_private *dev_priv,
260 struct drm_device *dev = dev_priv->dev; 257 struct drm_device *dev = dev_priv->dev;
261 struct bdb_general_features *general; 258 struct bdb_general_features *general;
262 259
263 /* Set sensible defaults in case we can't find the general block */
264 dev_priv->int_tv_support = 1;
265 dev_priv->int_crt_support = 1;
266
267 general = find_section(bdb, BDB_GENERAL_FEATURES); 260 general = find_section(bdb, BDB_GENERAL_FEATURES);
268 if (general) { 261 if (general) {
269 dev_priv->int_tv_support = general->int_tv_support; 262 dev_priv->int_tv_support = general->int_tv_support;
@@ -289,14 +282,6 @@ parse_general_definitions(struct drm_i915_private *dev_priv,
289 struct bdb_header *bdb) 282 struct bdb_header *bdb)
290{ 283{
291 struct bdb_general_definitions *general; 284 struct bdb_general_definitions *general;
292 const int crt_bus_map_table[] = {
293 GPIOB,
294 GPIOA,
295 GPIOC,
296 GPIOD,
297 GPIOE,
298 GPIOF,
299 };
300 285
301 general = find_section(bdb, BDB_GENERAL_DEFINITIONS); 286 general = find_section(bdb, BDB_GENERAL_DEFINITIONS);
302 if (general) { 287 if (general) {
@@ -304,10 +289,8 @@ parse_general_definitions(struct drm_i915_private *dev_priv,
304 if (block_size >= sizeof(*general)) { 289 if (block_size >= sizeof(*general)) {
305 int bus_pin = general->crt_ddc_gmbus_pin; 290 int bus_pin = general->crt_ddc_gmbus_pin;
306 DRM_DEBUG_KMS("crt_ddc_bus_pin: %d\n", bus_pin); 291 DRM_DEBUG_KMS("crt_ddc_bus_pin: %d\n", bus_pin);
307 if ((bus_pin >= 1) && (bus_pin <= 6)) { 292 if (bus_pin >= 1 && bus_pin <= 6)
308 dev_priv->crt_ddc_bus = 293 dev_priv->crt_ddc_pin = bus_pin;
309 crt_bus_map_table[bus_pin-1];
310 }
311 } else { 294 } else {
312 DRM_DEBUG_KMS("BDB_GD too small (%d). Invalid.\n", 295 DRM_DEBUG_KMS("BDB_GD too small (%d). Invalid.\n",
313 block_size); 296 block_size);
@@ -317,7 +300,7 @@ parse_general_definitions(struct drm_i915_private *dev_priv,
317 300
318static void 301static void
319parse_sdvo_device_mapping(struct drm_i915_private *dev_priv, 302parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
320 struct bdb_header *bdb) 303 struct bdb_header *bdb)
321{ 304{
322 struct sdvo_device_mapping *p_mapping; 305 struct sdvo_device_mapping *p_mapping;
323 struct bdb_general_definitions *p_defs; 306 struct bdb_general_definitions *p_defs;
@@ -327,7 +310,7 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
327 310
328 p_defs = find_section(bdb, BDB_GENERAL_DEFINITIONS); 311 p_defs = find_section(bdb, BDB_GENERAL_DEFINITIONS);
329 if (!p_defs) { 312 if (!p_defs) {
330 DRM_DEBUG_KMS("No general definition block is found\n"); 313 DRM_DEBUG_KMS("No general definition block is found, unable to construct sdvo mapping.\n");
331 return; 314 return;
332 } 315 }
333 /* judge whether the size of child device meets the requirements. 316 /* judge whether the size of child device meets the requirements.
@@ -377,7 +360,16 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
377 p_mapping->slave_addr = p_child->slave_addr; 360 p_mapping->slave_addr = p_child->slave_addr;
378 p_mapping->dvo_wiring = p_child->dvo_wiring; 361 p_mapping->dvo_wiring = p_child->dvo_wiring;
379 p_mapping->ddc_pin = p_child->ddc_pin; 362 p_mapping->ddc_pin = p_child->ddc_pin;
363 p_mapping->i2c_pin = p_child->i2c_pin;
364 p_mapping->i2c_speed = p_child->i2c_speed;
380 p_mapping->initialized = 1; 365 p_mapping->initialized = 1;
366 DRM_DEBUG_KMS("SDVO device: dvo=%x, addr=%x, wiring=%d, ddc_pin=%d, i2c_pin=%d, i2c_speed=%d\n",
367 p_mapping->dvo_port,
368 p_mapping->slave_addr,
369 p_mapping->dvo_wiring,
370 p_mapping->ddc_pin,
371 p_mapping->i2c_pin,
372 p_mapping->i2c_speed);
381 } else { 373 } else {
382 DRM_DEBUG_KMS("Maybe one SDVO port is shared by " 374 DRM_DEBUG_KMS("Maybe one SDVO port is shared by "
383 "two SDVO device.\n"); 375 "two SDVO device.\n");
@@ -409,14 +401,11 @@ parse_driver_features(struct drm_i915_private *dev_priv,
409 if (!driver) 401 if (!driver)
410 return; 402 return;
411 403
412 if (driver && SUPPORTS_EDP(dev) && 404 if (SUPPORTS_EDP(dev) &&
413 driver->lvds_config == BDB_DRIVER_FEATURE_EDP) { 405 driver->lvds_config == BDB_DRIVER_FEATURE_EDP)
414 dev_priv->edp_support = 1; 406 dev_priv->edp.support = 1;
415 } else {
416 dev_priv->edp_support = 0;
417 }
418 407
419 if (driver && driver->dual_frequency) 408 if (driver->dual_frequency)
420 dev_priv->render_reclock_avail = true; 409 dev_priv->render_reclock_avail = true;
421} 410}
422 411
@@ -427,26 +416,40 @@ parse_edp(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
427 416
428 edp = find_section(bdb, BDB_EDP); 417 edp = find_section(bdb, BDB_EDP);
429 if (!edp) { 418 if (!edp) {
430 if (SUPPORTS_EDP(dev_priv->dev) && dev_priv->edp_support) { 419 if (SUPPORTS_EDP(dev_priv->dev) && dev_priv->edp.support) {
431 DRM_DEBUG_KMS("No eDP BDB found but eDP panel " 420 DRM_DEBUG_KMS("No eDP BDB found but eDP panel "
432 "supported, assume 18bpp panel color " 421 "supported, assume %dbpp panel color "
433 "depth.\n"); 422 "depth.\n",
434 dev_priv->edp_bpp = 18; 423 dev_priv->edp.bpp);
435 } 424 }
436 return; 425 return;
437 } 426 }
438 427
439 switch ((edp->color_depth >> (panel_type * 2)) & 3) { 428 switch ((edp->color_depth >> (panel_type * 2)) & 3) {
440 case EDP_18BPP: 429 case EDP_18BPP:
441 dev_priv->edp_bpp = 18; 430 dev_priv->edp.bpp = 18;
442 break; 431 break;
443 case EDP_24BPP: 432 case EDP_24BPP:
444 dev_priv->edp_bpp = 24; 433 dev_priv->edp.bpp = 24;
445 break; 434 break;
446 case EDP_30BPP: 435 case EDP_30BPP:
447 dev_priv->edp_bpp = 30; 436 dev_priv->edp.bpp = 30;
448 break; 437 break;
449 } 438 }
439
440 dev_priv->edp.rate = edp->link_params[panel_type].rate;
441 dev_priv->edp.lanes = edp->link_params[panel_type].lanes;
442 dev_priv->edp.preemphasis = edp->link_params[panel_type].preemphasis;
443 dev_priv->edp.vswing = edp->link_params[panel_type].vswing;
444
445 DRM_DEBUG_KMS("eDP vBIOS settings: bpp=%d, rate=%d, lanes=%d, preemphasis=%d, vswing=%d\n",
446 dev_priv->edp.bpp,
447 dev_priv->edp.rate,
448 dev_priv->edp.lanes,
449 dev_priv->edp.preemphasis,
450 dev_priv->edp.vswing);
451
452 dev_priv->edp.initialized = true;
450} 453}
451 454
452static void 455static void
@@ -460,7 +463,7 @@ parse_device_mapping(struct drm_i915_private *dev_priv,
460 463
461 p_defs = find_section(bdb, BDB_GENERAL_DEFINITIONS); 464 p_defs = find_section(bdb, BDB_GENERAL_DEFINITIONS);
462 if (!p_defs) { 465 if (!p_defs) {
463 DRM_DEBUG_KMS("No general definition block is found\n"); 466 DRM_DEBUG_KMS("No general definition block is found, no devices defined.\n");
464 return; 467 return;
465 } 468 }
466 /* judge whether the size of child device meets the requirements. 469 /* judge whether the size of child device meets the requirements.
@@ -513,6 +516,28 @@ parse_device_mapping(struct drm_i915_private *dev_priv,
513 } 516 }
514 return; 517 return;
515} 518}
519
520static void
521init_vbt_defaults(struct drm_i915_private *dev_priv)
522{
523 dev_priv->crt_ddc_pin = GMBUS_PORT_VGADDC;
524
525 /* LFP panel data */
526 dev_priv->lvds_dither = 1;
527 dev_priv->lvds_vbt = 0;
528
529 /* SDVO panel data */
530 dev_priv->sdvo_lvds_vbt_mode = NULL;
531
532 /* general features */
533 dev_priv->int_tv_support = 1;
534 dev_priv->int_crt_support = 1;
535 dev_priv->lvds_use_ssc = 0;
536
537 /* eDP data */
538 dev_priv->edp.bpp = 18;
539}
540
516/** 541/**
517 * intel_init_bios - initialize VBIOS settings & find VBT 542 * intel_init_bios - initialize VBIOS settings & find VBT
518 * @dev: DRM device 543 * @dev: DRM device
@@ -520,11 +545,6 @@ parse_device_mapping(struct drm_i915_private *dev_priv,
520 * Loads the Video BIOS and checks that the VBT exists. Sets scratch registers 545 * Loads the Video BIOS and checks that the VBT exists. Sets scratch registers
521 * to appropriate values. 546 * to appropriate values.
522 * 547 *
523 * VBT existence is a sanity check that is relied on by other i830_bios.c code.
524 * Note that it would be better to use a BIOS call to get the VBT, as BIOSes may
525 * feed an updated VBT back through that, compared to what we'll fetch using
526 * this method of groping around in the BIOS data.
527 *
528 * Returns 0 on success, nonzero on failure. 548 * Returns 0 on success, nonzero on failure.
529 */ 549 */
530bool 550bool
@@ -532,31 +552,47 @@ intel_init_bios(struct drm_device *dev)
532{ 552{
533 struct drm_i915_private *dev_priv = dev->dev_private; 553 struct drm_i915_private *dev_priv = dev->dev_private;
534 struct pci_dev *pdev = dev->pdev; 554 struct pci_dev *pdev = dev->pdev;
535 struct vbt_header *vbt = NULL; 555 struct bdb_header *bdb = NULL;
536 struct bdb_header *bdb; 556 u8 __iomem *bios = NULL;
537 u8 __iomem *bios; 557
538 size_t size; 558 init_vbt_defaults(dev_priv);
539 int i; 559
540 560 /* XXX Should this validation be moved to intel_opregion.c? */
541 bios = pci_map_rom(pdev, &size); 561 if (dev_priv->opregion.vbt) {
542 if (!bios) 562 struct vbt_header *vbt = dev_priv->opregion.vbt;
543 return -1; 563 if (memcmp(vbt->signature, "$VBT", 4) == 0) {
544 564 DRM_DEBUG_DRIVER("Using VBT from OpRegion: %20s\n",
545 /* Scour memory looking for the VBT signature */ 565 vbt->signature);
546 for (i = 0; i + 4 < size; i++) { 566 bdb = (struct bdb_header *)((char *)vbt + vbt->bdb_offset);
547 if (!memcmp(bios + i, "$VBT", 4)) { 567 } else
548 vbt = (struct vbt_header *)(bios + i); 568 dev_priv->opregion.vbt = NULL;
549 break;
550 }
551 } 569 }
552 570
553 if (!vbt) { 571 if (bdb == NULL) {
554 DRM_ERROR("VBT signature missing\n"); 572 struct vbt_header *vbt = NULL;
555 pci_unmap_rom(pdev, bios); 573 size_t size;
556 return -1; 574 int i;
557 } 575
576 bios = pci_map_rom(pdev, &size);
577 if (!bios)
578 return -1;
579
580 /* Scour memory looking for the VBT signature */
581 for (i = 0; i + 4 < size; i++) {
582 if (!memcmp(bios + i, "$VBT", 4)) {
583 vbt = (struct vbt_header *)(bios + i);
584 break;
585 }
586 }
558 587
559 bdb = (struct bdb_header *)(bios + i + vbt->bdb_offset); 588 if (!vbt) {
589 DRM_ERROR("VBT signature missing\n");
590 pci_unmap_rom(pdev, bios);
591 return -1;
592 }
593
594 bdb = (struct bdb_header *)(bios + i + vbt->bdb_offset);
595 }
560 596
561 /* Grab useful general definitions */ 597 /* Grab useful general definitions */
562 parse_general_features(dev_priv, bdb); 598 parse_general_features(dev_priv, bdb);
@@ -568,7 +604,8 @@ intel_init_bios(struct drm_device *dev)
568 parse_driver_features(dev_priv, bdb); 604 parse_driver_features(dev_priv, bdb);
569 parse_edp(dev_priv, bdb); 605 parse_edp(dev_priv, bdb);
570 606
571 pci_unmap_rom(pdev, bios); 607 if (bios)
608 pci_unmap_rom(pdev, bios);
572 609
573 return 0; 610 return 0;
574} 611}
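Two structural changes run through the intel_bios.c hunks above: scattered per-parser defaults collapse into a single init_vbt_defaults() pass run before any section is parsed, and the VBT is now taken from the ACPI OpRegion when its signature validates, with the PCI ROM scan kept only as a fallback. A self-contained sketch of that signature scan; find_vbt is a hypothetical name (the driver open-codes the loop), and struct vbt_header comes from intel_bios.h:

#include <linux/string.h>

/* Scan a mapped ROM image of 'size' bytes for the "$VBT" marker. */
static struct vbt_header *find_vbt(u8 __iomem *bios, size_t size)
{
	size_t i;

	for (i = 0; i + 4 < size; i++)
		if (memcmp(bios + i, "$VBT", 4) == 0)
			return (struct vbt_header *)(bios + i);

	return NULL;	/* caller falls back to an error path */
}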
diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h
index 4c18514f6f80..e1a598f2a966 100644
--- a/drivers/gpu/drm/i915/intel_bios.h
+++ b/drivers/gpu/drm/i915/intel_bios.h
@@ -197,7 +197,8 @@ struct bdb_general_features {
197struct child_device_config { 197struct child_device_config {
198 u16 handle; 198 u16 handle;
199 u16 device_type; 199 u16 device_type;
200 u8 device_id[10]; /* See DEVICE_TYPE_* above */ 200 u8 i2c_speed;
201 u8 rsvd[9];
201 u16 addin_offset; 202 u16 addin_offset;
202 u8 dvo_port; /* See Device_PORT_* above */ 203 u8 dvo_port; /* See Device_PORT_* above */
203 u8 i2c_pin; 204 u8 i2c_pin;
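The intel_bios.h change above carves the first byte of the old device_id[10] array into an i2c_speed field while keeping the overall layout intact, so every later member stays at its original offset. Illustrative before/after, assuming the packed layout VBT structures use and kernel types from linux/types.h:

/* Old and new layouts both put addin_offset at byte 14 (sketch). */
struct child_old {
	u16 handle;		/* bytes 0-1   */
	u16 device_type;	/* bytes 2-3   */
	u8  device_id[10];	/* bytes 4-13  */
	u16 addin_offset;	/* bytes 14-15 */
} __attribute__((packed));

struct child_new {
	u16 handle;
	u16 device_type;
	u8  i2c_speed;		/* byte 4: reclaimed from device_id[0] */
	u8  rsvd[9];		/* bytes 5-13 */
	u16 addin_offset;	/* still bytes 14-15 */
} __attribute__((packed));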
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 197d4f32585a..389fcd2aea1f 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -79,7 +79,7 @@ static int intel_crt_mode_valid(struct drm_connector *connector,
79 if (mode->clock < 25000) 79 if (mode->clock < 25000)
80 return MODE_CLOCK_LOW; 80 return MODE_CLOCK_LOW;
81 81
82 if (!IS_I9XX(dev)) 82 if (IS_GEN2(dev))
83 max_clock = 350000; 83 max_clock = 350000;
84 else 84 else
85 max_clock = 400000; 85 max_clock = 400000;
@@ -123,7 +123,7 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
123 * Disable separate mode multiplier used when cloning SDVO to CRT 123 * Disable separate mode multiplier used when cloning SDVO to CRT
124 * XXX this needs to be adjusted when we really are cloning 124 * XXX this needs to be adjusted when we really are cloning
125 */ 125 */
126 if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev)) { 126 if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) {
127 dpll_md = I915_READ(dpll_md_reg); 127 dpll_md = I915_READ(dpll_md_reg);
128 I915_WRITE(dpll_md_reg, 128 I915_WRITE(dpll_md_reg,
129 dpll_md & ~DPLL_MD_UDI_MULTIPLIER_MASK); 129 dpll_md & ~DPLL_MD_UDI_MULTIPLIER_MASK);
@@ -187,7 +187,7 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
187 I915_WRITE(PCH_ADPA, adpa); 187 I915_WRITE(PCH_ADPA, adpa);
188 188
189 if (wait_for((I915_READ(PCH_ADPA) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0, 189 if (wait_for((I915_READ(PCH_ADPA) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0,
190 1000, 1)) 190 1000))
191 DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER"); 191 DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER");
192 192
193 if (turn_off_dac) { 193 if (turn_off_dac) {
@@ -244,7 +244,7 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
244 /* wait for FORCE_DETECT to go off */ 244 /* wait for FORCE_DETECT to go off */
245 if (wait_for((I915_READ(PORT_HOTPLUG_EN) & 245 if (wait_for((I915_READ(PORT_HOTPLUG_EN) &
246 CRT_HOTPLUG_FORCE_DETECT) == 0, 246 CRT_HOTPLUG_FORCE_DETECT) == 0,
247 1000, 1)) 247 1000))
248 DRM_DEBUG_KMS("timed out waiting for FORCE_DETECT to go off"); 248 DRM_DEBUG_KMS("timed out waiting for FORCE_DETECT to go off");
249 } 249 }
250 250
@@ -261,21 +261,47 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
261 return ret; 261 return ret;
262} 262}
263 263
264static bool intel_crt_ddc_probe(struct drm_i915_private *dev_priv, int ddc_bus)
265{
266 u8 buf;
267 struct i2c_msg msgs[] = {
268 {
269 .addr = 0xA0,
270 .flags = 0,
271 .len = 1,
272 .buf = &buf,
273 },
274 };
275 /* DDC monitor detect: Does it ACK a write to 0xA0? */
276 return i2c_transfer(&dev_priv->gmbus[ddc_bus].adapter, msgs, 1) == 1;
277}
278
264static bool intel_crt_detect_ddc(struct drm_encoder *encoder) 279static bool intel_crt_detect_ddc(struct drm_encoder *encoder)
265{ 280{
266 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); 281 struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
282 struct drm_i915_private *dev_priv = encoder->dev->dev_private;
267 283
268 /* CRT should always be at 0, but check anyway */ 284 /* CRT should always be at 0, but check anyway */
269 if (intel_encoder->type != INTEL_OUTPUT_ANALOG) 285 if (intel_encoder->type != INTEL_OUTPUT_ANALOG)
270 return false; 286 return false;
271 287
272 return intel_ddc_probe(intel_encoder); 288 if (intel_crt_ddc_probe(dev_priv, dev_priv->crt_ddc_pin)) {
289 DRM_DEBUG_KMS("CRT detected via DDC:0xa0\n");
290 return true;
291 }
292
293 if (intel_ddc_probe(intel_encoder, dev_priv->crt_ddc_pin)) {
294 DRM_DEBUG_KMS("CRT detected via DDC:0x50 [EDID]\n");
295 return true;
296 }
297
298 return false;
273} 299}
274 300
275static enum drm_connector_status 301static enum drm_connector_status
276intel_crt_load_detect(struct drm_crtc *crtc, struct intel_encoder *intel_encoder) 302intel_crt_load_detect(struct drm_crtc *crtc, struct intel_encoder *intel_encoder)
277{ 303{
278 struct drm_encoder *encoder = &intel_encoder->enc; 304 struct drm_encoder *encoder = &intel_encoder->base;
279 struct drm_device *dev = encoder->dev; 305 struct drm_device *dev = encoder->dev;
280 struct drm_i915_private *dev_priv = dev->dev_private; 306 struct drm_i915_private *dev_priv = dev->dev_private;
281 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 307 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -295,6 +321,8 @@ intel_crt_load_detect(struct drm_crtc *crtc, struct intel_encoder *intel_encoder
295 uint8_t st00; 321 uint8_t st00;
296 enum drm_connector_status status; 322 enum drm_connector_status status;
297 323
324 DRM_DEBUG_KMS("starting load-detect on CRT\n");
325
298 if (pipe == 0) { 326 if (pipe == 0) {
299 bclrpat_reg = BCLRPAT_A; 327 bclrpat_reg = BCLRPAT_A;
300 vtotal_reg = VTOTAL_A; 328 vtotal_reg = VTOTAL_A;
@@ -324,9 +352,10 @@ intel_crt_load_detect(struct drm_crtc *crtc, struct intel_encoder *intel_encoder
324 /* Set the border color to purple. */ 352 /* Set the border color to purple. */
325 I915_WRITE(bclrpat_reg, 0x500050); 353 I915_WRITE(bclrpat_reg, 0x500050);
326 354
327 if (IS_I9XX(dev)) { 355 if (!IS_GEN2(dev)) {
328 uint32_t pipeconf = I915_READ(pipeconf_reg); 356 uint32_t pipeconf = I915_READ(pipeconf_reg);
329 I915_WRITE(pipeconf_reg, pipeconf | PIPECONF_FORCE_BORDER); 357 I915_WRITE(pipeconf_reg, pipeconf | PIPECONF_FORCE_BORDER);
358 POSTING_READ(pipeconf_reg);
330 /* Wait for next Vblank to substitute 359 /* Wait for next Vblank to substitute
331 * border color for Color info */ 360 * border color for Color info */
332 intel_wait_for_vblank(dev, pipe); 361 intel_wait_for_vblank(dev, pipe);
@@ -404,34 +433,37 @@ static enum drm_connector_status
404intel_crt_detect(struct drm_connector *connector, bool force) 433intel_crt_detect(struct drm_connector *connector, bool force)
405{ 434{
406 struct drm_device *dev = connector->dev; 435 struct drm_device *dev = connector->dev;
407 struct drm_encoder *encoder = intel_attached_encoder(connector); 436 struct intel_encoder *encoder = intel_attached_encoder(connector);
408 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
409 struct drm_crtc *crtc; 437 struct drm_crtc *crtc;
410 int dpms_mode; 438 int dpms_mode;
411 enum drm_connector_status status; 439 enum drm_connector_status status;
412 440
413 if (IS_I9XX(dev) && !IS_I915G(dev) && !IS_I915GM(dev)) { 441 if (I915_HAS_HOTPLUG(dev)) {
414 if (intel_crt_detect_hotplug(connector)) 442 if (intel_crt_detect_hotplug(connector)) {
443 DRM_DEBUG_KMS("CRT detected via hotplug\n");
415 return connector_status_connected; 444 return connector_status_connected;
416 else 445 } else
417 return connector_status_disconnected; 446 return connector_status_disconnected;
418 } 447 }
419 448
420 if (intel_crt_detect_ddc(encoder)) 449 if (intel_crt_detect_ddc(&encoder->base))
421 return connector_status_connected; 450 return connector_status_connected;
422 451
423 if (!force) 452 if (!force)
424 return connector->status; 453 return connector->status;
425 454
426 /* for pre-945g platforms use load detect */ 455 /* for pre-945g platforms use load detect */
427 if (encoder->crtc && encoder->crtc->enabled) { 456 if (encoder->base.crtc && encoder->base.crtc->enabled) {
428 status = intel_crt_load_detect(encoder->crtc, intel_encoder); 457 status = intel_crt_load_detect(encoder->base.crtc, encoder);
429 } else { 458 } else {
430 crtc = intel_get_load_detect_pipe(intel_encoder, connector, 459 crtc = intel_get_load_detect_pipe(encoder, connector,
431 NULL, &dpms_mode); 460 NULL, &dpms_mode);
432 if (crtc) { 461 if (crtc) {
433 status = intel_crt_load_detect(crtc, intel_encoder); 462 if (intel_crt_detect_ddc(&encoder->base))
434 intel_release_load_detect_pipe(intel_encoder, 463 status = connector_status_connected;
464 else
465 status = intel_crt_load_detect(crtc, encoder);
466 intel_release_load_detect_pipe(encoder,
435 connector, dpms_mode); 467 connector, dpms_mode);
436 } else 468 } else
437 status = connector_status_unknown; 469 status = connector_status_unknown;
@@ -449,32 +481,18 @@ static void intel_crt_destroy(struct drm_connector *connector)
449 481
450static int intel_crt_get_modes(struct drm_connector *connector) 482static int intel_crt_get_modes(struct drm_connector *connector)
451{ 483{
452 int ret;
453 struct drm_encoder *encoder = intel_attached_encoder(connector);
454 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
455 struct i2c_adapter *ddc_bus;
456 struct drm_device *dev = connector->dev; 484 struct drm_device *dev = connector->dev;
485 struct drm_i915_private *dev_priv = dev->dev_private;
486 int ret;
457 487
458 488 ret = intel_ddc_get_modes(connector,
459 ret = intel_ddc_get_modes(connector, intel_encoder->ddc_bus); 489 &dev_priv->gmbus[dev_priv->crt_ddc_pin].adapter);
460 if (ret || !IS_G4X(dev)) 490 if (ret || !IS_G4X(dev))
461 goto end; 491 return ret;
462 492
463 /* Try to probe digital port for output in DVI-I -> VGA mode. */ 493 /* Try to probe digital port for output in DVI-I -> VGA mode. */
464 ddc_bus = intel_i2c_create(connector->dev, GPIOD, "CRTDDC_D"); 494 return intel_ddc_get_modes(connector,
465 495 &dev_priv->gmbus[GMBUS_PORT_DPB].adapter);
466 if (!ddc_bus) {
467 dev_printk(KERN_ERR, &connector->dev->pdev->dev,
468 "DDC bus registration failed for CRTDDC_D.\n");
469 goto end;
470 }
471 /* Try to get modes by GPIOD port */
472 ret = intel_ddc_get_modes(connector, ddc_bus);
473 intel_i2c_destroy(ddc_bus);
474
475end:
476 return ret;
477
478} 496}
479 497
480static int intel_crt_set_property(struct drm_connector *connector, 498static int intel_crt_set_property(struct drm_connector *connector,
@@ -507,7 +525,7 @@ static const struct drm_connector_funcs intel_crt_connector_funcs = {
507static const struct drm_connector_helper_funcs intel_crt_connector_helper_funcs = { 525static const struct drm_connector_helper_funcs intel_crt_connector_helper_funcs = {
508 .mode_valid = intel_crt_mode_valid, 526 .mode_valid = intel_crt_mode_valid,
509 .get_modes = intel_crt_get_modes, 527 .get_modes = intel_crt_get_modes,
510 .best_encoder = intel_attached_encoder, 528 .best_encoder = intel_best_encoder,
511}; 529};
512 530
513static const struct drm_encoder_funcs intel_crt_enc_funcs = { 531static const struct drm_encoder_funcs intel_crt_enc_funcs = {
@@ -520,7 +538,6 @@ void intel_crt_init(struct drm_device *dev)
520 struct intel_encoder *intel_encoder; 538 struct intel_encoder *intel_encoder;
521 struct intel_connector *intel_connector; 539 struct intel_connector *intel_connector;
522 struct drm_i915_private *dev_priv = dev->dev_private; 540 struct drm_i915_private *dev_priv = dev->dev_private;
523 u32 i2c_reg;
524 541
525 intel_encoder = kzalloc(sizeof(struct intel_encoder), GFP_KERNEL); 542 intel_encoder = kzalloc(sizeof(struct intel_encoder), GFP_KERNEL);
526 if (!intel_encoder) 543 if (!intel_encoder)
@@ -536,27 +553,10 @@ void intel_crt_init(struct drm_device *dev)
536 drm_connector_init(dev, &intel_connector->base, 553 drm_connector_init(dev, &intel_connector->base,
537 &intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA); 554 &intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA);
538 555
539 drm_encoder_init(dev, &intel_encoder->enc, &intel_crt_enc_funcs, 556 drm_encoder_init(dev, &intel_encoder->base, &intel_crt_enc_funcs,
540 DRM_MODE_ENCODER_DAC); 557 DRM_MODE_ENCODER_DAC);
541 558
542 drm_mode_connector_attach_encoder(&intel_connector->base, 559 intel_connector_attach_encoder(intel_connector, intel_encoder);
543 &intel_encoder->enc);
544
545 /* Set up the DDC bus. */
546 if (HAS_PCH_SPLIT(dev))
547 i2c_reg = PCH_GPIOA;
548 else {
549 i2c_reg = GPIOA;
550 /* Use VBT information for CRT DDC if available */
551 if (dev_priv->crt_ddc_bus != 0)
552 i2c_reg = dev_priv->crt_ddc_bus;
553 }
554 intel_encoder->ddc_bus = intel_i2c_create(dev, i2c_reg, "CRTDDC_A");
555 if (!intel_encoder->ddc_bus) {
556 dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration "
557 "failed.\n");
558 return;
559 }
560 560
561 intel_encoder->type = INTEL_OUTPUT_ANALOG; 561 intel_encoder->type = INTEL_OUTPUT_ANALOG;
562 intel_encoder->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) | 562 intel_encoder->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
@@ -566,7 +566,7 @@ void intel_crt_init(struct drm_device *dev)
566 connector->interlace_allowed = 1; 566 connector->interlace_allowed = 1;
567 connector->doublescan_allowed = 0; 567 connector->doublescan_allowed = 0;
568 568
569 drm_encoder_helper_add(&intel_encoder->enc, &intel_crt_helper_funcs); 569 drm_encoder_helper_add(&intel_encoder->base, &intel_crt_helper_funcs);
570 drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs); 570 drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs);
571 571
572 drm_sysfs_connector_add(connector); 572 drm_sysfs_connector_add(connector);
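The CRT rework above layers its detection: hotplug sensing where the hardware supports it, then two DDC probes (a bare ACK test at 0xA0, then an EDID fetch at the usual 0x50 address), and load detection on a borrowed pipe only as a last resort. A sketch of the ACK-style presence probe, generalized over the target address; the adapter argument stands in for dev_priv->gmbus[pin].adapter:

#include <linux/i2c.h>

/* One-byte write that succeeds only if a device ACKs the address
 * (sketch of the probe intel_crt_ddc_probe() performs at 0xA0). */
static bool ddc_ack_probe(struct i2c_adapter *adapter, u16 addr)
{
	u8 buf = 0;
	struct i2c_msg msg = {
		.addr  = addr,
		.flags = 0,	/* write */
		.len   = 1,
		.buf   = &buf,
	};

	return i2c_transfer(adapter, &msg, 1) == 1;
}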
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index b5bf51a4502d..69c54c5a4254 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -43,8 +43,8 @@
43 43
44bool intel_pipe_has_type (struct drm_crtc *crtc, int type); 44bool intel_pipe_has_type (struct drm_crtc *crtc, int type);
45static void intel_update_watermarks(struct drm_device *dev); 45static void intel_update_watermarks(struct drm_device *dev);
46static void intel_increase_pllclock(struct drm_crtc *crtc, bool schedule); 46static void intel_increase_pllclock(struct drm_crtc *crtc);
47static void intel_crtc_update_cursor(struct drm_crtc *crtc); 47static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
48 48
49typedef struct { 49typedef struct {
50 /* given values */ 50 /* given values */
@@ -342,6 +342,13 @@ static bool
342intel_find_pll_ironlake_dp(const intel_limit_t *, struct drm_crtc *crtc, 342intel_find_pll_ironlake_dp(const intel_limit_t *, struct drm_crtc *crtc,
343 int target, int refclk, intel_clock_t *best_clock); 343 int target, int refclk, intel_clock_t *best_clock);
344 344
345static inline u32 /* units of 100MHz */
346intel_fdi_link_freq(struct drm_device *dev)
347{
348 struct drm_i915_private *dev_priv = dev->dev_private;
349 return (I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2;
350}
351
345static const intel_limit_t intel_limits_i8xx_dvo = { 352static const intel_limit_t intel_limits_i8xx_dvo = {
346 .dot = { .min = I8XX_DOT_MIN, .max = I8XX_DOT_MAX }, 353 .dot = { .min = I8XX_DOT_MIN, .max = I8XX_DOT_MAX },
347 .vco = { .min = I8XX_VCO_MIN, .max = I8XX_VCO_MAX }, 354 .vco = { .min = I8XX_VCO_MIN, .max = I8XX_VCO_MAX },
@@ -701,16 +708,16 @@ static const intel_limit_t *intel_limit(struct drm_crtc *crtc)
701 limit = intel_ironlake_limit(crtc); 708 limit = intel_ironlake_limit(crtc);
702 else if (IS_G4X(dev)) { 709 else if (IS_G4X(dev)) {
703 limit = intel_g4x_limit(crtc); 710 limit = intel_g4x_limit(crtc);
704 } else if (IS_I9XX(dev) && !IS_PINEVIEW(dev)) {
705 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
706 limit = &intel_limits_i9xx_lvds;
707 else
708 limit = &intel_limits_i9xx_sdvo;
709 } else if (IS_PINEVIEW(dev)) { 711 } else if (IS_PINEVIEW(dev)) {
710 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) 712 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
711 limit = &intel_limits_pineview_lvds; 713 limit = &intel_limits_pineview_lvds;
712 else 714 else
713 limit = &intel_limits_pineview_sdvo; 715 limit = &intel_limits_pineview_sdvo;
716 } else if (!IS_GEN2(dev)) {
717 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
718 limit = &intel_limits_i9xx_lvds;
719 else
720 limit = &intel_limits_i9xx_sdvo;
714 } else { 721 } else {
715 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) 722 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
716 limit = &intel_limits_i8xx_lvds; 723 limit = &intel_limits_i8xx_lvds;
@@ -744,20 +751,17 @@ static void intel_clock(struct drm_device *dev, int refclk, intel_clock_t *clock
744/** 751/**
745 * Returns whether any output on the specified pipe is of the specified type 752 * Returns whether any output on the specified pipe is of the specified type
746 */ 753 */
747bool intel_pipe_has_type (struct drm_crtc *crtc, int type) 754bool intel_pipe_has_type(struct drm_crtc *crtc, int type)
748{ 755{
749 struct drm_device *dev = crtc->dev; 756 struct drm_device *dev = crtc->dev;
750 struct drm_mode_config *mode_config = &dev->mode_config; 757 struct drm_mode_config *mode_config = &dev->mode_config;
751 struct drm_encoder *l_entry; 758 struct intel_encoder *encoder;
759
760 list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
761 if (encoder->base.crtc == crtc && encoder->type == type)
762 return true;
752 763
753 list_for_each_entry(l_entry, &mode_config->encoder_list, head) { 764 return false;
754 if (l_entry && l_entry->crtc == crtc) {
755 struct intel_encoder *intel_encoder = enc_to_intel_encoder(l_entry);
756 if (intel_encoder->type == type)
757 return true;
758 }
759 }
760 return false;
761} 765}
762 766
763#define INTELPllInvalid(s) do { /* DRM_DEBUG(s); */ return false; } while (0) 767#define INTELPllInvalid(s) do { /* DRM_DEBUG(s); */ return false; } while (0)
@@ -955,26 +959,26 @@ static bool
955intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc, 959intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
956 int target, int refclk, intel_clock_t *best_clock) 960 int target, int refclk, intel_clock_t *best_clock)
957{ 961{
958 intel_clock_t clock; 962 intel_clock_t clock;
959 if (target < 200000) { 963 if (target < 200000) {
960 clock.p1 = 2; 964 clock.p1 = 2;
961 clock.p2 = 10; 965 clock.p2 = 10;
962 clock.n = 2; 966 clock.n = 2;
963 clock.m1 = 23; 967 clock.m1 = 23;
964 clock.m2 = 8; 968 clock.m2 = 8;
965 } else { 969 } else {
966 clock.p1 = 1; 970 clock.p1 = 1;
967 clock.p2 = 10; 971 clock.p2 = 10;
968 clock.n = 1; 972 clock.n = 1;
969 clock.m1 = 14; 973 clock.m1 = 14;
970 clock.m2 = 2; 974 clock.m2 = 2;
971 } 975 }
972 clock.m = 5 * (clock.m1 + 2) + (clock.m2 + 2); 976 clock.m = 5 * (clock.m1 + 2) + (clock.m2 + 2);
973 clock.p = (clock.p1 * clock.p2); 977 clock.p = (clock.p1 * clock.p2);
974 clock.dot = 96000 * clock.m / (clock.n + 2) / clock.p; 978 clock.dot = 96000 * clock.m / (clock.n + 2) / clock.p;
975 clock.vco = 0; 979 clock.vco = 0;
976 memcpy(best_clock, &clock, sizeof(intel_clock_t)); 980 memcpy(best_clock, &clock, sizeof(intel_clock_t));
977 return true; 981 return true;
978} 982}
979 983
980/** 984/**
@@ -1007,14 +1011,14 @@ void intel_wait_for_vblank(struct drm_device *dev, int pipe)
1007 I915_READ(pipestat_reg) | PIPE_VBLANK_INTERRUPT_STATUS); 1011 I915_READ(pipestat_reg) | PIPE_VBLANK_INTERRUPT_STATUS);
1008 1012
1009 /* Wait for vblank interrupt bit to set */ 1013 /* Wait for vblank interrupt bit to set */
1010 if (wait_for((I915_READ(pipestat_reg) & 1014 if (wait_for(I915_READ(pipestat_reg) &
1011 PIPE_VBLANK_INTERRUPT_STATUS), 1015 PIPE_VBLANK_INTERRUPT_STATUS,
1012 50, 0)) 1016 50))
1013 DRM_DEBUG_KMS("vblank wait timed out\n"); 1017 DRM_DEBUG_KMS("vblank wait timed out\n");
1014} 1018}
1015 1019
1016/** 1020/*
1017 * intel_wait_for_vblank_off - wait for vblank after disabling a pipe 1021 * intel_wait_for_pipe_off - wait for pipe to turn off
1018 * @dev: drm device 1022 * @dev: drm device
1019 * @pipe: pipe to wait for 1023 * @pipe: pipe to wait for
1020 * 1024 *
@@ -1022,28 +1026,41 @@ void intel_wait_for_vblank(struct drm_device *dev, int pipe)
1022 * spinning on the vblank interrupt status bit, since we won't actually 1026 * spinning on the vblank interrupt status bit, since we won't actually
1023 * see an interrupt when the pipe is disabled. 1027 * see an interrupt when the pipe is disabled.
1024 * 1028 *
1025 * So this function waits for the display line value to settle (it 1029 * On Gen4 and above:
1026 * usually ends up stopping at the start of the next frame). 1030 * wait for the pipe register state bit to turn off
1031 *
1032 * Otherwise:
1033 * wait for the display line value to settle (it usually
1034 * ends up stopping at the start of the next frame).
1035 *
1027 */ 1036 */
1028void intel_wait_for_vblank_off(struct drm_device *dev, int pipe) 1037void intel_wait_for_pipe_off(struct drm_device *dev, int pipe)
1029{ 1038{
1030 struct drm_i915_private *dev_priv = dev->dev_private; 1039 struct drm_i915_private *dev_priv = dev->dev_private;
1031 int pipedsl_reg = (pipe == 0 ? PIPEADSL : PIPEBDSL); 1040
1032 unsigned long timeout = jiffies + msecs_to_jiffies(100); 1041 if (INTEL_INFO(dev)->gen >= 4) {
1033 u32 last_line; 1042 int reg = PIPECONF(pipe);
1034 1043
1035 /* Wait for the display line to settle */ 1044 /* Wait for the Pipe State to go off */
1036 do { 1045 if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0,
1037 last_line = I915_READ(pipedsl_reg) & DSL_LINEMASK; 1046 100))
1038 mdelay(5); 1047 DRM_DEBUG_KMS("pipe_off wait timed out\n");
1039 } while (((I915_READ(pipedsl_reg) & DSL_LINEMASK) != last_line) && 1048 } else {
1040 time_after(timeout, jiffies)); 1049 u32 last_line;
1041 1050 int reg = PIPEDSL(pipe);
1042 if (time_after(jiffies, timeout)) 1051 unsigned long timeout = jiffies + msecs_to_jiffies(100);
1043 DRM_DEBUG_KMS("vblank wait timed out\n"); 1052
1053 /* Wait for the display line to settle */
1054 do {
1055 last_line = I915_READ(reg) & DSL_LINEMASK;
1056 mdelay(5);
1057 } while (((I915_READ(reg) & DSL_LINEMASK) != last_line) &&
1058 time_after(timeout, jiffies));
1059 if (time_after(jiffies, timeout))
1060 DRM_DEBUG_KMS("pipe_off wait timed out\n");
1061 }
1044} 1062}
1045 1063
1046/* Parameters have changed, update FBC info */
1047static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval) 1064static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
1048{ 1065{
1049 struct drm_device *dev = crtc->dev; 1066 struct drm_device *dev = crtc->dev;
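The pipe-off change above, like the hotplug waits in intel_crt.c, leans on a polling helper that this series trims to two arguments (the old calls passed a third busy-wait flag, e.g. wait_for(..., 1000, 1)). An illustrative reconstruction of such a wait_for(COND, MS) macro, not the driver's exact definition: poll until the condition holds or the timeout expires, returning nonzero on timeout so callers can log it:

#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/errno.h>

#define wait_for(COND, MS) ({						\
	unsigned long timeout__ = jiffies + msecs_to_jiffies(MS);	\
	int ret__ = 0;							\
	while (!(COND)) {						\
		if (time_after(jiffies, timeout__)) {			\
			ret__ = -ETIMEDOUT;	/* caller logs it */	\
			break;						\
		}							\
		msleep(1);						\
	}								\
	ret__;								\
})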
@@ -1055,6 +1072,14 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
1055 int plane, i; 1072 int plane, i;
1056 u32 fbc_ctl, fbc_ctl2; 1073 u32 fbc_ctl, fbc_ctl2;
1057 1074
1075 if (fb->pitch == dev_priv->cfb_pitch &&
1076 obj_priv->fence_reg == dev_priv->cfb_fence &&
1077 intel_crtc->plane == dev_priv->cfb_plane &&
1078 I915_READ(FBC_CONTROL) & FBC_CTL_EN)
1079 return;
1080
1081 i8xx_disable_fbc(dev);
1082
1058 dev_priv->cfb_pitch = dev_priv->cfb_size / FBC_LL_SIZE; 1083 dev_priv->cfb_pitch = dev_priv->cfb_size / FBC_LL_SIZE;
1059 1084
1060 if (fb->pitch < dev_priv->cfb_pitch) 1085 if (fb->pitch < dev_priv->cfb_pitch)
@@ -1088,7 +1113,7 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
1088 I915_WRITE(FBC_CONTROL, fbc_ctl); 1113 I915_WRITE(FBC_CONTROL, fbc_ctl);
1089 1114
1090 DRM_DEBUG_KMS("enabled FBC, pitch %ld, yoff %d, plane %d, ", 1115 DRM_DEBUG_KMS("enabled FBC, pitch %ld, yoff %d, plane %d, ",
1091 dev_priv->cfb_pitch, crtc->y, dev_priv->cfb_plane); 1116 dev_priv->cfb_pitch, crtc->y, dev_priv->cfb_plane);
1092} 1117}
1093 1118
1094void i8xx_disable_fbc(struct drm_device *dev) 1119void i8xx_disable_fbc(struct drm_device *dev)
@@ -1096,19 +1121,16 @@ void i8xx_disable_fbc(struct drm_device *dev)
1096 struct drm_i915_private *dev_priv = dev->dev_private; 1121 struct drm_i915_private *dev_priv = dev->dev_private;
1097 u32 fbc_ctl; 1122 u32 fbc_ctl;
1098 1123
1099 if (!I915_HAS_FBC(dev))
1100 return;
1101
1102 if (!(I915_READ(FBC_CONTROL) & FBC_CTL_EN))
1103 return; /* Already off, just return */
1104
1105 /* Disable compression */ 1124 /* Disable compression */
1106 fbc_ctl = I915_READ(FBC_CONTROL); 1125 fbc_ctl = I915_READ(FBC_CONTROL);
1126 if ((fbc_ctl & FBC_CTL_EN) == 0)
1127 return;
1128
1107 fbc_ctl &= ~FBC_CTL_EN; 1129 fbc_ctl &= ~FBC_CTL_EN;
1108 I915_WRITE(FBC_CONTROL, fbc_ctl); 1130 I915_WRITE(FBC_CONTROL, fbc_ctl);
1109 1131
1110 /* Wait for compressing bit to clear */ 1132 /* Wait for compressing bit to clear */
1111 if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10, 0)) { 1133 if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10)) {
1112 DRM_DEBUG_KMS("FBC idle timed out\n"); 1134 DRM_DEBUG_KMS("FBC idle timed out\n");
1113 return; 1135 return;
1114 } 1136 }
@@ -1131,14 +1153,27 @@ static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
1131 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); 1153 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
1132 struct drm_i915_gem_object *obj_priv = to_intel_bo(intel_fb->obj); 1154 struct drm_i915_gem_object *obj_priv = to_intel_bo(intel_fb->obj);
1133 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 1155 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1134 int plane = (intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : 1156 int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
1135 DPFC_CTL_PLANEB);
1136 unsigned long stall_watermark = 200; 1157 unsigned long stall_watermark = 200;
1137 u32 dpfc_ctl; 1158 u32 dpfc_ctl;
1138 1159
1160 dpfc_ctl = I915_READ(DPFC_CONTROL);
1161 if (dpfc_ctl & DPFC_CTL_EN) {
1162 if (dev_priv->cfb_pitch == dev_priv->cfb_pitch / 64 - 1 &&
1163 dev_priv->cfb_fence == obj_priv->fence_reg &&
1164 dev_priv->cfb_plane == intel_crtc->plane &&
1165 dev_priv->cfb_y == crtc->y)
1166 return;
1167
1168 I915_WRITE(DPFC_CONTROL, dpfc_ctl & ~DPFC_CTL_EN);
1169 POSTING_READ(DPFC_CONTROL);
1170 intel_wait_for_vblank(dev, intel_crtc->pipe);
1171 }
1172
1139 dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1; 1173 dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1;
1140 dev_priv->cfb_fence = obj_priv->fence_reg; 1174 dev_priv->cfb_fence = obj_priv->fence_reg;
1141 dev_priv->cfb_plane = intel_crtc->plane; 1175 dev_priv->cfb_plane = intel_crtc->plane;
1176 dev_priv->cfb_y = crtc->y;
1142 1177
1143 dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X; 1178 dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X;
1144 if (obj_priv->tiling_mode != I915_TILING_NONE) { 1179 if (obj_priv->tiling_mode != I915_TILING_NONE) {
@@ -1148,7 +1183,6 @@ static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
1148 I915_WRITE(DPFC_CHICKEN, ~DPFC_HT_MODIFY); 1183 I915_WRITE(DPFC_CHICKEN, ~DPFC_HT_MODIFY);
1149 } 1184 }
1150 1185
1151 I915_WRITE(DPFC_CONTROL, dpfc_ctl);
1152 I915_WRITE(DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN | 1186 I915_WRITE(DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
1153 (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) | 1187 (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
1154 (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT)); 1188 (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
@@ -1167,10 +1201,12 @@ void g4x_disable_fbc(struct drm_device *dev)
1167 1201
1168 /* Disable compression */ 1202 /* Disable compression */
1169 dpfc_ctl = I915_READ(DPFC_CONTROL); 1203 dpfc_ctl = I915_READ(DPFC_CONTROL);
1170 dpfc_ctl &= ~DPFC_CTL_EN; 1204 if (dpfc_ctl & DPFC_CTL_EN) {
1171 I915_WRITE(DPFC_CONTROL, dpfc_ctl); 1205 dpfc_ctl &= ~DPFC_CTL_EN;
1206 I915_WRITE(DPFC_CONTROL, dpfc_ctl);
1172 1207
1173 DRM_DEBUG_KMS("disabled FBC\n"); 1208 DRM_DEBUG_KMS("disabled FBC\n");
1209 }
1174} 1210}
1175 1211
1176static bool g4x_fbc_enabled(struct drm_device *dev) 1212static bool g4x_fbc_enabled(struct drm_device *dev)
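The FBC enable paths above become idempotent: each reads back the control register, compares the cached pitch/fence/plane/y parameters, and returns early when nothing changed; only on a real change do they disable, wait a vblank, and reprogram, with the enable bit written last so the unit is never armed half-configured. The guard condenses to a comparison like this (sketch; field names follow the patch, the helper itself is illustrative):

struct fbc_params {
	unsigned long pitch;
	int fence, plane, y;
};

/* True when the compressor is already set up for this scanout. */
static bool fbc_params_match(const struct fbc_params *cached,
			     const struct fbc_params *next)
{
	return cached->pitch == next->pitch &&
	       cached->fence == next->fence &&
	       cached->plane == next->plane &&
	       cached->y     == next->y;
}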
@@ -1188,16 +1224,30 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
1188 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); 1224 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
1189 struct drm_i915_gem_object *obj_priv = to_intel_bo(intel_fb->obj); 1225 struct drm_i915_gem_object *obj_priv = to_intel_bo(intel_fb->obj);
1190 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 1226 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1191 int plane = (intel_crtc->plane == 0) ? DPFC_CTL_PLANEA : 1227 int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
1192 DPFC_CTL_PLANEB;
1193 unsigned long stall_watermark = 200; 1228 unsigned long stall_watermark = 200;
1194 u32 dpfc_ctl; 1229 u32 dpfc_ctl;
1195 1230
1231 dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
1232 if (dpfc_ctl & DPFC_CTL_EN) {
1233 if (dev_priv->cfb_pitch == dev_priv->cfb_pitch / 64 - 1 &&
1234 dev_priv->cfb_fence == obj_priv->fence_reg &&
1235 dev_priv->cfb_plane == intel_crtc->plane &&
1236 dev_priv->cfb_offset == obj_priv->gtt_offset &&
1237 dev_priv->cfb_y == crtc->y)
1238 return;
1239
1240 I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl & ~DPFC_CTL_EN);
1241 POSTING_READ(ILK_DPFC_CONTROL);
1242 intel_wait_for_vblank(dev, intel_crtc->pipe);
1243 }
1244
1196 dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1; 1245 dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1;
1197 dev_priv->cfb_fence = obj_priv->fence_reg; 1246 dev_priv->cfb_fence = obj_priv->fence_reg;
1198 dev_priv->cfb_plane = intel_crtc->plane; 1247 dev_priv->cfb_plane = intel_crtc->plane;
1248 dev_priv->cfb_offset = obj_priv->gtt_offset;
1249 dev_priv->cfb_y = crtc->y;
1199 1250
1200 dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
1201 dpfc_ctl &= DPFC_RESERVED; 1251 dpfc_ctl &= DPFC_RESERVED;
1202 dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X); 1252 dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X);
1203 if (obj_priv->tiling_mode != I915_TILING_NONE) { 1253 if (obj_priv->tiling_mode != I915_TILING_NONE) {
@@ -1207,15 +1257,13 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
1207 I915_WRITE(ILK_DPFC_CHICKEN, ~DPFC_HT_MODIFY); 1257 I915_WRITE(ILK_DPFC_CHICKEN, ~DPFC_HT_MODIFY);
1208 } 1258 }
1209 1259
1210 I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
1211 I915_WRITE(ILK_DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN | 1260 I915_WRITE(ILK_DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
1212 (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) | 1261 (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
1213 (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT)); 1262 (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
1214 I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y); 1263 I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
1215 I915_WRITE(ILK_FBC_RT_BASE, obj_priv->gtt_offset | ILK_FBC_RT_VALID); 1264 I915_WRITE(ILK_FBC_RT_BASE, obj_priv->gtt_offset | ILK_FBC_RT_VALID);
1216 /* enable it... */ 1265 /* enable it... */
1217 I915_WRITE(ILK_DPFC_CONTROL, I915_READ(ILK_DPFC_CONTROL) | 1266 I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
1218 DPFC_CTL_EN);
1219 1267
1220 DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane); 1268 DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
1221} 1269}
@@ -1227,10 +1275,12 @@ void ironlake_disable_fbc(struct drm_device *dev)
1227 1275
1228 /* Disable compression */ 1276 /* Disable compression */
1229 dpfc_ctl = I915_READ(ILK_DPFC_CONTROL); 1277 dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
1230 dpfc_ctl &= ~DPFC_CTL_EN; 1278 if (dpfc_ctl & DPFC_CTL_EN) {
1231 I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl); 1279 dpfc_ctl &= ~DPFC_CTL_EN;
1280 I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
1232 1281
1233 DRM_DEBUG_KMS("disabled FBC\n"); 1282 DRM_DEBUG_KMS("disabled FBC\n");
1283 }
1234} 1284}
1235 1285
1236static bool ironlake_fbc_enabled(struct drm_device *dev) 1286static bool ironlake_fbc_enabled(struct drm_device *dev)
@@ -1272,8 +1322,7 @@ void intel_disable_fbc(struct drm_device *dev)
1272 1322
1273/** 1323/**
1274 * intel_update_fbc - enable/disable FBC as needed 1324 * intel_update_fbc - enable/disable FBC as needed
1275 * @crtc: CRTC to point the compressor at 1325 * @dev: the drm_device
1276 * @mode: mode in use
1277 * 1326 *
1278 * Set up the framebuffer compression hardware at mode set time. We 1327 * Set up the framebuffer compression hardware at mode set time. We
1279 * enable it if possible: 1328 * enable it if possible:
@@ -1290,18 +1339,14 @@ void intel_disable_fbc(struct drm_device *dev)
1290 * 1339 *
1291 * We need to enable/disable FBC on a global basis. 1340 * We need to enable/disable FBC on a global basis.
1292 */ 1341 */
1293static void intel_update_fbc(struct drm_crtc *crtc, 1342static void intel_update_fbc(struct drm_device *dev)
1294 struct drm_display_mode *mode)
1295{ 1343{
1296 struct drm_device *dev = crtc->dev;
1297 struct drm_i915_private *dev_priv = dev->dev_private; 1344 struct drm_i915_private *dev_priv = dev->dev_private;
1298 struct drm_framebuffer *fb = crtc->fb; 1345 struct drm_crtc *crtc = NULL, *tmp_crtc;
1346 struct intel_crtc *intel_crtc;
1347 struct drm_framebuffer *fb;
1299 struct intel_framebuffer *intel_fb; 1348 struct intel_framebuffer *intel_fb;
1300 struct drm_i915_gem_object *obj_priv; 1349 struct drm_i915_gem_object *obj_priv;
1301 struct drm_crtc *tmp_crtc;
1302 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1303 int plane = intel_crtc->plane;
1304 int crtcs_enabled = 0;
1305 1350
1306 DRM_DEBUG_KMS("\n"); 1351 DRM_DEBUG_KMS("\n");
1307 1352
@@ -1311,12 +1356,6 @@ static void intel_update_fbc(struct drm_crtc *crtc,
1311 if (!I915_HAS_FBC(dev)) 1356 if (!I915_HAS_FBC(dev))
1312 return; 1357 return;
1313 1358
1314 if (!crtc->fb)
1315 return;
1316
1317 intel_fb = to_intel_framebuffer(fb);
1318 obj_priv = to_intel_bo(intel_fb->obj);
1319
1320 /* 1359 /*
1321 * If FBC is already on, we just have to verify that we can 1360 * If FBC is already on, we just have to verify that we can
1322 * keep it that way... 1361 * keep it that way...
@@ -1327,35 +1366,47 @@ static void intel_update_fbc(struct drm_crtc *crtc,
1327 * - going to an unsupported config (interlace, pixel multiply, etc.) 1366 * - going to an unsupported config (interlace, pixel multiply, etc.)
1328 */ 1367 */
1329 list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) { 1368 list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) {
1330 if (tmp_crtc->enabled) 1369 if (tmp_crtc->enabled) {
1331 crtcs_enabled++; 1370 if (crtc) {
1371 DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
1372 dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES;
1373 goto out_disable;
1374 }
1375 crtc = tmp_crtc;
1376 }
1332 } 1377 }
1333 DRM_DEBUG_KMS("%d pipes active\n", crtcs_enabled); 1378
1334 if (crtcs_enabled > 1) { 1379 if (!crtc || crtc->fb == NULL) {
1335 DRM_DEBUG_KMS("more than one pipe active, disabling compression\n"); 1380 DRM_DEBUG_KMS("no output, disabling\n");
1336 dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES; 1381 dev_priv->no_fbc_reason = FBC_NO_OUTPUT;
1337 goto out_disable; 1382 goto out_disable;
1338 } 1383 }
1384
1385 intel_crtc = to_intel_crtc(crtc);
1386 fb = crtc->fb;
1387 intel_fb = to_intel_framebuffer(fb);
1388 obj_priv = to_intel_bo(intel_fb->obj);
1389
1339 if (intel_fb->obj->size > dev_priv->cfb_size) { 1390 if (intel_fb->obj->size > dev_priv->cfb_size) {
1340 DRM_DEBUG_KMS("framebuffer too large, disabling " 1391 DRM_DEBUG_KMS("framebuffer too large, disabling "
1341 "compression\n"); 1392 "compression\n");
1342 dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL; 1393 dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
1343 goto out_disable; 1394 goto out_disable;
1344 } 1395 }
1345 if ((mode->flags & DRM_MODE_FLAG_INTERLACE) || 1396 if ((crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) ||
1346 (mode->flags & DRM_MODE_FLAG_DBLSCAN)) { 1397 (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)) {
1347 DRM_DEBUG_KMS("mode incompatible with compression, " 1398 DRM_DEBUG_KMS("mode incompatible with compression, "
1348 "disabling\n"); 1399 "disabling\n");
1349 dev_priv->no_fbc_reason = FBC_UNSUPPORTED_MODE; 1400 dev_priv->no_fbc_reason = FBC_UNSUPPORTED_MODE;
1350 goto out_disable; 1401 goto out_disable;
1351 } 1402 }
1352 if ((mode->hdisplay > 2048) || 1403 if ((crtc->mode.hdisplay > 2048) ||
1353 (mode->vdisplay > 1536)) { 1404 (crtc->mode.vdisplay > 1536)) {
1354 DRM_DEBUG_KMS("mode too large for compression, disabling\n"); 1405 DRM_DEBUG_KMS("mode too large for compression, disabling\n");
1355 dev_priv->no_fbc_reason = FBC_MODE_TOO_LARGE; 1406 dev_priv->no_fbc_reason = FBC_MODE_TOO_LARGE;
1356 goto out_disable; 1407 goto out_disable;
1357 } 1408 }
1358 if ((IS_I915GM(dev) || IS_I945GM(dev)) && plane != 0) { 1409 if ((IS_I915GM(dev) || IS_I945GM(dev)) && intel_crtc->plane != 0) {
1359 DRM_DEBUG_KMS("plane not 0, disabling compression\n"); 1410 DRM_DEBUG_KMS("plane not 0, disabling compression\n");
1360 dev_priv->no_fbc_reason = FBC_BAD_PLANE; 1411 dev_priv->no_fbc_reason = FBC_BAD_PLANE;
1361 goto out_disable; 1412 goto out_disable;
@@ -1370,18 +1421,7 @@ static void intel_update_fbc(struct drm_crtc *crtc,
1370 if (in_dbg_master()) 1421 if (in_dbg_master())
1371 goto out_disable; 1422 goto out_disable;
1372 1423
1373 if (intel_fbc_enabled(dev)) { 1424 intel_enable_fbc(crtc, 500);
1374 /* We can re-enable it in this case, but need to update pitch */
1375 if ((fb->pitch > dev_priv->cfb_pitch) ||
1376 (obj_priv->fence_reg != dev_priv->cfb_fence) ||
1377 (plane != dev_priv->cfb_plane))
1378 intel_disable_fbc(dev);
1379 }
1380
1381 /* Now try to turn it back on if possible */
1382 if (!intel_fbc_enabled(dev))
1383 intel_enable_fbc(crtc, 500);
1384
1385 return; 1425 return;
1386 1426
1387out_disable: 1427out_disable:
@@ -1393,7 +1433,9 @@ out_disable:
1393} 1433}
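
The rewritten scan above replaces the old count-then-compare pass: it remembers the first enabled CRTC and aborts as soon as a second one turns up, since FBC can only track a single pipe. A minimal standalone sketch of that selection pattern (the stub struct and ids are illustrative, not the driver's types):

    #include <stdio.h>

    struct crtc { int enabled; int id; };

    static struct crtc *find_single_enabled(struct crtc *list, int n)
    {
            struct crtc *found = NULL;
            int i;

            for (i = 0; i < n; i++) {
                    if (!list[i].enabled)
                            continue;
                    if (found)      /* a second active pipe: FBC must stay off */
                            return NULL;
                    found = &list[i];
            }
            return found;
    }

    int main(void)
    {
            struct crtc crtcs[] = { { 0, 0 }, { 1, 1 }, { 0, 2 } };
            struct crtc *c = find_single_enabled(crtcs, 3);

            if (c)
                    printf("FBC candidate: pipe %d\n", c->id);
            else
                    printf("FBC disabled: zero or multiple pipes active\n");
            return 0;
    }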
1394 1434
1395int 1435int
1396intel_pin_and_fence_fb_obj(struct drm_device *dev, struct drm_gem_object *obj) 1436intel_pin_and_fence_fb_obj(struct drm_device *dev,
1437 struct drm_gem_object *obj,
1438 bool pipelined)
1397{ 1439{
1398 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); 1440 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
1399 u32 alignment; 1441 u32 alignment;
@@ -1403,7 +1445,7 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev, struct drm_gem_object *obj)
1403 case I915_TILING_NONE: 1445 case I915_TILING_NONE:
1404 if (IS_BROADWATER(dev) || IS_CRESTLINE(dev)) 1446 if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
1405 alignment = 128 * 1024; 1447 alignment = 128 * 1024;
1406 else if (IS_I965G(dev)) 1448 else if (INTEL_INFO(dev)->gen >= 4)
1407 alignment = 4 * 1024; 1449 alignment = 4 * 1024;
1408 else 1450 else
1409 alignment = 64 * 1024; 1451 alignment = 64 * 1024;
@@ -1421,9 +1463,13 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev, struct drm_gem_object *obj)
1421 } 1463 }
1422 1464
1423 ret = i915_gem_object_pin(obj, alignment); 1465 ret = i915_gem_object_pin(obj, alignment);
1424 if (ret != 0) 1466 if (ret)
1425 return ret; 1467 return ret;
1426 1468
1469 ret = i915_gem_object_set_to_display_plane(obj, pipelined);
1470 if (ret)
1471 goto err_unpin;
1472
1427 /* Install a fence for tiled scan-out. Pre-i965 always needs a 1473 /* Install a fence for tiled scan-out. Pre-i965 always needs a
1428 * fence, whereas 965+ only requires a fence if using 1474 * fence, whereas 965+ only requires a fence if using
1429 * framebuffer compression. For simplicity, we always install 1475 * framebuffer compression. For simplicity, we always install
@@ -1431,14 +1477,16 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev, struct drm_gem_object *obj)
1431 */ 1477 */
1432 if (obj_priv->fence_reg == I915_FENCE_REG_NONE && 1478 if (obj_priv->fence_reg == I915_FENCE_REG_NONE &&
1433 obj_priv->tiling_mode != I915_TILING_NONE) { 1479 obj_priv->tiling_mode != I915_TILING_NONE) {
1434 ret = i915_gem_object_get_fence_reg(obj); 1480 ret = i915_gem_object_get_fence_reg(obj, false);
1435 if (ret != 0) { 1481 if (ret)
1436 i915_gem_object_unpin(obj); 1482 goto err_unpin;
1437 return ret;
1438 }
1439 } 1483 }
1440 1484
1441 return 0; 1485 return 0;
1486
1487err_unpin:
1488 i915_gem_object_unpin(obj);
1489 return ret;
1442} 1490}
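
The hunk above also switches the function to a single err_unpin unwind label, so the two failure points that now follow the pin (the display-plane move and the fence allocation) release the pin through one path instead of duplicating the cleanup. A minimal sketch of that goto-unwind shape, with stub functions standing in for the GEM calls:

    #include <errno.h>

    static int pin(void)    { return 0; }
    static void unpin(void) { }
    static int fence(void)  { return -EBUSY; }

    static int pin_and_fence(void)
    {
            int ret;

            ret = pin();
            if (ret)
                    return ret;     /* nothing acquired yet: plain return */

            ret = fence();
            if (ret)
                    goto err_unpin; /* every later failure unwinds here */

            return 0;

    err_unpin:
            unpin();
            return ret;
    }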
1443 1491
1444/* Assume fb object is pinned & idle & fenced and just update base pointers */ 1492/* Assume fb object is pinned & idle & fenced and just update base pointers */
@@ -1454,12 +1502,8 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
1454 struct drm_gem_object *obj; 1502 struct drm_gem_object *obj;
1455 int plane = intel_crtc->plane; 1503 int plane = intel_crtc->plane;
1456 unsigned long Start, Offset; 1504 unsigned long Start, Offset;
1457 int dspbase = (plane == 0 ? DSPAADDR : DSPBADDR);
1458 int dspsurf = (plane == 0 ? DSPASURF : DSPBSURF);
1459 int dspstride = (plane == 0) ? DSPASTRIDE : DSPBSTRIDE;
1460 int dsptileoff = (plane == 0 ? DSPATILEOFF : DSPBTILEOFF);
1461 int dspcntr_reg = (plane == 0) ? DSPACNTR : DSPBCNTR;
1462 u32 dspcntr; 1505 u32 dspcntr;
1506 u32 reg;
1463 1507
1464 switch (plane) { 1508 switch (plane) {
1465 case 0: 1509 case 0:
@@ -1474,7 +1518,8 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
1474 obj = intel_fb->obj; 1518 obj = intel_fb->obj;
1475 obj_priv = to_intel_bo(obj); 1519 obj_priv = to_intel_bo(obj);
1476 1520
1477 dspcntr = I915_READ(dspcntr_reg); 1521 reg = DSPCNTR(plane);
1522 dspcntr = I915_READ(reg);
1478 /* Mask out pixel format bits in case we change it */ 1523 /* Mask out pixel format bits in case we change it */
1479 dspcntr &= ~DISPPLANE_PIXFORMAT_MASK; 1524 dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
1480 switch (fb->bits_per_pixel) { 1525 switch (fb->bits_per_pixel) {
@@ -1495,7 +1540,7 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
1495 DRM_ERROR("Unknown color depth\n"); 1540 DRM_ERROR("Unknown color depth\n");
1496 return -EINVAL; 1541 return -EINVAL;
1497 } 1542 }
1498 if (IS_I965G(dev)) { 1543 if (INTEL_INFO(dev)->gen >= 4) {
1499 if (obj_priv->tiling_mode != I915_TILING_NONE) 1544 if (obj_priv->tiling_mode != I915_TILING_NONE)
1500 dspcntr |= DISPPLANE_TILED; 1545 dspcntr |= DISPPLANE_TILED;
1501 else 1546 else
@@ -1506,28 +1551,24 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
1506 /* must disable */ 1551 /* must disable */
1507 dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE; 1552 dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
1508 1553
1509 I915_WRITE(dspcntr_reg, dspcntr); 1554 I915_WRITE(reg, dspcntr);
1510 1555
1511 Start = obj_priv->gtt_offset; 1556 Start = obj_priv->gtt_offset;
1512 Offset = y * fb->pitch + x * (fb->bits_per_pixel / 8); 1557 Offset = y * fb->pitch + x * (fb->bits_per_pixel / 8);
1513 1558
1514 DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n", 1559 DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
1515 Start, Offset, x, y, fb->pitch); 1560 Start, Offset, x, y, fb->pitch);
1516 I915_WRITE(dspstride, fb->pitch); 1561 I915_WRITE(DSPSTRIDE(plane), fb->pitch);
1517 if (IS_I965G(dev)) { 1562 if (INTEL_INFO(dev)->gen >= 4) {
1518 I915_WRITE(dspsurf, Start); 1563 I915_WRITE(DSPSURF(plane), Start);
1519 I915_WRITE(dsptileoff, (y << 16) | x); 1564 I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
1520 I915_WRITE(dspbase, Offset); 1565 I915_WRITE(DSPADDR(plane), Offset);
1521 } else { 1566 } else
1522 I915_WRITE(dspbase, Start + Offset); 1567 I915_WRITE(DSPADDR(plane), Start + Offset);
1523 } 1568 POSTING_READ(reg);
1524 POSTING_READ(dspbase);
1525 1569
1526 if (IS_I965G(dev) || plane == 0) 1570 intel_update_fbc(dev);
1527 intel_update_fbc(crtc, &crtc->mode); 1571 intel_increase_pllclock(crtc);
1528
1529 intel_wait_for_vblank(dev, intel_crtc->pipe);
1530 intel_increase_pllclock(crtc, true);
1531 1572
1532 return 0; 1573 return 0;
1533} 1574}
@@ -1539,11 +1580,6 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
1539 struct drm_device *dev = crtc->dev; 1580 struct drm_device *dev = crtc->dev;
1540 struct drm_i915_master_private *master_priv; 1581 struct drm_i915_master_private *master_priv;
1541 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 1582 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1542 struct intel_framebuffer *intel_fb;
1543 struct drm_i915_gem_object *obj_priv;
1544 struct drm_gem_object *obj;
1545 int pipe = intel_crtc->pipe;
1546 int plane = intel_crtc->plane;
1547 int ret; 1583 int ret;
1548 1584
1549 /* no fb bound */ 1585 /* no fb bound */
@@ -1552,45 +1588,41 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
1552 return 0; 1588 return 0;
1553 } 1589 }
1554 1590
1555 switch (plane) { 1591 switch (intel_crtc->plane) {
1556 case 0: 1592 case 0:
1557 case 1: 1593 case 1:
1558 break; 1594 break;
1559 default: 1595 default:
1560 DRM_ERROR("Can't update plane %d in SAREA\n", plane);
1561 return -EINVAL; 1596 return -EINVAL;
1562 } 1597 }
1563 1598
1564 intel_fb = to_intel_framebuffer(crtc->fb);
1565 obj = intel_fb->obj;
1566 obj_priv = to_intel_bo(obj);
1567
1568 mutex_lock(&dev->struct_mutex); 1599 mutex_lock(&dev->struct_mutex);
1569 ret = intel_pin_and_fence_fb_obj(dev, obj); 1600 ret = intel_pin_and_fence_fb_obj(dev,
1601 to_intel_framebuffer(crtc->fb)->obj,
1602 false);
1570 if (ret != 0) { 1603 if (ret != 0) {
1571 mutex_unlock(&dev->struct_mutex); 1604 mutex_unlock(&dev->struct_mutex);
1572 return ret; 1605 return ret;
1573 } 1606 }
1574 1607
1575 ret = i915_gem_object_set_to_display_plane(obj); 1608 if (old_fb) {
1576 if (ret != 0) { 1609 struct drm_i915_private *dev_priv = dev->dev_private;
1577 i915_gem_object_unpin(obj); 1610 struct drm_gem_object *obj = to_intel_framebuffer(old_fb)->obj;
1578 mutex_unlock(&dev->struct_mutex); 1611 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
1579 return ret; 1612
1613 wait_event(dev_priv->pending_flip_queue,
1614 atomic_read(&obj_priv->pending_flip) == 0);
1580 } 1615 }
1581 1616
1582 ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y); 1617 ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y);
1583 if (ret) { 1618 if (ret) {
1584 i915_gem_object_unpin(obj); 1619 i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj);
1585 mutex_unlock(&dev->struct_mutex); 1620 mutex_unlock(&dev->struct_mutex);
1586 return ret; 1621 return ret;
1587 } 1622 }
1588 1623
1589 if (old_fb) { 1624 if (old_fb)
1590 intel_fb = to_intel_framebuffer(old_fb); 1625 i915_gem_object_unpin(to_intel_framebuffer(old_fb)->obj);
1591 obj_priv = to_intel_bo(intel_fb->obj);
1592 i915_gem_object_unpin(intel_fb->obj);
1593 }
1594 1626
1595 mutex_unlock(&dev->struct_mutex); 1627 mutex_unlock(&dev->struct_mutex);
1596 1628
@@ -1601,7 +1633,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
1601 if (!master_priv->sarea_priv) 1633 if (!master_priv->sarea_priv)
1602 return 0; 1634 return 0;
1603 1635
1604 if (pipe) { 1636 if (intel_crtc->pipe) {
1605 master_priv->sarea_priv->pipeB_x = x; 1637 master_priv->sarea_priv->pipeB_x = x;
1606 master_priv->sarea_priv->pipeB_y = y; 1638 master_priv->sarea_priv->pipeB_y = y;
1607 } else { 1639 } else {
@@ -1612,7 +1644,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
1612 return 0; 1644 return 0;
1613} 1645}
1614 1646
1615static void ironlake_set_pll_edp (struct drm_crtc *crtc, int clock) 1647static void ironlake_set_pll_edp(struct drm_crtc *crtc, int clock)
1616{ 1648{
1617 struct drm_device *dev = crtc->dev; 1649 struct drm_device *dev = crtc->dev;
1618 struct drm_i915_private *dev_priv = dev->dev_private; 1650 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1645,6 +1677,7 @@ static void ironlake_set_pll_edp (struct drm_crtc *crtc, int clock)
1645 } 1677 }
1646 I915_WRITE(DP_A, dpa_ctl); 1678 I915_WRITE(DP_A, dpa_ctl);
1647 1679
1680 POSTING_READ(DP_A);
1648 udelay(500); 1681 udelay(500);
1649} 1682}
1650 1683
@@ -1655,84 +1688,84 @@ static void ironlake_fdi_link_train(struct drm_crtc *crtc)
1655 struct drm_i915_private *dev_priv = dev->dev_private; 1688 struct drm_i915_private *dev_priv = dev->dev_private;
1656 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 1689 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1657 int pipe = intel_crtc->pipe; 1690 int pipe = intel_crtc->pipe;
1658 int fdi_tx_reg = (pipe == 0) ? FDI_TXA_CTL : FDI_TXB_CTL; 1691 u32 reg, temp, tries;
1659 int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL;
1660 int fdi_rx_iir_reg = (pipe == 0) ? FDI_RXA_IIR : FDI_RXB_IIR;
1661 int fdi_rx_imr_reg = (pipe == 0) ? FDI_RXA_IMR : FDI_RXB_IMR;
1662 u32 temp, tries = 0;
1663 1692
 1664 /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit 1693 /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
1665 for train result */ 1694 for train result */
1666 temp = I915_READ(fdi_rx_imr_reg); 1695 reg = FDI_RX_IMR(pipe);
1696 temp = I915_READ(reg);
1667 temp &= ~FDI_RX_SYMBOL_LOCK; 1697 temp &= ~FDI_RX_SYMBOL_LOCK;
1668 temp &= ~FDI_RX_BIT_LOCK; 1698 temp &= ~FDI_RX_BIT_LOCK;
1669 I915_WRITE(fdi_rx_imr_reg, temp); 1699 I915_WRITE(reg, temp);
1670 I915_READ(fdi_rx_imr_reg); 1700 I915_READ(reg);
1671 udelay(150); 1701 udelay(150);
1672 1702
1673 /* enable CPU FDI TX and PCH FDI RX */ 1703 /* enable CPU FDI TX and PCH FDI RX */
1674 temp = I915_READ(fdi_tx_reg); 1704 reg = FDI_TX_CTL(pipe);
1675 temp |= FDI_TX_ENABLE; 1705 temp = I915_READ(reg);
1676 temp &= ~(7 << 19); 1706 temp &= ~(7 << 19);
1677 temp |= (intel_crtc->fdi_lanes - 1) << 19; 1707 temp |= (intel_crtc->fdi_lanes - 1) << 19;
1678 temp &= ~FDI_LINK_TRAIN_NONE; 1708 temp &= ~FDI_LINK_TRAIN_NONE;
1679 temp |= FDI_LINK_TRAIN_PATTERN_1; 1709 temp |= FDI_LINK_TRAIN_PATTERN_1;
1680 I915_WRITE(fdi_tx_reg, temp); 1710 I915_WRITE(reg, temp | FDI_TX_ENABLE);
1681 I915_READ(fdi_tx_reg);
1682 1711
1683 temp = I915_READ(fdi_rx_reg); 1712 reg = FDI_RX_CTL(pipe);
1713 temp = I915_READ(reg);
1684 temp &= ~FDI_LINK_TRAIN_NONE; 1714 temp &= ~FDI_LINK_TRAIN_NONE;
1685 temp |= FDI_LINK_TRAIN_PATTERN_1; 1715 temp |= FDI_LINK_TRAIN_PATTERN_1;
1686 I915_WRITE(fdi_rx_reg, temp | FDI_RX_ENABLE); 1716 I915_WRITE(reg, temp | FDI_RX_ENABLE);
1687 I915_READ(fdi_rx_reg); 1717
1718 POSTING_READ(reg);
1688 udelay(150); 1719 udelay(150);
1689 1720
1721 reg = FDI_RX_IIR(pipe);
1690 for (tries = 0; tries < 5; tries++) { 1722 for (tries = 0; tries < 5; tries++) {
1691 temp = I915_READ(fdi_rx_iir_reg); 1723 temp = I915_READ(reg);
1692 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); 1724 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
1693 1725
1694 if ((temp & FDI_RX_BIT_LOCK)) { 1726 if ((temp & FDI_RX_BIT_LOCK)) {
1695 DRM_DEBUG_KMS("FDI train 1 done.\n"); 1727 DRM_DEBUG_KMS("FDI train 1 done.\n");
1696 I915_WRITE(fdi_rx_iir_reg, 1728 I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
1697 temp | FDI_RX_BIT_LOCK);
1698 break; 1729 break;
1699 } 1730 }
1700 } 1731 }
1701 if (tries == 5) 1732 if (tries == 5)
1702 DRM_DEBUG_KMS("FDI train 1 fail!\n"); 1733 DRM_ERROR("FDI train 1 fail!\n");
1703 1734
1704 /* Train 2 */ 1735 /* Train 2 */
1705 temp = I915_READ(fdi_tx_reg); 1736 reg = FDI_TX_CTL(pipe);
1737 temp = I915_READ(reg);
1706 temp &= ~FDI_LINK_TRAIN_NONE; 1738 temp &= ~FDI_LINK_TRAIN_NONE;
1707 temp |= FDI_LINK_TRAIN_PATTERN_2; 1739 temp |= FDI_LINK_TRAIN_PATTERN_2;
1708 I915_WRITE(fdi_tx_reg, temp); 1740 I915_WRITE(reg, temp);
1709 1741
1710 temp = I915_READ(fdi_rx_reg); 1742 reg = FDI_RX_CTL(pipe);
1743 temp = I915_READ(reg);
1711 temp &= ~FDI_LINK_TRAIN_NONE; 1744 temp &= ~FDI_LINK_TRAIN_NONE;
1712 temp |= FDI_LINK_TRAIN_PATTERN_2; 1745 temp |= FDI_LINK_TRAIN_PATTERN_2;
1713 I915_WRITE(fdi_rx_reg, temp); 1746 I915_WRITE(reg, temp);
1714 udelay(150);
1715 1747
1716 tries = 0; 1748 POSTING_READ(reg);
1749 udelay(150);
1717 1750
1751 reg = FDI_RX_IIR(pipe);
1718 for (tries = 0; tries < 5; tries++) { 1752 for (tries = 0; tries < 5; tries++) {
1719 temp = I915_READ(fdi_rx_iir_reg); 1753 temp = I915_READ(reg);
1720 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); 1754 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
1721 1755
1722 if (temp & FDI_RX_SYMBOL_LOCK) { 1756 if (temp & FDI_RX_SYMBOL_LOCK) {
1723 I915_WRITE(fdi_rx_iir_reg, 1757 I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
1724 temp | FDI_RX_SYMBOL_LOCK);
1725 DRM_DEBUG_KMS("FDI train 2 done.\n"); 1758 DRM_DEBUG_KMS("FDI train 2 done.\n");
1726 break; 1759 break;
1727 } 1760 }
1728 } 1761 }
1729 if (tries == 5) 1762 if (tries == 5)
1730 DRM_DEBUG_KMS("FDI train 2 fail!\n"); 1763 DRM_ERROR("FDI train 2 fail!\n");
1731 1764
1732 DRM_DEBUG_KMS("FDI train done\n"); 1765 DRM_DEBUG_KMS("FDI train done\n");
1733} 1766}
1734 1767
 1735static int snb_b_fdi_train_param [] = { 1768static const int snb_b_fdi_train_param[] = {
1736 FDI_LINK_TRAIN_400MV_0DB_SNB_B, 1769 FDI_LINK_TRAIN_400MV_0DB_SNB_B,
1737 FDI_LINK_TRAIN_400MV_6DB_SNB_B, 1770 FDI_LINK_TRAIN_400MV_6DB_SNB_B,
1738 FDI_LINK_TRAIN_600MV_3_5DB_SNB_B, 1771 FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
@@ -1746,24 +1779,22 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
1746 struct drm_i915_private *dev_priv = dev->dev_private; 1779 struct drm_i915_private *dev_priv = dev->dev_private;
1747 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 1780 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1748 int pipe = intel_crtc->pipe; 1781 int pipe = intel_crtc->pipe;
1749 int fdi_tx_reg = (pipe == 0) ? FDI_TXA_CTL : FDI_TXB_CTL; 1782 u32 reg, temp, i;
1750 int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL;
1751 int fdi_rx_iir_reg = (pipe == 0) ? FDI_RXA_IIR : FDI_RXB_IIR;
1752 int fdi_rx_imr_reg = (pipe == 0) ? FDI_RXA_IMR : FDI_RXB_IMR;
1753 u32 temp, i;
1754 1783
 1755 /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit 1784 /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
1756 for train result */ 1785 for train result */
1757 temp = I915_READ(fdi_rx_imr_reg); 1786 reg = FDI_RX_IMR(pipe);
1787 temp = I915_READ(reg);
1758 temp &= ~FDI_RX_SYMBOL_LOCK; 1788 temp &= ~FDI_RX_SYMBOL_LOCK;
1759 temp &= ~FDI_RX_BIT_LOCK; 1789 temp &= ~FDI_RX_BIT_LOCK;
1760 I915_WRITE(fdi_rx_imr_reg, temp); 1790 I915_WRITE(reg, temp);
1761 I915_READ(fdi_rx_imr_reg); 1791
1792 POSTING_READ(reg);
1762 udelay(150); 1793 udelay(150);
1763 1794
1764 /* enable CPU FDI TX and PCH FDI RX */ 1795 /* enable CPU FDI TX and PCH FDI RX */
1765 temp = I915_READ(fdi_tx_reg); 1796 reg = FDI_TX_CTL(pipe);
1766 temp |= FDI_TX_ENABLE; 1797 temp = I915_READ(reg);
1767 temp &= ~(7 << 19); 1798 temp &= ~(7 << 19);
1768 temp |= (intel_crtc->fdi_lanes - 1) << 19; 1799 temp |= (intel_crtc->fdi_lanes - 1) << 19;
1769 temp &= ~FDI_LINK_TRAIN_NONE; 1800 temp &= ~FDI_LINK_TRAIN_NONE;
@@ -1771,10 +1802,10 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
1771 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; 1802 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
1772 /* SNB-B */ 1803 /* SNB-B */
1773 temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B; 1804 temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
1774 I915_WRITE(fdi_tx_reg, temp); 1805 I915_WRITE(reg, temp | FDI_TX_ENABLE);
1775 I915_READ(fdi_tx_reg);
1776 1806
1777 temp = I915_READ(fdi_rx_reg); 1807 reg = FDI_RX_CTL(pipe);
1808 temp = I915_READ(reg);
1778 if (HAS_PCH_CPT(dev)) { 1809 if (HAS_PCH_CPT(dev)) {
1779 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; 1810 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
1780 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT; 1811 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
@@ -1782,32 +1813,37 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
1782 temp &= ~FDI_LINK_TRAIN_NONE; 1813 temp &= ~FDI_LINK_TRAIN_NONE;
1783 temp |= FDI_LINK_TRAIN_PATTERN_1; 1814 temp |= FDI_LINK_TRAIN_PATTERN_1;
1784 } 1815 }
1785 I915_WRITE(fdi_rx_reg, temp | FDI_RX_ENABLE); 1816 I915_WRITE(reg, temp | FDI_RX_ENABLE);
1786 I915_READ(fdi_rx_reg); 1817
1818 POSTING_READ(reg);
1787 udelay(150); 1819 udelay(150);
1788 1820
 1789 for (i = 0; i < 4; i++) { 1821 for (i = 0; i < 4; i++) {
1790 temp = I915_READ(fdi_tx_reg); 1822 reg = FDI_TX_CTL(pipe);
1823 temp = I915_READ(reg);
1791 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; 1824 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
1792 temp |= snb_b_fdi_train_param[i]; 1825 temp |= snb_b_fdi_train_param[i];
1793 I915_WRITE(fdi_tx_reg, temp); 1826 I915_WRITE(reg, temp);
1827
1828 POSTING_READ(reg);
1794 udelay(500); 1829 udelay(500);
1795 1830
1796 temp = I915_READ(fdi_rx_iir_reg); 1831 reg = FDI_RX_IIR(pipe);
1832 temp = I915_READ(reg);
1797 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); 1833 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
1798 1834
1799 if (temp & FDI_RX_BIT_LOCK) { 1835 if (temp & FDI_RX_BIT_LOCK) {
1800 I915_WRITE(fdi_rx_iir_reg, 1836 I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
1801 temp | FDI_RX_BIT_LOCK);
1802 DRM_DEBUG_KMS("FDI train 1 done.\n"); 1837 DRM_DEBUG_KMS("FDI train 1 done.\n");
1803 break; 1838 break;
1804 } 1839 }
1805 } 1840 }
1806 if (i == 4) 1841 if (i == 4)
1807 DRM_DEBUG_KMS("FDI train 1 fail!\n"); 1842 DRM_ERROR("FDI train 1 fail!\n");
1808 1843
1809 /* Train 2 */ 1844 /* Train 2 */
1810 temp = I915_READ(fdi_tx_reg); 1845 reg = FDI_TX_CTL(pipe);
1846 temp = I915_READ(reg);
1811 temp &= ~FDI_LINK_TRAIN_NONE; 1847 temp &= ~FDI_LINK_TRAIN_NONE;
1812 temp |= FDI_LINK_TRAIN_PATTERN_2; 1848 temp |= FDI_LINK_TRAIN_PATTERN_2;
1813 if (IS_GEN6(dev)) { 1849 if (IS_GEN6(dev)) {
@@ -1815,9 +1851,10 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
1815 /* SNB-B */ 1851 /* SNB-B */
1816 temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B; 1852 temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
1817 } 1853 }
1818 I915_WRITE(fdi_tx_reg, temp); 1854 I915_WRITE(reg, temp);
1819 1855
1820 temp = I915_READ(fdi_rx_reg); 1856 reg = FDI_RX_CTL(pipe);
1857 temp = I915_READ(reg);
1821 if (HAS_PCH_CPT(dev)) { 1858 if (HAS_PCH_CPT(dev)) {
1822 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; 1859 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
1823 temp |= FDI_LINK_TRAIN_PATTERN_2_CPT; 1860 temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
@@ -1825,535 +1862,611 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
1825 temp &= ~FDI_LINK_TRAIN_NONE; 1862 temp &= ~FDI_LINK_TRAIN_NONE;
1826 temp |= FDI_LINK_TRAIN_PATTERN_2; 1863 temp |= FDI_LINK_TRAIN_PATTERN_2;
1827 } 1864 }
1828 I915_WRITE(fdi_rx_reg, temp); 1865 I915_WRITE(reg, temp);
1866
1867 POSTING_READ(reg);
1829 udelay(150); 1868 udelay(150);
1830 1869
 1831 for (i = 0; i < 4; i++) { 1870 for (i = 0; i < 4; i++) {
1832 temp = I915_READ(fdi_tx_reg); 1871 reg = FDI_TX_CTL(pipe);
1872 temp = I915_READ(reg);
1833 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; 1873 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
1834 temp |= snb_b_fdi_train_param[i]; 1874 temp |= snb_b_fdi_train_param[i];
1835 I915_WRITE(fdi_tx_reg, temp); 1875 I915_WRITE(reg, temp);
1876
1877 POSTING_READ(reg);
1836 udelay(500); 1878 udelay(500);
1837 1879
1838 temp = I915_READ(fdi_rx_iir_reg); 1880 reg = FDI_RX_IIR(pipe);
1881 temp = I915_READ(reg);
1839 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); 1882 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
1840 1883
1841 if (temp & FDI_RX_SYMBOL_LOCK) { 1884 if (temp & FDI_RX_SYMBOL_LOCK) {
1842 I915_WRITE(fdi_rx_iir_reg, 1885 I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
1843 temp | FDI_RX_SYMBOL_LOCK);
1844 DRM_DEBUG_KMS("FDI train 2 done.\n"); 1886 DRM_DEBUG_KMS("FDI train 2 done.\n");
1845 break; 1887 break;
1846 } 1888 }
1847 } 1889 }
1848 if (i == 4) 1890 if (i == 4)
1849 DRM_DEBUG_KMS("FDI train 2 fail!\n"); 1891 DRM_ERROR("FDI train 2 fail!\n");
1850 1892
1851 DRM_DEBUG_KMS("FDI train done.\n"); 1893 DRM_DEBUG_KMS("FDI train done.\n");
1852} 1894}
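
Both link-training routines now share the same bounded poll against FDI_RX_IIR: read the register, test the lock bit, write the bit back to acknowledge it, and escalate to DRM_ERROR once the retries are exhausted (the diff promotes the failure message from DRM_DEBUG_KMS for exactly that reason). A self-contained model of the loop, with a fake register in place of the IIR and write-one-to-clear behaviour assumed from the acknowledge writes above:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define LOCK_BIT (1u << 8)

    static uint32_t fake_iir = LOCK_BIT;            /* pretend training locked */
    static uint32_t read_reg(void)    { return fake_iir; }
    static void write_reg(uint32_t v) { fake_iir &= ~(v & LOCK_BIT); }

    static bool wait_for_lock(int tries)
    {
            while (tries--) {
                    uint32_t v = read_reg();
                    if (v & LOCK_BIT) {
                            write_reg(v | LOCK_BIT);  /* write-1-to-clear ack */
                            return true;
                    }
            }
            return false;   /* caller escalates to an error, as the diff does */
    }

    int main(void)
    {
            printf("lock %s\n", wait_for_lock(5) ? "acquired" : "timed out");
            return 0;
    }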
1853 1895
1854static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) 1896static void ironlake_fdi_enable(struct drm_crtc *crtc)
1855{ 1897{
1856 struct drm_device *dev = crtc->dev; 1898 struct drm_device *dev = crtc->dev;
1857 struct drm_i915_private *dev_priv = dev->dev_private; 1899 struct drm_i915_private *dev_priv = dev->dev_private;
1858 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 1900 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1859 int pipe = intel_crtc->pipe; 1901 int pipe = intel_crtc->pipe;
1860 int plane = intel_crtc->plane; 1902 u32 reg, temp;
1861 int pch_dpll_reg = (pipe == 0) ? PCH_DPLL_A : PCH_DPLL_B;
1862 int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
1863 int dspcntr_reg = (plane == 0) ? DSPACNTR : DSPBCNTR;
1864 int dspbase_reg = (plane == 0) ? DSPAADDR : DSPBADDR;
1865 int fdi_tx_reg = (pipe == 0) ? FDI_TXA_CTL : FDI_TXB_CTL;
1866 int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL;
1867 int transconf_reg = (pipe == 0) ? TRANSACONF : TRANSBCONF;
1868 int cpu_htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
1869 int cpu_hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B;
1870 int cpu_hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B;
1871 int cpu_vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B;
1872 int cpu_vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B;
1873 int cpu_vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B;
1874 int trans_htot_reg = (pipe == 0) ? TRANS_HTOTAL_A : TRANS_HTOTAL_B;
1875 int trans_hblank_reg = (pipe == 0) ? TRANS_HBLANK_A : TRANS_HBLANK_B;
1876 int trans_hsync_reg = (pipe == 0) ? TRANS_HSYNC_A : TRANS_HSYNC_B;
1877 int trans_vtot_reg = (pipe == 0) ? TRANS_VTOTAL_A : TRANS_VTOTAL_B;
1878 int trans_vblank_reg = (pipe == 0) ? TRANS_VBLANK_A : TRANS_VBLANK_B;
1879 int trans_vsync_reg = (pipe == 0) ? TRANS_VSYNC_A : TRANS_VSYNC_B;
1880 int trans_dpll_sel = (pipe == 0) ? 0 : 1;
1881 u32 temp;
1882 u32 pipe_bpc;
1883
1884 temp = I915_READ(pipeconf_reg);
1885 pipe_bpc = temp & PIPE_BPC_MASK;
1886 1903
1887 /* XXX: When our outputs are all unaware of DPMS modes other than off 1904 /* Write the TU size bits so error detection works */
1888 * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC. 1905 I915_WRITE(FDI_RX_TUSIZE1(pipe),
1889 */ 1906 I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
1890 switch (mode) {
1891 case DRM_MODE_DPMS_ON:
1892 case DRM_MODE_DPMS_STANDBY:
1893 case DRM_MODE_DPMS_SUSPEND:
1894 DRM_DEBUG_KMS("crtc %d/%d dpms on\n", pipe, plane);
1895 1907
1896 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { 1908 /* enable PCH FDI RX PLL, wait warmup plus DMI latency */
1897 temp = I915_READ(PCH_LVDS); 1909 reg = FDI_RX_CTL(pipe);
1898 if ((temp & LVDS_PORT_EN) == 0) { 1910 temp = I915_READ(reg);
1899 I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN); 1911 temp &= ~((0x7 << 19) | (0x7 << 16));
1900 POSTING_READ(PCH_LVDS); 1912 temp |= (intel_crtc->fdi_lanes - 1) << 19;
1901 } 1913 temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
1902 } 1914 I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);
1903 1915
1904 if (!HAS_eDP) { 1916 POSTING_READ(reg);
1917 udelay(200);
1905 1918
1906 /* enable PCH FDI RX PLL, wait warmup plus DMI latency */ 1919 /* Switch from Rawclk to PCDclk */
1907 temp = I915_READ(fdi_rx_reg); 1920 temp = I915_READ(reg);
1908 /* 1921 I915_WRITE(reg, temp | FDI_PCDCLK);
1909 * make the BPC in FDI Rx be consistent with that in
1910 * pipeconf reg.
1911 */
1912 temp &= ~(0x7 << 16);
1913 temp |= (pipe_bpc << 11);
1914 temp &= ~(7 << 19);
1915 temp |= (intel_crtc->fdi_lanes - 1) << 19;
1916 I915_WRITE(fdi_rx_reg, temp | FDI_RX_PLL_ENABLE);
1917 I915_READ(fdi_rx_reg);
1918 udelay(200);
1919 1922
1920 /* Switch from Rawclk to PCDclk */ 1923 POSTING_READ(reg);
1921 temp = I915_READ(fdi_rx_reg); 1924 udelay(200);
1922 I915_WRITE(fdi_rx_reg, temp | FDI_SEL_PCDCLK);
1923 I915_READ(fdi_rx_reg);
1924 udelay(200);
1925 1925
1926 /* Enable CPU FDI TX PLL, always on for Ironlake */ 1926 /* Enable CPU FDI TX PLL, always on for Ironlake */
1927 temp = I915_READ(fdi_tx_reg); 1927 reg = FDI_TX_CTL(pipe);
1928 if ((temp & FDI_TX_PLL_ENABLE) == 0) { 1928 temp = I915_READ(reg);
1929 I915_WRITE(fdi_tx_reg, temp | FDI_TX_PLL_ENABLE); 1929 if ((temp & FDI_TX_PLL_ENABLE) == 0) {
1930 I915_READ(fdi_tx_reg); 1930 I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);
1931 udelay(100);
1932 }
1933 }
1934 1931
1935 /* Enable panel fitting for LVDS */ 1932 POSTING_READ(reg);
1936 if (dev_priv->pch_pf_size && 1933 udelay(100);
1937 (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) 1934 }
1938 || HAS_eDP || intel_pch_has_edp(crtc))) { 1935}
1939 /* Force use of hard-coded filter coefficients
1940 * as some pre-programmed values are broken,
1941 * e.g. x201.
1942 */
1943 I915_WRITE(pipe ? PFB_CTL_1 : PFA_CTL_1,
1944 PF_ENABLE | PF_FILTER_MED_3x3);
1945 I915_WRITE(pipe ? PFB_WIN_POS : PFA_WIN_POS,
1946 dev_priv->pch_pf_pos);
1947 I915_WRITE(pipe ? PFB_WIN_SZ : PFA_WIN_SZ,
1948 dev_priv->pch_pf_size);
1949 }
1950 1936
1951 /* Enable CPU pipe */ 1937static void intel_flush_display_plane(struct drm_device *dev,
1952 temp = I915_READ(pipeconf_reg); 1938 int plane)
1953 if ((temp & PIPEACONF_ENABLE) == 0) { 1939{
1954 I915_WRITE(pipeconf_reg, temp | PIPEACONF_ENABLE); 1940 struct drm_i915_private *dev_priv = dev->dev_private;
1955 I915_READ(pipeconf_reg); 1941 u32 reg = DSPADDR(plane);
1956 udelay(100); 1942 I915_WRITE(reg, I915_READ(reg));
1957 } 1943}
1958 1944
1959 /* configure and enable CPU plane */ 1945/*
1960 temp = I915_READ(dspcntr_reg); 1946 * When we disable a pipe, we need to clear any pending scanline wait events
1961 if ((temp & DISPLAY_PLANE_ENABLE) == 0) { 1947 * to avoid hanging the ring, which we assume we are waiting on.
1962 I915_WRITE(dspcntr_reg, temp | DISPLAY_PLANE_ENABLE); 1948 */
1963 /* Flush the plane changes */ 1949static void intel_clear_scanline_wait(struct drm_device *dev)
1964 I915_WRITE(dspbase_reg, I915_READ(dspbase_reg)); 1950{
1965 } 1951 struct drm_i915_private *dev_priv = dev->dev_private;
1952 u32 tmp;
1966 1953
1967 if (!HAS_eDP) { 1954 if (IS_GEN2(dev))
1968 /* For PCH output, training FDI link */ 1955 /* Can't break the hang on i8xx */
1969 if (IS_GEN6(dev)) 1956 return;
1970 gen6_fdi_link_train(crtc);
1971 else
1972 ironlake_fdi_link_train(crtc);
1973 1957
1974 /* enable PCH DPLL */ 1958 tmp = I915_READ(PRB0_CTL);
1975 temp = I915_READ(pch_dpll_reg); 1959 if (tmp & RING_WAIT) {
1976 if ((temp & DPLL_VCO_ENABLE) == 0) { 1960 I915_WRITE(PRB0_CTL, tmp);
1977 I915_WRITE(pch_dpll_reg, temp | DPLL_VCO_ENABLE); 1961 POSTING_READ(PRB0_CTL);
1978 I915_READ(pch_dpll_reg); 1962 }
1979 } 1963}
1980 udelay(200);
1981 1964
1982 if (HAS_PCH_CPT(dev)) { 1965static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
1983 /* Be sure PCH DPLL SEL is set */ 1966{
1984 temp = I915_READ(PCH_DPLL_SEL); 1967 struct drm_i915_gem_object *obj_priv;
1985 if (trans_dpll_sel == 0 && 1968 struct drm_i915_private *dev_priv;
1986 (temp & TRANSA_DPLL_ENABLE) == 0)
1987 temp |= (TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL);
1988 else if (trans_dpll_sel == 1 &&
1989 (temp & TRANSB_DPLL_ENABLE) == 0)
1990 temp |= (TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
1991 I915_WRITE(PCH_DPLL_SEL, temp);
1992 I915_READ(PCH_DPLL_SEL);
1993 }
1994 1969
1995 /* set transcoder timing */ 1970 if (crtc->fb == NULL)
1996 I915_WRITE(trans_htot_reg, I915_READ(cpu_htot_reg)); 1971 return;
1997 I915_WRITE(trans_hblank_reg, I915_READ(cpu_hblank_reg));
1998 I915_WRITE(trans_hsync_reg, I915_READ(cpu_hsync_reg));
1999
2000 I915_WRITE(trans_vtot_reg, I915_READ(cpu_vtot_reg));
2001 I915_WRITE(trans_vblank_reg, I915_READ(cpu_vblank_reg));
2002 I915_WRITE(trans_vsync_reg, I915_READ(cpu_vsync_reg));
2003
2004 /* enable normal train */
2005 temp = I915_READ(fdi_tx_reg);
2006 temp &= ~FDI_LINK_TRAIN_NONE;
2007 I915_WRITE(fdi_tx_reg, temp | FDI_LINK_TRAIN_NONE |
2008 FDI_TX_ENHANCE_FRAME_ENABLE);
2009 I915_READ(fdi_tx_reg);
2010
2011 temp = I915_READ(fdi_rx_reg);
2012 if (HAS_PCH_CPT(dev)) {
2013 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2014 temp |= FDI_LINK_TRAIN_NORMAL_CPT;
2015 } else {
2016 temp &= ~FDI_LINK_TRAIN_NONE;
2017 temp |= FDI_LINK_TRAIN_NONE;
2018 }
2019 I915_WRITE(fdi_rx_reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
2020 I915_READ(fdi_rx_reg);
2021 1972
2022 /* wait one idle pattern time */ 1973 obj_priv = to_intel_bo(to_intel_framebuffer(crtc->fb)->obj);
2023 udelay(100); 1974 dev_priv = crtc->dev->dev_private;
1975 wait_event(dev_priv->pending_flip_queue,
1976 atomic_read(&obj_priv->pending_flip) == 0);
1977}
2024 1978
2025 /* For PCH DP, enable TRANS_DP_CTL */ 1979static void ironlake_crtc_enable(struct drm_crtc *crtc)
2026 if (HAS_PCH_CPT(dev) && 1980{
2027 intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) { 1981 struct drm_device *dev = crtc->dev;
2028 int trans_dp_ctl = (pipe == 0) ? TRANS_DP_CTL_A : TRANS_DP_CTL_B; 1982 struct drm_i915_private *dev_priv = dev->dev_private;
2029 int reg; 1983 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2030 1984 int pipe = intel_crtc->pipe;
2031 reg = I915_READ(trans_dp_ctl); 1985 int plane = intel_crtc->plane;
2032 reg &= ~(TRANS_DP_PORT_SEL_MASK | 1986 u32 reg, temp;
2033 TRANS_DP_SYNC_MASK);
2034 reg |= (TRANS_DP_OUTPUT_ENABLE |
2035 TRANS_DP_ENH_FRAMING);
2036
2037 if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC)
2038 reg |= TRANS_DP_HSYNC_ACTIVE_HIGH;
2039 if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC)
2040 reg |= TRANS_DP_VSYNC_ACTIVE_HIGH;
2041
2042 switch (intel_trans_dp_port_sel(crtc)) {
2043 case PCH_DP_B:
2044 reg |= TRANS_DP_PORT_SEL_B;
2045 break;
2046 case PCH_DP_C:
2047 reg |= TRANS_DP_PORT_SEL_C;
2048 break;
2049 case PCH_DP_D:
2050 reg |= TRANS_DP_PORT_SEL_D;
2051 break;
2052 default:
2053 DRM_DEBUG_KMS("Wrong PCH DP port return. Guess port B\n");
2054 reg |= TRANS_DP_PORT_SEL_B;
2055 break;
2056 }
2057 1987
2058 I915_WRITE(trans_dp_ctl, reg); 1988 if (intel_crtc->active)
2059 POSTING_READ(trans_dp_ctl); 1989 return;
2060 }
2061 1990
2062 /* enable PCH transcoder */ 1991 intel_crtc->active = true;
2063 temp = I915_READ(transconf_reg); 1992 intel_update_watermarks(dev);
2064 /*
2065 * make the BPC in transcoder be consistent with
2066 * that in pipeconf reg.
2067 */
2068 temp &= ~PIPE_BPC_MASK;
2069 temp |= pipe_bpc;
2070 I915_WRITE(transconf_reg, temp | TRANS_ENABLE);
2071 I915_READ(transconf_reg);
2072 1993
2073 if (wait_for(I915_READ(transconf_reg) & TRANS_STATE_ENABLE, 100, 1)) 1994 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
2074 DRM_ERROR("failed to enable transcoder\n"); 1995 temp = I915_READ(PCH_LVDS);
2075 } 1996 if ((temp & LVDS_PORT_EN) == 0)
1997 I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN);
1998 }
2076 1999
2077 intel_crtc_load_lut(crtc); 2000 ironlake_fdi_enable(crtc);
2078 2001
2079 intel_update_fbc(crtc, &crtc->mode); 2002 /* Enable panel fitting for LVDS */
2080 break; 2003 if (dev_priv->pch_pf_size &&
2004 (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)
2005 || HAS_eDP || intel_pch_has_edp(crtc))) {
2006 /* Force use of hard-coded filter coefficients
2007 * as some pre-programmed values are broken,
2008 * e.g. x201.
2009 */
2010 I915_WRITE(pipe ? PFB_CTL_1 : PFA_CTL_1,
2011 PF_ENABLE | PF_FILTER_MED_3x3);
2012 I915_WRITE(pipe ? PFB_WIN_POS : PFA_WIN_POS,
2013 dev_priv->pch_pf_pos);
2014 I915_WRITE(pipe ? PFB_WIN_SZ : PFA_WIN_SZ,
2015 dev_priv->pch_pf_size);
2016 }
2017
2018 /* Enable CPU pipe */
2019 reg = PIPECONF(pipe);
2020 temp = I915_READ(reg);
2021 if ((temp & PIPECONF_ENABLE) == 0) {
2022 I915_WRITE(reg, temp | PIPECONF_ENABLE);
2023 POSTING_READ(reg);
2024 udelay(100);
2025 }
2081 2026
2082 case DRM_MODE_DPMS_OFF: 2027 /* configure and enable CPU plane */
2083 DRM_DEBUG_KMS("crtc %d/%d dpms off\n", pipe, plane); 2028 reg = DSPCNTR(plane);
2029 temp = I915_READ(reg);
2030 if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
2031 I915_WRITE(reg, temp | DISPLAY_PLANE_ENABLE);
2032 intel_flush_display_plane(dev, plane);
2033 }
2034
2035 /* For PCH output, training FDI link */
2036 if (IS_GEN6(dev))
2037 gen6_fdi_link_train(crtc);
2038 else
2039 ironlake_fdi_link_train(crtc);
2040
2041 /* enable PCH DPLL */
2042 reg = PCH_DPLL(pipe);
2043 temp = I915_READ(reg);
2044 if ((temp & DPLL_VCO_ENABLE) == 0) {
2045 I915_WRITE(reg, temp | DPLL_VCO_ENABLE);
2046 POSTING_READ(reg);
2047 udelay(200);
2048 }
2049
2050 if (HAS_PCH_CPT(dev)) {
2051 /* Be sure PCH DPLL SEL is set */
2052 temp = I915_READ(PCH_DPLL_SEL);
2053 if (pipe == 0 && (temp & TRANSA_DPLL_ENABLE) == 0)
2054 temp |= (TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL);
2055 else if (pipe == 1 && (temp & TRANSB_DPLL_ENABLE) == 0)
2056 temp |= (TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
2057 I915_WRITE(PCH_DPLL_SEL, temp);
2058 }
2059
2060 /* set transcoder timing */
2061 I915_WRITE(TRANS_HTOTAL(pipe), I915_READ(HTOTAL(pipe)));
2062 I915_WRITE(TRANS_HBLANK(pipe), I915_READ(HBLANK(pipe)));
2063 I915_WRITE(TRANS_HSYNC(pipe), I915_READ(HSYNC(pipe)));
2064
2065 I915_WRITE(TRANS_VTOTAL(pipe), I915_READ(VTOTAL(pipe)));
2066 I915_WRITE(TRANS_VBLANK(pipe), I915_READ(VBLANK(pipe)));
2067 I915_WRITE(TRANS_VSYNC(pipe), I915_READ(VSYNC(pipe)));
2068
2069 /* enable normal train */
2070 reg = FDI_TX_CTL(pipe);
2071 temp = I915_READ(reg);
2072 temp &= ~FDI_LINK_TRAIN_NONE;
2073 temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
2074 I915_WRITE(reg, temp);
2084 2075
2085 drm_vblank_off(dev, pipe); 2076 reg = FDI_RX_CTL(pipe);
2086 /* Disable display plane */ 2077 temp = I915_READ(reg);
2087 temp = I915_READ(dspcntr_reg); 2078 if (HAS_PCH_CPT(dev)) {
2088 if ((temp & DISPLAY_PLANE_ENABLE) != 0) { 2079 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2089 I915_WRITE(dspcntr_reg, temp & ~DISPLAY_PLANE_ENABLE); 2080 temp |= FDI_LINK_TRAIN_NORMAL_CPT;
2090 /* Flush the plane changes */ 2081 } else {
2091 I915_WRITE(dspbase_reg, I915_READ(dspbase_reg)); 2082 temp &= ~FDI_LINK_TRAIN_NONE;
2092 I915_READ(dspbase_reg); 2083 temp |= FDI_LINK_TRAIN_NONE;
2084 }
2085 I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
2086
2087 /* wait one idle pattern time */
2088 POSTING_READ(reg);
2089 udelay(100);
2090
2091 /* For PCH DP, enable TRANS_DP_CTL */
2092 if (HAS_PCH_CPT(dev) &&
2093 intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
2094 reg = TRANS_DP_CTL(pipe);
2095 temp = I915_READ(reg);
2096 temp &= ~(TRANS_DP_PORT_SEL_MASK |
2097 TRANS_DP_SYNC_MASK);
2098 temp |= (TRANS_DP_OUTPUT_ENABLE |
2099 TRANS_DP_ENH_FRAMING);
2100
2101 if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC)
2102 temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
2103 if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC)
2104 temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;
2105
2106 switch (intel_trans_dp_port_sel(crtc)) {
2107 case PCH_DP_B:
2108 temp |= TRANS_DP_PORT_SEL_B;
2109 break;
2110 case PCH_DP_C:
2111 temp |= TRANS_DP_PORT_SEL_C;
2112 break;
2113 case PCH_DP_D:
2114 temp |= TRANS_DP_PORT_SEL_D;
2115 break;
2116 default:
2117 DRM_DEBUG_KMS("Wrong PCH DP port return. Guess port B\n");
2118 temp |= TRANS_DP_PORT_SEL_B;
2119 break;
2093 } 2120 }
2094 2121
2095 if (dev_priv->cfb_plane == plane && 2122 I915_WRITE(reg, temp);
2096 dev_priv->display.disable_fbc) 2123 }
2097 dev_priv->display.disable_fbc(dev);
2098 2124
2099 /* disable cpu pipe, disable after all planes disabled */ 2125 /* enable PCH transcoder */
2100 temp = I915_READ(pipeconf_reg); 2126 reg = TRANSCONF(pipe);
2101 if ((temp & PIPEACONF_ENABLE) != 0) { 2127 temp = I915_READ(reg);
2102 I915_WRITE(pipeconf_reg, temp & ~PIPEACONF_ENABLE); 2128 /*
2129 * make the BPC in transcoder be consistent with
2130 * that in pipeconf reg.
2131 */
2132 temp &= ~PIPE_BPC_MASK;
2133 temp |= I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK;
2134 I915_WRITE(reg, temp | TRANS_ENABLE);
2135 if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100))
2136 DRM_ERROR("failed to enable transcoder\n");
2103 2137
2104 /* wait for cpu pipe off, pipe state */ 2138 intel_crtc_load_lut(crtc);
2105 if (wait_for((I915_READ(pipeconf_reg) & I965_PIPECONF_ACTIVE) == 0, 50, 1)) 2139 intel_update_fbc(dev);
2106 DRM_ERROR("failed to turn off cpu pipe\n"); 2140 intel_crtc_update_cursor(crtc, true);
2107 } else 2141}
2108 DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
2109 2142
2110 udelay(100); 2143static void ironlake_crtc_disable(struct drm_crtc *crtc)
2144{
2145 struct drm_device *dev = crtc->dev;
2146 struct drm_i915_private *dev_priv = dev->dev_private;
2147 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2148 int pipe = intel_crtc->pipe;
2149 int plane = intel_crtc->plane;
2150 u32 reg, temp;
2111 2151
2112 /* Disable PF */ 2152 if (!intel_crtc->active)
2113 I915_WRITE(pipe ? PFB_CTL_1 : PFA_CTL_1, 0); 2153 return;
2114 I915_WRITE(pipe ? PFB_WIN_SZ : PFA_WIN_SZ, 0);
2115 2154
2116 /* disable CPU FDI tx and PCH FDI rx */ 2155 intel_crtc_wait_for_pending_flips(crtc);
2117 temp = I915_READ(fdi_tx_reg); 2156 drm_vblank_off(dev, pipe);
2118 I915_WRITE(fdi_tx_reg, temp & ~FDI_TX_ENABLE); 2157 intel_crtc_update_cursor(crtc, false);
2119 I915_READ(fdi_tx_reg);
2120 2158
2121 temp = I915_READ(fdi_rx_reg); 2159 /* Disable display plane */
2122 /* BPC in FDI rx is consistent with that in pipeconf */ 2160 reg = DSPCNTR(plane);
2123 temp &= ~(0x07 << 16); 2161 temp = I915_READ(reg);
2124 temp |= (pipe_bpc << 11); 2162 if (temp & DISPLAY_PLANE_ENABLE) {
2125 I915_WRITE(fdi_rx_reg, temp & ~FDI_RX_ENABLE); 2163 I915_WRITE(reg, temp & ~DISPLAY_PLANE_ENABLE);
2126 I915_READ(fdi_rx_reg); 2164 intel_flush_display_plane(dev, plane);
2165 }
2127 2166
2128 udelay(100); 2167 if (dev_priv->cfb_plane == plane &&
2168 dev_priv->display.disable_fbc)
2169 dev_priv->display.disable_fbc(dev);
2129 2170
2130 /* still set train pattern 1 */ 2171 /* disable cpu pipe, disable after all planes disabled */
2131 temp = I915_READ(fdi_tx_reg); 2172 reg = PIPECONF(pipe);
2173 temp = I915_READ(reg);
2174 if (temp & PIPECONF_ENABLE) {
2175 I915_WRITE(reg, temp & ~PIPECONF_ENABLE);
2176 /* wait for cpu pipe off, pipe state */
2177 if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0, 50))
2178 DRM_ERROR("failed to turn off cpu pipe\n");
2179 }
2180
2181 /* Disable PF */
2182 I915_WRITE(pipe ? PFB_CTL_1 : PFA_CTL_1, 0);
2183 I915_WRITE(pipe ? PFB_WIN_SZ : PFA_WIN_SZ, 0);
2184
2185 /* disable CPU FDI tx and PCH FDI rx */
2186 reg = FDI_TX_CTL(pipe);
2187 temp = I915_READ(reg);
2188 I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
2189 POSTING_READ(reg);
2190
2191 reg = FDI_RX_CTL(pipe);
2192 temp = I915_READ(reg);
2193 temp &= ~(0x7 << 16);
2194 temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
2195 I915_WRITE(reg, temp & ~FDI_RX_ENABLE);
2196
2197 POSTING_READ(reg);
2198 udelay(100);
2199
2200 /* still set train pattern 1 */
2201 reg = FDI_TX_CTL(pipe);
2202 temp = I915_READ(reg);
2203 temp &= ~FDI_LINK_TRAIN_NONE;
2204 temp |= FDI_LINK_TRAIN_PATTERN_1;
2205 I915_WRITE(reg, temp);
2206
2207 reg = FDI_RX_CTL(pipe);
2208 temp = I915_READ(reg);
2209 if (HAS_PCH_CPT(dev)) {
2210 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2211 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
2212 } else {
2132 temp &= ~FDI_LINK_TRAIN_NONE; 2213 temp &= ~FDI_LINK_TRAIN_NONE;
2133 temp |= FDI_LINK_TRAIN_PATTERN_1; 2214 temp |= FDI_LINK_TRAIN_PATTERN_1;
2134 I915_WRITE(fdi_tx_reg, temp); 2215 }
2135 POSTING_READ(fdi_tx_reg); 2216 /* BPC in FDI rx is consistent with that in PIPECONF */
2136 2217 temp &= ~(0x07 << 16);
2137 temp = I915_READ(fdi_rx_reg); 2218 temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
2138 if (HAS_PCH_CPT(dev)) { 2219 I915_WRITE(reg, temp);
2139 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2140 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
2141 } else {
2142 temp &= ~FDI_LINK_TRAIN_NONE;
2143 temp |= FDI_LINK_TRAIN_PATTERN_1;
2144 }
2145 I915_WRITE(fdi_rx_reg, temp);
2146 POSTING_READ(fdi_rx_reg);
2147 2220
2148 udelay(100); 2221 POSTING_READ(reg);
2222 udelay(100);
2149 2223
2150 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { 2224 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
2151 temp = I915_READ(PCH_LVDS); 2225 temp = I915_READ(PCH_LVDS);
2226 if (temp & LVDS_PORT_EN) {
2152 I915_WRITE(PCH_LVDS, temp & ~LVDS_PORT_EN); 2227 I915_WRITE(PCH_LVDS, temp & ~LVDS_PORT_EN);
2153 I915_READ(PCH_LVDS); 2228 POSTING_READ(PCH_LVDS);
2154 udelay(100); 2229 udelay(100);
2155 } 2230 }
2231 }
2156 2232
2157 /* disable PCH transcoder */ 2233 /* disable PCH transcoder */
2158 temp = I915_READ(transconf_reg); 2234 reg = TRANSCONF(plane);
2159 if ((temp & TRANS_ENABLE) != 0) { 2235 temp = I915_READ(reg);
2160 I915_WRITE(transconf_reg, temp & ~TRANS_ENABLE); 2236 if (temp & TRANS_ENABLE) {
2237 I915_WRITE(reg, temp & ~TRANS_ENABLE);
2238 /* wait for PCH transcoder off, transcoder state */
2239 if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50))
2240 DRM_ERROR("failed to disable transcoder\n");
2241 }
2161 2242
2162 /* wait for PCH transcoder off, transcoder state */ 2243 if (HAS_PCH_CPT(dev)) {
2163 if (wait_for((I915_READ(transconf_reg) & TRANS_STATE_ENABLE) == 0, 50, 1)) 2244 /* disable TRANS_DP_CTL */
2164 DRM_ERROR("failed to disable transcoder\n"); 2245 reg = TRANS_DP_CTL(pipe);
2165 } 2246 temp = I915_READ(reg);
2247 temp &= ~(TRANS_DP_OUTPUT_ENABLE | TRANS_DP_PORT_SEL_MASK);
2248 I915_WRITE(reg, temp);
2166 2249
2167 temp = I915_READ(transconf_reg); 2250 /* disable DPLL_SEL */
2168 /* BPC in transcoder is consistent with that in pipeconf */ 2251 temp = I915_READ(PCH_DPLL_SEL);
2169 temp &= ~PIPE_BPC_MASK; 2252 if (pipe == 0)
2170 temp |= pipe_bpc; 2253 temp &= ~(TRANSA_DPLL_ENABLE | TRANSA_DPLLB_SEL);
2171 I915_WRITE(transconf_reg, temp); 2254 else
2172 I915_READ(transconf_reg); 2255 temp &= ~(TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
2173 udelay(100); 2256 I915_WRITE(PCH_DPLL_SEL, temp);
2257 }
2174 2258
2175 if (HAS_PCH_CPT(dev)) { 2259 /* disable PCH DPLL */
2176 /* disable TRANS_DP_CTL */ 2260 reg = PCH_DPLL(pipe);
2177 int trans_dp_ctl = (pipe == 0) ? TRANS_DP_CTL_A : TRANS_DP_CTL_B; 2261 temp = I915_READ(reg);
2178 int reg; 2262 I915_WRITE(reg, temp & ~DPLL_VCO_ENABLE);
2179 2263
2180 reg = I915_READ(trans_dp_ctl); 2264 /* Switch from PCDclk to Rawclk */
2181 reg &= ~(TRANS_DP_OUTPUT_ENABLE | TRANS_DP_PORT_SEL_MASK); 2265 reg = FDI_RX_CTL(pipe);
2182 I915_WRITE(trans_dp_ctl, reg); 2266 temp = I915_READ(reg);
2183 POSTING_READ(trans_dp_ctl); 2267 I915_WRITE(reg, temp & ~FDI_PCDCLK);
2184 2268
2185 /* disable DPLL_SEL */ 2269 /* Disable CPU FDI TX PLL */
2186 temp = I915_READ(PCH_DPLL_SEL); 2270 reg = FDI_TX_CTL(pipe);
2187 if (trans_dpll_sel == 0) 2271 temp = I915_READ(reg);
2188 temp &= ~(TRANSA_DPLL_ENABLE | TRANSA_DPLLB_SEL); 2272 I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);
2189 else
2190 temp &= ~(TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
2191 I915_WRITE(PCH_DPLL_SEL, temp);
2192 I915_READ(PCH_DPLL_SEL);
2193 2273
2194 } 2274 POSTING_READ(reg);
2275 udelay(100);
2195 2276
2196 /* disable PCH DPLL */ 2277 reg = FDI_RX_CTL(pipe);
2197 temp = I915_READ(pch_dpll_reg); 2278 temp = I915_READ(reg);
2198 I915_WRITE(pch_dpll_reg, temp & ~DPLL_VCO_ENABLE); 2279 I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);
2199 I915_READ(pch_dpll_reg);
2200
2201 /* Switch from PCDclk to Rawclk */
2202 temp = I915_READ(fdi_rx_reg);
2203 temp &= ~FDI_SEL_PCDCLK;
2204 I915_WRITE(fdi_rx_reg, temp);
2205 I915_READ(fdi_rx_reg);
2206
2207 /* Disable CPU FDI TX PLL */
2208 temp = I915_READ(fdi_tx_reg);
2209 I915_WRITE(fdi_tx_reg, temp & ~FDI_TX_PLL_ENABLE);
2210 I915_READ(fdi_tx_reg);
2211 udelay(100);
2212 2280
2213 temp = I915_READ(fdi_rx_reg); 2281 /* Wait for the clocks to turn off. */
2214 temp &= ~FDI_RX_PLL_ENABLE; 2282 POSTING_READ(reg);
2215 I915_WRITE(fdi_rx_reg, temp); 2283 udelay(100);
2216 I915_READ(fdi_rx_reg);
2217 2284
2218 /* Wait for the clocks to turn off. */ 2285 intel_crtc->active = false;
2219 udelay(100); 2286 intel_update_watermarks(dev);
2287 intel_update_fbc(dev);
2288 intel_clear_scanline_wait(dev);
2289}
2290
2291static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
2292{
2293 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2294 int pipe = intel_crtc->pipe;
2295 int plane = intel_crtc->plane;
2296
2297 /* XXX: When our outputs are all unaware of DPMS modes other than off
2298 * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
2299 */
2300 switch (mode) {
2301 case DRM_MODE_DPMS_ON:
2302 case DRM_MODE_DPMS_STANDBY:
2303 case DRM_MODE_DPMS_SUSPEND:
2304 DRM_DEBUG_KMS("crtc %d/%d dpms on\n", pipe, plane);
2305 ironlake_crtc_enable(crtc);
2306 break;
2307
2308 case DRM_MODE_DPMS_OFF:
2309 DRM_DEBUG_KMS("crtc %d/%d dpms off\n", pipe, plane);
2310 ironlake_crtc_disable(crtc);
2220 break; 2311 break;
2221 } 2312 }
2222} 2313}
2223 2314
2224static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable) 2315static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable)
2225{ 2316{
2226 struct intel_overlay *overlay;
2227 int ret;
2228
2229 if (!enable && intel_crtc->overlay) { 2317 if (!enable && intel_crtc->overlay) {
2230 overlay = intel_crtc->overlay; 2318 struct drm_device *dev = intel_crtc->base.dev;
2231 mutex_lock(&overlay->dev->struct_mutex);
2232 for (;;) {
2233 ret = intel_overlay_switch_off(overlay);
2234 if (ret == 0)
2235 break;
2236 2319
2237 ret = intel_overlay_recover_from_interrupt(overlay, 0); 2320 mutex_lock(&dev->struct_mutex);
2238 if (ret != 0) { 2321 (void) intel_overlay_switch_off(intel_crtc->overlay, false);
2239 /* overlay doesn't react anymore. Usually 2322 mutex_unlock(&dev->struct_mutex);
2240 * results in a black screen and an unkillable
2241 * X server. */
2242 BUG();
2243 overlay->hw_wedged = HW_WEDGED;
2244 break;
2245 }
2246 }
2247 mutex_unlock(&overlay->dev->struct_mutex);
2248 } 2323 }
2249 /* Let userspace switch the overlay on again. In most cases userspace
2250 * has to recompute where to put it anyway. */
2251 2324
2252 return; 2325 /* Let userspace switch the overlay on again. In most cases userspace
2326 * has to recompute where to put it anyway.
2327 */
2253} 2328}
2254 2329
2255static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode) 2330static void i9xx_crtc_enable(struct drm_crtc *crtc)
2256{ 2331{
2257 struct drm_device *dev = crtc->dev; 2332 struct drm_device *dev = crtc->dev;
2258 struct drm_i915_private *dev_priv = dev->dev_private; 2333 struct drm_i915_private *dev_priv = dev->dev_private;
2259 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 2334 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2260 int pipe = intel_crtc->pipe; 2335 int pipe = intel_crtc->pipe;
2261 int plane = intel_crtc->plane; 2336 int plane = intel_crtc->plane;
2262 int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B; 2337 u32 reg, temp;
2263 int dspcntr_reg = (plane == 0) ? DSPACNTR : DSPBCNTR;
2264 int dspbase_reg = (plane == 0) ? DSPAADDR : DSPBADDR;
2265 int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
2266 u32 temp;
2267 2338
2268 /* XXX: When our outputs are all unaware of DPMS modes other than off 2339 if (intel_crtc->active)
2269 * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC. 2340 return;
2270 */
2271 switch (mode) {
2272 case DRM_MODE_DPMS_ON:
2273 case DRM_MODE_DPMS_STANDBY:
2274 case DRM_MODE_DPMS_SUSPEND:
2275 /* Enable the DPLL */
2276 temp = I915_READ(dpll_reg);
2277 if ((temp & DPLL_VCO_ENABLE) == 0) {
2278 I915_WRITE(dpll_reg, temp);
2279 I915_READ(dpll_reg);
2280 /* Wait for the clocks to stabilize. */
2281 udelay(150);
2282 I915_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
2283 I915_READ(dpll_reg);
2284 /* Wait for the clocks to stabilize. */
2285 udelay(150);
2286 I915_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
2287 I915_READ(dpll_reg);
2288 /* Wait for the clocks to stabilize. */
2289 udelay(150);
2290 }
2291 2341
2292 /* Enable the pipe */ 2342 intel_crtc->active = true;
2293 temp = I915_READ(pipeconf_reg); 2343 intel_update_watermarks(dev);
2294 if ((temp & PIPEACONF_ENABLE) == 0)
2295 I915_WRITE(pipeconf_reg, temp | PIPEACONF_ENABLE);
2296
2297 /* Enable the plane */
2298 temp = I915_READ(dspcntr_reg);
2299 if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
2300 I915_WRITE(dspcntr_reg, temp | DISPLAY_PLANE_ENABLE);
2301 /* Flush the plane changes */
2302 I915_WRITE(dspbase_reg, I915_READ(dspbase_reg));
2303 }
2304 2344
2305 intel_crtc_load_lut(crtc); 2345 /* Enable the DPLL */
2346 reg = DPLL(pipe);
2347 temp = I915_READ(reg);
2348 if ((temp & DPLL_VCO_ENABLE) == 0) {
2349 I915_WRITE(reg, temp);
2306 2350
2307 if ((IS_I965G(dev) || plane == 0)) 2351 /* Wait for the clocks to stabilize. */
2308 intel_update_fbc(crtc, &crtc->mode); 2352 POSTING_READ(reg);
2353 udelay(150);
2309 2354
2310 /* Give the overlay scaler a chance to enable if it's on this pipe */ 2355 I915_WRITE(reg, temp | DPLL_VCO_ENABLE);
2311 intel_crtc_dpms_overlay(intel_crtc, true); 2356
2312 break; 2357 /* Wait for the clocks to stabilize. */
2313 case DRM_MODE_DPMS_OFF: 2358 POSTING_READ(reg);
2314 /* Give the overlay scaler a chance to disable if it's on this pipe */ 2359 udelay(150);
2315 intel_crtc_dpms_overlay(intel_crtc, false); 2360
2316 drm_vblank_off(dev, pipe); 2361 I915_WRITE(reg, temp | DPLL_VCO_ENABLE);
2317 2362
2318 if (dev_priv->cfb_plane == plane && 2363 /* Wait for the clocks to stabilize. */
2319 dev_priv->display.disable_fbc) 2364 POSTING_READ(reg);
2320 dev_priv->display.disable_fbc(dev); 2365 udelay(150);
2321 2366 }
2322 /* Disable display plane */ 2367
2323 temp = I915_READ(dspcntr_reg); 2368 /* Enable the pipe */
2324 if ((temp & DISPLAY_PLANE_ENABLE) != 0) { 2369 reg = PIPECONF(pipe);
2325 I915_WRITE(dspcntr_reg, temp & ~DISPLAY_PLANE_ENABLE); 2370 temp = I915_READ(reg);
2326 /* Flush the plane changes */ 2371 if ((temp & PIPECONF_ENABLE) == 0)
2327 I915_WRITE(dspbase_reg, I915_READ(dspbase_reg)); 2372 I915_WRITE(reg, temp | PIPECONF_ENABLE);
2328 I915_READ(dspbase_reg); 2373
2329 } 2374 /* Enable the plane */
2375 reg = DSPCNTR(plane);
2376 temp = I915_READ(reg);
2377 if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
2378 I915_WRITE(reg, temp | DISPLAY_PLANE_ENABLE);
2379 intel_flush_display_plane(dev, plane);
2380 }
2381
2382 intel_crtc_load_lut(crtc);
2383 intel_update_fbc(dev);
2384
2385 /* Give the overlay scaler a chance to enable if it's on this pipe */
2386 intel_crtc_dpms_overlay(intel_crtc, true);
2387 intel_crtc_update_cursor(crtc, true);
2388}
2389
2390static void i9xx_crtc_disable(struct drm_crtc *crtc)
2391{
2392 struct drm_device *dev = crtc->dev;
2393 struct drm_i915_private *dev_priv = dev->dev_private;
2394 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2395 int pipe = intel_crtc->pipe;
2396 int plane = intel_crtc->plane;
2397 u32 reg, temp;
2398
2399 if (!intel_crtc->active)
2400 return;
2401
2402 /* Give the overlay scaler a chance to disable if it's on this pipe */
2403 intel_crtc_wait_for_pending_flips(crtc);
2404 drm_vblank_off(dev, pipe);
2405 intel_crtc_dpms_overlay(intel_crtc, false);
2406 intel_crtc_update_cursor(crtc, false);
2407
2408 if (dev_priv->cfb_plane == plane &&
2409 dev_priv->display.disable_fbc)
2410 dev_priv->display.disable_fbc(dev);
2411
2412 /* Disable display plane */
2413 reg = DSPCNTR(plane);
2414 temp = I915_READ(reg);
2415 if (temp & DISPLAY_PLANE_ENABLE) {
2416 I915_WRITE(reg, temp & ~DISPLAY_PLANE_ENABLE);
2417 /* Flush the plane changes */
2418 intel_flush_display_plane(dev, plane);
2330 2419
2331 /* Wait for vblank for the disable to take effect */ 2420 /* Wait for vblank for the disable to take effect */
2332 intel_wait_for_vblank_off(dev, pipe); 2421 if (IS_GEN2(dev))
2333 2422 intel_wait_for_vblank(dev, pipe);
2334 /* Don't disable pipe A or pipe A PLLs if needed */ 2423 }
2335 if (pipeconf_reg == PIPEACONF &&
2336 (dev_priv->quirks & QUIRK_PIPEA_FORCE))
2337 goto skip_pipe_off;
2338
2339 /* Next, disable display pipes */
2340 temp = I915_READ(pipeconf_reg);
2341 if ((temp & PIPEACONF_ENABLE) != 0) {
2342 I915_WRITE(pipeconf_reg, temp & ~PIPEACONF_ENABLE);
2343 I915_READ(pipeconf_reg);
2344 }
2345 2424
2346 /* Wait for vblank for the disable to take effect. */ 2425 /* Don't disable pipe A or pipe A PLLs if needed */
2347 intel_wait_for_vblank_off(dev, pipe); 2426 if (pipe == 0 && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
2427 goto done;
2428
2429 /* Next, disable display pipes */
2430 reg = PIPECONF(pipe);
2431 temp = I915_READ(reg);
2432 if (temp & PIPECONF_ENABLE) {
2433 I915_WRITE(reg, temp & ~PIPECONF_ENABLE);
2434
2435 /* Wait for the pipe to turn off */
2436 POSTING_READ(reg);
2437 intel_wait_for_pipe_off(dev, pipe);
2438 }
2439
2440 reg = DPLL(pipe);
2441 temp = I915_READ(reg);
2442 if (temp & DPLL_VCO_ENABLE) {
2443 I915_WRITE(reg, temp & ~DPLL_VCO_ENABLE);
2348 2444
2349 temp = I915_READ(dpll_reg);
2350 if ((temp & DPLL_VCO_ENABLE) != 0) {
2351 I915_WRITE(dpll_reg, temp & ~DPLL_VCO_ENABLE);
2352 I915_READ(dpll_reg);
2353 }
2354 skip_pipe_off:
2355 /* Wait for the clocks to turn off. */ 2445 /* Wait for the clocks to turn off. */
2446 POSTING_READ(reg);
2356 udelay(150); 2447 udelay(150);
2448 }
2449
2450done:
2451 intel_crtc->active = false;
2452 intel_update_fbc(dev);
2453 intel_update_watermarks(dev);
2454 intel_clear_scanline_wait(dev);
2455}
2456
2457static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
2458{
2459 /* XXX: When our outputs are all unaware of DPMS modes other than off
2460 * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
2461 */
2462 switch (mode) {
2463 case DRM_MODE_DPMS_ON:
2464 case DRM_MODE_DPMS_STANDBY:
2465 case DRM_MODE_DPMS_SUSPEND:
2466 i9xx_crtc_enable(crtc);
2467 break;
2468 case DRM_MODE_DPMS_OFF:
2469 i9xx_crtc_disable(crtc);
2357 break; 2470 break;
2358 } 2471 }
2359} 2472}
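
The DPMS entry points are now thin wrappers over the enable/disable helpers, and those helpers are made idempotent by the new intel_crtc->active flag, so stacked ON/STANDBY/SUSPEND calls recompute watermarks and FBC exactly once per real transition. A toy model of that guard (puts() stands in for the hardware work):

    #include <stdbool.h>
    #include <stdio.h>

    struct crtc_state { bool active; };

    static void crtc_enable(struct crtc_state *c)
    {
            if (c->active)
                    return;                 /* already up: nothing to redo */
            c->active = true;
            puts("update watermarks, train link, enable pipe and plane");
    }

    static void crtc_disable(struct crtc_state *c)
    {
            if (!c->active)
                    return;                 /* already down */
            puts("wait for flips, disable plane and pipe, stop clocks");
            c->active = false;
    }

    int main(void)
    {
            struct crtc_state c = { false };

            crtc_enable(&c);
            crtc_enable(&c);                /* idempotent: second call is a no-op */
            crtc_disable(&c);
            return 0;
    }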
@@ -2374,26 +2487,9 @@ static void intel_crtc_dpms(struct drm_crtc *crtc, int mode)
2374 return; 2487 return;
2375 2488
2376 intel_crtc->dpms_mode = mode; 2489 intel_crtc->dpms_mode = mode;
2377 intel_crtc->cursor_on = mode == DRM_MODE_DPMS_ON;
2378
2379 /* When switching on the display, ensure that SR is disabled
2380 * with multiple pipes prior to enabling to new pipe.
2381 *
2382 * When switching off the display, make sure the cursor is
2383 * properly hidden prior to disabling the pipe.
2384 */
2385 if (mode == DRM_MODE_DPMS_ON)
2386 intel_update_watermarks(dev);
2387 else
2388 intel_crtc_update_cursor(crtc);
2389 2490
2390 dev_priv->display.dpms(crtc, mode); 2491 dev_priv->display.dpms(crtc, mode);
2391 2492
2392 if (mode == DRM_MODE_DPMS_ON)
2393 intel_crtc_update_cursor(crtc);
2394 else
2395 intel_update_watermarks(dev);
2396
2397 if (!dev->primary->master) 2493 if (!dev->primary->master)
2398 return; 2494 return;
2399 2495
@@ -2418,16 +2514,46 @@ static void intel_crtc_dpms(struct drm_crtc *crtc, int mode)
2418 } 2514 }
2419} 2515}
2420 2516
2421static void intel_crtc_prepare (struct drm_crtc *crtc) 2517static void intel_crtc_disable(struct drm_crtc *crtc)
2422{ 2518{
2423 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; 2519 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
2520 struct drm_device *dev = crtc->dev;
2521
2424 crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF); 2522 crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
2523
2524 if (crtc->fb) {
2525 mutex_lock(&dev->struct_mutex);
2526 i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj);
2527 mutex_unlock(&dev->struct_mutex);
2528 }
2425} 2529}
2426 2530
2427static void intel_crtc_commit (struct drm_crtc *crtc) 2531/* Prepare for a mode set.
2532 *
2533 * Note we could be a lot smarter here. We need to figure out which outputs
2534 * will be enabled, which disabled (in short, how the config will change)
2535 * and perform the minimum necessary steps to accomplish that, e.g. updating
2536 * watermarks, FBC configuration, making sure PLLs are programmed correctly,
2537 * panel fitting is in the proper state, etc.
2538 */
2539static void i9xx_crtc_prepare(struct drm_crtc *crtc)
2428{ 2540{
2429 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; 2541 i9xx_crtc_disable(crtc);
2430 crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON); 2542}
2543
2544static void i9xx_crtc_commit(struct drm_crtc *crtc)
2545{
2546 i9xx_crtc_enable(crtc);
2547}
2548
2549static void ironlake_crtc_prepare(struct drm_crtc *crtc)
2550{
2551 ironlake_crtc_disable(crtc);
2552}
2553
2554static void ironlake_crtc_commit(struct drm_crtc *crtc)
2555{
2556 ironlake_crtc_enable(crtc);
2431} 2557}
2432 2558
2433void intel_encoder_prepare (struct drm_encoder *encoder) 2559void intel_encoder_prepare (struct drm_encoder *encoder)
@@ -2446,13 +2572,7 @@ void intel_encoder_commit (struct drm_encoder *encoder)
2446 2572
2447void intel_encoder_destroy(struct drm_encoder *encoder) 2573void intel_encoder_destroy(struct drm_encoder *encoder)
2448{ 2574{
2449 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); 2575 struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
2450
2451 if (intel_encoder->ddc_bus)
2452 intel_i2c_destroy(intel_encoder->ddc_bus);
2453
2454 if (intel_encoder->i2c_bus)
2455 intel_i2c_destroy(intel_encoder->i2c_bus);
2456 2576
2457 drm_encoder_cleanup(encoder); 2577 drm_encoder_cleanup(encoder);
2458 kfree(intel_encoder); 2578 kfree(intel_encoder);
@@ -2543,33 +2663,6 @@ static int i830_get_display_clock_speed(struct drm_device *dev)
2543 return 133000; 2663 return 133000;
2544} 2664}
2545 2665
2546/**
2547 * Return the pipe currently connected to the panel fitter,
2548 * or -1 if the panel fitter is not present or not in use
2549 */
2550int intel_panel_fitter_pipe (struct drm_device *dev)
2551{
2552 struct drm_i915_private *dev_priv = dev->dev_private;
2553 u32 pfit_control;
2554
2555 /* i830 doesn't have a panel fitter */
2556 if (IS_I830(dev))
2557 return -1;
2558
2559 pfit_control = I915_READ(PFIT_CONTROL);
2560
2561 /* See if the panel fitter is in use */
2562 if ((pfit_control & PFIT_ENABLE) == 0)
2563 return -1;
2564
2565 /* 965 can place panel fitter on either pipe */
2566 if (IS_I965G(dev))
2567 return (pfit_control >> 29) & 0x3;
2568
2569 /* older chips can only use pipe 1 */
2570 return 1;
2571}
2572
2573struct fdi_m_n { 2666struct fdi_m_n {
2574 u32 tu; 2667 u32 tu;
2575 u32 gmch_m; 2668 u32 gmch_m;
@@ -2888,7 +2981,7 @@ static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
2888 size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size; 2981 size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;
2889 2982
2890 DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb, 2983 DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
2891 plane ? "B" : "A", size); 2984 plane ? "B" : "A", size);
2892 2985
2893 return size; 2986 return size;
2894} 2987}
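
/* Editor's note: a minimal standalone sketch of the DSPARB split read back
 * above. Plane A's FIFO allocation sits in the low bits and plane B's size
 * is the C-start offset minus plane A's share; the shift value here is an
 * assumption for illustration, not taken from i915_reg.h. */
#include <stdio.h>

#define DSPARB_CSTART_SHIFT 7 /* assumed field offset */

int main(void)
{
	unsigned dsparb = (96u << DSPARB_CSTART_SHIFT) | 32u;
	int planea = dsparb & 0x7f; /* plane A: 32 cachelines */
	int planeb = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - planea;

	printf("A=%d B=%d\n", planea, planeb); /* A=32 B=64 */
	return 0;
}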
@@ -2905,7 +2998,7 @@ static int i85x_get_fifo_size(struct drm_device *dev, int plane)
2905 size >>= 1; /* Convert to cachelines */ 2998 size >>= 1; /* Convert to cachelines */
2906 2999
2907 DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb, 3000 DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
2908 plane ? "B" : "A", size); 3001 plane ? "B" : "A", size);
2909 3002
2910 return size; 3003 return size;
2911} 3004}
@@ -2920,8 +3013,8 @@ static int i845_get_fifo_size(struct drm_device *dev, int plane)
2920 size >>= 2; /* Convert to cachelines */ 3013 size >>= 2; /* Convert to cachelines */
2921 3014
2922 DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb, 3015 DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
2923 plane ? "B" : "A", 3016 plane ? "B" : "A",
2924 size); 3017 size);
2925 3018
2926 return size; 3019 return size;
2927} 3020}
@@ -2936,14 +3029,14 @@ static int i830_get_fifo_size(struct drm_device *dev, int plane)
2936 size >>= 1; /* Convert to cachelines */ 3029 size >>= 1; /* Convert to cachelines */
2937 3030
2938 DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb, 3031 DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
2939 plane ? "B" : "A", size); 3032 plane ? "B" : "A", size);
2940 3033
2941 return size; 3034 return size;
2942} 3035}
2943 3036
2944static void pineview_update_wm(struct drm_device *dev, int planea_clock, 3037static void pineview_update_wm(struct drm_device *dev, int planea_clock,
2945 int planeb_clock, int sr_hdisplay, int unused, 3038 int planeb_clock, int sr_hdisplay, int unused,
2946 int pixel_size) 3039 int pixel_size)
2947{ 3040{
2948 struct drm_i915_private *dev_priv = dev->dev_private; 3041 struct drm_i915_private *dev_priv = dev->dev_private;
2949 const struct cxsr_latency *latency; 3042 const struct cxsr_latency *latency;
@@ -3055,13 +3148,13 @@ static void g4x_update_wm(struct drm_device *dev, int planea_clock,
3055 3148
3056 /* Use ns/us then divide to preserve precision */ 3149 /* Use ns/us then divide to preserve precision */
3057 sr_entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) * 3150 sr_entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
3058 pixel_size * sr_hdisplay; 3151 pixel_size * sr_hdisplay;
3059 sr_entries = DIV_ROUND_UP(sr_entries, cacheline_size); 3152 sr_entries = DIV_ROUND_UP(sr_entries, cacheline_size);
3060 3153
3061 entries_required = (((sr_latency_ns / line_time_us) + 3154 entries_required = (((sr_latency_ns / line_time_us) +
3062 1000) / 1000) * pixel_size * 64; 3155 1000) / 1000) * pixel_size * 64;
3063 entries_required = DIV_ROUND_UP(entries_required, 3156 entries_required = DIV_ROUND_UP(entries_required,
3064 g4x_cursor_wm_info.cacheline_size); 3157 g4x_cursor_wm_info.cacheline_size);
3065 cursor_sr = entries_required + g4x_cursor_wm_info.guard_size; 3158 cursor_sr = entries_required + g4x_cursor_wm_info.guard_size;
3066 3159
3067 if (cursor_sr > g4x_cursor_wm_info.max_wm) 3160 if (cursor_sr > g4x_cursor_wm_info.max_wm)
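
/* Editor's note: a worked instance of the self-refresh sizing in the hunk
 * above, using hypothetical numbers (12000ns SR latency, 16us line time,
 * 32bpp, 1024px visible width). The latency is first rounded up to whole
 * scanlines, then converted to bytes and cachelines. */
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	int sr_latency_ns = 12000, line_time_us = 16;
	int pixel_size = 4, sr_hdisplay = 1024, cacheline_size = 64;

	/* ((12000 / 16) + 1000) / 1000 = 1 scanline of latency */
	int sr_entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			 pixel_size * sr_hdisplay;

	sr_entries = DIV_ROUND_UP(sr_entries, cacheline_size);
	printf("%d\n", sr_entries); /* 4096 bytes -> 64 cachelines */
	return 0;
}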
@@ -3073,7 +3166,7 @@ static void g4x_update_wm(struct drm_device *dev, int planea_clock,
3073 } else { 3166 } else {
3074 /* Turn off self refresh if both pipes are enabled */ 3167 /* Turn off self refresh if both pipes are enabled */
3075 I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF) 3168 I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
3076 & ~FW_BLC_SELF_EN); 3169 & ~FW_BLC_SELF_EN);
3077 } 3170 }
3078 3171
3079 DRM_DEBUG("Setting FIFO watermarks - A: %d, B: %d, SR %d\n", 3172 DRM_DEBUG("Setting FIFO watermarks - A: %d, B: %d, SR %d\n",
@@ -3111,7 +3204,7 @@ static void i965_update_wm(struct drm_device *dev, int planea_clock,
3111 3204
3112 /* Use ns/us then divide to preserve precision */ 3205 /* Use ns/us then divide to preserve precision */
3113 sr_entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) * 3206 sr_entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
3114 pixel_size * sr_hdisplay; 3207 pixel_size * sr_hdisplay;
3115 sr_entries = DIV_ROUND_UP(sr_entries, I915_FIFO_LINE_SIZE); 3208 sr_entries = DIV_ROUND_UP(sr_entries, I915_FIFO_LINE_SIZE);
3116 DRM_DEBUG("self-refresh entries: %d\n", sr_entries); 3209 DRM_DEBUG("self-refresh entries: %d\n", sr_entries);
3117 srwm = I965_FIFO_SIZE - sr_entries; 3210 srwm = I965_FIFO_SIZE - sr_entries;
@@ -3120,11 +3213,11 @@ static void i965_update_wm(struct drm_device *dev, int planea_clock,
3120 srwm &= 0x1ff; 3213 srwm &= 0x1ff;
3121 3214
3122 sr_entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) * 3215 sr_entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
3123 pixel_size * 64; 3216 pixel_size * 64;
3124 sr_entries = DIV_ROUND_UP(sr_entries, 3217 sr_entries = DIV_ROUND_UP(sr_entries,
3125 i965_cursor_wm_info.cacheline_size); 3218 i965_cursor_wm_info.cacheline_size);
3126 cursor_sr = i965_cursor_wm_info.fifo_size - 3219 cursor_sr = i965_cursor_wm_info.fifo_size -
3127 (sr_entries + i965_cursor_wm_info.guard_size); 3220 (sr_entries + i965_cursor_wm_info.guard_size);
3128 3221
3129 if (cursor_sr > i965_cursor_wm_info.max_wm) 3222 if (cursor_sr > i965_cursor_wm_info.max_wm)
3130 cursor_sr = i965_cursor_wm_info.max_wm; 3223 cursor_sr = i965_cursor_wm_info.max_wm;
@@ -3132,11 +3225,11 @@ static void i965_update_wm(struct drm_device *dev, int planea_clock,
3132 DRM_DEBUG_KMS("self-refresh watermark: display plane %d " 3225 DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
3133 "cursor %d\n", srwm, cursor_sr); 3226 "cursor %d\n", srwm, cursor_sr);
3134 3227
3135 if (IS_I965GM(dev)) 3228 if (IS_CRESTLINE(dev))
3136 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN); 3229 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
3137 } else { 3230 } else {
3138 /* Turn off self refresh if both pipes are enabled */ 3231 /* Turn off self refresh if both pipes are enabled */
3139 if (IS_I965GM(dev)) 3232 if (IS_CRESTLINE(dev))
3140 I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF) 3233 I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
3141 & ~FW_BLC_SELF_EN); 3234 & ~FW_BLC_SELF_EN);
3142 } 3235 }
@@ -3166,9 +3259,9 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock,
3166 int sr_clock, sr_entries = 0; 3259 int sr_clock, sr_entries = 0;
3167 3260
3168 /* Create copies of the base settings for each pipe */ 3261 /* Create copies of the base settings for each pipe */
3169 if (IS_I965GM(dev) || IS_I945GM(dev)) 3262 if (IS_CRESTLINE(dev) || IS_I945GM(dev))
3170 planea_params = planeb_params = i945_wm_info; 3263 planea_params = planeb_params = i945_wm_info;
3171 else if (IS_I9XX(dev)) 3264 else if (!IS_GEN2(dev))
3172 planea_params = planeb_params = i915_wm_info; 3265 planea_params = planeb_params = i915_wm_info;
3173 else 3266 else
3174 planea_params = planeb_params = i855_wm_info; 3267 planea_params = planeb_params = i855_wm_info;
@@ -3203,7 +3296,7 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock,
3203 3296
3204 /* Use ns/us then divide to preserve precision */ 3297 /* Use ns/us then divide to preserve precision */
3205 sr_entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) * 3298 sr_entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
3206 pixel_size * sr_hdisplay; 3299 pixel_size * sr_hdisplay;
3207 sr_entries = DIV_ROUND_UP(sr_entries, cacheline_size); 3300 sr_entries = DIV_ROUND_UP(sr_entries, cacheline_size);
3208 DRM_DEBUG_KMS("self-refresh entries: %d\n", sr_entries); 3301 DRM_DEBUG_KMS("self-refresh entries: %d\n", sr_entries);
3209 srwm = total_size - sr_entries; 3302 srwm = total_size - sr_entries;
@@ -3228,7 +3321,7 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock,
3228 } 3321 }
3229 3322
3230 DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n", 3323 DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
3231 planea_wm, planeb_wm, cwm, srwm); 3324 planea_wm, planeb_wm, cwm, srwm);
3232 3325
3233 fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f); 3326 fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
3234 fwater_hi = (cwm & 0x1f); 3327 fwater_hi = (cwm & 0x1f);
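
/* Editor's note: the FW_BLC packing above in isolation, with hypothetical
 * watermark values. Plane A lands in bits 5:0, plane B in bits 21:16 of
 * fwater_lo; the cursor watermark uses the low 5 bits of fwater_hi. */
#include <stdio.h>

int main(void)
{
	unsigned planea_wm = 8, planeb_wm = 16, cwm = 3;
	unsigned fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
	unsigned fwater_hi = cwm & 0x1f;

	printf("lo=0x%08x hi=0x%08x\n", fwater_lo, fwater_hi);
	/* prints lo=0x00100008 hi=0x00000003 */
	return 0;
}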
@@ -3262,146 +3355,130 @@ static void i830_update_wm(struct drm_device *dev, int planea_clock, int unused,
3262#define ILK_LP0_PLANE_LATENCY 700 3355#define ILK_LP0_PLANE_LATENCY 700
3263#define ILK_LP0_CURSOR_LATENCY 1300 3356#define ILK_LP0_CURSOR_LATENCY 1300
3264 3357
3265static void ironlake_update_wm(struct drm_device *dev, int planea_clock, 3358static bool ironlake_compute_wm0(struct drm_device *dev,
3266 int planeb_clock, int sr_hdisplay, int sr_htotal, 3359 int pipe,
3267 int pixel_size) 3360 int *plane_wm,
3361 int *cursor_wm)
3268{ 3362{
3269 struct drm_i915_private *dev_priv = dev->dev_private;
3270 int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
3271 int sr_wm, cursor_wm;
3272 unsigned long line_time_us;
3273 int sr_clock, entries_required;
3274 u32 reg_value;
3275 int line_count;
3276 int planea_htotal = 0, planeb_htotal = 0;
3277 struct drm_crtc *crtc; 3363 struct drm_crtc *crtc;
3364 int htotal, hdisplay, clock, pixel_size = 0;
3365 int line_time_us, line_count, entries;
3278 3366
3279 /* Need htotal for all active display plane */ 3367 crtc = intel_get_crtc_for_pipe(dev, pipe);
3280 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 3368 if (crtc->fb == NULL || !crtc->enabled)
3281 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 3369 return false;
3282 if (intel_crtc->dpms_mode == DRM_MODE_DPMS_ON) {
3283 if (intel_crtc->plane == 0)
3284 planea_htotal = crtc->mode.htotal;
3285 else
3286 planeb_htotal = crtc->mode.htotal;
3287 }
3288 }
3289
3290 /* Calculate and update the watermark for plane A */
3291 if (planea_clock) {
3292 entries_required = ((planea_clock / 1000) * pixel_size *
3293 ILK_LP0_PLANE_LATENCY) / 1000;
3294 entries_required = DIV_ROUND_UP(entries_required,
3295 ironlake_display_wm_info.cacheline_size);
3296 planea_wm = entries_required +
3297 ironlake_display_wm_info.guard_size;
3298
3299 if (planea_wm > (int)ironlake_display_wm_info.max_wm)
3300 planea_wm = ironlake_display_wm_info.max_wm;
3301
3302 /* Use the large buffer method to calculate cursor watermark */
3303 line_time_us = (planea_htotal * 1000) / planea_clock;
3304
3305 /* Use ns/us then divide to preserve precision */
3306 line_count = (ILK_LP0_CURSOR_LATENCY / line_time_us + 1000) / 1000;
3307
3308 /* calculate the cursor watermark for cursor A */
3309 entries_required = line_count * 64 * pixel_size;
3310 entries_required = DIV_ROUND_UP(entries_required,
3311 ironlake_cursor_wm_info.cacheline_size);
3312 cursora_wm = entries_required + ironlake_cursor_wm_info.guard_size;
3313 if (cursora_wm > ironlake_cursor_wm_info.max_wm)
3314 cursora_wm = ironlake_cursor_wm_info.max_wm;
3315
3316 reg_value = I915_READ(WM0_PIPEA_ILK);
3317 reg_value &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
3318 reg_value |= (planea_wm << WM0_PIPE_PLANE_SHIFT) |
3319 (cursora_wm & WM0_PIPE_CURSOR_MASK);
3320 I915_WRITE(WM0_PIPEA_ILK, reg_value);
3321 DRM_DEBUG_KMS("FIFO watermarks For pipe A - plane %d, "
3322 "cursor: %d\n", planea_wm, cursora_wm);
3323 }
3324 /* Calculate and update the watermark for plane B */
3325 if (planeb_clock) {
3326 entries_required = ((planeb_clock / 1000) * pixel_size *
3327 ILK_LP0_PLANE_LATENCY) / 1000;
3328 entries_required = DIV_ROUND_UP(entries_required,
3329 ironlake_display_wm_info.cacheline_size);
3330 planeb_wm = entries_required +
3331 ironlake_display_wm_info.guard_size;
3332
3333 if (planeb_wm > (int)ironlake_display_wm_info.max_wm)
3334 planeb_wm = ironlake_display_wm_info.max_wm;
3335 3370
3336 /* Use the large buffer method to calculate cursor watermark */ 3371 htotal = crtc->mode.htotal;
3337 line_time_us = (planeb_htotal * 1000) / planeb_clock; 3372 hdisplay = crtc->mode.hdisplay;
3373 clock = crtc->mode.clock;
3374 pixel_size = crtc->fb->bits_per_pixel / 8;
3375
3376 /* Use the small buffer method to calculate plane watermark */
3377 entries = ((clock * pixel_size / 1000) * ILK_LP0_PLANE_LATENCY) / 1000;
3378 entries = DIV_ROUND_UP(entries,
3379 ironlake_display_wm_info.cacheline_size);
3380 *plane_wm = entries + ironlake_display_wm_info.guard_size;
3381 if (*plane_wm > (int)ironlake_display_wm_info.max_wm)
3382 *plane_wm = ironlake_display_wm_info.max_wm;
3383
3384 /* Use the large buffer method to calculate cursor watermark */
3385 line_time_us = ((htotal * 1000) / clock);
3386 line_count = (ILK_LP0_CURSOR_LATENCY / line_time_us + 1000) / 1000;
3387 entries = line_count * 64 * pixel_size;
3388 entries = DIV_ROUND_UP(entries,
3389 ironlake_cursor_wm_info.cacheline_size);
3390 *cursor_wm = entries + ironlake_cursor_wm_info.guard_size;
3391 if (*cursor_wm > ironlake_cursor_wm_info.max_wm)
3392 *cursor_wm = ironlake_cursor_wm_info.max_wm;
3338 3393
3339 /* Use ns/us then divide to preserve precision */ 3394 return true;
3340 line_count = (ILK_LP0_CURSOR_LATENCY / line_time_us + 1000) / 1000; 3395}
3341 3396
3342 /* calculate the cursor watermark for cursor B */ 3397static void ironlake_update_wm(struct drm_device *dev,
3343 entries_required = line_count * 64 * pixel_size; 3398 int planea_clock, int planeb_clock,
3344 entries_required = DIV_ROUND_UP(entries_required, 3399 int sr_hdisplay, int sr_htotal,
3345 ironlake_cursor_wm_info.cacheline_size); 3400 int pixel_size)
3346 cursorb_wm = entries_required + ironlake_cursor_wm_info.guard_size; 3401{
3347 if (cursorb_wm > ironlake_cursor_wm_info.max_wm) 3402 struct drm_i915_private *dev_priv = dev->dev_private;
3348 cursorb_wm = ironlake_cursor_wm_info.max_wm; 3403 int plane_wm, cursor_wm, enabled;
3404 int tmp;
3405
3406 enabled = 0;
3407 if (ironlake_compute_wm0(dev, 0, &plane_wm, &cursor_wm)) {
3408 I915_WRITE(WM0_PIPEA_ILK,
3409 (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
3410 DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
3411 " plane %d, " "cursor: %d\n",
3412 plane_wm, cursor_wm);
3413 enabled++;
3414 }
3349 3415
3350 reg_value = I915_READ(WM0_PIPEB_ILK); 3416 if (ironlake_compute_wm0(dev, 1, &plane_wm, &cursor_wm)) {
3351 reg_value &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK); 3417 I915_WRITE(WM0_PIPEB_ILK,
3352 reg_value |= (planeb_wm << WM0_PIPE_PLANE_SHIFT) | 3418 (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
3353 (cursorb_wm & WM0_PIPE_CURSOR_MASK); 3419 DRM_DEBUG_KMS("FIFO watermarks for pipe B -"
3354 I915_WRITE(WM0_PIPEB_ILK, reg_value); 3420 " plane %d, cursor: %d\n",
3355 DRM_DEBUG_KMS("FIFO watermarks For pipe B - plane %d, " 3421 plane_wm, cursor_wm);
3356 "cursor: %d\n", planeb_wm, cursorb_wm); 3422 enabled++;
3357 } 3423 }
3358 3424
3359 /* 3425 /*
3360 * Calculate and update the self-refresh watermark only when one 3426 * Calculate and update the self-refresh watermark only when one
3361 * display plane is used. 3427 * display plane is used.
3362 */ 3428 */
3363 if (!planea_clock || !planeb_clock) { 3429 tmp = 0;
3364 3430 if (enabled == 1 && /* XXX disabled due to buggy implmentation? */ 0) {
3431 unsigned long line_time_us;
3432 int small, large, plane_fbc;
3433 int sr_clock, entries;
3434 int line_count, line_size;
3365 /* Read the self-refresh latency. The unit is 0.5us */ 3435 /* Read the self-refresh latency. The unit is 0.5us */
3366 int ilk_sr_latency = I915_READ(MLTR_ILK) & ILK_SRLT_MASK; 3436 int ilk_sr_latency = I915_READ(MLTR_ILK) & ILK_SRLT_MASK;
3367 3437
3368 sr_clock = planea_clock ? planea_clock : planeb_clock; 3438 sr_clock = planea_clock ? planea_clock : planeb_clock;
3369 line_time_us = ((sr_htotal * 1000) / sr_clock); 3439 line_time_us = (sr_htotal * 1000) / sr_clock;
3370 3440
3371 /* Use ns/us then divide to preserve precision */ 3441 /* Use ns/us then divide to preserve precision */
3372 line_count = ((ilk_sr_latency * 500) / line_time_us + 1000) 3442 line_count = ((ilk_sr_latency * 500) / line_time_us + 1000)
3373 / 1000; 3443 / 1000;
3444 line_size = sr_hdisplay * pixel_size;
3374 3445
3375 /* calculate the self-refresh watermark for display plane */ 3446 /* Use the minimum of the small and large buffer method for primary */
3376 entries_required = line_count * sr_hdisplay * pixel_size; 3447 small = ((sr_clock * pixel_size / 1000) * (ilk_sr_latency * 500)) / 1000;
3377 entries_required = DIV_ROUND_UP(entries_required, 3448 large = line_count * line_size;
3378 ironlake_display_srwm_info.cacheline_size);
3379 sr_wm = entries_required +
3380 ironlake_display_srwm_info.guard_size;
3381 3449
3382 /* calculate the self-refresh watermark for display cursor */ 3450 entries = DIV_ROUND_UP(min(small, large),
3383 entries_required = line_count * pixel_size * 64; 3451 ironlake_display_srwm_info.cacheline_size);
3384 entries_required = DIV_ROUND_UP(entries_required,
3385 ironlake_cursor_srwm_info.cacheline_size);
3386 cursor_wm = entries_required +
3387 ironlake_cursor_srwm_info.guard_size;
3388 3452
3389 /* configure watermark and enable self-refresh */ 3453 plane_fbc = entries * 64;
3390 reg_value = I915_READ(WM1_LP_ILK); 3454 plane_fbc = DIV_ROUND_UP(plane_fbc, line_size);
3391 reg_value &= ~(WM1_LP_LATENCY_MASK | WM1_LP_SR_MASK |
3392 WM1_LP_CURSOR_MASK);
3393 reg_value |= (ilk_sr_latency << WM1_LP_LATENCY_SHIFT) |
3394 (sr_wm << WM1_LP_SR_SHIFT) | cursor_wm;
3395 3455
3396 I915_WRITE(WM1_LP_ILK, reg_value); 3456 plane_wm = entries + ironlake_display_srwm_info.guard_size;
3397 DRM_DEBUG_KMS("self-refresh watermark: display plane %d " 3457 if (plane_wm > (int)ironlake_display_srwm_info.max_wm)
3398 "cursor %d\n", sr_wm, cursor_wm); 3458 plane_wm = ironlake_display_srwm_info.max_wm;
3399 3459
3400 } else { 3460 /* calculate the self-refresh watermark for display cursor */
3401 /* Turn off self refresh if both pipes are enabled */ 3461 entries = line_count * pixel_size * 64;
3402 I915_WRITE(WM1_LP_ILK, I915_READ(WM1_LP_ILK) & ~WM1_LP_SR_EN); 3462 entries = DIV_ROUND_UP(entries,
3403 } 3463 ironlake_cursor_srwm_info.cacheline_size);
3464
3465 cursor_wm = entries + ironlake_cursor_srwm_info.guard_size;
3466 if (cursor_wm > (int)ironlake_cursor_srwm_info.max_wm)
3467 cursor_wm = ironlake_cursor_srwm_info.max_wm;
3468
3469 /* configure watermark and enable self-refresh */
3470 tmp = (WM1_LP_SR_EN |
3471 (ilk_sr_latency << WM1_LP_LATENCY_SHIFT) |
3472 (plane_fbc << WM1_LP_FBC_SHIFT) |
3473 (plane_wm << WM1_LP_SR_SHIFT) |
3474 cursor_wm);
3475 DRM_DEBUG_KMS("self-refresh watermark: display plane %d, fbc lines %d,"
3476 " cursor %d\n", plane_wm, plane_fbc, cursor_wm);
3477 }
3478 I915_WRITE(WM1_LP_ILK, tmp);
3479 /* XXX setup WM2 and WM3 */
3404} 3480}
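
/* Editor's note: a worked run of the small-buffer method used by
 * ironlake_compute_wm0() above, for a hypothetical 1080p mode (148500 kHz
 * dot clock, 32bpp); the cacheline and guard values are assumptions. */
#include <stdio.h>

#define ILK_LP0_PLANE_LATENCY 700 /* ns */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	int clock = 148500, pixel_size = 4; /* kHz, bytes per pixel */
	int cacheline = 64, guard = 2;      /* assumed wm_info values */

	/* bytes fetched during the 700ns LP0 latency window */
	int entries = ((clock * pixel_size / 1000) * ILK_LP0_PLANE_LATENCY) / 1000;
	int plane_wm = DIV_ROUND_UP(entries, cacheline) + guard;

	printf("entries=%d plane_wm=%d\n", entries, plane_wm); /* 415, 9 */
	return 0;
}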
3481
3405/** 3482/**
3406 * intel_update_watermarks - update FIFO watermark values based on current modes 3483 * intel_update_watermarks - update FIFO watermark values based on current modes
3407 * 3484 *
@@ -3433,7 +3510,7 @@ static void ironlake_update_wm(struct drm_device *dev, int planea_clock,
3433 * 3510 *
3434 * We don't use the sprite, so we can ignore that. And on Crestline we have 3511 * We don't use the sprite, so we can ignore that. And on Crestline we have
3435 * to set the non-SR watermarks to 8. 3512 * to set the non-SR watermarks to 8.
3436 */ 3513 */
3437static void intel_update_watermarks(struct drm_device *dev) 3514static void intel_update_watermarks(struct drm_device *dev)
3438{ 3515{
3439 struct drm_i915_private *dev_priv = dev->dev_private; 3516 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3449,15 +3526,15 @@ static void intel_update_watermarks(struct drm_device *dev)
3449 /* Get the clock config from both planes */ 3526 /* Get the clock config from both planes */
3450 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 3527 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
3451 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 3528 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3452 if (intel_crtc->dpms_mode == DRM_MODE_DPMS_ON) { 3529 if (intel_crtc->active) {
3453 enabled++; 3530 enabled++;
3454 if (intel_crtc->plane == 0) { 3531 if (intel_crtc->plane == 0) {
3455 DRM_DEBUG_KMS("plane A (pipe %d) clock: %d\n", 3532 DRM_DEBUG_KMS("plane A (pipe %d) clock: %d\n",
3456 intel_crtc->pipe, crtc->mode.clock); 3533 intel_crtc->pipe, crtc->mode.clock);
3457 planea_clock = crtc->mode.clock; 3534 planea_clock = crtc->mode.clock;
3458 } else { 3535 } else {
3459 DRM_DEBUG_KMS("plane B (pipe %d) clock: %d\n", 3536 DRM_DEBUG_KMS("plane B (pipe %d) clock: %d\n",
3460 intel_crtc->pipe, crtc->mode.clock); 3537 intel_crtc->pipe, crtc->mode.clock);
3461 planeb_clock = crtc->mode.clock; 3538 planeb_clock = crtc->mode.clock;
3462 } 3539 }
3463 sr_hdisplay = crtc->mode.hdisplay; 3540 sr_hdisplay = crtc->mode.hdisplay;
@@ -3488,62 +3565,35 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3488 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 3565 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3489 int pipe = intel_crtc->pipe; 3566 int pipe = intel_crtc->pipe;
3490 int plane = intel_crtc->plane; 3567 int plane = intel_crtc->plane;
3491 int fp_reg = (pipe == 0) ? FPA0 : FPB0; 3568 u32 fp_reg, dpll_reg;
3492 int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
3493 int dpll_md_reg = (intel_crtc->pipe == 0) ? DPLL_A_MD : DPLL_B_MD;
3494 int dspcntr_reg = (plane == 0) ? DSPACNTR : DSPBCNTR;
3495 int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
3496 int htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
3497 int hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B;
3498 int hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B;
3499 int vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B;
3500 int vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B;
3501 int vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B;
3502 int dspsize_reg = (plane == 0) ? DSPASIZE : DSPBSIZE;
3503 int dsppos_reg = (plane == 0) ? DSPAPOS : DSPBPOS;
3504 int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC;
3505 int refclk, num_connectors = 0; 3569 int refclk, num_connectors = 0;
3506 intel_clock_t clock, reduced_clock; 3570 intel_clock_t clock, reduced_clock;
3507 u32 dpll = 0, fp = 0, fp2 = 0, dspcntr, pipeconf; 3571 u32 dpll, fp = 0, fp2 = 0, dspcntr, pipeconf;
3508 bool ok, has_reduced_clock = false, is_sdvo = false, is_dvo = false; 3572 bool ok, has_reduced_clock = false, is_sdvo = false, is_dvo = false;
3509 bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false; 3573 bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
3510 struct intel_encoder *has_edp_encoder = NULL; 3574 struct intel_encoder *has_edp_encoder = NULL;
3511 struct drm_mode_config *mode_config = &dev->mode_config; 3575 struct drm_mode_config *mode_config = &dev->mode_config;
3512 struct drm_encoder *encoder; 3576 struct intel_encoder *encoder;
3513 const intel_limit_t *limit; 3577 const intel_limit_t *limit;
3514 int ret; 3578 int ret;
3515 struct fdi_m_n m_n = {0}; 3579 struct fdi_m_n m_n = {0};
3516 int data_m1_reg = (pipe == 0) ? PIPEA_DATA_M1 : PIPEB_DATA_M1; 3580 u32 reg, temp;
3517 int data_n1_reg = (pipe == 0) ? PIPEA_DATA_N1 : PIPEB_DATA_N1;
3518 int link_m1_reg = (pipe == 0) ? PIPEA_LINK_M1 : PIPEB_LINK_M1;
3519 int link_n1_reg = (pipe == 0) ? PIPEA_LINK_N1 : PIPEB_LINK_N1;
3520 int pch_fp_reg = (pipe == 0) ? PCH_FPA0 : PCH_FPB0;
3521 int pch_dpll_reg = (pipe == 0) ? PCH_DPLL_A : PCH_DPLL_B;
3522 int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL;
3523 int fdi_tx_reg = (pipe == 0) ? FDI_TXA_CTL : FDI_TXB_CTL;
3524 int trans_dpll_sel = (pipe == 0) ? 0 : 1;
3525 int lvds_reg = LVDS;
3526 u32 temp;
3527 int sdvo_pixel_multiply;
3528 int target_clock; 3581 int target_clock;
3529 3582
3530 drm_vblank_pre_modeset(dev, pipe); 3583 drm_vblank_pre_modeset(dev, pipe);
3531 3584
3532 list_for_each_entry(encoder, &mode_config->encoder_list, head) { 3585 list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
3533 struct intel_encoder *intel_encoder; 3586 if (encoder->base.crtc != crtc)
3534
3535 if (encoder->crtc != crtc)
3536 continue; 3587 continue;
3537 3588
3538 intel_encoder = enc_to_intel_encoder(encoder); 3589 switch (encoder->type) {
3539 switch (intel_encoder->type) {
3540 case INTEL_OUTPUT_LVDS: 3590 case INTEL_OUTPUT_LVDS:
3541 is_lvds = true; 3591 is_lvds = true;
3542 break; 3592 break;
3543 case INTEL_OUTPUT_SDVO: 3593 case INTEL_OUTPUT_SDVO:
3544 case INTEL_OUTPUT_HDMI: 3594 case INTEL_OUTPUT_HDMI:
3545 is_sdvo = true; 3595 is_sdvo = true;
3546 if (intel_encoder->needs_tv_clock) 3596 if (encoder->needs_tv_clock)
3547 is_tv = true; 3597 is_tv = true;
3548 break; 3598 break;
3549 case INTEL_OUTPUT_DVO: 3599 case INTEL_OUTPUT_DVO:
@@ -3559,7 +3609,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3559 is_dp = true; 3609 is_dp = true;
3560 break; 3610 break;
3561 case INTEL_OUTPUT_EDP: 3611 case INTEL_OUTPUT_EDP:
3562 has_edp_encoder = intel_encoder; 3612 has_edp_encoder = encoder;
3563 break; 3613 break;
3564 } 3614 }
3565 3615
@@ -3569,15 +3619,14 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3569 if (is_lvds && dev_priv->lvds_use_ssc && num_connectors < 2) { 3619 if (is_lvds && dev_priv->lvds_use_ssc && num_connectors < 2) {
3570 refclk = dev_priv->lvds_ssc_freq * 1000; 3620 refclk = dev_priv->lvds_ssc_freq * 1000;
3571 DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n", 3621 DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
3572 refclk / 1000); 3622 refclk / 1000);
3573 } else if (IS_I9XX(dev)) { 3623 } else if (!IS_GEN2(dev)) {
3574 refclk = 96000; 3624 refclk = 96000;
3575 if (HAS_PCH_SPLIT(dev)) 3625 if (HAS_PCH_SPLIT(dev))
3576 refclk = 120000; /* 120MHz refclk */ 3626 refclk = 120000; /* 120MHz refclk */
3577 } else { 3627 } else {
3578 refclk = 48000; 3628 refclk = 48000;
3579 } 3629 }
3580
3581 3630
3582 /* 3631 /*
3583 * Returns a set of divisors for the desired target clock with the given 3632 * Returns a set of divisors for the desired target clock with the given
@@ -3593,13 +3642,13 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3593 } 3642 }
3594 3643
3595 /* Ensure that the cursor is valid for the new mode before changing... */ 3644 /* Ensure that the cursor is valid for the new mode before changing... */
3596 intel_crtc_update_cursor(crtc); 3645 intel_crtc_update_cursor(crtc, true);
3597 3646
3598 if (is_lvds && dev_priv->lvds_downclock_avail) { 3647 if (is_lvds && dev_priv->lvds_downclock_avail) {
3599 has_reduced_clock = limit->find_pll(limit, crtc, 3648 has_reduced_clock = limit->find_pll(limit, crtc,
3600 dev_priv->lvds_downclock, 3649 dev_priv->lvds_downclock,
3601 refclk, 3650 refclk,
3602 &reduced_clock); 3651 &reduced_clock);
3603 if (has_reduced_clock && (clock.p != reduced_clock.p)) { 3652 if (has_reduced_clock && (clock.p != reduced_clock.p)) {
3604 /* 3653 /*
3605 * If the different P is found, it means that we can't 3654 * If the different P is found, it means that we can't
@@ -3608,7 +3657,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3608 * feature. 3657 * feature.
3609 */ 3658 */
3610 DRM_DEBUG_KMS("Different P is found for " 3659 DRM_DEBUG_KMS("Different P is found for "
3611 "LVDS clock/downclock\n"); 3660 "LVDS clock/downclock\n");
3612 has_reduced_clock = 0; 3661 has_reduced_clock = 0;
3613 } 3662 }
3614 } 3663 }
@@ -3616,14 +3665,14 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3616 this mirrors vbios setting. */ 3665 this mirrors vbios setting. */
3617 if (is_sdvo && is_tv) { 3666 if (is_sdvo && is_tv) {
3618 if (adjusted_mode->clock >= 100000 3667 if (adjusted_mode->clock >= 100000
3619 && adjusted_mode->clock < 140500) { 3668 && adjusted_mode->clock < 140500) {
3620 clock.p1 = 2; 3669 clock.p1 = 2;
3621 clock.p2 = 10; 3670 clock.p2 = 10;
3622 clock.n = 3; 3671 clock.n = 3;
3623 clock.m1 = 16; 3672 clock.m1 = 16;
3624 clock.m2 = 8; 3673 clock.m2 = 8;
3625 } else if (adjusted_mode->clock >= 140500 3674 } else if (adjusted_mode->clock >= 140500
3626 && adjusted_mode->clock <= 200000) { 3675 && adjusted_mode->clock <= 200000) {
3627 clock.p1 = 1; 3676 clock.p1 = 1;
3628 clock.p2 = 10; 3677 clock.p2 = 10;
3629 clock.n = 6; 3678 clock.n = 6;
@@ -3648,21 +3697,28 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3648 target_clock = mode->clock; 3697 target_clock = mode->clock;
3649 else 3698 else
3650 target_clock = adjusted_mode->clock; 3699 target_clock = adjusted_mode->clock;
3651 link_bw = 270000; 3700
3701 /* FDI is a binary signal running at ~2.7GHz, encoding
3702 * each output octet as 10 bits. The actual frequency
3703 * is stored as a divider into a 100MHz clock, and the
3704 * mode pixel clock is stored in units of 1KHz.
3705 * Hence the bw of each lane in terms of the mode signal
3706 * is:
3707 */
3708 link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10;
3652 } 3709 }
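
/* Editor's note: the link_bw arithmetic in the comment above, standalone.
 * Assuming intel_fdi_link_freq() reports 27 for the usual 2.7GHz FDI link,
 * the result matches the 270000 (kHz) constant the old code hard-coded.
 * 64-bit math keeps the intermediate 2.7e9 from overflowing a 32-bit int. */
#include <stdio.h>

#define MHz(x) ((x) * 1000000LL)
#define KHz(x) ((x) * 1000LL)

static long long fdi_link_bw_khz(long long fdi_freq)
{
	/* 10 wire bits per octet; divider in 100MHz units; clocks in kHz */
	return fdi_freq * MHz(100) / KHz(1) / 10;
}

int main(void)
{
	printf("%lld\n", fdi_link_bw_khz(27)); /* prints 270000 */
	return 0;
}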
3653 3710
3654 /* determine panel color depth */ 3711 /* determine panel color depth */
3655 temp = I915_READ(pipeconf_reg); 3712 temp = I915_READ(PIPECONF(pipe));
3656 temp &= ~PIPE_BPC_MASK; 3713 temp &= ~PIPE_BPC_MASK;
3657 if (is_lvds) { 3714 if (is_lvds) {
3658 int lvds_reg = I915_READ(PCH_LVDS);
3659 /* the BPC will be 6 if it is 18-bit LVDS panel */ 3715 /* the BPC will be 6 if it is 18-bit LVDS panel */
3660 if ((lvds_reg & LVDS_A3_POWER_MASK) == LVDS_A3_POWER_UP) 3716 if ((I915_READ(PCH_LVDS) & LVDS_A3_POWER_MASK) == LVDS_A3_POWER_UP)
3661 temp |= PIPE_8BPC; 3717 temp |= PIPE_8BPC;
3662 else 3718 else
3663 temp |= PIPE_6BPC; 3719 temp |= PIPE_6BPC;
3664 } else if (has_edp_encoder || (is_dp && intel_pch_has_edp(crtc))) { 3720 } else if (has_edp_encoder || (is_dp && intel_pch_has_edp(crtc))) {
3665 switch (dev_priv->edp_bpp/3) { 3721 switch (dev_priv->edp.bpp/3) {
3666 case 8: 3722 case 8:
3667 temp |= PIPE_8BPC; 3723 temp |= PIPE_8BPC;
3668 break; 3724 break;
@@ -3678,8 +3734,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3678 } 3734 }
3679 } else 3735 } else
3680 temp |= PIPE_8BPC; 3736 temp |= PIPE_8BPC;
3681 I915_WRITE(pipeconf_reg, temp); 3737 I915_WRITE(PIPECONF(pipe), temp);
3682 I915_READ(pipeconf_reg);
3683 3738
3684 switch (temp & PIPE_BPC_MASK) { 3739 switch (temp & PIPE_BPC_MASK) {
3685 case PIPE_8BPC: 3740 case PIPE_8BPC:
@@ -3724,33 +3779,27 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3724 /* Always enable nonspread source */ 3779 /* Always enable nonspread source */
3725 temp &= ~DREF_NONSPREAD_SOURCE_MASK; 3780 temp &= ~DREF_NONSPREAD_SOURCE_MASK;
3726 temp |= DREF_NONSPREAD_SOURCE_ENABLE; 3781 temp |= DREF_NONSPREAD_SOURCE_ENABLE;
3727 I915_WRITE(PCH_DREF_CONTROL, temp);
3728 POSTING_READ(PCH_DREF_CONTROL);
3729
3730 temp &= ~DREF_SSC_SOURCE_MASK; 3782 temp &= ~DREF_SSC_SOURCE_MASK;
3731 temp |= DREF_SSC_SOURCE_ENABLE; 3783 temp |= DREF_SSC_SOURCE_ENABLE;
3732 I915_WRITE(PCH_DREF_CONTROL, temp); 3784 I915_WRITE(PCH_DREF_CONTROL, temp);
3733 POSTING_READ(PCH_DREF_CONTROL);
3734 3785
3786 POSTING_READ(PCH_DREF_CONTROL);
3735 udelay(200); 3787 udelay(200);
3736 3788
3737 if (has_edp_encoder) { 3789 if (has_edp_encoder) {
3738 if (dev_priv->lvds_use_ssc) { 3790 if (dev_priv->lvds_use_ssc) {
3739 temp |= DREF_SSC1_ENABLE; 3791 temp |= DREF_SSC1_ENABLE;
3740 I915_WRITE(PCH_DREF_CONTROL, temp); 3792 I915_WRITE(PCH_DREF_CONTROL, temp);
3741 POSTING_READ(PCH_DREF_CONTROL);
3742 3793
3794 POSTING_READ(PCH_DREF_CONTROL);
3743 udelay(200); 3795 udelay(200);
3744 3796
3745 temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK; 3797 temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
3746 temp |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD; 3798 temp |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
3747 I915_WRITE(PCH_DREF_CONTROL, temp);
3748 POSTING_READ(PCH_DREF_CONTROL);
3749 } else { 3799 } else {
3750 temp |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD; 3800 temp |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
3751 I915_WRITE(PCH_DREF_CONTROL, temp);
3752 POSTING_READ(PCH_DREF_CONTROL);
3753 } 3801 }
3802 I915_WRITE(PCH_DREF_CONTROL, temp);
3754 } 3803 }
3755 } 3804 }
3756 3805
@@ -3766,21 +3815,24 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3766 reduced_clock.m2; 3815 reduced_clock.m2;
3767 } 3816 }
3768 3817
3818 dpll = 0;
3769 if (!HAS_PCH_SPLIT(dev)) 3819 if (!HAS_PCH_SPLIT(dev))
3770 dpll = DPLL_VGA_MODE_DIS; 3820 dpll = DPLL_VGA_MODE_DIS;
3771 3821
3772 if (IS_I9XX(dev)) { 3822 if (!IS_GEN2(dev)) {
3773 if (is_lvds) 3823 if (is_lvds)
3774 dpll |= DPLLB_MODE_LVDS; 3824 dpll |= DPLLB_MODE_LVDS;
3775 else 3825 else
3776 dpll |= DPLLB_MODE_DAC_SERIAL; 3826 dpll |= DPLLB_MODE_DAC_SERIAL;
3777 if (is_sdvo) { 3827 if (is_sdvo) {
3828 int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
3829 if (pixel_multiplier > 1) {
3830 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
3831 dpll |= (pixel_multiplier - 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
3832 else if (HAS_PCH_SPLIT(dev))
3833 dpll |= (pixel_multiplier - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
3834 }
3778 dpll |= DPLL_DVO_HIGH_SPEED; 3835 dpll |= DPLL_DVO_HIGH_SPEED;
3779 sdvo_pixel_multiply = adjusted_mode->clock / mode->clock;
3780 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
3781 dpll |= (sdvo_pixel_multiply - 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
3782 else if (HAS_PCH_SPLIT(dev))
3783 dpll |= (sdvo_pixel_multiply - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
3784 } 3836 }
3785 if (is_dp) 3837 if (is_dp)
3786 dpll |= DPLL_DVO_HIGH_SPEED; 3838 dpll |= DPLL_DVO_HIGH_SPEED;
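
/* Editor's note: a sketch of the pixel-multiplier encoding above. For a
 * low-clock SDVO mode the dot clock is run at a multiple of the mode clock,
 * and the hardware field stores (multiplier - 1); the shift value is an
 * assumption for illustration. */
#include <stdio.h>

#define SDVO_MULTIPLIER_SHIFT_HIRES 4 /* assumed */

int main(void)
{
	int mode_clock = 32000, adjusted_clock = 64000; /* kHz */
	int pixel_multiplier = adjusted_clock / mode_clock; /* 2 */
	unsigned dpll = 0;

	if (pixel_multiplier > 1)
		dpll |= (pixel_multiplier - 1) << SDVO_MULTIPLIER_SHIFT_HIRES;

	printf("dpll=0x%08x\n", dpll); /* 0x00000010 */
	return 0;
}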
@@ -3810,7 +3862,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3810 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14; 3862 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
3811 break; 3863 break;
3812 } 3864 }
3813 if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev)) 3865 if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
3814 dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT); 3866 dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
3815 } else { 3867 } else {
3816 if (is_lvds) { 3868 if (is_lvds) {
@@ -3837,7 +3889,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3837 dpll |= PLL_REF_INPUT_DREFCLK; 3889 dpll |= PLL_REF_INPUT_DREFCLK;
3838 3890
3839 /* setup pipeconf */ 3891 /* setup pipeconf */
3840 pipeconf = I915_READ(pipeconf_reg); 3892 pipeconf = I915_READ(PIPECONF(pipe));
3841 3893
3842 /* Set up the display plane register */ 3894 /* Set up the display plane register */
3843 dspcntr = DISPPLANE_GAMMA_ENABLE; 3895 dspcntr = DISPPLANE_GAMMA_ENABLE;
@@ -3851,7 +3903,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3851 dspcntr |= DISPPLANE_SEL_PIPE_B; 3903 dspcntr |= DISPPLANE_SEL_PIPE_B;
3852 } 3904 }
3853 3905
3854 if (pipe == 0 && !IS_I965G(dev)) { 3906 if (pipe == 0 && INTEL_INFO(dev)->gen < 4) {
3855 /* Enable pixel doubling when the dot clock is > 90% of the (display) 3907 /* Enable pixel doubling when the dot clock is > 90% of the (display)
3856 * core speed. 3908 * core speed.
3857 * 3909 *
@@ -3860,51 +3912,46 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3860 */ 3912 */
3861 if (mode->clock > 3913 if (mode->clock >
3862 dev_priv->display.get_display_clock_speed(dev) * 9 / 10) 3914 dev_priv->display.get_display_clock_speed(dev) * 9 / 10)
3863 pipeconf |= PIPEACONF_DOUBLE_WIDE; 3915 pipeconf |= PIPECONF_DOUBLE_WIDE;
3864 else 3916 else
3865 pipeconf &= ~PIPEACONF_DOUBLE_WIDE; 3917 pipeconf &= ~PIPECONF_DOUBLE_WIDE;
3866 } 3918 }
3867 3919
3868 dspcntr |= DISPLAY_PLANE_ENABLE; 3920 dspcntr |= DISPLAY_PLANE_ENABLE;
3869 pipeconf |= PIPEACONF_ENABLE; 3921 pipeconf |= PIPECONF_ENABLE;
3870 dpll |= DPLL_VCO_ENABLE; 3922 dpll |= DPLL_VCO_ENABLE;
3871 3923
3872
3873 /* Disable the panel fitter if it was on our pipe */
3874 if (!HAS_PCH_SPLIT(dev) && intel_panel_fitter_pipe(dev) == pipe)
3875 I915_WRITE(PFIT_CONTROL, 0);
3876
3877 DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B'); 3924 DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
3878 drm_mode_debug_printmodeline(mode); 3925 drm_mode_debug_printmodeline(mode);
3879 3926
3880 /* assign to Ironlake registers */ 3927 /* assign to Ironlake registers */
3881 if (HAS_PCH_SPLIT(dev)) { 3928 if (HAS_PCH_SPLIT(dev)) {
3882 fp_reg = pch_fp_reg; 3929 fp_reg = PCH_FP0(pipe);
3883 dpll_reg = pch_dpll_reg; 3930 dpll_reg = PCH_DPLL(pipe);
3931 } else {
3932 fp_reg = FP0(pipe);
3933 dpll_reg = DPLL(pipe);
3884 } 3934 }
3885 3935
3886 if (!has_edp_encoder) { 3936 if (!has_edp_encoder) {
3887 I915_WRITE(fp_reg, fp); 3937 I915_WRITE(fp_reg, fp);
3888 I915_WRITE(dpll_reg, dpll & ~DPLL_VCO_ENABLE); 3938 I915_WRITE(dpll_reg, dpll & ~DPLL_VCO_ENABLE);
3889 I915_READ(dpll_reg); 3939
3940 POSTING_READ(dpll_reg);
3890 udelay(150); 3941 udelay(150);
3891 } 3942 }
3892 3943
3893 /* enable transcoder DPLL */ 3944 /* enable transcoder DPLL */
3894 if (HAS_PCH_CPT(dev)) { 3945 if (HAS_PCH_CPT(dev)) {
3895 temp = I915_READ(PCH_DPLL_SEL); 3946 temp = I915_READ(PCH_DPLL_SEL);
3896 if (trans_dpll_sel == 0) 3947 if (pipe == 0)
3897 temp |= (TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL); 3948 temp |= TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL;
3898 else 3949 else
3899 temp |= (TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL); 3950 temp |= TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL;
3900 I915_WRITE(PCH_DPLL_SEL, temp); 3951 I915_WRITE(PCH_DPLL_SEL, temp);
3901 I915_READ(PCH_DPLL_SEL);
3902 udelay(150);
3903 }
3904 3952
3905 if (HAS_PCH_SPLIT(dev)) { 3953 POSTING_READ(PCH_DPLL_SEL);
3906 pipeconf &= ~PIPE_ENABLE_DITHER; 3954 udelay(150);
3907 pipeconf &= ~PIPE_DITHER_TYPE_MASK;
3908 } 3955 }
3909 3956
3910 /* The LVDS pin pair needs to be on before the DPLLs are enabled. 3957 /* The LVDS pin pair needs to be on before the DPLLs are enabled.
@@ -3912,55 +3959,57 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3912 * things on. 3959 * things on.
3913 */ 3960 */
3914 if (is_lvds) { 3961 if (is_lvds) {
3915 u32 lvds; 3962 reg = LVDS;
3916
3917 if (HAS_PCH_SPLIT(dev)) 3963 if (HAS_PCH_SPLIT(dev))
3918 lvds_reg = PCH_LVDS; 3964 reg = PCH_LVDS;
3919 3965
3920 lvds = I915_READ(lvds_reg); 3966 temp = I915_READ(reg);
3921 lvds |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP; 3967 temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
3922 if (pipe == 1) { 3968 if (pipe == 1) {
3923 if (HAS_PCH_CPT(dev)) 3969 if (HAS_PCH_CPT(dev))
3924 lvds |= PORT_TRANS_B_SEL_CPT; 3970 temp |= PORT_TRANS_B_SEL_CPT;
3925 else 3971 else
3926 lvds |= LVDS_PIPEB_SELECT; 3972 temp |= LVDS_PIPEB_SELECT;
3927 } else { 3973 } else {
3928 if (HAS_PCH_CPT(dev)) 3974 if (HAS_PCH_CPT(dev))
3929 lvds &= ~PORT_TRANS_SEL_MASK; 3975 temp &= ~PORT_TRANS_SEL_MASK;
3930 else 3976 else
3931 lvds &= ~LVDS_PIPEB_SELECT; 3977 temp &= ~LVDS_PIPEB_SELECT;
3932 } 3978 }
3933 /* set the corresponding LVDS_BORDER bit */ 3979 /* set the corresponding LVDS_BORDER bit */
3934 lvds |= dev_priv->lvds_border_bits; 3980 temp |= dev_priv->lvds_border_bits;
3935 /* Set the B0-B3 data pairs corresponding to whether we're going to 3981 /* Set the B0-B3 data pairs corresponding to whether we're going to
3936 * set the DPLLs for dual-channel mode or not. 3982 * set the DPLLs for dual-channel mode or not.
3937 */ 3983 */
3938 if (clock.p2 == 7) 3984 if (clock.p2 == 7)
3939 lvds |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP; 3985 temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
3940 else 3986 else
3941 lvds &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP); 3987 temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
3942 3988
3943 /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP) 3989 /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
3944 * appropriately here, but we need to look more thoroughly into how 3990 * appropriately here, but we need to look more thoroughly into how
3945 * panels behave in the two modes. 3991 * panels behave in the two modes.
3946 */ 3992 */
3947 /* set the dithering flag */ 3993 /* set the dithering flag on non-PCH LVDS as needed */
3948 if (IS_I965G(dev)) { 3994 if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) {
3949 if (dev_priv->lvds_dither) { 3995 if (dev_priv->lvds_dither)
3950 if (HAS_PCH_SPLIT(dev)) { 3996 temp |= LVDS_ENABLE_DITHER;
3951 pipeconf |= PIPE_ENABLE_DITHER; 3997 else
3952 pipeconf |= PIPE_DITHER_TYPE_ST01; 3998 temp &= ~LVDS_ENABLE_DITHER;
3953 } else
3954 lvds |= LVDS_ENABLE_DITHER;
3955 } else {
3956 if (!HAS_PCH_SPLIT(dev)) {
3957 lvds &= ~LVDS_ENABLE_DITHER;
3958 }
3959 }
3960 } 3999 }
3961 I915_WRITE(lvds_reg, lvds); 4000 I915_WRITE(reg, temp);
3962 I915_READ(lvds_reg);
3963 } 4001 }
4002
4003 /* set the dithering flag, and clear it for anything other than a panel. */
4004 if (HAS_PCH_SPLIT(dev)) {
4005 pipeconf &= ~PIPECONF_DITHER_EN;
4006 pipeconf &= ~PIPECONF_DITHER_TYPE_MASK;
4007 if (dev_priv->lvds_dither && (is_lvds || has_edp_encoder)) {
4008 pipeconf |= PIPECONF_DITHER_EN;
4009 pipeconf |= PIPECONF_DITHER_TYPE_ST1;
4010 }
4011 }
4012
3964 if (is_dp) 4013 if (is_dp)
3965 intel_dp_set_m_n(crtc, mode, adjusted_mode); 4014 intel_dp_set_m_n(crtc, mode, adjusted_mode);
3966 else if (HAS_PCH_SPLIT(dev)) { 4015 else if (HAS_PCH_SPLIT(dev)) {
@@ -3981,26 +4030,32 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3981 if (!has_edp_encoder) { 4030 if (!has_edp_encoder) {
3982 I915_WRITE(fp_reg, fp); 4031 I915_WRITE(fp_reg, fp);
3983 I915_WRITE(dpll_reg, dpll); 4032 I915_WRITE(dpll_reg, dpll);
3984 I915_READ(dpll_reg); 4033
3985 /* Wait for the clocks to stabilize. */ 4034 /* Wait for the clocks to stabilize. */
4035 POSTING_READ(dpll_reg);
3986 udelay(150); 4036 udelay(150);
3987 4037
3988 if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev)) { 4038 if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) {
4039 temp = 0;
3989 if (is_sdvo) { 4040 if (is_sdvo) {
3990 sdvo_pixel_multiply = adjusted_mode->clock / mode->clock; 4041 temp = intel_mode_get_pixel_multiplier(adjusted_mode);
3991 I915_WRITE(dpll_md_reg, (0 << DPLL_MD_UDI_DIVIDER_SHIFT) | 4042 if (temp > 1)
3992 ((sdvo_pixel_multiply - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT)); 4043 temp = (temp - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
3993 } else 4044 else
3994 I915_WRITE(dpll_md_reg, 0); 4045 temp = 0;
4046 }
4047 I915_WRITE(DPLL_MD(pipe), temp);
3995 } else { 4048 } else {
3996 /* write it again -- the BIOS does, after all */ 4049 /* write it again -- the BIOS does, after all */
3997 I915_WRITE(dpll_reg, dpll); 4050 I915_WRITE(dpll_reg, dpll);
3998 } 4051 }
3999 I915_READ(dpll_reg); 4052
4000 /* Wait for the clocks to stabilize. */ 4053 /* Wait for the clocks to stabilize. */
4054 POSTING_READ(dpll_reg);
4001 udelay(150); 4055 udelay(150);
4002 } 4056 }
4003 4057
4058 intel_crtc->lowfreq_avail = false;
4004 if (is_lvds && has_reduced_clock && i915_powersave) { 4059 if (is_lvds && has_reduced_clock && i915_powersave) {
4005 I915_WRITE(fp_reg + 4, fp2); 4060 I915_WRITE(fp_reg + 4, fp2);
4006 intel_crtc->lowfreq_avail = true; 4061 intel_crtc->lowfreq_avail = true;
@@ -4010,7 +4065,6 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
4010 } 4065 }
4011 } else { 4066 } else {
4012 I915_WRITE(fp_reg + 4, fp); 4067 I915_WRITE(fp_reg + 4, fp);
4013 intel_crtc->lowfreq_avail = false;
4014 if (HAS_PIPE_CXSR(dev)) { 4068 if (HAS_PIPE_CXSR(dev)) {
4015 DRM_DEBUG_KMS("disabling CxSR downclocking\n"); 4069 DRM_DEBUG_KMS("disabling CxSR downclocking\n");
4016 pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK; 4070 pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK;
@@ -4029,58 +4083,72 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
4029 } else 4083 } else
4030 pipeconf &= ~PIPECONF_INTERLACE_W_FIELD_INDICATION; /* progressive */ 4084 pipeconf &= ~PIPECONF_INTERLACE_W_FIELD_INDICATION; /* progressive */
4031 4085
4032 I915_WRITE(htot_reg, (adjusted_mode->crtc_hdisplay - 1) | 4086 I915_WRITE(HTOTAL(pipe),
4087 (adjusted_mode->crtc_hdisplay - 1) |
4033 ((adjusted_mode->crtc_htotal - 1) << 16)); 4088 ((adjusted_mode->crtc_htotal - 1) << 16));
4034 I915_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - 1) | 4089 I915_WRITE(HBLANK(pipe),
4090 (adjusted_mode->crtc_hblank_start - 1) |
4035 ((adjusted_mode->crtc_hblank_end - 1) << 16)); 4091 ((adjusted_mode->crtc_hblank_end - 1) << 16));
4036 I915_WRITE(hsync_reg, (adjusted_mode->crtc_hsync_start - 1) | 4092 I915_WRITE(HSYNC(pipe),
4093 (adjusted_mode->crtc_hsync_start - 1) |
4037 ((adjusted_mode->crtc_hsync_end - 1) << 16)); 4094 ((adjusted_mode->crtc_hsync_end - 1) << 16));
4038 I915_WRITE(vtot_reg, (adjusted_mode->crtc_vdisplay - 1) | 4095
4096 I915_WRITE(VTOTAL(pipe),
4097 (adjusted_mode->crtc_vdisplay - 1) |
4039 ((adjusted_mode->crtc_vtotal - 1) << 16)); 4098 ((adjusted_mode->crtc_vtotal - 1) << 16));
4040 I915_WRITE(vblank_reg, (adjusted_mode->crtc_vblank_start - 1) | 4099 I915_WRITE(VBLANK(pipe),
4100 (adjusted_mode->crtc_vblank_start - 1) |
4041 ((adjusted_mode->crtc_vblank_end - 1) << 16)); 4101 ((adjusted_mode->crtc_vblank_end - 1) << 16));
4042 I915_WRITE(vsync_reg, (adjusted_mode->crtc_vsync_start - 1) | 4102 I915_WRITE(VSYNC(pipe),
4103 (adjusted_mode->crtc_vsync_start - 1) |
4043 ((adjusted_mode->crtc_vsync_end - 1) << 16)); 4104 ((adjusted_mode->crtc_vsync_end - 1) << 16));
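
/* Editor's note: how the (start - 1) | ((end - 1) << 16) packing above
 * lands in a timing register, for a hypothetical 1920x1080 CEA mode with
 * an htotal of 2200. */
#include <stdio.h>

int main(void)
{
	unsigned crtc_hdisplay = 1920, crtc_htotal = 2200;
	unsigned htotal = (crtc_hdisplay - 1) | ((crtc_htotal - 1) << 16);

	printf("HTOTAL=0x%08x\n", htotal); /* 0x0897077f */
	return 0;
}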
4044 /* pipesrc and dspsize control the size that is scaled from, which should 4105
4045 * always be the user's requested size. 4106 /* pipesrc and dspsize control the size that is scaled from,
4107 * which should always be the user's requested size.
4046 */ 4108 */
4047 if (!HAS_PCH_SPLIT(dev)) { 4109 if (!HAS_PCH_SPLIT(dev)) {
4048 I915_WRITE(dspsize_reg, ((mode->vdisplay - 1) << 16) | 4110 I915_WRITE(DSPSIZE(plane),
4049 (mode->hdisplay - 1)); 4111 ((mode->vdisplay - 1) << 16) |
4050 I915_WRITE(dsppos_reg, 0); 4112 (mode->hdisplay - 1));
4113 I915_WRITE(DSPPOS(plane), 0);
4051 } 4114 }
4052 I915_WRITE(pipesrc_reg, ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1)); 4115 I915_WRITE(PIPESRC(pipe),
4116 ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
4053 4117
4054 if (HAS_PCH_SPLIT(dev)) { 4118 if (HAS_PCH_SPLIT(dev)) {
4055 I915_WRITE(data_m1_reg, TU_SIZE(m_n.tu) | m_n.gmch_m); 4119 I915_WRITE(PIPE_DATA_M1(pipe), TU_SIZE(m_n.tu) | m_n.gmch_m);
4056 I915_WRITE(data_n1_reg, TU_SIZE(m_n.tu) | m_n.gmch_n); 4120 I915_WRITE(PIPE_DATA_N1(pipe), m_n.gmch_n);
4057 I915_WRITE(link_m1_reg, m_n.link_m); 4121 I915_WRITE(PIPE_LINK_M1(pipe), m_n.link_m);
4058 I915_WRITE(link_n1_reg, m_n.link_n); 4122 I915_WRITE(PIPE_LINK_N1(pipe), m_n.link_n);
4059 4123
4060 if (has_edp_encoder) { 4124 if (has_edp_encoder) {
4061 ironlake_set_pll_edp(crtc, adjusted_mode->clock); 4125 ironlake_set_pll_edp(crtc, adjusted_mode->clock);
4062 } else { 4126 } else {
4063 /* enable FDI RX PLL too */ 4127 /* enable FDI RX PLL too */
4064 temp = I915_READ(fdi_rx_reg); 4128 reg = FDI_RX_CTL(pipe);
4065 I915_WRITE(fdi_rx_reg, temp | FDI_RX_PLL_ENABLE); 4129 temp = I915_READ(reg);
4066 I915_READ(fdi_rx_reg); 4130 I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);
4131
4132 POSTING_READ(reg);
4067 udelay(200); 4133 udelay(200);
4068 4134
4069 /* enable FDI TX PLL too */ 4135 /* enable FDI TX PLL too */
4070 temp = I915_READ(fdi_tx_reg); 4136 reg = FDI_TX_CTL(pipe);
4071 I915_WRITE(fdi_tx_reg, temp | FDI_TX_PLL_ENABLE); 4137 temp = I915_READ(reg);
4072 I915_READ(fdi_tx_reg); 4138 I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);
4073 4139
4074 /* enable FDI RX PCDCLK */ 4140 /* enable FDI RX PCDCLK */
4075 temp = I915_READ(fdi_rx_reg); 4141 reg = FDI_RX_CTL(pipe);
4076 I915_WRITE(fdi_rx_reg, temp | FDI_SEL_PCDCLK); 4142 temp = I915_READ(reg);
4077 I915_READ(fdi_rx_reg); 4143 I915_WRITE(reg, temp | FDI_PCDCLK);
4144
4145 POSTING_READ(reg);
4078 udelay(200); 4146 udelay(200);
4079 } 4147 }
4080 } 4148 }
4081 4149
4082 I915_WRITE(pipeconf_reg, pipeconf); 4150 I915_WRITE(PIPECONF(pipe), pipeconf);
4083 I915_READ(pipeconf_reg); 4151 POSTING_READ(PIPECONF(pipe));
4084 4152
4085 intel_wait_for_vblank(dev, pipe); 4153 intel_wait_for_vblank(dev, pipe);
4086 4154
@@ -4090,9 +4158,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
4090 I915_WRITE(DISP_ARB_CTL, temp | DISP_TILE_SURFACE_SWIZZLING); 4158 I915_WRITE(DISP_ARB_CTL, temp | DISP_TILE_SURFACE_SWIZZLING);
4091 } 4159 }
4092 4160
4093 I915_WRITE(dspcntr_reg, dspcntr); 4161 I915_WRITE(DSPCNTR(plane), dspcntr);
4094 4162
4095 /* Flush the plane changes */
4096 ret = intel_pipe_set_base(crtc, x, y, old_fb); 4163 ret = intel_pipe_set_base(crtc, x, y, old_fb);
4097 4164
4098 intel_update_watermarks(dev); 4165 intel_update_watermarks(dev);
@@ -4185,7 +4252,8 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
4185} 4252}
4186 4253
4187/* If no part of the cursor is visible on the framebuffer, then the GPU may hang... */ 4254
4188static void intel_crtc_update_cursor(struct drm_crtc *crtc) 4255static void intel_crtc_update_cursor(struct drm_crtc *crtc,
4256 bool on)
4189{ 4257{
4190 struct drm_device *dev = crtc->dev; 4258 struct drm_device *dev = crtc->dev;
4191 struct drm_i915_private *dev_priv = dev->dev_private; 4259 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -4198,7 +4266,7 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc)
4198 4266
4199 pos = 0; 4267 pos = 0;
4200 4268
4201 if (intel_crtc->cursor_on && crtc->fb) { 4269 if (on && crtc->enabled && crtc->fb) {
4202 base = intel_crtc->cursor_addr; 4270 base = intel_crtc->cursor_addr;
4203 if (x > (int) crtc->fb->width) 4271 if (x > (int) crtc->fb->width)
4204 base = 0; 4272 base = 0;
@@ -4310,7 +4378,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
4310 addr = obj_priv->phys_obj->handle->busaddr; 4378 addr = obj_priv->phys_obj->handle->busaddr;
4311 } 4379 }
4312 4380
4313 if (!IS_I9XX(dev)) 4381 if (IS_GEN2(dev))
4314 I915_WRITE(CURSIZE, (height << 12) | width); 4382 I915_WRITE(CURSIZE, (height << 12) | width);
4315 4383
4316 finish: 4384 finish:
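
/* Editor's note: the gen2 CURSIZE packing above, worked for a hypothetical
 * 64x64 cursor: height occupies the field starting at bit 12, width the
 * low bits. */
#include <stdio.h>

int main(void)
{
	unsigned width = 64, height = 64;

	printf("CURSIZE=0x%08x\n", (height << 12) | width); /* 0x00040040 */
	return 0;
}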
@@ -4330,7 +4398,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
4330 intel_crtc->cursor_width = width; 4398 intel_crtc->cursor_width = width;
4331 intel_crtc->cursor_height = height; 4399 intel_crtc->cursor_height = height;
4332 4400
4333 intel_crtc_update_cursor(crtc); 4401 intel_crtc_update_cursor(crtc, true);
4334 4402
4335 return 0; 4403 return 0;
4336fail_unpin: 4404fail_unpin:
@@ -4349,7 +4417,7 @@ static int intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
4349 intel_crtc->cursor_x = x; 4417 intel_crtc->cursor_x = x;
4350 intel_crtc->cursor_y = y; 4418 intel_crtc->cursor_y = y;
4351 4419
4352 intel_crtc_update_cursor(crtc); 4420 intel_crtc_update_cursor(crtc, true);
4353 4421
4354 return 0; 4422 return 0;
4355} 4423}
@@ -4418,7 +4486,7 @@ struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
4418 struct intel_crtc *intel_crtc; 4486 struct intel_crtc *intel_crtc;
4419 struct drm_crtc *possible_crtc; 4487 struct drm_crtc *possible_crtc;
4420 struct drm_crtc *supported_crtc = NULL; 4488 struct drm_crtc *supported_crtc = NULL;
4421 struct drm_encoder *encoder = &intel_encoder->enc; 4489 struct drm_encoder *encoder = &intel_encoder->base;
4422 struct drm_crtc *crtc = NULL; 4490 struct drm_crtc *crtc = NULL;
4423 struct drm_device *dev = encoder->dev; 4491 struct drm_device *dev = encoder->dev;
4424 struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private; 4492 struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
@@ -4499,7 +4567,7 @@ struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
4499void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder, 4567void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder,
4500 struct drm_connector *connector, int dpms_mode) 4568 struct drm_connector *connector, int dpms_mode)
4501{ 4569{
4502 struct drm_encoder *encoder = &intel_encoder->enc; 4570 struct drm_encoder *encoder = &intel_encoder->base;
4503 struct drm_device *dev = encoder->dev; 4571 struct drm_device *dev = encoder->dev;
4504 struct drm_crtc *crtc = encoder->crtc; 4572 struct drm_crtc *crtc = encoder->crtc;
4505 struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private; 4573 struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
@@ -4545,7 +4613,7 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
4545 clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT; 4613 clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
4546 } 4614 }
4547 4615
4548 if (IS_I9XX(dev)) { 4616 if (!IS_GEN2(dev)) {
4549 if (IS_PINEVIEW(dev)) 4617 if (IS_PINEVIEW(dev))
4550 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >> 4618 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
4551 DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW); 4619 DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
@@ -4649,8 +4717,6 @@ static void intel_gpu_idle_timer(unsigned long arg)
4649 struct drm_device *dev = (struct drm_device *)arg; 4717 struct drm_device *dev = (struct drm_device *)arg;
4650 drm_i915_private_t *dev_priv = dev->dev_private; 4718 drm_i915_private_t *dev_priv = dev->dev_private;
4651 4719
4652 DRM_DEBUG_DRIVER("idle timer fired, downclocking\n");
4653
4654 dev_priv->busy = false; 4720 dev_priv->busy = false;
4655 4721
4656 queue_work(dev_priv->wq, &dev_priv->idle_work); 4722 queue_work(dev_priv->wq, &dev_priv->idle_work);
@@ -4664,14 +4730,12 @@ static void intel_crtc_idle_timer(unsigned long arg)
4664 struct drm_crtc *crtc = &intel_crtc->base; 4730 struct drm_crtc *crtc = &intel_crtc->base;
4665 drm_i915_private_t *dev_priv = crtc->dev->dev_private; 4731 drm_i915_private_t *dev_priv = crtc->dev->dev_private;
4666 4732
4667 DRM_DEBUG_DRIVER("idle timer fired, downclocking\n");
4668
4669 intel_crtc->busy = false; 4733 intel_crtc->busy = false;
4670 4734
4671 queue_work(dev_priv->wq, &dev_priv->idle_work); 4735 queue_work(dev_priv->wq, &dev_priv->idle_work);
4672} 4736}
4673 4737
4674static void intel_increase_pllclock(struct drm_crtc *crtc, bool schedule) 4738static void intel_increase_pllclock(struct drm_crtc *crtc)
4675{ 4739{
4676 struct drm_device *dev = crtc->dev; 4740 struct drm_device *dev = crtc->dev;
4677 drm_i915_private_t *dev_priv = dev->dev_private; 4741 drm_i915_private_t *dev_priv = dev->dev_private;
@@ -4706,9 +4770,8 @@ static void intel_increase_pllclock(struct drm_crtc *crtc, bool schedule)
4706 } 4770 }
4707 4771
4708 /* Schedule downclock */ 4772 /* Schedule downclock */
4709 if (schedule) 4773 mod_timer(&intel_crtc->idle_timer, jiffies +
4710 mod_timer(&intel_crtc->idle_timer, jiffies + 4774 msecs_to_jiffies(CRTC_IDLE_TIMEOUT));
4711 msecs_to_jiffies(CRTC_IDLE_TIMEOUT));
4712} 4775}
4713 4776
4714static void intel_decrease_pllclock(struct drm_crtc *crtc) 4777static void intel_decrease_pllclock(struct drm_crtc *crtc)
@@ -4844,7 +4907,7 @@ void intel_mark_busy(struct drm_device *dev, struct drm_gem_object *obj)
4844 I915_WRITE(FW_BLC_SELF, fw_blc_self | FW_BLC_SELF_EN_MASK); 4907 I915_WRITE(FW_BLC_SELF, fw_blc_self | FW_BLC_SELF_EN_MASK);
4845 } 4908 }
4846 /* Non-busy -> busy, upclock */ 4909 /* Non-busy -> busy, upclock */
4847 intel_increase_pllclock(crtc, true); 4910 intel_increase_pllclock(crtc);
4848 intel_crtc->busy = true; 4911 intel_crtc->busy = true;
4849 } else { 4912 } else {
4850 /* Busy -> busy, put off timer */ 4913 /* Busy -> busy, put off timer */
@@ -4858,8 +4921,22 @@ void intel_mark_busy(struct drm_device *dev, struct drm_gem_object *obj)
4858static void intel_crtc_destroy(struct drm_crtc *crtc) 4921static void intel_crtc_destroy(struct drm_crtc *crtc)
4859{ 4922{
4860 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4923 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4924 struct drm_device *dev = crtc->dev;
4925 struct intel_unpin_work *work;
4926 unsigned long flags;
4927
4928 spin_lock_irqsave(&dev->event_lock, flags);
4929 work = intel_crtc->unpin_work;
4930 intel_crtc->unpin_work = NULL;
4931 spin_unlock_irqrestore(&dev->event_lock, flags);
4932
4933 if (work) {
4934 cancel_work_sync(&work->work);
4935 kfree(work);
4936 }
4861 4937
4862 drm_crtc_cleanup(crtc); 4938 drm_crtc_cleanup(crtc);
4939
4863 kfree(intel_crtc); 4940 kfree(intel_crtc);
4864} 4941}
4865 4942
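Note on the intel_crtc_destroy() hunk above: a pending page flip can leave an intel_unpin_work item that would otherwise outlive the CRTC, and the flip-completion interrupt reads intel_crtc->unpin_work under dev->event_lock. The teardown therefore claims the item under that same lock first, so the IRQ path sees NULL, and only then flushes any handler already in flight. A minimal sketch of the detach-then-cancel pattern, names as in the hunk:

    spin_lock_irqsave(&dev->event_lock, flags);
    work = intel_crtc->unpin_work;       /* claim whatever is queued */
    intel_crtc->unpin_work = NULL;       /* the IRQ handler now finds nothing */
    spin_unlock_irqrestore(&dev->event_lock, flags);

    if (work) {
        cancel_work_sync(&work->work);   /* wait out a handler that already started */
        kfree(work);
    }

Calling cancel_work_sync() without the locked detach would still leave a window for the interrupt to queue work against a dying CRTC.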
@@ -4919,7 +4996,7 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
4919 /* Initial scanout buffer will have a 0 pending flip count */ 4996 /* Initial scanout buffer will have a 0 pending flip count */
4920 if ((atomic_read(&obj_priv->pending_flip) == 0) || 4997 if ((atomic_read(&obj_priv->pending_flip) == 0) ||
4921 atomic_dec_and_test(&obj_priv->pending_flip)) 4998 atomic_dec_and_test(&obj_priv->pending_flip))
4922 DRM_WAKEUP(&dev_priv->pending_flip_queue); 4999 wake_up(&dev_priv->pending_flip_queue);
4923 schedule_work(&work->work); 5000 schedule_work(&work->work);
4924 5001
4925 trace_i915_flip_complete(intel_crtc->plane, work->pending_flip_obj); 5002 trace_i915_flip_complete(intel_crtc->plane, work->pending_flip_obj);
@@ -5000,7 +5077,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
5000 obj = intel_fb->obj; 5077 obj = intel_fb->obj;
5001 5078
5002 mutex_lock(&dev->struct_mutex); 5079 mutex_lock(&dev->struct_mutex);
5003 ret = intel_pin_and_fence_fb_obj(dev, obj); 5080 ret = intel_pin_and_fence_fb_obj(dev, obj, true);
5004 if (ret) 5081 if (ret)
5005 goto cleanup_work; 5082 goto cleanup_work;
5006 5083
@@ -5009,9 +5086,6 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
5009 drm_gem_object_reference(obj); 5086 drm_gem_object_reference(obj);
5010 5087
5011 crtc->fb = fb; 5088 crtc->fb = fb;
5012 ret = i915_gem_object_flush_write_domain(obj);
5013 if (ret)
5014 goto cleanup_objs;
5015 5089
5016 ret = drm_vblank_get(dev, intel_crtc->pipe); 5090 ret = drm_vblank_get(dev, intel_crtc->pipe);
5017 if (ret) 5091 if (ret)
@@ -5024,14 +5098,16 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
5024 if (IS_GEN3(dev) || IS_GEN2(dev)) { 5098 if (IS_GEN3(dev) || IS_GEN2(dev)) {
5025 u32 flip_mask; 5099 u32 flip_mask;
5026 5100
5101 /* Can't queue multiple flips, so wait for the previous
5102 * one to finish before executing the next.
5103 */
5104 BEGIN_LP_RING(2);
5027 if (intel_crtc->plane) 5105 if (intel_crtc->plane)
5028 flip_mask = MI_WAIT_FOR_PLANE_B_FLIP; 5106 flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
5029 else 5107 else
5030 flip_mask = MI_WAIT_FOR_PLANE_A_FLIP; 5108 flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
5031
5032 BEGIN_LP_RING(2);
5033 OUT_RING(MI_WAIT_FOR_EVENT | flip_mask); 5109 OUT_RING(MI_WAIT_FOR_EVENT | flip_mask);
5034 OUT_RING(0); 5110 OUT_RING(MI_NOOP);
5035 ADVANCE_LP_RING(); 5111 ADVANCE_LP_RING();
5036 } 5112 }
5037 5113
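Note: BEGIN_LP_RING() only reserves space in the legacy ring, so hoisting it above the flip_mask selection (which emits nothing) reads as a tidiness move; the substantive changes are the added comment and the final dword. MI_NOOP is defined so that it encodes as 0, meaning OUT_RING(MI_NOOP) emits the identical padding dword the old OUT_RING(0) did, just spelled as the instruction it is. Net result:

    BEGIN_LP_RING(2);                         /* reserve two dwords */
    OUT_RING(MI_WAIT_FOR_EVENT | flip_mask);  /* stall until the previous flip completes */
    OUT_RING(MI_NOOP);                        /* padding; same encoding as literal 0 */
    ADVANCE_LP_RING();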
@@ -5112,15 +5188,14 @@ cleanup_work:
5112 return ret; 5188 return ret;
5113} 5189}
5114 5190
5115static const struct drm_crtc_helper_funcs intel_helper_funcs = { 5191static struct drm_crtc_helper_funcs intel_helper_funcs = {
5116 .dpms = intel_crtc_dpms, 5192 .dpms = intel_crtc_dpms,
5117 .mode_fixup = intel_crtc_mode_fixup, 5193 .mode_fixup = intel_crtc_mode_fixup,
5118 .mode_set = intel_crtc_mode_set, 5194 .mode_set = intel_crtc_mode_set,
5119 .mode_set_base = intel_pipe_set_base, 5195 .mode_set_base = intel_pipe_set_base,
5120 .mode_set_base_atomic = intel_pipe_set_base_atomic, 5196 .mode_set_base_atomic = intel_pipe_set_base_atomic,
5121 .prepare = intel_crtc_prepare,
5122 .commit = intel_crtc_commit,
5123 .load_lut = intel_crtc_load_lut, 5197 .load_lut = intel_crtc_load_lut,
5198 .disable = intel_crtc_disable,
5124}; 5199};
5125 5200
5126static const struct drm_crtc_funcs intel_crtc_funcs = { 5201static const struct drm_crtc_funcs intel_crtc_funcs = {
@@ -5146,8 +5221,6 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
5146 drm_crtc_init(dev, &intel_crtc->base, &intel_crtc_funcs); 5221 drm_crtc_init(dev, &intel_crtc->base, &intel_crtc_funcs);
5147 5222
5148 drm_mode_crtc_set_gamma_size(&intel_crtc->base, 256); 5223 drm_mode_crtc_set_gamma_size(&intel_crtc->base, 256);
5149 intel_crtc->pipe = pipe;
5150 intel_crtc->plane = pipe;
5151 for (i = 0; i < 256; i++) { 5224 for (i = 0; i < 256; i++) {
5152 intel_crtc->lut_r[i] = i; 5225 intel_crtc->lut_r[i] = i;
5153 intel_crtc->lut_g[i] = i; 5226 intel_crtc->lut_g[i] = i;
@@ -5157,9 +5230,9 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
5157 /* Swap pipes & planes for FBC on pre-965 */ 5230 /* Swap pipes & planes for FBC on pre-965 */
5158 intel_crtc->pipe = pipe; 5231 intel_crtc->pipe = pipe;
5159 intel_crtc->plane = pipe; 5232 intel_crtc->plane = pipe;
5160 if (IS_MOBILE(dev) && (IS_I9XX(dev) && !IS_I965G(dev))) { 5233 if (IS_MOBILE(dev) && IS_GEN3(dev)) {
5161 DRM_DEBUG_KMS("swapping pipes & planes for FBC\n"); 5234 DRM_DEBUG_KMS("swapping pipes & planes for FBC\n");
5162 intel_crtc->plane = ((pipe == 0) ? 1 : 0); 5235 intel_crtc->plane = !pipe;
5163 } 5236 }
5164 5237
5165 BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) || 5238 BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
@@ -5169,6 +5242,16 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
5169 5242
5170 intel_crtc->cursor_addr = 0; 5243 intel_crtc->cursor_addr = 0;
5171 intel_crtc->dpms_mode = -1; 5244 intel_crtc->dpms_mode = -1;
5245 intel_crtc->active = true; /* force the pipe off on setup_init_config */
5246
5247 if (HAS_PCH_SPLIT(dev)) {
5248 intel_helper_funcs.prepare = ironlake_crtc_prepare;
5249 intel_helper_funcs.commit = ironlake_crtc_commit;
5250 } else {
5251 intel_helper_funcs.prepare = i9xx_crtc_prepare;
5252 intel_helper_funcs.commit = i9xx_crtc_commit;
5253 }
5254
5172 drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs); 5255 drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
5173 5256
5174 intel_crtc->busy = false; 5257 intel_crtc->busy = false;
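Note: intel_helper_funcs lost its const qualifier in the earlier hunk precisely so that intel_crtc_init() can patch in the PCH-split or i9xx prepare/commit hooks here at runtime. The cost is a single mutable vtable shared by every CRTC, which only works because all pipes on a given device take the same branch; the assignment is effectively a device-wide decision repeated per CRTC:

    /* equivalent one-shot spelling of the hunk's if/else */
    intel_helper_funcs.prepare = HAS_PCH_SPLIT(dev) ? ironlake_crtc_prepare
                                                    : i9xx_crtc_prepare;
    intel_helper_funcs.commit  = HAS_PCH_SPLIT(dev) ? ironlake_crtc_commit
                                                    : i9xx_crtc_commit;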
@@ -5204,38 +5287,25 @@ int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
5204 return 0; 5287 return 0;
5205} 5288}
5206 5289
5207struct drm_crtc *intel_get_crtc_from_pipe(struct drm_device *dev, int pipe)
5208{
5209 struct drm_crtc *crtc = NULL;
5210
5211 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
5212 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5213 if (intel_crtc->pipe == pipe)
5214 break;
5215 }
5216 return crtc;
5217}
5218
5219static int intel_encoder_clones(struct drm_device *dev, int type_mask) 5290static int intel_encoder_clones(struct drm_device *dev, int type_mask)
5220{ 5291{
5292 struct intel_encoder *encoder;
5221 int index_mask = 0; 5293 int index_mask = 0;
5222 struct drm_encoder *encoder;
5223 int entry = 0; 5294 int entry = 0;
5224 5295
5225 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { 5296 list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
5226 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); 5297 if (type_mask & encoder->clone_mask)
5227 if (type_mask & intel_encoder->clone_mask)
5228 index_mask |= (1 << entry); 5298 index_mask |= (1 << entry);
5229 entry++; 5299 entry++;
5230 } 5300 }
5301
5231 return index_mask; 5302 return index_mask;
5232} 5303}
5233 5304
5234
5235static void intel_setup_outputs(struct drm_device *dev) 5305static void intel_setup_outputs(struct drm_device *dev)
5236{ 5306{
5237 struct drm_i915_private *dev_priv = dev->dev_private; 5307 struct drm_i915_private *dev_priv = dev->dev_private;
5238 struct drm_encoder *encoder; 5308 struct intel_encoder *encoder;
5239 bool dpd_is_edp = false; 5309 bool dpd_is_edp = false;
5240 5310
5241 if (IS_MOBILE(dev) && !IS_I830(dev)) 5311 if (IS_MOBILE(dev) && !IS_I830(dev))
@@ -5324,12 +5394,10 @@ static void intel_setup_outputs(struct drm_device *dev)
5324 if (SUPPORTS_TV(dev)) 5394 if (SUPPORTS_TV(dev))
5325 intel_tv_init(dev); 5395 intel_tv_init(dev);
5326 5396
5327 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { 5397 list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
5328 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); 5398 encoder->base.possible_crtcs = encoder->crtc_mask;
5329 5399 encoder->base.possible_clones =
5330 encoder->possible_crtcs = intel_encoder->crtc_mask; 5400 intel_encoder_clones(dev, encoder->clone_mask);
5331 encoder->possible_clones = intel_encoder_clones(dev,
5332 intel_encoder->clone_mask);
5333 } 5401 }
5334} 5402}
5335 5403
@@ -5363,8 +5431,25 @@ int intel_framebuffer_init(struct drm_device *dev,
5363 struct drm_mode_fb_cmd *mode_cmd, 5431 struct drm_mode_fb_cmd *mode_cmd,
5364 struct drm_gem_object *obj) 5432 struct drm_gem_object *obj)
5365{ 5433{
5434 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
5366 int ret; 5435 int ret;
5367 5436
5437 if (obj_priv->tiling_mode == I915_TILING_Y)
5438 return -EINVAL;
5439
5440 if (mode_cmd->pitch & 63)
5441 return -EINVAL;
5442
5443 switch (mode_cmd->bpp) {
5444 case 8:
5445 case 16:
5446 case 24:
5447 case 32:
5448 break;
5449 default:
5450 return -EINVAL;
5451 }
5452
5368 ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs); 5453 ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs);
5369 if (ret) { 5454 if (ret) {
5370 DRM_ERROR("framebuffer init failed %d\n", ret); 5455 DRM_ERROR("framebuffer init failed %d\n", ret);
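Note: intel_framebuffer_init() now rejects unusable scanout buffers at creation time instead of letting a later mode set fail: Y-tiled objects (the display engine of this era scans out linear or X-tiled only), pitches that are not 64-byte aligned, and depths other than 8/16/24/32 bpp. The same checks as a single predicate, with a hypothetical helper name:

    static bool i915_scanout_ok(const struct drm_mode_fb_cmd *cmd, int tiling)
    {
        return tiling != I915_TILING_Y &&
               (cmd->pitch & 63) == 0 &&             /* 64-byte aligned stride */
               (cmd->bpp == 8  || cmd->bpp == 16 ||
                cmd->bpp == 24 || cmd->bpp == 32);
    }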
@@ -5473,6 +5558,10 @@ void ironlake_enable_drps(struct drm_device *dev)
5473 u32 rgvmodectl = I915_READ(MEMMODECTL); 5558 u32 rgvmodectl = I915_READ(MEMMODECTL);
5474 u8 fmax, fmin, fstart, vstart; 5559 u8 fmax, fmin, fstart, vstart;
5475 5560
5561 /* Enable temp reporting */
5562 I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN);
5563 I915_WRITE16(TSC1, I915_READ(TSC1) | TSE);
5564
5476 /* 100ms RC evaluation intervals */ 5565 /* 100ms RC evaluation intervals */
5477 I915_WRITE(RCUPEI, 100000); 5566 I915_WRITE(RCUPEI, 100000);
5478 I915_WRITE(RCDNEI, 100000); 5567 I915_WRITE(RCDNEI, 100000);
@@ -5515,7 +5604,7 @@ void ironlake_enable_drps(struct drm_device *dev)
5515 rgvmodectl |= MEMMODE_SWMODE_EN; 5604 rgvmodectl |= MEMMODE_SWMODE_EN;
5516 I915_WRITE(MEMMODECTL, rgvmodectl); 5605 I915_WRITE(MEMMODECTL, rgvmodectl);
5517 5606
5518 if (wait_for((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 1, 0)) 5607 if (wait_for((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10))
5519 DRM_ERROR("stuck trying to change perf mode\n"); 5608 DRM_ERROR("stuck trying to change perf mode\n");
5520 msleep(1); 5609 msleep(1);
5521 5610
@@ -5714,20 +5803,20 @@ void intel_init_clock_gating(struct drm_device *dev)
5714 if (IS_GM45(dev)) 5803 if (IS_GM45(dev))
5715 dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE; 5804 dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
5716 I915_WRITE(DSPCLK_GATE_D, dspclk_gate); 5805 I915_WRITE(DSPCLK_GATE_D, dspclk_gate);
5717 } else if (IS_I965GM(dev)) { 5806 } else if (IS_CRESTLINE(dev)) {
5718 I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE); 5807 I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
5719 I915_WRITE(RENCLK_GATE_D2, 0); 5808 I915_WRITE(RENCLK_GATE_D2, 0);
5720 I915_WRITE(DSPCLK_GATE_D, 0); 5809 I915_WRITE(DSPCLK_GATE_D, 0);
5721 I915_WRITE(RAMCLK_GATE_D, 0); 5810 I915_WRITE(RAMCLK_GATE_D, 0);
5722 I915_WRITE16(DEUC, 0); 5811 I915_WRITE16(DEUC, 0);
5723 } else if (IS_I965G(dev)) { 5812 } else if (IS_BROADWATER(dev)) {
5724 I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE | 5813 I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
5725 I965_RCC_CLOCK_GATE_DISABLE | 5814 I965_RCC_CLOCK_GATE_DISABLE |
5726 I965_RCPB_CLOCK_GATE_DISABLE | 5815 I965_RCPB_CLOCK_GATE_DISABLE |
5727 I965_ISC_CLOCK_GATE_DISABLE | 5816 I965_ISC_CLOCK_GATE_DISABLE |
5728 I965_FBC_CLOCK_GATE_DISABLE); 5817 I965_FBC_CLOCK_GATE_DISABLE);
5729 I915_WRITE(RENCLK_GATE_D2, 0); 5818 I915_WRITE(RENCLK_GATE_D2, 0);
5730 } else if (IS_I9XX(dev)) { 5819 } else if (IS_GEN3(dev)) {
5731 u32 dstate = I915_READ(D_STATE); 5820 u32 dstate = I915_READ(D_STATE);
5732 5821
5733 dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING | 5822 dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
@@ -5809,7 +5898,7 @@ static void intel_init_display(struct drm_device *dev)
5809 dev_priv->display.fbc_enabled = g4x_fbc_enabled; 5898 dev_priv->display.fbc_enabled = g4x_fbc_enabled;
5810 dev_priv->display.enable_fbc = g4x_enable_fbc; 5899 dev_priv->display.enable_fbc = g4x_enable_fbc;
5811 dev_priv->display.disable_fbc = g4x_disable_fbc; 5900 dev_priv->display.disable_fbc = g4x_disable_fbc;
5812 } else if (IS_I965GM(dev)) { 5901 } else if (IS_CRESTLINE(dev)) {
5813 dev_priv->display.fbc_enabled = i8xx_fbc_enabled; 5902 dev_priv->display.fbc_enabled = i8xx_fbc_enabled;
5814 dev_priv->display.enable_fbc = i8xx_enable_fbc; 5903 dev_priv->display.enable_fbc = i8xx_enable_fbc;
5815 dev_priv->display.disable_fbc = i8xx_disable_fbc; 5904 dev_priv->display.disable_fbc = i8xx_disable_fbc;
@@ -5869,9 +5958,9 @@ static void intel_init_display(struct drm_device *dev)
5869 dev_priv->display.update_wm = pineview_update_wm; 5958 dev_priv->display.update_wm = pineview_update_wm;
5870 } else if (IS_G4X(dev)) 5959 } else if (IS_G4X(dev))
5871 dev_priv->display.update_wm = g4x_update_wm; 5960 dev_priv->display.update_wm = g4x_update_wm;
5872 else if (IS_I965G(dev)) 5961 else if (IS_GEN4(dev))
5873 dev_priv->display.update_wm = i965_update_wm; 5962 dev_priv->display.update_wm = i965_update_wm;
5874 else if (IS_I9XX(dev)) { 5963 else if (IS_GEN3(dev)) {
5875 dev_priv->display.update_wm = i9xx_update_wm; 5964 dev_priv->display.update_wm = i9xx_update_wm;
5876 dev_priv->display.get_fifo_size = i9xx_get_fifo_size; 5965 dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
5877 } else if (IS_I85X(dev)) { 5966 } else if (IS_I85X(dev)) {
@@ -5985,24 +6074,24 @@ void intel_modeset_init(struct drm_device *dev)
5985 6074
5986 intel_init_display(dev); 6075 intel_init_display(dev);
5987 6076
5988 if (IS_I965G(dev)) { 6077 if (IS_GEN2(dev)) {
5989 dev->mode_config.max_width = 8192; 6078 dev->mode_config.max_width = 2048;
5990 dev->mode_config.max_height = 8192; 6079 dev->mode_config.max_height = 2048;
5991 } else if (IS_I9XX(dev)) { 6080 } else if (IS_GEN3(dev)) {
5992 dev->mode_config.max_width = 4096; 6081 dev->mode_config.max_width = 4096;
5993 dev->mode_config.max_height = 4096; 6082 dev->mode_config.max_height = 4096;
5994 } else { 6083 } else {
5995 dev->mode_config.max_width = 2048; 6084 dev->mode_config.max_width = 8192;
5996 dev->mode_config.max_height = 2048; 6085 dev->mode_config.max_height = 8192;
5997 } 6086 }
5998 6087
5999 /* set memory base */ 6088 /* set memory base */
6000 if (IS_I9XX(dev)) 6089 if (IS_GEN2(dev))
6001 dev->mode_config.fb_base = pci_resource_start(dev->pdev, 2);
6002 else
6003 dev->mode_config.fb_base = pci_resource_start(dev->pdev, 0); 6090 dev->mode_config.fb_base = pci_resource_start(dev->pdev, 0);
6091 else
6092 dev->mode_config.fb_base = pci_resource_start(dev->pdev, 2);
6004 6093
6005 if (IS_MOBILE(dev) || IS_I9XX(dev)) 6094 if (IS_MOBILE(dev) || !IS_GEN2(dev))
6006 dev_priv->num_pipe = 2; 6095 dev_priv->num_pipe = 2;
6007 else 6096 else
6008 dev_priv->num_pipe = 1; 6097 dev_priv->num_pipe = 1;
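Note: the mode-size limits keep their old values and are only re-keyed to the generation macros, with the cascade flipped to test the oldest hardware first: gen2 is capped at 2048x2048, gen3 at 4096x4096, and gen4+ at 8192x8192. The fb_base swap is the same rewrite: gen2 exposes the graphics aperture in PCI BAR 0, everything newer in BAR 2, i.e.

    dev->mode_config.fb_base =
        pci_resource_start(dev->pdev, IS_GEN2(dev) ? 0 : 2);

is an equivalent one-line spelling of the new branch.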
@@ -6038,10 +6127,8 @@ void intel_modeset_cleanup(struct drm_device *dev)
6038 struct drm_crtc *crtc; 6127 struct drm_crtc *crtc;
6039 struct intel_crtc *intel_crtc; 6128 struct intel_crtc *intel_crtc;
6040 6129
6041 mutex_lock(&dev->struct_mutex);
6042
6043 drm_kms_helper_poll_fini(dev); 6130 drm_kms_helper_poll_fini(dev);
6044 intel_fbdev_fini(dev); 6131 mutex_lock(&dev->struct_mutex);
6045 6132
6046 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 6133 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
6047 /* Skip inactive CRTCs */ 6134 /* Skip inactive CRTCs */
@@ -6049,12 +6136,9 @@ void intel_modeset_cleanup(struct drm_device *dev)
6049 continue; 6136 continue;
6050 6137
6051 intel_crtc = to_intel_crtc(crtc); 6138 intel_crtc = to_intel_crtc(crtc);
6052 intel_increase_pllclock(crtc, false); 6139 intel_increase_pllclock(crtc);
6053 del_timer_sync(&intel_crtc->idle_timer);
6054 } 6140 }
6055 6141
6056 del_timer_sync(&dev_priv->idle_timer);
6057
6058 if (dev_priv->display.disable_fbc) 6142 if (dev_priv->display.disable_fbc)
6059 dev_priv->display.disable_fbc(dev); 6143 dev_priv->display.disable_fbc(dev);
6060 6144
@@ -6083,33 +6167,36 @@ void intel_modeset_cleanup(struct drm_device *dev)
6083 6167
6084 mutex_unlock(&dev->struct_mutex); 6168 mutex_unlock(&dev->struct_mutex);
6085 6169
6170 /* Disable the irq before mode object teardown, for the irq might
6171 * enqueue unpin/hotplug work. */
6172 drm_irq_uninstall(dev);
6173 cancel_work_sync(&dev_priv->hotplug_work);
6174
6175 /* Shut off idle work before the crtcs get freed. */
6176 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
6177 intel_crtc = to_intel_crtc(crtc);
6178 del_timer_sync(&intel_crtc->idle_timer);
6179 }
6180 del_timer_sync(&dev_priv->idle_timer);
6181 cancel_work_sync(&dev_priv->idle_work);
6182
6086 drm_mode_config_cleanup(dev); 6183 drm_mode_config_cleanup(dev);
6087} 6184}
6088 6185
6089
6090/* 6186/*
6091 * Return which encoder is currently attached for connector. 6187 * Return which encoder is currently attached for connector.
6092 */ 6188 */
6093struct drm_encoder *intel_attached_encoder (struct drm_connector *connector) 6189struct drm_encoder *intel_best_encoder(struct drm_connector *connector)
6094{ 6190{
6095 struct drm_mode_object *obj; 6191 return &intel_attached_encoder(connector)->base;
6096 struct drm_encoder *encoder; 6192}
6097 int i;
6098
6099 for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
6100 if (connector->encoder_ids[i] == 0)
6101 break;
6102
6103 obj = drm_mode_object_find(connector->dev,
6104 connector->encoder_ids[i],
6105 DRM_MODE_OBJECT_ENCODER);
6106 if (!obj)
6107 continue;
6108 6193
6109 encoder = obj_to_encoder(obj); 6194void intel_connector_attach_encoder(struct intel_connector *connector,
6110 return encoder; 6195 struct intel_encoder *encoder)
6111 } 6196{
6112 return NULL; 6197 connector->encoder = encoder;
6198 drm_mode_connector_attach_encoder(&connector->base,
6199 &encoder->base);
6113} 6200}
6114 6201
6115/* 6202/*
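Note on the cleanup ordering above: interrupts must be gone before drm_mode_config_cleanup(), because the IRQ handlers can queue unpin and hotplug work that would then dereference freed mode objects, and the idle timers must be dead before the CRTCs they point at are released. The sequence reads as a checklist:

    drm_irq_uninstall(dev);                     /* 1. nothing new gets queued from IRQ */
    cancel_work_sync(&dev_priv->hotplug_work);  /* 2. drain what is already queued */
    del_timer_sync(&intel_crtc->idle_timer);    /* 3. per-CRTC timers, for each CRTC */
    del_timer_sync(&dev_priv->idle_timer);
    cancel_work_sync(&dev_priv->idle_work);
    drm_mode_config_cleanup(dev);               /* 4. now safe to free mode objects */

The connector side also shrinks: intel_best_encoder() just returns the encoder cached by the new intel_connector_attach_encoder(), replacing the old loop over connector->encoder_ids and drm_mode_object_find().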
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 1a51ee07de3e..152d94507b79 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -58,14 +58,23 @@ struct intel_dp {
58 struct i2c_adapter adapter; 58 struct i2c_adapter adapter;
59 struct i2c_algo_dp_aux_data algo; 59 struct i2c_algo_dp_aux_data algo;
60 bool is_pch_edp; 60 bool is_pch_edp;
61 uint8_t train_set[4];
62 uint8_t link_status[DP_LINK_STATUS_SIZE];
61}; 63};
62 64
63static struct intel_dp *enc_to_intel_dp(struct drm_encoder *encoder) 65static struct intel_dp *enc_to_intel_dp(struct drm_encoder *encoder)
64{ 66{
65 return container_of(enc_to_intel_encoder(encoder), struct intel_dp, base); 67 return container_of(encoder, struct intel_dp, base.base);
68}
69
70static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
71{
72 return container_of(intel_attached_encoder(connector),
73 struct intel_dp, base);
66} 74}
67 75
68static void intel_dp_link_train(struct intel_dp *intel_dp); 76static void intel_dp_start_link_train(struct intel_dp *intel_dp);
77static void intel_dp_complete_link_train(struct intel_dp *intel_dp);
69static void intel_dp_link_down(struct intel_dp *intel_dp); 78static void intel_dp_link_down(struct intel_dp *intel_dp);
70 79
71void 80void
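Note: moving train_set[4] and link_status[] into struct intel_dp is what enables splitting the old intel_dp_link_train() into the start/complete pair declared here; the two halves run at different points of the mode-set sequence, so the in-progress training state has to outlive any single function. Every helper that used to thread these arrays through its parameters now takes only the intel_dp pointer, e.g.:

    /* before: state passed through every call */
    intel_get_adjust_train(intel_dp, link_status, intel_dp->lane_count, train_set);
    /* after: state lives in the encoder */
    intel_get_adjust_train(intel_dp);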
@@ -130,7 +139,7 @@ intel_dp_link_required(struct drm_device *dev, struct intel_dp *intel_dp, int pi
130 struct drm_i915_private *dev_priv = dev->dev_private; 139 struct drm_i915_private *dev_priv = dev->dev_private;
131 140
132 if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) 141 if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp))
133 return (pixel_clock * dev_priv->edp_bpp) / 8; 142 return (pixel_clock * dev_priv->edp.bpp + 7) / 8;
134 else 143 else
135 return pixel_clock * 3; 144 return pixel_clock * 3;
136} 145}
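Note: the eDP bandwidth math also picks up a rounding fix. (pixel_clock * bpp) / 8 truncates whenever bpp is not a multiple of 8 (18bpp panels exist), understating the required link bandwidth; adding 7 before dividing rounds up to whole bytes. This is the standard ceiling-division idiom, equivalent to the kernel's DIV_ROUND_UP():

    /* e.g. 18 bits: (18 + 7) / 8 = 3 bytes, where 18 / 8 truncates to 2 */
    return DIV_ROUND_UP(pixel_clock * dev_priv->edp.bpp, 8);  /* same math, sketch only */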
@@ -145,8 +154,7 @@ static int
145intel_dp_mode_valid(struct drm_connector *connector, 154intel_dp_mode_valid(struct drm_connector *connector,
146 struct drm_display_mode *mode) 155 struct drm_display_mode *mode)
147{ 156{
148 struct drm_encoder *encoder = intel_attached_encoder(connector); 157 struct intel_dp *intel_dp = intel_attached_dp(connector);
149 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
150 struct drm_device *dev = connector->dev; 158 struct drm_device *dev = connector->dev;
151 struct drm_i915_private *dev_priv = dev->dev_private; 159 struct drm_i915_private *dev_priv = dev->dev_private;
152 int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_dp)); 160 int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_dp));
@@ -233,7 +241,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
233 uint8_t *recv, int recv_size) 241 uint8_t *recv, int recv_size)
234{ 242{
235 uint32_t output_reg = intel_dp->output_reg; 243 uint32_t output_reg = intel_dp->output_reg;
236 struct drm_device *dev = intel_dp->base.enc.dev; 244 struct drm_device *dev = intel_dp->base.base.dev;
237 struct drm_i915_private *dev_priv = dev->dev_private; 245 struct drm_i915_private *dev_priv = dev->dev_private;
238 uint32_t ch_ctl = output_reg + 0x10; 246 uint32_t ch_ctl = output_reg + 0x10;
239 uint32_t ch_data = ch_ctl + 4; 247 uint32_t ch_data = ch_ctl + 4;
@@ -246,8 +254,11 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
246 /* The clock divider is based off the hrawclk, 254 /* The clock divider is based off the hrawclk,
247 * and would like to run at 2MHz. So, take the 255 * and would like to run at 2MHz. So, take the
248 * hrawclk value and divide by 2 and use that 256 * hrawclk value and divide by 2 and use that
257 *
258 * Note that PCH attached eDP panels should use a 125MHz input
259 * clock divider.
249 */ 260 */
250 if (IS_eDP(intel_dp)) { 261 if (IS_eDP(intel_dp) && !IS_PCH_eDP(intel_dp)) {
251 if (IS_GEN6(dev)) 262 if (IS_GEN6(dev))
252 aux_clock_divider = 200; /* SNB eDP input clock at 400Mhz */ 263 aux_clock_divider = 200; /* SNB eDP input clock at 400Mhz */
253 else 264 else
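Note: the divider comment is worth unpacking. The AUX channel targets roughly a 2MHz bit clock, so the divider is simply the input clock over 2MHz: with the SNB eDP reference at 400MHz that gives 400/2 = 200, matching the code, and the 125MHz PCH input mentioned in the new comment would land near 62. In other words:

    aux_clock_divider = input_clock_mhz / 2;   /* target ~2MHz AUX clock; sketch */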
@@ -642,7 +653,7 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
642 if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT) { 653 if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT) {
643 lane_count = intel_dp->lane_count; 654 lane_count = intel_dp->lane_count;
644 if (IS_PCH_eDP(intel_dp)) 655 if (IS_PCH_eDP(intel_dp))
645 bpp = dev_priv->edp_bpp; 656 bpp = dev_priv->edp.bpp;
646 break; 657 break;
647 } 658 }
648 } 659 }
@@ -698,7 +709,7 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
698{ 709{
699 struct drm_device *dev = encoder->dev; 710 struct drm_device *dev = encoder->dev;
700 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 711 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
701 struct drm_crtc *crtc = intel_dp->base.enc.crtc; 712 struct drm_crtc *crtc = intel_dp->base.base.crtc;
702 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 713 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
703 714
704 intel_dp->DP = (DP_VOLTAGE_0_4 | 715 intel_dp->DP = (DP_VOLTAGE_0_4 |
@@ -754,13 +765,14 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
754 } 765 }
755} 766}
756 767
 768/* Returns true if the panel was already on when called */
 757static void ironlake_edp_panel_on (struct drm_device *dev) 769static bool ironlake_edp_panel_on (struct drm_device *dev)
758{ 770{
759 struct drm_i915_private *dev_priv = dev->dev_private; 771 struct drm_i915_private *dev_priv = dev->dev_private;
760 u32 pp; 772 u32 pp;
761 773
762 if (I915_READ(PCH_PP_STATUS) & PP_ON) 774 if (I915_READ(PCH_PP_STATUS) & PP_ON)
763 return; 775 return true;
764 776
765 pp = I915_READ(PCH_PP_CONTROL); 777 pp = I915_READ(PCH_PP_CONTROL);
766 778
@@ -769,17 +781,24 @@ static void ironlake_edp_panel_on (struct drm_device *dev)
769 I915_WRITE(PCH_PP_CONTROL, pp); 781 I915_WRITE(PCH_PP_CONTROL, pp);
770 POSTING_READ(PCH_PP_CONTROL); 782 POSTING_READ(PCH_PP_CONTROL);
771 783
772 pp |= PANEL_UNLOCK_REGS | POWER_TARGET_ON; 784 pp |= POWER_TARGET_ON;
773 I915_WRITE(PCH_PP_CONTROL, pp); 785 I915_WRITE(PCH_PP_CONTROL, pp);
774 786
775 if (wait_for(I915_READ(PCH_PP_STATUS) & PP_ON, 5000, 10)) 787 /* Ouch. We need to wait here for some panels, like Dell e6510
788 * https://bugs.freedesktop.org/show_bug.cgi?id=29278i
789 */
790 msleep(300);
791
792 if (wait_for(I915_READ(PCH_PP_STATUS) & PP_ON, 5000))
776 DRM_ERROR("panel on wait timed out: 0x%08x\n", 793 DRM_ERROR("panel on wait timed out: 0x%08x\n",
777 I915_READ(PCH_PP_STATUS)); 794 I915_READ(PCH_PP_STATUS));
778 795
779 pp &= ~(PANEL_UNLOCK_REGS | EDP_FORCE_VDD); 796 pp &= ~(PANEL_UNLOCK_REGS);
780 pp |= PANEL_POWER_RESET; /* restore panel reset bit */ 797 pp |= PANEL_POWER_RESET; /* restore panel reset bit */
781 I915_WRITE(PCH_PP_CONTROL, pp); 798 I915_WRITE(PCH_PP_CONTROL, pp);
782 POSTING_READ(PCH_PP_CONTROL); 799 POSTING_READ(PCH_PP_CONTROL);
800
801 return false;
783} 802}
784 803
785static void ironlake_edp_panel_off (struct drm_device *dev) 804static void ironlake_edp_panel_off (struct drm_device *dev)
@@ -797,14 +816,43 @@ static void ironlake_edp_panel_off (struct drm_device *dev)
797 pp &= ~POWER_TARGET_ON; 816 pp &= ~POWER_TARGET_ON;
798 I915_WRITE(PCH_PP_CONTROL, pp); 817 I915_WRITE(PCH_PP_CONTROL, pp);
799 818
800 if (wait_for((I915_READ(PCH_PP_STATUS) & PP_ON) == 0, 5000, 10)) 819 if (wait_for((I915_READ(PCH_PP_STATUS) & PP_ON) == 0, 5000))
801 DRM_ERROR("panel off wait timed out: 0x%08x\n", 820 DRM_ERROR("panel off wait timed out: 0x%08x\n",
802 I915_READ(PCH_PP_STATUS)); 821 I915_READ(PCH_PP_STATUS));
803 822
804 /* Make sure VDD is enabled so DP AUX will work */ 823 /* Make sure VDD is enabled so DP AUX will work */
805 pp |= EDP_FORCE_VDD | PANEL_POWER_RESET; /* restore panel reset bit */ 824 pp |= PANEL_POWER_RESET; /* restore panel reset bit */
806 I915_WRITE(PCH_PP_CONTROL, pp); 825 I915_WRITE(PCH_PP_CONTROL, pp);
807 POSTING_READ(PCH_PP_CONTROL); 826 POSTING_READ(PCH_PP_CONTROL);
827
828 /* Ouch. We need to wait here for some panels, like Dell e6510
829 * https://bugs.freedesktop.org/show_bug.cgi?id=29278i
830 */
831 msleep(300);
832}
833
834static void ironlake_edp_panel_vdd_on(struct drm_device *dev)
835{
836 struct drm_i915_private *dev_priv = dev->dev_private;
837 u32 pp;
838
839 pp = I915_READ(PCH_PP_CONTROL);
840 pp |= EDP_FORCE_VDD;
841 I915_WRITE(PCH_PP_CONTROL, pp);
842 POSTING_READ(PCH_PP_CONTROL);
843 msleep(300);
844}
845
846static void ironlake_edp_panel_vdd_off(struct drm_device *dev)
847{
848 struct drm_i915_private *dev_priv = dev->dev_private;
849 u32 pp;
850
851 pp = I915_READ(PCH_PP_CONTROL);
852 pp &= ~EDP_FORCE_VDD;
853 I915_WRITE(PCH_PP_CONTROL, pp);
854 POSTING_READ(PCH_PP_CONTROL);
855 msleep(300);
808} 856}
809 857
810static void ironlake_edp_backlight_on (struct drm_device *dev) 858static void ironlake_edp_backlight_on (struct drm_device *dev)
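Note: the new ironlake_edp_panel_vdd_on()/_off() pair exists because DP AUX transactions need the panel's VDD rail up even while the panel itself is off, and forcing EDP_FORCE_VDD is far cheaper than a full panel power-on. The detect path further down uses it in exactly that bracket; roughly (argument list abbreviated):

    ironlake_edp_panel_vdd_on(dev);    /* power only what AUX needs */
    ret = intel_dp_aux_native_read(intel_dp, 0x000, intel_dp->dpcd, ...);
    ironlake_edp_panel_vdd_off(dev);

The 300ms sleeps are the same conservative panel-sequencing cushion as the Dell e6510 workaround noted above.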
@@ -850,6 +898,7 @@ static void ironlake_edp_pll_off(struct drm_encoder *encoder)
850 dpa_ctl = I915_READ(DP_A); 898 dpa_ctl = I915_READ(DP_A);
851 dpa_ctl |= DP_PLL_ENABLE; 899 dpa_ctl |= DP_PLL_ENABLE;
852 I915_WRITE(DP_A, dpa_ctl); 900 I915_WRITE(DP_A, dpa_ctl);
901 POSTING_READ(DP_A);
853 udelay(200); 902 udelay(200);
854} 903}
855 904
@@ -860,9 +909,10 @@ static void intel_dp_prepare(struct drm_encoder *encoder)
860 struct drm_i915_private *dev_priv = dev->dev_private; 909 struct drm_i915_private *dev_priv = dev->dev_private;
861 uint32_t dp_reg = I915_READ(intel_dp->output_reg); 910 uint32_t dp_reg = I915_READ(intel_dp->output_reg);
862 911
863 if (IS_eDP(intel_dp)) { 912 if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) {
913 ironlake_edp_panel_off(dev);
864 ironlake_edp_backlight_off(dev); 914 ironlake_edp_backlight_off(dev);
865 ironlake_edp_panel_on(dev); 915 ironlake_edp_panel_vdd_on(dev);
866 ironlake_edp_pll_on(encoder); 916 ironlake_edp_pll_on(encoder);
867 } 917 }
868 if (dp_reg & DP_PORT_EN) 918 if (dp_reg & DP_PORT_EN)
@@ -873,14 +923,17 @@ static void intel_dp_commit(struct drm_encoder *encoder)
873{ 923{
874 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 924 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
875 struct drm_device *dev = encoder->dev; 925 struct drm_device *dev = encoder->dev;
876 struct drm_i915_private *dev_priv = dev->dev_private;
877 uint32_t dp_reg = I915_READ(intel_dp->output_reg);
878 926
879 if (!(dp_reg & DP_PORT_EN)) { 927 intel_dp_start_link_train(intel_dp);
880 intel_dp_link_train(intel_dp); 928
881 } 929 if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp))
930 ironlake_edp_panel_on(dev);
931
932 intel_dp_complete_link_train(intel_dp);
933
882 if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) 934 if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp))
883 ironlake_edp_backlight_on(dev); 935 ironlake_edp_backlight_on(dev);
936 intel_dp->dpms_mode = DRM_MODE_DPMS_ON;
884} 937}
885 938
886static void 939static void
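Note: intel_dp_commit() now interleaves link training with panel power. Training pattern 1 starts with the port freshly enabled, the eDP panel is brought fully on mid-sequence (presumably because the panel needs power before training can complete, while the port must already be live), channel equalization finishes, and only then does the backlight come up. Condensed:

    intel_dp_start_link_train(intel_dp);         /* port on, pattern 1 */
    if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp))
        ironlake_edp_panel_on(dev);              /* panel power mid-training */
    intel_dp_complete_link_train(intel_dp);      /* pattern 2, channel eq */
    if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp))
        ironlake_edp_backlight_on(dev);          /* backlight last */
    intel_dp->dpms_mode = DRM_MODE_DPMS_ON;      /* keep DPMS bookkeeping consistent */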
@@ -902,9 +955,10 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode)
902 ironlake_edp_pll_off(encoder); 955 ironlake_edp_pll_off(encoder);
903 } else { 956 } else {
904 if (!(dp_reg & DP_PORT_EN)) { 957 if (!(dp_reg & DP_PORT_EN)) {
958 intel_dp_start_link_train(intel_dp);
905 if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) 959 if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp))
906 ironlake_edp_panel_on(dev); 960 ironlake_edp_panel_on(dev);
907 intel_dp_link_train(intel_dp); 961 intel_dp_complete_link_train(intel_dp);
908 if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) 962 if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp))
909 ironlake_edp_backlight_on(dev); 963 ironlake_edp_backlight_on(dev);
910 } 964 }
@@ -917,14 +971,13 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode)
917 * link status information 971 * link status information
918 */ 972 */
919static bool 973static bool
920intel_dp_get_link_status(struct intel_dp *intel_dp, 974intel_dp_get_link_status(struct intel_dp *intel_dp)
921 uint8_t link_status[DP_LINK_STATUS_SIZE])
922{ 975{
923 int ret; 976 int ret;
924 977
925 ret = intel_dp_aux_native_read(intel_dp, 978 ret = intel_dp_aux_native_read(intel_dp,
926 DP_LANE0_1_STATUS, 979 DP_LANE0_1_STATUS,
927 link_status, DP_LINK_STATUS_SIZE); 980 intel_dp->link_status, DP_LINK_STATUS_SIZE);
928 if (ret != DP_LINK_STATUS_SIZE) 981 if (ret != DP_LINK_STATUS_SIZE)
929 return false; 982 return false;
930 return true; 983 return true;
@@ -999,18 +1052,15 @@ intel_dp_pre_emphasis_max(uint8_t voltage_swing)
999} 1052}
1000 1053
1001static void 1054static void
1002intel_get_adjust_train(struct intel_dp *intel_dp, 1055intel_get_adjust_train(struct intel_dp *intel_dp)
1003 uint8_t link_status[DP_LINK_STATUS_SIZE],
1004 int lane_count,
1005 uint8_t train_set[4])
1006{ 1056{
1007 uint8_t v = 0; 1057 uint8_t v = 0;
1008 uint8_t p = 0; 1058 uint8_t p = 0;
1009 int lane; 1059 int lane;
1010 1060
1011 for (lane = 0; lane < lane_count; lane++) { 1061 for (lane = 0; lane < intel_dp->lane_count; lane++) {
1012 uint8_t this_v = intel_get_adjust_request_voltage(link_status, lane); 1062 uint8_t this_v = intel_get_adjust_request_voltage(intel_dp->link_status, lane);
1013 uint8_t this_p = intel_get_adjust_request_pre_emphasis(link_status, lane); 1063 uint8_t this_p = intel_get_adjust_request_pre_emphasis(intel_dp->link_status, lane);
1014 1064
1015 if (this_v > v) 1065 if (this_v > v)
1016 v = this_v; 1066 v = this_v;
@@ -1025,7 +1075,7 @@ intel_get_adjust_train(struct intel_dp *intel_dp,
1025 p = intel_dp_pre_emphasis_max(v) | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED; 1075 p = intel_dp_pre_emphasis_max(v) | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
1026 1076
1027 for (lane = 0; lane < 4; lane++) 1077 for (lane = 0; lane < 4; lane++)
1028 train_set[lane] = v | p; 1078 intel_dp->train_set[lane] = v | p;
1029} 1079}
1030 1080
1031static uint32_t 1081static uint32_t
@@ -1116,18 +1166,18 @@ intel_clock_recovery_ok(uint8_t link_status[DP_LINK_STATUS_SIZE], int lane_count
1116 DP_LANE_CHANNEL_EQ_DONE|\ 1166 DP_LANE_CHANNEL_EQ_DONE|\
1117 DP_LANE_SYMBOL_LOCKED) 1167 DP_LANE_SYMBOL_LOCKED)
1118static bool 1168static bool
1119intel_channel_eq_ok(uint8_t link_status[DP_LINK_STATUS_SIZE], int lane_count) 1169intel_channel_eq_ok(struct intel_dp *intel_dp)
1120{ 1170{
1121 uint8_t lane_align; 1171 uint8_t lane_align;
1122 uint8_t lane_status; 1172 uint8_t lane_status;
1123 int lane; 1173 int lane;
1124 1174
1125 lane_align = intel_dp_link_status(link_status, 1175 lane_align = intel_dp_link_status(intel_dp->link_status,
1126 DP_LANE_ALIGN_STATUS_UPDATED); 1176 DP_LANE_ALIGN_STATUS_UPDATED);
1127 if ((lane_align & DP_INTERLANE_ALIGN_DONE) == 0) 1177 if ((lane_align & DP_INTERLANE_ALIGN_DONE) == 0)
1128 return false; 1178 return false;
1129 for (lane = 0; lane < lane_count; lane++) { 1179 for (lane = 0; lane < intel_dp->lane_count; lane++) {
1130 lane_status = intel_get_lane_status(link_status, lane); 1180 lane_status = intel_get_lane_status(intel_dp->link_status, lane);
1131 if ((lane_status & CHANNEL_EQ_BITS) != CHANNEL_EQ_BITS) 1181 if ((lane_status & CHANNEL_EQ_BITS) != CHANNEL_EQ_BITS)
1132 return false; 1182 return false;
1133 } 1183 }
@@ -1137,48 +1187,47 @@ intel_channel_eq_ok(uint8_t link_status[DP_LINK_STATUS_SIZE], int lane_count)
1137static bool 1187static bool
1138intel_dp_set_link_train(struct intel_dp *intel_dp, 1188intel_dp_set_link_train(struct intel_dp *intel_dp,
1139 uint32_t dp_reg_value, 1189 uint32_t dp_reg_value,
1140 uint8_t dp_train_pat, 1190 uint8_t dp_train_pat)
1141 uint8_t train_set[4],
1142 bool first)
1143{ 1191{
1144 struct drm_device *dev = intel_dp->base.enc.dev; 1192 struct drm_device *dev = intel_dp->base.base.dev;
1145 struct drm_i915_private *dev_priv = dev->dev_private; 1193 struct drm_i915_private *dev_priv = dev->dev_private;
1146 struct intel_crtc *intel_crtc = to_intel_crtc(intel_dp->base.enc.crtc);
1147 int ret; 1194 int ret;
1148 1195
1149 I915_WRITE(intel_dp->output_reg, dp_reg_value); 1196 I915_WRITE(intel_dp->output_reg, dp_reg_value);
1150 POSTING_READ(intel_dp->output_reg); 1197 POSTING_READ(intel_dp->output_reg);
1151 if (first)
1152 intel_wait_for_vblank(dev, intel_crtc->pipe);
1153 1198
1154 intel_dp_aux_native_write_1(intel_dp, 1199 intel_dp_aux_native_write_1(intel_dp,
1155 DP_TRAINING_PATTERN_SET, 1200 DP_TRAINING_PATTERN_SET,
1156 dp_train_pat); 1201 dp_train_pat);
1157 1202
1158 ret = intel_dp_aux_native_write(intel_dp, 1203 ret = intel_dp_aux_native_write(intel_dp,
1159 DP_TRAINING_LANE0_SET, train_set, 4); 1204 DP_TRAINING_LANE0_SET,
1205 intel_dp->train_set, 4);
1160 if (ret != 4) 1206 if (ret != 4)
1161 return false; 1207 return false;
1162 1208
1163 return true; 1209 return true;
1164} 1210}
1165 1211
1212/* Enable corresponding port and start training pattern 1 */
1166static void 1213static void
1167intel_dp_link_train(struct intel_dp *intel_dp) 1214intel_dp_start_link_train(struct intel_dp *intel_dp)
1168{ 1215{
1169 struct drm_device *dev = intel_dp->base.enc.dev; 1216 struct drm_device *dev = intel_dp->base.base.dev;
1170 struct drm_i915_private *dev_priv = dev->dev_private; 1217 struct drm_i915_private *dev_priv = dev->dev_private;
1171 uint8_t train_set[4]; 1218 struct intel_crtc *intel_crtc = to_intel_crtc(intel_dp->base.base.crtc);
1172 uint8_t link_status[DP_LINK_STATUS_SIZE];
1173 int i; 1219 int i;
1174 uint8_t voltage; 1220 uint8_t voltage;
1175 bool clock_recovery = false; 1221 bool clock_recovery = false;
1176 bool channel_eq = false;
1177 bool first = true;
1178 int tries; 1222 int tries;
1179 u32 reg; 1223 u32 reg;
1180 uint32_t DP = intel_dp->DP; 1224 uint32_t DP = intel_dp->DP;
1181 1225
1226 /* Enable output, wait for it to become active */
1227 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
1228 POSTING_READ(intel_dp->output_reg);
1229 intel_wait_for_vblank(dev, intel_crtc->pipe);
1230
1182 /* Write the link configuration data */ 1231 /* Write the link configuration data */
1183 intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET, 1232 intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET,
1184 intel_dp->link_configuration, 1233 intel_dp->link_configuration,
@@ -1189,18 +1238,18 @@ intel_dp_link_train(struct intel_dp *intel_dp)
1189 DP &= ~DP_LINK_TRAIN_MASK_CPT; 1238 DP &= ~DP_LINK_TRAIN_MASK_CPT;
1190 else 1239 else
1191 DP &= ~DP_LINK_TRAIN_MASK; 1240 DP &= ~DP_LINK_TRAIN_MASK;
1192 memset(train_set, 0, 4); 1241 memset(intel_dp->train_set, 0, 4);
1193 voltage = 0xff; 1242 voltage = 0xff;
1194 tries = 0; 1243 tries = 0;
1195 clock_recovery = false; 1244 clock_recovery = false;
1196 for (;;) { 1245 for (;;) {
1197 /* Use train_set[0] to set the voltage and pre emphasis values */ 1246 /* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */
1198 uint32_t signal_levels; 1247 uint32_t signal_levels;
1199 if (IS_GEN6(dev) && IS_eDP(intel_dp)) { 1248 if (IS_GEN6(dev) && IS_eDP(intel_dp)) {
1200 signal_levels = intel_gen6_edp_signal_levels(train_set[0]); 1249 signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]);
1201 DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels; 1250 DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
1202 } else { 1251 } else {
1203 signal_levels = intel_dp_signal_levels(train_set[0], intel_dp->lane_count); 1252 signal_levels = intel_dp_signal_levels(intel_dp->train_set[0], intel_dp->lane_count);
1204 DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; 1253 DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
1205 } 1254 }
1206 1255
@@ -1210,52 +1259,64 @@ intel_dp_link_train(struct intel_dp *intel_dp)
1210 reg = DP | DP_LINK_TRAIN_PAT_1; 1259 reg = DP | DP_LINK_TRAIN_PAT_1;
1211 1260
1212 if (!intel_dp_set_link_train(intel_dp, reg, 1261 if (!intel_dp_set_link_train(intel_dp, reg,
1213 DP_TRAINING_PATTERN_1, train_set, first)) 1262 DP_TRAINING_PATTERN_1))
1214 break; 1263 break;
1215 first = false;
1216 /* Set training pattern 1 */ 1264 /* Set training pattern 1 */
1217 1265
1218 udelay(100); 1266 udelay(100);
1219 if (!intel_dp_get_link_status(intel_dp, link_status)) 1267 if (!intel_dp_get_link_status(intel_dp))
1220 break; 1268 break;
1221 1269
1222 if (intel_clock_recovery_ok(link_status, intel_dp->lane_count)) { 1270 if (intel_clock_recovery_ok(intel_dp->link_status, intel_dp->lane_count)) {
1223 clock_recovery = true; 1271 clock_recovery = true;
1224 break; 1272 break;
1225 } 1273 }
1226 1274
1227 /* Check to see if we've tried the max voltage */ 1275 /* Check to see if we've tried the max voltage */
1228 for (i = 0; i < intel_dp->lane_count; i++) 1276 for (i = 0; i < intel_dp->lane_count; i++)
1229 if ((train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0) 1277 if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
1230 break; 1278 break;
1231 if (i == intel_dp->lane_count) 1279 if (i == intel_dp->lane_count)
1232 break; 1280 break;
1233 1281
1234 /* Check to see if we've tried the same voltage 5 times */ 1282 /* Check to see if we've tried the same voltage 5 times */
1235 if ((train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) { 1283 if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
1236 ++tries; 1284 ++tries;
1237 if (tries == 5) 1285 if (tries == 5)
1238 break; 1286 break;
1239 } else 1287 } else
1240 tries = 0; 1288 tries = 0;
1241 voltage = train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK; 1289 voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
1242 1290
1243 /* Compute new train_set as requested by target */ 1291 /* Compute new intel_dp->train_set as requested by target */
1244 intel_get_adjust_train(intel_dp, link_status, intel_dp->lane_count, train_set); 1292 intel_get_adjust_train(intel_dp);
1245 } 1293 }
1246 1294
1295 intel_dp->DP = DP;
1296}
1297
1298static void
1299intel_dp_complete_link_train(struct intel_dp *intel_dp)
1300{
1301 struct drm_device *dev = intel_dp->base.base.dev;
1302 struct drm_i915_private *dev_priv = dev->dev_private;
1303 bool channel_eq = false;
1304 int tries;
1305 u32 reg;
1306 uint32_t DP = intel_dp->DP;
1307
1247 /* channel equalization */ 1308 /* channel equalization */
1248 tries = 0; 1309 tries = 0;
1249 channel_eq = false; 1310 channel_eq = false;
1250 for (;;) { 1311 for (;;) {
1251 /* Use train_set[0] to set the voltage and pre emphasis values */ 1312 /* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */
1252 uint32_t signal_levels; 1313 uint32_t signal_levels;
1253 1314
1254 if (IS_GEN6(dev) && IS_eDP(intel_dp)) { 1315 if (IS_GEN6(dev) && IS_eDP(intel_dp)) {
1255 signal_levels = intel_gen6_edp_signal_levels(train_set[0]); 1316 signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]);
1256 DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels; 1317 DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
1257 } else { 1318 } else {
1258 signal_levels = intel_dp_signal_levels(train_set[0], intel_dp->lane_count); 1319 signal_levels = intel_dp_signal_levels(intel_dp->train_set[0], intel_dp->lane_count);
1259 DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; 1320 DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
1260 } 1321 }
1261 1322
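Note: the clock-recovery loop above keeps the DP training procedure's three exit conditions: succeed once every active lane reports clock recovery done; give up when every lane is already at maximum voltage swing; and give up after five consecutive iterations at the same voltage (the sink keeps requesting adjustments that change nothing). In outline, with hypothetical predicate names standing in for the open-coded lane loops:

    for (;;) {
        program_signal_levels_and_pattern_1();
        if (!intel_dp_get_link_status(intel_dp))
            break;                                /* AUX read failed */
        if (all_lanes_cr_done())      { clock_recovery = true; break; }
        if (all_lanes_at_max_swing()) break;      /* nowhere left to go */
        if (same_voltage_tried(5))    break;      /* sink is not converging */
        intel_get_adjust_train(intel_dp);         /* take the sink's requested levels */
    }

Saving intel_dp->DP at the end hands the negotiated register value over to intel_dp_complete_link_train().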
@@ -1266,15 +1327,14 @@ intel_dp_link_train(struct intel_dp *intel_dp)
1266 1327
1267 /* channel eq pattern */ 1328 /* channel eq pattern */
1268 if (!intel_dp_set_link_train(intel_dp, reg, 1329 if (!intel_dp_set_link_train(intel_dp, reg,
1269 DP_TRAINING_PATTERN_2, train_set, 1330 DP_TRAINING_PATTERN_2))
1270 false))
1271 break; 1331 break;
1272 1332
1273 udelay(400); 1333 udelay(400);
1274 if (!intel_dp_get_link_status(intel_dp, link_status)) 1334 if (!intel_dp_get_link_status(intel_dp))
1275 break; 1335 break;
1276 1336
1277 if (intel_channel_eq_ok(link_status, intel_dp->lane_count)) { 1337 if (intel_channel_eq_ok(intel_dp)) {
1278 channel_eq = true; 1338 channel_eq = true;
1279 break; 1339 break;
1280 } 1340 }
@@ -1283,8 +1343,8 @@ intel_dp_link_train(struct intel_dp *intel_dp)
1283 if (tries > 5) 1343 if (tries > 5)
1284 break; 1344 break;
1285 1345
1286 /* Compute new train_set as requested by target */ 1346 /* Compute new intel_dp->train_set as requested by target */
1287 intel_get_adjust_train(intel_dp, link_status, intel_dp->lane_count, train_set); 1347 intel_get_adjust_train(intel_dp);
1288 ++tries; 1348 ++tries;
1289 } 1349 }
1290 1350
@@ -1302,7 +1362,7 @@ intel_dp_link_train(struct intel_dp *intel_dp)
1302static void 1362static void
1303intel_dp_link_down(struct intel_dp *intel_dp) 1363intel_dp_link_down(struct intel_dp *intel_dp)
1304{ 1364{
1305 struct drm_device *dev = intel_dp->base.enc.dev; 1365 struct drm_device *dev = intel_dp->base.base.dev;
1306 struct drm_i915_private *dev_priv = dev->dev_private; 1366 struct drm_i915_private *dev_priv = dev->dev_private;
1307 uint32_t DP = intel_dp->DP; 1367 uint32_t DP = intel_dp->DP;
1308 1368
@@ -1318,14 +1378,13 @@ intel_dp_link_down(struct intel_dp *intel_dp)
1318 if (HAS_PCH_CPT(dev) && !IS_eDP(intel_dp)) { 1378 if (HAS_PCH_CPT(dev) && !IS_eDP(intel_dp)) {
1319 DP &= ~DP_LINK_TRAIN_MASK_CPT; 1379 DP &= ~DP_LINK_TRAIN_MASK_CPT;
1320 I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT); 1380 I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
1321 POSTING_READ(intel_dp->output_reg);
1322 } else { 1381 } else {
1323 DP &= ~DP_LINK_TRAIN_MASK; 1382 DP &= ~DP_LINK_TRAIN_MASK;
1324 I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE); 1383 I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE);
1325 POSTING_READ(intel_dp->output_reg);
1326 } 1384 }
1385 POSTING_READ(intel_dp->output_reg);
1327 1386
1328 udelay(17000); 1387 msleep(17);
1329 1388
1330 if (IS_eDP(intel_dp)) 1389 if (IS_eDP(intel_dp))
1331 DP |= DP_LINK_TRAIN_OFF; 1390 DP |= DP_LINK_TRAIN_OFF;
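Note: udelay(17000) busy-spins the CPU for the full 17ms; msleep(17) sleeps and may overshoot slightly (jiffies granularity), which is harmless for a link-down settle delay. The usual kernel rule of thumb applies: in sleepable context, delays beyond a few hundred microseconds belong to msleep()/usleep_range(), not udelay().

    udelay(17000);   /* before: 17ms with the CPU pinned */
    msleep(17);      /* after: schedule away; may sleep a little longer */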
@@ -1345,27 +1404,29 @@ intel_dp_link_down(struct intel_dp *intel_dp)
1345static void 1404static void
1346intel_dp_check_link_status(struct intel_dp *intel_dp) 1405intel_dp_check_link_status(struct intel_dp *intel_dp)
1347{ 1406{
1348 uint8_t link_status[DP_LINK_STATUS_SIZE]; 1407 if (!intel_dp->base.base.crtc)
1349
1350 if (!intel_dp->base.enc.crtc)
1351 return; 1408 return;
1352 1409
1353 if (!intel_dp_get_link_status(intel_dp, link_status)) { 1410 if (!intel_dp_get_link_status(intel_dp)) {
1354 intel_dp_link_down(intel_dp); 1411 intel_dp_link_down(intel_dp);
1355 return; 1412 return;
1356 } 1413 }
1357 1414
1358 if (!intel_channel_eq_ok(link_status, intel_dp->lane_count)) 1415 if (!intel_channel_eq_ok(intel_dp)) {
1359 intel_dp_link_train(intel_dp); 1416 intel_dp_start_link_train(intel_dp);
1417 intel_dp_complete_link_train(intel_dp);
1418 }
1360} 1419}
1361 1420
1362static enum drm_connector_status 1421static enum drm_connector_status
1363ironlake_dp_detect(struct drm_connector *connector) 1422ironlake_dp_detect(struct drm_connector *connector)
1364{ 1423{
1365 struct drm_encoder *encoder = intel_attached_encoder(connector); 1424 struct intel_dp *intel_dp = intel_attached_dp(connector);
1366 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
1367 enum drm_connector_status status; 1425 enum drm_connector_status status;
1368 1426
1427 /* Panel needs power for AUX to work */
1428 if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp))
1429 ironlake_edp_panel_vdd_on(connector->dev);
1369 status = connector_status_disconnected; 1430 status = connector_status_disconnected;
1370 if (intel_dp_aux_native_read(intel_dp, 1431 if (intel_dp_aux_native_read(intel_dp,
1371 0x000, intel_dp->dpcd, 1432 0x000, intel_dp->dpcd,
@@ -1376,6 +1437,8 @@ ironlake_dp_detect(struct drm_connector *connector)
1376 } 1437 }
1377 DRM_DEBUG_KMS("DPCD: %hx%hx%hx%hx\n", intel_dp->dpcd[0], 1438 DRM_DEBUG_KMS("DPCD: %hx%hx%hx%hx\n", intel_dp->dpcd[0],
1378 intel_dp->dpcd[1], intel_dp->dpcd[2], intel_dp->dpcd[3]); 1439 intel_dp->dpcd[1], intel_dp->dpcd[2], intel_dp->dpcd[3]);
1440 if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp))
1441 ironlake_edp_panel_vdd_off(connector->dev);
1379 return status; 1442 return status;
1380} 1443}
1381 1444
@@ -1388,9 +1451,8 @@ ironlake_dp_detect(struct drm_connector *connector)
1388static enum drm_connector_status 1451static enum drm_connector_status
1389intel_dp_detect(struct drm_connector *connector, bool force) 1452intel_dp_detect(struct drm_connector *connector, bool force)
1390{ 1453{
1391 struct drm_encoder *encoder = intel_attached_encoder(connector); 1454 struct intel_dp *intel_dp = intel_attached_dp(connector);
1392 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 1455 struct drm_device *dev = intel_dp->base.base.dev;
1393 struct drm_device *dev = intel_dp->base.enc.dev;
1394 struct drm_i915_private *dev_priv = dev->dev_private; 1456 struct drm_i915_private *dev_priv = dev->dev_private;
1395 uint32_t temp, bit; 1457 uint32_t temp, bit;
1396 enum drm_connector_status status; 1458 enum drm_connector_status status;
@@ -1432,16 +1494,15 @@ intel_dp_detect(struct drm_connector *connector, bool force)
1432 1494
1433static int intel_dp_get_modes(struct drm_connector *connector) 1495static int intel_dp_get_modes(struct drm_connector *connector)
1434{ 1496{
1435 struct drm_encoder *encoder = intel_attached_encoder(connector); 1497 struct intel_dp *intel_dp = intel_attached_dp(connector);
1436 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 1498 struct drm_device *dev = intel_dp->base.base.dev;
1437 struct drm_device *dev = intel_dp->base.enc.dev;
1438 struct drm_i915_private *dev_priv = dev->dev_private; 1499 struct drm_i915_private *dev_priv = dev->dev_private;
1439 int ret; 1500 int ret;
1440 1501
1441 /* We should parse the EDID data and find out if it has an audio sink 1502 /* We should parse the EDID data and find out if it has an audio sink
1442 */ 1503 */
1443 1504
1444 ret = intel_ddc_get_modes(connector, intel_dp->base.ddc_bus); 1505 ret = intel_ddc_get_modes(connector, &intel_dp->adapter);
1445 if (ret) { 1506 if (ret) {
1446 if ((IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) && 1507 if ((IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) &&
1447 !dev_priv->panel_fixed_mode) { 1508 !dev_priv->panel_fixed_mode) {
@@ -1479,6 +1540,15 @@ intel_dp_destroy (struct drm_connector *connector)
1479 kfree(connector); 1540 kfree(connector);
1480} 1541}
1481 1542
1543static void intel_dp_encoder_destroy(struct drm_encoder *encoder)
1544{
1545 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
1546
1547 i2c_del_adapter(&intel_dp->adapter);
1548 drm_encoder_cleanup(encoder);
1549 kfree(intel_dp);
1550}
1551
1482static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = { 1552static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = {
1483 .dpms = intel_dp_dpms, 1553 .dpms = intel_dp_dpms,
1484 .mode_fixup = intel_dp_mode_fixup, 1554 .mode_fixup = intel_dp_mode_fixup,
@@ -1497,14 +1567,14 @@ static const struct drm_connector_funcs intel_dp_connector_funcs = {
1497static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = { 1567static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
1498 .get_modes = intel_dp_get_modes, 1568 .get_modes = intel_dp_get_modes,
1499 .mode_valid = intel_dp_mode_valid, 1569 .mode_valid = intel_dp_mode_valid,
1500 .best_encoder = intel_attached_encoder, 1570 .best_encoder = intel_best_encoder,
1501}; 1571};
1502 1572
1503static const struct drm_encoder_funcs intel_dp_enc_funcs = { 1573static const struct drm_encoder_funcs intel_dp_enc_funcs = {
1504 .destroy = intel_encoder_destroy, 1574 .destroy = intel_dp_encoder_destroy,
1505}; 1575};
1506 1576
1507void 1577static void
1508intel_dp_hot_plug(struct intel_encoder *intel_encoder) 1578intel_dp_hot_plug(struct intel_encoder *intel_encoder)
1509{ 1579{
1510 struct intel_dp *intel_dp = container_of(intel_encoder, struct intel_dp, base); 1580 struct intel_dp *intel_dp = container_of(intel_encoder, struct intel_dp, base);
@@ -1613,12 +1683,11 @@ intel_dp_init(struct drm_device *dev, int output_reg)
1613 intel_dp->has_audio = false; 1683 intel_dp->has_audio = false;
1614 intel_dp->dpms_mode = DRM_MODE_DPMS_ON; 1684 intel_dp->dpms_mode = DRM_MODE_DPMS_ON;
1615 1685
1616 drm_encoder_init(dev, &intel_encoder->enc, &intel_dp_enc_funcs, 1686 drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
1617 DRM_MODE_ENCODER_TMDS); 1687 DRM_MODE_ENCODER_TMDS);
1618 drm_encoder_helper_add(&intel_encoder->enc, &intel_dp_helper_funcs); 1688 drm_encoder_helper_add(&intel_encoder->base, &intel_dp_helper_funcs);
1619 1689
1620 drm_mode_connector_attach_encoder(&intel_connector->base, 1690 intel_connector_attach_encoder(intel_connector, intel_encoder);
1621 &intel_encoder->enc);
1622 drm_sysfs_connector_add(connector); 1691 drm_sysfs_connector_add(connector);
1623 1692
1624 /* Set up the DDC bus. */ 1693 /* Set up the DDC bus. */
@@ -1648,7 +1717,6 @@ intel_dp_init(struct drm_device *dev, int output_reg)
1648 1717
1649 intel_dp_i2c_init(intel_dp, intel_connector, name); 1718 intel_dp_i2c_init(intel_dp, intel_connector, name);
1650 1719
1651 intel_encoder->ddc_bus = &intel_dp->adapter;
1652 intel_encoder->hot_plug = intel_dp_hot_plug; 1720 intel_encoder->hot_plug = intel_dp_hot_plug;
1653 1721
1654 if (output_reg == DP_A || IS_PCH_eDP(intel_dp)) { 1722 if (output_reg == DP_A || IS_PCH_eDP(intel_dp)) {
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index ad312ca6b3e5..40e99bf27ff7 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -26,14 +26,12 @@
26#define __INTEL_DRV_H__ 26#define __INTEL_DRV_H__
27 27
28#include <linux/i2c.h> 28#include <linux/i2c.h>
29#include <linux/i2c-id.h>
30#include <linux/i2c-algo-bit.h>
31#include "i915_drv.h" 29#include "i915_drv.h"
32#include "drm_crtc.h" 30#include "drm_crtc.h"
33
34#include "drm_crtc_helper.h" 31#include "drm_crtc_helper.h"
32#include "drm_fb_helper.h"
35 33
36#define wait_for(COND, MS, W) ({ \ 34#define _wait_for(COND, MS, W) ({ \
37 unsigned long timeout__ = jiffies + msecs_to_jiffies(MS); \ 35 unsigned long timeout__ = jiffies + msecs_to_jiffies(MS); \
38 int ret__ = 0; \ 36 int ret__ = 0; \
39 while (! (COND)) { \ 37 while (! (COND)) { \
@@ -41,11 +39,24 @@
41 ret__ = -ETIMEDOUT; \ 39 ret__ = -ETIMEDOUT; \
42 break; \ 40 break; \
43 } \ 41 } \
44 if (W) msleep(W); \ 42 if (W && !in_dbg_master()) msleep(W); \
45 } \ 43 } \
46 ret__; \ 44 ret__; \
47}) 45})
48 46
47#define wait_for(COND, MS) _wait_for(COND, MS, 1)
48#define wait_for_atomic(COND, MS) _wait_for(COND, MS, 0)
49
50#define MSLEEP(x) do { \
51 if (in_dbg_master()) \
52 mdelay(x); \
53 else \
54 msleep(x); \
55} while(0)
56
57#define KHz(x) (1000*x)
58#define MHz(x) KHz(1000*x)
59
49/* 60/*
50 * Display related stuff 61 * Display related stuff
51 */ 62 */
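Note: this is the header side of the wait_for() calling-convention change seen throughout intel_display.c and intel_dp.c above. The old three-argument wait_for(COND, MS, W) becomes the internal _wait_for(), and two wrappers fix the polling behaviour, which is why call sites went from the wait_for(cond, 1, 0) style to wait_for(cond, 10). The in_dbg_master() test (and the MSLEEP helper) avoid sleeping while kgdb owns the machine. Usage after this patch:

    if (wait_for((I915_READ(PCH_PP_STATUS) & PP_ON) == 0, 5000))
        DRM_ERROR("panel off wait timed out\n");   /* polls up to 5s, msleep(1) between polls */

    /* busy-polls with no sleep, usable in atomic context ('reg' and 'BIT' hypothetical) */
    ret = wait_for_atomic(I915_READ(reg) & BIT, 2);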
@@ -96,24 +107,39 @@
96#define INTEL_DVO_CHIP_TMDS 2 107#define INTEL_DVO_CHIP_TMDS 2
97#define INTEL_DVO_CHIP_TVOUT 4 108#define INTEL_DVO_CHIP_TVOUT 4
98 109
99struct intel_i2c_chan { 110/* drm_display_mode->private_flags */
100 struct drm_device *drm_dev; /* for getting at dev. private (mmio etc.) */ 111#define INTEL_MODE_PIXEL_MULTIPLIER_SHIFT (0x0)
101 u32 reg; /* GPIO reg */ 112#define INTEL_MODE_PIXEL_MULTIPLIER_MASK (0xf << INTEL_MODE_PIXEL_MULTIPLIER_SHIFT)
102 struct i2c_adapter adapter; 113
103 struct i2c_algo_bit_data algo; 114static inline void
104}; 115intel_mode_set_pixel_multiplier(struct drm_display_mode *mode,
116 int multiplier)
117{
118 mode->clock *= multiplier;
119 mode->private_flags |= multiplier;
120}
121
122static inline int
123intel_mode_get_pixel_multiplier(const struct drm_display_mode *mode)
124{
125 return (mode->private_flags & INTEL_MODE_PIXEL_MULTIPLIER_MASK) >> INTEL_MODE_PIXEL_MULTIPLIER_SHIFT;
126}
105 127
106struct intel_framebuffer { 128struct intel_framebuffer {
107 struct drm_framebuffer base; 129 struct drm_framebuffer base;
108 struct drm_gem_object *obj; 130 struct drm_gem_object *obj;
109}; 131};
110 132
133struct intel_fbdev {
134 struct drm_fb_helper helper;
135 struct intel_framebuffer ifb;
136 struct list_head fbdev_list;
137 struct drm_display_mode *our_mode;
138};
111 139
112struct intel_encoder { 140struct intel_encoder {
113 struct drm_encoder enc; 141 struct drm_encoder base;
114 int type; 142 int type;
115 struct i2c_adapter *i2c_bus;
116 struct i2c_adapter *ddc_bus;
117 bool load_detect_temp; 143 bool load_detect_temp;
118 bool needs_tv_clock; 144 bool needs_tv_clock;
119 void (*hot_plug)(struct intel_encoder *); 145 void (*hot_plug)(struct intel_encoder *);
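Note: the pixel-multiplier helpers stash the multiplier in drm_display_mode->private_flags (a 4-bit field at shift 0, so values up to 15) while also pre-scaling mode->clock, letting dot-clock multiplication for outputs such as SDVO ride through the mode-set plumbing unchanged. The bare OR in the setter only works because the shift is zero and the field is assumed clear; the getter masks and shifts properly. For example:

    struct drm_display_mode mode = { .clock = 135000 };  /* kHz */
    intel_mode_set_pixel_multiplier(&mode, 2);
    /* now mode.clock == 270000 and
     * intel_mode_get_pixel_multiplier(&mode) == 2 */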
@@ -123,32 +149,7 @@ struct intel_encoder {
123 149
124struct intel_connector { 150struct intel_connector {
125 struct drm_connector base; 151 struct drm_connector base;
126}; 152 struct intel_encoder *encoder;
127
128struct intel_crtc;
129struct intel_overlay {
130 struct drm_device *dev;
131 struct intel_crtc *crtc;
132 struct drm_i915_gem_object *vid_bo;
133 struct drm_i915_gem_object *old_vid_bo;
134 int active;
135 int pfit_active;
136 u32 pfit_vscale_ratio; /* shifted-point number, (1<<12) == 1.0 */
137 u32 color_key;
138 u32 brightness, contrast, saturation;
139 u32 old_xscale, old_yscale;
140 /* register access */
141 u32 flip_addr;
142 struct drm_i915_gem_object *reg_bo;
143 void *virt_addr;
144 /* flip handling */
145 uint32_t last_flip_req;
146 int hw_wedged;
147#define HW_WEDGED 1
148#define NEEDS_WAIT_FOR_FLIP 2
149#define RELEASE_OLD_VID 3
150#define SWITCH_OFF_STAGE_1 4
151#define SWITCH_OFF_STAGE_2 5
152}; 153};
153 154
154struct intel_crtc { 155struct intel_crtc {
@@ -157,6 +158,7 @@ struct intel_crtc {
157 enum plane plane; 158 enum plane plane;
158 u8 lut_r[256], lut_g[256], lut_b[256]; 159 u8 lut_r[256], lut_g[256], lut_b[256];
159 int dpms_mode; 160 int dpms_mode;
161 bool active; /* is the crtc on? independent of the dpms mode */
160 bool busy; /* is scanout buffer being updated frequently? */ 162 bool busy; /* is scanout buffer being updated frequently? */
161 struct timer_list idle_timer; 163 struct timer_list idle_timer;
162 bool lowfreq_avail; 164 bool lowfreq_avail;
@@ -168,14 +170,21 @@ struct intel_crtc {
168 uint32_t cursor_addr; 170 uint32_t cursor_addr;
169 int16_t cursor_x, cursor_y; 171 int16_t cursor_x, cursor_y;
170 int16_t cursor_width, cursor_height; 172 int16_t cursor_width, cursor_height;
171 bool cursor_visible, cursor_on; 173 bool cursor_visible;
172}; 174};
173 175
174#define to_intel_crtc(x) container_of(x, struct intel_crtc, base) 176#define to_intel_crtc(x) container_of(x, struct intel_crtc, base)
175#define to_intel_connector(x) container_of(x, struct intel_connector, base) 177#define to_intel_connector(x) container_of(x, struct intel_connector, base)
176#define enc_to_intel_encoder(x) container_of(x, struct intel_encoder, enc) 178#define to_intel_encoder(x) container_of(x, struct intel_encoder, base)
177#define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base) 179#define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base)
178 180
181static inline struct drm_crtc *
182intel_get_crtc_for_pipe(struct drm_device *dev, int pipe)
183{
184 struct drm_i915_private *dev_priv = dev->dev_private;
185 return dev_priv->pipe_to_crtc_mapping[pipe];
186}
187
179struct intel_unpin_work { 188struct intel_unpin_work {
180 struct work_struct work; 189 struct work_struct work;
181 struct drm_device *dev; 190 struct drm_device *dev;
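
intel_encoder, intel_connector and intel_crtc all use the embed-the-base-object pattern: the DRM struct is a member named base and the to_intel_*() macros upcast from a base pointer with container_of(), while the new intel_get_crtc_for_pipe() replaces a list walk with an array index. A hedged stand-alone illustration of the container_of() upcast (toy types, userspace build):

    #include <stddef.h>
    #include <stdio.h>

    /* Recover the enclosing struct from a pointer to one of its members. */
    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct drm_crtc_like {      /* stand-in for struct drm_crtc */
        int id;
    };

    struct wrapped_crtc {       /* stand-in for struct intel_crtc */
        struct drm_crtc_like base;
        int pipe;
    };

    #define to_wrapped_crtc(x) container_of(x, struct wrapped_crtc, base)

    int main(void)
    {
        struct wrapped_crtc crtc = { .base = { .id = 7 }, .pipe = 1 };
        /* Core code only ever hands back the base pointer... */
        struct drm_crtc_like *base = &crtc.base;
        /* ...and the driver upcasts to reach its private state. */
        printf("pipe=%d\n", to_wrapped_crtc(base)->pipe);
        return 0;
    }
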
@@ -186,13 +195,8 @@ struct intel_unpin_work {
186 bool enable_stall_check; 195 bool enable_stall_check;
187}; 196};
188 197
189struct i2c_adapter *intel_i2c_create(struct drm_device *dev, const u32 reg,
190 const char *name);
191void intel_i2c_destroy(struct i2c_adapter *adapter);
192int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter); 198int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter);
193extern bool intel_ddc_probe(struct intel_encoder *intel_encoder); 199extern bool intel_ddc_probe(struct intel_encoder *intel_encoder, int ddc_bus);
194void intel_i2c_quirk_set(struct drm_device *dev, bool enable);
195void intel_i2c_reset_gmbus(struct drm_device *dev);
196 200
197extern void intel_crt_init(struct drm_device *dev); 201extern void intel_crt_init(struct drm_device *dev);
198extern void intel_hdmi_init(struct drm_device *dev, int sdvox_reg); 202extern void intel_hdmi_init(struct drm_device *dev, int sdvox_reg);
@@ -209,29 +213,37 @@ extern bool intel_pch_has_edp(struct drm_crtc *crtc);
209extern bool intel_dpd_is_edp(struct drm_device *dev); 213extern bool intel_dpd_is_edp(struct drm_device *dev);
210extern void intel_edp_link_config (struct intel_encoder *, int *, int *); 214extern void intel_edp_link_config (struct intel_encoder *, int *, int *);
211 215
212 216/* intel_panel.c */
213extern void intel_fixed_panel_mode(struct drm_display_mode *fixed_mode, 217extern void intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
214 struct drm_display_mode *adjusted_mode); 218 struct drm_display_mode *adjusted_mode);
215extern void intel_pch_panel_fitting(struct drm_device *dev, 219extern void intel_pch_panel_fitting(struct drm_device *dev,
216 int fitting_mode, 220 int fitting_mode,
217 struct drm_display_mode *mode, 221 struct drm_display_mode *mode,
218 struct drm_display_mode *adjusted_mode); 222 struct drm_display_mode *adjusted_mode);
223extern u32 intel_panel_get_max_backlight(struct drm_device *dev);
224extern u32 intel_panel_get_backlight(struct drm_device *dev);
225extern void intel_panel_set_backlight(struct drm_device *dev, u32 level);
219 226
220extern int intel_panel_fitter_pipe (struct drm_device *dev);
221extern void intel_crtc_load_lut(struct drm_crtc *crtc); 227extern void intel_crtc_load_lut(struct drm_crtc *crtc);
222extern void intel_encoder_prepare (struct drm_encoder *encoder); 228extern void intel_encoder_prepare (struct drm_encoder *encoder);
223extern void intel_encoder_commit (struct drm_encoder *encoder); 229extern void intel_encoder_commit (struct drm_encoder *encoder);
224extern void intel_encoder_destroy(struct drm_encoder *encoder); 230extern void intel_encoder_destroy(struct drm_encoder *encoder);
225 231
226extern struct drm_encoder *intel_attached_encoder(struct drm_connector *connector); 232static inline struct intel_encoder *intel_attached_encoder(struct drm_connector *connector)
233{
234 return to_intel_connector(connector)->encoder;
235}
236
237extern void intel_connector_attach_encoder(struct intel_connector *connector,
238 struct intel_encoder *encoder);
239extern struct drm_encoder *intel_best_encoder(struct drm_connector *connector);
227 240
228extern struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev, 241extern struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
229 struct drm_crtc *crtc); 242 struct drm_crtc *crtc);
230int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, 243int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
231 struct drm_file *file_priv); 244 struct drm_file *file_priv);
232extern void intel_wait_for_vblank_off(struct drm_device *dev, int pipe);
233extern void intel_wait_for_vblank(struct drm_device *dev, int pipe); 245extern void intel_wait_for_vblank(struct drm_device *dev, int pipe);
234extern struct drm_crtc *intel_get_crtc_from_pipe(struct drm_device *dev, int pipe); 246extern void intel_wait_for_pipe_off(struct drm_device *dev, int pipe);
235extern struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder, 247extern struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
236 struct drm_connector *connector, 248 struct drm_connector *connector,
237 struct drm_display_mode *mode, 249 struct drm_display_mode *mode,
@@ -253,7 +265,8 @@ extern void ironlake_enable_drps(struct drm_device *dev);
253extern void ironlake_disable_drps(struct drm_device *dev); 265extern void ironlake_disable_drps(struct drm_device *dev);
254 266
255extern int intel_pin_and_fence_fb_obj(struct drm_device *dev, 267extern int intel_pin_and_fence_fb_obj(struct drm_device *dev,
256 struct drm_gem_object *obj); 268 struct drm_gem_object *obj,
269 bool pipelined);
257 270
258extern int intel_framebuffer_init(struct drm_device *dev, 271extern int intel_framebuffer_init(struct drm_device *dev,
259 struct intel_framebuffer *ifb, 272 struct intel_framebuffer *ifb,
@@ -268,9 +281,8 @@ extern void intel_finish_page_flip_plane(struct drm_device *dev, int plane);
268 281
269extern void intel_setup_overlay(struct drm_device *dev); 282extern void intel_setup_overlay(struct drm_device *dev);
270extern void intel_cleanup_overlay(struct drm_device *dev); 283extern void intel_cleanup_overlay(struct drm_device *dev);
271extern int intel_overlay_switch_off(struct intel_overlay *overlay); 284extern int intel_overlay_switch_off(struct intel_overlay *overlay,
272extern int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay, 285 bool interruptible);
273 int interruptible);
274extern int intel_overlay_put_image(struct drm_device *dev, void *data, 286extern int intel_overlay_put_image(struct drm_device *dev, void *data,
275 struct drm_file *file_priv); 287 struct drm_file *file_priv);
276extern int intel_overlay_attrs(struct drm_device *dev, void *data, 288extern int intel_overlay_attrs(struct drm_device *dev, void *data,
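
The other recurring change in this header: intel_attached_encoder() stops being an exported lookup and becomes an inline read of a pointer that intel_connector_attach_encoder() caches when the output is created, with intel_best_encoder() serving as the drm helper callback. The wiring in miniature (hedged sketch; toy types stand in for the drm structs):

    #include <stdio.h>

    struct encoder { const char *name; };

    struct connector {
        struct encoder *encoder;   /* cached at init time */
    };

    /* One-time wiring when the output is registered. */
    static void connector_attach_encoder(struct connector *c,
                                         struct encoder *e)
    {
        c->encoder = e;
    }

    /* Hot paths (detect, best_encoder) become a pointer read
     * instead of a walk over the mode_config encoder list. */
    static struct encoder *attached_encoder(struct connector *c)
    {
        return c->encoder;
    }

    int main(void)
    {
        struct encoder hdmi = { "HDMI-A" };
        struct connector conn = { 0 };

        connector_attach_encoder(&conn, &hdmi);
        printf("%s\n", attached_encoder(&conn)->name);
        return 0;
    }
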
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index 7c9ec1472d46..ea373283c93b 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -72,7 +72,7 @@ static const struct intel_dvo_device intel_dvo_devices[] = {
72 .name = "ch7017", 72 .name = "ch7017",
73 .dvo_reg = DVOC, 73 .dvo_reg = DVOC,
74 .slave_addr = 0x75, 74 .slave_addr = 0x75,
75 .gpio = GPIOE, 75 .gpio = GMBUS_PORT_DPB,
76 .dev_ops = &ch7017_ops, 76 .dev_ops = &ch7017_ops,
77 } 77 }
78}; 78};
@@ -88,7 +88,13 @@ struct intel_dvo {
88 88
89static struct intel_dvo *enc_to_intel_dvo(struct drm_encoder *encoder) 89static struct intel_dvo *enc_to_intel_dvo(struct drm_encoder *encoder)
90{ 90{
91 return container_of(enc_to_intel_encoder(encoder), struct intel_dvo, base); 91 return container_of(encoder, struct intel_dvo, base.base);
92}
93
94static struct intel_dvo *intel_attached_dvo(struct drm_connector *connector)
95{
96 return container_of(intel_attached_encoder(connector),
97 struct intel_dvo, base);
92} 98}
93 99
94static void intel_dvo_dpms(struct drm_encoder *encoder, int mode) 100static void intel_dvo_dpms(struct drm_encoder *encoder, int mode)
@@ -112,8 +118,7 @@ static void intel_dvo_dpms(struct drm_encoder *encoder, int mode)
112static int intel_dvo_mode_valid(struct drm_connector *connector, 118static int intel_dvo_mode_valid(struct drm_connector *connector,
113 struct drm_display_mode *mode) 119 struct drm_display_mode *mode)
114{ 120{
115 struct drm_encoder *encoder = intel_attached_encoder(connector); 121 struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
116 struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder);
117 122
118 if (mode->flags & DRM_MODE_FLAG_DBLSCAN) 123 if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
119 return MODE_NO_DBLESCAN; 124 return MODE_NO_DBLESCAN;
@@ -224,23 +229,22 @@ static void intel_dvo_mode_set(struct drm_encoder *encoder,
224static enum drm_connector_status 229static enum drm_connector_status
225intel_dvo_detect(struct drm_connector *connector, bool force) 230intel_dvo_detect(struct drm_connector *connector, bool force)
226{ 231{
227 struct drm_encoder *encoder = intel_attached_encoder(connector); 232 struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
228 struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder);
229
230 return intel_dvo->dev.dev_ops->detect(&intel_dvo->dev); 233 return intel_dvo->dev.dev_ops->detect(&intel_dvo->dev);
231} 234}
232 235
233static int intel_dvo_get_modes(struct drm_connector *connector) 236static int intel_dvo_get_modes(struct drm_connector *connector)
234{ 237{
235 struct drm_encoder *encoder = intel_attached_encoder(connector); 238 struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
236 struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder); 239 struct drm_i915_private *dev_priv = connector->dev->dev_private;
237 240
238 /* We should probably have an i2c driver get_modes function for those 241 /* We should probably have an i2c driver get_modes function for those
239 * devices which will have a fixed set of modes determined by the chip 242 * devices which will have a fixed set of modes determined by the chip
240 * (TV-out, for example), but for now with just TMDS and LVDS, 243 * (TV-out, for example), but for now with just TMDS and LVDS,
241 * that's not the case. 244 * that's not the case.
242 */ 245 */
243 intel_ddc_get_modes(connector, intel_dvo->base.ddc_bus); 246 intel_ddc_get_modes(connector,
247 &dev_priv->gmbus[GMBUS_PORT_DPC].adapter);
244 if (!list_empty(&connector->probed_modes)) 248 if (!list_empty(&connector->probed_modes))
245 return 1; 249 return 1;
246 250
@@ -281,7 +285,7 @@ static const struct drm_connector_funcs intel_dvo_connector_funcs = {
281static const struct drm_connector_helper_funcs intel_dvo_connector_helper_funcs = { 285static const struct drm_connector_helper_funcs intel_dvo_connector_helper_funcs = {
282 .mode_valid = intel_dvo_mode_valid, 286 .mode_valid = intel_dvo_mode_valid,
283 .get_modes = intel_dvo_get_modes, 287 .get_modes = intel_dvo_get_modes,
284 .best_encoder = intel_attached_encoder, 288 .best_encoder = intel_best_encoder,
285}; 289};
286 290
287static void intel_dvo_enc_destroy(struct drm_encoder *encoder) 291static void intel_dvo_enc_destroy(struct drm_encoder *encoder)
@@ -311,8 +315,7 @@ intel_dvo_get_current_mode(struct drm_connector *connector)
311{ 315{
312 struct drm_device *dev = connector->dev; 316 struct drm_device *dev = connector->dev;
313 struct drm_i915_private *dev_priv = dev->dev_private; 317 struct drm_i915_private *dev_priv = dev->dev_private;
314 struct drm_encoder *encoder = intel_attached_encoder(connector); 318 struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
315 struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder);
316 uint32_t dvo_val = I915_READ(intel_dvo->dev.dvo_reg); 319 uint32_t dvo_val = I915_READ(intel_dvo->dev.dvo_reg);
317 struct drm_display_mode *mode = NULL; 320 struct drm_display_mode *mode = NULL;
318 321
@@ -323,7 +326,7 @@ intel_dvo_get_current_mode(struct drm_connector *connector)
323 struct drm_crtc *crtc; 326 struct drm_crtc *crtc;
324 int pipe = (dvo_val & DVO_PIPE_B_SELECT) ? 1 : 0; 327 int pipe = (dvo_val & DVO_PIPE_B_SELECT) ? 1 : 0;
325 328
326 crtc = intel_get_crtc_from_pipe(dev, pipe); 329 crtc = intel_get_crtc_for_pipe(dev, pipe);
327 if (crtc) { 330 if (crtc) {
328 mode = intel_crtc_mode_get(dev, crtc); 331 mode = intel_crtc_mode_get(dev, crtc);
329 if (mode) { 332 if (mode) {
@@ -341,11 +344,10 @@ intel_dvo_get_current_mode(struct drm_connector *connector)
341 344
342void intel_dvo_init(struct drm_device *dev) 345void intel_dvo_init(struct drm_device *dev)
343{ 346{
347 struct drm_i915_private *dev_priv = dev->dev_private;
344 struct intel_encoder *intel_encoder; 348 struct intel_encoder *intel_encoder;
345 struct intel_dvo *intel_dvo; 349 struct intel_dvo *intel_dvo;
346 struct intel_connector *intel_connector; 350 struct intel_connector *intel_connector;
347 struct i2c_adapter *i2cbus = NULL;
348 int ret = 0;
349 int i; 351 int i;
350 int encoder_type = DRM_MODE_ENCODER_NONE; 352 int encoder_type = DRM_MODE_ENCODER_NONE;
351 353
@@ -360,16 +362,14 @@ void intel_dvo_init(struct drm_device *dev)
360 } 362 }
361 363
362 intel_encoder = &intel_dvo->base; 364 intel_encoder = &intel_dvo->base;
363 365 drm_encoder_init(dev, &intel_encoder->base,
364 /* Set up the DDC bus */ 366 &intel_dvo_enc_funcs, encoder_type);
365 intel_encoder->ddc_bus = intel_i2c_create(dev, GPIOD, "DVODDC_D");
366 if (!intel_encoder->ddc_bus)
367 goto free_intel;
368 367
369 /* Now, try to find a controller */ 368 /* Now, try to find a controller */
370 for (i = 0; i < ARRAY_SIZE(intel_dvo_devices); i++) { 369 for (i = 0; i < ARRAY_SIZE(intel_dvo_devices); i++) {
371 struct drm_connector *connector = &intel_connector->base; 370 struct drm_connector *connector = &intel_connector->base;
372 const struct intel_dvo_device *dvo = &intel_dvo_devices[i]; 371 const struct intel_dvo_device *dvo = &intel_dvo_devices[i];
372 struct i2c_adapter *i2c;
373 int gpio; 373 int gpio;
374 374
375 /* Allow the I2C driver info to specify the GPIO to be used in 375 /* Allow the I2C driver info to specify the GPIO to be used in
@@ -379,24 +379,18 @@ void intel_dvo_init(struct drm_device *dev)
379 if (dvo->gpio != 0) 379 if (dvo->gpio != 0)
380 gpio = dvo->gpio; 380 gpio = dvo->gpio;
381 else if (dvo->type == INTEL_DVO_CHIP_LVDS) 381 else if (dvo->type == INTEL_DVO_CHIP_LVDS)
382 gpio = GPIOB; 382 gpio = GMBUS_PORT_SSC;
383 else 383 else
384 gpio = GPIOE; 384 gpio = GMBUS_PORT_DPB;
385 385
386 /* Set up the I2C bus necessary for the chip we're probing. 386 /* Set up the I2C bus necessary for the chip we're probing.
387 * It appears that everything is on GPIOE except for panels 387 * It appears that everything is on GPIOE except for panels
388 * on i830 laptops, which are on GPIOB (DVOA). 388 * on i830 laptops, which are on GPIOB (DVOA).
389 */ 389 */
390 if (i2cbus != NULL) 390 i2c = &dev_priv->gmbus[gpio].adapter;
391 intel_i2c_destroy(i2cbus);
392 if (!(i2cbus = intel_i2c_create(dev, gpio,
393 gpio == GPIOB ? "DVOI2C_B" : "DVOI2C_E"))) {
394 continue;
395 }
396 391
397 intel_dvo->dev = *dvo; 392 intel_dvo->dev = *dvo;
398 ret = dvo->dev_ops->init(&intel_dvo->dev, i2cbus); 393 if (!dvo->dev_ops->init(&intel_dvo->dev, i2c))
399 if (!ret)
400 continue; 394 continue;
401 395
402 intel_encoder->type = INTEL_OUTPUT_DVO; 396 intel_encoder->type = INTEL_OUTPUT_DVO;
@@ -427,13 +421,10 @@ void intel_dvo_init(struct drm_device *dev)
427 connector->interlace_allowed = false; 421 connector->interlace_allowed = false;
428 connector->doublescan_allowed = false; 422 connector->doublescan_allowed = false;
429 423
430 drm_encoder_init(dev, &intel_encoder->enc, 424 drm_encoder_helper_add(&intel_encoder->base,
431 &intel_dvo_enc_funcs, encoder_type);
432 drm_encoder_helper_add(&intel_encoder->enc,
433 &intel_dvo_helper_funcs); 425 &intel_dvo_helper_funcs);
434 426
435 drm_mode_connector_attach_encoder(&intel_connector->base, 427 intel_connector_attach_encoder(intel_connector, intel_encoder);
436 &intel_encoder->enc);
437 if (dvo->type == INTEL_DVO_CHIP_LVDS) { 428 if (dvo->type == INTEL_DVO_CHIP_LVDS) {
438 /* For our LVDS chipsets, we should hopefully be able 429 /* For our LVDS chipsets, we should hopefully be able
439 * to dig the fixed panel mode out of the BIOS data. 430 * to dig the fixed panel mode out of the BIOS data.
@@ -451,11 +442,7 @@ void intel_dvo_init(struct drm_device *dev)
451 return; 442 return;
452 } 443 }
453 444
454 intel_i2c_destroy(intel_encoder->ddc_bus); 445 drm_encoder_cleanup(&intel_encoder->base);
455 /* Didn't find a chip, so tear down. */
456 if (i2cbus != NULL)
457 intel_i2c_destroy(i2cbus);
458free_intel:
459 kfree(intel_dvo); 446 kfree(intel_dvo);
460 kfree(intel_connector); 447 kfree(intel_connector);
461} 448}
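
intel_dvo_init() now probes its controller table against the long-lived gmbus adapters (GMBUS_PORT_SSC for LVDS chips, GMBUS_PORT_DPB otherwise) instead of creating and tearing down a bit-banging bus per candidate, and dev_ops->init() is treated as a boolean. The table-driven probe pattern in isolation (a hedged sketch; the chip names and probe results are invented):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    struct chip_ops {
        const char *name;
        int preferred_bus;
        bool (*init)(int bus_id);   /* true if the chip answered */
    };

    static bool probe_ch7017(int bus) { (void)bus; return false; }
    static bool probe_sil164(int bus) { (void)bus; return true; }

    static const struct chip_ops chips[] = {
        { "ch7017", 5, probe_ch7017 },
        { "sil164", 4, probe_sil164 },
    };

    /* Try each known controller on its preferred, shared bus; the
     * first one that answers wins, the rest are skipped. */
    static const struct chip_ops *probe_dvo(void)
    {
        for (size_t i = 0; i < sizeof(chips) / sizeof(chips[0]); i++)
            if (chips[i].init(chips[i].preferred_bus))
                return &chips[i];
        return NULL;
    }

    int main(void)
    {
        const struct chip_ops *hit = probe_dvo();
        printf("found: %s\n", hit ? hit->name : "none");
        return 0;
    }

Because the adapters now outlive the probe, a failed candidate costs nothing to clean up; the error path at the end of intel_dvo_init() shrinks to drm_encoder_cleanup() plus two kfree()s.
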
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
index 7bdc96256bf5..7dc50acd65d7 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -44,13 +44,6 @@
44#include "i915_drm.h" 44#include "i915_drm.h"
45#include "i915_drv.h" 45#include "i915_drv.h"
46 46
47struct intel_fbdev {
48 struct drm_fb_helper helper;
49 struct intel_framebuffer ifb;
50 struct list_head fbdev_list;
51 struct drm_display_mode *our_mode;
52};
53
54static struct fb_ops intelfb_ops = { 47static struct fb_ops intelfb_ops = {
55 .owner = THIS_MODULE, 48 .owner = THIS_MODULE,
56 .fb_check_var = drm_fb_helper_check_var, 49 .fb_check_var = drm_fb_helper_check_var,
@@ -75,7 +68,7 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
75 struct drm_gem_object *fbo = NULL; 68 struct drm_gem_object *fbo = NULL;
76 struct drm_i915_gem_object *obj_priv; 69 struct drm_i915_gem_object *obj_priv;
77 struct device *device = &dev->pdev->dev; 70 struct device *device = &dev->pdev->dev;
78 int size, ret, mmio_bar = IS_I9XX(dev) ? 0 : 1; 71 int size, ret, mmio_bar = IS_GEN2(dev) ? 1 : 0;
79 72
80 /* we don't do packed 24bpp */ 73 /* we don't do packed 24bpp */
81 if (sizes->surface_bpp == 24) 74 if (sizes->surface_bpp == 24)
@@ -100,19 +93,13 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
100 93
101 mutex_lock(&dev->struct_mutex); 94 mutex_lock(&dev->struct_mutex);
102 95
103 ret = intel_pin_and_fence_fb_obj(dev, fbo); 96 /* Flush everything out, we'll be doing GTT only from now on */
97 ret = intel_pin_and_fence_fb_obj(dev, fbo, false);
104 if (ret) { 98 if (ret) {
105 DRM_ERROR("failed to pin fb: %d\n", ret); 99 DRM_ERROR("failed to pin fb: %d\n", ret);
106 goto out_unref; 100 goto out_unref;
107 } 101 }
108 102
109 /* Flush everything out, we'll be doing GTT only from now on */
110 ret = i915_gem_object_set_to_gtt_domain(fbo, 1);
111 if (ret) {
112 DRM_ERROR("failed to bind fb: %d.\n", ret);
113 goto out_unpin;
114 }
115
116 info = framebuffer_alloc(0, device); 103 info = framebuffer_alloc(0, device);
117 if (!info) { 104 if (!info) {
118 ret = -ENOMEM; 105 ret = -ENOMEM;
@@ -142,7 +129,7 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
142 goto out_unpin; 129 goto out_unpin;
143 } 130 }
144 info->apertures->ranges[0].base = dev->mode_config.fb_base; 131 info->apertures->ranges[0].base = dev->mode_config.fb_base;
145 if (IS_I9XX(dev)) 132 if (!IS_GEN2(dev))
146 info->apertures->ranges[0].size = pci_resource_len(dev->pdev, 2); 133 info->apertures->ranges[0].size = pci_resource_len(dev->pdev, 2);
147 else 134 else
148 info->apertures->ranges[0].size = pci_resource_len(dev->pdev, 0); 135 info->apertures->ranges[0].size = pci_resource_len(dev->pdev, 0);
@@ -219,8 +206,8 @@ static struct drm_fb_helper_funcs intel_fb_helper_funcs = {
219 .fb_probe = intel_fb_find_or_create_single, 206 .fb_probe = intel_fb_find_or_create_single,
220}; 207};
221 208
222int intel_fbdev_destroy(struct drm_device *dev, 209static void intel_fbdev_destroy(struct drm_device *dev,
223 struct intel_fbdev *ifbdev) 210 struct intel_fbdev *ifbdev)
224{ 211{
225 struct fb_info *info; 212 struct fb_info *info;
226 struct intel_framebuffer *ifb = &ifbdev->ifb; 213 struct intel_framebuffer *ifb = &ifbdev->ifb;
@@ -238,9 +225,7 @@ int intel_fbdev_destroy(struct drm_device *dev,
238 225
239 drm_framebuffer_cleanup(&ifb->base); 226 drm_framebuffer_cleanup(&ifb->base);
240 if (ifb->obj) 227 if (ifb->obj)
241 drm_gem_object_unreference(ifb->obj); 228 drm_gem_object_unreference_unlocked(ifb->obj);
242
243 return 0;
244} 229}
245 230
246int intel_fbdev_init(struct drm_device *dev) 231int intel_fbdev_init(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 926934a482ec..9fb9501f2d07 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -40,12 +40,19 @@
40struct intel_hdmi { 40struct intel_hdmi {
41 struct intel_encoder base; 41 struct intel_encoder base;
42 u32 sdvox_reg; 42 u32 sdvox_reg;
43 int ddc_bus;
43 bool has_hdmi_sink; 44 bool has_hdmi_sink;
44}; 45};
45 46
46static struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder) 47static struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder)
47{ 48{
48 return container_of(enc_to_intel_encoder(encoder), struct intel_hdmi, base); 49 return container_of(encoder, struct intel_hdmi, base.base);
50}
51
52static struct intel_hdmi *intel_attached_hdmi(struct drm_connector *connector)
53{
54 return container_of(intel_attached_encoder(connector),
55 struct intel_hdmi, base);
49} 56}
50 57
51static void intel_hdmi_mode_set(struct drm_encoder *encoder, 58static void intel_hdmi_mode_set(struct drm_encoder *encoder,
@@ -141,13 +148,14 @@ static bool intel_hdmi_mode_fixup(struct drm_encoder *encoder,
141static enum drm_connector_status 148static enum drm_connector_status
142intel_hdmi_detect(struct drm_connector *connector, bool force) 149intel_hdmi_detect(struct drm_connector *connector, bool force)
143{ 150{
144 struct drm_encoder *encoder = intel_attached_encoder(connector); 151 struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
145 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); 152 struct drm_i915_private *dev_priv = connector->dev->dev_private;
146 struct edid *edid = NULL; 153 struct edid *edid;
147 enum drm_connector_status status = connector_status_disconnected; 154 enum drm_connector_status status = connector_status_disconnected;
148 155
149 intel_hdmi->has_hdmi_sink = false; 156 intel_hdmi->has_hdmi_sink = false;
150 edid = drm_get_edid(connector, intel_hdmi->base.ddc_bus); 157 edid = drm_get_edid(connector,
158 &dev_priv->gmbus[intel_hdmi->ddc_bus].adapter);
151 159
152 if (edid) { 160 if (edid) {
153 if (edid->input & DRM_EDID_INPUT_DIGITAL) { 161 if (edid->input & DRM_EDID_INPUT_DIGITAL) {
@@ -163,14 +171,15 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
163 171
164static int intel_hdmi_get_modes(struct drm_connector *connector) 172static int intel_hdmi_get_modes(struct drm_connector *connector)
165{ 173{
166 struct drm_encoder *encoder = intel_attached_encoder(connector); 174 struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
167 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); 175 struct drm_i915_private *dev_priv = connector->dev->dev_private;
168 176
169 /* We should parse the EDID data and find out if it's an HDMI sink so 177 /* We should parse the EDID data and find out if it's an HDMI sink so
170 * we can send audio to it. 178 * we can send audio to it.
171 */ 179 */
172 180
173 return intel_ddc_get_modes(connector, intel_hdmi->base.ddc_bus); 181 return intel_ddc_get_modes(connector,
182 &dev_priv->gmbus[intel_hdmi->ddc_bus].adapter);
174} 183}
175 184
176static void intel_hdmi_destroy(struct drm_connector *connector) 185static void intel_hdmi_destroy(struct drm_connector *connector)
@@ -198,7 +207,7 @@ static const struct drm_connector_funcs intel_hdmi_connector_funcs = {
198static const struct drm_connector_helper_funcs intel_hdmi_connector_helper_funcs = { 207static const struct drm_connector_helper_funcs intel_hdmi_connector_helper_funcs = {
199 .get_modes = intel_hdmi_get_modes, 208 .get_modes = intel_hdmi_get_modes,
200 .mode_valid = intel_hdmi_mode_valid, 209 .mode_valid = intel_hdmi_mode_valid,
201 .best_encoder = intel_attached_encoder, 210 .best_encoder = intel_best_encoder,
202}; 211};
203 212
204static const struct drm_encoder_funcs intel_hdmi_enc_funcs = { 213static const struct drm_encoder_funcs intel_hdmi_enc_funcs = {
@@ -224,6 +233,9 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
224 } 233 }
225 234
226 intel_encoder = &intel_hdmi->base; 235 intel_encoder = &intel_hdmi->base;
236 drm_encoder_init(dev, &intel_encoder->base, &intel_hdmi_enc_funcs,
237 DRM_MODE_ENCODER_TMDS);
238
227 connector = &intel_connector->base; 239 connector = &intel_connector->base;
228 drm_connector_init(dev, connector, &intel_hdmi_connector_funcs, 240 drm_connector_init(dev, connector, &intel_hdmi_connector_funcs,
229 DRM_MODE_CONNECTOR_HDMIA); 241 DRM_MODE_CONNECTOR_HDMIA);
@@ -239,39 +251,31 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
239 /* Set up the DDC bus. */ 251 /* Set up the DDC bus. */
240 if (sdvox_reg == SDVOB) { 252 if (sdvox_reg == SDVOB) {
241 intel_encoder->clone_mask = (1 << INTEL_HDMIB_CLONE_BIT); 253 intel_encoder->clone_mask = (1 << INTEL_HDMIB_CLONE_BIT);
242 intel_encoder->ddc_bus = intel_i2c_create(dev, GPIOE, "HDMIB"); 254 intel_hdmi->ddc_bus = GMBUS_PORT_DPB;
243 dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS; 255 dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS;
244 } else if (sdvox_reg == SDVOC) { 256 } else if (sdvox_reg == SDVOC) {
245 intel_encoder->clone_mask = (1 << INTEL_HDMIC_CLONE_BIT); 257 intel_encoder->clone_mask = (1 << INTEL_HDMIC_CLONE_BIT);
246 intel_encoder->ddc_bus = intel_i2c_create(dev, GPIOD, "HDMIC"); 258 intel_hdmi->ddc_bus = GMBUS_PORT_DPC;
247 dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS; 259 dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS;
248 } else if (sdvox_reg == HDMIB) { 260 } else if (sdvox_reg == HDMIB) {
249 intel_encoder->clone_mask = (1 << INTEL_HDMID_CLONE_BIT); 261 intel_encoder->clone_mask = (1 << INTEL_HDMID_CLONE_BIT);
250 intel_encoder->ddc_bus = intel_i2c_create(dev, PCH_GPIOE, 262 intel_hdmi->ddc_bus = GMBUS_PORT_DPB;
251 "HDMIB");
252 dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS; 263 dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS;
253 } else if (sdvox_reg == HDMIC) { 264 } else if (sdvox_reg == HDMIC) {
254 intel_encoder->clone_mask = (1 << INTEL_HDMIE_CLONE_BIT); 265 intel_encoder->clone_mask = (1 << INTEL_HDMIE_CLONE_BIT);
255 intel_encoder->ddc_bus = intel_i2c_create(dev, PCH_GPIOD, 266 intel_hdmi->ddc_bus = GMBUS_PORT_DPC;
256 "HDMIC");
257 dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS; 267 dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS;
258 } else if (sdvox_reg == HDMID) { 268 } else if (sdvox_reg == HDMID) {
259 intel_encoder->clone_mask = (1 << INTEL_HDMIF_CLONE_BIT); 269 intel_encoder->clone_mask = (1 << INTEL_HDMIF_CLONE_BIT);
260 intel_encoder->ddc_bus = intel_i2c_create(dev, PCH_GPIOF, 270 intel_hdmi->ddc_bus = GMBUS_PORT_DPD;
261 "HDMID");
262 dev_priv->hotplug_supported_mask |= HDMID_HOTPLUG_INT_STATUS; 271 dev_priv->hotplug_supported_mask |= HDMID_HOTPLUG_INT_STATUS;
263 } 272 }
264 if (!intel_encoder->ddc_bus)
265 goto err_connector;
266 273
267 intel_hdmi->sdvox_reg = sdvox_reg; 274 intel_hdmi->sdvox_reg = sdvox_reg;
268 275
269 drm_encoder_init(dev, &intel_encoder->enc, &intel_hdmi_enc_funcs, 276 drm_encoder_helper_add(&intel_encoder->base, &intel_hdmi_helper_funcs);
270 DRM_MODE_ENCODER_TMDS);
271 drm_encoder_helper_add(&intel_encoder->enc, &intel_hdmi_helper_funcs);
272 277
273 drm_mode_connector_attach_encoder(&intel_connector->base, 278 intel_connector_attach_encoder(intel_connector, intel_encoder);
274 &intel_encoder->enc);
275 drm_sysfs_connector_add(connector); 279 drm_sysfs_connector_add(connector);
276 280
277 /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written 281 /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
@@ -282,13 +286,4 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
282 u32 temp = I915_READ(PEG_BAND_GAP_DATA); 286 u32 temp = I915_READ(PEG_BAND_GAP_DATA);
283 I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd); 287 I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
284 } 288 }
285
286 return;
287
288err_connector:
289 drm_connector_cleanup(connector);
290 kfree(intel_hdmi);
291 kfree(intel_connector);
292
293 return;
294} 289}
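
HDMI detection now pulls the EDID through the shared dev_priv->gmbus[ddc_bus] adapter rather than a private bus per encoder. DDC itself is ordinary I2C: the sink exposes its EDID at slave address 0x50, and a block read is a one-byte offset write followed by 128 bytes whose first 8 form a fixed header. A hedged userspace sketch over i2c-dev (the /dev/i2c-1 bus number is an assumption for illustration only):

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/i2c-dev.h>

    #define EDID_ADDR 0x50   /* fixed DDC slave address */

    int main(void)
    {
        static const unsigned char header[8] =
            { 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 };
        unsigned char offset = 0, edid[128];
        int fd = open("/dev/i2c-1", O_RDWR);   /* bus number is a guess */

        if (fd < 0 || ioctl(fd, I2C_SLAVE, EDID_ADDR) < 0)
            return 1;
        /* Set the read pointer, then fetch one 128-byte EDID block. */
        if (write(fd, &offset, 1) != 1 || read(fd, edid, sizeof(edid)) != 128)
            return 1;
        printf("EDID header %s\n",
               memcmp(edid, header, sizeof(header)) ? "invalid" : "valid");
        close(fd);
        return 0;
    }
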
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index c2649c7df14c..2449a74d4d80 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * Copyright (c) 2006 Dave Airlie <airlied@linux.ie> 2 * Copyright (c) 2006 Dave Airlie <airlied@linux.ie>
3 * Copyright © 2006-2008 Intel Corporation 3 * Copyright © 2006-2008,2010 Intel Corporation
4 * Jesse Barnes <jesse.barnes@intel.com> 4 * Jesse Barnes <jesse.barnes@intel.com>
5 * 5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a 6 * Permission is hereby granted, free of charge, to any person obtaining a
@@ -24,10 +24,9 @@
24 * 24 *
25 * Authors: 25 * Authors:
26 * Eric Anholt <eric@anholt.net> 26 * Eric Anholt <eric@anholt.net>
27 * Chris Wilson <chris@chris-wilson.co.uk>
27 */ 28 */
28#include <linux/i2c.h> 29#include <linux/i2c.h>
29#include <linux/slab.h>
30#include <linux/i2c-id.h>
31#include <linux/i2c-algo-bit.h> 30#include <linux/i2c-algo-bit.h>
32#include "drmP.h" 31#include "drmP.h"
33#include "drm.h" 32#include "drm.h"
@@ -35,79 +34,106 @@
35#include "i915_drm.h" 34#include "i915_drm.h"
36#include "i915_drv.h" 35#include "i915_drv.h"
37 36
38void intel_i2c_quirk_set(struct drm_device *dev, bool enable) 37/* Intel GPIO access functions */
38
39#define I2C_RISEFALL_TIME 20
40
41static inline struct intel_gmbus *
42to_intel_gmbus(struct i2c_adapter *i2c)
43{
44 return container_of(i2c, struct intel_gmbus, adapter);
45}
46
47struct intel_gpio {
48 struct i2c_adapter adapter;
49 struct i2c_algo_bit_data algo;
50 struct drm_i915_private *dev_priv;
51 u32 reg;
52};
53
54void
55intel_i2c_reset(struct drm_device *dev)
39{ 56{
40 struct drm_i915_private *dev_priv = dev->dev_private; 57 struct drm_i915_private *dev_priv = dev->dev_private;
58 if (HAS_PCH_SPLIT(dev))
59 I915_WRITE(PCH_GMBUS0, 0);
60 else
61 I915_WRITE(GMBUS0, 0);
62}
63
64static void intel_i2c_quirk_set(struct drm_i915_private *dev_priv, bool enable)
65{
66 u32 val;
41 67
42 /* When using bit bashing for I2C, this bit needs to be set to 1 */ 68 /* When using bit bashing for I2C, this bit needs to be set to 1 */
43 if (!IS_PINEVIEW(dev)) 69 if (!IS_PINEVIEW(dev_priv->dev))
44 return; 70 return;
71
72 val = I915_READ(DSPCLK_GATE_D);
45 if (enable) 73 if (enable)
46 I915_WRITE(DSPCLK_GATE_D, 74 val |= DPCUNIT_CLOCK_GATE_DISABLE;
47 I915_READ(DSPCLK_GATE_D) | DPCUNIT_CLOCK_GATE_DISABLE);
48 else 75 else
49 I915_WRITE(DSPCLK_GATE_D, 76 val &= ~DPCUNIT_CLOCK_GATE_DISABLE;
50 I915_READ(DSPCLK_GATE_D) & (~DPCUNIT_CLOCK_GATE_DISABLE)); 77 I915_WRITE(DSPCLK_GATE_D, val);
51} 78}
52 79
53/* 80static u32 get_reserved(struct intel_gpio *gpio)
54 * Intel GPIO access functions 81{
55 */ 82 struct drm_i915_private *dev_priv = gpio->dev_priv;
83 struct drm_device *dev = dev_priv->dev;
84 u32 reserved = 0;
56 85
57#define I2C_RISEFALL_TIME 20 86 /* On most chips, these bits must be preserved in software. */
87 if (!IS_I830(dev) && !IS_845G(dev))
88 reserved = I915_READ(gpio->reg) & (GPIO_DATA_PULLUP_DISABLE |
89 GPIO_CLOCK_PULLUP_DISABLE);
90
91 return reserved;
92}
58 93
59static int get_clock(void *data) 94static int get_clock(void *data)
60{ 95{
61 struct intel_i2c_chan *chan = data; 96 struct intel_gpio *gpio = data;
62 struct drm_i915_private *dev_priv = chan->drm_dev->dev_private; 97 struct drm_i915_private *dev_priv = gpio->dev_priv;
63 u32 val; 98 u32 reserved = get_reserved(gpio);
64 99 I915_WRITE(gpio->reg, reserved | GPIO_CLOCK_DIR_MASK);
65 val = I915_READ(chan->reg); 100 I915_WRITE(gpio->reg, reserved);
66 return ((val & GPIO_CLOCK_VAL_IN) != 0); 101 return (I915_READ(gpio->reg) & GPIO_CLOCK_VAL_IN) != 0;
67} 102}
68 103
69static int get_data(void *data) 104static int get_data(void *data)
70{ 105{
71 struct intel_i2c_chan *chan = data; 106 struct intel_gpio *gpio = data;
72 struct drm_i915_private *dev_priv = chan->drm_dev->dev_private; 107 struct drm_i915_private *dev_priv = gpio->dev_priv;
73 u32 val; 108 u32 reserved = get_reserved(gpio);
74 109 I915_WRITE(gpio->reg, reserved | GPIO_DATA_DIR_MASK);
75 val = I915_READ(chan->reg); 110 I915_WRITE(gpio->reg, reserved);
76 return ((val & GPIO_DATA_VAL_IN) != 0); 111 return (I915_READ(gpio->reg) & GPIO_DATA_VAL_IN) != 0;
77} 112}
78 113
79static void set_clock(void *data, int state_high) 114static void set_clock(void *data, int state_high)
80{ 115{
81 struct intel_i2c_chan *chan = data; 116 struct intel_gpio *gpio = data;
82 struct drm_device *dev = chan->drm_dev; 117 struct drm_i915_private *dev_priv = gpio->dev_priv;
83 struct drm_i915_private *dev_priv = chan->drm_dev->dev_private; 118 u32 reserved = get_reserved(gpio);
84 u32 reserved = 0, clock_bits; 119 u32 clock_bits;
85
86 /* On most chips, these bits must be preserved in software. */
87 if (!IS_I830(dev) && !IS_845G(dev))
88 reserved = I915_READ(chan->reg) & (GPIO_DATA_PULLUP_DISABLE |
89 GPIO_CLOCK_PULLUP_DISABLE);
90 120
91 if (state_high) 121 if (state_high)
92 clock_bits = GPIO_CLOCK_DIR_IN | GPIO_CLOCK_DIR_MASK; 122 clock_bits = GPIO_CLOCK_DIR_IN | GPIO_CLOCK_DIR_MASK;
93 else 123 else
94 clock_bits = GPIO_CLOCK_DIR_OUT | GPIO_CLOCK_DIR_MASK | 124 clock_bits = GPIO_CLOCK_DIR_OUT | GPIO_CLOCK_DIR_MASK |
95 GPIO_CLOCK_VAL_MASK; 125 GPIO_CLOCK_VAL_MASK;
96 I915_WRITE(chan->reg, reserved | clock_bits); 126
97 udelay(I2C_RISEFALL_TIME); /* wait for the line to change state */ 127 I915_WRITE(gpio->reg, reserved | clock_bits);
128 POSTING_READ(gpio->reg);
98} 129}
99 130
100static void set_data(void *data, int state_high) 131static void set_data(void *data, int state_high)
101{ 132{
102 struct intel_i2c_chan *chan = data; 133 struct intel_gpio *gpio = data;
103 struct drm_device *dev = chan->drm_dev; 134 struct drm_i915_private *dev_priv = gpio->dev_priv;
104 struct drm_i915_private *dev_priv = chan->drm_dev->dev_private; 135 u32 reserved = get_reserved(gpio);
105 u32 reserved = 0, data_bits; 136 u32 data_bits;
106
107 /* On most chips, these bits must be preserved in software. */
108 if (!IS_I830(dev) && !IS_845G(dev))
109 reserved = I915_READ(chan->reg) & (GPIO_DATA_PULLUP_DISABLE |
110 GPIO_CLOCK_PULLUP_DISABLE);
111 137
112 if (state_high) 138 if (state_high)
113 data_bits = GPIO_DATA_DIR_IN | GPIO_DATA_DIR_MASK; 139 data_bits = GPIO_DATA_DIR_IN | GPIO_DATA_DIR_MASK;
@@ -115,109 +141,312 @@ static void set_data(void *data, int state_high)
115 data_bits = GPIO_DATA_DIR_OUT | GPIO_DATA_DIR_MASK | 141 data_bits = GPIO_DATA_DIR_OUT | GPIO_DATA_DIR_MASK |
116 GPIO_DATA_VAL_MASK; 142 GPIO_DATA_VAL_MASK;
117 143
118 I915_WRITE(chan->reg, reserved | data_bits); 144 I915_WRITE(gpio->reg, reserved | data_bits);
119 udelay(I2C_RISEFALL_TIME); /* wait for the line to change state */ 145 POSTING_READ(gpio->reg);
120} 146}
121 147
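
The rewritten GPIO helpers spell out the open-drain rules: a line is pulled low by driving it as an output with value 0, and "raised" only by releasing it to the external pull-up (direction in), never by driving it high; get_clock()/get_data() release the line before sampling so a slow slave can stretch the clock. The fixed udelay()s disappear because i2c-algo-bit already paces transitions through algo.udelay, leaving only a POSTING_READ to flush the write. A toy model of the wired-AND line (hedged sketch, not kernel code):

    #include <stdbool.h>
    #include <stdio.h>

    /* Open-drain bus line: every device either pulls it low or lets it
     * float; the pull-up resistor supplies the high level (wired-AND). */
    struct od_line {
        bool master_pulls_low;
        bool slave_pulls_low;
    };

    static int line_level(const struct od_line *l)
    {
        return !(l->master_pulls_low || l->slave_pulls_low);
    }

    int main(void)
    {
        struct od_line scl = { false, false };

        scl.master_pulls_low = false;  /* master releases SCL... */
        scl.slave_pulls_low = true;    /* ...but the slave stretches it */
        printf("SCL=%d (slave holds the clock low)\n", line_level(&scl));

        scl.slave_pulls_low = false;
        printf("SCL=%d (pull-up raises the line)\n", line_level(&scl));
        return 0;
    }
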
122/* Clears the GMBUS setup. Our driver doesn't make use of the GMBUS I2C 148static struct i2c_adapter *
123 * engine, but if the BIOS leaves it enabled, then that can break our use 149intel_gpio_create(struct drm_i915_private *dev_priv, u32 pin)
124 * of the bit-banging I2C interfaces. This is notably the case with the
125 * Mac Mini in EFI mode.
126 */
127void
128intel_i2c_reset_gmbus(struct drm_device *dev)
129{ 150{
130 struct drm_i915_private *dev_priv = dev->dev_private; 151 static const int map_pin_to_reg[] = {
152 0,
153 GPIOB,
154 GPIOA,
155 GPIOC,
156 GPIOD,
157 GPIOE,
158 GPIOF,
159 };
160 struct intel_gpio *gpio;
131 161
 132 if (HAS_PCH_SPLIT(dev)) { 162 if (pin < 1 || pin >= ARRAY_SIZE(map_pin_to_reg))
133 I915_WRITE(PCH_GMBUS0, 0); 163 return NULL;
134 } else { 164
135 I915_WRITE(GMBUS0, 0); 165 gpio = kzalloc(sizeof(struct intel_gpio), GFP_KERNEL);
166 if (gpio == NULL)
167 return NULL;
168
169 gpio->reg = map_pin_to_reg[pin];
170 if (HAS_PCH_SPLIT(dev_priv->dev))
171 gpio->reg += PCH_GPIOA - GPIOA;
172 gpio->dev_priv = dev_priv;
173
174 snprintf(gpio->adapter.name, I2C_NAME_SIZE, "GPIO%c", "?BACDEF?"[pin]);
175 gpio->adapter.owner = THIS_MODULE;
176 gpio->adapter.algo_data = &gpio->algo;
177 gpio->adapter.dev.parent = &dev_priv->dev->pdev->dev;
178 gpio->algo.setsda = set_data;
179 gpio->algo.setscl = set_clock;
180 gpio->algo.getsda = get_data;
181 gpio->algo.getscl = get_clock;
182 gpio->algo.udelay = I2C_RISEFALL_TIME;
183 gpio->algo.timeout = usecs_to_jiffies(2200);
184 gpio->algo.data = gpio;
185
186 if (i2c_bit_add_bus(&gpio->adapter))
187 goto out_free;
188
189 return &gpio->adapter;
190
191out_free:
192 kfree(gpio);
193 return NULL;
194}
195
196static int
197intel_i2c_quirk_xfer(struct drm_i915_private *dev_priv,
198 struct i2c_adapter *adapter,
199 struct i2c_msg *msgs,
200 int num)
201{
202 struct intel_gpio *gpio = container_of(adapter,
203 struct intel_gpio,
204 adapter);
205 int ret;
206
207 intel_i2c_reset(dev_priv->dev);
208
209 intel_i2c_quirk_set(dev_priv, true);
210 set_data(gpio, 1);
211 set_clock(gpio, 1);
212 udelay(I2C_RISEFALL_TIME);
213
214 ret = adapter->algo->master_xfer(adapter, msgs, num);
215
216 set_data(gpio, 1);
217 set_clock(gpio, 1);
218 intel_i2c_quirk_set(dev_priv, false);
219
220 return ret;
221}
222
223static int
224gmbus_xfer(struct i2c_adapter *adapter,
225 struct i2c_msg *msgs,
226 int num)
227{
228 struct intel_gmbus *bus = container_of(adapter,
229 struct intel_gmbus,
230 adapter);
231 struct drm_i915_private *dev_priv = adapter->algo_data;
232 int i, reg_offset;
233
234 if (bus->force_bit)
235 return intel_i2c_quirk_xfer(dev_priv,
236 bus->force_bit, msgs, num);
237
238 reg_offset = HAS_PCH_SPLIT(dev_priv->dev) ? PCH_GMBUS0 - GMBUS0 : 0;
239
240 I915_WRITE(GMBUS0 + reg_offset, bus->reg0);
241
242 for (i = 0; i < num; i++) {
243 u16 len = msgs[i].len;
244 u8 *buf = msgs[i].buf;
245
246 if (msgs[i].flags & I2C_M_RD) {
247 I915_WRITE(GMBUS1 + reg_offset,
248 GMBUS_CYCLE_WAIT | (i + 1 == num ? GMBUS_CYCLE_STOP : 0) |
249 (len << GMBUS_BYTE_COUNT_SHIFT) |
250 (msgs[i].addr << GMBUS_SLAVE_ADDR_SHIFT) |
251 GMBUS_SLAVE_READ | GMBUS_SW_RDY);
252 POSTING_READ(GMBUS2+reg_offset);
253 do {
254 u32 val, loop = 0;
255
256 if (wait_for(I915_READ(GMBUS2 + reg_offset) & (GMBUS_SATOER | GMBUS_HW_RDY), 50))
257 goto timeout;
258 if (I915_READ(GMBUS2 + reg_offset) & GMBUS_SATOER)
259 return 0;
260
261 val = I915_READ(GMBUS3 + reg_offset);
262 do {
263 *buf++ = val & 0xff;
264 val >>= 8;
265 } while (--len && ++loop < 4);
266 } while (len);
267 } else {
268 u32 val, loop;
269
270 val = loop = 0;
271 do {
272 val |= *buf++ << (8 * loop);
273 } while (--len && ++loop < 4);
274
275 I915_WRITE(GMBUS3 + reg_offset, val);
276 I915_WRITE(GMBUS1 + reg_offset,
277 (i + 1 == num ? GMBUS_CYCLE_STOP : GMBUS_CYCLE_WAIT) |
278 (msgs[i].len << GMBUS_BYTE_COUNT_SHIFT) |
279 (msgs[i].addr << GMBUS_SLAVE_ADDR_SHIFT) |
280 GMBUS_SLAVE_WRITE | GMBUS_SW_RDY);
281 POSTING_READ(GMBUS2+reg_offset);
282
283 while (len) {
284 if (wait_for(I915_READ(GMBUS2 + reg_offset) & (GMBUS_SATOER | GMBUS_HW_RDY), 50))
285 goto timeout;
286 if (I915_READ(GMBUS2 + reg_offset) & GMBUS_SATOER)
287 return 0;
288
289 val = loop = 0;
290 do {
291 val |= *buf++ << (8 * loop);
292 } while (--len && ++loop < 4);
293
294 I915_WRITE(GMBUS3 + reg_offset, val);
295 POSTING_READ(GMBUS2+reg_offset);
296 }
297 }
298
299 if (i + 1 < num && wait_for(I915_READ(GMBUS2 + reg_offset) & (GMBUS_SATOER | GMBUS_HW_WAIT_PHASE), 50))
300 goto timeout;
301 if (I915_READ(GMBUS2 + reg_offset) & GMBUS_SATOER)
302 return 0;
136 } 303 }
304
305 return num;
306
307timeout:
308 DRM_INFO("GMBUS timed out, falling back to bit banging on pin %d [%s]\n",
309 bus->reg0 & 0xff, bus->adapter.name);
310 /* Hardware may not support GMBUS over these pins? Try GPIO bitbanging instead. */
311 bus->force_bit = intel_gpio_create(dev_priv, bus->reg0 & 0xff);
312 if (!bus->force_bit)
313 return -ENOMEM;
314
315 return intel_i2c_quirk_xfer(dev_priv, bus->force_bit, msgs, num);
137} 316}
138 317
318static u32 gmbus_func(struct i2c_adapter *adapter)
319{
320 struct intel_gmbus *bus = container_of(adapter,
321 struct intel_gmbus,
322 adapter);
323
324 if (bus->force_bit)
 325 return bus->force_bit->algo->functionality(bus->force_bit);
326
327 return (I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
328 /* I2C_FUNC_10BIT_ADDR | */
329 I2C_FUNC_SMBUS_READ_BLOCK_DATA |
330 I2C_FUNC_SMBUS_BLOCK_PROC_CALL);
331}
332
333static const struct i2c_algorithm gmbus_algorithm = {
334 .master_xfer = gmbus_xfer,
335 .functionality = gmbus_func
336};
337
139/** 338/**
140 * intel_i2c_create - instantiate an Intel i2c bus using the specified GPIO reg 339 * intel_gmbus_setup - instantiate all Intel i2c GMBuses
141 * @dev: DRM device 340 * @dev: DRM device
142 * @output: driver specific output device
143 * @reg: GPIO reg to use
144 * @name: name for this bus
145 * @slave_addr: slave address (if fixed)
146 *
147 * Creates and registers a new i2c bus with the Linux i2c layer, for use
148 * in output probing and control (e.g. DDC or SDVO control functions).
149 *
150 * Possible values for @reg include:
151 * %GPIOA
152 * %GPIOB
153 * %GPIOC
154 * %GPIOD
155 * %GPIOE
156 * %GPIOF
157 * %GPIOG
158 * %GPIOH
159 * see PRM for details on how these different busses are used.
160 */ 341 */
161struct i2c_adapter *intel_i2c_create(struct drm_device *dev, const u32 reg, 342int intel_setup_gmbus(struct drm_device *dev)
162 const char *name)
163{ 343{
164 struct intel_i2c_chan *chan; 344 static const char *names[GMBUS_NUM_PORTS] = {
345 "disabled",
346 "ssc",
347 "vga",
348 "panel",
349 "dpc",
350 "dpb",
351 "reserved"
352 "dpd",
353 };
354 struct drm_i915_private *dev_priv = dev->dev_private;
355 int ret, i;
165 356
 166 chan = kzalloc(sizeof(struct intel_i2c_chan), GFP_KERNEL); 357 dev_priv->gmbus = kcalloc(GMBUS_NUM_PORTS, sizeof(struct intel_gmbus),
167 if (!chan) 358 GFP_KERNEL);
168 goto out_free; 359 if (dev_priv->gmbus == NULL)
360 return -ENOMEM;
169 361
170 chan->drm_dev = dev; 362 for (i = 0; i < GMBUS_NUM_PORTS; i++) {
171 chan->reg = reg; 363 struct intel_gmbus *bus = &dev_priv->gmbus[i];
172 snprintf(chan->adapter.name, I2C_NAME_SIZE, "intel drm %s", name);
173 chan->adapter.owner = THIS_MODULE;
174 chan->adapter.algo_data = &chan->algo;
175 chan->adapter.dev.parent = &dev->pdev->dev;
176 chan->algo.setsda = set_data;
177 chan->algo.setscl = set_clock;
178 chan->algo.getsda = get_data;
179 chan->algo.getscl = get_clock;
180 chan->algo.udelay = 20;
181 chan->algo.timeout = usecs_to_jiffies(2200);
182 chan->algo.data = chan;
183
184 i2c_set_adapdata(&chan->adapter, chan);
185
186 if(i2c_bit_add_bus(&chan->adapter))
187 goto out_free;
188 364
189 intel_i2c_reset_gmbus(dev); 365 bus->adapter.owner = THIS_MODULE;
366 bus->adapter.class = I2C_CLASS_DDC;
367 snprintf(bus->adapter.name,
368 I2C_NAME_SIZE,
369 "gmbus %s",
370 names[i]);
190 371
191 /* JJJ: raise SCL and SDA? */ 372 bus->adapter.dev.parent = &dev->pdev->dev;
192 intel_i2c_quirk_set(dev, true); 373 bus->adapter.algo_data = dev_priv;
193 set_data(chan, 1);
194 set_clock(chan, 1);
195 intel_i2c_quirk_set(dev, false);
196 udelay(20);
197 374
198 return &chan->adapter; 375 bus->adapter.algo = &gmbus_algorithm;
376 ret = i2c_add_adapter(&bus->adapter);
377 if (ret)
378 goto err;
199 379
200out_free: 380 /* By default use a conservative clock rate */
201 kfree(chan); 381 bus->reg0 = i | GMBUS_RATE_100KHZ;
202 return NULL; 382
383 /* XXX force bit banging until GMBUS is fully debugged */
384 bus->force_bit = intel_gpio_create(dev_priv, i);
385 }
386
387 intel_i2c_reset(dev_priv->dev);
388
389 return 0;
390
391err:
 392 while (i--) {
393 struct intel_gmbus *bus = &dev_priv->gmbus[i];
394 i2c_del_adapter(&bus->adapter);
395 }
396 kfree(dev_priv->gmbus);
397 dev_priv->gmbus = NULL;
398 return ret;
203} 399}
204 400
205/** 401void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed)
206 * intel_i2c_destroy - unregister and free i2c bus resources 402{
207 * @output: channel to free 403 struct intel_gmbus *bus = to_intel_gmbus(adapter);
208 * 404
209 * Unregister the adapter from the i2c layer, then free the structure. 405 /* speed:
210 */ 406 * 0x0 = 100 KHz
211void intel_i2c_destroy(struct i2c_adapter *adapter) 407 * 0x1 = 50 KHz
408 * 0x2 = 400 KHz
 409 * 0x3 = 1000 KHz
410 */
411 bus->reg0 = (bus->reg0 & ~(0x3 << 8)) | (speed << 8);
412}
413
414void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit)
415{
416 struct intel_gmbus *bus = to_intel_gmbus(adapter);
417
418 if (force_bit) {
419 if (bus->force_bit == NULL) {
420 struct drm_i915_private *dev_priv = adapter->algo_data;
421 bus->force_bit = intel_gpio_create(dev_priv,
422 bus->reg0 & 0xff);
423 }
424 } else {
425 if (bus->force_bit) {
426 i2c_del_adapter(bus->force_bit);
427 kfree(bus->force_bit);
428 bus->force_bit = NULL;
429 }
430 }
431}
432
433void intel_teardown_gmbus(struct drm_device *dev)
212{ 434{
213 struct intel_i2c_chan *chan; 435 struct drm_i915_private *dev_priv = dev->dev_private;
436 int i;
214 437
215 if (!adapter) 438 if (dev_priv->gmbus == NULL)
216 return; 439 return;
217 440
218 chan = container_of(adapter, 441 for (i = 0; i < GMBUS_NUM_PORTS; i++) {
219 struct intel_i2c_chan, 442 struct intel_gmbus *bus = &dev_priv->gmbus[i];
220 adapter); 443 if (bus->force_bit) {
221 i2c_del_adapter(&chan->adapter); 444 i2c_del_adapter(bus->force_bit);
222 kfree(chan); 445 kfree(bus->force_bit);
446 }
447 i2c_del_adapter(&bus->adapter);
448 }
449
450 kfree(dev_priv->gmbus);
451 dev_priv->gmbus = NULL;
223} 452}
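
gmbus_xfer() above shuttles message bytes through the 32-bit GMBUS3 data register up to four at a time, least-significant byte first, polling GMBUS2 for GMBUS_HW_RDY between words and bailing out when GMBUS_SATOER reports a slave ack timeout; intermediate messages use GMBUS_CYCLE_WAIT, the final one adds GMBUS_CYCLE_STOP, and any timeout permanently demotes the port to the bit-banging adapter from intel_gpio_create(). The byte packing is the easiest part to get wrong, so here it is in isolation (hedged, self-contained sketch):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Pack up to 4 message bytes into one 32-bit data-register word,
     * LSB first, the layout gmbus_xfer() writes to GMBUS3. */
    static uint32_t pack_word(const uint8_t *buf, size_t len)
    {
        uint32_t val = 0;
        for (size_t i = 0; i < len && i < 4; i++)
            val |= (uint32_t)buf[i] << (8 * i);
        return val;
    }

    /* Inverse: unpack a register word into at most 4 message bytes,
     * returning how many were consumed. */
    static size_t unpack_word(uint32_t val, uint8_t *buf, size_t len)
    {
        size_t n = len < 4 ? len : 4;
        for (size_t i = 0; i < n; i++)
            buf[i] = (val >> (8 * i)) & 0xff;
        return n;
    }

    int main(void)
    {
        const uint8_t msg[6] = { 0x11, 0x22, 0x33, 0x44, 0x55, 0x66 };
        uint8_t out[6];
        size_t done = 0;

        /* A 6-byte transfer needs two register words: 4 bytes, then 2;
         * between the two, real hardware would be polled for HW_RDY. */
        while (done < sizeof(msg)) {
            uint32_t w = pack_word(msg + done, sizeof(msg) - done);
            done += unpack_word(w, out + done, sizeof(out) - done);
        }
        printf("round trip %s\n", memcmp(msg, out, sizeof(msg)) ? "failed" : "ok");
        return 0;
    }

The speed field set by intel_gmbus_set_speed() lives in bits 9:8 of the same GMBUS0 value (bus->reg0) that selects the pin, which is why both are ORed into one register write at the top of gmbus_xfer().
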
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 6ec39a86ed06..f1a649990ea9 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -43,102 +43,76 @@
43/* Private structure for the integrated LVDS support */ 43/* Private structure for the integrated LVDS support */
44struct intel_lvds { 44struct intel_lvds {
45 struct intel_encoder base; 45 struct intel_encoder base;
46
47 struct edid *edid;
48
46 int fitting_mode; 49 int fitting_mode;
47 u32 pfit_control; 50 u32 pfit_control;
48 u32 pfit_pgm_ratios; 51 u32 pfit_pgm_ratios;
52 bool pfit_dirty;
53
54 struct drm_display_mode *fixed_mode;
49}; 55};
50 56
51static struct intel_lvds *enc_to_intel_lvds(struct drm_encoder *encoder) 57static struct intel_lvds *to_intel_lvds(struct drm_encoder *encoder)
52{ 58{
53 return container_of(enc_to_intel_encoder(encoder), struct intel_lvds, base); 59 return container_of(encoder, struct intel_lvds, base.base);
54}
55
56/**
57 * Sets the backlight level.
58 *
59 * \param level backlight level, from 0 to intel_lvds_get_max_backlight().
60 */
61static void intel_lvds_set_backlight(struct drm_device *dev, int level)
62{
63 struct drm_i915_private *dev_priv = dev->dev_private;
64 u32 blc_pwm_ctl, reg;
65
66 if (HAS_PCH_SPLIT(dev))
67 reg = BLC_PWM_CPU_CTL;
68 else
69 reg = BLC_PWM_CTL;
70
71 blc_pwm_ctl = I915_READ(reg) & ~BACKLIGHT_DUTY_CYCLE_MASK;
72 I915_WRITE(reg, (blc_pwm_ctl |
73 (level << BACKLIGHT_DUTY_CYCLE_SHIFT)));
74} 60}
75 61
76/** 62static struct intel_lvds *intel_attached_lvds(struct drm_connector *connector)
77 * Returns the maximum level of the backlight duty cycle field.
78 */
79static u32 intel_lvds_get_max_backlight(struct drm_device *dev)
80{ 63{
81 struct drm_i915_private *dev_priv = dev->dev_private; 64 return container_of(intel_attached_encoder(connector),
82 u32 reg; 65 struct intel_lvds, base);
83
84 if (HAS_PCH_SPLIT(dev))
85 reg = BLC_PWM_PCH_CTL2;
86 else
87 reg = BLC_PWM_CTL;
88
89 return ((I915_READ(reg) & BACKLIGHT_MODULATION_FREQ_MASK) >>
90 BACKLIGHT_MODULATION_FREQ_SHIFT) * 2;
91} 66}
92 67
93/** 68/**
94 * Sets the power state for the panel. 69 * Sets the power state for the panel.
95 */ 70 */
96static void intel_lvds_set_power(struct drm_device *dev, bool on) 71static void intel_lvds_set_power(struct intel_lvds *intel_lvds, bool on)
97{ 72{
73 struct drm_device *dev = intel_lvds->base.base.dev;
98 struct drm_i915_private *dev_priv = dev->dev_private; 74 struct drm_i915_private *dev_priv = dev->dev_private;
99 u32 ctl_reg, status_reg, lvds_reg; 75 u32 ctl_reg, lvds_reg;
100 76
101 if (HAS_PCH_SPLIT(dev)) { 77 if (HAS_PCH_SPLIT(dev)) {
102 ctl_reg = PCH_PP_CONTROL; 78 ctl_reg = PCH_PP_CONTROL;
103 status_reg = PCH_PP_STATUS;
104 lvds_reg = PCH_LVDS; 79 lvds_reg = PCH_LVDS;
105 } else { 80 } else {
106 ctl_reg = PP_CONTROL; 81 ctl_reg = PP_CONTROL;
107 status_reg = PP_STATUS;
108 lvds_reg = LVDS; 82 lvds_reg = LVDS;
109 } 83 }
110 84
111 if (on) { 85 if (on) {
112 I915_WRITE(lvds_reg, I915_READ(lvds_reg) | LVDS_PORT_EN); 86 I915_WRITE(lvds_reg, I915_READ(lvds_reg) | LVDS_PORT_EN);
113 POSTING_READ(lvds_reg); 87 I915_WRITE(ctl_reg, I915_READ(ctl_reg) | POWER_TARGET_ON);
114 88 intel_panel_set_backlight(dev, dev_priv->backlight_level);
115 I915_WRITE(ctl_reg, I915_READ(ctl_reg) |
116 POWER_TARGET_ON);
117 if (wait_for(I915_READ(status_reg) & PP_ON, 1000, 0))
118 DRM_ERROR("timed out waiting to enable LVDS pipe");
119
120 intel_lvds_set_backlight(dev, dev_priv->backlight_duty_cycle);
121 } else { 89 } else {
122 intel_lvds_set_backlight(dev, 0); 90 dev_priv->backlight_level = intel_panel_get_backlight(dev);
91
92 intel_panel_set_backlight(dev, 0);
93 I915_WRITE(ctl_reg, I915_READ(ctl_reg) & ~POWER_TARGET_ON);
123 94
124 I915_WRITE(ctl_reg, I915_READ(ctl_reg) & 95 if (intel_lvds->pfit_control) {
125 ~POWER_TARGET_ON); 96 if (wait_for((I915_READ(PP_STATUS) & PP_ON) == 0, 1000))
126 if (wait_for((I915_READ(status_reg) & PP_ON) == 0, 1000, 0)) 97 DRM_ERROR("timed out waiting for panel to power off\n");
127 DRM_ERROR("timed out waiting for LVDS pipe to turn off"); 98 I915_WRITE(PFIT_CONTROL, 0);
99 intel_lvds->pfit_control = 0;
100 intel_lvds->pfit_dirty = false;
101 }
128 102
129 I915_WRITE(lvds_reg, I915_READ(lvds_reg) & ~LVDS_PORT_EN); 103 I915_WRITE(lvds_reg, I915_READ(lvds_reg) & ~LVDS_PORT_EN);
130 POSTING_READ(lvds_reg);
131 } 104 }
105 POSTING_READ(lvds_reg);
132} 106}
133 107
134static void intel_lvds_dpms(struct drm_encoder *encoder, int mode) 108static void intel_lvds_dpms(struct drm_encoder *encoder, int mode)
135{ 109{
136 struct drm_device *dev = encoder->dev; 110 struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
137 111
138 if (mode == DRM_MODE_DPMS_ON) 112 if (mode == DRM_MODE_DPMS_ON)
139 intel_lvds_set_power(dev, true); 113 intel_lvds_set_power(intel_lvds, true);
140 else 114 else
141 intel_lvds_set_power(dev, false); 115 intel_lvds_set_power(intel_lvds, false);
142 116
143 /* XXX: We never power down the LVDS pairs. */ 117 /* XXX: We never power down the LVDS pairs. */
144} 118}
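
struct intel_lvds now carries a shadow of the panel-fitter state: pfit_control/pfit_pgm_ratios are cached, pfit_dirty is raised only when mode fixup later computes different values, and the power-off path above waits for PP_ON to clear before zeroing PFIT_CONTROL, since pre-Ironlake hardware only allows fitter changes with the pipe down. The cache-plus-dirty-flag pattern in isolation (hedged sketch; the printf stands in for I915_WRITE):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct shadow_reg {
        uint32_t value;   /* last value software decided on */
        bool dirty;       /* not yet written to hardware */
    };

    /* Record a new desired value; mark dirty only on a real change. */
    static void shadow_set(struct shadow_reg *r, uint32_t v)
    {
        if (r->value != v) {
            r->value = v;
            r->dirty = true;
        }
    }

    /* Flush to "hardware" only when something actually changed. */
    static void shadow_flush(struct shadow_reg *r)
    {
        if (!r->dirty)
            return;
        printf("write 0x%08x\n", (unsigned)r->value);
        r->dirty = false;
    }

    int main(void)
    {
        struct shadow_reg pfit = { 0, false };

        shadow_set(&pfit, 0x80000000u);  /* PFIT_ENABLE-style bit */
        shadow_flush(&pfit);             /* one register write */
        shadow_set(&pfit, 0x80000000u);  /* same value: stays clean */
        shadow_flush(&pfit);             /* no write this time */
        return 0;
    }
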
@@ -146,16 +120,13 @@ static void intel_lvds_dpms(struct drm_encoder *encoder, int mode)
146static int intel_lvds_mode_valid(struct drm_connector *connector, 120static int intel_lvds_mode_valid(struct drm_connector *connector,
147 struct drm_display_mode *mode) 121 struct drm_display_mode *mode)
148{ 122{
149 struct drm_device *dev = connector->dev; 123 struct intel_lvds *intel_lvds = intel_attached_lvds(connector);
150 struct drm_i915_private *dev_priv = dev->dev_private; 124 struct drm_display_mode *fixed_mode = intel_lvds->fixed_mode;
151 struct drm_display_mode *fixed_mode = dev_priv->panel_fixed_mode;
152 125
153 if (fixed_mode) { 126 if (mode->hdisplay > fixed_mode->hdisplay)
154 if (mode->hdisplay > fixed_mode->hdisplay) 127 return MODE_PANEL;
155 return MODE_PANEL; 128 if (mode->vdisplay > fixed_mode->vdisplay)
156 if (mode->vdisplay > fixed_mode->vdisplay) 129 return MODE_PANEL;
157 return MODE_PANEL;
158 }
159 130
160 return MODE_OK; 131 return MODE_OK;
161} 132}
@@ -223,12 +194,12 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
223 struct drm_device *dev = encoder->dev; 194 struct drm_device *dev = encoder->dev;
224 struct drm_i915_private *dev_priv = dev->dev_private; 195 struct drm_i915_private *dev_priv = dev->dev_private;
225 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); 196 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
226 struct intel_lvds *intel_lvds = enc_to_intel_lvds(encoder); 197 struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
227 struct drm_encoder *tmp_encoder; 198 struct drm_encoder *tmp_encoder;
228 u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0; 199 u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0;
229 200
230 /* Should never happen!! */ 201 /* Should never happen!! */
231 if (!IS_I965G(dev) && intel_crtc->pipe == 0) { 202 if (INTEL_INFO(dev)->gen < 4 && intel_crtc->pipe == 0) {
232 DRM_ERROR("Can't support LVDS on pipe A\n"); 203 DRM_ERROR("Can't support LVDS on pipe A\n");
233 return false; 204 return false;
234 } 205 }
@@ -241,9 +212,6 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
241 return false; 212 return false;
242 } 213 }
243 } 214 }
244 /* If we don't have a panel mode, there is nothing we can do */
245 if (dev_priv->panel_fixed_mode == NULL)
246 return true;
247 215
248 /* 216 /*
249 * We have timings from the BIOS for the panel, put them in 217 * We have timings from the BIOS for the panel, put them in
@@ -251,7 +219,7 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
251 * with the panel scaling set up to source from the H/VDisplay 219 * with the panel scaling set up to source from the H/VDisplay
252 * of the original mode. 220 * of the original mode.
253 */ 221 */
254 intel_fixed_panel_mode(dev_priv->panel_fixed_mode, adjusted_mode); 222 intel_fixed_panel_mode(intel_lvds->fixed_mode, adjusted_mode);
255 223
256 if (HAS_PCH_SPLIT(dev)) { 224 if (HAS_PCH_SPLIT(dev)) {
257 intel_pch_panel_fitting(dev, intel_lvds->fitting_mode, 225 intel_pch_panel_fitting(dev, intel_lvds->fitting_mode,
@@ -260,8 +228,8 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
260 } 228 }
261 229
262 /* Make sure pre-965s set dither correctly */ 230 /* Make sure pre-965s set dither correctly */
263 if (!IS_I965G(dev)) { 231 if (INTEL_INFO(dev)->gen < 4) {
264 if (dev_priv->panel_wants_dither || dev_priv->lvds_dither) 232 if (dev_priv->lvds_dither)
265 pfit_control |= PANEL_8TO6_DITHER_ENABLE; 233 pfit_control |= PANEL_8TO6_DITHER_ENABLE;
266 } 234 }
267 235
@@ -271,7 +239,7 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
271 goto out; 239 goto out;
272 240
273 /* 965+ wants fuzzy fitting */ 241 /* 965+ wants fuzzy fitting */
274 if (IS_I965G(dev)) 242 if (INTEL_INFO(dev)->gen >= 4)
275 pfit_control |= ((intel_crtc->pipe << PFIT_PIPE_SHIFT) | 243 pfit_control |= ((intel_crtc->pipe << PFIT_PIPE_SHIFT) |
276 PFIT_FILTER_FUZZY); 244 PFIT_FILTER_FUZZY);
277 245
@@ -297,7 +265,7 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
297 265
298 case DRM_MODE_SCALE_ASPECT: 266 case DRM_MODE_SCALE_ASPECT:
299 /* Scale but preserve the aspect ratio */ 267 /* Scale but preserve the aspect ratio */
300 if (IS_I965G(dev)) { 268 if (INTEL_INFO(dev)->gen >= 4) {
301 u32 scaled_width = adjusted_mode->hdisplay * mode->vdisplay; 269 u32 scaled_width = adjusted_mode->hdisplay * mode->vdisplay;
302 u32 scaled_height = mode->hdisplay * adjusted_mode->vdisplay; 270 u32 scaled_height = mode->hdisplay * adjusted_mode->vdisplay;
303 271
@@ -356,7 +324,7 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
356 * Fortunately this is all done for us in hw. 324 * Fortunately this is all done for us in hw.
357 */ 325 */
358 pfit_control |= PFIT_ENABLE; 326 pfit_control |= PFIT_ENABLE;
359 if (IS_I965G(dev)) 327 if (INTEL_INFO(dev)->gen >= 4)
360 pfit_control |= PFIT_SCALING_AUTO; 328 pfit_control |= PFIT_SCALING_AUTO;
361 else 329 else
362 pfit_control |= (VERT_AUTO_SCALE | HORIZ_AUTO_SCALE | 330 pfit_control |= (VERT_AUTO_SCALE | HORIZ_AUTO_SCALE |
@@ -369,8 +337,12 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
369 } 337 }
370 338
371out: 339out:
372 intel_lvds->pfit_control = pfit_control; 340 if (pfit_control != intel_lvds->pfit_control ||
373 intel_lvds->pfit_pgm_ratios = pfit_pgm_ratios; 341 pfit_pgm_ratios != intel_lvds->pfit_pgm_ratios) {
342 intel_lvds->pfit_control = pfit_control;
343 intel_lvds->pfit_pgm_ratios = pfit_pgm_ratios;
344 intel_lvds->pfit_dirty = true;
345 }
374 dev_priv->lvds_border_bits = border; 346 dev_priv->lvds_border_bits = border;
375 347
376 /* 348 /*
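The out: block above caches the computed panel-fitter words and raises pfit_dirty only when they actually change, letting intel_lvds_mode_set() skip the update (which requires powering the panel off first) when nothing moved. A minimal standalone sketch of that dirty-flag pattern, with hypothetical names rather than the driver's structs:

#include <stdbool.h>
#include <stdint.h>

struct pfit_cache {
	uint32_t control;
	uint32_t pgm_ratios;
	bool dirty;
};

/* Record new values only when they differ from the cached ones;
 * the consumer clears .dirty after flushing them to hardware. */
static void pfit_cache_update(struct pfit_cache *c,
			      uint32_t control, uint32_t ratios)
{
	if (control != c->control || ratios != c->pgm_ratios) {
		c->control = control;
		c->pgm_ratios = ratios;
		c->dirty = true;
	}
}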
@@ -386,30 +358,60 @@ static void intel_lvds_prepare(struct drm_encoder *encoder)
386{ 358{
387 struct drm_device *dev = encoder->dev; 359 struct drm_device *dev = encoder->dev;
388 struct drm_i915_private *dev_priv = dev->dev_private; 360 struct drm_i915_private *dev_priv = dev->dev_private;
389 u32 reg; 361 struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
390 362
391 if (HAS_PCH_SPLIT(dev)) 363 dev_priv->backlight_level = intel_panel_get_backlight(dev);
392 reg = BLC_PWM_CPU_CTL; 364
393 else 365 /* We try to do the minimum that is necessary in order to unlock
394 reg = BLC_PWM_CTL; 366 * the registers for mode setting.
395 367 *
396 dev_priv->saveBLC_PWM_CTL = I915_READ(reg); 368 * On Ironlake, this is quite simple as we just set the unlock key
397 dev_priv->backlight_duty_cycle = (dev_priv->saveBLC_PWM_CTL & 369 * and ignore all subtleties. (This may cause some issues...)
398 BACKLIGHT_DUTY_CYCLE_MASK); 370 *
371 * Prior to Ironlake, we must disable the pipe if we want to adjust
372 * the panel fitter. However at all other times we can just reset
373 * the registers regardless.
374 */
399 375
400 intel_lvds_set_power(dev, false); 376 if (HAS_PCH_SPLIT(dev)) {
377 I915_WRITE(PCH_PP_CONTROL,
378 I915_READ(PCH_PP_CONTROL) | PANEL_UNLOCK_REGS);
379 } else if (intel_lvds->pfit_dirty) {
380 I915_WRITE(PP_CONTROL,
381 (I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS)
382 & ~POWER_TARGET_ON);
383 } else {
384 I915_WRITE(PP_CONTROL,
385 I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS);
386 }
401} 387}
402 388
403static void intel_lvds_commit( struct drm_encoder *encoder) 389static void intel_lvds_commit(struct drm_encoder *encoder)
404{ 390{
405 struct drm_device *dev = encoder->dev; 391 struct drm_device *dev = encoder->dev;
406 struct drm_i915_private *dev_priv = dev->dev_private; 392 struct drm_i915_private *dev_priv = dev->dev_private;
393 struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
407 394
408 if (dev_priv->backlight_duty_cycle == 0) 395 if (dev_priv->backlight_level == 0)
409 dev_priv->backlight_duty_cycle = 396 dev_priv->backlight_level = intel_panel_get_max_backlight(dev);
410 intel_lvds_get_max_backlight(dev); 397
398 /* Undo any unlocking done in prepare to prevent accidental
399 * adjustment of the registers.
400 */
401 if (HAS_PCH_SPLIT(dev)) {
402 u32 val = I915_READ(PCH_PP_CONTROL);
403 if ((val & PANEL_UNLOCK_REGS) == PANEL_UNLOCK_REGS)
404 I915_WRITE(PCH_PP_CONTROL, val & 0x3);
405 } else {
406 u32 val = I915_READ(PP_CONTROL);
407 if ((val & PANEL_UNLOCK_REGS) == PANEL_UNLOCK_REGS)
408 I915_WRITE(PP_CONTROL, val & 0x3);
409 }
411 410
412 intel_lvds_set_power(dev, true); 411 /* Always do a full power on as we do not know what state
412 * we were left in.
413 */
414 intel_lvds_set_power(intel_lvds, true);
413} 415}
414 416
415static void intel_lvds_mode_set(struct drm_encoder *encoder, 417static void intel_lvds_mode_set(struct drm_encoder *encoder,
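intel_lvds_prepare() now unlocks the panel power-sequencer registers (additionally forcing the panel off on pre-Ironlake when the fitter is dirty), and intel_lvds_commit() restores the write-protect key so stray writes are ignored again. A hedged sketch of that unlock/relock bracket; the MMIO accessors are stand-ins for I915_READ/I915_WRITE, the key value is a placeholder for PANEL_UNLOCK_REGS, and the 0x3 relock mask is simply what the diff writes back:

#include <stdint.h>

uint32_t mmio_read(uint32_t reg);           /* stand-in for I915_READ  */
void mmio_write(uint32_t reg, uint32_t v);  /* stand-in for I915_WRITE */

#define UNLOCK_KEY 0xabcd0000u              /* placeholder key */

static void panel_regs_unlock(uint32_t pp_control)
{
	mmio_write(pp_control, mmio_read(pp_control) | UNLOCK_KEY);
}

/* Relock only if we still hold the key, keeping the low
 * power-sequencer bits intact. */
static void panel_regs_relock(uint32_t pp_control)
{
	uint32_t val = mmio_read(pp_control);

	if ((val & UNLOCK_KEY) == UNLOCK_KEY)
		mmio_write(pp_control, val & 0x3);
}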
@@ -418,7 +420,7 @@ static void intel_lvds_mode_set(struct drm_encoder *encoder,
418{ 420{
419 struct drm_device *dev = encoder->dev; 421 struct drm_device *dev = encoder->dev;
420 struct drm_i915_private *dev_priv = dev->dev_private; 422 struct drm_i915_private *dev_priv = dev->dev_private;
421 struct intel_lvds *intel_lvds = enc_to_intel_lvds(encoder); 423 struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
422 424
423 /* 425 /*
424 * The LVDS pin pair will already have been turned on in the 426 * The LVDS pin pair will already have been turned on in the
@@ -429,13 +431,23 @@ static void intel_lvds_mode_set(struct drm_encoder *encoder,
429 if (HAS_PCH_SPLIT(dev)) 431 if (HAS_PCH_SPLIT(dev))
430 return; 432 return;
431 433
434 if (!intel_lvds->pfit_dirty)
435 return;
436
432 /* 437 /*
433 * Enable automatic panel scaling so that non-native modes fill the 438 * Enable automatic panel scaling so that non-native modes fill the
434 * screen. Should be enabled before the pipe is enabled, according to 439 * screen. Should be enabled before the pipe is enabled, according to
435 * register description and PRM. 440 * register description and PRM.
436 */ 441 */
442 DRM_DEBUG_KMS("applying panel-fitter: %x, %x\n",
443 intel_lvds->pfit_control,
444 intel_lvds->pfit_pgm_ratios);
445 if (wait_for((I915_READ(PP_STATUS) & PP_ON) == 0, 1000))
446 DRM_ERROR("timed out waiting for panel to power off\n");
447
437 I915_WRITE(PFIT_PGM_RATIOS, intel_lvds->pfit_pgm_ratios); 448 I915_WRITE(PFIT_PGM_RATIOS, intel_lvds->pfit_pgm_ratios);
438 I915_WRITE(PFIT_CONTROL, intel_lvds->pfit_control); 449 I915_WRITE(PFIT_CONTROL, intel_lvds->pfit_control);
450 intel_lvds->pfit_dirty = false;
439} 451}
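Since pre-Ironlake parts only tolerate panel-fitter updates while the pipe is down, mode_set now waits for PP_STATUS to report the panel off before writing PFIT_*. The wait_for() used above is the driver's poll-with-timeout helper; a generic loop in the same spirit (sketch only, not the real macro):

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/types.h>

/* Poll until (read() & mask) == 0 or timeout_ms expires; read()
 * abstracts something like I915_READ(PP_STATUS). */
static int poll_until_clear(u32 (*read)(void), u32 mask,
			    unsigned int timeout_ms)
{
	while ((read() & mask) != 0) {
		if (timeout_ms-- == 0)
			return -ETIMEDOUT;
		msleep(1);
	}
	return 0;
}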
440 452
441/** 453/**
@@ -465,38 +477,22 @@ intel_lvds_detect(struct drm_connector *connector, bool force)
465 */ 477 */
466static int intel_lvds_get_modes(struct drm_connector *connector) 478static int intel_lvds_get_modes(struct drm_connector *connector)
467{ 479{
480 struct intel_lvds *intel_lvds = intel_attached_lvds(connector);
468 struct drm_device *dev = connector->dev; 481 struct drm_device *dev = connector->dev;
469 struct drm_encoder *encoder = intel_attached_encoder(connector); 482 struct drm_display_mode *mode;
470 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
471 struct drm_i915_private *dev_priv = dev->dev_private;
472 int ret = 0;
473
474 if (dev_priv->lvds_edid_good) {
475 ret = intel_ddc_get_modes(connector, intel_encoder->ddc_bus);
476 483
477 if (ret) 484 if (intel_lvds->edid) {
478 return ret; 485 drm_mode_connector_update_edid_property(connector,
486 intel_lvds->edid);
487 return drm_add_edid_modes(connector, intel_lvds->edid);
479 } 488 }
480 489
481 /* Didn't get an EDID, so 490 mode = drm_mode_duplicate(dev, intel_lvds->fixed_mode);
482 * Set wide sync ranges so we get all modes 491 if (mode == 0)
483 * handed to valid_mode for checking 492 return 0;
484 */
485 connector->display_info.min_vfreq = 0;
486 connector->display_info.max_vfreq = 200;
487 connector->display_info.min_hfreq = 0;
488 connector->display_info.max_hfreq = 200;
489
490 if (dev_priv->panel_fixed_mode != NULL) {
491 struct drm_display_mode *mode;
492
493 mode = drm_mode_duplicate(dev, dev_priv->panel_fixed_mode);
494 drm_mode_probed_add(connector, mode);
495
496 return 1;
497 }
498 493
499 return 0; 494 drm_mode_probed_add(connector, mode);
495 return 1;
500} 496}
501 497
502static int intel_no_modeset_on_lid_dmi_callback(const struct dmi_system_id *id) 498static int intel_no_modeset_on_lid_dmi_callback(const struct dmi_system_id *id)
@@ -587,18 +583,17 @@ static int intel_lvds_set_property(struct drm_connector *connector,
587 struct drm_property *property, 583 struct drm_property *property,
588 uint64_t value) 584 uint64_t value)
589{ 585{
586 struct intel_lvds *intel_lvds = intel_attached_lvds(connector);
590 struct drm_device *dev = connector->dev; 587 struct drm_device *dev = connector->dev;
591 588
592 if (property == dev->mode_config.scaling_mode_property && 589 if (property == dev->mode_config.scaling_mode_property) {
593 connector->encoder) { 590 struct drm_crtc *crtc = intel_lvds->base.base.crtc;
594 struct drm_crtc *crtc = connector->encoder->crtc;
595 struct drm_encoder *encoder = connector->encoder;
596 struct intel_lvds *intel_lvds = enc_to_intel_lvds(encoder);
597 591
598 if (value == DRM_MODE_SCALE_NONE) { 592 if (value == DRM_MODE_SCALE_NONE) {
599 DRM_DEBUG_KMS("no scaling not supported\n"); 593 DRM_DEBUG_KMS("no scaling not supported\n");
600 return 0; 594 return -EINVAL;
601 } 595 }
596
602 if (intel_lvds->fitting_mode == value) { 597 if (intel_lvds->fitting_mode == value) {
603 /* the LVDS scaling property is not changed */ 598 /* the LVDS scaling property is not changed */
604 return 0; 599 return 0;
@@ -628,7 +623,7 @@ static const struct drm_encoder_helper_funcs intel_lvds_helper_funcs = {
628static const struct drm_connector_helper_funcs intel_lvds_connector_helper_funcs = { 623static const struct drm_connector_helper_funcs intel_lvds_connector_helper_funcs = {
629 .get_modes = intel_lvds_get_modes, 624 .get_modes = intel_lvds_get_modes,
630 .mode_valid = intel_lvds_mode_valid, 625 .mode_valid = intel_lvds_mode_valid,
631 .best_encoder = intel_attached_encoder, 626 .best_encoder = intel_best_encoder,
632}; 627};
633 628
634static const struct drm_connector_funcs intel_lvds_connector_funcs = { 629static const struct drm_connector_funcs intel_lvds_connector_funcs = {
@@ -726,16 +721,14 @@ static const struct dmi_system_id intel_no_lvds[] = {
726 * Find the reduced downclock for LVDS in EDID. 721 * Find the reduced downclock for LVDS in EDID.
727 */ 722 */
728static void intel_find_lvds_downclock(struct drm_device *dev, 723static void intel_find_lvds_downclock(struct drm_device *dev,
729 struct drm_connector *connector) 724 struct drm_display_mode *fixed_mode,
725 struct drm_connector *connector)
730{ 726{
731 struct drm_i915_private *dev_priv = dev->dev_private; 727 struct drm_i915_private *dev_priv = dev->dev_private;
732 struct drm_display_mode *scan, *panel_fixed_mode; 728 struct drm_display_mode *scan;
733 int temp_downclock; 729 int temp_downclock;
734 730
735 panel_fixed_mode = dev_priv->panel_fixed_mode; 731 temp_downclock = fixed_mode->clock;
736 temp_downclock = panel_fixed_mode->clock;
737
738 mutex_lock(&dev->mode_config.mutex);
739 list_for_each_entry(scan, &connector->probed_modes, head) { 732 list_for_each_entry(scan, &connector->probed_modes, head) {
740 /* 733 /*
741 * If one mode has the same resolution with the fixed_panel 734 * If one mode has the same resolution with the fixed_panel
@@ -744,14 +737,14 @@ static void intel_find_lvds_downclock(struct drm_device *dev,
744 * case we can set the different FPx0/1 to dynamically select 737 * case we can set the different FPx0/1 to dynamically select
745 * between low and high frequency. 738 * between low and high frequency.
746 */ 739 */
747 if (scan->hdisplay == panel_fixed_mode->hdisplay && 740 if (scan->hdisplay == fixed_mode->hdisplay &&
748 scan->hsync_start == panel_fixed_mode->hsync_start && 741 scan->hsync_start == fixed_mode->hsync_start &&
749 scan->hsync_end == panel_fixed_mode->hsync_end && 742 scan->hsync_end == fixed_mode->hsync_end &&
750 scan->htotal == panel_fixed_mode->htotal && 743 scan->htotal == fixed_mode->htotal &&
751 scan->vdisplay == panel_fixed_mode->vdisplay && 744 scan->vdisplay == fixed_mode->vdisplay &&
752 scan->vsync_start == panel_fixed_mode->vsync_start && 745 scan->vsync_start == fixed_mode->vsync_start &&
753 scan->vsync_end == panel_fixed_mode->vsync_end && 746 scan->vsync_end == fixed_mode->vsync_end &&
754 scan->vtotal == panel_fixed_mode->vtotal) { 747 scan->vtotal == fixed_mode->vtotal) {
755 if (scan->clock < temp_downclock) { 748 if (scan->clock < temp_downclock) {
756 /* 749 /*
757 * The downclock is already found. But we 750 * The downclock is already found. But we
@@ -761,17 +754,14 @@ static void intel_find_lvds_downclock(struct drm_device *dev,
761 } 754 }
762 } 755 }
763 } 756 }
764 mutex_unlock(&dev->mode_config.mutex); 757 if (temp_downclock < fixed_mode->clock && i915_lvds_downclock) {
765 if (temp_downclock < panel_fixed_mode->clock &&
766 i915_lvds_downclock) {
767 /* We found the downclock for LVDS. */ 758 /* We found the downclock for LVDS. */
768 dev_priv->lvds_downclock_avail = 1; 759 dev_priv->lvds_downclock_avail = 1;
769 dev_priv->lvds_downclock = temp_downclock; 760 dev_priv->lvds_downclock = temp_downclock;
770 DRM_DEBUG_KMS("LVDS downclock is found in EDID. " 761 DRM_DEBUG_KMS("LVDS downclock is found in EDID. "
771 "Normal clock %dKhz, downclock %dKhz\n", 762 "Normal clock %dKhz, downclock %dKhz\n",
772 panel_fixed_mode->clock, temp_downclock); 763 fixed_mode->clock, temp_downclock);
773 } 764 }
774 return;
775} 765}
776 766
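The helper now takes the fixed mode as an argument and drops the mode_config locking it used to do internally. What remains is a pure scan: a probed mode is a downclock candidate only if every timing field matches the fixed mode and the dot clock is lower, with the minimum clock winning. The same logic as a standalone function (sketch, using the usual drm_display_mode fields):

/* Return the lowest clock among probed modes whose timings exactly
 * match 'fixed'; equals fixed->clock when no downclock exists. */
static int find_panel_downclock(struct drm_display_mode *fixed,
				struct list_head *probed_modes)
{
	struct drm_display_mode *scan;
	int best = fixed->clock;

	list_for_each_entry(scan, probed_modes, head) {
		if (scan->hdisplay == fixed->hdisplay &&
		    scan->hsync_start == fixed->hsync_start &&
		    scan->hsync_end == fixed->hsync_end &&
		    scan->htotal == fixed->htotal &&
		    scan->vdisplay == fixed->vdisplay &&
		    scan->vsync_start == fixed->vsync_start &&
		    scan->vsync_end == fixed->vsync_end &&
		    scan->vtotal == fixed->vtotal &&
		    scan->clock < best)
			best = scan->clock;
	}

	return best;
}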
777/* 767/*
@@ -780,38 +770,67 @@ static void intel_find_lvds_downclock(struct drm_device *dev,
780 * If it is present, return 1. 770 * If it is present, return 1.
781 * If it is not present, return false. 771 * If it is not present, return false.
782 * If no child dev is parsed from VBT, it assumes that the LVDS is present. 772 * If no child dev is parsed from VBT, it assumes that the LVDS is present.
783 * Note: The addin_offset should also be checked for LVDS panel.
784 * Only when it is non-zero, it is assumed that it is present.
785 */ 773 */
786static int lvds_is_present_in_vbt(struct drm_device *dev) 774static bool lvds_is_present_in_vbt(struct drm_device *dev,
775 u8 *i2c_pin)
787{ 776{
788 struct drm_i915_private *dev_priv = dev->dev_private; 777 struct drm_i915_private *dev_priv = dev->dev_private;
789 struct child_device_config *p_child; 778 int i;
790 int i, ret;
791 779
792 if (!dev_priv->child_dev_num) 780 if (!dev_priv->child_dev_num)
793 return 1; 781 return true;
794 782
795 ret = 0;
796 for (i = 0; i < dev_priv->child_dev_num; i++) { 783 for (i = 0; i < dev_priv->child_dev_num; i++) {
797 p_child = dev_priv->child_dev + i; 784 struct child_device_config *child = dev_priv->child_dev + i;
798 /* 785
799 * If the device type is not LFP, continue. 786 /* If the device type is not LFP, continue.
800 * If the device type is 0x22, it is also regarded as LFP. 787 * We have to check both the new identifiers as well as the
788 * old for compatibility with some BIOSes.
801 */ 789 */
802 if (p_child->device_type != DEVICE_TYPE_INT_LFP && 790 if (child->device_type != DEVICE_TYPE_INT_LFP &&
803 p_child->device_type != DEVICE_TYPE_LFP) 791 child->device_type != DEVICE_TYPE_LFP)
804 continue; 792 continue;
805 793
806 /* The addin_offset should be checked. Only when it is 794 if (child->i2c_pin)
807 * non-zero, it is regarded as present. 795 *i2c_pin = child->i2c_pin;
796
797 /* However, we cannot trust the BIOS writers to populate
798 * the VBT correctly. Since LVDS requires additional
799 * information from AIM blocks, a non-zero addin offset is
800 * a good indicator that the LVDS is actually present.
808 */ 801 */
809 if (p_child->addin_offset) { 802 if (child->addin_offset)
810 ret = 1; 803 return true;
811 break; 804
812 } 805 /* But even then some BIOS writers perform some black magic
806 * and instantiate the device without reference to any
807 * additional data. Trust that if the VBT was written into
808 * the OpRegion then they have validated the LVDS's existence.
809 */
810 if (dev_priv->opregion.vbt)
811 return true;
813 } 812 }
814 return ret; 813
814 return false;
815}
816
817static bool intel_lvds_ddc_probe(struct drm_device *dev, u8 pin)
818{
819 struct drm_i915_private *dev_priv = dev->dev_private;
820 u8 buf = 0;
821 struct i2c_msg msgs[] = {
822 {
823 .addr = 0xA0,
824 .flags = 0,
825 .len = 1,
826 .buf = &buf,
827 },
828 };
829 struct i2c_adapter *i2c = &dev_priv->gmbus[pin].adapter;
830 /* XXX this only appears to work when using GMBUS */
831 if (intel_gmbus_is_forced_bit(i2c))
832 return true;
833 return i2c_transfer(i2c, msgs, 1) == 1;
815} 834}
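intel_lvds_ddc_probe() above asks whether anything ACKs at the EDID address before the driver commits to LVDS setup; a failed transfer means no panel EEPROM answered. One caveat worth hedging: Linux i2c_msg addresses are 7-bit, so the EDID EEPROM is normally addressed as 0x50, 0xA0 being its 8-bit on-the-wire form. A minimal presence probe along those lines:

#include <linux/i2c.h>

#define EDID_ADDR 0x50	/* 7-bit form; 0xA0 on the wire */

/* A single one-byte write that only completes if a device ACKs. */
static bool ddc_device_present(struct i2c_adapter *adapter)
{
	u8 start = 0;
	struct i2c_msg msg = {
		.addr  = EDID_ADDR,
		.flags = 0,
		.len   = 1,
		.buf   = &start,
	};

	return i2c_transfer(adapter, &msg, 1) == 1;
}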
816 835
817/** 836/**
@@ -832,13 +851,15 @@ void intel_lvds_init(struct drm_device *dev)
832 struct drm_display_mode *scan; /* *modes, *bios_mode; */ 851 struct drm_display_mode *scan; /* *modes, *bios_mode; */
833 struct drm_crtc *crtc; 852 struct drm_crtc *crtc;
834 u32 lvds; 853 u32 lvds;
835 int pipe, gpio = GPIOC; 854 int pipe;
855 u8 pin;
836 856
837 /* Skip init on machines we know falsely report LVDS */ 857 /* Skip init on machines we know falsely report LVDS */
838 if (dmi_check_system(intel_no_lvds)) 858 if (dmi_check_system(intel_no_lvds))
839 return; 859 return;
840 860
841 if (!lvds_is_present_in_vbt(dev)) { 861 pin = GMBUS_PORT_PANEL;
862 if (!lvds_is_present_in_vbt(dev, &pin)) {
842 DRM_DEBUG_KMS("LVDS is not present in VBT\n"); 863 DRM_DEBUG_KMS("LVDS is not present in VBT\n");
843 return; 864 return;
844 } 865 }
@@ -846,11 +867,15 @@ void intel_lvds_init(struct drm_device *dev)
846 if (HAS_PCH_SPLIT(dev)) { 867 if (HAS_PCH_SPLIT(dev)) {
847 if ((I915_READ(PCH_LVDS) & LVDS_DETECTED) == 0) 868 if ((I915_READ(PCH_LVDS) & LVDS_DETECTED) == 0)
848 return; 869 return;
849 if (dev_priv->edp_support) { 870 if (dev_priv->edp.support) {
850 DRM_DEBUG_KMS("disable LVDS for eDP support\n"); 871 DRM_DEBUG_KMS("disable LVDS for eDP support\n");
851 return; 872 return;
852 } 873 }
853 gpio = PCH_GPIOC; 874 }
875
876 if (!intel_lvds_ddc_probe(dev, pin)) {
877 DRM_DEBUG_KMS("LVDS did not respond to DDC probe\n");
878 return;
854 } 879 }
855 880
856 intel_lvds = kzalloc(sizeof(struct intel_lvds), GFP_KERNEL); 881 intel_lvds = kzalloc(sizeof(struct intel_lvds), GFP_KERNEL);
@@ -864,16 +889,20 @@ void intel_lvds_init(struct drm_device *dev)
864 return; 889 return;
865 } 890 }
866 891
892 if (!HAS_PCH_SPLIT(dev)) {
893 intel_lvds->pfit_control = I915_READ(PFIT_CONTROL);
894 }
895
867 intel_encoder = &intel_lvds->base; 896 intel_encoder = &intel_lvds->base;
868 encoder = &intel_encoder->enc; 897 encoder = &intel_encoder->base;
869 connector = &intel_connector->base; 898 connector = &intel_connector->base;
870 drm_connector_init(dev, &intel_connector->base, &intel_lvds_connector_funcs, 899 drm_connector_init(dev, &intel_connector->base, &intel_lvds_connector_funcs,
871 DRM_MODE_CONNECTOR_LVDS); 900 DRM_MODE_CONNECTOR_LVDS);
872 901
873 drm_encoder_init(dev, &intel_encoder->enc, &intel_lvds_enc_funcs, 902 drm_encoder_init(dev, &intel_encoder->base, &intel_lvds_enc_funcs,
874 DRM_MODE_ENCODER_LVDS); 903 DRM_MODE_ENCODER_LVDS);
875 904
876 drm_mode_connector_attach_encoder(&intel_connector->base, &intel_encoder->enc); 905 intel_connector_attach_encoder(intel_connector, intel_encoder);
877 intel_encoder->type = INTEL_OUTPUT_LVDS; 906 intel_encoder->type = INTEL_OUTPUT_LVDS;
878 907
879 intel_encoder->clone_mask = (1 << INTEL_LVDS_CLONE_BIT); 908 intel_encoder->clone_mask = (1 << INTEL_LVDS_CLONE_BIT);
@@ -904,43 +933,41 @@ void intel_lvds_init(struct drm_device *dev)
904 * if closed, act like it's not there for now 933 * if closed, act like it's not there for now
905 */ 934 */
906 935
907 /* Set up the DDC bus. */
908 intel_encoder->ddc_bus = intel_i2c_create(dev, gpio, "LVDSDDC_C");
909 if (!intel_encoder->ddc_bus) {
910 dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration "
911 "failed.\n");
912 goto failed;
913 }
914
915 /* 936 /*
916 * Attempt to get the fixed panel mode from DDC. Assume that the 937 * Attempt to get the fixed panel mode from DDC. Assume that the
917 * preferred mode is the right one. 938 * preferred mode is the right one.
918 */ 939 */
919 dev_priv->lvds_edid_good = true; 940 intel_lvds->edid = drm_get_edid(connector,
941 &dev_priv->gmbus[pin].adapter);
920 942
921 if (!intel_ddc_get_modes(connector, intel_encoder->ddc_bus)) 943 if (!intel_lvds->edid) {
922 dev_priv->lvds_edid_good = false; 944 /* Didn't get an EDID, so
945 * Set wide sync ranges so we get all modes
946 * handed to valid_mode for checking
947 */
948 connector->display_info.min_vfreq = 0;
949 connector->display_info.max_vfreq = 200;
950 connector->display_info.min_hfreq = 0;
951 connector->display_info.max_hfreq = 200;
952 }
923 953
924 list_for_each_entry(scan, &connector->probed_modes, head) { 954 list_for_each_entry(scan, &connector->probed_modes, head) {
925 mutex_lock(&dev->mode_config.mutex);
926 if (scan->type & DRM_MODE_TYPE_PREFERRED) { 955 if (scan->type & DRM_MODE_TYPE_PREFERRED) {
927 dev_priv->panel_fixed_mode = 956 intel_lvds->fixed_mode =
928 drm_mode_duplicate(dev, scan); 957 drm_mode_duplicate(dev, scan);
929 mutex_unlock(&dev->mode_config.mutex); 958 intel_find_lvds_downclock(dev,
930 intel_find_lvds_downclock(dev, connector); 959 intel_lvds->fixed_mode,
960 connector);
931 goto out; 961 goto out;
932 } 962 }
933 mutex_unlock(&dev->mode_config.mutex);
934 } 963 }
935 964
936 /* Failed to get EDID, what about VBT? */ 965 /* Failed to get EDID, what about VBT? */
937 if (dev_priv->lfp_lvds_vbt_mode) { 966 if (dev_priv->lfp_lvds_vbt_mode) {
938 mutex_lock(&dev->mode_config.mutex); 967 intel_lvds->fixed_mode =
939 dev_priv->panel_fixed_mode =
940 drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode); 968 drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode);
941 mutex_unlock(&dev->mode_config.mutex); 969 if (intel_lvds->fixed_mode) {
942 if (dev_priv->panel_fixed_mode) { 970 intel_lvds->fixed_mode->type |=
943 dev_priv->panel_fixed_mode->type |=
944 DRM_MODE_TYPE_PREFERRED; 971 DRM_MODE_TYPE_PREFERRED;
945 goto out; 972 goto out;
946 } 973 }
@@ -958,19 +985,19 @@ void intel_lvds_init(struct drm_device *dev)
958 985
959 lvds = I915_READ(LVDS); 986 lvds = I915_READ(LVDS);
960 pipe = (lvds & LVDS_PIPEB_SELECT) ? 1 : 0; 987 pipe = (lvds & LVDS_PIPEB_SELECT) ? 1 : 0;
961 crtc = intel_get_crtc_from_pipe(dev, pipe); 988 crtc = intel_get_crtc_for_pipe(dev, pipe);
962 989
963 if (crtc && (lvds & LVDS_PORT_EN)) { 990 if (crtc && (lvds & LVDS_PORT_EN)) {
964 dev_priv->panel_fixed_mode = intel_crtc_mode_get(dev, crtc); 991 intel_lvds->fixed_mode = intel_crtc_mode_get(dev, crtc);
965 if (dev_priv->panel_fixed_mode) { 992 if (intel_lvds->fixed_mode) {
966 dev_priv->panel_fixed_mode->type |= 993 intel_lvds->fixed_mode->type |=
967 DRM_MODE_TYPE_PREFERRED; 994 DRM_MODE_TYPE_PREFERRED;
968 goto out; 995 goto out;
969 } 996 }
970 } 997 }
971 998
972 /* If we still don't have a mode after all that, give up. */ 999 /* If we still don't have a mode after all that, give up. */
973 if (!dev_priv->panel_fixed_mode) 1000 if (!intel_lvds->fixed_mode)
974 goto failed; 1001 goto failed;
975 1002
976out: 1003out:
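The init path settles the panel's fixed mode through a strict preference order: the EDID's preferred mode, then the VBT's LFP mode, then whatever mode the BIOS left programmed on the LVDS pipe; if all three fail, control jumps to failed: and the connector is torn down. The decision chain, with hypothetical helper names:

/* Sketch: the first available candidate wins; each is duplicated so
 * the connector owns its own copy of the mode. */
static struct drm_display_mode *
pick_fixed_mode(struct drm_device *dev,
		struct drm_display_mode *edid_preferred,
		struct drm_display_mode *vbt_mode,
		struct drm_display_mode *pipe_mode)
{
	struct drm_display_mode *candidates[] = {
		edid_preferred, vbt_mode, pipe_mode,
	};
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(candidates); i++)
		if (candidates[i])
			return drm_mode_duplicate(dev, candidates[i]);

	return NULL;	/* caller gives up and cleans up the connector */
}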
@@ -997,8 +1024,6 @@ out:
997 1024
998failed: 1025failed:
999 DRM_DEBUG_KMS("No LVDS modes found, disabling.\n"); 1026 DRM_DEBUG_KMS("No LVDS modes found, disabling.\n");
1000 if (intel_encoder->ddc_bus)
1001 intel_i2c_destroy(intel_encoder->ddc_bus);
1002 drm_connector_cleanup(connector); 1027 drm_connector_cleanup(connector);
1003 drm_encoder_cleanup(encoder); 1028 drm_encoder_cleanup(encoder);
1004 kfree(intel_lvds); 1029 kfree(intel_lvds);
diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_modes.c
index 4b1fd3d9c73c..f70b7cf32bff 100644
--- a/drivers/gpu/drm/i915/intel_modes.c
+++ b/drivers/gpu/drm/i915/intel_modes.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * Copyright (c) 2007 Dave Airlie <airlied@linux.ie> 2 * Copyright (c) 2007 Dave Airlie <airlied@linux.ie>
3 * Copyright (c) 2007 Intel Corporation 3 * Copyright (c) 2007, 2010 Intel Corporation
4 * Jesse Barnes <jesse.barnes@intel.com> 4 * Jesse Barnes <jesse.barnes@intel.com>
5 * 5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a 6 * Permission is hereby granted, free of charge, to any person obtaining a
@@ -34,11 +34,11 @@
34 * intel_ddc_probe 34 * intel_ddc_probe
35 * 35 *
36 */ 36 */
37bool intel_ddc_probe(struct intel_encoder *intel_encoder) 37bool intel_ddc_probe(struct intel_encoder *intel_encoder, int ddc_bus)
38{ 38{
39 struct drm_i915_private *dev_priv = intel_encoder->base.dev->dev_private;
39 u8 out_buf[] = { 0x0, 0x0}; 40 u8 out_buf[] = { 0x0, 0x0};
40 u8 buf[2]; 41 u8 buf[2];
41 int ret;
42 struct i2c_msg msgs[] = { 42 struct i2c_msg msgs[] = {
43 { 43 {
44 .addr = 0x50, 44 .addr = 0x50,
@@ -54,13 +54,7 @@ bool intel_ddc_probe(struct intel_encoder *intel_encoder)
54 } 54 }
55 }; 55 };
56 56
57 intel_i2c_quirk_set(intel_encoder->enc.dev, true); 57 return i2c_transfer(&dev_priv->gmbus[ddc_bus].adapter, msgs, 2) == 2;
58 ret = i2c_transfer(intel_encoder->ddc_bus, msgs, 2);
59 intel_i2c_quirk_set(intel_encoder->enc.dev, false);
60 if (ret == 2)
61 return true;
62
63 return false;
64} 58}
65 59
66/** 60/**
@@ -76,9 +70,7 @@ int intel_ddc_get_modes(struct drm_connector *connector,
76 struct edid *edid; 70 struct edid *edid;
77 int ret = 0; 71 int ret = 0;
78 72
79 intel_i2c_quirk_set(connector->dev, true);
80 edid = drm_get_edid(connector, adapter); 73 edid = drm_get_edid(connector, adapter);
81 intel_i2c_quirk_set(connector->dev, false);
82 if (edid) { 74 if (edid) {
83 drm_mode_connector_update_edid_property(connector, edid); 75 drm_mode_connector_update_edid_property(connector, edid);
84 ret = drm_add_edid_modes(connector, edid); 76 ret = drm_add_edid_modes(connector, edid);
diff --git a/drivers/gpu/drm/i915/i915_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index ea5d3fea4b61..917c7dc3cd6b 100644
--- a/drivers/gpu/drm/i915/i915_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -31,17 +31,16 @@
31#include "drmP.h" 31#include "drmP.h"
32#include "i915_drm.h" 32#include "i915_drm.h"
33#include "i915_drv.h" 33#include "i915_drv.h"
34#include "intel_drv.h"
34 35
35#define PCI_ASLE 0xe4 36#define PCI_ASLE 0xe4
36#define PCI_LBPC 0xf4
37#define PCI_ASLS 0xfc 37#define PCI_ASLS 0xfc
38 38
39#define OPREGION_SZ (8*1024)
40#define OPREGION_HEADER_OFFSET 0 39#define OPREGION_HEADER_OFFSET 0
41#define OPREGION_ACPI_OFFSET 0x100 40#define OPREGION_ACPI_OFFSET 0x100
42#define OPREGION_SWSCI_OFFSET 0x200 41#define OPREGION_SWSCI_OFFSET 0x200
43#define OPREGION_ASLE_OFFSET 0x300 42#define OPREGION_ASLE_OFFSET 0x300
44#define OPREGION_VBT_OFFSET 0x1000 43#define OPREGION_VBT_OFFSET 0x400
45 44
46#define OPREGION_SIGNATURE "IntelGraphicsMem" 45#define OPREGION_SIGNATURE "IntelGraphicsMem"
47#define MBOX_ACPI (1<<0) 46#define MBOX_ACPI (1<<0)
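The OpRegion is one contiguous block handed over by the BIOS, with each mailbox at a fixed offset from the mapped base; this patch moves the VBT from 0x1000 down to 0x400. Given a mapped base, the section pointers are plain offset arithmetic, roughly (sketch; 'base' would come from ioremap()):

struct opregion_view {
	char *header;	/* +0x000 */
	char *acpi;	/* +0x100  public ACPI methods mailbox */
	char *swsci;	/* +0x200  software SCI mailbox */
	char *asle;	/* +0x300  backlight/ASLE mailbox */
	char *vbt;	/* +0x400  video BIOS tables */
};

static void opregion_view_init(struct opregion_view *v, char *base)
{
	v->header = base;
	v->acpi   = base + 0x100;
	v->swsci  = base + 0x200;
	v->asle   = base + 0x300;
	v->vbt    = base + 0x400;
}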
@@ -143,40 +142,22 @@ struct opregion_asle {
143#define ACPI_DIGITAL_OUTPUT (3<<8) 142#define ACPI_DIGITAL_OUTPUT (3<<8)
144#define ACPI_LVDS_OUTPUT (4<<8) 143#define ACPI_LVDS_OUTPUT (4<<8)
145 144
145#ifdef CONFIG_ACPI
146static u32 asle_set_backlight(struct drm_device *dev, u32 bclp) 146static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
147{ 147{
148 struct drm_i915_private *dev_priv = dev->dev_private; 148 struct drm_i915_private *dev_priv = dev->dev_private;
149 struct opregion_asle *asle = dev_priv->opregion.asle; 149 struct opregion_asle *asle = dev_priv->opregion.asle;
150 u32 blc_pwm_ctl, blc_pwm_ctl2; 150 u32 max;
151 u32 max_backlight, level, shift;
152 151
153 if (!(bclp & ASLE_BCLP_VALID)) 152 if (!(bclp & ASLE_BCLP_VALID))
154 return ASLE_BACKLIGHT_FAILED; 153 return ASLE_BACKLIGHT_FAILED;
155 154
156 bclp &= ASLE_BCLP_MSK; 155 bclp &= ASLE_BCLP_MSK;
157 if (bclp < 0 || bclp > 255) 156 if (bclp > 255)
158 return ASLE_BACKLIGHT_FAILED; 157 return ASLE_BACKLIGHT_FAILED;
159 158
160 blc_pwm_ctl = I915_READ(BLC_PWM_CTL); 159 max = intel_panel_get_max_backlight(dev);
161 blc_pwm_ctl2 = I915_READ(BLC_PWM_CTL2); 160 intel_panel_set_backlight(dev, bclp * max / 255);
162
163 if (IS_I965G(dev) && (blc_pwm_ctl2 & BLM_COMBINATION_MODE))
164 pci_write_config_dword(dev->pdev, PCI_LBPC, bclp);
165 else {
166 if (IS_PINEVIEW(dev)) {
167 blc_pwm_ctl &= ~(BACKLIGHT_DUTY_CYCLE_MASK - 1);
168 max_backlight = (blc_pwm_ctl & BACKLIGHT_MODULATION_FREQ_MASK) >>
169 BACKLIGHT_MODULATION_FREQ_SHIFT;
170 shift = BACKLIGHT_DUTY_CYCLE_SHIFT + 1;
171 } else {
172 blc_pwm_ctl &= ~BACKLIGHT_DUTY_CYCLE_MASK;
173 max_backlight = ((blc_pwm_ctl & BACKLIGHT_MODULATION_FREQ_MASK) >>
174 BACKLIGHT_MODULATION_FREQ_SHIFT) * 2;
175 shift = BACKLIGHT_DUTY_CYCLE_SHIFT;
176 }
177 level = (bclp * max_backlight) / 255;
178 I915_WRITE(BLC_PWM_CTL, blc_pwm_ctl | (level << shift));
179 }
180 asle->cblv = (bclp*0x64)/0xff | ASLE_CBLV_VALID; 161 asle->cblv = (bclp*0x64)/0xff | ASLE_CBLV_VALID;
181 162
182 return 0; 163 return 0;
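With the PWM details delegated to intel_panel, the ASLE backlight request reduces to two linear scalings: the 0-255 bclp request maps onto the panel's maximum duty cycle, and the resulting level is reported back through cblv as a 0-100 percentage. Numerically (sketch):

#include <stdint.h>

/* Scale an ASLE request (0..255) onto the panel's PWM range. */
static uint32_t bclp_to_duty(uint32_t bclp, uint32_t max_duty)
{
	return bclp * max_duty / 255;
}

/* Report back the way cblv is built above:
 * bclp = 128 gives (128 * 0x64) / 0xff = 50 percent. */
static uint32_t bclp_to_cblv_percent(uint32_t bclp)
{
	return bclp * 0x64 / 0xff;
}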
@@ -211,7 +192,7 @@ static u32 asle_set_pfit(struct drm_device *dev, u32 pfit)
211 return 0; 192 return 0;
212} 193}
213 194
214void opregion_asle_intr(struct drm_device *dev) 195void intel_opregion_asle_intr(struct drm_device *dev)
215{ 196{
216 struct drm_i915_private *dev_priv = dev->dev_private; 197 struct drm_i915_private *dev_priv = dev->dev_private;
217 struct opregion_asle *asle = dev_priv->opregion.asle; 198 struct opregion_asle *asle = dev_priv->opregion.asle;
@@ -243,37 +224,8 @@ void opregion_asle_intr(struct drm_device *dev)
243 asle->aslc = asle_stat; 224 asle->aslc = asle_stat;
244} 225}
245 226
246static u32 asle_set_backlight_ironlake(struct drm_device *dev, u32 bclp) 227/* Only present on Ironlake+ */
247{ 228void intel_opregion_gse_intr(struct drm_device *dev)
248 struct drm_i915_private *dev_priv = dev->dev_private;
249 struct opregion_asle *asle = dev_priv->opregion.asle;
250 u32 cpu_pwm_ctl, pch_pwm_ctl2;
251 u32 max_backlight, level;
252
253 if (!(bclp & ASLE_BCLP_VALID))
254 return ASLE_BACKLIGHT_FAILED;
255
256 bclp &= ASLE_BCLP_MSK;
257 if (bclp < 0 || bclp > 255)
258 return ASLE_BACKLIGHT_FAILED;
259
260 cpu_pwm_ctl = I915_READ(BLC_PWM_CPU_CTL);
261 pch_pwm_ctl2 = I915_READ(BLC_PWM_PCH_CTL2);
262 /* get the max PWM frequency */
263 max_backlight = (pch_pwm_ctl2 >> 16) & BACKLIGHT_DUTY_CYCLE_MASK;
264 /* calculate the expected PMW frequency */
265 level = (bclp * max_backlight) / 255;
266 /* reserve the high 16 bits */
267 cpu_pwm_ctl &= ~(BACKLIGHT_DUTY_CYCLE_MASK);
268 /* write the updated PWM frequency */
269 I915_WRITE(BLC_PWM_CPU_CTL, cpu_pwm_ctl | level);
270
271 asle->cblv = (bclp*0x64)/0xff | ASLE_CBLV_VALID;
272
273 return 0;
274}
275
276void ironlake_opregion_gse_intr(struct drm_device *dev)
277{ 229{
278 struct drm_i915_private *dev_priv = dev->dev_private; 230 struct drm_i915_private *dev_priv = dev->dev_private;
279 struct opregion_asle *asle = dev_priv->opregion.asle; 231 struct opregion_asle *asle = dev_priv->opregion.asle;
@@ -296,7 +248,7 @@ void ironlake_opregion_gse_intr(struct drm_device *dev)
296 } 248 }
297 249
298 if (asle_req & ASLE_SET_BACKLIGHT) 250 if (asle_req & ASLE_SET_BACKLIGHT)
299 asle_stat |= asle_set_backlight_ironlake(dev, asle->bclp); 251 asle_stat |= asle_set_backlight(dev, asle->bclp);
300 252
301 if (asle_req & ASLE_SET_PFIT) { 253 if (asle_req & ASLE_SET_PFIT) {
302 DRM_DEBUG_DRIVER("Pfit is not supported\n"); 254 DRM_DEBUG_DRIVER("Pfit is not supported\n");
@@ -315,7 +267,7 @@ void ironlake_opregion_gse_intr(struct drm_device *dev)
315#define ASLE_PFIT_EN (1<<2) 267#define ASLE_PFIT_EN (1<<2)
316#define ASLE_PFMB_EN (1<<3) 268#define ASLE_PFMB_EN (1<<3)
317 269
318void opregion_enable_asle(struct drm_device *dev) 270void intel_opregion_enable_asle(struct drm_device *dev)
319{ 271{
320 struct drm_i915_private *dev_priv = dev->dev_private; 272 struct drm_i915_private *dev_priv = dev->dev_private;
321 struct opregion_asle *asle = dev_priv->opregion.asle; 273 struct opregion_asle *asle = dev_priv->opregion.asle;
@@ -464,7 +416,58 @@ blind_set:
464 goto end; 416 goto end;
465} 417}
466 418
467int intel_opregion_init(struct drm_device *dev, int resume) 419void intel_opregion_init(struct drm_device *dev)
420{
421 struct drm_i915_private *dev_priv = dev->dev_private;
422 struct intel_opregion *opregion = &dev_priv->opregion;
423
424 if (!opregion->header)
425 return;
426
427 if (opregion->acpi) {
428 if (drm_core_check_feature(dev, DRIVER_MODESET))
429 intel_didl_outputs(dev);
430
431 /* Notify BIOS we are ready to handle ACPI video ext notifs.
432 * Right now, all the events are handled by the ACPI video module.
433 * We don't actually need to do anything with them. */
434 opregion->acpi->csts = 0;
435 opregion->acpi->drdy = 1;
436
437 system_opregion = opregion;
438 register_acpi_notifier(&intel_opregion_notifier);
439 }
440
441 if (opregion->asle)
442 intel_opregion_enable_asle(dev);
443}
444
445void intel_opregion_fini(struct drm_device *dev)
446{
447 struct drm_i915_private *dev_priv = dev->dev_private;
448 struct intel_opregion *opregion = &dev_priv->opregion;
449
450 if (!opregion->header)
451 return;
452
453 if (opregion->acpi) {
454 opregion->acpi->drdy = 0;
455
456 system_opregion = NULL;
457 unregister_acpi_notifier(&intel_opregion_notifier);
458 }
459
460 /* just clear all opregion memory pointers now */
461 iounmap(opregion->header);
462 opregion->header = NULL;
463 opregion->acpi = NULL;
464 opregion->swsci = NULL;
465 opregion->asle = NULL;
466 opregion->vbt = NULL;
467}
468#endif
469
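The new init/fini pair performs the ACPI handshake symmetrically: init clears the status word, sets drdy to announce that the driver will accept video-extension events, and registers the notifier; fini clears drdy and unregisters before the mapping is torn down. The handshake in isolation (sketch; drdy and csts are the mailbox fields used above):

#include <stdint.h>

struct acpi_mailbox {
	uint32_t drdy;	/* driver-ready flag read by the BIOS */
	uint32_t csts;	/* status word */
};

static void opregion_publish(struct acpi_mailbox *acpi)
{
	acpi->csts = 0;	/* clear stale status */
	acpi->drdy = 1;	/* BIOS may now deliver events */
	/* register_acpi_notifier(...) follows in the real code */
}

static void opregion_retract(struct acpi_mailbox *acpi)
{
	acpi->drdy = 0;	/* stop accepting events */
	/* unregister_acpi_notifier(...) follows in the real code */
}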
470int intel_opregion_setup(struct drm_device *dev)
468{ 471{
469 struct drm_i915_private *dev_priv = dev->dev_private; 472 struct drm_i915_private *dev_priv = dev->dev_private;
470 struct intel_opregion *opregion = &dev_priv->opregion; 473 struct intel_opregion *opregion = &dev_priv->opregion;
@@ -479,29 +482,23 @@ int intel_opregion_init(struct drm_device *dev, int resume)
479 return -ENOTSUPP; 482 return -ENOTSUPP;
480 } 483 }
481 484
482 base = ioremap(asls, OPREGION_SZ); 485 base = ioremap(asls, OPREGION_SIZE);
483 if (!base) 486 if (!base)
484 return -ENOMEM; 487 return -ENOMEM;
485 488
486 opregion->header = base; 489 if (memcmp(base, OPREGION_SIGNATURE, 16)) {
487 if (memcmp(opregion->header->signature, OPREGION_SIGNATURE, 16)) {
488 DRM_DEBUG_DRIVER("opregion signature mismatch\n"); 490 DRM_DEBUG_DRIVER("opregion signature mismatch\n");
489 err = -EINVAL; 491 err = -EINVAL;
490 goto err_out; 492 goto err_out;
491 } 493 }
494 opregion->header = base;
495 opregion->vbt = base + OPREGION_VBT_OFFSET;
492 496
493 mboxes = opregion->header->mboxes; 497 mboxes = opregion->header->mboxes;
494 if (mboxes & MBOX_ACPI) { 498 if (mboxes & MBOX_ACPI) {
495 DRM_DEBUG_DRIVER("Public ACPI methods supported\n"); 499 DRM_DEBUG_DRIVER("Public ACPI methods supported\n");
496 opregion->acpi = base + OPREGION_ACPI_OFFSET; 500 opregion->acpi = base + OPREGION_ACPI_OFFSET;
497 if (drm_core_check_feature(dev, DRIVER_MODESET))
498 intel_didl_outputs(dev);
499 } else {
500 DRM_DEBUG_DRIVER("Public ACPI methods not supported\n");
501 err = -ENOTSUPP;
502 goto err_out;
503 } 501 }
504 opregion->enabled = 1;
505 502
506 if (mboxes & MBOX_SWSCI) { 503 if (mboxes & MBOX_SWSCI) {
507 DRM_DEBUG_DRIVER("SWSCI supported\n"); 504 DRM_DEBUG_DRIVER("SWSCI supported\n");
@@ -510,53 +507,11 @@ int intel_opregion_init(struct drm_device *dev, int resume)
510 if (mboxes & MBOX_ASLE) { 507 if (mboxes & MBOX_ASLE) {
511 DRM_DEBUG_DRIVER("ASLE supported\n"); 508 DRM_DEBUG_DRIVER("ASLE supported\n");
512 opregion->asle = base + OPREGION_ASLE_OFFSET; 509 opregion->asle = base + OPREGION_ASLE_OFFSET;
513 opregion_enable_asle(dev);
514 } 510 }
515 511
516 if (!resume)
517 acpi_video_register();
518
519
520 /* Notify BIOS we are ready to handle ACPI video ext notifs.
521 * Right now, all the events are handled by the ACPI video module.
522 * We don't actually need to do anything with them. */
523 opregion->acpi->csts = 0;
524 opregion->acpi->drdy = 1;
525
526 system_opregion = opregion;
527 register_acpi_notifier(&intel_opregion_notifier);
528
529 return 0; 512 return 0;
530 513
531err_out: 514err_out:
532 iounmap(opregion->header); 515 iounmap(opregion->header);
533 opregion->header = NULL;
534 acpi_video_register();
535 return err; 516 return err;
536} 517}
537
538void intel_opregion_free(struct drm_device *dev, int suspend)
539{
540 struct drm_i915_private *dev_priv = dev->dev_private;
541 struct intel_opregion *opregion = &dev_priv->opregion;
542
543 if (!opregion->enabled)
544 return;
545
546 if (!suspend)
547 acpi_video_unregister();
548
549 opregion->acpi->drdy = 0;
550
551 system_opregion = NULL;
552 unregister_acpi_notifier(&intel_opregion_notifier);
553
554 /* just clear all opregion memory pointers now */
555 iounmap(opregion->header);
556 opregion->header = NULL;
557 opregion->acpi = NULL;
558 opregion->swsci = NULL;
559 opregion->asle = NULL;
560
561 opregion->enabled = 0;
562}
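intel_opregion_setup() is now a pure discovery pass: map the region, verify the 16-byte signature, record one pointer per advertised mailbox bit, and never leak the mapping on a failure path. That map/validate/unmap-on-error shape in isolation (sketch; the __force cast papers over the __iomem annotation the way the driver's own memcmp() on the header does):

#include <linux/io.h>
#include <linux/string.h>

static void __iomem *map_and_validate(resource_size_t phys, size_t size)
{
	void __iomem *base = ioremap(phys, size);

	if (base == NULL)
		return NULL;

	if (memcmp((const void __force *)base, "IntelGraphicsMem", 16)) {
		iounmap(base);	/* invalid: do not leave it mapped */
		return NULL;
	}

	return base;
}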
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 1d306a458be6..375316a8420e 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -170,57 +170,143 @@ struct overlay_registers {
170 u16 RESERVEDG[0x100 / 2 - N_HORIZ_UV_TAPS * N_PHASES]; 170 u16 RESERVEDG[0x100 / 2 - N_HORIZ_UV_TAPS * N_PHASES];
171}; 171};
172 172
173/* overlay flip addr flag */ 173struct intel_overlay {
174#define OFC_UPDATE 0x1 174 struct drm_device *dev;
175 175 struct intel_crtc *crtc;
176#define OVERLAY_NONPHYSICAL(dev) (IS_G33(dev) || IS_I965G(dev)) 176 struct drm_i915_gem_object *vid_bo;
177#define OVERLAY_EXISTS(dev) (!IS_G4X(dev) && !IS_IRONLAKE(dev) && !IS_GEN6(dev)) 177 struct drm_i915_gem_object *old_vid_bo;
178 178 int active;
179 int pfit_active;
180 u32 pfit_vscale_ratio; /* shifted-point number, (1<<12) == 1.0 */
181 u32 color_key;
182 u32 brightness, contrast, saturation;
183 u32 old_xscale, old_yscale;
184 /* register access */
185 u32 flip_addr;
186 struct drm_i915_gem_object *reg_bo;
187 /* flip handling */
188 uint32_t last_flip_req;
189 void (*flip_tail)(struct intel_overlay *);
190};
179 191
180static struct overlay_registers *intel_overlay_map_regs_atomic(struct intel_overlay *overlay) 192static struct overlay_registers *
193intel_overlay_map_regs(struct intel_overlay *overlay)
181{ 194{
182 drm_i915_private_t *dev_priv = overlay->dev->dev_private; 195 drm_i915_private_t *dev_priv = overlay->dev->dev_private;
183 struct overlay_registers *regs; 196 struct overlay_registers *regs;
184 197
185 /* no recursive mappings */ 198 if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
186 BUG_ON(overlay->virt_addr); 199 regs = overlay->reg_bo->phys_obj->handle->vaddr;
200 else
201 regs = io_mapping_map_wc(dev_priv->mm.gtt_mapping,
202 overlay->reg_bo->gtt_offset);
187 203
188 if (OVERLAY_NONPHYSICAL(overlay->dev)) { 204 return regs;
189 regs = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping, 205}
190 overlay->reg_bo->gtt_offset,
191 KM_USER0);
192 206
193 if (!regs) { 207static void intel_overlay_unmap_regs(struct intel_overlay *overlay,
194 DRM_ERROR("failed to map overlay regs in GTT\n"); 208 struct overlay_registers *regs)
195 return NULL; 209{
196 } 210 if (!OVERLAY_NEEDS_PHYSICAL(overlay->dev))
197 } else 211 io_mapping_unmap(regs);
198 regs = overlay->reg_bo->phys_obj->handle->vaddr; 212}
213
214static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
215 struct drm_i915_gem_request *request,
216 bool interruptible,
217 void (*tail)(struct intel_overlay *))
218{
219 struct drm_device *dev = overlay->dev;
220 drm_i915_private_t *dev_priv = dev->dev_private;
221 int ret;
222
223 BUG_ON(overlay->last_flip_req);
224 overlay->last_flip_req =
225 i915_add_request(dev, NULL, request, &dev_priv->render_ring);
226 if (overlay->last_flip_req == 0)
227 return -ENOMEM;
199 228
200 return overlay->virt_addr = regs; 229 overlay->flip_tail = tail;
230 ret = i915_do_wait_request(dev,
231 overlay->last_flip_req, true,
232 &dev_priv->render_ring);
233 if (ret)
234 return ret;
235
236 overlay->last_flip_req = 0;
237 return 0;
201} 238}
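intel_overlay_do_wait_request() is the distilled ritual used throughout this rewrite: emit a request behind the ring commands, stash a tail continuation, then wait synchronously. If the wait is interrupted by a signal, last_flip_req stays set so the recovery path can re-wait and run the stashed tail. The shape, abstracted (sketch; emit and wait stand in for i915_add_request() and i915_do_wait_request()):

#include <errno.h>
#include <stdint.h>

struct flip_ctx {
	uint32_t last_req;
	void (*tail)(struct flip_ctx *);
};

static int emit_and_wait(struct flip_ctx *ctx,
			 uint32_t (*emit)(void),
			 int (*wait)(uint32_t seqno),
			 void (*tail)(struct flip_ctx *))
{
	int ret;

	ctx->last_req = emit();
	if (ctx->last_req == 0)
		return -ENOMEM;

	ctx->tail = tail;		/* consumed by the recovery path */
	ret = wait(ctx->last_req);
	if (ret)
		return ret;		/* last_req stays set for recovery */

	ctx->last_req = 0;
	return 0;
}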
202 239
203static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay) 240/* Workaround for i830 bug where pipe a must be enabled to change control regs */
241static int
242i830_activate_pipe_a(struct drm_device *dev)
204{ 243{
205 if (OVERLAY_NONPHYSICAL(overlay->dev)) 244 drm_i915_private_t *dev_priv = dev->dev_private;
206 io_mapping_unmap_atomic(overlay->virt_addr, KM_USER0); 245 struct intel_crtc *crtc;
246 struct drm_crtc_helper_funcs *crtc_funcs;
247 struct drm_display_mode vesa_640x480 = {
248 DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
249 752, 800, 0, 480, 489, 492, 525, 0,
250 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC)
251 }, *mode;
252
253 crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[0]);
254 if (crtc->dpms_mode == DRM_MODE_DPMS_ON)
255 return 0;
207 256
208 overlay->virt_addr = NULL; 257 /* most i8xx have pipe a forced on, so don't trust dpms mode */
258 if (I915_READ(PIPEACONF) & PIPECONF_ENABLE)
259 return 0;
209 260
210 return; 261 crtc_funcs = crtc->base.helper_private;
262 if (crtc_funcs->dpms == NULL)
263 return 0;
264
265 DRM_DEBUG_DRIVER("Enabling pipe A in order to enable overlay\n");
266
267 mode = drm_mode_duplicate(dev, &vesa_640x480);
268 drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
269 if(!drm_crtc_helper_set_mode(&crtc->base, mode,
270 crtc->base.x, crtc->base.y,
271 crtc->base.fb))
272 return 0;
273
274 crtc_funcs->dpms(&crtc->base, DRM_MODE_DPMS_ON);
275 return 1;
276}
277
278static void
279i830_deactivate_pipe_a(struct drm_device *dev)
280{
281 drm_i915_private_t *dev_priv = dev->dev_private;
282 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[0];
283 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
284
285 crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
211} 286}
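The i830 workaround is a strict bracket: force pipe A on only if it was genuinely off (checking PIPEACONF rather than trusting the cached DPMS state), do the work, then undo exactly what was done. Its return convention, greater than zero for "quirk applied", zero for "nothing needed", negative on error, composes like this (sketch):

static int with_pipe_a_quirk(struct drm_device *dev,
			     int (*activate)(struct drm_device *),
			     void (*deactivate)(struct drm_device *),
			     int (*body)(struct drm_device *))
{
	int applied = activate(dev);	/* e.g. i830_activate_pipe_a() */
	int ret;

	if (applied < 0)
		return applied;

	ret = body(dev);

	if (applied)			/* only undo what we did */
		deactivate(dev);

	return ret;
}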
212 287
213/* overlay needs to be disabled in OCMD reg */ 288
214static int intel_overlay_on(struct intel_overlay *overlay) 289static int intel_overlay_on(struct intel_overlay *overlay)
215{ 290{
216 struct drm_device *dev = overlay->dev; 291 struct drm_device *dev = overlay->dev;
292 struct drm_i915_gem_request *request;
293 int pipe_a_quirk = 0;
217 int ret; 294 int ret;
218 drm_i915_private_t *dev_priv = dev->dev_private;
219 295
220 BUG_ON(overlay->active); 296 BUG_ON(overlay->active);
221
222 overlay->active = 1; 297 overlay->active = 1;
223 overlay->hw_wedged = NEEDS_WAIT_FOR_FLIP; 298
299 if (IS_I830(dev)) {
300 pipe_a_quirk = i830_activate_pipe_a(dev);
301 if (pipe_a_quirk < 0)
302 return pipe_a_quirk;
303 }
304
305 request = kzalloc(sizeof(*request), GFP_KERNEL);
306 if (request == NULL) {
307 ret = -ENOMEM;
308 goto out;
309 }
224 310
225 BEGIN_LP_RING(4); 311 BEGIN_LP_RING(4);
226 OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_ON); 312 OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_ON);
@@ -229,32 +315,30 @@ static int intel_overlay_on(struct intel_overlay *overlay)
229 OUT_RING(MI_NOOP); 315 OUT_RING(MI_NOOP);
230 ADVANCE_LP_RING(); 316 ADVANCE_LP_RING();
231 317
232 overlay->last_flip_req = 318 ret = intel_overlay_do_wait_request(overlay, request, true, NULL);
233 i915_add_request(dev, NULL, 0, &dev_priv->render_ring); 319out:
234 if (overlay->last_flip_req == 0) 320 if (pipe_a_quirk)
235 return -ENOMEM; 321 i830_deactivate_pipe_a(dev);
236
237 ret = i915_do_wait_request(dev,
238 overlay->last_flip_req, 1, &dev_priv->render_ring);
239 if (ret != 0)
240 return ret;
241 322
242 overlay->hw_wedged = 0; 323 return ret;
243 overlay->last_flip_req = 0;
244 return 0;
245} 324}
246 325
247/* overlay needs to be enabled in OCMD reg */ 326/* overlay needs to be enabled in OCMD reg */
248static void intel_overlay_continue(struct intel_overlay *overlay, 327static int intel_overlay_continue(struct intel_overlay *overlay,
249 bool load_polyphase_filter) 328 bool load_polyphase_filter)
250{ 329{
251 struct drm_device *dev = overlay->dev; 330 struct drm_device *dev = overlay->dev;
252 drm_i915_private_t *dev_priv = dev->dev_private; 331 drm_i915_private_t *dev_priv = dev->dev_private;
332 struct drm_i915_gem_request *request;
253 u32 flip_addr = overlay->flip_addr; 333 u32 flip_addr = overlay->flip_addr;
254 u32 tmp; 334 u32 tmp;
255 335
256 BUG_ON(!overlay->active); 336 BUG_ON(!overlay->active);
257 337
338 request = kzalloc(sizeof(*request), GFP_KERNEL);
339 if (request == NULL)
340 return -ENOMEM;
341
258 if (load_polyphase_filter) 342 if (load_polyphase_filter)
259 flip_addr |= OFC_UPDATE; 343 flip_addr |= OFC_UPDATE;
260 344
@@ -269,220 +353,132 @@ static void intel_overlay_continue(struct intel_overlay *overlay,
269 ADVANCE_LP_RING(); 353 ADVANCE_LP_RING();
270 354
271 overlay->last_flip_req = 355 overlay->last_flip_req =
272 i915_add_request(dev, NULL, 0, &dev_priv->render_ring); 356 i915_add_request(dev, NULL, request, &dev_priv->render_ring);
357 return 0;
273} 358}
274 359
275static int intel_overlay_wait_flip(struct intel_overlay *overlay) 360static void intel_overlay_release_old_vid_tail(struct intel_overlay *overlay)
276{ 361{
277 struct drm_device *dev = overlay->dev; 362 struct drm_gem_object *obj = &overlay->old_vid_bo->base;
278 drm_i915_private_t *dev_priv = dev->dev_private;
279 int ret;
280 u32 tmp;
281
282 if (overlay->last_flip_req != 0) {
283 ret = i915_do_wait_request(dev, overlay->last_flip_req,
284 1, &dev_priv->render_ring);
285 if (ret == 0) {
286 overlay->last_flip_req = 0;
287 363
288 tmp = I915_READ(ISR); 364 i915_gem_object_unpin(obj);
289 365 drm_gem_object_unreference(obj);
290 if (!(tmp & I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT))
291 return 0;
292 }
293 }
294 366
295 /* synchronous slowpath */ 367 overlay->old_vid_bo = NULL;
296 overlay->hw_wedged = RELEASE_OLD_VID; 368}
297 369
298 BEGIN_LP_RING(2); 370static void intel_overlay_off_tail(struct intel_overlay *overlay)
299 OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); 371{
300 OUT_RING(MI_NOOP); 372 struct drm_gem_object *obj;
301 ADVANCE_LP_RING();
302 373
303 overlay->last_flip_req = 374 /* never have the overlay hw on without showing a frame */
304 i915_add_request(dev, NULL, 0, &dev_priv->render_ring); 375 BUG_ON(!overlay->vid_bo);
305 if (overlay->last_flip_req == 0) 376 obj = &overlay->vid_bo->base;
306 return -ENOMEM;
307 377
308 ret = i915_do_wait_request(dev, overlay->last_flip_req, 378 i915_gem_object_unpin(obj);
309 1, &dev_priv->render_ring); 379 drm_gem_object_unreference(obj);
310 if (ret != 0) 380 overlay->vid_bo = NULL;
311 return ret;
312 381
313 overlay->hw_wedged = 0; 382 overlay->crtc->overlay = NULL;
314 overlay->last_flip_req = 0; 383 overlay->crtc = NULL;
315 return 0; 384 overlay->active = 0;
316} 385}
317 386
318/* overlay needs to be disabled in OCMD reg */ 387/* overlay needs to be disabled in OCMD reg */
319static int intel_overlay_off(struct intel_overlay *overlay) 388static int intel_overlay_off(struct intel_overlay *overlay,
389 bool interruptible)
320{ 390{
321 u32 flip_addr = overlay->flip_addr;
322 struct drm_device *dev = overlay->dev; 391 struct drm_device *dev = overlay->dev;
323 drm_i915_private_t *dev_priv = dev->dev_private; 392 u32 flip_addr = overlay->flip_addr;
324 int ret; 393 struct drm_i915_gem_request *request;
325 394
326 BUG_ON(!overlay->active); 395 BUG_ON(!overlay->active);
327 396
397 request = kzalloc(sizeof(*request), GFP_KERNEL);
398 if (request == NULL)
399 return -ENOMEM;
400
328 /* According to intel docs the overlay hw may hang (when switching 401 /* According to intel docs the overlay hw may hang (when switching
329 * off) without loading the filter coeffs. It is however unclear whether 402 * off) without loading the filter coeffs. It is however unclear whether
330 * this applies to the disabling of the overlay or to the switching off 403 * this applies to the disabling of the overlay or to the switching off
331 * of the hw. Do it in both cases */ 404 * of the hw. Do it in both cases */
332 flip_addr |= OFC_UPDATE; 405 flip_addr |= OFC_UPDATE;
333 406
407 BEGIN_LP_RING(6);
334 /* wait for overlay to go idle */ 408 /* wait for overlay to go idle */
335 overlay->hw_wedged = SWITCH_OFF_STAGE_1;
336
337 BEGIN_LP_RING(4);
338 OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE); 409 OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
339 OUT_RING(flip_addr); 410 OUT_RING(flip_addr);
340 OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); 411 OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
341 OUT_RING(MI_NOOP);
342 ADVANCE_LP_RING();
343
344 overlay->last_flip_req =
345 i915_add_request(dev, NULL, 0, &dev_priv->render_ring);
346 if (overlay->last_flip_req == 0)
347 return -ENOMEM;
348
349 ret = i915_do_wait_request(dev, overlay->last_flip_req,
350 1, &dev_priv->render_ring);
351 if (ret != 0)
352 return ret;
353
354 /* turn overlay off */ 412 /* turn overlay off */
355 overlay->hw_wedged = SWITCH_OFF_STAGE_2; 413 OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
356
357 BEGIN_LP_RING(4);
358 OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
359 OUT_RING(flip_addr); 414 OUT_RING(flip_addr);
360 OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); 415 OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
361 OUT_RING(MI_NOOP);
362 ADVANCE_LP_RING(); 416 ADVANCE_LP_RING();
363 417
364 overlay->last_flip_req = 418 return intel_overlay_do_wait_request(overlay, request, interruptible,
365 i915_add_request(dev, NULL, 0, &dev_priv->render_ring); 419 intel_overlay_off_tail);
366 if (overlay->last_flip_req == 0)
367 return -ENOMEM;
368
369 ret = i915_do_wait_request(dev, overlay->last_flip_req,
370 1, &dev_priv->render_ring);
371 if (ret != 0)
372 return ret;
373
374 overlay->hw_wedged = 0;
375 overlay->last_flip_req = 0;
376 return ret;
377}
378
379static void intel_overlay_off_tail(struct intel_overlay *overlay)
380{
381 struct drm_gem_object *obj;
382
383 /* never have the overlay hw on without showing a frame */
384 BUG_ON(!overlay->vid_bo);
385 obj = &overlay->vid_bo->base;
386
387 i915_gem_object_unpin(obj);
388 drm_gem_object_unreference(obj);
389 overlay->vid_bo = NULL;
390
391 overlay->crtc->overlay = NULL;
392 overlay->crtc = NULL;
393 overlay->active = 0;
394} 420}
395 421
396/* recover from an interruption due to a signal 422/* recover from an interruption due to a signal
397 * We have to be careful not to repeat work forever and make forward progress. */ 423
398int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay, 424static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay,
399 int interruptible) 425 bool interruptible)
400{ 426{
401 struct drm_device *dev = overlay->dev; 427 struct drm_device *dev = overlay->dev;
402 struct drm_gem_object *obj;
403 drm_i915_private_t *dev_priv = dev->dev_private; 428 drm_i915_private_t *dev_priv = dev->dev_private;
404 u32 flip_addr;
405 int ret; 429 int ret;
406 430
407 if (overlay->hw_wedged == HW_WEDGED) 431 if (overlay->last_flip_req == 0)
408 return -EIO; 432 return 0;
409
410 if (overlay->last_flip_req == 0) {
411 overlay->last_flip_req =
412 i915_add_request(dev, NULL, 0, &dev_priv->render_ring);
413 if (overlay->last_flip_req == 0)
414 return -ENOMEM;
415 }
416 433
417 ret = i915_do_wait_request(dev, overlay->last_flip_req, 434 ret = i915_do_wait_request(dev, overlay->last_flip_req,
418 interruptible, &dev_priv->render_ring); 435 interruptible, &dev_priv->render_ring);
419 if (ret != 0) 436 if (ret)
420 return ret; 437 return ret;
421 438
422 switch (overlay->hw_wedged) { 439 if (overlay->flip_tail)
423 case RELEASE_OLD_VID: 440 overlay->flip_tail(overlay);
424 obj = &overlay->old_vid_bo->base;
425 i915_gem_object_unpin(obj);
426 drm_gem_object_unreference(obj);
427 overlay->old_vid_bo = NULL;
428 break;
429 case SWITCH_OFF_STAGE_1:
430 flip_addr = overlay->flip_addr;
431 flip_addr |= OFC_UPDATE;
432
433 overlay->hw_wedged = SWITCH_OFF_STAGE_2;
434
435 BEGIN_LP_RING(4);
436 OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
437 OUT_RING(flip_addr);
438 OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
439 OUT_RING(MI_NOOP);
440 ADVANCE_LP_RING();
441
442 overlay->last_flip_req = i915_add_request(dev, NULL,
443 0, &dev_priv->render_ring);
444 if (overlay->last_flip_req == 0)
445 return -ENOMEM;
446
447 ret = i915_do_wait_request(dev, overlay->last_flip_req,
448 interruptible, &dev_priv->render_ring);
449 if (ret != 0)
450 return ret;
451
452 case SWITCH_OFF_STAGE_2:
453 intel_overlay_off_tail(overlay);
454 break;
455 default:
456 BUG_ON(overlay->hw_wedged != NEEDS_WAIT_FOR_FLIP);
457 }
458 441
459 overlay->hw_wedged = 0;
460 overlay->last_flip_req = 0; 442 overlay->last_flip_req = 0;
461 return 0; 443 return 0;
462} 444}
463 445
464/* Wait for pending overlay flip and release old frame. 446 * Needs to be called before the overlay registers are changed
465 * Needs to be called before the overlay registers are changed 447 * via intel_overlay_(un)map_regs
466 * via intel_overlay_(un)map_regs_atomic */ 448 * via intel_overlay_(un)map_regs
449 */
467static int intel_overlay_release_old_vid(struct intel_overlay *overlay) 450static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
468{ 451{
452 struct drm_device *dev = overlay->dev;
453 drm_i915_private_t *dev_priv = dev->dev_private;
469 int ret; 454 int ret;
470 struct drm_gem_object *obj;
471 455
472 /* only wait if there is actually an old frame to release to 456 /* Only wait if there is actually an old frame to release to
473 * guarantee forward progress */ 457 * guarantee forward progress.
458 */
474 if (!overlay->old_vid_bo) 459 if (!overlay->old_vid_bo)
475 return 0; 460 return 0;
476 461
477 ret = intel_overlay_wait_flip(overlay); 462 if (I915_READ(ISR) & I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT) {
478 if (ret != 0) 463 struct drm_i915_gem_request *request;
479 return ret;
480 464
481 obj = &overlay->old_vid_bo->base; 465 /* synchronous slowpath */
482 i915_gem_object_unpin(obj); 466 request = kzalloc(sizeof(*request), GFP_KERNEL);
483 drm_gem_object_unreference(obj); 467 if (request == NULL)
484 overlay->old_vid_bo = NULL; 468 return -ENOMEM;
469
470 BEGIN_LP_RING(2);
471 OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
472 OUT_RING(MI_NOOP);
473 ADVANCE_LP_RING();
485 474
475 ret = intel_overlay_do_wait_request(overlay, request, true,
476 intel_overlay_release_old_vid_tail);
477 if (ret)
478 return ret;
479 }
480
481 intel_overlay_release_old_vid_tail(overlay);
486 return 0; 482 return 0;
487} 483}
488 484
@@ -506,65 +502,65 @@ struct put_image_params {
506static int packed_depth_bytes(u32 format) 502static int packed_depth_bytes(u32 format)
507{ 503{
508 switch (format & I915_OVERLAY_DEPTH_MASK) { 504 switch (format & I915_OVERLAY_DEPTH_MASK) {
509 case I915_OVERLAY_YUV422: 505 case I915_OVERLAY_YUV422:
510 return 4; 506 return 4;
511 case I915_OVERLAY_YUV411: 507 case I915_OVERLAY_YUV411:
512 /* return 6; not implemented */ 508 /* return 6; not implemented */
513 default: 509 default:
514 return -EINVAL; 510 return -EINVAL;
515 } 511 }
516} 512}
517 513
518static int packed_width_bytes(u32 format, short width) 514static int packed_width_bytes(u32 format, short width)
519{ 515{
520 switch (format & I915_OVERLAY_DEPTH_MASK) { 516 switch (format & I915_OVERLAY_DEPTH_MASK) {
521 case I915_OVERLAY_YUV422: 517 case I915_OVERLAY_YUV422:
522 return width << 1; 518 return width << 1;
523 default: 519 default:
524 return -EINVAL; 520 return -EINVAL;
525 } 521 }
526} 522}
527 523
528static int uv_hsubsampling(u32 format) 524static int uv_hsubsampling(u32 format)
529{ 525{
530 switch (format & I915_OVERLAY_DEPTH_MASK) { 526 switch (format & I915_OVERLAY_DEPTH_MASK) {
531 case I915_OVERLAY_YUV422: 527 case I915_OVERLAY_YUV422:
532 case I915_OVERLAY_YUV420: 528 case I915_OVERLAY_YUV420:
533 return 2; 529 return 2;
534 case I915_OVERLAY_YUV411: 530 case I915_OVERLAY_YUV411:
535 case I915_OVERLAY_YUV410: 531 case I915_OVERLAY_YUV410:
536 return 4; 532 return 4;
537 default: 533 default:
538 return -EINVAL; 534 return -EINVAL;
539 } 535 }
540} 536}
541 537
542static int uv_vsubsampling(u32 format) 538static int uv_vsubsampling(u32 format)
543{ 539{
544 switch (format & I915_OVERLAY_DEPTH_MASK) { 540 switch (format & I915_OVERLAY_DEPTH_MASK) {
545 case I915_OVERLAY_YUV420: 541 case I915_OVERLAY_YUV420:
546 case I915_OVERLAY_YUV410: 542 case I915_OVERLAY_YUV410:
547 return 2; 543 return 2;
548 case I915_OVERLAY_YUV422: 544 case I915_OVERLAY_YUV422:
549 case I915_OVERLAY_YUV411: 545 case I915_OVERLAY_YUV411:
550 return 1; 546 return 1;
551 default: 547 default:
552 return -EINVAL; 548 return -EINVAL;
553 } 549 }
554} 550}
555 551
556static u32 calc_swidthsw(struct drm_device *dev, u32 offset, u32 width) 552static u32 calc_swidthsw(struct drm_device *dev, u32 offset, u32 width)
557{ 553{
558 u32 mask, shift, ret; 554 u32 mask, shift, ret;
559 if (IS_I9XX(dev)) { 555 if (IS_GEN2(dev)) {
560 mask = 0x3f;
561 shift = 6;
562 } else {
563 mask = 0x1f; 556 mask = 0x1f;
564 shift = 5; 557 shift = 5;
558 } else {
559 mask = 0x3f;
560 shift = 6;
565 } 561 }
566 ret = ((offset + width + mask) >> shift) - (offset >> shift); 562 ret = ((offset + width + mask) >> shift) - (offset >> shift);
567 if (IS_I9XX(dev)) 563 if (!IS_GEN2(dev))
568 ret <<= 1; 564 ret <<= 1;
 569 	ret -= 1; 565 	ret -= 1;
570 return ret << 2; 566 return ret << 2;
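
[Note] calc_swidthsw() computes the SWIDTHSW field: how many mask-aligned units the byte span [offset, offset+width) covers, doubled on non-gen2 parts, minus one, shifted left by two. A userspace restatement of the new-side logic for experimentation; the sample offsets and widths are examples only:

#include <stdio.h>
#include <stdint.h>

/* mirrors the new side of the hunk above: count mask-aligned units
 * spanned by [offset, offset+width) */
static uint32_t calc_swidthsw(int is_gen2, uint32_t offset, uint32_t width)
{
	uint32_t mask = is_gen2 ? 0x1f : 0x3f;
	uint32_t shift = is_gen2 ? 5 : 6;
	uint32_t ret;

	ret = ((offset + width + mask) >> shift) - (offset >> shift);
	if (!is_gen2)
		ret <<= 1;	/* non-gen2 counts in half-sized units */
	return (ret - 1) << 2;
}

int main(void)
{
	printf("gen2,  offset 0,  width 1024: %u\n", calc_swidthsw(1, 0, 1024));
	printf("gen4+, offset 32, width 1024: %u\n", calc_swidthsw(0, 32, 1024));
	return 0;
}
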
@@ -587,7 +583,9 @@ static const u16 y_static_hcoeffs[N_HORIZ_Y_TAPS * N_PHASES] = {
587 0x3020, 0xb340, 0x1fb8, 0x34a0, 0xb060, 583 0x3020, 0xb340, 0x1fb8, 0x34a0, 0xb060,
588 0x3020, 0xb240, 0x1fe0, 0x32e0, 0xb040, 584 0x3020, 0xb240, 0x1fe0, 0x32e0, 0xb040,
589 0x3020, 0xb140, 0x1ff8, 0x3160, 0xb020, 585 0x3020, 0xb140, 0x1ff8, 0x3160, 0xb020,
590 0xb000, 0x3000, 0x0800, 0x3000, 0xb000}; 586 0xb000, 0x3000, 0x0800, 0x3000, 0xb000
587};
588
591static const u16 uv_static_hcoeffs[N_HORIZ_UV_TAPS * N_PHASES] = { 589static const u16 uv_static_hcoeffs[N_HORIZ_UV_TAPS * N_PHASES] = {
592 0x3000, 0x1800, 0x1800, 0xb000, 0x18d0, 0x2e60, 590 0x3000, 0x1800, 0x1800, 0xb000, 0x18d0, 0x2e60,
593 0xb000, 0x1990, 0x2ce0, 0xb020, 0x1a68, 0x2b40, 591 0xb000, 0x1990, 0x2ce0, 0xb020, 0x1a68, 0x2b40,
@@ -597,7 +595,8 @@ static const u16 uv_static_hcoeffs[N_HORIZ_UV_TAPS * N_PHASES] = {
597 0xb100, 0x1eb8, 0x3620, 0xb100, 0x1f18, 0x34a0, 595 0xb100, 0x1eb8, 0x3620, 0xb100, 0x1f18, 0x34a0,
598 0xb100, 0x1f68, 0x3360, 0xb0e0, 0x1fa8, 0x3240, 596 0xb100, 0x1f68, 0x3360, 0xb0e0, 0x1fa8, 0x3240,
599 0xb0c0, 0x1fe0, 0x3140, 0xb060, 0x1ff0, 0x30a0, 597 0xb0c0, 0x1fe0, 0x3140, 0xb060, 0x1ff0, 0x30a0,
600 0x3000, 0x0800, 0x3000}; 598 0x3000, 0x0800, 0x3000
599};
601 600
602static void update_polyphase_filter(struct overlay_registers *regs) 601static void update_polyphase_filter(struct overlay_registers *regs)
603{ 602{
@@ -630,29 +629,31 @@ static bool update_scaling_factors(struct intel_overlay *overlay,
630 yscale = 1 << FP_SHIFT; 629 yscale = 1 << FP_SHIFT;
631 630
632 /*if (params->format & I915_OVERLAY_YUV_PLANAR) {*/ 631 /*if (params->format & I915_OVERLAY_YUV_PLANAR) {*/
633 xscale_UV = xscale/uv_hscale; 632 xscale_UV = xscale/uv_hscale;
634 yscale_UV = yscale/uv_vscale; 633 yscale_UV = yscale/uv_vscale;
 635 	/* make the Y scale to UV scale ratio an exact multiple */ 634 	/* make the Y scale to UV scale ratio an exact multiple */
636 xscale = xscale_UV * uv_hscale; 635 xscale = xscale_UV * uv_hscale;
637 yscale = yscale_UV * uv_vscale; 636 yscale = yscale_UV * uv_vscale;
638 /*} else { 637 /*} else {
639 xscale_UV = 0; 638 xscale_UV = 0;
640 yscale_UV = 0; 639 yscale_UV = 0;
641 }*/ 640 }*/
642 641
643 if (xscale != overlay->old_xscale || yscale != overlay->old_yscale) 642 if (xscale != overlay->old_xscale || yscale != overlay->old_yscale)
644 scale_changed = true; 643 scale_changed = true;
645 overlay->old_xscale = xscale; 644 overlay->old_xscale = xscale;
646 overlay->old_yscale = yscale; 645 overlay->old_yscale = yscale;
647 646
648 regs->YRGBSCALE = ((yscale & FRACT_MASK) << 20) 647 regs->YRGBSCALE = (((yscale & FRACT_MASK) << 20) |
649 | ((xscale >> FP_SHIFT) << 16) 648 ((xscale >> FP_SHIFT) << 16) |
650 | ((xscale & FRACT_MASK) << 3); 649 ((xscale & FRACT_MASK) << 3));
651 regs->UVSCALE = ((yscale_UV & FRACT_MASK) << 20) 650
652 | ((xscale_UV >> FP_SHIFT) << 16) 651 regs->UVSCALE = (((yscale_UV & FRACT_MASK) << 20) |
653 | ((xscale_UV & FRACT_MASK) << 3); 652 ((xscale_UV >> FP_SHIFT) << 16) |
654 regs->UVSCALEV = ((yscale >> FP_SHIFT) << 16) 653 ((xscale_UV & FRACT_MASK) << 3));
655 | ((yscale_UV >> FP_SHIFT) << 0); 654
655 regs->UVSCALEV = ((((yscale >> FP_SHIFT) << 16) |
656 ((yscale_UV >> FP_SHIFT) << 0)));
656 657
657 if (scale_changed) 658 if (scale_changed)
658 update_polyphase_filter(regs); 659 update_polyphase_filter(regs);
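
[Note] The three register writes above pack fixed-point scale factors into the hardware layout: fractional bits at bit 20, integer bits at bit 16, and the fraction repeated at bit 3. A sketch of that packing; FP_SHIFT = 12 and FRACT_MASK = 0xfff are assumptions standing in for the definitions earlier in intel_overlay.c, and packing a single factor (rather than separate x and y scales) is a simplification:

#include <stdio.h>
#include <stdint.h>

/* assumed split: low 12 bits fractional, the rest integer; check the
 * real FP_SHIFT/FRACT_MASK definitions in intel_overlay.c */
#define FP_SHIFT   12
#define FRACT_MASK 0xfff

/* pack one scale factor the way regs->YRGBSCALE is built above */
static uint32_t pack_scale(uint32_t scale)
{
	return ((scale & FRACT_MASK) << 20) |
	       ((scale >> FP_SHIFT) << 16) |
	       ((scale & FRACT_MASK) << 3);
}

int main(void)
{
	/* e.g. map 720 source pixels onto 640: ratio in fixed point */
	uint32_t xscale = (720u << FP_SHIFT) / 640u;

	printf("xscale = 0x%x, packed = 0x%08x\n", xscale, pack_scale(xscale));
	return 0;
}
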
@@ -664,22 +665,28 @@ static void update_colorkey(struct intel_overlay *overlay,
664 struct overlay_registers *regs) 665 struct overlay_registers *regs)
665{ 666{
666 u32 key = overlay->color_key; 667 u32 key = overlay->color_key;
668
667 switch (overlay->crtc->base.fb->bits_per_pixel) { 669 switch (overlay->crtc->base.fb->bits_per_pixel) {
668 case 8: 670 case 8:
669 regs->DCLRKV = 0; 671 regs->DCLRKV = 0;
670 regs->DCLRKM = CLK_RGB8I_MASK | DST_KEY_ENABLE; 672 regs->DCLRKM = CLK_RGB8I_MASK | DST_KEY_ENABLE;
671 case 16: 673 break;
672 if (overlay->crtc->base.fb->depth == 15) { 674
673 regs->DCLRKV = RGB15_TO_COLORKEY(key); 675 case 16:
674 regs->DCLRKM = CLK_RGB15_MASK | DST_KEY_ENABLE; 676 if (overlay->crtc->base.fb->depth == 15) {
675 } else { 677 regs->DCLRKV = RGB15_TO_COLORKEY(key);
676 regs->DCLRKV = RGB16_TO_COLORKEY(key); 678 regs->DCLRKM = CLK_RGB15_MASK | DST_KEY_ENABLE;
677 regs->DCLRKM = CLK_RGB16_MASK | DST_KEY_ENABLE; 679 } else {
678 } 680 regs->DCLRKV = RGB16_TO_COLORKEY(key);
679 case 24: 681 regs->DCLRKM = CLK_RGB16_MASK | DST_KEY_ENABLE;
680 case 32: 682 }
681 regs->DCLRKV = key; 683 break;
682 regs->DCLRKM = CLK_RGB24_MASK | DST_KEY_ENABLE; 684
685 case 24:
686 case 32:
687 regs->DCLRKV = key;
688 regs->DCLRKM = CLK_RGB24_MASK | DST_KEY_ENABLE;
689 break;
683 } 690 }
684} 691}
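
[Note] Beyond reindenting, this hunk fixes real fallthrough bugs: the old switch had no break statements, so the 8 bpp and 16 bpp cases fell through and the 24-bit key/mask overwrote whatever had just been set. The RGB15/RGB16_TO_COLORKEY macros are defined elsewhere; the expansion below is a plausible guess (5/6-bit channels widened into 8-bit lanes), not the driver's exact definition:

#include <stdio.h>
#include <stdint.h>

/* hypothetical expansions of RGB16_TO_COLORKEY()/RGB15_TO_COLORKEY():
 * place each truncated channel in the top bits of its 8-bit lane */
static uint32_t rgb16_to_colorkey(uint32_t key)
{
	return ((key & 0xf800) << 8) |	/* R: 5 bits -> bits 23:19 */
	       ((key & 0x07e0) << 5) |	/* G: 6 bits -> bits 15:10 */
	       ((key & 0x001f) << 3);	/* B: 5 bits -> bits  7:3  */
}

static uint32_t rgb15_to_colorkey(uint32_t key)
{
	return ((key & 0x7c00) << 9) |	/* R: 5 bits -> bits 23:19 */
	       ((key & 0x03e0) << 6) |	/* G: 5 bits -> bits 15:11 */
	       ((key & 0x001f) << 3);	/* B: 5 bits -> bits  7:3  */
}

int main(void)
{
	printf("RGB565  0x%04x -> 0x%08x\n", 0xf800, rgb16_to_colorkey(0xf800));
	printf("RGB1555 0x%04x -> 0x%08x\n", 0x7c00, rgb15_to_colorkey(0x7c00));
	return 0;
}
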
685 692
@@ -689,48 +696,48 @@ static u32 overlay_cmd_reg(struct put_image_params *params)
689 696
690 if (params->format & I915_OVERLAY_YUV_PLANAR) { 697 if (params->format & I915_OVERLAY_YUV_PLANAR) {
691 switch (params->format & I915_OVERLAY_DEPTH_MASK) { 698 switch (params->format & I915_OVERLAY_DEPTH_MASK) {
692 case I915_OVERLAY_YUV422: 699 case I915_OVERLAY_YUV422:
693 cmd |= OCMD_YUV_422_PLANAR; 700 cmd |= OCMD_YUV_422_PLANAR;
694 break; 701 break;
695 case I915_OVERLAY_YUV420: 702 case I915_OVERLAY_YUV420:
696 cmd |= OCMD_YUV_420_PLANAR; 703 cmd |= OCMD_YUV_420_PLANAR;
697 break; 704 break;
698 case I915_OVERLAY_YUV411: 705 case I915_OVERLAY_YUV411:
699 case I915_OVERLAY_YUV410: 706 case I915_OVERLAY_YUV410:
700 cmd |= OCMD_YUV_410_PLANAR; 707 cmd |= OCMD_YUV_410_PLANAR;
701 break; 708 break;
702 } 709 }
703 } else { /* YUV packed */ 710 } else { /* YUV packed */
704 switch (params->format & I915_OVERLAY_DEPTH_MASK) { 711 switch (params->format & I915_OVERLAY_DEPTH_MASK) {
705 case I915_OVERLAY_YUV422: 712 case I915_OVERLAY_YUV422:
706 cmd |= OCMD_YUV_422_PACKED; 713 cmd |= OCMD_YUV_422_PACKED;
707 break; 714 break;
708 case I915_OVERLAY_YUV411: 715 case I915_OVERLAY_YUV411:
709 cmd |= OCMD_YUV_411_PACKED; 716 cmd |= OCMD_YUV_411_PACKED;
710 break; 717 break;
711 } 718 }
712 719
713 switch (params->format & I915_OVERLAY_SWAP_MASK) { 720 switch (params->format & I915_OVERLAY_SWAP_MASK) {
714 case I915_OVERLAY_NO_SWAP: 721 case I915_OVERLAY_NO_SWAP:
715 break; 722 break;
716 case I915_OVERLAY_UV_SWAP: 723 case I915_OVERLAY_UV_SWAP:
717 cmd |= OCMD_UV_SWAP; 724 cmd |= OCMD_UV_SWAP;
718 break; 725 break;
719 case I915_OVERLAY_Y_SWAP: 726 case I915_OVERLAY_Y_SWAP:
720 cmd |= OCMD_Y_SWAP; 727 cmd |= OCMD_Y_SWAP;
721 break; 728 break;
722 case I915_OVERLAY_Y_AND_UV_SWAP: 729 case I915_OVERLAY_Y_AND_UV_SWAP:
723 cmd |= OCMD_Y_AND_UV_SWAP; 730 cmd |= OCMD_Y_AND_UV_SWAP;
724 break; 731 break;
725 } 732 }
726 } 733 }
727 734
728 return cmd; 735 return cmd;
729} 736}
730 737
731int intel_overlay_do_put_image(struct intel_overlay *overlay, 738static int intel_overlay_do_put_image(struct intel_overlay *overlay,
732 struct drm_gem_object *new_bo, 739 struct drm_gem_object *new_bo,
733 struct put_image_params *params) 740 struct put_image_params *params)
734{ 741{
735 int ret, tmp_width; 742 int ret, tmp_width;
736 struct overlay_registers *regs; 743 struct overlay_registers *regs;
@@ -755,24 +762,24 @@ int intel_overlay_do_put_image(struct intel_overlay *overlay,
755 goto out_unpin; 762 goto out_unpin;
756 763
757 if (!overlay->active) { 764 if (!overlay->active) {
758 regs = intel_overlay_map_regs_atomic(overlay); 765 regs = intel_overlay_map_regs(overlay);
759 if (!regs) { 766 if (!regs) {
760 ret = -ENOMEM; 767 ret = -ENOMEM;
761 goto out_unpin; 768 goto out_unpin;
762 } 769 }
763 regs->OCONFIG = OCONF_CC_OUT_8BIT; 770 regs->OCONFIG = OCONF_CC_OUT_8BIT;
764 if (IS_I965GM(overlay->dev)) 771 if (IS_GEN4(overlay->dev))
765 regs->OCONFIG |= OCONF_CSC_MODE_BT709; 772 regs->OCONFIG |= OCONF_CSC_MODE_BT709;
766 regs->OCONFIG |= overlay->crtc->pipe == 0 ? 773 regs->OCONFIG |= overlay->crtc->pipe == 0 ?
767 OCONF_PIPE_A : OCONF_PIPE_B; 774 OCONF_PIPE_A : OCONF_PIPE_B;
768 intel_overlay_unmap_regs_atomic(overlay); 775 intel_overlay_unmap_regs(overlay, regs);
769 776
770 ret = intel_overlay_on(overlay); 777 ret = intel_overlay_on(overlay);
771 if (ret != 0) 778 if (ret != 0)
772 goto out_unpin; 779 goto out_unpin;
773 } 780 }
774 781
775 regs = intel_overlay_map_regs_atomic(overlay); 782 regs = intel_overlay_map_regs(overlay);
776 if (!regs) { 783 if (!regs) {
777 ret = -ENOMEM; 784 ret = -ENOMEM;
778 goto out_unpin; 785 goto out_unpin;
@@ -788,7 +795,7 @@ int intel_overlay_do_put_image(struct intel_overlay *overlay,
788 795
789 regs->SWIDTH = params->src_w; 796 regs->SWIDTH = params->src_w;
790 regs->SWIDTHSW = calc_swidthsw(overlay->dev, 797 regs->SWIDTHSW = calc_swidthsw(overlay->dev,
791 params->offset_Y, tmp_width); 798 params->offset_Y, tmp_width);
792 regs->SHEIGHT = params->src_h; 799 regs->SHEIGHT = params->src_h;
 793 	regs->SHEIGHT = params->src_h; 799 	regs->SHEIGHT = params->src_h; regs->OBUF_0Y = bo_priv->gtt_offset + params->offset_Y; 800 	regs->OBUF_0Y = bo_priv->gtt_offset + params->offset_Y;
794 regs->OSTRIDE = params->stride_Y; 801 regs->OSTRIDE = params->stride_Y;
@@ -799,9 +806,9 @@ int intel_overlay_do_put_image(struct intel_overlay *overlay,
799 u32 tmp_U, tmp_V; 806 u32 tmp_U, tmp_V;
800 regs->SWIDTH |= (params->src_w/uv_hscale) << 16; 807 regs->SWIDTH |= (params->src_w/uv_hscale) << 16;
801 tmp_U = calc_swidthsw(overlay->dev, params->offset_U, 808 tmp_U = calc_swidthsw(overlay->dev, params->offset_U,
802 params->src_w/uv_hscale); 809 params->src_w/uv_hscale);
803 tmp_V = calc_swidthsw(overlay->dev, params->offset_V, 810 tmp_V = calc_swidthsw(overlay->dev, params->offset_V,
804 params->src_w/uv_hscale); 811 params->src_w/uv_hscale);
805 regs->SWIDTHSW |= max_t(u32, tmp_U, tmp_V) << 16; 812 regs->SWIDTHSW |= max_t(u32, tmp_U, tmp_V) << 16;
806 regs->SHEIGHT |= (params->src_h/uv_vscale) << 16; 813 regs->SHEIGHT |= (params->src_h/uv_vscale) << 16;
807 regs->OBUF_0U = bo_priv->gtt_offset + params->offset_U; 814 regs->OBUF_0U = bo_priv->gtt_offset + params->offset_U;
@@ -815,9 +822,11 @@ int intel_overlay_do_put_image(struct intel_overlay *overlay,
815 822
816 regs->OCMD = overlay_cmd_reg(params); 823 regs->OCMD = overlay_cmd_reg(params);
817 824
818 intel_overlay_unmap_regs_atomic(overlay); 825 intel_overlay_unmap_regs(overlay, regs);
819 826
820 intel_overlay_continue(overlay, scale_changed); 827 ret = intel_overlay_continue(overlay, scale_changed);
828 if (ret)
829 goto out_unpin;
821 830
822 overlay->old_vid_bo = overlay->vid_bo; 831 overlay->old_vid_bo = overlay->vid_bo;
823 overlay->vid_bo = to_intel_bo(new_bo); 832 overlay->vid_bo = to_intel_bo(new_bo);
@@ -829,20 +838,19 @@ out_unpin:
829 return ret; 838 return ret;
830} 839}
831 840
832int intel_overlay_switch_off(struct intel_overlay *overlay) 841int intel_overlay_switch_off(struct intel_overlay *overlay,
842 bool interruptible)
833{ 843{
834 int ret;
835 struct overlay_registers *regs; 844 struct overlay_registers *regs;
836 struct drm_device *dev = overlay->dev; 845 struct drm_device *dev = overlay->dev;
846 int ret;
837 847
838 BUG_ON(!mutex_is_locked(&dev->struct_mutex)); 848 BUG_ON(!mutex_is_locked(&dev->struct_mutex));
839 BUG_ON(!mutex_is_locked(&dev->mode_config.mutex)); 849 BUG_ON(!mutex_is_locked(&dev->mode_config.mutex));
840 850
841 if (overlay->hw_wedged) { 851 ret = intel_overlay_recover_from_interrupt(overlay, interruptible);
842 ret = intel_overlay_recover_from_interrupt(overlay, 1); 852 if (ret != 0)
843 if (ret != 0) 853 return ret;
844 return ret;
845 }
846 854
847 if (!overlay->active) 855 if (!overlay->active)
848 return 0; 856 return 0;
@@ -851,33 +859,29 @@ int intel_overlay_switch_off(struct intel_overlay *overlay)
851 if (ret != 0) 859 if (ret != 0)
852 return ret; 860 return ret;
853 861
854 regs = intel_overlay_map_regs_atomic(overlay); 862 regs = intel_overlay_map_regs(overlay);
855 regs->OCMD = 0; 863 regs->OCMD = 0;
856 intel_overlay_unmap_regs_atomic(overlay); 864 intel_overlay_unmap_regs(overlay, regs);
857 865
858 ret = intel_overlay_off(overlay); 866 ret = intel_overlay_off(overlay, interruptible);
859 if (ret != 0) 867 if (ret != 0)
860 return ret; 868 return ret;
861 869
862 intel_overlay_off_tail(overlay); 870 intel_overlay_off_tail(overlay);
863
864 return 0; 871 return 0;
865} 872}
866 873
867static int check_overlay_possible_on_crtc(struct intel_overlay *overlay, 874static int check_overlay_possible_on_crtc(struct intel_overlay *overlay,
868 struct intel_crtc *crtc) 875 struct intel_crtc *crtc)
869{ 876{
870 drm_i915_private_t *dev_priv = overlay->dev->dev_private; 877 drm_i915_private_t *dev_priv = overlay->dev->dev_private;
871 u32 pipeconf;
872 int pipeconf_reg = (crtc->pipe == 0) ? PIPEACONF : PIPEBCONF;
873 878
874 if (!crtc->base.enabled || crtc->dpms_mode != DRM_MODE_DPMS_ON) 879 if (!crtc->active)
875 return -EINVAL; 880 return -EINVAL;
876 881
877 pipeconf = I915_READ(pipeconf_reg);
878
879 /* can't use the overlay with double wide pipe */ 882 /* can't use the overlay with double wide pipe */
880 if (!IS_I965G(overlay->dev) && pipeconf & PIPEACONF_DOUBLE_WIDE) 883 if (INTEL_INFO(overlay->dev)->gen < 4 &&
884 (I915_READ(PIPECONF(crtc->pipe)) & (PIPECONF_DOUBLE_WIDE | PIPECONF_ENABLE)) != PIPECONF_ENABLE)
881 return -EINVAL; 885 return -EINVAL;
882 886
883 return 0; 887 return 0;
@@ -886,20 +890,22 @@ static int check_overlay_possible_on_crtc(struct intel_overlay *overlay,
886static void update_pfit_vscale_ratio(struct intel_overlay *overlay) 890static void update_pfit_vscale_ratio(struct intel_overlay *overlay)
887{ 891{
888 struct drm_device *dev = overlay->dev; 892 struct drm_device *dev = overlay->dev;
889 drm_i915_private_t *dev_priv = dev->dev_private; 893 drm_i915_private_t *dev_priv = dev->dev_private;
890 u32 ratio;
891 u32 pfit_control = I915_READ(PFIT_CONTROL); 894 u32 pfit_control = I915_READ(PFIT_CONTROL);
895 u32 ratio;
892 896
893 /* XXX: This is not the same logic as in the xorg driver, but more in 897 /* XXX: This is not the same logic as in the xorg driver, but more in
894 * line with the intel documentation for the i965 */ 898 * line with the intel documentation for the i965
895 if (!IS_I965G(dev) && (pfit_control & VERT_AUTO_SCALE)) { 899 */
896 ratio = I915_READ(PFIT_AUTO_RATIOS) >> PFIT_VERT_SCALE_SHIFT; 900 if (INTEL_INFO(dev)->gen >= 4) {
897 } else { /* on i965 use the PGM reg to read out the autoscaler values */ 901 /* on i965 use the PGM reg to read out the autoscaler values */
898 ratio = I915_READ(PFIT_PGM_RATIOS); 902 ratio = I915_READ(PFIT_PGM_RATIOS) >> PFIT_VERT_SCALE_SHIFT_965;
899 if (IS_I965G(dev)) 903 } else {
900 ratio >>= PFIT_VERT_SCALE_SHIFT_965; 904 if (pfit_control & VERT_AUTO_SCALE)
905 ratio = I915_READ(PFIT_AUTO_RATIOS);
901 else 906 else
902 ratio >>= PFIT_VERT_SCALE_SHIFT; 907 ratio = I915_READ(PFIT_PGM_RATIOS);
908 ratio >>= PFIT_VERT_SCALE_SHIFT;
903 } 909 }
904 910
905 overlay->pfit_vscale_ratio = ratio; 911 overlay->pfit_vscale_ratio = ratio;
@@ -910,12 +916,10 @@ static int check_overlay_dst(struct intel_overlay *overlay,
910{ 916{
911 struct drm_display_mode *mode = &overlay->crtc->base.mode; 917 struct drm_display_mode *mode = &overlay->crtc->base.mode;
912 918
913 if ((rec->dst_x < mode->crtc_hdisplay) 919 if (rec->dst_x < mode->crtc_hdisplay &&
914 && (rec->dst_x + rec->dst_width 920 rec->dst_x + rec->dst_width <= mode->crtc_hdisplay &&
915 <= mode->crtc_hdisplay) 921 rec->dst_y < mode->crtc_vdisplay &&
916 && (rec->dst_y < mode->crtc_vdisplay) 922 rec->dst_y + rec->dst_height <= mode->crtc_vdisplay)
917 && (rec->dst_y + rec->dst_height
918 <= mode->crtc_vdisplay))
919 return 0; 923 return 0;
920 else 924 else
921 return -EINVAL; 925 return -EINVAL;
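
[Note] Rewritten as one chained condition, the check simply requires the destination rectangle to start inside the active display area and end on or before its edge. The same test in standalone form; the struct and the sample rectangles are illustrative:

#include <stdio.h>

struct rect { int x, y, w, h; };

/* destination must start inside and end within the crtc display area,
 * matching the condition in check_overlay_dst() above */
static int dst_fits(const struct rect *r, int hdisplay, int vdisplay)
{
	return r->x < hdisplay && r->x + r->w <= hdisplay &&
	       r->y < vdisplay && r->y + r->h <= vdisplay;
}

int main(void)
{
	struct rect ok  = { 100, 100, 640, 480 };
	struct rect bad = { 1000, 100, 640, 480 };

	printf("ok fits 1024x768:  %d\n", dst_fits(&ok, 1024, 768));
	printf("bad fits 1024x768: %d\n", dst_fits(&bad, 1024, 768));
	return 0;
}
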
@@ -940,53 +944,57 @@ static int check_overlay_src(struct drm_device *dev,
940 struct drm_intel_overlay_put_image *rec, 944 struct drm_intel_overlay_put_image *rec,
941 struct drm_gem_object *new_bo) 945 struct drm_gem_object *new_bo)
942{ 946{
943 u32 stride_mask;
944 int depth;
945 int uv_hscale = uv_hsubsampling(rec->flags); 947 int uv_hscale = uv_hsubsampling(rec->flags);
946 int uv_vscale = uv_vsubsampling(rec->flags); 948 int uv_vscale = uv_vsubsampling(rec->flags);
947 size_t tmp; 949 u32 stride_mask, depth, tmp;
948 950
949 /* check src dimensions */ 951 /* check src dimensions */
950 if (IS_845G(dev) || IS_I830(dev)) { 952 if (IS_845G(dev) || IS_I830(dev)) {
951 if (rec->src_height > IMAGE_MAX_HEIGHT_LEGACY 953 if (rec->src_height > IMAGE_MAX_HEIGHT_LEGACY ||
952 || rec->src_width > IMAGE_MAX_WIDTH_LEGACY) 954 rec->src_width > IMAGE_MAX_WIDTH_LEGACY)
953 return -EINVAL; 955 return -EINVAL;
954 } else { 956 } else {
955 if (rec->src_height > IMAGE_MAX_HEIGHT 957 if (rec->src_height > IMAGE_MAX_HEIGHT ||
956 || rec->src_width > IMAGE_MAX_WIDTH) 958 rec->src_width > IMAGE_MAX_WIDTH)
957 return -EINVAL; 959 return -EINVAL;
958 } 960 }
961
959 /* better safe than sorry, use 4 as the maximal subsampling ratio */ 962 /* better safe than sorry, use 4 as the maximal subsampling ratio */
960 if (rec->src_height < N_VERT_Y_TAPS*4 963 if (rec->src_height < N_VERT_Y_TAPS*4 ||
961 || rec->src_width < N_HORIZ_Y_TAPS*4) 964 rec->src_width < N_HORIZ_Y_TAPS*4)
962 return -EINVAL; 965 return -EINVAL;
963 966
964 /* check alignment constraints */ 967 /* check alignment constraints */
965 switch (rec->flags & I915_OVERLAY_TYPE_MASK) { 968 switch (rec->flags & I915_OVERLAY_TYPE_MASK) {
966 case I915_OVERLAY_RGB: 969 case I915_OVERLAY_RGB:
967 /* not implemented */ 970 /* not implemented */
971 return -EINVAL;
972
973 case I915_OVERLAY_YUV_PACKED:
974 if (uv_vscale != 1)
968 return -EINVAL; 975 return -EINVAL;
969 case I915_OVERLAY_YUV_PACKED: 976
970 depth = packed_depth_bytes(rec->flags); 977 depth = packed_depth_bytes(rec->flags);
971 if (uv_vscale != 1) 978 if (depth < 0)
972 return -EINVAL; 979 return depth;
973 if (depth < 0) 980
974 return depth; 981 /* ignore UV planes */
975 /* ignore UV planes */ 982 rec->stride_UV = 0;
976 rec->stride_UV = 0; 983 rec->offset_U = 0;
977 rec->offset_U = 0; 984 rec->offset_V = 0;
978 rec->offset_V = 0; 985 /* check pixel alignment */
979 /* check pixel alignment */ 986 if (rec->offset_Y % depth)
980 if (rec->offset_Y % depth) 987 return -EINVAL;
981 return -EINVAL; 988 break;
982 break; 989
983 case I915_OVERLAY_YUV_PLANAR: 990 case I915_OVERLAY_YUV_PLANAR:
984 if (uv_vscale < 0 || uv_hscale < 0) 991 if (uv_vscale < 0 || uv_hscale < 0)
985 return -EINVAL;
986 /* no offset restrictions for planar formats */
987 break;
988 default:
989 return -EINVAL; 992 return -EINVAL;
993 /* no offset restrictions for planar formats */
994 break;
995
996 default:
997 return -EINVAL;
990 } 998 }
991 999
992 if (rec->src_width % uv_hscale) 1000 if (rec->src_width % uv_hscale)
@@ -1000,47 +1008,74 @@ static int check_overlay_src(struct drm_device *dev,
1000 1008
1001 if (rec->stride_Y & stride_mask || rec->stride_UV & stride_mask) 1009 if (rec->stride_Y & stride_mask || rec->stride_UV & stride_mask)
1002 return -EINVAL; 1010 return -EINVAL;
1003 if (IS_I965G(dev) && rec->stride_Y < 512) 1011 if (IS_GEN4(dev) && rec->stride_Y < 512)
1004 return -EINVAL; 1012 return -EINVAL;
1005 1013
1006 tmp = (rec->flags & I915_OVERLAY_TYPE_MASK) == I915_OVERLAY_YUV_PLANAR ? 1014 tmp = (rec->flags & I915_OVERLAY_TYPE_MASK) == I915_OVERLAY_YUV_PLANAR ?
1007 4 : 8; 1015 4096 : 8192;
1008 if (rec->stride_Y > tmp*1024 || rec->stride_UV > 2*1024) 1016 if (rec->stride_Y > tmp || rec->stride_UV > 2*1024)
1009 return -EINVAL; 1017 return -EINVAL;
1010 1018
1011 /* check buffer dimensions */ 1019 /* check buffer dimensions */
1012 switch (rec->flags & I915_OVERLAY_TYPE_MASK) { 1020 switch (rec->flags & I915_OVERLAY_TYPE_MASK) {
1013 case I915_OVERLAY_RGB: 1021 case I915_OVERLAY_RGB:
1014 case I915_OVERLAY_YUV_PACKED: 1022 case I915_OVERLAY_YUV_PACKED:
1015 /* always 4 Y values per depth pixels */ 1023 /* always 4 Y values per depth pixels */
1016 if (packed_width_bytes(rec->flags, rec->src_width) 1024 if (packed_width_bytes(rec->flags, rec->src_width) > rec->stride_Y)
1017 > rec->stride_Y) 1025 return -EINVAL;
1018 return -EINVAL; 1026
1019 1027 tmp = rec->stride_Y*rec->src_height;
1020 tmp = rec->stride_Y*rec->src_height; 1028 if (rec->offset_Y + tmp > new_bo->size)
1021 if (rec->offset_Y + tmp > new_bo->size) 1029 return -EINVAL;
1022 return -EINVAL; 1030 break;
1023 break; 1031
1024 case I915_OVERLAY_YUV_PLANAR: 1032 case I915_OVERLAY_YUV_PLANAR:
1025 if (rec->src_width > rec->stride_Y) 1033 if (rec->src_width > rec->stride_Y)
1026 return -EINVAL; 1034 return -EINVAL;
1027 if (rec->src_width/uv_hscale > rec->stride_UV) 1035 if (rec->src_width/uv_hscale > rec->stride_UV)
1028 return -EINVAL; 1036 return -EINVAL;
1029 1037
1030 tmp = rec->stride_Y*rec->src_height; 1038 tmp = rec->stride_Y * rec->src_height;
1031 if (rec->offset_Y + tmp > new_bo->size) 1039 if (rec->offset_Y + tmp > new_bo->size)
1032 return -EINVAL; 1040 return -EINVAL;
1033 tmp = rec->stride_UV*rec->src_height; 1041
1034 tmp /= uv_vscale; 1042 tmp = rec->stride_UV * (rec->src_height / uv_vscale);
1035 if (rec->offset_U + tmp > new_bo->size 1043 if (rec->offset_U + tmp > new_bo->size ||
1036 || rec->offset_V + tmp > new_bo->size) 1044 rec->offset_V + tmp > new_bo->size)
1037 return -EINVAL; 1045 return -EINVAL;
1038 break; 1046 break;
1039 } 1047 }
1040 1048
1041 return 0; 1049 return 0;
1042} 1050}
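
[Note] The buffer-dimension checks at the end of check_overlay_src() all reduce to one invariant: each plane's offset plus stride times row count must stay inside the buffer object, with chroma planes using src_height/uv_vscale rows. A standalone sketch of that invariant; the buffer size, strides, and the two-planes-at-one-offset layout are example values, not driver constraints:

#include <stdio.h>
#include <stdint.h>

/* a plane must lie entirely inside the buffer object, as in the
 * buffer-dimension checks of check_overlay_src() above */
static int plane_fits(uint32_t offset, uint32_t stride, uint32_t rows,
		      uint32_t bo_size)
{
	return offset + (uint64_t)stride * rows <= bo_size;
}

int main(void)
{
	uint32_t bo_size = 1024 * 1024;
	uint32_t h = 576, uv_vscale = 2;	/* YUV420 */
	uint32_t stride_y = 768, stride_uv = 384;

	/* Y plane first, then a half-height chroma plane behind it */
	printf("Y fits:  %d\n", plane_fits(0, stride_y, h, bo_size));
	printf("UV fits: %d\n", plane_fits(stride_y * h, stride_uv,
					   h / uv_vscale, bo_size));
	return 0;
}
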
1043 1051
1052/**
1053 * Return the pipe currently connected to the panel fitter,
1054 * or -1 if the panel fitter is not present or not in use
1055 */
1056static int intel_panel_fitter_pipe(struct drm_device *dev)
1057{
1058 struct drm_i915_private *dev_priv = dev->dev_private;
1059 u32 pfit_control;
1060
1061 /* i830 doesn't have a panel fitter */
1062 if (IS_I830(dev))
1063 return -1;
1064
1065 pfit_control = I915_READ(PFIT_CONTROL);
1066
1067 /* See if the panel fitter is in use */
1068 if ((pfit_control & PFIT_ENABLE) == 0)
1069 return -1;
1070
1071 /* 965 can place panel fitter on either pipe */
1072 if (IS_GEN4(dev))
1073 return (pfit_control >> 29) & 0x3;
1074
1075 /* older chips can only use pipe 1 */
1076 return 1;
1077}
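
[Note] The new helper decodes PFIT_CONTROL: if the enable bit is clear there is no fitter to worry about; on gen4 the pipe select lives in bits 30:29 (hence the shift by 29 and mask of 0x3), while older parts hardwire the fitter to pipe 1. A standalone restatement; the PFIT_ENABLE bit position is an assumption here, taken to be bit 31:

#include <stdio.h>
#include <stdint.h>

#define PFIT_ENABLE (1u << 31)	/* assumed position of the enable flag */

/* mirrors intel_panel_fitter_pipe() above: -1 when disabled, pipe
 * select from bits 30:29 on gen4, fixed pipe 1 on older parts */
static int panel_fitter_pipe(uint32_t pfit_control, int gen)
{
	if ((pfit_control & PFIT_ENABLE) == 0)
		return -1;
	if (gen >= 4)
		return (pfit_control >> 29) & 0x3;
	return 1;
}

int main(void)
{
	printf("disabled:      %d\n", panel_fitter_pipe(0, 4));
	printf("gen4, pipe B:  %d\n",
	       panel_fitter_pipe(PFIT_ENABLE | (1u << 29), 4));
	printf("gen3, enabled: %d\n", panel_fitter_pipe(PFIT_ENABLE, 3));
	return 0;
}
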
1078
1044int intel_overlay_put_image(struct drm_device *dev, void *data, 1079int intel_overlay_put_image(struct drm_device *dev, void *data,
1045 struct drm_file *file_priv) 1080 struct drm_file *file_priv)
1046{ 1081{
@@ -1068,7 +1103,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
1068 mutex_lock(&dev->mode_config.mutex); 1103 mutex_lock(&dev->mode_config.mutex);
1069 mutex_lock(&dev->struct_mutex); 1104 mutex_lock(&dev->struct_mutex);
1070 1105
1071 ret = intel_overlay_switch_off(overlay); 1106 ret = intel_overlay_switch_off(overlay, true);
1072 1107
1073 mutex_unlock(&dev->struct_mutex); 1108 mutex_unlock(&dev->struct_mutex);
1074 mutex_unlock(&dev->mode_config.mutex); 1109 mutex_unlock(&dev->mode_config.mutex);
@@ -1081,7 +1116,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
1081 return -ENOMEM; 1116 return -ENOMEM;
1082 1117
1083 drmmode_obj = drm_mode_object_find(dev, put_image_rec->crtc_id, 1118 drmmode_obj = drm_mode_object_find(dev, put_image_rec->crtc_id,
1084 DRM_MODE_OBJECT_CRTC); 1119 DRM_MODE_OBJECT_CRTC);
1085 if (!drmmode_obj) { 1120 if (!drmmode_obj) {
1086 ret = -ENOENT; 1121 ret = -ENOENT;
1087 goto out_free; 1122 goto out_free;
@@ -1089,7 +1124,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
1089 crtc = to_intel_crtc(obj_to_crtc(drmmode_obj)); 1124 crtc = to_intel_crtc(obj_to_crtc(drmmode_obj));
1090 1125
1091 new_bo = drm_gem_object_lookup(dev, file_priv, 1126 new_bo = drm_gem_object_lookup(dev, file_priv,
1092 put_image_rec->bo_handle); 1127 put_image_rec->bo_handle);
1093 if (!new_bo) { 1128 if (!new_bo) {
1094 ret = -ENOENT; 1129 ret = -ENOENT;
1095 goto out_free; 1130 goto out_free;
@@ -1098,15 +1133,13 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
1098 mutex_lock(&dev->mode_config.mutex); 1133 mutex_lock(&dev->mode_config.mutex);
1099 mutex_lock(&dev->struct_mutex); 1134 mutex_lock(&dev->struct_mutex);
1100 1135
1101 if (overlay->hw_wedged) { 1136 ret = intel_overlay_recover_from_interrupt(overlay, true);
1102 ret = intel_overlay_recover_from_interrupt(overlay, 1); 1137 if (ret != 0)
1103 if (ret != 0) 1138 goto out_unlock;
1104 goto out_unlock;
1105 }
1106 1139
1107 if (overlay->crtc != crtc) { 1140 if (overlay->crtc != crtc) {
1108 struct drm_display_mode *mode = &crtc->base.mode; 1141 struct drm_display_mode *mode = &crtc->base.mode;
1109 ret = intel_overlay_switch_off(overlay); 1142 ret = intel_overlay_switch_off(overlay, true);
1110 if (ret != 0) 1143 if (ret != 0)
1111 goto out_unlock; 1144 goto out_unlock;
1112 1145
@@ -1117,9 +1150,9 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
1117 overlay->crtc = crtc; 1150 overlay->crtc = crtc;
1118 crtc->overlay = overlay; 1151 crtc->overlay = overlay;
1119 1152
1120 if (intel_panel_fitter_pipe(dev) == crtc->pipe 1153 /* line too wide, i.e. one-line-mode */
1121 /* and line to wide, i.e. one-line-mode */ 1154 if (mode->hdisplay > 1024 &&
1122 && mode->hdisplay > 1024) { 1155 intel_panel_fitter_pipe(dev) == crtc->pipe) {
1123 overlay->pfit_active = 1; 1156 overlay->pfit_active = 1;
1124 update_pfit_vscale_ratio(overlay); 1157 update_pfit_vscale_ratio(overlay);
1125 } else 1158 } else
@@ -1132,10 +1165,10 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
1132 1165
1133 if (overlay->pfit_active) { 1166 if (overlay->pfit_active) {
1134 params->dst_y = ((((u32)put_image_rec->dst_y) << 12) / 1167 params->dst_y = ((((u32)put_image_rec->dst_y) << 12) /
1135 overlay->pfit_vscale_ratio); 1168 overlay->pfit_vscale_ratio);
1136 /* shifting right rounds downwards, so add 1 */ 1169 /* shifting right rounds downwards, so add 1 */
1137 params->dst_h = ((((u32)put_image_rec->dst_height) << 12) / 1170 params->dst_h = ((((u32)put_image_rec->dst_height) << 12) /
1138 overlay->pfit_vscale_ratio) + 1; 1171 overlay->pfit_vscale_ratio) + 1;
1139 } else { 1172 } else {
1140 params->dst_y = put_image_rec->dst_y; 1173 params->dst_y = put_image_rec->dst_y;
1141 params->dst_h = put_image_rec->dst_height; 1174 params->dst_h = put_image_rec->dst_height;
@@ -1147,8 +1180,8 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
1147 params->src_h = put_image_rec->src_height; 1180 params->src_h = put_image_rec->src_height;
1148 params->src_scan_w = put_image_rec->src_scan_width; 1181 params->src_scan_w = put_image_rec->src_scan_width;
1149 params->src_scan_h = put_image_rec->src_scan_height; 1182 params->src_scan_h = put_image_rec->src_scan_height;
1150 if (params->src_scan_h > params->src_h 1183 if (params->src_scan_h > params->src_h ||
1151 || params->src_scan_w > params->src_w) { 1184 params->src_scan_w > params->src_w) {
1152 ret = -EINVAL; 1185 ret = -EINVAL;
1153 goto out_unlock; 1186 goto out_unlock;
1154 } 1187 }
@@ -1204,7 +1237,7 @@ static bool check_gamma_bounds(u32 gamma1, u32 gamma2)
1204 return false; 1237 return false;
1205 1238
1206 for (i = 0; i < 3; i++) { 1239 for (i = 0; i < 3; i++) {
1207 if (((gamma1 >> i * 8) & 0xff) >= ((gamma2 >> i*8) & 0xff)) 1240 if (((gamma1 >> i*8) & 0xff) >= ((gamma2 >> i*8) & 0xff))
1208 return false; 1241 return false;
1209 } 1242 }
1210 1243
@@ -1225,16 +1258,18 @@ static bool check_gamma5_errata(u32 gamma5)
1225 1258
1226static int check_gamma(struct drm_intel_overlay_attrs *attrs) 1259static int check_gamma(struct drm_intel_overlay_attrs *attrs)
1227{ 1260{
1228 if (!check_gamma_bounds(0, attrs->gamma0) 1261 if (!check_gamma_bounds(0, attrs->gamma0) ||
1229 || !check_gamma_bounds(attrs->gamma0, attrs->gamma1) 1262 !check_gamma_bounds(attrs->gamma0, attrs->gamma1) ||
1230 || !check_gamma_bounds(attrs->gamma1, attrs->gamma2) 1263 !check_gamma_bounds(attrs->gamma1, attrs->gamma2) ||
1231 || !check_gamma_bounds(attrs->gamma2, attrs->gamma3) 1264 !check_gamma_bounds(attrs->gamma2, attrs->gamma3) ||
1232 || !check_gamma_bounds(attrs->gamma3, attrs->gamma4) 1265 !check_gamma_bounds(attrs->gamma3, attrs->gamma4) ||
1233 || !check_gamma_bounds(attrs->gamma4, attrs->gamma5) 1266 !check_gamma_bounds(attrs->gamma4, attrs->gamma5) ||
1234 || !check_gamma_bounds(attrs->gamma5, 0x00ffffff)) 1267 !check_gamma_bounds(attrs->gamma5, 0x00ffffff))
1235 return -EINVAL; 1268 return -EINVAL;
1269
1236 if (!check_gamma5_errata(attrs->gamma5)) 1270 if (!check_gamma5_errata(attrs->gamma5))
1237 return -EINVAL; 1271 return -EINVAL;
1272
1238 return 0; 1273 return 0;
1239} 1274}
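
[Note] check_gamma_bounds() compares the three 8-bit channels packed into each gamma word and check_gamma() chains it so the ramp from 0 through gamma5 to 0x00ffffff is strictly increasing per channel. The same per-channel ordering test, standalone; the sample ramp values are illustrative:

#include <stdio.h>
#include <stdint.h>

/* strict per-channel ordering, as in check_gamma_bounds() above */
static int gamma_bounds_ok(uint32_t g1, uint32_t g2)
{
	int i;

	for (i = 0; i < 3; i++)
		if (((g1 >> i*8) & 0xff) >= ((g2 >> i*8) & 0xff))
			return 0;
	return 1;
}

int main(void)
{
	uint32_t ramp[] = { 0x202020, 0x404040, 0x808080 };

	printf("0->1 ok: %d\n", gamma_bounds_ok(ramp[0], ramp[1]));
	printf("1->2 ok: %d\n", gamma_bounds_ok(ramp[1], ramp[2]));
	printf("2->0 ok: %d\n", gamma_bounds_ok(ramp[2], ramp[0]));
	return 0;
}
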
1240 1275
@@ -1261,13 +1296,14 @@ int intel_overlay_attrs(struct drm_device *dev, void *data,
1261 mutex_lock(&dev->mode_config.mutex); 1296 mutex_lock(&dev->mode_config.mutex);
1262 mutex_lock(&dev->struct_mutex); 1297 mutex_lock(&dev->struct_mutex);
1263 1298
1299 ret = -EINVAL;
1264 if (!(attrs->flags & I915_OVERLAY_UPDATE_ATTRS)) { 1300 if (!(attrs->flags & I915_OVERLAY_UPDATE_ATTRS)) {
1265 attrs->color_key = overlay->color_key; 1301 attrs->color_key = overlay->color_key;
1266 attrs->brightness = overlay->brightness; 1302 attrs->brightness = overlay->brightness;
1267 attrs->contrast = overlay->contrast; 1303 attrs->contrast = overlay->contrast;
1268 attrs->saturation = overlay->saturation; 1304 attrs->saturation = overlay->saturation;
1269 1305
1270 if (IS_I9XX(dev)) { 1306 if (!IS_GEN2(dev)) {
1271 attrs->gamma0 = I915_READ(OGAMC0); 1307 attrs->gamma0 = I915_READ(OGAMC0);
1272 attrs->gamma1 = I915_READ(OGAMC1); 1308 attrs->gamma1 = I915_READ(OGAMC1);
1273 attrs->gamma2 = I915_READ(OGAMC2); 1309 attrs->gamma2 = I915_READ(OGAMC2);
@@ -1275,29 +1311,20 @@ int intel_overlay_attrs(struct drm_device *dev, void *data,
1275 attrs->gamma4 = I915_READ(OGAMC4); 1311 attrs->gamma4 = I915_READ(OGAMC4);
1276 attrs->gamma5 = I915_READ(OGAMC5); 1312 attrs->gamma5 = I915_READ(OGAMC5);
1277 } 1313 }
1278 ret = 0;
1279 } else { 1314 } else {
1280 overlay->color_key = attrs->color_key; 1315 if (attrs->brightness < -128 || attrs->brightness > 127)
1281 if (attrs->brightness >= -128 && attrs->brightness <= 127) {
1282 overlay->brightness = attrs->brightness;
1283 } else {
1284 ret = -EINVAL;
1285 goto out_unlock; 1316 goto out_unlock;
1286 } 1317 if (attrs->contrast > 255)
1287 if (attrs->contrast <= 255) {
1288 overlay->contrast = attrs->contrast;
1289 } else {
1290 ret = -EINVAL;
1291 goto out_unlock; 1318 goto out_unlock;
1292 } 1319 if (attrs->saturation > 1023)
1293 if (attrs->saturation <= 1023) {
1294 overlay->saturation = attrs->saturation;
1295 } else {
1296 ret = -EINVAL;
1297 goto out_unlock; 1320 goto out_unlock;
1298 }
1299 1321
1300 regs = intel_overlay_map_regs_atomic(overlay); 1322 overlay->color_key = attrs->color_key;
1323 overlay->brightness = attrs->brightness;
1324 overlay->contrast = attrs->contrast;
1325 overlay->saturation = attrs->saturation;
1326
1327 regs = intel_overlay_map_regs(overlay);
1301 if (!regs) { 1328 if (!regs) {
1302 ret = -ENOMEM; 1329 ret = -ENOMEM;
1303 goto out_unlock; 1330 goto out_unlock;
@@ -1305,13 +1332,11 @@ int intel_overlay_attrs(struct drm_device *dev, void *data,
1305 1332
1306 update_reg_attrs(overlay, regs); 1333 update_reg_attrs(overlay, regs);
1307 1334
1308 intel_overlay_unmap_regs_atomic(overlay); 1335 intel_overlay_unmap_regs(overlay, regs);
1309 1336
1310 if (attrs->flags & I915_OVERLAY_UPDATE_GAMMA) { 1337 if (attrs->flags & I915_OVERLAY_UPDATE_GAMMA) {
1311 if (!IS_I9XX(dev)) { 1338 if (IS_GEN2(dev))
1312 ret = -EINVAL;
1313 goto out_unlock; 1339 goto out_unlock;
1314 }
1315 1340
1316 if (overlay->active) { 1341 if (overlay->active) {
1317 ret = -EBUSY; 1342 ret = -EBUSY;
@@ -1319,7 +1344,7 @@ int intel_overlay_attrs(struct drm_device *dev, void *data,
1319 } 1344 }
1320 1345
1321 ret = check_gamma(attrs); 1346 ret = check_gamma(attrs);
1322 if (ret != 0) 1347 if (ret)
1323 goto out_unlock; 1348 goto out_unlock;
1324 1349
1325 I915_WRITE(OGAMC0, attrs->gamma0); 1350 I915_WRITE(OGAMC0, attrs->gamma0);
@@ -1329,9 +1354,9 @@ int intel_overlay_attrs(struct drm_device *dev, void *data,
1329 I915_WRITE(OGAMC4, attrs->gamma4); 1354 I915_WRITE(OGAMC4, attrs->gamma4);
1330 I915_WRITE(OGAMC5, attrs->gamma5); 1355 I915_WRITE(OGAMC5, attrs->gamma5);
1331 } 1356 }
1332 ret = 0;
1333 } 1357 }
1334 1358
1359 ret = 0;
1335out_unlock: 1360out_unlock:
1336 mutex_unlock(&dev->struct_mutex); 1361 mutex_unlock(&dev->struct_mutex);
1337 mutex_unlock(&dev->mode_config.mutex); 1362 mutex_unlock(&dev->mode_config.mutex);
@@ -1347,7 +1372,7 @@ void intel_setup_overlay(struct drm_device *dev)
1347 struct overlay_registers *regs; 1372 struct overlay_registers *regs;
1348 int ret; 1373 int ret;
1349 1374
1350 if (!OVERLAY_EXISTS(dev)) 1375 if (!HAS_OVERLAY(dev))
1351 return; 1376 return;
1352 1377
1353 overlay = kzalloc(sizeof(struct intel_overlay), GFP_KERNEL); 1378 overlay = kzalloc(sizeof(struct intel_overlay), GFP_KERNEL);
@@ -1360,22 +1385,28 @@ void intel_setup_overlay(struct drm_device *dev)
1360 goto out_free; 1385 goto out_free;
1361 overlay->reg_bo = to_intel_bo(reg_bo); 1386 overlay->reg_bo = to_intel_bo(reg_bo);
1362 1387
1363 if (OVERLAY_NONPHYSICAL(dev)) { 1388 if (OVERLAY_NEEDS_PHYSICAL(dev)) {
1364 ret = i915_gem_object_pin(reg_bo, PAGE_SIZE);
1365 if (ret) {
1366 DRM_ERROR("failed to pin overlay register bo\n");
1367 goto out_free_bo;
1368 }
1369 overlay->flip_addr = overlay->reg_bo->gtt_offset;
1370 } else {
1371 ret = i915_gem_attach_phys_object(dev, reg_bo, 1389 ret = i915_gem_attach_phys_object(dev, reg_bo,
1372 I915_GEM_PHYS_OVERLAY_REGS, 1390 I915_GEM_PHYS_OVERLAY_REGS,
1373 0); 1391 PAGE_SIZE);
1374 if (ret) { 1392 if (ret) {
1375 DRM_ERROR("failed to attach phys overlay regs\n"); 1393 DRM_ERROR("failed to attach phys overlay regs\n");
1376 goto out_free_bo; 1394 goto out_free_bo;
1377 } 1395 }
1378 overlay->flip_addr = overlay->reg_bo->phys_obj->handle->busaddr; 1396 overlay->flip_addr = overlay->reg_bo->phys_obj->handle->busaddr;
1397 } else {
1398 ret = i915_gem_object_pin(reg_bo, PAGE_SIZE);
1399 if (ret) {
1400 DRM_ERROR("failed to pin overlay register bo\n");
1401 goto out_free_bo;
1402 }
1403 overlay->flip_addr = overlay->reg_bo->gtt_offset;
1404
1405 ret = i915_gem_object_set_to_gtt_domain(reg_bo, true);
1406 if (ret) {
1407 DRM_ERROR("failed to move overlay register bo into the GTT\n");
1408 goto out_unpin_bo;
1409 }
1379 } 1410 }
1380 1411
1381 /* init all values */ 1412 /* init all values */
@@ -1384,21 +1415,22 @@ void intel_setup_overlay(struct drm_device *dev)
1384 overlay->contrast = 75; 1415 overlay->contrast = 75;
1385 overlay->saturation = 146; 1416 overlay->saturation = 146;
1386 1417
1387 regs = intel_overlay_map_regs_atomic(overlay); 1418 regs = intel_overlay_map_regs(overlay);
1388 if (!regs) 1419 if (!regs)
1389 goto out_free_bo; 1420 goto out_free_bo;
1390 1421
1391 memset(regs, 0, sizeof(struct overlay_registers)); 1422 memset(regs, 0, sizeof(struct overlay_registers));
1392 update_polyphase_filter(regs); 1423 update_polyphase_filter(regs);
1393
1394 update_reg_attrs(overlay, regs); 1424 update_reg_attrs(overlay, regs);
1395 1425
1396 intel_overlay_unmap_regs_atomic(overlay); 1426 intel_overlay_unmap_regs(overlay, regs);
1397 1427
1398 dev_priv->overlay = overlay; 1428 dev_priv->overlay = overlay;
1399 DRM_INFO("initialized overlay support\n"); 1429 DRM_INFO("initialized overlay support\n");
1400 return; 1430 return;
1401 1431
1432out_unpin_bo:
1433 i915_gem_object_unpin(reg_bo);
1402out_free_bo: 1434out_free_bo:
1403 drm_gem_object_unreference(reg_bo); 1435 drm_gem_object_unreference(reg_bo);
1404out_free: 1436out_free:
@@ -1408,18 +1440,23 @@ out_free:
1408 1440
1409void intel_cleanup_overlay(struct drm_device *dev) 1441void intel_cleanup_overlay(struct drm_device *dev)
1410{ 1442{
1411 drm_i915_private_t *dev_priv = dev->dev_private; 1443 drm_i915_private_t *dev_priv = dev->dev_private;
1412 1444
1413 if (dev_priv->overlay) { 1445 if (!dev_priv->overlay)
 1414 	/* The BOs should be freed by the generic code already. 1446 	return;
 1415 	* Furthermore, modesetting teardown happens beforehand so the
1416 * hardware should be off already */
1417 BUG_ON(dev_priv->overlay->active);
1418 1447
 1419 	kfree(dev_priv->overlay); 1448 	/* The BOs should be freed by the generic code already.
 1420 	} 1449 	* Furthermore, modesetting teardown happens beforehand so the
1450 * hardware should be off already */
1451 BUG_ON(dev_priv->overlay->active);
1452
1453 drm_gem_object_unreference_unlocked(&dev_priv->overlay->reg_bo->base);
1454 kfree(dev_priv->overlay);
1421} 1455}
1422 1456
1457#ifdef CONFIG_DEBUG_FS
1458#include <linux/seq_file.h>
1459
1423struct intel_overlay_error_state { 1460struct intel_overlay_error_state {
1424 struct overlay_registers regs; 1461 struct overlay_registers regs;
1425 unsigned long base; 1462 unsigned long base;
@@ -1427,6 +1464,32 @@ struct intel_overlay_error_state {
1427 u32 isr; 1464 u32 isr;
1428}; 1465};
1429 1466
1467static struct overlay_registers *
1468intel_overlay_map_regs_atomic(struct intel_overlay *overlay,
1469 int slot)
1470{
1471 drm_i915_private_t *dev_priv = overlay->dev->dev_private;
1472 struct overlay_registers *regs;
1473
1474 if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
1475 regs = overlay->reg_bo->phys_obj->handle->vaddr;
1476 else
1477 regs = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
1478 overlay->reg_bo->gtt_offset,
1479 slot);
1480
1481 return regs;
1482}
1483
1484static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay,
1485 int slot,
1486 struct overlay_registers *regs)
1487{
1488 if (!OVERLAY_NEEDS_PHYSICAL(overlay->dev))
1489 io_mapping_unmap_atomic(regs, slot);
1490}
1491
1492
1430struct intel_overlay_error_state * 1493struct intel_overlay_error_state *
1431intel_overlay_capture_error_state(struct drm_device *dev) 1494intel_overlay_capture_error_state(struct drm_device *dev)
1432{ 1495{
@@ -1444,17 +1507,17 @@ intel_overlay_capture_error_state(struct drm_device *dev)
1444 1507
1445 error->dovsta = I915_READ(DOVSTA); 1508 error->dovsta = I915_READ(DOVSTA);
1446 error->isr = I915_READ(ISR); 1509 error->isr = I915_READ(ISR);
1447 if (OVERLAY_NONPHYSICAL(overlay->dev)) 1510 if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
1448 error->base = (long) overlay->reg_bo->gtt_offset;
1449 else
1450 error->base = (long) overlay->reg_bo->phys_obj->handle->vaddr; 1511 error->base = (long) overlay->reg_bo->phys_obj->handle->vaddr;
1512 else
1513 error->base = (long) overlay->reg_bo->gtt_offset;
1451 1514
1452 regs = intel_overlay_map_regs_atomic(overlay); 1515 regs = intel_overlay_map_regs_atomic(overlay, KM_IRQ0);
1453 if (!regs) 1516 if (!regs)
1454 goto err; 1517 goto err;
1455 1518
1456 memcpy_fromio(&error->regs, regs, sizeof(struct overlay_registers)); 1519 memcpy_fromio(&error->regs, regs, sizeof(struct overlay_registers));
1457 intel_overlay_unmap_regs_atomic(overlay); 1520 intel_overlay_unmap_regs_atomic(overlay, KM_IRQ0, regs);
1458 1521
1459 return error; 1522 return error;
1460 1523
@@ -1515,3 +1578,4 @@ intel_overlay_print_error_state(struct seq_file *m, struct intel_overlay_error_s
1515 P(UVSCALEV); 1578 P(UVSCALEV);
1516#undef P 1579#undef P
1517} 1580}
1581#endif
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index e7f5299d9d57..92ff8f385278 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -30,6 +30,8 @@
30 30
31#include "intel_drv.h" 31#include "intel_drv.h"
32 32
33#define PCI_LBPC 0xf4 /* legacy/combination backlight modes */
34
33void 35void
34intel_fixed_panel_mode(struct drm_display_mode *fixed_mode, 36intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
35 struct drm_display_mode *adjusted_mode) 37 struct drm_display_mode *adjusted_mode)
@@ -109,3 +111,110 @@ done:
109 dev_priv->pch_pf_pos = (x << 16) | y; 111 dev_priv->pch_pf_pos = (x << 16) | y;
110 dev_priv->pch_pf_size = (width << 16) | height; 112 dev_priv->pch_pf_size = (width << 16) | height;
111} 113}
114
115static int is_backlight_combination_mode(struct drm_device *dev)
116{
117 struct drm_i915_private *dev_priv = dev->dev_private;
118
119 if (INTEL_INFO(dev)->gen >= 4)
120 return I915_READ(BLC_PWM_CTL2) & BLM_COMBINATION_MODE;
121
122 if (IS_GEN2(dev))
123 return I915_READ(BLC_PWM_CTL) & BLM_LEGACY_MODE;
124
125 return 0;
126}
127
128u32 intel_panel_get_max_backlight(struct drm_device *dev)
129{
130 struct drm_i915_private *dev_priv = dev->dev_private;
131 u32 max;
132
133 if (HAS_PCH_SPLIT(dev)) {
134 max = I915_READ(BLC_PWM_PCH_CTL2) >> 16;
135 } else {
136 max = I915_READ(BLC_PWM_CTL);
137 if (IS_PINEVIEW(dev)) {
138 max >>= 17;
139 } else {
140 max >>= 16;
141 if (INTEL_INFO(dev)->gen < 4)
142 max &= ~1;
143 }
144
145 if (is_backlight_combination_mode(dev))
146 max *= 0xff;
147 }
148
149 if (max == 0) {
150 /* XXX add code here to query mode clock or hardware clock
151 * and program max PWM appropriately.
152 */
153 DRM_ERROR("fixme: max PWM is zero.\n");
154 max = 1;
155 }
156
157 DRM_DEBUG_DRIVER("max backlight PWM = %d\n", max);
158 return max;
159}
160
161u32 intel_panel_get_backlight(struct drm_device *dev)
162{
163 struct drm_i915_private *dev_priv = dev->dev_private;
164 u32 val;
165
166 if (HAS_PCH_SPLIT(dev)) {
167 val = I915_READ(BLC_PWM_CPU_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
168 } else {
169 val = I915_READ(BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
170 if (IS_PINEVIEW(dev))
171 val >>= 1;
172
 173 	if (is_backlight_combination_mode(dev)) {
174 u8 lbpc;
175
176 val &= ~1;
177 pci_read_config_byte(dev->pdev, PCI_LBPC, &lbpc);
178 val *= lbpc;
179 val >>= 1;
180 }
181 }
182
183 DRM_DEBUG_DRIVER("get backlight PWM = %d\n", val);
184 return val;
185}
186
187static void intel_pch_panel_set_backlight(struct drm_device *dev, u32 level)
188{
189 struct drm_i915_private *dev_priv = dev->dev_private;
190 u32 val = I915_READ(BLC_PWM_CPU_CTL) & ~BACKLIGHT_DUTY_CYCLE_MASK;
191 I915_WRITE(BLC_PWM_CPU_CTL, val | level);
192}
193
194void intel_panel_set_backlight(struct drm_device *dev, u32 level)
195{
196 struct drm_i915_private *dev_priv = dev->dev_private;
197 u32 tmp;
198
199 DRM_DEBUG_DRIVER("set backlight PWM = %d\n", level);
200
201 if (HAS_PCH_SPLIT(dev))
202 return intel_pch_panel_set_backlight(dev, level);
203
 204 	if (is_backlight_combination_mode(dev)) {
205 u32 max = intel_panel_get_max_backlight(dev);
 206 		u8 lbpc;
 207
 208 		lbpc = level * 0xfe / max + 1;
 209 		level /= lbpc;
 210 		pci_write_config_byte(dev->pdev, PCI_LBPC, lbpc);
211 }
212
213 tmp = I915_READ(BLC_PWM_CTL);
214 if (IS_PINEVIEW(dev)) {
215 tmp &= ~(BACKLIGHT_DUTY_CYCLE_MASK - 1);
216 level <<= 1;
217 } else
218 tmp &= ~BACKLIGHT_DUTY_CYCLE_MASK;
219 I915_WRITE(BLC_PWM_CTL, tmp | level);
220}
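
[Note] In combination mode the requested level is split between two controls: the coarse LBPC byte in PCI config space and the fine PWM duty cycle, using lbpc = level * 0xfe / max + 1 followed by level /= lbpc, exactly as in intel_panel_set_backlight() above. A standalone check of the arithmetic; the max value is an example shaped like intel_panel_get_max_backlight()'s PWM-max-times-0xff result:

#include <stdio.h>
#include <stdint.h>

/* split a requested level between the LBPC coarse byte and the PWM
 * duty cycle, as the combination-mode path above does */
static void split_level(uint32_t level, uint32_t max,
			uint8_t *lbpc, uint32_t *duty)
{
	*lbpc = level * 0xfe / max + 1;
	*duty = level / *lbpc;
}

int main(void)
{
	uint32_t max = 0x1000 * 0xff;	/* example: PWM max scaled by 0xff */
	uint8_t lbpc;
	uint32_t duty;

	split_level(max / 2, max, &lbpc, &duty);
	printf("half brightness: lbpc=%u duty=%u\n", lbpc, duty);
	return 0;
}
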
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index cb3508f78bc3..d89b88791aac 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -32,6 +32,7 @@
32#include "i915_drv.h" 32#include "i915_drv.h"
33#include "i915_drm.h" 33#include "i915_drm.h"
34#include "i915_trace.h" 34#include "i915_trace.h"
35#include "intel_drv.h"
35 36
36static u32 i915_gem_get_seqno(struct drm_device *dev) 37static u32 i915_gem_get_seqno(struct drm_device *dev)
37{ 38{
@@ -49,9 +50,9 @@ static u32 i915_gem_get_seqno(struct drm_device *dev)
49 50
50static void 51static void
51render_ring_flush(struct drm_device *dev, 52render_ring_flush(struct drm_device *dev,
52 struct intel_ring_buffer *ring, 53 struct intel_ring_buffer *ring,
53 u32 invalidate_domains, 54 u32 invalidate_domains,
54 u32 flush_domains) 55 u32 flush_domains)
55{ 56{
56 drm_i915_private_t *dev_priv = dev->dev_private; 57 drm_i915_private_t *dev_priv = dev->dev_private;
57 u32 cmd; 58 u32 cmd;
@@ -97,7 +98,7 @@ render_ring_flush(struct drm_device *dev,
97 if ((invalidate_domains|flush_domains) & 98 if ((invalidate_domains|flush_domains) &
98 I915_GEM_DOMAIN_RENDER) 99 I915_GEM_DOMAIN_RENDER)
99 cmd &= ~MI_NO_WRITE_FLUSH; 100 cmd &= ~MI_NO_WRITE_FLUSH;
100 if (!IS_I965G(dev)) { 101 if (INTEL_INFO(dev)->gen < 4) {
101 /* 102 /*
102 * On the 965, the sampler cache always gets flushed 103 * On the 965, the sampler cache always gets flushed
103 * and this bit is reserved. 104 * and this bit is reserved.
@@ -118,38 +119,26 @@ render_ring_flush(struct drm_device *dev,
118 } 119 }
119} 120}
120 121
121static unsigned int render_ring_get_head(struct drm_device *dev, 122static void ring_set_tail(struct drm_device *dev,
122 struct intel_ring_buffer *ring) 123 struct intel_ring_buffer *ring,
123{ 124 u32 value)
124 drm_i915_private_t *dev_priv = dev->dev_private;
125 return I915_READ(PRB0_HEAD) & HEAD_ADDR;
126}
127
128static unsigned int render_ring_get_tail(struct drm_device *dev,
129 struct intel_ring_buffer *ring)
130{ 125{
131 drm_i915_private_t *dev_priv = dev->dev_private; 126 drm_i915_private_t *dev_priv = dev->dev_private;
132 return I915_READ(PRB0_TAIL) & TAIL_ADDR; 127 I915_WRITE_TAIL(ring, ring->tail);
133} 128}
134 129
135static unsigned int render_ring_get_active_head(struct drm_device *dev, 130u32 intel_ring_get_active_head(struct drm_device *dev,
136 struct intel_ring_buffer *ring) 131 struct intel_ring_buffer *ring)
137{ 132{
138 drm_i915_private_t *dev_priv = dev->dev_private; 133 drm_i915_private_t *dev_priv = dev->dev_private;
139 u32 acthd_reg = IS_I965G(dev) ? ACTHD_I965 : ACTHD; 134 u32 acthd_reg = INTEL_INFO(dev)->gen >= 4 ?
135 RING_ACTHD(ring->mmio_base) : ACTHD;
140 136
141 return I915_READ(acthd_reg); 137 return I915_READ(acthd_reg);
142} 138}
143 139
144static void render_ring_advance_ring(struct drm_device *dev,
145 struct intel_ring_buffer *ring)
146{
147 drm_i915_private_t *dev_priv = dev->dev_private;
148 I915_WRITE(PRB0_TAIL, ring->tail);
149}
150
151static int init_ring_common(struct drm_device *dev, 140static int init_ring_common(struct drm_device *dev,
152 struct intel_ring_buffer *ring) 141 struct intel_ring_buffer *ring)
153{ 142{
154 u32 head; 143 u32 head;
155 drm_i915_private_t *dev_priv = dev->dev_private; 144 drm_i915_private_t *dev_priv = dev->dev_private;
@@ -157,57 +146,57 @@ static int init_ring_common(struct drm_device *dev,
157 obj_priv = to_intel_bo(ring->gem_object); 146 obj_priv = to_intel_bo(ring->gem_object);
158 147
159 /* Stop the ring if it's running. */ 148 /* Stop the ring if it's running. */
160 I915_WRITE(ring->regs.ctl, 0); 149 I915_WRITE_CTL(ring, 0);
161 I915_WRITE(ring->regs.head, 0); 150 I915_WRITE_HEAD(ring, 0);
162 I915_WRITE(ring->regs.tail, 0); 151 ring->set_tail(dev, ring, 0);
163 152
164 /* Initialize the ring. */ 153 /* Initialize the ring. */
165 I915_WRITE(ring->regs.start, obj_priv->gtt_offset); 154 I915_WRITE_START(ring, obj_priv->gtt_offset);
166 head = ring->get_head(dev, ring); 155 head = I915_READ_HEAD(ring) & HEAD_ADDR;
167 156
168 /* G45 ring initialization fails to reset head to zero */ 157 /* G45 ring initialization fails to reset head to zero */
169 if (head != 0) { 158 if (head != 0) {
170 DRM_ERROR("%s head not reset to zero " 159 DRM_ERROR("%s head not reset to zero "
171 "ctl %08x head %08x tail %08x start %08x\n", 160 "ctl %08x head %08x tail %08x start %08x\n",
172 ring->name, 161 ring->name,
173 I915_READ(ring->regs.ctl), 162 I915_READ_CTL(ring),
174 I915_READ(ring->regs.head), 163 I915_READ_HEAD(ring),
175 I915_READ(ring->regs.tail), 164 I915_READ_TAIL(ring),
176 I915_READ(ring->regs.start)); 165 I915_READ_START(ring));
177 166
178 I915_WRITE(ring->regs.head, 0); 167 I915_WRITE_HEAD(ring, 0);
179 168
180 DRM_ERROR("%s head forced to zero " 169 DRM_ERROR("%s head forced to zero "
181 "ctl %08x head %08x tail %08x start %08x\n", 170 "ctl %08x head %08x tail %08x start %08x\n",
182 ring->name, 171 ring->name,
183 I915_READ(ring->regs.ctl), 172 I915_READ_CTL(ring),
184 I915_READ(ring->regs.head), 173 I915_READ_HEAD(ring),
185 I915_READ(ring->regs.tail), 174 I915_READ_TAIL(ring),
186 I915_READ(ring->regs.start)); 175 I915_READ_START(ring));
187 } 176 }
188 177
189 I915_WRITE(ring->regs.ctl, 178 I915_WRITE_CTL(ring,
190 ((ring->gem_object->size - PAGE_SIZE) & RING_NR_PAGES) 179 ((ring->gem_object->size - PAGE_SIZE) & RING_NR_PAGES)
191 | RING_NO_REPORT | RING_VALID); 180 | RING_NO_REPORT | RING_VALID);
192 181
193 head = I915_READ(ring->regs.head) & HEAD_ADDR; 182 head = I915_READ_HEAD(ring) & HEAD_ADDR;
194 /* If the head is still not zero, the ring is dead */ 183 /* If the head is still not zero, the ring is dead */
195 if (head != 0) { 184 if (head != 0) {
196 DRM_ERROR("%s initialization failed " 185 DRM_ERROR("%s initialization failed "
197 "ctl %08x head %08x tail %08x start %08x\n", 186 "ctl %08x head %08x tail %08x start %08x\n",
198 ring->name, 187 ring->name,
199 I915_READ(ring->regs.ctl), 188 I915_READ_CTL(ring),
200 I915_READ(ring->regs.head), 189 I915_READ_HEAD(ring),
201 I915_READ(ring->regs.tail), 190 I915_READ_TAIL(ring),
202 I915_READ(ring->regs.start)); 191 I915_READ_START(ring));
203 return -EIO; 192 return -EIO;
204 } 193 }
205 194
206 if (!drm_core_check_feature(dev, DRIVER_MODESET)) 195 if (!drm_core_check_feature(dev, DRIVER_MODESET))
207 i915_kernel_lost_context(dev); 196 i915_kernel_lost_context(dev);
208 else { 197 else {
209 ring->head = ring->get_head(dev, ring); 198 ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
210 ring->tail = ring->get_tail(dev, ring); 199 ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
211 ring->space = ring->head - (ring->tail + 8); 200 ring->space = ring->head - (ring->tail + 8);
212 if (ring->space < 0) 201 if (ring->space < 0)
213 ring->space += ring->size; 202 ring->space += ring->size;
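
[Note] The free-space computation above treats the ring as circular: space = head - (tail + 8), wrapped by the ring size when negative; the 8-byte slack keeps the tail from ever catching the head exactly, which would be indistinguishable from an empty ring. The same calculation, standalone, with example head/tail positions:

#include <stdio.h>

/* free space in a circular ring, matching init_ring_common() above:
 * leave 8 bytes of slack so tail never catches up with head exactly */
static int ring_space(int head, int tail, int size)
{
	int space = head - (tail + 8);

	if (space < 0)
		space += size;
	return space;
}

int main(void)
{
	printf("head ahead of tail: %d\n", ring_space(4096, 1024, 32768));
	printf("tail wrapped:       %d\n", ring_space(1024, 4096, 32768));
	return 0;
}
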
@@ -216,13 +205,13 @@ static int init_ring_common(struct drm_device *dev,
216} 205}
217 206
218static int init_render_ring(struct drm_device *dev, 207static int init_render_ring(struct drm_device *dev,
219 struct intel_ring_buffer *ring) 208 struct intel_ring_buffer *ring)
220{ 209{
221 drm_i915_private_t *dev_priv = dev->dev_private; 210 drm_i915_private_t *dev_priv = dev->dev_private;
222 int ret = init_ring_common(dev, ring); 211 int ret = init_ring_common(dev, ring);
223 int mode; 212 int mode;
224 213
225 if (IS_I9XX(dev) && !IS_GEN3(dev)) { 214 if (INTEL_INFO(dev)->gen > 3) {
226 mode = VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH; 215 mode = VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH;
227 if (IS_GEN6(dev)) 216 if (IS_GEN6(dev))
228 mode |= MI_FLUSH_ENABLE << 16 | MI_FLUSH_ENABLE; 217 mode |= MI_FLUSH_ENABLE << 16 | MI_FLUSH_ENABLE;
@@ -250,9 +239,8 @@ do { \
250 */ 239 */
251static u32 240static u32
252render_ring_add_request(struct drm_device *dev, 241render_ring_add_request(struct drm_device *dev,
253 struct intel_ring_buffer *ring, 242 struct intel_ring_buffer *ring,
254 struct drm_file *file_priv, 243 u32 flush_domains)
255 u32 flush_domains)
256{ 244{
257 drm_i915_private_t *dev_priv = dev->dev_private; 245 drm_i915_private_t *dev_priv = dev->dev_private;
258 u32 seqno; 246 u32 seqno;
@@ -315,8 +303,8 @@ render_ring_add_request(struct drm_device *dev,
315} 303}
316 304
317static u32 305static u32
318render_ring_get_gem_seqno(struct drm_device *dev, 306render_ring_get_seqno(struct drm_device *dev,
319 struct intel_ring_buffer *ring) 307 struct intel_ring_buffer *ring)
320{ 308{
321 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 309 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
322 if (HAS_PIPE_CONTROL(dev)) 310 if (HAS_PIPE_CONTROL(dev))
@@ -327,7 +315,7 @@ render_ring_get_gem_seqno(struct drm_device *dev,
327 315
328static void 316static void
329render_ring_get_user_irq(struct drm_device *dev, 317render_ring_get_user_irq(struct drm_device *dev,
330 struct intel_ring_buffer *ring) 318 struct intel_ring_buffer *ring)
331{ 319{
332 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 320 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
333 unsigned long irqflags; 321 unsigned long irqflags;
@@ -344,7 +332,7 @@ render_ring_get_user_irq(struct drm_device *dev,
344 332
345static void 333static void
346render_ring_put_user_irq(struct drm_device *dev, 334render_ring_put_user_irq(struct drm_device *dev,
347 struct intel_ring_buffer *ring) 335 struct intel_ring_buffer *ring)
348{ 336{
349 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 337 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
350 unsigned long irqflags; 338 unsigned long irqflags;
@@ -360,21 +348,23 @@ render_ring_put_user_irq(struct drm_device *dev,
360 spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags); 348 spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
361} 349}
362 350
363static void render_setup_status_page(struct drm_device *dev, 351void intel_ring_setup_status_page(struct drm_device *dev,
364 struct intel_ring_buffer *ring) 352 struct intel_ring_buffer *ring)
365{ 353{
366 drm_i915_private_t *dev_priv = dev->dev_private; 354 drm_i915_private_t *dev_priv = dev->dev_private;
367 if (IS_GEN6(dev)) { 355 if (IS_GEN6(dev)) {
368 I915_WRITE(HWS_PGA_GEN6, ring->status_page.gfx_addr); 356 I915_WRITE(RING_HWS_PGA_GEN6(ring->mmio_base),
369 I915_READ(HWS_PGA_GEN6); /* posting read */ 357 ring->status_page.gfx_addr);
358 I915_READ(RING_HWS_PGA_GEN6(ring->mmio_base)); /* posting read */
370 } else { 359 } else {
371 I915_WRITE(HWS_PGA, ring->status_page.gfx_addr); 360 I915_WRITE(RING_HWS_PGA(ring->mmio_base),
372 I915_READ(HWS_PGA); /* posting read */ 361 ring->status_page.gfx_addr);
362 I915_READ(RING_HWS_PGA(ring->mmio_base)); /* posting read */
373 } 363 }
374 364
375} 365}
376 366
377void 367static void
378bsd_ring_flush(struct drm_device *dev, 368bsd_ring_flush(struct drm_device *dev,
379 struct intel_ring_buffer *ring, 369 struct intel_ring_buffer *ring,
380 u32 invalidate_domains, 370 u32 invalidate_domains,
@@ -386,45 +376,16 @@ bsd_ring_flush(struct drm_device *dev,
386 intel_ring_advance(dev, ring); 376 intel_ring_advance(dev, ring);
387} 377}
388 378
389static inline unsigned int bsd_ring_get_head(struct drm_device *dev,
390 struct intel_ring_buffer *ring)
391{
392 drm_i915_private_t *dev_priv = dev->dev_private;
393 return I915_READ(BSD_RING_HEAD) & HEAD_ADDR;
394}
395
396static inline unsigned int bsd_ring_get_tail(struct drm_device *dev,
397 struct intel_ring_buffer *ring)
398{
399 drm_i915_private_t *dev_priv = dev->dev_private;
400 return I915_READ(BSD_RING_TAIL) & TAIL_ADDR;
401}
402
403static inline unsigned int bsd_ring_get_active_head(struct drm_device *dev,
404 struct intel_ring_buffer *ring)
405{
406 drm_i915_private_t *dev_priv = dev->dev_private;
407 return I915_READ(BSD_RING_ACTHD);
408}
409
410static inline void bsd_ring_advance_ring(struct drm_device *dev,
411 struct intel_ring_buffer *ring)
412{
413 drm_i915_private_t *dev_priv = dev->dev_private;
414 I915_WRITE(BSD_RING_TAIL, ring->tail);
415}
416
417static int init_bsd_ring(struct drm_device *dev, 379static int init_bsd_ring(struct drm_device *dev,
418 struct intel_ring_buffer *ring) 380 struct intel_ring_buffer *ring)
419{ 381{
420 return init_ring_common(dev, ring); 382 return init_ring_common(dev, ring);
421} 383}
422 384
423static u32 385static u32
424bsd_ring_add_request(struct drm_device *dev, 386bsd_ring_add_request(struct drm_device *dev,
425 struct intel_ring_buffer *ring, 387 struct intel_ring_buffer *ring,
426 struct drm_file *file_priv, 388 u32 flush_domains)
427 u32 flush_domains)
428{ 389{
429 u32 seqno; 390 u32 seqno;
430 391
@@ -443,40 +404,32 @@ bsd_ring_add_request(struct drm_device *dev,
443 return seqno; 404 return seqno;
444} 405}
445 406
446static void bsd_setup_status_page(struct drm_device *dev,
447 struct intel_ring_buffer *ring)
448{
449 drm_i915_private_t *dev_priv = dev->dev_private;
450 I915_WRITE(BSD_HWS_PGA, ring->status_page.gfx_addr);
451 I915_READ(BSD_HWS_PGA);
452}
453
454static void 407static void
455bsd_ring_get_user_irq(struct drm_device *dev, 408bsd_ring_get_user_irq(struct drm_device *dev,
456 struct intel_ring_buffer *ring) 409 struct intel_ring_buffer *ring)
457{ 410{
458 /* do nothing */ 411 /* do nothing */
459} 412}
460static void 413static void
461bsd_ring_put_user_irq(struct drm_device *dev, 414bsd_ring_put_user_irq(struct drm_device *dev,
462 struct intel_ring_buffer *ring) 415 struct intel_ring_buffer *ring)
463{ 416{
464 /* do nothing */ 417 /* do nothing */
465} 418}
466 419
467static u32 420static u32
468bsd_ring_get_gem_seqno(struct drm_device *dev, 421bsd_ring_get_seqno(struct drm_device *dev,
469 struct intel_ring_buffer *ring) 422 struct intel_ring_buffer *ring)
470{ 423{
471 return intel_read_status_page(ring, I915_GEM_HWS_INDEX); 424 return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
472} 425}
473 426
474static int 427static int
475bsd_ring_dispatch_gem_execbuffer(struct drm_device *dev, 428bsd_ring_dispatch_gem_execbuffer(struct drm_device *dev,
476 struct intel_ring_buffer *ring, 429 struct intel_ring_buffer *ring,
477 struct drm_i915_gem_execbuffer2 *exec, 430 struct drm_i915_gem_execbuffer2 *exec,
478 struct drm_clip_rect *cliprects, 431 struct drm_clip_rect *cliprects,
479 uint64_t exec_offset) 432 uint64_t exec_offset)
480{ 433{
481 uint32_t exec_start; 434 uint32_t exec_start;
482 exec_start = (uint32_t) exec_offset + exec->batch_start_offset; 435 exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
@@ -491,10 +444,10 @@ bsd_ring_dispatch_gem_execbuffer(struct drm_device *dev,
491 444
492static int 445static int
493render_ring_dispatch_gem_execbuffer(struct drm_device *dev, 446render_ring_dispatch_gem_execbuffer(struct drm_device *dev,
494 struct intel_ring_buffer *ring, 447 struct intel_ring_buffer *ring,
495 struct drm_i915_gem_execbuffer2 *exec, 448 struct drm_i915_gem_execbuffer2 *exec,
496 struct drm_clip_rect *cliprects, 449 struct drm_clip_rect *cliprects,
497 uint64_t exec_offset) 450 uint64_t exec_offset)
498{ 451{
499 drm_i915_private_t *dev_priv = dev->dev_private; 452 drm_i915_private_t *dev_priv = dev->dev_private;
500 int nbox = exec->num_cliprects; 453 int nbox = exec->num_cliprects;
@@ -524,7 +477,7 @@ render_ring_dispatch_gem_execbuffer(struct drm_device *dev,
524 intel_ring_emit(dev, ring, 0); 477 intel_ring_emit(dev, ring, 0);
525 } else { 478 } else {
526 intel_ring_begin(dev, ring, 4); 479 intel_ring_begin(dev, ring, 4);
527 if (IS_I965G(dev)) { 480 if (INTEL_INFO(dev)->gen >= 4) {
528 intel_ring_emit(dev, ring, 481 intel_ring_emit(dev, ring,
529 MI_BATCH_BUFFER_START | (2 << 6) 482 MI_BATCH_BUFFER_START | (2 << 6)
530 | MI_BATCH_NON_SECURE_I965); 483 | MI_BATCH_NON_SECURE_I965);
@@ -553,7 +506,7 @@ render_ring_dispatch_gem_execbuffer(struct drm_device *dev,
553} 506}
554 507
555static void cleanup_status_page(struct drm_device *dev, 508static void cleanup_status_page(struct drm_device *dev,
556 struct intel_ring_buffer *ring) 509 struct intel_ring_buffer *ring)
557{ 510{
558 drm_i915_private_t *dev_priv = dev->dev_private; 511 drm_i915_private_t *dev_priv = dev->dev_private;
559 struct drm_gem_object *obj; 512 struct drm_gem_object *obj;
@@ -573,7 +526,7 @@ static void cleanup_status_page(struct drm_device *dev,
573} 526}
574 527
575static int init_status_page(struct drm_device *dev, 528static int init_status_page(struct drm_device *dev,
576 struct intel_ring_buffer *ring) 529 struct intel_ring_buffer *ring)
577{ 530{
578 drm_i915_private_t *dev_priv = dev->dev_private; 531 drm_i915_private_t *dev_priv = dev->dev_private;
579 struct drm_gem_object *obj; 532 struct drm_gem_object *obj;
@@ -603,7 +556,7 @@ static int init_status_page(struct drm_device *dev,
603 ring->status_page.obj = obj; 556 ring->status_page.obj = obj;
604 memset(ring->status_page.page_addr, 0, PAGE_SIZE); 557 memset(ring->status_page.page_addr, 0, PAGE_SIZE);
605 558
606 ring->setup_status_page(dev, ring); 559 intel_ring_setup_status_page(dev, ring);
607 DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n", 560 DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
608 ring->name, ring->status_page.gfx_addr); 561 ring->name, ring->status_page.gfx_addr);
609 562
@@ -617,15 +570,17 @@ err:
617 return ret; 570 return ret;
618} 571}
619 572
620
621int intel_init_ring_buffer(struct drm_device *dev, 573int intel_init_ring_buffer(struct drm_device *dev,
622 struct intel_ring_buffer *ring) 574 struct intel_ring_buffer *ring)
623{ 575{
576 struct drm_i915_private *dev_priv = dev->dev_private;
624 struct drm_i915_gem_object *obj_priv; 577 struct drm_i915_gem_object *obj_priv;
625 struct drm_gem_object *obj; 578 struct drm_gem_object *obj;
626 int ret; 579 int ret;
627 580
628 ring->dev = dev; 581 ring->dev = dev;
582 INIT_LIST_HEAD(&ring->active_list);
583 INIT_LIST_HEAD(&ring->request_list);
629 584
630 if (I915_NEED_GFX_HWS(dev)) { 585 if (I915_NEED_GFX_HWS(dev)) {
631 ret = init_status_page(dev, ring); 586 ret = init_status_page(dev, ring);
@@ -642,7 +597,7 @@ int intel_init_ring_buffer(struct drm_device *dev,
642 597
643 ring->gem_object = obj; 598 ring->gem_object = obj;
644 599
645 ret = i915_gem_object_pin(obj, ring->alignment); 600 ret = i915_gem_object_pin(obj, PAGE_SIZE);
646 if (ret) 601 if (ret)
647 goto err_unref; 602 goto err_unref;
648 603
@@ -668,14 +623,12 @@ int intel_init_ring_buffer(struct drm_device *dev,
668 if (!drm_core_check_feature(dev, DRIVER_MODESET)) 623 if (!drm_core_check_feature(dev, DRIVER_MODESET))
669 i915_kernel_lost_context(dev); 624 i915_kernel_lost_context(dev);
670 else { 625 else {
671 ring->head = ring->get_head(dev, ring); 626 ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
672 ring->tail = ring->get_tail(dev, ring); 627 ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
673 ring->space = ring->head - (ring->tail + 8); 628 ring->space = ring->head - (ring->tail + 8);
674 if (ring->space < 0) 629 if (ring->space < 0)
675 ring->space += ring->size; 630 ring->space += ring->size;
676 } 631 }
677 INIT_LIST_HEAD(&ring->active_list);
678 INIT_LIST_HEAD(&ring->request_list);
679 return ret; 632 return ret;
680 633
681err_unmap: 634err_unmap:
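The head/tail arithmetic above (space = head - (tail + 8), wrapping by adding size) is the standard circular-buffer free-space count, with 8 bytes of slack so the tail can never catch up to the head. A standalone sketch of the same arithmetic; the sample values are invented:

/* Free space in a circular ring, mirroring the driver's arithmetic.
 * The 8-byte slack keeps tail from ever becoming equal to head,
 * which would be ambiguous (empty vs. full).
 */
#include <assert.h>

static int ring_space(unsigned head, unsigned tail, unsigned size)
{
    int space = (int)head - (int)(tail + 8);
    if (space < 0)
        space += (int)size;
    return space;
}

int main(void)
{
    assert(ring_space(0, 0, 4096) == 4096 - 8);  /* empty ring */
    assert(ring_space(256, 128, 4096) == 120);   /* head ahead of tail */
    assert(ring_space(128, 256, 4096) == 3960);  /* tail wrapped past head */
    return 0;
}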
@@ -691,7 +644,7 @@ err_hws:
691} 644}
692 645
693void intel_cleanup_ring_buffer(struct drm_device *dev, 646void intel_cleanup_ring_buffer(struct drm_device *dev,
694 struct intel_ring_buffer *ring) 647 struct intel_ring_buffer *ring)
695{ 648{
696 if (ring->gem_object == NULL) 649 if (ring->gem_object == NULL)
697 return; 650 return;
@@ -704,8 +657,8 @@ void intel_cleanup_ring_buffer(struct drm_device *dev,
704 cleanup_status_page(dev, ring); 657 cleanup_status_page(dev, ring);
705} 658}
706 659
707int intel_wrap_ring_buffer(struct drm_device *dev, 660static int intel_wrap_ring_buffer(struct drm_device *dev,
708 struct intel_ring_buffer *ring) 661 struct intel_ring_buffer *ring)
709{ 662{
710 unsigned int *virt; 663 unsigned int *virt;
711 int rem; 664 int rem;
@@ -731,14 +684,15 @@ int intel_wrap_ring_buffer(struct drm_device *dev,
731} 684}
732 685
733int intel_wait_ring_buffer(struct drm_device *dev, 686int intel_wait_ring_buffer(struct drm_device *dev,
734 struct intel_ring_buffer *ring, int n) 687 struct intel_ring_buffer *ring, int n)
735{ 688{
736 unsigned long end; 689 unsigned long end;
690 drm_i915_private_t *dev_priv = dev->dev_private;
737 691
738 trace_i915_ring_wait_begin (dev); 692 trace_i915_ring_wait_begin (dev);
739 end = jiffies + 3 * HZ; 693 end = jiffies + 3 * HZ;
740 do { 694 do {
741 ring->head = ring->get_head(dev, ring); 695 ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
742 ring->space = ring->head - (ring->tail + 8); 696 ring->space = ring->head - (ring->tail + 8);
743 if (ring->space < 0) 697 if (ring->space < 0)
744 ring->space += ring->size; 698 ring->space += ring->size;
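intel_wait_ring_buffer() re-samples HEAD until enough space appears or 3 seconds of jiffies pass. A user-space sketch of the same bounded-poll shape, using CLOCK_MONOTONIC as a stand-in for jiffies and an invented space callback:

/* Re-sample a condition until it holds or a deadline passes. */
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static double now_sec(void)
{
    struct timespec ts;
    clock_gettime(CLOCK_MONOTONIC, &ts);
    return ts.tv_sec + ts.tv_nsec / 1e9;
}

static bool wait_for_space(int (*space_fn)(void), int need, double timeout)
{
    double end = now_sec() + timeout; /* cf. end = jiffies + 3 * HZ */
    do {
        if (space_fn() >= need)
            return true;
        /* the driver also yields/sleeps between samples; omitted here */
    } while (now_sec() < end);
    return false; /* the driver maps this to -EBUSY */
}

static int fake_space(void) { return 64; }

int main(void)
{
    printf("got space: %d\n", wait_for_space(fake_space, 32, 3.0));
    return 0;
}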
@@ -760,7 +714,8 @@ int intel_wait_ring_buffer(struct drm_device *dev,
760} 714}
761 715
762void intel_ring_begin(struct drm_device *dev, 716void intel_ring_begin(struct drm_device *dev,
763 struct intel_ring_buffer *ring, int num_dwords) 717 struct intel_ring_buffer *ring,
718 int num_dwords)
764{ 719{
765 int n = 4*num_dwords; 720 int n = 4*num_dwords;
766 if (unlikely(ring->tail + n > ring->size)) 721 if (unlikely(ring->tail + n > ring->size))
@@ -772,16 +727,16 @@ void intel_ring_begin(struct drm_device *dev,
772} 727}
773 728
774void intel_ring_advance(struct drm_device *dev, 729void intel_ring_advance(struct drm_device *dev,
775 struct intel_ring_buffer *ring) 730 struct intel_ring_buffer *ring)
776{ 731{
777 ring->tail &= ring->size - 1; 732 ring->tail &= ring->size - 1;
778 ring->advance_ring(dev, ring); 733 ring->set_tail(dev, ring, ring->tail);
779} 734}
780 735
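intel_ring_begin() wraps the ring first if a reservation would run off the end, and intel_ring_advance() masks the tail with size - 1 before publishing it through set_tail — which only works because the ring size is a power of two. A sketch of that emit/advance pair under the power-of-two assumption:

/* begin/emit/advance in miniature. Masking tail with (size - 1)
 * on advance only works because RING_SIZE is a power of two.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define RING_SIZE 64 /* power of two, in bytes */

struct ring {
    uint8_t  buf[RING_SIZE];
    unsigned tail;
};

static void ring_emit(struct ring *r, uint32_t dword)
{
    memcpy(r->buf + r->tail, &dword, 4); /* begin() guarantees no mid-write wrap */
    r->tail += 4;
}

static unsigned ring_advance(struct ring *r)
{
    r->tail &= RING_SIZE - 1; /* wrap the published tail */
    return r->tail;           /* the driver writes this to the TAIL register */
}

int main(void)
{
    struct ring r = { .tail = RING_SIZE - 4 };
    ring_emit(&r, 0xdeadbeef);                        /* fills the last slot */
    printf("published tail: %u\n", ring_advance(&r)); /* wraps to 0 */
    return 0;
}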
781void intel_fill_struct(struct drm_device *dev, 736void intel_fill_struct(struct drm_device *dev,
782 struct intel_ring_buffer *ring, 737 struct intel_ring_buffer *ring,
783 void *data, 738 void *data,
784 unsigned int len) 739 unsigned int len)
785{ 740{
786 unsigned int *virt = ring->virtual_start + ring->tail; 741 unsigned int *virt = ring->virtual_start + ring->tail;
787 BUG_ON((len&~(4-1)) != 0); 742 BUG_ON((len&~(4-1)) != 0);
@@ -793,76 +748,136 @@ void intel_fill_struct(struct drm_device *dev,
793 intel_ring_advance(dev, ring); 748 intel_ring_advance(dev, ring);
794} 749}
795 750
796struct intel_ring_buffer render_ring = { 751static const struct intel_ring_buffer render_ring = {
797 .name = "render ring", 752 .name = "render ring",
798 .regs = { 753 .id = RING_RENDER,
799 .ctl = PRB0_CTL, 754 .mmio_base = RENDER_RING_BASE,
800 .head = PRB0_HEAD,
801 .tail = PRB0_TAIL,
802 .start = PRB0_START
803 },
804 .ring_flag = I915_EXEC_RENDER,
805 .size = 32 * PAGE_SIZE, 755 .size = 32 * PAGE_SIZE,
806 .alignment = PAGE_SIZE,
807 .virtual_start = NULL,
808 .dev = NULL,
809 .gem_object = NULL,
810 .head = 0,
811 .tail = 0,
812 .space = 0,
813 .user_irq_refcount = 0,
814 .irq_gem_seqno = 0,
815 .waiting_gem_seqno = 0,
816 .setup_status_page = render_setup_status_page,
817 .init = init_render_ring, 756 .init = init_render_ring,
818 .get_head = render_ring_get_head, 757 .set_tail = ring_set_tail,
819 .get_tail = render_ring_get_tail,
820 .get_active_head = render_ring_get_active_head,
821 .advance_ring = render_ring_advance_ring,
822 .flush = render_ring_flush, 758 .flush = render_ring_flush,
823 .add_request = render_ring_add_request, 759 .add_request = render_ring_add_request,
824 .get_gem_seqno = render_ring_get_gem_seqno, 760 .get_seqno = render_ring_get_seqno,
825 .user_irq_get = render_ring_get_user_irq, 761 .user_irq_get = render_ring_get_user_irq,
826 .user_irq_put = render_ring_put_user_irq, 762 .user_irq_put = render_ring_put_user_irq,
827 .dispatch_gem_execbuffer = render_ring_dispatch_gem_execbuffer, 763 .dispatch_gem_execbuffer = render_ring_dispatch_gem_execbuffer,
828 .status_page = {NULL, 0, NULL},
829 .map = {0,}
830}; 764};
831 765
832/* ring buffer for bit-stream decoder */ 766/* ring buffer for bit-stream decoder */
833 767
834struct intel_ring_buffer bsd_ring = { 768static const struct intel_ring_buffer bsd_ring = {
835 .name = "bsd ring", 769 .name = "bsd ring",
836 .regs = { 770 .id = RING_BSD,
837 .ctl = BSD_RING_CTL, 771 .mmio_base = BSD_RING_BASE,
838 .head = BSD_RING_HEAD,
839 .tail = BSD_RING_TAIL,
840 .start = BSD_RING_START
841 },
842 .ring_flag = I915_EXEC_BSD,
843 .size = 32 * PAGE_SIZE, 772 .size = 32 * PAGE_SIZE,
844 .alignment = PAGE_SIZE,
845 .virtual_start = NULL,
846 .dev = NULL,
847 .gem_object = NULL,
848 .head = 0,
849 .tail = 0,
850 .space = 0,
851 .user_irq_refcount = 0,
852 .irq_gem_seqno = 0,
853 .waiting_gem_seqno = 0,
854 .setup_status_page = bsd_setup_status_page,
855 .init = init_bsd_ring, 773 .init = init_bsd_ring,
856 .get_head = bsd_ring_get_head, 774 .set_tail = ring_set_tail,
857 .get_tail = bsd_ring_get_tail,
858 .get_active_head = bsd_ring_get_active_head,
859 .advance_ring = bsd_ring_advance_ring,
860 .flush = bsd_ring_flush, 775 .flush = bsd_ring_flush,
861 .add_request = bsd_ring_add_request, 776 .add_request = bsd_ring_add_request,
862 .get_gem_seqno = bsd_ring_get_gem_seqno, 777 .get_seqno = bsd_ring_get_seqno,
863 .user_irq_get = bsd_ring_get_user_irq, 778 .user_irq_get = bsd_ring_get_user_irq,
864 .user_irq_put = bsd_ring_put_user_irq, 779 .user_irq_put = bsd_ring_put_user_irq,
865 .dispatch_gem_execbuffer = bsd_ring_dispatch_gem_execbuffer, 780 .dispatch_gem_execbuffer = bsd_ring_dispatch_gem_execbuffer,
866 .status_page = {NULL, 0, NULL},
867 .map = {0,}
868}; 781};
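Collapsing the old fully spelled-out initializers into const templates, as above, leaves only the per-ring identity (name, id, mmio_base) and its ops; all mutable state is implicitly zero in the template and diverges only in the runtime copy. A sketch of that designated-initializer template pattern with invented fields:

/* Const ops-table templates, copied by value at init time. */
#include <stdio.h>

struct ring {
    const char *name;
    unsigned    mmio_base;
    void      (*flush)(struct ring *);
    unsigned    tail; /* mutable state, left zero in the template */
};

static void generic_flush(struct ring *r) { printf("%s: flush\n", r->name); }

static const struct ring render_template = {
    .name      = "render ring",
    .mmio_base = 0x2000,        /* illustrative */
    .flush     = generic_flush, /* fields not listed start out zero */
};

int main(void)
{
    struct ring live = render_template; /* cf. dev_priv->render_ring = render_ring */
    live.tail = 16;                     /* runtime state diverges from the template */
    live.flush(&live);
    return 0;
}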
782
783
784static void gen6_bsd_ring_set_tail(struct drm_device *dev,
785 struct intel_ring_buffer *ring,
786 u32 value)
787{
788 drm_i915_private_t *dev_priv = dev->dev_private;
789
790 /* Every tail move must follow the sequence below */
791 I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
792 GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK |
793 GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_DISABLE);
794 I915_WRITE(GEN6_BSD_RNCID, 0x0);
795
796 if (wait_for((I915_READ(GEN6_BSD_SLEEP_PSMI_CONTROL) &
797 GEN6_BSD_SLEEP_PSMI_CONTROL_IDLE_INDICATOR) == 0,
798 50))
799 DRM_ERROR("timed out waiting for IDLE Indicator\n");
800
801 I915_WRITE_TAIL(ring, value);
802 I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
803 GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK |
804 GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE);
805}
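The Gen6 tail write above is a quiesce/act/resume bracket: disable the power-save message, poll an idle indicator under a 50 ms bound, write TAIL, then re-enable. A sketch of that sequencing against a simulated device:

/* Quiesce -> poll idle -> write -> resume. The "device" is simulated. */
#include <stdbool.h>
#include <stdio.h>

struct fake_dev { bool sleep_allowed; bool idle; unsigned tail; };

static bool wait_for_idle(struct fake_dev *d, int tries)
{
    while (tries--) {
        if (d->idle)
            return true;
        /* the real code bounds this at 50 ms via wait_for() */
    }
    return false;
}

static void set_tail(struct fake_dev *d, unsigned value)
{
    d->sleep_allowed = false;                /* disable the power-save message */
    if (!wait_for_idle(d, 3))
        fprintf(stderr, "timed out waiting for idle\n"); /* cf. DRM_ERROR */
    d->tail = value;                         /* the guarded register write */
    d->sleep_allowed = true;                 /* re-enable power saving */
}

int main(void)
{
    struct fake_dev d = { .sleep_allowed = true, .idle = true };
    set_tail(&d, 0x40);
    printf("tail=0x%x sleep=%d\n", d.tail, d.sleep_allowed);
    return 0;
}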
806
807static void gen6_bsd_ring_flush(struct drm_device *dev,
808 struct intel_ring_buffer *ring,
809 u32 invalidate_domains,
810 u32 flush_domains)
811{
812 intel_ring_begin(dev, ring, 4);
813 intel_ring_emit(dev, ring, MI_FLUSH_DW);
814 intel_ring_emit(dev, ring, 0);
815 intel_ring_emit(dev, ring, 0);
816 intel_ring_emit(dev, ring, 0);
817 intel_ring_advance(dev, ring);
818}
819
820static int
821gen6_bsd_ring_dispatch_gem_execbuffer(struct drm_device *dev,
822 struct intel_ring_buffer *ring,
823 struct drm_i915_gem_execbuffer2 *exec,
824 struct drm_clip_rect *cliprects,
825 uint64_t exec_offset)
826{
827 uint32_t exec_start;
828
829 exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
830
831 intel_ring_begin(dev, ring, 2);
832 intel_ring_emit(dev, ring,
833 MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965);
834 /* bit0-7 is the length on GEN6+ */
835 intel_ring_emit(dev, ring, exec_start);
836 intel_ring_advance(dev, ring);
837
838 return 0;
839}
840
841/* ring buffer for Video Codec for Gen6+ */
842static const struct intel_ring_buffer gen6_bsd_ring = {
843 .name = "gen6 bsd ring",
844 .id = RING_BSD,
845 .mmio_base = GEN6_BSD_RING_BASE,
846 .size = 32 * PAGE_SIZE,
847 .init = init_bsd_ring,
848 .set_tail = gen6_bsd_ring_set_tail,
849 .flush = gen6_bsd_ring_flush,
850 .add_request = bsd_ring_add_request,
851 .get_seqno = bsd_ring_get_seqno,
852 .user_irq_get = bsd_ring_get_user_irq,
853 .user_irq_put = bsd_ring_put_user_irq,
854 .dispatch_gem_execbuffer = gen6_bsd_ring_dispatch_gem_execbuffer,
855};
856
857int intel_init_render_ring_buffer(struct drm_device *dev)
858{
859 drm_i915_private_t *dev_priv = dev->dev_private;
860
861 dev_priv->render_ring = render_ring;
862
863 if (!I915_NEED_GFX_HWS(dev)) {
864 dev_priv->render_ring.status_page.page_addr
865 = dev_priv->status_page_dmah->vaddr;
866 memset(dev_priv->render_ring.status_page.page_addr,
867 0, PAGE_SIZE);
868 }
869
870 return intel_init_ring_buffer(dev, &dev_priv->render_ring);
871}
872
873int intel_init_bsd_ring_buffer(struct drm_device *dev)
874{
875 drm_i915_private_t *dev_priv = dev->dev_private;
876
877 if (IS_GEN6(dev))
878 dev_priv->bsd_ring = gen6_bsd_ring;
879 else
880 dev_priv->bsd_ring = bsd_ring;
881
882 return intel_init_ring_buffer(dev, &dev_priv->bsd_ring);
883}
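The init entry points copy the appropriate const template into dev_priv by value, choosing the Gen6 BSD variant when the hardware supports it. A sketch of template selection by generation (the >= 6 check is a simplification of this sketch; the code above tests IS_GEN6 exactly):

#include <stdio.h>

struct ring_ops { const char *name; };

static const struct ring_ops bsd_ops      = { "bsd ring" };
static const struct ring_ops gen6_bsd_ops = { "gen6 bsd ring" };

static struct ring_ops select_bsd_ops(int gen)
{
    /* struct copy of the chosen const template */
    return gen >= 6 ? gen6_bsd_ops : bsd_ops;
}

int main(void)
{
    struct ring_ops r4 = select_bsd_ops(4);
    struct ring_ops r6 = select_bsd_ops(6);
    printf("%s / %s\n", r4.name, r6.name);
    return 0;
}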
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 525e7d3edda8..9725f783db20 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -7,25 +7,31 @@ struct intel_hw_status_page {
7 struct drm_gem_object *obj; 7 struct drm_gem_object *obj;
8}; 8};
9 9
10#define I915_READ_TAIL(ring) I915_READ(RING_TAIL(ring->mmio_base))
11#define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL(ring->mmio_base), val)
12#define I915_READ_START(ring) I915_READ(RING_START(ring->mmio_base))
13#define I915_WRITE_START(ring, val) I915_WRITE(RING_START(ring->mmio_base), val)
14#define I915_READ_HEAD(ring) I915_READ(RING_HEAD(ring->mmio_base))
15#define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD(ring->mmio_base), val)
16#define I915_READ_CTL(ring) I915_READ(RING_CTL(ring->mmio_base))
17#define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL(ring->mmio_base), val)
18
10struct drm_i915_gem_execbuffer2; 19struct drm_i915_gem_execbuffer2;
11struct intel_ring_buffer { 20struct intel_ring_buffer {
12 const char *name; 21 const char *name;
13 struct ring_regs { 22 enum intel_ring_id {
14 u32 ctl; 23 RING_RENDER = 0x1,
15 u32 head; 24 RING_BSD = 0x2,
16 u32 tail; 25 } id;
17 u32 start; 26 u32 mmio_base;
18 } regs;
19 unsigned int ring_flag;
20 unsigned long size; 27 unsigned long size;
21 unsigned int alignment;
22 void *virtual_start; 28 void *virtual_start;
23 struct drm_device *dev; 29 struct drm_device *dev;
24 struct drm_gem_object *gem_object; 30 struct drm_gem_object *gem_object;
25 31
26 unsigned int head; 32 unsigned int head;
27 unsigned int tail; 33 unsigned int tail;
28 unsigned int space; 34 int space;
29 struct intel_hw_status_page status_page; 35 struct intel_hw_status_page status_page;
30 36
31 u32 irq_gem_seqno; /* last seqno seen at irq time */ 37 u32 irq_gem_seqno; /* last seqno seen at irq time */
@@ -35,30 +41,22 @@ struct intel_ring_buffer {
35 struct intel_ring_buffer *ring); 41 struct intel_ring_buffer *ring);
36 void (*user_irq_put)(struct drm_device *dev, 42 void (*user_irq_put)(struct drm_device *dev,
37 struct intel_ring_buffer *ring); 43 struct intel_ring_buffer *ring);
38 void (*setup_status_page)(struct drm_device *dev,
39 struct intel_ring_buffer *ring);
40 44
41 int (*init)(struct drm_device *dev, 45 int (*init)(struct drm_device *dev,
42 struct intel_ring_buffer *ring); 46 struct intel_ring_buffer *ring);
43 47
44 unsigned int (*get_head)(struct drm_device *dev, 48 void (*set_tail)(struct drm_device *dev,
45 struct intel_ring_buffer *ring); 49 struct intel_ring_buffer *ring,
46 unsigned int (*get_tail)(struct drm_device *dev, 50 u32 value);
47 struct intel_ring_buffer *ring);
48 unsigned int (*get_active_head)(struct drm_device *dev,
49 struct intel_ring_buffer *ring);
50 void (*advance_ring)(struct drm_device *dev,
51 struct intel_ring_buffer *ring);
52 void (*flush)(struct drm_device *dev, 51 void (*flush)(struct drm_device *dev,
53 struct intel_ring_buffer *ring, 52 struct intel_ring_buffer *ring,
54 u32 invalidate_domains, 53 u32 invalidate_domains,
55 u32 flush_domains); 54 u32 flush_domains);
56 u32 (*add_request)(struct drm_device *dev, 55 u32 (*add_request)(struct drm_device *dev,
57 struct intel_ring_buffer *ring, 56 struct intel_ring_buffer *ring,
58 struct drm_file *file_priv,
59 u32 flush_domains); 57 u32 flush_domains);
60 u32 (*get_gem_seqno)(struct drm_device *dev, 58 u32 (*get_seqno)(struct drm_device *dev,
61 struct intel_ring_buffer *ring); 59 struct intel_ring_buffer *ring);
62 int (*dispatch_gem_execbuffer)(struct drm_device *dev, 60 int (*dispatch_gem_execbuffer)(struct drm_device *dev,
63 struct intel_ring_buffer *ring, 61 struct intel_ring_buffer *ring,
64 struct drm_i915_gem_execbuffer2 *exec, 62 struct drm_i915_gem_execbuffer2 *exec,
@@ -83,6 +81,11 @@ struct intel_ring_buffer {
83 */ 81 */
84 struct list_head request_list; 82 struct list_head request_list;
85 83
84 /**
85 * Do we have some not yet emitted requests outstanding?
86 */
87 bool outstanding_lazy_request;
88
86 wait_queue_head_t irq_queue; 89 wait_queue_head_t irq_queue;
87 drm_local_map_t map; 90 drm_local_map_t map;
88}; 91};
@@ -96,15 +99,13 @@ intel_read_status_page(struct intel_ring_buffer *ring,
96} 99}
97 100
98int intel_init_ring_buffer(struct drm_device *dev, 101int intel_init_ring_buffer(struct drm_device *dev,
99 struct intel_ring_buffer *ring); 102 struct intel_ring_buffer *ring);
100void intel_cleanup_ring_buffer(struct drm_device *dev, 103void intel_cleanup_ring_buffer(struct drm_device *dev,
101 struct intel_ring_buffer *ring); 104 struct intel_ring_buffer *ring);
102int intel_wait_ring_buffer(struct drm_device *dev, 105int intel_wait_ring_buffer(struct drm_device *dev,
103 struct intel_ring_buffer *ring, int n); 106 struct intel_ring_buffer *ring, int n);
104int intel_wrap_ring_buffer(struct drm_device *dev,
105 struct intel_ring_buffer *ring);
106void intel_ring_begin(struct drm_device *dev, 107void intel_ring_begin(struct drm_device *dev,
107 struct intel_ring_buffer *ring, int n); 108 struct intel_ring_buffer *ring, int n);
108 109
109static inline void intel_ring_emit(struct drm_device *dev, 110static inline void intel_ring_emit(struct drm_device *dev,
110 struct intel_ring_buffer *ring, 111 struct intel_ring_buffer *ring,
@@ -125,7 +126,12 @@ void intel_ring_advance(struct drm_device *dev,
125u32 intel_ring_get_seqno(struct drm_device *dev, 126u32 intel_ring_get_seqno(struct drm_device *dev,
126 struct intel_ring_buffer *ring); 127 struct intel_ring_buffer *ring);
127 128
128extern struct intel_ring_buffer render_ring; 129int intel_init_render_ring_buffer(struct drm_device *dev);
129extern struct intel_ring_buffer bsd_ring; 130int intel_init_bsd_ring_buffer(struct drm_device *dev);
131
132u32 intel_ring_get_active_head(struct drm_device *dev,
133 struct intel_ring_buffer *ring);
134void intel_ring_setup_status_page(struct drm_device *dev,
135 struct intel_ring_buffer *ring);
130 136
131#endif /* _INTEL_RINGBUFFER_H_ */ 137#endif /* _INTEL_RINGBUFFER_H_ */
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index e8e902d614ed..a84224f37605 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -65,8 +65,11 @@ static const char *tv_format_names[] = {
65struct intel_sdvo { 65struct intel_sdvo {
66 struct intel_encoder base; 66 struct intel_encoder base;
67 67
68 struct i2c_adapter *i2c;
68 u8 slave_addr; 69 u8 slave_addr;
69 70
71 struct i2c_adapter ddc;
72
70 /* Register for the SDVO device: SDVOB or SDVOC */ 73 /* Register for the SDVO device: SDVOB or SDVOC */
71 int sdvo_reg; 74 int sdvo_reg;
72 75
@@ -106,16 +109,12 @@ struct intel_sdvo {
106 bool is_hdmi; 109 bool is_hdmi;
107 110
108 /** 111 /**
109 * This is set if we detect output of sdvo device as LVDS. 112 * This is set if we detect output of sdvo device as LVDS and
113 * have a valid fixed mode to use with the panel.
110 */ 114 */
111 bool is_lvds; 115 bool is_lvds;
112 116
113 /** 117 /**
114 * This is sdvo flags for input timing.
115 */
116 uint8_t sdvo_flags;
117
118 /**
119 * This is sdvo fixed panel mode pointer 118
120 */ 119 */
121 struct drm_display_mode *sdvo_lvds_fixed_mode; 120 struct drm_display_mode *sdvo_lvds_fixed_mode;
@@ -129,9 +128,8 @@ struct intel_sdvo {
129 /* DDC bus used by this SDVO encoder */ 128 /* DDC bus used by this SDVO encoder */
130 uint8_t ddc_bus; 129 uint8_t ddc_bus;
131 130
132 /* Mac mini hack -- use the same DDC as the analog connector */ 131 /* Input timings for adjusted_mode */
133 struct i2c_adapter *analog_ddc_bus; 132 struct intel_sdvo_dtd input_dtd;
134
135}; 133};
136 134
137struct intel_sdvo_connector { 135struct intel_sdvo_connector {
@@ -186,9 +184,15 @@ struct intel_sdvo_connector {
186 u32 cur_dot_crawl, max_dot_crawl; 184 u32 cur_dot_crawl, max_dot_crawl;
187}; 185};
188 186
189static struct intel_sdvo *enc_to_intel_sdvo(struct drm_encoder *encoder) 187static struct intel_sdvo *to_intel_sdvo(struct drm_encoder *encoder)
188{
189 return container_of(encoder, struct intel_sdvo, base.base);
190}
191
192static struct intel_sdvo *intel_attached_sdvo(struct drm_connector *connector)
190{ 193{
191 return container_of(enc_to_intel_encoder(encoder), struct intel_sdvo, base); 194 return container_of(intel_attached_encoder(connector),
195 struct intel_sdvo, base);
192} 196}
193 197
194static struct intel_sdvo_connector *to_intel_sdvo_connector(struct drm_connector *connector) 198static struct intel_sdvo_connector *to_intel_sdvo_connector(struct drm_connector *connector)
@@ -213,7 +217,7 @@ intel_sdvo_create_enhance_property(struct intel_sdvo *intel_sdvo,
213 */ 217 */
214static void intel_sdvo_write_sdvox(struct intel_sdvo *intel_sdvo, u32 val) 218static void intel_sdvo_write_sdvox(struct intel_sdvo *intel_sdvo, u32 val)
215{ 219{
216 struct drm_device *dev = intel_sdvo->base.enc.dev; 220 struct drm_device *dev = intel_sdvo->base.base.dev;
217 struct drm_i915_private *dev_priv = dev->dev_private; 221 struct drm_i915_private *dev_priv = dev->dev_private;
218 u32 bval = val, cval = val; 222 u32 bval = val, cval = val;
219 int i; 223 int i;
@@ -245,49 +249,29 @@ static void intel_sdvo_write_sdvox(struct intel_sdvo *intel_sdvo, u32 val)
245 249
246static bool intel_sdvo_read_byte(struct intel_sdvo *intel_sdvo, u8 addr, u8 *ch) 250static bool intel_sdvo_read_byte(struct intel_sdvo *intel_sdvo, u8 addr, u8 *ch)
247{ 251{
248 u8 out_buf[2] = { addr, 0 };
249 u8 buf[2];
250 struct i2c_msg msgs[] = { 252 struct i2c_msg msgs[] = {
251 { 253 {
252 .addr = intel_sdvo->slave_addr >> 1, 254 .addr = intel_sdvo->slave_addr,
253 .flags = 0, 255 .flags = 0,
254 .len = 1, 256 .len = 1,
255 .buf = out_buf, 257 .buf = &addr,
256 }, 258 },
257 { 259 {
258 .addr = intel_sdvo->slave_addr >> 1, 260 .addr = intel_sdvo->slave_addr,
259 .flags = I2C_M_RD, 261 .flags = I2C_M_RD,
260 .len = 1, 262 .len = 1,
261 .buf = buf, 263 .buf = ch,
262 } 264 }
263 }; 265 };
264 int ret; 266 int ret;
265 267
266 if ((ret = i2c_transfer(intel_sdvo->base.i2c_bus, msgs, 2)) == 2) 268 if ((ret = i2c_transfer(intel_sdvo->i2c, msgs, 2)) == 2)
267 {
268 *ch = buf[0];
269 return true; 269 return true;
270 }
271 270
272 DRM_DEBUG_KMS("i2c transfer returned %d\n", ret); 271 DRM_DEBUG_KMS("i2c transfer returned %d\n", ret);
273 return false; 272 return false;
274} 273}
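The rewritten read path issues one transaction of two i2c_msgs — a write that selects the register, then a read of its value — and drops the old ">> 1" because slave addresses are now stored in the 7-bit form i2c_transfer() expects. A sketch against a toy bus; the device address and register contents are invented:

/* Register read as a write-then-read message pair. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct msg { uint8_t addr; bool read; uint8_t *buf; int len; };

/* toy bus: one device at 0x38 whose register N holds N ^ 0xff */
static uint8_t cur_reg;
static int bus_transfer(struct msg *msgs, int num)
{
    for (int i = 0; i < num; i++) {
        if (msgs[i].addr != 0x38)
            return -1;
        if (msgs[i].read)
            msgs[i].buf[0] = cur_reg ^ 0xff;
        else
            cur_reg = msgs[i].buf[0];
    }
    return num;
}

static bool read_byte(uint8_t dev, uint8_t reg, uint8_t *out)
{
    struct msg msgs[] = {
        { dev, false, &reg, 1 }, /* select the register */
        { dev, true,  out,  1 }, /* read its value back */
    };
    return bus_transfer(msgs, 2) == 2; /* both messages must complete */
}

int main(void)
{
    uint8_t v;
    if (read_byte(0x38, 0x0f, &v))
        printf("reg 0x0f = 0x%02x\n", v); /* 0xf0 */
    return 0;
}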
275 274
276static bool intel_sdvo_write_byte(struct intel_sdvo *intel_sdvo, int addr, u8 ch)
277{
278 u8 out_buf[2] = { addr, ch };
279 struct i2c_msg msgs[] = {
280 {
281 .addr = intel_sdvo->slave_addr >> 1,
282 .flags = 0,
283 .len = 2,
284 .buf = out_buf,
285 }
286 };
287
288 return i2c_transfer(intel_sdvo->base.i2c_bus, msgs, 1) == 1;
289}
290
291#define SDVO_CMD_NAME_ENTRY(cmd) {cmd, #cmd} 275#define SDVO_CMD_NAME_ENTRY(cmd) {cmd, #cmd}
292/** Mapping of command numbers to names, for debug output */ 276/** Mapping of command numbers to names, for debug output */
293static const struct _sdvo_cmd_name { 277static const struct _sdvo_cmd_name {
@@ -432,22 +416,6 @@ static void intel_sdvo_debug_write(struct intel_sdvo *intel_sdvo, u8 cmd,
432 DRM_LOG_KMS("\n"); 416 DRM_LOG_KMS("\n");
433} 417}
434 418
435static bool intel_sdvo_write_cmd(struct intel_sdvo *intel_sdvo, u8 cmd,
436 const void *args, int args_len)
437{
438 int i;
439
440 intel_sdvo_debug_write(intel_sdvo, cmd, args, args_len);
441
442 for (i = 0; i < args_len; i++) {
443 if (!intel_sdvo_write_byte(intel_sdvo, SDVO_I2C_ARG_0 - i,
444 ((u8*)args)[i]))
445 return false;
446 }
447
448 return intel_sdvo_write_byte(intel_sdvo, SDVO_I2C_OPCODE, cmd);
449}
450
451static const char *cmd_status_names[] = { 419static const char *cmd_status_names[] = {
452 "Power on", 420 "Power on",
453 "Success", 421 "Success",
@@ -458,54 +426,115 @@ static const char *cmd_status_names[] = {
458 "Scaling not supported" 426 "Scaling not supported"
459}; 427};
460 428
461static void intel_sdvo_debug_response(struct intel_sdvo *intel_sdvo, 429static bool intel_sdvo_write_cmd(struct intel_sdvo *intel_sdvo, u8 cmd,
462 void *response, int response_len, 430 const void *args, int args_len)
463 u8 status)
464{ 431{
465 int i; 432 u8 buf[args_len*2 + 2], status;
433 struct i2c_msg msgs[args_len + 3];
434 int i, ret;
466 435
467 DRM_DEBUG_KMS("%s: R: ", SDVO_NAME(intel_sdvo)); 436 intel_sdvo_debug_write(intel_sdvo, cmd, args, args_len);
468 for (i = 0; i < response_len; i++) 437
469 DRM_LOG_KMS("%02X ", ((u8 *)response)[i]); 438 for (i = 0; i < args_len; i++) {
470 for (; i < 8; i++) 439 msgs[i].addr = intel_sdvo->slave_addr;
471 DRM_LOG_KMS(" "); 440 msgs[i].flags = 0;
472 if (status <= SDVO_CMD_STATUS_SCALING_NOT_SUPP) 441 msgs[i].len = 2;
473 DRM_LOG_KMS("(%s)", cmd_status_names[status]); 442 msgs[i].buf = buf + 2 *i;
474 else 443 buf[2*i + 0] = SDVO_I2C_ARG_0 - i;
475 DRM_LOG_KMS("(??? %d)", status); 444 buf[2*i + 1] = ((u8*)args)[i];
476 DRM_LOG_KMS("\n"); 445 }
446 msgs[i].addr = intel_sdvo->slave_addr;
447 msgs[i].flags = 0;
448 msgs[i].len = 2;
449 msgs[i].buf = buf + 2*i;
450 buf[2*i + 0] = SDVO_I2C_OPCODE;
451 buf[2*i + 1] = cmd;
452
453 /* the following two are to read the response */
454 status = SDVO_I2C_CMD_STATUS;
455 msgs[i+1].addr = intel_sdvo->slave_addr;
456 msgs[i+1].flags = 0;
457 msgs[i+1].len = 1;
458 msgs[i+1].buf = &status;
459
460 msgs[i+2].addr = intel_sdvo->slave_addr;
461 msgs[i+2].flags = I2C_M_RD;
462 msgs[i+2].len = 1;
463 msgs[i+2].buf = &status;
464
465 ret = i2c_transfer(intel_sdvo->i2c, msgs, i+3);
466 if (ret < 0) {
467 DRM_DEBUG_KMS("I2c transfer returned %d\n", ret);
468 return false;
469 }
470 if (ret != i+3) {
471 /* failure in I2C transfer */
472 DRM_DEBUG_KMS("I2c transfer returned %d/%d\n", ret, i+3);
473 return false;
474 }
475
476 i = 3;
477 while (status == SDVO_CMD_STATUS_PENDING && i--) {
478 if (!intel_sdvo_read_byte(intel_sdvo,
479 SDVO_I2C_CMD_STATUS,
480 &status))
481 return false;
482 }
483 if (status != SDVO_CMD_STATUS_SUCCESS) {
484 DRM_DEBUG_KMS("command returns response %s [%d]\n",
485 status <= SDVO_CMD_STATUS_SCALING_NOT_SUPP ? cmd_status_names[status] : "???",
486 status);
487 return false;
488 }
489
490 return true;
477} 491}
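The command writer above packs every argument write, the opcode write, and the status read into a single i2c_transfer() call, so the whole command is one bus transaction (the property the old DDC-switch special case needed). A sketch of building such a message array — the ARG_0/OPCODE register numbers are placeholders, and the trailing status-read messages are omitted:

/* One transaction for an N-argument command. */
#include <stdint.h>
#include <stdio.h>

#define ARG_0  0x07 /* args live at ARG_0 - i (placeholder) */
#define OPCODE 0x08 /* placeholder opcode register */

struct msg { uint8_t buf[2]; }; /* addr/flags dropped for brevity */

static int build_cmd(struct msg *msgs, uint8_t cmd,
                     const uint8_t *args, int args_len)
{
    int i;
    for (i = 0; i < args_len; i++) {
        msgs[i].buf[0] = ARG_0 - i; /* argument register */
        msgs[i].buf[1] = args[i];
    }
    msgs[i].buf[0] = OPCODE;        /* writing the opcode fires the command */
    msgs[i].buf[1] = cmd;
    return i + 1;                   /* message count for one transfer */
}

int main(void)
{
    uint8_t args[] = { 0xaa, 0xbb };
    struct msg msgs[2 + 1];
    int n = build_cmd(msgs, 0x10, args, 2);
    for (int i = 0; i < n; i++)
        printf("msg %d: reg 0x%02x val 0x%02x\n", i, msgs[i].buf[0], msgs[i].buf[1]);
    return 0;
}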
478 492
479static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo, 493static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo,
480 void *response, int response_len) 494 void *response, int response_len)
481{ 495{
482 int i; 496 u8 retry = 5;
483 u8 status; 497 u8 status;
484 u8 retry = 50; 498 int i;
485
486 while (retry--) {
487 /* Read the command response */
488 for (i = 0; i < response_len; i++) {
489 if (!intel_sdvo_read_byte(intel_sdvo,
490 SDVO_I2C_RETURN_0 + i,
491 &((u8 *)response)[i]))
492 return false;
493 }
494 499
495 /* read the return status */ 500 /*
496 if (!intel_sdvo_read_byte(intel_sdvo, SDVO_I2C_CMD_STATUS, 501 * The documentation states that all commands will be
502 * processed within 15µs, and that we need only poll
503 * the status byte a maximum of 3 times in order for the
504 * command to be complete.
505 *
506 * Check 5 times in case the hardware failed to read the docs.
507 */
508 do {
509 if (!intel_sdvo_read_byte(intel_sdvo,
510 SDVO_I2C_CMD_STATUS,
497 &status)) 511 &status))
498 return false; 512 return false;
513 } while (status == SDVO_CMD_STATUS_PENDING && --retry);
499 514
500 intel_sdvo_debug_response(intel_sdvo, response, response_len, 515 DRM_DEBUG_KMS("%s: R: ", SDVO_NAME(intel_sdvo));
501 status); 516 if (status <= SDVO_CMD_STATUS_SCALING_NOT_SUPP)
502 if (status != SDVO_CMD_STATUS_PENDING) 517 DRM_LOG_KMS("(%s)", cmd_status_names[status]);
503 break; 518 else
519 DRM_LOG_KMS("(??? %d)", status);
520
521 if (status != SDVO_CMD_STATUS_SUCCESS)
522 goto log_fail;
504 523
505 mdelay(50); 524 /* Read the command response */
525 for (i = 0; i < response_len; i++) {
526 if (!intel_sdvo_read_byte(intel_sdvo,
527 SDVO_I2C_RETURN_0 + i,
528 &((u8 *)response)[i]))
529 goto log_fail;
530 DRM_LOG_KMS(" %02X", ((u8 *)response)[i]);
506 } 531 }
532 DRM_LOG_KMS("\n");
533 return true;
507 534
508 return status == SDVO_CMD_STATUS_SUCCESS; 535log_fail:
536 DRM_LOG_KMS("\n");
537 return false;
509} 538}
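The new response reader polls the status byte at most a handful of times while it reads back PENDING (the comment above notes the documentation promises completion within three polls) and touches the payload only on SUCCESS, replacing the old 50-retry mdelay loop. A sketch of that bounded poll with invented status codes and a toy device:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum { ST_PENDING, ST_SUCCESS, ST_FAIL };

/* toy device: reports PENDING twice, then SUCCESS; payload 0x42.. */
static int polls;
static int read_status(void) { return ++polls < 3 ? ST_PENDING : ST_SUCCESS; }
static uint8_t read_return(int i) { return 0x42 + i; }

static bool read_response(uint8_t *out, int len)
{
    int retry = 5; /* docs promise 3 polls suffice; allow 5 */
    int status;

    do {
        status = read_status();
    } while (status == ST_PENDING && --retry);

    if (status != ST_SUCCESS)
        return false; /* never touch the payload on failure */

    for (int i = 0; i < len; i++)
        out[i] = read_return(i);
    return true;
}

int main(void)
{
    uint8_t resp[2];
    printf("ok=%d resp=%02x %02x\n", read_response(resp, 2), resp[0], resp[1]);
    return 0;
}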
510 539
511static int intel_sdvo_get_pixel_multiplier(struct drm_display_mode *mode) 540static int intel_sdvo_get_pixel_multiplier(struct drm_display_mode *mode)
@@ -518,71 +547,17 @@ static int intel_sdvo_get_pixel_multiplier(struct drm_display_mode *mode)
518 return 4; 547 return 4;
519} 548}
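Only the "return 4" tail of intel_sdvo_get_pixel_multiplier() is visible in this hunk; the idea is to multiply slow dotclocks up so the SDVO link stays in its operating range, and mode_set below maps the result onto SDVO_CLOCK_RATE_MULT_{1,2,4}X. A sketch of that selection — the 100 MHz / 50 MHz thresholds are an assumption about the elided body:

#include <stdio.h>

/* thresholds assumed, only the final "return 4" is shown in the hunk */
static int pixel_multiplier(int clock_khz)
{
    if (clock_khz >= 100000)
        return 1; /* fast enough as-is */
    if (clock_khz >= 50000)
        return 2; /* double the link clock */
    return 4;     /* quadruple very slow modes */
}

int main(void)
{
    printf("%d %d %d\n",
           pixel_multiplier(148500),  /* 1 */
           pixel_multiplier(74250),   /* 2 */
           pixel_multiplier(25175));  /* 4 */
    return 0;
}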
520 549
521/** 550static bool intel_sdvo_set_control_bus_switch(struct intel_sdvo *intel_sdvo,
522 * Try to read the response after issuing the DDC switch command. But it 551 u8 ddc_bus)
523 * is noted that we must do the action of reading response and issuing DDC
524 * switch command in one I2C transaction. Otherwise when we try to start
525 * another I2C transaction after issuing the DDC bus switch, it will be
526 * switched to the internal SDVO register.
527 */
528static void intel_sdvo_set_control_bus_switch(struct intel_sdvo *intel_sdvo,
529 u8 target)
530{ 552{
531 u8 out_buf[2], cmd_buf[2], ret_value[2], ret; 553 return intel_sdvo_write_cmd(intel_sdvo,
532 struct i2c_msg msgs[] = { 554 SDVO_CMD_SET_CONTROL_BUS_SWITCH,
533 { 555 &ddc_bus, 1);
534 .addr = intel_sdvo->slave_addr >> 1,
535 .flags = 0,
536 .len = 2,
537 .buf = out_buf,
538 },
539 /* the following two are to read the response */
540 {
541 .addr = intel_sdvo->slave_addr >> 1,
542 .flags = 0,
543 .len = 1,
544 .buf = cmd_buf,
545 },
546 {
547 .addr = intel_sdvo->slave_addr >> 1,
548 .flags = I2C_M_RD,
549 .len = 1,
550 .buf = ret_value,
551 },
552 };
553
554 intel_sdvo_debug_write(intel_sdvo, SDVO_CMD_SET_CONTROL_BUS_SWITCH,
555 &target, 1);
556 /* write the DDC switch command argument */
557 intel_sdvo_write_byte(intel_sdvo, SDVO_I2C_ARG_0, target);
558
559 out_buf[0] = SDVO_I2C_OPCODE;
560 out_buf[1] = SDVO_CMD_SET_CONTROL_BUS_SWITCH;
561 cmd_buf[0] = SDVO_I2C_CMD_STATUS;
562 cmd_buf[1] = 0;
563 ret_value[0] = 0;
564 ret_value[1] = 0;
565
566 ret = i2c_transfer(intel_sdvo->base.i2c_bus, msgs, 3);
567 if (ret != 3) {
568 /* failure in I2C transfer */
569 DRM_DEBUG_KMS("I2c transfer returned %d\n", ret);
570 return;
571 }
572 if (ret_value[0] != SDVO_CMD_STATUS_SUCCESS) {
573 DRM_DEBUG_KMS("DDC switch command returns response %d\n",
574 ret_value[0]);
575 return;
576 }
577 return;
578} 556}
579 557
580static bool intel_sdvo_set_value(struct intel_sdvo *intel_sdvo, u8 cmd, const void *data, int len) 558static bool intel_sdvo_set_value(struct intel_sdvo *intel_sdvo, u8 cmd, const void *data, int len)
581{ 559{
582 if (!intel_sdvo_write_cmd(intel_sdvo, cmd, data, len)) 560 return intel_sdvo_write_cmd(intel_sdvo, cmd, data, len);
583 return false;
584
585 return intel_sdvo_read_response(intel_sdvo, NULL, 0);
586} 561}
587 562
588static bool 563static bool
@@ -1022,8 +997,6 @@ intel_sdvo_set_input_timings_for_mode(struct intel_sdvo *intel_sdvo,
1022 struct drm_display_mode *mode, 997 struct drm_display_mode *mode,
1023 struct drm_display_mode *adjusted_mode) 998 struct drm_display_mode *adjusted_mode)
1024{ 999{
1025 struct intel_sdvo_dtd input_dtd;
1026
1027 /* Reset the input timing to the screen. Assume always input 0. */ 1000 /* Reset the input timing to the screen. Assume always input 0. */
1028 if (!intel_sdvo_set_target_input(intel_sdvo)) 1001 if (!intel_sdvo_set_target_input(intel_sdvo))
1029 return false; 1002 return false;
@@ -1035,14 +1008,12 @@ intel_sdvo_set_input_timings_for_mode(struct intel_sdvo *intel_sdvo,
1035 return false; 1008 return false;
1036 1009
1037 if (!intel_sdvo_get_preferred_input_timing(intel_sdvo, 1010 if (!intel_sdvo_get_preferred_input_timing(intel_sdvo,
1038 &input_dtd)) 1011 &intel_sdvo->input_dtd))
1039 return false; 1012 return false;
1040 1013
1041 intel_sdvo_get_mode_from_dtd(adjusted_mode, &input_dtd); 1014 intel_sdvo_get_mode_from_dtd(adjusted_mode, &intel_sdvo->input_dtd);
1042 intel_sdvo->sdvo_flags = input_dtd.part2.sdvo_flags;
1043 1015
1044 drm_mode_set_crtcinfo(adjusted_mode, 0); 1016 drm_mode_set_crtcinfo(adjusted_mode, 0);
1045 mode->clock = adjusted_mode->clock;
1046 return true; 1017 return true;
1047} 1018}
1048 1019
@@ -1050,7 +1021,8 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder,
1050 struct drm_display_mode *mode, 1021 struct drm_display_mode *mode,
1051 struct drm_display_mode *adjusted_mode) 1022 struct drm_display_mode *adjusted_mode)
1052{ 1023{
1053 struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder); 1024 struct intel_sdvo *intel_sdvo = to_intel_sdvo(encoder);
1025 int multiplier;
1054 1026
1055 /* We need to construct preferred input timings based on our 1027 /* We need to construct preferred input timings based on our
1056 * output timings. To do that, we have to set the output 1028 * output timings. To do that, we have to set the output
@@ -1065,10 +1037,8 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder,
1065 mode, 1037 mode,
1066 adjusted_mode); 1038 adjusted_mode);
1067 } else if (intel_sdvo->is_lvds) { 1039 } else if (intel_sdvo->is_lvds) {
1068 drm_mode_set_crtcinfo(intel_sdvo->sdvo_lvds_fixed_mode, 0);
1069
1070 if (!intel_sdvo_set_output_timings_from_mode(intel_sdvo, 1040 if (!intel_sdvo_set_output_timings_from_mode(intel_sdvo,
1071 intel_sdvo->sdvo_lvds_fixed_mode)) 1041 intel_sdvo->sdvo_lvds_fixed_mode))
1072 return false; 1042 return false;
1073 1043
1074 (void) intel_sdvo_set_input_timings_for_mode(intel_sdvo, 1044 (void) intel_sdvo_set_input_timings_for_mode(intel_sdvo,
@@ -1077,9 +1047,10 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder,
1077 } 1047 }
1078 1048
1079 /* Make the CRTC code factor in the SDVO pixel multiplier. The 1049 /* Make the CRTC code factor in the SDVO pixel multiplier. The
1080 * SDVO device will be told of the multiplier during mode_set. 1050 * SDVO device will factor out the multiplier during mode_set.
1081 */ 1051 */
1082 adjusted_mode->clock *= intel_sdvo_get_pixel_multiplier(mode); 1052 multiplier = intel_sdvo_get_pixel_multiplier(adjusted_mode);
1053 intel_mode_set_pixel_multiplier(adjusted_mode, multiplier);
1083 1054
1084 return true; 1055 return true;
1085} 1056}
@@ -1092,11 +1063,12 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
1092 struct drm_i915_private *dev_priv = dev->dev_private; 1063 struct drm_i915_private *dev_priv = dev->dev_private;
1093 struct drm_crtc *crtc = encoder->crtc; 1064 struct drm_crtc *crtc = encoder->crtc;
1094 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 1065 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1095 struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder); 1066 struct intel_sdvo *intel_sdvo = to_intel_sdvo(encoder);
1096 u32 sdvox = 0; 1067 u32 sdvox;
1097 int sdvo_pixel_multiply, rate;
1098 struct intel_sdvo_in_out_map in_out; 1068 struct intel_sdvo_in_out_map in_out;
1099 struct intel_sdvo_dtd input_dtd; 1069 struct intel_sdvo_dtd input_dtd;
1070 int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
1071 int rate;
1100 1072
1101 if (!mode) 1073 if (!mode)
1102 return; 1074 return;
@@ -1114,28 +1086,23 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
1114 SDVO_CMD_SET_IN_OUT_MAP, 1086 SDVO_CMD_SET_IN_OUT_MAP,
1115 &in_out, sizeof(in_out)); 1087 &in_out, sizeof(in_out));
1116 1088
1117 if (intel_sdvo->is_hdmi) { 1089 /* Set the output timings to the screen */
1118 if (!intel_sdvo_set_avi_infoframe(intel_sdvo, mode)) 1090 if (!intel_sdvo_set_target_output(intel_sdvo,
1119 return; 1091 intel_sdvo->attached_output))
1120 1092 return;
1121 sdvox |= SDVO_AUDIO_ENABLE;
1122 }
1123 1093
1124 /* We have tried to get input timing in mode_fixup, and filled into 1094 /* We have tried to get input timing in mode_fixup, and filled into
1125 adjusted_mode */ 1095 * adjusted_mode.
1126 intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode);
1127 if (intel_sdvo->is_tv || intel_sdvo->is_lvds)
1128 input_dtd.part2.sdvo_flags = intel_sdvo->sdvo_flags;
1129
1130 /* If it's a TV, we already set the output timing in mode_fixup.
1131 * Otherwise, the output timing is equal to the input timing.
1132 */ 1096 */
1133 if (!intel_sdvo->is_tv && !intel_sdvo->is_lvds) { 1097 if (intel_sdvo->is_tv || intel_sdvo->is_lvds) {
1098 input_dtd = intel_sdvo->input_dtd;
1099 } else {
1134 /* Set the output timing to the screen */ 1100 /* Set the output timing to the screen */
1135 if (!intel_sdvo_set_target_output(intel_sdvo, 1101 if (!intel_sdvo_set_target_output(intel_sdvo,
1136 intel_sdvo->attached_output)) 1102 intel_sdvo->attached_output))
1137 return; 1103 return;
1138 1104
1105 intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode);
1139 (void) intel_sdvo_set_output_timing(intel_sdvo, &input_dtd); 1106 (void) intel_sdvo_set_output_timing(intel_sdvo, &input_dtd);
1140 } 1107 }
1141 1108
@@ -1143,31 +1110,18 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
1143 if (!intel_sdvo_set_target_input(intel_sdvo)) 1110 if (!intel_sdvo_set_target_input(intel_sdvo))
1144 return; 1111 return;
1145 1112
1146 if (intel_sdvo->is_tv) { 1113 if (intel_sdvo->is_hdmi &&
1147 if (!intel_sdvo_set_tv_format(intel_sdvo)) 1114 !intel_sdvo_set_avi_infoframe(intel_sdvo, mode))
1148 return; 1115 return;
1149 }
1150 1116
1151 /* We would like to use intel_sdvo_create_preferred_input_timing() to 1117 if (intel_sdvo->is_tv &&
1152 * provide the device with a timing it can support, if it supports that 1118 !intel_sdvo_set_tv_format(intel_sdvo))
1153 * feature. However, presumably we would need to adjust the CRTC to 1119 return;
1154 * output the preferred timing, and we don't support that currently.
1155 */
1156#if 0
1157 success = intel_sdvo_create_preferred_input_timing(encoder, clock,
1158 width, height);
1159 if (success) {
1160 struct intel_sdvo_dtd *input_dtd;
1161 1120
1162 intel_sdvo_get_preferred_input_timing(encoder, &input_dtd);
1163 intel_sdvo_set_input_timing(encoder, &input_dtd);
1164 }
1165#else
1166 (void) intel_sdvo_set_input_timing(intel_sdvo, &input_dtd); 1121 (void) intel_sdvo_set_input_timing(intel_sdvo, &input_dtd);
1167#endif
1168 1122
1169 sdvo_pixel_multiply = intel_sdvo_get_pixel_multiplier(mode); 1123 switch (pixel_multiplier) {
1170 switch (sdvo_pixel_multiply) { 1124 default:
1171 case 1: rate = SDVO_CLOCK_RATE_MULT_1X; break; 1125 case 1: rate = SDVO_CLOCK_RATE_MULT_1X; break;
1172 case 2: rate = SDVO_CLOCK_RATE_MULT_2X; break; 1126 case 2: rate = SDVO_CLOCK_RATE_MULT_2X; break;
1173 case 4: rate = SDVO_CLOCK_RATE_MULT_4X; break; 1127 case 4: rate = SDVO_CLOCK_RATE_MULT_4X; break;
@@ -1176,14 +1130,14 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
1176 return; 1130 return;
1177 1131
1178 /* Set the SDVO control regs. */ 1132 /* Set the SDVO control regs. */
1179 if (IS_I965G(dev)) { 1133 if (INTEL_INFO(dev)->gen >= 4) {
1180 sdvox |= SDVO_BORDER_ENABLE; 1134 sdvox = SDVO_BORDER_ENABLE;
1181 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) 1135 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
1182 sdvox |= SDVO_VSYNC_ACTIVE_HIGH; 1136 sdvox |= SDVO_VSYNC_ACTIVE_HIGH;
1183 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) 1137 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
1184 sdvox |= SDVO_HSYNC_ACTIVE_HIGH; 1138 sdvox |= SDVO_HSYNC_ACTIVE_HIGH;
1185 } else { 1139 } else {
1186 sdvox |= I915_READ(intel_sdvo->sdvo_reg); 1140 sdvox = I915_READ(intel_sdvo->sdvo_reg);
1187 switch (intel_sdvo->sdvo_reg) { 1141 switch (intel_sdvo->sdvo_reg) {
1188 case SDVOB: 1142 case SDVOB:
1189 sdvox &= SDVOB_PRESERVE_MASK; 1143 sdvox &= SDVOB_PRESERVE_MASK;
@@ -1196,16 +1150,18 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
1196 } 1150 }
1197 if (intel_crtc->pipe == 1) 1151 if (intel_crtc->pipe == 1)
1198 sdvox |= SDVO_PIPE_B_SELECT; 1152 sdvox |= SDVO_PIPE_B_SELECT;
1153 if (intel_sdvo->is_hdmi)
1154 sdvox |= SDVO_AUDIO_ENABLE;
1199 1155
1200 if (IS_I965G(dev)) { 1156 if (INTEL_INFO(dev)->gen >= 4) {
1201 /* done in crtc_mode_set as the dpll_md reg must be written early */ 1157 /* done in crtc_mode_set as the dpll_md reg must be written early */
1202 } else if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) { 1158 } else if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
1203 /* done in crtc_mode_set as it lives inside the dpll register */ 1159 /* done in crtc_mode_set as it lives inside the dpll register */
1204 } else { 1160 } else {
1205 sdvox |= (sdvo_pixel_multiply - 1) << SDVO_PORT_MULTIPLY_SHIFT; 1161 sdvox |= (pixel_multiplier - 1) << SDVO_PORT_MULTIPLY_SHIFT;
1206 } 1162 }
1207 1163
1208 if (intel_sdvo->sdvo_flags & SDVO_NEED_TO_STALL) 1164 if (input_dtd.part2.sdvo_flags & SDVO_NEED_TO_STALL)
1209 sdvox |= SDVO_STALL_SELECT; 1165 sdvox |= SDVO_STALL_SELECT;
1210 intel_sdvo_write_sdvox(intel_sdvo, sdvox); 1166 intel_sdvo_write_sdvox(intel_sdvo, sdvox);
1211} 1167}
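Two details in the hunk above: sdvox is now assigned a defined starting value instead of OR-ing into an uninitialized variable, and on parts without a DPLL multiplier field the port register itself carries (multiplier - 1) in a shift field. A sketch of that encoding — the shift value is illustrative, not the real register layout:

#include <stdint.h>
#include <stdio.h>

#define PORT_MULTIPLY_SHIFT 23 /* illustrative field position */

int main(void)
{
    for (int mult = 1; mult <= 4; mult <<= 1) {
        uint32_t sdvox = 0; /* start from a defined value, not |= on garbage */
        sdvox |= (uint32_t)(mult - 1) << PORT_MULTIPLY_SHIFT;
        printf("mult %d -> field 0x%08x\n", mult, (unsigned)sdvox);
    }
    return 0;
}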
@@ -1214,7 +1170,7 @@ static void intel_sdvo_dpms(struct drm_encoder *encoder, int mode)
1214{ 1170{
1215 struct drm_device *dev = encoder->dev; 1171 struct drm_device *dev = encoder->dev;
1216 struct drm_i915_private *dev_priv = dev->dev_private; 1172 struct drm_i915_private *dev_priv = dev->dev_private;
1217 struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder); 1173 struct intel_sdvo *intel_sdvo = to_intel_sdvo(encoder);
1218 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); 1174 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
1219 u32 temp; 1175 u32 temp;
1220 1176
@@ -1260,8 +1216,7 @@ static void intel_sdvo_dpms(struct drm_encoder *encoder, int mode)
1260static int intel_sdvo_mode_valid(struct drm_connector *connector, 1216static int intel_sdvo_mode_valid(struct drm_connector *connector,
1261 struct drm_display_mode *mode) 1217 struct drm_display_mode *mode)
1262{ 1218{
1263 struct drm_encoder *encoder = intel_attached_encoder(connector); 1219 struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
1264 struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
1265 1220
1266 if (mode->flags & DRM_MODE_FLAG_DBLSCAN) 1221 if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
1267 return MODE_NO_DBLESCAN; 1222 return MODE_NO_DBLESCAN;
@@ -1285,7 +1240,38 @@ static int intel_sdvo_mode_valid(struct drm_connector *connector,
1285 1240
1286static bool intel_sdvo_get_capabilities(struct intel_sdvo *intel_sdvo, struct intel_sdvo_caps *caps) 1241static bool intel_sdvo_get_capabilities(struct intel_sdvo *intel_sdvo, struct intel_sdvo_caps *caps)
1287{ 1242{
1288 return intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_DEVICE_CAPS, caps, sizeof(*caps)); 1243 if (!intel_sdvo_get_value(intel_sdvo,
1244 SDVO_CMD_GET_DEVICE_CAPS,
1245 caps, sizeof(*caps)))
1246 return false;
1247
1248 DRM_DEBUG_KMS("SDVO capabilities:\n"
1249 " vendor_id: %d\n"
1250 " device_id: %d\n"
1251 " device_rev_id: %d\n"
1252 " sdvo_version_major: %d\n"
1253 " sdvo_version_minor: %d\n"
1254 " sdvo_inputs_mask: %d\n"
1255 " smooth_scaling: %d\n"
1256 " sharp_scaling: %d\n"
1257 " up_scaling: %d\n"
1258 " down_scaling: %d\n"
1259 " stall_support: %d\n"
1260 " output_flags: %d\n",
1261 caps->vendor_id,
1262 caps->device_id,
1263 caps->device_rev_id,
1264 caps->sdvo_version_major,
1265 caps->sdvo_version_minor,
1266 caps->sdvo_inputs_mask,
1267 caps->smooth_scaling,
1268 caps->sharp_scaling,
1269 caps->up_scaling,
1270 caps->down_scaling,
1271 caps->stall_support,
1272 caps->output_flags);
1273
1274 return true;
1289} 1275}
1290 1276
1291/* No use! */ 1277/* No use! */
@@ -1389,22 +1375,33 @@ intel_sdvo_multifunc_encoder(struct intel_sdvo *intel_sdvo)
1389 return (caps > 1); 1375 return (caps > 1);
1390} 1376}
1391 1377
1378static struct edid *
1379intel_sdvo_get_edid(struct drm_connector *connector)
1380{
1381 struct intel_sdvo *sdvo = intel_attached_sdvo(connector);
1382 return drm_get_edid(connector, &sdvo->ddc);
1383}
1384
1392static struct drm_connector * 1385static struct drm_connector *
1393intel_find_analog_connector(struct drm_device *dev) 1386intel_find_analog_connector(struct drm_device *dev)
1394{ 1387{
1395 struct drm_connector *connector; 1388 struct drm_connector *connector;
1396 struct drm_encoder *encoder; 1389 struct intel_sdvo *encoder;
1397 struct intel_sdvo *intel_sdvo; 1390
1398 1391 list_for_each_entry(encoder,
1399 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { 1392 &dev->mode_config.encoder_list,
1400 intel_sdvo = enc_to_intel_sdvo(encoder); 1393 base.base.head) {
1401 if (intel_sdvo->base.type == INTEL_OUTPUT_ANALOG) { 1394 if (encoder->base.type == INTEL_OUTPUT_ANALOG) {
1402 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 1395 list_for_each_entry(connector,
1403 if (encoder == intel_attached_encoder(connector)) 1396 &dev->mode_config.connector_list,
1397 head) {
1398 if (&encoder->base ==
1399 intel_attached_encoder(connector))
1404 return connector; 1400 return connector;
1405 } 1401 }
1406 } 1402 }
1407 } 1403 }
1404
1408 return NULL; 1405 return NULL;
1409} 1406}
1410 1407
@@ -1424,65 +1421,66 @@ intel_analog_is_connected(struct drm_device *dev)
1424 return true; 1421 return true;
1425} 1422}
1426 1423
1424/* Mac mini hack -- use the same DDC as the analog connector */
1425static struct edid *
1426intel_sdvo_get_analog_edid(struct drm_connector *connector)
1427{
1428 struct drm_i915_private *dev_priv = connector->dev->dev_private;
1429
1430 if (!intel_analog_is_connected(connector->dev))
1431 return NULL;
1432
1433 return drm_get_edid(connector, &dev_priv->gmbus[dev_priv->crt_ddc_pin].adapter);
1434}
1435
1427enum drm_connector_status 1436enum drm_connector_status
1428intel_sdvo_hdmi_sink_detect(struct drm_connector *connector) 1437intel_sdvo_hdmi_sink_detect(struct drm_connector *connector)
1429{ 1438{
1430 struct drm_encoder *encoder = intel_attached_encoder(connector); 1439 struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
1431 struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder); 1440 enum drm_connector_status status;
1432 struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector); 1441 struct edid *edid;
1433 enum drm_connector_status status = connector_status_connected;
1434 struct edid *edid = NULL;
1435 1442
1436 edid = drm_get_edid(connector, intel_sdvo->base.ddc_bus); 1443 edid = intel_sdvo_get_edid(connector);
1437 1444
1438 /* This is only applied to SDVO cards with multiple outputs */
1439 if (edid == NULL && intel_sdvo_multifunc_encoder(intel_sdvo)) { 1445 if (edid == NULL && intel_sdvo_multifunc_encoder(intel_sdvo)) {
1440 uint8_t saved_ddc, temp_ddc; 1446 u8 ddc, saved_ddc = intel_sdvo->ddc_bus;
1441 saved_ddc = intel_sdvo->ddc_bus; 1447
1442 temp_ddc = intel_sdvo->ddc_bus >> 1;
1443 /* 1448 /*
1444 * Don't use the 1 as the argument of DDC bus switch to get 1449 * Don't use the 1 as the argument of DDC bus switch to get
1445 * the EDID. It is used for SDVO SPD ROM. 1450 * the EDID. It is used for SDVO SPD ROM.
1446 */ 1451 */
1447 while(temp_ddc > 1) { 1452 for (ddc = intel_sdvo->ddc_bus >> 1; ddc > 1; ddc >>= 1) {
1448 intel_sdvo->ddc_bus = temp_ddc; 1453 intel_sdvo->ddc_bus = ddc;
1449 edid = drm_get_edid(connector, intel_sdvo->base.ddc_bus); 1454 edid = intel_sdvo_get_edid(connector);
1450 if (edid) { 1455 if (edid)
1451 /*
1452 * When we can get the EDID, maybe it is the
1453 * correct DDC bus. Update it.
1454 */
1455 intel_sdvo->ddc_bus = temp_ddc;
1456 break; 1456 break;
1457 }
1458 temp_ddc >>= 1;
1459 } 1457 }
1458 /*
1459 * If we found the EDID on the other bus,
1460 * assume that is the correct DDC bus.
1461 */
1460 if (edid == NULL) 1462 if (edid == NULL)
1461 intel_sdvo->ddc_bus = saved_ddc; 1463 intel_sdvo->ddc_bus = saved_ddc;
1462 } 1464 }
1463 /* when there is no edid and no monitor is connected with VGA 1465
1464 * port, try to use the CRT ddc to read the EDID for DVI-connector 1466 /*
1467 * When there is no edid and no monitor is connected with VGA
1468 * port, try to use the CRT ddc to read the EDID for DVI-connector.
1465 */ 1469 */
1466 if (edid == NULL && intel_sdvo->analog_ddc_bus && 1470 if (edid == NULL)
1467 !intel_analog_is_connected(connector->dev)) 1471 edid = intel_sdvo_get_analog_edid(connector);
1468 edid = drm_get_edid(connector, intel_sdvo->analog_ddc_bus);
1469 1472
1473 status = connector_status_unknown;
1470 if (edid != NULL) { 1474 if (edid != NULL) {
1471 bool is_digital = !!(edid->input & DRM_EDID_INPUT_DIGITAL);
1472 bool need_digital = !!(intel_sdvo_connector->output_flag & SDVO_TMDS_MASK);
1473
1474 /* DDC bus is shared, match EDID to connector type */ 1475 /* DDC bus is shared, match EDID to connector type */
1475 if (is_digital && need_digital) 1476 if (edid->input & DRM_EDID_INPUT_DIGITAL) {
1477 status = connector_status_connected;
1476 intel_sdvo->is_hdmi = drm_detect_hdmi_monitor(edid); 1478 intel_sdvo->is_hdmi = drm_detect_hdmi_monitor(edid);
1477 else if (is_digital != need_digital) 1479 }
1478 status = connector_status_disconnected;
1479
1480 connector->display_info.raw_edid = NULL; 1480 connector->display_info.raw_edid = NULL;
1481 } else 1481 kfree(edid);
1482 status = connector_status_disconnected; 1482 }
1483 1483
1484 kfree(edid);
1485
1486 return status; 1484 return status;
1487} 1485}
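The probing loop above treats ddc_bus as a one-hot mask: halve it each step, skip bus 1 (reserved for the SDVO SPD ROM), keep the first bus that yields an EDID, and restore the saved value if none does. A sketch of that bit-scan; which bus responds is invented:

#include <stdio.h>

static int edid_on_bus(unsigned bus) { return bus == 0x02; } /* toy: bus 2 answers */

static unsigned probe_ddc(unsigned current_bus)
{
    unsigned saved = current_bus;

    /* shift the candidate mask right, never trying bus 1 (SPD ROM) */
    for (unsigned ddc = current_bus >> 1; ddc > 1; ddc >>= 1)
        if (edid_on_bus(ddc))
            return ddc; /* found EDID: adopt this bus */

    return saved;       /* nothing answered: keep the old bus */
}

int main(void)
{
    printf("selected bus: 0x%x\n", probe_ddc(0x10));
    return 0;
}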
1488 1486
@@ -1490,13 +1488,12 @@ static enum drm_connector_status
1490intel_sdvo_detect(struct drm_connector *connector, bool force) 1488intel_sdvo_detect(struct drm_connector *connector, bool force)
1491{ 1489{
1492 uint16_t response; 1490 uint16_t response;
1493 struct drm_encoder *encoder = intel_attached_encoder(connector); 1491 struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
1494 struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
1495 struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector); 1492 struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
1496 enum drm_connector_status ret; 1493 enum drm_connector_status ret;
1497 1494
1498 if (!intel_sdvo_write_cmd(intel_sdvo, 1495 if (!intel_sdvo_write_cmd(intel_sdvo,
1499 SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0)) 1496 SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0))
1500 return connector_status_unknown; 1497 return connector_status_unknown;
1501 if (intel_sdvo->is_tv) { 1498 if (intel_sdvo->is_tv) {
1502 /* add 30ms delay when the output type is SDVO-TV */ 1499 /* add 30ms delay when the output type is SDVO-TV */
@@ -1505,7 +1502,9 @@ intel_sdvo_detect(struct drm_connector *connector, bool force)
1505 if (!intel_sdvo_read_response(intel_sdvo, &response, 2)) 1502 if (!intel_sdvo_read_response(intel_sdvo, &response, 2))
1506 return connector_status_unknown; 1503 return connector_status_unknown;
1507 1504
1508 DRM_DEBUG_KMS("SDVO response %d %d\n", response & 0xff, response >> 8); 1505 DRM_DEBUG_KMS("SDVO response %d %d [%x]\n",
1506 response & 0xff, response >> 8,
1507 intel_sdvo_connector->output_flag);
1509 1508
1510 if (response == 0) 1509 if (response == 0)
1511 return connector_status_disconnected; 1510 return connector_status_disconnected;
@@ -1538,12 +1537,10 @@ intel_sdvo_detect(struct drm_connector *connector, bool force)
1538 1537
1539static void intel_sdvo_get_ddc_modes(struct drm_connector *connector) 1538static void intel_sdvo_get_ddc_modes(struct drm_connector *connector)
1540{ 1539{
1541 struct drm_encoder *encoder = intel_attached_encoder(connector); 1540 struct edid *edid;
1542 struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
1543 int num_modes;
1544 1541
1545 /* set the bus switch and get the modes */ 1542 /* set the bus switch and get the modes */
1546 num_modes = intel_ddc_get_modes(connector, intel_sdvo->base.ddc_bus); 1543 edid = intel_sdvo_get_edid(connector);
1547 1544
1548 /* 1545 /*
1549 * Mac mini hack. On this device, the DVI-I connector shares one DDC 1546 * Mac mini hack. On this device, the DVI-I connector shares one DDC
@@ -1551,12 +1548,14 @@ static void intel_sdvo_get_ddc_modes(struct drm_connector *connector)
1551 * DDC fails, check to see if the analog output is disconnected, in 1548 * DDC fails, check to see if the analog output is disconnected, in
1552 * which case we'll look there for the digital DDC data. 1549 * which case we'll look there for the digital DDC data.
1553 */ 1550 */
1554 if (num_modes == 0 && 1551 if (edid == NULL)
1555 intel_sdvo->analog_ddc_bus && 1552 edid = intel_sdvo_get_analog_edid(connector);
1556 !intel_analog_is_connected(connector->dev)) { 1553
1557 /* Switch to the analog ddc bus and try that 1554 if (edid != NULL) {
1558 */ 1555 drm_mode_connector_update_edid_property(connector, edid);
1559 (void) intel_ddc_get_modes(connector, intel_sdvo->analog_ddc_bus); 1556 drm_add_edid_modes(connector, edid);
1557 connector->display_info.raw_edid = NULL;
1558 kfree(edid);
1560 } 1559 }
1561} 1560}
1562 1561
@@ -1627,8 +1626,7 @@ struct drm_display_mode sdvo_tv_modes[] = {
1627 1626
1628static void intel_sdvo_get_tv_modes(struct drm_connector *connector) 1627static void intel_sdvo_get_tv_modes(struct drm_connector *connector)
1629{ 1628{
1630 struct drm_encoder *encoder = intel_attached_encoder(connector); 1629 struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
1631 struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
1632 struct intel_sdvo_sdtv_resolution_request tv_res; 1630 struct intel_sdvo_sdtv_resolution_request tv_res;
1633 uint32_t reply = 0, format_map = 0; 1631 uint32_t reply = 0, format_map = 0;
1634 int i; 1632 int i;
@@ -1644,7 +1642,8 @@ static void intel_sdvo_get_tv_modes(struct drm_connector *connector)
1644 return; 1642 return;
1645 1643
1646 BUILD_BUG_ON(sizeof(tv_res) != 3); 1644 BUILD_BUG_ON(sizeof(tv_res) != 3);
1647 if (!intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT, 1645 if (!intel_sdvo_write_cmd(intel_sdvo,
1646 SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT,
1648 &tv_res, sizeof(tv_res))) 1647 &tv_res, sizeof(tv_res)))
1649 return; 1648 return;
1650 if (!intel_sdvo_read_response(intel_sdvo, &reply, 3)) 1649 if (!intel_sdvo_read_response(intel_sdvo, &reply, 3))
@@ -1662,8 +1661,7 @@ static void intel_sdvo_get_tv_modes(struct drm_connector *connector)
 
 static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
 {
-	struct drm_encoder *encoder = intel_attached_encoder(connector);
-	struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
+	struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
 	struct drm_i915_private *dev_priv = connector->dev->dev_private;
 	struct drm_display_mode *newmode;
 
@@ -1672,7 +1670,7 @@ static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
 	 * Assume that the preferred modes are
 	 * arranged in priority order.
 	 */
-	intel_ddc_get_modes(connector, intel_sdvo->base.ddc_bus);
+	intel_ddc_get_modes(connector, intel_sdvo->i2c);
 	if (list_empty(&connector->probed_modes) == false)
 		goto end;
 
@@ -1693,6 +1691,10 @@ end:
 		if (newmode->type & DRM_MODE_TYPE_PREFERRED) {
 			intel_sdvo->sdvo_lvds_fixed_mode =
 				drm_mode_duplicate(connector->dev, newmode);
+
+			drm_mode_set_crtcinfo(intel_sdvo->sdvo_lvds_fixed_mode,
+					      0);
+
 			intel_sdvo->is_lvds = true;
 			break;
 		}
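The added drm_mode_set_crtcinfo() call matters because drm_mode_duplicate() copies only the user timings, while the mode-set path reads the derived crtc_* fields. A hedged sketch of the pairing (the wrapper name is hypothetical):

static struct drm_display_mode *
sketch_dup_fixed_mode(struct drm_device *dev, struct drm_display_mode *src)
{
	struct drm_display_mode *fixed = drm_mode_duplicate(dev, src);

	if (fixed)
		drm_mode_set_crtcinfo(fixed, 0);	/* fill in crtc_* timings */
	return fixed;
}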
@@ -1775,8 +1777,7 @@ intel_sdvo_set_property(struct drm_connector *connector,
 		       struct drm_property *property,
 		       uint64_t val)
 {
-	struct drm_encoder *encoder = intel_attached_encoder(connector);
-	struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
+	struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
 	struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
 	uint16_t temp_value;
 	uint8_t cmd;
@@ -1879,9 +1880,8 @@ set_value:
 
 
 done:
-	if (encoder->crtc) {
-		struct drm_crtc *crtc = encoder->crtc;
-
+	if (intel_sdvo->base.base.crtc) {
+		struct drm_crtc *crtc = intel_sdvo->base.base.crtc;
 		drm_crtc_helper_set_mode(crtc, &crtc->mode, crtc->x,
 					 crtc->y, crtc->fb);
 	}
@@ -1909,20 +1909,18 @@ static const struct drm_connector_funcs intel_sdvo_connector_funcs = {
 static const struct drm_connector_helper_funcs intel_sdvo_connector_helper_funcs = {
 	.get_modes = intel_sdvo_get_modes,
 	.mode_valid = intel_sdvo_mode_valid,
-	.best_encoder = intel_attached_encoder,
+	.best_encoder = intel_best_encoder,
 };
 
 static void intel_sdvo_enc_destroy(struct drm_encoder *encoder)
 {
-	struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
-
-	if (intel_sdvo->analog_ddc_bus)
-		intel_i2c_destroy(intel_sdvo->analog_ddc_bus);
+	struct intel_sdvo *intel_sdvo = to_intel_sdvo(encoder);
 
 	if (intel_sdvo->sdvo_lvds_fixed_mode != NULL)
 		drm_mode_destroy(encoder->dev,
 				 intel_sdvo->sdvo_lvds_fixed_mode);
 
+	i2c_del_adapter(&intel_sdvo->ddc);
 	intel_encoder_destroy(encoder);
 }
 
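The to_intel_sdvo()/intel_attached_sdvo() helpers this series introduces are container_of() casts. A self-contained toy (userspace stand-in types, not the kernel's) showing how the enclosing object is recovered from a pointer to an embedded member:

#include <stddef.h>

#define toy_container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct toy_drm_encoder   { int id; };
struct toy_intel_encoder { struct toy_drm_encoder base; };
struct toy_intel_sdvo    { struct toy_intel_encoder base; int sdvo_reg; };

static struct toy_intel_sdvo *toy_to_sdvo(struct toy_drm_encoder *encoder)
{
	/* one cast replaces the old two-step enc_to_intel_sdvo() walk */
	return toy_container_of(encoder, struct toy_intel_sdvo, base.base);
}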
@@ -1990,54 +1988,39 @@ intel_sdvo_select_ddc_bus(struct drm_i915_private *dev_priv,
 	intel_sdvo_guess_ddc_bus(sdvo);
 }
 
-static bool
-intel_sdvo_get_digital_encoding_mode(struct intel_sdvo *intel_sdvo, int device)
+static void
+intel_sdvo_select_i2c_bus(struct drm_i915_private *dev_priv,
+			  struct intel_sdvo *sdvo, u32 reg)
 {
-	return intel_sdvo_set_target_output(intel_sdvo,
-					    device == 0 ? SDVO_OUTPUT_TMDS0 : SDVO_OUTPUT_TMDS1) &&
-		intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_ENCODE,
-				     &intel_sdvo->is_hdmi, 1);
-}
+	struct sdvo_device_mapping *mapping;
+	u8 pin, speed;
 
-static struct intel_sdvo *
-intel_sdvo_chan_to_intel_sdvo(struct intel_i2c_chan *chan)
-{
-	struct drm_device *dev = chan->drm_dev;
-	struct drm_encoder *encoder;
+	if (IS_SDVOB(reg))
+		mapping = &dev_priv->sdvo_mappings[0];
+	else
+		mapping = &dev_priv->sdvo_mappings[1];
 
-	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
-		struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
-		if (intel_sdvo->base.ddc_bus == &chan->adapter)
-			return intel_sdvo;
+	pin = GMBUS_PORT_DPB;
+	speed = GMBUS_RATE_1MHZ >> 8;
+	if (mapping->initialized) {
+		pin = mapping->i2c_pin;
+		speed = mapping->i2c_speed;
 	}
 
-	return NULL;
+	sdvo->i2c = &dev_priv->gmbus[pin].adapter;
+	intel_gmbus_set_speed(sdvo->i2c, speed);
+	intel_gmbus_force_bit(sdvo->i2c, true);
 }
 
-static int intel_sdvo_master_xfer(struct i2c_adapter *i2c_adap,
-				  struct i2c_msg msgs[], int num)
+static bool
+intel_sdvo_get_digital_encoding_mode(struct intel_sdvo *intel_sdvo, int device)
 {
-	struct intel_sdvo *intel_sdvo;
-	struct i2c_algo_bit_data *algo_data;
-	const struct i2c_algorithm *algo;
-
-	algo_data = (struct i2c_algo_bit_data *)i2c_adap->algo_data;
-	intel_sdvo =
-		intel_sdvo_chan_to_intel_sdvo((struct intel_i2c_chan *)
-					      (algo_data->data));
-	if (intel_sdvo == NULL)
-		return -EINVAL;
-
-	algo = intel_sdvo->base.i2c_bus->algo;
-
-	intel_sdvo_set_control_bus_switch(intel_sdvo, intel_sdvo->ddc_bus);
-	return algo->master_xfer(i2c_adap, msgs, num);
+	return intel_sdvo_set_target_output(intel_sdvo,
+					    device == 0 ? SDVO_OUTPUT_TMDS0 : SDVO_OUTPUT_TMDS1) &&
+		intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_ENCODE,
+				     &intel_sdvo->is_hdmi, 1);
 }
 
-static struct i2c_algorithm intel_sdvo_i2c_bit_algo = {
-	.master_xfer = intel_sdvo_master_xfer,
-};
-
 static u8
 intel_sdvo_get_slave_addr(struct drm_device *dev, int sdvo_reg)
 {
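intel_sdvo_select_i2c_bus() above follows a pick-default-then-override pattern: a safe GMBUS pin and rate are chosen first, and an initialized VBT mapping overrides them. A toy sketch of the idiom (the numeric values are arbitrary stand-ins, not the real register encodings):

struct toy_sdvo_mapping { int initialized; unsigned char i2c_pin, i2c_speed; };

static void toy_pick_bus(const struct toy_sdvo_mapping *m,
			 unsigned char *pin, unsigned char *speed)
{
	*pin = 5;		/* stand-in for GMBUS_PORT_DPB */
	*speed = 0x32;		/* stand-in for the default rate */
	if (m->initialized) {	/* the BIOS (VBT) knows better */
		*pin = m->i2c_pin;
		*speed = m->i2c_speed;
	}
}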
@@ -2076,26 +2059,29 @@ intel_sdvo_get_slave_addr(struct drm_device *dev, int sdvo_reg)
 }
 
 static void
-intel_sdvo_connector_init(struct drm_encoder *encoder,
-			  struct drm_connector *connector)
+intel_sdvo_connector_init(struct intel_sdvo_connector *connector,
+			  struct intel_sdvo *encoder)
 {
-	drm_connector_init(encoder->dev, connector, &intel_sdvo_connector_funcs,
-			   connector->connector_type);
+	drm_connector_init(encoder->base.base.dev,
+			   &connector->base.base,
+			   &intel_sdvo_connector_funcs,
+			   connector->base.base.connector_type);
 
-	drm_connector_helper_add(connector, &intel_sdvo_connector_helper_funcs);
+	drm_connector_helper_add(&connector->base.base,
+				 &intel_sdvo_connector_helper_funcs);
 
-	connector->interlace_allowed = 0;
-	connector->doublescan_allowed = 0;
-	connector->display_info.subpixel_order = SubPixelHorizontalRGB;
+	connector->base.base.interlace_allowed = 0;
+	connector->base.base.doublescan_allowed = 0;
+	connector->base.base.display_info.subpixel_order = SubPixelHorizontalRGB;
 
-	drm_mode_connector_attach_encoder(connector, encoder);
-	drm_sysfs_connector_add(connector);
+	intel_connector_attach_encoder(&connector->base, &encoder->base);
+	drm_sysfs_connector_add(&connector->base.base);
 }
 
 static bool
 intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
 {
-	struct drm_encoder *encoder = &intel_sdvo->base.enc;
+	struct drm_encoder *encoder = &intel_sdvo->base.base;
 	struct drm_connector *connector;
 	struct intel_connector *intel_connector;
 	struct intel_sdvo_connector *intel_sdvo_connector;
@@ -2130,7 +2116,7 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
 	intel_sdvo->base.clone_mask = ((1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
 				       (1 << INTEL_ANALOG_CLONE_BIT));
 
-	intel_sdvo_connector_init(encoder, connector);
+	intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo);
 
 	return true;
 }
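All the base.base dereferences in these hunks come from struct embedding: an intel_sdvo_connector embeds an intel_connector, which embeds the DRM core's connector. A toy illustration (stand-in definitions; the real ones live in intel_drv.h and drm_crtc.h):

struct toy_drm_connector { int connector_type; };
struct toy_intel_connector { struct toy_drm_connector base; };
struct toy_intel_sdvo_connector { struct toy_intel_connector base; int output_flag; };

static struct toy_drm_connector *
toy_drm_connector_of(struct toy_intel_sdvo_connector *c)
{
	return &c->base.base;	/* the object the drm helpers operate on */
}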
@@ -2138,83 +2124,83 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
 static bool
 intel_sdvo_tv_init(struct intel_sdvo *intel_sdvo, int type)
 {
-	struct drm_encoder *encoder = &intel_sdvo->base.enc;
+	struct drm_encoder *encoder = &intel_sdvo->base.base;
 	struct drm_connector *connector;
 	struct intel_connector *intel_connector;
 	struct intel_sdvo_connector *intel_sdvo_connector;
 
 	intel_sdvo_connector = kzalloc(sizeof(struct intel_sdvo_connector), GFP_KERNEL);
 	if (!intel_sdvo_connector)
 		return false;
 
 	intel_connector = &intel_sdvo_connector->base;
 	connector = &intel_connector->base;
 	encoder->encoder_type = DRM_MODE_ENCODER_TVDAC;
 	connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO;
 
 	intel_sdvo->controlled_output |= type;
 	intel_sdvo_connector->output_flag = type;
 
 	intel_sdvo->is_tv = true;
 	intel_sdvo->base.needs_tv_clock = true;
 	intel_sdvo->base.clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT;
 
-	intel_sdvo_connector_init(encoder, connector);
+	intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo);
 
 	if (!intel_sdvo_tv_create_property(intel_sdvo, intel_sdvo_connector, type))
 		goto err;
 
 	if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector))
 		goto err;
 
 	return true;
 
 err:
-	intel_sdvo_destroy_enhance_property(connector);
-	kfree(intel_sdvo_connector);
+	intel_sdvo_destroy(connector);
 	return false;
 }
 
 static bool
 intel_sdvo_analog_init(struct intel_sdvo *intel_sdvo, int device)
 {
-	struct drm_encoder *encoder = &intel_sdvo->base.enc;
+	struct drm_encoder *encoder = &intel_sdvo->base.base;
 	struct drm_connector *connector;
 	struct intel_connector *intel_connector;
 	struct intel_sdvo_connector *intel_sdvo_connector;
 
 	intel_sdvo_connector = kzalloc(sizeof(struct intel_sdvo_connector), GFP_KERNEL);
 	if (!intel_sdvo_connector)
 		return false;
 
 	intel_connector = &intel_sdvo_connector->base;
 	connector = &intel_connector->base;
 	connector->polled = DRM_CONNECTOR_POLL_CONNECT;
 	encoder->encoder_type = DRM_MODE_ENCODER_DAC;
 	connector->connector_type = DRM_MODE_CONNECTOR_VGA;
 
 	if (device == 0) {
 		intel_sdvo->controlled_output |= SDVO_OUTPUT_RGB0;
 		intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB0;
 	} else if (device == 1) {
 		intel_sdvo->controlled_output |= SDVO_OUTPUT_RGB1;
 		intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB1;
 	}
 
 	intel_sdvo->base.clone_mask = ((1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
 				       (1 << INTEL_ANALOG_CLONE_BIT));
 
-	intel_sdvo_connector_init(encoder, connector);
-	return true;
+	intel_sdvo_connector_init(intel_sdvo_connector,
+				  intel_sdvo);
+	return true;
 }
 
 static bool
 intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device)
 {
-	struct drm_encoder *encoder = &intel_sdvo->base.enc;
+	struct drm_encoder *encoder = &intel_sdvo->base.base;
 	struct drm_connector *connector;
 	struct intel_connector *intel_connector;
 	struct intel_sdvo_connector *intel_sdvo_connector;
 
 	intel_sdvo_connector = kzalloc(sizeof(struct intel_sdvo_connector), GFP_KERNEL);
 	if (!intel_sdvo_connector)
@@ -2222,29 +2208,28 @@ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device)
 
 	intel_connector = &intel_sdvo_connector->base;
 	connector = &intel_connector->base;
 	encoder->encoder_type = DRM_MODE_ENCODER_LVDS;
 	connector->connector_type = DRM_MODE_CONNECTOR_LVDS;
 
 	if (device == 0) {
 		intel_sdvo->controlled_output |= SDVO_OUTPUT_LVDS0;
 		intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS0;
 	} else if (device == 1) {
 		intel_sdvo->controlled_output |= SDVO_OUTPUT_LVDS1;
 		intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS1;
 	}
 
 	intel_sdvo->base.clone_mask = ((1 << INTEL_ANALOG_CLONE_BIT) |
 				       (1 << INTEL_SDVO_LVDS_CLONE_BIT));
 
-	intel_sdvo_connector_init(encoder, connector);
+	intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo);
 	if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector))
 		goto err;
 
 	return true;
 
 err:
-	intel_sdvo_destroy_enhance_property(connector);
-	kfree(intel_sdvo_connector);
+	intel_sdvo_destroy(connector);
 	return false;
 }
 
@@ -2309,7 +2294,7 @@ static bool intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo,
 				  struct intel_sdvo_connector *intel_sdvo_connector,
 				  int type)
 {
-	struct drm_device *dev = intel_sdvo->base.enc.dev;
+	struct drm_device *dev = intel_sdvo->base.base.dev;
 	struct intel_sdvo_tv_format format;
 	uint32_t format_map, i;
 
@@ -2375,7 +2360,7 @@ intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo,
 			struct intel_sdvo_connector *intel_sdvo_connector,
 			struct intel_sdvo_enhancements_reply enhancements)
 {
-	struct drm_device *dev = intel_sdvo->base.enc.dev;
+	struct drm_device *dev = intel_sdvo->base.base.dev;
 	struct drm_connector *connector = &intel_sdvo_connector->base.base;
 	uint16_t response, data_value[2];
 
@@ -2504,7 +2489,7 @@ intel_sdvo_create_enhance_property_lvds(struct intel_sdvo *intel_sdvo,
 			struct intel_sdvo_connector *intel_sdvo_connector,
 			struct intel_sdvo_enhancements_reply enhancements)
 {
-	struct drm_device *dev = intel_sdvo->base.enc.dev;
+	struct drm_device *dev = intel_sdvo->base.base.dev;
 	struct drm_connector *connector = &intel_sdvo_connector->base.base;
 	uint16_t response, data_value[2];
 
@@ -2522,11 +2507,10 @@ static bool intel_sdvo_create_enhance_property(struct intel_sdvo *intel_sdvo,
 		uint16_t response;
 	} enhancements;
 
-	if (!intel_sdvo_get_value(intel_sdvo,
-				  SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS,
-				  &enhancements, sizeof(enhancements)))
-		return false;
-
+	enhancements.response = 0;
+	intel_sdvo_get_value(intel_sdvo,
+			     SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS,
+			     &enhancements, sizeof(enhancements));
 	if (enhancements.response == 0) {
 		DRM_DEBUG_KMS("No enhancement is supported\n");
 		return true;
@@ -2538,7 +2522,43 @@ static bool intel_sdvo_create_enhance_property(struct intel_sdvo *intel_sdvo,
 		return intel_sdvo_create_enhance_property_lvds(intel_sdvo, intel_sdvo_connector, enhancements.reply);
 	else
 		return true;
+}
+
+static int intel_sdvo_ddc_proxy_xfer(struct i2c_adapter *adapter,
+				     struct i2c_msg *msgs,
+				     int num)
+{
+	struct intel_sdvo *sdvo = adapter->algo_data;
+
+	if (!intel_sdvo_set_control_bus_switch(sdvo, sdvo->ddc_bus))
+		return -EIO;
+
+	return sdvo->i2c->algo->master_xfer(sdvo->i2c, msgs, num);
+}
 
+static u32 intel_sdvo_ddc_proxy_func(struct i2c_adapter *adapter)
+{
+	struct intel_sdvo *sdvo = adapter->algo_data;
+	return sdvo->i2c->algo->functionality(sdvo->i2c);
+}
+
+static const struct i2c_algorithm intel_sdvo_ddc_proxy = {
+	.master_xfer = intel_sdvo_ddc_proxy_xfer,
+	.functionality = intel_sdvo_ddc_proxy_func
+};
+
+static bool
+intel_sdvo_init_ddc_proxy(struct intel_sdvo *sdvo,
+			  struct drm_device *dev)
+{
+	sdvo->ddc.owner = THIS_MODULE;
+	sdvo->ddc.class = I2C_CLASS_DDC;
+	snprintf(sdvo->ddc.name, I2C_NAME_SIZE, "SDVO DDC proxy");
+	sdvo->ddc.dev.parent = &dev->pdev->dev;
+	sdvo->ddc.algo_data = sdvo;
+	sdvo->ddc.algo = &intel_sdvo_ddc_proxy;
+
+	return i2c_add_adapter(&sdvo->ddc) == 0;
 }
 
 bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
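Once the proxy adapter is registered, generic EDID code can probe through it without knowing about the SDVO control-bus switch: every transfer first lands in intel_sdvo_ddc_proxy_xfer(), which re-selects sdvo->ddc_bus before forwarding to the GMBUS adapter. A hedged usage sketch (the wrapper is ours; drm_get_edid() is the stock DRM helper):

static struct edid *sketch_probe_edid(struct drm_connector *connector,
				      struct intel_sdvo *sdvo)
{
	/* issues standard DDC transfers at slave 0x50 via the proxy */
	return drm_get_edid(connector, &sdvo->ddc);
}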
@@ -2546,95 +2566,66 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_encoder *intel_encoder;
 	struct intel_sdvo *intel_sdvo;
-	u8 ch[0x40];
 	int i;
-	u32 i2c_reg, ddc_reg, analog_ddc_reg;
 
 	intel_sdvo = kzalloc(sizeof(struct intel_sdvo), GFP_KERNEL);
 	if (!intel_sdvo)
 		return false;
 
+	if (!intel_sdvo_init_ddc_proxy(intel_sdvo, dev)) {
+		kfree(intel_sdvo);
+		return false;
+	}
+
 	intel_sdvo->sdvo_reg = sdvo_reg;
 
 	intel_encoder = &intel_sdvo->base;
 	intel_encoder->type = INTEL_OUTPUT_SDVO;
+	/* encoder type will be decided later */
+	drm_encoder_init(dev, &intel_encoder->base, &intel_sdvo_enc_funcs, 0);
 
-	if (HAS_PCH_SPLIT(dev)) {
-		i2c_reg = PCH_GPIOE;
-		ddc_reg = PCH_GPIOE;
-		analog_ddc_reg = PCH_GPIOA;
-	} else {
-		i2c_reg = GPIOE;
-		ddc_reg = GPIOE;
-		analog_ddc_reg = GPIOA;
-	}
-
-	/* setup the DDC bus. */
-	if (IS_SDVOB(sdvo_reg))
-		intel_encoder->i2c_bus = intel_i2c_create(dev, i2c_reg, "SDVOCTRL_E for SDVOB");
-	else
-		intel_encoder->i2c_bus = intel_i2c_create(dev, i2c_reg, "SDVOCTRL_E for SDVOC");
-
-	if (!intel_encoder->i2c_bus)
-		goto err_inteloutput;
-
-	intel_sdvo->slave_addr = intel_sdvo_get_slave_addr(dev, sdvo_reg);
-
-	/* Save the bit-banging i2c functionality for use by the DDC wrapper */
-	intel_sdvo_i2c_bit_algo.functionality = intel_encoder->i2c_bus->algo->functionality;
+	intel_sdvo->slave_addr = intel_sdvo_get_slave_addr(dev, sdvo_reg) >> 1;
+	intel_sdvo_select_i2c_bus(dev_priv, intel_sdvo, sdvo_reg);
 
 	/* Read the regs to test if we can talk to the device */
 	for (i = 0; i < 0x40; i++) {
-		if (!intel_sdvo_read_byte(intel_sdvo, i, &ch[i])) {
+		u8 byte;
+
+		if (!intel_sdvo_read_byte(intel_sdvo, i, &byte)) {
 			DRM_DEBUG_KMS("No SDVO device found on SDVO%c\n",
 				      IS_SDVOB(sdvo_reg) ? 'B' : 'C');
-			goto err_i2c;
+			goto err;
 		}
 	}
 
-	/* setup the DDC bus. */
-	if (IS_SDVOB(sdvo_reg)) {
-		intel_encoder->ddc_bus = intel_i2c_create(dev, ddc_reg, "SDVOB DDC BUS");
-		intel_sdvo->analog_ddc_bus = intel_i2c_create(dev, analog_ddc_reg,
-						"SDVOB/VGA DDC BUS");
+	if (IS_SDVOB(sdvo_reg))
 		dev_priv->hotplug_supported_mask |= SDVOB_HOTPLUG_INT_STATUS;
-	} else {
-		intel_encoder->ddc_bus = intel_i2c_create(dev, ddc_reg, "SDVOC DDC BUS");
-		intel_sdvo->analog_ddc_bus = intel_i2c_create(dev, analog_ddc_reg,
-						"SDVOC/VGA DDC BUS");
+	else
 		dev_priv->hotplug_supported_mask |= SDVOC_HOTPLUG_INT_STATUS;
-	}
-	if (intel_encoder->ddc_bus == NULL || intel_sdvo->analog_ddc_bus == NULL)
-		goto err_i2c;
 
-	/* Wrap with our custom algo which switches to DDC mode */
-	intel_encoder->ddc_bus->algo = &intel_sdvo_i2c_bit_algo;
-
-	/* encoder type will be decided later */
-	drm_encoder_init(dev, &intel_encoder->enc, &intel_sdvo_enc_funcs, 0);
-	drm_encoder_helper_add(&intel_encoder->enc, &intel_sdvo_helper_funcs);
+	drm_encoder_helper_add(&intel_encoder->base, &intel_sdvo_helper_funcs);
 
 	/* In default case sdvo lvds is false */
 	if (!intel_sdvo_get_capabilities(intel_sdvo, &intel_sdvo->caps))
-		goto err_enc;
+		goto err;
 
 	if (intel_sdvo_output_setup(intel_sdvo,
 				    intel_sdvo->caps.output_flags) != true) {
 		DRM_DEBUG_KMS("SDVO output failed to setup on SDVO%c\n",
 			      IS_SDVOB(sdvo_reg) ? 'B' : 'C');
-		goto err_enc;
+		goto err;
 	}
 
 	intel_sdvo_select_ddc_bus(dev_priv, intel_sdvo, sdvo_reg);
 
 	/* Set the input timing to the screen. Assume always input 0. */
 	if (!intel_sdvo_set_target_input(intel_sdvo))
-		goto err_enc;
+		goto err;
 
 	if (!intel_sdvo_get_input_pixel_clock_range(intel_sdvo,
 						    &intel_sdvo->pixel_clock_min,
 						    &intel_sdvo->pixel_clock_max))
-		goto err_enc;
+		goto err;
 
 	DRM_DEBUG_KMS("%s device VID/DID: %02X:%02X.%02X, "
 		      "clock range %dMHz - %dMHz, "
@@ -2654,16 +2645,9 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
 		      (SDVO_OUTPUT_TMDS1 | SDVO_OUTPUT_RGB1) ? 'Y' : 'N');
 	return true;
 
-err_enc:
-	drm_encoder_cleanup(&intel_encoder->enc);
-err_i2c:
-	if (intel_sdvo->analog_ddc_bus != NULL)
-		intel_i2c_destroy(intel_sdvo->analog_ddc_bus);
-	if (intel_encoder->ddc_bus != NULL)
-		intel_i2c_destroy(intel_encoder->ddc_bus);
-	if (intel_encoder->i2c_bus != NULL)
-		intel_i2c_destroy(intel_encoder->i2c_bus);
-err_inteloutput:
+err:
+	drm_encoder_cleanup(&intel_encoder->base);
+	i2c_del_adapter(&intel_sdvo->ddc);
 	kfree(intel_sdvo);
 
 	return false;
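Two details of the init rework are easy to miss: the slave address is now shifted right once, presumably converting the VBT's 8-bit read/write address to the 7-bit form the i2c core expects, and the probe loop reads into a throwaway byte instead of a 0x40-byte stack buffer. A rough sketch of what an intel_sdvo_read_byte()-style helper does through the i2c core (a sketch, not the driver's code):

#include <linux/i2c.h>
#include <linux/kernel.h>

static int sketch_sdvo_read_byte(struct i2c_adapter *adap, u16 slave_addr,
				 u8 reg, u8 *val)
{
	struct i2c_msg msgs[] = {
		/* write the register index, then read one byte back */
		{ .addr = slave_addr, .flags = 0,        .len = 1, .buf = &reg },
		{ .addr = slave_addr, .flags = I2C_M_RD, .len = 1, .buf = val  },
	};

	return i2c_transfer(adap, msgs, ARRAY_SIZE(msgs)) == 2 ? 0 : -EIO;
}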
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 4a117e318a73..2f7681989316 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -48,7 +48,7 @@ struct intel_tv {
 	struct intel_encoder base;
 
 	int type;
-	char *tv_format;
+	const char *tv_format;
 	int margin[4];
 	u32 save_TV_H_CTL_1;
 	u32 save_TV_H_CTL_2;
@@ -350,7 +350,7 @@ static const struct video_levels component_levels = {
 
 
 struct tv_mode {
-	char *name;
+	const char *name;
 	int clock;
 	int refresh;		/* in millihertz (for precision) */
 	u32 oversample;
@@ -900,7 +900,14 @@ static const struct tv_mode tv_modes[] = {
 
 static struct intel_tv *enc_to_intel_tv(struct drm_encoder *encoder)
 {
-	return container_of(enc_to_intel_encoder(encoder), struct intel_tv, base);
+	return container_of(encoder, struct intel_tv, base.base);
+}
+
+static struct intel_tv *intel_attached_tv(struct drm_connector *connector)
+{
+	return container_of(intel_attached_encoder(connector),
+			    struct intel_tv,
+			    base);
 }
 
 static void
@@ -922,7 +929,7 @@ intel_tv_dpms(struct drm_encoder *encoder, int mode)
 }
 
 static const struct tv_mode *
-intel_tv_mode_lookup (char *tv_format)
+intel_tv_mode_lookup(const char *tv_format)
 {
 	int i;
 
@@ -936,22 +943,23 @@
 }
 
 static const struct tv_mode *
-intel_tv_mode_find (struct intel_tv *intel_tv)
+intel_tv_mode_find(struct intel_tv *intel_tv)
 {
 	return intel_tv_mode_lookup(intel_tv->tv_format);
 }
 
 static enum drm_mode_status
-intel_tv_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode)
+intel_tv_mode_valid(struct drm_connector *connector,
+		    struct drm_display_mode *mode)
 {
-	struct drm_encoder *encoder = intel_attached_encoder(connector);
-	struct intel_tv *intel_tv = enc_to_intel_tv(encoder);
+	struct intel_tv *intel_tv = intel_attached_tv(connector);
 	const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
 
 	/* Ensure TV refresh is close to desired refresh */
 	if (tv_mode && abs(tv_mode->refresh - drm_mode_vrefresh(mode) * 1000)
 	    < 1000)
 		return MODE_OK;
+
 	return MODE_CLOCK_RANGE;
 }
 
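The tolerance check keeps refresh rates in millihertz, so the 1000 mHz window accepts any mode within one full hertz of the TV standard; NTSC's 59940 mHz against a 60 Hz mode differs by only 60 mHz. A self-contained restatement:

#include <stdlib.h>

static int refresh_close_enough(int tv_refresh_mhz, int mode_vrefresh_hz)
{
	/* both sides in millihertz; tolerance of 1 Hz */
	return abs(tv_refresh_mhz - mode_vrefresh_hz * 1000) < 1000;
}
/* refresh_close_enough(59940, 60) -> 1  (NTSC vs a 60 Hz mode) */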
@@ -1131,7 +1139,7 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
 				   color_conversion->av);
 	}
 
-	if (IS_I965G(dev))
+	if (INTEL_INFO(dev)->gen >= 4)
 		I915_WRITE(TV_CLR_KNOBS, 0x00404000);
 	else
 		I915_WRITE(TV_CLR_KNOBS, 0x00606000);
@@ -1157,12 +1165,12 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
 	I915_WRITE(dspbase_reg, I915_READ(dspbase_reg));
 
 	/* Wait for vblank for the disable to take effect */
-	if (!IS_I9XX(dev))
+	if (IS_GEN2(dev))
 		intel_wait_for_vblank(dev, intel_crtc->pipe);
 
-	I915_WRITE(pipeconf_reg, pipeconf & ~PIPEACONF_ENABLE);
+	I915_WRITE(pipeconf_reg, pipeconf & ~PIPECONF_ENABLE);
 	/* Wait for vblank for the disable to take effect. */
-	intel_wait_for_vblank(dev, intel_crtc->pipe);
+	intel_wait_for_pipe_off(dev, intel_crtc->pipe);
 
 	/* Filter ctl must be set before TV_WIN_SIZE */
 	I915_WRITE(TV_FILTER_CTL_1, TV_AUTO_SCALE);
@@ -1196,7 +1204,7 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
 		I915_WRITE(TV_V_LUMA_0 + (i<<2), tv_mode->filter_table[j++]);
 	for (i = 0; i < 43; i++)
 		I915_WRITE(TV_V_CHROMA_0 + (i<<2), tv_mode->filter_table[j++]);
-	I915_WRITE(TV_DAC, 0);
+	I915_WRITE(TV_DAC, I915_READ(TV_DAC) & TV_DAC_SAVE);
 	I915_WRITE(TV_CTL, tv_ctl);
 }
 
@@ -1228,15 +1236,13 @@ static const struct drm_display_mode reported_modes[] = {
 static int
 intel_tv_detect_type (struct intel_tv *intel_tv)
 {
-	struct drm_encoder *encoder = &intel_tv->base.enc;
+	struct drm_encoder *encoder = &intel_tv->base.base;
 	struct drm_device *dev = encoder->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	unsigned long irqflags;
 	u32 tv_ctl, save_tv_ctl;
 	u32 tv_dac, save_tv_dac;
-	int type = DRM_MODE_CONNECTOR_Unknown;
-
-	tv_dac = I915_READ(TV_DAC);
+	int type;
 
 	/* Disable TV interrupts around load detect or we'll recurse */
 	spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
@@ -1244,19 +1250,14 @@ intel_tv_detect_type (struct intel_tv *intel_tv)
 				     PIPE_HOTPLUG_TV_INTERRUPT_ENABLE);
 	spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
 
-	/*
-	 * Detect TV by polling)
-	 */
-	save_tv_dac = tv_dac;
-	tv_ctl = I915_READ(TV_CTL);
-	save_tv_ctl = tv_ctl;
-	tv_ctl &= ~TV_ENC_ENABLE;
-	tv_ctl &= ~TV_TEST_MODE_MASK;
+	save_tv_dac = tv_dac = I915_READ(TV_DAC);
+	save_tv_ctl = tv_ctl = I915_READ(TV_CTL);
+
+	/* Poll for TV detection */
+	tv_ctl &= ~(TV_ENC_ENABLE | TV_TEST_MODE_MASK);
 	tv_ctl |= TV_TEST_MODE_MONITOR_DETECT;
-	tv_dac &= ~TVDAC_SENSE_MASK;
-	tv_dac &= ~DAC_A_MASK;
-	tv_dac &= ~DAC_B_MASK;
-	tv_dac &= ~DAC_C_MASK;
+
+	tv_dac &= ~(TVDAC_SENSE_MASK | DAC_A_MASK | DAC_B_MASK | DAC_C_MASK);
 	tv_dac |= (TVDAC_STATE_CHG_EN |
 		   TVDAC_A_SENSE_CTL |
 		   TVDAC_B_SENSE_CTL |
@@ -1265,37 +1266,40 @@ intel_tv_detect_type (struct intel_tv *intel_tv)
 		   DAC_A_0_7_V |
 		   DAC_B_0_7_V |
 		   DAC_C_0_7_V);
+
 	I915_WRITE(TV_CTL, tv_ctl);
 	I915_WRITE(TV_DAC, tv_dac);
 	POSTING_READ(TV_DAC);
-	msleep(20);
 
-	tv_dac = I915_READ(TV_DAC);
-	I915_WRITE(TV_DAC, save_tv_dac);
-	I915_WRITE(TV_CTL, save_tv_ctl);
-	POSTING_READ(TV_CTL);
-	msleep(20);
+	intel_wait_for_vblank(intel_tv->base.base.dev,
+			      to_intel_crtc(intel_tv->base.base.crtc)->pipe);
 
-	/*
-	 *  A B C
-	 *  0 1 1 Composite
-	 *  1 0 X svideo
-	 *  0 0 0 Component
-	 */
-	if ((tv_dac & TVDAC_SENSE_MASK) == (TVDAC_B_SENSE | TVDAC_C_SENSE)) {
-		DRM_DEBUG_KMS("Detected Composite TV connection\n");
-		type = DRM_MODE_CONNECTOR_Composite;
-	} else if ((tv_dac & (TVDAC_A_SENSE|TVDAC_B_SENSE)) == TVDAC_A_SENSE) {
-		DRM_DEBUG_KMS("Detected S-Video TV connection\n");
-		type = DRM_MODE_CONNECTOR_SVIDEO;
-	} else if ((tv_dac & TVDAC_SENSE_MASK) == 0) {
-		DRM_DEBUG_KMS("Detected Component TV connection\n");
-		type = DRM_MODE_CONNECTOR_Component;
-	} else {
-		DRM_DEBUG_KMS("No TV connection detected\n");
-		type = -1;
+	type = -1;
+	if (wait_for((tv_dac = I915_READ(TV_DAC)) & TVDAC_STATE_CHG, 20) == 0) {
+		DRM_DEBUG_KMS("TV detected: %x, %x\n", tv_ctl, tv_dac);
+		/*
+		 *  A B C
+		 *  0 1 1 Composite
+		 *  1 0 X svideo
+		 *  0 0 0 Component
+		 */
+		if ((tv_dac & TVDAC_SENSE_MASK) == (TVDAC_B_SENSE | TVDAC_C_SENSE)) {
+			DRM_DEBUG_KMS("Detected Composite TV connection\n");
+			type = DRM_MODE_CONNECTOR_Composite;
+		} else if ((tv_dac & (TVDAC_A_SENSE|TVDAC_B_SENSE)) == TVDAC_A_SENSE) {
+			DRM_DEBUG_KMS("Detected S-Video TV connection\n");
+			type = DRM_MODE_CONNECTOR_SVIDEO;
+		} else if ((tv_dac & TVDAC_SENSE_MASK) == 0) {
+			DRM_DEBUG_KMS("Detected Component TV connection\n");
+			type = DRM_MODE_CONNECTOR_Component;
+		} else {
+			DRM_DEBUG_KMS("Unrecognised TV connection\n");
+		}
 	}
 
+	I915_WRITE(TV_DAC, save_tv_dac & ~TVDAC_STATE_CHG_EN);
+	I915_WRITE(TV_CTL, save_tv_ctl);
+
 	/* Restore interrupt config */
 	spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
 	i915_enable_pipestat(dev_priv, 0, PIPE_HOTPLUG_INTERRUPT_ENABLE |
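wait_for() above is i915's poll-with-timeout macro from intel_drv.h; the call site treats 0 as success. A from-memory sketch of its shape (not a verbatim copy):

#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/errno.h>

/* Poll COND until it holds or MS milliseconds elapse; 0 on success,
 * -ETIMEDOUT otherwise. */
#define sketch_wait_for(COND, MS) ({					\
	unsigned long timeout__ = jiffies + msecs_to_jiffies(MS);	\
	int ret__ = 0;							\
	while (!(COND)) {						\
		if (time_after(jiffies, timeout__)) {			\
			ret__ = -ETIMEDOUT;				\
			break;						\
		}							\
		msleep(1);						\
	}								\
	ret__;								\
})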
@@ -1311,8 +1315,7 @@ intel_tv_detect_type (struct intel_tv *intel_tv)
  */
 static void intel_tv_find_better_format(struct drm_connector *connector)
 {
-	struct drm_encoder *encoder = intel_attached_encoder(connector);
-	struct intel_tv *intel_tv = enc_to_intel_tv(encoder);
+	struct intel_tv *intel_tv = intel_attached_tv(connector);
 	const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
 	int i;
 
@@ -1344,14 +1347,13 @@ static enum drm_connector_status
 intel_tv_detect(struct drm_connector *connector, bool force)
 {
 	struct drm_display_mode mode;
-	struct drm_encoder *encoder = intel_attached_encoder(connector);
-	struct intel_tv *intel_tv = enc_to_intel_tv(encoder);
+	struct intel_tv *intel_tv = intel_attached_tv(connector);
 	int type;
 
 	mode = reported_modes[0];
 	drm_mode_set_crtcinfo(&mode, CRTC_INTERLACE_HALVE_V);
 
-	if (encoder->crtc && encoder->crtc->enabled) {
+	if (intel_tv->base.base.crtc && intel_tv->base.base.crtc->enabled) {
 		type = intel_tv_detect_type(intel_tv);
 	} else if (force) {
 		struct drm_crtc *crtc;
@@ -1375,11 +1377,10 @@ intel_tv_detect(struct drm_connector *connector, bool force)
 		return connector_status_connected;
 }
 
-static struct input_res {
-	char *name;
+static const struct input_res {
+	const char *name;
 	int w, h;
-} input_res_table[] =
-{
+} input_res_table[] = {
 	{"640x480", 640, 480},
 	{"800x600", 800, 600},
 	{"1024x768", 1024, 768},
@@ -1396,8 +1397,7 @@ static void
 intel_tv_chose_preferred_modes(struct drm_connector *connector,
 			       struct drm_display_mode *mode_ptr)
 {
-	struct drm_encoder *encoder = intel_attached_encoder(connector);
-	struct intel_tv *intel_tv = enc_to_intel_tv(encoder);
+	struct intel_tv *intel_tv = intel_attached_tv(connector);
 	const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
 
 	if (tv_mode->nbr_end < 480 && mode_ptr->vdisplay == 480)
@@ -1422,15 +1422,14 @@ static int
 intel_tv_get_modes(struct drm_connector *connector)
 {
 	struct drm_display_mode *mode_ptr;
-	struct drm_encoder *encoder = intel_attached_encoder(connector);
-	struct intel_tv *intel_tv = enc_to_intel_tv(encoder);
+	struct intel_tv *intel_tv = intel_attached_tv(connector);
 	const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
 	int j, count = 0;
 	u64 tmp;
 
 	for (j = 0; j < ARRAY_SIZE(input_res_table);
 	     j++) {
-		struct input_res *input = &input_res_table[j];
+		const struct input_res *input = &input_res_table[j];
 		unsigned int hactive_s = input->w;
 		unsigned int vactive_s = input->h;
 
@@ -1488,9 +1487,8 @@ intel_tv_set_property(struct drm_connector *connector, struct drm_property *prop
 		      uint64_t val)
 {
 	struct drm_device *dev = connector->dev;
-	struct drm_encoder *encoder = intel_attached_encoder(connector);
-	struct intel_tv *intel_tv = enc_to_intel_tv(encoder);
-	struct drm_crtc *crtc = encoder->crtc;
+	struct intel_tv *intel_tv = intel_attached_tv(connector);
+	struct drm_crtc *crtc = intel_tv->base.base.crtc;
 	int ret = 0;
 	bool changed = false;
 
@@ -1555,7 +1553,7 @@ static const struct drm_connector_funcs intel_tv_connector_funcs = {
 static const struct drm_connector_helper_funcs intel_tv_connector_helper_funcs = {
 	.mode_valid = intel_tv_mode_valid,
 	.get_modes = intel_tv_get_modes,
-	.best_encoder = intel_attached_encoder,
+	.best_encoder = intel_best_encoder,
 };
 
 static const struct drm_encoder_funcs intel_tv_enc_funcs = {
@@ -1607,7 +1605,7 @@ intel_tv_init(struct drm_device *dev)
 	struct intel_encoder *intel_encoder;
 	struct intel_connector *intel_connector;
 	u32 tv_dac_on, tv_dac_off, save_tv_dac;
-	char **tv_format_names;
+	char *tv_format_names[ARRAY_SIZE(tv_modes)];
 	int i, initial_mode = 0;
 
 	if ((I915_READ(TV_CTL) & TV_FUSE_STATE_MASK) == TV_FUSE_STATE_DISABLED)
@@ -1661,15 +1659,15 @@ intel_tv_init(struct drm_device *dev)
 	drm_connector_init(dev, connector, &intel_tv_connector_funcs,
 			   DRM_MODE_CONNECTOR_SVIDEO);
 
-	drm_encoder_init(dev, &intel_encoder->enc, &intel_tv_enc_funcs,
+	drm_encoder_init(dev, &intel_encoder->base, &intel_tv_enc_funcs,
 			 DRM_MODE_ENCODER_TVDAC);
 
-	drm_mode_connector_attach_encoder(&intel_connector->base, &intel_encoder->enc);
+	intel_connector_attach_encoder(intel_connector, intel_encoder);
 	intel_encoder->type = INTEL_OUTPUT_TVOUT;
 	intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
 	intel_encoder->clone_mask = (1 << INTEL_TV_CLONE_BIT);
-	intel_encoder->enc.possible_crtcs = ((1 << 0) | (1 << 1));
-	intel_encoder->enc.possible_clones = (1 << INTEL_OUTPUT_TVOUT);
+	intel_encoder->base.possible_crtcs = ((1 << 0) | (1 << 1));
+	intel_encoder->base.possible_clones = (1 << INTEL_OUTPUT_TVOUT);
 	intel_tv->type = DRM_MODE_CONNECTOR_Unknown;
 
 	/* BIOS margin values */
@@ -1678,21 +1676,19 @@ intel_tv_init(struct drm_device *dev)
 	intel_tv->margin[TV_MARGIN_RIGHT] = 46;
 	intel_tv->margin[TV_MARGIN_BOTTOM] = 37;
 
-	intel_tv->tv_format = kstrdup(tv_modes[initial_mode].name, GFP_KERNEL);
+	intel_tv->tv_format = tv_modes[initial_mode].name;
 
-	drm_encoder_helper_add(&intel_encoder->enc, &intel_tv_helper_funcs);
+	drm_encoder_helper_add(&intel_encoder->base, &intel_tv_helper_funcs);
 	drm_connector_helper_add(connector, &intel_tv_connector_helper_funcs);
 	connector->interlace_allowed = false;
 	connector->doublescan_allowed = false;
 
 	/* Create TV properties then attach current values */
-	tv_format_names = kmalloc(sizeof(char *) * ARRAY_SIZE(tv_modes),
-				  GFP_KERNEL);
-	if (!tv_format_names)
-		goto out;
 	for (i = 0; i < ARRAY_SIZE(tv_modes); i++)
-		tv_format_names[i] = tv_modes[i].name;
-	drm_mode_create_tv_properties(dev, ARRAY_SIZE(tv_modes), tv_format_names);
+		tv_format_names[i] = (char *)tv_modes[i].name;
+	drm_mode_create_tv_properties(dev,
+				      ARRAY_SIZE(tv_modes),
+				      tv_format_names);
 
 	drm_connector_attach_property(connector, dev->mode_config.tv_mode_property,
 				      initial_mode);
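The kmalloc()'d name list (and the 'out:' bail-out removed in the final hunk below) becomes a fixed-size on-stack array, since the list is only needed for the duration of the call; the (char *) cast is there because drm_mode_create_tv_properties() takes a non-const char **. A self-contained toy of the pattern:

#include <stdio.h>

#define TOY_ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct toy_mode { const char *name; };
static const struct toy_mode toy_modes[] = { {"NTSC-M"}, {"PAL"}, {"SECAM"} };

int main(void)
{
	/* size known at compile time - no allocation to fail */
	const char *names[TOY_ARRAY_SIZE(toy_modes)];
	size_t i;

	for (i = 0; i < TOY_ARRAY_SIZE(toy_modes); i++)
		names[i] = toy_modes[i].name;
	for (i = 0; i < TOY_ARRAY_SIZE(toy_modes); i++)
		printf("%s\n", names[i]);
	return 0;
}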
@@ -1708,6 +1704,5 @@ intel_tv_init(struct drm_device *dev)
 	drm_connector_attach_property(connector,
 				      dev->mode_config.tv_bottom_margin_property,
 				      intel_tv->margin[TV_MARGIN_BOTTOM]);
-out:
 	drm_sysfs_connector_add(connector);
 }