 drivers/gpu/drm/i915/i915_drv.c | 434 ++++++++++++++++++++++++++++++++--------
 1 file changed, 337 insertions(+), 97 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 6dbe14cc4f74..eb91e2dd7914 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -32,6 +32,7 @@
 #include "drm.h"
 #include "i915_drm.h"
 #include "i915_drv.h"
+#include "intel_drv.h"
 
 #include <linux/console.h>
 #include "drm_crtc_helper.h"
@@ -42,18 +43,39 @@ module_param_named(modeset, i915_modeset, int, 0400);
 unsigned int i915_fbpercrtc = 0;
 module_param_named(fbpercrtc, i915_fbpercrtc, int, 0400);
 
+int i915_panel_ignore_lid = 0;
+module_param_named(panel_ignore_lid, i915_panel_ignore_lid, int, 0600);
+
 unsigned int i915_powersave = 1;
-module_param_named(powersave, i915_powersave, int, 0400);
+module_param_named(powersave, i915_powersave, int, 0600);
+
+unsigned int i915_semaphores = 0;
+module_param_named(semaphores, i915_semaphores, int, 0600);
+
+unsigned int i915_enable_rc6 = 0;
+module_param_named(i915_enable_rc6, i915_enable_rc6, int, 0600);
+
+unsigned int i915_enable_fbc = 0;
+module_param_named(i915_enable_fbc, i915_enable_fbc, int, 0600);
 
 unsigned int i915_lvds_downclock = 0;
 module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400);
 
+unsigned int i915_panel_use_ssc = 1;
+module_param_named(lvds_use_ssc, i915_panel_use_ssc, int, 0600);
+
+int i915_vbt_sdvo_panel_type = -1;
+module_param_named(vbt_sdvo_panel_type, i915_vbt_sdvo_panel_type, int, 0600);
+
+static bool i915_try_reset = true;
+module_param_named(reset, i915_try_reset, bool, 0600);
+
 static struct drm_driver driver;
 extern int intel_agp_enabled;
 
 #define INTEL_VGA_DEVICE(id, info) {		\
 	.class = PCI_CLASS_DISPLAY_VGA << 8,	\
-	.class_mask = 0xffff00,			\
+	.class_mask = 0xff0000,			\
 	.vendor = 0x8086,			\
 	.device = id,				\
 	.subvendor = PCI_ANY_ID,		\
@@ -61,86 +83,127 @@ extern int intel_agp_enabled;
 	.driver_data = (unsigned long) info }
 
 static const struct intel_device_info intel_i830_info = {
-	.gen = 2, .is_i8xx = 1, .is_mobile = 1, .cursor_needs_physical = 1,
+	.gen = 2, .is_mobile = 1, .cursor_needs_physical = 1,
+	.has_overlay = 1, .overlay_needs_physical = 1,
 };
 
 static const struct intel_device_info intel_845g_info = {
-	.gen = 2, .is_i8xx = 1,
+	.gen = 2,
+	.has_overlay = 1, .overlay_needs_physical = 1,
 };
 
 static const struct intel_device_info intel_i85x_info = {
-	.gen = 2, .is_i8xx = 1, .is_i85x = 1, .is_mobile = 1,
+	.gen = 2, .is_i85x = 1, .is_mobile = 1,
 	.cursor_needs_physical = 1,
+	.has_overlay = 1, .overlay_needs_physical = 1,
 };
 
 static const struct intel_device_info intel_i865g_info = {
-	.gen = 2, .is_i8xx = 1,
+	.gen = 2,
+	.has_overlay = 1, .overlay_needs_physical = 1,
 };
 
 static const struct intel_device_info intel_i915g_info = {
-	.gen = 3, .is_i915g = 1, .is_i9xx = 1, .cursor_needs_physical = 1,
+	.gen = 3, .is_i915g = 1, .cursor_needs_physical = 1,
+	.has_overlay = 1, .overlay_needs_physical = 1,
 };
 static const struct intel_device_info intel_i915gm_info = {
-	.gen = 3, .is_i9xx = 1, .is_mobile = 1,
+	.gen = 3, .is_mobile = 1,
 	.cursor_needs_physical = 1,
+	.has_overlay = 1, .overlay_needs_physical = 1,
+	.supports_tv = 1,
 };
 static const struct intel_device_info intel_i945g_info = {
-	.gen = 3, .is_i9xx = 1, .has_hotplug = 1, .cursor_needs_physical = 1,
+	.gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1,
+	.has_overlay = 1, .overlay_needs_physical = 1,
 };
 static const struct intel_device_info intel_i945gm_info = {
-	.gen = 3, .is_i945gm = 1, .is_i9xx = 1, .is_mobile = 1,
+	.gen = 3, .is_i945gm = 1, .is_mobile = 1,
 	.has_hotplug = 1, .cursor_needs_physical = 1,
+	.has_overlay = 1, .overlay_needs_physical = 1,
+	.supports_tv = 1,
 };
 
 static const struct intel_device_info intel_i965g_info = {
-	.gen = 4, .is_broadwater = 1, .is_i965g = 1, .is_i9xx = 1,
+	.gen = 4, .is_broadwater = 1,
 	.has_hotplug = 1,
+	.has_overlay = 1,
 };
 
 static const struct intel_device_info intel_i965gm_info = {
-	.gen = 4, .is_crestline = 1, .is_i965g = 1, .is_i965gm = 1, .is_i9xx = 1,
-	.is_mobile = 1, .has_fbc = 1, .has_rc6 = 1, .has_hotplug = 1,
+	.gen = 4, .is_crestline = 1,
+	.is_mobile = 1, .has_fbc = 1, .has_hotplug = 1,
+	.has_overlay = 1,
+	.supports_tv = 1,
 };
 
 static const struct intel_device_info intel_g33_info = {
-	.gen = 3, .is_g33 = 1, .is_i9xx = 1,
+	.gen = 3, .is_g33 = 1,
 	.need_gfx_hws = 1, .has_hotplug = 1,
+	.has_overlay = 1,
 };
 
 static const struct intel_device_info intel_g45_info = {
-	.gen = 4, .is_i965g = 1, .is_g4x = 1, .is_i9xx = 1, .need_gfx_hws = 1,
+	.gen = 4, .is_g4x = 1, .need_gfx_hws = 1,
 	.has_pipe_cxsr = 1, .has_hotplug = 1,
+	.has_bsd_ring = 1,
 };
 
 static const struct intel_device_info intel_gm45_info = {
-	.gen = 4, .is_i965g = 1, .is_g4x = 1, .is_i9xx = 1,
-	.is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1,
+	.gen = 4, .is_g4x = 1,
+	.is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1,
 	.has_pipe_cxsr = 1, .has_hotplug = 1,
+	.supports_tv = 1,
+	.has_bsd_ring = 1,
 };
 
 static const struct intel_device_info intel_pineview_info = {
-	.gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .is_i9xx = 1,
+	.gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1,
 	.need_gfx_hws = 1, .has_hotplug = 1,
+	.has_overlay = 1,
 };
 
 static const struct intel_device_info intel_ironlake_d_info = {
-	.gen = 5, .is_ironlake = 1, .is_i965g = 1, .is_i9xx = 1,
+	.gen = 5,
 	.need_gfx_hws = 1, .has_pipe_cxsr = 1, .has_hotplug = 1,
+	.has_bsd_ring = 1,
 };
 
 static const struct intel_device_info intel_ironlake_m_info = {
-	.gen = 5, .is_ironlake = 1, .is_mobile = 1, .is_i965g = 1, .is_i9xx = 1,
-	.need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1, .has_hotplug = 1,
+	.gen = 5, .is_mobile = 1,
+	.need_gfx_hws = 1, .has_hotplug = 1,
+	.has_fbc = 1,
+	.has_bsd_ring = 1,
 };
 
 static const struct intel_device_info intel_sandybridge_d_info = {
-	.gen = 6, .is_i965g = 1, .is_i9xx = 1,
+	.gen = 6,
 	.need_gfx_hws = 1, .has_hotplug = 1,
+	.has_bsd_ring = 1,
+	.has_blt_ring = 1,
 };
 
 static const struct intel_device_info intel_sandybridge_m_info = {
-	.gen = 6, .is_i965g = 1, .is_mobile = 1, .is_i9xx = 1,
+	.gen = 6, .is_mobile = 1,
 	.need_gfx_hws = 1, .has_hotplug = 1,
+	.has_fbc = 1,
+	.has_bsd_ring = 1,
+	.has_blt_ring = 1,
+};
+
+static const struct intel_device_info intel_ivybridge_d_info = {
+	.is_ivybridge = 1, .gen = 7,
+	.need_gfx_hws = 1, .has_hotplug = 1,
+	.has_bsd_ring = 1,
+	.has_blt_ring = 1,
+};
+
+static const struct intel_device_info intel_ivybridge_m_info = {
+	.is_ivybridge = 1, .gen = 7, .is_mobile = 1,
+	.need_gfx_hws = 1, .has_hotplug = 1,
+	.has_fbc = 0,	/* FBC is not enabled on Ivybridge mobile yet */
+	.has_bsd_ring = 1,
+	.has_blt_ring = 1,
 };
 
 static const struct pci_device_id pciidlist[] = {	/* aka */
@@ -182,6 +245,11 @@ static const struct pci_device_id pciidlist[] = { /* aka */
 	INTEL_VGA_DEVICE(0x0116, &intel_sandybridge_m_info),
 	INTEL_VGA_DEVICE(0x0126, &intel_sandybridge_m_info),
 	INTEL_VGA_DEVICE(0x010A, &intel_sandybridge_d_info),
+	INTEL_VGA_DEVICE(0x0156, &intel_ivybridge_m_info), /* GT1 mobile */
+	INTEL_VGA_DEVICE(0x0166, &intel_ivybridge_m_info), /* GT2 mobile */
+	INTEL_VGA_DEVICE(0x0152, &intel_ivybridge_d_info), /* GT1 desktop */
+	INTEL_VGA_DEVICE(0x0162, &intel_ivybridge_d_info), /* GT2 desktop */
+	INTEL_VGA_DEVICE(0x015a, &intel_ivybridge_d_info), /* GT1 server */
 	{0, 0, 0}
 };
 
@@ -190,7 +258,9 @@ MODULE_DEVICE_TABLE(pci, pciidlist);
 #endif
 
 #define INTEL_PCH_DEVICE_ID_MASK	0xff00
+#define INTEL_PCH_IBX_DEVICE_ID_TYPE	0x3b00
 #define INTEL_PCH_CPT_DEVICE_ID_TYPE	0x1c00
+#define INTEL_PCH_PPT_DEVICE_ID_TYPE	0x1e00
 
 void intel_detect_pch (struct drm_device *dev)
 {
@@ -209,19 +279,86 @@ void intel_detect_pch (struct drm_device *dev)
 			int id;
 			id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
 
-			if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
+			if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
+				dev_priv->pch_type = PCH_IBX;
+				DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
+			} else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
 				dev_priv->pch_type = PCH_CPT;
 				DRM_DEBUG_KMS("Found CougarPoint PCH\n");
+			} else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
+				/* PantherPoint is CPT compatible */
+				dev_priv->pch_type = PCH_CPT;
+				DRM_DEBUG_KMS("Found PatherPoint PCH\n");
 			}
 		}
 		pci_dev_put(pch);
 	}
 }
 
+static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
+{
+	int count;
+
+	count = 0;
+	while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1))
+		udelay(10);
+
+	I915_WRITE_NOTRACE(FORCEWAKE, 1);
+	POSTING_READ(FORCEWAKE);
+
+	count = 0;
+	while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1) == 0)
+		udelay(10);
+}
+
+/*
+ * Generally this is called implicitly by the register read function. However,
+ * if some sequence requires the GT to not power down then this function should
+ * be called at the beginning of the sequence followed by a call to
+ * gen6_gt_force_wake_put() at the end of the sequence.
+ */
+void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
+{
+	WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
+
+	/* Forcewake is atomic in case we get in here without the lock */
+	if (atomic_add_return(1, &dev_priv->forcewake_count) == 1)
+		__gen6_gt_force_wake_get(dev_priv);
+}
+
+static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
+{
+	I915_WRITE_NOTRACE(FORCEWAKE, 0);
+	POSTING_READ(FORCEWAKE);
+}
+
+/*
+ * see gen6_gt_force_wake_get()
+ */
+void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
+{
+	WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
+
+	if (atomic_dec_and_test(&dev_priv->forcewake_count))
+		__gen6_gt_force_wake_put(dev_priv);
+}
+
+void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
+{
+	int loop = 500;
+	u32 fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
+	while (fifo < 20 && loop--) {
+		udelay(10);
+		fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
+	}
+}
+
 static int i915_drm_freeze(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
+	drm_kms_helper_poll_disable(dev);
+
 	pci_save_state(dev->pdev);
 
 	/* If KMS is active, we do the leavevt stuff here */
@@ -237,7 +374,7 @@ static int i915_drm_freeze(struct drm_device *dev)
 
 	i915_save_state(dev);
 
-	intel_opregion_free(dev, 1);
+	intel_opregion_fini(dev);
 
 	/* Modeset on resume, not lid events */
 	dev_priv->modeset_on_lid = 0;
@@ -258,6 +395,10 @@ int i915_suspend(struct drm_device *dev, pm_message_t state)
 	if (state.event == PM_EVENT_PRETHAW)
 		return 0;
 
+
+	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+		return 0;
+
 	error = i915_drm_freeze(dev);
 	if (error)
 		return error;
@@ -276,9 +417,14 @@ static int i915_drm_thaw(struct drm_device *dev)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int error = 0;
 
-	i915_restore_state(dev);
+	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+		mutex_lock(&dev->struct_mutex);
+		i915_gem_restore_gtt_mappings(dev);
+		mutex_unlock(&dev->struct_mutex);
+	}
 
-	intel_opregion_init(dev, 1);
+	i915_restore_state(dev);
+	intel_opregion_setup(dev);
 
 	/* KMS EnterVT equivalent */
 	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
@@ -288,12 +434,18 @@ static int i915_drm_thaw(struct drm_device *dev)
 		error = i915_gem_init_ringbuffer(dev);
 		mutex_unlock(&dev->struct_mutex);
 
+		drm_mode_config_reset(dev);
 		drm_irq_install(dev);
 
 		/* Resume the modeset for every activated CRTC */
 		drm_helper_resume_force_mode(dev);
+
+		if (IS_IRONLAKE_M(dev))
+			ironlake_enable_rc6(dev);
 	}
 
+	intel_opregion_init(dev);
+
 	dev_priv->modeset_on_lid = 0;
 
 	return error;
@@ -301,12 +453,90 @@ static int i915_drm_thaw(struct drm_device *dev)
 
 int i915_resume(struct drm_device *dev)
 {
+	int ret;
+
+	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+		return 0;
+
 	if (pci_enable_device(dev->pdev))
 		return -EIO;
 
 	pci_set_master(dev->pdev);
 
-	return i915_drm_thaw(dev);
+	ret = i915_drm_thaw(dev);
+	if (ret)
+		return ret;
+
+	drm_kms_helper_poll_enable(dev);
+	return 0;
+}
+
+static int i8xx_do_reset(struct drm_device *dev, u8 flags)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (IS_I85X(dev))
+		return -ENODEV;
+
+	I915_WRITE(D_STATE, I915_READ(D_STATE) | DSTATE_GFX_RESET_I830);
+	POSTING_READ(D_STATE);
+
+	if (IS_I830(dev) || IS_845G(dev)) {
+		I915_WRITE(DEBUG_RESET_I830,
+			   DEBUG_RESET_DISPLAY |
+			   DEBUG_RESET_RENDER |
+			   DEBUG_RESET_FULL);
+		POSTING_READ(DEBUG_RESET_I830);
+		msleep(1);
+
+		I915_WRITE(DEBUG_RESET_I830, 0);
+		POSTING_READ(DEBUG_RESET_I830);
+	}
+
+	msleep(1);
+
+	I915_WRITE(D_STATE, I915_READ(D_STATE) & ~DSTATE_GFX_RESET_I830);
+	POSTING_READ(D_STATE);
+
+	return 0;
+}
+
+static int i965_reset_complete(struct drm_device *dev)
+{
+	u8 gdrst;
+	pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
+	return gdrst & 0x1;
+}
+
+static int i965_do_reset(struct drm_device *dev, u8 flags)
+{
+	u8 gdrst;
+
+	/*
+	 * Set the domains we want to reset (GRDOM/bits 2 and 3) as
+	 * well as the reset bit (GR/bit 0). Setting the GR bit
+	 * triggers the reset; when done, the hardware will clear it.
+	 */
+	pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
+	pci_write_config_byte(dev->pdev, I965_GDRST, gdrst | flags | 0x1);
+
+	return wait_for(i965_reset_complete(dev), 500);
+}
+
+static int ironlake_do_reset(struct drm_device *dev, u8 flags)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
+	I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR, gdrst | flags | 0x1);
+	return wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500);
+}
+
+static int gen6_do_reset(struct drm_device *dev, u8 flags)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	I915_WRITE(GEN6_GDRST, GEN6_GRDOM_FULL);
+	return wait_for((I915_READ(GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500);
 }
 
 /**
@@ -325,54 +555,50 @@ int i915_resume(struct drm_device *dev)
  * - re-init interrupt state
  * - re-init display
  */
-int i965_reset(struct drm_device *dev, u8 flags)
+int i915_reset(struct drm_device *dev, u8 flags)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	unsigned long timeout;
-	u8 gdrst;
 	/*
 	 * We really should only reset the display subsystem if we actually
 	 * need to
 	 */
 	bool need_display = true;
+	int ret;
 
-	mutex_lock(&dev->struct_mutex);
+	if (!i915_try_reset)
+		return 0;
 
-	/*
-	 * Clear request list
-	 */
-	i915_gem_retire_requests(dev);
+	if (!mutex_trylock(&dev->struct_mutex))
+		return -EBUSY;
+
+	i915_gem_reset(dev);
 
-	if (need_display)
-		i915_save_display(dev);
-
-	if (IS_I965G(dev) || IS_G4X(dev)) {
-		/*
-		 * Set the domains we want to reset, then the reset bit (bit 0).
-		 * Clear the reset bit after a while and wait for hardware status
-		 * bit (bit 1) to be set
-		 */
-		pci_read_config_byte(dev->pdev, GDRST, &gdrst);
-		pci_write_config_byte(dev->pdev, GDRST, gdrst | flags | ((flags == GDRST_FULL) ? 0x1 : 0x0));
-		udelay(50);
-		pci_write_config_byte(dev->pdev, GDRST, gdrst & 0xfe);
-
-		/* ...we don't want to loop forever though, 500ms should be plenty */
-		timeout = jiffies + msecs_to_jiffies(500);
-		do {
-			udelay(100);
-			pci_read_config_byte(dev->pdev, GDRST, &gdrst);
-		} while ((gdrst & 0x1) && time_after(timeout, jiffies));
-
-		if (gdrst & 0x1) {
-			WARN(true, "i915: Failed to reset chip\n");
-			mutex_unlock(&dev->struct_mutex);
-			return -EIO;
-		}
-	} else {
-		DRM_ERROR("Error occurred. Don't know how to reset this chip.\n");
+	ret = -ENODEV;
+	if (get_seconds() - dev_priv->last_gpu_reset < 5) {
+		DRM_ERROR("GPU hanging too fast, declaring wedged!\n");
+	} else switch (INTEL_INFO(dev)->gen) {
+	case 7:
+	case 6:
+		ret = gen6_do_reset(dev, flags);
+		/* If reset with a user forcewake, try to restore */
+		if (atomic_read(&dev_priv->forcewake_count))
+			__gen6_gt_force_wake_get(dev_priv);
+		break;
+	case 5:
+		ret = ironlake_do_reset(dev, flags);
+		break;
+	case 4:
+		ret = i965_do_reset(dev, flags);
+		break;
+	case 2:
+		ret = i8xx_do_reset(dev, flags);
+		break;
+	}
+	dev_priv->last_gpu_reset = get_seconds();
+	if (ret) {
+		DRM_ERROR("Failed to reset chip.\n");
 		mutex_unlock(&dev->struct_mutex);
-		return -ENODEV;
+		return ret;
 	}
 
 	/* Ok, now get things going again... */
@@ -391,22 +617,34 @@ int i965_reset(struct drm_device *dev, u8 flags)
 	 */
 	if (drm_core_check_feature(dev, DRIVER_MODESET) ||
 	    !dev_priv->mm.suspended) {
-		struct intel_ring_buffer *ring = &dev_priv->render_ring;
 		dev_priv->mm.suspended = 0;
-		ring->init(dev, ring);
+
+		dev_priv->ring[RCS].init(&dev_priv->ring[RCS]);
+		if (HAS_BSD(dev))
+			dev_priv->ring[VCS].init(&dev_priv->ring[VCS]);
+		if (HAS_BLT(dev))
+			dev_priv->ring[BCS].init(&dev_priv->ring[BCS]);
+
 		mutex_unlock(&dev->struct_mutex);
 		drm_irq_uninstall(dev);
+		drm_mode_config_reset(dev);
 		drm_irq_install(dev);
 		mutex_lock(&dev->struct_mutex);
 	}
 
+	mutex_unlock(&dev->struct_mutex);
+
 	/*
-	 * Display needs restore too...
+	 * Perform a full modeset as on later generations, e.g. Ironlake, we may
+	 * need to retrain the display link and cannot just restore the register
+	 * values.
 	 */
-	if (need_display)
-		i915_restore_display(dev);
+	if (need_display) {
+		mutex_lock(&dev->mode_config.mutex);
+		drm_helper_resume_force_mode(dev);
+		mutex_unlock(&dev->mode_config.mutex);
+	}
 
-	mutex_unlock(&dev->struct_mutex);
 	return 0;
 }
 
@@ -414,6 +652,14 @@ int i965_reset(struct drm_device *dev, u8 flags)
 static int __devinit
 i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
+	/* Only bind to function 0 of the device. Early generations
+	 * used function 1 as a placeholder for multi-head. This causes
+	 * us confusion instead, especially on the systems where both
+	 * functions have the same PCI-ID!
+	 */
+	if (PCI_FUNC(pdev->devfn))
+		return -ENODEV;
+
 	return drm_get_pci_dev(pdev, ent, &driver);
 }
 
@@ -436,6 +682,9 @@ static int i915_pm_suspend(struct device *dev)
 		return -ENODEV;
 	}
 
+	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+		return 0;
+
 	error = i915_drm_freeze(drm_dev);
 	if (error)
 		return error;
@@ -517,15 +766,7 @@ static struct drm_driver driver = {
 	.resume = i915_resume,
 
 	.device_is_agp = i915_driver_device_is_agp,
-	.enable_vblank = i915_enable_vblank,
-	.disable_vblank = i915_disable_vblank,
-	.irq_preinstall = i915_driver_irq_preinstall,
-	.irq_postinstall = i915_driver_irq_postinstall,
-	.irq_uninstall = i915_driver_irq_uninstall,
-	.irq_handler = i915_driver_irq_handler,
 	.reclaim_buffers = drm_core_reclaim_buffers,
-	.get_map_ofs = drm_core_get_map_ofs,
-	.get_reg_ofs = drm_core_get_reg_ofs,
 	.master_create = i915_master_create,
 	.master_destroy = i915_master_destroy,
 #if defined(CONFIG_DEBUG_FS)
@@ -535,6 +776,9 @@ static struct drm_driver driver = {
 	.gem_init_object = i915_gem_init_object,
 	.gem_free_object = i915_gem_free_object,
 	.gem_vm_ops = &i915_gem_vm_ops,
+	.dumb_create = i915_gem_dumb_create,
+	.dumb_map_offset = i915_gem_mmap_gtt,
+	.dumb_destroy = i915_gem_dumb_destroy,
 	.ioctls = i915_ioctls,
 	.fops = {
 		 .owner = THIS_MODULE,
@@ -548,14 +792,7 @@ static struct drm_driver driver = {
 #ifdef CONFIG_COMPAT
 		 .compat_ioctl = i915_compat_ioctl,
 #endif
-	},
-
-	.pci_driver = {
-		 .name = DRIVER_NAME,
-		 .id_table = pciidlist,
-		 .probe = i915_pci_probe,
-		 .remove = i915_pci_remove,
-		 .driver.pm = &i915_pm_ops,
+		 .llseek = noop_llseek,
 	},
 
 	.name = DRIVER_NAME,
@@ -566,6 +803,14 @@ static struct drm_driver driver = {
 	.patchlevel = DRIVER_PATCHLEVEL,
 };
 
+static struct pci_driver i915_pci_driver = {
+	.name = DRIVER_NAME,
+	.id_table = pciidlist,
+	.probe = i915_pci_probe,
+	.remove = i915_pci_remove,
+	.driver.pm = &i915_pm_ops,
+};
+
 static int __init i915_init(void)
 {
 	if (!intel_agp_enabled) {
@@ -575,8 +820,6 @@ static int __init i915_init(void)
 
 	driver.num_ioctls = i915_max_ioctl;
 
-	i915_gem_shrinker_init();
-
 	/*
 	 * If CONFIG_DRM_I915_KMS is set, default to KMS unless
 	 * explicitly disabled with the module pararmeter.
@@ -598,18 +841,15 @@ static int __init i915_init(void)
 	driver.driver_features &= ~DRIVER_MODESET;
 #endif
 
-	if (!(driver.driver_features & DRIVER_MODESET)) {
-		driver.suspend = i915_suspend;
-		driver.resume = i915_resume;
-	}
+	if (!(driver.driver_features & DRIVER_MODESET))
+		driver.get_vblank_timestamp = NULL;
 
-	return drm_init(&driver);
+	return drm_pci_init(&driver, &i915_pci_driver);
 }
 
 static void __exit i915_exit(void)
 {
-	i915_gem_shrinker_exit();
-	drm_exit(&driver);
+	drm_pci_exit(&driver, &i915_pci_driver);
 }
 
 module_init(i915_init);