Diffstat (limited to 'drivers/gpu/drm')
-rw-r--r--  drivers/gpu/drm/Kconfig | 1
-rw-r--r--  drivers/gpu/drm/drm_crtc.c | 9
-rw-r--r--  drivers/gpu/drm/drm_crtc_helper.c | 395
-rw-r--r--  drivers/gpu/drm/drm_edid.c | 789
-rw-r--r--  drivers/gpu/drm/drm_fb_helper.c | 913
-rw-r--r--  drivers/gpu/drm/drm_gem.c | 49
-rw-r--r--  drivers/gpu/drm/drm_modes.c | 105
-rw-r--r--  drivers/gpu/drm/drm_sysfs.c | 2
-rw-r--r--  drivers/gpu/drm/i915/dvo.h | 10
-rw-r--r--  drivers/gpu/drm/i915/dvo_ch7017.c | 46
-rw-r--r--  drivers/gpu/drm/i915/dvo_ch7xxx.c | 44
-rw-r--r--  drivers/gpu/drm/i915/dvo_ivch.c | 21
-rw-r--r--  drivers/gpu/drm/i915/dvo_sil164.c | 38
-rw-r--r--  drivers/gpu/drm/i915/dvo_tfp410.c | 32
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c | 28
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c | 13
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 29
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 33
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 127
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_debug.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 22
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 143
-rw-r--r--  drivers/gpu/drm/i915/i915_suspend.c | 41
-rw-r--r--  drivers/gpu/drm/i915/i915_trace.h | 86
-rw-r--r--  drivers/gpu/drm/i915/intel_crt.c | 88
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 1062
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c | 254
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h | 31
-rw-r--r--  drivers/gpu/drm/i915/intel_dvo.c | 103
-rw-r--r--  drivers/gpu/drm/i915/intel_fb.c | 204
-rw-r--r--  drivers/gpu/drm/i915/intel_hdmi.c | 70
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c | 111
-rw-r--r--  drivers/gpu/drm/i915/intel_modes.c | 21
-rw-r--r--  drivers/gpu/drm/i915/intel_overlay.c | 8
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c | 933
-rw-r--r--  drivers/gpu/drm/i915/intel_tv.c | 185
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo.c | 114
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_debugfs.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_display.c | 41
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drv.c | 19
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drv.h | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fb.h | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fbcon.c | 198
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fbcon.h | 19
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_gem.c | 5
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_irq.c | 10
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_state.c | 5
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_fbcon.c | 16
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_display.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_fbcon.c | 16
-rw-r--r--  drivers/gpu/drm/radeon/atombios.h | 76
-rw-r--r--  drivers/gpu/drm/radeon/atombios_crtc.c | 8
-rw-r--r--  drivers/gpu/drm/radeon/evergreen.c | 1494
-rw-r--r--  drivers/gpu/drm/radeon/evergreend.h | 556
-rw-r--r--  drivers/gpu/drm/radeon/r100.c | 253
-rw-r--r--  drivers/gpu/drm/radeon/r100d.h | 128
-rw-r--r--  drivers/gpu/drm/radeon/r300.c | 148
-rw-r--r--  drivers/gpu/drm/radeon/r300d.h | 47
-rw-r--r--  drivers/gpu/drm/radeon/r420.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/r520.c | 5
-rw-r--r--  drivers/gpu/drm/radeon/r600.c | 192
-rw-r--r--  drivers/gpu/drm/radeon/r600_audio.c | 58
-rw-r--r--  drivers/gpu/drm/radeon/r600_blit_kms.c | 3
-rw-r--r--  drivers/gpu/drm/radeon/r600_hdmi.c | 65
-rw-r--r--  drivers/gpu/drm/radeon/r600_reg.h | 57
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h | 128
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.c | 56
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.h | 26
-rw-r--r--  drivers/gpu/drm/radeon/radeon_atombios.c | 44
-rw-r--r--  drivers/gpu/drm/radeon/radeon_combios.c | 8
-rw-r--r--  drivers/gpu/drm/radeon/radeon_connectors.c | 50
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cs.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c | 33
-rw-r--r--  drivers/gpu/drm/radeon/radeon_display.c | 42
-rw-r--r--  drivers/gpu/drm/radeon/radeon_fb.c | 352
-rw-r--r--  drivers/gpu/drm/radeon/radeon_fence.c | 107
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gart.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gem.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/radeon_irq_kms.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_mode.h | 24
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.c | 32
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.h | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ttm.c | 116
-rw-r--r--  drivers/gpu/drm/radeon/rs400.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/rs600.c | 77
-rw-r--r--  drivers/gpu/drm/radeon/rs600d.h | 46
-rw-r--r--  drivers/gpu/drm/radeon/rs690.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/rv515.c | 96
-rw-r--r--  drivers/gpu/drm/radeon/rv515d.h | 46
-rw-r--r--  drivers/gpu/drm/radeon/rv770.c | 21
-rw-r--r--  drivers/gpu/drm/ttm/Makefile | 2
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c | 84
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_util.c | 122
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_vm.c | 41
-rw-r--r--  drivers/gpu/drm/ttm/ttm_memory.c | 7
-rw-r--r--  drivers/gpu/drm/ttm/ttm_page_alloc.c | 845
-rw-r--r--  drivers/gpu/drm/ttm/ttm_tt.c | 44
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c | 50
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 4
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fb.c | 4
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 6
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c | 2
102 files changed, 8212 insertions, 3931 deletions
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 305c59003963..be5aa7d5206b 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -23,6 +23,7 @@ config DRM_KMS_HELPER
         depends on DRM
         select FB
         select FRAMEBUFFER_CONSOLE if !EMBEDDED
+        select SLOW_WORK
         help
           FB and CRTC helpers for KMS drivers.

diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 61b9bcfdf040..994d23beeb1d 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -34,6 +34,7 @@
 #include "drm.h"
 #include "drmP.h"
 #include "drm_crtc.h"
+#include "drm_edid.h"

 struct drm_prop_enum_list {
         int type;
@@ -494,7 +495,6 @@ void drm_connector_cleanup(struct drm_connector *connector)
         list_for_each_entry_safe(mode, t, &connector->user_modes, head)
                 drm_mode_remove(connector, mode);

-        kfree(connector->fb_helper_private);
         mutex_lock(&dev->mode_config.mutex);
         drm_mode_object_put(dev, &connector->base);
         list_del(&connector->head);
@@ -858,7 +858,6 @@ void drm_mode_config_init(struct drm_device *dev)
         mutex_init(&dev->mode_config.mutex);
         mutex_init(&dev->mode_config.idr_mutex);
         INIT_LIST_HEAD(&dev->mode_config.fb_list);
-        INIT_LIST_HEAD(&dev->mode_config.fb_kernel_list);
         INIT_LIST_HEAD(&dev->mode_config.crtc_list);
         INIT_LIST_HEAD(&dev->mode_config.connector_list);
         INIT_LIST_HEAD(&dev->mode_config.encoder_list);
@@ -2350,7 +2349,7 @@ int drm_mode_connector_update_edid_property(struct drm_connector *connector,
                                             struct edid *edid)
 {
         struct drm_device *dev = connector->dev;
-        int ret = 0;
+        int ret = 0, size;

         if (connector->edid_blob_ptr)
                 drm_property_destroy_blob(dev, connector->edid_blob_ptr);
@@ -2362,7 +2361,9 @@ int drm_mode_connector_update_edid_property(struct drm_connector *connector,
                 return ret;
         }

-        connector->edid_blob_ptr = drm_property_create_blob(connector->dev, 128, edid);
+        size = EDID_LENGTH * (1 + edid->extensions);
+        connector->edid_blob_ptr = drm_property_create_blob(connector->dev,
+                                                            size, edid);

         ret = drm_connector_property_set_value(connector,
                                                dev->mode_config.edid_property,
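For context on the blob-size fix above: an EDID record is a 128-byte base block plus optional 128-byte extension blocks, and the extension count lives at byte 0x7e of the base block, so the property blob must be (1 + extensions) * 128 bytes rather than a fixed 128. A minimal C sketch of that sizing rule (hypothetical helper, not part of this patch):

#include <stddef.h>

#define EDID_LENGTH 128 /* every EDID block, base or extension, is 128 bytes */

/*
 * Hypothetical helper mirroring the sizing rule adopted above:
 * byte 0x7e of the base block holds the extension-block count.
 */
static size_t edid_record_size(const unsigned char *base_block)
{
        return ((size_t)base_block[0x7e] + 1) * EDID_LENGTH;
}
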
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 51103aa469f8..b142ac260d97 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -55,7 +55,7 @@ static void drm_mode_validate_flag(struct drm_connector *connector,
 }

 /**
- * drm_helper_probe_connector_modes - get complete set of display modes
+ * drm_helper_probe_single_connector_modes - get complete set of display modes
  * @dev: DRM device
  * @maxX: max width for modes
  * @maxY: max height for modes
@@ -154,21 +154,6 @@ prune:
 }
 EXPORT_SYMBOL(drm_helper_probe_single_connector_modes);

-int drm_helper_probe_connector_modes(struct drm_device *dev, uint32_t maxX,
-                                     uint32_t maxY)
-{
-        struct drm_connector *connector;
-        int count = 0;
-
-        list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-                count += drm_helper_probe_single_connector_modes(connector,
-                                                                 maxX, maxY);
-        }
-
-        return count;
-}
-EXPORT_SYMBOL(drm_helper_probe_connector_modes);
-
 /**
  * drm_helper_encoder_in_use - check if a given encoder is in use
  * @encoder: encoder to check
@@ -263,302 +248,6 @@ void drm_helper_disable_unused_functions(struct drm_device *dev)
 }
 EXPORT_SYMBOL(drm_helper_disable_unused_functions);

-static struct drm_display_mode *drm_has_preferred_mode(struct drm_connector *connector, int width, int height)
-{
-        struct drm_display_mode *mode;
-
-        list_for_each_entry(mode, &connector->modes, head) {
-                if (drm_mode_width(mode) > width ||
-                    drm_mode_height(mode) > height)
-                        continue;
-                if (mode->type & DRM_MODE_TYPE_PREFERRED)
-                        return mode;
-        }
-        return NULL;
-}
-
-static bool drm_has_cmdline_mode(struct drm_connector *connector)
-{
-        struct drm_fb_helper_connector *fb_help_conn = connector->fb_helper_private;
-        struct drm_fb_helper_cmdline_mode *cmdline_mode;
-
-        if (!fb_help_conn)
-                return false;
-
-        cmdline_mode = &fb_help_conn->cmdline_mode;
-        return cmdline_mode->specified;
-}
-
-static struct drm_display_mode *drm_pick_cmdline_mode(struct drm_connector *connector, int width, int height)
-{
-        struct drm_fb_helper_connector *fb_help_conn = connector->fb_helper_private;
-        struct drm_fb_helper_cmdline_mode *cmdline_mode;
-        struct drm_display_mode *mode = NULL;
-
-        if (!fb_help_conn)
-                return mode;
-
-        cmdline_mode = &fb_help_conn->cmdline_mode;
-        if (cmdline_mode->specified == false)
-                return mode;
-
-        /* attempt to find a matching mode in the list of modes
-         * we have gotten so far, if not add a CVT mode that conforms
-         */
-        if (cmdline_mode->rb || cmdline_mode->margins)
-                goto create_mode;
-
-        list_for_each_entry(mode, &connector->modes, head) {
-                /* check width/height */
-                if (mode->hdisplay != cmdline_mode->xres ||
-                    mode->vdisplay != cmdline_mode->yres)
-                        continue;
-
-                if (cmdline_mode->refresh_specified) {
-                        if (mode->vrefresh != cmdline_mode->refresh)
-                                continue;
-                }
-
-                if (cmdline_mode->interlace) {
-                        if (!(mode->flags & DRM_MODE_FLAG_INTERLACE))
-                                continue;
-                }
-                return mode;
-        }
-
-create_mode:
-        mode = drm_cvt_mode(connector->dev, cmdline_mode->xres,
-                            cmdline_mode->yres,
-                            cmdline_mode->refresh_specified ? cmdline_mode->refresh : 60,
-                            cmdline_mode->rb, cmdline_mode->interlace,
-                            cmdline_mode->margins);
-        drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
-        list_add(&mode->head, &connector->modes);
-        return mode;
-}
-
-static bool drm_connector_enabled(struct drm_connector *connector, bool strict)
-{
-        bool enable;
-
-        if (strict) {
-                enable = connector->status == connector_status_connected;
-        } else {
-                enable = connector->status != connector_status_disconnected;
-        }
-        return enable;
-}
-
-static void drm_enable_connectors(struct drm_device *dev, bool *enabled)
-{
-        bool any_enabled = false;
-        struct drm_connector *connector;
-        int i = 0;
-
-        list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-                enabled[i] = drm_connector_enabled(connector, true);
-                DRM_DEBUG_KMS("connector %d enabled? %s\n", connector->base.id,
-                              enabled[i] ? "yes" : "no");
-                any_enabled |= enabled[i];
-                i++;
-        }
-
-        if (any_enabled)
-                return;
-
-        i = 0;
-        list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-                enabled[i] = drm_connector_enabled(connector, false);
-                i++;
-        }
-}
-
-static bool drm_target_preferred(struct drm_device *dev,
-                                 struct drm_display_mode **modes,
-                                 bool *enabled, int width, int height)
-{
-        struct drm_connector *connector;
-        int i = 0;
-
-        list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-
-                if (enabled[i] == false) {
-                        i++;
-                        continue;
-                }
-
-                DRM_DEBUG_KMS("looking for cmdline mode on connector %d\n",
-                              connector->base.id);
-
-                /* got for command line mode first */
-                modes[i] = drm_pick_cmdline_mode(connector, width, height);
-                if (!modes[i]) {
-                        DRM_DEBUG_KMS("looking for preferred mode on connector %d\n",
-                                      connector->base.id);
-                        modes[i] = drm_has_preferred_mode(connector, width, height);
-                }
-                /* No preferred modes, pick one off the list */
-                if (!modes[i] && !list_empty(&connector->modes)) {
-                        list_for_each_entry(modes[i], &connector->modes, head)
-                                break;
-                }
-                DRM_DEBUG_KMS("found mode %s\n", modes[i] ? modes[i]->name :
-                          "none");
-                i++;
-        }
-        return true;
-}
-
-static int drm_pick_crtcs(struct drm_device *dev,
-                          struct drm_crtc **best_crtcs,
-                          struct drm_display_mode **modes,
-                          int n, int width, int height)
-{
-        int c, o;
-        struct drm_connector *connector;
-        struct drm_connector_helper_funcs *connector_funcs;
-        struct drm_encoder *encoder;
-        struct drm_crtc *best_crtc;
-        int my_score, best_score, score;
-        struct drm_crtc **crtcs, *crtc;
-
-        if (n == dev->mode_config.num_connector)
-                return 0;
-        c = 0;
-        list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-                if (c == n)
-                        break;
-                c++;
-        }
-
-        best_crtcs[n] = NULL;
-        best_crtc = NULL;
-        best_score = drm_pick_crtcs(dev, best_crtcs, modes, n+1, width, height);
-        if (modes[n] == NULL)
-                return best_score;
-
-        crtcs = kmalloc(dev->mode_config.num_connector *
-                        sizeof(struct drm_crtc *), GFP_KERNEL);
-        if (!crtcs)
-                return best_score;
-
-        my_score = 1;
-        if (connector->status == connector_status_connected)
-                my_score++;
-        if (drm_has_cmdline_mode(connector))
-                my_score++;
-        if (drm_has_preferred_mode(connector, width, height))
-                my_score++;
-
-        connector_funcs = connector->helper_private;
-        encoder = connector_funcs->best_encoder(connector);
-        if (!encoder)
-                goto out;
-
-        connector->encoder = encoder;
-
-        /* select a crtc for this connector and then attempt to configure
-           remaining connectors */
-        c = 0;
-        list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-
-                if ((encoder->possible_crtcs & (1 << c)) == 0) {
-                        c++;
-                        continue;
-                }
-
-                for (o = 0; o < n; o++)
-                        if (best_crtcs[o] == crtc)
-                                break;
-
-                if (o < n) {
-                        /* ignore cloning for now */
-                        c++;
-                        continue;
-                }
-
-                crtcs[n] = crtc;
-                memcpy(crtcs, best_crtcs, n * sizeof(struct drm_crtc *));
-                score = my_score + drm_pick_crtcs(dev, crtcs, modes, n + 1,
-                                                  width, height);
-                if (score > best_score) {
-                        best_crtc = crtc;
-                        best_score = score;
-                        memcpy(best_crtcs, crtcs,
-                               dev->mode_config.num_connector *
-                               sizeof(struct drm_crtc *));
-                }
-                c++;
-        }
-out:
-        kfree(crtcs);
-        return best_score;
-}
-
-static void drm_setup_crtcs(struct drm_device *dev)
-{
-        struct drm_crtc **crtcs;
-        struct drm_display_mode **modes;
-        struct drm_encoder *encoder;
-        struct drm_connector *connector;
-        bool *enabled;
-        int width, height;
-        int i, ret;
-
-        DRM_DEBUG_KMS("\n");
-
-        width = dev->mode_config.max_width;
-        height = dev->mode_config.max_height;
-
-        /* clean out all the encoder/crtc combos */
-        list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
-                encoder->crtc = NULL;
-        }
-
-        crtcs = kcalloc(dev->mode_config.num_connector,
-                        sizeof(struct drm_crtc *), GFP_KERNEL);
-        modes = kcalloc(dev->mode_config.num_connector,
-                        sizeof(struct drm_display_mode *), GFP_KERNEL);
-        enabled = kcalloc(dev->mode_config.num_connector,
-                          sizeof(bool), GFP_KERNEL);
-
-        drm_enable_connectors(dev, enabled);
-
-        ret = drm_target_preferred(dev, modes, enabled, width, height);
-        if (!ret)
-                DRM_ERROR("Unable to find initial modes\n");
-
-        DRM_DEBUG_KMS("picking CRTCs for %dx%d config\n", width, height);
-
-        drm_pick_crtcs(dev, crtcs, modes, 0, width, height);
-
-        i = 0;
-        list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-                struct drm_display_mode *mode = modes[i];
-                struct drm_crtc *crtc = crtcs[i];
-
-                if (connector->encoder == NULL) {
-                        i++;
-                        continue;
-                }
-
-                if (mode && crtc) {
-                        DRM_DEBUG_KMS("desired mode %s set on crtc %d\n",
-                                      mode->name, crtc->base.id);
-                        crtc->desired_mode = mode;
-                        connector->encoder->crtc = crtc;
-                } else {
-                        connector->encoder->crtc = NULL;
-                        connector->encoder = NULL;
-                }
-                i++;
-        }
-
-        kfree(crtcs);
-        kfree(modes);
-        kfree(enabled);
-}
-
 /**
  * drm_encoder_crtc_ok - can a given crtc drive a given encoder?
  * @encoder: encoder to test
@@ -936,10 +625,6 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
                                 ret = -EINVAL;
                                 goto fail;
                         }
-                        /* TODO are these needed? */
-                        set->crtc->desired_x = set->x;
-                        set->crtc->desired_y = set->y;
-                        set->crtc->desired_mode = set->mode;
                 }
                 drm_helper_disable_unused_functions(dev);
         } else if (fb_changed) {
@@ -984,63 +669,6 @@ fail:
 }
 EXPORT_SYMBOL(drm_crtc_helper_set_config);

-bool drm_helper_plugged_event(struct drm_device *dev)
-{
-        DRM_DEBUG_KMS("\n");
-
-        drm_helper_probe_connector_modes(dev, dev->mode_config.max_width,
-                                         dev->mode_config.max_height);
-
-        drm_setup_crtcs(dev);
-
-        /* alert the driver fb layer */
-        dev->mode_config.funcs->fb_changed(dev);
-
-        /* FIXME: send hotplug event */
-        return true;
-}
-/**
- * drm_initial_config - setup a sane initial connector configuration
- * @dev: DRM device
- *
- * LOCKING:
- * Called at init time, must take mode config lock.
- *
- * Scan the CRTCs and connectors and try to put together an initial setup.
- * At the moment, this is a cloned configuration across all heads with
- * a new framebuffer object as the backing store.
- *
- * RETURNS:
- * Zero if everything went ok, nonzero otherwise.
- */
-bool drm_helper_initial_config(struct drm_device *dev)
-{
-        int count = 0;
-
-        /* disable all the possible outputs/crtcs before entering KMS mode */
-        drm_helper_disable_unused_functions(dev);
-
-        drm_fb_helper_parse_command_line(dev);
-
-        count = drm_helper_probe_connector_modes(dev,
-                                                 dev->mode_config.max_width,
-                                                 dev->mode_config.max_height);
-
-        /*
-         * we shouldn't end up with no modes here.
-         */
-        if (count == 0)
-                printk(KERN_INFO "No connectors reported connected with modes\n");
-
-        drm_setup_crtcs(dev);
-
-        /* alert the driver fb layer */
-        dev->mode_config.funcs->fb_changed(dev);
-
-        return 0;
-}
-EXPORT_SYMBOL(drm_helper_initial_config);
-
 static int drm_helper_choose_encoder_dpms(struct drm_encoder *encoder)
 {
         int dpms = DRM_MODE_DPMS_OFF;
@@ -1123,27 +751,6 @@ void drm_helper_connector_dpms(struct drm_connector *connector, int mode)
 }
 EXPORT_SYMBOL(drm_helper_connector_dpms);

-/**
- * drm_hotplug_stage_two
- * @dev DRM device
- * @connector hotpluged connector
- *
- * LOCKING.
- * Caller must hold mode config lock, function might grab struct lock.
- *
- * Stage two of a hotplug.
- *
- * RETURNS:
- * Zero on success, errno on failure.
- */
-int drm_helper_hotplug_stage_two(struct drm_device *dev)
-{
-        drm_helper_plugged_event(dev);
-
-        return 0;
-}
-EXPORT_SYMBOL(drm_helper_hotplug_stage_two);
-
 int drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb,
                                    struct drm_mode_fb_cmd *mode_cmd)
 {
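The large removal above is mostly code motion: the initial-configuration and mode-picking logic moves out of the CRTC helper and into the fb helper (see the drm_fb_helper.c changes below). For orientation, drm_pick_crtcs() performs an exhaustive recursive search over connector-to-CRTC assignments and keeps the highest-scoring one. A simplified standalone sketch of that recursion, using hypothetical compatible() and score() helpers in place of the DRM structures:

#include <string.h>

#define MAX_CONN 8      /* sketch assumes n_conn <= MAX_CONN */

extern int compatible(int connector, int crtc); /* can this pair drive a mode? */
extern int score(int connector);                /* per-connector desirability */

/*
 * For connector n, try every CRTC not already claimed by connectors
 * 0..n-1, recurse for the remaining connectors, and keep whichever
 * complete assignment scores highest -- the same shape as the
 * drm_pick_crtcs() recursion removed above.
 */
static int pick_crtcs(int *best, int n, int n_conn, int n_crtc)
{
        int tmp[MAX_CONN];
        int c, best_score;

        if (n == n_conn)
                return 0;

        best[n] = -1;
        /* baseline: leave connector n unassigned */
        best_score = pick_crtcs(best, n + 1, n_conn, n_crtc);

        for (c = 0; c < n_crtc; c++) {
                int o, s;

                if (!compatible(n, c))
                        continue;
                for (o = 0; o < n; o++) /* CRTC already taken? */
                        if (best[o] == c)
                                break;
                if (o < n)
                        continue;

                memcpy(tmp, best, n * sizeof(int));
                tmp[n] = c;
                s = score(n) + pick_crtcs(tmp, n + 1, n_conn, n_crtc);
                if (s > best_score) {
                        best_score = s;
                        memcpy(best, tmp, n_conn * sizeof(int));
                }
        }
        return best_score;
}
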
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 18f41d7061f0..71886749fa2c 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -2,6 +2,7 @@
  * Copyright (c) 2006 Luc Verhaegen (quirks list)
  * Copyright (c) 2007-2008 Intel Corporation
  *   Jesse Barnes <jesse.barnes@intel.com>
+ * Copyright 2010 Red Hat, Inc.
  *
  * DDC probing routines (drm_ddc_read & drm_do_probe_ddc_edid) originally from
  * FB layer.
@@ -33,10 +34,9 @@
 #include "drmP.h"
 #include "drm_edid.h"

-/*
- * TODO:
- *   - support EDID 1.4 (incl. CE blocks)
- */
+#define EDID_EST_TIMINGS 16
+#define EDID_STD_TIMINGS 8
+#define EDID_DETAILED_TIMINGS 4

 /*
  * EDID blocks out in the wild have a variety of bugs, try to collect
@@ -65,7 +65,8 @@

 #define LEVEL_DMT        0
 #define LEVEL_GTF        1
-#define LEVEL_CVT        2
+#define LEVEL_GTF2       2
+#define LEVEL_CVT        3

 static struct edid_quirk {
         char *vendor;
@@ -109,36 +110,38 @@ static struct edid_quirk {
109 { "SAM", 638, EDID_QUIRK_PREFER_LARGE_60 }, 110 { "SAM", 638, EDID_QUIRK_PREFER_LARGE_60 },
110}; 111};
111 112
113/*** DDC fetch and block validation ***/
112 114
113/* Valid EDID header has these bytes */
114static const u8 edid_header[] = { 115static const u8 edid_header[] = {
115 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 116 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00
116}; 117};
117 118
118/** 119/*
119 * drm_edid_is_valid - sanity check EDID data 120 * Sanity check the EDID block (base or extension). Return 0 if the block
120 * @edid: EDID data 121 * doesn't check out, or 1 if it's valid.
121 *
122 * Sanity check the EDID block by looking at the header, the version number
123 * and the checksum. Return 0 if the EDID doesn't check out, or 1 if it's
124 * valid.
125 */ 122 */
126bool drm_edid_is_valid(struct edid *edid) 123static bool
124drm_edid_block_valid(u8 *raw_edid)
127{ 125{
128 int i, score = 0; 126 int i;
129 u8 csum = 0; 127 u8 csum = 0;
130 u8 *raw_edid = (u8 *)edid; 128 struct edid *edid = (struct edid *)raw_edid;
131 129
132 for (i = 0; i < sizeof(edid_header); i++) 130 if (raw_edid[0] == 0x00) {
133 if (raw_edid[i] == edid_header[i]) 131 int score = 0;
134 score++;
135 132
136 if (score == 8) ; 133 for (i = 0; i < sizeof(edid_header); i++)
137 else if (score >= 6) { 134 if (raw_edid[i] == edid_header[i])
138 DRM_DEBUG("Fixing EDID header, your hardware may be failing\n"); 135 score++;
139 memcpy(raw_edid, edid_header, sizeof(edid_header)); 136
140 } else 137 if (score == 8) ;
141 goto bad; 138 else if (score >= 6) {
139 DRM_DEBUG("Fixing EDID header, your hardware may be failing\n");
140 memcpy(raw_edid, edid_header, sizeof(edid_header));
141 } else {
142 goto bad;
143 }
144 }
142 145
143 for (i = 0; i < EDID_LENGTH; i++) 146 for (i = 0; i < EDID_LENGTH; i++)
144 csum += raw_edid[i]; 147 csum += raw_edid[i];
@@ -147,13 +150,21 @@ bool drm_edid_is_valid(struct edid *edid)
                 goto bad;
         }

-        if (edid->version != 1) {
-                DRM_ERROR("EDID has major version %d, instead of 1\n", edid->version);
-                goto bad;
-        }
+        /* per-block-type checks */
+        switch (raw_edid[0]) {
+        case 0: /* base */
+                if (edid->version != 1) {
+                        DRM_ERROR("EDID has major version %d, instead of 1\n", edid->version);
+                        goto bad;
+                }
+
+                if (edid->revision > 4)
+                        DRM_DEBUG("EDID minor > 4, assuming backward compatibility\n");
+                break;

-        if (edid->revision > 4)
-                DRM_DEBUG("EDID minor > 4, assuming backward compatibility\n");
+        default:
+                break;
+        }

         return 1;

@@ -165,8 +176,158 @@ bad:
         }
         return 0;
 }
+
+/**
+ * drm_edid_is_valid - sanity check EDID data
+ * @edid: EDID data
+ *
+ * Sanity-check an entire EDID record (including extensions)
+ */
+bool drm_edid_is_valid(struct edid *edid)
+{
+        int i;
+        u8 *raw = (u8 *)edid;
+
+        if (!edid)
+                return false;
+
+        for (i = 0; i <= edid->extensions; i++)
+                if (!drm_edid_block_valid(raw + i * EDID_LENGTH))
+                        return false;
+
+        return true;
+}
 EXPORT_SYMBOL(drm_edid_is_valid);

+#define DDC_ADDR 0x50
+#define DDC_SEGMENT_ADDR 0x30
+/**
+ * Get EDID information via I2C.
+ *
+ * \param adapter : i2c device adaptor
+ * \param buf     : EDID data buffer to be filled
+ * \param len     : EDID data buffer length
+ * \return 0 on success or -1 on failure.
+ *
+ * Try to fetch EDID information by calling i2c driver function.
+ */
+static int
+drm_do_probe_ddc_edid(struct i2c_adapter *adapter, unsigned char *buf,
+                      int block, int len)
+{
+        unsigned char start = block * EDID_LENGTH;
+        struct i2c_msg msgs[] = {
+                {
+                        .addr   = DDC_ADDR,
+                        .flags  = 0,
+                        .len    = 1,
+                        .buf    = &start,
+                }, {
+                        .addr   = DDC_ADDR,
+                        .flags  = I2C_M_RD,
+                        .len    = len,
+                        .buf    = buf + start,
+                }
+        };
+
+        if (i2c_transfer(adapter, msgs, 2) == 2)
+                return 0;
+
+        return -1;
+}
+
+static u8 *
+drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
+{
+        int i, j = 0;
+        u8 *block, *new;
+
+        if ((block = kmalloc(EDID_LENGTH, GFP_KERNEL)) == NULL)
+                return NULL;
+
+        /* base block fetch */
+        for (i = 0; i < 4; i++) {
+                if (drm_do_probe_ddc_edid(adapter, block, 0, EDID_LENGTH))
+                        goto out;
+                if (drm_edid_block_valid(block))
+                        break;
+        }
+        if (i == 4)
+                goto carp;
+
+        /* if there's no extensions, we're done */
+        if (block[0x7e] == 0)
+                return block;
+
+        new = krealloc(block, (block[0x7e] + 1) * EDID_LENGTH, GFP_KERNEL);
+        if (!new)
+                goto out;
+        block = new;
+
+        for (j = 1; j <= block[0x7e]; j++) {
+                for (i = 0; i < 4; i++) {
+                        if (drm_do_probe_ddc_edid(adapter, block, j,
+                                                  EDID_LENGTH))
+                                goto out;
+                        if (drm_edid_block_valid(block + j * EDID_LENGTH))
+                                break;
+                }
+                if (i == 4)
+                        goto carp;
+        }
+
+        return block;
+
+carp:
+        dev_warn(&connector->dev->pdev->dev, "%s: EDID block %d invalid.\n",
+                 drm_get_connector_name(connector), j);
+
+out:
+        kfree(block);
+        return NULL;
+}
+
+/**
+ * Probe DDC presence.
+ *
+ * \param adapter : i2c device adaptor
+ * \return 1 on success
+ */
+static bool
+drm_probe_ddc(struct i2c_adapter *adapter)
+{
+        unsigned char out;
+
+        return (drm_do_probe_ddc_edid(adapter, &out, 0, 1) == 0);
+}
+
+/**
+ * drm_get_edid - get EDID data, if available
+ * @connector: connector we're probing
+ * @adapter: i2c adapter to use for DDC
+ *
+ * Poke the given i2c channel to grab EDID data if possible.  If found,
+ * attach it to the connector.
+ *
+ * Return edid data or NULL if we couldn't find any.
+ */
+struct edid *drm_get_edid(struct drm_connector *connector,
+                          struct i2c_adapter *adapter)
+{
+        struct edid *edid = NULL;
+
+        if (drm_probe_ddc(adapter))
+                edid = (struct edid *)drm_do_get_edid(connector, adapter);
+
+        connector->display_info.raw_edid = (char *)edid;
+
+        return edid;
+
+}
+EXPORT_SYMBOL(drm_get_edid);
+
+/*** EDID parsing ***/
+
 /**
  * edid_vendor - match a string against EDID's obfuscated vendor field
  * @edid: EDID to match
@@ -517,6 +678,110 @@ static struct drm_display_mode *drm_find_dmt(struct drm_device *dev,
         return mode;
 }

+typedef void detailed_cb(struct detailed_timing *timing, void *closure);
+
+static void
+drm_for_each_detailed_block(u8 *raw_edid, detailed_cb *cb, void *closure)
+{
+        int i;
+        struct edid *edid = (struct edid *)raw_edid;
+
+        if (edid == NULL)
+                return;
+
+        for (i = 0; i < EDID_DETAILED_TIMINGS; i++)
+                cb(&(edid->detailed_timings[i]), closure);
+
+        /* XXX extension block walk */
+}
+
+static void
+is_rb(struct detailed_timing *t, void *data)
+{
+        u8 *r = (u8 *)t;
+        if (r[3] == EDID_DETAIL_MONITOR_RANGE)
+                if (r[15] & 0x10)
+                        *(bool *)data = true;
+}
+
+/* EDID 1.4 defines this explicitly. For EDID 1.3, we guess, badly. */
+static bool
+drm_monitor_supports_rb(struct edid *edid)
+{
+        if (edid->revision >= 4) {
+                bool ret;
+                drm_for_each_detailed_block((u8 *)edid, is_rb, &ret);
+                return ret;
+        }
+
+        return ((edid->input & DRM_EDID_INPUT_DIGITAL) != 0);
+}
+
+static void
+find_gtf2(struct detailed_timing *t, void *data)
+{
+        u8 *r = (u8 *)t;
+        if (r[3] == EDID_DETAIL_MONITOR_RANGE && r[10] == 0x02)
+                *(u8 **)data = r;
+}
+
+/* Secondary GTF curve kicks in above some break frequency */
+static int
+drm_gtf2_hbreak(struct edid *edid)
+{
+        u8 *r = NULL;
+        drm_for_each_detailed_block((u8 *)edid, find_gtf2, &r);
+        return r ? (r[12] * 2) : 0;
+}
+
+static int
+drm_gtf2_2c(struct edid *edid)
+{
+        u8 *r = NULL;
+        drm_for_each_detailed_block((u8 *)edid, find_gtf2, &r);
+        return r ? r[13] : 0;
+}
+
+static int
+drm_gtf2_m(struct edid *edid)
+{
+        u8 *r = NULL;
+        drm_for_each_detailed_block((u8 *)edid, find_gtf2, &r);
+        return r ? (r[15] << 8) + r[14] : 0;
+}
+
+static int
+drm_gtf2_k(struct edid *edid)
+{
+        u8 *r = NULL;
+        drm_for_each_detailed_block((u8 *)edid, find_gtf2, &r);
+        return r ? r[16] : 0;
+}
+
+static int
+drm_gtf2_2j(struct edid *edid)
+{
+        u8 *r = NULL;
+        drm_for_each_detailed_block((u8 *)edid, find_gtf2, &r);
+        return r ? r[17] : 0;
+}
+
+/**
+ * standard_timing_level - get std. timing level(CVT/GTF/DMT)
+ * @edid: EDID block to scan
+ */
+static int standard_timing_level(struct edid *edid)
+{
+        if (edid->revision >= 2) {
+                if (edid->revision >= 4 && (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF))
+                        return LEVEL_CVT;
+                if (drm_gtf2_hbreak(edid))
+                        return LEVEL_GTF2;
+                return LEVEL_GTF;
+        }
+        return LEVEL_DMT;
+}
+
 /*
  * 0 is reserved.  The spec says 0x01 fill for unused timings.  Some old
  * monitors fill with ascii space (0x20) instead.
@@ -536,22 +801,20 @@ bad_std_timing(u8 a, u8 b)
  *
  * Take the standard timing params (in this case width, aspect, and refresh)
  * and convert them into a real mode using CVT/GTF/DMT.
- *
- * Punts for now, but should eventually use the FB layer's CVT based mode
- * generation code.
  */
-struct drm_display_mode *drm_mode_std(struct drm_device *dev,
-                                      struct std_timing *t,
-                                      int revision,
-                                      int timing_level)
+static struct drm_display_mode *
+drm_mode_std(struct drm_connector *connector, struct edid *edid,
+             struct std_timing *t, int revision)
 {
-        struct drm_display_mode *mode;
+        struct drm_device *dev = connector->dev;
+        struct drm_display_mode *m, *mode = NULL;
         int hsize, vsize;
         int vrefresh_rate;
         unsigned aspect_ratio = (t->vfreq_aspect & EDID_TIMING_ASPECT_MASK)
                 >> EDID_TIMING_ASPECT_SHIFT;
         unsigned vfreq = (t->vfreq_aspect & EDID_TIMING_VFREQ_MASK)
                 >> EDID_TIMING_VFREQ_SHIFT;
+        int timing_level = standard_timing_level(edid);

         if (bad_std_timing(t->hsize, t->vfreq_aspect))
                 return NULL;
@@ -572,16 +835,36 @@ struct drm_display_mode *drm_mode_std(struct drm_device *dev,
                 vsize = (hsize * 4) / 5;
         else
                 vsize = (hsize * 9) / 16;
-        /* HDTV hack */
-        if (hsize == 1360 && vsize == 765 && vrefresh_rate == 60) {
-                mode = drm_cvt_mode(dev, hsize, vsize, vrefresh_rate, 0, 0,
+
+        /* HDTV hack, part 1 */
+        if (vrefresh_rate == 60 &&
+            ((hsize == 1360 && vsize == 765) ||
+             (hsize == 1368 && vsize == 769))) {
+                hsize = 1366;
+                vsize = 768;
+        }
+
+        /*
+         * If this connector already has a mode for this size and refresh
+         * rate (because it came from detailed or CVT info), use that
+         * instead.  This way we don't have to guess at interlace or
+         * reduced blanking.
+         */
+        list_for_each_entry(m, &connector->probed_modes, head)
+                if (m->hdisplay == hsize && m->vdisplay == vsize &&
+                    drm_mode_vrefresh(m) == vrefresh_rate)
+                        return NULL;
+
+        /* HDTV hack, part 2 */
+        if (hsize == 1366 && vsize == 768 && vrefresh_rate == 60) {
+                mode = drm_cvt_mode(dev, 1366, 768, vrefresh_rate, 0, 0,
                                     false);
                 mode->hdisplay = 1366;
                 mode->vsync_start = mode->vsync_start - 1;
                 mode->vsync_end = mode->vsync_end - 1;
                 return mode;
         }
-        mode = NULL;
+
         /* check whether it can be found in default mode table */
         mode = drm_find_dmt(dev, hsize, vsize, vrefresh_rate);
         if (mode)
@@ -593,6 +876,23 @@ struct drm_display_mode *drm_mode_std(struct drm_device *dev,
         case LEVEL_GTF:
                 mode = drm_gtf_mode(dev, hsize, vsize, vrefresh_rate, 0, 0);
                 break;
+        case LEVEL_GTF2:
+                /*
+                 * This is potentially wrong if there's ever a monitor with
+                 * more than one ranges section, each claiming a different
+                 * secondary GTF curve.  Please don't do that.
+                 */
+                mode = drm_gtf_mode(dev, hsize, vsize, vrefresh_rate, 0, 0);
+                if (drm_mode_hsync(mode) > drm_gtf2_hbreak(edid)) {
+                        kfree(mode);
+                        mode = drm_gtf_mode_complex(dev, hsize, vsize,
+                                                    vrefresh_rate, 0, 0,
+                                                    drm_gtf2_m(edid),
+                                                    drm_gtf2_2c(edid),
+                                                    drm_gtf2_k(edid),
+                                                    drm_gtf2_2j(edid));
+                }
+                break;
         case LEVEL_CVT:
                 mode = drm_cvt_mode(dev, hsize, vsize, vrefresh_rate, 0, 0,
                                     false);
@@ -716,10 +1016,10 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
         if (mode->vsync_end > mode->vtotal)
                 mode->vtotal = mode->vsync_end + 1;

-        drm_mode_set_name(mode);
-
         drm_mode_do_interlace_quirk(mode, pt);

+        drm_mode_set_name(mode);
+
         if (quirks & EDID_QUIRK_DETAILED_SYNC_PP) {
                 pt->misc |= DRM_EDID_PT_HSYNC_POSITIVE | DRM_EDID_PT_VSYNC_POSITIVE;
         }
@@ -802,10 +1102,6 @@ static struct drm_display_mode edid_est_modes[] = {
                    DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1152x864@75Hz */
 };

-#define EDID_EST_TIMINGS 16
-#define EDID_STD_TIMINGS 8
-#define EDID_DETAILED_TIMINGS 4
-
 /**
  * add_established_modes - get est. modes from EDID and add them
  * @edid: EDID block to scan
@@ -833,19 +1129,6 @@ static int add_established_modes(struct drm_connector *connector, struct edid *e

         return modes;
 }
-/**
- * stanard_timing_level - get std. timing level(CVT/GTF/DMT)
- * @edid: EDID block to scan
- */
-static int standard_timing_level(struct edid *edid)
-{
-        if (edid->revision >= 2) {
-                if (edid->revision >= 4 && (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF))
-                        return LEVEL_CVT;
-                return LEVEL_GTF;
-        }
-        return LEVEL_DMT;
-}

 /**
  * add_standard_modes - get std. modes from EDID and add them
@@ -856,22 +1139,14 @@ static int standard_timing_level(struct edid *edid)
  */
 static int add_standard_modes(struct drm_connector *connector, struct edid *edid)
 {
-        struct drm_device *dev = connector->dev;
         int i, modes = 0;
-        int timing_level;
-
-        timing_level = standard_timing_level(edid);

         for (i = 0; i < EDID_STD_TIMINGS; i++) {
-                struct std_timing *t = &edid->standard_timings[i];
                 struct drm_display_mode *newmode;

-                /* If std timings bytes are 1, 1 it's empty */
-                if (t->hsize == 1 && t->vfreq_aspect == 1)
-                        continue;
-
-                newmode = drm_mode_std(dev, &edid->standard_timings[i],
-                                       edid->revision, timing_level);
+                newmode = drm_mode_std(connector, edid,
+                                       &edid->standard_timings[i],
+                                       edid->revision);
                 if (newmode) {
                         drm_mode_probed_add(connector, newmode);
                         modes++;
@@ -881,36 +1156,86 @@ static int add_standard_modes(struct drm_connector *connector, struct edid *edid
         return modes;
 }

-/*
- * XXX fix this for:
- * - GTF secondary curve formula
- * - EDID 1.4 range offsets
- * - CVT extended bits
- */
 static bool
-mode_in_range(struct drm_display_mode *mode, struct detailed_timing *timing)
+mode_is_rb(struct drm_display_mode *mode)
 {
-        struct detailed_data_monitor_range *range;
-        int hsync, vrefresh;
-
-        range = &timing->data.other_data.data.range;
+        return (mode->htotal - mode->hdisplay == 160) &&
+               (mode->hsync_end - mode->hdisplay == 80) &&
+               (mode->hsync_end - mode->hsync_start == 32) &&
+               (mode->vsync_start - mode->vdisplay == 3);
+}

+static bool
+mode_in_hsync_range(struct drm_display_mode *mode, struct edid *edid, u8 *t)
+{
+        int hsync, hmin, hmax;
+
+        hmin = t[7];
+        if (edid->revision >= 4)
+                hmin += ((t[4] & 0x04) ? 255 : 0);
+        hmax = t[8];
+        if (edid->revision >= 4)
+                hmax += ((t[4] & 0x08) ? 255 : 0);
         hsync = drm_mode_hsync(mode);
-        vrefresh = drm_mode_vrefresh(mode);

-        if (hsync < range->min_hfreq_khz || hsync > range->max_hfreq_khz)
+        return (hsync <= hmax && hsync >= hmin);
+}
+
+static bool
+mode_in_vsync_range(struct drm_display_mode *mode, struct edid *edid, u8 *t)
+{
+        int vsync, vmin, vmax;
+
+        vmin = t[5];
+        if (edid->revision >= 4)
+                vmin += ((t[4] & 0x01) ? 255 : 0);
+        vmax = t[6];
+        if (edid->revision >= 4)
+                vmax += ((t[4] & 0x02) ? 255 : 0);
+        vsync = drm_mode_vrefresh(mode);
+
+        return (vsync <= vmax && vsync >= vmin);
+}
+
+static u32
+range_pixel_clock(struct edid *edid, u8 *t)
+{
+        /* unspecified */
+        if (t[9] == 0 || t[9] == 255)
+                return 0;
+
+        /* 1.4 with CVT support gives us real precision, yay */
+        if (edid->revision >= 4 && t[10] == 0x04)
+                return (t[9] * 10000) - ((t[12] >> 2) * 250);
+
+        /* 1.3 is pathetic, so fuzz up a bit */
+        return t[9] * 10000 + 5001;
+}
+
+static bool
+mode_in_range(struct drm_display_mode *mode, struct edid *edid,
+              struct detailed_timing *timing)
+{
+        u32 max_clock;
+        u8 *t = (u8 *)timing;
+
+        if (!mode_in_hsync_range(mode, edid, t))
                 return false;

-        if (vrefresh < range->min_vfreq || vrefresh > range->max_vfreq)
+        if (!mode_in_vsync_range(mode, edid, t))
                 return false;

-        if (range->pixel_clock_mhz && range->pixel_clock_mhz != 0xff) {
-                /* be forgiving since it's in units of 10MHz */
-                int max_clock = range->pixel_clock_mhz * 10 + 9;
-                max_clock *= 1000;
+        if ((max_clock = range_pixel_clock(edid, t)))
                 if (mode->clock > max_clock)
                         return false;
-        }
+
+        /* 1.4 max horizontal check */
+        if (edid->revision >= 4 && t[10] == 0x04)
+                if (t[13] && mode->hdisplay > 8 * (t[13] + (256 * (t[12]&0x3))))
+                        return false;
+
+        if (mode_is_rb(mode) && !drm_monitor_supports_rb(edid))
+                return false;

         return true;
 }
@@ -919,15 +1244,16 @@ mode_in_range(struct drm_display_mode *mode, struct detailed_timing *timing)
  * XXX If drm_dmt_modes ever regrows the CVT-R modes (and it will) this will
  * need to account for them.
  */
-static int drm_gtf_modes_for_range(struct drm_connector *connector,
-                                   struct detailed_timing *timing)
+static int
+drm_gtf_modes_for_range(struct drm_connector *connector, struct edid *edid,
+                        struct detailed_timing *timing)
 {
         int i, modes = 0;
         struct drm_display_mode *newmode;
         struct drm_device *dev = connector->dev;

         for (i = 0; i < drm_num_dmt_modes; i++) {
-                if (mode_in_range(drm_dmt_modes + i, timing)) {
+                if (mode_in_range(drm_dmt_modes + i, edid, timing)) {
                         newmode = drm_mode_duplicate(dev, &drm_dmt_modes[i]);
                         if (newmode) {
                                 drm_mode_probed_add(connector, newmode);
@@ -988,13 +1314,100 @@ static int drm_cvt_modes(struct drm_connector *connector,
         return modes;
 }

+static const struct {
+        short w;
+        short h;
+        short r;
+        short rb;
+} est3_modes[] = {
+        /* byte 6 */
+        { 640, 350, 85, 0 },
+        { 640, 400, 85, 0 },
+        { 720, 400, 85, 0 },
+        { 640, 480, 85, 0 },
+        { 848, 480, 60, 0 },
+        { 800, 600, 85, 0 },
+        { 1024, 768, 85, 0 },
+        { 1152, 864, 75, 0 },
+        /* byte 7 */
+        { 1280, 768, 60, 1 },
+        { 1280, 768, 60, 0 },
+        { 1280, 768, 75, 0 },
+        { 1280, 768, 85, 0 },
+        { 1280, 960, 60, 0 },
+        { 1280, 960, 85, 0 },
+        { 1280, 1024, 60, 0 },
+        { 1280, 1024, 85, 0 },
+        /* byte 8 */
+        { 1360, 768, 60, 0 },
+        { 1440, 900, 60, 1 },
+        { 1440, 900, 60, 0 },
+        { 1440, 900, 75, 0 },
+        { 1440, 900, 85, 0 },
+        { 1400, 1050, 60, 1 },
+        { 1400, 1050, 60, 0 },
+        { 1400, 1050, 75, 0 },
+        /* byte 9 */
+        { 1400, 1050, 85, 0 },
+        { 1680, 1050, 60, 1 },
+        { 1680, 1050, 60, 0 },
+        { 1680, 1050, 75, 0 },
+        { 1680, 1050, 85, 0 },
+        { 1600, 1200, 60, 0 },
+        { 1600, 1200, 65, 0 },
+        { 1600, 1200, 70, 0 },
+        /* byte 10 */
+        { 1600, 1200, 75, 0 },
+        { 1600, 1200, 85, 0 },
+        { 1792, 1344, 60, 0 },
+        { 1792, 1344, 85, 0 },
+        { 1856, 1392, 60, 0 },
+        { 1856, 1392, 75, 0 },
+        { 1920, 1200, 60, 1 },
+        { 1920, 1200, 60, 0 },
+        /* byte 11 */
+        { 1920, 1200, 75, 0 },
+        { 1920, 1200, 85, 0 },
+        { 1920, 1440, 60, 0 },
+        { 1920, 1440, 75, 0 },
+};
+static const int num_est3_modes = sizeof(est3_modes) / sizeof(est3_modes[0]);
+
+static int
+drm_est3_modes(struct drm_connector *connector, struct detailed_timing *timing)
+{
+        int i, j, m, modes = 0;
+        struct drm_display_mode *mode;
+        u8 *est = ((u8 *)timing) + 5;
+
+        for (i = 0; i < 6; i++) {
+                for (j = 7; j > 0; j--) {
+                        m = (i * 8) + (7 - j);
+                        if (m > num_est3_modes)
+                                break;
+                        if (est[i] & (1 << j)) {
+                                mode = drm_find_dmt(connector->dev,
+                                                    est3_modes[m].w,
+                                                    est3_modes[m].h,
+                                                    est3_modes[m].r
+                                                    /*, est3_modes[m].rb */);
+                                if (mode) {
+                                        drm_mode_probed_add(connector, mode);
+                                        modes++;
+                                }
+                        }
+                }
+        }
+
+        return modes;
+}
+
 static int add_detailed_modes(struct drm_connector *connector,
                               struct detailed_timing *timing,
                               struct edid *edid, u32 quirks, int preferred)
 {
         int i, modes = 0;
         struct detailed_non_pixel *data = &timing->data.other_data;
-        int timing_level = standard_timing_level(edid);
         int gtf = (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF);
         struct drm_display_mode *newmode;
         struct drm_device *dev = connector->dev;
@@ -1015,7 +1428,8 @@ static int add_detailed_modes(struct drm_connector *connector,
         switch (data->type) {
         case EDID_DETAIL_MONITOR_RANGE:
                 if (gtf)
-                        modes += drm_gtf_modes_for_range(connector, timing);
+                        modes += drm_gtf_modes_for_range(connector, edid,
+                                                         timing);
                 break;
         case EDID_DETAIL_STD_MODES:
                 /* Six modes per detailed section */
@@ -1024,8 +1438,8 @@ static int add_detailed_modes(struct drm_connector *connector,
                         struct drm_display_mode *newmode;

                         std = &data->data.timings[i];
-                        newmode = drm_mode_std(dev, std, edid->revision,
-                                               timing_level);
+                        newmode = drm_mode_std(connector, edid, std,
+                                               edid->revision);
                         if (newmode) {
                                 drm_mode_probed_add(connector, newmode);
                                 modes++;
@@ -1035,6 +1449,9 @@ static int add_detailed_modes(struct drm_connector *connector,
         case EDID_DETAIL_CVT_3BYTE:
                 modes += drm_cvt_modes(connector, timing);
                 break;
+        case EDID_DETAIL_EST_TIMINGS:
+                modes += drm_est3_modes(connector, timing);
+                break;
         default:
                 break;
         }
@@ -1058,7 +1475,10 @@ static int add_detailed_info(struct drm_connector *connector,

         for (i = 0; i < EDID_DETAILED_TIMINGS; i++) {
                 struct detailed_timing *timing = &edid->detailed_timings[i];
-                int preferred = (i == 0) && (edid->features & DRM_EDID_FEATURE_PREFERRED_TIMING);
+                int preferred = (i == 0);
+
+                if (preferred && edid->version == 1 && edid->revision < 4)
+                        preferred = (edid->features & DRM_EDID_FEATURE_PREFERRED_TIMING);

                 /* In 1.0, only timings are allowed */
                 if (!timing->pixel_clock && edid->version == 1 &&
@@ -1088,39 +1508,23 @@ static int add_detailed_info_eedid(struct drm_connector *connector,
         int i, modes = 0;
         char *edid_ext = NULL;
         struct detailed_timing *timing;
-        int edid_ext_num;
         int start_offset, end_offset;
         int timing_level;

-        if (edid->version == 1 && edid->revision < 3) {
-                /* If the EDID version is less than 1.3, there is no
-                 * extension EDID.
-                 */
+        if (edid->version == 1 && edid->revision < 3)
                 return 0;
-        }
-        if (!edid->extensions) {
-                /* if there is no extension EDID, it is unnecessary to
-                 * parse the E-EDID to get detailed info
-                 */
+        if (!edid->extensions)
                 return 0;
-        }
-
-        /* Chose real EDID extension number */
-        edid_ext_num = edid->extensions > DRM_MAX_EDID_EXT_NUM ?
-                DRM_MAX_EDID_EXT_NUM : edid->extensions;

         /* Find CEA extension */
-        for (i = 0; i < edid_ext_num; i++) {
+        for (i = 0; i < edid->extensions; i++) {
                 edid_ext = (char *)edid + EDID_LENGTH * (i + 1);
-                /* This block is CEA extension */
                 if (edid_ext[0] == 0x02)
                         break;
         }

-        if (i == edid_ext_num) {
-                /* if there is no additional timing EDID block, return */
+        if (i == edid->extensions)
                 return 0;
-        }

         /* Get the start offset of detailed timing block */
         start_offset = edid_ext[2];
@@ -1144,123 +1548,6 @@ static int add_detailed_info_eedid(struct drm_connector *connector,
         return modes;
 }

-#define DDC_ADDR 0x50
-/**
- * Get EDID information via I2C.
- *
- * \param adapter : i2c device adaptor
- * \param buf     : EDID data buffer to be filled
- * \param len     : EDID data buffer length
- * \return 0 on success or -1 on failure.
- *
- * Try to fetch EDID information by calling i2c driver function.
- */
-int drm_do_probe_ddc_edid(struct i2c_adapter *adapter,
-                          unsigned char *buf, int len)
-{
-        unsigned char start = 0x0;
-        struct i2c_msg msgs[] = {
-                {
-                        .addr   = DDC_ADDR,
-                        .flags  = 0,
-                        .len    = 1,
-                        .buf    = &start,
-                }, {
-                        .addr   = DDC_ADDR,
-                        .flags  = I2C_M_RD,
-                        .len    = len,
-                        .buf    = buf,
-                }
-        };
-
-        if (i2c_transfer(adapter, msgs, 2) == 2)
-                return 0;
-
-        return -1;
-}
-EXPORT_SYMBOL(drm_do_probe_ddc_edid);
-
-static int drm_ddc_read_edid(struct drm_connector *connector,
-                             struct i2c_adapter *adapter,
-                             char *buf, int len)
-{
-        int i;
-
-        for (i = 0; i < 4; i++) {
-                if (drm_do_probe_ddc_edid(adapter, buf, len))
-                        return -1;
-                if (drm_edid_is_valid((struct edid *)buf))
-                        return 0;
-        }
-
-        /* repeated checksum failures; warn, but carry on */
-        dev_warn(&connector->dev->pdev->dev, "%s: EDID invalid.\n",
-                 drm_get_connector_name(connector));
-        return -1;
-}
-
-/**
- * drm_get_edid - get EDID data, if available
- * @connector: connector we're probing
- * @adapter: i2c adapter to use for DDC
- *
- * Poke the given connector's i2c channel to grab EDID data if possible.
- *
- * Return edid data or NULL if we couldn't find any.
- */
-struct edid *drm_get_edid(struct drm_connector *connector,
-                          struct i2c_adapter *adapter)
-{
-        int ret;
-        struct edid *edid;
-
-        edid = kmalloc(EDID_LENGTH * (DRM_MAX_EDID_EXT_NUM + 1),
-                       GFP_KERNEL);
-        if (edid == NULL) {
-                dev_warn(&connector->dev->pdev->dev,
-                         "Failed to allocate EDID\n");
-                goto end;
-        }
-
-        /* Read first EDID block */
-        ret = drm_ddc_read_edid(connector, adapter,
-                                (unsigned char *)edid, EDID_LENGTH);
-        if (ret != 0)
-                goto clean_up;
-
-        /* There are EDID extensions to be read */
-        if (edid->extensions != 0) {
-                int edid_ext_num = edid->extensions;
-
-                if (edid_ext_num > DRM_MAX_EDID_EXT_NUM) {
-                        dev_warn(&connector->dev->pdev->dev,
-                                 "The number of extension(%d) is "
-                                 "over max (%d), actually read number (%d)\n",
-                                 edid_ext_num, DRM_MAX_EDID_EXT_NUM,
-                                 DRM_MAX_EDID_EXT_NUM);
-                        /* Reset EDID extension number to be read */
-                        edid_ext_num = DRM_MAX_EDID_EXT_NUM;
-                }
-                /* Read EDID including extensions too */
-                ret = drm_ddc_read_edid(connector, adapter, (char *)edid,
-                                        EDID_LENGTH * (edid_ext_num + 1));
-                if (ret != 0)
-                        goto clean_up;
-
-        }
-
-        connector->display_info.raw_edid = (char *)edid;
-        goto end;
-
-clean_up:
-        kfree(edid);
-        edid = NULL;
-end:
-        return edid;
-
-}
-EXPORT_SYMBOL(drm_get_edid);
-
 #define HDMI_IDENTIFIER 0x000C03
 #define VENDOR_BLOCK    0x03
 /**
@@ -1273,7 +1560,7 @@ EXPORT_SYMBOL(drm_get_edid);
 bool drm_detect_hdmi_monitor(struct edid *edid)
 {
         char *edid_ext = NULL;
-        int i, hdmi_id, edid_ext_num;
+        int i, hdmi_id;
         int start_offset, end_offset;
         bool is_hdmi = false;

@@ -1281,19 +1568,15 @@ bool drm_detect_hdmi_monitor(struct edid *edid)
         if (edid == NULL || edid->extensions == 0)
                 goto end;

-        /* Chose real EDID extension number */
-        edid_ext_num = edid->extensions > DRM_MAX_EDID_EXT_NUM ?
-                DRM_MAX_EDID_EXT_NUM : edid->extensions;
-
         /* Find CEA extension */
-        for (i = 0; i < edid_ext_num; i++) {
+        for (i = 0; i < edid->extensions; i++) {
                 edid_ext = (char *)edid + EDID_LENGTH * (i + 1);
                 /* This block is CEA extension */
                 if (edid_ext[0] == 0x02)
                         break;
         }

-        if (i == edid_ext_num)
+        if (i == edid->extensions)
                 goto end;

         /* Data block offset in CEA extension block */
@@ -1348,10 +1631,24 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid)
1348 1631
1349 quirks = edid_get_quirks(edid); 1632 quirks = edid_get_quirks(edid);
1350 1633
1351 num_modes += add_established_modes(connector, edid); 1634 /*
1352 num_modes += add_standard_modes(connector, edid); 1635 * EDID spec says modes should be preferred in this order:
1636 * - preferred detailed mode
1637 * - other detailed modes from base block
1638 * - detailed modes from extension blocks
1639 * - CVT 3-byte code modes
1640 * - standard timing codes
1641 * - established timing codes
1642 * - modes inferred from GTF or CVT range information
1643 *
1644 * We don't quite implement this yet, but we're close.
1645 *
1646 * XXX order for additional mode types in extension blocks?
1647 */
1353 num_modes += add_detailed_info(connector, edid, quirks); 1648 num_modes += add_detailed_info(connector, edid, quirks);
1354 num_modes += add_detailed_info_eedid(connector, edid, quirks); 1649 num_modes += add_detailed_info_eedid(connector, edid, quirks);
1650 num_modes += add_standard_modes(connector, edid);
1651 num_modes += add_established_modes(connector, edid);
1355 1652
1356 if (quirks & (EDID_QUIRK_PREFER_LARGE_60 | EDID_QUIRK_PREFER_LARGE_75)) 1653 if (quirks & (EDID_QUIRK_PREFER_LARGE_60 | EDID_QUIRK_PREFER_LARGE_75))
1357 edid_fixup_preferred(connector, quirks); 1654 edid_fixup_preferred(connector, quirks);
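
Taken together, the drm_edid.c changes keep the consumer-facing flow unchanged: a driver fetches the EDID, optionally sniffs the CEA extension for the HDMI vendor block, and then adds the probed modes. A minimal sketch of that flow, using only functions visible in this patch (the connector and i2c adapter are assumed to come from the driver; error handling trimmed):

    struct edid *edid = drm_get_edid(connector, adapter);
    int num_modes = 0;

    if (edid) {
            /* scans the CEA extension blocks for the HDMI vendor block */
            if (drm_detect_hdmi_monitor(edid))
                    DRM_DEBUG_KMS("sink is HDMI\n");
            /* adds detailed, standard and established modes, in that order */
            num_modes = drm_add_edid_modes(connector, edid);
    }
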
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 288ea2f32772..b28e56382e86 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -42,15 +42,35 @@ MODULE_LICENSE("GPL and additional rights");
42 42
43static LIST_HEAD(kernel_fb_helper_list); 43static LIST_HEAD(kernel_fb_helper_list);
44 44
45int drm_fb_helper_add_connector(struct drm_connector *connector) 45static struct slow_work_ops output_status_change_ops;
46
47/* simple single crtc case helper function */
48int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper)
46{ 49{
47 connector->fb_helper_private = kzalloc(sizeof(struct drm_fb_helper_connector), GFP_KERNEL); 50 struct drm_device *dev = fb_helper->dev;
48 if (!connector->fb_helper_private) 51 struct drm_connector *connector;
49 return -ENOMEM; 52 int i;
50 53
54 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
55 struct drm_fb_helper_connector *fb_helper_connector;
56
57 fb_helper_connector = kzalloc(sizeof(struct drm_fb_helper_connector), GFP_KERNEL);
58 if (!fb_helper_connector)
59 goto fail;
60
61 fb_helper_connector->connector = connector;
62 fb_helper->connector_info[fb_helper->connector_count++] = fb_helper_connector;
63 }
51 return 0; 64 return 0;
65fail:
66 for (i = 0; i < fb_helper->connector_count; i++) {
67 kfree(fb_helper->connector_info[i]);
68 fb_helper->connector_info[i] = NULL;
69 }
70 fb_helper->connector_count = 0;
71 return -ENOMEM;
52} 72}
53EXPORT_SYMBOL(drm_fb_helper_add_connector); 73EXPORT_SYMBOL(drm_fb_helper_single_add_all_connectors);
54 74
55/** 75/**
56 * drm_fb_helper_connector_parse_command_line - parse command line for connector 76 * drm_fb_helper_connector_parse_command_line - parse command line for connector
@@ -65,7 +85,7 @@ EXPORT_SYMBOL(drm_fb_helper_add_connector);
65 * 85 *
66 * enable/enable Digital/disable bit at the end 86 * enable/enable Digital/disable bit at the end
67 */ 87 */
68static bool drm_fb_helper_connector_parse_command_line(struct drm_connector *connector, 88static bool drm_fb_helper_connector_parse_command_line(struct drm_fb_helper_connector *fb_helper_conn,
69 const char *mode_option) 89 const char *mode_option)
70{ 90{
71 const char *name; 91 const char *name;
@@ -75,13 +95,13 @@ static bool drm_fb_helper_connector_parse_command_line(struct drm_connector *con
75 int yres_specified = 0, cvt = 0, rb = 0, interlace = 0, margins = 0; 95 int yres_specified = 0, cvt = 0, rb = 0, interlace = 0, margins = 0;
76 int i; 96 int i;
77 enum drm_connector_force force = DRM_FORCE_UNSPECIFIED; 97 enum drm_connector_force force = DRM_FORCE_UNSPECIFIED;
78 struct drm_fb_helper_connector *fb_help_conn = connector->fb_helper_private;
79 struct drm_fb_helper_cmdline_mode *cmdline_mode; 98 struct drm_fb_helper_cmdline_mode *cmdline_mode;
99 struct drm_connector *connector = fb_helper_conn->connector;
80 100
81 if (!fb_help_conn) 101 if (!fb_helper_conn)
82 return false; 102 return false;
83 103
84 cmdline_mode = &fb_help_conn->cmdline_mode; 104 cmdline_mode = &fb_helper_conn->cmdline_mode;
85 if (!mode_option) 105 if (!mode_option)
86 mode_option = fb_mode_option; 106 mode_option = fb_mode_option;
87 107
@@ -204,18 +224,21 @@ done:
204 return true; 224 return true;
205} 225}
206 226
207int drm_fb_helper_parse_command_line(struct drm_device *dev) 227static int drm_fb_helper_parse_command_line(struct drm_fb_helper *fb_helper)
208{ 228{
209 struct drm_connector *connector; 229 struct drm_fb_helper_connector *fb_helper_conn;
230 int i;
210 231
211 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 232 for (i = 0; i < fb_helper->connector_count; i++) {
212 char *option = NULL; 233 char *option = NULL;
213 234
235 fb_helper_conn = fb_helper->connector_info[i];
236
214 /* do something on return - turn off connector maybe */ 237 /* do something on return - turn off connector maybe */
215 if (fb_get_options(drm_get_connector_name(connector), &option)) 238 if (fb_get_options(drm_get_connector_name(fb_helper_conn->connector), &option))
216 continue; 239 continue;
217 240
218 drm_fb_helper_connector_parse_command_line(connector, option); 241 drm_fb_helper_connector_parse_command_line(fb_helper_conn, option);
219 } 242 }
220 return 0; 243 return 0;
221} 244}
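
The option string handed to drm_fb_helper_connector_parse_command_line() is the per-connector kernel parameter returned by fb_get_options(). A representative (illustrative, not from this patch) invocation matching the fields parsed above - resolution, bpp, refresh, plus the 'i' (interlace), 'm' (margins) and 'e'/'d'/'D' (force) suffixes of the standard fb modedb syntax:

    video=LVDS-1:1024x768-24@60m
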
@@ -293,6 +316,7 @@ static void drm_fb_helper_on(struct fb_info *info)
293 struct drm_fb_helper *fb_helper = info->par; 316 struct drm_fb_helper *fb_helper = info->par;
294 struct drm_device *dev = fb_helper->dev; 317 struct drm_device *dev = fb_helper->dev;
295 struct drm_crtc *crtc; 318 struct drm_crtc *crtc;
319 struct drm_crtc_helper_funcs *crtc_funcs;
296 struct drm_encoder *encoder; 320 struct drm_encoder *encoder;
297 int i; 321 int i;
298 322
@@ -300,33 +324,28 @@ static void drm_fb_helper_on(struct fb_info *info)
300 * For each CRTC in this fb, turn the crtc on then, 324 * For each CRTC in this fb, turn the crtc on then,
301 * find all associated encoders and turn them on. 325 * find all associated encoders and turn them on.
302 */ 326 */
327 mutex_lock(&dev->mode_config.mutex);
303 for (i = 0; i < fb_helper->crtc_count; i++) { 328 for (i = 0; i < fb_helper->crtc_count; i++) {
304 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 329 crtc = fb_helper->crtc_info[i].mode_set.crtc;
305 struct drm_crtc_helper_funcs *crtc_funcs = 330 crtc_funcs = crtc->helper_private;
306 crtc->helper_private;
307 331
308 /* Only mess with CRTCs in this fb */ 332 if (!crtc->enabled)
309 if (crtc->base.id != fb_helper->crtc_info[i].crtc_id || 333 continue;
310 !crtc->enabled) 334
311 continue; 335 crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
312 336
313 mutex_lock(&dev->mode_config.mutex);
314 crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
315 mutex_unlock(&dev->mode_config.mutex);
316 337
317 /* Found a CRTC on this fb, now find encoders */ 338 /* Found a CRTC on this fb, now find encoders */
318 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { 339 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
319 if (encoder->crtc == crtc) { 340 if (encoder->crtc == crtc) {
320 struct drm_encoder_helper_funcs *encoder_funcs; 341 struct drm_encoder_helper_funcs *encoder_funcs;
321 342
322 encoder_funcs = encoder->helper_private; 343 encoder_funcs = encoder->helper_private;
323 mutex_lock(&dev->mode_config.mutex); 344 encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
324 encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
325 mutex_unlock(&dev->mode_config.mutex);
326 }
327 } 345 }
328 } 346 }
329 } 347 }
348 mutex_unlock(&dev->mode_config.mutex);
330} 349}
331 350
332static void drm_fb_helper_off(struct fb_info *info, int dpms_mode) 351static void drm_fb_helper_off(struct fb_info *info, int dpms_mode)
@@ -334,6 +353,7 @@ static void drm_fb_helper_off(struct fb_info *info, int dpms_mode)
334 struct drm_fb_helper *fb_helper = info->par; 353 struct drm_fb_helper *fb_helper = info->par;
335 struct drm_device *dev = fb_helper->dev; 354 struct drm_device *dev = fb_helper->dev;
336 struct drm_crtc *crtc; 355 struct drm_crtc *crtc;
356 struct drm_crtc_helper_funcs *crtc_funcs;
337 struct drm_encoder *encoder; 357 struct drm_encoder *encoder;
338 int i; 358 int i;
339 359
@@ -341,32 +361,26 @@ static void drm_fb_helper_off(struct fb_info *info, int dpms_mode)
341 * For each CRTC in this fb, find all associated encoders 361 * For each CRTC in this fb, find all associated encoders
342 * and turn them off, then turn off the CRTC. 362 * and turn them off, then turn off the CRTC.
343 */ 363 */
364 mutex_lock(&dev->mode_config.mutex);
344 for (i = 0; i < fb_helper->crtc_count; i++) { 365 for (i = 0; i < fb_helper->crtc_count; i++) {
345 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 366 crtc = fb_helper->crtc_info[i].mode_set.crtc;
346 struct drm_crtc_helper_funcs *crtc_funcs = 367 crtc_funcs = crtc->helper_private;
347 crtc->helper_private;
348 368
349 /* Only mess with CRTCs in this fb */ 369 if (!crtc->enabled)
350 if (crtc->base.id != fb_helper->crtc_info[i].crtc_id || 370 continue;
351 !crtc->enabled)
352 continue;
353 371
354 /* Found a CRTC on this fb, now find encoders */ 372 /* Found a CRTC on this fb, now find encoders */
355 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { 373 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
356 if (encoder->crtc == crtc) { 374 if (encoder->crtc == crtc) {
357 struct drm_encoder_helper_funcs *encoder_funcs; 375 struct drm_encoder_helper_funcs *encoder_funcs;
358 376
359 encoder_funcs = encoder->helper_private; 377 encoder_funcs = encoder->helper_private;
360 mutex_lock(&dev->mode_config.mutex); 378 encoder_funcs->dpms(encoder, dpms_mode);
361 encoder_funcs->dpms(encoder, dpms_mode);
362 mutex_unlock(&dev->mode_config.mutex);
363 }
364 } 379 }
365 mutex_lock(&dev->mode_config.mutex);
366 crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
367 mutex_unlock(&dev->mode_config.mutex);
368 } 380 }
381 crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
369 } 382 }
383 mutex_unlock(&dev->mode_config.mutex);
370} 384}
371 385
372int drm_fb_helper_blank(int blank, struct fb_info *info) 386int drm_fb_helper_blank(int blank, struct fb_info *info)
@@ -401,50 +415,89 @@ static void drm_fb_helper_crtc_free(struct drm_fb_helper *helper)
401{ 415{
402 int i; 416 int i;
403 417
418 for (i = 0; i < helper->connector_count; i++)
419 kfree(helper->connector_info[i]);
420 kfree(helper->connector_info);
404 for (i = 0; i < helper->crtc_count; i++) 421 for (i = 0; i < helper->crtc_count; i++)
405 kfree(helper->crtc_info[i].mode_set.connectors); 422 kfree(helper->crtc_info[i].mode_set.connectors);
406 kfree(helper->crtc_info); 423 kfree(helper->crtc_info);
407} 424}
408 425
409int drm_fb_helper_init_crtc_count(struct drm_fb_helper *helper, int crtc_count, int max_conn_count) 426int drm_fb_helper_init(struct drm_device *dev,
427 struct drm_fb_helper *fb_helper,
428 int crtc_count, int max_conn_count,
429 bool polled)
410{ 430{
411 struct drm_device *dev = helper->dev;
412 struct drm_crtc *crtc; 431 struct drm_crtc *crtc;
413 int ret = 0; 432 int ret = 0;
414 int i; 433 int i;
415 434
416 helper->crtc_info = kcalloc(crtc_count, sizeof(struct drm_fb_helper_crtc), GFP_KERNEL); 435 fb_helper->dev = dev;
417 if (!helper->crtc_info) 436 fb_helper->poll_enabled = polled;
437
438 slow_work_register_user(THIS_MODULE);
439 delayed_slow_work_init(&fb_helper->output_status_change_slow_work,
440 &output_status_change_ops);
441
442 INIT_LIST_HEAD(&fb_helper->kernel_fb_list);
443
444 fb_helper->crtc_info = kcalloc(crtc_count, sizeof(struct drm_fb_helper_crtc), GFP_KERNEL);
445 if (!fb_helper->crtc_info)
418 return -ENOMEM; 446 return -ENOMEM;
419 447
420 helper->crtc_count = crtc_count; 448 fb_helper->crtc_count = crtc_count;
449 fb_helper->connector_info = kcalloc(dev->mode_config.num_connector, sizeof(struct drm_fb_helper_connector *), GFP_KERNEL);
450 if (!fb_helper->connector_info) {
451 kfree(fb_helper->crtc_info);
452 return -ENOMEM;
453 }
454 fb_helper->connector_count = 0;
421 455
422 for (i = 0; i < crtc_count; i++) { 456 for (i = 0; i < crtc_count; i++) {
423 helper->crtc_info[i].mode_set.connectors = 457 fb_helper->crtc_info[i].mode_set.connectors =
424 kcalloc(max_conn_count, 458 kcalloc(max_conn_count,
425 sizeof(struct drm_connector *), 459 sizeof(struct drm_connector *),
426 GFP_KERNEL); 460 GFP_KERNEL);
427 461
428 if (!helper->crtc_info[i].mode_set.connectors) { 462 if (!fb_helper->crtc_info[i].mode_set.connectors) {
429 ret = -ENOMEM; 463 ret = -ENOMEM;
430 goto out_free; 464 goto out_free;
431 } 465 }
432 helper->crtc_info[i].mode_set.num_connectors = 0; 466 fb_helper->crtc_info[i].mode_set.num_connectors = 0;
433 } 467 }
434 468
435 i = 0; 469 i = 0;
436 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 470 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
437 helper->crtc_info[i].crtc_id = crtc->base.id; 471 fb_helper->crtc_info[i].crtc_id = crtc->base.id;
438 helper->crtc_info[i].mode_set.crtc = crtc; 472 fb_helper->crtc_info[i].mode_set.crtc = crtc;
439 i++; 473 i++;
440 } 474 }
441 helper->conn_limit = max_conn_count; 475 fb_helper->conn_limit = max_conn_count;
442 return 0; 476 return 0;
443out_free: 477out_free:
444 drm_fb_helper_crtc_free(helper); 478 drm_fb_helper_crtc_free(fb_helper);
445 return -ENOMEM; 479 return -ENOMEM;
446} 480}
447EXPORT_SYMBOL(drm_fb_helper_init_crtc_count); 481EXPORT_SYMBOL(drm_fb_helper_init);
482
483void drm_fb_helper_fini(struct drm_fb_helper *fb_helper)
484{
485 if (!list_empty(&fb_helper->kernel_fb_list)) {
486 list_del(&fb_helper->kernel_fb_list);
487 if (list_empty(&kernel_fb_helper_list)) {
488 printk(KERN_INFO "unregistered panic notifier\n");
489 atomic_notifier_chain_unregister(&panic_notifier_list,
490 &paniced);
491 unregister_sysrq_key('v', &sysrq_drm_fb_helper_restore_op);
492 }
493 }
494
495 drm_fb_helper_crtc_free(fb_helper);
496
497 delayed_slow_work_cancel(&fb_helper->output_status_change_slow_work);
498 slow_work_unregister_user(THIS_MODULE);
499}
500EXPORT_SYMBOL(drm_fb_helper_fini);
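
With allocation moved into the helper, a KMS driver's fbdev setup collapses to a fixed call sequence. A minimal sketch under the new API - the crtc/connector counts and the foo_ ops table are driver-specific assumptions, not values from this patch:

    struct drm_fb_helper *helper;
    int ret;

    helper = kzalloc(sizeof(*helper), GFP_KERNEL);
    if (!helper)
            return -ENOMEM;
    helper->funcs = &foo_fb_helper_funcs;   /* driver-provided ops, see below */

    /* 2 crtcs, up to 4 connectors per crtc, hotplug handled by polling */
    ret = drm_fb_helper_init(dev, helper, 2, 4, true);
    if (ret)
            return ret;
    drm_fb_helper_single_add_all_connectors(helper);
    drm_fb_helper_initial_config(helper, 32 /* bpp_sel */);

    /* ... on teardown ... */
    drm_fb_helper_fini(helper);
    kfree(helper);
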
448 501
449static int setcolreg(struct drm_crtc *crtc, u16 red, u16 green, 502static int setcolreg(struct drm_crtc *crtc, u16 red, u16 green,
450 u16 blue, u16 regno, struct fb_info *info) 503 u16 blue, u16 regno, struct fb_info *info)
@@ -508,20 +561,15 @@ static int setcolreg(struct drm_crtc *crtc, u16 red, u16 green,
508int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info) 561int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info)
509{ 562{
510 struct drm_fb_helper *fb_helper = info->par; 563 struct drm_fb_helper *fb_helper = info->par;
511 struct drm_device *dev = fb_helper->dev; 564 struct drm_crtc_helper_funcs *crtc_funcs;
512 u16 *red, *green, *blue, *transp; 565 u16 *red, *green, *blue, *transp;
513 struct drm_crtc *crtc; 566 struct drm_crtc *crtc;
514 int i, rc = 0; 567 int i, rc = 0;
515 int start; 568 int start;
516 569
517 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 570 for (i = 0; i < fb_helper->crtc_count; i++) {
518 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; 571 crtc = fb_helper->crtc_info[i].mode_set.crtc;
519 for (i = 0; i < fb_helper->crtc_count; i++) { 572 crtc_funcs = crtc->helper_private;
520 if (crtc->base.id == fb_helper->crtc_info[i].crtc_id)
521 break;
522 }
523 if (i == fb_helper->crtc_count)
524 continue;
525 573
526 red = cmap->red; 574 red = cmap->red;
527 green = cmap->green; 575 green = cmap->green;
@@ -549,41 +597,6 @@ int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info)
549} 597}
550EXPORT_SYMBOL(drm_fb_helper_setcmap); 598EXPORT_SYMBOL(drm_fb_helper_setcmap);
551 599
552int drm_fb_helper_setcolreg(unsigned regno,
553 unsigned red,
554 unsigned green,
555 unsigned blue,
556 unsigned transp,
557 struct fb_info *info)
558{
559 struct drm_fb_helper *fb_helper = info->par;
560 struct drm_device *dev = fb_helper->dev;
561 struct drm_crtc *crtc;
562 int i;
563 int ret;
564
565 if (regno > 255)
566 return 1;
567
568 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
569 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
570 for (i = 0; i < fb_helper->crtc_count; i++) {
571 if (crtc->base.id == fb_helper->crtc_info[i].crtc_id)
572 break;
573 }
574 if (i == fb_helper->crtc_count)
575 continue;
576
577 ret = setcolreg(crtc, red, green, blue, regno, info);
578 if (ret)
579 return ret;
580
581 crtc_funcs->load_lut(crtc);
582 }
583 return 0;
584}
585EXPORT_SYMBOL(drm_fb_helper_setcolreg);
586
587int drm_fb_helper_check_var(struct fb_var_screeninfo *var, 600int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
588 struct fb_info *info) 601 struct fb_info *info)
589{ 602{
@@ -687,23 +700,21 @@ int drm_fb_helper_set_par(struct fb_info *info)
687 return -EINVAL; 700 return -EINVAL;
688 } 701 }
689 702
690 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 703 mutex_lock(&dev->mode_config.mutex);
691 704 for (i = 0; i < fb_helper->crtc_count; i++) {
692 for (i = 0; i < fb_helper->crtc_count; i++) { 705 crtc = fb_helper->crtc_info[i].mode_set.crtc;
693 if (crtc->base.id == fb_helper->crtc_info[i].crtc_id) 706 ret = crtc->funcs->set_config(&fb_helper->crtc_info[i].mode_set);
694 break; 707 if (ret) {
695 }
696 if (i == fb_helper->crtc_count)
697 continue;
698
699 if (crtc->fb == fb_helper->crtc_info[i].mode_set.fb) {
700 mutex_lock(&dev->mode_config.mutex);
701 ret = crtc->funcs->set_config(&fb_helper->crtc_info[i].mode_set);
702 mutex_unlock(&dev->mode_config.mutex); 708 mutex_unlock(&dev->mode_config.mutex);
703 if (ret) 709 return ret;
704 return ret;
705 } 710 }
706 } 711 }
712 mutex_unlock(&dev->mode_config.mutex);
713
714 if (fb_helper->delayed_hotplug) {
715 fb_helper->delayed_hotplug = false;
716 delayed_slow_work_enqueue(&fb_helper->output_status_change_slow_work, 0);
717 }
707 return 0; 718 return 0;
708} 719}
709EXPORT_SYMBOL(drm_fb_helper_set_par); 720EXPORT_SYMBOL(drm_fb_helper_set_par);
@@ -718,14 +729,9 @@ int drm_fb_helper_pan_display(struct fb_var_screeninfo *var,
718 int ret = 0; 729 int ret = 0;
719 int i; 730 int i;
720 731
721 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 732 mutex_lock(&dev->mode_config.mutex);
722 for (i = 0; i < fb_helper->crtc_count; i++) { 733 for (i = 0; i < fb_helper->crtc_count; i++) {
723 if (crtc->base.id == fb_helper->crtc_info[i].crtc_id) 734 crtc = fb_helper->crtc_info[i].mode_set.crtc;
724 break;
725 }
726
727 if (i == fb_helper->crtc_count)
728 continue;
729 735
730 modeset = &fb_helper->crtc_info[i].mode_set; 736 modeset = &fb_helper->crtc_info[i].mode_set;
731 737
@@ -733,181 +739,122 @@ int drm_fb_helper_pan_display(struct fb_var_screeninfo *var,
733 modeset->y = var->yoffset; 739 modeset->y = var->yoffset;
734 740
735 if (modeset->num_connectors) { 741 if (modeset->num_connectors) {
736 mutex_lock(&dev->mode_config.mutex);
737 ret = crtc->funcs->set_config(modeset); 742 ret = crtc->funcs->set_config(modeset);
738 mutex_unlock(&dev->mode_config.mutex);
739 if (!ret) { 743 if (!ret) {
740 info->var.xoffset = var->xoffset; 744 info->var.xoffset = var->xoffset;
741 info->var.yoffset = var->yoffset; 745 info->var.yoffset = var->yoffset;
742 } 746 }
743 } 747 }
744 } 748 }
749 mutex_unlock(&dev->mode_config.mutex);
745 return ret; 750 return ret;
746} 751}
747EXPORT_SYMBOL(drm_fb_helper_pan_display); 752EXPORT_SYMBOL(drm_fb_helper_pan_display);
748 753
749int drm_fb_helper_single_fb_probe(struct drm_device *dev, 754int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
750 int preferred_bpp, 755 int preferred_bpp)
751 int (*fb_create)(struct drm_device *dev,
752 uint32_t fb_width,
753 uint32_t fb_height,
754 uint32_t surface_width,
755 uint32_t surface_height,
756 uint32_t surface_depth,
757 uint32_t surface_bpp,
758 struct drm_framebuffer **fb_ptr))
759{ 756{
760 struct drm_crtc *crtc;
761 struct drm_connector *connector;
762 unsigned int fb_width = (unsigned)-1, fb_height = (unsigned)-1;
763 unsigned int surface_width = 0, surface_height = 0;
764 int new_fb = 0; 757 int new_fb = 0;
765 int crtc_count = 0; 758 int crtc_count = 0;
766 int ret, i, conn_count = 0; 759 int i;
767 struct fb_info *info; 760 struct fb_info *info;
768 struct drm_framebuffer *fb; 761 struct drm_fb_helper_surface_size sizes;
769 struct drm_mode_set *modeset = NULL; 762 int gamma_size = 0;
770 struct drm_fb_helper *fb_helper; 763
771 uint32_t surface_depth = 24, surface_bpp = 32; 764 memset(&sizes, 0, sizeof(struct drm_fb_helper_surface_size));
765 sizes.surface_depth = 24;
766 sizes.surface_bpp = 32;
767 sizes.fb_width = (unsigned)-1;
768 sizes.fb_height = (unsigned)-1;
772 769
773 /* if driver picks 8 or 16 by default use that 770 /* if driver picks 8 or 16 by default use that
774 for both depth/bpp */ 771 for both depth/bpp */
775 if (preferred_bpp != surface_bpp) { 772 if (preferred_bpp != sizes.surface_bpp) {
776 surface_depth = surface_bpp = preferred_bpp; 773 sizes.surface_depth = sizes.surface_bpp = preferred_bpp;
777 } 774 }
778 /* first up get a count of crtcs now in use and new min/maxes width/heights */ 775 /* first up get a count of crtcs now in use and new min/maxes width/heights */
779 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 776 for (i = 0; i < fb_helper->connector_count; i++) {
780 struct drm_fb_helper_connector *fb_help_conn = connector->fb_helper_private; 777 struct drm_fb_helper_connector *fb_helper_conn = fb_helper->connector_info[i];
781
782 struct drm_fb_helper_cmdline_mode *cmdline_mode; 778 struct drm_fb_helper_cmdline_mode *cmdline_mode;
783 779
784 if (!fb_help_conn) 780 cmdline_mode = &fb_helper_conn->cmdline_mode;
785 continue;
786
787 cmdline_mode = &fb_help_conn->cmdline_mode;
788 781
789 if (cmdline_mode->bpp_specified) { 782 if (cmdline_mode->bpp_specified) {
790 switch (cmdline_mode->bpp) { 783 switch (cmdline_mode->bpp) {
791 case 8: 784 case 8:
792 surface_depth = surface_bpp = 8; 785 sizes.surface_depth = sizes.surface_bpp = 8;
793 break; 786 break;
794 case 15: 787 case 15:
795 surface_depth = 15; 788 sizes.surface_depth = 15;
796 surface_bpp = 16; 789 sizes.surface_bpp = 16;
797 break; 790 break;
798 case 16: 791 case 16:
799 surface_depth = surface_bpp = 16; 792 sizes.surface_depth = sizes.surface_bpp = 16;
800 break; 793 break;
801 case 24: 794 case 24:
802 surface_depth = surface_bpp = 24; 795 sizes.surface_depth = sizes.surface_bpp = 24;
803 break; 796 break;
804 case 32: 797 case 32:
805 surface_depth = 24; 798 sizes.surface_depth = 24;
806 surface_bpp = 32; 799 sizes.surface_bpp = 32;
807 break; 800 break;
808 } 801 }
809 break; 802 break;
810 } 803 }
811 } 804 }
812 805
813 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 806 crtc_count = 0;
814 if (drm_helper_crtc_in_use(crtc)) { 807 for (i = 0; i < fb_helper->crtc_count; i++) {
815 if (crtc->desired_mode) { 808 struct drm_display_mode *desired_mode;
816 if (crtc->desired_mode->hdisplay < fb_width) 809 desired_mode = fb_helper->crtc_info[i].desired_mode;
817 fb_width = crtc->desired_mode->hdisplay; 810
818 811 if (desired_mode) {
819 if (crtc->desired_mode->vdisplay < fb_height) 812 if (gamma_size == 0)
820 fb_height = crtc->desired_mode->vdisplay; 813 gamma_size = fb_helper->crtc_info[i].mode_set.crtc->gamma_size;
821 814 if (desired_mode->hdisplay < sizes.fb_width)
822 if (crtc->desired_mode->hdisplay > surface_width) 815 sizes.fb_width = desired_mode->hdisplay;
823 surface_width = crtc->desired_mode->hdisplay; 816 if (desired_mode->vdisplay < sizes.fb_height)
824 817 sizes.fb_height = desired_mode->vdisplay;
825 if (crtc->desired_mode->vdisplay > surface_height) 818 if (desired_mode->hdisplay > sizes.surface_width)
826 surface_height = crtc->desired_mode->vdisplay; 819 sizes.surface_width = desired_mode->hdisplay;
827 } 820 if (desired_mode->vdisplay > sizes.surface_height)
821 sizes.surface_height = desired_mode->vdisplay;
828 crtc_count++; 822 crtc_count++;
829 } 823 }
830 } 824 }
831 825
832 if (crtc_count == 0 || fb_width == -1 || fb_height == -1) { 826 if (crtc_count == 0 || sizes.fb_width == -1 || sizes.fb_height == -1) {
833 /* hmm everyone went away - assume VGA cable just fell out 827 /* hmm everyone went away - assume VGA cable just fell out
834 and will come back later. */ 828 and will come back later. */
835 return 0; 829 DRM_ERROR("Cannot find any crtc or sizes - going 1024x768\n");
836 } 830 sizes.fb_width = sizes.surface_width = 1024;
837 831 sizes.fb_height = sizes.surface_height = 768;
838 /* do we have an fb already? */
839 if (list_empty(&dev->mode_config.fb_kernel_list)) {
840 ret = (*fb_create)(dev, fb_width, fb_height, surface_width,
841 surface_height, surface_depth, surface_bpp,
842 &fb);
843 if (ret)
844 return -EINVAL;
845 new_fb = 1;
846 } else {
847 fb = list_first_entry(&dev->mode_config.fb_kernel_list,
848 struct drm_framebuffer, filp_head);
849
850 /* if someone hotplugs something bigger than we have already allocated, we are pwned.
851 As really we can't resize an fbdev that is in the wild currently due to fbdev
852 not really being designed for the lower layers moving stuff around under it.
853 - so in the grand style of things - punt. */
854 if ((fb->width < surface_width) ||
855 (fb->height < surface_height)) {
856 DRM_ERROR("Framebuffer not large enough to scale console onto.\n");
857 return -EINVAL;
858 }
859 } 832 }
860 833
861 info = fb->fbdev; 834 /* push down into drivers */
862 fb_helper = info->par; 835 new_fb = (*fb_helper->funcs->fb_probe)(fb_helper, &sizes);
836 if (new_fb < 0)
837 return new_fb;
863 838
864 crtc_count = 0; 839 info = fb_helper->fbdev;
865 /* okay we need to setup new connector sets in the crtcs */
866 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
867 modeset = &fb_helper->crtc_info[crtc_count].mode_set;
868 modeset->fb = fb;
869 conn_count = 0;
870 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
871 if (connector->encoder)
872 if (connector->encoder->crtc == modeset->crtc) {
873 modeset->connectors[conn_count] = connector;
874 conn_count++;
875 if (conn_count > fb_helper->conn_limit)
876 BUG();
877 }
878 }
879 840
880 for (i = conn_count; i < fb_helper->conn_limit; i++) 841 /* set the fb pointer */
881 modeset->connectors[i] = NULL; 842 for (i = 0; i < fb_helper->crtc_count; i++) {
882 843 fb_helper->crtc_info[i].mode_set.fb = fb_helper->fb;
883 modeset->crtc = crtc;
884 crtc_count++;
885
886 modeset->num_connectors = conn_count;
887 if (modeset->crtc->desired_mode) {
888 if (modeset->mode)
889 drm_mode_destroy(dev, modeset->mode);
890 modeset->mode = drm_mode_duplicate(dev,
891 modeset->crtc->desired_mode);
892 }
893 } 844 }
894 fb_helper->crtc_count = crtc_count;
895 fb_helper->fb = fb;
896 845
897 if (new_fb) { 846 if (new_fb) {
898 info->var.pixclock = 0; 847 info->var.pixclock = 0;
899 ret = fb_alloc_cmap(&info->cmap, modeset->crtc->gamma_size, 0);
900 if (ret)
901 return ret;
902 if (register_framebuffer(info) < 0) { 848 if (register_framebuffer(info) < 0) {
903 fb_dealloc_cmap(&info->cmap);
904 return -EINVAL; 849 return -EINVAL;
905 } 850 }
851
852 printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node,
853 info->fix.id);
854
906 } else { 855 } else {
907 drm_fb_helper_set_par(info); 856 drm_fb_helper_set_par(info);
908 } 857 }
909 printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node,
910 info->fix.id);
911 858
912 /* Switch back to kernel console on panic */ 859 /* Switch back to kernel console on panic */
913 /* multi card linked list maybe */ 860 /* multi card linked list maybe */
@@ -917,25 +864,13 @@ int drm_fb_helper_single_fb_probe(struct drm_device *dev,
917 &paniced); 864 &paniced);
918 register_sysrq_key('v', &sysrq_drm_fb_helper_restore_op); 865 register_sysrq_key('v', &sysrq_drm_fb_helper_restore_op);
919 } 866 }
920 list_add(&fb_helper->kernel_fb_list, &kernel_fb_helper_list); 867 if (new_fb)
868 list_add(&fb_helper->kernel_fb_list, &kernel_fb_helper_list);
869
921 return 0; 870 return 0;
922} 871}
923EXPORT_SYMBOL(drm_fb_helper_single_fb_probe); 872EXPORT_SYMBOL(drm_fb_helper_single_fb_probe);
924 873
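
The probe path now pushes surface allocation down through fb_helper->funcs->fb_probe(), whose return value is tri-state: negative on error, 0 when an existing framebuffer was reused, positive when a new one was created (only then does the helper call register_framebuffer() and join the panic-notifier list). A skeleton of what a driver hook is expected to do - the foo_ name is illustrative, not from this patch:

    static int foo_fb_probe(struct drm_fb_helper *helper,
                            struct drm_fb_helper_surface_size *sizes)
    {
            uint32_t pitch = sizes->surface_width * sizes->surface_bpp / 8;

            if (helper->fb) {
                    /* surface already allocated and big enough - reuse it */
                    return 0;
            }

            /* allocate backing storage, a drm_framebuffer and an fb_info,
             * point helper->fb and helper->fbdev at them, then: */
            drm_fb_helper_fill_fix(helper->fbdev, pitch, sizes->surface_depth);
            drm_fb_helper_fill_var(helper->fbdev, helper,
                                   sizes->fb_width, sizes->fb_height);
            return 1;       /* new fb created - helper will register it */
    }
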
925void drm_fb_helper_free(struct drm_fb_helper *helper)
926{
927 list_del(&helper->kernel_fb_list);
928 if (list_empty(&kernel_fb_helper_list)) {
929 printk(KERN_INFO "unregistered panic notifier\n");
930 atomic_notifier_chain_unregister(&panic_notifier_list,
931 &paniced);
932 unregister_sysrq_key('v', &sysrq_drm_fb_helper_restore_op);
933 }
934 drm_fb_helper_crtc_free(helper);
935 fb_dealloc_cmap(&helper->fb->fbdev->cmap);
936}
937EXPORT_SYMBOL(drm_fb_helper_free);
938
939void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch, 874void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch,
940 uint32_t depth) 875 uint32_t depth)
941{ 876{
@@ -954,10 +889,11 @@ void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch,
954} 889}
955EXPORT_SYMBOL(drm_fb_helper_fill_fix); 890EXPORT_SYMBOL(drm_fb_helper_fill_fix);
956 891
957void drm_fb_helper_fill_var(struct fb_info *info, struct drm_framebuffer *fb, 892void drm_fb_helper_fill_var(struct fb_info *info, struct drm_fb_helper *fb_helper,
958 uint32_t fb_width, uint32_t fb_height) 893 uint32_t fb_width, uint32_t fb_height)
959{ 894{
960 info->pseudo_palette = fb->pseudo_palette; 895 struct drm_framebuffer *fb = fb_helper->fb;
896 info->pseudo_palette = fb_helper->pseudo_palette;
961 info->var.xres_virtual = fb->width; 897 info->var.xres_virtual = fb->width;
962 info->var.yres_virtual = fb->height; 898 info->var.yres_virtual = fb->height;
963 info->var.bits_per_pixel = fb->bits_per_pixel; 899 info->var.bits_per_pixel = fb->bits_per_pixel;
@@ -1025,3 +961,454 @@ void drm_fb_helper_fill_var(struct fb_info *info, struct drm_framebuffer *fb,
1025 info->var.yres = fb_height; 961 info->var.yres = fb_height;
1026} 962}
1027EXPORT_SYMBOL(drm_fb_helper_fill_var); 963EXPORT_SYMBOL(drm_fb_helper_fill_var);
964
965static int drm_fb_helper_probe_connector_modes(struct drm_fb_helper *fb_helper,
966 uint32_t maxX,
967 uint32_t maxY)
968{
969 struct drm_connector *connector;
970 int count = 0;
971 int i;
972
973 for (i = 0; i < fb_helper->connector_count; i++) {
974 connector = fb_helper->connector_info[i]->connector;
975 count += connector->funcs->fill_modes(connector, maxX, maxY);
976 }
977
978 return count;
979}
980
981static struct drm_display_mode *drm_has_preferred_mode(struct drm_fb_helper_connector *fb_connector, int width, int height)
982{
983 struct drm_display_mode *mode;
984
985 list_for_each_entry(mode, &fb_connector->connector->modes, head) {
986 if (drm_mode_width(mode) > width ||
987 drm_mode_height(mode) > height)
988 continue;
989 if (mode->type & DRM_MODE_TYPE_PREFERRED)
990 return mode;
991 }
992 return NULL;
993}
994
995static bool drm_has_cmdline_mode(struct drm_fb_helper_connector *fb_connector)
996{
997 struct drm_fb_helper_cmdline_mode *cmdline_mode;
998 cmdline_mode = &fb_connector->cmdline_mode;
999 return cmdline_mode->specified;
1000}
1001
1002static struct drm_display_mode *drm_pick_cmdline_mode(struct drm_fb_helper_connector *fb_helper_conn,
1003 int width, int height)
1004{
1005 struct drm_fb_helper_cmdline_mode *cmdline_mode;
1006 struct drm_display_mode *mode = NULL;
1007
1008 cmdline_mode = &fb_helper_conn->cmdline_mode;
1009 if (cmdline_mode->specified == false)
1010 return mode;
1011
1012 /* attempt to find a matching mode in the list of modes
1013 * we have gotten so far; if not, add a CVT mode that conforms
REPLACED-BY-GR-REPLACE
1014 */
1015 if (cmdline_mode->rb || cmdline_mode->margins)
1016 goto create_mode;
1017
1018 list_for_each_entry(mode, &fb_helper_conn->connector->modes, head) {
1019 /* check width/height */
1020 if (mode->hdisplay != cmdline_mode->xres ||
1021 mode->vdisplay != cmdline_mode->yres)
1022 continue;
1023
1024 if (cmdline_mode->refresh_specified) {
1025 if (mode->vrefresh != cmdline_mode->refresh)
1026 continue;
1027 }
1028
1029 if (cmdline_mode->interlace) {
1030 if (!(mode->flags & DRM_MODE_FLAG_INTERLACE))
1031 continue;
1032 }
1033 return mode;
1034 }
1035
1036create_mode:
1037 mode = drm_cvt_mode(fb_helper_conn->connector->dev, cmdline_mode->xres,
1038 cmdline_mode->yres,
1039 cmdline_mode->refresh_specified ? cmdline_mode->refresh : 60,
1040 cmdline_mode->rb, cmdline_mode->interlace,
1041 cmdline_mode->margins);
1042 drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
1043 list_add(&mode->head, &fb_helper_conn->connector->modes);
1044 return mode;
1045}
1046
1047static bool drm_connector_enabled(struct drm_connector *connector, bool strict)
1048{
1049 bool enable;
1050
1051 if (strict) {
1052 enable = connector->status == connector_status_connected;
1053 } else {
1054 enable = connector->status != connector_status_disconnected;
1055 }
1056 return enable;
1057}
1058
1059static void drm_enable_connectors(struct drm_fb_helper *fb_helper,
1060 bool *enabled)
1061{
1062 bool any_enabled = false;
1063 struct drm_connector *connector;
1064 int i = 0;
1065
1066 for (i = 0; i < fb_helper->connector_count; i++) {
1067 connector = fb_helper->connector_info[i]->connector;
1068 enabled[i] = drm_connector_enabled(connector, true);
1069 DRM_DEBUG_KMS("connector %d enabled? %s\n", connector->base.id,
1070 enabled[i] ? "yes" : "no");
1071 any_enabled |= enabled[i];
1072 }
1073
1074 if (any_enabled)
1075 return;
1076
1077 for (i = 0; i < fb_helper->connector_count; i++) {
1078 connector = fb_helper->connector_info[i]->connector;
1079 enabled[i] = drm_connector_enabled(connector, false);
1080 }
1081}
1082
1083static bool drm_target_preferred(struct drm_fb_helper *fb_helper,
1084 struct drm_display_mode **modes,
1085 bool *enabled, int width, int height)
1086{
1087 struct drm_fb_helper_connector *fb_helper_conn;
1088 int i;
1089
1090 for (i = 0; i < fb_helper->connector_count; i++) {
1091 fb_helper_conn = fb_helper->connector_info[i];
1092
1093 if (enabled[i] == false)
1094 continue;
1095
1096 DRM_DEBUG_KMS("looking for cmdline mode on connector %d\n",
1097 fb_helper_conn->connector->base.id);
1098
1099 /* go for the command line mode first */
1100 modes[i] = drm_pick_cmdline_mode(fb_helper_conn, width, height);
1101 if (!modes[i]) {
1102 DRM_DEBUG_KMS("looking for preferred mode on connector %d\n",
1103 fb_helper_conn->connector->base.id);
1104 modes[i] = drm_has_preferred_mode(fb_helper_conn, width, height);
1105 }
1106 /* No preferred modes, pick one off the list */
1107 if (!modes[i] && !list_empty(&fb_helper_conn->connector->modes)) {
1108 list_for_each_entry(modes[i], &fb_helper_conn->connector->modes, head)
1109 break;
1110 }
1111 DRM_DEBUG_KMS("found mode %s\n", modes[i] ? modes[i]->name :
1112 "none");
1113 }
1114 return true;
1115}
1116
1117static int drm_pick_crtcs(struct drm_fb_helper *fb_helper,
1118 struct drm_fb_helper_crtc **best_crtcs,
1119 struct drm_display_mode **modes,
1120 int n, int width, int height)
1121{
1122 int c, o;
1123 struct drm_device *dev = fb_helper->dev;
1124 struct drm_connector *connector;
1125 struct drm_connector_helper_funcs *connector_funcs;
1126 struct drm_encoder *encoder;
1127 struct drm_fb_helper_crtc *best_crtc;
1128 int my_score, best_score, score;
1129 struct drm_fb_helper_crtc **crtcs, *crtc;
1130 struct drm_fb_helper_connector *fb_helper_conn;
1131
1132 if (n == fb_helper->connector_count)
1133 return 0;
1134
1135 fb_helper_conn = fb_helper->connector_info[n];
1136 connector = fb_helper_conn->connector;
1137
1138 best_crtcs[n] = NULL;
1139 best_crtc = NULL;
1140 best_score = drm_pick_crtcs(fb_helper, best_crtcs, modes, n+1, width, height);
1141 if (modes[n] == NULL)
1142 return best_score;
1143
1144 crtcs = kzalloc(dev->mode_config.num_connector *
1145 sizeof(struct drm_fb_helper_crtc *), GFP_KERNEL);
1146 if (!crtcs)
1147 return best_score;
1148
1149 my_score = 1;
1150 if (connector->status == connector_status_connected)
1151 my_score++;
1152 if (drm_has_cmdline_mode(fb_helper_conn))
1153 my_score++;
1154 if (drm_has_preferred_mode(fb_helper_conn, width, height))
1155 my_score++;
1156
1157 connector_funcs = connector->helper_private;
1158 encoder = connector_funcs->best_encoder(connector);
1159 if (!encoder)
1160 goto out;
1161
1162 /* select a crtc for this connector and then attempt to configure
1163 remaining connectors */
1164 for (c = 0; c < fb_helper->crtc_count; c++) {
1165 crtc = &fb_helper->crtc_info[c];
1166
1167 if ((encoder->possible_crtcs & (1 << c)) == 0) {
1168 continue;
1169 }
1170
1171 for (o = 0; o < n; o++)
1172 if (best_crtcs[o] == crtc)
1173 break;
1174
1175 if (o < n) {
1176 /* ignore cloning for now */
1177 continue;
1178 }
1179
1180 crtcs[n] = crtc;
1181 memcpy(crtcs, best_crtcs, n * sizeof(struct drm_fb_helper_crtc *));
1182 score = my_score + drm_pick_crtcs(fb_helper, crtcs, modes, n + 1,
1183 width, height);
1184 if (score > best_score) {
1185 best_crtc = crtc;
1186 best_score = score;
1187 memcpy(best_crtcs, crtcs,
1188 dev->mode_config.num_connector *
1189 sizeof(struct drm_fb_helper_crtc *));
1190 }
1191 }
1192out:
1193 kfree(crtcs);
1194 return best_score;
1195}
1196
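
The recursion tries every permitted CRTC assignment for connector n and keeps the combination with the highest total score; each connector contributes a base point for being usable, plus one each for being connected, having a command-line mode, and having a preferred mode that fits. For example, a connected panel with a preferred mode scores 3, while a disconnected-but-enabled output with no modes scores only 1, so the panel wins any contested CRTC.
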
1197static void drm_setup_crtcs(struct drm_fb_helper *fb_helper)
1198{
1199 struct drm_device *dev = fb_helper->dev;
1200 struct drm_fb_helper_crtc **crtcs;
1201 struct drm_display_mode **modes;
1202 struct drm_encoder *encoder;
1203 struct drm_mode_set *modeset;
1204 bool *enabled;
1205 int width, height;
1206 int i, ret;
1207
1208 DRM_DEBUG_KMS("\n");
1209
1210 width = dev->mode_config.max_width;
1211 height = dev->mode_config.max_height;
1212
1213 /* clean out all the encoder/crtc combos */
1214 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
1215 encoder->crtc = NULL;
1216 }
1217
1218 crtcs = kcalloc(dev->mode_config.num_connector,
1219 sizeof(struct drm_fb_helper_crtc *), GFP_KERNEL);
1220 modes = kcalloc(dev->mode_config.num_connector,
1221 sizeof(struct drm_display_mode *), GFP_KERNEL);
1222 enabled = kcalloc(dev->mode_config.num_connector,
1223 sizeof(bool), GFP_KERNEL);
1224
1225 drm_enable_connectors(fb_helper, enabled);
1226
1227 ret = drm_target_preferred(fb_helper, modes, enabled, width, height);
1228 if (!ret)
1229 DRM_ERROR("Unable to find initial modes\n");
1230
1231 DRM_DEBUG_KMS("picking CRTCs for %dx%d config\n", width, height);
1232
1233 drm_pick_crtcs(fb_helper, crtcs, modes, 0, width, height);
1234
1235 /* need to set the modesets up here for use later */
1236 /* fill out the connector<->crtc mappings into the modesets */
1237 for (i = 0; i < fb_helper->crtc_count; i++) {
1238 modeset = &fb_helper->crtc_info[i].mode_set;
1239 modeset->num_connectors = 0;
1240 }
1241
1242 for (i = 0; i < fb_helper->connector_count; i++) {
1243 struct drm_display_mode *mode = modes[i];
1244 struct drm_fb_helper_crtc *fb_crtc = crtcs[i];
1245 modeset = &fb_crtc->mode_set;
1246
1247 if (mode && fb_crtc) {
1248 DRM_DEBUG_KMS("desired mode %s set on crtc %d\n",
1249 mode->name, fb_crtc->mode_set.crtc->base.id);
1250 fb_crtc->desired_mode = mode;
1251 if (modeset->mode)
1252 drm_mode_destroy(dev, modeset->mode);
1253 modeset->mode = drm_mode_duplicate(dev,
1254 fb_crtc->desired_mode);
1255 modeset->connectors[modeset->num_connectors++] = fb_helper->connector_info[i]->connector;
1256 }
1257 }
1258
1259 kfree(crtcs);
1260 kfree(modes);
1261 kfree(enabled);
1262}
1263
1264/**
1265 * drm_fb_helper_initial_config - setup a sane initial connector configuration
1266 * @fb_helper: fb_helper device struct
1267 *
1268 * LOCKING:
1269 * Called at init time, must take mode config lock.
1270 *
1271 * Scan the CRTCs and connectors and try to put together an initial setup.
1272 * At the moment, this is a cloned configuration across all heads with
1273 * a new framebuffer object as the backing store.
1274 *
1275 * RETURNS:
1276 * Zero if everything went ok, nonzero otherwise.
1277 */
1278bool drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel)
1279{
1280 struct drm_device *dev = fb_helper->dev;
1281 int count = 0;
1282
1283 /* disable all the possible outputs/crtcs before entering KMS mode */
1284 drm_helper_disable_unused_functions(fb_helper->dev);
1285
1286 drm_fb_helper_parse_command_line(fb_helper);
1287
1288 count = drm_fb_helper_probe_connector_modes(fb_helper,
1289 dev->mode_config.max_width,
1290 dev->mode_config.max_height);
1291 /*
1292 * we shouldn't end up with no modes here.
1293 */
1294 if (count == 0) {
1295 if (fb_helper->poll_enabled) {
1296 delayed_slow_work_enqueue(&fb_helper->output_status_change_slow_work,
1297 5*HZ);
1298 printk(KERN_INFO "No connectors reported connected with modes - started polling\n");
1299 } else
1300 printk(KERN_INFO "No connectors reported connected with modes\n");
1301 }
1302 drm_setup_crtcs(fb_helper);
1303
1304 return drm_fb_helper_single_fb_probe(fb_helper, bpp_sel);
1305}
1306EXPORT_SYMBOL(drm_fb_helper_initial_config);
1307
1308/* we got a hotplug irq - need to update fbcon */
1309void drm_helper_fb_hpd_irq_event(struct drm_fb_helper *fb_helper)
1310{
1311 /* if we don't have the fbdev registered yet do nothing */
1312 if (!fb_helper->fbdev)
1313 return;
1314
1315 /* schedule a slow work asap */
1316 delayed_slow_work_enqueue(&fb_helper->output_status_change_slow_work, 0);
1317}
1318EXPORT_SYMBOL(drm_helper_fb_hpd_irq_event);
1319
1320bool drm_helper_fb_hotplug_event(struct drm_fb_helper *fb_helper, bool polled)
1321{
1322 int count = 0;
1323 int ret;
1324 u32 max_width, max_height, bpp_sel;
1325
1326 if (!fb_helper->fb)
1327 return false;
1328 DRM_DEBUG_KMS("\n");
1329
1330 max_width = fb_helper->fb->width;
1331 max_height = fb_helper->fb->height;
1332 bpp_sel = fb_helper->fb->bits_per_pixel;
1333
1334 count = drm_fb_helper_probe_connector_modes(fb_helper, max_width,
1335 max_height);
1336 if (fb_helper->poll_enabled && !polled) {
1337 if (count) {
1338 delayed_slow_work_cancel(&fb_helper->output_status_change_slow_work);
1339 } else {
1340 ret = delayed_slow_work_enqueue(&fb_helper->output_status_change_slow_work, 5*HZ);
1341 }
1342 }
1343 drm_setup_crtcs(fb_helper);
1344
1345 return drm_fb_helper_single_fb_probe(fb_helper, bpp_sel);
1346}
1347EXPORT_SYMBOL(drm_helper_fb_hotplug_event);
1348
1349/*
1350 * delayed work queue execution function
1351 * - check if fbdev is actually in use on the gpu
1352 * - if not set delayed flag and repoll if necessary
1353 * - check for connector status change
1354 * - repoll if 0 modes found
1355 * - call driver output status changed notifier
1356 */
1357static void output_status_change_execute(struct slow_work *work)
1358{
1359 struct delayed_slow_work *delayed_work = container_of(work, struct delayed_slow_work, work);
1360 struct drm_fb_helper *fb_helper = container_of(delayed_work, struct drm_fb_helper, output_status_change_slow_work);
1361 struct drm_connector *connector;
1362 enum drm_connector_status old_status, status;
1363 bool repoll, changed = false;
1364 int ret;
1365 int i;
1366 bool bound = false, crtcs_bound = false;
1367 struct drm_crtc *crtc;
1368
1369 repoll = fb_helper->poll_enabled;
1370
1371 /* first of all, check that the fbcon framebuffer is actually bound to any crtc */
1372 /* take into account that no crtc at all may be bound */
1373 list_for_each_entry(crtc, &fb_helper->dev->mode_config.crtc_list, head) {
1374 if (crtc->fb)
1375 crtcs_bound = true;
1376 if (crtc->fb == fb_helper->fb)
1377 bound = true;
1378 }
1379
1380 if (bound == false && crtcs_bound) {
1381 fb_helper->delayed_hotplug = true;
1382 goto requeue;
1383 }
1384
1385 for (i = 0; i < fb_helper->connector_count; i++) {
1386 connector = fb_helper->connector_info[i]->connector;
1387 old_status = connector->status;
1388 status = connector->funcs->detect(connector);
1389 if (old_status != status) {
1390 changed = true;
1391 }
1392 if (status == connector_status_connected && repoll) {
1393 DRM_DEBUG("%s is connected - stop polling\n", drm_get_connector_name(connector));
1394 repoll = false;
1395 }
1396 }
1397
1398 if (changed) {
1399 if (fb_helper->funcs->fb_output_status_changed)
1400 fb_helper->funcs->fb_output_status_changed(fb_helper);
1401 }
1402
1403requeue:
1404 if (repoll) {
1405 ret = delayed_slow_work_enqueue(delayed_work, 5*HZ);
1406 if (ret)
1407 DRM_ERROR("delayed enqueue failed %d\n", ret);
1408 }
1409}
1410
1411static struct slow_work_ops output_status_change_ops = {
1412 .execute = output_status_change_execute,
1413};
1414
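
The slow-work poller only detects status changes; reacting is the driver's job via the fb_output_status_changed hook, which typically just funnels back into the hotplug path. A plausible (hypothetical) wiring, using only symbols introduced above:

    static void foo_fb_output_status_changed(struct drm_fb_helper *fb_helper)
    {
            /* re-probe modes and set up crtcs against the existing fb */
            drm_helper_fb_hotplug_event(fb_helper, true);
    }

    static struct drm_fb_helper_funcs foo_fb_helper_funcs = {
            .fb_probe = foo_fb_probe,
            .fb_output_status_changed = foo_fb_output_status_changed,
    };

Interrupt-driven drivers short-circuit the 5-second repoll entirely: their hotplug IRQ handler calls drm_helper_fb_hpd_irq_event(), which enqueues the same slow work with zero delay.
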
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index aa89d4b0b4c4..33dad3fa6043 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -124,6 +124,31 @@ drm_gem_destroy(struct drm_device *dev)
124} 124}
125 125
126/** 126/**
127 * Initialize an already allocated GEM object of the specified size with
128 * shmfs backing store.
129 */
130int drm_gem_object_init(struct drm_device *dev,
131 struct drm_gem_object *obj, size_t size)
132{
133 BUG_ON((size & (PAGE_SIZE - 1)) != 0);
134
135 obj->dev = dev;
136 obj->filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
137 if (IS_ERR(obj->filp))
138 return -ENOMEM;
139
140 kref_init(&obj->refcount);
141 kref_init(&obj->handlecount);
142 obj->size = size;
143
144 atomic_inc(&dev->object_count);
145 atomic_add(obj->size, &dev->object_memory);
146
147 return 0;
148}
149EXPORT_SYMBOL(drm_gem_object_init);
150
151/**
127 * Allocate a GEM object of the specified size with shmfs backing store 152 * Allocate a GEM object of the specified size with shmfs backing store
128 */ 153 */
129struct drm_gem_object * 154struct drm_gem_object *
@@ -131,28 +156,22 @@ drm_gem_object_alloc(struct drm_device *dev, size_t size)
131{ 156{
132 struct drm_gem_object *obj; 157 struct drm_gem_object *obj;
133 158
134 BUG_ON((size & (PAGE_SIZE - 1)) != 0);
135
136 obj = kzalloc(sizeof(*obj), GFP_KERNEL); 159 obj = kzalloc(sizeof(*obj), GFP_KERNEL);
137 if (!obj) 160 if (!obj)
138 goto free; 161 goto free;
139 162
140 obj->dev = dev; 163 if (drm_gem_object_init(dev, obj, size) != 0)
141 obj->filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
142 if (IS_ERR(obj->filp))
143 goto free; 164 goto free;
144 165
145 kref_init(&obj->refcount);
146 kref_init(&obj->handlecount);
147 obj->size = size;
148 if (dev->driver->gem_init_object != NULL && 166 if (dev->driver->gem_init_object != NULL &&
149 dev->driver->gem_init_object(obj) != 0) { 167 dev->driver->gem_init_object(obj) != 0) {
150 goto fput; 168 goto fput;
151 } 169 }
152 atomic_inc(&dev->object_count);
153 atomic_add(obj->size, &dev->object_memory);
154 return obj; 170 return obj;
155fput: 171fput:
172 /* drm_gem_object_init incremented the global counters - readjust them. */
173 atomic_dec(&dev->object_count);
174 atomic_sub(obj->size, &dev->object_memory);
156 fput(obj->filp); 175 fput(obj->filp);
157free: 176free:
158 kfree(obj); 177 kfree(obj);
@@ -403,15 +422,15 @@ drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
403 idr_destroy(&file_private->object_idr); 422 idr_destroy(&file_private->object_idr);
404} 423}
405 424
406static void 425void
407drm_gem_object_free_common(struct drm_gem_object *obj) 426drm_gem_object_release(struct drm_gem_object *obj)
408{ 427{
409 struct drm_device *dev = obj->dev; 428 struct drm_device *dev = obj->dev;
410 fput(obj->filp); 429 fput(obj->filp);
411 atomic_dec(&dev->object_count); 430 atomic_dec(&dev->object_count);
412 atomic_sub(obj->size, &dev->object_memory); 431 atomic_sub(obj->size, &dev->object_memory);
413 kfree(obj);
414} 432}
433EXPORT_SYMBOL(drm_gem_object_release);
415 434
416/** 435/**
417 * Called after the last reference to the object has been lost. 436 * Called after the last reference to the object has been lost.
@@ -429,8 +448,6 @@ drm_gem_object_free(struct kref *kref)
429 448
430 if (dev->driver->gem_free_object != NULL) 449 if (dev->driver->gem_free_object != NULL)
431 dev->driver->gem_free_object(obj); 450 dev->driver->gem_free_object(obj);
432
433 drm_gem_object_free_common(obj);
434} 451}
435EXPORT_SYMBOL(drm_gem_object_free); 452EXPORT_SYMBOL(drm_gem_object_free);
436 453
@@ -453,8 +470,6 @@ drm_gem_object_free_unlocked(struct kref *kref)
453 dev->driver->gem_free_object(obj); 470 dev->driver->gem_free_object(obj);
454 mutex_unlock(&dev->struct_mutex); 471 mutex_unlock(&dev->struct_mutex);
455 } 472 }
456
457 drm_gem_object_free_common(obj);
458} 473}
459EXPORT_SYMBOL(drm_gem_object_free_unlocked); 474EXPORT_SYMBOL(drm_gem_object_free_unlocked);
460 475
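
Splitting allocation out of drm_gem_object_alloc() lets a driver embed the GEM object in its own buffer struct instead of allocating it separately; drm_gem_object_release() is the matching teardown, and freeing the containing structure becomes the driver's responsibility. A minimal sketch of the embedded pattern (struct foo_bo is illustrative):

    struct foo_bo {
            struct drm_gem_object base;
            /* driver-private state ... */
    };

    struct foo_bo *bo;
    int ret;

    bo = kzalloc(sizeof(*bo), GFP_KERNEL);
    if (!bo)
            return -ENOMEM;
    /* size must be page-aligned; drm_gem_object_init() BUG()s otherwise */
    ret = drm_gem_object_init(dev, &bo->base, size);
    if (ret) {
            kfree(bo);
            return ret;
    }

    /* ... and later, in the driver's gem_free_object hook: */
    drm_gem_object_release(&bo->base);
    kfree(bo);
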
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index 76d63394c776..f1f473ea97d3 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -258,8 +258,10 @@ struct drm_display_mode *drm_cvt_mode(struct drm_device *dev, int hdisplay,
258 drm_mode->clock -= drm_mode->clock % CVT_CLOCK_STEP; 258 drm_mode->clock -= drm_mode->clock % CVT_CLOCK_STEP;
259 /* 18/16. Find actual vertical frame frequency */ 259 /* 18/16. Find actual vertical frame frequency */
260 /* ignore - just set the mode flag for interlaced */ 260 /* ignore - just set the mode flag for interlaced */
261 if (interlaced) 261 if (interlaced) {
262 drm_mode->vtotal *= 2; 262 drm_mode->vtotal *= 2;
263 drm_mode->flags |= DRM_MODE_FLAG_INTERLACE;
264 }
263 /* Fill the mode line name */ 265 /* Fill the mode line name */
264 drm_mode_set_name(drm_mode); 266 drm_mode_set_name(drm_mode);
265 if (reduced) 267 if (reduced)
@@ -268,43 +270,35 @@ struct drm_display_mode *drm_cvt_mode(struct drm_device *dev, int hdisplay,
268 else 270 else
269 drm_mode->flags |= (DRM_MODE_FLAG_PVSYNC | 271 drm_mode->flags |= (DRM_MODE_FLAG_PVSYNC |
270 DRM_MODE_FLAG_NHSYNC); 272 DRM_MODE_FLAG_NHSYNC);
271 if (interlaced)
272 drm_mode->flags |= DRM_MODE_FLAG_INTERLACE;
273 273
274 return drm_mode; 274 return drm_mode;
275} 275}
276EXPORT_SYMBOL(drm_cvt_mode); 276EXPORT_SYMBOL(drm_cvt_mode);
277 277
278/** 278/**
279 * drm_gtf_mode - create the modeline based on GTF algorithm 279 * drm_gtf_mode_complex - create the modeline based on full GTF algorithm
280 * 280 *
281 * @dev :drm device 281 * @dev :drm device
282 * @hdisplay :hdisplay size 282 * @hdisplay :hdisplay size
283 * @vdisplay :vdisplay size 283 * @vdisplay :vdisplay size
284 * @vrefresh :vrefresh rate. 284 * @vrefresh :vrefresh rate.
285 * @interlaced :whether the interlace is supported 285 * @interlaced :whether the interlace is supported
286 * @margins :whether the margin is supported 286 * @margins :desired margin size
287 * @GTF_[MCKJ] :extended GTF formula parameters
287 * 288 *
288 * LOCKING. 289 * LOCKING.
289 * none. 290 * none.
290 * 291 *
291 * return the modeline based on GTF algorithm 292 * return the modeline based on full GTF algorithm.
292 *
293 * This function is to create the modeline based on the GTF algorithm.
294 * Generalized Timing Formula is derived from:
295 * GTF Spreadsheet by Andy Morrish (1/5/97)
296 * available at http://www.vesa.org
297 * 293 *
298 * And it is copied from the file of xserver/hw/xfree86/modes/xf86gtf.c. 294 * GTF feature blocks specify C and J in multiples of 0.5, so we pass them
299 * What I have done is to translate it by using integer calculation. 295 * in here multiplied by two. For a C of 40, pass in 80.
300 * I also refer to the function of fb_get_mode in the file of
301 * drivers/video/fbmon.c
302 */ 296 */
303struct drm_display_mode *drm_gtf_mode(struct drm_device *dev, int hdisplay, 297struct drm_display_mode *
304 int vdisplay, int vrefresh, 298drm_gtf_mode_complex(struct drm_device *dev, int hdisplay, int vdisplay,
305 bool interlaced, int margins) 299 int vrefresh, bool interlaced, int margins,
306{ 300 int GTF_M, int GTF_2C, int GTF_K, int GTF_2J)
307 /* 1) top/bottom margin size (% of height) - default: 1.8, */ 301{ /* 1) top/bottom margin size (% of height) - default: 1.8, */
308#define GTF_MARGIN_PERCENTAGE 18 302#define GTF_MARGIN_PERCENTAGE 18
309 /* 2) character cell horizontal granularity (pixels) - default 8 */ 303 /* 2) character cell horizontal granularity (pixels) - default 8 */
310#define GTF_CELL_GRAN 8 304#define GTF_CELL_GRAN 8
@@ -316,17 +310,9 @@ struct drm_display_mode *drm_gtf_mode(struct drm_device *dev, int hdisplay,
316#define H_SYNC_PERCENT 8 310#define H_SYNC_PERCENT 8
317 /* min time of vsync + back porch (microsec) */ 311 /* min time of vsync + back porch (microsec) */
318#define MIN_VSYNC_PLUS_BP 550 312#define MIN_VSYNC_PLUS_BP 550
319 /* blanking formula gradient */
320#define GTF_M 600
321 /* blanking formula offset */
322#define GTF_C 40
323 /* blanking formula scaling factor */
324#define GTF_K 128
325 /* blanking formula scaling factor */
326#define GTF_J 20
327 /* C' and M' are part of the Blanking Duty Cycle computation */ 313 /* C' and M' are part of the Blanking Duty Cycle computation */
328#define GTF_C_PRIME (((GTF_C - GTF_J) * GTF_K / 256) + GTF_J) 314#define GTF_C_PRIME ((((GTF_2C - GTF_2J) * GTF_K / 256) + GTF_2J) / 2)
329#define GTF_M_PRIME (GTF_K * GTF_M / 256) 315#define GTF_M_PRIME (GTF_K * GTF_M / 256)
330 struct drm_display_mode *drm_mode; 316 struct drm_display_mode *drm_mode;
331 unsigned int hdisplay_rnd, vdisplay_rnd, vfieldrate_rqd; 317 unsigned int hdisplay_rnd, vdisplay_rnd, vfieldrate_rqd;
332 int top_margin, bottom_margin; 318 int top_margin, bottom_margin;
@@ -460,17 +446,61 @@ struct drm_display_mode *drm_gtf_mode(struct drm_device *dev, int hdisplay,
460 446
461 drm_mode->clock = pixel_freq; 447 drm_mode->clock = pixel_freq;
462 448
463 drm_mode_set_name(drm_mode);
464 drm_mode->flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC;
465
466 if (interlaced) { 449 if (interlaced) {
467 drm_mode->vtotal *= 2; 450 drm_mode->vtotal *= 2;
468 drm_mode->flags |= DRM_MODE_FLAG_INTERLACE; 451 drm_mode->flags |= DRM_MODE_FLAG_INTERLACE;
469 } 452 }
470 453
454 drm_mode_set_name(drm_mode);
455 if (GTF_M == 600 && GTF_2C == 80 && GTF_K == 128 && GTF_2J == 40)
456 drm_mode->flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC;
457 else
458 drm_mode->flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC;
459
471 return drm_mode; 460 return drm_mode;
472} 461}
462EXPORT_SYMBOL(drm_gtf_mode_complex);
463
464/**
465 * drm_gtf_mode - create the modeline based on GTF algorithm
466 *
467 * @dev :drm device
468 * @hdisplay :hdisplay size
469 * @vdisplay :vdisplay size
470 * @vrefresh :vrefresh rate.
471 * @interlaced :whether the interlace is supported
472 * @margins :whether the margin is supported
473 *
474 * LOCKING.
475 * none.
476 *
477 * return the modeline based on GTF algorithm
478 *
479 * This function is to create the modeline based on the GTF algorithm.
480 * Generalized Timing Formula is derived from:
481 * GTF Spreadsheet by Andy Morrish (1/5/97)
482 * available at http://www.vesa.org
483 *
484 * And it is copied from the file of xserver/hw/xfree86/modes/xf86gtf.c.
485 * What I have done is to translate it by using integer calculation.
486 * I also refer to the function of fb_get_mode in the file of
487 * drivers/video/fbmon.c
488 *
489 * Standard GTF parameters:
490 * M = 600
491 * C = 40
492 * K = 128
493 * J = 20
494 */
495struct drm_display_mode *
496drm_gtf_mode(struct drm_device *dev, int hdisplay, int vdisplay, int vrefresh,
497 bool lace, int margins)
498{
499 return drm_gtf_mode_complex(dev, hdisplay, vdisplay, vrefresh, lace,
500 margins, 600, 40 * 2, 128, 20 * 2);
501}
473EXPORT_SYMBOL(drm_gtf_mode); 502EXPORT_SYMBOL(drm_gtf_mode);
503
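
Since C and J arrive doubled, the standard-GTF call spelled out through the new entry point is exactly the wrapper above; a secondary-GTF sink would instead substitute the M/C/K/J values from its EDID range descriptor. For example (a sketch; values are the standard defaults, not from any real display):

    /* identical to drm_gtf_mode(dev, 1024, 768, 60, false, 0) */
    mode = drm_gtf_mode_complex(dev, 1024, 768, 60, false, 0,
                                600, 40 * 2, 128, 20 * 2);

Note the sync-polarity fallout in the code above: non-default parameters flip the generated mode to +hsync/-vsync, the convention used to distinguish secondary-GTF timings.
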
474/** 504/**
475 * drm_mode_set_name - set the name on a mode 505 * drm_mode_set_name - set the name on a mode
476 * @mode: name will be set in this mode 506 * @mode: name will be set in this mode
@@ -482,8 +512,11 @@ EXPORT_SYMBOL(drm_gtf_mode);
482 */ 512 */
483void drm_mode_set_name(struct drm_display_mode *mode) 513void drm_mode_set_name(struct drm_display_mode *mode)
484{ 514{
485 snprintf(mode->name, DRM_DISPLAY_MODE_LEN, "%dx%d", mode->hdisplay, 515 bool interlaced = !!(mode->flags & DRM_MODE_FLAG_INTERLACE);
486 mode->vdisplay); 516
517 snprintf(mode->name, DRM_DISPLAY_MODE_LEN, "%dx%d%s",
518 mode->hdisplay, mode->vdisplay,
519 interlaced ? "i" : "");
487} 520}
488EXPORT_SYMBOL(drm_mode_set_name); 521EXPORT_SYMBOL(drm_mode_set_name);
489 522
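
The name change dovetails with the drm_cvt_mode() fix earlier in this patch: because the INTERLACE flag is now set before drm_mode_set_name() runs, interlaced CVT modes pick up the suffix automatically. For instance (sketch):

    struct drm_display_mode *m;

    m = drm_cvt_mode(dev, 1920, 1080, 60, false, true /* interlaced */, false);
    /* m->vtotal has been doubled and m->name is now "1920x1080i" */
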
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
index 25bbd30ed7af..3a3a451d0bf8 100644
--- a/drivers/gpu/drm/drm_sysfs.c
+++ b/drivers/gpu/drm/drm_sysfs.c
@@ -333,7 +333,7 @@ static struct device_attribute connector_attrs_opt1[] = {
333static struct bin_attribute edid_attr = { 333static struct bin_attribute edid_attr = {
334 .attr.name = "edid", 334 .attr.name = "edid",
335 .attr.mode = 0444, 335 .attr.mode = 0444,
336 .size = 128, 336 .size = 0,
337 .read = edid_show, 337 .read = edid_show,
338}; 338};
339 339
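
Setting .size = 0 makes the sysfs binary attribute unsized: reads are bounded only by what edid_show() returns, so EDIDs with extension blocks (128 bytes per block beyond the 128-byte base) are no longer truncated at the old hard-coded 128.
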
diff --git a/drivers/gpu/drm/i915/dvo.h b/drivers/gpu/drm/i915/dvo.h
index 288fc50627e2..0d6ff640e1c6 100644
--- a/drivers/gpu/drm/i915/dvo.h
+++ b/drivers/gpu/drm/i915/dvo.h
@@ -70,16 +70,6 @@ struct intel_dvo_dev_ops {
70 void (*dpms)(struct intel_dvo_device *dvo, int mode); 70 void (*dpms)(struct intel_dvo_device *dvo, int mode);
71 71
72 /* 72 /*
73 * Saves the output's state for restoration on VT switch.
74 */
75 void (*save)(struct intel_dvo_device *dvo);
76
77 /*
78 * Restore's the output's state at VT switch.
79 */
80 void (*restore)(struct intel_dvo_device *dvo);
81
82 /*
83 * Callback for testing a video mode for a given output. 73 * Callback for testing a video mode for a given output.
84 * 74 *
85 * This function should only check for cases where a mode can't 75 * This function should only check for cases where a mode can't
diff --git a/drivers/gpu/drm/i915/dvo_ch7017.c b/drivers/gpu/drm/i915/dvo_ch7017.c
index 1184c14ba87d..14d59804acd7 100644
--- a/drivers/gpu/drm/i915/dvo_ch7017.c
+++ b/drivers/gpu/drm/i915/dvo_ch7017.c
@@ -159,16 +159,7 @@
159#define CH7017_BANG_LIMIT_CONTROL 0x7f 159#define CH7017_BANG_LIMIT_CONTROL 0x7f
160 160
161struct ch7017_priv { 161struct ch7017_priv {
162 uint8_t save_hapi; 162 uint8_t dummy;
163 uint8_t save_vali;
164 uint8_t save_valo;
165 uint8_t save_ailo;
166 uint8_t save_lvds_pll_vco;
167 uint8_t save_feedback_div;
168 uint8_t save_lvds_control_2;
169 uint8_t save_outputs_enable;
170 uint8_t save_lvds_power_down;
171 uint8_t save_power_management;
172}; 163};
173 164
174static void ch7017_dump_regs(struct intel_dvo_device *dvo); 165static void ch7017_dump_regs(struct intel_dvo_device *dvo);
@@ -401,39 +392,6 @@ do { \
401 DUMP(CH7017_LVDS_POWER_DOWN); 392 DUMP(CH7017_LVDS_POWER_DOWN);
402} 393}
403 394
404static void ch7017_save(struct intel_dvo_device *dvo)
405{
406 struct ch7017_priv *priv = dvo->dev_priv;
407
408 ch7017_read(dvo, CH7017_HORIZONTAL_ACTIVE_PIXEL_INPUT, &priv->save_hapi);
409 ch7017_read(dvo, CH7017_VERTICAL_ACTIVE_LINE_OUTPUT, &priv->save_valo);
410 ch7017_read(dvo, CH7017_ACTIVE_INPUT_LINE_OUTPUT, &priv->save_ailo);
411 ch7017_read(dvo, CH7017_LVDS_PLL_VCO_CONTROL, &priv->save_lvds_pll_vco);
412 ch7017_read(dvo, CH7017_LVDS_PLL_FEEDBACK_DIV, &priv->save_feedback_div);
413 ch7017_read(dvo, CH7017_LVDS_CONTROL_2, &priv->save_lvds_control_2);
414 ch7017_read(dvo, CH7017_OUTPUTS_ENABLE, &priv->save_outputs_enable);
415 ch7017_read(dvo, CH7017_LVDS_POWER_DOWN, &priv->save_lvds_power_down);
416 ch7017_read(dvo, CH7017_POWER_MANAGEMENT, &priv->save_power_management);
417}
418
419static void ch7017_restore(struct intel_dvo_device *dvo)
420{
421 struct ch7017_priv *priv = dvo->dev_priv;
422
423 /* Power down before changing mode */
424 ch7017_dpms(dvo, DRM_MODE_DPMS_OFF);
425
426 ch7017_write(dvo, CH7017_HORIZONTAL_ACTIVE_PIXEL_INPUT, priv->save_hapi);
427 ch7017_write(dvo, CH7017_VERTICAL_ACTIVE_LINE_OUTPUT, priv->save_valo);
428 ch7017_write(dvo, CH7017_ACTIVE_INPUT_LINE_OUTPUT, priv->save_ailo);
429 ch7017_write(dvo, CH7017_LVDS_PLL_VCO_CONTROL, priv->save_lvds_pll_vco);
430 ch7017_write(dvo, CH7017_LVDS_PLL_FEEDBACK_DIV, priv->save_feedback_div);
431 ch7017_write(dvo, CH7017_LVDS_CONTROL_2, priv->save_lvds_control_2);
432 ch7017_write(dvo, CH7017_OUTPUTS_ENABLE, priv->save_outputs_enable);
433 ch7017_write(dvo, CH7017_LVDS_POWER_DOWN, priv->save_lvds_power_down);
434 ch7017_write(dvo, CH7017_POWER_MANAGEMENT, priv->save_power_management);
435}
436
437static void ch7017_destroy(struct intel_dvo_device *dvo) 395static void ch7017_destroy(struct intel_dvo_device *dvo)
438{ 396{
439 struct ch7017_priv *priv = dvo->dev_priv; 397 struct ch7017_priv *priv = dvo->dev_priv;
@@ -451,7 +409,5 @@ struct intel_dvo_dev_ops ch7017_ops = {
451 .mode_set = ch7017_mode_set, 409 .mode_set = ch7017_mode_set,
452 .dpms = ch7017_dpms, 410 .dpms = ch7017_dpms,
453 .dump_regs = ch7017_dump_regs, 411 .dump_regs = ch7017_dump_regs,
454 .save = ch7017_save,
455 .restore = ch7017_restore,
456 .destroy = ch7017_destroy, 412 .destroy = ch7017_destroy,
457}; 413};
diff --git a/drivers/gpu/drm/i915/dvo_ch7xxx.c b/drivers/gpu/drm/i915/dvo_ch7xxx.c
index d56ff5cc22b2..6f1944b24441 100644
--- a/drivers/gpu/drm/i915/dvo_ch7xxx.c
+++ b/drivers/gpu/drm/i915/dvo_ch7xxx.c
@@ -92,21 +92,10 @@ static struct ch7xxx_id_struct {
92 { CH7301_VID, "CH7301" }, 92 { CH7301_VID, "CH7301" },
93}; 93};
94 94
95struct ch7xxx_reg_state {
96 uint8_t regs[CH7xxx_NUM_REGS];
97};
98
99struct ch7xxx_priv { 95struct ch7xxx_priv {
100 bool quiet; 96 bool quiet;
101
102 struct ch7xxx_reg_state save_reg;
103 struct ch7xxx_reg_state mode_reg;
104 uint8_t save_TCTL, save_TPCP, save_TPD, save_TPVT;
105 uint8_t save_TLPF, save_TCT, save_PM, save_IDF;
106}; 97};
107 98
108static void ch7xxx_save(struct intel_dvo_device *dvo);
109
110static char *ch7xxx_get_id(uint8_t vid) 99static char *ch7xxx_get_id(uint8_t vid)
111{ 100{
112 int i; 101 int i;
@@ -312,42 +301,17 @@ static void ch7xxx_dpms(struct intel_dvo_device *dvo, int mode)
312 301
313static void ch7xxx_dump_regs(struct intel_dvo_device *dvo) 302static void ch7xxx_dump_regs(struct intel_dvo_device *dvo)
314{ 303{
315 struct ch7xxx_priv *ch7xxx = dvo->dev_priv;
316 int i; 304 int i;
317 305
318 for (i = 0; i < CH7xxx_NUM_REGS; i++) { 306 for (i = 0; i < CH7xxx_NUM_REGS; i++) {
307 uint8_t val;
319 if ((i % 8) == 0 ) 308 if ((i % 8) == 0 )
320 DRM_LOG_KMS("\n %02X: ", i); 309 DRM_LOG_KMS("\n %02X: ", i);
321 DRM_LOG_KMS("%02X ", ch7xxx->mode_reg.regs[i]); 310 ch7xxx_readb(dvo, i, &val);
311 DRM_LOG_KMS("%02X ", val);
322 } 312 }
323} 313}
324 314
325static void ch7xxx_save(struct intel_dvo_device *dvo)
326{
327 struct ch7xxx_priv *ch7xxx= dvo->dev_priv;
328
329 ch7xxx_readb(dvo, CH7xxx_TCTL, &ch7xxx->save_TCTL);
330 ch7xxx_readb(dvo, CH7xxx_TPCP, &ch7xxx->save_TPCP);
331 ch7xxx_readb(dvo, CH7xxx_TPD, &ch7xxx->save_TPD);
332 ch7xxx_readb(dvo, CH7xxx_TPVT, &ch7xxx->save_TPVT);
333 ch7xxx_readb(dvo, CH7xxx_TLPF, &ch7xxx->save_TLPF);
334 ch7xxx_readb(dvo, CH7xxx_PM, &ch7xxx->save_PM);
335 ch7xxx_readb(dvo, CH7xxx_IDF, &ch7xxx->save_IDF);
336}
337
338static void ch7xxx_restore(struct intel_dvo_device *dvo)
339{
340 struct ch7xxx_priv *ch7xxx = dvo->dev_priv;
341
342 ch7xxx_writeb(dvo, CH7xxx_TCTL, ch7xxx->save_TCTL);
343 ch7xxx_writeb(dvo, CH7xxx_TPCP, ch7xxx->save_TPCP);
344 ch7xxx_writeb(dvo, CH7xxx_TPD, ch7xxx->save_TPD);
345 ch7xxx_writeb(dvo, CH7xxx_TPVT, ch7xxx->save_TPVT);
346 ch7xxx_writeb(dvo, CH7xxx_TLPF, ch7xxx->save_TLPF);
347 ch7xxx_writeb(dvo, CH7xxx_IDF, ch7xxx->save_IDF);
348 ch7xxx_writeb(dvo, CH7xxx_PM, ch7xxx->save_PM);
349}
350
351static void ch7xxx_destroy(struct intel_dvo_device *dvo) 315static void ch7xxx_destroy(struct intel_dvo_device *dvo)
352{ 316{
353 struct ch7xxx_priv *ch7xxx = dvo->dev_priv; 317 struct ch7xxx_priv *ch7xxx = dvo->dev_priv;
@@ -365,7 +329,5 @@ struct intel_dvo_dev_ops ch7xxx_ops = {
365 .mode_set = ch7xxx_mode_set, 329 .mode_set = ch7xxx_mode_set,
366 .dpms = ch7xxx_dpms, 330 .dpms = ch7xxx_dpms,
367 .dump_regs = ch7xxx_dump_regs, 331 .dump_regs = ch7xxx_dump_regs,
368 .save = ch7xxx_save,
369 .restore = ch7xxx_restore,
370 .destroy = ch7xxx_destroy, 332 .destroy = ch7xxx_destroy,
371}; 333};
diff --git a/drivers/gpu/drm/i915/dvo_ivch.c b/drivers/gpu/drm/i915/dvo_ivch.c
index 24169e528f0f..a2ec3f487202 100644
--- a/drivers/gpu/drm/i915/dvo_ivch.c
+++ b/drivers/gpu/drm/i915/dvo_ivch.c
@@ -153,9 +153,6 @@ struct ivch_priv {
153 bool quiet; 153 bool quiet;
154 154
155 uint16_t width, height; 155 uint16_t width, height;
156
157 uint16_t save_VR01;
158 uint16_t save_VR40;
159}; 156};
160 157
161 158
@@ -405,22 +402,6 @@ static void ivch_dump_regs(struct intel_dvo_device *dvo)
405 DRM_LOG_KMS("VR8F: 0x%04x\n", val); 402 DRM_LOG_KMS("VR8F: 0x%04x\n", val);
406} 403}
407 404
408static void ivch_save(struct intel_dvo_device *dvo)
409{
410 struct ivch_priv *priv = dvo->dev_priv;
411
412 ivch_read(dvo, VR01, &priv->save_VR01);
413 ivch_read(dvo, VR40, &priv->save_VR40);
414}
415
416static void ivch_restore(struct intel_dvo_device *dvo)
417{
418 struct ivch_priv *priv = dvo->dev_priv;
419
420 ivch_write(dvo, VR01, priv->save_VR01);
421 ivch_write(dvo, VR40, priv->save_VR40);
422}
423
424static void ivch_destroy(struct intel_dvo_device *dvo) 405static void ivch_destroy(struct intel_dvo_device *dvo)
425{ 406{
426 struct ivch_priv *priv = dvo->dev_priv; 407 struct ivch_priv *priv = dvo->dev_priv;
@@ -434,8 +415,6 @@ static void ivch_destroy(struct intel_dvo_device *dvo)
434struct intel_dvo_dev_ops ivch_ops= { 415struct intel_dvo_dev_ops ivch_ops= {
435 .init = ivch_init, 416 .init = ivch_init,
436 .dpms = ivch_dpms, 417 .dpms = ivch_dpms,
437 .save = ivch_save,
438 .restore = ivch_restore,
439 .mode_valid = ivch_mode_valid, 418 .mode_valid = ivch_mode_valid,
440 .mode_set = ivch_mode_set, 419 .mode_set = ivch_mode_set,
441 .detect = ivch_detect, 420 .detect = ivch_detect,
diff --git a/drivers/gpu/drm/i915/dvo_sil164.c b/drivers/gpu/drm/i915/dvo_sil164.c
index 0001c13f0a80..9b8e6765cf26 100644
--- a/drivers/gpu/drm/i915/dvo_sil164.c
+++ b/drivers/gpu/drm/i915/dvo_sil164.c
@@ -58,17 +58,9 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
58 58
59#define SIL164_REGC 0x0c 59#define SIL164_REGC 0x0c
60 60
61struct sil164_save_rec {
62 uint8_t reg8;
63 uint8_t reg9;
64 uint8_t regc;
65};
66
67struct sil164_priv { 61struct sil164_priv {
68 //I2CDevRec d; 62 //I2CDevRec d;
69 bool quiet; 63 bool quiet;
70 struct sil164_save_rec save_regs;
71 struct sil164_save_rec mode_regs;
72}; 64};
73 65
74#define SILPTR(d) ((SIL164Ptr)(d->DriverPrivate.ptr)) 66#define SILPTR(d) ((SIL164Ptr)(d->DriverPrivate.ptr))
@@ -252,34 +244,6 @@ static void sil164_dump_regs(struct intel_dvo_device *dvo)
252 DRM_LOG_KMS("SIL164_REGC: 0x%02x\n", val); 244 DRM_LOG_KMS("SIL164_REGC: 0x%02x\n", val);
253} 245}
254 246
255static void sil164_save(struct intel_dvo_device *dvo)
256{
257 struct sil164_priv *sil= dvo->dev_priv;
258
259 if (!sil164_readb(dvo, SIL164_REG8, &sil->save_regs.reg8))
260 return;
261
262 if (!sil164_readb(dvo, SIL164_REG9, &sil->save_regs.reg9))
263 return;
264
265 if (!sil164_readb(dvo, SIL164_REGC, &sil->save_regs.regc))
266 return;
267
268 return;
269}
270
271static void sil164_restore(struct intel_dvo_device *dvo)
272{
273 struct sil164_priv *sil = dvo->dev_priv;
274
275 /* Restore it powered down initially */
276 sil164_writeb(dvo, SIL164_REG8, sil->save_regs.reg8 & ~0x1);
277
278 sil164_writeb(dvo, SIL164_REG9, sil->save_regs.reg9);
279 sil164_writeb(dvo, SIL164_REGC, sil->save_regs.regc);
280 sil164_writeb(dvo, SIL164_REG8, sil->save_regs.reg8);
281}
282
283static void sil164_destroy(struct intel_dvo_device *dvo) 247static void sil164_destroy(struct intel_dvo_device *dvo)
284{ 248{
285 struct sil164_priv *sil = dvo->dev_priv; 249 struct sil164_priv *sil = dvo->dev_priv;
@@ -297,7 +261,5 @@ struct intel_dvo_dev_ops sil164_ops = {
297 .mode_set = sil164_mode_set, 261 .mode_set = sil164_mode_set,
298 .dpms = sil164_dpms, 262 .dpms = sil164_dpms,
299 .dump_regs = sil164_dump_regs, 263 .dump_regs = sil164_dump_regs,
300 .save = sil164_save,
301 .restore = sil164_restore,
302 .destroy = sil164_destroy, 264 .destroy = sil164_destroy,
303}; 265};
diff --git a/drivers/gpu/drm/i915/dvo_tfp410.c b/drivers/gpu/drm/i915/dvo_tfp410.c
index c7c391bc116a..66c697bc9b22 100644
--- a/drivers/gpu/drm/i915/dvo_tfp410.c
+++ b/drivers/gpu/drm/i915/dvo_tfp410.c
@@ -86,16 +86,8 @@
86#define TFP410_V_RES_LO 0x3C 86#define TFP410_V_RES_LO 0x3C
87#define TFP410_V_RES_HI 0x3D 87#define TFP410_V_RES_HI 0x3D
88 88
89struct tfp410_save_rec {
90 uint8_t ctl1;
91 uint8_t ctl2;
92};
93
94struct tfp410_priv { 89struct tfp410_priv {
95 bool quiet; 90 bool quiet;
96
97 struct tfp410_save_rec saved_reg;
98 struct tfp410_save_rec mode_reg;
99}; 91};
100 92
101static bool tfp410_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch) 93static bool tfp410_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
@@ -293,28 +285,6 @@ static void tfp410_dump_regs(struct intel_dvo_device *dvo)
293 DRM_LOG_KMS("TFP410_V_RES: 0x%02X%02X\n", val2, val); 285 DRM_LOG_KMS("TFP410_V_RES: 0x%02X%02X\n", val2, val);
294} 286}
295 287
296static void tfp410_save(struct intel_dvo_device *dvo)
297{
298 struct tfp410_priv *tfp = dvo->dev_priv;
299
300 if (!tfp410_readb(dvo, TFP410_CTL_1, &tfp->saved_reg.ctl1))
301 return;
302
303 if (!tfp410_readb(dvo, TFP410_CTL_2, &tfp->saved_reg.ctl2))
304 return;
305}
306
307static void tfp410_restore(struct intel_dvo_device *dvo)
308{
309 struct tfp410_priv *tfp = dvo->dev_priv;
310
311 /* Restore it powered down initially */
312 tfp410_writeb(dvo, TFP410_CTL_1, tfp->saved_reg.ctl1 & ~0x1);
313
314 tfp410_writeb(dvo, TFP410_CTL_2, tfp->saved_reg.ctl2);
315 tfp410_writeb(dvo, TFP410_CTL_1, tfp->saved_reg.ctl1);
316}
317
318static void tfp410_destroy(struct intel_dvo_device *dvo) 288static void tfp410_destroy(struct intel_dvo_device *dvo)
319{ 289{
320 struct tfp410_priv *tfp = dvo->dev_priv; 290 struct tfp410_priv *tfp = dvo->dev_priv;
@@ -332,7 +302,5 @@ struct intel_dvo_dev_ops tfp410_ops = {
332 .mode_set = tfp410_mode_set, 302 .mode_set = tfp410_mode_set,
333 .dpms = tfp410_dpms, 303 .dpms = tfp410_dpms,
334 .dump_regs = tfp410_dump_regs, 304 .dump_regs = tfp410_dump_regs,
335 .save = tfp410_save,
336 .restore = tfp410_restore,
337 .destroy = tfp410_destroy, 305 .destroy = tfp410_destroy,
338}; 306};
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index a0b8447b06e7..322070c0c631 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -96,19 +96,18 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
96 spin_lock(lock); 96 spin_lock(lock);
97 list_for_each_entry(obj_priv, head, list) 97 list_for_each_entry(obj_priv, head, list)
98 { 98 {
99 struct drm_gem_object *obj = obj_priv->obj;
100
101 seq_printf(m, " %p: %s %8zd %08x %08x %d%s%s", 99 seq_printf(m, " %p: %s %8zd %08x %08x %d%s%s",
102 obj, 100 &obj_priv->base,
103 get_pin_flag(obj_priv), 101 get_pin_flag(obj_priv),
104 obj->size, 102 obj_priv->base.size,
105 obj->read_domains, obj->write_domain, 103 obj_priv->base.read_domains,
104 obj_priv->base.write_domain,
106 obj_priv->last_rendering_seqno, 105 obj_priv->last_rendering_seqno,
107 obj_priv->dirty ? " dirty" : "", 106 obj_priv->dirty ? " dirty" : "",
108 obj_priv->madv == I915_MADV_DONTNEED ? " purgeable" : ""); 107 obj_priv->madv == I915_MADV_DONTNEED ? " purgeable" : "");
109 108
110 if (obj->name) 109 if (obj_priv->base.name)
111 seq_printf(m, " (name: %d)", obj->name); 110 seq_printf(m, " (name: %d)", obj_priv->base.name);
112 if (obj_priv->fence_reg != I915_FENCE_REG_NONE) 111 if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
113 seq_printf(m, " (fence: %d)", obj_priv->fence_reg); 112 seq_printf(m, " (fence: %d)", obj_priv->fence_reg);
114 if (obj_priv->gtt_space != NULL) 113 if (obj_priv->gtt_space != NULL)
@@ -289,7 +288,7 @@ static int i915_batchbuffer_info(struct seq_file *m, void *data)
289 spin_lock(&dev_priv->mm.active_list_lock); 288 spin_lock(&dev_priv->mm.active_list_lock);
290 289
291 list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) { 290 list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) {
292 obj = obj_priv->obj; 291 obj = &obj_priv->base;
293 if (obj->read_domains & I915_GEM_DOMAIN_COMMAND) { 292 if (obj->read_domains & I915_GEM_DOMAIN_COMMAND) {
294 ret = i915_gem_object_get_pages(obj, 0); 293 ret = i915_gem_object_get_pages(obj, 0);
295 if (ret) { 294 if (ret) {
@@ -567,23 +566,14 @@ static int i915_fbc_status(struct seq_file *m, void *unused)
567{ 566{
568 struct drm_info_node *node = (struct drm_info_node *) m->private; 567 struct drm_info_node *node = (struct drm_info_node *) m->private;
569 struct drm_device *dev = node->minor->dev; 568 struct drm_device *dev = node->minor->dev;
570 struct drm_crtc *crtc;
571 drm_i915_private_t *dev_priv = dev->dev_private; 569 drm_i915_private_t *dev_priv = dev->dev_private;
572 bool fbc_enabled = false;
573 570
574 if (!dev_priv->display.fbc_enabled) { 571 if (!I915_HAS_FBC(dev)) {
575 seq_printf(m, "FBC unsupported on this chipset\n"); 572 seq_printf(m, "FBC unsupported on this chipset\n");
576 return 0; 573 return 0;
577 } 574 }
578 575
579 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 576 if (intel_fbc_enabled(dev)) {
580 if (!crtc->enabled)
581 continue;
582 if (dev_priv->display.fbc_enabled(crtc))
583 fbc_enabled = true;
584 }
585
586 if (fbc_enabled) {
587 seq_printf(m, "FBC enabled\n"); 577 seq_printf(m, "FBC enabled\n");
588 } else { 578 } else {
589 seq_printf(m, "FBC disabled: "); 579 seq_printf(m, "FBC disabled: ");
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index c3cfafcbfe7d..851a2f8ed6e6 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1357,13 +1357,12 @@ static void i915_setup_compression(struct drm_device *dev, int size)
1357 1357
1358 dev_priv->cfb_size = size; 1358 dev_priv->cfb_size = size;
1359 1359
1360 intel_disable_fbc(dev);
1360 dev_priv->compressed_fb = compressed_fb; 1361 dev_priv->compressed_fb = compressed_fb;
1361 1362
1362 if (IS_GM45(dev)) { 1363 if (IS_GM45(dev)) {
1363 g4x_disable_fbc(dev);
1364 I915_WRITE(DPFC_CB_BASE, compressed_fb->start); 1364 I915_WRITE(DPFC_CB_BASE, compressed_fb->start);
1365 } else { 1365 } else {
1366 i8xx_disable_fbc(dev);
1367 I915_WRITE(FBC_CFB_BASE, cfb_base); 1366 I915_WRITE(FBC_CFB_BASE, cfb_base);
1368 I915_WRITE(FBC_LL_BASE, ll_base); 1367 I915_WRITE(FBC_LL_BASE, ll_base);
1369 dev_priv->compressed_llb = compressed_llb; 1368 dev_priv->compressed_llb = compressed_llb;
@@ -1504,7 +1503,7 @@ static int i915_load_modeset_init(struct drm_device *dev,
1504 1503
1505 I915_WRITE(INSTPM, (1 << 5) | (1 << 21)); 1504 I915_WRITE(INSTPM, (1 << 5) | (1 << 21));
1506 1505
1507 drm_helper_initial_config(dev); 1506 intel_fbdev_init(dev);
1508 1507
1509 return 0; 1508 return 0;
1510 1509
@@ -1591,7 +1590,7 @@ static void i915_get_mem_freq(struct drm_device *dev)
1591 */ 1590 */
1592int i915_driver_load(struct drm_device *dev, unsigned long flags) 1591int i915_driver_load(struct drm_device *dev, unsigned long flags)
1593{ 1592{
1594 struct drm_i915_private *dev_priv = dev->dev_private; 1593 struct drm_i915_private *dev_priv;
1595 resource_size_t base, size; 1594 resource_size_t base, size;
1596 int ret = 0, mmio_bar; 1595 int ret = 0, mmio_bar;
1597 uint32_t agp_size, prealloc_size, prealloc_start; 1596 uint32_t agp_size, prealloc_size, prealloc_start;
@@ -1723,6 +1722,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1723 /* Start out suspended */ 1722 /* Start out suspended */
1724 dev_priv->mm.suspended = 1; 1723 dev_priv->mm.suspended = 1;
1725 1724
1725 intel_detect_pch(dev);
1726
1726 if (drm_core_check_feature(dev, DRIVER_MODESET)) { 1727 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
1727 ret = i915_load_modeset_init(dev, prealloc_start, 1728 ret = i915_load_modeset_init(dev, prealloc_start,
1728 prealloc_size, agp_size); 1729 prealloc_size, agp_size);
@@ -1769,6 +1770,8 @@ int i915_driver_unload(struct drm_device *dev)
1769 } 1770 }
1770 1771
1771 if (drm_core_check_feature(dev, DRIVER_MODESET)) { 1772 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
1773 intel_modeset_cleanup(dev);
1774
1772 /* 1775 /*
1773 * free the memory space allocated for the child device 1776 * free the memory space allocated for the child device
1774 * config parsed from VBT 1777 * config parsed from VBT
@@ -1792,8 +1795,6 @@ int i915_driver_unload(struct drm_device *dev)
1792 intel_opregion_free(dev, 0); 1795 intel_opregion_free(dev, 0);
1793 1796
1794 if (drm_core_check_feature(dev, DRIVER_MODESET)) { 1797 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
1795 intel_modeset_cleanup(dev);
1796
1797 i915_gem_free_all_phys_object(dev); 1798 i915_gem_free_all_phys_object(dev);
1798 1799
1799 mutex_lock(&dev->struct_mutex); 1800 mutex_lock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index cc03537bb883..5c51e45ab68d 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -188,6 +188,35 @@ const static struct pci_device_id pciidlist[] = {
188MODULE_DEVICE_TABLE(pci, pciidlist); 188MODULE_DEVICE_TABLE(pci, pciidlist);
189#endif 189#endif
190 190
191#define INTEL_PCH_DEVICE_ID_MASK 0xff00
192#define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00
193
194void intel_detect_pch(struct drm_device *dev)
195{
196 struct drm_i915_private *dev_priv = dev->dev_private;
197 struct pci_dev *pch;
198
199 /*
200	 * Probe the ISA bridge instead of Dev31:Fun0 to make graphics
201	 * device passthrough easier for a VMM, which then only needs to
202	 * expose the ISA bridge to let the driver know what real hardware
203	 * sits underneath. This is a requirement from the virtualization team.
204 */
205 pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
206 if (pch) {
207 if (pch->vendor == PCI_VENDOR_ID_INTEL) {
208 int id;
209 id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
210
211 if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
212 dev_priv->pch_type = PCH_CPT;
213 DRM_DEBUG_KMS("Found CougarPoint PCH\n");
214 }
215 }
216 pci_dev_put(pch);
217 }
218}
219
191static int i915_drm_freeze(struct drm_device *dev) 220static int i915_drm_freeze(struct drm_device *dev)
192{ 221{
193 struct drm_i915_private *dev_priv = dev->dev_private; 222 struct drm_i915_private *dev_priv = dev->dev_private;
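Only the upper byte of the ISA bridge's PCI device ID matters here, and
0x1c00 identifies CougarPoint; anything else currently leaves pch_type at its
zero-initialized PCH_IBX default. Consumers branch through the HAS_PCH_CPT()
macro added to i915_drv.h below, as in this sketch mirroring the intel_crt.c
hunk later in the patch:

	if (HAS_PCH_CPT(dev))
		/* CPT moved transcoder select into the port register */
		adpa |= PORT_TRANS_A_SEL_CPT;
	else
		adpa |= ADPA_PIPE_A_SELECT;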
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 6e4790065d9e..bf11ad9998db 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -175,7 +175,7 @@ struct drm_i915_error_state {
175 175
176struct drm_i915_display_funcs { 176struct drm_i915_display_funcs {
177 void (*dpms)(struct drm_crtc *crtc, int mode); 177 void (*dpms)(struct drm_crtc *crtc, int mode);
178 bool (*fbc_enabled)(struct drm_crtc *crtc); 178 bool (*fbc_enabled)(struct drm_device *dev);
179 void (*enable_fbc)(struct drm_crtc *crtc, unsigned long interval); 179 void (*enable_fbc)(struct drm_crtc *crtc, unsigned long interval);
180 void (*disable_fbc)(struct drm_device *dev); 180 void (*disable_fbc)(struct drm_device *dev);
181 int (*get_display_clock_speed)(struct drm_device *dev); 181 int (*get_display_clock_speed)(struct drm_device *dev);
@@ -222,6 +222,13 @@ enum no_fbc_reason {
222 FBC_NOT_TILED, /* buffer not tiled */ 222 FBC_NOT_TILED, /* buffer not tiled */
223}; 223};
224 224
225enum intel_pch {
226 PCH_IBX, /* Ibexpeak PCH */
227 PCH_CPT, /* Cougarpoint PCH */
228};
229
230struct intel_fbdev;
231
225typedef struct drm_i915_private { 232typedef struct drm_i915_private {
226 struct drm_device *dev; 233 struct drm_device *dev;
227 234
@@ -335,6 +342,9 @@ typedef struct drm_i915_private {
335 /* Display functions */ 342 /* Display functions */
336 struct drm_i915_display_funcs display; 343 struct drm_i915_display_funcs display;
337 344
345 /* PCH chipset type */
346 enum intel_pch pch_type;
347
338 /* Register state */ 348 /* Register state */
339 bool modeset_on_lid; 349 bool modeset_on_lid;
340 u8 saveLBB; 350 u8 saveLBB;
@@ -637,11 +647,14 @@ typedef struct drm_i915_private {
637 647
638 struct drm_mm_node *compressed_fb; 648 struct drm_mm_node *compressed_fb;
639 struct drm_mm_node *compressed_llb; 649 struct drm_mm_node *compressed_llb;
650
651	/* fbdev registered on this device */
652 struct intel_fbdev *fbdev;
640} drm_i915_private_t; 653} drm_i915_private_t;
641 654
642/** driver private structure attached to each drm_gem_object */ 655/** driver private structure attached to each drm_gem_object */
643struct drm_i915_gem_object { 656struct drm_i915_gem_object {
644 struct drm_gem_object *obj; 657 struct drm_gem_object base;
645 658
646 /** Current space allocated to this object in the GTT, if any. */ 659 /** Current space allocated to this object in the GTT, if any. */
647 struct drm_mm_node *gtt_space; 660 struct drm_mm_node *gtt_space;
@@ -740,7 +753,7 @@ struct drm_i915_gem_object {
740 atomic_t pending_flip; 753 atomic_t pending_flip;
741}; 754};
742 755
743#define to_intel_bo(x) ((struct drm_i915_gem_object *) (x)->driver_private) 756#define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
744 757
745/** 758/**
746 * Request queue structure. 759 * Request queue structure.
@@ -902,6 +915,8 @@ int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
902 struct drm_file *file_priv); 915 struct drm_file *file_priv);
903void i915_gem_load(struct drm_device *dev); 916void i915_gem_load(struct drm_device *dev);
904int i915_gem_init_object(struct drm_gem_object *obj); 917int i915_gem_init_object(struct drm_gem_object *obj);
918struct drm_gem_object *i915_gem_alloc_object(struct drm_device *dev,
919 size_t size);
905void i915_gem_free_object(struct drm_gem_object *obj); 920void i915_gem_free_object(struct drm_gem_object *obj);
906int i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment); 921int i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment);
907void i915_gem_object_unpin(struct drm_gem_object *obj); 922void i915_gem_object_unpin(struct drm_gem_object *obj);
@@ -998,6 +1013,12 @@ extern void intel_modeset_cleanup(struct drm_device *dev);
998extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state); 1013extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
999extern void i8xx_disable_fbc(struct drm_device *dev); 1014extern void i8xx_disable_fbc(struct drm_device *dev);
1000extern void g4x_disable_fbc(struct drm_device *dev); 1015extern void g4x_disable_fbc(struct drm_device *dev);
1016extern void intel_disable_fbc(struct drm_device *dev);
1017extern void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval);
1018extern bool intel_fbc_enabled(struct drm_device *dev);
1019
1020extern void intel_detect_pch(struct drm_device *dev);
1021extern int intel_trans_dp_port_sel(struct drm_crtc *crtc);
1001 1022
1002/** 1023/**
1003 * Lock test for when it's just for synchronization of ring access. 1024 * Lock test for when it's just for synchronization of ring access.
@@ -1130,7 +1151,8 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
1130#define SUPPORTS_INTEGRATED_DP(dev) (IS_G4X(dev) || IS_IRONLAKE(dev)) 1151#define SUPPORTS_INTEGRATED_DP(dev) (IS_G4X(dev) || IS_IRONLAKE(dev))
1131#define SUPPORTS_EDP(dev) (IS_IRONLAKE_M(dev)) 1152#define SUPPORTS_EDP(dev) (IS_IRONLAKE_M(dev))
1132#define SUPPORTS_TV(dev) (IS_I9XX(dev) && IS_MOBILE(dev) && \ 1153#define SUPPORTS_TV(dev) (IS_I9XX(dev) && IS_MOBILE(dev) && \
1133 !IS_IRONLAKE(dev) && !IS_PINEVIEW(dev)) 1154 !IS_IRONLAKE(dev) && !IS_PINEVIEW(dev) && \
1155 !IS_GEN6(dev))
1134#define I915_HAS_HOTPLUG(dev) (INTEL_INFO(dev)->has_hotplug) 1156#define I915_HAS_HOTPLUG(dev) (INTEL_INFO(dev)->has_hotplug)
1135/* dsparb controlled by hw only */ 1157/* dsparb controlled by hw only */
1136#define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IRONLAKE(dev)) 1158#define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IRONLAKE(dev))
@@ -1144,6 +1166,9 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
1144 IS_GEN6(dev)) 1166 IS_GEN6(dev))
1145#define HAS_PIPE_CONTROL(dev) (IS_IRONLAKE(dev) || IS_GEN6(dev)) 1167#define HAS_PIPE_CONTROL(dev) (IS_IRONLAKE(dev) || IS_GEN6(dev))
1146 1168
1169#define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type)
1170#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
1171
1147#define PRIMARY_RINGBUFFER_SIZE (128*1024) 1172#define PRIMARY_RINGBUFFER_SIZE (128*1024)
1148 1173
1149#endif 1174#endif
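Embedding the GEM object as "base" removes an allocation and a pointer hop in
each direction: private-to-GEM is a field access, and GEM-to-private is pure
pointer arithmetic via container_of(), as the new to_intel_bo() shows. The
generic shape of the pattern, with hypothetical foo_* names:

	struct foo_object {
		struct drm_gem_object base;	/* may sit at any offset */
		int pin_count;
	};

	#define to_foo(obj) container_of(obj, struct foo_object, base)

	static void foo_pin(struct drm_gem_object *obj)
	{
		struct foo_object *foo = to_foo(obj);	/* never NULL */

		foo->pin_count++;
	}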
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index ef3d91dda71a..666d75570502 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -124,7 +124,7 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data,
124 args->size = roundup(args->size, PAGE_SIZE); 124 args->size = roundup(args->size, PAGE_SIZE);
125 125
126 /* Allocate the new object */ 126 /* Allocate the new object */
127 obj = drm_gem_object_alloc(dev, args->size); 127 obj = i915_gem_alloc_object(dev, args->size);
128 if (obj == NULL) 128 if (obj == NULL)
129 return -ENOMEM; 129 return -ENOMEM;
130 130
@@ -1566,7 +1566,7 @@ i915_gem_process_flushing_list(struct drm_device *dev,
1566 list_for_each_entry_safe(obj_priv, next, 1566 list_for_each_entry_safe(obj_priv, next,
1567 &dev_priv->mm.gpu_write_list, 1567 &dev_priv->mm.gpu_write_list,
1568 gpu_write_list) { 1568 gpu_write_list) {
1569 struct drm_gem_object *obj = obj_priv->obj; 1569 struct drm_gem_object *obj = &obj_priv->base;
1570 1570
1571 if ((obj->write_domain & flush_domains) == 1571 if ((obj->write_domain & flush_domains) ==
1572 obj->write_domain) { 1572 obj->write_domain) {
@@ -1745,7 +1745,7 @@ i915_gem_retire_request(struct drm_device *dev,
1745 obj_priv = list_first_entry(&dev_priv->mm.active_list, 1745 obj_priv = list_first_entry(&dev_priv->mm.active_list,
1746 struct drm_i915_gem_object, 1746 struct drm_i915_gem_object,
1747 list); 1747 list);
1748 obj = obj_priv->obj; 1748 obj = &obj_priv->base;
1749 1749
1750 /* If the seqno being retired doesn't match the oldest in the 1750 /* If the seqno being retired doesn't match the oldest in the
1751 * list, then the oldest in the list must still be newer than 1751 * list, then the oldest in the list must still be newer than
@@ -2119,7 +2119,7 @@ i915_gem_find_inactive_object(struct drm_device *dev, int min_size)
2119 2119
2120 /* Try to find the smallest clean object */ 2120 /* Try to find the smallest clean object */
2121 list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) { 2121 list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
2122 struct drm_gem_object *obj = obj_priv->obj; 2122 struct drm_gem_object *obj = &obj_priv->base;
2123 if (obj->size >= min_size) { 2123 if (obj->size >= min_size) {
2124 if ((!obj_priv->dirty || 2124 if ((!obj_priv->dirty ||
2125 i915_gem_object_is_purgeable(obj_priv)) && 2125 i915_gem_object_is_purgeable(obj_priv)) &&
@@ -2253,7 +2253,7 @@ i915_gem_evict_something(struct drm_device *dev, int min_size)
2253 2253
2254 /* Find an object that we can immediately reuse */ 2254 /* Find an object that we can immediately reuse */
2255 list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, list) { 2255 list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, list) {
2256 obj = obj_priv->obj; 2256 obj = &obj_priv->base;
2257 if (obj->size >= min_size) 2257 if (obj->size >= min_size)
2258 break; 2258 break;
2259 2259
@@ -2487,7 +2487,7 @@ static int i915_find_fence_reg(struct drm_device *dev)
2487 i = I915_FENCE_REG_NONE; 2487 i = I915_FENCE_REG_NONE;
2488 list_for_each_entry(obj_priv, &dev_priv->mm.fence_list, 2488 list_for_each_entry(obj_priv, &dev_priv->mm.fence_list,
2489 fence_list) { 2489 fence_list) {
2490 obj = obj_priv->obj; 2490 obj = &obj_priv->base;
2491 2491
2492 if (obj_priv->pin_count) 2492 if (obj_priv->pin_count)
2493 continue; 2493 continue;
@@ -4471,34 +4471,40 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
4471 return 0; 4471 return 0;
4472} 4472}
4473 4473
4474int i915_gem_init_object(struct drm_gem_object *obj) 4474struct drm_gem_object *i915_gem_alloc_object(struct drm_device *dev,
4475 size_t size)
4475{ 4476{
4476 struct drm_i915_gem_object *obj_priv; 4477 struct drm_i915_gem_object *obj;
4477 4478
4478 obj_priv = kzalloc(sizeof(*obj_priv), GFP_KERNEL); 4479 obj = kzalloc(sizeof(*obj), GFP_KERNEL);
4479 if (obj_priv == NULL) 4480 if (obj == NULL)
4480 return -ENOMEM; 4481 return NULL;
4481 4482
4482 /* 4483 if (drm_gem_object_init(dev, &obj->base, size) != 0) {
4483 * We've just allocated pages from the kernel, 4484 kfree(obj);
4484 * so they've just been written by the CPU with 4485 return NULL;
4485 * zeros. They'll need to be clflushed before we 4486 }
4486 * use them with the GPU.
4487 */
4488 obj->write_domain = I915_GEM_DOMAIN_CPU;
4489 obj->read_domains = I915_GEM_DOMAIN_CPU;
4490 4487
4491 obj_priv->agp_type = AGP_USER_MEMORY; 4488 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
4489 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
4492 4490
4493 obj->driver_private = obj_priv; 4491 obj->agp_type = AGP_USER_MEMORY;
4494 obj_priv->obj = obj; 4492
4495 obj_priv->fence_reg = I915_FENCE_REG_NONE; 4493 obj->base.driver_private = NULL;
4496 INIT_LIST_HEAD(&obj_priv->list); 4494 obj->fence_reg = I915_FENCE_REG_NONE;
4497 INIT_LIST_HEAD(&obj_priv->gpu_write_list); 4495 INIT_LIST_HEAD(&obj->list);
4498 INIT_LIST_HEAD(&obj_priv->fence_list); 4496 INIT_LIST_HEAD(&obj->gpu_write_list);
4499 obj_priv->madv = I915_MADV_WILLNEED; 4497 INIT_LIST_HEAD(&obj->fence_list);
4498 obj->madv = I915_MADV_WILLNEED;
4500 4499
4501 trace_i915_gem_object_create(obj); 4500 trace_i915_gem_object_create(&obj->base);
4501
4502 return &obj->base;
4503}
4504
4505int i915_gem_init_object(struct drm_gem_object *obj)
4506{
4507 BUG();
4502 4508
4503 return 0; 4509 return 0;
4504} 4510}
@@ -4521,9 +4527,11 @@ void i915_gem_free_object(struct drm_gem_object *obj)
4521 if (obj_priv->mmap_offset) 4527 if (obj_priv->mmap_offset)
4522 i915_gem_free_mmap_offset(obj); 4528 i915_gem_free_mmap_offset(obj);
4523 4529
4530 drm_gem_object_release(obj);
4531
4524 kfree(obj_priv->page_cpu_valid); 4532 kfree(obj_priv->page_cpu_valid);
4525 kfree(obj_priv->bit_17); 4533 kfree(obj_priv->bit_17);
4526 kfree(obj->driver_private); 4534 kfree(obj_priv);
4527} 4535}
4528 4536
4529/** Unbinds all inactive objects. */ 4537/** Unbinds all inactive objects. */
@@ -4536,9 +4544,9 @@ i915_gem_evict_from_inactive_list(struct drm_device *dev)
4536 struct drm_gem_object *obj; 4544 struct drm_gem_object *obj;
4537 int ret; 4545 int ret;
4538 4546
4539 obj = list_first_entry(&dev_priv->mm.inactive_list, 4547 obj = &list_first_entry(&dev_priv->mm.inactive_list,
4540 struct drm_i915_gem_object, 4548 struct drm_i915_gem_object,
4541 list)->obj; 4549 list)->base;
4542 4550
4543 ret = i915_gem_object_unbind(obj); 4551 ret = i915_gem_object_unbind(obj);
4544 if (ret != 0) { 4552 if (ret != 0) {
@@ -4608,7 +4616,7 @@ i915_gem_init_pipe_control(struct drm_device *dev)
4608 struct drm_i915_gem_object *obj_priv; 4616 struct drm_i915_gem_object *obj_priv;
4609 int ret; 4617 int ret;
4610 4618
4611 obj = drm_gem_object_alloc(dev, 4096); 4619 obj = i915_gem_alloc_object(dev, 4096);
4612 if (obj == NULL) { 4620 if (obj == NULL) {
4613 DRM_ERROR("Failed to allocate seqno page\n"); 4621 DRM_ERROR("Failed to allocate seqno page\n");
4614 ret = -ENOMEM; 4622 ret = -ENOMEM;
@@ -4653,7 +4661,7 @@ i915_gem_init_hws(struct drm_device *dev)
4653 if (!I915_NEED_GFX_HWS(dev)) 4661 if (!I915_NEED_GFX_HWS(dev))
4654 return 0; 4662 return 0;
4655 4663
4656 obj = drm_gem_object_alloc(dev, 4096); 4664 obj = i915_gem_alloc_object(dev, 4096);
4657 if (obj == NULL) { 4665 if (obj == NULL) {
4658 DRM_ERROR("Failed to allocate status page\n"); 4666 DRM_ERROR("Failed to allocate status page\n");
4659 ret = -ENOMEM; 4667 ret = -ENOMEM;
@@ -4764,7 +4772,7 @@ i915_gem_init_ringbuffer(struct drm_device *dev)
4764 if (ret != 0) 4772 if (ret != 0)
4765 return ret; 4773 return ret;
4766 4774
4767 obj = drm_gem_object_alloc(dev, 128 * 1024); 4775 obj = i915_gem_alloc_object(dev, 128 * 1024);
4768 if (obj == NULL) { 4776 if (obj == NULL) {
4769 DRM_ERROR("Failed to allocate ringbuffer\n"); 4777 DRM_ERROR("Failed to allocate ringbuffer\n");
4770 i915_gem_cleanup_hws(dev); 4778 i915_gem_cleanup_hws(dev);
@@ -5185,6 +5193,20 @@ void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv)
5185} 5193}
5186 5194
5187static int 5195static int
5196i915_gpu_is_active(struct drm_device *dev)
5197{
5198 drm_i915_private_t *dev_priv = dev->dev_private;
5199 int lists_empty;
5200
5201 spin_lock(&dev_priv->mm.active_list_lock);
5202 lists_empty = list_empty(&dev_priv->mm.flushing_list) &&
5203 list_empty(&dev_priv->mm.active_list);
5204 spin_unlock(&dev_priv->mm.active_list_lock);
5205
5206 return !lists_empty;
5207}
5208
5209static int
5188i915_gem_shrink(int nr_to_scan, gfp_t gfp_mask) 5210i915_gem_shrink(int nr_to_scan, gfp_t gfp_mask)
5189{ 5211{
5190 drm_i915_private_t *dev_priv, *next_dev; 5212 drm_i915_private_t *dev_priv, *next_dev;
@@ -5213,6 +5235,7 @@ i915_gem_shrink(int nr_to_scan, gfp_t gfp_mask)
5213 5235
5214 spin_lock(&shrink_list_lock); 5236 spin_lock(&shrink_list_lock);
5215 5237
5238rescan:
5216 /* first scan for clean buffers */ 5239 /* first scan for clean buffers */
5217 list_for_each_entry_safe(dev_priv, next_dev, 5240 list_for_each_entry_safe(dev_priv, next_dev,
5218 &shrink_list, mm.shrink_list) { 5241 &shrink_list, mm.shrink_list) {
@@ -5229,7 +5252,7 @@ i915_gem_shrink(int nr_to_scan, gfp_t gfp_mask)
5229 &dev_priv->mm.inactive_list, 5252 &dev_priv->mm.inactive_list,
5230 list) { 5253 list) {
5231 if (i915_gem_object_is_purgeable(obj_priv)) { 5254 if (i915_gem_object_is_purgeable(obj_priv)) {
5232 i915_gem_object_unbind(obj_priv->obj); 5255 i915_gem_object_unbind(&obj_priv->base);
5233 if (--nr_to_scan <= 0) 5256 if (--nr_to_scan <= 0)
5234 break; 5257 break;
5235 } 5258 }
@@ -5258,7 +5281,7 @@ i915_gem_shrink(int nr_to_scan, gfp_t gfp_mask)
5258 &dev_priv->mm.inactive_list, 5281 &dev_priv->mm.inactive_list,
5259 list) { 5282 list) {
5260 if (nr_to_scan > 0) { 5283 if (nr_to_scan > 0) {
5261 i915_gem_object_unbind(obj_priv->obj); 5284 i915_gem_object_unbind(&obj_priv->base);
5262 nr_to_scan--; 5285 nr_to_scan--;
5263 } else 5286 } else
5264 cnt++; 5287 cnt++;
@@ -5270,6 +5293,36 @@ i915_gem_shrink(int nr_to_scan, gfp_t gfp_mask)
5270 would_deadlock = 0; 5293 would_deadlock = 0;
5271 } 5294 }
5272 5295
5296 if (nr_to_scan) {
5297 int active = 0;
5298
5299 /*
5300 * We are desperate for pages, so as a last resort, wait
5301 * for the GPU to finish and discard whatever we can.
5302	 * This dramatically reduces the number of OOM-killer
5303	 * events whilst running the GPU aggressively.
5304 */
5305 list_for_each_entry(dev_priv, &shrink_list, mm.shrink_list) {
5306 struct drm_device *dev = dev_priv->dev;
5307
5308 if (!mutex_trylock(&dev->struct_mutex))
5309 continue;
5310
5311 spin_unlock(&shrink_list_lock);
5312
5313 if (i915_gpu_is_active(dev)) {
5314 i915_gpu_idle(dev);
5315 active++;
5316 }
5317
5318 spin_lock(&shrink_list_lock);
5319 mutex_unlock(&dev->struct_mutex);
5320 }
5321
5322 if (active)
5323 goto rescan;
5324 }
5325
5273 spin_unlock(&shrink_list_lock); 5326 spin_unlock(&shrink_list_lock);
5274 5327
5275 if (would_deadlock) 5328 if (would_deadlock)
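The rescan label turns the shrinker into a loop with an escalation step:
unbind clean purgeable buffers, then unbind the remaining inactive buffers,
and only when still short of nr_to_scan idle the GPU and start over. The
trylock is what keeps the last resort safe to call from reclaim; the locking
shape, extracted from the hunk above:

	if (!mutex_trylock(&dev->struct_mutex))
		continue;	/* contended: never block reclaim on it */

	spin_unlock(&shrink_list_lock);	/* drop around the long GPU wait */
	if (i915_gpu_is_active(dev)) {
		i915_gpu_idle(dev);
		active++;
	}
	spin_lock(&shrink_list_lock);
	mutex_unlock(&dev->struct_mutex);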
diff --git a/drivers/gpu/drm/i915/i915_gem_debug.c b/drivers/gpu/drm/i915/i915_gem_debug.c
index 35507cf53fa3..80f380b1d951 100644
--- a/drivers/gpu/drm/i915/i915_gem_debug.c
+++ b/drivers/gpu/drm/i915/i915_gem_debug.c
@@ -39,7 +39,7 @@ i915_verify_inactive(struct drm_device *dev, char *file, int line)
39 struct drm_i915_gem_object *obj_priv; 39 struct drm_i915_gem_object *obj_priv;
40 40
41 list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) { 41 list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
42 obj = obj_priv->obj; 42 obj = &obj_priv->base;
43 if (obj_priv->pin_count || obj_priv->active || 43 if (obj_priv->pin_count || obj_priv->active ||
44 (obj->write_domain & ~(I915_GEM_DOMAIN_CPU | 44 (obj->write_domain & ~(I915_GEM_DOMAIN_CPU |
45 I915_GEM_DOMAIN_GTT))) 45 I915_GEM_DOMAIN_GTT)))
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 2b8b969d0c15..a7e4b1f27497 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -169,9 +169,13 @@ void intel_enable_asle (struct drm_device *dev)
169 169
170 if (HAS_PCH_SPLIT(dev)) 170 if (HAS_PCH_SPLIT(dev))
171 ironlake_enable_display_irq(dev_priv, DE_GSE); 171 ironlake_enable_display_irq(dev_priv, DE_GSE);
172 else 172 else {
173 i915_enable_pipestat(dev_priv, 1, 173 i915_enable_pipestat(dev_priv, 1,
174 I915_LEGACY_BLC_EVENT_ENABLE); 174 I915_LEGACY_BLC_EVENT_ENABLE);
175 if (IS_I965G(dev))
176 i915_enable_pipestat(dev_priv, 0,
177 I915_LEGACY_BLC_EVENT_ENABLE);
178 }
175} 179}
176 180
177/** 181/**
@@ -256,17 +260,18 @@ static void i915_hotplug_work_func(struct work_struct *work)
256 hotplug_work); 260 hotplug_work);
257 struct drm_device *dev = dev_priv->dev; 261 struct drm_device *dev = dev_priv->dev;
258 struct drm_mode_config *mode_config = &dev->mode_config; 262 struct drm_mode_config *mode_config = &dev->mode_config;
259 struct drm_connector *connector; 263 struct drm_encoder *encoder;
260 264
261 if (mode_config->num_connector) { 265 if (mode_config->num_encoder) {
262 list_for_each_entry(connector, &mode_config->connector_list, head) { 266 list_for_each_entry(encoder, &mode_config->encoder_list, head) {
263 struct intel_encoder *intel_encoder = to_intel_encoder(connector); 267 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
264 268
265 if (intel_encoder->hot_plug) 269 if (intel_encoder->hot_plug)
266 (*intel_encoder->hot_plug) (intel_encoder); 270 (*intel_encoder->hot_plug) (intel_encoder);
267 } 271 }
268 } 272 }
269 /* Just fire off a uevent and let userspace tell us what to do */ 273 /* Just fire off a uevent and let userspace tell us what to do */
274 intelfb_hotplug(dev, false);
270 drm_sysfs_hotplug_event(dev); 275 drm_sysfs_hotplug_event(dev);
271} 276}
272 277
@@ -608,7 +613,7 @@ static void i915_capture_error_state(struct drm_device *dev)
608 batchbuffer[1] = NULL; 613 batchbuffer[1] = NULL;
609 count = 0; 614 count = 0;
610 list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) { 615 list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) {
611 struct drm_gem_object *obj = obj_priv->obj; 616 struct drm_gem_object *obj = &obj_priv->base;
612 617
613 if (batchbuffer[0] == NULL && 618 if (batchbuffer[0] == NULL &&
614 bbaddr >= obj_priv->gtt_offset && 619 bbaddr >= obj_priv->gtt_offset &&
@@ -644,7 +649,7 @@ static void i915_capture_error_state(struct drm_device *dev)
644 if (error->active_bo) { 649 if (error->active_bo) {
645 int i = 0; 650 int i = 0;
646 list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) { 651 list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) {
647 struct drm_gem_object *obj = obj_priv->obj; 652 struct drm_gem_object *obj = &obj_priv->base;
648 653
649 error->active_bo[i].size = obj->size; 654 error->active_bo[i].size = obj->size;
650 error->active_bo[i].name = obj->name; 655 error->active_bo[i].name = obj->name;
@@ -946,7 +951,8 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
946 intel_finish_page_flip(dev, 1); 951 intel_finish_page_flip(dev, 1);
947 } 952 }
948 953
949 if ((pipeb_stats & I915_LEGACY_BLC_EVENT_STATUS) || 954 if ((pipea_stats & I915_LEGACY_BLC_EVENT_STATUS) ||
955 (pipeb_stats & I915_LEGACY_BLC_EVENT_STATUS) ||
950 (iir & I915_ASLE_INTERRUPT)) 956 (iir & I915_ASLE_INTERRUPT))
951 opregion_asle_intr(dev); 957 opregion_asle_intr(dev);
952 958
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 4cbc5210fd30..f3e39cc46f0d 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -1764,6 +1764,14 @@
1764#define DP_LINK_TRAIN_MASK (3 << 28) 1764#define DP_LINK_TRAIN_MASK (3 << 28)
1765#define DP_LINK_TRAIN_SHIFT 28 1765#define DP_LINK_TRAIN_SHIFT 28
1766 1766
1767/* CPT Link training mode */
1768#define DP_LINK_TRAIN_PAT_1_CPT (0 << 8)
1769#define DP_LINK_TRAIN_PAT_2_CPT (1 << 8)
1770#define DP_LINK_TRAIN_PAT_IDLE_CPT (2 << 8)
1771#define DP_LINK_TRAIN_OFF_CPT (3 << 8)
1772#define DP_LINK_TRAIN_MASK_CPT (7 << 8)
1773#define DP_LINK_TRAIN_SHIFT_CPT 8
1774
1767/* Signal voltages. These are mostly controlled by the other end */ 1775/* Signal voltages. These are mostly controlled by the other end */
1768#define DP_VOLTAGE_0_4 (0 << 25) 1776#define DP_VOLTAGE_0_4 (0 << 25)
1769#define DP_VOLTAGE_0_6 (1 << 25) 1777#define DP_VOLTAGE_0_6 (1 << 25)
@@ -1924,7 +1932,10 @@
1924/* Display & cursor control */ 1932/* Display & cursor control */
1925 1933
1926/* dithering flag on Ironlake */ 1934/* dithering flag on Ironlake */
1927#define PIPE_ENABLE_DITHER (1 << 4) 1935#define PIPE_ENABLE_DITHER (1 << 4)
1936#define PIPE_DITHER_TYPE_MASK (3 << 2)
1937#define PIPE_DITHER_TYPE_SPATIAL (0 << 2)
1938#define PIPE_DITHER_TYPE_ST01 (1 << 2)
1928/* Pipe A */ 1939/* Pipe A */
1929#define PIPEADSL 0x70000 1940#define PIPEADSL 0x70000
1930#define PIPEACONF 0x70008 1941#define PIPEACONF 0x70008
@@ -1988,15 +1999,24 @@
1988 1999
1989#define DSPFW1 0x70034 2000#define DSPFW1 0x70034
1990#define DSPFW_SR_SHIFT 23 2001#define DSPFW_SR_SHIFT 23
2002#define DSPFW_SR_MASK (0x1ff<<23)
1991#define DSPFW_CURSORB_SHIFT 16 2003#define DSPFW_CURSORB_SHIFT 16
2004#define DSPFW_CURSORB_MASK (0x3f<<16)
1992#define DSPFW_PLANEB_SHIFT 8 2005#define DSPFW_PLANEB_SHIFT 8
2006#define DSPFW_PLANEB_MASK (0x7f<<8)
2007#define DSPFW_PLANEA_MASK (0x7f)
1993#define DSPFW2 0x70038 2008#define DSPFW2 0x70038
1994#define DSPFW_CURSORA_MASK 0x00003f00 2009#define DSPFW_CURSORA_MASK 0x00003f00
1995#define DSPFW_CURSORA_SHIFT 8 2010#define DSPFW_CURSORA_SHIFT 8
2011#define DSPFW_PLANEC_MASK (0x7f)
1996#define DSPFW3 0x7003c 2012#define DSPFW3 0x7003c
1997#define DSPFW_HPLL_SR_EN (1<<31) 2013#define DSPFW_HPLL_SR_EN (1<<31)
1998#define DSPFW_CURSOR_SR_SHIFT 24 2014#define DSPFW_CURSOR_SR_SHIFT 24
1999#define PINEVIEW_SELF_REFRESH_EN (1<<30) 2015#define PINEVIEW_SELF_REFRESH_EN (1<<30)
2016#define DSPFW_CURSOR_SR_MASK (0x3f<<24)
2017#define DSPFW_HPLL_CURSOR_SHIFT 16
2018#define DSPFW_HPLL_CURSOR_MASK (0x3f<<16)
2019#define DSPFW_HPLL_SR_MASK (0x1ff)
2000 2020
2001/* FIFO watermark sizes etc */ 2021/* FIFO watermark sizes etc */
2002#define G4X_FIFO_LINE_SIZE 64 2022#define G4X_FIFO_LINE_SIZE 64
@@ -2023,6 +2043,43 @@
2023#define PINEVIEW_CURSOR_DFT_WM 0 2043#define PINEVIEW_CURSOR_DFT_WM 0
2024#define PINEVIEW_CURSOR_GUARD_WM 5 2044#define PINEVIEW_CURSOR_GUARD_WM 5
2025 2045
2046
2047/* watermark registers on Ironlake */
2048#define WM0_PIPEA_ILK 0x45100
2049#define WM0_PIPE_PLANE_MASK (0x7f<<16)
2050#define WM0_PIPE_PLANE_SHIFT 16
2051#define WM0_PIPE_SPRITE_MASK (0x3f<<8)
2052#define WM0_PIPE_SPRITE_SHIFT 8
2053#define WM0_PIPE_CURSOR_MASK (0x1f)
2054
2055#define WM0_PIPEB_ILK 0x45104
2056#define WM1_LP_ILK 0x45108
2057#define WM1_LP_SR_EN (1<<31)
2058#define WM1_LP_LATENCY_SHIFT 24
2059#define WM1_LP_LATENCY_MASK (0x7f<<24)
2060#define WM1_LP_SR_MASK (0x1ff<<8)
2061#define WM1_LP_SR_SHIFT 8
2062#define WM1_LP_CURSOR_MASK (0x3f)
2063
2064/* Memory latency timer register */
2065#define MLTR_ILK 0x11222
2066/* memory self-refresh latency is expressed in units of 0.5us */
2067#define ILK_SRLT_MASK 0x3f
2068
2069/* FIFO sizes on Ironlake */
2070#define ILK_DISPLAY_FIFO 128
2071#define ILK_DISPLAY_MAXWM 64
2072#define ILK_DISPLAY_DFTWM 8
2073
2074#define ILK_DISPLAY_SR_FIFO 512
2075#define ILK_DISPLAY_MAX_SRWM 0x1ff
2076#define ILK_DISPLAY_DFT_SRWM 0x3f
2077#define ILK_CURSOR_SR_FIFO 64
2078#define ILK_CURSOR_MAX_SRWM 0x3f
2079#define ILK_CURSOR_DFT_SRWM 8
2080
2081#define ILK_FIFO_LINE_SIZE 64
2082
2026/* 2083/*
2027 * The two pipe frame counter registers are not synchronized, so 2084 * The two pipe frame counter registers are not synchronized, so
2028 * reading a stable value is somewhat tricky. The following code 2085 * reading a stable value is somewhat tricky. The following code
@@ -2304,8 +2361,15 @@
2304#define GTIIR 0x44018 2361#define GTIIR 0x44018
2305#define GTIER 0x4401c 2362#define GTIER 0x4401c
2306 2363
2364#define ILK_DISPLAY_CHICKEN2 0x42004
2365#define ILK_DPARB_GATE (1<<22)
2366#define ILK_VSDPFD_FULL (1<<21)
2367#define ILK_DSPCLK_GATE 0x42020
2368#define ILK_DPARB_CLK_GATE (1<<5)
2369
2307#define DISP_ARB_CTL 0x45000 2370#define DISP_ARB_CTL 0x45000
2308#define DISP_TILE_SURFACE_SWIZZLING (1<<13) 2371#define DISP_TILE_SURFACE_SWIZZLING (1<<13)
2372#define DISP_FBC_WM_DIS (1<<15)
2309 2373
2310/* PCH */ 2374/* PCH */
2311 2375
@@ -2316,6 +2380,11 @@
2316#define SDE_PORTB_HOTPLUG (1 << 8) 2380#define SDE_PORTB_HOTPLUG (1 << 8)
2317#define SDE_SDVOB_HOTPLUG (1 << 6) 2381#define SDE_SDVOB_HOTPLUG (1 << 6)
2318#define SDE_HOTPLUG_MASK (0xf << 8) 2382#define SDE_HOTPLUG_MASK (0xf << 8)
2383/* CPT */
2384#define SDE_CRT_HOTPLUG_CPT (1 << 19)
2385#define SDE_PORTD_HOTPLUG_CPT (1 << 23)
2386#define SDE_PORTC_HOTPLUG_CPT (1 << 22)
2387#define SDE_PORTB_HOTPLUG_CPT (1 << 21)
2319 2388
2320#define SDEISR 0xc4000 2389#define SDEISR 0xc4000
2321#define SDEIMR 0xc4004 2390#define SDEIMR 0xc4004
@@ -2407,6 +2476,17 @@
2407#define PCH_SSC4_PARMS 0xc6210 2476#define PCH_SSC4_PARMS 0xc6210
2408#define PCH_SSC4_AUX_PARMS 0xc6214 2477#define PCH_SSC4_AUX_PARMS 0xc6214
2409 2478
2479#define PCH_DPLL_SEL 0xc7000
2480#define TRANSA_DPLL_ENABLE (1<<3)
2481#define TRANSA_DPLLB_SEL (1<<0)
2482#define TRANSA_DPLLA_SEL 0
2483#define TRANSB_DPLL_ENABLE (1<<7)
2484#define TRANSB_DPLLB_SEL (1<<4)
2485#define TRANSB_DPLLA_SEL (0)
2486#define TRANSC_DPLL_ENABLE (1<<11)
2487#define TRANSC_DPLLB_SEL (1<<8)
2488#define TRANSC_DPLLA_SEL (0)
2489
2410/* transcoder */ 2490/* transcoder */
2411 2491
2412#define TRANS_HTOTAL_A 0xe0000 2492#define TRANS_HTOTAL_A 0xe0000
@@ -2493,6 +2573,19 @@
2493#define FDI_LINK_TRAIN_PRE_EMPHASIS_1_5X (1<<22) 2573#define FDI_LINK_TRAIN_PRE_EMPHASIS_1_5X (1<<22)
2494#define FDI_LINK_TRAIN_PRE_EMPHASIS_2X (2<<22) 2574#define FDI_LINK_TRAIN_PRE_EMPHASIS_2X (2<<22)
2495#define FDI_LINK_TRAIN_PRE_EMPHASIS_3X (3<<22) 2575#define FDI_LINK_TRAIN_PRE_EMPHASIS_3X (3<<22)
2576/* ILK always uses 400mV 0dB for voltage swing and pre-emphasis level.
2577 SNB has different settings. */
2578/* SNB A-stepping */
2579#define FDI_LINK_TRAIN_400MV_0DB_SNB_A (0x38<<22)
2580#define FDI_LINK_TRAIN_400MV_6DB_SNB_A (0x02<<22)
2581#define FDI_LINK_TRAIN_600MV_3_5DB_SNB_A (0x01<<22)
2582#define FDI_LINK_TRAIN_800MV_0DB_SNB_A (0x0<<22)
2583/* SNB B-stepping */
2584#define FDI_LINK_TRAIN_400MV_0DB_SNB_B (0x0<<22)
2585#define FDI_LINK_TRAIN_400MV_6DB_SNB_B (0x3a<<22)
2586#define FDI_LINK_TRAIN_600MV_3_5DB_SNB_B (0x39<<22)
2587#define FDI_LINK_TRAIN_800MV_0DB_SNB_B (0x38<<22)
2588#define FDI_LINK_TRAIN_VOL_EMP_MASK (0x3f<<22)
2496#define FDI_DP_PORT_WIDTH_X1 (0<<19) 2589#define FDI_DP_PORT_WIDTH_X1 (0<<19)
2497#define FDI_DP_PORT_WIDTH_X2 (1<<19) 2590#define FDI_DP_PORT_WIDTH_X2 (1<<19)
2498#define FDI_DP_PORT_WIDTH_X3 (2<<19) 2591#define FDI_DP_PORT_WIDTH_X3 (2<<19)
@@ -2525,6 +2618,13 @@
2525#define FDI_RX_ENHANCE_FRAME_ENABLE (1<<6) 2618#define FDI_RX_ENHANCE_FRAME_ENABLE (1<<6)
2526#define FDI_SEL_RAWCLK (0<<4) 2619#define FDI_SEL_RAWCLK (0<<4)
2527#define FDI_SEL_PCDCLK (1<<4) 2620#define FDI_SEL_PCDCLK (1<<4)
2621/* CPT */
2622#define FDI_AUTO_TRAINING (1<<10)
2623#define FDI_LINK_TRAIN_PATTERN_1_CPT (0<<8)
2624#define FDI_LINK_TRAIN_PATTERN_2_CPT (1<<8)
2625#define FDI_LINK_TRAIN_PATTERN_IDLE_CPT (2<<8)
2626#define FDI_LINK_TRAIN_NORMAL_CPT (3<<8)
2627#define FDI_LINK_TRAIN_PATTERN_MASK_CPT (3<<8)
2528 2628
2529#define FDI_RXA_MISC 0xf0010 2629#define FDI_RXA_MISC 0xf0010
2530#define FDI_RXB_MISC 0xf1010 2630#define FDI_RXB_MISC 0xf1010
@@ -2596,6 +2696,9 @@
2596#define HSYNC_ACTIVE_HIGH (1 << 3) 2696#define HSYNC_ACTIVE_HIGH (1 << 3)
2597#define PORT_DETECTED (1 << 2) 2697#define PORT_DETECTED (1 << 2)
2598 2698
2699/* PCH SDVOB is multiplexed with HDMIB */
2700#define PCH_SDVOB HDMIB
2701
2599#define HDMIC 0xe1150 2702#define HDMIC 0xe1150
2600#define HDMID 0xe1160 2703#define HDMID 0xe1160
2601 2704
@@ -2653,4 +2756,42 @@
2653#define PCH_DPD_AUX_CH_DATA4 0xe4320 2756#define PCH_DPD_AUX_CH_DATA4 0xe4320
2654#define PCH_DPD_AUX_CH_DATA5 0xe4324 2757#define PCH_DPD_AUX_CH_DATA5 0xe4324
2655 2758
2759/* CPT */
2760#define PORT_TRANS_A_SEL_CPT 0
2761#define PORT_TRANS_B_SEL_CPT (1<<29)
2762#define PORT_TRANS_C_SEL_CPT (2<<29)
2763#define PORT_TRANS_SEL_MASK (3<<29)
2764
2765#define TRANS_DP_CTL_A 0xe0300
2766#define TRANS_DP_CTL_B 0xe1300
2767#define TRANS_DP_CTL_C 0xe2300
2768#define TRANS_DP_OUTPUT_ENABLE (1<<31)
2769#define TRANS_DP_PORT_SEL_B (0<<29)
2770#define TRANS_DP_PORT_SEL_C (1<<29)
2771#define TRANS_DP_PORT_SEL_D (2<<29)
2772#define TRANS_DP_PORT_SEL_MASK (3<<29)
2773#define TRANS_DP_AUDIO_ONLY (1<<26)
2774#define TRANS_DP_ENH_FRAMING (1<<18)
2775#define TRANS_DP_8BPC (0<<9)
2776#define TRANS_DP_10BPC (1<<9)
2777#define TRANS_DP_6BPC (2<<9)
2778#define TRANS_DP_12BPC (3<<9)
2779#define TRANS_DP_VSYNC_ACTIVE_HIGH (1<<4)
2780#define TRANS_DP_VSYNC_ACTIVE_LOW 0
2781#define TRANS_DP_HSYNC_ACTIVE_HIGH (1<<3)
2782#define TRANS_DP_HSYNC_ACTIVE_LOW 0
2783
2784/* SNB eDP training params */
2785/* SNB A-stepping */
2786#define EDP_LINK_TRAIN_400MV_0DB_SNB_A (0x38<<22)
2787#define EDP_LINK_TRAIN_400MV_6DB_SNB_A (0x02<<22)
2788#define EDP_LINK_TRAIN_600MV_3_5DB_SNB_A (0x01<<22)
2789#define EDP_LINK_TRAIN_800MV_0DB_SNB_A (0x0<<22)
2790/* SNB B-stepping */
2791#define EDP_LINK_TRAIN_400MV_0DB_SNB_B (0x0<<22)
2792#define EDP_LINK_TRAIN_400MV_6DB_SNB_B (0x3a<<22)
2793#define EDP_LINK_TRAIN_600MV_3_5DB_SNB_B (0x39<<22)
2794#define EDP_LINK_TRAIN_800MV_0DB_SNB_B (0x38<<22)
2795#define EDP_LINK_TRAIN_VOL_EMP_MASK_SNB (0x3f<<22)
2796
2656#endif /* _I915_REG_H_ */ 2797#endif /* _I915_REG_H_ */
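Most of the new definitions are mask/shift pairs for packed watermark fields,
letting the watermark code update one field with a read-modify-write while
preserving its neighbours. A sketch of the intended use (new_wm stands in for
a value computed elsewhere):

	u32 val = I915_READ(WM0_PIPEA_ILK);

	/* Replace the plane watermark; keep sprite and cursor fields. */
	val &= ~WM0_PIPE_PLANE_MASK;
	val |= (new_wm << WM0_PIPE_PLANE_SHIFT) & WM0_PIPE_PLANE_MASK;
	I915_WRITE(WM0_PIPEA_ILK, val);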
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index ac0d1a73ac22..60a5800fba6e 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -600,14 +600,16 @@ void i915_save_display(struct drm_device *dev)
600 } 600 }
601 /* FIXME: save TV & SDVO state */ 601 /* FIXME: save TV & SDVO state */
602 602
603 /* FBC state */ 603 /* Only save FBC state on the platform that supports FBC */
604 if (IS_GM45(dev)) { 604 if (I915_HAS_FBC(dev)) {
605 dev_priv->saveDPFC_CB_BASE = I915_READ(DPFC_CB_BASE); 605 if (IS_GM45(dev)) {
606 } else { 606 dev_priv->saveDPFC_CB_BASE = I915_READ(DPFC_CB_BASE);
607 dev_priv->saveFBC_CFB_BASE = I915_READ(FBC_CFB_BASE); 607 } else {
608 dev_priv->saveFBC_LL_BASE = I915_READ(FBC_LL_BASE); 608 dev_priv->saveFBC_CFB_BASE = I915_READ(FBC_CFB_BASE);
609 dev_priv->saveFBC_CONTROL2 = I915_READ(FBC_CONTROL2); 609 dev_priv->saveFBC_LL_BASE = I915_READ(FBC_LL_BASE);
610 dev_priv->saveFBC_CONTROL = I915_READ(FBC_CONTROL); 610 dev_priv->saveFBC_CONTROL2 = I915_READ(FBC_CONTROL2);
611 dev_priv->saveFBC_CONTROL = I915_READ(FBC_CONTROL);
612 }
611 } 613 }
612 614
613 /* VGA state */ 615 /* VGA state */
@@ -702,18 +704,19 @@ void i915_restore_display(struct drm_device *dev)
702 } 704 }
703 /* FIXME: restore TV & SDVO state */ 705 /* FIXME: restore TV & SDVO state */
704 706
705	/* FBC info */ 707	/* Only restore FBC info on the platform that supports FBC */
706 if (IS_GM45(dev)) { 708 if (I915_HAS_FBC(dev)) {
707 g4x_disable_fbc(dev); 709 if (IS_GM45(dev)) {
708 I915_WRITE(DPFC_CB_BASE, dev_priv->saveDPFC_CB_BASE); 710 g4x_disable_fbc(dev);
709 } else { 711 I915_WRITE(DPFC_CB_BASE, dev_priv->saveDPFC_CB_BASE);
710 i8xx_disable_fbc(dev); 712 } else {
711 I915_WRITE(FBC_CFB_BASE, dev_priv->saveFBC_CFB_BASE); 713 i8xx_disable_fbc(dev);
712 I915_WRITE(FBC_LL_BASE, dev_priv->saveFBC_LL_BASE); 714 I915_WRITE(FBC_CFB_BASE, dev_priv->saveFBC_CFB_BASE);
713 I915_WRITE(FBC_CONTROL2, dev_priv->saveFBC_CONTROL2); 715 I915_WRITE(FBC_LL_BASE, dev_priv->saveFBC_LL_BASE);
714 I915_WRITE(FBC_CONTROL, dev_priv->saveFBC_CONTROL); 716 I915_WRITE(FBC_CONTROL2, dev_priv->saveFBC_CONTROL2);
717 I915_WRITE(FBC_CONTROL, dev_priv->saveFBC_CONTROL);
718 }
715 } 719 }
716
717 /* VGA state */ 720 /* VGA state */
718 if (IS_IRONLAKE(dev)) 721 if (IS_IRONLAKE(dev))
719 I915_WRITE(CPU_VGACNTRL, dev_priv->saveVGACNTRL); 722 I915_WRITE(CPU_VGACNTRL, dev_priv->saveVGACNTRL);
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index 01840d9bc38f..303815321c79 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -115,7 +115,7 @@ TRACE_EVENT(i915_gem_object_get_fence,
115 __entry->obj, __entry->fence, __entry->tiling_mode) 115 __entry->obj, __entry->fence, __entry->tiling_mode)
116); 116);
117 117
118TRACE_EVENT(i915_gem_object_unbind, 118DECLARE_EVENT_CLASS(i915_gem_object,
119 119
120 TP_PROTO(struct drm_gem_object *obj), 120 TP_PROTO(struct drm_gem_object *obj),
121 121
@@ -132,21 +132,18 @@ TRACE_EVENT(i915_gem_object_unbind,
132 TP_printk("obj=%p", __entry->obj) 132 TP_printk("obj=%p", __entry->obj)
133); 133);
134 134
135TRACE_EVENT(i915_gem_object_destroy, 135DEFINE_EVENT(i915_gem_object, i915_gem_object_unbind,
136 136
137 TP_PROTO(struct drm_gem_object *obj), 137 TP_PROTO(struct drm_gem_object *obj),
138 138
139 TP_ARGS(obj), 139 TP_ARGS(obj)
140);
140 141
141 TP_STRUCT__entry( 142DEFINE_EVENT(i915_gem_object, i915_gem_object_destroy,
142 __field(struct drm_gem_object *, obj)
143 ),
144 143
145 TP_fast_assign( 144 TP_PROTO(struct drm_gem_object *obj),
146 __entry->obj = obj;
147 ),
148 145
149 TP_printk("obj=%p", __entry->obj) 146 TP_ARGS(obj)
150); 147);
151 148
152/* batch tracing */ 149/* batch tracing */
@@ -197,8 +194,7 @@ TRACE_EVENT(i915_gem_request_flush,
197 __entry->flush_domains, __entry->invalidate_domains) 194 __entry->flush_domains, __entry->invalidate_domains)
198); 195);
199 196
200 197DECLARE_EVENT_CLASS(i915_gem_request,
201TRACE_EVENT(i915_gem_request_complete,
202 198
203 TP_PROTO(struct drm_device *dev, u32 seqno), 199 TP_PROTO(struct drm_device *dev, u32 seqno),
204 200
@@ -217,64 +213,35 @@ TRACE_EVENT(i915_gem_request_complete,
217 TP_printk("dev=%u, seqno=%u", __entry->dev, __entry->seqno) 213 TP_printk("dev=%u, seqno=%u", __entry->dev, __entry->seqno)
218); 214);
219 215
220TRACE_EVENT(i915_gem_request_retire, 216DEFINE_EVENT(i915_gem_request, i915_gem_request_complete,
221 217
222 TP_PROTO(struct drm_device *dev, u32 seqno), 218 TP_PROTO(struct drm_device *dev, u32 seqno),
223 219
224 TP_ARGS(dev, seqno), 220 TP_ARGS(dev, seqno)
225
226 TP_STRUCT__entry(
227 __field(u32, dev)
228 __field(u32, seqno)
229 ),
230
231 TP_fast_assign(
232 __entry->dev = dev->primary->index;
233 __entry->seqno = seqno;
234 ),
235
236 TP_printk("dev=%u, seqno=%u", __entry->dev, __entry->seqno)
237); 221);
238 222
239TRACE_EVENT(i915_gem_request_wait_begin, 223DEFINE_EVENT(i915_gem_request, i915_gem_request_retire,
240 224
241 TP_PROTO(struct drm_device *dev, u32 seqno), 225 TP_PROTO(struct drm_device *dev, u32 seqno),
242 226
243 TP_ARGS(dev, seqno), 227 TP_ARGS(dev, seqno)
244
245 TP_STRUCT__entry(
246 __field(u32, dev)
247 __field(u32, seqno)
248 ),
249
250 TP_fast_assign(
251 __entry->dev = dev->primary->index;
252 __entry->seqno = seqno;
253 ),
254
255 TP_printk("dev=%u, seqno=%u", __entry->dev, __entry->seqno)
256); 228);
257 229
258TRACE_EVENT(i915_gem_request_wait_end, 230DEFINE_EVENT(i915_gem_request, i915_gem_request_wait_begin,
259 231
260 TP_PROTO(struct drm_device *dev, u32 seqno), 232 TP_PROTO(struct drm_device *dev, u32 seqno),
261 233
262 TP_ARGS(dev, seqno), 234 TP_ARGS(dev, seqno)
235);
263 236
264 TP_STRUCT__entry( 237DEFINE_EVENT(i915_gem_request, i915_gem_request_wait_end,
265 __field(u32, dev)
266 __field(u32, seqno)
267 ),
268 238
269 TP_fast_assign( 239 TP_PROTO(struct drm_device *dev, u32 seqno),
270 __entry->dev = dev->primary->index;
271 __entry->seqno = seqno;
272 ),
273 240
274 TP_printk("dev=%u, seqno=%u", __entry->dev, __entry->seqno) 241 TP_ARGS(dev, seqno)
275); 242);
276 243
277TRACE_EVENT(i915_ring_wait_begin, 244DECLARE_EVENT_CLASS(i915_ring,
278 245
279 TP_PROTO(struct drm_device *dev), 246 TP_PROTO(struct drm_device *dev),
280 247
@@ -291,21 +258,18 @@ TRACE_EVENT(i915_ring_wait_begin,
291 TP_printk("dev=%u", __entry->dev) 258 TP_printk("dev=%u", __entry->dev)
292); 259);
293 260
294TRACE_EVENT(i915_ring_wait_end, 261DEFINE_EVENT(i915_ring, i915_ring_wait_begin,
295 262
296 TP_PROTO(struct drm_device *dev), 263 TP_PROTO(struct drm_device *dev),
297 264
298 TP_ARGS(dev), 265 TP_ARGS(dev)
266);
299 267
300 TP_STRUCT__entry( 268DEFINE_EVENT(i915_ring, i915_ring_wait_end,
301 __field(u32, dev)
302 ),
303 269
304 TP_fast_assign( 270 TP_PROTO(struct drm_device *dev),
305 __entry->dev = dev->primary->index;
306 ),
307 271
308 TP_printk("dev=%u", __entry->dev) 272 TP_ARGS(dev)
309); 273);
310 274
311#endif /* _I915_TRACE_H_ */ 275#endif /* _I915_TRACE_H_ */
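
The i915_trace.h hunk above collapses several copy-pasted TRACE_EVENT() definitions into one DECLARE_EVENT_CLASS() plus a DEFINE_EVENT() per event name. A minimal sketch of the pattern, assuming the standard kernel tracepoint macros and using hypothetical sample_* names rather than the driver's own events:

DECLARE_EVENT_CLASS(sample_class,

	TP_PROTO(struct drm_device *dev, u32 seqno),

	TP_ARGS(dev, seqno),

	TP_STRUCT__entry(
		__field(u32, dev)
		__field(u32, seqno)
	),

	TP_fast_assign(
		__entry->dev = dev->primary->index;
		__entry->seqno = seqno;
	),

	TP_printk("dev=%u, seqno=%u", __entry->dev, __entry->seqno)
);

/* Each event reuses the class body; only the tracepoint name differs. */
DEFINE_EVENT(sample_class, sample_event_begin,

	TP_PROTO(struct drm_device *dev, u32 seqno),

	TP_ARGS(dev, seqno)
);

The assign/print machinery is generated once per class, so each additional event costs a three-line DEFINE_EVENT instead of a full TRACE_EVENT body.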
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 759c2ef72eff..26756cd34e3c 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -136,11 +136,17 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
136 adpa |= ADPA_VSYNC_ACTIVE_HIGH; 136 adpa |= ADPA_VSYNC_ACTIVE_HIGH;
137 137
138 if (intel_crtc->pipe == 0) { 138 if (intel_crtc->pipe == 0) {
139 adpa |= ADPA_PIPE_A_SELECT; 139 if (HAS_PCH_CPT(dev))
140 adpa |= PORT_TRANS_A_SEL_CPT;
141 else
142 adpa |= ADPA_PIPE_A_SELECT;
140 if (!HAS_PCH_SPLIT(dev)) 143 if (!HAS_PCH_SPLIT(dev))
141 I915_WRITE(BCLRPAT_A, 0); 144 I915_WRITE(BCLRPAT_A, 0);
142 } else { 145 } else {
143 adpa |= ADPA_PIPE_B_SELECT; 146 if (HAS_PCH_CPT(dev))
147 adpa |= PORT_TRANS_B_SEL_CPT;
148 else
149 adpa |= ADPA_PIPE_B_SELECT;
144 if (!HAS_PCH_SPLIT(dev)) 150 if (!HAS_PCH_SPLIT(dev))
145 I915_WRITE(BCLRPAT_B, 0); 151 I915_WRITE(BCLRPAT_B, 0);
146 } 152 }
@@ -152,15 +158,21 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
152{ 158{
153 struct drm_device *dev = connector->dev; 159 struct drm_device *dev = connector->dev;
154 struct drm_i915_private *dev_priv = dev->dev_private; 160 struct drm_i915_private *dev_priv = dev->dev_private;
155 u32 adpa; 161 u32 adpa, temp;
156 bool ret; 162 bool ret;
157 163
158 adpa = I915_READ(PCH_ADPA); 164 temp = adpa = I915_READ(PCH_ADPA);
159 165
160 adpa &= ~ADPA_CRT_HOTPLUG_MASK; 166 if (HAS_PCH_CPT(dev)) {
161 /* disable HPD first */ 167 /* Disable DAC before force detect */
162 I915_WRITE(PCH_ADPA, adpa); 168 I915_WRITE(PCH_ADPA, adpa & ~ADPA_DAC_ENABLE);
163 (void)I915_READ(PCH_ADPA); 169 (void)I915_READ(PCH_ADPA);
170 } else {
171 adpa &= ~ADPA_CRT_HOTPLUG_MASK;
172 /* disable HPD first */
173 I915_WRITE(PCH_ADPA, adpa);
174 (void)I915_READ(PCH_ADPA);
175 }
164 176
165 adpa |= (ADPA_CRT_HOTPLUG_PERIOD_128 | 177 adpa |= (ADPA_CRT_HOTPLUG_PERIOD_128 |
166 ADPA_CRT_HOTPLUG_WARMUP_10MS | 178 ADPA_CRT_HOTPLUG_WARMUP_10MS |
@@ -176,6 +188,11 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
176 while ((I915_READ(PCH_ADPA) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) != 0) 188 while ((I915_READ(PCH_ADPA) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) != 0)
177 ; 189 ;
178 190
191 if (HAS_PCH_CPT(dev)) {
192 I915_WRITE(PCH_ADPA, temp);
193 (void)I915_READ(PCH_ADPA);
194 }
195
179 /* Check the status to see if both blue and green are on now */ 196 /* Check the status to see if both blue and green are on now */
180 adpa = I915_READ(PCH_ADPA); 197 adpa = I915_READ(PCH_ADPA);
181 adpa &= ADPA_CRT_HOTPLUG_MONITOR_MASK; 198 adpa &= ADPA_CRT_HOTPLUG_MONITOR_MASK;
@@ -245,9 +262,9 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
245 return false; 262 return false;
246} 263}
247 264
248static bool intel_crt_detect_ddc(struct drm_connector *connector) 265static bool intel_crt_detect_ddc(struct drm_encoder *encoder)
249{ 266{
250 struct intel_encoder *intel_encoder = to_intel_encoder(connector); 267 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
251 268
252 /* CRT should always be at 0, but check anyway */ 269 /* CRT should always be at 0, but check anyway */
253 if (intel_encoder->type != INTEL_OUTPUT_ANALOG) 270 if (intel_encoder->type != INTEL_OUTPUT_ANALOG)
@@ -387,8 +404,8 @@ intel_crt_load_detect(struct drm_crtc *crtc, struct intel_encoder *intel_encoder
387static enum drm_connector_status intel_crt_detect(struct drm_connector *connector) 404static enum drm_connector_status intel_crt_detect(struct drm_connector *connector)
388{ 405{
389 struct drm_device *dev = connector->dev; 406 struct drm_device *dev = connector->dev;
390 struct intel_encoder *intel_encoder = to_intel_encoder(connector); 407 struct drm_encoder *encoder = intel_attached_encoder(connector);
391 struct drm_encoder *encoder = &intel_encoder->enc; 408 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
392 struct drm_crtc *crtc; 409 struct drm_crtc *crtc;
393 int dpms_mode; 410 int dpms_mode;
394 enum drm_connector_status status; 411 enum drm_connector_status status;
@@ -400,18 +417,19 @@ static enum drm_connector_status intel_crt_detect(struct drm_connector *connecto
400 return connector_status_disconnected; 417 return connector_status_disconnected;
401 } 418 }
402 419
403 if (intel_crt_detect_ddc(connector)) 420 if (intel_crt_detect_ddc(encoder))
404 return connector_status_connected; 421 return connector_status_connected;
405 422
406 /* for pre-945g platforms use load detect */ 423 /* for pre-945g platforms use load detect */
407 if (encoder->crtc && encoder->crtc->enabled) { 424 if (encoder->crtc && encoder->crtc->enabled) {
408 status = intel_crt_load_detect(encoder->crtc, intel_encoder); 425 status = intel_crt_load_detect(encoder->crtc, intel_encoder);
409 } else { 426 } else {
410 crtc = intel_get_load_detect_pipe(intel_encoder, 427 crtc = intel_get_load_detect_pipe(intel_encoder, connector,
411 NULL, &dpms_mode); 428 NULL, &dpms_mode);
412 if (crtc) { 429 if (crtc) {
413 status = intel_crt_load_detect(crtc, intel_encoder); 430 status = intel_crt_load_detect(crtc, intel_encoder);
414 intel_release_load_detect_pipe(intel_encoder, dpms_mode); 431 intel_release_load_detect_pipe(intel_encoder,
432 connector, dpms_mode);
415 } else 433 } else
416 status = connector_status_unknown; 434 status = connector_status_unknown;
417 } 435 }
@@ -421,9 +439,6 @@ static enum drm_connector_status intel_crt_detect(struct drm_connector *connecto
421 439
422static void intel_crt_destroy(struct drm_connector *connector) 440static void intel_crt_destroy(struct drm_connector *connector)
423{ 441{
424 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
425
426 intel_i2c_destroy(intel_encoder->ddc_bus);
427 drm_sysfs_connector_remove(connector); 442 drm_sysfs_connector_remove(connector);
428 drm_connector_cleanup(connector); 443 drm_connector_cleanup(connector);
429 kfree(connector); 444 kfree(connector);
@@ -432,29 +447,27 @@ static void intel_crt_destroy(struct drm_connector *connector)
432static int intel_crt_get_modes(struct drm_connector *connector) 447static int intel_crt_get_modes(struct drm_connector *connector)
433{ 448{
434 int ret; 449 int ret;
435 struct intel_encoder *intel_encoder = to_intel_encoder(connector); 450 struct drm_encoder *encoder = intel_attached_encoder(connector);
436 struct i2c_adapter *ddcbus; 451 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
452 struct i2c_adapter *ddc_bus;
437 struct drm_device *dev = connector->dev; 453 struct drm_device *dev = connector->dev;
438 454
439 455
440 ret = intel_ddc_get_modes(intel_encoder); 456 ret = intel_ddc_get_modes(connector, intel_encoder->ddc_bus);
441 if (ret || !IS_G4X(dev)) 457 if (ret || !IS_G4X(dev))
442 goto end; 458 goto end;
443 459
444 ddcbus = intel_encoder->ddc_bus;
445 /* Try to probe digital port for output in DVI-I -> VGA mode. */ 460 /* Try to probe digital port for output in DVI-I -> VGA mode. */
446 intel_encoder->ddc_bus = 461 ddc_bus = intel_i2c_create(connector->dev, GPIOD, "CRTDDC_D");
447 intel_i2c_create(connector->dev, GPIOD, "CRTDDC_D");
448 462
449 if (!intel_encoder->ddc_bus) { 463 if (!ddc_bus) {
450 intel_encoder->ddc_bus = ddcbus;
451 dev_printk(KERN_ERR, &connector->dev->pdev->dev, 464 dev_printk(KERN_ERR, &connector->dev->pdev->dev,
452 "DDC bus registration failed for CRTDDC_D.\n"); 465 "DDC bus registration failed for CRTDDC_D.\n");
453 goto end; 466 goto end;
454 } 467 }
455 /* Try to get modes by GPIOD port */ 468 /* Try to get modes by GPIOD port */
456 ret = intel_ddc_get_modes(intel_encoder); 469 ret = intel_ddc_get_modes(connector, ddc_bus);
457 intel_i2c_destroy(ddcbus); 470 intel_i2c_destroy(ddc_bus);
458 471
459end: 472end:
460 return ret; 473 return ret;
@@ -491,12 +504,16 @@ static const struct drm_connector_funcs intel_crt_connector_funcs = {
491static const struct drm_connector_helper_funcs intel_crt_connector_helper_funcs = { 504static const struct drm_connector_helper_funcs intel_crt_connector_helper_funcs = {
492 .mode_valid = intel_crt_mode_valid, 505 .mode_valid = intel_crt_mode_valid,
493 .get_modes = intel_crt_get_modes, 506 .get_modes = intel_crt_get_modes,
494 .best_encoder = intel_best_encoder, 507 .best_encoder = intel_attached_encoder,
495}; 508};
496 509
497static void intel_crt_enc_destroy(struct drm_encoder *encoder) 510static void intel_crt_enc_destroy(struct drm_encoder *encoder)
498{ 511{
512 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
513
514 intel_i2c_destroy(intel_encoder->ddc_bus);
499 drm_encoder_cleanup(encoder); 515 drm_encoder_cleanup(encoder);
516 kfree(intel_encoder);
500} 517}
501 518
502static const struct drm_encoder_funcs intel_crt_enc_funcs = { 519static const struct drm_encoder_funcs intel_crt_enc_funcs = {
@@ -507,6 +524,7 @@ void intel_crt_init(struct drm_device *dev)
507{ 524{
508 struct drm_connector *connector; 525 struct drm_connector *connector;
509 struct intel_encoder *intel_encoder; 526 struct intel_encoder *intel_encoder;
527 struct intel_connector *intel_connector;
510 struct drm_i915_private *dev_priv = dev->dev_private; 528 struct drm_i915_private *dev_priv = dev->dev_private;
511 u32 i2c_reg; 529 u32 i2c_reg;
512 530
@@ -514,14 +532,20 @@ void intel_crt_init(struct drm_device *dev)
514 if (!intel_encoder) 532 if (!intel_encoder)
515 return; 533 return;
516 534
517 connector = &intel_encoder->base; 535 intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
518 drm_connector_init(dev, &intel_encoder->base, 536 if (!intel_connector) {
537 kfree(intel_encoder);
538 return;
539 }
540
541 connector = &intel_connector->base;
542 drm_connector_init(dev, &intel_connector->base,
519 &intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA); 543 &intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA);
520 544
521 drm_encoder_init(dev, &intel_encoder->enc, &intel_crt_enc_funcs, 545 drm_encoder_init(dev, &intel_encoder->enc, &intel_crt_enc_funcs,
522 DRM_MODE_ENCODER_DAC); 546 DRM_MODE_ENCODER_DAC);
523 547
524 drm_mode_connector_attach_encoder(&intel_encoder->base, 548 drm_mode_connector_attach_encoder(&intel_connector->base,
525 &intel_encoder->enc); 549 &intel_encoder->enc);
526 550
527 /* Set up the DDC bus. */ 551 /* Set up the DDC bus. */
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index c7502b6b1600..e775ce67be33 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -742,12 +742,11 @@ bool intel_pipe_has_type (struct drm_crtc *crtc, int type)
742{ 742{
743 struct drm_device *dev = crtc->dev; 743 struct drm_device *dev = crtc->dev;
744 struct drm_mode_config *mode_config = &dev->mode_config; 744 struct drm_mode_config *mode_config = &dev->mode_config;
745 struct drm_connector *l_entry; 745 struct drm_encoder *l_entry;
746 746
747 list_for_each_entry(l_entry, &mode_config->connector_list, head) { 747 list_for_each_entry(l_entry, &mode_config->encoder_list, head) {
748 if (l_entry->encoder && 748 if (l_entry && l_entry->crtc == crtc) {
749 l_entry->encoder->crtc == crtc) { 749 struct intel_encoder *intel_encoder = enc_to_intel_encoder(l_entry);
750 struct intel_encoder *intel_encoder = to_intel_encoder(l_entry);
751 if (intel_encoder->type == type) 750 if (intel_encoder->type == type)
752 return true; 751 return true;
753 } 752 }
@@ -755,23 +754,6 @@ bool intel_pipe_has_type (struct drm_crtc *crtc, int type)
755 return false; 754 return false;
756} 755}
757 756
758static struct drm_connector *
759intel_pipe_get_connector (struct drm_crtc *crtc)
760{
761 struct drm_device *dev = crtc->dev;
762 struct drm_mode_config *mode_config = &dev->mode_config;
763 struct drm_connector *l_entry, *ret = NULL;
764
765 list_for_each_entry(l_entry, &mode_config->connector_list, head) {
766 if (l_entry->encoder &&
767 l_entry->encoder->crtc == crtc) {
768 ret = l_entry;
769 break;
770 }
771 }
772 return ret;
773}
774
775#define INTELPllInvalid(s) do { /* DRM_DEBUG(s); */ return false; } while (0) 757#define INTELPllInvalid(s) do { /* DRM_DEBUG(s); */ return false; } while (0)
776/** 758/**
777 * Returns whether the given set of divisors are valid for a given refclk with 759 * Returns whether the given set of divisors are valid for a given refclk with
@@ -1066,9 +1048,8 @@ void i8xx_disable_fbc(struct drm_device *dev)
1066 DRM_DEBUG_KMS("disabled FBC\n"); 1048 DRM_DEBUG_KMS("disabled FBC\n");
1067} 1049}
1068 1050
1069static bool i8xx_fbc_enabled(struct drm_crtc *crtc) 1051static bool i8xx_fbc_enabled(struct drm_device *dev)
1070{ 1052{
1071 struct drm_device *dev = crtc->dev;
1072 struct drm_i915_private *dev_priv = dev->dev_private; 1053 struct drm_i915_private *dev_priv = dev->dev_private;
1073 1054
1074 return I915_READ(FBC_CONTROL) & FBC_CTL_EN; 1055 return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
@@ -1125,14 +1106,43 @@ void g4x_disable_fbc(struct drm_device *dev)
1125 DRM_DEBUG_KMS("disabled FBC\n"); 1106 DRM_DEBUG_KMS("disabled FBC\n");
1126} 1107}
1127 1108
1128static bool g4x_fbc_enabled(struct drm_crtc *crtc) 1109static bool g4x_fbc_enabled(struct drm_device *dev)
1129{ 1110{
1130 struct drm_device *dev = crtc->dev;
1131 struct drm_i915_private *dev_priv = dev->dev_private; 1111 struct drm_i915_private *dev_priv = dev->dev_private;
1132 1112
1133 return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN; 1113 return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
1134} 1114}
1135 1115
1116bool intel_fbc_enabled(struct drm_device *dev)
1117{
1118 struct drm_i915_private *dev_priv = dev->dev_private;
1119
1120 if (!dev_priv->display.fbc_enabled)
1121 return false;
1122
1123 return dev_priv->display.fbc_enabled(dev);
1124}
1125
1126void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
1127{
1128 struct drm_i915_private *dev_priv = crtc->dev->dev_private;
1129
1130 if (!dev_priv->display.enable_fbc)
1131 return;
1132
1133 dev_priv->display.enable_fbc(crtc, interval);
1134}
1135
1136void intel_disable_fbc(struct drm_device *dev)
1137{
1138 struct drm_i915_private *dev_priv = dev->dev_private;
1139
1140 if (!dev_priv->display.disable_fbc)
1141 return;
1142
1143 dev_priv->display.disable_fbc(dev);
1144}
1145
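
The three wrappers above centralize the NULL checks on the per-platform display vtable, so callers such as intel_update_fbc() no longer have to test fbc_enabled/enable_fbc/disable_fbc individually. A sketch of a hypothetical call site under that assumption:

	/* Platforms without FBC leave the vtable hooks NULL, so the
	 * guarded entry points are safe to call unconditionally. */
	if (intel_fbc_enabled(dev))
		intel_disable_fbc(dev);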
1136/** 1146/**
1137 * intel_update_fbc - enable/disable FBC as needed 1147 * intel_update_fbc - enable/disable FBC as needed
1138 * @crtc: CRTC to point the compressor at 1148 * @crtc: CRTC to point the compressor at
@@ -1167,9 +1177,7 @@ static void intel_update_fbc(struct drm_crtc *crtc,
1167 if (!i915_powersave) 1177 if (!i915_powersave)
1168 return; 1178 return;
1169 1179
1170 if (!dev_priv->display.fbc_enabled || 1180 if (!I915_HAS_FBC(dev))
1171 !dev_priv->display.enable_fbc ||
1172 !dev_priv->display.disable_fbc)
1173 return; 1181 return;
1174 1182
1175 if (!crtc->fb) 1183 if (!crtc->fb)
@@ -1216,28 +1224,25 @@ static void intel_update_fbc(struct drm_crtc *crtc,
1216 goto out_disable; 1224 goto out_disable;
1217 } 1225 }
1218 1226
1219 if (dev_priv->display.fbc_enabled(crtc)) { 1227 if (intel_fbc_enabled(dev)) {
1220 /* We can re-enable it in this case, but need to update pitch */ 1228 /* We can re-enable it in this case, but need to update pitch */
1221 if (fb->pitch > dev_priv->cfb_pitch) 1229 if ((fb->pitch > dev_priv->cfb_pitch) ||
1222 dev_priv->display.disable_fbc(dev); 1230 (obj_priv->fence_reg != dev_priv->cfb_fence) ||
1223 if (obj_priv->fence_reg != dev_priv->cfb_fence) 1231 (plane != dev_priv->cfb_plane))
1224 dev_priv->display.disable_fbc(dev); 1232 intel_disable_fbc(dev);
1225 if (plane != dev_priv->cfb_plane)
1226 dev_priv->display.disable_fbc(dev);
1227 } 1233 }
1228 1234
1229 if (!dev_priv->display.fbc_enabled(crtc)) { 1235 /* Now try to turn it back on if possible */
1230 /* Now try to turn it back on if possible */ 1236 if (!intel_fbc_enabled(dev))
1231 dev_priv->display.enable_fbc(crtc, 500); 1237 intel_enable_fbc(crtc, 500);
1232 }
1233 1238
1234 return; 1239 return;
1235 1240
1236out_disable: 1241out_disable:
1237 DRM_DEBUG_KMS("unsupported config, disabling FBC\n"); 1242 DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
1238 /* Multiple disables should be harmless */ 1243 /* Multiple disables should be harmless */
1239 if (dev_priv->display.fbc_enabled(crtc)) 1244 if (intel_fbc_enabled(dev))
1240 dev_priv->display.disable_fbc(dev); 1245 intel_disable_fbc(dev);
1241} 1246}
1242 1247
1243static int 1248static int
@@ -1510,6 +1515,219 @@ static void ironlake_set_pll_edp (struct drm_crtc *crtc, int clock)
1510 udelay(500); 1515 udelay(500);
1511} 1516}
1512 1517
1518/* The FDI link training functions for ILK/Ibexpeak. */
1519static void ironlake_fdi_link_train(struct drm_crtc *crtc)
1520{
1521 struct drm_device *dev = crtc->dev;
1522 struct drm_i915_private *dev_priv = dev->dev_private;
1523 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1524 int pipe = intel_crtc->pipe;
1525 int fdi_tx_reg = (pipe == 0) ? FDI_TXA_CTL : FDI_TXB_CTL;
1526 int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL;
1527 int fdi_rx_iir_reg = (pipe == 0) ? FDI_RXA_IIR : FDI_RXB_IIR;
1528 int fdi_rx_imr_reg = (pipe == 0) ? FDI_RXA_IMR : FDI_RXB_IMR;
1529 u32 temp, tries = 0;
1530
1531 /* enable CPU FDI TX and PCH FDI RX */
1532 temp = I915_READ(fdi_tx_reg);
1533 temp |= FDI_TX_ENABLE;
1534 temp &= ~(7 << 19);
1535 temp |= (intel_crtc->fdi_lanes - 1) << 19;
1536 temp &= ~FDI_LINK_TRAIN_NONE;
1537 temp |= FDI_LINK_TRAIN_PATTERN_1;
1538 I915_WRITE(fdi_tx_reg, temp);
1539 I915_READ(fdi_tx_reg);
1540
1541 temp = I915_READ(fdi_rx_reg);
1542 temp &= ~FDI_LINK_TRAIN_NONE;
1543 temp |= FDI_LINK_TRAIN_PATTERN_1;
1544 I915_WRITE(fdi_rx_reg, temp | FDI_RX_ENABLE);
1545 I915_READ(fdi_rx_reg);
1546 udelay(150);
1547
 1548 /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
 1549 for train result */
1550 temp = I915_READ(fdi_rx_imr_reg);
1551 temp &= ~FDI_RX_SYMBOL_LOCK;
1552 temp &= ~FDI_RX_BIT_LOCK;
1553 I915_WRITE(fdi_rx_imr_reg, temp);
1554 I915_READ(fdi_rx_imr_reg);
1555 udelay(150);
1556
1557 for (;;) {
1558 temp = I915_READ(fdi_rx_iir_reg);
1559 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
1560
1561 if ((temp & FDI_RX_BIT_LOCK)) {
1562 DRM_DEBUG_KMS("FDI train 1 done.\n");
1563 I915_WRITE(fdi_rx_iir_reg,
1564 temp | FDI_RX_BIT_LOCK);
1565 break;
1566 }
1567
1568 tries++;
1569
1570 if (tries > 5) {
1571 DRM_DEBUG_KMS("FDI train 1 fail!\n");
1572 break;
1573 }
1574 }
1575
1576 /* Train 2 */
1577 temp = I915_READ(fdi_tx_reg);
1578 temp &= ~FDI_LINK_TRAIN_NONE;
1579 temp |= FDI_LINK_TRAIN_PATTERN_2;
1580 I915_WRITE(fdi_tx_reg, temp);
1581
1582 temp = I915_READ(fdi_rx_reg);
1583 temp &= ~FDI_LINK_TRAIN_NONE;
1584 temp |= FDI_LINK_TRAIN_PATTERN_2;
1585 I915_WRITE(fdi_rx_reg, temp);
1586 udelay(150);
1587
1588 tries = 0;
1589
1590 for (;;) {
1591 temp = I915_READ(fdi_rx_iir_reg);
1592 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
1593
1594 if (temp & FDI_RX_SYMBOL_LOCK) {
1595 I915_WRITE(fdi_rx_iir_reg,
1596 temp | FDI_RX_SYMBOL_LOCK);
1597 DRM_DEBUG_KMS("FDI train 2 done.\n");
1598 break;
1599 }
1600
1601 tries++;
1602
1603 if (tries > 5) {
1604 DRM_DEBUG_KMS("FDI train 2 fail!\n");
1605 break;
1606 }
1607 }
1608
1609 DRM_DEBUG_KMS("FDI train done\n");
1610}
1611
1612static int snb_b_fdi_train_param [] = {
1613 FDI_LINK_TRAIN_400MV_0DB_SNB_B,
1614 FDI_LINK_TRAIN_400MV_6DB_SNB_B,
1615 FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
1616 FDI_LINK_TRAIN_800MV_0DB_SNB_B,
1617};
1618
1619/* The FDI link training functions for SNB/Cougarpoint. */
1620static void gen6_fdi_link_train(struct drm_crtc *crtc)
1621{
1622 struct drm_device *dev = crtc->dev;
1623 struct drm_i915_private *dev_priv = dev->dev_private;
1624 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1625 int pipe = intel_crtc->pipe;
1626 int fdi_tx_reg = (pipe == 0) ? FDI_TXA_CTL : FDI_TXB_CTL;
1627 int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL;
1628 int fdi_rx_iir_reg = (pipe == 0) ? FDI_RXA_IIR : FDI_RXB_IIR;
1629 int fdi_rx_imr_reg = (pipe == 0) ? FDI_RXA_IMR : FDI_RXB_IMR;
1630 u32 temp, i;
1631
1632 /* enable CPU FDI TX and PCH FDI RX */
1633 temp = I915_READ(fdi_tx_reg);
1634 temp |= FDI_TX_ENABLE;
1635 temp &= ~(7 << 19);
1636 temp |= (intel_crtc->fdi_lanes - 1) << 19;
1637 temp &= ~FDI_LINK_TRAIN_NONE;
1638 temp |= FDI_LINK_TRAIN_PATTERN_1;
1639 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
1640 /* SNB-B */
1641 temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
1642 I915_WRITE(fdi_tx_reg, temp);
1643 I915_READ(fdi_tx_reg);
1644
1645 temp = I915_READ(fdi_rx_reg);
1646 if (HAS_PCH_CPT(dev)) {
1647 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
1648 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
1649 } else {
1650 temp &= ~FDI_LINK_TRAIN_NONE;
1651 temp |= FDI_LINK_TRAIN_PATTERN_1;
1652 }
1653 I915_WRITE(fdi_rx_reg, temp | FDI_RX_ENABLE);
1654 I915_READ(fdi_rx_reg);
1655 udelay(150);
1656
 1657 /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
 1658 for train result */
1659 temp = I915_READ(fdi_rx_imr_reg);
1660 temp &= ~FDI_RX_SYMBOL_LOCK;
1661 temp &= ~FDI_RX_BIT_LOCK;
1662 I915_WRITE(fdi_rx_imr_reg, temp);
1663 I915_READ(fdi_rx_imr_reg);
1664 udelay(150);
1665
1666 for (i = 0; i < 4; i++ ) {
1667 temp = I915_READ(fdi_tx_reg);
1668 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
1669 temp |= snb_b_fdi_train_param[i];
1670 I915_WRITE(fdi_tx_reg, temp);
1671 udelay(500);
1672
1673 temp = I915_READ(fdi_rx_iir_reg);
1674 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
1675
1676 if (temp & FDI_RX_BIT_LOCK) {
1677 I915_WRITE(fdi_rx_iir_reg,
1678 temp | FDI_RX_BIT_LOCK);
1679 DRM_DEBUG_KMS("FDI train 1 done.\n");
1680 break;
1681 }
1682 }
1683 if (i == 4)
1684 DRM_DEBUG_KMS("FDI train 1 fail!\n");
1685
1686 /* Train 2 */
1687 temp = I915_READ(fdi_tx_reg);
1688 temp &= ~FDI_LINK_TRAIN_NONE;
1689 temp |= FDI_LINK_TRAIN_PATTERN_2;
1690 if (IS_GEN6(dev)) {
1691 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
1692 /* SNB-B */
1693 temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
1694 }
1695 I915_WRITE(fdi_tx_reg, temp);
1696
1697 temp = I915_READ(fdi_rx_reg);
1698 if (HAS_PCH_CPT(dev)) {
1699 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
1700 temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
1701 } else {
1702 temp &= ~FDI_LINK_TRAIN_NONE;
1703 temp |= FDI_LINK_TRAIN_PATTERN_2;
1704 }
1705 I915_WRITE(fdi_rx_reg, temp);
1706 udelay(150);
1707
1708 for (i = 0; i < 4; i++ ) {
1709 temp = I915_READ(fdi_tx_reg);
1710 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
1711 temp |= snb_b_fdi_train_param[i];
1712 I915_WRITE(fdi_tx_reg, temp);
1713 udelay(500);
1714
1715 temp = I915_READ(fdi_rx_iir_reg);
1716 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
1717
1718 if (temp & FDI_RX_SYMBOL_LOCK) {
1719 I915_WRITE(fdi_rx_iir_reg,
1720 temp | FDI_RX_SYMBOL_LOCK);
1721 DRM_DEBUG_KMS("FDI train 2 done.\n");
1722 break;
1723 }
1724 }
1725 if (i == 4)
1726 DRM_DEBUG_KMS("FDI train 2 fail!\n");
1727
1728 DRM_DEBUG_KMS("FDI train done.\n");
1729}
1730
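
gen6_fdi_link_train() differs from the Ironlake version above mainly in sweeping the four voltage-swing/pre-emphasis entries of snb_b_fdi_train_param for each training pattern instead of polling one fixed setting. A condensed sketch of that retry shape, where program_vswing() and rx_locked() are hypothetical helpers standing in for the register writes and IIR checks shown above:

	int i;

	for (i = 0; i < 4; i++) {
		program_vswing(snb_b_fdi_train_param[i]);	/* hypothetical */
		udelay(500);
		if (rx_locked())				/* hypothetical */
			break;
	}
	if (i == 4)
		DRM_DEBUG_KMS("no vswing/emphasis setting achieved lock\n");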
1513static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) 1731static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
1514{ 1732{
1515 struct drm_device *dev = crtc->dev; 1733 struct drm_device *dev = crtc->dev;
@@ -1523,8 +1741,6 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
1523 int dspbase_reg = (plane == 0) ? DSPAADDR : DSPBADDR; 1741 int dspbase_reg = (plane == 0) ? DSPAADDR : DSPBADDR;
1524 int fdi_tx_reg = (pipe == 0) ? FDI_TXA_CTL : FDI_TXB_CTL; 1742 int fdi_tx_reg = (pipe == 0) ? FDI_TXA_CTL : FDI_TXB_CTL;
1525 int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL; 1743 int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL;
1526 int fdi_rx_iir_reg = (pipe == 0) ? FDI_RXA_IIR : FDI_RXB_IIR;
1527 int fdi_rx_imr_reg = (pipe == 0) ? FDI_RXA_IMR : FDI_RXB_IMR;
1528 int transconf_reg = (pipe == 0) ? TRANSACONF : TRANSBCONF; 1744 int transconf_reg = (pipe == 0) ? TRANSACONF : TRANSBCONF;
1529 int pf_ctl_reg = (pipe == 0) ? PFA_CTL_1 : PFB_CTL_1; 1745 int pf_ctl_reg = (pipe == 0) ? PFA_CTL_1 : PFB_CTL_1;
1530 int pf_win_size = (pipe == 0) ? PFA_WIN_SZ : PFB_WIN_SZ; 1746 int pf_win_size = (pipe == 0) ? PFA_WIN_SZ : PFB_WIN_SZ;
@@ -1541,8 +1757,9 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
1541 int trans_vtot_reg = (pipe == 0) ? TRANS_VTOTAL_A : TRANS_VTOTAL_B; 1757 int trans_vtot_reg = (pipe == 0) ? TRANS_VTOTAL_A : TRANS_VTOTAL_B;
1542 int trans_vblank_reg = (pipe == 0) ? TRANS_VBLANK_A : TRANS_VBLANK_B; 1758 int trans_vblank_reg = (pipe == 0) ? TRANS_VBLANK_A : TRANS_VBLANK_B;
1543 int trans_vsync_reg = (pipe == 0) ? TRANS_VSYNC_A : TRANS_VSYNC_B; 1759 int trans_vsync_reg = (pipe == 0) ? TRANS_VSYNC_A : TRANS_VSYNC_B;
1760 int trans_dpll_sel = (pipe == 0) ? 0 : 1;
1544 u32 temp; 1761 u32 temp;
1545 int tries = 5, j, n; 1762 int n;
1546 u32 pipe_bpc; 1763 u32 pipe_bpc;
1547 1764
1548 temp = I915_READ(pipeconf_reg); 1765 temp = I915_READ(pipeconf_reg);
@@ -1569,12 +1786,6 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
1569 /* enable eDP PLL */ 1786 /* enable eDP PLL */
1570 ironlake_enable_pll_edp(crtc); 1787 ironlake_enable_pll_edp(crtc);
1571 } else { 1788 } else {
1572 /* enable PCH DPLL */
1573 temp = I915_READ(pch_dpll_reg);
1574 if ((temp & DPLL_VCO_ENABLE) == 0) {
1575 I915_WRITE(pch_dpll_reg, temp | DPLL_VCO_ENABLE);
1576 I915_READ(pch_dpll_reg);
1577 }
1578 1789
1579 /* enable PCH FDI RX PLL, wait warmup plus DMI latency */ 1790 /* enable PCH FDI RX PLL, wait warmup plus DMI latency */
1580 temp = I915_READ(fdi_rx_reg); 1791 temp = I915_READ(fdi_rx_reg);
@@ -1584,9 +1795,15 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
1584 */ 1795 */
1585 temp &= ~(0x7 << 16); 1796 temp &= ~(0x7 << 16);
1586 temp |= (pipe_bpc << 11); 1797 temp |= (pipe_bpc << 11);
1587 I915_WRITE(fdi_rx_reg, temp | FDI_RX_PLL_ENABLE | 1798 temp &= ~(7 << 19);
1588 FDI_SEL_PCDCLK | 1799 temp |= (intel_crtc->fdi_lanes - 1) << 19;
1589 FDI_DP_PORT_WIDTH_X4); /* default 4 lanes */ 1800 I915_WRITE(fdi_rx_reg, temp | FDI_RX_PLL_ENABLE);
1801 I915_READ(fdi_rx_reg);
1802 udelay(200);
1803
1804 /* Switch from Rawclk to PCDclk */
1805 temp = I915_READ(fdi_rx_reg);
1806 I915_WRITE(fdi_rx_reg, temp | FDI_SEL_PCDCLK);
1590 I915_READ(fdi_rx_reg); 1807 I915_READ(fdi_rx_reg);
1591 udelay(200); 1808 udelay(200);
1592 1809
@@ -1629,91 +1846,32 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
1629 } 1846 }
1630 1847
1631 if (!HAS_eDP) { 1848 if (!HAS_eDP) {
 1632 /* enable CPU FDI TX and PCH FDI RX */ 1849 /* For PCH output, train the FDI link */
1633 temp = I915_READ(fdi_tx_reg); 1850 if (IS_GEN6(dev))
1634 temp |= FDI_TX_ENABLE; 1851 gen6_fdi_link_train(crtc);
1635 temp |= FDI_DP_PORT_WIDTH_X4; /* default */ 1852 else
1636 temp &= ~FDI_LINK_TRAIN_NONE; 1853 ironlake_fdi_link_train(crtc);
1637 temp |= FDI_LINK_TRAIN_PATTERN_1;
1638 I915_WRITE(fdi_tx_reg, temp);
1639 I915_READ(fdi_tx_reg);
1640
1641 temp = I915_READ(fdi_rx_reg);
1642 temp &= ~FDI_LINK_TRAIN_NONE;
1643 temp |= FDI_LINK_TRAIN_PATTERN_1;
1644 I915_WRITE(fdi_rx_reg, temp | FDI_RX_ENABLE);
1645 I915_READ(fdi_rx_reg);
1646
1647 udelay(150);
1648
1649 /* Train FDI. */
1650 /* umask FDI RX Interrupt symbol_lock and bit_lock bit
1651 for train result */
1652 temp = I915_READ(fdi_rx_imr_reg);
1653 temp &= ~FDI_RX_SYMBOL_LOCK;
1654 temp &= ~FDI_RX_BIT_LOCK;
1655 I915_WRITE(fdi_rx_imr_reg, temp);
1656 I915_READ(fdi_rx_imr_reg);
1657 udelay(150);
1658 1854
1659 temp = I915_READ(fdi_rx_iir_reg); 1855 /* enable PCH DPLL */
1660 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); 1856 temp = I915_READ(pch_dpll_reg);
1661 1857 if ((temp & DPLL_VCO_ENABLE) == 0) {
1662 if ((temp & FDI_RX_BIT_LOCK) == 0) { 1858 I915_WRITE(pch_dpll_reg, temp | DPLL_VCO_ENABLE);
1663 for (j = 0; j < tries; j++) { 1859 I915_READ(pch_dpll_reg);
1664 temp = I915_READ(fdi_rx_iir_reg);
1665 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n",
1666 temp);
1667 if (temp & FDI_RX_BIT_LOCK)
1668 break;
1669 udelay(200);
1670 }
1671 if (j != tries)
1672 I915_WRITE(fdi_rx_iir_reg,
1673 temp | FDI_RX_BIT_LOCK);
1674 else
1675 DRM_DEBUG_KMS("train 1 fail\n");
1676 } else {
1677 I915_WRITE(fdi_rx_iir_reg,
1678 temp | FDI_RX_BIT_LOCK);
1679 DRM_DEBUG_KMS("train 1 ok 2!\n");
1680 } 1860 }
1681 temp = I915_READ(fdi_tx_reg); 1861 udelay(200);
1682 temp &= ~FDI_LINK_TRAIN_NONE;
1683 temp |= FDI_LINK_TRAIN_PATTERN_2;
1684 I915_WRITE(fdi_tx_reg, temp);
1685
1686 temp = I915_READ(fdi_rx_reg);
1687 temp &= ~FDI_LINK_TRAIN_NONE;
1688 temp |= FDI_LINK_TRAIN_PATTERN_2;
1689 I915_WRITE(fdi_rx_reg, temp);
1690
1691 udelay(150);
1692 1862
1693 temp = I915_READ(fdi_rx_iir_reg); 1863 if (HAS_PCH_CPT(dev)) {
1694 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); 1864 /* Be sure PCH DPLL SEL is set */
1695 1865 temp = I915_READ(PCH_DPLL_SEL);
1696 if ((temp & FDI_RX_SYMBOL_LOCK) == 0) { 1866 if (trans_dpll_sel == 0 &&
1697 for (j = 0; j < tries; j++) { 1867 (temp & TRANSA_DPLL_ENABLE) == 0)
1698 temp = I915_READ(fdi_rx_iir_reg); 1868 temp |= (TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL);
1699 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", 1869 else if (trans_dpll_sel == 1 &&
1700 temp); 1870 (temp & TRANSB_DPLL_ENABLE) == 0)
1701 if (temp & FDI_RX_SYMBOL_LOCK) 1871 temp |= (TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
1702 break; 1872 I915_WRITE(PCH_DPLL_SEL, temp);
1703 udelay(200); 1873 I915_READ(PCH_DPLL_SEL);
1704 }
1705 if (j != tries) {
1706 I915_WRITE(fdi_rx_iir_reg,
1707 temp | FDI_RX_SYMBOL_LOCK);
1708 DRM_DEBUG_KMS("train 2 ok 1!\n");
1709 } else
1710 DRM_DEBUG_KMS("train 2 fail\n");
1711 } else {
1712 I915_WRITE(fdi_rx_iir_reg,
1713 temp | FDI_RX_SYMBOL_LOCK);
1714 DRM_DEBUG_KMS("train 2 ok 2!\n");
1715 } 1874 }
1716 DRM_DEBUG_KMS("train done\n");
1717 1875
1718 /* set transcoder timing */ 1876 /* set transcoder timing */
1719 I915_WRITE(trans_htot_reg, I915_READ(cpu_htot_reg)); 1877 I915_WRITE(trans_htot_reg, I915_READ(cpu_htot_reg));
@@ -1724,6 +1882,60 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
1724 I915_WRITE(trans_vblank_reg, I915_READ(cpu_vblank_reg)); 1882 I915_WRITE(trans_vblank_reg, I915_READ(cpu_vblank_reg));
1725 I915_WRITE(trans_vsync_reg, I915_READ(cpu_vsync_reg)); 1883 I915_WRITE(trans_vsync_reg, I915_READ(cpu_vsync_reg));
1726 1884
1885 /* enable normal train */
1886 temp = I915_READ(fdi_tx_reg);
1887 temp &= ~FDI_LINK_TRAIN_NONE;
1888 I915_WRITE(fdi_tx_reg, temp | FDI_LINK_TRAIN_NONE |
1889 FDI_TX_ENHANCE_FRAME_ENABLE);
1890 I915_READ(fdi_tx_reg);
1891
1892 temp = I915_READ(fdi_rx_reg);
1893 if (HAS_PCH_CPT(dev)) {
1894 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
1895 temp |= FDI_LINK_TRAIN_NORMAL_CPT;
1896 } else {
1897 temp &= ~FDI_LINK_TRAIN_NONE;
1898 temp |= FDI_LINK_TRAIN_NONE;
1899 }
1900 I915_WRITE(fdi_rx_reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
1901 I915_READ(fdi_rx_reg);
1902
1903 /* wait one idle pattern time */
1904 udelay(100);
1905
1906 /* For PCH DP, enable TRANS_DP_CTL */
1907 if (HAS_PCH_CPT(dev) &&
1908 intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
1909 int trans_dp_ctl = (pipe == 0) ? TRANS_DP_CTL_A : TRANS_DP_CTL_B;
1910 int reg;
1911
1912 reg = I915_READ(trans_dp_ctl);
1913 reg &= ~TRANS_DP_PORT_SEL_MASK;
1914 reg = TRANS_DP_OUTPUT_ENABLE |
1915 TRANS_DP_ENH_FRAMING |
1916 TRANS_DP_VSYNC_ACTIVE_HIGH |
1917 TRANS_DP_HSYNC_ACTIVE_HIGH;
1918
1919 switch (intel_trans_dp_port_sel(crtc)) {
1920 case PCH_DP_B:
1921 reg |= TRANS_DP_PORT_SEL_B;
1922 break;
1923 case PCH_DP_C:
1924 reg |= TRANS_DP_PORT_SEL_C;
1925 break;
1926 case PCH_DP_D:
1927 reg |= TRANS_DP_PORT_SEL_D;
1928 break;
1929 default:
1930 DRM_DEBUG_KMS("Wrong PCH DP port return. Guess port B\n");
1931 reg |= TRANS_DP_PORT_SEL_B;
1932 break;
1933 }
1934
1935 I915_WRITE(trans_dp_ctl, reg);
1936 POSTING_READ(trans_dp_ctl);
1937 }
1938
1727 /* enable PCH transcoder */ 1939 /* enable PCH transcoder */
1728 temp = I915_READ(transconf_reg); 1940 temp = I915_READ(transconf_reg);
1729 /* 1941 /*
@@ -1738,23 +1950,6 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
1738 while ((I915_READ(transconf_reg) & TRANS_STATE_ENABLE) == 0) 1950 while ((I915_READ(transconf_reg) & TRANS_STATE_ENABLE) == 0)
1739 ; 1951 ;
1740 1952
1741 /* enable normal */
1742
1743 temp = I915_READ(fdi_tx_reg);
1744 temp &= ~FDI_LINK_TRAIN_NONE;
1745 I915_WRITE(fdi_tx_reg, temp | FDI_LINK_TRAIN_NONE |
1746 FDI_TX_ENHANCE_FRAME_ENABLE);
1747 I915_READ(fdi_tx_reg);
1748
1749 temp = I915_READ(fdi_rx_reg);
1750 temp &= ~FDI_LINK_TRAIN_NONE;
1751 I915_WRITE(fdi_rx_reg, temp | FDI_LINK_TRAIN_NONE |
1752 FDI_RX_ENHANCE_FRAME_ENABLE);
1753 I915_READ(fdi_rx_reg);
1754
1755 /* wait one idle pattern time */
1756 udelay(100);
1757
1758 } 1953 }
1759 1954
1760 intel_crtc_load_lut(crtc); 1955 intel_crtc_load_lut(crtc);
@@ -1805,6 +2000,8 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
1805 I915_READ(pf_ctl_reg); 2000 I915_READ(pf_ctl_reg);
1806 } 2001 }
1807 I915_WRITE(pf_win_size, 0); 2002 I915_WRITE(pf_win_size, 0);
2003 POSTING_READ(pf_win_size);
2004
1808 2005
1809 /* disable CPU FDI tx and PCH FDI rx */ 2006 /* disable CPU FDI tx and PCH FDI rx */
1810 temp = I915_READ(fdi_tx_reg); 2007 temp = I915_READ(fdi_tx_reg);
@@ -1825,11 +2022,18 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
1825 temp &= ~FDI_LINK_TRAIN_NONE; 2022 temp &= ~FDI_LINK_TRAIN_NONE;
1826 temp |= FDI_LINK_TRAIN_PATTERN_1; 2023 temp |= FDI_LINK_TRAIN_PATTERN_1;
1827 I915_WRITE(fdi_tx_reg, temp); 2024 I915_WRITE(fdi_tx_reg, temp);
2025 POSTING_READ(fdi_tx_reg);
1828 2026
1829 temp = I915_READ(fdi_rx_reg); 2027 temp = I915_READ(fdi_rx_reg);
1830 temp &= ~FDI_LINK_TRAIN_NONE; 2028 if (HAS_PCH_CPT(dev)) {
1831 temp |= FDI_LINK_TRAIN_PATTERN_1; 2029 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2030 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
2031 } else {
2032 temp &= ~FDI_LINK_TRAIN_NONE;
2033 temp |= FDI_LINK_TRAIN_PATTERN_1;
2034 }
1832 I915_WRITE(fdi_rx_reg, temp); 2035 I915_WRITE(fdi_rx_reg, temp);
2036 POSTING_READ(fdi_rx_reg);
1833 2037
1834 udelay(100); 2038 udelay(100);
1835 2039
@@ -1859,6 +2063,7 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
1859 } 2063 }
1860 } 2064 }
1861 } 2065 }
2066
1862 temp = I915_READ(transconf_reg); 2067 temp = I915_READ(transconf_reg);
1863 /* BPC in transcoder is consistent with that in pipeconf */ 2068 /* BPC in transcoder is consistent with that in pipeconf */
1864 temp &= ~PIPE_BPC_MASK; 2069 temp &= ~PIPE_BPC_MASK;
@@ -1867,35 +2072,53 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
1867 I915_READ(transconf_reg); 2072 I915_READ(transconf_reg);
1868 udelay(100); 2073 udelay(100);
1869 2074
2075 if (HAS_PCH_CPT(dev)) {
2076 /* disable TRANS_DP_CTL */
2077 int trans_dp_ctl = (pipe == 0) ? TRANS_DP_CTL_A : TRANS_DP_CTL_B;
2078 int reg;
2079
2080 reg = I915_READ(trans_dp_ctl);
2081 reg &= ~(TRANS_DP_OUTPUT_ENABLE | TRANS_DP_PORT_SEL_MASK);
2082 I915_WRITE(trans_dp_ctl, reg);
2083 POSTING_READ(trans_dp_ctl);
2084
2085 /* disable DPLL_SEL */
2086 temp = I915_READ(PCH_DPLL_SEL);
2087 if (trans_dpll_sel == 0)
2088 temp &= ~(TRANSA_DPLL_ENABLE | TRANSA_DPLLB_SEL);
2089 else
2090 temp &= ~(TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
2091 I915_WRITE(PCH_DPLL_SEL, temp);
2092 I915_READ(PCH_DPLL_SEL);
2093
2094 }
2095
1870 /* disable PCH DPLL */ 2096 /* disable PCH DPLL */
1871 temp = I915_READ(pch_dpll_reg); 2097 temp = I915_READ(pch_dpll_reg);
1872 if ((temp & DPLL_VCO_ENABLE) != 0) { 2098 I915_WRITE(pch_dpll_reg, temp & ~DPLL_VCO_ENABLE);
1873 I915_WRITE(pch_dpll_reg, temp & ~DPLL_VCO_ENABLE); 2099 I915_READ(pch_dpll_reg);
1874 I915_READ(pch_dpll_reg);
1875 }
1876 2100
1877 if (HAS_eDP) { 2101 if (HAS_eDP) {
1878 ironlake_disable_pll_edp(crtc); 2102 ironlake_disable_pll_edp(crtc);
1879 } 2103 }
1880 2104
2105 /* Switch from PCDclk to Rawclk */
1881 temp = I915_READ(fdi_rx_reg); 2106 temp = I915_READ(fdi_rx_reg);
1882 temp &= ~FDI_SEL_PCDCLK; 2107 temp &= ~FDI_SEL_PCDCLK;
1883 I915_WRITE(fdi_rx_reg, temp); 2108 I915_WRITE(fdi_rx_reg, temp);
1884 I915_READ(fdi_rx_reg); 2109 I915_READ(fdi_rx_reg);
1885 2110
2111 /* Disable CPU FDI TX PLL */
2112 temp = I915_READ(fdi_tx_reg);
2113 I915_WRITE(fdi_tx_reg, temp & ~FDI_TX_PLL_ENABLE);
2114 I915_READ(fdi_tx_reg);
2115 udelay(100);
2116
1886 temp = I915_READ(fdi_rx_reg); 2117 temp = I915_READ(fdi_rx_reg);
1887 temp &= ~FDI_RX_PLL_ENABLE; 2118 temp &= ~FDI_RX_PLL_ENABLE;
1888 I915_WRITE(fdi_rx_reg, temp); 2119 I915_WRITE(fdi_rx_reg, temp);
1889 I915_READ(fdi_rx_reg); 2120 I915_READ(fdi_rx_reg);
1890 2121
1891 /* Disable CPU FDI TX PLL */
1892 temp = I915_READ(fdi_tx_reg);
1893 if ((temp & FDI_TX_PLL_ENABLE) != 0) {
1894 I915_WRITE(fdi_tx_reg, temp & ~FDI_TX_PLL_ENABLE);
1895 I915_READ(fdi_tx_reg);
1896 udelay(100);
1897 }
1898
1899 /* Wait for the clocks to turn off. */ 2122 /* Wait for the clocks to turn off. */
1900 udelay(100); 2123 udelay(100);
1901 break; 2124 break;
@@ -2331,6 +2554,30 @@ static struct intel_watermark_params i830_wm_info = {
2331 I830_FIFO_LINE_SIZE 2554 I830_FIFO_LINE_SIZE
2332}; 2555};
2333 2556
2557static struct intel_watermark_params ironlake_display_wm_info = {
2558 ILK_DISPLAY_FIFO,
2559 ILK_DISPLAY_MAXWM,
2560 ILK_DISPLAY_DFTWM,
2561 2,
2562 ILK_FIFO_LINE_SIZE
2563};
2564
2565static struct intel_watermark_params ironlake_display_srwm_info = {
2566 ILK_DISPLAY_SR_FIFO,
2567 ILK_DISPLAY_MAX_SRWM,
2568 ILK_DISPLAY_DFT_SRWM,
2569 2,
2570 ILK_FIFO_LINE_SIZE
2571};
2572
2573static struct intel_watermark_params ironlake_cursor_srwm_info = {
2574 ILK_CURSOR_SR_FIFO,
2575 ILK_CURSOR_MAX_SRWM,
2576 ILK_CURSOR_DFT_SRWM,
2577 2,
2578 ILK_FIFO_LINE_SIZE
2579};
2580
2334/** 2581/**
2335 * intel_calculate_wm - calculate watermark level 2582 * intel_calculate_wm - calculate watermark level
2336 * @clock_in_khz: pixel clock 2583 * @clock_in_khz: pixel clock
@@ -2449,66 +2696,6 @@ static void pineview_disable_cxsr(struct drm_device *dev)
2449 DRM_INFO("Big FIFO is disabled\n"); 2696 DRM_INFO("Big FIFO is disabled\n");
2450} 2697}
2451 2698
2452static void pineview_enable_cxsr(struct drm_device *dev, unsigned long clock,
2453 int pixel_size)
2454{
2455 struct drm_i915_private *dev_priv = dev->dev_private;
2456 u32 reg;
2457 unsigned long wm;
2458 struct cxsr_latency *latency;
2459
2460 latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->fsb_freq,
2461 dev_priv->mem_freq);
2462 if (!latency) {
2463 DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
2464 pineview_disable_cxsr(dev);
2465 return;
2466 }
2467
2468 /* Display SR */
2469 wm = intel_calculate_wm(clock, &pineview_display_wm, pixel_size,
2470 latency->display_sr);
2471 reg = I915_READ(DSPFW1);
2472 reg &= 0x7fffff;
2473 reg |= wm << 23;
2474 I915_WRITE(DSPFW1, reg);
2475 DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);
2476
2477 /* cursor SR */
2478 wm = intel_calculate_wm(clock, &pineview_cursor_wm, pixel_size,
2479 latency->cursor_sr);
2480 reg = I915_READ(DSPFW3);
2481 reg &= ~(0x3f << 24);
2482 reg |= (wm & 0x3f) << 24;
2483 I915_WRITE(DSPFW3, reg);
2484
2485 /* Display HPLL off SR */
2486 wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm,
2487 latency->display_hpll_disable, I915_FIFO_LINE_SIZE);
2488 reg = I915_READ(DSPFW3);
2489 reg &= 0xfffffe00;
2490 reg |= wm & 0x1ff;
2491 I915_WRITE(DSPFW3, reg);
2492
2493 /* cursor HPLL off SR */
2494 wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm, pixel_size,
2495 latency->cursor_hpll_disable);
2496 reg = I915_READ(DSPFW3);
2497 reg &= ~(0x3f << 16);
2498 reg |= (wm & 0x3f) << 16;
2499 I915_WRITE(DSPFW3, reg);
2500 DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);
2501
2502 /* activate cxsr */
2503 reg = I915_READ(DSPFW3);
2504 reg |= PINEVIEW_SELF_REFRESH_EN;
2505 I915_WRITE(DSPFW3, reg);
2506
2507 DRM_INFO("Big FIFO is enabled\n");
2508
2509 return;
2510}
2511
2512/* 2699/*
2513 * Latency for FIFO fetches is dependent on several factors: 2700 * Latency for FIFO fetches is dependent on several factors:
2514 * - memory configuration (speed, channels) 2701 * - memory configuration (speed, channels)
@@ -2593,6 +2780,71 @@ static int i830_get_fifo_size(struct drm_device *dev, int plane)
2593 return size; 2780 return size;
2594} 2781}
2595 2782
2783static void pineview_update_wm(struct drm_device *dev, int planea_clock,
2784 int planeb_clock, int sr_hdisplay, int pixel_size)
2785{
2786 struct drm_i915_private *dev_priv = dev->dev_private;
2787 u32 reg;
2788 unsigned long wm;
2789 struct cxsr_latency *latency;
2790 int sr_clock;
2791
2792 latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->fsb_freq,
2793 dev_priv->mem_freq);
2794 if (!latency) {
2795 DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
2796 pineview_disable_cxsr(dev);
2797 return;
2798 }
2799
2800 if (!planea_clock || !planeb_clock) {
2801 sr_clock = planea_clock ? planea_clock : planeb_clock;
2802
2803 /* Display SR */
2804 wm = intel_calculate_wm(sr_clock, &pineview_display_wm,
2805 pixel_size, latency->display_sr);
2806 reg = I915_READ(DSPFW1);
2807 reg &= ~DSPFW_SR_MASK;
2808 reg |= wm << DSPFW_SR_SHIFT;
2809 I915_WRITE(DSPFW1, reg);
2810 DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);
2811
2812 /* cursor SR */
2813 wm = intel_calculate_wm(sr_clock, &pineview_cursor_wm,
2814 pixel_size, latency->cursor_sr);
2815 reg = I915_READ(DSPFW3);
2816 reg &= ~DSPFW_CURSOR_SR_MASK;
2817 reg |= (wm & 0x3f) << DSPFW_CURSOR_SR_SHIFT;
2818 I915_WRITE(DSPFW3, reg);
2819
2820 /* Display HPLL off SR */
2821 wm = intel_calculate_wm(sr_clock, &pineview_display_hplloff_wm,
2822 pixel_size, latency->display_hpll_disable);
2823 reg = I915_READ(DSPFW3);
2824 reg &= ~DSPFW_HPLL_SR_MASK;
2825 reg |= wm & DSPFW_HPLL_SR_MASK;
2826 I915_WRITE(DSPFW3, reg);
2827
2828 /* cursor HPLL off SR */
2829 wm = intel_calculate_wm(sr_clock, &pineview_cursor_hplloff_wm,
2830 pixel_size, latency->cursor_hpll_disable);
2831 reg = I915_READ(DSPFW3);
2832 reg &= ~DSPFW_HPLL_CURSOR_MASK;
2833 reg |= (wm & 0x3f) << DSPFW_HPLL_CURSOR_SHIFT;
2834 I915_WRITE(DSPFW3, reg);
2835 DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);
2836
2837 /* activate cxsr */
2838 reg = I915_READ(DSPFW3);
2839 reg |= PINEVIEW_SELF_REFRESH_EN;
2840 I915_WRITE(DSPFW3, reg);
2841 DRM_DEBUG_KMS("Self-refresh is enabled\n");
2842 } else {
2843 pineview_disable_cxsr(dev);
2844 DRM_DEBUG_KMS("Self-refresh is disabled\n");
2845 }
2846}
2847
2596static void g4x_update_wm(struct drm_device *dev, int planea_clock, 2848static void g4x_update_wm(struct drm_device *dev, int planea_clock,
2597 int planeb_clock, int sr_hdisplay, int pixel_size) 2849 int planeb_clock, int sr_hdisplay, int pixel_size)
2598{ 2850{
@@ -2813,6 +3065,108 @@ static void i830_update_wm(struct drm_device *dev, int planea_clock, int unused,
2813 I915_WRITE(FW_BLC, fwater_lo); 3065 I915_WRITE(FW_BLC, fwater_lo);
2814} 3066}
2815 3067
3068#define ILK_LP0_PLANE_LATENCY 700
3069
3070static void ironlake_update_wm(struct drm_device *dev, int planea_clock,
3071 int planeb_clock, int sr_hdisplay, int pixel_size)
3072{
3073 struct drm_i915_private *dev_priv = dev->dev_private;
3074 int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
3075 int sr_wm, cursor_wm;
3076 unsigned long line_time_us;
3077 int sr_clock, entries_required;
3078 u32 reg_value;
3079
3080 /* Calculate and update the watermark for plane A */
3081 if (planea_clock) {
3082 entries_required = ((planea_clock / 1000) * pixel_size *
3083 ILK_LP0_PLANE_LATENCY) / 1000;
3084 entries_required = DIV_ROUND_UP(entries_required,
3085 ironlake_display_wm_info.cacheline_size);
3086 planea_wm = entries_required +
3087 ironlake_display_wm_info.guard_size;
3088
3089 if (planea_wm > (int)ironlake_display_wm_info.max_wm)
3090 planea_wm = ironlake_display_wm_info.max_wm;
3091
3092 cursora_wm = 16;
3093 reg_value = I915_READ(WM0_PIPEA_ILK);
3094 reg_value &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
3095 reg_value |= (planea_wm << WM0_PIPE_PLANE_SHIFT) |
3096 (cursora_wm & WM0_PIPE_CURSOR_MASK);
3097 I915_WRITE(WM0_PIPEA_ILK, reg_value);
3098 DRM_DEBUG_KMS("FIFO watermarks For pipe A - plane %d, "
3099 "cursor: %d\n", planea_wm, cursora_wm);
3100 }
3101 /* Calculate and update the watermark for plane B */
3102 if (planeb_clock) {
3103 entries_required = ((planeb_clock / 1000) * pixel_size *
3104 ILK_LP0_PLANE_LATENCY) / 1000;
3105 entries_required = DIV_ROUND_UP(entries_required,
3106 ironlake_display_wm_info.cacheline_size);
3107 planeb_wm = entries_required +
3108 ironlake_display_wm_info.guard_size;
3109
3110 if (planeb_wm > (int)ironlake_display_wm_info.max_wm)
3111 planeb_wm = ironlake_display_wm_info.max_wm;
3112
3113 cursorb_wm = 16;
3114 reg_value = I915_READ(WM0_PIPEB_ILK);
3115 reg_value &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
3116 reg_value |= (planeb_wm << WM0_PIPE_PLANE_SHIFT) |
3117 (cursorb_wm & WM0_PIPE_CURSOR_MASK);
3118 I915_WRITE(WM0_PIPEB_ILK, reg_value);
3119 DRM_DEBUG_KMS("FIFO watermarks For pipe B - plane %d, "
3120 "cursor: %d\n", planeb_wm, cursorb_wm);
3121 }
3122
3123 /*
3124 * Calculate and update the self-refresh watermark only when one
3125 * display plane is used.
3126 */
3127 if (!planea_clock || !planeb_clock) {
3128 int line_count;
3129 /* Read the self-refresh latency. The unit is 0.5us */
3130 int ilk_sr_latency = I915_READ(MLTR_ILK) & ILK_SRLT_MASK;
3131
3132 sr_clock = planea_clock ? planea_clock : planeb_clock;
3133 line_time_us = ((sr_hdisplay * 1000) / sr_clock);
3134
3135 /* Use ns/us then divide to preserve precision */
3136 line_count = ((ilk_sr_latency * 500) / line_time_us + 1000)
3137 / 1000;
3138
3139 /* calculate the self-refresh watermark for display plane */
3140 entries_required = line_count * sr_hdisplay * pixel_size;
3141 entries_required = DIV_ROUND_UP(entries_required,
3142 ironlake_display_srwm_info.cacheline_size);
3143 sr_wm = entries_required +
3144 ironlake_display_srwm_info.guard_size;
3145
3146 /* calculate the self-refresh watermark for display cursor */
3147 entries_required = line_count * pixel_size * 64;
3148 entries_required = DIV_ROUND_UP(entries_required,
3149 ironlake_cursor_srwm_info.cacheline_size);
3150 cursor_wm = entries_required +
3151 ironlake_cursor_srwm_info.guard_size;
3152
3153 /* configure watermark and enable self-refresh */
3154 reg_value = I915_READ(WM1_LP_ILK);
3155 reg_value &= ~(WM1_LP_LATENCY_MASK | WM1_LP_SR_MASK |
3156 WM1_LP_CURSOR_MASK);
3157 reg_value |= WM1_LP_SR_EN |
3158 (ilk_sr_latency << WM1_LP_LATENCY_SHIFT) |
3159 (sr_wm << WM1_LP_SR_SHIFT) | cursor_wm;
3160
3161 I915_WRITE(WM1_LP_ILK, reg_value);
3162 DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
3163 "cursor %d\n", sr_wm, cursor_wm);
3164
3165 } else {
3166 /* Turn off self refresh if both pipes are enabled */
3167 I915_WRITE(WM1_LP_ILK, I915_READ(WM1_LP_ILK) & ~WM1_LP_SR_EN);
3168 }
3169}
2816/** 3170/**
2817 * intel_update_watermarks - update FIFO watermark values based on current modes 3171 * intel_update_watermarks - update FIFO watermark values based on current modes
2818 * 3172 *
@@ -2882,12 +3236,6 @@ static void intel_update_watermarks(struct drm_device *dev)
2882 if (enabled <= 0) 3236 if (enabled <= 0)
2883 return; 3237 return;
2884 3238
2885 /* Single plane configs can enable self refresh */
2886 if (enabled == 1 && IS_PINEVIEW(dev))
2887 pineview_enable_cxsr(dev, sr_clock, pixel_size);
2888 else if (IS_PINEVIEW(dev))
2889 pineview_disable_cxsr(dev);
2890
2891 dev_priv->display.update_wm(dev, planea_clock, planeb_clock, 3239 dev_priv->display.update_wm(dev, planea_clock, planeb_clock,
2892 sr_hdisplay, pixel_size); 3240 sr_hdisplay, pixel_size);
2893} 3241}
@@ -2924,7 +3272,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
2924 bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false; 3272 bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
2925 bool is_edp = false; 3273 bool is_edp = false;
2926 struct drm_mode_config *mode_config = &dev->mode_config; 3274 struct drm_mode_config *mode_config = &dev->mode_config;
2927 struct drm_connector *connector; 3275 struct drm_encoder *encoder;
3276 struct intel_encoder *intel_encoder = NULL;
2928 const intel_limit_t *limit; 3277 const intel_limit_t *limit;
2929 int ret; 3278 int ret;
2930 struct fdi_m_n m_n = {0}; 3279 struct fdi_m_n m_n = {0};
@@ -2935,6 +3284,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
2935 int pch_fp_reg = (pipe == 0) ? PCH_FPA0 : PCH_FPB0; 3284 int pch_fp_reg = (pipe == 0) ? PCH_FPA0 : PCH_FPB0;
2936 int pch_dpll_reg = (pipe == 0) ? PCH_DPLL_A : PCH_DPLL_B; 3285 int pch_dpll_reg = (pipe == 0) ? PCH_DPLL_A : PCH_DPLL_B;
2937 int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL; 3286 int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL;
3287 int fdi_tx_reg = (pipe == 0) ? FDI_TXA_CTL : FDI_TXB_CTL;
3288 int trans_dpll_sel = (pipe == 0) ? 0 : 1;
2938 int lvds_reg = LVDS; 3289 int lvds_reg = LVDS;
2939 u32 temp; 3290 u32 temp;
2940 int sdvo_pixel_multiply; 3291 int sdvo_pixel_multiply;
@@ -2942,12 +3293,13 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
2942 3293
2943 drm_vblank_pre_modeset(dev, pipe); 3294 drm_vblank_pre_modeset(dev, pipe);
2944 3295
2945 list_for_each_entry(connector, &mode_config->connector_list, head) { 3296 list_for_each_entry(encoder, &mode_config->encoder_list, head) {
2946 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
2947 3297
2948 if (!connector->encoder || connector->encoder->crtc != crtc) 3298 if (!encoder || encoder->crtc != crtc)
2949 continue; 3299 continue;
2950 3300
3301 intel_encoder = enc_to_intel_encoder(encoder);
3302
2951 switch (intel_encoder->type) { 3303 switch (intel_encoder->type) {
2952 case INTEL_OUTPUT_LVDS: 3304 case INTEL_OUTPUT_LVDS:
2953 is_lvds = true; 3305 is_lvds = true;
@@ -3043,14 +3395,12 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3043 3395
3044 /* FDI link */ 3396 /* FDI link */
3045 if (HAS_PCH_SPLIT(dev)) { 3397 if (HAS_PCH_SPLIT(dev)) {
3046 int lane, link_bw, bpp; 3398 int lane = 0, link_bw, bpp;
3047 /* eDP doesn't require FDI link, so just set DP M/N 3399 /* eDP doesn't require FDI link, so just set DP M/N
3048 according to current link config */ 3400 according to current link config */
3049 if (is_edp) { 3401 if (is_edp) {
3050 struct drm_connector *edp;
3051 target_clock = mode->clock; 3402 target_clock = mode->clock;
3052 edp = intel_pipe_get_connector(crtc); 3403 intel_edp_link_config(intel_encoder,
3053 intel_edp_link_config(to_intel_encoder(edp),
3054 &lane, &link_bw); 3404 &lane, &link_bw);
3055 } else { 3405 } else {
3056 /* DP over FDI requires target mode clock 3406 /* DP over FDI requires target mode clock
@@ -3059,7 +3409,6 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3059 target_clock = mode->clock; 3409 target_clock = mode->clock;
3060 else 3410 else
3061 target_clock = adjusted_mode->clock; 3411 target_clock = adjusted_mode->clock;
3062 lane = 4;
3063 link_bw = 270000; 3412 link_bw = 270000;
3064 } 3413 }
3065 3414
@@ -3111,6 +3460,18 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3111 bpp = 24; 3460 bpp = 24;
3112 } 3461 }
3113 3462
3463 if (!lane) {
3464 /*
3465 * Account for spread spectrum to avoid
3466 * oversubscribing the link. Max center spread
3467 * is 2.5%; use 5% for safety's sake.
3468 */
3469 u32 bps = target_clock * bpp * 21 / 20;
3470 lane = bps / (link_bw * 8) + 1;
3471 }
3472
3473 intel_crtc->fdi_lanes = lane;
3474
3114 ironlake_compute_m_n(bpp, lane, target_clock, link_bw, &m_n); 3475 ironlake_compute_m_n(bpp, lane, target_clock, link_bw, &m_n);
3115 } 3476 }
3116 3477
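
The 21/20 factor in the new lane computation is the 5% spread-spectrum margin the comment calls out. A worked example with a hypothetical 193 MHz, 24 bpp mode on the link_bw = 270000 used for DP over FDI above:

	u32 target_clock = 193000;		/* kHz; hypothetical mode */
	u32 bps = target_clock * 24 * 21 / 20;	/* = 4,863,600 kbit/s */
	u32 lane = bps / (270000 * 8) + 1;	/* = 4863600 / 2160000 + 1 = 3 */

A mode needing just over two lanes' worth of bandwidth rounds up to three lanes once the margin is applied.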
@@ -3265,11 +3626,6 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3265 pipeconf &= ~PIPEACONF_DOUBLE_WIDE; 3626 pipeconf &= ~PIPEACONF_DOUBLE_WIDE;
3266 } 3627 }
3267 3628
3268 dspcntr |= DISPLAY_PLANE_ENABLE;
3269 pipeconf |= PIPEACONF_ENABLE;
3270 dpll |= DPLL_VCO_ENABLE;
3271
3272
3273 /* Disable the panel fitter if it was on our pipe */ 3629 /* Disable the panel fitter if it was on our pipe */
3274 if (!HAS_PCH_SPLIT(dev) && intel_panel_fitter_pipe(dev) == pipe) 3630 if (!HAS_PCH_SPLIT(dev) && intel_panel_fitter_pipe(dev) == pipe)
3275 I915_WRITE(PFIT_CONTROL, 0); 3631 I915_WRITE(PFIT_CONTROL, 0);
@@ -3292,6 +3648,18 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3292 udelay(150); 3648 udelay(150);
3293 } 3649 }
3294 3650
3651 /* enable transcoder DPLL */
3652 if (HAS_PCH_CPT(dev)) {
3653 temp = I915_READ(PCH_DPLL_SEL);
3654 if (trans_dpll_sel == 0)
3655 temp |= (TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL);
3656 else
3657 temp |= (TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
3658 I915_WRITE(PCH_DPLL_SEL, temp);
3659 I915_READ(PCH_DPLL_SEL);
3660 udelay(150);
3661 }
3662
3295 /* The LVDS pin pair needs to be on before the DPLLs are enabled. 3663 /* The LVDS pin pair needs to be on before the DPLLs are enabled.
3296 * This is an exception to the general rule that mode_set doesn't turn 3664 * This is an exception to the general rule that mode_set doesn't turn
3297 * things on. 3665 * things on.
@@ -3303,7 +3671,18 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3303 lvds_reg = PCH_LVDS; 3671 lvds_reg = PCH_LVDS;
3304 3672
3305 lvds = I915_READ(lvds_reg); 3673 lvds = I915_READ(lvds_reg);
3306 lvds |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP | LVDS_PIPEB_SELECT; 3674 lvds |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
3675 if (pipe == 1) {
3676 if (HAS_PCH_CPT(dev))
3677 lvds |= PORT_TRANS_B_SEL_CPT;
3678 else
3679 lvds |= LVDS_PIPEB_SELECT;
3680 } else {
3681 if (HAS_PCH_CPT(dev))
3682 lvds &= ~PORT_TRANS_SEL_MASK;
3683 else
3684 lvds &= ~LVDS_PIPEB_SELECT;
3685 }
 3307 /* set the corresponding LVDS_BORDER bit */ 3686 /* set the corresponding LVDS_BORDER bit */
3308 lvds |= dev_priv->lvds_border_bits; 3687 lvds |= dev_priv->lvds_border_bits;
3309 /* Set the B0-B3 data pairs corresponding to whether we're going to 3688 /* Set the B0-B3 data pairs corresponding to whether we're going to
@@ -3321,14 +3700,16 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3321 /* set the dithering flag */ 3700 /* set the dithering flag */
3322 if (IS_I965G(dev)) { 3701 if (IS_I965G(dev)) {
3323 if (dev_priv->lvds_dither) { 3702 if (dev_priv->lvds_dither) {
3324 if (HAS_PCH_SPLIT(dev)) 3703 if (HAS_PCH_SPLIT(dev)) {
3325 pipeconf |= PIPE_ENABLE_DITHER; 3704 pipeconf |= PIPE_ENABLE_DITHER;
3326 else 3705 pipeconf |= PIPE_DITHER_TYPE_ST01;
3706 } else
3327 lvds |= LVDS_ENABLE_DITHER; 3707 lvds |= LVDS_ENABLE_DITHER;
3328 } else { 3708 } else {
3329 if (HAS_PCH_SPLIT(dev)) 3709 if (HAS_PCH_SPLIT(dev)) {
3330 pipeconf &= ~PIPE_ENABLE_DITHER; 3710 pipeconf &= ~PIPE_ENABLE_DITHER;
3331 else 3711 pipeconf &= ~PIPE_DITHER_TYPE_MASK;
3712 } else
3332 lvds &= ~LVDS_ENABLE_DITHER; 3713 lvds &= ~LVDS_ENABLE_DITHER;
3333 } 3714 }
3334 } 3715 }
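/*
 * Note on the dither hunk above (illustrative): on PCH platforms
 * dithering needs both the enable bit and a dither type in PIPECONF,
 * so enable now also sets PIPE_DITHER_TYPE_ST01 and disable clears the
 * whole PIPE_DITHER_TYPE_MASK field, while pre-PCH parts keep using
 * the LVDS_ENABLE_DITHER bit in the LVDS register.
 */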
@@ -3337,6 +3718,20 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3337 } 3718 }
3338 if (is_dp) 3719 if (is_dp)
3339 intel_dp_set_m_n(crtc, mode, adjusted_mode); 3720 intel_dp_set_m_n(crtc, mode, adjusted_mode);
3721 else if (HAS_PCH_SPLIT(dev)) {
 3722 /* For non-DP output, clear any trans DP clock recovery setting. */
3723 if (pipe == 0) {
3724 I915_WRITE(TRANSA_DATA_M1, 0);
3725 I915_WRITE(TRANSA_DATA_N1, 0);
3726 I915_WRITE(TRANSA_DP_LINK_M1, 0);
3727 I915_WRITE(TRANSA_DP_LINK_N1, 0);
3728 } else {
3729 I915_WRITE(TRANSB_DATA_M1, 0);
3730 I915_WRITE(TRANSB_DATA_N1, 0);
3731 I915_WRITE(TRANSB_DP_LINK_M1, 0);
3732 I915_WRITE(TRANSB_DP_LINK_N1, 0);
3733 }
3734 }
3340 3735
3341 if (!is_edp) { 3736 if (!is_edp) {
3342 I915_WRITE(fp_reg, fp); 3737 I915_WRITE(fp_reg, fp);
@@ -3411,6 +3806,18 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3411 /* enable FDI RX PLL too */ 3806 /* enable FDI RX PLL too */
3412 temp = I915_READ(fdi_rx_reg); 3807 temp = I915_READ(fdi_rx_reg);
3413 I915_WRITE(fdi_rx_reg, temp | FDI_RX_PLL_ENABLE); 3808 I915_WRITE(fdi_rx_reg, temp | FDI_RX_PLL_ENABLE);
3809 I915_READ(fdi_rx_reg);
3810 udelay(200);
3811
3812 /* enable FDI TX PLL too */
3813 temp = I915_READ(fdi_tx_reg);
3814 I915_WRITE(fdi_tx_reg, temp | FDI_TX_PLL_ENABLE);
3815 I915_READ(fdi_tx_reg);
3816
3817 /* enable FDI RX PCDCLK */
3818 temp = I915_READ(fdi_rx_reg);
3819 I915_WRITE(fdi_rx_reg, temp | FDI_SEL_PCDCLK);
3820 I915_READ(fdi_rx_reg);
3414 udelay(200); 3821 udelay(200);
3415 } 3822 }
3416 } 3823 }
@@ -3671,6 +4078,7 @@ static struct drm_display_mode load_detect_mode = {
3671}; 4078};
3672 4079
3673struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder, 4080struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
4081 struct drm_connector *connector,
3674 struct drm_display_mode *mode, 4082 struct drm_display_mode *mode,
3675 int *dpms_mode) 4083 int *dpms_mode)
3676{ 4084{
@@ -3729,7 +4137,7 @@ struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
3729 } 4137 }
3730 4138
3731 encoder->crtc = crtc; 4139 encoder->crtc = crtc;
3732 intel_encoder->base.encoder = encoder; 4140 connector->encoder = encoder;
3733 intel_encoder->load_detect_temp = true; 4141 intel_encoder->load_detect_temp = true;
3734 4142
3735 intel_crtc = to_intel_crtc(crtc); 4143 intel_crtc = to_intel_crtc(crtc);
@@ -3755,7 +4163,8 @@ struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
3755 return crtc; 4163 return crtc;
3756} 4164}
3757 4165
3758void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder, int dpms_mode) 4166void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder,
4167 struct drm_connector *connector, int dpms_mode)
3759{ 4168{
3760 struct drm_encoder *encoder = &intel_encoder->enc; 4169 struct drm_encoder *encoder = &intel_encoder->enc;
3761 struct drm_device *dev = encoder->dev; 4170 struct drm_device *dev = encoder->dev;
@@ -3765,7 +4174,7 @@ void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder, int dpm
3765 4174
3766 if (intel_encoder->load_detect_temp) { 4175 if (intel_encoder->load_detect_temp) {
3767 encoder->crtc = NULL; 4176 encoder->crtc = NULL;
3768 intel_encoder->base.encoder = NULL; 4177 connector->encoder = NULL;
3769 intel_encoder->load_detect_temp = false; 4178 intel_encoder->load_detect_temp = false;
3770 crtc->enabled = drm_helper_crtc_in_use(crtc); 4179 crtc->enabled = drm_helper_crtc_in_use(crtc);
3771 drm_helper_disable_unused_functions(dev); 4180 drm_helper_disable_unused_functions(dev);
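/*
 * Hedged usage sketch for the reworked load-detect API (the caller and
 * its status handling are illustrative): the connector is now passed
 * explicitly because intel_encoder no longer embeds a drm_connector.
 */
static enum drm_connector_status
probe_via_load_detect(struct intel_encoder *intel_encoder,
		      struct drm_connector *connector)
{
	enum drm_connector_status status = connector_status_disconnected;
	struct drm_crtc *crtc;
	int dpms_mode;

	crtc = intel_get_load_detect_pipe(intel_encoder, connector,
					  NULL, &dpms_mode);
	if (crtc) {
		/* ... perform the hardware load detection here ... */
		intel_release_load_detect_pipe(intel_encoder, connector,
					       dpms_mode);
	}
	return status;
}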
@@ -4392,14 +4801,14 @@ struct drm_crtc *intel_get_crtc_from_pipe(struct drm_device *dev, int pipe)
4392 return crtc; 4801 return crtc;
4393} 4802}
4394 4803
4395static int intel_connector_clones(struct drm_device *dev, int type_mask) 4804static int intel_encoder_clones(struct drm_device *dev, int type_mask)
4396{ 4805{
4397 int index_mask = 0; 4806 int index_mask = 0;
4398 struct drm_connector *connector; 4807 struct drm_encoder *encoder;
4399 int entry = 0; 4808 int entry = 0;
4400 4809
4401 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 4810 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
4402 struct intel_encoder *intel_encoder = to_intel_encoder(connector); 4811 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
4403 if (type_mask & intel_encoder->clone_mask) 4812 if (type_mask & intel_encoder->clone_mask)
4404 index_mask |= (1 << entry); 4813 index_mask |= (1 << entry);
4405 entry++; 4814 entry++;
@@ -4411,7 +4820,7 @@ static int intel_connector_clones(struct drm_device *dev, int type_mask)
4411static void intel_setup_outputs(struct drm_device *dev) 4820static void intel_setup_outputs(struct drm_device *dev)
4412{ 4821{
4413 struct drm_i915_private *dev_priv = dev->dev_private; 4822 struct drm_i915_private *dev_priv = dev->dev_private;
4414 struct drm_connector *connector; 4823 struct drm_encoder *encoder;
4415 4824
4416 intel_crt_init(dev); 4825 intel_crt_init(dev);
4417 4826
@@ -4426,9 +4835,8 @@ static void intel_setup_outputs(struct drm_device *dev)
4426 intel_dp_init(dev, DP_A); 4835 intel_dp_init(dev, DP_A);
4427 4836
4428 if (I915_READ(HDMIB) & PORT_DETECTED) { 4837 if (I915_READ(HDMIB) & PORT_DETECTED) {
 4429 /* check SDVOB */ 4838 /* PCH SDVOB is multiplexed with HDMIB */
4430 /* found = intel_sdvo_init(dev, HDMIB); */ 4839 found = intel_sdvo_init(dev, PCH_SDVOB);
4431 found = 0;
4432 if (!found) 4840 if (!found)
4433 intel_hdmi_init(dev, HDMIB); 4841 intel_hdmi_init(dev, HDMIB);
4434 if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED)) 4842 if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
@@ -4494,12 +4902,11 @@ static void intel_setup_outputs(struct drm_device *dev)
4494 if (SUPPORTS_TV(dev)) 4902 if (SUPPORTS_TV(dev))
4495 intel_tv_init(dev); 4903 intel_tv_init(dev);
4496 4904
4497 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 4905 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
4498 struct intel_encoder *intel_encoder = to_intel_encoder(connector); 4906 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
4499 struct drm_encoder *encoder = &intel_encoder->enc;
4500 4907
4501 encoder->possible_crtcs = intel_encoder->crtc_mask; 4908 encoder->possible_crtcs = intel_encoder->crtc_mask;
4502 encoder->possible_clones = intel_connector_clones(dev, 4909 encoder->possible_clones = intel_encoder_clones(dev,
4503 intel_encoder->clone_mask); 4910 intel_encoder->clone_mask);
4504 } 4911 }
4505} 4912}
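/*
 * Worked example for intel_encoder_clones() above (illustrative): with
 * an encoder list of [CRT, SDVO, LVDS] and a type_mask matching only
 * CRT and SDVO, index_mask comes back as 0x3 -- possible_clones is a
 * bitmask of positions in the encoder list, not of output-type bits,
 * which is why the walk now has to be over encoder_list rather than
 * connector_list.
 */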
@@ -4507,10 +4914,6 @@ static void intel_setup_outputs(struct drm_device *dev)
4507static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb) 4914static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
4508{ 4915{
4509 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); 4916 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
4510 struct drm_device *dev = fb->dev;
4511
4512 if (fb->fbdev)
4513 intelfb_remove(dev, fb);
4514 4917
4515 drm_framebuffer_cleanup(fb); 4918 drm_framebuffer_cleanup(fb);
4516 drm_gem_object_unreference_unlocked(intel_fb->obj); 4919 drm_gem_object_unreference_unlocked(intel_fb->obj);
@@ -4533,18 +4936,13 @@ static const struct drm_framebuffer_funcs intel_fb_funcs = {
4533 .create_handle = intel_user_framebuffer_create_handle, 4936 .create_handle = intel_user_framebuffer_create_handle,
4534}; 4937};
4535 4938
4536int intel_framebuffer_create(struct drm_device *dev, 4939int intel_framebuffer_init(struct drm_device *dev,
4537 struct drm_mode_fb_cmd *mode_cmd, 4940 struct intel_framebuffer *intel_fb,
4538 struct drm_framebuffer **fb, 4941 struct drm_mode_fb_cmd *mode_cmd,
4539 struct drm_gem_object *obj) 4942 struct drm_gem_object *obj)
4540{ 4943{
4541 struct intel_framebuffer *intel_fb;
4542 int ret; 4944 int ret;
4543 4945
4544 intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
4545 if (!intel_fb)
4546 return -ENOMEM;
4547
4548 ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs); 4946 ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs);
4549 if (ret) { 4947 if (ret) {
4550 DRM_ERROR("framebuffer init failed %d\n", ret); 4948 DRM_ERROR("framebuffer init failed %d\n", ret);
@@ -4552,40 +4950,40 @@ int intel_framebuffer_create(struct drm_device *dev,
4552 } 4950 }
4553 4951
4554 drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd); 4952 drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd);
4555
4556 intel_fb->obj = obj; 4953 intel_fb->obj = obj;
4557
4558 *fb = &intel_fb->base;
4559
4560 return 0; 4954 return 0;
4561} 4955}
4562 4956
4563
4564static struct drm_framebuffer * 4957static struct drm_framebuffer *
4565intel_user_framebuffer_create(struct drm_device *dev, 4958intel_user_framebuffer_create(struct drm_device *dev,
4566 struct drm_file *filp, 4959 struct drm_file *filp,
4567 struct drm_mode_fb_cmd *mode_cmd) 4960 struct drm_mode_fb_cmd *mode_cmd)
4568{ 4961{
4569 struct drm_gem_object *obj; 4962 struct drm_gem_object *obj;
4570 struct drm_framebuffer *fb; 4963 struct intel_framebuffer *intel_fb;
4571 int ret; 4964 int ret;
4572 4965
4573 obj = drm_gem_object_lookup(dev, filp, mode_cmd->handle); 4966 obj = drm_gem_object_lookup(dev, filp, mode_cmd->handle);
4574 if (!obj) 4967 if (!obj)
4575 return NULL; 4968 return NULL;
4576 4969
4577 ret = intel_framebuffer_create(dev, mode_cmd, &fb, obj); 4970 intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
4971 if (!intel_fb)
4972 return NULL;
4973
4974 ret = intel_framebuffer_init(dev, intel_fb,
4975 mode_cmd, obj);
4578 if (ret) { 4976 if (ret) {
4579 drm_gem_object_unreference_unlocked(obj); 4977 drm_gem_object_unreference_unlocked(obj);
4978 kfree(intel_fb);
4580 return NULL; 4979 return NULL;
4581 } 4980 }
4582 4981
4583 return fb; 4982 return &intel_fb->base;
4584} 4983}
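/*
 * Why intel_framebuffer_create() became intel_framebuffer_init(): the
 * framebuffer struct can now be embedded instead of heap-allocated.
 * A hedged sketch of such a caller (all names here are illustrative):
 */
struct sketch_fbdev {
	struct intel_framebuffer ifb;	/* embedded, no kzalloc needed */
};

static int sketch_fbdev_init_fb(struct drm_device *dev,
				struct sketch_fbdev *fbdev,
				struct drm_mode_fb_cmd *mode_cmd,
				struct drm_gem_object *obj)
{
	/* initialize in place; lifetime follows the containing object */
	return intel_framebuffer_init(dev, &fbdev->ifb, mode_cmd, obj);
}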
4585 4984
4586static const struct drm_mode_config_funcs intel_mode_funcs = { 4985static const struct drm_mode_config_funcs intel_mode_funcs = {
4587 .fb_create = intel_user_framebuffer_create, 4986 .fb_create = intel_user_framebuffer_create,
4588 .fb_changed = intelfb_probe,
4589}; 4987};
4590 4988
4591static struct drm_gem_object * 4989static struct drm_gem_object *
@@ -4594,7 +4992,7 @@ intel_alloc_power_context(struct drm_device *dev)
4594 struct drm_gem_object *pwrctx; 4992 struct drm_gem_object *pwrctx;
4595 int ret; 4993 int ret;
4596 4994
4597 pwrctx = drm_gem_object_alloc(dev, 4096); 4995 pwrctx = i915_gem_alloc_object(dev, 4096);
4598 if (!pwrctx) { 4996 if (!pwrctx) {
4599 DRM_DEBUG("failed to alloc power context, RC6 disabled\n"); 4997 DRM_DEBUG("failed to alloc power context, RC6 disabled\n");
4600 return NULL; 4998 return NULL;
@@ -4732,6 +5130,25 @@ void intel_init_clock_gating(struct drm_device *dev)
4732 } 5130 }
4733 5131
4734 I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate); 5132 I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
5133
5134 /*
 5135 * According to the spec, the following bits should be set in
 5136 * order to enable memory self-refresh:
 5137 * bits 22/21 of 0x42004,
 5138 * bit 5 of 0x42020,
 5139 * bit 15 of 0x45000.
5140 */
5141 if (IS_IRONLAKE(dev)) {
5142 I915_WRITE(ILK_DISPLAY_CHICKEN2,
5143 (I915_READ(ILK_DISPLAY_CHICKEN2) |
5144 ILK_DPARB_GATE | ILK_VSDPFD_FULL));
5145 I915_WRITE(ILK_DSPCLK_GATE,
5146 (I915_READ(ILK_DSPCLK_GATE) |
5147 ILK_DPARB_CLK_GATE));
5148 I915_WRITE(DISP_ARB_CTL,
5149 (I915_READ(DISP_ARB_CTL) |
5150 DISP_FBC_WM_DIS));
5151 }
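/*
 * For reference (believed mapping, from i915_reg.h): 0x42004 is
 * ILK_DISPLAY_CHICKEN2 (bits 22/21 = ILK_DPARB_GATE/ILK_VSDPFD_FULL),
 * 0x42020 is ILK_DSPCLK_GATE (bit 5 = ILK_DPARB_CLK_GATE) and 0x45000
 * is DISP_ARB_CTL (bit 15 = DISP_FBC_WM_DIS).
 */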
4735 return; 5152 return;
4736 } else if (IS_G4X(dev)) { 5153 } else if (IS_G4X(dev)) {
4737 uint32_t dspclk_gate; 5154 uint32_t dspclk_gate;
@@ -4809,8 +5226,7 @@ static void intel_init_display(struct drm_device *dev)
4809 else 5226 else
4810 dev_priv->display.dpms = i9xx_crtc_dpms; 5227 dev_priv->display.dpms = i9xx_crtc_dpms;
4811 5228
4812 /* Only mobile has FBC, leave pointers NULL for other chips */ 5229 if (I915_HAS_FBC(dev)) {
4813 if (IS_MOBILE(dev)) {
4814 if (IS_GM45(dev)) { 5230 if (IS_GM45(dev)) {
4815 dev_priv->display.fbc_enabled = g4x_fbc_enabled; 5231 dev_priv->display.fbc_enabled = g4x_fbc_enabled;
4816 dev_priv->display.enable_fbc = g4x_enable_fbc; 5232 dev_priv->display.enable_fbc = g4x_enable_fbc;
@@ -4847,9 +5263,31 @@ static void intel_init_display(struct drm_device *dev)
4847 i830_get_display_clock_speed; 5263 i830_get_display_clock_speed;
4848 5264
4849 /* For FIFO watermark updates */ 5265 /* For FIFO watermark updates */
4850 if (HAS_PCH_SPLIT(dev)) 5266 if (HAS_PCH_SPLIT(dev)) {
4851 dev_priv->display.update_wm = NULL; 5267 if (IS_IRONLAKE(dev)) {
4852 else if (IS_G4X(dev)) 5268 if (I915_READ(MLTR_ILK) & ILK_SRLT_MASK)
5269 dev_priv->display.update_wm = ironlake_update_wm;
5270 else {
5271 DRM_DEBUG_KMS("Failed to get proper latency. "
5272 "Disable CxSR\n");
5273 dev_priv->display.update_wm = NULL;
5274 }
5275 } else
5276 dev_priv->display.update_wm = NULL;
5277 } else if (IS_PINEVIEW(dev)) {
5278 if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev),
5279 dev_priv->fsb_freq,
5280 dev_priv->mem_freq)) {
5281 DRM_INFO("failed to find known CxSR latency "
5282 "(found fsb freq %d, mem freq %d), "
5283 "disabling CxSR\n",
5284 dev_priv->fsb_freq, dev_priv->mem_freq);
5285 /* Disable CxSR and never update its watermark again */
5286 pineview_disable_cxsr(dev);
5287 dev_priv->display.update_wm = NULL;
5288 } else
5289 dev_priv->display.update_wm = pineview_update_wm;
5290 } else if (IS_G4X(dev))
4853 dev_priv->display.update_wm = g4x_update_wm; 5291 dev_priv->display.update_wm = g4x_update_wm;
4854 else if (IS_I965G(dev)) 5292 else if (IS_I965G(dev))
4855 dev_priv->display.update_wm = i965_update_wm; 5293 dev_priv->display.update_wm = i965_update_wm;
@@ -4923,13 +5361,6 @@ void intel_modeset_init(struct drm_device *dev)
4923 (unsigned long)dev); 5361 (unsigned long)dev);
4924 5362
4925 intel_setup_overlay(dev); 5363 intel_setup_overlay(dev);
4926
4927 if (IS_PINEVIEW(dev) && !intel_get_cxsr_latency(IS_PINEVIEW_G(dev),
4928 dev_priv->fsb_freq,
4929 dev_priv->mem_freq))
4930 DRM_INFO("failed to find known CxSR latency "
4931 "(found fsb freq %d, mem freq %d), disabling CxSR\n",
4932 dev_priv->fsb_freq, dev_priv->mem_freq);
4933} 5364}
4934 5365
4935void intel_modeset_cleanup(struct drm_device *dev) 5366void intel_modeset_cleanup(struct drm_device *dev)
@@ -4940,6 +5371,8 @@ void intel_modeset_cleanup(struct drm_device *dev)
4940 5371
4941 mutex_lock(&dev->struct_mutex); 5372 mutex_lock(&dev->struct_mutex);
4942 5373
5374 intel_fbdev_fini(dev);
5375
4943 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 5376 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
4944 /* Skip inactive CRTCs */ 5377 /* Skip inactive CRTCs */
4945 if (!crtc->fb) 5378 if (!crtc->fb)
@@ -4974,14 +5407,29 @@ void intel_modeset_cleanup(struct drm_device *dev)
4974} 5407}
4975 5408
4976 5409
4977/* current intel driver doesn't take advantage of encoders 5410/*
4978 always give back the encoder for the connector 5411 * Return which encoder is currently attached for connector.
4979*/ 5412 */
4980struct drm_encoder *intel_best_encoder(struct drm_connector *connector) 5413struct drm_encoder *intel_attached_encoder (struct drm_connector *connector)
4981{ 5414{
4982 struct intel_encoder *intel_encoder = to_intel_encoder(connector); 5415 struct drm_mode_object *obj;
5416 struct drm_encoder *encoder;
5417 int i;
4983 5418
4984 return &intel_encoder->enc; 5419 for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
5420 if (connector->encoder_ids[i] == 0)
5421 break;
5422
5423 obj = drm_mode_object_find(connector->dev,
5424 connector->encoder_ids[i],
5425 DRM_MODE_OBJECT_ENCODER);
5426 if (!obj)
5427 continue;
5428
5429 encoder = obj_to_encoder(obj);
5430 return encoder;
5431 }
5432 return NULL;
4985} 5433}
4986 5434
4987/* 5435/*
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 77e40cfcf216..f6299bb788e5 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -48,8 +48,6 @@ struct intel_dp_priv {
48 uint32_t output_reg; 48 uint32_t output_reg;
49 uint32_t DP; 49 uint32_t DP;
50 uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE]; 50 uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE];
51 uint32_t save_DP;
52 uint8_t save_link_configuration[DP_LINK_CONFIGURATION_SIZE];
53 bool has_audio; 51 bool has_audio;
54 int dpms_mode; 52 int dpms_mode;
55 uint8_t link_bw; 53 uint8_t link_bw;
@@ -141,7 +139,8 @@ static int
141intel_dp_mode_valid(struct drm_connector *connector, 139intel_dp_mode_valid(struct drm_connector *connector,
142 struct drm_display_mode *mode) 140 struct drm_display_mode *mode)
143{ 141{
144 struct intel_encoder *intel_encoder = to_intel_encoder(connector); 142 struct drm_encoder *encoder = intel_attached_encoder(connector);
143 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
145 int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_encoder)); 144 int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_encoder));
146 int max_lanes = intel_dp_max_lane_count(intel_encoder); 145 int max_lanes = intel_dp_max_lane_count(intel_encoder);
147 146
@@ -215,7 +214,7 @@ intel_dp_aux_ch(struct intel_encoder *intel_encoder,
215{ 214{
216 struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; 215 struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
217 uint32_t output_reg = dp_priv->output_reg; 216 uint32_t output_reg = dp_priv->output_reg;
218 struct drm_device *dev = intel_encoder->base.dev; 217 struct drm_device *dev = intel_encoder->enc.dev;
219 struct drm_i915_private *dev_priv = dev->dev_private; 218 struct drm_i915_private *dev_priv = dev->dev_private;
220 uint32_t ch_ctl = output_reg + 0x10; 219 uint32_t ch_ctl = output_reg + 0x10;
221 uint32_t ch_data = ch_ctl + 4; 220 uint32_t ch_data = ch_ctl + 4;
@@ -224,19 +223,27 @@ intel_dp_aux_ch(struct intel_encoder *intel_encoder,
224 uint32_t ctl; 223 uint32_t ctl;
225 uint32_t status; 224 uint32_t status;
226 uint32_t aux_clock_divider; 225 uint32_t aux_clock_divider;
227 int try; 226 int try, precharge;
228 227
 229 /* The clock divider is based on the hrawclk, 228 /* The clock divider is based on the hrawclk,
 230 * and should run at 2MHz. So take the 229 * and should run at 2MHz. So take the
 231 * hrawclk value, divide by 2 and use that. 230 * hrawclk value, divide by 2 and use that.
232 */ 231 */
233 if (IS_eDP(intel_encoder)) 232 if (IS_eDP(intel_encoder)) {
 234 aux_clock_divider = 225; /* eDP input clock at 450MHz */ 233 if (IS_GEN6(dev))
 235 else if (HAS_PCH_SPLIT(dev)) 234 aux_clock_divider = 200; /* SNB eDP input clock at 400MHz */
 235 else
 236 aux_clock_divider = 225; /* eDP input clock at 450MHz */
 237 } else if (HAS_PCH_SPLIT(dev))
 236 aux_clock_divider = 62; /* IRL input clock fixed at 125MHz */ 238 aux_clock_divider = 62; /* IRL input clock fixed at 125MHz */
237 else 239 else
238 aux_clock_divider = intel_hrawclk(dev) / 2; 240 aux_clock_divider = intel_hrawclk(dev) / 2;
239 241
242 if (IS_GEN6(dev))
243 precharge = 3;
244 else
245 precharge = 5;
246
240 /* Must try at least 3 times according to DP spec */ 247 /* Must try at least 3 times according to DP spec */
241 for (try = 0; try < 5; try++) { 248 for (try = 0; try < 5; try++) {
242 /* Load the send data into the aux channel data registers */ 249 /* Load the send data into the aux channel data registers */
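/*
 * Sketch of the divider selection above (the helper is illustrative):
 * the AUX channel targets a ~2MHz bit clock, so each divider is simply
 * input_clock / 2MHz -- 450MHz eDP -> 225, 400MHz SNB eDP -> 200,
 * 125MHz PCH rawclk -> 62.
 */
static uint32_t aux_clock_divider_for(uint32_t input_clock_khz)
{
	return input_clock_khz / 2000;
}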
@@ -249,7 +256,7 @@ intel_dp_aux_ch(struct intel_encoder *intel_encoder,
249 ctl = (DP_AUX_CH_CTL_SEND_BUSY | 256 ctl = (DP_AUX_CH_CTL_SEND_BUSY |
250 DP_AUX_CH_CTL_TIME_OUT_400us | 257 DP_AUX_CH_CTL_TIME_OUT_400us |
251 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) | 258 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
252 (5 << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) | 259 (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
253 (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT) | 260 (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT) |
254 DP_AUX_CH_CTL_DONE | 261 DP_AUX_CH_CTL_DONE |
255 DP_AUX_CH_CTL_TIME_OUT_ERROR | 262 DP_AUX_CH_CTL_TIME_OUT_ERROR |
@@ -465,7 +472,8 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
465} 472}
466 473
467static int 474static int
468intel_dp_i2c_init(struct intel_encoder *intel_encoder, const char *name) 475intel_dp_i2c_init(struct intel_encoder *intel_encoder,
476 struct intel_connector *intel_connector, const char *name)
469{ 477{
470 struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; 478 struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
471 479
@@ -480,7 +488,7 @@ intel_dp_i2c_init(struct intel_encoder *intel_encoder, const char *name)
480 strncpy (dp_priv->adapter.name, name, sizeof(dp_priv->adapter.name) - 1); 488 strncpy (dp_priv->adapter.name, name, sizeof(dp_priv->adapter.name) - 1);
481 dp_priv->adapter.name[sizeof(dp_priv->adapter.name) - 1] = '\0'; 489 dp_priv->adapter.name[sizeof(dp_priv->adapter.name) - 1] = '\0';
482 dp_priv->adapter.algo_data = &dp_priv->algo; 490 dp_priv->adapter.algo_data = &dp_priv->algo;
483 dp_priv->adapter.dev.parent = &intel_encoder->base.kdev; 491 dp_priv->adapter.dev.parent = &intel_connector->base.kdev;
484 492
485 return i2c_dp_aux_add_bus(&dp_priv->adapter); 493 return i2c_dp_aux_add_bus(&dp_priv->adapter);
486} 494}
@@ -555,7 +563,7 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
555{ 563{
556 struct drm_device *dev = crtc->dev; 564 struct drm_device *dev = crtc->dev;
557 struct drm_mode_config *mode_config = &dev->mode_config; 565 struct drm_mode_config *mode_config = &dev->mode_config;
558 struct drm_connector *connector; 566 struct drm_encoder *encoder;
559 struct drm_i915_private *dev_priv = dev->dev_private; 567 struct drm_i915_private *dev_priv = dev->dev_private;
560 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 568 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
561 int lane_count = 4; 569 int lane_count = 4;
@@ -564,13 +572,16 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
564 /* 572 /*
565 * Find the lane count in the intel_encoder private 573 * Find the lane count in the intel_encoder private
566 */ 574 */
567 list_for_each_entry(connector, &mode_config->connector_list, head) { 575 list_for_each_entry(encoder, &mode_config->encoder_list, head) {
568 struct intel_encoder *intel_encoder = to_intel_encoder(connector); 576 struct intel_encoder *intel_encoder;
569 struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; 577 struct intel_dp_priv *dp_priv;
570 578
571 if (!connector->encoder || connector->encoder->crtc != crtc) 579 if (!encoder || encoder->crtc != crtc)
572 continue; 580 continue;
573 581
582 intel_encoder = enc_to_intel_encoder(encoder);
583 dp_priv = intel_encoder->dev_priv;
584
574 if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT) { 585 if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT) {
575 lane_count = dp_priv->lane_count; 586 lane_count = dp_priv->lane_count;
576 break; 587 break;
@@ -626,16 +637,24 @@ static void
626intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, 637intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
627 struct drm_display_mode *adjusted_mode) 638 struct drm_display_mode *adjusted_mode)
628{ 639{
640 struct drm_device *dev = encoder->dev;
629 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); 641 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
630 struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; 642 struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
631 struct drm_crtc *crtc = intel_encoder->enc.crtc; 643 struct drm_crtc *crtc = intel_encoder->enc.crtc;
632 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 644 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
633 645
634 dp_priv->DP = (DP_LINK_TRAIN_OFF | 646 dp_priv->DP = (DP_VOLTAGE_0_4 |
635 DP_VOLTAGE_0_4 | 647 DP_PRE_EMPHASIS_0);
636 DP_PRE_EMPHASIS_0 | 648
637 DP_SYNC_VS_HIGH | 649 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
638 DP_SYNC_HS_HIGH); 650 dp_priv->DP |= DP_SYNC_HS_HIGH;
651 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
652 dp_priv->DP |= DP_SYNC_VS_HIGH;
653
654 if (HAS_PCH_CPT(dev) && !IS_eDP(intel_encoder))
655 dp_priv->DP |= DP_LINK_TRAIN_OFF_CPT;
656 else
657 dp_priv->DP |= DP_LINK_TRAIN_OFF;
639 658
640 switch (dp_priv->lane_count) { 659 switch (dp_priv->lane_count) {
641 case 1: 660 case 1:
@@ -664,7 +683,8 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
664 dp_priv->DP |= DP_ENHANCED_FRAMING; 683 dp_priv->DP |= DP_ENHANCED_FRAMING;
665 } 684 }
666 685
667 if (intel_crtc->pipe == 1) 686 /* CPT DP's pipe select is decided in TRANS_DP_CTL */
687 if (intel_crtc->pipe == 1 && !HAS_PCH_CPT(dev))
668 dp_priv->DP |= DP_PIPEB_SELECT; 688 dp_priv->DP |= DP_PIPEB_SELECT;
669 689
670 if (IS_eDP(intel_encoder)) { 690 if (IS_eDP(intel_encoder)) {
@@ -704,7 +724,7 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode)
704{ 724{
705 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); 725 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
706 struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; 726 struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
707 struct drm_device *dev = intel_encoder->base.dev; 727 struct drm_device *dev = encoder->dev;
708 struct drm_i915_private *dev_priv = dev->dev_private; 728 struct drm_i915_private *dev_priv = dev->dev_private;
709 uint32_t dp_reg = I915_READ(dp_priv->output_reg); 729 uint32_t dp_reg = I915_READ(dp_priv->output_reg);
710 730
@@ -749,20 +769,6 @@ intel_dp_link_status(uint8_t link_status[DP_LINK_STATUS_SIZE],
749 return link_status[r - DP_LANE0_1_STATUS]; 769 return link_status[r - DP_LANE0_1_STATUS];
750} 770}
751 771
752static void
753intel_dp_save(struct drm_connector *connector)
754{
755 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
756 struct drm_device *dev = intel_encoder->base.dev;
757 struct drm_i915_private *dev_priv = dev->dev_private;
758 struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
759
760 dp_priv->save_DP = I915_READ(dp_priv->output_reg);
761 intel_dp_aux_native_read(intel_encoder, DP_LINK_BW_SET,
762 dp_priv->save_link_configuration,
763 sizeof (dp_priv->save_link_configuration));
764}
765
766static uint8_t 772static uint8_t
767intel_get_adjust_request_voltage(uint8_t link_status[DP_LINK_STATUS_SIZE], 773intel_get_adjust_request_voltage(uint8_t link_status[DP_LINK_STATUS_SIZE],
768 int lane) 774 int lane)
@@ -892,6 +898,25 @@ intel_dp_signal_levels(uint8_t train_set, int lane_count)
892 return signal_levels; 898 return signal_levels;
893} 899}
894 900
901/* Gen6's DP voltage swing and pre-emphasis control */
902static uint32_t
903intel_gen6_edp_signal_levels(uint8_t train_set)
904{
905 switch (train_set & (DP_TRAIN_VOLTAGE_SWING_MASK|DP_TRAIN_PRE_EMPHASIS_MASK)) {
906 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
907 return EDP_LINK_TRAIN_400MV_0DB_SNB_B;
908 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
909 return EDP_LINK_TRAIN_400MV_6DB_SNB_B;
910 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
911 return EDP_LINK_TRAIN_600MV_3_5DB_SNB_B;
912 case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
913 return EDP_LINK_TRAIN_800MV_0DB_SNB_B;
914 default:
915 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level\n");
916 return EDP_LINK_TRAIN_400MV_0DB_SNB_B;
917 }
918}
919
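/*
 * Note (illustrative): on SNB eDP the swing/emphasis bits live in a
 * different field of the DP register, so the training loops below mask
 * EDP_LINK_TRAIN_VOL_EMP_MASK_SNB instead of
 * DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK before OR-ing the new levels in.
 */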
895static uint8_t 920static uint8_t
896intel_get_lane_status(uint8_t link_status[DP_LINK_STATUS_SIZE], 921intel_get_lane_status(uint8_t link_status[DP_LINK_STATUS_SIZE],
897 int lane) 922 int lane)
@@ -948,7 +973,7 @@ intel_dp_set_link_train(struct intel_encoder *intel_encoder,
948 uint8_t train_set[4], 973 uint8_t train_set[4],
949 bool first) 974 bool first)
950{ 975{
951 struct drm_device *dev = intel_encoder->base.dev; 976 struct drm_device *dev = intel_encoder->enc.dev;
952 struct drm_i915_private *dev_priv = dev->dev_private; 977 struct drm_i915_private *dev_priv = dev->dev_private;
953 struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; 978 struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
954 int ret; 979 int ret;
@@ -974,7 +999,7 @@ static void
974intel_dp_link_train(struct intel_encoder *intel_encoder, uint32_t DP, 999intel_dp_link_train(struct intel_encoder *intel_encoder, uint32_t DP,
975 uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE]) 1000 uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE])
976{ 1001{
977 struct drm_device *dev = intel_encoder->base.dev; 1002 struct drm_device *dev = intel_encoder->enc.dev;
978 struct drm_i915_private *dev_priv = dev->dev_private; 1003 struct drm_i915_private *dev_priv = dev->dev_private;
979 struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; 1004 struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
980 uint8_t train_set[4]; 1005 uint8_t train_set[4];
@@ -985,23 +1010,38 @@ intel_dp_link_train(struct intel_encoder *intel_encoder, uint32_t DP,
985 bool channel_eq = false; 1010 bool channel_eq = false;
986 bool first = true; 1011 bool first = true;
987 int tries; 1012 int tries;
1013 u32 reg;
988 1014
989 /* Write the link configuration data */ 1015 /* Write the link configuration data */
990 intel_dp_aux_native_write(intel_encoder, 0x100, 1016 intel_dp_aux_native_write(intel_encoder, DP_LINK_BW_SET,
991 link_configuration, DP_LINK_CONFIGURATION_SIZE); 1017 link_configuration, DP_LINK_CONFIGURATION_SIZE);
992 1018
993 DP |= DP_PORT_EN; 1019 DP |= DP_PORT_EN;
994 DP &= ~DP_LINK_TRAIN_MASK; 1020 if (HAS_PCH_CPT(dev) && !IS_eDP(intel_encoder))
1021 DP &= ~DP_LINK_TRAIN_MASK_CPT;
1022 else
1023 DP &= ~DP_LINK_TRAIN_MASK;
995 memset(train_set, 0, 4); 1024 memset(train_set, 0, 4);
996 voltage = 0xff; 1025 voltage = 0xff;
997 tries = 0; 1026 tries = 0;
998 clock_recovery = false; 1027 clock_recovery = false;
999 for (;;) { 1028 for (;;) {
1000 /* Use train_set[0] to set the voltage and pre emphasis values */ 1029 /* Use train_set[0] to set the voltage and pre emphasis values */
1001 uint32_t signal_levels = intel_dp_signal_levels(train_set[0], dp_priv->lane_count); 1030 uint32_t signal_levels;
1002 DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; 1031 if (IS_GEN6(dev) && IS_eDP(intel_encoder)) {
1032 signal_levels = intel_gen6_edp_signal_levels(train_set[0]);
1033 DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
1034 } else {
1035 signal_levels = intel_dp_signal_levels(train_set[0], dp_priv->lane_count);
1036 DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
1037 }
1003 1038
1004 if (!intel_dp_set_link_train(intel_encoder, DP | DP_LINK_TRAIN_PAT_1, 1039 if (HAS_PCH_CPT(dev) && !IS_eDP(intel_encoder))
1040 reg = DP | DP_LINK_TRAIN_PAT_1_CPT;
1041 else
1042 reg = DP | DP_LINK_TRAIN_PAT_1;
1043
1044 if (!intel_dp_set_link_train(intel_encoder, reg,
1005 DP_TRAINING_PATTERN_1, train_set, first)) 1045 DP_TRAINING_PATTERN_1, train_set, first))
1006 break; 1046 break;
1007 first = false; 1047 first = false;
@@ -1041,11 +1081,23 @@ intel_dp_link_train(struct intel_encoder *intel_encoder, uint32_t DP,
1041 channel_eq = false; 1081 channel_eq = false;
1042 for (;;) { 1082 for (;;) {
1043 /* Use train_set[0] to set the voltage and pre emphasis values */ 1083 /* Use train_set[0] to set the voltage and pre emphasis values */
1044 uint32_t signal_levels = intel_dp_signal_levels(train_set[0], dp_priv->lane_count); 1084 uint32_t signal_levels;
1045 DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; 1085
1086 if (IS_GEN6(dev) && IS_eDP(intel_encoder)) {
1087 signal_levels = intel_gen6_edp_signal_levels(train_set[0]);
1088 DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
1089 } else {
1090 signal_levels = intel_dp_signal_levels(train_set[0], dp_priv->lane_count);
1091 DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
1092 }
1093
1094 if (HAS_PCH_CPT(dev) && !IS_eDP(intel_encoder))
1095 reg = DP | DP_LINK_TRAIN_PAT_2_CPT;
1096 else
1097 reg = DP | DP_LINK_TRAIN_PAT_2;
1046 1098
1047 /* channel eq pattern */ 1099 /* channel eq pattern */
1048 if (!intel_dp_set_link_train(intel_encoder, DP | DP_LINK_TRAIN_PAT_2, 1100 if (!intel_dp_set_link_train(intel_encoder, reg,
1049 DP_TRAINING_PATTERN_2, train_set, 1101 DP_TRAINING_PATTERN_2, train_set,
1050 false)) 1102 false))
1051 break; 1103 break;
@@ -1068,7 +1120,12 @@ intel_dp_link_train(struct intel_encoder *intel_encoder, uint32_t DP,
1068 ++tries; 1120 ++tries;
1069 } 1121 }
1070 1122
1071 I915_WRITE(dp_priv->output_reg, DP | DP_LINK_TRAIN_OFF); 1123 if (HAS_PCH_CPT(dev) && !IS_eDP(intel_encoder))
1124 reg = DP | DP_LINK_TRAIN_OFF_CPT;
1125 else
1126 reg = DP | DP_LINK_TRAIN_OFF;
1127
1128 I915_WRITE(dp_priv->output_reg, reg);
1072 POSTING_READ(dp_priv->output_reg); 1129 POSTING_READ(dp_priv->output_reg);
1073 intel_dp_aux_native_write_1(intel_encoder, 1130 intel_dp_aux_native_write_1(intel_encoder,
1074 DP_TRAINING_PATTERN_SET, DP_TRAINING_PATTERN_DISABLE); 1131 DP_TRAINING_PATTERN_SET, DP_TRAINING_PATTERN_DISABLE);
@@ -1077,7 +1134,7 @@ intel_dp_link_train(struct intel_encoder *intel_encoder, uint32_t DP,
1077static void 1134static void
1078intel_dp_link_down(struct intel_encoder *intel_encoder, uint32_t DP) 1135intel_dp_link_down(struct intel_encoder *intel_encoder, uint32_t DP)
1079{ 1136{
1080 struct drm_device *dev = intel_encoder->base.dev; 1137 struct drm_device *dev = intel_encoder->enc.dev;
1081 struct drm_i915_private *dev_priv = dev->dev_private; 1138 struct drm_i915_private *dev_priv = dev->dev_private;
1082 struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; 1139 struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
1083 1140
@@ -1090,9 +1147,15 @@ intel_dp_link_down(struct intel_encoder *intel_encoder, uint32_t DP)
1090 udelay(100); 1147 udelay(100);
1091 } 1148 }
1092 1149
1093 DP &= ~DP_LINK_TRAIN_MASK; 1150 if (HAS_PCH_CPT(dev) && !IS_eDP(intel_encoder)) {
1094 I915_WRITE(dp_priv->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE); 1151 DP &= ~DP_LINK_TRAIN_MASK_CPT;
1095 POSTING_READ(dp_priv->output_reg); 1152 I915_WRITE(dp_priv->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
1153 POSTING_READ(dp_priv->output_reg);
1154 } else {
1155 DP &= ~DP_LINK_TRAIN_MASK;
1156 I915_WRITE(dp_priv->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE);
1157 POSTING_READ(dp_priv->output_reg);
1158 }
1096 1159
1097 udelay(17000); 1160 udelay(17000);
1098 1161
@@ -1102,18 +1165,6 @@ intel_dp_link_down(struct intel_encoder *intel_encoder, uint32_t DP)
1102 POSTING_READ(dp_priv->output_reg); 1165 POSTING_READ(dp_priv->output_reg);
1103} 1166}
1104 1167
1105static void
1106intel_dp_restore(struct drm_connector *connector)
1107{
1108 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
1109 struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
1110
1111 if (dp_priv->save_DP & DP_PORT_EN)
1112 intel_dp_link_train(intel_encoder, dp_priv->save_DP, dp_priv->save_link_configuration);
1113 else
1114 intel_dp_link_down(intel_encoder, dp_priv->save_DP);
1115}
1116
1117/* 1168/*
1118 * According to DP spec 1169 * According to DP spec
1119 * 5.1.2: 1170 * 5.1.2:
@@ -1144,7 +1195,8 @@ intel_dp_check_link_status(struct intel_encoder *intel_encoder)
1144static enum drm_connector_status 1195static enum drm_connector_status
1145ironlake_dp_detect(struct drm_connector *connector) 1196ironlake_dp_detect(struct drm_connector *connector)
1146{ 1197{
1147 struct intel_encoder *intel_encoder = to_intel_encoder(connector); 1198 struct drm_encoder *encoder = intel_attached_encoder(connector);
1199 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
1148 struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; 1200 struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
1149 enum drm_connector_status status; 1201 enum drm_connector_status status;
1150 1202
@@ -1168,8 +1220,9 @@ ironlake_dp_detect(struct drm_connector *connector)
1168static enum drm_connector_status 1220static enum drm_connector_status
1169intel_dp_detect(struct drm_connector *connector) 1221intel_dp_detect(struct drm_connector *connector)
1170{ 1222{
1171 struct intel_encoder *intel_encoder = to_intel_encoder(connector); 1223 struct drm_encoder *encoder = intel_attached_encoder(connector);
1172 struct drm_device *dev = intel_encoder->base.dev; 1224 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
1225 struct drm_device *dev = intel_encoder->enc.dev;
1173 struct drm_i915_private *dev_priv = dev->dev_private; 1226 struct drm_i915_private *dev_priv = dev->dev_private;
1174 struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; 1227 struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
1175 uint32_t temp, bit; 1228 uint32_t temp, bit;
@@ -1180,16 +1233,6 @@ intel_dp_detect(struct drm_connector *connector)
1180 if (HAS_PCH_SPLIT(dev)) 1233 if (HAS_PCH_SPLIT(dev))
1181 return ironlake_dp_detect(connector); 1234 return ironlake_dp_detect(connector);
1182 1235
1183 temp = I915_READ(PORT_HOTPLUG_EN);
1184
1185 I915_WRITE(PORT_HOTPLUG_EN,
1186 temp |
1187 DPB_HOTPLUG_INT_EN |
1188 DPC_HOTPLUG_INT_EN |
1189 DPD_HOTPLUG_INT_EN);
1190
1191 POSTING_READ(PORT_HOTPLUG_EN);
1192
1193 switch (dp_priv->output_reg) { 1236 switch (dp_priv->output_reg) {
1194 case DP_B: 1237 case DP_B:
1195 bit = DPB_HOTPLUG_INT_STATUS; 1238 bit = DPB_HOTPLUG_INT_STATUS;
@@ -1222,15 +1265,16 @@ intel_dp_detect(struct drm_connector *connector)
1222 1265
1223static int intel_dp_get_modes(struct drm_connector *connector) 1266static int intel_dp_get_modes(struct drm_connector *connector)
1224{ 1267{
1225 struct intel_encoder *intel_encoder = to_intel_encoder(connector); 1268 struct drm_encoder *encoder = intel_attached_encoder(connector);
1226 struct drm_device *dev = intel_encoder->base.dev; 1269 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
1270 struct drm_device *dev = intel_encoder->enc.dev;
1227 struct drm_i915_private *dev_priv = dev->dev_private; 1271 struct drm_i915_private *dev_priv = dev->dev_private;
1228 int ret; 1272 int ret;
1229 1273
1230 /* We should parse the EDID data and find out if it has an audio sink 1274 /* We should parse the EDID data and find out if it has an audio sink
1231 */ 1275 */
1232 1276
1233 ret = intel_ddc_get_modes(intel_encoder); 1277 ret = intel_ddc_get_modes(connector, intel_encoder->ddc_bus);
1234 if (ret) 1278 if (ret)
1235 return ret; 1279 return ret;
1236 1280
@@ -1249,13 +1293,9 @@ static int intel_dp_get_modes(struct drm_connector *connector)
1249static void 1293static void
1250intel_dp_destroy (struct drm_connector *connector) 1294intel_dp_destroy (struct drm_connector *connector)
1251{ 1295{
1252 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
1253
1254 if (intel_encoder->i2c_bus)
1255 intel_i2c_destroy(intel_encoder->i2c_bus);
1256 drm_sysfs_connector_remove(connector); 1296 drm_sysfs_connector_remove(connector);
1257 drm_connector_cleanup(connector); 1297 drm_connector_cleanup(connector);
1258 kfree(intel_encoder); 1298 kfree(connector);
1259} 1299}
1260 1300
1261static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = { 1301static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = {
@@ -1268,8 +1308,6 @@ static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = {
1268 1308
1269static const struct drm_connector_funcs intel_dp_connector_funcs = { 1309static const struct drm_connector_funcs intel_dp_connector_funcs = {
1270 .dpms = drm_helper_connector_dpms, 1310 .dpms = drm_helper_connector_dpms,
1271 .save = intel_dp_save,
1272 .restore = intel_dp_restore,
1273 .detect = intel_dp_detect, 1311 .detect = intel_dp_detect,
1274 .fill_modes = drm_helper_probe_single_connector_modes, 1312 .fill_modes = drm_helper_probe_single_connector_modes,
1275 .destroy = intel_dp_destroy, 1313 .destroy = intel_dp_destroy,
@@ -1278,12 +1316,17 @@ static const struct drm_connector_funcs intel_dp_connector_funcs = {
1278static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = { 1316static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
1279 .get_modes = intel_dp_get_modes, 1317 .get_modes = intel_dp_get_modes,
1280 .mode_valid = intel_dp_mode_valid, 1318 .mode_valid = intel_dp_mode_valid,
1281 .best_encoder = intel_best_encoder, 1319 .best_encoder = intel_attached_encoder,
1282}; 1320};
1283 1321
1284static void intel_dp_enc_destroy(struct drm_encoder *encoder) 1322static void intel_dp_enc_destroy(struct drm_encoder *encoder)
1285{ 1323{
1324 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
1325
1326 if (intel_encoder->i2c_bus)
1327 intel_i2c_destroy(intel_encoder->i2c_bus);
1286 drm_encoder_cleanup(encoder); 1328 drm_encoder_cleanup(encoder);
1329 kfree(intel_encoder);
1287} 1330}
1288 1331
1289static const struct drm_encoder_funcs intel_dp_enc_funcs = { 1332static const struct drm_encoder_funcs intel_dp_enc_funcs = {
@@ -1299,12 +1342,35 @@ intel_dp_hot_plug(struct intel_encoder *intel_encoder)
1299 intel_dp_check_link_status(intel_encoder); 1342 intel_dp_check_link_status(intel_encoder);
1300} 1343}
1301 1344
1345/* Return which DP Port should be selected for Transcoder DP control */
1346int
1347intel_trans_dp_port_sel (struct drm_crtc *crtc)
1348{
1349 struct drm_device *dev = crtc->dev;
1350 struct drm_mode_config *mode_config = &dev->mode_config;
1351 struct drm_encoder *encoder;
1352 struct intel_encoder *intel_encoder = NULL;
1353
1354 list_for_each_entry(encoder, &mode_config->encoder_list, head) {
1355 if (!encoder || encoder->crtc != crtc)
1356 continue;
1357
1358 intel_encoder = enc_to_intel_encoder(encoder);
1359 if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT) {
1360 struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
1361 return dp_priv->output_reg;
1362 }
1363 }
1364 return -1;
1365}
1366
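/*
 * Hedged usage sketch (the helper name is illustrative): a return of
 * -1 from intel_trans_dp_port_sel() means no DisplayPort encoder is
 * attached to the CRTC, so CPT transcoder DP setup can be skipped.
 */
static bool crtc_has_pch_dp(struct drm_crtc *crtc)
{
	return intel_trans_dp_port_sel(crtc) != -1;
}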
1302void 1367void
1303intel_dp_init(struct drm_device *dev, int output_reg) 1368intel_dp_init(struct drm_device *dev, int output_reg)
1304{ 1369{
1305 struct drm_i915_private *dev_priv = dev->dev_private; 1370 struct drm_i915_private *dev_priv = dev->dev_private;
1306 struct drm_connector *connector; 1371 struct drm_connector *connector;
1307 struct intel_encoder *intel_encoder; 1372 struct intel_encoder *intel_encoder;
1373 struct intel_connector *intel_connector;
1308 struct intel_dp_priv *dp_priv; 1374 struct intel_dp_priv *dp_priv;
1309 const char *name = NULL; 1375 const char *name = NULL;
1310 1376
@@ -1313,9 +1379,15 @@ intel_dp_init(struct drm_device *dev, int output_reg)
1313 if (!intel_encoder) 1379 if (!intel_encoder)
1314 return; 1380 return;
1315 1381
1382 intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
1383 if (!intel_connector) {
1384 kfree(intel_encoder);
1385 return;
1386 }
1387
1316 dp_priv = (struct intel_dp_priv *)(intel_encoder + 1); 1388 dp_priv = (struct intel_dp_priv *)(intel_encoder + 1);
1317 1389
1318 connector = &intel_encoder->base; 1390 connector = &intel_connector->base;
1319 drm_connector_init(dev, connector, &intel_dp_connector_funcs, 1391 drm_connector_init(dev, connector, &intel_dp_connector_funcs,
1320 DRM_MODE_CONNECTOR_DisplayPort); 1392 DRM_MODE_CONNECTOR_DisplayPort);
1321 drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs); 1393 drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
@@ -1349,7 +1421,7 @@ intel_dp_init(struct drm_device *dev, int output_reg)
1349 DRM_MODE_ENCODER_TMDS); 1421 DRM_MODE_ENCODER_TMDS);
1350 drm_encoder_helper_add(&intel_encoder->enc, &intel_dp_helper_funcs); 1422 drm_encoder_helper_add(&intel_encoder->enc, &intel_dp_helper_funcs);
1351 1423
1352 drm_mode_connector_attach_encoder(&intel_encoder->base, 1424 drm_mode_connector_attach_encoder(&intel_connector->base,
1353 &intel_encoder->enc); 1425 &intel_encoder->enc);
1354 drm_sysfs_connector_add(connector); 1426 drm_sysfs_connector_add(connector);
1355 1427
@@ -1378,7 +1450,7 @@ intel_dp_init(struct drm_device *dev, int output_reg)
1378 break; 1450 break;
1379 } 1451 }
1380 1452
1381 intel_dp_i2c_init(intel_encoder, name); 1453 intel_dp_i2c_init(intel_encoder, intel_connector, name);
1382 1454
1383 intel_encoder->ddc_bus = &dp_priv->adapter; 1455 intel_encoder->ddc_bus = &dp_priv->adapter;
1384 intel_encoder->hot_plug = intel_dp_hot_plug; 1456 intel_encoder->hot_plug = intel_dp_hot_plug;
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index e30253755f12..3230e8d2ea43 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -96,8 +96,6 @@ struct intel_framebuffer {
96 96
97 97
98struct intel_encoder { 98struct intel_encoder {
99 struct drm_connector base;
100
101 struct drm_encoder enc; 99 struct drm_encoder enc;
102 int type; 100 int type;
103 struct i2c_adapter *i2c_bus; 101 struct i2c_adapter *i2c_bus;
@@ -110,6 +108,11 @@ struct intel_encoder {
110 int clone_mask; 108 int clone_mask;
111}; 109};
112 110
111struct intel_connector {
112 struct drm_connector base;
113 void *dev_priv;
114};
115
113struct intel_crtc; 116struct intel_crtc;
114struct intel_overlay { 117struct intel_overlay {
115 struct drm_device *dev; 118 struct drm_device *dev;
@@ -149,17 +152,18 @@ struct intel_crtc {
149 bool lowfreq_avail; 152 bool lowfreq_avail;
150 struct intel_overlay *overlay; 153 struct intel_overlay *overlay;
151 struct intel_unpin_work *unpin_work; 154 struct intel_unpin_work *unpin_work;
155 int fdi_lanes;
152}; 156};
153 157
154#define to_intel_crtc(x) container_of(x, struct intel_crtc, base) 158#define to_intel_crtc(x) container_of(x, struct intel_crtc, base)
155#define to_intel_encoder(x) container_of(x, struct intel_encoder, base) 159#define to_intel_connector(x) container_of(x, struct intel_connector, base)
156#define enc_to_intel_encoder(x) container_of(x, struct intel_encoder, enc) 160#define enc_to_intel_encoder(x) container_of(x, struct intel_encoder, enc)
157#define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base) 161#define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base)
158 162
159struct i2c_adapter *intel_i2c_create(struct drm_device *dev, const u32 reg, 163struct i2c_adapter *intel_i2c_create(struct drm_device *dev, const u32 reg,
160 const char *name); 164 const char *name);
161void intel_i2c_destroy(struct i2c_adapter *adapter); 165void intel_i2c_destroy(struct i2c_adapter *adapter);
162int intel_ddc_get_modes(struct intel_encoder *intel_encoder); 166int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter);
163extern bool intel_ddc_probe(struct intel_encoder *intel_encoder); 167extern bool intel_ddc_probe(struct intel_encoder *intel_encoder);
164void intel_i2c_quirk_set(struct drm_device *dev, bool enable); 168void intel_i2c_quirk_set(struct drm_device *dev, bool enable);
165void intel_i2c_reset_gmbus(struct drm_device *dev); 169void intel_i2c_reset_gmbus(struct drm_device *dev);
@@ -183,7 +187,7 @@ extern void intel_crtc_load_lut(struct drm_crtc *crtc);
183extern void intel_encoder_prepare (struct drm_encoder *encoder); 187extern void intel_encoder_prepare (struct drm_encoder *encoder);
184extern void intel_encoder_commit (struct drm_encoder *encoder); 188extern void intel_encoder_commit (struct drm_encoder *encoder);
185 189
186extern struct drm_encoder *intel_best_encoder(struct drm_connector *connector); 190extern struct drm_encoder *intel_attached_encoder(struct drm_connector *connector);
187 191
188extern struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev, 192extern struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
189 struct drm_crtc *crtc); 193 struct drm_crtc *crtc);
@@ -192,17 +196,16 @@ int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
192extern void intel_wait_for_vblank(struct drm_device *dev); 196extern void intel_wait_for_vblank(struct drm_device *dev);
193extern struct drm_crtc *intel_get_crtc_from_pipe(struct drm_device *dev, int pipe); 197extern struct drm_crtc *intel_get_crtc_from_pipe(struct drm_device *dev, int pipe);
194extern struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder, 198extern struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
199 struct drm_connector *connector,
195 struct drm_display_mode *mode, 200 struct drm_display_mode *mode,
196 int *dpms_mode); 201 int *dpms_mode);
197extern void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder, 202extern void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder,
203 struct drm_connector *connector,
198 int dpms_mode); 204 int dpms_mode);
199 205
200extern struct drm_connector* intel_sdvo_find(struct drm_device *dev, int sdvoB); 206extern struct drm_connector* intel_sdvo_find(struct drm_device *dev, int sdvoB);
201extern int intel_sdvo_supports_hotplug(struct drm_connector *connector); 207extern int intel_sdvo_supports_hotplug(struct drm_connector *connector);
202extern void intel_sdvo_set_hotplug(struct drm_connector *connector, int enable); 208extern void intel_sdvo_set_hotplug(struct drm_connector *connector, int enable);
203extern int intelfb_probe(struct drm_device *dev);
204extern int intelfb_remove(struct drm_device *dev, struct drm_framebuffer *fb);
205extern int intelfb_resize(struct drm_device *dev, struct drm_crtc *crtc);
206extern void intelfb_restore(void); 209extern void intelfb_restore(void);
207extern void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, 210extern void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
208 u16 blue, int regno); 211 u16 blue, int regno);
@@ -212,10 +215,12 @@ extern void intel_init_clock_gating(struct drm_device *dev);
212extern void ironlake_enable_drps(struct drm_device *dev); 215extern void ironlake_enable_drps(struct drm_device *dev);
213extern void ironlake_disable_drps(struct drm_device *dev); 216extern void ironlake_disable_drps(struct drm_device *dev);
214 217
215extern int intel_framebuffer_create(struct drm_device *dev, 218extern int intel_framebuffer_init(struct drm_device *dev,
216 struct drm_mode_fb_cmd *mode_cmd, 219 struct intel_framebuffer *ifb,
217 struct drm_framebuffer **fb, 220 struct drm_mode_fb_cmd *mode_cmd,
218 struct drm_gem_object *obj); 221 struct drm_gem_object *obj);
222extern int intel_fbdev_init(struct drm_device *dev);
223extern void intel_fbdev_fini(struct drm_device *dev);
219 224
220extern void intel_prepare_page_flip(struct drm_device *dev, int plane); 225extern void intel_prepare_page_flip(struct drm_device *dev, int plane);
221extern void intel_finish_page_flip(struct drm_device *dev, int pipe); 226extern void intel_finish_page_flip(struct drm_device *dev, int pipe);
@@ -229,4 +234,6 @@ extern int intel_overlay_put_image(struct drm_device *dev, void *data,
229 struct drm_file *file_priv); 234 struct drm_file *file_priv);
230extern int intel_overlay_attrs(struct drm_device *dev, void *data, 235extern int intel_overlay_attrs(struct drm_device *dev, void *data,
231 struct drm_file *file_priv); 236 struct drm_file *file_priv);
237
238void intelfb_hotplug(struct drm_device *dev, bool polled);
232#endif /* __INTEL_DRV_H__ */ 239#endif /* __INTEL_DRV_H__ */
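/*
 * Minimal sketch of the split declared above (the function is
 * illustrative): drm_connector is now wrapped by intel_connector
 * instead of being embedded in intel_encoder, with
 * to_intel_connector() as the matching container_of() downcast.
 */
static void *intel_connector_priv(struct drm_connector *connector)
{
	return to_intel_connector(connector)->dev_priv;
}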
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index ebf213c96b9c..227feca7cf8d 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -96,39 +96,11 @@ static void intel_dvo_dpms(struct drm_encoder *encoder, int mode)
96 } 96 }
97} 97}
98 98
99static void intel_dvo_save(struct drm_connector *connector)
100{
101 struct drm_i915_private *dev_priv = connector->dev->dev_private;
102 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
103 struct intel_dvo_device *dvo = intel_encoder->dev_priv;
104
105 /* Each output should probably just save the registers it touches,
106 * but for now, use more overkill.
107 */
108 dev_priv->saveDVOA = I915_READ(DVOA);
109 dev_priv->saveDVOB = I915_READ(DVOB);
110 dev_priv->saveDVOC = I915_READ(DVOC);
111
112 dvo->dev_ops->save(dvo);
113}
114
115static void intel_dvo_restore(struct drm_connector *connector)
116{
117 struct drm_i915_private *dev_priv = connector->dev->dev_private;
118 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
119 struct intel_dvo_device *dvo = intel_encoder->dev_priv;
120
121 dvo->dev_ops->restore(dvo);
122
123 I915_WRITE(DVOA, dev_priv->saveDVOA);
124 I915_WRITE(DVOB, dev_priv->saveDVOB);
125 I915_WRITE(DVOC, dev_priv->saveDVOC);
126}
127
128static int intel_dvo_mode_valid(struct drm_connector *connector, 99static int intel_dvo_mode_valid(struct drm_connector *connector,
129 struct drm_display_mode *mode) 100 struct drm_display_mode *mode)
130{ 101{
131 struct intel_encoder *intel_encoder = to_intel_encoder(connector); 102 struct drm_encoder *encoder = intel_attached_encoder(connector);
103 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
132 struct intel_dvo_device *dvo = intel_encoder->dev_priv; 104 struct intel_dvo_device *dvo = intel_encoder->dev_priv;
133 105
134 if (mode->flags & DRM_MODE_FLAG_DBLSCAN) 106 if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
@@ -241,7 +213,8 @@ static void intel_dvo_mode_set(struct drm_encoder *encoder,
241 */ 213 */
242static enum drm_connector_status intel_dvo_detect(struct drm_connector *connector) 214static enum drm_connector_status intel_dvo_detect(struct drm_connector *connector)
243{ 215{
244 struct intel_encoder *intel_encoder = to_intel_encoder(connector); 216 struct drm_encoder *encoder = intel_attached_encoder(connector);
217 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
245 struct intel_dvo_device *dvo = intel_encoder->dev_priv; 218 struct intel_dvo_device *dvo = intel_encoder->dev_priv;
246 219
247 return dvo->dev_ops->detect(dvo); 220 return dvo->dev_ops->detect(dvo);
@@ -249,7 +222,8 @@ static enum drm_connector_status intel_dvo_detect(struct drm_connector *connecto
249 222
250static int intel_dvo_get_modes(struct drm_connector *connector) 223static int intel_dvo_get_modes(struct drm_connector *connector)
251{ 224{
252 struct intel_encoder *intel_encoder = to_intel_encoder(connector); 225 struct drm_encoder *encoder = intel_attached_encoder(connector);
226 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
253 struct intel_dvo_device *dvo = intel_encoder->dev_priv; 227 struct intel_dvo_device *dvo = intel_encoder->dev_priv;
254 228
255 /* We should probably have an i2c driver get_modes function for those 229 /* We should probably have an i2c driver get_modes function for those
@@ -257,7 +231,7 @@ static int intel_dvo_get_modes(struct drm_connector *connector)
257 * (TV-out, for example), but for now with just TMDS and LVDS, 231 * (TV-out, for example), but for now with just TMDS and LVDS,
258 * that's not the case. 232 * that's not the case.
259 */ 233 */
260 intel_ddc_get_modes(intel_encoder); 234 intel_ddc_get_modes(connector, intel_encoder->ddc_bus);
261 if (!list_empty(&connector->probed_modes)) 235 if (!list_empty(&connector->probed_modes))
262 return 1; 236 return 1;
263 237
@@ -275,38 +249,10 @@ static int intel_dvo_get_modes(struct drm_connector *connector)
275 249
276static void intel_dvo_destroy (struct drm_connector *connector) 250static void intel_dvo_destroy (struct drm_connector *connector)
277{ 251{
278 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
279 struct intel_dvo_device *dvo = intel_encoder->dev_priv;
280
281 if (dvo) {
282 if (dvo->dev_ops->destroy)
283 dvo->dev_ops->destroy(dvo);
284 if (dvo->panel_fixed_mode)
285 kfree(dvo->panel_fixed_mode);
286 /* no need, in i830_dvoices[] now */
287 //kfree(dvo);
288 }
289 if (intel_encoder->i2c_bus)
290 intel_i2c_destroy(intel_encoder->i2c_bus);
291 if (intel_encoder->ddc_bus)
292 intel_i2c_destroy(intel_encoder->ddc_bus);
293 drm_sysfs_connector_remove(connector); 252 drm_sysfs_connector_remove(connector);
294 drm_connector_cleanup(connector); 253 drm_connector_cleanup(connector);
295 kfree(intel_encoder); 254 kfree(connector);
296}
297
298#ifdef RANDR_GET_CRTC_INTERFACE
299static struct drm_crtc *intel_dvo_get_crtc(struct drm_connector *connector)
300{
301 struct drm_device *dev = connector->dev;
302 struct drm_i915_private *dev_priv = dev->dev_private;
303 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
304 struct intel_dvo_device *dvo = intel_encoder->dev_priv;
305 int pipe = !!(I915_READ(dvo->dvo_reg) & SDVO_PIPE_B_SELECT);
306
307 return intel_pipe_to_crtc(pScrn, pipe);
308} 255}
309#endif
310 256
311static const struct drm_encoder_helper_funcs intel_dvo_helper_funcs = { 257static const struct drm_encoder_helper_funcs intel_dvo_helper_funcs = {
312 .dpms = intel_dvo_dpms, 258 .dpms = intel_dvo_dpms,
@@ -318,8 +264,6 @@ static const struct drm_encoder_helper_funcs intel_dvo_helper_funcs = {
318 264
319static const struct drm_connector_funcs intel_dvo_connector_funcs = { 265static const struct drm_connector_funcs intel_dvo_connector_funcs = {
320 .dpms = drm_helper_connector_dpms, 266 .dpms = drm_helper_connector_dpms,
321 .save = intel_dvo_save,
322 .restore = intel_dvo_restore,
323 .detect = intel_dvo_detect, 267 .detect = intel_dvo_detect,
324 .destroy = intel_dvo_destroy, 268 .destroy = intel_dvo_destroy,
325 .fill_modes = drm_helper_probe_single_connector_modes, 269 .fill_modes = drm_helper_probe_single_connector_modes,
@@ -328,12 +272,26 @@ static const struct drm_connector_funcs intel_dvo_connector_funcs = {
328static const struct drm_connector_helper_funcs intel_dvo_connector_helper_funcs = { 272static const struct drm_connector_helper_funcs intel_dvo_connector_helper_funcs = {
329 .mode_valid = intel_dvo_mode_valid, 273 .mode_valid = intel_dvo_mode_valid,
330 .get_modes = intel_dvo_get_modes, 274 .get_modes = intel_dvo_get_modes,
331 .best_encoder = intel_best_encoder, 275 .best_encoder = intel_attached_encoder,
332}; 276};
333 277
334static void intel_dvo_enc_destroy(struct drm_encoder *encoder) 278static void intel_dvo_enc_destroy(struct drm_encoder *encoder)
335{ 279{
280 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
281 struct intel_dvo_device *dvo = intel_encoder->dev_priv;
282
283 if (dvo) {
284 if (dvo->dev_ops->destroy)
285 dvo->dev_ops->destroy(dvo);
286 if (dvo->panel_fixed_mode)
287 kfree(dvo->panel_fixed_mode);
288 }
289 if (intel_encoder->i2c_bus)
290 intel_i2c_destroy(intel_encoder->i2c_bus);
291 if (intel_encoder->ddc_bus)
292 intel_i2c_destroy(intel_encoder->ddc_bus);
336 drm_encoder_cleanup(encoder); 293 drm_encoder_cleanup(encoder);
294 kfree(intel_encoder);
337} 295}
338 296
339static const struct drm_encoder_funcs intel_dvo_enc_funcs = { 297static const struct drm_encoder_funcs intel_dvo_enc_funcs = {
@@ -352,7 +310,8 @@ intel_dvo_get_current_mode (struct drm_connector *connector)
352{ 310{
353 struct drm_device *dev = connector->dev; 311 struct drm_device *dev = connector->dev;
354 struct drm_i915_private *dev_priv = dev->dev_private; 312 struct drm_i915_private *dev_priv = dev->dev_private;
355 struct intel_encoder *intel_encoder = to_intel_encoder(connector); 313 struct drm_encoder *encoder = intel_attached_encoder(connector);
314 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
356 struct intel_dvo_device *dvo = intel_encoder->dev_priv; 315 struct intel_dvo_device *dvo = intel_encoder->dev_priv;
357 uint32_t dvo_reg = dvo->dvo_reg; 316 uint32_t dvo_reg = dvo->dvo_reg;
358 uint32_t dvo_val = I915_READ(dvo_reg); 317 uint32_t dvo_val = I915_READ(dvo_reg);
@@ -384,6 +343,7 @@ intel_dvo_get_current_mode (struct drm_connector *connector)
384void intel_dvo_init(struct drm_device *dev) 343void intel_dvo_init(struct drm_device *dev)
385{ 344{
386 struct intel_encoder *intel_encoder; 345 struct intel_encoder *intel_encoder;
346 struct intel_connector *intel_connector;
387 struct intel_dvo_device *dvo; 347 struct intel_dvo_device *dvo;
388 struct i2c_adapter *i2cbus = NULL; 348 struct i2c_adapter *i2cbus = NULL;
389 int ret = 0; 349 int ret = 0;
@@ -393,6 +353,12 @@ void intel_dvo_init(struct drm_device *dev)
393 if (!intel_encoder) 353 if (!intel_encoder)
394 return; 354 return;
395 355
356 intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
357 if (!intel_connector) {
358 kfree(intel_encoder);
359 return;
360 }
361
396 /* Set up the DDC bus */ 362 /* Set up the DDC bus */
397 intel_encoder->ddc_bus = intel_i2c_create(dev, GPIOD, "DVODDC_D"); 363 intel_encoder->ddc_bus = intel_i2c_create(dev, GPIOD, "DVODDC_D");
398 if (!intel_encoder->ddc_bus) 364 if (!intel_encoder->ddc_bus)
@@ -400,7 +366,7 @@ void intel_dvo_init(struct drm_device *dev)
400 366
401 /* Now, try to find a controller */ 367 /* Now, try to find a controller */
402 for (i = 0; i < ARRAY_SIZE(intel_dvo_devices); i++) { 368 for (i = 0; i < ARRAY_SIZE(intel_dvo_devices); i++) {
403 struct drm_connector *connector = &intel_encoder->base; 369 struct drm_connector *connector = &intel_connector->base;
404 int gpio; 370 int gpio;
405 371
406 dvo = &intel_dvo_devices[i]; 372 dvo = &intel_dvo_devices[i];
@@ -471,7 +437,7 @@ void intel_dvo_init(struct drm_device *dev)
471 drm_encoder_helper_add(&intel_encoder->enc, 437 drm_encoder_helper_add(&intel_encoder->enc,
472 &intel_dvo_helper_funcs); 438 &intel_dvo_helper_funcs);
473 439
474 drm_mode_connector_attach_encoder(&intel_encoder->base, 440 drm_mode_connector_attach_encoder(&intel_connector->base,
475 &intel_encoder->enc); 441 &intel_encoder->enc);
476 if (dvo->type == INTEL_DVO_CHIP_LVDS) { 442 if (dvo->type == INTEL_DVO_CHIP_LVDS) {
477 /* For our LVDS chipsets, we should hopefully be able 443 /* For our LVDS chipsets, we should hopefully be able
@@ -496,4 +462,5 @@ void intel_dvo_init(struct drm_device *dev)
496 intel_i2c_destroy(i2cbus); 462 intel_i2c_destroy(i2cbus);
497free_intel: 463free_intel:
498 kfree(intel_encoder); 464 kfree(intel_encoder);
465 kfree(intel_connector);
499} 466}
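
The dvo conversion above is the template this series applies to every output type: the drm_connector moves out of struct intel_encoder into its own struct intel_connector, connector callbacks resolve the attached encoder before touching device-private data, and i2c/DDC teardown migrates from the connector's destroy hook to the encoder's. A minimal sketch of the cast helper used throughout, assuming the container_of layout implied by the embedded `enc' member (the in-tree definition lives outside these hunks):

/* Sketch only: recover the intel_encoder wrapping a drm_encoder.
 * Assumes 'enc' is embedded in struct intel_encoder, which is how
 * every '&intel_encoder->enc' in this patch uses it. */
static inline struct intel_encoder *
enc_to_intel_encoder(struct drm_encoder *encoder)
{
	return container_of(encoder, struct intel_encoder, enc);
}

intel_attached_encoder(connector) is the connector-side counterpart, returning the drm_encoder bound to a connector; together they replace the old to_intel_encoder(connector) cast, which only worked while connector and encoder shared one allocation.
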
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
index 8a0b3bcdc7b1..b04e0a86bf9a 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -44,9 +44,10 @@
44#include "i915_drm.h" 44#include "i915_drm.h"
45#include "i915_drv.h" 45#include "i915_drv.h"
46 46
47struct intelfb_par { 47struct intel_fbdev {
48 struct drm_fb_helper helper; 48 struct drm_fb_helper helper;
49 struct intel_framebuffer *intel_fb; 49 struct intel_framebuffer ifb;
50 struct list_head fbdev_list;
50 struct drm_display_mode *our_mode; 51 struct drm_display_mode *our_mode;
51}; 52};
52 53
@@ -54,7 +55,6 @@ static struct fb_ops intelfb_ops = {
54 .owner = THIS_MODULE, 55 .owner = THIS_MODULE,
55 .fb_check_var = drm_fb_helper_check_var, 56 .fb_check_var = drm_fb_helper_check_var,
56 .fb_set_par = drm_fb_helper_set_par, 57 .fb_set_par = drm_fb_helper_set_par,
57 .fb_setcolreg = drm_fb_helper_setcolreg,
58 .fb_fillrect = cfb_fillrect, 58 .fb_fillrect = cfb_fillrect,
59 .fb_copyarea = cfb_copyarea, 59 .fb_copyarea = cfb_copyarea,
60 .fb_imageblit = cfb_imageblit, 60 .fb_imageblit = cfb_imageblit,
@@ -63,62 +63,12 @@ static struct fb_ops intelfb_ops = {
63 .fb_setcmap = drm_fb_helper_setcmap, 63 .fb_setcmap = drm_fb_helper_setcmap,
64}; 64};
65 65
66static struct drm_fb_helper_funcs intel_fb_helper_funcs = { 66static int intelfb_create(struct intel_fbdev *ifbdev,
67 .gamma_set = intel_crtc_fb_gamma_set, 67 struct drm_fb_helper_surface_size *sizes)
68 .gamma_get = intel_crtc_fb_gamma_get,
69};
70
71
72/**
73 * Currently it is assumed that the old framebuffer is reused.
74 *
75 * LOCKING
76 * caller should hold the mode config lock.
77 *
78 */
79int intelfb_resize(struct drm_device *dev, struct drm_crtc *crtc)
80{
81 struct fb_info *info;
82 struct drm_framebuffer *fb;
83 struct drm_display_mode *mode = crtc->desired_mode;
84
85 fb = crtc->fb;
86 if (!fb)
87 return 1;
88
89 info = fb->fbdev;
90 if (!info)
91 return 1;
92
93 if (!mode)
94 return 1;
95
96 info->var.xres = mode->hdisplay;
97 info->var.right_margin = mode->hsync_start - mode->hdisplay;
98 info->var.hsync_len = mode->hsync_end - mode->hsync_start;
99 info->var.left_margin = mode->htotal - mode->hsync_end;
100 info->var.yres = mode->vdisplay;
101 info->var.lower_margin = mode->vsync_start - mode->vdisplay;
102 info->var.vsync_len = mode->vsync_end - mode->vsync_start;
103 info->var.upper_margin = mode->vtotal - mode->vsync_end;
104 info->var.pixclock = 10000000 / mode->htotal * 1000 / mode->vtotal * 100;
105 /* avoid overflow */
106 info->var.pixclock = info->var.pixclock * 1000 / mode->vrefresh;
107
108 return 0;
109}
110EXPORT_SYMBOL(intelfb_resize);
111
112static int intelfb_create(struct drm_device *dev, uint32_t fb_width,
113 uint32_t fb_height, uint32_t surface_width,
114 uint32_t surface_height,
115 uint32_t surface_depth, uint32_t surface_bpp,
116 struct drm_framebuffer **fb_p)
117{ 68{
69 struct drm_device *dev = ifbdev->helper.dev;
118 struct fb_info *info; 70 struct fb_info *info;
119 struct intelfb_par *par;
120 struct drm_framebuffer *fb; 71 struct drm_framebuffer *fb;
121 struct intel_framebuffer *intel_fb;
122 struct drm_mode_fb_cmd mode_cmd; 72 struct drm_mode_fb_cmd mode_cmd;
123 struct drm_gem_object *fbo = NULL; 73 struct drm_gem_object *fbo = NULL;
124 struct drm_i915_gem_object *obj_priv; 74 struct drm_i915_gem_object *obj_priv;
@@ -126,19 +76,19 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width,
126 int size, ret, mmio_bar = IS_I9XX(dev) ? 0 : 1; 76 int size, ret, mmio_bar = IS_I9XX(dev) ? 0 : 1;
127 77
128 /* we don't do packed 24bpp */ 78 /* we don't do packed 24bpp */
129 if (surface_bpp == 24) 79 if (sizes->surface_bpp == 24)
130 surface_bpp = 32; 80 sizes->surface_bpp = 32;
131 81
132 mode_cmd.width = surface_width; 82 mode_cmd.width = sizes->surface_width;
133 mode_cmd.height = surface_height; 83 mode_cmd.height = sizes->surface_height;
134 84
135 mode_cmd.bpp = surface_bpp; 85 mode_cmd.bpp = sizes->surface_bpp;
136 mode_cmd.pitch = ALIGN(mode_cmd.width * ((mode_cmd.bpp + 1) / 8), 64); 86 mode_cmd.pitch = ALIGN(mode_cmd.width * ((mode_cmd.bpp + 1) / 8), 64);
137 mode_cmd.depth = surface_depth; 87 mode_cmd.depth = sizes->surface_depth;
138 88
139 size = mode_cmd.pitch * mode_cmd.height; 89 size = mode_cmd.pitch * mode_cmd.height;
140 size = ALIGN(size, PAGE_SIZE); 90 size = ALIGN(size, PAGE_SIZE);
141 fbo = drm_gem_object_alloc(dev, size); 91 fbo = i915_gem_alloc_object(dev, size);
142 if (!fbo) { 92 if (!fbo) {
143 DRM_ERROR("failed to allocate framebuffer\n"); 93 DRM_ERROR("failed to allocate framebuffer\n");
144 ret = -ENOMEM; 94 ret = -ENOMEM;
@@ -157,39 +107,26 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width,
157 /* Flush everything out, we'll be doing GTT only from now on */ 107 /* Flush everything out, we'll be doing GTT only from now on */
158 i915_gem_object_set_to_gtt_domain(fbo, 1); 108 i915_gem_object_set_to_gtt_domain(fbo, 1);
159 109
160 ret = intel_framebuffer_create(dev, &mode_cmd, &fb, fbo); 110 info = framebuffer_alloc(0, device);
161 if (ret) {
162 DRM_ERROR("failed to allocate fb.\n");
163 goto out_unpin;
164 }
165
166 list_add(&fb->filp_head, &dev->mode_config.fb_kernel_list);
167
168 intel_fb = to_intel_framebuffer(fb);
169 *fb_p = fb;
170
171 info = framebuffer_alloc(sizeof(struct intelfb_par), device);
172 if (!info) { 111 if (!info) {
173 ret = -ENOMEM; 112 ret = -ENOMEM;
174 goto out_unpin; 113 goto out_unpin;
175 } 114 }
176 115
177 par = info->par; 116 info->par = ifbdev;
178 117
179 par->helper.funcs = &intel_fb_helper_funcs; 118 intel_framebuffer_init(dev, &ifbdev->ifb, &mode_cmd, fbo);
180 par->helper.dev = dev; 119
181 ret = drm_fb_helper_init_crtc_count(&par->helper, 2, 120 fb = &ifbdev->ifb.base;
182 INTELFB_CONN_LIMIT); 121
183 if (ret) 122 ifbdev->helper.fb = fb;
184 goto out_unref; 123 ifbdev->helper.fbdev = info;
185 124
186 strcpy(info->fix.id, "inteldrmfb"); 125 strcpy(info->fix.id, "inteldrmfb");
187 126
188 info->flags = FBINFO_DEFAULT; 127 info->flags = FBINFO_DEFAULT;
189
190 info->fbops = &intelfb_ops; 128 info->fbops = &intelfb_ops;
191 129
192
193 /* setup aperture base/size for vesafb takeover */ 130 /* setup aperture base/size for vesafb takeover */
194 info->aperture_base = dev->mode_config.fb_base; 131 info->aperture_base = dev->mode_config.fb_base;
195 if (IS_I9XX(dev)) 132 if (IS_I9XX(dev))
@@ -208,12 +145,18 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width,
208 ret = -ENOSPC; 145 ret = -ENOSPC;
209 goto out_unpin; 146 goto out_unpin;
210 } 147 }
148
149 ret = fb_alloc_cmap(&info->cmap, 256, 0);
150 if (ret) {
151 ret = -ENOMEM;
152 goto out_unpin;
153 }
211 info->screen_size = size; 154 info->screen_size = size;
212 155
213// memset(info->screen_base, 0, size); 156// memset(info->screen_base, 0, size);
214 157
215 drm_fb_helper_fill_fix(info, fb->pitch, fb->depth); 158 drm_fb_helper_fill_fix(info, fb->pitch, fb->depth);
216 drm_fb_helper_fill_var(info, fb, fb_width, fb_height); 159 drm_fb_helper_fill_var(info, &ifbdev->helper, sizes->fb_width, sizes->fb_height);
217 160
218 /* FIXME: we really shouldn't expose mmio space at all */ 161 /* FIXME: we really shouldn't expose mmio space at all */
219 info->fix.mmio_start = pci_resource_start(dev->pdev, mmio_bar); 162 info->fix.mmio_start = pci_resource_start(dev->pdev, mmio_bar);
@@ -225,14 +168,10 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width,
225 info->pixmap.flags = FB_PIXMAP_SYSTEM; 168 info->pixmap.flags = FB_PIXMAP_SYSTEM;
226 info->pixmap.scan_align = 1; 169 info->pixmap.scan_align = 1;
227 170
228 fb->fbdev = info;
229
230 par->intel_fb = intel_fb;
231
232 /* To allow resizeing without swapping buffers */
233 DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08x, bo %p\n", 171 DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08x, bo %p\n",
234 intel_fb->base.width, intel_fb->base.height, 172 fb->width, fb->height,
235 obj_priv->gtt_offset, fbo); 173 obj_priv->gtt_offset, fbo);
174
236 175
237 mutex_unlock(&dev->struct_mutex); 176 mutex_unlock(&dev->struct_mutex);
238 vga_switcheroo_client_fb_set(dev->pdev, info); 177 vga_switcheroo_client_fb_set(dev->pdev, info);
@@ -247,35 +186,86 @@ out:
247 return ret; 186 return ret;
248} 187}
249 188
250int intelfb_probe(struct drm_device *dev) 189static int intel_fb_find_or_create_single(struct drm_fb_helper *helper,
190 struct drm_fb_helper_surface_size *sizes)
251{ 191{
192 struct intel_fbdev *ifbdev = (struct intel_fbdev *)helper;
193 int new_fb = 0;
252 int ret; 194 int ret;
253 195
254 DRM_DEBUG_KMS("\n"); 196 if (!helper->fb) {
255 ret = drm_fb_helper_single_fb_probe(dev, 32, intelfb_create); 197 ret = intelfb_create(ifbdev, sizes);
256 return ret; 198 if (ret)
199 return ret;
200 new_fb = 1;
201 }
202 return new_fb;
257} 203}
258EXPORT_SYMBOL(intelfb_probe);
259 204
260int intelfb_remove(struct drm_device *dev, struct drm_framebuffer *fb) 205void intelfb_hotplug(struct drm_device *dev, bool polled)
261{ 206{
262 struct fb_info *info; 207 drm_i915_private_t *dev_priv = dev->dev_private;
208 drm_helper_fb_hpd_irq_event(&dev_priv->fbdev->helper);
209}
263 210
264 if (!fb) 211static struct drm_fb_helper_funcs intel_fb_helper_funcs = {
265 return -EINVAL; 212 .gamma_set = intel_crtc_fb_gamma_set,
213 .gamma_get = intel_crtc_fb_gamma_get,
214 .fb_probe = intel_fb_find_or_create_single,
215};
266 216
267 info = fb->fbdev; 217int intel_fbdev_destroy(struct drm_device *dev,
218 struct intel_fbdev *ifbdev)
219{
220 struct fb_info *info;
221 struct intel_framebuffer *ifb = &ifbdev->ifb;
268 222
269 if (info) { 223 if (ifbdev->helper.fbdev) {
270 struct intelfb_par *par = info->par; 224 info = ifbdev->helper.fbdev;
271 unregister_framebuffer(info); 225 unregister_framebuffer(info);
272 iounmap(info->screen_base); 226 iounmap(info->screen_base);
273 if (info->par) 227 if (info->cmap.len)
274 drm_fb_helper_free(&par->helper); 228 fb_dealloc_cmap(&info->cmap);
275 framebuffer_release(info); 229 framebuffer_release(info);
276 } 230 }
277 231
232 drm_fb_helper_fini(&ifbdev->helper);
233
234 drm_framebuffer_cleanup(&ifb->base);
235 if (ifb->obj)
236 drm_gem_object_unreference_unlocked(ifb->obj);
237
238 return 0;
239}
240
241int intel_fbdev_init(struct drm_device *dev)
242{
243 struct intel_fbdev *ifbdev;
244 drm_i915_private_t *dev_priv = dev->dev_private;
245
246 ifbdev = kzalloc(sizeof(struct intel_fbdev), GFP_KERNEL);
247 if (!ifbdev)
248 return -ENOMEM;
249
250 dev_priv->fbdev = ifbdev;
251 ifbdev->helper.funcs = &intel_fb_helper_funcs;
252
253 drm_fb_helper_init(dev, &ifbdev->helper, 2,
254 INTELFB_CONN_LIMIT, false);
255
256 drm_fb_helper_single_add_all_connectors(&ifbdev->helper);
257 drm_fb_helper_initial_config(&ifbdev->helper, 32);
278 return 0; 258 return 0;
279} 259}
280EXPORT_SYMBOL(intelfb_remove); 260
261void intel_fbdev_fini(struct drm_device *dev)
262{
263 drm_i915_private_t *dev_priv = dev->dev_private;
264 if (!dev_priv->fbdev)
265 return;
266
267 intel_fbdev_destroy(dev, dev_priv->fbdev);
268 kfree(dev_priv->fbdev);
269 dev_priv->fbdev = NULL;
270}
281MODULE_LICENSE("GPL and additional rights"); 271MODULE_LICENSE("GPL and additional rights");
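
intel_fb.c now delegates its lifecycle to the generic fbdev helper. Condensed from the hunks above, the bring-up sequence looks as follows (error handling elided, as in the original; taking the final boolean of drm_fb_helper_init() to be the polling flag of this API revision is an assumption):

/* Sketch mirroring intel_fbdev_init(): register the helper, then let
 * it build the initial console configuration. */
static int example_fbdev_init(struct drm_device *dev,
			      struct intel_fbdev *ifbdev)
{
	ifbdev->helper.funcs = &intel_fb_helper_funcs;

	/* two CRTCs, at most INTELFB_CONN_LIMIT connectors, no polling */
	drm_fb_helper_init(dev, &ifbdev->helper, 2, INTELFB_CONN_LIMIT, false);
	drm_fb_helper_single_add_all_connectors(&ifbdev->helper);
	drm_fb_helper_initial_config(&ifbdev->helper, 32); /* prefer 32 bpp */
	return 0;
}

The initial-config step calls back into the new .fb_probe hook, intel_fb_find_or_create_single(), which only allocates a surface when the helper does not already own one -- that is what lets the same hook serve both first bring-up and later reconfiguration.
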
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 48cade0cf7b1..8a1c4eddc030 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -39,7 +39,6 @@
39 39
40struct intel_hdmi_priv { 40struct intel_hdmi_priv {
41 u32 sdvox_reg; 41 u32 sdvox_reg;
42 u32 save_SDVOX;
43 bool has_hdmi_sink; 42 bool has_hdmi_sink;
44}; 43};
45 44
@@ -63,8 +62,12 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder,
63 if (hdmi_priv->has_hdmi_sink) 62 if (hdmi_priv->has_hdmi_sink)
64 sdvox |= SDVO_AUDIO_ENABLE; 63 sdvox |= SDVO_AUDIO_ENABLE;
65 64
66 if (intel_crtc->pipe == 1) 65 if (intel_crtc->pipe == 1) {
67 sdvox |= SDVO_PIPE_B_SELECT; 66 if (HAS_PCH_CPT(dev))
67 sdvox |= PORT_TRANS_B_SEL_CPT;
68 else
69 sdvox |= SDVO_PIPE_B_SELECT;
70 }
68 71
69 I915_WRITE(hdmi_priv->sdvox_reg, sdvox); 72 I915_WRITE(hdmi_priv->sdvox_reg, sdvox);
70 POSTING_READ(hdmi_priv->sdvox_reg); 73 POSTING_READ(hdmi_priv->sdvox_reg);
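
On CPT-generation PCHs the pipe/transcoder selection field for this port moved, so the mode-set path can no longer hard-code SDVO_PIPE_B_SELECT. The branch above reduces to a one-line helper (illustrative only, not part of the patch):

/* Illustrative: routing bit that sends an HDMI/SDVO port to pipe B */
static u32 example_pipe_b_select_bit(struct drm_device *dev)
{
	return HAS_PCH_CPT(dev) ? PORT_TRANS_B_SEL_CPT : SDVO_PIPE_B_SELECT;
}
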
@@ -106,27 +109,6 @@ static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode)
106 } 109 }
107} 110}
108 111
109static void intel_hdmi_save(struct drm_connector *connector)
110{
111 struct drm_device *dev = connector->dev;
112 struct drm_i915_private *dev_priv = dev->dev_private;
113 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
114 struct intel_hdmi_priv *hdmi_priv = intel_encoder->dev_priv;
115
116 hdmi_priv->save_SDVOX = I915_READ(hdmi_priv->sdvox_reg);
117}
118
119static void intel_hdmi_restore(struct drm_connector *connector)
120{
121 struct drm_device *dev = connector->dev;
122 struct drm_i915_private *dev_priv = dev->dev_private;
123 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
124 struct intel_hdmi_priv *hdmi_priv = intel_encoder->dev_priv;
125
126 I915_WRITE(hdmi_priv->sdvox_reg, hdmi_priv->save_SDVOX);
127 POSTING_READ(hdmi_priv->sdvox_reg);
128}
129
130static int intel_hdmi_mode_valid(struct drm_connector *connector, 112static int intel_hdmi_mode_valid(struct drm_connector *connector,
131 struct drm_display_mode *mode) 113 struct drm_display_mode *mode)
132{ 114{
@@ -151,13 +133,14 @@ static bool intel_hdmi_mode_fixup(struct drm_encoder *encoder,
151static enum drm_connector_status 133static enum drm_connector_status
152intel_hdmi_detect(struct drm_connector *connector) 134intel_hdmi_detect(struct drm_connector *connector)
153{ 135{
154 struct intel_encoder *intel_encoder = to_intel_encoder(connector); 136 struct drm_encoder *encoder = intel_attached_encoder(connector);
137 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
155 struct intel_hdmi_priv *hdmi_priv = intel_encoder->dev_priv; 138 struct intel_hdmi_priv *hdmi_priv = intel_encoder->dev_priv;
156 struct edid *edid = NULL; 139 struct edid *edid = NULL;
157 enum drm_connector_status status = connector_status_disconnected; 140 enum drm_connector_status status = connector_status_disconnected;
158 141
159 hdmi_priv->has_hdmi_sink = false; 142 hdmi_priv->has_hdmi_sink = false;
160 edid = drm_get_edid(&intel_encoder->base, 143 edid = drm_get_edid(connector,
161 intel_encoder->ddc_bus); 144 intel_encoder->ddc_bus);
162 145
163 if (edid) { 146 if (edid) {
@@ -165,7 +148,7 @@ intel_hdmi_detect(struct drm_connector *connector)
165 status = connector_status_connected; 148 status = connector_status_connected;
166 hdmi_priv->has_hdmi_sink = drm_detect_hdmi_monitor(edid); 149 hdmi_priv->has_hdmi_sink = drm_detect_hdmi_monitor(edid);
167 } 150 }
168 intel_encoder->base.display_info.raw_edid = NULL; 151 connector->display_info.raw_edid = NULL;
169 kfree(edid); 152 kfree(edid);
170 } 153 }
171 154
@@ -174,24 +157,21 @@ intel_hdmi_detect(struct drm_connector *connector)
174 157
175static int intel_hdmi_get_modes(struct drm_connector *connector) 158static int intel_hdmi_get_modes(struct drm_connector *connector)
176{ 159{
177 struct intel_encoder *intel_encoder = to_intel_encoder(connector); 160 struct drm_encoder *encoder = intel_attached_encoder(connector);
161 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
178 162
179 /* We should parse the EDID data and find out if it's an HDMI sink so 163 /* We should parse the EDID data and find out if it's an HDMI sink so
180 * we can send audio to it. 164 * we can send audio to it.
181 */ 165 */
182 166
183 return intel_ddc_get_modes(intel_encoder); 167 return intel_ddc_get_modes(connector, intel_encoder->ddc_bus);
184} 168}
185 169
186static void intel_hdmi_destroy(struct drm_connector *connector) 170static void intel_hdmi_destroy(struct drm_connector *connector)
187{ 171{
188 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
189
190 if (intel_encoder->i2c_bus)
191 intel_i2c_destroy(intel_encoder->i2c_bus);
192 drm_sysfs_connector_remove(connector); 172 drm_sysfs_connector_remove(connector);
193 drm_connector_cleanup(connector); 173 drm_connector_cleanup(connector);
194 kfree(intel_encoder); 174 kfree(connector);
195} 175}
196 176
197static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs = { 177static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs = {
@@ -204,8 +184,6 @@ static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs = {
204 184
205static const struct drm_connector_funcs intel_hdmi_connector_funcs = { 185static const struct drm_connector_funcs intel_hdmi_connector_funcs = {
206 .dpms = drm_helper_connector_dpms, 186 .dpms = drm_helper_connector_dpms,
207 .save = intel_hdmi_save,
208 .restore = intel_hdmi_restore,
209 .detect = intel_hdmi_detect, 187 .detect = intel_hdmi_detect,
210 .fill_modes = drm_helper_probe_single_connector_modes, 188 .fill_modes = drm_helper_probe_single_connector_modes,
211 .destroy = intel_hdmi_destroy, 189 .destroy = intel_hdmi_destroy,
@@ -214,12 +192,17 @@ static const struct drm_connector_funcs intel_hdmi_connector_funcs = {
214static const struct drm_connector_helper_funcs intel_hdmi_connector_helper_funcs = { 192static const struct drm_connector_helper_funcs intel_hdmi_connector_helper_funcs = {
215 .get_modes = intel_hdmi_get_modes, 193 .get_modes = intel_hdmi_get_modes,
216 .mode_valid = intel_hdmi_mode_valid, 194 .mode_valid = intel_hdmi_mode_valid,
217 .best_encoder = intel_best_encoder, 195 .best_encoder = intel_attached_encoder,
218}; 196};
219 197
220static void intel_hdmi_enc_destroy(struct drm_encoder *encoder) 198static void intel_hdmi_enc_destroy(struct drm_encoder *encoder)
221{ 199{
200 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
201
202 if (intel_encoder->i2c_bus)
203 intel_i2c_destroy(intel_encoder->i2c_bus);
222 drm_encoder_cleanup(encoder); 204 drm_encoder_cleanup(encoder);
205 kfree(intel_encoder);
223} 206}
224 207
225static const struct drm_encoder_funcs intel_hdmi_enc_funcs = { 208static const struct drm_encoder_funcs intel_hdmi_enc_funcs = {
@@ -231,15 +214,23 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
231 struct drm_i915_private *dev_priv = dev->dev_private; 214 struct drm_i915_private *dev_priv = dev->dev_private;
232 struct drm_connector *connector; 215 struct drm_connector *connector;
233 struct intel_encoder *intel_encoder; 216 struct intel_encoder *intel_encoder;
217 struct intel_connector *intel_connector;
234 struct intel_hdmi_priv *hdmi_priv; 218 struct intel_hdmi_priv *hdmi_priv;
235 219
236 intel_encoder = kcalloc(sizeof(struct intel_encoder) + 220 intel_encoder = kcalloc(sizeof(struct intel_encoder) +
237 sizeof(struct intel_hdmi_priv), 1, GFP_KERNEL); 221 sizeof(struct intel_hdmi_priv), 1, GFP_KERNEL);
238 if (!intel_encoder) 222 if (!intel_encoder)
239 return; 223 return;
224
225 intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
226 if (!intel_connector) {
227 kfree(intel_encoder);
228 return;
229 }
230
240 hdmi_priv = (struct intel_hdmi_priv *)(intel_encoder + 1); 231 hdmi_priv = (struct intel_hdmi_priv *)(intel_encoder + 1);
241 232
242 connector = &intel_encoder->base; 233 connector = &intel_connector->base;
243 drm_connector_init(dev, connector, &intel_hdmi_connector_funcs, 234 drm_connector_init(dev, connector, &intel_hdmi_connector_funcs,
244 DRM_MODE_CONNECTOR_HDMIA); 235 DRM_MODE_CONNECTOR_HDMIA);
245 drm_connector_helper_add(connector, &intel_hdmi_connector_helper_funcs); 236 drm_connector_helper_add(connector, &intel_hdmi_connector_helper_funcs);
@@ -285,7 +276,7 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
285 DRM_MODE_ENCODER_TMDS); 276 DRM_MODE_ENCODER_TMDS);
286 drm_encoder_helper_add(&intel_encoder->enc, &intel_hdmi_helper_funcs); 277 drm_encoder_helper_add(&intel_encoder->enc, &intel_hdmi_helper_funcs);
287 278
288 drm_mode_connector_attach_encoder(&intel_encoder->base, 279 drm_mode_connector_attach_encoder(&intel_connector->base,
289 &intel_encoder->enc); 280 &intel_encoder->enc);
290 drm_sysfs_connector_add(connector); 281 drm_sysfs_connector_add(connector);
291 282
@@ -303,6 +294,7 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
303err_connector: 294err_connector:
304 drm_connector_cleanup(connector); 295 drm_connector_cleanup(connector);
305 kfree(intel_encoder); 296 kfree(intel_encoder);
297 kfree(intel_connector);
306 298
307 return; 299 return;
308} 300}
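
With the connector split, EDID probing is handed the drm_connector itself instead of &intel_encoder->base, and the raw_edid bookkeeping follows the connector. A simplified probe under the new API (the real intel_hdmi_detect() additionally checks for a digital sink and HDMI capability before reporting connected):

/* Sketch: connector-based EDID presence check after this change */
static enum drm_connector_status
example_ddc_detect(struct drm_connector *connector, struct i2c_adapter *ddc)
{
	enum drm_connector_status status = connector_status_disconnected;
	struct edid *edid = drm_get_edid(connector, ddc);

	if (edid) {
		status = connector_status_connected;
		/* drm_get_edid() stashed the blob in display_info; drop
		 * the stale pointer before freeing, as the hunks above do */
		connector->display_info.raw_edid = NULL;
		kfree(edid);
	}
	return status;
}
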
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index b66806a37d37..6a1accd83aec 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -139,75 +139,6 @@ static void intel_lvds_dpms(struct drm_encoder *encoder, int mode)
139 /* XXX: We never power down the LVDS pairs. */ 139 /* XXX: We never power down the LVDS pairs. */
140} 140}
141 141
142static void intel_lvds_save(struct drm_connector *connector)
143{
144 struct drm_device *dev = connector->dev;
145 struct drm_i915_private *dev_priv = dev->dev_private;
146 u32 pp_on_reg, pp_off_reg, pp_ctl_reg, pp_div_reg;
147 u32 pwm_ctl_reg;
148
149 if (HAS_PCH_SPLIT(dev)) {
150 pp_on_reg = PCH_PP_ON_DELAYS;
151 pp_off_reg = PCH_PP_OFF_DELAYS;
152 pp_ctl_reg = PCH_PP_CONTROL;
153 pp_div_reg = PCH_PP_DIVISOR;
154 pwm_ctl_reg = BLC_PWM_CPU_CTL;
155 } else {
156 pp_on_reg = PP_ON_DELAYS;
157 pp_off_reg = PP_OFF_DELAYS;
158 pp_ctl_reg = PP_CONTROL;
159 pp_div_reg = PP_DIVISOR;
160 pwm_ctl_reg = BLC_PWM_CTL;
161 }
162
163 dev_priv->savePP_ON = I915_READ(pp_on_reg);
164 dev_priv->savePP_OFF = I915_READ(pp_off_reg);
165 dev_priv->savePP_CONTROL = I915_READ(pp_ctl_reg);
166 dev_priv->savePP_DIVISOR = I915_READ(pp_div_reg);
167 dev_priv->saveBLC_PWM_CTL = I915_READ(pwm_ctl_reg);
168 dev_priv->backlight_duty_cycle = (dev_priv->saveBLC_PWM_CTL &
169 BACKLIGHT_DUTY_CYCLE_MASK);
170
171 /*
172 * If the light is off at server startup, just make it full brightness
173 */
174 if (dev_priv->backlight_duty_cycle == 0)
175 dev_priv->backlight_duty_cycle =
176 intel_lvds_get_max_backlight(dev);
177}
178
179static void intel_lvds_restore(struct drm_connector *connector)
180{
181 struct drm_device *dev = connector->dev;
182 struct drm_i915_private *dev_priv = dev->dev_private;
183 u32 pp_on_reg, pp_off_reg, pp_ctl_reg, pp_div_reg;
184 u32 pwm_ctl_reg;
185
186 if (HAS_PCH_SPLIT(dev)) {
187 pp_on_reg = PCH_PP_ON_DELAYS;
188 pp_off_reg = PCH_PP_OFF_DELAYS;
189 pp_ctl_reg = PCH_PP_CONTROL;
190 pp_div_reg = PCH_PP_DIVISOR;
191 pwm_ctl_reg = BLC_PWM_CPU_CTL;
192 } else {
193 pp_on_reg = PP_ON_DELAYS;
194 pp_off_reg = PP_OFF_DELAYS;
195 pp_ctl_reg = PP_CONTROL;
196 pp_div_reg = PP_DIVISOR;
197 pwm_ctl_reg = BLC_PWM_CTL;
198 }
199
200 I915_WRITE(pwm_ctl_reg, dev_priv->saveBLC_PWM_CTL);
201 I915_WRITE(pp_on_reg, dev_priv->savePP_ON);
202 I915_WRITE(pp_off_reg, dev_priv->savePP_OFF);
203 I915_WRITE(pp_div_reg, dev_priv->savePP_DIVISOR);
204 I915_WRITE(pp_ctl_reg, dev_priv->savePP_CONTROL);
205 if (dev_priv->savePP_CONTROL & POWER_TARGET_ON)
206 intel_lvds_set_power(dev, true);
207 else
208 intel_lvds_set_power(dev, false);
209}
210
211static int intel_lvds_mode_valid(struct drm_connector *connector, 142static int intel_lvds_mode_valid(struct drm_connector *connector,
212 struct drm_display_mode *mode) 143 struct drm_display_mode *mode)
213{ 144{
@@ -635,12 +566,13 @@ static enum drm_connector_status intel_lvds_detect(struct drm_connector *connector)
635static int intel_lvds_get_modes(struct drm_connector *connector) 566static int intel_lvds_get_modes(struct drm_connector *connector)
636{ 567{
637 struct drm_device *dev = connector->dev; 568 struct drm_device *dev = connector->dev;
638 struct intel_encoder *intel_encoder = to_intel_encoder(connector); 569 struct drm_encoder *encoder = intel_attached_encoder(connector);
570 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
639 struct drm_i915_private *dev_priv = dev->dev_private; 571 struct drm_i915_private *dev_priv = dev->dev_private;
640 int ret = 0; 572 int ret = 0;
641 573
642 if (dev_priv->lvds_edid_good) { 574 if (dev_priv->lvds_edid_good) {
643 ret = intel_ddc_get_modes(intel_encoder); 575 ret = intel_ddc_get_modes(connector, intel_encoder->ddc_bus);
644 576
645 if (ret) 577 if (ret)
646 return ret; 578 return ret;
@@ -717,11 +649,8 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
717static void intel_lvds_destroy(struct drm_connector *connector) 649static void intel_lvds_destroy(struct drm_connector *connector)
718{ 650{
719 struct drm_device *dev = connector->dev; 651 struct drm_device *dev = connector->dev;
720 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
721 struct drm_i915_private *dev_priv = dev->dev_private; 652 struct drm_i915_private *dev_priv = dev->dev_private;
722 653
723 if (intel_encoder->ddc_bus)
724 intel_i2c_destroy(intel_encoder->ddc_bus);
725 if (dev_priv->lid_notifier.notifier_call) 654 if (dev_priv->lid_notifier.notifier_call)
726 acpi_lid_notifier_unregister(&dev_priv->lid_notifier); 655 acpi_lid_notifier_unregister(&dev_priv->lid_notifier);
727 drm_sysfs_connector_remove(connector); 656 drm_sysfs_connector_remove(connector);
@@ -734,13 +663,14 @@ static int intel_lvds_set_property(struct drm_connector *connector,
734 uint64_t value) 663 uint64_t value)
735{ 664{
736 struct drm_device *dev = connector->dev; 665 struct drm_device *dev = connector->dev;
737 struct intel_encoder *intel_encoder =
738 to_intel_encoder(connector);
739 666
740 if (property == dev->mode_config.scaling_mode_property && 667 if (property == dev->mode_config.scaling_mode_property &&
741 connector->encoder) { 668 connector->encoder) {
742 struct drm_crtc *crtc = connector->encoder->crtc; 669 struct drm_crtc *crtc = connector->encoder->crtc;
670 struct drm_encoder *encoder = connector->encoder;
671 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
743 struct intel_lvds_priv *lvds_priv = intel_encoder->dev_priv; 672 struct intel_lvds_priv *lvds_priv = intel_encoder->dev_priv;
673
744 if (value == DRM_MODE_SCALE_NONE) { 674 if (value == DRM_MODE_SCALE_NONE) {
745 DRM_DEBUG_KMS("no scaling not supported\n"); 675 DRM_DEBUG_KMS("no scaling not supported\n");
746 return 0; 676 return 0;
@@ -774,13 +704,11 @@ static const struct drm_encoder_helper_funcs intel_lvds_helper_funcs = {
774static const struct drm_connector_helper_funcs intel_lvds_connector_helper_funcs = { 704static const struct drm_connector_helper_funcs intel_lvds_connector_helper_funcs = {
775 .get_modes = intel_lvds_get_modes, 705 .get_modes = intel_lvds_get_modes,
776 .mode_valid = intel_lvds_mode_valid, 706 .mode_valid = intel_lvds_mode_valid,
777 .best_encoder = intel_best_encoder, 707 .best_encoder = intel_attached_encoder,
778}; 708};
779 709
780static const struct drm_connector_funcs intel_lvds_connector_funcs = { 710static const struct drm_connector_funcs intel_lvds_connector_funcs = {
781 .dpms = drm_helper_connector_dpms, 711 .dpms = drm_helper_connector_dpms,
782 .save = intel_lvds_save,
783 .restore = intel_lvds_restore,
784 .detect = intel_lvds_detect, 712 .detect = intel_lvds_detect,
785 .fill_modes = drm_helper_probe_single_connector_modes, 713 .fill_modes = drm_helper_probe_single_connector_modes,
786 .set_property = intel_lvds_set_property, 714 .set_property = intel_lvds_set_property,
@@ -790,7 +718,12 @@ static const struct drm_connector_funcs intel_lvds_connector_funcs = {
790 718
791static void intel_lvds_enc_destroy(struct drm_encoder *encoder) 719static void intel_lvds_enc_destroy(struct drm_encoder *encoder)
792{ 720{
721 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
722
723 if (intel_encoder->ddc_bus)
724 intel_i2c_destroy(intel_encoder->ddc_bus);
793 drm_encoder_cleanup(encoder); 725 drm_encoder_cleanup(encoder);
726 kfree(intel_encoder);
794} 727}
795 728
796static const struct drm_encoder_funcs intel_lvds_enc_funcs = { 729static const struct drm_encoder_funcs intel_lvds_enc_funcs = {
@@ -979,6 +912,7 @@ void intel_lvds_init(struct drm_device *dev)
979{ 912{
980 struct drm_i915_private *dev_priv = dev->dev_private; 913 struct drm_i915_private *dev_priv = dev->dev_private;
981 struct intel_encoder *intel_encoder; 914 struct intel_encoder *intel_encoder;
915 struct intel_connector *intel_connector;
982 struct drm_connector *connector; 916 struct drm_connector *connector;
983 struct drm_encoder *encoder; 917 struct drm_encoder *encoder;
984 struct drm_display_mode *scan; /* *modes, *bios_mode; */ 918 struct drm_display_mode *scan; /* *modes, *bios_mode; */
@@ -1012,19 +946,27 @@ void intel_lvds_init(struct drm_device *dev)
1012 return; 946 return;
1013 } 947 }
1014 948
1015 connector = &intel_encoder->base; 949 intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
950 if (!intel_connector) {
951 kfree(intel_encoder);
952 return;
953 }
954
955 connector = &intel_connector->base;
1016 encoder = &intel_encoder->enc; 956 encoder = &intel_encoder->enc;
1017 drm_connector_init(dev, &intel_encoder->base, &intel_lvds_connector_funcs, 957 drm_connector_init(dev, &intel_connector->base, &intel_lvds_connector_funcs,
1018 DRM_MODE_CONNECTOR_LVDS); 958 DRM_MODE_CONNECTOR_LVDS);
1019 959
1020 drm_encoder_init(dev, &intel_encoder->enc, &intel_lvds_enc_funcs, 960 drm_encoder_init(dev, &intel_encoder->enc, &intel_lvds_enc_funcs,
1021 DRM_MODE_ENCODER_LVDS); 961 DRM_MODE_ENCODER_LVDS);
1022 962
1023 drm_mode_connector_attach_encoder(&intel_encoder->base, &intel_encoder->enc); 963 drm_mode_connector_attach_encoder(&intel_connector->base, &intel_encoder->enc);
1024 intel_encoder->type = INTEL_OUTPUT_LVDS; 964 intel_encoder->type = INTEL_OUTPUT_LVDS;
1025 965
1026 intel_encoder->clone_mask = (1 << INTEL_LVDS_CLONE_BIT); 966 intel_encoder->clone_mask = (1 << INTEL_LVDS_CLONE_BIT);
1027 intel_encoder->crtc_mask = (1 << 1); 967 intel_encoder->crtc_mask = (1 << 1);
968 if (IS_I965G(dev))
969 intel_encoder->crtc_mask |= (1 << 0);
1028 drm_encoder_helper_add(encoder, &intel_lvds_helper_funcs); 970 drm_encoder_helper_add(encoder, &intel_lvds_helper_funcs);
1029 drm_connector_helper_add(connector, &intel_lvds_connector_helper_funcs); 971 drm_connector_helper_add(connector, &intel_lvds_connector_helper_funcs);
1030 connector->display_info.subpixel_order = SubPixelHorizontalRGB; 972 connector->display_info.subpixel_order = SubPixelHorizontalRGB;
@@ -1039,7 +981,7 @@ void intel_lvds_init(struct drm_device *dev)
1039 * the initial panel fitting mode will be FULL_SCREEN. 981 * the initial panel fitting mode will be FULL_SCREEN.
1040 */ 982 */
1041 983
1042 drm_connector_attach_property(&intel_encoder->base, 984 drm_connector_attach_property(&intel_connector->base,
1043 dev->mode_config.scaling_mode_property, 985 dev->mode_config.scaling_mode_property,
1044 DRM_MODE_SCALE_FULLSCREEN); 986 DRM_MODE_SCALE_FULLSCREEN);
1045 lvds_priv->fitting_mode = DRM_MODE_SCALE_FULLSCREEN; 987 lvds_priv->fitting_mode = DRM_MODE_SCALE_FULLSCREEN;
@@ -1067,7 +1009,7 @@ void intel_lvds_init(struct drm_device *dev)
1067 */ 1009 */
1068 dev_priv->lvds_edid_good = true; 1010 dev_priv->lvds_edid_good = true;
1069 1011
1070 if (!intel_ddc_get_modes(intel_encoder)) 1012 if (!intel_ddc_get_modes(connector, intel_encoder->ddc_bus))
1071 dev_priv->lvds_edid_good = false; 1013 dev_priv->lvds_edid_good = false;
1072 1014
1073 list_for_each_entry(scan, &connector->probed_modes, head) { 1015 list_for_each_entry(scan, &connector->probed_modes, head) {
@@ -1151,4 +1093,5 @@ failed:
1151 drm_connector_cleanup(connector); 1093 drm_connector_cleanup(connector);
1152 drm_encoder_cleanup(encoder); 1094 drm_encoder_cleanup(encoder);
1153 kfree(intel_encoder); 1095 kfree(intel_encoder);
1096 kfree(intel_connector);
1154} 1097}
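
Splitting encoder and connector doubles the allocations every init path must unwind; lvds, hdmi and dvo now all follow the same shape, and the failure labels free both objects. Condensed:

/* Sketch of the paired-allocation pattern used by the init paths */
static void example_output_init(struct drm_device *dev)
{
	struct intel_encoder *intel_encoder;
	struct intel_connector *intel_connector;

	intel_encoder = kzalloc(sizeof(struct intel_encoder), GFP_KERNEL);
	if (!intel_encoder)
		return;

	intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
	if (!intel_connector) {
		kfree(intel_encoder);	/* unwind the first allocation */
		return;
	}

	/* ... drm_encoder_init()/drm_connector_init()/attach here; any
	 * later failure path must kfree() both objects, exactly as the
	 * 'failed:' labels above now do ... */
}
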
diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_modes.c
index 8e5c83b2d120..4b1fd3d9c73c 100644
--- a/drivers/gpu/drm/i915/intel_modes.c
+++ b/drivers/gpu/drm/i915/intel_modes.c
@@ -54,9 +54,9 @@ bool intel_ddc_probe(struct intel_encoder *intel_encoder)
54 } 54 }
55 }; 55 };
56 56
57 intel_i2c_quirk_set(intel_encoder->base.dev, true); 57 intel_i2c_quirk_set(intel_encoder->enc.dev, true);
58 ret = i2c_transfer(intel_encoder->ddc_bus, msgs, 2); 58 ret = i2c_transfer(intel_encoder->ddc_bus, msgs, 2);
59 intel_i2c_quirk_set(intel_encoder->base.dev, false); 59 intel_i2c_quirk_set(intel_encoder->enc.dev, false);
60 if (ret == 2) 60 if (ret == 2)
61 return true; 61 return true;
62 62
@@ -66,22 +66,23 @@ bool intel_ddc_probe(struct intel_encoder *intel_encoder)
66/** 66/**
67 * intel_ddc_get_modes - get modelist from monitor 67 * intel_ddc_get_modes - get modelist from monitor
68 * @connector: DRM connector device to use 68 * @connector: DRM connector device to use
69 * @adapter: i2c adapter
69 * 70 *
70 * Fetch the EDID information from @connector using the DDC bus. 71 * Fetch the EDID information from @connector using the DDC bus.
71 */ 72 */
72int intel_ddc_get_modes(struct intel_encoder *intel_encoder) 73int intel_ddc_get_modes(struct drm_connector *connector,
74 struct i2c_adapter *adapter)
73{ 75{
74 struct edid *edid; 76 struct edid *edid;
75 int ret = 0; 77 int ret = 0;
76 78
77 intel_i2c_quirk_set(intel_encoder->base.dev, true); 79 intel_i2c_quirk_set(connector->dev, true);
78 edid = drm_get_edid(&intel_encoder->base, intel_encoder->ddc_bus); 80 edid = drm_get_edid(connector, adapter);
79 intel_i2c_quirk_set(intel_encoder->base.dev, false); 81 intel_i2c_quirk_set(connector->dev, false);
80 if (edid) { 82 if (edid) {
81 drm_mode_connector_update_edid_property(&intel_encoder->base, 83 drm_mode_connector_update_edid_property(connector, edid);
82 edid); 84 ret = drm_add_edid_modes(connector, edid);
83 ret = drm_add_edid_modes(&intel_encoder->base, edid); 85 connector->display_info.raw_edid = NULL;
84 intel_encoder->base.display_info.raw_edid = NULL;
85 kfree(edid); 86 kfree(edid);
86 } 87 }
87 88
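
intel_ddc_get_modes() now takes the probing connector plus an explicit i2c adapter rather than a combined intel_encoder, so one connector can be probed over different DDC buses -- which is what the sdvo code's Mac-mini analog_ddc_bus fallback relies on. A typical .get_modes hook under the two-argument API:

/* Sketch: caller shape after the signature change */
static int example_get_modes(struct drm_connector *connector)
{
	struct drm_encoder *encoder = intel_attached_encoder(connector);
	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);

	/* any DDC-capable adapter may be passed, not only ddc_bus */
	return intel_ddc_get_modes(connector, intel_encoder->ddc_bus);
}
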
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 6d524a1fc271..b0e17b06eb6e 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -373,7 +373,7 @@ static void intel_overlay_off_tail(struct intel_overlay *overlay)
373 373
374 /* never have the overlay hw on without showing a frame */ 374 /* never have the overlay hw on without showing a frame */
375 BUG_ON(!overlay->vid_bo); 375 BUG_ON(!overlay->vid_bo);
376 obj = overlay->vid_bo->obj; 376 obj = &overlay->vid_bo->base;
377 377
378 i915_gem_object_unpin(obj); 378 i915_gem_object_unpin(obj);
379 drm_gem_object_unreference(obj); 379 drm_gem_object_unreference(obj);
@@ -411,7 +411,7 @@ int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay,
411 411
412 switch (overlay->hw_wedged) { 412 switch (overlay->hw_wedged) {
413 case RELEASE_OLD_VID: 413 case RELEASE_OLD_VID:
414 obj = overlay->old_vid_bo->obj; 414 obj = &overlay->old_vid_bo->base;
415 i915_gem_object_unpin(obj); 415 i915_gem_object_unpin(obj);
416 drm_gem_object_unreference(obj); 416 drm_gem_object_unreference(obj);
417 overlay->old_vid_bo = NULL; 417 overlay->old_vid_bo = NULL;
@@ -467,7 +467,7 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
467 if (ret != 0) 467 if (ret != 0)
468 return ret; 468 return ret;
469 469
470 obj = overlay->old_vid_bo->obj; 470 obj = &overlay->old_vid_bo->base;
471 i915_gem_object_unpin(obj); 471 i915_gem_object_unpin(obj);
472 drm_gem_object_unreference(obj); 472 drm_gem_object_unreference(obj);
473 overlay->old_vid_bo = NULL; 473 overlay->old_vid_bo = NULL;
@@ -1341,7 +1341,7 @@ void intel_setup_overlay(struct drm_device *dev)
1341 return; 1341 return;
1342 overlay->dev = dev; 1342 overlay->dev = dev;
1343 1343
1344 reg_bo = drm_gem_object_alloc(dev, PAGE_SIZE); 1344 reg_bo = i915_gem_alloc_object(dev, PAGE_SIZE);
1345 if (!reg_bo) 1345 if (!reg_bo)
1346 goto out_free; 1346 goto out_free;
1347 overlay->reg_bo = to_intel_bo(reg_bo); 1347 overlay->reg_bo = to_intel_bo(reg_bo);
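
The '->obj' to '&...->base' rewrites above follow from i915_gem_alloc_object(): the drm_gem_object is now embedded in the driver's buffer object instead of allocated separately and pointed at. The layout this implies, consistent with the to_intel_bo() use in the last hunk (field set trimmed, an assumption):

/* Sketch only: embedded-GEM layout and the two-way conversion.
 * Embedding (rather than a pointer) is what makes '&bo->base' and
 * the container_of() below exact inverses. */
struct example_i915_gem_object {
	struct drm_gem_object base;	/* embedded, no longer a pointer */
	/* ... i915-private bo state ... */
};

static inline struct example_i915_gem_object *
example_to_intel_bo(struct drm_gem_object *obj)
{
	return container_of(obj, struct example_i915_gem_object, base);
}
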
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 87d953664cb0..42ceb15da689 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -36,7 +36,18 @@
36#include "i915_drm.h" 36#include "i915_drm.h"
37#include "i915_drv.h" 37#include "i915_drv.h"
38#include "intel_sdvo_regs.h" 38#include "intel_sdvo_regs.h"
39#include <linux/dmi.h> 39
40#define SDVO_TMDS_MASK (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1)
41#define SDVO_RGB_MASK (SDVO_OUTPUT_RGB0 | SDVO_OUTPUT_RGB1)
42#define SDVO_LVDS_MASK (SDVO_OUTPUT_LVDS0 | SDVO_OUTPUT_LVDS1)
43#define SDVO_TV_MASK (SDVO_OUTPUT_CVBS0 | SDVO_OUTPUT_SVID0)
44
45#define SDVO_OUTPUT_MASK (SDVO_TMDS_MASK | SDVO_RGB_MASK | SDVO_LVDS_MASK |\
46 SDVO_TV_MASK)
47
48#define IS_TV(c) (c->output_flag & SDVO_TV_MASK)
49#define IS_LVDS(c) (c->output_flag & SDVO_LVDS_MASK)
50
40 51
41static char *tv_format_names[] = { 52static char *tv_format_names[] = {
42 "NTSC_M" , "NTSC_J" , "NTSC_443", 53 "NTSC_M" , "NTSC_J" , "NTSC_443",
@@ -86,12 +97,6 @@ struct intel_sdvo_priv {
86 /* This is for current tv format name */ 97 /* This is for current tv format name */
87 char *tv_format_name; 98 char *tv_format_name;
88 99
89 /* This contains all current supported TV format */
90 char *tv_format_supported[TV_FORMAT_NUM];
91 int format_supported_num;
92 struct drm_property *tv_format_property;
93 struct drm_property *tv_format_name_property[TV_FORMAT_NUM];
94
95 /** 100 /**
96 * This is set if we treat the device as HDMI, instead of DVI. 101 * This is set if we treat the device as HDMI, instead of DVI.
97 */ 102 */
@@ -112,12 +117,6 @@ struct intel_sdvo_priv {
112 */ 117 */
113 struct drm_display_mode *sdvo_lvds_fixed_mode; 118 struct drm_display_mode *sdvo_lvds_fixed_mode;
114 119
115 /**
116 * Returned SDTV resolutions allowed for the current format, if the
117 * device reported it.
118 */
119 struct intel_sdvo_sdtv_resolution_reply sdtv_resolutions;
120
121 /* 120 /*
122 * supported encoding mode, used to determine whether HDMI is 121 * supported encoding mode, used to determine whether HDMI is
123 * supported 122 * supported
@@ -130,11 +129,24 @@ struct intel_sdvo_priv {
130 /* Mac mini hack -- use the same DDC as the analog connector */ 129 /* Mac mini hack -- use the same DDC as the analog connector */
131 struct i2c_adapter *analog_ddc_bus; 130 struct i2c_adapter *analog_ddc_bus;
132 131
133 int save_sdvo_mult; 132};
134 u16 save_active_outputs; 133
135 struct intel_sdvo_dtd save_input_dtd_1, save_input_dtd_2; 134struct intel_sdvo_connector {
136 struct intel_sdvo_dtd save_output_dtd[16]; 135 /* Mark the type of connector */
137 u32 save_SDVOX; 136 uint16_t output_flag;
137
138 /* This contains all current supported TV format */
139 char *tv_format_supported[TV_FORMAT_NUM];
140 int format_supported_num;
141 struct drm_property *tv_format_property;
142 struct drm_property *tv_format_name_property[TV_FORMAT_NUM];
143
144 /**
145 * Returned SDTV resolutions allowed for the current format, if the
146 * device reported it.
147 */
148 struct intel_sdvo_sdtv_resolution_reply sdtv_resolutions;
149
138 /* add the property for the SDVO-TV */ 150 /* add the property for the SDVO-TV */
139 struct drm_property *left_property; 151 struct drm_property *left_property;
140 struct drm_property *right_property; 152 struct drm_property *right_property;
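
The output_flag now lives on the per-connector struct, and the masks added at the top of the file classify it into device classes. A hedged illustration of how the IS_TV()/IS_LVDS() tests read once both pieces are in place:

/* Illustrative only: classify an sdvo connector by its output bits */
static bool example_is_tv(struct intel_sdvo_connector *c)
{
	/* expands to c->output_flag & (SDVO_OUTPUT_CVBS0 | SDVO_OUTPUT_SVID0) */
	return IS_TV(c);
}
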
@@ -162,7 +174,12 @@ struct intel_sdvo_priv {
162}; 174};
163 175
164static bool 176static bool
165intel_sdvo_output_setup(struct intel_encoder *intel_encoder, uint16_t flags); 177intel_sdvo_output_setup(struct intel_encoder *intel_encoder,
178 uint16_t flags);
179static void
180intel_sdvo_tv_create_property(struct drm_connector *connector, int type);
181static void
182intel_sdvo_create_enhance_property(struct drm_connector *connector);
166 183
167/** 184/**
168 * Writes the SDVOB or SDVOC with the given value, but always writes both 185 * Writes the SDVOB or SDVOC with the given value, but always writes both
@@ -171,12 +188,18 @@ intel_sdvo_output_setup(struct intel_encoder *intel_encoder, uint16_t flags);
171 */ 188 */
172static void intel_sdvo_write_sdvox(struct intel_encoder *intel_encoder, u32 val) 189static void intel_sdvo_write_sdvox(struct intel_encoder *intel_encoder, u32 val)
173{ 190{
174 struct drm_device *dev = intel_encoder->base.dev; 191 struct drm_device *dev = intel_encoder->enc.dev;
175 struct drm_i915_private *dev_priv = dev->dev_private; 192 struct drm_i915_private *dev_priv = dev->dev_private;
176 struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; 193 struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
177 u32 bval = val, cval = val; 194 u32 bval = val, cval = val;
178 int i; 195 int i;
179 196
197 if (sdvo_priv->sdvo_reg == PCH_SDVOB) {
198 I915_WRITE(sdvo_priv->sdvo_reg, val);
199 I915_READ(sdvo_priv->sdvo_reg);
200 return;
201 }
202
180 if (sdvo_priv->sdvo_reg == SDVOB) { 203 if (sdvo_priv->sdvo_reg == SDVOB) {
181 cval = I915_READ(SDVOC); 204 cval = I915_READ(SDVOC);
182 } else { 205 } else {
@@ -353,7 +376,8 @@ static const struct _sdvo_cmd_name {
353 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_DATA), 376 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_DATA),
354}; 377};
355 378
356#define SDVO_NAME(dev_priv) ((dev_priv)->sdvo_reg == SDVOB ? "SDVOB" : "SDVOC") 379#define IS_SDVOB(reg) (reg == SDVOB || reg == PCH_SDVOB)
380#define SDVO_NAME(dev_priv) (IS_SDVOB((dev_priv)->sdvo_reg) ? "SDVOB" : "SDVOC")
357#define SDVO_PRIV(encoder) ((struct intel_sdvo_priv *) (encoder)->dev_priv) 381#define SDVO_PRIV(encoder) ((struct intel_sdvo_priv *) (encoder)->dev_priv)
358 382
359static void intel_sdvo_debug_write(struct intel_encoder *intel_encoder, u8 cmd, 383static void intel_sdvo_debug_write(struct intel_encoder *intel_encoder, u8 cmd,
@@ -563,17 +587,6 @@ static bool intel_sdvo_get_trained_inputs(struct intel_encoder *intel_encoder, b
563 return true; 587 return true;
564} 588}
565 589
566static bool intel_sdvo_get_active_outputs(struct intel_encoder *intel_encoder,
567 u16 *outputs)
568{
569 u8 status;
570
571 intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_ACTIVE_OUTPUTS, NULL, 0);
572 status = intel_sdvo_read_response(intel_encoder, outputs, sizeof(*outputs));
573
574 return (status == SDVO_CMD_STATUS_SUCCESS);
575}
576
577static bool intel_sdvo_set_active_outputs(struct intel_encoder *intel_encoder, 590static bool intel_sdvo_set_active_outputs(struct intel_encoder *intel_encoder,
578 u16 outputs) 591 u16 outputs)
579{ 592{
@@ -646,40 +659,6 @@ static bool intel_sdvo_set_target_output(struct intel_encoder *intel_encoder,
646 return (status == SDVO_CMD_STATUS_SUCCESS); 659 return (status == SDVO_CMD_STATUS_SUCCESS);
647} 660}
648 661
649static bool intel_sdvo_get_timing(struct intel_encoder *intel_encoder, u8 cmd,
650 struct intel_sdvo_dtd *dtd)
651{
652 u8 status;
653
654 intel_sdvo_write_cmd(intel_encoder, cmd, NULL, 0);
655 status = intel_sdvo_read_response(intel_encoder, &dtd->part1,
656 sizeof(dtd->part1));
657 if (status != SDVO_CMD_STATUS_SUCCESS)
658 return false;
659
660 intel_sdvo_write_cmd(intel_encoder, cmd + 1, NULL, 0);
661 status = intel_sdvo_read_response(intel_encoder, &dtd->part2,
662 sizeof(dtd->part2));
663 if (status != SDVO_CMD_STATUS_SUCCESS)
664 return false;
665
666 return true;
667}
668
669static bool intel_sdvo_get_input_timing(struct intel_encoder *intel_encoder,
670 struct intel_sdvo_dtd *dtd)
671{
672 return intel_sdvo_get_timing(intel_encoder,
673 SDVO_CMD_GET_INPUT_TIMINGS_PART1, dtd);
674}
675
676static bool intel_sdvo_get_output_timing(struct intel_encoder *intel_encoder,
677 struct intel_sdvo_dtd *dtd)
678{
679 return intel_sdvo_get_timing(intel_encoder,
680 SDVO_CMD_GET_OUTPUT_TIMINGS_PART1, dtd);
681}
682
683static bool intel_sdvo_set_timing(struct intel_encoder *intel_encoder, u8 cmd, 662static bool intel_sdvo_set_timing(struct intel_encoder *intel_encoder, u8 cmd,
684 struct intel_sdvo_dtd *dtd) 663 struct intel_sdvo_dtd *dtd)
685{ 664{
@@ -767,23 +746,6 @@ static bool intel_sdvo_get_preferred_input_timing(struct intel_encoder *intel_en
767 return false; 746 return false;
768} 747}
769 748
770static int intel_sdvo_get_clock_rate_mult(struct intel_encoder *intel_encoder)
771{
772 u8 response, status;
773
774 intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_CLOCK_RATE_MULT, NULL, 0);
775 status = intel_sdvo_read_response(intel_encoder, &response, 1);
776
777 if (status != SDVO_CMD_STATUS_SUCCESS) {
778 DRM_DEBUG_KMS("Couldn't get SDVO clock rate multiplier\n");
779 return SDVO_CLOCK_RATE_MULT_1X;
780 } else {
781 DRM_DEBUG_KMS("Current clock rate multiplier: %d\n", response);
782 }
783
784 return response;
785}
786
787static bool intel_sdvo_set_clock_rate_mult(struct intel_encoder *intel_encoder, u8 val) 749static bool intel_sdvo_set_clock_rate_mult(struct intel_encoder *intel_encoder, u8 val)
788{ 750{
789 u8 status; 751 u8 status;
@@ -1071,7 +1033,7 @@ static void intel_sdvo_set_tv_format(struct intel_encoder *intel_encoder)
1071 memcpy(&format, &format_map, sizeof(format_map) > sizeof(format) ? 1033 memcpy(&format, &format_map, sizeof(format_map) > sizeof(format) ?
1072 sizeof(format) : sizeof(format_map)); 1034 sizeof(format) : sizeof(format_map));
1073 1035
1074 intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_TV_FORMAT, &format_map, 1036 intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_TV_FORMAT, &format,
1075 sizeof(format)); 1037 sizeof(format));
1076 1038
1077 status = intel_sdvo_read_response(intel_encoder, NULL, 0); 1039 status = intel_sdvo_read_response(intel_encoder, NULL, 0);
@@ -1101,7 +1063,7 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder,
1101 /* Set output timings */ 1063 /* Set output timings */
1102 intel_sdvo_get_dtd_from_mode(&output_dtd, mode); 1064 intel_sdvo_get_dtd_from_mode(&output_dtd, mode);
1103 intel_sdvo_set_target_output(intel_encoder, 1065 intel_sdvo_set_target_output(intel_encoder,
1104 dev_priv->controlled_output); 1066 dev_priv->attached_output);
1105 intel_sdvo_set_output_timing(intel_encoder, &output_dtd); 1067 intel_sdvo_set_output_timing(intel_encoder, &output_dtd);
1106 1068
1107 /* Set the input timing to the screen. Assume always input 0. */ 1069 /* Set the input timing to the screen. Assume always input 0. */
@@ -1139,7 +1101,7 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder,
1139 dev_priv->sdvo_lvds_fixed_mode); 1101 dev_priv->sdvo_lvds_fixed_mode);
1140 1102
1141 intel_sdvo_set_target_output(intel_encoder, 1103 intel_sdvo_set_target_output(intel_encoder,
1142 dev_priv->controlled_output); 1104 dev_priv->attached_output);
1143 intel_sdvo_set_output_timing(intel_encoder, &output_dtd); 1105 intel_sdvo_set_output_timing(intel_encoder, &output_dtd);
1144 1106
1145 /* Set the input timing to the screen. Assume always input 0. */ 1107 /* Set the input timing to the screen. Assume always input 0. */
@@ -1204,7 +1166,7 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
1204 * channel on the motherboard. In a two-input device, the first input 1166 * channel on the motherboard. In a two-input device, the first input
1205 * will be SDVOB and the second SDVOC. 1167 * will be SDVOB and the second SDVOC.
1206 */ 1168 */
1207 in_out.in0 = sdvo_priv->controlled_output; 1169 in_out.in0 = sdvo_priv->attached_output;
1208 in_out.in1 = 0; 1170 in_out.in1 = 0;
1209 1171
1210 intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_IN_OUT_MAP, 1172 intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_IN_OUT_MAP,
@@ -1230,7 +1192,7 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
1230 if (!sdvo_priv->is_tv && !sdvo_priv->is_lvds) { 1192 if (!sdvo_priv->is_tv && !sdvo_priv->is_lvds) {
1231 /* Set the output timing to the screen */ 1193 /* Set the output timing to the screen */
1232 intel_sdvo_set_target_output(intel_encoder, 1194 intel_sdvo_set_target_output(intel_encoder,
1233 sdvo_priv->controlled_output); 1195 sdvo_priv->attached_output);
1234 intel_sdvo_set_output_timing(intel_encoder, &input_dtd); 1196 intel_sdvo_set_output_timing(intel_encoder, &input_dtd);
1235 } 1197 }
1236 1198
@@ -1352,107 +1314,16 @@ static void intel_sdvo_dpms(struct drm_encoder *encoder, int mode)
1352 1314
1353 if (0) 1315 if (0)
1354 intel_sdvo_set_encoder_power_state(intel_encoder, mode); 1316 intel_sdvo_set_encoder_power_state(intel_encoder, mode);
1355 intel_sdvo_set_active_outputs(intel_encoder, sdvo_priv->controlled_output); 1317 intel_sdvo_set_active_outputs(intel_encoder, sdvo_priv->attached_output);
1356 } 1318 }
1357 return; 1319 return;
1358} 1320}
1359 1321
1360static void intel_sdvo_save(struct drm_connector *connector)
1361{
1362 struct drm_device *dev = connector->dev;
1363 struct drm_i915_private *dev_priv = dev->dev_private;
1364 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
1365 struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
1366 int o;
1367
1368 sdvo_priv->save_sdvo_mult = intel_sdvo_get_clock_rate_mult(intel_encoder);
1369 intel_sdvo_get_active_outputs(intel_encoder, &sdvo_priv->save_active_outputs);
1370
1371 if (sdvo_priv->caps.sdvo_inputs_mask & 0x1) {
1372 intel_sdvo_set_target_input(intel_encoder, true, false);
1373 intel_sdvo_get_input_timing(intel_encoder,
1374 &sdvo_priv->save_input_dtd_1);
1375 }
1376
1377 if (sdvo_priv->caps.sdvo_inputs_mask & 0x2) {
1378 intel_sdvo_set_target_input(intel_encoder, false, true);
1379 intel_sdvo_get_input_timing(intel_encoder,
1380 &sdvo_priv->save_input_dtd_2);
1381 }
1382
1383 for (o = SDVO_OUTPUT_FIRST; o <= SDVO_OUTPUT_LAST; o++)
1384 {
1385 u16 this_output = (1 << o);
1386 if (sdvo_priv->caps.output_flags & this_output)
1387 {
1388 intel_sdvo_set_target_output(intel_encoder, this_output);
1389 intel_sdvo_get_output_timing(intel_encoder,
1390 &sdvo_priv->save_output_dtd[o]);
1391 }
1392 }
1393 if (sdvo_priv->is_tv) {
1394 /* XXX: Save TV format/enhancements. */
1395 }
1396
1397 sdvo_priv->save_SDVOX = I915_READ(sdvo_priv->sdvo_reg);
1398}
1399
1400static void intel_sdvo_restore(struct drm_connector *connector)
1401{
1402 struct drm_device *dev = connector->dev;
1403 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
1404 struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
1405 int o;
1406 int i;
1407 bool input1, input2;
1408 u8 status;
1409
1410 intel_sdvo_set_active_outputs(intel_encoder, 0);
1411
1412 for (o = SDVO_OUTPUT_FIRST; o <= SDVO_OUTPUT_LAST; o++)
1413 {
1414 u16 this_output = (1 << o);
1415 if (sdvo_priv->caps.output_flags & this_output) {
1416 intel_sdvo_set_target_output(intel_encoder, this_output);
1417 intel_sdvo_set_output_timing(intel_encoder, &sdvo_priv->save_output_dtd[o]);
1418 }
1419 }
1420
1421 if (sdvo_priv->caps.sdvo_inputs_mask & 0x1) {
1422 intel_sdvo_set_target_input(intel_encoder, true, false);
1423 intel_sdvo_set_input_timing(intel_encoder, &sdvo_priv->save_input_dtd_1);
1424 }
1425
1426 if (sdvo_priv->caps.sdvo_inputs_mask & 0x2) {
1427 intel_sdvo_set_target_input(intel_encoder, false, true);
1428 intel_sdvo_set_input_timing(intel_encoder, &sdvo_priv->save_input_dtd_2);
1429 }
1430
1431 intel_sdvo_set_clock_rate_mult(intel_encoder, sdvo_priv->save_sdvo_mult);
1432
1433 if (sdvo_priv->is_tv) {
1434 /* XXX: Restore TV format/enhancements. */
1435 }
1436
1437 intel_sdvo_write_sdvox(intel_encoder, sdvo_priv->save_SDVOX);
1438
1439 if (sdvo_priv->save_SDVOX & SDVO_ENABLE)
1440 {
1441 for (i = 0; i < 2; i++)
1442 intel_wait_for_vblank(dev);
1443 status = intel_sdvo_get_trained_inputs(intel_encoder, &input1, &input2);
1444 if (status == SDVO_CMD_STATUS_SUCCESS && !input1)
1445 DRM_DEBUG_KMS("First %s output reported failure to "
1446 "sync\n", SDVO_NAME(sdvo_priv));
1447 }
1448
1449 intel_sdvo_set_active_outputs(intel_encoder, sdvo_priv->save_active_outputs);
1450}
1451
 static int intel_sdvo_mode_valid(struct drm_connector *connector,
                  struct drm_display_mode *mode)
 {
-    struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+    struct drm_encoder *encoder = intel_attached_encoder(connector);
+    struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
     struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
 
     if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
@@ -1490,6 +1361,8 @@ static bool intel_sdvo_get_capabilities(struct intel_encoder *intel_encoder, str
     return true;
 }
 
+/* No use! */
+#if 0
 struct drm_connector* intel_sdvo_find(struct drm_device *dev, int sdvoB)
 {
     struct drm_connector *connector = NULL;
@@ -1560,6 +1433,7 @@ void intel_sdvo_set_hotplug(struct drm_connector *connector, int on)
     intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0);
     intel_sdvo_read_response(intel_encoder, &response, 2);
 }
+#endif
 
 static bool
 intel_sdvo_multifunc_encoder(struct intel_encoder *intel_encoder)
@@ -1598,12 +1472,17 @@ static struct drm_connector *
 intel_find_analog_connector(struct drm_device *dev)
 {
     struct drm_connector *connector;
+    struct drm_encoder *encoder;
     struct intel_encoder *intel_encoder;
 
-    list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-        intel_encoder = to_intel_encoder(connector);
-        if (intel_encoder->type == INTEL_OUTPUT_ANALOG)
-            return connector;
+    list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+        intel_encoder = enc_to_intel_encoder(encoder);
+        if (intel_encoder->type == INTEL_OUTPUT_ANALOG) {
+            list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+                if (connector && encoder == intel_attached_encoder(connector))
+                    return connector;
+            }
+        }
     }
     return NULL;
 }
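
With connector state split from the encoder, the analog lookup now walks the encoder list first and only then finds the connector attached to the matching encoder. A generic sketch of that two-level walk, with plain arrays standing in for the kernel's mode_config lists (the types here are simplified stand-ins, not the driver's):

    #include <stddef.h>

    struct encoder { int type; };
    struct connector { struct encoder *attached; };

    #define INTEL_OUTPUT_ANALOG 1

    /* Sketch: first connector attached to an encoder of the wanted type. */
    static struct connector *find_analog_connector(struct encoder *enc, int n_enc,
                                                   struct connector *con, int n_con)
    {
        for (int e = 0; e < n_enc; e++) {
            if (enc[e].type != INTEL_OUTPUT_ANALOG)
                continue;
            for (int c = 0; c < n_con; c++)
                if (con[c].attached == &enc[e])
                    return &con[c];
        }
        return NULL;  /* no analog output wired up */
    }
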
@@ -1627,12 +1506,13 @@ intel_analog_is_connected(struct drm_device *dev)
 enum drm_connector_status
 intel_sdvo_hdmi_sink_detect(struct drm_connector *connector, u16 response)
 {
-    struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+    struct drm_encoder *encoder = intel_attached_encoder(connector);
+    struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
     struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
     enum drm_connector_status status = connector_status_connected;
     struct edid *edid = NULL;
 
-    edid = drm_get_edid(&intel_encoder->base,
+    edid = drm_get_edid(connector,
                 intel_encoder->ddc_bus);
 
     /* This is only applied to SDVO cards with multiple outputs */
@@ -1646,7 +1526,7 @@ intel_sdvo_hdmi_sink_detect(struct drm_connector *connector, u16 response)
          */
         while(temp_ddc > 1) {
             sdvo_priv->ddc_bus = temp_ddc;
-            edid = drm_get_edid(&intel_encoder->base,
+            edid = drm_get_edid(connector,
                         intel_encoder->ddc_bus);
             if (edid) {
                 /*
@@ -1666,8 +1546,8 @@ intel_sdvo_hdmi_sink_detect(struct drm_connector *connector, u16 response)
      */
     if (edid == NULL &&
         sdvo_priv->analog_ddc_bus &&
-        !intel_analog_is_connected(intel_encoder->base.dev))
-        edid = drm_get_edid(&intel_encoder->base,
+        !intel_analog_is_connected(connector->dev))
+        edid = drm_get_edid(connector,
                     sdvo_priv->analog_ddc_bus);
     if (edid != NULL) {
         /* Don't report the output as connected if it's a DVI-I
@@ -1682,7 +1562,7 @@ intel_sdvo_hdmi_sink_detect(struct drm_connector *connector, u16 response)
         }
 
         kfree(edid);
-        intel_encoder->base.display_info.raw_edid = NULL;
+        connector->display_info.raw_edid = NULL;
 
     } else if (response & (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1))
         status = connector_status_disconnected;
@@ -1694,8 +1574,12 @@ static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connect
 {
     uint16_t response;
     u8 status;
-    struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+    struct drm_encoder *encoder = intel_attached_encoder(connector);
+    struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+    struct intel_connector *intel_connector = to_intel_connector(connector);
     struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
+    struct intel_sdvo_connector *sdvo_connector = intel_connector->dev_priv;
+    enum drm_connector_status ret;
 
     intel_sdvo_write_cmd(intel_encoder,
                  SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0);
@@ -1713,24 +1597,41 @@ static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connect
     if (response == 0)
         return connector_status_disconnected;
 
-    if (intel_sdvo_multifunc_encoder(intel_encoder) &&
-        sdvo_priv->attached_output != response) {
-        if (sdvo_priv->controlled_output != response &&
-            intel_sdvo_output_setup(intel_encoder, response) != true)
-            return connector_status_unknown;
-        sdvo_priv->attached_output = response;
+    sdvo_priv->attached_output = response;
+
+    if ((sdvo_connector->output_flag & response) == 0)
+        ret = connector_status_disconnected;
+    else if (response & (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1))
+        ret = intel_sdvo_hdmi_sink_detect(connector, response);
+    else
+        ret = connector_status_connected;
+
+    /* May need to update the encoder's clocking flags, e.g. SDVO TV. */
+    if (ret == connector_status_connected) {
+        sdvo_priv->is_tv = false;
+        sdvo_priv->is_lvds = false;
+        intel_encoder->needs_tv_clock = false;
+
+        if (response & SDVO_TV_MASK) {
+            sdvo_priv->is_tv = true;
+            intel_encoder->needs_tv_clock = true;
+        }
+        if (response & SDVO_LVDS_MASK)
+            sdvo_priv->is_lvds = true;
     }
-    return intel_sdvo_hdmi_sink_detect(connector, response);
+
+    return ret;
 }
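
Detection now reduces to masking the device's attached-display report against the single output this connector owns. A compact sketch of that decision (the masks mirror the patch; the status values are simplified, not the DRM enum):

    /* Sketch: per-connector SDVO detect. `response` is the device's
     * GET_ATTACHED_DISPLAYS bitmask; `output_flag` is the one output
     * bit this connector was created for. */
    enum status { DISCONNECTED, CONNECTED, NEEDS_EDID_CHECK };

    static enum status sdvo_detect(unsigned response, unsigned output_flag,
                                   unsigned tmds_mask)
    {
        if (response == 0)
            return DISCONNECTED;        /* nothing attached at all */
        if ((output_flag & response) == 0)
            return DISCONNECTED;        /* attached, but not on this output */
        if (response & tmds_mask)
            return NEEDS_EDID_CHECK;    /* DVI/HDMI: confirm via EDID */
        return CONNECTED;
    }
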
 
 static void intel_sdvo_get_ddc_modes(struct drm_connector *connector)
 {
-    struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+    struct drm_encoder *encoder = intel_attached_encoder(connector);
+    struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
     struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
     int num_modes;
 
     /* set the bus switch and get the modes */
-    num_modes = intel_ddc_get_modes(intel_encoder);
+    num_modes = intel_ddc_get_modes(connector, intel_encoder->ddc_bus);
 
     /*
      * Mac mini hack. On this device, the DVI-I connector shares one DDC
@@ -1740,17 +1641,10 @@ static void intel_sdvo_get_ddc_modes(struct drm_connector *connector)
      */
     if (num_modes == 0 &&
         sdvo_priv->analog_ddc_bus &&
-        !intel_analog_is_connected(intel_encoder->base.dev)) {
-        struct i2c_adapter *digital_ddc_bus;
-
+        !intel_analog_is_connected(connector->dev)) {
         /* Switch to the analog ddc bus and try that
          */
-        digital_ddc_bus = intel_encoder->ddc_bus;
-        intel_encoder->ddc_bus = sdvo_priv->analog_ddc_bus;
-
-        (void) intel_ddc_get_modes(intel_encoder);
-
-        intel_encoder->ddc_bus = digital_ddc_bus;
+        (void) intel_ddc_get_modes(connector, sdvo_priv->analog_ddc_bus);
     }
 }
 
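
Passing the DDC bus explicitly removes the save/swap/restore dance around intel_encoder->ddc_bus. A sketch of the same fallback probe against an explicit-bus interface (get_modes_on_bus is a stand-in for intel_ddc_get_modes, not a real kernel call):

    /* Sketch: probe the digital DDC bus first, then fall back to the
     * analog bus that some boards (e.g. the Mac mini) mux onto the same
     * DVI-I connector. No bus pointer is swapped in shared state. */
    struct i2c_bus;

    static int probe_modes(struct i2c_bus *digital, struct i2c_bus *analog,
                           int analog_in_use,
                           int (*get_modes_on_bus)(struct i2c_bus *))
    {
        int num_modes = get_modes_on_bus(digital);

        if (num_modes == 0 && analog && !analog_in_use)
            num_modes = get_modes_on_bus(analog);

        return num_modes;
    }
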
@@ -1821,8 +1715,9 @@ struct drm_display_mode sdvo_tv_modes[] = {
 
 static void intel_sdvo_get_tv_modes(struct drm_connector *connector)
 {
-    struct intel_encoder *output = to_intel_encoder(connector);
-    struct intel_sdvo_priv *sdvo_priv = output->dev_priv;
+    struct drm_encoder *encoder = intel_attached_encoder(connector);
+    struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+    struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
     struct intel_sdvo_sdtv_resolution_request tv_res;
     uint32_t reply = 0, format_map = 0;
     int i;
@@ -1842,11 +1737,11 @@ static void intel_sdvo_get_tv_modes(struct drm_connector *connector)
            sizeof(format_map) ? sizeof(format_map) :
            sizeof(struct intel_sdvo_sdtv_resolution_request));
 
-    intel_sdvo_set_target_output(output, sdvo_priv->controlled_output);
+    intel_sdvo_set_target_output(intel_encoder, sdvo_priv->attached_output);
 
-    intel_sdvo_write_cmd(output, SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT,
+    intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT,
                  &tv_res, sizeof(tv_res));
-    status = intel_sdvo_read_response(output, &reply, 3);
+    status = intel_sdvo_read_response(intel_encoder, &reply, 3);
     if (status != SDVO_CMD_STATUS_SUCCESS)
         return;
 
@@ -1863,7 +1758,8 @@ static void intel_sdvo_get_tv_modes(struct drm_connector *connector)
 
 static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
 {
-    struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+    struct drm_encoder *encoder = intel_attached_encoder(connector);
+    struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
     struct drm_i915_private *dev_priv = connector->dev->dev_private;
     struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
     struct drm_display_mode *newmode;
@@ -1873,7 +1769,7 @@ static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
      * Assume that the preferred modes are
      * arranged in priority order.
      */
-    intel_ddc_get_modes(intel_encoder);
+    intel_ddc_get_modes(connector, intel_encoder->ddc_bus);
     if (list_empty(&connector->probed_modes) == false)
         goto end;
 
@@ -1902,12 +1798,12 @@ end:
 
 static int intel_sdvo_get_modes(struct drm_connector *connector)
 {
-    struct intel_encoder *output = to_intel_encoder(connector);
-    struct intel_sdvo_priv *sdvo_priv = output->dev_priv;
+    struct intel_connector *intel_connector = to_intel_connector(connector);
+    struct intel_sdvo_connector *sdvo_connector = intel_connector->dev_priv;
 
-    if (sdvo_priv->is_tv)
+    if (IS_TV(sdvo_connector))
         intel_sdvo_get_tv_modes(connector);
-    else if (sdvo_priv->is_lvds == true)
+    else if (IS_LVDS(sdvo_connector))
         intel_sdvo_get_lvds_modes(connector);
     else
         intel_sdvo_get_ddc_modes(connector);
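
IS_TV()/IS_LVDS() classify a connector purely by the SDVO output bit it was created for, replacing the per-encoder booleans on this path. Their definitions are not shown in this hunk; a plausible form, assuming the TV/LVDS bit masks used elsewhere in the patch:

    /* Assumed definitions -- the hunk only shows the call sites. */
    #define IS_TV(sdvo_connector)   ((sdvo_connector)->output_flag & SDVO_TV_MASK)
    #define IS_LVDS(sdvo_connector) ((sdvo_connector)->output_flag & SDVO_LVDS_MASK)
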
@@ -1920,11 +1816,11 @@ static int intel_sdvo_get_modes(struct drm_connector *connector)
 static
 void intel_sdvo_destroy_enhance_property(struct drm_connector *connector)
 {
-    struct intel_encoder *intel_encoder = to_intel_encoder(connector);
-    struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
+    struct intel_connector *intel_connector = to_intel_connector(connector);
+    struct intel_sdvo_connector *sdvo_priv = intel_connector->dev_priv;
     struct drm_device *dev = connector->dev;
 
-    if (sdvo_priv->is_tv) {
+    if (IS_TV(sdvo_priv)) {
         if (sdvo_priv->left_property)
             drm_property_destroy(dev, sdvo_priv->left_property);
         if (sdvo_priv->right_property)
@@ -1937,8 +1833,6 @@ void intel_sdvo_destroy_enhance_property(struct drm_connector *connector)
             drm_property_destroy(dev, sdvo_priv->hpos_property);
         if (sdvo_priv->vpos_property)
             drm_property_destroy(dev, sdvo_priv->vpos_property);
-    }
-    if (sdvo_priv->is_tv) {
         if (sdvo_priv->saturation_property)
             drm_property_destroy(dev,
                          sdvo_priv->saturation_property);
@@ -1948,7 +1842,7 @@ void intel_sdvo_destroy_enhance_property(struct drm_connector *connector)
         if (sdvo_priv->hue_property)
             drm_property_destroy(dev, sdvo_priv->hue_property);
     }
-    if (sdvo_priv->is_tv || sdvo_priv->is_lvds) {
+    if (IS_TV(sdvo_priv) || IS_LVDS(sdvo_priv)) {
         if (sdvo_priv->brightness_property)
             drm_property_destroy(dev,
                          sdvo_priv->brightness_property);
@@ -1958,31 +1852,17 @@ void intel_sdvo_destroy_enhance_property(struct drm_connector *connector)
 
 static void intel_sdvo_destroy(struct drm_connector *connector)
 {
-    struct intel_encoder *intel_encoder = to_intel_encoder(connector);
-    struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
+    struct intel_connector *intel_connector = to_intel_connector(connector);
+    struct intel_sdvo_connector *sdvo_connector = intel_connector->dev_priv;
 
-    if (intel_encoder->i2c_bus)
-        intel_i2c_destroy(intel_encoder->i2c_bus);
-    if (intel_encoder->ddc_bus)
-        intel_i2c_destroy(intel_encoder->ddc_bus);
-    if (sdvo_priv->analog_ddc_bus)
-        intel_i2c_destroy(sdvo_priv->analog_ddc_bus);
-
-    if (sdvo_priv->sdvo_lvds_fixed_mode != NULL)
-        drm_mode_destroy(connector->dev,
-                 sdvo_priv->sdvo_lvds_fixed_mode);
-
-    if (sdvo_priv->tv_format_property)
+    if (sdvo_connector->tv_format_property)
         drm_property_destroy(connector->dev,
-                     sdvo_priv->tv_format_property);
-
-    if (sdvo_priv->is_tv || sdvo_priv->is_lvds)
-        intel_sdvo_destroy_enhance_property(connector);
+                     sdvo_connector->tv_format_property);
 
+    intel_sdvo_destroy_enhance_property(connector);
     drm_sysfs_connector_remove(connector);
     drm_connector_cleanup(connector);
-
-    kfree(intel_encoder);
+    kfree(connector);
 }
 
 static int
@@ -1990,9 +1870,11 @@ intel_sdvo_set_property(struct drm_connector *connector,
             struct drm_property *property,
             uint64_t val)
 {
-    struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+    struct drm_encoder *encoder = intel_attached_encoder(connector);
+    struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
     struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
-    struct drm_encoder *encoder = &intel_encoder->enc;
+    struct intel_connector *intel_connector = to_intel_connector(connector);
+    struct intel_sdvo_connector *sdvo_connector = intel_connector->dev_priv;
     struct drm_crtc *crtc = encoder->crtc;
     int ret = 0;
     bool changed = false;
@@ -2003,101 +1885,101 @@ intel_sdvo_set_property(struct drm_connector *connector,
     if (ret < 0)
         goto out;
 
-    if (property == sdvo_priv->tv_format_property) {
+    if (property == sdvo_connector->tv_format_property) {
         if (val >= TV_FORMAT_NUM) {
             ret = -EINVAL;
             goto out;
         }
         if (sdvo_priv->tv_format_name ==
-            sdvo_priv->tv_format_supported[val])
+            sdvo_connector->tv_format_supported[val])
             goto out;
 
-        sdvo_priv->tv_format_name = sdvo_priv->tv_format_supported[val];
+        sdvo_priv->tv_format_name = sdvo_connector->tv_format_supported[val];
         changed = true;
     }
 
-    if (sdvo_priv->is_tv || sdvo_priv->is_lvds) {
+    if (IS_TV(sdvo_connector) || IS_LVDS(sdvo_connector)) {
         cmd = 0;
         temp_value = val;
-        if (sdvo_priv->left_property == property) {
+        if (sdvo_connector->left_property == property) {
             drm_connector_property_set_value(connector,
-                        sdvo_priv->right_property, val);
-            if (sdvo_priv->left_margin == temp_value)
+                        sdvo_connector->right_property, val);
+            if (sdvo_connector->left_margin == temp_value)
                 goto out;
 
-            sdvo_priv->left_margin = temp_value;
-            sdvo_priv->right_margin = temp_value;
-            temp_value = sdvo_priv->max_hscan -
-                    sdvo_priv->left_margin;
+            sdvo_connector->left_margin = temp_value;
+            sdvo_connector->right_margin = temp_value;
+            temp_value = sdvo_connector->max_hscan -
+                    sdvo_connector->left_margin;
             cmd = SDVO_CMD_SET_OVERSCAN_H;
-        } else if (sdvo_priv->right_property == property) {
+        } else if (sdvo_connector->right_property == property) {
             drm_connector_property_set_value(connector,
-                        sdvo_priv->left_property, val);
-            if (sdvo_priv->right_margin == temp_value)
+                        sdvo_connector->left_property, val);
+            if (sdvo_connector->right_margin == temp_value)
                 goto out;
 
-            sdvo_priv->left_margin = temp_value;
-            sdvo_priv->right_margin = temp_value;
-            temp_value = sdvo_priv->max_hscan -
-                    sdvo_priv->left_margin;
+            sdvo_connector->left_margin = temp_value;
+            sdvo_connector->right_margin = temp_value;
+            temp_value = sdvo_connector->max_hscan -
+                    sdvo_connector->left_margin;
             cmd = SDVO_CMD_SET_OVERSCAN_H;
-        } else if (sdvo_priv->top_property == property) {
+        } else if (sdvo_connector->top_property == property) {
             drm_connector_property_set_value(connector,
-                        sdvo_priv->bottom_property, val);
-            if (sdvo_priv->top_margin == temp_value)
+                        sdvo_connector->bottom_property, val);
+            if (sdvo_connector->top_margin == temp_value)
                 goto out;
 
-            sdvo_priv->top_margin = temp_value;
-            sdvo_priv->bottom_margin = temp_value;
-            temp_value = sdvo_priv->max_vscan -
-                    sdvo_priv->top_margin;
+            sdvo_connector->top_margin = temp_value;
+            sdvo_connector->bottom_margin = temp_value;
+            temp_value = sdvo_connector->max_vscan -
+                    sdvo_connector->top_margin;
             cmd = SDVO_CMD_SET_OVERSCAN_V;
-        } else if (sdvo_priv->bottom_property == property) {
+        } else if (sdvo_connector->bottom_property == property) {
             drm_connector_property_set_value(connector,
-                        sdvo_priv->top_property, val);
-            if (sdvo_priv->bottom_margin == temp_value)
+                        sdvo_connector->top_property, val);
+            if (sdvo_connector->bottom_margin == temp_value)
                 goto out;
-            sdvo_priv->top_margin = temp_value;
-            sdvo_priv->bottom_margin = temp_value;
-            temp_value = sdvo_priv->max_vscan -
-                    sdvo_priv->top_margin;
+            sdvo_connector->top_margin = temp_value;
+            sdvo_connector->bottom_margin = temp_value;
+            temp_value = sdvo_connector->max_vscan -
+                    sdvo_connector->top_margin;
             cmd = SDVO_CMD_SET_OVERSCAN_V;
-        } else if (sdvo_priv->hpos_property == property) {
-            if (sdvo_priv->cur_hpos == temp_value)
+        } else if (sdvo_connector->hpos_property == property) {
+            if (sdvo_connector->cur_hpos == temp_value)
                 goto out;
 
             cmd = SDVO_CMD_SET_POSITION_H;
-            sdvo_priv->cur_hpos = temp_value;
-        } else if (sdvo_priv->vpos_property == property) {
-            if (sdvo_priv->cur_vpos == temp_value)
+            sdvo_connector->cur_hpos = temp_value;
+        } else if (sdvo_connector->vpos_property == property) {
+            if (sdvo_connector->cur_vpos == temp_value)
                 goto out;
 
             cmd = SDVO_CMD_SET_POSITION_V;
-            sdvo_priv->cur_vpos = temp_value;
-        } else if (sdvo_priv->saturation_property == property) {
-            if (sdvo_priv->cur_saturation == temp_value)
+            sdvo_connector->cur_vpos = temp_value;
+        } else if (sdvo_connector->saturation_property == property) {
+            if (sdvo_connector->cur_saturation == temp_value)
                 goto out;
 
             cmd = SDVO_CMD_SET_SATURATION;
-            sdvo_priv->cur_saturation = temp_value;
-        } else if (sdvo_priv->contrast_property == property) {
-            if (sdvo_priv->cur_contrast == temp_value)
+            sdvo_connector->cur_saturation = temp_value;
+        } else if (sdvo_connector->contrast_property == property) {
+            if (sdvo_connector->cur_contrast == temp_value)
                 goto out;
 
             cmd = SDVO_CMD_SET_CONTRAST;
-            sdvo_priv->cur_contrast = temp_value;
-        } else if (sdvo_priv->hue_property == property) {
-            if (sdvo_priv->cur_hue == temp_value)
+            sdvo_connector->cur_contrast = temp_value;
+        } else if (sdvo_connector->hue_property == property) {
+            if (sdvo_connector->cur_hue == temp_value)
                 goto out;
 
             cmd = SDVO_CMD_SET_HUE;
-            sdvo_priv->cur_hue = temp_value;
-        } else if (sdvo_priv->brightness_property == property) {
-            if (sdvo_priv->cur_brightness == temp_value)
+            sdvo_connector->cur_hue = temp_value;
+        } else if (sdvo_connector->brightness_property == property) {
+            if (sdvo_connector->cur_brightness == temp_value)
                 goto out;
 
             cmd = SDVO_CMD_SET_BRIGHTNESS;
-            sdvo_priv->cur_brightness = temp_value;
+            sdvo_connector->cur_brightness = temp_value;
         }
         if (cmd) {
             intel_sdvo_write_cmd(intel_encoder, cmd, &temp_value, 2);
@@ -2127,8 +2009,6 @@ static const struct drm_encoder_helper_funcs intel_sdvo_helper_funcs = {
 
 static const struct drm_connector_funcs intel_sdvo_connector_funcs = {
     .dpms = drm_helper_connector_dpms,
-    .save = intel_sdvo_save,
-    .restore = intel_sdvo_restore,
     .detect = intel_sdvo_detect,
     .fill_modes = drm_helper_probe_single_connector_modes,
     .set_property = intel_sdvo_set_property,
@@ -2138,12 +2018,27 @@ static const struct drm_connector_funcs intel_sdvo_connector_funcs = {
 static const struct drm_connector_helper_funcs intel_sdvo_connector_helper_funcs = {
     .get_modes = intel_sdvo_get_modes,
     .mode_valid = intel_sdvo_mode_valid,
-    .best_encoder = intel_best_encoder,
+    .best_encoder = intel_attached_encoder,
 };
 
 static void intel_sdvo_enc_destroy(struct drm_encoder *encoder)
 {
+    struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+    struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
+
+    if (intel_encoder->i2c_bus)
+        intel_i2c_destroy(intel_encoder->i2c_bus);
+    if (intel_encoder->ddc_bus)
+        intel_i2c_destroy(intel_encoder->ddc_bus);
+    if (sdvo_priv->analog_ddc_bus)
+        intel_i2c_destroy(sdvo_priv->analog_ddc_bus);
+
+    if (sdvo_priv->sdvo_lvds_fixed_mode != NULL)
+        drm_mode_destroy(encoder->dev,
+                 sdvo_priv->sdvo_lvds_fixed_mode);
+
     drm_encoder_cleanup(encoder);
+    kfree(intel_encoder);
 }
 
 static const struct drm_encoder_funcs intel_sdvo_enc_funcs = {
@@ -2196,12 +2091,15 @@ intel_sdvo_select_ddc_bus(struct intel_sdvo_priv *dev_priv)
 }
 
 static bool
-intel_sdvo_get_digital_encoding_mode(struct intel_encoder *output)
+intel_sdvo_get_digital_encoding_mode(struct intel_encoder *output, int device)
 {
     struct intel_sdvo_priv *sdvo_priv = output->dev_priv;
     uint8_t status;
 
-    intel_sdvo_set_target_output(output, sdvo_priv->controlled_output);
+    if (device == 0)
+        intel_sdvo_set_target_output(output, SDVO_OUTPUT_TMDS0);
+    else
+        intel_sdvo_set_target_output(output, SDVO_OUTPUT_TMDS1);
 
     intel_sdvo_write_cmd(output, SDVO_CMD_GET_ENCODE, NULL, 0);
     status = intel_sdvo_read_response(output, &sdvo_priv->is_hdmi, 1);
@@ -2214,15 +2112,13 @@ static struct intel_encoder *
 intel_sdvo_chan_to_intel_encoder(struct intel_i2c_chan *chan)
 {
     struct drm_device *dev = chan->drm_dev;
-    struct drm_connector *connector;
+    struct drm_encoder *encoder;
     struct intel_encoder *intel_encoder = NULL;
 
-    list_for_each_entry(connector,
-            &dev->mode_config.connector_list, head) {
-        if (to_intel_encoder(connector)->ddc_bus == &chan->adapter) {
-            intel_encoder = to_intel_encoder(connector);
+    list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+        intel_encoder = enc_to_intel_encoder(encoder);
+        if (intel_encoder->ddc_bus == &chan->adapter)
             break;
-        }
     }
     return intel_encoder;
 }
@@ -2259,7 +2155,7 @@ intel_sdvo_get_slave_addr(struct drm_device *dev, int sdvo_reg)
     struct drm_i915_private *dev_priv = dev->dev_private;
     struct sdvo_device_mapping *my_mapping, *other_mapping;
 
-    if (sdvo_reg == SDVOB) {
+    if (IS_SDVOB(sdvo_reg)) {
         my_mapping = &dev_priv->sdvo_mappings[0];
         other_mapping = &dev_priv->sdvo_mappings[1];
     } else {
@@ -2284,120 +2180,235 @@ intel_sdvo_get_slave_addr(struct drm_device *dev, int sdvo_reg)
     /* No SDVO device info is found for another DVO port,
      * so use mapping assumption we had before BIOS parsing.
      */
-    if (sdvo_reg == SDVOB)
+    if (IS_SDVOB(sdvo_reg))
         return 0x70;
     else
         return 0x72;
 }
 
-static int intel_sdvo_bad_tv_callback(const struct dmi_system_id *id)
+static bool
+intel_sdvo_connector_alloc (struct intel_connector **ret)
 {
-    DRM_DEBUG_KMS("Ignoring bad SDVO TV connector for %s\n", id->ident);
-    return 1;
+    struct intel_connector *intel_connector;
+    struct intel_sdvo_connector *sdvo_connector;
+
+    *ret = kzalloc(sizeof(*intel_connector) +
+            sizeof(*sdvo_connector), GFP_KERNEL);
+    if (!*ret)
+        return false;
+
+    intel_connector = *ret;
+    sdvo_connector = (struct intel_sdvo_connector *)(intel_connector + 1);
+    intel_connector->dev_priv = sdvo_connector;
+
+    return true;
 }
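
intel_sdvo_connector_alloc() carves both structures out of a single zeroed allocation: the base connector sits first and its dev_priv points at the SDVO state placed immediately behind it, so one kfree() releases both. A standalone sketch of the same layout trick (the struct names are simplified stand-ins, not the driver's types):

    #include <stdlib.h>

    struct base_connector { void *dev_priv; /* ... */ };
    struct sdvo_state     { unsigned output_flag; /* ... */ };

    /* One allocation, two structs: freeing the base frees both. */
    static struct base_connector *connector_alloc(void)
    {
        struct base_connector *c;

        c = calloc(1, sizeof(*c) + sizeof(struct sdvo_state));
        if (!c)
            return NULL;

        /* The private state lives directly behind the base struct. */
        c->dev_priv = (struct sdvo_state *)(c + 1);
        return c;
    }
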
 
-static struct dmi_system_id intel_sdvo_bad_tv[] = {
-    {
-        .callback = intel_sdvo_bad_tv_callback,
-        .ident = "IntelG45/ICH10R/DME1737",
-        .matches = {
-            DMI_MATCH(DMI_SYS_VENDOR, "IBM CORPORATION"),
-            DMI_MATCH(DMI_PRODUCT_NAME, "4800784"),
-        },
-    },
-
-    { } /* terminating entry */
-};
+static void
+intel_sdvo_connector_create (struct drm_encoder *encoder,
+                 struct drm_connector *connector)
+{
+    drm_connector_init(encoder->dev, connector, &intel_sdvo_connector_funcs,
+               connector->connector_type);
+
+    drm_connector_helper_add(connector, &intel_sdvo_connector_helper_funcs);
+
+    connector->interlace_allowed = 0;
+    connector->doublescan_allowed = 0;
+    connector->display_info.subpixel_order = SubPixelHorizontalRGB;
+
+    drm_mode_connector_attach_encoder(connector, encoder);
+    drm_sysfs_connector_add(connector);
+}
 
 static bool
-intel_sdvo_output_setup(struct intel_encoder *intel_encoder, uint16_t flags)
+intel_sdvo_dvi_init(struct intel_encoder *intel_encoder, int device)
 {
-    struct drm_connector *connector = &intel_encoder->base;
     struct drm_encoder *encoder = &intel_encoder->enc;
     struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
-    bool ret = true, registered = false;
+    struct drm_connector *connector;
+    struct intel_connector *intel_connector;
+    struct intel_sdvo_connector *sdvo_connector;
+
+    if (!intel_sdvo_connector_alloc(&intel_connector))
+        return false;
+
+    sdvo_connector = intel_connector->dev_priv;
+
+    if (device == 0) {
+        sdvo_priv->controlled_output |= SDVO_OUTPUT_TMDS0;
+        sdvo_connector->output_flag = SDVO_OUTPUT_TMDS0;
+    } else if (device == 1) {
+        sdvo_priv->controlled_output |= SDVO_OUTPUT_TMDS1;
+        sdvo_connector->output_flag = SDVO_OUTPUT_TMDS1;
+    }
+
+    connector = &intel_connector->base;
+    encoder->encoder_type = DRM_MODE_ENCODER_TMDS;
+    connector->connector_type = DRM_MODE_CONNECTOR_DVID;
+
+    if (intel_sdvo_get_supp_encode(intel_encoder, &sdvo_priv->encode)
+        && intel_sdvo_get_digital_encoding_mode(intel_encoder, device)
+        && sdvo_priv->is_hdmi) {
+        /* enable hdmi encoding mode if supported */
+        intel_sdvo_set_encode(intel_encoder, SDVO_ENCODE_HDMI);
+        intel_sdvo_set_colorimetry(intel_encoder,
+                       SDVO_COLORIMETRY_RGB256);
+        connector->connector_type = DRM_MODE_CONNECTOR_HDMIA;
+    }
+    intel_encoder->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
+                    (1 << INTEL_ANALOG_CLONE_BIT);
+
+    intel_sdvo_connector_create(encoder, connector);
+
+    return true;
+}
 
+static bool
+intel_sdvo_tv_init(struct intel_encoder *intel_encoder, int type)
+{
+    struct drm_encoder *encoder = &intel_encoder->enc;
+    struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
+    struct drm_connector *connector;
+    struct intel_connector *intel_connector;
+    struct intel_sdvo_connector *sdvo_connector;
+
+    if (!intel_sdvo_connector_alloc(&intel_connector))
+        return false;
+
+    connector = &intel_connector->base;
+    encoder->encoder_type = DRM_MODE_ENCODER_TVDAC;
+    connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO;
+    sdvo_connector = intel_connector->dev_priv;
+
+    sdvo_priv->controlled_output |= type;
+    sdvo_connector->output_flag = type;
+
+    sdvo_priv->is_tv = true;
+    intel_encoder->needs_tv_clock = true;
+    intel_encoder->clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT;
+
+    intel_sdvo_connector_create(encoder, connector);
+
+    intel_sdvo_tv_create_property(connector, type);
+
+    intel_sdvo_create_enhance_property(connector);
+
+    return true;
+}
 
+static bool
+intel_sdvo_analog_init(struct intel_encoder *intel_encoder, int device)
+{
+    struct drm_encoder *encoder = &intel_encoder->enc;
+    struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
+    struct drm_connector *connector;
+    struct intel_connector *intel_connector;
+    struct intel_sdvo_connector *sdvo_connector;
+
+    if (!intel_sdvo_connector_alloc(&intel_connector))
+        return false;
+
+    connector = &intel_connector->base;
+    encoder->encoder_type = DRM_MODE_ENCODER_DAC;
+    connector->connector_type = DRM_MODE_CONNECTOR_VGA;
+    sdvo_connector = intel_connector->dev_priv;
+
+    if (device == 0) {
+        sdvo_priv->controlled_output |= SDVO_OUTPUT_RGB0;
+        sdvo_connector->output_flag = SDVO_OUTPUT_RGB0;
+    } else if (device == 1) {
+        sdvo_priv->controlled_output |= SDVO_OUTPUT_RGB1;
+        sdvo_connector->output_flag = SDVO_OUTPUT_RGB1;
+    }
+
+    intel_encoder->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
+                    (1 << INTEL_ANALOG_CLONE_BIT);
+
+    intel_sdvo_connector_create(encoder, connector);
+    return true;
+}
 
+static bool
+intel_sdvo_lvds_init(struct intel_encoder *intel_encoder, int device)
+{
+    struct drm_encoder *encoder = &intel_encoder->enc;
+    struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
+    struct drm_connector *connector;
+    struct intel_connector *intel_connector;
+    struct intel_sdvo_connector *sdvo_connector;
+
+    if (!intel_sdvo_connector_alloc(&intel_connector))
+        return false;
+
+    connector = &intel_connector->base;
+    encoder->encoder_type = DRM_MODE_ENCODER_LVDS;
+    connector->connector_type = DRM_MODE_CONNECTOR_LVDS;
+    sdvo_connector = intel_connector->dev_priv;
+
+    sdvo_priv->is_lvds = true;
+
+    if (device == 0) {
+        sdvo_priv->controlled_output |= SDVO_OUTPUT_LVDS0;
+        sdvo_connector->output_flag = SDVO_OUTPUT_LVDS0;
+    } else if (device == 1) {
+        sdvo_priv->controlled_output |= SDVO_OUTPUT_LVDS1;
+        sdvo_connector->output_flag = SDVO_OUTPUT_LVDS1;
+    }
+
+    intel_encoder->clone_mask = (1 << INTEL_ANALOG_CLONE_BIT) |
+                    (1 << INTEL_SDVO_LVDS_CLONE_BIT);
+
+    intel_sdvo_connector_create(encoder, connector);
+    intel_sdvo_create_enhance_property(connector);
+    return true;
+}
 
+static bool
+intel_sdvo_output_setup(struct intel_encoder *intel_encoder, uint16_t flags)
+{
+    struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
 
     sdvo_priv->is_tv = false;
     intel_encoder->needs_tv_clock = false;
     sdvo_priv->is_lvds = false;
 
-    if (device_is_registered(&connector->kdev)) {
-        drm_sysfs_connector_remove(connector);
-        registered = true;
-    }
+    /* An SDVO device will not expose an XXX1 output unless the matching
+     * XXX0 output exists. */
 
-    if (flags &
-        (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1)) {
-        if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_TMDS0)
-            sdvo_priv->controlled_output = SDVO_OUTPUT_TMDS0;
-        else
-            sdvo_priv->controlled_output = SDVO_OUTPUT_TMDS1;
-
-        encoder->encoder_type = DRM_MODE_ENCODER_TMDS;
-        connector->connector_type = DRM_MODE_CONNECTOR_DVID;
-
-        if (intel_sdvo_get_supp_encode(intel_encoder,
-                           &sdvo_priv->encode) &&
-            intel_sdvo_get_digital_encoding_mode(intel_encoder) &&
-            sdvo_priv->is_hdmi) {
-            /* enable hdmi encoding mode if supported */
-            intel_sdvo_set_encode(intel_encoder, SDVO_ENCODE_HDMI);
-            intel_sdvo_set_colorimetry(intel_encoder,
-                           SDVO_COLORIMETRY_RGB256);
-            connector->connector_type = DRM_MODE_CONNECTOR_HDMIA;
-            intel_encoder->clone_mask =
-                (1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
-                (1 << INTEL_ANALOG_CLONE_BIT);
-        }
-    } else if ((flags & SDVO_OUTPUT_SVID0) &&
-           !dmi_check_system(intel_sdvo_bad_tv)) {
-
-        sdvo_priv->controlled_output = SDVO_OUTPUT_SVID0;
-        encoder->encoder_type = DRM_MODE_ENCODER_TVDAC;
-        connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO;
-        sdvo_priv->is_tv = true;
-        intel_encoder->needs_tv_clock = true;
-        intel_encoder->clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT;
-    } else if (flags & SDVO_OUTPUT_RGB0) {
-
-        sdvo_priv->controlled_output = SDVO_OUTPUT_RGB0;
-        encoder->encoder_type = DRM_MODE_ENCODER_DAC;
-        connector->connector_type = DRM_MODE_CONNECTOR_VGA;
-        intel_encoder->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
-                        (1 << INTEL_ANALOG_CLONE_BIT);
-    } else if (flags & SDVO_OUTPUT_RGB1) {
-
-        sdvo_priv->controlled_output = SDVO_OUTPUT_RGB1;
-        encoder->encoder_type = DRM_MODE_ENCODER_DAC;
-        connector->connector_type = DRM_MODE_CONNECTOR_VGA;
-        intel_encoder->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
-                        (1 << INTEL_ANALOG_CLONE_BIT);
-    } else if (flags & SDVO_OUTPUT_CVBS0) {
-
-        sdvo_priv->controlled_output = SDVO_OUTPUT_CVBS0;
-        encoder->encoder_type = DRM_MODE_ENCODER_TVDAC;
-        connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO;
-        sdvo_priv->is_tv = true;
-        intel_encoder->needs_tv_clock = true;
-        intel_encoder->clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT;
-    } else if (flags & SDVO_OUTPUT_LVDS0) {
-
-        sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS0;
-        encoder->encoder_type = DRM_MODE_ENCODER_LVDS;
-        connector->connector_type = DRM_MODE_CONNECTOR_LVDS;
-        sdvo_priv->is_lvds = true;
-        intel_encoder->clone_mask = (1 << INTEL_ANALOG_CLONE_BIT) |
-                        (1 << INTEL_SDVO_LVDS_CLONE_BIT);
-    } else if (flags & SDVO_OUTPUT_LVDS1) {
-
-        sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS1;
-        encoder->encoder_type = DRM_MODE_ENCODER_LVDS;
-        connector->connector_type = DRM_MODE_CONNECTOR_LVDS;
-        sdvo_priv->is_lvds = true;
-        intel_encoder->clone_mask = (1 << INTEL_ANALOG_CLONE_BIT) |
-                        (1 << INTEL_SDVO_LVDS_CLONE_BIT);
-    } else {
+    if (flags & SDVO_OUTPUT_TMDS0)
+        if (!intel_sdvo_dvi_init(intel_encoder, 0))
+            return false;
+
+    if ((flags & SDVO_TMDS_MASK) == SDVO_TMDS_MASK)
+        if (!intel_sdvo_dvi_init(intel_encoder, 1))
+            return false;
+
+    /* TV has no XXX1 function block */
+    if (flags & SDVO_OUTPUT_SVID0)
+        if (!intel_sdvo_tv_init(intel_encoder, SDVO_OUTPUT_SVID0))
+            return false;
+
+    if (flags & SDVO_OUTPUT_CVBS0)
+        if (!intel_sdvo_tv_init(intel_encoder, SDVO_OUTPUT_CVBS0))
+            return false;
 
+    if (flags & SDVO_OUTPUT_RGB0)
+        if (!intel_sdvo_analog_init(intel_encoder, 0))
+            return false;
+
+    if ((flags & SDVO_RGB_MASK) == SDVO_RGB_MASK)
+        if (!intel_sdvo_analog_init(intel_encoder, 1))
+            return false;
+
+    if (flags & SDVO_OUTPUT_LVDS0)
+        if (!intel_sdvo_lvds_init(intel_encoder, 0))
+            return false;
+
+    if ((flags & SDVO_LVDS_MASK) == SDVO_LVDS_MASK)
+        if (!intel_sdvo_lvds_init(intel_encoder, 1))
+            return false;
+
+    if ((flags & SDVO_OUTPUT_MASK) == 0) {
         unsigned char bytes[2];
 
         sdvo_priv->controlled_output = 0;
@@ -2405,28 +2416,25 @@ intel_sdvo_output_setup(struct intel_encoder *intel_encoder, uint16_t flags)
         DRM_DEBUG_KMS("%s: Unknown SDVO output type (0x%02x%02x)\n",
                   SDVO_NAME(sdvo_priv),
                   bytes[0], bytes[1]);
-        ret = false;
+        return false;
     }
     intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
 
-    if (ret && registered)
-        ret = drm_sysfs_connector_add(connector) == 0 ? true : false;
-
-
-    return ret;
-
+    return true;
 }
 
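
output_setup now fans the device's capability bitmask out to one connector per reported output, instead of electing a single "controlled" output. A condensed sketch of that dispatch shape (the bit values are illustrative and the init callback stands in for the dvi/tv/analog/lvds helpers):

    #include <stdbool.h>
    #include <stdint.h>

    #define OUT_TMDS0 0x0001u            /* illustrative bit assignments */
    #define OUT_TMDS1 0x0100u
    #define TMDS_MASK (OUT_TMDS0 | OUT_TMDS1)

    /* Sketch: create a connector for each advertised output. */
    static bool setup_outputs(uint16_t flags, bool (*init)(int device))
    {
        if ((flags & OUT_TMDS0) && !init(0))
            return false;
        /* The secondary output only exists alongside the primary one. */
        if ((flags & TMDS_MASK) == TMDS_MASK && !init(1))
            return false;
        return true;
    }
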
-static void intel_sdvo_tv_create_property(struct drm_connector *connector)
+static void intel_sdvo_tv_create_property(struct drm_connector *connector, int type)
 {
-    struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+    struct drm_encoder *encoder = intel_attached_encoder(connector);
+    struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
     struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
+    struct intel_connector *intel_connector = to_intel_connector(connector);
+    struct intel_sdvo_connector *sdvo_connector = intel_connector->dev_priv;
     struct intel_sdvo_tv_format format;
     uint32_t format_map, i;
     uint8_t status;
 
-    intel_sdvo_set_target_output(intel_encoder,
-                     sdvo_priv->controlled_output);
+    intel_sdvo_set_target_output(intel_encoder, type);
 
     intel_sdvo_write_cmd(intel_encoder,
                  SDVO_CMD_GET_SUPPORTED_TV_FORMATS, NULL, 0);
@@ -2441,35 +2449,37 @@ static void intel_sdvo_tv_create_property(struct drm_connector *connector)
     if (format_map == 0)
         return;
 
-    sdvo_priv->format_supported_num = 0;
+    sdvo_connector->format_supported_num = 0;
     for (i = 0 ; i < TV_FORMAT_NUM; i++)
         if (format_map & (1 << i)) {
-            sdvo_priv->tv_format_supported
-            [sdvo_priv->format_supported_num++] =
+            sdvo_connector->tv_format_supported
+            [sdvo_connector->format_supported_num++] =
             tv_format_names[i];
         }
 
 
-    sdvo_priv->tv_format_property =
+    sdvo_connector->tv_format_property =
         drm_property_create(
             connector->dev, DRM_MODE_PROP_ENUM,
-            "mode", sdvo_priv->format_supported_num);
+            "mode", sdvo_connector->format_supported_num);
 
-    for (i = 0; i < sdvo_priv->format_supported_num; i++)
+    for (i = 0; i < sdvo_connector->format_supported_num; i++)
         drm_property_add_enum(
-                sdvo_priv->tv_format_property, i,
-                i, sdvo_priv->tv_format_supported[i]);
+                sdvo_connector->tv_format_property, i,
+                i, sdvo_connector->tv_format_supported[i]);
 
-    sdvo_priv->tv_format_name = sdvo_priv->tv_format_supported[0];
+    sdvo_priv->tv_format_name = sdvo_connector->tv_format_supported[0];
     drm_connector_attach_property(
-            connector, sdvo_priv->tv_format_property, 0);
+            connector, sdvo_connector->tv_format_property, 0);
 
 }
 
 static void intel_sdvo_create_enhance_property(struct drm_connector *connector)
 {
-    struct intel_encoder *intel_encoder = to_intel_encoder(connector);
-    struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
+    struct drm_encoder *encoder = intel_attached_encoder(connector);
+    struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+    struct intel_connector *intel_connector = to_intel_connector(connector);
+    struct intel_sdvo_connector *sdvo_priv = intel_connector->dev_priv;
     struct intel_sdvo_enhancements_reply sdvo_data;
     struct drm_device *dev = connector->dev;
     uint8_t status;
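
Building the TV-format property is a straight bitmask-to-enum translation: collect the supported format names, create an enum property sized to that count, and add one entry per name with value equal to index. A self-contained sketch of that translation step (the struct and count are simplified stand-ins, not the DRM property API):

    #include <stdint.h>

    #define TV_FORMAT_NUM 19    /* illustrative count */

    struct prop {
        int num;
        const char *names[TV_FORMAT_NUM];
    };

    /* Sketch: filter format_names down to the bits set in format_map. */
    static void build_tv_format_prop(struct prop *p, uint32_t format_map,
                                     const char *const names[TV_FORMAT_NUM])
    {
        p->num = 0;
        for (int i = 0; i < TV_FORMAT_NUM; i++)
            if (format_map & (1u << i))
                p->names[p->num++] = names[i];
        /* enum value == array index, exactly as the driver attaches them */
    }
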
@@ -2488,7 +2498,7 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector)
         DRM_DEBUG_KMS("No enhancement is supported\n");
         return;
     }
-    if (sdvo_priv->is_tv) {
+    if (IS_TV(sdvo_priv)) {
         /* when horizontal overscan is supported, Add the left/right
          * property
          */
@@ -2636,8 +2646,6 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector)
                       "default %d, current %d\n",
                       data_value[0], data_value[1], response);
         }
-    }
-    if (sdvo_priv->is_tv) {
         if (sdvo_data.saturation) {
             intel_sdvo_write_cmd(intel_encoder,
                          SDVO_CMD_GET_MAX_SATURATION, NULL, 0);
@@ -2733,7 +2741,7 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector)
                       data_value[0], data_value[1], response);
         }
     }
-    if (sdvo_priv->is_tv || sdvo_priv->is_lvds) {
+    if (IS_TV(sdvo_priv) || IS_LVDS(sdvo_priv)) {
         if (sdvo_data.brightness) {
             intel_sdvo_write_cmd(intel_encoder,
                          SDVO_CMD_GET_MAX_BRIGHTNESS, NULL, 0);
@@ -2773,12 +2781,11 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector)
 bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
 {
     struct drm_i915_private *dev_priv = dev->dev_private;
-    struct drm_connector *connector;
     struct intel_encoder *intel_encoder;
     struct intel_sdvo_priv *sdvo_priv;
-
     u8 ch[0x40];
     int i;
+    u32 i2c_reg, ddc_reg, analog_ddc_reg;
 
     intel_encoder = kcalloc(sizeof(struct intel_encoder)+sizeof(struct intel_sdvo_priv), 1, GFP_KERNEL);
     if (!intel_encoder) {
@@ -2791,11 +2798,21 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
     intel_encoder->dev_priv = sdvo_priv;
     intel_encoder->type = INTEL_OUTPUT_SDVO;
 
+    if (HAS_PCH_SPLIT(dev)) {
+        i2c_reg = PCH_GPIOE;
+        ddc_reg = PCH_GPIOE;
+        analog_ddc_reg = PCH_GPIOA;
+    } else {
+        i2c_reg = GPIOE;
+        ddc_reg = GPIOE;
+        analog_ddc_reg = GPIOA;
+    }
+
     /* setup the DDC bus. */
-    if (sdvo_reg == SDVOB)
-        intel_encoder->i2c_bus = intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOB");
+    if (IS_SDVOB(sdvo_reg))
+        intel_encoder->i2c_bus = intel_i2c_create(dev, i2c_reg, "SDVOCTRL_E for SDVOB");
     else
-        intel_encoder->i2c_bus = intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOC");
+        intel_encoder->i2c_bus = intel_i2c_create(dev, i2c_reg, "SDVOCTRL_E for SDVOC");
 
     if (!intel_encoder->i2c_bus)
         goto err_inteloutput;
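
IS_SDVOB() is not defined in this hunk. Since the PCH split reuses this path for the transcoder-based SDVO/HDMI-B register, a plausible definition would accept both register offsets -- an assumption, not shown in the patch:

    /* Assumed shape of the helper used above; the patch only shows its
     * call sites. It must match both the GMCH and the PCH offset. */
    #define IS_SDVOB(reg) ((reg) == SDVOB || (reg) == PCH_SDVOB)
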
@@ -2809,20 +2826,20 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
     for (i = 0; i < 0x40; i++) {
         if (!intel_sdvo_read_byte(intel_encoder, i, &ch[i])) {
             DRM_DEBUG_KMS("No SDVO device found on SDVO%c\n",
-                      sdvo_reg == SDVOB ? 'B' : 'C');
+                      IS_SDVOB(sdvo_reg) ? 'B' : 'C');
             goto err_i2c;
         }
     }
 
     /* setup the DDC bus. */
-    if (sdvo_reg == SDVOB) {
-        intel_encoder->ddc_bus = intel_i2c_create(dev, GPIOE, "SDVOB DDC BUS");
-        sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, GPIOA,
+    if (IS_SDVOB(sdvo_reg)) {
+        intel_encoder->ddc_bus = intel_i2c_create(dev, ddc_reg, "SDVOB DDC BUS");
+        sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, analog_ddc_reg,
                                  "SDVOB/VGA DDC BUS");
         dev_priv->hotplug_supported_mask |= SDVOB_HOTPLUG_INT_STATUS;
     } else {
-        intel_encoder->ddc_bus = intel_i2c_create(dev, GPIOE, "SDVOC DDC BUS");
-        sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, GPIOA,
+        intel_encoder->ddc_bus = intel_i2c_create(dev, ddc_reg, "SDVOC DDC BUS");
+        sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, analog_ddc_reg,
                                  "SDVOC/VGA DDC BUS");
         dev_priv->hotplug_supported_mask |= SDVOC_HOTPLUG_INT_STATUS;
     }
@@ -2833,40 +2850,20 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
     /* Wrap with our custom algo which switches to DDC mode */
     intel_encoder->ddc_bus->algo = &intel_sdvo_i2c_bit_algo;
 
+    /* the encoder type will be decided later */
+    drm_encoder_init(dev, &intel_encoder->enc, &intel_sdvo_enc_funcs, 0);
+    drm_encoder_helper_add(&intel_encoder->enc, &intel_sdvo_helper_funcs);
+
     /* By default, assume the SDVO device is not driving an LVDS panel. */
     intel_sdvo_get_capabilities(intel_encoder, &sdvo_priv->caps);
 
     if (intel_sdvo_output_setup(intel_encoder,
                     sdvo_priv->caps.output_flags) != true) {
         DRM_DEBUG_KMS("SDVO output failed to setup on SDVO%c\n",
-                  sdvo_reg == SDVOB ? 'B' : 'C');
+                  IS_SDVOB(sdvo_reg) ? 'B' : 'C');
         goto err_i2c;
     }
 
-
-    connector = &intel_encoder->base;
-    drm_connector_init(dev, connector, &intel_sdvo_connector_funcs,
-               connector->connector_type);
-
-    drm_connector_helper_add(connector, &intel_sdvo_connector_helper_funcs);
-    connector->interlace_allowed = 0;
-    connector->doublescan_allowed = 0;
-    connector->display_info.subpixel_order = SubPixelHorizontalRGB;
-
-    drm_encoder_init(dev, &intel_encoder->enc,
-             &intel_sdvo_enc_funcs, intel_encoder->enc.encoder_type);
-
-    drm_encoder_helper_add(&intel_encoder->enc, &intel_sdvo_helper_funcs);
-
-    drm_mode_connector_attach_encoder(&intel_encoder->base, &intel_encoder->enc);
-    if (sdvo_priv->is_tv)
-        intel_sdvo_tv_create_property(connector);
-
-    if (sdvo_priv->is_tv || sdvo_priv->is_lvds)
-        intel_sdvo_create_enhance_property(connector);
-
-    drm_sysfs_connector_add(connector);
-
     intel_sdvo_select_ddc_bus(sdvo_priv);
 
     /* Set the input timing to the screen. Assume always input 0. */
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index d7d39b2327df..6d553c29d106 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -916,143 +916,6 @@ intel_tv_dpms(struct drm_encoder *encoder, int mode)
     }
 }
 
-static void
-intel_tv_save(struct drm_connector *connector)
-{
-    struct drm_device *dev = connector->dev;
-    struct drm_i915_private *dev_priv = dev->dev_private;
-    struct intel_encoder *intel_encoder = to_intel_encoder(connector);
-    struct intel_tv_priv *tv_priv = intel_encoder->dev_priv;
-    int i;
-
-    tv_priv->save_TV_H_CTL_1 = I915_READ(TV_H_CTL_1);
-    tv_priv->save_TV_H_CTL_2 = I915_READ(TV_H_CTL_2);
-    tv_priv->save_TV_H_CTL_3 = I915_READ(TV_H_CTL_3);
-    tv_priv->save_TV_V_CTL_1 = I915_READ(TV_V_CTL_1);
-    tv_priv->save_TV_V_CTL_2 = I915_READ(TV_V_CTL_2);
-    tv_priv->save_TV_V_CTL_3 = I915_READ(TV_V_CTL_3);
-    tv_priv->save_TV_V_CTL_4 = I915_READ(TV_V_CTL_4);
-    tv_priv->save_TV_V_CTL_5 = I915_READ(TV_V_CTL_5);
-    tv_priv->save_TV_V_CTL_6 = I915_READ(TV_V_CTL_6);
-    tv_priv->save_TV_V_CTL_7 = I915_READ(TV_V_CTL_7);
-    tv_priv->save_TV_SC_CTL_1 = I915_READ(TV_SC_CTL_1);
-    tv_priv->save_TV_SC_CTL_2 = I915_READ(TV_SC_CTL_2);
-    tv_priv->save_TV_SC_CTL_3 = I915_READ(TV_SC_CTL_3);
-
-    tv_priv->save_TV_CSC_Y = I915_READ(TV_CSC_Y);
-    tv_priv->save_TV_CSC_Y2 = I915_READ(TV_CSC_Y2);
-    tv_priv->save_TV_CSC_U = I915_READ(TV_CSC_U);
-    tv_priv->save_TV_CSC_U2 = I915_READ(TV_CSC_U2);
-    tv_priv->save_TV_CSC_V = I915_READ(TV_CSC_V);
-    tv_priv->save_TV_CSC_V2 = I915_READ(TV_CSC_V2);
-    tv_priv->save_TV_CLR_KNOBS = I915_READ(TV_CLR_KNOBS);
-    tv_priv->save_TV_CLR_LEVEL = I915_READ(TV_CLR_LEVEL);
-    tv_priv->save_TV_WIN_POS = I915_READ(TV_WIN_POS);
-    tv_priv->save_TV_WIN_SIZE = I915_READ(TV_WIN_SIZE);
-    tv_priv->save_TV_FILTER_CTL_1 = I915_READ(TV_FILTER_CTL_1);
-    tv_priv->save_TV_FILTER_CTL_2 = I915_READ(TV_FILTER_CTL_2);
-    tv_priv->save_TV_FILTER_CTL_3 = I915_READ(TV_FILTER_CTL_3);
-
-    for (i = 0; i < 60; i++)
-        tv_priv->save_TV_H_LUMA[i] = I915_READ(TV_H_LUMA_0 + (i << 2));
-    for (i = 0; i < 60; i++)
-        tv_priv->save_TV_H_CHROMA[i] = I915_READ(TV_H_CHROMA_0 + (i << 2));
-    for (i = 0; i < 43; i++)
-        tv_priv->save_TV_V_LUMA[i] = I915_READ(TV_V_LUMA_0 + (i << 2));
-    for (i = 0; i < 43; i++)
-        tv_priv->save_TV_V_CHROMA[i] = I915_READ(TV_V_CHROMA_0 + (i << 2));
-
-    tv_priv->save_TV_DAC = I915_READ(TV_DAC);
-    tv_priv->save_TV_CTL = I915_READ(TV_CTL);
-}
-
-static void
-intel_tv_restore(struct drm_connector *connector)
-{
-    struct drm_device *dev = connector->dev;
-    struct drm_i915_private *dev_priv = dev->dev_private;
-    struct intel_encoder *intel_encoder = to_intel_encoder(connector);
-    struct intel_tv_priv *tv_priv = intel_encoder->dev_priv;
-    struct drm_crtc *crtc = connector->encoder->crtc;
-    struct intel_crtc *intel_crtc;
-    int i;
-
-    /* FIXME: No CRTC? */
-    if (!crtc)
-        return;
-
-    intel_crtc = to_intel_crtc(crtc);
-    I915_WRITE(TV_H_CTL_1, tv_priv->save_TV_H_CTL_1);
-    I915_WRITE(TV_H_CTL_2, tv_priv->save_TV_H_CTL_2);
-    I915_WRITE(TV_H_CTL_3, tv_priv->save_TV_H_CTL_3);
-    I915_WRITE(TV_V_CTL_1, tv_priv->save_TV_V_CTL_1);
-    I915_WRITE(TV_V_CTL_2, tv_priv->save_TV_V_CTL_2);
-    I915_WRITE(TV_V_CTL_3, tv_priv->save_TV_V_CTL_3);
-    I915_WRITE(TV_V_CTL_4, tv_priv->save_TV_V_CTL_4);
-    I915_WRITE(TV_V_CTL_5, tv_priv->save_TV_V_CTL_5);
-    I915_WRITE(TV_V_CTL_6, tv_priv->save_TV_V_CTL_6);
-    I915_WRITE(TV_V_CTL_7, tv_priv->save_TV_V_CTL_7);
-    I915_WRITE(TV_SC_CTL_1, tv_priv->save_TV_SC_CTL_1);
-    I915_WRITE(TV_SC_CTL_2, tv_priv->save_TV_SC_CTL_2);
-    I915_WRITE(TV_SC_CTL_3, tv_priv->save_TV_SC_CTL_3);
-
-    I915_WRITE(TV_CSC_Y, tv_priv->save_TV_CSC_Y);
-    I915_WRITE(TV_CSC_Y2, tv_priv->save_TV_CSC_Y2);
-    I915_WRITE(TV_CSC_U, tv_priv->save_TV_CSC_U);
-    I915_WRITE(TV_CSC_U2, tv_priv->save_TV_CSC_U2);
-    I915_WRITE(TV_CSC_V, tv_priv->save_TV_CSC_V);
-    I915_WRITE(TV_CSC_V2, tv_priv->save_TV_CSC_V2);
-    I915_WRITE(TV_CLR_KNOBS, tv_priv->save_TV_CLR_KNOBS);
-    I915_WRITE(TV_CLR_LEVEL, tv_priv->save_TV_CLR_LEVEL);
-
-    {
-        int pipeconf_reg = (intel_crtc->pipe == 0) ?
-            PIPEACONF : PIPEBCONF;
-        int dspcntr_reg = (intel_crtc->plane == 0) ?
-            DSPACNTR : DSPBCNTR;
-        int pipeconf = I915_READ(pipeconf_reg);
-        int dspcntr = I915_READ(dspcntr_reg);
-        int dspbase_reg = (intel_crtc->plane == 0) ?
-            DSPAADDR : DSPBADDR;
-        /* Pipe must be off here */
-        I915_WRITE(dspcntr_reg, dspcntr & ~DISPLAY_PLANE_ENABLE);
-        /* Flush the plane changes */
-        I915_WRITE(dspbase_reg, I915_READ(dspbase_reg));
-
-        if (!IS_I9XX(dev)) {
-            /* Wait for vblank for the disable to take effect */
-            intel_wait_for_vblank(dev);
-        }
-
-        I915_WRITE(pipeconf_reg, pipeconf & ~PIPEACONF_ENABLE);
-        /* Wait for vblank for the disable to take effect. */
-        intel_wait_for_vblank(dev);
-
-        /* Filter ctl must be set before TV_WIN_SIZE */
-        I915_WRITE(TV_FILTER_CTL_1, tv_priv->save_TV_FILTER_CTL_1);
-        I915_WRITE(TV_FILTER_CTL_2, tv_priv->save_TV_FILTER_CTL_2);
-        I915_WRITE(TV_FILTER_CTL_3, tv_priv->save_TV_FILTER_CTL_3);
-        I915_WRITE(TV_WIN_POS, tv_priv->save_TV_WIN_POS);
-        I915_WRITE(TV_WIN_SIZE, tv_priv->save_TV_WIN_SIZE);
-        I915_WRITE(pipeconf_reg, pipeconf);
-        I915_WRITE(dspcntr_reg, dspcntr);
-        /* Flush the plane changes */
-        I915_WRITE(dspbase_reg, I915_READ(dspbase_reg));
-    }
-
-    for (i = 0; i < 60; i++)
-        I915_WRITE(TV_H_LUMA_0 + (i << 2), tv_priv->save_TV_H_LUMA[i]);
-    for (i = 0; i < 60; i++)
-        I915_WRITE(TV_H_CHROMA_0 + (i << 2), tv_priv->save_TV_H_CHROMA[i]);
-    for (i = 0; i < 43; i++)
-        I915_WRITE(TV_V_LUMA_0 + (i << 2), tv_priv->save_TV_V_LUMA[i]);
-    for (i = 0; i < 43; i++)
-        I915_WRITE(TV_V_CHROMA_0 + (i << 2), tv_priv->save_TV_V_CHROMA[i]);
-
-    I915_WRITE(TV_DAC, tv_priv->save_TV_DAC);
-    I915_WRITE(TV_CTL, tv_priv->save_TV_CTL);
-}
-
1056static const struct tv_mode * 919static const struct tv_mode *
1057intel_tv_mode_lookup (char *tv_format) 920intel_tv_mode_lookup (char *tv_format)
1058{ 921{
@@ -1078,7 +941,8 @@ intel_tv_mode_find (struct intel_encoder *intel_encoder)
 static enum drm_mode_status
 intel_tv_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode)
 {
-	struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+	struct drm_encoder *encoder = intel_attached_encoder(connector);
+	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
 	const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder);
 
 	/* Ensure TV refresh is close to desired refresh */
@@ -1441,7 +1305,8 @@ intel_tv_detect_type (struct drm_crtc *crtc, struct intel_encoder *intel_encoder
 	 */
 static void intel_tv_find_better_format(struct drm_connector *connector)
 {
-	struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+	struct drm_encoder *encoder = intel_attached_encoder(connector);
+	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
 	struct intel_tv_priv *tv_priv = intel_encoder->dev_priv;
 	const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder);
 	int i;
@@ -1475,9 +1340,9 @@ intel_tv_detect(struct drm_connector *connector)
 {
 	struct drm_crtc *crtc;
 	struct drm_display_mode mode;
-	struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+	struct drm_encoder *encoder = intel_attached_encoder(connector);
+	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
 	struct intel_tv_priv *tv_priv = intel_encoder->dev_priv;
-	struct drm_encoder *encoder = &intel_encoder->enc;
 	int dpms_mode;
 	int type = tv_priv->type;
 
@@ -1487,10 +1352,12 @@ intel_tv_detect(struct drm_connector *connector)
 	if (encoder->crtc && encoder->crtc->enabled) {
 		type = intel_tv_detect_type(encoder->crtc, intel_encoder);
 	} else {
-		crtc = intel_get_load_detect_pipe(intel_encoder, &mode, &dpms_mode);
+		crtc = intel_get_load_detect_pipe(intel_encoder, connector,
+						  &mode, &dpms_mode);
 		if (crtc) {
 			type = intel_tv_detect_type(crtc, intel_encoder);
-			intel_release_load_detect_pipe(intel_encoder, dpms_mode);
+			intel_release_load_detect_pipe(intel_encoder, connector,
+						       dpms_mode);
 		} else
 			type = -1;
 	}
@@ -1525,7 +1392,8 @@ static void
 intel_tv_chose_preferred_modes(struct drm_connector *connector,
 			       struct drm_display_mode *mode_ptr)
 {
-	struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+	struct drm_encoder *encoder = intel_attached_encoder(connector);
+	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
 	const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder);
 
 	if (tv_mode->nbr_end < 480 && mode_ptr->vdisplay == 480)
@@ -1550,7 +1418,8 @@ static int
 intel_tv_get_modes(struct drm_connector *connector)
 {
 	struct drm_display_mode *mode_ptr;
-	struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+	struct drm_encoder *encoder = intel_attached_encoder(connector);
+	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
 	const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder);
 	int j, count = 0;
 	u64 tmp;
@@ -1604,11 +1473,9 @@ intel_tv_get_modes(struct drm_connector *connector)
 static void
 intel_tv_destroy (struct drm_connector *connector)
 {
-	struct intel_encoder *intel_encoder = to_intel_encoder(connector);
-
 	drm_sysfs_connector_remove(connector);
 	drm_connector_cleanup(connector);
-	kfree(intel_encoder);
+	kfree(connector);
 }
 
 
@@ -1617,9 +1484,9 @@ intel_tv_set_property(struct drm_connector *connector, struct drm_property *prop
 		      uint64_t val)
 {
 	struct drm_device *dev = connector->dev;
-	struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+	struct drm_encoder *encoder = intel_attached_encoder(connector);
+	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
 	struct intel_tv_priv *tv_priv = intel_encoder->dev_priv;
-	struct drm_encoder *encoder = &intel_encoder->enc;
 	struct drm_crtc *crtc = encoder->crtc;
 	int ret = 0;
 	bool changed = false;
@@ -1676,8 +1543,6 @@ static const struct drm_encoder_helper_funcs intel_tv_helper_funcs = {
 
 static const struct drm_connector_funcs intel_tv_connector_funcs = {
 	.dpms = drm_helper_connector_dpms,
-	.save = intel_tv_save,
-	.restore = intel_tv_restore,
 	.detect = intel_tv_detect,
 	.destroy = intel_tv_destroy,
 	.set_property = intel_tv_set_property,
@@ -1687,12 +1552,15 @@ static const struct drm_connector_funcs intel_tv_connector_funcs = {
 static const struct drm_connector_helper_funcs intel_tv_connector_helper_funcs = {
 	.mode_valid = intel_tv_mode_valid,
 	.get_modes = intel_tv_get_modes,
-	.best_encoder = intel_best_encoder,
+	.best_encoder = intel_attached_encoder,
 };
 
 static void intel_tv_enc_destroy(struct drm_encoder *encoder)
 {
+	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+
 	drm_encoder_cleanup(encoder);
+	kfree(intel_encoder);
 }
 
 static const struct drm_encoder_funcs intel_tv_enc_funcs = {
@@ -1741,6 +1609,7 @@ intel_tv_init(struct drm_device *dev)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_connector *connector;
 	struct intel_encoder *intel_encoder;
+	struct intel_connector *intel_connector;
 	struct intel_tv_priv *tv_priv;
 	u32 tv_dac_on, tv_dac_off, save_tv_dac;
 	char **tv_format_names;
@@ -1786,7 +1655,13 @@ intel_tv_init(struct drm_device *dev)
 		return;
 	}
 
-	connector = &intel_encoder->base;
+	intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
+	if (!intel_connector) {
+		kfree(intel_encoder);
+		return;
+	}
+
+	connector = &intel_connector->base;
 
 	drm_connector_init(dev, connector, &intel_tv_connector_funcs,
 			   DRM_MODE_CONNECTOR_SVIDEO);
@@ -1794,7 +1669,7 @@ intel_tv_init(struct drm_device *dev)
 	drm_encoder_init(dev, &intel_encoder->enc, &intel_tv_enc_funcs,
 			 DRM_MODE_ENCODER_TVDAC);
 
-	drm_mode_connector_attach_encoder(&intel_encoder->base, &intel_encoder->enc);
+	drm_mode_connector_attach_encoder(&intel_connector->base, &intel_encoder->enc);
 	tv_priv = (struct intel_tv_priv *)(intel_encoder + 1);
 	intel_encoder->type = INTEL_OUTPUT_TVOUT;
 	intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
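Note: the intel_tv.c hunks above follow one pattern throughout. The connector is no longer the same allocation as the encoder, so code that used to cast the connector with to_intel_encoder() now walks to the attached encoder first and recovers the driver wrapper from there. Below is a minimal standalone sketch of that container_of-based lookup; the stub types (my_encoder, drm_connector_stub) are illustrative stand-ins, not the i915 definitions.

#include <stddef.h>
#include <stdio.h>

/* container_of as used in the kernel: recover the wrapper from a member. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct drm_encoder_stub { int id; };
struct drm_connector_stub { struct drm_encoder_stub *attached; };

/* Driver wrapper around the core encoder, analogous to intel_encoder. */
struct my_encoder {
	struct drm_encoder_stub base;
	int type;
};

/* Step one, what intel_attached_encoder() does: connector -> encoder. */
static struct drm_encoder_stub *
attached_encoder(struct drm_connector_stub *connector)
{
	return connector->attached;
}

/* Step two, what enc_to_intel_encoder() does: encoder -> wrapper. */
static struct my_encoder *to_my_encoder(struct drm_encoder_stub *enc)
{
	return container_of(enc, struct my_encoder, base);
}

int main(void)
{
	struct my_encoder enc = { .base = { .id = 7 }, .type = 3 };
	struct drm_connector_stub conn = { .attached = &enc.base };

	/* Two-step lookup replacing the old direct connector cast. */
	struct my_encoder *me = to_my_encoder(attached_encoder(&conn));
	printf("encoder id=%d type=%d\n", me->base.id, me->type);
	return 0;
}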
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 957d17629840..fb164efada3b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -225,7 +225,7 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
 
 	nouveau_bo_placement_set(nvbo, memtype, 0);
 
-	ret = ttm_bo_validate(bo, &nvbo->placement, false, false);
+	ret = ttm_bo_validate(bo, &nvbo->placement, false, false, false);
 	if (ret == 0) {
 		switch (bo->mem.mem_type) {
 		case TTM_PL_VRAM:
@@ -261,7 +261,7 @@ nouveau_bo_unpin(struct nouveau_bo *nvbo)
 
 	nouveau_bo_placement_set(nvbo, bo->mem.placement, 0);
 
-	ret = ttm_bo_validate(bo, &nvbo->placement, false, false);
+	ret = ttm_bo_validate(bo, &nvbo->placement, false, false, false);
 	if (ret == 0) {
 		switch (bo->mem.mem_type) {
 		case TTM_PL_VRAM:
@@ -391,25 +391,16 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
 		break;
 	case TTM_PL_VRAM:
 		man->flags = TTM_MEMTYPE_FLAG_FIXED |
-			     TTM_MEMTYPE_FLAG_MAPPABLE |
-			     TTM_MEMTYPE_FLAG_NEEDS_IOREMAP;
+			     TTM_MEMTYPE_FLAG_MAPPABLE;
 		man->available_caching = TTM_PL_FLAG_UNCACHED |
 					 TTM_PL_FLAG_WC;
 		man->default_caching = TTM_PL_FLAG_WC;
-
-		man->io_addr = NULL;
-		man->io_offset = drm_get_resource_start(dev, 1);
-		man->io_size = drm_get_resource_len(dev, 1);
-		if (man->io_size > dev_priv->vram_size)
-			man->io_size = dev_priv->vram_size;
-
 		man->gpu_offset = dev_priv->vm_vram_base;
 		break;
 	case TTM_PL_TT:
 		switch (dev_priv->gart_info.type) {
 		case NOUVEAU_GART_AGP:
-			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
-				     TTM_MEMTYPE_FLAG_NEEDS_IOREMAP;
+			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
 			man->available_caching = TTM_PL_FLAG_UNCACHED;
 			man->default_caching = TTM_PL_FLAG_UNCACHED;
 			break;
@@ -424,10 +415,6 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
 			 dev_priv->gart_info.type);
 			return -EINVAL;
 		}
-
-		man->io_offset = dev_priv->gart_info.aper_base;
-		man->io_size = dev_priv->gart_info.aper_size;
-		man->io_addr = NULL;
 		man->gpu_offset = dev_priv->vm_gart_base;
 		break;
 	default:
@@ -462,7 +449,8 @@ nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
 
 static int
 nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
-			      struct nouveau_bo *nvbo, bool evict, bool no_wait,
+			      struct nouveau_bo *nvbo, bool evict,
+			      bool no_wait_reserve, bool no_wait_gpu,
 			      struct ttm_mem_reg *new_mem)
 {
 	struct nouveau_fence *fence = NULL;
@@ -473,7 +461,7 @@ nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
 		return ret;
 
 	ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, NULL,
-					evict, no_wait, new_mem);
+					evict, no_wait_reserve, no_wait_gpu, new_mem);
 	if (nvbo->channel && nvbo->channel != chan)
 		ret = nouveau_fence_wait(fence, NULL, false, false);
 	nouveau_fence_unref((void *)&fence);
@@ -497,7 +485,8 @@ nouveau_bo_mem_ctxdma(struct nouveau_bo *nvbo, struct nouveau_channel *chan,
 
 static int
 nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
-		     int no_wait, struct ttm_mem_reg *new_mem)
+		     bool no_wait_reserve, bool no_wait_gpu,
+		     struct ttm_mem_reg *new_mem)
 {
 	struct nouveau_bo *nvbo = nouveau_bo(bo);
 	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
@@ -575,12 +564,13 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
 		dst_offset += (PAGE_SIZE * line_count);
 	}
 
-	return nouveau_bo_move_accel_cleanup(chan, nvbo, evict, no_wait, new_mem);
+	return nouveau_bo_move_accel_cleanup(chan, nvbo, evict, no_wait_reserve, no_wait_gpu, new_mem);
 }
 
 static int
 nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
-		      bool no_wait, struct ttm_mem_reg *new_mem)
+		      bool no_wait_reserve, bool no_wait_gpu,
+		      struct ttm_mem_reg *new_mem)
 {
 	u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
 	struct ttm_placement placement;
@@ -593,7 +583,7 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
 
 	tmp_mem = *new_mem;
 	tmp_mem.mm_node = NULL;
-	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait);
+	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_reserve, no_wait_gpu);
 	if (ret)
 		return ret;
 
@@ -601,11 +591,11 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
 	if (ret)
 		goto out;
 
-	ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait, &tmp_mem);
+	ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve, no_wait_gpu, &tmp_mem);
 	if (ret)
 		goto out;
 
-	ret = ttm_bo_move_ttm(bo, evict, no_wait, new_mem);
+	ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
 out:
 	if (tmp_mem.mm_node) {
 		spin_lock(&bo->bdev->glob->lru_lock);
@@ -618,7 +608,8 @@ out:
 
 static int
 nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
-		      bool no_wait, struct ttm_mem_reg *new_mem)
+		      bool no_wait_reserve, bool no_wait_gpu,
+		      struct ttm_mem_reg *new_mem)
 {
 	u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
 	struct ttm_placement placement;
@@ -631,15 +622,15 @@ nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
 
 	tmp_mem = *new_mem;
 	tmp_mem.mm_node = NULL;
-	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait);
+	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_reserve, no_wait_gpu);
 	if (ret)
 		return ret;
 
-	ret = ttm_bo_move_ttm(bo, evict, no_wait, &tmp_mem);
+	ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, &tmp_mem);
 	if (ret)
 		goto out;
 
-	ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait, new_mem);
+	ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
 	if (ret)
 		goto out;
 
@@ -706,7 +697,8 @@ nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
 
 static int
 nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
-		bool no_wait, struct ttm_mem_reg *new_mem)
+		bool no_wait_reserve, bool no_wait_gpu,
+		struct ttm_mem_reg *new_mem)
 {
 	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
 	struct nouveau_bo *nvbo = nouveau_bo(bo);
@@ -721,7 +713,7 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
 	/* Software copy if the card isn't up and running yet. */
 	if (dev_priv->init_state != NOUVEAU_CARD_INIT_DONE ||
 	    !dev_priv->channel) {
-		ret = ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
+		ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
 		goto out;
 	}
 
@@ -735,17 +727,17 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
 
 	/* Hardware assisted copy. */
 	if (new_mem->mem_type == TTM_PL_SYSTEM)
-		ret = nouveau_bo_move_flipd(bo, evict, intr, no_wait, new_mem);
+		ret = nouveau_bo_move_flipd(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
 	else if (old_mem->mem_type == TTM_PL_SYSTEM)
-		ret = nouveau_bo_move_flips(bo, evict, intr, no_wait, new_mem);
+		ret = nouveau_bo_move_flips(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
 	else
-		ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait, new_mem);
+		ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
 
 	if (!ret)
 		goto out;
 
 	/* Fallback to software copy. */
-	ret = ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
+	ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
 
 out:
 	if (ret)
@@ -762,6 +754,55 @@ nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
 	return 0;
 }
 
+static int
+nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+{
+	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
+	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
+	struct drm_device *dev = dev_priv->dev;
+
+	mem->bus.addr = NULL;
+	mem->bus.offset = 0;
+	mem->bus.size = mem->num_pages << PAGE_SHIFT;
+	mem->bus.base = 0;
+	mem->bus.is_iomem = false;
+	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
+		return -EINVAL;
+	switch (mem->mem_type) {
+	case TTM_PL_SYSTEM:
+		/* System memory */
+		return 0;
+	case TTM_PL_TT:
+#if __OS_HAS_AGP
+		if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
+			mem->bus.offset = mem->mm_node->start << PAGE_SHIFT;
+			mem->bus.base = dev_priv->gart_info.aper_base;
+			mem->bus.is_iomem = true;
+		}
+#endif
+		break;
+	case TTM_PL_VRAM:
+		mem->bus.offset = mem->mm_node->start << PAGE_SHIFT;
+		mem->bus.base = drm_get_resource_start(dev, 1);
+		mem->bus.is_iomem = true;
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static void
+nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+{
+}
+
+static int
+nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
+{
+	return 0;
+}
+
 struct ttm_bo_driver nouveau_bo_driver = {
 	.create_ttm_backend_entry = nouveau_bo_create_ttm_backend_entry,
 	.invalidate_caches = nouveau_bo_invalidate_caches,
@@ -774,5 +815,8 @@ struct ttm_bo_driver nouveau_bo_driver = {
 	.sync_obj_flush = nouveau_fence_flush,
 	.sync_obj_unref = nouveau_fence_unref,
 	.sync_obj_ref = nouveau_fence_ref,
+	.fault_reserve_notify = &nouveau_ttm_fault_reserve_notify,
+	.io_mem_reserve = &nouveau_ttm_io_mem_reserve,
+	.io_mem_free = &nouveau_ttm_io_mem_free,
 };
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.c b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
index a251886a0ce6..7933de4aff2e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_debugfs.c
+++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
@@ -33,6 +33,8 @@
 #include "drmP.h"
 #include "nouveau_drv.h"
 
+#include <ttm/ttm_page_alloc.h>
+
 static int
 nouveau_debugfs_channel_info(struct seq_file *m, void *data)
 {
@@ -159,6 +161,7 @@ static struct drm_info_list nouveau_debugfs_list[] = {
 	{ "chipset", nouveau_debugfs_chipset_info, 0, NULL },
 	{ "memory", nouveau_debugfs_memory_info, 0, NULL },
 	{ "vbios.rom", nouveau_debugfs_vbios_image, 0, NULL },
+	{ "ttm_page_pool", ttm_page_alloc_debugfs, 0, NULL },
 };
 #define NOUVEAU_DEBUGFS_ENTRIES ARRAY_SIZE(nouveau_debugfs_list)
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index cf1c5c0a0abe..9d7928f40fdf 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -34,10 +34,6 @@ static void
 nouveau_user_framebuffer_destroy(struct drm_framebuffer *drm_fb)
 {
 	struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb);
-	struct drm_device *dev = drm_fb->dev;
-
-	if (drm_fb->fbdev)
-		nouveau_fbcon_remove(dev, drm_fb);
 
 	if (fb->nvbo)
 		drm_gem_object_unreference_unlocked(fb->nvbo->gem);
@@ -61,27 +57,20 @@ static const struct drm_framebuffer_funcs nouveau_framebuffer_funcs = {
 	.create_handle = nouveau_user_framebuffer_create_handle,
 };
 
-struct drm_framebuffer *
-nouveau_framebuffer_create(struct drm_device *dev, struct nouveau_bo *nvbo,
-			   struct drm_mode_fb_cmd *mode_cmd)
+int
+nouveau_framebuffer_init(struct drm_device *dev, struct nouveau_framebuffer *nouveau_fb,
+			 struct drm_mode_fb_cmd *mode_cmd, struct nouveau_bo *nvbo)
 {
-	struct nouveau_framebuffer *fb;
 	int ret;
 
-	fb = kzalloc(sizeof(struct nouveau_framebuffer), GFP_KERNEL);
-	if (!fb)
-		return NULL;
-
-	ret = drm_framebuffer_init(dev, &fb->base, &nouveau_framebuffer_funcs);
+	ret = drm_framebuffer_init(dev, &nouveau_fb->base, &nouveau_framebuffer_funcs);
 	if (ret) {
-		kfree(fb);
-		return NULL;
+		return ret;
 	}
 
-	drm_helper_mode_fill_fb_struct(&fb->base, mode_cmd);
-
-	fb->nvbo = nvbo;
-	return &fb->base;
+	drm_helper_mode_fill_fb_struct(&nouveau_fb->base, mode_cmd);
+	nouveau_fb->nvbo = nvbo;
+	return 0;
 }
 
 static struct drm_framebuffer *
@@ -89,24 +78,28 @@ nouveau_user_framebuffer_create(struct drm_device *dev,
 			       struct drm_file *file_priv,
 			       struct drm_mode_fb_cmd *mode_cmd)
 {
-	struct drm_framebuffer *fb;
+	struct nouveau_framebuffer *nouveau_fb;
 	struct drm_gem_object *gem;
+	int ret;
 
 	gem = drm_gem_object_lookup(dev, file_priv, mode_cmd->handle);
 	if (!gem)
 		return NULL;
 
-	fb = nouveau_framebuffer_create(dev, nouveau_gem_object(gem), mode_cmd);
-	if (!fb) {
+	nouveau_fb = kzalloc(sizeof(struct nouveau_framebuffer), GFP_KERNEL);
+	if (!nouveau_fb)
+		return NULL;
+
+	ret = nouveau_framebuffer_init(dev, nouveau_fb, mode_cmd, nouveau_gem_object(gem));
+	if (ret) {
 		drm_gem_object_unreference(gem);
 		return NULL;
 	}
 
-	return fb;
+	return &nouveau_fb->base;
 }
 
 const struct drm_mode_config_funcs nouveau_mode_config_funcs = {
 	.fb_create = nouveau_user_framebuffer_create,
-	.fb_changed = nouveau_fbcon_probe,
 };
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c
index 1de974acbc65..c6079e36669d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.c
@@ -153,7 +153,6 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state)
 	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
 	struct nouveau_channel *chan;
 	struct drm_crtc *crtc;
-	uint32_t fbdev_flags;
 	int ret, i;
 
 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
@@ -163,8 +162,7 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state)
 		return 0;
 
 	NV_INFO(dev, "Disabling fbcon acceleration...\n");
-	fbdev_flags = dev_priv->fbdev_info->flags;
-	dev_priv->fbdev_info->flags |= FBINFO_HWACCEL_DISABLED;
+	nouveau_fbcon_save_disable_accel(dev);
 
 	NV_INFO(dev, "Unpinning framebuffer(s)...\n");
 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
@@ -230,9 +228,9 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state)
 	}
 
 	acquire_console_sem();
-	fb_set_suspend(dev_priv->fbdev_info, 1);
+	nouveau_fbcon_set_suspend(dev, 1);
 	release_console_sem();
-	dev_priv->fbdev_info->flags = fbdev_flags;
+	nouveau_fbcon_restore_accel(dev);
 	return 0;
 
 out_abort:
@@ -250,14 +248,12 @@ nouveau_pci_resume(struct pci_dev *pdev)
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_engine *engine = &dev_priv->engine;
 	struct drm_crtc *crtc;
-	uint32_t fbdev_flags;
 	int ret, i;
 
 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
 		return -ENODEV;
 
-	fbdev_flags = dev_priv->fbdev_info->flags;
-	dev_priv->fbdev_info->flags |= FBINFO_HWACCEL_DISABLED;
+	nouveau_fbcon_save_disable_accel(dev);
 
 	NV_INFO(dev, "We're back, enabling device...\n");
 	pci_set_power_state(pdev, PCI_D0);
@@ -332,13 +328,14 @@ nouveau_pci_resume(struct pci_dev *pdev)
 	}
 
 	acquire_console_sem();
-	fb_set_suspend(dev_priv->fbdev_info, 0);
+	nouveau_fbcon_set_suspend(dev, 0);
 	release_console_sem();
 
-	nouveau_fbcon_zfill(dev);
+	nouveau_fbcon_zfill_all(dev);
 
 	drm_helper_resume_force_mode(dev);
-	dev_priv->fbdev_info->flags = fbdev_flags;
+
+	nouveau_fbcon_restore_accel(dev);
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index ace630aa89e1..5b47b79f45e8 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -535,6 +535,7 @@ struct drm_nouveau_private {
 
 	struct fb_info *fbdev_info;
 
+	int fifo_alloc_count;
 	struct nouveau_channel *fifos[NOUVEAU_MAX_CHANNEL_NR];
 
 	struct nouveau_engine engine;
@@ -621,6 +622,8 @@ struct drm_nouveau_private {
 	struct {
 		struct dentry *channel_root;
 	} debugfs;
+
+	struct nouveau_fbdev *nfbdev;
 };
 
 static inline struct drm_nouveau_private *
diff --git a/drivers/gpu/drm/nouveau/nouveau_fb.h b/drivers/gpu/drm/nouveau/nouveau_fb.h
index 4a3f31aa1949..d432134b71e0 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fb.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fb.h
@@ -40,8 +40,6 @@ nouveau_framebuffer(struct drm_framebuffer *fb)
 
 extern const struct drm_mode_config_funcs nouveau_mode_config_funcs;
 
-struct drm_framebuffer *
-nouveau_framebuffer_create(struct drm_device *, struct nouveau_bo *,
-			   struct drm_mode_fb_cmd *);
-
+int nouveau_framebuffer_init(struct drm_device *dev, struct nouveau_framebuffer *nouveau_fb,
+			     struct drm_mode_fb_cmd *mode_cmd, struct nouveau_bo *nvbo);
 #endif /* __NOUVEAU_FB_H__ */
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 8e7dc1d4912a..f29fa8c117ce 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -52,8 +52,8 @@
 static int
 nouveau_fbcon_sync(struct fb_info *info)
 {
-	struct nouveau_fbcon_par *par = info->par;
-	struct drm_device *dev = par->dev;
+	struct nouveau_fbdev *nfbdev = info->par;
+	struct drm_device *dev = nfbdev->dev;
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_channel *chan = dev_priv->channel;
 	int ret, i;
@@ -97,7 +97,6 @@ static struct fb_ops nouveau_fbcon_ops = {
 	.owner = THIS_MODULE,
 	.fb_check_var = drm_fb_helper_check_var,
 	.fb_set_par = drm_fb_helper_set_par,
-	.fb_setcolreg = drm_fb_helper_setcolreg,
 	.fb_fillrect = cfb_fillrect,
 	.fb_copyarea = cfb_copyarea,
 	.fb_imageblit = cfb_imageblit,
@@ -111,7 +110,6 @@ static struct fb_ops nv04_fbcon_ops = {
 	.owner = THIS_MODULE,
 	.fb_check_var = drm_fb_helper_check_var,
 	.fb_set_par = drm_fb_helper_set_par,
-	.fb_setcolreg = drm_fb_helper_setcolreg,
 	.fb_fillrect = nv04_fbcon_fillrect,
 	.fb_copyarea = nv04_fbcon_copyarea,
 	.fb_imageblit = nv04_fbcon_imageblit,
@@ -125,7 +123,6 @@ static struct fb_ops nv50_fbcon_ops = {
 	.owner = THIS_MODULE,
 	.fb_check_var = drm_fb_helper_check_var,
 	.fb_set_par = drm_fb_helper_set_par,
-	.fb_setcolreg = drm_fb_helper_setcolreg,
 	.fb_fillrect = nv50_fbcon_fillrect,
 	.fb_copyarea = nv50_fbcon_copyarea,
 	.fb_imageblit = nv50_fbcon_imageblit,
@@ -155,11 +152,6 @@ static void nouveau_fbcon_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
 	*blue = nv_crtc->lut.b[regno];
 }
 
-static struct drm_fb_helper_funcs nouveau_fbcon_helper_funcs = {
-	.gamma_set = nouveau_fbcon_gamma_set,
-	.gamma_get = nouveau_fbcon_gamma_get
-};
-
 #if defined(__i386__) || defined(__x86_64__)
 static bool
 nouveau_fbcon_has_vesafb_or_efifb(struct drm_device *dev)
@@ -198,11 +190,10 @@ not_fb:
 }
 #endif
 
-void
-nouveau_fbcon_zfill(struct drm_device *dev)
+static void
+nouveau_fbcon_zfill(struct drm_device *dev, struct nouveau_fbdev *nfbdev)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct fb_info *info = dev_priv->fbdev_info;
+	struct fb_info *info = nfbdev->helper.fbdev;
 	struct fb_fillrect rect;
 
 	/* Clear the entire fbcon. The drm will program every connector
@@ -218,14 +209,12 @@ nouveau_fbcon_zfill(struct drm_device *dev)
 }
 
 static int
-nouveau_fbcon_create(struct drm_device *dev, uint32_t fb_width,
-		     uint32_t fb_height, uint32_t surface_width,
-		     uint32_t surface_height, uint32_t surface_depth,
-		     uint32_t surface_bpp, struct drm_framebuffer **pfb)
+nouveau_fbcon_create(struct nouveau_fbdev *nfbdev,
+		     struct drm_fb_helper_surface_size *sizes)
 {
+	struct drm_device *dev = nfbdev->dev;
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct fb_info *info;
-	struct nouveau_fbcon_par *par;
 	struct drm_framebuffer *fb;
 	struct nouveau_framebuffer *nouveau_fb;
 	struct nouveau_bo *nvbo;
@@ -233,13 +222,13 @@ nouveau_fbcon_create(struct drm_device *dev, uint32_t fb_width,
 	struct device *device = &dev->pdev->dev;
 	int size, ret;
 
-	mode_cmd.width = surface_width;
-	mode_cmd.height = surface_height;
+	mode_cmd.width = sizes->surface_width;
+	mode_cmd.height = sizes->surface_height;
 
-	mode_cmd.bpp = surface_bpp;
+	mode_cmd.bpp = sizes->surface_bpp;
 	mode_cmd.pitch = mode_cmd.width * (mode_cmd.bpp >> 3);
 	mode_cmd.pitch = roundup(mode_cmd.pitch, 256);
-	mode_cmd.depth = surface_depth;
+	mode_cmd.depth = sizes->surface_depth;
 
 	size = mode_cmd.pitch * mode_cmd.height;
 	size = roundup(size, PAGE_SIZE);
@@ -268,31 +257,28 @@ nouveau_fbcon_create(struct drm_device *dev, uint32_t fb_width,
 
 	mutex_lock(&dev->struct_mutex);
 
-	fb = nouveau_framebuffer_create(dev, nvbo, &mode_cmd);
-	if (!fb) {
+	info = framebuffer_alloc(0, device);
+	if (!info) {
 		ret = -ENOMEM;
-		NV_ERROR(dev, "failed to allocate fb.\n");
 		goto out_unref;
 	}
 
-	list_add(&fb->filp_head, &dev->mode_config.fb_kernel_list);
-
-	nouveau_fb = nouveau_framebuffer(fb);
-	*pfb = fb;
-
-	info = framebuffer_alloc(sizeof(struct nouveau_fbcon_par), device);
-	if (!info) {
+	ret = fb_alloc_cmap(&info->cmap, 256, 0);
+	if (ret) {
 		ret = -ENOMEM;
 		goto out_unref;
 	}
 
-	par = info->par;
-	par->helper.funcs = &nouveau_fbcon_helper_funcs;
-	par->helper.dev = dev;
-	ret = drm_fb_helper_init_crtc_count(&par->helper, 2, 4);
-	if (ret)
-		goto out_unref;
-	dev_priv->fbdev_info = info;
+	info->par = nfbdev;
+
+	nouveau_framebuffer_init(dev, &nfbdev->nouveau_fb, &mode_cmd, nvbo);
+
+	nouveau_fb = &nfbdev->nouveau_fb;
+	fb = &nouveau_fb->base;
+
+	/* setup helper */
+	nfbdev->helper.fb = fb;
+	nfbdev->helper.fbdev = info;
 
 	strcpy(info->fix.id, "nouveaufb");
 	if (nouveau_nofbaccel)
@@ -310,7 +296,7 @@ nouveau_fbcon_create(struct drm_device *dev, uint32_t fb_width,
 	info->screen_size = size;
 
 	drm_fb_helper_fill_fix(info, fb->pitch, fb->depth);
-	drm_fb_helper_fill_var(info, fb, fb_width, fb_height);
+	drm_fb_helper_fill_var(info, &nfbdev->helper, sizes->fb_width, sizes->fb_height);
 
 	/* FIXME: we really shouldn't expose mmio space at all */
 	info->fix.mmio_start = pci_resource_start(dev->pdev, 1);
@@ -343,11 +329,6 @@ nouveau_fbcon_create(struct drm_device *dev, uint32_t fb_width,
 	info->pixmap.flags = FB_PIXMAP_SYSTEM;
 	info->pixmap.scan_align = 1;
 
-	fb->fbdev = info;
-
-	par->nouveau_fb = nouveau_fb;
-	par->dev = dev;
-
 	if (dev_priv->channel && !nouveau_nofbaccel) {
 		switch (dev_priv->card_type) {
 		case NV_50:
@@ -361,7 +342,7 @@ nouveau_fbcon_create(struct drm_device *dev, uint32_t fb_width,
 		};
 	}
 
-	nouveau_fbcon_zfill(dev);
+	nouveau_fbcon_zfill(dev, nfbdev);
 
 	/* To allow resizeing without swapping buffers */
 	NV_INFO(dev, "allocated %dx%d fb: 0x%lx, bo %p\n",
@@ -379,44 +360,129 @@ out:
 	return ret;
 }
 
-int
-nouveau_fbcon_probe(struct drm_device *dev)
+static int
+nouveau_fbcon_find_or_create_single(struct drm_fb_helper *helper,
+				    struct drm_fb_helper_surface_size *sizes)
 {
-	NV_DEBUG_KMS(dev, "\n");
+	struct nouveau_fbdev *nfbdev = (struct nouveau_fbdev *)helper;
+	int new_fb = 0;
+	int ret;
+
+	if (!helper->fb) {
+		ret = nouveau_fbcon_create(nfbdev, sizes);
+		if (ret)
+			return ret;
+		new_fb = 1;
+	}
+	return new_fb;
+}
 
-	return drm_fb_helper_single_fb_probe(dev, 32, nouveau_fbcon_create);
+void nouveau_fbcon_hotplug(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	drm_helper_fb_hpd_irq_event(&dev_priv->nfbdev->helper);
+}
+
+static void nouveau_fbcon_output_status_changed(struct drm_fb_helper *fb_helper)
+{
+	drm_helper_fb_hotplug_event(fb_helper, true);
 }
 
 int
-nouveau_fbcon_remove(struct drm_device *dev, struct drm_framebuffer *fb)
+nouveau_fbcon_destroy(struct drm_device *dev, struct nouveau_fbdev *nfbdev)
 {
-	struct nouveau_framebuffer *nouveau_fb = nouveau_framebuffer(fb);
+	struct nouveau_framebuffer *nouveau_fb = &nfbdev->nouveau_fb;
 	struct fb_info *info;
 
-	if (!fb)
-		return -EINVAL;
-
-	info = fb->fbdev;
-	if (info) {
-		struct nouveau_fbcon_par *par = info->par;
-
+	if (nfbdev->helper.fbdev) {
+		info = nfbdev->helper.fbdev;
 		unregister_framebuffer(info);
+		if (info->cmap.len)
+			fb_dealloc_cmap(&info->cmap);
+		framebuffer_release(info);
+	}
+
+	if (nouveau_fb->nvbo) {
 		nouveau_bo_unmap(nouveau_fb->nvbo);
 		drm_gem_object_unreference_unlocked(nouveau_fb->nvbo->gem);
 		nouveau_fb->nvbo = NULL;
-		if (par)
-			drm_fb_helper_free(&par->helper);
-		framebuffer_release(info);
 	}
-
+	drm_fb_helper_fini(&nfbdev->helper);
+	drm_framebuffer_cleanup(&nouveau_fb->base);
 	return 0;
 }
 
 void nouveau_fbcon_gpu_lockup(struct fb_info *info)
 {
-	struct nouveau_fbcon_par *par = info->par;
-	struct drm_device *dev = par->dev;
+	struct nouveau_fbdev *nfbdev = info->par;
+	struct drm_device *dev = nfbdev->dev;
 
 	NV_ERROR(dev, "GPU lockup - switching to software fbcon\n");
 	info->flags |= FBINFO_HWACCEL_DISABLED;
 }
+
+static struct drm_fb_helper_funcs nouveau_fbcon_helper_funcs = {
+	.gamma_set = nouveau_fbcon_gamma_set,
+	.gamma_get = nouveau_fbcon_gamma_get,
+	.fb_probe = nouveau_fbcon_find_or_create_single,
+	.fb_output_status_changed = nouveau_fbcon_output_status_changed,
+};
+
+
+int nouveau_fbcon_init(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_fbdev *nfbdev;
+
+	nfbdev = kzalloc(sizeof(struct nouveau_fbdev), GFP_KERNEL);
+	if (!nfbdev)
+		return -ENOMEM;
+
+	nfbdev->dev = dev;
+	dev_priv->nfbdev = nfbdev;
+	nfbdev->helper.funcs = &nouveau_fbcon_helper_funcs;
+
+	drm_fb_helper_init(dev, &nfbdev->helper,
+			   2, 4, true);
+	drm_fb_helper_single_add_all_connectors(&nfbdev->helper);
+	drm_fb_helper_initial_config(&nfbdev->helper, 32);
+	return 0;
+}
+
+void nouveau_fbcon_fini(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+	if (!dev_priv->nfbdev)
+		return;
+
+	nouveau_fbcon_destroy(dev, dev_priv->nfbdev);
+	kfree(dev_priv->nfbdev);
+	dev_priv->nfbdev = NULL;
+}
+
+void nouveau_fbcon_save_disable_accel(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+	dev_priv->nfbdev->saved_flags = dev_priv->nfbdev->helper.fbdev->flags;
+	dev_priv->nfbdev->helper.fbdev->flags |= FBINFO_HWACCEL_DISABLED;
+}
+
+void nouveau_fbcon_restore_accel(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	dev_priv->nfbdev->helper.fbdev->flags = dev_priv->nfbdev->saved_flags;
+}
+
+void nouveau_fbcon_set_suspend(struct drm_device *dev, int state)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	fb_set_suspend(dev_priv->nfbdev->helper.fbdev, state);
+}
+
+void nouveau_fbcon_zfill_all(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	nouveau_fbcon_zfill(dev, dev_priv->nfbdev);
+}
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.h b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
index f9c34e1a8c11..bf8e00d4de65 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
@@ -29,16 +29,16 @@
 
 #include "drm_fb_helper.h"
 
-struct nouveau_fbcon_par {
+#include "nouveau_fb.h"
+struct nouveau_fbdev {
 	struct drm_fb_helper helper;
+	struct nouveau_framebuffer nouveau_fb;
+	struct list_head fbdev_list;
 	struct drm_device *dev;
-	struct nouveau_framebuffer *nouveau_fb;
+	unsigned int saved_flags;
 };
 
-int nouveau_fbcon_probe(struct drm_device *dev);
-int nouveau_fbcon_remove(struct drm_device *dev, struct drm_framebuffer *fb);
 void nouveau_fbcon_restore(void);
-void nouveau_fbcon_zfill(struct drm_device *dev);
 
 void nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region);
 void nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect);
@@ -50,5 +50,14 @@ void nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image);
 int nv50_fbcon_accel_init(struct fb_info *info);
 
 void nouveau_fbcon_gpu_lockup(struct fb_info *info);
+
+int nouveau_fbcon_init(struct drm_device *dev);
+void nouveau_fbcon_fini(struct drm_device *dev);
+void nouveau_fbcon_set_suspend(struct drm_device *dev, int state);
+void nouveau_fbcon_zfill_all(struct drm_device *dev);
+void nouveau_fbcon_save_disable_accel(struct drm_device *dev);
+void nouveau_fbcon_restore_accel(struct drm_device *dev);
+
+void nouveau_fbcon_hotplug(struct drm_device *dev);
 #endif /* __NV50_FBCON_H__ */
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 1bc0b38a5167..69c76cf93407 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -57,6 +57,9 @@ nouveau_gem_object_del(struct drm_gem_object *gem)
 	}
 
 	ttm_bo_unref(&bo);
+
+	drm_gem_object_release(gem);
+	kfree(gem);
 }
 
 int
@@ -382,7 +385,7 @@ validate_list(struct nouveau_channel *chan, struct list_head *list,
 
 		nvbo->channel = chan;
 		ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement,
-				      false, false);
+				      false, false, false);
 		nvbo->channel = NULL;
 		if (unlikely(ret)) {
 			NV_ERROR(dev, "fail ttm_validate\n");
diff --git a/drivers/gpu/drm/nouveau/nouveau_irq.c b/drivers/gpu/drm/nouveau/nouveau_irq.c
index 13e73cee4c44..53360f156063 100644
--- a/drivers/gpu/drm/nouveau/nouveau_irq.c
+++ b/drivers/gpu/drm/nouveau/nouveau_irq.c
@@ -1204,7 +1204,7 @@ nouveau_irq_handler(DRM_IRQ_ARGS)
 {
 	struct drm_device *dev = (struct drm_device *)arg;
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	uint32_t status, fbdev_flags = 0;
+	uint32_t status;
 	unsigned long flags;
 
 	status = nv_rd32(dev, NV03_PMC_INTR_0);
@@ -1213,11 +1213,6 @@ nouveau_irq_handler(DRM_IRQ_ARGS)
 
 	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
 
-	if (dev_priv->fbdev_info) {
-		fbdev_flags = dev_priv->fbdev_info->flags;
-		dev_priv->fbdev_info->flags |= FBINFO_HWACCEL_DISABLED;
-	}
-
 	if (status & NV_PMC_INTR_0_PFIFO_PENDING) {
 		nouveau_fifo_irq_handler(dev);
 		status &= ~NV_PMC_INTR_0_PFIFO_PENDING;
@@ -1247,9 +1242,6 @@ nouveau_irq_handler(DRM_IRQ_ARGS)
 	if (status)
 		NV_ERROR(dev, "Unhandled PMC INTR status bits 0x%08x\n", status);
 
-	if (dev_priv->fbdev_info)
-		dev_priv->fbdev_info->flags = fbdev_flags;
-
 	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
 
 	return IRQ_HANDLED;
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
index e1710640a278..92100a9678ba 100644
--- a/drivers/gpu/drm/nouveau/nouveau_state.c
+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
@@ -34,6 +34,7 @@
 
 #include "nouveau_drv.h"
 #include "nouveau_drm.h"
+#include "nouveau_fbcon.h"
 #include "nv50_display.h"
 
 static void nouveau_stub_takedown(struct drm_device *dev) {}
@@ -516,7 +517,7 @@ nouveau_card_init(struct drm_device *dev)
 	dev_priv->init_state = NOUVEAU_CARD_INIT_DONE;
 
 	if (drm_core_check_feature(dev, DRIVER_MODESET))
-		drm_helper_initial_config(dev);
+		nouveau_fbcon_init(dev);
 
 	return 0;
 
@@ -563,6 +564,7 @@ static void nouveau_card_takedown(struct drm_device *dev)
 	NV_DEBUG(dev, "prev state = %d\n", dev_priv->init_state);
 
 	if (dev_priv->init_state != NOUVEAU_CARD_INIT_DOWN) {
+
 		nouveau_backlight_exit(dev);
 
 		if (dev_priv->channel) {
@@ -794,6 +796,7 @@ int nouveau_unload(struct drm_device *dev)
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 
 	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+		nouveau_fbcon_fini(dev);
 		if (dev_priv->card_type >= NV_50)
 			nv50_display_destroy(dev);
 		else
diff --git a/drivers/gpu/drm/nouveau/nv04_fbcon.c b/drivers/gpu/drm/nouveau/nv04_fbcon.c
index 813b25cec726..603090ee6ac7 100644
--- a/drivers/gpu/drm/nouveau/nv04_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv04_fbcon.c
@@ -30,8 +30,8 @@
 void
 nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
 {
-	struct nouveau_fbcon_par *par = info->par;
-	struct drm_device *dev = par->dev;
+	struct nouveau_fbdev *nfbdev = info->par;
+	struct drm_device *dev = nfbdev->dev;
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_channel *chan = dev_priv->channel;
 
@@ -57,8 +57,8 @@ nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
 void
 nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
 {
-	struct nouveau_fbcon_par *par = info->par;
-	struct drm_device *dev = par->dev;
+	struct nouveau_fbdev *nfbdev = info->par;
+	struct drm_device *dev = nfbdev->dev;
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_channel *chan = dev_priv->channel;
 
@@ -91,8 +91,8 @@ nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
91void 91void
92nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) 92nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
93{ 93{
94 struct nouveau_fbcon_par *par = info->par; 94 struct nouveau_fbdev *nfbdev = info->par;
95 struct drm_device *dev = par->dev; 95 struct drm_device *dev = nfbdev->dev;
96 struct drm_nouveau_private *dev_priv = dev->dev_private; 96 struct drm_nouveau_private *dev_priv = dev->dev_private;
97 struct nouveau_channel *chan = dev_priv->channel; 97 struct nouveau_channel *chan = dev_priv->channel;
98 uint32_t fg; 98 uint32_t fg;
@@ -179,8 +179,8 @@ nv04_fbcon_grobj_new(struct drm_device *dev, int class, uint32_t handle)
179int 179int
180nv04_fbcon_accel_init(struct fb_info *info) 180nv04_fbcon_accel_init(struct fb_info *info)
181{ 181{
182 struct nouveau_fbcon_par *par = info->par; 182 struct nouveau_fbdev *nfbdev = info->par;
183 struct drm_device *dev = par->dev; 183 struct drm_device *dev = nfbdev->dev;
184 struct drm_nouveau_private *dev_priv = dev->dev_private; 184 struct drm_nouveau_private *dev_priv = dev->dev_private;
185 struct nouveau_channel *chan = dev_priv->channel; 185 struct nouveau_channel *chan = dev_priv->channel;
186 const int sub = NvSubCtxSurf2D; 186 const int sub = NvSubCtxSurf2D;
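
Throughout these accel hooks, fb_info::par now carries a struct nouveau_fbdev instead of the old nouveau_fbcon_par; the drm_device is still reached through a back-pointer. A minimal sketch of that lookup, with the structures reduced to the fields the hunks actually use:

    /* Sketch: reduced structures; the real definitions are in nouveau_fbcon.h. */
    struct drm_device { void *dev_private; };
    struct nouveau_fbdev { struct drm_device *dev; };
    struct fb_info { void *par; };

    static struct drm_device *dev_from_fb_info(struct fb_info *info)
    {
    	struct nouveau_fbdev *nfbdev = info->par;  /* stored at fbdev creation */

    	return nfbdev->dev;
    }
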
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index 649db4c1b690..f9b304866e66 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -29,6 +29,7 @@
29#include "nouveau_encoder.h" 29#include "nouveau_encoder.h"
30#include "nouveau_connector.h" 30#include "nouveau_connector.h"
31#include "nouveau_fb.h" 31#include "nouveau_fb.h"
32#include "nouveau_fbcon.h"
32#include "drm_crtc_helper.h" 33#include "drm_crtc_helper.h"
33 34
34static void 35static void
@@ -945,6 +946,8 @@ nv50_display_irq_hotplug_bh(struct work_struct *work)
945 nv_wr32(dev, 0xe054, nv_rd32(dev, 0xe054)); 946 nv_wr32(dev, 0xe054, nv_rd32(dev, 0xe054));
946 if (dev_priv->chipset >= 0x90) 947 if (dev_priv->chipset >= 0x90)
947 nv_wr32(dev, 0xe074, nv_rd32(dev, 0xe074)); 948 nv_wr32(dev, 0xe074, nv_rd32(dev, 0xe074));
949
950 nouveau_fbcon_hotplug(dev);
948} 951}
949 952
950void 953void
diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c
index a8c70e7e9184..6bf025c6fc6f 100644
--- a/drivers/gpu/drm/nouveau/nv50_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c
@@ -6,8 +6,8 @@
6void 6void
7nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect) 7nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
8{ 8{
9 struct nouveau_fbcon_par *par = info->par; 9 struct nouveau_fbdev *nfbdev = info->par;
10 struct drm_device *dev = par->dev; 10 struct drm_device *dev = nfbdev->dev;
11 struct drm_nouveau_private *dev_priv = dev->dev_private; 11 struct drm_nouveau_private *dev_priv = dev->dev_private;
12 struct nouveau_channel *chan = dev_priv->channel; 12 struct nouveau_channel *chan = dev_priv->channel;
13 13
@@ -49,8 +49,8 @@ nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
49void 49void
50nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region) 50nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
51{ 51{
52 struct nouveau_fbcon_par *par = info->par; 52 struct nouveau_fbdev *nfbdev = info->par;
53 struct drm_device *dev = par->dev; 53 struct drm_device *dev = nfbdev->dev;
54 struct drm_nouveau_private *dev_priv = dev->dev_private; 54 struct drm_nouveau_private *dev_priv = dev->dev_private;
55 struct nouveau_channel *chan = dev_priv->channel; 55 struct nouveau_channel *chan = dev_priv->channel;
56 56
@@ -84,8 +84,8 @@ nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
84void 84void
85nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) 85nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
86{ 86{
87 struct nouveau_fbcon_par *par = info->par; 87 struct nouveau_fbdev *nfbdev = info->par;
88 struct drm_device *dev = par->dev; 88 struct drm_device *dev = nfbdev->dev;
89 struct drm_nouveau_private *dev_priv = dev->dev_private; 89 struct drm_nouveau_private *dev_priv = dev->dev_private;
90 struct nouveau_channel *chan = dev_priv->channel; 90 struct nouveau_channel *chan = dev_priv->channel;
91 uint32_t width, dwords, *data = (uint32_t *)image->data; 91 uint32_t width, dwords, *data = (uint32_t *)image->data;
@@ -152,8 +152,8 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
152int 152int
153nv50_fbcon_accel_init(struct fb_info *info) 153nv50_fbcon_accel_init(struct fb_info *info)
154{ 154{
155 struct nouveau_fbcon_par *par = info->par; 155 struct nouveau_fbdev *nfbdev = info->par;
156 struct drm_device *dev = par->dev; 156 struct drm_device *dev = nfbdev->dev;
157 struct drm_nouveau_private *dev_priv = dev->dev_private; 157 struct drm_nouveau_private *dev_priv = dev->dev_private;
158 struct nouveau_channel *chan = dev_priv->channel; 158 struct nouveau_channel *chan = dev_priv->channel;
159 struct nouveau_gpuobj *eng2d = NULL; 159 struct nouveau_gpuobj *eng2d = NULL;
diff --git a/drivers/gpu/drm/radeon/atombios.h b/drivers/gpu/drm/radeon/atombios.h
index 27e2c715be11..2ebcb979dd7e 100644
--- a/drivers/gpu/drm/radeon/atombios.h
+++ b/drivers/gpu/drm/radeon/atombios.h
@@ -5742,6 +5742,9 @@ typedef struct _ATOM_PPLIB_THERMALCONTROLLER
5742#define ATOM_PP_THERMALCONTROLLER_RV6xx 7 5742#define ATOM_PP_THERMALCONTROLLER_RV6xx 7
5743#define ATOM_PP_THERMALCONTROLLER_RV770 8 5743#define ATOM_PP_THERMALCONTROLLER_RV770 8
5744#define ATOM_PP_THERMALCONTROLLER_ADT7473 9 5744#define ATOM_PP_THERMALCONTROLLER_ADT7473 9
5745#define ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO 11
5746#define ATOM_PP_THERMALCONTROLLER_EVERGREEN 12
5747#define ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL 0x89 // ADT7473 Fan Control + Internal Thermal Controller
5745 5748
5746typedef struct _ATOM_PPLIB_STATE 5749typedef struct _ATOM_PPLIB_STATE
5747{ 5750{
@@ -5749,6 +5752,26 @@ typedef struct _ATOM_PPLIB_STATE
5749 UCHAR ucClockStateIndices[1]; // variable-sized 5752 UCHAR ucClockStateIndices[1]; // variable-sized
5750} ATOM_PPLIB_STATE; 5753} ATOM_PPLIB_STATE;
5751 5754
5755typedef struct _ATOM_PPLIB_FANTABLE
5756{
5757 UCHAR ucFanTableFormat; // Change this if the table format changes or version changes so that the other fields are not the same.
5758 UCHAR ucTHyst; // Temperature hysteresis. Integer.
5759 USHORT usTMin; // The temperature, in 0.01 centigrades, below which we just run at a minimal PWM.
5760 USHORT usTMed; // The middle temperature where we change slopes.
5761 USHORT usTHigh; // The high point above TMed for adjusting the second slope.
5762 USHORT usPWMMin; // The minimum PWM value in percent (0.01% increments).
5763 USHORT usPWMMed; // The PWM value (in percent) at TMed.
5764 USHORT usPWMHigh; // The PWM value at THigh.
5765} ATOM_PPLIB_FANTABLE;
5766
5767typedef struct _ATOM_PPLIB_EXTENDEDHEADER
5768{
5769 USHORT usSize;
5770 ULONG ulMaxEngineClock; // For Overdrive.
5771 ULONG ulMaxMemoryClock; // For Overdrive.
5772 // Add extra system parameters here, always adjust size to include all fields.
5773} ATOM_PPLIB_EXTENDEDHEADER;
5774
5752//// ATOM_PPLIB_POWERPLAYTABLE::ulPlatformCaps 5775//// ATOM_PPLIB_POWERPLAYTABLE::ulPlatformCaps
5753#define ATOM_PP_PLATFORM_CAP_BACKBIAS 1 5776#define ATOM_PP_PLATFORM_CAP_BACKBIAS 1
5754#define ATOM_PP_PLATFORM_CAP_POWERPLAY 2 5777#define ATOM_PP_PLATFORM_CAP_POWERPLAY 2
@@ -5762,6 +5785,12 @@ typedef struct _ATOM_PPLIB_STATE
5762#define ATOM_PP_PLATFORM_CAP_SIDEPORTCONTROL 512 5785#define ATOM_PP_PLATFORM_CAP_SIDEPORTCONTROL 512
5763#define ATOM_PP_PLATFORM_CAP_TURNOFFPLL_ASPML1 1024 5786#define ATOM_PP_PLATFORM_CAP_TURNOFFPLL_ASPML1 1024
5764#define ATOM_PP_PLATFORM_CAP_HTLINKCONTROL 2048 5787#define ATOM_PP_PLATFORM_CAP_HTLINKCONTROL 2048
5788#define ATOM_PP_PLATFORM_CAP_MVDDCONTROL 4096
5789#define ATOM_PP_PLATFORM_CAP_GOTO_BOOT_ON_ALERT 0x2000 // Go to boot state on alerts, e.g. on an AC->DC transition.
5790#define ATOM_PP_PLATFORM_CAP_DONT_WAIT_FOR_VBLANK_ON_ALERT 0x4000 // Do NOT wait for VBLANK during an alert (e.g. AC->DC transition).
5791#define ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL 0x8000 // Does the driver control VDDCI independently from VDDC.
5792#define ATOM_PP_PLATFORM_CAP_REGULATOR_HOT 0x00010000 // Enable the 'regulator hot' feature.
 5793#define ATOM_PP_PLATFORM_CAP_BACO 0x00020000 // Whether the driver supports the BACO state.
5765 5794
5766typedef struct _ATOM_PPLIB_POWERPLAYTABLE 5795typedef struct _ATOM_PPLIB_POWERPLAYTABLE
5767{ 5796{
@@ -5797,6 +5826,21 @@ typedef struct _ATOM_PPLIB_POWERPLAYTABLE
5797 5826
5798} ATOM_PPLIB_POWERPLAYTABLE; 5827} ATOM_PPLIB_POWERPLAYTABLE;
5799 5828
5829typedef struct _ATOM_PPLIB_POWERPLAYTABLE2
5830{
5831 ATOM_PPLIB_POWERPLAYTABLE basicTable;
5832 UCHAR ucNumCustomThermalPolicy;
5833 USHORT usCustomThermalPolicyArrayOffset;
5834}ATOM_PPLIB_POWERPLAYTABLE2, *LPATOM_PPLIB_POWERPLAYTABLE2;
5835
5836typedef struct _ATOM_PPLIB_POWERPLAYTABLE3
5837{
5838 ATOM_PPLIB_POWERPLAYTABLE2 basicTable2;
5839 USHORT usFormatID; // To be used ONLY by PPGen.
5840 USHORT usFanTableOffset;
5841 USHORT usExtendendedHeaderOffset;
5842} ATOM_PPLIB_POWERPLAYTABLE3, *LPATOM_PPLIB_POWERPLAYTABLE3;
5843
5800//// ATOM_PPLIB_NONCLOCK_INFO::usClassification 5844//// ATOM_PPLIB_NONCLOCK_INFO::usClassification
5801#define ATOM_PPLIB_CLASSIFICATION_UI_MASK 0x0007 5845#define ATOM_PPLIB_CLASSIFICATION_UI_MASK 0x0007
5802#define ATOM_PPLIB_CLASSIFICATION_UI_SHIFT 0 5846#define ATOM_PPLIB_CLASSIFICATION_UI_SHIFT 0
@@ -5816,7 +5860,9 @@ typedef struct _ATOM_PPLIB_POWERPLAYTABLE
5816#define ATOM_PPLIB_CLASSIFICATION_UVDSTATE 0x0400 5860#define ATOM_PPLIB_CLASSIFICATION_UVDSTATE 0x0400
5817#define ATOM_PPLIB_CLASSIFICATION_3DLOW 0x0800 5861#define ATOM_PPLIB_CLASSIFICATION_3DLOW 0x0800
5818#define ATOM_PPLIB_CLASSIFICATION_ACPI 0x1000 5862#define ATOM_PPLIB_CLASSIFICATION_ACPI 0x1000
5819// remaining 3 bits are reserved 5863#define ATOM_PPLIB_CLASSIFICATION_HD2STATE 0x2000
5864#define ATOM_PPLIB_CLASSIFICATION_HDSTATE 0x4000
5865#define ATOM_PPLIB_CLASSIFICATION_SDSTATE 0x8000
5820 5866
5821//// ATOM_PPLIB_NONCLOCK_INFO::ulCapsAndSettings 5867//// ATOM_PPLIB_NONCLOCK_INFO::ulCapsAndSettings
5822#define ATOM_PPLIB_SINGLE_DISPLAY_ONLY 0x00000001 5868#define ATOM_PPLIB_SINGLE_DISPLAY_ONLY 0x00000001
@@ -5840,9 +5886,15 @@ typedef struct _ATOM_PPLIB_POWERPLAYTABLE
5840 5886
5841#define ATOM_PPLIB_SOFTWARE_DISABLE_LOADBALANCING 0x00001000 5887#define ATOM_PPLIB_SOFTWARE_DISABLE_LOADBALANCING 0x00001000
5842#define ATOM_PPLIB_SOFTWARE_ENABLE_SLEEP_FOR_TIMESTAMPS 0x00002000 5888#define ATOM_PPLIB_SOFTWARE_ENABLE_SLEEP_FOR_TIMESTAMPS 0x00002000
5889#define ATOM_PPLIB_DISALLOW_ON_DC 0x00004000
5843#define ATOM_PPLIB_ENABLE_VARIBRIGHT 0x00008000 5890#define ATOM_PPLIB_ENABLE_VARIBRIGHT 0x00008000
5844 5891
5845#define ATOM_PPLIB_DISALLOW_ON_DC 0x00004000 5892//memory related flags
 5893#define ATOM_PPLIB_SWSTATE_MEMORY_DLL_OFF 0x00010000
5894
 5895//M3 Arb: 2 bits; currently 3 sets of parameters in total
5896#define ATOM_PPLIB_M3ARB_MASK 0x00060000
5897#define ATOM_PPLIB_M3ARB_SHIFT 17
5846 5898
5847// Contained in an array starting at the offset 5899// Contained in an array starting at the offset
5848// in ATOM_PPLIB_POWERPLAYTABLE::usNonClockInfoArrayOffset. 5900// in ATOM_PPLIB_POWERPLAYTABLE::usNonClockInfoArrayOffset.
@@ -5860,6 +5912,9 @@ typedef struct _ATOM_PPLIB_NONCLOCK_INFO
5860// Contained in an array starting at the offset 5912// Contained in an array starting at the offset
5861// in ATOM_PPLIB_POWERPLAYTABLE::usClockInfoArrayOffset. 5913// in ATOM_PPLIB_POWERPLAYTABLE::usClockInfoArrayOffset.
5862// referenced from ATOM_PPLIB_STATE::ucClockStateIndices 5914// referenced from ATOM_PPLIB_STATE::ucClockStateIndices
5915#define ATOM_PPLIB_NONCLOCKINFO_VER1 12
5916#define ATOM_PPLIB_NONCLOCKINFO_VER2 24
5917
5863typedef struct _ATOM_PPLIB_R600_CLOCK_INFO 5918typedef struct _ATOM_PPLIB_R600_CLOCK_INFO
5864{ 5919{
5865 USHORT usEngineClockLow; 5920 USHORT usEngineClockLow;
@@ -5882,6 +5937,23 @@ typedef struct _ATOM_PPLIB_R600_CLOCK_INFO
5882#define ATOM_PPLIB_R600_FLAGS_BACKBIASENABLE 4 5937#define ATOM_PPLIB_R600_FLAGS_BACKBIASENABLE 4
5883#define ATOM_PPLIB_R600_FLAGS_MEMORY_ODT_OFF 8 5938#define ATOM_PPLIB_R600_FLAGS_MEMORY_ODT_OFF 8
5884#define ATOM_PPLIB_R600_FLAGS_MEMORY_DLL_OFF 16 5939#define ATOM_PPLIB_R600_FLAGS_MEMORY_DLL_OFF 16
5940#define ATOM_PPLIB_R600_FLAGS_LOWPOWER 32 // On the RV770 use 'low power' setting (sequencer S0).
5941
5942typedef struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO
5943{
5944 USHORT usEngineClockLow;
5945 UCHAR ucEngineClockHigh;
5946
5947 USHORT usMemoryClockLow;
5948 UCHAR ucMemoryClockHigh;
5949
5950 USHORT usVDDC;
5951 USHORT usVDDCI;
5952 USHORT usUnused;
5953
5954 ULONG ulFlags; // ATOM_PPLIB_R600_FLAGS_*
5955
5956} ATOM_PPLIB_EVERGREEN_CLOCK_INFO;
5885 5957
5886typedef struct _ATOM_PPLIB_RS780_CLOCK_INFO 5958typedef struct _ATOM_PPLIB_RS780_CLOCK_INFO
5887 5959
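
As with the R600 clock info, the Evergreen engine and memory clocks are split into a 16-bit low word and an 8-bit high byte. A sketch of the reassembly (byte-swapping omitted; the PPLIB fields are little-endian):

    /* Sketch: rebuild the 24-bit clocks from the split PPLIB fields. */
    static unsigned long evergreen_sclk(const ATOM_PPLIB_EVERGREEN_CLOCK_INFO *ci)
    {
    	return ((unsigned long)ci->ucEngineClockHigh << 16) | ci->usEngineClockLow;
    }

    static unsigned long evergreen_mclk(const ATOM_PPLIB_EVERGREEN_CLOCK_INFO *ci)
    {
    	return ((unsigned long)ci->ucMemoryClockHigh << 16) | ci->usMemoryClockLow;
    }
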
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index a87990b3ae84..3feca6aec4c4 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -249,17 +249,13 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode)
249 if (ASIC_IS_DCE3(rdev)) 249 if (ASIC_IS_DCE3(rdev))
250 atombios_enable_crtc_memreq(crtc, ATOM_ENABLE); 250 atombios_enable_crtc_memreq(crtc, ATOM_ENABLE);
251 atombios_blank_crtc(crtc, ATOM_DISABLE); 251 atombios_blank_crtc(crtc, ATOM_DISABLE);
252 /* XXX re-enable when interrupt support is added */ 252 drm_vblank_post_modeset(dev, radeon_crtc->crtc_id);
253 if (!ASIC_IS_DCE4(rdev))
254 drm_vblank_post_modeset(dev, radeon_crtc->crtc_id);
255 radeon_crtc_load_lut(crtc); 253 radeon_crtc_load_lut(crtc);
256 break; 254 break;
257 case DRM_MODE_DPMS_STANDBY: 255 case DRM_MODE_DPMS_STANDBY:
258 case DRM_MODE_DPMS_SUSPEND: 256 case DRM_MODE_DPMS_SUSPEND:
259 case DRM_MODE_DPMS_OFF: 257 case DRM_MODE_DPMS_OFF:
260 /* XXX re-enable when interrupt support is added */ 258 drm_vblank_pre_modeset(dev, radeon_crtc->crtc_id);
261 if (!ASIC_IS_DCE4(rdev))
262 drm_vblank_pre_modeset(dev, radeon_crtc->crtc_id);
263 atombios_blank_crtc(crtc, ATOM_ENABLE); 259 atombios_blank_crtc(crtc, ATOM_ENABLE);
264 if (ASIC_IS_DCE3(rdev)) 260 if (ASIC_IS_DCE3(rdev))
265 atombios_enable_crtc_memreq(crtc, ATOM_DISABLE); 261 atombios_enable_crtc_memreq(crtc, ATOM_DISABLE);
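
With Evergreen interrupt support now in place (below), the DCE4 special case is dropped and the usual vblank bracketing applies again: drm_vblank_pre_modeset() before the CRTC is blanked, drm_vblank_post_modeset() once it is running again, so the software vblank counter can compensate for the hardware counter stalling. The pattern, in outline:

    /* Sketch: the bracketing the two DPMS branches above restore. */
    drm_vblank_pre_modeset(dev, radeon_crtc->crtc_id);   /* counter may stop */
    /* ... blank / reprogram / unblank the CRTC ... */
    drm_vblank_post_modeset(dev, radeon_crtc->crtc_id);  /* re-sync counter */
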
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index e8f447e20507..b3d168fb89e5 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -28,39 +28,194 @@
28#include "radeon.h" 28#include "radeon.h"
29#include "radeon_asic.h" 29#include "radeon_asic.h"
30#include "radeon_drm.h" 30#include "radeon_drm.h"
31#include "rv770d.h" 31#include "evergreend.h"
32#include "atom.h" 32#include "atom.h"
33#include "avivod.h" 33#include "avivod.h"
34#include "evergreen_reg.h" 34#include "evergreen_reg.h"
35 35
36#define EVERGREEN_PFP_UCODE_SIZE 1120
37#define EVERGREEN_PM4_UCODE_SIZE 1376
38
36static void evergreen_gpu_init(struct radeon_device *rdev); 39static void evergreen_gpu_init(struct radeon_device *rdev);
37void evergreen_fini(struct radeon_device *rdev); 40void evergreen_fini(struct radeon_device *rdev);
38 41
39bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd) 42bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
40{ 43{
41 bool connected = false; 44 bool connected = false;
42 /* XXX */ 45
46 switch (hpd) {
47 case RADEON_HPD_1:
48 if (RREG32(DC_HPD1_INT_STATUS) & DC_HPDx_SENSE)
49 connected = true;
50 break;
51 case RADEON_HPD_2:
52 if (RREG32(DC_HPD2_INT_STATUS) & DC_HPDx_SENSE)
53 connected = true;
54 break;
55 case RADEON_HPD_3:
56 if (RREG32(DC_HPD3_INT_STATUS) & DC_HPDx_SENSE)
57 connected = true;
58 break;
59 case RADEON_HPD_4:
60 if (RREG32(DC_HPD4_INT_STATUS) & DC_HPDx_SENSE)
61 connected = true;
62 break;
63 case RADEON_HPD_5:
64 if (RREG32(DC_HPD5_INT_STATUS) & DC_HPDx_SENSE)
65 connected = true;
66 break;
67 case RADEON_HPD_6:
68 if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE)
69 connected = true;
70 break;
71 default:
72 break;
73 }
74
43 return connected; 75 return connected;
44} 76}
45 77
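
Each hot-plug pad has its own DC_HPDn_INT_STATUS register, and DC_HPDx_SENSE reads back the live connect state. The six-way switch could equally be table-driven; a sketch, assuming the RADEON_HPD_n enum values count from 0:

    /* Sketch: table-driven form of the sense switch above. */
    static bool hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
    {
    	static const u32 status_reg[6] = {
    		DC_HPD1_INT_STATUS, DC_HPD2_INT_STATUS, DC_HPD3_INT_STATUS,
    		DC_HPD4_INT_STATUS, DC_HPD5_INT_STATUS, DC_HPD6_INT_STATUS,
    	};

    	if (hpd >= 6)
    		return false;  /* RADEON_HPD_NONE etc. */
    	return (RREG32(status_reg[hpd]) & DC_HPDx_SENSE) != 0;
    }
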
46void evergreen_hpd_set_polarity(struct radeon_device *rdev, 78void evergreen_hpd_set_polarity(struct radeon_device *rdev,
47 enum radeon_hpd_id hpd) 79 enum radeon_hpd_id hpd)
48{ 80{
49 /* XXX */ 81 u32 tmp;
82 bool connected = evergreen_hpd_sense(rdev, hpd);
83
84 switch (hpd) {
85 case RADEON_HPD_1:
86 tmp = RREG32(DC_HPD1_INT_CONTROL);
87 if (connected)
88 tmp &= ~DC_HPDx_INT_POLARITY;
89 else
90 tmp |= DC_HPDx_INT_POLARITY;
91 WREG32(DC_HPD1_INT_CONTROL, tmp);
92 break;
93 case RADEON_HPD_2:
94 tmp = RREG32(DC_HPD2_INT_CONTROL);
95 if (connected)
96 tmp &= ~DC_HPDx_INT_POLARITY;
97 else
98 tmp |= DC_HPDx_INT_POLARITY;
99 WREG32(DC_HPD2_INT_CONTROL, tmp);
100 break;
101 case RADEON_HPD_3:
102 tmp = RREG32(DC_HPD3_INT_CONTROL);
103 if (connected)
104 tmp &= ~DC_HPDx_INT_POLARITY;
105 else
106 tmp |= DC_HPDx_INT_POLARITY;
107 WREG32(DC_HPD3_INT_CONTROL, tmp);
108 break;
109 case RADEON_HPD_4:
110 tmp = RREG32(DC_HPD4_INT_CONTROL);
111 if (connected)
112 tmp &= ~DC_HPDx_INT_POLARITY;
113 else
114 tmp |= DC_HPDx_INT_POLARITY;
115 WREG32(DC_HPD4_INT_CONTROL, tmp);
116 break;
117 case RADEON_HPD_5:
118 tmp = RREG32(DC_HPD5_INT_CONTROL);
119 if (connected)
120 tmp &= ~DC_HPDx_INT_POLARITY;
121 else
122 tmp |= DC_HPDx_INT_POLARITY;
123 WREG32(DC_HPD5_INT_CONTROL, tmp);
124 break;
125 case RADEON_HPD_6:
126 tmp = RREG32(DC_HPD6_INT_CONTROL);
127 if (connected)
128 tmp &= ~DC_HPDx_INT_POLARITY;
129 else
130 tmp |= DC_HPDx_INT_POLARITY;
131 WREG32(DC_HPD6_INT_CONTROL, tmp);
132 break;
133 default:
134 break;
135 }
50} 136}
51 137
52void evergreen_hpd_init(struct radeon_device *rdev) 138void evergreen_hpd_init(struct radeon_device *rdev)
53{ 139{
54 /* XXX */ 140 struct drm_device *dev = rdev->ddev;
141 struct drm_connector *connector;
142 u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) |
143 DC_HPDx_RX_INT_TIMER(0xfa) | DC_HPDx_EN;
144
145 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
146 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
147 switch (radeon_connector->hpd.hpd) {
148 case RADEON_HPD_1:
149 WREG32(DC_HPD1_CONTROL, tmp);
150 rdev->irq.hpd[0] = true;
151 break;
152 case RADEON_HPD_2:
153 WREG32(DC_HPD2_CONTROL, tmp);
154 rdev->irq.hpd[1] = true;
155 break;
156 case RADEON_HPD_3:
157 WREG32(DC_HPD3_CONTROL, tmp);
158 rdev->irq.hpd[2] = true;
159 break;
160 case RADEON_HPD_4:
161 WREG32(DC_HPD4_CONTROL, tmp);
162 rdev->irq.hpd[3] = true;
163 break;
164 case RADEON_HPD_5:
165 WREG32(DC_HPD5_CONTROL, tmp);
166 rdev->irq.hpd[4] = true;
167 break;
168 case RADEON_HPD_6:
169 WREG32(DC_HPD6_CONTROL, tmp);
170 rdev->irq.hpd[5] = true;
171 break;
172 default:
173 break;
174 }
175 }
176 if (rdev->irq.installed)
177 evergreen_irq_set(rdev);
55} 178}
56 179
57 180void evergreen_hpd_fini(struct radeon_device *rdev)
58void evergreen_bandwidth_update(struct radeon_device *rdev)
59{ 181{
60 /* XXX */ 182 struct drm_device *dev = rdev->ddev;
183 struct drm_connector *connector;
184
185 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
186 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
187 switch (radeon_connector->hpd.hpd) {
188 case RADEON_HPD_1:
189 WREG32(DC_HPD1_CONTROL, 0);
190 rdev->irq.hpd[0] = false;
191 break;
192 case RADEON_HPD_2:
193 WREG32(DC_HPD2_CONTROL, 0);
194 rdev->irq.hpd[1] = false;
195 break;
196 case RADEON_HPD_3:
197 WREG32(DC_HPD3_CONTROL, 0);
198 rdev->irq.hpd[2] = false;
199 break;
200 case RADEON_HPD_4:
201 WREG32(DC_HPD4_CONTROL, 0);
202 rdev->irq.hpd[3] = false;
203 break;
204 case RADEON_HPD_5:
205 WREG32(DC_HPD5_CONTROL, 0);
206 rdev->irq.hpd[4] = false;
207 break;
208 case RADEON_HPD_6:
209 WREG32(DC_HPD6_CONTROL, 0);
210 rdev->irq.hpd[5] = false;
211 break;
212 default:
213 break;
214 }
215 }
61} 216}
62 217
63void evergreen_hpd_fini(struct radeon_device *rdev) 218void evergreen_bandwidth_update(struct radeon_device *rdev)
64{ 219{
65 /* XXX */ 220 /* XXX */
66} 221}
@@ -83,10 +238,31 @@ static int evergreen_mc_wait_for_idle(struct radeon_device *rdev)
83/* 238/*
84 * GART 239 * GART
85 */ 240 */
241void evergreen_pcie_gart_tlb_flush(struct radeon_device *rdev)
242{
243 unsigned i;
244 u32 tmp;
245
246 WREG32(VM_CONTEXT0_REQUEST_RESPONSE, REQUEST_TYPE(1));
247 for (i = 0; i < rdev->usec_timeout; i++) {
248 /* read MC_STATUS */
249 tmp = RREG32(VM_CONTEXT0_REQUEST_RESPONSE);
250 tmp = (tmp & RESPONSE_TYPE_MASK) >> RESPONSE_TYPE_SHIFT;
251 if (tmp == 2) {
 252 printk(KERN_WARNING "[drm] evergreen flush TLB failed\n");
253 return;
254 }
255 if (tmp) {
256 return;
257 }
258 udelay(1);
259 }
260}
261
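
The TLB flush posts a request and then polls VM_CONTEXT0_REQUEST_RESPONSE until RESPONSE_TYPE goes non-zero, treating the value 2 as a hardware-reported failure and bounding the wait by rdev->usec_timeout. The same poll-with-timeout idiom, factored out as a sketch:

    /* Sketch: the poll idiom used by the flush above.
     * Returns 0 on ack, -EIO on a failure response, -ETIMEDOUT otherwise. */
    static int poll_response(struct radeon_device *rdev, u32 reg,
    			 u32 mask, u32 shift)
    {
    	unsigned i;

    	for (i = 0; i < rdev->usec_timeout; i++) {
    		u32 v = (RREG32(reg) & mask) >> shift;

    		if (v == 2)
    			return -EIO;
    		if (v)
    			return 0;
    		udelay(1);
    	}
    	return -ETIMEDOUT;
    }
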
86int evergreen_pcie_gart_enable(struct radeon_device *rdev) 262int evergreen_pcie_gart_enable(struct radeon_device *rdev)
87{ 263{
88 u32 tmp; 264 u32 tmp;
89 int r, i; 265 int r;
90 266
91 if (rdev->gart.table.vram.robj == NULL) { 267 if (rdev->gart.table.vram.robj == NULL) {
92 dev_err(rdev->dev, "No VRAM object for PCIE GART.\n"); 268 dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
@@ -121,10 +297,9 @@ int evergreen_pcie_gart_enable(struct radeon_device *rdev)
121 RANGE_PROTECTION_FAULT_ENABLE_DEFAULT); 297 RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
122 WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR, 298 WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
123 (u32)(rdev->dummy_page.addr >> 12)); 299 (u32)(rdev->dummy_page.addr >> 12));
124 for (i = 1; i < 7; i++) 300 WREG32(VM_CONTEXT1_CNTL, 0);
125 WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
126 301
127 r600_pcie_gart_tlb_flush(rdev); 302 evergreen_pcie_gart_tlb_flush(rdev);
128 rdev->gart.ready = true; 303 rdev->gart.ready = true;
129 return 0; 304 return 0;
130} 305}
@@ -132,11 +307,11 @@ int evergreen_pcie_gart_enable(struct radeon_device *rdev)
132void evergreen_pcie_gart_disable(struct radeon_device *rdev) 307void evergreen_pcie_gart_disable(struct radeon_device *rdev)
133{ 308{
134 u32 tmp; 309 u32 tmp;
135 int i, r; 310 int r;
136 311
137 /* Disable all tables */ 312 /* Disable all tables */
138 for (i = 0; i < 7; i++) 313 WREG32(VM_CONTEXT0_CNTL, 0);
139 WREG32(VM_CONTEXT0_CNTL + (i * 4), 0); 314 WREG32(VM_CONTEXT1_CNTL, 0);
140 315
141 /* Setup L2 cache */ 316 /* Setup L2 cache */
142 WREG32(VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING | 317 WREG32(VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING |
@@ -173,7 +348,6 @@ void evergreen_pcie_gart_fini(struct radeon_device *rdev)
173void evergreen_agp_enable(struct radeon_device *rdev) 348void evergreen_agp_enable(struct radeon_device *rdev)
174{ 349{
175 u32 tmp; 350 u32 tmp;
176 int i;
177 351
178 /* Setup L2 cache */ 352 /* Setup L2 cache */
179 WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING | 353 WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
@@ -193,8 +367,8 @@ void evergreen_agp_enable(struct radeon_device *rdev)
193 WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp); 367 WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
194 WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp); 368 WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
195 WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp); 369 WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
196 for (i = 0; i < 7; i++) 370 WREG32(VM_CONTEXT0_CNTL, 0);
197 WREG32(VM_CONTEXT0_CNTL + (i * 4), 0); 371 WREG32(VM_CONTEXT1_CNTL, 0);
198} 372}
199 373
200static void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save) 374static void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save)
@@ -400,40 +574,656 @@ static void evergreen_mc_program(struct radeon_device *rdev)
400 rv515_vga_render_disable(rdev); 574 rv515_vga_render_disable(rdev);
401} 575}
402 576
403#if 0
404/* 577/*
405 * CP. 578 * CP.
406 */ 579 */
407static void evergreen_cp_stop(struct radeon_device *rdev)
408{
409 /* XXX */
410}
411
412 580
413static int evergreen_cp_load_microcode(struct radeon_device *rdev) 581static int evergreen_cp_load_microcode(struct radeon_device *rdev)
414{ 582{
415 /* XXX */ 583 const __be32 *fw_data;
584 int i;
585
586 if (!rdev->me_fw || !rdev->pfp_fw)
587 return -EINVAL;
416 588
589 r700_cp_stop(rdev);
590 WREG32(CP_RB_CNTL, RB_NO_UPDATE | (15 << 8) | (3 << 0));
591
592 fw_data = (const __be32 *)rdev->pfp_fw->data;
593 WREG32(CP_PFP_UCODE_ADDR, 0);
594 for (i = 0; i < EVERGREEN_PFP_UCODE_SIZE; i++)
595 WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++));
596 WREG32(CP_PFP_UCODE_ADDR, 0);
597
598 fw_data = (const __be32 *)rdev->me_fw->data;
599 WREG32(CP_ME_RAM_WADDR, 0);
600 for (i = 0; i < EVERGREEN_PM4_UCODE_SIZE; i++)
601 WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++));
602
603 WREG32(CP_PFP_UCODE_ADDR, 0);
604 WREG32(CP_ME_RAM_WADDR, 0);
605 WREG32(CP_ME_RAM_RADDR, 0);
417 return 0; 606 return 0;
418} 607}
419 608
609int evergreen_cp_resume(struct radeon_device *rdev)
610{
611 u32 tmp;
612 u32 rb_bufsz;
613 int r;
614
615 /* Reset cp; if cp is reset, then PA, SH, VGT also need to be reset */
616 WREG32(GRBM_SOFT_RESET, (SOFT_RESET_CP |
617 SOFT_RESET_PA |
618 SOFT_RESET_SH |
619 SOFT_RESET_VGT |
620 SOFT_RESET_SX));
621 RREG32(GRBM_SOFT_RESET);
622 mdelay(15);
623 WREG32(GRBM_SOFT_RESET, 0);
624 RREG32(GRBM_SOFT_RESET);
625
626 /* Set ring buffer size */
627 rb_bufsz = drm_order(rdev->cp.ring_size / 8);
628 tmp = RB_NO_UPDATE | (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
629#ifdef __BIG_ENDIAN
630 tmp |= BUF_SWAP_32BIT;
631#endif
632 WREG32(CP_RB_CNTL, tmp);
633 WREG32(CP_SEM_WAIT_TIMER, 0x4);
634
635 /* Set the write pointer delay */
636 WREG32(CP_RB_WPTR_DELAY, 0);
637
638 /* Initialize the ring buffer's read and write pointers */
639 WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
640 WREG32(CP_RB_RPTR_WR, 0);
641 WREG32(CP_RB_WPTR, 0);
642 WREG32(CP_RB_RPTR_ADDR, rdev->cp.gpu_addr & 0xFFFFFFFF);
643 WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->cp.gpu_addr));
644 mdelay(1);
645 WREG32(CP_RB_CNTL, tmp);
646
647 WREG32(CP_RB_BASE, rdev->cp.gpu_addr >> 8);
648 WREG32(CP_DEBUG, (1 << 27) | (1 << 28));
649
650 rdev->cp.rptr = RREG32(CP_RB_RPTR);
651 rdev->cp.wptr = RREG32(CP_RB_WPTR);
652
653 r600_cp_start(rdev);
654 rdev->cp.ready = true;
655 r = radeon_ring_test(rdev);
656 if (r) {
657 rdev->cp.ready = false;
658 return r;
659 }
660 return 0;
661}
420 662
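
CP_RB_CNTL encodes the ring size as log2 of the number of quadwords, which is what drm_order(ring_size / 8) produces. A standalone example of the arithmetic, with order() standing in for drm_order() (a ceiling log2):

    /* Sketch: what rb_bufsz evaluates to for a 1 MiB ring. */
    #include <stdio.h>

    static unsigned order(unsigned long n)  /* ceiling log2, like drm_order() */
    {
    	unsigned o = 0;

    	while ((1UL << o) < n)
    		o++;
    	return o;
    }

    int main(void)
    {
    	unsigned long ring_size = 1024 * 1024;           /* 1 MiB ring */

    	printf("rb_bufsz = %u\n", order(ring_size / 8)); /* 17 -> 2^17 qwords */
    	return 0;
    }
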
421/* 663/*
422 * Core functions 664 * Core functions
423 */ 665 */
424static u32 evergreen_get_tile_pipe_to_backend_map(u32 num_tile_pipes, 666static u32 evergreen_get_tile_pipe_to_backend_map(struct radeon_device *rdev,
667 u32 num_tile_pipes,
425 u32 num_backends, 668 u32 num_backends,
426 u32 backend_disable_mask) 669 u32 backend_disable_mask)
427{ 670{
428 u32 backend_map = 0; 671 u32 backend_map = 0;
672 u32 enabled_backends_mask = 0;
673 u32 enabled_backends_count = 0;
674 u32 cur_pipe;
675 u32 swizzle_pipe[EVERGREEN_MAX_PIPES];
676 u32 cur_backend = 0;
677 u32 i;
678 bool force_no_swizzle;
679
680 if (num_tile_pipes > EVERGREEN_MAX_PIPES)
681 num_tile_pipes = EVERGREEN_MAX_PIPES;
682 if (num_tile_pipes < 1)
683 num_tile_pipes = 1;
684 if (num_backends > EVERGREEN_MAX_BACKENDS)
685 num_backends = EVERGREEN_MAX_BACKENDS;
686 if (num_backends < 1)
687 num_backends = 1;
688
689 for (i = 0; i < EVERGREEN_MAX_BACKENDS; ++i) {
690 if (((backend_disable_mask >> i) & 1) == 0) {
691 enabled_backends_mask |= (1 << i);
692 ++enabled_backends_count;
693 }
694 if (enabled_backends_count == num_backends)
695 break;
696 }
697
698 if (enabled_backends_count == 0) {
699 enabled_backends_mask = 1;
700 enabled_backends_count = 1;
701 }
702
703 if (enabled_backends_count != num_backends)
704 num_backends = enabled_backends_count;
705
706 memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * EVERGREEN_MAX_PIPES);
707 switch (rdev->family) {
708 case CHIP_CEDAR:
709 case CHIP_REDWOOD:
710 force_no_swizzle = false;
711 break;
712 case CHIP_CYPRESS:
713 case CHIP_HEMLOCK:
714 case CHIP_JUNIPER:
715 default:
716 force_no_swizzle = true;
717 break;
718 }
719 if (force_no_swizzle) {
720 bool last_backend_enabled = false;
721
722 force_no_swizzle = false;
723 for (i = 0; i < EVERGREEN_MAX_BACKENDS; ++i) {
724 if (((enabled_backends_mask >> i) & 1) == 1) {
725 if (last_backend_enabled)
726 force_no_swizzle = true;
727 last_backend_enabled = true;
728 } else
729 last_backend_enabled = false;
730 }
731 }
732
733 switch (num_tile_pipes) {
734 case 1:
735 case 3:
736 case 5:
737 case 7:
738 DRM_ERROR("odd number of pipes!\n");
739 break;
740 case 2:
741 swizzle_pipe[0] = 0;
742 swizzle_pipe[1] = 1;
743 break;
744 case 4:
745 if (force_no_swizzle) {
746 swizzle_pipe[0] = 0;
747 swizzle_pipe[1] = 1;
748 swizzle_pipe[2] = 2;
749 swizzle_pipe[3] = 3;
750 } else {
751 swizzle_pipe[0] = 0;
752 swizzle_pipe[1] = 2;
753 swizzle_pipe[2] = 1;
754 swizzle_pipe[3] = 3;
755 }
756 break;
757 case 6:
758 if (force_no_swizzle) {
759 swizzle_pipe[0] = 0;
760 swizzle_pipe[1] = 1;
761 swizzle_pipe[2] = 2;
762 swizzle_pipe[3] = 3;
763 swizzle_pipe[4] = 4;
764 swizzle_pipe[5] = 5;
765 } else {
766 swizzle_pipe[0] = 0;
767 swizzle_pipe[1] = 2;
768 swizzle_pipe[2] = 4;
769 swizzle_pipe[3] = 1;
770 swizzle_pipe[4] = 3;
771 swizzle_pipe[5] = 5;
772 }
773 break;
774 case 8:
775 if (force_no_swizzle) {
776 swizzle_pipe[0] = 0;
777 swizzle_pipe[1] = 1;
778 swizzle_pipe[2] = 2;
779 swizzle_pipe[3] = 3;
780 swizzle_pipe[4] = 4;
781 swizzle_pipe[5] = 5;
782 swizzle_pipe[6] = 6;
783 swizzle_pipe[7] = 7;
784 } else {
785 swizzle_pipe[0] = 0;
786 swizzle_pipe[1] = 2;
787 swizzle_pipe[2] = 4;
788 swizzle_pipe[3] = 6;
789 swizzle_pipe[4] = 1;
790 swizzle_pipe[5] = 3;
791 swizzle_pipe[6] = 5;
792 swizzle_pipe[7] = 7;
793 }
794 break;
795 }
796
797 for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) {
798 while (((1 << cur_backend) & enabled_backends_mask) == 0)
799 cur_backend = (cur_backend + 1) % EVERGREEN_MAX_BACKENDS;
800
801 backend_map |= (((cur_backend & 0xf) << (swizzle_pipe[cur_pipe] * 4)));
802
803 cur_backend = (cur_backend + 1) % EVERGREEN_MAX_BACKENDS;
804 }
429 805
430 return backend_map; 806 return backend_map;
431} 807}
432#endif
433 808
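
backend_map packs one 4-bit render-backend id per tile pipe, which is why the hard-wired efuse values below read like strings of hex digits (0x76543210 is the identity mapping). Decoding a pipe's backend is a shift and mask:

    /* Sketch: pull tile pipe n's render backend out of the packed map. */
    static unsigned backend_for_pipe(u32 backend_map, unsigned pipe)
    {
    	return (backend_map >> (pipe * 4)) & 0xf;  /* 4 bits per pipe */
    }
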
434static void evergreen_gpu_init(struct radeon_device *rdev) 809static void evergreen_gpu_init(struct radeon_device *rdev)
435{ 810{
436 /* XXX */ 811 u32 cc_rb_backend_disable = 0;
812 u32 cc_gc_shader_pipe_config;
813 u32 gb_addr_config = 0;
814 u32 mc_shared_chmap, mc_arb_ramcfg;
815 u32 gb_backend_map;
816 u32 grbm_gfx_index;
817 u32 sx_debug_1;
818 u32 smx_dc_ctl0;
819 u32 sq_config;
820 u32 sq_lds_resource_mgmt;
821 u32 sq_gpr_resource_mgmt_1;
822 u32 sq_gpr_resource_mgmt_2;
823 u32 sq_gpr_resource_mgmt_3;
824 u32 sq_thread_resource_mgmt;
825 u32 sq_thread_resource_mgmt_2;
826 u32 sq_stack_resource_mgmt_1;
827 u32 sq_stack_resource_mgmt_2;
828 u32 sq_stack_resource_mgmt_3;
829 u32 vgt_cache_invalidation;
830 u32 hdp_host_path_cntl;
831 int i, j, num_shader_engines, ps_thread_count;
832
833 switch (rdev->family) {
834 case CHIP_CYPRESS:
835 case CHIP_HEMLOCK:
836 rdev->config.evergreen.num_ses = 2;
837 rdev->config.evergreen.max_pipes = 4;
838 rdev->config.evergreen.max_tile_pipes = 8;
839 rdev->config.evergreen.max_simds = 10;
840 rdev->config.evergreen.max_backends = 4 * rdev->config.evergreen.num_ses;
841 rdev->config.evergreen.max_gprs = 256;
842 rdev->config.evergreen.max_threads = 248;
843 rdev->config.evergreen.max_gs_threads = 32;
844 rdev->config.evergreen.max_stack_entries = 512;
845 rdev->config.evergreen.sx_num_of_sets = 4;
846 rdev->config.evergreen.sx_max_export_size = 256;
847 rdev->config.evergreen.sx_max_export_pos_size = 64;
848 rdev->config.evergreen.sx_max_export_smx_size = 192;
849 rdev->config.evergreen.max_hw_contexts = 8;
850 rdev->config.evergreen.sq_num_cf_insts = 2;
851
852 rdev->config.evergreen.sc_prim_fifo_size = 0x100;
853 rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
854 rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
855 break;
856 case CHIP_JUNIPER:
857 rdev->config.evergreen.num_ses = 1;
858 rdev->config.evergreen.max_pipes = 4;
859 rdev->config.evergreen.max_tile_pipes = 4;
860 rdev->config.evergreen.max_simds = 10;
861 rdev->config.evergreen.max_backends = 4 * rdev->config.evergreen.num_ses;
862 rdev->config.evergreen.max_gprs = 256;
863 rdev->config.evergreen.max_threads = 248;
864 rdev->config.evergreen.max_gs_threads = 32;
865 rdev->config.evergreen.max_stack_entries = 512;
866 rdev->config.evergreen.sx_num_of_sets = 4;
867 rdev->config.evergreen.sx_max_export_size = 256;
868 rdev->config.evergreen.sx_max_export_pos_size = 64;
869 rdev->config.evergreen.sx_max_export_smx_size = 192;
870 rdev->config.evergreen.max_hw_contexts = 8;
871 rdev->config.evergreen.sq_num_cf_insts = 2;
872
873 rdev->config.evergreen.sc_prim_fifo_size = 0x100;
874 rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
875 rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
876 break;
877 case CHIP_REDWOOD:
878 rdev->config.evergreen.num_ses = 1;
879 rdev->config.evergreen.max_pipes = 4;
880 rdev->config.evergreen.max_tile_pipes = 4;
881 rdev->config.evergreen.max_simds = 5;
882 rdev->config.evergreen.max_backends = 2 * rdev->config.evergreen.num_ses;
883 rdev->config.evergreen.max_gprs = 256;
884 rdev->config.evergreen.max_threads = 248;
885 rdev->config.evergreen.max_gs_threads = 32;
886 rdev->config.evergreen.max_stack_entries = 256;
887 rdev->config.evergreen.sx_num_of_sets = 4;
888 rdev->config.evergreen.sx_max_export_size = 256;
889 rdev->config.evergreen.sx_max_export_pos_size = 64;
890 rdev->config.evergreen.sx_max_export_smx_size = 192;
891 rdev->config.evergreen.max_hw_contexts = 8;
892 rdev->config.evergreen.sq_num_cf_insts = 2;
893
894 rdev->config.evergreen.sc_prim_fifo_size = 0x100;
895 rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
896 rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
897 break;
898 case CHIP_CEDAR:
899 default:
900 rdev->config.evergreen.num_ses = 1;
901 rdev->config.evergreen.max_pipes = 2;
902 rdev->config.evergreen.max_tile_pipes = 2;
903 rdev->config.evergreen.max_simds = 2;
904 rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses;
905 rdev->config.evergreen.max_gprs = 256;
906 rdev->config.evergreen.max_threads = 192;
907 rdev->config.evergreen.max_gs_threads = 16;
908 rdev->config.evergreen.max_stack_entries = 256;
909 rdev->config.evergreen.sx_num_of_sets = 4;
910 rdev->config.evergreen.sx_max_export_size = 128;
911 rdev->config.evergreen.sx_max_export_pos_size = 32;
912 rdev->config.evergreen.sx_max_export_smx_size = 96;
913 rdev->config.evergreen.max_hw_contexts = 4;
914 rdev->config.evergreen.sq_num_cf_insts = 1;
915
916 rdev->config.evergreen.sc_prim_fifo_size = 0x40;
917 rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
918 rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
919 break;
920 }
921
922 /* Initialize HDP */
923 for (i = 0, j = 0; i < 32; i++, j += 0x18) {
924 WREG32((0x2c14 + j), 0x00000000);
925 WREG32((0x2c18 + j), 0x00000000);
926 WREG32((0x2c1c + j), 0x00000000);
927 WREG32((0x2c20 + j), 0x00000000);
928 WREG32((0x2c24 + j), 0x00000000);
929 }
930
931 WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
932
933 cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & ~2;
934
935 cc_gc_shader_pipe_config |=
936 INACTIVE_QD_PIPES((EVERGREEN_MAX_PIPES_MASK << rdev->config.evergreen.max_pipes)
937 & EVERGREEN_MAX_PIPES_MASK);
938 cc_gc_shader_pipe_config |=
939 INACTIVE_SIMDS((EVERGREEN_MAX_SIMDS_MASK << rdev->config.evergreen.max_simds)
940 & EVERGREEN_MAX_SIMDS_MASK);
941
942 cc_rb_backend_disable =
943 BACKEND_DISABLE((EVERGREEN_MAX_BACKENDS_MASK << rdev->config.evergreen.max_backends)
944 & EVERGREEN_MAX_BACKENDS_MASK);
945
946
947 mc_shared_chmap = RREG32(MC_SHARED_CHMAP);
948 mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);
949
950 switch (rdev->config.evergreen.max_tile_pipes) {
951 case 1:
952 default:
953 gb_addr_config |= NUM_PIPES(0);
954 break;
955 case 2:
956 gb_addr_config |= NUM_PIPES(1);
957 break;
958 case 4:
959 gb_addr_config |= NUM_PIPES(2);
960 break;
961 case 8:
962 gb_addr_config |= NUM_PIPES(3);
963 break;
964 }
965
966 gb_addr_config |= PIPE_INTERLEAVE_SIZE((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT);
967 gb_addr_config |= BANK_INTERLEAVE_SIZE(0);
968 gb_addr_config |= NUM_SHADER_ENGINES(rdev->config.evergreen.num_ses - 1);
969 gb_addr_config |= SHADER_ENGINE_TILE_SIZE(1);
970 gb_addr_config |= NUM_GPUS(0); /* Hemlock? */
971 gb_addr_config |= MULTI_GPU_TILE_SIZE(2);
972
973 if (((mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT) > 2)
974 gb_addr_config |= ROW_SIZE(2);
975 else
976 gb_addr_config |= ROW_SIZE((mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT);
977
978 if (rdev->ddev->pdev->device == 0x689e) {
979 u32 efuse_straps_4;
980 u32 efuse_straps_3;
981 u8 efuse_box_bit_131_124;
982
983 WREG32(RCU_IND_INDEX, 0x204);
984 efuse_straps_4 = RREG32(RCU_IND_DATA);
985 WREG32(RCU_IND_INDEX, 0x203);
986 efuse_straps_3 = RREG32(RCU_IND_DATA);
987 efuse_box_bit_131_124 = (u8)(((efuse_straps_4 & 0xf) << 4) | ((efuse_straps_3 & 0xf0000000) >> 28));
988
989 switch(efuse_box_bit_131_124) {
990 case 0x00:
991 gb_backend_map = 0x76543210;
992 break;
993 case 0x55:
994 gb_backend_map = 0x77553311;
995 break;
996 case 0x56:
997 gb_backend_map = 0x77553300;
998 break;
999 case 0x59:
1000 gb_backend_map = 0x77552211;
1001 break;
1002 case 0x66:
1003 gb_backend_map = 0x77443300;
1004 break;
1005 case 0x99:
1006 gb_backend_map = 0x66552211;
1007 break;
1008 case 0x5a:
1009 gb_backend_map = 0x77552200;
1010 break;
1011 case 0xaa:
1012 gb_backend_map = 0x66442200;
1013 break;
1014 case 0x95:
1015 gb_backend_map = 0x66553311;
1016 break;
1017 default:
1018 DRM_ERROR("bad backend map, using default\n");
1019 gb_backend_map =
1020 evergreen_get_tile_pipe_to_backend_map(rdev,
1021 rdev->config.evergreen.max_tile_pipes,
1022 rdev->config.evergreen.max_backends,
1023 ((EVERGREEN_MAX_BACKENDS_MASK <<
1024 rdev->config.evergreen.max_backends) &
1025 EVERGREEN_MAX_BACKENDS_MASK));
1026 break;
1027 }
1028 } else if (rdev->ddev->pdev->device == 0x68b9) {
1029 u32 efuse_straps_3;
1030 u8 efuse_box_bit_127_124;
1031
1032 WREG32(RCU_IND_INDEX, 0x203);
1033 efuse_straps_3 = RREG32(RCU_IND_DATA);
 1034 efuse_box_bit_127_124 = (u8)((efuse_straps_3 & 0xF0000000) >> 28);
1035
1036 switch(efuse_box_bit_127_124) {
1037 case 0x0:
1038 gb_backend_map = 0x00003210;
1039 break;
1040 case 0x5:
1041 case 0x6:
1042 case 0x9:
1043 case 0xa:
1044 gb_backend_map = 0x00003311;
1045 break;
1046 default:
1047 DRM_ERROR("bad backend map, using default\n");
1048 gb_backend_map =
1049 evergreen_get_tile_pipe_to_backend_map(rdev,
1050 rdev->config.evergreen.max_tile_pipes,
1051 rdev->config.evergreen.max_backends,
1052 ((EVERGREEN_MAX_BACKENDS_MASK <<
1053 rdev->config.evergreen.max_backends) &
1054 EVERGREEN_MAX_BACKENDS_MASK));
1055 break;
1056 }
1057 } else
1058 gb_backend_map =
1059 evergreen_get_tile_pipe_to_backend_map(rdev,
1060 rdev->config.evergreen.max_tile_pipes,
1061 rdev->config.evergreen.max_backends,
1062 ((EVERGREEN_MAX_BACKENDS_MASK <<
1063 rdev->config.evergreen.max_backends) &
1064 EVERGREEN_MAX_BACKENDS_MASK));
1065
1066 WREG32(GB_BACKEND_MAP, gb_backend_map);
1067 WREG32(GB_ADDR_CONFIG, gb_addr_config);
1068 WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
1069 WREG32(HDP_ADDR_CONFIG, gb_addr_config);
1070
1071 num_shader_engines = ((RREG32(GB_ADDR_CONFIG) & NUM_SHADER_ENGINES(3)) >> 12) + 1;
1072 grbm_gfx_index = INSTANCE_BROADCAST_WRITES;
1073
1074 for (i = 0; i < rdev->config.evergreen.num_ses; i++) {
1075 u32 rb = cc_rb_backend_disable | (0xf0 << 16);
1076 u32 sp = cc_gc_shader_pipe_config;
1077 u32 gfx = grbm_gfx_index | SE_INDEX(i);
1078
1079 if (i == num_shader_engines) {
1080 rb |= BACKEND_DISABLE(EVERGREEN_MAX_BACKENDS_MASK);
1081 sp |= INACTIVE_SIMDS(EVERGREEN_MAX_SIMDS_MASK);
1082 }
1083
1084 WREG32(GRBM_GFX_INDEX, gfx);
1085 WREG32(RLC_GFX_INDEX, gfx);
1086
1087 WREG32(CC_RB_BACKEND_DISABLE, rb);
1088 WREG32(CC_SYS_RB_BACKEND_DISABLE, rb);
1089 WREG32(GC_USER_RB_BACKEND_DISABLE, rb);
1090 WREG32(CC_GC_SHADER_PIPE_CONFIG, sp);
1091 }
1092
1093 grbm_gfx_index |= SE_BROADCAST_WRITES;
1094 WREG32(GRBM_GFX_INDEX, grbm_gfx_index);
1095 WREG32(RLC_GFX_INDEX, grbm_gfx_index);
1096
1097 WREG32(CGTS_SYS_TCC_DISABLE, 0);
1098 WREG32(CGTS_TCC_DISABLE, 0);
1099 WREG32(CGTS_USER_SYS_TCC_DISABLE, 0);
1100 WREG32(CGTS_USER_TCC_DISABLE, 0);
1101
1102 /* set HW defaults for 3D engine */
1103 WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) |
1104 ROQ_IB2_START(0x2b)));
1105
1106 WREG32(CP_MEQ_THRESHOLDS, STQ_SPLIT(0x30));
1107
1108 WREG32(TA_CNTL_AUX, (DISABLE_CUBE_ANISO |
1109 SYNC_GRADIENT |
1110 SYNC_WALKER |
1111 SYNC_ALIGNER));
1112
1113 sx_debug_1 = RREG32(SX_DEBUG_1);
1114 sx_debug_1 |= ENABLE_NEW_SMX_ADDRESS;
1115 WREG32(SX_DEBUG_1, sx_debug_1);
1116
1117
1118 smx_dc_ctl0 = RREG32(SMX_DC_CTL0);
1119 smx_dc_ctl0 &= ~NUMBER_OF_SETS(0x1ff);
1120 smx_dc_ctl0 |= NUMBER_OF_SETS(rdev->config.evergreen.sx_num_of_sets);
1121 WREG32(SMX_DC_CTL0, smx_dc_ctl0);
1122
1123 WREG32(SX_EXPORT_BUFFER_SIZES, (COLOR_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_size / 4) - 1) |
1124 POSITION_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_pos_size / 4) - 1) |
1125 SMX_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_smx_size / 4) - 1)));
1126
1127 WREG32(PA_SC_FIFO_SIZE, (SC_PRIM_FIFO_SIZE(rdev->config.evergreen.sc_prim_fifo_size) |
1128 SC_HIZ_TILE_FIFO_SIZE(rdev->config.evergreen.sc_hiz_tile_fifo_size) |
1129 SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.evergreen.sc_earlyz_tile_fifo_size)));
1130
1131 WREG32(VGT_NUM_INSTANCES, 1);
1132 WREG32(SPI_CONFIG_CNTL, 0);
1133 WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4));
1134 WREG32(CP_PERFMON_CNTL, 0);
1135
1136 WREG32(SQ_MS_FIFO_SIZES, (CACHE_FIFO_SIZE(16 * rdev->config.evergreen.sq_num_cf_insts) |
1137 FETCH_FIFO_HIWATER(0x4) |
1138 DONE_FIFO_HIWATER(0xe0) |
1139 ALU_UPDATE_FIFO_HIWATER(0x8)));
1140
1141 sq_config = RREG32(SQ_CONFIG);
1142 sq_config &= ~(PS_PRIO(3) |
1143 VS_PRIO(3) |
1144 GS_PRIO(3) |
1145 ES_PRIO(3));
1146 sq_config |= (VC_ENABLE |
1147 EXPORT_SRC_C |
1148 PS_PRIO(0) |
1149 VS_PRIO(1) |
1150 GS_PRIO(2) |
1151 ES_PRIO(3));
1152
1153 if (rdev->family == CHIP_CEDAR)
1154 /* no vertex cache */
1155 sq_config &= ~VC_ENABLE;
1156
1157 sq_lds_resource_mgmt = RREG32(SQ_LDS_RESOURCE_MGMT);
1158
 1159 sq_gpr_resource_mgmt_1 = NUM_PS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 12 / 32);
1160 sq_gpr_resource_mgmt_1 |= NUM_VS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 6 / 32);
1161 sq_gpr_resource_mgmt_1 |= NUM_CLAUSE_TEMP_GPRS(4);
1162 sq_gpr_resource_mgmt_2 = NUM_GS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 4 / 32);
1163 sq_gpr_resource_mgmt_2 |= NUM_ES_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 4 / 32);
1164 sq_gpr_resource_mgmt_3 = NUM_HS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 3 / 32);
1165 sq_gpr_resource_mgmt_3 |= NUM_LS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 3 / 32);
1166
1167 if (rdev->family == CHIP_CEDAR)
1168 ps_thread_count = 96;
1169 else
1170 ps_thread_count = 128;
1171
1172 sq_thread_resource_mgmt = NUM_PS_THREADS(ps_thread_count);
 1173 sq_thread_resource_mgmt |= NUM_VS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
 1174 sq_thread_resource_mgmt |= NUM_GS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
 1175 sq_thread_resource_mgmt |= NUM_ES_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
 1176 sq_thread_resource_mgmt_2 = NUM_HS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
 1177 sq_thread_resource_mgmt_2 |= NUM_LS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
1178
1179 sq_stack_resource_mgmt_1 = NUM_PS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
1180 sq_stack_resource_mgmt_1 |= NUM_VS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
1181 sq_stack_resource_mgmt_2 = NUM_GS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
1182 sq_stack_resource_mgmt_2 |= NUM_ES_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
1183 sq_stack_resource_mgmt_3 = NUM_HS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
1184 sq_stack_resource_mgmt_3 |= NUM_LS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
1185
1186 WREG32(SQ_CONFIG, sq_config);
1187 WREG32(SQ_GPR_RESOURCE_MGMT_1, sq_gpr_resource_mgmt_1);
1188 WREG32(SQ_GPR_RESOURCE_MGMT_2, sq_gpr_resource_mgmt_2);
1189 WREG32(SQ_GPR_RESOURCE_MGMT_3, sq_gpr_resource_mgmt_3);
1190 WREG32(SQ_THREAD_RESOURCE_MGMT, sq_thread_resource_mgmt);
1191 WREG32(SQ_THREAD_RESOURCE_MGMT_2, sq_thread_resource_mgmt_2);
1192 WREG32(SQ_STACK_RESOURCE_MGMT_1, sq_stack_resource_mgmt_1);
1193 WREG32(SQ_STACK_RESOURCE_MGMT_2, sq_stack_resource_mgmt_2);
1194 WREG32(SQ_STACK_RESOURCE_MGMT_3, sq_stack_resource_mgmt_3);
1195 WREG32(SQ_DYN_GPR_CNTL_PS_FLUSH_REQ, 0);
1196 WREG32(SQ_LDS_RESOURCE_MGMT, sq_lds_resource_mgmt);
1197
1198 WREG32(PA_SC_FORCE_EOV_MAX_CNTS, (FORCE_EOV_MAX_CLK_CNT(4095) |
1199 FORCE_EOV_MAX_REZ_CNT(255)));
1200
1201 if (rdev->family == CHIP_CEDAR)
1202 vgt_cache_invalidation = CACHE_INVALIDATION(TC_ONLY);
1203 else
1204 vgt_cache_invalidation = CACHE_INVALIDATION(VC_AND_TC);
1205 vgt_cache_invalidation |= AUTO_INVLD_EN(ES_AND_GS_AUTO);
1206 WREG32(VGT_CACHE_INVALIDATION, vgt_cache_invalidation);
1207
1208 WREG32(VGT_GS_VERTEX_REUSE, 16);
1209 WREG32(PA_SC_LINE_STIPPLE_STATE, 0);
1210
1211 WREG32(CB_PERF_CTR0_SEL_0, 0);
1212 WREG32(CB_PERF_CTR0_SEL_1, 0);
1213 WREG32(CB_PERF_CTR1_SEL_0, 0);
1214 WREG32(CB_PERF_CTR1_SEL_1, 0);
1215 WREG32(CB_PERF_CTR2_SEL_0, 0);
1216 WREG32(CB_PERF_CTR2_SEL_1, 0);
1217 WREG32(CB_PERF_CTR3_SEL_0, 0);
1218 WREG32(CB_PERF_CTR3_SEL_1, 0);
1219
1220 hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL);
1221 WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl);
1222
1223 WREG32(PA_CL_ENHANCE, CLIP_VTX_REORDER_ENA | NUM_CLIP_SEQ(3));
1224
1225 udelay(50);
1226
437} 1227}
438 1228
439int evergreen_mc_init(struct radeon_device *rdev) 1229int evergreen_mc_init(struct radeon_device *rdev)
@@ -476,26 +1266,616 @@ int evergreen_mc_init(struct radeon_device *rdev)
476 rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024; 1266 rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
477 rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024; 1267 rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
478 rdev->mc.visible_vram_size = rdev->mc.aper_size; 1268 rdev->mc.visible_vram_size = rdev->mc.aper_size;
479 /* FIXME remove this once we support unmappable VRAM */
480 if (rdev->mc.mc_vram_size > rdev->mc.aper_size) {
481 rdev->mc.mc_vram_size = rdev->mc.aper_size;
482 rdev->mc.real_vram_size = rdev->mc.aper_size;
483 }
484 r600_vram_gtt_location(rdev, &rdev->mc); 1269 r600_vram_gtt_location(rdev, &rdev->mc);
485 radeon_update_bandwidth_info(rdev); 1270 radeon_update_bandwidth_info(rdev);
486 1271
487 return 0; 1272 return 0;
488} 1273}
489 1274
490int evergreen_gpu_reset(struct radeon_device *rdev) 1275bool evergreen_gpu_is_lockup(struct radeon_device *rdev)
491{ 1276{
492 /* FIXME: implement for evergreen */ 1277 /* FIXME: implement for evergreen */
1278 return false;
1279}
1280
1281static int evergreen_gpu_soft_reset(struct radeon_device *rdev)
1282{
1283 struct evergreen_mc_save save;
1284 u32 srbm_reset = 0;
1285 u32 grbm_reset = 0;
1286
 1287 dev_info(rdev->dev, "GPU softreset\n");
1288 dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n",
1289 RREG32(GRBM_STATUS));
1290 dev_info(rdev->dev, " GRBM_STATUS_SE0=0x%08X\n",
1291 RREG32(GRBM_STATUS_SE0));
1292 dev_info(rdev->dev, " GRBM_STATUS_SE1=0x%08X\n",
1293 RREG32(GRBM_STATUS_SE1));
1294 dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n",
1295 RREG32(SRBM_STATUS));
1296 evergreen_mc_stop(rdev, &save);
1297 if (evergreen_mc_wait_for_idle(rdev)) {
 1298 dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
1299 }
1300 /* Disable CP parsing/prefetching */
1301 WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT);
1302
1303 /* reset all the gfx blocks */
1304 grbm_reset = (SOFT_RESET_CP |
1305 SOFT_RESET_CB |
1306 SOFT_RESET_DB |
1307 SOFT_RESET_PA |
1308 SOFT_RESET_SC |
1309 SOFT_RESET_SPI |
1310 SOFT_RESET_SH |
1311 SOFT_RESET_SX |
1312 SOFT_RESET_TC |
1313 SOFT_RESET_TA |
1314 SOFT_RESET_VC |
1315 SOFT_RESET_VGT);
1316
1317 dev_info(rdev->dev, " GRBM_SOFT_RESET=0x%08X\n", grbm_reset);
1318 WREG32(GRBM_SOFT_RESET, grbm_reset);
1319 (void)RREG32(GRBM_SOFT_RESET);
1320 udelay(50);
1321 WREG32(GRBM_SOFT_RESET, 0);
1322 (void)RREG32(GRBM_SOFT_RESET);
1323
1324 /* reset all the system blocks */
1325 srbm_reset = SRBM_SOFT_RESET_ALL_MASK;
1326
1327 dev_info(rdev->dev, " SRBM_SOFT_RESET=0x%08X\n", srbm_reset);
1328 WREG32(SRBM_SOFT_RESET, srbm_reset);
1329 (void)RREG32(SRBM_SOFT_RESET);
1330 udelay(50);
1331 WREG32(SRBM_SOFT_RESET, 0);
1332 (void)RREG32(SRBM_SOFT_RESET);
1333 /* Wait a little for things to settle down */
1334 udelay(50);
1335 dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n",
1336 RREG32(GRBM_STATUS));
1337 dev_info(rdev->dev, " GRBM_STATUS_SE0=0x%08X\n",
1338 RREG32(GRBM_STATUS_SE0));
1339 dev_info(rdev->dev, " GRBM_STATUS_SE1=0x%08X\n",
1340 RREG32(GRBM_STATUS_SE1));
1341 dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n",
1342 RREG32(SRBM_STATUS));
 1343 /* After reset we need to reinit the asic as the GPU often ends up
 1344 * in an incoherent state.
1345 */
1346 atom_asic_init(rdev->mode_info.atom_context);
1347 evergreen_mc_resume(rdev, &save);
1348 return 0;
1349}
1350
1351int evergreen_asic_reset(struct radeon_device *rdev)
1352{
1353 return evergreen_gpu_soft_reset(rdev);
1354}
1355
1356/* Interrupts */
1357
1358u32 evergreen_get_vblank_counter(struct radeon_device *rdev, int crtc)
1359{
1360 switch (crtc) {
1361 case 0:
1362 return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC0_REGISTER_OFFSET);
1363 case 1:
1364 return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC1_REGISTER_OFFSET);
1365 case 2:
1366 return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC2_REGISTER_OFFSET);
1367 case 3:
1368 return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC3_REGISTER_OFFSET);
1369 case 4:
1370 return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC4_REGISTER_OFFSET);
1371 case 5:
1372 return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC5_REGISTER_OFFSET);
1373 default:
1374 return 0;
1375 }
1376}
1377
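
Every CRTC register block sits at a fixed per-CRTC offset from the CRTC0 registers, so the switch above is equivalent to indexing an offset table. A sketch, assuming the EVERGREEN_CRTCn_REGISTER_OFFSET defines from evergreen_reg.h:

    /* Sketch: offset-table form of the vblank-counter switch. */
    static u32 vblank_counter(struct radeon_device *rdev, int crtc)
    {
    	static const u32 crtc_offsets[6] = {
    		EVERGREEN_CRTC0_REGISTER_OFFSET, EVERGREEN_CRTC1_REGISTER_OFFSET,
    		EVERGREEN_CRTC2_REGISTER_OFFSET, EVERGREEN_CRTC3_REGISTER_OFFSET,
    		EVERGREEN_CRTC4_REGISTER_OFFSET, EVERGREEN_CRTC5_REGISTER_OFFSET,
    	};

    	if (crtc < 0 || crtc > 5)
    		return 0;
    	return RREG32(CRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
    }
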
1378void evergreen_disable_interrupt_state(struct radeon_device *rdev)
1379{
1380 u32 tmp;
1381
1382 WREG32(CP_INT_CNTL, 0);
1383 WREG32(GRBM_INT_CNTL, 0);
1384 WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
1385 WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
1386 WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
1387 WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
1388 WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
1389 WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
1390
1391 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
1392 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
1393 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
1394 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
1395 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
1396 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
1397
1398 WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
1399 WREG32(DACB_AUTODETECT_INT_CONTROL, 0);
1400
1401 tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY;
1402 WREG32(DC_HPD1_INT_CONTROL, tmp);
1403 tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY;
1404 WREG32(DC_HPD2_INT_CONTROL, tmp);
1405 tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY;
1406 WREG32(DC_HPD3_INT_CONTROL, tmp);
1407 tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY;
1408 WREG32(DC_HPD4_INT_CONTROL, tmp);
1409 tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY;
1410 WREG32(DC_HPD5_INT_CONTROL, tmp);
1411 tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY;
1412 WREG32(DC_HPD6_INT_CONTROL, tmp);
1413
1414}
1415
1416int evergreen_irq_set(struct radeon_device *rdev)
1417{
1418 u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE;
1419 u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0;
1420 u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6;
1421
1422 if (!rdev->irq.installed) {
1423 WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n");
1424 return -EINVAL;
1425 }
1426 /* don't enable anything if the ih is disabled */
1427 if (!rdev->ih.enabled) {
1428 r600_disable_interrupts(rdev);
1429 /* force the active interrupt state to all disabled */
1430 evergreen_disable_interrupt_state(rdev);
1431 return 0;
1432 }
1433
1434 hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
1435 hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
1436 hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
1437 hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN;
1438 hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
1439 hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
1440
1441 if (rdev->irq.sw_int) {
1442 DRM_DEBUG("evergreen_irq_set: sw int\n");
1443 cp_int_cntl |= RB_INT_ENABLE;
1444 }
1445 if (rdev->irq.crtc_vblank_int[0]) {
1446 DRM_DEBUG("evergreen_irq_set: vblank 0\n");
1447 crtc1 |= VBLANK_INT_MASK;
1448 }
1449 if (rdev->irq.crtc_vblank_int[1]) {
1450 DRM_DEBUG("evergreen_irq_set: vblank 1\n");
1451 crtc2 |= VBLANK_INT_MASK;
1452 }
1453 if (rdev->irq.crtc_vblank_int[2]) {
1454 DRM_DEBUG("evergreen_irq_set: vblank 2\n");
1455 crtc3 |= VBLANK_INT_MASK;
1456 }
1457 if (rdev->irq.crtc_vblank_int[3]) {
1458 DRM_DEBUG("evergreen_irq_set: vblank 3\n");
1459 crtc4 |= VBLANK_INT_MASK;
1460 }
1461 if (rdev->irq.crtc_vblank_int[4]) {
1462 DRM_DEBUG("evergreen_irq_set: vblank 4\n");
1463 crtc5 |= VBLANK_INT_MASK;
1464 }
1465 if (rdev->irq.crtc_vblank_int[5]) {
1466 DRM_DEBUG("evergreen_irq_set: vblank 5\n");
1467 crtc6 |= VBLANK_INT_MASK;
1468 }
1469 if (rdev->irq.hpd[0]) {
1470 DRM_DEBUG("evergreen_irq_set: hpd 1\n");
1471 hpd1 |= DC_HPDx_INT_EN;
1472 }
1473 if (rdev->irq.hpd[1]) {
1474 DRM_DEBUG("evergreen_irq_set: hpd 2\n");
1475 hpd2 |= DC_HPDx_INT_EN;
1476 }
1477 if (rdev->irq.hpd[2]) {
1478 DRM_DEBUG("evergreen_irq_set: hpd 3\n");
1479 hpd3 |= DC_HPDx_INT_EN;
1480 }
1481 if (rdev->irq.hpd[3]) {
1482 DRM_DEBUG("evergreen_irq_set: hpd 4\n");
1483 hpd4 |= DC_HPDx_INT_EN;
1484 }
1485 if (rdev->irq.hpd[4]) {
1486 DRM_DEBUG("evergreen_irq_set: hpd 5\n");
1487 hpd5 |= DC_HPDx_INT_EN;
1488 }
1489 if (rdev->irq.hpd[5]) {
1490 DRM_DEBUG("evergreen_irq_set: hpd 6\n");
1491 hpd6 |= DC_HPDx_INT_EN;
1492 }
1493
1494 WREG32(CP_INT_CNTL, cp_int_cntl);
1495
1496 WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1);
1497 WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, crtc2);
1498 WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, crtc3);
1499 WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, crtc4);
1500 WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, crtc5);
1501 WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, crtc6);
1502
1503 WREG32(DC_HPD1_INT_CONTROL, hpd1);
1504 WREG32(DC_HPD2_INT_CONTROL, hpd2);
1505 WREG32(DC_HPD3_INT_CONTROL, hpd3);
1506 WREG32(DC_HPD4_INT_CONTROL, hpd4);
1507 WREG32(DC_HPD5_INT_CONTROL, hpd5);
1508 WREG32(DC_HPD6_INT_CONTROL, hpd6);
1509
493 return 0; 1510 return 0;
494} 1511}
495 1512
1513static inline void evergreen_irq_ack(struct radeon_device *rdev,
1514 u32 *disp_int,
1515 u32 *disp_int_cont,
1516 u32 *disp_int_cont2,
1517 u32 *disp_int_cont3,
1518 u32 *disp_int_cont4,
1519 u32 *disp_int_cont5)
1520{
1521 u32 tmp;
1522
1523 *disp_int = RREG32(DISP_INTERRUPT_STATUS);
1524 *disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
1525 *disp_int_cont2 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE2);
1526 *disp_int_cont3 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE3);
1527 *disp_int_cont4 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE4);
1528 *disp_int_cont5 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE5);
1529
1530 if (*disp_int & LB_D1_VBLANK_INTERRUPT)
1531 WREG32(VBLANK_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VBLANK_ACK);
1532 if (*disp_int & LB_D1_VLINE_INTERRUPT)
1533 WREG32(VLINE_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VLINE_ACK);
1534
1535 if (*disp_int_cont & LB_D2_VBLANK_INTERRUPT)
1536 WREG32(VBLANK_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VBLANK_ACK);
1537 if (*disp_int_cont & LB_D2_VLINE_INTERRUPT)
1538 WREG32(VLINE_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VLINE_ACK);
1539
1540 if (*disp_int_cont2 & LB_D3_VBLANK_INTERRUPT)
1541 WREG32(VBLANK_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VBLANK_ACK);
1542 if (*disp_int_cont2 & LB_D3_VLINE_INTERRUPT)
1543 WREG32(VLINE_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VLINE_ACK);
1544
1545 if (*disp_int_cont3 & LB_D4_VBLANK_INTERRUPT)
1546 WREG32(VBLANK_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VBLANK_ACK);
1547 if (*disp_int_cont3 & LB_D4_VLINE_INTERRUPT)
1548 WREG32(VLINE_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VLINE_ACK);
1549
1550 if (*disp_int_cont4 & LB_D5_VBLANK_INTERRUPT)
1551 WREG32(VBLANK_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VBLANK_ACK);
1552 if (*disp_int_cont4 & LB_D5_VLINE_INTERRUPT)
1553 WREG32(VLINE_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VLINE_ACK);
1554
1555 if (*disp_int_cont5 & LB_D6_VBLANK_INTERRUPT)
1556 WREG32(VBLANK_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VBLANK_ACK);
1557 if (*disp_int_cont5 & LB_D6_VLINE_INTERRUPT)
1558 WREG32(VLINE_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VLINE_ACK);
1559
1560 if (*disp_int & DC_HPD1_INTERRUPT) {
1561 tmp = RREG32(DC_HPD1_INT_CONTROL);
1562 tmp |= DC_HPDx_INT_ACK;
1563 WREG32(DC_HPD1_INT_CONTROL, tmp);
1564 }
1565 if (*disp_int_cont & DC_HPD2_INTERRUPT) {
1566 tmp = RREG32(DC_HPD2_INT_CONTROL);
1567 tmp |= DC_HPDx_INT_ACK;
1568 WREG32(DC_HPD2_INT_CONTROL, tmp);
1569 }
1570 if (*disp_int_cont2 & DC_HPD3_INTERRUPT) {
1571 tmp = RREG32(DC_HPD3_INT_CONTROL);
1572 tmp |= DC_HPDx_INT_ACK;
1573 WREG32(DC_HPD3_INT_CONTROL, tmp);
1574 }
1575 if (*disp_int_cont3 & DC_HPD4_INTERRUPT) {
1576 tmp = RREG32(DC_HPD4_INT_CONTROL);
1577 tmp |= DC_HPDx_INT_ACK;
1578 WREG32(DC_HPD4_INT_CONTROL, tmp);
1579 }
1580 if (*disp_int_cont4 & DC_HPD5_INTERRUPT) {
1581 tmp = RREG32(DC_HPD5_INT_CONTROL);
1582 tmp |= DC_HPDx_INT_ACK;
1583 WREG32(DC_HPD5_INT_CONTROL, tmp);
1584 }
1585 if (*disp_int_cont5 & DC_HPD6_INTERRUPT) {
1586 tmp = RREG32(DC_HPD6_INT_CONTROL);
1587 tmp |= DC_HPDx_INT_ACK;
1588 WREG32(DC_HPD6_INT_CONTROL, tmp);
1589 }
1590}
1591
1592void evergreen_irq_disable(struct radeon_device *rdev)
1593{
1594 u32 disp_int, disp_int_cont, disp_int_cont2;
1595 u32 disp_int_cont3, disp_int_cont4, disp_int_cont5;
1596
1597 r600_disable_interrupts(rdev);
1598 /* Wait and acknowledge irq */
1599 mdelay(1);
1600 evergreen_irq_ack(rdev, &disp_int, &disp_int_cont, &disp_int_cont2,
1601 &disp_int_cont3, &disp_int_cont4, &disp_int_cont5);
1602 evergreen_disable_interrupt_state(rdev);
1603}
1604
1605static void evergreen_irq_suspend(struct radeon_device *rdev)
1606{
1607 evergreen_irq_disable(rdev);
1608 r600_rlc_stop(rdev);
1609}
1610
1611static inline u32 evergreen_get_ih_wptr(struct radeon_device *rdev)
1612{
1613 u32 wptr, tmp;
1614
1615 /* XXX use writeback */
1616 wptr = RREG32(IH_RB_WPTR);
1617
1618 if (wptr & RB_OVERFLOW) {
1619 /* When a ring buffer overflow happens, start parsing interrupts
1620 * from the last vector that was not overwritten (wptr + 16).
1621 * Hopefully this should allow us to catch up.
1622 */
1623 dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n",
1624 wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask);
1625 rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
1626 tmp = RREG32(IH_RB_CNTL);
1627 tmp |= IH_WPTR_OVERFLOW_CLEAR;
1628 WREG32(IH_RB_CNTL, tmp);
1629 }
1630 return (wptr & rdev->ih.ptr_mask);
1631}
1632
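A userspace model of the overflow branch above, assuming a power-of-two ring
size; the raw wptr value and the ring size are made up, and plain variables
replace the RREG32/WREG32 register accesses:

#include <stdint.h>
#include <stdio.h>

#define RB_OVERFLOW (1 << 0)                      /* low bit of IH_RB_WPTR */

int main(void)
{
	const uint32_t ring_size = 64 * 1024;     /* bytes; assumed power of two */
	const uint32_t ptr_mask = ring_size - 1;  /* plays rdev->ih.ptr_mask */
	uint32_t wptr = 0x10020 | RB_OVERFLOW;    /* made-up raw register value */
	uint32_t rptr = 0x40;                     /* where reading had stopped */

	if (wptr & RB_OVERFLOW) {
		/* skip 16 bytes past wptr: the oldest vector not yet overwritten */
		rptr = (wptr + 16) & ptr_mask;
		printf("overflow: resume at rptr 0x%x\n", (unsigned)rptr);
	}
	printf("wptr offset 0x%x\n", (unsigned)(wptr & ptr_mask));
	return 0;
}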
1633int evergreen_irq_process(struct radeon_device *rdev)
1634{
1635 u32 wptr = evergreen_get_ih_wptr(rdev);
1636 u32 rptr = rdev->ih.rptr;
1637 u32 src_id, src_data;
1638 u32 ring_index;
1639 u32 disp_int, disp_int_cont, disp_int_cont2;
1640 u32 disp_int_cont3, disp_int_cont4, disp_int_cont5;
1641 unsigned long flags;
1642 bool queue_hotplug = false;
1643
1644 DRM_DEBUG("evergreen_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
1645 if (!rdev->ih.enabled)
1646 return IRQ_NONE;
1647
1648 spin_lock_irqsave(&rdev->ih.lock, flags);
1649
1650 if (rptr == wptr) {
1651 spin_unlock_irqrestore(&rdev->ih.lock, flags);
1652 return IRQ_NONE;
1653 }
1654 if (rdev->shutdown) {
1655 spin_unlock_irqrestore(&rdev->ih.lock, flags);
1656 return IRQ_NONE;
1657 }
1658
1659restart_ih:
1660 /* display interrupts */
1661 evergreen_irq_ack(rdev, &disp_int, &disp_int_cont, &disp_int_cont2,
1662 &disp_int_cont3, &disp_int_cont4, &disp_int_cont5);
1663
1664 rdev->ih.wptr = wptr;
1665 while (rptr != wptr) {
1666 /* wptr/rptr are in bytes! */
1667 ring_index = rptr / 4;
1668 src_id = rdev->ih.ring[ring_index] & 0xff;
1669 src_data = rdev->ih.ring[ring_index + 1] & 0xfffffff;
1670
1671 switch (src_id) {
1672 case 1: /* D1 vblank/vline */
1673 switch (src_data) {
1674 case 0: /* D1 vblank */
1675 if (disp_int & LB_D1_VBLANK_INTERRUPT) {
1676 drm_handle_vblank(rdev->ddev, 0);
1677 wake_up(&rdev->irq.vblank_queue);
1678 disp_int &= ~LB_D1_VBLANK_INTERRUPT;
1679 DRM_DEBUG("IH: D1 vblank\n");
1680 }
1681 break;
1682 case 1: /* D1 vline */
1683 if (disp_int & LB_D1_VLINE_INTERRUPT) {
1684 disp_int &= ~LB_D1_VLINE_INTERRUPT;
1685 DRM_DEBUG("IH: D1 vline\n");
1686 }
1687 break;
1688 default:
1689 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
1690 break;
1691 }
1692 break;
1693 case 2: /* D2 vblank/vline */
1694 switch (src_data) {
1695 case 0: /* D2 vblank */
1696 if (disp_int_cont & LB_D2_VBLANK_INTERRUPT) {
1697 drm_handle_vblank(rdev->ddev, 1);
1698 wake_up(&rdev->irq.vblank_queue);
1699 disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
1700 DRM_DEBUG("IH: D2 vblank\n");
1701 }
1702 break;
1703 case 1: /* D2 vline */
1704 if (disp_int_cont & LB_D2_VLINE_INTERRUPT) {
1705 disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;
1706 DRM_DEBUG("IH: D2 vline\n");
1707 }
1708 break;
1709 default:
1710 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
1711 break;
1712 }
1713 break;
1714 case 3: /* D3 vblank/vline */
1715 switch (src_data) {
1716 case 0: /* D3 vblank */
1717 if (disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) {
1718 drm_handle_vblank(rdev->ddev, 2);
1719 wake_up(&rdev->irq.vblank_queue);
1720 disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
1721 DRM_DEBUG("IH: D3 vblank\n");
1722 }
1723 break;
1724 case 1: /* D3 vline */
1725 if (disp_int_cont2 & LB_D3_VLINE_INTERRUPT) {
1726 disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;
1727 DRM_DEBUG("IH: D3 vline\n");
1728 }
1729 break;
1730 default:
1731 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
1732 break;
1733 }
1734 break;
1735 case 4: /* D4 vblank/vline */
1736 switch (src_data) {
1737 case 0: /* D4 vblank */
1738 if (disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) {
1739 drm_handle_vblank(rdev->ddev, 3);
1740 wake_up(&rdev->irq.vblank_queue);
1741 disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
1742 DRM_DEBUG("IH: D4 vblank\n");
1743 }
1744 break;
1745 case 1: /* D4 vline */
1746 if (disp_int_cont3 & LB_D4_VLINE_INTERRUPT) {
1747 disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;
1748 DRM_DEBUG("IH: D4 vline\n");
1749 }
1750 break;
1751 default:
1752 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
1753 break;
1754 }
1755 break;
1756 case 5: /* D5 vblank/vline */
1757 switch (src_data) {
1758 case 0: /* D5 vblank */
1759 if (disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) {
1760 drm_handle_vblank(rdev->ddev, 4);
1761 wake_up(&rdev->irq.vblank_queue);
1762 disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
1763 DRM_DEBUG("IH: D5 vblank\n");
1764 }
1765 break;
1766 case 1: /* D5 vline */
1767 if (disp_int_cont4 & LB_D5_VLINE_INTERRUPT) {
1768 disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
1769 DRM_DEBUG("IH: D5 vline\n");
1770 }
1771 break;
1772 default:
1773 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
1774 break;
1775 }
1776 break;
1777 case 6: /* D6 vblank/vline */
1778 switch (src_data) {
1779 case 0: /* D6 vblank */
1780 if (disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) {
1781 drm_handle_vblank(rdev->ddev, 5);
1782 wake_up(&rdev->irq.vblank_queue);
1783 disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
1784 DRM_DEBUG("IH: D6 vblank\n");
1785 }
1786 break;
1787 case 1: /* D6 vline */
1788 if (disp_int_cont5 & LB_D6_VLINE_INTERRUPT) {
1789 disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
1790 DRM_DEBUG("IH: D6 vline\n");
1791 }
1792 break;
1793 default:
1794 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
1795 break;
1796 }
1797 break;
1798 case 42: /* HPD hotplug */
1799 switch (src_data) {
1800 case 0:
1801 if (disp_int & DC_HPD1_INTERRUPT) {
1802 disp_int &= ~DC_HPD1_INTERRUPT;
1803 queue_hotplug = true;
1804 DRM_DEBUG("IH: HPD1\n");
1805 }
1806 break;
1807 case 1:
1808 if (disp_int_cont & DC_HPD2_INTERRUPT) {
1809 disp_int_cont &= ~DC_HPD2_INTERRUPT;
1810 queue_hotplug = true;
1811 DRM_DEBUG("IH: HPD2\n");
1812 }
1813 break;
1814 case 2:
1815 if (disp_int_cont2 & DC_HPD3_INTERRUPT) {
1816 disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
1817 queue_hotplug = true;
1818 DRM_DEBUG("IH: HPD3\n");
1819 }
1820 break;
1821 case 3:
1822 if (disp_int_cont3 & DC_HPD4_INTERRUPT) {
1823 disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
1824 queue_hotplug = true;
1825 DRM_DEBUG("IH: HPD4\n");
1826 }
1827 break;
1828 case 4:
1829 if (disp_int_cont4 & DC_HPD5_INTERRUPT) {
1830 disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
1831 queue_hotplug = true;
1832 DRM_DEBUG("IH: HPD5\n");
1833 }
1834 break;
1835 case 5:
1836 if (disp_int_cont5 & DC_HPD6_INTERRUPT) {
1837 disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
1838 queue_hotplug = true;
1839 DRM_DEBUG("IH: HPD6\n");
1840 }
1841 break;
1842 default:
1843 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
1844 break;
1845 }
1846 break;
1847 case 176: /* CP_INT in ring buffer */
1848 case 177: /* CP_INT in IB1 */
1849 case 178: /* CP_INT in IB2 */
1850 DRM_DEBUG("IH: CP int: 0x%08x\n", src_data);
1851 radeon_fence_process(rdev);
1852 break;
1853 case 181: /* CP EOP event */
1854 DRM_DEBUG("IH: CP EOP\n");
1855 break;
1856 default:
1857 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
1858 break;
1859 }
1860
1861 /* wptr/rptr are in bytes! */
1862 rptr += 16;
1863 rptr &= rdev->ih.ptr_mask;
1864 }
1865 /* make sure wptr hasn't changed while processing */
1866 wptr = evergreen_get_ih_wptr(rdev);
1867 if (wptr != rdev->ih.wptr)
1868 goto restart_ih;
1869 if (queue_hotplug)
1870 queue_work(rdev->wq, &rdev->hotplug_work);
1871 rdev->ih.rptr = rptr;
1872 WREG32(IH_RB_RPTR, rdev->ih.rptr);
1873 spin_unlock_irqrestore(&rdev->ih.lock, flags);
1874 return IRQ_HANDLED;
1875}
1876
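The loop above walks the ring in byte units, 16 bytes (four dwords) per
interrupt vector, with dword 0 carrying the source id and dword 1 the source
data. A toy decode of a 64-byte ring makes the index arithmetic concrete; the
two vector payloads are invented for illustration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* two 16-byte vectors: dword 0 = src_id, dword 1 = src_data */
	uint32_t ring[16] = {
		[0] = 1,  [1] = 0,    /* src_id 1, src_data 0: D1 vblank */
		[4] = 42, [5] = 5,    /* src_id 42, src_data 5: HPD6 */
	};
	uint32_t rptr = 0, wptr = 32;                /* byte offsets */
	const uint32_t ptr_mask = sizeof(ring) - 1;  /* 64-byte ring */

	while (rptr != wptr) {
		uint32_t ring_index = rptr / 4;      /* bytes -> dword index */
		uint32_t src_id = ring[ring_index] & 0xff;
		uint32_t src_data = ring[ring_index + 1] & 0xfffffff;
		printf("src_id %u, src_data %u\n",
		       (unsigned)src_id, (unsigned)src_data);
		rptr = (rptr + 16) & ptr_mask;       /* next vector, with wrap */
	}
	return 0;
}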
496static int evergreen_startup(struct radeon_device *rdev) 1877static int evergreen_startup(struct radeon_device *rdev)
497{ 1878{
498#if 0
499 int r; 1879 int r;
500 1880
501 if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) { 1881 if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
@@ -505,17 +1885,15 @@ static int evergreen_startup(struct radeon_device *rdev)
505 return r; 1885 return r;
506 } 1886 }
507 } 1887 }
508#endif 1888
509 evergreen_mc_program(rdev); 1889 evergreen_mc_program(rdev);
510#if 0
511 if (rdev->flags & RADEON_IS_AGP) { 1890 if (rdev->flags & RADEON_IS_AGP) {
512 evergreem_agp_enable(rdev); 1891 evergreen_agp_enable(rdev);
513 } else { 1892 } else {
514 r = evergreen_pcie_gart_enable(rdev); 1893 r = evergreen_pcie_gart_enable(rdev);
515 if (r) 1894 if (r)
516 return r; 1895 return r;
517 } 1896 }
518#endif
519 evergreen_gpu_init(rdev); 1897 evergreen_gpu_init(rdev);
520#if 0 1898#if 0
521 if (!rdev->r600_blit.shader_obj) { 1899 if (!rdev->r600_blit.shader_obj) {
@@ -536,6 +1914,7 @@ static int evergreen_startup(struct radeon_device *rdev)
536 DRM_ERROR("failed to pin blit object %d\n", r); 1914 DRM_ERROR("failed to pin blit object %d\n", r);
537 return r; 1915 return r;
538 } 1916 }
1917#endif
539 1918
540 /* Enable IRQ */ 1919 /* Enable IRQ */
541 r = r600_irq_init(rdev); 1920 r = r600_irq_init(rdev);
@@ -544,7 +1923,7 @@ static int evergreen_startup(struct radeon_device *rdev)
544 radeon_irq_kms_fini(rdev); 1923 radeon_irq_kms_fini(rdev);
545 return r; 1924 return r;
546 } 1925 }
547 r600_irq_set(rdev); 1926 evergreen_irq_set(rdev);
548 1927
549 r = radeon_ring_init(rdev, rdev->cp.ring_size); 1928 r = radeon_ring_init(rdev, rdev->cp.ring_size);
550 if (r) 1929 if (r)
@@ -552,12 +1931,12 @@ static int evergreen_startup(struct radeon_device *rdev)
552 r = evergreen_cp_load_microcode(rdev); 1931 r = evergreen_cp_load_microcode(rdev);
553 if (r) 1932 if (r)
554 return r; 1933 return r;
555 r = r600_cp_resume(rdev); 1934 r = evergreen_cp_resume(rdev);
556 if (r) 1935 if (r)
557 return r; 1936 return r;
558 /* write back buffers are not vital so don't worry about failure */ 1937 /* write back buffers are not vital so don't worry about failure */
559 r600_wb_enable(rdev); 1938 r600_wb_enable(rdev);
560#endif 1939
561 return 0; 1940 return 0;
562} 1941}
563 1942
@@ -582,13 +1961,13 @@ int evergreen_resume(struct radeon_device *rdev)
582 DRM_ERROR("evergreen startup failed on resume\n"); 1961 DRM_ERROR("evergreen startup failed on resume\n");
583 return r; 1962 return r;
584 } 1963 }
585#if 0 1964
586 r = r600_ib_test(rdev); 1965 r = r600_ib_test(rdev);
587 if (r) { 1966 if (r) {
588 DRM_ERROR("radeon: failed testing IB (%d).\n", r); 1967 DRM_ERROR("radeon: failed testing IB (%d).\n", r);
589 return r; 1968 return r;
590 } 1969 }
591#endif 1970
592 return r; 1971 return r;
593 1972
594} 1973}
@@ -597,12 +1976,14 @@ int evergreen_suspend(struct radeon_device *rdev)
597{ 1976{
598#if 0 1977#if 0
599 int r; 1978 int r;
600 1979#endif
601 /* FIXME: we should wait for ring to be empty */ 1980 /* FIXME: we should wait for ring to be empty */
602 r700_cp_stop(rdev); 1981 r700_cp_stop(rdev);
603 rdev->cp.ready = false; 1982 rdev->cp.ready = false;
1983 evergreen_irq_suspend(rdev);
604 r600_wb_disable(rdev); 1984 r600_wb_disable(rdev);
605 evergreen_pcie_gart_disable(rdev); 1985 evergreen_pcie_gart_disable(rdev);
1986#if 0
606 /* unpin shaders bo */ 1987 /* unpin shaders bo */
607 r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); 1988 r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
608 if (likely(r == 0)) { 1989 if (likely(r == 0)) {
@@ -702,7 +2083,7 @@ int evergreen_init(struct radeon_device *rdev)
702 r = radeon_bo_init(rdev); 2083 r = radeon_bo_init(rdev);
703 if (r) 2084 if (r)
704 return r; 2085 return r;
705#if 0 2086
706 r = radeon_irq_kms_init(rdev); 2087 r = radeon_irq_kms_init(rdev);
707 if (r) 2088 if (r)
708 return r; 2089 return r;
@@ -716,14 +2097,16 @@ int evergreen_init(struct radeon_device *rdev)
716 r = r600_pcie_gart_init(rdev); 2097 r = r600_pcie_gart_init(rdev);
717 if (r) 2098 if (r)
718 return r; 2099 return r;
719#endif 2100
720 rdev->accel_working = false; 2101 rdev->accel_working = false;
721 r = evergreen_startup(rdev); 2102 r = evergreen_startup(rdev);
722 if (r) { 2103 if (r) {
723 evergreen_suspend(rdev); 2104 dev_err(rdev->dev, "disabling GPU acceleration\n");
724 /*r600_wb_fini(rdev);*/ 2105 r700_cp_fini(rdev);
725 /*radeon_ring_fini(rdev);*/ 2106 r600_wb_fini(rdev);
726 /*evergreen_pcie_gart_fini(rdev);*/ 2107 r600_irq_fini(rdev);
2108 radeon_irq_kms_fini(rdev);
2109 evergreen_pcie_gart_fini(rdev);
727 rdev->accel_working = false; 2110 rdev->accel_working = false;
728 } 2111 }
729 if (rdev->accel_working) { 2112 if (rdev->accel_working) {
@@ -744,15 +2127,12 @@ int evergreen_init(struct radeon_device *rdev)
744void evergreen_fini(struct radeon_device *rdev) 2127void evergreen_fini(struct radeon_device *rdev)
745{ 2128{
746 radeon_pm_fini(rdev); 2129 radeon_pm_fini(rdev);
747 evergreen_suspend(rdev); 2130 /*r600_blit_fini(rdev);*/
748#if 0 2131 r700_cp_fini(rdev);
749 r600_blit_fini(rdev); 2132 r600_wb_fini(rdev);
750 r600_irq_fini(rdev); 2133 r600_irq_fini(rdev);
751 radeon_irq_kms_fini(rdev); 2134 radeon_irq_kms_fini(rdev);
752 radeon_ring_fini(rdev);
753 r600_wb_fini(rdev);
754 evergreen_pcie_gart_fini(rdev); 2135 evergreen_pcie_gart_fini(rdev);
755#endif
756 radeon_gem_fini(rdev); 2136 radeon_gem_fini(rdev);
757 radeon_fence_driver_fini(rdev); 2137 radeon_fence_driver_fini(rdev);
758 radeon_clocks_fini(rdev); 2138 radeon_clocks_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
new file mode 100644
index 000000000000..93e9e17ad54a
--- /dev/null
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -0,0 +1,556 @@
1/*
2 * Copyright 2010 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Alex Deucher
23 */
24#ifndef EVERGREEND_H
25#define EVERGREEND_H
26
27#define EVERGREEN_MAX_SH_GPRS 256
28#define EVERGREEN_MAX_TEMP_GPRS 16
29#define EVERGREEN_MAX_SH_THREADS 256
30#define EVERGREEN_MAX_SH_STACK_ENTRIES 4096
31#define EVERGREEN_MAX_FRC_EOV_CNT 16384
32#define EVERGREEN_MAX_BACKENDS 8
33#define EVERGREEN_MAX_BACKENDS_MASK 0xFF
34#define EVERGREEN_MAX_SIMDS 16
35#define EVERGREEN_MAX_SIMDS_MASK 0xFFFF
36#define EVERGREEN_MAX_PIPES 8
37#define EVERGREEN_MAX_PIPES_MASK 0xFF
38#define EVERGREEN_MAX_LDS_NUM 0xFFFF
39
40/* Registers */
41
42#define RCU_IND_INDEX 0x100
43#define RCU_IND_DATA 0x104
44
45#define GRBM_GFX_INDEX 0x802C
46#define INSTANCE_INDEX(x) ((x) << 0)
47#define SE_INDEX(x) ((x) << 16)
48#define INSTANCE_BROADCAST_WRITES (1 << 30)
49#define SE_BROADCAST_WRITES (1 << 31)
50#define RLC_GFX_INDEX 0x3fC4
51#define CC_GC_SHADER_PIPE_CONFIG 0x8950
52#define WRITE_DIS (1 << 0)
53#define CC_RB_BACKEND_DISABLE 0x98F4
54#define BACKEND_DISABLE(x) ((x) << 16)
55#define GB_ADDR_CONFIG 0x98F8
56#define NUM_PIPES(x) ((x) << 0)
57#define PIPE_INTERLEAVE_SIZE(x) ((x) << 4)
58#define BANK_INTERLEAVE_SIZE(x) ((x) << 8)
59#define NUM_SHADER_ENGINES(x) ((x) << 12)
60#define SHADER_ENGINE_TILE_SIZE(x) ((x) << 16)
61#define NUM_GPUS(x) ((x) << 20)
62#define MULTI_GPU_TILE_SIZE(x) ((x) << 24)
63#define ROW_SIZE(x) ((x) << 28)
64#define GB_BACKEND_MAP 0x98FC
65#define DMIF_ADDR_CONFIG 0xBD4
66#define HDP_ADDR_CONFIG 0x2F48
67
68#define CC_SYS_RB_BACKEND_DISABLE 0x3F88
69#define GC_USER_RB_BACKEND_DISABLE 0x9B7C
70
71#define CGTS_SYS_TCC_DISABLE 0x3F90
72#define CGTS_TCC_DISABLE 0x9148
73#define CGTS_USER_SYS_TCC_DISABLE 0x3F94
74#define CGTS_USER_TCC_DISABLE 0x914C
75
76#define CONFIG_MEMSIZE 0x5428
77
78#define CP_ME_CNTL 0x86D8
79#define CP_ME_HALT (1 << 28)
80#define CP_PFP_HALT (1 << 26)
81#define CP_ME_RAM_DATA 0xC160
82#define CP_ME_RAM_RADDR 0xC158
83#define CP_ME_RAM_WADDR 0xC15C
84#define CP_MEQ_THRESHOLDS 0x8764
85#define STQ_SPLIT(x) ((x) << 0)
86#define CP_PERFMON_CNTL 0x87FC
87#define CP_PFP_UCODE_ADDR 0xC150
88#define CP_PFP_UCODE_DATA 0xC154
89#define CP_QUEUE_THRESHOLDS 0x8760
90#define ROQ_IB1_START(x) ((x) << 0)
91#define ROQ_IB2_START(x) ((x) << 8)
92#define CP_RB_BASE 0xC100
93#define CP_RB_CNTL 0xC104
94#define RB_BUFSZ(x) ((x) << 0)
95#define RB_BLKSZ(x) ((x) << 8)
96#define RB_NO_UPDATE (1 << 27)
97#define RB_RPTR_WR_ENA (1 << 31)
98#define BUF_SWAP_32BIT (2 << 16)
99#define CP_RB_RPTR 0x8700
100#define CP_RB_RPTR_ADDR 0xC10C
101#define CP_RB_RPTR_ADDR_HI 0xC110
102#define CP_RB_RPTR_WR 0xC108
103#define CP_RB_WPTR 0xC114
104#define CP_RB_WPTR_ADDR 0xC118
105#define CP_RB_WPTR_ADDR_HI 0xC11C
106#define CP_RB_WPTR_DELAY 0x8704
107#define CP_SEM_WAIT_TIMER 0x85BC
108#define CP_DEBUG 0xC1FC
109
110
111#define GC_USER_SHADER_PIPE_CONFIG 0x8954
112#define INACTIVE_QD_PIPES(x) ((x) << 8)
113#define INACTIVE_QD_PIPES_MASK 0x0000FF00
114#define INACTIVE_SIMDS(x) ((x) << 16)
115#define INACTIVE_SIMDS_MASK 0x00FF0000
116
117#define GRBM_CNTL 0x8000
118#define GRBM_READ_TIMEOUT(x) ((x) << 0)
119#define GRBM_SOFT_RESET 0x8020
120#define SOFT_RESET_CP (1 << 0)
121#define SOFT_RESET_CB (1 << 1)
122#define SOFT_RESET_DB (1 << 3)
123#define SOFT_RESET_PA (1 << 5)
124#define SOFT_RESET_SC (1 << 6)
125#define SOFT_RESET_SPI (1 << 8)
126#define SOFT_RESET_SH (1 << 9)
127#define SOFT_RESET_SX (1 << 10)
128#define SOFT_RESET_TC (1 << 11)
129#define SOFT_RESET_TA (1 << 12)
130#define SOFT_RESET_VC (1 << 13)
131#define SOFT_RESET_VGT (1 << 14)
132
133#define GRBM_STATUS 0x8010
134#define CMDFIFO_AVAIL_MASK 0x0000000F
135#define SRBM_RQ_PENDING (1 << 5)
136#define CF_RQ_PENDING (1 << 7)
137#define PF_RQ_PENDING (1 << 8)
138#define GRBM_EE_BUSY (1 << 10)
139#define SX_CLEAN (1 << 11)
140#define DB_CLEAN (1 << 12)
141#define CB_CLEAN (1 << 13)
142#define TA_BUSY (1 << 14)
143#define VGT_BUSY_NO_DMA (1 << 16)
144#define VGT_BUSY (1 << 17)
145#define SX_BUSY (1 << 20)
146#define SH_BUSY (1 << 21)
147#define SPI_BUSY (1 << 22)
148#define SC_BUSY (1 << 24)
149#define PA_BUSY (1 << 25)
150#define DB_BUSY (1 << 26)
151#define CP_COHERENCY_BUSY (1 << 28)
152#define CP_BUSY (1 << 29)
153#define CB_BUSY (1 << 30)
154#define GUI_ACTIVE (1 << 31)
155#define GRBM_STATUS_SE0 0x8014
156#define GRBM_STATUS_SE1 0x8018
157#define SE_SX_CLEAN (1 << 0)
158#define SE_DB_CLEAN (1 << 1)
159#define SE_CB_CLEAN (1 << 2)
160#define SE_TA_BUSY (1 << 25)
161#define SE_SX_BUSY (1 << 26)
162#define SE_SPI_BUSY (1 << 27)
163#define SE_SH_BUSY (1 << 28)
164#define SE_SC_BUSY (1 << 29)
165#define SE_DB_BUSY (1 << 30)
166#define SE_CB_BUSY (1 << 31)
167
168#define HDP_HOST_PATH_CNTL 0x2C00
169#define HDP_NONSURFACE_BASE 0x2C04
170#define HDP_NONSURFACE_INFO 0x2C08
171#define HDP_NONSURFACE_SIZE 0x2C0C
172#define HDP_REG_COHERENCY_FLUSH_CNTL 0x54A0
173#define HDP_TILING_CONFIG 0x2F3C
174
175#define MC_SHARED_CHMAP 0x2004
176#define NOOFCHAN_SHIFT 12
177#define NOOFCHAN_MASK 0x00003000
178
179#define MC_ARB_RAMCFG 0x2760
180#define NOOFBANK_SHIFT 0
181#define NOOFBANK_MASK 0x00000003
182#define NOOFRANK_SHIFT 2
183#define NOOFRANK_MASK 0x00000004
184#define NOOFROWS_SHIFT 3
185#define NOOFROWS_MASK 0x00000038
186#define NOOFCOLS_SHIFT 6
187#define NOOFCOLS_MASK 0x000000C0
188#define CHANSIZE_SHIFT 8
189#define CHANSIZE_MASK 0x00000100
190#define BURSTLENGTH_SHIFT 9
191#define BURSTLENGTH_MASK 0x00000200
192#define CHANSIZE_OVERRIDE (1 << 11)
193#define MC_VM_AGP_TOP 0x2028
194#define MC_VM_AGP_BOT 0x202C
195#define MC_VM_AGP_BASE 0x2030
196#define MC_VM_FB_LOCATION 0x2024
197#define MC_VM_MB_L1_TLB0_CNTL 0x2234
198#define MC_VM_MB_L1_TLB1_CNTL 0x2238
199#define MC_VM_MB_L1_TLB2_CNTL 0x223C
200#define MC_VM_MB_L1_TLB3_CNTL 0x2240
201#define ENABLE_L1_TLB (1 << 0)
202#define ENABLE_L1_FRAGMENT_PROCESSING (1 << 1)
203#define SYSTEM_ACCESS_MODE_PA_ONLY (0 << 3)
204#define SYSTEM_ACCESS_MODE_USE_SYS_MAP (1 << 3)
205#define SYSTEM_ACCESS_MODE_IN_SYS (2 << 3)
206#define SYSTEM_ACCESS_MODE_NOT_IN_SYS (3 << 3)
207#define SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU (0 << 5)
208#define EFFECTIVE_L1_TLB_SIZE(x) ((x)<<15)
209#define EFFECTIVE_L1_QUEUE_SIZE(x) ((x)<<18)
210#define MC_VM_MD_L1_TLB0_CNTL 0x2654
211#define MC_VM_MD_L1_TLB1_CNTL 0x2658
212#define MC_VM_MD_L1_TLB2_CNTL 0x265C
213#define MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR 0x203C
214#define MC_VM_SYSTEM_APERTURE_HIGH_ADDR 0x2038
215#define MC_VM_SYSTEM_APERTURE_LOW_ADDR 0x2034
216
217#define PA_CL_ENHANCE 0x8A14
218#define CLIP_VTX_REORDER_ENA (1 << 0)
219#define NUM_CLIP_SEQ(x) ((x) << 1)
220#define PA_SC_AA_CONFIG 0x28C04
221#define PA_SC_CLIPRECT_RULE 0x2820C
222#define PA_SC_EDGERULE 0x28230
223#define PA_SC_FIFO_SIZE 0x8BCC
224#define SC_PRIM_FIFO_SIZE(x) ((x) << 0)
225#define SC_HIZ_TILE_FIFO_SIZE(x) ((x) << 12)
226#define SC_EARLYZ_TILE_FIFO_SIZE(x) ((x) << 20)
227#define PA_SC_FORCE_EOV_MAX_CNTS 0x8B24
228#define FORCE_EOV_MAX_CLK_CNT(x) ((x) << 0)
229#define FORCE_EOV_MAX_REZ_CNT(x) ((x) << 16)
230#define PA_SC_LINE_STIPPLE 0x28A0C
231#define PA_SC_LINE_STIPPLE_STATE 0x8B10
232
233#define SCRATCH_REG0 0x8500
234#define SCRATCH_REG1 0x8504
235#define SCRATCH_REG2 0x8508
236#define SCRATCH_REG3 0x850C
237#define SCRATCH_REG4 0x8510
238#define SCRATCH_REG5 0x8514
239#define SCRATCH_REG6 0x8518
240#define SCRATCH_REG7 0x851C
241#define SCRATCH_UMSK 0x8540
242#define SCRATCH_ADDR 0x8544
243
244#define SMX_DC_CTL0 0xA020
245#define USE_HASH_FUNCTION (1 << 0)
246#define NUMBER_OF_SETS(x) ((x) << 1)
247#define FLUSH_ALL_ON_EVENT (1 << 10)
248#define STALL_ON_EVENT (1 << 11)
249#define SMX_EVENT_CTL 0xA02C
250#define ES_FLUSH_CTL(x) ((x) << 0)
251#define GS_FLUSH_CTL(x) ((x) << 3)
252#define ACK_FLUSH_CTL(x) ((x) << 6)
253#define SYNC_FLUSH_CTL (1 << 8)
254
255#define SPI_CONFIG_CNTL 0x9100
256#define GPR_WRITE_PRIORITY(x) ((x) << 0)
257#define SPI_CONFIG_CNTL_1 0x913C
258#define VTX_DONE_DELAY(x) ((x) << 0)
259#define INTERP_ONE_PRIM_PER_ROW (1 << 4)
260#define SPI_INPUT_Z 0x286D8
261#define SPI_PS_IN_CONTROL_0 0x286CC
262#define NUM_INTERP(x) ((x)<<0)
263#define POSITION_ENA (1<<8)
264#define POSITION_CENTROID (1<<9)
265#define POSITION_ADDR(x) ((x)<<10)
266#define PARAM_GEN(x) ((x)<<15)
267#define PARAM_GEN_ADDR(x) ((x)<<19)
268#define BARYC_SAMPLE_CNTL(x) ((x)<<26)
269#define PERSP_GRADIENT_ENA (1<<28)
270#define LINEAR_GRADIENT_ENA (1<<29)
271#define POSITION_SAMPLE (1<<30)
272#define BARYC_AT_SAMPLE_ENA (1<<31)
273
274#define SQ_CONFIG 0x8C00
275#define VC_ENABLE (1 << 0)
276#define EXPORT_SRC_C (1 << 1)
277#define CS_PRIO(x) ((x) << 18)
278#define LS_PRIO(x) ((x) << 20)
279#define HS_PRIO(x) ((x) << 22)
280#define PS_PRIO(x) ((x) << 24)
281#define VS_PRIO(x) ((x) << 26)
282#define GS_PRIO(x) ((x) << 28)
283#define ES_PRIO(x) ((x) << 30)
284#define SQ_GPR_RESOURCE_MGMT_1 0x8C04
285#define NUM_PS_GPRS(x) ((x) << 0)
286#define NUM_VS_GPRS(x) ((x) << 16)
287#define NUM_CLAUSE_TEMP_GPRS(x) ((x) << 28)
288#define SQ_GPR_RESOURCE_MGMT_2 0x8C08
289#define NUM_GS_GPRS(x) ((x) << 0)
290#define NUM_ES_GPRS(x) ((x) << 16)
291#define SQ_GPR_RESOURCE_MGMT_3 0x8C0C
292#define NUM_HS_GPRS(x) ((x) << 0)
293#define NUM_LS_GPRS(x) ((x) << 16)
294#define SQ_THREAD_RESOURCE_MGMT 0x8C18
295#define NUM_PS_THREADS(x) ((x) << 0)
296#define NUM_VS_THREADS(x) ((x) << 8)
297#define NUM_GS_THREADS(x) ((x) << 16)
298#define NUM_ES_THREADS(x) ((x) << 24)
299#define SQ_THREAD_RESOURCE_MGMT_2 0x8C1C
300#define NUM_HS_THREADS(x) ((x) << 0)
301#define NUM_LS_THREADS(x) ((x) << 8)
302#define SQ_STACK_RESOURCE_MGMT_1 0x8C20
303#define NUM_PS_STACK_ENTRIES(x) ((x) << 0)
304#define NUM_VS_STACK_ENTRIES(x) ((x) << 16)
305#define SQ_STACK_RESOURCE_MGMT_2 0x8C24
306#define NUM_GS_STACK_ENTRIES(x) ((x) << 0)
307#define NUM_ES_STACK_ENTRIES(x) ((x) << 16)
308#define SQ_STACK_RESOURCE_MGMT_3 0x8C28
309#define NUM_HS_STACK_ENTRIES(x) ((x) << 0)
310#define NUM_LS_STACK_ENTRIES(x) ((x) << 16)
311#define SQ_DYN_GPR_CNTL_PS_FLUSH_REQ 0x8D8C
312#define SQ_LDS_RESOURCE_MGMT 0x8E2C
313
314#define SQ_MS_FIFO_SIZES 0x8CF0
315#define CACHE_FIFO_SIZE(x) ((x) << 0)
316#define FETCH_FIFO_HIWATER(x) ((x) << 8)
317#define DONE_FIFO_HIWATER(x) ((x) << 16)
318#define ALU_UPDATE_FIFO_HIWATER(x) ((x) << 24)
319
320#define SX_DEBUG_1 0x9058
321#define ENABLE_NEW_SMX_ADDRESS (1 << 16)
322#define SX_EXPORT_BUFFER_SIZES 0x900C
323#define COLOR_BUFFER_SIZE(x) ((x) << 0)
324#define POSITION_BUFFER_SIZE(x) ((x) << 8)
325#define SMX_BUFFER_SIZE(x) ((x) << 16)
326#define SX_MISC 0x28350
327
328#define CB_PERF_CTR0_SEL_0 0x9A20
329#define CB_PERF_CTR0_SEL_1 0x9A24
330#define CB_PERF_CTR1_SEL_0 0x9A28
331#define CB_PERF_CTR1_SEL_1 0x9A2C
332#define CB_PERF_CTR2_SEL_0 0x9A30
333#define CB_PERF_CTR2_SEL_1 0x9A34
334#define CB_PERF_CTR3_SEL_0 0x9A38
335#define CB_PERF_CTR3_SEL_1 0x9A3C
336
337#define TA_CNTL_AUX 0x9508
338#define DISABLE_CUBE_WRAP (1 << 0)
339#define DISABLE_CUBE_ANISO (1 << 1)
340#define SYNC_GRADIENT (1 << 24)
341#define SYNC_WALKER (1 << 25)
342#define SYNC_ALIGNER (1 << 26)
343
344#define VGT_CACHE_INVALIDATION 0x88C4
345#define CACHE_INVALIDATION(x) ((x) << 0)
346#define VC_ONLY 0
347#define TC_ONLY 1
348#define VC_AND_TC 2
349#define AUTO_INVLD_EN(x) ((x) << 6)
350#define NO_AUTO 0
351#define ES_AUTO 1
352#define GS_AUTO 2
353#define ES_AND_GS_AUTO 3
354#define VGT_GS_VERTEX_REUSE 0x88D4
355#define VGT_NUM_INSTANCES 0x8974
356#define VGT_OUT_DEALLOC_CNTL 0x28C5C
357#define DEALLOC_DIST_MASK 0x0000007F
358#define VGT_VERTEX_REUSE_BLOCK_CNTL 0x28C58
359#define VTX_REUSE_DEPTH_MASK 0x000000FF
360
361#define VM_CONTEXT0_CNTL 0x1410
362#define ENABLE_CONTEXT (1 << 0)
363#define PAGE_TABLE_DEPTH(x) (((x) & 3) << 1)
364#define RANGE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 4)
365#define VM_CONTEXT1_CNTL 0x1414
366#define VM_CONTEXT0_PAGE_TABLE_BASE_ADDR 0x153C
367#define VM_CONTEXT0_PAGE_TABLE_END_ADDR 0x157C
368#define VM_CONTEXT0_PAGE_TABLE_START_ADDR 0x155C
369#define VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR 0x1518
370#define VM_CONTEXT0_REQUEST_RESPONSE 0x1470
371#define REQUEST_TYPE(x) (((x) & 0xf) << 0)
372#define RESPONSE_TYPE_MASK 0x000000F0
373#define RESPONSE_TYPE_SHIFT 4
374#define VM_L2_CNTL 0x1400
375#define ENABLE_L2_CACHE (1 << 0)
376#define ENABLE_L2_FRAGMENT_PROCESSING (1 << 1)
377#define ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE (1 << 9)
378#define EFFECTIVE_L2_QUEUE_SIZE(x) (((x) & 7) << 14)
379#define VM_L2_CNTL2 0x1404
380#define INVALIDATE_ALL_L1_TLBS (1 << 0)
381#define INVALIDATE_L2_CACHE (1 << 1)
382#define VM_L2_CNTL3 0x1408
383#define BANK_SELECT(x) ((x) << 0)
384#define CACHE_UPDATE_MODE(x) ((x) << 6)
385#define VM_L2_STATUS 0x140C
386#define L2_BUSY (1 << 0)
387
388#define WAIT_UNTIL 0x8040
389
390#define SRBM_STATUS 0x0E50
391#define SRBM_SOFT_RESET 0x0E60
392#define SRBM_SOFT_RESET_ALL_MASK 0x00FEEFA6
393#define SOFT_RESET_BIF (1 << 1)
394#define SOFT_RESET_CG (1 << 2)
395#define SOFT_RESET_DC (1 << 5)
396#define SOFT_RESET_GRBM (1 << 8)
397#define SOFT_RESET_HDP (1 << 9)
398#define SOFT_RESET_IH (1 << 10)
399#define SOFT_RESET_MC (1 << 11)
400#define SOFT_RESET_RLC (1 << 13)
401#define SOFT_RESET_ROM (1 << 14)
402#define SOFT_RESET_SEM (1 << 15)
403#define SOFT_RESET_VMC (1 << 17)
404#define SOFT_RESET_TST (1 << 21)
405#define SOFT_RESET_REGBB (1 << 22)
406#define SOFT_RESET_ORB (1 << 23)
407
408#define IH_RB_CNTL 0x3e00
409# define IH_RB_ENABLE (1 << 0)
410# define IH_IB_SIZE(x) ((x) << 1) /* log2 */
411# define IH_RB_FULL_DRAIN_ENABLE (1 << 6)
412# define IH_WPTR_WRITEBACK_ENABLE (1 << 8)
413# define IH_WPTR_WRITEBACK_TIMER(x) ((x) << 9) /* log2 */
414# define IH_WPTR_OVERFLOW_ENABLE (1 << 16)
415# define IH_WPTR_OVERFLOW_CLEAR (1 << 31)
416#define IH_RB_BASE 0x3e04
417#define IH_RB_RPTR 0x3e08
418#define IH_RB_WPTR 0x3e0c
419# define RB_OVERFLOW (1 << 0)
420# define WPTR_OFFSET_MASK 0x3fffc
421#define IH_RB_WPTR_ADDR_HI 0x3e10
422#define IH_RB_WPTR_ADDR_LO 0x3e14
423#define IH_CNTL 0x3e18
424# define ENABLE_INTR (1 << 0)
425# define IH_MC_SWAP(x) ((x) << 2)
426# define IH_MC_SWAP_NONE 0
427# define IH_MC_SWAP_16BIT 1
428# define IH_MC_SWAP_32BIT 2
429# define IH_MC_SWAP_64BIT 3
430# define RPTR_REARM (1 << 4)
431# define MC_WRREQ_CREDIT(x) ((x) << 15)
432# define MC_WR_CLEAN_CNT(x) ((x) << 20)
433
434#define CP_INT_CNTL 0xc124
435# define CNTX_BUSY_INT_ENABLE (1 << 19)
436# define CNTX_EMPTY_INT_ENABLE (1 << 20)
437# define SCRATCH_INT_ENABLE (1 << 25)
438# define TIME_STAMP_INT_ENABLE (1 << 26)
439# define IB2_INT_ENABLE (1 << 29)
440# define IB1_INT_ENABLE (1 << 30)
441# define RB_INT_ENABLE (1 << 31)
442#define CP_INT_STATUS 0xc128
443# define SCRATCH_INT_STAT (1 << 25)
444# define TIME_STAMP_INT_STAT (1 << 26)
445# define IB2_INT_STAT (1 << 29)
446# define IB1_INT_STAT (1 << 30)
447# define RB_INT_STAT (1 << 31)
448
449#define GRBM_INT_CNTL 0x8060
450# define RDERR_INT_ENABLE (1 << 0)
451# define GUI_IDLE_INT_ENABLE (1 << 19)
452
453/* 0x6e98, 0x7a98, 0x10698, 0x11298, 0x11e98, 0x12a98 */
454#define CRTC_STATUS_FRAME_COUNT 0x6e98
455
456/* 0x6bb8, 0x77b8, 0x103b8, 0x10fb8, 0x11bb8, 0x127b8 */
457#define VLINE_STATUS 0x6bb8
458# define VLINE_OCCURRED (1 << 0)
459# define VLINE_ACK (1 << 4)
460# define VLINE_STAT (1 << 12)
461# define VLINE_INTERRUPT (1 << 16)
462# define VLINE_INTERRUPT_TYPE (1 << 17)
463/* 0x6bbc, 0x77bc, 0x103bc, 0x10fbc, 0x11bbc, 0x127bc */
464#define VBLANK_STATUS 0x6bbc
465# define VBLANK_OCCURRED (1 << 0)
466# define VBLANK_ACK (1 << 4)
467# define VBLANK_STAT (1 << 12)
468# define VBLANK_INTERRUPT (1 << 16)
469# define VBLANK_INTERRUPT_TYPE (1 << 17)
470
471/* 0x6b40, 0x7740, 0x10340, 0x10f40, 0x11b40, 0x12740 */
472#define INT_MASK 0x6b40
473# define VBLANK_INT_MASK (1 << 0)
474# define VLINE_INT_MASK (1 << 4)
475
476#define DISP_INTERRUPT_STATUS 0x60f4
477# define LB_D1_VLINE_INTERRUPT (1 << 2)
478# define LB_D1_VBLANK_INTERRUPT (1 << 3)
479# define DC_HPD1_INTERRUPT (1 << 17)
480# define DC_HPD1_RX_INTERRUPT (1 << 18)
481# define DACA_AUTODETECT_INTERRUPT (1 << 22)
482# define DACB_AUTODETECT_INTERRUPT (1 << 23)
483# define DC_I2C_SW_DONE_INTERRUPT (1 << 24)
484# define DC_I2C_HW_DONE_INTERRUPT (1 << 25)
485#define DISP_INTERRUPT_STATUS_CONTINUE 0x60f8
486# define LB_D2_VLINE_INTERRUPT (1 << 2)
487# define LB_D2_VBLANK_INTERRUPT (1 << 3)
488# define DC_HPD2_INTERRUPT (1 << 17)
489# define DC_HPD2_RX_INTERRUPT (1 << 18)
490# define DISP_TIMER_INTERRUPT (1 << 24)
491#define DISP_INTERRUPT_STATUS_CONTINUE2 0x60fc
492# define LB_D3_VLINE_INTERRUPT (1 << 2)
493# define LB_D3_VBLANK_INTERRUPT (1 << 3)
494# define DC_HPD3_INTERRUPT (1 << 17)
495# define DC_HPD3_RX_INTERRUPT (1 << 18)
496#define DISP_INTERRUPT_STATUS_CONTINUE3 0x6100
497# define LB_D4_VLINE_INTERRUPT (1 << 2)
498# define LB_D4_VBLANK_INTERRUPT (1 << 3)
499# define DC_HPD4_INTERRUPT (1 << 17)
500# define DC_HPD4_RX_INTERRUPT (1 << 18)
501#define DISP_INTERRUPT_STATUS_CONTINUE4 0x614c
502# define LB_D5_VLINE_INTERRUPT (1 << 2)
503# define LB_D5_VBLANK_INTERRUPT (1 << 3)
504# define DC_HPD5_INTERRUPT (1 << 17)
505# define DC_HPD5_RX_INTERRUPT (1 << 18)
506#define DISP_INTERRUPT_STATUS_CONTINUE5 0x6150
507# define LB_D6_VLINE_INTERRUPT (1 << 2)
508# define LB_D6_VBLANK_INTERRUPT (1 << 3)
509# define DC_HPD6_INTERRUPT (1 << 17)
510# define DC_HPD6_RX_INTERRUPT (1 << 18)
511
512/* 0x6858, 0x7458, 0x10058, 0x10c58, 0x11858, 0x12458 */
513#define GRPH_INT_STATUS 0x6858
514# define GRPH_PFLIP_INT_OCCURRED (1 << 0)
515# define GRPH_PFLIP_INT_CLEAR (1 << 8)
516/* 0x685c, 0x745c, 0x1005c, 0x10c5c, 0x1185c, 0x1245c */
517#define GRPH_INT_CONTROL 0x685c
518# define GRPH_PFLIP_INT_MASK (1 << 0)
519# define GRPH_PFLIP_INT_TYPE (1 << 8)
520
521#define DACA_AUTODETECT_INT_CONTROL 0x66c8
522#define DACB_AUTODETECT_INT_CONTROL 0x67c8
523
524#define DC_HPD1_INT_STATUS 0x601c
525#define DC_HPD2_INT_STATUS 0x6028
526#define DC_HPD3_INT_STATUS 0x6034
527#define DC_HPD4_INT_STATUS 0x6040
528#define DC_HPD5_INT_STATUS 0x604c
529#define DC_HPD6_INT_STATUS 0x6058
530# define DC_HPDx_INT_STATUS (1 << 0)
531# define DC_HPDx_SENSE (1 << 1)
532# define DC_HPDx_RX_INT_STATUS (1 << 8)
533
534#define DC_HPD1_INT_CONTROL 0x6020
535#define DC_HPD2_INT_CONTROL 0x602c
536#define DC_HPD3_INT_CONTROL 0x6038
537#define DC_HPD4_INT_CONTROL 0x6044
538#define DC_HPD5_INT_CONTROL 0x6050
539#define DC_HPD6_INT_CONTROL 0x605c
540# define DC_HPDx_INT_ACK (1 << 0)
541# define DC_HPDx_INT_POLARITY (1 << 8)
542# define DC_HPDx_INT_EN (1 << 16)
543# define DC_HPDx_RX_INT_ACK (1 << 20)
544# define DC_HPDx_RX_INT_EN (1 << 24)
545
546#define DC_HPD1_CONTROL 0x6024
547#define DC_HPD2_CONTROL 0x6030
548#define DC_HPD3_CONTROL 0x603c
549#define DC_HPD4_CONTROL 0x6048
550#define DC_HPD5_CONTROL 0x6054
551#define DC_HPD6_CONTROL 0x6060
552# define DC_HPDx_CONNECTION_TIMER(x) ((x) << 0)
553# define DC_HPDx_RX_INT_TIMER(x) ((x) << 16)
554# define DC_HPDx_EN (1 << 28)
555
556#endif
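The instance lists in the comments above (the six INT_MASK or VBLANK_STATUS
addresses, for example) are the D1 register plus a per-CRTC block offset. The
offsets themselves live in evergreen_reg.h, not in this header, so the values
below are copied-in assumptions; the check just reproduces the listed
addresses:

#include <stdio.h>

#define INT_MASK 0x6b40   /* D1 instance, as defined above */

int main(void)
{
	/* EVERGREEN_CRTCn_REGISTER_OFFSET values, assumed from evergreen_reg.h */
	const unsigned offsets[6] = { 0x0, 0xc00, 0x9800, 0xa400, 0xb000, 0xbc00 };

	for (int i = 0; i < 6; i++)
		printf("CRTC%d INT_MASK = 0x%x\n", i, INT_MASK + offsets[i]);
	/* prints 0x6b40 0x7740 0x10340 0x10f40 0x11b40 0x12740 */
	return 0;
}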
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index cf60c0b3ef15..4de41b0ad5ce 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -663,26 +663,6 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
663 if (r100_debugfs_cp_init(rdev)) { 663 if (r100_debugfs_cp_init(rdev)) {
664 DRM_ERROR("Failed to register debugfs file for CP !\n"); 664 DRM_ERROR("Failed to register debugfs file for CP !\n");
665 } 665 }
666 /* Reset CP */
667 tmp = RREG32(RADEON_CP_CSQ_STAT);
668 if ((tmp & (1 << 31))) {
669 DRM_INFO("radeon: cp busy (0x%08X) resetting\n", tmp);
670 WREG32(RADEON_CP_CSQ_MODE, 0);
671 WREG32(RADEON_CP_CSQ_CNTL, 0);
672 WREG32(RADEON_RBBM_SOFT_RESET, RADEON_SOFT_RESET_CP);
673 tmp = RREG32(RADEON_RBBM_SOFT_RESET);
674 mdelay(2);
675 WREG32(RADEON_RBBM_SOFT_RESET, 0);
676 tmp = RREG32(RADEON_RBBM_SOFT_RESET);
677 mdelay(2);
678 tmp = RREG32(RADEON_CP_CSQ_STAT);
679 if ((tmp & (1 << 31))) {
680 DRM_INFO("radeon: cp reset failed (0x%08X)\n", tmp);
681 }
682 } else {
683 DRM_INFO("radeon: cp idle (0x%08X)\n", tmp);
684 }
685
686 if (!rdev->me_fw) { 666 if (!rdev->me_fw) {
687 r = r100_cp_init_microcode(rdev); 667 r = r100_cp_init_microcode(rdev);
688 if (r) { 668 if (r) {
@@ -787,39 +767,6 @@ void r100_cp_disable(struct radeon_device *rdev)
787 } 767 }
788} 768}
789 769
790int r100_cp_reset(struct radeon_device *rdev)
791{
792 uint32_t tmp;
793 bool reinit_cp;
794 int i;
795
796 reinit_cp = rdev->cp.ready;
797 rdev->cp.ready = false;
798 WREG32(RADEON_CP_CSQ_MODE, 0);
799 WREG32(RADEON_CP_CSQ_CNTL, 0);
800 WREG32(RADEON_RBBM_SOFT_RESET, RADEON_SOFT_RESET_CP);
801 (void)RREG32(RADEON_RBBM_SOFT_RESET);
802 udelay(200);
803 WREG32(RADEON_RBBM_SOFT_RESET, 0);
804 /* Wait to prevent race in RBBM_STATUS */
805 mdelay(1);
806 for (i = 0; i < rdev->usec_timeout; i++) {
807 tmp = RREG32(RADEON_RBBM_STATUS);
808 if (!(tmp & (1 << 16))) {
809 DRM_INFO("CP reset succeed (RBBM_STATUS=0x%08X)\n",
810 tmp);
811 if (reinit_cp) {
812 return r100_cp_init(rdev, rdev->cp.ring_size);
813 }
814 return 0;
815 }
816 DRM_UDELAY(1);
817 }
818 tmp = RREG32(RADEON_RBBM_STATUS);
819 DRM_ERROR("Failed to reset CP (RBBM_STATUS=0x%08X)!\n", tmp);
820 return -1;
821}
822
823void r100_cp_commit(struct radeon_device *rdev) 770void r100_cp_commit(struct radeon_device *rdev)
824{ 771{
825 WREG32(RADEON_CP_RB_WPTR, rdev->cp.wptr); 772 WREG32(RADEON_CP_RB_WPTR, rdev->cp.wptr);
@@ -1733,76 +1680,163 @@ int r100_mc_wait_for_idle(struct radeon_device *rdev)
1733 return -1; 1680 return -1;
1734} 1681}
1735 1682
1736void r100_gpu_init(struct radeon_device *rdev) 1683void r100_gpu_lockup_update(struct r100_gpu_lockup *lockup, struct radeon_cp *cp)
1737{ 1684{
1738 /* TODO: anythings to do here ? pipes ? */ 1685 lockup->last_cp_rptr = cp->rptr;
1739 r100_hdp_reset(rdev); 1686 lockup->last_jiffies = jiffies;
1740} 1687}
1741 1688
1742void r100_hdp_reset(struct radeon_device *rdev) 1689/**
1690 * r100_gpu_cp_is_lockup() - check whether the CP is locked up by tracking its progress
1691 * @rdev: radeon device structure
1692 * @lockup: r100_gpu_lockup structure holding the CP lockup tracking information
1693 * @cp: radeon_cp structure holding CP information
1694 *
1695 * The lockup tracking information does not need to be initialized: either the
1696 * CP rptr will differ from the recorded value, or jiffies will have wrapped
1697 * around, and both cases force (re)initialization of the tracking information.
1698 *
1699 * A possible false positive arises when we are called again after a long while
1700 * and last_cp_rptr happens to equal the current CP rptr; unlikely, but it can
1701 * happen. To avoid it, if the elapsed time since the last call is greater than
1702 * 3 seconds we return false and update the tracking information. Because of
1703 * this, the caller must call r100_gpu_cp_is_lockup several times in less than
1704 * 3 seconds for a lockup to be reported; the fencing code should be cautious
1705 * about that.
1706 *
1707 * The caller should write to the ring to force the CP to do something, so we
1708 * don't get a false positive when the CP simply has nothing to do.
1709 **/
1710bool r100_gpu_cp_is_lockup(struct radeon_device *rdev, struct r100_gpu_lockup *lockup, struct radeon_cp *cp)
1711{
1712 unsigned long cjiffies, elapsed;
1713
1714 cjiffies = jiffies;
1715 if (!time_after(cjiffies, lockup->last_jiffies)) {
1716 /* likely a wrap around */
1717 lockup->last_cp_rptr = cp->rptr;
1718 lockup->last_jiffies = jiffies;
1719 return false;
1720 }
1721 if (cp->rptr != lockup->last_cp_rptr) {
1722 /* CP is still working no lockup */
1723 lockup->last_cp_rptr = cp->rptr;
1724 lockup->last_jiffies = jiffies;
1725 return false;
1726 }
1727 elapsed = jiffies_to_msecs(cjiffies - lockup->last_jiffies);
1728 if (elapsed >= 3000) {
1729 /* Very likely the improbable case where the current
1730 * rptr equals an rptr recorded a long while ago; this
1731 * is more likely a false positive, so update the
1732 * tracking information, which forces us to be called
1733 * again at a later point.
1734 */
1735 lockup->last_cp_rptr = cp->rptr;
1736 lockup->last_jiffies = jiffies;
1737 return false;
1738 }
1739 if (elapsed >= 1000) {
1740 dev_err(rdev->dev, "GPU lockup CP stall for more than %lumsec\n", elapsed);
1741 return true;
1742 }
1743 /* give a chance to the GPU ... */
1744 return false;
1745}
1746
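The same tracking logic, condensed into a standalone sketch with userspace
stand-ins for jiffies and the CP read pointer; only the 1000/3000 ms
thresholds are taken from the code, the surrounding names are illustrative:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct lockup_state { uint32_t last_rptr; uint64_t last_ms; };

static bool cp_is_lockup(struct lockup_state *s, uint32_t rptr, uint64_t now_ms)
{
	if (now_ms < s->last_ms || rptr != s->last_rptr) {
		/* clock wrapped, or the CP made progress: reset the tracking */
		s->last_rptr = rptr;
		s->last_ms = now_ms;
		return false;
	}
	if (now_ms - s->last_ms >= 3000) {
		/* stale sample, treated as a false positive: reset and retry */
		s->last_rptr = rptr;
		s->last_ms = now_ms;
		return false;
	}
	/* same rptr across 1-3 s of repeated polling: report a lockup */
	return now_ms - s->last_ms >= 1000;
}

int main(void)
{
	struct lockup_state s = { 0, 0 };
	printf("%d\n", cp_is_lockup(&s, 100, 10));   /* 0: progress recorded */
	printf("%d\n", cp_is_lockup(&s, 100, 500));  /* 0: stalled under 1 s */
	printf("%d\n", cp_is_lockup(&s, 100, 1600)); /* 1: stalled ~1.6 s */
	return 0;
}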
1747bool r100_gpu_is_lockup(struct radeon_device *rdev)
1743{ 1748{
1744 uint32_t tmp; 1749 u32 rbbm_status;
1750 int r;
1745 1751
1746 tmp = RREG32(RADEON_HOST_PATH_CNTL) & RADEON_HDP_APER_CNTL; 1752 rbbm_status = RREG32(R_000E40_RBBM_STATUS);
1747 tmp |= (7 << 28); 1753 if (!G_000E40_GUI_ACTIVE(rbbm_status)) {
1748 WREG32(RADEON_HOST_PATH_CNTL, tmp | RADEON_HDP_SOFT_RESET | RADEON_HDP_READ_BUFFER_INVALIDATE); 1754 r100_gpu_lockup_update(&rdev->config.r100.lockup, &rdev->cp);
1749 (void)RREG32(RADEON_HOST_PATH_CNTL); 1755 return false;
1750 udelay(200); 1756 }
1751 WREG32(RADEON_RBBM_SOFT_RESET, 0); 1757 /* force CP activities */
1752 WREG32(RADEON_HOST_PATH_CNTL, tmp); 1758 r = radeon_ring_lock(rdev, 2);
1753 (void)RREG32(RADEON_HOST_PATH_CNTL); 1759 if (!r) {
1760 /* PACKET2 NOP */
1761 radeon_ring_write(rdev, 0x80000000);
1762 radeon_ring_write(rdev, 0x80000000);
1763 radeon_ring_unlock_commit(rdev);
1764 }
1765 rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
1766 return r100_gpu_cp_is_lockup(rdev, &rdev->config.r100.lockup, &rdev->cp);
1754} 1767}
1755 1768
1756int r100_rb2d_reset(struct radeon_device *rdev) 1769void r100_bm_disable(struct radeon_device *rdev)
1757{ 1770{
1758 uint32_t tmp; 1771 u32 tmp;
1759 int i;
1760 1772
1761 WREG32(RADEON_RBBM_SOFT_RESET, RADEON_SOFT_RESET_E2); 1773 /* disable bus mastering */
1762 (void)RREG32(RADEON_RBBM_SOFT_RESET); 1774 tmp = RREG32(R_000030_BUS_CNTL);
1763 udelay(200); 1775 WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000044);
1764 WREG32(RADEON_RBBM_SOFT_RESET, 0); 1776 mdelay(1);
1765 /* Wait to prevent race in RBBM_STATUS */ 1777 WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000042);
1778 mdelay(1);
1779 WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000040);
1780 tmp = RREG32(RADEON_BUS_CNTL);
1781 mdelay(1);
1782 pci_read_config_word(rdev->pdev, 0x4, (u16*)&tmp);
1783 pci_write_config_word(rdev->pdev, 0x4, tmp & 0xFFFB);
1766 mdelay(1); 1784 mdelay(1);
1767 for (i = 0; i < rdev->usec_timeout; i++) {
1768 tmp = RREG32(RADEON_RBBM_STATUS);
1769 if (!(tmp & (1 << 26))) {
1770 DRM_INFO("RB2D reset succeed (RBBM_STATUS=0x%08X)\n",
1771 tmp);
1772 return 0;
1773 }
1774 DRM_UDELAY(1);
1775 }
1776 tmp = RREG32(RADEON_RBBM_STATUS);
1777 DRM_ERROR("Failed to reset RB2D (RBBM_STATUS=0x%08X)!\n", tmp);
1778 return -1;
1779} 1785}
1780 1786
1781int r100_gpu_reset(struct radeon_device *rdev) 1787int r100_asic_reset(struct radeon_device *rdev)
1782{ 1788{
1783 uint32_t status; 1789 struct r100_mc_save save;
1790 u32 status, tmp;
1784 1791
1785 /* reset order likely matter */ 1792 r100_mc_stop(rdev, &save);
1786 status = RREG32(RADEON_RBBM_STATUS); 1793 status = RREG32(R_000E40_RBBM_STATUS);
1787 /* reset HDP */ 1794 if (!G_000E40_GUI_ACTIVE(status)) {
1788 r100_hdp_reset(rdev); 1795 return 0;
1789 /* reset rb2d */
1790 if (status & ((1 << 17) | (1 << 18) | (1 << 27))) {
1791 r100_rb2d_reset(rdev);
1792 } 1796 }
1793 /* TODO: reset 3D engine */ 1797 status = RREG32(R_000E40_RBBM_STATUS);
1798 dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
1799 /* stop CP */
1800 WREG32(RADEON_CP_CSQ_CNTL, 0);
1801 tmp = RREG32(RADEON_CP_RB_CNTL);
1802 WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA);
1803 WREG32(RADEON_CP_RB_RPTR_WR, 0);
1804 WREG32(RADEON_CP_RB_WPTR, 0);
1805 WREG32(RADEON_CP_RB_CNTL, tmp);
1806 /* save PCI state */
1807 pci_save_state(rdev->pdev);
1808 /* disable bus mastering */
1809 r100_bm_disable(rdev);
1810 WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_SE(1) |
1811 S_0000F0_SOFT_RESET_RE(1) |
1812 S_0000F0_SOFT_RESET_PP(1) |
1813 S_0000F0_SOFT_RESET_RB(1));
1814 RREG32(R_0000F0_RBBM_SOFT_RESET);
1815 mdelay(500);
1816 WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
1817 mdelay(1);
1818 status = RREG32(R_000E40_RBBM_STATUS);
1819 dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
1794 /* reset CP */ 1820 /* reset CP */
1795 status = RREG32(RADEON_RBBM_STATUS); 1821 WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_CP(1));
1796 if (status & (1 << 16)) { 1822 RREG32(R_0000F0_RBBM_SOFT_RESET);
1797 r100_cp_reset(rdev); 1823 mdelay(500);
1798 } 1824 WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
1825 mdelay(1);
1826 status = RREG32(R_000E40_RBBM_STATUS);
1827 dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
1828 /* restore PCI & busmastering */
1829 pci_restore_state(rdev->pdev);
1830 r100_enable_bm(rdev);
1799 /* Check if GPU is idle */ 1831 /* Check if GPU is idle */
1800 status = RREG32(RADEON_RBBM_STATUS); 1832 if (G_000E40_SE_BUSY(status) || G_000E40_RE_BUSY(status) ||
1801 if (status & RADEON_RBBM_ACTIVE) { 1833 G_000E40_TAM_BUSY(status) || G_000E40_PB_BUSY(status)) {
1802 DRM_ERROR("Failed to reset GPU (RBBM_STATUS=0x%08X)\n", status); 1834 dev_err(rdev->dev, "failed to reset GPU\n");
1835 rdev->gpu_lockup = true;
1803 return -1; 1836 return -1;
1804 } 1837 }
1805 DRM_INFO("GPU reset succeed (RBBM_STATUS=0x%08X)\n", status); 1838 r100_mc_resume(rdev, &save);
1839 dev_info(rdev->dev, "GPU reset succeeded\n");
1806 return 0; 1840 return 0;
1807} 1841}
1808 1842
@@ -2002,11 +2036,6 @@ void r100_vram_init_sizes(struct radeon_device *rdev)
2002 else 2036 else
2003 rdev->mc.mc_vram_size = rdev->mc.real_vram_size; 2037 rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
2004 } 2038 }
2005 /* FIXME remove this once we support unmappable VRAM */
2006 if (rdev->mc.mc_vram_size > rdev->mc.aper_size) {
2007 rdev->mc.mc_vram_size = rdev->mc.aper_size;
2008 rdev->mc.real_vram_size = rdev->mc.aper_size;
2009 }
2010} 2039}
2011 2040
2012void r100_vga_set_state(struct radeon_device *rdev, bool state) 2041void r100_vga_set_state(struct radeon_device *rdev, bool state)
@@ -3399,7 +3428,7 @@ static int r100_startup(struct radeon_device *rdev)
3399 /* Resume clock */ 3428 /* Resume clock */
3400 r100_clock_startup(rdev); 3429 r100_clock_startup(rdev);
3401 /* Initialize GPU configuration (# pipes, ...) */ 3430 /* Initialize GPU configuration (# pipes, ...) */
3402 r100_gpu_init(rdev); 3431// r100_gpu_init(rdev);
3403 /* Initialize GART (initialize after TTM so we can allocate 3432 /* Initialize GART (initialize after TTM so we can allocate
3404 * memory through TTM but finalize after TTM) */ 3433 * memory through TTM but finalize after TTM) */
3405 r100_enable_bm(rdev); 3434 r100_enable_bm(rdev);
@@ -3436,7 +3465,7 @@ int r100_resume(struct radeon_device *rdev)
3436 /* Resume clock before doing reset */ 3465 /* Resume clock before doing reset */
3437 r100_clock_startup(rdev); 3466 r100_clock_startup(rdev);
3438 /* Reset gpu before posting otherwise ATOM will enter infinite loop */ 3467 /* Reset gpu before posting otherwise ATOM will enter infinite loop */
3439 if (radeon_gpu_reset(rdev)) { 3468 if (radeon_asic_reset(rdev)) {
3440 dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n", 3469 dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
3441 RREG32(R_000E40_RBBM_STATUS), 3470 RREG32(R_000E40_RBBM_STATUS),
3442 RREG32(R_0007C0_CP_STAT)); 3471 RREG32(R_0007C0_CP_STAT));
@@ -3505,7 +3534,7 @@ int r100_init(struct radeon_device *rdev)
3505 return r; 3534 return r;
3506 } 3535 }
3507 /* Reset gpu before posting otherwise ATOM will enter infinite loop */ 3536 /* Reset gpu before posting otherwise ATOM will enter infinite loop */
3508 if (radeon_gpu_reset(rdev)) { 3537 if (radeon_asic_reset(rdev)) {
3509 dev_warn(rdev->dev, 3538 dev_warn(rdev->dev,
3510 "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n", 3539 "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
3511 RREG32(R_000E40_RBBM_STATUS), 3540 RREG32(R_000E40_RBBM_STATUS),
diff --git a/drivers/gpu/drm/radeon/r100d.h b/drivers/gpu/drm/radeon/r100d.h
index df29a630c466..de8abd104ab7 100644
--- a/drivers/gpu/drm/radeon/r100d.h
+++ b/drivers/gpu/drm/radeon/r100d.h
@@ -74,6 +74,134 @@
74#define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF) 74#define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF)
75 75
76/* Registers */ 76/* Registers */
77#define R_0000F0_RBBM_SOFT_RESET 0x0000F0
78#define S_0000F0_SOFT_RESET_CP(x) (((x) & 0x1) << 0)
79#define G_0000F0_SOFT_RESET_CP(x) (((x) >> 0) & 0x1)
80#define C_0000F0_SOFT_RESET_CP 0xFFFFFFFE
81#define S_0000F0_SOFT_RESET_HI(x) (((x) & 0x1) << 1)
82#define G_0000F0_SOFT_RESET_HI(x) (((x) >> 1) & 0x1)
83#define C_0000F0_SOFT_RESET_HI 0xFFFFFFFD
84#define S_0000F0_SOFT_RESET_SE(x) (((x) & 0x1) << 2)
85#define G_0000F0_SOFT_RESET_SE(x) (((x) >> 2) & 0x1)
86#define C_0000F0_SOFT_RESET_SE 0xFFFFFFFB
87#define S_0000F0_SOFT_RESET_RE(x) (((x) & 0x1) << 3)
88#define G_0000F0_SOFT_RESET_RE(x) (((x) >> 3) & 0x1)
89#define C_0000F0_SOFT_RESET_RE 0xFFFFFFF7
90#define S_0000F0_SOFT_RESET_PP(x) (((x) & 0x1) << 4)
91#define G_0000F0_SOFT_RESET_PP(x) (((x) >> 4) & 0x1)
92#define C_0000F0_SOFT_RESET_PP 0xFFFFFFEF
93#define S_0000F0_SOFT_RESET_E2(x) (((x) & 0x1) << 5)
94#define G_0000F0_SOFT_RESET_E2(x) (((x) >> 5) & 0x1)
95#define C_0000F0_SOFT_RESET_E2 0xFFFFFFDF
96#define S_0000F0_SOFT_RESET_RB(x) (((x) & 0x1) << 6)
97#define G_0000F0_SOFT_RESET_RB(x) (((x) >> 6) & 0x1)
98#define C_0000F0_SOFT_RESET_RB 0xFFFFFFBF
99#define S_0000F0_SOFT_RESET_HDP(x) (((x) & 0x1) << 7)
100#define G_0000F0_SOFT_RESET_HDP(x) (((x) >> 7) & 0x1)
101#define C_0000F0_SOFT_RESET_HDP 0xFFFFFF7F
102#define S_0000F0_SOFT_RESET_MC(x) (((x) & 0x1) << 8)
103#define G_0000F0_SOFT_RESET_MC(x) (((x) >> 8) & 0x1)
104#define C_0000F0_SOFT_RESET_MC 0xFFFFFEFF
105#define S_0000F0_SOFT_RESET_AIC(x) (((x) & 0x1) << 9)
106#define G_0000F0_SOFT_RESET_AIC(x) (((x) >> 9) & 0x1)
107#define C_0000F0_SOFT_RESET_AIC 0xFFFFFDFF
108#define S_0000F0_SOFT_RESET_VIP(x) (((x) & 0x1) << 10)
109#define G_0000F0_SOFT_RESET_VIP(x) (((x) >> 10) & 0x1)
110#define C_0000F0_SOFT_RESET_VIP 0xFFFFFBFF
111#define S_0000F0_SOFT_RESET_DISP(x) (((x) & 0x1) << 11)
112#define G_0000F0_SOFT_RESET_DISP(x) (((x) >> 11) & 0x1)
113#define C_0000F0_SOFT_RESET_DISP 0xFFFFF7FF
114#define S_0000F0_SOFT_RESET_CG(x) (((x) & 0x1) << 12)
115#define G_0000F0_SOFT_RESET_CG(x) (((x) >> 12) & 0x1)
116#define C_0000F0_SOFT_RESET_CG 0xFFFFEFFF
117#define R_000030_BUS_CNTL 0x000030
118#define S_000030_BUS_DBL_RESYNC(x) (((x) & 0x1) << 0)
119#define G_000030_BUS_DBL_RESYNC(x) (((x) >> 0) & 0x1)
120#define C_000030_BUS_DBL_RESYNC 0xFFFFFFFE
121#define S_000030_BUS_MSTR_RESET(x) (((x) & 0x1) << 1)
122#define G_000030_BUS_MSTR_RESET(x) (((x) >> 1) & 0x1)
123#define C_000030_BUS_MSTR_RESET 0xFFFFFFFD
124#define S_000030_BUS_FLUSH_BUF(x) (((x) & 0x1) << 2)
125#define G_000030_BUS_FLUSH_BUF(x) (((x) >> 2) & 0x1)
126#define C_000030_BUS_FLUSH_BUF 0xFFFFFFFB
127#define S_000030_BUS_STOP_REQ_DIS(x) (((x) & 0x1) << 3)
128#define G_000030_BUS_STOP_REQ_DIS(x) (((x) >> 3) & 0x1)
129#define C_000030_BUS_STOP_REQ_DIS 0xFFFFFFF7
130#define S_000030_BUS_PM4_READ_COMBINE_EN(x) (((x) & 0x1) << 4)
131#define G_000030_BUS_PM4_READ_COMBINE_EN(x) (((x) >> 4) & 0x1)
132#define C_000030_BUS_PM4_READ_COMBINE_EN 0xFFFFFFEF
133#define S_000030_BUS_WRT_COMBINE_EN(x) (((x) & 0x1) << 5)
134#define G_000030_BUS_WRT_COMBINE_EN(x) (((x) >> 5) & 0x1)
135#define C_000030_BUS_WRT_COMBINE_EN 0xFFFFFFDF
136#define S_000030_BUS_MASTER_DIS(x) (((x) & 0x1) << 6)
137#define G_000030_BUS_MASTER_DIS(x) (((x) >> 6) & 0x1)
138#define C_000030_BUS_MASTER_DIS 0xFFFFFFBF
139#define S_000030_BIOS_ROM_WRT_EN(x) (((x) & 0x1) << 7)
140#define G_000030_BIOS_ROM_WRT_EN(x) (((x) >> 7) & 0x1)
141#define C_000030_BIOS_ROM_WRT_EN 0xFFFFFF7F
142#define S_000030_BM_DAC_CRIPPLE(x) (((x) & 0x1) << 8)
143#define G_000030_BM_DAC_CRIPPLE(x) (((x) >> 8) & 0x1)
144#define C_000030_BM_DAC_CRIPPLE 0xFFFFFEFF
145#define S_000030_BUS_NON_PM4_READ_COMBINE_EN(x) (((x) & 0x1) << 9)
146#define G_000030_BUS_NON_PM4_READ_COMBINE_EN(x) (((x) >> 9) & 0x1)
147#define C_000030_BUS_NON_PM4_READ_COMBINE_EN 0xFFFFFDFF
148#define S_000030_BUS_XFERD_DISCARD_EN(x) (((x) & 0x1) << 10)
149#define G_000030_BUS_XFERD_DISCARD_EN(x) (((x) >> 10) & 0x1)
150#define C_000030_BUS_XFERD_DISCARD_EN 0xFFFFFBFF
151#define S_000030_BUS_SGL_READ_DISABLE(x) (((x) & 0x1) << 11)
152#define G_000030_BUS_SGL_READ_DISABLE(x) (((x) >> 11) & 0x1)
153#define C_000030_BUS_SGL_READ_DISABLE 0xFFFFF7FF
154#define S_000030_BIOS_DIS_ROM(x) (((x) & 0x1) << 12)
155#define G_000030_BIOS_DIS_ROM(x) (((x) >> 12) & 0x1)
156#define C_000030_BIOS_DIS_ROM 0xFFFFEFFF
157#define S_000030_BUS_PCI_READ_RETRY_EN(x) (((x) & 0x1) << 13)
158#define G_000030_BUS_PCI_READ_RETRY_EN(x) (((x) >> 13) & 0x1)
159#define C_000030_BUS_PCI_READ_RETRY_EN 0xFFFFDFFF
160#define S_000030_BUS_AGP_AD_STEPPING_EN(x) (((x) & 0x1) << 14)
161#define G_000030_BUS_AGP_AD_STEPPING_EN(x) (((x) >> 14) & 0x1)
162#define C_000030_BUS_AGP_AD_STEPPING_EN 0xFFFFBFFF
163#define S_000030_BUS_PCI_WRT_RETRY_EN(x) (((x) & 0x1) << 15)
164#define G_000030_BUS_PCI_WRT_RETRY_EN(x) (((x) >> 15) & 0x1)
165#define C_000030_BUS_PCI_WRT_RETRY_EN 0xFFFF7FFF
166#define S_000030_BUS_RETRY_WS(x) (((x) & 0xF) << 16)
167#define G_000030_BUS_RETRY_WS(x) (((x) >> 16) & 0xF)
168#define C_000030_BUS_RETRY_WS 0xFFF0FFFF
169#define S_000030_BUS_MSTR_RD_MULT(x) (((x) & 0x1) << 20)
170#define G_000030_BUS_MSTR_RD_MULT(x) (((x) >> 20) & 0x1)
171#define C_000030_BUS_MSTR_RD_MULT 0xFFEFFFFF
172#define S_000030_BUS_MSTR_RD_LINE(x) (((x) & 0x1) << 21)
173#define G_000030_BUS_MSTR_RD_LINE(x) (((x) >> 21) & 0x1)
174#define C_000030_BUS_MSTR_RD_LINE 0xFFDFFFFF
175#define S_000030_BUS_SUSPEND(x) (((x) & 0x1) << 22)
176#define G_000030_BUS_SUSPEND(x) (((x) >> 22) & 0x1)
177#define C_000030_BUS_SUSPEND 0xFFBFFFFF
178#define S_000030_LAT_16X(x) (((x) & 0x1) << 23)
179#define G_000030_LAT_16X(x) (((x) >> 23) & 0x1)
180#define C_000030_LAT_16X 0xFF7FFFFF
181#define S_000030_BUS_RD_DISCARD_EN(x) (((x) & 0x1) << 24)
182#define G_000030_BUS_RD_DISCARD_EN(x) (((x) >> 24) & 0x1)
183#define C_000030_BUS_RD_DISCARD_EN 0xFEFFFFFF
184#define S_000030_ENFRCWRDY(x) (((x) & 0x1) << 25)
185#define G_000030_ENFRCWRDY(x) (((x) >> 25) & 0x1)
186#define C_000030_ENFRCWRDY 0xFDFFFFFF
187#define S_000030_BUS_MSTR_WS(x) (((x) & 0x1) << 26)
188#define G_000030_BUS_MSTR_WS(x) (((x) >> 26) & 0x1)
189#define C_000030_BUS_MSTR_WS 0xFBFFFFFF
190#define S_000030_BUS_PARKING_DIS(x) (((x) & 0x1) << 27)
191#define G_000030_BUS_PARKING_DIS(x) (((x) >> 27) & 0x1)
192#define C_000030_BUS_PARKING_DIS 0xF7FFFFFF
193#define S_000030_BUS_MSTR_DISCONNECT_EN(x) (((x) & 0x1) << 28)
194#define G_000030_BUS_MSTR_DISCONNECT_EN(x) (((x) >> 28) & 0x1)
195#define C_000030_BUS_MSTR_DISCONNECT_EN 0xEFFFFFFF
196#define S_000030_SERR_EN(x) (((x) & 0x1) << 29)
197#define G_000030_SERR_EN(x) (((x) >> 29) & 0x1)
198#define C_000030_SERR_EN 0xDFFFFFFF
199#define S_000030_BUS_READ_BURST(x) (((x) & 0x1) << 30)
200#define G_000030_BUS_READ_BURST(x) (((x) >> 30) & 0x1)
201#define C_000030_BUS_READ_BURST 0xBFFFFFFF
202#define S_000030_BUS_RDY_READ_DLY(x) (((x) & 0x1) << 31)
203#define G_000030_BUS_RDY_READ_DLY(x) (((x) >> 31) & 0x1)
204#define C_000030_BUS_RDY_READ_DLY 0x7FFFFFFF
77#define R_000040_GEN_INT_CNTL 0x000040 205#define R_000040_GEN_INT_CNTL 0x000040
78#define S_000040_CRTC_VBLANK(x) (((x) & 0x1) << 0) 206#define S_000040_CRTC_VBLANK(x) (((x) & 0x1) << 0)
79#define G_000040_CRTC_VBLANK(x) (((x) >> 0) & 0x1) 207#define G_000040_CRTC_VBLANK(x) (((x) >> 0) & 0x1)
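These generated register headers follow radeon's three-accessor convention:
S_<reg>_<field>(v) shifts a value into the field, G_<reg>_<field>(reg) reads
it back, and C_<reg>_<field> is the AND-mask that clears it; r100_asic_reset()
above composes its soft-reset writes exactly this way. A self-contained check
with the CP field of RBBM_SOFT_RESET (macros copied verbatim from the hunk):

#include <stdint.h>
#include <stdio.h>

#define S_0000F0_SOFT_RESET_CP(x) (((x) & 0x1) << 0)
#define G_0000F0_SOFT_RESET_CP(x) (((x) >> 0) & 0x1)
#define C_0000F0_SOFT_RESET_CP    0xFFFFFFFE

int main(void)
{
	uint32_t reset = S_0000F0_SOFT_RESET_CP(1);  /* set the field */
	printf("cp = %u\n", (unsigned)G_0000F0_SOFT_RESET_CP(reset)); /* 1 */
	reset &= C_0000F0_SOFT_RESET_CP;             /* clear just that field */
	printf("reg = 0x%08x\n", (unsigned)reset);   /* 0x00000000 */
	return 0;
}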
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index a5ff8076b423..6d9569e002f7 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -27,8 +27,9 @@
27 */ 27 */
28#include <linux/seq_file.h> 28#include <linux/seq_file.h>
29#include <linux/slab.h> 29#include <linux/slab.h>
30#include "drmP.h" 30#include <drm/drmP.h>
31#include "drm.h" 31#include <drm/drm.h>
32#include <drm/drm_crtc_helper.h>
32#include "radeon_reg.h" 33#include "radeon_reg.h"
33#include "radeon.h" 34#include "radeon.h"
34#include "radeon_asic.h" 35#include "radeon_asic.h"
@@ -151,6 +152,10 @@ void rv370_pcie_gart_disable(struct radeon_device *rdev)
151 u32 tmp; 152 u32 tmp;
152 int r; 153 int r;
153 154
155 WREG32_PCIE(RADEON_PCIE_TX_GART_START_LO, 0);
156 WREG32_PCIE(RADEON_PCIE_TX_GART_END_LO, 0);
157 WREG32_PCIE(RADEON_PCIE_TX_GART_START_HI, 0);
158 WREG32_PCIE(RADEON_PCIE_TX_GART_END_HI, 0);
154 tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL); 159 tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
155 tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD; 160 tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
156 WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp & ~RADEON_PCIE_TX_GART_EN); 161 WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp & ~RADEON_PCIE_TX_GART_EN);
@@ -323,7 +328,6 @@ void r300_gpu_init(struct radeon_device *rdev)
323{ 328{
324 uint32_t gb_tile_config, tmp; 329 uint32_t gb_tile_config, tmp;
325 330
326 r100_hdp_reset(rdev);
327 if ((rdev->family == CHIP_R300 && rdev->pdev->device != 0x4144) || 331 if ((rdev->family == CHIP_R300 && rdev->pdev->device != 0x4144) ||
328 (rdev->family == CHIP_R350 && rdev->pdev->device != 0x4148)) { 332 (rdev->family == CHIP_R350 && rdev->pdev->device != 0x4148)) {
329 /* r300,r350 */ 333 /* r300,r350 */
@@ -375,89 +379,85 @@ void r300_gpu_init(struct radeon_device *rdev)
375 rdev->num_gb_pipes, rdev->num_z_pipes); 379 rdev->num_gb_pipes, rdev->num_z_pipes);
376} 380}
377 381
378int r300_ga_reset(struct radeon_device *rdev) 382bool r300_gpu_is_lockup(struct radeon_device *rdev)
379{ 383{
380 uint32_t tmp; 384 u32 rbbm_status;
381 bool reinit_cp; 385 int r;
382 int i;
383 386
384 reinit_cp = rdev->cp.ready; 387 rbbm_status = RREG32(R_000E40_RBBM_STATUS);
385 rdev->cp.ready = false; 388 if (!G_000E40_GUI_ACTIVE(rbbm_status)) {
386 for (i = 0; i < rdev->usec_timeout; i++) { 389 r100_gpu_lockup_update(&rdev->config.r300.lockup, &rdev->cp);
387 WREG32(RADEON_CP_CSQ_MODE, 0); 390 return false;
388 WREG32(RADEON_CP_CSQ_CNTL, 0);
389 WREG32(RADEON_RBBM_SOFT_RESET, 0x32005);
390 (void)RREG32(RADEON_RBBM_SOFT_RESET);
391 udelay(200);
392 WREG32(RADEON_RBBM_SOFT_RESET, 0);
393 /* Wait to prevent race in RBBM_STATUS */
394 mdelay(1);
395 tmp = RREG32(RADEON_RBBM_STATUS);
396 if (tmp & ((1 << 20) | (1 << 26))) {
397 DRM_ERROR("VAP & CP still busy (RBBM_STATUS=0x%08X)", tmp);
398 /* GA still busy soft reset it */
399 WREG32(0x429C, 0x200);
400 WREG32(R300_VAP_PVS_STATE_FLUSH_REG, 0);
401 WREG32(R300_RE_SCISSORS_TL, 0);
402 WREG32(R300_RE_SCISSORS_BR, 0);
403 WREG32(0x24AC, 0);
404 }
405 /* Wait to prevent race in RBBM_STATUS */
406 mdelay(1);
407 tmp = RREG32(RADEON_RBBM_STATUS);
408 if (!(tmp & ((1 << 20) | (1 << 26)))) {
409 break;
410 }
411 } 391 }
412 for (i = 0; i < rdev->usec_timeout; i++) { 392 /* force CP activities */
413 tmp = RREG32(RADEON_RBBM_STATUS); 393 r = radeon_ring_lock(rdev, 2);
414 if (!(tmp & ((1 << 20) | (1 << 26)))) { 394 if (!r) {
415 DRM_INFO("GA reset succeed (RBBM_STATUS=0x%08X)\n", 395 /* PACKET2 NOP */
416 tmp); 396 radeon_ring_write(rdev, 0x80000000);
417 if (reinit_cp) { 397 radeon_ring_write(rdev, 0x80000000);
418 return r100_cp_init(rdev, rdev->cp.ring_size); 398 radeon_ring_unlock_commit(rdev);
419 }
420 return 0;
421 }
422 DRM_UDELAY(1);
423 } 399 }
424 tmp = RREG32(RADEON_RBBM_STATUS); 400 rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
425 DRM_ERROR("Failed to reset GA ! (RBBM_STATUS=0x%08X)\n", tmp); 401 return r100_gpu_cp_is_lockup(rdev, &rdev->config.r300.lockup, &rdev->cp);
426 return -1;
427} 402}
428 403
429int r300_gpu_reset(struct radeon_device *rdev) 404int r300_asic_reset(struct radeon_device *rdev)
430{ 405{
431 uint32_t status; 406 struct r100_mc_save save;
432 407 u32 status, tmp;
433 /* reset order likely matter */ 408
434 status = RREG32(RADEON_RBBM_STATUS); 409 r100_mc_stop(rdev, &save);
435 /* reset HDP */ 410 status = RREG32(R_000E40_RBBM_STATUS);
436 r100_hdp_reset(rdev); 411 if (!G_000E40_GUI_ACTIVE(status)) {
437 /* reset rb2d */ 412 return 0;
438 if (status & ((1 << 17) | (1 << 18) | (1 << 27))) {
439 r100_rb2d_reset(rdev);
440 }
441 /* reset GA */
442 if (status & ((1 << 20) | (1 << 26))) {
443 r300_ga_reset(rdev);
444 }
445 /* reset CP */
446 status = RREG32(RADEON_RBBM_STATUS);
447 if (status & (1 << 16)) {
448 r100_cp_reset(rdev);
449 } 413 }
414 status = RREG32(R_000E40_RBBM_STATUS);
415 dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
416 /* stop CP */
417 WREG32(RADEON_CP_CSQ_CNTL, 0);
418 tmp = RREG32(RADEON_CP_RB_CNTL);
419 WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA);
420 WREG32(RADEON_CP_RB_RPTR_WR, 0);
421 WREG32(RADEON_CP_RB_WPTR, 0);
422 WREG32(RADEON_CP_RB_CNTL, tmp);
423 /* save PCI state */
424 pci_save_state(rdev->pdev);
425 /* disable bus mastering */
426 r100_bm_disable(rdev);
427 WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_VAP(1) |
428 S_0000F0_SOFT_RESET_GA(1));
429 RREG32(R_0000F0_RBBM_SOFT_RESET);
430 mdelay(500);
431 WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
432 mdelay(1);
433 status = RREG32(R_000E40_RBBM_STATUS);
434 dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
 435	/* resetting the CP seems to be problematic: sometimes it ends up
 436	 * hard locking the computer, but it's necessary for a successful
 437	 * reset. More testing is needed on R3XX/R4XX to find a reliable
 438	 * solution (if any exists).
 439	 */
440 WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_CP(1));
441 RREG32(R_0000F0_RBBM_SOFT_RESET);
442 mdelay(500);
443 WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
444 mdelay(1);
445 status = RREG32(R_000E40_RBBM_STATUS);
446 dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
447 /* restore PCI & busmastering */
448 pci_restore_state(rdev->pdev);
449 r100_enable_bm(rdev);
450 /* Check if GPU is idle */ 450 /* Check if GPU is idle */
451 status = RREG32(RADEON_RBBM_STATUS); 451 if (G_000E40_GA_BUSY(status) || G_000E40_VAP_BUSY(status)) {
452 if (status & RADEON_RBBM_ACTIVE) { 452 dev_err(rdev->dev, "failed to reset GPU\n");
453 DRM_ERROR("Failed to reset GPU (RBBM_STATUS=0x%08X)\n", status); 453 rdev->gpu_lockup = true;
454 return -1; 454 return -1;
455 } 455 }
456 DRM_INFO("GPU reset succeed (RBBM_STATUS=0x%08X)\n", status); 456 r100_mc_resume(rdev, &save);
 457	dev_info(rdev->dev, "GPU reset succeeded\n");
457 return 0; 458 return 0;
458} 459}
459 460
460
461/* 461/*
462 * r300,r350,rv350,rv380 VRAM info 462 * r300,r350,rv350,rv380 VRAM info
463 */ 463 */
@@ -1316,7 +1316,7 @@ int r300_resume(struct radeon_device *rdev)
1316 /* Resume clock before doing reset */ 1316 /* Resume clock before doing reset */
1317 r300_clock_startup(rdev); 1317 r300_clock_startup(rdev);
1318 /* Reset gpu before posting otherwise ATOM will enter infinite loop */ 1318 /* Reset gpu before posting otherwise ATOM will enter infinite loop */
1319 if (radeon_gpu_reset(rdev)) { 1319 if (radeon_asic_reset(rdev)) {
1320 dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n", 1320 dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
1321 RREG32(R_000E40_RBBM_STATUS), 1321 RREG32(R_000E40_RBBM_STATUS),
1322 RREG32(R_0007C0_CP_STAT)); 1322 RREG32(R_0007C0_CP_STAT));
@@ -1387,7 +1387,7 @@ int r300_init(struct radeon_device *rdev)
1387 return r; 1387 return r;
1388 } 1388 }
1389 /* Reset gpu before posting otherwise ATOM will enter infinite loop */ 1389 /* Reset gpu before posting otherwise ATOM will enter infinite loop */
1390 if (radeon_gpu_reset(rdev)) { 1390 if (radeon_asic_reset(rdev)) {
1391 dev_warn(rdev->dev, 1391 dev_warn(rdev->dev,
1392 "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n", 1392 "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
1393 RREG32(R_000E40_RBBM_STATUS), 1393 RREG32(R_000E40_RBBM_STATUS),
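
The lockup check introduced above replaces the old blocking GA-reset loop with passive detection: sample RBBM_STATUS, push two PACKET2 NOPs (0x80000000) through the ring to force CP activity, then compare the CP read pointer against the last recorded value. A minimal sketch of that bookkeeping in plain C, where the rptr argument stands in for RREG32(RADEON_CP_RB_RPTR) and the 10-second grace period is an assumption, not the driver's actual threshold:

#include <stdbool.h>
#include <stdint.h>
#include <time.h>

struct gpu_lockup {
	time_t   last_change;  /* when rptr last advanced */
	uint32_t last_cp_rptr; /* rptr value at that moment */
};

/* record forward progress; mirrors r100_gpu_lockup_update() */
static void lockup_update(struct gpu_lockup *l, uint32_t rptr)
{
	l->last_cp_rptr = rptr;
	l->last_change = time(NULL);
}

/* mirrors r100_gpu_cp_is_lockup(): locked up only if rptr froze too long */
static bool cp_is_lockup(struct gpu_lockup *l, uint32_t rptr)
{
	if (rptr != l->last_cp_rptr) {
		lockup_update(l, rptr); /* CP consumed work: not a lockup */
		return false;
	}
	return (time(NULL) - l->last_change) > 10; /* assumed grace period */
}

Only when this check reports a genuine stall does r300_asic_reset() run, so transient GUI-busy states no longer trigger a full soft reset.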
diff --git a/drivers/gpu/drm/radeon/r300d.h b/drivers/gpu/drm/radeon/r300d.h
index 4c73114f0de9..968a33317fbf 100644
--- a/drivers/gpu/drm/radeon/r300d.h
+++ b/drivers/gpu/drm/radeon/r300d.h
@@ -209,7 +209,52 @@
209#define S_000E40_GUI_ACTIVE(x) (((x) & 0x1) << 31) 209#define S_000E40_GUI_ACTIVE(x) (((x) & 0x1) << 31)
210#define G_000E40_GUI_ACTIVE(x) (((x) >> 31) & 0x1) 210#define G_000E40_GUI_ACTIVE(x) (((x) >> 31) & 0x1)
211#define C_000E40_GUI_ACTIVE 0x7FFFFFFF 211#define C_000E40_GUI_ACTIVE 0x7FFFFFFF
212 212#define R_0000F0_RBBM_SOFT_RESET 0x0000F0
213#define S_0000F0_SOFT_RESET_CP(x) (((x) & 0x1) << 0)
214#define G_0000F0_SOFT_RESET_CP(x) (((x) >> 0) & 0x1)
215#define C_0000F0_SOFT_RESET_CP 0xFFFFFFFE
216#define S_0000F0_SOFT_RESET_HI(x) (((x) & 0x1) << 1)
217#define G_0000F0_SOFT_RESET_HI(x) (((x) >> 1) & 0x1)
218#define C_0000F0_SOFT_RESET_HI 0xFFFFFFFD
219#define S_0000F0_SOFT_RESET_VAP(x) (((x) & 0x1) << 2)
220#define G_0000F0_SOFT_RESET_VAP(x) (((x) >> 2) & 0x1)
221#define C_0000F0_SOFT_RESET_VAP 0xFFFFFFFB
222#define S_0000F0_SOFT_RESET_RE(x) (((x) & 0x1) << 3)
223#define G_0000F0_SOFT_RESET_RE(x) (((x) >> 3) & 0x1)
224#define C_0000F0_SOFT_RESET_RE 0xFFFFFFF7
225#define S_0000F0_SOFT_RESET_PP(x) (((x) & 0x1) << 4)
226#define G_0000F0_SOFT_RESET_PP(x) (((x) >> 4) & 0x1)
227#define C_0000F0_SOFT_RESET_PP 0xFFFFFFEF
228#define S_0000F0_SOFT_RESET_E2(x) (((x) & 0x1) << 5)
229#define G_0000F0_SOFT_RESET_E2(x) (((x) >> 5) & 0x1)
230#define C_0000F0_SOFT_RESET_E2 0xFFFFFFDF
231#define S_0000F0_SOFT_RESET_RB(x) (((x) & 0x1) << 6)
232#define G_0000F0_SOFT_RESET_RB(x) (((x) >> 6) & 0x1)
233#define C_0000F0_SOFT_RESET_RB 0xFFFFFFBF
234#define S_0000F0_SOFT_RESET_HDP(x) (((x) & 0x1) << 7)
235#define G_0000F0_SOFT_RESET_HDP(x) (((x) >> 7) & 0x1)
236#define C_0000F0_SOFT_RESET_HDP 0xFFFFFF7F
237#define S_0000F0_SOFT_RESET_MC(x) (((x) & 0x1) << 8)
238#define G_0000F0_SOFT_RESET_MC(x) (((x) >> 8) & 0x1)
239#define C_0000F0_SOFT_RESET_MC 0xFFFFFEFF
240#define S_0000F0_SOFT_RESET_AIC(x) (((x) & 0x1) << 9)
241#define G_0000F0_SOFT_RESET_AIC(x) (((x) >> 9) & 0x1)
242#define C_0000F0_SOFT_RESET_AIC 0xFFFFFDFF
243#define S_0000F0_SOFT_RESET_VIP(x) (((x) & 0x1) << 10)
244#define G_0000F0_SOFT_RESET_VIP(x) (((x) >> 10) & 0x1)
245#define C_0000F0_SOFT_RESET_VIP 0xFFFFFBFF
246#define S_0000F0_SOFT_RESET_DISP(x) (((x) & 0x1) << 11)
247#define G_0000F0_SOFT_RESET_DISP(x) (((x) >> 11) & 0x1)
248#define C_0000F0_SOFT_RESET_DISP 0xFFFFF7FF
249#define S_0000F0_SOFT_RESET_CG(x) (((x) & 0x1) << 12)
250#define G_0000F0_SOFT_RESET_CG(x) (((x) >> 12) & 0x1)
251#define C_0000F0_SOFT_RESET_CG 0xFFFFEFFF
252#define S_0000F0_SOFT_RESET_GA(x) (((x) & 0x1) << 13)
253#define G_0000F0_SOFT_RESET_GA(x) (((x) >> 13) & 0x1)
254#define C_0000F0_SOFT_RESET_GA 0xFFFFDFFF
255#define S_0000F0_SOFT_RESET_IDCT(x) (((x) & 0x1) << 14)
256#define G_0000F0_SOFT_RESET_IDCT(x) (((x) >> 14) & 0x1)
257#define C_0000F0_SOFT_RESET_IDCT 0xFFFFBFFF
213 258
214#define R_00000D_SCLK_CNTL 0x00000D 259#define R_00000D_SCLK_CNTL 0x00000D
215#define S_00000D_SCLK_SRC_SEL(x) (((x) & 0x7) << 0) 260#define S_00000D_SCLK_SRC_SEL(x) (((x) & 0x7) << 0)
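
Every field in these generated register headers comes as a triple: S_<reg>_<FIELD>(x) shifts a value into position, G_<reg>_<FIELD>(x) extracts it, and C_<reg>_<FIELD> is the complement mask that clears it, so read-modify-write updates compose without hand-written shifts. A standalone example using the SOFT_RESET_CP field added above (the register value is illustrative):

#include <stdint.h>
#include <stdio.h>

#define S_0000F0_SOFT_RESET_CP(x) (((x) & 0x1) << 0)
#define G_0000F0_SOFT_RESET_CP(x) (((x) >> 0) & 0x1)
#define C_0000F0_SOFT_RESET_CP    0xFFFFFFFE

int main(void)
{
	uint32_t reg = 0xDEADBEEF; /* pretend register contents */

	/* read-modify-write: clear the field, then set it to 1 */
	reg = (reg & C_0000F0_SOFT_RESET_CP) | S_0000F0_SOFT_RESET_CP(1);
	printf("SOFT_RESET_CP = %u\n", G_0000F0_SOFT_RESET_CP(reg)); /* prints 1 */
	return 0;
}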
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c
index c2bda4ad62e7..be092d243f84 100644
--- a/drivers/gpu/drm/radeon/r420.c
+++ b/drivers/gpu/drm/radeon/r420.c
@@ -241,7 +241,7 @@ int r420_resume(struct radeon_device *rdev)
241 /* Resume clock before doing reset */ 241 /* Resume clock before doing reset */
242 r420_clock_resume(rdev); 242 r420_clock_resume(rdev);
243 /* Reset gpu before posting otherwise ATOM will enter infinite loop */ 243 /* Reset gpu before posting otherwise ATOM will enter infinite loop */
244 if (radeon_gpu_reset(rdev)) { 244 if (radeon_asic_reset(rdev)) {
245 dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n", 245 dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
246 RREG32(R_000E40_RBBM_STATUS), 246 RREG32(R_000E40_RBBM_STATUS),
247 RREG32(R_0007C0_CP_STAT)); 247 RREG32(R_0007C0_CP_STAT));
@@ -322,7 +322,7 @@ int r420_init(struct radeon_device *rdev)
322 } 322 }
323 } 323 }
324 /* Reset gpu before posting otherwise ATOM will enter infinite loop */ 324 /* Reset gpu before posting otherwise ATOM will enter infinite loop */
325 if (radeon_gpu_reset(rdev)) { 325 if (radeon_asic_reset(rdev)) {
326 dev_warn(rdev->dev, 326 dev_warn(rdev->dev,
327 "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n", 327 "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
328 RREG32(R_000E40_RBBM_STATUS), 328 RREG32(R_000E40_RBBM_STATUS),
diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c
index 3c44b8d39318..870111e26bd1 100644
--- a/drivers/gpu/drm/radeon/r520.c
+++ b/drivers/gpu/drm/radeon/r520.c
@@ -53,7 +53,6 @@ static void r520_gpu_init(struct radeon_device *rdev)
53{ 53{
54 unsigned pipe_select_current, gb_pipe_select, tmp; 54 unsigned pipe_select_current, gb_pipe_select, tmp;
55 55
56 r100_hdp_reset(rdev);
57 rv515_vga_render_disable(rdev); 56 rv515_vga_render_disable(rdev);
58 /* 57 /*
59 * DST_PIPE_CONFIG 0x170C 58 * DST_PIPE_CONFIG 0x170C
@@ -209,7 +208,7 @@ int r520_resume(struct radeon_device *rdev)
209 /* Resume clock before doing reset */ 208 /* Resume clock before doing reset */
210 rv515_clock_startup(rdev); 209 rv515_clock_startup(rdev);
211 /* Reset gpu before posting otherwise ATOM will enter infinite loop */ 210 /* Reset gpu before posting otherwise ATOM will enter infinite loop */
212 if (radeon_gpu_reset(rdev)) { 211 if (radeon_asic_reset(rdev)) {
213 dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n", 212 dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
214 RREG32(R_000E40_RBBM_STATUS), 213 RREG32(R_000E40_RBBM_STATUS),
215 RREG32(R_0007C0_CP_STAT)); 214 RREG32(R_0007C0_CP_STAT));
@@ -246,7 +245,7 @@ int r520_init(struct radeon_device *rdev)
246 return -EINVAL; 245 return -EINVAL;
247 } 246 }
248 /* Reset gpu before posting otherwise ATOM will enter infinite loop */ 247 /* Reset gpu before posting otherwise ATOM will enter infinite loop */
249 if (radeon_gpu_reset(rdev)) { 248 if (radeon_asic_reset(rdev)) {
250 dev_warn(rdev->dev, 249 dev_warn(rdev->dev,
251 "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n", 250 "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
252 RREG32(R_000E40_RBBM_STATUS), 251 RREG32(R_000E40_RBBM_STATUS),
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 8f3454e2056a..2ec423c3f3f8 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -44,6 +44,9 @@
44#define R700_PFP_UCODE_SIZE 848 44#define R700_PFP_UCODE_SIZE 848
45#define R700_PM4_UCODE_SIZE 1360 45#define R700_PM4_UCODE_SIZE 1360
46#define R700_RLC_UCODE_SIZE 1024 46#define R700_RLC_UCODE_SIZE 1024
47#define EVERGREEN_PFP_UCODE_SIZE 1120
48#define EVERGREEN_PM4_UCODE_SIZE 1376
49#define EVERGREEN_RLC_UCODE_SIZE 768
47 50
48/* Firmware Names */ 51/* Firmware Names */
49MODULE_FIRMWARE("radeon/R600_pfp.bin"); 52MODULE_FIRMWARE("radeon/R600_pfp.bin");
@@ -68,6 +71,18 @@ MODULE_FIRMWARE("radeon/RV710_pfp.bin");
68MODULE_FIRMWARE("radeon/RV710_me.bin"); 71MODULE_FIRMWARE("radeon/RV710_me.bin");
69MODULE_FIRMWARE("radeon/R600_rlc.bin"); 72MODULE_FIRMWARE("radeon/R600_rlc.bin");
70MODULE_FIRMWARE("radeon/R700_rlc.bin"); 73MODULE_FIRMWARE("radeon/R700_rlc.bin");
74MODULE_FIRMWARE("radeon/CEDAR_pfp.bin");
75MODULE_FIRMWARE("radeon/CEDAR_me.bin");
76MODULE_FIRMWARE("radeon/CEDAR_rlc.bin");
77MODULE_FIRMWARE("radeon/REDWOOD_pfp.bin");
78MODULE_FIRMWARE("radeon/REDWOOD_me.bin");
79MODULE_FIRMWARE("radeon/REDWOOD_rlc.bin");
80MODULE_FIRMWARE("radeon/JUNIPER_pfp.bin");
81MODULE_FIRMWARE("radeon/JUNIPER_me.bin");
82MODULE_FIRMWARE("radeon/JUNIPER_rlc.bin");
83MODULE_FIRMWARE("radeon/CYPRESS_pfp.bin");
84MODULE_FIRMWARE("radeon/CYPRESS_me.bin");
85MODULE_FIRMWARE("radeon/CYPRESS_rlc.bin");
71 86
72int r600_debugfs_mc_info_init(struct radeon_device *rdev); 87int r600_debugfs_mc_info_init(struct radeon_device *rdev);
73 88
@@ -75,6 +90,7 @@ int r600_debugfs_mc_info_init(struct radeon_device *rdev);
75int r600_mc_wait_for_idle(struct radeon_device *rdev); 90int r600_mc_wait_for_idle(struct radeon_device *rdev);
76void r600_gpu_init(struct radeon_device *rdev); 91void r600_gpu_init(struct radeon_device *rdev);
77void r600_fini(struct radeon_device *rdev); 92void r600_fini(struct radeon_device *rdev);
93void r600_irq_disable(struct radeon_device *rdev);
78 94
79/* hpd for digital panel detect/disconnect */ 95/* hpd for digital panel detect/disconnect */
80bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd) 96bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
@@ -714,11 +730,6 @@ int r600_mc_init(struct radeon_device *rdev)
714 rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE); 730 rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
715 rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE); 731 rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
716 rdev->mc.visible_vram_size = rdev->mc.aper_size; 732 rdev->mc.visible_vram_size = rdev->mc.aper_size;
717 /* FIXME remove this once we support unmappable VRAM */
718 if (rdev->mc.mc_vram_size > rdev->mc.aper_size) {
719 rdev->mc.mc_vram_size = rdev->mc.aper_size;
720 rdev->mc.real_vram_size = rdev->mc.aper_size;
721 }
722 r600_vram_gtt_location(rdev, &rdev->mc); 733 r600_vram_gtt_location(rdev, &rdev->mc);
723 734
724 if (rdev->flags & RADEON_IS_IGP) 735 if (rdev->flags & RADEON_IS_IGP)
@@ -750,7 +761,6 @@ int r600_gpu_soft_reset(struct radeon_device *rdev)
750 S_008014_DB2_BUSY(1) | S_008014_DB3_BUSY(1) | 761 S_008014_DB2_BUSY(1) | S_008014_DB3_BUSY(1) |
751 S_008014_CB0_BUSY(1) | S_008014_CB1_BUSY(1) | 762 S_008014_CB0_BUSY(1) | S_008014_CB1_BUSY(1) |
752 S_008014_CB2_BUSY(1) | S_008014_CB3_BUSY(1); 763 S_008014_CB2_BUSY(1) | S_008014_CB3_BUSY(1);
753 u32 srbm_reset = 0;
754 u32 tmp; 764 u32 tmp;
755 765
756 dev_info(rdev->dev, "GPU softreset \n"); 766 dev_info(rdev->dev, "GPU softreset \n");
@@ -765,7 +775,7 @@ int r600_gpu_soft_reset(struct radeon_device *rdev)
765 dev_warn(rdev->dev, "Wait for MC idle timedout !\n"); 775 dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
766 } 776 }
767 /* Disable CP parsing/prefetching */ 777 /* Disable CP parsing/prefetching */
768 WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(0xff)); 778 WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
769 /* Check if any of the rendering block is busy and reset it */ 779 /* Check if any of the rendering block is busy and reset it */
770 if ((RREG32(R_008010_GRBM_STATUS) & grbm_busy_mask) || 780 if ((RREG32(R_008010_GRBM_STATUS) & grbm_busy_mask) ||
771 (RREG32(R_008014_GRBM_STATUS2) & grbm2_busy_mask)) { 781 (RREG32(R_008014_GRBM_STATUS2) & grbm2_busy_mask)) {
@@ -784,72 +794,56 @@ int r600_gpu_soft_reset(struct radeon_device *rdev)
784 S_008020_SOFT_RESET_VGT(1); 794 S_008020_SOFT_RESET_VGT(1);
785 dev_info(rdev->dev, " R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp); 795 dev_info(rdev->dev, " R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
786 WREG32(R_008020_GRBM_SOFT_RESET, tmp); 796 WREG32(R_008020_GRBM_SOFT_RESET, tmp);
787 (void)RREG32(R_008020_GRBM_SOFT_RESET); 797 RREG32(R_008020_GRBM_SOFT_RESET);
788 udelay(50); 798 mdelay(15);
789 WREG32(R_008020_GRBM_SOFT_RESET, 0); 799 WREG32(R_008020_GRBM_SOFT_RESET, 0);
790 (void)RREG32(R_008020_GRBM_SOFT_RESET);
791 } 800 }
792 /* Reset CP (we always reset CP) */ 801 /* Reset CP (we always reset CP) */
793 tmp = S_008020_SOFT_RESET_CP(1); 802 tmp = S_008020_SOFT_RESET_CP(1);
794 dev_info(rdev->dev, "R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp); 803 dev_info(rdev->dev, "R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
795 WREG32(R_008020_GRBM_SOFT_RESET, tmp); 804 WREG32(R_008020_GRBM_SOFT_RESET, tmp);
796 (void)RREG32(R_008020_GRBM_SOFT_RESET); 805 RREG32(R_008020_GRBM_SOFT_RESET);
797 udelay(50); 806 mdelay(15);
798 WREG32(R_008020_GRBM_SOFT_RESET, 0); 807 WREG32(R_008020_GRBM_SOFT_RESET, 0);
799 (void)RREG32(R_008020_GRBM_SOFT_RESET);
800 /* Reset others GPU block if necessary */
801 if (G_000E50_RLC_BUSY(RREG32(R_000E50_SRBM_STATUS)))
802 srbm_reset |= S_000E60_SOFT_RESET_RLC(1);
803 if (G_000E50_GRBM_RQ_PENDING(RREG32(R_000E50_SRBM_STATUS)))
804 srbm_reset |= S_000E60_SOFT_RESET_GRBM(1);
805 if (G_000E50_HI_RQ_PENDING(RREG32(R_000E50_SRBM_STATUS)))
806 srbm_reset |= S_000E60_SOFT_RESET_IH(1);
807 if (G_000E50_VMC_BUSY(RREG32(R_000E50_SRBM_STATUS)))
808 srbm_reset |= S_000E60_SOFT_RESET_VMC(1);
809 if (G_000E50_MCB_BUSY(RREG32(R_000E50_SRBM_STATUS)))
810 srbm_reset |= S_000E60_SOFT_RESET_MC(1);
811 if (G_000E50_MCDZ_BUSY(RREG32(R_000E50_SRBM_STATUS)))
812 srbm_reset |= S_000E60_SOFT_RESET_MC(1);
813 if (G_000E50_MCDY_BUSY(RREG32(R_000E50_SRBM_STATUS)))
814 srbm_reset |= S_000E60_SOFT_RESET_MC(1);
815 if (G_000E50_MCDX_BUSY(RREG32(R_000E50_SRBM_STATUS)))
816 srbm_reset |= S_000E60_SOFT_RESET_MC(1);
817 if (G_000E50_MCDW_BUSY(RREG32(R_000E50_SRBM_STATUS)))
818 srbm_reset |= S_000E60_SOFT_RESET_MC(1);
819 if (G_000E50_RLC_BUSY(RREG32(R_000E50_SRBM_STATUS)))
820 srbm_reset |= S_000E60_SOFT_RESET_RLC(1);
821 if (G_000E50_SEM_BUSY(RREG32(R_000E50_SRBM_STATUS)))
822 srbm_reset |= S_000E60_SOFT_RESET_SEM(1);
823 if (G_000E50_BIF_BUSY(RREG32(R_000E50_SRBM_STATUS)))
824 srbm_reset |= S_000E60_SOFT_RESET_BIF(1);
825 dev_info(rdev->dev, " R_000E60_SRBM_SOFT_RESET=0x%08X\n", srbm_reset);
826 WREG32(R_000E60_SRBM_SOFT_RESET, srbm_reset);
827 (void)RREG32(R_000E60_SRBM_SOFT_RESET);
828 udelay(50);
829 WREG32(R_000E60_SRBM_SOFT_RESET, 0);
830 (void)RREG32(R_000E60_SRBM_SOFT_RESET);
831 WREG32(R_000E60_SRBM_SOFT_RESET, srbm_reset);
832 (void)RREG32(R_000E60_SRBM_SOFT_RESET);
833 udelay(50);
834 WREG32(R_000E60_SRBM_SOFT_RESET, 0);
835 (void)RREG32(R_000E60_SRBM_SOFT_RESET);
836 /* Wait a little for things to settle down */ 808 /* Wait a little for things to settle down */
837 udelay(50); 809 mdelay(1);
838 dev_info(rdev->dev, " R_008010_GRBM_STATUS=0x%08X\n", 810 dev_info(rdev->dev, " R_008010_GRBM_STATUS=0x%08X\n",
839 RREG32(R_008010_GRBM_STATUS)); 811 RREG32(R_008010_GRBM_STATUS));
840 dev_info(rdev->dev, " R_008014_GRBM_STATUS2=0x%08X\n", 812 dev_info(rdev->dev, " R_008014_GRBM_STATUS2=0x%08X\n",
841 RREG32(R_008014_GRBM_STATUS2)); 813 RREG32(R_008014_GRBM_STATUS2));
842 dev_info(rdev->dev, " R_000E50_SRBM_STATUS=0x%08X\n", 814 dev_info(rdev->dev, " R_000E50_SRBM_STATUS=0x%08X\n",
843 RREG32(R_000E50_SRBM_STATUS)); 815 RREG32(R_000E50_SRBM_STATUS));
844 /* After reset we need to reinit the asic as GPU often endup in an
845 * incoherent state.
846 */
847 atom_asic_init(rdev->mode_info.atom_context);
848 rv515_mc_resume(rdev, &save); 816 rv515_mc_resume(rdev, &save);
849 return 0; 817 return 0;
850} 818}
851 819
852int r600_gpu_reset(struct radeon_device *rdev) 820bool r600_gpu_is_lockup(struct radeon_device *rdev)
821{
822 u32 srbm_status;
823 u32 grbm_status;
824 u32 grbm_status2;
825 int r;
826
827 srbm_status = RREG32(R_000E50_SRBM_STATUS);
828 grbm_status = RREG32(R_008010_GRBM_STATUS);
829 grbm_status2 = RREG32(R_008014_GRBM_STATUS2);
830 if (!G_008010_GUI_ACTIVE(grbm_status)) {
831 r100_gpu_lockup_update(&rdev->config.r300.lockup, &rdev->cp);
832 return false;
833 }
834 /* force CP activities */
835 r = radeon_ring_lock(rdev, 2);
836 if (!r) {
837 /* PACKET2 NOP */
838 radeon_ring_write(rdev, 0x80000000);
839 radeon_ring_write(rdev, 0x80000000);
840 radeon_ring_unlock_commit(rdev);
841 }
842 rdev->cp.rptr = RREG32(R600_CP_RB_RPTR);
843 return r100_gpu_cp_is_lockup(rdev, &rdev->config.r300.lockup, &rdev->cp);
844}
845
846int r600_asic_reset(struct radeon_device *rdev)
853{ 847{
854 return r600_gpu_soft_reset(rdev); 848 return r600_gpu_soft_reset(rdev);
855} 849}
@@ -1467,10 +1461,31 @@ int r600_init_microcode(struct radeon_device *rdev)
1467 chip_name = "RV710"; 1461 chip_name = "RV710";
1468 rlc_chip_name = "R700"; 1462 rlc_chip_name = "R700";
1469 break; 1463 break;
1464 case CHIP_CEDAR:
1465 chip_name = "CEDAR";
1466 rlc_chip_name = "CEDAR";
1467 break;
1468 case CHIP_REDWOOD:
1469 chip_name = "REDWOOD";
1470 rlc_chip_name = "REDWOOD";
1471 break;
1472 case CHIP_JUNIPER:
1473 chip_name = "JUNIPER";
1474 rlc_chip_name = "JUNIPER";
1475 break;
1476 case CHIP_CYPRESS:
1477 case CHIP_HEMLOCK:
1478 chip_name = "CYPRESS";
1479 rlc_chip_name = "CYPRESS";
1480 break;
1470 default: BUG(); 1481 default: BUG();
1471 } 1482 }
1472 1483
1473 if (rdev->family >= CHIP_RV770) { 1484 if (rdev->family >= CHIP_CEDAR) {
1485 pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
1486 me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
1487 rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
1488 } else if (rdev->family >= CHIP_RV770) {
1474 pfp_req_size = R700_PFP_UCODE_SIZE * 4; 1489 pfp_req_size = R700_PFP_UCODE_SIZE * 4;
1475 me_req_size = R700_PM4_UCODE_SIZE * 4; 1490 me_req_size = R700_PM4_UCODE_SIZE * 4;
1476 rlc_req_size = R700_RLC_UCODE_SIZE * 4; 1491 rlc_req_size = R700_RLC_UCODE_SIZE * 4;
@@ -1584,12 +1599,15 @@ int r600_cp_start(struct radeon_device *rdev)
1584 } 1599 }
1585 radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5)); 1600 radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5));
1586 radeon_ring_write(rdev, 0x1); 1601 radeon_ring_write(rdev, 0x1);
1587 if (rdev->family < CHIP_RV770) { 1602 if (rdev->family >= CHIP_CEDAR) {
1588 radeon_ring_write(rdev, 0x3); 1603 radeon_ring_write(rdev, 0x0);
1589 radeon_ring_write(rdev, rdev->config.r600.max_hw_contexts - 1); 1604 radeon_ring_write(rdev, rdev->config.evergreen.max_hw_contexts - 1);
1590 } else { 1605 } else if (rdev->family >= CHIP_RV770) {
1591 radeon_ring_write(rdev, 0x0); 1606 radeon_ring_write(rdev, 0x0);
1592 radeon_ring_write(rdev, rdev->config.rv770.max_hw_contexts - 1); 1607 radeon_ring_write(rdev, rdev->config.rv770.max_hw_contexts - 1);
1608 } else {
1609 radeon_ring_write(rdev, 0x3);
1610 radeon_ring_write(rdev, rdev->config.r600.max_hw_contexts - 1);
1593 } 1611 }
1594 radeon_ring_write(rdev, PACKET3_ME_INITIALIZE_DEVICE_ID(1)); 1612 radeon_ring_write(rdev, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
1595 radeon_ring_write(rdev, 0); 1613 radeon_ring_write(rdev, 0);
@@ -2290,10 +2308,11 @@ static void r600_ih_ring_fini(struct radeon_device *rdev)
2290 } 2308 }
2291} 2309}
2292 2310
2293static void r600_rlc_stop(struct radeon_device *rdev) 2311void r600_rlc_stop(struct radeon_device *rdev)
2294{ 2312{
2295 2313
2296 if (rdev->family >= CHIP_RV770) { 2314 if ((rdev->family >= CHIP_RV770) &&
2315 (rdev->family <= CHIP_RV740)) {
2297 /* r7xx asics need to soft reset RLC before halting */ 2316 /* r7xx asics need to soft reset RLC before halting */
2298 WREG32(SRBM_SOFT_RESET, SOFT_RESET_RLC); 2317 WREG32(SRBM_SOFT_RESET, SOFT_RESET_RLC);
2299 RREG32(SRBM_SOFT_RESET); 2318 RREG32(SRBM_SOFT_RESET);
@@ -2330,7 +2349,12 @@ static int r600_rlc_init(struct radeon_device *rdev)
2330 WREG32(RLC_UCODE_CNTL, 0); 2349 WREG32(RLC_UCODE_CNTL, 0);
2331 2350
2332 fw_data = (const __be32 *)rdev->rlc_fw->data; 2351 fw_data = (const __be32 *)rdev->rlc_fw->data;
2333 if (rdev->family >= CHIP_RV770) { 2352 if (rdev->family >= CHIP_CEDAR) {
2353 for (i = 0; i < EVERGREEN_RLC_UCODE_SIZE; i++) {
2354 WREG32(RLC_UCODE_ADDR, i);
2355 WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
2356 }
2357 } else if (rdev->family >= CHIP_RV770) {
2334 for (i = 0; i < R700_RLC_UCODE_SIZE; i++) { 2358 for (i = 0; i < R700_RLC_UCODE_SIZE; i++) {
2335 WREG32(RLC_UCODE_ADDR, i); 2359 WREG32(RLC_UCODE_ADDR, i);
2336 WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++)); 2360 WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
@@ -2360,7 +2384,7 @@ static void r600_enable_interrupts(struct radeon_device *rdev)
2360 rdev->ih.enabled = true; 2384 rdev->ih.enabled = true;
2361} 2385}
2362 2386
2363static void r600_disable_interrupts(struct radeon_device *rdev) 2387void r600_disable_interrupts(struct radeon_device *rdev)
2364{ 2388{
2365 u32 ih_rb_cntl = RREG32(IH_RB_CNTL); 2389 u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
2366 u32 ih_cntl = RREG32(IH_CNTL); 2390 u32 ih_cntl = RREG32(IH_CNTL);
@@ -2475,7 +2499,10 @@ int r600_irq_init(struct radeon_device *rdev)
2475 WREG32(IH_CNTL, ih_cntl); 2499 WREG32(IH_CNTL, ih_cntl);
2476 2500
2477 /* force the active interrupt state to all disabled */ 2501 /* force the active interrupt state to all disabled */
2478 r600_disable_interrupt_state(rdev); 2502 if (rdev->family >= CHIP_CEDAR)
2503 evergreen_disable_interrupt_state(rdev);
2504 else
2505 r600_disable_interrupt_state(rdev);
2479 2506
2480 /* enable irqs */ 2507 /* enable irqs */
2481 r600_enable_interrupts(rdev); 2508 r600_enable_interrupts(rdev);
@@ -2485,7 +2512,7 @@ int r600_irq_init(struct radeon_device *rdev)
2485 2512
2486void r600_irq_suspend(struct radeon_device *rdev) 2513void r600_irq_suspend(struct radeon_device *rdev)
2487{ 2514{
2488 r600_disable_interrupts(rdev); 2515 r600_irq_disable(rdev);
2489 r600_rlc_stop(rdev); 2516 r600_rlc_stop(rdev);
2490} 2517}
2491 2518
@@ -2500,6 +2527,7 @@ int r600_irq_set(struct radeon_device *rdev)
2500 u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE; 2527 u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE;
2501 u32 mode_int = 0; 2528 u32 mode_int = 0;
2502 u32 hpd1, hpd2, hpd3, hpd4 = 0, hpd5 = 0, hpd6 = 0; 2529 u32 hpd1, hpd2, hpd3, hpd4 = 0, hpd5 = 0, hpd6 = 0;
2530 u32 hdmi1, hdmi2;
2503 2531
2504 if (!rdev->irq.installed) { 2532 if (!rdev->irq.installed) {
2505 WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n"); 2533 WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n");
@@ -2513,7 +2541,9 @@ int r600_irq_set(struct radeon_device *rdev)
2513 return 0; 2541 return 0;
2514 } 2542 }
2515 2543
2544 hdmi1 = RREG32(R600_HDMI_BLOCK1 + R600_HDMI_CNTL) & ~R600_HDMI_INT_EN;
2516 if (ASIC_IS_DCE3(rdev)) { 2545 if (ASIC_IS_DCE3(rdev)) {
2546 hdmi2 = RREG32(R600_HDMI_BLOCK3 + R600_HDMI_CNTL) & ~R600_HDMI_INT_EN;
2517 hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN; 2547 hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
2518 hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN; 2548 hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
2519 hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN; 2549 hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
@@ -2523,6 +2553,7 @@ int r600_irq_set(struct radeon_device *rdev)
2523 hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN; 2553 hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
2524 } 2554 }
2525 } else { 2555 } else {
2556 hdmi2 = RREG32(R600_HDMI_BLOCK2 + R600_HDMI_CNTL) & ~R600_HDMI_INT_EN;
2526 hpd1 = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & ~DC_HPDx_INT_EN; 2557 hpd1 = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & ~DC_HPDx_INT_EN;
2527 hpd2 = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & ~DC_HPDx_INT_EN; 2558 hpd2 = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & ~DC_HPDx_INT_EN;
2528 hpd3 = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & ~DC_HPDx_INT_EN; 2559 hpd3 = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & ~DC_HPDx_INT_EN;
@@ -2564,10 +2595,20 @@ int r600_irq_set(struct radeon_device *rdev)
2564 DRM_DEBUG("r600_irq_set: hpd 6\n"); 2595 DRM_DEBUG("r600_irq_set: hpd 6\n");
2565 hpd6 |= DC_HPDx_INT_EN; 2596 hpd6 |= DC_HPDx_INT_EN;
2566 } 2597 }
2598 if (rdev->irq.hdmi[0]) {
2599 DRM_DEBUG("r600_irq_set: hdmi 1\n");
2600 hdmi1 |= R600_HDMI_INT_EN;
2601 }
2602 if (rdev->irq.hdmi[1]) {
2603 DRM_DEBUG("r600_irq_set: hdmi 2\n");
2604 hdmi2 |= R600_HDMI_INT_EN;
2605 }
2567 2606
2568 WREG32(CP_INT_CNTL, cp_int_cntl); 2607 WREG32(CP_INT_CNTL, cp_int_cntl);
2569 WREG32(DxMODE_INT_MASK, mode_int); 2608 WREG32(DxMODE_INT_MASK, mode_int);
2609 WREG32(R600_HDMI_BLOCK1 + R600_HDMI_CNTL, hdmi1);
2570 if (ASIC_IS_DCE3(rdev)) { 2610 if (ASIC_IS_DCE3(rdev)) {
2611 WREG32(R600_HDMI_BLOCK3 + R600_HDMI_CNTL, hdmi2);
2571 WREG32(DC_HPD1_INT_CONTROL, hpd1); 2612 WREG32(DC_HPD1_INT_CONTROL, hpd1);
2572 WREG32(DC_HPD2_INT_CONTROL, hpd2); 2613 WREG32(DC_HPD2_INT_CONTROL, hpd2);
2573 WREG32(DC_HPD3_INT_CONTROL, hpd3); 2614 WREG32(DC_HPD3_INT_CONTROL, hpd3);
@@ -2577,6 +2618,7 @@ int r600_irq_set(struct radeon_device *rdev)
2577 WREG32(DC_HPD6_INT_CONTROL, hpd6); 2618 WREG32(DC_HPD6_INT_CONTROL, hpd6);
2578 } 2619 }
2579 } else { 2620 } else {
2621 WREG32(R600_HDMI_BLOCK2 + R600_HDMI_CNTL, hdmi2);
2580 WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, hpd1); 2622 WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, hpd1);
2581 WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2); 2623 WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2);
2582 WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, hpd3); 2624 WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, hpd3);
@@ -2660,6 +2702,18 @@ static inline void r600_irq_ack(struct radeon_device *rdev,
2660 WREG32(DC_HPD6_INT_CONTROL, tmp); 2702 WREG32(DC_HPD6_INT_CONTROL, tmp);
2661 } 2703 }
2662 } 2704 }
2705 if (RREG32(R600_HDMI_BLOCK1 + R600_HDMI_STATUS) & R600_HDMI_INT_PENDING) {
2706 WREG32_P(R600_HDMI_BLOCK1 + R600_HDMI_CNTL, R600_HDMI_INT_ACK, ~R600_HDMI_INT_ACK);
2707 }
2708 if (ASIC_IS_DCE3(rdev)) {
2709 if (RREG32(R600_HDMI_BLOCK3 + R600_HDMI_STATUS) & R600_HDMI_INT_PENDING) {
2710 WREG32_P(R600_HDMI_BLOCK3 + R600_HDMI_CNTL, R600_HDMI_INT_ACK, ~R600_HDMI_INT_ACK);
2711 }
2712 } else {
2713 if (RREG32(R600_HDMI_BLOCK2 + R600_HDMI_STATUS) & R600_HDMI_INT_PENDING) {
2714 WREG32_P(R600_HDMI_BLOCK2 + R600_HDMI_CNTL, R600_HDMI_INT_ACK, ~R600_HDMI_INT_ACK);
2715 }
2716 }
2663} 2717}
2664 2718
2665void r600_irq_disable(struct radeon_device *rdev) 2719void r600_irq_disable(struct radeon_device *rdev)
@@ -2713,6 +2767,8 @@ static inline u32 r600_get_ih_wptr(struct radeon_device *rdev)
2713 * 19 1 FP Hot plug detection B 2767 * 19 1 FP Hot plug detection B
2714 * 19 2 DAC A auto-detection 2768 * 19 2 DAC A auto-detection
2715 * 19 3 DAC B auto-detection 2769 * 19 3 DAC B auto-detection
2770 * 21 4 HDMI block A
2771 * 21 5 HDMI block B
2716 * 176 - CP_INT RB 2772 * 176 - CP_INT RB
2717 * 177 - CP_INT IB1 2773 * 177 - CP_INT IB1
2718 * 178 - CP_INT IB2 2774 * 178 - CP_INT IB2
@@ -2852,6 +2908,10 @@ restart_ih:
2852 break; 2908 break;
2853 } 2909 }
2854 break; 2910 break;
2911 case 21: /* HDMI */
2912 DRM_DEBUG("IH: HDMI: 0x%x\n", src_data);
2913 r600_audio_schedule_polling(rdev);
2914 break;
2855 case 176: /* CP_INT in ring buffer */ 2915 case 176: /* CP_INT in ring buffer */
2856 case 177: /* CP_INT in IB1 */ 2916 case 177: /* CP_INT in IB1 */
2857 case 178: /* CP_INT in IB2 */ 2917 case 178: /* CP_INT in IB2 */
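
Note the recurring shape of the reset writes in r600_gpu_soft_reset() above: assert the reset bits, read the register back so the posted write actually reaches the chip, hold, then deassert. A hedged helper capturing that pulse (wreg32/rreg32/sleep_ms are stand-ins for the driver's WREG32/RREG32/mdelay, not real kernel API names):

#include <stdint.h>

extern void wreg32(uint32_t reg, uint32_t val);   /* stand-in for WREG32 */
extern uint32_t rreg32(uint32_t reg);             /* stand-in for RREG32 */
extern void sleep_ms(unsigned int ms);            /* stand-in for mdelay */

#define GRBM_SOFT_RESET 0x8020 /* R_008020_GRBM_SOFT_RESET */

/* assert reset bits, flush the posted write, settle, deassert */
static void soft_reset_pulse(uint32_t bits, unsigned int hold_ms)
{
	wreg32(GRBM_SOFT_RESET, bits);
	(void)rreg32(GRBM_SOFT_RESET); /* readback flushes the write */
	sleep_ms(hold_ms);             /* 15 ms in the hunk above */
	wreg32(GRBM_SOFT_RESET, 0);
}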
diff --git a/drivers/gpu/drm/radeon/r600_audio.c b/drivers/gpu/drm/radeon/r600_audio.c
index 1d898051c631..2b26553c352c 100644
--- a/drivers/gpu/drm/radeon/r600_audio.c
+++ b/drivers/gpu/drm/radeon/r600_audio.c
@@ -44,7 +44,7 @@ static int r600_audio_chipset_supported(struct radeon_device *rdev)
44/* 44/*
45 * current number of channels 45 * current number of channels
46 */ 46 */
47static int r600_audio_channels(struct radeon_device *rdev) 47int r600_audio_channels(struct radeon_device *rdev)
48{ 48{
49 return (RREG32(R600_AUDIO_RATE_BPS_CHANNEL) & 0x7) + 1; 49 return (RREG32(R600_AUDIO_RATE_BPS_CHANNEL) & 0x7) + 1;
50} 50}
@@ -52,7 +52,7 @@ static int r600_audio_channels(struct radeon_device *rdev)
52/* 52/*
53 * current bits per sample 53 * current bits per sample
54 */ 54 */
55static int r600_audio_bits_per_sample(struct radeon_device *rdev) 55int r600_audio_bits_per_sample(struct radeon_device *rdev)
56{ 56{
57 uint32_t value = (RREG32(R600_AUDIO_RATE_BPS_CHANNEL) & 0xF0) >> 4; 57 uint32_t value = (RREG32(R600_AUDIO_RATE_BPS_CHANNEL) & 0xF0) >> 4;
58 switch (value) { 58 switch (value) {
@@ -71,7 +71,7 @@ static int r600_audio_bits_per_sample(struct radeon_device *rdev)
71/* 71/*
72 * current sampling rate in HZ 72 * current sampling rate in HZ
73 */ 73 */
74static int r600_audio_rate(struct radeon_device *rdev) 74int r600_audio_rate(struct radeon_device *rdev)
75{ 75{
76 uint32_t value = RREG32(R600_AUDIO_RATE_BPS_CHANNEL); 76 uint32_t value = RREG32(R600_AUDIO_RATE_BPS_CHANNEL);
77 uint32_t result; 77 uint32_t result;
@@ -90,7 +90,7 @@ static int r600_audio_rate(struct radeon_device *rdev)
90/* 90/*
91 * iec 60958 status bits 91 * iec 60958 status bits
92 */ 92 */
93static uint8_t r600_audio_status_bits(struct radeon_device *rdev) 93uint8_t r600_audio_status_bits(struct radeon_device *rdev)
94{ 94{
95 return RREG32(R600_AUDIO_STATUS_BITS) & 0xff; 95 return RREG32(R600_AUDIO_STATUS_BITS) & 0xff;
96} 96}
@@ -98,12 +98,21 @@ static uint8_t r600_audio_status_bits(struct radeon_device *rdev)
98/* 98/*
99 * iec 60958 category code 99 * iec 60958 category code
100 */ 100 */
101static uint8_t r600_audio_category_code(struct radeon_device *rdev) 101uint8_t r600_audio_category_code(struct radeon_device *rdev)
102{ 102{
103 return (RREG32(R600_AUDIO_STATUS_BITS) >> 8) & 0xff; 103 return (RREG32(R600_AUDIO_STATUS_BITS) >> 8) & 0xff;
104} 104}
105 105
106/* 106/*
107 * schedule next audio update event
108 */
109void r600_audio_schedule_polling(struct radeon_device *rdev)
110{
111 mod_timer(&rdev->audio_timer,
112 jiffies + msecs_to_jiffies(AUDIO_TIMER_INTERVALL));
113}
114
115/*
107 * update all hdmi interfaces with current audio parameters 116 * update all hdmi interfaces with current audio parameters
108 */ 117 */
109static void r600_audio_update_hdmi(unsigned long param) 118static void r600_audio_update_hdmi(unsigned long param)
@@ -118,7 +127,7 @@ static void r600_audio_update_hdmi(unsigned long param)
118 uint8_t category_code = r600_audio_category_code(rdev); 127 uint8_t category_code = r600_audio_category_code(rdev);
119 128
120 struct drm_encoder *encoder; 129 struct drm_encoder *encoder;
121 int changes = 0; 130 int changes = 0, still_going = 0;
122 131
123 changes |= channels != rdev->audio_channels; 132 changes |= channels != rdev->audio_channels;
124 changes |= rate != rdev->audio_rate; 133 changes |= rate != rdev->audio_rate;
@@ -135,15 +144,13 @@ static void r600_audio_update_hdmi(unsigned long param)
135 } 144 }
136 145
137 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { 146 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
147 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
148 still_going |= radeon_encoder->audio_polling_active;
138 if (changes || r600_hdmi_buffer_status_changed(encoder)) 149 if (changes || r600_hdmi_buffer_status_changed(encoder))
139 r600_hdmi_update_audio_settings( 150 r600_hdmi_update_audio_settings(encoder);
140 encoder, channels,
141 rate, bps, status_bits,
142 category_code);
143 } 151 }
144 152
 145	mod_timer(&rdev->audio_timer,	153	if (still_going) r600_audio_schedule_polling(rdev);
146 jiffies + msecs_to_jiffies(AUDIO_TIMER_INTERVALL));
147} 154}
148 155
149/* 156/*
@@ -176,9 +183,34 @@ int r600_audio_init(struct radeon_device *rdev)
176 r600_audio_update_hdmi, 183 r600_audio_update_hdmi,
177 (unsigned long)rdev); 184 (unsigned long)rdev);
178 185
186 return 0;
187}
188
189/*
190 * enable the polling timer, to check for status changes
191 */
192void r600_audio_enable_polling(struct drm_encoder *encoder)
193{
194 struct drm_device *dev = encoder->dev;
195 struct radeon_device *rdev = dev->dev_private;
196 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
197
 198	DRM_DEBUG("r600_audio_enable_polling: %d\n", radeon_encoder->audio_polling_active);
199 if (radeon_encoder->audio_polling_active)
200 return;
201
202 radeon_encoder->audio_polling_active = 1;
179 mod_timer(&rdev->audio_timer, jiffies + 1); 203 mod_timer(&rdev->audio_timer, jiffies + 1);
204}
180 205
181 return 0; 206/*
207 * disable the polling timer, so we get no more status updates
208 */
209void r600_audio_disable_polling(struct drm_encoder *encoder)
210{
211 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
 212	DRM_DEBUG("r600_audio_disable_polling: %d\n", radeon_encoder->audio_polling_active);
213 radeon_encoder->audio_polling_active = 0;
182} 214}
183 215
184/* 216/*
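
The timer rework above makes the audio poll self-limiting: each expiry of r600_audio_update_hdmi() ORs together audio_polling_active across all encoders and only reschedules itself while at least one encoder still needs polling. A compact standalone model of that rearm decision (NUM_BLOCKS and the flag array are illustrative, not driver state):

#include <stdbool.h>
#include <stdio.h>

#define NUM_BLOCKS 2 /* two HDMI blocks, as in rdev->irq.hdmi[2] */

static bool polling_active[NUM_BLOCKS]; /* per-encoder flag from the patch */

/* one timer expiry: returns whether the caller should rearm the timer */
static bool audio_timer_tick(void)
{
	bool still_going = false;
	for (int i = 0; i < NUM_BLOCKS; i++)
		still_going |= polling_active[i];
	/* ... per-encoder r600_hdmi_update_audio_settings() would run here ... */
	return still_going;
}

int main(void)
{
	polling_active[0] = true;          /* polling fallback in use */
	while (audio_timer_tick())
		polling_active[0] = false; /* irq takes over, flag drops */
	puts("timer not rearmed; polling stopped");
	return 0;
}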
diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c
index f6c6c77db7e0..d13622ae74e9 100644
--- a/drivers/gpu/drm/radeon/r600_blit_kms.c
+++ b/drivers/gpu/drm/radeon/r600_blit_kms.c
@@ -447,6 +447,9 @@ int r600_blit_init(struct radeon_device *rdev)
447 u32 packet2s[16]; 447 u32 packet2s[16];
448 int num_packet2s = 0; 448 int num_packet2s = 0;
449 449
450 /* don't reinitialize blit */
451 if (rdev->r600_blit.shader_obj)
452 return 0;
450 mutex_init(&rdev->r600_blit.mutex); 453 mutex_init(&rdev->r600_blit.mutex);
451 rdev->r600_blit.state_offset = 0; 454 rdev->r600_blit.state_offset = 0;
452 455
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
index 2616b822ba68..40b1aca6d1f4 100644
--- a/drivers/gpu/drm/radeon/r600_hdmi.c
+++ b/drivers/gpu/drm/radeon/r600_hdmi.c
@@ -290,17 +290,15 @@ void r600_hdmi_audio_workaround(struct drm_encoder *encoder)
290 if (!offset) 290 if (!offset)
291 return; 291 return;
292 292
293 if (r600_hdmi_is_audio_buffer_filled(encoder)) { 293 if (!radeon_encoder->hdmi_audio_workaround ||
294 /* disable audio workaround and start delivering of audio frames */ 294 r600_hdmi_is_audio_buffer_filled(encoder)) {
295 WREG32_P(offset+R600_HDMI_CNTL, 0x00000001, ~0x00001001);
296 295
297 } else if (radeon_encoder->hdmi_audio_workaround) { 296 /* disable audio workaround */
298 /* enable audio workaround and start delivering of audio frames */ 297 WREG32_P(offset+R600_HDMI_CNTL, 0x00000001, ~0x00001001);
299 WREG32_P(offset+R600_HDMI_CNTL, 0x00001001, ~0x00001001);
300 298
301 } else { 299 } else {
302 /* disable audio workaround and stop delivering of audio frames */ 300 /* enable audio workaround */
303 WREG32_P(offset+R600_HDMI_CNTL, 0x00000000, ~0x00001001); 301 WREG32_P(offset+R600_HDMI_CNTL, 0x00001001, ~0x00001001);
304 } 302 }
305} 303}
306 304
@@ -345,25 +343,23 @@ void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mod
345 343
346 /* audio packets per line, does anyone know how to calc this ? */ 344 /* audio packets per line, does anyone know how to calc this ? */
347 WREG32_P(offset+R600_HDMI_CNTL, 0x00040000, ~0x001F0000); 345 WREG32_P(offset+R600_HDMI_CNTL, 0x00040000, ~0x001F0000);
348
349 /* update? reset? don't realy know */
350 WREG32_P(offset+R600_HDMI_CNTL, 0x14000000, ~0x14000000);
351} 346}
352 347
353/* 348/*
354 * update settings with current parameters from audio engine 349 * update settings with current parameters from audio engine
355 */ 350 */
356void r600_hdmi_update_audio_settings(struct drm_encoder *encoder, 351void r600_hdmi_update_audio_settings(struct drm_encoder *encoder)
357 int channels,
358 int rate,
359 int bps,
360 uint8_t status_bits,
361 uint8_t category_code)
362{ 352{
363 struct drm_device *dev = encoder->dev; 353 struct drm_device *dev = encoder->dev;
364 struct radeon_device *rdev = dev->dev_private; 354 struct radeon_device *rdev = dev->dev_private;
365 uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset; 355 uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset;
366 356
357 int channels = r600_audio_channels(rdev);
358 int rate = r600_audio_rate(rdev);
359 int bps = r600_audio_bits_per_sample(rdev);
360 uint8_t status_bits = r600_audio_status_bits(rdev);
361 uint8_t category_code = r600_audio_category_code(rdev);
362
367 uint32_t iec; 363 uint32_t iec;
368 364
369 if (!offset) 365 if (!offset)
@@ -415,9 +411,6 @@ void r600_hdmi_update_audio_settings(struct drm_encoder *encoder,
415 r600_hdmi_audioinfoframe(encoder, channels-1, 0, 0, 0, 0, 0, 0, 0); 411 r600_hdmi_audioinfoframe(encoder, channels-1, 0, 0, 0, 0, 0, 0, 0);
416 412
417 r600_hdmi_audio_workaround(encoder); 413 r600_hdmi_audio_workaround(encoder);
418
419 /* update? reset? don't realy know */
420 WREG32_P(offset+R600_HDMI_CNTL, 0x04000000, ~0x04000000);
421} 414}
422 415
423static int r600_hdmi_find_free_block(struct drm_device *dev) 416static int r600_hdmi_find_free_block(struct drm_device *dev)
@@ -486,6 +479,7 @@ void r600_hdmi_enable(struct drm_encoder *encoder)
486 struct drm_device *dev = encoder->dev; 479 struct drm_device *dev = encoder->dev;
487 struct radeon_device *rdev = dev->dev_private; 480 struct radeon_device *rdev = dev->dev_private;
488 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 481 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
482 uint32_t offset;
489 483
490 if (ASIC_IS_DCE4(rdev)) 484 if (ASIC_IS_DCE4(rdev))
491 return; 485 return;
@@ -499,10 +493,10 @@ void r600_hdmi_enable(struct drm_encoder *encoder)
499 } 493 }
500 } 494 }
501 495
496 offset = radeon_encoder->hdmi_offset;
502 if (ASIC_IS_DCE32(rdev) && !ASIC_IS_DCE4(rdev)) { 497 if (ASIC_IS_DCE32(rdev) && !ASIC_IS_DCE4(rdev)) {
503 WREG32_P(radeon_encoder->hdmi_config_offset + 0x4, 0x1, ~0x1); 498 WREG32_P(radeon_encoder->hdmi_config_offset + 0x4, 0x1, ~0x1);
504 } else if (rdev->family >= CHIP_R600 && !ASIC_IS_DCE3(rdev)) { 499 } else if (rdev->family >= CHIP_R600 && !ASIC_IS_DCE3(rdev)) {
505 int offset = radeon_encoder->hdmi_offset;
506 switch (radeon_encoder->encoder_id) { 500 switch (radeon_encoder->encoder_id) {
507 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1: 501 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
508 WREG32_P(AVIVO_TMDSA_CNTL, 0x4, ~0x4); 502 WREG32_P(AVIVO_TMDSA_CNTL, 0x4, ~0x4);
@@ -518,6 +512,21 @@ void r600_hdmi_enable(struct drm_encoder *encoder)
518 } 512 }
519 } 513 }
520 514
515 if (rdev->irq.installed
516 && rdev->family != CHIP_RS600
517 && rdev->family != CHIP_RS690
518 && rdev->family != CHIP_RS740) {
519
520 /* if irq is available use it */
521 rdev->irq.hdmi[offset == R600_HDMI_BLOCK1 ? 0 : 1] = true;
522 radeon_irq_set(rdev);
523
524 r600_audio_disable_polling(encoder);
525 } else {
 526	/* if not, fall back to polling */
527 r600_audio_enable_polling(encoder);
528 }
529
521 DRM_DEBUG("Enabling HDMI interface @ 0x%04X for encoder 0x%x\n", 530 DRM_DEBUG("Enabling HDMI interface @ 0x%04X for encoder 0x%x\n",
522 radeon_encoder->hdmi_offset, radeon_encoder->encoder_id); 531 radeon_encoder->hdmi_offset, radeon_encoder->encoder_id);
523} 532}
@@ -530,22 +539,30 @@ void r600_hdmi_disable(struct drm_encoder *encoder)
530 struct drm_device *dev = encoder->dev; 539 struct drm_device *dev = encoder->dev;
531 struct radeon_device *rdev = dev->dev_private; 540 struct radeon_device *rdev = dev->dev_private;
532 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 541 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
 542	uint32_t offset;
533 543
534 if (ASIC_IS_DCE4(rdev)) 544 if (ASIC_IS_DCE4(rdev))
535 return; 545 return;
536 546
537 if (!radeon_encoder->hdmi_offset) { 547 offset = radeon_encoder->hdmi_offset;
548 if (!offset) {
538 dev_err(rdev->dev, "Disabling not enabled HDMI\n"); 549 dev_err(rdev->dev, "Disabling not enabled HDMI\n");
539 return; 550 return;
540 } 551 }
541 552
542 DRM_DEBUG("Disabling HDMI interface @ 0x%04X for encoder 0x%x\n", 553 DRM_DEBUG("Disabling HDMI interface @ 0x%04X for encoder 0x%x\n",
543 radeon_encoder->hdmi_offset, radeon_encoder->encoder_id); 554 offset, radeon_encoder->encoder_id);
555
556 /* disable irq */
557 rdev->irq.hdmi[offset == R600_HDMI_BLOCK1 ? 0 : 1] = false;
558 radeon_irq_set(rdev);
559
560 /* disable polling */
561 r600_audio_disable_polling(encoder);
544 562
545 if (ASIC_IS_DCE32(rdev) && !ASIC_IS_DCE4(rdev)) { 563 if (ASIC_IS_DCE32(rdev) && !ASIC_IS_DCE4(rdev)) {
546 WREG32_P(radeon_encoder->hdmi_config_offset + 0x4, 0, ~0x1); 564 WREG32_P(radeon_encoder->hdmi_config_offset + 0x4, 0, ~0x1);
547 } else if (rdev->family >= CHIP_R600 && !ASIC_IS_DCE3(rdev)) { 565 } else if (rdev->family >= CHIP_R600 && !ASIC_IS_DCE3(rdev)) {
548 int offset = radeon_encoder->hdmi_offset;
549 switch (radeon_encoder->encoder_id) { 566 switch (radeon_encoder->encoder_id) {
550 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1: 567 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
551 WREG32_P(AVIVO_TMDSA_CNTL, 0, ~0x4); 568 WREG32_P(AVIVO_TMDSA_CNTL, 0, ~0x4);
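
r600_hdmi_enable() now chooses the notification path per HDMI block: when an interrupt handler is installed and the ASIC is not an RS600/RS690/RS740 IGP, it sets rdev->irq.hdmi[block] and disables polling; otherwise it keeps the timer. The block index comes from the MMIO offset, which the patch open-codes; a small sketch of that mapping (hdmi_irq_index is a hypothetical helper, not in the patch):

#include <stdint.h>

#define R600_HDMI_BLOCK1 0x7400
#define R600_HDMI_BLOCK2 0x7700
#define R600_HDMI_BLOCK3 0x7800

/* map an HDMI block offset to an rdev->irq.hdmi[] slot; the patch
 * writes this inline as (offset == R600_HDMI_BLOCK1 ? 0 : 1) */
static int hdmi_irq_index(uint32_t offset)
{
	return offset == R600_HDMI_BLOCK1 ? 0 : 1;
}

Blocks 2 and 3 can share index 1 because a given ASIC exposes only one of them: pre-DCE3 parts use BLOCK2, DCE3 parts use BLOCK3.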
diff --git a/drivers/gpu/drm/radeon/r600_reg.h b/drivers/gpu/drm/radeon/r600_reg.h
index 7b1d22370f6e..d84612ae47e0 100644
--- a/drivers/gpu/drm/radeon/r600_reg.h
+++ b/drivers/gpu/drm/radeon/r600_reg.h
@@ -157,33 +157,36 @@
157#define R600_HDMI_BLOCK3 0x7800 157#define R600_HDMI_BLOCK3 0x7800
158 158
159/* HDMI registers */ 159/* HDMI registers */
160#define R600_HDMI_ENABLE 0x00 160#define R600_HDMI_ENABLE 0x00
161#define R600_HDMI_STATUS 0x04 161#define R600_HDMI_STATUS 0x04
162#define R600_HDMI_CNTL 0x08 162# define R600_HDMI_INT_PENDING (1 << 29)
163#define R600_HDMI_UNKNOWN_0 0x0C 163#define R600_HDMI_CNTL 0x08
164#define R600_HDMI_AUDIOCNTL 0x10 164# define R600_HDMI_INT_EN (1 << 28)
165#define R600_HDMI_VIDEOCNTL 0x14 165# define R600_HDMI_INT_ACK (1 << 29)
166#define R600_HDMI_VERSION 0x18 166#define R600_HDMI_UNKNOWN_0 0x0C
167#define R600_HDMI_UNKNOWN_1 0x28 167#define R600_HDMI_AUDIOCNTL 0x10
168#define R600_HDMI_VIDEOINFOFRAME_0 0x54 168#define R600_HDMI_VIDEOCNTL 0x14
169#define R600_HDMI_VIDEOINFOFRAME_1 0x58 169#define R600_HDMI_VERSION 0x18
170#define R600_HDMI_VIDEOINFOFRAME_2 0x5c 170#define R600_HDMI_UNKNOWN_1 0x28
171#define R600_HDMI_VIDEOINFOFRAME_3 0x60 171#define R600_HDMI_VIDEOINFOFRAME_0 0x54
172#define R600_HDMI_32kHz_CTS 0xac 172#define R600_HDMI_VIDEOINFOFRAME_1 0x58
173#define R600_HDMI_32kHz_N 0xb0 173#define R600_HDMI_VIDEOINFOFRAME_2 0x5c
174#define R600_HDMI_44_1kHz_CTS 0xb4 174#define R600_HDMI_VIDEOINFOFRAME_3 0x60
175#define R600_HDMI_44_1kHz_N 0xb8 175#define R600_HDMI_32kHz_CTS 0xac
176#define R600_HDMI_48kHz_CTS 0xbc 176#define R600_HDMI_32kHz_N 0xb0
177#define R600_HDMI_48kHz_N 0xc0 177#define R600_HDMI_44_1kHz_CTS 0xb4
178#define R600_HDMI_AUDIOINFOFRAME_0 0xcc 178#define R600_HDMI_44_1kHz_N 0xb8
179#define R600_HDMI_AUDIOINFOFRAME_1 0xd0 179#define R600_HDMI_48kHz_CTS 0xbc
180#define R600_HDMI_IEC60958_1 0xd4 180#define R600_HDMI_48kHz_N 0xc0
181#define R600_HDMI_IEC60958_2 0xd8 181#define R600_HDMI_AUDIOINFOFRAME_0 0xcc
182#define R600_HDMI_UNKNOWN_2 0xdc 182#define R600_HDMI_AUDIOINFOFRAME_1 0xd0
183#define R600_HDMI_AUDIO_DEBUG_0 0xe0 183#define R600_HDMI_IEC60958_1 0xd4
184#define R600_HDMI_AUDIO_DEBUG_1 0xe4 184#define R600_HDMI_IEC60958_2 0xd8
185#define R600_HDMI_AUDIO_DEBUG_2 0xe8 185#define R600_HDMI_UNKNOWN_2 0xdc
186#define R600_HDMI_AUDIO_DEBUG_3 0xec 186#define R600_HDMI_AUDIO_DEBUG_0 0xe0
187#define R600_HDMI_AUDIO_DEBUG_1 0xe4
188#define R600_HDMI_AUDIO_DEBUG_2 0xe8
189#define R600_HDMI_AUDIO_DEBUG_3 0xec
187 190
188/* HDMI additional config base register addresses */ 191/* HDMI additional config base register addresses */
189#define R600_HDMI_CONFIG1 0x7600 192#define R600_HDMI_CONFIG1 0x7600
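
The INT_EN and INT_ACK bits above are flipped with WREG32_P(reg, val, mask), radeon's masked read-modify-write: the register keeps the bits selected by mask and takes the remaining bits from val. As a plain function with the register I/O stubbed out (the stubs are assumptions, not kernel API):

#include <stdint.h>

extern uint32_t rreg32(uint32_t reg);           /* stand-in for RREG32 */
extern void wreg32(uint32_t reg, uint32_t val); /* stand-in for WREG32 */

/* mirrors WREG32_P: preserve (reg & mask), merge in (val & ~mask) */
static void wreg32_p(uint32_t reg, uint32_t val, uint32_t mask)
{
	uint32_t tmp = rreg32(reg) & mask;
	tmp |= val & ~mask;
	wreg32(reg, tmp);
}

So the ack in r600_irq_ack(), WREG32_P(block + R600_HDMI_CNTL, R600_HDMI_INT_ACK, ~R600_HDMI_INT_ACK), preserves every CNTL bit except bit 29 and writes a 1 there.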
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 034218c3dbbb..ab29d972a167 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -99,6 +99,7 @@ extern int radeon_hw_i2c;
99 * symbol; 99 * symbol;
100 */ 100 */
101#define RADEON_MAX_USEC_TIMEOUT 100000 /* 100 ms */ 101#define RADEON_MAX_USEC_TIMEOUT 100000 /* 100 ms */
102#define RADEON_FENCE_JIFFIES_TIMEOUT (HZ / 2)
102/* RADEON_IB_POOL_SIZE must be a power of 2 */ 103/* RADEON_IB_POOL_SIZE must be a power of 2 */
103#define RADEON_IB_POOL_SIZE 16 104#define RADEON_IB_POOL_SIZE 16
104#define RADEON_DEBUGFS_MAX_NUM_FILES 32 105#define RADEON_DEBUGFS_MAX_NUM_FILES 32
@@ -182,7 +183,8 @@ struct radeon_fence_driver {
182 uint32_t scratch_reg; 183 uint32_t scratch_reg;
183 atomic_t seq; 184 atomic_t seq;
184 uint32_t last_seq; 185 uint32_t last_seq;
185 unsigned long count_timeout; 186 unsigned long last_jiffies;
187 unsigned long last_timeout;
186 wait_queue_head_t queue; 188 wait_queue_head_t queue;
187 rwlock_t lock; 189 rwlock_t lock;
188 struct list_head created; 190 struct list_head created;
@@ -197,7 +199,6 @@ struct radeon_fence {
197 struct list_head list; 199 struct list_head list;
198 /* protected by radeon_fence.lock */ 200 /* protected by radeon_fence.lock */
199 uint32_t seq; 201 uint32_t seq;
200 unsigned long timeout;
201 bool emited; 202 bool emited;
202 bool signaled; 203 bool signaled;
203}; 204};
@@ -371,10 +372,12 @@ struct radeon_irq {
371 bool installed; 372 bool installed;
372 bool sw_int; 373 bool sw_int;
373 /* FIXME: use a define max crtc rather than hardcode it */ 374 /* FIXME: use a define max crtc rather than hardcode it */
374 bool crtc_vblank_int[2]; 375 bool crtc_vblank_int[6];
375 wait_queue_head_t vblank_queue; 376 wait_queue_head_t vblank_queue;
376 /* FIXME: use defines for max hpd/dacs */ 377 /* FIXME: use defines for max hpd/dacs */
377 bool hpd[6]; 378 bool hpd[6];
379 /* FIXME: use defines for max HDMI blocks */
380 bool hdmi[2];
378 spinlock_t sw_lock; 381 spinlock_t sw_lock;
379 int sw_refcount; 382 int sw_refcount;
380}; 383};
@@ -746,7 +749,8 @@ struct radeon_asic {
746 int (*resume)(struct radeon_device *rdev); 749 int (*resume)(struct radeon_device *rdev);
747 int (*suspend)(struct radeon_device *rdev); 750 int (*suspend)(struct radeon_device *rdev);
748 void (*vga_set_state)(struct radeon_device *rdev, bool state); 751 void (*vga_set_state)(struct radeon_device *rdev, bool state);
749 int (*gpu_reset)(struct radeon_device *rdev); 752 bool (*gpu_is_lockup)(struct radeon_device *rdev);
753 int (*asic_reset)(struct radeon_device *rdev);
750 void (*gart_tlb_flush)(struct radeon_device *rdev); 754 void (*gart_tlb_flush)(struct radeon_device *rdev);
751 int (*gart_set_page)(struct radeon_device *rdev, int i, uint64_t addr); 755 int (*gart_set_page)(struct radeon_device *rdev, int i, uint64_t addr);
752 int (*cp_init)(struct radeon_device *rdev, unsigned ring_size); 756 int (*cp_init)(struct radeon_device *rdev, unsigned ring_size);
@@ -804,39 +808,72 @@ struct radeon_asic {
804/* 808/*
805 * Asic structures 809 * Asic structures
806 */ 810 */
811struct r100_gpu_lockup {
812 unsigned long last_jiffies;
813 u32 last_cp_rptr;
814};
815
807struct r100_asic { 816struct r100_asic {
808 const unsigned *reg_safe_bm; 817 const unsigned *reg_safe_bm;
809 unsigned reg_safe_bm_size; 818 unsigned reg_safe_bm_size;
810 u32 hdp_cntl; 819 u32 hdp_cntl;
820 struct r100_gpu_lockup lockup;
811}; 821};
812 822
813struct r300_asic { 823struct r300_asic {
814 const unsigned *reg_safe_bm; 824 const unsigned *reg_safe_bm;
815 unsigned reg_safe_bm_size; 825 unsigned reg_safe_bm_size;
816 u32 resync_scratch; 826 u32 resync_scratch;
817 u32 hdp_cntl; 827 u32 hdp_cntl;
828 struct r100_gpu_lockup lockup;
818}; 829};
819 830
820struct r600_asic { 831struct r600_asic {
821 unsigned max_pipes; 832 unsigned max_pipes;
822 unsigned max_tile_pipes; 833 unsigned max_tile_pipes;
823 unsigned max_simds; 834 unsigned max_simds;
824 unsigned max_backends; 835 unsigned max_backends;
825 unsigned max_gprs; 836 unsigned max_gprs;
826 unsigned max_threads; 837 unsigned max_threads;
827 unsigned max_stack_entries; 838 unsigned max_stack_entries;
828 unsigned max_hw_contexts; 839 unsigned max_hw_contexts;
829 unsigned max_gs_threads; 840 unsigned max_gs_threads;
830 unsigned sx_max_export_size; 841 unsigned sx_max_export_size;
831 unsigned sx_max_export_pos_size; 842 unsigned sx_max_export_pos_size;
832 unsigned sx_max_export_smx_size; 843 unsigned sx_max_export_smx_size;
833 unsigned sq_num_cf_insts; 844 unsigned sq_num_cf_insts;
834 unsigned tiling_nbanks; 845 unsigned tiling_nbanks;
835 unsigned tiling_npipes; 846 unsigned tiling_npipes;
836 unsigned tiling_group_size; 847 unsigned tiling_group_size;
848 struct r100_gpu_lockup lockup;
837}; 849};
838 850
839struct rv770_asic { 851struct rv770_asic {
852 unsigned max_pipes;
853 unsigned max_tile_pipes;
854 unsigned max_simds;
855 unsigned max_backends;
856 unsigned max_gprs;
857 unsigned max_threads;
858 unsigned max_stack_entries;
859 unsigned max_hw_contexts;
860 unsigned max_gs_threads;
861 unsigned sx_max_export_size;
862 unsigned sx_max_export_pos_size;
863 unsigned sx_max_export_smx_size;
864 unsigned sq_num_cf_insts;
865 unsigned sx_num_of_sets;
866 unsigned sc_prim_fifo_size;
867 unsigned sc_hiz_tile_fifo_size;
868 unsigned sc_earlyz_tile_fifo_fize;
869 unsigned tiling_nbanks;
870 unsigned tiling_npipes;
871 unsigned tiling_group_size;
872 struct r100_gpu_lockup lockup;
873};
874
875struct evergreen_asic {
876 unsigned num_ses;
840 unsigned max_pipes; 877 unsigned max_pipes;
841 unsigned max_tile_pipes; 878 unsigned max_tile_pipes;
842 unsigned max_simds; 879 unsigned max_simds;
@@ -853,7 +890,7 @@ struct rv770_asic {
853 unsigned sx_num_of_sets; 890 unsigned sx_num_of_sets;
854 unsigned sc_prim_fifo_size; 891 unsigned sc_prim_fifo_size;
855 unsigned sc_hiz_tile_fifo_size; 892 unsigned sc_hiz_tile_fifo_size;
856 unsigned sc_earlyz_tile_fifo_fize; 893 unsigned sc_earlyz_tile_fifo_size;
857 unsigned tiling_nbanks; 894 unsigned tiling_nbanks;
858 unsigned tiling_npipes; 895 unsigned tiling_npipes;
859 unsigned tiling_group_size; 896 unsigned tiling_group_size;
@@ -864,6 +901,7 @@ union radeon_asic_config {
864 struct r100_asic r100; 901 struct r100_asic r100;
865 struct r600_asic r600; 902 struct r600_asic r600;
866 struct rv770_asic rv770; 903 struct rv770_asic rv770;
904 struct evergreen_asic evergreen;
867}; 905};
868 906
869/* 907/*
@@ -927,9 +965,6 @@ struct radeon_device {
927 bool is_atom_bios; 965 bool is_atom_bios;
928 uint16_t bios_header_start; 966 uint16_t bios_header_start;
929 struct radeon_bo *stollen_vga_memory; 967 struct radeon_bo *stollen_vga_memory;
930 struct fb_info *fbdev_info;
931 struct radeon_bo *fbdev_rbo;
932 struct radeon_framebuffer *fbdev_rfb;
933 /* Register mmio */ 968 /* Register mmio */
934 resource_size_t rmmio_base; 969 resource_size_t rmmio_base;
935 resource_size_t rmmio_size; 970 resource_size_t rmmio_size;
@@ -1145,7 +1180,8 @@ static inline void radeon_ring_write(struct radeon_device *rdev, uint32_t v)
1145#define radeon_suspend(rdev) (rdev)->asic->suspend((rdev)) 1180#define radeon_suspend(rdev) (rdev)->asic->suspend((rdev))
1146#define radeon_cs_parse(p) rdev->asic->cs_parse((p)) 1181#define radeon_cs_parse(p) rdev->asic->cs_parse((p))
1147#define radeon_vga_set_state(rdev, state) (rdev)->asic->vga_set_state((rdev), (state)) 1182#define radeon_vga_set_state(rdev, state) (rdev)->asic->vga_set_state((rdev), (state))
1148#define radeon_gpu_reset(rdev) (rdev)->asic->gpu_reset((rdev)) 1183#define radeon_gpu_is_lockup(rdev) (rdev)->asic->gpu_is_lockup((rdev))
1184#define radeon_asic_reset(rdev) (rdev)->asic->asic_reset((rdev))
1149#define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart_tlb_flush((rdev)) 1185#define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart_tlb_flush((rdev))
1150#define radeon_gart_set_page(rdev, i, p) (rdev)->asic->gart_set_page((rdev), (i), (p)) 1186#define radeon_gart_set_page(rdev, i, p) (rdev)->asic->gart_set_page((rdev), (i), (p))
1151#define radeon_cp_commit(rdev) (rdev)->asic->cp_commit((rdev)) 1187#define radeon_cp_commit(rdev) (rdev)->asic->cp_commit((rdev))
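
The old all-in-one gpu_reset hook is split in two: gpu_is_lockup is a cheap, read-only probe that a wait path can call repeatedly, while asic_reset performs the actual heavyweight hardware reset. A sketch of how a caller is expected to combine them (hypothetical call site; the driver-level radeon_gpu_reset() wrapper is declared just below):

	if (radeon_gpu_is_lockup(rdev)) {
		/* detection is side-effect free; recovery is not */
		if (radeon_gpu_reset(rdev))
			dev_err(rdev->dev, "GPU reset failed\n");
	}
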
@@ -1176,6 +1212,7 @@ static inline void radeon_ring_write(struct radeon_device *rdev, uint32_t v)
1176 1212
1177/* Common functions */ 1213/* Common functions */
1178/* AGP */ 1214/* AGP */
1215extern int radeon_gpu_reset(struct radeon_device *rdev);
1179extern void radeon_agp_disable(struct radeon_device *rdev); 1216extern void radeon_agp_disable(struct radeon_device *rdev);
1180extern int radeon_gart_table_vram_pin(struct radeon_device *rdev); 1217extern int radeon_gart_table_vram_pin(struct radeon_device *rdev);
1181extern void radeon_gart_restore(struct radeon_device *rdev); 1218extern void radeon_gart_restore(struct radeon_device *rdev);
@@ -1200,6 +1237,8 @@ extern int radeon_resume_kms(struct drm_device *dev);
1200extern int radeon_suspend_kms(struct drm_device *dev, pm_message_t state); 1237extern int radeon_suspend_kms(struct drm_device *dev, pm_message_t state);
1201 1238
1202/* r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 */ 1239/* r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 */
1240extern void r100_gpu_lockup_update(struct r100_gpu_lockup *lockup, struct radeon_cp *cp);
1241extern bool r100_gpu_cp_is_lockup(struct radeon_device *rdev, struct r100_gpu_lockup *lockup, struct radeon_cp *cp);
1203 1242
1204/* rv200,rv250,rv280 */ 1243/* rv200,rv250,rv280 */
1205extern void r200_set_safe_registers(struct radeon_device *rdev); 1244extern void r200_set_safe_registers(struct radeon_device *rdev);
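
These two helpers are the shared building blocks of the new detection scheme: r100_gpu_lockup_update() stamps the current ring progress, and r100_gpu_cp_is_lockup() reports a lockup once the CP read pointer has stopped moving for long enough. A per-family gpu_is_lockup callback is expected to wrap them roughly as below (a sketch with a placeholder name; the family-specific part is which status register it checks for idleness):

bool rXXX_gpu_is_lockup(struct radeon_device *rdev)
{
	struct r100_gpu_lockup *lockup = &rdev->config.r100.lockup;
	struct radeon_cp *cp = &rdev->cp;
	u32 status = RREG32(RADEON_RBBM_STATUS);

	if (!(status & RADEON_RBBM_ACTIVE)) {
		/* engines idle: refresh the progress stamp, not a lockup */
		r100_gpu_lockup_update(lockup, cp);
		return false;
	}
	return r100_gpu_cp_is_lockup(rdev, lockup, cp);
}
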
@@ -1260,6 +1299,7 @@ extern void rs690_line_buffer_adjust(struct radeon_device *rdev,
1260extern void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc); 1299extern void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc);
1261extern bool r600_card_posted(struct radeon_device *rdev); 1300extern bool r600_card_posted(struct radeon_device *rdev);
1262extern void r600_cp_stop(struct radeon_device *rdev); 1301extern void r600_cp_stop(struct radeon_device *rdev);
1302extern int r600_cp_start(struct radeon_device *rdev);
1263extern void r600_ring_init(struct radeon_device *rdev, unsigned ring_size); 1303extern void r600_ring_init(struct radeon_device *rdev, unsigned ring_size);
1264extern int r600_cp_resume(struct radeon_device *rdev); 1304extern int r600_cp_resume(struct radeon_device *rdev);
1265extern void r600_cp_fini(struct radeon_device *rdev); 1305extern void r600_cp_fini(struct radeon_device *rdev);
@@ -1276,29 +1316,39 @@ extern void r600_scratch_init(struct radeon_device *rdev);
1276extern int r600_blit_init(struct radeon_device *rdev); 1316extern int r600_blit_init(struct radeon_device *rdev);
1277extern void r600_blit_fini(struct radeon_device *rdev); 1317extern void r600_blit_fini(struct radeon_device *rdev);
1278extern int r600_init_microcode(struct radeon_device *rdev); 1318extern int r600_init_microcode(struct radeon_device *rdev);
1279extern int r600_gpu_reset(struct radeon_device *rdev); 1319extern int r600_asic_reset(struct radeon_device *rdev);
1280/* r600 irq */ 1320/* r600 irq */
1281extern int r600_irq_init(struct radeon_device *rdev); 1321extern int r600_irq_init(struct radeon_device *rdev);
1282extern void r600_irq_fini(struct radeon_device *rdev); 1322extern void r600_irq_fini(struct radeon_device *rdev);
1283extern void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size); 1323extern void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size);
1284extern int r600_irq_set(struct radeon_device *rdev); 1324extern int r600_irq_set(struct radeon_device *rdev);
1285extern void r600_irq_suspend(struct radeon_device *rdev); 1325extern void r600_irq_suspend(struct radeon_device *rdev);
1326extern void r600_disable_interrupts(struct radeon_device *rdev);
1327extern void r600_rlc_stop(struct radeon_device *rdev);
1286/* r600 audio */ 1328/* r600 audio */
1287extern int r600_audio_init(struct radeon_device *rdev); 1329extern int r600_audio_init(struct radeon_device *rdev);
1288extern int r600_audio_tmds_index(struct drm_encoder *encoder); 1330extern int r600_audio_tmds_index(struct drm_encoder *encoder);
1289extern void r600_audio_set_clock(struct drm_encoder *encoder, int clock); 1331extern void r600_audio_set_clock(struct drm_encoder *encoder, int clock);
1332extern int r600_audio_channels(struct radeon_device *rdev);
1333extern int r600_audio_bits_per_sample(struct radeon_device *rdev);
1334extern int r600_audio_rate(struct radeon_device *rdev);
1335extern uint8_t r600_audio_status_bits(struct radeon_device *rdev);
1336extern uint8_t r600_audio_category_code(struct radeon_device *rdev);
1337extern void r600_audio_schedule_polling(struct radeon_device *rdev);
1338extern void r600_audio_enable_polling(struct drm_encoder *encoder);
1339extern void r600_audio_disable_polling(struct drm_encoder *encoder);
1290extern void r600_audio_fini(struct radeon_device *rdev); 1340extern void r600_audio_fini(struct radeon_device *rdev);
1291extern void r600_hdmi_init(struct drm_encoder *encoder); 1341extern void r600_hdmi_init(struct drm_encoder *encoder);
1292extern void r600_hdmi_enable(struct drm_encoder *encoder); 1342extern void r600_hdmi_enable(struct drm_encoder *encoder);
1293extern void r600_hdmi_disable(struct drm_encoder *encoder); 1343extern void r600_hdmi_disable(struct drm_encoder *encoder);
1294extern void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode); 1344extern void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode);
1295extern int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder); 1345extern int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder);
1296extern void r600_hdmi_update_audio_settings(struct drm_encoder *encoder, 1346extern void r600_hdmi_update_audio_settings(struct drm_encoder *encoder);
1297 int channels, 1347
1298 int rate, 1348extern void r700_cp_stop(struct radeon_device *rdev);
1299 int bps, 1349extern void r700_cp_fini(struct radeon_device *rdev);
1300 uint8_t status_bits, 1350extern void evergreen_disable_interrupt_state(struct radeon_device *rdev);
1301 uint8_t category_code); 1351extern int evergreen_irq_set(struct radeon_device *rdev);
1302 1352
1303/* evergreen */ 1353/* evergreen */
1304struct evergreen_mc_save { 1354struct evergreen_mc_save {
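
r600_hdmi_update_audio_settings() loses its five parameters because the HDMI code can now query the current audio state itself through the new r600_audio_* accessors added above. A sketch of the new shape (hypothetical body; only the prototypes are part of this hunk):

void r600_hdmi_update_audio_settings(struct drm_encoder *encoder)
{
	struct radeon_device *rdev = encoder->dev->dev_private;
	int channels = r600_audio_channels(rdev);
	int rate = r600_audio_rate(rdev);
	int bps = r600_audio_bits_per_sample(rdev);
	uint8_t status_bits = r600_audio_status_bits(rdev);
	uint8_t category_code = r600_audio_category_code(rdev);

	/* ...program the HDMI audio infoframes from these values... */
}
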
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index a4b4bc9fa322..f835333c1b69 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -134,7 +134,8 @@ static struct radeon_asic r100_asic = {
134 .suspend = &r100_suspend, 134 .suspend = &r100_suspend,
135 .resume = &r100_resume, 135 .resume = &r100_resume,
136 .vga_set_state = &r100_vga_set_state, 136 .vga_set_state = &r100_vga_set_state,
137 .gpu_reset = &r100_gpu_reset, 137 .gpu_is_lockup = &r100_gpu_is_lockup,
138 .asic_reset = &r100_asic_reset,
138 .gart_tlb_flush = &r100_pci_gart_tlb_flush, 139 .gart_tlb_flush = &r100_pci_gart_tlb_flush,
139 .gart_set_page = &r100_pci_gart_set_page, 140 .gart_set_page = &r100_pci_gart_set_page,
140 .cp_commit = &r100_cp_commit, 141 .cp_commit = &r100_cp_commit,
@@ -172,7 +173,8 @@ static struct radeon_asic r200_asic = {
172 .suspend = &r100_suspend, 173 .suspend = &r100_suspend,
173 .resume = &r100_resume, 174 .resume = &r100_resume,
174 .vga_set_state = &r100_vga_set_state, 175 .vga_set_state = &r100_vga_set_state,
175 .gpu_reset = &r100_gpu_reset, 176 .gpu_is_lockup = &r100_gpu_is_lockup,
177 .asic_reset = &r100_asic_reset,
176 .gart_tlb_flush = &r100_pci_gart_tlb_flush, 178 .gart_tlb_flush = &r100_pci_gart_tlb_flush,
177 .gart_set_page = &r100_pci_gart_set_page, 179 .gart_set_page = &r100_pci_gart_set_page,
178 .cp_commit = &r100_cp_commit, 180 .cp_commit = &r100_cp_commit,
@@ -209,7 +211,8 @@ static struct radeon_asic r300_asic = {
209 .suspend = &r300_suspend, 211 .suspend = &r300_suspend,
210 .resume = &r300_resume, 212 .resume = &r300_resume,
211 .vga_set_state = &r100_vga_set_state, 213 .vga_set_state = &r100_vga_set_state,
212 .gpu_reset = &r300_gpu_reset, 214 .gpu_is_lockup = &r300_gpu_is_lockup,
215 .asic_reset = &r300_asic_reset,
213 .gart_tlb_flush = &r100_pci_gart_tlb_flush, 216 .gart_tlb_flush = &r100_pci_gart_tlb_flush,
214 .gart_set_page = &r100_pci_gart_set_page, 217 .gart_set_page = &r100_pci_gart_set_page,
215 .cp_commit = &r100_cp_commit, 218 .cp_commit = &r100_cp_commit,
@@ -247,7 +250,8 @@ static struct radeon_asic r300_asic_pcie = {
247 .suspend = &r300_suspend, 250 .suspend = &r300_suspend,
248 .resume = &r300_resume, 251 .resume = &r300_resume,
249 .vga_set_state = &r100_vga_set_state, 252 .vga_set_state = &r100_vga_set_state,
250 .gpu_reset = &r300_gpu_reset, 253 .gpu_is_lockup = &r300_gpu_is_lockup,
254 .asic_reset = &r300_asic_reset,
251 .gart_tlb_flush = &rv370_pcie_gart_tlb_flush, 255 .gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
252 .gart_set_page = &rv370_pcie_gart_set_page, 256 .gart_set_page = &rv370_pcie_gart_set_page,
253 .cp_commit = &r100_cp_commit, 257 .cp_commit = &r100_cp_commit,
@@ -284,7 +288,8 @@ static struct radeon_asic r420_asic = {
284 .suspend = &r420_suspend, 288 .suspend = &r420_suspend,
285 .resume = &r420_resume, 289 .resume = &r420_resume,
286 .vga_set_state = &r100_vga_set_state, 290 .vga_set_state = &r100_vga_set_state,
287 .gpu_reset = &r300_gpu_reset, 291 .gpu_is_lockup = &r300_gpu_is_lockup,
292 .asic_reset = &r300_asic_reset,
288 .gart_tlb_flush = &rv370_pcie_gart_tlb_flush, 293 .gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
289 .gart_set_page = &rv370_pcie_gart_set_page, 294 .gart_set_page = &rv370_pcie_gart_set_page,
290 .cp_commit = &r100_cp_commit, 295 .cp_commit = &r100_cp_commit,
@@ -322,7 +327,8 @@ static struct radeon_asic rs400_asic = {
322 .suspend = &rs400_suspend, 327 .suspend = &rs400_suspend,
323 .resume = &rs400_resume, 328 .resume = &rs400_resume,
324 .vga_set_state = &r100_vga_set_state, 329 .vga_set_state = &r100_vga_set_state,
325 .gpu_reset = &r300_gpu_reset, 330 .gpu_is_lockup = &r300_gpu_is_lockup,
331 .asic_reset = &r300_asic_reset,
326 .gart_tlb_flush = &rs400_gart_tlb_flush, 332 .gart_tlb_flush = &rs400_gart_tlb_flush,
327 .gart_set_page = &rs400_gart_set_page, 333 .gart_set_page = &rs400_gart_set_page,
328 .cp_commit = &r100_cp_commit, 334 .cp_commit = &r100_cp_commit,
@@ -360,7 +366,8 @@ static struct radeon_asic rs600_asic = {
360 .suspend = &rs600_suspend, 366 .suspend = &rs600_suspend,
361 .resume = &rs600_resume, 367 .resume = &rs600_resume,
362 .vga_set_state = &r100_vga_set_state, 368 .vga_set_state = &r100_vga_set_state,
363 .gpu_reset = &r300_gpu_reset, 369 .gpu_is_lockup = &r300_gpu_is_lockup,
370 .asic_reset = &rs600_asic_reset,
364 .gart_tlb_flush = &rs600_gart_tlb_flush, 371 .gart_tlb_flush = &rs600_gart_tlb_flush,
365 .gart_set_page = &rs600_gart_set_page, 372 .gart_set_page = &rs600_gart_set_page,
366 .cp_commit = &r100_cp_commit, 373 .cp_commit = &r100_cp_commit,
@@ -398,7 +405,8 @@ static struct radeon_asic rs690_asic = {
398 .suspend = &rs690_suspend, 405 .suspend = &rs690_suspend,
399 .resume = &rs690_resume, 406 .resume = &rs690_resume,
400 .vga_set_state = &r100_vga_set_state, 407 .vga_set_state = &r100_vga_set_state,
401 .gpu_reset = &r300_gpu_reset, 408 .gpu_is_lockup = &r300_gpu_is_lockup,
409 .asic_reset = &rs600_asic_reset,
402 .gart_tlb_flush = &rs400_gart_tlb_flush, 410 .gart_tlb_flush = &rs400_gart_tlb_flush,
403 .gart_set_page = &rs400_gart_set_page, 411 .gart_set_page = &rs400_gart_set_page,
404 .cp_commit = &r100_cp_commit, 412 .cp_commit = &r100_cp_commit,
@@ -436,7 +444,8 @@ static struct radeon_asic rv515_asic = {
436 .suspend = &rv515_suspend, 444 .suspend = &rv515_suspend,
437 .resume = &rv515_resume, 445 .resume = &rv515_resume,
438 .vga_set_state = &r100_vga_set_state, 446 .vga_set_state = &r100_vga_set_state,
439 .gpu_reset = &rv515_gpu_reset, 447 .gpu_is_lockup = &r300_gpu_is_lockup,
448 .asic_reset = &rs600_asic_reset,
440 .gart_tlb_flush = &rv370_pcie_gart_tlb_flush, 449 .gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
441 .gart_set_page = &rv370_pcie_gart_set_page, 450 .gart_set_page = &rv370_pcie_gart_set_page,
442 .cp_commit = &r100_cp_commit, 451 .cp_commit = &r100_cp_commit,
@@ -474,7 +483,8 @@ static struct radeon_asic r520_asic = {
474 .suspend = &rv515_suspend, 483 .suspend = &rv515_suspend,
475 .resume = &r520_resume, 484 .resume = &r520_resume,
476 .vga_set_state = &r100_vga_set_state, 485 .vga_set_state = &r100_vga_set_state,
477 .gpu_reset = &rv515_gpu_reset, 486 .gpu_is_lockup = &r300_gpu_is_lockup,
487 .asic_reset = &rs600_asic_reset,
478 .gart_tlb_flush = &rv370_pcie_gart_tlb_flush, 488 .gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
479 .gart_set_page = &rv370_pcie_gart_set_page, 489 .gart_set_page = &rv370_pcie_gart_set_page,
480 .cp_commit = &r100_cp_commit, 490 .cp_commit = &r100_cp_commit,
@@ -513,7 +523,8 @@ static struct radeon_asic r600_asic = {
513 .resume = &r600_resume, 523 .resume = &r600_resume,
514 .cp_commit = &r600_cp_commit, 524 .cp_commit = &r600_cp_commit,
515 .vga_set_state = &r600_vga_set_state, 525 .vga_set_state = &r600_vga_set_state,
516 .gpu_reset = &r600_gpu_reset, 526 .gpu_is_lockup = &r600_gpu_is_lockup,
527 .asic_reset = &r600_asic_reset,
517 .gart_tlb_flush = &r600_pcie_gart_tlb_flush, 528 .gart_tlb_flush = &r600_pcie_gart_tlb_flush,
518 .gart_set_page = &rs600_gart_set_page, 529 .gart_set_page = &rs600_gart_set_page,
519 .ring_test = &r600_ring_test, 530 .ring_test = &r600_ring_test,
@@ -549,8 +560,9 @@ static struct radeon_asic rs780_asic = {
549 .suspend = &r600_suspend, 560 .suspend = &r600_suspend,
550 .resume = &r600_resume, 561 .resume = &r600_resume,
551 .cp_commit = &r600_cp_commit, 562 .cp_commit = &r600_cp_commit,
563 .gpu_is_lockup = &r600_gpu_is_lockup,
552 .vga_set_state = &r600_vga_set_state, 564 .vga_set_state = &r600_vga_set_state,
553 .gpu_reset = &r600_gpu_reset, 565 .asic_reset = &r600_asic_reset,
554 .gart_tlb_flush = &r600_pcie_gart_tlb_flush, 566 .gart_tlb_flush = &r600_pcie_gart_tlb_flush,
555 .gart_set_page = &rs600_gart_set_page, 567 .gart_set_page = &rs600_gart_set_page,
556 .ring_test = &r600_ring_test, 568 .ring_test = &r600_ring_test,
@@ -586,7 +598,8 @@ static struct radeon_asic rv770_asic = {
586 .suspend = &rv770_suspend, 598 .suspend = &rv770_suspend,
587 .resume = &rv770_resume, 599 .resume = &rv770_resume,
588 .cp_commit = &r600_cp_commit, 600 .cp_commit = &r600_cp_commit,
589 .gpu_reset = &rv770_gpu_reset, 601 .asic_reset = &r600_asic_reset,
602 .gpu_is_lockup = &r600_gpu_is_lockup,
590 .vga_set_state = &r600_vga_set_state, 603 .vga_set_state = &r600_vga_set_state,
591 .gart_tlb_flush = &r600_pcie_gart_tlb_flush, 604 .gart_tlb_flush = &r600_pcie_gart_tlb_flush,
592 .gart_set_page = &rs600_gart_set_page, 605 .gart_set_page = &rs600_gart_set_page,
@@ -622,16 +635,17 @@ static struct radeon_asic evergreen_asic = {
622 .fini = &evergreen_fini, 635 .fini = &evergreen_fini,
623 .suspend = &evergreen_suspend, 636 .suspend = &evergreen_suspend,
624 .resume = &evergreen_resume, 637 .resume = &evergreen_resume,
625 .cp_commit = NULL, 638 .cp_commit = &r600_cp_commit,
626 .gpu_reset = &evergreen_gpu_reset, 639 .gpu_is_lockup = &evergreen_gpu_is_lockup,
640 .asic_reset = &evergreen_asic_reset,
627 .vga_set_state = &r600_vga_set_state, 641 .vga_set_state = &r600_vga_set_state,
628 .gart_tlb_flush = &r600_pcie_gart_tlb_flush, 642 .gart_tlb_flush = &evergreen_pcie_gart_tlb_flush,
629 .gart_set_page = &rs600_gart_set_page, 643 .gart_set_page = &rs600_gart_set_page,
630 .ring_test = NULL, 644 .ring_test = &r600_ring_test,
631 .ring_ib_execute = NULL, 645 .ring_ib_execute = &r600_ring_ib_execute,
632 .irq_set = NULL, 646 .irq_set = &evergreen_irq_set,
633 .irq_process = NULL, 647 .irq_process = &evergreen_irq_process,
634 .get_vblank_counter = NULL, 648 .get_vblank_counter = &evergreen_get_vblank_counter,
635 .fence_ring_emit = NULL, 649 .fence_ring_emit = NULL,
636 .cs_parse = NULL, 650 .cs_parse = NULL,
637 .copy_blit = NULL, 651 .copy_blit = NULL,
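
With evergreen bring-up far enough along, its asic table drops most of the NULL stubs: CP commit, GART TLB flush, ring test, IB execution, IRQ set/process and the vblank counter now point at working implementations, several shared with r600. The acceleration hooks (fence_ring_emit, cs_parse, the copy paths) are still NULL, so dispatch sites must keep guarding them, roughly like this (sketch; the copy_blit signature is assumed from the asic table):

	if (rdev->asic->copy_blit)
		r = rdev->asic->copy_blit(rdev, src_offset, dst_offset,
					  num_pages, fence);
	else
		r = -EINVAL; /* no accelerated blit on this ASIC yet */
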
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index a0b8280663d1..ef2c7ba1bdc9 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -60,7 +60,8 @@ int r100_resume(struct radeon_device *rdev);
60uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg); 60uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg);
61void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); 61void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
62void r100_vga_set_state(struct radeon_device *rdev, bool state); 62void r100_vga_set_state(struct radeon_device *rdev, bool state);
63int r100_gpu_reset(struct radeon_device *rdev); 63bool r100_gpu_is_lockup(struct radeon_device *rdev);
64int r100_asic_reset(struct radeon_device *rdev);
64u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc); 65u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc);
65void r100_pci_gart_tlb_flush(struct radeon_device *rdev); 66void r100_pci_gart_tlb_flush(struct radeon_device *rdev);
66int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr); 67int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
@@ -110,8 +111,6 @@ void r100_vram_init_sizes(struct radeon_device *rdev);
110void r100_wb_disable(struct radeon_device *rdev); 111void r100_wb_disable(struct radeon_device *rdev);
111void r100_wb_fini(struct radeon_device *rdev); 112void r100_wb_fini(struct radeon_device *rdev);
112int r100_wb_init(struct radeon_device *rdev); 113int r100_wb_init(struct radeon_device *rdev);
113void r100_hdp_reset(struct radeon_device *rdev);
114int r100_rb2d_reset(struct radeon_device *rdev);
115int r100_cp_reset(struct radeon_device *rdev); 114int r100_cp_reset(struct radeon_device *rdev);
116void r100_vga_render_disable(struct radeon_device *rdev); 115void r100_vga_render_disable(struct radeon_device *rdev);
117int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p, 116int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p,
@@ -126,7 +125,7 @@ int r100_cs_packet_parse(struct radeon_cs_parser *p,
126 unsigned idx); 125 unsigned idx);
127void r100_enable_bm(struct radeon_device *rdev); 126void r100_enable_bm(struct radeon_device *rdev);
128void r100_set_common_regs(struct radeon_device *rdev); 127void r100_set_common_regs(struct radeon_device *rdev);
129 128void r100_bm_disable(struct radeon_device *rdev);
130/* 129/*
131 * r200,rv250,rs300,rv280 130 * r200,rv250,rs300,rv280
132 */ 131 */
@@ -134,7 +133,7 @@ extern int r200_copy_dma(struct radeon_device *rdev,
134 uint64_t src_offset, 133 uint64_t src_offset,
135 uint64_t dst_offset, 134 uint64_t dst_offset,
136 unsigned num_pages, 135 unsigned num_pages,
137 struct radeon_fence *fence); 136 struct radeon_fence *fence);
138 137
139/* 138/*
140 * r300,r350,rv350,rv380 139 * r300,r350,rv350,rv380
@@ -143,7 +142,8 @@ extern int r300_init(struct radeon_device *rdev);
143extern void r300_fini(struct radeon_device *rdev); 142extern void r300_fini(struct radeon_device *rdev);
144extern int r300_suspend(struct radeon_device *rdev); 143extern int r300_suspend(struct radeon_device *rdev);
145extern int r300_resume(struct radeon_device *rdev); 144extern int r300_resume(struct radeon_device *rdev);
146extern int r300_gpu_reset(struct radeon_device *rdev); 145extern bool r300_gpu_is_lockup(struct radeon_device *rdev);
146extern int r300_asic_reset(struct radeon_device *rdev);
147extern void r300_ring_start(struct radeon_device *rdev); 147extern void r300_ring_start(struct radeon_device *rdev);
148extern void r300_fence_ring_emit(struct radeon_device *rdev, 148extern void r300_fence_ring_emit(struct radeon_device *rdev,
149 struct radeon_fence *fence); 149 struct radeon_fence *fence);
@@ -178,6 +178,7 @@ void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
178/* 178/*
179 * rs600. 179 * rs600.
180 */ 180 */
181extern int rs600_asic_reset(struct radeon_device *rdev);
181extern int rs600_init(struct radeon_device *rdev); 182extern int rs600_init(struct radeon_device *rdev);
182extern void rs600_fini(struct radeon_device *rdev); 183extern void rs600_fini(struct radeon_device *rdev);
183extern int rs600_suspend(struct radeon_device *rdev); 184extern int rs600_suspend(struct radeon_device *rdev);
@@ -212,7 +213,6 @@ void rs690_bandwidth_update(struct radeon_device *rdev);
212 */ 213 */
213int rv515_init(struct radeon_device *rdev); 214int rv515_init(struct radeon_device *rdev);
214void rv515_fini(struct radeon_device *rdev); 215void rv515_fini(struct radeon_device *rdev);
215int rv515_gpu_reset(struct radeon_device *rdev);
216uint32_t rv515_mc_rreg(struct radeon_device *rdev, uint32_t reg); 216uint32_t rv515_mc_rreg(struct radeon_device *rdev, uint32_t reg);
217void rv515_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); 217void rv515_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
218void rv515_ring_start(struct radeon_device *rdev); 218void rv515_ring_start(struct radeon_device *rdev);
@@ -252,7 +252,8 @@ int r600_copy_dma(struct radeon_device *rdev,
252 struct radeon_fence *fence); 252 struct radeon_fence *fence);
253int r600_irq_process(struct radeon_device *rdev); 253int r600_irq_process(struct radeon_device *rdev);
254int r600_irq_set(struct radeon_device *rdev); 254int r600_irq_set(struct radeon_device *rdev);
255int r600_gpu_reset(struct radeon_device *rdev); 255bool r600_gpu_is_lockup(struct radeon_device *rdev);
256int r600_asic_reset(struct radeon_device *rdev);
256int r600_set_surface_reg(struct radeon_device *rdev, int reg, 257int r600_set_surface_reg(struct radeon_device *rdev, int reg,
257 uint32_t tiling_flags, uint32_t pitch, 258 uint32_t tiling_flags, uint32_t pitch,
258 uint32_t offset, uint32_t obj_size); 259 uint32_t offset, uint32_t obj_size);
@@ -276,20 +277,25 @@ int rv770_init(struct radeon_device *rdev);
276void rv770_fini(struct radeon_device *rdev); 277void rv770_fini(struct radeon_device *rdev);
277int rv770_suspend(struct radeon_device *rdev); 278int rv770_suspend(struct radeon_device *rdev);
278int rv770_resume(struct radeon_device *rdev); 279int rv770_resume(struct radeon_device *rdev);
279int rv770_gpu_reset(struct radeon_device *rdev);
280 280
281/* 281/*
282 * evergreen 282 * evergreen
283 */ 283 */
284void evergreen_pcie_gart_tlb_flush(struct radeon_device *rdev);
284int evergreen_init(struct radeon_device *rdev); 285int evergreen_init(struct radeon_device *rdev);
285void evergreen_fini(struct radeon_device *rdev); 286void evergreen_fini(struct radeon_device *rdev);
286int evergreen_suspend(struct radeon_device *rdev); 287int evergreen_suspend(struct radeon_device *rdev);
287int evergreen_resume(struct radeon_device *rdev); 288int evergreen_resume(struct radeon_device *rdev);
288int evergreen_gpu_reset(struct radeon_device *rdev); 289bool evergreen_gpu_is_lockup(struct radeon_device *rdev);
290int evergreen_asic_reset(struct radeon_device *rdev);
289void evergreen_bandwidth_update(struct radeon_device *rdev); 291void evergreen_bandwidth_update(struct radeon_device *rdev);
290void evergreen_hpd_init(struct radeon_device *rdev); 292void evergreen_hpd_init(struct radeon_device *rdev);
291void evergreen_hpd_fini(struct radeon_device *rdev); 293void evergreen_hpd_fini(struct radeon_device *rdev);
292bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd); 294bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
293void evergreen_hpd_set_polarity(struct radeon_device *rdev, 295void evergreen_hpd_set_polarity(struct radeon_device *rdev,
294 enum radeon_hpd_id hpd); 296 enum radeon_hpd_id hpd);
297u32 evergreen_get_vblank_counter(struct radeon_device *rdev, int crtc);
298int evergreen_irq_set(struct radeon_device *rdev);
299int evergreen_irq_process(struct radeon_device *rdev);
300
295#endif 301#endif
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 9916d825401c..1d05debdd604 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -1462,6 +1462,10 @@ static const char *pp_lib_thermal_controller_names[] = {
1462 "RV6xx", 1462 "RV6xx",
1463 "RV770", 1463 "RV770",
1464 "ADT7473", 1464 "ADT7473",
1465 "External GPIO",
1466 "Evergreen",
1467 "ADT7473 with internal",
1468
1465}; 1469};
1466 1470
1467union power_info { 1471union power_info {
@@ -1707,15 +1711,21 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
1707 break; 1711 break;
1708 } 1712 }
1709 } 1713 }
1710 } else if (frev == 4) { 1714 } else {
1711 /* add the i2c bus for thermal/fan chip */ 1715 /* add the i2c bus for thermal/fan chip */
1712 /* no support for internal controller yet */ 1716 /* no support for internal controller yet */
1713 if (power_info->info_4.sThermalController.ucType > 0) { 1717 if (power_info->info_4.sThermalController.ucType > 0) {
1714 if ((power_info->info_4.sThermalController.ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) || 1718 if ((power_info->info_4.sThermalController.ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) ||
1715 (power_info->info_4.sThermalController.ucType == ATOM_PP_THERMALCONTROLLER_RV770)) { 1719 (power_info->info_4.sThermalController.ucType == ATOM_PP_THERMALCONTROLLER_RV770) ||
1720 (power_info->info_4.sThermalController.ucType == ATOM_PP_THERMALCONTROLLER_EVERGREEN)) {
1716 DRM_INFO("Internal thermal controller %s fan control\n", 1721 DRM_INFO("Internal thermal controller %s fan control\n",
1717 (power_info->info_4.sThermalController.ucFanParameters & 1722 (power_info->info_4.sThermalController.ucFanParameters &
1718 ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); 1723 ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
1724 } else if ((power_info->info_4.sThermalController.ucType ==
1725 ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) ||
1726 (power_info->info_4.sThermalController.ucType ==
1727 ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL)) {
1728 DRM_INFO("Special thermal controller config\n");
1719 } else { 1729 } else {
1720 DRM_INFO("Possible %s thermal controller at 0x%02x %s fan control\n", 1730 DRM_INFO("Possible %s thermal controller at 0x%02x %s fan control\n",
1721 pp_lib_thermal_controller_names[power_info->info_4.sThermalController.ucType], 1731 pp_lib_thermal_controller_names[power_info->info_4.sThermalController.ucType],
@@ -1763,6 +1773,36 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
1763 rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage = 1773 rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage =
1764 clock_info->usVDDC; 1774 clock_info->usVDDC;
1765 mode_index++; 1775 mode_index++;
1776 } else if (ASIC_IS_DCE4(rdev)) {
1777 struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO *clock_info =
1778 (struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO *)
1779 (mode_info->atom_context->bios +
1780 data_offset +
1781 le16_to_cpu(power_info->info_4.usClockInfoArrayOffset) +
1782 (power_state->ucClockStateIndices[j] *
1783 power_info->info_4.ucClockInfoSize));
1784 sclk = le16_to_cpu(clock_info->usEngineClockLow);
1785 sclk |= clock_info->ucEngineClockHigh << 16;
1786 mclk = le16_to_cpu(clock_info->usMemoryClockLow);
1787 mclk |= clock_info->ucMemoryClockHigh << 16;
1788 rdev->pm.power_state[state_index].clock_info[mode_index].mclk = mclk;
1789 rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk;
1790 /* skip invalid modes */
1791 if ((rdev->pm.power_state[state_index].clock_info[mode_index].mclk == 0) ||
1792 (rdev->pm.power_state[state_index].clock_info[mode_index].sclk == 0))
1793 continue;
1794 /* skip overclock modes for now */
1795 if ((rdev->pm.power_state[state_index].clock_info[mode_index].mclk >
1796 rdev->clock.default_mclk + RADEON_MODE_OVERCLOCK_MARGIN) ||
1797 (rdev->pm.power_state[state_index].clock_info[mode_index].sclk >
1798 rdev->clock.default_sclk + RADEON_MODE_OVERCLOCK_MARGIN))
1799 continue;
1800 rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type =
1801 VOLTAGE_SW;
1802 rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage =
1803 clock_info->usVDDC;
1804 /* XXX usVDDCI */
1805 mode_index++;
1766 } else { 1806 } else {
1767 struct _ATOM_PPLIB_R600_CLOCK_INFO *clock_info = 1807 struct _ATOM_PPLIB_R600_CLOCK_INFO *clock_info =
1768 (struct _ATOM_PPLIB_R600_CLOCK_INFO *) 1808 (struct _ATOM_PPLIB_R600_CLOCK_INFO *)
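
The evergreen branch mirrors the r600 clock-info parsing but uses _ATOM_PPLIB_EVERGREEN_CLOCK_INFO, where each clock is split into a little-endian 16-bit Low word plus an 8-bit High byte carrying bits 16-23, in 10 kHz units. The reassembly the hunk open-codes, factored out for clarity (hypothetical helper):

static u32 atom_pplib_clock(__le16 low, u8 high)
{
	/* 24-bit clock in 10 kHz units; low half is little-endian in the BIOS */
	return le16_to_cpu(low) | ((u32)high << 16);
}
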
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index 37db8adb2748..0f1fd9254e30 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -450,17 +450,17 @@ bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev)
450{ 450{
451 int edid_info; 451 int edid_info;
452 struct edid *edid; 452 struct edid *edid;
453 unsigned char *raw;
453 edid_info = combios_get_table_offset(rdev->ddev, COMBIOS_HARDCODED_EDID_TABLE); 454 edid_info = combios_get_table_offset(rdev->ddev, COMBIOS_HARDCODED_EDID_TABLE);
454 if (!edid_info) 455 if (!edid_info)
455 return false; 456 return false;
456 457
457 edid = kmalloc(EDID_LENGTH * (DRM_MAX_EDID_EXT_NUM + 1), 458 raw = rdev->bios + edid_info;
458 GFP_KERNEL); 459 edid = kmalloc(EDID_LENGTH * (raw[0x7e] + 1), GFP_KERNEL);
459 if (edid == NULL) 460 if (edid == NULL)
460 return false; 461 return false;
461 462
462 memcpy((unsigned char *)edid, 463 memcpy((unsigned char *)edid, raw, EDID_LENGTH * (raw[0x7e] + 1));
463 (unsigned char *)(rdev->bios + edid_info), EDID_LENGTH);
464 464
465 if (!drm_edid_is_valid(edid)) { 465 if (!drm_edid_is_valid(edid)) {
466 kfree(edid); 466 kfree(edid);
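
This also fixes a real bug in the hardcoded-EDID path: the old code allocated room for the maximum number of extension blocks but copied only the first EDID_LENGTH bytes, silently dropping any extensions. Byte 0x7e of the base EDID block is its extension count, so sizing and copying (raw[0x7e] + 1) blocks preserves the whole blob. The sizing rule as a helper (sketch; EDID_LENGTH is 128, as in drm_edid.h):

static size_t edid_total_size(const unsigned char *raw)
{
	/* base block plus raw[0x7e] extension blocks of 128 bytes each */
	return EDID_LENGTH * (raw[0x7e] + 1);
}
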
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 4559a53d5e57..40a24c941f20 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -1041,7 +1041,6 @@ radeon_add_atom_connector(struct drm_device *dev,
1041 struct radeon_connector_atom_dig *radeon_dig_connector; 1041 struct radeon_connector_atom_dig *radeon_dig_connector;
1042 uint32_t subpixel_order = SubPixelNone; 1042 uint32_t subpixel_order = SubPixelNone;
1043 bool shared_ddc = false; 1043 bool shared_ddc = false;
1044 int ret;
1045 1044
1046 /* fixme - tv/cv/din */ 1045 /* fixme - tv/cv/din */
1047 if (connector_type == DRM_MODE_CONNECTOR_Unknown) 1046 if (connector_type == DRM_MODE_CONNECTOR_Unknown)
@@ -1076,9 +1075,7 @@ radeon_add_atom_connector(struct drm_device *dev,
1076 switch (connector_type) { 1075 switch (connector_type) {
1077 case DRM_MODE_CONNECTOR_VGA: 1076 case DRM_MODE_CONNECTOR_VGA:
1078 drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type); 1077 drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
1079 ret = drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs); 1078 drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
1080 if (ret)
1081 goto failed;
1082 if (i2c_bus->valid) { 1079 if (i2c_bus->valid) {
1083 radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "VGA"); 1080 radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "VGA");
1084 if (!radeon_connector->ddc_bus) 1081 if (!radeon_connector->ddc_bus)
@@ -1091,9 +1088,7 @@ radeon_add_atom_connector(struct drm_device *dev,
1091 break; 1088 break;
1092 case DRM_MODE_CONNECTOR_DVIA: 1089 case DRM_MODE_CONNECTOR_DVIA:
1093 drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type); 1090 drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
1094 ret = drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs); 1091 drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
1095 if (ret)
1096 goto failed;
1097 if (i2c_bus->valid) { 1092 if (i2c_bus->valid) {
1098 radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DVI"); 1093 radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DVI");
1099 if (!radeon_connector->ddc_bus) 1094 if (!radeon_connector->ddc_bus)
@@ -1113,9 +1108,7 @@ radeon_add_atom_connector(struct drm_device *dev,
1113 radeon_dig_connector->igp_lane_info = igp_lane_info; 1108 radeon_dig_connector->igp_lane_info = igp_lane_info;
1114 radeon_connector->con_priv = radeon_dig_connector; 1109 radeon_connector->con_priv = radeon_dig_connector;
1115 drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type); 1110 drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type);
1116 ret = drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs); 1111 drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs);
1117 if (ret)
1118 goto failed;
1119 if (i2c_bus->valid) { 1112 if (i2c_bus->valid) {
1120 radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DVI"); 1113 radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DVI");
1121 if (!radeon_connector->ddc_bus) 1114 if (!radeon_connector->ddc_bus)
@@ -1141,9 +1134,7 @@ radeon_add_atom_connector(struct drm_device *dev,
1141 radeon_dig_connector->igp_lane_info = igp_lane_info; 1134 radeon_dig_connector->igp_lane_info = igp_lane_info;
1142 radeon_connector->con_priv = radeon_dig_connector; 1135 radeon_connector->con_priv = radeon_dig_connector;
1143 drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type); 1136 drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type);
1144 ret = drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs); 1137 drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs);
1145 if (ret)
1146 goto failed;
1147 if (i2c_bus->valid) { 1138 if (i2c_bus->valid) {
1148 radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "HDMI"); 1139 radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "HDMI");
1149 if (!radeon_connector->ddc_bus) 1140 if (!radeon_connector->ddc_bus)
@@ -1163,9 +1154,7 @@ radeon_add_atom_connector(struct drm_device *dev,
1163 radeon_dig_connector->igp_lane_info = igp_lane_info; 1154 radeon_dig_connector->igp_lane_info = igp_lane_info;
1164 radeon_connector->con_priv = radeon_dig_connector; 1155 radeon_connector->con_priv = radeon_dig_connector;
1165 drm_connector_init(dev, &radeon_connector->base, &radeon_dp_connector_funcs, connector_type); 1156 drm_connector_init(dev, &radeon_connector->base, &radeon_dp_connector_funcs, connector_type);
1166 ret = drm_connector_helper_add(&radeon_connector->base, &radeon_dp_connector_helper_funcs); 1157 drm_connector_helper_add(&radeon_connector->base, &radeon_dp_connector_helper_funcs);
1167 if (ret)
1168 goto failed;
1169 if (i2c_bus->valid) { 1158 if (i2c_bus->valid) {
1170 /* add DP i2c bus */ 1159 /* add DP i2c bus */
1171 if (connector_type == DRM_MODE_CONNECTOR_eDP) 1160 if (connector_type == DRM_MODE_CONNECTOR_eDP)
@@ -1191,9 +1180,7 @@ radeon_add_atom_connector(struct drm_device *dev,
1191 case DRM_MODE_CONNECTOR_9PinDIN: 1180 case DRM_MODE_CONNECTOR_9PinDIN:
1192 if (radeon_tv == 1) { 1181 if (radeon_tv == 1) {
1193 drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type); 1182 drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type);
1194 ret = drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs); 1183 drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs);
1195 if (ret)
1196 goto failed;
1197 radeon_connector->dac_load_detect = true; 1184 radeon_connector->dac_load_detect = true;
1198 drm_connector_attach_property(&radeon_connector->base, 1185 drm_connector_attach_property(&radeon_connector->base,
1199 rdev->mode_info.load_detect_property, 1186 rdev->mode_info.load_detect_property,
@@ -1211,9 +1198,7 @@ radeon_add_atom_connector(struct drm_device *dev,
1211 radeon_dig_connector->igp_lane_info = igp_lane_info; 1198 radeon_dig_connector->igp_lane_info = igp_lane_info;
1212 radeon_connector->con_priv = radeon_dig_connector; 1199 radeon_connector->con_priv = radeon_dig_connector;
1213 drm_connector_init(dev, &radeon_connector->base, &radeon_lvds_connector_funcs, connector_type); 1200 drm_connector_init(dev, &radeon_connector->base, &radeon_lvds_connector_funcs, connector_type);
1214 ret = drm_connector_helper_add(&radeon_connector->base, &radeon_lvds_connector_helper_funcs); 1201 drm_connector_helper_add(&radeon_connector->base, &radeon_lvds_connector_helper_funcs);
1215 if (ret)
1216 goto failed;
1217 if (i2c_bus->valid) { 1202 if (i2c_bus->valid) {
1218 radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "LVDS"); 1203 radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "LVDS");
1219 if (!radeon_connector->ddc_bus) 1204 if (!radeon_connector->ddc_bus)
@@ -1250,7 +1235,6 @@ radeon_add_legacy_connector(struct drm_device *dev,
1250 struct drm_connector *connector; 1235 struct drm_connector *connector;
1251 struct radeon_connector *radeon_connector; 1236 struct radeon_connector *radeon_connector;
1252 uint32_t subpixel_order = SubPixelNone; 1237 uint32_t subpixel_order = SubPixelNone;
1253 int ret;
1254 1238
1255 /* fixme - tv/cv/din */ 1239 /* fixme - tv/cv/din */
1256 if (connector_type == DRM_MODE_CONNECTOR_Unknown) 1240 if (connector_type == DRM_MODE_CONNECTOR_Unknown)
@@ -1278,9 +1262,7 @@ radeon_add_legacy_connector(struct drm_device *dev,
1278 switch (connector_type) { 1262 switch (connector_type) {
1279 case DRM_MODE_CONNECTOR_VGA: 1263 case DRM_MODE_CONNECTOR_VGA:
1280 drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type); 1264 drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
1281 ret = drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs); 1265 drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
1282 if (ret)
1283 goto failed;
1284 if (i2c_bus->valid) { 1266 if (i2c_bus->valid) {
1285 radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "VGA"); 1267 radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "VGA");
1286 if (!radeon_connector->ddc_bus) 1268 if (!radeon_connector->ddc_bus)
@@ -1293,9 +1275,7 @@ radeon_add_legacy_connector(struct drm_device *dev,
1293 break; 1275 break;
1294 case DRM_MODE_CONNECTOR_DVIA: 1276 case DRM_MODE_CONNECTOR_DVIA:
1295 drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type); 1277 drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
1296 ret = drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs); 1278 drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
1297 if (ret)
1298 goto failed;
1299 if (i2c_bus->valid) { 1279 if (i2c_bus->valid) {
1300 radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DVI"); 1280 radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DVI");
1301 if (!radeon_connector->ddc_bus) 1281 if (!radeon_connector->ddc_bus)
@@ -1309,9 +1289,7 @@ radeon_add_legacy_connector(struct drm_device *dev,
1309 case DRM_MODE_CONNECTOR_DVII: 1289 case DRM_MODE_CONNECTOR_DVII:
1310 case DRM_MODE_CONNECTOR_DVID: 1290 case DRM_MODE_CONNECTOR_DVID:
1311 drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type); 1291 drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type);
1312 ret = drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs); 1292 drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs);
1313 if (ret)
1314 goto failed;
1315 if (i2c_bus->valid) { 1293 if (i2c_bus->valid) {
1316 radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DVI"); 1294 radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DVI");
1317 if (!radeon_connector->ddc_bus) 1295 if (!radeon_connector->ddc_bus)
@@ -1330,9 +1308,7 @@ radeon_add_legacy_connector(struct drm_device *dev,
1330 case DRM_MODE_CONNECTOR_9PinDIN: 1308 case DRM_MODE_CONNECTOR_9PinDIN:
1331 if (radeon_tv == 1) { 1309 if (radeon_tv == 1) {
1332 drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type); 1310 drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type);
1333 ret = drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs); 1311 drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs);
1334 if (ret)
1335 goto failed;
1336 radeon_connector->dac_load_detect = true; 1312 radeon_connector->dac_load_detect = true;
1337 /* RS400,RC410,RS480 chipset seems to report a lot 1313 /* RS400,RC410,RS480 chipset seems to report a lot
1338 * of false positive on load detect, we haven't yet 1314 * of false positive on load detect, we haven't yet
@@ -1351,9 +1327,7 @@ radeon_add_legacy_connector(struct drm_device *dev,
1351 break; 1327 break;
1352 case DRM_MODE_CONNECTOR_LVDS: 1328 case DRM_MODE_CONNECTOR_LVDS:
1353 drm_connector_init(dev, &radeon_connector->base, &radeon_lvds_connector_funcs, connector_type); 1329 drm_connector_init(dev, &radeon_connector->base, &radeon_lvds_connector_funcs, connector_type);
1354 ret = drm_connector_helper_add(&radeon_connector->base, &radeon_lvds_connector_helper_funcs); 1330 drm_connector_helper_add(&radeon_connector->base, &radeon_lvds_connector_helper_funcs);
1355 if (ret)
1356 goto failed;
1357 if (i2c_bus->valid) { 1331 if (i2c_bus->valid) {
1358 radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "LVDS"); 1332 radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "LVDS");
1359 if (!radeon_connector->ddc_bus) 1333 if (!radeon_connector->ddc_bus)
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index f9b0fe002c0a..ae0fb7356e62 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -220,10 +220,6 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
220 int r; 220 int r;
221 221
222 mutex_lock(&rdev->cs_mutex); 222 mutex_lock(&rdev->cs_mutex);
223 if (rdev->gpu_lockup) {
224 mutex_unlock(&rdev->cs_mutex);
225 return -EINVAL;
226 }
227 /* initialize parser */ 223 /* initialize parser */
228 memset(&parser, 0, sizeof(struct radeon_cs_parser)); 224 memset(&parser, 0, sizeof(struct radeon_cs_parser));
229 parser.filp = filp; 225 parser.filp = filp;
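
The sticky rdev->gpu_lockup guard disappears from the CS ioctl: a lockup no longer turns into a permanent refuse-all-work state. Instead, detection happens when a fence wait stops seeing progress, and recovery goes through radeon_gpu_reset(). A hedged sketch of where the check moves (the actual wait loop lives in radeon_fence.c, outside this diff):

	if (!radeon_fence_signaled(fence) && radeon_gpu_is_lockup(rdev)) {
		/* recover in place instead of flagging the device dead */
		radeon_gpu_reset(rdev);
	}
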
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 7b629e305560..26217ffe0355 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -671,7 +671,7 @@ int radeon_device_init(struct radeon_device *rdev,
671 /* Acceleration not working on AGP card try again 671 /* Acceleration not working on AGP card try again
672 * with fallback to PCI or PCIE GART 672 * with fallback to PCI or PCIE GART
673 */ 673 */
674 radeon_gpu_reset(rdev); 674 radeon_asic_reset(rdev);
675 radeon_fini(rdev); 675 radeon_fini(rdev);
676 radeon_agp_disable(rdev); 676 radeon_agp_disable(rdev);
677 r = radeon_init(rdev); 677 r = radeon_init(rdev);
@@ -691,6 +691,8 @@ void radeon_device_fini(struct radeon_device *rdev)
691{ 691{
692 DRM_INFO("radeon: finishing device.\n"); 692 DRM_INFO("radeon: finishing device.\n");
693 rdev->shutdown = true; 693 rdev->shutdown = true;
694 /* evict vram memory */
695 radeon_bo_evict_vram(rdev);
694 radeon_fini(rdev); 696 radeon_fini(rdev);
695 destroy_workqueue(rdev->wq); 697 destroy_workqueue(rdev->wq);
696 vga_switcheroo_unregister_client(rdev->pdev); 698 vga_switcheroo_unregister_client(rdev->pdev);
@@ -728,9 +730,10 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
728 continue; 730 continue;
729 } 731 }
730 robj = rfb->obj->driver_private; 732 robj = rfb->obj->driver_private;
731 if (robj != rdev->fbdev_rbo) { 733 /* don't unpin kernel fb objects */
734 if (!radeon_fbdev_robj_is_fb(rdev, robj)) {
732 r = radeon_bo_reserve(robj, false); 735 r = radeon_bo_reserve(robj, false);
733 if (unlikely(r == 0)) { 736 if (r == 0) {
734 radeon_bo_unpin(robj); 737 radeon_bo_unpin(robj);
735 radeon_bo_unreserve(robj); 738 radeon_bo_unreserve(robj);
736 } 739 }
@@ -755,7 +758,7 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
755 pci_set_power_state(dev->pdev, PCI_D3hot); 758 pci_set_power_state(dev->pdev, PCI_D3hot);
756 } 759 }
757 acquire_console_sem(); 760 acquire_console_sem();
758 fb_set_suspend(rdev->fbdev_info, 1); 761 radeon_fbdev_set_suspend(rdev, 1);
759 release_console_sem(); 762 release_console_sem();
760 return 0; 763 return 0;
761} 764}
@@ -779,7 +782,7 @@ int radeon_resume_kms(struct drm_device *dev)
779 radeon_agp_resume(rdev); 782 radeon_agp_resume(rdev);
780 radeon_resume(rdev); 783 radeon_resume(rdev);
781 radeon_restore_bios_scratch_regs(rdev); 784 radeon_restore_bios_scratch_regs(rdev);
782 fb_set_suspend(rdev->fbdev_info, 0); 785 radeon_fbdev_set_suspend(rdev, 0);
783 release_console_sem(); 786 release_console_sem();
784 787
785 /* reset hpd state */ 788 /* reset hpd state */
@@ -789,6 +792,26 @@ int radeon_resume_kms(struct drm_device *dev)
789 return 0; 792 return 0;
790} 793}
791 794
795int radeon_gpu_reset(struct radeon_device *rdev)
796{
797 int r;
798
799 radeon_save_bios_scratch_regs(rdev);
800 radeon_suspend(rdev);
801
802 r = radeon_asic_reset(rdev);
803 if (!r) {
804 dev_info(rdev->dev, "GPU reset succeeded\n");
805 radeon_resume(rdev);
806 radeon_restore_bios_scratch_regs(rdev);
807 drm_helper_resume_force_mode(rdev->ddev);
808 return 0;
809 }
810 /* bad news, how do we tell userspace? */
811 dev_info(rdev->dev, "GPU reset failed\n");
812 return r;
813}
814
792 815
793/* 816/*
794 * Debugfs 817 * Debugfs
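
radeon_gpu_reset() packages the whole recovery sequence: save the BIOS scratch registers (they carry output routing state), suspend the asic blocks, run the per-family asic_reset hook, then on success resume, restore the scratch registers and force the modeset back. The suspend path above likewise stops unpinning the fbdev's own framebuffer object; a plausible shape for that predicate (hypothetical body, and rdev->mode_info.rfbdev is an assumed field; the real helper belongs to the rewritten radeon_fb.c):

bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev,
			     struct radeon_bo *robj)
{
	struct radeon_fbdev *rfbdev = rdev->mode_info.rfbdev;

	/* the kernel fb is the one whose GEM object wraps this BO */
	return rfbdev && rfbdev->rfb.obj &&
	       rfbdev->rfb.obj->driver_private == robj;
}
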
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index bb1c122cad21..ce5163ed1fa6 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -831,10 +831,6 @@ void radeon_compute_pll(struct radeon_pll *pll,
831static void radeon_user_framebuffer_destroy(struct drm_framebuffer *fb) 831static void radeon_user_framebuffer_destroy(struct drm_framebuffer *fb)
832{ 832{
833 struct radeon_framebuffer *radeon_fb = to_radeon_framebuffer(fb); 833 struct radeon_framebuffer *radeon_fb = to_radeon_framebuffer(fb);
834 struct drm_device *dev = fb->dev;
835
836 if (fb->fbdev)
837 radeonfb_remove(dev, fb);
838 834
839 if (radeon_fb->obj) 835 if (radeon_fb->obj)
840 drm_gem_object_unreference_unlocked(radeon_fb->obj); 836 drm_gem_object_unreference_unlocked(radeon_fb->obj);
@@ -856,21 +852,15 @@ static const struct drm_framebuffer_funcs radeon_fb_funcs = {
856 .create_handle = radeon_user_framebuffer_create_handle, 852 .create_handle = radeon_user_framebuffer_create_handle,
857}; 853};
858 854
859struct drm_framebuffer * 855void
860radeon_framebuffer_create(struct drm_device *dev, 856radeon_framebuffer_init(struct drm_device *dev,
861 struct drm_mode_fb_cmd *mode_cmd, 857 struct radeon_framebuffer *rfb,
862 struct drm_gem_object *obj) 858 struct drm_mode_fb_cmd *mode_cmd,
859 struct drm_gem_object *obj)
863{ 860{
864 struct radeon_framebuffer *radeon_fb; 861 rfb->obj = obj;
865 862 drm_framebuffer_init(dev, &rfb->base, &radeon_fb_funcs);
866 radeon_fb = kzalloc(sizeof(*radeon_fb), GFP_KERNEL); 863 drm_helper_mode_fill_fb_struct(&rfb->base, mode_cmd);
867 if (radeon_fb == NULL) {
868 return NULL;
869 }
870 drm_framebuffer_init(dev, &radeon_fb->base, &radeon_fb_funcs);
871 drm_helper_mode_fill_fb_struct(&radeon_fb->base, mode_cmd);
872 radeon_fb->obj = obj;
873 return &radeon_fb->base;
874} 864}
875 865
876static struct drm_framebuffer * 866static struct drm_framebuffer *
@@ -879,6 +869,7 @@ radeon_user_framebuffer_create(struct drm_device *dev,
879 struct drm_mode_fb_cmd *mode_cmd) 869 struct drm_mode_fb_cmd *mode_cmd)
880{ 870{
881 struct drm_gem_object *obj; 871 struct drm_gem_object *obj;
872 struct radeon_framebuffer *radeon_fb;
882 873
883 obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handle); 874 obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handle);
884 if (obj == NULL) { 875 if (obj == NULL) {
@@ -886,12 +877,19 @@ radeon_user_framebuffer_create(struct drm_device *dev,
886 "can't create framebuffer\n", mode_cmd->handle); 877 "can't create framebuffer\n", mode_cmd->handle);
887 return NULL; 878 return NULL;
888 } 879 }
889 return radeon_framebuffer_create(dev, mode_cmd, obj); 880
881 radeon_fb = kzalloc(sizeof(*radeon_fb), GFP_KERNEL);
882 if (radeon_fb == NULL) {
883 return NULL;
884 }
885
886 radeon_framebuffer_init(dev, radeon_fb, mode_cmd, obj);
887
888 return &radeon_fb->base;
890} 889}
891 890
892static const struct drm_mode_config_funcs radeon_mode_funcs = { 891static const struct drm_mode_config_funcs radeon_mode_funcs = {
893 .fb_create = radeon_user_framebuffer_create, 892 .fb_create = radeon_user_framebuffer_create,
894 .fb_changed = radeonfb_probe,
895}; 893};
896 894
897struct drm_prop_enum_list { 895struct drm_prop_enum_list {
@@ -1031,12 +1029,14 @@ int radeon_modeset_init(struct radeon_device *rdev)
1031 } 1029 }
1032 /* initialize hpd */ 1030 /* initialize hpd */
1033 radeon_hpd_init(rdev); 1031 radeon_hpd_init(rdev);
1034 drm_helper_initial_config(rdev->ddev); 1032
1033 radeon_fbdev_init(rdev);
1035 return 0; 1034 return 0;
1036} 1035}
1037 1036
1038void radeon_modeset_fini(struct radeon_device *rdev) 1037void radeon_modeset_fini(struct radeon_device *rdev)
1039{ 1038{
1039 radeon_fbdev_fini(rdev);
1040 kfree(rdev->mode_info.bios_hardcoded_edid); 1040 kfree(rdev->mode_info.bios_hardcoded_edid);
1041 1041
1042 if (rdev->mode_info.mode_config_initialized) { 1042 if (rdev->mode_info.mode_config_initialized) {
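
radeon_framebuffer_create() becomes radeon_framebuffer_init(): instead of allocating a radeon_framebuffer itself, it initializes one the caller provides. The userspace fb_create path keeps heap-allocating, while the fbdev code can now embed the framebuffer directly in struct radeon_fbdev, with no separate allocation and no lookup from helper back to fb. The two call patterns the split enables, side by side (sketch; both appear in hunks of this series):

	/* userspace path: allocate, then init */
	struct radeon_framebuffer *rfb = kzalloc(sizeof(*rfb), GFP_KERNEL);
	if (rfb)
		radeon_framebuffer_init(dev, rfb, mode_cmd, obj);

	/* fbdev path: the framebuffer is embedded, nothing to allocate */
	radeon_framebuffer_init(dev, &rfbdev->rfb, &mode_cmd, gobj);
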
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index 9ac57a09784b..fcb5b52727b0 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -23,10 +23,6 @@
23 * Authors: 23 * Authors:
24 * David Airlie 24 * David Airlie
25 */ 25 */
26 /*
27 * Modularization
28 */
29
30#include <linux/module.h> 26#include <linux/module.h>
31#include <linux/slab.h> 27#include <linux/slab.h>
32#include <linux/fb.h> 28#include <linux/fb.h>
@@ -42,17 +38,21 @@
42 38
43#include <linux/vga_switcheroo.h> 39#include <linux/vga_switcheroo.h>
44 40
45struct radeon_fb_device { 41/* object hierarchy -
42 this contains a helper + a radeon fb
43 the helper contains a pointer to radeon framebuffer baseclass.
44*/
45struct radeon_fbdev {
46 struct drm_fb_helper helper; 46 struct drm_fb_helper helper;
47 struct radeon_framebuffer *rfb; 47 struct radeon_framebuffer rfb;
48 struct radeon_device *rdev; 48 struct list_head fbdev_list;
49 struct radeon_device *rdev;
49}; 50};
50 51
51static struct fb_ops radeonfb_ops = { 52static struct fb_ops radeonfb_ops = {
52 .owner = THIS_MODULE, 53 .owner = THIS_MODULE,
53 .fb_check_var = drm_fb_helper_check_var, 54 .fb_check_var = drm_fb_helper_check_var,
54 .fb_set_par = drm_fb_helper_set_par, 55 .fb_set_par = drm_fb_helper_set_par,
55 .fb_setcolreg = drm_fb_helper_setcolreg,
56 .fb_fillrect = cfb_fillrect, 56 .fb_fillrect = cfb_fillrect,
57 .fb_copyarea = cfb_copyarea, 57 .fb_copyarea = cfb_copyarea,
58 .fb_imageblit = cfb_imageblit, 58 .fb_imageblit = cfb_imageblit,
@@ -61,45 +61,6 @@ static struct fb_ops radeonfb_ops = {
61 .fb_setcmap = drm_fb_helper_setcmap, 61 .fb_setcmap = drm_fb_helper_setcmap,
62}; 62};
63 63
64/**
65 * Currently it is assumed that the old framebuffer is reused.
66 *
67 * LOCKING
68 * caller should hold the mode config lock.
69 *
70 */
71int radeonfb_resize(struct drm_device *dev, struct drm_crtc *crtc)
72{
73 struct fb_info *info;
74 struct drm_framebuffer *fb;
75 struct drm_display_mode *mode = crtc->desired_mode;
76
77 fb = crtc->fb;
78 if (fb == NULL) {
79 return 1;
80 }
81 info = fb->fbdev;
82 if (info == NULL) {
83 return 1;
84 }
85 if (mode == NULL) {
86 return 1;
87 }
88 info->var.xres = mode->hdisplay;
89 info->var.right_margin = mode->hsync_start - mode->hdisplay;
90 info->var.hsync_len = mode->hsync_end - mode->hsync_start;
91 info->var.left_margin = mode->htotal - mode->hsync_end;
92 info->var.yres = mode->vdisplay;
93 info->var.lower_margin = mode->vsync_start - mode->vdisplay;
94 info->var.vsync_len = mode->vsync_end - mode->vsync_start;
95 info->var.upper_margin = mode->vtotal - mode->vsync_end;
96 info->var.pixclock = 10000000 / mode->htotal * 1000 / mode->vtotal * 100;
97 /* avoid overflow */
98 info->var.pixclock = info->var.pixclock * 1000 / mode->vrefresh;
99
100 return 0;
101}
102EXPORT_SYMBOL(radeonfb_resize);
103 64
104static int radeon_align_pitch(struct radeon_device *rdev, int width, int bpp, bool tiled) 65static int radeon_align_pitch(struct radeon_device *rdev, int width, int bpp, bool tiled)
105{ 66{
@@ -125,57 +86,44 @@ static int radeon_align_pitch(struct radeon_device *rdev, int width, int bpp, bo
125 return aligned; 86 return aligned;
126} 87}
127 88
128static struct drm_fb_helper_funcs radeon_fb_helper_funcs = { 89static void radeonfb_destroy_pinned_object(struct drm_gem_object *gobj)
129 .gamma_set = radeon_crtc_fb_gamma_set, 90{
130 .gamma_get = radeon_crtc_fb_gamma_get, 91 struct radeon_bo *rbo = gobj->driver_private;
131}; 92 int ret;
93
94 ret = radeon_bo_reserve(rbo, false);
95 if (likely(ret == 0)) {
96 radeon_bo_kunmap(rbo);
97 radeon_bo_unreserve(rbo);
98 }
99 drm_gem_object_unreference_unlocked(gobj);
100}
132 101
133int radeonfb_create(struct drm_device *dev, 102static int radeonfb_create_pinned_object(struct radeon_fbdev *rfbdev,
134 uint32_t fb_width, uint32_t fb_height, 103 struct drm_mode_fb_cmd *mode_cmd,
135 uint32_t surface_width, uint32_t surface_height, 104 struct drm_gem_object **gobj_p)
136 uint32_t surface_depth, uint32_t surface_bpp,
137 struct drm_framebuffer **fb_p)
138{ 105{
139 struct radeon_device *rdev = dev->dev_private; 106 struct radeon_device *rdev = rfbdev->rdev;
140 struct fb_info *info;
141 struct radeon_fb_device *rfbdev;
142 struct drm_framebuffer *fb = NULL;
143 struct radeon_framebuffer *rfb;
144 struct drm_mode_fb_cmd mode_cmd;
145 struct drm_gem_object *gobj = NULL; 107 struct drm_gem_object *gobj = NULL;
146 struct radeon_bo *rbo = NULL; 108 struct radeon_bo *rbo = NULL;
147 struct device *device = &rdev->pdev->dev;
148 int size, aligned_size, ret;
149 u64 fb_gpuaddr;
150 void *fbptr = NULL;
151 unsigned long tmp;
152 bool fb_tiled = false; /* useful for testing */ 109 bool fb_tiled = false; /* useful for testing */
153 u32 tiling_flags = 0; 110 u32 tiling_flags = 0;
111 int ret;
112 int aligned_size, size;
154 113
155 mode_cmd.width = surface_width;
156 mode_cmd.height = surface_height;
157
158 /* avivo can't scanout real 24bpp */
159 if ((surface_bpp == 24) && ASIC_IS_AVIVO(rdev))
160 surface_bpp = 32;
161
162 mode_cmd.bpp = surface_bpp;
163 /* need to align pitch with crtc limits */ 114 /* need to align pitch with crtc limits */
164 mode_cmd.pitch = radeon_align_pitch(rdev, mode_cmd.width, mode_cmd.bpp, fb_tiled) * ((mode_cmd.bpp + 1) / 8); 115 mode_cmd->pitch = radeon_align_pitch(rdev, mode_cmd->width, mode_cmd->bpp, fb_tiled) * ((mode_cmd->bpp + 1) / 8);
165 mode_cmd.depth = surface_depth;
166 116
167 size = mode_cmd.pitch * mode_cmd.height; 117 size = mode_cmd->pitch * mode_cmd->height;
168 aligned_size = ALIGN(size, PAGE_SIZE); 118 aligned_size = ALIGN(size, PAGE_SIZE);
169
170 ret = radeon_gem_object_create(rdev, aligned_size, 0, 119 ret = radeon_gem_object_create(rdev, aligned_size, 0,
171 RADEON_GEM_DOMAIN_VRAM, 120 RADEON_GEM_DOMAIN_VRAM,
172 false, ttm_bo_type_kernel, 121 false, ttm_bo_type_kernel,
173 &gobj); 122 &gobj);
174 if (ret) { 123 if (ret) {
175 printk(KERN_ERR "failed to allocate framebuffer (%d %d)\n", 124 printk(KERN_ERR "failed to allocate framebuffer (%d)\n",
176 surface_width, surface_height); 125 aligned_size);
177 ret = -ENOMEM; 126 return -ENOMEM;
178 goto out;
179 } 127 }
180 rbo = gobj->driver_private; 128 rbo = gobj->driver_private;
181 129
@@ -183,7 +131,7 @@ int radeonfb_create(struct drm_device *dev,
183 tiling_flags = RADEON_TILING_MACRO; 131 tiling_flags = RADEON_TILING_MACRO;
184 132
185#ifdef __BIG_ENDIAN 133#ifdef __BIG_ENDIAN
186 switch (mode_cmd.bpp) { 134 switch (mode_cmd->bpp) {
187 case 32: 135 case 32:
188 tiling_flags |= RADEON_TILING_SWAP_32BIT; 136 tiling_flags |= RADEON_TILING_SWAP_32BIT;
189 break; 137 break;
@@ -196,57 +144,81 @@ int radeonfb_create(struct drm_device *dev,
196 144
197 if (tiling_flags) { 145 if (tiling_flags) {
198 ret = radeon_bo_set_tiling_flags(rbo, 146 ret = radeon_bo_set_tiling_flags(rbo,
199 tiling_flags | RADEON_TILING_SURFACE, 147 tiling_flags | RADEON_TILING_SURFACE,
200 mode_cmd.pitch); 148 mode_cmd->pitch);
201 if (ret) 149 if (ret)
202 dev_err(rdev->dev, "FB failed to set tiling flags\n"); 150 dev_err(rdev->dev, "FB failed to set tiling flags\n");
203 } 151 }
204 mutex_lock(&rdev->ddev->struct_mutex); 152
205 fb = radeon_framebuffer_create(rdev->ddev, &mode_cmd, gobj); 153
206 if (fb == NULL) {
207 DRM_ERROR("failed to allocate fb.\n");
208 ret = -ENOMEM;
209 goto out_unref;
210 }
211 ret = radeon_bo_reserve(rbo, false); 154 ret = radeon_bo_reserve(rbo, false);
212 if (unlikely(ret != 0)) 155 if (unlikely(ret != 0))
213 goto out_unref; 156 goto out_unref;
214 ret = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, &fb_gpuaddr); 157 ret = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, NULL);
215 if (ret) { 158 if (ret) {
216 radeon_bo_unreserve(rbo); 159 radeon_bo_unreserve(rbo);
217 goto out_unref; 160 goto out_unref;
218 } 161 }
219 if (fb_tiled) 162 if (fb_tiled)
220 radeon_bo_check_tiling(rbo, 0, 0); 163 radeon_bo_check_tiling(rbo, 0, 0);
221 ret = radeon_bo_kmap(rbo, &fbptr); 164 ret = radeon_bo_kmap(rbo, NULL);
222 radeon_bo_unreserve(rbo); 165 radeon_bo_unreserve(rbo);
223 if (ret) { 166 if (ret) {
224 goto out_unref; 167 goto out_unref;
225 } 168 }
226 169
227 list_add(&fb->filp_head, &rdev->ddev->mode_config.fb_kernel_list); 170 *gobj_p = gobj;
171 return 0;
172out_unref:
173 radeonfb_destroy_pinned_object(gobj);
174 *gobj_p = NULL;
175 return ret;
176}
177
178static int radeonfb_create(struct radeon_fbdev *rfbdev,
179 struct drm_fb_helper_surface_size *sizes)
180{
181 struct radeon_device *rdev = rfbdev->rdev;
182 struct fb_info *info;
183 struct drm_framebuffer *fb = NULL;
184 struct drm_mode_fb_cmd mode_cmd;
185 struct drm_gem_object *gobj = NULL;
186 struct radeon_bo *rbo = NULL;
187 struct device *device = &rdev->pdev->dev;
188 int ret;
189 unsigned long tmp;
190
191 mode_cmd.width = sizes->surface_width;
192 mode_cmd.height = sizes->surface_height;
193
194 /* avivo can't scanout real 24bpp */
195 if ((sizes->surface_bpp == 24) && ASIC_IS_AVIVO(rdev))
196 sizes->surface_bpp = 32;
197
198 mode_cmd.bpp = sizes->surface_bpp;
199 mode_cmd.depth = sizes->surface_depth;
228 200
229 *fb_p = fb; 201 ret = radeonfb_create_pinned_object(rfbdev, &mode_cmd, &gobj);
230 rfb = to_radeon_framebuffer(fb); 202 rbo = gobj->driver_private;
231 rdev->fbdev_rfb = rfb;
232 rdev->fbdev_rbo = rbo;
233 203
 234 info = framebuffer_alloc(sizeof(struct radeon_fb_device), device); 204 /* okay, we have an object, now allocate the framebuffer */
205 info = framebuffer_alloc(0, device);
235 if (info == NULL) { 206 if (info == NULL) {
236 ret = -ENOMEM; 207 ret = -ENOMEM;
237 goto out_unref; 208 goto out_unref;
238 } 209 }
239 210
240 rdev->fbdev_info = info; 211 info->par = rfbdev;
241 rfbdev = info->par;
242 rfbdev->helper.funcs = &radeon_fb_helper_funcs;
243 rfbdev->helper.dev = dev;
244 ret = drm_fb_helper_init_crtc_count(&rfbdev->helper, rdev->num_crtc,
245 RADEONFB_CONN_LIMIT);
246 if (ret)
247 goto out_unref;
248 212
249 memset_io(fbptr, 0x0, aligned_size); 213 radeon_framebuffer_init(rdev->ddev, &rfbdev->rfb, &mode_cmd, gobj);
214
215 fb = &rfbdev->rfb.base;
216
217 /* setup helper */
218 rfbdev->helper.fb = fb;
219 rfbdev->helper.fbdev = info;
220
221 memset_io(rbo->kptr, 0x0, radeon_bo_size(rbo));
250 222
251 strcpy(info->fix.id, "radeondrmfb"); 223 strcpy(info->fix.id, "radeondrmfb");
252 224
@@ -255,13 +227,13 @@ int radeonfb_create(struct drm_device *dev,
255 info->flags = FBINFO_DEFAULT; 227 info->flags = FBINFO_DEFAULT;
256 info->fbops = &radeonfb_ops; 228 info->fbops = &radeonfb_ops;
257 229
258 tmp = fb_gpuaddr - rdev->mc.vram_start; 230 tmp = radeon_bo_gpu_offset(rbo) - rdev->mc.vram_start;
259 info->fix.smem_start = rdev->mc.aper_base + tmp; 231 info->fix.smem_start = rdev->mc.aper_base + tmp;
260 info->fix.smem_len = size; 232 info->fix.smem_len = radeon_bo_size(rbo);
261 info->screen_base = fbptr; 233 info->screen_base = rbo->kptr;
262 info->screen_size = size; 234 info->screen_size = radeon_bo_size(rbo);
263 235
264 drm_fb_helper_fill_var(info, fb, fb_width, fb_height); 236 drm_fb_helper_fill_var(info, &rfbdev->helper, sizes->fb_width, sizes->fb_height);
265 237
266 /* setup aperture base/size for vesafb takeover */ 238 /* setup aperture base/size for vesafb takeover */
267 info->aperture_base = rdev->ddev->mode_config.fb_base; 239 info->aperture_base = rdev->ddev->mode_config.fb_base;
@@ -274,44 +246,55 @@ int radeonfb_create(struct drm_device *dev,
274 info->pixmap.access_align = 32; 246 info->pixmap.access_align = 32;
275 info->pixmap.flags = FB_PIXMAP_SYSTEM; 247 info->pixmap.flags = FB_PIXMAP_SYSTEM;
276 info->pixmap.scan_align = 1; 248 info->pixmap.scan_align = 1;
249
277 if (info->screen_base == NULL) { 250 if (info->screen_base == NULL) {
278 ret = -ENOSPC; 251 ret = -ENOSPC;
279 goto out_unref; 252 goto out_unref;
280 } 253 }
254
255 ret = fb_alloc_cmap(&info->cmap, 256, 0);
256 if (ret) {
257 ret = -ENOMEM;
258 goto out_unref;
259 }
260
281 DRM_INFO("fb mappable at 0x%lX\n", info->fix.smem_start); 261 DRM_INFO("fb mappable at 0x%lX\n", info->fix.smem_start);
282 DRM_INFO("vram apper at 0x%lX\n", (unsigned long)rdev->mc.aper_base); 262 DRM_INFO("vram apper at 0x%lX\n", (unsigned long)rdev->mc.aper_base);
283 DRM_INFO("size %lu\n", (unsigned long)size); 263 DRM_INFO("size %lu\n", (unsigned long)radeon_bo_size(rbo));
284 DRM_INFO("fb depth is %d\n", fb->depth); 264 DRM_INFO("fb depth is %d\n", fb->depth);
285 DRM_INFO(" pitch is %d\n", fb->pitch); 265 DRM_INFO(" pitch is %d\n", fb->pitch);
286 266
287 fb->fbdev = info;
288 rfbdev->rfb = rfb;
289 rfbdev->rdev = rdev;
290
291 mutex_unlock(&rdev->ddev->struct_mutex);
292 vga_switcheroo_client_fb_set(rdev->ddev->pdev, info); 267 vga_switcheroo_client_fb_set(rdev->ddev->pdev, info);
293 return 0; 268 return 0;
294 269
295out_unref: 270out_unref:
296 if (rbo) { 271 if (rbo) {
297 ret = radeon_bo_reserve(rbo, false); 272
298 if (likely(ret == 0)) {
299 radeon_bo_kunmap(rbo);
300 radeon_bo_unreserve(rbo);
301 }
302 } 273 }
303 if (fb && ret) { 274 if (fb && ret) {
304 list_del(&fb->filp_head);
305 drm_gem_object_unreference(gobj); 275 drm_gem_object_unreference(gobj);
306 drm_framebuffer_cleanup(fb); 276 drm_framebuffer_cleanup(fb);
307 kfree(fb); 277 kfree(fb);
308 } 278 }
309 drm_gem_object_unreference(gobj);
310 mutex_unlock(&rdev->ddev->struct_mutex);
311out:
312 return ret; 279 return ret;
313} 280}
314 281
282static int radeon_fb_find_or_create_single(struct drm_fb_helper *helper,
283 struct drm_fb_helper_surface_size *sizes)
284{
285 struct radeon_fbdev *rfbdev = (struct radeon_fbdev *)helper;
286 int new_fb = 0;
287 int ret;
288
289 if (!helper->fb) {
290 ret = radeonfb_create(rfbdev, sizes);
291 if (ret)
292 return ret;
293 new_fb = 1;
294 }
295 return new_fb;
296}
297
315static char *mode_option; 298static char *mode_option;
316int radeon_parse_options(char *options) 299int radeon_parse_options(char *options)
317{ 300{
@@ -328,46 +311,111 @@ int radeon_parse_options(char *options)
328 return 0; 311 return 0;
329} 312}
330 313
331int radeonfb_probe(struct drm_device *dev) 314void radeonfb_hotplug(struct drm_device *dev, bool polled)
332{ 315{
333 struct radeon_device *rdev = dev->dev_private; 316 struct radeon_device *rdev = dev->dev_private;
334 int bpp_sel = 32;
335 317
336 /* select 8 bpp console on RN50 or 16MB cards */ 318 drm_helper_fb_hpd_irq_event(&rdev->mode_info.rfbdev->helper);
337 if (ASIC_IS_RN50(rdev) || rdev->mc.real_vram_size <= (32*1024*1024)) 319}
338 bpp_sel = 8;
339 320
340 return drm_fb_helper_single_fb_probe(dev, bpp_sel, &radeonfb_create); 321static void radeon_fb_output_status_changed(struct drm_fb_helper *fb_helper)
322{
323 drm_helper_fb_hotplug_event(fb_helper, true);
341} 324}
342 325
343int radeonfb_remove(struct drm_device *dev, struct drm_framebuffer *fb) 326static int radeon_fbdev_destroy(struct drm_device *dev, struct radeon_fbdev *rfbdev)
344{ 327{
345 struct fb_info *info; 328 struct fb_info *info;
346 struct radeon_framebuffer *rfb = to_radeon_framebuffer(fb); 329 struct radeon_framebuffer *rfb = &rfbdev->rfb;
347 struct radeon_bo *rbo; 330 struct radeon_bo *rbo;
348 int r; 331 int r;
349 332
350 if (!fb) { 333 if (rfbdev->helper.fbdev) {
351 return -EINVAL; 334 info = rfbdev->helper.fbdev;
335
336 unregister_framebuffer(info);
337 if (info->cmap.len)
338 fb_dealloc_cmap(&info->cmap);
339 framebuffer_release(info);
352 } 340 }
353 info = fb->fbdev; 341
354 if (info) { 342 if (rfb->obj) {
355 struct radeon_fb_device *rfbdev = info->par;
356 rbo = rfb->obj->driver_private; 343 rbo = rfb->obj->driver_private;
357 unregister_framebuffer(info);
358 r = radeon_bo_reserve(rbo, false); 344 r = radeon_bo_reserve(rbo, false);
359 if (likely(r == 0)) { 345 if (likely(r == 0)) {
360 radeon_bo_kunmap(rbo); 346 radeon_bo_kunmap(rbo);
361 radeon_bo_unpin(rbo); 347 radeon_bo_unpin(rbo);
362 radeon_bo_unreserve(rbo); 348 radeon_bo_unreserve(rbo);
363 } 349 }
364 drm_fb_helper_free(&rfbdev->helper); 350 drm_gem_object_unreference_unlocked(rfb->obj);
365 framebuffer_release(info);
366 } 351 }
352 drm_fb_helper_fini(&rfbdev->helper);
353 drm_framebuffer_cleanup(&rfb->base);
367 354
368 printk(KERN_INFO "unregistered panic notifier\n"); 355 return 0;
356}
369 357
358static struct drm_fb_helper_funcs radeon_fb_helper_funcs = {
359 .gamma_set = radeon_crtc_fb_gamma_set,
360 .gamma_get = radeon_crtc_fb_gamma_get,
361 .fb_probe = radeon_fb_find_or_create_single,
362 .fb_output_status_changed = radeon_fb_output_status_changed,
363};
364
365int radeon_fbdev_init(struct radeon_device *rdev)
366{
367 struct radeon_fbdev *rfbdev;
368 int bpp_sel = 32;
369
370 /* select 8 bpp console on RN50 or 16MB cards */
371 if (ASIC_IS_RN50(rdev) || rdev->mc.real_vram_size <= (32*1024*1024))
372 bpp_sel = 8;
373
374 rfbdev = kzalloc(sizeof(struct radeon_fbdev), GFP_KERNEL);
375 if (!rfbdev)
376 return -ENOMEM;
377
378 rfbdev->rdev = rdev;
379 rdev->mode_info.rfbdev = rfbdev;
380 rfbdev->helper.funcs = &radeon_fb_helper_funcs;
381
382 drm_fb_helper_init(rdev->ddev, &rfbdev->helper,
383 rdev->num_crtc,
384 RADEONFB_CONN_LIMIT, true);
385 drm_fb_helper_single_add_all_connectors(&rfbdev->helper);
386 drm_fb_helper_initial_config(&rfbdev->helper, bpp_sel);
370 return 0; 387 return 0;
388
389}
390
391void radeon_fbdev_fini(struct radeon_device *rdev)
392{
393 if (!rdev->mode_info.rfbdev)
394 return;
395
396 radeon_fbdev_destroy(rdev->ddev, rdev->mode_info.rfbdev);
397 kfree(rdev->mode_info.rfbdev);
398 rdev->mode_info.rfbdev = NULL;
399}
400
401void radeon_fbdev_set_suspend(struct radeon_device *rdev, int state)
402{
403 fb_set_suspend(rdev->mode_info.rfbdev->helper.fbdev, state);
404}
405
406int radeon_fbdev_total_size(struct radeon_device *rdev)
407{
408 struct radeon_bo *robj;
409 int size = 0;
410
411 robj = rdev->mode_info.rfbdev->rfb.obj->driver_private;
412 size += radeon_bo_size(robj);
413 return size;
414}
415
416bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj)
417{
418 if (robj == rdev->mode_info.rfbdev->rfb.obj->driver_private)
419 return true;
420 return false;
371} 421}
372EXPORT_SYMBOL(radeonfb_remove);
373MODULE_LICENSE("GPL");
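
The hunks above convert radeon from the one-shot radeonfb_probe() path to the persistent drm_fb_helper model: a radeon_fbdev wrapper is allocated once at driver init, the helper callbacks are registered, and the core invokes .fb_probe whenever a framebuffer surface is needed. A minimal sketch of that wiring, assuming the drm_fb_helper API of this era as used above; mydrv_*, mydrv_fb_create() and MYDRV_CONN_LIMIT are invented stand-ins:

/* Illustrative sketch only, not part of the patch: a hypothetical driver
 * wiring the fbdev helper the same way radeon_fbdev_init() does above. */
struct mydrv_fbdev {
	struct drm_fb_helper helper;	/* must be first: helper ptr is cast back */
	struct mydrv_device *mdev;
};

static int mydrv_fb_find_or_create(struct drm_fb_helper *helper,
				   struct drm_fb_helper_surface_size *sizes)
{
	struct mydrv_fbdev *fbdev = (struct mydrv_fbdev *)helper;
	int ret;

	if (helper->fb)
		return 0;			/* reuse the existing fb */
	ret = mydrv_fb_create(fbdev, sizes);	/* hypothetical helper */
	if (ret)
		return ret;
	return 1;				/* tell the core a new fb exists */
}

static struct drm_fb_helper_funcs mydrv_fb_helper_funcs = {
	.fb_probe = mydrv_fb_find_or_create,
};

int mydrv_fbdev_init(struct mydrv_device *mdev)
{
	struct mydrv_fbdev *fbdev = kzalloc(sizeof(*fbdev), GFP_KERNEL);

	if (!fbdev)
		return -ENOMEM;
	fbdev->mdev = mdev;
	fbdev->helper.funcs = &mydrv_fb_helper_funcs;
	drm_fb_helper_init(mdev->ddev, &fbdev->helper, mdev->num_crtc,
			   MYDRV_CONN_LIMIT, true);
	drm_fb_helper_single_add_all_connectors(&fbdev->helper);
	drm_fb_helper_initial_config(&fbdev->helper, 32 /* bpp */);
	return 0;
}

The cast in mydrv_fb_find_or_create() only works because the helper is the first member of the wrapper; radeon_fb_find_or_create_single() above relies on the same layout for struct radeon_fbdev.
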
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index d90f95b405c5..b1f9a81b5d1d 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -58,7 +58,6 @@ int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
58 radeon_fence_ring_emit(rdev, fence); 58 radeon_fence_ring_emit(rdev, fence);
59 59
60 fence->emited = true; 60 fence->emited = true;
61 fence->timeout = jiffies + ((2000 * HZ) / 1000);
62 list_del(&fence->list); 61 list_del(&fence->list);
63 list_add_tail(&fence->list, &rdev->fence_drv.emited); 62 list_add_tail(&fence->list, &rdev->fence_drv.emited);
64 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags); 63 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
@@ -71,15 +70,34 @@ static bool radeon_fence_poll_locked(struct radeon_device *rdev)
71 struct list_head *i, *n; 70 struct list_head *i, *n;
72 uint32_t seq; 71 uint32_t seq;
73 bool wake = false; 72 bool wake = false;
73 unsigned long cjiffies;
74 74
75 if (rdev == NULL) {
76 return true;
77 }
78 if (rdev->shutdown) {
79 return true;
80 }
81 seq = RREG32(rdev->fence_drv.scratch_reg); 75 seq = RREG32(rdev->fence_drv.scratch_reg);
82 rdev->fence_drv.last_seq = seq; 76 if (seq != rdev->fence_drv.last_seq) {
77 rdev->fence_drv.last_seq = seq;
78 rdev->fence_drv.last_jiffies = jiffies;
79 rdev->fence_drv.last_timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
80 } else {
81 cjiffies = jiffies;
82 if (time_after(cjiffies, rdev->fence_drv.last_jiffies)) {
83 cjiffies -= rdev->fence_drv.last_jiffies;
84 if (time_after(rdev->fence_drv.last_timeout, cjiffies)) {
85 /* update the timeout */
86 rdev->fence_drv.last_timeout -= cjiffies;
87 } else {
 88 /* the 500ms timeout has elapsed, we should test
89 * for GPU lockup
90 */
91 rdev->fence_drv.last_timeout = 1;
92 }
93 } else {
 94 /* jiffies wrapped around; update last_jiffies and just
 95 * wait a little longer
96 */
97 rdev->fence_drv.last_jiffies = cjiffies;
98 }
99 return false;
100 }
83 n = NULL; 101 n = NULL;
84 list_for_each(i, &rdev->fence_drv.emited) { 102 list_for_each(i, &rdev->fence_drv.emited) {
85 fence = list_entry(i, struct radeon_fence, list); 103 fence = list_entry(i, struct radeon_fence, list);
@@ -171,9 +189,8 @@ bool radeon_fence_signaled(struct radeon_fence *fence)
171int radeon_fence_wait(struct radeon_fence *fence, bool intr) 189int radeon_fence_wait(struct radeon_fence *fence, bool intr)
172{ 190{
173 struct radeon_device *rdev; 191 struct radeon_device *rdev;
174 unsigned long cur_jiffies; 192 unsigned long irq_flags, timeout;
175 unsigned long timeout; 193 u32 seq;
176 bool expired = false;
177 int r; 194 int r;
178 195
179 if (fence == NULL) { 196 if (fence == NULL) {
@@ -184,21 +201,18 @@ int radeon_fence_wait(struct radeon_fence *fence, bool intr)
184 if (radeon_fence_signaled(fence)) { 201 if (radeon_fence_signaled(fence)) {
185 return 0; 202 return 0;
186 } 203 }
187 204 timeout = rdev->fence_drv.last_timeout;
188retry: 205retry:
189 cur_jiffies = jiffies; 206 /* save current sequence used to check for GPU lockup */
190 timeout = HZ / 100; 207 seq = rdev->fence_drv.last_seq;
191 if (time_after(fence->timeout, cur_jiffies)) {
192 timeout = fence->timeout - cur_jiffies;
193 }
194
195 if (intr) { 208 if (intr) {
196 radeon_irq_kms_sw_irq_get(rdev); 209 radeon_irq_kms_sw_irq_get(rdev);
197 r = wait_event_interruptible_timeout(rdev->fence_drv.queue, 210 r = wait_event_interruptible_timeout(rdev->fence_drv.queue,
198 radeon_fence_signaled(fence), timeout); 211 radeon_fence_signaled(fence), timeout);
199 radeon_irq_kms_sw_irq_put(rdev); 212 radeon_irq_kms_sw_irq_put(rdev);
200 if (unlikely(r < 0)) 213 if (unlikely(r < 0)) {
201 return r; 214 return r;
215 }
202 } else { 216 } else {
203 radeon_irq_kms_sw_irq_get(rdev); 217 radeon_irq_kms_sw_irq_get(rdev);
204 r = wait_event_timeout(rdev->fence_drv.queue, 218 r = wait_event_timeout(rdev->fence_drv.queue,
@@ -206,38 +220,36 @@ retry:
206 radeon_irq_kms_sw_irq_put(rdev); 220 radeon_irq_kms_sw_irq_put(rdev);
207 } 221 }
208 if (unlikely(!radeon_fence_signaled(fence))) { 222 if (unlikely(!radeon_fence_signaled(fence))) {
 209 if (unlikely(r == 0)) { 223 /* we were interrupted for some reason and the fence
 210 expired = true; 224 * isn't signaled yet, resume the wait
225 */
226 if (r) {
227 timeout = r;
228 goto retry;
211 } 229 }
212 if (unlikely(expired)) { 230 /* don't protect read access to rdev->fence_drv.last_seq
 213 timeout = 1; 231 * if we are experiencing a lockup, the value doesn't change
214 if (time_after(cur_jiffies, fence->timeout)) { 232 */
215 timeout = cur_jiffies - fence->timeout; 233 if (seq == rdev->fence_drv.last_seq && radeon_gpu_is_lockup(rdev)) {
 216 } 234 /* good news: we believe it's a lockup */
217 timeout = jiffies_to_msecs(timeout); 235 WARN(1, "GPU lockup (waiting for 0x%08X last fence id 0x%08X)\n", fence->seq, seq);
 218 if (timeout > 500) { 236 /* FIXME: what should we do? mark everyone
219 DRM_ERROR("fence(%p:0x%08X) %lums timeout " 237 * as signaled for now
220 "going to reset GPU\n", 238 */
221 fence, fence->seq, timeout); 239 rdev->gpu_lockup = true;
222 radeon_gpu_reset(rdev); 240 r = radeon_gpu_reset(rdev);
223 WREG32(rdev->fence_drv.scratch_reg, fence->seq); 241 if (r)
224 } 242 return r;
243 WREG32(rdev->fence_drv.scratch_reg, fence->seq);
244 rdev->gpu_lockup = false;
225 } 245 }
246 timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
247 write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
248 rdev->fence_drv.last_timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
249 rdev->fence_drv.last_jiffies = jiffies;
250 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
226 goto retry; 251 goto retry;
227 } 252 }
228 if (unlikely(expired)) {
229 rdev->fence_drv.count_timeout++;
230 cur_jiffies = jiffies;
231 timeout = 1;
232 if (time_after(cur_jiffies, fence->timeout)) {
233 timeout = cur_jiffies - fence->timeout;
234 }
235 timeout = jiffies_to_msecs(timeout);
236 DRM_ERROR("fence(%p:0x%08X) %lums timeout\n",
237 fence, fence->seq, timeout);
238 DRM_ERROR("last signaled fence(0x%08X)\n",
239 rdev->fence_drv.last_seq);
240 }
241 return 0; 253 return 0;
242} 254}
243 255
@@ -333,7 +345,6 @@ int radeon_fence_driver_init(struct radeon_device *rdev)
333 INIT_LIST_HEAD(&rdev->fence_drv.created); 345 INIT_LIST_HEAD(&rdev->fence_drv.created);
334 INIT_LIST_HEAD(&rdev->fence_drv.emited); 346 INIT_LIST_HEAD(&rdev->fence_drv.emited);
335 INIT_LIST_HEAD(&rdev->fence_drv.signaled); 347 INIT_LIST_HEAD(&rdev->fence_drv.signaled);
336 rdev->fence_drv.count_timeout = 0;
337 init_waitqueue_head(&rdev->fence_drv.queue); 348 init_waitqueue_head(&rdev->fence_drv.queue);
338 rdev->fence_drv.initialized = true; 349 rdev->fence_drv.initialized = true;
339 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags); 350 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
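
The fence rework above drops per-fence absolute timeouts in favour of a driver-wide progress window: last_seq and last_jiffies are refreshed whenever the scratch register advances, last_timeout counts down only while the sequence number stalls, and radeon_gpu_is_lockup() is consulted only once the window is exhausted. A stand-alone sketch of that bookkeeping (user-space C with invented names; the kernel version additionally goes through time_after() to stay wrap-safe):

#include <stdbool.h>
#include <stdio.h>

#define FENCE_TIMEOUT 500  /* stand-in for RADEON_FENCE_JIFFIES_TIMEOUT */

struct fence_drv {
	unsigned last_seq;
	unsigned long last_stamp;	/* stand-in for last_jiffies */
	unsigned long last_timeout;
};

/* Returns true when the caller should run its GPU-lockup check. */
static bool fence_stalled(struct fence_drv *d, unsigned hw_seq,
			  unsigned long now)
{
	if (hw_seq != d->last_seq) {		/* progress: restart the window */
		d->last_seq = hw_seq;
		d->last_stamp = now;
		d->last_timeout = FENCE_TIMEOUT;
		return false;
	}
	if (now <= d->last_stamp)		/* clock stepped back: wait more */
		return false;
	if (d->last_timeout > now - d->last_stamp) {
		d->last_timeout -= now - d->last_stamp;	/* shrink the window */
		d->last_stamp = now;
		return false;
	}
	return true;				/* window used up, no progress */
}

int main(void)
{
	struct fence_drv d = { 0, 0, FENCE_TIMEOUT };

	printf("%d\n", fence_stalled(&d, 1, 100));	/* progress    -> 0 */
	printf("%d\n", fence_stalled(&d, 1, 700));	/* 600ms stall -> 1 */
	return 0;
}
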
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index 1770d3c07fd0..e65b90317fab 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -173,7 +173,7 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
173 int i, j; 173 int i, j;
174 174
175 if (!rdev->gart.ready) { 175 if (!rdev->gart.ready) {
176 DRM_ERROR("trying to bind memory to unitialized GART !\n"); 176 WARN(1, "trying to bind memory to unitialized GART !\n");
177 return -EINVAL; 177 return -EINVAL;
178 } 178 }
179 t = offset / RADEON_GPU_PAGE_SIZE; 179 t = offset / RADEON_GPU_PAGE_SIZE;
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index ef92d147d8f0..a72a3ee5d69b 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -44,6 +44,9 @@ void radeon_gem_object_free(struct drm_gem_object *gobj)
44 if (robj) { 44 if (robj) {
45 radeon_bo_unref(&robj); 45 radeon_bo_unref(&robj);
46 } 46 }
47
48 drm_gem_object_release(gobj);
49 kfree(gobj);
47} 50}
48 51
49int radeon_gem_object_create(struct radeon_device *rdev, int size, 52int radeon_gem_object_create(struct radeon_device *rdev, int size,
@@ -158,8 +161,7 @@ int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
158 args->vram_visible = rdev->mc.real_vram_size; 161 args->vram_visible = rdev->mc.real_vram_size;
159 if (rdev->stollen_vga_memory) 162 if (rdev->stollen_vga_memory)
160 args->vram_visible -= radeon_bo_size(rdev->stollen_vga_memory); 163 args->vram_visible -= radeon_bo_size(rdev->stollen_vga_memory);
161 if (rdev->fbdev_rbo) 164 args->vram_visible -= radeon_fbdev_total_size(rdev);
162 args->vram_visible -= radeon_bo_size(rdev->fbdev_rbo);
163 args->gart_size = rdev->mc.gtt_size - rdev->cp.ring_size - 4096 - 165 args->gart_size = rdev->mc.gtt_size - rdev->cp.ring_size - 4096 -
164 RADEON_IB_POOL_SIZE*64*1024; 166 RADEON_IB_POOL_SIZE*64*1024;
165 return 0; 167 return 0;
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
index a212041e8b0b..a95907aa7eae 100644
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -55,6 +55,8 @@ static void radeon_hotplug_work_func(struct work_struct *work)
55 radeon_connector_hotplug(connector); 55 radeon_connector_hotplug(connector);
56 } 56 }
57 /* Just fire off a uevent and let userspace tell us what to do */ 57 /* Just fire off a uevent and let userspace tell us what to do */
58 radeonfb_hotplug(dev, false);
59
58 drm_sysfs_hotplug_event(dev); 60 drm_sysfs_hotplug_event(dev);
59} 61}
60 62
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 5413fcd63086..a2bc31465e4f 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -39,6 +39,7 @@
39#include <linux/i2c-algo-bit.h> 39#include <linux/i2c-algo-bit.h>
40#include "radeon_fixed.h" 40#include "radeon_fixed.h"
41 41
42struct radeon_bo;
42struct radeon_device; 43struct radeon_device;
43 44
44#define to_radeon_crtc(x) container_of(x, struct radeon_crtc, base) 45#define to_radeon_crtc(x) container_of(x, struct radeon_crtc, base)
@@ -202,6 +203,8 @@ enum radeon_dvo_chip {
202 DVO_SIL1178, 203 DVO_SIL1178,
203}; 204};
204 205
206struct radeon_fbdev;
207
205struct radeon_mode_info { 208struct radeon_mode_info {
206 struct atom_context *atom_context; 209 struct atom_context *atom_context;
207 struct card_info *atom_card_info; 210 struct card_info *atom_card_info;
@@ -218,6 +221,9 @@ struct radeon_mode_info {
218 struct drm_property *tmds_pll_property; 221 struct drm_property *tmds_pll_property;
219 /* hardcoded DFP edid from BIOS */ 222 /* hardcoded DFP edid from BIOS */
220 struct edid *bios_hardcoded_edid; 223 struct edid *bios_hardcoded_edid;
224
225 /* pointer to fbdev info structure */
226 struct radeon_fbdev *rfbdev;
221}; 227};
222 228
223#define MAX_H_CODE_TIMING_LEN 32 229#define MAX_H_CODE_TIMING_LEN 32
@@ -339,6 +345,7 @@ struct radeon_encoder {
339 enum radeon_rmx_type rmx_type; 345 enum radeon_rmx_type rmx_type;
340 struct drm_display_mode native_mode; 346 struct drm_display_mode native_mode;
341 void *enc_priv; 347 void *enc_priv;
348 int audio_polling_active;
342 int hdmi_offset; 349 int hdmi_offset;
343 int hdmi_config_offset; 350 int hdmi_config_offset;
344 int hdmi_audio_workaround; 351 int hdmi_audio_workaround;
@@ -532,11 +539,10 @@ extern void radeon_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
532 u16 blue, int regno); 539 u16 blue, int regno);
533extern void radeon_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green, 540extern void radeon_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
534 u16 *blue, int regno); 541 u16 *blue, int regno);
535struct drm_framebuffer *radeon_framebuffer_create(struct drm_device *dev, 542void radeon_framebuffer_init(struct drm_device *dev,
536 struct drm_mode_fb_cmd *mode_cmd, 543 struct radeon_framebuffer *rfb,
537 struct drm_gem_object *obj); 544 struct drm_mode_fb_cmd *mode_cmd,
538 545 struct drm_gem_object *obj);
539int radeonfb_probe(struct drm_device *dev);
540 546
541int radeonfb_remove(struct drm_device *dev, struct drm_framebuffer *fb); 547int radeonfb_remove(struct drm_device *dev, struct drm_framebuffer *fb);
542bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev); 548bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev);
@@ -575,4 +581,12 @@ void radeon_legacy_tv_adjust_pll2(struct drm_encoder *encoder,
575void radeon_legacy_tv_mode_set(struct drm_encoder *encoder, 581void radeon_legacy_tv_mode_set(struct drm_encoder *encoder,
576 struct drm_display_mode *mode, 582 struct drm_display_mode *mode,
577 struct drm_display_mode *adjusted_mode); 583 struct drm_display_mode *adjusted_mode);
584
585/* fbdev layer */
586int radeon_fbdev_init(struct radeon_device *rdev);
587void radeon_fbdev_fini(struct radeon_device *rdev);
588void radeon_fbdev_set_suspend(struct radeon_device *rdev, int state);
589int radeon_fbdev_total_size(struct radeon_device *rdev);
590bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj);
591void radeonfb_hotplug(struct drm_device *dev, bool polled);
578#endif 592#endif
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 122774742bd5..6a8617bac142 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -192,7 +192,7 @@ int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr)
192 } 192 }
193 for (i = 0; i < bo->placement.num_placement; i++) 193 for (i = 0; i < bo->placement.num_placement; i++)
194 bo->placements[i] |= TTM_PL_FLAG_NO_EVICT; 194 bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
195 r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false); 195 r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false, false);
196 if (likely(r == 0)) { 196 if (likely(r == 0)) {
197 bo->pin_count = 1; 197 bo->pin_count = 1;
198 if (gpu_addr != NULL) 198 if (gpu_addr != NULL)
@@ -216,7 +216,7 @@ int radeon_bo_unpin(struct radeon_bo *bo)
216 return 0; 216 return 0;
217 for (i = 0; i < bo->placement.num_placement; i++) 217 for (i = 0; i < bo->placement.num_placement; i++)
218 bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT; 218 bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
219 r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false); 219 r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false, false);
220 if (unlikely(r != 0)) 220 if (unlikely(r != 0))
221 dev_err(bo->rdev->dev, "%p validate failed for unpin\n", bo); 221 dev_err(bo->rdev->dev, "%p validate failed for unpin\n", bo);
222 return r; 222 return r;
@@ -331,7 +331,7 @@ int radeon_bo_list_validate(struct list_head *head)
331 lobj->rdomain); 331 lobj->rdomain);
332 } 332 }
333 r = ttm_bo_validate(&bo->tbo, &bo->placement, 333 r = ttm_bo_validate(&bo->tbo, &bo->placement,
334 true, false); 334 true, false, false);
335 if (unlikely(r)) 335 if (unlikely(r))
336 return r; 336 return r;
337 } 337 }
@@ -499,11 +499,33 @@ void radeon_bo_move_notify(struct ttm_buffer_object *bo,
499 radeon_bo_check_tiling(rbo, 0, 1); 499 radeon_bo_check_tiling(rbo, 0, 1);
500} 500}
501 501
502void radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo) 502int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
503{ 503{
504 struct radeon_device *rdev;
504 struct radeon_bo *rbo; 505 struct radeon_bo *rbo;
506 unsigned long offset, size;
507 int r;
508
505 if (!radeon_ttm_bo_is_radeon_bo(bo)) 509 if (!radeon_ttm_bo_is_radeon_bo(bo))
506 return; 510 return 0;
507 rbo = container_of(bo, struct radeon_bo, tbo); 511 rbo = container_of(bo, struct radeon_bo, tbo);
508 radeon_bo_check_tiling(rbo, 0, 0); 512 radeon_bo_check_tiling(rbo, 0, 0);
513 rdev = rbo->rdev;
514 if (bo->mem.mem_type == TTM_PL_VRAM) {
515 size = bo->mem.num_pages << PAGE_SHIFT;
516 offset = bo->mem.mm_node->start << PAGE_SHIFT;
517 if ((offset + size) > rdev->mc.visible_vram_size) {
 518 /* hurrah, the memory is not visible! */
519 radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
520 rbo->placement.lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
521 r = ttm_bo_validate(bo, &rbo->placement, false, true, false);
522 if (unlikely(r != 0))
523 return r;
524 offset = bo->mem.mm_node->start << PAGE_SHIFT;
525 /* this should not happen */
526 if ((offset + size) > rdev->mc.visible_vram_size)
527 return -EINVAL;
528 }
529 }
530 return 0;
509} 531}
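
fault_reserve_notify() gains a return value above because it may now migrate a buffer: a CPU fault on VRAM that lies beyond mc.visible_vram_size cannot be served through the PCI aperture, so the BO is re-validated with placement.lpfn clamped to the visible window. The visibility test itself is plain interval arithmetic; a stand-alone sketch with invented names:

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SHIFT 12

/* A VRAM buffer is CPU-mappable only if it ends below the visible window. */
static bool vram_cpu_visible(unsigned long start_page, unsigned long num_pages,
			     unsigned long long visible_vram_size)
{
	unsigned long long offset = (unsigned long long)start_page << PAGE_SHIFT;
	unsigned long long size = (unsigned long long)num_pages << PAGE_SHIFT;

	return offset + size <= visible_vram_size;
}

int main(void)
{
	/* e.g. a 512MB card with a 256MB CPU-visible aperture */
	unsigned long long visible = 256ULL << 20;

	printf("%d\n", vram_cpu_visible(0, 64, visible));	/* 1: low VRAM */
	printf("%d\n", vram_cpu_visible(70000, 64, visible));	/* 0: too high */
	return 0;
}
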
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
index 7ab43de1e244..353998dc2c03 100644
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -168,6 +168,6 @@ extern int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
168 bool force_drop); 168 bool force_drop);
169extern void radeon_bo_move_notify(struct ttm_buffer_object *bo, 169extern void radeon_bo_move_notify(struct ttm_buffer_object *bo,
170 struct ttm_mem_reg *mem); 170 struct ttm_mem_reg *mem);
171extern void radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo); 171extern int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
172extern int radeon_bo_get_surface_reg(struct radeon_bo *bo); 172extern int radeon_bo_get_surface_reg(struct radeon_bo *bo);
173#endif 173#endif
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index d031b6863082..af98f45954b3 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -33,6 +33,7 @@
33#include <ttm/ttm_bo_driver.h> 33#include <ttm/ttm_bo_driver.h>
34#include <ttm/ttm_placement.h> 34#include <ttm/ttm_placement.h>
35#include <ttm/ttm_module.h> 35#include <ttm/ttm_module.h>
36#include <ttm/ttm_page_alloc.h>
36#include <drm/drmP.h> 37#include <drm/drmP.h>
37#include <drm/radeon_drm.h> 38#include <drm/radeon_drm.h>
38#include <linux/seq_file.h> 39#include <linux/seq_file.h>
@@ -162,34 +163,21 @@ static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
162 (unsigned)type); 163 (unsigned)type);
163 return -EINVAL; 164 return -EINVAL;
164 } 165 }
165 man->io_offset = rdev->mc.agp_base;
166 man->io_size = rdev->mc.gtt_size;
167 man->io_addr = NULL;
168 if (!rdev->ddev->agp->cant_use_aperture) 166 if (!rdev->ddev->agp->cant_use_aperture)
169 man->flags = TTM_MEMTYPE_FLAG_NEEDS_IOREMAP | 167 man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
170 TTM_MEMTYPE_FLAG_MAPPABLE;
171 man->available_caching = TTM_PL_FLAG_UNCACHED | 168 man->available_caching = TTM_PL_FLAG_UNCACHED |
172 TTM_PL_FLAG_WC; 169 TTM_PL_FLAG_WC;
173 man->default_caching = TTM_PL_FLAG_WC; 170 man->default_caching = TTM_PL_FLAG_WC;
174 } else
175#endif
176 {
177 man->io_offset = 0;
178 man->io_size = 0;
179 man->io_addr = NULL;
180 } 171 }
172#endif
181 break; 173 break;
182 case TTM_PL_VRAM: 174 case TTM_PL_VRAM:
183 /* "On-card" video ram */ 175 /* "On-card" video ram */
184 man->gpu_offset = rdev->mc.vram_start; 176 man->gpu_offset = rdev->mc.vram_start;
185 man->flags = TTM_MEMTYPE_FLAG_FIXED | 177 man->flags = TTM_MEMTYPE_FLAG_FIXED |
186 TTM_MEMTYPE_FLAG_NEEDS_IOREMAP |
187 TTM_MEMTYPE_FLAG_MAPPABLE; 178 TTM_MEMTYPE_FLAG_MAPPABLE;
188 man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC; 179 man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
189 man->default_caching = TTM_PL_FLAG_WC; 180 man->default_caching = TTM_PL_FLAG_WC;
190 man->io_addr = NULL;
191 man->io_offset = rdev->mc.aper_base;
192 man->io_size = rdev->mc.aper_size;
193 break; 181 break;
194 default: 182 default:
195 DRM_ERROR("Unsupported memory type %u\n", (unsigned)type); 183 DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
@@ -244,9 +232,9 @@ static void radeon_move_null(struct ttm_buffer_object *bo,
244} 232}
245 233
246static int radeon_move_blit(struct ttm_buffer_object *bo, 234static int radeon_move_blit(struct ttm_buffer_object *bo,
247 bool evict, int no_wait, 235 bool evict, int no_wait_reserve, bool no_wait_gpu,
248 struct ttm_mem_reg *new_mem, 236 struct ttm_mem_reg *new_mem,
249 struct ttm_mem_reg *old_mem) 237 struct ttm_mem_reg *old_mem)
250{ 238{
251 struct radeon_device *rdev; 239 struct radeon_device *rdev;
252 uint64_t old_start, new_start; 240 uint64_t old_start, new_start;
@@ -290,13 +278,14 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
290 r = radeon_copy(rdev, old_start, new_start, new_mem->num_pages, fence); 278 r = radeon_copy(rdev, old_start, new_start, new_mem->num_pages, fence);
291 /* FIXME: handle copy error */ 279 /* FIXME: handle copy error */
292 r = ttm_bo_move_accel_cleanup(bo, (void *)fence, NULL, 280 r = ttm_bo_move_accel_cleanup(bo, (void *)fence, NULL,
293 evict, no_wait, new_mem); 281 evict, no_wait_reserve, no_wait_gpu, new_mem);
294 radeon_fence_unref(&fence); 282 radeon_fence_unref(&fence);
295 return r; 283 return r;
296} 284}
297 285
298static int radeon_move_vram_ram(struct ttm_buffer_object *bo, 286static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
299 bool evict, bool interruptible, bool no_wait, 287 bool evict, bool interruptible,
288 bool no_wait_reserve, bool no_wait_gpu,
300 struct ttm_mem_reg *new_mem) 289 struct ttm_mem_reg *new_mem)
301{ 290{
302 struct radeon_device *rdev; 291 struct radeon_device *rdev;
@@ -317,7 +306,7 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
317 placement.busy_placement = &placements; 306 placement.busy_placement = &placements;
318 placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT; 307 placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
319 r = ttm_bo_mem_space(bo, &placement, &tmp_mem, 308 r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
320 interruptible, no_wait); 309 interruptible, no_wait_reserve, no_wait_gpu);
321 if (unlikely(r)) { 310 if (unlikely(r)) {
322 return r; 311 return r;
323 } 312 }
@@ -331,11 +320,11 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
331 if (unlikely(r)) { 320 if (unlikely(r)) {
332 goto out_cleanup; 321 goto out_cleanup;
333 } 322 }
334 r = radeon_move_blit(bo, true, no_wait, &tmp_mem, old_mem); 323 r = radeon_move_blit(bo, true, no_wait_reserve, no_wait_gpu, &tmp_mem, old_mem);
335 if (unlikely(r)) { 324 if (unlikely(r)) {
336 goto out_cleanup; 325 goto out_cleanup;
337 } 326 }
338 r = ttm_bo_move_ttm(bo, true, no_wait, new_mem); 327 r = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, new_mem);
339out_cleanup: 328out_cleanup:
340 if (tmp_mem.mm_node) { 329 if (tmp_mem.mm_node) {
341 struct ttm_bo_global *glob = rdev->mman.bdev.glob; 330 struct ttm_bo_global *glob = rdev->mman.bdev.glob;
@@ -349,7 +338,8 @@ out_cleanup:
349} 338}
350 339
351static int radeon_move_ram_vram(struct ttm_buffer_object *bo, 340static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
352 bool evict, bool interruptible, bool no_wait, 341 bool evict, bool interruptible,
342 bool no_wait_reserve, bool no_wait_gpu,
353 struct ttm_mem_reg *new_mem) 343 struct ttm_mem_reg *new_mem)
354{ 344{
355 struct radeon_device *rdev; 345 struct radeon_device *rdev;
@@ -369,15 +359,15 @@ static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
369 placement.num_busy_placement = 1; 359 placement.num_busy_placement = 1;
370 placement.busy_placement = &placements; 360 placement.busy_placement = &placements;
371 placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT; 361 placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
372 r = ttm_bo_mem_space(bo, &placement, &tmp_mem, interruptible, no_wait); 362 r = ttm_bo_mem_space(bo, &placement, &tmp_mem, interruptible, no_wait_reserve, no_wait_gpu);
373 if (unlikely(r)) { 363 if (unlikely(r)) {
374 return r; 364 return r;
375 } 365 }
376 r = ttm_bo_move_ttm(bo, true, no_wait, &tmp_mem); 366 r = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, &tmp_mem);
377 if (unlikely(r)) { 367 if (unlikely(r)) {
378 goto out_cleanup; 368 goto out_cleanup;
379 } 369 }
380 r = radeon_move_blit(bo, true, no_wait, new_mem, old_mem); 370 r = radeon_move_blit(bo, true, no_wait_reserve, no_wait_gpu, new_mem, old_mem);
381 if (unlikely(r)) { 371 if (unlikely(r)) {
382 goto out_cleanup; 372 goto out_cleanup;
383 } 373 }
@@ -394,8 +384,9 @@ out_cleanup:
394} 384}
395 385
396static int radeon_bo_move(struct ttm_buffer_object *bo, 386static int radeon_bo_move(struct ttm_buffer_object *bo,
397 bool evict, bool interruptible, bool no_wait, 387 bool evict, bool interruptible,
398 struct ttm_mem_reg *new_mem) 388 bool no_wait_reserve, bool no_wait_gpu,
389 struct ttm_mem_reg *new_mem)
399{ 390{
400 struct radeon_device *rdev; 391 struct radeon_device *rdev;
401 struct ttm_mem_reg *old_mem = &bo->mem; 392 struct ttm_mem_reg *old_mem = &bo->mem;
@@ -422,23 +413,66 @@ static int radeon_bo_move(struct ttm_buffer_object *bo,
422 if (old_mem->mem_type == TTM_PL_VRAM && 413 if (old_mem->mem_type == TTM_PL_VRAM &&
423 new_mem->mem_type == TTM_PL_SYSTEM) { 414 new_mem->mem_type == TTM_PL_SYSTEM) {
424 r = radeon_move_vram_ram(bo, evict, interruptible, 415 r = radeon_move_vram_ram(bo, evict, interruptible,
425 no_wait, new_mem); 416 no_wait_reserve, no_wait_gpu, new_mem);
426 } else if (old_mem->mem_type == TTM_PL_SYSTEM && 417 } else if (old_mem->mem_type == TTM_PL_SYSTEM &&
427 new_mem->mem_type == TTM_PL_VRAM) { 418 new_mem->mem_type == TTM_PL_VRAM) {
428 r = radeon_move_ram_vram(bo, evict, interruptible, 419 r = radeon_move_ram_vram(bo, evict, interruptible,
429 no_wait, new_mem); 420 no_wait_reserve, no_wait_gpu, new_mem);
430 } else { 421 } else {
431 r = radeon_move_blit(bo, evict, no_wait, new_mem, old_mem); 422 r = radeon_move_blit(bo, evict, no_wait_reserve, no_wait_gpu, new_mem, old_mem);
432 } 423 }
433 424
434 if (r) { 425 if (r) {
435memcpy: 426memcpy:
436 r = ttm_bo_move_memcpy(bo, evict, no_wait, new_mem); 427 r = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
437 } 428 }
438
439 return r; 429 return r;
440} 430}
441 431
432static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
433{
434 struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
435 struct radeon_device *rdev = radeon_get_rdev(bdev);
436
437 mem->bus.addr = NULL;
438 mem->bus.offset = 0;
439 mem->bus.size = mem->num_pages << PAGE_SHIFT;
440 mem->bus.base = 0;
441 mem->bus.is_iomem = false;
442 if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
443 return -EINVAL;
444 switch (mem->mem_type) {
445 case TTM_PL_SYSTEM:
446 /* system memory */
447 return 0;
448 case TTM_PL_TT:
449#if __OS_HAS_AGP
450 if (rdev->flags & RADEON_IS_AGP) {
451 /* RADEON_IS_AGP is set only if AGP is active */
452 mem->bus.offset = mem->mm_node->start << PAGE_SHIFT;
453 mem->bus.base = rdev->mc.agp_base;
454 mem->bus.is_iomem = true;
455 }
456#endif
457 break;
458 case TTM_PL_VRAM:
459 mem->bus.offset = mem->mm_node->start << PAGE_SHIFT;
460 /* check if it's visible */
461 if ((mem->bus.offset + mem->bus.size) > rdev->mc.visible_vram_size)
462 return -EINVAL;
463 mem->bus.base = rdev->mc.aper_base;
464 mem->bus.is_iomem = true;
465 break;
466 default:
467 return -EINVAL;
468 }
469 return 0;
470}
471
472static void radeon_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
473{
474}
475
442static int radeon_sync_obj_wait(void *sync_obj, void *sync_arg, 476static int radeon_sync_obj_wait(void *sync_obj, void *sync_arg,
443 bool lazy, bool interruptible) 477 bool lazy, bool interruptible)
444{ 478{
@@ -479,6 +513,8 @@ static struct ttm_bo_driver radeon_bo_driver = {
479 .sync_obj_ref = &radeon_sync_obj_ref, 513 .sync_obj_ref = &radeon_sync_obj_ref,
480 .move_notify = &radeon_bo_move_notify, 514 .move_notify = &radeon_bo_move_notify,
481 .fault_reserve_notify = &radeon_bo_fault_reserve_notify, 515 .fault_reserve_notify = &radeon_bo_fault_reserve_notify,
516 .io_mem_reserve = &radeon_ttm_io_mem_reserve,
517 .io_mem_free = &radeon_ttm_io_mem_free,
482}; 518};
483 519
484int radeon_ttm_init(struct radeon_device *rdev) 520int radeon_ttm_init(struct radeon_device *rdev)
@@ -745,8 +781,8 @@ static int radeon_mm_dump_table(struct seq_file *m, void *data)
745static int radeon_ttm_debugfs_init(struct radeon_device *rdev) 781static int radeon_ttm_debugfs_init(struct radeon_device *rdev)
746{ 782{
747#if defined(CONFIG_DEBUG_FS) 783#if defined(CONFIG_DEBUG_FS)
748 static struct drm_info_list radeon_mem_types_list[RADEON_DEBUGFS_MEM_TYPES]; 784 static struct drm_info_list radeon_mem_types_list[RADEON_DEBUGFS_MEM_TYPES+1];
749 static char radeon_mem_types_names[RADEON_DEBUGFS_MEM_TYPES][32]; 785 static char radeon_mem_types_names[RADEON_DEBUGFS_MEM_TYPES+1][32];
750 unsigned i; 786 unsigned i;
751 787
752 for (i = 0; i < RADEON_DEBUGFS_MEM_TYPES; i++) { 788 for (i = 0; i < RADEON_DEBUGFS_MEM_TYPES; i++) {
@@ -763,7 +799,13 @@ static int radeon_ttm_debugfs_init(struct radeon_device *rdev)
763 radeon_mem_types_list[i].data = &rdev->mman.bdev.man[TTM_PL_TT].manager; 799 radeon_mem_types_list[i].data = &rdev->mman.bdev.man[TTM_PL_TT].manager;
764 800
765 } 801 }
766 return radeon_debugfs_add_files(rdev, radeon_mem_types_list, RADEON_DEBUGFS_MEM_TYPES); 802 /* Add ttm page pool to debugfs */
803 sprintf(radeon_mem_types_names[i], "ttm_page_pool");
804 radeon_mem_types_list[i].name = radeon_mem_types_names[i];
805 radeon_mem_types_list[i].show = &ttm_page_alloc_debugfs;
806 radeon_mem_types_list[i].driver_features = 0;
807 radeon_mem_types_list[i].data = NULL;
808 return radeon_debugfs_add_files(rdev, radeon_mem_types_list, RADEON_DEBUGFS_MEM_TYPES+1);
767 809
768#endif 810#endif
769 return 0; 811 return 0;
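
With io_offset/io_size/io_addr removed from ttm_mem_type_manager, per-buffer bus windows are now produced on demand by the new io_mem_reserve hook, and TTM resolves a CPU fault to bus.base + bus.offset plus the page offset within the buffer. A stand-alone sketch of that address math (struct and values invented for illustration, mirroring the mem->bus fields filled in above):

#include <stdio.h>

#define PAGE_SHIFT 12

struct bus_placement {			/* mirrors the mem->bus fields above */
	unsigned long long base;	/* aperture base (mc.aper_base)      */
	unsigned long long offset;	/* buffer start within the aperture  */
};

/* Physical address TTM would ioremap for a given page of the buffer. */
static unsigned long long fault_phys_addr(const struct bus_placement *bus,
					  unsigned long page_offset)
{
	return bus->base + bus->offset +
	       ((unsigned long long)page_offset << PAGE_SHIFT);
}

int main(void)
{
	struct bus_placement bus = { 0xd0000000ULL, 0x100000ULL };

	/* third page of a buffer that starts 1MB into the VRAM aperture */
	printf("0x%llx\n", fault_phys_addr(&bus, 2));	/* 0xd0102000 */
	return 0;
}
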
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
index 1a41cb268b72..dc76fe76eb25 100644
--- a/drivers/gpu/drm/radeon/rs400.c
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -243,8 +243,6 @@ int rs400_mc_wait_for_idle(struct radeon_device *rdev)
243 243
244void rs400_gpu_init(struct radeon_device *rdev) 244void rs400_gpu_init(struct radeon_device *rdev)
245{ 245{
246 /* FIXME: HDP same place on rs400 ? */
247 r100_hdp_reset(rdev);
248 /* FIXME: is this correct ? */ 246 /* FIXME: is this correct ? */
249 r420_pipes_init(rdev); 247 r420_pipes_init(rdev);
250 if (rs400_mc_wait_for_idle(rdev)) { 248 if (rs400_mc_wait_for_idle(rdev)) {
@@ -433,7 +431,7 @@ int rs400_resume(struct radeon_device *rdev)
433 /* setup MC before calling post tables */ 431 /* setup MC before calling post tables */
434 rs400_mc_program(rdev); 432 rs400_mc_program(rdev);
435 /* Reset gpu before posting otherwise ATOM will enter infinite loop */ 433 /* Reset gpu before posting otherwise ATOM will enter infinite loop */
436 if (radeon_gpu_reset(rdev)) { 434 if (radeon_asic_reset(rdev)) {
437 dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n", 435 dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
438 RREG32(R_000E40_RBBM_STATUS), 436 RREG32(R_000E40_RBBM_STATUS),
439 RREG32(R_0007C0_CP_STAT)); 437 RREG32(R_0007C0_CP_STAT));
@@ -497,7 +495,7 @@ int rs400_init(struct radeon_device *rdev)
497 return r; 495 return r;
498 } 496 }
499 /* Reset gpu before posting otherwise ATOM will enter infinite loop */ 497 /* Reset gpu before posting otherwise ATOM will enter infinite loop */
500 if (radeon_gpu_reset(rdev)) { 498 if (radeon_asic_reset(rdev)) {
501 dev_warn(rdev->dev, 499 dev_warn(rdev->dev,
502 "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n", 500 "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
503 RREG32(R_000E40_RBBM_STATUS), 501 RREG32(R_000E40_RBBM_STATUS),
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index a81bc7a21e14..5e3f21861f45 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -147,6 +147,78 @@ void rs600_hpd_fini(struct radeon_device *rdev)
147 } 147 }
148} 148}
149 149
150void rs600_bm_disable(struct radeon_device *rdev)
151{
152 u32 tmp;
153
154 /* disable bus mastering */
155 pci_read_config_word(rdev->pdev, 0x4, (u16*)&tmp);
156 pci_write_config_word(rdev->pdev, 0x4, tmp & 0xFFFB);
157 mdelay(1);
158}
159
160int rs600_asic_reset(struct radeon_device *rdev)
161{
162 u32 status, tmp;
163
164 struct rv515_mc_save save;
165
 166 /* stop all MC clients */
167 rv515_mc_stop(rdev, &save);
168 status = RREG32(R_000E40_RBBM_STATUS);
169 if (!G_000E40_GUI_ACTIVE(status)) {
170 return 0;
171 }
172 status = RREG32(R_000E40_RBBM_STATUS);
173 dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
174 /* stop CP */
175 WREG32(RADEON_CP_CSQ_CNTL, 0);
176 tmp = RREG32(RADEON_CP_RB_CNTL);
177 WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA);
178 WREG32(RADEON_CP_RB_RPTR_WR, 0);
179 WREG32(RADEON_CP_RB_WPTR, 0);
180 WREG32(RADEON_CP_RB_CNTL, tmp);
181 pci_save_state(rdev->pdev);
182 /* disable bus mastering */
183 rs600_bm_disable(rdev);
184 /* reset GA+VAP */
185 WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_VAP(1) |
186 S_0000F0_SOFT_RESET_GA(1));
187 RREG32(R_0000F0_RBBM_SOFT_RESET);
188 mdelay(500);
189 WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
190 mdelay(1);
191 status = RREG32(R_000E40_RBBM_STATUS);
192 dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
193 /* reset CP */
194 WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_CP(1));
195 RREG32(R_0000F0_RBBM_SOFT_RESET);
196 mdelay(500);
197 WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
198 mdelay(1);
199 status = RREG32(R_000E40_RBBM_STATUS);
200 dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
201 /* reset MC */
202 WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_MC(1));
203 RREG32(R_0000F0_RBBM_SOFT_RESET);
204 mdelay(500);
205 WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
206 mdelay(1);
207 status = RREG32(R_000E40_RBBM_STATUS);
208 dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
209 /* restore PCI & busmastering */
210 pci_restore_state(rdev->pdev);
211 /* Check if GPU is idle */
212 if (G_000E40_GA_BUSY(status) || G_000E40_VAP_BUSY(status)) {
213 dev_err(rdev->dev, "failed to reset GPU\n");
214 rdev->gpu_lockup = true;
215 return -1;
216 }
217 rv515_mc_resume(rdev, &save);
 218 dev_info(rdev->dev, "GPU reset succeeded\n");
219 return 0;
220}
221
150/* 222/*
151 * GART. 223 * GART.
152 */ 224 */
@@ -454,7 +526,6 @@ int rs600_mc_wait_for_idle(struct radeon_device *rdev)
454 526
455void rs600_gpu_init(struct radeon_device *rdev) 527void rs600_gpu_init(struct radeon_device *rdev)
456{ 528{
457 r100_hdp_reset(rdev);
458 r420_pipes_init(rdev); 529 r420_pipes_init(rdev);
459 /* Wait for mc idle */ 530 /* Wait for mc idle */
460 if (rs600_mc_wait_for_idle(rdev)) 531 if (rs600_mc_wait_for_idle(rdev))
@@ -601,7 +672,7 @@ int rs600_resume(struct radeon_device *rdev)
601 /* Resume clock before doing reset */ 672 /* Resume clock before doing reset */
602 rv515_clock_startup(rdev); 673 rv515_clock_startup(rdev);
603 /* Reset gpu before posting otherwise ATOM will enter infinite loop */ 674 /* Reset gpu before posting otherwise ATOM will enter infinite loop */
604 if (radeon_gpu_reset(rdev)) { 675 if (radeon_asic_reset(rdev)) {
605 dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n", 676 dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
606 RREG32(R_000E40_RBBM_STATUS), 677 RREG32(R_000E40_RBBM_STATUS),
607 RREG32(R_0007C0_CP_STAT)); 678 RREG32(R_0007C0_CP_STAT));
@@ -664,7 +735,7 @@ int rs600_init(struct radeon_device *rdev)
664 return -EINVAL; 735 return -EINVAL;
665 } 736 }
666 /* Reset gpu before posting otherwise ATOM will enter infinite loop */ 737 /* Reset gpu before posting otherwise ATOM will enter infinite loop */
667 if (radeon_gpu_reset(rdev)) { 738 if (radeon_asic_reset(rdev)) {
668 dev_warn(rdev->dev, 739 dev_warn(rdev->dev,
669 "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n", 740 "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
670 RREG32(R_000E40_RBBM_STATUS), 741 RREG32(R_000E40_RBBM_STATUS),
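
rs600_asic_reset() above repeats one idiom per block (GA+VAP, then CP, then MC): assert the block's soft-reset bit, read the register back to post the write, delay, release, delay, then sample RBBM_STATUS. Factored out, the pattern would look like this hypothetical helper (not part of the patch; the macros are from rs600d.h just below):

/* Hypothetical helper condensing the reset idiom used in rs600_asic_reset().
 * reset_mask is built from the S_0000F0_SOFT_RESET_*() macros. */
static u32 rs600_soft_reset_block(struct radeon_device *rdev, u32 reset_mask)
{
	WREG32(R_0000F0_RBBM_SOFT_RESET, reset_mask);
	(void)RREG32(R_0000F0_RBBM_SOFT_RESET);	/* read back to post the write */
	mdelay(500);				/* hold the block in reset */
	WREG32(R_0000F0_RBBM_SOFT_RESET, 0);	/* release reset */
	mdelay(1);				/* avoid racing RBBM_STATUS */
	return RREG32(R_000E40_RBBM_STATUS);
}

/* Usage mirroring the sequence above:
 *
 *	status = rs600_soft_reset_block(rdev, S_0000F0_SOFT_RESET_VAP(1) |
 *					      S_0000F0_SOFT_RESET_GA(1));
 *	status = rs600_soft_reset_block(rdev, S_0000F0_SOFT_RESET_CP(1));
 *	status = rs600_soft_reset_block(rdev, S_0000F0_SOFT_RESET_MC(1));
 */
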
diff --git a/drivers/gpu/drm/radeon/rs600d.h b/drivers/gpu/drm/radeon/rs600d.h
index e52d2695510b..08c4bebd3011 100644
--- a/drivers/gpu/drm/radeon/rs600d.h
+++ b/drivers/gpu/drm/radeon/rs600d.h
@@ -178,6 +178,52 @@
178#define S_000074_MC_IND_DATA(x) (((x) & 0xFFFFFFFF) << 0) 178#define S_000074_MC_IND_DATA(x) (((x) & 0xFFFFFFFF) << 0)
179#define G_000074_MC_IND_DATA(x) (((x) >> 0) & 0xFFFFFFFF) 179#define G_000074_MC_IND_DATA(x) (((x) >> 0) & 0xFFFFFFFF)
180#define C_000074_MC_IND_DATA 0x00000000 180#define C_000074_MC_IND_DATA 0x00000000
181#define R_0000F0_RBBM_SOFT_RESET 0x0000F0
182#define S_0000F0_SOFT_RESET_CP(x) (((x) & 0x1) << 0)
183#define G_0000F0_SOFT_RESET_CP(x) (((x) >> 0) & 0x1)
184#define C_0000F0_SOFT_RESET_CP 0xFFFFFFFE
185#define S_0000F0_SOFT_RESET_HI(x) (((x) & 0x1) << 1)
186#define G_0000F0_SOFT_RESET_HI(x) (((x) >> 1) & 0x1)
187#define C_0000F0_SOFT_RESET_HI 0xFFFFFFFD
188#define S_0000F0_SOFT_RESET_VAP(x) (((x) & 0x1) << 2)
189#define G_0000F0_SOFT_RESET_VAP(x) (((x) >> 2) & 0x1)
190#define C_0000F0_SOFT_RESET_VAP 0xFFFFFFFB
191#define S_0000F0_SOFT_RESET_RE(x) (((x) & 0x1) << 3)
192#define G_0000F0_SOFT_RESET_RE(x) (((x) >> 3) & 0x1)
193#define C_0000F0_SOFT_RESET_RE 0xFFFFFFF7
194#define S_0000F0_SOFT_RESET_PP(x) (((x) & 0x1) << 4)
195#define G_0000F0_SOFT_RESET_PP(x) (((x) >> 4) & 0x1)
196#define C_0000F0_SOFT_RESET_PP 0xFFFFFFEF
197#define S_0000F0_SOFT_RESET_E2(x) (((x) & 0x1) << 5)
198#define G_0000F0_SOFT_RESET_E2(x) (((x) >> 5) & 0x1)
199#define C_0000F0_SOFT_RESET_E2 0xFFFFFFDF
200#define S_0000F0_SOFT_RESET_RB(x) (((x) & 0x1) << 6)
201#define G_0000F0_SOFT_RESET_RB(x) (((x) >> 6) & 0x1)
202#define C_0000F0_SOFT_RESET_RB 0xFFFFFFBF
203#define S_0000F0_SOFT_RESET_HDP(x) (((x) & 0x1) << 7)
204#define G_0000F0_SOFT_RESET_HDP(x) (((x) >> 7) & 0x1)
205#define C_0000F0_SOFT_RESET_HDP 0xFFFFFF7F
206#define S_0000F0_SOFT_RESET_MC(x) (((x) & 0x1) << 8)
207#define G_0000F0_SOFT_RESET_MC(x) (((x) >> 8) & 0x1)
208#define C_0000F0_SOFT_RESET_MC 0xFFFFFEFF
209#define S_0000F0_SOFT_RESET_AIC(x) (((x) & 0x1) << 9)
210#define G_0000F0_SOFT_RESET_AIC(x) (((x) >> 9) & 0x1)
211#define C_0000F0_SOFT_RESET_AIC 0xFFFFFDFF
212#define S_0000F0_SOFT_RESET_VIP(x) (((x) & 0x1) << 10)
213#define G_0000F0_SOFT_RESET_VIP(x) (((x) >> 10) & 0x1)
214#define C_0000F0_SOFT_RESET_VIP 0xFFFFFBFF
215#define S_0000F0_SOFT_RESET_DISP(x) (((x) & 0x1) << 11)
216#define G_0000F0_SOFT_RESET_DISP(x) (((x) >> 11) & 0x1)
217#define C_0000F0_SOFT_RESET_DISP 0xFFFFF7FF
218#define S_0000F0_SOFT_RESET_CG(x) (((x) & 0x1) << 12)
219#define G_0000F0_SOFT_RESET_CG(x) (((x) >> 12) & 0x1)
220#define C_0000F0_SOFT_RESET_CG 0xFFFFEFFF
221#define S_0000F0_SOFT_RESET_GA(x) (((x) & 0x1) << 13)
222#define G_0000F0_SOFT_RESET_GA(x) (((x) >> 13) & 0x1)
223#define C_0000F0_SOFT_RESET_GA 0xFFFFDFFF
224#define S_0000F0_SOFT_RESET_IDCT(x) (((x) & 0x1) << 14)
225#define G_0000F0_SOFT_RESET_IDCT(x) (((x) >> 14) & 0x1)
226#define C_0000F0_SOFT_RESET_IDCT 0xFFFFBFFF
181#define R_000134_HDP_FB_LOCATION 0x000134 227#define R_000134_HDP_FB_LOCATION 0x000134
182#define S_000134_HDP_FB_START(x) (((x) & 0xFFFF) << 0) 228#define S_000134_HDP_FB_START(x) (((x) & 0xFFFF) << 0)
183#define G_000134_HDP_FB_START(x) (((x) >> 0) & 0xFFFF) 229#define G_000134_HDP_FB_START(x) (((x) >> 0) & 0xFFFF)
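
Every field in these register headers comes as a triple: S_reg_FIELD(x) shifts a value into position, G_reg_FIELD(v) extracts it, and C_reg_FIELD is the AND-mask that clears it (the complement of S_ applied to an all-ones field). A stand-alone demo of a read-modify-write built from that convention (macros copied from the hunk above; the surrounding program is illustrative only):

#include <stdint.h>
#include <stdio.h>

/* Copied from rs600d.h above: set, get and clear-mask for the 1-bit
 * VAP soft-reset field at bit 2 of RBBM_SOFT_RESET. */
#define S_0000F0_SOFT_RESET_VAP(x)	(((x) & 0x1) << 2)
#define G_0000F0_SOFT_RESET_VAP(x)	(((x) >> 2) & 0x1)
#define C_0000F0_SOFT_RESET_VAP		0xFFFFFFFB

int main(void)
{
	uint32_t reg = 0;

	/* read-modify-write: clear the field with C_, then OR in S_(value) */
	reg = (reg & C_0000F0_SOFT_RESET_VAP) | S_0000F0_SOFT_RESET_VAP(1);
	printf("set:   reg=0x%08X field=%u\n", (unsigned)reg,
	       (unsigned)G_0000F0_SOFT_RESET_VAP(reg));

	reg &= C_0000F0_SOFT_RESET_VAP;		/* clear the field again */
	printf("clear: reg=0x%08X field=%u\n", (unsigned)reg,
	       (unsigned)G_0000F0_SOFT_RESET_VAP(reg));
	return 0;
}
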
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index bbf3da790fd5..56a0aec84af2 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -48,8 +48,6 @@ static int rs690_mc_wait_for_idle(struct radeon_device *rdev)
48 48
49static void rs690_gpu_init(struct radeon_device *rdev) 49static void rs690_gpu_init(struct radeon_device *rdev)
50{ 50{
51 /* FIXME: HDP same place on rs690 ? */
52 r100_hdp_reset(rdev);
53 /* FIXME: is this correct ? */ 51 /* FIXME: is this correct ? */
54 r420_pipes_init(rdev); 52 r420_pipes_init(rdev);
55 if (rs690_mc_wait_for_idle(rdev)) { 53 if (rs690_mc_wait_for_idle(rdev)) {
@@ -653,7 +651,7 @@ int rs690_resume(struct radeon_device *rdev)
653 /* Resume clock before doing reset */ 651 /* Resume clock before doing reset */
654 rv515_clock_startup(rdev); 652 rv515_clock_startup(rdev);
655 /* Reset gpu before posting otherwise ATOM will enter infinite loop */ 653 /* Reset gpu before posting otherwise ATOM will enter infinite loop */
656 if (radeon_gpu_reset(rdev)) { 654 if (radeon_asic_reset(rdev)) {
657 dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n", 655 dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
658 RREG32(R_000E40_RBBM_STATUS), 656 RREG32(R_000E40_RBBM_STATUS),
659 RREG32(R_0007C0_CP_STAT)); 657 RREG32(R_0007C0_CP_STAT));
@@ -717,7 +715,7 @@ int rs690_init(struct radeon_device *rdev)
717 return -EINVAL; 715 return -EINVAL;
718 } 716 }
719 /* Reset gpu before posting otherwise ATOM will enter infinite loop */ 717 /* Reset gpu before posting otherwise ATOM will enter infinite loop */
720 if (radeon_gpu_reset(rdev)) { 718 if (radeon_asic_reset(rdev)) {
721 dev_warn(rdev->dev, 719 dev_warn(rdev->dev,
722 "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n", 720 "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
723 RREG32(R_000E40_RBBM_STATUS), 721 RREG32(R_000E40_RBBM_STATUS),
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index 9035121f4b58..c513473d72ae 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -147,16 +147,11 @@ void rv515_gpu_init(struct radeon_device *rdev)
147{ 147{
148 unsigned pipe_select_current, gb_pipe_select, tmp; 148 unsigned pipe_select_current, gb_pipe_select, tmp;
149 149
150 r100_hdp_reset(rdev);
151 r100_rb2d_reset(rdev);
152
153 if (r100_gui_wait_for_idle(rdev)) { 150 if (r100_gui_wait_for_idle(rdev)) {
154 printk(KERN_WARNING "Failed to wait GUI idle while " 151 printk(KERN_WARNING "Failed to wait GUI idle while "
155 "reseting GPU. Bad things might happen.\n"); 152 "reseting GPU. Bad things might happen.\n");
156 } 153 }
157
158 rv515_vga_render_disable(rdev); 154 rv515_vga_render_disable(rdev);
159
160 r420_pipes_init(rdev); 155 r420_pipes_init(rdev);
161 gb_pipe_select = RREG32(0x402C); 156 gb_pipe_select = RREG32(0x402C);
162 tmp = RREG32(0x170C); 157 tmp = RREG32(0x170C);
@@ -174,91 +169,6 @@ void rv515_gpu_init(struct radeon_device *rdev)
174 } 169 }
175} 170}
176 171
177int rv515_ga_reset(struct radeon_device *rdev)
178{
179 uint32_t tmp;
180 bool reinit_cp;
181 int i;
182
183 reinit_cp = rdev->cp.ready;
184 rdev->cp.ready = false;
185 for (i = 0; i < rdev->usec_timeout; i++) {
186 WREG32(CP_CSQ_MODE, 0);
187 WREG32(CP_CSQ_CNTL, 0);
188 WREG32(RBBM_SOFT_RESET, 0x32005);
189 (void)RREG32(RBBM_SOFT_RESET);
190 udelay(200);
191 WREG32(RBBM_SOFT_RESET, 0);
192 /* Wait to prevent race in RBBM_STATUS */
193 mdelay(1);
194 tmp = RREG32(RBBM_STATUS);
195 if (tmp & ((1 << 20) | (1 << 26))) {
196 DRM_ERROR("VAP & CP still busy (RBBM_STATUS=0x%08X)\n", tmp);
197 /* GA still busy soft reset it */
198 WREG32(0x429C, 0x200);
199 WREG32(VAP_PVS_STATE_FLUSH_REG, 0);
200 WREG32(0x43E0, 0);
201 WREG32(0x43E4, 0);
202 WREG32(0x24AC, 0);
203 }
204 /* Wait to prevent race in RBBM_STATUS */
205 mdelay(1);
206 tmp = RREG32(RBBM_STATUS);
207 if (!(tmp & ((1 << 20) | (1 << 26)))) {
208 break;
209 }
210 }
211 for (i = 0; i < rdev->usec_timeout; i++) {
212 tmp = RREG32(RBBM_STATUS);
213 if (!(tmp & ((1 << 20) | (1 << 26)))) {
214 DRM_INFO("GA reset succeed (RBBM_STATUS=0x%08X)\n",
215 tmp);
216 DRM_INFO("GA_IDLE=0x%08X\n", RREG32(0x425C));
217 DRM_INFO("RB3D_RESET_STATUS=0x%08X\n", RREG32(0x46f0));
218 DRM_INFO("ISYNC_CNTL=0x%08X\n", RREG32(0x1724));
219 if (reinit_cp) {
220 return r100_cp_init(rdev, rdev->cp.ring_size);
221 }
222 return 0;
223 }
224 DRM_UDELAY(1);
225 }
226 tmp = RREG32(RBBM_STATUS);
227 DRM_ERROR("Failed to reset GA ! (RBBM_STATUS=0x%08X)\n", tmp);
228 return -1;
229}
230
231int rv515_gpu_reset(struct radeon_device *rdev)
232{
233 uint32_t status;
234
235 /* reset order likely matter */
236 status = RREG32(RBBM_STATUS);
237 /* reset HDP */
238 r100_hdp_reset(rdev);
239 /* reset rb2d */
240 if (status & ((1 << 17) | (1 << 18) | (1 << 27))) {
241 r100_rb2d_reset(rdev);
242 }
243 /* reset GA */
244 if (status & ((1 << 20) | (1 << 26))) {
245 rv515_ga_reset(rdev);
246 }
247 /* reset CP */
248 status = RREG32(RBBM_STATUS);
249 if (status & (1 << 16)) {
250 r100_cp_reset(rdev);
251 }
252 /* Check if GPU is idle */
253 status = RREG32(RBBM_STATUS);
254 if (status & (1 << 31)) {
255 DRM_ERROR("Failed to reset GPU (RBBM_STATUS=0x%08X)\n", status);
256 return -1;
257 }
258 DRM_INFO("GPU reset succeed (RBBM_STATUS=0x%08X)\n", status);
259 return 0;
260}
261
262static void rv515_vram_get_type(struct radeon_device *rdev) 172static void rv515_vram_get_type(struct radeon_device *rdev)
263{ 173{
264 uint32_t tmp; 174 uint32_t tmp;
@@ -335,7 +245,7 @@ static int rv515_debugfs_ga_info(struct seq_file *m, void *data)
335 245
336 tmp = RREG32(0x2140); 246 tmp = RREG32(0x2140);
337 seq_printf(m, "VAP_CNTL_STATUS 0x%08x\n", tmp); 247 seq_printf(m, "VAP_CNTL_STATUS 0x%08x\n", tmp);
338 radeon_gpu_reset(rdev); 248 radeon_asic_reset(rdev);
339 tmp = RREG32(0x425C); 249 tmp = RREG32(0x425C);
340 seq_printf(m, "GA_IDLE 0x%08x\n", tmp); 250 seq_printf(m, "GA_IDLE 0x%08x\n", tmp);
341 return 0; 251 return 0;
@@ -503,7 +413,7 @@ int rv515_resume(struct radeon_device *rdev)
503 /* Resume clock before doing reset */ 413 /* Resume clock before doing reset */
504 rv515_clock_startup(rdev); 414 rv515_clock_startup(rdev);
505 /* Reset gpu before posting otherwise ATOM will enter infinite loop */ 415 /* Reset gpu before posting otherwise ATOM will enter infinite loop */
506 if (radeon_gpu_reset(rdev)) { 416 if (radeon_asic_reset(rdev)) {
507 dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n", 417 dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
508 RREG32(R_000E40_RBBM_STATUS), 418 RREG32(R_000E40_RBBM_STATUS),
509 RREG32(R_0007C0_CP_STAT)); 419 RREG32(R_0007C0_CP_STAT));
@@ -573,7 +483,7 @@ int rv515_init(struct radeon_device *rdev)
573 return -EINVAL; 483 return -EINVAL;
574 } 484 }
575 /* Reset gpu before posting otherwise ATOM will enter infinite loop */ 485 /* Reset gpu before posting otherwise ATOM will enter infinite loop */
576 if (radeon_gpu_reset(rdev)) { 486 if (radeon_asic_reset(rdev)) {
577 dev_warn(rdev->dev, 487 dev_warn(rdev->dev,
578 "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n", 488 "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
579 RREG32(R_000E40_RBBM_STATUS), 489 RREG32(R_000E40_RBBM_STATUS),
diff --git a/drivers/gpu/drm/radeon/rv515d.h b/drivers/gpu/drm/radeon/rv515d.h
index fc216e49384d..590309a710b1 100644
--- a/drivers/gpu/drm/radeon/rv515d.h
+++ b/drivers/gpu/drm/radeon/rv515d.h
@@ -217,6 +217,52 @@
217#define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF) 217#define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF)
218 218
219/* Registers */ 219/* Registers */
220#define R_0000F0_RBBM_SOFT_RESET 0x0000F0
221#define S_0000F0_SOFT_RESET_CP(x) (((x) & 0x1) << 0)
222#define G_0000F0_SOFT_RESET_CP(x) (((x) >> 0) & 0x1)
223#define C_0000F0_SOFT_RESET_CP 0xFFFFFFFE
224#define S_0000F0_SOFT_RESET_HI(x) (((x) & 0x1) << 1)
225#define G_0000F0_SOFT_RESET_HI(x) (((x) >> 1) & 0x1)
226#define C_0000F0_SOFT_RESET_HI 0xFFFFFFFD
227#define S_0000F0_SOFT_RESET_VAP(x) (((x) & 0x1) << 2)
228#define G_0000F0_SOFT_RESET_VAP(x) (((x) >> 2) & 0x1)
229#define C_0000F0_SOFT_RESET_VAP 0xFFFFFFFB
230#define S_0000F0_SOFT_RESET_RE(x) (((x) & 0x1) << 3)
231#define G_0000F0_SOFT_RESET_RE(x) (((x) >> 3) & 0x1)
232#define C_0000F0_SOFT_RESET_RE 0xFFFFFFF7
233#define S_0000F0_SOFT_RESET_PP(x) (((x) & 0x1) << 4)
234#define G_0000F0_SOFT_RESET_PP(x) (((x) >> 4) & 0x1)
235#define C_0000F0_SOFT_RESET_PP 0xFFFFFFEF
236#define S_0000F0_SOFT_RESET_E2(x) (((x) & 0x1) << 5)
237#define G_0000F0_SOFT_RESET_E2(x) (((x) >> 5) & 0x1)
238#define C_0000F0_SOFT_RESET_E2 0xFFFFFFDF
239#define S_0000F0_SOFT_RESET_RB(x) (((x) & 0x1) << 6)
240#define G_0000F0_SOFT_RESET_RB(x) (((x) >> 6) & 0x1)
241#define C_0000F0_SOFT_RESET_RB 0xFFFFFFBF
242#define S_0000F0_SOFT_RESET_HDP(x) (((x) & 0x1) << 7)
243#define G_0000F0_SOFT_RESET_HDP(x) (((x) >> 7) & 0x1)
244#define C_0000F0_SOFT_RESET_HDP 0xFFFFFF7F
245#define S_0000F0_SOFT_RESET_MC(x) (((x) & 0x1) << 8)
246#define G_0000F0_SOFT_RESET_MC(x) (((x) >> 8) & 0x1)
247#define C_0000F0_SOFT_RESET_MC 0xFFFFFEFF
248#define S_0000F0_SOFT_RESET_AIC(x) (((x) & 0x1) << 9)
249#define G_0000F0_SOFT_RESET_AIC(x) (((x) >> 9) & 0x1)
250#define C_0000F0_SOFT_RESET_AIC 0xFFFFFDFF
251#define S_0000F0_SOFT_RESET_VIP(x) (((x) & 0x1) << 10)
252#define G_0000F0_SOFT_RESET_VIP(x) (((x) >> 10) & 0x1)
253#define C_0000F0_SOFT_RESET_VIP 0xFFFFFBFF
254#define S_0000F0_SOFT_RESET_DISP(x) (((x) & 0x1) << 11)
255#define G_0000F0_SOFT_RESET_DISP(x) (((x) >> 11) & 0x1)
256#define C_0000F0_SOFT_RESET_DISP 0xFFFFF7FF
257#define S_0000F0_SOFT_RESET_CG(x) (((x) & 0x1) << 12)
258#define G_0000F0_SOFT_RESET_CG(x) (((x) >> 12) & 0x1)
259#define C_0000F0_SOFT_RESET_CG 0xFFFFEFFF
260#define S_0000F0_SOFT_RESET_GA(x) (((x) & 0x1) << 13)
261#define G_0000F0_SOFT_RESET_GA(x) (((x) >> 13) & 0x1)
262#define C_0000F0_SOFT_RESET_GA 0xFFFFDFFF
263#define S_0000F0_SOFT_RESET_IDCT(x) (((x) & 0x1) << 14)
264#define G_0000F0_SOFT_RESET_IDCT(x) (((x) >> 14) & 0x1)
265#define C_0000F0_SOFT_RESET_IDCT 0xFFFFBFFF
220#define R_0000F8_CONFIG_MEMSIZE 0x0000F8 266#define R_0000F8_CONFIG_MEMSIZE 0x0000F8
221#define S_0000F8_CONFIG_MEMSIZE(x) (((x) & 0xFFFFFFFF) << 0) 267#define S_0000F8_CONFIG_MEMSIZE(x) (((x) & 0xFFFFFFFF) << 0)
222#define G_0000F8_CONFIG_MEMSIZE(x) (((x) >> 0) & 0xFFFFFFFF) 268#define G_0000F8_CONFIG_MEMSIZE(x) (((x) >> 0) & 0xFFFFFFFF)
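The S_/G_/C_ triplets added above follow radeon's usual register-accessor convention: S_ shifts a field value into position, G_ extracts it, and C_ is the AND-mask that clears it. As a rough illustration (a sketch, not code from this patch), a caller could pulse the VAP and GA soft-reset bits like so:

	WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_VAP(1) |
					 S_0000F0_SOFT_RESET_GA(1));
	(void)RREG32(R_0000F0_RBBM_SOFT_RESET);	/* post the write */
	mdelay(1);
	WREG32(R_0000F0_RBBM_SOFT_RESET, 0);	/* release the blocks */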
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 97958a64df1a..a74683e18612 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -237,7 +237,6 @@ void r700_cp_stop(struct radeon_device *rdev)
237 WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT)); 237 WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT));
238} 238}
239 239
240
241static int rv770_cp_load_microcode(struct radeon_device *rdev) 240static int rv770_cp_load_microcode(struct radeon_device *rdev)
242{ 241{
243 const __be32 *fw_data; 242 const __be32 *fw_data;
@@ -272,6 +271,11 @@ static int rv770_cp_load_microcode(struct radeon_device *rdev)
272 return 0; 271 return 0;
273} 272}
274 273
274void r700_cp_fini(struct radeon_device *rdev)
275{
276 r700_cp_stop(rdev);
277 radeon_ring_fini(rdev);
278}
275 279
276/* 280/*
277 * Core functions 281 * Core functions
@@ -906,23 +910,12 @@ int rv770_mc_init(struct radeon_device *rdev)
906 rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE); 910 rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
907 rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE); 911 rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
908 rdev->mc.visible_vram_size = rdev->mc.aper_size; 912 rdev->mc.visible_vram_size = rdev->mc.aper_size;
909 /* FIXME remove this once we support unmappable VRAM */
910 if (rdev->mc.mc_vram_size > rdev->mc.aper_size) {
911 rdev->mc.mc_vram_size = rdev->mc.aper_size;
912 rdev->mc.real_vram_size = rdev->mc.aper_size;
913 }
914 r600_vram_gtt_location(rdev, &rdev->mc); 913 r600_vram_gtt_location(rdev, &rdev->mc);
915 radeon_update_bandwidth_info(rdev); 914 radeon_update_bandwidth_info(rdev);
916 915
917 return 0; 916 return 0;
918} 917}
919 918
920int rv770_gpu_reset(struct radeon_device *rdev)
921{
922 /* FIXME: implement any rv770 specific bits */
923 return r600_gpu_reset(rdev);
924}
925
926static int rv770_startup(struct radeon_device *rdev) 919static int rv770_startup(struct radeon_device *rdev)
927{ 920{
928 int r; 921 int r;
@@ -1132,7 +1125,7 @@ int rv770_init(struct radeon_device *rdev)
1132 r = rv770_startup(rdev); 1125 r = rv770_startup(rdev);
1133 if (r) { 1126 if (r) {
1134 dev_err(rdev->dev, "disabling GPU acceleration\n"); 1127 dev_err(rdev->dev, "disabling GPU acceleration\n");
1135 r600_cp_fini(rdev); 1128 r700_cp_fini(rdev);
1136 r600_wb_fini(rdev); 1129 r600_wb_fini(rdev);
1137 r600_irq_fini(rdev); 1130 r600_irq_fini(rdev);
1138 radeon_irq_kms_fini(rdev); 1131 radeon_irq_kms_fini(rdev);
@@ -1166,7 +1159,7 @@ void rv770_fini(struct radeon_device *rdev)
1166{ 1159{
1167 radeon_pm_fini(rdev); 1160 radeon_pm_fini(rdev);
1168 r600_blit_fini(rdev); 1161 r600_blit_fini(rdev);
1169 r600_cp_fini(rdev); 1162 r700_cp_fini(rdev);
1170 r600_wb_fini(rdev); 1163 r600_wb_fini(rdev);
1171 r600_irq_fini(rdev); 1164 r600_irq_fini(rdev);
1172 radeon_irq_kms_fini(rdev); 1165 radeon_irq_kms_fini(rdev);
diff --git a/drivers/gpu/drm/ttm/Makefile b/drivers/gpu/drm/ttm/Makefile
index 1e138f5bae09..4256e2006476 100644
--- a/drivers/gpu/drm/ttm/Makefile
+++ b/drivers/gpu/drm/ttm/Makefile
@@ -4,6 +4,6 @@
4ccflags-y := -Iinclude/drm 4ccflags-y := -Iinclude/drm
5ttm-y := ttm_agp_backend.o ttm_memory.o ttm_tt.o ttm_bo.o \ 5ttm-y := ttm_agp_backend.o ttm_memory.o ttm_tt.o ttm_bo.o \
6 ttm_bo_util.o ttm_bo_vm.o ttm_module.o ttm_global.o \ 6 ttm_bo_util.o ttm_bo_vm.o ttm_module.o ttm_global.o \
7 ttm_object.o ttm_lock.o ttm_execbuf_util.o 7 ttm_object.o ttm_lock.o ttm_execbuf_util.o ttm_page_alloc.o
8 8
9obj-$(CONFIG_DRM_TTM) += ttm.o 9obj-$(CONFIG_DRM_TTM) += ttm.o
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index dd47b2a9a791..3b5b094b1397 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -79,8 +79,6 @@ static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type)
79 printk(KERN_ERR TTM_PFX " use_type: %d\n", man->use_type); 79 printk(KERN_ERR TTM_PFX " use_type: %d\n", man->use_type);
80 printk(KERN_ERR TTM_PFX " flags: 0x%08X\n", man->flags); 80 printk(KERN_ERR TTM_PFX " flags: 0x%08X\n", man->flags);
81 printk(KERN_ERR TTM_PFX " gpu_offset: 0x%08lX\n", man->gpu_offset); 81 printk(KERN_ERR TTM_PFX " gpu_offset: 0x%08lX\n", man->gpu_offset);
82 printk(KERN_ERR TTM_PFX " io_offset: 0x%08lX\n", man->io_offset);
83 printk(KERN_ERR TTM_PFX " io_size: %ld\n", man->io_size);
84 printk(KERN_ERR TTM_PFX " size: %llu\n", man->size); 82 printk(KERN_ERR TTM_PFX " size: %llu\n", man->size);
85 printk(KERN_ERR TTM_PFX " available_caching: 0x%08X\n", 83 printk(KERN_ERR TTM_PFX " available_caching: 0x%08X\n",
86 man->available_caching); 84 man->available_caching);
@@ -357,7 +355,8 @@ static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
357 355
358static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo, 356static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
359 struct ttm_mem_reg *mem, 357 struct ttm_mem_reg *mem,
360 bool evict, bool interruptible, bool no_wait) 358 bool evict, bool interruptible,
359 bool no_wait_reserve, bool no_wait_gpu)
361{ 360{
362 struct ttm_bo_device *bdev = bo->bdev; 361 struct ttm_bo_device *bdev = bo->bdev;
363 bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem); 362 bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem);
@@ -402,12 +401,12 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
402 401
403 if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) && 402 if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
404 !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED)) 403 !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
405 ret = ttm_bo_move_ttm(bo, evict, no_wait, mem); 404 ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, mem);
406 else if (bdev->driver->move) 405 else if (bdev->driver->move)
407 ret = bdev->driver->move(bo, evict, interruptible, 406 ret = bdev->driver->move(bo, evict, interruptible,
408 no_wait, mem); 407 no_wait_reserve, no_wait_gpu, mem);
409 else 408 else
410 ret = ttm_bo_move_memcpy(bo, evict, no_wait, mem); 409 ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, mem);
411 410
412 if (ret) 411 if (ret)
413 goto out_err; 412 goto out_err;
@@ -606,7 +605,7 @@ void ttm_bo_unref(struct ttm_buffer_object **p_bo)
606EXPORT_SYMBOL(ttm_bo_unref); 605EXPORT_SYMBOL(ttm_bo_unref);
607 606
608static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible, 607static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
609 bool no_wait) 608 bool no_wait_reserve, bool no_wait_gpu)
610{ 609{
611 struct ttm_bo_device *bdev = bo->bdev; 610 struct ttm_bo_device *bdev = bo->bdev;
612 struct ttm_bo_global *glob = bo->glob; 611 struct ttm_bo_global *glob = bo->glob;
@@ -615,7 +614,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
615 int ret = 0; 614 int ret = 0;
616 615
617 spin_lock(&bo->lock); 616 spin_lock(&bo->lock);
618 ret = ttm_bo_wait(bo, false, interruptible, no_wait); 617 ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
619 spin_unlock(&bo->lock); 618 spin_unlock(&bo->lock);
620 619
621 if (unlikely(ret != 0)) { 620 if (unlikely(ret != 0)) {
@@ -631,6 +630,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
631 630
632 evict_mem = bo->mem; 631 evict_mem = bo->mem;
633 evict_mem.mm_node = NULL; 632 evict_mem.mm_node = NULL;
633 evict_mem.bus.io_reserved = false;
634 634
635 placement.fpfn = 0; 635 placement.fpfn = 0;
636 placement.lpfn = 0; 636 placement.lpfn = 0;
@@ -638,7 +638,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
638 placement.num_busy_placement = 0; 638 placement.num_busy_placement = 0;
639 bdev->driver->evict_flags(bo, &placement); 639 bdev->driver->evict_flags(bo, &placement);
640 ret = ttm_bo_mem_space(bo, &placement, &evict_mem, interruptible, 640 ret = ttm_bo_mem_space(bo, &placement, &evict_mem, interruptible,
641 no_wait); 641 no_wait_reserve, no_wait_gpu);
642 if (ret) { 642 if (ret) {
643 if (ret != -ERESTARTSYS) { 643 if (ret != -ERESTARTSYS) {
644 printk(KERN_ERR TTM_PFX 644 printk(KERN_ERR TTM_PFX
@@ -650,7 +650,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
650 } 650 }
651 651
652 ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible, 652 ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible,
653 no_wait); 653 no_wait_reserve, no_wait_gpu);
654 if (ret) { 654 if (ret) {
655 if (ret != -ERESTARTSYS) 655 if (ret != -ERESTARTSYS)
656 printk(KERN_ERR TTM_PFX "Buffer eviction failed\n"); 656 printk(KERN_ERR TTM_PFX "Buffer eviction failed\n");
@@ -670,7 +670,8 @@ out:
670 670
671static int ttm_mem_evict_first(struct ttm_bo_device *bdev, 671static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
672 uint32_t mem_type, 672 uint32_t mem_type,
673 bool interruptible, bool no_wait) 673 bool interruptible, bool no_wait_reserve,
674 bool no_wait_gpu)
674{ 675{
675 struct ttm_bo_global *glob = bdev->glob; 676 struct ttm_bo_global *glob = bdev->glob;
676 struct ttm_mem_type_manager *man = &bdev->man[mem_type]; 677 struct ttm_mem_type_manager *man = &bdev->man[mem_type];
@@ -687,11 +688,11 @@ retry:
687 bo = list_first_entry(&man->lru, struct ttm_buffer_object, lru); 688 bo = list_first_entry(&man->lru, struct ttm_buffer_object, lru);
688 kref_get(&bo->list_kref); 689 kref_get(&bo->list_kref);
689 690
690 ret = ttm_bo_reserve_locked(bo, false, true, false, 0); 691 ret = ttm_bo_reserve_locked(bo, false, no_wait_reserve, false, 0);
691 692
692 if (unlikely(ret == -EBUSY)) { 693 if (unlikely(ret == -EBUSY)) {
693 spin_unlock(&glob->lru_lock); 694 spin_unlock(&glob->lru_lock);
694 if (likely(!no_wait)) 695 if (likely(!no_wait_gpu))
695 ret = ttm_bo_wait_unreserved(bo, interruptible); 696 ret = ttm_bo_wait_unreserved(bo, interruptible);
696 697
697 kref_put(&bo->list_kref, ttm_bo_release_list); 698 kref_put(&bo->list_kref, ttm_bo_release_list);
@@ -713,7 +714,7 @@ retry:
713 while (put_count--) 714 while (put_count--)
714 kref_put(&bo->list_kref, ttm_bo_ref_bug); 715 kref_put(&bo->list_kref, ttm_bo_ref_bug);
715 716
716 ret = ttm_bo_evict(bo, interruptible, no_wait); 717 ret = ttm_bo_evict(bo, interruptible, no_wait_reserve, no_wait_gpu);
717 ttm_bo_unreserve(bo); 718 ttm_bo_unreserve(bo);
718 719
719 kref_put(&bo->list_kref, ttm_bo_release_list); 720 kref_put(&bo->list_kref, ttm_bo_release_list);
@@ -764,7 +765,9 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
764 uint32_t mem_type, 765 uint32_t mem_type,
765 struct ttm_placement *placement, 766 struct ttm_placement *placement,
766 struct ttm_mem_reg *mem, 767 struct ttm_mem_reg *mem,
767 bool interruptible, bool no_wait) 768 bool interruptible,
769 bool no_wait_reserve,
770 bool no_wait_gpu)
768{ 771{
769 struct ttm_bo_device *bdev = bo->bdev; 772 struct ttm_bo_device *bdev = bo->bdev;
770 struct ttm_bo_global *glob = bdev->glob; 773 struct ttm_bo_global *glob = bdev->glob;
@@ -785,7 +788,7 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
785 } 788 }
786 spin_unlock(&glob->lru_lock); 789 spin_unlock(&glob->lru_lock);
787 ret = ttm_mem_evict_first(bdev, mem_type, interruptible, 790 ret = ttm_mem_evict_first(bdev, mem_type, interruptible,
788 no_wait); 791 no_wait_reserve, no_wait_gpu);
789 if (unlikely(ret != 0)) 792 if (unlikely(ret != 0))
790 return ret; 793 return ret;
791 } while (1); 794 } while (1);
@@ -855,7 +858,8 @@ static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
855int ttm_bo_mem_space(struct ttm_buffer_object *bo, 858int ttm_bo_mem_space(struct ttm_buffer_object *bo,
856 struct ttm_placement *placement, 859 struct ttm_placement *placement,
857 struct ttm_mem_reg *mem, 860 struct ttm_mem_reg *mem,
858 bool interruptible, bool no_wait) 861 bool interruptible, bool no_wait_reserve,
862 bool no_wait_gpu)
859{ 863{
860 struct ttm_bo_device *bdev = bo->bdev; 864 struct ttm_bo_device *bdev = bo->bdev;
861 struct ttm_mem_type_manager *man; 865 struct ttm_mem_type_manager *man;
@@ -952,7 +956,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
952 } 956 }
953 957
954 ret = ttm_bo_mem_force_space(bo, mem_type, placement, mem, 958 ret = ttm_bo_mem_force_space(bo, mem_type, placement, mem,
955 interruptible, no_wait); 959 interruptible, no_wait_reserve, no_wait_gpu);
956 if (ret == 0 && mem->mm_node) { 960 if (ret == 0 && mem->mm_node) {
957 mem->placement = cur_flags; 961 mem->placement = cur_flags;
958 mem->mm_node->private = bo; 962 mem->mm_node->private = bo;
@@ -978,7 +982,8 @@ EXPORT_SYMBOL(ttm_bo_wait_cpu);
978 982
979int ttm_bo_move_buffer(struct ttm_buffer_object *bo, 983int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
980 struct ttm_placement *placement, 984 struct ttm_placement *placement,
981 bool interruptible, bool no_wait) 985 bool interruptible, bool no_wait_reserve,
986 bool no_wait_gpu)
982{ 987{
983 struct ttm_bo_global *glob = bo->glob; 988 struct ttm_bo_global *glob = bo->glob;
984 int ret = 0; 989 int ret = 0;
@@ -992,20 +997,21 @@ int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
992 * instead of doing it here. 997 * instead of doing it here.
993 */ 998 */
994 spin_lock(&bo->lock); 999 spin_lock(&bo->lock);
995 ret = ttm_bo_wait(bo, false, interruptible, no_wait); 1000 ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
996 spin_unlock(&bo->lock); 1001 spin_unlock(&bo->lock);
997 if (ret) 1002 if (ret)
998 return ret; 1003 return ret;
999 mem.num_pages = bo->num_pages; 1004 mem.num_pages = bo->num_pages;
1000 mem.size = mem.num_pages << PAGE_SHIFT; 1005 mem.size = mem.num_pages << PAGE_SHIFT;
1001 mem.page_alignment = bo->mem.page_alignment; 1006 mem.page_alignment = bo->mem.page_alignment;
1007 mem.bus.io_reserved = false;
1002 /* 1008 /*
1003 * Determine where to move the buffer. 1009 * Determine where to move the buffer.
1004 */ 1010 */
1005 ret = ttm_bo_mem_space(bo, placement, &mem, interruptible, no_wait); 1011 ret = ttm_bo_mem_space(bo, placement, &mem, interruptible, no_wait_reserve, no_wait_gpu);
1006 if (ret) 1012 if (ret)
1007 goto out_unlock; 1013 goto out_unlock;
1008 ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait); 1014 ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait_reserve, no_wait_gpu);
1009out_unlock: 1015out_unlock:
1010 if (ret && mem.mm_node) { 1016 if (ret && mem.mm_node) {
1011 spin_lock(&glob->lru_lock); 1017 spin_lock(&glob->lru_lock);
@@ -1039,7 +1045,8 @@ static int ttm_bo_mem_compat(struct ttm_placement *placement,
1039 1045
1040int ttm_bo_validate(struct ttm_buffer_object *bo, 1046int ttm_bo_validate(struct ttm_buffer_object *bo,
1041 struct ttm_placement *placement, 1047 struct ttm_placement *placement,
1042 bool interruptible, bool no_wait) 1048 bool interruptible, bool no_wait_reserve,
1049 bool no_wait_gpu)
1043{ 1050{
1044 int ret; 1051 int ret;
1045 1052
@@ -1054,7 +1061,7 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
1054 */ 1061 */
1055 ret = ttm_bo_mem_compat(placement, &bo->mem); 1062 ret = ttm_bo_mem_compat(placement, &bo->mem);
1056 if (ret < 0) { 1063 if (ret < 0) {
1057 ret = ttm_bo_move_buffer(bo, placement, interruptible, no_wait); 1064 ret = ttm_bo_move_buffer(bo, placement, interruptible, no_wait_reserve, no_wait_gpu);
1058 if (ret) 1065 if (ret)
1059 return ret; 1066 return ret;
1060 } else { 1067 } else {
@@ -1153,6 +1160,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
1153 bo->mem.num_pages = bo->num_pages; 1160 bo->mem.num_pages = bo->num_pages;
1154 bo->mem.mm_node = NULL; 1161 bo->mem.mm_node = NULL;
1155 bo->mem.page_alignment = page_alignment; 1162 bo->mem.page_alignment = page_alignment;
1163 bo->mem.bus.io_reserved = false;
1156 bo->buffer_start = buffer_start & PAGE_MASK; 1164 bo->buffer_start = buffer_start & PAGE_MASK;
1157 bo->priv_flags = 0; 1165 bo->priv_flags = 0;
1158 bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED); 1166 bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
@@ -1175,7 +1183,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
1175 goto out_err; 1183 goto out_err;
1176 } 1184 }
1177 1185
1178 ret = ttm_bo_validate(bo, placement, interruptible, false); 1186 ret = ttm_bo_validate(bo, placement, interruptible, false, false);
1179 if (ret) 1187 if (ret)
1180 goto out_err; 1188 goto out_err;
1181 1189
@@ -1249,7 +1257,7 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
1249 spin_lock(&glob->lru_lock); 1257 spin_lock(&glob->lru_lock);
1250 while (!list_empty(&man->lru)) { 1258 while (!list_empty(&man->lru)) {
1251 spin_unlock(&glob->lru_lock); 1259 spin_unlock(&glob->lru_lock);
1252 ret = ttm_mem_evict_first(bdev, mem_type, false, false); 1260 ret = ttm_mem_evict_first(bdev, mem_type, false, false, false);
1253 if (ret) { 1261 if (ret) {
1254 if (allow_errors) { 1262 if (allow_errors) {
1255 return ret; 1263 return ret;
@@ -1553,26 +1561,6 @@ bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
1553 return true; 1561 return true;
1554} 1562}
1555 1563
1556int ttm_bo_pci_offset(struct ttm_bo_device *bdev,
1557 struct ttm_mem_reg *mem,
1558 unsigned long *bus_base,
1559 unsigned long *bus_offset, unsigned long *bus_size)
1560{
1561 struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
1562
1563 *bus_size = 0;
1564 if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
1565 return -EINVAL;
1566
1567 if (ttm_mem_reg_is_pci(bdev, mem)) {
1568 *bus_offset = mem->mm_node->start << PAGE_SHIFT;
1569 *bus_size = mem->num_pages << PAGE_SHIFT;
1570 *bus_base = man->io_offset;
1571 }
1572
1573 return 0;
1574}
1575
1576void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo) 1564void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
1577{ 1565{
1578 struct ttm_bo_device *bdev = bo->bdev; 1566 struct ttm_bo_device *bdev = bo->bdev;
@@ -1581,8 +1569,8 @@ void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
1581 1569
1582 if (!bdev->dev_mapping) 1570 if (!bdev->dev_mapping)
1583 return; 1571 return;
1584
1585 unmap_mapping_range(bdev->dev_mapping, offset, holelen, 1); 1572 unmap_mapping_range(bdev->dev_mapping, offset, holelen, 1);
1573 ttm_mem_io_free(bdev, &bo->mem);
1586} 1574}
1587EXPORT_SYMBOL(ttm_bo_unmap_virtual); 1575EXPORT_SYMBOL(ttm_bo_unmap_virtual);
1588 1576
@@ -1839,7 +1827,7 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
1839 evict_mem.mem_type = TTM_PL_SYSTEM; 1827 evict_mem.mem_type = TTM_PL_SYSTEM;
1840 1828
1841 ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, 1829 ret = ttm_bo_handle_move_mem(bo, &evict_mem, true,
1842 false, false); 1830 false, false, false);
1843 if (unlikely(ret != 0)) 1831 if (unlikely(ret != 0))
1844 goto out; 1832 goto out;
1845 } 1833 }
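Throughout ttm_bo.c the old single no_wait flag is split into no_wait_reserve and no_wait_gpu, so callers can decide independently whether to block on buffer reservation and on GPU completion. A hedged sketch of the new call shape for a hypothetical driver:

	/* interruptible; may block on reservation, but never on the GPU */
	ret = ttm_bo_validate(bo, &placement, true, false, true);
	if (ret == -EBUSY)
		; /* bo still busy on the GPU; caller chooses how to retry */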
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index d764e82e799b..13012a1f1486 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -50,7 +50,8 @@ void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
50} 50}
51 51
52int ttm_bo_move_ttm(struct ttm_buffer_object *bo, 52int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
53 bool evict, bool no_wait, struct ttm_mem_reg *new_mem) 53 bool evict, bool no_wait_reserve,
54 bool no_wait_gpu, struct ttm_mem_reg *new_mem)
54{ 55{
55 struct ttm_tt *ttm = bo->ttm; 56 struct ttm_tt *ttm = bo->ttm;
56 struct ttm_mem_reg *old_mem = &bo->mem; 57 struct ttm_mem_reg *old_mem = &bo->mem;
@@ -81,30 +82,51 @@ int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
81} 82}
82EXPORT_SYMBOL(ttm_bo_move_ttm); 83EXPORT_SYMBOL(ttm_bo_move_ttm);
83 84
85int ttm_mem_io_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
86{
87 int ret;
88
89 if (!mem->bus.io_reserved) {
90 mem->bus.io_reserved = true;
91 ret = bdev->driver->io_mem_reserve(bdev, mem);
92 if (unlikely(ret != 0))
93 return ret;
94 }
95 return 0;
96}
97
98void ttm_mem_io_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
99{
100 if (bdev->driver->io_mem_reserve) {
101 if (mem->bus.io_reserved) {
102 mem->bus.io_reserved = false;
103 bdev->driver->io_mem_free(bdev, mem);
104 }
105 }
106}
107
84int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem, 108int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
85 void **virtual) 109 void **virtual)
86{ 110{
87 struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
88 unsigned long bus_offset;
89 unsigned long bus_size;
90 unsigned long bus_base;
91 int ret; 111 int ret;
92 void *addr; 112 void *addr;
93 113
94 *virtual = NULL; 114 *virtual = NULL;
95 ret = ttm_bo_pci_offset(bdev, mem, &bus_base, &bus_offset, &bus_size); 115 ret = ttm_mem_io_reserve(bdev, mem);
96 if (ret || bus_size == 0) 116 if (ret || !mem->bus.is_iomem)
97 return ret; 117 return ret;
98 118
99 if (!(man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP)) 119 if (mem->bus.addr) {
100 addr = (void *)(((u8 *) man->io_addr) + bus_offset); 120 addr = mem->bus.addr;
101 else { 121 } else {
102 if (mem->placement & TTM_PL_FLAG_WC) 122 if (mem->placement & TTM_PL_FLAG_WC)
103 addr = ioremap_wc(bus_base + bus_offset, bus_size); 123 addr = ioremap_wc(mem->bus.base + mem->bus.offset, mem->bus.size);
104 else 124 else
105 addr = ioremap_nocache(bus_base + bus_offset, bus_size); 125 addr = ioremap_nocache(mem->bus.base + mem->bus.offset, mem->bus.size);
106 if (!addr) 126 if (!addr) {
127 ttm_mem_io_free(bdev, mem);
107 return -ENOMEM; 128 return -ENOMEM;
129 }
108 } 130 }
109 *virtual = addr; 131 *virtual = addr;
110 return 0; 132 return 0;
@@ -117,8 +139,9 @@ void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
117 139
118 man = &bdev->man[mem->mem_type]; 140 man = &bdev->man[mem->mem_type];
119 141
120 if (virtual && (man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP)) 142 if (virtual && mem->bus.addr == NULL)
121 iounmap(virtual); 143 iounmap(virtual);
144 ttm_mem_io_free(bdev, mem);
122} 145}
123 146
124static int ttm_copy_io_page(void *dst, void *src, unsigned long page) 147static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
@@ -208,7 +231,8 @@ static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
208} 231}
209 232
210int ttm_bo_move_memcpy(struct ttm_buffer_object *bo, 233int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
211 bool evict, bool no_wait, struct ttm_mem_reg *new_mem) 234 bool evict, bool no_wait_reserve, bool no_wait_gpu,
235 struct ttm_mem_reg *new_mem)
212{ 236{
213 struct ttm_bo_device *bdev = bo->bdev; 237 struct ttm_bo_device *bdev = bo->bdev;
214 struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type]; 238 struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
@@ -369,26 +393,23 @@ pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
369EXPORT_SYMBOL(ttm_io_prot); 393EXPORT_SYMBOL(ttm_io_prot);
370 394
371static int ttm_bo_ioremap(struct ttm_buffer_object *bo, 395static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
372 unsigned long bus_base, 396 unsigned long offset,
373 unsigned long bus_offset, 397 unsigned long size,
374 unsigned long bus_size,
375 struct ttm_bo_kmap_obj *map) 398 struct ttm_bo_kmap_obj *map)
376{ 399{
377 struct ttm_bo_device *bdev = bo->bdev;
378 struct ttm_mem_reg *mem = &bo->mem; 400 struct ttm_mem_reg *mem = &bo->mem;
379 struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
380 401
381 if (!(man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP)) { 402 if (bo->mem.bus.addr) {
382 map->bo_kmap_type = ttm_bo_map_premapped; 403 map->bo_kmap_type = ttm_bo_map_premapped;
383 map->virtual = (void *)(((u8 *) man->io_addr) + bus_offset); 404 map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
384 } else { 405 } else {
385 map->bo_kmap_type = ttm_bo_map_iomap; 406 map->bo_kmap_type = ttm_bo_map_iomap;
386 if (mem->placement & TTM_PL_FLAG_WC) 407 if (mem->placement & TTM_PL_FLAG_WC)
387 map->virtual = ioremap_wc(bus_base + bus_offset, 408 map->virtual = ioremap_wc(bo->mem.bus.base + bo->mem.bus.offset + offset,
388 bus_size); 409 size);
389 else 410 else
390 map->virtual = ioremap_nocache(bus_base + bus_offset, 411 map->virtual = ioremap_nocache(bo->mem.bus.base + bo->mem.bus.offset + offset,
391 bus_size); 412 size);
392 } 413 }
393 return (!map->virtual) ? -ENOMEM : 0; 414 return (!map->virtual) ? -ENOMEM : 0;
394} 415}
@@ -441,13 +462,12 @@ int ttm_bo_kmap(struct ttm_buffer_object *bo,
441 unsigned long start_page, unsigned long num_pages, 462 unsigned long start_page, unsigned long num_pages,
442 struct ttm_bo_kmap_obj *map) 463 struct ttm_bo_kmap_obj *map)
443{ 464{
465 unsigned long offset, size;
444 int ret; 466 int ret;
445 unsigned long bus_base;
446 unsigned long bus_offset;
447 unsigned long bus_size;
448 467
449 BUG_ON(!list_empty(&bo->swap)); 468 BUG_ON(!list_empty(&bo->swap));
450 map->virtual = NULL; 469 map->virtual = NULL;
470 map->bo = bo;
451 if (num_pages > bo->num_pages) 471 if (num_pages > bo->num_pages)
452 return -EINVAL; 472 return -EINVAL;
453 if (start_page > bo->num_pages) 473 if (start_page > bo->num_pages)
@@ -456,16 +476,15 @@ int ttm_bo_kmap(struct ttm_buffer_object *bo,
456 if (num_pages > 1 && !DRM_SUSER(DRM_CURPROC)) 476 if (num_pages > 1 && !DRM_SUSER(DRM_CURPROC))
457 return -EPERM; 477 return -EPERM;
458#endif 478#endif
459 ret = ttm_bo_pci_offset(bo->bdev, &bo->mem, &bus_base, 479 ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
460 &bus_offset, &bus_size);
461 if (ret) 480 if (ret)
462 return ret; 481 return ret;
463 if (bus_size == 0) { 482 if (!bo->mem.bus.is_iomem) {
464 return ttm_bo_kmap_ttm(bo, start_page, num_pages, map); 483 return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
465 } else { 484 } else {
466 bus_offset += start_page << PAGE_SHIFT; 485 offset = start_page << PAGE_SHIFT;
467 bus_size = num_pages << PAGE_SHIFT; 486 size = num_pages << PAGE_SHIFT;
468 return ttm_bo_ioremap(bo, bus_base, bus_offset, bus_size, map); 487 return ttm_bo_ioremap(bo, offset, size, map);
469 } 488 }
470} 489}
471EXPORT_SYMBOL(ttm_bo_kmap); 490EXPORT_SYMBOL(ttm_bo_kmap);
@@ -477,6 +496,7 @@ void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
477 switch (map->bo_kmap_type) { 496 switch (map->bo_kmap_type) {
478 case ttm_bo_map_iomap: 497 case ttm_bo_map_iomap:
479 iounmap(map->virtual); 498 iounmap(map->virtual);
499 ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
480 break; 500 break;
481 case ttm_bo_map_vmap: 501 case ttm_bo_map_vmap:
482 vunmap(map->virtual); 502 vunmap(map->virtual);
@@ -494,39 +514,11 @@ void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
494} 514}
495EXPORT_SYMBOL(ttm_bo_kunmap); 515EXPORT_SYMBOL(ttm_bo_kunmap);
496 516
497int ttm_bo_pfn_prot(struct ttm_buffer_object *bo,
498 unsigned long dst_offset,
499 unsigned long *pfn, pgprot_t *prot)
500{
501 struct ttm_mem_reg *mem = &bo->mem;
502 struct ttm_bo_device *bdev = bo->bdev;
503 unsigned long bus_offset;
504 unsigned long bus_size;
505 unsigned long bus_base;
506 int ret;
507 ret = ttm_bo_pci_offset(bdev, mem, &bus_base, &bus_offset,
508 &bus_size);
509 if (ret)
510 return -EINVAL;
511 if (bus_size != 0)
512 *pfn = (bus_base + bus_offset + dst_offset) >> PAGE_SHIFT;
513 else
514 if (!bo->ttm)
515 return -EINVAL;
516 else
517 *pfn = page_to_pfn(ttm_tt_get_page(bo->ttm,
518 dst_offset >>
519 PAGE_SHIFT));
520 *prot = (mem->placement & TTM_PL_FLAG_CACHED) ?
521 PAGE_KERNEL : ttm_io_prot(mem->placement, PAGE_KERNEL);
522
523 return 0;
524}
525
526int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, 517int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
527 void *sync_obj, 518 void *sync_obj,
528 void *sync_obj_arg, 519 void *sync_obj_arg,
529 bool evict, bool no_wait, 520 bool evict, bool no_wait_reserve,
521 bool no_wait_gpu,
530 struct ttm_mem_reg *new_mem) 522 struct ttm_mem_reg *new_mem)
531{ 523{
532 struct ttm_bo_device *bdev = bo->bdev; 524 struct ttm_bo_device *bdev = bo->bdev;
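With ttm_bo_pci_offset() gone, the old bus_base/bus_offset/bus_size triple is replaced by the mem->bus descriptor that ttm_mem_io_reserve() asks the driver to fill in, and that ttm_mem_reg_ioremap(), ttm_bo_kmap() and the fault path now consume. A minimal sketch of a driver-side io_mem_reserve() hook for a linear VRAM aperture, mirroring the logic of the removed ttm_bo_pci_offset() (function name and aperture base are hypothetical):

	static int foo_io_mem_reserve(struct ttm_bo_device *bdev,
				      struct ttm_mem_reg *mem)
	{
		mem->bus.addr = NULL;	/* no premapped kernel address */
		mem->bus.offset = mem->mm_node->start << PAGE_SHIFT;
		mem->bus.size = mem->num_pages << PAGE_SHIFT;
		mem->bus.base = 0;	/* device-specific aperture base */
		mem->bus.is_iomem = true;
		return 0;
	}

ttm_mem_io_free() clears bus.io_reserved again and hands the region back through the matching io_mem_free() hook.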
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 668dbe8b8dd3..fe6cb77899f4 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -74,9 +74,6 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
74 struct ttm_buffer_object *bo = (struct ttm_buffer_object *) 74 struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
75 vma->vm_private_data; 75 vma->vm_private_data;
76 struct ttm_bo_device *bdev = bo->bdev; 76 struct ttm_bo_device *bdev = bo->bdev;
77 unsigned long bus_base;
78 unsigned long bus_offset;
79 unsigned long bus_size;
80 unsigned long page_offset; 77 unsigned long page_offset;
81 unsigned long page_last; 78 unsigned long page_last;
82 unsigned long pfn; 79 unsigned long pfn;
@@ -84,7 +81,6 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
84 struct page *page; 81 struct page *page;
85 int ret; 82 int ret;
86 int i; 83 int i;
87 bool is_iomem;
88 unsigned long address = (unsigned long)vmf->virtual_address; 84 unsigned long address = (unsigned long)vmf->virtual_address;
89 int retval = VM_FAULT_NOPAGE; 85 int retval = VM_FAULT_NOPAGE;
90 86
@@ -101,8 +97,21 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
101 return VM_FAULT_NOPAGE; 97 return VM_FAULT_NOPAGE;
102 } 98 }
103 99
104 if (bdev->driver->fault_reserve_notify) 100 if (bdev->driver->fault_reserve_notify) {
105 bdev->driver->fault_reserve_notify(bo); 101 ret = bdev->driver->fault_reserve_notify(bo);
102 switch (ret) {
103 case 0:
104 break;
105 case -EBUSY:
106 set_need_resched();
107 case -ERESTARTSYS:
108 retval = VM_FAULT_NOPAGE;
109 goto out_unlock;
110 default:
111 retval = VM_FAULT_SIGBUS;
112 goto out_unlock;
113 }
114 }
106 115
107 /* 116 /*
108 * Wait for buffer data in transit, due to a pipelined 117 * Wait for buffer data in transit, due to a pipelined
@@ -122,15 +131,12 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
122 spin_unlock(&bo->lock); 131 spin_unlock(&bo->lock);
123 132
124 133
125 ret = ttm_bo_pci_offset(bdev, &bo->mem, &bus_base, &bus_offset, 134 ret = ttm_mem_io_reserve(bdev, &bo->mem);
126 &bus_size); 135 if (ret) {
127 if (unlikely(ret != 0)) {
128 retval = VM_FAULT_SIGBUS; 136 retval = VM_FAULT_SIGBUS;
129 goto out_unlock; 137 goto out_unlock;
130 } 138 }
131 139
132 is_iomem = (bus_size != 0);
133
134 page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) + 140 page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
135 bo->vm_node->start - vma->vm_pgoff; 141 bo->vm_node->start - vma->vm_pgoff;
136 page_last = ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) + 142 page_last = ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) +
@@ -154,8 +160,7 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
154 * vma->vm_page_prot when the object changes caching policy, with 160 * vma->vm_page_prot when the object changes caching policy, with
155 * the correct locks held. 161 * the correct locks held.
156 */ 162 */
157 163 if (bo->mem.bus.is_iomem) {
158 if (is_iomem) {
159 vma->vm_page_prot = ttm_io_prot(bo->mem.placement, 164 vma->vm_page_prot = ttm_io_prot(bo->mem.placement,
160 vma->vm_page_prot); 165 vma->vm_page_prot);
161 } else { 166 } else {
@@ -171,10 +176,8 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
171 */ 176 */
172 177
173 for (i = 0; i < TTM_BO_VM_NUM_PREFAULT; ++i) { 178 for (i = 0; i < TTM_BO_VM_NUM_PREFAULT; ++i) {
174 179 if (bo->mem.bus.is_iomem)
175 if (is_iomem) 180 pfn = ((bo->mem.bus.base + bo->mem.bus.offset) >> PAGE_SHIFT) + page_offset;
176 pfn = ((bus_base + bus_offset) >> PAGE_SHIFT) +
177 page_offset;
178 else { 181 else {
179 page = ttm_tt_get_page(ttm, page_offset); 182 page = ttm_tt_get_page(ttm, page_offset);
180 if (unlikely(!page && i == 0)) { 183 if (unlikely(!page && i == 0)) {
@@ -198,7 +201,6 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
198 retval = 201 retval =
199 (ret == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS; 202 (ret == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
200 goto out_unlock; 203 goto out_unlock;
201
202 } 204 }
203 205
204 address += PAGE_SIZE; 206 address += PAGE_SIZE;
@@ -221,8 +223,7 @@ static void ttm_bo_vm_open(struct vm_area_struct *vma)
221 223
222static void ttm_bo_vm_close(struct vm_area_struct *vma) 224static void ttm_bo_vm_close(struct vm_area_struct *vma)
223{ 225{
224 struct ttm_buffer_object *bo = 226 struct ttm_buffer_object *bo = (struct ttm_buffer_object *)vma->vm_private_data;
225 (struct ttm_buffer_object *)vma->vm_private_data;
226 227
227 ttm_bo_unref(&bo); 228 ttm_bo_unref(&bo);
228 vma->vm_private_data = NULL; 229 vma->vm_private_data = NULL;
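fault_reserve_notify() now returns an int, and the fault handler maps the result: 0 proceeds, -EBUSY reschedules and retries the fault, -ERESTARTSYS retries, and anything else raises VM_FAULT_SIGBUS. A driver hook might therefore look roughly like this (the helper is an assumed placeholder):

	static int foo_fault_reserve_notify(struct ttm_buffer_object *bo)
	{
		/* e.g. migrate the bo somewhere CPU-visible first;
		 * foo_bo_needs_migration() is a hypothetical helper */
		if (foo_bo_needs_migration(bo))
			return -EBUSY;	/* fault is retried after resched */
		return 0;
	}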
diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
index 801b702566e6..e70ddd82dc02 100644
--- a/drivers/gpu/drm/ttm/ttm_memory.c
+++ b/drivers/gpu/drm/ttm/ttm_memory.c
@@ -27,6 +27,7 @@
27 27
28#include "ttm/ttm_memory.h" 28#include "ttm/ttm_memory.h"
29#include "ttm/ttm_module.h" 29#include "ttm/ttm_module.h"
30#include "ttm/ttm_page_alloc.h"
30#include <linux/spinlock.h> 31#include <linux/spinlock.h>
31#include <linux/sched.h> 32#include <linux/sched.h>
32#include <linux/wait.h> 33#include <linux/wait.h>
@@ -393,6 +394,7 @@ int ttm_mem_global_init(struct ttm_mem_global *glob)
393 "Zone %7s: Available graphics memory: %llu kiB.\n", 394 "Zone %7s: Available graphics memory: %llu kiB.\n",
394 zone->name, (unsigned long long) zone->max_mem >> 10); 395 zone->name, (unsigned long long) zone->max_mem >> 10);
395 } 396 }
397 ttm_page_alloc_init(glob, glob->zone_kernel->max_mem/(2*PAGE_SIZE));
396 return 0; 398 return 0;
397out_no_zone: 399out_no_zone:
398 ttm_mem_global_release(glob); 400 ttm_mem_global_release(glob);
@@ -405,6 +407,9 @@ void ttm_mem_global_release(struct ttm_mem_global *glob)
405 unsigned int i; 407 unsigned int i;
406 struct ttm_mem_zone *zone; 408 struct ttm_mem_zone *zone;
407 409
 410 /* Let the page allocator stop the shrink work first. */
411 ttm_page_alloc_fini();
412
408 flush_workqueue(glob->swap_queue); 413 flush_workqueue(glob->swap_queue);
409 destroy_workqueue(glob->swap_queue); 414 destroy_workqueue(glob->swap_queue);
410 glob->swap_queue = NULL; 415 glob->swap_queue = NULL;
@@ -412,7 +417,7 @@ void ttm_mem_global_release(struct ttm_mem_global *glob)
412 zone = glob->zones[i]; 417 zone = glob->zones[i];
413 kobject_del(&zone->kobj); 418 kobject_del(&zone->kobj);
414 kobject_put(&zone->kobj); 419 kobject_put(&zone->kobj);
415 } 420 }
416 kobject_del(&glob->kobj); 421 kobject_del(&glob->kobj);
417 kobject_put(&glob->kobj); 422 kobject_put(&glob->kobj);
418} 423}
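The new pool allocator is initialised to hold at most half of the kernel zone. For example, with a 4 GiB kernel zone and 4 KiB pages, glob->zone_kernel->max_mem/(2*PAGE_SIZE) works out to 4 GiB / 8 KiB = 524288 pages, i.e. a 2 GiB cap on pooled memory.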
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
new file mode 100644
index 000000000000..0d9a42c2394f
--- /dev/null
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -0,0 +1,845 @@
1/*
2 * Copyright (c) Red Hat Inc.
3
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sub license,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the
12 * next paragraph) shall be included in all copies or substantial portions
13 * of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 *
23 * Authors: Dave Airlie <airlied@redhat.com>
24 * Jerome Glisse <jglisse@redhat.com>
25 * Pauli Nieminen <suokkos@gmail.com>
26 */
27
28/* simple list-based uncached page pool
29 * - Pool collects recently freed pages for reuse
30 * - Use page->lru to keep a free list
31 * - doesn't track currently in use pages
32 */
33#include <linux/list.h>
34#include <linux/spinlock.h>
35#include <linux/highmem.h>
36#include <linux/mm_types.h>
37#include <linux/module.h>
38#include <linux/mm.h>
39#include <linux/seq_file.h> /* for seq_printf */
40#include <linux/slab.h>
41
42#include <asm/atomic.h>
43#include <asm/agp.h>
44
45#include "ttm/ttm_bo_driver.h"
46#include "ttm/ttm_page_alloc.h"
47
48
49#define NUM_PAGES_TO_ALLOC (PAGE_SIZE/sizeof(struct page *))
50#define SMALL_ALLOCATION 16
51#define FREE_ALL_PAGES (~0U)
52/* times are in msecs */
53#define PAGE_FREE_INTERVAL 1000
54
55/**
56 * struct ttm_page_pool - Pool to reuse recently allocated uc/wc pages.
57 *
58 * @lock: Protects the shared pool from concurrent access. Must be used with
59 * irqsave/irqrestore variants because the pool allocator may be called from
60 * delayed work.
61 * @fill_lock: Prevent concurrent calls to fill.
62 * @list: Pool of free uc/wc pages for fast reuse.
63 * @gfp_flags: Flags to pass for alloc_page.
64 * @npages: Number of pages in pool.
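 * @name: name of the pool, for diagnostic output.
 * @nfrees: statistics, number of pages freed from this pool.
 * @nrefills: statistics, number of times the pool was refilled.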
65 */
66struct ttm_page_pool {
67 spinlock_t lock;
68 bool fill_lock;
69 struct list_head list;
70 int gfp_flags;
71 unsigned npages;
72 char *name;
73 unsigned long nfrees;
74 unsigned long nrefills;
75};
76
77/**
78 * Limits for the pool. They are handled without locks because the only place
79 * they may change is in the sysfs store. They won't have immediate effect
80 * anyway, so forcing serialization to access them is pointless.
81 */
82
83struct ttm_pool_opts {
84 unsigned alloc_size;
85 unsigned max_size;
86 unsigned small;
87};
88
89#define NUM_POOLS 4
90
91/**
92 * struct ttm_pool_manager - Holds memory pools for fast allocation
93 *
94 * The manager is a read-only object for pool code, so it doesn't need locking.
95 *
96 * @free_interval: minimum number of jiffies between freeing pages from pool.
97 * @page_alloc_inited: reference counting for pool allocation.
98 * @work: Work that is used to shrink the pool. Work is only run when there are
99 * some pages to free.
100 * @small_allocation: Limit in pages below which an allocation counts as small.
101 *
102 * @pools: All pool objects in use.
103 **/
104struct ttm_pool_manager {
105 struct kobject kobj;
106 struct shrinker mm_shrink;
107 atomic_t page_alloc_inited;
108 struct ttm_pool_opts options;
109
110 union {
111 struct ttm_page_pool pools[NUM_POOLS];
112 struct {
113 struct ttm_page_pool wc_pool;
114 struct ttm_page_pool uc_pool;
115 struct ttm_page_pool wc_pool_dma32;
116 struct ttm_page_pool uc_pool_dma32;
117 } ;
118 };
119};
120
121static struct attribute ttm_page_pool_max = {
122 .name = "pool_max_size",
123 .mode = S_IRUGO | S_IWUSR
124};
125static struct attribute ttm_page_pool_small = {
126 .name = "pool_small_allocation",
127 .mode = S_IRUGO | S_IWUSR
128};
129static struct attribute ttm_page_pool_alloc_size = {
130 .name = "pool_allocation_size",
131 .mode = S_IRUGO | S_IWUSR
132};
133
134static struct attribute *ttm_pool_attrs[] = {
135 &ttm_page_pool_max,
136 &ttm_page_pool_small,
137 &ttm_page_pool_alloc_size,
138 NULL
139};
140
141static void ttm_pool_kobj_release(struct kobject *kobj)
142{
143 struct ttm_pool_manager *m =
144 container_of(kobj, struct ttm_pool_manager, kobj);
145 (void)m;
146}
147
148static ssize_t ttm_pool_store(struct kobject *kobj,
149 struct attribute *attr, const char *buffer, size_t size)
150{
151 struct ttm_pool_manager *m =
152 container_of(kobj, struct ttm_pool_manager, kobj);
153 int chars;
154 unsigned val;
155 chars = sscanf(buffer, "%u", &val);
156 if (chars == 0)
157 return size;
158
159 /* Convert kb to number of pages */
160 val = val / (PAGE_SIZE >> 10);
161
162 if (attr == &ttm_page_pool_max)
163 m->options.max_size = val;
164 else if (attr == &ttm_page_pool_small)
165 m->options.small = val;
166 else if (attr == &ttm_page_pool_alloc_size) {
167 if (val > NUM_PAGES_TO_ALLOC*8) {
168 printk(KERN_ERR "[ttm] Setting allocation size to %lu "
169 "is not allowed. Recommended size is "
170 "%lu\n",
171 NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7),
172 NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
173 return size;
174 } else if (val > NUM_PAGES_TO_ALLOC) {
175 printk(KERN_WARNING "[ttm] Setting allocation size to "
176 "larger than %lu is not recommended.\n",
177 NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
178 }
179 m->options.alloc_size = val;
180 }
181
182 return size;
183}
184
185static ssize_t ttm_pool_show(struct kobject *kobj,
186 struct attribute *attr, char *buffer)
187{
188 struct ttm_pool_manager *m =
189 container_of(kobj, struct ttm_pool_manager, kobj);
190 unsigned val = 0;
191
192 if (attr == &ttm_page_pool_max)
193 val = m->options.max_size;
194 else if (attr == &ttm_page_pool_small)
195 val = m->options.small;
196 else if (attr == &ttm_page_pool_alloc_size)
197 val = m->options.alloc_size;
198
199 val = val * (PAGE_SIZE >> 10);
200
201 return snprintf(buffer, PAGE_SIZE, "%u\n", val);
202}
203
204static const struct sysfs_ops ttm_pool_sysfs_ops = {
205 .show = &ttm_pool_show,
206 .store = &ttm_pool_store,
207};
208
209static struct kobj_type ttm_pool_kobj_type = {
210 .release = &ttm_pool_kobj_release,
211 .sysfs_ops = &ttm_pool_sysfs_ops,
212 .default_attrs = ttm_pool_attrs,
213};
214
215static struct ttm_pool_manager _manager = {
216 .page_alloc_inited = ATOMIC_INIT(0)
217};
218
219#ifndef CONFIG_X86
220static int set_pages_array_wb(struct page **pages, int addrinarray)
221{
222#ifdef TTM_HAS_AGP
223 int i;
224
225 for (i = 0; i < addrinarray; i++)
226 unmap_page_from_agp(pages[i]);
227#endif
228 return 0;
229}
230
231static int set_pages_array_wc(struct page **pages, int addrinarray)
232{
233#ifdef TTM_HAS_AGP
234 int i;
235
236 for (i = 0; i < addrinarray; i++)
237 map_page_into_agp(pages[i]);
238#endif
239 return 0;
240}
241
242static int set_pages_array_uc(struct page **pages, int addrinarray)
243{
244#ifdef TTM_HAS_AGP
245 int i;
246
247 for (i = 0; i < addrinarray; i++)
248 map_page_into_agp(pages[i]);
249#endif
250 return 0;
251}
252#endif
253
254/**
255 * Select the right pool for the requested caching state and ttm flags. */
256static struct ttm_page_pool *ttm_get_pool(int flags,
257 enum ttm_caching_state cstate)
258{
259 int pool_index;
260
261 if (cstate == tt_cached)
262 return NULL;
263
264 if (cstate == tt_wc)
265 pool_index = 0x0;
266 else
267 pool_index = 0x1;
268
269 if (flags & TTM_PAGE_FLAG_DMA32)
270 pool_index |= 0x2;
271
272 return &_manager.pools[pool_index];
273}
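/* Example (illustrative): tt_wc with TTM_PAGE_FLAG_DMA32 set yields index
 * 0x0 | 0x2 == 2, i.e. _manager.pools[2], the wc_pool_dma32 member of the
 * union above; tt_cached returns NULL and bypasses pooling entirely. */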
274
275/* set memory back to wb and free the pages. */
276static void ttm_pages_put(struct page *pages[], unsigned npages)
277{
278 unsigned i;
279 if (set_pages_array_wb(pages, npages))
280 printk(KERN_ERR "[ttm] Failed to set %d pages to wb!\n",
281 npages);
282 for (i = 0; i < npages; ++i)
283 __free_page(pages[i]);
284}
285
286static void ttm_pool_update_free_locked(struct ttm_page_pool *pool,
287 unsigned freed_pages)
288{
289 pool->npages -= freed_pages;
290 pool->nfrees += freed_pages;
291}
292
293/**
294 * Free pages from pool.
295 *
296 * To prevent hogging the ttm_swap process we free at most
297 * NUM_PAGES_TO_ALLOC pages in one go.
298 *
299 * @pool: pool to free the pages from
300 * @nr_free: number of pages to free; FREE_ALL_PAGES drains the whole pool.
301 **/
302static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free)
303{
304 unsigned long irq_flags;
305 struct page *p;
306 struct page **pages_to_free;
307 unsigned freed_pages = 0,
308 npages_to_free = nr_free;
309
310 if (NUM_PAGES_TO_ALLOC < nr_free)
311 npages_to_free = NUM_PAGES_TO_ALLOC;
312
313 pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
314 GFP_KERNEL);
315 if (!pages_to_free) {
316 printk(KERN_ERR "Failed to allocate memory for pool free operation.\n");
317 return 0;
318 }
319
320restart:
321 spin_lock_irqsave(&pool->lock, irq_flags);
322
323 list_for_each_entry_reverse(p, &pool->list, lru) {
324 if (freed_pages >= npages_to_free)
325 break;
326
327 pages_to_free[freed_pages++] = p;
328 /* We can only remove NUM_PAGES_TO_ALLOC at a time. */
329 if (freed_pages >= NUM_PAGES_TO_ALLOC) {
330 /* remove range of pages from the pool */
331 __list_del(p->lru.prev, &pool->list);
332
333 ttm_pool_update_free_locked(pool, freed_pages);
334 /**
335 * Because changing page caching is costly
336 * we unlock the pool to prevent stalling.
337 */
338 spin_unlock_irqrestore(&pool->lock, irq_flags);
339
340 ttm_pages_put(pages_to_free, freed_pages);
341 if (likely(nr_free != FREE_ALL_PAGES))
342 nr_free -= freed_pages;
343
344 if (NUM_PAGES_TO_ALLOC >= nr_free)
345 npages_to_free = nr_free;
346 else
347 npages_to_free = NUM_PAGES_TO_ALLOC;
348
349 freed_pages = 0;
350
351 /* free all so restart the processing */
352 if (nr_free)
353 goto restart;
354
355 /* Not allowed to fall through or break because the
356 * following context is inside the spinlock while we are
357 * outside it here.
358 */
359 goto out;
360
361 }
362 }
363
364 /* remove range of pages from the pool */
365 if (freed_pages) {
366 __list_del(&p->lru, &pool->list);
367
368 ttm_pool_update_free_locked(pool, freed_pages);
369 nr_free -= freed_pages;
370 }
371
372 spin_unlock_irqrestore(&pool->lock, irq_flags);
373
374 if (freed_pages)
375 ttm_pages_put(pages_to_free, freed_pages);
376out:
377 kfree(pages_to_free);
378 return nr_free;
379}
380
381/* Get a good estimate of how many pages are free in the pools */
382static int ttm_pool_get_num_unused_pages(void)
383{
384 unsigned i;
385 int total = 0;
386 for (i = 0; i < NUM_POOLS; ++i)
387 total += _manager.pools[i].npages;
388
389 return total;
390}
391
392/**
393 * Callback for mm to request that the pool reduce the number of pages held.
394 */
395static int ttm_pool_mm_shrink(int shrink_pages, gfp_t gfp_mask)
396{
397 static atomic_t start_pool = ATOMIC_INIT(0);
398 unsigned i;
399 unsigned pool_offset = atomic_add_return(1, &start_pool);
400 struct ttm_page_pool *pool;
401
402 pool_offset = pool_offset % NUM_POOLS;
403 /* select start pool in round robin fashion */
404 for (i = 0; i < NUM_POOLS; ++i) {
405 unsigned nr_free = shrink_pages;
406 if (shrink_pages == 0)
407 break;
408 pool = &_manager.pools[(i + pool_offset)%NUM_POOLS];
409 shrink_pages = ttm_page_pool_free(pool, nr_free);
410 }
411 /* return estimated number of unused pages in pool */
412 return ttm_pool_get_num_unused_pages();
413}
414
415static void ttm_pool_mm_shrink_init(struct ttm_pool_manager *manager)
416{
417 manager->mm_shrink.shrink = &ttm_pool_mm_shrink;
418 manager->mm_shrink.seeks = 1;
419 register_shrinker(&manager->mm_shrink);
420}
421
422static void ttm_pool_mm_shrink_fini(struct ttm_pool_manager *manager)
423{
424 unregister_shrinker(&manager->mm_shrink);
425}
426
427static int ttm_set_pages_caching(struct page **pages,
428 enum ttm_caching_state cstate, unsigned cpages)
429{
430 int r = 0;
431 /* Set page caching */
432 switch (cstate) {
433 case tt_uncached:
434 r = set_pages_array_uc(pages, cpages);
435 if (r)
436 printk(KERN_ERR "[ttm] Failed to set %d pages to uc!\n",
437 cpages);
438 break;
439 case tt_wc:
440 r = set_pages_array_wc(pages, cpages);
441 if (r)
442 printk(KERN_ERR "[ttm] Failed to set %d pages to wc!\n",
443 cpages);
444 break;
445 default:
446 break;
447 }
448 return r;
449}
450
451/**
452 * Free the pages that failed to change their caching state. If there are
453 * any pages that already changed caching state, put them back in the
454 * pool.
455 */
456static void ttm_handle_caching_state_failure(struct list_head *pages,
457 int ttm_flags, enum ttm_caching_state cstate,
458 struct page **failed_pages, unsigned cpages)
459{
460 unsigned i;
461 /* Failed pages have to be freed */
462 for (i = 0; i < cpages; ++i) {
463 list_del(&failed_pages[i]->lru);
464 __free_page(failed_pages[i]);
465 }
466}
467
468/**
469 * Allocate new pages with correct caching.
470 *
471 * This function is reentrant if caller updates count depending on number of
472 * pages returned in pages array.
473 */
474static int ttm_alloc_new_pages(struct list_head *pages, int gfp_flags,
475 int ttm_flags, enum ttm_caching_state cstate, unsigned count)
476{
477 struct page **caching_array;
478 struct page *p;
479 int r = 0;
480 unsigned i, cpages;
481 unsigned max_cpages = min(count,
482 (unsigned)(PAGE_SIZE/sizeof(struct page *)));
483
484 /* allocate array for page caching change */
485 caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL);
486
487 if (!caching_array) {
488 printk(KERN_ERR "[ttm] unable to allocate table for new pages.");
489 return -ENOMEM;
490 }
491
492 for (i = 0, cpages = 0; i < count; ++i) {
493 p = alloc_page(gfp_flags);
494
495 if (!p) {
496 printk(KERN_ERR "[ttm] unable to get page %u\n", i);
497
498 /* store already allocated pages in the pool after
499 * setting the caching state */
500 if (cpages) {
501 r = ttm_set_pages_caching(caching_array, cstate, cpages);
502 if (r)
503 ttm_handle_caching_state_failure(pages,
504 ttm_flags, cstate,
505 caching_array, cpages);
506 }
507 r = -ENOMEM;
508 goto out;
509 }
510
511#ifdef CONFIG_HIGHMEM
512 /* gfp flags of highmem pages should never be dma32, so
513 * we should be fine in that case
514 */
515 if (!PageHighMem(p))
516#endif
517 {
518 caching_array[cpages++] = p;
519 if (cpages == max_cpages) {
520
521 r = ttm_set_pages_caching(caching_array,
522 cstate, cpages);
523 if (r) {
524 ttm_handle_caching_state_failure(pages,
525 ttm_flags, cstate,
526 caching_array, cpages);
527 goto out;
528 }
529 cpages = 0;
530 }
531 }
532
533 list_add(&p->lru, pages);
534 }
535
536 if (cpages) {
537 r = ttm_set_pages_caching(caching_array, cstate, cpages);
538 if (r)
539 ttm_handle_caching_state_failure(pages,
540 ttm_flags, cstate,
541 caching_array, cpages);
542 }
543out:
544 kfree(caching_array);
545
546 return r;
547}
548
549/**
550 * Fill the given pool if there aren't enough pages and the requested
551 * number of pages is small.
552 */
553static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
554 int ttm_flags, enum ttm_caching_state cstate, unsigned count,
555 unsigned long *irq_flags)
556{
557 struct page *p;
558 int r;
559 unsigned cpages = 0;
560 /**
561 * Only allow one pool fill operation at a time.
562 * If the pool doesn't have enough pages for the allocation, new pages
563 * are allocated outside of the pool.
564 */
565 if (pool->fill_lock)
566 return;
567
568 pool->fill_lock = true;
569
570 /* If the allocation request is small and there are not enough
571 * pages in the pool we fill the pool first */
572 if (count < _manager.options.small
573 && count > pool->npages) {
574 struct list_head new_pages;
575 unsigned alloc_size = _manager.options.alloc_size;
576
577 /**
578 * Can't change page caching if in irqsave context. We have to
579 * drop the pool->lock.
580 */
581 spin_unlock_irqrestore(&pool->lock, *irq_flags);
582
583 INIT_LIST_HEAD(&new_pages);
584 r = ttm_alloc_new_pages(&new_pages, pool->gfp_flags, ttm_flags,
585 cstate, alloc_size);
586 spin_lock_irqsave(&pool->lock, *irq_flags);
587
588 if (!r) {
589 list_splice(&new_pages, &pool->list);
590 ++pool->nrefills;
591 pool->npages += alloc_size;
592 } else {
593 printk(KERN_ERR "[ttm] Failed to fill pool (%p).", pool);
594 /* If we have any pages left put them to the pool. */
595 list_for_each_entry(p, &pool->list, lru) {
596 ++cpages;
597 }
598 list_splice(&new_pages, &pool->list);
599 pool->npages += cpages;
600 }
601
602 }
603 pool->fill_lock = false;
604}

/**
 * Cut count number of pages from the pool and put them into the return list.
 *
 * @return count of pages still to allocate to fill the request.
 */
static unsigned ttm_page_pool_get_pages(struct ttm_page_pool *pool,
		struct list_head *pages, int ttm_flags,
		enum ttm_caching_state cstate, unsigned count)
{
	unsigned long irq_flags;
	struct list_head *p;
	unsigned i;

	spin_lock_irqsave(&pool->lock, irq_flags);
	ttm_page_pool_fill_locked(pool, ttm_flags, cstate, count, &irq_flags);

	if (count >= pool->npages) {
		/* take all pages from the pool */
		list_splice_init(&pool->list, pages);
		count -= pool->npages;
		pool->npages = 0;
		goto out;
	}
	/* Find the last page to include in the requested number of pages.
	 * Walk the list from whichever end is closer to halve the search
	 * space. */
	if (count <= pool->npages/2) {
		i = 0;
		list_for_each(p, &pool->list) {
			if (++i == count)
				break;
		}
	} else {
		i = pool->npages + 1;
		list_for_each_prev(p, &pool->list) {
			if (--i == count)
				break;
		}
	}
	/* Cut count number of pages from the pool */
	list_cut_position(pages, &pool->list, p);
	pool->npages -= count;
	count = 0;
out:
	spin_unlock_irqrestore(&pool->lock, irq_flags);
	return count;
}
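
Both branches of the search above stop on the same node, the count-th entry
from the head: the forward walk stops when ++i reaches count, and the reverse
walk starts from i = npages + 1 so that --i reaches count on that same entry.
A worked example: with npages = 10 and count = 7, the reverse branch is taken
and visits positions 10, 9, 8 and 7 before breaking, four steps instead of
seven, and list_cut_position() then moves entries 1..7 onto the return list.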

/*
 * On success the pages list will hold count correctly cached pages.
 */
int ttm_get_pages(struct list_head *pages, int flags,
		enum ttm_caching_state cstate, unsigned count)
{
	struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
	struct page *p = NULL;
	int gfp_flags = 0;
	int r;

	/* set zero flag for page allocation if required */
	if (flags & TTM_PAGE_FLAG_ZERO_ALLOC)
		gfp_flags |= __GFP_ZERO;

	/* No pool for cached pages */
	if (pool == NULL) {
		if (flags & TTM_PAGE_FLAG_DMA32)
			gfp_flags |= GFP_DMA32;
		else
			gfp_flags |= __GFP_HIGHMEM;

		for (r = 0; r < count; ++r) {
			p = alloc_page(gfp_flags);
			if (!p) {
				printk(KERN_ERR "[ttm] unable to allocate page.\n");
				return -ENOMEM;
			}

			list_add(&p->lru, pages);
		}
		return 0;
	}

	/* combine zero flag to pool flags */
	gfp_flags |= pool->gfp_flags;

	/* First we take pages from the pool */
	count = ttm_page_pool_get_pages(pool, pages, flags, cstate, count);

	/* clear the pages coming from the pool if requested */
	if (flags & TTM_PAGE_FLAG_ZERO_ALLOC) {
		list_for_each_entry(p, pages, lru) {
			clear_page(page_address(p));
		}
	}

	/* If the pool didn't have enough pages allocate new ones. */
	if (count > 0) {
		/* ttm_alloc_new_pages doesn't reference pool so we can run
		 * multiple requests in parallel.
		 */
		r = ttm_alloc_new_pages(pages, gfp_flags, flags, cstate, count);
		if (r) {
			/* If there are any pages in the list put them back to
			 * the pool. */
			printk(KERN_ERR "[ttm] Failed to allocate extra pages "
			       "for large request.\n");
			ttm_put_pages(pages, 0, flags, cstate);
			return r;
		}
	}

	return 0;
}
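
A minimal usage sketch of the pool API, mirroring the single-page pattern the
ttm_tt.c hunk below adopts (error handling trimmed, function names
illustrative):

	/* Illustrative: pull one zeroed, cached page and return it later. */
	static int example_get_one_page(struct page **out)
	{
		struct list_head h;
		int ret;

		INIT_LIST_HEAD(&h);
		ret = ttm_get_pages(&h, TTM_PAGE_FLAG_ZERO_ALLOC, tt_cached, 1);
		if (ret != 0)
			return ret;
		*out = list_first_entry(&h, struct page, lru);
		return 0;
	}

	static void example_put_one_page(struct page *p)
	{
		struct list_head h;

		INIT_LIST_HEAD(&h);
		list_add(&p->lru, &h);
		ttm_put_pages(&h, 1, TTM_PAGE_FLAG_ZERO_ALLOC, tt_cached);
	}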

/* Put all pages in the pages list into the correct pool to wait for reuse */
void ttm_put_pages(struct list_head *pages, unsigned page_count, int flags,
		enum ttm_caching_state cstate)
{
	unsigned long irq_flags;
	struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
	struct page *p, *tmp;

	if (pool == NULL) {
		/* No pool for this memory type so free the pages */

		list_for_each_entry_safe(p, tmp, pages, lru) {
			__free_page(p);
		}
		/* Make the pages list empty */
		INIT_LIST_HEAD(pages);
		return;
	}
	if (page_count == 0) {
		list_for_each_entry_safe(p, tmp, pages, lru) {
			++page_count;
		}
	}

	spin_lock_irqsave(&pool->lock, irq_flags);
	list_splice_init(pages, &pool->list);
	pool->npages += page_count;
	/* Check that we don't go over the pool limit */
	page_count = 0;
	if (pool->npages > _manager.options.max_size) {
		page_count = pool->npages - _manager.options.max_size;
		/* free at least NUM_PAGES_TO_ALLOC number of pages
		 * to reduce calls to set_memory_wb */
		if (page_count < NUM_PAGES_TO_ALLOC)
			page_count = NUM_PAGES_TO_ALLOC;
	}
	spin_unlock_irqrestore(&pool->lock, irq_flags);
	if (page_count)
		ttm_page_pool_free(pool, page_count);
}

static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, int flags,
		char *name)
{
	spin_lock_init(&pool->lock);
	pool->fill_lock = false;
	INIT_LIST_HEAD(&pool->list);
	pool->npages = pool->nfrees = 0;
	pool->gfp_flags = flags;
	pool->name = name;
}

int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
{
	int ret;
	if (atomic_add_return(1, &_manager.page_alloc_inited) > 1)
		return 0;

	printk(KERN_INFO "[ttm] Initializing pool allocator.\n");

	ttm_page_pool_init_locked(&_manager.wc_pool, GFP_HIGHUSER, "wc");

	ttm_page_pool_init_locked(&_manager.uc_pool, GFP_HIGHUSER, "uc");

	ttm_page_pool_init_locked(&_manager.wc_pool_dma32, GFP_USER | GFP_DMA32,
			"wc dma");

	ttm_page_pool_init_locked(&_manager.uc_pool_dma32, GFP_USER | GFP_DMA32,
			"uc dma");

	_manager.options.max_size = max_pages;
	_manager.options.small = SMALL_ALLOCATION;
	_manager.options.alloc_size = NUM_PAGES_TO_ALLOC;

	kobject_init(&_manager.kobj, &ttm_pool_kobj_type);
	ret = kobject_add(&_manager.kobj, &glob->kobj, "pool");
	if (unlikely(ret != 0)) {
		kobject_put(&_manager.kobj);
		return ret;
	}

	ttm_pool_mm_shrink_init(&_manager);

	return 0;
}
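
Since page_alloc_inited makes init and fini refcounted, repeated
initialization from multiple drivers is harmless. A hedged sketch of the
expected pairing (the max_pages value shown is purely illustrative):

	/* Illustrative: bound the pools to a fraction of system memory. */
	ret = ttm_page_alloc_init(glob, totalram_pages / 2);
	if (ret)
		return ret;
	/* ... driver lifetime ... */
	ttm_page_alloc_fini();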

void ttm_page_alloc_fini(void)
{
	int i;

	if (atomic_sub_return(1, &_manager.page_alloc_inited) > 0)
		return;

	printk(KERN_INFO "[ttm] Finalizing pool allocator.\n");
	ttm_pool_mm_shrink_fini(&_manager);

	for (i = 0; i < NUM_POOLS; ++i)
		ttm_page_pool_free(&_manager.pools[i], FREE_ALL_PAGES);

	kobject_put(&_manager.kobj);
}

int ttm_page_alloc_debugfs(struct seq_file *m, void *data)
{
	struct ttm_page_pool *p;
	unsigned i;
	char *h[] = {"pool", "refills", "pages freed", "size"};
	if (atomic_read(&_manager.page_alloc_inited) == 0) {
		seq_printf(m, "No pool allocator running.\n");
		return 0;
	}
	seq_printf(m, "%6s %12s %13s %8s\n",
			h[0], h[1], h[2], h[3]);
	for (i = 0; i < NUM_POOLS; ++i) {
		p = &_manager.pools[i];

		seq_printf(m, "%6s %12ld %13ld %8d\n",
				p->name, p->nrefills,
				p->nfrees, p->npages);
	}
	return 0;
}
EXPORT_SYMBOL(ttm_page_alloc_debugfs);
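
Drivers can surface this through their existing debugfs hooks; a sketch using
DRM's drm_info_list machinery (the file name and wiring are illustrative):

	static struct drm_info_list ttm_debugfs_list[] = {
		{"ttm_page_pool", ttm_page_alloc_debugfs, 0, NULL},
	};

	/* e.g. from the driver's debugfs init: */
	drm_debugfs_create_files(ttm_debugfs_list, 1,
				 minor->debugfs_root, minor);
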
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index d5fd5b8faeb3..a7bab87a548b 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -39,6 +39,7 @@
 #include "ttm/ttm_module.h"
 #include "ttm/ttm_bo_driver.h"
 #include "ttm/ttm_placement.h"
+#include "ttm/ttm_page_alloc.h"
 
 static int ttm_tt_swapin(struct ttm_tt *ttm);
 
@@ -56,21 +57,6 @@ static void ttm_tt_free_page_directory(struct ttm_tt *ttm)
 	ttm->pages = NULL;
 }
 
-static struct page *ttm_tt_alloc_page(unsigned page_flags)
-{
-	gfp_t gfp_flags = GFP_USER;
-
-	if (page_flags & TTM_PAGE_FLAG_ZERO_ALLOC)
-		gfp_flags |= __GFP_ZERO;
-
-	if (page_flags & TTM_PAGE_FLAG_DMA32)
-		gfp_flags |= __GFP_DMA32;
-	else
-		gfp_flags |= __GFP_HIGHMEM;
-
-	return alloc_page(gfp_flags);
-}
-
 static void ttm_tt_free_user_pages(struct ttm_tt *ttm)
 {
 	int write;
@@ -111,15 +97,21 @@ static void ttm_tt_free_user_pages(struct ttm_tt *ttm)
 static struct page *__ttm_tt_get_page(struct ttm_tt *ttm, int index)
 {
 	struct page *p;
+	struct list_head h;
 	struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
 	int ret;
 
 	while (NULL == (p = ttm->pages[index])) {
-		p = ttm_tt_alloc_page(ttm->page_flags);
 
-		if (!p)
+		INIT_LIST_HEAD(&h);
+
+		ret = ttm_get_pages(&h, ttm->page_flags, ttm->caching_state, 1);
+
+		if (ret != 0)
 			return NULL;
 
+		p = list_first_entry(&h, struct page, lru);
+
 		ret = ttm_mem_global_alloc_page(mem_glob, p, false, false);
 		if (unlikely(ret != 0))
 			goto out_err;
@@ -228,10 +220,10 @@ static int ttm_tt_set_caching(struct ttm_tt *ttm,
 	if (ttm->caching_state == c_state)
 		return 0;
 
-	if (c_state != tt_cached) {
-		ret = ttm_tt_populate(ttm);
-		if (unlikely(ret != 0))
-			return ret;
+	if (ttm->state == tt_unpopulated) {
+		/* Change caching but don't populate */
+		ttm->caching_state = c_state;
+		return 0;
 	}
 
 	if (ttm->caching_state == tt_cached)
@@ -282,13 +274,17 @@ EXPORT_SYMBOL(ttm_tt_set_placement_caching);
 static void ttm_tt_free_alloced_pages(struct ttm_tt *ttm)
 {
 	int i;
+	unsigned count = 0;
+	struct list_head h;
 	struct page *cur_page;
 	struct ttm_backend *be = ttm->be;
 
+	INIT_LIST_HEAD(&h);
+
 	if (be)
 		be->func->clear(be);
-	(void)ttm_tt_set_caching(ttm, tt_cached);
 	for (i = 0; i < ttm->num_pages; ++i) {
+
 		cur_page = ttm->pages[i];
 		ttm->pages[i] = NULL;
 		if (cur_page) {
@@ -298,9 +294,11 @@ static void ttm_tt_free_alloced_pages(struct ttm_tt *ttm)
 			       "Leaking pages.\n");
 			ttm_mem_global_free_page(ttm->glob->mem_glob,
 						 cur_page);
-			__free_page(cur_page);
+			list_add(&cur_page->lru, &h);
+			count++;
 		}
 	}
+	ttm_put_pages(&h, count, ttm->page_flags, ttm->caching_state);
 	ttm->state = tt_unpopulated;
 	ttm->first_himem_page = ttm->num_pages;
 	ttm->last_lomem_page = -1;
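
Two consequences of the hunks above worth noting: ttm_tt_set_caching() on an
unpopulated ttm is now pure bookkeeping, since ttm_get_pages() takes the
caching state and the pool hands back pages already in the right state; and
ttm_tt_free_alloced_pages() batches every page onto a local list, so the pool
lock inside ttm_put_pages() is taken once per ttm rather than once per page.
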
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
index 825ebe3d89d5..c4f5114aee7c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
@@ -137,9 +137,6 @@ int vmw_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
 int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
 		      struct ttm_mem_type_manager *man)
 {
-	struct vmw_private *dev_priv =
-		container_of(bdev, struct vmw_private, bdev);
-
 	switch (type) {
 	case TTM_PL_SYSTEM:
 		/* System memory */
@@ -151,11 +148,7 @@ int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
 	case TTM_PL_VRAM:
 		/* "On-card" video ram */
 		man->gpu_offset = 0;
-		man->io_offset = dev_priv->vram_start;
-		man->io_size = dev_priv->vram_size;
-		man->flags = TTM_MEMTYPE_FLAG_FIXED |
-			TTM_MEMTYPE_FLAG_NEEDS_IOREMAP | TTM_MEMTYPE_FLAG_MAPPABLE;
-		man->io_addr = NULL;
+		man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_MAPPABLE;
 		man->available_caching = TTM_PL_MASK_CACHING;
 		man->default_caching = TTM_PL_FLAG_WC;
 		break;
@@ -193,6 +186,42 @@ static void vmw_swap_notify(struct ttm_buffer_object *bo)
 	vmw_dmabuf_gmr_unbind(bo);
 }
 
+static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+{
+	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
+	struct vmw_private *dev_priv = container_of(bdev, struct vmw_private, bdev);
+
+	mem->bus.addr = NULL;
+	mem->bus.is_iomem = false;
+	mem->bus.offset = 0;
+	mem->bus.size = mem->num_pages << PAGE_SHIFT;
+	mem->bus.base = 0;
+	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
+		return -EINVAL;
+	switch (mem->mem_type) {
+	case TTM_PL_SYSTEM:
+		/* System memory */
+		return 0;
+	case TTM_PL_VRAM:
+		mem->bus.offset = mem->mm_node->start << PAGE_SHIFT;
+		mem->bus.base = dev_priv->vram_start;
+		mem->bus.is_iomem = true;
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static void vmw_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+{
+}
+
+static int vmw_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
+{
+	return 0;
+}
+
 /**
  * FIXME: We're using the old vmware polling method to sync.
  * Do this with fences instead.
@@ -248,5 +277,8 @@ struct ttm_bo_driver vmw_bo_driver = {
 	.sync_obj_unref = vmw_sync_obj_unref,
 	.sync_obj_ref = vmw_sync_obj_ref,
 	.move_notify = vmw_move_notify,
-	.swap_notify = vmw_swap_notify
+	.swap_notify = vmw_swap_notify,
+	.fault_reserve_notify = &vmw_ttm_fault_reserve_notify,
+	.io_mem_reserve = &vmw_ttm_io_mem_reserve,
+	.io_mem_free = &vmw_ttm_io_mem_free,
 };
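
For reference, a consumer turns the reservation filled in above into a CPU
mapping roughly as follows (a sketch of the TTM side, not vmwgfx code; the
write-combined mapping choice is an assumption):

	if (mem->bus.is_iomem) {
		/* bus.base is the aperture start, bus.offset the BO offset */
		void __iomem *map = ioremap_wc(mem->bus.base + mem->bus.offset,
					       mem->bus.size);
		/* ... access the buffer, then iounmap(map) ... */
	}
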
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 0897359b3e4e..dbd36b8910cf 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -570,7 +570,7 @@ static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
 	 * Put BO in VRAM, only if there is space.
 	 */
 
-	ret = ttm_bo_validate(bo, &vmw_vram_sys_placement, true, false);
+	ret = ttm_bo_validate(bo, &vmw_vram_sys_placement, true, false, false);
 	if (unlikely(ret == -ERESTARTSYS))
 		return ret;
 
@@ -590,7 +590,7 @@ static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
 	 * previous contents.
 	 */
 
-	ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false);
+	ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false, false);
 	return ret;
 }
 
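
The extra boolean threaded through these call sites comes from
ttm_bo_validate() splitting its single no_wait argument into separate reserve
and GPU waits; a sketch of the post-split prototype (parameter names
assumed):

	int ttm_bo_validate(struct ttm_buffer_object *bo,
			    struct ttm_placement *placement,
			    bool interruptible,
			    bool no_wait_reserve, bool no_wait_gpu);
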
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
index a93367041cdc..80125ffc4e28 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -628,7 +628,7 @@ int vmw_dmabuf_from_vram(struct vmw_private *vmw_priv,
 	if (unlikely(ret != 0))
 		return ret;
 
-	ret = ttm_bo_validate(bo, &vmw_sys_placement, false, false);
+	ret = ttm_bo_validate(bo, &vmw_sys_placement, false, false, false);
 	ttm_bo_unreserve(bo);
 
 	return ret;
@@ -652,7 +652,7 @@ int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv,
 	if (unlikely(ret != 0))
 		goto err_unlock;
 
-	ret = ttm_bo_validate(bo, &ne_placement, false, false);
+	ret = ttm_bo_validate(bo, &ne_placement, false, false, false);
 	ttm_bo_unreserve(bo);
 err_unlock:
 	ttm_write_unlock(&vmw_priv->active_master->lock);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 31f9afed0a63..bbc7c4c30bc7 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -752,14 +752,8 @@ err_not_scanout:
 	return NULL;
 }
 
-static int vmw_kms_fb_changed(struct drm_device *dev)
-{
-	return 0;
-}
-
 static struct drm_mode_config_funcs vmw_kms_funcs = {
 	.fb_create = vmw_kms_fb_create,
-	.fb_changed = vmw_kms_fb_changed,
 };
 
 int vmw_kms_init(struct vmw_private *dev_priv)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
index 5b6eabeb7f51..ad566c85b075 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
@@ -118,7 +118,7 @@ static int vmw_dmabuf_pin_in_vram(struct vmw_private *dev_priv,
 	if (pin)
 		overlay_placement = &vmw_vram_ne_placement;
 
-	ret = ttm_bo_validate(bo, overlay_placement, interruptible, false);
+	ret = ttm_bo_validate(bo, overlay_placement, interruptible, false, false);
 
 	ttm_bo_unreserve(bo);
 