author    Jiri Kosina <jkosina@suse.cz>    2011-02-15 04:24:31 -0500
committer Jiri Kosina <jkosina@suse.cz>    2011-02-15 04:24:31 -0500
commit    0a9d59a2461477bd9ed143c01af9df3f8f00fa81 (patch)
tree      df997d1cfb0786427a0df1fbd6f0640fa4248cf4 /drivers/gpu
parent    a23ce6da9677d245aa0aadc99f4197030350ab54 (diff)
parent    795abaf1e4e188c4171e3cd3dbb11a9fcacaf505 (diff)
Merge branch 'master' into for-next
Diffstat (limited to 'drivers/gpu')
-rw-r--r--  drivers/gpu/drm/Kconfig | 6
-rw-r--r--  drivers/gpu/drm/drm_crtc.c | 20
-rw-r--r--  drivers/gpu/drm/drm_crtc_helper.c | 33
-rw-r--r--  drivers/gpu/drm/drm_fb_helper.c | 45
-rw-r--r--  drivers/gpu/drm/drm_irq.c | 7
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c | 35
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 18
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 6
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 12
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c | 27
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 47
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 6
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.c | 17
-rw-r--r--  drivers/gpu/drm/i915/intel_crt.c | 10
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 59
-rw-r--r--  drivers/gpu/drm/i915/intel_fb.c | 1
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c | 8
-rw-r--r--  drivers/gpu/drm/i915/intel_opregion.c | 3
-rw-r--r--  drivers/gpu/drm/i915/intel_panel.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c | 82
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.h | 4
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c | 46
-rw-r--r--  drivers/gpu/drm/nouveau/Kconfig | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bios.c | 15
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drv.c | 8
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drv.h | 18
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fbcon.c | 5
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_mem.c | 26
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_mm.c | 182
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_mm.h | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_pm.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_temp.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nv40_graph.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nv40_grctx.c | 21
-rw-r--r--  drivers/gpu/drm/nouveau/nv40_mc.c | 14
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_evo.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_graph.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_instmem.c | 7
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_vm.c | 5
-rw-r--r--  drivers/gpu/drm/nouveau/nvc0_graph.c | 26
-rw-r--r--  drivers/gpu/drm/nouveau/nvc0_grctx.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvc0_vm.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/atombios_crtc.c | 57
-rw-r--r--  drivers/gpu/drm/radeon/atombios_dp.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/evergreen.c | 74
-rw-r--r--  drivers/gpu/drm/radeon/evergreen_blit_kms.c | 43
-rw-r--r--  drivers/gpu/drm/radeon/evergreend.h | 2
-rw-r--r--  drivers/gpu/drm/radeon/r100.c | 23
-rw-r--r--  drivers/gpu/drm/radeon/r300.c | 18
-rw-r--r--  drivers/gpu/drm/radeon/r420.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/r520.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/r600.c | 34
-rw-r--r--  drivers/gpu/drm/radeon/r600_reg.h | 6
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h | 12
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.h | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_atombios.c | 73
-rw-r--r--  drivers/gpu/drm/radeon/radeon_combios.c | 11
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c | 10
-rw-r--r--  drivers/gpu/drm/radeon/radeon_display.c | 132
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/radeon_encoders.c | 8
-rw-r--r--  drivers/gpu/drm/radeon/radeon_fb.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_irq_kms.c | 3
-rw-r--r--  drivers/gpu/drm/radeon/radeon_kms.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/radeon_legacy_crtc.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/radeon_mode.h | 23
-rw-r--r--  drivers/gpu/drm/radeon/radeon_pm.c | 5
-rw-r--r--  drivers/gpu/drm/radeon/radeon_reg.h | 2
-rw-r--r--  drivers/gpu/drm/radeon/reg_srcs/evergreen | 2
-rw-r--r--  drivers/gpu/drm/radeon/rs400.c | 15
-rw-r--r--  drivers/gpu/drm/radeon/rs600.c | 16
-rw-r--r--  drivers/gpu/drm/radeon/rv515.c | 10
-rw-r--r--  drivers/gpu/drm/radeon/rv770.c | 28
-rw-r--r--  drivers/gpu/stub/Kconfig | 3
-rw-r--r--  drivers/gpu/vga/Kconfig | 2
-rw-r--r--  drivers/gpu/vga/vgaarb.c | 2
79 files changed, 913 insertions(+), 590 deletions(-)
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 7af443672626..0902d4460039 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -23,7 +23,7 @@ config DRM_KMS_HELPER
 	tristate
 	depends on DRM
 	select FB
-	select FRAMEBUFFER_CONSOLE if !EMBEDDED
+	select FRAMEBUFFER_CONSOLE if !EXPERT
 	help
 	  FB and CRTC helpers for KMS drivers.
 
@@ -100,14 +100,16 @@ config DRM_I830
 config DRM_I915
 	tristate "i915 driver"
 	depends on AGP_INTEL
+	# we need shmfs for the swappable backing store, and in particular
+	# the shmem_readpage() which depends upon tmpfs
 	select SHMEM
+	select TMPFS
 	select DRM_KMS_HELPER
 	select FB_CFB_FILLRECT
 	select FB_CFB_COPYAREA
 	select FB_CFB_IMAGEBLIT
 	# i915 depends on ACPI_VIDEO when ACPI is enabled
 	# but for select to work, need to select ACPI_VIDEO's dependencies, ick
-	select VIDEO_OUTPUT_CONTROL if ACPI
 	select BACKLIGHT_CLASS_DEVICE if ACPI
 	select INPUT if ACPI
 	select ACPI_VIDEO if ACPI
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 2baa6708e44c..654faa803dcb 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -2674,3 +2674,23 @@ out:
 	mutex_unlock(&dev->mode_config.mutex);
 	return ret;
 }
+
+void drm_mode_config_reset(struct drm_device *dev)
+{
+	struct drm_crtc *crtc;
+	struct drm_encoder *encoder;
+	struct drm_connector *connector;
+
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
+		if (crtc->funcs->reset)
+			crtc->funcs->reset(crtc);
+
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
+		if (encoder->funcs->reset)
+			encoder->funcs->reset(encoder);
+
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+		if (connector->funcs->reset)
+			connector->funcs->reset(connector);
+}
+EXPORT_SYMBOL(drm_mode_config_reset);
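For orientation, a minimal sketch (illustrative only, not part of this merge) of how a KMS driver plugs into the new drm_mode_config_reset() path: it supplies per-object ->reset callbacks and calls the helper from its resume code, exactly as the intel_crt, intel_crtc and i915_drm_thaw hunks later in this diff do. The example_* names are placeholders.

/* Sketch: wiring a connector ->reset hook and invoking it on resume. */
#include "drmP.h"
#include "drm_crtc.h"
#include "drm_crtc_helper.h"

static void example_connector_reset(struct drm_connector *connector)
{
	/* Re-arm whatever per-connector state must be recomputed after
	 * suspend; intel_crt_reset() below forces a hotplug re-detect. */
}

static const struct drm_connector_funcs example_connector_funcs = {
	.reset = example_connector_reset,	/* called by drm_mode_config_reset() */
	.dpms = drm_helper_connector_dpms,
	.fill_modes = drm_helper_probe_single_connector_modes,
};

static int example_resume(struct drm_device *dev)
{
	drm_mode_config_reset(dev);	/* walks every CRTC/encoder/connector */
	drm_irq_install(dev);
	return 0;
}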
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 952b3d4fb2a6..92369655dca3 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -343,13 +343,12 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
 	struct drm_encoder *encoder;
 	bool ret = true;
 
-	adjusted_mode = drm_mode_duplicate(dev, mode);
-
 	crtc->enabled = drm_helper_crtc_in_use(crtc);
-
 	if (!crtc->enabled)
 		return true;
 
+	adjusted_mode = drm_mode_duplicate(dev, mode);
+
 	saved_hwmode = crtc->hwmode;
 	saved_mode = crtc->mode;
 	saved_x = crtc->x;
@@ -437,10 +436,9 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
 	 */
 	drm_calc_timestamping_constants(crtc);
 
-	/* XXX free adjustedmode */
-	drm_mode_destroy(dev, adjusted_mode);
 	/* FIXME: add subpixel order */
 done:
+	drm_mode_destroy(dev, adjusted_mode);
 	if (!ret) {
 		crtc->hwmode = saved_hwmode;
 		crtc->mode = saved_mode;
@@ -497,14 +495,17 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
 
 	crtc_funcs = set->crtc->helper_private;
 
+	if (!set->mode)
+		set->fb = NULL;
+
 	if (set->fb) {
 		DRM_DEBUG_KMS("[CRTC:%d] [FB:%d] #connectors=%d (x y) (%i %i)\n",
 				set->crtc->base.id, set->fb->base.id,
 				(int)set->num_connectors, set->x, set->y);
 	} else {
-		DRM_DEBUG_KMS("[CRTC:%d] [NOFB] #connectors=%d (x y) (%i %i)\n",
-				set->crtc->base.id, (int)set->num_connectors,
-				set->x, set->y);
+		DRM_DEBUG_KMS("[CRTC:%d] [NOFB]\n", set->crtc->base.id);
+		set->mode = NULL;
+		set->num_connectors = 0;
 	}
 
 	dev = set->crtc->dev;
@@ -649,8 +650,8 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
 			mode_changed = true;
 
 	if (mode_changed) {
-		set->crtc->enabled = (set->mode != NULL);
-		if (set->mode != NULL) {
+		set->crtc->enabled = drm_helper_crtc_in_use(set->crtc);
+		if (set->crtc->enabled) {
 			DRM_DEBUG_KMS("attempting to set mode from"
 					" userspace\n");
 			drm_mode_debug_printmodeline(set->mode);
@@ -665,6 +666,12 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
 				ret = -EINVAL;
 				goto fail;
 			}
+			DRM_DEBUG_KMS("Setting connector DPMS state to on\n");
+			for (i = 0; i < set->num_connectors; i++) {
+				DRM_DEBUG_KMS("\t[CONNECTOR:%d:%s] set DPMS on\n", set->connectors[i]->base.id,
+					      drm_get_connector_name(set->connectors[i]));
+				set->connectors[i]->dpms = DRM_MODE_DPMS_ON;
+			}
 		}
 		drm_helper_disable_unused_functions(dev);
 	} else if (fb_changed) {
@@ -681,12 +688,6 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
 			goto fail;
 		}
 	}
-	DRM_DEBUG_KMS("Setting connector DPMS state to on\n");
-	for (i = 0; i < set->num_connectors; i++) {
-		DRM_DEBUG_KMS("\t[CONNECTOR:%d:%s] set DPMS on\n", set->connectors[i]->base.id,
-			      drm_get_connector_name(set->connectors[i]));
-		set->connectors[i]->dpms = DRM_MODE_DPMS_ON;
-	}
 
 	kfree(save_connectors);
 	kfree(save_encoders);
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 0307d601f5e5..6977a1ce9d98 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -607,25 +607,6 @@ void drm_fb_helper_fini(struct drm_fb_helper *fb_helper)
 }
 EXPORT_SYMBOL(drm_fb_helper_fini);
 
-void drm_fb_helper_fill_fix(struct fb_info *info, struct drm_framebuffer *fb)
-{
-	info->fix.type = FB_TYPE_PACKED_PIXELS;
-	info->fix.visual = fb->depth == 8 ? FB_VISUAL_PSEUDOCOLOR :
-		FB_VISUAL_TRUECOLOR;
-	info->fix.mmio_start = 0;
-	info->fix.mmio_len = 0;
-	info->fix.type_aux = 0;
-	info->fix.xpanstep = 1; /* doing it in hw */
-	info->fix.ypanstep = 1; /* doing it in hw */
-	info->fix.ywrapstep = 0;
-	info->fix.accel = FB_ACCEL_NONE;
-	info->fix.type_aux = 0;
-
-	info->fix.line_length = fb->pitch;
-	return;
-}
-EXPORT_SYMBOL(drm_fb_helper_fill_fix);
-
 static int setcolreg(struct drm_crtc *crtc, u16 red, u16 green,
 		     u16 blue, u16 regno, struct fb_info *info)
 {
@@ -835,7 +816,6 @@ int drm_fb_helper_set_par(struct fb_info *info)
 			mutex_unlock(&dev->mode_config.mutex);
 			return ret;
 		}
-		drm_fb_helper_fill_fix(info, fb_helper->fb);
 	}
 	mutex_unlock(&dev->mode_config.mutex);
 
@@ -973,7 +953,6 @@ int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
 
 	if (new_fb) {
 		info->var.pixclock = 0;
-		drm_fb_helper_fill_fix(info, fb_helper->fb);
 		if (register_framebuffer(info) < 0) {
 			return -EINVAL;
 		}
@@ -1000,6 +979,26 @@ int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
 }
 EXPORT_SYMBOL(drm_fb_helper_single_fb_probe);
 
+void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch,
+			    uint32_t depth)
+{
+	info->fix.type = FB_TYPE_PACKED_PIXELS;
+	info->fix.visual = depth == 8 ? FB_VISUAL_PSEUDOCOLOR :
+		FB_VISUAL_TRUECOLOR;
+	info->fix.mmio_start = 0;
+	info->fix.mmio_len = 0;
+	info->fix.type_aux = 0;
+	info->fix.xpanstep = 1; /* doing it in hw */
+	info->fix.ypanstep = 1; /* doing it in hw */
+	info->fix.ywrapstep = 0;
+	info->fix.accel = FB_ACCEL_NONE;
+	info->fix.type_aux = 0;
+
+	info->fix.line_length = pitch;
+	return;
+}
+EXPORT_SYMBOL(drm_fb_helper_fill_fix);
+
 void drm_fb_helper_fill_var(struct fb_info *info, struct drm_fb_helper *fb_helper,
 			    uint32_t fb_width, uint32_t fb_height)
 {
@@ -1534,11 +1533,11 @@ bool drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper)
 }
 EXPORT_SYMBOL(drm_fb_helper_hotplug_event);
 
-/* The Kconfig DRM_KMS_HELPER selects FRAMEBUFFER_CONSOLE (if !EMBEDDED)
+/* The Kconfig DRM_KMS_HELPER selects FRAMEBUFFER_CONSOLE (if !EXPERT)
  * but the module doesn't depend on any fb console symbols. At least
  * attempt to load fbcon to avoid leaving the system without a usable console.
  */
-#if defined(CONFIG_FRAMEBUFFER_CONSOLE_MODULE) && !defined(CONFIG_EMBEDDED)
+#if defined(CONFIG_FRAMEBUFFER_CONSOLE_MODULE) && !defined(CONFIG_EXPERT)
 static int __init drm_fb_helper_modinit(void)
 {
 	const char *name = "fbcon";
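An illustrative aside (not from the merge): drm_fb_helper_fill_fix() now takes the pitch and depth directly instead of a struct drm_framebuffer, and it is no longer called from the set_par/probe paths but by the driver itself, as the intel_fb.c hunk below shows. A hedged sketch of the new calling convention, with example_* names as placeholders:

/* Sketch of a driver fbdev-create path under the new helper signature. */
static int example_fbdev_create(struct drm_fb_helper *helper,
				struct drm_fb_helper_surface_size *sizes,
				struct fb_info *info,
				struct drm_framebuffer *fb)
{
	/* Previously: drm_fb_helper_fill_fix(info, fb);
	 * Now the caller passes pitch and depth explicitly: */
	drm_fb_helper_fill_fix(info, fb->pitch, fb->depth);
	drm_fb_helper_fill_var(info, helper, sizes->fb_width, sizes->fb_height);
	return 0;
}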
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 0054e957203f..3dadfa2a8528 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -1250,7 +1250,7 @@ void drm_handle_vblank_events(struct drm_device *dev, int crtc)
  * Drivers should call this routine in their vblank interrupt handlers to
  * update the vblank counter and send any signals that may be pending.
  */
-void drm_handle_vblank(struct drm_device *dev, int crtc)
+bool drm_handle_vblank(struct drm_device *dev, int crtc)
 {
 	u32 vblcount;
 	s64 diff_ns;
@@ -1258,7 +1258,7 @@ void drm_handle_vblank(struct drm_device *dev, int crtc)
 	unsigned long irqflags;
 
 	if (!dev->num_crtcs)
-		return;
+		return false;
 
 	/* Need timestamp lock to prevent concurrent execution with
 	 * vblank enable/disable, as this would cause inconsistent
@@ -1269,7 +1269,7 @@ void drm_handle_vblank(struct drm_device *dev, int crtc)
 	/* Vblank irq handling disabled. Nothing to do. */
 	if (!dev->vblank_enabled[crtc]) {
 		spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags);
-		return;
+		return false;
 	}
 
 	/* Fetch corresponding timestamp for this vblank interval from
@@ -1311,5 +1311,6 @@ void drm_handle_vblank(struct drm_device *dev, int crtc)
 	drm_handle_vblank_events(dev, crtc);
 
 	spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags);
+	return true;
 }
 EXPORT_SYMBOL(drm_handle_vblank);
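A short illustrative sketch (mirroring the i915_irq.c hunk further down; names other than drm_handle_vblank() are placeholders) of how a driver IRQ handler can use the new boolean return value to skip page-flip bookkeeping when vblank handling is disabled:

static void example_pipe_irq(struct drm_device *dev, int pipe, u32 pipe_stats)
{
	/* drm_handle_vblank() now returns false when vblank IRQs are not
	 * enabled for this crtc, so flip-completion work only runs when a
	 * vblank was actually accounted. */
	if ((pipe_stats & EXAMPLE_VBLANK_STATUS_BIT) &&
	    drm_handle_vblank(dev, pipe))
		example_finish_page_flip(dev, pipe);
}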
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 19a3d58044dd..3601466c5502 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -703,7 +703,7 @@ static void print_error_buffers(struct seq_file *m,
 	seq_printf(m, "%s [%d]:\n", name, count);
 
 	while (count--) {
-		seq_printf(m, "  %08x %8zd %04x %04x %08x%s%s%s%s%s%s",
+		seq_printf(m, "  %08x %8u %04x %04x %08x%s%s%s%s%s%s",
 			   err->gtt_offset,
 			   err->size,
 			   err->read_domains,
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 844f3c972b04..17bd766f2081 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -152,7 +152,7 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
-	struct intel_ring_buffer *ring = LP_RING(dev_priv);
+	int ret;
 
 	master_priv->sarea = drm_getsarea(dev);
 	if (master_priv->sarea) {
@@ -163,33 +163,22 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
 	}
 
 	if (init->ring_size != 0) {
-		if (ring->obj != NULL) {
+		if (LP_RING(dev_priv)->obj != NULL) {
 			i915_dma_cleanup(dev);
 			DRM_ERROR("Client tried to initialize ringbuffer in "
 				  "GEM mode\n");
 			return -EINVAL;
 		}
 
-		ring->size = init->ring_size;
-
-		ring->map.offset = init->ring_start;
-		ring->map.size = init->ring_size;
-		ring->map.type = 0;
-		ring->map.flags = 0;
-		ring->map.mtrr = 0;
-
-		drm_core_ioremap_wc(&ring->map, dev);
-
-		if (ring->map.handle == NULL) {
+		ret = intel_render_ring_init_dri(dev,
+						 init->ring_start,
+						 init->ring_size);
+		if (ret) {
 			i915_dma_cleanup(dev);
-			DRM_ERROR("can not ioremap virtual address for"
-				  " ring buffer\n");
-			return -ENOMEM;
+			return ret;
 		}
 	}
 
-	ring->virtual_start = ring->map.handle;
-
 	dev_priv->cpp = init->cpp;
 	dev_priv->back_offset = init->back_offset;
 	dev_priv->front_offset = init->front_offset;
@@ -1226,9 +1215,15 @@ static int i915_load_modeset_init(struct drm_device *dev)
 	if (ret)
 		DRM_INFO("failed to find VBIOS tables\n");
 
-	/* if we have > 1 VGA cards, then disable the radeon VGA resources */
+	/* If we have > 1 VGA cards, then we need to arbitrate access
+	 * to the common VGA resources.
+	 *
+	 * If we are a secondary display controller (!PCI_DISPLAY_CLASS_VGA),
+	 * then we do not take part in VGA arbitration and the
+	 * vga_client_register() fails with -ENODEV.
+	 */
 	ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode);
-	if (ret)
+	if (ret && ret != -ENODEV)
 		goto cleanup_ringbuffer;
 
 	intel_register_dsm_handler();
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 0de75a23f8e7..cfb56d0ff367 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -49,6 +49,9 @@ module_param_named(powersave, i915_powersave, int, 0600);
 unsigned int i915_lvds_downclock = 0;
 module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400);
 
+unsigned int i915_panel_use_ssc = 1;
+module_param_named(lvds_use_ssc, i915_panel_use_ssc, int, 0600);
+
 bool i915_try_reset = true;
 module_param_named(reset, i915_try_reset, bool, 0600);
 
@@ -57,7 +60,7 @@ extern int intel_agp_enabled;
 
 #define INTEL_VGA_DEVICE(id, info) {		\
 	.class = PCI_CLASS_DISPLAY_VGA << 8,	\
-	.class_mask = 0xffff00,			\
+	.class_mask = 0xff0000,			\
 	.vendor = 0x8086,			\
 	.device = id,				\
 	.subvendor = PCI_ANY_ID,		\
@@ -351,6 +354,7 @@ static int i915_drm_thaw(struct drm_device *dev)
 		error = i915_gem_init_ringbuffer(dev);
 		mutex_unlock(&dev->struct_mutex);
 
+		drm_mode_config_reset(dev);
 		drm_irq_install(dev);
 
 		/* Resume the modeset for every activated CRTC */
@@ -539,6 +543,7 @@ int i915_reset(struct drm_device *dev, u8 flags)
 
 		mutex_unlock(&dev->struct_mutex);
 		drm_irq_uninstall(dev);
+		drm_mode_config_reset(dev);
 		drm_irq_install(dev);
 		mutex_lock(&dev->struct_mutex);
 	}
@@ -563,6 +568,14 @@ int i915_reset(struct drm_device *dev, u8 flags)
 static int __devinit
 i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
+	/* Only bind to function 0 of the device. Early generations
+	 * used function 1 as a placeholder for multi-head. This causes
+	 * us confusion instead, especially on the systems where both
+	 * functions have the same PCI-ID!
+	 */
+	if (PCI_FUNC(pdev->devfn))
+		return -ENODEV;
+
 	return drm_get_pci_dev(pdev, ent, &driver);
 }
 
@@ -749,6 +762,9 @@ static int __init i915_init(void)
 	driver.driver_features &= ~DRIVER_MODESET;
 #endif
 
+	if (!(driver.driver_features & DRIVER_MODESET))
+		driver.get_vblank_timestamp = NULL;
+
 	return drm_init(&driver);
 }
 
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 385fc7ec39d3..a0149c619cdd 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -543,8 +543,11 @@ typedef struct drm_i915_private {
 	/** List of all objects in gtt_space. Used to restore gtt
 	 *  mappings on resume */
 	struct list_head gtt_list;
-	/** End of mappable part of GTT */
+
+	/** Usable portion of the GTT for GEM */
+	unsigned long gtt_start;
 	unsigned long gtt_mappable_end;
+	unsigned long gtt_end;
 
 	struct io_mapping *gtt_mapping;
 	int gtt_mtrr;
@@ -954,6 +957,7 @@ extern int i915_max_ioctl;
 extern unsigned int i915_fbpercrtc;
 extern unsigned int i915_powersave;
 extern unsigned int i915_lvds_downclock;
+extern unsigned int i915_panel_use_ssc;
 
 extern int i915_suspend(struct drm_device *dev, pm_message_t state);
 extern int i915_resume(struct drm_device *dev);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 3dfc848ff755..cf4f74c7c6fb 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -140,12 +140,16 @@ void i915_gem_do_init(struct drm_device *dev,
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 
-	drm_mm_init(&dev_priv->mm.gtt_space, start,
-		    end - start);
+	drm_mm_init(&dev_priv->mm.gtt_space, start, end - start);
 
+	dev_priv->mm.gtt_start = start;
+	dev_priv->mm.gtt_mappable_end = mappable_end;
+	dev_priv->mm.gtt_end = end;
 	dev_priv->mm.gtt_total = end - start;
 	dev_priv->mm.mappable_gtt_total = min(end, mappable_end) - start;
-	dev_priv->mm.gtt_mappable_end = mappable_end;
+
+	/* Take over this portion of the GTT */
+	intel_gtt_clear_range(start / PAGE_SIZE, (end-start) / PAGE_SIZE);
 }
 
 int
@@ -1857,7 +1861,7 @@ i915_gem_retire_requests_ring(struct drm_device *dev,
 
 	seqno = ring->get_seqno(ring);
 
-	for (i = 0; i < I915_NUM_RINGS; i++)
+	for (i = 0; i < ARRAY_SIZE(ring->sync_seqno); i++)
 		if (seqno >= ring->sync_seqno[i])
 			ring->sync_seqno[i] = 0;
 
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index e69834341ef0..d2f445e825f2 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -464,8 +464,6 @@ i915_gem_execbuffer_relocate(struct drm_device *dev,
 	int ret;
 
 	list_for_each_entry(obj, objects, exec_list) {
-		obj->base.pending_read_domains = 0;
-		obj->base.pending_write_domain = 0;
 		ret = i915_gem_execbuffer_relocate_object(obj, eb);
 		if (ret)
 			return ret;
@@ -505,6 +503,9 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
 			list_move(&obj->exec_list, &ordered_objects);
 		else
 			list_move_tail(&obj->exec_list, &ordered_objects);
+
+		obj->base.pending_read_domains = 0;
+		obj->base.pending_write_domain = 0;
 	}
 	list_splice(&ordered_objects, objects);
 
@@ -636,6 +637,7 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
 {
 	struct drm_i915_gem_relocation_entry *reloc;
 	struct drm_i915_gem_object *obj;
+	int *reloc_offset;
 	int i, total, ret;
 
 	/* We may process another execbuffer during the unlock... */
@@ -653,8 +655,11 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
 	for (i = 0; i < count; i++)
 		total += exec[i].relocation_count;
 
+	reloc_offset = drm_malloc_ab(count, sizeof(*reloc_offset));
 	reloc = drm_malloc_ab(total, sizeof(*reloc));
-	if (reloc == NULL) {
+	if (reloc == NULL || reloc_offset == NULL) {
+		drm_free_large(reloc);
+		drm_free_large(reloc_offset);
 		mutex_lock(&dev->struct_mutex);
 		return -ENOMEM;
 	}
@@ -672,6 +677,7 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
 			goto err;
 		}
 
+		reloc_offset[i] = total;
 		total += exec[i].relocation_count;
 	}
 
@@ -705,17 +711,12 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
 	if (ret)
 		goto err;
 
-	total = 0;
 	list_for_each_entry(obj, objects, exec_list) {
-		obj->base.pending_read_domains = 0;
-		obj->base.pending_write_domain = 0;
+		int offset = obj->exec_entry - exec;
 		ret = i915_gem_execbuffer_relocate_object_slow(obj, eb,
-							       reloc + total);
+							       reloc + reloc_offset[offset]);
 		if (ret)
 			goto err;
-
-		total += exec->relocation_count;
-		exec++;
 	}
 
 	/* Leave the user relocations as are, this is the painfully slow path,
@@ -726,6 +727,7 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
 
 err:
 	drm_free_large(reloc);
+	drm_free_large(reloc_offset);
 	return ret;
 }
 
@@ -770,7 +772,8 @@ i915_gem_execbuffer_sync_rings(struct drm_i915_gem_object *obj,
 	if (from == NULL || to == from)
 		return 0;
 
-	if (INTEL_INFO(obj->base.dev)->gen < 6)
+	/* XXX gpu semaphores are currently causing hard hangs on SNB mobile */
+	if (INTEL_INFO(obj->base.dev)->gen < 6 || IS_MOBILE(obj->base.dev))
 		return i915_gem_object_wait_rendering(obj, true);
 
 	idx = intel_ring_sync_index(from, to);
@@ -1172,7 +1175,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 		goto err;
 
 	seqno = i915_gem_next_request_seqno(dev, ring);
-	for (i = 0; i < I915_NUM_RINGS-1; i++) {
+	for (i = 0; i < ARRAY_SIZE(ring->sync_seqno); i++) {
 		if (seqno < ring->sync_seqno[i]) {
 			/* The GPU can not handle its semaphore value wrapping,
 			 * so every billion or so execbuffers, we need to stall
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 70433ae50ac8..b0abdc64aa9f 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -34,6 +34,10 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj;
 
+	/* First fill our portion of the GTT with scratch pages */
+	intel_gtt_clear_range(dev_priv->mm.gtt_start / PAGE_SIZE,
+			      (dev_priv->mm.gtt_end - dev_priv->mm.gtt_start) / PAGE_SIZE);
+
 	list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
 		i915_gem_clflush_object(obj);
 
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index e418e8bb61e6..97f946dcc1aa 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -274,24 +274,35 @@ int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
 	return ret;
 }
 
-int i915_get_vblank_timestamp(struct drm_device *dev, int crtc,
+int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
 			      int *max_error,
 			      struct timeval *vblank_time,
 			      unsigned flags)
 {
-	struct drm_crtc *drmcrtc;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_crtc *crtc;
 
-	if (crtc < 0 || crtc >= dev->num_crtcs) {
-		DRM_ERROR("Invalid crtc %d\n", crtc);
+	if (pipe < 0 || pipe >= dev_priv->num_pipe) {
+		DRM_ERROR("Invalid crtc %d\n", pipe);
 		return -EINVAL;
 	}
 
 	/* Get drm_crtc to timestamp: */
-	drmcrtc = intel_get_crtc_for_pipe(dev, crtc);
+	crtc = intel_get_crtc_for_pipe(dev, pipe);
+	if (crtc == NULL) {
+		DRM_ERROR("Invalid crtc %d\n", pipe);
+		return -EINVAL;
+	}
+
+	if (!crtc->enabled) {
+		DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
+		return -EBUSY;
+	}
 
 	/* Helper routine in DRM core does all the work: */
-	return drm_calc_vbltimestamp_from_scanoutpos(dev, crtc, max_error,
-						     vblank_time, flags, drmcrtc);
+	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
+						     vblank_time, flags,
+						     crtc);
 }
 
 /*
@@ -348,8 +359,12 @@ static void notify_ring(struct drm_device *dev,
 			struct intel_ring_buffer *ring)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 seqno = ring->get_seqno(ring);
+	u32 seqno;
 
+	if (ring->obj == NULL)
+		return;
+
+	seqno = ring->get_seqno(ring);
 	trace_i915_gem_request_complete(dev, seqno);
 
 	ring->irq_seqno = seqno;
@@ -720,7 +735,7 @@ i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
 		if (obj->ring != ring)
 			continue;
 
-		if (!i915_seqno_passed(obj->last_rendering_seqno, seqno))
+		if (i915_seqno_passed(seqno, obj->last_rendering_seqno))
 			continue;
 
 		if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0)
@@ -831,6 +846,8 @@ static void i915_capture_error_state(struct drm_device *dev)
 			i++;
 	error->pinned_bo_count = i - error->active_bo_count;
 
+	error->active_bo = NULL;
+	error->pinned_bo = NULL;
 	if (i) {
 		error->active_bo = kmalloc(sizeof(*error->active_bo)*i,
 					   GFP_ATOMIC);
@@ -1179,18 +1196,18 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
 			intel_finish_page_flip_plane(dev, 1);
 		}
 
-		if (pipea_stats & vblank_status) {
+		if (pipea_stats & vblank_status &&
+		    drm_handle_vblank(dev, 0)) {
 			vblank++;
-			drm_handle_vblank(dev, 0);
 			if (!dev_priv->flip_pending_is_done) {
 				i915_pageflip_stall_check(dev, 0);
 				intel_finish_page_flip(dev, 0);
 			}
 		}
 
-		if (pipeb_stats & vblank_status) {
+		if (pipeb_stats & vblank_status &&
+		    drm_handle_vblank(dev, 1)) {
 			vblank++;
-			drm_handle_vblank(dev, 1);
 			if (!dev_priv->flip_pending_is_done) {
 				i915_pageflip_stall_check(dev, 1);
 				intel_finish_page_flip(dev, 1);
@@ -1278,12 +1295,12 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr)
 	if (master_priv->sarea_priv)
 		master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
 
-	ret = -ENODEV;
 	if (ring->irq_get(ring)) {
 		DRM_WAIT_ON(ret, ring->irq_queue, 3 * DRM_HZ,
 			    READ_BREADCRUMB(dev_priv) >= irq_nr);
 		ring->irq_put(ring);
-	}
+	} else if (wait_for(READ_BREADCRUMB(dev_priv) >= irq_nr, 3000))
+		ret = -EBUSY;
 
 	if (ret == -EBUSY) {
 		DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 40a407f41f61..5cfc68940f17 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -513,6 +513,10 @@
 #define GEN6_BLITTER_SYNC_STATUS (1 << 24)
 #define GEN6_BLITTER_USER_INTERRUPT (1 << 22)
 
+#define GEN6_BLITTER_ECOSKPD 0x221d0
+#define GEN6_BLITTER_LOCK_SHIFT 16
+#define GEN6_BLITTER_FBC_NOTIFY (1<<3)
+
 #define GEN6_BSD_SLEEP_PSMI_CONTROL 0x12050
 #define GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK (1 << 16)
 #define GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_DISABLE (1 << 0)
@@ -2626,6 +2630,8 @@
 #define DISPLAY_PORT_PLL_BIOS_2 0x46014
 
 #define PCH_DSPCLK_GATE_D 0x42020
+# define DPFCUNIT_CLOCK_GATE_DISABLE (1 << 9)
+# define DPFCRUNIT_CLOCK_GATE_DISABLE (1 << 8)
 # define DPFDUNIT_CLOCK_GATE_DISABLE (1 << 7)
 # define DPARBUNIT_CLOCK_GATE_DISABLE (1 << 5)
 
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index b0b1200ed650..0b44956c336b 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -264,17 +264,12 @@ parse_general_features(struct drm_i915_private *dev_priv,
 		dev_priv->int_crt_support = general->int_crt_support;
 		dev_priv->lvds_use_ssc = general->enable_ssc;
 
-		if (dev_priv->lvds_use_ssc) {
-			if (IS_I85X(dev))
-				dev_priv->lvds_ssc_freq =
-					general->ssc_freq ? 66 : 48;
-			else if (IS_GEN5(dev) || IS_GEN6(dev))
-				dev_priv->lvds_ssc_freq =
-					general->ssc_freq ? 100 : 120;
-			else
-				dev_priv->lvds_ssc_freq =
-					general->ssc_freq ? 100 : 96;
-		}
+		if (IS_I85X(dev))
+			dev_priv->lvds_ssc_freq = general->ssc_freq ? 66 : 48;
+		else if (IS_GEN5(dev) || IS_GEN6(dev))
+			dev_priv->lvds_ssc_freq = general->ssc_freq ? 100 : 120;
+		else
+			dev_priv->lvds_ssc_freq = general->ssc_freq ? 100 : 96;
 	}
 }
 
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 17035b87ee46..8a77ff4a7237 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -535,6 +535,15 @@ static int intel_crt_set_property(struct drm_connector *connector,
 	return 0;
 }
 
+static void intel_crt_reset(struct drm_connector *connector)
+{
+	struct drm_device *dev = connector->dev;
+	struct intel_crt *crt = intel_attached_crt(connector);
+
+	if (HAS_PCH_SPLIT(dev))
+		crt->force_hotplug_required = 1;
+}
+
 /*
  * Routines for controlling stuff on the analog port
  */
@@ -548,6 +557,7 @@ static const struct drm_encoder_helper_funcs intel_crt_helper_funcs = {
 };
 
 static const struct drm_connector_funcs intel_crt_connector_funcs = {
+	.reset = intel_crt_reset,
 	.dpms = drm_helper_connector_dpms,
 	.detect = intel_crt_detect,
 	.fill_modes = drm_helper_probe_single_connector_modes,
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 25d96889d7d2..7e42aa586504 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -1213,6 +1213,26 @@ static bool g4x_fbc_enabled(struct drm_device *dev)
 	return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
 }
 
+static void sandybridge_blit_fbc_update(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 blt_ecoskpd;
+
+	/* Make sure blitter notifies FBC of writes */
+	__gen6_force_wake_get(dev_priv);
+	blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD);
+	blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY <<
+		GEN6_BLITTER_LOCK_SHIFT;
+	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
+	blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY;
+	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
+	blt_ecoskpd &= ~(GEN6_BLITTER_FBC_NOTIFY <<
+			 GEN6_BLITTER_LOCK_SHIFT);
+	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
+	POSTING_READ(GEN6_BLITTER_ECOSKPD);
+	__gen6_force_wake_put(dev_priv);
+}
+
 static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
 {
 	struct drm_device *dev = crtc->dev;
@@ -1266,6 +1286,7 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
 		I915_WRITE(SNB_DPFC_CTL_SA,
 			   SNB_CPU_FENCE_ENABLE | dev_priv->cfb_fence);
 		I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
+		sandybridge_blit_fbc_update(dev);
 	}
 
 	DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
@@ -3822,6 +3843,11 @@ static void intel_update_watermarks(struct drm_device *dev)
 			       sr_hdisplay, sr_htotal, pixel_size);
 }
 
+static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
+{
+	return dev_priv->lvds_use_ssc && i915_panel_use_ssc;
+}
+
 static int intel_crtc_mode_set(struct drm_crtc *crtc,
 			       struct drm_display_mode *mode,
 			       struct drm_display_mode *adjusted_mode,
@@ -3884,7 +3910,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
 			num_connectors++;
 	}
 
-	if (is_lvds && dev_priv->lvds_use_ssc && num_connectors < 2) {
+	if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
 		refclk = dev_priv->lvds_ssc_freq * 1000;
 		DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
 			      refclk / 1000);
@@ -4059,7 +4085,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
 		udelay(200);
 
 		if (has_edp_encoder) {
-			if (dev_priv->lvds_use_ssc) {
+			if (intel_panel_use_ssc(dev_priv)) {
 				temp |= DREF_SSC1_ENABLE;
 				I915_WRITE(PCH_DREF_CONTROL, temp);
 
@@ -4070,13 +4096,13 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
 
 			/* Enable CPU source on CPU attached eDP */
 			if (!intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
-				if (dev_priv->lvds_use_ssc)
+				if (intel_panel_use_ssc(dev_priv))
 					temp |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
 				else
 					temp |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
 			} else {
 				/* Enable SSC on PCH eDP if needed */
-				if (dev_priv->lvds_use_ssc) {
+				if (intel_panel_use_ssc(dev_priv)) {
 					DRM_ERROR("enabling SSC on PCH\n");
 					temp |= DREF_SUPERSPREAD_SOURCE_ENABLE;
 				}
@@ -4104,7 +4130,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
 		int factor = 21;
 
 		if (is_lvds) {
-			if ((dev_priv->lvds_use_ssc &&
+			if ((intel_panel_use_ssc(dev_priv) &&
 			     dev_priv->lvds_ssc_freq == 100) ||
 			    (I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP)
 				factor = 25;
@@ -4183,7 +4209,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
 			/* XXX: just matching BIOS for now */
 			/* dpll |= PLL_REF_INPUT_TVCLKINBC; */
 			dpll |= 3;
-		else if (is_lvds && dev_priv->lvds_use_ssc && num_connectors < 2)
+		else if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2)
 			dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
 		else
 			dpll |= PLL_REF_INPUT_DREFCLK;
@@ -5525,6 +5551,18 @@ cleanup_work:
 	return ret;
 }
 
+static void intel_crtc_reset(struct drm_crtc *crtc)
+{
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+	/* Reset flags back to the 'unknown' status so that they
+	 * will be correctly set on the initial modeset.
+	 */
+	intel_crtc->cursor_addr = 0;
+	intel_crtc->dpms_mode = -1;
+	intel_crtc->active = true; /* force the pipe off on setup_init_config */
+}
+
 static struct drm_crtc_helper_funcs intel_helper_funcs = {
 	.dpms = intel_crtc_dpms,
 	.mode_fixup = intel_crtc_mode_fixup,
@@ -5536,6 +5574,7 @@ static struct drm_crtc_helper_funcs intel_helper_funcs = {
 };
 
 static const struct drm_crtc_funcs intel_crtc_funcs = {
+	.reset = intel_crtc_reset,
 	.cursor_set = intel_crtc_cursor_set,
 	.cursor_move = intel_crtc_cursor_move,
 	.gamma_set = intel_crtc_gamma_set,
@@ -5626,9 +5665,7 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
 	dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base;
 	dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;
 
-	intel_crtc->cursor_addr = 0;
-	intel_crtc->dpms_mode = -1;
-	intel_crtc->active = true; /* force the pipe off on setup_init_config */
+	intel_crtc_reset(&intel_crtc->base);
 
 	if (HAS_PCH_SPLIT(dev)) {
 		intel_helper_funcs.prepare = ironlake_crtc_prepare;
@@ -6281,7 +6318,9 @@ void intel_enable_clock_gating(struct drm_device *dev)
 
 	if (IS_GEN5(dev)) {
 		/* Required for FBC */
-		dspclk_gate |= DPFDUNIT_CLOCK_GATE_DISABLE;
+		dspclk_gate |= DPFCUNIT_CLOCK_GATE_DISABLE |
+			DPFCRUNIT_CLOCK_GATE_DISABLE |
+			DPFDUNIT_CLOCK_GATE_DISABLE;
 		/* Required for CxSR */
 		dspclk_gate |= DPARBUNIT_CLOCK_GATE_DISABLE;
 
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
index ee145a257287..512782728e51 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -148,6 +148,7 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
 
 //	memset(info->screen_base, 0, size);
 
+	drm_fb_helper_fill_fix(info, fb->pitch, fb->depth);
 	drm_fb_helper_fill_var(info, &ifbdev->helper, sizes->fb_width, sizes->fb_height);
 
 	info->pixmap.size = 64*1024;
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 8f4f6bd33ee9..ace8d5d30dd2 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -704,6 +704,14 @@ static const struct dmi_system_id intel_no_lvds[] = {
 	},
 	{
 		.callback = intel_no_lvds_dmi_callback,
+		.ident = "AOpen i915GMm-HFS",
+		.matches = {
+			DMI_MATCH(DMI_BOARD_VENDOR, "AOpen"),
+			DMI_MATCH(DMI_BOARD_NAME, "i915GMm-HFS"),
+		},
+	},
+	{
+		.callback = intel_no_lvds_dmi_callback,
 		.ident = "Aopen i945GTt-VFA",
 		.matches = {
 			DMI_MATCH(DMI_PRODUCT_VERSION, "AO00001JW"),
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index f295a7aaadf9..64fd64443ca6 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -26,6 +26,7 @@
  */
 
 #include <linux/acpi.h>
+#include <linux/acpi_io.h>
 #include <acpi/video.h>
 
 #include "drmP.h"
@@ -476,7 +477,7 @@ int intel_opregion_setup(struct drm_device *dev)
 		return -ENOTSUPP;
 	}
 
-	base = ioremap(asls, OPREGION_SIZE);
+	base = acpi_os_ioremap(asls, OPREGION_SIZE);
 	if (!base)
 		return -ENOMEM;
 
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index e00d200df3db..c65992df458d 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -278,6 +278,6 @@ void intel_panel_setup_backlight(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	dev_priv->backlight_level = intel_panel_get_max_backlight(dev);
+	dev_priv->backlight_level = intel_panel_get_backlight(dev);
 	dev_priv->backlight_enabled = dev_priv->backlight_level != 0;
 }
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 03e337072517..6218fa97aa1e 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -34,6 +34,14 @@
34#include "i915_trace.h" 34#include "i915_trace.h"
35#include "intel_drv.h" 35#include "intel_drv.h"
36 36
37static inline int ring_space(struct intel_ring_buffer *ring)
38{
39 int space = (ring->head & HEAD_ADDR) - (ring->tail + 8);
40 if (space < 0)
41 space += ring->size;
42 return space;
43}
44
37static u32 i915_gem_get_seqno(struct drm_device *dev) 45static u32 i915_gem_get_seqno(struct drm_device *dev)
38{ 46{
39 drm_i915_private_t *dev_priv = dev->dev_private; 47 drm_i915_private_t *dev_priv = dev->dev_private;
@@ -204,11 +212,9 @@ static int init_ring_common(struct intel_ring_buffer *ring)
204 if (!drm_core_check_feature(ring->dev, DRIVER_MODESET)) 212 if (!drm_core_check_feature(ring->dev, DRIVER_MODESET))
205 i915_kernel_lost_context(ring->dev); 213 i915_kernel_lost_context(ring->dev);
206 else { 214 else {
207 ring->head = I915_READ_HEAD(ring) & HEAD_ADDR; 215 ring->head = I915_READ_HEAD(ring);
208 ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR; 216 ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
209 ring->space = ring->head - (ring->tail + 8); 217 ring->space = ring_space(ring);
210 if (ring->space < 0)
211 ring->space += ring->size;
212 } 218 }
213 219
214 return 0; 220 return 0;
@@ -921,7 +927,7 @@ static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
921 } 927 }
922 928
923 ring->tail = 0; 929 ring->tail = 0;
924 ring->space = ring->head - 8; 930 ring->space = ring_space(ring);
925 931
926 return 0; 932 return 0;
927} 933}
@@ -933,20 +939,22 @@ int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
933 unsigned long end; 939 unsigned long end;
934 u32 head; 940 u32 head;
935 941
942 /* If the reported head position has wrapped or hasn't advanced,
943 * fallback to the slow and accurate path.
944 */
945 head = intel_read_status_page(ring, 4);
946 if (head > ring->head) {
947 ring->head = head;
948 ring->space = ring_space(ring);
949 if (ring->space >= n)
950 return 0;
951 }
952
936 trace_i915_ring_wait_begin (dev); 953 trace_i915_ring_wait_begin (dev);
937 end = jiffies + 3 * HZ; 954 end = jiffies + 3 * HZ;
938 do { 955 do {
939 /* If the reported head position has wrapped or hasn't advanced, 956 ring->head = I915_READ_HEAD(ring);
940 * fallback to the slow and accurate path. 957 ring->space = ring_space(ring);
941 */
942 head = intel_read_status_page(ring, 4);
943 if (head < ring->actual_head)
944 head = I915_READ_HEAD(ring);
945 ring->actual_head = head;
946 ring->head = head & HEAD_ADDR;
947 ring->space = ring->head - (ring->tail + 8);
948 if (ring->space < 0)
949 ring->space += ring->size;
950 if (ring->space >= n) { 958 if (ring->space >= n) {
951 trace_i915_ring_wait_end(dev); 959 trace_i915_ring_wait_end(dev);
952 return 0; 960 return 0;
@@ -1291,6 +1299,48 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
1291 return intel_init_ring_buffer(dev, ring); 1299 return intel_init_ring_buffer(dev, ring);
1292} 1300}
1293 1301
1302int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
1303{
1304 drm_i915_private_t *dev_priv = dev->dev_private;
1305 struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
1306
1307 *ring = render_ring;
1308 if (INTEL_INFO(dev)->gen >= 6) {
1309 ring->add_request = gen6_add_request;
1310 ring->irq_get = gen6_render_ring_get_irq;
1311 ring->irq_put = gen6_render_ring_put_irq;
1312 } else if (IS_GEN5(dev)) {
1313 ring->add_request = pc_render_add_request;
1314 ring->get_seqno = pc_render_get_seqno;
1315 }
1316
1317 ring->dev = dev;
1318 INIT_LIST_HEAD(&ring->active_list);
1319 INIT_LIST_HEAD(&ring->request_list);
1320 INIT_LIST_HEAD(&ring->gpu_write_list);
1321
1322 ring->size = size;
1323 ring->effective_size = ring->size;
1324 if (IS_I830(ring->dev))
1325 ring->effective_size -= 128;
1326
1327 ring->map.offset = start;
1328 ring->map.size = size;
1329 ring->map.type = 0;
1330 ring->map.flags = 0;
1331 ring->map.mtrr = 0;
1332
1333 drm_core_ioremap_wc(&ring->map, dev);
1334 if (ring->map.handle == NULL) {
1335 DRM_ERROR("cannot ioremap virtual address for"
1336 " ring buffer\n");
1337 return -ENOMEM;
1338 }
1339
1340 ring->virtual_start = (void __force __iomem *)ring->map.handle;
1341 return 0;
1342}
1343
1294int intel_init_bsd_ring_buffer(struct drm_device *dev) 1344int intel_init_bsd_ring_buffer(struct drm_device *dev)
1295{ 1345{
1296 drm_i915_private_t *dev_priv = dev->dev_private; 1346 drm_i915_private_t *dev_priv = dev->dev_private;
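
The three intel_ringbuffer.c hunks above replace the same open-coded head/tail arithmetic with a ring_space() helper defined earlier in the file (outside these hunks). Reconstructed from the removed lines, the computation being centralised is roughly the following sketch; the _sketch name is illustrative, not the driver's, and the HEAD_ADDR mask is assumed to have moved into the helper:

/* Bytes free between the CPU tail and the GPU head, keeping an 8-byte
 * gap so a completely full ring is never mistaken for an empty one
 * (head == tail). */
static int ring_space_sketch(struct intel_ring_buffer *ring)
{
        int space = (ring->head & HEAD_ADDR) - (ring->tail + 8);

        if (space < 0)
                space += ring->size;
        return space;
}

init_ring_common(), intel_wrap_ring_buffer() and intel_wait_ring_buffer() now all call the helper instead of duplicating the wrap handling.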
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index be9087e4c9be..6d6fde85a636 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -47,7 +47,6 @@ struct intel_ring_buffer {
47 struct drm_device *dev; 47 struct drm_device *dev;
48 struct drm_i915_gem_object *obj; 48 struct drm_i915_gem_object *obj;
49 49
50 u32 actual_head;
51 u32 head; 50 u32 head;
52 u32 tail; 51 u32 tail;
53 int space; 52 int space;
@@ -167,4 +166,7 @@ int intel_init_blt_ring_buffer(struct drm_device *dev);
167u32 intel_ring_get_active_head(struct intel_ring_buffer *ring); 166u32 intel_ring_get_active_head(struct intel_ring_buffer *ring);
168void intel_ring_setup_status_page(struct intel_ring_buffer *ring); 167void intel_ring_setup_status_page(struct intel_ring_buffer *ring);
169 168
169/* DRI warts */
170int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size);
171
170#endif /* _INTEL_RINGBUFFER_H_ */ 172#endif /* _INTEL_RINGBUFFER_H_ */
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 45cd37652a37..6a09c1413d60 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -473,20 +473,6 @@ static bool intel_sdvo_write_cmd(struct intel_sdvo *intel_sdvo, u8 cmd,
473 return false; 473 return false;
474 } 474 }
475 475
476 i = 3;
477 while (status == SDVO_CMD_STATUS_PENDING && i--) {
478 if (!intel_sdvo_read_byte(intel_sdvo,
479 SDVO_I2C_CMD_STATUS,
480 &status))
481 return false;
482 }
483 if (status != SDVO_CMD_STATUS_SUCCESS) {
484 DRM_DEBUG_KMS("command returns response %s [%d]\n",
485 status <= SDVO_CMD_STATUS_SCALING_NOT_SUPP ? cmd_status_names[status] : "???",
486 status);
487 return false;
488 }
489
490 return true; 476 return true;
491} 477}
492 478
@@ -497,6 +483,8 @@ static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo,
497 u8 status; 483 u8 status;
498 int i; 484 int i;
499 485
486 DRM_DEBUG_KMS("%s: R: ", SDVO_NAME(intel_sdvo));
487
500 /* 488 /*
501 * The documentation states that all commands will be 489 * The documentation states that all commands will be
502 * processed within 15µs, and that we need only poll 490 * processed within 15µs, and that we need only poll
@@ -505,14 +493,19 @@ static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo,
505 * 493 *
506 * Check 5 times in case the hardware failed to read the docs. 494 * Check 5 times in case the hardware failed to read the docs.
507 */ 495 */
508 do { 496 if (!intel_sdvo_read_byte(intel_sdvo,
497 SDVO_I2C_CMD_STATUS,
498 &status))
499 goto log_fail;
500
501 while (status == SDVO_CMD_STATUS_PENDING && retry--) {
502 udelay(15);
509 if (!intel_sdvo_read_byte(intel_sdvo, 503 if (!intel_sdvo_read_byte(intel_sdvo,
510 SDVO_I2C_CMD_STATUS, 504 SDVO_I2C_CMD_STATUS,
511 &status)) 505 &status))
512 return false; 506 goto log_fail;
513 } while (status == SDVO_CMD_STATUS_PENDING && --retry); 507 }
514 508
515 DRM_DEBUG_KMS("%s: R: ", SDVO_NAME(intel_sdvo));
516 if (status <= SDVO_CMD_STATUS_SCALING_NOT_SUPP) 509 if (status <= SDVO_CMD_STATUS_SCALING_NOT_SUPP)
517 DRM_LOG_KMS("(%s)", cmd_status_names[status]); 510 DRM_LOG_KMS("(%s)", cmd_status_names[status]);
518 else 511 else
@@ -533,7 +526,7 @@ static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo,
533 return true; 526 return true;
534 527
535log_fail: 528log_fail:
536 DRM_LOG_KMS("\n"); 529 DRM_LOG_KMS("... failed\n");
537 return false; 530 return false;
538} 531}
539 532
@@ -550,6 +543,7 @@ static int intel_sdvo_get_pixel_multiplier(struct drm_display_mode *mode)
550static bool intel_sdvo_set_control_bus_switch(struct intel_sdvo *intel_sdvo, 543static bool intel_sdvo_set_control_bus_switch(struct intel_sdvo *intel_sdvo,
551 u8 ddc_bus) 544 u8 ddc_bus)
552{ 545{
546 /* This must be the immediately preceding write before the i2c xfer */
553 return intel_sdvo_write_cmd(intel_sdvo, 547 return intel_sdvo_write_cmd(intel_sdvo,
554 SDVO_CMD_SET_CONTROL_BUS_SWITCH, 548 SDVO_CMD_SET_CONTROL_BUS_SWITCH,
555 &ddc_bus, 1); 549 &ddc_bus, 1);
@@ -557,7 +551,10 @@ static bool intel_sdvo_set_control_bus_switch(struct intel_sdvo *intel_sdvo,
557 551
558static bool intel_sdvo_set_value(struct intel_sdvo *intel_sdvo, u8 cmd, const void *data, int len) 552static bool intel_sdvo_set_value(struct intel_sdvo *intel_sdvo, u8 cmd, const void *data, int len)
559{ 553{
560 return intel_sdvo_write_cmd(intel_sdvo, cmd, data, len); 554 if (!intel_sdvo_write_cmd(intel_sdvo, cmd, data, len))
555 return false;
556
557 return intel_sdvo_read_response(intel_sdvo, NULL, 0);
561} 558}
562 559
563static bool 560static bool
@@ -859,18 +856,21 @@ static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo)
859 856
860 intel_dip_infoframe_csum(&avi_if); 857 intel_dip_infoframe_csum(&avi_if);
861 858
862 if (!intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_HBUF_INDEX, 859 if (!intel_sdvo_set_value(intel_sdvo,
860 SDVO_CMD_SET_HBUF_INDEX,
863 set_buf_index, 2)) 861 set_buf_index, 2))
864 return false; 862 return false;
865 863
866 for (i = 0; i < sizeof(avi_if); i += 8) { 864 for (i = 0; i < sizeof(avi_if); i += 8) {
867 if (!intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_HBUF_DATA, 865 if (!intel_sdvo_set_value(intel_sdvo,
866 SDVO_CMD_SET_HBUF_DATA,
868 data, 8)) 867 data, 8))
869 return false; 868 return false;
870 data++; 869 data++;
871 } 870 }
872 871
873 return intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_HBUF_TXRATE, 872 return intel_sdvo_set_value(intel_sdvo,
873 SDVO_CMD_SET_HBUF_TXRATE,
874 &tx_rate, 1); 874 &tx_rate, 1);
875} 875}
876 876
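
Taken together, the intel_sdvo.c hunks above move command-status polling out of intel_sdvo_write_cmd() and into intel_sdvo_read_response(), and route the "set" helpers through both. The resulting shape of a set operation is, as a sketch (the _sketch suffix marks an illustrative name, not driver code):

static bool sdvo_set_value_sketch(struct intel_sdvo *intel_sdvo,
                                  u8 cmd, const void *data, int len)
{
        /* Write the command and its arguments over i2c... */
        if (!intel_sdvo_write_cmd(intel_sdvo, cmd, data, len))
                return false;

        /* ...then poll SDVO_I2C_CMD_STATUS until the device leaves
         * SDVO_CMD_STATUS_PENDING (or the retry budget runs out). */
        return intel_sdvo_read_response(intel_sdvo, NULL, 0);
}

The one exception is intel_sdvo_set_control_bus_switch(), which deliberately skips the response read so that the bus-switch write is the last command before the DDC transfer, as the new comment in that hunk notes.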
diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig
index 21d6c29c2d21..de70959b9ed5 100644
--- a/drivers/gpu/drm/nouveau/Kconfig
+++ b/drivers/gpu/drm/nouveau/Kconfig
@@ -8,7 +8,7 @@ config DRM_NOUVEAU
8 select FB_CFB_COPYAREA 8 select FB_CFB_COPYAREA
9 select FB_CFB_IMAGEBLIT 9 select FB_CFB_IMAGEBLIT
10 select FB 10 select FB
11 select FRAMEBUFFER_CONSOLE if !EMBEDDED 11 select FRAMEBUFFER_CONSOLE if !EXPERT
12 select FB_BACKLIGHT if DRM_NOUVEAU_BACKLIGHT 12 select FB_BACKLIGHT if DRM_NOUVEAU_BACKLIGHT
13 select ACPI_VIDEO if ACPI && X86 && BACKLIGHT_CLASS_DEVICE && VIDEO_OUTPUT_CONTROL && INPUT 13 select ACPI_VIDEO if ACPI && X86 && BACKLIGHT_CLASS_DEVICE && VIDEO_OUTPUT_CONTROL && INPUT
14 help 14 help
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index 2aef5cd3acf5..49e5e99917e2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -6310,6 +6310,9 @@ void merge_like_dcb_entries(struct drm_device *dev, struct dcb_table *dcb)
6310static bool 6310static bool
6311apply_dcb_encoder_quirks(struct drm_device *dev, int idx, u32 *conn, u32 *conf) 6311apply_dcb_encoder_quirks(struct drm_device *dev, int idx, u32 *conn, u32 *conf)
6312{ 6312{
6313 struct drm_nouveau_private *dev_priv = dev->dev_private;
6314 struct dcb_table *dcb = &dev_priv->vbios.dcb;
6315
6313 /* Dell Precision M6300 6316 /* Dell Precision M6300
6314 * DCB entry 2: 02025312 00000010 6317 * DCB entry 2: 02025312 00000010
6315 * DCB entry 3: 02026312 00000020 6318 * DCB entry 3: 02026312 00000020
@@ -6327,6 +6330,18 @@ apply_dcb_encoder_quirks(struct drm_device *dev, int idx, u32 *conn, u32 *conf)
6327 return false; 6330 return false;
6328 } 6331 }
6329 6332
6333 /* GeForce3 Ti 200
6334 *
6335 * DCB reports an LVDS output that should be TMDS:
6336 * DCB entry 1: f2005014 ffffffff
6337 */
6338 if (nv_match_device(dev, 0x0201, 0x1462, 0x8851)) {
6339 if (*conn == 0xf2005014 && *conf == 0xffffffff) {
6340 fabricate_dcb_output(dcb, OUTPUT_TMDS, 1, 1, 1);
6341 return false;
6342 }
6343 }
6344
6330 return true; 6345 return true;
6331} 6346}
6332 6347
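
The GeForce3 Ti 200 quirk added above follows the same pattern as the Dell Precision entry before it: match the board by PCI device and subsystem IDs, recognise the bogus DCB words, fabricate a corrected output, and return false so the original entry is not used. As a template, with deliberately made-up PCI IDs (the DCB values and fabricate_dcb_output() arguments mirror the real quirk):

        /* Hypothetical board 0x1234 (subvendor 0xabcd, subdevice 0x5678)
         * whose DCB describes an LVDS output that is really TMDS. */
        if (nv_match_device(dev, 0x1234, 0xabcd, 0x5678)) {
                if (*conn == 0xf2005014 && *conf == 0xffffffff) {
                        fabricate_dcb_output(dcb, OUTPUT_TMDS, 1, 1, 1);
                        return false;   /* drop the bogus DCB entry */
                }
        }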
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c
index 13bb672a16f4..f658a04eecf9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.c
@@ -234,9 +234,9 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state)
234 pci_set_power_state(pdev, PCI_D3hot); 234 pci_set_power_state(pdev, PCI_D3hot);
235 } 235 }
236 236
237 acquire_console_sem(); 237 console_lock();
238 nouveau_fbcon_set_suspend(dev, 1); 238 nouveau_fbcon_set_suspend(dev, 1);
239 release_console_sem(); 239 console_unlock();
240 nouveau_fbcon_restore_accel(dev); 240 nouveau_fbcon_restore_accel(dev);
241 return 0; 241 return 0;
242 242
@@ -359,9 +359,9 @@ nouveau_pci_resume(struct pci_dev *pdev)
359 nv_crtc->lut.depth = 0; 359 nv_crtc->lut.depth = 0;
360 } 360 }
361 361
362 acquire_console_sem(); 362 console_lock();
363 nouveau_fbcon_set_suspend(dev, 0); 363 nouveau_fbcon_set_suspend(dev, 0);
364 release_console_sem(); 364 console_unlock();
365 365
366 nouveau_fbcon_zfill_all(dev); 366 nouveau_fbcon_zfill_all(dev);
367 367
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 46e32573b3a3..9821fcacc3d2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -160,6 +160,7 @@ enum nouveau_flags {
160#define NVOBJ_FLAG_ZERO_ALLOC (1 << 1) 160#define NVOBJ_FLAG_ZERO_ALLOC (1 << 1)
161#define NVOBJ_FLAG_ZERO_FREE (1 << 2) 161#define NVOBJ_FLAG_ZERO_FREE (1 << 2)
162#define NVOBJ_FLAG_VM (1 << 3) 162#define NVOBJ_FLAG_VM (1 << 3)
163#define NVOBJ_FLAG_VM_USER (1 << 4)
163 164
164#define NVOBJ_CINST_GLOBAL 0xdeadbeef 165#define NVOBJ_CINST_GLOBAL 0xdeadbeef
165 166
@@ -847,9 +848,6 @@ extern void nv10_mem_put_tile_region(struct drm_device *dev,
847 struct nouveau_fence *fence); 848 struct nouveau_fence *fence);
848extern const struct ttm_mem_type_manager_func nouveau_vram_manager; 849extern const struct ttm_mem_type_manager_func nouveau_vram_manager;
849 850
850/* nvc0_vram.c */
851extern const struct ttm_mem_type_manager_func nvc0_vram_manager;
852
853/* nouveau_notifier.c */ 851/* nouveau_notifier.c */
854extern int nouveau_notifier_init_channel(struct nouveau_channel *); 852extern int nouveau_notifier_init_channel(struct nouveau_channel *);
855extern void nouveau_notifier_takedown_channel(struct nouveau_channel *); 853extern void nouveau_notifier_takedown_channel(struct nouveau_channel *);
@@ -1576,6 +1574,20 @@ nv_match_device(struct drm_device *dev, unsigned device,
1576 dev->pdev->subsystem_device == sub_device; 1574 dev->pdev->subsystem_device == sub_device;
1577} 1575}
1578 1576
1577/* returns 1 if the device is one of the nv4x chipsets using the 0x4497
1578 * object class; helpful for determining a number of other hardware features
1579 */
1580static inline int
1581nv44_graph_class(struct drm_device *dev)
1582{
1583 struct drm_nouveau_private *dev_priv = dev->dev_private;
1584
1585 if ((dev_priv->chipset & 0xf0) == 0x60)
1586 return 1;
1587
1588 return !(0x0baf & (1 << (dev_priv->chipset & 0x0f)));
1589}
1590
1579/* memory type/access flags, do not match hardware values */ 1591/* memory type/access flags, do not match hardware values */
1580#define NV_MEM_ACCESS_RO 1 1592#define NV_MEM_ACCESS_RO 1
1581#define NV_MEM_ACCESS_WO 2 1593#define NV_MEM_ACCESS_WO 2
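
For reference, the nv44_graph_class() helper added above can be decoded by hand: any chipset with (chipset & 0xf0) == 0x60 qualifies, and for the 0x4x family the low nibble indexes mask 0x0baf, whose clear bits mark the 0x4497-class parts:

        /* 0x0baf = 0000 1011 1010 1111b (bits 0-3, 5, 7-9 and 11 set), so:
         *
         *   nv44_graph_class() == 1 (0x4497 class):
         *       0x44 0x46 0x4a 0x4c 0x4d 0x4e 0x4f, and every 0x6x chipset
         *   nv44_graph_class() == 0 (0x4097 class):
         *       0x40 0x41 0x42 0x43 0x45 0x47 0x48 0x49 0x4b
         */

This is the list that the nv40_graph.c, nv40_grctx.c and nv40_mc.c hunks later in this diff switch over to, replacing their open-coded chipset checks.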
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index a26d04740c88..60769d2f9a66 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -352,13 +352,14 @@ nouveau_fbcon_create(struct nouveau_fbdev *nfbdev,
352 FBINFO_HWACCEL_IMAGEBLIT; 352 FBINFO_HWACCEL_IMAGEBLIT;
353 info->flags |= FBINFO_CAN_FORCE_OUTPUT; 353 info->flags |= FBINFO_CAN_FORCE_OUTPUT;
354 info->fbops = &nouveau_fbcon_sw_ops; 354 info->fbops = &nouveau_fbcon_sw_ops;
355 info->fix.smem_start = dev->mode_config.fb_base + 355 info->fix.smem_start = nvbo->bo.mem.bus.base +
356 (nvbo->bo.mem.start << PAGE_SHIFT); 356 nvbo->bo.mem.bus.offset;
357 info->fix.smem_len = size; 357 info->fix.smem_len = size;
358 358
359 info->screen_base = nvbo_kmap_obj_iovirtual(nouveau_fb->nvbo); 359 info->screen_base = nvbo_kmap_obj_iovirtual(nouveau_fb->nvbo);
360 info->screen_size = size; 360 info->screen_size = size;
361 361
362 drm_fb_helper_fill_fix(info, fb->pitch, fb->depth);
362 drm_fb_helper_fill_var(info, &nfbdev->helper, sizes->fb_width, sizes->fb_height); 363 drm_fb_helper_fill_var(info, &nfbdev->helper, sizes->fb_width, sizes->fb_height);
363 364
364 /* Set aperture base/size for vesafb takeover */ 365 /* Set aperture base/size for vesafb takeover */
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
index 69044eb104bb..26347b7cd872 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -742,30 +742,24 @@ nouveau_vram_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
742{ 742{
743 struct nouveau_mm *mm = man->priv; 743 struct nouveau_mm *mm = man->priv;
744 struct nouveau_mm_node *r; 744 struct nouveau_mm_node *r;
745 u64 total = 0, ttotal[3] = {}, tused[3] = {}, tfree[3] = {}; 745 u32 total = 0, free = 0;
746 int i;
747 746
748 mutex_lock(&mm->mutex); 747 mutex_lock(&mm->mutex);
749 list_for_each_entry(r, &mm->nodes, nl_entry) { 748 list_for_each_entry(r, &mm->nodes, nl_entry) {
750 printk(KERN_DEBUG "%s %s-%d: 0x%010llx 0x%010llx\n", 749 printk(KERN_DEBUG "%s %d: 0x%010llx 0x%010llx\n",
751 prefix, r->free ? "free" : "used", r->type, 750 prefix, r->type, ((u64)r->offset << 12),
752 ((u64)r->offset << 12),
753 (((u64)r->offset + r->length) << 12)); 751 (((u64)r->offset + r->length) << 12));
752
754 total += r->length; 753 total += r->length;
755 ttotal[r->type] += r->length; 754 if (!r->type)
756 if (r->free) 755 free += r->length;
757 tfree[r->type] += r->length;
758 else
759 tused[r->type] += r->length;
760 } 756 }
761 mutex_unlock(&mm->mutex); 757 mutex_unlock(&mm->mutex);
762 758
763 printk(KERN_DEBUG "%s total: 0x%010llx\n", prefix, total << 12); 759 printk(KERN_DEBUG "%s total: 0x%010llx free: 0x%010llx\n",
764 for (i = 0; i < 3; i++) { 760 prefix, (u64)total << 12, (u64)free << 12);
765 printk(KERN_DEBUG "%s type %d: 0x%010llx, " 761 printk(KERN_DEBUG "%s block: 0x%08x\n",
766 "used 0x%010llx, free 0x%010llx\n", prefix, 762 prefix, mm->block_size << 12);
767 i, ttotal[i] << 12, tused[i] << 12, tfree[i] << 12);
768 }
769} 763}
770 764
771const struct ttm_mem_type_manager_func nouveau_vram_manager = { 765const struct ttm_mem_type_manager_func nouveau_vram_manager = {
diff --git a/drivers/gpu/drm/nouveau/nouveau_mm.c b/drivers/gpu/drm/nouveau/nouveau_mm.c
index cdbb11eb701b..8844b50c3e54 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mm.c
@@ -48,175 +48,76 @@ region_split(struct nouveau_mm *rmm, struct nouveau_mm_node *a, u32 size)
48 48
49 b->offset = a->offset; 49 b->offset = a->offset;
50 b->length = size; 50 b->length = size;
51 b->free = a->free;
52 b->type = a->type; 51 b->type = a->type;
53 a->offset += size; 52 a->offset += size;
54 a->length -= size; 53 a->length -= size;
55 list_add_tail(&b->nl_entry, &a->nl_entry); 54 list_add_tail(&b->nl_entry, &a->nl_entry);
56 if (b->free) 55 if (b->type == 0)
57 list_add_tail(&b->fl_entry, &a->fl_entry); 56 list_add_tail(&b->fl_entry, &a->fl_entry);
58 return b; 57 return b;
59} 58}
60 59
61static struct nouveau_mm_node * 60#define node(root, dir) ((root)->nl_entry.dir == &rmm->nodes) ? NULL : \
62nouveau_mm_merge(struct nouveau_mm *rmm, struct nouveau_mm_node *this) 61 list_entry((root)->nl_entry.dir, struct nouveau_mm_node, nl_entry)
63{
64 struct nouveau_mm_node *prev, *next;
65
66 /* try to merge with free adjacent entries of same type */
67 prev = list_entry(this->nl_entry.prev, struct nouveau_mm_node, nl_entry);
68 if (this->nl_entry.prev != &rmm->nodes) {
69 if (prev->free && prev->type == this->type) {
70 prev->length += this->length;
71 region_put(rmm, this);
72 this = prev;
73 }
74 }
75
76 next = list_entry(this->nl_entry.next, struct nouveau_mm_node, nl_entry);
77 if (this->nl_entry.next != &rmm->nodes) {
78 if (next->free && next->type == this->type) {
79 next->offset = this->offset;
80 next->length += this->length;
81 region_put(rmm, this);
82 this = next;
83 }
84 }
85
86 return this;
87}
88 62
89void 63void
90nouveau_mm_put(struct nouveau_mm *rmm, struct nouveau_mm_node *this) 64nouveau_mm_put(struct nouveau_mm *rmm, struct nouveau_mm_node *this)
91{ 65{
92 u32 block_s, block_l; 66 struct nouveau_mm_node *prev = node(this, prev);
67 struct nouveau_mm_node *next = node(this, next);
93 68
94 this->free = true;
95 list_add(&this->fl_entry, &rmm->free); 69 list_add(&this->fl_entry, &rmm->free);
96 this = nouveau_mm_merge(rmm, this); 70 this->type = 0;
97
98 /* any entirely free blocks now? we'll want to remove typing
99 * on them now so they can be use for any memory allocation
100 */
101 block_s = roundup(this->offset, rmm->block_size);
102 if (block_s + rmm->block_size > this->offset + this->length)
103 return;
104 71
105 /* split off any still-typed region at the start */ 72 if (prev && prev->type == 0) {
106 if (block_s != this->offset) { 73 prev->length += this->length;
107 if (!region_split(rmm, this, block_s - this->offset)) 74 region_put(rmm, this);
108 return; 75 this = prev;
109 } 76 }
110 77
111 /* split off the soon-to-be-untyped block(s) */ 78 if (next && next->type == 0) {
112 block_l = rounddown(this->length, rmm->block_size); 79 next->offset = this->offset;
113 if (block_l != this->length) { 80 next->length += this->length;
114 this = region_split(rmm, this, block_l); 81 region_put(rmm, this);
115 if (!this)
116 return;
117 } 82 }
118
119 /* mark as having no type, and retry merge with any adjacent
120 * untyped blocks
121 */
122 this->type = 0;
123 nouveau_mm_merge(rmm, this);
124} 83}
125 84
126int 85int
127nouveau_mm_get(struct nouveau_mm *rmm, int type, u32 size, u32 size_nc, 86nouveau_mm_get(struct nouveau_mm *rmm, int type, u32 size, u32 size_nc,
128 u32 align, struct nouveau_mm_node **pnode) 87 u32 align, struct nouveau_mm_node **pnode)
129{ 88{
130 struct nouveau_mm_node *this, *tmp, *next; 89 struct nouveau_mm_node *prev, *this, *next;
131 u32 splitoff, avail, alloc; 90 u32 min = size_nc ? size_nc : size;
132 91 u32 align_mask = align - 1;
133 list_for_each_entry_safe(this, tmp, &rmm->free, fl_entry) { 92 u32 splitoff;
134 next = list_entry(this->nl_entry.next, struct nouveau_mm_node, nl_entry); 93 u32 s, e;
135 if (this->nl_entry.next == &rmm->nodes) 94
136 next = NULL; 95 list_for_each_entry(this, &rmm->free, fl_entry) {
137 96 e = this->offset + this->length;
138 /* skip wrongly typed blocks */ 97 s = this->offset;
139 if (this->type && this->type != type) 98
99 prev = node(this, prev);
100 if (prev && prev->type != type)
101 s = roundup(s, rmm->block_size);
102
103 next = node(this, next);
104 if (next && next->type != type)
105 e = rounddown(e, rmm->block_size);
106
107 s = (s + align_mask) & ~align_mask;
108 e &= ~align_mask;
109 if (s > e || e - s < min)
140 continue; 110 continue;
141 111
142 /* account for alignment */ 112 splitoff = s - this->offset;
143 splitoff = this->offset & (align - 1); 113 if (splitoff && !region_split(rmm, this, splitoff))
144 if (splitoff) 114 return -ENOMEM;
145 splitoff = align - splitoff;
146
147 if (this->length <= splitoff)
148 continue;
149
150 /* determine total memory available from this, and
151 * the next block (if appropriate)
152 */
153 avail = this->length;
154 if (next && next->free && (!next->type || next->type == type))
155 avail += next->length;
156
157 avail -= splitoff;
158
159 /* determine allocation size */
160 if (size_nc) {
161 alloc = min(avail, size);
162 alloc = rounddown(alloc, size_nc);
163 if (alloc == 0)
164 continue;
165 } else {
166 alloc = size;
167 if (avail < alloc)
168 continue;
169 }
170
171 /* untyped block, split off a chunk that's a multiple
172 * of block_size and type it
173 */
174 if (!this->type) {
175 u32 block = roundup(alloc + splitoff, rmm->block_size);
176 if (this->length < block)
177 continue;
178
179 this = region_split(rmm, this, block);
180 if (!this)
181 return -ENOMEM;
182
183 this->type = type;
184 }
185
186 /* stealing memory from adjacent block */
187 if (alloc > this->length) {
188 u32 amount = alloc - (this->length - splitoff);
189
190 if (!next->type) {
191 amount = roundup(amount, rmm->block_size);
192
193 next = region_split(rmm, next, amount);
194 if (!next)
195 return -ENOMEM;
196
197 next->type = type;
198 }
199
200 this->length += amount;
201 next->offset += amount;
202 next->length -= amount;
203 if (!next->length) {
204 list_del(&next->nl_entry);
205 list_del(&next->fl_entry);
206 kfree(next);
207 }
208 }
209
210 if (splitoff) {
211 if (!region_split(rmm, this, splitoff))
212 return -ENOMEM;
213 }
214 115
215 this = region_split(rmm, this, alloc); 116 this = region_split(rmm, this, min(size, e - s));
216 if (this == NULL) 117 if (!this)
217 return -ENOMEM; 118 return -ENOMEM;
218 119
219 this->free = false; 120 this->type = type;
220 list_del(&this->fl_entry); 121 list_del(&this->fl_entry);
221 *pnode = this; 122 *pnode = this;
222 return 0; 123 return 0;
@@ -234,7 +135,6 @@ nouveau_mm_init(struct nouveau_mm **prmm, u32 offset, u32 length, u32 block)
234 heap = kzalloc(sizeof(*heap), GFP_KERNEL); 135 heap = kzalloc(sizeof(*heap), GFP_KERNEL);
235 if (!heap) 136 if (!heap)
236 return -ENOMEM; 137 return -ENOMEM;
237 heap->free = true;
238 heap->offset = roundup(offset, block); 138 heap->offset = roundup(offset, block);
239 heap->length = rounddown(offset + length, block) - heap->offset; 139 heap->length = rounddown(offset + length, block) - heap->offset;
240 140
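
In the rewritten allocator above, type 0 now means "free" (the old bool free member is gone), and nouveau_mm_get() computes a usable window [s, e) for each free node: the start is rounded up to block_size when the previous neighbour has a different type, the end is rounded down likewise, and both are then clamped to the requested alignment. A worked example with made-up parameters (block_size = 16, align = 8, a free node covering [10, 40) with both neighbours of a different type, request size = 12 and size_nc = 0):

        /* s = roundup(10, 16) = 16,  e = rounddown(40, 16) = 32
         * after alignment:  s = 16,  e = 32  ->  window of 16 units >= 12
         *
         * splitoff = 16 - 10 = 6: [10, 16) is split off and stays free,
         * region_split(this, min(12, 16)) returns [16, 28), which gets the
         * caller's type and is handed back; [28, 40) remains on the free list.
         */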
diff --git a/drivers/gpu/drm/nouveau/nouveau_mm.h b/drivers/gpu/drm/nouveau/nouveau_mm.h
index af3844933036..798eaf39691c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_mm.h
@@ -30,9 +30,7 @@ struct nouveau_mm_node {
30 struct list_head fl_entry; 30 struct list_head fl_entry;
31 struct list_head rl_entry; 31 struct list_head rl_entry;
32 32
33 bool free; 33 u8 type;
34 int type;
35
36 u32 offset; 34 u32 offset;
37 u32 length; 35 u32 length;
38}; 36};
diff --git a/drivers/gpu/drm/nouveau/nouveau_pm.c b/drivers/gpu/drm/nouveau/nouveau_pm.c
index fb846a3fef15..f05c0cddfeca 100644
--- a/drivers/gpu/drm/nouveau/nouveau_pm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_pm.c
@@ -443,7 +443,7 @@ nouveau_hwmon_fini(struct drm_device *dev)
443 struct nouveau_pm_engine *pm = &dev_priv->engine.pm; 443 struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
444 444
445 if (pm->hwmon) { 445 if (pm->hwmon) {
446 sysfs_remove_group(&pm->hwmon->kobj, &hwmon_attrgroup); 446 sysfs_remove_group(&dev->pdev->dev.kobj, &hwmon_attrgroup);
447 hwmon_device_unregister(pm->hwmon); 447 hwmon_device_unregister(pm->hwmon);
448 } 448 }
449#endif 449#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_temp.c b/drivers/gpu/drm/nouveau/nouveau_temp.c
index 7ecc4adc1e45..8d9968e1cba8 100644
--- a/drivers/gpu/drm/nouveau/nouveau_temp.c
+++ b/drivers/gpu/drm/nouveau/nouveau_temp.c
@@ -265,8 +265,8 @@ nouveau_temp_probe_i2c(struct drm_device *dev)
265 struct i2c_board_info info[] = { 265 struct i2c_board_info info[] = {
266 { I2C_BOARD_INFO("w83l785ts", 0x2d) }, 266 { I2C_BOARD_INFO("w83l785ts", 0x2d) },
267 { I2C_BOARD_INFO("w83781d", 0x2d) }, 267 { I2C_BOARD_INFO("w83781d", 0x2d) },
268 { I2C_BOARD_INFO("f75375", 0x2e) },
269 { I2C_BOARD_INFO("adt7473", 0x2e) }, 268 { I2C_BOARD_INFO("adt7473", 0x2e) },
269 { I2C_BOARD_INFO("f75375", 0x2e) },
270 { I2C_BOARD_INFO("lm99", 0x4c) }, 270 { I2C_BOARD_INFO("lm99", 0x4c) },
271 { } 271 { }
272 }; 272 };
diff --git a/drivers/gpu/drm/nouveau/nv40_graph.c b/drivers/gpu/drm/nouveau/nv40_graph.c
index 19ef92a0375a..8870d72388c8 100644
--- a/drivers/gpu/drm/nouveau/nv40_graph.c
+++ b/drivers/gpu/drm/nouveau/nv40_graph.c
@@ -451,8 +451,7 @@ nv40_graph_register(struct drm_device *dev)
451 NVOBJ_CLASS(dev, 0x309e, GR); /* swzsurf */ 451 NVOBJ_CLASS(dev, 0x309e, GR); /* swzsurf */
452 452
453 /* curie */ 453 /* curie */
454 if (dev_priv->chipset >= 0x60 || 454 if (nv44_graph_class(dev))
455 0x00005450 & (1 << (dev_priv->chipset & 0x0f)))
456 NVOBJ_CLASS(dev, 0x4497, GR); 455 NVOBJ_CLASS(dev, 0x4497, GR);
457 else 456 else
458 NVOBJ_CLASS(dev, 0x4097, GR); 457 NVOBJ_CLASS(dev, 0x4097, GR);
diff --git a/drivers/gpu/drm/nouveau/nv40_grctx.c b/drivers/gpu/drm/nouveau/nv40_grctx.c
index ce585093264e..f70447d131d7 100644
--- a/drivers/gpu/drm/nouveau/nv40_grctx.c
+++ b/drivers/gpu/drm/nouveau/nv40_grctx.c
@@ -118,17 +118,6 @@
118 */ 118 */
119 119
120static int 120static int
121nv40_graph_4097(struct drm_device *dev)
122{
123 struct drm_nouveau_private *dev_priv = dev->dev_private;
124
125 if ((dev_priv->chipset & 0xf0) == 0x60)
126 return 0;
127
128 return !!(0x0baf & (1 << dev_priv->chipset));
129}
130
131static int
132nv40_graph_vs_count(struct drm_device *dev) 121nv40_graph_vs_count(struct drm_device *dev)
133{ 122{
134 struct drm_nouveau_private *dev_priv = dev->dev_private; 123 struct drm_nouveau_private *dev_priv = dev->dev_private;
@@ -219,7 +208,7 @@ nv40_graph_construct_general(struct nouveau_grctx *ctx)
219 gr_def(ctx, 0x4009dc, 0x80000000); 208 gr_def(ctx, 0x4009dc, 0x80000000);
220 } else { 209 } else {
221 cp_ctx(ctx, 0x400840, 20); 210 cp_ctx(ctx, 0x400840, 20);
222 if (!nv40_graph_4097(ctx->dev)) { 211 if (nv44_graph_class(ctx->dev)) {
223 for (i = 0; i < 8; i++) 212 for (i = 0; i < 8; i++)
224 gr_def(ctx, 0x400860 + (i * 4), 0x00000001); 213 gr_def(ctx, 0x400860 + (i * 4), 0x00000001);
225 } 214 }
@@ -228,7 +217,7 @@ nv40_graph_construct_general(struct nouveau_grctx *ctx)
228 gr_def(ctx, 0x400888, 0x00000040); 217 gr_def(ctx, 0x400888, 0x00000040);
229 cp_ctx(ctx, 0x400894, 11); 218 cp_ctx(ctx, 0x400894, 11);
230 gr_def(ctx, 0x400894, 0x00000040); 219 gr_def(ctx, 0x400894, 0x00000040);
231 if (nv40_graph_4097(ctx->dev)) { 220 if (!nv44_graph_class(ctx->dev)) {
232 for (i = 0; i < 8; i++) 221 for (i = 0; i < 8; i++)
233 gr_def(ctx, 0x4008a0 + (i * 4), 0x80000000); 222 gr_def(ctx, 0x4008a0 + (i * 4), 0x80000000);
234 } 223 }
@@ -546,7 +535,7 @@ nv40_graph_construct_state3d_2(struct nouveau_grctx *ctx)
546static void 535static void
547nv40_graph_construct_state3d_3(struct nouveau_grctx *ctx) 536nv40_graph_construct_state3d_3(struct nouveau_grctx *ctx)
548{ 537{
549 int len = nv40_graph_4097(ctx->dev) ? 0x0684 : 0x0084; 538 int len = nv44_graph_class(ctx->dev) ? 0x0084 : 0x0684;
550 539
551 cp_out (ctx, 0x300000); 540 cp_out (ctx, 0x300000);
552 cp_lsr (ctx, len - 4); 541 cp_lsr (ctx, len - 4);
@@ -582,11 +571,11 @@ nv40_graph_construct_shader(struct nouveau_grctx *ctx)
582 } else { 571 } else {
583 b0_offset = 0x1d40/4; /* 2200 */ 572 b0_offset = 0x1d40/4; /* 2200 */
584 b1_offset = 0x3f40/4; /* 0b00 : 0a40 */ 573 b1_offset = 0x3f40/4; /* 0b00 : 0a40 */
585 vs_len = nv40_graph_4097(dev) ? 0x4a40/4 : 0x4980/4; 574 vs_len = nv44_graph_class(dev) ? 0x4980/4 : 0x4a40/4;
586 } 575 }
587 576
588 cp_lsr(ctx, vs_len * vs_nr + 0x300/4); 577 cp_lsr(ctx, vs_len * vs_nr + 0x300/4);
589 cp_out(ctx, nv40_graph_4097(dev) ? 0x800041 : 0x800029); 578 cp_out(ctx, nv44_graph_class(dev) ? 0x800029 : 0x800041);
590 579
591 offset = ctx->ctxvals_pos; 580 offset = ctx->ctxvals_pos;
592 ctx->ctxvals_pos += (0x0300/4 + (vs_nr * vs_len)); 581 ctx->ctxvals_pos += (0x0300/4 + (vs_nr * vs_len));
diff --git a/drivers/gpu/drm/nouveau/nv40_mc.c b/drivers/gpu/drm/nouveau/nv40_mc.c
index e4e72c12ab6a..03c0d4c3f355 100644
--- a/drivers/gpu/drm/nouveau/nv40_mc.c
+++ b/drivers/gpu/drm/nouveau/nv40_mc.c
@@ -6,27 +6,17 @@
6int 6int
7nv40_mc_init(struct drm_device *dev) 7nv40_mc_init(struct drm_device *dev)
8{ 8{
9 struct drm_nouveau_private *dev_priv = dev->dev_private;
10 uint32_t tmp;
11
12 /* Power up everything, resetting each individual unit will 9 /* Power up everything, resetting each individual unit will
13 * be done later if needed. 10 * be done later if needed.
14 */ 11 */
15 nv_wr32(dev, NV03_PMC_ENABLE, 0xFFFFFFFF); 12 nv_wr32(dev, NV03_PMC_ENABLE, 0xFFFFFFFF);
16 13
17 switch (dev_priv->chipset) { 14 if (nv44_graph_class(dev)) {
18 case 0x44: 15 u32 tmp = nv_rd32(dev, NV04_PFB_FIFO_DATA);
19 case 0x46: /* G72 */
20 case 0x4e:
21 case 0x4c: /* C51_G7X */
22 tmp = nv_rd32(dev, NV04_PFB_FIFO_DATA);
23 nv_wr32(dev, NV40_PMC_1700, tmp); 16 nv_wr32(dev, NV40_PMC_1700, tmp);
24 nv_wr32(dev, NV40_PMC_1704, 0); 17 nv_wr32(dev, NV40_PMC_1704, 0);
25 nv_wr32(dev, NV40_PMC_1708, 0); 18 nv_wr32(dev, NV40_PMC_1708, 0);
26 nv_wr32(dev, NV40_PMC_170C, tmp); 19 nv_wr32(dev, NV40_PMC_170C, tmp);
27 break;
28 default:
29 break;
30 } 20 }
31 21
32 return 0; 22 return 0;
diff --git a/drivers/gpu/drm/nouveau/nv50_evo.c b/drivers/gpu/drm/nouveau/nv50_evo.c
index 14e24e906ee8..0ea090f4244a 100644
--- a/drivers/gpu/drm/nouveau/nv50_evo.c
+++ b/drivers/gpu/drm/nouveau/nv50_evo.c
@@ -283,8 +283,7 @@ nv50_evo_create(struct drm_device *dev)
283 nv50_evo_channel_del(&dev_priv->evo); 283 nv50_evo_channel_del(&dev_priv->evo);
284 return ret; 284 return ret;
285 } 285 }
286 } else 286 } else {
287 if (dev_priv->chipset != 0x50) {
288 ret = nv50_evo_dmaobj_new(evo, 0x3d, NvEvoFB16, 0x70, 0x19, 287 ret = nv50_evo_dmaobj_new(evo, 0x3d, NvEvoFB16, 0x70, 0x19,
289 0, 0xffffffff, 0x00010000); 288 0, 0xffffffff, 0x00010000);
290 if (ret) { 289 if (ret) {
diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c
index 2d7ea75a09d4..37e21d2be95b 100644
--- a/drivers/gpu/drm/nouveau/nv50_graph.c
+++ b/drivers/gpu/drm/nouveau/nv50_graph.c
@@ -256,6 +256,7 @@ nv50_graph_destroy_context(struct nouveau_channel *chan)
256 struct drm_device *dev = chan->dev; 256 struct drm_device *dev = chan->dev;
257 struct drm_nouveau_private *dev_priv = dev->dev_private; 257 struct drm_nouveau_private *dev_priv = dev->dev_private;
258 struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; 258 struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
259 struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
259 int i, hdr = (dev_priv->chipset == 0x50) ? 0x200 : 0x20; 260 int i, hdr = (dev_priv->chipset == 0x50) ? 0x200 : 0x20;
260 unsigned long flags; 261 unsigned long flags;
261 262
@@ -265,6 +266,7 @@ nv50_graph_destroy_context(struct nouveau_channel *chan)
265 return; 266 return;
266 267
267 spin_lock_irqsave(&dev_priv->context_switch_lock, flags); 268 spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
269 pfifo->reassign(dev, false);
268 pgraph->fifo_access(dev, false); 270 pgraph->fifo_access(dev, false);
269 271
270 if (pgraph->channel(dev) == chan) 272 if (pgraph->channel(dev) == chan)
@@ -275,6 +277,7 @@ nv50_graph_destroy_context(struct nouveau_channel *chan)
275 dev_priv->engine.instmem.flush(dev); 277 dev_priv->engine.instmem.flush(dev);
276 278
277 pgraph->fifo_access(dev, true); 279 pgraph->fifo_access(dev, true);
280 pfifo->reassign(dev, true);
278 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); 281 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
279 282
280 nouveau_gpuobj_ref(NULL, &chan->ramin_grctx); 283 nouveau_gpuobj_ref(NULL, &chan->ramin_grctx);
diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c
index 2e1b1cd19a4b..ea0041810ae3 100644
--- a/drivers/gpu/drm/nouveau/nv50_instmem.c
+++ b/drivers/gpu/drm/nouveau/nv50_instmem.c
@@ -332,8 +332,11 @@ nv50_instmem_get(struct nouveau_gpuobj *gpuobj, u32 size, u32 align)
332 gpuobj->vinst = node->vram->offset; 332 gpuobj->vinst = node->vram->offset;
333 333
334 if (gpuobj->flags & NVOBJ_FLAG_VM) { 334 if (gpuobj->flags & NVOBJ_FLAG_VM) {
335 ret = nouveau_vm_get(dev_priv->chan_vm, size, 12, 335 u32 flags = NV_MEM_ACCESS_RW;
336 NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS, 336 if (!(gpuobj->flags & NVOBJ_FLAG_VM_USER))
337 flags |= NV_MEM_ACCESS_SYS;
338
339 ret = nouveau_vm_get(dev_priv->chan_vm, size, 12, flags,
337 &node->chan_vma); 340 &node->chan_vma);
338 if (ret) { 341 if (ret) {
339 vram->put(dev, &node->vram); 342 vram->put(dev, &node->vram);
diff --git a/drivers/gpu/drm/nouveau/nv50_vm.c b/drivers/gpu/drm/nouveau/nv50_vm.c
index 38e523e10995..459ff08241e5 100644
--- a/drivers/gpu/drm/nouveau/nv50_vm.c
+++ b/drivers/gpu/drm/nouveau/nv50_vm.c
@@ -45,11 +45,6 @@ nv50_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 pde,
45 } 45 }
46 46
47 if (phys & 1) { 47 if (phys & 1) {
48 if (dev_priv->vram_sys_base) {
49 phys += dev_priv->vram_sys_base;
50 phys |= 0x30;
51 }
52
53 if (coverage <= 32 * 1024 * 1024) 48 if (coverage <= 32 * 1024 * 1024)
54 phys |= 0x60; 49 phys |= 0x60;
55 else if (coverage <= 64 * 1024 * 1024) 50 else if (coverage <= 64 * 1024 * 1024)
diff --git a/drivers/gpu/drm/nouveau/nvc0_graph.c b/drivers/gpu/drm/nouveau/nvc0_graph.c
index 5feacd5d5fa4..eb18a7e89f5b 100644
--- a/drivers/gpu/drm/nouveau/nvc0_graph.c
+++ b/drivers/gpu/drm/nouveau/nvc0_graph.c
@@ -31,6 +31,7 @@
31#include "nvc0_graph.h" 31#include "nvc0_graph.h"
32 32
33static void nvc0_graph_isr(struct drm_device *); 33static void nvc0_graph_isr(struct drm_device *);
34static void nvc0_runk140_isr(struct drm_device *);
34static int nvc0_graph_unload_context_to(struct drm_device *dev, u64 chan); 35static int nvc0_graph_unload_context_to(struct drm_device *dev, u64 chan);
35 36
36void 37void
@@ -105,7 +106,8 @@ nvc0_graph_create_context_mmio_list(struct nouveau_channel *chan)
105 if (ret) 106 if (ret)
106 return ret; 107 return ret;
107 108
108 ret = nouveau_gpuobj_new(dev, NULL, 384 * 1024, 4096, NVOBJ_FLAG_VM, 109 ret = nouveau_gpuobj_new(dev, NULL, 384 * 1024, 4096,
110 NVOBJ_FLAG_VM | NVOBJ_FLAG_VM_USER,
109 &grch->unk418810); 111 &grch->unk418810);
110 if (ret) 112 if (ret)
111 return ret; 113 return ret;
@@ -280,6 +282,7 @@ nvc0_graph_destroy(struct drm_device *dev)
280 return; 282 return;
281 283
282 nouveau_irq_unregister(dev, 12); 284 nouveau_irq_unregister(dev, 12);
285 nouveau_irq_unregister(dev, 25);
283 286
284 nouveau_gpuobj_ref(NULL, &priv->unk4188b8); 287 nouveau_gpuobj_ref(NULL, &priv->unk4188b8);
285 nouveau_gpuobj_ref(NULL, &priv->unk4188b4); 288 nouveau_gpuobj_ref(NULL, &priv->unk4188b4);
@@ -389,6 +392,7 @@ nvc0_graph_create(struct drm_device *dev)
389 } 392 }
390 393
391 nouveau_irq_register(dev, 12, nvc0_graph_isr); 394 nouveau_irq_register(dev, 12, nvc0_graph_isr);
395 nouveau_irq_register(dev, 25, nvc0_runk140_isr);
392 NVOBJ_CLASS(dev, 0x902d, GR); /* 2D */ 396 NVOBJ_CLASS(dev, 0x902d, GR); /* 2D */
393 NVOBJ_CLASS(dev, 0x9039, GR); /* M2MF */ 397 NVOBJ_CLASS(dev, 0x9039, GR); /* M2MF */
394 NVOBJ_CLASS(dev, 0x9097, GR); /* 3D */ 398 NVOBJ_CLASS(dev, 0x9097, GR); /* 3D */
@@ -511,8 +515,8 @@ nvc0_graph_init_gpc_1(struct drm_device *dev)
511 nv_wr32(dev, TP_UNIT(gpc, tp, 0x224), 0xc0000000); 515 nv_wr32(dev, TP_UNIT(gpc, tp, 0x224), 0xc0000000);
512 nv_wr32(dev, TP_UNIT(gpc, tp, 0x48c), 0xc0000000); 516 nv_wr32(dev, TP_UNIT(gpc, tp, 0x48c), 0xc0000000);
513 nv_wr32(dev, TP_UNIT(gpc, tp, 0x084), 0xc0000000); 517 nv_wr32(dev, TP_UNIT(gpc, tp, 0x084), 0xc0000000);
514 nv_wr32(dev, TP_UNIT(gpc, tp, 0xe44), 0x001ffffe); 518 nv_wr32(dev, TP_UNIT(gpc, tp, 0x644), 0x001ffffe);
515 nv_wr32(dev, TP_UNIT(gpc, tp, 0xe4c), 0x0000000f); 519 nv_wr32(dev, TP_UNIT(gpc, tp, 0x64c), 0x0000000f);
516 } 520 }
517 nv_wr32(dev, GPC_UNIT(gpc, 0x2c90), 0xffffffff); 521 nv_wr32(dev, GPC_UNIT(gpc, 0x2c90), 0xffffffff);
518 nv_wr32(dev, GPC_UNIT(gpc, 0x2c94), 0xffffffff); 522 nv_wr32(dev, GPC_UNIT(gpc, 0x2c94), 0xffffffff);
@@ -776,3 +780,19 @@ nvc0_graph_isr(struct drm_device *dev)
776 780
777 nv_wr32(dev, 0x400500, 0x00010001); 781 nv_wr32(dev, 0x400500, 0x00010001);
778} 782}
783
784static void
785nvc0_runk140_isr(struct drm_device *dev)
786{
787 u32 units = nv_rd32(dev, 0x00017c) & 0x1f;
788
789 while (units) {
790 u32 unit = ffs(units) - 1;
791 u32 reg = 0x140000 + unit * 0x2000;
792 u32 st0 = nv_mask(dev, reg + 0x1020, 0, 0);
793 u32 st1 = nv_mask(dev, reg + 0x1420, 0, 0);
794
795 NV_INFO(dev, "PRUNK140: %d 0x%08x 0x%08x\n", unit, st0, st1);
796 units &= ~(1 << unit);
797 }
798}
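
The new nvc0_runk140_isr() walks its unit mask with the usual ffs() idiom, clearing each bit as it is handled. With an assumed status of 0x13 (units 0, 1 and 4 pending), the loop would report:

        /* pass 1: ffs(0x13) - 1 = 0  ->  registers at 0x140000 + 0x1020/0x1420
         * pass 2: ffs(0x12) - 1 = 1  ->  registers at 0x142000 + 0x1020/0x1420
         * pass 3: ffs(0x10) - 1 = 4  ->  registers at 0x148000 + 0x1020/0x1420
         * units is now 0 and the loop terminates.
         */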
diff --git a/drivers/gpu/drm/nouveau/nvc0_grctx.c b/drivers/gpu/drm/nouveau/nvc0_grctx.c
index b9e68b2d30aa..f880ff776db8 100644
--- a/drivers/gpu/drm/nouveau/nvc0_grctx.c
+++ b/drivers/gpu/drm/nouveau/nvc0_grctx.c
@@ -1830,7 +1830,7 @@ nvc0_grctx_generate(struct nouveau_channel *chan)
1830 1830
1831 for (tp = 0, id = 0; tp < 4; tp++) { 1831 for (tp = 0, id = 0; tp < 4; tp++) {
1832 for (gpc = 0; gpc < priv->gpc_nr; gpc++) { 1832 for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
1833 if (tp <= priv->tp_nr[gpc]) { 1833 if (tp < priv->tp_nr[gpc]) {
1834 nv_wr32(dev, TP_UNIT(gpc, tp, 0x698), id); 1834 nv_wr32(dev, TP_UNIT(gpc, tp, 0x698), id);
1835 nv_wr32(dev, TP_UNIT(gpc, tp, 0x4e8), id); 1835 nv_wr32(dev, TP_UNIT(gpc, tp, 0x4e8), id);
1836 nv_wr32(dev, GPC_UNIT(gpc, 0x0c10 + tp * 4), id); 1836 nv_wr32(dev, GPC_UNIT(gpc, 0x0c10 + tp * 4), id);
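
The comparison fix above is an off-by-one: priv->tp_nr[gpc] counts the TPs in a GPC, so valid indices run 0 .. tp_nr - 1. For a GPC with tp_nr == 2, for example:

        /* old test (tp <= 2): programmed ids for TPs 0, 1 and a nonexistent TP 2
         * new test (tp <  2): stops after TP 1, as intended
         */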
diff --git a/drivers/gpu/drm/nouveau/nvc0_vm.c b/drivers/gpu/drm/nouveau/nvc0_vm.c
index 4b9251bb0ff4..e4e83c2caf5b 100644
--- a/drivers/gpu/drm/nouveau/nvc0_vm.c
+++ b/drivers/gpu/drm/nouveau/nvc0_vm.c
@@ -48,8 +48,8 @@ nvc0_vm_addr(struct nouveau_vma *vma, u64 phys, u32 memtype, u32 target)
48 phys >>= 8; 48 phys >>= 8;
49 49
50 phys |= 0x00000001; /* present */ 50 phys |= 0x00000001; /* present */
51// if (vma->access & NV_MEM_ACCESS_SYS) 51 if (vma->access & NV_MEM_ACCESS_SYS)
52// phys |= 0x00000002; 52 phys |= 0x00000002;
53 53
54 phys |= ((u64)target << 32); 54 phys |= ((u64)target << 32);
55 phys |= ((u64)memtype << 36); 55 phys |= ((u64)memtype << 36);
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index b0ab185b86f6..b1537000a104 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -555,6 +555,7 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
555 dp_clock = dig_connector->dp_clock; 555 dp_clock = dig_connector->dp_clock;
556 } 556 }
557 } 557 }
558/* this might work properly with the new pll algo */
558#if 0 /* doesn't work properly on some laptops */ 559#if 0 /* doesn't work properly on some laptops */
559 /* use recommended ref_div for ss */ 560 /* use recommended ref_div for ss */
560 if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { 561 if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
@@ -572,6 +573,11 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
572 adjusted_clock = mode->clock * 2; 573 adjusted_clock = mode->clock * 2;
573 if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) 574 if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
574 pll->flags |= RADEON_PLL_PREFER_CLOSEST_LOWER; 575 pll->flags |= RADEON_PLL_PREFER_CLOSEST_LOWER;
576 /* rv515 needs more testing with this option */
577 if (rdev->family != CHIP_RV515) {
578 if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
579 pll->flags |= RADEON_PLL_IS_LCD;
580 }
575 } else { 581 } else {
576 if (encoder->encoder_type != DRM_MODE_ENCODER_DAC) 582 if (encoder->encoder_type != DRM_MODE_ENCODER_DAC)
577 pll->flags |= RADEON_PLL_NO_ODD_POST_DIV; 583 pll->flags |= RADEON_PLL_NO_ODD_POST_DIV;
@@ -606,14 +612,9 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
606 args.v1.usPixelClock = cpu_to_le16(mode->clock / 10); 612 args.v1.usPixelClock = cpu_to_le16(mode->clock / 10);
607 args.v1.ucTransmitterID = radeon_encoder->encoder_id; 613 args.v1.ucTransmitterID = radeon_encoder->encoder_id;
608 args.v1.ucEncodeMode = encoder_mode; 614 args.v1.ucEncodeMode = encoder_mode;
609 if (encoder_mode == ATOM_ENCODER_MODE_DP) { 615 if (ss_enabled)
610 if (ss_enabled)
611 args.v1.ucConfig |=
612 ADJUST_DISPLAY_CONFIG_SS_ENABLE;
613 } else if (encoder_mode == ATOM_ENCODER_MODE_LVDS) {
614 args.v1.ucConfig |= 616 args.v1.ucConfig |=
615 ADJUST_DISPLAY_CONFIG_SS_ENABLE; 617 ADJUST_DISPLAY_CONFIG_SS_ENABLE;
616 }
617 618
618 atom_execute_table(rdev->mode_info.atom_context, 619 atom_execute_table(rdev->mode_info.atom_context,
619 index, (uint32_t *)&args); 620 index, (uint32_t *)&args);
@@ -624,12 +625,12 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
624 args.v3.sInput.ucTransmitterID = radeon_encoder->encoder_id; 625 args.v3.sInput.ucTransmitterID = radeon_encoder->encoder_id;
625 args.v3.sInput.ucEncodeMode = encoder_mode; 626 args.v3.sInput.ucEncodeMode = encoder_mode;
626 args.v3.sInput.ucDispPllConfig = 0; 627 args.v3.sInput.ucDispPllConfig = 0;
628 if (ss_enabled)
629 args.v3.sInput.ucDispPllConfig |=
630 DISPPLL_CONFIG_SS_ENABLE;
627 if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) { 631 if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
628 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; 632 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
629 if (encoder_mode == ATOM_ENCODER_MODE_DP) { 633 if (encoder_mode == ATOM_ENCODER_MODE_DP) {
630 if (ss_enabled)
631 args.v3.sInput.ucDispPllConfig |=
632 DISPPLL_CONFIG_SS_ENABLE;
633 args.v3.sInput.ucDispPllConfig |= 634 args.v3.sInput.ucDispPllConfig |=
634 DISPPLL_CONFIG_COHERENT_MODE; 635 DISPPLL_CONFIG_COHERENT_MODE;
635 /* 16200 or 27000 */ 636 /* 16200 or 27000 */
@@ -649,18 +650,11 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
649 } 650 }
650 } else if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { 651 } else if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
651 if (encoder_mode == ATOM_ENCODER_MODE_DP) { 652 if (encoder_mode == ATOM_ENCODER_MODE_DP) {
652 if (ss_enabled)
653 args.v3.sInput.ucDispPllConfig |=
654 DISPPLL_CONFIG_SS_ENABLE;
655 args.v3.sInput.ucDispPllConfig |= 653 args.v3.sInput.ucDispPllConfig |=
656 DISPPLL_CONFIG_COHERENT_MODE; 654 DISPPLL_CONFIG_COHERENT_MODE;
657 /* 16200 or 27000 */ 655 /* 16200 or 27000 */
658 args.v3.sInput.usPixelClock = cpu_to_le16(dp_clock / 10); 656 args.v3.sInput.usPixelClock = cpu_to_le16(dp_clock / 10);
659 } else if (encoder_mode == ATOM_ENCODER_MODE_LVDS) { 657 } else if (encoder_mode != ATOM_ENCODER_MODE_LVDS) {
660 if (ss_enabled)
661 args.v3.sInput.ucDispPllConfig |=
662 DISPPLL_CONFIG_SS_ENABLE;
663 } else {
664 if (mode->clock > 165000) 658 if (mode->clock > 165000)
665 args.v3.sInput.ucDispPllConfig |= 659 args.v3.sInput.ucDispPllConfig |=
666 DISPPLL_CONFIG_DUAL_LINK; 660 DISPPLL_CONFIG_DUAL_LINK;
@@ -963,8 +957,16 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode
963 /* adjust pixel clock as needed */ 957 /* adjust pixel clock as needed */
964 adjusted_clock = atombios_adjust_pll(crtc, mode, pll, ss_enabled, &ss); 958 adjusted_clock = atombios_adjust_pll(crtc, mode, pll, ss_enabled, &ss);
965 959
966 radeon_compute_pll(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div, 960 /* rv515 seems happier with the old algo */
967 &ref_div, &post_div); 961 if (rdev->family == CHIP_RV515)
962 radeon_compute_pll_legacy(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div,
963 &ref_div, &post_div);
964 else if (ASIC_IS_AVIVO(rdev))
965 radeon_compute_pll_avivo(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div,
966 &ref_div, &post_div);
967 else
968 radeon_compute_pll_legacy(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div,
969 &ref_div, &post_div);
968 970
969 atombios_crtc_program_ss(crtc, ATOM_DISABLE, radeon_crtc->pll_id, &ss); 971 atombios_crtc_program_ss(crtc, ATOM_DISABLE, radeon_crtc->pll_id, &ss);
970 972
@@ -1006,6 +1008,7 @@ static int evergreen_crtc_do_set_base(struct drm_crtc *crtc,
1006 struct radeon_bo *rbo; 1008 struct radeon_bo *rbo;
1007 uint64_t fb_location; 1009 uint64_t fb_location;
1008 uint32_t fb_format, fb_pitch_pixels, tiling_flags; 1010 uint32_t fb_format, fb_pitch_pixels, tiling_flags;
1011 u32 fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_NONE);
1009 int r; 1012 int r;
1010 1013
1011 /* no fb bound */ 1014 /* no fb bound */
@@ -1057,11 +1060,17 @@ static int evergreen_crtc_do_set_base(struct drm_crtc *crtc,
1057 case 16: 1060 case 16:
1058 fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_16BPP) | 1061 fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_16BPP) |
1059 EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB565)); 1062 EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB565));
1063#ifdef __BIG_ENDIAN
1064 fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN16);
1065#endif
1060 break; 1066 break;
1061 case 24: 1067 case 24:
1062 case 32: 1068 case 32:
1063 fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_32BPP) | 1069 fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_32BPP) |
1064 EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB8888)); 1070 EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB8888));
1071#ifdef __BIG_ENDIAN
1072 fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN32);
1073#endif
1065 break; 1074 break;
1066 default: 1075 default:
1067 DRM_ERROR("Unsupported screen depth %d\n", 1076 DRM_ERROR("Unsupported screen depth %d\n",
@@ -1106,6 +1115,7 @@ static int evergreen_crtc_do_set_base(struct drm_crtc *crtc,
1106 WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset, 1115 WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
1107 (u32) fb_location & EVERGREEN_GRPH_SURFACE_ADDRESS_MASK); 1116 (u32) fb_location & EVERGREEN_GRPH_SURFACE_ADDRESS_MASK);
1108 WREG32(EVERGREEN_GRPH_CONTROL + radeon_crtc->crtc_offset, fb_format); 1117 WREG32(EVERGREEN_GRPH_CONTROL + radeon_crtc->crtc_offset, fb_format);
1118 WREG32(EVERGREEN_GRPH_SWAP_CONTROL + radeon_crtc->crtc_offset, fb_swap);
1109 1119
1110 WREG32(EVERGREEN_GRPH_SURFACE_OFFSET_X + radeon_crtc->crtc_offset, 0); 1120 WREG32(EVERGREEN_GRPH_SURFACE_OFFSET_X + radeon_crtc->crtc_offset, 0);
1111 WREG32(EVERGREEN_GRPH_SURFACE_OFFSET_Y + radeon_crtc->crtc_offset, 0); 1121 WREG32(EVERGREEN_GRPH_SURFACE_OFFSET_Y + radeon_crtc->crtc_offset, 0);
@@ -1162,6 +1172,7 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
1162 struct drm_framebuffer *target_fb; 1172 struct drm_framebuffer *target_fb;
1163 uint64_t fb_location; 1173 uint64_t fb_location;
1164 uint32_t fb_format, fb_pitch_pixels, tiling_flags; 1174 uint32_t fb_format, fb_pitch_pixels, tiling_flags;
1175 u32 fb_swap = R600_D1GRPH_SWAP_ENDIAN_NONE;
1165 int r; 1176 int r;
1166 1177
1167 /* no fb bound */ 1178 /* no fb bound */
@@ -1215,12 +1226,18 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
1215 fb_format = 1226 fb_format =
1216 AVIVO_D1GRPH_CONTROL_DEPTH_16BPP | 1227 AVIVO_D1GRPH_CONTROL_DEPTH_16BPP |
1217 AVIVO_D1GRPH_CONTROL_16BPP_RGB565; 1228 AVIVO_D1GRPH_CONTROL_16BPP_RGB565;
1229#ifdef __BIG_ENDIAN
1230 fb_swap = R600_D1GRPH_SWAP_ENDIAN_16BIT;
1231#endif
1218 break; 1232 break;
1219 case 24: 1233 case 24:
1220 case 32: 1234 case 32:
1221 fb_format = 1235 fb_format =
1222 AVIVO_D1GRPH_CONTROL_DEPTH_32BPP | 1236 AVIVO_D1GRPH_CONTROL_DEPTH_32BPP |
1223 AVIVO_D1GRPH_CONTROL_32BPP_ARGB8888; 1237 AVIVO_D1GRPH_CONTROL_32BPP_ARGB8888;
1238#ifdef __BIG_ENDIAN
1239 fb_swap = R600_D1GRPH_SWAP_ENDIAN_32BIT;
1240#endif
1224 break; 1241 break;
1225 default: 1242 default:
1226 DRM_ERROR("Unsupported screen depth %d\n", 1243 DRM_ERROR("Unsupported screen depth %d\n",
@@ -1260,6 +1277,8 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
1260 WREG32(AVIVO_D1GRPH_SECONDARY_SURFACE_ADDRESS + 1277 WREG32(AVIVO_D1GRPH_SECONDARY_SURFACE_ADDRESS +
1261 radeon_crtc->crtc_offset, (u32) fb_location); 1278 radeon_crtc->crtc_offset, (u32) fb_location);
1262 WREG32(AVIVO_D1GRPH_CONTROL + radeon_crtc->crtc_offset, fb_format); 1279 WREG32(AVIVO_D1GRPH_CONTROL + radeon_crtc->crtc_offset, fb_format);
1280 if (rdev->family >= CHIP_R600)
1281 WREG32(R600_D1GRPH_SWAP_CONTROL + radeon_crtc->crtc_offset, fb_swap);
1263 1282
1264 WREG32(AVIVO_D1GRPH_SURFACE_OFFSET_X + radeon_crtc->crtc_offset, 0); 1283 WREG32(AVIVO_D1GRPH_SURFACE_OFFSET_X + radeon_crtc->crtc_offset, 0);
1265 WREG32(AVIVO_D1GRPH_SURFACE_OFFSET_Y + radeon_crtc->crtc_offset, 0); 1284 WREG32(AVIVO_D1GRPH_SURFACE_OFFSET_Y + radeon_crtc->crtc_offset, 0);
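
The PLL-selection hunk above boils down to a two-way choice: RV515 (and anything pre-AVIVO) keeps the legacy divider search, everything else uses the AVIVO algorithm. A logically equivalent condensed form, for reference, using the same functions and arguments as the hunk:

        if (rdev->family == CHIP_RV515 || !ASIC_IS_AVIVO(rdev))
                radeon_compute_pll_legacy(pll, adjusted_clock, &pll_clock,
                                          &fb_div, &frac_fb_div,
                                          &ref_div, &post_div);
        else
                radeon_compute_pll_avivo(pll, adjusted_clock, &pll_clock,
                                         &fb_div, &frac_fb_div,
                                         &ref_div, &post_div);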
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index 4e7778d44b8d..695de9a38506 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -187,9 +187,9 @@ static int dp_link_clock_for_mode_clock(u8 dpcd[DP_DPCD_SIZE], int mode_clock)
187int dp_mode_valid(u8 dpcd[DP_DPCD_SIZE], int mode_clock) 187int dp_mode_valid(u8 dpcd[DP_DPCD_SIZE], int mode_clock)
188{ 188{
189 int lanes = dp_lanes_for_mode_clock(dpcd, mode_clock); 189 int lanes = dp_lanes_for_mode_clock(dpcd, mode_clock);
190 int bw = dp_lanes_for_mode_clock(dpcd, mode_clock); 190 int dp_clock = dp_link_clock_for_mode_clock(dpcd, mode_clock);
191 191
192 if ((lanes == 0) || (bw == 0)) 192 if ((lanes == 0) || (dp_clock == 0))
193 return MODE_CLOCK_HIGH; 193 return MODE_CLOCK_HIGH;
194 194
195 return MODE_OK; 195 return MODE_OK;
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 7fe8ebdcdc0e..ffdc8332b76e 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -97,26 +97,29 @@ u32 evergreen_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
97} 97}
98 98
99/* get temperature in millidegrees */ 99/* get temperature in millidegrees */
100u32 evergreen_get_temp(struct radeon_device *rdev) 100int evergreen_get_temp(struct radeon_device *rdev)
101{ 101{
102 u32 temp = (RREG32(CG_MULT_THERMAL_STATUS) & ASIC_T_MASK) >> 102 u32 temp = (RREG32(CG_MULT_THERMAL_STATUS) & ASIC_T_MASK) >>
103 ASIC_T_SHIFT; 103 ASIC_T_SHIFT;
104 u32 actual_temp = 0; 104 u32 actual_temp = 0;
105 105
106 if ((temp >> 10) & 1) 106 if (temp & 0x400)
107 actual_temp = 0; 107 actual_temp = -256;
108 else if ((temp >> 9) & 1) 108 else if (temp & 0x200)
109 actual_temp = 255; 109 actual_temp = 255;
110 else 110 else if (temp & 0x100) {
111 actual_temp = (temp >> 1) & 0xff; 111 actual_temp = temp & 0x1ff;
112 actual_temp |= ~0x1ff;
113 } else
114 actual_temp = temp & 0xff;
112 115
113 return actual_temp * 1000; 116 return (actual_temp * 1000) / 2;
114} 117}
115 118
116u32 sumo_get_temp(struct radeon_device *rdev) 119int sumo_get_temp(struct radeon_device *rdev)
117{ 120{
118 u32 temp = RREG32(CG_THERMAL_STATUS) & 0xff; 121 u32 temp = RREG32(CG_THERMAL_STATUS) & 0xff;
119 u32 actual_temp = (temp >> 1) & 0xff; 122 int actual_temp = temp - 49;
120 123
121 return actual_temp * 1000; 124 return actual_temp * 1000;
122} 125}
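
The reworked evergreen_get_temp() above reads the thermal status as a signed value in half-degrees: bit 10 forces -256, bit 9 clamps to 255, and bit 8 acts as the sign bit of a 9-bit value. Two example decodes, plus one for the new sumo_get_temp() (register values are made up):

        /* evergreen, temp = 0x050: no flag bits  ->  actual_temp = 80
         *   (80 * 1000) / 2  =  40000 millidegrees  (40 C)
         *
         * evergreen, temp = 0x1f6: bit 8 set, sign-extend via |= ~0x1ff
         *   actual_temp = -10  ->  (-10 * 1000) / 2  =  -5000  (-5 C)
         *
         * sumo, temp = 89:  89 - 49 = 40  ->  40000 millidegrees  (40 C)
         */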
@@ -1182,6 +1185,18 @@ static void evergreen_mc_program(struct radeon_device *rdev)
1182/* 1185/*
1183 * CP. 1186 * CP.
1184 */ 1187 */
1188void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
1189{
1190 /* set to DX10/11 mode */
1191 radeon_ring_write(rdev, PACKET3(PACKET3_MODE_CONTROL, 0));
1192 radeon_ring_write(rdev, 1);
1193 /* FIXME: implement */
1194 radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
1195 radeon_ring_write(rdev, ib->gpu_addr & 0xFFFFFFFC);
1196 radeon_ring_write(rdev, upper_32_bits(ib->gpu_addr) & 0xFF);
1197 radeon_ring_write(rdev, ib->length_dw);
1198}
1199
1185 1200
1186static int evergreen_cp_load_microcode(struct radeon_device *rdev) 1201static int evergreen_cp_load_microcode(struct radeon_device *rdev)
1187{ 1202{
@@ -1233,7 +1248,7 @@ static int evergreen_cp_start(struct radeon_device *rdev)
1233 cp_me = 0xff; 1248 cp_me = 0xff;
1234 WREG32(CP_ME_CNTL, cp_me); 1249 WREG32(CP_ME_CNTL, cp_me);
1235 1250
1236 r = radeon_ring_lock(rdev, evergreen_default_size + 15); 1251 r = radeon_ring_lock(rdev, evergreen_default_size + 19);
1237 if (r) { 1252 if (r) {
1238 DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r); 1253 DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
1239 return r; 1254 return r;
@@ -1266,6 +1281,11 @@ static int evergreen_cp_start(struct radeon_device *rdev)
1266 radeon_ring_write(rdev, 0xffffffff); 1281 radeon_ring_write(rdev, 0xffffffff);
1267 radeon_ring_write(rdev, 0xffffffff); 1282 radeon_ring_write(rdev, 0xffffffff);
1268 1283
1284 radeon_ring_write(rdev, 0xc0026900);
1285 radeon_ring_write(rdev, 0x00000316);
1286 radeon_ring_write(rdev, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
1287 radeon_ring_write(rdev, 0x00000010); /* */
1288
1269 radeon_ring_unlock_commit(rdev); 1289 radeon_ring_unlock_commit(rdev);
1270 1290
1271 return 0; 1291 return 0;
@@ -2072,6 +2092,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
2072 WREG32(VGT_CACHE_INVALIDATION, vgt_cache_invalidation); 2092 WREG32(VGT_CACHE_INVALIDATION, vgt_cache_invalidation);
2073 2093
2074 WREG32(VGT_GS_VERTEX_REUSE, 16); 2094 WREG32(VGT_GS_VERTEX_REUSE, 16);
2095 WREG32(PA_SU_LINE_STIPPLE_VALUE, 0);
2075 WREG32(PA_SC_LINE_STIPPLE_STATE, 0); 2096 WREG32(PA_SC_LINE_STIPPLE_STATE, 0);
2076 2097
2077 WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, 14); 2098 WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, 14);
@@ -2201,6 +2222,9 @@ static int evergreen_gpu_soft_reset(struct radeon_device *rdev)
2201 struct evergreen_mc_save save; 2222 struct evergreen_mc_save save;
2202 u32 grbm_reset = 0; 2223 u32 grbm_reset = 0;
2203 2224
2225 if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
2226 return 0;
2227
2204 dev_info(rdev->dev, "GPU softreset \n"); 2228 dev_info(rdev->dev, "GPU softreset \n");
2205 dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n", 2229 dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n",
2206 RREG32(GRBM_STATUS)); 2230 RREG32(GRBM_STATUS));
@@ -3002,31 +3026,6 @@ int evergreen_copy_blit(struct radeon_device *rdev,
3002 return 0; 3026 return 0;
3003} 3027}
3004 3028
3005static bool evergreen_card_posted(struct radeon_device *rdev)
3006{
3007 u32 reg;
3008
3009 /* first check CRTCs */
3010 if (rdev->flags & RADEON_IS_IGP)
3011 reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
3012 RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
3013 else
3014 reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
3015 RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET) |
3016 RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) |
3017 RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET) |
3018 RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) |
3019 RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
3020 if (reg & EVERGREEN_CRTC_MASTER_EN)
3021 return true;
3022
3023 /* then check MEM_SIZE, in case the crtcs are off */
3024 if (RREG32(CONFIG_MEMSIZE))
3025 return true;
3026
3027 return false;
3028}
3029
3030/* Plan is to move initialization in that function and use 3029/* Plan is to move initialization in that function and use
3031 * helper function so that radeon_device_init pretty much 3030 * helper function so that radeon_device_init pretty much
3032 * do nothing more than calling asic specific function. This 3031 * do nothing more than calling asic specific function. This
@@ -3063,7 +3062,7 @@ int evergreen_init(struct radeon_device *rdev)
3063 if (radeon_asic_reset(rdev)) 3062 if (radeon_asic_reset(rdev))
3064 dev_warn(rdev->dev, "GPU reset failed !\n"); 3063 dev_warn(rdev->dev, "GPU reset failed !\n");
3065 /* Post card if necessary */ 3064 /* Post card if necessary */
3066 if (!evergreen_card_posted(rdev)) { 3065 if (!radeon_card_posted(rdev)) {
3067 if (!rdev->bios) { 3066 if (!rdev->bios) {
3068 dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n"); 3067 dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
3069 return -EINVAL; 3068 return -EINVAL;
@@ -3158,6 +3157,9 @@ static void evergreen_pcie_gen2_enable(struct radeon_device *rdev)
3158{ 3157{
3159 u32 link_width_cntl, speed_cntl; 3158 u32 link_width_cntl, speed_cntl;
3160 3159
3160 if (radeon_pcie_gen2 == 0)
3161 return;
3162
3161 if (rdev->flags & RADEON_IS_IGP) 3163 if (rdev->flags & RADEON_IS_IGP)
3162 return; 3164 return;
3163 3165
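
The new evergreen_ring_ib_execute() above (and the blit-state setup in evergreen_blit_kms.c later in this diff) emit the same three-dword INDIRECT_BUFFER body: the low 32 bits of the GPU address with the bottom two bits masked off, the high 8 bits, then the length in dwords. With a made-up address:

        /* ib->gpu_addr = 0x0000001234567890, ib->length_dw = 64
         *
         *   gpu_addr & 0xFFFFFFFC           ->  0x34567890
         *   upper_32_bits(gpu_addr) & 0xFF  ->  0x12
         *   length_dw                       ->  64
         */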
diff --git a/drivers/gpu/drm/radeon/evergreen_blit_kms.c b/drivers/gpu/drm/radeon/evergreen_blit_kms.c
index b758dc7f2f2c..a1ba4b3053d0 100644
--- a/drivers/gpu/drm/radeon/evergreen_blit_kms.c
+++ b/drivers/gpu/drm/radeon/evergreen_blit_kms.c
@@ -232,7 +232,7 @@ draw_auto(struct radeon_device *rdev)
232 232
233} 233}
234 234
235/* emits 30 */ 235/* emits 36 */
236static void 236static void
237set_default_state(struct radeon_device *rdev) 237set_default_state(struct radeon_device *rdev)
238{ 238{
@@ -245,6 +245,8 @@ set_default_state(struct radeon_device *rdev)
245 int num_hs_threads, num_ls_threads; 245 int num_hs_threads, num_ls_threads;
246 int num_ps_stack_entries, num_vs_stack_entries, num_gs_stack_entries, num_es_stack_entries; 246 int num_ps_stack_entries, num_vs_stack_entries, num_gs_stack_entries, num_es_stack_entries;
247 int num_hs_stack_entries, num_ls_stack_entries; 247 int num_hs_stack_entries, num_ls_stack_entries;
248 u64 gpu_addr;
249 int dwords;
248 250
249 switch (rdev->family) { 251 switch (rdev->family) {
250 case CHIP_CEDAR: 252 case CHIP_CEDAR:
@@ -497,6 +499,18 @@ set_default_state(struct radeon_device *rdev)
497 radeon_ring_write(rdev, 0x00000000); 499 radeon_ring_write(rdev, 0x00000000);
498 radeon_ring_write(rdev, 0x00000000); 500 radeon_ring_write(rdev, 0x00000000);
499 501
502 /* set to DX10/11 mode */
503 radeon_ring_write(rdev, PACKET3(PACKET3_MODE_CONTROL, 0));
504 radeon_ring_write(rdev, 1);
505
506 /* emit an IB pointing at default state */
507 dwords = ALIGN(rdev->r600_blit.state_len, 0x10);
508 gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.state_offset;
509 radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
510 radeon_ring_write(rdev, gpu_addr & 0xFFFFFFFC);
511 radeon_ring_write(rdev, upper_32_bits(gpu_addr) & 0xFF);
512 radeon_ring_write(rdev, dwords);
513
500} 514}
501 515
502static inline uint32_t i2f(uint32_t input) 516static inline uint32_t i2f(uint32_t input)
@@ -527,8 +541,10 @@ static inline uint32_t i2f(uint32_t input)
527int evergreen_blit_init(struct radeon_device *rdev) 541int evergreen_blit_init(struct radeon_device *rdev)
528{ 542{
529 u32 obj_size; 543 u32 obj_size;
530 int r; 544 int r, dwords;
531 void *ptr; 545 void *ptr;
546 u32 packet2s[16];
547 int num_packet2s = 0;
532 548
533 /* pin copy shader into vram if already initialized */ 549 /* pin copy shader into vram if already initialized */
534 if (rdev->r600_blit.shader_obj) 550 if (rdev->r600_blit.shader_obj)
@@ -536,8 +552,17 @@ int evergreen_blit_init(struct radeon_device *rdev)
536 552
537 mutex_init(&rdev->r600_blit.mutex); 553 mutex_init(&rdev->r600_blit.mutex);
538 rdev->r600_blit.state_offset = 0; 554 rdev->r600_blit.state_offset = 0;
539 rdev->r600_blit.state_len = 0; 555
540 obj_size = 0; 556 rdev->r600_blit.state_len = evergreen_default_size;
557
558 dwords = rdev->r600_blit.state_len;
559 while (dwords & 0xf) {
560 packet2s[num_packet2s++] = PACKET2(0);
561 dwords++;
562 }
563
564 obj_size = dwords * 4;
565 obj_size = ALIGN(obj_size, 256);
541 566
542 rdev->r600_blit.vs_offset = obj_size; 567 rdev->r600_blit.vs_offset = obj_size;
543 obj_size += evergreen_vs_size * 4; 568 obj_size += evergreen_vs_size * 4;
@@ -567,6 +592,12 @@ int evergreen_blit_init(struct radeon_device *rdev)
567 return r; 592 return r;
568 } 593 }
569 594
595 memcpy_toio(ptr + rdev->r600_blit.state_offset,
596 evergreen_default_state, rdev->r600_blit.state_len * 4);
597
598 if (num_packet2s)
599 memcpy_toio(ptr + rdev->r600_blit.state_offset + (rdev->r600_blit.state_len * 4),
600 packet2s, num_packet2s * 4);
570 memcpy(ptr + rdev->r600_blit.vs_offset, evergreen_vs, evergreen_vs_size * 4); 601 memcpy(ptr + rdev->r600_blit.vs_offset, evergreen_vs, evergreen_vs_size * 4);
571 memcpy(ptr + rdev->r600_blit.ps_offset, evergreen_ps, evergreen_ps_size * 4); 602 memcpy(ptr + rdev->r600_blit.ps_offset, evergreen_ps, evergreen_ps_size * 4);
572 radeon_bo_kunmap(rdev->r600_blit.shader_obj); 603 radeon_bo_kunmap(rdev->r600_blit.shader_obj);
@@ -652,7 +683,7 @@ int evergreen_blit_prepare_copy(struct radeon_device *rdev, int size_bytes)
652 /* calculate number of loops correctly */ 683 /* calculate number of loops correctly */
653 ring_size = num_loops * dwords_per_loop; 684 ring_size = num_loops * dwords_per_loop;
654 /* set default + shaders */ 685 /* set default + shaders */
655 ring_size += 46; /* shaders + def state */ 686 ring_size += 52; /* shaders + def state */
656 ring_size += 10; /* fence emit for VB IB */ 687 ring_size += 10; /* fence emit for VB IB */
657 ring_size += 5; /* done copy */ 688 ring_size += 5; /* done copy */
658 ring_size += 10; /* fence emit for done copy */ 689 ring_size += 10; /* fence emit for done copy */
@@ -660,7 +691,7 @@ int evergreen_blit_prepare_copy(struct radeon_device *rdev, int size_bytes)
660 if (r) 691 if (r)
661 return r; 692 return r;
662 693
663 set_default_state(rdev); /* 30 */ 694 set_default_state(rdev); /* 36 */
664 set_shaders(rdev); /* 16 */ 695 set_shaders(rdev); /* 16 */
665 return 0; 696 return 0;
666} 697}
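
The state-IB setup above pads the default-state length up to a 16-dword boundary with filler (type-2) packets before sizing and emitting the indirect buffer. A minimal standalone C sketch of just that padding step, assuming an illustrative PACKET2_NOP value and starting length rather than the driver's:

#include <stdio.h>

#define PACKET2_NOP 0x80000000u   /* illustrative filler word */

int main(void)
{
        unsigned int dwords = 330;            /* e.g. a default-state length */
        unsigned int pad[16];
        int num_pad = 0;

        while (dwords & 0xf) {                /* round up to a multiple of 16 */
                pad[num_pad++] = PACKET2_NOP;
                dwords++;
        }
        printf("padded to %u dwords with %d filler words\n", dwords, num_pad);
        return 0;
}
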
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index 36d32d83d866..afec1aca2a73 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -240,6 +240,7 @@
240#define FORCE_EOV_MAX_CLK_CNT(x) ((x) << 0) 240#define FORCE_EOV_MAX_CLK_CNT(x) ((x) << 0)
241#define FORCE_EOV_MAX_REZ_CNT(x) ((x) << 16) 241#define FORCE_EOV_MAX_REZ_CNT(x) ((x) << 16)
242#define PA_SC_LINE_STIPPLE 0x28A0C 242#define PA_SC_LINE_STIPPLE 0x28A0C
243#define PA_SU_LINE_STIPPLE_VALUE 0x8A60
243#define PA_SC_LINE_STIPPLE_STATE 0x8B10 244#define PA_SC_LINE_STIPPLE_STATE 0x8B10
244 245
245#define SCRATCH_REG0 0x8500 246#define SCRATCH_REG0 0x8500
@@ -652,6 +653,7 @@
652#define PACKET3_DISPATCH_DIRECT 0x15 653#define PACKET3_DISPATCH_DIRECT 0x15
653#define PACKET3_DISPATCH_INDIRECT 0x16 654#define PACKET3_DISPATCH_INDIRECT 0x16
654#define PACKET3_INDIRECT_BUFFER_END 0x17 655#define PACKET3_INDIRECT_BUFFER_END 0x17
656#define PACKET3_MODE_CONTROL 0x18
655#define PACKET3_SET_PREDICATION 0x20 657#define PACKET3_SET_PREDICATION 0x20
656#define PACKET3_REG_RMW 0x21 658#define PACKET3_REG_RMW 0x21
657#define PACKET3_COND_EXEC 0x22 659#define PACKET3_COND_EXEC 0x22
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index f637595b14e1..5f15820efe12 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -1031,8 +1031,8 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
1031 WREG32(RADEON_CP_CSQ_MODE, 1031 WREG32(RADEON_CP_CSQ_MODE,
1032 REG_SET(RADEON_INDIRECT2_START, indirect2_start) | 1032 REG_SET(RADEON_INDIRECT2_START, indirect2_start) |
1033 REG_SET(RADEON_INDIRECT1_START, indirect1_start)); 1033 REG_SET(RADEON_INDIRECT1_START, indirect1_start));
1034 WREG32(0x718, 0); 1034 WREG32(RADEON_CP_RB_WPTR_DELAY, 0);
1035 WREG32(0x744, 0x00004D4D); 1035 WREG32(RADEON_CP_CSQ_MODE, 0x00004D4D);
1036 WREG32(RADEON_CP_CSQ_CNTL, RADEON_CSQ_PRIBM_INDBM); 1036 WREG32(RADEON_CP_CSQ_CNTL, RADEON_CSQ_PRIBM_INDBM);
1037 radeon_ring_start(rdev); 1037 radeon_ring_start(rdev);
1038 r = radeon_ring_test(rdev); 1038 r = radeon_ring_test(rdev);
@@ -2086,12 +2086,13 @@ int r100_asic_reset(struct radeon_device *rdev)
2086{ 2086{
2087 struct r100_mc_save save; 2087 struct r100_mc_save save;
2088 u32 status, tmp; 2088 u32 status, tmp;
2089 int ret = 0;
2089 2090
2090 r100_mc_stop(rdev, &save);
2091 status = RREG32(R_000E40_RBBM_STATUS); 2091 status = RREG32(R_000E40_RBBM_STATUS);
2092 if (!G_000E40_GUI_ACTIVE(status)) { 2092 if (!G_000E40_GUI_ACTIVE(status)) {
2093 return 0; 2093 return 0;
2094 } 2094 }
2095 r100_mc_stop(rdev, &save);
2095 status = RREG32(R_000E40_RBBM_STATUS); 2096 status = RREG32(R_000E40_RBBM_STATUS);
2096 dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status); 2097 dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
2097 /* stop CP */ 2098 /* stop CP */
@@ -2131,11 +2132,11 @@ int r100_asic_reset(struct radeon_device *rdev)
2131 G_000E40_TAM_BUSY(status) || G_000E40_PB_BUSY(status)) { 2132 G_000E40_TAM_BUSY(status) || G_000E40_PB_BUSY(status)) {
2132 dev_err(rdev->dev, "failed to reset GPU\n"); 2133 dev_err(rdev->dev, "failed to reset GPU\n");
2133 rdev->gpu_lockup = true; 2134 rdev->gpu_lockup = true;
2134 return -1; 2135 ret = -1;
2135 } 2136 } else
2137 dev_info(rdev->dev, "GPU reset succeed\n");
2136 r100_mc_resume(rdev, &save); 2138 r100_mc_resume(rdev, &save);
2137 dev_info(rdev->dev, "GPU reset succeed\n"); 2139 return ret;
2138 return 0;
2139} 2140}
2140 2141
2141void r100_set_common_regs(struct radeon_device *rdev) 2142void r100_set_common_regs(struct radeon_device *rdev)
@@ -2346,10 +2347,10 @@ void r100_vga_set_state(struct radeon_device *rdev, bool state)
2346 2347
2347 temp = RREG32(RADEON_CONFIG_CNTL); 2348 temp = RREG32(RADEON_CONFIG_CNTL);
2348 if (state == false) { 2349 if (state == false) {
2349 temp &= ~(1<<8); 2350 temp &= ~RADEON_CFG_VGA_RAM_EN;
2350 temp |= (1<<9); 2351 temp |= RADEON_CFG_VGA_IO_DIS;
2351 } else { 2352 } else {
2352 temp &= ~(1<<9); 2353 temp &= ~RADEON_CFG_VGA_IO_DIS;
2353 } 2354 }
2354 WREG32(RADEON_CONFIG_CNTL, temp); 2355 WREG32(RADEON_CONFIG_CNTL, temp);
2355} 2356}
@@ -3521,7 +3522,7 @@ int r100_ring_test(struct radeon_device *rdev)
3521 if (i < rdev->usec_timeout) { 3522 if (i < rdev->usec_timeout) {
3522 DRM_INFO("ring test succeeded in %d usecs\n", i); 3523 DRM_INFO("ring test succeeded in %d usecs\n", i);
3523 } else { 3524 } else {
3524 DRM_ERROR("radeon: ring test failed (sracth(0x%04X)=0x%08X)\n", 3525 DRM_ERROR("radeon: ring test failed (scratch(0x%04X)=0x%08X)\n",
3525 scratch, tmp); 3526 scratch, tmp);
3526 r = -EINVAL; 3527 r = -EINVAL;
3527 } 3528 }
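
The r100_asic_reset rework above samples RBBM_STATUS before r100_mc_stop() and funnels success and failure through a single exit, so the MC clients stopped at the top are always resumed. A standalone outline of that control flow; gui_active/mc_stop/mc_resume are hypothetical stand-ins for the register accessors:

#include <stdio.h>

static int gui_active(unsigned int status) { return (status & 0x80000000u) != 0; }
static void mc_stop(void)   { printf("stop MC clients\n"); }
static void mc_resume(void) { printf("resume MC clients\n"); }

static int asic_reset(unsigned int status_before, unsigned int status_after)
{
        int ret = 0;

        if (!gui_active(status_before))   /* idle: nothing to reset, MC untouched */
                return 0;

        mc_stop();
        /* ... soft-reset register writes would go here ... */
        if (gui_active(status_after))
                ret = -1;                 /* engines still busy: report failure */
        else
                printf("GPU reset succeeded\n");
        mc_resume();                      /* always undo mc_stop() on this path */
        return ret;
}

int main(void)
{
        return asic_reset(0x80000000u, 0) ? 1 : 0;
}
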
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index fae5e709f270..55fe5ba7def3 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -69,6 +69,9 @@ void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev)
69 mb(); 69 mb();
70} 70}
71 71
72#define R300_PTE_WRITEABLE (1 << 2)
73#define R300_PTE_READABLE (1 << 3)
74
72int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr) 75int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
73{ 76{
74 void __iomem *ptr = (void *)rdev->gart.table.vram.ptr; 77 void __iomem *ptr = (void *)rdev->gart.table.vram.ptr;
@@ -78,7 +81,7 @@ int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
78 } 81 }
79 addr = (lower_32_bits(addr) >> 8) | 82 addr = (lower_32_bits(addr) >> 8) |
80 ((upper_32_bits(addr) & 0xff) << 24) | 83 ((upper_32_bits(addr) & 0xff) << 24) |
81 0xc; 84 R300_PTE_WRITEABLE | R300_PTE_READABLE;
82 /* on x86 we want this to be CPU endian, on powerpc 85 /* on x86 we want this to be CPU endian, on powerpc
83 * on powerpc without HW swappers, it'll get swapped on way 86 * on powerpc without HW swappers, it'll get swapped on way
84 * into VRAM - so no need for cpu_to_le32 on VRAM tables */ 87 * into VRAM - so no need for cpu_to_le32 on VRAM tables */
@@ -135,7 +138,7 @@ int rv370_pcie_gart_enable(struct radeon_device *rdev)
135 WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_LO, rdev->mc.vram_start); 138 WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_LO, rdev->mc.vram_start);
136 WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_HI, 0); 139 WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_HI, 0);
137 /* Clear error */ 140 /* Clear error */
138 WREG32_PCIE(0x18, 0); 141 WREG32_PCIE(RADEON_PCIE_TX_GART_ERROR, 0);
139 tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL); 142 tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
140 tmp |= RADEON_PCIE_TX_GART_EN; 143 tmp |= RADEON_PCIE_TX_GART_EN;
141 tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD; 144 tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
@@ -405,12 +408,13 @@ int r300_asic_reset(struct radeon_device *rdev)
405{ 408{
406 struct r100_mc_save save; 409 struct r100_mc_save save;
407 u32 status, tmp; 410 u32 status, tmp;
411 int ret = 0;
408 412
409 r100_mc_stop(rdev, &save);
410 status = RREG32(R_000E40_RBBM_STATUS); 413 status = RREG32(R_000E40_RBBM_STATUS);
411 if (!G_000E40_GUI_ACTIVE(status)) { 414 if (!G_000E40_GUI_ACTIVE(status)) {
412 return 0; 415 return 0;
413 } 416 }
417 r100_mc_stop(rdev, &save);
414 status = RREG32(R_000E40_RBBM_STATUS); 418 status = RREG32(R_000E40_RBBM_STATUS);
415 dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status); 419 dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
416 /* stop CP */ 420 /* stop CP */
@@ -451,11 +455,11 @@ int r300_asic_reset(struct radeon_device *rdev)
451 if (G_000E40_GA_BUSY(status) || G_000E40_VAP_BUSY(status)) { 455 if (G_000E40_GA_BUSY(status) || G_000E40_VAP_BUSY(status)) {
452 dev_err(rdev->dev, "failed to reset GPU\n"); 456 dev_err(rdev->dev, "failed to reset GPU\n");
453 rdev->gpu_lockup = true; 457 rdev->gpu_lockup = true;
454 return -1; 458 ret = -1;
455 } 459 } else
460 dev_info(rdev->dev, "GPU reset succeed\n");
456 r100_mc_resume(rdev, &save); 461 r100_mc_resume(rdev, &save);
457 dev_info(rdev->dev, "GPU reset succeed\n"); 462 return ret;
458 return 0;
459} 463}
460 464
461/* 465/*
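
Replacing the bare 0xc with R300_PTE_WRITEABLE | R300_PTE_READABLE documents what the GART entry encodes. A standalone sketch of the same entry layout (low address bits shifted down by 8, high byte placed in bits 24-31, plus the two permission bits); make_pte and the sample address are illustrative, not driver code:

#include <stdint.h>
#include <stdio.h>

#define PTE_WRITEABLE (1u << 2)
#define PTE_READABLE  (1u << 3)

static uint32_t make_pte(uint64_t addr)
{
        return (uint32_t)((addr & 0xffffffffull) >> 8) |
               (uint32_t)(((addr >> 32) & 0xff) << 24) |
               PTE_WRITEABLE | PTE_READABLE;
}

int main(void)
{
        printf("pte = 0x%08x\n", (unsigned int)make_pte(0x1234567000ull));
        return 0;
}
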
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c
index c387346f93a9..0b59ed7c7d2c 100644
--- a/drivers/gpu/drm/radeon/r420.c
+++ b/drivers/gpu/drm/radeon/r420.c
@@ -96,7 +96,7 @@ void r420_pipes_init(struct radeon_device *rdev)
96 "programming pipes. Bad things might happen.\n"); 96 "programming pipes. Bad things might happen.\n");
97 } 97 }
98 /* get max number of pipes */ 98 /* get max number of pipes */
99 gb_pipe_select = RREG32(0x402C); 99 gb_pipe_select = RREG32(R400_GB_PIPE_SELECT);
100 num_pipes = ((gb_pipe_select >> 12) & 3) + 1; 100 num_pipes = ((gb_pipe_select >> 12) & 3) + 1;
101 101
102 /* SE chips have 1 pipe */ 102 /* SE chips have 1 pipe */
diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c
index 3c8677f9e385..2ce80d976568 100644
--- a/drivers/gpu/drm/radeon/r520.c
+++ b/drivers/gpu/drm/radeon/r520.c
@@ -79,8 +79,8 @@ static void r520_gpu_init(struct radeon_device *rdev)
79 WREG32(0x4128, 0xFF); 79 WREG32(0x4128, 0xFF);
80 } 80 }
81 r420_pipes_init(rdev); 81 r420_pipes_init(rdev);
82 gb_pipe_select = RREG32(0x402C); 82 gb_pipe_select = RREG32(R400_GB_PIPE_SELECT);
83 tmp = RREG32(0x170C); 83 tmp = RREG32(R300_DST_PIPE_CONFIG);
84 pipe_select_current = (tmp >> 2) & 3; 84 pipe_select_current = (tmp >> 2) & 3;
85 tmp = (1 << pipe_select_current) | 85 tmp = (1 << pipe_select_current) |
86 (((gb_pipe_select >> 8) & 0xF) << 4); 86 (((gb_pipe_select >> 8) & 0xF) << 4);
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 6b50716267c0..650672a0f5ad 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -97,12 +97,16 @@ void r600_irq_disable(struct radeon_device *rdev);
97static void r600_pcie_gen2_enable(struct radeon_device *rdev); 97static void r600_pcie_gen2_enable(struct radeon_device *rdev);
98 98
99/* get temperature in millidegrees */ 99/* get temperature in millidegrees */
100u32 rv6xx_get_temp(struct radeon_device *rdev) 100int rv6xx_get_temp(struct radeon_device *rdev)
101{ 101{
102 u32 temp = (RREG32(CG_THERMAL_STATUS) & ASIC_T_MASK) >> 102 u32 temp = (RREG32(CG_THERMAL_STATUS) & ASIC_T_MASK) >>
103 ASIC_T_SHIFT; 103 ASIC_T_SHIFT;
104 int actual_temp = temp & 0xff;
104 105
105 return temp * 1000; 106 if (temp & 0x100)
107 actual_temp -= 256;
108
109 return actual_temp * 1000;
106} 110}
107 111
108void r600_pm_get_dynpm_state(struct radeon_device *rdev) 112void r600_pm_get_dynpm_state(struct radeon_device *rdev)
@@ -1287,6 +1291,9 @@ int r600_gpu_soft_reset(struct radeon_device *rdev)
1287 S_008014_CB2_BUSY(1) | S_008014_CB3_BUSY(1); 1291 S_008014_CB2_BUSY(1) | S_008014_CB3_BUSY(1);
1288 u32 tmp; 1292 u32 tmp;
1289 1293
1294 if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
1295 return 0;
1296
1290 dev_info(rdev->dev, "GPU softreset \n"); 1297 dev_info(rdev->dev, "GPU softreset \n");
1291 dev_info(rdev->dev, " R_008010_GRBM_STATUS=0x%08X\n", 1298 dev_info(rdev->dev, " R_008010_GRBM_STATUS=0x%08X\n",
1292 RREG32(R_008010_GRBM_STATUS)); 1299 RREG32(R_008010_GRBM_STATUS));
@@ -2358,24 +2365,6 @@ void r600_clear_surface_reg(struct radeon_device *rdev, int reg)
2358 /* FIXME: implement */ 2365 /* FIXME: implement */
2359} 2366}
2360 2367
2361
2362bool r600_card_posted(struct radeon_device *rdev)
2363{
2364 uint32_t reg;
2365
2366 /* first check CRTCs */
2367 reg = RREG32(D1CRTC_CONTROL) |
2368 RREG32(D2CRTC_CONTROL);
2369 if (reg & CRTC_EN)
2370 return true;
2371
2372 /* then check MEM_SIZE, in case the crtcs are off */
2373 if (RREG32(CONFIG_MEMSIZE))
2374 return true;
2375
2376 return false;
2377}
2378
2379int r600_startup(struct radeon_device *rdev) 2368int r600_startup(struct radeon_device *rdev)
2380{ 2369{
2381 int r; 2370 int r;
@@ -2536,7 +2525,7 @@ int r600_init(struct radeon_device *rdev)
2536 if (r) 2525 if (r)
2537 return r; 2526 return r;
2538 /* Post card if necessary */ 2527 /* Post card if necessary */
2539 if (!r600_card_posted(rdev)) { 2528 if (!radeon_card_posted(rdev)) {
2540 if (!rdev->bios) { 2529 if (!rdev->bios) {
2541 dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n"); 2530 dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
2542 return -EINVAL; 2531 return -EINVAL;
@@ -3658,6 +3647,9 @@ static void r600_pcie_gen2_enable(struct radeon_device *rdev)
3658 u32 link_width_cntl, lanes, speed_cntl, training_cntl, tmp; 3647 u32 link_width_cntl, lanes, speed_cntl, training_cntl, tmp;
3659 u16 link_cntl2; 3648 u16 link_cntl2;
3660 3649
3650 if (radeon_pcie_gen2 == 0)
3651 return;
3652
3661 if (rdev->flags & RADEON_IS_IGP) 3653 if (rdev->flags & RADEON_IS_IGP)
3662 return; 3654 return;
3663 3655
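
Switching rv6xx_get_temp from u32 to int lets the 9-bit two's-complement reading (bit 8 is the sign) come back as a negative millidegree value instead of wrapping. A standalone sketch of that decode with example raw readings:

#include <stdio.h>

static int decode_temp(unsigned int raw)
{
        int t = raw & 0xff;

        if (raw & 0x100)        /* sign bit set: negative reading */
                t -= 256;
        return t * 1000;        /* millidegrees */
}

int main(void)
{
        printf("%d %d\n", decode_temp(0x05), decode_temp(0x1fb));  /* 5000 -5000 */
        return 0;
}
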
diff --git a/drivers/gpu/drm/radeon/r600_reg.h b/drivers/gpu/drm/radeon/r600_reg.h
index 33cda016b083..f869897c7456 100644
--- a/drivers/gpu/drm/radeon/r600_reg.h
+++ b/drivers/gpu/drm/radeon/r600_reg.h
@@ -81,7 +81,11 @@
81#define R600_MEDIUM_VID_LOWER_GPIO_CNTL 0x720 81#define R600_MEDIUM_VID_LOWER_GPIO_CNTL 0x720
82#define R600_LOW_VID_LOWER_GPIO_CNTL 0x724 82#define R600_LOW_VID_LOWER_GPIO_CNTL 0x724
83 83
84 84#define R600_D1GRPH_SWAP_CONTROL 0x610C
85# define R600_D1GRPH_SWAP_ENDIAN_NONE (0 << 0)
86# define R600_D1GRPH_SWAP_ENDIAN_16BIT (1 << 0)
87# define R600_D1GRPH_SWAP_ENDIAN_32BIT (2 << 0)
88# define R600_D1GRPH_SWAP_ENDIAN_64BIT (3 << 0)
85 89
86#define R600_HDP_NONSURFACE_BASE 0x2c04 90#define R600_HDP_NONSURFACE_BASE 0x2c04
87 91
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index e9486630a467..56c48b67ef3d 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -92,6 +92,7 @@ extern int radeon_tv;
92extern int radeon_audio; 92extern int radeon_audio;
93extern int radeon_disp_priority; 93extern int radeon_disp_priority;
94extern int radeon_hw_i2c; 94extern int radeon_hw_i2c;
95extern int radeon_pcie_gen2;
95 96
96/* 97/*
97 * Copy from radeon_drv.h so we don't have to include both and have conflicting 98 * Copy from radeon_drv.h so we don't have to include both and have conflicting
@@ -178,10 +179,10 @@ void radeon_combios_get_power_modes(struct radeon_device *rdev);
178void radeon_atombios_get_power_modes(struct radeon_device *rdev); 179void radeon_atombios_get_power_modes(struct radeon_device *rdev);
179void radeon_atom_set_voltage(struct radeon_device *rdev, u16 level); 180void radeon_atom_set_voltage(struct radeon_device *rdev, u16 level);
180void rs690_pm_info(struct radeon_device *rdev); 181void rs690_pm_info(struct radeon_device *rdev);
181extern u32 rv6xx_get_temp(struct radeon_device *rdev); 182extern int rv6xx_get_temp(struct radeon_device *rdev);
182extern u32 rv770_get_temp(struct radeon_device *rdev); 183extern int rv770_get_temp(struct radeon_device *rdev);
183extern u32 evergreen_get_temp(struct radeon_device *rdev); 184extern int evergreen_get_temp(struct radeon_device *rdev);
184extern u32 sumo_get_temp(struct radeon_device *rdev); 185extern int sumo_get_temp(struct radeon_device *rdev);
185 186
186/* 187/*
187 * Fences. 188 * Fences.
@@ -811,8 +812,7 @@ struct radeon_pm {
811 fixed20_12 sclk; 812 fixed20_12 sclk;
812 fixed20_12 mclk; 813 fixed20_12 mclk;
813 fixed20_12 needed_bandwidth; 814 fixed20_12 needed_bandwidth;
814 /* XXX: use a define for num power modes */ 815 struct radeon_power_state *power_state;
815 struct radeon_power_state power_state[8];
816 /* number of valid power states */ 816 /* number of valid power states */
817 int num_power_states; 817 int num_power_states;
818 int current_power_state_index; 818 int current_power_state_index;
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index 3a1b16186224..e75d63b8e21d 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -759,7 +759,7 @@ static struct radeon_asic evergreen_asic = {
759 .gart_tlb_flush = &evergreen_pcie_gart_tlb_flush, 759 .gart_tlb_flush = &evergreen_pcie_gart_tlb_flush,
760 .gart_set_page = &rs600_gart_set_page, 760 .gart_set_page = &rs600_gart_set_page,
761 .ring_test = &r600_ring_test, 761 .ring_test = &r600_ring_test,
762 .ring_ib_execute = &r600_ring_ib_execute, 762 .ring_ib_execute = &evergreen_ring_ib_execute,
763 .irq_set = &evergreen_irq_set, 763 .irq_set = &evergreen_irq_set,
764 .irq_process = &evergreen_irq_process, 764 .irq_process = &evergreen_irq_process,
765 .get_vblank_counter = &evergreen_get_vblank_counter, 765 .get_vblank_counter = &evergreen_get_vblank_counter,
@@ -805,7 +805,7 @@ static struct radeon_asic sumo_asic = {
805 .gart_tlb_flush = &evergreen_pcie_gart_tlb_flush, 805 .gart_tlb_flush = &evergreen_pcie_gart_tlb_flush,
806 .gart_set_page = &rs600_gart_set_page, 806 .gart_set_page = &rs600_gart_set_page,
807 .ring_test = &r600_ring_test, 807 .ring_test = &r600_ring_test,
808 .ring_ib_execute = &r600_ring_ib_execute, 808 .ring_ib_execute = &evergreen_ring_ib_execute,
809 .irq_set = &evergreen_irq_set, 809 .irq_set = &evergreen_irq_set,
810 .irq_process = &evergreen_irq_process, 810 .irq_process = &evergreen_irq_process,
811 .get_vblank_counter = &evergreen_get_vblank_counter, 811 .get_vblank_counter = &evergreen_get_vblank_counter,
@@ -848,7 +848,7 @@ static struct radeon_asic btc_asic = {
848 .gart_tlb_flush = &evergreen_pcie_gart_tlb_flush, 848 .gart_tlb_flush = &evergreen_pcie_gart_tlb_flush,
849 .gart_set_page = &rs600_gart_set_page, 849 .gart_set_page = &rs600_gart_set_page,
850 .ring_test = &r600_ring_test, 850 .ring_test = &r600_ring_test,
851 .ring_ib_execute = &r600_ring_ib_execute, 851 .ring_ib_execute = &evergreen_ring_ib_execute,
852 .irq_set = &evergreen_irq_set, 852 .irq_set = &evergreen_irq_set,
853 .irq_process = &evergreen_irq_process, 853 .irq_process = &evergreen_irq_process,
854 .get_vblank_counter = &evergreen_get_vblank_counter, 854 .get_vblank_counter = &evergreen_get_vblank_counter,
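
These hunks repoint .ring_ib_execute in the per-ASIC dispatch tables from the r600 helper to a new evergreen-specific one. A minimal standalone sketch of that function-pointer-table pattern; the struct, names, and printf bodies are placeholders, not the driver's radeon_asic layout:

#include <stdio.h>

struct ring_ops {
        void (*ib_execute)(const char *what);
};

static void r600_ib_execute(const char *what)      { printf("r600 IB: %s\n", what); }
static void evergreen_ib_execute(const char *what) { printf("evergreen IB: %s\n", what); }

static const struct ring_ops evergreen_ops = {
        .ib_execute = evergreen_ib_execute,   /* previously pointed at the r600 helper */
};

int main(void)
{
        evergreen_ops.ib_execute("blit copy");
        (void)r600_ib_execute;                /* kept only for comparison */
        return 0;
}
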
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index e01f07718539..c59bd98a2029 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -355,6 +355,7 @@ int evergreen_resume(struct radeon_device *rdev);
355bool evergreen_gpu_is_lockup(struct radeon_device *rdev); 355bool evergreen_gpu_is_lockup(struct radeon_device *rdev);
356int evergreen_asic_reset(struct radeon_device *rdev); 356int evergreen_asic_reset(struct radeon_device *rdev);
357void evergreen_bandwidth_update(struct radeon_device *rdev); 357void evergreen_bandwidth_update(struct radeon_device *rdev);
358void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
358int evergreen_copy_blit(struct radeon_device *rdev, 359int evergreen_copy_blit(struct radeon_device *rdev,
359 uint64_t src_offset, uint64_t dst_offset, 360 uint64_t src_offset, uint64_t dst_offset,
360 unsigned num_pages, struct radeon_fence *fence); 361 unsigned num_pages, struct radeon_fence *fence);
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 1573202a6418..5c1cc7ad9a15 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -387,15 +387,11 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
387 *line_mux = 0x90; 387 *line_mux = 0x90;
388 } 388 }
389 389
390 /* mac rv630 */ 390 /* mac rv630, rv730, others */
391 if ((dev->pdev->device == 0x9588) && 391 if ((supported_device == ATOM_DEVICE_TV1_SUPPORT) &&
392 (dev->pdev->subsystem_vendor == 0x106b) && 392 (*connector_type == DRM_MODE_CONNECTOR_DVII)) {
393 (dev->pdev->subsystem_device == 0x00a6)) { 393 *connector_type = DRM_MODE_CONNECTOR_9PinDIN;
394 if ((supported_device == ATOM_DEVICE_TV1_SUPPORT) && 394 *line_mux = CONNECTOR_7PIN_DIN_ENUM_ID1;
395 (*connector_type == DRM_MODE_CONNECTOR_DVII)) {
396 *connector_type = DRM_MODE_CONNECTOR_9PinDIN;
397 *line_mux = CONNECTOR_7PIN_DIN_ENUM_ID1;
398 }
399 } 395 }
400 396
401 /* ASUS HD 3600 XT board lists the DVI port as HDMI */ 397 /* ASUS HD 3600 XT board lists the DVI port as HDMI */
@@ -1167,16 +1163,6 @@ bool radeon_atom_get_clock_info(struct drm_device *dev)
1167 p1pll->pll_out_min = 64800; 1163 p1pll->pll_out_min = 64800;
1168 else 1164 else
1169 p1pll->pll_out_min = 20000; 1165 p1pll->pll_out_min = 20000;
1170 } else if (p1pll->pll_out_min > 64800) {
1171 /* Limiting the pll output range is a good thing generally as
1172 * it limits the number of possible pll combinations for a given
1173 * frequency presumably to the ones that work best on each card.
1174 * However, certain duallink DVI monitors seem to like
1175 * pll combinations that would be limited by this at least on
1176 * pre-DCE 3.0 r6xx hardware. This might need to be adjusted per
1177 * family.
1178 */
1179 p1pll->pll_out_min = 64800;
1180 } 1166 }
1181 1167
1182 p1pll->pll_in_min = 1168 p1pll->pll_in_min =
@@ -1991,6 +1977,9 @@ static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev)
1991 num_modes = power_info->info.ucNumOfPowerModeEntries; 1977 num_modes = power_info->info.ucNumOfPowerModeEntries;
1992 if (num_modes > ATOM_MAX_NUMBEROF_POWER_BLOCK) 1978 if (num_modes > ATOM_MAX_NUMBEROF_POWER_BLOCK)
1993 num_modes = ATOM_MAX_NUMBEROF_POWER_BLOCK; 1979 num_modes = ATOM_MAX_NUMBEROF_POWER_BLOCK;
1980 rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) * num_modes, GFP_KERNEL);
1981 if (!rdev->pm.power_state)
1982 return state_index;
1994 /* last mode is usually default, array is low to high */ 1983 /* last mode is usually default, array is low to high */
1995 for (i = 0; i < num_modes; i++) { 1984 for (i = 0; i < num_modes; i++) {
1996 rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE; 1985 rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
@@ -2342,6 +2331,10 @@ static int radeon_atombios_parse_power_table_4_5(struct radeon_device *rdev)
2342 power_info = (union power_info *)(mode_info->atom_context->bios + data_offset); 2331 power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
2343 2332
2344 radeon_atombios_add_pplib_thermal_controller(rdev, &power_info->pplib.sThermalController); 2333 radeon_atombios_add_pplib_thermal_controller(rdev, &power_info->pplib.sThermalController);
2334 rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) *
2335 power_info->pplib.ucNumStates, GFP_KERNEL);
2336 if (!rdev->pm.power_state)
2337 return state_index;
2345 /* first mode is usually default, followed by low to high */ 2338 /* first mode is usually default, followed by low to high */
2346 for (i = 0; i < power_info->pplib.ucNumStates; i++) { 2339 for (i = 0; i < power_info->pplib.ucNumStates; i++) {
2347 mode_index = 0; 2340 mode_index = 0;
@@ -2422,6 +2415,10 @@ static int radeon_atombios_parse_power_table_6(struct radeon_device *rdev)
2422 non_clock_info_array = (struct NonClockInfoArray *) 2415 non_clock_info_array = (struct NonClockInfoArray *)
2423 (mode_info->atom_context->bios + data_offset + 2416 (mode_info->atom_context->bios + data_offset +
2424 power_info->pplib.usNonClockInfoArrayOffset); 2417 power_info->pplib.usNonClockInfoArrayOffset);
2418 rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) *
2419 state_array->ucNumEntries, GFP_KERNEL);
2420 if (!rdev->pm.power_state)
2421 return state_index;
2425 for (i = 0; i < state_array->ucNumEntries; i++) { 2422 for (i = 0; i < state_array->ucNumEntries; i++) {
2426 mode_index = 0; 2423 mode_index = 0;
2427 power_state = (union pplib_power_state *)&state_array->states[i]; 2424 power_state = (union pplib_power_state *)&state_array->states[i];
@@ -2495,19 +2492,22 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
2495 break; 2492 break;
2496 } 2493 }
2497 } else { 2494 } else {
2498 /* add the default mode */ 2495 rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state), GFP_KERNEL);
2499 rdev->pm.power_state[state_index].type = 2496 if (rdev->pm.power_state) {
2500 POWER_STATE_TYPE_DEFAULT; 2497 /* add the default mode */
2501 rdev->pm.power_state[state_index].num_clock_modes = 1; 2498 rdev->pm.power_state[state_index].type =
2502 rdev->pm.power_state[state_index].clock_info[0].mclk = rdev->clock.default_mclk; 2499 POWER_STATE_TYPE_DEFAULT;
2503 rdev->pm.power_state[state_index].clock_info[0].sclk = rdev->clock.default_sclk; 2500 rdev->pm.power_state[state_index].num_clock_modes = 1;
2504 rdev->pm.power_state[state_index].default_clock_mode = 2501 rdev->pm.power_state[state_index].clock_info[0].mclk = rdev->clock.default_mclk;
2505 &rdev->pm.power_state[state_index].clock_info[0]; 2502 rdev->pm.power_state[state_index].clock_info[0].sclk = rdev->clock.default_sclk;
2506 rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE; 2503 rdev->pm.power_state[state_index].default_clock_mode =
2507 rdev->pm.power_state[state_index].pcie_lanes = 16; 2504 &rdev->pm.power_state[state_index].clock_info[0];
2508 rdev->pm.default_power_state_index = state_index; 2505 rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
2509 rdev->pm.power_state[state_index].flags = 0; 2506 rdev->pm.power_state[state_index].pcie_lanes = 16;
2510 state_index++; 2507 rdev->pm.default_power_state_index = state_index;
2508 rdev->pm.power_state[state_index].flags = 0;
2509 state_index++;
2510 }
2511 } 2511 }
2512 2512
2513 rdev->pm.num_power_states = state_index; 2513 rdev->pm.num_power_states = state_index;
@@ -2623,7 +2623,7 @@ void radeon_atom_initialize_bios_scratch_regs(struct drm_device *dev)
2623 bios_2_scratch &= ~ATOM_S2_VRI_BRIGHT_ENABLE; 2623 bios_2_scratch &= ~ATOM_S2_VRI_BRIGHT_ENABLE;
2624 2624
2625 /* tell the bios not to handle mode switching */ 2625 /* tell the bios not to handle mode switching */
2626 bios_6_scratch |= (ATOM_S6_ACC_BLOCK_DISPLAY_SWITCH | ATOM_S6_ACC_MODE); 2626 bios_6_scratch |= ATOM_S6_ACC_BLOCK_DISPLAY_SWITCH;
2627 2627
2628 if (rdev->family >= CHIP_R600) { 2628 if (rdev->family >= CHIP_R600) {
2629 WREG32(R600_BIOS_2_SCRATCH, bios_2_scratch); 2629 WREG32(R600_BIOS_2_SCRATCH, bios_2_scratch);
@@ -2674,10 +2674,13 @@ void radeon_atom_output_lock(struct drm_encoder *encoder, bool lock)
2674 else 2674 else
2675 bios_6_scratch = RREG32(RADEON_BIOS_6_SCRATCH); 2675 bios_6_scratch = RREG32(RADEON_BIOS_6_SCRATCH);
2676 2676
2677 if (lock) 2677 if (lock) {
2678 bios_6_scratch |= ATOM_S6_CRITICAL_STATE; 2678 bios_6_scratch |= ATOM_S6_CRITICAL_STATE;
2679 else 2679 bios_6_scratch &= ~ATOM_S6_ACC_MODE;
2680 } else {
2680 bios_6_scratch &= ~ATOM_S6_CRITICAL_STATE; 2681 bios_6_scratch &= ~ATOM_S6_CRITICAL_STATE;
2682 bios_6_scratch |= ATOM_S6_ACC_MODE;
2683 }
2681 2684
2682 if (rdev->family >= CHIP_R600) 2685 if (rdev->family >= CHIP_R600)
2683 WREG32(R600_BIOS_6_SCRATCH, bios_6_scratch); 2686 WREG32(R600_BIOS_6_SCRATCH, bios_6_scratch);
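
With radeon.h switching pm.power_state from a fixed power_state[8] array to a pointer, each power-table parser above now sizes the table from what the BIOS reports and returns early when the allocation fails. A plain-C sketch of that shape, with calloc standing in for kzalloc(..., GFP_KERNEL) and an illustrative mode count:

#include <stdio.h>
#include <stdlib.h>

struct power_state { unsigned int sclk, mclk; };

int main(void)
{
        unsigned int num_modes = 5;             /* e.g. ucNumOfPowerModeEntries */
        struct power_state *ps;

        ps = calloc(num_modes, sizeof(*ps));    /* zeroed, like kzalloc */
        if (!ps)
                return 0;                       /* parser would return state_index unchanged */

        printf("allocated %u power states\n", num_modes);
        free(ps);
        return 0;
}
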
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index 591fcae8f224..d27ef74590cd 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -2442,6 +2442,17 @@ void radeon_combios_get_power_modes(struct radeon_device *rdev)
2442 2442
2443 rdev->pm.default_power_state_index = -1; 2443 rdev->pm.default_power_state_index = -1;
2444 2444
2445 /* allocate 2 power states */
2446 rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) * 2, GFP_KERNEL);
2447 if (!rdev->pm.power_state) {
2448 rdev->pm.default_power_state_index = state_index;
2449 rdev->pm.num_power_states = 0;
2450
2451 rdev->pm.current_power_state_index = rdev->pm.default_power_state_index;
2452 rdev->pm.current_clock_mode_index = 0;
2453 return;
2454 }
2455
2445 if (rdev->flags & RADEON_IS_MOBILITY) { 2456 if (rdev->flags & RADEON_IS_MOBILITY) {
2446 offset = combios_get_table_offset(dev, COMBIOS_POWERPLAY_INFO_TABLE); 2457 offset = combios_get_table_offset(dev, COMBIOS_POWERPLAY_INFO_TABLE);
2447 if (offset) { 2458 if (offset) {
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 26091d602b84..0d478932b1a9 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -891,9 +891,9 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
891 pci_disable_device(dev->pdev); 891 pci_disable_device(dev->pdev);
892 pci_set_power_state(dev->pdev, PCI_D3hot); 892 pci_set_power_state(dev->pdev, PCI_D3hot);
893 } 893 }
894 acquire_console_sem(); 894 console_lock();
895 radeon_fbdev_set_suspend(rdev, 1); 895 radeon_fbdev_set_suspend(rdev, 1);
896 release_console_sem(); 896 console_unlock();
897 return 0; 897 return 0;
898} 898}
899 899
@@ -905,11 +905,11 @@ int radeon_resume_kms(struct drm_device *dev)
905 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) 905 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
906 return 0; 906 return 0;
907 907
908 acquire_console_sem(); 908 console_lock();
909 pci_set_power_state(dev->pdev, PCI_D0); 909 pci_set_power_state(dev->pdev, PCI_D0);
910 pci_restore_state(dev->pdev); 910 pci_restore_state(dev->pdev);
911 if (pci_enable_device(dev->pdev)) { 911 if (pci_enable_device(dev->pdev)) {
912 release_console_sem(); 912 console_unlock();
913 return -1; 913 return -1;
914 } 914 }
915 pci_set_master(dev->pdev); 915 pci_set_master(dev->pdev);
@@ -920,7 +920,7 @@ int radeon_resume_kms(struct drm_device *dev)
920 radeon_restore_bios_scratch_regs(rdev); 920 radeon_restore_bios_scratch_regs(rdev);
921 921
922 radeon_fbdev_set_suspend(rdev, 0); 922 radeon_fbdev_set_suspend(rdev, 0);
923 release_console_sem(); 923 console_unlock();
924 924
925 /* reset hpd state */ 925 /* reset hpd state */
926 radeon_hpd_init(rdev); 926 radeon_hpd_init(rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index d26dabf878d9..2eff98cfd728 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -780,6 +780,115 @@ static int radeon_ddc_dump(struct drm_connector *connector)
780 return ret; 780 return ret;
781} 781}
782 782
783/* avivo */
784static void avivo_get_fb_div(struct radeon_pll *pll,
785 u32 target_clock,
786 u32 post_div,
787 u32 ref_div,
788 u32 *fb_div,
789 u32 *frac_fb_div)
790{
791 u32 tmp = post_div * ref_div;
792
793 tmp *= target_clock;
794 *fb_div = tmp / pll->reference_freq;
795 *frac_fb_div = tmp % pll->reference_freq;
796}
797
798static u32 avivo_get_post_div(struct radeon_pll *pll,
799 u32 target_clock)
800{
801 u32 vco, post_div, tmp;
802
803 if (pll->flags & RADEON_PLL_USE_POST_DIV)
804 return pll->post_div;
805
806 if (pll->flags & RADEON_PLL_PREFER_MINM_OVER_MAXP) {
807 if (pll->flags & RADEON_PLL_IS_LCD)
808 vco = pll->lcd_pll_out_min;
809 else
810 vco = pll->pll_out_min;
811 } else {
812 if (pll->flags & RADEON_PLL_IS_LCD)
813 vco = pll->lcd_pll_out_max;
814 else
815 vco = pll->pll_out_max;
816 }
817
818 post_div = vco / target_clock;
819 tmp = vco % target_clock;
820
821 if (pll->flags & RADEON_PLL_PREFER_MINM_OVER_MAXP) {
822 if (tmp)
823 post_div++;
824 } else {
825 if (!tmp)
826 post_div--;
827 }
828
829 return post_div;
830}
831
832#define MAX_TOLERANCE 10
833
834void radeon_compute_pll_avivo(struct radeon_pll *pll,
835 u32 freq,
836 u32 *dot_clock_p,
837 u32 *fb_div_p,
838 u32 *frac_fb_div_p,
839 u32 *ref_div_p,
840 u32 *post_div_p)
841{
842 u32 target_clock = freq / 10;
843 u32 post_div = avivo_get_post_div(pll, target_clock);
844 u32 ref_div = pll->min_ref_div;
845 u32 fb_div = 0, frac_fb_div = 0, tmp;
846
847 if (pll->flags & RADEON_PLL_USE_REF_DIV)
848 ref_div = pll->reference_div;
849
850 if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV) {
851 avivo_get_fb_div(pll, target_clock, post_div, ref_div, &fb_div, &frac_fb_div);
852 frac_fb_div = (100 * frac_fb_div) / pll->reference_freq;
853 if (frac_fb_div >= 5) {
854 frac_fb_div -= 5;
855 frac_fb_div = frac_fb_div / 10;
856 frac_fb_div++;
857 }
858 if (frac_fb_div >= 10) {
859 fb_div++;
860 frac_fb_div = 0;
861 }
862 } else {
863 while (ref_div <= pll->max_ref_div) {
864 avivo_get_fb_div(pll, target_clock, post_div, ref_div,
865 &fb_div, &frac_fb_div);
866 if (frac_fb_div >= (pll->reference_freq / 2))
867 fb_div++;
868 frac_fb_div = 0;
869 tmp = (pll->reference_freq * fb_div) / (post_div * ref_div);
870 tmp = (tmp * 10000) / target_clock;
871
872 if (tmp > (10000 + MAX_TOLERANCE))
873 ref_div++;
874 else if (tmp >= (10000 - MAX_TOLERANCE))
875 break;
876 else
877 ref_div++;
878 }
879 }
880
881 *dot_clock_p = ((pll->reference_freq * fb_div * 10) + (pll->reference_freq * frac_fb_div)) /
882 (ref_div * post_div * 10);
883 *fb_div_p = fb_div;
884 *frac_fb_div_p = frac_fb_div;
885 *ref_div_p = ref_div;
886 *post_div_p = post_div;
887 DRM_DEBUG_KMS("%d, pll dividers - fb: %d.%d ref: %d, post %d\n",
888 *dot_clock_p, fb_div, frac_fb_div, ref_div, post_div);
889}
890
891/* pre-avivo */
783static inline uint32_t radeon_div(uint64_t n, uint32_t d) 892static inline uint32_t radeon_div(uint64_t n, uint32_t d)
784{ 893{
785 uint64_t mod; 894 uint64_t mod;
@@ -790,13 +899,13 @@ static inline uint32_t radeon_div(uint64_t n, uint32_t d)
790 return n; 899 return n;
791} 900}
792 901
793void radeon_compute_pll(struct radeon_pll *pll, 902void radeon_compute_pll_legacy(struct radeon_pll *pll,
794 uint64_t freq, 903 uint64_t freq,
795 uint32_t *dot_clock_p, 904 uint32_t *dot_clock_p,
796 uint32_t *fb_div_p, 905 uint32_t *fb_div_p,
797 uint32_t *frac_fb_div_p, 906 uint32_t *frac_fb_div_p,
798 uint32_t *ref_div_p, 907 uint32_t *ref_div_p,
799 uint32_t *post_div_p) 908 uint32_t *post_div_p)
800{ 909{
801 uint32_t min_ref_div = pll->min_ref_div; 910 uint32_t min_ref_div = pll->min_ref_div;
802 uint32_t max_ref_div = pll->max_ref_div; 911 uint32_t max_ref_div = pll->max_ref_div;
@@ -826,6 +935,9 @@ void radeon_compute_pll(struct radeon_pll *pll,
826 pll_out_max = pll->pll_out_max; 935 pll_out_max = pll->pll_out_max;
827 } 936 }
828 937
938 if (pll_out_min > 64800)
939 pll_out_min = 64800;
940
829 if (pll->flags & RADEON_PLL_USE_REF_DIV) 941 if (pll->flags & RADEON_PLL_USE_REF_DIV)
830 min_ref_div = max_ref_div = pll->reference_div; 942 min_ref_div = max_ref_div = pll->reference_div;
831 else { 943 else {
@@ -849,7 +961,7 @@ void radeon_compute_pll(struct radeon_pll *pll,
849 max_fractional_feed_div = pll->max_frac_feedback_div; 961 max_fractional_feed_div = pll->max_frac_feedback_div;
850 } 962 }
851 963
852 for (post_div = max_post_div; post_div >= min_post_div; --post_div) { 964 for (post_div = min_post_div; post_div <= max_post_div; ++post_div) {
853 uint32_t ref_div; 965 uint32_t ref_div;
854 966
855 if ((pll->flags & RADEON_PLL_NO_ODD_POST_DIV) && (post_div & 1)) 967 if ((pll->flags & RADEON_PLL_NO_ODD_POST_DIV) && (post_div & 1))
@@ -965,6 +1077,10 @@ void radeon_compute_pll(struct radeon_pll *pll,
965 *frac_fb_div_p = best_frac_feedback_div; 1077 *frac_fb_div_p = best_frac_feedback_div;
966 *ref_div_p = best_ref_div; 1078 *ref_div_p = best_ref_div;
967 *post_div_p = best_post_div; 1079 *post_div_p = best_post_div;
1080 DRM_DEBUG_KMS("%d %d, pll dividers - fb: %d.%d ref: %d, post %d\n",
1081 freq, best_freq / 1000, best_feedback_div, best_frac_feedback_div,
1082 best_ref_div, best_post_div);
1083
968} 1084}
969 1085
970static void radeon_user_framebuffer_destroy(struct drm_framebuffer *fb) 1086static void radeon_user_framebuffer_destroy(struct drm_framebuffer *fb)
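
The new radeon_compute_pll_avivo above derives the feedback divider by scaling the target clock by the chosen post and reference dividers, then splitting the result into integer and fractional parts against the reference frequency. A standalone sketch of that div/mod split with made-up example values (the units here are arbitrary):

#include <stdio.h>

int main(void)
{
        unsigned int ref_freq = 2700;   /* reference frequency, arbitrary units */
        unsigned int target   = 10650;  /* desired output clock, same units */
        unsigned int post_div = 6, ref_div = 1;
        unsigned int tmp = post_div * ref_div * target;
        unsigned int fb_div      = tmp / ref_freq;
        unsigned int frac_fb_div = tmp % ref_freq;

        printf("fb_div %u, remainder %u/%u\n", fb_div, frac_fb_div, ref_freq);
        return 0;
}
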
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index be5cb4f28c29..275b26a708d6 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -48,7 +48,7 @@
48 * - 2.5.0 - add get accel 2 to work around ddx breakage for evergreen 48 * - 2.5.0 - add get accel 2 to work around ddx breakage for evergreen
49 * - 2.6.0 - add tiling config query (r6xx+), add initial HiZ support (r300->r500) 49 * - 2.6.0 - add tiling config query (r6xx+), add initial HiZ support (r300->r500)
50 * 2.7.0 - fixups for r600 2D tiling support. (no external ABI change), add eg dyn gpr regs 50 * 2.7.0 - fixups for r600 2D tiling support. (no external ABI change), add eg dyn gpr regs
51 * 2.8.0 - pageflip support, r500 US_FORMAT regs. r500 ARGB2101010 colorbuf, r300->r500 CMASK 51 * 2.8.0 - pageflip support, r500 US_FORMAT regs. r500 ARGB2101010 colorbuf, r300->r500 CMASK, clock crystal query
52 */ 52 */
53#define KMS_DRIVER_MAJOR 2 53#define KMS_DRIVER_MAJOR 2
54#define KMS_DRIVER_MINOR 8 54#define KMS_DRIVER_MINOR 8
@@ -104,6 +104,7 @@ int radeon_tv = 1;
104int radeon_audio = 1; 104int radeon_audio = 1;
105int radeon_disp_priority = 0; 105int radeon_disp_priority = 0;
106int radeon_hw_i2c = 0; 106int radeon_hw_i2c = 0;
107int radeon_pcie_gen2 = 0;
107 108
108MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers"); 109MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers");
109module_param_named(no_wb, radeon_no_wb, int, 0444); 110module_param_named(no_wb, radeon_no_wb, int, 0444);
@@ -147,6 +148,9 @@ module_param_named(disp_priority, radeon_disp_priority, int, 0444);
147MODULE_PARM_DESC(hw_i2c, "hw i2c engine enable (0 = disable)"); 148MODULE_PARM_DESC(hw_i2c, "hw i2c engine enable (0 = disable)");
148module_param_named(hw_i2c, radeon_hw_i2c, int, 0444); 149module_param_named(hw_i2c, radeon_hw_i2c, int, 0444);
149 150
151MODULE_PARM_DESC(pcie_gen2, "PCIE Gen2 mode (1 = enable)");
152module_param_named(pcie_gen2, radeon_pcie_gen2, int, 0444);
153
150static int radeon_suspend(struct drm_device *dev, pm_message_t state) 154static int radeon_suspend(struct drm_device *dev, pm_message_t state)
151{ 155{
152 drm_radeon_private_t *dev_priv = dev->dev_private; 156 drm_radeon_private_t *dev_priv = dev->dev_private;
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index 8fd184286c0b..d4a542247618 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -641,7 +641,7 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
641 switch (connector->connector_type) { 641 switch (connector->connector_type) {
642 case DRM_MODE_CONNECTOR_DVII: 642 case DRM_MODE_CONNECTOR_DVII:
643 case DRM_MODE_CONNECTOR_HDMIB: /* HDMI-B is basically DL-DVI; analog works fine */ 643 case DRM_MODE_CONNECTOR_HDMIB: /* HDMI-B is basically DL-DVI; analog works fine */
644 if (drm_detect_monitor_audio(radeon_connector->edid)) { 644 if (drm_detect_monitor_audio(radeon_connector->edid) && radeon_audio) {
645 /* fix me */ 645 /* fix me */
646 if (ASIC_IS_DCE4(rdev)) 646 if (ASIC_IS_DCE4(rdev))
647 return ATOM_ENCODER_MODE_DVI; 647 return ATOM_ENCODER_MODE_DVI;
@@ -655,7 +655,7 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
655 case DRM_MODE_CONNECTOR_DVID: 655 case DRM_MODE_CONNECTOR_DVID:
656 case DRM_MODE_CONNECTOR_HDMIA: 656 case DRM_MODE_CONNECTOR_HDMIA:
657 default: 657 default:
658 if (drm_detect_monitor_audio(radeon_connector->edid)) { 658 if (drm_detect_monitor_audio(radeon_connector->edid) && radeon_audio) {
659 /* fix me */ 659 /* fix me */
660 if (ASIC_IS_DCE4(rdev)) 660 if (ASIC_IS_DCE4(rdev))
661 return ATOM_ENCODER_MODE_DVI; 661 return ATOM_ENCODER_MODE_DVI;
@@ -673,7 +673,7 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
673 if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) || 673 if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
674 (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) 674 (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP))
675 return ATOM_ENCODER_MODE_DP; 675 return ATOM_ENCODER_MODE_DP;
676 else if (drm_detect_monitor_audio(radeon_connector->edid)) { 676 else if (drm_detect_monitor_audio(radeon_connector->edid) && radeon_audio) {
677 /* fix me */ 677 /* fix me */
678 if (ASIC_IS_DCE4(rdev)) 678 if (ASIC_IS_DCE4(rdev))
679 return ATOM_ENCODER_MODE_DVI; 679 return ATOM_ENCODER_MODE_DVI;
@@ -1063,7 +1063,7 @@ atombios_set_edp_panel_power(struct drm_connector *connector, int action)
1063 if (!ASIC_IS_DCE4(rdev)) 1063 if (!ASIC_IS_DCE4(rdev))
1064 return; 1064 return;
1065 1065
1066 if ((action != ATOM_TRANSMITTER_ACTION_POWER_ON) || 1066 if ((action != ATOM_TRANSMITTER_ACTION_POWER_ON) &&
1067 (action != ATOM_TRANSMITTER_ACTION_POWER_OFF)) 1067 (action != ATOM_TRANSMITTER_ACTION_POWER_OFF))
1068 return; 1068 return;
1069 1069
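
The one-character change in atombios_set_edp_panel_power is the classic "reject unless the action is one of the allowed values" guard: with || the test is true for every action and the function always bails out; && is what the logic needs. A standalone demonstration with placeholder action values:

#include <stdio.h>

enum { POWER_ON = 1, POWER_OFF = 2, BACKLIGHT = 3 };

static int accepted(int action)
{
        if ((action != POWER_ON) && (action != POWER_OFF))
                return 0;       /* anything else is rejected */
        return 1;
}

int main(void)
{
        printf("%d %d %d\n", accepted(POWER_ON), accepted(POWER_OFF),
               accepted(BACKLIGHT));   /* 1 1 0 */
        return 0;
}
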
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index ca32e9c1e91d..66324b5bb5ba 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -225,6 +225,8 @@ static int radeonfb_create(struct radeon_fbdev *rfbdev,
225 225
226 strcpy(info->fix.id, "radeondrmfb"); 226 strcpy(info->fix.id, "radeondrmfb");
227 227
228 drm_fb_helper_fill_fix(info, fb->pitch, fb->depth);
229
228 info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT; 230 info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
229 info->fbops = &radeonfb_ops; 231 info->fbops = &radeonfb_ops;
230 232
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
index a289646e8aa4..9ec830c77af0 100644
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -110,11 +110,14 @@ void radeon_driver_irq_uninstall_kms(struct drm_device *dev)
110 110
111int radeon_irq_kms_init(struct radeon_device *rdev) 111int radeon_irq_kms_init(struct radeon_device *rdev)
112{ 112{
113 int i;
113 int r = 0; 114 int r = 0;
114 115
115 INIT_WORK(&rdev->hotplug_work, radeon_hotplug_work_func); 116 INIT_WORK(&rdev->hotplug_work, radeon_hotplug_work_func);
116 117
117 spin_lock_init(&rdev->irq.sw_lock); 118 spin_lock_init(&rdev->irq.sw_lock);
119 for (i = 0; i < rdev->num_crtc; i++)
120 spin_lock_init(&rdev->irq.pflip_lock[i]);
118 r = drm_vblank_init(rdev->ddev, rdev->num_crtc); 121 r = drm_vblank_init(rdev->ddev, rdev->num_crtc);
119 if (r) { 122 if (r) {
120 return r; 123 return r;
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 28a53e4a925f..8387d32caaa7 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -201,6 +201,10 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
201 } 201 }
202 radeon_set_filp_rights(dev, &rdev->cmask_filp, filp, &value); 202 radeon_set_filp_rights(dev, &rdev->cmask_filp, filp, &value);
203 break; 203 break;
204 case RADEON_INFO_CLOCK_CRYSTAL_FREQ:
205 /* return clock value in KHz */
206 value = rdev->clock.spll.reference_freq * 10;
207 break;
204 default: 208 default:
205 DRM_DEBUG_KMS("Invalid request %d\n", info->request); 209 DRM_DEBUG_KMS("Invalid request %d\n", info->request);
206 return -EINVAL; 210 return -EINVAL;
@@ -243,6 +247,8 @@ void radeon_driver_preclose_kms(struct drm_device *dev,
243 struct radeon_device *rdev = dev->dev_private; 247 struct radeon_device *rdev = dev->dev_private;
244 if (rdev->hyperz_filp == file_priv) 248 if (rdev->hyperz_filp == file_priv)
245 rdev->hyperz_filp = NULL; 249 rdev->hyperz_filp = NULL;
250 if (rdev->cmask_filp == file_priv)
251 rdev->cmask_filp = NULL;
246} 252}
247 253
248/* 254/*
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
index ace2e6384d40..cf0638c3b7c7 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
@@ -778,9 +778,9 @@ static void radeon_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
778 DRM_DEBUG_KMS("\n"); 778 DRM_DEBUG_KMS("\n");
779 779
780 if (!use_bios_divs) { 780 if (!use_bios_divs) {
781 radeon_compute_pll(pll, mode->clock, 781 radeon_compute_pll_legacy(pll, mode->clock,
782 &freq, &feedback_div, &frac_fb_div, 782 &freq, &feedback_div, &frac_fb_div,
783 &reference_div, &post_divider); 783 &reference_div, &post_divider);
784 784
785 for (post_div = &post_divs[0]; post_div->divider; ++post_div) { 785 for (post_div = &post_divs[0]; post_div->divider; ++post_div) {
786 if (post_div->divider == post_divider) 786 if (post_div->divider == post_divider)
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 12bdeab91c86..6794cdf91f28 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -149,6 +149,7 @@ struct radeon_tmds_pll {
149#define RADEON_PLL_PREFER_CLOSEST_LOWER (1 << 11) 149#define RADEON_PLL_PREFER_CLOSEST_LOWER (1 << 11)
150#define RADEON_PLL_USE_POST_DIV (1 << 12) 150#define RADEON_PLL_USE_POST_DIV (1 << 12)
151#define RADEON_PLL_IS_LCD (1 << 13) 151#define RADEON_PLL_IS_LCD (1 << 13)
152#define RADEON_PLL_PREFER_MINM_OVER_MAXP (1 << 14)
152 153
153struct radeon_pll { 154struct radeon_pll {
154 /* reference frequency */ 155 /* reference frequency */
@@ -510,13 +511,21 @@ extern bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev,
510 struct radeon_atom_ss *ss, 511 struct radeon_atom_ss *ss,
511 int id, u32 clock); 512 int id, u32 clock);
512 513
513extern void radeon_compute_pll(struct radeon_pll *pll, 514extern void radeon_compute_pll_legacy(struct radeon_pll *pll,
514 uint64_t freq, 515 uint64_t freq,
515 uint32_t *dot_clock_p, 516 uint32_t *dot_clock_p,
516 uint32_t *fb_div_p, 517 uint32_t *fb_div_p,
517 uint32_t *frac_fb_div_p, 518 uint32_t *frac_fb_div_p,
518 uint32_t *ref_div_p, 519 uint32_t *ref_div_p,
519 uint32_t *post_div_p); 520 uint32_t *post_div_p);
521
522extern void radeon_compute_pll_avivo(struct radeon_pll *pll,
523 u32 freq,
524 u32 *dot_clock_p,
525 u32 *fb_div_p,
526 u32 *frac_fb_div_p,
527 u32 *ref_div_p,
528 u32 *post_div_p);
520 529
521extern void radeon_setup_encoder_clones(struct drm_device *dev); 530extern void radeon_setup_encoder_clones(struct drm_device *dev);
522 531
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 3b1b2bf9cdd5..2aed03bde4b2 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -430,7 +430,7 @@ static ssize_t radeon_hwmon_show_temp(struct device *dev,
430{ 430{
431 struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev)); 431 struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
432 struct radeon_device *rdev = ddev->dev_private; 432 struct radeon_device *rdev = ddev->dev_private;
433 u32 temp; 433 int temp;
434 434
435 switch (rdev->pm.int_thermal_type) { 435 switch (rdev->pm.int_thermal_type) {
436 case THERMAL_TYPE_RV6XX: 436 case THERMAL_TYPE_RV6XX:
@@ -646,6 +646,9 @@ void radeon_pm_fini(struct radeon_device *rdev)
646#endif 646#endif
647 } 647 }
648 648
649 if (rdev->pm.power_state)
650 kfree(rdev->pm.power_state);
651
649 radeon_hwmon_fini(rdev); 652 radeon_hwmon_fini(rdev);
650} 653}
651 654
diff --git a/drivers/gpu/drm/radeon/radeon_reg.h b/drivers/gpu/drm/radeon/radeon_reg.h
index 3cd4dace57c7..ec93a75369e6 100644
--- a/drivers/gpu/drm/radeon/radeon_reg.h
+++ b/drivers/gpu/drm/radeon/radeon_reg.h
@@ -375,6 +375,8 @@
375#define RADEON_CONFIG_APER_SIZE 0x0108 375#define RADEON_CONFIG_APER_SIZE 0x0108
376#define RADEON_CONFIG_BONDS 0x00e8 376#define RADEON_CONFIG_BONDS 0x00e8
377#define RADEON_CONFIG_CNTL 0x00e0 377#define RADEON_CONFIG_CNTL 0x00e0
378# define RADEON_CFG_VGA_RAM_EN (1 << 8)
379# define RADEON_CFG_VGA_IO_DIS (1 << 9)
378# define RADEON_CFG_ATI_REV_A11 (0 << 16) 380# define RADEON_CFG_ATI_REV_A11 (0 << 16)
379# define RADEON_CFG_ATI_REV_A12 (1 << 16) 381# define RADEON_CFG_ATI_REV_A12 (1 << 16)
380# define RADEON_CFG_ATI_REV_A13 (2 << 16) 382# define RADEON_CFG_ATI_REV_A13 (2 << 16)
diff --git a/drivers/gpu/drm/radeon/reg_srcs/evergreen b/drivers/gpu/drm/radeon/reg_srcs/evergreen
index ac40fd39d787..9177f9191837 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/evergreen
+++ b/drivers/gpu/drm/radeon/reg_srcs/evergreen
@@ -439,7 +439,7 @@ evergreen 0x9400
4390x000286EC SPI_COMPUTE_NUM_THREAD_X 4390x000286EC SPI_COMPUTE_NUM_THREAD_X
4400x000286F0 SPI_COMPUTE_NUM_THREAD_Y 4400x000286F0 SPI_COMPUTE_NUM_THREAD_Y
4410x000286F4 SPI_COMPUTE_NUM_THREAD_Z 4410x000286F4 SPI_COMPUTE_NUM_THREAD_Z
4420x000286F8 GDS_ADDR_SIZE 4420x00028724 GDS_ADDR_SIZE
4430x00028780 CB_BLEND0_CONTROL 4430x00028780 CB_BLEND0_CONTROL
4440x00028784 CB_BLEND1_CONTROL 4440x00028784 CB_BLEND1_CONTROL
4450x00028788 CB_BLEND2_CONTROL 4450x00028788 CB_BLEND2_CONTROL
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
index 5512e4e5e636..c76283d9eb3d 100644
--- a/drivers/gpu/drm/radeon/rs400.c
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -203,6 +203,9 @@ void rs400_gart_fini(struct radeon_device *rdev)
203 radeon_gart_table_ram_free(rdev); 203 radeon_gart_table_ram_free(rdev);
204} 204}
205 205
206#define RS400_PTE_WRITEABLE (1 << 2)
207#define RS400_PTE_READABLE (1 << 3)
208
206int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr) 209int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
207{ 210{
208 uint32_t entry; 211 uint32_t entry;
@@ -213,7 +216,7 @@ int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
213 216
214 entry = (lower_32_bits(addr) & PAGE_MASK) | 217 entry = (lower_32_bits(addr) & PAGE_MASK) |
215 ((upper_32_bits(addr) & 0xff) << 4) | 218 ((upper_32_bits(addr) & 0xff) << 4) |
216 0xc; 219 RS400_PTE_WRITEABLE | RS400_PTE_READABLE;
217 entry = cpu_to_le32(entry); 220 entry = cpu_to_le32(entry);
218 rdev->gart.table.ram.ptr[i] = entry; 221 rdev->gart.table.ram.ptr[i] = entry;
219 return 0; 222 return 0;
@@ -226,8 +229,8 @@ int rs400_mc_wait_for_idle(struct radeon_device *rdev)
226 229
227 for (i = 0; i < rdev->usec_timeout; i++) { 230 for (i = 0; i < rdev->usec_timeout; i++) {
228 /* read MC_STATUS */ 231 /* read MC_STATUS */
229 tmp = RREG32(0x0150); 232 tmp = RREG32(RADEON_MC_STATUS);
230 if (tmp & (1 << 2)) { 233 if (tmp & RADEON_MC_IDLE) {
231 return 0; 234 return 0;
232 } 235 }
233 DRM_UDELAY(1); 236 DRM_UDELAY(1);
@@ -241,7 +244,7 @@ void rs400_gpu_init(struct radeon_device *rdev)
241 r420_pipes_init(rdev); 244 r420_pipes_init(rdev);
242 if (rs400_mc_wait_for_idle(rdev)) { 245 if (rs400_mc_wait_for_idle(rdev)) {
243 printk(KERN_WARNING "rs400: Failed to wait MC idle while " 246 printk(KERN_WARNING "rs400: Failed to wait MC idle while "
244 "programming pipes. Bad things might happen. %08x\n", RREG32(0x150)); 247 "programming pipes. Bad things might happen. %08x\n", RREG32(RADEON_MC_STATUS));
245 } 248 }
246} 249}
247 250
@@ -300,9 +303,9 @@ static int rs400_debugfs_gart_info(struct seq_file *m, void *data)
300 seq_printf(m, "MCCFG_AGP_BASE_2 0x%08x\n", tmp); 303 seq_printf(m, "MCCFG_AGP_BASE_2 0x%08x\n", tmp);
301 tmp = RREG32_MC(RS690_MCCFG_AGP_LOCATION); 304 tmp = RREG32_MC(RS690_MCCFG_AGP_LOCATION);
302 seq_printf(m, "MCCFG_AGP_LOCATION 0x%08x\n", tmp); 305 seq_printf(m, "MCCFG_AGP_LOCATION 0x%08x\n", tmp);
303 tmp = RREG32_MC(0x100); 306 tmp = RREG32_MC(RS690_MCCFG_FB_LOCATION);
304 seq_printf(m, "MCCFG_FB_LOCATION 0x%08x\n", tmp); 307 seq_printf(m, "MCCFG_FB_LOCATION 0x%08x\n", tmp);
305 tmp = RREG32(0x134); 308 tmp = RREG32(RS690_HDP_FB_LOCATION);
306 seq_printf(m, "HDP_FB_LOCATION 0x%08x\n", tmp); 309 seq_printf(m, "HDP_FB_LOCATION 0x%08x\n", tmp);
307 } else { 310 } else {
308 tmp = RREG32(RADEON_AGP_BASE); 311 tmp = RREG32(RADEON_AGP_BASE);
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index b4192acaab5f..5afe294ed51f 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -339,16 +339,16 @@ void rs600_bm_disable(struct radeon_device *rdev)
 
 int rs600_asic_reset(struct radeon_device *rdev)
 {
-	u32 status, tmp;
-
 	struct rv515_mc_save save;
+	u32 status, tmp;
+	int ret = 0;
 
-	/* Stops all mc clients */
-	rv515_mc_stop(rdev, &save);
 	status = RREG32(R_000E40_RBBM_STATUS);
 	if (!G_000E40_GUI_ACTIVE(status)) {
 		return 0;
 	}
+	/* Stops all mc clients */
+	rv515_mc_stop(rdev, &save);
 	status = RREG32(R_000E40_RBBM_STATUS);
 	dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
 	/* stop CP */
@@ -392,11 +392,11 @@ int rs600_asic_reset(struct radeon_device *rdev)
 	if (G_000E40_GA_BUSY(status) || G_000E40_VAP_BUSY(status)) {
 		dev_err(rdev->dev, "failed to reset GPU\n");
 		rdev->gpu_lockup = true;
-		return -1;
-	}
+		ret = -1;
+	} else
+		dev_info(rdev->dev, "GPU reset succeed\n");
 	rv515_mc_resume(rdev, &save);
-	dev_info(rdev->dev, "GPU reset succeed\n");
-	return 0;
+	return ret;
 }
 
 /*
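The rs600_asic_reset() rework above converts an early return on failure into a single exit path, so rv515_mc_resume() now runs whether or not the reset succeeded. A minimal sketch of that pattern, with stand-in helpers rather than the real driver functions:

/*
 * Sketch of the single-exit error handling used in the reworked reset path:
 * the failure is latched in 'ret' and the MC state is always restored.
 */
#include <stdbool.h>
#include <stdio.h>

static void mc_stop(void)    { puts("stop MC clients"); }
static void mc_resume(void)  { puts("resume MC clients"); }
static bool still_busy(void) { return true; }  /* pretend the reset failed */

static int asic_reset_sketch(void)
{
	int ret = 0;

	mc_stop();
	/* ... issue the reset sequence here ... */
	if (still_busy())
		ret = -1;                 /* record the failure ... */
	else
		puts("GPU reset succeed");
	mc_resume();                      /* ... but always restore the MC state */
	return ret;
}

int main(void)
{
	printf("reset returned %d\n", asic_reset_sketch());
	return 0;
}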
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index 5d569f41f4ae..64b57af93714 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -69,13 +69,13 @@ void rv515_ring_start(struct radeon_device *rdev)
 			  ISYNC_CPSCRATCH_IDLEGUI);
 	radeon_ring_write(rdev, PACKET0(WAIT_UNTIL, 0));
 	radeon_ring_write(rdev, WAIT_2D_IDLECLEAN | WAIT_3D_IDLECLEAN);
-	radeon_ring_write(rdev, PACKET0(0x170C, 0));
-	radeon_ring_write(rdev, 1 << 31);
+	radeon_ring_write(rdev, PACKET0(R300_DST_PIPE_CONFIG, 0));
+	radeon_ring_write(rdev, R300_PIPE_AUTO_CONFIG);
 	radeon_ring_write(rdev, PACKET0(GB_SELECT, 0));
 	radeon_ring_write(rdev, 0);
 	radeon_ring_write(rdev, PACKET0(GB_ENABLE, 0));
 	radeon_ring_write(rdev, 0);
-	radeon_ring_write(rdev, PACKET0(0x42C8, 0));
+	radeon_ring_write(rdev, PACKET0(R500_SU_REG_DEST, 0));
 	radeon_ring_write(rdev, (1 << rdev->num_gb_pipes) - 1);
 	radeon_ring_write(rdev, PACKET0(VAP_INDEX_OFFSET, 0));
 	radeon_ring_write(rdev, 0);
@@ -153,8 +153,8 @@ void rv515_gpu_init(struct radeon_device *rdev)
 	}
 	rv515_vga_render_disable(rdev);
 	r420_pipes_init(rdev);
-	gb_pipe_select = RREG32(0x402C);
-	tmp = RREG32(0x170C);
+	gb_pipe_select = RREG32(R400_GB_PIPE_SELECT);
+	tmp = RREG32(R300_DST_PIPE_CONFIG);
 	pipe_select_current = (tmp >> 2) & 3;
 	tmp = (1 << pipe_select_current) |
 	      (((gb_pipe_select >> 8) & 0xF) << 4);
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 3a264aa3a79a..2211a323db41 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -78,18 +78,23 @@ u32 rv770_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
 }
 
 /* get temperature in millidegrees */
-u32 rv770_get_temp(struct radeon_device *rdev)
+int rv770_get_temp(struct radeon_device *rdev)
 {
 	u32 temp = (RREG32(CG_MULT_THERMAL_STATUS) & ASIC_T_MASK) >>
 		ASIC_T_SHIFT;
-	u32 actual_temp = 0;
+	int actual_temp;
 
-	if ((temp >> 9) & 1)
-		actual_temp = 0;
-	else
-		actual_temp = (temp >> 1) & 0xff;
-
-	return actual_temp * 1000;
+	if (temp & 0x400)
+		actual_temp = -256;
+	else if (temp & 0x200)
+		actual_temp = 255;
+	else if (temp & 0x100) {
+		actual_temp = temp & 0x1ff;
+		actual_temp |= ~0x1ff;
+	} else
+		actual_temp = temp & 0xff;
+
+	return (actual_temp * 1000) / 2;
 }
 
 void rv770_pm_misc(struct radeon_device *rdev)
@@ -1268,7 +1273,7 @@ int rv770_init(struct radeon_device *rdev)
 	if (r)
 		return r;
 	/* Post card if necessary */
-	if (!r600_card_posted(rdev)) {
+	if (!radeon_card_posted(rdev)) {
 		if (!rdev->bios) {
 			dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
 			return -EINVAL;
@@ -1372,6 +1377,9 @@ static void rv770_pcie_gen2_enable(struct radeon_device *rdev)
 	u32 link_width_cntl, lanes, speed_cntl, tmp;
 	u16 link_cntl2;
 
+	if (radeon_pcie_gen2 == 0)
+		return;
+
 	if (rdev->flags & RADEON_IS_IGP)
 		return;
 
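The rv770_get_temp() change above turns the raw ASIC_T field into a signed value before scaling. Below is a standalone sketch of just that decode; the register read is replaced by a raw parameter, and the clamp values, the 9-bit sign extension and the divide-by-two scaling are copied from the new code (the divide by two suggests half-degree units, but that interpretation is an assumption).

/*
 * Sketch of the new ASIC_T decode in rv770_get_temp() above. 'raw' stands in
 * for the masked and shifted CG_MULT_THERMAL_STATUS field.
 */
#include <stdint.h>
#include <stdio.h>

static int decode_temp_millidegrees(uint32_t raw)
{
	int actual_temp;

	if (raw & 0x400)              /* bit 10 set: clamp low, as in the diff */
		actual_temp = -256;
	else if (raw & 0x200)         /* bit 9 set: clamp high */
		actual_temp = 255;
	else if (raw & 0x100) {       /* bit 8 set: negative 9-bit value, sign-extend */
		actual_temp = raw & 0x1ff;
		actual_temp |= ~0x1ff;
	} else                        /* plain positive 8-bit value */
		actual_temp = raw & 0xff;

	return (actual_temp * 1000) / 2;  /* scale to millidegrees, halving as the driver does */
}

int main(void)
{
	printf("%d\n", decode_temp_millidegrees(0x1f6));  /* sign-extended: -5000 */
	printf("%d\n", decode_temp_millidegrees(0x050));  /* positive: 40000 */
	return 0;
}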
diff --git a/drivers/gpu/stub/Kconfig b/drivers/gpu/stub/Kconfig
index 0e1edd7311ff..70e60a4bb678 100644
--- a/drivers/gpu/stub/Kconfig
+++ b/drivers/gpu/stub/Kconfig
@@ -1,12 +1,13 @@
 config STUB_POULSBO
 	tristate "Intel GMA500 Stub Driver"
 	depends on PCI
+	depends on NET # for THERMAL
 	# Poulsbo stub depends on ACPI_VIDEO when ACPI is enabled
 	# but for select to work, need to select ACPI_VIDEO's dependencies, ick
-	select VIDEO_OUTPUT_CONTROL if ACPI
 	select BACKLIGHT_CLASS_DEVICE if ACPI
 	select INPUT if ACPI
 	select ACPI_VIDEO if ACPI
+	select THERMAL if ACPI
 	help
 	  Choose this option if you have a system that has Intel GMA500
 	  (Poulsbo) integrated graphics. If M is selected, the module will
diff --git a/drivers/gpu/vga/Kconfig b/drivers/gpu/vga/Kconfig
index 8d0e31a22027..96c83a9a76bb 100644
--- a/drivers/gpu/vga/Kconfig
+++ b/drivers/gpu/vga/Kconfig
@@ -1,5 +1,5 @@
 config VGA_ARB
-	bool "VGA Arbitration" if EMBEDDED
+	bool "VGA Arbitration" if EXPERT
 	default y
 	depends on PCI
 	help
diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c
index c380c65da417..ace2b1623b21 100644
--- a/drivers/gpu/vga/vgaarb.c
+++ b/drivers/gpu/vga/vgaarb.c
@@ -636,7 +636,7 @@ int vga_client_register(struct pci_dev *pdev, void *cookie,
 			void (*irq_set_state)(void *cookie, bool state),
 			unsigned int (*set_vga_decode)(void *cookie, bool decode))
 {
-	int ret = -1;
+	int ret = -ENODEV;
 	struct vga_device *vgadev;
 	unsigned long flags;
 