aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/gpu
diff options
context:
space:
mode:
authorIngo Molnar <mingo@elte.hu>2010-09-15 04:27:31 -0400
committerIngo Molnar <mingo@elte.hu>2010-09-15 04:27:31 -0400
commit3aabae7d9dfaed60effe93662f02c19bafc18537 (patch)
treeaf94cdd69add07601d9f3f5988dfc1dc255e3886 /drivers/gpu
parent79e406d7b00ab2b261ae32a59f266fd3b7af6f29 (diff)
parent57c072c7113f54f9512624d6c665db6184448782 (diff)
Merge branch 'tip/perf/core' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-2.6-trace into perf/core
Diffstat (limited to 'drivers/gpu')
-rw-r--r--drivers/gpu/drm/drm_crtc_helper.c24
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c2
-rw-r--r--drivers/gpu/drm/drm_fops.c1
-rw-r--r--drivers/gpu/drm/drm_lock.c2
-rw-r--r--drivers/gpu/drm/drm_mm.c24
-rw-r--r--drivers/gpu/drm/drm_modes.c5
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c50
-rw-r--r--drivers/gpu/drm/i915/i915_dma.c36
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c65
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h27
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c11
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c73
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h9
-rw-r--r--drivers/gpu/drm/i915/intel_display.c267
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c58
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h10
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c2
-rw-r--r--drivers/gpu/drm/i915/intel_overlay.c2
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.c8
-rw-r--r--drivers/gpu/drm/i915/intel_sdvo.c69
-rw-r--r--drivers/gpu/drm/i915/intel_tv.c9
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bios.c64
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bios.h1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.h9
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fence.c6
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gem.c13
-rw-r--r--drivers/gpu/drm/nouveau/nv04_dfp.c31
-rw-r--r--drivers/gpu/drm/nouveau/nv17_tv.c10
-rw-r--r--drivers/gpu/drm/nouveau/nv50_instmem.c2
-rw-r--r--drivers/gpu/drm/radeon/atombios_crtc.c48
-rw-r--r--drivers/gpu/drm/radeon/evergreen.c48
-rw-r--r--drivers/gpu/drm/radeon/r600.c16
-rw-r--r--drivers/gpu/drm/radeon/radeon.h8
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.c18
-rw-r--r--drivers/gpu/drm/radeon/radeon_atombios.c27
-rw-r--r--drivers/gpu/drm/radeon/radeon_clocks.c58
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c79
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c28
-rw-r--r--drivers/gpu/drm/radeon/radeon_i2c.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_mode.h1
-rw-r--r--drivers/gpu/drm/radeon/radeon_pm.c2
-rw-r--r--drivers/gpu/drm/radeon/rv770.c61
42 files changed, 749 insertions, 537 deletions
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 7e31d4348340..d2ab01e90a96 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -34,6 +34,9 @@
34#include "drm_crtc_helper.h" 34#include "drm_crtc_helper.h"
35#include "drm_fb_helper.h" 35#include "drm_fb_helper.h"
36 36
37static bool drm_kms_helper_poll = true;
38module_param_named(poll, drm_kms_helper_poll, bool, 0600);
39
37static void drm_mode_validate_flag(struct drm_connector *connector, 40static void drm_mode_validate_flag(struct drm_connector *connector,
38 int flags) 41 int flags)
39{ 42{
@@ -99,8 +102,10 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
99 connector->status = connector_status_disconnected; 102 connector->status = connector_status_disconnected;
100 if (connector->funcs->force) 103 if (connector->funcs->force)
101 connector->funcs->force(connector); 104 connector->funcs->force(connector);
102 } else 105 } else {
103 connector->status = connector->funcs->detect(connector); 106 connector->status = connector->funcs->detect(connector);
107 drm_helper_hpd_irq_event(dev);
108 }
104 109
105 if (connector->status == connector_status_disconnected) { 110 if (connector->status == connector_status_disconnected) {
106 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] disconnected\n", 111 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] disconnected\n",
@@ -110,11 +115,10 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
110 } 115 }
111 116
112 count = (*connector_funcs->get_modes)(connector); 117 count = (*connector_funcs->get_modes)(connector);
113 if (!count) { 118 if (count == 0 && connector->status == connector_status_connected)
114 count = drm_add_modes_noedid(connector, 1024, 768); 119 count = drm_add_modes_noedid(connector, 1024, 768);
115 if (!count) 120 if (count == 0)
116 return 0; 121 goto prune;
117 }
118 122
119 drm_mode_connector_list_update(connector); 123 drm_mode_connector_list_update(connector);
120 124
@@ -840,6 +844,9 @@ static void output_poll_execute(struct work_struct *work)
840 enum drm_connector_status old_status, status; 844 enum drm_connector_status old_status, status;
841 bool repoll = false, changed = false; 845 bool repoll = false, changed = false;
842 846
847 if (!drm_kms_helper_poll)
848 return;
849
843 mutex_lock(&dev->mode_config.mutex); 850 mutex_lock(&dev->mode_config.mutex);
844 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 851 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
845 852
@@ -890,6 +897,9 @@ void drm_kms_helper_poll_enable(struct drm_device *dev)
890 bool poll = false; 897 bool poll = false;
891 struct drm_connector *connector; 898 struct drm_connector *connector;
892 899
900 if (!dev->mode_config.poll_enabled || !drm_kms_helper_poll)
901 return;
902
893 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 903 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
894 if (connector->polled) 904 if (connector->polled)
895 poll = true; 905 poll = true;
@@ -919,8 +929,10 @@ void drm_helper_hpd_irq_event(struct drm_device *dev)
919{ 929{
920 if (!dev->mode_config.poll_enabled) 930 if (!dev->mode_config.poll_enabled)
921 return; 931 return;
932
922 /* kill timer and schedule immediate execution, this doesn't block */ 933 /* kill timer and schedule immediate execution, this doesn't block */
923 cancel_delayed_work(&dev->mode_config.output_poll_work); 934 cancel_delayed_work(&dev->mode_config.output_poll_work);
924 queue_delayed_work(system_nrt_wq, &dev->mode_config.output_poll_work, 0); 935 if (drm_kms_helper_poll)
936 queue_delayed_work(system_nrt_wq, &dev->mode_config.output_poll_work, 0);
925} 937}
926EXPORT_SYMBOL(drm_helper_hpd_irq_event); 938EXPORT_SYMBOL(drm_helper_hpd_irq_event);
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 8dd7e6f86bb3..6a5e403f9aa1 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -370,7 +370,7 @@ static void drm_fb_helper_restore_work_fn(struct work_struct *ignored)
370} 370}
371static DECLARE_WORK(drm_fb_helper_restore_work, drm_fb_helper_restore_work_fn); 371static DECLARE_WORK(drm_fb_helper_restore_work, drm_fb_helper_restore_work_fn);
372 372
373static void drm_fb_helper_sysrq(int dummy1, struct tty_struct *dummy3) 373static void drm_fb_helper_sysrq(int dummy1)
374{ 374{
375 schedule_work(&drm_fb_helper_restore_work); 375 schedule_work(&drm_fb_helper_restore_work);
376} 376}
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index 3a652a65546f..b744dad5c237 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -41,6 +41,7 @@
41 41
42/* from BKL pushdown: note that nothing else serializes idr_find() */ 42/* from BKL pushdown: note that nothing else serializes idr_find() */
43DEFINE_MUTEX(drm_global_mutex); 43DEFINE_MUTEX(drm_global_mutex);
44EXPORT_SYMBOL(drm_global_mutex);
44 45
45static int drm_open_helper(struct inode *inode, struct file *filp, 46static int drm_open_helper(struct inode *inode, struct file *filp,
46 struct drm_device * dev); 47 struct drm_device * dev);
diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
index e2f70a516c34..9bf93bc9a32c 100644
--- a/drivers/gpu/drm/drm_lock.c
+++ b/drivers/gpu/drm/drm_lock.c
@@ -92,7 +92,9 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
92 } 92 }
93 93
94 /* Contention */ 94 /* Contention */
95 mutex_unlock(&drm_global_mutex);
95 schedule(); 96 schedule();
97 mutex_lock(&drm_global_mutex);
96 if (signal_pending(current)) { 98 if (signal_pending(current)) {
97 ret = -EINTR; 99 ret = -EINTR;
98 break; 100 break;
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index da99edc50888..a6bfc302ed90 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -285,21 +285,21 @@ void drm_mm_put_block(struct drm_mm_node *cur)
285 285
286EXPORT_SYMBOL(drm_mm_put_block); 286EXPORT_SYMBOL(drm_mm_put_block);
287 287
288static int check_free_mm_node(struct drm_mm_node *entry, unsigned long size, 288static int check_free_hole(unsigned long start, unsigned long end,
289 unsigned alignment) 289 unsigned long size, unsigned alignment)
290{ 290{
291 unsigned wasted = 0; 291 unsigned wasted = 0;
292 292
293 if (entry->size < size) 293 if (end - start < size)
294 return 0; 294 return 0;
295 295
296 if (alignment) { 296 if (alignment) {
297 register unsigned tmp = entry->start % alignment; 297 unsigned tmp = start % alignment;
298 if (tmp) 298 if (tmp)
299 wasted = alignment - tmp; 299 wasted = alignment - tmp;
300 } 300 }
301 301
302 if (entry->size >= size + wasted) { 302 if (end >= start + size + wasted) {
303 return 1; 303 return 1;
304 } 304 }
305 305
@@ -320,7 +320,8 @@ struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
320 best_size = ~0UL; 320 best_size = ~0UL;
321 321
322 list_for_each_entry(entry, &mm->free_stack, free_stack) { 322 list_for_each_entry(entry, &mm->free_stack, free_stack) {
323 if (!check_free_mm_node(entry, size, alignment)) 323 if (!check_free_hole(entry->start, entry->start + entry->size,
324 size, alignment))
324 continue; 325 continue;
325 326
326 if (!best_match) 327 if (!best_match)
@@ -353,10 +354,12 @@ struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm,
353 best_size = ~0UL; 354 best_size = ~0UL;
354 355
355 list_for_each_entry(entry, &mm->free_stack, free_stack) { 356 list_for_each_entry(entry, &mm->free_stack, free_stack) {
356 if (entry->start > end || (entry->start+entry->size) < start) 357 unsigned long adj_start = entry->start < start ?
357 continue; 358 start : entry->start;
359 unsigned long adj_end = entry->start + entry->size > end ?
360 end : entry->start + entry->size;
358 361
359 if (!check_free_mm_node(entry, size, alignment)) 362 if (!check_free_hole(adj_start, adj_end, size, alignment))
360 continue; 363 continue;
361 364
362 if (!best_match) 365 if (!best_match)
@@ -449,7 +452,8 @@ int drm_mm_scan_add_block(struct drm_mm_node *node)
449 node->free_stack.prev = prev_free; 452 node->free_stack.prev = prev_free;
450 node->free_stack.next = next_free; 453 node->free_stack.next = next_free;
451 454
452 if (check_free_mm_node(node, mm->scan_size, mm->scan_alignment)) { 455 if (check_free_hole(node->start, node->start + node->size,
456 mm->scan_size, mm->scan_alignment)) {
453 mm->scan_hit_start = node->start; 457 mm->scan_hit_start = node->start;
454 mm->scan_hit_size = node->size; 458 mm->scan_hit_size = node->size;
455 459
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index f1f473ea97d3..949326d2a8e5 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -251,7 +251,10 @@ struct drm_display_mode *drm_cvt_mode(struct drm_device *dev, int hdisplay,
251 drm_mode->htotal = drm_mode->hdisplay + CVT_RB_H_BLANK; 251 drm_mode->htotal = drm_mode->hdisplay + CVT_RB_H_BLANK;
252 /* Fill in HSync values */ 252 /* Fill in HSync values */
253 drm_mode->hsync_end = drm_mode->hdisplay + CVT_RB_H_BLANK / 2; 253 drm_mode->hsync_end = drm_mode->hdisplay + CVT_RB_H_BLANK / 2;
254 drm_mode->hsync_start = drm_mode->hsync_end = CVT_RB_H_SYNC; 254 drm_mode->hsync_start = drm_mode->hsync_end - CVT_RB_H_SYNC;
255 /* Fill in VSync values */
256 drm_mode->vsync_start = drm_mode->vdisplay + CVT_RB_VFPORCH;
257 drm_mode->vsync_end = drm_mode->vsync_start + vsync;
255 } 258 }
256 /* 15/13. Find pixel clock frequency (kHz for xf86) */ 259 /* 15/13. Find pixel clock frequency (kHz for xf86) */
257 drm_mode->clock = drm_mode->htotal * HV_FACTOR * 1000 / hperiod; 260 drm_mode->clock = drm_mode->htotal * HV_FACTOR * 1000 / hperiod;
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 92d5605a34d1..5e43d7076789 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -31,6 +31,7 @@
31#include <linux/slab.h> 31#include <linux/slab.h>
32#include "drmP.h" 32#include "drmP.h"
33#include "drm.h" 33#include "drm.h"
34#include "intel_drv.h"
34#include "i915_drm.h" 35#include "i915_drm.h"
35#include "i915_drv.h" 36#include "i915_drv.h"
36 37
@@ -121,6 +122,54 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
121 return 0; 122 return 0;
122} 123}
123 124
125static int i915_gem_pageflip_info(struct seq_file *m, void *data)
126{
127 struct drm_info_node *node = (struct drm_info_node *) m->private;
128 struct drm_device *dev = node->minor->dev;
129 unsigned long flags;
130 struct intel_crtc *crtc;
131
132 list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
133 const char *pipe = crtc->pipe ? "B" : "A";
134 const char *plane = crtc->plane ? "B" : "A";
135 struct intel_unpin_work *work;
136
137 spin_lock_irqsave(&dev->event_lock, flags);
138 work = crtc->unpin_work;
139 if (work == NULL) {
140 seq_printf(m, "No flip due on pipe %s (plane %s)\n",
141 pipe, plane);
142 } else {
143 if (!work->pending) {
144 seq_printf(m, "Flip queued on pipe %s (plane %s)\n",
145 pipe, plane);
146 } else {
147 seq_printf(m, "Flip pending (waiting for vsync) on pipe %s (plane %s)\n",
148 pipe, plane);
149 }
150 if (work->enable_stall_check)
151 seq_printf(m, "Stall check enabled, ");
152 else
153 seq_printf(m, "Stall check waiting for page flip ioctl, ");
154 seq_printf(m, "%d prepares\n", work->pending);
155
156 if (work->old_fb_obj) {
157 struct drm_i915_gem_object *obj_priv = to_intel_bo(work->old_fb_obj);
158 if(obj_priv)
159 seq_printf(m, "Old framebuffer gtt_offset 0x%08x\n", obj_priv->gtt_offset );
160 }
161 if (work->pending_flip_obj) {
162 struct drm_i915_gem_object *obj_priv = to_intel_bo(work->pending_flip_obj);
163 if(obj_priv)
164 seq_printf(m, "New framebuffer gtt_offset 0x%08x\n", obj_priv->gtt_offset );
165 }
166 }
167 spin_unlock_irqrestore(&dev->event_lock, flags);
168 }
169
170 return 0;
171}
172
124static int i915_gem_request_info(struct seq_file *m, void *data) 173static int i915_gem_request_info(struct seq_file *m, void *data)
125{ 174{
126 struct drm_info_node *node = (struct drm_info_node *) m->private; 175 struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -777,6 +826,7 @@ static struct drm_info_list i915_debugfs_list[] = {
777 {"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST}, 826 {"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
778 {"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST}, 827 {"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST},
779 {"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST}, 828 {"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
829 {"i915_gem_pageflip", i915_gem_pageflip_info, 0},
780 {"i915_gem_request", i915_gem_request_info, 0}, 830 {"i915_gem_request", i915_gem_request_info, 0},
781 {"i915_gem_seqno", i915_gem_seqno_info, 0}, 831 {"i915_gem_seqno", i915_gem_seqno_info, 0},
782 {"i915_gem_fence_regs", i915_gem_fence_regs_info, 0}, 832 {"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index a7ec93e62f81..9d67b4853030 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -620,8 +620,10 @@ static int i915_batchbuffer(struct drm_device *dev, void *data,
620 ret = copy_from_user(cliprects, batch->cliprects, 620 ret = copy_from_user(cliprects, batch->cliprects,
621 batch->num_cliprects * 621 batch->num_cliprects *
622 sizeof(struct drm_clip_rect)); 622 sizeof(struct drm_clip_rect));
623 if (ret != 0) 623 if (ret != 0) {
624 ret = -EFAULT;
624 goto fail_free; 625 goto fail_free;
626 }
625 } 627 }
626 628
627 mutex_lock(&dev->struct_mutex); 629 mutex_lock(&dev->struct_mutex);
@@ -662,8 +664,10 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
662 return -ENOMEM; 664 return -ENOMEM;
663 665
664 ret = copy_from_user(batch_data, cmdbuf->buf, cmdbuf->sz); 666 ret = copy_from_user(batch_data, cmdbuf->buf, cmdbuf->sz);
665 if (ret != 0) 667 if (ret != 0) {
668 ret = -EFAULT;
666 goto fail_batch_free; 669 goto fail_batch_free;
670 }
667 671
668 if (cmdbuf->num_cliprects) { 672 if (cmdbuf->num_cliprects) {
669 cliprects = kcalloc(cmdbuf->num_cliprects, 673 cliprects = kcalloc(cmdbuf->num_cliprects,
@@ -676,8 +680,10 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
676 ret = copy_from_user(cliprects, cmdbuf->cliprects, 680 ret = copy_from_user(cliprects, cmdbuf->cliprects,
677 cmdbuf->num_cliprects * 681 cmdbuf->num_cliprects *
678 sizeof(struct drm_clip_rect)); 682 sizeof(struct drm_clip_rect));
679 if (ret != 0) 683 if (ret != 0) {
684 ret = -EFAULT;
680 goto fail_clip_free; 685 goto fail_clip_free;
686 }
681 } 687 }
682 688
683 mutex_lock(&dev->struct_mutex); 689 mutex_lock(&dev->struct_mutex);
@@ -885,7 +891,7 @@ intel_alloc_mchbar_resource(struct drm_device *dev)
885 int reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915; 891 int reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915;
886 u32 temp_lo, temp_hi = 0; 892 u32 temp_lo, temp_hi = 0;
887 u64 mchbar_addr; 893 u64 mchbar_addr;
888 int ret = 0; 894 int ret;
889 895
890 if (IS_I965G(dev)) 896 if (IS_I965G(dev))
891 pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi); 897 pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
@@ -895,22 +901,23 @@ intel_alloc_mchbar_resource(struct drm_device *dev)
895 /* If ACPI doesn't have it, assume we need to allocate it ourselves */ 901 /* If ACPI doesn't have it, assume we need to allocate it ourselves */
896#ifdef CONFIG_PNP 902#ifdef CONFIG_PNP
897 if (mchbar_addr && 903 if (mchbar_addr &&
898 pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE)) { 904 pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE))
899 ret = 0; 905 return 0;
900 goto out;
901 }
902#endif 906#endif
903 907
904 /* Get some space for it */ 908 /* Get some space for it */
905 ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus, &dev_priv->mch_res, 909 dev_priv->mch_res.name = "i915 MCHBAR";
910 dev_priv->mch_res.flags = IORESOURCE_MEM;
911 ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus,
912 &dev_priv->mch_res,
906 MCHBAR_SIZE, MCHBAR_SIZE, 913 MCHBAR_SIZE, MCHBAR_SIZE,
907 PCIBIOS_MIN_MEM, 914 PCIBIOS_MIN_MEM,
908 0, pcibios_align_resource, 915 0, pcibios_align_resource,
909 dev_priv->bridge_dev); 916 dev_priv->bridge_dev);
910 if (ret) { 917 if (ret) {
911 DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret); 918 DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret);
912 dev_priv->mch_res.start = 0; 919 dev_priv->mch_res.start = 0;
913 goto out; 920 return ret;
914 } 921 }
915 922
916 if (IS_I965G(dev)) 923 if (IS_I965G(dev))
@@ -919,8 +926,7 @@ intel_alloc_mchbar_resource(struct drm_device *dev)
919 926
920 pci_write_config_dword(dev_priv->bridge_dev, reg, 927 pci_write_config_dword(dev_priv->bridge_dev, reg,
921 lower_32_bits(dev_priv->mch_res.start)); 928 lower_32_bits(dev_priv->mch_res.start));
922out: 929 return 0;
923 return ret;
924} 930}
925 931
926/* Setup MCHBAR if possible, return true if we should disable it again */ 932/* Setup MCHBAR if possible, return true if we should disable it again */
@@ -2082,6 +2088,10 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
2082 goto free_priv; 2088 goto free_priv;
2083 } 2089 }
2084 2090
2091 /* overlay on gen2 is broken and can't address above 1G */
2092 if (IS_GEN2(dev))
2093 dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30));
2094
2085 dev_priv->regs = ioremap(base, size); 2095 dev_priv->regs = ioremap(base, size);
2086 if (!dev_priv->regs) { 2096 if (!dev_priv->regs) {
2087 DRM_ERROR("failed to map registers\n"); 2097 DRM_ERROR("failed to map registers\n");
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 00befce8fbb7..216deb579785 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -61,91 +61,86 @@ extern int intel_agp_enabled;
61 .driver_data = (unsigned long) info } 61 .driver_data = (unsigned long) info }
62 62
63static const struct intel_device_info intel_i830_info = { 63static const struct intel_device_info intel_i830_info = {
64 .is_i8xx = 1, .is_mobile = 1, .cursor_needs_physical = 1, 64 .gen = 2, .is_i8xx = 1, .is_mobile = 1, .cursor_needs_physical = 1,
65}; 65};
66 66
67static const struct intel_device_info intel_845g_info = { 67static const struct intel_device_info intel_845g_info = {
68 .is_i8xx = 1, 68 .gen = 2, .is_i8xx = 1,
69}; 69};
70 70
71static const struct intel_device_info intel_i85x_info = { 71static const struct intel_device_info intel_i85x_info = {
72 .is_i8xx = 1, .is_i85x = 1, .is_mobile = 1, 72 .gen = 2, .is_i8xx = 1, .is_i85x = 1, .is_mobile = 1,
73 .cursor_needs_physical = 1, 73 .cursor_needs_physical = 1,
74}; 74};
75 75
76static const struct intel_device_info intel_i865g_info = { 76static const struct intel_device_info intel_i865g_info = {
77 .is_i8xx = 1, 77 .gen = 2, .is_i8xx = 1,
78}; 78};
79 79
80static const struct intel_device_info intel_i915g_info = { 80static const struct intel_device_info intel_i915g_info = {
81 .is_i915g = 1, .is_i9xx = 1, .cursor_needs_physical = 1, 81 .gen = 3, .is_i915g = 1, .is_i9xx = 1, .cursor_needs_physical = 1,
82}; 82};
83static const struct intel_device_info intel_i915gm_info = { 83static const struct intel_device_info intel_i915gm_info = {
84 .is_i9xx = 1, .is_mobile = 1, 84 .gen = 3, .is_i9xx = 1, .is_mobile = 1,
85 .cursor_needs_physical = 1, 85 .cursor_needs_physical = 1,
86}; 86};
87static const struct intel_device_info intel_i945g_info = { 87static const struct intel_device_info intel_i945g_info = {
88 .is_i9xx = 1, .has_hotplug = 1, .cursor_needs_physical = 1, 88 .gen = 3, .is_i9xx = 1, .has_hotplug = 1, .cursor_needs_physical = 1,
89}; 89};
90static const struct intel_device_info intel_i945gm_info = { 90static const struct intel_device_info intel_i945gm_info = {
91 .is_i945gm = 1, .is_i9xx = 1, .is_mobile = 1, 91 .gen = 3, .is_i945gm = 1, .is_i9xx = 1, .is_mobile = 1,
92 .has_hotplug = 1, .cursor_needs_physical = 1, 92 .has_hotplug = 1, .cursor_needs_physical = 1,
93}; 93};
94 94
95static const struct intel_device_info intel_i965g_info = { 95static const struct intel_device_info intel_i965g_info = {
96 .is_broadwater = 1, .is_i965g = 1, .is_i9xx = 1, .has_hotplug = 1, 96 .gen = 4, .is_broadwater = 1, .is_i965g = 1, .is_i9xx = 1,
97 .has_hotplug = 1,
97}; 98};
98 99
99static const struct intel_device_info intel_i965gm_info = { 100static const struct intel_device_info intel_i965gm_info = {
100 .is_crestline = 1, .is_i965g = 1, .is_i965gm = 1, .is_i9xx = 1, 101 .gen = 4, .is_crestline = 1, .is_i965g = 1, .is_i965gm = 1, .is_i9xx = 1,
101 .is_mobile = 1, .has_fbc = 1, .has_rc6 = 1, 102 .is_mobile = 1, .has_fbc = 1, .has_rc6 = 1, .has_hotplug = 1,
102 .has_hotplug = 1,
103}; 103};
104 104
105static const struct intel_device_info intel_g33_info = { 105static const struct intel_device_info intel_g33_info = {
106 .is_g33 = 1, .is_i9xx = 1, .need_gfx_hws = 1, 106 .gen = 3, .is_g33 = 1, .is_i9xx = 1,
107 .has_hotplug = 1, 107 .need_gfx_hws = 1, .has_hotplug = 1,
108}; 108};
109 109
110static const struct intel_device_info intel_g45_info = { 110static const struct intel_device_info intel_g45_info = {
111 .is_i965g = 1, .is_g4x = 1, .is_i9xx = 1, .need_gfx_hws = 1, 111 .gen = 4, .is_i965g = 1, .is_g4x = 1, .is_i9xx = 1, .need_gfx_hws = 1,
112 .has_pipe_cxsr = 1, 112 .has_pipe_cxsr = 1, .has_hotplug = 1,
113 .has_hotplug = 1,
114}; 113};
115 114
116static const struct intel_device_info intel_gm45_info = { 115static const struct intel_device_info intel_gm45_info = {
117 .is_i965g = 1, .is_g4x = 1, .is_i9xx = 1, 116 .gen = 4, .is_i965g = 1, .is_g4x = 1, .is_i9xx = 1,
118 .is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1, 117 .is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1,
119 .has_pipe_cxsr = 1, 118 .has_pipe_cxsr = 1, .has_hotplug = 1,
120 .has_hotplug = 1,
121}; 119};
122 120
123static const struct intel_device_info intel_pineview_info = { 121static const struct intel_device_info intel_pineview_info = {
124 .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .is_i9xx = 1, 122 .gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .is_i9xx = 1,
125 .need_gfx_hws = 1, 123 .need_gfx_hws = 1, .has_hotplug = 1,
126 .has_hotplug = 1,
127}; 124};
128 125
129static const struct intel_device_info intel_ironlake_d_info = { 126static const struct intel_device_info intel_ironlake_d_info = {
130 .is_ironlake = 1, .is_i965g = 1, .is_i9xx = 1, .need_gfx_hws = 1, 127 .gen = 5, .is_ironlake = 1, .is_i965g = 1, .is_i9xx = 1,
131 .has_pipe_cxsr = 1, 128 .need_gfx_hws = 1, .has_pipe_cxsr = 1, .has_hotplug = 1,
132 .has_hotplug = 1,
133}; 129};
134 130
135static const struct intel_device_info intel_ironlake_m_info = { 131static const struct intel_device_info intel_ironlake_m_info = {
136 .is_ironlake = 1, .is_mobile = 1, .is_i965g = 1, .is_i9xx = 1, 132 .gen = 5, .is_ironlake = 1, .is_mobile = 1, .is_i965g = 1, .is_i9xx = 1,
137 .need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1, 133 .need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1, .has_hotplug = 1,
138 .has_hotplug = 1,
139}; 134};
140 135
141static const struct intel_device_info intel_sandybridge_d_info = { 136static const struct intel_device_info intel_sandybridge_d_info = {
142 .is_i965g = 1, .is_i9xx = 1, .need_gfx_hws = 1, 137 .gen = 6, .is_i965g = 1, .is_i9xx = 1,
143 .has_hotplug = 1, .is_gen6 = 1, 138 .need_gfx_hws = 1, .has_hotplug = 1,
144}; 139};
145 140
146static const struct intel_device_info intel_sandybridge_m_info = { 141static const struct intel_device_info intel_sandybridge_m_info = {
147 .is_i965g = 1, .is_mobile = 1, .is_i9xx = 1, .need_gfx_hws = 1, 142 .gen = 6, .is_i965g = 1, .is_mobile = 1, .is_i9xx = 1,
148 .has_hotplug = 1, .is_gen6 = 1, 143 .need_gfx_hws = 1, .has_hotplug = 1,
149}; 144};
150 145
151static const struct pci_device_id pciidlist[] = { /* aka */ 146static const struct pci_device_id pciidlist[] = { /* aka */
@@ -180,8 +175,12 @@ static const struct pci_device_id pciidlist[] = { /* aka */
180 INTEL_VGA_DEVICE(0x0042, &intel_ironlake_d_info), 175 INTEL_VGA_DEVICE(0x0042, &intel_ironlake_d_info),
181 INTEL_VGA_DEVICE(0x0046, &intel_ironlake_m_info), 176 INTEL_VGA_DEVICE(0x0046, &intel_ironlake_m_info),
182 INTEL_VGA_DEVICE(0x0102, &intel_sandybridge_d_info), 177 INTEL_VGA_DEVICE(0x0102, &intel_sandybridge_d_info),
178 INTEL_VGA_DEVICE(0x0112, &intel_sandybridge_d_info),
179 INTEL_VGA_DEVICE(0x0122, &intel_sandybridge_d_info),
183 INTEL_VGA_DEVICE(0x0106, &intel_sandybridge_m_info), 180 INTEL_VGA_DEVICE(0x0106, &intel_sandybridge_m_info),
181 INTEL_VGA_DEVICE(0x0116, &intel_sandybridge_m_info),
184 INTEL_VGA_DEVICE(0x0126, &intel_sandybridge_m_info), 182 INTEL_VGA_DEVICE(0x0126, &intel_sandybridge_m_info),
183 INTEL_VGA_DEVICE(0x010A, &intel_sandybridge_d_info),
185 {0, 0, 0} 184 {0, 0, 0}
186}; 185};
187 186
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 047cd7ce7e1b..af4a263cf257 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -191,6 +191,7 @@ struct drm_i915_display_funcs {
191}; 191};
192 192
193struct intel_device_info { 193struct intel_device_info {
194 u8 gen;
194 u8 is_mobile : 1; 195 u8 is_mobile : 1;
195 u8 is_i8xx : 1; 196 u8 is_i8xx : 1;
196 u8 is_i85x : 1; 197 u8 is_i85x : 1;
@@ -206,7 +207,6 @@ struct intel_device_info {
206 u8 is_broadwater : 1; 207 u8 is_broadwater : 1;
207 u8 is_crestline : 1; 208 u8 is_crestline : 1;
208 u8 is_ironlake : 1; 209 u8 is_ironlake : 1;
209 u8 is_gen6 : 1;
210 u8 has_fbc : 1; 210 u8 has_fbc : 1;
211 u8 has_rc6 : 1; 211 u8 has_rc6 : 1;
212 u8 has_pipe_cxsr : 1; 212 u8 has_pipe_cxsr : 1;
@@ -1162,7 +1162,6 @@ extern void intel_overlay_print_error_state(struct seq_file *m, struct intel_ove
1162#define IS_845G(dev) ((dev)->pci_device == 0x2562) 1162#define IS_845G(dev) ((dev)->pci_device == 0x2562)
1163#define IS_I85X(dev) (INTEL_INFO(dev)->is_i85x) 1163#define IS_I85X(dev) (INTEL_INFO(dev)->is_i85x)
1164#define IS_I865G(dev) ((dev)->pci_device == 0x2572) 1164#define IS_I865G(dev) ((dev)->pci_device == 0x2572)
1165#define IS_GEN2(dev) (INTEL_INFO(dev)->is_i8xx)
1166#define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g) 1165#define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g)
1167#define IS_I915GM(dev) ((dev)->pci_device == 0x2592) 1166#define IS_I915GM(dev) ((dev)->pci_device == 0x2592)
1168#define IS_I945G(dev) ((dev)->pci_device == 0x2772) 1167#define IS_I945G(dev) ((dev)->pci_device == 0x2772)
@@ -1181,27 +1180,13 @@ extern void intel_overlay_print_error_state(struct seq_file *m, struct intel_ove
1181#define IS_IRONLAKE_M(dev) ((dev)->pci_device == 0x0046) 1180#define IS_IRONLAKE_M(dev) ((dev)->pci_device == 0x0046)
1182#define IS_IRONLAKE(dev) (INTEL_INFO(dev)->is_ironlake) 1181#define IS_IRONLAKE(dev) (INTEL_INFO(dev)->is_ironlake)
1183#define IS_I9XX(dev) (INTEL_INFO(dev)->is_i9xx) 1182#define IS_I9XX(dev) (INTEL_INFO(dev)->is_i9xx)
1184#define IS_GEN6(dev) (INTEL_INFO(dev)->is_gen6)
1185#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile) 1183#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile)
1186 1184
1187#define IS_GEN3(dev) (IS_I915G(dev) || \ 1185#define IS_GEN2(dev) (INTEL_INFO(dev)->gen == 2)
1188 IS_I915GM(dev) || \ 1186#define IS_GEN3(dev) (INTEL_INFO(dev)->gen == 3)
1189 IS_I945G(dev) || \ 1187#define IS_GEN4(dev) (INTEL_INFO(dev)->gen == 4)
1190 IS_I945GM(dev) || \ 1188#define IS_GEN5(dev) (INTEL_INFO(dev)->gen == 5)
1191 IS_G33(dev) || \ 1189#define IS_GEN6(dev) (INTEL_INFO(dev)->gen == 6)
1192 IS_PINEVIEW(dev))
1193#define IS_GEN4(dev) ((dev)->pci_device == 0x2972 || \
1194 (dev)->pci_device == 0x2982 || \
1195 (dev)->pci_device == 0x2992 || \
1196 (dev)->pci_device == 0x29A2 || \
1197 (dev)->pci_device == 0x2A02 || \
1198 (dev)->pci_device == 0x2A12 || \
1199 (dev)->pci_device == 0x2E02 || \
1200 (dev)->pci_device == 0x2E12 || \
1201 (dev)->pci_device == 0x2E22 || \
1202 (dev)->pci_device == 0x2E32 || \
1203 (dev)->pci_device == 0x2A42 || \
1204 (dev)->pci_device == 0x2E42)
1205 1190
1206#define HAS_BSD(dev) (IS_IRONLAKE(dev) || IS_G4X(dev)) 1191#define HAS_BSD(dev) (IS_IRONLAKE(dev) || IS_G4X(dev))
1207#define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws) 1192#define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index df5a7135c261..16fca1d1799a 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -34,6 +34,7 @@
34#include <linux/slab.h> 34#include <linux/slab.h>
35#include <linux/swap.h> 35#include <linux/swap.h>
36#include <linux/pci.h> 36#include <linux/pci.h>
37#include <linux/intel-gtt.h>
37 38
38static uint32_t i915_gem_get_gtt_alignment(struct drm_gem_object *obj); 39static uint32_t i915_gem_get_gtt_alignment(struct drm_gem_object *obj);
39static int i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj); 40static int i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj);
@@ -135,12 +136,15 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data,
135 return -ENOMEM; 136 return -ENOMEM;
136 137
137 ret = drm_gem_handle_create(file_priv, obj, &handle); 138 ret = drm_gem_handle_create(file_priv, obj, &handle);
138 drm_gem_object_unreference_unlocked(obj); 139 if (ret) {
139 if (ret) 140 drm_gem_object_unreference_unlocked(obj);
140 return ret; 141 return ret;
142 }
141 143
142 args->handle = handle; 144 /* Sink the floating reference from kref_init(handlecount) */
145 drm_gem_object_handle_unreference_unlocked(obj);
143 146
147 args->handle = handle;
144 return 0; 148 return 0;
145} 149}
146 150
@@ -3585,6 +3589,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
3585 if (ret != 0) { 3589 if (ret != 0) {
3586 DRM_ERROR("copy %d cliprects failed: %d\n", 3590 DRM_ERROR("copy %d cliprects failed: %d\n",
3587 args->num_cliprects, ret); 3591 args->num_cliprects, ret);
3592 ret = -EFAULT;
3588 goto pre_mutex_err; 3593 goto pre_mutex_err;
3589 } 3594 }
3590 } 3595 }
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 16861b800fee..744225ebb4b2 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -887,6 +887,49 @@ static void i915_handle_error(struct drm_device *dev, bool wedged)
887 queue_work(dev_priv->wq, &dev_priv->error_work); 887 queue_work(dev_priv->wq, &dev_priv->error_work);
888} 888}
889 889
890static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
891{
892 drm_i915_private_t *dev_priv = dev->dev_private;
893 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
894 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
895 struct drm_i915_gem_object *obj_priv;
896 struct intel_unpin_work *work;
897 unsigned long flags;
898 bool stall_detected;
899
900 /* Ignore early vblank irqs */
901 if (intel_crtc == NULL)
902 return;
903
904 spin_lock_irqsave(&dev->event_lock, flags);
905 work = intel_crtc->unpin_work;
906
907 if (work == NULL || work->pending || !work->enable_stall_check) {
908 /* Either the pending flip IRQ arrived, or we're too early. Don't check */
909 spin_unlock_irqrestore(&dev->event_lock, flags);
910 return;
911 }
912
913 /* Potential stall - if we see that the flip has happened, assume a missed interrupt */
914 obj_priv = to_intel_bo(work->pending_flip_obj);
915 if(IS_I965G(dev)) {
916 int dspsurf = intel_crtc->plane == 0 ? DSPASURF : DSPBSURF;
917 stall_detected = I915_READ(dspsurf) == obj_priv->gtt_offset;
918 } else {
919 int dspaddr = intel_crtc->plane == 0 ? DSPAADDR : DSPBADDR;
920 stall_detected = I915_READ(dspaddr) == (obj_priv->gtt_offset +
921 crtc->y * crtc->fb->pitch +
922 crtc->x * crtc->fb->bits_per_pixel/8);
923 }
924
925 spin_unlock_irqrestore(&dev->event_lock, flags);
926
927 if (stall_detected) {
928 DRM_DEBUG_DRIVER("Pageflip stall detected\n");
929 intel_prepare_page_flip(dev, intel_crtc->plane);
930 }
931}
932
890irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) 933irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
891{ 934{
892 struct drm_device *dev = (struct drm_device *) arg; 935 struct drm_device *dev = (struct drm_device *) arg;
@@ -1004,15 +1047,19 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
1004 if (pipea_stats & vblank_status) { 1047 if (pipea_stats & vblank_status) {
1005 vblank++; 1048 vblank++;
1006 drm_handle_vblank(dev, 0); 1049 drm_handle_vblank(dev, 0);
1007 if (!dev_priv->flip_pending_is_done) 1050 if (!dev_priv->flip_pending_is_done) {
1051 i915_pageflip_stall_check(dev, 0);
1008 intel_finish_page_flip(dev, 0); 1052 intel_finish_page_flip(dev, 0);
1053 }
1009 } 1054 }
1010 1055
1011 if (pipeb_stats & vblank_status) { 1056 if (pipeb_stats & vblank_status) {
1012 vblank++; 1057 vblank++;
1013 drm_handle_vblank(dev, 1); 1058 drm_handle_vblank(dev, 1);
1014 if (!dev_priv->flip_pending_is_done) 1059 if (!dev_priv->flip_pending_is_done) {
1060 i915_pageflip_stall_check(dev, 1);
1015 intel_finish_page_flip(dev, 1); 1061 intel_finish_page_flip(dev, 1);
1062 }
1016 } 1063 }
1017 1064
1018 if ((pipea_stats & PIPE_LEGACY_BLC_EVENT_STATUS) || 1065 if ((pipea_stats & PIPE_LEGACY_BLC_EVENT_STATUS) ||
@@ -1303,17 +1350,25 @@ void i915_hangcheck_elapsed(unsigned long data)
1303 i915_seqno_passed(i915_get_gem_seqno(dev, 1350 i915_seqno_passed(i915_get_gem_seqno(dev,
1304 &dev_priv->render_ring), 1351 &dev_priv->render_ring),
1305 i915_get_tail_request(dev)->seqno)) { 1352 i915_get_tail_request(dev)->seqno)) {
1353 bool missed_wakeup = false;
1354
1306 dev_priv->hangcheck_count = 0; 1355 dev_priv->hangcheck_count = 0;
1307 1356
1308 /* Issue a wake-up to catch stuck h/w. */ 1357 /* Issue a wake-up to catch stuck h/w. */
1309 if (dev_priv->render_ring.waiting_gem_seqno | 1358 if (dev_priv->render_ring.waiting_gem_seqno &&
1310 dev_priv->bsd_ring.waiting_gem_seqno) { 1359 waitqueue_active(&dev_priv->render_ring.irq_queue)) {
1311 DRM_ERROR("Hangcheck timer elapsed... GPU idle, missed IRQ.\n"); 1360 DRM_WAKEUP(&dev_priv->render_ring.irq_queue);
1312 if (dev_priv->render_ring.waiting_gem_seqno) 1361 missed_wakeup = true;
1313 DRM_WAKEUP(&dev_priv->render_ring.irq_queue);
1314 if (dev_priv->bsd_ring.waiting_gem_seqno)
1315 DRM_WAKEUP(&dev_priv->bsd_ring.irq_queue);
1316 } 1362 }
1363
1364 if (dev_priv->bsd_ring.waiting_gem_seqno &&
1365 waitqueue_active(&dev_priv->bsd_ring.irq_queue)) {
1366 DRM_WAKEUP(&dev_priv->bsd_ring.irq_queue);
1367 missed_wakeup = true;
1368 }
1369
1370 if (missed_wakeup)
1371 DRM_ERROR("Hangcheck timer elapsed... GPU idle, missed IRQ.\n");
1317 return; 1372 return;
1318 } 1373 }
1319 1374
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 67e3ec1a6af9..4f5e15577e89 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -319,6 +319,7 @@
319 319
320#define MI_MODE 0x0209c 320#define MI_MODE 0x0209c
321# define VS_TIMER_DISPATCH (1 << 6) 321# define VS_TIMER_DISPATCH (1 << 6)
322# define MI_FLUSH_ENABLE (1 << 11)
322 323
323#define SCPD0 0x0209c /* 915+ only */ 324#define SCPD0 0x0209c /* 915+ only */
324#define IER 0x020a0 325#define IER 0x020a0
@@ -2205,9 +2206,17 @@
2205#define WM1_LP_SR_EN (1<<31) 2206#define WM1_LP_SR_EN (1<<31)
2206#define WM1_LP_LATENCY_SHIFT 24 2207#define WM1_LP_LATENCY_SHIFT 24
2207#define WM1_LP_LATENCY_MASK (0x7f<<24) 2208#define WM1_LP_LATENCY_MASK (0x7f<<24)
2209#define WM1_LP_FBC_LP1_MASK (0xf<<20)
2210#define WM1_LP_FBC_LP1_SHIFT 20
2208#define WM1_LP_SR_MASK (0x1ff<<8) 2211#define WM1_LP_SR_MASK (0x1ff<<8)
2209#define WM1_LP_SR_SHIFT 8 2212#define WM1_LP_SR_SHIFT 8
2210#define WM1_LP_CURSOR_MASK (0x3f) 2213#define WM1_LP_CURSOR_MASK (0x3f)
2214#define WM2_LP_ILK 0x4510c
2215#define WM2_LP_EN (1<<31)
2216#define WM3_LP_ILK 0x45110
2217#define WM3_LP_EN (1<<31)
2218#define WM1S_LP_ILK 0x45120
2219#define WM1S_LP_EN (1<<31)
2211 2220
2212/* Memory latency timer register */ 2221/* Memory latency timer register */
2213#define MLTR_ILK 0x11222 2222#define MLTR_ILK 0x11222
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 11a3394f5fe1..19daead5b525 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -990,6 +990,22 @@ void intel_wait_for_vblank(struct drm_device *dev, int pipe)
990 struct drm_i915_private *dev_priv = dev->dev_private; 990 struct drm_i915_private *dev_priv = dev->dev_private;
991 int pipestat_reg = (pipe == 0 ? PIPEASTAT : PIPEBSTAT); 991 int pipestat_reg = (pipe == 0 ? PIPEASTAT : PIPEBSTAT);
992 992
993 /* Clear existing vblank status. Note this will clear any other
994 * sticky status fields as well.
995 *
996 * This races with i915_driver_irq_handler() with the result
997 * that either function could miss a vblank event. Here it is not
998 * fatal, as we will either wait upon the next vblank interrupt or
999 * timeout. Generally speaking intel_wait_for_vblank() is only
1000 * called during modeset at which time the GPU should be idle and
1001 * should *not* be performing page flips and thus not waiting on
1002 * vblanks...
1003 * Currently, the result of us stealing a vblank from the irq
1004 * handler is that a single frame will be skipped during swapbuffers.
1005 */
1006 I915_WRITE(pipestat_reg,
1007 I915_READ(pipestat_reg) | PIPE_VBLANK_INTERRUPT_STATUS);
1008
993 /* Wait for vblank interrupt bit to set */ 1009 /* Wait for vblank interrupt bit to set */
994 if (wait_for((I915_READ(pipestat_reg) & 1010 if (wait_for((I915_READ(pipestat_reg) &
995 PIPE_VBLANK_INTERRUPT_STATUS), 1011 PIPE_VBLANK_INTERRUPT_STATUS),
@@ -1486,7 +1502,7 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
1486 dspcntr &= ~DISPPLANE_TILED; 1502 dspcntr &= ~DISPPLANE_TILED;
1487 } 1503 }
1488 1504
1489 if (IS_IRONLAKE(dev)) 1505 if (HAS_PCH_SPLIT(dev))
1490 /* must disable */ 1506 /* must disable */
1491 dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE; 1507 dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
1492 1508
@@ -1495,20 +1511,19 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
1495 Start = obj_priv->gtt_offset; 1511 Start = obj_priv->gtt_offset;
1496 Offset = y * fb->pitch + x * (fb->bits_per_pixel / 8); 1512 Offset = y * fb->pitch + x * (fb->bits_per_pixel / 8);
1497 1513
1498 DRM_DEBUG("Writing base %08lX %08lX %d %d\n", Start, Offset, x, y); 1514 DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
1515 Start, Offset, x, y, fb->pitch);
1499 I915_WRITE(dspstride, fb->pitch); 1516 I915_WRITE(dspstride, fb->pitch);
1500 if (IS_I965G(dev)) { 1517 if (IS_I965G(dev)) {
1501 I915_WRITE(dspbase, Offset);
1502 I915_READ(dspbase);
1503 I915_WRITE(dspsurf, Start); 1518 I915_WRITE(dspsurf, Start);
1504 I915_READ(dspsurf);
1505 I915_WRITE(dsptileoff, (y << 16) | x); 1519 I915_WRITE(dsptileoff, (y << 16) | x);
1520 I915_WRITE(dspbase, Offset);
1506 } else { 1521 } else {
1507 I915_WRITE(dspbase, Start + Offset); 1522 I915_WRITE(dspbase, Start + Offset);
1508 I915_READ(dspbase);
1509 } 1523 }
1524 POSTING_READ(dspbase);
1510 1525
1511 if ((IS_I965G(dev) || plane == 0)) 1526 if (IS_I965G(dev) || plane == 0)
1512 intel_update_fbc(crtc, &crtc->mode); 1527 intel_update_fbc(crtc, &crtc->mode);
1513 1528
1514 intel_wait_for_vblank(dev, intel_crtc->pipe); 1529 intel_wait_for_vblank(dev, intel_crtc->pipe);
@@ -1522,7 +1537,6 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
1522 struct drm_framebuffer *old_fb) 1537 struct drm_framebuffer *old_fb)
1523{ 1538{
1524 struct drm_device *dev = crtc->dev; 1539 struct drm_device *dev = crtc->dev;
1525 struct drm_i915_private *dev_priv = dev->dev_private;
1526 struct drm_i915_master_private *master_priv; 1540 struct drm_i915_master_private *master_priv;
1527 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 1541 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1528 struct intel_framebuffer *intel_fb; 1542 struct intel_framebuffer *intel_fb;
@@ -1530,13 +1544,6 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
1530 struct drm_gem_object *obj; 1544 struct drm_gem_object *obj;
1531 int pipe = intel_crtc->pipe; 1545 int pipe = intel_crtc->pipe;
1532 int plane = intel_crtc->plane; 1546 int plane = intel_crtc->plane;
1533 unsigned long Start, Offset;
1534 int dspbase = (plane == 0 ? DSPAADDR : DSPBADDR);
1535 int dspsurf = (plane == 0 ? DSPASURF : DSPBSURF);
1536 int dspstride = (plane == 0) ? DSPASTRIDE : DSPBSTRIDE;
1537 int dsptileoff = (plane == 0 ? DSPATILEOFF : DSPBTILEOFF);
1538 int dspcntr_reg = (plane == 0) ? DSPACNTR : DSPBCNTR;
1539 u32 dspcntr;
1540 int ret; 1547 int ret;
1541 1548
1542 /* no fb bound */ 1549 /* no fb bound */
@@ -1572,71 +1579,18 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
1572 return ret; 1579 return ret;
1573 } 1580 }
1574 1581
1575 dspcntr = I915_READ(dspcntr_reg); 1582 ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y);
1576 /* Mask out pixel format bits in case we change it */ 1583 if (ret) {
1577 dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
1578 switch (crtc->fb->bits_per_pixel) {
1579 case 8:
1580 dspcntr |= DISPPLANE_8BPP;
1581 break;
1582 case 16:
1583 if (crtc->fb->depth == 15)
1584 dspcntr |= DISPPLANE_15_16BPP;
1585 else
1586 dspcntr |= DISPPLANE_16BPP;
1587 break;
1588 case 24:
1589 case 32:
1590 if (crtc->fb->depth == 30)
1591 dspcntr |= DISPPLANE_32BPP_30BIT_NO_ALPHA;
1592 else
1593 dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
1594 break;
1595 default:
1596 DRM_ERROR("Unknown color depth\n");
1597 i915_gem_object_unpin(obj); 1584 i915_gem_object_unpin(obj);
1598 mutex_unlock(&dev->struct_mutex); 1585 mutex_unlock(&dev->struct_mutex);
1599 return -EINVAL; 1586 return ret;
1600 }
1601 if (IS_I965G(dev)) {
1602 if (obj_priv->tiling_mode != I915_TILING_NONE)
1603 dspcntr |= DISPPLANE_TILED;
1604 else
1605 dspcntr &= ~DISPPLANE_TILED;
1606 }
1607
1608 if (HAS_PCH_SPLIT(dev))
1609 /* must disable */
1610 dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
1611
1612 I915_WRITE(dspcntr_reg, dspcntr);
1613
1614 Start = obj_priv->gtt_offset;
1615 Offset = y * crtc->fb->pitch + x * (crtc->fb->bits_per_pixel / 8);
1616
1617 DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
1618 Start, Offset, x, y, crtc->fb->pitch);
1619 I915_WRITE(dspstride, crtc->fb->pitch);
1620 if (IS_I965G(dev)) {
1621 I915_WRITE(dspsurf, Start);
1622 I915_WRITE(dsptileoff, (y << 16) | x);
1623 I915_WRITE(dspbase, Offset);
1624 } else {
1625 I915_WRITE(dspbase, Start + Offset);
1626 } 1587 }
1627 POSTING_READ(dspbase);
1628
1629 if ((IS_I965G(dev) || plane == 0))
1630 intel_update_fbc(crtc, &crtc->mode);
1631
1632 intel_wait_for_vblank(dev, pipe);
1633 1588
1634 if (old_fb) { 1589 if (old_fb) {
1635 intel_fb = to_intel_framebuffer(old_fb); 1590 intel_fb = to_intel_framebuffer(old_fb);
1636 obj_priv = to_intel_bo(intel_fb->obj); 1591 obj_priv = to_intel_bo(intel_fb->obj);
1637 i915_gem_object_unpin(intel_fb->obj); 1592 i915_gem_object_unpin(intel_fb->obj);
1638 } 1593 }
1639 intel_increase_pllclock(crtc, true);
1640 1594
1641 mutex_unlock(&dev->struct_mutex); 1595 mutex_unlock(&dev->struct_mutex);
1642 1596
@@ -1911,9 +1865,6 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
1911 int fdi_tx_reg = (pipe == 0) ? FDI_TXA_CTL : FDI_TXB_CTL; 1865 int fdi_tx_reg = (pipe == 0) ? FDI_TXA_CTL : FDI_TXB_CTL;
1912 int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL; 1866 int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL;
1913 int transconf_reg = (pipe == 0) ? TRANSACONF : TRANSBCONF; 1867 int transconf_reg = (pipe == 0) ? TRANSACONF : TRANSBCONF;
1914 int pf_ctl_reg = (pipe == 0) ? PFA_CTL_1 : PFB_CTL_1;
1915 int pf_win_size = (pipe == 0) ? PFA_WIN_SZ : PFB_WIN_SZ;
1916 int pf_win_pos = (pipe == 0) ? PFA_WIN_POS : PFB_WIN_POS;
1917 int cpu_htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B; 1868 int cpu_htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
1918 int cpu_hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B; 1869 int cpu_hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B;
1919 int cpu_hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B; 1870 int cpu_hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B;
@@ -1982,15 +1933,19 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
1982 } 1933 }
1983 1934
1984 /* Enable panel fitting for LVDS */ 1935 /* Enable panel fitting for LVDS */
1985 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) 1936 if (dev_priv->pch_pf_size &&
1986 || HAS_eDP || intel_pch_has_edp(crtc)) { 1937 (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)
1987 if (dev_priv->pch_pf_size) { 1938 || HAS_eDP || intel_pch_has_edp(crtc))) {
1988 temp = I915_READ(pf_ctl_reg); 1939 /* Force use of hard-coded filter coefficients
1989 I915_WRITE(pf_ctl_reg, temp | PF_ENABLE | PF_FILTER_MED_3x3); 1940 * as some pre-programmed values are broken,
1990 I915_WRITE(pf_win_pos, dev_priv->pch_pf_pos); 1941 * e.g. x201.
1991 I915_WRITE(pf_win_size, dev_priv->pch_pf_size); 1942 */
1992 } else 1943 I915_WRITE(pipe ? PFB_CTL_1 : PFA_CTL_1,
1993 I915_WRITE(pf_ctl_reg, temp & ~PF_ENABLE); 1944 PF_ENABLE | PF_FILTER_MED_3x3);
1945 I915_WRITE(pipe ? PFB_WIN_POS : PFA_WIN_POS,
1946 dev_priv->pch_pf_pos);
1947 I915_WRITE(pipe ? PFB_WIN_SZ : PFA_WIN_SZ,
1948 dev_priv->pch_pf_size);
1994 } 1949 }
1995 1950
1996 /* Enable CPU pipe */ 1951 /* Enable CPU pipe */
@@ -2115,7 +2070,7 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
2115 I915_WRITE(transconf_reg, temp | TRANS_ENABLE); 2070 I915_WRITE(transconf_reg, temp | TRANS_ENABLE);
2116 I915_READ(transconf_reg); 2071 I915_READ(transconf_reg);
2117 2072
2118 if (wait_for(I915_READ(transconf_reg) & TRANS_STATE_ENABLE, 10, 0)) 2073 if (wait_for(I915_READ(transconf_reg) & TRANS_STATE_ENABLE, 100, 1))
2119 DRM_ERROR("failed to enable transcoder\n"); 2074 DRM_ERROR("failed to enable transcoder\n");
2120 } 2075 }
2121 2076
@@ -2155,14 +2110,8 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
2155 udelay(100); 2110 udelay(100);
2156 2111
2157 /* Disable PF */ 2112 /* Disable PF */
2158 temp = I915_READ(pf_ctl_reg); 2113 I915_WRITE(pipe ? PFB_CTL_1 : PFA_CTL_1, 0);
2159 if ((temp & PF_ENABLE) != 0) { 2114 I915_WRITE(pipe ? PFB_WIN_SZ : PFA_WIN_SZ, 0);
2160 I915_WRITE(pf_ctl_reg, temp & ~PF_ENABLE);
2161 I915_READ(pf_ctl_reg);
2162 }
2163 I915_WRITE(pf_win_size, 0);
2164 POSTING_READ(pf_win_size);
2165
2166 2115
2167 /* disable CPU FDI tx and PCH FDI rx */ 2116 /* disable CPU FDI tx and PCH FDI rx */
2168 temp = I915_READ(fdi_tx_reg); 2117 temp = I915_READ(fdi_tx_reg);
@@ -2421,6 +2370,9 @@ static void intel_crtc_dpms(struct drm_crtc *crtc, int mode)
2421 int pipe = intel_crtc->pipe; 2370 int pipe = intel_crtc->pipe;
2422 bool enabled; 2371 bool enabled;
2423 2372
2373 if (intel_crtc->dpms_mode == mode)
2374 return;
2375
2424 intel_crtc->dpms_mode = mode; 2376 intel_crtc->dpms_mode = mode;
2425 intel_crtc->cursor_on = mode == DRM_MODE_DPMS_ON; 2377 intel_crtc->cursor_on = mode == DRM_MODE_DPMS_ON;
2426 2378
@@ -2815,14 +2767,8 @@ static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
2815 /* Don't promote wm_size to unsigned... */ 2767 /* Don't promote wm_size to unsigned... */
2816 if (wm_size > (long)wm->max_wm) 2768 if (wm_size > (long)wm->max_wm)
2817 wm_size = wm->max_wm; 2769 wm_size = wm->max_wm;
2818 if (wm_size <= 0) { 2770 if (wm_size <= 0)
2819 wm_size = wm->default_wm; 2771 wm_size = wm->default_wm;
2820 DRM_ERROR("Insufficient FIFO for plane, expect flickering:"
2821 " entries required = %ld, available = %lu.\n",
2822 entries_required + wm->guard_size,
2823 wm->fifo_size);
2824 }
2825
2826 return wm_size; 2772 return wm_size;
2827} 2773}
2828 2774
@@ -3436,8 +3382,7 @@ static void ironlake_update_wm(struct drm_device *dev, int planea_clock,
3436 reg_value = I915_READ(WM1_LP_ILK); 3382 reg_value = I915_READ(WM1_LP_ILK);
3437 reg_value &= ~(WM1_LP_LATENCY_MASK | WM1_LP_SR_MASK | 3383 reg_value &= ~(WM1_LP_LATENCY_MASK | WM1_LP_SR_MASK |
3438 WM1_LP_CURSOR_MASK); 3384 WM1_LP_CURSOR_MASK);
3439 reg_value |= WM1_LP_SR_EN | 3385 reg_value |= (ilk_sr_latency << WM1_LP_LATENCY_SHIFT) |
3440 (ilk_sr_latency << WM1_LP_LATENCY_SHIFT) |
3441 (sr_wm << WM1_LP_SR_SHIFT) | cursor_wm; 3386 (sr_wm << WM1_LP_SR_SHIFT) | cursor_wm;
3442 3387
3443 I915_WRITE(WM1_LP_ILK, reg_value); 3388 I915_WRITE(WM1_LP_ILK, reg_value);
@@ -3554,10 +3499,9 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3554 u32 dpll = 0, fp = 0, fp2 = 0, dspcntr, pipeconf; 3499 u32 dpll = 0, fp = 0, fp2 = 0, dspcntr, pipeconf;
3555 bool ok, has_reduced_clock = false, is_sdvo = false, is_dvo = false; 3500 bool ok, has_reduced_clock = false, is_sdvo = false, is_dvo = false;
3556 bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false; 3501 bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
3557 bool is_edp = false; 3502 struct intel_encoder *has_edp_encoder = NULL;
3558 struct drm_mode_config *mode_config = &dev->mode_config; 3503 struct drm_mode_config *mode_config = &dev->mode_config;
3559 struct drm_encoder *encoder; 3504 struct drm_encoder *encoder;
3560 struct intel_encoder *intel_encoder = NULL;
3561 const intel_limit_t *limit; 3505 const intel_limit_t *limit;
3562 int ret; 3506 int ret;
3563 struct fdi_m_n m_n = {0}; 3507 struct fdi_m_n m_n = {0};
@@ -3578,12 +3522,12 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3578 drm_vblank_pre_modeset(dev, pipe); 3522 drm_vblank_pre_modeset(dev, pipe);
3579 3523
3580 list_for_each_entry(encoder, &mode_config->encoder_list, head) { 3524 list_for_each_entry(encoder, &mode_config->encoder_list, head) {
3525 struct intel_encoder *intel_encoder;
3581 3526
3582 if (!encoder || encoder->crtc != crtc) 3527 if (encoder->crtc != crtc)
3583 continue; 3528 continue;
3584 3529
3585 intel_encoder = enc_to_intel_encoder(encoder); 3530 intel_encoder = enc_to_intel_encoder(encoder);
3586
3587 switch (intel_encoder->type) { 3531 switch (intel_encoder->type) {
3588 case INTEL_OUTPUT_LVDS: 3532 case INTEL_OUTPUT_LVDS:
3589 is_lvds = true; 3533 is_lvds = true;
@@ -3607,7 +3551,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3607 is_dp = true; 3551 is_dp = true;
3608 break; 3552 break;
3609 case INTEL_OUTPUT_EDP: 3553 case INTEL_OUTPUT_EDP:
3610 is_edp = true; 3554 has_edp_encoder = intel_encoder;
3611 break; 3555 break;
3612 } 3556 }
3613 3557
@@ -3685,10 +3629,10 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3685 int lane = 0, link_bw, bpp; 3629 int lane = 0, link_bw, bpp;
3686 /* eDP doesn't require FDI link, so just set DP M/N 3630 /* eDP doesn't require FDI link, so just set DP M/N
3687 according to current link config */ 3631 according to current link config */
3688 if (is_edp) { 3632 if (has_edp_encoder) {
3689 target_clock = mode->clock; 3633 target_clock = mode->clock;
3690 intel_edp_link_config(intel_encoder, 3634 intel_edp_link_config(has_edp_encoder,
3691 &lane, &link_bw); 3635 &lane, &link_bw);
3692 } else { 3636 } else {
3693 /* DP over FDI requires target mode clock 3637 /* DP over FDI requires target mode clock
3694 instead of link clock */ 3638 instead of link clock */
@@ -3709,7 +3653,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3709 temp |= PIPE_8BPC; 3653 temp |= PIPE_8BPC;
3710 else 3654 else
3711 temp |= PIPE_6BPC; 3655 temp |= PIPE_6BPC;
3712 } else if (is_edp || (is_dp && intel_pch_has_edp(crtc))) { 3656 } else if (has_edp_encoder || (is_dp && intel_pch_has_edp(crtc))) {
3713 switch (dev_priv->edp_bpp/3) { 3657 switch (dev_priv->edp_bpp/3) {
3714 case 8: 3658 case 8:
3715 temp |= PIPE_8BPC; 3659 temp |= PIPE_8BPC;
@@ -3782,7 +3726,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3782 3726
3783 udelay(200); 3727 udelay(200);
3784 3728
3785 if (is_edp) { 3729 if (has_edp_encoder) {
3786 if (dev_priv->lvds_use_ssc) { 3730 if (dev_priv->lvds_use_ssc) {
3787 temp |= DREF_SSC1_ENABLE; 3731 temp |= DREF_SSC1_ENABLE;
3788 I915_WRITE(PCH_DREF_CONTROL, temp); 3732 I915_WRITE(PCH_DREF_CONTROL, temp);
@@ -3931,7 +3875,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3931 dpll_reg = pch_dpll_reg; 3875 dpll_reg = pch_dpll_reg;
3932 } 3876 }
3933 3877
3934 if (!is_edp) { 3878 if (!has_edp_encoder) {
3935 I915_WRITE(fp_reg, fp); 3879 I915_WRITE(fp_reg, fp);
3936 I915_WRITE(dpll_reg, dpll & ~DPLL_VCO_ENABLE); 3880 I915_WRITE(dpll_reg, dpll & ~DPLL_VCO_ENABLE);
3937 I915_READ(dpll_reg); 3881 I915_READ(dpll_reg);
@@ -4026,7 +3970,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
4026 } 3970 }
4027 } 3971 }
4028 3972
4029 if (!is_edp) { 3973 if (!has_edp_encoder) {
4030 I915_WRITE(fp_reg, fp); 3974 I915_WRITE(fp_reg, fp);
4031 I915_WRITE(dpll_reg, dpll); 3975 I915_WRITE(dpll_reg, dpll);
4032 I915_READ(dpll_reg); 3976 I915_READ(dpll_reg);
@@ -4105,7 +4049,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
4105 I915_WRITE(link_m1_reg, m_n.link_m); 4049 I915_WRITE(link_m1_reg, m_n.link_m);
4106 I915_WRITE(link_n1_reg, m_n.link_n); 4050 I915_WRITE(link_n1_reg, m_n.link_n);
4107 4051
4108 if (is_edp) { 4052 if (has_edp_encoder) {
4109 ironlake_set_pll_edp(crtc, adjusted_mode->clock); 4053 ironlake_set_pll_edp(crtc, adjusted_mode->clock);
4110 } else { 4054 } else {
4111 /* enable FDI RX PLL too */ 4055 /* enable FDI RX PLL too */
@@ -4911,15 +4855,6 @@ static void intel_crtc_destroy(struct drm_crtc *crtc)
4911 kfree(intel_crtc); 4855 kfree(intel_crtc);
4912} 4856}
4913 4857
4914struct intel_unpin_work {
4915 struct work_struct work;
4916 struct drm_device *dev;
4917 struct drm_gem_object *old_fb_obj;
4918 struct drm_gem_object *pending_flip_obj;
4919 struct drm_pending_vblank_event *event;
4920 int pending;
4921};
4922
4923static void intel_unpin_work_fn(struct work_struct *__work) 4858static void intel_unpin_work_fn(struct work_struct *__work)
4924{ 4859{
4925 struct intel_unpin_work *work = 4860 struct intel_unpin_work *work =
@@ -5007,7 +4942,8 @@ void intel_prepare_page_flip(struct drm_device *dev, int plane)
5007 4942
5008 spin_lock_irqsave(&dev->event_lock, flags); 4943 spin_lock_irqsave(&dev->event_lock, flags);
5009 if (intel_crtc->unpin_work) { 4944 if (intel_crtc->unpin_work) {
5010 intel_crtc->unpin_work->pending = 1; 4945 if ((++intel_crtc->unpin_work->pending) > 1)
4946 DRM_ERROR("Prepared flip multiple times\n");
5011 } else { 4947 } else {
5012 DRM_DEBUG_DRIVER("preparing flip with no unpin work?\n"); 4948 DRM_DEBUG_DRIVER("preparing flip with no unpin work?\n");
5013 } 4949 }
@@ -5026,9 +4962,9 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
5026 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4962 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5027 struct intel_unpin_work *work; 4963 struct intel_unpin_work *work;
5028 unsigned long flags, offset; 4964 unsigned long flags, offset;
5029 int pipesrc_reg = (intel_crtc->pipe == 0) ? PIPEASRC : PIPEBSRC; 4965 int pipe = intel_crtc->pipe;
5030 int ret, pipesrc; 4966 u32 pf, pipesrc;
5031 u32 flip_mask; 4967 int ret;
5032 4968
5033 work = kzalloc(sizeof *work, GFP_KERNEL); 4969 work = kzalloc(sizeof *work, GFP_KERNEL);
5034 if (work == NULL) 4970 if (work == NULL)
@@ -5077,42 +5013,73 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
5077 atomic_inc(&obj_priv->pending_flip); 5013 atomic_inc(&obj_priv->pending_flip);
5078 work->pending_flip_obj = obj; 5014 work->pending_flip_obj = obj;
5079 5015
5080 if (intel_crtc->plane)
5081 flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
5082 else
5083 flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
5084
5085 if (IS_GEN3(dev) || IS_GEN2(dev)) { 5016 if (IS_GEN3(dev) || IS_GEN2(dev)) {
5017 u32 flip_mask;
5018
5019 if (intel_crtc->plane)
5020 flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
5021 else
5022 flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
5023
5086 BEGIN_LP_RING(2); 5024 BEGIN_LP_RING(2);
5087 OUT_RING(MI_WAIT_FOR_EVENT | flip_mask); 5025 OUT_RING(MI_WAIT_FOR_EVENT | flip_mask);
5088 OUT_RING(0); 5026 OUT_RING(0);
5089 ADVANCE_LP_RING(); 5027 ADVANCE_LP_RING();
5090 } 5028 }
5091 5029
5030 work->enable_stall_check = true;
5031
5092 /* Offset into the new buffer for cases of shared fbs between CRTCs */ 5032 /* Offset into the new buffer for cases of shared fbs between CRTCs */
5093 offset = obj_priv->gtt_offset; 5033 offset = crtc->y * fb->pitch + crtc->x * fb->bits_per_pixel/8;
5094 offset += (crtc->y * fb->pitch) + (crtc->x * (fb->bits_per_pixel) / 8);
5095 5034
5096 BEGIN_LP_RING(4); 5035 BEGIN_LP_RING(4);
5097 if (IS_I965G(dev)) { 5036 switch(INTEL_INFO(dev)->gen) {
5037 case 2:
5098 OUT_RING(MI_DISPLAY_FLIP | 5038 OUT_RING(MI_DISPLAY_FLIP |
5099 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); 5039 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
5100 OUT_RING(fb->pitch); 5040 OUT_RING(fb->pitch);
5101 OUT_RING(offset | obj_priv->tiling_mode); 5041 OUT_RING(obj_priv->gtt_offset + offset);
5102 pipesrc = I915_READ(pipesrc_reg); 5042 OUT_RING(MI_NOOP);
5103 OUT_RING(pipesrc & 0x0fff0fff); 5043 break;
5104 } else if (IS_GEN3(dev)) { 5044
5045 case 3:
5105 OUT_RING(MI_DISPLAY_FLIP_I915 | 5046 OUT_RING(MI_DISPLAY_FLIP_I915 |
5106 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); 5047 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
5107 OUT_RING(fb->pitch); 5048 OUT_RING(fb->pitch);
5108 OUT_RING(offset); 5049 OUT_RING(obj_priv->gtt_offset + offset);
5109 OUT_RING(MI_NOOP); 5050 OUT_RING(MI_NOOP);
5110 } else { 5051 break;
5052
5053 case 4:
5054 case 5:
5055 /* i965+ uses the linear or tiled offsets from the
5056 * Display Registers (which do not change across a page-flip)
5057 * so we need only reprogram the base address.
5058 */
5111 OUT_RING(MI_DISPLAY_FLIP | 5059 OUT_RING(MI_DISPLAY_FLIP |
5112 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); 5060 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
5113 OUT_RING(fb->pitch); 5061 OUT_RING(fb->pitch);
5114 OUT_RING(offset); 5062 OUT_RING(obj_priv->gtt_offset | obj_priv->tiling_mode);
5115 OUT_RING(MI_NOOP); 5063
5064 /* XXX Enabling the panel-fitter across page-flip is so far
5065 * untested on non-native modes, so ignore it for now.
5066 * pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
5067 */
5068 pf = 0;
5069 pipesrc = I915_READ(pipe == 0 ? PIPEASRC : PIPEBSRC) & 0x0fff0fff;
5070 OUT_RING(pf | pipesrc);
5071 break;
5072
5073 case 6:
5074 OUT_RING(MI_DISPLAY_FLIP |
5075 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
5076 OUT_RING(fb->pitch | obj_priv->tiling_mode);
5077 OUT_RING(obj_priv->gtt_offset);
5078
5079 pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
5080 pipesrc = I915_READ(pipe == 0 ? PIPEASRC : PIPEBSRC) & 0x0fff0fff;
5081 OUT_RING(pf | pipesrc);
5082 break;
5116 } 5083 }
5117 ADVANCE_LP_RING(); 5084 ADVANCE_LP_RING();
5118 5085
@@ -5193,7 +5160,7 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
5193 dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base; 5160 dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;
5194 5161
5195 intel_crtc->cursor_addr = 0; 5162 intel_crtc->cursor_addr = 0;
5196 intel_crtc->dpms_mode = DRM_MODE_DPMS_OFF; 5163 intel_crtc->dpms_mode = -1;
5197 drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs); 5164 drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
5198 5165
5199 intel_crtc->busy = false; 5166 intel_crtc->busy = false;
@@ -5701,6 +5668,9 @@ void intel_init_clock_gating(struct drm_device *dev)
5701 I915_WRITE(DISP_ARB_CTL, 5668 I915_WRITE(DISP_ARB_CTL,
5702 (I915_READ(DISP_ARB_CTL) | 5669 (I915_READ(DISP_ARB_CTL) |
5703 DISP_FBC_WM_DIS)); 5670 DISP_FBC_WM_DIS));
5671 I915_WRITE(WM3_LP_ILK, 0);
5672 I915_WRITE(WM2_LP_ILK, 0);
5673 I915_WRITE(WM1_LP_ILK, 0);
5704 } 5674 }
5705 /* 5675 /*
5706 * Based on the document from hardware guys the following bits 5676 * Based on the document from hardware guys the following bits
@@ -5722,8 +5692,7 @@ void intel_init_clock_gating(struct drm_device *dev)
5722 ILK_DPFC_DIS2 | 5692 ILK_DPFC_DIS2 |
5723 ILK_CLK_FBC); 5693 ILK_CLK_FBC);
5724 } 5694 }
5725 if (IS_GEN6(dev)) 5695 return;
5726 return;
5727 } else if (IS_G4X(dev)) { 5696 } else if (IS_G4X(dev)) {
5728 uint32_t dspclk_gate; 5697 uint32_t dspclk_gate;
5729 I915_WRITE(RENCLK_GATE_D1, 0); 5698 I915_WRITE(RENCLK_GATE_D1, 0);
@@ -5784,11 +5753,9 @@ void intel_init_clock_gating(struct drm_device *dev)
5784 OUT_RING(MI_FLUSH); 5753 OUT_RING(MI_FLUSH);
5785 ADVANCE_LP_RING(); 5754 ADVANCE_LP_RING();
5786 } 5755 }
5787 } else { 5756 } else
5788 DRM_DEBUG_KMS("Failed to allocate render context." 5757 DRM_DEBUG_KMS("Failed to allocate render context."
5789 "Disable RC6\n"); 5758 "Disable RC6\n");
5790 return;
5791 }
5792 } 5759 }
5793 5760
5794 if (I915_HAS_RC6(dev) && drm_core_check_feature(dev, DRIVER_MODESET)) { 5761 if (I915_HAS_RC6(dev) && drm_core_check_feature(dev, DRIVER_MODESET)) {
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 9caccd03dccb..51d142939a26 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -239,7 +239,6 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
239 uint32_t ch_data = ch_ctl + 4; 239 uint32_t ch_data = ch_ctl + 4;
240 int i; 240 int i;
241 int recv_bytes; 241 int recv_bytes;
242 uint32_t ctl;
243 uint32_t status; 242 uint32_t status;
244 uint32_t aux_clock_divider; 243 uint32_t aux_clock_divider;
245 int try, precharge; 244 int try, precharge;
@@ -263,41 +262,43 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
263 else 262 else
264 precharge = 5; 263 precharge = 5;
265 264
265 if (I915_READ(ch_ctl) & DP_AUX_CH_CTL_SEND_BUSY) {
266 DRM_ERROR("dp_aux_ch not started status 0x%08x\n",
267 I915_READ(ch_ctl));
268 return -EBUSY;
269 }
270
266 /* Must try at least 3 times according to DP spec */ 271 /* Must try at least 3 times according to DP spec */
267 for (try = 0; try < 5; try++) { 272 for (try = 0; try < 5; try++) {
268 /* Load the send data into the aux channel data registers */ 273 /* Load the send data into the aux channel data registers */
269 for (i = 0; i < send_bytes; i += 4) { 274 for (i = 0; i < send_bytes; i += 4)
270 uint32_t d = pack_aux(send + i, send_bytes - i); 275 I915_WRITE(ch_data + i,
271 276 pack_aux(send + i, send_bytes - i));
272 I915_WRITE(ch_data + i, d);
273 }
274
275 ctl = (DP_AUX_CH_CTL_SEND_BUSY |
276 DP_AUX_CH_CTL_TIME_OUT_400us |
277 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
278 (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
279 (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT) |
280 DP_AUX_CH_CTL_DONE |
281 DP_AUX_CH_CTL_TIME_OUT_ERROR |
282 DP_AUX_CH_CTL_RECEIVE_ERROR);
283 277
284 /* Send the command and wait for it to complete */ 278 /* Send the command and wait for it to complete */
285 I915_WRITE(ch_ctl, ctl); 279 I915_WRITE(ch_ctl,
286 (void) I915_READ(ch_ctl); 280 DP_AUX_CH_CTL_SEND_BUSY |
281 DP_AUX_CH_CTL_TIME_OUT_400us |
282 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
283 (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
284 (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT) |
285 DP_AUX_CH_CTL_DONE |
286 DP_AUX_CH_CTL_TIME_OUT_ERROR |
287 DP_AUX_CH_CTL_RECEIVE_ERROR);
287 for (;;) { 288 for (;;) {
288 udelay(100);
289 status = I915_READ(ch_ctl); 289 status = I915_READ(ch_ctl);
290 if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0) 290 if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
291 break; 291 break;
292 udelay(100);
292 } 293 }
293 294
294 /* Clear done status and any errors */ 295 /* Clear done status and any errors */
295 I915_WRITE(ch_ctl, (status | 296 I915_WRITE(ch_ctl,
296 DP_AUX_CH_CTL_DONE | 297 status |
297 DP_AUX_CH_CTL_TIME_OUT_ERROR | 298 DP_AUX_CH_CTL_DONE |
298 DP_AUX_CH_CTL_RECEIVE_ERROR)); 299 DP_AUX_CH_CTL_TIME_OUT_ERROR |
299 (void) I915_READ(ch_ctl); 300 DP_AUX_CH_CTL_RECEIVE_ERROR);
300 if ((status & DP_AUX_CH_CTL_TIME_OUT_ERROR) == 0) 301 if (status & DP_AUX_CH_CTL_DONE)
301 break; 302 break;
302 } 303 }
303 304
@@ -324,15 +325,12 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
324 /* Unload any bytes sent back from the other side */ 325 /* Unload any bytes sent back from the other side */
325 recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >> 326 recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
326 DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT); 327 DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
327
328 if (recv_bytes > recv_size) 328 if (recv_bytes > recv_size)
329 recv_bytes = recv_size; 329 recv_bytes = recv_size;
330 330
331 for (i = 0; i < recv_bytes; i += 4) { 331 for (i = 0; i < recv_bytes; i += 4)
332 uint32_t d = I915_READ(ch_data + i); 332 unpack_aux(I915_READ(ch_data + i),
333 333 recv + i, recv_bytes - i);
334 unpack_aux(d, recv + i, recv_bytes - i);
335 }
336 334
337 return recv_bytes; 335 return recv_bytes;
338} 336}
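The AUX channel loop above moves the outgoing message into the data registers four bytes at a time and unloads replies the same way, via pack_aux()/unpack_aux(). A self-contained sketch of that packing is below, under the assumption that the first message byte lands in the most significant byte of each 32-bit data word; the function names are reused only for illustration.

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Pack up to four message bytes into one 32-bit AUX data word,
     * first byte in the most significant position (assumed layout). */
    static uint32_t pack_aux(const uint8_t *src, int src_bytes)
    {
        uint32_t v = 0;
        int i;

        if (src_bytes > 4)
            src_bytes = 4;
        for (i = 0; i < src_bytes; i++)
            v |= (uint32_t)src[i] << ((3 - i) * 8);
        return v;
    }

    /* Inverse: unpack a 32-bit word back into up to four bytes. */
    static void unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
    {
        int i;

        if (dst_bytes > 4)
            dst_bytes = 4;
        for (i = 0; i < dst_bytes; i++)
            dst[i] = (uint8_t)(src >> ((3 - i) * 8));
    }

    int main(void)
    {
        uint8_t msg[3] = { 0x10, 0x00, 0x00 };   /* illustrative AUX request */
        uint8_t out[3];

        uint32_t word = pack_aux(msg, sizeof(msg));
        unpack_aux(word, out, sizeof(out));
        printf("word 0x%08x roundtrip %s\n", (unsigned)word,
               memcmp(msg, out, sizeof(msg)) == 0 ? "ok" : "bad");
        return 0;
    }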
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 0e92aa07b382..ad312ca6b3e5 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -176,6 +176,16 @@ struct intel_crtc {
176#define enc_to_intel_encoder(x) container_of(x, struct intel_encoder, enc) 176#define enc_to_intel_encoder(x) container_of(x, struct intel_encoder, enc)
177#define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base) 177#define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base)
178 178
179struct intel_unpin_work {
180 struct work_struct work;
181 struct drm_device *dev;
182 struct drm_gem_object *old_fb_obj;
183 struct drm_gem_object *pending_flip_obj;
184 struct drm_pending_vblank_event *event;
185 int pending;
186 bool enable_stall_check;
187};
188
179struct i2c_adapter *intel_i2c_create(struct drm_device *dev, const u32 reg, 189struct i2c_adapter *intel_i2c_create(struct drm_device *dev, const u32 reg,
180 const char *name); 190 const char *name);
181void intel_i2c_destroy(struct i2c_adapter *adapter); 191void intel_i2c_destroy(struct i2c_adapter *adapter);
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index b819c1081147..4fbb0165b26f 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -875,8 +875,6 @@ void intel_lvds_init(struct drm_device *dev)
875 875
876 intel_encoder->clone_mask = (1 << INTEL_LVDS_CLONE_BIT); 876 intel_encoder->clone_mask = (1 << INTEL_LVDS_CLONE_BIT);
877 intel_encoder->crtc_mask = (1 << 1); 877 intel_encoder->crtc_mask = (1 << 1);
878 if (IS_I965G(dev))
879 intel_encoder->crtc_mask |= (1 << 0);
880 drm_encoder_helper_add(encoder, &intel_lvds_helper_funcs); 878 drm_encoder_helper_add(encoder, &intel_lvds_helper_funcs);
881 drm_connector_helper_add(connector, &intel_lvds_connector_helper_funcs); 879 drm_connector_helper_add(connector, &intel_lvds_connector_helper_funcs);
882 connector->display_info.subpixel_order = SubPixelHorizontalRGB; 880 connector->display_info.subpixel_order = SubPixelHorizontalRGB;
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 4f00390d7c61..1d306a458be6 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -25,6 +25,8 @@
25 * 25 *
26 * Derived from Xorg ddx, xf86-video-intel, src/i830_video.c 26 * Derived from Xorg ddx, xf86-video-intel, src/i830_video.c
27 */ 27 */
28
29#include <linux/seq_file.h>
28#include "drmP.h" 30#include "drmP.h"
29#include "drm.h" 31#include "drm.h"
30#include "i915_drm.h" 32#include "i915_drm.h"
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 51e9c9e718c4..cb3508f78bc3 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -220,9 +220,13 @@ static int init_render_ring(struct drm_device *dev,
220{ 220{
221 drm_i915_private_t *dev_priv = dev->dev_private; 221 drm_i915_private_t *dev_priv = dev->dev_private;
222 int ret = init_ring_common(dev, ring); 222 int ret = init_ring_common(dev, ring);
223 int mode;
224
223 if (IS_I9XX(dev) && !IS_GEN3(dev)) { 225 if (IS_I9XX(dev) && !IS_GEN3(dev)) {
224 I915_WRITE(MI_MODE, 226 mode = VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH;
225 (VS_TIMER_DISPATCH) << 16 | VS_TIMER_DISPATCH); 227 if (IS_GEN6(dev))
228 mode |= MI_FLUSH_ENABLE << 16 | MI_FLUSH_ENABLE;
229 I915_WRITE(MI_MODE, mode);
226 } 230 }
227 return ret; 231 return ret;
228} 232}
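The MI_MODE write above follows the usual masked-register convention: bits written as ones in the upper 16 bits select which of the lower 16 bits the write is allowed to change, so "bit << 16 | bit" enables a feature without disturbing its neighbours. A small sketch of building such values; the macro names and the exact bit positions are placeholders for illustration, not taken from i915_reg.h.

    #include <stdint.h>
    #include <stdio.h>

    /* Masked-register helpers: high half is the write-enable mask. */
    #define MASKED_BIT_ENABLE(b)   (((b) << 16) | (b))
    #define MASKED_BIT_DISABLE(b)  ((b) << 16)

    #define VS_TIMER_DISPATCH  (1 << 6)    /* illustrative bit position */
    #define MI_FLUSH_ENABLE    (1 << 11)   /* illustrative bit position */

    int main(void)
    {
        uint32_t mode = MASKED_BIT_ENABLE(VS_TIMER_DISPATCH);
        int is_gen6 = 1;                   /* assumed for the example */

        if (is_gen6)
            mode |= MASKED_BIT_ENABLE(MI_FLUSH_ENABLE);

        printf("MI_MODE value 0x%08x\n", (unsigned)mode);
        return 0;
    }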
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 093e914e8a41..e3b7a7ee39cb 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -1061,8 +1061,9 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder,
1061 if (!intel_sdvo_set_output_timings_from_mode(intel_sdvo, mode)) 1061 if (!intel_sdvo_set_output_timings_from_mode(intel_sdvo, mode))
1062 return false; 1062 return false;
1063 1063
1064 if (!intel_sdvo_set_input_timings_for_mode(intel_sdvo, mode, adjusted_mode)) 1064 (void) intel_sdvo_set_input_timings_for_mode(intel_sdvo,
1065 return false; 1065 mode,
1066 adjusted_mode);
1066 } else if (intel_sdvo->is_lvds) { 1067 } else if (intel_sdvo->is_lvds) {
1067 drm_mode_set_crtcinfo(intel_sdvo->sdvo_lvds_fixed_mode, 0); 1068 drm_mode_set_crtcinfo(intel_sdvo->sdvo_lvds_fixed_mode, 0);
1068 1069
@@ -1070,8 +1071,9 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder,
1070 intel_sdvo->sdvo_lvds_fixed_mode)) 1071 intel_sdvo->sdvo_lvds_fixed_mode))
1071 return false; 1072 return false;
1072 1073
1073 if (!intel_sdvo_set_input_timings_for_mode(intel_sdvo, mode, adjusted_mode)) 1074 (void) intel_sdvo_set_input_timings_for_mode(intel_sdvo,
1074 return false; 1075 mode,
1076 adjusted_mode);
1075 } 1077 }
1076 1078
1077 /* Make the CRTC code factor in the SDVO pixel multiplier. The 1079 /* Make the CRTC code factor in the SDVO pixel multiplier. The
@@ -1108,10 +1110,9 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
1108 in_out.in0 = intel_sdvo->attached_output; 1110 in_out.in0 = intel_sdvo->attached_output;
1109 in_out.in1 = 0; 1111 in_out.in1 = 0;
1110 1112
1111 if (!intel_sdvo_set_value(intel_sdvo, 1113 intel_sdvo_set_value(intel_sdvo,
1112 SDVO_CMD_SET_IN_OUT_MAP, 1114 SDVO_CMD_SET_IN_OUT_MAP,
1113 &in_out, sizeof(in_out))) 1115 &in_out, sizeof(in_out));
1114 return;
1115 1116
1116 if (intel_sdvo->is_hdmi) { 1117 if (intel_sdvo->is_hdmi) {
1117 if (!intel_sdvo_set_avi_infoframe(intel_sdvo, mode)) 1118 if (!intel_sdvo_set_avi_infoframe(intel_sdvo, mode))
@@ -1122,11 +1123,9 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
1122 1123
1123 /* We have tried to get input timing in mode_fixup, and filled into 1124 /* We have tried to get input timing in mode_fixup, and filled into
1124 adjusted_mode */ 1125 adjusted_mode */
1125 if (intel_sdvo->is_tv || intel_sdvo->is_lvds) { 1126 intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode);
1126 intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode); 1127 if (intel_sdvo->is_tv || intel_sdvo->is_lvds)
1127 input_dtd.part2.sdvo_flags = intel_sdvo->sdvo_flags; 1128 input_dtd.part2.sdvo_flags = intel_sdvo->sdvo_flags;
1128 } else
1129 intel_sdvo_get_dtd_from_mode(&input_dtd, mode);
1130 1129
1131 /* If it's a TV, we already set the output timing in mode_fixup. 1130 /* If it's a TV, we already set the output timing in mode_fixup.
1132 * Otherwise, the output timing is equal to the input timing. 1131 * Otherwise, the output timing is equal to the input timing.
@@ -1137,8 +1136,7 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
1137 intel_sdvo->attached_output)) 1136 intel_sdvo->attached_output))
1138 return; 1137 return;
1139 1138
1140 if (!intel_sdvo_set_output_timing(intel_sdvo, &input_dtd)) 1139 (void) intel_sdvo_set_output_timing(intel_sdvo, &input_dtd);
1141 return;
1142 } 1140 }
1143 1141
1144 /* Set the input timing to the screen. Assume always input 0. */ 1142 /* Set the input timing to the screen. Assume always input 0. */
@@ -1165,8 +1163,7 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
1165 intel_sdvo_set_input_timing(encoder, &input_dtd); 1163 intel_sdvo_set_input_timing(encoder, &input_dtd);
1166 } 1164 }
1167#else 1165#else
1168 if (!intel_sdvo_set_input_timing(intel_sdvo, &input_dtd)) 1166 (void) intel_sdvo_set_input_timing(intel_sdvo, &input_dtd);
1169 return;
1170#endif 1167#endif
1171 1168
1172 sdvo_pixel_multiply = intel_sdvo_get_pixel_multiplier(mode); 1169 sdvo_pixel_multiply = intel_sdvo_get_pixel_multiplier(mode);
@@ -1932,6 +1929,41 @@ static const struct drm_encoder_funcs intel_sdvo_enc_funcs = {
1932 .destroy = intel_sdvo_enc_destroy, 1929 .destroy = intel_sdvo_enc_destroy,
1933}; 1930};
1934 1931
1932static void
1933intel_sdvo_guess_ddc_bus(struct intel_sdvo *sdvo)
1934{
1935 uint16_t mask = 0;
1936 unsigned int num_bits;
1937
1938 /* Make a mask of outputs less than or equal to our own priority in the
1939 * list.
1940 */
1941 switch (sdvo->controlled_output) {
1942 case SDVO_OUTPUT_LVDS1:
1943 mask |= SDVO_OUTPUT_LVDS1;
1944 case SDVO_OUTPUT_LVDS0:
1945 mask |= SDVO_OUTPUT_LVDS0;
1946 case SDVO_OUTPUT_TMDS1:
1947 mask |= SDVO_OUTPUT_TMDS1;
1948 case SDVO_OUTPUT_TMDS0:
1949 mask |= SDVO_OUTPUT_TMDS0;
1950 case SDVO_OUTPUT_RGB1:
1951 mask |= SDVO_OUTPUT_RGB1;
1952 case SDVO_OUTPUT_RGB0:
1953 mask |= SDVO_OUTPUT_RGB0;
1954 break;
1955 }
1956
1957 /* Count bits to find what number we are in the priority list. */
1958 mask &= sdvo->caps.output_flags;
1959 num_bits = hweight16(mask);
1960 /* If more than 3 outputs, default to DDC bus 3 for now. */
1961 if (num_bits > 3)
1962 num_bits = 3;
1963
1964 /* Corresponds to SDVO_CONTROL_BUS_DDCx */
1965 sdvo->ddc_bus = 1 << num_bits;
1966}
1935 1967
1936/** 1968/**
1937 * Choose the appropriate DDC bus for control bus switch command for this 1969 * Choose the appropriate DDC bus for control bus switch command for this
@@ -1951,7 +1983,10 @@ intel_sdvo_select_ddc_bus(struct drm_i915_private *dev_priv,
1951 else 1983 else
1952 mapping = &(dev_priv->sdvo_mappings[1]); 1984 mapping = &(dev_priv->sdvo_mappings[1]);
1953 1985
1954 sdvo->ddc_bus = 1 << ((mapping->ddc_pin & 0xf0) >> 4); 1986 if (mapping->initialized)
1987 sdvo->ddc_bus = 1 << ((mapping->ddc_pin & 0xf0) >> 4);
1988 else
1989 intel_sdvo_guess_ddc_bus(sdvo);
1955} 1990}
1956 1991
1957static bool 1992static bool
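When the VBIOS mapping is uninitialized, the new intel_sdvo_guess_ddc_bus() builds a mask of every output at or below the device's own place in the priority list, counts the set bits, and uses that count as the DDC bus index. A stand-alone sketch of the same mask-and-popcount idea follows; the output flag values and their ordering are assumptions made so the example compiles, not the real SDVO_OUTPUT_* encoding.

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative output flags, ordered so that a lower bit means a
     * higher place in the priority list. */
    #define OUT_TMDS0 (1 << 0)
    #define OUT_RGB0  (1 << 1)
    #define OUT_TMDS1 (1 << 2)
    #define OUT_RGB1  (1 << 3)
    #define OUT_LVDS0 (1 << 4)
    #define OUT_LVDS1 (1 << 5)

    /* Pick a DDC bus for `own`: count how many supported outputs rank at
     * or below our own priority and map that count to a bus bit. */
    static unsigned guess_ddc_bus(uint16_t supported, uint16_t own)
    {
        uint16_t mask = (uint16_t)((own << 1) - 1);   /* own output + lower bits */
        unsigned n = __builtin_popcount(supported & mask);

        if (n > 3)                /* more than 3 outputs: default to bus 3 */
            n = 3;
        return 1u << n;           /* analogue of SDVO_CONTROL_BUS_DDCn */
    }

    int main(void)
    {
        uint16_t caps = OUT_TMDS0 | OUT_RGB0 | OUT_TMDS1;
        printf("ddc bus for TMDS1: 0x%x\n", guess_ddc_bus(caps, OUT_TMDS1));
        return 0;
    }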
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index d2029efee982..c671f60ce80b 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -1231,7 +1231,6 @@ intel_tv_detect_type (struct intel_tv *intel_tv)
1231 struct drm_encoder *encoder = &intel_tv->base.enc; 1231 struct drm_encoder *encoder = &intel_tv->base.enc;
1232 struct drm_device *dev = encoder->dev; 1232 struct drm_device *dev = encoder->dev;
1233 struct drm_i915_private *dev_priv = dev->dev_private; 1233 struct drm_i915_private *dev_priv = dev->dev_private;
1234 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
1235 unsigned long irqflags; 1234 unsigned long irqflags;
1236 u32 tv_ctl, save_tv_ctl; 1235 u32 tv_ctl, save_tv_ctl;
1237 u32 tv_dac, save_tv_dac; 1236 u32 tv_dac, save_tv_dac;
@@ -1268,11 +1267,15 @@ intel_tv_detect_type (struct intel_tv *intel_tv)
1268 DAC_C_0_7_V); 1267 DAC_C_0_7_V);
1269 I915_WRITE(TV_CTL, tv_ctl); 1268 I915_WRITE(TV_CTL, tv_ctl);
1270 I915_WRITE(TV_DAC, tv_dac); 1269 I915_WRITE(TV_DAC, tv_dac);
1271 intel_wait_for_vblank(dev, intel_crtc->pipe); 1270 POSTING_READ(TV_DAC);
1271 msleep(20);
1272
1272 tv_dac = I915_READ(TV_DAC); 1273 tv_dac = I915_READ(TV_DAC);
1273 I915_WRITE(TV_DAC, save_tv_dac); 1274 I915_WRITE(TV_DAC, save_tv_dac);
1274 I915_WRITE(TV_CTL, save_tv_ctl); 1275 I915_WRITE(TV_CTL, save_tv_ctl);
1275 intel_wait_for_vblank(dev, intel_crtc->pipe); 1276 POSTING_READ(TV_CTL);
1277 msleep(20);
1278
1276 /* 1279 /*
1277 * A B C 1280 * A B C
1278 * 0 1 1 Composite 1281 * 0 1 1 Composite
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index e4f33a4edea1..974b0f8ae048 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -3869,27 +3869,10 @@ static int call_lvds_manufacturer_script(struct drm_device *dev, struct dcb_entr
3869 } 3869 }
3870#ifdef __powerpc__ 3870#ifdef __powerpc__
3871 /* Powerbook specific quirks */ 3871 /* Powerbook specific quirks */
3872 if ((dev->pci_device & 0xffff) == 0x0179 || 3872 if (script == LVDS_RESET &&
3873 (dev->pci_device & 0xffff) == 0x0189 || 3873 (dev->pci_device == 0x0179 || dev->pci_device == 0x0189 ||
3874 (dev->pci_device & 0xffff) == 0x0329) { 3874 dev->pci_device == 0x0329))
3875 if (script == LVDS_RESET) { 3875 nv_write_tmds(dev, dcbent->or, 0, 0x02, 0x72);
3876 nv_write_tmds(dev, dcbent->or, 0, 0x02, 0x72);
3877
3878 } else if (script == LVDS_PANEL_ON) {
3879 bios_wr32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL,
3880 bios_rd32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL)
3881 | (1 << 31));
3882 bios_wr32(bios, NV_PCRTC_GPIO_EXT,
3883 bios_rd32(bios, NV_PCRTC_GPIO_EXT) | 1);
3884
3885 } else if (script == LVDS_PANEL_OFF) {
3886 bios_wr32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL,
3887 bios_rd32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL)
3888 & ~(1 << 31));
3889 bios_wr32(bios, NV_PCRTC_GPIO_EXT,
3890 bios_rd32(bios, NV_PCRTC_GPIO_EXT) & ~3);
3891 }
3892 }
3893#endif 3876#endif
3894 3877
3895 return 0; 3878 return 0;
@@ -4381,11 +4364,8 @@ int nouveau_bios_parse_lvds_table(struct drm_device *dev, int pxclk, bool *dl, b
4381 * 4364 *
4382 * For the moment, a quirk will do :) 4365 * For the moment, a quirk will do :)
4383 */ 4366 */
4384 if ((dev->pdev->device == 0x01d7) && 4367 if (nv_match_device(dev, 0x01d7, 0x1028, 0x01c2))
4385 (dev->pdev->subsystem_vendor == 0x1028) &&
4386 (dev->pdev->subsystem_device == 0x01c2)) {
4387 bios->fp.duallink_transition_clk = 80000; 4368 bios->fp.duallink_transition_clk = 80000;
4388 }
4389 4369
4390 /* set dual_link flag for EDID case */ 4370 /* set dual_link flag for EDID case */
4391 if (pxclk && (chip_version < 0x25 || chip_version > 0x28)) 4371 if (pxclk && (chip_version < 0x25 || chip_version > 0x28))
@@ -5814,9 +5794,7 @@ parse_dcb_gpio_table(struct nvbios *bios)
5814 */ 5794 */
5815 5795
5816 /* Apple iMac G4 NV18 */ 5796 /* Apple iMac G4 NV18 */
5817 if (dev->pdev->device == 0x0189 && 5797 if (nv_match_device(dev, 0x0189, 0x10de, 0x0010)) {
5818 dev->pdev->subsystem_vendor == 0x10de &&
5819 dev->pdev->subsystem_device == 0x0010) {
5820 struct dcb_gpio_entry *gpio = new_gpio_entry(bios); 5798 struct dcb_gpio_entry *gpio = new_gpio_entry(bios);
5821 5799
5822 gpio->tag = DCB_GPIO_TVDAC0; 5800 gpio->tag = DCB_GPIO_TVDAC0;
@@ -5898,9 +5876,7 @@ apply_dcb_connector_quirks(struct nvbios *bios, int idx)
5898 struct drm_device *dev = bios->dev; 5876 struct drm_device *dev = bios->dev;
5899 5877
5900 /* Gigabyte NX85T */ 5878 /* Gigabyte NX85T */
5901 if ((dev->pdev->device == 0x0421) && 5879 if (nv_match_device(dev, 0x0421, 0x1458, 0x344c)) {
5902 (dev->pdev->subsystem_vendor == 0x1458) &&
5903 (dev->pdev->subsystem_device == 0x344c)) {
5904 if (cte->type == DCB_CONNECTOR_HDMI_1) 5880 if (cte->type == DCB_CONNECTOR_HDMI_1)
5905 cte->type = DCB_CONNECTOR_DVI_I; 5881 cte->type = DCB_CONNECTOR_DVI_I;
5906 } 5882 }
@@ -6153,7 +6129,7 @@ parse_dcb20_entry(struct drm_device *dev, struct dcb_table *dcb,
6153 entry->tmdsconf.slave_addr = (conf & 0x00000070) >> 4; 6129 entry->tmdsconf.slave_addr = (conf & 0x00000070) >> 4;
6154 6130
6155 break; 6131 break;
6156 case 0xe: 6132 case OUTPUT_EOL:
6157 /* weird g80 mobile type that "nv" treats as a terminator */ 6133 /* weird g80 mobile type that "nv" treats as a terminator */
6158 dcb->entries--; 6134 dcb->entries--;
6159 return false; 6135 return false;
@@ -6190,22 +6166,14 @@ parse_dcb15_entry(struct drm_device *dev, struct dcb_table *dcb,
6190 entry->type = OUTPUT_TV; 6166 entry->type = OUTPUT_TV;
6191 break; 6167 break;
6192 case 2: 6168 case 2:
6193 case 3:
6194 entry->type = OUTPUT_LVDS;
6195 break;
6196 case 4: 6169 case 4:
6197 switch ((conn & 0x000000f0) >> 4) { 6170 if (conn & 0x10)
6198 case 0:
6199 entry->type = OUTPUT_TMDS;
6200 break;
6201 case 1:
6202 entry->type = OUTPUT_LVDS; 6171 entry->type = OUTPUT_LVDS;
6203 break; 6172 else
6204 default: 6173 entry->type = OUTPUT_TMDS;
6205 NV_ERROR(dev, "Unknown DCB subtype 4/%d\n", 6174 break;
6206 (conn & 0x000000f0) >> 4); 6175 case 3:
6207 return false; 6176 entry->type = OUTPUT_LVDS;
6208 }
6209 break; 6177 break;
6210 default: 6178 default:
6211 NV_ERROR(dev, "Unknown DCB type %d\n", conn & 0x0000000f); 6179 NV_ERROR(dev, "Unknown DCB type %d\n", conn & 0x0000000f);
@@ -6321,9 +6289,7 @@ apply_dcb_encoder_quirks(struct drm_device *dev, int idx, u32 *conn, u32 *conf)
6321 * nasty problems until this is sorted (assuming it's not a 6289 * nasty problems until this is sorted (assuming it's not a
6322 * VBIOS bug). 6290 * VBIOS bug).
6323 */ 6291 */
6324 if ((dev->pdev->device == 0x040d) && 6292 if (nv_match_device(dev, 0x040d, 0x1028, 0x019b)) {
6325 (dev->pdev->subsystem_vendor == 0x1028) &&
6326 (dev->pdev->subsystem_device == 0x019b)) {
6327 if (*conn == 0x02026312 && *conf == 0x00000020) 6293 if (*conn == 0x02026312 && *conf == 0x00000020)
6328 return false; 6294 return false;
6329 } 6295 }
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.h b/drivers/gpu/drm/nouveau/nouveau_bios.h
index fd14dfd3d780..c1de2f3fcb0e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.h
@@ -95,6 +95,7 @@ enum dcb_type {
95 OUTPUT_TMDS = 2, 95 OUTPUT_TMDS = 2,
96 OUTPUT_LVDS = 3, 96 OUTPUT_LVDS = 3,
97 OUTPUT_DP = 6, 97 OUTPUT_DP = 6,
98 OUTPUT_EOL = 14, /* DCB 4.0+, appears to be end-of-list */
98 OUTPUT_ANY = -1 99 OUTPUT_ANY = -1
99}; 100};
100 101
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 1e093a069b7b..b1be617373b6 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -1389,6 +1389,15 @@ nv_two_reg_pll(struct drm_device *dev)
1389 return false; 1389 return false;
1390} 1390}
1391 1391
1392static inline bool
1393nv_match_device(struct drm_device *dev, unsigned device,
1394 unsigned sub_vendor, unsigned sub_device)
1395{
1396 return dev->pdev->device == device &&
1397 dev->pdev->subsystem_vendor == sub_vendor &&
1398 dev->pdev->subsystem_device == sub_device;
1399}
1400
1392#define NV_SW 0x0000506e 1401#define NV_SW 0x0000506e
1393#define NV_SW_DMA_SEMAPHORE 0x00000060 1402#define NV_SW_DMA_SEMAPHORE 0x00000060
1394#define NV_SW_SEMAPHORE_OFFSET 0x00000064 1403#define NV_SW_SEMAPHORE_OFFSET 0x00000064
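Several of the board quirk checks in this series collapse into the new nv_match_device() helper, which compares the PCI device ID and the subsystem vendor/device pair in one place. A stand-alone sketch of the same pattern is below; the struct is a trimmed stand-in for the PCI device, and the IDs are taken from the duallink transition-clock quirk above.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct pci_ident {
        uint16_t device;
        uint16_t sub_vendor;
        uint16_t sub_device;
    };

    /* Match device ID plus subsystem vendor/device in one predicate. */
    static bool match_device(const struct pci_ident *dev, uint16_t device,
                             uint16_t sub_vendor, uint16_t sub_device)
    {
        return dev->device == device &&
               dev->sub_vendor == sub_vendor &&
               dev->sub_device == sub_device;
    }

    int main(void)
    {
        /* Board being probed (IDs copied from the quirk in the diff). */
        struct pci_ident board = { 0x01d7, 0x1028, 0x01c2 };

        if (match_device(&board, 0x01d7, 0x1028, 0x01c2))
            printf("applying duallink_transition_clk quirk\n");
        return 0;
    }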
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index 6b208ffafa8d..87ac21ec23d2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -64,16 +64,17 @@ nouveau_fence_update(struct nouveau_channel *chan)
64 struct nouveau_fence *fence; 64 struct nouveau_fence *fence;
65 uint32_t sequence; 65 uint32_t sequence;
66 66
67 spin_lock(&chan->fence.lock);
68
67 if (USE_REFCNT) 69 if (USE_REFCNT)
68 sequence = nvchan_rd32(chan, 0x48); 70 sequence = nvchan_rd32(chan, 0x48);
69 else 71 else
70 sequence = atomic_read(&chan->fence.last_sequence_irq); 72 sequence = atomic_read(&chan->fence.last_sequence_irq);
71 73
72 if (chan->fence.sequence_ack == sequence) 74 if (chan->fence.sequence_ack == sequence)
73 return; 75 goto out;
74 chan->fence.sequence_ack = sequence; 76 chan->fence.sequence_ack = sequence;
75 77
76 spin_lock(&chan->fence.lock);
77 list_for_each_safe(entry, tmp, &chan->fence.pending) { 78 list_for_each_safe(entry, tmp, &chan->fence.pending) {
78 fence = list_entry(entry, struct nouveau_fence, entry); 79 fence = list_entry(entry, struct nouveau_fence, entry);
79 80
@@ -85,6 +86,7 @@ nouveau_fence_update(struct nouveau_channel *chan)
85 if (sequence == chan->fence.sequence_ack) 86 if (sequence == chan->fence.sequence_ack)
86 break; 87 break;
87 } 88 }
89out:
88 spin_unlock(&chan->fence.lock); 90 spin_unlock(&chan->fence.lock);
89} 91}
90 92
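The fence fix takes chan->fence.lock before the sequence numbers are read and compared, so the early bail-out now leaves through a common unlock label instead of returning before the lock was ever taken. A minimal pthread sketch of that "lock first, goto out" shape, under the assumption that a plain mutex stands in for the spinlock:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static unsigned ack, current_seq = 5;

    static void update(void)
    {
        pthread_mutex_lock(&lock);      /* taken before any shared read */

        unsigned seq = current_seq;
        if (ack == seq)
            goto out;                   /* early exit through the unlock */
        ack = seq;

        /* ... walk and signal pending fences up to `seq` here ... */
    out:
        pthread_mutex_unlock(&lock);
    }

    int main(void)
    {
        update();
        update();                       /* second call hits the early exit */
        printf("ack = %u\n", ack);
        return 0;
    }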
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 79fc5ffff226..ead7b8fc53fc 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -245,7 +245,7 @@ validate_fini_list(struct list_head *list, struct nouveau_fence *fence)
245 list_del(&nvbo->entry); 245 list_del(&nvbo->entry);
246 nvbo->reserved_by = NULL; 246 nvbo->reserved_by = NULL;
247 ttm_bo_unreserve(&nvbo->bo); 247 ttm_bo_unreserve(&nvbo->bo);
248 drm_gem_object_unreference(nvbo->gem); 248 drm_gem_object_unreference_unlocked(nvbo->gem);
249 } 249 }
250} 250}
251 251
@@ -300,7 +300,7 @@ retry:
300 validate_fini(op, NULL); 300 validate_fini(op, NULL);
301 if (ret == -EAGAIN) 301 if (ret == -EAGAIN)
302 ret = ttm_bo_wait_unreserved(&nvbo->bo, false); 302 ret = ttm_bo_wait_unreserved(&nvbo->bo, false);
303 drm_gem_object_unreference(gem); 303 drm_gem_object_unreference_unlocked(gem);
304 if (ret) { 304 if (ret) {
305 NV_ERROR(dev, "fail reserve\n"); 305 NV_ERROR(dev, "fail reserve\n");
306 return ret; 306 return ret;
@@ -337,7 +337,9 @@ retry:
337 return -EINVAL; 337 return -EINVAL;
338 } 338 }
339 339
340 mutex_unlock(&drm_global_mutex);
340 ret = ttm_bo_wait_cpu(&nvbo->bo, false); 341 ret = ttm_bo_wait_cpu(&nvbo->bo, false);
342 mutex_lock(&drm_global_mutex);
341 if (ret) { 343 if (ret) {
342 NV_ERROR(dev, "fail wait_cpu\n"); 344 NV_ERROR(dev, "fail wait_cpu\n");
343 return ret; 345 return ret;
@@ -614,8 +616,6 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
614 return PTR_ERR(bo); 616 return PTR_ERR(bo);
615 } 617 }
616 618
617 mutex_lock(&dev->struct_mutex);
618
619 /* Mark push buffers as being used on PFIFO, the validation code 619 /* Mark push buffers as being used on PFIFO, the validation code
620 * will then make sure that if the pushbuf bo moves, that they 620 * will then make sure that if the pushbuf bo moves, that they
621 * happen on the kernel channel, which will in turn cause a sync 621 * happen on the kernel channel, which will in turn cause a sync
@@ -663,7 +663,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
663 push[i].length); 663 push[i].length);
664 } 664 }
665 } else 665 } else
666 if (dev_priv->card_type >= NV_20) { 666 if (dev_priv->chipset >= 0x25) {
667 ret = RING_SPACE(chan, req->nr_push * 2); 667 ret = RING_SPACE(chan, req->nr_push * 2);
668 if (ret) { 668 if (ret) {
669 NV_ERROR(dev, "cal_space: %d\n", ret); 669 NV_ERROR(dev, "cal_space: %d\n", ret);
@@ -729,7 +729,6 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
729out: 729out:
730 validate_fini(&op, fence); 730 validate_fini(&op, fence);
731 nouveau_fence_unref((void**)&fence); 731 nouveau_fence_unref((void**)&fence);
732 mutex_unlock(&dev->struct_mutex);
733 kfree(bo); 732 kfree(bo);
734 kfree(push); 733 kfree(push);
735 734
@@ -738,7 +737,7 @@ out_next:
738 req->suffix0 = 0x00000000; 737 req->suffix0 = 0x00000000;
739 req->suffix1 = 0x00000000; 738 req->suffix1 = 0x00000000;
740 } else 739 } else
741 if (dev_priv->card_type >= NV_20) { 740 if (dev_priv->chipset >= 0x25) {
742 req->suffix0 = 0x00020000; 741 req->suffix0 = 0x00020000;
743 req->suffix1 = 0x00000000; 742 req->suffix1 = 0x00000000;
744 } else { 743 } else {
diff --git a/drivers/gpu/drm/nouveau/nv04_dfp.c b/drivers/gpu/drm/nouveau/nv04_dfp.c
index a5dcf7685800..0d3206a7046c 100644
--- a/drivers/gpu/drm/nouveau/nv04_dfp.c
+++ b/drivers/gpu/drm/nouveau/nv04_dfp.c
@@ -444,6 +444,7 @@ static void nv04_dfp_commit(struct drm_encoder *encoder)
444 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); 444 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
445 struct dcb_entry *dcbe = nv_encoder->dcb; 445 struct dcb_entry *dcbe = nv_encoder->dcb;
446 int head = nouveau_crtc(encoder->crtc)->index; 446 int head = nouveau_crtc(encoder->crtc)->index;
447 struct drm_encoder *slave_encoder;
447 448
448 if (dcbe->type == OUTPUT_TMDS) 449 if (dcbe->type == OUTPUT_TMDS)
449 run_tmds_table(dev, dcbe, head, nv_encoder->mode.clock); 450 run_tmds_table(dev, dcbe, head, nv_encoder->mode.clock);
@@ -462,9 +463,10 @@ static void nv04_dfp_commit(struct drm_encoder *encoder)
462 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + nv04_dac_output_offset(encoder), 0x00100000); 463 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + nv04_dac_output_offset(encoder), 0x00100000);
463 464
464 /* Init external transmitters */ 465 /* Init external transmitters */
465 if (get_tmds_slave(encoder)) 466 slave_encoder = get_tmds_slave(encoder);
466 get_slave_funcs(get_tmds_slave(encoder))->mode_set( 467 if (slave_encoder)
467 encoder, &nv_encoder->mode, &nv_encoder->mode); 468 get_slave_funcs(slave_encoder)->mode_set(
469 slave_encoder, &nv_encoder->mode, &nv_encoder->mode);
468 470
469 helper->dpms(encoder, DRM_MODE_DPMS_ON); 471 helper->dpms(encoder, DRM_MODE_DPMS_ON);
470 472
@@ -473,6 +475,27 @@ static void nv04_dfp_commit(struct drm_encoder *encoder)
473 nv_crtc->index, '@' + ffs(nv_encoder->dcb->or)); 475 nv_crtc->index, '@' + ffs(nv_encoder->dcb->or));
474} 476}
475 477
478static void nv04_dfp_update_backlight(struct drm_encoder *encoder, int mode)
479{
480#ifdef __powerpc__
481 struct drm_device *dev = encoder->dev;
482
483 /* BIOS scripts usually take care of the backlight, thanks
484 * Apple for your consistency.
485 */
486 if (dev->pci_device == 0x0179 || dev->pci_device == 0x0189 ||
487 dev->pci_device == 0x0329) {
488 if (mode == DRM_MODE_DPMS_ON) {
489 nv_mask(dev, NV_PBUS_DEBUG_DUALHEAD_CTL, 0, 1 << 31);
490 nv_mask(dev, NV_PCRTC_GPIO_EXT, 3, 1);
491 } else {
492 nv_mask(dev, NV_PBUS_DEBUG_DUALHEAD_CTL, 1 << 31, 0);
493 nv_mask(dev, NV_PCRTC_GPIO_EXT, 3, 0);
494 }
495 }
496#endif
497}
498
476static inline bool is_powersaving_dpms(int mode) 499static inline bool is_powersaving_dpms(int mode)
477{ 500{
478 return (mode != DRM_MODE_DPMS_ON); 501 return (mode != DRM_MODE_DPMS_ON);
@@ -520,6 +543,7 @@ static void nv04_lvds_dpms(struct drm_encoder *encoder, int mode)
520 LVDS_PANEL_OFF, 0); 543 LVDS_PANEL_OFF, 0);
521 } 544 }
522 545
546 nv04_dfp_update_backlight(encoder, mode);
523 nv04_dfp_update_fp_control(encoder, mode); 547 nv04_dfp_update_fp_control(encoder, mode);
524 548
525 if (mode == DRM_MODE_DPMS_ON) 549 if (mode == DRM_MODE_DPMS_ON)
@@ -543,6 +567,7 @@ static void nv04_tmds_dpms(struct drm_encoder *encoder, int mode)
543 NV_INFO(dev, "Setting dpms mode %d on tmds encoder (output %d)\n", 567 NV_INFO(dev, "Setting dpms mode %d on tmds encoder (output %d)\n",
544 mode, nv_encoder->dcb->index); 568 mode, nv_encoder->dcb->index);
545 569
570 nv04_dfp_update_backlight(encoder, mode);
546 nv04_dfp_update_fp_control(encoder, mode); 571 nv04_dfp_update_fp_control(encoder, mode);
547} 572}
548 573
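The new nv04_dfp_update_backlight() replaces the LVDS_PANEL_ON/OFF register pokes dropped from nouveau_bios.c above, using nv_mask() to flip individual bits of the dual-head control and GPIO registers on the affected PowerBooks. A hedged sketch of that read-modify-write helper over a fake register file; the register indices and bit meanings are placeholders.

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t regs[2];                 /* stand-in MMIO space */

    static uint32_t rd32(unsigned reg) { return regs[reg]; }
    static void     wr32(unsigned reg, uint32_t v) { regs[reg] = v; }

    /* Clear `mask`, then set `value` -- the usual masked update helper. */
    static uint32_t mask32(unsigned reg, uint32_t mask, uint32_t value)
    {
        uint32_t tmp = rd32(reg);
        wr32(reg, (tmp & ~mask) | value);
        return tmp;
    }

    enum { DUALHEAD_CTL = 0, GPIO_EXT = 1 };

    int main(void)
    {
        int backlight_on = 1;                /* DRM_MODE_DPMS_ON analogue */

        if (backlight_on) {
            mask32(DUALHEAD_CTL, 0, 1u << 31);
            mask32(GPIO_EXT, 3, 1);
        } else {
            mask32(DUALHEAD_CTL, 1u << 31, 0);
            mask32(GPIO_EXT, 3, 0);
        }
        printf("ctl=0x%08x gpio=0x%08x\n",
               (unsigned)regs[DUALHEAD_CTL], (unsigned)regs[GPIO_EXT]);
        return 0;
    }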
diff --git a/drivers/gpu/drm/nouveau/nv17_tv.c b/drivers/gpu/drm/nouveau/nv17_tv.c
index eefa5c856932..13cdc05b7c2d 100644
--- a/drivers/gpu/drm/nouveau/nv17_tv.c
+++ b/drivers/gpu/drm/nouveau/nv17_tv.c
@@ -121,18 +121,14 @@ static bool
121get_tv_detect_quirks(struct drm_device *dev, uint32_t *pin_mask) 121get_tv_detect_quirks(struct drm_device *dev, uint32_t *pin_mask)
122{ 122{
123 /* Zotac FX5200 */ 123 /* Zotac FX5200 */
124 if (dev->pdev->device == 0x0322 && 124 if (nv_match_device(dev, 0x0322, 0x19da, 0x1035) ||
125 dev->pdev->subsystem_vendor == 0x19da && 125 nv_match_device(dev, 0x0322, 0x19da, 0x2035)) {
126 (dev->pdev->subsystem_device == 0x1035 ||
127 dev->pdev->subsystem_device == 0x2035)) {
128 *pin_mask = 0xc; 126 *pin_mask = 0xc;
129 return false; 127 return false;
130 } 128 }
131 129
132 /* MSI nForce2 IGP */ 130 /* MSI nForce2 IGP */
133 if (dev->pdev->device == 0x01f0 && 131 if (nv_match_device(dev, 0x01f0, 0x1462, 0x5710)) {
134 dev->pdev->subsystem_vendor == 0x1462 &&
135 dev->pdev->subsystem_device == 0x5710) {
136 *pin_mask = 0xc; 132 *pin_mask = 0xc;
137 return false; 133 return false;
138 } 134 }
diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c
index c95bf9b681dd..91ef93cf1f35 100644
--- a/drivers/gpu/drm/nouveau/nv50_instmem.c
+++ b/drivers/gpu/drm/nouveau/nv50_instmem.c
@@ -139,6 +139,8 @@ nv50_instmem_init(struct drm_device *dev)
139 chan->file_priv = (struct drm_file *)-2; 139 chan->file_priv = (struct drm_file *)-2;
140 dev_priv->fifos[0] = dev_priv->fifos[127] = chan; 140 dev_priv->fifos[0] = dev_priv->fifos[127] = chan;
141 141
142 INIT_LIST_HEAD(&chan->ramht_refs);
143
142 /* Channel's PRAMIN object + heap */ 144 /* Channel's PRAMIN object + heap */
143 ret = nouveau_gpuobj_new_fake(dev, 0, c_offset, c_size, 0, 145 ret = nouveau_gpuobj_new_fake(dev, 0, c_offset, c_size, 0,
144 NULL, &chan->ramin); 146 NULL, &chan->ramin);
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 577239a24fd5..464a81a1990f 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -332,6 +332,11 @@ static void atombios_crtc_set_timing(struct drm_crtc *crtc,
332 args.usV_SyncWidth = 332 args.usV_SyncWidth =
333 cpu_to_le16(mode->crtc_vsync_end - mode->crtc_vsync_start); 333 cpu_to_le16(mode->crtc_vsync_end - mode->crtc_vsync_start);
334 334
335 args.ucOverscanRight = radeon_crtc->h_border;
336 args.ucOverscanLeft = radeon_crtc->h_border;
337 args.ucOverscanBottom = radeon_crtc->v_border;
338 args.ucOverscanTop = radeon_crtc->v_border;
339
335 if (mode->flags & DRM_MODE_FLAG_NVSYNC) 340 if (mode->flags & DRM_MODE_FLAG_NVSYNC)
336 misc |= ATOM_VSYNC_POLARITY; 341 misc |= ATOM_VSYNC_POLARITY;
337 if (mode->flags & DRM_MODE_FLAG_NHSYNC) 342 if (mode->flags & DRM_MODE_FLAG_NHSYNC)
@@ -534,6 +539,20 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
534 pll->algo = PLL_ALGO_LEGACY; 539 pll->algo = PLL_ALGO_LEGACY;
535 pll->flags |= RADEON_PLL_PREFER_CLOSEST_LOWER; 540 pll->flags |= RADEON_PLL_PREFER_CLOSEST_LOWER;
536 } 541 }
542 /* There is some evidence (often anecdotal) that RV515 LVDS
543 * (on some boards at least) prefers the legacy algo. I'm not
 544 sure whether this should be handled generically or on a
545 * case-by-case quirk basis. Both algos should work fine in the
546 * majority of cases.
547 */
548 if ((radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT)) &&
549 (rdev->family == CHIP_RV515)) {
550 /* allow the user to overrride just in case */
551 if (radeon_new_pll == 1)
552 pll->algo = PLL_ALGO_NEW;
553 else
554 pll->algo = PLL_ALGO_LEGACY;
555 }
537 } else { 556 } else {
538 if (encoder->encoder_type != DRM_MODE_ENCODER_DAC) 557 if (encoder->encoder_type != DRM_MODE_ENCODER_DAC)
539 pll->flags |= RADEON_PLL_NO_ODD_POST_DIV; 558 pll->flags |= RADEON_PLL_NO_ODD_POST_DIV;
@@ -1056,11 +1075,11 @@ static int avivo_crtc_set_base(struct drm_crtc *crtc, int x, int y,
1056 1075
1057 if (rdev->family >= CHIP_RV770) { 1076 if (rdev->family >= CHIP_RV770) {
1058 if (radeon_crtc->crtc_id) { 1077 if (radeon_crtc->crtc_id) {
1059 WREG32(R700_D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, 0); 1078 WREG32(R700_D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, upper_32_bits(fb_location));
1060 WREG32(R700_D2GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, 0); 1079 WREG32(R700_D2GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, upper_32_bits(fb_location));
1061 } else { 1080 } else {
1062 WREG32(R700_D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, 0); 1081 WREG32(R700_D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, upper_32_bits(fb_location));
1063 WREG32(R700_D1GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, 0); 1082 WREG32(R700_D1GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, upper_32_bits(fb_location));
1064 } 1083 }
1065 } 1084 }
1066 WREG32(AVIVO_D1GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset, 1085 WREG32(AVIVO_D1GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
@@ -1197,8 +1216,18 @@ int atombios_crtc_mode_set(struct drm_crtc *crtc,
1197 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); 1216 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
1198 struct drm_device *dev = crtc->dev; 1217 struct drm_device *dev = crtc->dev;
1199 struct radeon_device *rdev = dev->dev_private; 1218 struct radeon_device *rdev = dev->dev_private;
1219 struct drm_encoder *encoder;
1220 bool is_tvcv = false;
1200 1221
1201 /* TODO color tiling */ 1222 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
1223 /* find tv std */
1224 if (encoder->crtc == crtc) {
1225 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
1226 if (radeon_encoder->active_device &
1227 (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT))
1228 is_tvcv = true;
1229 }
1230 }
1202 1231
1203 atombios_disable_ss(crtc); 1232 atombios_disable_ss(crtc);
1204 /* always set DCPLL */ 1233 /* always set DCPLL */
@@ -1207,9 +1236,14 @@ int atombios_crtc_mode_set(struct drm_crtc *crtc,
1207 atombios_crtc_set_pll(crtc, adjusted_mode); 1236 atombios_crtc_set_pll(crtc, adjusted_mode);
1208 atombios_enable_ss(crtc); 1237 atombios_enable_ss(crtc);
1209 1238
1210 if (ASIC_IS_AVIVO(rdev)) 1239 if (ASIC_IS_DCE4(rdev))
1211 atombios_set_crtc_dtd_timing(crtc, adjusted_mode); 1240 atombios_set_crtc_dtd_timing(crtc, adjusted_mode);
1212 else { 1241 else if (ASIC_IS_AVIVO(rdev)) {
1242 if (is_tvcv)
1243 atombios_crtc_set_timing(crtc, adjusted_mode);
1244 else
1245 atombios_set_crtc_dtd_timing(crtc, adjusted_mode);
1246 } else {
1213 atombios_crtc_set_timing(crtc, adjusted_mode); 1247 atombios_crtc_set_timing(crtc, adjusted_mode);
1214 if (radeon_crtc->crtc_id == 0) 1248 if (radeon_crtc->crtc_id == 0)
1215 atombios_set_crtc_dtd_timing(crtc, adjusted_mode); 1249 atombios_set_crtc_dtd_timing(crtc, adjusted_mode);
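On RV770+ the primary/secondary surface address HIGH registers are now programmed with upper_32_bits(fb_location) instead of a hard-coded 0, so scanout addresses above 4GB keep their upper bits. A small sketch of splitting a 64-bit address across a HIGH/LOW register pair; the example placement value is invented and the printfs stand in for the WREG32 calls.

    #include <stdint.h>
    #include <stdio.h>
    #include <inttypes.h>

    /* Same helpers the kernel provides, reproduced so the sketch is standalone. */
    static inline uint32_t upper_32_bits(uint64_t v) { return (uint32_t)(v >> 32); }
    static inline uint32_t lower_32_bits(uint64_t v) { return (uint32_t)(v & 0xffffffffu); }

    int main(void)
    {
        uint64_t fb_location = 0x100400000ULL;   /* example placement above 4GB */

        /* Placeholders for WREG32(..._SURFACE_ADDRESS_HIGH / ..._ADDRESS, ...): */
        printf("HIGH = 0x%08" PRIx32 "\n", upper_32_bits(fb_location));
        printf("LOW  = 0x%08" PRIx32 "\n", lower_32_bits(fb_location));
        return 0;
    }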
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 957d5067ad9c..b8b7f010b25f 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -675,6 +675,43 @@ static int evergreen_cp_load_microcode(struct radeon_device *rdev)
675 return 0; 675 return 0;
676} 676}
677 677
678static int evergreen_cp_start(struct radeon_device *rdev)
679{
680 int r;
681 uint32_t cp_me;
682
683 r = radeon_ring_lock(rdev, 7);
684 if (r) {
685 DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
686 return r;
687 }
688 radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5));
689 radeon_ring_write(rdev, 0x1);
690 radeon_ring_write(rdev, 0x0);
691 radeon_ring_write(rdev, rdev->config.evergreen.max_hw_contexts - 1);
692 radeon_ring_write(rdev, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
693 radeon_ring_write(rdev, 0);
694 radeon_ring_write(rdev, 0);
695 radeon_ring_unlock_commit(rdev);
696
697 cp_me = 0xff;
698 WREG32(CP_ME_CNTL, cp_me);
699
700 r = radeon_ring_lock(rdev, 4);
701 if (r) {
702 DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
703 return r;
704 }
705 /* init some VGT regs */
706 radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
707 radeon_ring_write(rdev, (VGT_VERTEX_REUSE_BLOCK_CNTL - PACKET3_SET_CONTEXT_REG_START) >> 2);
708 radeon_ring_write(rdev, 0xe);
709 radeon_ring_write(rdev, 0x10);
710 radeon_ring_unlock_commit(rdev);
711
712 return 0;
713}
714
678int evergreen_cp_resume(struct radeon_device *rdev) 715int evergreen_cp_resume(struct radeon_device *rdev)
679{ 716{
680 u32 tmp; 717 u32 tmp;
@@ -719,7 +756,7 @@ int evergreen_cp_resume(struct radeon_device *rdev)
719 rdev->cp.rptr = RREG32(CP_RB_RPTR); 756 rdev->cp.rptr = RREG32(CP_RB_RPTR);
720 rdev->cp.wptr = RREG32(CP_RB_WPTR); 757 rdev->cp.wptr = RREG32(CP_RB_WPTR);
721 758
722 r600_cp_start(rdev); 759 evergreen_cp_start(rdev);
723 rdev->cp.ready = true; 760 rdev->cp.ready = true;
724 r = radeon_ring_test(rdev); 761 r = radeon_ring_test(rdev);
725 if (r) { 762 if (r) {
@@ -2054,11 +2091,6 @@ int evergreen_resume(struct radeon_device *rdev)
2054 */ 2091 */
2055 /* post card */ 2092 /* post card */
2056 atom_asic_init(rdev->mode_info.atom_context); 2093 atom_asic_init(rdev->mode_info.atom_context);
2057 /* Initialize clocks */
2058 r = radeon_clocks_init(rdev);
2059 if (r) {
2060 return r;
2061 }
2062 2094
2063 r = evergreen_startup(rdev); 2095 r = evergreen_startup(rdev);
2064 if (r) { 2096 if (r) {
@@ -2164,9 +2196,6 @@ int evergreen_init(struct radeon_device *rdev)
2164 radeon_surface_init(rdev); 2196 radeon_surface_init(rdev);
2165 /* Initialize clocks */ 2197 /* Initialize clocks */
2166 radeon_get_clock_info(rdev->ddev); 2198 radeon_get_clock_info(rdev->ddev);
2167 r = radeon_clocks_init(rdev);
2168 if (r)
2169 return r;
2170 /* Fence driver */ 2199 /* Fence driver */
2171 r = radeon_fence_driver_init(rdev); 2200 r = radeon_fence_driver_init(rdev);
2172 if (r) 2201 if (r)
@@ -2236,7 +2265,6 @@ void evergreen_fini(struct radeon_device *rdev)
2236 evergreen_pcie_gart_fini(rdev); 2265 evergreen_pcie_gart_fini(rdev);
2237 radeon_gem_fini(rdev); 2266 radeon_gem_fini(rdev);
2238 radeon_fence_driver_fini(rdev); 2267 radeon_fence_driver_fini(rdev);
2239 radeon_clocks_fini(rdev);
2240 radeon_agp_fini(rdev); 2268 radeon_agp_fini(rdev);
2241 radeon_bo_fini(rdev); 2269 radeon_bo_fini(rdev);
2242 radeon_atombios_fini(rdev); 2270 radeon_atombios_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index d0ebae9dde25..afc18d87fdca 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -2119,10 +2119,7 @@ int r600_cp_start(struct radeon_device *rdev)
2119 } 2119 }
2120 radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5)); 2120 radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5));
2121 radeon_ring_write(rdev, 0x1); 2121 radeon_ring_write(rdev, 0x1);
2122 if (rdev->family >= CHIP_CEDAR) { 2122 if (rdev->family >= CHIP_RV770) {
2123 radeon_ring_write(rdev, 0x0);
2124 radeon_ring_write(rdev, rdev->config.evergreen.max_hw_contexts - 1);
2125 } else if (rdev->family >= CHIP_RV770) {
2126 radeon_ring_write(rdev, 0x0); 2123 radeon_ring_write(rdev, 0x0);
2127 radeon_ring_write(rdev, rdev->config.rv770.max_hw_contexts - 1); 2124 radeon_ring_write(rdev, rdev->config.rv770.max_hw_contexts - 1);
2128 } else { 2125 } else {
@@ -2489,11 +2486,6 @@ int r600_resume(struct radeon_device *rdev)
2489 */ 2486 */
2490 /* post card */ 2487 /* post card */
2491 atom_asic_init(rdev->mode_info.atom_context); 2488 atom_asic_init(rdev->mode_info.atom_context);
2492 /* Initialize clocks */
2493 r = radeon_clocks_init(rdev);
2494 if (r) {
2495 return r;
2496 }
2497 2489
2498 r = r600_startup(rdev); 2490 r = r600_startup(rdev);
2499 if (r) { 2491 if (r) {
@@ -2586,9 +2578,6 @@ int r600_init(struct radeon_device *rdev)
2586 radeon_surface_init(rdev); 2578 radeon_surface_init(rdev);
2587 /* Initialize clocks */ 2579 /* Initialize clocks */
2588 radeon_get_clock_info(rdev->ddev); 2580 radeon_get_clock_info(rdev->ddev);
2589 r = radeon_clocks_init(rdev);
2590 if (r)
2591 return r;
2592 /* Fence driver */ 2581 /* Fence driver */
2593 r = radeon_fence_driver_init(rdev); 2582 r = radeon_fence_driver_init(rdev);
2594 if (r) 2583 if (r)
@@ -2663,7 +2652,6 @@ void r600_fini(struct radeon_device *rdev)
2663 radeon_agp_fini(rdev); 2652 radeon_agp_fini(rdev);
2664 radeon_gem_fini(rdev); 2653 radeon_gem_fini(rdev);
2665 radeon_fence_driver_fini(rdev); 2654 radeon_fence_driver_fini(rdev);
2666 radeon_clocks_fini(rdev);
2667 radeon_bo_fini(rdev); 2655 radeon_bo_fini(rdev);
2668 radeon_atombios_fini(rdev); 2656 radeon_atombios_fini(rdev);
2669 kfree(rdev->bios); 2657 kfree(rdev->bios);
@@ -3541,7 +3529,7 @@ void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo)
3541 * rather than write to HDP_REG_COHERENCY_FLUSH_CNTL 3529 * rather than write to HDP_REG_COHERENCY_FLUSH_CNTL
3542 */ 3530 */
3543 if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740)) { 3531 if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740)) {
3544 void __iomem *ptr = (void *)rdev->gart.table.vram.ptr; 3532 void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
3545 u32 tmp; 3533 u32 tmp;
3546 3534
3547 WREG32(HDP_DEBUG1, 0); 3535 WREG32(HDP_DEBUG1, 0);
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 3dfcfa3ca425..a168d644bf9e 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -1013,6 +1013,11 @@ int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
1013int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data, 1013int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
1014 struct drm_file *filp); 1014 struct drm_file *filp);
1015 1015
1016/* VRAM scratch page for HDP bug */
1017struct r700_vram_scratch {
1018 struct radeon_bo *robj;
1019 volatile uint32_t *ptr;
1020};
1016 1021
1017/* 1022/*
1018 * Core structure, functions and helpers. 1023 * Core structure, functions and helpers.
@@ -1079,6 +1084,7 @@ struct radeon_device {
1079 const struct firmware *pfp_fw; /* r6/700 PFP firmware */ 1084 const struct firmware *pfp_fw; /* r6/700 PFP firmware */
1080 const struct firmware *rlc_fw; /* r6/700 RLC firmware */ 1085 const struct firmware *rlc_fw; /* r6/700 RLC firmware */
1081 struct r600_blit r600_blit; 1086 struct r600_blit r600_blit;
1087 struct r700_vram_scratch vram_scratch;
1082 int msi_enabled; /* msi enabled */ 1088 int msi_enabled; /* msi enabled */
1083 struct r600_ih ih; /* r6/700 interrupt ring */ 1089 struct r600_ih ih; /* r6/700 interrupt ring */
1084 struct workqueue_struct *wq; 1090 struct workqueue_struct *wq;
@@ -1333,8 +1339,6 @@ extern bool radeon_card_posted(struct radeon_device *rdev);
1333extern void radeon_update_bandwidth_info(struct radeon_device *rdev); 1339extern void radeon_update_bandwidth_info(struct radeon_device *rdev);
1334extern void radeon_update_display_priority(struct radeon_device *rdev); 1340extern void radeon_update_display_priority(struct radeon_device *rdev);
1335extern bool radeon_boot_test_post_card(struct radeon_device *rdev); 1341extern bool radeon_boot_test_post_card(struct radeon_device *rdev);
1336extern int radeon_clocks_init(struct radeon_device *rdev);
1337extern void radeon_clocks_fini(struct radeon_device *rdev);
1338extern void radeon_scratch_init(struct radeon_device *rdev); 1342extern void radeon_scratch_init(struct radeon_device *rdev);
1339extern void radeon_surface_init(struct radeon_device *rdev); 1343extern void radeon_surface_init(struct radeon_device *rdev);
1340extern int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data); 1344extern int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data);
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index a21bf88e8c2d..25e1dd197791 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -858,21 +858,3 @@ int radeon_asic_init(struct radeon_device *rdev)
858 return 0; 858 return 0;
859} 859}
860 860
861/*
862 * Wrapper around modesetting bits. Move to radeon_clocks.c?
863 */
864int radeon_clocks_init(struct radeon_device *rdev)
865{
866 int r;
867
868 r = radeon_static_clocks_init(rdev->ddev);
869 if (r) {
870 return r;
871 }
872 DRM_INFO("Clocks initialized !\n");
873 return 0;
874}
875
876void radeon_clocks_fini(struct radeon_device *rdev)
877{
878}
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 61141981880d..ebae14c4b768 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -85,6 +85,19 @@ static inline struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_dev
85 for (i = 0; i < num_indices; i++) { 85 for (i = 0; i < num_indices; i++) {
86 gpio = &i2c_info->asGPIO_Info[i]; 86 gpio = &i2c_info->asGPIO_Info[i];
87 87
88 /* some evergreen boards have bad data for this entry */
89 if (ASIC_IS_DCE4(rdev)) {
90 if ((i == 7) &&
91 (gpio->usClkMaskRegisterIndex == 0x1936) &&
92 (gpio->sucI2cId.ucAccess == 0)) {
93 gpio->sucI2cId.ucAccess = 0x97;
94 gpio->ucDataMaskShift = 8;
95 gpio->ucDataEnShift = 8;
96 gpio->ucDataY_Shift = 8;
97 gpio->ucDataA_Shift = 8;
98 }
99 }
100
88 if (gpio->sucI2cId.ucAccess == id) { 101 if (gpio->sucI2cId.ucAccess == id) {
89 i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4; 102 i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4;
90 i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4; 103 i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4;
@@ -147,6 +160,20 @@ void radeon_atombios_i2c_init(struct radeon_device *rdev)
147 for (i = 0; i < num_indices; i++) { 160 for (i = 0; i < num_indices; i++) {
148 gpio = &i2c_info->asGPIO_Info[i]; 161 gpio = &i2c_info->asGPIO_Info[i];
149 i2c.valid = false; 162 i2c.valid = false;
163
164 /* some evergreen boards have bad data for this entry */
165 if (ASIC_IS_DCE4(rdev)) {
166 if ((i == 7) &&
167 (gpio->usClkMaskRegisterIndex == 0x1936) &&
168 (gpio->sucI2cId.ucAccess == 0)) {
169 gpio->sucI2cId.ucAccess = 0x97;
170 gpio->ucDataMaskShift = 8;
171 gpio->ucDataEnShift = 8;
172 gpio->ucDataY_Shift = 8;
173 gpio->ucDataA_Shift = 8;
174 }
175 }
176
150 i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4; 177 i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4;
151 i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4; 178 i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4;
152 i2c.en_clk_reg = le16_to_cpu(gpio->usClkEnRegisterIndex) * 4; 179 i2c.en_clk_reg = le16_to_cpu(gpio->usClkEnRegisterIndex) * 4;
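The same "bad Evergreen i2c entry" fixup (clock mask register index 0x1936 with a zero access id forced to 0x97, and the data shifts set to 8) is pasted into both radeon_lookup_i2c_gpio() and radeon_atombios_i2c_init(); a small helper would keep the two copies from drifting. A sketch of such a helper over a simplified gpio record; the struct layout is invented so the example stands alone and is not the real ATOM table entry.

    #include <stdint.h>
    #include <stdio.h>
    #include <stdbool.h>

    /* Trimmed-down stand-in for the ATOM gpio/i2c table entry. */
    struct gpio_i2c_entry {
        uint16_t clk_mask_reg_index;
        uint8_t  access;
        uint8_t  data_mask_shift, data_en_shift, data_y_shift, data_a_shift;
    };

    /* Apply the DCE4 quirk to entry `i` if it matches the known-bad pattern. */
    static void fixup_dce4_i2c(struct gpio_i2c_entry *gpio, int i, bool is_dce4)
    {
        if (!is_dce4)
            return;
        if (i == 7 && gpio->clk_mask_reg_index == 0x1936 && gpio->access == 0) {
            gpio->access = 0x97;
            gpio->data_mask_shift = 8;
            gpio->data_en_shift = 8;
            gpio->data_y_shift = 8;
            gpio->data_a_shift = 8;
        }
    }

    int main(void)
    {
        struct gpio_i2c_entry e = { 0x1936, 0, 0, 0, 0, 0 };

        fixup_dce4_i2c(&e, 7, true);
        printf("access now 0x%02x, shifts %u\n",
               (unsigned)e.access, (unsigned)e.data_mask_shift);
        return 0;
    }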
diff --git a/drivers/gpu/drm/radeon/radeon_clocks.c b/drivers/gpu/drm/radeon/radeon_clocks.c
index 14448a740ba6..5249af8931e6 100644
--- a/drivers/gpu/drm/radeon/radeon_clocks.c
+++ b/drivers/gpu/drm/radeon/radeon_clocks.c
@@ -327,6 +327,14 @@ void radeon_get_clock_info(struct drm_device *dev)
327 mpll->max_feedback_div = 0xff; 327 mpll->max_feedback_div = 0xff;
328 mpll->best_vco = 0; 328 mpll->best_vco = 0;
329 329
330 if (!rdev->clock.default_sclk)
331 rdev->clock.default_sclk = radeon_get_engine_clock(rdev);
332 if ((!rdev->clock.default_mclk) && rdev->asic->get_memory_clock)
333 rdev->clock.default_mclk = radeon_get_memory_clock(rdev);
334
335 rdev->pm.current_sclk = rdev->clock.default_sclk;
336 rdev->pm.current_mclk = rdev->clock.default_mclk;
337
330} 338}
331 339
332/* 10 khz */ 340/* 10 khz */
@@ -897,53 +905,3 @@ void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable)
897 } 905 }
898} 906}
899 907
900static void radeon_apply_clock_quirks(struct radeon_device *rdev)
901{
902 uint32_t tmp;
903
904 /* XXX make sure engine is idle */
905
906 if (rdev->family < CHIP_RS600) {
907 tmp = RREG32_PLL(RADEON_SCLK_CNTL);
908 if (ASIC_IS_R300(rdev) || ASIC_IS_RV100(rdev))
909 tmp |= RADEON_SCLK_FORCE_CP | RADEON_SCLK_FORCE_VIP;
910 if ((rdev->family == CHIP_RV250)
911 || (rdev->family == CHIP_RV280))
912 tmp |=
913 RADEON_SCLK_FORCE_DISP1 | RADEON_SCLK_FORCE_DISP2;
914 if ((rdev->family == CHIP_RV350)
915 || (rdev->family == CHIP_RV380))
916 tmp |= R300_SCLK_FORCE_VAP;
917 if (rdev->family == CHIP_R420)
918 tmp |= R300_SCLK_FORCE_PX | R300_SCLK_FORCE_TX;
919 WREG32_PLL(RADEON_SCLK_CNTL, tmp);
920 } else if (rdev->family < CHIP_R600) {
921 tmp = RREG32_PLL(AVIVO_CP_DYN_CNTL);
922 tmp |= AVIVO_CP_FORCEON;
923 WREG32_PLL(AVIVO_CP_DYN_CNTL, tmp);
924
925 tmp = RREG32_PLL(AVIVO_E2_DYN_CNTL);
926 tmp |= AVIVO_E2_FORCEON;
927 WREG32_PLL(AVIVO_E2_DYN_CNTL, tmp);
928
929 tmp = RREG32_PLL(AVIVO_IDCT_DYN_CNTL);
930 tmp |= AVIVO_IDCT_FORCEON;
931 WREG32_PLL(AVIVO_IDCT_DYN_CNTL, tmp);
932 }
933}
934
935int radeon_static_clocks_init(struct drm_device *dev)
936{
937 struct radeon_device *rdev = dev->dev_private;
938
939 /* XXX make sure engine is idle */
940
941 if (radeon_dynclks != -1) {
942 if (radeon_dynclks) {
943 if (rdev->asic->set_clock_gating)
944 radeon_set_clock_gating(rdev, 1);
945 }
946 }
947 radeon_apply_clock_quirks(rdev);
948 return 0;
949}
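
The radeon_clocks.c hunk above seeds rdev->pm.current_sclk and rdev->pm.current_mclk from the BIOS default clocks, querying the asic clock hooks only as a fallback, so later consumers can read a cached value instead of hitting the hardware on every call. A minimal sketch of that cache-at-init pattern, using hypothetical stand-in types and query functions rather than the real radeon KMS structures:

/* Illustrative only: stand-ins for the radeon structures touched above. */
#include <stdio.h>

struct fake_clock_info { unsigned int default_sclk, default_mclk; }; /* filled from BIOS tables */
struct fake_pm_info    { unsigned int current_sclk, current_mclk; }; /* cached working copies */

static unsigned int query_engine_clock(void) { return 72500; }  /* stand-in for radeon_get_engine_clock() */
static unsigned int query_memory_clock(void) { return 100000; } /* stand-in for radeon_get_memory_clock() */

static void get_clock_info(struct fake_clock_info *clk, struct fake_pm_info *pm)
{
	/* fall back to querying the hardware only if the BIOS gave no default */
	if (!clk->default_sclk)
		clk->default_sclk = query_engine_clock();
	if (!clk->default_mclk)
		clk->default_mclk = query_memory_clock();

	/* cache once at init; bandwidth and i2c-prescale code read these later */
	pm->current_sclk = clk->default_sclk;
	pm->current_mclk = clk->default_mclk;
}

int main(void)
{
	struct fake_clock_info clk = { 0, 0 };
	struct fake_pm_info pm;

	get_clock_info(&clk, &pm);
	printf("sclk=%u mclk=%u (10 kHz units)\n", pm.current_sclk, pm.current_mclk);
	return 0;
}

The radeon_device.c and radeon_i2c.c hunks below are the consumer side of the same change.
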
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 31a09cd279ab..a9dd7847d96e 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -999,6 +999,7 @@ static enum drm_connector_status radeon_dp_detect(struct drm_connector *connecto
999 } 999 }
1000 } 1000 }
1001 1001
1002 radeon_connector_update_scratch_regs(connector, ret);
1002 return ret; 1003 return ret;
1003} 1004}
1004 1005
@@ -1050,10 +1051,16 @@ radeon_add_atom_connector(struct drm_device *dev,
1050 uint32_t subpixel_order = SubPixelNone; 1051 uint32_t subpixel_order = SubPixelNone;
1051 bool shared_ddc = false; 1052 bool shared_ddc = false;
1052 1053
1053 /* fixme - tv/cv/din */
1054 if (connector_type == DRM_MODE_CONNECTOR_Unknown) 1054 if (connector_type == DRM_MODE_CONNECTOR_Unknown)
1055 return; 1055 return;
1056 1056
1057 /* if the user selected tv=0 don't try and add the connector */
1058 if (((connector_type == DRM_MODE_CONNECTOR_SVIDEO) ||
1059 (connector_type == DRM_MODE_CONNECTOR_Composite) ||
1060 (connector_type == DRM_MODE_CONNECTOR_9PinDIN)) &&
1061 (radeon_tv == 0))
1062 return;
1063
1057 /* see if we already added it */ 1064 /* see if we already added it */
1058 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 1065 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
1059 radeon_connector = to_radeon_connector(connector); 1066 radeon_connector = to_radeon_connector(connector);
@@ -1208,19 +1215,17 @@ radeon_add_atom_connector(struct drm_device *dev,
1208 case DRM_MODE_CONNECTOR_SVIDEO: 1215 case DRM_MODE_CONNECTOR_SVIDEO:
1209 case DRM_MODE_CONNECTOR_Composite: 1216 case DRM_MODE_CONNECTOR_Composite:
1210 case DRM_MODE_CONNECTOR_9PinDIN: 1217 case DRM_MODE_CONNECTOR_9PinDIN:
1211 if (radeon_tv == 1) { 1218 drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type);
1212 drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type); 1219 drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs);
1213 drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs); 1220 radeon_connector->dac_load_detect = true;
1214 radeon_connector->dac_load_detect = true; 1221 drm_connector_attach_property(&radeon_connector->base,
1215 drm_connector_attach_property(&radeon_connector->base, 1222 rdev->mode_info.load_detect_property,
1216 rdev->mode_info.load_detect_property, 1223 1);
1217 1); 1224 drm_connector_attach_property(&radeon_connector->base,
1218 drm_connector_attach_property(&radeon_connector->base, 1225 rdev->mode_info.tv_std_property,
1219 rdev->mode_info.tv_std_property, 1226 radeon_atombios_get_tv_info(rdev));
1220 radeon_atombios_get_tv_info(rdev)); 1227 /* no HPD on analog connectors */
1221 /* no HPD on analog connectors */ 1228 radeon_connector->hpd.hpd = RADEON_HPD_NONE;
1222 radeon_connector->hpd.hpd = RADEON_HPD_NONE;
1223 }
1224 break; 1229 break;
1225 case DRM_MODE_CONNECTOR_LVDS: 1230 case DRM_MODE_CONNECTOR_LVDS:
1226 radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL); 1231 radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL);
@@ -1271,10 +1276,16 @@ radeon_add_legacy_connector(struct drm_device *dev,
1271 struct radeon_connector *radeon_connector; 1276 struct radeon_connector *radeon_connector;
1272 uint32_t subpixel_order = SubPixelNone; 1277 uint32_t subpixel_order = SubPixelNone;
1273 1278
1274 /* fixme - tv/cv/din */
1275 if (connector_type == DRM_MODE_CONNECTOR_Unknown) 1279 if (connector_type == DRM_MODE_CONNECTOR_Unknown)
1276 return; 1280 return;
1277 1281
1282 /* if the user selected tv=0 don't try and add the connector */
1283 if (((connector_type == DRM_MODE_CONNECTOR_SVIDEO) ||
1284 (connector_type == DRM_MODE_CONNECTOR_Composite) ||
1285 (connector_type == DRM_MODE_CONNECTOR_9PinDIN)) &&
1286 (radeon_tv == 0))
1287 return;
1288
1278 /* see if we already added it */ 1289 /* see if we already added it */
1279 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 1290 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
1280 radeon_connector = to_radeon_connector(connector); 1291 radeon_connector = to_radeon_connector(connector);
@@ -1346,26 +1357,24 @@ radeon_add_legacy_connector(struct drm_device *dev,
1346 case DRM_MODE_CONNECTOR_SVIDEO: 1357 case DRM_MODE_CONNECTOR_SVIDEO:
1347 case DRM_MODE_CONNECTOR_Composite: 1358 case DRM_MODE_CONNECTOR_Composite:
1348 case DRM_MODE_CONNECTOR_9PinDIN: 1359 case DRM_MODE_CONNECTOR_9PinDIN:
1349 if (radeon_tv == 1) { 1360 drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type);
1350 drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type); 1361 drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs);
1351 drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs); 1362 radeon_connector->dac_load_detect = true;
1352 radeon_connector->dac_load_detect = true; 1363 /* RS400,RC410,RS480 chipset seems to report a lot
1353 /* RS400,RC410,RS480 chipset seems to report a lot 1364 * of false positive on load detect, we haven't yet
1354 * of false positive on load detect, we haven't yet 1365 * found a way to make load detect reliable on those
1355 * found a way to make load detect reliable on those 1366 * chipset, thus just disable it for TV.
1356 * chipset, thus just disable it for TV. 1367 */
1357 */ 1368 if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480)
1358 if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480) 1369 radeon_connector->dac_load_detect = false;
1359 radeon_connector->dac_load_detect = false; 1370 drm_connector_attach_property(&radeon_connector->base,
1360 drm_connector_attach_property(&radeon_connector->base, 1371 rdev->mode_info.load_detect_property,
1361 rdev->mode_info.load_detect_property, 1372 radeon_connector->dac_load_detect);
1362 radeon_connector->dac_load_detect); 1373 drm_connector_attach_property(&radeon_connector->base,
1363 drm_connector_attach_property(&radeon_connector->base, 1374 rdev->mode_info.tv_std_property,
1364 rdev->mode_info.tv_std_property, 1375 radeon_combios_get_tv_info(rdev));
1365 radeon_combios_get_tv_info(rdev)); 1376 /* no HPD on analog connectors */
1366 /* no HPD on analog connectors */ 1377 radeon_connector->hpd.hpd = RADEON_HPD_NONE;
1367 radeon_connector->hpd.hpd = RADEON_HPD_NONE;
1368 }
1369 break; 1378 break;
1370 case DRM_MODE_CONNECTOR_LVDS: 1379 case DRM_MODE_CONNECTOR_LVDS:
1371 drm_connector_init(dev, &radeon_connector->base, &radeon_lvds_connector_funcs, connector_type); 1380 drm_connector_init(dev, &radeon_connector->base, &radeon_lvds_connector_funcs, connector_type);
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 69b3c2291e92..256d204a6d24 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -293,30 +293,20 @@ bool radeon_card_posted(struct radeon_device *rdev)
293void radeon_update_bandwidth_info(struct radeon_device *rdev) 293void radeon_update_bandwidth_info(struct radeon_device *rdev)
294{ 294{
295 fixed20_12 a; 295 fixed20_12 a;
296 u32 sclk, mclk; 296 u32 sclk = rdev->pm.current_sclk;
297 u32 mclk = rdev->pm.current_mclk;
297 298
298 if (rdev->flags & RADEON_IS_IGP) { 299 /* sclk/mclk in Mhz */
299 sclk = radeon_get_engine_clock(rdev); 300 a.full = dfixed_const(100);
300 mclk = rdev->clock.default_mclk; 301 rdev->pm.sclk.full = dfixed_const(sclk);
301 302 rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a);
302 a.full = dfixed_const(100); 303 rdev->pm.mclk.full = dfixed_const(mclk);
303 rdev->pm.sclk.full = dfixed_const(sclk); 304 rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a);
304 rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a);
305 rdev->pm.mclk.full = dfixed_const(mclk);
306 rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a);
307 305
306 if (rdev->flags & RADEON_IS_IGP) {
308 a.full = dfixed_const(16); 307 a.full = dfixed_const(16);
309 /* core_bandwidth = sclk(Mhz) * 16 */ 308 /* core_bandwidth = sclk(Mhz) * 16 */
310 rdev->pm.core_bandwidth.full = dfixed_div(rdev->pm.sclk, a); 309 rdev->pm.core_bandwidth.full = dfixed_div(rdev->pm.sclk, a);
311 } else {
312 sclk = radeon_get_engine_clock(rdev);
313 mclk = radeon_get_memory_clock(rdev);
314
315 a.full = dfixed_const(100);
316 rdev->pm.sclk.full = dfixed_const(sclk);
317 rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a);
318 rdev->pm.mclk.full = dfixed_const(mclk);
319 rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a);
320 } 310 }
321} 311}
322 312
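
The radeon_update_bandwidth_info() hunk above converts the cached 10 kHz clock values to MHz with the driver's fixed20_12 (20.12 fixed-point) helpers by dividing by 100. A self-contained sketch of that arithmetic, with illustrative helper names rather than the kernel's actual dfixed_* macros:

/* Sketch only: 20.12 fixed-point divide, mirroring dfixed_const/dfixed_div. */
#include <stdint.h>
#include <stdio.h>

typedef struct { uint32_t full; } fixed20_12;

static fixed20_12 fx_const(uint32_t v) { return (fixed20_12){ v << 12 }; }

static fixed20_12 fx_div(fixed20_12 a, fixed20_12 b)
{
	/* widen to 64 bits so the pre-shift cannot overflow */
	return (fixed20_12){ (uint32_t)(((uint64_t)a.full << 12) / b.full) };
}

static double fx_to_double(fixed20_12 a) { return a.full / 4096.0; }

int main(void)
{
	uint32_t sclk_10khz = 72500; /* e.g. a 725 MHz engine clock reported in 10 kHz units */
	fixed20_12 mhz = fx_div(fx_const(sclk_10khz), fx_const(100));

	printf("engine clock: %.2f MHz\n", fx_to_double(mhz)); /* prints 725.00 */
	return 0;
}
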
diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c
index 0416804d8f30..6a13ee38a5b9 100644
--- a/drivers/gpu/drm/radeon/radeon_i2c.c
+++ b/drivers/gpu/drm/radeon/radeon_i2c.c
@@ -213,7 +213,7 @@ static void post_xfer(struct i2c_adapter *i2c_adap)
213 213
214static u32 radeon_get_i2c_prescale(struct radeon_device *rdev) 214static u32 radeon_get_i2c_prescale(struct radeon_device *rdev)
215{ 215{
216 u32 sclk = radeon_get_engine_clock(rdev); 216 u32 sclk = rdev->pm.current_sclk;
217 u32 prescale = 0; 217 u32 prescale = 0;
218 u32 nm; 218 u32 nm;
219 u8 n, m, loop; 219 u8 n, m, loop;
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 8f93e2b4b0c8..efbe975312dc 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -600,7 +600,6 @@ extern bool radeon_get_atom_connector_info_from_supported_devices_table(struct d
600void radeon_enc_destroy(struct drm_encoder *encoder); 600void radeon_enc_destroy(struct drm_encoder *encoder);
601void radeon_copy_fb(struct drm_device *dev, struct drm_gem_object *dst_obj); 601void radeon_copy_fb(struct drm_device *dev, struct drm_gem_object *dst_obj);
602void radeon_combios_asic_init(struct drm_device *dev); 602void radeon_combios_asic_init(struct drm_device *dev);
603extern int radeon_static_clocks_init(struct drm_device *dev);
604bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc, 603bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
605 struct drm_display_mode *mode, 604 struct drm_display_mode *mode,
606 struct drm_display_mode *adjusted_mode); 605 struct drm_display_mode *adjusted_mode);
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 477ba673e1b4..f87efec76236 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -637,8 +637,6 @@ void radeon_pm_fini(struct radeon_device *rdev)
637 } 637 }
638 638
639 radeon_hwmon_fini(rdev); 639 radeon_hwmon_fini(rdev);
640 if (rdev->pm.i2c_bus)
641 radeon_i2c_destroy(rdev->pm.i2c_bus);
642} 640}
643 641
644void radeon_pm_compute_clocks(struct radeon_device *rdev) 642void radeon_pm_compute_clocks(struct radeon_device *rdev)
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index f1c796810117..bfa59db374d2 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -905,6 +905,54 @@ static void rv770_gpu_init(struct radeon_device *rdev)
905 905
906} 906}
907 907
908static int rv770_vram_scratch_init(struct radeon_device *rdev)
909{
910 int r;
911 u64 gpu_addr;
912
913 if (rdev->vram_scratch.robj == NULL) {
914 r = radeon_bo_create(rdev, NULL, RADEON_GPU_PAGE_SIZE,
915 true, RADEON_GEM_DOMAIN_VRAM,
916 &rdev->vram_scratch.robj);
917 if (r) {
918 return r;
919 }
920 }
921
922 r = radeon_bo_reserve(rdev->vram_scratch.robj, false);
923 if (unlikely(r != 0))
924 return r;
925 r = radeon_bo_pin(rdev->vram_scratch.robj,
926 RADEON_GEM_DOMAIN_VRAM, &gpu_addr);
927 if (r) {
928 radeon_bo_unreserve(rdev->vram_scratch.robj);
929 return r;
930 }
931 r = radeon_bo_kmap(rdev->vram_scratch.robj,
932 (void **)&rdev->vram_scratch.ptr);
933 if (r)
934 radeon_bo_unpin(rdev->vram_scratch.robj);
935 radeon_bo_unreserve(rdev->vram_scratch.robj);
936
937 return r;
938}
939
940static void rv770_vram_scratch_fini(struct radeon_device *rdev)
941{
942 int r;
943
944 if (rdev->vram_scratch.robj == NULL) {
945 return;
946 }
947 r = radeon_bo_reserve(rdev->vram_scratch.robj, false);
948 if (likely(r == 0)) {
949 radeon_bo_kunmap(rdev->vram_scratch.robj);
950 radeon_bo_unpin(rdev->vram_scratch.robj);
951 radeon_bo_unreserve(rdev->vram_scratch.robj);
952 }
953 radeon_bo_unref(&rdev->vram_scratch.robj);
954}
955
908int rv770_mc_init(struct radeon_device *rdev) 956int rv770_mc_init(struct radeon_device *rdev)
909{ 957{
910 u32 tmp; 958 u32 tmp;
@@ -970,6 +1018,9 @@ static int rv770_startup(struct radeon_device *rdev)
970 if (r) 1018 if (r)
971 return r; 1019 return r;
972 } 1020 }
1021 r = rv770_vram_scratch_init(rdev);
1022 if (r)
1023 return r;
973 rv770_gpu_init(rdev); 1024 rv770_gpu_init(rdev);
974 r = r600_blit_init(rdev); 1025 r = r600_blit_init(rdev);
975 if (r) { 1026 if (r) {
@@ -1023,11 +1074,6 @@ int rv770_resume(struct radeon_device *rdev)
1023 */ 1074 */
1024 /* post card */ 1075 /* post card */
1025 atom_asic_init(rdev->mode_info.atom_context); 1076 atom_asic_init(rdev->mode_info.atom_context);
1026 /* Initialize clocks */
1027 r = radeon_clocks_init(rdev);
1028 if (r) {
1029 return r;
1030 }
1031 1077
1032 r = rv770_startup(rdev); 1078 r = rv770_startup(rdev);
1033 if (r) { 1079 if (r) {
@@ -1118,9 +1164,6 @@ int rv770_init(struct radeon_device *rdev)
1118 radeon_surface_init(rdev); 1164 radeon_surface_init(rdev);
1119 /* Initialize clocks */ 1165 /* Initialize clocks */
1120 radeon_get_clock_info(rdev->ddev); 1166 radeon_get_clock_info(rdev->ddev);
1121 r = radeon_clocks_init(rdev);
1122 if (r)
1123 return r;
1124 /* Fence driver */ 1167 /* Fence driver */
1125 r = radeon_fence_driver_init(rdev); 1168 r = radeon_fence_driver_init(rdev);
1126 if (r) 1169 if (r)
@@ -1195,9 +1238,9 @@ void rv770_fini(struct radeon_device *rdev)
1195 r600_irq_fini(rdev); 1238 r600_irq_fini(rdev);
1196 radeon_irq_kms_fini(rdev); 1239 radeon_irq_kms_fini(rdev);
1197 rv770_pcie_gart_fini(rdev); 1240 rv770_pcie_gart_fini(rdev);
1241 rv770_vram_scratch_fini(rdev);
1198 radeon_gem_fini(rdev); 1242 radeon_gem_fini(rdev);
1199 radeon_fence_driver_fini(rdev); 1243 radeon_fence_driver_fini(rdev);
1200 radeon_clocks_fini(rdev);
1201 radeon_agp_fini(rdev); 1244 radeon_agp_fini(rdev);
1202 radeon_bo_fini(rdev); 1245 radeon_bo_fini(rdev);
1203 radeon_atombios_fini(rdev); 1246 radeon_atombios_fini(rdev);