author	Dave Airlie <airlied@redhat.com>	2013-07-18 21:56:14 -0400
committer	Dave Airlie <airlied@redhat.com>	2013-07-18 22:12:21 -0400
commit	e13af9a8340685cfe25d0c9f708da7121e0f51dd (patch)
tree	43511021ce5e60c03ee4c2c68c3dc9ffcdc3d399
parent	ee114b97e67b2a572f94982567a21ac4ee17c133 (diff)
parent	50b44a449ff1a19712ebc36ffccf9ac0a68033bf (diff)
Merge tag 'drm-intel-next-2013-07-12' of git://people.freedesktop.org/~danvet/drm-intel into drm-next
Highlights:
- follow-up refactoring after the shared dpll rework that landed in 3.11
- oddball prep cleanups from Ben for ppgtt
- encoder->get_config state tracking infrastructure from Jesse
- used by the experimental fastboot support from Jesse (disabled by default)
- make the error state file official and add it to our sysfs interface (Mika)
- drm_mm prep changes from Ben, prepares to embed the drm_mm_node (which will be used by the vma rework later on)
- interrupt handling rework, follow-up cleanups to the VECS enabling, hpd storm handling and fifo underrun reporting
- Big pile of smaller cleanups, code improvements and related stuff.

* tag 'drm-intel-next-2013-07-12' of git://people.freedesktop.org/~danvet/drm-intel: (72 commits)
  drm/i915: clear DPLL reg when disabling i9xx dplls
  drm/i915: Fix up cpt pixel multiplier enable sequence
  drm/i915: clean up vlv ->pre_pll_enable and pll enable sequence
  drm/i915: move error state to own compilation unit
  drm/i915: Don't attempt to read an unitialized stack value
  drm/i915: Use for_each_pipe() when possible
  drm/i915: don't enable PM_VEBOX_CS_ERROR_INTERRUPT
  drm/i915: unify ring irq refcounts (again)
  drm/i915: kill dev_priv->rps.lock
  drm/i915: queue work outside spinlock in hsw_pm_irq_handler
  drm/i915: streamline hsw_pm_irq_handler
  drm/i915: irq handlers don't need interrupt-safe spinlocks
  drm/i915: kill lpt pch transcoder->crtc mapping code for fifo underruns
  drm/i915: improve GEN7_ERR_INT clearing for fifo underrun reporting
  drm/i915: improve SERR_INT clearing for fifo underrun reporting
  drm/i915: extract ibx_display_interrupt_update
  drm/i915: remove unused members from drm_i915_private
  drm/i915: don't frob mm.suspended when not using ums
  drm/i915: Fix VLV DP RBR/HDMI/DAC PLL LPF coefficients
  drm/i915: WARN if the bios reserved range is bigger than stolen size
  ...

Conflicts:
	drivers/gpu/drm/i915/i915_gem.c
-rw-r--r--	drivers/gpu/drm/drm_mm.c	31
-rw-r--r--	drivers/gpu/drm/i915/Makefile	1
-rw-r--r--	drivers/gpu/drm/i915/i915_debugfs.c	658
-rw-r--r--	drivers/gpu/drm/i915/i915_dma.c	22
-rw-r--r--	drivers/gpu/drm/i915/i915_drv.c	33
-rw-r--r--	drivers/gpu/drm/i915/i915_drv.h	184
-rw-r--r--	drivers/gpu/drm/i915/i915_gem.c	162
-rw-r--r--	drivers/gpu/drm/i915/i915_gem_context.c	2
-rw-r--r--	drivers/gpu/drm/i915/i915_gem_evict.c	6
-rw-r--r--	drivers/gpu/drm/i915/i915_gem_execbuffer.c	21
-rw-r--r--	drivers/gpu/drm/i915/i915_gem_gtt.c	106
-rw-r--r--	drivers/gpu/drm/i915/i915_gem_stolen.c	76
-rw-r--r--	drivers/gpu/drm/i915/i915_gem_tiling.c	14
-rw-r--r--	drivers/gpu/drm/i915/i915_gpu_error.c	971
-rw-r--r--	drivers/gpu/drm/i915/i915_irq.c	742
-rw-r--r--	drivers/gpu/drm/i915/i915_reg.h	6
-rw-r--r--	drivers/gpu/drm/i915/i915_sysfs.c	71
-rw-r--r--	drivers/gpu/drm/i915/i915_trace.h	8
-rw-r--r--	drivers/gpu/drm/i915/intel_display.c	621
-rw-r--r--	drivers/gpu/drm/i915/intel_dp.c	7
-rw-r--r--	drivers/gpu/drm/i915/intel_drv.h	24
-rw-r--r--	drivers/gpu/drm/i915/intel_dvo.c	3
-rw-r--r--	drivers/gpu/drm/i915/intel_fb.c	8
-rw-r--r--	drivers/gpu/drm/i915/intel_lvds.c	21
-rw-r--r--	drivers/gpu/drm/i915/intel_overlay.c	18
-rw-r--r--	drivers/gpu/drm/i915/intel_pm.c	110
-rw-r--r--	drivers/gpu/drm/i915/intel_ringbuffer.c	43
-rw-r--r--	drivers/gpu/drm/i915/intel_ringbuffer.h	5
-rw-r--r--	drivers/gpu/drm/i915/intel_sdvo.c	27
-rw-r--r--	drivers/gpu/drm/i915/intel_sprite.c	8
-rw-r--r--	include/drm/drm_mm.h	6
31 files changed, 2182 insertions, 1833 deletions
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index 543b9b3171d3..fe304f903b13 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -147,33 +147,27 @@ static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
 	}
 }
 
-struct drm_mm_node *drm_mm_create_block(struct drm_mm *mm,
-					unsigned long start,
-					unsigned long size,
-					bool atomic)
+int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
 {
-	struct drm_mm_node *hole, *node;
-	unsigned long end = start + size;
+	struct drm_mm_node *hole;
+	unsigned long end = node->start + node->size;
 	unsigned long hole_start;
 	unsigned long hole_end;
 
+	BUG_ON(node == NULL);
+
+	/* Find the relevant hole to add our node to */
 	drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
-		if (hole_start > start || hole_end < end)
+		if (hole_start > node->start || hole_end < end)
 			continue;
 
-		node = drm_mm_kmalloc(mm, atomic);
-		if (unlikely(node == NULL))
-			return NULL;
-
-		node->start = start;
-		node->size = size;
 		node->mm = mm;
 		node->allocated = 1;
 
 		INIT_LIST_HEAD(&node->hole_stack);
 		list_add(&node->node_list, &hole->node_list);
 
-		if (start == hole_start) {
+		if (node->start == hole_start) {
 			hole->hole_follows = 0;
 			list_del_init(&hole->hole_stack);
 		}
@@ -184,13 +178,14 @@ struct drm_mm_node *drm_mm_create_block(struct drm_mm *mm,
 			node->hole_follows = 1;
 		}
 
-		return node;
+		return 0;
 	}
 
-	WARN(1, "no hole found for block 0x%lx + 0x%lx\n", start, size);
-	return NULL;
+	WARN(1, "no hole found for node 0x%lx + 0x%lx\n",
+	     node->start, node->size);
+	return -ENOSPC;
 }
-EXPORT_SYMBOL(drm_mm_create_block);
+EXPORT_SYMBOL(drm_mm_reserve_node);
 
 struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *hole_node,
 					     unsigned long size,
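The drm_mm change above is the prep work called out in the merge highlights: the allocating drm_mm_create_block(), which returned a node pointer or NULL, becomes drm_mm_reserve_node(), which takes a caller-provided node with node->start and node->size already filled in and returns 0 or -ENOSPC. Because the node is now caller-owned, it can later be embedded in a larger struct (the vma rework mentioned in the highlights). A minimal caller-side sketch of the new contract follows; the wrapper function and its names are illustrative, not from this tree:

#include <drm/drm_mm.h>
#include <linux/slab.h>

/* Illustrative only: reserve a fixed [start, start + size) range with the
 * reworked API. drm_mm no longer kmallocs the node; the caller owns it. */
static int example_reserve_range(struct drm_mm *mm,
				 unsigned long start, unsigned long size,
				 struct drm_mm_node **out)
{
	struct drm_mm_node *node;
	int ret;

	node = kzalloc(sizeof(*node), GFP_KERNEL);	/* or embed in a bigger struct */
	if (!node)
		return -ENOMEM;

	node->start = start;	/* placement is pre-filled by the caller ... */
	node->size = size;	/* ... instead of being passed as arguments */

	ret = drm_mm_reserve_node(mm, node);	/* 0, or -ENOSPC if no hole fits */
	if (ret) {
		kfree(node);
		return ret;
	}

	*out = node;
	return 0;
}

The int return also lets callers tell "no space" apart from allocation failure, which the old NULL return conflated.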
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 40034ecefd3b..9d1da7cceb21 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -5,6 +5,7 @@
 ccflags-y := -Iinclude/drm
 i915-y := i915_drv.o i915_dma.o i915_irq.o \
 	  i915_debugfs.o \
+	  i915_gpu_error.o \
 	  i915_suspend.o \
 	  i915_gem.o \
 	  i915_gem_context.o \
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 47d6c748057e..86379799dab8 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -30,7 +30,6 @@
 #include <linux/debugfs.h>
 #include <linux/slab.h>
 #include <linux/export.h>
-#include <generated/utsrelease.h>
 #include <drm/drmP.h>
 #include "intel_drv.h"
 #include "intel_ringbuffer.h"
@@ -90,16 +89,6 @@ static const char *get_tiling_flag(struct drm_i915_gem_object *obj)
 	}
 }
 
-static const char *cache_level_str(int type)
-{
-	switch (type) {
-	case I915_CACHE_NONE: return " uncached";
-	case I915_CACHE_LLC: return " snooped (LLC)";
-	case I915_CACHE_LLC_MLC: return " snooped (LLC+MLC)";
-	default: return "";
-	}
-}
-
 static void
 describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 {
@@ -113,7 +102,7 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 		   obj->last_read_seqno,
 		   obj->last_write_seqno,
 		   obj->last_fenced_seqno,
-		   cache_level_str(obj->cache_level),
+		   i915_cache_level_str(obj->cache_level),
 		   obj->dirty ? " dirty" : "",
 		   obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
 	if (obj->base.name)
@@ -122,9 +111,9 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 		seq_printf(m, " (pinned x %d)", obj->pin_count);
 	if (obj->fence_reg != I915_FENCE_REG_NONE)
 		seq_printf(m, " (fence: %d)", obj->fence_reg);
-	if (obj->gtt_space != NULL)
-		seq_printf(m, " (gtt offset: %08x, size: %08x)",
-			   obj->gtt_offset, (unsigned int)obj->gtt_space->size);
+	if (i915_gem_obj_ggtt_bound(obj))
+		seq_printf(m, " (gtt offset: %08lx, size: %08x)",
+			   i915_gem_obj_ggtt_offset(obj), (unsigned int)i915_gem_obj_ggtt_size(obj));
 	if (obj->stolen)
 		seq_printf(m, " (stolen: %08lx)", obj->stolen->start);
 	if (obj->pin_mappable || obj->fault_mappable) {
@@ -157,11 +146,11 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
 
 	switch (list) {
 	case ACTIVE_LIST:
-		seq_printf(m, "Active:\n");
+		seq_puts(m, "Active:\n");
 		head = &dev_priv->mm.active_list;
 		break;
 	case INACTIVE_LIST:
-		seq_printf(m, "Inactive:\n");
+		seq_puts(m, "Inactive:\n");
 		head = &dev_priv->mm.inactive_list;
 		break;
 	default:
@@ -171,11 +160,11 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
 
 	total_obj_size = total_gtt_size = count = 0;
 	list_for_each_entry(obj, head, mm_list) {
-		seq_printf(m, "   ");
+		seq_puts(m, "   ");
 		describe_obj(m, obj);
-		seq_printf(m, "\n");
+		seq_putc(m, '\n');
 		total_obj_size += obj->base.size;
-		total_gtt_size += obj->gtt_space->size;
+		total_gtt_size += i915_gem_obj_ggtt_size(obj);
 		count++;
 	}
 	mutex_unlock(&dev->struct_mutex);
@@ -187,10 +176,10 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
 
 #define count_objects(list, member) do { \
 	list_for_each_entry(obj, list, member) { \
-		size += obj->gtt_space->size; \
+		size += i915_gem_obj_ggtt_size(obj); \
 		++count; \
 		if (obj->map_and_fenceable) { \
-			mappable_size += obj->gtt_space->size; \
+			mappable_size += i915_gem_obj_ggtt_size(obj); \
 			++mappable_count; \
 		} \
 	} \
@@ -209,7 +198,7 @@ static int per_file_stats(int id, void *ptr, void *data)
 	stats->count++;
 	stats->total += obj->base.size;
 
-	if (obj->gtt_space) {
+	if (i915_gem_obj_ggtt_bound(obj)) {
 		if (!list_empty(&obj->ring_list))
 			stats->active += obj->base.size;
 		else
@@ -222,7 +211,7 @@ static int per_file_stats(int id, void *ptr, void *data)
 	return 0;
 }
 
-static int i915_gem_object_info(struct seq_file *m, void* data)
+static int i915_gem_object_info(struct seq_file *m, void *data)
 {
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
@@ -267,11 +256,11 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
 	size = count = mappable_size = mappable_count = 0;
 	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
 		if (obj->fault_mappable) {
-			size += obj->gtt_space->size;
+			size += i915_gem_obj_ggtt_size(obj);
 			++count;
 		}
 		if (obj->pin_mappable) {
-			mappable_size += obj->gtt_space->size;
+			mappable_size += i915_gem_obj_ggtt_size(obj);
 			++mappable_count;
 		}
 		if (obj->madv == I915_MADV_DONTNEED) {
@@ -290,7 +279,7 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
 		   dev_priv->gtt.total,
 		   dev_priv->gtt.mappable_end - dev_priv->gtt.start);
 
-	seq_printf(m, "\n");
+	seq_putc(m, '\n');
 	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
 		struct file_stats stats;
 
@@ -310,7 +299,7 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
 	return 0;
 }
 
-static int i915_gem_gtt_info(struct seq_file *m, void* data)
+static int i915_gem_gtt_info(struct seq_file *m, void *data)
 {
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
@@ -329,11 +318,11 @@ static int i915_gem_gtt_info(struct seq_file *m, void* data)
 		if (list == PINNED_LIST && obj->pin_count == 0)
 			continue;
 
-		seq_printf(m, "   ");
+		seq_puts(m, "   ");
 		describe_obj(m, obj);
-		seq_printf(m, "\n");
+		seq_putc(m, '\n');
 		total_obj_size += obj->base.size;
-		total_gtt_size += obj->gtt_space->size;
+		total_gtt_size += i915_gem_obj_ggtt_size(obj);
 		count++;
 	}
 
@@ -371,20 +360,22 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
 				   pipe, plane);
 		}
 		if (work->enable_stall_check)
-			seq_printf(m, "Stall check enabled, ");
+			seq_puts(m, "Stall check enabled, ");
 		else
-			seq_printf(m, "Stall check waiting for page flip ioctl, ");
+			seq_puts(m, "Stall check waiting for page flip ioctl, ");
 		seq_printf(m, "%d prepares\n", atomic_read(&work->pending));
 
 		if (work->old_fb_obj) {
 			struct drm_i915_gem_object *obj = work->old_fb_obj;
 			if (obj)
-				seq_printf(m, "Old framebuffer gtt_offset 0x%08x\n", obj->gtt_offset);
+				seq_printf(m, "Old framebuffer gtt_offset 0x%08lx\n",
+					   i915_gem_obj_ggtt_offset(obj));
 		}
 		if (work->pending_flip_obj) {
 			struct drm_i915_gem_object *obj = work->pending_flip_obj;
 			if (obj)
-				seq_printf(m, "New framebuffer gtt_offset 0x%08x\n", obj->gtt_offset);
+				seq_printf(m, "New framebuffer gtt_offset 0x%08lx\n",
+					   i915_gem_obj_ggtt_offset(obj));
 		}
 	}
 	spin_unlock_irqrestore(&dev->event_lock, flags);
@@ -424,7 +415,7 @@ static int i915_gem_request_info(struct seq_file *m, void *data)
 	mutex_unlock(&dev->struct_mutex);
 
 	if (count == 0)
-		seq_printf(m, "No requests\n");
+		seq_puts(m, "No requests\n");
 
 	return 0;
 }
@@ -574,10 +565,10 @@ static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
 		seq_printf(m, "Fence %d, pin count = %d, object = ",
 			   i, dev_priv->fence_regs[i].pin_count);
 		if (obj == NULL)
-			seq_printf(m, "unused");
+			seq_puts(m, "unused");
 		else
 			describe_obj(m, obj);
-		seq_printf(m, "\n");
+		seq_putc(m, '\n');
 	}
 
 	mutex_unlock(&dev->struct_mutex);
@@ -606,361 +597,6 @@ static int i915_hws_info(struct seq_file *m, void *data)
 	return 0;
 }
 
-static const char *ring_str(int ring)
-{
-	switch (ring) {
-	case RCS: return "render";
-	case VCS: return "bsd";
-	case BCS: return "blt";
-	case VECS: return "vebox";
-	default: return "";
-	}
-}
-
-static const char *pin_flag(int pinned)
-{
-	if (pinned > 0)
-		return " P";
-	else if (pinned < 0)
-		return " p";
-	else
-		return "";
-}
-
-static const char *tiling_flag(int tiling)
-{
-	switch (tiling) {
-	default:
-	case I915_TILING_NONE: return "";
-	case I915_TILING_X: return " X";
-	case I915_TILING_Y: return " Y";
-	}
-}
-
-static const char *dirty_flag(int dirty)
-{
-	return dirty ? " dirty" : "";
-}
-
-static const char *purgeable_flag(int purgeable)
-{
-	return purgeable ? " purgeable" : "";
-}
-
-static bool __i915_error_ok(struct drm_i915_error_state_buf *e)
-{
-
-	if (!e->err && WARN(e->bytes > (e->size - 1), "overflow")) {
-		e->err = -ENOSPC;
-		return false;
-	}
-
-	if (e->bytes == e->size - 1 || e->err)
-		return false;
-
-	return true;
-}
-
-static bool __i915_error_seek(struct drm_i915_error_state_buf *e,
-			      unsigned len)
-{
-	if (e->pos + len <= e->start) {
-		e->pos += len;
-		return false;
-	}
-
-	/* First vsnprintf needs to fit in its entirety for memmove */
-	if (len >= e->size) {
-		e->err = -EIO;
-		return false;
-	}
-
-	return true;
-}
-
-static void __i915_error_advance(struct drm_i915_error_state_buf *e,
-				 unsigned len)
-{
-	/* If this is first printf in this window, adjust it so that
-	 * start position matches start of the buffer
-	 */
-
-	if (e->pos < e->start) {
-		const size_t off = e->start - e->pos;
-
-		/* Should not happen but be paranoid */
-		if (off > len || e->bytes) {
-			e->err = -EIO;
-			return;
-		}
-
-		memmove(e->buf, e->buf + off, len - off);
-		e->bytes = len - off;
-		e->pos = e->start;
-		return;
-	}
-
-	e->bytes += len;
-	e->pos += len;
-}
-
-static void i915_error_vprintf(struct drm_i915_error_state_buf *e,
-			       const char *f, va_list args)
-{
-	unsigned len;
-
-	if (!__i915_error_ok(e))
-		return;
-
-	/* Seek the first printf which is hits start position */
-	if (e->pos < e->start) {
-		len = vsnprintf(NULL, 0, f, args);
-		if (!__i915_error_seek(e, len))
-			return;
-	}
-
-	len = vsnprintf(e->buf + e->bytes, e->size - e->bytes, f, args);
-	if (len >= e->size - e->bytes)
-		len = e->size - e->bytes - 1;
-
-	__i915_error_advance(e, len);
-}
-
-static void i915_error_puts(struct drm_i915_error_state_buf *e,
-			    const char *str)
-{
-	unsigned len;
-
-	if (!__i915_error_ok(e))
-		return;
-
-	len = strlen(str);
-
-	/* Seek the first printf which is hits start position */
-	if (e->pos < e->start) {
-		if (!__i915_error_seek(e, len))
-			return;
-	}
-
-	if (len >= e->size - e->bytes)
-		len = e->size - e->bytes - 1;
-	memcpy(e->buf + e->bytes, str, len);
-
-	__i915_error_advance(e, len);
-}
-
-void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...)
-{
-	va_list args;
-
-	va_start(args, f);
-	i915_error_vprintf(e, f, args);
-	va_end(args);
-}
-
-#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
-#define err_puts(e, s) i915_error_puts(e, s)
-
-static void print_error_buffers(struct drm_i915_error_state_buf *m,
-				const char *name,
-				struct drm_i915_error_buffer *err,
-				int count)
-{
-	err_printf(m, "%s [%d]:\n", name, count);
-
-	while (count--) {
-		err_printf(m, "  %08x %8u %02x %02x %x %x",
-			   err->gtt_offset,
-			   err->size,
-			   err->read_domains,
-			   err->write_domain,
-			   err->rseqno, err->wseqno);
-		err_puts(m, pin_flag(err->pinned));
-		err_puts(m, tiling_flag(err->tiling));
-		err_puts(m, dirty_flag(err->dirty));
-		err_puts(m, purgeable_flag(err->purgeable));
-		err_puts(m, err->ring != -1 ? " " : "");
-		err_puts(m, ring_str(err->ring));
-		err_puts(m, cache_level_str(err->cache_level));
-
-		if (err->name)
-			err_printf(m, " (name: %d)", err->name);
-		if (err->fence_reg != I915_FENCE_REG_NONE)
-			err_printf(m, " (fence: %d)", err->fence_reg);
-
-		err_puts(m, "\n");
-		err++;
-	}
-}
-
-static void i915_ring_error_state(struct drm_i915_error_state_buf *m,
-				  struct drm_device *dev,
-				  struct drm_i915_error_state *error,
-				  unsigned ring)
-{
-	BUG_ON(ring >= I915_NUM_RINGS); /* shut up confused gcc */
-	err_printf(m, "%s command stream:\n", ring_str(ring));
-	err_printf(m, "  HEAD: 0x%08x\n", error->head[ring]);
-	err_printf(m, "  TAIL: 0x%08x\n", error->tail[ring]);
-	err_printf(m, "  CTL: 0x%08x\n", error->ctl[ring]);
-	err_printf(m, "  ACTHD: 0x%08x\n", error->acthd[ring]);
-	err_printf(m, "  IPEIR: 0x%08x\n", error->ipeir[ring]);
-	err_printf(m, "  IPEHR: 0x%08x\n", error->ipehr[ring]);
-	err_printf(m, "  INSTDONE: 0x%08x\n", error->instdone[ring]);
-	if (ring == RCS && INTEL_INFO(dev)->gen >= 4)
-		err_printf(m, "  BBADDR: 0x%08llx\n", error->bbaddr);
-
-	if (INTEL_INFO(dev)->gen >= 4)
-		err_printf(m, "  INSTPS: 0x%08x\n", error->instps[ring]);
-	err_printf(m, "  INSTPM: 0x%08x\n", error->instpm[ring]);
-	err_printf(m, "  FADDR: 0x%08x\n", error->faddr[ring]);
-	if (INTEL_INFO(dev)->gen >= 6) {
-		err_printf(m, "  RC PSMI: 0x%08x\n", error->rc_psmi[ring]);
-		err_printf(m, "  FAULT_REG: 0x%08x\n", error->fault_reg[ring]);
-		err_printf(m, "  SYNC_0: 0x%08x [last synced 0x%08x]\n",
-			   error->semaphore_mboxes[ring][0],
-			   error->semaphore_seqno[ring][0]);
-		err_printf(m, "  SYNC_1: 0x%08x [last synced 0x%08x]\n",
-			   error->semaphore_mboxes[ring][1],
-			   error->semaphore_seqno[ring][1]);
-	}
-	err_printf(m, "  seqno: 0x%08x\n", error->seqno[ring]);
-	err_printf(m, "  waiting: %s\n", yesno(error->waiting[ring]));
-	err_printf(m, "  ring->head: 0x%08x\n", error->cpu_ring_head[ring]);
-	err_printf(m, "  ring->tail: 0x%08x\n", error->cpu_ring_tail[ring]);
-}
-
-struct i915_error_state_file_priv {
-	struct drm_device *dev;
-	struct drm_i915_error_state *error;
-};
-
-
-static int i915_error_state(struct i915_error_state_file_priv *error_priv,
-			    struct drm_i915_error_state_buf *m)
-
-{
-	struct drm_device *dev = error_priv->dev;
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_i915_error_state *error = error_priv->error;
-	struct intel_ring_buffer *ring;
-	int i, j, page, offset, elt;
-
-	if (!error) {
-		err_printf(m, "no error state collected\n");
-		return 0;
-	}
-
-	err_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec,
-		   error->time.tv_usec);
-	err_printf(m, "Kernel: " UTS_RELEASE "\n");
-	err_printf(m, "PCI ID: 0x%04x\n", dev->pci_device);
-	err_printf(m, "EIR: 0x%08x\n", error->eir);
-	err_printf(m, "IER: 0x%08x\n", error->ier);
-	err_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);
-	err_printf(m, "FORCEWAKE: 0x%08x\n", error->forcewake);
-	err_printf(m, "DERRMR: 0x%08x\n", error->derrmr);
-	err_printf(m, "CCID: 0x%08x\n", error->ccid);
-
-	for (i = 0; i < dev_priv->num_fence_regs; i++)
-		err_printf(m, "  fence[%d] = %08llx\n", i, error->fence[i]);
-
-	for (i = 0; i < ARRAY_SIZE(error->extra_instdone); i++)
-		err_printf(m, "  INSTDONE_%d: 0x%08x\n", i,
-			   error->extra_instdone[i]);
-
-	if (INTEL_INFO(dev)->gen >= 6) {
-		err_printf(m, "ERROR: 0x%08x\n", error->error);
-		err_printf(m, "DONE_REG: 0x%08x\n", error->done_reg);
-	}
-
-	if (INTEL_INFO(dev)->gen == 7)
-		err_printf(m, "ERR_INT: 0x%08x\n", error->err_int);
-
-	for_each_ring(ring, dev_priv, i)
-		i915_ring_error_state(m, dev, error, i);
-
-	if (error->active_bo)
-		print_error_buffers(m, "Active",
-				    error->active_bo,
-				    error->active_bo_count);
-
-	if (error->pinned_bo)
-		print_error_buffers(m, "Pinned",
-				    error->pinned_bo,
-				    error->pinned_bo_count);
-
-	for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
-		struct drm_i915_error_object *obj;
-
-		if ((obj = error->ring[i].batchbuffer)) {
-			err_printf(m, "%s --- gtt_offset = 0x%08x\n",
-				   dev_priv->ring[i].name,
-				   obj->gtt_offset);
-			offset = 0;
-			for (page = 0; page < obj->page_count; page++) {
-				for (elt = 0; elt < PAGE_SIZE/4; elt++) {
-					err_printf(m, "%08x : %08x\n", offset,
-						   obj->pages[page][elt]);
-					offset += 4;
-				}
-			}
-		}
-
-		if (error->ring[i].num_requests) {
-			err_printf(m, "%s --- %d requests\n",
-				   dev_priv->ring[i].name,
-				   error->ring[i].num_requests);
-			for (j = 0; j < error->ring[i].num_requests; j++) {
-				err_printf(m, "  seqno 0x%08x, emitted %ld, tail 0x%08x\n",
-					   error->ring[i].requests[j].seqno,
-					   error->ring[i].requests[j].jiffies,
-					   error->ring[i].requests[j].tail);
-			}
-		}
-
-		if ((obj = error->ring[i].ringbuffer)) {
-			err_printf(m, "%s --- ringbuffer = 0x%08x\n",
-				   dev_priv->ring[i].name,
-				   obj->gtt_offset);
-			offset = 0;
-			for (page = 0; page < obj->page_count; page++) {
-				for (elt = 0; elt < PAGE_SIZE/4; elt++) {
-					err_printf(m, "%08x : %08x\n",
-						   offset,
-						   obj->pages[page][elt]);
-					offset += 4;
-				}
-			}
-		}
-
-		obj = error->ring[i].ctx;
-		if (obj) {
-			err_printf(m, "%s --- HW Context = 0x%08x\n",
-				   dev_priv->ring[i].name,
-				   obj->gtt_offset);
-			offset = 0;
-			for (elt = 0; elt < PAGE_SIZE/16; elt += 4) {
-				err_printf(m, "[%04x] %08x %08x %08x %08x\n",
-					   offset,
-					   obj->pages[0][elt],
-					   obj->pages[0][elt+1],
-					   obj->pages[0][elt+2],
-					   obj->pages[0][elt+3]);
-				offset += 16;
-			}
-		}
-	}
-
-	if (error->overlay)
-		intel_overlay_print_error_state(m, error->overlay);
-
-	if (error->display)
-		intel_display_print_error_state(m, dev, error->display);
-
-	return 0;
-}
-
 static ssize_t
 i915_error_state_write(struct file *filp,
 		       const char __user *ubuf,
@@ -986,9 +622,7 @@ i915_error_state_write,
 static int i915_error_state_open(struct inode *inode, struct file *file)
 {
 	struct drm_device *dev = inode->i_private;
-	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct i915_error_state_file_priv *error_priv;
-	unsigned long flags;
 
 	error_priv = kzalloc(sizeof(*error_priv), GFP_KERNEL);
 	if (!error_priv)
@@ -996,11 +630,7 @@ static int i915_error_state_open(struct inode *inode, struct file *file)
 
 	error_priv->dev = dev;
 
-	spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
-	error_priv->error = dev_priv->gpu_error.first_error;
-	if (error_priv->error)
-		kref_get(&error_priv->error->ref);
-	spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
+	i915_error_state_get(dev, error_priv);
 
 	file->private_data = error_priv;
 
@@ -1011,8 +641,7 @@ static int i915_error_state_release(struct inode *inode, struct file *file)
 {
 	struct i915_error_state_file_priv *error_priv = file->private_data;
 
-	if (error_priv->error)
-		kref_put(&error_priv->error->ref, i915_error_state_free);
+	i915_error_state_put(error_priv);
 	kfree(error_priv);
 
 	return 0;
@@ -1025,40 +654,15 @@ static ssize_t i915_error_state_read(struct file *file, char __user *userbuf,
 	struct drm_i915_error_state_buf error_str;
 	loff_t tmp_pos = 0;
 	ssize_t ret_count = 0;
-	int ret = 0;
-
-	memset(&error_str, 0, sizeof(error_str));
-
-	/* We need to have enough room to store any i915_error_state printf
-	 * so that we can move it to start position.
-	 */
-	error_str.size = count + 1 > PAGE_SIZE ? count + 1 : PAGE_SIZE;
-	error_str.buf = kmalloc(error_str.size,
-				GFP_TEMPORARY | __GFP_NORETRY | __GFP_NOWARN);
-
-	if (error_str.buf == NULL) {
-		error_str.size = PAGE_SIZE;
-		error_str.buf = kmalloc(error_str.size, GFP_TEMPORARY);
-	}
-
-	if (error_str.buf == NULL) {
-		error_str.size = 128;
-		error_str.buf = kmalloc(error_str.size, GFP_TEMPORARY);
-	}
-
-	if (error_str.buf == NULL)
-		return -ENOMEM;
-
-	error_str.start = *pos;
+	int ret;
 
-	ret = i915_error_state(error_priv, &error_str);
+	ret = i915_error_state_buf_init(&error_str, count, *pos);
 	if (ret)
-		goto out;
+		return ret;
 
-	if (error_str.bytes == 0 && error_str.err) {
-		ret = error_str.err;
+	ret = i915_error_state_to_str(&error_str, error_priv);
+	if (ret)
 		goto out;
-	}
 
 	ret_count = simple_read_from_buffer(userbuf, count, &tmp_pos,
 					    error_str.buf,
@@ -1069,7 +673,7 @@ static ssize_t i915_error_state_read(struct file *file, char __user *userbuf,
 	else
 		*pos = error_str.start + ret_count;
 out:
-	kfree(error_str.buf);
+	i915_error_state_buf_release(&error_str);
 	return ret ?: ret_count;
 }
 
@@ -1246,7 +850,7 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
 			   (freq_sts >> 8) & 0xff));
 		mutex_unlock(&dev_priv->rps.hw_lock);
 	} else {
-		seq_printf(m, "no P-state info available\n");
+		seq_puts(m, "no P-state info available\n");
 	}
 
 	return 0;
@@ -1341,28 +945,28 @@ static int ironlake_drpc_info(struct seq_file *m)
 	seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
 	seq_printf(m, "Render standby enabled: %s\n",
 		   (rstdbyctl & RCX_SW_EXIT) ? "no" : "yes");
-	seq_printf(m, "Current RS state: ");
+	seq_puts(m, "Current RS state: ");
 	switch (rstdbyctl & RSX_STATUS_MASK) {
 	case RSX_STATUS_ON:
-		seq_printf(m, "on\n");
+		seq_puts(m, "on\n");
 		break;
 	case RSX_STATUS_RC1:
-		seq_printf(m, "RC1\n");
+		seq_puts(m, "RC1\n");
 		break;
 	case RSX_STATUS_RC1E:
-		seq_printf(m, "RC1E\n");
+		seq_puts(m, "RC1E\n");
 		break;
 	case RSX_STATUS_RS1:
-		seq_printf(m, "RS1\n");
+		seq_puts(m, "RS1\n");
 		break;
 	case RSX_STATUS_RS2:
-		seq_printf(m, "RS2 (RC6)\n");
+		seq_puts(m, "RS2 (RC6)\n");
 		break;
 	case RSX_STATUS_RS3:
-		seq_printf(m, "RC3 (RC6+)\n");
+		seq_puts(m, "RC3 (RC6+)\n");
 		break;
 	default:
-		seq_printf(m, "unknown\n");
+		seq_puts(m, "unknown\n");
 		break;
 	}
 
@@ -1377,8 +981,7 @@ static int gen6_drpc_info(struct seq_file *m)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 rpmodectl1, gt_core_status, rcctl1, rc6vids = 0;
 	unsigned forcewake_count;
-	int count=0, ret;
-
+	int count = 0, ret;
 
 	ret = mutex_lock_interruptible(&dev->struct_mutex);
 	if (ret)
@@ -1389,8 +992,8 @@ static int gen6_drpc_info(struct seq_file *m)
 	spin_unlock_irq(&dev_priv->gt_lock);
 
 	if (forcewake_count) {
-		seq_printf(m, "RC information inaccurate because somebody "
-			   "holds a forcewake reference \n");
+		seq_puts(m, "RC information inaccurate because somebody "
+			 "holds a forcewake reference \n");
 	} else {
 		/* NB: we cannot use forcewake, else we read the wrong values */
 		while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1))
@@ -1423,25 +1026,25 @@ static int gen6_drpc_info(struct seq_file *m)
 		   yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
 	seq_printf(m, "Deepest RC6 Enabled: %s\n",
 		   yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
-	seq_printf(m, "Current RC state: ");
+	seq_puts(m, "Current RC state: ");
 	switch (gt_core_status & GEN6_RCn_MASK) {
 	case GEN6_RC0:
 		if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
-			seq_printf(m, "Core Power Down\n");
+			seq_puts(m, "Core Power Down\n");
 		else
-			seq_printf(m, "on\n");
+			seq_puts(m, "on\n");
 		break;
 	case GEN6_RC3:
-		seq_printf(m, "RC3\n");
+		seq_puts(m, "RC3\n");
 		break;
 	case GEN6_RC6:
-		seq_printf(m, "RC6\n");
+		seq_puts(m, "RC6\n");
 		break;
 	case GEN6_RC7:
-		seq_printf(m, "RC7\n");
+		seq_puts(m, "RC7\n");
 		break;
 	default:
-		seq_printf(m, "Unknown\n");
+		seq_puts(m, "Unknown\n");
 		break;
 	}
 
@@ -1485,43 +1088,46 @@ static int i915_fbc_status(struct seq_file *m, void *unused)
 	drm_i915_private_t *dev_priv = dev->dev_private;
 
 	if (!I915_HAS_FBC(dev)) {
-		seq_printf(m, "FBC unsupported on this chipset\n");
+		seq_puts(m, "FBC unsupported on this chipset\n");
 		return 0;
 	}
 
 	if (intel_fbc_enabled(dev)) {
-		seq_printf(m, "FBC enabled\n");
+		seq_puts(m, "FBC enabled\n");
 	} else {
-		seq_printf(m, "FBC disabled: ");
-		switch (dev_priv->no_fbc_reason) {
+		seq_puts(m, "FBC disabled: ");
+		switch (dev_priv->fbc.no_fbc_reason) {
 		case FBC_NO_OUTPUT:
-			seq_printf(m, "no outputs");
+			seq_puts(m, "no outputs");
 			break;
 		case FBC_STOLEN_TOO_SMALL:
-			seq_printf(m, "not enough stolen memory");
+			seq_puts(m, "not enough stolen memory");
 			break;
 		case FBC_UNSUPPORTED_MODE:
-			seq_printf(m, "mode not supported");
+			seq_puts(m, "mode not supported");
 			break;
 		case FBC_MODE_TOO_LARGE:
-			seq_printf(m, "mode too large");
+			seq_puts(m, "mode too large");
 			break;
 		case FBC_BAD_PLANE:
-			seq_printf(m, "FBC unsupported on plane");
+			seq_puts(m, "FBC unsupported on plane");
 			break;
 		case FBC_NOT_TILED:
-			seq_printf(m, "scanout buffer not tiled");
+			seq_puts(m, "scanout buffer not tiled");
 			break;
 		case FBC_MULTIPLE_PIPES:
-			seq_printf(m, "multiple pipes are enabled");
+			seq_puts(m, "multiple pipes are enabled");
 			break;
 		case FBC_MODULE_PARAM:
-			seq_printf(m, "disabled per module param (default off)");
+			seq_puts(m, "disabled per module param (default off)");
+			break;
+		case FBC_CHIP_DEFAULT:
+			seq_puts(m, "disabled per chip default");
 			break;
 		default:
-			seq_printf(m, "unknown reason");
+			seq_puts(m, "unknown reason");
 		}
-		seq_printf(m, "\n");
+		seq_putc(m, '\n');
 	}
 	return 0;
 }
@@ -1604,7 +1210,7 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
 	int gpu_freq, ia_freq;
 
 	if (!(IS_GEN6(dev) || IS_GEN7(dev))) {
-		seq_printf(m, "unsupported on this chipset\n");
+		seq_puts(m, "unsupported on this chipset\n");
 		return 0;
 	}
 
@@ -1612,7 +1218,7 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
 	if (ret)
 		return ret;
 
-	seq_printf(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");
+	seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");
 
 	for (gpu_freq = dev_priv->rps.min_delay;
 	     gpu_freq <= dev_priv->rps.max_delay;
@@ -1701,7 +1307,7 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
 		   fb->base.bits_per_pixel,
 		   atomic_read(&fb->base.refcount.refcount));
 	describe_obj(m, fb->obj);
-	seq_printf(m, "\n");
+	seq_putc(m, '\n');
 	mutex_unlock(&dev->mode_config.mutex);
 
 	mutex_lock(&dev->mode_config.fb_lock);
@@ -1716,7 +1322,7 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
 			   fb->base.bits_per_pixel,
 			   atomic_read(&fb->base.refcount.refcount));
 		describe_obj(m, fb->obj);
-		seq_printf(m, "\n");
+		seq_putc(m, '\n');
 	}
 	mutex_unlock(&dev->mode_config.fb_lock);
 
@@ -1736,22 +1342,22 @@ static int i915_context_status(struct seq_file *m, void *unused)
 		return ret;
 
 	if (dev_priv->ips.pwrctx) {
-		seq_printf(m, "power context ");
+		seq_puts(m, "power context ");
 		describe_obj(m, dev_priv->ips.pwrctx);
-		seq_printf(m, "\n");
+		seq_putc(m, '\n');
 	}
 
 	if (dev_priv->ips.renderctx) {
-		seq_printf(m, "render context ");
+		seq_puts(m, "render context ");
 		describe_obj(m, dev_priv->ips.renderctx);
-		seq_printf(m, "\n");
+		seq_putc(m, '\n');
 	}
 
 	for_each_ring(ring, dev_priv, i) {
 		if (ring->default_context) {
 			seq_printf(m, "HW default context %s ring ", ring->name);
 			describe_obj(m, ring->default_context->obj);
-			seq_printf(m, "\n");
+			seq_putc(m, '\n');
 		}
 	}
 
@@ -1778,7 +1384,7 @@ static int i915_gen6_forcewake_count_info(struct seq_file *m, void *data)
 
 static const char *swizzle_string(unsigned swizzle)
 {
-	switch(swizzle) {
+	switch (swizzle) {
 	case I915_BIT_6_SWIZZLE_NONE:
 		return "none";
 	case I915_BIT_6_SWIZZLE_9:
@@ -1868,7 +1474,7 @@ static int i915_ppgtt_info(struct seq_file *m, void *data)
 	if (dev_priv->mm.aliasing_ppgtt) {
 		struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
 
-		seq_printf(m, "aliasing PPGTT:\n");
+		seq_puts(m, "aliasing PPGTT:\n");
 		seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd_offset);
 	}
 	seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK));
@@ -1886,7 +1492,7 @@ static int i915_dpio_info(struct seq_file *m, void *data)
 
 
 	if (!IS_VALLEYVIEW(dev)) {
-		seq_printf(m, "unsupported\n");
+		seq_puts(m, "unsupported\n");
 		return 0;
 	}
 
@@ -2356,61 +1962,35 @@ static struct drm_info_list i915_debugfs_list[] = {
 };
 #define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
 
+struct i915_debugfs_files {
+	const char *name;
+	const struct file_operations *fops;
+} i915_debugfs_files[] = {
+	{"i915_wedged", &i915_wedged_fops},
+	{"i915_max_freq", &i915_max_freq_fops},
+	{"i915_min_freq", &i915_min_freq_fops},
+	{"i915_cache_sharing", &i915_cache_sharing_fops},
+	{"i915_ring_stop", &i915_ring_stop_fops},
+	{"i915_gem_drop_caches", &i915_drop_caches_fops},
+	{"i915_error_state", &i915_error_state_fops},
+	{"i915_next_seqno", &i915_next_seqno_fops},
+};
+
 int i915_debugfs_init(struct drm_minor *minor)
 {
-	int ret;
-
-	ret = i915_debugfs_create(minor->debugfs_root, minor,
-				  "i915_wedged",
-				  &i915_wedged_fops);
-	if (ret)
-		return ret;
+	int ret, i;
 
 	ret = i915_forcewake_create(minor->debugfs_root, minor);
 	if (ret)
 		return ret;
 
-	ret = i915_debugfs_create(minor->debugfs_root, minor,
-				  "i915_max_freq",
-				  &i915_max_freq_fops);
-	if (ret)
-		return ret;
-
-	ret = i915_debugfs_create(minor->debugfs_root, minor,
-				  "i915_min_freq",
-				  &i915_min_freq_fops);
-	if (ret)
-		return ret;
-
-	ret = i915_debugfs_create(minor->debugfs_root, minor,
-				  "i915_cache_sharing",
-				  &i915_cache_sharing_fops);
-	if (ret)
-		return ret;
-
-	ret = i915_debugfs_create(minor->debugfs_root, minor,
-				  "i915_ring_stop",
-				  &i915_ring_stop_fops);
-	if (ret)
-		return ret;
-
-	ret = i915_debugfs_create(minor->debugfs_root, minor,
-				  "i915_gem_drop_caches",
-				  &i915_drop_caches_fops);
-	if (ret)
-		return ret;
-
-	ret = i915_debugfs_create(minor->debugfs_root, minor,
-				  "i915_error_state",
-				  &i915_error_state_fops);
-	if (ret)
-		return ret;
-
-	ret = i915_debugfs_create(minor->debugfs_root, minor,
-				  "i915_next_seqno",
-				  &i915_next_seqno_fops);
-	if (ret)
-		return ret;
+	for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
+		ret = i915_debugfs_create(minor->debugfs_root, minor,
+					  i915_debugfs_files[i].name,
+					  i915_debugfs_files[i].fops);
+		if (ret)
+			return ret;
+	}
 
 	return drm_debugfs_create_files(i915_debugfs_list,
 					I915_DEBUGFS_ENTRIES,
@@ -2419,26 +1999,18 @@ int i915_debugfs_init(struct drm_minor *minor)
 
 void i915_debugfs_cleanup(struct drm_minor *minor)
 {
+	int i;
+
 	drm_debugfs_remove_files(i915_debugfs_list,
 				 I915_DEBUGFS_ENTRIES, minor);
 	drm_debugfs_remove_files((struct drm_info_list *) &i915_forcewake_fops,
 				 1, minor);
-	drm_debugfs_remove_files((struct drm_info_list *) &i915_wedged_fops,
-				 1, minor);
-	drm_debugfs_remove_files((struct drm_info_list *) &i915_max_freq_fops,
-				 1, minor);
-	drm_debugfs_remove_files((struct drm_info_list *) &i915_min_freq_fops,
-				 1, minor);
-	drm_debugfs_remove_files((struct drm_info_list *) &i915_cache_sharing_fops,
-				 1, minor);
-	drm_debugfs_remove_files((struct drm_info_list *) &i915_drop_caches_fops,
-				 1, minor);
-	drm_debugfs_remove_files((struct drm_info_list *) &i915_ring_stop_fops,
-				 1, minor);
-	drm_debugfs_remove_files((struct drm_info_list *) &i915_error_state_fops,
-				 1, minor);
-	drm_debugfs_remove_files((struct drm_info_list *) &i915_next_seqno_fops,
-				 1, minor);
+	for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
+		struct drm_info_list *info_list =
+			(struct drm_info_list *) i915_debugfs_files[i].fops;
+
+		drm_debugfs_remove_files(info_list, 1, minor);
+	}
 }
 
 #endif /* CONFIG_DEBUG_FS */
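The error-state hunks above move all of the formatting code into the new i915_gpu_error.c and leave debugfs with a small helper API: i915_error_state_get()/i915_error_state_put() for the capture reference, plus i915_error_state_buf_init(), i915_error_state_to_str() and i915_error_state_buf_release() for the print buffer. Condensed from i915_error_state_read() above, the consumer-side lifecycle looks like this (a sketch with the bookkeeping trimmed, not a drop-in replacement):

/* Sketch of the windowed read path after the refactor; mirrors
 * i915_error_state_read() above. */
static ssize_t example_error_state_read(struct file *file, char __user *ubuf,
					size_t count, loff_t *pos)
{
	struct i915_error_state_file_priv *error_priv = file->private_data;
	struct drm_i915_error_state_buf error_str;
	loff_t tmp_pos = 0;
	ssize_t ret_count = 0;
	int ret;

	ret = i915_error_state_buf_init(&error_str, count, *pos);	/* size the window */
	if (ret)
		return ret;

	ret = i915_error_state_to_str(&error_str, error_priv);		/* format the capture */
	if (ret)
		goto out;

	ret_count = simple_read_from_buffer(ubuf, count, &tmp_pos,
					    error_str.buf, error_str.bytes);
	if (ret_count >= 0)
		*pos = error_str.start + ret_count;
out:
	i915_error_state_buf_release(&error_str);	/* frees the buffer either way */
	return ret ?: ret_count;
}

Pulling these helpers out of debugfs is presumably what lets the new sysfs error_state file mentioned in the highlights (see the i915_sysfs.c entry in the diffstat) reuse the same formatting code.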
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index adb319b53ecd..6ce903306320 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1323,10 +1323,8 @@ static int i915_load_modeset_init(struct drm_device *dev)
 	/* Always safe in the mode setting case. */
 	/* FIXME: do pre/post-mode set stuff in core KMS code */
 	dev->vblank_disable_allowed = 1;
-	if (INTEL_INFO(dev)->num_pipes == 0) {
-		dev_priv->mm.suspended = 0;
+	if (INTEL_INFO(dev)->num_pipes == 0)
 		return 0;
-	}
 
 	ret = intel_fbdev_init(dev);
 	if (ret)
@@ -1352,9 +1350,6 @@ static int i915_load_modeset_init(struct drm_device *dev)
 
 	drm_kms_helper_poll_init(dev);
 
-	/* We're off and running w/KMS */
-	dev_priv->mm.suspended = 0;
-
 	return 0;
 
 cleanup_gem:
@@ -1558,8 +1553,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 		goto out_rmmap;
 	}
 
-	dev_priv->mm.gtt_mtrr = arch_phys_wc_add(dev_priv->gtt.mappable_base,
-						 aperture_size);
+	dev_priv->gtt.mtrr = arch_phys_wc_add(dev_priv->gtt.mappable_base,
+					      aperture_size);
 
 	/* The i915 workqueue is primarily used for batched retirement of
 	 * requests (and thus managing bo) once the task has been completed
@@ -1612,7 +1607,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 
 	spin_lock_init(&dev_priv->irq_lock);
 	spin_lock_init(&dev_priv->gpu_error.lock);
-	spin_lock_init(&dev_priv->rps.lock);
 	spin_lock_init(&dev_priv->backlight.lock);
 	mutex_init(&dev_priv->dpio_lock);
 
@@ -1629,9 +1623,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 		goto out_gem_unload;
 	}
 
-	/* Start out suspended */
-	dev_priv->mm.suspended = 1;
-
 	if (HAS_POWER_WELL(dev))
 		i915_init_power_well(dev);
 
@@ -1641,6 +1632,9 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 			DRM_ERROR("failed to init modeset\n");
 			goto out_gem_unload;
 		}
+	} else {
+		/* Start out suspended in ums mode. */
+		dev_priv->ums.mm_suspended = 1;
 	}
 
 	i915_setup_sysfs(dev);
@@ -1667,7 +1661,7 @@ out_gem_unload:
 	intel_teardown_mchbar(dev);
 	destroy_workqueue(dev_priv->wq);
 out_mtrrfree:
-	arch_phys_wc_del(dev_priv->mm.gtt_mtrr);
+	arch_phys_wc_del(dev_priv->gtt.mtrr);
 	io_mapping_free(dev_priv->gtt.mappable);
 	dev_priv->gtt.gtt_remove(dev);
 out_rmmap:
@@ -1705,7 +1699,7 @@ int i915_driver_unload(struct drm_device *dev)
 	cancel_delayed_work_sync(&dev_priv->mm.retire_work);
 
 	io_mapping_free(dev_priv->gtt.mappable);
-	arch_phys_wc_del(dev_priv->mm.gtt_mtrr);
+	arch_phys_wc_del(dev_priv->gtt.mtrr);
 
 	acpi_video_unregister();
 
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index f4af1ca0fb62..b07362f2675e 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -132,6 +132,11 @@ int i915_enable_ips __read_mostly = 1;
 module_param_named(enable_ips, i915_enable_ips, int, 0600);
 MODULE_PARM_DESC(enable_ips, "Enable IPS (default: true)");
 
+bool i915_fastboot __read_mostly = 0;
+module_param_named(fastboot, i915_fastboot, bool, 0600);
+MODULE_PARM_DESC(fastboot, "Try to skip unnecessary mode sets at boot time "
+		 "(default: false)");
+
 static struct drm_driver driver;
 extern int intel_agp_enabled;
 
@@ -551,7 +556,11 @@ static int i915_drm_freeze(struct drm_device *dev)
 
 	/* If KMS is active, we do the leavevt stuff here */
 	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
-		int error = i915_gem_idle(dev);
+		int error;
+
+		mutex_lock(&dev->struct_mutex);
+		error = i915_gem_idle(dev);
+		mutex_unlock(&dev->struct_mutex);
 		if (error) {
 			dev_err(&dev->pdev->dev,
 				"GEM idle failed, resume might fail\n");
@@ -656,7 +665,6 @@ static int __i915_drm_thaw(struct drm_device *dev)
 		intel_init_pch_refclk(dev);
 
 	mutex_lock(&dev->struct_mutex);
-	dev_priv->mm.suspended = 0;
 
 	error = i915_gem_init_hw(dev);
 	mutex_unlock(&dev->struct_mutex);
@@ -793,28 +801,29 @@ static int i965_reset_complete(struct drm_device *dev)
 static int i965_do_reset(struct drm_device *dev)
 {
 	int ret;
-	u8 gdrst;
 
 	/*
 	 * Set the domains we want to reset (GRDOM/bits 2 and 3) as
 	 * well as the reset bit (GR/bit 0). Setting the GR bit
 	 * triggers the reset; when done, the hardware will clear it.
 	 */
-	pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
 	pci_write_config_byte(dev->pdev, I965_GDRST,
-			      gdrst | GRDOM_RENDER |
-			      GRDOM_RESET_ENABLE);
+			      GRDOM_RENDER | GRDOM_RESET_ENABLE);
 	ret = wait_for(i965_reset_complete(dev), 500);
 	if (ret)
 		return ret;
 
 	/* We can't reset render&media without also resetting display ... */
-	pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
 	pci_write_config_byte(dev->pdev, I965_GDRST,
-			      gdrst | GRDOM_MEDIA |
-			      GRDOM_RESET_ENABLE);
+			      GRDOM_MEDIA | GRDOM_RESET_ENABLE);
 
-	return wait_for(i965_reset_complete(dev), 500);
+	ret = wait_for(i965_reset_complete(dev), 500);
+	if (ret)
+		return ret;
+
+	pci_write_config_byte(dev->pdev, I965_GDRST, 0);
+
+	return 0;
 }
 
 static int ironlake_do_reset(struct drm_device *dev)
@@ -955,11 +964,11 @@ int i915_reset(struct drm_device *dev)
 	 * switched away).
 	 */
 	if (drm_core_check_feature(dev, DRIVER_MODESET) ||
-			!dev_priv->mm.suspended) {
+			!dev_priv->ums.mm_suspended) {
 		struct intel_ring_buffer *ring;
 		int i;
 
-		dev_priv->mm.suspended = 0;
+		dev_priv->ums.mm_suspended = 0;
 
 		i915_gem_init_swizzling(dev);
 
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index a416645bcd23..cef35d3ab37b 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -144,6 +144,7 @@ enum intel_dpll_id {
144 144
145struct intel_dpll_hw_state { 145struct intel_dpll_hw_state {
146 uint32_t dpll; 146 uint32_t dpll;
147 uint32_t dpll_md;
147 uint32_t fp0; 148 uint32_t fp0;
148 uint32_t fp1; 149 uint32_t fp1;
149}; 150};
@@ -156,6 +157,8 @@ struct intel_shared_dpll {
156 /* should match the index in the dev_priv->shared_dplls array */ 157 /* should match the index in the dev_priv->shared_dplls array */
157 enum intel_dpll_id id; 158 enum intel_dpll_id id;
158 struct intel_dpll_hw_state hw_state; 159 struct intel_dpll_hw_state hw_state;
160 void (*mode_set)(struct drm_i915_private *dev_priv,
161 struct intel_shared_dpll *pll);
159 void (*enable)(struct drm_i915_private *dev_priv, 162 void (*enable)(struct drm_i915_private *dev_priv,
160 struct intel_shared_dpll *pll); 163 struct intel_shared_dpll *pll);
161 void (*disable)(struct drm_i915_private *dev_priv, 164 void (*disable)(struct drm_i915_private *dev_priv,
@@ -364,6 +367,7 @@ struct drm_i915_display_funcs {
364 * fills out the pipe-config with the hw state. */ 367 * fills out the pipe-config with the hw state. */
365 bool (*get_pipe_config)(struct intel_crtc *, 368 bool (*get_pipe_config)(struct intel_crtc *,
366 struct intel_crtc_config *); 369 struct intel_crtc_config *);
370 void (*get_clock)(struct intel_crtc *, struct intel_crtc_config *);
367 int (*crtc_mode_set)(struct drm_crtc *crtc, 371 int (*crtc_mode_set)(struct drm_crtc *crtc,
368 int x, int y, 372 int x, int y,
369 struct drm_framebuffer *old_fb); 373 struct drm_framebuffer *old_fb);
@@ -462,8 +466,12 @@ struct i915_gtt {
462 void __iomem *gsm; 466 void __iomem *gsm;
463 467
464 bool do_idle_maps; 468 bool do_idle_maps;
465 dma_addr_t scratch_page_dma; 469 struct {
466 struct page *scratch_page; 470 dma_addr_t addr;
471 struct page *page;
472 } scratch;
473
474 int mtrr;
467 475
468 /* global gtt ops */ 476 /* global gtt ops */
469 int (*gtt_probe)(struct drm_device *dev, size_t *gtt_total, 477 int (*gtt_probe)(struct drm_device *dev, size_t *gtt_total,
@@ -477,21 +485,17 @@ struct i915_gtt {
477 struct sg_table *st, 485 struct sg_table *st,
478 unsigned int pg_start, 486 unsigned int pg_start,
479 enum i915_cache_level cache_level); 487 enum i915_cache_level cache_level);
480 gen6_gtt_pte_t (*pte_encode)(struct drm_device *dev, 488 gen6_gtt_pte_t (*pte_encode)(dma_addr_t addr,
481 dma_addr_t addr,
482 enum i915_cache_level level); 489 enum i915_cache_level level);
483}; 490};
484#define gtt_total_entries(gtt) ((gtt).total >> PAGE_SHIFT) 491#define gtt_total_entries(gtt) ((gtt).total >> PAGE_SHIFT)
485 492
486#define I915_PPGTT_PD_ENTRIES 512
487#define I915_PPGTT_PT_ENTRIES 1024
488struct i915_hw_ppgtt { 493struct i915_hw_ppgtt {
489 struct drm_device *dev; 494 struct drm_device *dev;
490 unsigned num_pd_entries; 495 unsigned num_pd_entries;
491 struct page **pt_pages; 496 struct page **pt_pages;
492 uint32_t pd_offset; 497 uint32_t pd_offset;
493 dma_addr_t *pt_dma_addr; 498 dma_addr_t *pt_dma_addr;
494 dma_addr_t scratch_page_dma_addr;
495 499
496 /* pte functions, mirroring the interface of the global gtt. */ 500 /* pte functions, mirroring the interface of the global gtt. */
497 void (*clear_range)(struct i915_hw_ppgtt *ppgtt, 501 void (*clear_range)(struct i915_hw_ppgtt *ppgtt,
@@ -501,8 +505,7 @@ struct i915_hw_ppgtt {
501 struct sg_table *st, 505 struct sg_table *st,
502 unsigned int pg_start, 506 unsigned int pg_start,
503 enum i915_cache_level cache_level); 507 enum i915_cache_level cache_level);
504 gen6_gtt_pte_t (*pte_encode)(struct drm_device *dev, 508 gen6_gtt_pte_t (*pte_encode)(dma_addr_t addr,
505 dma_addr_t addr,
506 enum i915_cache_level level); 509 enum i915_cache_level level);
507 int (*enable)(struct drm_device *dev); 510 int (*enable)(struct drm_device *dev);
508 void (*cleanup)(struct i915_hw_ppgtt *ppgtt); 511 void (*cleanup)(struct i915_hw_ppgtt *ppgtt);
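Two related cleanups land in these hunks: the scratch page's struct page / DMA-address pair is grouped into gtt.scratch, and both pte_encode() callbacks lose their struct drm_device * argument, since a PTE is a pure function of the DMA address and the cache level. A sketch of the new callback shape, assuming the driver's u32-sized gen6_gtt_pte_t; the cache-level bits are elided here (see the gen6/byt/hsw variants in the i915_gem_gtt.c hunks later in this diff):

/* New-style encoder: no device pointer needed any more. */
static gen6_gtt_pte_t pte_encode_sketch(dma_addr_t addr,
					enum i915_cache_level level)
{
	gen6_gtt_pte_t pte = GEN6_PTE_VALID;

	pte |= GEN6_PTE_ADDR_ENCODE(addr);
	/* cache-level dependent bits elided in this sketch */
	return pte;
}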
@@ -528,17 +531,36 @@ struct i915_hw_context {
528 struct i915_ctx_hang_stats hang_stats; 531 struct i915_ctx_hang_stats hang_stats;
529}; 532};
530 533
531enum no_fbc_reason { 534struct i915_fbc {
532 FBC_NO_OUTPUT, /* no outputs enabled to compress */ 535 unsigned long size;
533 FBC_STOLEN_TOO_SMALL, /* not enough space to hold compressed buffers */ 536 unsigned int fb_id;
534 FBC_UNSUPPORTED_MODE, /* interlace or doublescanned mode */ 537 enum plane plane;
535 FBC_MODE_TOO_LARGE, /* mode too large for compression */ 538 int y;
536 FBC_BAD_PLANE, /* fbc not supported on plane */ 539
537 FBC_NOT_TILED, /* buffer not tiled */ 540 struct drm_mm_node *compressed_fb;
538 FBC_MULTIPLE_PIPES, /* more than one pipe active */ 541 struct drm_mm_node *compressed_llb;
539 FBC_MODULE_PARAM, 542
543 struct intel_fbc_work {
544 struct delayed_work work;
545 struct drm_crtc *crtc;
546 struct drm_framebuffer *fb;
547 int interval;
548 } *fbc_work;
549
550 enum {
551 FBC_NO_OUTPUT, /* no outputs enabled to compress */
552 FBC_STOLEN_TOO_SMALL, /* not enough space for buffers */
553 FBC_UNSUPPORTED_MODE, /* interlace or doublescanned mode */
554 FBC_MODE_TOO_LARGE, /* mode too large for compression */
555 FBC_BAD_PLANE, /* fbc not supported on plane */
556 FBC_NOT_TILED, /* buffer not tiled */
557 FBC_MULTIPLE_PIPES, /* more than one pipe active */
558 FBC_MODULE_PARAM,
559 FBC_CHIP_DEFAULT, /* disabled by default on this chip */
560 } no_fbc_reason;
540}; 561};
541 562
563
542enum intel_pch { 564enum intel_pch {
543 PCH_NONE = 0, /* No PCH present */ 565 PCH_NONE = 0, /* No PCH present */
544 PCH_IBX, /* Ibexpeak PCH */ 566 PCH_IBX, /* Ibexpeak PCH */
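All the frame-buffer-compression state that used to be scattered over dev_priv (cfb_size, cfb_fb, cfb_plane, cfb_y, fbc_work, compressed_fb/llb and no_fbc_reason) is folded into the single struct i915_fbc above, so it travels as one unit and the reason enum no longer needs a global name. An illustrative teardown path under the new layout; the fbc_work ownership implied by the kfree() is an assumption of this sketch:

/* Sketch: cancelling a pending FBC enable via the new struct. */
static void fbc_cancel_sketch(struct drm_i915_private *dev_priv)
{
	if (dev_priv->fbc.fbc_work &&
	    cancel_delayed_work(&dev_priv->fbc.fbc_work->work)) {
		kfree(dev_priv->fbc.fbc_work);	/* assumed: we own it here */
		dev_priv->fbc.fbc_work = NULL;
	}
	dev_priv->fbc.no_fbc_reason = FBC_MODULE_PARAM;
}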
@@ -721,12 +743,12 @@ struct i915_suspend_saved_registers {
721}; 743};
722 744
723struct intel_gen6_power_mgmt { 745struct intel_gen6_power_mgmt {
746 /* work and pm_iir are protected by dev_priv->irq_lock */
724 struct work_struct work; 747 struct work_struct work;
725 struct delayed_work vlv_work;
726 u32 pm_iir; 748 u32 pm_iir;
727 /* lock - irqsave spinlock that protectects the work_struct and 749
728 * pm_iir. */ 750 /* On vlv we need to manually drop to Vmin with a delayed work. */
729 spinlock_t lock; 751 struct delayed_work vlv_work;
730 752
731 /* The below variables and all the rps hw state are protected by 753 /* The below variables and all the rps hw state are protected by
732 * dev->struct_mutex. */ 754 * dev->struct_mutex. */
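The private rps.lock is gone: work and pm_iir are now covered by the existing dev_priv->irq_lock, and, per the "queue work outside spinlock in hsw_pm_irq_handler" patch in this pull, the worker is queued only after the lock is dropped. A sketch of the resulting handler pattern (the function name is illustrative):

static void rps_irq_sketch(struct drm_i915_private *dev_priv, u32 pm_iir)
{
	spin_lock(&dev_priv->irq_lock);	/* irq context, already irq-safe */
	dev_priv->rps.pm_iir |= pm_iir;	/* accumulate under irq_lock */
	spin_unlock(&dev_priv->irq_lock);

	/* Queue outside the lock; the worker re-takes irq_lock to
	 * snapshot and clear pm_iir before touching rps hw state. */
	queue_work(dev_priv->wq, &dev_priv->rps.work);
}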
@@ -792,6 +814,18 @@ struct i915_dri1_state {
792 uint32_t counter; 814 uint32_t counter;
793}; 815};
794 816
817struct i915_ums_state {
818 /**
819 * Flag if the X Server, and thus DRM, is not currently in
820 * control of the device.
821 *
822 * This is set between LeaveVT and EnterVT. It needs to be
823 * replaced with a semaphore. It also needs to be
824 * transitioned away from for kernel modesetting.
825 */
826 int mm_suspended;
827};
828
795struct intel_l3_parity { 829struct intel_l3_parity {
796 u32 *remap_info; 830 u32 *remap_info;
797 struct work_struct error_work; 831 struct work_struct error_work;
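mm.suspended is not deleted, only relocated: it reappears as ums.mm_suspended in the new struct above, making explicit that the flag only means anything to user-mode-setting clients. Every i915_gem.c user below is converted to the guard pattern sketched here:

/* Sketch: refuse GPU work while a UMS client has the device
 * "suspended" between LeaveVT and EnterVT. */
if (dev_priv->ums.mm_suspended) {
	mutex_unlock(&dev->struct_mutex);
	return -EBUSY;
}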
@@ -815,8 +849,6 @@ struct i915_gem_mm {
815 /** Usable portion of the GTT for GEM */ 849 /** Usable portion of the GTT for GEM */
816 unsigned long stolen_base; /* limited to low memory (32-bit) */ 850 unsigned long stolen_base; /* limited to low memory (32-bit) */
817 851
818 int gtt_mtrr;
819
820 /** PPGTT used for aliasing the PPGTT with the GTT */ 852 /** PPGTT used for aliasing the PPGTT with the GTT */
821 struct i915_hw_ppgtt *aliasing_ppgtt; 853 struct i915_hw_ppgtt *aliasing_ppgtt;
822 854
@@ -864,16 +896,6 @@ struct i915_gem_mm {
864 */ 896 */
865 bool interruptible; 897 bool interruptible;
866 898
867 /**
868 * Flag if the X Server, and thus DRM, is not currently in
869 * control of the device.
870 *
871 * This is set between LeaveVT and EnterVT. It needs to be
872 * replaced with a semaphore. It also needs to be
873 * transitioned away from for kernel modesetting.
874 */
875 int suspended;
876
877 /** Bit 6 swizzling required for X tiling */ 899 /** Bit 6 swizzling required for X tiling */
878 uint32_t bit_6_swizzle_x; 900 uint32_t bit_6_swizzle_x;
879 /** Bit 6 swizzling required for Y tiling */ 901 /** Bit 6 swizzling required for Y tiling */
@@ -896,6 +918,11 @@ struct drm_i915_error_state_buf {
896 loff_t pos; 918 loff_t pos;
897}; 919};
898 920
921struct i915_error_state_file_priv {
922 struct drm_device *dev;
923 struct drm_i915_error_state *error;
924};
925
899struct i915_gpu_error { 926struct i915_gpu_error {
900 /* For hangcheck timer */ 927 /* For hangcheck timer */
901#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */ 928#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
@@ -1058,12 +1085,7 @@ typedef struct drm_i915_private {
1058 1085
1059 int num_plane; 1086 int num_plane;
1060 1087
1061 unsigned long cfb_size; 1088 struct i915_fbc fbc;
1062 unsigned int cfb_fb;
1063 enum plane cfb_plane;
1064 int cfb_y;
1065 struct intel_fbc_work *fbc_work;
1066
1067 struct intel_opregion opregion; 1089 struct intel_opregion opregion;
1068 struct intel_vbt_data vbt; 1090 struct intel_vbt_data vbt;
1069 1091
@@ -1080,8 +1102,6 @@ typedef struct drm_i915_private {
1080 } backlight; 1102 } backlight;
1081 1103
1082 /* LVDS info */ 1104 /* LVDS info */
1083 struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
1084 struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
1085 bool no_aux_handshake; 1105 bool no_aux_handshake;
1086 1106
1087 struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */ 1107 struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */
@@ -1141,11 +1161,6 @@ typedef struct drm_i915_private {
1141 /* Haswell power well */ 1161 /* Haswell power well */
1142 struct i915_power_well power_well; 1162 struct i915_power_well power_well;
1143 1163
1144 enum no_fbc_reason no_fbc_reason;
1145
1146 struct drm_mm_node *compressed_fb;
1147 struct drm_mm_node *compressed_llb;
1148
1149 struct i915_gpu_error gpu_error; 1164 struct i915_gpu_error gpu_error;
1150 1165
1151 struct drm_i915_gem_object *vlv_pctx; 1166 struct drm_i915_gem_object *vlv_pctx;
@@ -1172,6 +1187,8 @@ typedef struct drm_i915_private {
1172 /* Old dri1 support infrastructure, beware the dragons ya fools entering 1187 /* Old dri1 support infrastructure, beware the dragons ya fools entering
1173 * here! */ 1188 * here! */
1174 struct i915_dri1_state dri1; 1189 struct i915_dri1_state dri1;
1190 /* Old ums support infrastructure, same warning applies. */
1191 struct i915_ums_state ums;
1175} drm_i915_private_t; 1192} drm_i915_private_t;
1176 1193
1177/* Iterate over initialised rings */ 1194/* Iterate over initialised rings */
@@ -1186,7 +1203,7 @@ enum hdmi_force_audio {
1186 HDMI_AUDIO_ON, /* force turn on HDMI audio */ 1203 HDMI_AUDIO_ON, /* force turn on HDMI audio */
1187}; 1204};
1188 1205
1189#define I915_GTT_RESERVED ((struct drm_mm_node *)0x1) 1206#define I915_GTT_OFFSET_NONE ((u32)-1)
1190 1207
1191struct drm_i915_gem_object_ops { 1208struct drm_i915_gem_object_ops {
1192 /* Interface between the GEM object and its backing storage. 1209 /* Interface between the GEM object and its backing storage.
@@ -1212,7 +1229,7 @@ struct drm_i915_gem_object {
1212 const struct drm_i915_gem_object_ops *ops; 1229 const struct drm_i915_gem_object_ops *ops;
1213 1230
1214 /** Current space allocated to this object in the GTT, if any. */ 1231 /** Current space allocated to this object in the GTT, if any. */
1215 struct drm_mm_node *gtt_space; 1232 struct drm_mm_node gtt_space;
1216 /** Stolen memory for this object, instead of being backed by shmem. */ 1233 /** Stolen memory for this object, instead of being backed by shmem. */
1217 struct drm_mm_node *stolen; 1234 struct drm_mm_node *stolen;
1218 struct list_head global_list; 1235 struct list_head global_list;
@@ -1313,13 +1330,6 @@ struct drm_i915_gem_object {
1313 unsigned long exec_handle; 1330 unsigned long exec_handle;
1314 struct drm_i915_gem_exec_object2 *exec_entry; 1331 struct drm_i915_gem_exec_object2 *exec_entry;
1315 1332
1316 /**
1317 * Current offset of the object in GTT space.
1318 *
1319 * This is the same as gtt_space->start
1320 */
1321 uint32_t gtt_offset;
1322
1323 struct intel_ring_buffer *ring; 1333 struct intel_ring_buffer *ring;
1324 1334
1325 /** Breadcrumb of last rendering to the buffer. */ 1335 /** Breadcrumb of last rendering to the buffer. */
@@ -1345,6 +1355,37 @@ struct drm_i915_gem_object {
1345 1355
1346#define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base) 1356#define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
1347 1357
1358/* Offset of the first PTE pointing to this object */
1359static inline unsigned long
1360i915_gem_obj_ggtt_offset(struct drm_i915_gem_object *o)
1361{
1362 return o->gtt_space.start;
1363}
1364
1365/* Whether or not this object is currently mapped by the translation tables */
1366static inline bool
1367i915_gem_obj_ggtt_bound(struct drm_i915_gem_object *o)
1368{
1369 return drm_mm_node_allocated(&o->gtt_space);
1370}
1371
1372/* The size used in the translation tables may be larger than the actual size of
1373 * the object on GEN2/GEN3 because of the way tiling is handled. See
1374 * i915_gem_get_gtt_size() for more details.
1375 */
1376static inline unsigned long
1377i915_gem_obj_ggtt_size(struct drm_i915_gem_object *o)
1378{
1379 return o->gtt_space.size;
1380}
1381
1382static inline void
1383i915_gem_obj_ggtt_set_color(struct drm_i915_gem_object *o,
1384 enum i915_cache_level color)
1385{
1386 o->gtt_space.color = color;
1387}
1388
1348/** 1389/**
1349 * Request queue structure. 1390 * Request queue structure.
1350 * 1391 *
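With drm_mm_node embedded, "is this object bound?" turns from a NULL check on gtt_space into drm_mm_node_allocated(), and the helpers above become the only sanctioned way to read the offset and size. A before/after sketch of a typical call site (use() is a hypothetical consumer):

/* Old style, pointer-valued gtt_space:
 *	if (obj->gtt_space)
 *		use(obj->gtt_offset, obj->gtt_space->size);
 * New style, via the inline helpers: */
if (i915_gem_obj_ggtt_bound(obj))
	use(i915_gem_obj_ggtt_offset(obj), i915_gem_obj_ggtt_size(obj));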
@@ -1542,6 +1583,7 @@ extern int i915_enable_ppgtt __read_mostly;
1542extern unsigned int i915_preliminary_hw_support __read_mostly; 1583extern unsigned int i915_preliminary_hw_support __read_mostly;
1543extern int i915_disable_power_well __read_mostly; 1584extern int i915_disable_power_well __read_mostly;
1544extern int i915_enable_ips __read_mostly; 1585extern int i915_enable_ips __read_mostly;
1586extern bool i915_fastboot __read_mostly;
1545 1587
1546extern int i915_suspend(struct drm_device *dev, pm_message_t state); 1588extern int i915_suspend(struct drm_device *dev, pm_message_t state);
1547extern int i915_resume(struct drm_device *dev); 1589extern int i915_resume(struct drm_device *dev);
@@ -1585,21 +1627,12 @@ extern void intel_hpd_init(struct drm_device *dev);
1585extern void intel_gt_init(struct drm_device *dev); 1627extern void intel_gt_init(struct drm_device *dev);
1586extern void intel_gt_reset(struct drm_device *dev); 1628extern void intel_gt_reset(struct drm_device *dev);
1587 1629
1588void i915_error_state_free(struct kref *error_ref);
1589
1590void 1630void
1591i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask); 1631i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
1592 1632
1593void 1633void
1594i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask); 1634i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
1595 1635
1596#ifdef CONFIG_DEBUG_FS
1597extern void i915_destroy_error_state(struct drm_device *dev);
1598#else
1599#define i915_destroy_error_state(x)
1600#endif
1601
1602
1603/* i915_gem.c */ 1636/* i915_gem.c */
1604int i915_gem_init_ioctl(struct drm_device *dev, void *data, 1637int i915_gem_init_ioctl(struct drm_device *dev, void *data,
1605 struct drm_file *file_priv); 1638 struct drm_file *file_priv);
@@ -1910,8 +1943,27 @@ void i915_gem_dump_object(struct drm_i915_gem_object *obj, int len,
1910/* i915_debugfs.c */ 1943/* i915_debugfs.c */
1911int i915_debugfs_init(struct drm_minor *minor); 1944int i915_debugfs_init(struct drm_minor *minor);
1912void i915_debugfs_cleanup(struct drm_minor *minor); 1945void i915_debugfs_cleanup(struct drm_minor *minor);
1946
1947/* i915_gpu_error.c */
1913__printf(2, 3) 1948__printf(2, 3)
1914void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...); 1949void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...);
1950int i915_error_state_to_str(struct drm_i915_error_state_buf *estr,
1951 const struct i915_error_state_file_priv *error);
1952int i915_error_state_buf_init(struct drm_i915_error_state_buf *eb,
1953 size_t count, loff_t pos);
1954static inline void i915_error_state_buf_release(
1955 struct drm_i915_error_state_buf *eb)
1956{
1957 kfree(eb->buf);
1958}
1959void i915_capture_error_state(struct drm_device *dev);
1960void i915_error_state_get(struct drm_device *dev,
1961 struct i915_error_state_file_priv *error_priv);
1962void i915_error_state_put(struct i915_error_state_file_priv *error_priv);
1963void i915_destroy_error_state(struct drm_device *dev);
1964
1965void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone);
1966const char *i915_cache_level_str(int type);
1915 1967
1916/* i915_suspend.c */ 1968/* i915_suspend.c */
1917extern int i915_save_state(struct drm_device *dev); 1969extern int i915_save_state(struct drm_device *dev);
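With the error-capture code split into i915_gpu_error.c, its whole interface is now declared here so that debugfs and the new sysfs error file can share it. A hedged sketch of a reader built on these calls; the function name, the 8 KiB buffer size and the omitted copy-out are illustrative, the real debugfs/sysfs plumbing differs:

static int error_read_sketch(struct drm_device *dev)
{
	struct i915_error_state_file_priv priv;
	struct drm_i915_error_state_buf buf;
	int ret;

	ret = i915_error_state_buf_init(&buf, 8192, 0 /* pos */);
	if (ret)
		return ret;

	i915_error_state_get(dev, &priv);	/* takes a reference */
	ret = i915_error_state_to_str(&buf, &priv);
	i915_error_state_put(&priv);		/* drops it again */

	/* ... copy buf.buf out to the reader here ... */
	i915_error_state_buf_release(&buf);	/* kfree(eb->buf) */
	return ret;
}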
@@ -1991,7 +2043,6 @@ int i915_reg_read_ioctl(struct drm_device *dev, void *data,
1991 struct drm_file *file); 2043 struct drm_file *file);
1992 2044
1993/* overlay */ 2045/* overlay */
1994#ifdef CONFIG_DEBUG_FS
1995extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev); 2046extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev);
1996extern void intel_overlay_print_error_state(struct drm_i915_error_state_buf *e, 2047extern void intel_overlay_print_error_state(struct drm_i915_error_state_buf *e,
1997 struct intel_overlay_error_state *error); 2048 struct intel_overlay_error_state *error);
@@ -2000,7 +2051,6 @@ extern struct intel_display_error_state *intel_display_capture_error_state(struc
2000extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e, 2051extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
2001 struct drm_device *dev, 2052 struct drm_device *dev,
2002 struct intel_display_error_state *error); 2053 struct intel_display_error_state *error);
2003#endif
2004 2054
2005/* On SNB platform, before reading ring registers forcewake bit 2055/* On SNB platform, before reading ring registers forcewake bit
2006 * must be set to prevent GT core from power down and stale values being 2056 * must be set to prevent GT core from power down and stale values being
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 97afd2639fb6..46bf7e3887d4 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -135,7 +135,7 @@ int i915_mutex_lock_interruptible(struct drm_device *dev)
135static inline bool 135static inline bool
136i915_gem_object_is_inactive(struct drm_i915_gem_object *obj) 136i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
137{ 137{
138 return obj->gtt_space && !obj->active; 138 return i915_gem_obj_ggtt_bound(obj) && !obj->active;
139} 139}
140 140
141int 141int
@@ -178,7 +178,7 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
178 mutex_lock(&dev->struct_mutex); 178 mutex_lock(&dev->struct_mutex);
179 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) 179 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
180 if (obj->pin_count) 180 if (obj->pin_count)
181 pinned += obj->gtt_space->size; 181 pinned += i915_gem_obj_ggtt_size(obj);
182 mutex_unlock(&dev->struct_mutex); 182 mutex_unlock(&dev->struct_mutex);
183 183
184 args->aper_size = dev_priv->gtt.total; 184 args->aper_size = dev_priv->gtt.total;
@@ -422,7 +422,7 @@ i915_gem_shmem_pread(struct drm_device *dev,
422 * anyway again before the next pread happens. */ 422 * anyway again before the next pread happens. */
423 if (obj->cache_level == I915_CACHE_NONE) 423 if (obj->cache_level == I915_CACHE_NONE)
424 needs_clflush = 1; 424 needs_clflush = 1;
425 if (obj->gtt_space) { 425 if (i915_gem_obj_ggtt_bound(obj)) {
426 ret = i915_gem_object_set_to_gtt_domain(obj, false); 426 ret = i915_gem_object_set_to_gtt_domain(obj, false);
427 if (ret) 427 if (ret)
428 return ret; 428 return ret;
@@ -609,7 +609,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
609 user_data = to_user_ptr(args->data_ptr); 609 user_data = to_user_ptr(args->data_ptr);
610 remain = args->size; 610 remain = args->size;
611 611
612 offset = obj->gtt_offset + args->offset; 612 offset = i915_gem_obj_ggtt_offset(obj) + args->offset;
613 613
614 while (remain > 0) { 614 while (remain > 0) {
615 /* Operation in this page 615 /* Operation in this page
@@ -739,7 +739,7 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
739 * right away and we therefore have to clflush anyway. */ 739 * right away and we therefore have to clflush anyway. */
740 if (obj->cache_level == I915_CACHE_NONE) 740 if (obj->cache_level == I915_CACHE_NONE)
741 needs_clflush_after = 1; 741 needs_clflush_after = 1;
742 if (obj->gtt_space) { 742 if (i915_gem_obj_ggtt_bound(obj)) {
743 ret = i915_gem_object_set_to_gtt_domain(obj, true); 743 ret = i915_gem_object_set_to_gtt_domain(obj, true);
744 if (ret) 744 if (ret)
745 return ret; 745 return ret;
@@ -1360,8 +1360,9 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1360 1360
1361 obj->fault_mappable = true; 1361 obj->fault_mappable = true;
1362 1362
1363 pfn = ((dev_priv->gtt.mappable_base + obj->gtt_offset) >> PAGE_SHIFT) + 1363 pfn = dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj);
1364 page_offset; 1364 pfn >>= PAGE_SHIFT;
1365 pfn += page_offset;
1365 1366
1366 /* Finally, remap it using the new GTT offset */ 1367 /* Finally, remap it using the new GTT offset */
1367 ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn); 1368 ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
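The fault-handler hunk does not change the arithmetic, it only unpacks one expression into steps around the new offset helper. Worked through with illustrative numbers as a comment block:

/* Illustrative numbers only: mappable_base = 0x80000000, object at
 * GGTT offset 0x00200000, fault on page 3 of the object:
 *	pfn = 0x80000000 + 0x00200000	-> 0x80200000 (bus address)
 *	pfn >>= PAGE_SHIFT (12)		-> 0x80200    (page frame)
 *	pfn += 3			-> 0x80203    (faulting page)
 * vm_insert_pfn() then maps exactly that frame at the fault address.
 */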
@@ -1667,7 +1668,7 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
1667 if (obj->pages == NULL) 1668 if (obj->pages == NULL)
1668 return 0; 1669 return 0;
1669 1670
1670 BUG_ON(obj->gtt_space); 1671 BUG_ON(i915_gem_obj_ggtt_bound(obj));
1671 1672
1672 if (obj->pages_pin_count) 1673 if (obj->pages_pin_count)
1673 return -EBUSY; 1674 return -EBUSY;
@@ -2085,7 +2086,7 @@ int __i915_add_request(struct intel_ring_buffer *ring,
2085 trace_i915_gem_request_add(ring, request->seqno); 2086 trace_i915_gem_request_add(ring, request->seqno);
2086 ring->outstanding_lazy_request = 0; 2087 ring->outstanding_lazy_request = 0;
2087 2088
2088 if (!dev_priv->mm.suspended) { 2089 if (!dev_priv->ums.mm_suspended) {
2089 if (i915_enable_hangcheck) { 2090 if (i915_enable_hangcheck) {
2090 mod_timer(&dev_priv->gpu_error.hangcheck_timer, 2091 mod_timer(&dev_priv->gpu_error.hangcheck_timer,
2091 round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES)); 2092 round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
@@ -2121,8 +2122,8 @@ i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
2121 2122
2122static bool i915_head_inside_object(u32 acthd, struct drm_i915_gem_object *obj) 2123static bool i915_head_inside_object(u32 acthd, struct drm_i915_gem_object *obj)
2123{ 2124{
2124 if (acthd >= obj->gtt_offset && 2125 if (acthd >= i915_gem_obj_ggtt_offset(obj) &&
2125 acthd < obj->gtt_offset + obj->base.size) 2126 acthd < i915_gem_obj_ggtt_offset(obj) + obj->base.size)
2126 return true; 2127 return true;
2127 2128
2128 return false; 2129 return false;
@@ -2180,11 +2181,11 @@ static void i915_set_reset_status(struct intel_ring_buffer *ring,
2180 2181
2181 if (ring->hangcheck.action != wait && 2182 if (ring->hangcheck.action != wait &&
2182 i915_request_guilty(request, acthd, &inside)) { 2183 i915_request_guilty(request, acthd, &inside)) {
2183 DRM_ERROR("%s hung %s bo (0x%x ctx %d) at 0x%x\n", 2184 DRM_ERROR("%s hung %s bo (0x%lx ctx %d) at 0x%x\n",
2184 ring->name, 2185 ring->name,
2185 inside ? "inside" : "flushing", 2186 inside ? "inside" : "flushing",
2186 request->batch_obj ? 2187 request->batch_obj ?
2187 request->batch_obj->gtt_offset : 0, 2188 i915_gem_obj_ggtt_offset(request->batch_obj) : 0,
2188 request->ctx ? request->ctx->id : 0, 2189 request->ctx ? request->ctx->id : 0,
2189 acthd); 2190 acthd);
2190 2191
@@ -2390,7 +2391,7 @@ i915_gem_retire_work_handler(struct work_struct *work)
2390 idle &= list_empty(&ring->request_list); 2391 idle &= list_empty(&ring->request_list);
2391 } 2392 }
2392 2393
2393 if (!dev_priv->mm.suspended && !idle) 2394 if (!dev_priv->ums.mm_suspended && !idle)
2394 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 2395 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
2395 round_jiffies_up_relative(HZ)); 2396 round_jiffies_up_relative(HZ));
2396 if (idle) 2397 if (idle)
@@ -2585,7 +2586,7 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
2585 drm_i915_private_t *dev_priv = obj->base.dev->dev_private; 2586 drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
2586 int ret; 2587 int ret;
2587 2588
2588 if (obj->gtt_space == NULL) 2589 if (!i915_gem_obj_ggtt_bound(obj))
2589 return 0; 2590 return 0;
2590 2591
2591 if (obj->pin_count) 2592 if (obj->pin_count)
@@ -2624,9 +2625,7 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
2624 /* Avoid an unnecessary call to unbind on rebind. */ 2625 /* Avoid an unnecessary call to unbind on rebind. */
2625 obj->map_and_fenceable = true; 2626 obj->map_and_fenceable = true;
2626 2627
2627 drm_mm_put_block(obj->gtt_space); 2628 drm_mm_remove_node(&obj->gtt_space);
2628 obj->gtt_space = NULL;
2629 obj->gtt_offset = 0;
2630 2629
2631 return 0; 2630 return 0;
2632} 2631}
@@ -2681,12 +2680,12 @@ static void i965_write_fence_reg(struct drm_device *dev, int reg,
2681 POSTING_READ(fence_reg); 2680 POSTING_READ(fence_reg);
2682 2681
2683 if (obj) { 2682 if (obj) {
2684 u32 size = obj->gtt_space->size; 2683 u32 size = i915_gem_obj_ggtt_size(obj);
2685 uint64_t val; 2684 uint64_t val;
2686 2685
2687 val = (uint64_t)((obj->gtt_offset + size - 4096) & 2686 val = (uint64_t)((i915_gem_obj_ggtt_offset(obj) + size - 4096) &
2688 0xfffff000) << 32; 2687 0xfffff000) << 32;
2689 val |= obj->gtt_offset & 0xfffff000; 2688 val |= i915_gem_obj_ggtt_offset(obj) & 0xfffff000;
2690 val |= (uint64_t)((obj->stride / 128) - 1) << fence_pitch_shift; 2689 val |= (uint64_t)((obj->stride / 128) - 1) << fence_pitch_shift;
2691 if (obj->tiling_mode == I915_TILING_Y) 2690 if (obj->tiling_mode == I915_TILING_Y)
2692 val |= 1 << I965_FENCE_TILING_Y_SHIFT; 2691 val |= 1 << I965_FENCE_TILING_Y_SHIFT;
@@ -2710,15 +2709,15 @@ static void i915_write_fence_reg(struct drm_device *dev, int reg,
2710 u32 val; 2709 u32 val;
2711 2710
2712 if (obj) { 2711 if (obj) {
2713 u32 size = obj->gtt_space->size; 2712 u32 size = i915_gem_obj_ggtt_size(obj);
2714 int pitch_val; 2713 int pitch_val;
2715 int tile_width; 2714 int tile_width;
2716 2715
2717 WARN((obj->gtt_offset & ~I915_FENCE_START_MASK) || 2716 WARN((i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK) ||
2718 (size & -size) != size || 2717 (size & -size) != size ||
2719 (obj->gtt_offset & (size - 1)), 2718 (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
2720 "object 0x%08x [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n", 2719 "object 0x%08lx [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
2721 obj->gtt_offset, obj->map_and_fenceable, size); 2720 i915_gem_obj_ggtt_offset(obj), obj->map_and_fenceable, size);
2722 2721
2723 if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev)) 2722 if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
2724 tile_width = 128; 2723 tile_width = 128;
@@ -2729,7 +2728,7 @@ static void i915_write_fence_reg(struct drm_device *dev, int reg,
2729 pitch_val = obj->stride / tile_width; 2728 pitch_val = obj->stride / tile_width;
2730 pitch_val = ffs(pitch_val) - 1; 2729 pitch_val = ffs(pitch_val) - 1;
2731 2730
2732 val = obj->gtt_offset; 2731 val = i915_gem_obj_ggtt_offset(obj);
2733 if (obj->tiling_mode == I915_TILING_Y) 2732 if (obj->tiling_mode == I915_TILING_Y)
2734 val |= 1 << I830_FENCE_TILING_Y_SHIFT; 2733 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2735 val |= I915_FENCE_SIZE_BITS(size); 2734 val |= I915_FENCE_SIZE_BITS(size);
@@ -2754,19 +2753,19 @@ static void i830_write_fence_reg(struct drm_device *dev, int reg,
2754 uint32_t val; 2753 uint32_t val;
2755 2754
2756 if (obj) { 2755 if (obj) {
2757 u32 size = obj->gtt_space->size; 2756 u32 size = i915_gem_obj_ggtt_size(obj);
2758 uint32_t pitch_val; 2757 uint32_t pitch_val;
2759 2758
2760 WARN((obj->gtt_offset & ~I830_FENCE_START_MASK) || 2759 WARN((i915_gem_obj_ggtt_offset(obj) & ~I830_FENCE_START_MASK) ||
2761 (size & -size) != size || 2760 (size & -size) != size ||
2762 (obj->gtt_offset & (size - 1)), 2761 (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
2763 "object 0x%08x not 512K or pot-size 0x%08x aligned\n", 2762 "object 0x%08lx not 512K or pot-size 0x%08x aligned\n",
2764 obj->gtt_offset, size); 2763 i915_gem_obj_ggtt_offset(obj), size);
2765 2764
2766 pitch_val = obj->stride / 128; 2765 pitch_val = obj->stride / 128;
2767 pitch_val = ffs(pitch_val) - 1; 2766 pitch_val = ffs(pitch_val) - 1;
2768 2767
2769 val = obj->gtt_offset; 2768 val = i915_gem_obj_ggtt_offset(obj);
2770 if (obj->tiling_mode == I915_TILING_Y) 2769 if (obj->tiling_mode == I915_TILING_Y)
2771 val |= 1 << I830_FENCE_TILING_Y_SHIFT; 2770 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2772 val |= I830_FENCE_SIZE_BITS(size); 2771 val |= I830_FENCE_SIZE_BITS(size);
@@ -2983,7 +2982,7 @@ static bool i915_gem_valid_gtt_space(struct drm_device *dev,
2983 if (HAS_LLC(dev)) 2982 if (HAS_LLC(dev))
2984 return true; 2983 return true;
2985 2984
2986 if (gtt_space == NULL) 2985 if (!drm_mm_node_allocated(gtt_space))
2987 return true; 2986 return true;
2988 2987
2989 if (list_empty(&gtt_space->node_list)) 2988 if (list_empty(&gtt_space->node_list))
@@ -3016,8 +3015,8 @@ static void i915_gem_verify_gtt(struct drm_device *dev)
3016 3015
3017 if (obj->cache_level != obj->gtt_space->color) { 3016 if (obj->cache_level != obj->gtt_space->color) {
3018 printk(KERN_ERR "object reserved space [%08lx, %08lx] with wrong color, cache_level=%x, color=%lx\n", 3017 printk(KERN_ERR "object reserved space [%08lx, %08lx] with wrong color, cache_level=%x, color=%lx\n",
3019 obj->gtt_space->start, 3018 i915_gem_obj_ggtt_offset(obj),
3020 obj->gtt_space->start + obj->gtt_space->size, 3019 i915_gem_obj_ggtt_offset(obj) + i915_gem_obj_ggtt_size(obj),
3021 obj->cache_level, 3020 obj->cache_level,
3022 obj->gtt_space->color); 3021 obj->gtt_space->color);
3023 err++; 3022 err++;
@@ -3028,8 +3027,8 @@ static void i915_gem_verify_gtt(struct drm_device *dev)
3028 obj->gtt_space, 3027 obj->gtt_space,
3029 obj->cache_level)) { 3028 obj->cache_level)) {
3030 printk(KERN_ERR "invalid GTT space found at [%08lx, %08lx] - color=%x\n", 3029 printk(KERN_ERR "invalid GTT space found at [%08lx, %08lx] - color=%x\n",
3031 obj->gtt_space->start, 3030 i915_gem_obj_ggtt_offset(obj),
3032 obj->gtt_space->start + obj->gtt_space->size, 3031 i915_gem_obj_ggtt_offset(obj) + i915_gem_obj_ggtt_size(obj),
3033 obj->cache_level); 3032 obj->cache_level);
3034 err++; 3033 err++;
3035 continue; 3034 continue;
@@ -3051,7 +3050,6 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
3051{ 3050{
3052 struct drm_device *dev = obj->base.dev; 3051 struct drm_device *dev = obj->base.dev;
3053 drm_i915_private_t *dev_priv = dev->dev_private; 3052 drm_i915_private_t *dev_priv = dev->dev_private;
3054 struct drm_mm_node *node;
3055 u32 size, fence_size, fence_alignment, unfenced_alignment; 3053 u32 size, fence_size, fence_alignment, unfenced_alignment;
3056 bool mappable, fenceable; 3054 bool mappable, fenceable;
3057 size_t gtt_max = map_and_fenceable ? 3055 size_t gtt_max = map_and_fenceable ?
@@ -3096,14 +3094,9 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
3096 3094
3097 i915_gem_object_pin_pages(obj); 3095 i915_gem_object_pin_pages(obj);
3098 3096
3099 node = kzalloc(sizeof(*node), GFP_KERNEL);
3100 if (node == NULL) {
3101 i915_gem_object_unpin_pages(obj);
3102 return -ENOMEM;
3103 }
3104
3105search_free: 3097search_free:
3106 ret = drm_mm_insert_node_in_range_generic(&dev_priv->mm.gtt_space, node, 3098 ret = drm_mm_insert_node_in_range_generic(&dev_priv->mm.gtt_space,
3099 &obj->gtt_space,
3107 size, alignment, 3100 size, alignment,
3108 obj->cache_level, 0, gtt_max); 3101 obj->cache_level, 0, gtt_max);
3109 if (ret) { 3102 if (ret) {
@@ -3115,34 +3108,31 @@ search_free:
3115 goto search_free; 3108 goto search_free;
3116 3109
3117 i915_gem_object_unpin_pages(obj); 3110 i915_gem_object_unpin_pages(obj);
3118 kfree(node);
3119 return ret; 3111 return ret;
3120 } 3112 }
3121 if (WARN_ON(!i915_gem_valid_gtt_space(dev, node, obj->cache_level))) { 3113 if (WARN_ON(!i915_gem_valid_gtt_space(dev, &obj->gtt_space,
3114 obj->cache_level))) {
3122 i915_gem_object_unpin_pages(obj); 3115 i915_gem_object_unpin_pages(obj);
3123 drm_mm_put_block(node); 3116 drm_mm_remove_node(&obj->gtt_space);
3124 return -EINVAL; 3117 return -EINVAL;
3125 } 3118 }
3126 3119
3127 ret = i915_gem_gtt_prepare_object(obj); 3120 ret = i915_gem_gtt_prepare_object(obj);
3128 if (ret) { 3121 if (ret) {
3129 i915_gem_object_unpin_pages(obj); 3122 i915_gem_object_unpin_pages(obj);
3130 drm_mm_put_block(node); 3123 drm_mm_remove_node(&obj->gtt_space);
3131 return ret; 3124 return ret;
3132 } 3125 }
3133 3126
3134 list_move_tail(&obj->global_list, &dev_priv->mm.bound_list); 3127 list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
3135 list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list); 3128 list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
3136 3129
3137 obj->gtt_space = node;
3138 obj->gtt_offset = node->start;
3139
3140 fenceable = 3130 fenceable =
3141 node->size == fence_size && 3131 i915_gem_obj_ggtt_size(obj) == fence_size &&
3142 (node->start & (fence_alignment - 1)) == 0; 3132 (i915_gem_obj_ggtt_offset(obj) & (fence_alignment - 1)) == 0;
3143 3133
3144 mappable = 3134 mappable = i915_gem_obj_ggtt_offset(obj) + obj->base.size <=
3145 obj->gtt_offset + obj->base.size <= dev_priv->gtt.mappable_end; 3135 dev_priv->gtt.mappable_end;
3146 3136
3147 obj->map_and_fenceable = mappable && fenceable; 3137 obj->map_and_fenceable = mappable && fenceable;
3148 3138
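Because the node is now embedded, the bind path above loses its kzalloc()/kfree() pair: drm_mm_insert_node_in_range_generic() fills obj->gtt_space in place, and every error path unwinds with drm_mm_remove_node(). The control flow, condensed (the evict-and-retry loop and the prepare_fails condition are abbreviated placeholders, not driver code):

i915_gem_object_pin_pages(obj);
ret = drm_mm_insert_node_in_range_generic(&dev_priv->mm.gtt_space,
					  &obj->gtt_space, size, alignment,
					  obj->cache_level, 0, gtt_max);
if (ret) {			/* evict-and-retry elided */
	i915_gem_object_unpin_pages(obj);
	return ret;		/* nothing left to kfree() */
}
if (prepare_fails) {		/* any later failure: */
	i915_gem_object_unpin_pages(obj);
	drm_mm_remove_node(&obj->gtt_space);	/* hand the range back */
	return -EINVAL;
}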
@@ -3244,7 +3234,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
3244 int ret; 3234 int ret;
3245 3235
3246 /* Not valid to be called on unbound objects. */ 3236 /* Not valid to be called on unbound objects. */
3247 if (obj->gtt_space == NULL) 3237 if (!i915_gem_obj_ggtt_bound(obj))
3248 return -EINVAL; 3238 return -EINVAL;
3249 3239
3250 if (obj->base.write_domain == I915_GEM_DOMAIN_GTT) 3240 if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
@@ -3303,13 +3293,13 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
3303 return -EBUSY; 3293 return -EBUSY;
3304 } 3294 }
3305 3295
3306 if (!i915_gem_valid_gtt_space(dev, obj->gtt_space, cache_level)) { 3296 if (!i915_gem_valid_gtt_space(dev, &obj->gtt_space, cache_level)) {
3307 ret = i915_gem_object_unbind(obj); 3297 ret = i915_gem_object_unbind(obj);
3308 if (ret) 3298 if (ret)
3309 return ret; 3299 return ret;
3310 } 3300 }
3311 3301
3312 if (obj->gtt_space) { 3302 if (i915_gem_obj_ggtt_bound(obj)) {
3313 ret = i915_gem_object_finish_gpu(obj); 3303 ret = i915_gem_object_finish_gpu(obj);
3314 if (ret) 3304 if (ret)
3315 return ret; 3305 return ret;
@@ -3332,7 +3322,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
3332 i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt, 3322 i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
3333 obj, cache_level); 3323 obj, cache_level);
3334 3324
3335 obj->gtt_space->color = cache_level; 3325 i915_gem_obj_ggtt_set_color(obj, cache_level);
3336 } 3326 }
3337 3327
3338 if (cache_level == I915_CACHE_NONE) { 3328 if (cache_level == I915_CACHE_NONE) {
@@ -3613,14 +3603,14 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
3613 if (WARN_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT)) 3603 if (WARN_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
3614 return -EBUSY; 3604 return -EBUSY;
3615 3605
3616 if (obj->gtt_space != NULL) { 3606 if (i915_gem_obj_ggtt_bound(obj)) {
3617 if ((alignment && obj->gtt_offset & (alignment - 1)) || 3607 if ((alignment && i915_gem_obj_ggtt_offset(obj) & (alignment - 1)) ||
3618 (map_and_fenceable && !obj->map_and_fenceable)) { 3608 (map_and_fenceable && !obj->map_and_fenceable)) {
3619 WARN(obj->pin_count, 3609 WARN(obj->pin_count,
3620 "bo is already pinned with incorrect alignment:" 3610 "bo is already pinned with incorrect alignment:"
3621 " offset=%x, req.alignment=%x, req.map_and_fenceable=%d," 3611 " offset=%lx, req.alignment=%x, req.map_and_fenceable=%d,"
3622 " obj->map_and_fenceable=%d\n", 3612 " obj->map_and_fenceable=%d\n",
3623 obj->gtt_offset, alignment, 3613 i915_gem_obj_ggtt_offset(obj), alignment,
3624 map_and_fenceable, 3614 map_and_fenceable,
3625 obj->map_and_fenceable); 3615 obj->map_and_fenceable);
3626 ret = i915_gem_object_unbind(obj); 3616 ret = i915_gem_object_unbind(obj);
@@ -3629,7 +3619,7 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
3629 } 3619 }
3630 } 3620 }
3631 3621
3632 if (obj->gtt_space == NULL) { 3622 if (!i915_gem_obj_ggtt_bound(obj)) {
3633 struct drm_i915_private *dev_priv = obj->base.dev->dev_private; 3623 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
3634 3624
3635 ret = i915_gem_object_bind_to_gtt(obj, alignment, 3625 ret = i915_gem_object_bind_to_gtt(obj, alignment,
@@ -3655,7 +3645,7 @@ void
3655i915_gem_object_unpin(struct drm_i915_gem_object *obj) 3645i915_gem_object_unpin(struct drm_i915_gem_object *obj)
3656{ 3646{
3657 BUG_ON(obj->pin_count == 0); 3647 BUG_ON(obj->pin_count == 0);
3658 BUG_ON(obj->gtt_space == NULL); 3648 BUG_ON(!i915_gem_obj_ggtt_bound(obj));
3659 3649
3660 if (--obj->pin_count == 0) 3650 if (--obj->pin_count == 0)
3661 obj->pin_mappable = false; 3651 obj->pin_mappable = false;
@@ -3705,7 +3695,7 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
3705 * as the X server doesn't manage domains yet 3695 * as the X server doesn't manage domains yet
3706 */ 3696 */
3707 i915_gem_object_flush_cpu_write_domain(obj); 3697 i915_gem_object_flush_cpu_write_domain(obj);
3708 args->offset = obj->gtt_offset; 3698 args->offset = i915_gem_obj_ggtt_offset(obj);
3709out: 3699out:
3710 drm_gem_object_unreference(&obj->base); 3700 drm_gem_object_unreference(&obj->base);
3711unlock: 3701unlock:
@@ -3974,9 +3964,7 @@ i915_gem_idle(struct drm_device *dev)
3974 drm_i915_private_t *dev_priv = dev->dev_private; 3964 drm_i915_private_t *dev_priv = dev->dev_private;
3975 int ret; 3965 int ret;
3976 3966
3977 mutex_lock(&dev->struct_mutex); 3967 if (dev_priv->ums.mm_suspended) {
3978
3979 if (dev_priv->mm.suspended) {
3980 mutex_unlock(&dev->struct_mutex); 3968 mutex_unlock(&dev->struct_mutex);
3981 return 0; 3969 return 0;
3982 } 3970 }
@@ -3992,18 +3980,11 @@ i915_gem_idle(struct drm_device *dev)
3992 if (!drm_core_check_feature(dev, DRIVER_MODESET)) 3980 if (!drm_core_check_feature(dev, DRIVER_MODESET))
3993 i915_gem_evict_everything(dev); 3981 i915_gem_evict_everything(dev);
3994 3982
3995 /* Hack! Don't let anybody do execbuf while we don't control the chip.
3996 * We need to replace this with a semaphore, or something.
3997 * And not confound mm.suspended!
3998 */
3999 dev_priv->mm.suspended = 1;
4000 del_timer_sync(&dev_priv->gpu_error.hangcheck_timer); 3983 del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
4001 3984
4002 i915_kernel_lost_context(dev); 3985 i915_kernel_lost_context(dev);
4003 i915_gem_cleanup_ringbuffer(dev); 3986 i915_gem_cleanup_ringbuffer(dev);
4004 3987
4005 mutex_unlock(&dev->struct_mutex);
4006
4007 /* Cancel the retire work handler, which should be idle now. */ 3988 /* Cancel the retire work handler, which should be idle now. */
4008 cancel_delayed_work_sync(&dev_priv->mm.retire_work); 3989 cancel_delayed_work_sync(&dev_priv->mm.retire_work);
4009 3990
@@ -4213,7 +4194,7 @@ int
4213i915_gem_entervt_ioctl(struct drm_device *dev, void *data, 4194i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
4214 struct drm_file *file_priv) 4195 struct drm_file *file_priv)
4215{ 4196{
4216 drm_i915_private_t *dev_priv = dev->dev_private; 4197 struct drm_i915_private *dev_priv = dev->dev_private;
4217 int ret; 4198 int ret;
4218 4199
4219 if (drm_core_check_feature(dev, DRIVER_MODESET)) 4200 if (drm_core_check_feature(dev, DRIVER_MODESET))
@@ -4225,7 +4206,7 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
4225 } 4206 }
4226 4207
4227 mutex_lock(&dev->struct_mutex); 4208 mutex_lock(&dev->struct_mutex);
4228 dev_priv->mm.suspended = 0; 4209 dev_priv->ums.mm_suspended = 0;
4229 4210
4230 ret = i915_gem_init_hw(dev); 4211 ret = i915_gem_init_hw(dev);
4231 if (ret != 0) { 4212 if (ret != 0) {
@@ -4245,7 +4226,7 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
4245cleanup_ringbuffer: 4226cleanup_ringbuffer:
4246 mutex_lock(&dev->struct_mutex); 4227 mutex_lock(&dev->struct_mutex);
4247 i915_gem_cleanup_ringbuffer(dev); 4228 i915_gem_cleanup_ringbuffer(dev);
4248 dev_priv->mm.suspended = 1; 4229 dev_priv->ums.mm_suspended = 1;
4249 mutex_unlock(&dev->struct_mutex); 4230 mutex_unlock(&dev->struct_mutex);
4250 4231
4251 return ret; 4232 return ret;
@@ -4255,11 +4236,26 @@ int
4255i915_gem_leavevt_ioctl(struct drm_device *dev, void *data, 4236i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
4256 struct drm_file *file_priv) 4237 struct drm_file *file_priv)
4257{ 4238{
4239 struct drm_i915_private *dev_priv = dev->dev_private;
4240 int ret;
4241
4258 if (drm_core_check_feature(dev, DRIVER_MODESET)) 4242 if (drm_core_check_feature(dev, DRIVER_MODESET))
4259 return 0; 4243 return 0;
4260 4244
4261 drm_irq_uninstall(dev); 4245 drm_irq_uninstall(dev);
4262 return i915_gem_idle(dev); 4246
4247 mutex_lock(&dev->struct_mutex);
4248 ret = i915_gem_idle(dev);
4249
4250 /* Hack! Don't let anybody do execbuf while we don't control the chip.
4251 * We need to replace this with a semaphore, or something.
4252 * And not confound ums.mm_suspended!
4253 */
4254 if (ret != 0)
4255 dev_priv->ums.mm_suspended = 1;
4256 mutex_unlock(&dev->struct_mutex);
4257
4258 return ret;
4263} 4259}
4264 4260
4265void 4261void
@@ -4270,9 +4266,11 @@ i915_gem_lastclose(struct drm_device *dev)
4270 if (drm_core_check_feature(dev, DRIVER_MODESET)) 4266 if (drm_core_check_feature(dev, DRIVER_MODESET))
4271 return; 4267 return;
4272 4268
4269 mutex_lock(&dev->struct_mutex);
4273 ret = i915_gem_idle(dev); 4270 ret = i915_gem_idle(dev);
4274 if (ret) 4271 if (ret)
4275 DRM_ERROR("failed to idle hardware: %d\n", ret); 4272 DRM_ERROR("failed to idle hardware: %d\n", ret);
4273 mutex_unlock(&dev->struct_mutex);
4276} 4274}
4277 4275
4278static void 4276static void
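Taken together, these three hunks change the locking contract: i915_gem_idle() no longer takes struct_mutex or sets the suspend flag itself. Callers hold the mutex around the call, and only the LeaveVT path flags ums.mm_suspended (when idling fails, to keep execbuf locked out of a chip we no longer control). The new caller pattern, as lastclose now does it:

mutex_lock(&dev->struct_mutex);
ret = i915_gem_idle(dev);	/* caller owns struct_mutex */
if (ret)
	DRM_ERROR("failed to idle hardware: %d\n", ret);
mutex_unlock(&dev->struct_mutex);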
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 51b7a2171cae..2074544682cf 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -377,7 +377,7 @@ mi_set_context(struct intel_ring_buffer *ring,
377 377
378 intel_ring_emit(ring, MI_NOOP); 378 intel_ring_emit(ring, MI_NOOP);
379 intel_ring_emit(ring, MI_SET_CONTEXT); 379 intel_ring_emit(ring, MI_SET_CONTEXT);
380 intel_ring_emit(ring, new_context->obj->gtt_offset | 380 intel_ring_emit(ring, i915_gem_obj_ggtt_offset(new_context->obj) |
381 MI_MM_SPACE_GTT | 381 MI_MM_SPACE_GTT |
382 MI_SAVE_EXT_STATE_EN | 382 MI_SAVE_EXT_STATE_EN |
383 MI_RESTORE_EXT_STATE_EN | 383 MI_RESTORE_EXT_STATE_EN |
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index c86d5d9356fd..5f8afc48bb7e 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -38,7 +38,7 @@ mark_free(struct drm_i915_gem_object *obj, struct list_head *unwind)
38 return false; 38 return false;
39 39
40 list_add(&obj->exec_list, unwind); 40 list_add(&obj->exec_list, unwind);
41 return drm_mm_scan_add_block(obj->gtt_space); 41 return drm_mm_scan_add_block(&obj->gtt_space);
42} 42}
43 43
44int 44int
@@ -107,7 +107,7 @@ none:
107 struct drm_i915_gem_object, 107 struct drm_i915_gem_object,
108 exec_list); 108 exec_list);
109 109
110 ret = drm_mm_scan_remove_block(obj->gtt_space); 110 ret = drm_mm_scan_remove_block(&obj->gtt_space);
111 BUG_ON(ret); 111 BUG_ON(ret);
112 112
113 list_del_init(&obj->exec_list); 113 list_del_init(&obj->exec_list);
@@ -127,7 +127,7 @@ found:
127 obj = list_first_entry(&unwind_list, 127 obj = list_first_entry(&unwind_list,
128 struct drm_i915_gem_object, 128 struct drm_i915_gem_object,
129 exec_list); 129 exec_list);
130 if (drm_mm_scan_remove_block(obj->gtt_space)) { 130 if (drm_mm_scan_remove_block(&obj->gtt_space)) {
131 list_move(&obj->exec_list, &eviction_list); 131 list_move(&obj->exec_list, &eviction_list);
132 drm_gem_object_reference(&obj->base); 132 drm_gem_object_reference(&obj->base);
133 continue; 133 continue;
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 87a3227e5179..64eda4463b70 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -188,7 +188,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
188 return -ENOENT; 188 return -ENOENT;
189 189
190 target_i915_obj = to_intel_bo(target_obj); 190 target_i915_obj = to_intel_bo(target_obj);
191 target_offset = target_i915_obj->gtt_offset; 191 target_offset = i915_gem_obj_ggtt_offset(target_i915_obj);
192 192
193 /* Sandybridge PPGTT errata: We need a global gtt mapping for MI and 193 /* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
194 * pipe_control writes because the gpu doesn't properly redirect them 194 * pipe_control writes because the gpu doesn't properly redirect them
@@ -280,7 +280,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
280 return ret; 280 return ret;
281 281
282 /* Map the page containing the relocation we're going to perform. */ 282 /* Map the page containing the relocation we're going to perform. */
283 reloc->offset += obj->gtt_offset; 283 reloc->offset += i915_gem_obj_ggtt_offset(obj);
284 reloc_page = io_mapping_map_atomic_wc(dev_priv->gtt.mappable, 284 reloc_page = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
285 reloc->offset & PAGE_MASK); 285 reloc->offset & PAGE_MASK);
286 reloc_entry = (uint32_t __iomem *) 286 reloc_entry = (uint32_t __iomem *)
@@ -436,8 +436,8 @@ i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj,
436 obj->has_aliasing_ppgtt_mapping = 1; 436 obj->has_aliasing_ppgtt_mapping = 1;
437 } 437 }
438 438
439 if (entry->offset != obj->gtt_offset) { 439 if (entry->offset != i915_gem_obj_ggtt_offset(obj)) {
440 entry->offset = obj->gtt_offset; 440 entry->offset = i915_gem_obj_ggtt_offset(obj);
441 *need_reloc = true; 441 *need_reloc = true;
442 } 442 }
443 443
@@ -458,7 +458,7 @@ i915_gem_execbuffer_unreserve_object(struct drm_i915_gem_object *obj)
458{ 458{
459 struct drm_i915_gem_exec_object2 *entry; 459 struct drm_i915_gem_exec_object2 *entry;
460 460
461 if (!obj->gtt_space) 461 if (!i915_gem_obj_ggtt_bound(obj))
462 return; 462 return;
463 463
464 entry = obj->exec_entry; 464 entry = obj->exec_entry;
@@ -530,7 +530,7 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
530 struct drm_i915_gem_exec_object2 *entry = obj->exec_entry; 530 struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
531 bool need_fence, need_mappable; 531 bool need_fence, need_mappable;
532 532
533 if (!obj->gtt_space) 533 if (!i915_gem_obj_ggtt_bound(obj))
534 continue; 534 continue;
535 535
536 need_fence = 536 need_fence =
@@ -539,7 +539,8 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
539 obj->tiling_mode != I915_TILING_NONE; 539 obj->tiling_mode != I915_TILING_NONE;
540 need_mappable = need_fence || need_reloc_mappable(obj); 540 need_mappable = need_fence || need_reloc_mappable(obj);
541 541
542 if ((entry->alignment && obj->gtt_offset & (entry->alignment - 1)) || 542 if ((entry->alignment &&
543 i915_gem_obj_ggtt_offset(obj) & (entry->alignment - 1)) ||
543 (need_mappable && !obj->map_and_fenceable)) 544 (need_mappable && !obj->map_and_fenceable))
544 ret = i915_gem_object_unbind(obj); 545 ret = i915_gem_object_unbind(obj);
545 else 546 else
@@ -550,7 +551,7 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
550 551
551 /* Bind fresh objects */ 552 /* Bind fresh objects */
552 list_for_each_entry(obj, objects, exec_list) { 553 list_for_each_entry(obj, objects, exec_list) {
553 if (obj->gtt_space) 554 if (i915_gem_obj_ggtt_bound(obj))
554 continue; 555 continue;
555 556
556 ret = i915_gem_execbuffer_reserve_object(obj, ring, need_relocs); 557 ret = i915_gem_execbuffer_reserve_object(obj, ring, need_relocs);
@@ -972,7 +973,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
972 if (ret) 973 if (ret)
973 goto pre_mutex_err; 974 goto pre_mutex_err;
974 975
975 if (dev_priv->mm.suspended) { 976 if (dev_priv->ums.mm_suspended) {
976 mutex_unlock(&dev->struct_mutex); 977 mutex_unlock(&dev->struct_mutex);
977 ret = -EBUSY; 978 ret = -EBUSY;
978 goto pre_mutex_err; 979 goto pre_mutex_err;
@@ -1058,7 +1059,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
1058 goto err; 1059 goto err;
1059 } 1060 }
1060 1061
1061 exec_start = batch_obj->gtt_offset + args->batch_start_offset; 1062 exec_start = i915_gem_obj_ggtt_offset(batch_obj) + args->batch_start_offset;
1062 exec_len = args->batch_len; 1063 exec_len = args->batch_len;
1063 if (cliprects) { 1064 if (cliprects) {
1064 for (i = 0; i < args->num_cliprects; i++) { 1065 for (i = 0; i < args->num_cliprects; i++) {
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 5101ab6869b4..242d0f9bb9e4 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -28,6 +28,9 @@
28#include "i915_trace.h" 28#include "i915_trace.h"
29#include "intel_drv.h" 29#include "intel_drv.h"
30 30
31#define GEN6_PPGTT_PD_ENTRIES 512
32#define I915_PPGTT_PT_ENTRIES (PAGE_SIZE / sizeof(gen6_gtt_pte_t))
33
31/* PPGTT stuff */ 34/* PPGTT stuff */
32#define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0)) 35#define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0))
33 36
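The page-directory and page-table sizes move out of i915_drv.h and become private to this file, with the PTE count derived from the page size instead of hard-coded. The address-space arithmetic that falls out, as a comment block:

/* With 4 KiB pages and 4-byte gen6 PTEs:
 *	I915_PPGTT_PT_ENTRIES = PAGE_SIZE / sizeof(gen6_gtt_pte_t)
 *	                      = 4096 / 4 = 1024 PTEs per page table
 *	GEN6_PPGTT_PD_ENTRIES = 512 page tables
 * so one aliasing PPGTT spans 512 * 1024 * 4096 bytes = 2 GiB.
 */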
@@ -42,8 +45,7 @@
42#define GEN6_PTE_CACHE_LLC_MLC (3 << 1) 45#define GEN6_PTE_CACHE_LLC_MLC (3 << 1)
43#define GEN6_PTE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr) 46#define GEN6_PTE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr)
44 47
45static gen6_gtt_pte_t gen6_pte_encode(struct drm_device *dev, 48static gen6_gtt_pte_t gen6_pte_encode(dma_addr_t addr,
46 dma_addr_t addr,
47 enum i915_cache_level level) 49 enum i915_cache_level level)
48{ 50{
49 gen6_gtt_pte_t pte = GEN6_PTE_VALID; 51 gen6_gtt_pte_t pte = GEN6_PTE_VALID;
@@ -69,8 +71,7 @@ static gen6_gtt_pte_t gen6_pte_encode(struct drm_device *dev,
69#define BYT_PTE_WRITEABLE (1 << 1) 71#define BYT_PTE_WRITEABLE (1 << 1)
70#define BYT_PTE_SNOOPED_BY_CPU_CACHES (1 << 2) 72#define BYT_PTE_SNOOPED_BY_CPU_CACHES (1 << 2)
71 73
72static gen6_gtt_pte_t byt_pte_encode(struct drm_device *dev, 74static gen6_gtt_pte_t byt_pte_encode(dma_addr_t addr,
73 dma_addr_t addr,
74 enum i915_cache_level level) 75 enum i915_cache_level level)
75{ 76{
76 gen6_gtt_pte_t pte = GEN6_PTE_VALID; 77 gen6_gtt_pte_t pte = GEN6_PTE_VALID;
@@ -87,8 +88,7 @@ static gen6_gtt_pte_t byt_pte_encode(struct drm_device *dev,
87 return pte; 88 return pte;
88} 89}
89 90
90static gen6_gtt_pte_t hsw_pte_encode(struct drm_device *dev, 91static gen6_gtt_pte_t hsw_pte_encode(dma_addr_t addr,
91 dma_addr_t addr,
92 enum i915_cache_level level) 92 enum i915_cache_level level)
93{ 93{
94 gen6_gtt_pte_t pte = GEN6_PTE_VALID; 94 gen6_gtt_pte_t pte = GEN6_PTE_VALID;
@@ -185,13 +185,13 @@ static void gen6_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt,
185 unsigned first_entry, 185 unsigned first_entry,
186 unsigned num_entries) 186 unsigned num_entries)
187{ 187{
188 struct drm_i915_private *dev_priv = ppgtt->dev->dev_private;
188 gen6_gtt_pte_t *pt_vaddr, scratch_pte; 189 gen6_gtt_pte_t *pt_vaddr, scratch_pte;
189 unsigned act_pt = first_entry / I915_PPGTT_PT_ENTRIES; 190 unsigned act_pt = first_entry / I915_PPGTT_PT_ENTRIES;
190 unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES; 191 unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
191 unsigned last_pte, i; 192 unsigned last_pte, i;
192 193
193 scratch_pte = ppgtt->pte_encode(ppgtt->dev, 194 scratch_pte = ppgtt->pte_encode(dev_priv->gtt.scratch.addr,
194 ppgtt->scratch_page_dma_addr,
195 I915_CACHE_LLC); 195 I915_CACHE_LLC);
196 196
197 while (num_entries) { 197 while (num_entries) {
@@ -227,8 +227,7 @@ static void gen6_ppgtt_insert_entries(struct i915_hw_ppgtt *ppgtt,
227 dma_addr_t page_addr; 227 dma_addr_t page_addr;
228 228
229 page_addr = sg_page_iter_dma_address(&sg_iter); 229 page_addr = sg_page_iter_dma_address(&sg_iter);
230 pt_vaddr[act_pte] = ppgtt->pte_encode(ppgtt->dev, page_addr, 230 pt_vaddr[act_pte] = ppgtt->pte_encode(page_addr, cache_level);
231 cache_level);
232 if (++act_pte == I915_PPGTT_PT_ENTRIES) { 231 if (++act_pte == I915_PPGTT_PT_ENTRIES) {
233 kunmap_atomic(pt_vaddr); 232 kunmap_atomic(pt_vaddr);
234 act_pt++; 233 act_pt++;
@@ -278,7 +277,7 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
278 } else { 277 } else {
279 ppgtt->pte_encode = gen6_pte_encode; 278 ppgtt->pte_encode = gen6_pte_encode;
280 } 279 }
281 ppgtt->num_pd_entries = I915_PPGTT_PD_ENTRIES; 280 ppgtt->num_pd_entries = GEN6_PPGTT_PD_ENTRIES;
282 ppgtt->enable = gen6_ppgtt_enable; 281 ppgtt->enable = gen6_ppgtt_enable;
283 ppgtt->clear_range = gen6_ppgtt_clear_range; 282 ppgtt->clear_range = gen6_ppgtt_clear_range;
284 ppgtt->insert_entries = gen6_ppgtt_insert_entries; 283 ppgtt->insert_entries = gen6_ppgtt_insert_entries;
@@ -348,7 +347,6 @@ static int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
348 return -ENOMEM; 347 return -ENOMEM;
349 348
350 ppgtt->dev = dev; 349 ppgtt->dev = dev;
351 ppgtt->scratch_page_dma_addr = dev_priv->gtt.scratch_page_dma;
352 350
353 if (INTEL_INFO(dev)->gen < 8) 351 if (INTEL_INFO(dev)->gen < 8)
354 ret = gen6_ppgtt_init(ppgtt); 352 ret = gen6_ppgtt_init(ppgtt);
@@ -380,7 +378,7 @@ void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
380 enum i915_cache_level cache_level) 378 enum i915_cache_level cache_level)
381{ 379{
382 ppgtt->insert_entries(ppgtt, obj->pages, 380 ppgtt->insert_entries(ppgtt, obj->pages,
383 obj->gtt_space->start >> PAGE_SHIFT, 381 i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT,
384 cache_level); 382 cache_level);
385} 383}
386 384
@@ -388,7 +386,7 @@ void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
388 struct drm_i915_gem_object *obj) 386 struct drm_i915_gem_object *obj)
389{ 387{
390 ppgtt->clear_range(ppgtt, 388 ppgtt->clear_range(ppgtt,
391 obj->gtt_space->start >> PAGE_SHIFT, 389 i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT,
392 obj->base.size >> PAGE_SHIFT); 390 obj->base.size >> PAGE_SHIFT);
393} 391}
394 392
@@ -480,7 +478,7 @@ static void gen6_ggtt_insert_entries(struct drm_device *dev,
480 478
481 for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) { 479 for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
482 addr = sg_page_iter_dma_address(&sg_iter); 480 addr = sg_page_iter_dma_address(&sg_iter);
483 iowrite32(dev_priv->gtt.pte_encode(dev, addr, level), 481 iowrite32(dev_priv->gtt.pte_encode(addr, level),
484 &gtt_entries[i]); 482 &gtt_entries[i]);
485 i++; 483 i++;
486 } 484 }
@@ -493,7 +491,7 @@ static void gen6_ggtt_insert_entries(struct drm_device *dev,
493 */ 491 */
494 if (i != 0) 492 if (i != 0)
495 WARN_ON(readl(&gtt_entries[i-1]) 493 WARN_ON(readl(&gtt_entries[i-1])
496 != dev_priv->gtt.pte_encode(dev, addr, level)); 494 != dev_priv->gtt.pte_encode(addr, level));
497 495
498 /* This next bit makes the above posting read even more important. We 496 /* This next bit makes the above posting read even more important. We
499 * want to flush the TLBs only after we're certain all the PTE updates 497 * want to flush the TLBs only after we're certain all the PTE updates
@@ -518,8 +516,7 @@ static void gen6_ggtt_clear_range(struct drm_device *dev,
518 first_entry, num_entries, max_entries)) 516 first_entry, num_entries, max_entries))
519 num_entries = max_entries; 517 num_entries = max_entries;
520 518
521 scratch_pte = dev_priv->gtt.pte_encode(dev, 519 scratch_pte = dev_priv->gtt.pte_encode(dev_priv->gtt.scratch.addr,
522 dev_priv->gtt.scratch_page_dma,
523 I915_CACHE_LLC); 520 I915_CACHE_LLC);
524 for (i = 0; i < num_entries; i++) 521 for (i = 0; i < num_entries; i++)
525 iowrite32(scratch_pte, &gtt_base[i]); 522 iowrite32(scratch_pte, &gtt_base[i]);
@@ -554,7 +551,7 @@ void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
554 struct drm_i915_private *dev_priv = dev->dev_private; 551 struct drm_i915_private *dev_priv = dev->dev_private;
555 552
556 dev_priv->gtt.gtt_insert_entries(dev, obj->pages, 553 dev_priv->gtt.gtt_insert_entries(dev, obj->pages,
557 obj->gtt_space->start >> PAGE_SHIFT, 554 i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT,
558 cache_level); 555 cache_level);
559 556
560 obj->has_global_gtt_mapping = 1; 557 obj->has_global_gtt_mapping = 1;
@@ -566,7 +563,7 @@ void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
566 struct drm_i915_private *dev_priv = dev->dev_private; 563 struct drm_i915_private *dev_priv = dev->dev_private;
567 564
568 dev_priv->gtt.gtt_clear_range(obj->base.dev, 565 dev_priv->gtt.gtt_clear_range(obj->base.dev,
569 obj->gtt_space->start >> PAGE_SHIFT, 566 i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT,
570 obj->base.size >> PAGE_SHIFT); 567 obj->base.size >> PAGE_SHIFT);
571 568
572 obj->has_global_gtt_mapping = 0; 569 obj->has_global_gtt_mapping = 0;
@@ -632,14 +629,15 @@ void i915_gem_setup_global_gtt(struct drm_device *dev,
632 629
633 /* Mark any preallocated objects as occupied */ 630 /* Mark any preallocated objects as occupied */
634 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { 631 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
635 DRM_DEBUG_KMS("reserving preallocated space: %x + %zx\n", 632 int ret;
636 obj->gtt_offset, obj->base.size); 633 DRM_DEBUG_KMS("reserving preallocated space: %lx + %zx\n",
637 634 i915_gem_obj_ggtt_offset(obj), obj->base.size);
638 BUG_ON(obj->gtt_space != I915_GTT_RESERVED); 635
639 obj->gtt_space = drm_mm_create_block(&dev_priv->mm.gtt_space, 636 WARN_ON(i915_gem_obj_ggtt_bound(obj));
640 obj->gtt_offset, 637 ret = drm_mm_reserve_node(&dev_priv->mm.gtt_space,
641 obj->base.size, 638 &obj->gtt_space);
642 false); 639 if (ret)
640 DRM_DEBUG_KMS("Reservation failed\n");
643 obj->has_global_gtt_mapping = 1; 641 obj->has_global_gtt_mapping = 1;
644 } 642 }
645 643
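This hunk moves from drm_mm_create_block(), which allocated and returned a node, to drm_mm_reserve_node(), where the caller owns an embedded struct drm_mm_node, pre-fills its start and size, and asks the allocator to claim exactly that range. A hedged sketch of the calling convention (the manager and offset are illustrative):

        struct drm_mm_node node = {};
        int err;

        node.start = 0x100000;          /* illustrative fixed offset */
        node.size = 2 * PAGE_SIZE;

        err = drm_mm_reserve_node(&mm, &node);  /* fails if the range is taken */
        if (err)
                DRM_DEBUG_KMS("range already occupied: %d\n", err);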
@@ -688,7 +686,7 @@ void i915_gem_init_global_gtt(struct drm_device *dev)
688 if (INTEL_INFO(dev)->gen <= 7) { 686 if (INTEL_INFO(dev)->gen <= 7) {
689 /* PPGTT pdes are stolen from global gtt ptes, so shrink the 687 /* PPGTT pdes are stolen from global gtt ptes, so shrink the
690 * aperture accordingly when using aliasing ppgtt. */ 688 * aperture accordingly when using aliasing ppgtt. */
691 gtt_size -= I915_PPGTT_PD_ENTRIES*PAGE_SIZE; 689 gtt_size -= GEN6_PPGTT_PD_ENTRIES * PAGE_SIZE;
692 } 690 }
693 691
694 i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size); 692 i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size);
@@ -699,7 +697,7 @@ void i915_gem_init_global_gtt(struct drm_device *dev)
699 697
700 DRM_ERROR("Aliased PPGTT setup failed %d\n", ret); 698 DRM_ERROR("Aliased PPGTT setup failed %d\n", ret);
701 drm_mm_takedown(&dev_priv->mm.gtt_space); 699 drm_mm_takedown(&dev_priv->mm.gtt_space);
702 gtt_size += I915_PPGTT_PD_ENTRIES*PAGE_SIZE; 700 gtt_size += GEN6_PPGTT_PD_ENTRIES * PAGE_SIZE;
703 } 701 }
704 i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size); 702 i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size);
705} 703}
@@ -724,8 +722,8 @@ static int setup_scratch_page(struct drm_device *dev)
724#else 722#else
725 dma_addr = page_to_phys(page); 723 dma_addr = page_to_phys(page);
726#endif 724#endif
727 dev_priv->gtt.scratch_page = page; 725 dev_priv->gtt.scratch.page = page;
728 dev_priv->gtt.scratch_page_dma = dma_addr; 726 dev_priv->gtt.scratch.addr = dma_addr;
729 727
730 return 0; 728 return 0;
731} 729}
@@ -733,11 +731,11 @@ static int setup_scratch_page(struct drm_device *dev)
733static void teardown_scratch_page(struct drm_device *dev) 731static void teardown_scratch_page(struct drm_device *dev)
734{ 732{
735 struct drm_i915_private *dev_priv = dev->dev_private; 733 struct drm_i915_private *dev_priv = dev->dev_private;
736 set_pages_wb(dev_priv->gtt.scratch_page, 1); 734 set_pages_wb(dev_priv->gtt.scratch.page, 1);
737 pci_unmap_page(dev->pdev, dev_priv->gtt.scratch_page_dma, 735 pci_unmap_page(dev->pdev, dev_priv->gtt.scratch.addr,
738 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); 736 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
739 put_page(dev_priv->gtt.scratch_page); 737 put_page(dev_priv->gtt.scratch.page);
740 __free_page(dev_priv->gtt.scratch_page); 738 __free_page(dev_priv->gtt.scratch.page);
741} 739}
742 740
743static inline unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl) 741static inline unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
@@ -849,34 +847,28 @@ int i915_gem_gtt_init(struct drm_device *dev)
849 int ret; 847 int ret;
850 848
851 if (INTEL_INFO(dev)->gen <= 5) { 849 if (INTEL_INFO(dev)->gen <= 5) {
852 dev_priv->gtt.gtt_probe = i915_gmch_probe; 850 gtt->gtt_probe = i915_gmch_probe;
853 dev_priv->gtt.gtt_remove = i915_gmch_remove; 851 gtt->gtt_remove = i915_gmch_remove;
854 } else { 852 } else {
855 dev_priv->gtt.gtt_probe = gen6_gmch_probe; 853 gtt->gtt_probe = gen6_gmch_probe;
856 dev_priv->gtt.gtt_remove = gen6_gmch_remove; 854 gtt->gtt_remove = gen6_gmch_remove;
857 if (IS_HASWELL(dev)) { 855 if (IS_HASWELL(dev))
858 dev_priv->gtt.pte_encode = hsw_pte_encode; 856 gtt->pte_encode = hsw_pte_encode;
859 } else if (IS_VALLEYVIEW(dev)) { 857 else if (IS_VALLEYVIEW(dev))
860 dev_priv->gtt.pte_encode = byt_pte_encode; 858 gtt->pte_encode = byt_pte_encode;
861 } else { 859 else
862 dev_priv->gtt.pte_encode = gen6_pte_encode; 860 gtt->pte_encode = gen6_pte_encode;
863 }
864 } 861 }
865 862
866 ret = dev_priv->gtt.gtt_probe(dev, &dev_priv->gtt.total, 863 ret = gtt->gtt_probe(dev, &gtt->total, &gtt->stolen_size,
867 &dev_priv->gtt.stolen_size, 864 &gtt->mappable_base, &gtt->mappable_end);
868 &gtt->mappable_base,
869 &gtt->mappable_end);
870 if (ret) 865 if (ret)
871 return ret; 866 return ret;
872 867
873 /* GMADR is the PCI mmio aperture into the global GTT. */ 868 /* GMADR is the PCI mmio aperture into the global GTT. */
874 DRM_INFO("Memory usable by graphics device = %zdM\n", 869 DRM_INFO("Memory usable by graphics device = %zdM\n", gtt->total >> 20);
875 dev_priv->gtt.total >> 20); 870 DRM_DEBUG_DRIVER("GMADR size = %ldM\n", gtt->mappable_end >> 20);
876 DRM_DEBUG_DRIVER("GMADR size = %ldM\n", 871 DRM_DEBUG_DRIVER("GTT stolen size = %zdM\n", gtt->stolen_size >> 20);
877 dev_priv->gtt.mappable_end >> 20);
878 DRM_DEBUG_DRIVER("GTT stolen size = %zdM\n",
879 dev_priv->gtt.stolen_size >> 20);
880 872
881 return 0; 873 return 0;
882} 874}
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index 982d4732cecf..5c1a535d5072 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -46,6 +46,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
46{ 46{
47 struct drm_i915_private *dev_priv = dev->dev_private; 47 struct drm_i915_private *dev_priv = dev->dev_private;
48 struct pci_dev *pdev = dev_priv->bridge_dev; 48 struct pci_dev *pdev = dev_priv->bridge_dev;
49 struct resource *r;
49 u32 base; 50 u32 base;
50 51
51 /* On the machines I have tested the Graphics Base of Stolen Memory 52 /* On the machines I have tested the Graphics Base of Stolen Memory
@@ -88,6 +89,22 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
88#endif 89#endif
89 } 90 }
90 91
92 if (base == 0)
93 return 0;
94
95 /* Verify that nothing else uses this physical address. Stolen
96 * memory should be reserved by the BIOS and hidden from the
97 * kernel. So if the region is already marked as busy, something
98 * is seriously wrong.
99 */
100 r = devm_request_mem_region(dev->dev, base, dev_priv->gtt.stolen_size,
101 "Graphics Stolen Memory");
102 if (r == NULL) {
103 DRM_ERROR("conflict detected with stolen region: [0x%08x - 0x%08x]\n",
104 base, base + (uint32_t)dev_priv->gtt.stolen_size);
105 base = 0;
106 }
107
91 return base; 108 return base;
92} 109}
93 110
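The new check leans on devm_request_mem_region(): claiming the stolen range in the resource tree both detects conflicts (the call fails if anything else already owns the range) and ties the reservation's lifetime to the device, so it is released automatically on driver unbind. A condensed sketch, with base and size standing in for the probed values:

        struct resource *r;

        r = devm_request_mem_region(dev->dev, base, size,
                                    "Graphics Stolen Memory");
        if (r == NULL) {
                /* Another owner already claimed the range. */
                DRM_ERROR("stolen region conflict\n");
                base = 0;       /* treat stolen memory as unusable */
        }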
@@ -120,7 +137,7 @@ static int i915_setup_compression(struct drm_device *dev, int size)
120 if (!compressed_llb) 137 if (!compressed_llb)
121 goto err_fb; 138 goto err_fb;
122 139
123 dev_priv->compressed_llb = compressed_llb; 140 dev_priv->fbc.compressed_llb = compressed_llb;
124 141
125 I915_WRITE(FBC_CFB_BASE, 142 I915_WRITE(FBC_CFB_BASE,
126 dev_priv->mm.stolen_base + compressed_fb->start); 143 dev_priv->mm.stolen_base + compressed_fb->start);
@@ -128,8 +145,8 @@ static int i915_setup_compression(struct drm_device *dev, int size)
128 dev_priv->mm.stolen_base + compressed_llb->start); 145 dev_priv->mm.stolen_base + compressed_llb->start);
129 } 146 }
130 147
131 dev_priv->compressed_fb = compressed_fb; 148 dev_priv->fbc.compressed_fb = compressed_fb;
132 dev_priv->cfb_size = size; 149 dev_priv->fbc.size = size;
133 150
134 DRM_DEBUG_KMS("reserved %d bytes of contiguous stolen space for FBC\n", 151 DRM_DEBUG_KMS("reserved %d bytes of contiguous stolen space for FBC\n",
135 size); 152 size);
@@ -150,7 +167,7 @@ int i915_gem_stolen_setup_compression(struct drm_device *dev, int size)
150 if (!drm_mm_initialized(&dev_priv->mm.stolen)) 167 if (!drm_mm_initialized(&dev_priv->mm.stolen))
151 return -ENODEV; 168 return -ENODEV;
152 169
153 if (size < dev_priv->cfb_size) 170 if (size < dev_priv->fbc.size)
154 return 0; 171 return 0;
155 172
156 /* Release any current block */ 173 /* Release any current block */
@@ -163,16 +180,16 @@ void i915_gem_stolen_cleanup_compression(struct drm_device *dev)
163{ 180{
164 struct drm_i915_private *dev_priv = dev->dev_private; 181 struct drm_i915_private *dev_priv = dev->dev_private;
165 182
166 if (dev_priv->cfb_size == 0) 183 if (dev_priv->fbc.size == 0)
167 return; 184 return;
168 185
169 if (dev_priv->compressed_fb) 186 if (dev_priv->fbc.compressed_fb)
170 drm_mm_put_block(dev_priv->compressed_fb); 187 drm_mm_put_block(dev_priv->fbc.compressed_fb);
171 188
172 if (dev_priv->compressed_llb) 189 if (dev_priv->fbc.compressed_llb)
173 drm_mm_put_block(dev_priv->compressed_llb); 190 drm_mm_put_block(dev_priv->fbc.compressed_llb);
174 191
175 dev_priv->cfb_size = 0; 192 dev_priv->fbc.size = 0;
176} 193}
177 194
178void i915_gem_cleanup_stolen(struct drm_device *dev) 195void i915_gem_cleanup_stolen(struct drm_device *dev)
@@ -201,6 +218,9 @@ int i915_gem_init_stolen(struct drm_device *dev)
201 if (IS_VALLEYVIEW(dev)) 218 if (IS_VALLEYVIEW(dev))
202 bios_reserved = 1024*1024; /* top 1M on VLV/BYT */ 219 bios_reserved = 1024*1024; /* top 1M on VLV/BYT */
203 220
221 if (WARN_ON(bios_reserved > dev_priv->gtt.stolen_size))
222 return 0;
223
204 /* Basic memrange allocator for stolen space */ 224 /* Basic memrange allocator for stolen space */
205 drm_mm_init(&dev_priv->mm.stolen, 0, dev_priv->gtt.stolen_size - 225 drm_mm_init(&dev_priv->mm.stolen, 0, dev_priv->gtt.stolen_size -
206 bios_reserved); 226 bios_reserved);
@@ -333,6 +353,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
333 struct drm_i915_private *dev_priv = dev->dev_private; 353 struct drm_i915_private *dev_priv = dev->dev_private;
334 struct drm_i915_gem_object *obj; 354 struct drm_i915_gem_object *obj;
335 struct drm_mm_node *stolen; 355 struct drm_mm_node *stolen;
356 int ret;
336 357
337 if (!drm_mm_initialized(&dev_priv->mm.stolen)) 358 if (!drm_mm_initialized(&dev_priv->mm.stolen))
338 return NULL; 359 return NULL;
@@ -347,11 +368,16 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
347 if (WARN_ON(size == 0)) 368 if (WARN_ON(size == 0))
348 return NULL; 369 return NULL;
349 370
350 stolen = drm_mm_create_block(&dev_priv->mm.stolen, 371 stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
351 stolen_offset, size, 372 if (!stolen)
352 false); 373 return NULL;
353 if (stolen == NULL) { 374
375 stolen->start = stolen_offset;
376 stolen->size = size;
377 ret = drm_mm_reserve_node(&dev_priv->mm.stolen, stolen);
378 if (ret) {
354 DRM_DEBUG_KMS("failed to allocate stolen space\n"); 379 DRM_DEBUG_KMS("failed to allocate stolen space\n");
380 kfree(stolen);
355 return NULL; 381 return NULL;
356 } 382 }
357 383
@@ -363,7 +389,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
363 } 389 }
364 390
365 /* Some objects just need physical mem from stolen space */ 391 /* Some objects just need physical mem from stolen space */
366 if (gtt_offset == -1) 392 if (gtt_offset == I915_GTT_OFFSET_NONE)
367 return obj; 393 return obj;
368 394
369 /* To simplify the initialisation sequence between KMS and GTT, 395 /* To simplify the initialisation sequence between KMS and GTT,
@@ -371,25 +397,27 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
371 * setting up the GTT space. The actual reservation will occur 397 * setting up the GTT space. The actual reservation will occur
372 * later. 398 * later.
373 */ 399 */
400 obj->gtt_space.start = gtt_offset;
401 obj->gtt_space.size = size;
374 if (drm_mm_initialized(&dev_priv->mm.gtt_space)) { 402 if (drm_mm_initialized(&dev_priv->mm.gtt_space)) {
375 obj->gtt_space = drm_mm_create_block(&dev_priv->mm.gtt_space, 403 ret = drm_mm_reserve_node(&dev_priv->mm.gtt_space,
376 gtt_offset, size, 404 &obj->gtt_space);
377 false); 405 if (ret) {
378 if (obj->gtt_space == NULL) {
379 DRM_DEBUG_KMS("failed to allocate stolen GTT space\n"); 406 DRM_DEBUG_KMS("failed to allocate stolen GTT space\n");
380 drm_gem_object_unreference(&obj->base); 407 goto unref_out;
381 return NULL;
382 } 408 }
383 } else 409 }
384 obj->gtt_space = I915_GTT_RESERVED;
385 410
386 obj->gtt_offset = gtt_offset;
387 obj->has_global_gtt_mapping = 1; 411 obj->has_global_gtt_mapping = 1;
388 412
389 list_add_tail(&obj->global_list, &dev_priv->mm.bound_list); 413 list_add_tail(&obj->global_list, &dev_priv->mm.bound_list);
390 list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list); 414 list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
391 415
392 return obj; 416 return obj;
417
418unref_out:
419 drm_gem_object_unreference(&obj->base);
420 return NULL;
393} 421}
394 422
395void 423void
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 537545be69db..92a8d279ca39 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -268,18 +268,18 @@ i915_gem_object_fence_ok(struct drm_i915_gem_object *obj, int tiling_mode)
268 return true; 268 return true;
269 269
270 if (INTEL_INFO(obj->base.dev)->gen == 3) { 270 if (INTEL_INFO(obj->base.dev)->gen == 3) {
271 if (obj->gtt_offset & ~I915_FENCE_START_MASK) 271 if (i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK)
272 return false; 272 return false;
273 } else { 273 } else {
274 if (obj->gtt_offset & ~I830_FENCE_START_MASK) 274 if (i915_gem_obj_ggtt_offset(obj) & ~I830_FENCE_START_MASK)
275 return false; 275 return false;
276 } 276 }
277 277
278 size = i915_gem_get_gtt_size(obj->base.dev, obj->base.size, tiling_mode); 278 size = i915_gem_get_gtt_size(obj->base.dev, obj->base.size, tiling_mode);
279 if (obj->gtt_space->size != size) 279 if (i915_gem_obj_ggtt_size(obj) != size)
280 return false; 280 return false;
281 281
282 if (obj->gtt_offset & (size - 1)) 282 if (i915_gem_obj_ggtt_offset(obj) & (size - 1))
283 return false; 283 return false;
284 284
285 return true; 285 return true;
@@ -359,8 +359,8 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
359 */ 359 */
360 360
361 obj->map_and_fenceable = 361 obj->map_and_fenceable =
362 obj->gtt_space == NULL || 362 !i915_gem_obj_ggtt_bound(obj) ||
363 (obj->gtt_offset + obj->base.size <= dev_priv->gtt.mappable_end && 363 (i915_gem_obj_ggtt_offset(obj) + obj->base.size <= dev_priv->gtt.mappable_end &&
364 i915_gem_object_fence_ok(obj, args->tiling_mode)); 364 i915_gem_object_fence_ok(obj, args->tiling_mode));
365 365
366 /* Rebind if we need a change of alignment */ 366 /* Rebind if we need a change of alignment */
@@ -369,7 +369,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
369 i915_gem_get_gtt_alignment(dev, obj->base.size, 369 i915_gem_get_gtt_alignment(dev, obj->base.size,
370 args->tiling_mode, 370 args->tiling_mode,
371 false); 371 false);
372 if (obj->gtt_offset & (unfenced_alignment - 1)) 372 if (i915_gem_obj_ggtt_offset(obj) & (unfenced_alignment - 1))
373 ret = i915_gem_object_unbind(obj); 373 ret = i915_gem_object_unbind(obj);
374 } 374 }
375 375
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
new file mode 100644
index 000000000000..58386cebb865
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -0,0 +1,971 @@
1/*
2 * Copyright (c) 2008 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eric Anholt <eric@anholt.net>
25 * Keith Packard <keithp@keithp.com>
26 * Mika Kuoppala <mika.kuoppala@intel.com>
27 *
28 */
29
30#include <generated/utsrelease.h>
31#include "i915_drv.h"
32
33static const char *yesno(int v)
34{
35 return v ? "yes" : "no";
36}
37
38static const char *ring_str(int ring)
39{
40 switch (ring) {
41 case RCS: return "render";
42 case VCS: return "bsd";
43 case BCS: return "blt";
44 case VECS: return "vebox";
45 default: return "";
46 }
47}
48
49static const char *pin_flag(int pinned)
50{
51 if (pinned > 0)
52 return " P";
53 else if (pinned < 0)
54 return " p";
55 else
56 return "";
57}
58
59static const char *tiling_flag(int tiling)
60{
61 switch (tiling) {
62 default:
63 case I915_TILING_NONE: return "";
64 case I915_TILING_X: return " X";
65 case I915_TILING_Y: return " Y";
66 }
67}
68
69static const char *dirty_flag(int dirty)
70{
71 return dirty ? " dirty" : "";
72}
73
74static const char *purgeable_flag(int purgeable)
75{
76 return purgeable ? " purgeable" : "";
77}
78
79static bool __i915_error_ok(struct drm_i915_error_state_buf *e)
80{
81
82 if (!e->err && WARN(e->bytes > (e->size - 1), "overflow")) {
83 e->err = -ENOSPC;
84 return false;
85 }
86
87 if (e->bytes == e->size - 1 || e->err)
88 return false;
89
90 return true;
91}
92
93static bool __i915_error_seek(struct drm_i915_error_state_buf *e,
94 unsigned len)
95{
96 if (e->pos + len <= e->start) {
97 e->pos += len;
98 return false;
99 }
100
101 /* First vsnprintf needs to fit in its entirety for memmove */
102 if (len >= e->size) {
103 e->err = -EIO;
104 return false;
105 }
106
107 return true;
108}
109
110static void __i915_error_advance(struct drm_i915_error_state_buf *e,
111 unsigned len)
112{
113 /* If this is the first printf in this window, adjust it so that
114 * the start position matches the start of the buffer.
115 */
116
117 if (e->pos < e->start) {
118 const size_t off = e->start - e->pos;
119
120 /* Should not happen but be paranoid */
121 if (off > len || e->bytes) {
122 e->err = -EIO;
123 return;
124 }
125
126 memmove(e->buf, e->buf + off, len - off);
127 e->bytes = len - off;
128 e->pos = e->start;
129 return;
130 }
131
132 e->bytes += len;
133 e->pos += len;
134}
135
136static void i915_error_vprintf(struct drm_i915_error_state_buf *e,
137 const char *f, va_list args)
138{
139 unsigned len;
140
141 if (!__i915_error_ok(e))
142 return;
143
144 /* Seek to the first printf that hits the start position */
145 if (e->pos < e->start) {
146 len = vsnprintf(NULL, 0, f, args);
147 if (!__i915_error_seek(e, len))
148 return;
149 }
150
151 len = vsnprintf(e->buf + e->bytes, e->size - e->bytes, f, args);
152 if (len >= e->size - e->bytes)
153 len = e->size - e->bytes - 1;
154
155 __i915_error_advance(e, len);
156}
157
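Together, __i915_error_seek() and __i915_error_advance() implement a windowed formatter: the whole error state is re-rendered on every read, output before the requested file offset is skipped, the first string crossing the offset is trimmed, and output stops once the buffer fills. A self-contained user-space sketch of the same windowing idea (simplified to trim per string rather than memmove a full vsnprintf result; all names are illustrative):

#include <stdio.h>
#include <string.h>

struct window { char buf[64]; size_t bytes, size, start, pos; };

static void emit(struct window *w, const char *s)
{
        size_t len = strlen(s);

        if (w->pos + len <= w->start) {         /* entirely before window */
                w->pos += len;
                return;
        }
        if (w->pos < w->start) {                /* straddles window start */
                s += w->start - w->pos;
                len -= w->start - w->pos;
                w->pos = w->start;
        }
        if (len > w->size - w->bytes)           /* window full: truncate */
                len = w->size - w->bytes;
        memcpy(w->buf + w->bytes, s, len);
        w->bytes += len;
        w->pos += len;
}

int main(void)
{
        struct window w = { .size = sizeof(w.buf), .start = 4 };

        emit(&w, "EIR: 0\n");   /* partially skipped: starts before offset 4 */
        emit(&w, "IER: 0\n");   /* fully inside the window */
        fwrite(w.buf, 1, w.bytes, stdout);
        return 0;
}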
158static void i915_error_puts(struct drm_i915_error_state_buf *e,
159 const char *str)
160{
161 unsigned len;
162
163 if (!__i915_error_ok(e))
164 return;
165
166 len = strlen(str);
167
168 /* Seek to the first printf that hits the start position */
169 if (e->pos < e->start) {
170 if (!__i915_error_seek(e, len))
171 return;
172 }
173
174 if (len >= e->size - e->bytes)
175 len = e->size - e->bytes - 1;
176 memcpy(e->buf + e->bytes, str, len);
177
178 __i915_error_advance(e, len);
179}
180
181#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
182#define err_puts(e, s) i915_error_puts(e, s)
183
184static void print_error_buffers(struct drm_i915_error_state_buf *m,
185 const char *name,
186 struct drm_i915_error_buffer *err,
187 int count)
188{
189 err_printf(m, "%s [%d]:\n", name, count);
190
191 while (count--) {
192 err_printf(m, " %08x %8u %02x %02x %x %x",
193 err->gtt_offset,
194 err->size,
195 err->read_domains,
196 err->write_domain,
197 err->rseqno, err->wseqno);
198 err_puts(m, pin_flag(err->pinned));
199 err_puts(m, tiling_flag(err->tiling));
200 err_puts(m, dirty_flag(err->dirty));
201 err_puts(m, purgeable_flag(err->purgeable));
202 err_puts(m, err->ring != -1 ? " " : "");
203 err_puts(m, ring_str(err->ring));
204 err_puts(m, i915_cache_level_str(err->cache_level));
205
206 if (err->name)
207 err_printf(m, " (name: %d)", err->name);
208 if (err->fence_reg != I915_FENCE_REG_NONE)
209 err_printf(m, " (fence: %d)", err->fence_reg);
210
211 err_puts(m, "\n");
212 err++;
213 }
214}
215
216static void i915_ring_error_state(struct drm_i915_error_state_buf *m,
217 struct drm_device *dev,
218 struct drm_i915_error_state *error,
219 unsigned ring)
220{
221 BUG_ON(ring >= I915_NUM_RINGS); /* shut up confused gcc */
222 err_printf(m, "%s command stream:\n", ring_str(ring));
223 err_printf(m, " HEAD: 0x%08x\n", error->head[ring]);
224 err_printf(m, " TAIL: 0x%08x\n", error->tail[ring]);
225 err_printf(m, " CTL: 0x%08x\n", error->ctl[ring]);
226 err_printf(m, " ACTHD: 0x%08x\n", error->acthd[ring]);
227 err_printf(m, " IPEIR: 0x%08x\n", error->ipeir[ring]);
228 err_printf(m, " IPEHR: 0x%08x\n", error->ipehr[ring]);
229 err_printf(m, " INSTDONE: 0x%08x\n", error->instdone[ring]);
230 if (ring == RCS && INTEL_INFO(dev)->gen >= 4)
231 err_printf(m, " BBADDR: 0x%08llx\n", error->bbaddr);
232
233 if (INTEL_INFO(dev)->gen >= 4)
234 err_printf(m, " INSTPS: 0x%08x\n", error->instps[ring]);
235 err_printf(m, " INSTPM: 0x%08x\n", error->instpm[ring]);
236 err_printf(m, " FADDR: 0x%08x\n", error->faddr[ring]);
237 if (INTEL_INFO(dev)->gen >= 6) {
238 err_printf(m, " RC PSMI: 0x%08x\n", error->rc_psmi[ring]);
239 err_printf(m, " FAULT_REG: 0x%08x\n", error->fault_reg[ring]);
240 err_printf(m, " SYNC_0: 0x%08x [last synced 0x%08x]\n",
241 error->semaphore_mboxes[ring][0],
242 error->semaphore_seqno[ring][0]);
243 err_printf(m, " SYNC_1: 0x%08x [last synced 0x%08x]\n",
244 error->semaphore_mboxes[ring][1],
245 error->semaphore_seqno[ring][1]);
246 }
247 err_printf(m, " seqno: 0x%08x\n", error->seqno[ring]);
248 err_printf(m, " waiting: %s\n", yesno(error->waiting[ring]));
249 err_printf(m, " ring->head: 0x%08x\n", error->cpu_ring_head[ring]);
250 err_printf(m, " ring->tail: 0x%08x\n", error->cpu_ring_tail[ring]);
251}
252
253void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...)
254{
255 va_list args;
256
257 va_start(args, f);
258 i915_error_vprintf(e, f, args);
259 va_end(args);
260}
261
262int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
263 const struct i915_error_state_file_priv *error_priv)
264{
265 struct drm_device *dev = error_priv->dev;
266 drm_i915_private_t *dev_priv = dev->dev_private;
267 struct drm_i915_error_state *error = error_priv->error;
268 struct intel_ring_buffer *ring;
269 int i, j, page, offset, elt;
270
271 if (!error) {
272 err_printf(m, "no error state collected\n");
273 goto out;
274 }
275
276 err_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec,
277 error->time.tv_usec);
278 err_printf(m, "Kernel: " UTS_RELEASE "\n");
279 err_printf(m, "PCI ID: 0x%04x\n", dev->pci_device);
280 err_printf(m, "EIR: 0x%08x\n", error->eir);
281 err_printf(m, "IER: 0x%08x\n", error->ier);
282 err_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);
283 err_printf(m, "FORCEWAKE: 0x%08x\n", error->forcewake);
284 err_printf(m, "DERRMR: 0x%08x\n", error->derrmr);
285 err_printf(m, "CCID: 0x%08x\n", error->ccid);
286
287 for (i = 0; i < dev_priv->num_fence_regs; i++)
288 err_printf(m, " fence[%d] = %08llx\n", i, error->fence[i]);
289
290 for (i = 0; i < ARRAY_SIZE(error->extra_instdone); i++)
291 err_printf(m, " INSTDONE_%d: 0x%08x\n", i,
292 error->extra_instdone[i]);
293
294 if (INTEL_INFO(dev)->gen >= 6) {
295 err_printf(m, "ERROR: 0x%08x\n", error->error);
296 err_printf(m, "DONE_REG: 0x%08x\n", error->done_reg);
297 }
298
299 if (INTEL_INFO(dev)->gen == 7)
300 err_printf(m, "ERR_INT: 0x%08x\n", error->err_int);
301
302 for_each_ring(ring, dev_priv, i)
303 i915_ring_error_state(m, dev, error, i);
304
305 if (error->active_bo)
306 print_error_buffers(m, "Active",
307 error->active_bo,
308 error->active_bo_count);
309
310 if (error->pinned_bo)
311 print_error_buffers(m, "Pinned",
312 error->pinned_bo,
313 error->pinned_bo_count);
314
315 for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
316 struct drm_i915_error_object *obj;
317
318 if ((obj = error->ring[i].batchbuffer)) {
319 err_printf(m, "%s --- gtt_offset = 0x%08x\n",
320 dev_priv->ring[i].name,
321 obj->gtt_offset);
322 offset = 0;
323 for (page = 0; page < obj->page_count; page++) {
324 for (elt = 0; elt < PAGE_SIZE/4; elt++) {
325 err_printf(m, "%08x : %08x\n", offset,
326 obj->pages[page][elt]);
327 offset += 4;
328 }
329 }
330 }
331
332 if (error->ring[i].num_requests) {
333 err_printf(m, "%s --- %d requests\n",
334 dev_priv->ring[i].name,
335 error->ring[i].num_requests);
336 for (j = 0; j < error->ring[i].num_requests; j++) {
337 err_printf(m, " seqno 0x%08x, emitted %ld, tail 0x%08x\n",
338 error->ring[i].requests[j].seqno,
339 error->ring[i].requests[j].jiffies,
340 error->ring[i].requests[j].tail);
341 }
342 }
343
344 if ((obj = error->ring[i].ringbuffer)) {
345 err_printf(m, "%s --- ringbuffer = 0x%08x\n",
346 dev_priv->ring[i].name,
347 obj->gtt_offset);
348 offset = 0;
349 for (page = 0; page < obj->page_count; page++) {
350 for (elt = 0; elt < PAGE_SIZE/4; elt++) {
351 err_printf(m, "%08x : %08x\n",
352 offset,
353 obj->pages[page][elt]);
354 offset += 4;
355 }
356 }
357 }
358
359 obj = error->ring[i].ctx;
360 if (obj) {
361 err_printf(m, "%s --- HW Context = 0x%08x\n",
362 dev_priv->ring[i].name,
363 obj->gtt_offset);
364 offset = 0;
365 for (elt = 0; elt < PAGE_SIZE/16; elt += 4) {
366 err_printf(m, "[%04x] %08x %08x %08x %08x\n",
367 offset,
368 obj->pages[0][elt],
369 obj->pages[0][elt+1],
370 obj->pages[0][elt+2],
371 obj->pages[0][elt+3]);
372 offset += 16;
373 }
374 }
375 }
376
377 if (error->overlay)
378 intel_overlay_print_error_state(m, error->overlay);
379
380 if (error->display)
381 intel_display_print_error_state(m, dev, error->display);
382
383out:
384 if (m->bytes == 0 && m->err)
385 return m->err;
386
387 return 0;
388}
389
390int i915_error_state_buf_init(struct drm_i915_error_state_buf *ebuf,
391 size_t count, loff_t pos)
392{
393 memset(ebuf, 0, sizeof(*ebuf));
394
395 /* We need to have enough room to store any i915_error_state printf
396 * so that we can move it to the start position.
397 */
398 ebuf->size = count + 1 > PAGE_SIZE ? count + 1 : PAGE_SIZE;
399 ebuf->buf = kmalloc(ebuf->size,
400 GFP_TEMPORARY | __GFP_NORETRY | __GFP_NOWARN);
401
402 if (ebuf->buf == NULL) {
403 ebuf->size = PAGE_SIZE;
404 ebuf->buf = kmalloc(ebuf->size, GFP_TEMPORARY);
405 }
406
407 if (ebuf->buf == NULL) {
408 ebuf->size = 128;
409 ebuf->buf = kmalloc(ebuf->size, GFP_TEMPORARY);
410 }
411
412 if (ebuf->buf == NULL)
413 return -ENOMEM;
414
415 ebuf->start = pos;
416
417 return 0;
418}
419
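The allocation cascade in i915_error_state_buf_init() is a common kernel pattern: try the ideal size with __GFP_NORETRY | __GFP_NOWARN so a large request fails fast and silently under memory pressure, then retreat to progressively smaller buffers before giving up. The idiom in condensed form (buf and wanted are illustrative):

        buf->size = wanted;
        buf->data = kmalloc(buf->size,
                            GFP_TEMPORARY | __GFP_NORETRY | __GFP_NOWARN);
        if (!buf->data) {
                buf->size = PAGE_SIZE;          /* smaller fallback */
                buf->data = kmalloc(buf->size, GFP_TEMPORARY);
        }
        if (!buf->data) {
                buf->size = 128;                /* last-ditch minimum */
                buf->data = kmalloc(buf->size, GFP_TEMPORARY);
        }
        if (!buf->data)
                return -ENOMEM;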
420static void i915_error_object_free(struct drm_i915_error_object *obj)
421{
422 int page;
423
424 if (obj == NULL)
425 return;
426
427 for (page = 0; page < obj->page_count; page++)
428 kfree(obj->pages[page]);
429
430 kfree(obj);
431}
432
433static void i915_error_state_free(struct kref *error_ref)
434{
435 struct drm_i915_error_state *error = container_of(error_ref,
436 typeof(*error), ref);
437 int i;
438
439 for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
440 i915_error_object_free(error->ring[i].batchbuffer);
441 i915_error_object_free(error->ring[i].ringbuffer);
442 i915_error_object_free(error->ring[i].ctx);
443 kfree(error->ring[i].requests);
444 }
445
446 kfree(error->active_bo);
447 kfree(error->overlay);
448 kfree(error->display);
449 kfree(error);
450}
451
452static struct drm_i915_error_object *
453i915_error_object_create_sized(struct drm_i915_private *dev_priv,
454 struct drm_i915_gem_object *src,
455 const int num_pages)
456{
457 struct drm_i915_error_object *dst;
458 int i;
459 u32 reloc_offset;
460
461 if (src == NULL || src->pages == NULL)
462 return NULL;
463
464 dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *), GFP_ATOMIC);
465 if (dst == NULL)
466 return NULL;
467
468 reloc_offset = dst->gtt_offset = i915_gem_obj_ggtt_offset(src);
469 for (i = 0; i < num_pages; i++) {
470 unsigned long flags;
471 void *d;
472
473 d = kmalloc(PAGE_SIZE, GFP_ATOMIC);
474 if (d == NULL)
475 goto unwind;
476
477 local_irq_save(flags);
478 if (reloc_offset < dev_priv->gtt.mappable_end &&
479 src->has_global_gtt_mapping) {
480 void __iomem *s;
481
482 /* Simply ignore tiling or any overlapping fence.
483 * It's part of the error state, and this hopefully
484 * captures what the GPU read.
485 */
486
487 s = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
488 reloc_offset);
489 memcpy_fromio(d, s, PAGE_SIZE);
490 io_mapping_unmap_atomic(s);
491 } else if (src->stolen) {
492 unsigned long offset;
493
494 offset = dev_priv->mm.stolen_base;
495 offset += src->stolen->start;
496 offset += i << PAGE_SHIFT;
497
498 memcpy_fromio(d, (void __iomem *) offset, PAGE_SIZE);
499 } else {
500 struct page *page;
501 void *s;
502
503 page = i915_gem_object_get_page(src, i);
504
505 drm_clflush_pages(&page, 1);
506
507 s = kmap_atomic(page);
508 memcpy(d, s, PAGE_SIZE);
509 kunmap_atomic(s);
510
511 drm_clflush_pages(&page, 1);
512 }
513 local_irq_restore(flags);
514
515 dst->pages[i] = d;
516
517 reloc_offset += PAGE_SIZE;
518 }
519 dst->page_count = num_pages;
520
521 return dst;
522
523unwind:
524 while (i--)
525 kfree(dst->pages[i]);
526 kfree(dst);
527 return NULL;
528}
529#define i915_error_object_create(dev_priv, src) \
530 i915_error_object_create_sized((dev_priv), (src), \
531 (src)->base.size>>PAGE_SHIFT)
532
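i915_error_object_create_sized() runs with interrupts off and must not sleep, so each backing store gets its own GFP_ATOMIC-safe copy path: the GTT aperture through an atomic write-combining io-mapping, stolen memory through its physical offset, and ordinary shmem pages through kmap_atomic(). The aperture branch in isolation (mappable and offset are illustrative):

        void *d = kmalloc(PAGE_SIZE, GFP_ATOMIC);       /* no sleeping here */
        void __iomem *s;

        if (d) {
                s = io_mapping_map_atomic_wc(mappable, offset);
                memcpy_fromio(d, s, PAGE_SIZE);
                io_mapping_unmap_atomic(s);
        }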
533static void capture_bo(struct drm_i915_error_buffer *err,
534 struct drm_i915_gem_object *obj)
535{
536 err->size = obj->base.size;
537 err->name = obj->base.name;
538 err->rseqno = obj->last_read_seqno;
539 err->wseqno = obj->last_write_seqno;
540 err->gtt_offset = i915_gem_obj_ggtt_offset(obj);
541 err->read_domains = obj->base.read_domains;
542 err->write_domain = obj->base.write_domain;
543 err->fence_reg = obj->fence_reg;
544 err->pinned = 0;
545 if (obj->pin_count > 0)
546 err->pinned = 1;
547 if (obj->user_pin_count > 0)
548 err->pinned = -1;
549 err->tiling = obj->tiling_mode;
550 err->dirty = obj->dirty;
551 err->purgeable = obj->madv != I915_MADV_WILLNEED;
552 err->ring = obj->ring ? obj->ring->id : -1;
553 err->cache_level = obj->cache_level;
554}
555
556static u32 capture_active_bo(struct drm_i915_error_buffer *err,
557 int count, struct list_head *head)
558{
559 struct drm_i915_gem_object *obj;
560 int i = 0;
561
562 list_for_each_entry(obj, head, mm_list) {
563 capture_bo(err++, obj);
564 if (++i == count)
565 break;
566 }
567
568 return i;
569}
570
571static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
572 int count, struct list_head *head)
573{
574 struct drm_i915_gem_object *obj;
575 int i = 0;
576
577 list_for_each_entry(obj, head, global_list) {
578 if (obj->pin_count == 0)
579 continue;
580
581 capture_bo(err++, obj);
582 if (++i == count)
583 break;
584 }
585
586 return i;
587}
588
589static void i915_gem_record_fences(struct drm_device *dev,
590 struct drm_i915_error_state *error)
591{
592 struct drm_i915_private *dev_priv = dev->dev_private;
593 int i;
594
595 /* Fences */
596 switch (INTEL_INFO(dev)->gen) {
597 case 7:
598 case 6:
599 for (i = 0; i < dev_priv->num_fence_regs; i++)
600 error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
601 break;
602 case 5:
603 case 4:
604 for (i = 0; i < 16; i++)
605 error->fence[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
606 break;
607 case 3:
608 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
609 for (i = 0; i < 8; i++)
610 error->fence[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
611 case 2:
612 for (i = 0; i < 8; i++)
613 error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
614 break;
615
616 default:
617 BUG();
618 }
619}
620
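Note the missing break after case 3 above: it is deliberate. 945/G33-class parts have eight extra fence registers, so gen3 records fences 8..15 and then falls through to the gen2 code for fences 0..7. The shape of the construct, with hypothetical helpers:

        switch (gen) {
        case 3:
                if (has_extra_fences)
                        record_fences_8_to_15();        /* hypothetical */
                /* fall through: gen3 shares the gen2 base registers */
        case 2:
                record_fences_0_to_7();                 /* hypothetical */
                break;
        }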
621static struct drm_i915_error_object *
622i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
623 struct intel_ring_buffer *ring)
624{
625 struct drm_i915_gem_object *obj;
626 u32 seqno;
627
628 if (!ring->get_seqno)
629 return NULL;
630
631 if (HAS_BROKEN_CS_TLB(dev_priv->dev)) {
632 u32 acthd = I915_READ(ACTHD);
633
634 if (WARN_ON(ring->id != RCS))
635 return NULL;
636
637 obj = ring->private;
638 if (acthd >= i915_gem_obj_ggtt_offset(obj) &&
639 acthd < i915_gem_obj_ggtt_offset(obj) + obj->base.size)
640 return i915_error_object_create(dev_priv, obj);
641 }
642
643 seqno = ring->get_seqno(ring, false);
644 list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
645 if (obj->ring != ring)
646 continue;
647
648 if (i915_seqno_passed(seqno, obj->last_read_seqno))
649 continue;
650
651 if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0)
652 continue;
653
654 /* We need to copy these to an anonymous buffer as the simplest
655 * method to avoid being overwritten by userspace.
656 */
657 return i915_error_object_create(dev_priv, obj);
658 }
659
660 return NULL;
661}
662
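The i915_seqno_passed() test above uses the usual wraparound-safe sequence comparison: subtract in 32 bits and check the sign, so the ordering stays correct even after the seqno counter wraps. The trick in isolation:

/* True if a is at or after b, robust across u32 wraparound. */
static inline bool seqno_passed(u32 a, u32 b)
{
        return (s32)(a - b) >= 0;
}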
663static void i915_record_ring_state(struct drm_device *dev,
664 struct drm_i915_error_state *error,
665 struct intel_ring_buffer *ring)
666{
667 struct drm_i915_private *dev_priv = dev->dev_private;
668
669 if (INTEL_INFO(dev)->gen >= 6) {
670 error->rc_psmi[ring->id] = I915_READ(ring->mmio_base + 0x50);
671 error->fault_reg[ring->id] = I915_READ(RING_FAULT_REG(ring));
672 error->semaphore_mboxes[ring->id][0]
673 = I915_READ(RING_SYNC_0(ring->mmio_base));
674 error->semaphore_mboxes[ring->id][1]
675 = I915_READ(RING_SYNC_1(ring->mmio_base));
676 error->semaphore_seqno[ring->id][0] = ring->sync_seqno[0];
677 error->semaphore_seqno[ring->id][1] = ring->sync_seqno[1];
678 }
679
680 if (INTEL_INFO(dev)->gen >= 4) {
681 error->faddr[ring->id] = I915_READ(RING_DMA_FADD(ring->mmio_base));
682 error->ipeir[ring->id] = I915_READ(RING_IPEIR(ring->mmio_base));
683 error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base));
684 error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base));
685 error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base));
686 if (ring->id == RCS)
687 error->bbaddr = I915_READ64(BB_ADDR);
688 } else {
689 error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX);
690 error->ipeir[ring->id] = I915_READ(IPEIR);
691 error->ipehr[ring->id] = I915_READ(IPEHR);
692 error->instdone[ring->id] = I915_READ(INSTDONE);
693 }
694
695 error->waiting[ring->id] = waitqueue_active(&ring->irq_queue);
696 error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base));
697 error->seqno[ring->id] = ring->get_seqno(ring, false);
698 error->acthd[ring->id] = intel_ring_get_active_head(ring);
699 error->head[ring->id] = I915_READ_HEAD(ring);
700 error->tail[ring->id] = I915_READ_TAIL(ring);
701 error->ctl[ring->id] = I915_READ_CTL(ring);
702
703 error->cpu_ring_head[ring->id] = ring->head;
704 error->cpu_ring_tail[ring->id] = ring->tail;
705}
706
707
708static void i915_gem_record_active_context(struct intel_ring_buffer *ring,
709 struct drm_i915_error_state *error,
710 struct drm_i915_error_ring *ering)
711{
712 struct drm_i915_private *dev_priv = ring->dev->dev_private;
713 struct drm_i915_gem_object *obj;
714
715 /* Currently render ring is the only HW context user */
716 if (ring->id != RCS || !error->ccid)
717 return;
718
719 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
720 if ((error->ccid & PAGE_MASK) == i915_gem_obj_ggtt_offset(obj)) {
721 ering->ctx = i915_error_object_create_sized(dev_priv,
722 obj, 1);
723 break;
724 }
725 }
726}
727
728static void i915_gem_record_rings(struct drm_device *dev,
729 struct drm_i915_error_state *error)
730{
731 struct drm_i915_private *dev_priv = dev->dev_private;
732 struct intel_ring_buffer *ring;
733 struct drm_i915_gem_request *request;
734 int i, count;
735
736 for_each_ring(ring, dev_priv, i) {
737 i915_record_ring_state(dev, error, ring);
738
739 error->ring[i].batchbuffer =
740 i915_error_first_batchbuffer(dev_priv, ring);
741
742 error->ring[i].ringbuffer =
743 i915_error_object_create(dev_priv, ring->obj);
744
745
746 i915_gem_record_active_context(ring, error, &error->ring[i]);
747
748 count = 0;
749 list_for_each_entry(request, &ring->request_list, list)
750 count++;
751
752 error->ring[i].num_requests = count;
753 error->ring[i].requests =
754 kmalloc(count*sizeof(struct drm_i915_error_request),
755 GFP_ATOMIC);
756 if (error->ring[i].requests == NULL) {
757 error->ring[i].num_requests = 0;
758 continue;
759 }
760
761 count = 0;
762 list_for_each_entry(request, &ring->request_list, list) {
763 struct drm_i915_error_request *erq;
764
765 erq = &error->ring[i].requests[count++];
766 erq->seqno = request->seqno;
767 erq->jiffies = request->emitted_jiffies;
768 erq->tail = request->tail;
769 }
770 }
771}
772
773static void i915_gem_capture_buffers(struct drm_i915_private *dev_priv,
774 struct drm_i915_error_state *error)
775{
776 struct drm_i915_gem_object *obj;
777 int i;
778
779 i = 0;
780 list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list)
781 i++;
782 error->active_bo_count = i;
783 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
784 if (obj->pin_count)
785 i++;
786 error->pinned_bo_count = i - error->active_bo_count;
787
788 if (i) {
789 error->active_bo = kmalloc(sizeof(*error->active_bo)*i,
790 GFP_ATOMIC);
791 if (error->active_bo)
792 error->pinned_bo =
793 error->active_bo + error->active_bo_count;
794 }
795
796 if (error->active_bo)
797 error->active_bo_count =
798 capture_active_bo(error->active_bo,
799 error->active_bo_count,
800 &dev_priv->mm.active_list);
801
802 if (error->pinned_bo)
803 error->pinned_bo_count =
804 capture_pinned_bo(error->pinned_bo,
805 error->pinned_bo_count,
806 &dev_priv->mm.bound_list);
807}
808
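i915_gem_capture_buffers() uses a count-then-fill pattern: walk the lists once to size a single GFP_ATOMIC allocation covering both active and pinned objects, then walk again and stop at the recorded count in case a list grew in between. Condensed, with capture_one() as a hypothetical helper:

        struct drm_i915_gem_object *obj;
        struct drm_i915_error_buffer *arr;
        int i, count = 0;

        list_for_each_entry(obj, head, mm_list)
                count++;

        arr = kmalloc(count * sizeof(*arr), GFP_ATOMIC);
        if (arr) {
                i = 0;
                list_for_each_entry(obj, head, mm_list) {
                        capture_one(&arr[i], obj);      /* hypothetical */
                        if (++i == count)
                                break;  /* list may have grown meanwhile */
                }
        }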
809/**
810 * i915_capture_error_state - capture an error record for later analysis
811 * @dev: drm device
812 *
813 * Should be called when an error is detected (either a hang or an error
814 * interrupt) to capture error state from the time of the error. Fills
815 * out a structure which becomes available in debugfs for user level tools
816 * to pick up.
817 */
818void i915_capture_error_state(struct drm_device *dev)
819{
820 struct drm_i915_private *dev_priv = dev->dev_private;
821 struct drm_i915_error_state *error;
822 unsigned long flags;
823 int pipe;
824
825 spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
826 error = dev_priv->gpu_error.first_error;
827 spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
828 if (error)
829 return;
830
831 /* Account for pipe specific data like PIPE*STAT */
832 error = kzalloc(sizeof(*error), GFP_ATOMIC);
833 if (!error) {
834 DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
835 return;
836 }
837
838 DRM_INFO("capturing error event; look for more information in "
839 "/sys/class/drm/card%d/error\n", dev->primary->index);
840
841 kref_init(&error->ref);
842 error->eir = I915_READ(EIR);
843 error->pgtbl_er = I915_READ(PGTBL_ER);
844 if (HAS_HW_CONTEXTS(dev))
845 error->ccid = I915_READ(CCID);
846
847 if (HAS_PCH_SPLIT(dev))
848 error->ier = I915_READ(DEIER) | I915_READ(GTIER);
849 else if (IS_VALLEYVIEW(dev))
850 error->ier = I915_READ(GTIER) | I915_READ(VLV_IER);
851 else if (IS_GEN2(dev))
852 error->ier = I915_READ16(IER);
853 else
854 error->ier = I915_READ(IER);
855
856 if (INTEL_INFO(dev)->gen >= 6)
857 error->derrmr = I915_READ(DERRMR);
858
859 if (IS_VALLEYVIEW(dev))
860 error->forcewake = I915_READ(FORCEWAKE_VLV);
861 else if (INTEL_INFO(dev)->gen >= 7)
862 error->forcewake = I915_READ(FORCEWAKE_MT);
863 else if (INTEL_INFO(dev)->gen == 6)
864 error->forcewake = I915_READ(FORCEWAKE);
865
866 if (!HAS_PCH_SPLIT(dev))
867 for_each_pipe(pipe)
868 error->pipestat[pipe] = I915_READ(PIPESTAT(pipe));
869
870 if (INTEL_INFO(dev)->gen >= 6) {
871 error->error = I915_READ(ERROR_GEN6);
872 error->done_reg = I915_READ(DONE_REG);
873 }
874
875 if (INTEL_INFO(dev)->gen == 7)
876 error->err_int = I915_READ(GEN7_ERR_INT);
877
878 i915_get_extra_instdone(dev, error->extra_instdone);
879
880 i915_gem_capture_buffers(dev_priv, error);
881 i915_gem_record_fences(dev, error);
882 i915_gem_record_rings(dev, error);
883
884 do_gettimeofday(&error->time);
885
886 error->overlay = intel_overlay_capture_error_state(dev);
887 error->display = intel_display_capture_error_state(dev);
888
889 spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
890 if (dev_priv->gpu_error.first_error == NULL) {
891 dev_priv->gpu_error.first_error = error;
892 error = NULL;
893 }
894 spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
895
896 if (error)
897 i915_error_state_free(&error->ref);
898}
899
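The tail of i915_capture_error_state() is a single-slot handoff: the fully built error state is installed only if first_error is still empty, and a loser of that race drops the initial reference, which frees the duplicate capture. The handoff in isolation:

        spin_lock_irqsave(&gpu_error.lock, flags);
        if (gpu_error.first_error == NULL) {
                gpu_error.first_error = error;  /* slot keeps the initial ref */
                error = NULL;
        }
        spin_unlock_irqrestore(&gpu_error.lock, flags);

        if (error)      /* raced: someone else captured first */
                kref_put(&error->ref, i915_error_state_free);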
900void i915_error_state_get(struct drm_device *dev,
901 struct i915_error_state_file_priv *error_priv)
902{
903 struct drm_i915_private *dev_priv = dev->dev_private;
904 unsigned long flags;
905
906 spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
907 error_priv->error = dev_priv->gpu_error.first_error;
908 if (error_priv->error)
909 kref_get(&error_priv->error->ref);
910 spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
911
912}
913
914void i915_error_state_put(struct i915_error_state_file_priv *error_priv)
915{
916 if (error_priv->error)
917 kref_put(&error_priv->error->ref, i915_error_state_free);
918}
919
920void i915_destroy_error_state(struct drm_device *dev)
921{
922 struct drm_i915_private *dev_priv = dev->dev_private;
923 struct drm_i915_error_state *error;
924 unsigned long flags;
925
926 spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
927 error = dev_priv->gpu_error.first_error;
928 dev_priv->gpu_error.first_error = NULL;
929 spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
930
931 if (error)
932 kref_put(&error->ref, i915_error_state_free);
933}
934
935const char *i915_cache_level_str(int type)
936{
937 switch (type) {
938 case I915_CACHE_NONE: return " uncached";
939 case I915_CACHE_LLC: return " snooped (LLC)";
940 case I915_CACHE_LLC_MLC: return " snooped (LLC+MLC)";
941 default: return "";
942 }
943}
944
945/* NB: please notice the memset */
946void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone)
947{
948 struct drm_i915_private *dev_priv = dev->dev_private;
949 memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG);
950
951 switch (INTEL_INFO(dev)->gen) {
952 case 2:
953 case 3:
954 instdone[0] = I915_READ(INSTDONE);
955 break;
956 case 4:
957 case 5:
958 case 6:
959 instdone[0] = I915_READ(INSTDONE_I965);
960 instdone[1] = I915_READ(INSTDONE1);
961 break;
962 default:
963 WARN_ONCE(1, "Unsupported platform\n");
964 case 7:
965 instdone[0] = I915_READ(GEN7_INSTDONE_1);
966 instdone[1] = I915_READ(GEN7_SC_INSTDONE);
967 instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE);
968 instdone[3] = I915_READ(GEN7_ROW_INSTDONE);
969 break;
970 }
971}
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 3d92a7cef154..64db680fb7a4 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -128,6 +128,8 @@ static bool cpt_can_enable_serr_int(struct drm_device *dev)
128 enum pipe pipe; 128 enum pipe pipe;
129 struct intel_crtc *crtc; 129 struct intel_crtc *crtc;
130 130
131 assert_spin_locked(&dev_priv->irq_lock);
132
131 for_each_pipe(pipe) { 133 for_each_pipe(pipe) {
132 crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); 134 crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
133 135
@@ -152,38 +154,66 @@ static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev,
152} 154}
153 155
154static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev, 156static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev,
155 bool enable) 157 enum pipe pipe, bool enable)
156{ 158{
157 struct drm_i915_private *dev_priv = dev->dev_private; 159 struct drm_i915_private *dev_priv = dev->dev_private;
158
159 if (enable) { 160 if (enable) {
161 I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN(pipe));
162
160 if (!ivb_can_enable_err_int(dev)) 163 if (!ivb_can_enable_err_int(dev))
161 return; 164 return;
162 165
163 I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN_A |
164 ERR_INT_FIFO_UNDERRUN_B |
165 ERR_INT_FIFO_UNDERRUN_C);
166
167 ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB); 166 ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
168 } else { 167 } else {
168 bool was_enabled = !(I915_READ(DEIMR) & DE_ERR_INT_IVB);
169
170 /* Change the state _after_ we've read out the current one. */
169 ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB); 171 ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);
172
173 if (!was_enabled &&
174 (I915_READ(GEN7_ERR_INT) & ERR_INT_FIFO_UNDERRUN(pipe))) {
175 DRM_DEBUG_KMS("uncleared fifo underrun on pipe %c\n",
176 pipe_name(pipe));
177 }
170 } 178 }
171} 179}
172 180
173static void ibx_set_fifo_underrun_reporting(struct intel_crtc *crtc, 181/**
182 * ibx_display_interrupt_update - update SDEIMR
183 * @dev_priv: driver private
184 * @interrupt_mask: mask of interrupt bits to update
185 * @enabled_irq_mask: mask of interrupt bits to enable
186 */
187static void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
188 uint32_t interrupt_mask,
189 uint32_t enabled_irq_mask)
190{
191 uint32_t sdeimr = I915_READ(SDEIMR);
192 sdeimr &= ~interrupt_mask;
193 sdeimr |= (~enabled_irq_mask & interrupt_mask);
194
195 assert_spin_locked(&dev_priv->irq_lock);
196
197 I915_WRITE(SDEIMR, sdeimr);
198 POSTING_READ(SDEIMR);
199}
200#define ibx_enable_display_interrupt(dev_priv, bits) \
201 ibx_display_interrupt_update((dev_priv), (bits), (bits))
202#define ibx_disable_display_interrupt(dev_priv, bits) \
203 ibx_display_interrupt_update((dev_priv), (bits), 0)
204
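The two-mask update in ibx_display_interrupt_update() reads as follows: interrupt_mask selects which SDEIMR bits are touched, and enabled_irq_mask says which of those end up unmasked (a set IMR bit means masked). A self-contained worked example of the arithmetic:

#include <stdint.h>

static uint32_t imr_update(uint32_t imr, uint32_t interrupt_mask,
                           uint32_t enabled_irq_mask)
{
        imr &= ~interrupt_mask;                    /* forget the touched bits */
        imr |= ~enabled_irq_mask & interrupt_mask; /* re-set those not enabled */
        return imr;
}

/* imr_update(imr, bit, bit) unmasks bit (enable);
 * imr_update(imr, bit, 0) masks it again (disable). */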
205static void ibx_set_fifo_underrun_reporting(struct drm_device *dev,
206 enum transcoder pch_transcoder,
174 bool enable) 207 bool enable)
175{ 208{
176 struct drm_device *dev = crtc->base.dev;
177 struct drm_i915_private *dev_priv = dev->dev_private; 209 struct drm_i915_private *dev_priv = dev->dev_private;
178 uint32_t bit = (crtc->pipe == PIPE_A) ? SDE_TRANSA_FIFO_UNDER : 210 uint32_t bit = (pch_transcoder == TRANSCODER_A) ?
179 SDE_TRANSB_FIFO_UNDER; 211 SDE_TRANSA_FIFO_UNDER : SDE_TRANSB_FIFO_UNDER;
180 212
181 if (enable) 213 if (enable)
182 I915_WRITE(SDEIMR, I915_READ(SDEIMR) & ~bit); 214 ibx_enable_display_interrupt(dev_priv, bit);
183 else 215 else
184 I915_WRITE(SDEIMR, I915_READ(SDEIMR) | bit); 216 ibx_disable_display_interrupt(dev_priv, bit);
185
186 POSTING_READ(SDEIMR);
187} 217}
188 218
189static void cpt_set_fifo_underrun_reporting(struct drm_device *dev, 219static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
@@ -193,19 +223,26 @@ static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
193 struct drm_i915_private *dev_priv = dev->dev_private; 223 struct drm_i915_private *dev_priv = dev->dev_private;
194 224
195 if (enable) { 225 if (enable) {
226 I915_WRITE(SERR_INT,
227 SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder));
228
196 if (!cpt_can_enable_serr_int(dev)) 229 if (!cpt_can_enable_serr_int(dev))
197 return; 230 return;
198 231
199 I915_WRITE(SERR_INT, SERR_INT_TRANS_A_FIFO_UNDERRUN | 232 ibx_enable_display_interrupt(dev_priv, SDE_ERROR_CPT);
200 SERR_INT_TRANS_B_FIFO_UNDERRUN |
201 SERR_INT_TRANS_C_FIFO_UNDERRUN);
202
203 I915_WRITE(SDEIMR, I915_READ(SDEIMR) & ~SDE_ERROR_CPT);
204 } else { 233 } else {
205 I915_WRITE(SDEIMR, I915_READ(SDEIMR) | SDE_ERROR_CPT); 234 uint32_t tmp = I915_READ(SERR_INT);
206 } 235 bool was_enabled = !(I915_READ(SDEIMR) & SDE_ERROR_CPT);
207 236
208 POSTING_READ(SDEIMR); 237 /* Change the state _after_ we've read out the current one. */
238 ibx_disable_display_interrupt(dev_priv, SDE_ERROR_CPT);
239
240 if (!was_enabled &&
241 (tmp & SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder))) {
242 DRM_DEBUG_KMS("uncleared pch fifo underrun on pch transcoder %c\n",
243 transcoder_name(pch_transcoder));
244 }
245 }
209} 246}
210 247
211/** 248/**
@@ -243,7 +280,7 @@ bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
243 if (IS_GEN5(dev) || IS_GEN6(dev)) 280 if (IS_GEN5(dev) || IS_GEN6(dev))
244 ironlake_set_fifo_underrun_reporting(dev, pipe, enable); 281 ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
245 else if (IS_GEN7(dev)) 282 else if (IS_GEN7(dev))
246 ivybridge_set_fifo_underrun_reporting(dev, enable); 283 ivybridge_set_fifo_underrun_reporting(dev, pipe, enable);
247 284
248done: 285done:
249 spin_unlock_irqrestore(&dev_priv->irq_lock, flags); 286 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
@@ -269,29 +306,19 @@ bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
269 bool enable) 306 bool enable)
270{ 307{
271 struct drm_i915_private *dev_priv = dev->dev_private; 308 struct drm_i915_private *dev_priv = dev->dev_private;
272 enum pipe p; 309 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pch_transcoder];
273 struct drm_crtc *crtc; 310 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
274 struct intel_crtc *intel_crtc;
275 unsigned long flags; 311 unsigned long flags;
276 bool ret; 312 bool ret;
277 313
278 if (HAS_PCH_LPT(dev)) { 314 /*
279 crtc = NULL; 315 * NOTE: Pre-LPT has a fixed cpu pipe -> pch transcoder mapping, but LPT
280 for_each_pipe(p) { 316 * has only one pch transcoder A that all pipes can use. To avoid racy
281 struct drm_crtc *c = dev_priv->pipe_to_crtc_mapping[p]; 317 * pch transcoder -> pipe lookups from interrupt code simply store the
282 if (intel_pipe_has_type(c, INTEL_OUTPUT_ANALOG)) { 318 * underrun statistics in crtc A. Since we never expose this anywhere
283 crtc = c; 319 * nor use it outside of the fifo underrun code here using the "wrong"
284 break; 320 * crtc on LPT won't cause issues.
285 } 321 */
286 }
287 if (!crtc) {
288 DRM_ERROR("PCH FIFO underrun, but no CRTC using the PCH found\n");
289 return false;
290 }
291 } else {
292 crtc = dev_priv->pipe_to_crtc_mapping[pch_transcoder];
293 }
294 intel_crtc = to_intel_crtc(crtc);
295 322
296 spin_lock_irqsave(&dev_priv->irq_lock, flags); 323 spin_lock_irqsave(&dev_priv->irq_lock, flags);
297 324
@@ -303,7 +330,7 @@ bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
303 intel_crtc->pch_fifo_underrun_disabled = !enable; 330 intel_crtc->pch_fifo_underrun_disabled = !enable;
304 331
305 if (HAS_PCH_IBX(dev)) 332 if (HAS_PCH_IBX(dev))
306 ibx_set_fifo_underrun_reporting(intel_crtc, enable); 333 ibx_set_fifo_underrun_reporting(dev, pch_transcoder, enable);
307 else 334 else
308 cpt_set_fifo_underrun_reporting(dev, pch_transcoder, enable); 335 cpt_set_fifo_underrun_reporting(dev, pch_transcoder, enable);
309 336
@@ -319,6 +346,8 @@ i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
319 u32 reg = PIPESTAT(pipe); 346 u32 reg = PIPESTAT(pipe);
320 u32 pipestat = I915_READ(reg) & 0x7fff0000; 347 u32 pipestat = I915_READ(reg) & 0x7fff0000;
321 348
349 assert_spin_locked(&dev_priv->irq_lock);
350
322 if ((pipestat & mask) == mask) 351 if ((pipestat & mask) == mask)
323 return; 352 return;
324 353
@@ -334,6 +363,8 @@ i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
334 u32 reg = PIPESTAT(pipe); 363 u32 reg = PIPESTAT(pipe);
335 u32 pipestat = I915_READ(reg) & 0x7fff0000; 364 u32 pipestat = I915_READ(reg) & 0x7fff0000;
336 365
366 assert_spin_locked(&dev_priv->irq_lock);
367
337 if ((pipestat & mask) == 0) 368 if ((pipestat & mask) == 0)
338 return; 369 return;
339 370
@@ -625,14 +656,13 @@ static void i915_hotplug_work_func(struct work_struct *work)
625 drm_kms_helper_hotplug_event(dev); 656 drm_kms_helper_hotplug_event(dev);
626} 657}
627 658
628static void ironlake_handle_rps_change(struct drm_device *dev) 659static void ironlake_rps_change_irq_handler(struct drm_device *dev)
629{ 660{
630 drm_i915_private_t *dev_priv = dev->dev_private; 661 drm_i915_private_t *dev_priv = dev->dev_private;
631 u32 busy_up, busy_down, max_avg, min_avg; 662 u32 busy_up, busy_down, max_avg, min_avg;
632 u8 new_delay; 663 u8 new_delay;
633 unsigned long flags;
634 664
635 spin_lock_irqsave(&mchdev_lock, flags); 665 spin_lock(&mchdev_lock);
636 666
637 I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS)); 667 I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));
638 668
@@ -660,7 +690,7 @@ static void ironlake_handle_rps_change(struct drm_device *dev)
660 if (ironlake_set_drps(dev, new_delay)) 690 if (ironlake_set_drps(dev, new_delay))
661 dev_priv->ips.cur_delay = new_delay; 691 dev_priv->ips.cur_delay = new_delay;
662 692
663 spin_unlock_irqrestore(&mchdev_lock, flags); 693 spin_unlock(&mchdev_lock);
664 694
665 return; 695 return;
666} 696}
@@ -689,13 +719,13 @@ static void gen6_pm_rps_work(struct work_struct *work)
689 u32 pm_iir, pm_imr; 719 u32 pm_iir, pm_imr;
690 u8 new_delay; 720 u8 new_delay;
691 721
692 spin_lock_irq(&dev_priv->rps.lock); 722 spin_lock_irq(&dev_priv->irq_lock);
693 pm_iir = dev_priv->rps.pm_iir; 723 pm_iir = dev_priv->rps.pm_iir;
694 dev_priv->rps.pm_iir = 0; 724 dev_priv->rps.pm_iir = 0;
695 pm_imr = I915_READ(GEN6_PMIMR); 725 pm_imr = I915_READ(GEN6_PMIMR);
696 /* Make sure not to corrupt PMIMR state used by ringbuffer code */ 726 /* Make sure not to corrupt PMIMR state used by ringbuffer code */
697 I915_WRITE(GEN6_PMIMR, pm_imr & ~GEN6_PM_RPS_EVENTS); 727 I915_WRITE(GEN6_PMIMR, pm_imr & ~GEN6_PM_RPS_EVENTS);
698 spin_unlock_irq(&dev_priv->rps.lock); 728 spin_unlock_irq(&dev_priv->irq_lock);
699 729
700 if ((pm_iir & GEN6_PM_RPS_EVENTS) == 0) 730 if ((pm_iir & GEN6_PM_RPS_EVENTS) == 0)
701 return; 731 return;
@@ -804,18 +834,17 @@ static void ivybridge_parity_work(struct work_struct *work)
804 kfree(parity_event[1]); 834 kfree(parity_event[1]);
805} 835}
806 836
807static void ivybridge_handle_parity_error(struct drm_device *dev) 837static void ivybridge_parity_error_irq_handler(struct drm_device *dev)
808{ 838{
809 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 839 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
810 unsigned long flags;
811 840
812 if (!HAS_L3_GPU_CACHE(dev)) 841 if (!HAS_L3_GPU_CACHE(dev))
813 return; 842 return;
814 843
815 spin_lock_irqsave(&dev_priv->irq_lock, flags); 844 spin_lock(&dev_priv->irq_lock);
816 dev_priv->gt_irq_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT; 845 dev_priv->gt_irq_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
817 I915_WRITE(GTIMR, dev_priv->gt_irq_mask); 846 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
818 spin_unlock_irqrestore(&dev_priv->irq_lock, flags); 847 spin_unlock(&dev_priv->irq_lock);
819 848
820 queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work); 849 queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
821} 850}
@@ -841,15 +870,13 @@ static void snb_gt_irq_handler(struct drm_device *dev,
841 } 870 }
842 871
843 if (gt_iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT) 872 if (gt_iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
844 ivybridge_handle_parity_error(dev); 873 ivybridge_parity_error_irq_handler(dev);
845} 874}
846 875
847/* Legacy way of handling PM interrupts */ 876/* Legacy way of handling PM interrupts */
848static void gen6_queue_rps_work(struct drm_i915_private *dev_priv, 877static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv,
849 u32 pm_iir) 878 u32 pm_iir)
850{ 879{
851 unsigned long flags;
852
853 /* 880 /*
854 * IIR bits should never already be set because IMR should 881 * IIR bits should never already be set because IMR should
855 * prevent an interrupt from being shown in IIR. The warning 882 * prevent an interrupt from being shown in IIR. The warning
@@ -860,11 +887,11 @@ static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
860 * The mask bit in IMR is cleared by dev_priv->rps.work. 887 * The mask bit in IMR is cleared by dev_priv->rps.work.
861 */ 888 */
862 889
863 spin_lock_irqsave(&dev_priv->rps.lock, flags); 890 spin_lock(&dev_priv->irq_lock);
864 dev_priv->rps.pm_iir |= pm_iir; 891 dev_priv->rps.pm_iir |= pm_iir;
865 I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir); 892 I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir);
866 POSTING_READ(GEN6_PMIMR); 893 POSTING_READ(GEN6_PMIMR);
867 spin_unlock_irqrestore(&dev_priv->rps.lock, flags); 894 spin_unlock(&dev_priv->irq_lock);
868 895
869 queue_work(dev_priv->wq, &dev_priv->rps.work); 896 queue_work(dev_priv->wq, &dev_priv->rps.work);
870} 897}
@@ -928,7 +955,7 @@ static void dp_aux_irq_handler(struct drm_device *dev)
928 wake_up_all(&dev_priv->gmbus_wait_queue); 955 wake_up_all(&dev_priv->gmbus_wait_queue);
929} 956}
930 957
931/* Unlike gen6_queue_rps_work() from which this function is originally derived, 958/* Unlike gen6_rps_irq_handler() from which this function is originally derived,
932 * we must be able to deal with other PM interrupts. This is complicated because 959 * we must be able to deal with other PM interrupts. This is complicated because
933 * of the way in which we use the masks to defer the RPS work (which for 960 * of the way in which we use the masks to defer the RPS work (which for
934 * posterity is necessary because of forcewake). 961 * posterity is necessary because of forcewake).
@@ -936,27 +963,23 @@ static void dp_aux_irq_handler(struct drm_device *dev)
936static void hsw_pm_irq_handler(struct drm_i915_private *dev_priv, 963static void hsw_pm_irq_handler(struct drm_i915_private *dev_priv,
937 u32 pm_iir) 964 u32 pm_iir)
938{ 965{
939 unsigned long flags; 966 if (pm_iir & GEN6_PM_RPS_EVENTS) {
940 967 spin_lock(&dev_priv->irq_lock);
941 spin_lock_irqsave(&dev_priv->rps.lock, flags); 968 dev_priv->rps.pm_iir |= pm_iir & GEN6_PM_RPS_EVENTS;
942 dev_priv->rps.pm_iir |= pm_iir & GEN6_PM_RPS_EVENTS;
943 if (dev_priv->rps.pm_iir) {
944 I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir); 969 I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir);
945 /* never want to mask useful interrupts. (also posting read) */ 970 /* never want to mask useful interrupts. (also posting read) */
946 WARN_ON(I915_READ_NOTRACE(GEN6_PMIMR) & ~GEN6_PM_RPS_EVENTS); 971 WARN_ON(I915_READ_NOTRACE(GEN6_PMIMR) & ~GEN6_PM_RPS_EVENTS);
947 /* TODO: if queue_work is slow, move it out of the spinlock */ 972 spin_unlock(&dev_priv->irq_lock);
973
948 queue_work(dev_priv->wq, &dev_priv->rps.work); 974 queue_work(dev_priv->wq, &dev_priv->rps.work);
949 } 975 }
950 spin_unlock_irqrestore(&dev_priv->rps.lock, flags);
951 976
952 if (pm_iir & ~GEN6_PM_RPS_EVENTS) { 977 if (pm_iir & PM_VEBOX_USER_INTERRUPT)
953 if (pm_iir & PM_VEBOX_USER_INTERRUPT) 978 notify_ring(dev_priv->dev, &dev_priv->ring[VECS]);
954 notify_ring(dev_priv->dev, &dev_priv->ring[VECS]);
955 979
956 if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) { 980 if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) {
957 DRM_ERROR("VEBOX CS error interrupt 0x%08x\n", pm_iir); 981 DRM_ERROR("VEBOX CS error interrupt 0x%08x\n", pm_iir);
958 i915_handle_error(dev_priv->dev, false); 982 i915_handle_error(dev_priv->dev, false);
959 }
960 } 983 }
961} 984}
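
This rewrite also resolves the old TODO by queueing the work after the lock is dropped: queue_work() takes the workqueue's internal lock, so calling it under irq_lock nests locks and stretches the critical section for nothing. A sketch of the resulting shape, reusing the hypothetical my_dev from the earlier sketch, here assumed to be extended with u32 pm_iir, struct workqueue_struct *wq and struct work_struct rps_work fields; MY_PMIMR and MY_RPS_EVENTS are likewise made up:

#define MY_PMIMR	0x58	/* hypothetical PM interrupt mask register */
#define MY_RPS_EVENTS	0x7	/* hypothetical RPS event bits */

static void my_pm_irq(struct my_dev *dev, u32 pm_iir)
{
	if (!(pm_iir & MY_RPS_EVENTS))
		return;

	spin_lock(&dev->irq_lock);
	dev->pm_iir |= pm_iir & MY_RPS_EVENTS;		/* accumulate events */
	writel(dev->pm_iir, dev->mmio + MY_PMIMR);	/* mask further events */
	spin_unlock(&dev->irq_lock);

	/* Lock already dropped: the mask write above guarantees no second
	 * interrupt can race in and double-queue the work. */
	queue_work(dev->wq, &dev->rps_work);
}
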
962 985
@@ -1029,7 +1052,7 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
1029 gmbus_irq_handler(dev); 1052 gmbus_irq_handler(dev);
1030 1053
1031 if (pm_iir & GEN6_PM_RPS_EVENTS) 1054 if (pm_iir & GEN6_PM_RPS_EVENTS)
1032 gen6_queue_rps_work(dev_priv, pm_iir); 1055 gen6_rps_irq_handler(dev_priv, pm_iir);
1033 1056
1034 I915_WRITE(GTIIR, gt_iir); 1057 I915_WRITE(GTIIR, gt_iir);
1035 I915_WRITE(GEN6_PMIIR, pm_iir); 1058 I915_WRITE(GEN6_PMIIR, pm_iir);
@@ -1267,7 +1290,7 @@ static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
1267 if (IS_HASWELL(dev)) 1290 if (IS_HASWELL(dev))
1268 hsw_pm_irq_handler(dev_priv, pm_iir); 1291 hsw_pm_irq_handler(dev_priv, pm_iir);
1269 else if (pm_iir & GEN6_PM_RPS_EVENTS) 1292 else if (pm_iir & GEN6_PM_RPS_EVENTS)
1270 gen6_queue_rps_work(dev_priv, pm_iir); 1293 gen6_rps_irq_handler(dev_priv, pm_iir);
1271 I915_WRITE(GEN6_PMIIR, pm_iir); 1294 I915_WRITE(GEN6_PMIIR, pm_iir);
1272 ret = IRQ_HANDLED; 1295 ret = IRQ_HANDLED;
1273 } 1296 }
@@ -1384,10 +1407,10 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
1384 } 1407 }
1385 1408
1386 if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT) 1409 if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
1387 ironlake_handle_rps_change(dev); 1410 ironlake_rps_change_irq_handler(dev);
1388 1411
1389 if (IS_GEN6(dev) && pm_iir & GEN6_PM_RPS_EVENTS) 1412 if (IS_GEN6(dev) && pm_iir & GEN6_PM_RPS_EVENTS)
1390 gen6_queue_rps_work(dev_priv, pm_iir); 1413 gen6_rps_irq_handler(dev_priv, pm_iir);
1391 1414
1392 I915_WRITE(GTIIR, gt_iir); 1415 I915_WRITE(GTIIR, gt_iir);
1393 I915_WRITE(DEIIR, de_iir); 1416 I915_WRITE(DEIIR, de_iir);
@@ -1470,535 +1493,6 @@ static void i915_error_work_func(struct work_struct *work)
1470 } 1493 }
1471} 1494}
1472 1495
1473/* NB: please notice the memset */
1474static void i915_get_extra_instdone(struct drm_device *dev,
1475 uint32_t *instdone)
1476{
1477 struct drm_i915_private *dev_priv = dev->dev_private;
1478 memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG);
1479
1480 switch(INTEL_INFO(dev)->gen) {
1481 case 2:
1482 case 3:
1483 instdone[0] = I915_READ(INSTDONE);
1484 break;
1485 case 4:
1486 case 5:
1487 case 6:
1488 instdone[0] = I915_READ(INSTDONE_I965);
1489 instdone[1] = I915_READ(INSTDONE1);
1490 break;
1491 default:
1492 WARN_ONCE(1, "Unsupported platform\n");
1493 case 7:
1494 instdone[0] = I915_READ(GEN7_INSTDONE_1);
1495 instdone[1] = I915_READ(GEN7_SC_INSTDONE);
1496 instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE);
1497 instdone[3] = I915_READ(GEN7_ROW_INSTDONE);
1498 break;
1499 }
1500}
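
The switch just removed (it moves to its own compilation unit) uses a deliberate fallthrough from default into case 7, so an unknown future generation warns once and then gets the newest known register layout. The same dispatch shape in a hedged, self-contained form:

#include <stdio.h>

/* Returns how many INSTDONE-style registers a generation exposes,
 * mirroring the counts visible above: gen2/3 -> 1, gen4-6 -> 2, gen7 -> 4. */
static int instdone_reg_count(int gen)
{
	switch (gen) {
	case 2:
	case 3:
		return 1;
	case 4:
	case 5:
	case 6:
		return 2;
	default:
		fprintf(stderr, "unsupported gen %d, assuming >= 7\n", gen);
		/* deliberate fallthrough: newest known layout */
	case 7:
		return 4;
	}
}
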
1501
1502#ifdef CONFIG_DEBUG_FS
1503static struct drm_i915_error_object *
1504i915_error_object_create_sized(struct drm_i915_private *dev_priv,
1505 struct drm_i915_gem_object *src,
1506 const int num_pages)
1507{
1508 struct drm_i915_error_object *dst;
1509 int i;
1510 u32 reloc_offset;
1511
1512 if (src == NULL || src->pages == NULL)
1513 return NULL;
1514
1515 dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *), GFP_ATOMIC);
1516 if (dst == NULL)
1517 return NULL;
1518
1519 reloc_offset = src->gtt_offset;
1520 for (i = 0; i < num_pages; i++) {
1521 unsigned long flags;
1522 void *d;
1523
1524 d = kmalloc(PAGE_SIZE, GFP_ATOMIC);
1525 if (d == NULL)
1526 goto unwind;
1527
1528 local_irq_save(flags);
1529 if (reloc_offset < dev_priv->gtt.mappable_end &&
1530 src->has_global_gtt_mapping) {
1531 void __iomem *s;
1532
1533 /* Simply ignore tiling or any overlapping fence.
1534 * It's part of the error state, and this hopefully
1535 * captures what the GPU read.
1536 */
1537
1538 s = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
1539 reloc_offset);
1540 memcpy_fromio(d, s, PAGE_SIZE);
1541 io_mapping_unmap_atomic(s);
1542 } else if (src->stolen) {
1543 unsigned long offset;
1544
1545 offset = dev_priv->mm.stolen_base;
1546 offset += src->stolen->start;
1547 offset += i << PAGE_SHIFT;
1548
1549 memcpy_fromio(d, (void __iomem *) offset, PAGE_SIZE);
1550 } else {
1551 struct page *page;
1552 void *s;
1553
1554 page = i915_gem_object_get_page(src, i);
1555
1556 drm_clflush_pages(&page, 1);
1557
1558 s = kmap_atomic(page);
1559 memcpy(d, s, PAGE_SIZE);
1560 kunmap_atomic(s);
1561
1562 drm_clflush_pages(&page, 1);
1563 }
1564 local_irq_restore(flags);
1565
1566 dst->pages[i] = d;
1567
1568 reloc_offset += PAGE_SIZE;
1569 }
1570 dst->page_count = num_pages;
1571 dst->gtt_offset = src->gtt_offset;
1572
1573 return dst;
1574
1575unwind:
1576 while (i--)
1577 kfree(dst->pages[i]);
1578 kfree(dst);
1579 return NULL;
1580}
1581#define i915_error_object_create(dev_priv, src) \
1582 i915_error_object_create_sized((dev_priv), (src), \
1583 (src)->base.size>>PAGE_SHIFT)
1584
1585static void
1586i915_error_object_free(struct drm_i915_error_object *obj)
1587{
1588 int page;
1589
1590 if (obj == NULL)
1591 return;
1592
1593 for (page = 0; page < obj->page_count; page++)
1594 kfree(obj->pages[page]);
1595
1596 kfree(obj);
1597}
1598
1599void
1600i915_error_state_free(struct kref *error_ref)
1601{
1602 struct drm_i915_error_state *error = container_of(error_ref,
1603 typeof(*error), ref);
1604 int i;
1605
1606 for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
1607 i915_error_object_free(error->ring[i].batchbuffer);
1608 i915_error_object_free(error->ring[i].ringbuffer);
1609 i915_error_object_free(error->ring[i].ctx);
1610 kfree(error->ring[i].requests);
1611 }
1612
1613 kfree(error->active_bo);
1614 kfree(error->overlay);
1615 kfree(error->display);
1616 kfree(error);
1617}
1618static void capture_bo(struct drm_i915_error_buffer *err,
1619 struct drm_i915_gem_object *obj)
1620{
1621 err->size = obj->base.size;
1622 err->name = obj->base.name;
1623 err->rseqno = obj->last_read_seqno;
1624 err->wseqno = obj->last_write_seqno;
1625 err->gtt_offset = obj->gtt_offset;
1626 err->read_domains = obj->base.read_domains;
1627 err->write_domain = obj->base.write_domain;
1628 err->fence_reg = obj->fence_reg;
1629 err->pinned = 0;
1630 if (obj->pin_count > 0)
1631 err->pinned = 1;
1632 if (obj->user_pin_count > 0)
1633 err->pinned = -1;
1634 err->tiling = obj->tiling_mode;
1635 err->dirty = obj->dirty;
1636 err->purgeable = obj->madv != I915_MADV_WILLNEED;
1637 err->ring = obj->ring ? obj->ring->id : -1;
1638 err->cache_level = obj->cache_level;
1639}
1640
1641static u32 capture_active_bo(struct drm_i915_error_buffer *err,
1642 int count, struct list_head *head)
1643{
1644 struct drm_i915_gem_object *obj;
1645 int i = 0;
1646
1647 list_for_each_entry(obj, head, mm_list) {
1648 capture_bo(err++, obj);
1649 if (++i == count)
1650 break;
1651 }
1652
1653 return i;
1654}
1655
1656static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
1657 int count, struct list_head *head)
1658{
1659 struct drm_i915_gem_object *obj;
1660 int i = 0;
1661
1662 list_for_each_entry(obj, head, global_list) {
1663 if (obj->pin_count == 0)
1664 continue;
1665
1666 capture_bo(err++, obj);
1667 if (++i == count)
1668 break;
1669 }
1670
1671 return i;
1672}
1673
1674static void i915_gem_record_fences(struct drm_device *dev,
1675 struct drm_i915_error_state *error)
1676{
1677 struct drm_i915_private *dev_priv = dev->dev_private;
1678 int i;
1679
1680 /* Fences */
1681 switch (INTEL_INFO(dev)->gen) {
1682 case 7:
1683 case 6:
1684 for (i = 0; i < dev_priv->num_fence_regs; i++)
1685 error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
1686 break;
1687 case 5:
1688 case 4:
1689 for (i = 0; i < 16; i++)
1690 error->fence[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
1691 break;
1692 case 3:
1693 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
1694 for (i = 0; i < 8; i++)
1695 error->fence[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
1696 case 2:
1697 for (i = 0; i < 8; i++)
1698 error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
1699 break;
1700
1701 default:
1702 BUG();
1703 }
1704}
1705
1706static struct drm_i915_error_object *
1707i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
1708 struct intel_ring_buffer *ring)
1709{
1710 struct drm_i915_gem_object *obj;
1711 u32 seqno;
1712
1713 if (!ring->get_seqno)
1714 return NULL;
1715
1716 if (HAS_BROKEN_CS_TLB(dev_priv->dev)) {
1717 u32 acthd = I915_READ(ACTHD);
1718
1719 if (WARN_ON(ring->id != RCS))
1720 return NULL;
1721
1722 obj = ring->private;
1723 if (acthd >= obj->gtt_offset &&
1724 acthd < obj->gtt_offset + obj->base.size)
1725 return i915_error_object_create(dev_priv, obj);
1726 }
1727
1728 seqno = ring->get_seqno(ring, false);
1729 list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
1730 if (obj->ring != ring)
1731 continue;
1732
1733 if (i915_seqno_passed(seqno, obj->last_read_seqno))
1734 continue;
1735
1736 if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0)
1737 continue;
1738
1739 /* We need to copy these to an anonymous buffer as the simplest
1740 * method to avoid being overwritten by userspace.
1741 */
1742 return i915_error_object_create(dev_priv, obj);
1743 }
1744
1745 return NULL;
1746}
1747
1748static void i915_record_ring_state(struct drm_device *dev,
1749 struct drm_i915_error_state *error,
1750 struct intel_ring_buffer *ring)
1751{
1752 struct drm_i915_private *dev_priv = dev->dev_private;
1753
1754 if (INTEL_INFO(dev)->gen >= 6) {
1755 error->rc_psmi[ring->id] = I915_READ(ring->mmio_base + 0x50);
1756 error->fault_reg[ring->id] = I915_READ(RING_FAULT_REG(ring));
1757 error->semaphore_mboxes[ring->id][0]
1758 = I915_READ(RING_SYNC_0(ring->mmio_base));
1759 error->semaphore_mboxes[ring->id][1]
1760 = I915_READ(RING_SYNC_1(ring->mmio_base));
1761 error->semaphore_seqno[ring->id][0] = ring->sync_seqno[0];
1762 error->semaphore_seqno[ring->id][1] = ring->sync_seqno[1];
1763 }
1764
1765 if (INTEL_INFO(dev)->gen >= 4) {
1766 error->faddr[ring->id] = I915_READ(RING_DMA_FADD(ring->mmio_base));
1767 error->ipeir[ring->id] = I915_READ(RING_IPEIR(ring->mmio_base));
1768 error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base));
1769 error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base));
1770 error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base));
1771 if (ring->id == RCS)
1772 error->bbaddr = I915_READ64(BB_ADDR);
1773 } else {
1774 error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX);
1775 error->ipeir[ring->id] = I915_READ(IPEIR);
1776 error->ipehr[ring->id] = I915_READ(IPEHR);
1777 error->instdone[ring->id] = I915_READ(INSTDONE);
1778 }
1779
1780 error->waiting[ring->id] = waitqueue_active(&ring->irq_queue);
1781 error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base));
1782 error->seqno[ring->id] = ring->get_seqno(ring, false);
1783 error->acthd[ring->id] = intel_ring_get_active_head(ring);
1784 error->head[ring->id] = I915_READ_HEAD(ring);
1785 error->tail[ring->id] = I915_READ_TAIL(ring);
1786 error->ctl[ring->id] = I915_READ_CTL(ring);
1787
1788 error->cpu_ring_head[ring->id] = ring->head;
1789 error->cpu_ring_tail[ring->id] = ring->tail;
1790}
1791
1792
1793static void i915_gem_record_active_context(struct intel_ring_buffer *ring,
1794 struct drm_i915_error_state *error,
1795 struct drm_i915_error_ring *ering)
1796{
1797 struct drm_i915_private *dev_priv = ring->dev->dev_private;
1798 struct drm_i915_gem_object *obj;
1799
1800 /* Currently render ring is the only HW context user */
1801 if (ring->id != RCS || !error->ccid)
1802 return;
1803
1804 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
1805 if ((error->ccid & PAGE_MASK) == obj->gtt_offset) {
1806 ering->ctx = i915_error_object_create_sized(dev_priv,
1807 obj, 1);
1808 }
1809 }
1810}
1811
1812static void i915_gem_record_rings(struct drm_device *dev,
1813 struct drm_i915_error_state *error)
1814{
1815 struct drm_i915_private *dev_priv = dev->dev_private;
1816 struct intel_ring_buffer *ring;
1817 struct drm_i915_gem_request *request;
1818 int i, count;
1819
1820 for_each_ring(ring, dev_priv, i) {
1821 i915_record_ring_state(dev, error, ring);
1822
1823 error->ring[i].batchbuffer =
1824 i915_error_first_batchbuffer(dev_priv, ring);
1825
1826 error->ring[i].ringbuffer =
1827 i915_error_object_create(dev_priv, ring->obj);
1828
1829
1830 i915_gem_record_active_context(ring, error, &error->ring[i]);
1831
1832 count = 0;
1833 list_for_each_entry(request, &ring->request_list, list)
1834 count++;
1835
1836 error->ring[i].num_requests = count;
1837 error->ring[i].requests =
1838 kmalloc(count*sizeof(struct drm_i915_error_request),
1839 GFP_ATOMIC);
1840 if (error->ring[i].requests == NULL) {
1841 error->ring[i].num_requests = 0;
1842 continue;
1843 }
1844
1845 count = 0;
1846 list_for_each_entry(request, &ring->request_list, list) {
1847 struct drm_i915_error_request *erq;
1848
1849 erq = &error->ring[i].requests[count++];
1850 erq->seqno = request->seqno;
1851 erq->jiffies = request->emitted_jiffies;
1852 erq->tail = request->tail;
1853 }
1854 }
1855}
1856
1857/**
1858 * i915_capture_error_state - capture an error record for later analysis
1859 * @dev: drm device
1860 *
1861 * Should be called when an error is detected (either a hang or an error
1862 * interrupt) to capture error state from the time of the error. Fills
1863 * out a structure which becomes available in debugfs for user level tools
1864 * to pick up.
1865 */
1866static void i915_capture_error_state(struct drm_device *dev)
1867{
1868 struct drm_i915_private *dev_priv = dev->dev_private;
1869 struct drm_i915_gem_object *obj;
1870 struct drm_i915_error_state *error;
1871 unsigned long flags;
1872 int i, pipe;
1873
1874 spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
1875 error = dev_priv->gpu_error.first_error;
1876 spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
1877 if (error)
1878 return;
1879
1880 /* Account for pipe specific data like PIPE*STAT */
1881 error = kzalloc(sizeof(*error), GFP_ATOMIC);
1882 if (!error) {
1883 DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
1884 return;
1885 }
1886
1887 DRM_INFO("capturing error event; look for more information in "
1888 "/sys/kernel/debug/dri/%d/i915_error_state\n",
1889 dev->primary->index);
1890
1891 kref_init(&error->ref);
1892 error->eir = I915_READ(EIR);
1893 error->pgtbl_er = I915_READ(PGTBL_ER);
1894 if (HAS_HW_CONTEXTS(dev))
1895 error->ccid = I915_READ(CCID);
1896
1897 if (HAS_PCH_SPLIT(dev))
1898 error->ier = I915_READ(DEIER) | I915_READ(GTIER);
1899 else if (IS_VALLEYVIEW(dev))
1900 error->ier = I915_READ(GTIER) | I915_READ(VLV_IER);
1901 else if (IS_GEN2(dev))
1902 error->ier = I915_READ16(IER);
1903 else
1904 error->ier = I915_READ(IER);
1905
1906 if (INTEL_INFO(dev)->gen >= 6)
1907 error->derrmr = I915_READ(DERRMR);
1908
1909 if (IS_VALLEYVIEW(dev))
1910 error->forcewake = I915_READ(FORCEWAKE_VLV);
1911 else if (INTEL_INFO(dev)->gen >= 7)
1912 error->forcewake = I915_READ(FORCEWAKE_MT);
1913 else if (INTEL_INFO(dev)->gen == 6)
1914 error->forcewake = I915_READ(FORCEWAKE);
1915
1916 if (!HAS_PCH_SPLIT(dev))
1917 for_each_pipe(pipe)
1918 error->pipestat[pipe] = I915_READ(PIPESTAT(pipe));
1919
1920 if (INTEL_INFO(dev)->gen >= 6) {
1921 error->error = I915_READ(ERROR_GEN6);
1922 error->done_reg = I915_READ(DONE_REG);
1923 }
1924
1925 if (INTEL_INFO(dev)->gen == 7)
1926 error->err_int = I915_READ(GEN7_ERR_INT);
1927
1928 i915_get_extra_instdone(dev, error->extra_instdone);
1929
1930 i915_gem_record_fences(dev, error);
1931 i915_gem_record_rings(dev, error);
1932
1933 /* Record buffers on the active and pinned lists. */
1934 error->active_bo = NULL;
1935 error->pinned_bo = NULL;
1936
1937 i = 0;
1938 list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list)
1939 i++;
1940 error->active_bo_count = i;
1941 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
1942 if (obj->pin_count)
1943 i++;
1944 error->pinned_bo_count = i - error->active_bo_count;
1945
1946 error->active_bo = NULL;
1947 error->pinned_bo = NULL;
1948 if (i) {
1949 error->active_bo = kmalloc(sizeof(*error->active_bo)*i,
1950 GFP_ATOMIC);
1951 if (error->active_bo)
1952 error->pinned_bo =
1953 error->active_bo + error->active_bo_count;
1954 }
1955
1956 if (error->active_bo)
1957 error->active_bo_count =
1958 capture_active_bo(error->active_bo,
1959 error->active_bo_count,
1960 &dev_priv->mm.active_list);
1961
1962 if (error->pinned_bo)
1963 error->pinned_bo_count =
1964 capture_pinned_bo(error->pinned_bo,
1965 error->pinned_bo_count,
1966 &dev_priv->mm.bound_list);
1967
1968 do_gettimeofday(&error->time);
1969
1970 error->overlay = intel_overlay_capture_error_state(dev);
1971 error->display = intel_display_capture_error_state(dev);
1972
1973 spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
1974 if (dev_priv->gpu_error.first_error == NULL) {
1975 dev_priv->gpu_error.first_error = error;
1976 error = NULL;
1977 }
1978 spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
1979
1980 if (error)
1981 i915_error_state_free(&error->ref);
1982}
1983
1984void i915_destroy_error_state(struct drm_device *dev)
1985{
1986 struct drm_i915_private *dev_priv = dev->dev_private;
1987 struct drm_i915_error_state *error;
1988 unsigned long flags;
1989
1990 spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
1991 error = dev_priv->gpu_error.first_error;
1992 dev_priv->gpu_error.first_error = NULL;
1993 spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
1994
1995 if (error)
1996 kref_put(&error->ref, i915_error_state_free);
1997}
1998#else
1999#define i915_capture_error_state(x)
2000#endif
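
Everything deleted here keeps one invariant worth spelling out: the error record is reference-counted with a kref, so debugfs/sysfs readers and the capture code drop it independently and the last one frees it. A minimal sketch of that lifetime, with hypothetical my_error/my_error_free names:

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct my_error {
	struct kref ref;
	/* ... captured registers and buffers ... */
};

static void my_error_free(struct kref *ref)
{
	struct my_error *e = container_of(ref, struct my_error, ref);
	/* real code frees every captured sub-object first */
	kfree(e);
}

static void my_error_lifetime_demo(void)
{
	/* GFP_ATOMIC because capture runs in atomic context, as above. */
	struct my_error *e = kzalloc(sizeof(*e), GFP_ATOMIC);
	if (!e)
		return;
	kref_init(&e->ref);			/* capture holds the first ref */

	kref_get(&e->ref);			/* a reader pins the record */
	/* ... format the dump for userspace ... */
	kref_put(&e->ref, my_error_free);	/* reader done */

	kref_put(&e->ref, my_error_free);	/* last ref dropped: freed */
}
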
2001
2002static void i915_report_and_clear_eir(struct drm_device *dev) 1496static void i915_report_and_clear_eir(struct drm_device *dev)
2003{ 1497{
2004 struct drm_i915_private *dev_priv = dev->dev_private; 1498 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -2155,10 +1649,10 @@ static void __always_unused i915_pageflip_stall_check(struct drm_device *dev, in
2155 if (INTEL_INFO(dev)->gen >= 4) { 1649 if (INTEL_INFO(dev)->gen >= 4) {
2156 int dspsurf = DSPSURF(intel_crtc->plane); 1650 int dspsurf = DSPSURF(intel_crtc->plane);
2157 stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) == 1651 stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) ==
2158 obj->gtt_offset; 1652 i915_gem_obj_ggtt_offset(obj);
2159 } else { 1653 } else {
2160 int dspaddr = DSPADDR(intel_crtc->plane); 1654 int dspaddr = DSPADDR(intel_crtc->plane);
2161 stall_detected = I915_READ(dspaddr) == (obj->gtt_offset + 1655 stall_detected = I915_READ(dspaddr) == (i915_gem_obj_ggtt_offset(obj) +
2162 crtc->y * crtc->fb->pitches[0] + 1656 crtc->y * crtc->fb->pitches[0] +
2163 crtc->x * crtc->fb->bits_per_pixel/8); 1657 crtc->x * crtc->fb->bits_per_pixel/8);
2164 } 1658 }
@@ -2648,22 +2142,21 @@ static void ibx_hpd_irq_setup(struct drm_device *dev)
2648 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2142 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2649 struct drm_mode_config *mode_config = &dev->mode_config; 2143 struct drm_mode_config *mode_config = &dev->mode_config;
2650 struct intel_encoder *intel_encoder; 2144 struct intel_encoder *intel_encoder;
2651 u32 mask = ~I915_READ(SDEIMR); 2145 u32 hotplug_irqs, hotplug, enabled_irqs = 0;
2652 u32 hotplug;
2653 2146
2654 if (HAS_PCH_IBX(dev)) { 2147 if (HAS_PCH_IBX(dev)) {
2655 mask &= ~SDE_HOTPLUG_MASK; 2148 hotplug_irqs = SDE_HOTPLUG_MASK;
2656 list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head) 2149 list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
2657 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED) 2150 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
2658 mask |= hpd_ibx[intel_encoder->hpd_pin]; 2151 enabled_irqs |= hpd_ibx[intel_encoder->hpd_pin];
2659 } else { 2152 } else {
2660 mask &= ~SDE_HOTPLUG_MASK_CPT; 2153 hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
2661 list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head) 2154 list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
2662 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED) 2155 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
2663 mask |= hpd_cpt[intel_encoder->hpd_pin]; 2156 enabled_irqs |= hpd_cpt[intel_encoder->hpd_pin];
2664 } 2157 }
2665 2158
2666 I915_WRITE(SDEIMR, ~mask); 2159 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
2667 2160
2668 /* 2161 /*
2669 * Enable digital hotplug on the PCH, and configure the DP short pulse 2162 * Enable digital hotplug on the PCH, and configure the DP short pulse
@@ -2792,8 +2285,7 @@ static int ivybridge_irq_postinstall(struct drm_device *dev)
2792 2285
2793 I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR)); 2286 I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
2794 if (HAS_VEBOX(dev)) 2287 if (HAS_VEBOX(dev))
2795 pm_irqs |= PM_VEBOX_USER_INTERRUPT | 2288 pm_irqs |= PM_VEBOX_USER_INTERRUPT;
2796 PM_VEBOX_CS_ERROR_INTERRUPT;
2797 2289
2798 /* Our enable/disable rps functions may touch these registers so 2290 /* Our enable/disable rps functions may touch these registers so
2799 * make sure to set a known state for only the non-RPS bits. 2291 * make sure to set a known state for only the non-RPS bits.
@@ -2817,6 +2309,7 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
2817 u32 gt_irqs; 2309 u32 gt_irqs;
2818 u32 enable_mask; 2310 u32 enable_mask;
2819 u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV; 2311 u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV;
2312 unsigned long irqflags;
2820 2313
2821 enable_mask = I915_DISPLAY_PORT_INTERRUPT; 2314 enable_mask = I915_DISPLAY_PORT_INTERRUPT;
2822 enable_mask |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 2315 enable_mask |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
@@ -2842,9 +2335,13 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
2842 I915_WRITE(PIPESTAT(1), 0xffff); 2335 I915_WRITE(PIPESTAT(1), 0xffff);
2843 POSTING_READ(VLV_IER); 2336 POSTING_READ(VLV_IER);
2844 2337
2338 /* Interrupt setup is already guaranteed to be single-threaded, this is
2339 * just to make the assert_spin_locked check happy. */
2340 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2845 i915_enable_pipestat(dev_priv, 0, pipestat_enable); 2341 i915_enable_pipestat(dev_priv, 0, pipestat_enable);
2846 i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE); 2342 i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE);
2847 i915_enable_pipestat(dev_priv, 1, pipestat_enable); 2343 i915_enable_pipestat(dev_priv, 1, pipestat_enable);
2344 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2848 2345
2849 I915_WRITE(VLV_IIR, 0xffffffff); 2346 I915_WRITE(VLV_IIR, 0xffffffff);
2850 I915_WRITE(VLV_IIR, 0xffffffff); 2347 I915_WRITE(VLV_IIR, 0xffffffff);
@@ -3323,6 +2820,7 @@ static int i965_irq_postinstall(struct drm_device *dev)
3323 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2820 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
3324 u32 enable_mask; 2821 u32 enable_mask;
3325 u32 error_mask; 2822 u32 error_mask;
2823 unsigned long irqflags;
3326 2824
3327 /* Unmask the interrupts that we always want on. */ 2825 /* Unmask the interrupts that we always want on. */
3328 dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT | 2826 dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
@@ -3341,7 +2839,11 @@ static int i965_irq_postinstall(struct drm_device *dev)
3341 if (IS_G4X(dev)) 2839 if (IS_G4X(dev))
3342 enable_mask |= I915_BSD_USER_INTERRUPT; 2840 enable_mask |= I915_BSD_USER_INTERRUPT;
3343 2841
2842 /* Interrupt setup is already guaranteed to be single-threaded, this is
2843 * just to make the assert_spin_locked check happy. */
2844 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3344 i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE); 2845 i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE);
2846 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3345 2847
3346 /* 2848 /*
3347 * Enable some error detection, note the instruction error mask 2849 * Enable some error detection, note the instruction error mask
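
Both postinstall hunks above take irq_lock around i915_enable_pipestat() even though setup is single-threaded; the point, as the added comments say, is that the helper asserts the lock is held, turning its locking contract into something every caller must satisfy. The idiom in isolation, again on the hypothetical my_dev (MY_PIPESTAT and MY_VBLANK_EN are made-up register names):

#define MY_PIPESTAT	0x70024		/* hypothetical pipe status register */
#define MY_VBLANK_EN	(1 << 17)	/* hypothetical enable bit */

static void my_enable_pipestat(struct my_dev *dev, u32 bits)
{
	assert_spin_locked(&dev->irq_lock);	/* contract: caller holds it */
	writel(readl(dev->mmio + MY_PIPESTAT) | bits, dev->mmio + MY_PIPESTAT);
}

static void my_postinstall(struct my_dev *dev)
{
	unsigned long flags;

	/* Uncontended here, but taken anyway so the assert above holds
	 * uniformly for every caller, present and future. */
	spin_lock_irqsave(&dev->irq_lock, flags);
	my_enable_pipestat(dev, MY_VBLANK_EN);
	spin_unlock_irqrestore(&dev->irq_lock, flags);
}
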
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index f2326fc60ac9..dc3d6a74f391 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -363,6 +363,7 @@
363#define PUNIT_REG_GPU_LFM 0xd3 363#define PUNIT_REG_GPU_LFM 0xd3
364#define PUNIT_REG_GPU_FREQ_REQ 0xd4 364#define PUNIT_REG_GPU_FREQ_REQ 0xd4
365#define PUNIT_REG_GPU_FREQ_STS 0xd8 365#define PUNIT_REG_GPU_FREQ_STS 0xd8
366#define GENFREQSTATUS (1<<0)
366#define PUNIT_REG_MEDIA_TURBO_FREQ_REQ 0xdc 367#define PUNIT_REG_MEDIA_TURBO_FREQ_REQ 0xdc
367 368
368#define PUNIT_FUSE_BUS2 0xf6 /* bits 47:40 */ 369#define PUNIT_FUSE_BUS2 0xf6 /* bits 47:40 */
@@ -680,6 +681,7 @@
680#define ERR_INT_FIFO_UNDERRUN_C (1<<6) 681#define ERR_INT_FIFO_UNDERRUN_C (1<<6)
681#define ERR_INT_FIFO_UNDERRUN_B (1<<3) 682#define ERR_INT_FIFO_UNDERRUN_B (1<<3)
682#define ERR_INT_FIFO_UNDERRUN_A (1<<0) 683#define ERR_INT_FIFO_UNDERRUN_A (1<<0)
684#define ERR_INT_FIFO_UNDERRUN(pipe) (1<<(pipe*3))
683 685
684#define FPGA_DBG 0x42300 686#define FPGA_DBG 0x42300
685#define FPGA_DBG_RM_NOCLAIM (1<<31) 687#define FPGA_DBG_RM_NOCLAIM (1<<31)
@@ -1125,7 +1127,8 @@
1125#define _DPLL_B (dev_priv->info->display_mmio_offset + 0x6018) 1127#define _DPLL_B (dev_priv->info->display_mmio_offset + 0x6018)
1126#define DPLL(pipe) _PIPE(pipe, _DPLL_A, _DPLL_B) 1128#define DPLL(pipe) _PIPE(pipe, _DPLL_A, _DPLL_B)
1127#define DPLL_VCO_ENABLE (1 << 31) 1129#define DPLL_VCO_ENABLE (1 << 31)
1128#define DPLL_DVO_HIGH_SPEED (1 << 30) 1130#define DPLL_SDVO_HIGH_SPEED (1 << 30)
1131#define DPLL_DVO_2X_MODE (1 << 30)
1129#define DPLL_EXT_BUFFER_ENABLE_VLV (1 << 30) 1132#define DPLL_EXT_BUFFER_ENABLE_VLV (1 << 30)
1130#define DPLL_SYNCLOCK_ENABLE (1 << 29) 1133#define DPLL_SYNCLOCK_ENABLE (1 << 29)
1131#define DPLL_REFA_CLK_ENABLE_VLV (1 << 29) 1134#define DPLL_REFA_CLK_ENABLE_VLV (1 << 29)
@@ -3880,6 +3883,7 @@
3880#define SERR_INT_TRANS_C_FIFO_UNDERRUN (1<<6) 3883#define SERR_INT_TRANS_C_FIFO_UNDERRUN (1<<6)
3881#define SERR_INT_TRANS_B_FIFO_UNDERRUN (1<<3) 3884#define SERR_INT_TRANS_B_FIFO_UNDERRUN (1<<3)
3882#define SERR_INT_TRANS_A_FIFO_UNDERRUN (1<<0) 3885#define SERR_INT_TRANS_A_FIFO_UNDERRUN (1<<0)
3886#define SERR_INT_TRANS_FIFO_UNDERRUN(pipe) (1<<(pipe*3))
3883 3887
3884/* digital port hotplug */ 3888/* digital port hotplug */
3885#define PCH_PORT_HOTPLUG 0xc4030 /* SHOTPLUG_CTL */ 3889#define PCH_PORT_HOTPLUG 0xc4030 /* SHOTPLUG_CTL */
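
The two new parameterized macros rely on the per-pipe underrun bits being spaced three apart, which the explicit defines above them confirm (bits 0, 3, 6 for pipes A, B, C). A compile-time check of the arithmetic in plain C11 — note the sketch also parenthesizes (pipe), which the in-tree version leaves bare and which would mis-expand for an expression argument:

#include <assert.h>

#define ERR_INT_FIFO_UNDERRUN(pipe)	(1 << ((pipe) * 3))

enum { PIPE_A, PIPE_B, PIPE_C };	/* 0, 1, 2, as in the driver */

static_assert(ERR_INT_FIFO_UNDERRUN(PIPE_A) == (1 << 0), "pipe A -> bit 0");
static_assert(ERR_INT_FIFO_UNDERRUN(PIPE_B) == (1 << 3), "pipe B -> bit 3");
static_assert(ERR_INT_FIFO_UNDERRUN(PIPE_C) == (1 << 6), "pipe C -> bit 6");
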
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index 6875b5654c63..a777e7f3b0df 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -409,6 +409,71 @@ static const struct attribute *gen6_attrs[] = {
409 NULL, 409 NULL,
410}; 410};
411 411
412static ssize_t error_state_read(struct file *filp, struct kobject *kobj,
413 struct bin_attribute *attr, char *buf,
414 loff_t off, size_t count)
415{
416
417 struct device *kdev = container_of(kobj, struct device, kobj);
418 struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
419 struct drm_device *dev = minor->dev;
420 struct i915_error_state_file_priv error_priv;
421 struct drm_i915_error_state_buf error_str;
422 ssize_t ret_count = 0;
423 int ret;
424
425 memset(&error_priv, 0, sizeof(error_priv));
426
427 ret = i915_error_state_buf_init(&error_str, count, off);
428 if (ret)
429 return ret;
430
431 error_priv.dev = dev;
432 i915_error_state_get(dev, &error_priv);
433
434 ret = i915_error_state_to_str(&error_str, &error_priv);
435 if (ret)
436 goto out;
437
438 ret_count = count < error_str.bytes ? count : error_str.bytes;
439
440 memcpy(buf, error_str.buf, ret_count);
441out:
442 i915_error_state_put(&error_priv);
443 i915_error_state_buf_release(&error_str);
444
445 return ret ?: ret_count;
446}
447
448static ssize_t error_state_write(struct file *file, struct kobject *kobj,
449 struct bin_attribute *attr, char *buf,
450 loff_t off, size_t count)
451{
452 struct device *kdev = container_of(kobj, struct device, kobj);
453 struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
454 struct drm_device *dev = minor->dev;
455 int ret;
456
457 DRM_DEBUG_DRIVER("Resetting error state\n");
458
459 ret = mutex_lock_interruptible(&dev->struct_mutex);
460 if (ret)
461 return ret;
462
463 i915_destroy_error_state(dev);
464 mutex_unlock(&dev->struct_mutex);
465
466 return count;
467}
468
469static struct bin_attribute error_state_attr = {
470 .attr.name = "error",
471 .attr.mode = S_IRUSR | S_IWUSR,
472 .size = 0,
473 .read = error_state_read,
474 .write = error_state_write,
475};
476
412void i915_setup_sysfs(struct drm_device *dev) 477void i915_setup_sysfs(struct drm_device *dev)
413{ 478{
414 int ret; 479 int ret;
@@ -432,10 +497,16 @@ void i915_setup_sysfs(struct drm_device *dev)
432 if (ret) 497 if (ret)
433 DRM_ERROR("gen6 sysfs setup failed\n"); 498 DRM_ERROR("gen6 sysfs setup failed\n");
434 } 499 }
500
501 ret = sysfs_create_bin_file(&dev->primary->kdev.kobj,
502 &error_state_attr);
503 if (ret)
504 DRM_ERROR("error_state sysfs setup failed\n");
435} 505}
436 506
437void i915_teardown_sysfs(struct drm_device *dev) 507void i915_teardown_sysfs(struct drm_device *dev)
438{ 508{
509 sysfs_remove_bin_file(&dev->primary->kdev.kobj, &error_state_attr);
439 sysfs_remove_files(&dev->primary->kdev.kobj, gen6_attrs); 510 sysfs_remove_files(&dev->primary->kdev.kobj, gen6_attrs);
440 device_remove_bin_file(&dev->primary->kdev, &dpf_attrs); 511 device_remove_bin_file(&dev->primary->kdev, &dpf_attrs);
441#ifdef CONFIG_PM 512#ifdef CONFIG_PM
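
From userspace the new node behaves like any sysfs bin attribute: reads stream the formatted capture, and any write resets it via i915_destroy_error_state(). A hedged usage sketch — the /sys/class/drm/card0/error path assumes card 0 and the standard sysfs layout, and the 0600 mode means it needs root:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	ssize_t n;
	int fd = open("/sys/class/drm/card0/error", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, (size_t)n, stdout);	/* dump the capture */

	if (write(fd, "1", 1) < 0)			/* any write clears it */
		perror("write");
	close(fd);
	return 0;
}
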
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index 3db4a6817713..7d283b5fcbf9 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -46,8 +46,8 @@ TRACE_EVENT(i915_gem_object_bind,
46 46
47 TP_fast_assign( 47 TP_fast_assign(
48 __entry->obj = obj; 48 __entry->obj = obj;
49 __entry->offset = obj->gtt_space->start; 49 __entry->offset = i915_gem_obj_ggtt_offset(obj);
50 __entry->size = obj->gtt_space->size; 50 __entry->size = i915_gem_obj_ggtt_size(obj);
51 __entry->mappable = mappable; 51 __entry->mappable = mappable;
52 ), 52 ),
53 53
@@ -68,8 +68,8 @@ TRACE_EVENT(i915_gem_object_unbind,
68 68
69 TP_fast_assign( 69 TP_fast_assign(
70 __entry->obj = obj; 70 __entry->obj = obj;
71 __entry->offset = obj->gtt_space->start; 71 __entry->offset = i915_gem_obj_ggtt_offset(obj);
72 __entry->size = obj->gtt_space->size; 72 __entry->size = i915_gem_obj_ggtt_size(obj);
73 ), 73 ),
74 74
75 TP_printk("obj=%p, offset=%08x size=%x", 75 TP_printk("obj=%p, offset=%08x size=%x",
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 85f3eb74d2b7..ae3dc5d1ff52 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -45,6 +45,11 @@ bool intel_pipe_has_type(struct drm_crtc *crtc, int type);
45static void intel_increase_pllclock(struct drm_crtc *crtc); 45static void intel_increase_pllclock(struct drm_crtc *crtc);
46static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on); 46static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
47 47
48static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
49 struct intel_crtc_config *pipe_config);
50static void ironlake_crtc_clock_get(struct intel_crtc *crtc,
51 struct intel_crtc_config *pipe_config);
52
48typedef struct { 53typedef struct {
49 int min, max; 54 int min, max;
50} intel_range_t; 55} intel_range_t;
@@ -84,7 +89,7 @@ intel_fdi_link_freq(struct drm_device *dev)
84 return 27; 89 return 27;
85} 90}
86 91
87static const intel_limit_t intel_limits_i8xx_dvo = { 92static const intel_limit_t intel_limits_i8xx_dac = {
88 .dot = { .min = 25000, .max = 350000 }, 93 .dot = { .min = 25000, .max = 350000 },
89 .vco = { .min = 930000, .max = 1400000 }, 94 .vco = { .min = 930000, .max = 1400000 },
90 .n = { .min = 3, .max = 16 }, 95 .n = { .min = 3, .max = 16 },
@@ -97,6 +102,19 @@ static const intel_limit_t intel_limits_i8xx_dvo = {
97 .p2_slow = 4, .p2_fast = 2 }, 102 .p2_slow = 4, .p2_fast = 2 },
98}; 103};
99 104
105static const intel_limit_t intel_limits_i8xx_dvo = {
106 .dot = { .min = 25000, .max = 350000 },
107 .vco = { .min = 930000, .max = 1400000 },
108 .n = { .min = 3, .max = 16 },
109 .m = { .min = 96, .max = 140 },
110 .m1 = { .min = 18, .max = 26 },
111 .m2 = { .min = 6, .max = 16 },
112 .p = { .min = 4, .max = 128 },
113 .p1 = { .min = 2, .max = 33 },
114 .p2 = { .dot_limit = 165000,
115 .p2_slow = 4, .p2_fast = 4 },
116};
117
100static const intel_limit_t intel_limits_i8xx_lvds = { 118static const intel_limit_t intel_limits_i8xx_lvds = {
101 .dot = { .min = 25000, .max = 350000 }, 119 .dot = { .min = 25000, .max = 350000 },
102 .vco = { .min = 930000, .max = 1400000 }, 120 .vco = { .min = 930000, .max = 1400000 },
@@ -405,8 +423,10 @@ static const intel_limit_t *intel_limit(struct drm_crtc *crtc, int refclk)
405 } else { 423 } else {
406 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) 424 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
407 limit = &intel_limits_i8xx_lvds; 425 limit = &intel_limits_i8xx_lvds;
408 else 426 else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO))
409 limit = &intel_limits_i8xx_dvo; 427 limit = &intel_limits_i8xx_dvo;
428 else
429 limit = &intel_limits_i8xx_dac;
410 } 430 }
411 return limit; 431 return limit;
412} 432}
@@ -892,8 +912,8 @@ static const char *state_string(bool enabled)
892} 912}
893 913
894/* Only for pre-ILK configs */ 914/* Only for pre-ILK configs */
895static void assert_pll(struct drm_i915_private *dev_priv, 915void assert_pll(struct drm_i915_private *dev_priv,
896 enum pipe pipe, bool state) 916 enum pipe pipe, bool state)
897{ 917{
898 int reg; 918 int reg;
899 u32 val; 919 u32 val;
@@ -906,10 +926,8 @@ static void assert_pll(struct drm_i915_private *dev_priv,
906 "PLL state assertion failure (expected %s, current %s)\n", 926 "PLL state assertion failure (expected %s, current %s)\n",
907 state_string(state), state_string(cur_state)); 927 state_string(state), state_string(cur_state));
908} 928}
909#define assert_pll_enabled(d, p) assert_pll(d, p, true)
910#define assert_pll_disabled(d, p) assert_pll(d, p, false)
911 929
912static struct intel_shared_dpll * 930struct intel_shared_dpll *
913intel_crtc_to_shared_dpll(struct intel_crtc *crtc) 931intel_crtc_to_shared_dpll(struct intel_crtc *crtc)
914{ 932{
915 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; 933 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
@@ -921,9 +939,9 @@ intel_crtc_to_shared_dpll(struct intel_crtc *crtc)
921} 939}
922 940
923/* For ILK+ */ 941/* For ILK+ */
924static void assert_shared_dpll(struct drm_i915_private *dev_priv, 942void assert_shared_dpll(struct drm_i915_private *dev_priv,
925 struct intel_shared_dpll *pll, 943 struct intel_shared_dpll *pll,
926 bool state) 944 bool state)
927{ 945{
928 bool cur_state; 946 bool cur_state;
929 struct intel_dpll_hw_state hw_state; 947 struct intel_dpll_hw_state hw_state;
@@ -942,8 +960,6 @@ static void assert_shared_dpll(struct drm_i915_private *dev_priv,
942 "%s assertion failure (expected %s, current %s)\n", 960 "%s assertion failure (expected %s, current %s)\n",
943 pll->name, state_string(state), state_string(cur_state)); 961 pll->name, state_string(state), state_string(cur_state));
944} 962}
945#define assert_shared_dpll_enabled(d, p) assert_shared_dpll(d, p, true)
946#define assert_shared_dpll_disabled(d, p) assert_shared_dpll(d, p, false)
947 963
948static void assert_fdi_tx(struct drm_i915_private *dev_priv, 964static void assert_fdi_tx(struct drm_i915_private *dev_priv,
949 enum pipe pipe, bool state) 965 enum pipe pipe, bool state)
@@ -1007,15 +1023,19 @@ static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
1007 WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n"); 1023 WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
1008} 1024}
1009 1025
1010static void assert_fdi_rx_pll_enabled(struct drm_i915_private *dev_priv, 1026void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
1011 enum pipe pipe) 1027 enum pipe pipe, bool state)
1012{ 1028{
1013 int reg; 1029 int reg;
1014 u32 val; 1030 u32 val;
1031 bool cur_state;
1015 1032
1016 reg = FDI_RX_CTL(pipe); 1033 reg = FDI_RX_CTL(pipe);
1017 val = I915_READ(reg); 1034 val = I915_READ(reg);
1018 WARN(!(val & FDI_RX_PLL_ENABLE), "FDI RX PLL assertion failure, should be active but is disabled\n"); 1035 cur_state = !!(val & FDI_RX_PLL_ENABLE);
1036 WARN(cur_state != state,
1037 "FDI RX PLL assertion failure (expected %s, current %s)\n",
1038 state_string(state), state_string(cur_state));
1019} 1039}
1020 1040
1021static void assert_panel_unlocked(struct drm_i915_private *dev_priv, 1041static void assert_panel_unlocked(struct drm_i915_private *dev_priv,
@@ -1111,7 +1131,7 @@ static void assert_planes_disabled(struct drm_i915_private *dev_priv,
1111 } 1131 }
1112 1132
1113 /* Need to check both planes against the pipe */ 1133 /* Need to check both planes against the pipe */
1114 for (i = 0; i < INTEL_INFO(dev)->num_pipes; i++) { 1134 for_each_pipe(i) {
1115 reg = DSPCNTR(i); 1135 reg = DSPCNTR(i);
1116 val = I915_READ(reg); 1136 val = I915_READ(reg);
1117 cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >> 1137 cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
@@ -1301,51 +1321,92 @@ static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
1301 assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMID); 1321 assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMID);
1302} 1322}
1303 1323
1304/** 1324static void vlv_enable_pll(struct intel_crtc *crtc)
1305 * intel_enable_pll - enable a PLL
1306 * @dev_priv: i915 private structure
1307 * @pipe: pipe PLL to enable
1308 *
1309 * Enable @pipe's PLL so we can start pumping pixels from a plane. Check to
1310 * make sure the PLL reg is writable first though, since the panel write
1311 * protect mechanism may be enabled.
1312 *
1313 * Note! This is for pre-ILK only.
1314 *
1315 * Unfortunately needed by dvo_ns2501 since the dvo depends on it running.
1316 */
1317static void intel_enable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1318{ 1325{
1319 int reg; 1326 struct drm_device *dev = crtc->base.dev;
1320 u32 val; 1327 struct drm_i915_private *dev_priv = dev->dev_private;
1328 int reg = DPLL(crtc->pipe);
1329 u32 dpll = crtc->config.dpll_hw_state.dpll;
1321 1330
1322 assert_pipe_disabled(dev_priv, pipe); 1331 assert_pipe_disabled(dev_priv, crtc->pipe);
1323 1332
1324 /* No really, not for ILK+ */ 1333 /* No really, not for ILK+ */
1325 BUG_ON(!IS_VALLEYVIEW(dev_priv->dev) && dev_priv->info->gen >= 5); 1334 BUG_ON(!IS_VALLEYVIEW(dev_priv->dev));
1326 1335
1327 /* PLL is protected by panel, make sure we can write it */ 1336 /* PLL is protected by panel, make sure we can write it */
1328 if (IS_MOBILE(dev_priv->dev) && !IS_I830(dev_priv->dev)) 1337 if (IS_MOBILE(dev_priv->dev) && !IS_I830(dev_priv->dev))
1329 assert_panel_unlocked(dev_priv, pipe); 1338 assert_panel_unlocked(dev_priv, crtc->pipe);
1330 1339
1331 reg = DPLL(pipe); 1340 I915_WRITE(reg, dpll);
1332 val = I915_READ(reg); 1341 POSTING_READ(reg);
1333 val |= DPLL_VCO_ENABLE; 1342 udelay(150);
1343
1344 if (wait_for(((I915_READ(reg) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
1345 DRM_ERROR("DPLL %d failed to lock\n", crtc->pipe);
1346
1347 I915_WRITE(DPLL_MD(crtc->pipe), crtc->config.dpll_hw_state.dpll_md);
1348 POSTING_READ(DPLL_MD(crtc->pipe));
1334 1349
1335 /* We do this three times for luck */ 1350 /* We do this three times for luck */
1336 I915_WRITE(reg, val); 1351 I915_WRITE(reg, dpll);
1337 POSTING_READ(reg); 1352 POSTING_READ(reg);
1338 udelay(150); /* wait for warmup */ 1353 udelay(150); /* wait for warmup */
1339 I915_WRITE(reg, val); 1354 I915_WRITE(reg, dpll);
1340 POSTING_READ(reg); 1355 POSTING_READ(reg);
1341 udelay(150); /* wait for warmup */ 1356 udelay(150); /* wait for warmup */
1342 I915_WRITE(reg, val); 1357 I915_WRITE(reg, dpll);
1358 POSTING_READ(reg);
1359 udelay(150); /* wait for warmup */
1360}
1361
1362static void i9xx_enable_pll(struct intel_crtc *crtc)
1363{
1364 struct drm_device *dev = crtc->base.dev;
1365 struct drm_i915_private *dev_priv = dev->dev_private;
1366 int reg = DPLL(crtc->pipe);
1367 u32 dpll = crtc->config.dpll_hw_state.dpll;
1368
1369 assert_pipe_disabled(dev_priv, crtc->pipe);
1370
1371 /* No really, not for ILK+ */
1372 BUG_ON(dev_priv->info->gen >= 5);
1373
1374 /* PLL is protected by panel, make sure we can write it */
1375 if (IS_MOBILE(dev) && !IS_I830(dev))
1376 assert_panel_unlocked(dev_priv, crtc->pipe);
1377
1378 I915_WRITE(reg, dpll);
1379
1380 /* Wait for the clocks to stabilize. */
1381 POSTING_READ(reg);
1382 udelay(150);
1383
1384 if (INTEL_INFO(dev)->gen >= 4) {
1385 I915_WRITE(DPLL_MD(crtc->pipe),
1386 crtc->config.dpll_hw_state.dpll_md);
1387 } else {
1388 /* The pixel multiplier can only be updated once the
1389 * DPLL is enabled and the clocks are stable.
1390 *
1391 * So write it again.
1392 */
1393 I915_WRITE(reg, dpll);
1394 }
1395
1396 /* We do this three times for luck */
1397 I915_WRITE(reg, dpll);
1398 POSTING_READ(reg);
1399 udelay(150); /* wait for warmup */
1400 I915_WRITE(reg, dpll);
1401 POSTING_READ(reg);
1402 udelay(150); /* wait for warmup */
1403 I915_WRITE(reg, dpll);
1343 POSTING_READ(reg); 1404 POSTING_READ(reg);
1344 udelay(150); /* wait for warmup */ 1405 udelay(150); /* wait for warmup */
1345} 1406}
1346 1407
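
Both enable paths above share one primitive: write the DPLL register, do a posting read so the write is actually flushed to the hardware, then udelay(150) for the clocks to settle (repeated "three times for luck"). The primitive in isolation, on the hypothetical my_dev from the earlier sketches:

#include <linux/delay.h>
#include <linux/io.h>

static void my_pll_write_and_settle(struct my_dev *dev, u32 reg, u32 val)
{
	writel(val, dev->mmio + reg);
	(void)readl(dev->mmio + reg);	/* posting read: flush the write */
	udelay(150);			/* then wait for warmup */
}
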
1347/** 1408/**
1348 * intel_disable_pll - disable a PLL 1409 * i9xx_disable_pll - disable a PLL
1349 * @dev_priv: i915 private structure 1410 * @dev_priv: i915 private structure
1350 * @pipe: pipe PLL to disable 1411 * @pipe: pipe PLL to disable
1351 * 1412 *
@@ -1353,11 +1414,8 @@ static void intel_enable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1353 * 1414 *
1354 * Note! This is for pre-ILK only. 1415 * Note! This is for pre-ILK only.
1355 */ 1416 */
1356static void intel_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe) 1417static void i9xx_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1357{ 1418{
1358 int reg;
1359 u32 val;
1360
1361 /* Don't disable pipe A or pipe A PLLs if needed */ 1419 /* Don't disable pipe A or pipe A PLLs if needed */
1362 if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE)) 1420 if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
1363 return; 1421 return;
@@ -1365,11 +1423,8 @@ static void intel_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1365 /* Make sure the pipe isn't still relying on us */ 1423 /* Make sure the pipe isn't still relying on us */
1366 assert_pipe_disabled(dev_priv, pipe); 1424 assert_pipe_disabled(dev_priv, pipe);
1367 1425
1368 reg = DPLL(pipe); 1426 I915_WRITE(DPLL(pipe), 0);
1369 val = I915_READ(reg); 1427 POSTING_READ(DPLL(pipe));
1370 val &= ~DPLL_VCO_ENABLE;
1371 I915_WRITE(reg, val);
1372 POSTING_READ(reg);
1373} 1428}
1374 1429
1375void vlv_wait_port_ready(struct drm_i915_private *dev_priv, int port) 1430void vlv_wait_port_ready(struct drm_i915_private *dev_priv, int port)
@@ -1942,16 +1997,17 @@ static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
1942 intel_crtc->dspaddr_offset = linear_offset; 1997 intel_crtc->dspaddr_offset = linear_offset;
1943 } 1998 }
1944 1999
1945 DRM_DEBUG_KMS("Writing base %08X %08lX %d %d %d\n", 2000 DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
1946 obj->gtt_offset, linear_offset, x, y, fb->pitches[0]); 2001 i915_gem_obj_ggtt_offset(obj), linear_offset, x, y,
2002 fb->pitches[0]);
1947 I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]); 2003 I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
1948 if (INTEL_INFO(dev)->gen >= 4) { 2004 if (INTEL_INFO(dev)->gen >= 4) {
1949 I915_MODIFY_DISPBASE(DSPSURF(plane), 2005 I915_MODIFY_DISPBASE(DSPSURF(plane),
1950 obj->gtt_offset + intel_crtc->dspaddr_offset); 2006 i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
1951 I915_WRITE(DSPTILEOFF(plane), (y << 16) | x); 2007 I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
1952 I915_WRITE(DSPLINOFF(plane), linear_offset); 2008 I915_WRITE(DSPLINOFF(plane), linear_offset);
1953 } else 2009 } else
1954 I915_WRITE(DSPADDR(plane), obj->gtt_offset + linear_offset); 2010 I915_WRITE(DSPADDR(plane), i915_gem_obj_ggtt_offset(obj) + linear_offset);
1955 POSTING_READ(reg); 2011 POSTING_READ(reg);
1956 2012
1957 return 0; 2013 return 0;
@@ -2031,11 +2087,12 @@ static int ironlake_update_plane(struct drm_crtc *crtc,
2031 fb->pitches[0]); 2087 fb->pitches[0]);
2032 linear_offset -= intel_crtc->dspaddr_offset; 2088 linear_offset -= intel_crtc->dspaddr_offset;
2033 2089
2034 DRM_DEBUG_KMS("Writing base %08X %08lX %d %d %d\n", 2090 DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
2035 obj->gtt_offset, linear_offset, x, y, fb->pitches[0]); 2091 i915_gem_obj_ggtt_offset(obj), linear_offset, x, y,
2092 fb->pitches[0]);
2036 I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]); 2093 I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
2037 I915_MODIFY_DISPBASE(DSPSURF(plane), 2094 I915_MODIFY_DISPBASE(DSPSURF(plane),
2038 obj->gtt_offset + intel_crtc->dspaddr_offset); 2095 i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
2039 if (IS_HASWELL(dev)) { 2096 if (IS_HASWELL(dev)) {
2040 I915_WRITE(DSPOFFSET(plane), (y << 16) | x); 2097 I915_WRITE(DSPOFFSET(plane), (y << 16) | x);
2041 } else { 2098 } else {
@@ -2183,6 +2240,20 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
2183 return ret; 2240 return ret;
2184 } 2241 }
2185 2242
2243 /* Update pipe size and adjust fitter if needed */
2244 if (i915_fastboot) {
2245 I915_WRITE(PIPESRC(intel_crtc->pipe),
2246 ((crtc->mode.hdisplay - 1) << 16) |
2247 (crtc->mode.vdisplay - 1));
2248 if (!intel_crtc->config.pch_pfit.size &&
2249 (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) ||
2250 intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
2251 I915_WRITE(PF_CTL(intel_crtc->pipe), 0);
2252 I915_WRITE(PF_WIN_POS(intel_crtc->pipe), 0);
2253 I915_WRITE(PF_WIN_SZ(intel_crtc->pipe), 0);
2254 }
2255 }
2256
2186 ret = dev_priv->display.update_plane(crtc, fb, x, y); 2257 ret = dev_priv->display.update_plane(crtc, fb, x, y);
2187 if (ret) { 2258 if (ret) {
2188 intel_unpin_fb_obj(to_intel_framebuffer(fb)->obj); 2259 intel_unpin_fb_obj(to_intel_framebuffer(fb)->obj);
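
PIPESRC packs (hdisplay - 1) into the top 16 bits and (vdisplay - 1) into the bottom, which is what lets the fastboot path above fix up the pipe size without a full mode set. A small self-contained check of that encoding (pipesrc_val is a made-up helper name):

#include <stdint.h>
#include <stdio.h>

static uint32_t pipesrc_val(int hdisplay, int vdisplay)
{
	return ((uint32_t)(hdisplay - 1) << 16) | (uint32_t)(vdisplay - 1);
}

int main(void)
{
	printf("0x%08x\n", pipesrc_val(1920, 1080));	/* prints 0x077f0437 */
	return 0;
}
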
@@ -2927,15 +2998,8 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
2927 /* For PCH output, training FDI link */ 2998 /* For PCH output, training FDI link */
2928 dev_priv->display.fdi_link_train(crtc); 2999 dev_priv->display.fdi_link_train(crtc);
2929 3000
2930 /* XXX: pch pll's can be enabled any time before we enable the PCH 3001 /* We need to program the right clock selection before writing the pixel
2931 * transcoder, and we actually should do this to not upset any PCH 3002 * multiplier into the DPLL. */
2932 * transcoder that already uses the clock when we share it.
2933 *
2934 * Note that enable_shared_dpll tries to do the right thing, but
2935 * get_shared_dpll unconditionally resets the pll - we need that to have
2936 * the right LVDS enable sequence. */
2937 ironlake_enable_shared_dpll(intel_crtc);
2938
2939 if (HAS_PCH_CPT(dev)) { 3003 if (HAS_PCH_CPT(dev)) {
2940 u32 sel; 3004 u32 sel;
2941 3005
@@ -2949,6 +3013,15 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
2949 I915_WRITE(PCH_DPLL_SEL, temp); 3013 I915_WRITE(PCH_DPLL_SEL, temp);
2950 } 3014 }
2951 3015
3016 /* XXX: pch pll's can be enabled any time before we enable the PCH
3017 * transcoder, and we actually should do this to not upset any PCH
3018 * transcoder that already uses the clock when we share it.
3019 *
3020 * Note that enable_shared_dpll tries to do the right thing, but
3021 * get_shared_dpll unconditionally resets the pll - we need that to have
3022 * the right LVDS enable sequence. */
3023 ironlake_enable_shared_dpll(intel_crtc);
3024
2952 /* set transcoder timing, panel must allow it */ 3025 /* set transcoder timing, panel must allow it */
2953 assert_panel_unlocked(dev_priv, pipe); 3026 assert_panel_unlocked(dev_priv, pipe);
2954 ironlake_pch_transcoder_set_timings(intel_crtc, pipe); 3027 ironlake_pch_transcoder_set_timings(intel_crtc, pipe);
@@ -3031,7 +3104,7 @@ static void intel_put_shared_dpll(struct intel_crtc *crtc)
3031 crtc->config.shared_dpll = DPLL_ID_PRIVATE; 3104 crtc->config.shared_dpll = DPLL_ID_PRIVATE;
3032} 3105}
3033 3106
3034static struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc, u32 dpll, u32 fp) 3107static struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc)
3035{ 3108{
3036 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; 3109 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
3037 struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc); 3110 struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
@@ -3045,7 +3118,7 @@ static struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc,
3045 3118
3046 if (HAS_PCH_IBX(dev_priv->dev)) { 3119 if (HAS_PCH_IBX(dev_priv->dev)) {
3047 /* Ironlake PCH has a fixed PLL->PCH pipe mapping. */ 3120 /* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
3048 i = crtc->pipe; 3121 i = (enum intel_dpll_id) crtc->pipe;
3049 pll = &dev_priv->shared_dplls[i]; 3122 pll = &dev_priv->shared_dplls[i];
3050 3123
3051 DRM_DEBUG_KMS("CRTC:%d using pre-allocated %s\n", 3124 DRM_DEBUG_KMS("CRTC:%d using pre-allocated %s\n",
@@ -3061,8 +3134,8 @@ static struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc,
3061 if (pll->refcount == 0) 3134 if (pll->refcount == 0)
3062 continue; 3135 continue;
3063 3136
3064 if (dpll == (I915_READ(PCH_DPLL(pll->id)) & 0x7fffffff) && 3137 if (memcmp(&crtc->config.dpll_hw_state, &pll->hw_state,
3065 fp == I915_READ(PCH_FP0(pll->id))) { 3138 sizeof(pll->hw_state)) == 0) {
3066 DRM_DEBUG_KMS("CRTC:%d sharing existing %s (refcount %d, active %d)\n", 3139 DRM_DEBUG_KMS("CRTC:%d sharing existing %s (refcount %d, active %d)\n",
3067 crtc->base.base.id, 3140 crtc->base.base.id,
3068 pll->name, pll->refcount, pll->active); 3141 pll->name, pll->refcount, pll->active);
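
Matching on the whole precomputed dpll_hw_state, instead of re-reading PCH_DPLL and PCH_FP0, means any field later added to the state participates in the sharing decision for free. The one thing memcmp() quietly depends on is that both structs were zero-initialized, so padding bytes cannot differ; a sketch of the comparison under that assumption (my_dpll_hw_state is hypothetical):

#include <string.h>

struct my_dpll_hw_state {
	unsigned int dpll, dpll_md, fp0, fp1;
};

/* Nonzero when an in-use PLL already runs the wanted configuration and
 * can simply be refcounted instead of reprogrammed. */
static int my_pll_matches(const struct my_dpll_hw_state *want,
			  const struct my_dpll_hw_state *have)
{
	return memcmp(want, have, sizeof(*want)) == 0;
}
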
@@ -3096,13 +3169,7 @@ found:
3096 WARN_ON(pll->on); 3169 WARN_ON(pll->on);
3097 assert_shared_dpll_disabled(dev_priv, pll); 3170 assert_shared_dpll_disabled(dev_priv, pll);
3098 3171
3099 /* Wait for the clocks to stabilize before rewriting the regs */ 3172 pll->mode_set(dev_priv, pll);
3100 I915_WRITE(PCH_DPLL(pll->id), dpll & ~DPLL_VCO_ENABLE);
3101 POSTING_READ(PCH_DPLL(pll->id));
3102 udelay(150);
3103
3104 I915_WRITE(PCH_FP0(pll->id), fp);
3105 I915_WRITE(PCH_DPLL(pll->id), dpll & ~DPLL_VCO_ENABLE);
3106 } 3173 }
3107 pll->refcount++; 3174 pll->refcount++;
3108 3175
@@ -3174,7 +3241,6 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
3174 struct intel_encoder *encoder; 3241 struct intel_encoder *encoder;
3175 int pipe = intel_crtc->pipe; 3242 int pipe = intel_crtc->pipe;
3176 int plane = intel_crtc->plane; 3243 int plane = intel_crtc->plane;
3177 u32 temp;
3178 3244
3179 WARN_ON(!crtc->enabled); 3245 WARN_ON(!crtc->enabled);
3180 3246
@@ -3188,12 +3254,9 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
3188 3254
3189 intel_update_watermarks(dev); 3255 intel_update_watermarks(dev);
3190 3256
3191 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { 3257 for_each_encoder_on_crtc(dev, crtc, encoder)
3192 temp = I915_READ(PCH_LVDS); 3258 if (encoder->pre_enable)
3193 if ((temp & LVDS_PORT_EN) == 0) 3259 encoder->pre_enable(encoder);
3194 I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN);
3195 }
3196
3197 3260
3198 if (intel_crtc->config.has_pch_encoder) { 3261 if (intel_crtc->config.has_pch_encoder) {
3199 /* Note: FDI PLL enabling _must_ be done before we enable the 3262 /* Note: FDI PLL enabling _must_ be done before we enable the
@@ -3205,10 +3268,6 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
3205 assert_fdi_rx_disabled(dev_priv, pipe); 3268 assert_fdi_rx_disabled(dev_priv, pipe);
3206 } 3269 }
3207 3270
3208 for_each_encoder_on_crtc(dev, crtc, encoder)
3209 if (encoder->pre_enable)
3210 encoder->pre_enable(encoder);
3211
3212 ironlake_pfit_enable(intel_crtc); 3271 ironlake_pfit_enable(intel_crtc);
3213 3272
3214 /* 3273 /*
@@ -3389,7 +3448,7 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
3389 intel_crtc_wait_for_pending_flips(crtc); 3448 intel_crtc_wait_for_pending_flips(crtc);
3390 drm_vblank_off(dev, pipe); 3449 drm_vblank_off(dev, pipe);
3391 3450
3392 if (dev_priv->cfb_plane == plane) 3451 if (dev_priv->fbc.plane == plane)
3393 intel_disable_fbc(dev); 3452 intel_disable_fbc(dev);
3394 3453
3395 intel_crtc_update_cursor(crtc, false); 3454 intel_crtc_update_cursor(crtc, false);
@@ -3462,7 +3521,7 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
3462 drm_vblank_off(dev, pipe); 3521 drm_vblank_off(dev, pipe);
3463 3522
3464 /* FBC must be disabled before disabling the plane on HSW. */ 3523 /* FBC must be disabled before disabling the plane on HSW. */
3465 if (dev_priv->cfb_plane == plane) 3524 if (dev_priv->fbc.plane == plane)
3466 intel_disable_fbc(dev); 3525 intel_disable_fbc(dev);
3467 3526
3468 hsw_disable_ips(intel_crtc); 3527 hsw_disable_ips(intel_crtc);
@@ -3599,7 +3658,7 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
3599 if (encoder->pre_pll_enable) 3658 if (encoder->pre_pll_enable)
3600 encoder->pre_pll_enable(encoder); 3659 encoder->pre_pll_enable(encoder);
3601 3660
3602 intel_enable_pll(dev_priv, pipe); 3661 vlv_enable_pll(intel_crtc);
3603 3662
3604 for_each_encoder_on_crtc(dev, crtc, encoder) 3663 for_each_encoder_on_crtc(dev, crtc, encoder)
3605 if (encoder->pre_enable) 3664 if (encoder->pre_enable)
@@ -3640,12 +3699,12 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
3640 intel_crtc->active = true; 3699 intel_crtc->active = true;
3641 intel_update_watermarks(dev); 3700 intel_update_watermarks(dev);
3642 3701
3643 intel_enable_pll(dev_priv, pipe);
3644
3645 for_each_encoder_on_crtc(dev, crtc, encoder) 3702 for_each_encoder_on_crtc(dev, crtc, encoder)
3646 if (encoder->pre_enable) 3703 if (encoder->pre_enable)
3647 encoder->pre_enable(encoder); 3704 encoder->pre_enable(encoder);
3648 3705
3706 i9xx_enable_pll(intel_crtc);
3707
3649 i9xx_pfit_enable(intel_crtc); 3708 i9xx_pfit_enable(intel_crtc);
3650 3709
3651 intel_crtc_load_lut(crtc); 3710 intel_crtc_load_lut(crtc);
@@ -3701,7 +3760,7 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
3701 intel_crtc_wait_for_pending_flips(crtc); 3760 intel_crtc_wait_for_pending_flips(crtc);
3702 drm_vblank_off(dev, pipe); 3761 drm_vblank_off(dev, pipe);
3703 3762
3704 if (dev_priv->cfb_plane == plane) 3763 if (dev_priv->fbc.plane == plane)
3705 intel_disable_fbc(dev); 3764 intel_disable_fbc(dev);
3706 3765
3707 intel_crtc_dpms_overlay(intel_crtc, false); 3766 intel_crtc_dpms_overlay(intel_crtc, false);
@@ -3717,7 +3776,7 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
3717 if (encoder->post_disable) 3776 if (encoder->post_disable)
3718 encoder->post_disable(encoder); 3777 encoder->post_disable(encoder);
3719 3778
3720 intel_disable_pll(dev_priv, pipe); 3779 i9xx_disable_pll(dev_priv, pipe);
3721 3780
3722 intel_crtc->active = false; 3781 intel_crtc->active = false;
3723 intel_update_fbc(dev); 3782 intel_update_fbc(dev);
@@ -4266,14 +4325,17 @@ static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
4266 } 4325 }
4267 4326
4268 I915_WRITE(FP0(pipe), fp); 4327 I915_WRITE(FP0(pipe), fp);
4328 crtc->config.dpll_hw_state.fp0 = fp;
4269 4329
4270 crtc->lowfreq_avail = false; 4330 crtc->lowfreq_avail = false;
4271 if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) && 4331 if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
4272 reduced_clock && i915_powersave) { 4332 reduced_clock && i915_powersave) {
4273 I915_WRITE(FP1(pipe), fp2); 4333 I915_WRITE(FP1(pipe), fp2);
4334 crtc->config.dpll_hw_state.fp1 = fp2;
4274 crtc->lowfreq_avail = true; 4335 crtc->lowfreq_avail = true;
4275 } else { 4336 } else {
4276 I915_WRITE(FP1(pipe), fp); 4337 I915_WRITE(FP1(pipe), fp);
4338 crtc->config.dpll_hw_state.fp1 = fp;
4277 } 4339 }
4278} 4340}
4279 4341
@@ -4351,7 +4413,6 @@ static void vlv_update_pll(struct intel_crtc *crtc)
4351{ 4413{
4352 struct drm_device *dev = crtc->base.dev; 4414 struct drm_device *dev = crtc->base.dev;
4353 struct drm_i915_private *dev_priv = dev->dev_private; 4415 struct drm_i915_private *dev_priv = dev->dev_private;
4354 struct intel_encoder *encoder;
4355 int pipe = crtc->pipe; 4416 int pipe = crtc->pipe;
4356 u32 dpll, mdiv; 4417 u32 dpll, mdiv;
4357 u32 bestn, bestm1, bestm2, bestp1, bestp2; 4418 u32 bestn, bestm1, bestm2, bestp1, bestp2;
@@ -4407,7 +4468,7 @@ static void vlv_update_pll(struct intel_crtc *crtc)
4407 intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_ANALOG) || 4468 intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_ANALOG) ||
4408 intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI)) 4469 intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI))
4409 vlv_dpio_write(dev_priv, DPIO_LPF_COEFF(pipe), 4470 vlv_dpio_write(dev_priv, DPIO_LPF_COEFF(pipe),
4410 0x005f0021); 4471 0x009f0003);
4411 else 4472 else
4412 vlv_dpio_write(dev_priv, DPIO_LPF_COEFF(pipe), 4473 vlv_dpio_write(dev_priv, DPIO_LPF_COEFF(pipe),
4413 0x00d0000f); 4474 0x00d0000f);
@@ -4440,10 +4501,6 @@ static void vlv_update_pll(struct intel_crtc *crtc)
4440 4501
4441 vlv_dpio_write(dev_priv, DPIO_PLL_CML(pipe), 0x87871000); 4502 vlv_dpio_write(dev_priv, DPIO_PLL_CML(pipe), 0x87871000);
4442 4503
4443 for_each_encoder_on_crtc(dev, &crtc->base, encoder)
4444 if (encoder->pre_pll_enable)
4445 encoder->pre_pll_enable(encoder);
4446
4447 /* Enable DPIO clock input */ 4504 /* Enable DPIO clock input */
4448 dpll = DPLL_EXT_BUFFER_ENABLE_VLV | DPLL_REFA_CLK_ENABLE_VLV | 4505 dpll = DPLL_EXT_BUFFER_ENABLE_VLV | DPLL_REFA_CLK_ENABLE_VLV |
4449 DPLL_VGA_MODE_DIS | DPLL_INTEGRATED_CLOCK_VLV; 4506 DPLL_VGA_MODE_DIS | DPLL_INTEGRATED_CLOCK_VLV;
@@ -4451,17 +4508,11 @@ static void vlv_update_pll(struct intel_crtc *crtc)
4451 dpll |= DPLL_INTEGRATED_CRI_CLK_VLV; 4508 dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
4452 4509
4453 dpll |= DPLL_VCO_ENABLE; 4510 dpll |= DPLL_VCO_ENABLE;
4454 I915_WRITE(DPLL(pipe), dpll); 4511 crtc->config.dpll_hw_state.dpll = dpll;
4455 POSTING_READ(DPLL(pipe));
4456 udelay(150);
4457
4458 if (wait_for(((I915_READ(DPLL(pipe)) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
4459 DRM_ERROR("DPLL %d failed to lock\n", pipe);
4460 4512
4461 dpll_md = (crtc->config.pixel_multiplier - 1) 4513 dpll_md = (crtc->config.pixel_multiplier - 1)
4462 << DPLL_MD_UDI_MULTIPLIER_SHIFT; 4514 << DPLL_MD_UDI_MULTIPLIER_SHIFT;
4463 I915_WRITE(DPLL_MD(pipe), dpll_md); 4515 crtc->config.dpll_hw_state.dpll_md = dpll_md;
4464 POSTING_READ(DPLL_MD(pipe));
4465 4516
4466 if (crtc->config.has_dp_encoder) 4517 if (crtc->config.has_dp_encoder)
4467 intel_dp_set_m_n(crtc); 4518 intel_dp_set_m_n(crtc);
@@ -4475,8 +4526,6 @@ static void i9xx_update_pll(struct intel_crtc *crtc,
4475{ 4526{
4476 struct drm_device *dev = crtc->base.dev; 4527 struct drm_device *dev = crtc->base.dev;
4477 struct drm_i915_private *dev_priv = dev->dev_private; 4528 struct drm_i915_private *dev_priv = dev->dev_private;
4478 struct intel_encoder *encoder;
4479 int pipe = crtc->pipe;
4480 u32 dpll; 4529 u32 dpll;
4481 bool is_sdvo; 4530 bool is_sdvo;
4482 struct dpll *clock = &crtc->config.dpll; 4531 struct dpll *clock = &crtc->config.dpll;
@@ -4499,10 +4548,10 @@ static void i9xx_update_pll(struct intel_crtc *crtc,
4499 } 4548 }
4500 4549
4501 if (is_sdvo) 4550 if (is_sdvo)
4502 dpll |= DPLL_DVO_HIGH_SPEED; 4551 dpll |= DPLL_SDVO_HIGH_SPEED;
4503 4552
4504 if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT)) 4553 if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT))
4505 dpll |= DPLL_DVO_HIGH_SPEED; 4554 dpll |= DPLL_SDVO_HIGH_SPEED;
4506 4555
4507 /* compute bitmask from p1 value */ 4556 /* compute bitmask from p1 value */
4508 if (IS_PINEVIEW(dev)) 4557 if (IS_PINEVIEW(dev))
@@ -4538,35 +4587,16 @@ static void i9xx_update_pll(struct intel_crtc *crtc,
4538 dpll |= PLL_REF_INPUT_DREFCLK; 4587 dpll |= PLL_REF_INPUT_DREFCLK;
4539 4588
4540 dpll |= DPLL_VCO_ENABLE; 4589 dpll |= DPLL_VCO_ENABLE;
4541 I915_WRITE(DPLL(pipe), dpll & ~DPLL_VCO_ENABLE); 4590 crtc->config.dpll_hw_state.dpll = dpll;
4542 POSTING_READ(DPLL(pipe));
4543 udelay(150);
4544
4545 for_each_encoder_on_crtc(dev, &crtc->base, encoder)
4546 if (encoder->pre_pll_enable)
4547 encoder->pre_pll_enable(encoder);
4548
4549 if (crtc->config.has_dp_encoder)
4550 intel_dp_set_m_n(crtc);
4551
4552 I915_WRITE(DPLL(pipe), dpll);
4553
4554 /* Wait for the clocks to stabilize. */
4555 POSTING_READ(DPLL(pipe));
4556 udelay(150);
4557 4591
4558 if (INTEL_INFO(dev)->gen >= 4) { 4592 if (INTEL_INFO(dev)->gen >= 4) {
4559 u32 dpll_md = (crtc->config.pixel_multiplier - 1) 4593 u32 dpll_md = (crtc->config.pixel_multiplier - 1)
4560 << DPLL_MD_UDI_MULTIPLIER_SHIFT; 4594 << DPLL_MD_UDI_MULTIPLIER_SHIFT;
4561 I915_WRITE(DPLL_MD(pipe), dpll_md); 4595 crtc->config.dpll_hw_state.dpll_md = dpll_md;
4562 } else {
4563 /* The pixel multiplier can only be updated once the
4564 * DPLL is enabled and the clocks are stable.
4565 *
4566 * So write it again.
4567 */
4568 I915_WRITE(DPLL(pipe), dpll);
4569 } 4596 }
4597
4598 if (crtc->config.has_dp_encoder)
4599 intel_dp_set_m_n(crtc);
4570} 4600}
4571 4601
4572static void i8xx_update_pll(struct intel_crtc *crtc, 4602static void i8xx_update_pll(struct intel_crtc *crtc,
@@ -4575,8 +4605,6 @@ static void i8xx_update_pll(struct intel_crtc *crtc,
4575{ 4605{
4576 struct drm_device *dev = crtc->base.dev; 4606 struct drm_device *dev = crtc->base.dev;
4577 struct drm_i915_private *dev_priv = dev->dev_private; 4607 struct drm_i915_private *dev_priv = dev->dev_private;
4578 struct intel_encoder *encoder;
4579 int pipe = crtc->pipe;
4580 u32 dpll; 4608 u32 dpll;
4581 struct dpll *clock = &crtc->config.dpll; 4609 struct dpll *clock = &crtc->config.dpll;
4582 4610
@@ -4595,6 +4623,9 @@ static void i8xx_update_pll(struct intel_crtc *crtc,
4595 dpll |= PLL_P2_DIVIDE_BY_4; 4623 dpll |= PLL_P2_DIVIDE_BY_4;
4596 } 4624 }
4597 4625
4626 if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DVO))
4627 dpll |= DPLL_DVO_2X_MODE;
4628
4598 if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) && 4629 if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
4599 intel_panel_use_ssc(dev_priv) && num_connectors < 2) 4630 intel_panel_use_ssc(dev_priv) && num_connectors < 2)
4600 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN; 4631 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
@@ -4602,26 +4633,7 @@ static void i8xx_update_pll(struct intel_crtc *crtc,
4602 dpll |= PLL_REF_INPUT_DREFCLK; 4633 dpll |= PLL_REF_INPUT_DREFCLK;
4603 4634
4604 dpll |= DPLL_VCO_ENABLE; 4635 dpll |= DPLL_VCO_ENABLE;
4605 I915_WRITE(DPLL(pipe), dpll & ~DPLL_VCO_ENABLE); 4636 crtc->config.dpll_hw_state.dpll = dpll;
4606 POSTING_READ(DPLL(pipe));
4607 udelay(150);
4608
4609 for_each_encoder_on_crtc(dev, &crtc->base, encoder)
4610 if (encoder->pre_pll_enable)
4611 encoder->pre_pll_enable(encoder);
4612
4613 I915_WRITE(DPLL(pipe), dpll);
4614
4615 /* Wait for the clocks to stabilize. */
4616 POSTING_READ(DPLL(pipe));
4617 udelay(150);
4618
4619 /* The pixel multiplier can only be updated once the
4620 * DPLL is enabled and the clocks are stable.
4621 *
4622 * So write it again.
4623 */
4624 I915_WRITE(DPLL(pipe), dpll);
4625} 4637}
4626 4638
4627static void intel_set_pipe_timings(struct intel_crtc *intel_crtc) 4639static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
@@ -4727,6 +4739,27 @@ static void intel_get_pipe_timings(struct intel_crtc *crtc,
4727 pipe_config->requested_mode.hdisplay = ((tmp >> 16) & 0xffff) + 1; 4739 pipe_config->requested_mode.hdisplay = ((tmp >> 16) & 0xffff) + 1;
4728} 4740}
4729 4741
4742static void intel_crtc_mode_from_pipe_config(struct intel_crtc *intel_crtc,
4743 struct intel_crtc_config *pipe_config)
4744{
4745 struct drm_crtc *crtc = &intel_crtc->base;
4746
4747 crtc->mode.hdisplay = pipe_config->adjusted_mode.crtc_hdisplay;
4748 crtc->mode.htotal = pipe_config->adjusted_mode.crtc_htotal;
4749 crtc->mode.hsync_start = pipe_config->adjusted_mode.crtc_hsync_start;
4750 crtc->mode.hsync_end = pipe_config->adjusted_mode.crtc_hsync_end;
4751
4752 crtc->mode.vdisplay = pipe_config->adjusted_mode.crtc_vdisplay;
4753 crtc->mode.vtotal = pipe_config->adjusted_mode.crtc_vtotal;
4754 crtc->mode.vsync_start = pipe_config->adjusted_mode.crtc_vsync_start;
4755 crtc->mode.vsync_end = pipe_config->adjusted_mode.crtc_vsync_end;
4756
4757 crtc->mode.flags = pipe_config->adjusted_mode.flags;
4758
4759 crtc->mode.clock = pipe_config->adjusted_mode.clock;
4760 crtc->mode.flags |= pipe_config->adjusted_mode.flags;
4761}
4762
4730static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc) 4763static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
4731{ 4764{
4732 struct drm_device *dev = intel_crtc->base.dev; 4765 struct drm_device *dev = intel_crtc->base.dev;
@@ -4942,7 +4975,7 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
4942 struct drm_i915_private *dev_priv = dev->dev_private; 4975 struct drm_i915_private *dev_priv = dev->dev_private;
4943 uint32_t tmp; 4976 uint32_t tmp;
4944 4977
4945 pipe_config->cpu_transcoder = crtc->pipe; 4978 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
4946 pipe_config->shared_dpll = DPLL_ID_PRIVATE; 4979 pipe_config->shared_dpll = DPLL_ID_PRIVATE;
4947 4980
4948 tmp = I915_READ(PIPECONF(crtc->pipe)); 4981 tmp = I915_READ(PIPECONF(crtc->pipe));
@@ -4958,6 +4991,7 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
4958 pipe_config->pixel_multiplier = 4991 pipe_config->pixel_multiplier =
4959 ((tmp & DPLL_MD_UDI_MULTIPLIER_MASK) 4992 ((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
4960 >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1; 4993 >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
4994 pipe_config->dpll_hw_state.dpll_md = tmp;
4961 } else if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) { 4995 } else if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
4962 tmp = I915_READ(DPLL(crtc->pipe)); 4996 tmp = I915_READ(DPLL(crtc->pipe));
4963 pipe_config->pixel_multiplier = 4997 pipe_config->pixel_multiplier =
@@ -4969,6 +5003,16 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
4969 * function. */ 5003 * function. */
4970 pipe_config->pixel_multiplier = 1; 5004 pipe_config->pixel_multiplier = 1;
4971 } 5005 }
5006 pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(crtc->pipe));
5007 if (!IS_VALLEYVIEW(dev)) {
5008 pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(crtc->pipe));
5009 pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(crtc->pipe));
5010 } else {
5011 /* Mask out read-only status bits. */
5012 pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
5013 DPLL_PORTC_READY_MASK |
5014 DPLL_PORTB_READY_MASK);
5015 }
4972 5016
4973 return true; 5017 return true;
4974} 5018}
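
A minimal standalone sketch (not from the patch) of why the read-only mask in the hunk above matters when cross-checking software state against a readout of the VLV DPLL register; the bit positions here are illustrative, see i915_reg.h for the real layout:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative positions only, not the exact VLV register layout. */
#define DPLL_LOCK_VLV         (1u << 15)  /* set by hardware on lock */
#define DPLL_PORTB_READY_MASK (0xfu)      /* hardware-updated status */
#define DPLL_PORTC_READY_MASK (0xfu << 4)

static bool dpll_state_matches(uint32_t sw_dpll, uint32_t hw_dpll)
{
	/* Ignore bits the driver never writes, or an idle vs. locked
	 * PLL would be flagged as a state mismatch. */
	uint32_t ro = DPLL_LOCK_VLV | DPLL_PORTB_READY_MASK |
		      DPLL_PORTC_READY_MASK;

	return (sw_dpll & ~ro) == (hw_dpll & ~ro);
}

int main(void)
{
	uint32_t sw = 0x80000000u;                 /* VCO enable written */
	uint32_t hw = 0x80000000u | DPLL_LOCK_VLV; /* hw also reports lock */

	printf("match: %d\n", dpll_state_matches(sw, hw)); /* prints 1 */
	return 0;
}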
@@ -5613,9 +5657,9 @@ static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
5613 << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT; 5657 << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
5614 5658
5615 if (is_sdvo) 5659 if (is_sdvo)
5616 dpll |= DPLL_DVO_HIGH_SPEED; 5660 dpll |= DPLL_SDVO_HIGH_SPEED;
5617 if (intel_crtc->config.has_dp_encoder) 5661 if (intel_crtc->config.has_dp_encoder)
5618 dpll |= DPLL_DVO_HIGH_SPEED; 5662 dpll |= DPLL_SDVO_HIGH_SPEED;
5619 5663
5620 /* compute bitmask from p1 value */ 5664 /* compute bitmask from p1 value */
5621 dpll |= (1 << (intel_crtc->config.dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT; 5665 dpll |= (1 << (intel_crtc->config.dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
@@ -5711,7 +5755,7 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
5711 else 5755 else
5712 intel_crtc->config.dpll_hw_state.fp1 = fp; 5756 intel_crtc->config.dpll_hw_state.fp1 = fp;
5713 5757
5714 pll = intel_get_shared_dpll(intel_crtc, dpll, fp); 5758 pll = intel_get_shared_dpll(intel_crtc);
5715 if (pll == NULL) { 5759 if (pll == NULL) {
5716 DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n", 5760 DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
5717 pipe_name(pipe)); 5761 pipe_name(pipe));
@@ -5723,10 +5767,6 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
5723 if (intel_crtc->config.has_dp_encoder) 5767 if (intel_crtc->config.has_dp_encoder)
5724 intel_dp_set_m_n(intel_crtc); 5768 intel_dp_set_m_n(intel_crtc);
5725 5769
5726 for_each_encoder_on_crtc(dev, crtc, encoder)
5727 if (encoder->pre_pll_enable)
5728 encoder->pre_pll_enable(encoder);
5729
5730 if (is_lvds && has_reduced_clock && i915_powersave) 5770 if (is_lvds && has_reduced_clock && i915_powersave)
5731 intel_crtc->lowfreq_avail = true; 5771 intel_crtc->lowfreq_avail = true;
5732 else 5772 else
@@ -5735,23 +5775,6 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
5735 if (intel_crtc->config.has_pch_encoder) { 5775 if (intel_crtc->config.has_pch_encoder) {
5736 pll = intel_crtc_to_shared_dpll(intel_crtc); 5776 pll = intel_crtc_to_shared_dpll(intel_crtc);
5737 5777
5738 I915_WRITE(PCH_DPLL(pll->id), dpll);
5739
5740 /* Wait for the clocks to stabilize. */
5741 POSTING_READ(PCH_DPLL(pll->id));
5742 udelay(150);
5743
5744 /* The pixel multiplier can only be updated once the
5745 * DPLL is enabled and the clocks are stable.
5746 *
5747 * So write it again.
5748 */
5749 I915_WRITE(PCH_DPLL(pll->id), dpll);
5750
5751 if (has_reduced_clock)
5752 I915_WRITE(PCH_FP1(pll->id), fp2);
5753 else
5754 I915_WRITE(PCH_FP1(pll->id), fp);
5755 } 5778 }
5756 5779
5757 intel_set_pipe_timings(intel_crtc); 5780 intel_set_pipe_timings(intel_crtc);
@@ -5823,7 +5846,7 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
5823 struct drm_i915_private *dev_priv = dev->dev_private; 5846 struct drm_i915_private *dev_priv = dev->dev_private;
5824 uint32_t tmp; 5847 uint32_t tmp;
5825 5848
5826 pipe_config->cpu_transcoder = crtc->pipe; 5849 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
5827 pipe_config->shared_dpll = DPLL_ID_PRIVATE; 5850 pipe_config->shared_dpll = DPLL_ID_PRIVATE;
5828 5851
5829 tmp = I915_READ(PIPECONF(crtc->pipe)); 5852 tmp = I915_READ(PIPECONF(crtc->pipe));
@@ -5841,12 +5864,9 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
5841 5864
5842 ironlake_get_fdi_m_n_config(crtc, pipe_config); 5865 ironlake_get_fdi_m_n_config(crtc, pipe_config);
5843 5866
5844 /* XXX: Can't properly read out the pch dpll pixel multiplier
5845 * since we don't have state tracking for pch clocks yet. */
5846 pipe_config->pixel_multiplier = 1;
5847
5848 if (HAS_PCH_IBX(dev_priv->dev)) { 5867 if (HAS_PCH_IBX(dev_priv->dev)) {
5849 pipe_config->shared_dpll = crtc->pipe; 5868 pipe_config->shared_dpll =
5869 (enum intel_dpll_id) crtc->pipe;
5850 } else { 5870 } else {
5851 tmp = I915_READ(PCH_DPLL_SEL); 5871 tmp = I915_READ(PCH_DPLL_SEL);
5852 if (tmp & TRANS_DPLLB_SEL(crtc->pipe)) 5872 if (tmp & TRANS_DPLLB_SEL(crtc->pipe))
@@ -5859,6 +5879,11 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
5859 5879
5860 WARN_ON(!pll->get_hw_state(dev_priv, pll, 5880 WARN_ON(!pll->get_hw_state(dev_priv, pll,
5861 &pipe_config->dpll_hw_state)); 5881 &pipe_config->dpll_hw_state));
5882
5883 tmp = pipe_config->dpll_hw_state.dpll;
5884 pipe_config->pixel_multiplier =
5885 ((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
5886 >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;
5862 } else { 5887 } else {
5863 pipe_config->pixel_multiplier = 1; 5888 pipe_config->pixel_multiplier = 1;
5864 } 5889 }
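
The IBX branch above now recovers the pixel multiplier from the DPLL value returned by get_hw_state instead of hardcoding 1. A hedged sketch of the decode, with an assumed field position rather than the exact register layout:

#include <stdint.h>
#include <stdio.h>

/* Assumed field position for illustration; check i915_reg.h. */
#define PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT 9
#define PLL_REF_SDVO_HDMI_MULTIPLIER_MASK  (7u << 9)

int main(void)
{
	uint32_t dpll = 1u << 9; /* field value 1 encodes multiplier 2 */
	uint32_t mult = ((dpll & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
			 >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;

	printf("pixel multiplier: %u\n", mult); /* prints 2 */
	return 0;
}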
@@ -5938,7 +5963,7 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
5938 enum intel_display_power_domain pfit_domain; 5963 enum intel_display_power_domain pfit_domain;
5939 uint32_t tmp; 5964 uint32_t tmp;
5940 5965
5941 pipe_config->cpu_transcoder = crtc->pipe; 5966 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
5942 pipe_config->shared_dpll = DPLL_ID_PRIVATE; 5967 pipe_config->shared_dpll = DPLL_ID_PRIVATE;
5943 5968
5944 tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP)); 5969 tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
@@ -6551,7 +6576,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
6551 goto fail_unpin; 6576 goto fail_unpin;
6552 } 6577 }
6553 6578
6554 addr = obj->gtt_offset; 6579 addr = i915_gem_obj_ggtt_offset(obj);
6555 } else { 6580 } else {
6556 int align = IS_I830(dev) ? 16 * 1024 : 256; 6581 int align = IS_I830(dev) ? 16 * 1024 : 256;
6557 ret = i915_gem_attach_phys_object(dev, obj, 6582 ret = i915_gem_attach_phys_object(dev, obj,
@@ -6878,11 +6903,12 @@ void intel_release_load_detect_pipe(struct drm_connector *connector,
6878} 6903}
6879 6904
6880/* Returns the clock of the currently programmed mode of the given pipe. */ 6905/* Returns the clock of the currently programmed mode of the given pipe. */
6881static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc) 6906static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
6907 struct intel_crtc_config *pipe_config)
6882{ 6908{
6909 struct drm_device *dev = crtc->base.dev;
6883 struct drm_i915_private *dev_priv = dev->dev_private; 6910 struct drm_i915_private *dev_priv = dev->dev_private;
6884 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 6911 int pipe = pipe_config->cpu_transcoder;
6885 int pipe = intel_crtc->pipe;
6886 u32 dpll = I915_READ(DPLL(pipe)); 6912 u32 dpll = I915_READ(DPLL(pipe));
6887 u32 fp; 6913 u32 fp;
6888 intel_clock_t clock; 6914 intel_clock_t clock;
@@ -6921,7 +6947,8 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
6921 default: 6947 default:
6922 DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed " 6948 DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
6923 "mode\n", (int)(dpll & DPLL_MODE_MASK)); 6949 "mode\n", (int)(dpll & DPLL_MODE_MASK));
6924 return 0; 6950 pipe_config->adjusted_mode.clock = 0;
6951 return;
6925 } 6952 }
6926 6953
6927 if (IS_PINEVIEW(dev)) 6954 if (IS_PINEVIEW(dev))
@@ -6958,12 +6985,55 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
6958 } 6985 }
6959 } 6986 }
6960 6987
6961 /* XXX: It would be nice to validate the clocks, but we can't reuse 6988 pipe_config->adjusted_mode.clock = clock.dot *
6962 * i830PllIsValid() because it relies on the xf86_config connector 6989 pipe_config->pixel_multiplier;
6963 * configuration being accurate, which it isn't necessarily. 6990}
6991
6992static void ironlake_crtc_clock_get(struct intel_crtc *crtc,
6993 struct intel_crtc_config *pipe_config)
6994{
6995 struct drm_device *dev = crtc->base.dev;
6996 struct drm_i915_private *dev_priv = dev->dev_private;
6997 enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
6998 int link_freq, repeat;
6999 u64 clock;
7000 u32 link_m, link_n;
7001
7002 repeat = pipe_config->pixel_multiplier;
7003
7004 /*
7005 * The calculation for the data clock is:
7006 * pixel_clock = ((m/n)*(link_clock * nr_lanes * repeat))/bpp
7007 * But we want to avoid losing precision if possible, so:
7008 * pixel_clock = ((m * link_clock * nr_lanes * repeat)/(n*bpp))
7009 *
7010 * and the link clock is simpler:
7011 * clock = (m * link_clock * repeat) / n
6964 */ 7012 */
6965 7013
6966 return clock.dot; 7014 /*
7015 * We need to get the FDI or DP link clock here to derive
7016 * the M/N dividers.
7017 *
7018 * For FDI, we read it from the BIOS or use a fixed 2.7GHz.
7019 * For DP, it's either 1.62GHz or 2.7GHz.
7020 * We do our calculations in 10*MHz since we don't need much precision.
7021 */
7022 if (pipe_config->has_pch_encoder)
7023 link_freq = intel_fdi_link_freq(dev) * 10000;
7024 else
7025 link_freq = pipe_config->port_clock;
7026
7027 link_m = I915_READ(PIPE_LINK_M1(cpu_transcoder));
7028 link_n = I915_READ(PIPE_LINK_N1(cpu_transcoder));
7029
7030 if (!link_m || !link_n)
7031 return;
7032
7033 clock = ((u64)link_m * (u64)link_freq * (u64)repeat);
7034 do_div(clock, link_n);
7035
7036 pipe_config->adjusted_mode.clock = clock;
6967} 7037}
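
A worked example of the M/N readout above, runnable in userspace; the M/N ratio is reduced to small numbers for readability, real PIPE_LINK_M1/N1 registers hold much larger raw values:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* 1080p over a 270MHz DP link clock: M/N = 148500/270000 = 55/100 */
	uint64_t link_freq = 270000; /* link clock in kHz */
	uint64_t link_m = 55;
	uint64_t link_n = 100;
	uint64_t repeat = 1;         /* pixel multiplier */

	/* clock = (m * link_clock * repeat) / n, multiplied out before
	 * the divide so no precision is lost, as the do_div() form does */
	uint64_t clock = link_m * link_freq * repeat / link_n;

	printf("recovered mode clock: %" PRIu64 " kHz\n", clock); /* 148500 */
	return 0;
}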
6968 7038
6969/** Returns the currently programmed mode of the given pipe. */ 7039/** Returns the currently programmed mode of the given pipe. */
@@ -6974,6 +7044,7 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
6974 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 7044 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6975 enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder; 7045 enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
6976 struct drm_display_mode *mode; 7046 struct drm_display_mode *mode;
7047 struct intel_crtc_config pipe_config;
6977 int htot = I915_READ(HTOTAL(cpu_transcoder)); 7048 int htot = I915_READ(HTOTAL(cpu_transcoder));
6978 int hsync = I915_READ(HSYNC(cpu_transcoder)); 7049 int hsync = I915_READ(HSYNC(cpu_transcoder));
6979 int vtot = I915_READ(VTOTAL(cpu_transcoder)); 7050 int vtot = I915_READ(VTOTAL(cpu_transcoder));
@@ -6983,7 +7054,18 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
6983 if (!mode) 7054 if (!mode)
6984 return NULL; 7055 return NULL;
6985 7056
6986 mode->clock = intel_crtc_clock_get(dev, crtc); 7057 /*
7058 * Construct a pipe_config sufficient for getting the clock info
7059 * back out of crtc_clock_get.
7060 *
7061 * Note, if LVDS ever uses a non-1 pixel multiplier, we'll need
7062 * to use a real value here instead.
7063 */
7064 pipe_config.cpu_transcoder = (enum transcoder) intel_crtc->pipe;
7065 pipe_config.pixel_multiplier = 1;
7066 i9xx_crtc_clock_get(intel_crtc, &pipe_config);
7067
7068 mode->clock = pipe_config.adjusted_mode.clock;
6987 mode->hdisplay = (htot & 0xffff) + 1; 7069 mode->hdisplay = (htot & 0xffff) + 1;
6988 mode->htotal = ((htot & 0xffff0000) >> 16) + 1; 7070 mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
6989 mode->hsync_start = (hsync & 0xffff) + 1; 7071 mode->hsync_start = (hsync & 0xffff) + 1;
@@ -7266,7 +7348,7 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
7266 intel_ring_emit(ring, MI_DISPLAY_FLIP | 7348 intel_ring_emit(ring, MI_DISPLAY_FLIP |
7267 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); 7349 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
7268 intel_ring_emit(ring, fb->pitches[0]); 7350 intel_ring_emit(ring, fb->pitches[0]);
7269 intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset); 7351 intel_ring_emit(ring, i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
7270 intel_ring_emit(ring, 0); /* aux display base address, unused */ 7352 intel_ring_emit(ring, 0); /* aux display base address, unused */
7271 7353
7272 intel_mark_page_flip_active(intel_crtc); 7354 intel_mark_page_flip_active(intel_crtc);
@@ -7307,7 +7389,7 @@ static int intel_gen3_queue_flip(struct drm_device *dev,
7307 intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | 7389 intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 |
7308 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); 7390 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
7309 intel_ring_emit(ring, fb->pitches[0]); 7391 intel_ring_emit(ring, fb->pitches[0]);
7310 intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset); 7392 intel_ring_emit(ring, i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
7311 intel_ring_emit(ring, MI_NOOP); 7393 intel_ring_emit(ring, MI_NOOP);
7312 7394
7313 intel_mark_page_flip_active(intel_crtc); 7395 intel_mark_page_flip_active(intel_crtc);
@@ -7347,7 +7429,7 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
7347 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); 7429 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
7348 intel_ring_emit(ring, fb->pitches[0]); 7430 intel_ring_emit(ring, fb->pitches[0]);
7349 intel_ring_emit(ring, 7431 intel_ring_emit(ring,
7350 (obj->gtt_offset + intel_crtc->dspaddr_offset) | 7432 (i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset) |
7351 obj->tiling_mode); 7433 obj->tiling_mode);
7352 7434
7353 /* XXX Enabling the panel-fitter across page-flip is so far 7435 /* XXX Enabling the panel-fitter across page-flip is so far
@@ -7390,7 +7472,7 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
7390 intel_ring_emit(ring, MI_DISPLAY_FLIP | 7472 intel_ring_emit(ring, MI_DISPLAY_FLIP |
7391 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); 7473 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
7392 intel_ring_emit(ring, fb->pitches[0] | obj->tiling_mode); 7474 intel_ring_emit(ring, fb->pitches[0] | obj->tiling_mode);
7393 intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset); 7475 intel_ring_emit(ring, i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
7394 7476
7395 /* Contrary to the suggestions in the documentation, 7477 /* Contrary to the suggestions in the documentation,
7396 * "Enable Panel Fitter" does not seem to be required when page 7478 * "Enable Panel Fitter" does not seem to be required when page
@@ -7455,7 +7537,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
7455 7537
7456 intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit); 7538 intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit);
7457 intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode)); 7539 intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode));
7458 intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset); 7540 intel_ring_emit(ring, i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
7459 intel_ring_emit(ring, (MI_NOOP)); 7541 intel_ring_emit(ring, (MI_NOOP));
7460 7542
7461 intel_mark_page_flip_active(intel_crtc); 7543 intel_mark_page_flip_active(intel_crtc);
@@ -7809,7 +7891,8 @@ intel_modeset_pipe_config(struct drm_crtc *crtc,
7809 7891
7810 drm_mode_copy(&pipe_config->adjusted_mode, mode); 7892 drm_mode_copy(&pipe_config->adjusted_mode, mode);
7811 drm_mode_copy(&pipe_config->requested_mode, mode); 7893 drm_mode_copy(&pipe_config->requested_mode, mode);
7812 pipe_config->cpu_transcoder = to_intel_crtc(crtc)->pipe; 7894 pipe_config->cpu_transcoder =
7895 (enum transcoder) to_intel_crtc(crtc)->pipe;
7813 pipe_config->shared_dpll = DPLL_ID_PRIVATE; 7896 pipe_config->shared_dpll = DPLL_ID_PRIVATE;
7814 7897
7815 /* Compute a starting value for pipe_config->pipe_bpp taking the source 7898 /* Compute a starting value for pipe_config->pipe_bpp taking the source
@@ -8044,6 +8127,28 @@ intel_modeset_update_state(struct drm_device *dev, unsigned prepare_pipes)
8044 8127
8045} 8128}
8046 8129
8130static bool intel_fuzzy_clock_check(struct intel_crtc_config *cur,
8131 struct intel_crtc_config *new)
8132{
8133 int clock1, clock2, diff;
8134
8135 clock1 = cur->adjusted_mode.clock;
8136 clock2 = new->adjusted_mode.clock;
8137
8138 if (clock1 == clock2)
8139 return true;
8140
8141 if (!clock1 || !clock2)
8142 return false;
8143
8144 diff = abs(clock1 - clock2);
8145
8146 if (((((diff + clock1 + clock2) * 100)) / (clock1 + clock2)) < 105)
8147 return true;
8148
8149 return false;
8150}
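
The integer check above rearranges to diff < 0.05 * (clock1 + clock2), i.e. the two clocks are accepted when they differ by less than roughly 10% of their average. A standalone sketch of the same arithmetic:

#include <stdio.h>
#include <stdlib.h>

static int fuzzy_match(int clock1, int clock2)
{
	int diff = abs(clock1 - clock2);

	return ((diff + clock1 + clock2) * 100) / (clock1 + clock2) < 105;
}

int main(void)
{
	printf("%d\n", fuzzy_match(148500, 154000)); /* ~3.6% apart -> 1 */
	printf("%d\n", fuzzy_match(148500, 190000)); /* ~25% apart  -> 0 */
	return 0;
}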
8151
8047#define for_each_intel_crtc_masked(dev, mask, intel_crtc) \ 8152#define for_each_intel_crtc_masked(dev, mask, intel_crtc) \
8048 list_for_each_entry((intel_crtc), \ 8153 list_for_each_entry((intel_crtc), \
8049 &(dev)->mode_config.crtc_list, \ 8154 &(dev)->mode_config.crtc_list, \
@@ -8075,7 +8180,7 @@ intel_pipe_config_compare(struct drm_device *dev,
8075 8180
8076#define PIPE_CONF_CHECK_FLAGS(name, mask) \ 8181#define PIPE_CONF_CHECK_FLAGS(name, mask) \
8077 if ((current_config->name ^ pipe_config->name) & (mask)) { \ 8182 if ((current_config->name ^ pipe_config->name) & (mask)) { \
8078 DRM_ERROR("mismatch in " #name " " \ 8183 DRM_ERROR("mismatch in " #name "(" #mask ") " \
8079 "(expected %i, found %i)\n", \ 8184 "(expected %i, found %i)\n", \
8080 current_config->name & (mask), \ 8185 current_config->name & (mask), \
8081 pipe_config->name & (mask)); \ 8186 pipe_config->name & (mask)); \
@@ -8109,8 +8214,7 @@ intel_pipe_config_compare(struct drm_device *dev,
8109 PIPE_CONF_CHECK_I(adjusted_mode.crtc_vsync_start); 8214 PIPE_CONF_CHECK_I(adjusted_mode.crtc_vsync_start);
8110 PIPE_CONF_CHECK_I(adjusted_mode.crtc_vsync_end); 8215 PIPE_CONF_CHECK_I(adjusted_mode.crtc_vsync_end);
8111 8216
8112 if (!HAS_PCH_SPLIT(dev)) 8217 PIPE_CONF_CHECK_I(pixel_multiplier);
8113 PIPE_CONF_CHECK_I(pixel_multiplier);
8114 8218
8115 PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags, 8219 PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags,
8116 DRM_MODE_FLAG_INTERLACE); 8220 DRM_MODE_FLAG_INTERLACE);
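
The PIPE_CONF_CHECK_FLAGS change earlier in this series stringifies the mask so the error log names the exact bits being compared. A self-contained analogue of the macro (the real one logs via DRM_ERROR and also returns false, which the hunk boundary elides):

#include <stdio.h>

struct cfg { unsigned int flags; };

/* #name and #mask put both the field and the mask text in the log. */
#define CHECK_FLAGS(cur, found, name, mask)                         \
	if (((cur)->name ^ (found)->name) & (mask))                 \
		printf("mismatch in " #name "(" #mask ") "          \
		       "(expected %u, found %u)\n",                 \
		       (cur)->name & (mask), (found)->name & (mask))

int main(void)
{
	struct cfg sw = { .flags = 0x10 };
	struct cfg hw = { .flags = 0x00 };

	/* prints: mismatch in flags(0x10) (expected 16, found 0) */
	CHECK_FLAGS(&sw, &hw, flags, 0x10);
	return 0;
}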
@@ -8141,6 +8245,7 @@ intel_pipe_config_compare(struct drm_device *dev,
8141 8245
8142 PIPE_CONF_CHECK_I(shared_dpll); 8246 PIPE_CONF_CHECK_I(shared_dpll);
8143 PIPE_CONF_CHECK_X(dpll_hw_state.dpll); 8247 PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
8248 PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
8144 PIPE_CONF_CHECK_X(dpll_hw_state.fp0); 8249 PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
8145 PIPE_CONF_CHECK_X(dpll_hw_state.fp1); 8250 PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
8146 8251
@@ -8149,6 +8254,15 @@ intel_pipe_config_compare(struct drm_device *dev,
8149#undef PIPE_CONF_CHECK_FLAGS 8254#undef PIPE_CONF_CHECK_FLAGS
8150#undef PIPE_CONF_QUIRK 8255#undef PIPE_CONF_QUIRK
8151 8256
8257 if (!IS_HASWELL(dev)) {
8258 if (!intel_fuzzy_clock_check(current_config, pipe_config)) {
8259 DRM_ERROR("mismatch in clock (expected %d, found %d)\n",
8260 current_config->adjusted_mode.clock,
8261 pipe_config->adjusted_mode.clock);
8262 return false;
8263 }
8264 }
8265
8152 return true; 8266 return true;
8153} 8267}
8154 8268
@@ -8278,6 +8392,9 @@ check_crtc_state(struct drm_device *dev)
8278 encoder->get_config(encoder, &pipe_config); 8392 encoder->get_config(encoder, &pipe_config);
8279 } 8393 }
8280 8394
8395 if (dev_priv->display.get_clock)
8396 dev_priv->display.get_clock(crtc, &pipe_config);
8397
8281 WARN(crtc->active != active, 8398 WARN(crtc->active != active,
8282 "crtc active state doesn't match with hw state " 8399 "crtc active state doesn't match with hw state "
8283 "(expected %i, found %i)\n", crtc->active, active); 8400 "(expected %i, found %i)\n", crtc->active, active);
@@ -8569,8 +8686,16 @@ intel_set_config_compute_mode_changes(struct drm_mode_set *set,
8569 } else if (set->crtc->fb != set->fb) { 8686 } else if (set->crtc->fb != set->fb) {
8570 /* If we have no fb then treat it as a full mode set */ 8687 /* If we have no fb then treat it as a full mode set */
8571 if (set->crtc->fb == NULL) { 8688 if (set->crtc->fb == NULL) {
8572 DRM_DEBUG_KMS("crtc has no fb, full mode set\n"); 8689 struct intel_crtc *intel_crtc =
8573 config->mode_changed = true; 8690 to_intel_crtc(set->crtc);
8691
8692 if (intel_crtc->active && i915_fastboot) {
8693 DRM_DEBUG_KMS("crtc has no fb, will flip\n");
8694 config->fb_changed = true;
8695 } else {
8696 DRM_DEBUG_KMS("inactive crtc, full mode set\n");
8697 config->mode_changed = true;
8698 }
8574 } else if (set->fb == NULL) { 8699 } else if (set->fb == NULL) {
8575 config->mode_changed = true; 8700 config->mode_changed = true;
8576 } else if (set->fb->pixel_format != 8701 } else if (set->fb->pixel_format !=
@@ -8800,19 +8925,32 @@ static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
8800 return val & DPLL_VCO_ENABLE; 8925 return val & DPLL_VCO_ENABLE;
8801} 8926}
8802 8927
8928static void ibx_pch_dpll_mode_set(struct drm_i915_private *dev_priv,
8929 struct intel_shared_dpll *pll)
8930{
8931 I915_WRITE(PCH_FP0(pll->id), pll->hw_state.fp0);
8932 I915_WRITE(PCH_FP1(pll->id), pll->hw_state.fp1);
8933}
8934
8803static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv, 8935static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
8804 struct intel_shared_dpll *pll) 8936 struct intel_shared_dpll *pll)
8805{ 8937{
8806 uint32_t reg, val;
8807
8808 /* PCH refclock must be enabled first */ 8938 /* PCH refclock must be enabled first */
8809 assert_pch_refclk_enabled(dev_priv); 8939 assert_pch_refclk_enabled(dev_priv);
8810 8940
8811 reg = PCH_DPLL(pll->id); 8941 I915_WRITE(PCH_DPLL(pll->id), pll->hw_state.dpll);
8812 val = I915_READ(reg); 8942
8813 val |= DPLL_VCO_ENABLE; 8943 /* Wait for the clocks to stabilize. */
8814 I915_WRITE(reg, val); 8944 POSTING_READ(PCH_DPLL(pll->id));
8815 POSTING_READ(reg); 8945 udelay(150);
8946
8947 /* The pixel multiplier can only be updated once the
8948 * DPLL is enabled and the clocks are stable.
8949 *
8950 * So write it again.
8951 */
8952 I915_WRITE(PCH_DPLL(pll->id), pll->hw_state.dpll);
8953 POSTING_READ(PCH_DPLL(pll->id));
8816 udelay(200); 8954 udelay(200);
8817} 8955}
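
The rewritten enable hook above folds in the write/wait/write-again sequence that the mode_set paths used to do by hand. A hedged userspace sketch of that pattern, with stub MMIO helpers standing in for I915_WRITE/POSTING_READ:

#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

static uint32_t fake_reg;

static void mmio_write(uint32_t val) { fake_reg = val; }
static uint32_t mmio_read(void)      { return fake_reg; }

static void dpll_enable(uint32_t dpll)
{
	mmio_write(dpll);
	(void)mmio_read();  /* posting read flushes the write */
	usleep(150);        /* wait for the clocks to stabilize */

	/* The pixel multiplier only latches once the DPLL is running,
	 * so write the same value a second time. */
	mmio_write(dpll);
	(void)mmio_read();
	usleep(200);
}

int main(void)
{
	dpll_enable(0x80000000u); /* illustrative VCO-enable value */
	printf("dpll reg: 0x%08x\n", mmio_read());
	return 0;
}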
8818 8956
@@ -8821,7 +8959,6 @@ static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
8821{ 8959{
8822 struct drm_device *dev = dev_priv->dev; 8960 struct drm_device *dev = dev_priv->dev;
8823 struct intel_crtc *crtc; 8961 struct intel_crtc *crtc;
8824 uint32_t reg, val;
8825 8962
8826 /* Make sure no transcoder is still depending on us. */ 8963 /* Make sure no transcoder is still depending on us. */
8827 list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) { 8964 list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
@@ -8829,11 +8966,8 @@ static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
8829 assert_pch_transcoder_disabled(dev_priv, crtc->pipe); 8966 assert_pch_transcoder_disabled(dev_priv, crtc->pipe);
8830 } 8967 }
8831 8968
8832 reg = PCH_DPLL(pll->id); 8969 I915_WRITE(PCH_DPLL(pll->id), 0);
8833 val = I915_READ(reg); 8970 POSTING_READ(PCH_DPLL(pll->id));
8834 val &= ~DPLL_VCO_ENABLE;
8835 I915_WRITE(reg, val);
8836 POSTING_READ(reg);
8837 udelay(200); 8971 udelay(200);
8838} 8972}
8839 8973
@@ -8852,6 +8986,7 @@ static void ibx_pch_dpll_init(struct drm_device *dev)
8852 for (i = 0; i < dev_priv->num_shared_dpll; i++) { 8986 for (i = 0; i < dev_priv->num_shared_dpll; i++) {
8853 dev_priv->shared_dplls[i].id = i; 8987 dev_priv->shared_dplls[i].id = i;
8854 dev_priv->shared_dplls[i].name = ibx_pch_dpll_names[i]; 8988 dev_priv->shared_dplls[i].name = ibx_pch_dpll_names[i];
8989 dev_priv->shared_dplls[i].mode_set = ibx_pch_dpll_mode_set;
8855 dev_priv->shared_dplls[i].enable = ibx_pch_dpll_enable; 8990 dev_priv->shared_dplls[i].enable = ibx_pch_dpll_enable;
8856 dev_priv->shared_dplls[i].disable = ibx_pch_dpll_disable; 8991 dev_priv->shared_dplls[i].disable = ibx_pch_dpll_disable;
8857 dev_priv->shared_dplls[i].get_hw_state = 8992 dev_priv->shared_dplls[i].get_hw_state =
@@ -9268,6 +9403,7 @@ static void intel_init_display(struct drm_device *dev)
9268 dev_priv->display.update_plane = ironlake_update_plane; 9403 dev_priv->display.update_plane = ironlake_update_plane;
9269 } else if (HAS_PCH_SPLIT(dev)) { 9404 } else if (HAS_PCH_SPLIT(dev)) {
9270 dev_priv->display.get_pipe_config = ironlake_get_pipe_config; 9405 dev_priv->display.get_pipe_config = ironlake_get_pipe_config;
9406 dev_priv->display.get_clock = ironlake_crtc_clock_get;
9271 dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set; 9407 dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set;
9272 dev_priv->display.crtc_enable = ironlake_crtc_enable; 9408 dev_priv->display.crtc_enable = ironlake_crtc_enable;
9273 dev_priv->display.crtc_disable = ironlake_crtc_disable; 9409 dev_priv->display.crtc_disable = ironlake_crtc_disable;
@@ -9275,6 +9411,7 @@ static void intel_init_display(struct drm_device *dev)
9275 dev_priv->display.update_plane = ironlake_update_plane; 9411 dev_priv->display.update_plane = ironlake_update_plane;
9276 } else if (IS_VALLEYVIEW(dev)) { 9412 } else if (IS_VALLEYVIEW(dev)) {
9277 dev_priv->display.get_pipe_config = i9xx_get_pipe_config; 9413 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
9414 dev_priv->display.get_clock = i9xx_crtc_clock_get;
9278 dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set; 9415 dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
9279 dev_priv->display.crtc_enable = valleyview_crtc_enable; 9416 dev_priv->display.crtc_enable = valleyview_crtc_enable;
9280 dev_priv->display.crtc_disable = i9xx_crtc_disable; 9417 dev_priv->display.crtc_disable = i9xx_crtc_disable;
@@ -9282,6 +9419,7 @@ static void intel_init_display(struct drm_device *dev)
9282 dev_priv->display.update_plane = i9xx_update_plane; 9419 dev_priv->display.update_plane = i9xx_update_plane;
9283 } else { 9420 } else {
9284 dev_priv->display.get_pipe_config = i9xx_get_pipe_config; 9421 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
9422 dev_priv->display.get_clock = i9xx_crtc_clock_get;
9285 dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set; 9423 dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
9286 dev_priv->display.crtc_enable = i9xx_crtc_enable; 9424 dev_priv->display.crtc_enable = i9xx_crtc_enable;
9287 dev_priv->display.crtc_disable = i9xx_crtc_disable; 9425 dev_priv->display.crtc_disable = i9xx_crtc_disable;
@@ -9566,7 +9704,7 @@ void intel_modeset_init(struct drm_device *dev)
9566 INTEL_INFO(dev)->num_pipes, 9704 INTEL_INFO(dev)->num_pipes,
9567 INTEL_INFO(dev)->num_pipes > 1 ? "s" : ""); 9705 INTEL_INFO(dev)->num_pipes > 1 ? "s" : "");
9568 9706
9569 for (i = 0; i < INTEL_INFO(dev)->num_pipes; i++) { 9707 for_each_pipe(i) {
9570 intel_crtc_init(dev, i); 9708 intel_crtc_init(dev, i);
9571 for (j = 0; j < dev_priv->num_plane; j++) { 9709 for (j = 0; j < dev_priv->num_plane; j++) {
9572 ret = intel_plane_init(dev, i, j); 9710 ret = intel_plane_init(dev, i, j);
@@ -9842,6 +9980,15 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
9842 pipe); 9980 pipe);
9843 } 9981 }
9844 9982
9983 list_for_each_entry(crtc, &dev->mode_config.crtc_list,
9984 base.head) {
9985 if (!crtc->active)
9986 continue;
9987 if (dev_priv->display.get_clock)
9988 dev_priv->display.get_clock(crtc,
9989 &crtc->config);
9990 }
9991
9845 list_for_each_entry(connector, &dev->mode_config.connector_list, 9992 list_for_each_entry(connector, &dev->mode_config.connector_list,
9846 base.head) { 9993 base.head) {
9847 if (connector->get_hw_state(connector)) { 9994 if (connector->get_hw_state(connector)) {
@@ -9872,6 +10019,22 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
9872 10019
9873 intel_modeset_readout_hw_state(dev); 10020 intel_modeset_readout_hw_state(dev);
9874 10021
10022 /*
10023 * Now that we have the config, copy it to each CRTC struct
10024 * Note that this could go away if we move to using crtc_config
10025 * checking everywhere.
10026 */
10027 list_for_each_entry(crtc, &dev->mode_config.crtc_list,
10028 base.head) {
10029 if (crtc->active && i915_fastboot) {
10030 intel_crtc_mode_from_pipe_config(crtc, &crtc->config);
10031
10032 DRM_DEBUG_KMS("[CRTC:%d] found active mode: ",
10033 crtc->base.base.id);
10034 drm_mode_debug_printmodeline(&crtc->base.mode);
10035 }
10036 }
10037
9875 /* HW state is read out, now we need to sanitize this mess. */ 10038 /* HW state is read out, now we need to sanitize this mess. */
9876 list_for_each_entry(encoder, &dev->mode_config.encoder_list, 10039 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
9877 base.head) { 10040 base.head) {
@@ -10002,9 +10165,6 @@ int intel_modeset_vga_set_state(struct drm_device *dev, bool state)
10002 return 0; 10165 return 0;
10003} 10166}
10004 10167
10005#ifdef CONFIG_DEBUG_FS
10006#include <linux/seq_file.h>
10007
10008struct intel_display_error_state { 10168struct intel_display_error_state {
10009 10169
10010 u32 power_well_driver; 10170 u32 power_well_driver;
@@ -10148,4 +10308,3 @@ intel_display_print_error_state(struct drm_i915_error_state_buf *m,
10148 err_printf(m, " BASE: %08x\n", error->cursor[i].base); 10308 err_printf(m, " BASE: %08x\n", error->cursor[i].base);
10149 } 10309 }
10150} 10310}
10151#endif
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 26e162bb3a51..7db2cd76786d 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -1360,6 +1360,13 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
1360 } 1360 }
1361 1361
1362 pipe_config->adjusted_mode.flags |= flags; 1362 pipe_config->adjusted_mode.flags |= flags;
1363
1364 if (dp_to_dig_port(intel_dp)->port == PORT_A) {
1365 if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_160MHZ)
1366 pipe_config->port_clock = 162000;
1367 else
1368 pipe_config->port_clock = 270000;
1369 }
1363} 1370}
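
A small sketch, assuming the two fixed DP link rates named above, of the port clock mapping that feeds the M/N math in ironlake_crtc_clock_get:

#include <stdbool.h>
#include <stdio.h>

static int dp_port_clock_khz(bool pll_is_160mhz)
{
	/* RBR symbol clock vs. HBR symbol clock, reported in kHz */
	return pll_is_160mhz ? 162000 : 270000;
}

int main(void)
{
	printf("%d kHz\n", dp_port_clock_khz(true)); /* prints 162000 */
	return 0;
}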
1364 1371
1365static void intel_disable_dp(struct intel_encoder *encoder) 1372static void intel_disable_dp(struct intel_encoder *encoder)
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index c8c9b6f48230..5dfc1a0f2351 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -549,13 +549,6 @@ struct intel_unpin_work {
549 bool enable_stall_check; 549 bool enable_stall_check;
550}; 550};
551 551
552struct intel_fbc_work {
553 struct delayed_work work;
554 struct drm_crtc *crtc;
555 struct drm_framebuffer *fb;
556 int interval;
557};
558
559int intel_pch_rawclk(struct drm_device *dev); 552int intel_pch_rawclk(struct drm_device *dev);
560 553
561int intel_connector_update_modes(struct drm_connector *connector, 554int intel_connector_update_modes(struct drm_connector *connector,
@@ -747,6 +740,22 @@ extern int intel_overlay_attrs(struct drm_device *dev, void *data,
747extern void intel_fb_output_poll_changed(struct drm_device *dev); 740extern void intel_fb_output_poll_changed(struct drm_device *dev);
748extern void intel_fb_restore_mode(struct drm_device *dev); 741extern void intel_fb_restore_mode(struct drm_device *dev);
749 742
743struct intel_shared_dpll *
744intel_crtc_to_shared_dpll(struct intel_crtc *crtc);
745
746void assert_shared_dpll(struct drm_i915_private *dev_priv,
747 struct intel_shared_dpll *pll,
748 bool state);
749#define assert_shared_dpll_enabled(d, p) assert_shared_dpll(d, p, true)
750#define assert_shared_dpll_disabled(d, p) assert_shared_dpll(d, p, false)
751void assert_pll(struct drm_i915_private *dev_priv,
752 enum pipe pipe, bool state);
753#define assert_pll_enabled(d, p) assert_pll(d, p, true)
754#define assert_pll_disabled(d, p) assert_pll(d, p, false)
755void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
756 enum pipe pipe, bool state);
757#define assert_fdi_rx_pll_enabled(d, p) assert_fdi_rx_pll(d, p, true)
758#define assert_fdi_rx_pll_disabled(d, p) assert_fdi_rx_pll(d, p, false)
750extern void assert_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, 759extern void assert_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
751 bool state); 760 bool state);
752#define assert_pipe_enabled(d, p) assert_pipe(d, p, true) 761#define assert_pipe_enabled(d, p) assert_pipe(d, p, true)
@@ -780,7 +789,6 @@ extern int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
780extern void intel_init_pm(struct drm_device *dev); 789extern void intel_init_pm(struct drm_device *dev);
781/* FBC */ 790/* FBC */
782extern bool intel_fbc_enabled(struct drm_device *dev); 791extern bool intel_fbc_enabled(struct drm_device *dev);
783extern void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval);
784extern void intel_update_fbc(struct drm_device *dev); 792extern void intel_update_fbc(struct drm_device *dev);
785/* IPS */ 793/* IPS */
786extern void intel_gpu_ips_init(struct drm_i915_private *dev_priv); 794extern void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index eb2020eb2b7e..cbbc49dc03be 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -283,7 +283,6 @@ static void intel_dvo_mode_set(struct drm_encoder *encoder,
283 int pipe = intel_crtc->pipe; 283 int pipe = intel_crtc->pipe;
284 u32 dvo_val; 284 u32 dvo_val;
285 u32 dvo_reg = intel_dvo->dev.dvo_reg, dvo_srcdim_reg; 285 u32 dvo_reg = intel_dvo->dev.dvo_reg, dvo_srcdim_reg;
286 int dpll_reg = DPLL(pipe);
287 286
288 switch (dvo_reg) { 287 switch (dvo_reg) {
289 case DVOA: 288 case DVOA:
@@ -314,8 +313,6 @@ static void intel_dvo_mode_set(struct drm_encoder *encoder,
314 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) 313 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
315 dvo_val |= DVO_VSYNC_ACTIVE_HIGH; 314 dvo_val |= DVO_VSYNC_ACTIVE_HIGH;
316 315
317 I915_WRITE(dpll_reg, I915_READ(dpll_reg) | DPLL_DVO_HIGH_SPEED);
318
319 /*I915_WRITE(DVOB_SRCDIM, 316 /*I915_WRITE(DVOB_SRCDIM,
320 (adjusted_mode->hdisplay << DVO_SRCDIM_HORIZONTAL_SHIFT) | 317 (adjusted_mode->hdisplay << DVO_SRCDIM_HORIZONTAL_SHIFT) |
321 (adjusted_mode->VDisplay << DVO_SRCDIM_VERTICAL_SHIFT));*/ 318 (adjusted_mode->VDisplay << DVO_SRCDIM_VERTICAL_SHIFT));*/
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
index dff669e2387f..f3c97e05b0d8 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -139,11 +139,11 @@ static int intelfb_create(struct drm_fb_helper *helper,
139 info->apertures->ranges[0].base = dev->mode_config.fb_base; 139 info->apertures->ranges[0].base = dev->mode_config.fb_base;
140 info->apertures->ranges[0].size = dev_priv->gtt.mappable_end; 140 info->apertures->ranges[0].size = dev_priv->gtt.mappable_end;
141 141
142 info->fix.smem_start = dev->mode_config.fb_base + obj->gtt_offset; 142 info->fix.smem_start = dev->mode_config.fb_base + i915_gem_obj_ggtt_offset(obj);
143 info->fix.smem_len = size; 143 info->fix.smem_len = size;
144 144
145 info->screen_base = 145 info->screen_base =
146 ioremap_wc(dev_priv->gtt.mappable_base + obj->gtt_offset, 146 ioremap_wc(dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj),
147 size); 147 size);
148 if (!info->screen_base) { 148 if (!info->screen_base) {
149 ret = -ENOSPC; 149 ret = -ENOSPC;
@@ -166,9 +166,9 @@ static int intelfb_create(struct drm_fb_helper *helper,
166 166
167 /* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */ 167 /* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
168 168
169 DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08x, bo %p\n", 169 DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08lx, bo %p\n",
170 fb->width, fb->height, 170 fb->width, fb->height,
171 obj->gtt_offset, obj); 171 i915_gem_obj_ggtt_offset(obj), obj);
172 172
173 173
174 mutex_unlock(&dev->struct_mutex); 174 mutex_unlock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 021e8daa022d..a0745d143902 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -115,17 +115,25 @@ static void intel_lvds_get_config(struct intel_encoder *encoder,
115 * This is an exception to the general rule that mode_set doesn't turn 115 * This is an exception to the general rule that mode_set doesn't turn
116 * things on. 116 * things on.
117 */ 117 */
118static void intel_pre_pll_enable_lvds(struct intel_encoder *encoder) 118static void intel_pre_enable_lvds(struct intel_encoder *encoder)
119{ 119{
120 struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base); 120 struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
121 struct drm_device *dev = encoder->base.dev; 121 struct drm_device *dev = encoder->base.dev;
122 struct drm_i915_private *dev_priv = dev->dev_private; 122 struct drm_i915_private *dev_priv = dev->dev_private;
123 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc); 123 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
124 struct drm_display_mode *fixed_mode = 124 struct drm_display_mode *fixed_mode =
125 lvds_encoder->attached_connector->base.panel.fixed_mode; 125 lvds_encoder->attached_connector->base.panel.fixed_mode;
126 int pipe = intel_crtc->pipe; 126 int pipe = crtc->pipe;
127 u32 temp; 127 u32 temp;
128 128
129 if (HAS_PCH_SPLIT(dev)) {
130 assert_fdi_rx_pll_disabled(dev_priv, pipe);
131 assert_shared_dpll_disabled(dev_priv,
132 intel_crtc_to_shared_dpll(crtc));
133 } else {
134 assert_pll_disabled(dev_priv, pipe);
135 }
136
129 temp = I915_READ(lvds_encoder->reg); 137 temp = I915_READ(lvds_encoder->reg);
130 temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP; 138 temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
131 139
@@ -142,7 +150,7 @@ static void intel_pre_pll_enable_lvds(struct intel_encoder *encoder)
142 150
143 /* set the corresponding LVDS_BORDER bit */ 151 /* set the corresponding LVDS_BORDER bit */
144 temp &= ~LVDS_BORDER_ENABLE; 152 temp &= ~LVDS_BORDER_ENABLE;
145 temp |= intel_crtc->config.gmch_pfit.lvds_border_bits; 153 temp |= crtc->config.gmch_pfit.lvds_border_bits;
146 /* Set the B0-B3 data pairs corresponding to whether we're going to 154 /* Set the B0-B3 data pairs corresponding to whether we're going to
147 * set the DPLLs for dual-channel mode or not. 155 * set the DPLLs for dual-channel mode or not.
148 */ 156 */
@@ -162,8 +170,7 @@ static void intel_pre_pll_enable_lvds(struct intel_encoder *encoder)
162 if (INTEL_INFO(dev)->gen == 4) { 170 if (INTEL_INFO(dev)->gen == 4) {
163 /* Bspec wording suggests that LVDS port dithering only exists 171 /* Bspec wording suggests that LVDS port dithering only exists
164 * for 18bpp panels. */ 172 * for 18bpp panels. */
165 if (intel_crtc->config.dither && 173 if (crtc->config.dither && crtc->config.pipe_bpp == 18)
166 intel_crtc->config.pipe_bpp == 18)
167 temp |= LVDS_ENABLE_DITHER; 174 temp |= LVDS_ENABLE_DITHER;
168 else 175 else
169 temp &= ~LVDS_ENABLE_DITHER; 176 temp &= ~LVDS_ENABLE_DITHER;
@@ -955,7 +962,7 @@ void intel_lvds_init(struct drm_device *dev)
955 DRM_MODE_ENCODER_LVDS); 962 DRM_MODE_ENCODER_LVDS);
956 963
957 intel_encoder->enable = intel_enable_lvds; 964 intel_encoder->enable = intel_enable_lvds;
958 intel_encoder->pre_pll_enable = intel_pre_pll_enable_lvds; 965 intel_encoder->pre_enable = intel_pre_enable_lvds;
959 intel_encoder->compute_config = intel_lvds_compute_config; 966 intel_encoder->compute_config = intel_lvds_compute_config;
960 intel_encoder->disable = intel_disable_lvds; 967 intel_encoder->disable = intel_disable_lvds;
961 intel_encoder->get_hw_state = intel_lvds_get_hw_state; 968 intel_encoder->get_hw_state = intel_lvds_get_hw_state;
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index a3698812e9c7..2abb53e6f1e0 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -196,7 +196,7 @@ intel_overlay_map_regs(struct intel_overlay *overlay)
196 regs = (struct overlay_registers __iomem *)overlay->reg_bo->phys_obj->handle->vaddr; 196 regs = (struct overlay_registers __iomem *)overlay->reg_bo->phys_obj->handle->vaddr;
197 else 197 else
198 regs = io_mapping_map_wc(dev_priv->gtt.mappable, 198 regs = io_mapping_map_wc(dev_priv->gtt.mappable,
199 overlay->reg_bo->gtt_offset); 199 i915_gem_obj_ggtt_offset(overlay->reg_bo));
200 200
201 return regs; 201 return regs;
202} 202}
@@ -740,7 +740,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
740 swidth = params->src_w; 740 swidth = params->src_w;
741 swidthsw = calc_swidthsw(overlay->dev, params->offset_Y, tmp_width); 741 swidthsw = calc_swidthsw(overlay->dev, params->offset_Y, tmp_width);
742 sheight = params->src_h; 742 sheight = params->src_h;
743 iowrite32(new_bo->gtt_offset + params->offset_Y, &regs->OBUF_0Y); 743 iowrite32(i915_gem_obj_ggtt_offset(new_bo) + params->offset_Y, &regs->OBUF_0Y);
744 ostride = params->stride_Y; 744 ostride = params->stride_Y;
745 745
746 if (params->format & I915_OVERLAY_YUV_PLANAR) { 746 if (params->format & I915_OVERLAY_YUV_PLANAR) {
@@ -754,8 +754,8 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
754 params->src_w/uv_hscale); 754 params->src_w/uv_hscale);
755 swidthsw |= max_t(u32, tmp_U, tmp_V) << 16; 755 swidthsw |= max_t(u32, tmp_U, tmp_V) << 16;
756 sheight |= (params->src_h/uv_vscale) << 16; 756 sheight |= (params->src_h/uv_vscale) << 16;
757 iowrite32(new_bo->gtt_offset + params->offset_U, &regs->OBUF_0U); 757 iowrite32(i915_gem_obj_ggtt_offset(new_bo) + params->offset_U, &regs->OBUF_0U);
758 iowrite32(new_bo->gtt_offset + params->offset_V, &regs->OBUF_0V); 758 iowrite32(i915_gem_obj_ggtt_offset(new_bo) + params->offset_V, &regs->OBUF_0V);
759 ostride |= params->stride_UV << 16; 759 ostride |= params->stride_UV << 16;
760 } 760 }
761 761
@@ -1355,7 +1355,7 @@ void intel_setup_overlay(struct drm_device *dev)
1355 DRM_ERROR("failed to pin overlay register bo\n"); 1355 DRM_ERROR("failed to pin overlay register bo\n");
1356 goto out_free_bo; 1356 goto out_free_bo;
1357 } 1357 }
1358 overlay->flip_addr = reg_bo->gtt_offset; 1358 overlay->flip_addr = i915_gem_obj_ggtt_offset(reg_bo);
1359 1359
1360 ret = i915_gem_object_set_to_gtt_domain(reg_bo, true); 1360 ret = i915_gem_object_set_to_gtt_domain(reg_bo, true);
1361 if (ret) { 1361 if (ret) {
@@ -1412,9 +1412,6 @@ void intel_cleanup_overlay(struct drm_device *dev)
1412 kfree(dev_priv->overlay); 1412 kfree(dev_priv->overlay);
1413} 1413}
1414 1414
1415#ifdef CONFIG_DEBUG_FS
1416#include <linux/seq_file.h>
1417
1418struct intel_overlay_error_state { 1415struct intel_overlay_error_state {
1419 struct overlay_registers regs; 1416 struct overlay_registers regs;
1420 unsigned long base; 1417 unsigned long base;
@@ -1435,7 +1432,7 @@ intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
1435 overlay->reg_bo->phys_obj->handle->vaddr; 1432 overlay->reg_bo->phys_obj->handle->vaddr;
1436 else 1433 else
1437 regs = io_mapping_map_atomic_wc(dev_priv->gtt.mappable, 1434 regs = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
1438 overlay->reg_bo->gtt_offset); 1435 i915_gem_obj_ggtt_offset(overlay->reg_bo));
1439 1436
1440 return regs; 1437 return regs;
1441} 1438}
@@ -1468,7 +1465,7 @@ intel_overlay_capture_error_state(struct drm_device *dev)
1468 if (OVERLAY_NEEDS_PHYSICAL(overlay->dev)) 1465 if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
1469 error->base = (__force long)overlay->reg_bo->phys_obj->handle->vaddr; 1466 error->base = (__force long)overlay->reg_bo->phys_obj->handle->vaddr;
1470 else 1467 else
1471 error->base = overlay->reg_bo->gtt_offset; 1468 error->base = i915_gem_obj_ggtt_offset(overlay->reg_bo);
1472 1469
1473 regs = intel_overlay_map_regs_atomic(overlay); 1470 regs = intel_overlay_map_regs_atomic(overlay);
1474 if (!regs) 1471 if (!regs)
@@ -1537,4 +1534,3 @@ intel_overlay_print_error_state(struct drm_i915_error_state_buf *m,
1537 P(UVSCALEV); 1534 P(UVSCALEV);
1538#undef P 1535#undef P
1539} 1536}
1540#endif
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index d10e6735771f..fb4afaa8036f 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -30,6 +30,7 @@
 #include "intel_drv.h"
 #include "../../../platform/x86/intel_ips.h"
 #include <linux/module.h>
+#include <drm/i915_powerwell.h>
 
 #define FORCEWAKE_ACK_TIMEOUT_MS 2
 
@@ -86,7 +87,7 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
         int plane, i;
         u32 fbc_ctl, fbc_ctl2;
 
-        cfb_pitch = dev_priv->cfb_size / FBC_LL_SIZE;
+        cfb_pitch = dev_priv->fbc.size / FBC_LL_SIZE;
         if (fb->pitches[0] < cfb_pitch)
                 cfb_pitch = fb->pitches[0];
 
@@ -217,7 +218,7 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
                    (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
                    (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
         I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
-        I915_WRITE(ILK_FBC_RT_BASE, obj->gtt_offset | ILK_FBC_RT_VALID);
+        I915_WRITE(ILK_FBC_RT_BASE, i915_gem_obj_ggtt_offset(obj) | ILK_FBC_RT_VALID);
         /* enable it... */
         I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
 
@@ -274,7 +275,7 @@ static void gen7_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
         struct drm_i915_gem_object *obj = intel_fb->obj;
         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 
-        I915_WRITE(IVB_FBC_RT_BASE, obj->gtt_offset);
+        I915_WRITE(IVB_FBC_RT_BASE, i915_gem_obj_ggtt_offset(obj));
 
         I915_WRITE(ILK_DPFC_CONTROL, DPFC_CTL_EN | DPFC_CTL_LIMIT_1X |
                    IVB_DPFC_CTL_FENCE_EN |
@@ -325,7 +326,7 @@ static void intel_fbc_work_fn(struct work_struct *__work)
         struct drm_i915_private *dev_priv = dev->dev_private;
 
         mutex_lock(&dev->struct_mutex);
-        if (work == dev_priv->fbc_work) {
+        if (work == dev_priv->fbc.fbc_work) {
                 /* Double check that we haven't switched fb without cancelling
                  * the prior work.
                  */
@@ -333,12 +334,12 @@ static void intel_fbc_work_fn(struct work_struct *__work)
                         dev_priv->display.enable_fbc(work->crtc,
                                                      work->interval);
 
-                        dev_priv->cfb_plane = to_intel_crtc(work->crtc)->plane;
-                        dev_priv->cfb_fb = work->crtc->fb->base.id;
-                        dev_priv->cfb_y = work->crtc->y;
+                        dev_priv->fbc.plane = to_intel_crtc(work->crtc)->plane;
+                        dev_priv->fbc.fb_id = work->crtc->fb->base.id;
+                        dev_priv->fbc.y = work->crtc->y;
                 }
 
-                dev_priv->fbc_work = NULL;
+                dev_priv->fbc.fbc_work = NULL;
         }
         mutex_unlock(&dev->struct_mutex);
 
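The `work == dev_priv->fbc.fbc_work` test under `struct_mutex` is the stale-work guard that makes the asynchronous cancel in the next hunk safe: the canceller only resets the pointer, and a work item that already fired discovers it has been superseded. A self-contained sketch of the pattern, with hypothetical `fbc_ctx`/`fbc_work` types standing in for the i915 structures:

    #include <linux/mutex.h>
    #include <linux/slab.h>
    #include <linux/workqueue.h>

    /* Hypothetical stand-ins for drm_i915_private->fbc and intel_fbc_work. */
    struct fbc_work;

    struct fbc_ctx {
            struct mutex lock;              /* plays the role of struct_mutex */
            struct fbc_work *pending;       /* latest scheduled request, or NULL */
    };

    struct fbc_work {
            struct delayed_work work;
            struct fbc_ctx *ctx;
    };

    static void fbc_work_fn(struct work_struct *__work)
    {
            struct fbc_work *work =
                    container_of(to_delayed_work(__work), struct fbc_work, work);
            struct fbc_ctx *ctx = work->ctx;

            mutex_lock(&ctx->lock);
            /* Act only if we are still the most recently scheduled request;
             * a canceller just resets ctx->pending and lets us fall through. */
            if (work == ctx->pending) {
                    /* ... perform the deferred enable here ... */
                    ctx->pending = NULL;
            }
            mutex_unlock(&ctx->lock);

            kfree(work);
    }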
@@ -347,28 +348,28 @@ static void intel_fbc_work_fn(struct work_struct *__work)
 
 static void intel_cancel_fbc_work(struct drm_i915_private *dev_priv)
 {
-        if (dev_priv->fbc_work == NULL)
+        if (dev_priv->fbc.fbc_work == NULL)
                 return;
 
         DRM_DEBUG_KMS("cancelling pending FBC enable\n");
 
         /* Synchronisation is provided by struct_mutex and checking of
-         * dev_priv->fbc_work, so we can perform the cancellation
+         * dev_priv->fbc.fbc_work, so we can perform the cancellation
          * entirely asynchronously.
          */
-        if (cancel_delayed_work(&dev_priv->fbc_work->work))
+        if (cancel_delayed_work(&dev_priv->fbc.fbc_work->work))
                 /* tasklet was killed before being run, clean up */
-                kfree(dev_priv->fbc_work);
+                kfree(dev_priv->fbc.fbc_work);
 
         /* Mark the work as no longer wanted so that if it does
          * wake-up (because the work was already running and waiting
          * for our mutex), it will discover that is no longer
          * necessary to run.
          */
-        dev_priv->fbc_work = NULL;
+        dev_priv->fbc.fbc_work = NULL;
 }
 
-void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
+static void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
 {
         struct intel_fbc_work *work;
         struct drm_device *dev = crtc->dev;
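The cancellation side pairs with the guard above: `cancel_delayed_work()` returns true only when the timer had not fired, in which case the handler never ran and the caller owns the free; otherwise the handler is running (or already ran) and will see the NULLed pointer. A sketch of the counterpart, using the same hypothetical types as before:

    /* Sketch: asynchronous cancel for the pattern above.  Caller
     * serialises against schedulers via ctx->lock. */
    static void fbc_cancel_work(struct fbc_ctx *ctx)
    {
            if (ctx->pending == NULL)
                    return;

            /* True means the timer had not fired yet: the handler will
             * never run, so the allocation is ours to release. */
            if (cancel_delayed_work(&ctx->pending->work))
                    kfree(ctx->pending);

            /* Either way, mark the request stale for a handler that is
             * already past its timer and waiting on ctx->lock. */
            ctx->pending = NULL;
    }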
@@ -381,6 +382,7 @@ void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
 
         work = kzalloc(sizeof *work, GFP_KERNEL);
         if (work == NULL) {
+                DRM_ERROR("Failed to allocate FBC work structure\n");
                 dev_priv->display.enable_fbc(crtc, interval);
                 return;
         }
@@ -390,9 +392,7 @@ void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
         work->interval = interval;
         INIT_DELAYED_WORK(&work->work, intel_fbc_work_fn);
 
-        dev_priv->fbc_work = work;
-
-        DRM_DEBUG_KMS("scheduling delayed FBC enable\n");
+        dev_priv->fbc.fbc_work = work;
 
         /* Delay the actual enabling to let pageflipping cease and the
          * display to settle before starting the compression. Note that
@@ -418,7 +418,7 @@ void intel_disable_fbc(struct drm_device *dev)
                 return;
 
         dev_priv->display.disable_fbc(dev);
-        dev_priv->cfb_plane = -1;
+        dev_priv->fbc.plane = -1;
 }
 
 /**
@@ -448,7 +448,6 @@ void intel_update_fbc(struct drm_device *dev)
         struct drm_framebuffer *fb;
         struct intel_framebuffer *intel_fb;
         struct drm_i915_gem_object *obj;
-        int enable_fbc;
         unsigned int max_hdisplay, max_vdisplay;
 
         if (!i915_powersave)
@@ -471,7 +470,8 @@ void intel_update_fbc(struct drm_device *dev)
             !to_intel_crtc(tmp_crtc)->primary_disabled) {
                 if (crtc) {
                         DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
-                        dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES;
+                        dev_priv->fbc.no_fbc_reason =
+                                FBC_MULTIPLE_PIPES;
                         goto out_disable;
                 }
                 crtc = tmp_crtc;
@@ -480,7 +480,7 @@ void intel_update_fbc(struct drm_device *dev)
 
         if (!crtc || crtc->fb == NULL) {
                 DRM_DEBUG_KMS("no output, disabling\n");
-                dev_priv->no_fbc_reason = FBC_NO_OUTPUT;
+                dev_priv->fbc.no_fbc_reason = FBC_NO_OUTPUT;
                 goto out_disable;
         }
 
@@ -489,23 +489,22 @@ void intel_update_fbc(struct drm_device *dev)
         intel_fb = to_intel_framebuffer(fb);
         obj = intel_fb->obj;
 
-        enable_fbc = i915_enable_fbc;
-        if (enable_fbc < 0) {
-                DRM_DEBUG_KMS("fbc set to per-chip default\n");
-                enable_fbc = 1;
-                if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
-                        enable_fbc = 0;
+        if (i915_enable_fbc < 0 &&
+            INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev)) {
+                DRM_DEBUG_KMS("disabled per chip default\n");
+                dev_priv->fbc.no_fbc_reason = FBC_CHIP_DEFAULT;
+                goto out_disable;
         }
-        if (!enable_fbc) {
+        if (!i915_enable_fbc) {
                 DRM_DEBUG_KMS("fbc disabled per module param\n");
-                dev_priv->no_fbc_reason = FBC_MODULE_PARAM;
+                dev_priv->fbc.no_fbc_reason = FBC_MODULE_PARAM;
                 goto out_disable;
         }
         if ((crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) ||
             (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)) {
                 DRM_DEBUG_KMS("mode incompatible with compression, "
                               "disabling\n");
-                dev_priv->no_fbc_reason = FBC_UNSUPPORTED_MODE;
+                dev_priv->fbc.no_fbc_reason = FBC_UNSUPPORTED_MODE;
                 goto out_disable;
         }
 
@@ -519,13 +518,13 @@ void intel_update_fbc(struct drm_device *dev)
         if ((crtc->mode.hdisplay > max_hdisplay) ||
             (crtc->mode.vdisplay > max_vdisplay)) {
                 DRM_DEBUG_KMS("mode too large for compression, disabling\n");
-                dev_priv->no_fbc_reason = FBC_MODE_TOO_LARGE;
+                dev_priv->fbc.no_fbc_reason = FBC_MODE_TOO_LARGE;
                 goto out_disable;
         }
         if ((IS_I915GM(dev) || IS_I945GM(dev) || IS_HASWELL(dev)) &&
             intel_crtc->plane != 0) {
                 DRM_DEBUG_KMS("plane not 0, disabling compression\n");
-                dev_priv->no_fbc_reason = FBC_BAD_PLANE;
+                dev_priv->fbc.no_fbc_reason = FBC_BAD_PLANE;
                 goto out_disable;
         }
 
@@ -535,7 +534,7 @@ void intel_update_fbc(struct drm_device *dev)
         if (obj->tiling_mode != I915_TILING_X ||
             obj->fence_reg == I915_FENCE_REG_NONE) {
                 DRM_DEBUG_KMS("framebuffer not tiled or fenced, disabling compression\n");
-                dev_priv->no_fbc_reason = FBC_NOT_TILED;
+                dev_priv->fbc.no_fbc_reason = FBC_NOT_TILED;
                 goto out_disable;
         }
 
@@ -545,7 +544,7 @@ void intel_update_fbc(struct drm_device *dev)
 
         if (i915_gem_stolen_setup_compression(dev, intel_fb->obj->base.size)) {
                 DRM_DEBUG_KMS("framebuffer too large, disabling compression\n");
-                dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
+                dev_priv->fbc.no_fbc_reason = FBC_STOLEN_TOO_SMALL;
                 goto out_disable;
         }
 
@@ -554,9 +553,9 @@ void intel_update_fbc(struct drm_device *dev)
          * cannot be unpinned (and have its GTT offset and fence revoked)
          * without first being decoupled from the scanout and FBC disabled.
          */
-        if (dev_priv->cfb_plane == intel_crtc->plane &&
-            dev_priv->cfb_fb == fb->base.id &&
-            dev_priv->cfb_y == crtc->y)
+        if (dev_priv->fbc.plane == intel_crtc->plane &&
+            dev_priv->fbc.fb_id == fb->base.id &&
+            dev_priv->fbc.y == crtc->y)
                 return;
 
         if (intel_fbc_enabled(dev)) {
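All the scattered `cfb_*` fields and `no_fbc_reason` move under a single `dev_priv->fbc` struct in this series. A sketch of the consolidated state implied by the renames above (field types are inferred from usage and may differ from the real drm_i915_private layout):

    /* Sketch: FBC bookkeeping gathered into one struct. */
    struct i915_fbc {
            unsigned long size;     /* compressed fb size (was cfb_size) */
            int plane;              /* plane being compressed, -1 when off */
            int fb_id;              /* framebuffer id (was cfb_fb) */
            int y;                  /* scanout y offset (was cfb_y) */
            enum no_fbc_reason no_fbc_reason;
            struct intel_fbc_work *fbc_work; /* pending deferred enable */
    };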
@@ -2468,8 +2467,8 @@ static void hsw_compute_wm_results(struct drm_device *dev,
 
 /* Find the result with the highest level enabled. Check for enable_fbc_wm in
  * case both are at the same level. Prefer r1 in case they're the same. */
-struct hsw_wm_values *hsw_find_best_result(struct hsw_wm_values *r1,
-                                           struct hsw_wm_values *r2)
+static struct hsw_wm_values *hsw_find_best_result(struct hsw_wm_values *r1,
+                                                  struct hsw_wm_values *r2)
 {
         int i, val_r1 = 0, val_r2 = 0;
 
@@ -3076,19 +3075,12 @@ void gen6_set_rps(struct drm_device *dev, u8 val)
  */
 static void vlv_update_rps_cur_delay(struct drm_i915_private *dev_priv)
 {
-        unsigned long timeout = jiffies + msecs_to_jiffies(10);
         u32 pval;
 
         WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
 
-        do {
-                pval = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
-                if (time_after(jiffies, timeout)) {
-                        DRM_DEBUG_DRIVER("timed out waiting for Punit\n");
-                        break;
-                }
-                udelay(10);
-        } while (pval & 1);
+        if (wait_for(((pval = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS)) & GENFREQSTATUS) == 0, 10))
+                DRM_DEBUG_DRIVER("timed out waiting for Punit\n");
 
         pval >>= 8;
 
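The open-coded jiffies/udelay loop gives way to the driver's `wait_for()` poll helper, which keeps the timeout bookkeeping and the condition in one expression. A rough sketch of such a helper, assuming millisecond timeouts and a sleeping context (the real i915 macro differs in detail):

    #include <linux/delay.h>
    #include <linux/errno.h>
    #include <linux/jiffies.h>

    /* Sketch of a wait_for()-style helper: evaluates to 0 once COND holds,
     * -ETIMEDOUT after TIMEOUT_MS milliseconds.  COND is re-checked once
     * after the deadline so a late success is not misreported. */
    #define sketch_wait_for(COND, TIMEOUT_MS) ({ \
            unsigned long timeout__ = jiffies + msecs_to_jiffies(TIMEOUT_MS); \
            int ret__ = 0; \
            while (!(COND)) { \
                    if (time_after(jiffies, timeout__)) { \
                            if (!(COND)) \
                                    ret__ = -ETIMEDOUT; \
                            break; \
                    } \
                    usleep_range(1000, 2000); \
            } \
            ret__; \
    })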
@@ -3143,9 +3135,9 @@ static void gen6_disable_rps(struct drm_device *dev)
          * register (PMIMR) to mask PM interrupts. The only risk is in leaving
          * stale bits in PMIIR and PMIMR which gen6_enable_rps will clean up. */
 
-        spin_lock_irq(&dev_priv->rps.lock);
+        spin_lock_irq(&dev_priv->irq_lock);
         dev_priv->rps.pm_iir = 0;
-        spin_unlock_irq(&dev_priv->rps.lock);
+        spin_unlock_irq(&dev_priv->irq_lock);
 
         I915_WRITE(GEN6_PMIIR, GEN6_PM_RPS_EVENTS);
 }
@@ -3162,9 +3154,9 @@ static void valleyview_disable_rps(struct drm_device *dev)
          * register (PMIMR) to mask PM interrupts. The only risk is in leaving
          * stale bits in PMIIR and PMIMR which gen6_enable_rps will clean up. */
 
-        spin_lock_irq(&dev_priv->rps.lock);
+        spin_lock_irq(&dev_priv->irq_lock);
         dev_priv->rps.pm_iir = 0;
-        spin_unlock_irq(&dev_priv->rps.lock);
+        spin_unlock_irq(&dev_priv->irq_lock);
 
         I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
 
@@ -3329,13 +3321,13 @@ static void gen6_enable_rps(struct drm_device *dev)
 
         /* requires MSI enabled */
         I915_WRITE(GEN6_PMIER, I915_READ(GEN6_PMIER) | GEN6_PM_RPS_EVENTS);
-        spin_lock_irq(&dev_priv->rps.lock);
+        spin_lock_irq(&dev_priv->irq_lock);
         /* FIXME: Our interrupt enabling sequence is bonghits.
          * dev_priv->rps.pm_iir really should be 0 here. */
         dev_priv->rps.pm_iir = 0;
         I915_WRITE(GEN6_PMIMR, I915_READ(GEN6_PMIMR) & ~GEN6_PM_RPS_EVENTS);
         I915_WRITE(GEN6_PMIIR, GEN6_PM_RPS_EVENTS);
-        spin_unlock_irq(&dev_priv->rps.lock);
+        spin_unlock_irq(&dev_priv->irq_lock);
         /* unmask all PM interrupts */
         I915_WRITE(GEN6_PMINTRMSK, 0);
 
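These hunks are the "kill dev_priv->rps.lock" item from the tag description: the software copy of the PM interrupt bits moves under the one interrupt spinlock, so the IRQ handler and the RPS enable/disable paths serialise on the same `irq_lock`. A distilled sketch of the resulting single-lock rule (the function name is illustrative):

    #include <linux/spinlock.h>

    /* Sketch: after the rework, every access to the cached PM IIR bits,
     * from process context or the IRQ handler, takes the same irq_lock. */
    static void clear_cached_pm_iir(struct drm_i915_private *dev_priv)
    {
            spin_lock_irq(&dev_priv->irq_lock);
            dev_priv->rps.pm_iir = 0;       /* previously guarded by rps.lock */
            spin_unlock_irq(&dev_priv->irq_lock);
    }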
@@ -3482,7 +3474,7 @@ static void valleyview_setup_pctx(struct drm_device *dev)
                 pcbr_offset = (pcbr & (~4095)) - dev_priv->mm.stolen_base;
                 pctx = i915_gem_object_create_stolen_for_preallocated(dev_priv->dev,
                                                                       pcbr_offset,
-                                                                      -1,
+                                                                      I915_GTT_OFFSET_NONE,
                                                                       pctx_size);
                 goto out;
         }
@@ -3609,10 +3601,10 @@ static void valleyview_enable_rps(struct drm_device *dev)
 
         /* requires MSI enabled */
         I915_WRITE(GEN6_PMIER, GEN6_PM_RPS_EVENTS);
-        spin_lock_irq(&dev_priv->rps.lock);
+        spin_lock_irq(&dev_priv->irq_lock);
         WARN_ON(dev_priv->rps.pm_iir != 0);
         I915_WRITE(GEN6_PMIMR, 0);
-        spin_unlock_irq(&dev_priv->rps.lock);
+        spin_unlock_irq(&dev_priv->irq_lock);
         /* enable all PM interrupts */
         I915_WRITE(GEN6_PMINTRMSK, 0);
 
@@ -3708,7 +3700,7 @@ static void ironlake_enable_rc6(struct drm_device *dev)
 
         intel_ring_emit(ring, MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN);
         intel_ring_emit(ring, MI_SET_CONTEXT);
-        intel_ring_emit(ring, dev_priv->ips.renderctx->gtt_offset |
+        intel_ring_emit(ring, i915_gem_obj_ggtt_offset(dev_priv->ips.renderctx) |
                         MI_MM_SPACE_GTT |
                         MI_SAVE_EXT_STATE_EN |
                         MI_RESTORE_EXT_STATE_EN |
@@ -3731,7 +3723,7 @@ static void ironlake_enable_rc6(struct drm_device *dev)
                 return;
         }
 
-        I915_WRITE(PWRCTXA, dev_priv->ips.pwrctx->gtt_offset | PWRCTX_EN);
+        I915_WRITE(PWRCTXA, i915_gem_obj_ggtt_offset(dev_priv->ips.pwrctx) | PWRCTX_EN);
         I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
 }
 
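valleyview_setup_pctx() now passes `I915_GTT_OFFSET_NONE` instead of a bare -1, naming the "no caller-chosen GTT offset" case for stolen-memory preallocations. A sketch of the sentinel and a consumer-side check, assuming the sentinel wraps the -1 it replaces (the helper is hypothetical):

    #include <linux/types.h>

    #define SKETCH_GTT_OFFSET_NONE ((u32)-1)

    /* Sketch: a preallocated object either claims an exact, BIOS-decided
     * offset or lets the allocator choose one. */
    static inline bool has_fixed_gtt_offset(u32 gtt_offset)
    {
            return gtt_offset != SKETCH_GTT_OFFSET_NONE;
    }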
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 664118d8c1d6..8527ea05124b 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -440,14 +440,14 @@ static int init_ring_common(struct intel_ring_buffer *ring)
          * registers with the above sequence (the readback of the HEAD registers
          * also enforces ordering), otherwise the hw might lose the new ring
          * register values. */
-        I915_WRITE_START(ring, obj->gtt_offset);
+        I915_WRITE_START(ring, i915_gem_obj_ggtt_offset(obj));
         I915_WRITE_CTL(ring,
                         ((ring->size - PAGE_SIZE) & RING_NR_PAGES)
                         | RING_VALID);
 
         /* If the head is still not zero, the ring is dead */
         if (wait_for((I915_READ_CTL(ring) & RING_VALID) != 0 &&
-                     I915_READ_START(ring) == obj->gtt_offset &&
+                     I915_READ_START(ring) == i915_gem_obj_ggtt_offset(obj) &&
                      (I915_READ_HEAD(ring) & HEAD_ADDR) == 0, 50)) {
                 DRM_ERROR("%s initialization failed "
                                 "ctl %08x head %08x tail %08x start %08x\n",
@@ -505,7 +505,7 @@ init_pipe_control(struct intel_ring_buffer *ring)
         if (ret)
                 goto err_unref;
 
-        pc->gtt_offset = obj->gtt_offset;
+        pc->gtt_offset = i915_gem_obj_ggtt_offset(obj);
         pc->cpu_page = kmap(sg_page(obj->pages->sgl));
         if (pc->cpu_page == NULL) {
                 ret = -ENOMEM;
@@ -836,7 +836,7 @@ gen5_ring_get_irq(struct intel_ring_buffer *ring)
                 return false;
 
         spin_lock_irqsave(&dev_priv->irq_lock, flags);
-        if (ring->irq_refcount.gt++ == 0) {
+        if (ring->irq_refcount++ == 0) {
                 dev_priv->gt_irq_mask &= ~ring->irq_enable_mask;
                 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
                 POSTING_READ(GTIMR);
@@ -854,7 +854,7 @@ gen5_ring_put_irq(struct intel_ring_buffer *ring)
         unsigned long flags;
 
         spin_lock_irqsave(&dev_priv->irq_lock, flags);
-        if (--ring->irq_refcount.gt == 0) {
+        if (--ring->irq_refcount == 0) {
                 dev_priv->gt_irq_mask |= ring->irq_enable_mask;
                 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
                 POSTING_READ(GTIMR);
@@ -873,7 +873,7 @@ i9xx_ring_get_irq(struct intel_ring_buffer *ring)
                 return false;
 
         spin_lock_irqsave(&dev_priv->irq_lock, flags);
-        if (ring->irq_refcount.gt++ == 0) {
+        if (ring->irq_refcount++ == 0) {
                 dev_priv->irq_mask &= ~ring->irq_enable_mask;
                 I915_WRITE(IMR, dev_priv->irq_mask);
                 POSTING_READ(IMR);
@@ -891,7 +891,7 @@ i9xx_ring_put_irq(struct intel_ring_buffer *ring)
         unsigned long flags;
 
         spin_lock_irqsave(&dev_priv->irq_lock, flags);
-        if (--ring->irq_refcount.gt == 0) {
+        if (--ring->irq_refcount == 0) {
                 dev_priv->irq_mask |= ring->irq_enable_mask;
                 I915_WRITE(IMR, dev_priv->irq_mask);
                 POSTING_READ(IMR);
@@ -910,7 +910,7 @@ i8xx_ring_get_irq(struct intel_ring_buffer *ring)
                 return false;
 
         spin_lock_irqsave(&dev_priv->irq_lock, flags);
-        if (ring->irq_refcount.gt++ == 0) {
+        if (ring->irq_refcount++ == 0) {
                 dev_priv->irq_mask &= ~ring->irq_enable_mask;
                 I915_WRITE16(IMR, dev_priv->irq_mask);
                 POSTING_READ16(IMR);
@@ -928,7 +928,7 @@ i8xx_ring_put_irq(struct intel_ring_buffer *ring)
         unsigned long flags;
 
         spin_lock_irqsave(&dev_priv->irq_lock, flags);
-        if (--ring->irq_refcount.gt == 0) {
+        if (--ring->irq_refcount == 0) {
                 dev_priv->irq_mask |= ring->irq_enable_mask;
                 I915_WRITE16(IMR, dev_priv->irq_mask);
                 POSTING_READ16(IMR);
@@ -1021,7 +1021,7 @@ gen6_ring_get_irq(struct intel_ring_buffer *ring)
         gen6_gt_force_wake_get(dev_priv);
 
         spin_lock_irqsave(&dev_priv->irq_lock, flags);
-        if (ring->irq_refcount.gt++ == 0) {
+        if (ring->irq_refcount++ == 0) {
                 if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS)
                         I915_WRITE_IMR(ring,
                                        ~(ring->irq_enable_mask |
@@ -1045,7 +1045,7 @@ gen6_ring_put_irq(struct intel_ring_buffer *ring)
         unsigned long flags;
 
         spin_lock_irqsave(&dev_priv->irq_lock, flags);
-        if (--ring->irq_refcount.gt == 0) {
+        if (--ring->irq_refcount == 0) {
                 if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS)
                         I915_WRITE_IMR(ring,
                                        ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
@@ -1070,14 +1070,14 @@ hsw_vebox_get_irq(struct intel_ring_buffer *ring)
         if (!dev->irq_enabled)
                 return false;
 
-        spin_lock_irqsave(&dev_priv->rps.lock, flags);
-        if (ring->irq_refcount.pm++ == 0) {
+        spin_lock_irqsave(&dev_priv->irq_lock, flags);
+        if (ring->irq_refcount++ == 0) {
                 u32 pm_imr = I915_READ(GEN6_PMIMR);
                 I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
                 I915_WRITE(GEN6_PMIMR, pm_imr & ~ring->irq_enable_mask);
                 POSTING_READ(GEN6_PMIMR);
         }
-        spin_unlock_irqrestore(&dev_priv->rps.lock, flags);
+        spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 
         return true;
 }
@@ -1092,14 +1092,14 @@ hsw_vebox_put_irq(struct intel_ring_buffer *ring)
         if (!dev->irq_enabled)
                 return;
 
-        spin_lock_irqsave(&dev_priv->rps.lock, flags);
-        if (--ring->irq_refcount.pm == 0) {
+        spin_lock_irqsave(&dev_priv->irq_lock, flags);
+        if (--ring->irq_refcount == 0) {
                 u32 pm_imr = I915_READ(GEN6_PMIMR);
                 I915_WRITE_IMR(ring, ~0);
                 I915_WRITE(GEN6_PMIMR, pm_imr | ring->irq_enable_mask);
                 POSTING_READ(GEN6_PMIMR);
         }
-        spin_unlock_irqrestore(&dev_priv->rps.lock, flags);
+        spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 }
 
 static int
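With the VECS refcount folded in ("unify ring irq refcounts (again)" in the tag description), every ring now uses the same first-user-enables / last-user-disables scheme under `irq_lock`. A self-contained sketch of that pattern, with placeholder types standing in for intel_ring_buffer and drm_i915_private:

    #include <linux/spinlock.h>
    #include <linux/types.h>

    struct sketch_dev {
            spinlock_t irq_lock;
            u32 irq_mask;           /* cached interrupt-mask register */
    };

    struct sketch_ring {
            struct sketch_dev *dev;
            unsigned irq_refcount;  /* protected by dev->irq_lock */
            u32 irq_enable_mask;
    };

    /* Hardware is touched only on the 0 -> 1 and 1 -> 0 transitions;
     * the refcount and cached mask share one spinlock. */
    static bool sketch_ring_get_irq(struct sketch_ring *ring)
    {
            struct sketch_dev *dev = ring->dev;
            unsigned long flags;

            spin_lock_irqsave(&dev->irq_lock, flags);
            if (ring->irq_refcount++ == 0)
                    dev->irq_mask &= ~ring->irq_enable_mask; /* unmask */
            spin_unlock_irqrestore(&dev->irq_lock, flags);
            return true;
    }

    static void sketch_ring_put_irq(struct sketch_ring *ring)
    {
            struct sketch_dev *dev = ring->dev;
            unsigned long flags;

            spin_lock_irqsave(&dev->irq_lock, flags);
            if (--ring->irq_refcount == 0)
                    dev->irq_mask |= ring->irq_enable_mask;  /* mask */
            spin_unlock_irqrestore(&dev->irq_lock, flags);
    }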
@@ -1144,7 +1144,7 @@ i830_dispatch_execbuffer(struct intel_ring_buffer *ring,
                 intel_ring_advance(ring);
         } else {
                 struct drm_i915_gem_object *obj = ring->private;
-                u32 cs_offset = obj->gtt_offset;
+                u32 cs_offset = i915_gem_obj_ggtt_offset(obj);
 
                 if (len > I830_BATCH_LIMIT)
                         return -ENOSPC;
@@ -1229,7 +1229,7 @@ static int init_status_page(struct intel_ring_buffer *ring)
                 goto err_unref;
         }
 
-        ring->status_page.gfx_addr = obj->gtt_offset;
+        ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(obj);
         ring->status_page.page_addr = kmap(sg_page(obj->pages->sgl));
         if (ring->status_page.page_addr == NULL) {
                 ret = -ENOMEM;
@@ -1316,7 +1316,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
                 goto err_unpin;
 
         ring->virtual_start =
-                ioremap_wc(dev_priv->gtt.mappable_base + obj->gtt_offset,
+                ioremap_wc(dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj),
                            ring->size);
         if (ring->virtual_start == NULL) {
                 DRM_ERROR("Failed to map ringbuffer.\n");
@@ -2008,8 +2008,7 @@ int intel_init_vebox_ring_buffer(struct drm_device *dev)
         ring->add_request = gen6_add_request;
         ring->get_seqno = gen6_ring_get_seqno;
         ring->set_seqno = ring_set_seqno;
-        ring->irq_enable_mask = PM_VEBOX_USER_INTERRUPT |
-                PM_VEBOX_CS_ERROR_INTERRUPT;
+        ring->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
         ring->irq_get = hsw_vebox_get_irq;
         ring->irq_put = hsw_vebox_put_irq;
         ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 799f04c9da45..6e38256d41e1 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -78,10 +78,7 @@ struct intel_ring_buffer {
          */
         u32 last_retired_head;
 
-        struct {
-                u32 gt; /* protected by dev_priv->irq_lock */
-                u32 pm; /* protected by dev_priv->rps.lock (sucks) */
-        } irq_refcount;
+        unsigned irq_refcount; /* protected by dev_priv->irq_lock */
         u32 irq_enable_mask; /* bitmask to enable ring interrupt */
         u32 trace_irq_seqno;
         u32 sync_seqno[I915_NUM_RINGS-1];
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 2628d5622449..798df114cfd3 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -1357,22 +1357,21 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
         }
 
         /* Cross check the port pixel multiplier with the sdvo encoder state. */
-        intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_CLOCK_RATE_MULT, &val, 1);
-        switch (val) {
-        case SDVO_CLOCK_RATE_MULT_1X:
-                encoder_pixel_multiplier = 1;
-                break;
-        case SDVO_CLOCK_RATE_MULT_2X:
-                encoder_pixel_multiplier = 2;
-                break;
-        case SDVO_CLOCK_RATE_MULT_4X:
-                encoder_pixel_multiplier = 4;
-                break;
+        if (intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_CLOCK_RATE_MULT,
+                                 &val, 1)) {
+                switch (val) {
+                case SDVO_CLOCK_RATE_MULT_1X:
+                        encoder_pixel_multiplier = 1;
+                        break;
+                case SDVO_CLOCK_RATE_MULT_2X:
+                        encoder_pixel_multiplier = 2;
+                        break;
+                case SDVO_CLOCK_RATE_MULT_4X:
+                        encoder_pixel_multiplier = 4;
+                        break;
+                }
         }
 
-        if(HAS_PCH_SPLIT(dev))
-                return; /* no pixel multiplier readout support yet */
-
         WARN(encoder_pixel_multiplier != pipe_config->pixel_multiplier,
              "SDVO pixel multiplier mismatch, port: %i, encoder: %i\n",
              pipe_config->pixel_multiplier, encoder_pixel_multiplier);
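The SDVO readout now folds the hardware-reported multiplier into the cross-check only when `intel_sdvo_get_value()` reports success, and the `HAS_PCH_SPLIT` early return that skipped the check entirely is dropped. A sketch of that defensive-readout shape, with hypothetical names in place of the SDVO helpers and spec constants:

    #include <linux/types.h>

    struct sdvo_dev;    /* opaque stand-in for intel_sdvo */

    /* Hypothetical query helper: true only when the device answered. */
    bool sdvo_get_value(struct sdvo_dev *sdvo, u8 cmd, void *val, int len);

    enum { SKETCH_MULT_1X = 1, SKETCH_MULT_2X = 2, SKETCH_MULT_4X = 4 };
    /* illustrative encodings, not the SDVO spec values */

    /* Sketch: trust the reported multiplier only when the query succeeded;
     * otherwise keep the default so a later cross-check compares against it. */
    static int sdvo_read_pixel_multiplier(struct sdvo_dev *sdvo, u8 cmd)
    {
            int mult = 1;   /* default when readout is unavailable */
            u8 val;

            if (sdvo_get_value(sdvo, cmd, &val, sizeof(val))) {
                    switch (val) {
                    case SKETCH_MULT_1X: mult = 1; break;
                    case SKETCH_MULT_2X: mult = 2; break;
                    case SKETCH_MULT_4X: mult = 4; break;
                    }
            }
            return mult;
    }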
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 1fa5612a4572..55bdf70b548b 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -133,7 +133,7 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_framebuffer *fb,
 
         I915_WRITE(SPSIZE(pipe, plane), (crtc_h << 16) | crtc_w);
         I915_WRITE(SPCNTR(pipe, plane), sprctl);
-        I915_MODIFY_DISPBASE(SPSURF(pipe, plane), obj->gtt_offset +
+        I915_MODIFY_DISPBASE(SPSURF(pipe, plane), i915_gem_obj_ggtt_offset(obj) +
                              sprsurf_offset);
         POSTING_READ(SPSURF(pipe, plane));
 }
@@ -308,7 +308,8 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
         if (intel_plane->can_scale)
                 I915_WRITE(SPRSCALE(pipe), sprscale);
         I915_WRITE(SPRCTL(pipe), sprctl);
-        I915_MODIFY_DISPBASE(SPRSURF(pipe), obj->gtt_offset + sprsurf_offset);
+        I915_MODIFY_DISPBASE(SPRSURF(pipe),
+                             i915_gem_obj_ggtt_offset(obj) + sprsurf_offset);
         POSTING_READ(SPRSURF(pipe));
 
         /* potentially re-enable LP watermarks */
@@ -478,7 +479,8 @@ ilk_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
         I915_WRITE(DVSSIZE(pipe), (crtc_h << 16) | crtc_w);
         I915_WRITE(DVSSCALE(pipe), dvsscale);
         I915_WRITE(DVSCNTR(pipe), dvscntr);
-        I915_MODIFY_DISPBASE(DVSSURF(pipe), obj->gtt_offset + dvssurf_offset);
+        I915_MODIFY_DISPBASE(DVSSURF(pipe),
+                             i915_gem_obj_ggtt_offset(obj) + dvssurf_offset);
         POSTING_READ(DVSSURF(pipe));
 }
 
diff --git a/include/drm/drm_mm.h b/include/drm/drm_mm.h
index 4d06edb56d5f..b87d05e17d46 100644
--- a/include/drm/drm_mm.h
+++ b/include/drm/drm_mm.h
@@ -138,10 +138,7 @@ static inline unsigned long drm_mm_hole_node_end(struct drm_mm_node *hole_node)
 /*
  * Basic range manager support (drm_mm.c)
  */
-extern struct drm_mm_node *drm_mm_create_block(struct drm_mm *mm,
-                                               unsigned long start,
-                                               unsigned long size,
-                                               bool atomic);
+extern int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node);
 extern struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *node,
                                                     unsigned long size,
                                                     unsigned alignment,
@@ -155,6 +152,7 @@ extern struct drm_mm_node *drm_mm_get_block_range_generic(
                                                     unsigned long start,
                                                     unsigned long end,
                                                     int atomic);
+
 static inline struct drm_mm_node *drm_mm_get_block(struct drm_mm_node *parent,
                                                    unsigned long size,
                                                    unsigned alignment)
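drm_mm_create_block(), which allocated and returned a node covering a fixed range, gives way to drm_mm_reserve_node(), which claims a range described by a caller-owned node; that is the shape needed once drivers embed drm_mm_node instead of pointing at one, as the drm_mm prep work in this pull intends. A usage sketch, assuming the caller fills start and size before the call:

    #include <drm/drm_mm.h>

    /* Sketch: claim a fixed, pre-decided range (e.g. a BIOS-preallocated
     * buffer) with an embedded, caller-owned node.  Returns 0 on success,
     * -ENOSPC when the range is already occupied. */
    static int reserve_fixed_range(struct drm_mm *mm, struct drm_mm_node *node,
                                   unsigned long start, unsigned long size)
    {
            node->start = start;
            node->size = size;
            return drm_mm_reserve_node(mm, node);
    }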