Diffstat (limited to 'drivers/gpu/drm')
-rw-r--r--  drivers/gpu/drm/drm_info.c                          9
-rw-r--r--  drivers/gpu/drm/drm_irq.c                          29
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c                82
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c                    51
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c                    30
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h                   137
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c                   305
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_debug.c              45
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_evict.c               5
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c        180
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_tiling.c             31
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c                   218
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h                   500
-rw-r--r--  drivers/gpu/drm/i915/i915_suspend.c               435
-rw-r--r--  drivers/gpu/drm/i915/i915_trace.h                 301
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.c                  53
-rw-r--r--  drivers/gpu/drm/i915/intel_crt.c                   33
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c             1927
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c                   191
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h                   14
-rw-r--r--  drivers/gpu/drm/i915/intel_dvo.c                    2
-rw-r--r--  drivers/gpu/drm/i915/intel_hdmi.c                  50
-rw-r--r--  drivers/gpu/drm/i915/intel_i2c.c                    3
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c                  23
-rw-r--r--  drivers/gpu/drm/i915/intel_modes.c                 30
-rw-r--r--  drivers/gpu/drm/i915/intel_opregion.c               4
-rw-r--r--  drivers/gpu/drm/i915/intel_overlay.c               41
-rw-r--r--  drivers/gpu/drm/i915/intel_panel.c                 56
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c            56
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.h            42
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c                 123
-rw-r--r--  drivers/gpu/drm/i915/intel_tv.c                    53
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo.c                5
-rw-r--r--  drivers/gpu/drm/radeon/atombios_crtc.c            100
-rw-r--r--  drivers/gpu/drm/radeon/evergreen.c                 22
-rw-r--r--  drivers/gpu/drm/radeon/evergreen_blit_kms.c        23
-rw-r--r--  drivers/gpu/drm/radeon/evergreen_blit_shaders.c     8
-rw-r--r--  drivers/gpu/drm/radeon/evergreend.h                 1
-rw-r--r--  drivers/gpu/drm/radeon/mkregtable.c                 5
-rw-r--r--  drivers/gpu/drm/radeon/r100.c                      65
-rw-r--r--  drivers/gpu/drm/radeon/r100_track.h                13
-rw-r--r--  drivers/gpu/drm/radeon/r200.c                      18
-rw-r--r--  drivers/gpu/drm/radeon/r300.c                      44
-rw-r--r--  drivers/gpu/drm/radeon/r300_reg.h                   2
-rw-r--r--  drivers/gpu/drm/radeon/r600.c                      22
-rw-r--r--  drivers/gpu/drm/radeon/r600_blit.c                 11
-rw-r--r--  drivers/gpu/drm/radeon/r600_blit_kms.c             29
-rw-r--r--  drivers/gpu/drm/radeon/r600_blit_shaders.c          4
-rw-r--r--  drivers/gpu/drm/radeon/r600_cp.c                   31
-rw-r--r--  drivers/gpu/drm/radeon/r600_cs.c                   46
-rw-r--r--  drivers/gpu/drm/radeon/r600d.h                      9
-rw-r--r--  drivers/gpu/drm/radeon/radeon_atombios.c           48
-rw-r--r--  drivers/gpu/drm/radeon/radeon_combios.c            47
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c              4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_display.c            12
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.h                 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_encoders.c           24
-rw-r--r--  drivers/gpu/drm/radeon/radeon_fb.c                  5
-rw-r--r--  drivers/gpu/drm/radeon/radeon_mode.h                1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ttm.c                 4
-rw-r--r--  drivers/gpu/drm/radeon/reg_srcs/r300                6
-rw-r--r--  drivers/gpu/drm/radeon/reg_srcs/r420                7
-rw-r--r--  drivers/gpu/drm/radeon/reg_srcs/rs600               6
-rw-r--r--  drivers/gpu/drm/radeon/reg_srcs/rv515               7
-rw-r--r--  drivers/gpu/drm/radeon/rs690.c                     12
-rw-r--r--  drivers/gpu/drm/radeon/rv770.c                      6
-rw-r--r--  drivers/gpu/drm/radeon/rv770d.h                     8
67 files changed, 3481 insertions, 2234 deletions
diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
index 812aaac4438a..ab1162da70f8 100644
--- a/drivers/gpu/drm/drm_info.c
+++ b/drivers/gpu/drm/drm_info.c
@@ -272,17 +272,18 @@ int drm_vma_info(struct seq_file *m, void *data)
 #endif
 
 	mutex_lock(&dev->struct_mutex);
-	seq_printf(m, "vma use count: %d, high_memory = %p, 0x%08llx\n",
+	seq_printf(m, "vma use count: %d, high_memory = %pK, 0x%pK\n",
 		   atomic_read(&dev->vma_count),
-		   high_memory, (u64)virt_to_phys(high_memory));
+		   high_memory, (void *)virt_to_phys(high_memory));
 
 	list_for_each_entry(pt, &dev->vmalist, head) {
 		vma = pt->vma;
 		if (!vma)
 			continue;
 		seq_printf(m,
-			   "\n%5d 0x%08lx-0x%08lx %c%c%c%c%c%c 0x%08lx000",
-			   pt->pid, vma->vm_start, vma->vm_end,
+			   "\n%5d 0x%pK-0x%pK %c%c%c%c%c%c 0x%08lx000",
+			   pt->pid,
+			   (void *)vma->vm_start, (void *)vma->vm_end,
 			   vma->vm_flags & VM_READ ? 'r' : '-',
 			   vma->vm_flags & VM_WRITE ? 'w' : '-',
 			   vma->vm_flags & VM_EXEC ? 'x' : '-',
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index cb49685bde01..a34ef97d3c81 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -154,8 +154,10 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc)
 	 * available. In that case we can't account for this and just
 	 * hope for the best.
 	 */
-	if ((vblrc > 0) && (abs(diff_ns) > 1000000))
+	if ((vblrc > 0) && (abs64(diff_ns) > 1000000)) {
 		atomic_inc(&dev->_vblank_count[crtc]);
+		smp_mb__after_atomic_inc();
+	}
 
 	/* Invalidate all timestamps while vblank irq's are off. */
 	clear_vblank_timestamps(dev, crtc);
@@ -481,6 +483,12 @@ void drm_calc_timestamping_constants(struct drm_crtc *crtc)
 	/* Dot clock in Hz: */
 	dotclock = (u64) crtc->hwmode.clock * 1000;
 
+	/* Fields of interlaced scanout modes are only halve a frame duration.
+	 * Double the dotclock to get halve the frame-/line-/pixelduration.
+	 */
+	if (crtc->hwmode.flags & DRM_MODE_FLAG_INTERLACE)
+		dotclock *= 2;
+
 	/* Valid dotclock? */
 	if (dotclock > 0) {
 		/* Convert scanline length in pixels and video dot clock to
@@ -593,14 +601,6 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
 		return -EAGAIN;
 	}
 
-	/* Don't know yet how to handle interlaced or
-	 * double scan modes. Just no-op for now.
-	 */
-	if (mode->flags & (DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLSCAN)) {
-		DRM_DEBUG("crtc %d: Noop due to unsupported mode.\n", crtc);
-		return -ENOTSUPP;
-	}
-
 	/* Get current scanout position with system timestamp.
 	 * Repeat query up to DRM_TIMESTAMP_MAXRETRIES times
 	 * if single query takes longer than max_error nanoseconds.
@@ -848,10 +848,11 @@ static void drm_update_vblank_count(struct drm_device *dev, int crtc)
 	if (rc) {
 		tslot = atomic_read(&dev->_vblank_count[crtc]) + diff;
 		vblanktimestamp(dev, crtc, tslot) = t_vblank;
-		smp_wmb();
 	}
 
+	smp_mb__before_atomic_inc();
 	atomic_add(diff, &dev->_vblank_count[crtc]);
+	smp_mb__after_atomic_inc();
 }
 
 /**
@@ -1001,7 +1002,8 @@ int drm_modeset_ctl(struct drm_device *dev, void *data,
 			  struct drm_file *file_priv)
 {
 	struct drm_modeset_ctl *modeset = data;
-	int crtc, ret = 0;
+	int ret = 0;
+	unsigned int crtc;
 
 	/* If drm_vblank_init() hasn't been called yet, just no-op */
 	if (!dev->num_crtcs)
@@ -1283,15 +1285,16 @@ bool drm_handle_vblank(struct drm_device *dev, int crtc)
 	 * e.g., due to spurious vblank interrupts. We need to
 	 * ignore those for accounting.
 	 */
-	if (abs(diff_ns) > DRM_REDUNDANT_VBLIRQ_THRESH_NS) {
+	if (abs64(diff_ns) > DRM_REDUNDANT_VBLIRQ_THRESH_NS) {
 		/* Store new timestamp in ringbuffer. */
 		vblanktimestamp(dev, crtc, vblcount + 1) = tvblank;
-		smp_wmb();
 
 		/* Increment cooked vblank count. This also atomically commits
 		 * the timestamp computed above.
 		 */
+		smp_mb__before_atomic_inc();
 		atomic_inc(&dev->_vblank_count[crtc]);
+		smp_mb__after_atomic_inc();
 	} else {
 		DRM_DEBUG("crtc %d: Redundant vblirq ignored. diff_ns = %d\n",
 			  crtc, (int) diff_ns);
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 3601466c5502..09e0327fc6ce 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -326,21 +326,21 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
 	struct intel_crtc *crtc;
 
 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
-		const char *pipe = crtc->pipe ? "B" : "A";
-		const char *plane = crtc->plane ? "B" : "A";
+		const char pipe = pipe_name(crtc->pipe);
+		const char plane = plane_name(crtc->plane);
 		struct intel_unpin_work *work;
 
 		spin_lock_irqsave(&dev->event_lock, flags);
 		work = crtc->unpin_work;
 		if (work == NULL) {
-			seq_printf(m, "No flip due on pipe %s (plane %s)\n",
+			seq_printf(m, "No flip due on pipe %c (plane %c)\n",
 				   pipe, plane);
 		} else {
 			if (!work->pending) {
-				seq_printf(m, "Flip queued on pipe %s (plane %s)\n",
+				seq_printf(m, "Flip queued on pipe %c (plane %c)\n",
 					   pipe, plane);
 			} else {
-				seq_printf(m, "Flip pending (waiting for vsync) on pipe %s (plane %s)\n",
+				seq_printf(m, "Flip pending (waiting for vsync) on pipe %c (plane %c)\n",
 					   pipe, plane);
 			}
 			if (work->enable_stall_check)
@@ -458,7 +458,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	int ret, i;
+	int ret, i, pipe;
 
 	ret = mutex_lock_interruptible(&dev->struct_mutex);
 	if (ret)
@@ -471,10 +471,10 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
 			   I915_READ(IIR));
 		seq_printf(m, "Interrupt mask: %08x\n",
 			   I915_READ(IMR));
-		seq_printf(m, "Pipe A stat: %08x\n",
-			   I915_READ(PIPEASTAT));
-		seq_printf(m, "Pipe B stat: %08x\n",
-			   I915_READ(PIPEBSTAT));
+		for_each_pipe(pipe)
+			seq_printf(m, "Pipe %c stat: %08x\n",
+				   pipe_name(pipe),
+				   I915_READ(PIPESTAT(pipe)));
 	} else {
 		seq_printf(m, "North Display Interrupt enable: %08x\n",
 			   I915_READ(DEIER));
@@ -544,11 +544,11 @@ static int i915_hws_info(struct seq_file *m, void *data)
 	struct drm_device *dev = node->minor->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct intel_ring_buffer *ring;
-	volatile u32 *hws;
+	const volatile u32 __iomem *hws;
 	int i;
 
 	ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
-	hws = (volatile u32 *)ring->status_page.page_addr;
+	hws = (volatile u32 __iomem *)ring->status_page.page_addr;
 	if (hws == NULL)
 		return 0;
 
@@ -615,7 +615,7 @@ static int i915_ringbuffer_data(struct seq_file *m, void *data)
 	if (!ring->obj) {
 		seq_printf(m, "No ringbuffer setup\n");
 	} else {
-		u8 *virt = ring->virtual_start;
+		const u8 __iomem *virt = ring->virtual_start;
 		uint32_t off;
 
 		for (off = 0; off < ring->size; off += 4) {
@@ -805,15 +805,20 @@ static int i915_error_state(struct seq_file *m, void *unused)
 		}
 	}
 
-	if (error->ringbuffer) {
-		struct drm_i915_error_object *obj = error->ringbuffer;
-
-		seq_printf(m, "--- ringbuffer = 0x%08x\n", obj->gtt_offset);
-		offset = 0;
-		for (page = 0; page < obj->page_count; page++) {
-			for (elt = 0; elt < PAGE_SIZE/4; elt++) {
-				seq_printf(m, "%08x : %08x\n", offset, obj->pages[page][elt]);
-				offset += 4;
+	for (i = 0; i < ARRAY_SIZE(error->ringbuffer); i++) {
+		if (error->ringbuffer[i]) {
+			struct drm_i915_error_object *obj = error->ringbuffer[i];
+			seq_printf(m, "%s --- ringbuffer = 0x%08x\n",
+				   dev_priv->ring[i].name,
+				   obj->gtt_offset);
+			offset = 0;
+			for (page = 0; page < obj->page_count; page++) {
+				for (elt = 0; elt < PAGE_SIZE/4; elt++) {
+					seq_printf(m, "%08x : %08x\n",
+						   offset,
+						   obj->pages[page][elt]);
+					offset += 4;
+				}
 			}
 		}
 	}
@@ -862,19 +867,44 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
 		u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
 		u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
 		u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
+		u32 rpstat;
+		u32 rpupei, rpcurup, rpprevup;
+		u32 rpdownei, rpcurdown, rpprevdown;
 		int max_freq;
 
 		/* RPSTAT1 is in the GT power well */
-		__gen6_force_wake_get(dev_priv);
+		__gen6_gt_force_wake_get(dev_priv);
+
+		rpstat = I915_READ(GEN6_RPSTAT1);
+		rpupei = I915_READ(GEN6_RP_CUR_UP_EI);
+		rpcurup = I915_READ(GEN6_RP_CUR_UP);
+		rpprevup = I915_READ(GEN6_RP_PREV_UP);
+		rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI);
+		rpcurdown = I915_READ(GEN6_RP_CUR_DOWN);
+		rpprevdown = I915_READ(GEN6_RP_PREV_DOWN);
 
 		seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
-		seq_printf(m, "RPSTAT1: 0x%08x\n", I915_READ(GEN6_RPSTAT1));
+		seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
 		seq_printf(m, "Render p-state ratio: %d\n",
 			   (gt_perf_status & 0xff00) >> 8);
 		seq_printf(m, "Render p-state VID: %d\n",
 			   gt_perf_status & 0xff);
 		seq_printf(m, "Render p-state limit: %d\n",
 			   rp_state_limits & 0xff);
+		seq_printf(m, "CAGF: %dMHz\n", ((rpstat & GEN6_CAGF_MASK) >>
+						GEN6_CAGF_SHIFT) * 100);
+		seq_printf(m, "RP CUR UP EI: %dus\n", rpupei &
+			   GEN6_CURICONT_MASK);
+		seq_printf(m, "RP CUR UP: %dus\n", rpcurup &
+			   GEN6_CURBSYTAVG_MASK);
+		seq_printf(m, "RP PREV UP: %dus\n", rpprevup &
+			   GEN6_CURBSYTAVG_MASK);
+		seq_printf(m, "RP CUR DOWN EI: %dus\n", rpdownei &
+			   GEN6_CURIAVG_MASK);
+		seq_printf(m, "RP CUR DOWN: %dus\n", rpcurdown &
+			   GEN6_CURBSYTAVG_MASK);
+		seq_printf(m, "RP PREV DOWN: %dus\n", rpprevdown &
+			   GEN6_CURBSYTAVG_MASK);
 
 		max_freq = (rp_state_cap & 0xff0000) >> 16;
 		seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
@@ -888,7 +918,7 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
 		seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
 			   max_freq * 100);
 
-		__gen6_force_wake_put(dev_priv);
+		__gen6_gt_force_wake_put(dev_priv);
 	} else {
 		seq_printf(m, "no P-state info available\n");
 	}
@@ -1259,7 +1289,7 @@ static int i915_wedged_create(struct dentry *root, struct drm_minor *minor)
 }
 
 static struct drm_info_list i915_debugfs_list[] = {
-	{"i915_capabilities", i915_capabilities, 0, 0},
+	{"i915_capabilities", i915_capabilities, 0},
 	{"i915_gem_objects", i915_gem_object_info, 0},
 	{"i915_gem_gtt", i915_gem_gtt_info, 0},
 	{"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 17bd766f2081..72730377a01b 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -43,6 +43,17 @@
 #include <linux/slab.h>
 #include <acpi/video.h>
 
+static void i915_write_hws_pga(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	u32 addr;
+
+	addr = dev_priv->status_page_dmah->busaddr;
+	if (INTEL_INFO(dev)->gen >= 4)
+		addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
+	I915_WRITE(HWS_PGA, addr);
+}
+
 /**
  * Sets up the hardware status page for devices that need a physical address
  * in the register.
@@ -60,16 +71,13 @@ static int i915_init_phys_hws(struct drm_device *dev)
 		DRM_ERROR("Can not allocate hardware status page\n");
 		return -ENOMEM;
 	}
-	ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
-	dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;
+	ring->status_page.page_addr =
+		(void __force __iomem *)dev_priv->status_page_dmah->vaddr;
 
-	memset(ring->status_page.page_addr, 0, PAGE_SIZE);
+	memset_io(ring->status_page.page_addr, 0, PAGE_SIZE);
 
-	if (INTEL_INFO(dev)->gen >= 4)
-		dev_priv->dma_status_page |= (dev_priv->dma_status_page >> 28) &
-			0xf0;
+	i915_write_hws_pga(dev);
 
-	I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
 	DRM_DEBUG_DRIVER("Enabled hardware status page\n");
 	return 0;
 }
@@ -216,7 +224,7 @@ static int i915_dma_resume(struct drm_device * dev)
 	if (ring->status_page.gfx_addr != 0)
 		intel_ring_setup_status_page(ring);
 	else
-		I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
+		i915_write_hws_pga(dev);
 
 	DRM_DEBUG_DRIVER("Enabled hardware status page\n");
 
@@ -771,6 +779,9 @@ static int i915_getparam(struct drm_device *dev, void *data,
 	case I915_PARAM_HAS_EXEC_CONSTANTS:
 		value = INTEL_INFO(dev)->gen >= 4;
 		break;
+	case I915_PARAM_HAS_RELAXED_DELTA:
+		value = 1;
+		break;
 	default:
 		DRM_DEBUG_DRIVER("Unknown parameter %d\n",
 				 param->param);
@@ -859,8 +870,9 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
859 " G33 hw status page\n"); 870 " G33 hw status page\n");
860 return -ENOMEM; 871 return -ENOMEM;
861 } 872 }
862 ring->status_page.page_addr = dev_priv->hws_map.handle; 873 ring->status_page.page_addr =
863 memset(ring->status_page.page_addr, 0, PAGE_SIZE); 874 (void __force __iomem *)dev_priv->hws_map.handle;
875 memset_io(ring->status_page.page_addr, 0, PAGE_SIZE);
864 I915_WRITE(HWS_PGA, ring->status_page.gfx_addr); 876 I915_WRITE(HWS_PGA, ring->status_page.gfx_addr);
865 877
866 DRM_DEBUG_DRIVER("load hws HWS_PGA with gfx mem 0x%x\n", 878 DRM_DEBUG_DRIVER("load hws HWS_PGA with gfx mem 0x%x\n",
@@ -1895,6 +1907,17 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 	if (IS_GEN2(dev))
 		dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30));
 
+	/* 965GM sometimes incorrectly writes to hardware status page (HWS)
+	 * using 32bit addressing, overwriting memory if HWS is located
+	 * above 4GB.
+	 *
+	 * The documentation also mentions an issue with undefined
+	 * behaviour if any general state is accessed within a page above 4GB,
+	 * which also needs to be handled carefully.
+	 */
+	if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
+		dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32));
+
 	mmio_bar = IS_GEN2(dev) ? 1 : 0;
 	dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, 0);
 	if (!dev_priv->regs) {
@@ -2002,9 +2025,13 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 
 	spin_lock_init(&dev_priv->irq_lock);
 	spin_lock_init(&dev_priv->error_lock);
-	dev_priv->trace_irq_seqno = 0;
 
-	ret = drm_vblank_init(dev, I915_NUM_PIPE);
+	if (IS_MOBILE(dev) || !IS_GEN2(dev))
+		dev_priv->num_pipe = 2;
+	else
+		dev_priv->num_pipe = 1;
+
+	ret = drm_vblank_init(dev, dev_priv->num_pipe);
 	if (ret)
 		goto out_gem_unload;
 
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 9ad42d583493..c34a8dd31d02 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -43,16 +43,28 @@ module_param_named(modeset, i915_modeset, int, 0400);
 unsigned int i915_fbpercrtc = 0;
 module_param_named(fbpercrtc, i915_fbpercrtc, int, 0400);
 
+int i915_panel_ignore_lid = 0;
+module_param_named(panel_ignore_lid, i915_panel_ignore_lid, int, 0600);
+
 unsigned int i915_powersave = 1;
 module_param_named(powersave, i915_powersave, int, 0600);
 
+unsigned int i915_semaphores = 1;
+module_param_named(semaphores, i915_semaphores, int, 0600);
+
+unsigned int i915_enable_rc6 = 0;
+module_param_named(i915_enable_rc6, i915_enable_rc6, int, 0600);
+
 unsigned int i915_lvds_downclock = 0;
 module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400);
 
 unsigned int i915_panel_use_ssc = 1;
 module_param_named(lvds_use_ssc, i915_panel_use_ssc, int, 0600);
 
-bool i915_try_reset = true;
+int i915_vbt_sdvo_panel_type = -1;
+module_param_named(vbt_sdvo_panel_type, i915_vbt_sdvo_panel_type, int, 0600);
+
+static bool i915_try_reset = true;
 module_param_named(reset, i915_try_reset, bool, 0600);
 
 static struct drm_driver driver;
@@ -251,7 +263,7 @@ void intel_detect_pch (struct drm_device *dev)
 	}
 }
 
-void __gen6_force_wake_get(struct drm_i915_private *dev_priv)
+void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
 {
 	int count;
 
@@ -267,12 +279,22 @@ void __gen6_force_wake_get(struct drm_i915_private *dev_priv)
 	udelay(10);
 }
 
-void __gen6_force_wake_put(struct drm_i915_private *dev_priv)
+void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
 {
 	I915_WRITE_NOTRACE(FORCEWAKE, 0);
 	POSTING_READ(FORCEWAKE);
 }
 
+void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
+{
+	int loop = 500;
+	u32 fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
+	while (fifo < 20 && loop--) {
+		udelay(10);
+		fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
+	}
+}
+
 static int i915_drm_freeze(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -360,7 +382,7 @@ static int i915_drm_thaw(struct drm_device *dev)
 		/* Resume the modeset for every activated CRTC */
 		drm_helper_resume_force_mode(dev);
 
-		if (dev_priv->renderctx && dev_priv->pwrctx)
+		if (IS_IRONLAKE_M(dev))
 			ironlake_enable_rc6(dev);
 	}
 
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index a78197d43ce6..449650545bb4 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -49,17 +49,22 @@
 enum pipe {
 	PIPE_A = 0,
 	PIPE_B,
+	PIPE_C,
+	I915_MAX_PIPES
 };
+#define pipe_name(p) ((p) + 'A')
 
 enum plane {
 	PLANE_A = 0,
 	PLANE_B,
+	PLANE_C,
 };
-
-#define I915_NUM_PIPE 2
+#define plane_name(p) ((p) + 'A')
 
 #define I915_GEM_GPU_DOMAINS (~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
 
+#define for_each_pipe(p) for ((p) = 0; (p) < dev_priv->num_pipe; (p)++)
+
 /* Interface history:
  *
  * 1.1: Original.
@@ -75,10 +80,7 @@ enum plane {
 #define DRIVER_PATCHLEVEL	0
 
 #define WATCH_COHERENCY	0
-#define WATCH_EXEC	0
-#define WATCH_RELOC	0
 #define WATCH_LISTS	0
-#define WATCH_PWRITE	0
 
 #define I915_GEM_PHYS_CURSOR_0 1
 #define I915_GEM_PHYS_CURSOR_1 2
@@ -111,6 +113,7 @@ struct intel_opregion {
 	struct opregion_swsci *swsci;
 	struct opregion_asle *asle;
 	void *vbt;
+	u32 __iomem *lid_state;
 };
 #define OPREGION_SIZE            (8*1024)
 
@@ -144,8 +147,7 @@ struct intel_display_error_state;
 struct drm_i915_error_state {
 	u32 eir;
 	u32 pgtbl_er;
-	u32 pipeastat;
-	u32 pipebstat;
+	u32 pipestat[I915_MAX_PIPES];
 	u32 ipeir;
 	u32 ipehr;
 	u32 instdone;
@@ -172,7 +174,7 @@ struct drm_i915_error_state {
 		int page_count;
 		u32 gtt_offset;
 		u32 *pages[0];
-	} *ringbuffer, *batchbuffer[I915_NUM_RINGS];
+	} *ringbuffer[I915_NUM_RINGS], *batchbuffer[I915_NUM_RINGS];
 	struct drm_i915_error_buffer {
 		u32 size;
 		u32 name;
@@ -200,9 +202,7 @@ struct drm_i915_display_funcs {
 	void (*disable_fbc)(struct drm_device *dev);
 	int (*get_display_clock_speed)(struct drm_device *dev);
 	int (*get_fifo_size)(struct drm_device *dev, int plane);
-	void (*update_wm)(struct drm_device *dev, int planea_clock,
-			  int planeb_clock, int sr_hdisplay, int sr_htotal,
-			  int pixel_size);
+	void (*update_wm)(struct drm_device *dev);
 	/* clock updates for mode set */
 	/* cursor updates */
 	/* render clock increase/decrease */
@@ -274,7 +274,6 @@ typedef struct drm_i915_private {
 	uint32_t next_seqno;
 
 	drm_dma_handle_t *status_page_dmah;
-	dma_addr_t dma_status_page;
 	uint32_t counter;
 	drm_local_map_t hws_map;
 	struct drm_i915_gem_object *pwrctx;
@@ -289,7 +288,6 @@ typedef struct drm_i915_private {
 	int page_flipping;
 
 	atomic_t irq_received;
-	u32 trace_irq_seqno;
 
 	/* protects the irq masks */
 	spinlock_t irq_lock;
@@ -324,8 +322,6 @@ typedef struct drm_i915_private {
 	int cfb_plane;
 	int cfb_y;
 
-	int irq_enabled;
-
 	struct intel_opregion opregion;
 
 	/* overlay */
@@ -387,7 +383,6 @@ typedef struct drm_i915_private {
 	u32 saveDSPACNTR;
 	u32 saveDSPBCNTR;
 	u32 saveDSPARB;
-	u32 saveHWS;
 	u32 savePIPEACONF;
 	u32 savePIPEBCONF;
 	u32 savePIPEASRC;
@@ -615,6 +610,12 @@ typedef struct drm_i915_private {
 	struct delayed_work retire_work;
 
 	/**
+	 * Are we in a non-interruptible section of code like
+	 * modesetting?
+	 */
+	bool interruptible;
+
+	/**
 	 * Flag if the X Server, and thus DRM, is not currently in
 	 * control of the device.
 	 *
@@ -652,6 +653,7 @@ typedef struct drm_i915_private {
 	unsigned int lvds_border_bits;
 	/* Panel fitter placement and size for Ironlake+ */
 	u32 pch_pf_pos, pch_pf_size;
+	int panel_t3, panel_t12;
 
 	struct drm_crtc *plane_to_crtc_mapping[2];
 	struct drm_crtc *pipe_to_crtc_mapping[2];
@@ -698,6 +700,8 @@ typedef struct drm_i915_private {
 
 	/* list of fbdev register on this device */
 	struct intel_fbdev *fbdev;
+
+	struct drm_property *broadcast_rgb_property;
 } drm_i915_private_t;
 
 struct drm_i915_gem_object {
@@ -955,9 +959,13 @@ enum intel_chip_family {
 extern struct drm_ioctl_desc i915_ioctls[];
 extern int i915_max_ioctl;
 extern unsigned int i915_fbpercrtc;
+extern int i915_panel_ignore_lid;
 extern unsigned int i915_powersave;
+extern unsigned int i915_semaphores;
 extern unsigned int i915_lvds_downclock;
 extern unsigned int i915_panel_use_ssc;
+extern int i915_vbt_sdvo_panel_type;
+extern unsigned int i915_enable_rc6;
 
 extern int i915_suspend(struct drm_device *dev, pm_message_t state);
 extern int i915_resume(struct drm_device *dev);
@@ -996,8 +1004,6 @@ extern int i915_irq_emit(struct drm_device *dev, void *data,
 			 struct drm_file *file_priv);
 extern int i915_irq_wait(struct drm_device *dev, void *data,
 			 struct drm_file *file_priv);
-void i915_trace_irq_get(struct drm_device *dev, u32 seqno);
-extern void i915_enable_interrupt (struct drm_device *dev);
 
 extern irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS);
 extern void i915_driver_irq_preinstall(struct drm_device * dev);
@@ -1049,7 +1055,6 @@ extern void i915_mem_takedown(struct mem_block **heap);
 extern void i915_mem_release(struct drm_device * dev,
 			     struct drm_file *file_priv, struct mem_block *heap);
 /* i915_gem.c */
-int i915_gem_check_is_wedged(struct drm_device *dev);
 int i915_gem_init_ioctl(struct drm_device *dev, void *data,
 			struct drm_file *file_priv);
 int i915_gem_create_ioctl(struct drm_device *dev, void *data,
@@ -1092,8 +1097,7 @@ int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
 				struct drm_file *file_priv);
 void i915_gem_load(struct drm_device *dev);
 int i915_gem_init_object(struct drm_gem_object *obj);
-int __must_check i915_gem_flush_ring(struct drm_device *dev,
-				     struct intel_ring_buffer *ring,
+int __must_check i915_gem_flush_ring(struct intel_ring_buffer *ring,
 				     uint32_t invalidate_domains,
 				     uint32_t flush_domains);
 struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
@@ -1108,8 +1112,7 @@ void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
 void i915_gem_lastclose(struct drm_device *dev);
 
 int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
-int __must_check i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
-						bool interruptible);
+int __must_check i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj);
 void i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
 				    struct intel_ring_buffer *ring,
 				    u32 seqno);
@@ -1131,16 +1134,14 @@ i915_seqno_passed(uint32_t seq1, uint32_t seq2)
 }
 
 static inline u32
-i915_gem_next_request_seqno(struct drm_device *dev,
-			    struct intel_ring_buffer *ring)
+i915_gem_next_request_seqno(struct intel_ring_buffer *ring)
 {
-	drm_i915_private_t *dev_priv = dev->dev_private;
+	drm_i915_private_t *dev_priv = ring->dev->dev_private;
 	return ring->outstanding_lazy_request = dev_priv->next_seqno;
 }
 
 int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
-					   struct intel_ring_buffer *pipelined,
-					   bool interruptible);
+					   struct intel_ring_buffer *pipelined);
 int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj);
 
 void i915_gem_retire_requests(struct drm_device *dev);
@@ -1149,8 +1150,7 @@ void i915_gem_clflush_object(struct drm_i915_gem_object *obj);
 int __must_check i915_gem_object_set_domain(struct drm_i915_gem_object *obj,
 					    uint32_t read_domains,
 					    uint32_t write_domain);
-int __must_check i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj,
-					   bool interruptible);
+int __must_check i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj);
 int __must_check i915_gem_init_ringbuffer(struct drm_device *dev);
 void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
 void i915_gem_do_init(struct drm_device *dev,
@@ -1159,14 +1159,11 @@ void i915_gem_do_init(struct drm_device *dev,
 		      unsigned long end);
 int __must_check i915_gpu_idle(struct drm_device *dev);
 int __must_check i915_gem_idle(struct drm_device *dev);
-int __must_check i915_add_request(struct drm_device *dev,
-				  struct drm_file *file_priv,
-				  struct drm_i915_gem_request *request,
-				  struct intel_ring_buffer *ring);
-int __must_check i915_do_wait_request(struct drm_device *dev,
-				      uint32_t seqno,
-				      bool interruptible,
-				      struct intel_ring_buffer *ring);
+int __must_check i915_add_request(struct intel_ring_buffer *ring,
+				  struct drm_file *file,
+				  struct drm_i915_gem_request *request);
+int __must_check i915_wait_request(struct intel_ring_buffer *ring,
+				   uint32_t seqno);
 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
 int __must_check
 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,
@@ -1183,6 +1180,9 @@ void i915_gem_detach_phys_object(struct drm_device *dev,
 void i915_gem_free_all_phys_object(struct drm_device *dev);
 void i915_gem_release(struct drm_device *dev, struct drm_file *file);
 
+uint32_t
+i915_gem_get_unfenced_gtt_alignment(struct drm_i915_gem_object *obj);
+
 /* i915_gem_gtt.c */
 void i915_gem_restore_gtt_mappings(struct drm_device *dev);
 int __must_check i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj);
@@ -1315,7 +1315,7 @@ extern void intel_display_print_error_state(struct seq_file *m,
 #define __i915_read(x, y) \
 static inline u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \
 	u##x val = read##y(dev_priv->regs + reg); \
-	trace_i915_reg_rw('R', reg, val, sizeof(val)); \
+	trace_i915_reg_rw(false, reg, val, sizeof(val)); \
 	return val; \
 }
 __i915_read(8, b)
@@ -1326,7 +1326,7 @@ __i915_read(64, q)
 
 #define __i915_write(x, y) \
 static inline void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \
-	trace_i915_reg_rw('W', reg, val, sizeof(val)); \
+	trace_i915_reg_rw(true, reg, val, sizeof(val)); \
 	write##y(val, dev_priv->regs + reg); \
 }
 __i915_write(8, b)
@@ -1359,62 +1359,29 @@ __i915_write(64, q)
  * must be set to prevent GT core from power down and stale values being
  * returned.
  */
-void __gen6_force_wake_get(struct drm_i915_private *dev_priv);
-void __gen6_force_wake_put (struct drm_i915_private *dev_priv);
-static inline u32 i915_safe_read(struct drm_i915_private *dev_priv, u32 reg)
+void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv);
+void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv);
+void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv);
+
+static inline u32 i915_gt_read(struct drm_i915_private *dev_priv, u32 reg)
 {
 	u32 val;
 
 	if (dev_priv->info->gen >= 6) {
-		__gen6_force_wake_get(dev_priv);
+		__gen6_gt_force_wake_get(dev_priv);
 		val = I915_READ(reg);
-		__gen6_force_wake_put(dev_priv);
+		__gen6_gt_force_wake_put(dev_priv);
 	} else
 		val = I915_READ(reg);
 
 	return val;
 }
 
-static inline void
-i915_write(struct drm_i915_private *dev_priv, u32 reg, u64 val, int len)
+static inline void i915_gt_write(struct drm_i915_private *dev_priv,
+				 u32 reg, u32 val)
 {
-	/* Trace down the write operation before the real write */
-	trace_i915_reg_rw('W', reg, val, len);
-	switch (len) {
-	case 8:
-		writeq(val, dev_priv->regs + reg);
-		break;
-	case 4:
-		writel(val, dev_priv->regs + reg);
-		break;
-	case 2:
-		writew(val, dev_priv->regs + reg);
-		break;
-	case 1:
-		writeb(val, dev_priv->regs + reg);
-		break;
-	}
+	if (dev_priv->info->gen >= 6)
+		__gen6_gt_wait_for_fifo(dev_priv);
+	I915_WRITE(reg, val);
 }
-
-/**
- * Reads a dword out of the status page, which is written to from the command
- * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
- * MI_STORE_DATA_IMM.
- *
- * The following dwords have a reserved meaning:
- * 0x00: ISR copy, updated when an ISR bit not set in the HWSTAM changes.
- * 0x04: ring 0 head pointer
- * 0x05: ring 1 head pointer (915-class)
- * 0x06: ring 2 head pointer (915-class)
- * 0x10-0x1b: Context status DWords (GM45)
- * 0x1f: Last written status offset. (GM45)
- *
- * The area from dword 0x20 to 0x3ff is available for driver usage.
- */
-#define READ_HWSP(dev_priv, reg) (((volatile u32 *)\
-			(LP_RING(dev_priv)->status_page.page_addr))[reg])
-#define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX)
-#define I915_GEM_HWS_INDEX	0x20
-#define I915_BREADCRUMB_INDEX	0x21
-
 #endif
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index bc7f06b8fbca..c4c2855d002d 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -75,8 +75,8 @@ static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
 	dev_priv->mm.object_memory -= size;
 }
 
-int
-i915_gem_check_is_wedged(struct drm_device *dev)
+static int
+i915_gem_wait_for_error(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct completion *x = &dev_priv->error_completion;
@@ -90,27 +90,24 @@ i915_gem_check_is_wedged(struct drm_device *dev)
 	if (ret)
 		return ret;
 
-	/* Success, we reset the GPU! */
-	if (!atomic_read(&dev_priv->mm.wedged))
-		return 0;
-
-	/* GPU is hung, bump the completion count to account for
-	 * the token we just consumed so that we never hit zero and
-	 * end up waiting upon a subsequent completion event that
-	 * will never happen.
-	 */
-	spin_lock_irqsave(&x->wait.lock, flags);
-	x->done++;
-	spin_unlock_irqrestore(&x->wait.lock, flags);
-	return -EIO;
+	if (atomic_read(&dev_priv->mm.wedged)) {
+		/* GPU is hung, bump the completion count to account for
+		 * the token we just consumed so that we never hit zero and
+		 * end up waiting upon a subsequent completion event that
+		 * will never happen.
+		 */
+		spin_lock_irqsave(&x->wait.lock, flags);
+		x->done++;
+		spin_unlock_irqrestore(&x->wait.lock, flags);
+	}
+	return 0;
 }
 
 int i915_mutex_lock_interruptible(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	int ret;
 
-	ret = i915_gem_check_is_wedged(dev);
+	ret = i915_gem_wait_for_error(dev);
 	if (ret)
 		return ret;
 
@@ -118,11 +115,6 @@ int i915_mutex_lock_interruptible(struct drm_device *dev)
 	if (ret)
 		return ret;
 
-	if (atomic_read(&dev_priv->mm.wedged)) {
-		mutex_unlock(&dev->struct_mutex);
-		return -EAGAIN;
-	}
-
 	WARN_ON(i915_verify_lists(dev));
 	return 0;
 }
@@ -543,7 +535,7 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
 		return ret;
 
 	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
-	if (obj == NULL) {
+	if (&obj->base == NULL) {
 		ret = -ENOENT;
 		goto unlock;
 	}
@@ -555,6 +547,8 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
 		goto out;
 	}
 
+	trace_i915_gem_object_pread(obj, args->offset, args->size);
+
 	ret = i915_gem_object_set_cpu_read_domain_range(obj,
 							args->offset,
 							args->size);
@@ -984,7 +978,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
 		return ret;
 
 	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
-	if (obj == NULL) {
+	if (&obj->base == NULL) {
 		ret = -ENOENT;
 		goto unlock;
 	}
@@ -996,6 +990,8 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
 		goto out;
 	}
 
+	trace_i915_gem_object_pwrite(obj, args->offset, args->size);
+
 	/* We can only do the GTT pwrite on untiled buffers, as otherwise
 	 * it would end up going through the fenced access, and we'll get
 	 * different detiling behavior between reading and writing.
@@ -1078,7 +1074,7 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
 		return ret;
 
 	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
-	if (obj == NULL) {
+	if (&obj->base == NULL) {
 		ret = -ENOENT;
 		goto unlock;
 	}
@@ -1121,7 +1117,7 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
 		return ret;
 
 	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
-	if (obj == NULL) {
+	if (&obj->base == NULL) {
 		ret = -ENOENT;
 		goto unlock;
 	}
@@ -1150,7 +1146,6 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_gem_mmap *args = data;
 	struct drm_gem_object *obj;
-	loff_t offset;
 	unsigned long addr;
 
 	if (!(dev->driver->driver_features & DRIVER_GEM))
@@ -1165,8 +1160,6 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
 		return -E2BIG;
 	}
 
-	offset = args->offset;
-
 	down_write(&current->mm->mmap_sem);
 	addr = do_mmap(obj->filp, 0, args->size,
 		       PROT_READ | PROT_WRITE, MAP_SHARED,
@@ -1211,9 +1204,13 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
 		PAGE_SHIFT;
 
-	/* Now bind it into the GTT if needed */
-	mutex_lock(&dev->struct_mutex);
+	ret = i915_mutex_lock_interruptible(dev);
+	if (ret)
+		goto out;
 
+	trace_i915_gem_object_fault(obj, page_offset, true, write);
+
+	/* Now bind it into the GTT if needed */
 	if (!obj->map_and_fenceable) {
 		ret = i915_gem_object_unbind(obj);
 		if (ret)
@@ -1232,7 +1229,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	if (obj->tiling_mode == I915_TILING_NONE)
 		ret = i915_gem_object_put_fence(obj);
 	else
-		ret = i915_gem_object_get_fence(obj, NULL, true);
+		ret = i915_gem_object_get_fence(obj, NULL);
 	if (ret)
 		goto unlock;
 
@@ -1248,12 +1245,21 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
 unlock:
 	mutex_unlock(&dev->struct_mutex);
-
+out:
 	switch (ret) {
+	case -EIO:
 	case -EAGAIN:
+		/* Give the error handler a chance to run and move the
+		 * objects off the GPU active list. Next time we service the
+		 * fault, we should be able to transition the page into the
+		 * GTT without touching the GPU (and so avoid further
+		 * EIO/EGAIN). If the GPU is wedged, then there is no issue
+		 * with coherency, just lost writes.
+		 */
 		set_need_resched();
 	case 0:
 	case -ERESTARTSYS:
+	case -EINTR:
 		return VM_FAULT_NOPAGE;
 	case -ENOMEM:
 		return VM_FAULT_OOM;
@@ -1427,7 +1433,7 @@ i915_gem_get_gtt_alignment(struct drm_i915_gem_object *obj)
  * Return the required GTT alignment for an object, only taking into account
  * unfenced tiled surface requirements.
  */
-static uint32_t
+uint32_t
 i915_gem_get_unfenced_gtt_alignment(struct drm_i915_gem_object *obj)
 {
 	struct drm_device *dev = obj->base.dev;
@@ -1472,7 +1478,7 @@ i915_gem_mmap_gtt(struct drm_file *file,
 		return ret;
 
 	obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
-	if (obj == NULL) {
+	if (&obj->base == NULL) {
 		ret = -ENOENT;
 		goto unlock;
 	}
@@ -1712,9 +1718,8 @@ i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
 }
 
 static void
-i915_gem_process_flushing_list(struct drm_device *dev,
-			       uint32_t flush_domains,
-			       struct intel_ring_buffer *ring)
+i915_gem_process_flushing_list(struct intel_ring_buffer *ring,
+			       uint32_t flush_domains)
 {
 	struct drm_i915_gem_object *obj, *next;
 
@@ -1727,7 +1732,7 @@ i915_gem_process_flushing_list(struct drm_device *dev,
 		obj->base.write_domain = 0;
 		list_del_init(&obj->gpu_write_list);
 		i915_gem_object_move_to_active(obj, ring,
-					       i915_gem_next_request_seqno(dev, ring));
+					       i915_gem_next_request_seqno(ring));
 
 		trace_i915_gem_object_change_domain(obj,
 						    obj->base.read_domains,
@@ -1737,27 +1742,22 @@ i915_gem_process_flushing_list(struct drm_device *dev,
 }
 
 int
-i915_add_request(struct drm_device *dev,
+i915_add_request(struct intel_ring_buffer *ring,
 		 struct drm_file *file,
-		 struct drm_i915_gem_request *request,
-		 struct intel_ring_buffer *ring)
+		 struct drm_i915_gem_request *request)
 {
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_i915_file_private *file_priv = NULL;
+	drm_i915_private_t *dev_priv = ring->dev->dev_private;
 	uint32_t seqno;
 	int was_empty;
 	int ret;
 
 	BUG_ON(request == NULL);
 
-	if (file != NULL)
-		file_priv = file->driver_priv;
-
 	ret = ring->add_request(ring, &seqno);
 	if (ret)
 		return ret;
 
-	ring->outstanding_lazy_request = false;
+	trace_i915_gem_request_add(ring, seqno);
 
 	request->seqno = seqno;
 	request->ring = ring;
@@ -1765,7 +1765,9 @@ i915_add_request(struct drm_device *dev,
 	was_empty = list_empty(&ring->request_list);
 	list_add_tail(&request->list, &ring->request_list);
 
-	if (file_priv) {
+	if (file) {
+		struct drm_i915_file_private *file_priv = file->driver_priv;
+
 		spin_lock(&file_priv->mm.lock);
 		request->file_priv = file_priv;
 		list_add_tail(&request->client_list,
@@ -1773,6 +1775,8 @@ i915_add_request(struct drm_device *dev,
1773 spin_unlock(&file_priv->mm.lock); 1775 spin_unlock(&file_priv->mm.lock);
1774 } 1776 }
1775 1777
1778 ring->outstanding_lazy_request = false;
1779
1776 if (!dev_priv->mm.suspended) { 1780 if (!dev_priv->mm.suspended) {
1777 mod_timer(&dev_priv->hangcheck_timer, 1781 mod_timer(&dev_priv->hangcheck_timer,
1778 jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)); 1782 jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
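
The hunks above change the i915_add_request() contract: the ring is now
the primary handle, the drm_device is recovered from ring->dev
internally, and the file pointer may be NULL for kernel-internal
requests. A hedged sketch of a caller after this change, with error
handling trimmed to the essentials:

    struct drm_i915_gem_request *request;
    int ret;

    request = kzalloc(sizeof(*request), GFP_KERNEL);
    if (request == NULL)
            return -ENOMEM;

    ret = i915_add_request(ring, NULL, request);    /* no dev argument */
    if (ret)
            kfree(request);                         /* request not queued */
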
@@ -1889,18 +1893,15 @@ void i915_gem_reset(struct drm_device *dev)
1889 * This function clears the request list as sequence numbers are passed. 1893 * This function clears the request list as sequence numbers are passed.
1890 */ 1894 */
1891static void 1895static void
1892i915_gem_retire_requests_ring(struct drm_device *dev, 1896i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
1893 struct intel_ring_buffer *ring)
1894{ 1897{
1895 drm_i915_private_t *dev_priv = dev->dev_private;
1896 uint32_t seqno; 1898 uint32_t seqno;
1897 int i; 1899 int i;
1898 1900
1899 if (!ring->status_page.page_addr || 1901 if (list_empty(&ring->request_list))
1900 list_empty(&ring->request_list))
1901 return; 1902 return;
1902 1903
1903 WARN_ON(i915_verify_lists(dev)); 1904 WARN_ON(i915_verify_lists(ring->dev));
1904 1905
1905 seqno = ring->get_seqno(ring); 1906 seqno = ring->get_seqno(ring);
1906 1907
@@ -1918,7 +1919,7 @@ i915_gem_retire_requests_ring(struct drm_device *dev,
1918 if (!i915_seqno_passed(seqno, request->seqno)) 1919 if (!i915_seqno_passed(seqno, request->seqno))
1919 break; 1920 break;
1920 1921
1921 trace_i915_gem_request_retire(dev, request->seqno); 1922 trace_i915_gem_request_retire(ring, request->seqno);
1922 1923
1923 list_del(&request->list); 1924 list_del(&request->list);
1924 i915_gem_request_remove_from_client(request); 1925 i915_gem_request_remove_from_client(request);
@@ -1944,13 +1945,13 @@ i915_gem_retire_requests_ring(struct drm_device *dev,
1944 i915_gem_object_move_to_inactive(obj); 1945 i915_gem_object_move_to_inactive(obj);
1945 } 1946 }
1946 1947
1947 if (unlikely (dev_priv->trace_irq_seqno && 1948 if (unlikely(ring->trace_irq_seqno &&
1948 i915_seqno_passed(dev_priv->trace_irq_seqno, seqno))) { 1949 i915_seqno_passed(seqno, ring->trace_irq_seqno))) {
1949 ring->irq_put(ring); 1950 ring->irq_put(ring);
1950 dev_priv->trace_irq_seqno = 0; 1951 ring->trace_irq_seqno = 0;
1951 } 1952 }
1952 1953
1953 WARN_ON(i915_verify_lists(dev)); 1954 WARN_ON(i915_verify_lists(ring->dev));
1954} 1955}
1955 1956
1956void 1957void
@@ -1974,7 +1975,7 @@ i915_gem_retire_requests(struct drm_device *dev)
1974 } 1975 }
1975 1976
1976 for (i = 0; i < I915_NUM_RINGS; i++) 1977 for (i = 0; i < I915_NUM_RINGS; i++)
1977 i915_gem_retire_requests_ring(dev, &dev_priv->ring[i]); 1978 i915_gem_retire_requests_ring(&dev_priv->ring[i]);
1978} 1979}
1979 1980
1980static void 1981static void
@@ -2008,11 +2009,11 @@ i915_gem_retire_work_handler(struct work_struct *work)
2008 struct drm_i915_gem_request *request; 2009 struct drm_i915_gem_request *request;
2009 int ret; 2010 int ret;
2010 2011
2011 ret = i915_gem_flush_ring(dev, ring, 0, 2012 ret = i915_gem_flush_ring(ring,
2012 I915_GEM_GPU_DOMAINS); 2013 0, I915_GEM_GPU_DOMAINS);
2013 request = kzalloc(sizeof(*request), GFP_KERNEL); 2014 request = kzalloc(sizeof(*request), GFP_KERNEL);
2014 if (ret || request == NULL || 2015 if (ret || request == NULL ||
2015 i915_add_request(dev, NULL, request, ring)) 2016 i915_add_request(ring, NULL, request))
2016 kfree(request); 2017 kfree(request);
2017 } 2018 }
2018 2019
@@ -2025,18 +2026,32 @@ i915_gem_retire_work_handler(struct work_struct *work)
2025 mutex_unlock(&dev->struct_mutex); 2026 mutex_unlock(&dev->struct_mutex);
2026} 2027}
2027 2028
2029/**
2030 * Waits for a sequence number to be signaled, and cleans up the
2031 * request and object lists appropriately for that event.
2032 */
2028int 2033int
2029i915_do_wait_request(struct drm_device *dev, uint32_t seqno, 2034i915_wait_request(struct intel_ring_buffer *ring,
2030 bool interruptible, struct intel_ring_buffer *ring) 2035 uint32_t seqno)
2031{ 2036{
2032 drm_i915_private_t *dev_priv = dev->dev_private; 2037 drm_i915_private_t *dev_priv = ring->dev->dev_private;
2033 u32 ier; 2038 u32 ier;
2034 int ret = 0; 2039 int ret = 0;
2035 2040
2036 BUG_ON(seqno == 0); 2041 BUG_ON(seqno == 0);
2037 2042
2038 if (atomic_read(&dev_priv->mm.wedged)) 2043 if (atomic_read(&dev_priv->mm.wedged)) {
2039 return -EAGAIN; 2044 struct completion *x = &dev_priv->error_completion;
2045 bool recovery_complete;
2046 unsigned long flags;
2047
2048 /* Give the error handler a chance to run. */
2049 spin_lock_irqsave(&x->wait.lock, flags);
2050 recovery_complete = x->done > 0;
2051 spin_unlock_irqrestore(&x->wait.lock, flags);
2052
2053 return recovery_complete ? -EIO : -EAGAIN;
2054 }
2040 2055
2041 if (seqno == ring->outstanding_lazy_request) { 2056 if (seqno == ring->outstanding_lazy_request) {
2042 struct drm_i915_gem_request *request; 2057 struct drm_i915_gem_request *request;
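
The new wedged branch above distinguishes "reset still in progress"
(-EAGAIN, worth retrying) from "recovery already ran and the GPU stayed
hung" (-EIO) by peeking at the error_completion count under the
completion's own waitqueue lock. The pattern in isolation, as a sketch
with an invented helper name:

    static bool recovery_complete(struct completion *x)
    {
            unsigned long flags;
            bool done;

            spin_lock_irqsave(&x->wait.lock, flags);
            done = x->done > 0;     /* complete_all() leaves done non-zero */
            spin_unlock_irqrestore(&x->wait.lock, flags);
            return done;
    }
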
@@ -2045,7 +2060,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
2045 if (request == NULL) 2060 if (request == NULL)
2046 return -ENOMEM; 2061 return -ENOMEM;
2047 2062
2048 ret = i915_add_request(dev, NULL, request, ring); 2063 ret = i915_add_request(ring, NULL, request);
2049 if (ret) { 2064 if (ret) {
2050 kfree(request); 2065 kfree(request);
2051 return ret; 2066 return ret;
@@ -2055,22 +2070,22 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
2055 } 2070 }
2056 2071
2057 if (!i915_seqno_passed(ring->get_seqno(ring), seqno)) { 2072 if (!i915_seqno_passed(ring->get_seqno(ring), seqno)) {
2058 if (HAS_PCH_SPLIT(dev)) 2073 if (HAS_PCH_SPLIT(ring->dev))
2059 ier = I915_READ(DEIER) | I915_READ(GTIER); 2074 ier = I915_READ(DEIER) | I915_READ(GTIER);
2060 else 2075 else
2061 ier = I915_READ(IER); 2076 ier = I915_READ(IER);
2062 if (!ier) { 2077 if (!ier) {
2063 DRM_ERROR("something (likely vbetool) disabled " 2078 DRM_ERROR("something (likely vbetool) disabled "
2064 "interrupts, re-enabling\n"); 2079 "interrupts, re-enabling\n");
2065 i915_driver_irq_preinstall(dev); 2080 i915_driver_irq_preinstall(ring->dev);
2066 i915_driver_irq_postinstall(dev); 2081 i915_driver_irq_postinstall(ring->dev);
2067 } 2082 }
2068 2083
2069 trace_i915_gem_request_wait_begin(dev, seqno); 2084 trace_i915_gem_request_wait_begin(ring, seqno);
2070 2085
2071 ring->waiting_seqno = seqno; 2086 ring->waiting_seqno = seqno;
2072 if (ring->irq_get(ring)) { 2087 if (ring->irq_get(ring)) {
2073 if (interruptible) 2088 if (dev_priv->mm.interruptible)
2074 ret = wait_event_interruptible(ring->irq_queue, 2089 ret = wait_event_interruptible(ring->irq_queue,
2075 i915_seqno_passed(ring->get_seqno(ring), seqno) 2090 i915_seqno_passed(ring->get_seqno(ring), seqno)
2076 || atomic_read(&dev_priv->mm.wedged)); 2091 || atomic_read(&dev_priv->mm.wedged));
@@ -2086,7 +2101,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
2086 ret = -EBUSY; 2101 ret = -EBUSY;
2087 ring->waiting_seqno = 0; 2102 ring->waiting_seqno = 0;
2088 2103
2089 trace_i915_gem_request_wait_end(dev, seqno); 2104 trace_i915_gem_request_wait_end(ring, seqno);
2090 } 2105 }
2091 if (atomic_read(&dev_priv->mm.wedged)) 2106 if (atomic_read(&dev_priv->mm.wedged))
2092 ret = -EAGAIN; 2107 ret = -EAGAIN;
@@ -2102,31 +2117,18 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
2102 * a separate wait queue to handle that. 2117 * a separate wait queue to handle that.
2103 */ 2118 */
2104 if (ret == 0) 2119 if (ret == 0)
2105 i915_gem_retire_requests_ring(dev, ring); 2120 i915_gem_retire_requests_ring(ring);
2106 2121
2107 return ret; 2122 return ret;
2108} 2123}
2109 2124
2110/** 2125/**
2111 * Waits for a sequence number to be signaled, and cleans up the
2112 * request and object lists appropriately for that event.
2113 */
2114static int
2115i915_wait_request(struct drm_device *dev, uint32_t seqno,
2116 struct intel_ring_buffer *ring)
2117{
2118 return i915_do_wait_request(dev, seqno, 1, ring);
2119}
2120
2121/**
2122 * Ensures that all rendering to the object has completed and the object is 2126 * Ensures that all rendering to the object has completed and the object is
2123 * safe to unbind from the GTT or access from the CPU. 2127 * safe to unbind from the GTT or access from the CPU.
2124 */ 2128 */
2125int 2129int
2126i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj, 2130i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj)
2127 bool interruptible)
2128{ 2131{
2129 struct drm_device *dev = obj->base.dev;
2130 int ret; 2132 int ret;
2131 2133
2132 /* This function only exists to support waiting for existing rendering, 2134 /* This function only exists to support waiting for existing rendering,
@@ -2138,10 +2140,7 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
2138 * it. 2140 * it.
2139 */ 2141 */
2140 if (obj->active) { 2142 if (obj->active) {
2141 ret = i915_do_wait_request(dev, 2143 ret = i915_wait_request(obj->ring, obj->last_rendering_seqno);
2142 obj->last_rendering_seqno,
2143 interruptible,
2144 obj->ring);
2145 if (ret) 2144 if (ret)
2146 return ret; 2145 return ret;
2147 } 2146 }
@@ -2191,6 +2190,8 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
2191 if (ret == -ERESTARTSYS) 2190 if (ret == -ERESTARTSYS)
2192 return ret; 2191 return ret;
2193 2192
2193 trace_i915_gem_object_unbind(obj);
2194
2194 i915_gem_gtt_unbind_object(obj); 2195 i915_gem_gtt_unbind_object(obj);
2195 i915_gem_object_put_pages_gtt(obj); 2196 i915_gem_object_put_pages_gtt(obj);
2196 2197
@@ -2206,29 +2207,27 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
2206 if (i915_gem_object_is_purgeable(obj)) 2207 if (i915_gem_object_is_purgeable(obj))
2207 i915_gem_object_truncate(obj); 2208 i915_gem_object_truncate(obj);
2208 2209
2209 trace_i915_gem_object_unbind(obj);
2210
2211 return ret; 2210 return ret;
2212} 2211}
2213 2212
2214int 2213int
2215i915_gem_flush_ring(struct drm_device *dev, 2214i915_gem_flush_ring(struct intel_ring_buffer *ring,
2216 struct intel_ring_buffer *ring,
2217 uint32_t invalidate_domains, 2215 uint32_t invalidate_domains,
2218 uint32_t flush_domains) 2216 uint32_t flush_domains)
2219{ 2217{
2220 int ret; 2218 int ret;
2221 2219
2220 trace_i915_gem_ring_flush(ring, invalidate_domains, flush_domains);
2221
2222 ret = ring->flush(ring, invalidate_domains, flush_domains); 2222 ret = ring->flush(ring, invalidate_domains, flush_domains);
2223 if (ret) 2223 if (ret)
2224 return ret; 2224 return ret;
2225 2225
2226 i915_gem_process_flushing_list(dev, flush_domains, ring); 2226 i915_gem_process_flushing_list(ring, flush_domains);
2227 return 0; 2227 return 0;
2228} 2228}
2229 2229
2230static int i915_ring_idle(struct drm_device *dev, 2230static int i915_ring_idle(struct intel_ring_buffer *ring)
2231 struct intel_ring_buffer *ring)
2232{ 2231{
2233 int ret; 2232 int ret;
2234 2233
@@ -2236,15 +2235,13 @@ static int i915_ring_idle(struct drm_device *dev,
2236 return 0; 2235 return 0;
2237 2236
2238 if (!list_empty(&ring->gpu_write_list)) { 2237 if (!list_empty(&ring->gpu_write_list)) {
2239 ret = i915_gem_flush_ring(dev, ring, 2238 ret = i915_gem_flush_ring(ring,
2240 I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS); 2239 I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
2241 if (ret) 2240 if (ret)
2242 return ret; 2241 return ret;
2243 } 2242 }
2244 2243
2245 return i915_wait_request(dev, 2244 return i915_wait_request(ring, i915_gem_next_request_seqno(ring));
2246 i915_gem_next_request_seqno(dev, ring),
2247 ring);
2248} 2245}
2249 2246
2250int 2247int
@@ -2261,7 +2258,7 @@ i915_gpu_idle(struct drm_device *dev)
2261 2258
2262 /* Flush everything onto the inactive list. */ 2259 /* Flush everything onto the inactive list. */
2263 for (i = 0; i < I915_NUM_RINGS; i++) { 2260 for (i = 0; i < I915_NUM_RINGS; i++) {
2264 ret = i915_ring_idle(dev, &dev_priv->ring[i]); 2261 ret = i915_ring_idle(&dev_priv->ring[i]);
2265 if (ret) 2262 if (ret)
2266 return ret; 2263 return ret;
2267 } 2264 }
@@ -2445,15 +2442,13 @@ static bool ring_passed_seqno(struct intel_ring_buffer *ring, u32 seqno)
2445 2442
2446static int 2443static int
2447i915_gem_object_flush_fence(struct drm_i915_gem_object *obj, 2444i915_gem_object_flush_fence(struct drm_i915_gem_object *obj,
2448 struct intel_ring_buffer *pipelined, 2445 struct intel_ring_buffer *pipelined)
2449 bool interruptible)
2450{ 2446{
2451 int ret; 2447 int ret;
2452 2448
2453 if (obj->fenced_gpu_access) { 2449 if (obj->fenced_gpu_access) {
2454 if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) { 2450 if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
2455 ret = i915_gem_flush_ring(obj->base.dev, 2451 ret = i915_gem_flush_ring(obj->last_fenced_ring,
2456 obj->last_fenced_ring,
2457 0, obj->base.write_domain); 2452 0, obj->base.write_domain);
2458 if (ret) 2453 if (ret)
2459 return ret; 2454 return ret;
@@ -2465,10 +2460,8 @@ i915_gem_object_flush_fence(struct drm_i915_gem_object *obj,
2465 if (obj->last_fenced_seqno && pipelined != obj->last_fenced_ring) { 2460 if (obj->last_fenced_seqno && pipelined != obj->last_fenced_ring) {
2466 if (!ring_passed_seqno(obj->last_fenced_ring, 2461 if (!ring_passed_seqno(obj->last_fenced_ring,
2467 obj->last_fenced_seqno)) { 2462 obj->last_fenced_seqno)) {
2468 ret = i915_do_wait_request(obj->base.dev, 2463 ret = i915_wait_request(obj->last_fenced_ring,
2469 obj->last_fenced_seqno, 2464 obj->last_fenced_seqno);
2470 interruptible,
2471 obj->last_fenced_ring);
2472 if (ret) 2465 if (ret)
2473 return ret; 2466 return ret;
2474 } 2467 }
@@ -2494,7 +2487,7 @@ i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
2494 if (obj->tiling_mode) 2487 if (obj->tiling_mode)
2495 i915_gem_release_mmap(obj); 2488 i915_gem_release_mmap(obj);
2496 2489
2497 ret = i915_gem_object_flush_fence(obj, NULL, true); 2490 ret = i915_gem_object_flush_fence(obj, NULL);
2498 if (ret) 2491 if (ret)
2499 return ret; 2492 return ret;
2500 2493
@@ -2571,8 +2564,7 @@ i915_find_fence_reg(struct drm_device *dev,
2571 */ 2564 */
2572int 2565int
2573i915_gem_object_get_fence(struct drm_i915_gem_object *obj, 2566i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
2574 struct intel_ring_buffer *pipelined, 2567 struct intel_ring_buffer *pipelined)
2575 bool interruptible)
2576{ 2568{
2577 struct drm_device *dev = obj->base.dev; 2569 struct drm_device *dev = obj->base.dev;
2578 struct drm_i915_private *dev_priv = dev->dev_private; 2570 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -2594,10 +2586,8 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
2594 if (reg->setup_seqno) { 2586 if (reg->setup_seqno) {
2595 if (!ring_passed_seqno(obj->last_fenced_ring, 2587 if (!ring_passed_seqno(obj->last_fenced_ring,
2596 reg->setup_seqno)) { 2588 reg->setup_seqno)) {
2597 ret = i915_do_wait_request(obj->base.dev, 2589 ret = i915_wait_request(obj->last_fenced_ring,
2598 reg->setup_seqno, 2590 reg->setup_seqno);
2599 interruptible,
2600 obj->last_fenced_ring);
2601 if (ret) 2591 if (ret)
2602 return ret; 2592 return ret;
2603 } 2593 }
@@ -2606,15 +2596,13 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
2606 } 2596 }
2607 } else if (obj->last_fenced_ring && 2597 } else if (obj->last_fenced_ring &&
2608 obj->last_fenced_ring != pipelined) { 2598 obj->last_fenced_ring != pipelined) {
2609 ret = i915_gem_object_flush_fence(obj, 2599 ret = i915_gem_object_flush_fence(obj, pipelined);
2610 pipelined,
2611 interruptible);
2612 if (ret) 2600 if (ret)
2613 return ret; 2601 return ret;
2614 } else if (obj->tiling_changed) { 2602 } else if (obj->tiling_changed) {
2615 if (obj->fenced_gpu_access) { 2603 if (obj->fenced_gpu_access) {
2616 if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) { 2604 if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
2617 ret = i915_gem_flush_ring(obj->base.dev, obj->ring, 2605 ret = i915_gem_flush_ring(obj->ring,
2618 0, obj->base.write_domain); 2606 0, obj->base.write_domain);
2619 if (ret) 2607 if (ret)
2620 return ret; 2608 return ret;
@@ -2631,7 +2619,7 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
2631 if (obj->tiling_changed) { 2619 if (obj->tiling_changed) {
2632 if (pipelined) { 2620 if (pipelined) {
2633 reg->setup_seqno = 2621 reg->setup_seqno =
2634 i915_gem_next_request_seqno(dev, pipelined); 2622 i915_gem_next_request_seqno(pipelined);
2635 obj->last_fenced_seqno = reg->setup_seqno; 2623 obj->last_fenced_seqno = reg->setup_seqno;
2636 obj->last_fenced_ring = pipelined; 2624 obj->last_fenced_ring = pipelined;
2637 } 2625 }
@@ -2645,7 +2633,7 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
2645 if (reg == NULL) 2633 if (reg == NULL)
2646 return -ENOSPC; 2634 return -ENOSPC;
2647 2635
2648 ret = i915_gem_object_flush_fence(obj, pipelined, interruptible); 2636 ret = i915_gem_object_flush_fence(obj, pipelined);
2649 if (ret) 2637 if (ret)
2650 return ret; 2638 return ret;
2651 2639
@@ -2657,9 +2645,7 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
2657 if (old->tiling_mode) 2645 if (old->tiling_mode)
2658 i915_gem_release_mmap(old); 2646 i915_gem_release_mmap(old);
2659 2647
2660 ret = i915_gem_object_flush_fence(old, 2648 ret = i915_gem_object_flush_fence(old, pipelined);
2661 pipelined,
2662 interruptible);
2663 if (ret) { 2649 if (ret) {
2664 drm_gem_object_unreference(&old->base); 2650 drm_gem_object_unreference(&old->base);
2665 return ret; 2651 return ret;
@@ -2671,7 +2657,7 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
2671 old->fence_reg = I915_FENCE_REG_NONE; 2657 old->fence_reg = I915_FENCE_REG_NONE;
2672 old->last_fenced_ring = pipelined; 2658 old->last_fenced_ring = pipelined;
2673 old->last_fenced_seqno = 2659 old->last_fenced_seqno =
2674 pipelined ? i915_gem_next_request_seqno(dev, pipelined) : 0; 2660 pipelined ? i915_gem_next_request_seqno(pipelined) : 0;
2675 2661
2676 drm_gem_object_unreference(&old->base); 2662 drm_gem_object_unreference(&old->base);
2677 } else if (obj->last_fenced_seqno == 0) 2663 } else if (obj->last_fenced_seqno == 0)
@@ -2683,7 +2669,7 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
2683 obj->last_fenced_ring = pipelined; 2669 obj->last_fenced_ring = pipelined;
2684 2670
2685 reg->setup_seqno = 2671 reg->setup_seqno =
2686 pipelined ? i915_gem_next_request_seqno(dev, pipelined) : 0; 2672 pipelined ? i915_gem_next_request_seqno(pipelined) : 0;
2687 obj->last_fenced_seqno = reg->setup_seqno; 2673 obj->last_fenced_seqno = reg->setup_seqno;
2688 2674
2689update: 2675update:
@@ -2880,7 +2866,7 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
2880 2866
2881 obj->map_and_fenceable = mappable && fenceable; 2867 obj->map_and_fenceable = mappable && fenceable;
2882 2868
2883 trace_i915_gem_object_bind(obj, obj->gtt_offset, map_and_fenceable); 2869 trace_i915_gem_object_bind(obj, map_and_fenceable);
2884 return 0; 2870 return 0;
2885} 2871}
2886 2872
@@ -2903,13 +2889,11 @@ i915_gem_clflush_object(struct drm_i915_gem_object *obj)
2903static int 2889static int
2904i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj) 2890i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj)
2905{ 2891{
2906 struct drm_device *dev = obj->base.dev;
2907
2908 if ((obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0) 2892 if ((obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0)
2909 return 0; 2893 return 0;
2910 2894
2911 /* Queue the GPU write cache flushing we need. */ 2895 /* Queue the GPU write cache flushing we need. */
2912 return i915_gem_flush_ring(dev, obj->ring, 0, obj->base.write_domain); 2896 return i915_gem_flush_ring(obj->ring, 0, obj->base.write_domain);
2913} 2897}
2914 2898
2915/** Flushes the GTT write domain for the object if it's dirty. */ 2899/** Flushes the GTT write domain for the object if it's dirty. */
@@ -2976,12 +2960,15 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
2976 if (obj->gtt_space == NULL) 2960 if (obj->gtt_space == NULL)
2977 return -EINVAL; 2961 return -EINVAL;
2978 2962
2963 if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
2964 return 0;
2965
2979 ret = i915_gem_object_flush_gpu_write_domain(obj); 2966 ret = i915_gem_object_flush_gpu_write_domain(obj);
2980 if (ret) 2967 if (ret)
2981 return ret; 2968 return ret;
2982 2969
2983 if (obj->pending_gpu_write || write) { 2970 if (obj->pending_gpu_write || write) {
2984 ret = i915_gem_object_wait_rendering(obj, true); 2971 ret = i915_gem_object_wait_rendering(obj);
2985 if (ret) 2972 if (ret)
2986 return ret; 2973 return ret;
2987 } 2974 }
@@ -3031,7 +3018,7 @@ i915_gem_object_set_to_display_plane(struct drm_i915_gem_object *obj,
3031 3018
3032 /* Currently, we are always called from a non-interruptible context. */ 3019 /* Currently, we are always called from a non-interruptible context. */
3033 if (pipelined != obj->ring) { 3020 if (pipelined != obj->ring) {
3034 ret = i915_gem_object_wait_rendering(obj, false); 3021 ret = i915_gem_object_wait_rendering(obj);
3035 if (ret) 3022 if (ret)
3036 return ret; 3023 return ret;
3037 } 3024 }
@@ -3049,8 +3036,7 @@ i915_gem_object_set_to_display_plane(struct drm_i915_gem_object *obj,
3049} 3036}
3050 3037
3051int 3038int
3052i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj, 3039i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj)
3053 bool interruptible)
3054{ 3040{
3055 int ret; 3041 int ret;
3056 3042
@@ -3058,13 +3044,12 @@ i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj,
3058 return 0; 3044 return 0;
3059 3045
3060 if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) { 3046 if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
3061 ret = i915_gem_flush_ring(obj->base.dev, obj->ring, 3047 ret = i915_gem_flush_ring(obj->ring, 0, obj->base.write_domain);
3062 0, obj->base.write_domain);
3063 if (ret) 3048 if (ret)
3064 return ret; 3049 return ret;
3065 } 3050 }
3066 3051
3067 return i915_gem_object_wait_rendering(obj, interruptible); 3052 return i915_gem_object_wait_rendering(obj);
3068} 3053}
3069 3054
3070/** 3055/**
@@ -3079,11 +3064,14 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
3079 uint32_t old_write_domain, old_read_domains; 3064 uint32_t old_write_domain, old_read_domains;
3080 int ret; 3065 int ret;
3081 3066
3067 if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
3068 return 0;
3069
3082 ret = i915_gem_object_flush_gpu_write_domain(obj); 3070 ret = i915_gem_object_flush_gpu_write_domain(obj);
3083 if (ret) 3071 if (ret)
3084 return ret; 3072 return ret;
3085 3073
3086 ret = i915_gem_object_wait_rendering(obj, true); 3074 ret = i915_gem_object_wait_rendering(obj);
3087 if (ret) 3075 if (ret)
3088 return ret; 3076 return ret;
3089 3077
@@ -3181,7 +3169,7 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj,
3181 if (ret) 3169 if (ret)
3182 return ret; 3170 return ret;
3183 3171
3184 ret = i915_gem_object_wait_rendering(obj, true); 3172 ret = i915_gem_object_wait_rendering(obj);
3185 if (ret) 3173 if (ret)
3186 return ret; 3174 return ret;
3187 3175
@@ -3252,6 +3240,9 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
3252 u32 seqno = 0; 3240 u32 seqno = 0;
3253 int ret; 3241 int ret;
3254 3242
3243 if (atomic_read(&dev_priv->mm.wedged))
3244 return -EIO;
3245
3255 spin_lock(&file_priv->mm.lock); 3246 spin_lock(&file_priv->mm.lock);
3256 list_for_each_entry(request, &file_priv->mm.request_list, client_list) { 3247 list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
3257 if (time_after_eq(request->emitted_jiffies, recent_enough)) 3248 if (time_after_eq(request->emitted_jiffies, recent_enough))
@@ -3367,7 +3358,7 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
3367 return ret; 3358 return ret;
3368 3359
3369 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); 3360 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3370 if (obj == NULL) { 3361 if (&obj->base == NULL) {
3371 ret = -ENOENT; 3362 ret = -ENOENT;
3372 goto unlock; 3363 goto unlock;
3373 } 3364 }
@@ -3418,7 +3409,7 @@ i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
3418 return ret; 3409 return ret;
3419 3410
3420 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); 3411 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3421 if (obj == NULL) { 3412 if (&obj->base == NULL) {
3422 ret = -ENOENT; 3413 ret = -ENOENT;
3423 goto unlock; 3414 goto unlock;
3424 } 3415 }
@@ -3455,7 +3446,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
3455 return ret; 3446 return ret;
3456 3447
3457 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); 3448 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3458 if (obj == NULL) { 3449 if (&obj->base == NULL) {
3459 ret = -ENOENT; 3450 ret = -ENOENT;
3460 goto unlock; 3451 goto unlock;
3461 } 3452 }
@@ -3473,7 +3464,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
3473 * flush earlier is beneficial. 3464 * flush earlier is beneficial.
3474 */ 3465 */
3475 if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) { 3466 if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
3476 ret = i915_gem_flush_ring(dev, obj->ring, 3467 ret = i915_gem_flush_ring(obj->ring,
3477 0, obj->base.write_domain); 3468 0, obj->base.write_domain);
3478 } else if (obj->ring->outstanding_lazy_request == 3469 } else if (obj->ring->outstanding_lazy_request ==
3479 obj->last_rendering_seqno) { 3470 obj->last_rendering_seqno) {
@@ -3484,9 +3475,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
3484 */ 3475 */
3485 request = kzalloc(sizeof(*request), GFP_KERNEL); 3476 request = kzalloc(sizeof(*request), GFP_KERNEL);
3486 if (request) 3477 if (request)
3487 ret = i915_add_request(dev, 3478 ret = i915_add_request(obj->ring, NULL, request);
3488 NULL, request,
3489 obj->ring);
3490 else 3479 else
3491 ret = -ENOMEM; 3480 ret = -ENOMEM;
3492 } 3481 }
@@ -3496,7 +3485,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
3496 * are actually unmasked, and our working set ends up being 3485 * are actually unmasked, and our working set ends up being
3497 * larger than required. 3486 * larger than required.
3498 */ 3487 */
3499 i915_gem_retire_requests_ring(dev, obj->ring); 3488 i915_gem_retire_requests_ring(obj->ring);
3500 3489
3501 args->busy = obj->active; 3490 args->busy = obj->active;
3502 } 3491 }
@@ -3535,7 +3524,7 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
3535 return ret; 3524 return ret;
3536 3525
3537 obj = to_intel_bo(drm_gem_object_lookup(dev, file_priv, args->handle)); 3526 obj = to_intel_bo(drm_gem_object_lookup(dev, file_priv, args->handle));
3538 if (obj == NULL) { 3527 if (&obj->base == NULL) {
3539 ret = -ENOENT; 3528 ret = -ENOENT;
3540 goto unlock; 3529 goto unlock;
3541 } 3530 }
@@ -3626,6 +3615,8 @@ static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj)
3626 kfree(obj->page_cpu_valid); 3615 kfree(obj->page_cpu_valid);
3627 kfree(obj->bit_17); 3616 kfree(obj->bit_17);
3628 kfree(obj); 3617 kfree(obj);
3618
3619 trace_i915_gem_object_destroy(obj);
3629} 3620}
3630 3621
3631void i915_gem_free_object(struct drm_gem_object *gem_obj) 3622void i915_gem_free_object(struct drm_gem_object *gem_obj)
@@ -3633,8 +3624,6 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
3633 struct drm_i915_gem_object *obj = to_intel_bo(gem_obj); 3624 struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
3634 struct drm_device *dev = obj->base.dev; 3625 struct drm_device *dev = obj->base.dev;
3635 3626
3636 trace_i915_gem_object_destroy(obj);
3637
3638 while (obj->pin_count > 0) 3627 while (obj->pin_count > 0)
3639 i915_gem_object_unpin(obj); 3628 i915_gem_object_unpin(obj);
3640 3629
@@ -3880,6 +3869,8 @@ i915_gem_load(struct drm_device *dev)
3880 i915_gem_detect_bit_6_swizzle(dev); 3869 i915_gem_detect_bit_6_swizzle(dev);
3881 init_waitqueue_head(&dev_priv->pending_flip_queue); 3870 init_waitqueue_head(&dev_priv->pending_flip_queue);
3882 3871
3872 dev_priv->mm.interruptible = true;
3873
3883 dev_priv->mm.inactive_shrinker.shrink = i915_gem_inactive_shrink; 3874 dev_priv->mm.inactive_shrinker.shrink = i915_gem_inactive_shrink;
3884 dev_priv->mm.inactive_shrinker.seeks = DEFAULT_SEEKS; 3875 dev_priv->mm.inactive_shrinker.seeks = DEFAULT_SEEKS;
3885 register_shrinker(&dev_priv->mm.inactive_shrinker); 3876 register_shrinker(&dev_priv->mm.inactive_shrinker);
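
With dev_priv->mm.interruptible initialised to true here, the per-call
"interruptible" flag deleted throughout this patch becomes a device-wide
mode instead. A sketch of how a path that must not be interrupted (a
pageflip, say) would be expected to flip it; the save/restore shape is
an assumption, not quoted code:

    bool was_interruptible = dev_priv->mm.interruptible;

    dev_priv->mm.interruptible = false;     /* no -ERESTARTSYS from here */
    ret = i915_gem_object_wait_rendering(obj);
    dev_priv->mm.interruptible = was_interruptible;
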
diff --git a/drivers/gpu/drm/i915/i915_gem_debug.c b/drivers/gpu/drm/i915/i915_gem_debug.c
index 29d014c48ca2..8da1899bd24f 100644
--- a/drivers/gpu/drm/i915/i915_gem_debug.c
+++ b/drivers/gpu/drm/i915/i915_gem_debug.c
@@ -134,51 +134,6 @@ i915_verify_lists(struct drm_device *dev)
134} 134}
135#endif /* WATCH_INACTIVE */ 135#endif /* WATCH_INACTIVE */
136 136
137
138#if WATCH_EXEC | WATCH_PWRITE
139static void
140i915_gem_dump_page(struct page *page, uint32_t start, uint32_t end,
141 uint32_t bias, uint32_t mark)
142{
143 uint32_t *mem = kmap_atomic(page, KM_USER0);
144 int i;
145 for (i = start; i < end; i += 4)
146 DRM_INFO("%08x: %08x%s\n",
147 (int) (bias + i), mem[i / 4],
148 (bias + i == mark) ? " ********" : "");
149 kunmap_atomic(mem, KM_USER0);
150 /* give syslog time to catch up */
151 msleep(1);
152}
153
154void
155i915_gem_dump_object(struct drm_i915_gem_object *obj, int len,
156 const char *where, uint32_t mark)
157{
158 int page;
159
160 DRM_INFO("%s: object at offset %08x\n", where, obj->gtt_offset);
161 for (page = 0; page < (len + PAGE_SIZE-1) / PAGE_SIZE; page++) {
162 int page_len, chunk, chunk_len;
163
164 page_len = len - page * PAGE_SIZE;
165 if (page_len > PAGE_SIZE)
166 page_len = PAGE_SIZE;
167
168 for (chunk = 0; chunk < page_len; chunk += 128) {
169 chunk_len = page_len - chunk;
170 if (chunk_len > 128)
171 chunk_len = 128;
172 i915_gem_dump_page(obj->pages[page],
173 chunk, chunk + chunk_len,
174 obj->gtt_offset +
175 page * PAGE_SIZE,
176 mark);
177 }
178 }
179}
180#endif
181
182#if WATCH_COHERENCY 137#if WATCH_COHERENCY
183void 138void
184i915_gem_object_check_coherency(struct drm_i915_gem_object *obj, int handle) 139i915_gem_object_check_coherency(struct drm_i915_gem_object *obj, int handle)
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 3d39005540aa..da05a2692a75 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -30,6 +30,7 @@
30#include "drm.h" 30#include "drm.h"
31#include "i915_drv.h" 31#include "i915_drv.h"
32#include "i915_drm.h" 32#include "i915_drm.h"
33#include "i915_trace.h"
33 34
34static bool 35static bool
35mark_free(struct drm_i915_gem_object *obj, struct list_head *unwind) 36mark_free(struct drm_i915_gem_object *obj, struct list_head *unwind)
@@ -63,6 +64,8 @@ i915_gem_evict_something(struct drm_device *dev, int min_size,
63 return 0; 64 return 0;
64 } 65 }
65 66
67 trace_i915_gem_evict(dev, min_size, alignment, mappable);
68
66 /* 69 /*
67 * The goal is to evict objects and amalgamate space in LRU order. 70 * The goal is to evict objects and amalgamate space in LRU order.
68 * The oldest idle objects reside on the inactive list, which is in 71 * The oldest idle objects reside on the inactive list, which is in
@@ -189,6 +192,8 @@ i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only)
189 if (lists_empty) 192 if (lists_empty)
190 return -ENOSPC; 193 return -ENOSPC;
191 194
195 trace_i915_gem_evict_everything(dev, purgeable_only);
196
192 /* Flush everything (on to the inactive lists) and evict */ 197 /* Flush everything (on to the inactive lists) and evict */
193 ret = i915_gpu_idle(dev); 198 ret = i915_gpu_idle(dev);
194 if (ret) 199 if (ret)
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index d2f445e825f2..7ff7f933ddf1 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -37,6 +37,7 @@ struct change_domains {
37 uint32_t invalidate_domains; 37 uint32_t invalidate_domains;
38 uint32_t flush_domains; 38 uint32_t flush_domains;
39 uint32_t flush_rings; 39 uint32_t flush_rings;
40 uint32_t flips;
40}; 41};
41 42
42/* 43/*
@@ -190,6 +191,9 @@ i915_gem_object_set_to_gpu_domain(struct drm_i915_gem_object *obj,
190 if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_GTT) 191 if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_GTT)
191 i915_gem_release_mmap(obj); 192 i915_gem_release_mmap(obj);
192 193
194 if (obj->base.pending_write_domain)
195 cd->flips |= atomic_read(&obj->pending_flip);
196
193 /* The actual obj->write_domain will be updated with 197 /* The actual obj->write_domain will be updated with
194 * pending_write_domain after we emit the accumulated flush for all 198 * pending_write_domain after we emit the accumulated flush for all
195 * of our domain changes in execbuffers (which clears objects' 199 * of our domain changes in execbuffers (which clears objects'
@@ -282,21 +286,6 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
282 286
283 target_offset = to_intel_bo(target_obj)->gtt_offset; 287 target_offset = to_intel_bo(target_obj)->gtt_offset;
284 288
285#if WATCH_RELOC
286 DRM_INFO("%s: obj %p offset %08x target %d "
287 "read %08x write %08x gtt %08x "
288 "presumed %08x delta %08x\n",
289 __func__,
290 obj,
291 (int) reloc->offset,
292 (int) reloc->target_handle,
293 (int) reloc->read_domains,
294 (int) reloc->write_domain,
295 (int) target_offset,
296 (int) reloc->presumed_offset,
297 reloc->delta);
298#endif
299
300 /* The target buffer should have appeared before us in the 289 /* The target buffer should have appeared before us in the
301 * exec_object list, so it should have a GTT space bound by now. 290 * exec_object list, so it should have a GTT space bound by now.
302 */ 291 */
@@ -365,16 +354,6 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
365 return ret; 354 return ret;
366 } 355 }
367 356
368 /* and points to somewhere within the target object. */
369 if (unlikely(reloc->delta >= target_obj->size)) {
370 DRM_ERROR("Relocation beyond target object bounds: "
371 "obj %p target %d delta %d size %d.\n",
372 obj, reloc->target_handle,
373 (int) reloc->delta,
374 (int) target_obj->size);
375 return ret;
376 }
377
378 reloc->delta += target_offset; 357 reloc->delta += target_offset;
379 if (obj->base.write_domain == I915_GEM_DOMAIN_CPU) { 358 if (obj->base.write_domain == I915_GEM_DOMAIN_CPU) {
380 uint32_t page_offset = reloc->offset & ~PAGE_MASK; 359 uint32_t page_offset = reloc->offset & ~PAGE_MASK;
@@ -575,7 +554,7 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
575 554
576 if (has_fenced_gpu_access) { 555 if (has_fenced_gpu_access) {
577 if (need_fence) { 556 if (need_fence) {
578 ret = i915_gem_object_get_fence(obj, ring, 1); 557 ret = i915_gem_object_get_fence(obj, ring);
579 if (ret) 558 if (ret)
580 break; 559 break;
581 } else if (entry->flags & EXEC_OBJECT_NEEDS_FENCE && 560 } else if (entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
@@ -690,11 +669,9 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
690 /* reacquire the objects */ 669 /* reacquire the objects */
691 eb_reset(eb); 670 eb_reset(eb);
692 for (i = 0; i < count; i++) { 671 for (i = 0; i < count; i++) {
693 struct drm_i915_gem_object *obj;
694
695 obj = to_intel_bo(drm_gem_object_lookup(dev, file, 672 obj = to_intel_bo(drm_gem_object_lookup(dev, file,
696 exec[i].handle)); 673 exec[i].handle));
697 if (obj == NULL) { 674 if (&obj->base == NULL) {
698 DRM_ERROR("Invalid object handle %d at index %d\n", 675 DRM_ERROR("Invalid object handle %d at index %d\n",
699 exec[i].handle, i); 676 exec[i].handle, i);
700 ret = -ENOENT; 677 ret = -ENOENT;
@@ -749,8 +726,7 @@ i915_gem_execbuffer_flush(struct drm_device *dev,
749 if ((flush_domains | invalidate_domains) & I915_GEM_GPU_DOMAINS) { 726 if ((flush_domains | invalidate_domains) & I915_GEM_GPU_DOMAINS) {
750 for (i = 0; i < I915_NUM_RINGS; i++) 727 for (i = 0; i < I915_NUM_RINGS; i++)
751 if (flush_rings & (1 << i)) { 728 if (flush_rings & (1 << i)) {
752 ret = i915_gem_flush_ring(dev, 729 ret = i915_gem_flush_ring(&dev_priv->ring[i],
753 &dev_priv->ring[i],
754 invalidate_domains, 730 invalidate_domains,
755 flush_domains); 731 flush_domains);
756 if (ret) 732 if (ret)
@@ -772,9 +748,9 @@ i915_gem_execbuffer_sync_rings(struct drm_i915_gem_object *obj,
772 if (from == NULL || to == from) 748 if (from == NULL || to == from)
773 return 0; 749 return 0;
774 750
775 /* XXX gpu semaphores are currently causing hard hangs on SNB mobile */ 751 /* XXX gpu semaphores are implicated in various hard hangs on SNB */
776 if (INTEL_INFO(obj->base.dev)->gen < 6 || IS_MOBILE(obj->base.dev)) 752 if (INTEL_INFO(obj->base.dev)->gen < 6 || !i915_semaphores)
777 return i915_gem_object_wait_rendering(obj, true); 753 return i915_gem_object_wait_rendering(obj);
778 754
779 idx = intel_ring_sync_index(from, to); 755 idx = intel_ring_sync_index(from, to);
780 756
@@ -789,7 +765,7 @@ i915_gem_execbuffer_sync_rings(struct drm_i915_gem_object *obj,
789 if (request == NULL) 765 if (request == NULL)
790 return -ENOMEM; 766 return -ENOMEM;
791 767
792 ret = i915_add_request(obj->base.dev, NULL, request, from); 768 ret = i915_add_request(from, NULL, request);
793 if (ret) { 769 if (ret) {
794 kfree(request); 770 kfree(request);
795 return ret; 771 return ret;
@@ -803,6 +779,39 @@ i915_gem_execbuffer_sync_rings(struct drm_i915_gem_object *obj,
803} 779}
804 780
805static int 781static int
782i915_gem_execbuffer_wait_for_flips(struct intel_ring_buffer *ring, u32 flips)
783{
784 u32 plane, flip_mask;
785 int ret;
786
787 /* Check for any pending flips. As we only maintain a flip queue depth
788 * of 1, we can simply insert a WAIT for the next display flip prior
789 * to executing the batch and avoid stalling the CPU.
790 */
791
792 for (plane = 0; flips >> plane; plane++) {
793 if (((flips >> plane) & 1) == 0)
794 continue;
795
796 if (plane)
797 flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
798 else
799 flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
800
801 ret = intel_ring_begin(ring, 2);
802 if (ret)
803 return ret;
804
805 intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
806 intel_ring_emit(ring, MI_NOOP);
807 intel_ring_advance(ring);
808 }
809
810 return 0;
811}
812
813
814static int
806i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring, 815i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
807 struct list_head *objects) 816 struct list_head *objects)
808{ 817{
@@ -810,19 +819,11 @@ i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
810 struct change_domains cd; 819 struct change_domains cd;
811 int ret; 820 int ret;
812 821
813 cd.invalidate_domains = 0; 822 memset(&cd, 0, sizeof(cd));
814 cd.flush_domains = 0;
815 cd.flush_rings = 0;
816 list_for_each_entry(obj, objects, exec_list) 823 list_for_each_entry(obj, objects, exec_list)
817 i915_gem_object_set_to_gpu_domain(obj, ring, &cd); 824 i915_gem_object_set_to_gpu_domain(obj, ring, &cd);
818 825
819 if (cd.invalidate_domains | cd.flush_domains) { 826 if (cd.invalidate_domains | cd.flush_domains) {
820#if WATCH_EXEC
821 DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
822 __func__,
823 cd.invalidate_domains,
824 cd.flush_domains);
825#endif
826 ret = i915_gem_execbuffer_flush(ring->dev, 827 ret = i915_gem_execbuffer_flush(ring->dev,
827 cd.invalidate_domains, 828 cd.invalidate_domains,
828 cd.flush_domains, 829 cd.flush_domains,
@@ -831,6 +832,12 @@ i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
831 return ret; 832 return ret;
832 } 833 }
833 834
835 if (cd.flips) {
836 ret = i915_gem_execbuffer_wait_for_flips(ring, cd.flips);
837 if (ret)
838 return ret;
839 }
840
834 list_for_each_entry(obj, objects, exec_list) { 841 list_for_each_entry(obj, objects, exec_list) {
835 ret = i915_gem_execbuffer_sync_rings(obj, ring); 842 ret = i915_gem_execbuffer_sync_rings(obj, ring);
836 if (ret) 843 if (ret)
@@ -877,47 +884,6 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
877 return 0; 884 return 0;
878} 885}
879 886
880static int
881i915_gem_execbuffer_wait_for_flips(struct intel_ring_buffer *ring,
882 struct list_head *objects)
883{
884 struct drm_i915_gem_object *obj;
885 int flips;
886
887 /* Check for any pending flips. As we only maintain a flip queue depth
888 * of 1, we can simply insert a WAIT for the next display flip prior
889 * to executing the batch and avoid stalling the CPU.
890 */
891 flips = 0;
892 list_for_each_entry(obj, objects, exec_list) {
893 if (obj->base.write_domain)
894 flips |= atomic_read(&obj->pending_flip);
895 }
896 if (flips) {
897 int plane, flip_mask, ret;
898
899 for (plane = 0; flips >> plane; plane++) {
900 if (((flips >> plane) & 1) == 0)
901 continue;
902
903 if (plane)
904 flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
905 else
906 flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
907
908 ret = intel_ring_begin(ring, 2);
909 if (ret)
910 return ret;
911
912 intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
913 intel_ring_emit(ring, MI_NOOP);
914 intel_ring_advance(ring);
915 }
916 }
917
918 return 0;
919}
920
921static void 887static void
922i915_gem_execbuffer_move_to_active(struct list_head *objects, 888i915_gem_execbuffer_move_to_active(struct list_head *objects,
923 struct intel_ring_buffer *ring, 889 struct intel_ring_buffer *ring,
@@ -926,6 +892,10 @@ i915_gem_execbuffer_move_to_active(struct list_head *objects,
926 struct drm_i915_gem_object *obj; 892 struct drm_i915_gem_object *obj;
927 893
928 list_for_each_entry(obj, objects, exec_list) { 894 list_for_each_entry(obj, objects, exec_list) {
895 u32 old_read = obj->base.read_domains;
896 u32 old_write = obj->base.write_domain;
897
898
929 obj->base.read_domains = obj->base.pending_read_domains; 899 obj->base.read_domains = obj->base.pending_read_domains;
930 obj->base.write_domain = obj->base.pending_write_domain; 900 obj->base.write_domain = obj->base.pending_write_domain;
931 obj->fenced_gpu_access = obj->pending_fenced_gpu_access; 901 obj->fenced_gpu_access = obj->pending_fenced_gpu_access;
@@ -939,9 +909,7 @@ i915_gem_execbuffer_move_to_active(struct list_head *objects,
939 intel_mark_busy(ring->dev, obj); 909 intel_mark_busy(ring->dev, obj);
940 } 910 }
941 911
942 trace_i915_gem_object_change_domain(obj, 912 trace_i915_gem_object_change_domain(obj, old_read, old_write);
943 obj->base.read_domains,
944 obj->base.write_domain);
945 } 913 }
946} 914}
947 915
@@ -963,14 +931,14 @@ i915_gem_execbuffer_retire_commands(struct drm_device *dev,
963 if (INTEL_INFO(dev)->gen >= 4) 931 if (INTEL_INFO(dev)->gen >= 4)
964 invalidate |= I915_GEM_DOMAIN_SAMPLER; 932 invalidate |= I915_GEM_DOMAIN_SAMPLER;
965 if (ring->flush(ring, invalidate, 0)) { 933 if (ring->flush(ring, invalidate, 0)) {
966 i915_gem_next_request_seqno(dev, ring); 934 i915_gem_next_request_seqno(ring);
967 return; 935 return;
968 } 936 }
969 937
970 /* Add a breadcrumb for the completion of the batch buffer */ 938 /* Add a breadcrumb for the completion of the batch buffer */
971 request = kzalloc(sizeof(*request), GFP_KERNEL); 939 request = kzalloc(sizeof(*request), GFP_KERNEL);
972 if (request == NULL || i915_add_request(dev, file, request, ring)) { 940 if (request == NULL || i915_add_request(ring, file, request)) {
973 i915_gem_next_request_seqno(dev, ring); 941 i915_gem_next_request_seqno(ring);
974 kfree(request); 942 kfree(request);
975 } 943 }
976} 944}
@@ -1000,10 +968,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
1000 if (ret) 968 if (ret)
1001 return ret; 969 return ret;
1002 970
1003#if WATCH_EXEC
1004 DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
1005 (int) args->buffers_ptr, args->buffer_count, args->batch_len);
1006#endif
1007 switch (args->flags & I915_EXEC_RING_MASK) { 971 switch (args->flags & I915_EXEC_RING_MASK) {
1008 case I915_EXEC_DEFAULT: 972 case I915_EXEC_DEFAULT:
1009 case I915_EXEC_RENDER: 973 case I915_EXEC_RENDER:
@@ -1113,7 +1077,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
1113 1077
1114 obj = to_intel_bo(drm_gem_object_lookup(dev, file, 1078 obj = to_intel_bo(drm_gem_object_lookup(dev, file,
1115 exec[i].handle)); 1079 exec[i].handle));
1116 if (obj == NULL) { 1080 if (&obj->base == NULL) {
1117 DRM_ERROR("Invalid object handle %d at index %d\n", 1081 DRM_ERROR("Invalid object handle %d at index %d\n",
1118 exec[i].handle, i); 1082 exec[i].handle, i);
1119 /* prevent error path from reading uninitialized data */ 1083 /* prevent error path from reading uninitialized data */
@@ -1170,11 +1134,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
1170 if (ret) 1134 if (ret)
1171 goto err; 1135 goto err;
1172 1136
1173 ret = i915_gem_execbuffer_wait_for_flips(ring, &objects); 1137 seqno = i915_gem_next_request_seqno(ring);
1174 if (ret)
1175 goto err;
1176
1177 seqno = i915_gem_next_request_seqno(dev, ring);
1178 for (i = 0; i < ARRAY_SIZE(ring->sync_seqno); i++) { 1138 for (i = 0; i < ARRAY_SIZE(ring->sync_seqno); i++) {
1179 if (seqno < ring->sync_seqno[i]) { 1139 if (seqno < ring->sync_seqno[i]) {
1180 /* The GPU can not handle its semaphore value wrapping, 1140 /* The GPU can not handle its semaphore value wrapping,
@@ -1189,6 +1149,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
1189 } 1149 }
1190 } 1150 }
1191 1151
1152 trace_i915_gem_ring_dispatch(ring, seqno);
1153
1192 exec_start = batch_obj->gtt_offset + args->batch_start_offset; 1154 exec_start = batch_obj->gtt_offset + args->batch_start_offset;
1193 exec_len = args->batch_len; 1155 exec_len = args->batch_len;
1194 if (cliprects) { 1156 if (cliprects) {
@@ -1245,11 +1207,6 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
1245 struct drm_i915_gem_exec_object2 *exec2_list = NULL; 1207 struct drm_i915_gem_exec_object2 *exec2_list = NULL;
1246 int ret, i; 1208 int ret, i;
1247 1209
1248#if WATCH_EXEC
1249 DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
1250 (int) args->buffers_ptr, args->buffer_count, args->batch_len);
1251#endif
1252
1253 if (args->buffer_count < 1) { 1210 if (args->buffer_count < 1) {
1254 DRM_ERROR("execbuf with %d buffers\n", args->buffer_count); 1211 DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
1255 return -EINVAL; 1212 return -EINVAL;
@@ -1330,17 +1287,16 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
1330 struct drm_i915_gem_exec_object2 *exec2_list = NULL; 1287 struct drm_i915_gem_exec_object2 *exec2_list = NULL;
1331 int ret; 1288 int ret;
1332 1289
1333#if WATCH_EXEC
1334 DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
1335 (int) args->buffers_ptr, args->buffer_count, args->batch_len);
1336#endif
1337
1338 if (args->buffer_count < 1) { 1290 if (args->buffer_count < 1) {
1339 DRM_ERROR("execbuf2 with %d buffers\n", args->buffer_count); 1291 DRM_ERROR("execbuf2 with %d buffers\n", args->buffer_count);
1340 return -EINVAL; 1292 return -EINVAL;
1341 } 1293 }
1342 1294
1343 exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count); 1295 exec2_list = kmalloc(sizeof(*exec2_list) * args->buffer_count,
1296 GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
1297 if (exec2_list == NULL)
1298 exec2_list = drm_malloc_ab(sizeof(*exec2_list),
1299 args->buffer_count);
1344 if (exec2_list == NULL) { 1300 if (exec2_list == NULL) {
1345 DRM_ERROR("Failed to allocate exec list for %d buffers\n", 1301 DRM_ERROR("Failed to allocate exec list for %d buffers\n",
1346 args->buffer_count); 1302 args->buffer_count);
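
The allocation change above first attempts a physically contiguous
kmalloc(), flagged __GFP_NOWARN | __GFP_NORETRY so that a large,
fragmented request fails fast and silently, and only then falls back to
the vmalloc-backed drm_malloc_ab(). Restated as a sketch:

    exec2_list = kmalloc(sizeof(*exec2_list) * args->buffer_count,
                         GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
    if (exec2_list == NULL)         /* contiguous allocation failed */
            exec2_list = drm_malloc_ab(sizeof(*exec2_list),
                                       args->buffer_count);
    if (exec2_list == NULL)         /* both allocators failed */
            return -ENOMEM;
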
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 22a32b9932c5..281ad3d6115d 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -284,14 +284,10 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
284 struct drm_i915_gem_set_tiling *args = data; 284 struct drm_i915_gem_set_tiling *args = data;
285 drm_i915_private_t *dev_priv = dev->dev_private; 285 drm_i915_private_t *dev_priv = dev->dev_private;
286 struct drm_i915_gem_object *obj; 286 struct drm_i915_gem_object *obj;
287 int ret; 287 int ret = 0;
288
289 ret = i915_gem_check_is_wedged(dev);
290 if (ret)
291 return ret;
292 288
293 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); 289 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
294 if (obj == NULL) 290 if (&obj->base == NULL)
295 return -ENOENT; 291 return -ENOENT;
296 292
297 if (!i915_tiling_ok(dev, 293 if (!i915_tiling_ok(dev,
@@ -349,14 +345,27 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
349 (obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end && 345 (obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end &&
350 i915_gem_object_fence_ok(obj, args->tiling_mode)); 346 i915_gem_object_fence_ok(obj, args->tiling_mode));
351 347
352 obj->tiling_changed = true; 348 /* Rebind if we need a change of alignment */
353 obj->tiling_mode = args->tiling_mode; 349 if (!obj->map_and_fenceable) {
354 obj->stride = args->stride; 350 u32 unfenced_alignment =
351 i915_gem_get_unfenced_gtt_alignment(obj);
352 if (obj->gtt_offset & (unfenced_alignment - 1))
353 ret = i915_gem_object_unbind(obj);
354 }
355
356 if (ret == 0) {
357 obj->tiling_changed = true;
358 obj->tiling_mode = args->tiling_mode;
359 obj->stride = args->stride;
360 }
355 } 361 }
362 /* we have to maintain this existing ABI... */
363 args->stride = obj->stride;
364 args->tiling_mode = obj->tiling_mode;
356 drm_gem_object_unreference(&obj->base); 365 drm_gem_object_unreference(&obj->base);
357 mutex_unlock(&dev->struct_mutex); 366 mutex_unlock(&dev->struct_mutex);
358 367
359 return 0; 368 return ret;
360} 369}
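
The rebind test added above relies on the unfenced GTT alignment being a
power of two, so a single mask test detects an offending offset. A tiny
sketch with invented values:

    u32 gtt_offset = 0x21000;               /* illustration only */
    u32 unfenced_alignment = 0x10000;       /* power of two */

    if (gtt_offset & (unfenced_alignment - 1))
            /* 0x21000 & 0xffff == 0x1000: misaligned, so the object
             * must be unbound and rebound at a suitable offset before
             * the new tiling mode can be used. */
            ret = i915_gem_object_unbind(obj);
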
361 370
362/** 371/**
@@ -371,7 +380,7 @@ i915_gem_get_tiling(struct drm_device *dev, void *data,
371 struct drm_i915_gem_object *obj; 380 struct drm_i915_gem_object *obj;
372 381
373 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); 382 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
374 if (obj == NULL) 383 if (&obj->base == NULL)
375 return -ENOENT; 384 return -ENOENT;
376 385
377 mutex_lock(&dev->struct_mutex); 386 mutex_lock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 97f946dcc1aa..188b497e5076 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -85,21 +85,11 @@ ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
85 } 85 }
86} 86}
87 87
88static inline u32
89i915_pipestat(int pipe)
90{
91 if (pipe == 0)
92 return PIPEASTAT;
93 if (pipe == 1)
94 return PIPEBSTAT;
95 BUG();
96}
97
98void 88void
99i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask) 89i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
100{ 90{
101 if ((dev_priv->pipestat[pipe] & mask) != mask) { 91 if ((dev_priv->pipestat[pipe] & mask) != mask) {
102 u32 reg = i915_pipestat(pipe); 92 u32 reg = PIPESTAT(pipe);
103 93
104 dev_priv->pipestat[pipe] |= mask; 94 dev_priv->pipestat[pipe] |= mask;
105 /* Enable the interrupt, clear any pending status */ 95 /* Enable the interrupt, clear any pending status */
@@ -112,7 +102,7 @@ void
112i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask) 102i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
113{ 103{
114 if ((dev_priv->pipestat[pipe] & mask) != 0) { 104 if ((dev_priv->pipestat[pipe] & mask) != 0) {
115 u32 reg = i915_pipestat(pipe); 105 u32 reg = PIPESTAT(pipe);
116 106
117 dev_priv->pipestat[pipe] &= ~mask; 107 dev_priv->pipestat[pipe] &= ~mask;
118 I915_WRITE(reg, dev_priv->pipestat[pipe]); 108 I915_WRITE(reg, dev_priv->pipestat[pipe]);
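
The deleted i915_pipestat() helper becomes unnecessary once the per-pipe
registers are addressed through a parameterized macro. A hedged sketch
of the likely shape; the offsets are placeholders rather than the real
register map:

    #define _PIPEASTAT      0x70024         /* placeholder offset */
    #define _PIPEBSTAT      0x71024         /* placeholder offset */
    #define _PIPE(pipe, a, b) ((a) + (pipe) * ((b) - (a)))
    #define PIPESTAT(pipe)  _PIPE(pipe, _PIPEASTAT, _PIPEBSTAT)
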
@@ -171,12 +161,12 @@ u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
171 161
172 if (!i915_pipe_enabled(dev, pipe)) { 162 if (!i915_pipe_enabled(dev, pipe)) {
173 DRM_DEBUG_DRIVER("trying to get vblank count for disabled " 163 DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
174 "pipe %d\n", pipe); 164 "pipe %c\n", pipe_name(pipe));
175 return 0; 165 return 0;
176 } 166 }
177 167
178 high_frame = pipe ? PIPEBFRAMEHIGH : PIPEAFRAMEHIGH; 168 high_frame = PIPEFRAME(pipe);
179 low_frame = pipe ? PIPEBFRAMEPIXEL : PIPEAFRAMEPIXEL; 169 low_frame = PIPEFRAMEPIXEL(pipe);
180 170
181 /* 171 /*
182 * High & low register fields aren't synchronized, so make sure 172 * High & low register fields aren't synchronized, so make sure
@@ -197,11 +187,11 @@ u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
197u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe) 187u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
198{ 188{
199 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 189 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
200 int reg = pipe ? PIPEB_FRMCOUNT_GM45 : PIPEA_FRMCOUNT_GM45; 190 int reg = PIPE_FRMCOUNT_GM45(pipe);
201 191
202 if (!i915_pipe_enabled(dev, pipe)) { 192 if (!i915_pipe_enabled(dev, pipe)) {
203 DRM_DEBUG_DRIVER("trying to get vblank count for disabled " 193 DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
204 "pipe %d\n", pipe); 194 "pipe %c\n", pipe_name(pipe));
205 return 0; 195 return 0;
206 } 196 }
207 197
@@ -219,7 +209,7 @@ int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
219 209
220 if (!i915_pipe_enabled(dev, pipe)) { 210 if (!i915_pipe_enabled(dev, pipe)) {
221 DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled " 211 DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
222 "pipe %d\n", pipe); 212 "pipe %c\n", pipe_name(pipe));
223 return 0; 213 return 0;
224 } 214 }
225 215
@@ -316,6 +306,8 @@ static void i915_hotplug_work_func(struct work_struct *work)
316 struct drm_mode_config *mode_config = &dev->mode_config; 306 struct drm_mode_config *mode_config = &dev->mode_config;
317 struct intel_encoder *encoder; 307 struct intel_encoder *encoder;
318 308
309 DRM_DEBUG_KMS("running encoder hotplug functions\n");
310
319 list_for_each_entry(encoder, &mode_config->encoder_list, base.head) 311 list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
320 if (encoder->hot_plug) 312 if (encoder->hot_plug)
321 encoder->hot_plug(encoder); 313 encoder->hot_plug(encoder);
@@ -365,7 +357,7 @@ static void notify_ring(struct drm_device *dev,
365 return; 357 return;
366 358
367 seqno = ring->get_seqno(ring); 359 seqno = ring->get_seqno(ring);
368 trace_i915_gem_request_complete(dev, seqno); 360 trace_i915_gem_request_complete(ring, seqno);
369 361
370 ring->irq_seqno = seqno; 362 ring->irq_seqno = seqno;
371 wake_up_all(&ring->irq_queue); 363 wake_up_all(&ring->irq_queue);
@@ -417,6 +409,7 @@ static void pch_irq_handler(struct drm_device *dev)
417{ 409{
418 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 410 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
419 u32 pch_iir; 411 u32 pch_iir;
412 int pipe;
420 413
421 pch_iir = I915_READ(SDEIIR); 414 pch_iir = I915_READ(SDEIIR);
422 415
@@ -437,13 +430,11 @@ static void pch_irq_handler(struct drm_device *dev)
437 if (pch_iir & SDE_POISON) 430 if (pch_iir & SDE_POISON)
438 DRM_ERROR("PCH poison interrupt\n"); 431 DRM_ERROR("PCH poison interrupt\n");
439 432
440 if (pch_iir & SDE_FDI_MASK) { 433 if (pch_iir & SDE_FDI_MASK)
441 u32 fdia, fdib; 434 for_each_pipe(pipe)
442 435 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
443 fdia = I915_READ(FDI_RXA_IIR); 436 pipe_name(pipe),
444 fdib = I915_READ(FDI_RXB_IIR); 437 I915_READ(FDI_RX_IIR(pipe)));
445 DRM_DEBUG_DRIVER("PCH FDI RX interrupt; FDI RXA IIR: 0x%08x, FDI RXB IIR: 0x%08x\n", fdia, fdib);
446 }
447 438
448 if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE)) 439 if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
449 DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n"); 440 DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");
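
The FDI report above is the first of several conversions in this file to
for_each_pipe(), which iterates the pipes the device actually has rather
than hard-coding A and B. A sketch of the iterator this assumes (the
real definition lives in i915_drv.h):

    #define for_each_pipe(p) \
            for ((p) = 0; (p) < dev_priv->num_pipe; (p)++)
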
@@ -648,9 +639,14 @@ static void
648i915_error_state_free(struct drm_device *dev, 639i915_error_state_free(struct drm_device *dev,
649 struct drm_i915_error_state *error) 640 struct drm_i915_error_state *error)
650{ 641{
651 i915_error_object_free(error->batchbuffer[0]); 642 int i;
652 i915_error_object_free(error->batchbuffer[1]); 643
653 i915_error_object_free(error->ringbuffer); 644 for (i = 0; i < ARRAY_SIZE(error->batchbuffer); i++)
645 i915_error_object_free(error->batchbuffer[i]);
646
647 for (i = 0; i < ARRAY_SIZE(error->ringbuffer); i++)
648 i915_error_object_free(error->ringbuffer[i]);
649
654 kfree(error->active_bo); 650 kfree(error->active_bo);
655 kfree(error->overlay); 651 kfree(error->overlay);
656 kfree(error); 652 kfree(error);
@@ -765,7 +761,7 @@ static void i915_capture_error_state(struct drm_device *dev)
765 struct drm_i915_gem_object *obj; 761 struct drm_i915_gem_object *obj;
766 struct drm_i915_error_state *error; 762 struct drm_i915_error_state *error;
767 unsigned long flags; 763 unsigned long flags;
768 int i; 764 int i, pipe;
769 765
770 spin_lock_irqsave(&dev_priv->error_lock, flags); 766 spin_lock_irqsave(&dev_priv->error_lock, flags);
771 error = dev_priv->first_error; 767 error = dev_priv->first_error;
@@ -773,19 +769,21 @@ static void i915_capture_error_state(struct drm_device *dev)
773 if (error) 769 if (error)
774 return; 770 return;
775 771
772 /* Account for pipe specific data like PIPE*STAT */
776 error = kmalloc(sizeof(*error), GFP_ATOMIC); 773 error = kmalloc(sizeof(*error), GFP_ATOMIC);
777 if (!error) { 774 if (!error) {
778 DRM_DEBUG_DRIVER("out of memory, not capturing error state\n"); 775 DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
779 return; 776 return;
780 } 777 }
781 778
782 DRM_DEBUG_DRIVER("generating error event\n"); 779 DRM_INFO("capturing error event; look for more information in /debug/dri/%d/i915_error_state\n",
780 dev->primary->index);
783 781
784 error->seqno = dev_priv->ring[RCS].get_seqno(&dev_priv->ring[RCS]); 782 error->seqno = dev_priv->ring[RCS].get_seqno(&dev_priv->ring[RCS]);
785 error->eir = I915_READ(EIR); 783 error->eir = I915_READ(EIR);
786 error->pgtbl_er = I915_READ(PGTBL_ER); 784 error->pgtbl_er = I915_READ(PGTBL_ER);
787 error->pipeastat = I915_READ(PIPEASTAT); 785 for_each_pipe(pipe)
788 error->pipebstat = I915_READ(PIPEBSTAT); 786 error->pipestat[pipe] = I915_READ(PIPESTAT(pipe));
789 error->instpm = I915_READ(INSTPM); 787 error->instpm = I915_READ(INSTPM);
790 error->error = 0; 788 error->error = 0;
791 if (INTEL_INFO(dev)->gen >= 6) { 789 if (INTEL_INFO(dev)->gen >= 6) {
@@ -824,15 +822,16 @@ static void i915_capture_error_state(struct drm_device *dev)
824 } 822 }
825 i915_gem_record_fences(dev, error); 823 i915_gem_record_fences(dev, error);
826 824
827 /* Record the active batchbuffers */ 825 /* Record the active batch and ring buffers */
828 for (i = 0; i < I915_NUM_RINGS; i++) 826 for (i = 0; i < I915_NUM_RINGS; i++) {
829 error->batchbuffer[i] = 827 error->batchbuffer[i] =
830 i915_error_first_batchbuffer(dev_priv, 828 i915_error_first_batchbuffer(dev_priv,
831 &dev_priv->ring[i]); 829 &dev_priv->ring[i]);
832 830
833 /* Record the ringbuffer */ 831 error->ringbuffer[i] =
834 error->ringbuffer = i915_error_object_create(dev_priv, 832 i915_error_object_create(dev_priv,
835 dev_priv->ring[RCS].obj); 833 dev_priv->ring[i].obj);
834 }
836 835
837 /* Record buffers on the active and pinned lists. */ 836 /* Record buffers on the active and pinned lists. */
838 error->active_bo = NULL; 837 error->active_bo = NULL;
@@ -905,6 +904,7 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
905{ 904{
906 struct drm_i915_private *dev_priv = dev->dev_private; 905 struct drm_i915_private *dev_priv = dev->dev_private;
907 u32 eir = I915_READ(EIR); 906 u32 eir = I915_READ(EIR);
907 int pipe;
908 908
909 if (!eir) 909 if (!eir)
910 return; 910 return;
@@ -953,14 +953,10 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
953 } 953 }
954 954
955 if (eir & I915_ERROR_MEMORY_REFRESH) { 955 if (eir & I915_ERROR_MEMORY_REFRESH) {
956 u32 pipea_stats = I915_READ(PIPEASTAT); 956 printk(KERN_ERR "memory refresh error:\n");
957 u32 pipeb_stats = I915_READ(PIPEBSTAT); 957 for_each_pipe(pipe)
958 958 printk(KERN_ERR "pipe %c stat: 0x%08x\n",
959 printk(KERN_ERR "memory refresh error\n"); 959 pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
960 printk(KERN_ERR "PIPEASTAT: 0x%08x\n",
961 pipea_stats);
962 printk(KERN_ERR "PIPEBSTAT: 0x%08x\n",
963 pipeb_stats);
964 /* pipestat has already been acked */ 960 /* pipestat has already been acked */
965 } 961 }
966 if (eir & I915_ERROR_INSTRUCTION) { 962 if (eir & I915_ERROR_INSTRUCTION) {
@@ -1074,10 +1070,10 @@ static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
1074 /* Potential stall - if we see that the flip has happened, assume a missed interrupt */ 1070 /* Potential stall - if we see that the flip has happened, assume a missed interrupt */
1075 obj = work->pending_flip_obj; 1071 obj = work->pending_flip_obj;
1076 if (INTEL_INFO(dev)->gen >= 4) { 1072 if (INTEL_INFO(dev)->gen >= 4) {
1077 int dspsurf = intel_crtc->plane == 0 ? DSPASURF : DSPBSURF; 1073 int dspsurf = DSPSURF(intel_crtc->plane);
1078 stall_detected = I915_READ(dspsurf) == obj->gtt_offset; 1074 stall_detected = I915_READ(dspsurf) == obj->gtt_offset;
1079 } else { 1075 } else {
1080 int dspaddr = intel_crtc->plane == 0 ? DSPAADDR : DSPBADDR; 1076 int dspaddr = DSPADDR(intel_crtc->plane);
1081 stall_detected = I915_READ(dspaddr) == (obj->gtt_offset + 1077 stall_detected = I915_READ(dspaddr) == (obj->gtt_offset +
1082 crtc->y * crtc->fb->pitch + 1078 crtc->y * crtc->fb->pitch +
1083 crtc->x * crtc->fb->bits_per_pixel/8); 1079 crtc->x * crtc->fb->bits_per_pixel/8);
@@ -1097,12 +1093,13 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
1097 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1093 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1098 struct drm_i915_master_private *master_priv; 1094 struct drm_i915_master_private *master_priv;
1099 u32 iir, new_iir; 1095 u32 iir, new_iir;
1100 u32 pipea_stats, pipeb_stats; 1096 u32 pipe_stats[I915_MAX_PIPES];
1101 u32 vblank_status; 1097 u32 vblank_status;
1102 int vblank = 0; 1098 int vblank = 0;
1103 unsigned long irqflags; 1099 unsigned long irqflags;
1104 int irq_received; 1100 int irq_received;
1105 int ret = IRQ_NONE; 1101 int ret = IRQ_NONE, pipe;
1102 bool blc_event = false;
1106 1103
1107 atomic_inc(&dev_priv->irq_received); 1104 atomic_inc(&dev_priv->irq_received);
1108 1105
@@ -1125,27 +1122,23 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
1125 * interrupts (for non-MSI). 1122 * interrupts (for non-MSI).
1126 */ 1123 */
1127 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 1124 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1128 pipea_stats = I915_READ(PIPEASTAT);
1129 pipeb_stats = I915_READ(PIPEBSTAT);
1130
1131 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) 1125 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
1132 i915_handle_error(dev, false); 1126 i915_handle_error(dev, false);
1133 1127
1134 /* 1128 for_each_pipe(pipe) {
1135 * Clear the PIPE(A|B)STAT regs before the IIR 1129 int reg = PIPESTAT(pipe);
1136 */ 1130 pipe_stats[pipe] = I915_READ(reg);
1137 if (pipea_stats & 0x8000ffff) { 1131
1138 if (pipea_stats & PIPE_FIFO_UNDERRUN_STATUS) 1132 /*
1139 DRM_DEBUG_DRIVER("pipe a underrun\n"); 1133 * Clear the PIPE*STAT regs before the IIR
1140 I915_WRITE(PIPEASTAT, pipea_stats); 1134 */
1141 irq_received = 1; 1135 if (pipe_stats[pipe] & 0x8000ffff) {
1142 } 1136 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
1143 1137 DRM_DEBUG_DRIVER("pipe %c underrun\n",
1144 if (pipeb_stats & 0x8000ffff) { 1138 pipe_name(pipe));
1145 if (pipeb_stats & PIPE_FIFO_UNDERRUN_STATUS) 1139 I915_WRITE(reg, pipe_stats[pipe]);
1146 DRM_DEBUG_DRIVER("pipe b underrun\n"); 1140 irq_received = 1;
1147 I915_WRITE(PIPEBSTAT, pipeb_stats); 1141 }
1148 irq_received = 1;
1149 } 1142 }
1150 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 1143 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1151 1144
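The 0x8000ffff mask reflects the PIPE*STAT layout this code assumes: bit 31 is the FIFO underrun status bit, bits 30:16 are interrupt-enable bits, and bits 15:0 are write-one-to-clear status bits. Acknowledging is therefore a write-back of what was read, as a sketch:

	if (pipe_stats[pipe] & 0x8000ffff)		/* any status bit pending? */
		I915_WRITE(reg, pipe_stats[pipe]);	/* write the 1s back to ack */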
@@ -1196,27 +1189,22 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
1196 intel_finish_page_flip_plane(dev, 1); 1189 intel_finish_page_flip_plane(dev, 1);
1197 } 1190 }
1198 1191
1199 if (pipea_stats & vblank_status && 1192 for_each_pipe(pipe) {
1200 drm_handle_vblank(dev, 0)) { 1193 if (pipe_stats[pipe] & vblank_status &&
1201 vblank++; 1194 drm_handle_vblank(dev, pipe)) {
1202 if (!dev_priv->flip_pending_is_done) { 1195 vblank++;
1203 i915_pageflip_stall_check(dev, 0); 1196 if (!dev_priv->flip_pending_is_done) {
1204 intel_finish_page_flip(dev, 0); 1197 i915_pageflip_stall_check(dev, pipe);
1198 intel_finish_page_flip(dev, pipe);
1199 }
1205 } 1200 }
1206 }
1207 1201
1208 if (pipeb_stats & vblank_status && 1202 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
1209 drm_handle_vblank(dev, 1)) { 1203 blc_event = true;
1210 vblank++;
1211 if (!dev_priv->flip_pending_is_done) {
1212 i915_pageflip_stall_check(dev, 1);
1213 intel_finish_page_flip(dev, 1);
1214 }
1215 } 1204 }
1216 1205
1217 if ((pipea_stats & PIPE_LEGACY_BLC_EVENT_STATUS) || 1206
1218 (pipeb_stats & PIPE_LEGACY_BLC_EVENT_STATUS) || 1207 if (blc_event || (iir & I915_ASLE_INTERRUPT))
1219 (iir & I915_ASLE_INTERRUPT))
1220 intel_opregion_asle_intr(dev); 1208 intel_opregion_asle_intr(dev);
1221 1209
1222 /* With MSI, interrupts are only generated when iir 1210 /* With MSI, interrupts are only generated when iir
@@ -1266,16 +1254,6 @@ static int i915_emit_irq(struct drm_device * dev)
1266 return dev_priv->counter; 1254 return dev_priv->counter;
1267} 1255}
1268 1256
1269void i915_trace_irq_get(struct drm_device *dev, u32 seqno)
1270{
1271 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1272 struct intel_ring_buffer *ring = LP_RING(dev_priv);
1273
1274 if (dev_priv->trace_irq_seqno == 0 &&
1275 ring->irq_get(ring))
1276 dev_priv->trace_irq_seqno = seqno;
1277}
1278
1279static int i915_wait_irq(struct drm_device * dev, int irq_nr) 1257static int i915_wait_irq(struct drm_device * dev, int irq_nr)
1280{ 1258{
1281 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1259 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -1375,7 +1353,12 @@ int i915_enable_vblank(struct drm_device *dev, int pipe)
1375 else 1353 else
1376 i915_enable_pipestat(dev_priv, pipe, 1354 i915_enable_pipestat(dev_priv, pipe,
1377 PIPE_VBLANK_INTERRUPT_ENABLE); 1355 PIPE_VBLANK_INTERRUPT_ENABLE);
1356
1357 /* maintain vblank delivery even in deep C-states */
1358 if (dev_priv->info->gen == 3)
1359 I915_WRITE(INSTPM, INSTPM_AGPBUSY_DIS << 16);
1378 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 1360 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1361
1379 return 0; 1362 return 0;
1380} 1363}
1381 1364
@@ -1388,6 +1371,10 @@ void i915_disable_vblank(struct drm_device *dev, int pipe)
1388 unsigned long irqflags; 1371 unsigned long irqflags;
1389 1372
1390 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 1373 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1374 if (dev_priv->info->gen == 3)
1375 I915_WRITE(INSTPM,
1376 INSTPM_AGPBUSY_DIS << 16 | INSTPM_AGPBUSY_DIS);
1377
1391 if (HAS_PCH_SPLIT(dev)) 1378 if (HAS_PCH_SPLIT(dev))
1392 ironlake_disable_display_irq(dev_priv, (pipe == 0) ? 1379 ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
1393 DE_PIPEA_VBLANK: DE_PIPEB_VBLANK); 1380 DE_PIPEA_VBLANK: DE_PIPEB_VBLANK);
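The INSTPM writes above appear to use the masked-register convention of these gen3 registers (an assumption based on the idiom, not stated in this diff): the high 16 bits select which of the low 16 bits the write actually updates. A sketch of the two operations:

	/* unmask bit 11 and write it as 0: clear AGPBUSY_DIS */
	I915_WRITE(INSTPM, INSTPM_AGPBUSY_DIS << 16);
	/* unmask bit 11 and write it as 1: set AGPBUSY_DIS */
	I915_WRITE(INSTPM, INSTPM_AGPBUSY_DIS << 16 | INSTPM_AGPBUSY_DIS);

Clearing AGPBUSY_DIS keeps vblank interrupts flowing in deep C-states while vblanks are enabled; setting it again restores the power-saving behaviour once they are disabled.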
@@ -1398,16 +1385,6 @@ void i915_disable_vblank(struct drm_device *dev, int pipe)
1398 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 1385 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1399} 1386}
1400 1387
1401void i915_enable_interrupt (struct drm_device *dev)
1402{
1403 struct drm_i915_private *dev_priv = dev->dev_private;
1404
1405 if (!HAS_PCH_SPLIT(dev))
1406 intel_opregion_enable_asle(dev);
1407 dev_priv->irq_enabled = 1;
1408}
1409
1410
1411/* Set the vblank monitor pipe 1388/* Set the vblank monitor pipe
1412 */ 1389 */
1413int i915_vblank_pipe_set(struct drm_device *dev, void *data, 1390int i915_vblank_pipe_set(struct drm_device *dev, void *data,
@@ -1644,14 +1621,16 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
1644 POSTING_READ(GTIER); 1621 POSTING_READ(GTIER);
1645 1622
1646 if (HAS_PCH_CPT(dev)) { 1623 if (HAS_PCH_CPT(dev)) {
1647 hotplug_mask = SDE_CRT_HOTPLUG_CPT | SDE_PORTB_HOTPLUG_CPT | 1624 hotplug_mask = (SDE_CRT_HOTPLUG_CPT |
1648 SDE_PORTC_HOTPLUG_CPT | SDE_PORTD_HOTPLUG_CPT ; 1625 SDE_PORTB_HOTPLUG_CPT |
1626 SDE_PORTC_HOTPLUG_CPT |
1627 SDE_PORTD_HOTPLUG_CPT);
1649 } else { 1628 } else {
1650 hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG | 1629 hotplug_mask = (SDE_CRT_HOTPLUG |
1651 SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG; 1630 SDE_PORTB_HOTPLUG |
1652 hotplug_mask |= SDE_AUX_MASK | SDE_FDI_MASK | SDE_TRANS_MASK; 1631 SDE_PORTC_HOTPLUG |
1653 I915_WRITE(FDI_RXA_IMR, 0); 1632 SDE_PORTD_HOTPLUG |
1654 I915_WRITE(FDI_RXB_IMR, 0); 1633 SDE_AUX_MASK);
1655 } 1634 }
1656 1635
1657 dev_priv->pch_irq_mask = ~hotplug_mask; 1636 dev_priv->pch_irq_mask = ~hotplug_mask;
@@ -1674,6 +1653,7 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
1674void i915_driver_irq_preinstall(struct drm_device * dev) 1653void i915_driver_irq_preinstall(struct drm_device * dev)
1675{ 1654{
1676 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1655 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1656 int pipe;
1677 1657
1678 atomic_set(&dev_priv->irq_received, 0); 1658 atomic_set(&dev_priv->irq_received, 0);
1679 1659
@@ -1691,8 +1671,8 @@ void i915_driver_irq_preinstall(struct drm_device * dev)
1691 } 1671 }
1692 1672
1693 I915_WRITE(HWSTAM, 0xeffe); 1673 I915_WRITE(HWSTAM, 0xeffe);
1694 I915_WRITE(PIPEASTAT, 0); 1674 for_each_pipe(pipe)
1695 I915_WRITE(PIPEBSTAT, 0); 1675 I915_WRITE(PIPESTAT(pipe), 0);
1696 I915_WRITE(IMR, 0xffffffff); 1676 I915_WRITE(IMR, 0xffffffff);
1697 I915_WRITE(IER, 0x0); 1677 I915_WRITE(IER, 0x0);
1698 POSTING_READ(IER); 1678 POSTING_READ(IER);
@@ -1804,6 +1784,7 @@ static void ironlake_irq_uninstall(struct drm_device *dev)
1804void i915_driver_irq_uninstall(struct drm_device * dev) 1784void i915_driver_irq_uninstall(struct drm_device * dev)
1805{ 1785{
1806 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1786 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1787 int pipe;
1807 1788
1808 if (!dev_priv) 1789 if (!dev_priv)
1809 return; 1790 return;
@@ -1821,12 +1802,13 @@ void i915_driver_irq_uninstall(struct drm_device * dev)
1821 } 1802 }
1822 1803
1823 I915_WRITE(HWSTAM, 0xffffffff); 1804 I915_WRITE(HWSTAM, 0xffffffff);
1824 I915_WRITE(PIPEASTAT, 0); 1805 for_each_pipe(pipe)
1825 I915_WRITE(PIPEBSTAT, 0); 1806 I915_WRITE(PIPESTAT(pipe), 0);
1826 I915_WRITE(IMR, 0xffffffff); 1807 I915_WRITE(IMR, 0xffffffff);
1827 I915_WRITE(IER, 0x0); 1808 I915_WRITE(IER, 0x0);
1828 1809
1829 I915_WRITE(PIPEASTAT, I915_READ(PIPEASTAT) & 0x8000ffff); 1810 for_each_pipe(pipe)
1830 I915_WRITE(PIPEBSTAT, I915_READ(PIPEBSTAT) & 0x8000ffff); 1811 I915_WRITE(PIPESTAT(pipe),
1812 I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
1831 I915_WRITE(IIR, I915_READ(IIR)); 1813 I915_WRITE(IIR, I915_READ(IIR));
1832} 1814}
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 5cfc68940f17..363f66ca5d33 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -174,7 +174,9 @@
174 * address/value pairs. Don't overdo it, though, x <= 2^4 must hold! 174 * address/value pairs. Don't overdo it, though, x <= 2^4 must hold!
175 */ 175 */
176#define MI_LOAD_REGISTER_IMM(x) MI_INSTR(0x22, 2*x-1) 176#define MI_LOAD_REGISTER_IMM(x) MI_INSTR(0x22, 2*x-1)
177#define MI_FLUSH_DW MI_INSTR(0x26, 2) /* for GEN6 */ 177#define MI_FLUSH_DW MI_INSTR(0x26, 1) /* for GEN6 */
178#define MI_INVALIDATE_TLB (1<<18)
179#define MI_INVALIDATE_BSD (1<<7)
178#define MI_BATCH_BUFFER MI_INSTR(0x30, 1) 180#define MI_BATCH_BUFFER MI_INSTR(0x30, 1)
179#define MI_BATCH_NON_SECURE (1) 181#define MI_BATCH_NON_SECURE (1)
180#define MI_BATCH_NON_SECURE_I965 (1<<8) 182#define MI_BATCH_NON_SECURE_I965 (1<<8)
@@ -403,9 +405,12 @@
403#define I915_ERROR_INSTRUCTION (1<<0) 405#define I915_ERROR_INSTRUCTION (1<<0)
404#define INSTPM 0x020c0 406#define INSTPM 0x020c0
405#define INSTPM_SELF_EN (1<<12) /* 915GM only */ 407#define INSTPM_SELF_EN (1<<12) /* 915GM only */
408#define INSTPM_AGPBUSY_DIS (1<<11) /* gen3: when disabled, pending interrupts
409 will not assert AGPBUSY# and will only
410 be delivered when out of C3. */
406#define ACTHD 0x020c8 411#define ACTHD 0x020c8
407#define FW_BLC 0x020d8 412#define FW_BLC 0x020d8
408#define FW_BLC2 0x020dc 413#define FW_BLC2 0x020dc
409#define FW_BLC_SELF 0x020e0 /* 915+ only */ 414#define FW_BLC_SELF 0x020e0 /* 915+ only */
410#define FW_BLC_SELF_EN_MASK (1<<31) 415#define FW_BLC_SELF_EN_MASK (1<<31)
411#define FW_BLC_SELF_FIFO_MASK (1<<16) /* 945 only */ 416#define FW_BLC_SELF_FIFO_MASK (1<<16) /* 945 only */
@@ -704,9 +709,9 @@
704#define VGA1_PD_P1_DIV_2 (1 << 13) 709#define VGA1_PD_P1_DIV_2 (1 << 13)
705#define VGA1_PD_P1_SHIFT 8 710#define VGA1_PD_P1_SHIFT 8
706#define VGA1_PD_P1_MASK (0x1f << 8) 711#define VGA1_PD_P1_MASK (0x1f << 8)
707#define DPLL_A 0x06014 712#define _DPLL_A 0x06014
708#define DPLL_B 0x06018 713#define _DPLL_B 0x06018
709#define DPLL(pipe) _PIPE(pipe, DPLL_A, DPLL_B) 714#define DPLL(pipe) _PIPE(pipe, _DPLL_A, _DPLL_B)
710#define DPLL_VCO_ENABLE (1 << 31) 715#define DPLL_VCO_ENABLE (1 << 31)
711#define DPLL_DVO_HIGH_SPEED (1 << 30) 716#define DPLL_DVO_HIGH_SPEED (1 << 30)
712#define DPLL_SYNCLOCK_ENABLE (1 << 29) 717#define DPLL_SYNCLOCK_ENABLE (1 << 29)
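These renames (DPLL_A to _DPLL_A, and so on throughout this file) reserve the unprefixed names for the new pipe-indexed macros. The _PIPE() indexer they rely on is defined earlier in i915_reg.h, outside this diff; a sketch of the assumed arithmetic:

	/* sketch: pick the pipe-A or pipe-B instance of a register pair */
	#define _PIPE(pipe, a, b) ((a) + (pipe)*((b)-(a)))

	/* e.g. DPLL(0) == _DPLL_A (0x06014), DPLL(1) == _DPLL_B (0x06018) */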
@@ -777,7 +782,7 @@
777#define SDVO_MULTIPLIER_MASK 0x000000ff 782#define SDVO_MULTIPLIER_MASK 0x000000ff
778#define SDVO_MULTIPLIER_SHIFT_HIRES 4 783#define SDVO_MULTIPLIER_SHIFT_HIRES 4
779#define SDVO_MULTIPLIER_SHIFT_VGA 0 784#define SDVO_MULTIPLIER_SHIFT_VGA 0
780#define DPLL_A_MD 0x0601c /* 965+ only */ 785#define _DPLL_A_MD 0x0601c /* 965+ only */
781/* 786/*
782 * UDI pixel divider, controlling how many pixels are stuffed into a packet. 787 * UDI pixel divider, controlling how many pixels are stuffed into a packet.
783 * 788 *
@@ -814,14 +819,14 @@
814 */ 819 */
815#define DPLL_MD_VGA_UDI_MULTIPLIER_MASK 0x0000003f 820#define DPLL_MD_VGA_UDI_MULTIPLIER_MASK 0x0000003f
816#define DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT 0 821#define DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT 0
817#define DPLL_B_MD 0x06020 /* 965+ only */ 822#define _DPLL_B_MD 0x06020 /* 965+ only */
818#define DPLL_MD(pipe) _PIPE(pipe, DPLL_A_MD, DPLL_B_MD) 823#define DPLL_MD(pipe) _PIPE(pipe, _DPLL_A_MD, _DPLL_B_MD)
819#define FPA0 0x06040 824#define _FPA0 0x06040
820#define FPA1 0x06044 825#define _FPA1 0x06044
821#define FPB0 0x06048 826#define _FPB0 0x06048
822#define FPB1 0x0604c 827#define _FPB1 0x0604c
823#define FP0(pipe) _PIPE(pipe, FPA0, FPB0) 828#define FP0(pipe) _PIPE(pipe, _FPA0, _FPB0)
824#define FP1(pipe) _PIPE(pipe, FPA1, FPB1) 829#define FP1(pipe) _PIPE(pipe, _FPA1, _FPB1)
825#define FP_N_DIV_MASK 0x003f0000 830#define FP_N_DIV_MASK 0x003f0000
826#define FP_N_PINEVIEW_DIV_MASK 0x00ff0000 831#define FP_N_PINEVIEW_DIV_MASK 0x00ff0000
827#define FP_N_DIV_SHIFT 16 832#define FP_N_DIV_SHIFT 16
@@ -960,8 +965,9 @@
960 * Palette regs 965 * Palette regs
961 */ 966 */
962 967
963#define PALETTE_A 0x0a000 968#define _PALETTE_A 0x0a000
964#define PALETTE_B 0x0a800 969#define _PALETTE_B 0x0a800
970#define PALETTE(pipe) _PIPE(pipe, _PALETTE_A, _PALETTE_B)
965 971
966/* MCH MMIO space */ 972/* MCH MMIO space */
967 973
@@ -1265,32 +1271,32 @@
1265 */ 1271 */
1266 1272
1267/* Pipe A timing regs */ 1273/* Pipe A timing regs */
1268#define HTOTAL_A 0x60000 1274#define _HTOTAL_A 0x60000
1269#define HBLANK_A 0x60004 1275#define _HBLANK_A 0x60004
1270#define HSYNC_A 0x60008 1276#define _HSYNC_A 0x60008
1271#define VTOTAL_A 0x6000c 1277#define _VTOTAL_A 0x6000c
1272#define VBLANK_A 0x60010 1278#define _VBLANK_A 0x60010
1273#define VSYNC_A 0x60014 1279#define _VSYNC_A 0x60014
1274#define PIPEASRC 0x6001c 1280#define _PIPEASRC 0x6001c
1275#define BCLRPAT_A 0x60020 1281#define _BCLRPAT_A 0x60020
1276 1282
1277/* Pipe B timing regs */ 1283/* Pipe B timing regs */
1278#define HTOTAL_B 0x61000 1284#define _HTOTAL_B 0x61000
1279#define HBLANK_B 0x61004 1285#define _HBLANK_B 0x61004
1280#define HSYNC_B 0x61008 1286#define _HSYNC_B 0x61008
1281#define VTOTAL_B 0x6100c 1287#define _VTOTAL_B 0x6100c
1282#define VBLANK_B 0x61010 1288#define _VBLANK_B 0x61010
1283#define VSYNC_B 0x61014 1289#define _VSYNC_B 0x61014
1284#define PIPEBSRC 0x6101c 1290#define _PIPEBSRC 0x6101c
1285#define BCLRPAT_B 0x61020 1291#define _BCLRPAT_B 0x61020
1286 1292
1287#define HTOTAL(pipe) _PIPE(pipe, HTOTAL_A, HTOTAL_B) 1293#define HTOTAL(pipe) _PIPE(pipe, _HTOTAL_A, _HTOTAL_B)
1288#define HBLANK(pipe) _PIPE(pipe, HBLANK_A, HBLANK_B) 1294#define HBLANK(pipe) _PIPE(pipe, _HBLANK_A, _HBLANK_B)
1289#define HSYNC(pipe) _PIPE(pipe, HSYNC_A, HSYNC_B) 1295#define HSYNC(pipe) _PIPE(pipe, _HSYNC_A, _HSYNC_B)
1290#define VTOTAL(pipe) _PIPE(pipe, VTOTAL_A, VTOTAL_B) 1296#define VTOTAL(pipe) _PIPE(pipe, _VTOTAL_A, _VTOTAL_B)
1291#define VBLANK(pipe) _PIPE(pipe, VBLANK_A, VBLANK_B) 1297#define VBLANK(pipe) _PIPE(pipe, _VBLANK_A, _VBLANK_B)
1292#define VSYNC(pipe) _PIPE(pipe, VSYNC_A, VSYNC_B) 1298#define VSYNC(pipe) _PIPE(pipe, _VSYNC_A, _VSYNC_B)
1293#define BCLRPAT(pipe) _PIPE(pipe, BCLRPAT_A, BCLRPAT_B) 1299#define BCLRPAT(pipe) _PIPE(pipe, _BCLRPAT_A, _BCLRPAT_B)
1294 1300
1295/* VGA port control */ 1301/* VGA port control */
1296#define ADPA 0x61100 1302#define ADPA 0x61100
@@ -1384,6 +1390,7 @@
1384#define SDVO_ENCODING_HDMI (0x2 << 10) 1390#define SDVO_ENCODING_HDMI (0x2 << 10)
1385/** Required for HDMI operation */ 1391/** Required for HDMI operation */
1386#define SDVO_NULL_PACKETS_DURING_VSYNC (1 << 9) 1392#define SDVO_NULL_PACKETS_DURING_VSYNC (1 << 9)
1393#define SDVO_COLOR_RANGE_16_235 (1 << 8)
1387#define SDVO_BORDER_ENABLE (1 << 7) 1394#define SDVO_BORDER_ENABLE (1 << 7)
1388#define SDVO_AUDIO_ENABLE (1 << 6) 1395#define SDVO_AUDIO_ENABLE (1 << 6)
1389/** New with 965, default is to be set */ 1396/** New with 965, default is to be set */
@@ -1439,8 +1446,13 @@
1439#define LVDS_PORT_EN (1 << 31) 1446#define LVDS_PORT_EN (1 << 31)
1440/* Selects pipe B for LVDS data. Must be set on pre-965. */ 1447/* Selects pipe B for LVDS data. Must be set on pre-965. */
1441#define LVDS_PIPEB_SELECT (1 << 30) 1448#define LVDS_PIPEB_SELECT (1 << 30)
1449#define LVDS_PIPE_MASK (1 << 30)
1442/* LVDS dithering flag on 965/g4x platform */ 1450/* LVDS dithering flag on 965/g4x platform */
1443#define LVDS_ENABLE_DITHER (1 << 25) 1451#define LVDS_ENABLE_DITHER (1 << 25)
1452/* LVDS sync polarity flags. Set to invert (i.e. negative) */
1453#define LVDS_VSYNC_POLARITY (1 << 21)
1454#define LVDS_HSYNC_POLARITY (1 << 20)
1455
1444/* Enable border for unscaled (or aspect-scaled) display */ 1456/* Enable border for unscaled (or aspect-scaled) display */
1445#define LVDS_BORDER_ENABLE (1 << 15) 1457#define LVDS_BORDER_ENABLE (1 << 15)
1446/* 1458/*
@@ -1474,6 +1486,9 @@
1474#define LVDS_B0B3_POWER_DOWN (0 << 2) 1486#define LVDS_B0B3_POWER_DOWN (0 << 2)
1475#define LVDS_B0B3_POWER_UP (3 << 2) 1487#define LVDS_B0B3_POWER_UP (3 << 2)
1476 1488
1489#define LVDS_PIPE_ENABLED(V, P) \
1490 (((V) & (LVDS_PIPE_MASK | LVDS_PORT_EN)) == ((P) << 30 | LVDS_PORT_EN))
1491
1477/* Video Data Island Packet control */ 1492/* Video Data Island Packet control */
1478#define VIDEO_DIP_DATA 0x61178 1493#define VIDEO_DIP_DATA 0x61178
1479#define VIDEO_DIP_CTL 0x61170 1494#define VIDEO_DIP_CTL 0x61170
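LVDS_PIPE_ENABLED() (like the matching DP_PIPE_ENABLED, ADPA_PIPE_ENABLED and HDMI_PIPE_ENABLED helpers added later in this patch) tests two things at once: the port is enabled and its pipe-select bit matches the given pipe. A hypothetical call site, for illustration only:

	u32 val = I915_READ(LVDS);
	if (LVDS_PIPE_ENABLED(val, pipe))
		/* the LVDS port is being driven by 'pipe' */
		DRM_DEBUG_KMS("LVDS on pipe %c\n", pipe_name(pipe));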
@@ -1551,17 +1566,7 @@
1551 1566
1552/* Backlight control */ 1567/* Backlight control */
1553#define BLC_PWM_CTL 0x61254 1568#define BLC_PWM_CTL 0x61254
1554#define BACKLIGHT_MODULATION_FREQ_SHIFT (17)
1555#define BLC_PWM_CTL2 0x61250 /* 965+ only */ 1569#define BLC_PWM_CTL2 0x61250 /* 965+ only */
1556#define BLM_COMBINATION_MODE (1 << 30)
1557/*
1558 * This is the most significant 15 bits of the number of backlight cycles in a
1559 * complete cycle of the modulated backlight control.
1560 *
1561 * The actual value is this field multiplied by two.
1562 */
1563#define BACKLIGHT_MODULATION_FREQ_MASK (0x7fff << 17)
1564#define BLM_LEGACY_MODE (1 << 16)
1565/* 1570/*
1566 * This is the number of cycles out of the backlight modulation cycle for which 1571 * This is the number of cycles out of the backlight modulation cycle for which
1567 * the backlight is on. 1572 * the backlight is on.
@@ -2062,6 +2067,10 @@
2062 2067
2063#define DP_PORT_EN (1 << 31) 2068#define DP_PORT_EN (1 << 31)
2064#define DP_PIPEB_SELECT (1 << 30) 2069#define DP_PIPEB_SELECT (1 << 30)
2070#define DP_PIPE_MASK (1 << 30)
2071
2072#define DP_PIPE_ENABLED(V, P) \
2073 (((V) & (DP_PIPE_MASK | DP_PORT_EN)) == ((P) << 30 | DP_PORT_EN))
2065 2074
2066/* Link training mode - select a suitable mode for each stage */ 2075/* Link training mode - select a suitable mode for each stage */
2067#define DP_LINK_TRAIN_PAT_1 (0 << 28) 2076#define DP_LINK_TRAIN_PAT_1 (0 << 28)
@@ -2204,8 +2213,8 @@
2204 * which is after the LUTs, so we want the bytes for our color format. 2213 * which is after the LUTs, so we want the bytes for our color format.
2205 * For our current usage, this is always 3, one byte for R, G and B. 2214 * For our current usage, this is always 3, one byte for R, G and B.
2206 */ 2215 */
2207#define PIPEA_GMCH_DATA_M 0x70050 2216#define _PIPEA_GMCH_DATA_M 0x70050
2208#define PIPEB_GMCH_DATA_M 0x71050 2217#define _PIPEB_GMCH_DATA_M 0x71050
2209 2218
2210/* Transfer unit size for display port - 1, default is 0x3f (for TU size 64) */ 2219/* Transfer unit size for display port - 1, default is 0x3f (for TU size 64) */
2211#define PIPE_GMCH_DATA_M_TU_SIZE_MASK (0x3f << 25) 2220#define PIPE_GMCH_DATA_M_TU_SIZE_MASK (0x3f << 25)
@@ -2213,8 +2222,8 @@
2213 2222
2214#define PIPE_GMCH_DATA_M_MASK (0xffffff) 2223#define PIPE_GMCH_DATA_M_MASK (0xffffff)
2215 2224
2216#define PIPEA_GMCH_DATA_N 0x70054 2225#define _PIPEA_GMCH_DATA_N 0x70054
2217#define PIPEB_GMCH_DATA_N 0x71054 2226#define _PIPEB_GMCH_DATA_N 0x71054
2218#define PIPE_GMCH_DATA_N_MASK (0xffffff) 2227#define PIPE_GMCH_DATA_N_MASK (0xffffff)
2219 2228
2220/* 2229/*
@@ -2228,20 +2237,25 @@
2228 * Attributes and VB-ID. 2237 * Attributes and VB-ID.
2229 */ 2238 */
2230 2239
2231#define PIPEA_DP_LINK_M 0x70060 2240#define _PIPEA_DP_LINK_M 0x70060
2232#define PIPEB_DP_LINK_M 0x71060 2241#define _PIPEB_DP_LINK_M 0x71060
2233#define PIPEA_DP_LINK_M_MASK (0xffffff) 2242#define PIPEA_DP_LINK_M_MASK (0xffffff)
2234 2243
2235#define PIPEA_DP_LINK_N 0x70064 2244#define _PIPEA_DP_LINK_N 0x70064
2236#define PIPEB_DP_LINK_N 0x71064 2245#define _PIPEB_DP_LINK_N 0x71064
2237#define PIPEA_DP_LINK_N_MASK (0xffffff) 2246#define PIPEA_DP_LINK_N_MASK (0xffffff)
2238 2247
2248#define PIPE_GMCH_DATA_M(pipe) _PIPE(pipe, _PIPEA_GMCH_DATA_M, _PIPEB_GMCH_DATA_M)
2249#define PIPE_GMCH_DATA_N(pipe) _PIPE(pipe, _PIPEA_GMCH_DATA_N, _PIPEB_GMCH_DATA_N)
2250#define PIPE_DP_LINK_M(pipe) _PIPE(pipe, _PIPEA_DP_LINK_M, _PIPEB_DP_LINK_M)
2251#define PIPE_DP_LINK_N(pipe) _PIPE(pipe, _PIPEA_DP_LINK_N, _PIPEB_DP_LINK_N)
2252
2239/* Display & cursor control */ 2253/* Display & cursor control */
2240 2254
2241/* Pipe A */ 2255/* Pipe A */
2242#define PIPEADSL 0x70000 2256#define _PIPEADSL 0x70000
2243#define DSL_LINEMASK 0x00000fff 2257#define DSL_LINEMASK 0x00000fff
2244#define PIPEACONF 0x70008 2258#define _PIPEACONF 0x70008
2245#define PIPECONF_ENABLE (1<<31) 2259#define PIPECONF_ENABLE (1<<31)
2246#define PIPECONF_DISABLE 0 2260#define PIPECONF_DISABLE 0
2247#define PIPECONF_DOUBLE_WIDE (1<<30) 2261#define PIPECONF_DOUBLE_WIDE (1<<30)
@@ -2267,7 +2281,7 @@
2267#define PIPECONF_DITHER_TYPE_ST1 (1<<2) 2281#define PIPECONF_DITHER_TYPE_ST1 (1<<2)
2268#define PIPECONF_DITHER_TYPE_ST2 (2<<2) 2282#define PIPECONF_DITHER_TYPE_ST2 (2<<2)
2269#define PIPECONF_DITHER_TYPE_TEMP (3<<2) 2283#define PIPECONF_DITHER_TYPE_TEMP (3<<2)
2270#define PIPEASTAT 0x70024 2284#define _PIPEASTAT 0x70024
2271#define PIPE_FIFO_UNDERRUN_STATUS (1UL<<31) 2285#define PIPE_FIFO_UNDERRUN_STATUS (1UL<<31)
2272#define PIPE_CRC_ERROR_ENABLE (1UL<<29) 2286#define PIPE_CRC_ERROR_ENABLE (1UL<<29)
2273#define PIPE_CRC_DONE_ENABLE (1UL<<28) 2287#define PIPE_CRC_DONE_ENABLE (1UL<<28)
@@ -2303,10 +2317,12 @@
2303#define PIPE_6BPC (2 << 5) 2317#define PIPE_6BPC (2 << 5)
2304#define PIPE_12BPC (3 << 5) 2318#define PIPE_12BPC (3 << 5)
2305 2319
2306#define PIPESRC(pipe) _PIPE(pipe, PIPEASRC, PIPEBSRC) 2320#define PIPESRC(pipe) _PIPE(pipe, _PIPEASRC, _PIPEBSRC)
2307#define PIPECONF(pipe) _PIPE(pipe, PIPEACONF, PIPEBCONF) 2321#define PIPECONF(pipe) _PIPE(pipe, _PIPEACONF, _PIPEBCONF)
2308#define PIPEDSL(pipe) _PIPE(pipe, PIPEADSL, PIPEBDSL) 2322#define PIPEDSL(pipe) _PIPE(pipe, _PIPEADSL, _PIPEBDSL)
2309#define PIPEFRAMEPIXEL(pipe) _PIPE(pipe, PIPEAFRAMEPIXEL, PIPEBFRAMEPIXEL) 2323#define PIPEFRAME(pipe) _PIPE(pipe, _PIPEAFRAMEHIGH, _PIPEBFRAMEHIGH)
2324#define PIPEFRAMEPIXEL(pipe) _PIPE(pipe, _PIPEAFRAMEPIXEL, _PIPEBFRAMEPIXEL)
2325#define PIPESTAT(pipe) _PIPE(pipe, _PIPEASTAT, _PIPEBSTAT)
2310 2326
2311#define DSPARB 0x70030 2327#define DSPARB 0x70030
2312#define DSPARB_CSTART_MASK (0x7f << 7) 2328#define DSPARB_CSTART_MASK (0x7f << 7)
@@ -2468,20 +2484,21 @@
2468 * } while (high1 != high2); 2484 * } while (high1 != high2);
2469 * frame = (high1 << 8) | low1; 2485 * frame = (high1 << 8) | low1;
2470 */ 2486 */
2471#define PIPEAFRAMEHIGH 0x70040 2487#define _PIPEAFRAMEHIGH 0x70040
2472#define PIPE_FRAME_HIGH_MASK 0x0000ffff 2488#define PIPE_FRAME_HIGH_MASK 0x0000ffff
2473#define PIPE_FRAME_HIGH_SHIFT 0 2489#define PIPE_FRAME_HIGH_SHIFT 0
2474#define PIPEAFRAMEPIXEL 0x70044 2490#define _PIPEAFRAMEPIXEL 0x70044
2475#define PIPE_FRAME_LOW_MASK 0xff000000 2491#define PIPE_FRAME_LOW_MASK 0xff000000
2476#define PIPE_FRAME_LOW_SHIFT 24 2492#define PIPE_FRAME_LOW_SHIFT 24
2477#define PIPE_PIXEL_MASK 0x00ffffff 2493#define PIPE_PIXEL_MASK 0x00ffffff
2478#define PIPE_PIXEL_SHIFT 0 2494#define PIPE_PIXEL_SHIFT 0
2479/* GM45+ just has to be different */ 2495/* GM45+ just has to be different */
2480#define PIPEA_FRMCOUNT_GM45 0x70040 2496#define _PIPEA_FRMCOUNT_GM45 0x70040
2481#define PIPEA_FLIPCOUNT_GM45 0x70044 2497#define _PIPEA_FLIPCOUNT_GM45 0x70044
2498#define PIPE_FRMCOUNT_GM45(pipe) _PIPE(pipe, _PIPEA_FRMCOUNT_GM45, _PIPEB_FRMCOUNT_GM45)
2482 2499
2483/* Cursor A & B regs */ 2500/* Cursor A & B regs */
2484#define CURACNTR 0x70080 2501#define _CURACNTR 0x70080
2485/* Old style CUR*CNTR flags (desktop 8xx) */ 2502/* Old style CUR*CNTR flags (desktop 8xx) */
2486#define CURSOR_ENABLE 0x80000000 2503#define CURSOR_ENABLE 0x80000000
2487#define CURSOR_GAMMA_ENABLE 0x40000000 2504#define CURSOR_GAMMA_ENABLE 0x40000000
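The comment fragment above ("} while (high1 != high2)") is the tail of the documented frame-counter read sequence: the counter's high and low bits live in two registers, so the high half is read on both sides of the low half and the sequence is retried until it is stable. A sketch of the full loop using the new per-pipe macros:

	do {
		high1 = (I915_READ(PIPEFRAME(pipe)) & PIPE_FRAME_HIGH_MASK) >>
			PIPE_FRAME_HIGH_SHIFT;
		low = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_FRAME_LOW_MASK) >>
			PIPE_FRAME_LOW_SHIFT;
		high2 = (I915_READ(PIPEFRAME(pipe)) & PIPE_FRAME_HIGH_MASK) >>
			PIPE_FRAME_HIGH_SHIFT;
	} while (high1 != high2);
	frame = (high1 << 8) | low;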
@@ -2502,23 +2519,23 @@
2502#define MCURSOR_PIPE_A 0x00 2519#define MCURSOR_PIPE_A 0x00
2503#define MCURSOR_PIPE_B (1 << 28) 2520#define MCURSOR_PIPE_B (1 << 28)
2504#define MCURSOR_GAMMA_ENABLE (1 << 26) 2521#define MCURSOR_GAMMA_ENABLE (1 << 26)
2505#define CURABASE 0x70084 2522#define _CURABASE 0x70084
2506#define CURAPOS 0x70088 2523#define _CURAPOS 0x70088
2507#define CURSOR_POS_MASK 0x007FF 2524#define CURSOR_POS_MASK 0x007FF
2508#define CURSOR_POS_SIGN 0x8000 2525#define CURSOR_POS_SIGN 0x8000
2509#define CURSOR_X_SHIFT 0 2526#define CURSOR_X_SHIFT 0
2510#define CURSOR_Y_SHIFT 16 2527#define CURSOR_Y_SHIFT 16
2511#define CURSIZE 0x700a0 2528#define CURSIZE 0x700a0
2512#define CURBCNTR 0x700c0 2529#define _CURBCNTR 0x700c0
2513#define CURBBASE 0x700c4 2530#define _CURBBASE 0x700c4
2514#define CURBPOS 0x700c8 2531#define _CURBPOS 0x700c8
2515 2532
2516#define CURCNTR(pipe) _PIPE(pipe, CURACNTR, CURBCNTR) 2533#define CURCNTR(pipe) _PIPE(pipe, _CURACNTR, _CURBCNTR)
2517#define CURBASE(pipe) _PIPE(pipe, CURABASE, CURBBASE) 2534#define CURBASE(pipe) _PIPE(pipe, _CURABASE, _CURBBASE)
2518#define CURPOS(pipe) _PIPE(pipe, CURAPOS, CURBPOS) 2535#define CURPOS(pipe) _PIPE(pipe, _CURAPOS, _CURBPOS)
2519 2536
2520/* Display A control */ 2537/* Display A control */
2521#define DSPACNTR 0x70180 2538#define _DSPACNTR 0x70180
2522#define DISPLAY_PLANE_ENABLE (1<<31) 2539#define DISPLAY_PLANE_ENABLE (1<<31)
2523#define DISPLAY_PLANE_DISABLE 0 2540#define DISPLAY_PLANE_DISABLE 0
2524#define DISPPLANE_GAMMA_ENABLE (1<<30) 2541#define DISPPLANE_GAMMA_ENABLE (1<<30)
@@ -2532,9 +2549,10 @@
2532#define DISPPLANE_32BPP_30BIT_NO_ALPHA (0xa<<26) 2549#define DISPPLANE_32BPP_30BIT_NO_ALPHA (0xa<<26)
2533#define DISPPLANE_STEREO_ENABLE (1<<25) 2550#define DISPPLANE_STEREO_ENABLE (1<<25)
2534#define DISPPLANE_STEREO_DISABLE 0 2551#define DISPPLANE_STEREO_DISABLE 0
2535#define DISPPLANE_SEL_PIPE_MASK (1<<24) 2552#define DISPPLANE_SEL_PIPE_SHIFT 24
2553#define DISPPLANE_SEL_PIPE_MASK (3<<DISPPLANE_SEL_PIPE_SHIFT)
2536#define DISPPLANE_SEL_PIPE_A 0 2554#define DISPPLANE_SEL_PIPE_A 0
2537#define DISPPLANE_SEL_PIPE_B (1<<24) 2555#define DISPPLANE_SEL_PIPE_B (1<<DISPPLANE_SEL_PIPE_SHIFT)
2538#define DISPPLANE_SRC_KEY_ENABLE (1<<22) 2556#define DISPPLANE_SRC_KEY_ENABLE (1<<22)
2539#define DISPPLANE_SRC_KEY_DISABLE 0 2557#define DISPPLANE_SRC_KEY_DISABLE 0
2540#define DISPPLANE_LINE_DOUBLE (1<<20) 2558#define DISPPLANE_LINE_DOUBLE (1<<20)
@@ -2543,20 +2561,20 @@
2543#define DISPPLANE_STEREO_POLARITY_SECOND (1<<18) 2561#define DISPPLANE_STEREO_POLARITY_SECOND (1<<18)
2544#define DISPPLANE_TRICKLE_FEED_DISABLE (1<<14) /* Ironlake */ 2562#define DISPPLANE_TRICKLE_FEED_DISABLE (1<<14) /* Ironlake */
2545#define DISPPLANE_TILED (1<<10) 2563#define DISPPLANE_TILED (1<<10)
2546#define DSPAADDR 0x70184 2564#define _DSPAADDR 0x70184
2547#define DSPASTRIDE 0x70188 2565#define _DSPASTRIDE 0x70188
2548#define DSPAPOS 0x7018C /* reserved */ 2566#define _DSPAPOS 0x7018C /* reserved */
2549#define DSPASIZE 0x70190 2567#define _DSPASIZE 0x70190
2550#define DSPASURF 0x7019C /* 965+ only */ 2568#define _DSPASURF 0x7019C /* 965+ only */
2551#define DSPATILEOFF 0x701A4 /* 965+ only */ 2569#define _DSPATILEOFF 0x701A4 /* 965+ only */
2552 2570
2553#define DSPCNTR(plane) _PIPE(plane, DSPACNTR, DSPBCNTR) 2571#define DSPCNTR(plane) _PIPE(plane, _DSPACNTR, _DSPBCNTR)
2554#define DSPADDR(plane) _PIPE(plane, DSPAADDR, DSPBADDR) 2572#define DSPADDR(plane) _PIPE(plane, _DSPAADDR, _DSPBADDR)
2555#define DSPSTRIDE(plane) _PIPE(plane, DSPASTRIDE, DSPBSTRIDE) 2573#define DSPSTRIDE(plane) _PIPE(plane, _DSPASTRIDE, _DSPBSTRIDE)
2556#define DSPPOS(plane) _PIPE(plane, DSPAPOS, DSPBPOS) 2574#define DSPPOS(plane) _PIPE(plane, _DSPAPOS, _DSPBPOS)
2557#define DSPSIZE(plane) _PIPE(plane, DSPASIZE, DSPBSIZE) 2575#define DSPSIZE(plane) _PIPE(plane, _DSPASIZE, _DSPBSIZE)
2558#define DSPSURF(plane) _PIPE(plane, DSPASURF, DSPBSURF) 2576#define DSPSURF(plane) _PIPE(plane, _DSPASURF, _DSPBSURF)
2559#define DSPTILEOFF(plane) _PIPE(plane, DSPATILEOFF, DSPBTILEOFF) 2577#define DSPTILEOFF(plane) _PIPE(plane, _DSPATILEOFF, _DSPBTILEOFF)
2560 2578
2561/* VBIOS flags */ 2579/* VBIOS flags */
2562#define SWF00 0x71410 2580#define SWF00 0x71410
@@ -2574,27 +2592,27 @@
2574#define SWF32 0x7241c 2592#define SWF32 0x7241c
2575 2593
2576/* Pipe B */ 2594/* Pipe B */
2577#define PIPEBDSL 0x71000 2595#define _PIPEBDSL 0x71000
2578#define PIPEBCONF 0x71008 2596#define _PIPEBCONF 0x71008
2579#define PIPEBSTAT 0x71024 2597#define _PIPEBSTAT 0x71024
2580#define PIPEBFRAMEHIGH 0x71040 2598#define _PIPEBFRAMEHIGH 0x71040
2581#define PIPEBFRAMEPIXEL 0x71044 2599#define _PIPEBFRAMEPIXEL 0x71044
2582#define PIPEB_FRMCOUNT_GM45 0x71040 2600#define _PIPEB_FRMCOUNT_GM45 0x71040
2583#define PIPEB_FLIPCOUNT_GM45 0x71044 2601#define _PIPEB_FLIPCOUNT_GM45 0x71044
2584 2602
2585 2603
2586/* Display B control */ 2604/* Display B control */
2587#define DSPBCNTR 0x71180 2605#define _DSPBCNTR 0x71180
2588#define DISPPLANE_ALPHA_TRANS_ENABLE (1<<15) 2606#define DISPPLANE_ALPHA_TRANS_ENABLE (1<<15)
2589#define DISPPLANE_ALPHA_TRANS_DISABLE 0 2607#define DISPPLANE_ALPHA_TRANS_DISABLE 0
2590#define DISPPLANE_SPRITE_ABOVE_DISPLAY 0 2608#define DISPPLANE_SPRITE_ABOVE_DISPLAY 0
2591#define DISPPLANE_SPRITE_ABOVE_OVERLAY (1) 2609#define DISPPLANE_SPRITE_ABOVE_OVERLAY (1)
2592#define DSPBADDR 0x71184 2610#define _DSPBADDR 0x71184
2593#define DSPBSTRIDE 0x71188 2611#define _DSPBSTRIDE 0x71188
2594#define DSPBPOS 0x7118C 2612#define _DSPBPOS 0x7118C
2595#define DSPBSIZE 0x71190 2613#define _DSPBSIZE 0x71190
2596#define DSPBSURF 0x7119C 2614#define _DSPBSURF 0x7119C
2597#define DSPBTILEOFF 0x711A4 2615#define _DSPBTILEOFF 0x711A4
2598 2616
2599/* VBIOS regs */ 2617/* VBIOS regs */
2600#define VGACNTRL 0x71400 2618#define VGACNTRL 0x71400
@@ -2648,68 +2666,80 @@
2648#define FDI_PLL_FREQ_DISABLE_COUNT_LIMIT_MASK 0xff 2666#define FDI_PLL_FREQ_DISABLE_COUNT_LIMIT_MASK 0xff
2649 2667
2650 2668
2651#define PIPEA_DATA_M1 0x60030 2669#define _PIPEA_DATA_M1 0x60030
2652#define TU_SIZE(x) (((x)-1) << 25) /* default size 64 */ 2670#define TU_SIZE(x) (((x)-1) << 25) /* default size 64 */
2653#define TU_SIZE_MASK 0x7e000000 2671#define TU_SIZE_MASK 0x7e000000
2654#define PIPE_DATA_M1_OFFSET 0 2672#define PIPE_DATA_M1_OFFSET 0
2655#define PIPEA_DATA_N1 0x60034 2673#define _PIPEA_DATA_N1 0x60034
2656#define PIPE_DATA_N1_OFFSET 0 2674#define PIPE_DATA_N1_OFFSET 0
2657 2675
2658#define PIPEA_DATA_M2 0x60038 2676#define _PIPEA_DATA_M2 0x60038
2659#define PIPE_DATA_M2_OFFSET 0 2677#define PIPE_DATA_M2_OFFSET 0
2660#define PIPEA_DATA_N2 0x6003c 2678#define _PIPEA_DATA_N2 0x6003c
2661#define PIPE_DATA_N2_OFFSET 0 2679#define PIPE_DATA_N2_OFFSET 0
2662 2680
2663#define PIPEA_LINK_M1 0x60040 2681#define _PIPEA_LINK_M1 0x60040
2664#define PIPE_LINK_M1_OFFSET 0 2682#define PIPE_LINK_M1_OFFSET 0
2665#define PIPEA_LINK_N1 0x60044 2683#define _PIPEA_LINK_N1 0x60044
2666#define PIPE_LINK_N1_OFFSET 0 2684#define PIPE_LINK_N1_OFFSET 0
2667 2685
2668#define PIPEA_LINK_M2 0x60048 2686#define _PIPEA_LINK_M2 0x60048
2669#define PIPE_LINK_M2_OFFSET 0 2687#define PIPE_LINK_M2_OFFSET 0
2670#define PIPEA_LINK_N2 0x6004c 2688#define _PIPEA_LINK_N2 0x6004c
2671#define PIPE_LINK_N2_OFFSET 0 2689#define PIPE_LINK_N2_OFFSET 0
2672 2690
2673/* PIPEB timing regs share the pipe A layout, starting at 0x61000 */ 2691/* PIPEB timing regs share the pipe A layout, starting at 0x61000 */
2674 2692
2675#define PIPEB_DATA_M1 0x61030 2693#define _PIPEB_DATA_M1 0x61030
2676#define PIPEB_DATA_N1 0x61034 2694#define _PIPEB_DATA_N1 0x61034
2677 2695
2678#define PIPEB_DATA_M2 0x61038 2696#define _PIPEB_DATA_M2 0x61038
2679#define PIPEB_DATA_N2 0x6103c 2697#define _PIPEB_DATA_N2 0x6103c
2680 2698
2681#define PIPEB_LINK_M1 0x61040 2699#define _PIPEB_LINK_M1 0x61040
2682#define PIPEB_LINK_N1 0x61044 2700#define _PIPEB_LINK_N1 0x61044
2683 2701
2684#define PIPEB_LINK_M2 0x61048 2702#define _PIPEB_LINK_M2 0x61048
2685#define PIPEB_LINK_N2 0x6104c 2703#define _PIPEB_LINK_N2 0x6104c
2686 2704
2687#define PIPE_DATA_M1(pipe) _PIPE(pipe, PIPEA_DATA_M1, PIPEB_DATA_M1) 2705#define PIPE_DATA_M1(pipe) _PIPE(pipe, _PIPEA_DATA_M1, _PIPEB_DATA_M1)
2688#define PIPE_DATA_N1(pipe) _PIPE(pipe, PIPEA_DATA_N1, PIPEB_DATA_N1) 2706#define PIPE_DATA_N1(pipe) _PIPE(pipe, _PIPEA_DATA_N1, _PIPEB_DATA_N1)
2689#define PIPE_DATA_M2(pipe) _PIPE(pipe, PIPEA_DATA_M2, PIPEB_DATA_M2) 2707#define PIPE_DATA_M2(pipe) _PIPE(pipe, _PIPEA_DATA_M2, _PIPEB_DATA_M2)
2690#define PIPE_DATA_N2(pipe) _PIPE(pipe, PIPEA_DATA_N2, PIPEB_DATA_N2) 2708#define PIPE_DATA_N2(pipe) _PIPE(pipe, _PIPEA_DATA_N2, _PIPEB_DATA_N2)
2691#define PIPE_LINK_M1(pipe) _PIPE(pipe, PIPEA_LINK_M1, PIPEB_LINK_M1) 2709#define PIPE_LINK_M1(pipe) _PIPE(pipe, _PIPEA_LINK_M1, _PIPEB_LINK_M1)
2692#define PIPE_LINK_N1(pipe) _PIPE(pipe, PIPEA_LINK_N1, PIPEB_LINK_N1) 2710#define PIPE_LINK_N1(pipe) _PIPE(pipe, _PIPEA_LINK_N1, _PIPEB_LINK_N1)
2693#define PIPE_LINK_M2(pipe) _PIPE(pipe, PIPEA_LINK_M2, PIPEB_LINK_M2) 2711#define PIPE_LINK_M2(pipe) _PIPE(pipe, _PIPEA_LINK_M2, _PIPEB_LINK_M2)
2694#define PIPE_LINK_N2(pipe) _PIPE(pipe, PIPEA_LINK_N2, PIPEB_LINK_N2) 2712#define PIPE_LINK_N2(pipe) _PIPE(pipe, _PIPEA_LINK_N2, _PIPEB_LINK_N2)
2695 2713
2696/* CPU panel fitter */ 2714/* CPU panel fitter */
2697#define PFA_CTL_1 0x68080 2715/* IVB+ has 3 fitters, 0 is 7x5 capable, the other two only 3x3 */
2698#define PFB_CTL_1 0x68880 2716#define _PFA_CTL_1 0x68080
2717#define _PFB_CTL_1 0x68880
2699#define PF_ENABLE (1<<31) 2718#define PF_ENABLE (1<<31)
2700#define PF_FILTER_MASK (3<<23) 2719#define PF_FILTER_MASK (3<<23)
2701#define PF_FILTER_PROGRAMMED (0<<23) 2720#define PF_FILTER_PROGRAMMED (0<<23)
2702#define PF_FILTER_MED_3x3 (1<<23) 2721#define PF_FILTER_MED_3x3 (1<<23)
2703#define PF_FILTER_EDGE_ENHANCE (2<<23) 2722#define PF_FILTER_EDGE_ENHANCE (2<<23)
2704#define PF_FILTER_EDGE_SOFTEN (3<<23) 2723#define PF_FILTER_EDGE_SOFTEN (3<<23)
2705#define PFA_WIN_SZ 0x68074 2724#define _PFA_WIN_SZ 0x68074
2706#define PFB_WIN_SZ 0x68874 2725#define _PFB_WIN_SZ 0x68874
2707#define PFA_WIN_POS 0x68070 2726#define _PFA_WIN_POS 0x68070
2708#define PFB_WIN_POS 0x68870 2727#define _PFB_WIN_POS 0x68870
2728#define _PFA_VSCALE 0x68084
2729#define _PFB_VSCALE 0x68884
2730#define _PFA_HSCALE 0x68090
2731#define _PFB_HSCALE 0x68890
2732
2733#define PF_CTL(pipe) _PIPE(pipe, _PFA_CTL_1, _PFB_CTL_1)
2734#define PF_WIN_SZ(pipe) _PIPE(pipe, _PFA_WIN_SZ, _PFB_WIN_SZ)
2735#define PF_WIN_POS(pipe) _PIPE(pipe, _PFA_WIN_POS, _PFB_WIN_POS)
2736#define PF_VSCALE(pipe) _PIPE(pipe, _PFA_VSCALE, _PFB_VSCALE)
2737#define PF_HSCALE(pipe) _PIPE(pipe, _PFA_HSCALE, _PFB_HSCALE)
2709 2738
2710/* legacy palette */ 2739/* legacy palette */
2711#define LGC_PALETTE_A 0x4a000 2740#define _LGC_PALETTE_A 0x4a000
2712#define LGC_PALETTE_B 0x4a800 2741#define _LGC_PALETTE_B 0x4a800
2742#define LGC_PALETTE(pipe) _PIPE(pipe, _LGC_PALETTE_A, _LGC_PALETTE_B)
2713 2743
2714/* interrupts */ 2744/* interrupts */
2715#define DE_MASTER_IRQ_CONTROL (1 << 31) 2745#define DE_MASTER_IRQ_CONTROL (1 << 31)
@@ -2875,17 +2905,17 @@
2875#define PCH_GMBUS4 0xc5110 2905#define PCH_GMBUS4 0xc5110
2876#define PCH_GMBUS5 0xc5120 2906#define PCH_GMBUS5 0xc5120
2877 2907
2878#define PCH_DPLL_A 0xc6014 2908#define _PCH_DPLL_A 0xc6014
2879#define PCH_DPLL_B 0xc6018 2909#define _PCH_DPLL_B 0xc6018
2880#define PCH_DPLL(pipe) _PIPE(pipe, PCH_DPLL_A, PCH_DPLL_B) 2910#define PCH_DPLL(pipe) _PIPE(pipe, _PCH_DPLL_A, _PCH_DPLL_B)
2881 2911
2882#define PCH_FPA0 0xc6040 2912#define _PCH_FPA0 0xc6040
2883#define FP_CB_TUNE (0x3<<22) 2913#define FP_CB_TUNE (0x3<<22)
2884#define PCH_FPA1 0xc6044 2914#define _PCH_FPA1 0xc6044
2885#define PCH_FPB0 0xc6048 2915#define _PCH_FPB0 0xc6048
2886#define PCH_FPB1 0xc604c 2916#define _PCH_FPB1 0xc604c
2887#define PCH_FP0(pipe) _PIPE(pipe, PCH_FPA0, PCH_FPB0) 2917#define PCH_FP0(pipe) _PIPE(pipe, _PCH_FPA0, _PCH_FPB0)
2888#define PCH_FP1(pipe) _PIPE(pipe, PCH_FPA1, PCH_FPB1) 2918#define PCH_FP1(pipe) _PIPE(pipe, _PCH_FPA1, _PCH_FPB1)
2889 2919
2890#define PCH_DPLL_TEST 0xc606c 2920#define PCH_DPLL_TEST 0xc606c
2891 2921
@@ -2904,6 +2934,7 @@
2904#define DREF_NONSPREAD_SOURCE_MASK (3<<9) 2934#define DREF_NONSPREAD_SOURCE_MASK (3<<9)
2905#define DREF_SUPERSPREAD_SOURCE_DISABLE (0<<7) 2935#define DREF_SUPERSPREAD_SOURCE_DISABLE (0<<7)
2906#define DREF_SUPERSPREAD_SOURCE_ENABLE (2<<7) 2936#define DREF_SUPERSPREAD_SOURCE_ENABLE (2<<7)
2937#define DREF_SUPERSPREAD_SOURCE_MASK (3<<7)
2907#define DREF_SSC4_DOWNSPREAD (0<<6) 2938#define DREF_SSC4_DOWNSPREAD (0<<6)
2908#define DREF_SSC4_CENTERSPREAD (1<<6) 2939#define DREF_SSC4_CENTERSPREAD (1<<6)
2909#define DREF_SSC1_DISABLE (0<<1) 2940#define DREF_SSC1_DISABLE (0<<1)
@@ -2936,60 +2967,69 @@
2936 2967
2937/* transcoder */ 2968/* transcoder */
2938 2969
2939#define TRANS_HTOTAL_A 0xe0000 2970#define _TRANS_HTOTAL_A 0xe0000
2940#define TRANS_HTOTAL_SHIFT 16 2971#define TRANS_HTOTAL_SHIFT 16
2941#define TRANS_HACTIVE_SHIFT 0 2972#define TRANS_HACTIVE_SHIFT 0
2942#define TRANS_HBLANK_A 0xe0004 2973#define _TRANS_HBLANK_A 0xe0004
2943#define TRANS_HBLANK_END_SHIFT 16 2974#define TRANS_HBLANK_END_SHIFT 16
2944#define TRANS_HBLANK_START_SHIFT 0 2975#define TRANS_HBLANK_START_SHIFT 0
2945#define TRANS_HSYNC_A 0xe0008 2976#define _TRANS_HSYNC_A 0xe0008
2946#define TRANS_HSYNC_END_SHIFT 16 2977#define TRANS_HSYNC_END_SHIFT 16
2947#define TRANS_HSYNC_START_SHIFT 0 2978#define TRANS_HSYNC_START_SHIFT 0
2948#define TRANS_VTOTAL_A 0xe000c 2979#define _TRANS_VTOTAL_A 0xe000c
2949#define TRANS_VTOTAL_SHIFT 16 2980#define TRANS_VTOTAL_SHIFT 16
2950#define TRANS_VACTIVE_SHIFT 0 2981#define TRANS_VACTIVE_SHIFT 0
2951#define TRANS_VBLANK_A 0xe0010 2982#define _TRANS_VBLANK_A 0xe0010
2952#define TRANS_VBLANK_END_SHIFT 16 2983#define TRANS_VBLANK_END_SHIFT 16
2953#define TRANS_VBLANK_START_SHIFT 0 2984#define TRANS_VBLANK_START_SHIFT 0
2954#define TRANS_VSYNC_A 0xe0014 2985#define _TRANS_VSYNC_A 0xe0014
2955#define TRANS_VSYNC_END_SHIFT 16 2986#define TRANS_VSYNC_END_SHIFT 16
2956#define TRANS_VSYNC_START_SHIFT 0 2987#define TRANS_VSYNC_START_SHIFT 0
2957 2988
2958#define TRANSA_DATA_M1 0xe0030 2989#define _TRANSA_DATA_M1 0xe0030
2959#define TRANSA_DATA_N1 0xe0034 2990#define _TRANSA_DATA_N1 0xe0034
2960#define TRANSA_DATA_M2 0xe0038 2991#define _TRANSA_DATA_M2 0xe0038
2961#define TRANSA_DATA_N2 0xe003c 2992#define _TRANSA_DATA_N2 0xe003c
2962#define TRANSA_DP_LINK_M1 0xe0040 2993#define _TRANSA_DP_LINK_M1 0xe0040
2963#define TRANSA_DP_LINK_N1 0xe0044 2994#define _TRANSA_DP_LINK_N1 0xe0044
2964#define TRANSA_DP_LINK_M2 0xe0048 2995#define _TRANSA_DP_LINK_M2 0xe0048
2965#define TRANSA_DP_LINK_N2 0xe004c 2996#define _TRANSA_DP_LINK_N2 0xe004c
2966 2997
2967#define TRANS_HTOTAL_B 0xe1000 2998#define _TRANS_HTOTAL_B 0xe1000
2968#define TRANS_HBLANK_B 0xe1004 2999#define _TRANS_HBLANK_B 0xe1004
2969#define TRANS_HSYNC_B 0xe1008 3000#define _TRANS_HSYNC_B 0xe1008
2970#define TRANS_VTOTAL_B 0xe100c 3001#define _TRANS_VTOTAL_B 0xe100c
2971#define TRANS_VBLANK_B 0xe1010 3002#define _TRANS_VBLANK_B 0xe1010
2972#define TRANS_VSYNC_B 0xe1014 3003#define _TRANS_VSYNC_B 0xe1014
2973 3004
2974#define TRANS_HTOTAL(pipe) _PIPE(pipe, TRANS_HTOTAL_A, TRANS_HTOTAL_B) 3005#define TRANS_HTOTAL(pipe) _PIPE(pipe, _TRANS_HTOTAL_A, _TRANS_HTOTAL_B)
2975#define TRANS_HBLANK(pipe) _PIPE(pipe, TRANS_HBLANK_A, TRANS_HBLANK_B) 3006#define TRANS_HBLANK(pipe) _PIPE(pipe, _TRANS_HBLANK_A, _TRANS_HBLANK_B)
2976#define TRANS_HSYNC(pipe) _PIPE(pipe, TRANS_HSYNC_A, TRANS_HSYNC_B) 3007#define TRANS_HSYNC(pipe) _PIPE(pipe, _TRANS_HSYNC_A, _TRANS_HSYNC_B)
2977#define TRANS_VTOTAL(pipe) _PIPE(pipe, TRANS_VTOTAL_A, TRANS_VTOTAL_B) 3008#define TRANS_VTOTAL(pipe) _PIPE(pipe, _TRANS_VTOTAL_A, _TRANS_VTOTAL_B)
2978#define TRANS_VBLANK(pipe) _PIPE(pipe, TRANS_VBLANK_A, TRANS_VBLANK_B) 3009#define TRANS_VBLANK(pipe) _PIPE(pipe, _TRANS_VBLANK_A, _TRANS_VBLANK_B)
2979#define TRANS_VSYNC(pipe) _PIPE(pipe, TRANS_VSYNC_A, TRANS_VSYNC_B) 3010#define TRANS_VSYNC(pipe) _PIPE(pipe, _TRANS_VSYNC_A, _TRANS_VSYNC_B)
2980 3011
2981#define TRANSB_DATA_M1 0xe1030 3012#define _TRANSB_DATA_M1 0xe1030
2982#define TRANSB_DATA_N1 0xe1034 3013#define _TRANSB_DATA_N1 0xe1034
2983#define TRANSB_DATA_M2 0xe1038 3014#define _TRANSB_DATA_M2 0xe1038
2984#define TRANSB_DATA_N2 0xe103c 3015#define _TRANSB_DATA_N2 0xe103c
2985#define TRANSB_DP_LINK_M1 0xe1040 3016#define _TRANSB_DP_LINK_M1 0xe1040
2986#define TRANSB_DP_LINK_N1 0xe1044 3017#define _TRANSB_DP_LINK_N1 0xe1044
2987#define TRANSB_DP_LINK_M2 0xe1048 3018#define _TRANSB_DP_LINK_M2 0xe1048
2988#define TRANSB_DP_LINK_N2 0xe104c 3019#define _TRANSB_DP_LINK_N2 0xe104c
2989 3020
2990#define TRANSACONF 0xf0008 3021#define TRANSDATA_M1(pipe) _PIPE(pipe, _TRANSA_DATA_M1, _TRANSB_DATA_M1)
2991#define TRANSBCONF 0xf1008 3022#define TRANSDATA_N1(pipe) _PIPE(pipe, _TRANSA_DATA_N1, _TRANSB_DATA_N1)
2992#define TRANSCONF(plane) _PIPE(plane, TRANSACONF, TRANSBCONF) 3023#define TRANSDATA_M2(pipe) _PIPE(pipe, _TRANSA_DATA_M2, _TRANSB_DATA_M2)
3024#define TRANSDATA_N2(pipe) _PIPE(pipe, _TRANSA_DATA_N2, _TRANSB_DATA_N2)
3025#define TRANSDPLINK_M1(pipe) _PIPE(pipe, _TRANSA_DP_LINK_M1, _TRANSB_DP_LINK_M1)
3026#define TRANSDPLINK_N1(pipe) _PIPE(pipe, _TRANSA_DP_LINK_N1, _TRANSB_DP_LINK_N1)
3027#define TRANSDPLINK_M2(pipe) _PIPE(pipe, _TRANSA_DP_LINK_M2, _TRANSB_DP_LINK_M2)
3028#define TRANSDPLINK_N2(pipe) _PIPE(pipe, _TRANSA_DP_LINK_N2, _TRANSB_DP_LINK_N2)
3029
3030#define _TRANSACONF 0xf0008
3031#define _TRANSBCONF 0xf1008
3032#define TRANSCONF(plane) _PIPE(plane, _TRANSACONF, _TRANSBCONF)
2993#define TRANS_DISABLE (0<<31) 3033#define TRANS_DISABLE (0<<31)
2994#define TRANS_ENABLE (1<<31) 3034#define TRANS_ENABLE (1<<31)
2995#define TRANS_STATE_MASK (1<<30) 3035#define TRANS_STATE_MASK (1<<30)
@@ -3007,18 +3047,19 @@
3007#define TRANS_6BPC (2<<5) 3047#define TRANS_6BPC (2<<5)
3008#define TRANS_12BPC (3<<5) 3048#define TRANS_12BPC (3<<5)
3009 3049
3010#define FDI_RXA_CHICKEN 0xc200c 3050#define _FDI_RXA_CHICKEN 0xc200c
3011#define FDI_RXB_CHICKEN 0xc2010 3051#define _FDI_RXB_CHICKEN 0xc2010
3012#define FDI_RX_PHASE_SYNC_POINTER_ENABLE (1) 3052#define FDI_RX_PHASE_SYNC_POINTER_OVR (1<<1)
3013#define FDI_RX_CHICKEN(pipe) _PIPE(pipe, FDI_RXA_CHICKEN, FDI_RXB_CHICKEN) 3053#define FDI_RX_PHASE_SYNC_POINTER_EN (1<<0)
3054#define FDI_RX_CHICKEN(pipe) _PIPE(pipe, _FDI_RXA_CHICKEN, _FDI_RXB_CHICKEN)
3014 3055
3015#define SOUTH_DSPCLK_GATE_D 0xc2020 3056#define SOUTH_DSPCLK_GATE_D 0xc2020
3016#define PCH_DPLSUNIT_CLOCK_GATE_DISABLE (1<<29) 3057#define PCH_DPLSUNIT_CLOCK_GATE_DISABLE (1<<29)
3017 3058
3018/* CPU: FDI_TX */ 3059/* CPU: FDI_TX */
3019#define FDI_TXA_CTL 0x60100 3060#define _FDI_TXA_CTL 0x60100
3020#define FDI_TXB_CTL 0x61100 3061#define _FDI_TXB_CTL 0x61100
3021#define FDI_TX_CTL(pipe) _PIPE(pipe, FDI_TXA_CTL, FDI_TXB_CTL) 3062#define FDI_TX_CTL(pipe) _PIPE(pipe, _FDI_TXA_CTL, _FDI_TXB_CTL)
3022#define FDI_TX_DISABLE (0<<31) 3063#define FDI_TX_DISABLE (0<<31)
3023#define FDI_TX_ENABLE (1<<31) 3064#define FDI_TX_ENABLE (1<<31)
3024#define FDI_LINK_TRAIN_PATTERN_1 (0<<28) 3065#define FDI_LINK_TRAIN_PATTERN_1 (0<<28)
@@ -3058,9 +3099,9 @@
3058#define FDI_SCRAMBLING_DISABLE (1<<7) 3099#define FDI_SCRAMBLING_DISABLE (1<<7)
3059 3100
3060/* FDI_RX, FDI_X is hard-wired to Transcoder_X */ 3101/* FDI_RX, FDI_X is hard-wired to Transcoder_X */
3061#define FDI_RXA_CTL 0xf000c 3102#define _FDI_RXA_CTL 0xf000c
3062#define FDI_RXB_CTL 0xf100c 3103#define _FDI_RXB_CTL 0xf100c
3063#define FDI_RX_CTL(pipe) _PIPE(pipe, FDI_RXA_CTL, FDI_RXB_CTL) 3104#define FDI_RX_CTL(pipe) _PIPE(pipe, _FDI_RXA_CTL, _FDI_RXB_CTL)
3064#define FDI_RX_ENABLE (1<<31) 3105#define FDI_RX_ENABLE (1<<31)
3065/* train, dp width same as FDI_TX */ 3106/* train, dp width same as FDI_TX */
3066#define FDI_DP_PORT_WIDTH_X8 (7<<19) 3107#define FDI_DP_PORT_WIDTH_X8 (7<<19)
@@ -3085,15 +3126,15 @@
3085#define FDI_LINK_TRAIN_NORMAL_CPT (3<<8) 3126#define FDI_LINK_TRAIN_NORMAL_CPT (3<<8)
3086#define FDI_LINK_TRAIN_PATTERN_MASK_CPT (3<<8) 3127#define FDI_LINK_TRAIN_PATTERN_MASK_CPT (3<<8)
3087 3128
3088#define FDI_RXA_MISC 0xf0010 3129#define _FDI_RXA_MISC 0xf0010
3089#define FDI_RXB_MISC 0xf1010 3130#define _FDI_RXB_MISC 0xf1010
3090#define FDI_RXA_TUSIZE1 0xf0030 3131#define _FDI_RXA_TUSIZE1 0xf0030
3091#define FDI_RXA_TUSIZE2 0xf0038 3132#define _FDI_RXA_TUSIZE2 0xf0038
3092#define FDI_RXB_TUSIZE1 0xf1030 3133#define _FDI_RXB_TUSIZE1 0xf1030
3093#define FDI_RXB_TUSIZE2 0xf1038 3134#define _FDI_RXB_TUSIZE2 0xf1038
3094#define FDI_RX_MISC(pipe) _PIPE(pipe, FDI_RXA_MISC, FDI_RXB_MISC) 3135#define FDI_RX_MISC(pipe) _PIPE(pipe, _FDI_RXA_MISC, _FDI_RXB_MISC)
3095#define FDI_RX_TUSIZE1(pipe) _PIPE(pipe, FDI_RXA_TUSIZE1, FDI_RXB_TUSIZE1) 3136#define FDI_RX_TUSIZE1(pipe) _PIPE(pipe, _FDI_RXA_TUSIZE1, _FDI_RXB_TUSIZE1)
3096#define FDI_RX_TUSIZE2(pipe) _PIPE(pipe, FDI_RXA_TUSIZE2, FDI_RXB_TUSIZE2) 3137#define FDI_RX_TUSIZE2(pipe) _PIPE(pipe, _FDI_RXA_TUSIZE2, _FDI_RXB_TUSIZE2)
3097 3138
3098/* FDI_RX interrupt register format */ 3139/* FDI_RX interrupt register format */
3099#define FDI_RX_INTER_LANE_ALIGN (1<<10) 3140#define FDI_RX_INTER_LANE_ALIGN (1<<10)
@@ -3108,12 +3149,12 @@
3108#define FDI_RX_CROSS_CLOCK_OVERFLOW (1<<1) 3149#define FDI_RX_CROSS_CLOCK_OVERFLOW (1<<1)
3109#define FDI_RX_SYMBOL_QUEUE_OVERFLOW (1<<0) 3150#define FDI_RX_SYMBOL_QUEUE_OVERFLOW (1<<0)
3110 3151
3111#define FDI_RXA_IIR 0xf0014 3152#define _FDI_RXA_IIR 0xf0014
3112#define FDI_RXA_IMR 0xf0018 3153#define _FDI_RXA_IMR 0xf0018
3113#define FDI_RXB_IIR 0xf1014 3154#define _FDI_RXB_IIR 0xf1014
3114#define FDI_RXB_IMR 0xf1018 3155#define _FDI_RXB_IMR 0xf1018
3115#define FDI_RX_IIR(pipe) _PIPE(pipe, FDI_RXA_IIR, FDI_RXB_IIR) 3156#define FDI_RX_IIR(pipe) _PIPE(pipe, _FDI_RXA_IIR, _FDI_RXB_IIR)
3116#define FDI_RX_IMR(pipe) _PIPE(pipe, FDI_RXA_IMR, FDI_RXB_IMR) 3157#define FDI_RX_IMR(pipe) _PIPE(pipe, _FDI_RXA_IMR, _FDI_RXB_IMR)
3117 3158
3118#define FDI_PLL_CTL_1 0xfe000 3159#define FDI_PLL_CTL_1 0xfe000
3119#define FDI_PLL_CTL_2 0xfe004 3160#define FDI_PLL_CTL_2 0xfe004
@@ -3143,11 +3184,15 @@
3143#define ADPA_CRT_HOTPLUG_VOLREF_475MV (1<<17) 3184#define ADPA_CRT_HOTPLUG_VOLREF_475MV (1<<17)
3144#define ADPA_CRT_HOTPLUG_FORCE_TRIGGER (1<<16) 3185#define ADPA_CRT_HOTPLUG_FORCE_TRIGGER (1<<16)
3145 3186
3187#define ADPA_PIPE_ENABLED(V, P) \
3188 (((V) & (ADPA_TRANS_SELECT_MASK | ADPA_DAC_ENABLE)) == ((P) << 30 | ADPA_DAC_ENABLE))
3189
3146/* or SDVOB */ 3190/* or SDVOB */
3147#define HDMIB 0xe1140 3191#define HDMIB 0xe1140
3148#define PORT_ENABLE (1 << 31) 3192#define PORT_ENABLE (1 << 31)
3149#define TRANSCODER_A (0) 3193#define TRANSCODER_A (0)
3150#define TRANSCODER_B (1 << 30) 3194#define TRANSCODER_B (1 << 30)
3195#define TRANSCODER_MASK (1 << 30)
3151#define COLOR_FORMAT_8bpc (0) 3196#define COLOR_FORMAT_8bpc (0)
3152#define COLOR_FORMAT_12bpc (3 << 26) 3197#define COLOR_FORMAT_12bpc (3 << 26)
3153#define SDVOB_HOTPLUG_ENABLE (1 << 23) 3198#define SDVOB_HOTPLUG_ENABLE (1 << 23)
@@ -3163,6 +3208,9 @@
3163#define HSYNC_ACTIVE_HIGH (1 << 3) 3208#define HSYNC_ACTIVE_HIGH (1 << 3)
3164#define PORT_DETECTED (1 << 2) 3209#define PORT_DETECTED (1 << 2)
3165 3210
3211#define HDMI_PIPE_ENABLED(V, P) \
3212 (((V) & (TRANSCODER_MASK | PORT_ENABLE)) == ((P) << 30 | PORT_ENABLE))
3213
3166/* PCH SDVOB multiplex with HDMIB */ 3214/* PCH SDVOB multiplex with HDMIB */
3167#define PCH_SDVOB HDMIB 3215#define PCH_SDVOB HDMIB
3168 3216
@@ -3238,6 +3286,7 @@
3238#define TRANS_DP_PORT_SEL_B (0<<29) 3286#define TRANS_DP_PORT_SEL_B (0<<29)
3239#define TRANS_DP_PORT_SEL_C (1<<29) 3287#define TRANS_DP_PORT_SEL_C (1<<29)
3240#define TRANS_DP_PORT_SEL_D (2<<29) 3288#define TRANS_DP_PORT_SEL_D (2<<29)
3289#define TRANS_DP_PORT_SEL_NONE (3<<29)
3241#define TRANS_DP_PORT_SEL_MASK (3<<29) 3290#define TRANS_DP_PORT_SEL_MASK (3<<29)
3242#define TRANS_DP_AUDIO_ONLY (1<<26) 3291#define TRANS_DP_AUDIO_ONLY (1<<26)
3243#define TRANS_DP_ENH_FRAMING (1<<18) 3292#define TRANS_DP_ENH_FRAMING (1<<18)
@@ -3269,6 +3318,8 @@
3269#define FORCEWAKE 0xA18C 3318#define FORCEWAKE 0xA18C
3270#define FORCEWAKE_ACK 0x130090 3319#define FORCEWAKE_ACK 0x130090
3271 3320
3321#define GT_FIFO_FREE_ENTRIES 0x120008
3322
3272#define GEN6_RPNSWREQ 0xA008 3323#define GEN6_RPNSWREQ 0xA008
3273#define GEN6_TURBO_DISABLE (1<<31) 3324#define GEN6_TURBO_DISABLE (1<<31)
3274#define GEN6_FREQUENCY(x) ((x)<<25) 3325#define GEN6_FREQUENCY(x) ((x)<<25)
@@ -3286,15 +3337,28 @@
3286#define GEN6_RP_DOWN_TIMEOUT 0xA010 3337#define GEN6_RP_DOWN_TIMEOUT 0xA010
3287#define GEN6_RP_INTERRUPT_LIMITS 0xA014 3338#define GEN6_RP_INTERRUPT_LIMITS 0xA014
3288#define GEN6_RPSTAT1 0xA01C 3339#define GEN6_RPSTAT1 0xA01C
3340#define GEN6_CAGF_SHIFT 8
3341#define GEN6_CAGF_MASK (0x7f << GEN6_CAGF_SHIFT)
3289#define GEN6_RP_CONTROL 0xA024 3342#define GEN6_RP_CONTROL 0xA024
3290#define GEN6_RP_MEDIA_TURBO (1<<11) 3343#define GEN6_RP_MEDIA_TURBO (1<<11)
3291#define GEN6_RP_USE_NORMAL_FREQ (1<<9) 3344#define GEN6_RP_USE_NORMAL_FREQ (1<<9)
3292#define GEN6_RP_MEDIA_IS_GFX (1<<8) 3345#define GEN6_RP_MEDIA_IS_GFX (1<<8)
3293#define GEN6_RP_ENABLE (1<<7) 3346#define GEN6_RP_ENABLE (1<<7)
3294#define GEN6_RP_UP_BUSY_MAX (0x2<<3) 3347#define GEN6_RP_UP_IDLE_MIN (0x1<<3)
3295#define GEN6_RP_DOWN_BUSY_MIN (0x2<<0) 3348#define GEN6_RP_UP_BUSY_AVG (0x2<<3)
3349#define GEN6_RP_UP_BUSY_CONT (0x4<<3)
3350#define GEN6_RP_DOWN_IDLE_CONT (0x1<<0)
3296#define GEN6_RP_UP_THRESHOLD 0xA02C 3351#define GEN6_RP_UP_THRESHOLD 0xA02C
3297#define GEN6_RP_DOWN_THRESHOLD 0xA030 3352#define GEN6_RP_DOWN_THRESHOLD 0xA030
3353#define GEN6_RP_CUR_UP_EI 0xA050
3354#define GEN6_CURICONT_MASK 0xffffff
3355#define GEN6_RP_CUR_UP 0xA054
3356#define GEN6_CURBSYTAVG_MASK 0xffffff
3357#define GEN6_RP_PREV_UP 0xA058
3358#define GEN6_RP_CUR_DOWN_EI 0xA05C
3359#define GEN6_CURIAVG_MASK 0xffffff
3360#define GEN6_RP_CUR_DOWN 0xA060
3361#define GEN6_RP_PREV_DOWN 0xA064
3298#define GEN6_RP_UP_EI 0xA068 3362#define GEN6_RP_UP_EI 0xA068
3299#define GEN6_RP_DOWN_EI 0xA06C 3363#define GEN6_RP_DOWN_EI 0xA06C
3300#define GEN6_RP_IDLE_HYSTERSIS 0xA070 3364#define GEN6_RP_IDLE_HYSTERSIS 0xA070
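The new GEN6_CAGF_* fields expose the current actual GPU frequency in RPSTAT1. A hypothetical read, assuming gen6's 50 MHz frequency units:

	u32 rpstat = I915_READ(GEN6_RPSTAT1);
	u32 cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
	DRM_DEBUG_DRIVER("current GPU frequency: %d MHz\n", cagf * 50);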
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 0521ecf26017..7e992a8e9098 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -34,11 +34,10 @@ static bool i915_pipe_enabled(struct drm_device *dev, enum pipe pipe)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 dpll_reg;
 
-	if (HAS_PCH_SPLIT(dev)) {
-		dpll_reg = (pipe == PIPE_A) ? PCH_DPLL_A: PCH_DPLL_B;
-	} else {
-		dpll_reg = (pipe == PIPE_A) ? DPLL_A: DPLL_B;
-	}
+	if (HAS_PCH_SPLIT(dev))
+		dpll_reg = (pipe == PIPE_A) ? _PCH_DPLL_A : _PCH_DPLL_B;
+	else
+		dpll_reg = (pipe == PIPE_A) ? _DPLL_A : _DPLL_B;
 
 	return (I915_READ(dpll_reg) & DPLL_VCO_ENABLE);
 }
@@ -46,7 +45,7 @@ static bool i915_pipe_enabled(struct drm_device *dev, enum pipe pipe)
 static void i915_save_palette(struct drm_device *dev, enum pipe pipe)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	unsigned long reg = (pipe == PIPE_A ? PALETTE_A : PALETTE_B);
+	unsigned long reg = (pipe == PIPE_A ? _PALETTE_A : _PALETTE_B);
 	u32 *array;
 	int i;
 
@@ -54,7 +53,7 @@ static void i915_save_palette(struct drm_device *dev, enum pipe pipe)
 		return;
 
 	if (HAS_PCH_SPLIT(dev))
-		reg = (pipe == PIPE_A) ? LGC_PALETTE_A : LGC_PALETTE_B;
+		reg = (pipe == PIPE_A) ? _LGC_PALETTE_A : _LGC_PALETTE_B;
 
 	if (pipe == PIPE_A)
 		array = dev_priv->save_palette_a;
@@ -68,7 +67,7 @@ static void i915_save_palette(struct drm_device *dev, enum pipe pipe)
 static void i915_restore_palette(struct drm_device *dev, enum pipe pipe)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	unsigned long reg = (pipe == PIPE_A ? PALETTE_A : PALETTE_B);
+	unsigned long reg = (pipe == PIPE_A ? _PALETTE_A : _PALETTE_B);
 	u32 *array;
 	int i;
 
@@ -76,7 +75,7 @@ static void i915_restore_palette(struct drm_device *dev, enum pipe pipe)
 		return;
 
 	if (HAS_PCH_SPLIT(dev))
-		reg = (pipe == PIPE_A) ? LGC_PALETTE_A : LGC_PALETTE_B;
+		reg = (pipe == PIPE_A) ? _LGC_PALETTE_A : _LGC_PALETTE_B;
 
 	if (pipe == PIPE_A)
 		array = dev_priv->save_palette_a;
@@ -241,12 +240,12 @@ static void i915_save_modeset_reg(struct drm_device *dev)
 		return;
 
 	/* Cursor state */
-	dev_priv->saveCURACNTR = I915_READ(CURACNTR);
-	dev_priv->saveCURAPOS = I915_READ(CURAPOS);
-	dev_priv->saveCURABASE = I915_READ(CURABASE);
-	dev_priv->saveCURBCNTR = I915_READ(CURBCNTR);
-	dev_priv->saveCURBPOS = I915_READ(CURBPOS);
-	dev_priv->saveCURBBASE = I915_READ(CURBBASE);
+	dev_priv->saveCURACNTR = I915_READ(_CURACNTR);
+	dev_priv->saveCURAPOS = I915_READ(_CURAPOS);
+	dev_priv->saveCURABASE = I915_READ(_CURABASE);
+	dev_priv->saveCURBCNTR = I915_READ(_CURBCNTR);
+	dev_priv->saveCURBPOS = I915_READ(_CURBPOS);
+	dev_priv->saveCURBBASE = I915_READ(_CURBBASE);
 	if (IS_GEN2(dev))
 		dev_priv->saveCURSIZE = I915_READ(CURSIZE);
 
@@ -256,118 +255,118 @@ static void i915_save_modeset_reg(struct drm_device *dev)
 	}
 
 	/* Pipe & plane A info */
-	dev_priv->savePIPEACONF = I915_READ(PIPEACONF);
-	dev_priv->savePIPEASRC = I915_READ(PIPEASRC);
+	dev_priv->savePIPEACONF = I915_READ(_PIPEACONF);
+	dev_priv->savePIPEASRC = I915_READ(_PIPEASRC);
 	if (HAS_PCH_SPLIT(dev)) {
-		dev_priv->saveFPA0 = I915_READ(PCH_FPA0);
-		dev_priv->saveFPA1 = I915_READ(PCH_FPA1);
-		dev_priv->saveDPLL_A = I915_READ(PCH_DPLL_A);
+		dev_priv->saveFPA0 = I915_READ(_PCH_FPA0);
+		dev_priv->saveFPA1 = I915_READ(_PCH_FPA1);
+		dev_priv->saveDPLL_A = I915_READ(_PCH_DPLL_A);
 	} else {
-		dev_priv->saveFPA0 = I915_READ(FPA0);
-		dev_priv->saveFPA1 = I915_READ(FPA1);
-		dev_priv->saveDPLL_A = I915_READ(DPLL_A);
+		dev_priv->saveFPA0 = I915_READ(_FPA0);
+		dev_priv->saveFPA1 = I915_READ(_FPA1);
+		dev_priv->saveDPLL_A = I915_READ(_DPLL_A);
 	}
 	if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
-		dev_priv->saveDPLL_A_MD = I915_READ(DPLL_A_MD);
-	dev_priv->saveHTOTAL_A = I915_READ(HTOTAL_A);
-	dev_priv->saveHBLANK_A = I915_READ(HBLANK_A);
-	dev_priv->saveHSYNC_A = I915_READ(HSYNC_A);
-	dev_priv->saveVTOTAL_A = I915_READ(VTOTAL_A);
-	dev_priv->saveVBLANK_A = I915_READ(VBLANK_A);
-	dev_priv->saveVSYNC_A = I915_READ(VSYNC_A);
+		dev_priv->saveDPLL_A_MD = I915_READ(_DPLL_A_MD);
+	dev_priv->saveHTOTAL_A = I915_READ(_HTOTAL_A);
+	dev_priv->saveHBLANK_A = I915_READ(_HBLANK_A);
+	dev_priv->saveHSYNC_A = I915_READ(_HSYNC_A);
+	dev_priv->saveVTOTAL_A = I915_READ(_VTOTAL_A);
+	dev_priv->saveVBLANK_A = I915_READ(_VBLANK_A);
+	dev_priv->saveVSYNC_A = I915_READ(_VSYNC_A);
 	if (!HAS_PCH_SPLIT(dev))
-		dev_priv->saveBCLRPAT_A = I915_READ(BCLRPAT_A);
+		dev_priv->saveBCLRPAT_A = I915_READ(_BCLRPAT_A);
 
 	if (HAS_PCH_SPLIT(dev)) {
-		dev_priv->savePIPEA_DATA_M1 = I915_READ(PIPEA_DATA_M1);
-		dev_priv->savePIPEA_DATA_N1 = I915_READ(PIPEA_DATA_N1);
-		dev_priv->savePIPEA_LINK_M1 = I915_READ(PIPEA_LINK_M1);
-		dev_priv->savePIPEA_LINK_N1 = I915_READ(PIPEA_LINK_N1);
+		dev_priv->savePIPEA_DATA_M1 = I915_READ(_PIPEA_DATA_M1);
+		dev_priv->savePIPEA_DATA_N1 = I915_READ(_PIPEA_DATA_N1);
+		dev_priv->savePIPEA_LINK_M1 = I915_READ(_PIPEA_LINK_M1);
+		dev_priv->savePIPEA_LINK_N1 = I915_READ(_PIPEA_LINK_N1);
 
-		dev_priv->saveFDI_TXA_CTL = I915_READ(FDI_TXA_CTL);
-		dev_priv->saveFDI_RXA_CTL = I915_READ(FDI_RXA_CTL);
+		dev_priv->saveFDI_TXA_CTL = I915_READ(_FDI_TXA_CTL);
+		dev_priv->saveFDI_RXA_CTL = I915_READ(_FDI_RXA_CTL);
 
-		dev_priv->savePFA_CTL_1 = I915_READ(PFA_CTL_1);
-		dev_priv->savePFA_WIN_SZ = I915_READ(PFA_WIN_SZ);
-		dev_priv->savePFA_WIN_POS = I915_READ(PFA_WIN_POS);
+		dev_priv->savePFA_CTL_1 = I915_READ(_PFA_CTL_1);
+		dev_priv->savePFA_WIN_SZ = I915_READ(_PFA_WIN_SZ);
+		dev_priv->savePFA_WIN_POS = I915_READ(_PFA_WIN_POS);
 
-		dev_priv->saveTRANSACONF = I915_READ(TRANSACONF);
-		dev_priv->saveTRANS_HTOTAL_A = I915_READ(TRANS_HTOTAL_A);
-		dev_priv->saveTRANS_HBLANK_A = I915_READ(TRANS_HBLANK_A);
-		dev_priv->saveTRANS_HSYNC_A = I915_READ(TRANS_HSYNC_A);
-		dev_priv->saveTRANS_VTOTAL_A = I915_READ(TRANS_VTOTAL_A);
-		dev_priv->saveTRANS_VBLANK_A = I915_READ(TRANS_VBLANK_A);
-		dev_priv->saveTRANS_VSYNC_A = I915_READ(TRANS_VSYNC_A);
+		dev_priv->saveTRANSACONF = I915_READ(_TRANSACONF);
+		dev_priv->saveTRANS_HTOTAL_A = I915_READ(_TRANS_HTOTAL_A);
+		dev_priv->saveTRANS_HBLANK_A = I915_READ(_TRANS_HBLANK_A);
+		dev_priv->saveTRANS_HSYNC_A = I915_READ(_TRANS_HSYNC_A);
+		dev_priv->saveTRANS_VTOTAL_A = I915_READ(_TRANS_VTOTAL_A);
+		dev_priv->saveTRANS_VBLANK_A = I915_READ(_TRANS_VBLANK_A);
+		dev_priv->saveTRANS_VSYNC_A = I915_READ(_TRANS_VSYNC_A);
 	}
 
-	dev_priv->saveDSPACNTR = I915_READ(DSPACNTR);
-	dev_priv->saveDSPASTRIDE = I915_READ(DSPASTRIDE);
-	dev_priv->saveDSPASIZE = I915_READ(DSPASIZE);
-	dev_priv->saveDSPAPOS = I915_READ(DSPAPOS);
-	dev_priv->saveDSPAADDR = I915_READ(DSPAADDR);
+	dev_priv->saveDSPACNTR = I915_READ(_DSPACNTR);
+	dev_priv->saveDSPASTRIDE = I915_READ(_DSPASTRIDE);
+	dev_priv->saveDSPASIZE = I915_READ(_DSPASIZE);
+	dev_priv->saveDSPAPOS = I915_READ(_DSPAPOS);
+	dev_priv->saveDSPAADDR = I915_READ(_DSPAADDR);
 	if (INTEL_INFO(dev)->gen >= 4) {
-		dev_priv->saveDSPASURF = I915_READ(DSPASURF);
-		dev_priv->saveDSPATILEOFF = I915_READ(DSPATILEOFF);
+		dev_priv->saveDSPASURF = I915_READ(_DSPASURF);
+		dev_priv->saveDSPATILEOFF = I915_READ(_DSPATILEOFF);
 	}
 	i915_save_palette(dev, PIPE_A);
-	dev_priv->savePIPEASTAT = I915_READ(PIPEASTAT);
+	dev_priv->savePIPEASTAT = I915_READ(_PIPEASTAT);
 
 	/* Pipe & plane B info */
-	dev_priv->savePIPEBCONF = I915_READ(PIPEBCONF);
-	dev_priv->savePIPEBSRC = I915_READ(PIPEBSRC);
+	dev_priv->savePIPEBCONF = I915_READ(_PIPEBCONF);
+	dev_priv->savePIPEBSRC = I915_READ(_PIPEBSRC);
 	if (HAS_PCH_SPLIT(dev)) {
-		dev_priv->saveFPB0 = I915_READ(PCH_FPB0);
-		dev_priv->saveFPB1 = I915_READ(PCH_FPB1);
-		dev_priv->saveDPLL_B = I915_READ(PCH_DPLL_B);
+		dev_priv->saveFPB0 = I915_READ(_PCH_FPB0);
+		dev_priv->saveFPB1 = I915_READ(_PCH_FPB1);
+		dev_priv->saveDPLL_B = I915_READ(_PCH_DPLL_B);
 	} else {
-		dev_priv->saveFPB0 = I915_READ(FPB0);
-		dev_priv->saveFPB1 = I915_READ(FPB1);
-		dev_priv->saveDPLL_B = I915_READ(DPLL_B);
+		dev_priv->saveFPB0 = I915_READ(_FPB0);
+		dev_priv->saveFPB1 = I915_READ(_FPB1);
+		dev_priv->saveDPLL_B = I915_READ(_DPLL_B);
 	}
 	if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
-		dev_priv->saveDPLL_B_MD = I915_READ(DPLL_B_MD);
-	dev_priv->saveHTOTAL_B = I915_READ(HTOTAL_B);
-	dev_priv->saveHBLANK_B = I915_READ(HBLANK_B);
-	dev_priv->saveHSYNC_B = I915_READ(HSYNC_B);
-	dev_priv->saveVTOTAL_B = I915_READ(VTOTAL_B);
-	dev_priv->saveVBLANK_B = I915_READ(VBLANK_B);
-	dev_priv->saveVSYNC_B = I915_READ(VSYNC_B);
+		dev_priv->saveDPLL_B_MD = I915_READ(_DPLL_B_MD);
+	dev_priv->saveHTOTAL_B = I915_READ(_HTOTAL_B);
+	dev_priv->saveHBLANK_B = I915_READ(_HBLANK_B);
+	dev_priv->saveHSYNC_B = I915_READ(_HSYNC_B);
+	dev_priv->saveVTOTAL_B = I915_READ(_VTOTAL_B);
+	dev_priv->saveVBLANK_B = I915_READ(_VBLANK_B);
+	dev_priv->saveVSYNC_B = I915_READ(_VSYNC_B);
 	if (!HAS_PCH_SPLIT(dev))
-		dev_priv->saveBCLRPAT_B = I915_READ(BCLRPAT_B);
+		dev_priv->saveBCLRPAT_B = I915_READ(_BCLRPAT_B);
 
 	if (HAS_PCH_SPLIT(dev)) {
-		dev_priv->savePIPEB_DATA_M1 = I915_READ(PIPEB_DATA_M1);
-		dev_priv->savePIPEB_DATA_N1 = I915_READ(PIPEB_DATA_N1);
-		dev_priv->savePIPEB_LINK_M1 = I915_READ(PIPEB_LINK_M1);
-		dev_priv->savePIPEB_LINK_N1 = I915_READ(PIPEB_LINK_N1);
+		dev_priv->savePIPEB_DATA_M1 = I915_READ(_PIPEB_DATA_M1);
+		dev_priv->savePIPEB_DATA_N1 = I915_READ(_PIPEB_DATA_N1);
+		dev_priv->savePIPEB_LINK_M1 = I915_READ(_PIPEB_LINK_M1);
+		dev_priv->savePIPEB_LINK_N1 = I915_READ(_PIPEB_LINK_N1);
 
-		dev_priv->saveFDI_TXB_CTL = I915_READ(FDI_TXB_CTL);
-		dev_priv->saveFDI_RXB_CTL = I915_READ(FDI_RXB_CTL);
+		dev_priv->saveFDI_TXB_CTL = I915_READ(_FDI_TXB_CTL);
+		dev_priv->saveFDI_RXB_CTL = I915_READ(_FDI_RXB_CTL);
 
-		dev_priv->savePFB_CTL_1 = I915_READ(PFB_CTL_1);
-		dev_priv->savePFB_WIN_SZ = I915_READ(PFB_WIN_SZ);
-		dev_priv->savePFB_WIN_POS = I915_READ(PFB_WIN_POS);
+		dev_priv->savePFB_CTL_1 = I915_READ(_PFB_CTL_1);
+		dev_priv->savePFB_WIN_SZ = I915_READ(_PFB_WIN_SZ);
+		dev_priv->savePFB_WIN_POS = I915_READ(_PFB_WIN_POS);
 
-		dev_priv->saveTRANSBCONF = I915_READ(TRANSBCONF);
-		dev_priv->saveTRANS_HTOTAL_B = I915_READ(TRANS_HTOTAL_B);
-		dev_priv->saveTRANS_HBLANK_B = I915_READ(TRANS_HBLANK_B);
-		dev_priv->saveTRANS_HSYNC_B = I915_READ(TRANS_HSYNC_B);
-		dev_priv->saveTRANS_VTOTAL_B = I915_READ(TRANS_VTOTAL_B);
-		dev_priv->saveTRANS_VBLANK_B = I915_READ(TRANS_VBLANK_B);
-		dev_priv->saveTRANS_VSYNC_B = I915_READ(TRANS_VSYNC_B);
+		dev_priv->saveTRANSBCONF = I915_READ(_TRANSBCONF);
+		dev_priv->saveTRANS_HTOTAL_B = I915_READ(_TRANS_HTOTAL_B);
+		dev_priv->saveTRANS_HBLANK_B = I915_READ(_TRANS_HBLANK_B);
+		dev_priv->saveTRANS_HSYNC_B = I915_READ(_TRANS_HSYNC_B);
+		dev_priv->saveTRANS_VTOTAL_B = I915_READ(_TRANS_VTOTAL_B);
+		dev_priv->saveTRANS_VBLANK_B = I915_READ(_TRANS_VBLANK_B);
+		dev_priv->saveTRANS_VSYNC_B = I915_READ(_TRANS_VSYNC_B);
 	}
 
-	dev_priv->saveDSPBCNTR = I915_READ(DSPBCNTR);
-	dev_priv->saveDSPBSTRIDE = I915_READ(DSPBSTRIDE);
-	dev_priv->saveDSPBSIZE = I915_READ(DSPBSIZE);
-	dev_priv->saveDSPBPOS = I915_READ(DSPBPOS);
-	dev_priv->saveDSPBADDR = I915_READ(DSPBADDR);
+	dev_priv->saveDSPBCNTR = I915_READ(_DSPBCNTR);
+	dev_priv->saveDSPBSTRIDE = I915_READ(_DSPBSTRIDE);
+	dev_priv->saveDSPBSIZE = I915_READ(_DSPBSIZE);
+	dev_priv->saveDSPBPOS = I915_READ(_DSPBPOS);
+	dev_priv->saveDSPBADDR = I915_READ(_DSPBADDR);
 	if (INTEL_INFO(dev)->gen >= 4) {
-		dev_priv->saveDSPBSURF = I915_READ(DSPBSURF);
-		dev_priv->saveDSPBTILEOFF = I915_READ(DSPBTILEOFF);
+		dev_priv->saveDSPBSURF = I915_READ(_DSPBSURF);
+		dev_priv->saveDSPBTILEOFF = I915_READ(_DSPBTILEOFF);
 	}
 	i915_save_palette(dev, PIPE_B);
-	dev_priv->savePIPEBSTAT = I915_READ(PIPEBSTAT);
+	dev_priv->savePIPEBSTAT = I915_READ(_PIPEBSTAT);
 
 	/* Fences */
 	switch (INTEL_INFO(dev)->gen) {
@@ -426,19 +425,19 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
 
 
 	if (HAS_PCH_SPLIT(dev)) {
-		dpll_a_reg = PCH_DPLL_A;
-		dpll_b_reg = PCH_DPLL_B;
-		fpa0_reg = PCH_FPA0;
-		fpb0_reg = PCH_FPB0;
-		fpa1_reg = PCH_FPA1;
-		fpb1_reg = PCH_FPB1;
+		dpll_a_reg = _PCH_DPLL_A;
+		dpll_b_reg = _PCH_DPLL_B;
+		fpa0_reg = _PCH_FPA0;
+		fpb0_reg = _PCH_FPB0;
+		fpa1_reg = _PCH_FPA1;
+		fpb1_reg = _PCH_FPB1;
 	} else {
-		dpll_a_reg = DPLL_A;
-		dpll_b_reg = DPLL_B;
-		fpa0_reg = FPA0;
-		fpb0_reg = FPB0;
-		fpa1_reg = FPA1;
-		fpb1_reg = FPB1;
+		dpll_a_reg = _DPLL_A;
+		dpll_b_reg = _DPLL_B;
+		fpa0_reg = _FPA0;
+		fpb0_reg = _FPB0;
+		fpa1_reg = _FPA1;
+		fpb1_reg = _FPB1;
 	}
 
 	if (HAS_PCH_SPLIT(dev)) {
@@ -461,60 +460,60 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
 	POSTING_READ(dpll_a_reg);
 	udelay(150);
 	if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) {
-		I915_WRITE(DPLL_A_MD, dev_priv->saveDPLL_A_MD);
-		POSTING_READ(DPLL_A_MD);
+		I915_WRITE(_DPLL_A_MD, dev_priv->saveDPLL_A_MD);
+		POSTING_READ(_DPLL_A_MD);
 	}
 	udelay(150);
 
 	/* Restore mode */
-	I915_WRITE(HTOTAL_A, dev_priv->saveHTOTAL_A);
-	I915_WRITE(HBLANK_A, dev_priv->saveHBLANK_A);
-	I915_WRITE(HSYNC_A, dev_priv->saveHSYNC_A);
-	I915_WRITE(VTOTAL_A, dev_priv->saveVTOTAL_A);
-	I915_WRITE(VBLANK_A, dev_priv->saveVBLANK_A);
-	I915_WRITE(VSYNC_A, dev_priv->saveVSYNC_A);
+	I915_WRITE(_HTOTAL_A, dev_priv->saveHTOTAL_A);
+	I915_WRITE(_HBLANK_A, dev_priv->saveHBLANK_A);
+	I915_WRITE(_HSYNC_A, dev_priv->saveHSYNC_A);
+	I915_WRITE(_VTOTAL_A, dev_priv->saveVTOTAL_A);
+	I915_WRITE(_VBLANK_A, dev_priv->saveVBLANK_A);
+	I915_WRITE(_VSYNC_A, dev_priv->saveVSYNC_A);
 	if (!HAS_PCH_SPLIT(dev))
-		I915_WRITE(BCLRPAT_A, dev_priv->saveBCLRPAT_A);
+		I915_WRITE(_BCLRPAT_A, dev_priv->saveBCLRPAT_A);
 
 	if (HAS_PCH_SPLIT(dev)) {
-		I915_WRITE(PIPEA_DATA_M1, dev_priv->savePIPEA_DATA_M1);
-		I915_WRITE(PIPEA_DATA_N1, dev_priv->savePIPEA_DATA_N1);
-		I915_WRITE(PIPEA_LINK_M1, dev_priv->savePIPEA_LINK_M1);
-		I915_WRITE(PIPEA_LINK_N1, dev_priv->savePIPEA_LINK_N1);
+		I915_WRITE(_PIPEA_DATA_M1, dev_priv->savePIPEA_DATA_M1);
+		I915_WRITE(_PIPEA_DATA_N1, dev_priv->savePIPEA_DATA_N1);
+		I915_WRITE(_PIPEA_LINK_M1, dev_priv->savePIPEA_LINK_M1);
+		I915_WRITE(_PIPEA_LINK_N1, dev_priv->savePIPEA_LINK_N1);
 
-		I915_WRITE(FDI_RXA_CTL, dev_priv->saveFDI_RXA_CTL);
-		I915_WRITE(FDI_TXA_CTL, dev_priv->saveFDI_TXA_CTL);
+		I915_WRITE(_FDI_RXA_CTL, dev_priv->saveFDI_RXA_CTL);
+		I915_WRITE(_FDI_TXA_CTL, dev_priv->saveFDI_TXA_CTL);
 
-		I915_WRITE(PFA_CTL_1, dev_priv->savePFA_CTL_1);
-		I915_WRITE(PFA_WIN_SZ, dev_priv->savePFA_WIN_SZ);
-		I915_WRITE(PFA_WIN_POS, dev_priv->savePFA_WIN_POS);
+		I915_WRITE(_PFA_CTL_1, dev_priv->savePFA_CTL_1);
+		I915_WRITE(_PFA_WIN_SZ, dev_priv->savePFA_WIN_SZ);
+		I915_WRITE(_PFA_WIN_POS, dev_priv->savePFA_WIN_POS);
 
-		I915_WRITE(TRANSACONF, dev_priv->saveTRANSACONF);
-		I915_WRITE(TRANS_HTOTAL_A, dev_priv->saveTRANS_HTOTAL_A);
-		I915_WRITE(TRANS_HBLANK_A, dev_priv->saveTRANS_HBLANK_A);
-		I915_WRITE(TRANS_HSYNC_A, dev_priv->saveTRANS_HSYNC_A);
-		I915_WRITE(TRANS_VTOTAL_A, dev_priv->saveTRANS_VTOTAL_A);
-		I915_WRITE(TRANS_VBLANK_A, dev_priv->saveTRANS_VBLANK_A);
-		I915_WRITE(TRANS_VSYNC_A, dev_priv->saveTRANS_VSYNC_A);
+		I915_WRITE(_TRANSACONF, dev_priv->saveTRANSACONF);
+		I915_WRITE(_TRANS_HTOTAL_A, dev_priv->saveTRANS_HTOTAL_A);
+		I915_WRITE(_TRANS_HBLANK_A, dev_priv->saveTRANS_HBLANK_A);
+		I915_WRITE(_TRANS_HSYNC_A, dev_priv->saveTRANS_HSYNC_A);
+		I915_WRITE(_TRANS_VTOTAL_A, dev_priv->saveTRANS_VTOTAL_A);
+		I915_WRITE(_TRANS_VBLANK_A, dev_priv->saveTRANS_VBLANK_A);
+		I915_WRITE(_TRANS_VSYNC_A, dev_priv->saveTRANS_VSYNC_A);
 	}
 
 	/* Restore plane info */
-	I915_WRITE(DSPASIZE, dev_priv->saveDSPASIZE);
-	I915_WRITE(DSPAPOS, dev_priv->saveDSPAPOS);
-	I915_WRITE(PIPEASRC, dev_priv->savePIPEASRC);
-	I915_WRITE(DSPAADDR, dev_priv->saveDSPAADDR);
-	I915_WRITE(DSPASTRIDE, dev_priv->saveDSPASTRIDE);
+	I915_WRITE(_DSPASIZE, dev_priv->saveDSPASIZE);
+	I915_WRITE(_DSPAPOS, dev_priv->saveDSPAPOS);
+	I915_WRITE(_PIPEASRC, dev_priv->savePIPEASRC);
+	I915_WRITE(_DSPAADDR, dev_priv->saveDSPAADDR);
+	I915_WRITE(_DSPASTRIDE, dev_priv->saveDSPASTRIDE);
 	if (INTEL_INFO(dev)->gen >= 4) {
-		I915_WRITE(DSPASURF, dev_priv->saveDSPASURF);
-		I915_WRITE(DSPATILEOFF, dev_priv->saveDSPATILEOFF);
+		I915_WRITE(_DSPASURF, dev_priv->saveDSPASURF);
+		I915_WRITE(_DSPATILEOFF, dev_priv->saveDSPATILEOFF);
 	}
 
-	I915_WRITE(PIPEACONF, dev_priv->savePIPEACONF);
+	I915_WRITE(_PIPEACONF, dev_priv->savePIPEACONF);
 
 	i915_restore_palette(dev, PIPE_A);
 	/* Enable the plane */
-	I915_WRITE(DSPACNTR, dev_priv->saveDSPACNTR);
-	I915_WRITE(DSPAADDR, I915_READ(DSPAADDR));
+	I915_WRITE(_DSPACNTR, dev_priv->saveDSPACNTR);
+	I915_WRITE(_DSPAADDR, I915_READ(_DSPAADDR));
 
 	/* Pipe & plane B info */
 	if (dev_priv->saveDPLL_B & DPLL_VCO_ENABLE) {
@@ -530,68 +529,68 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
 	POSTING_READ(dpll_b_reg);
 	udelay(150);
 	if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) {
-		I915_WRITE(DPLL_B_MD, dev_priv->saveDPLL_B_MD);
-		POSTING_READ(DPLL_B_MD);
+		I915_WRITE(_DPLL_B_MD, dev_priv->saveDPLL_B_MD);
+		POSTING_READ(_DPLL_B_MD);
 	}
 	udelay(150);
 
 	/* Restore mode */
-	I915_WRITE(HTOTAL_B, dev_priv->saveHTOTAL_B);
-	I915_WRITE(HBLANK_B, dev_priv->saveHBLANK_B);
-	I915_WRITE(HSYNC_B, dev_priv->saveHSYNC_B);
-	I915_WRITE(VTOTAL_B, dev_priv->saveVTOTAL_B);
-	I915_WRITE(VBLANK_B, dev_priv->saveVBLANK_B);
-	I915_WRITE(VSYNC_B, dev_priv->saveVSYNC_B);
+	I915_WRITE(_HTOTAL_B, dev_priv->saveHTOTAL_B);
+	I915_WRITE(_HBLANK_B, dev_priv->saveHBLANK_B);
+	I915_WRITE(_HSYNC_B, dev_priv->saveHSYNC_B);
+	I915_WRITE(_VTOTAL_B, dev_priv->saveVTOTAL_B);
+	I915_WRITE(_VBLANK_B, dev_priv->saveVBLANK_B);
+	I915_WRITE(_VSYNC_B, dev_priv->saveVSYNC_B);
 	if (!HAS_PCH_SPLIT(dev))
-		I915_WRITE(BCLRPAT_B, dev_priv->saveBCLRPAT_B);
+		I915_WRITE(_BCLRPAT_B, dev_priv->saveBCLRPAT_B);
 
 	if (HAS_PCH_SPLIT(dev)) {
-		I915_WRITE(PIPEB_DATA_M1, dev_priv->savePIPEB_DATA_M1);
-		I915_WRITE(PIPEB_DATA_N1, dev_priv->savePIPEB_DATA_N1);
-		I915_WRITE(PIPEB_LINK_M1, dev_priv->savePIPEB_LINK_M1);
-		I915_WRITE(PIPEB_LINK_N1, dev_priv->savePIPEB_LINK_N1);
+		I915_WRITE(_PIPEB_DATA_M1, dev_priv->savePIPEB_DATA_M1);
+		I915_WRITE(_PIPEB_DATA_N1, dev_priv->savePIPEB_DATA_N1);
+		I915_WRITE(_PIPEB_LINK_M1, dev_priv->savePIPEB_LINK_M1);
+		I915_WRITE(_PIPEB_LINK_N1, dev_priv->savePIPEB_LINK_N1);
 
-		I915_WRITE(FDI_RXB_CTL, dev_priv->saveFDI_RXB_CTL);
-		I915_WRITE(FDI_TXB_CTL, dev_priv->saveFDI_TXB_CTL);
+		I915_WRITE(_FDI_RXB_CTL, dev_priv->saveFDI_RXB_CTL);
+		I915_WRITE(_FDI_TXB_CTL, dev_priv->saveFDI_TXB_CTL);
 
-		I915_WRITE(PFB_CTL_1, dev_priv->savePFB_CTL_1);
-		I915_WRITE(PFB_WIN_SZ, dev_priv->savePFB_WIN_SZ);
-		I915_WRITE(PFB_WIN_POS, dev_priv->savePFB_WIN_POS);
+		I915_WRITE(_PFB_CTL_1, dev_priv->savePFB_CTL_1);
+		I915_WRITE(_PFB_WIN_SZ, dev_priv->savePFB_WIN_SZ);
+		I915_WRITE(_PFB_WIN_POS, dev_priv->savePFB_WIN_POS);
 
-		I915_WRITE(TRANSBCONF, dev_priv->saveTRANSBCONF);
-		I915_WRITE(TRANS_HTOTAL_B, dev_priv->saveTRANS_HTOTAL_B);
-		I915_WRITE(TRANS_HBLANK_B, dev_priv->saveTRANS_HBLANK_B);
-		I915_WRITE(TRANS_HSYNC_B, dev_priv->saveTRANS_HSYNC_B);
-		I915_WRITE(TRANS_VTOTAL_B, dev_priv->saveTRANS_VTOTAL_B);
-		I915_WRITE(TRANS_VBLANK_B, dev_priv->saveTRANS_VBLANK_B);
-		I915_WRITE(TRANS_VSYNC_B, dev_priv->saveTRANS_VSYNC_B);
+		I915_WRITE(_TRANSBCONF, dev_priv->saveTRANSBCONF);
+		I915_WRITE(_TRANS_HTOTAL_B, dev_priv->saveTRANS_HTOTAL_B);
+		I915_WRITE(_TRANS_HBLANK_B, dev_priv->saveTRANS_HBLANK_B);
+		I915_WRITE(_TRANS_HSYNC_B, dev_priv->saveTRANS_HSYNC_B);
+		I915_WRITE(_TRANS_VTOTAL_B, dev_priv->saveTRANS_VTOTAL_B);
+		I915_WRITE(_TRANS_VBLANK_B, dev_priv->saveTRANS_VBLANK_B);
+		I915_WRITE(_TRANS_VSYNC_B, dev_priv->saveTRANS_VSYNC_B);
 	}
 
 	/* Restore plane info */
-	I915_WRITE(DSPBSIZE, dev_priv->saveDSPBSIZE);
-	I915_WRITE(DSPBPOS, dev_priv->saveDSPBPOS);
-	I915_WRITE(PIPEBSRC, dev_priv->savePIPEBSRC);
-	I915_WRITE(DSPBADDR, dev_priv->saveDSPBADDR);
-	I915_WRITE(DSPBSTRIDE, dev_priv->saveDSPBSTRIDE);
+	I915_WRITE(_DSPBSIZE, dev_priv->saveDSPBSIZE);
+	I915_WRITE(_DSPBPOS, dev_priv->saveDSPBPOS);
+	I915_WRITE(_PIPEBSRC, dev_priv->savePIPEBSRC);
+	I915_WRITE(_DSPBADDR, dev_priv->saveDSPBADDR);
+	I915_WRITE(_DSPBSTRIDE, dev_priv->saveDSPBSTRIDE);
 	if (INTEL_INFO(dev)->gen >= 4) {
-		I915_WRITE(DSPBSURF, dev_priv->saveDSPBSURF);
-		I915_WRITE(DSPBTILEOFF, dev_priv->saveDSPBTILEOFF);
+		I915_WRITE(_DSPBSURF, dev_priv->saveDSPBSURF);
+		I915_WRITE(_DSPBTILEOFF, dev_priv->saveDSPBTILEOFF);
 	}
 
-	I915_WRITE(PIPEBCONF, dev_priv->savePIPEBCONF);
+	I915_WRITE(_PIPEBCONF, dev_priv->savePIPEBCONF);
 
 	i915_restore_palette(dev, PIPE_B);
 	/* Enable the plane */
-	I915_WRITE(DSPBCNTR, dev_priv->saveDSPBCNTR);
-	I915_WRITE(DSPBADDR, I915_READ(DSPBADDR));
+	I915_WRITE(_DSPBCNTR, dev_priv->saveDSPBCNTR);
+	I915_WRITE(_DSPBADDR, I915_READ(_DSPBADDR));
 
 	/* Cursor state */
-	I915_WRITE(CURAPOS, dev_priv->saveCURAPOS);
-	I915_WRITE(CURACNTR, dev_priv->saveCURACNTR);
-	I915_WRITE(CURABASE, dev_priv->saveCURABASE);
-	I915_WRITE(CURBPOS, dev_priv->saveCURBPOS);
-	I915_WRITE(CURBCNTR, dev_priv->saveCURBCNTR);
-	I915_WRITE(CURBBASE, dev_priv->saveCURBBASE);
+	I915_WRITE(_CURAPOS, dev_priv->saveCURAPOS);
+	I915_WRITE(_CURACNTR, dev_priv->saveCURACNTR);
+	I915_WRITE(_CURABASE, dev_priv->saveCURABASE);
+	I915_WRITE(_CURBPOS, dev_priv->saveCURBPOS);
+	I915_WRITE(_CURBCNTR, dev_priv->saveCURBCNTR);
+	I915_WRITE(_CURBBASE, dev_priv->saveCURBBASE);
 	if (IS_GEN2(dev))
 		I915_WRITE(CURSIZE, dev_priv->saveCURSIZE);
 
@@ -653,14 +652,14 @@ void i915_save_display(struct drm_device *dev)
 		dev_priv->saveDP_B = I915_READ(DP_B);
 		dev_priv->saveDP_C = I915_READ(DP_C);
 		dev_priv->saveDP_D = I915_READ(DP_D);
-		dev_priv->savePIPEA_GMCH_DATA_M = I915_READ(PIPEA_GMCH_DATA_M);
-		dev_priv->savePIPEB_GMCH_DATA_M = I915_READ(PIPEB_GMCH_DATA_M);
-		dev_priv->savePIPEA_GMCH_DATA_N = I915_READ(PIPEA_GMCH_DATA_N);
-		dev_priv->savePIPEB_GMCH_DATA_N = I915_READ(PIPEB_GMCH_DATA_N);
-		dev_priv->savePIPEA_DP_LINK_M = I915_READ(PIPEA_DP_LINK_M);
-		dev_priv->savePIPEB_DP_LINK_M = I915_READ(PIPEB_DP_LINK_M);
-		dev_priv->savePIPEA_DP_LINK_N = I915_READ(PIPEA_DP_LINK_N);
-		dev_priv->savePIPEB_DP_LINK_N = I915_READ(PIPEB_DP_LINK_N);
+		dev_priv->savePIPEA_GMCH_DATA_M = I915_READ(_PIPEA_GMCH_DATA_M);
+		dev_priv->savePIPEB_GMCH_DATA_M = I915_READ(_PIPEB_GMCH_DATA_M);
+		dev_priv->savePIPEA_GMCH_DATA_N = I915_READ(_PIPEA_GMCH_DATA_N);
+		dev_priv->savePIPEB_GMCH_DATA_N = I915_READ(_PIPEB_GMCH_DATA_N);
+		dev_priv->savePIPEA_DP_LINK_M = I915_READ(_PIPEA_DP_LINK_M);
+		dev_priv->savePIPEB_DP_LINK_M = I915_READ(_PIPEB_DP_LINK_M);
+		dev_priv->savePIPEA_DP_LINK_N = I915_READ(_PIPEA_DP_LINK_N);
+		dev_priv->savePIPEB_DP_LINK_N = I915_READ(_PIPEB_DP_LINK_N);
 	}
 	/* FIXME: save TV & SDVO state */
 
@@ -699,14 +698,14 @@ void i915_restore_display(struct drm_device *dev)
 
 	/* Display port ratios (must be done before clock is set) */
 	if (SUPPORTS_INTEGRATED_DP(dev)) {
-		I915_WRITE(PIPEA_GMCH_DATA_M, dev_priv->savePIPEA_GMCH_DATA_M);
-		I915_WRITE(PIPEB_GMCH_DATA_M, dev_priv->savePIPEB_GMCH_DATA_M);
-		I915_WRITE(PIPEA_GMCH_DATA_N, dev_priv->savePIPEA_GMCH_DATA_N);
-		I915_WRITE(PIPEB_GMCH_DATA_N, dev_priv->savePIPEB_GMCH_DATA_N);
-		I915_WRITE(PIPEA_DP_LINK_M, dev_priv->savePIPEA_DP_LINK_M);
-		I915_WRITE(PIPEB_DP_LINK_M, dev_priv->savePIPEB_DP_LINK_M);
-		I915_WRITE(PIPEA_DP_LINK_N, dev_priv->savePIPEA_DP_LINK_N);
-		I915_WRITE(PIPEB_DP_LINK_N, dev_priv->savePIPEB_DP_LINK_N);
+		I915_WRITE(_PIPEA_GMCH_DATA_M, dev_priv->savePIPEA_GMCH_DATA_M);
+		I915_WRITE(_PIPEB_GMCH_DATA_M, dev_priv->savePIPEB_GMCH_DATA_M);
+		I915_WRITE(_PIPEA_GMCH_DATA_N, dev_priv->savePIPEA_GMCH_DATA_N);
+		I915_WRITE(_PIPEB_GMCH_DATA_N, dev_priv->savePIPEB_GMCH_DATA_N);
+		I915_WRITE(_PIPEA_DP_LINK_M, dev_priv->savePIPEA_DP_LINK_M);
+		I915_WRITE(_PIPEB_DP_LINK_M, dev_priv->savePIPEB_DP_LINK_M);
+		I915_WRITE(_PIPEA_DP_LINK_N, dev_priv->savePIPEA_DP_LINK_N);
+		I915_WRITE(_PIPEB_DP_LINK_N, dev_priv->savePIPEB_DP_LINK_N);
 	}
 
 	/* This is only meaningful in non-KMS mode */
@@ -797,9 +796,6 @@ int i915_save_state(struct drm_device *dev)
 
 	pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB);
 
-	/* Hardware status page */
-	dev_priv->saveHWS = I915_READ(HWS_PGA);
-
 	i915_save_display(dev);
 
 	/* Interrupt state */
@@ -808,8 +804,8 @@ int i915_save_state(struct drm_device *dev)
 		dev_priv->saveDEIMR = I915_READ(DEIMR);
 		dev_priv->saveGTIER = I915_READ(GTIER);
 		dev_priv->saveGTIMR = I915_READ(GTIMR);
-		dev_priv->saveFDI_RXA_IMR = I915_READ(FDI_RXA_IMR);
-		dev_priv->saveFDI_RXB_IMR = I915_READ(FDI_RXB_IMR);
+		dev_priv->saveFDI_RXA_IMR = I915_READ(_FDI_RXA_IMR);
+		dev_priv->saveFDI_RXB_IMR = I915_READ(_FDI_RXB_IMR);
 		dev_priv->saveMCHBAR_RENDER_STANDBY =
 			I915_READ(RSTDBYCTL);
 	} else {
@@ -846,9 +842,6 @@ int i915_restore_state(struct drm_device *dev)
 
 	pci_write_config_byte(dev->pdev, LBB, dev_priv->saveLBB);
 
-	/* Hardware status page */
-	I915_WRITE(HWS_PGA, dev_priv->saveHWS);
-
 	i915_restore_display(dev);
 
 	/* Interrupt state */
@@ -857,11 +850,11 @@ int i915_restore_state(struct drm_device *dev)
 		I915_WRITE(DEIMR, dev_priv->saveDEIMR);
 		I915_WRITE(GTIER, dev_priv->saveGTIER);
 		I915_WRITE(GTIMR, dev_priv->saveGTIMR);
-		I915_WRITE(FDI_RXA_IMR, dev_priv->saveFDI_RXA_IMR);
-		I915_WRITE(FDI_RXB_IMR, dev_priv->saveFDI_RXB_IMR);
+		I915_WRITE(_FDI_RXA_IMR, dev_priv->saveFDI_RXA_IMR);
+		I915_WRITE(_FDI_RXB_IMR, dev_priv->saveFDI_RXB_IMR);
 	} else {
-		I915_WRITE (IER, dev_priv->saveIER);
-		I915_WRITE (IMR, dev_priv->saveIMR);
+		I915_WRITE(IER, dev_priv->saveIER);
+		I915_WRITE(IMR, dev_priv->saveIMR);
 	}
 
 	/* Clock gating state */
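The underscore-prefixed names above are the point of this rename: the old fixed A/B register names become private, and pipe-parameterized accessor macros are built on top of them. A sketch of the assumed convention (inferred from the rename pattern; the exact macro bodies live in i915_reg.h and may differ):

	/* Assumed shape of the per-pipe accessor macros used by later hunks. */
	#define _PIPE(pipe, a, b) ((a) + (pipe)*((b)-(a)))
	#define DPLL(pipe) _PIPE(pipe, _DPLL_A, _DPLL_B)
	#define PIPECONF(pipe) _PIPE(pipe, _PIPEACONF, _PIPEBCONF)

Code that previously open-coded `(pipe == PIPE_A) ? _DPLL_A : _DPLL_B` can then collapse to `I915_READ(DPLL(pipe))`, which is exactly what the intel_crt.c and intel_display.c hunks below do.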
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index 7f0fc3ed61aa..d623fefbfaca 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -7,6 +7,7 @@
 
 #include <drm/drmP.h>
 #include "i915_drv.h"
+#include "intel_ringbuffer.h"
 
 #undef TRACE_SYSTEM
 #define TRACE_SYSTEM i915
@@ -16,9 +17,7 @@
 /* object tracking */
 
 TRACE_EVENT(i915_gem_object_create,
-
 	    TP_PROTO(struct drm_i915_gem_object *obj),
-
 	    TP_ARGS(obj),
 
 	    TP_STRUCT__entry(
@@ -35,33 +34,51 @@ TRACE_EVENT(i915_gem_object_create,
 );
 
 TRACE_EVENT(i915_gem_object_bind,
-
-	    TP_PROTO(struct drm_i915_gem_object *obj, u32 gtt_offset, bool mappable),
-
-	    TP_ARGS(obj, gtt_offset, mappable),
+	    TP_PROTO(struct drm_i915_gem_object *obj, bool mappable),
+	    TP_ARGS(obj, mappable),
 
 	    TP_STRUCT__entry(
 			     __field(struct drm_i915_gem_object *, obj)
-			     __field(u32, gtt_offset)
+			     __field(u32, offset)
+			     __field(u32, size)
 			     __field(bool, mappable)
 			     ),
 
 	    TP_fast_assign(
			   __entry->obj = obj;
-			   __entry->gtt_offset = gtt_offset;
+			   __entry->offset = obj->gtt_space->start;
+			   __entry->size = obj->gtt_space->size;
			   __entry->mappable = mappable;
			   ),
 
-	    TP_printk("obj=%p, gtt_offset=%08x%s",
-		      __entry->obj, __entry->gtt_offset,
+	    TP_printk("obj=%p, offset=%08x size=%x%s",
+		      __entry->obj, __entry->offset, __entry->size,
 		      __entry->mappable ? ", mappable" : "")
 );
 
-TRACE_EVENT(i915_gem_object_change_domain,
+TRACE_EVENT(i915_gem_object_unbind,
+	    TP_PROTO(struct drm_i915_gem_object *obj),
+	    TP_ARGS(obj),
+
+	    TP_STRUCT__entry(
+			     __field(struct drm_i915_gem_object *, obj)
+			     __field(u32, offset)
+			     __field(u32, size)
+			     ),
 
-	    TP_PROTO(struct drm_i915_gem_object *obj, uint32_t old_read_domains, uint32_t old_write_domain),
+	    TP_fast_assign(
+			   __entry->obj = obj;
+			   __entry->offset = obj->gtt_space->start;
+			   __entry->size = obj->gtt_space->size;
+			   ),
 
-	    TP_ARGS(obj, old_read_domains, old_write_domain),
+	    TP_printk("obj=%p, offset=%08x size=%x",
+		      __entry->obj, __entry->offset, __entry->size)
+);
+
+TRACE_EVENT(i915_gem_object_change_domain,
+	    TP_PROTO(struct drm_i915_gem_object *obj, u32 old_read, u32 old_write),
+	    TP_ARGS(obj, old_read, old_write),
 
 	    TP_STRUCT__entry(
 			     __field(struct drm_i915_gem_object *, obj)
@@ -71,177 +88,264 @@ TRACE_EVENT(i915_gem_object_change_domain,
 
 	    TP_fast_assign(
			   __entry->obj = obj;
-			   __entry->read_domains = obj->base.read_domains | (old_read_domains << 16);
-			   __entry->write_domain = obj->base.write_domain | (old_write_domain << 16);
+			   __entry->read_domains = obj->base.read_domains | (old_read << 16);
+			   __entry->write_domain = obj->base.write_domain | (old_write << 16);
			   ),
 
-	    TP_printk("obj=%p, read=%04x, write=%04x",
+	    TP_printk("obj=%p, read=%02x=>%02x, write=%02x=>%02x",
 		      __entry->obj,
-		      __entry->read_domains, __entry->write_domain)
+		      __entry->read_domains >> 16,
+		      __entry->read_domains & 0xffff,
+		      __entry->write_domain >> 16,
+		      __entry->write_domain & 0xffff)
 );
 
-DECLARE_EVENT_CLASS(i915_gem_object,
+TRACE_EVENT(i915_gem_object_pwrite,
+	    TP_PROTO(struct drm_i915_gem_object *obj, u32 offset, u32 len),
+	    TP_ARGS(obj, offset, len),
 
-	    TP_PROTO(struct drm_i915_gem_object *obj),
+	    TP_STRUCT__entry(
+			     __field(struct drm_i915_gem_object *, obj)
+			     __field(u32, offset)
+			     __field(u32, len)
+			     ),
 
-	    TP_ARGS(obj),
+	    TP_fast_assign(
+			   __entry->obj = obj;
+			   __entry->offset = offset;
+			   __entry->len = len;
+			   ),
+
+	    TP_printk("obj=%p, offset=%u, len=%u",
+		      __entry->obj, __entry->offset, __entry->len)
+);
+
+TRACE_EVENT(i915_gem_object_pread,
+	    TP_PROTO(struct drm_i915_gem_object *obj, u32 offset, u32 len),
+	    TP_ARGS(obj, offset, len),
 
 	    TP_STRUCT__entry(
 			     __field(struct drm_i915_gem_object *, obj)
+			     __field(u32, offset)
+			     __field(u32, len)
 			     ),
 
 	    TP_fast_assign(
			   __entry->obj = obj;
+			   __entry->offset = offset;
+			   __entry->len = len;
			   ),
 
-	    TP_printk("obj=%p", __entry->obj)
+	    TP_printk("obj=%p, offset=%u, len=%u",
+		      __entry->obj, __entry->offset, __entry->len)
 );
 
-DEFINE_EVENT(i915_gem_object, i915_gem_object_clflush,
+TRACE_EVENT(i915_gem_object_fault,
+	    TP_PROTO(struct drm_i915_gem_object *obj, u32 index, bool gtt, bool write),
+	    TP_ARGS(obj, index, gtt, write),
+
+	    TP_STRUCT__entry(
+			     __field(struct drm_i915_gem_object *, obj)
+			     __field(u32, index)
+			     __field(bool, gtt)
+			     __field(bool, write)
+			     ),
+
+	    TP_fast_assign(
+			   __entry->obj = obj;
+			   __entry->index = index;
+			   __entry->gtt = gtt;
+			   __entry->write = write;
+			   ),
 
+	    TP_printk("obj=%p, %s index=%u %s",
+		      __entry->obj,
+		      __entry->gtt ? "GTT" : "CPU",
+		      __entry->index,
+		      __entry->write ? ", writable" : "")
+);
+
+DECLARE_EVENT_CLASS(i915_gem_object,
 	    TP_PROTO(struct drm_i915_gem_object *obj),
+	    TP_ARGS(obj),
 
-	    TP_ARGS(obj)
+	    TP_STRUCT__entry(
+			     __field(struct drm_i915_gem_object *, obj)
+			     ),
+
+	    TP_fast_assign(
+			   __entry->obj = obj;
+			   ),
+
+	    TP_printk("obj=%p", __entry->obj)
 );
 
-DEFINE_EVENT(i915_gem_object, i915_gem_object_unbind,
+DEFINE_EVENT(i915_gem_object, i915_gem_object_clflush,
+	    TP_PROTO(struct drm_i915_gem_object *obj),
+	    TP_ARGS(obj)
+);
 
+DEFINE_EVENT(i915_gem_object, i915_gem_object_destroy,
 	    TP_PROTO(struct drm_i915_gem_object *obj),
-
 	    TP_ARGS(obj)
 );
 
-DEFINE_EVENT(i915_gem_object, i915_gem_object_destroy,
+TRACE_EVENT(i915_gem_evict,
+	    TP_PROTO(struct drm_device *dev, u32 size, u32 align, bool mappable),
+	    TP_ARGS(dev, size, align, mappable),
 
-	    TP_PROTO(struct drm_i915_gem_object *obj),
+	    TP_STRUCT__entry(
+			     __field(u32, dev)
+			     __field(u32, size)
+			     __field(u32, align)
+			     __field(bool, mappable)
+			     ),
 
-	    TP_ARGS(obj)
+	    TP_fast_assign(
+			   __entry->dev = dev->primary->index;
+			   __entry->size = size;
+			   __entry->align = align;
+			   __entry->mappable = mappable;
+			   ),
+
+	    TP_printk("dev=%d, size=%d, align=%d %s",
+		      __entry->dev, __entry->size, __entry->align,
+		      __entry->mappable ? ", mappable" : "")
 );
 
-/* batch tracing */
+TRACE_EVENT(i915_gem_evict_everything,
+	    TP_PROTO(struct drm_device *dev, bool purgeable),
+	    TP_ARGS(dev, purgeable),
 
-TRACE_EVENT(i915_gem_request_submit,
+	    TP_STRUCT__entry(
+			     __field(u32, dev)
+			     __field(bool, purgeable)
+			     ),
+
+	    TP_fast_assign(
+			   __entry->dev = dev->primary->index;
+			   __entry->purgeable = purgeable;
+			   ),
 
-	    TP_PROTO(struct drm_device *dev, u32 seqno),
+	    TP_printk("dev=%d%s",
+		      __entry->dev,
+		      __entry->purgeable ? ", purgeable only" : "")
+);
 
-	    TP_ARGS(dev, seqno),
+TRACE_EVENT(i915_gem_ring_dispatch,
+	    TP_PROTO(struct intel_ring_buffer *ring, u32 seqno),
+	    TP_ARGS(ring, seqno),
 
 	    TP_STRUCT__entry(
 			     __field(u32, dev)
+			     __field(u32, ring)
 			     __field(u32, seqno)
 			     ),
 
 	    TP_fast_assign(
-			   __entry->dev = dev->primary->index;
+			   __entry->dev = ring->dev->primary->index;
+			   __entry->ring = ring->id;
			   __entry->seqno = seqno;
-			   i915_trace_irq_get(dev, seqno);
+			   i915_trace_irq_get(ring, seqno);
			   ),
 
-	    TP_printk("dev=%u, seqno=%u", __entry->dev, __entry->seqno)
+	    TP_printk("dev=%u, ring=%u, seqno=%u",
+		      __entry->dev, __entry->ring, __entry->seqno)
 );
 
-TRACE_EVENT(i915_gem_request_flush,
-
-	    TP_PROTO(struct drm_device *dev, u32 seqno,
-		     u32 flush_domains, u32 invalidate_domains),
-
-	    TP_ARGS(dev, seqno, flush_domains, invalidate_domains),
+TRACE_EVENT(i915_gem_ring_flush,
+	    TP_PROTO(struct intel_ring_buffer *ring, u32 invalidate, u32 flush),
+	    TP_ARGS(ring, invalidate, flush),
 
 	    TP_STRUCT__entry(
 			     __field(u32, dev)
-			     __field(u32, seqno)
-			     __field(u32, flush_domains)
-			     __field(u32, invalidate_domains)
+			     __field(u32, ring)
+			     __field(u32, invalidate)
+			     __field(u32, flush)
 			     ),
 
 	    TP_fast_assign(
-			   __entry->dev = dev->primary->index;
-			   __entry->seqno = seqno;
-			   __entry->flush_domains = flush_domains;
-			   __entry->invalidate_domains = invalidate_domains;
+			   __entry->dev = ring->dev->primary->index;
+			   __entry->ring = ring->id;
+			   __entry->invalidate = invalidate;
+			   __entry->flush = flush;
			   ),
 
-	    TP_printk("dev=%u, seqno=%u, flush=%04x, invalidate=%04x",
-		      __entry->dev, __entry->seqno,
-		      __entry->flush_domains, __entry->invalidate_domains)
+	    TP_printk("dev=%u, ring=%x, invalidate=%04x, flush=%04x",
		      __entry->dev, __entry->ring,
		      __entry->invalidate, __entry->flush)
 );
 
 DECLARE_EVENT_CLASS(i915_gem_request,
-
-	    TP_PROTO(struct drm_device *dev, u32 seqno),
-
-	    TP_ARGS(dev, seqno),
+	    TP_PROTO(struct intel_ring_buffer *ring, u32 seqno),
+	    TP_ARGS(ring, seqno),
 
 	    TP_STRUCT__entry(
 			     __field(u32, dev)
+			     __field(u32, ring)
 			     __field(u32, seqno)
 			     ),
 
 	    TP_fast_assign(
-			   __entry->dev = dev->primary->index;
+			   __entry->dev = ring->dev->primary->index;
+			   __entry->ring = ring->id;
			   __entry->seqno = seqno;
			   ),
 
-	    TP_printk("dev=%u, seqno=%u", __entry->dev, __entry->seqno)
+	    TP_printk("dev=%u, ring=%u, seqno=%u",
		      __entry->dev, __entry->ring, __entry->seqno)
 );
 
-DEFINE_EVENT(i915_gem_request, i915_gem_request_complete,
-
-	    TP_PROTO(struct drm_device *dev, u32 seqno),
+DEFINE_EVENT(i915_gem_request, i915_gem_request_add,
+	    TP_PROTO(struct intel_ring_buffer *ring, u32 seqno),
+	    TP_ARGS(ring, seqno)
+);
 
-	    TP_ARGS(dev, seqno)
+DEFINE_EVENT(i915_gem_request, i915_gem_request_complete,
+	    TP_PROTO(struct intel_ring_buffer *ring, u32 seqno),
+	    TP_ARGS(ring, seqno)
 );
 
 DEFINE_EVENT(i915_gem_request, i915_gem_request_retire,
-
-	    TP_PROTO(struct drm_device *dev, u32 seqno),
-
-	    TP_ARGS(dev, seqno)
+	    TP_PROTO(struct intel_ring_buffer *ring, u32 seqno),
+	    TP_ARGS(ring, seqno)
 );
 
 DEFINE_EVENT(i915_gem_request, i915_gem_request_wait_begin,
-
-	    TP_PROTO(struct drm_device *dev, u32 seqno),
-
-	    TP_ARGS(dev, seqno)
+	    TP_PROTO(struct intel_ring_buffer *ring, u32 seqno),
+	    TP_ARGS(ring, seqno)
 );
 
 DEFINE_EVENT(i915_gem_request, i915_gem_request_wait_end,
-
-	    TP_PROTO(struct drm_device *dev, u32 seqno),
-
-	    TP_ARGS(dev, seqno)
+	    TP_PROTO(struct intel_ring_buffer *ring, u32 seqno),
+	    TP_ARGS(ring, seqno)
 );
 
 DECLARE_EVENT_CLASS(i915_ring,
-
-	    TP_PROTO(struct drm_device *dev),
-
-	    TP_ARGS(dev),
+	    TP_PROTO(struct intel_ring_buffer *ring),
+	    TP_ARGS(ring),
 
 	    TP_STRUCT__entry(
 			     __field(u32, dev)
+			     __field(u32, ring)
 			     ),
 
 	    TP_fast_assign(
-			   __entry->dev = dev->primary->index;
+			   __entry->dev = ring->dev->primary->index;
+			   __entry->ring = ring->id;
			   ),
 
-	    TP_printk("dev=%u", __entry->dev)
+	    TP_printk("dev=%u, ring=%u", __entry->dev, __entry->ring)
 );
 
 DEFINE_EVENT(i915_ring, i915_ring_wait_begin,
-
-	    TP_PROTO(struct drm_device *dev),
-
-	    TP_ARGS(dev)
+	    TP_PROTO(struct intel_ring_buffer *ring),
+	    TP_ARGS(ring)
 );
 
 DEFINE_EVENT(i915_ring, i915_ring_wait_end,
-
-	    TP_PROTO(struct drm_device *dev),
-
-	    TP_ARGS(dev)
+	    TP_PROTO(struct intel_ring_buffer *ring),
+	    TP_ARGS(ring)
 );
 
 TRACE_EVENT(i915_flip_request,
@@ -281,26 +385,29 @@ TRACE_EVENT(i915_flip_complete,
 );
 
 TRACE_EVENT(i915_reg_rw,
-	TP_PROTO(int cmd, uint32_t reg, uint64_t val, int len),
+	TP_PROTO(bool write, u32 reg, u64 val, int len),
 
-	TP_ARGS(cmd, reg, val, len),
+	TP_ARGS(write, reg, val, len),
 
 	TP_STRUCT__entry(
-		__field(int, cmd)
-		__field(uint32_t, reg)
-		__field(uint64_t, val)
-		__field(int, len)
+		__field(u64, val)
+		__field(u32, reg)
+		__field(u16, write)
+		__field(u16, len)
		),
 
 	TP_fast_assign(
-		__entry->cmd = cmd;
+		__entry->val = (u64)val;
		__entry->reg = reg;
-		__entry->val = (uint64_t)val;
+		__entry->write = write;
		__entry->len = len;
		),
 
-	TP_printk("cmd=%c, reg=0x%x, val=0x%llx, len=%d",
-		  __entry->cmd, __entry->reg, __entry->val, __entry->len)
+	TP_printk("%s reg=0x%x, len=%d, val=(0x%x, 0x%x)",
+		  __entry->write ? "write" : "read",
+		  __entry->reg, __entry->len,
+		  (u32)(__entry->val & 0xffffffff),
+		  (u32)(__entry->val >> 32))
 );
 
 #endif /* _I915_TRACE_H_ */
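Since the request and ring events now share DECLARE_EVENT_CLASS templates, adding another event becomes a three-line affair. A sketch with a hypothetical event name, following the pattern visible above:

	/* Hypothetical example event reusing the i915_gem_request class. */
	DEFINE_EVENT(i915_gem_request, i915_gem_request_example,
		    TP_PROTO(struct intel_ring_buffer *ring, u32 seqno),
		    TP_ARGS(ring, seqno)
	);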
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 0b44956c336b..fb5b4d426ae0 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -226,29 +226,49 @@ static void
 parse_sdvo_panel_data(struct drm_i915_private *dev_priv,
 		      struct bdb_header *bdb)
 {
-	struct bdb_sdvo_lvds_options *sdvo_lvds_options;
 	struct lvds_dvo_timing *dvo_timing;
 	struct drm_display_mode *panel_fixed_mode;
+	int index;
 
-	sdvo_lvds_options = find_section(bdb, BDB_SDVO_LVDS_OPTIONS);
-	if (!sdvo_lvds_options)
-		return;
+	index = i915_vbt_sdvo_panel_type;
+	if (index == -1) {
+		struct bdb_sdvo_lvds_options *sdvo_lvds_options;
+
+		sdvo_lvds_options = find_section(bdb, BDB_SDVO_LVDS_OPTIONS);
+		if (!sdvo_lvds_options)
+			return;
+
+		index = sdvo_lvds_options->panel_type;
+	}
 
 	dvo_timing = find_section(bdb, BDB_SDVO_PANEL_DTDS);
 	if (!dvo_timing)
 		return;
 
 	panel_fixed_mode = kzalloc(sizeof(*panel_fixed_mode), GFP_KERNEL);
-
 	if (!panel_fixed_mode)
 		return;
 
-	fill_detail_timing_data(panel_fixed_mode,
-			dvo_timing + sdvo_lvds_options->panel_type);
+	fill_detail_timing_data(panel_fixed_mode, dvo_timing + index);
 
 	dev_priv->sdvo_lvds_vbt_mode = panel_fixed_mode;
 
-	return;
+	DRM_DEBUG_KMS("Found SDVO panel mode in BIOS VBT tables:\n");
+	drm_mode_debug_printmodeline(panel_fixed_mode);
+}
+
+static int intel_bios_ssc_frequency(struct drm_device *dev,
+				    bool alternate)
+{
+	switch (INTEL_INFO(dev)->gen) {
+	case 2:
+		return alternate ? 66 : 48;
+	case 3:
+	case 4:
+		return alternate ? 100 : 96;
+	default:
+		return alternate ? 100 : 120;
+	}
 }
 
 static void
@@ -263,13 +283,8 @@ parse_general_features(struct drm_i915_private *dev_priv,
 		dev_priv->int_tv_support = general->int_tv_support;
 		dev_priv->int_crt_support = general->int_crt_support;
 		dev_priv->lvds_use_ssc = general->enable_ssc;
-
-		if (IS_I85X(dev))
-			dev_priv->lvds_ssc_freq = general->ssc_freq ? 66 : 48;
-		else if (IS_GEN5(dev) || IS_GEN6(dev))
-			dev_priv->lvds_ssc_freq = general->ssc_freq ? 100 : 120;
-		else
-			dev_priv->lvds_ssc_freq = general->ssc_freq ? 100 : 96;
+		dev_priv->lvds_ssc_freq =
+			intel_bios_ssc_frequency(dev, general->ssc_freq);
 	}
 }
 
@@ -553,6 +568,8 @@ parse_device_mapping(struct drm_i915_private *dev_priv,
 static void
 init_vbt_defaults(struct drm_i915_private *dev_priv)
 {
+	struct drm_device *dev = dev_priv->dev;
+
 	dev_priv->crt_ddc_pin = GMBUS_PORT_VGADDC;
 
 	/* LFP panel data */
@@ -565,7 +582,11 @@ init_vbt_defaults(struct drm_i915_private *dev_priv)
 	/* general features */
 	dev_priv->int_tv_support = 1;
 	dev_priv->int_crt_support = 1;
-	dev_priv->lvds_use_ssc = 0;
+
+	/* Default to using SSC */
+	dev_priv->lvds_use_ssc = 1;
+	dev_priv->lvds_ssc_freq = intel_bios_ssc_frequency(dev, 1);
+	DRM_DEBUG("Set default to SSC at %dMHz\n", dev_priv->lvds_ssc_freq);
 
 	/* eDP data */
 	dev_priv->edp.bpp = 18;
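parse_sdvo_panel_data() above consults i915_vbt_sdvo_panel_type before falling back to the VBT; -1 means "use the BIOS value". A sketch of the declaration side that would pair with it (assumed to live in i915_drv.c with the driver's other module parameters; not shown in this diff, so the exact name and permissions are illustrative):

	/* Assumed declaration; -1 keeps the VBT-provided panel type. */
	int i915_vbt_sdvo_panel_type = -1;
	module_param_named(vbt_sdvo_panel_type, i915_vbt_sdvo_panel_type, int, 0600);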
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 8a77ff4a7237..8342259f3160 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -129,10 +129,7 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
 	u32 adpa, dpll_md;
 	u32 adpa_reg;
 
-	if (intel_crtc->pipe == 0)
-		dpll_md_reg = DPLL_A_MD;
-	else
-		dpll_md_reg = DPLL_B_MD;
+	dpll_md_reg = DPLL_MD(intel_crtc->pipe);
 
 	if (HAS_PCH_SPLIT(dev))
 		adpa_reg = PCH_ADPA;
@@ -160,17 +157,16 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
 			adpa |= PORT_TRANS_A_SEL_CPT;
 		else
 			adpa |= ADPA_PIPE_A_SELECT;
-		if (!HAS_PCH_SPLIT(dev))
-			I915_WRITE(BCLRPAT_A, 0);
 	} else {
 		if (HAS_PCH_CPT(dev))
 			adpa |= PORT_TRANS_B_SEL_CPT;
 		else
 			adpa |= ADPA_PIPE_B_SELECT;
-		if (!HAS_PCH_SPLIT(dev))
-			I915_WRITE(BCLRPAT_B, 0);
 	}
 
+	if (!HAS_PCH_SPLIT(dev))
+		I915_WRITE(BCLRPAT(intel_crtc->pipe), 0);
+
 	I915_WRITE(adpa_reg, adpa);
 }
 
@@ -353,21 +349,12 @@ intel_crt_load_detect(struct drm_crtc *crtc, struct intel_crt *crt)
 
 	DRM_DEBUG_KMS("starting load-detect on CRT\n");
 
-	if (pipe == 0) {
-		bclrpat_reg = BCLRPAT_A;
-		vtotal_reg = VTOTAL_A;
-		vblank_reg = VBLANK_A;
-		vsync_reg = VSYNC_A;
-		pipeconf_reg = PIPEACONF;
-		pipe_dsl_reg = PIPEADSL;
-	} else {
-		bclrpat_reg = BCLRPAT_B;
-		vtotal_reg = VTOTAL_B;
-		vblank_reg = VBLANK_B;
-		vsync_reg = VSYNC_B;
-		pipeconf_reg = PIPEBCONF;
-		pipe_dsl_reg = PIPEBDSL;
-	}
+	bclrpat_reg = BCLRPAT(pipe);
+	vtotal_reg = VTOTAL(pipe);
+	vblank_reg = VBLANK(pipe);
+	vsync_reg = VSYNC(pipe);
+	pipeconf_reg = PIPECONF(pipe);
+	pipe_dsl_reg = PIPEDSL(pipe);
 
 	save_bclrpat = I915_READ(bclrpat_reg);
 	save_vtotal = I915_READ(vtotal_reg);
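The intel_display.c changes below add a family of state-assertion helpers (assert_pll, assert_pipe, assert_planes_disabled, and friends). A hedged sketch of the intended call pattern, illustrative rather than lifted from the patch: each enable/disable step asserts its precondition so that ordering bugs trip a WARN with a readable message instead of silently wedging the hardware.

	/* Illustrative only: guard a pre-ILK PLL disable with the new asserts. */
	static void example_disable_pll(struct drm_i915_private *dev_priv,
					enum pipe pipe)
	{
		assert_pipe_disabled(dev_priv, pipe);	/* pipe must be off first */
		I915_WRITE(DPLL(pipe),
			   I915_READ(DPLL(pipe)) & ~DPLL_VCO_ENABLE);
	}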
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 7e42aa586504..3106c0dc8389 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -989,7 +989,7 @@ intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
 void intel_wait_for_vblank(struct drm_device *dev, int pipe)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	int pipestat_reg = (pipe == 0 ? PIPEASTAT : PIPEBSTAT);
+	int pipestat_reg = PIPESTAT(pipe);
 
 	/* Clear existing vblank status. Note this will clear any other
 	 * sticky status fields as well.
@@ -1058,6 +1058,612 @@ void intel_wait_for_pipe_off(struct drm_device *dev, int pipe)
 	}
 }
 
+static const char *state_string(bool enabled)
+{
+	return enabled ? "on" : "off";
+}
+
+/* Only for pre-ILK configs */
+static void assert_pll(struct drm_i915_private *dev_priv,
+		       enum pipe pipe, bool state)
+{
+	int reg;
+	u32 val;
+	bool cur_state;
+
+	reg = DPLL(pipe);
+	val = I915_READ(reg);
+	cur_state = !!(val & DPLL_VCO_ENABLE);
+	WARN(cur_state != state,
+	     "PLL state assertion failure (expected %s, current %s)\n",
+	     state_string(state), state_string(cur_state));
+}
+#define assert_pll_enabled(d, p) assert_pll(d, p, true)
+#define assert_pll_disabled(d, p) assert_pll(d, p, false)
+
+/* For ILK+ */
+static void assert_pch_pll(struct drm_i915_private *dev_priv,
+			   enum pipe pipe, bool state)
+{
+	int reg;
+	u32 val;
+	bool cur_state;
+
+	reg = PCH_DPLL(pipe);
+	val = I915_READ(reg);
+	cur_state = !!(val & DPLL_VCO_ENABLE);
+	WARN(cur_state != state,
+	     "PCH PLL state assertion failure (expected %s, current %s)\n",
+	     state_string(state), state_string(cur_state));
+}
+#define assert_pch_pll_enabled(d, p) assert_pch_pll(d, p, true)
+#define assert_pch_pll_disabled(d, p) assert_pch_pll(d, p, false)
+
+static void assert_fdi_tx(struct drm_i915_private *dev_priv,
+			  enum pipe pipe, bool state)
+{
+	int reg;
+	u32 val;
+	bool cur_state;
+
+	reg = FDI_TX_CTL(pipe);
+	val = I915_READ(reg);
+	cur_state = !!(val & FDI_TX_ENABLE);
+	WARN(cur_state != state,
+	     "FDI TX state assertion failure (expected %s, current %s)\n",
+	     state_string(state), state_string(cur_state));
+}
+#define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
+#define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)
+
+static void assert_fdi_rx(struct drm_i915_private *dev_priv,
+			  enum pipe pipe, bool state)
+{
+	int reg;
+	u32 val;
+	bool cur_state;
+
+	reg = FDI_RX_CTL(pipe);
+	val = I915_READ(reg);
+	cur_state = !!(val & FDI_RX_ENABLE);
+	WARN(cur_state != state,
+	     "FDI RX state assertion failure (expected %s, current %s)\n",
+	     state_string(state), state_string(cur_state));
+}
+#define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
+#define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)
+
+static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
+				      enum pipe pipe)
+{
+	int reg;
+	u32 val;
+
+	/* ILK FDI PLL is always enabled */
+	if (dev_priv->info->gen == 5)
+		return;
+
+	reg = FDI_TX_CTL(pipe);
+	val = I915_READ(reg);
+	WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
+}
+
+static void assert_fdi_rx_pll_enabled(struct drm_i915_private *dev_priv,
+				      enum pipe pipe)
+{
+	int reg;
+	u32 val;
+
+	reg = FDI_RX_CTL(pipe);
+	val = I915_READ(reg);
+	WARN(!(val & FDI_RX_PLL_ENABLE), "FDI RX PLL assertion failure, should be active but is disabled\n");
+}
+
+static void assert_panel_unlocked(struct drm_i915_private *dev_priv,
+				  enum pipe pipe)
+{
+	int pp_reg, lvds_reg;
+	u32 val;
+	enum pipe panel_pipe = PIPE_A;
+	bool locked = true;
+
+	if (HAS_PCH_SPLIT(dev_priv->dev)) {
+		pp_reg = PCH_PP_CONTROL;
+		lvds_reg = PCH_LVDS;
+	} else {
+		pp_reg = PP_CONTROL;
+		lvds_reg = LVDS;
+	}
+
+	val = I915_READ(pp_reg);
+	if (!(val & PANEL_POWER_ON) ||
+	    ((val & PANEL_UNLOCK_REGS) == PANEL_UNLOCK_REGS))
+		locked = false;
+
+	if (I915_READ(lvds_reg) & LVDS_PIPEB_SELECT)
+		panel_pipe = PIPE_B;
+
+	WARN(panel_pipe == pipe && locked,
+	     "panel assertion failure, pipe %c regs locked\n",
+	     pipe_name(pipe));
+}
+
+static void assert_pipe(struct drm_i915_private *dev_priv,
+			enum pipe pipe, bool state)
+{
+	int reg;
+	u32 val;
+	bool cur_state;
+
+	reg = PIPECONF(pipe);
+	val = I915_READ(reg);
+	cur_state = !!(val & PIPECONF_ENABLE);
+	WARN(cur_state != state,
+	     "pipe %c assertion failure (expected %s, current %s)\n",
+	     pipe_name(pipe), state_string(state), state_string(cur_state));
+}
+#define assert_pipe_enabled(d, p) assert_pipe(d, p, true)
+#define assert_pipe_disabled(d, p) assert_pipe(d, p, false)
+
+static void assert_plane_enabled(struct drm_i915_private *dev_priv,
+				 enum plane plane)
+{
+	int reg;
+	u32 val;
+
+	reg = DSPCNTR(plane);
+	val = I915_READ(reg);
+	WARN(!(val & DISPLAY_PLANE_ENABLE),
+	     "plane %c assertion failure, should be active but is disabled\n",
+	     plane_name(plane));
+}
+
+static void assert_planes_disabled(struct drm_i915_private *dev_priv,
+				   enum pipe pipe)
+{
+	int reg, i;
+	u32 val;
+	int cur_pipe;
+
+	/* Planes are fixed to pipes on ILK+ */
+	if (HAS_PCH_SPLIT(dev_priv->dev))
+		return;
+
+	/* Need to check both planes against the pipe */
+	for (i = 0; i < 2; i++) {
+		reg = DSPCNTR(i);
+		val = I915_READ(reg);
+		cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
+			DISPPLANE_SEL_PIPE_SHIFT;
+		WARN((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe,
+		     "plane %c assertion failure, should be off on pipe %c but is still active\n",
+		     plane_name(i), pipe_name(pipe));
+	}
+}
+
+static void assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
+{
+	u32 val;
+	bool enabled;
+
+	val = I915_READ(PCH_DREF_CONTROL);
+	enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
+			    DREF_SUPERSPREAD_SOURCE_MASK));
+	WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
+}
+
+static void assert_transcoder_disabled(struct drm_i915_private *dev_priv,
+				       enum pipe pipe)
+{
+	int reg;
+	u32 val;
+	bool enabled;
+
+	reg = TRANSCONF(pipe);
+	val = I915_READ(reg);
+	enabled = !!(val & TRANS_ENABLE);
+	WARN(enabled,
+	     "transcoder assertion failed, should be off on pipe %c but is still active\n",
+	     pipe_name(pipe));
+}
+
+static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
+				   enum pipe pipe, int reg)
+{
+	u32 val = I915_READ(reg);
+	WARN(DP_PIPE_ENABLED(val, pipe),
+	     "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
+	     reg, pipe_name(pipe));
+}
+
+static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
+				     enum pipe pipe, int reg)
+{
+	u32 val = I915_READ(reg);
+	WARN(HDMI_PIPE_ENABLED(val, pipe),
1284 "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
1285 reg, pipe_name(pipe));
1286}
1287
1288static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
1289 enum pipe pipe)
1290{
1291 int reg;
1292 u32 val;
1293
1294 assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B);
1295 assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C);
1296 assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D);
1297
1298 reg = PCH_ADPA;
1299 val = I915_READ(reg);
1300 WARN(ADPA_PIPE_ENABLED(val, pipe),
1301 "PCH VGA enabled on transcoder %c, should be disabled\n",
1302 pipe_name(pipe));
1303
1304 reg = PCH_LVDS;
1305 val = I915_READ(reg);
1306 WARN(LVDS_PIPE_ENABLED(val, pipe),
1307 "PCH LVDS enabled on transcoder %c, should be disabled\n",
1308 pipe_name(pipe));
1309
1310 assert_pch_hdmi_disabled(dev_priv, pipe, HDMIB);
1311 assert_pch_hdmi_disabled(dev_priv, pipe, HDMIC);
1312 assert_pch_hdmi_disabled(dev_priv, pipe, HDMID);
1313}
1314
1315/**
1316 * intel_enable_pll - enable a PLL
1317 * @dev_priv: i915 private structure
1318 * @pipe: pipe PLL to enable
1319 *
1320 * Enable @pipe's PLL so we can start pumping pixels from a plane. Check to
1321 * make sure the PLL reg is writable first though, since the panel write
1322 * protect mechanism may be enabled.
1323 *
1324 * Note! This is for pre-ILK only.
1325 */
1326static void intel_enable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1327{
1328 int reg;
1329 u32 val;
1330
1331 /* No really, not for ILK+ */
1332 BUG_ON(dev_priv->info->gen >= 5);
1333
1334 /* PLL is protected by panel, make sure we can write it */
1335 if (IS_MOBILE(dev_priv->dev) && !IS_I830(dev_priv->dev))
1336 assert_panel_unlocked(dev_priv, pipe);
1337
1338 reg = DPLL(pipe);
1339 val = I915_READ(reg);
1340 val |= DPLL_VCO_ENABLE;
1341
1342 /* We do this three times for luck */
1343 I915_WRITE(reg, val);
1344 POSTING_READ(reg);
1345 udelay(150); /* wait for warmup */
1346 I915_WRITE(reg, val);
1347 POSTING_READ(reg);
1348 udelay(150); /* wait for warmup */
1349 I915_WRITE(reg, val);
1350 POSTING_READ(reg);
1351 udelay(150); /* wait for warmup */
1352}
1353
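For orientation, the power-up ordering this helper family encodes is easiest
to read end to end. A minimal sketch, assuming the enable helpers introduced
by this patch (the wrapper name below is hypothetical); it mirrors what the
reworked i9xx_crtc_enable() further down reduces to:

static void i9xx_power_up_sketch(struct drm_i915_private *dev_priv,
				 enum pipe pipe, enum plane plane)
{
	intel_enable_pll(dev_priv, pipe);	   /* clocks stable first */
	intel_enable_pipe(dev_priv, pipe, false);  /* no PCH port pre-ILK */
	intel_enable_plane(dev_priv, plane, pipe); /* now pump pixels */
}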
1354/**
1355 * intel_disable_pll - disable a PLL
1356 * @dev_priv: i915 private structure
1357 * @pipe: pipe PLL to disable
1358 *
1359 * Disable the PLL for @pipe, making sure the pipe is off first.
1360 *
1361 * Note! This is for pre-ILK only.
1362 */
1363static void intel_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1364{
1365 int reg;
1366 u32 val;
1367
1368	/* Don't disable the pipe A PLL if the PIPEA_FORCE quirk needs pipe A on */
1369 if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
1370 return;
1371
1372 /* Make sure the pipe isn't still relying on us */
1373 assert_pipe_disabled(dev_priv, pipe);
1374
1375 reg = DPLL(pipe);
1376 val = I915_READ(reg);
1377 val &= ~DPLL_VCO_ENABLE;
1378 I915_WRITE(reg, val);
1379 POSTING_READ(reg);
1380}
1381
1382/**
1383 * intel_enable_pch_pll - enable PCH PLL
1384 * @dev_priv: i915 private structure
1385 * @pipe: pipe PLL to enable
1386 *
1387 * The PCH PLL needs to be enabled before the PCH transcoder, since it
1388 * drives the transcoder clock.
1389 */
1390static void intel_enable_pch_pll(struct drm_i915_private *dev_priv,
1391 enum pipe pipe)
1392{
1393 int reg;
1394 u32 val;
1395
1396 /* PCH only available on ILK+ */
1397 BUG_ON(dev_priv->info->gen < 5);
1398
1399 /* PCH refclock must be enabled first */
1400 assert_pch_refclk_enabled(dev_priv);
1401
1402 reg = PCH_DPLL(pipe);
1403 val = I915_READ(reg);
1404 val |= DPLL_VCO_ENABLE;
1405 I915_WRITE(reg, val);
1406 POSTING_READ(reg);
1407 udelay(200);
1408}
1409
1410static void intel_disable_pch_pll(struct drm_i915_private *dev_priv,
1411 enum pipe pipe)
1412{
1413 int reg;
1414 u32 val;
1415
1416 /* PCH only available on ILK+ */
1417 BUG_ON(dev_priv->info->gen < 5);
1418
1419 /* Make sure transcoder isn't still depending on us */
1420 assert_transcoder_disabled(dev_priv, pipe);
1421
1422 reg = PCH_DPLL(pipe);
1423 val = I915_READ(reg);
1424 val &= ~DPLL_VCO_ENABLE;
1425 I915_WRITE(reg, val);
1426 POSTING_READ(reg);
1427 udelay(200);
1428}
1429
1430static void intel_enable_transcoder(struct drm_i915_private *dev_priv,
1431 enum pipe pipe)
1432{
1433 int reg;
1434 u32 val;
1435
1436 /* PCH only available on ILK+ */
1437 BUG_ON(dev_priv->info->gen < 5);
1438
1439 /* Make sure PCH DPLL is enabled */
1440 assert_pch_pll_enabled(dev_priv, pipe);
1441
1442 /* FDI must be feeding us bits for PCH ports */
1443 assert_fdi_tx_enabled(dev_priv, pipe);
1444 assert_fdi_rx_enabled(dev_priv, pipe);
1445
1446 reg = TRANSCONF(pipe);
1447 val = I915_READ(reg);
1448 /*
1449	 * Make the BPC in the transcoder consistent with
1450	 * that in the pipeconf register.
1451 */
1452 val &= ~PIPE_BPC_MASK;
1453 val |= I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK;
1454 I915_WRITE(reg, val | TRANS_ENABLE);
1455 if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100))
1456		DRM_ERROR("failed to enable transcoder %c\n", pipe_name(pipe));
1457}
1458
1459static void intel_disable_transcoder(struct drm_i915_private *dev_priv,
1460 enum pipe pipe)
1461{
1462 int reg;
1463 u32 val;
1464
1465 /* FDI relies on the transcoder */
1466 assert_fdi_tx_disabled(dev_priv, pipe);
1467 assert_fdi_rx_disabled(dev_priv, pipe);
1468
1469 /* Ports must be off as well */
1470 assert_pch_ports_disabled(dev_priv, pipe);
1471
1472 reg = TRANSCONF(pipe);
1473 val = I915_READ(reg);
1474 val &= ~TRANS_ENABLE;
1475 I915_WRITE(reg, val);
1476 /* wait for PCH transcoder off, transcoder state */
1477 if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50))
1478 DRM_ERROR("failed to disable transcoder\n");
1479}
1480
1481/**
1482 * intel_enable_pipe - enable a pipe, asserting requirements
1483 * @dev_priv: i915 private structure
1484 * @pipe: pipe to enable
1485 * @pch_port: on ILK+, is this pipe driving a PCH port or not
1486 *
1487 * Enable @pipe, making sure that various hardware specific requirements
1488 * are met, if applicable, e.g. PLL enabled, LVDS pairs enabled, etc.
1489 *
1490 * @pipe should be %PIPE_A or %PIPE_B.
1491 *
1492 * Will wait until the pipe is actually running (i.e. first vblank) before
1493 * returning.
1494 */
1495static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
1496 bool pch_port)
1497{
1498 int reg;
1499 u32 val;
1500
1501 /*
1502 * A pipe without a PLL won't actually be able to drive bits from
1503 * a plane. On ILK+ the pipe PLLs are integrated, so we don't
1504 * need the check.
1505 */
1506 if (!HAS_PCH_SPLIT(dev_priv->dev))
1507 assert_pll_enabled(dev_priv, pipe);
1508 else {
1509 if (pch_port) {
1510 /* if driving the PCH, we need FDI enabled */
1511 assert_fdi_rx_pll_enabled(dev_priv, pipe);
1512 assert_fdi_tx_pll_enabled(dev_priv, pipe);
1513 }
1514 /* FIXME: assert CPU port conditions for SNB+ */
1515 }
1516
1517 reg = PIPECONF(pipe);
1518 val = I915_READ(reg);
1519 val |= PIPECONF_ENABLE;
1520 I915_WRITE(reg, val);
1521 POSTING_READ(reg);
1522 intel_wait_for_vblank(dev_priv->dev, pipe);
1523}
1524
1525/**
1526 * intel_disable_pipe - disable a pipe, asserting requirements
1527 * @dev_priv: i915 private structure
1528 * @pipe: pipe to disable
1529 *
1530 * Disable @pipe, making sure that various hardware specific requirements
1531 * are met, if applicable, e.g. plane disabled, panel fitter off, etc.
1532 *
1533 * @pipe should be %PIPE_A or %PIPE_B.
1534 *
1535 * Will wait until the pipe has shut down before returning.
1536 */
1537static void intel_disable_pipe(struct drm_i915_private *dev_priv,
1538 enum pipe pipe)
1539{
1540 int reg;
1541 u32 val;
1542
1543 /*
1544 * Make sure planes won't keep trying to pump pixels to us,
1545 * or we might hang the display.
1546 */
1547 assert_planes_disabled(dev_priv, pipe);
1548
1549	/* Don't disable pipe A if the PIPEA_FORCE quirk needs it to stay on */
1550 if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
1551 return;
1552
1553 reg = PIPECONF(pipe);
1554 val = I915_READ(reg);
1555 val &= ~PIPECONF_ENABLE;
1556 I915_WRITE(reg, val);
1557 POSTING_READ(reg);
1558 intel_wait_for_pipe_off(dev_priv->dev, pipe);
1559}
1560
1561/**
1562 * intel_enable_plane - enable a display plane on a given pipe
1563 * @dev_priv: i915 private structure
1564 * @plane: plane to enable
1565 * @pipe: pipe being fed
1566 *
1567 * Enable @plane on @pipe, making sure that @pipe is running first.
1568 */
1569static void intel_enable_plane(struct drm_i915_private *dev_priv,
1570 enum plane plane, enum pipe pipe)
1571{
1572 int reg;
1573 u32 val;
1574
1575 /* If the pipe isn't enabled, we can't pump pixels and may hang */
1576 assert_pipe_enabled(dev_priv, pipe);
1577
1578 reg = DSPCNTR(plane);
1579 val = I915_READ(reg);
1580 val |= DISPLAY_PLANE_ENABLE;
1581 I915_WRITE(reg, val);
1582 POSTING_READ(reg);
1583 intel_wait_for_vblank(dev_priv->dev, pipe);
1584}
1585
1586/*
1587 * Plane regs are double buffered, going from enabled->disabled needs a
1588 * trigger in order to latch. The display address reg provides this.
1589 */
1590static void intel_flush_display_plane(struct drm_i915_private *dev_priv,
1591 enum plane plane)
1592{
1593 u32 reg = DSPADDR(plane);
1594 I915_WRITE(reg, I915_READ(reg));
1595}
1596
1597/**
1598 * intel_disable_plane - disable a display plane
1599 * @dev_priv: i915 private structure
1600 * @plane: plane to disable
1601 * @pipe: pipe consuming the data
1602 *
1603 * Disable @plane; should be an independent operation.
1604 */
1605static void intel_disable_plane(struct drm_i915_private *dev_priv,
1606 enum plane plane, enum pipe pipe)
1607{
1608 int reg;
1609 u32 val;
1610
1611 reg = DSPCNTR(plane);
1612 val = I915_READ(reg);
1613 val &= ~DISPLAY_PLANE_ENABLE;
1614 I915_WRITE(reg, val);
1615 POSTING_READ(reg);
1616 intel_flush_display_plane(dev_priv, plane);
1617 intel_wait_for_vblank(dev_priv->dev, pipe);
1618}
1619
1620static void disable_pch_dp(struct drm_i915_private *dev_priv,
1621 enum pipe pipe, int reg)
1622{
1623 u32 val = I915_READ(reg);
1624 if (DP_PIPE_ENABLED(val, pipe))
1625 I915_WRITE(reg, val & ~DP_PORT_EN);
1626}
1627
1628static void disable_pch_hdmi(struct drm_i915_private *dev_priv,
1629 enum pipe pipe, int reg)
1630{
1631 u32 val = I915_READ(reg);
1632 if (HDMI_PIPE_ENABLED(val, pipe))
1633 I915_WRITE(reg, val & ~PORT_ENABLE);
1634}
1635
1636/* Disable any ports connected to this transcoder */
1637static void intel_disable_pch_ports(struct drm_i915_private *dev_priv,
1638 enum pipe pipe)
1639{
1640 u32 reg, val;
1641
1642 val = I915_READ(PCH_PP_CONTROL);
1643 I915_WRITE(PCH_PP_CONTROL, val | PANEL_UNLOCK_REGS);
1644
1645 disable_pch_dp(dev_priv, pipe, PCH_DP_B);
1646 disable_pch_dp(dev_priv, pipe, PCH_DP_C);
1647 disable_pch_dp(dev_priv, pipe, PCH_DP_D);
1648
1649 reg = PCH_ADPA;
1650 val = I915_READ(reg);
1651 if (ADPA_PIPE_ENABLED(val, pipe))
1652 I915_WRITE(reg, val & ~ADPA_DAC_ENABLE);
1653
1654 reg = PCH_LVDS;
1655 val = I915_READ(reg);
1656 if (LVDS_PIPE_ENABLED(val, pipe)) {
1657 I915_WRITE(reg, val & ~LVDS_PORT_EN);
1658 POSTING_READ(reg);
1659 udelay(100);
1660 }
1661
1662 disable_pch_hdmi(dev_priv, pipe, HDMIB);
1663 disable_pch_hdmi(dev_priv, pipe, HDMIC);
1664 disable_pch_hdmi(dev_priv, pipe, HDMID);
1665}
1666
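Taken together, the disable helpers above encode a fixed teardown order. A
condensed sketch for a pipe driving a PCH port, matching what the reworked
ironlake_crtc_disable() later in this patch performs (the wrapper name is
hypothetical, and FBC/panel-fitter teardown is omitted):

static void ilk_power_down_sketch(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum pipe pipe = intel_crtc->pipe;

	intel_disable_plane(dev_priv, intel_crtc->plane, pipe);
	intel_disable_pipe(dev_priv, pipe);	  /* pipe off before FDI */
	ironlake_fdi_disable(crtc);		  /* FDI off before transcoder */
	intel_disable_pch_ports(dev_priv, pipe);  /* ports off before transcoder */
	intel_disable_transcoder(dev_priv, pipe);
	intel_disable_pch_pll(dev_priv, pipe);	  /* clock source goes last */
}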
1061static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval) 1667static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
1062{ 1668{
1063 struct drm_device *dev = crtc->dev; 1669 struct drm_device *dev = crtc->dev;
@@ -1219,7 +1825,7 @@ static void sandybridge_blit_fbc_update(struct drm_device *dev)
1219 u32 blt_ecoskpd; 1825 u32 blt_ecoskpd;
1220 1826
1221 /* Make sure blitter notifies FBC of writes */ 1827 /* Make sure blitter notifies FBC of writes */
1222 __gen6_force_wake_get(dev_priv); 1828 __gen6_gt_force_wake_get(dev_priv);
1223 blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD); 1829 blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD);
1224 blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY << 1830 blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY <<
1225 GEN6_BLITTER_LOCK_SHIFT; 1831 GEN6_BLITTER_LOCK_SHIFT;
@@ -1230,7 +1836,7 @@ static void sandybridge_blit_fbc_update(struct drm_device *dev)
1230 GEN6_BLITTER_LOCK_SHIFT); 1836 GEN6_BLITTER_LOCK_SHIFT);
1231 I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd); 1837 I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
1232 POSTING_READ(GEN6_BLITTER_ECOSKPD); 1838 POSTING_READ(GEN6_BLITTER_ECOSKPD);
1233 __gen6_force_wake_put(dev_priv); 1839 __gen6_gt_force_wake_put(dev_priv);
1234} 1840}
1235 1841
1236static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval) 1842static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
@@ -1390,7 +1996,7 @@ static void intel_update_fbc(struct drm_device *dev)
1390 * - going to an unsupported config (interlace, pixel multiply, etc.) 1996 * - going to an unsupported config (interlace, pixel multiply, etc.)
1391 */ 1997 */
1392 list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) { 1998 list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) {
1393 if (tmp_crtc->enabled) { 1999 if (tmp_crtc->enabled && tmp_crtc->fb) {
1394 if (crtc) { 2000 if (crtc) {
1395 DRM_DEBUG_KMS("more than one pipe active, disabling compression\n"); 2001 DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
1396 dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES; 2002 dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES;
@@ -1461,6 +2067,7 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev,
1461 struct drm_i915_gem_object *obj, 2067 struct drm_i915_gem_object *obj,
1462 struct intel_ring_buffer *pipelined) 2068 struct intel_ring_buffer *pipelined)
1463{ 2069{
2070 struct drm_i915_private *dev_priv = dev->dev_private;
1464 u32 alignment; 2071 u32 alignment;
1465 int ret; 2072 int ret;
1466 2073
@@ -1485,9 +2092,10 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev,
1485 BUG(); 2092 BUG();
1486 } 2093 }
1487 2094
2095 dev_priv->mm.interruptible = false;
1488 ret = i915_gem_object_pin(obj, alignment, true); 2096 ret = i915_gem_object_pin(obj, alignment, true);
1489 if (ret) 2097 if (ret)
1490 return ret; 2098 goto err_interruptible;
1491 2099
1492 ret = i915_gem_object_set_to_display_plane(obj, pipelined); 2100 ret = i915_gem_object_set_to_display_plane(obj, pipelined);
1493 if (ret) 2101 if (ret)
@@ -1499,15 +2107,18 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev,
1499 * a fence as the cost is not that onerous. 2107 * a fence as the cost is not that onerous.
1500 */ 2108 */
1501 if (obj->tiling_mode != I915_TILING_NONE) { 2109 if (obj->tiling_mode != I915_TILING_NONE) {
1502 ret = i915_gem_object_get_fence(obj, pipelined, false); 2110 ret = i915_gem_object_get_fence(obj, pipelined);
1503 if (ret) 2111 if (ret)
1504 goto err_unpin; 2112 goto err_unpin;
1505 } 2113 }
1506 2114
2115 dev_priv->mm.interruptible = true;
1507 return 0; 2116 return 0;
1508 2117
1509err_unpin: 2118err_unpin:
1510 i915_gem_object_unpin(obj); 2119 i915_gem_object_unpin(obj);
2120err_interruptible:
2121 dev_priv->mm.interruptible = true;
1511 return ret; 2122 return ret;
1512} 2123}
1513 2124
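The mm.interruptible toggling above is a scoped override: waits issued while
pinning and fencing the scanout buffer must not be interrupted, and the flag
has to be restored on every exit path, including the error unwind. In outline
(sketch only; do_pin_work() is a hypothetical stand-in for the pin, domain
and fence steps):

	dev_priv->mm.interruptible = false;	/* forbid interruptible waits */
	ret = do_pin_work(obj);			/* pin, set domain, fence */
	dev_priv->mm.interruptible = true;	/* restored on success and error */
	return ret;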
@@ -1630,19 +2241,19 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
1630 struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj; 2241 struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj;
1631 2242
1632 wait_event(dev_priv->pending_flip_queue, 2243 wait_event(dev_priv->pending_flip_queue,
2244 atomic_read(&dev_priv->mm.wedged) ||
1633 atomic_read(&obj->pending_flip) == 0); 2245 atomic_read(&obj->pending_flip) == 0);
1634 2246
1635 /* Big Hammer, we also need to ensure that any pending 2247 /* Big Hammer, we also need to ensure that any pending
1636 * MI_WAIT_FOR_EVENT inside a user batch buffer on the 2248 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
1637 * current scanout is retired before unpinning the old 2249 * current scanout is retired before unpinning the old
1638 * framebuffer. 2250 * framebuffer.
2251 *
2252 * This should only fail upon a hung GPU, in which case we
2253 * can safely continue.
1639 */ 2254 */
1640 ret = i915_gem_object_flush_gpu(obj, false); 2255 ret = i915_gem_object_flush_gpu(obj);
1641 if (ret) { 2256 (void) ret;
1642 i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj);
1643 mutex_unlock(&dev->struct_mutex);
1644 return ret;
1645 }
1646 } 2257 }
1647 2258
1648 ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y, 2259 ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y,
@@ -1753,8 +2364,13 @@ static void ironlake_fdi_link_train(struct drm_crtc *crtc)
1753 struct drm_i915_private *dev_priv = dev->dev_private; 2364 struct drm_i915_private *dev_priv = dev->dev_private;
1754 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 2365 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1755 int pipe = intel_crtc->pipe; 2366 int pipe = intel_crtc->pipe;
2367 int plane = intel_crtc->plane;
1756 u32 reg, temp, tries; 2368 u32 reg, temp, tries;
1757 2369
2370 /* FDI needs bits from pipe & plane first */
2371 assert_pipe_enabled(dev_priv, pipe);
2372 assert_plane_enabled(dev_priv, plane);
2373
 1758	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits 2374	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
 1759	   for the train result */ 2375	   for the train result */
1760 reg = FDI_RX_IMR(pipe); 2376 reg = FDI_RX_IMR(pipe);
@@ -1784,7 +2400,11 @@ static void ironlake_fdi_link_train(struct drm_crtc *crtc)
1784 udelay(150); 2400 udelay(150);
1785 2401
1786 /* Ironlake workaround, enable clock pointer after FDI enable*/ 2402 /* Ironlake workaround, enable clock pointer after FDI enable*/
1787 I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_ENABLE); 2403 if (HAS_PCH_IBX(dev)) {
2404 I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
2405 I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
2406 FDI_RX_PHASE_SYNC_POINTER_EN);
2407 }
1788 2408
1789 reg = FDI_RX_IIR(pipe); 2409 reg = FDI_RX_IIR(pipe);
1790 for (tries = 0; tries < 5; tries++) { 2410 for (tries = 0; tries < 5; tries++) {
@@ -1834,7 +2454,7 @@ static void ironlake_fdi_link_train(struct drm_crtc *crtc)
1834 2454
1835} 2455}
1836 2456
1837static const int const snb_b_fdi_train_param [] = { 2457static const int snb_b_fdi_train_param [] = {
1838 FDI_LINK_TRAIN_400MV_0DB_SNB_B, 2458 FDI_LINK_TRAIN_400MV_0DB_SNB_B,
1839 FDI_LINK_TRAIN_400MV_6DB_SNB_B, 2459 FDI_LINK_TRAIN_400MV_6DB_SNB_B,
1840 FDI_LINK_TRAIN_600MV_3_5DB_SNB_B, 2460 FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
@@ -2003,12 +2623,60 @@ static void ironlake_fdi_enable(struct drm_crtc *crtc)
2003 } 2623 }
2004} 2624}
2005 2625
2006static void intel_flush_display_plane(struct drm_device *dev, 2626static void ironlake_fdi_disable(struct drm_crtc *crtc)
2007 int plane)
2008{ 2627{
2628 struct drm_device *dev = crtc->dev;
2009 struct drm_i915_private *dev_priv = dev->dev_private; 2629 struct drm_i915_private *dev_priv = dev->dev_private;
2010 u32 reg = DSPADDR(plane); 2630 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2011 I915_WRITE(reg, I915_READ(reg)); 2631 int pipe = intel_crtc->pipe;
2632 u32 reg, temp;
2633
2634 /* disable CPU FDI tx and PCH FDI rx */
2635 reg = FDI_TX_CTL(pipe);
2636 temp = I915_READ(reg);
2637 I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
2638 POSTING_READ(reg);
2639
2640 reg = FDI_RX_CTL(pipe);
2641 temp = I915_READ(reg);
2642 temp &= ~(0x7 << 16);
2643 temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
2644 I915_WRITE(reg, temp & ~FDI_RX_ENABLE);
2645
2646 POSTING_READ(reg);
2647 udelay(100);
2648
2649 /* Ironlake workaround, disable clock pointer after downing FDI */
2650 if (HAS_PCH_IBX(dev)) {
2651 I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
2652 I915_WRITE(FDI_RX_CHICKEN(pipe),
2653		I915_WRITE(FDI_RX_CHICKEN(pipe),
2654			   I915_READ(FDI_RX_CHICKEN(pipe)) &
2655			   ~FDI_RX_PHASE_SYNC_POINTER_EN);
2655 }
2656
2657 /* still set train pattern 1 */
2658 reg = FDI_TX_CTL(pipe);
2659 temp = I915_READ(reg);
2660 temp &= ~FDI_LINK_TRAIN_NONE;
2661 temp |= FDI_LINK_TRAIN_PATTERN_1;
2662 I915_WRITE(reg, temp);
2663
2664 reg = FDI_RX_CTL(pipe);
2665 temp = I915_READ(reg);
2666 if (HAS_PCH_CPT(dev)) {
2667 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2668 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
2669 } else {
2670 temp &= ~FDI_LINK_TRAIN_NONE;
2671 temp |= FDI_LINK_TRAIN_PATTERN_1;
2672 }
2673 /* BPC in FDI rx is consistent with that in PIPECONF */
2674 temp &= ~(0x07 << 16);
2675 temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
2676 I915_WRITE(reg, temp);
2677
2678 POSTING_READ(reg);
2679 udelay(100);
2012} 2680}
2013 2681
2014/* 2682/*
@@ -2045,60 +2713,46 @@ static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
2045 atomic_read(&obj->pending_flip) == 0); 2713 atomic_read(&obj->pending_flip) == 0);
2046} 2714}
2047 2715
2048static void ironlake_crtc_enable(struct drm_crtc *crtc) 2716static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
2049{ 2717{
2050 struct drm_device *dev = crtc->dev; 2718 struct drm_device *dev = crtc->dev;
2051 struct drm_i915_private *dev_priv = dev->dev_private; 2719 struct drm_mode_config *mode_config = &dev->mode_config;
2052 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 2720 struct intel_encoder *encoder;
2053 int pipe = intel_crtc->pipe;
2054 int plane = intel_crtc->plane;
2055 u32 reg, temp;
2056
2057 if (intel_crtc->active)
2058 return;
2059
2060 intel_crtc->active = true;
2061 intel_update_watermarks(dev);
2062
2063 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
2064 temp = I915_READ(PCH_LVDS);
2065 if ((temp & LVDS_PORT_EN) == 0)
2066 I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN);
2067 }
2068 2721
2069 ironlake_fdi_enable(crtc); 2722 /*
2723 * If there's a non-PCH eDP on this crtc, it must be DP_A, and that
2724 * must be driven by its own crtc; no sharing is possible.
2725 */
2726 list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
2727 if (encoder->base.crtc != crtc)
2728 continue;
2070 2729
2071 /* Enable panel fitting for LVDS */ 2730 switch (encoder->type) {
2072 if (dev_priv->pch_pf_size && 2731 case INTEL_OUTPUT_EDP:
2073 (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) || HAS_eDP)) { 2732 if (!intel_encoder_is_pch_edp(&encoder->base))
2074 /* Force use of hard-coded filter coefficients 2733 return false;
2075 * as some pre-programmed values are broken, 2734 continue;
2076 * e.g. x201. 2735 }
2077 */
2078 I915_WRITE(pipe ? PFB_CTL_1 : PFA_CTL_1,
2079 PF_ENABLE | PF_FILTER_MED_3x3);
2080 I915_WRITE(pipe ? PFB_WIN_POS : PFA_WIN_POS,
2081 dev_priv->pch_pf_pos);
2082 I915_WRITE(pipe ? PFB_WIN_SZ : PFA_WIN_SZ,
2083 dev_priv->pch_pf_size);
2084 } 2736 }
2085 2737
2086 /* Enable CPU pipe */ 2738 return true;
2087 reg = PIPECONF(pipe); 2739}
2088 temp = I915_READ(reg);
2089 if ((temp & PIPECONF_ENABLE) == 0) {
2090 I915_WRITE(reg, temp | PIPECONF_ENABLE);
2091 POSTING_READ(reg);
2092 intel_wait_for_vblank(dev, intel_crtc->pipe);
2093 }
2094 2740
2095 /* configure and enable CPU plane */ 2741/*
2096 reg = DSPCNTR(plane); 2742 * Enable PCH resources required for PCH ports:
2097 temp = I915_READ(reg); 2743 * - PCH PLLs
2098 if ((temp & DISPLAY_PLANE_ENABLE) == 0) { 2744 * - FDI training & RX/TX
2099 I915_WRITE(reg, temp | DISPLAY_PLANE_ENABLE); 2745 * - update transcoder timings
2100 intel_flush_display_plane(dev, plane); 2746 * - DP transcoding bits
2101 } 2747 * - transcoder
2748 */
2749static void ironlake_pch_enable(struct drm_crtc *crtc)
2750{
2751 struct drm_device *dev = crtc->dev;
2752 struct drm_i915_private *dev_priv = dev->dev_private;
2753 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2754 int pipe = intel_crtc->pipe;
2755 u32 reg, temp;
2102 2756
2103 /* For PCH output, training FDI link */ 2757 /* For PCH output, training FDI link */
2104 if (IS_GEN6(dev)) 2758 if (IS_GEN6(dev))
@@ -2106,14 +2760,7 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
2106 else 2760 else
2107 ironlake_fdi_link_train(crtc); 2761 ironlake_fdi_link_train(crtc);
2108 2762
2109 /* enable PCH DPLL */ 2763 intel_enable_pch_pll(dev_priv, pipe);
2110 reg = PCH_DPLL(pipe);
2111 temp = I915_READ(reg);
2112 if ((temp & DPLL_VCO_ENABLE) == 0) {
2113 I915_WRITE(reg, temp | DPLL_VCO_ENABLE);
2114 POSTING_READ(reg);
2115 udelay(200);
2116 }
2117 2764
2118 if (HAS_PCH_CPT(dev)) { 2765 if (HAS_PCH_CPT(dev)) {
2119 /* Be sure PCH DPLL SEL is set */ 2766 /* Be sure PCH DPLL SEL is set */
@@ -2125,7 +2772,8 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
2125 I915_WRITE(PCH_DPLL_SEL, temp); 2772 I915_WRITE(PCH_DPLL_SEL, temp);
2126 } 2773 }
2127 2774
2128 /* set transcoder timing */ 2775 /* set transcoder timing, panel must allow it */
2776 assert_panel_unlocked(dev_priv, pipe);
2129 I915_WRITE(TRANS_HTOTAL(pipe), I915_READ(HTOTAL(pipe))); 2777 I915_WRITE(TRANS_HTOTAL(pipe), I915_READ(HTOTAL(pipe)));
2130 I915_WRITE(TRANS_HBLANK(pipe), I915_READ(HBLANK(pipe))); 2778 I915_WRITE(TRANS_HBLANK(pipe), I915_READ(HBLANK(pipe)));
2131 I915_WRITE(TRANS_HSYNC(pipe), I915_READ(HSYNC(pipe))); 2779 I915_WRITE(TRANS_HSYNC(pipe), I915_READ(HSYNC(pipe)));
@@ -2172,18 +2820,55 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
2172 I915_WRITE(reg, temp); 2820 I915_WRITE(reg, temp);
2173 } 2821 }
2174 2822
2175 /* enable PCH transcoder */ 2823 intel_enable_transcoder(dev_priv, pipe);
2176 reg = TRANSCONF(pipe); 2824}
2177 temp = I915_READ(reg); 2825
2178 /* 2826static void ironlake_crtc_enable(struct drm_crtc *crtc)
2179 * make the BPC in transcoder be consistent with 2827{
2180 * that in pipeconf reg. 2828 struct drm_device *dev = crtc->dev;
2181 */ 2829 struct drm_i915_private *dev_priv = dev->dev_private;
2182 temp &= ~PIPE_BPC_MASK; 2830 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2183 temp |= I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK; 2831 int pipe = intel_crtc->pipe;
2184 I915_WRITE(reg, temp | TRANS_ENABLE); 2832 int plane = intel_crtc->plane;
2185 if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100)) 2833 u32 temp;
2186 DRM_ERROR("failed to enable transcoder %d\n", pipe); 2834 bool is_pch_port;
2835
2836 if (intel_crtc->active)
2837 return;
2838
2839 intel_crtc->active = true;
2840 intel_update_watermarks(dev);
2841
2842 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
2843 temp = I915_READ(PCH_LVDS);
2844 if ((temp & LVDS_PORT_EN) == 0)
2845 I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN);
2846 }
2847
2848 is_pch_port = intel_crtc_driving_pch(crtc);
2849
2850 if (is_pch_port)
2851 ironlake_fdi_enable(crtc);
2852 else
2853 ironlake_fdi_disable(crtc);
2854
2855 /* Enable panel fitting for LVDS */
2856 if (dev_priv->pch_pf_size &&
2857 (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) || HAS_eDP)) {
2858 /* Force use of hard-coded filter coefficients
2859 * as some pre-programmed values are broken,
2860 * e.g. x201.
2861 */
2862 I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
2863 I915_WRITE(PF_WIN_POS(pipe), dev_priv->pch_pf_pos);
2864 I915_WRITE(PF_WIN_SZ(pipe), dev_priv->pch_pf_size);
2865 }
2866
2867 intel_enable_pipe(dev_priv, pipe, is_pch_port);
2868 intel_enable_plane(dev_priv, plane, pipe);
2869
2870 if (is_pch_port)
2871 ironlake_pch_enable(crtc);
2187 2872
2188 intel_crtc_load_lut(crtc); 2873 intel_crtc_load_lut(crtc);
2189 intel_update_fbc(dev); 2874 intel_update_fbc(dev);
@@ -2206,116 +2891,58 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
2206 drm_vblank_off(dev, pipe); 2891 drm_vblank_off(dev, pipe);
2207 intel_crtc_update_cursor(crtc, false); 2892 intel_crtc_update_cursor(crtc, false);
2208 2893
2209 /* Disable display plane */ 2894 intel_disable_plane(dev_priv, plane, pipe);
2210 reg = DSPCNTR(plane);
2211 temp = I915_READ(reg);
2212 if (temp & DISPLAY_PLANE_ENABLE) {
2213 I915_WRITE(reg, temp & ~DISPLAY_PLANE_ENABLE);
2214 intel_flush_display_plane(dev, plane);
2215 }
2216 2895
2217 if (dev_priv->cfb_plane == plane && 2896 if (dev_priv->cfb_plane == plane &&
2218 dev_priv->display.disable_fbc) 2897 dev_priv->display.disable_fbc)
2219 dev_priv->display.disable_fbc(dev); 2898 dev_priv->display.disable_fbc(dev);
2220 2899
2221 /* disable cpu pipe, disable after all planes disabled */ 2900 intel_disable_pipe(dev_priv, pipe);
2222 reg = PIPECONF(pipe);
2223 temp = I915_READ(reg);
2224 if (temp & PIPECONF_ENABLE) {
2225 I915_WRITE(reg, temp & ~PIPECONF_ENABLE);
2226 POSTING_READ(reg);
2227 /* wait for cpu pipe off, pipe state */
2228 intel_wait_for_pipe_off(dev, intel_crtc->pipe);
2229 }
2230 2901
2231 /* Disable PF */ 2902 /* Disable PF */
2232 I915_WRITE(pipe ? PFB_CTL_1 : PFA_CTL_1, 0); 2903 I915_WRITE(PF_CTL(pipe), 0);
2233 I915_WRITE(pipe ? PFB_WIN_SZ : PFA_WIN_SZ, 0); 2904 I915_WRITE(PF_WIN_SZ(pipe), 0);
2234
2235 /* disable CPU FDI tx and PCH FDI rx */
2236 reg = FDI_TX_CTL(pipe);
2237 temp = I915_READ(reg);
2238 I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
2239 POSTING_READ(reg);
2240
2241 reg = FDI_RX_CTL(pipe);
2242 temp = I915_READ(reg);
2243 temp &= ~(0x7 << 16);
2244 temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
2245 I915_WRITE(reg, temp & ~FDI_RX_ENABLE);
2246
2247 POSTING_READ(reg);
2248 udelay(100);
2249
2250 /* Ironlake workaround, disable clock pointer after downing FDI */
2251 if (HAS_PCH_IBX(dev))
2252 I915_WRITE(FDI_RX_CHICKEN(pipe),
2253 I915_READ(FDI_RX_CHICKEN(pipe) &
2254 ~FDI_RX_PHASE_SYNC_POINTER_ENABLE));
2255
2256 /* still set train pattern 1 */
2257 reg = FDI_TX_CTL(pipe);
2258 temp = I915_READ(reg);
2259 temp &= ~FDI_LINK_TRAIN_NONE;
2260 temp |= FDI_LINK_TRAIN_PATTERN_1;
2261 I915_WRITE(reg, temp);
2262
2263 reg = FDI_RX_CTL(pipe);
2264 temp = I915_READ(reg);
2265 if (HAS_PCH_CPT(dev)) {
2266 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2267 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
2268 } else {
2269 temp &= ~FDI_LINK_TRAIN_NONE;
2270 temp |= FDI_LINK_TRAIN_PATTERN_1;
2271 }
2272 /* BPC in FDI rx is consistent with that in PIPECONF */
2273 temp &= ~(0x07 << 16);
2274 temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
2275 I915_WRITE(reg, temp);
2276 2905
2277 POSTING_READ(reg); 2906 ironlake_fdi_disable(crtc);
2278 udelay(100);
2279 2907
2280 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { 2908 /* This is a horrible layering violation; we should be doing this in
2281 temp = I915_READ(PCH_LVDS); 2909 * the connector/encoder ->prepare instead, but we don't always have
2282 if (temp & LVDS_PORT_EN) { 2910 * enough information there about the config to know whether it will
2283 I915_WRITE(PCH_LVDS, temp & ~LVDS_PORT_EN); 2911 * actually be necessary or just cause undesired flicker.
2284 POSTING_READ(PCH_LVDS); 2912 */
2285 udelay(100); 2913 intel_disable_pch_ports(dev_priv, pipe);
2286 }
2287 }
2288 2914
2289 /* disable PCH transcoder */ 2915 intel_disable_transcoder(dev_priv, pipe);
2290 reg = TRANSCONF(plane);
2291 temp = I915_READ(reg);
2292 if (temp & TRANS_ENABLE) {
2293 I915_WRITE(reg, temp & ~TRANS_ENABLE);
2294 /* wait for PCH transcoder off, transcoder state */
2295 if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50))
2296 DRM_ERROR("failed to disable transcoder\n");
2297 }
2298 2916
2299 if (HAS_PCH_CPT(dev)) { 2917 if (HAS_PCH_CPT(dev)) {
2300 /* disable TRANS_DP_CTL */ 2918 /* disable TRANS_DP_CTL */
2301 reg = TRANS_DP_CTL(pipe); 2919 reg = TRANS_DP_CTL(pipe);
2302 temp = I915_READ(reg); 2920 temp = I915_READ(reg);
2303 temp &= ~(TRANS_DP_OUTPUT_ENABLE | TRANS_DP_PORT_SEL_MASK); 2921 temp &= ~(TRANS_DP_OUTPUT_ENABLE | TRANS_DP_PORT_SEL_MASK);
2922 temp |= TRANS_DP_PORT_SEL_NONE;
2304 I915_WRITE(reg, temp); 2923 I915_WRITE(reg, temp);
2305 2924
2306 /* disable DPLL_SEL */ 2925 /* disable DPLL_SEL */
2307 temp = I915_READ(PCH_DPLL_SEL); 2926 temp = I915_READ(PCH_DPLL_SEL);
2308 if (pipe == 0) 2927 switch (pipe) {
2309 temp &= ~(TRANSA_DPLL_ENABLE | TRANSA_DPLLB_SEL); 2928 case 0:
2310 else 2929 temp &= ~(TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL);
2930 break;
2931 case 1:
2311 temp &= ~(TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL); 2932 temp &= ~(TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
2933 break;
2934 case 2:
2935 /* FIXME: manage transcoder PLLs? */
2936 temp &= ~(TRANSC_DPLL_ENABLE | TRANSC_DPLLB_SEL);
2937 break;
2938 default:
2939 BUG(); /* wtf */
2940 }
2312 I915_WRITE(PCH_DPLL_SEL, temp); 2941 I915_WRITE(PCH_DPLL_SEL, temp);
2313 } 2942 }
2314 2943
2315 /* disable PCH DPLL */ 2944 /* disable PCH DPLL */
2316 reg = PCH_DPLL(pipe); 2945 intel_disable_pch_pll(dev_priv, pipe);
2317 temp = I915_READ(reg);
2318 I915_WRITE(reg, temp & ~DPLL_VCO_ENABLE);
2319 2946
2320 /* Switch from PCDclk to Rawclk */ 2947 /* Switch from PCDclk to Rawclk */
2321 reg = FDI_RX_CTL(pipe); 2948 reg = FDI_RX_CTL(pipe);
@@ -2372,9 +2999,12 @@ static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable)
2372{ 2999{
2373 if (!enable && intel_crtc->overlay) { 3000 if (!enable && intel_crtc->overlay) {
2374 struct drm_device *dev = intel_crtc->base.dev; 3001 struct drm_device *dev = intel_crtc->base.dev;
3002 struct drm_i915_private *dev_priv = dev->dev_private;
2375 3003
2376 mutex_lock(&dev->struct_mutex); 3004 mutex_lock(&dev->struct_mutex);
2377 (void) intel_overlay_switch_off(intel_crtc->overlay, false); 3005 dev_priv->mm.interruptible = false;
3006 (void) intel_overlay_switch_off(intel_crtc->overlay);
3007 dev_priv->mm.interruptible = true;
2378 mutex_unlock(&dev->struct_mutex); 3008 mutex_unlock(&dev->struct_mutex);
2379 } 3009 }
2380 3010
@@ -2390,7 +3020,6 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
2390 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 3020 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2391 int pipe = intel_crtc->pipe; 3021 int pipe = intel_crtc->pipe;
2392 int plane = intel_crtc->plane; 3022 int plane = intel_crtc->plane;
2393 u32 reg, temp;
2394 3023
2395 if (intel_crtc->active) 3024 if (intel_crtc->active)
2396 return; 3025 return;
@@ -2398,42 +3027,9 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
2398 intel_crtc->active = true; 3027 intel_crtc->active = true;
2399 intel_update_watermarks(dev); 3028 intel_update_watermarks(dev);
2400 3029
2401 /* Enable the DPLL */ 3030 intel_enable_pll(dev_priv, pipe);
2402 reg = DPLL(pipe); 3031 intel_enable_pipe(dev_priv, pipe, false);
2403 temp = I915_READ(reg); 3032 intel_enable_plane(dev_priv, plane, pipe);
2404 if ((temp & DPLL_VCO_ENABLE) == 0) {
2405 I915_WRITE(reg, temp);
2406
2407 /* Wait for the clocks to stabilize. */
2408 POSTING_READ(reg);
2409 udelay(150);
2410
2411 I915_WRITE(reg, temp | DPLL_VCO_ENABLE);
2412
2413 /* Wait for the clocks to stabilize. */
2414 POSTING_READ(reg);
2415 udelay(150);
2416
2417 I915_WRITE(reg, temp | DPLL_VCO_ENABLE);
2418
2419 /* Wait for the clocks to stabilize. */
2420 POSTING_READ(reg);
2421 udelay(150);
2422 }
2423
2424 /* Enable the pipe */
2425 reg = PIPECONF(pipe);
2426 temp = I915_READ(reg);
2427 if ((temp & PIPECONF_ENABLE) == 0)
2428 I915_WRITE(reg, temp | PIPECONF_ENABLE);
2429
2430 /* Enable the plane */
2431 reg = DSPCNTR(plane);
2432 temp = I915_READ(reg);
2433 if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
2434 I915_WRITE(reg, temp | DISPLAY_PLANE_ENABLE);
2435 intel_flush_display_plane(dev, plane);
2436 }
2437 3033
2438 intel_crtc_load_lut(crtc); 3034 intel_crtc_load_lut(crtc);
2439 intel_update_fbc(dev); 3035 intel_update_fbc(dev);
@@ -2450,7 +3046,6 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
2450 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 3046 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2451 int pipe = intel_crtc->pipe; 3047 int pipe = intel_crtc->pipe;
2452 int plane = intel_crtc->plane; 3048 int plane = intel_crtc->plane;
2453 u32 reg, temp;
2454 3049
2455 if (!intel_crtc->active) 3050 if (!intel_crtc->active)
2456 return; 3051 return;
@@ -2465,45 +3060,10 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
2465 dev_priv->display.disable_fbc) 3060 dev_priv->display.disable_fbc)
2466 dev_priv->display.disable_fbc(dev); 3061 dev_priv->display.disable_fbc(dev);
2467 3062
2468 /* Disable display plane */ 3063 intel_disable_plane(dev_priv, plane, pipe);
2469 reg = DSPCNTR(plane); 3064 intel_disable_pipe(dev_priv, pipe);
2470 temp = I915_READ(reg); 3065 intel_disable_pll(dev_priv, pipe);
2471 if (temp & DISPLAY_PLANE_ENABLE) {
2472 I915_WRITE(reg, temp & ~DISPLAY_PLANE_ENABLE);
2473 /* Flush the plane changes */
2474 intel_flush_display_plane(dev, plane);
2475
2476 /* Wait for vblank for the disable to take effect */
2477 if (IS_GEN2(dev))
2478 intel_wait_for_vblank(dev, pipe);
2479 }
2480
2481 /* Don't disable pipe A or pipe A PLLs if needed */
2482 if (pipe == 0 && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
2483 goto done;
2484
2485 /* Next, disable display pipes */
2486 reg = PIPECONF(pipe);
2487 temp = I915_READ(reg);
2488 if (temp & PIPECONF_ENABLE) {
2489 I915_WRITE(reg, temp & ~PIPECONF_ENABLE);
2490
2491 /* Wait for the pipe to turn off */
2492 POSTING_READ(reg);
2493 intel_wait_for_pipe_off(dev, pipe);
2494 }
2495
2496 reg = DPLL(pipe);
2497 temp = I915_READ(reg);
2498 if (temp & DPLL_VCO_ENABLE) {
2499 I915_WRITE(reg, temp & ~DPLL_VCO_ENABLE);
2500
2501 /* Wait for the clocks to turn off. */
2502 POSTING_READ(reg);
2503 udelay(150);
2504 }
2505 3066
2506done:
2507 intel_crtc->active = false; 3067 intel_crtc->active = false;
2508 intel_update_fbc(dev); 3068 intel_update_fbc(dev);
2509 intel_update_watermarks(dev); 3069 intel_update_watermarks(dev);
@@ -2565,7 +3125,7 @@ static void intel_crtc_dpms(struct drm_crtc *crtc, int mode)
2565 master_priv->sarea_priv->pipeB_h = enabled ? crtc->mode.vdisplay : 0; 3125 master_priv->sarea_priv->pipeB_h = enabled ? crtc->mode.vdisplay : 0;
2566 break; 3126 break;
2567 default: 3127 default:
2568 DRM_ERROR("Can't update pipe %d in SAREA\n", pipe); 3128 DRM_ERROR("Can't update pipe %c in SAREA\n", pipe_name(pipe));
2569 break; 3129 break;
2570 } 3130 }
2571} 3131}
@@ -2762,77 +3322,77 @@ struct intel_watermark_params {
2762}; 3322};
2763 3323
2764/* Pineview has different values for various configs */ 3324/* Pineview has different values for various configs */
2765static struct intel_watermark_params pineview_display_wm = { 3325static const struct intel_watermark_params pineview_display_wm = {
2766 PINEVIEW_DISPLAY_FIFO, 3326 PINEVIEW_DISPLAY_FIFO,
2767 PINEVIEW_MAX_WM, 3327 PINEVIEW_MAX_WM,
2768 PINEVIEW_DFT_WM, 3328 PINEVIEW_DFT_WM,
2769 PINEVIEW_GUARD_WM, 3329 PINEVIEW_GUARD_WM,
2770 PINEVIEW_FIFO_LINE_SIZE 3330 PINEVIEW_FIFO_LINE_SIZE
2771}; 3331};
2772static struct intel_watermark_params pineview_display_hplloff_wm = { 3332static const struct intel_watermark_params pineview_display_hplloff_wm = {
2773 PINEVIEW_DISPLAY_FIFO, 3333 PINEVIEW_DISPLAY_FIFO,
2774 PINEVIEW_MAX_WM, 3334 PINEVIEW_MAX_WM,
2775 PINEVIEW_DFT_HPLLOFF_WM, 3335 PINEVIEW_DFT_HPLLOFF_WM,
2776 PINEVIEW_GUARD_WM, 3336 PINEVIEW_GUARD_WM,
2777 PINEVIEW_FIFO_LINE_SIZE 3337 PINEVIEW_FIFO_LINE_SIZE
2778}; 3338};
2779static struct intel_watermark_params pineview_cursor_wm = { 3339static const struct intel_watermark_params pineview_cursor_wm = {
2780 PINEVIEW_CURSOR_FIFO, 3340 PINEVIEW_CURSOR_FIFO,
2781 PINEVIEW_CURSOR_MAX_WM, 3341 PINEVIEW_CURSOR_MAX_WM,
2782 PINEVIEW_CURSOR_DFT_WM, 3342 PINEVIEW_CURSOR_DFT_WM,
2783 PINEVIEW_CURSOR_GUARD_WM, 3343 PINEVIEW_CURSOR_GUARD_WM,
2784 PINEVIEW_FIFO_LINE_SIZE, 3344 PINEVIEW_FIFO_LINE_SIZE,
2785}; 3345};
2786static struct intel_watermark_params pineview_cursor_hplloff_wm = { 3346static const struct intel_watermark_params pineview_cursor_hplloff_wm = {
2787 PINEVIEW_CURSOR_FIFO, 3347 PINEVIEW_CURSOR_FIFO,
2788 PINEVIEW_CURSOR_MAX_WM, 3348 PINEVIEW_CURSOR_MAX_WM,
2789 PINEVIEW_CURSOR_DFT_WM, 3349 PINEVIEW_CURSOR_DFT_WM,
2790 PINEVIEW_CURSOR_GUARD_WM, 3350 PINEVIEW_CURSOR_GUARD_WM,
2791 PINEVIEW_FIFO_LINE_SIZE 3351 PINEVIEW_FIFO_LINE_SIZE
2792}; 3352};
2793static struct intel_watermark_params g4x_wm_info = { 3353static const struct intel_watermark_params g4x_wm_info = {
2794 G4X_FIFO_SIZE, 3354 G4X_FIFO_SIZE,
2795 G4X_MAX_WM, 3355 G4X_MAX_WM,
2796 G4X_MAX_WM, 3356 G4X_MAX_WM,
2797 2, 3357 2,
2798 G4X_FIFO_LINE_SIZE, 3358 G4X_FIFO_LINE_SIZE,
2799}; 3359};
2800static struct intel_watermark_params g4x_cursor_wm_info = { 3360static const struct intel_watermark_params g4x_cursor_wm_info = {
2801 I965_CURSOR_FIFO, 3361 I965_CURSOR_FIFO,
2802 I965_CURSOR_MAX_WM, 3362 I965_CURSOR_MAX_WM,
2803 I965_CURSOR_DFT_WM, 3363 I965_CURSOR_DFT_WM,
2804 2, 3364 2,
2805 G4X_FIFO_LINE_SIZE, 3365 G4X_FIFO_LINE_SIZE,
2806}; 3366};
2807static struct intel_watermark_params i965_cursor_wm_info = { 3367static const struct intel_watermark_params i965_cursor_wm_info = {
2808 I965_CURSOR_FIFO, 3368 I965_CURSOR_FIFO,
2809 I965_CURSOR_MAX_WM, 3369 I965_CURSOR_MAX_WM,
2810 I965_CURSOR_DFT_WM, 3370 I965_CURSOR_DFT_WM,
2811 2, 3371 2,
2812 I915_FIFO_LINE_SIZE, 3372 I915_FIFO_LINE_SIZE,
2813}; 3373};
2814static struct intel_watermark_params i945_wm_info = { 3374static const struct intel_watermark_params i945_wm_info = {
2815 I945_FIFO_SIZE, 3375 I945_FIFO_SIZE,
2816 I915_MAX_WM, 3376 I915_MAX_WM,
2817 1, 3377 1,
2818 2, 3378 2,
2819 I915_FIFO_LINE_SIZE 3379 I915_FIFO_LINE_SIZE
2820}; 3380};
2821static struct intel_watermark_params i915_wm_info = { 3381static const struct intel_watermark_params i915_wm_info = {
2822 I915_FIFO_SIZE, 3382 I915_FIFO_SIZE,
2823 I915_MAX_WM, 3383 I915_MAX_WM,
2824 1, 3384 1,
2825 2, 3385 2,
2826 I915_FIFO_LINE_SIZE 3386 I915_FIFO_LINE_SIZE
2827}; 3387};
2828static struct intel_watermark_params i855_wm_info = { 3388static const struct intel_watermark_params i855_wm_info = {
2829 I855GM_FIFO_SIZE, 3389 I855GM_FIFO_SIZE,
2830 I915_MAX_WM, 3390 I915_MAX_WM,
2831 1, 3391 1,
2832 2, 3392 2,
2833 I830_FIFO_LINE_SIZE 3393 I830_FIFO_LINE_SIZE
2834}; 3394};
2835static struct intel_watermark_params i830_wm_info = { 3395static const struct intel_watermark_params i830_wm_info = {
2836 I830_FIFO_SIZE, 3396 I830_FIFO_SIZE,
2837 I915_MAX_WM, 3397 I915_MAX_WM,
2838 1, 3398 1,
@@ -2840,31 +3400,28 @@ static struct intel_watermark_params i830_wm_info = {
2840 I830_FIFO_LINE_SIZE 3400 I830_FIFO_LINE_SIZE
2841}; 3401};
2842 3402
2843static struct intel_watermark_params ironlake_display_wm_info = { 3403static const struct intel_watermark_params ironlake_display_wm_info = {
2844 ILK_DISPLAY_FIFO, 3404 ILK_DISPLAY_FIFO,
2845 ILK_DISPLAY_MAXWM, 3405 ILK_DISPLAY_MAXWM,
2846 ILK_DISPLAY_DFTWM, 3406 ILK_DISPLAY_DFTWM,
2847 2, 3407 2,
2848 ILK_FIFO_LINE_SIZE 3408 ILK_FIFO_LINE_SIZE
2849}; 3409};
2850 3410static const struct intel_watermark_params ironlake_cursor_wm_info = {
2851static struct intel_watermark_params ironlake_cursor_wm_info = {
2852 ILK_CURSOR_FIFO, 3411 ILK_CURSOR_FIFO,
2853 ILK_CURSOR_MAXWM, 3412 ILK_CURSOR_MAXWM,
2854 ILK_CURSOR_DFTWM, 3413 ILK_CURSOR_DFTWM,
2855 2, 3414 2,
2856 ILK_FIFO_LINE_SIZE 3415 ILK_FIFO_LINE_SIZE
2857}; 3416};
2858 3417static const struct intel_watermark_params ironlake_display_srwm_info = {
2859static struct intel_watermark_params ironlake_display_srwm_info = {
2860 ILK_DISPLAY_SR_FIFO, 3418 ILK_DISPLAY_SR_FIFO,
2861 ILK_DISPLAY_MAX_SRWM, 3419 ILK_DISPLAY_MAX_SRWM,
2862 ILK_DISPLAY_DFT_SRWM, 3420 ILK_DISPLAY_DFT_SRWM,
2863 2, 3421 2,
2864 ILK_FIFO_LINE_SIZE 3422 ILK_FIFO_LINE_SIZE
2865}; 3423};
2866 3424static const struct intel_watermark_params ironlake_cursor_srwm_info = {
2867static struct intel_watermark_params ironlake_cursor_srwm_info = {
2868 ILK_CURSOR_SR_FIFO, 3425 ILK_CURSOR_SR_FIFO,
2869 ILK_CURSOR_MAX_SRWM, 3426 ILK_CURSOR_MAX_SRWM,
2870 ILK_CURSOR_DFT_SRWM, 3427 ILK_CURSOR_DFT_SRWM,
@@ -2872,31 +3429,28 @@ static struct intel_watermark_params ironlake_cursor_srwm_info = {
2872 ILK_FIFO_LINE_SIZE 3429 ILK_FIFO_LINE_SIZE
2873}; 3430};
2874 3431
2875static struct intel_watermark_params sandybridge_display_wm_info = { 3432static const struct intel_watermark_params sandybridge_display_wm_info = {
2876 SNB_DISPLAY_FIFO, 3433 SNB_DISPLAY_FIFO,
2877 SNB_DISPLAY_MAXWM, 3434 SNB_DISPLAY_MAXWM,
2878 SNB_DISPLAY_DFTWM, 3435 SNB_DISPLAY_DFTWM,
2879 2, 3436 2,
2880 SNB_FIFO_LINE_SIZE 3437 SNB_FIFO_LINE_SIZE
2881}; 3438};
2882 3439static const struct intel_watermark_params sandybridge_cursor_wm_info = {
2883static struct intel_watermark_params sandybridge_cursor_wm_info = {
2884 SNB_CURSOR_FIFO, 3440 SNB_CURSOR_FIFO,
2885 SNB_CURSOR_MAXWM, 3441 SNB_CURSOR_MAXWM,
2886 SNB_CURSOR_DFTWM, 3442 SNB_CURSOR_DFTWM,
2887 2, 3443 2,
2888 SNB_FIFO_LINE_SIZE 3444 SNB_FIFO_LINE_SIZE
2889}; 3445};
2890 3446static const struct intel_watermark_params sandybridge_display_srwm_info = {
2891static struct intel_watermark_params sandybridge_display_srwm_info = {
2892 SNB_DISPLAY_SR_FIFO, 3447 SNB_DISPLAY_SR_FIFO,
2893 SNB_DISPLAY_MAX_SRWM, 3448 SNB_DISPLAY_MAX_SRWM,
2894 SNB_DISPLAY_DFT_SRWM, 3449 SNB_DISPLAY_DFT_SRWM,
2895 2, 3450 2,
2896 SNB_FIFO_LINE_SIZE 3451 SNB_FIFO_LINE_SIZE
2897}; 3452};
2898 3453static const struct intel_watermark_params sandybridge_cursor_srwm_info = {
2899static struct intel_watermark_params sandybridge_cursor_srwm_info = {
2900 SNB_CURSOR_SR_FIFO, 3454 SNB_CURSOR_SR_FIFO,
2901 SNB_CURSOR_MAX_SRWM, 3455 SNB_CURSOR_MAX_SRWM,
2902 SNB_CURSOR_DFT_SRWM, 3456 SNB_CURSOR_DFT_SRWM,
@@ -2924,7 +3478,8 @@ static struct intel_watermark_params sandybridge_cursor_srwm_info = {
2924 * will occur, and a display engine hang could result. 3478 * will occur, and a display engine hang could result.
2925 */ 3479 */
2926static unsigned long intel_calculate_wm(unsigned long clock_in_khz, 3480static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
2927 struct intel_watermark_params *wm, 3481 const struct intel_watermark_params *wm,
3482 int fifo_size,
2928 int pixel_size, 3483 int pixel_size,
2929 unsigned long latency_ns) 3484 unsigned long latency_ns)
2930{ 3485{
@@ -2942,7 +3497,7 @@ static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
2942 3497
2943 DRM_DEBUG_KMS("FIFO entries required for mode: %d\n", entries_required); 3498 DRM_DEBUG_KMS("FIFO entries required for mode: %d\n", entries_required);
2944 3499
2945 wm_size = wm->fifo_size - (entries_required + wm->guard_size); 3500 wm_size = fifo_size - (entries_required + wm->guard_size);
2946 3501
2947 DRM_DEBUG_KMS("FIFO watermark level: %d\n", wm_size); 3502 DRM_DEBUG_KMS("FIFO watermark level: %d\n", wm_size);
2948 3503
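As a sanity check of the arithmetic above, one worked run with illustrative
numbers (not taken from any real mode; entries_required uses the ns/us-scaled
formula from the unchanged part of this function):

/*
 * clock_in_khz = 100000 (100 MHz -> 100 pixels/us), pixel_size = 4:
 *   -> 400 bytes/us drained from the FIFO
 * latency_ns = 2000:
 *   entries_required = (100000 / 1000) * 4 * 2000 / 1000 = 800 bytes
 *   at a 64-byte cacheline: DIV_ROUND_UP(800, 64) = 13 entries
 * fifo_size = 96, guard_size = 2:
 *   wm_size = 96 - (13 + 2) = 81 FIFO entries
 */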
@@ -3115,15 +3670,28 @@ static int i830_get_fifo_size(struct drm_device *dev, int plane)
3115 return size; 3670 return size;
3116} 3671}
3117 3672
3118static void pineview_update_wm(struct drm_device *dev, int planea_clock, 3673static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
3119 int planeb_clock, int sr_hdisplay, int unused, 3674{
3120 int pixel_size) 3675 struct drm_crtc *crtc, *enabled = NULL;
3676
3677 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
3678 if (crtc->enabled && crtc->fb) {
3679 if (enabled)
3680 return NULL;
3681 enabled = crtc;
3682 }
3683 }
3684
3685 return enabled;
3686}
3687
3688static void pineview_update_wm(struct drm_device *dev)
3121{ 3689{
3122 struct drm_i915_private *dev_priv = dev->dev_private; 3690 struct drm_i915_private *dev_priv = dev->dev_private;
3691 struct drm_crtc *crtc;
3123 const struct cxsr_latency *latency; 3692 const struct cxsr_latency *latency;
3124 u32 reg; 3693 u32 reg;
3125 unsigned long wm; 3694 unsigned long wm;
3126 int sr_clock;
3127 3695
3128 latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3, 3696 latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3,
3129 dev_priv->fsb_freq, dev_priv->mem_freq); 3697 dev_priv->fsb_freq, dev_priv->mem_freq);
@@ -3133,11 +3701,14 @@ static void pineview_update_wm(struct drm_device *dev, int planea_clock,
3133 return; 3701 return;
3134 } 3702 }
3135 3703
3136 if (!planea_clock || !planeb_clock) { 3704 crtc = single_enabled_crtc(dev);
3137 sr_clock = planea_clock ? planea_clock : planeb_clock; 3705 if (crtc) {
3706 int clock = crtc->mode.clock;
3707 int pixel_size = crtc->fb->bits_per_pixel / 8;
3138 3708
3139 /* Display SR */ 3709 /* Display SR */
3140 wm = intel_calculate_wm(sr_clock, &pineview_display_wm, 3710 wm = intel_calculate_wm(clock, &pineview_display_wm,
3711 pineview_display_wm.fifo_size,
3141 pixel_size, latency->display_sr); 3712 pixel_size, latency->display_sr);
3142 reg = I915_READ(DSPFW1); 3713 reg = I915_READ(DSPFW1);
3143 reg &= ~DSPFW_SR_MASK; 3714 reg &= ~DSPFW_SR_MASK;
@@ -3146,7 +3717,8 @@ static void pineview_update_wm(struct drm_device *dev, int planea_clock,
3146 DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg); 3717 DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);
3147 3718
3148 /* cursor SR */ 3719 /* cursor SR */
3149 wm = intel_calculate_wm(sr_clock, &pineview_cursor_wm, 3720 wm = intel_calculate_wm(clock, &pineview_cursor_wm,
3721 pineview_display_wm.fifo_size,
3150 pixel_size, latency->cursor_sr); 3722 pixel_size, latency->cursor_sr);
3151 reg = I915_READ(DSPFW3); 3723 reg = I915_READ(DSPFW3);
3152 reg &= ~DSPFW_CURSOR_SR_MASK; 3724 reg &= ~DSPFW_CURSOR_SR_MASK;
@@ -3154,7 +3726,8 @@ static void pineview_update_wm(struct drm_device *dev, int planea_clock,
3154 I915_WRITE(DSPFW3, reg); 3726 I915_WRITE(DSPFW3, reg);
3155 3727
3156 /* Display HPLL off SR */ 3728 /* Display HPLL off SR */
3157 wm = intel_calculate_wm(sr_clock, &pineview_display_hplloff_wm, 3729 wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm,
3730 pineview_display_hplloff_wm.fifo_size,
3158 pixel_size, latency->display_hpll_disable); 3731 pixel_size, latency->display_hpll_disable);
3159 reg = I915_READ(DSPFW3); 3732 reg = I915_READ(DSPFW3);
3160 reg &= ~DSPFW_HPLL_SR_MASK; 3733 reg &= ~DSPFW_HPLL_SR_MASK;
@@ -3162,7 +3735,8 @@ static void pineview_update_wm(struct drm_device *dev, int planea_clock,
3162 I915_WRITE(DSPFW3, reg); 3735 I915_WRITE(DSPFW3, reg);
3163 3736
3164 /* cursor HPLL off SR */ 3737 /* cursor HPLL off SR */
3165 wm = intel_calculate_wm(sr_clock, &pineview_cursor_hplloff_wm, 3738 wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm,
3739 pineview_display_hplloff_wm.fifo_size,
3166 pixel_size, latency->cursor_hpll_disable); 3740 pixel_size, latency->cursor_hpll_disable);
3167 reg = I915_READ(DSPFW3); 3741 reg = I915_READ(DSPFW3);
3168 reg &= ~DSPFW_HPLL_CURSOR_MASK; 3742 reg &= ~DSPFW_HPLL_CURSOR_MASK;
@@ -3180,125 +3754,229 @@ static void pineview_update_wm(struct drm_device *dev, int planea_clock,
3180 } 3754 }
3181} 3755}
3182 3756
3183static void g4x_update_wm(struct drm_device *dev, int planea_clock, 3757static bool g4x_compute_wm0(struct drm_device *dev,
3184 int planeb_clock, int sr_hdisplay, int sr_htotal, 3758 int plane,
3185 int pixel_size) 3759 const struct intel_watermark_params *display,
3760 int display_latency_ns,
3761 const struct intel_watermark_params *cursor,
3762 int cursor_latency_ns,
3763 int *plane_wm,
3764 int *cursor_wm)
3186{ 3765{
3187 struct drm_i915_private *dev_priv = dev->dev_private; 3766 struct drm_crtc *crtc;
3188 int total_size, cacheline_size; 3767 int htotal, hdisplay, clock, pixel_size;
3189 int planea_wm, planeb_wm, cursora_wm, cursorb_wm, cursor_sr; 3768 int line_time_us, line_count;
3190 struct intel_watermark_params planea_params, planeb_params; 3769 int entries, tlb_miss;
3191 unsigned long line_time_us;
3192 int sr_clock, sr_entries = 0, entries_required;
3193 3770
3194 /* Create copies of the base settings for each pipe */ 3771 crtc = intel_get_crtc_for_plane(dev, plane);
3195 planea_params = planeb_params = g4x_wm_info; 3772 if (crtc->fb == NULL || !crtc->enabled)
3773 return false;
3196 3774
3197 /* Grab a couple of global values before we overwrite them */ 3775 htotal = crtc->mode.htotal;
3198 total_size = planea_params.fifo_size; 3776 hdisplay = crtc->mode.hdisplay;
3199 cacheline_size = planea_params.cacheline_size; 3777 clock = crtc->mode.clock;
3778 pixel_size = crtc->fb->bits_per_pixel / 8;
3200 3779
3201 /* 3780 /* Use the small buffer method to calculate plane watermark */
3202 * Note: we need to make sure we don't overflow for various clock & 3781 entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
3203 * latency values. 3782 tlb_miss = display->fifo_size*display->cacheline_size - hdisplay * 8;
3204 * clocks go from a few thousand to several hundred thousand. 3783 if (tlb_miss > 0)
3205 * latency is usually a few thousand 3784 entries += tlb_miss;
3206 */ 3785 entries = DIV_ROUND_UP(entries, display->cacheline_size);
3207 entries_required = ((planea_clock / 1000) * pixel_size * latency_ns) / 3786 *plane_wm = entries + display->guard_size;
3208 1000; 3787 if (*plane_wm > (int)display->max_wm)
3209 entries_required = DIV_ROUND_UP(entries_required, G4X_FIFO_LINE_SIZE); 3788 *plane_wm = display->max_wm;
3210 planea_wm = entries_required + planea_params.guard_size;
3211 3789
3212 entries_required = ((planeb_clock / 1000) * pixel_size * latency_ns) / 3790 /* Use the large buffer method to calculate cursor watermark */
3213 1000; 3791 line_time_us = ((htotal * 1000) / clock);
3214 entries_required = DIV_ROUND_UP(entries_required, G4X_FIFO_LINE_SIZE); 3792 line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
3215 planeb_wm = entries_required + planeb_params.guard_size; 3793 entries = line_count * 64 * pixel_size;
3794 tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8;
3795 if (tlb_miss > 0)
3796 entries += tlb_miss;
3797 entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
3798 *cursor_wm = entries + cursor->guard_size;
3799 if (*cursor_wm > (int)cursor->max_wm)
3800 *cursor_wm = (int)cursor->max_wm;
3216 3801
3217 cursora_wm = cursorb_wm = 16; 3802 return true;
3218 cursor_sr = 32; 3803}
3219 3804
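A rough numeric walk-through of the two methods above, with hypothetical
1080p-ish values (clock = 148500 kHz, htotal = 2200, 32bpp, latency = 425 ns):

/*
 * small buffer (plane):  entries = ((148500 * 4 / 1000) * 425) / 1000
 *                                = 252 bytes before cacheline rounding
 * large buffer (cursor): line_time_us = (2200 * 1000) / 148500 = 14
 *                        line_count   = (425 / 14 + 1000) / 1000 = 1
 *                        entries      = 1 line * 64 px * 4 Bpp = 256 bytes
 * Each result is then rounded up to cachelines and padded by guard_size.
 */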
3220 DRM_DEBUG("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm); 3805/*
3806 * Check the wm result.
3807 *
3808 * If any calculated watermark value is larger than the maximum value that
3809 * can be programmed into the associated watermark register, that watermark
3810 * must be disabled.
3811 */
3812static bool g4x_check_srwm(struct drm_device *dev,
3813 int display_wm, int cursor_wm,
3814 const struct intel_watermark_params *display,
3815 const struct intel_watermark_params *cursor)
3816{
3817 DRM_DEBUG_KMS("SR watermark: display plane %d, cursor %d\n",
3818 display_wm, cursor_wm);
3221 3819
3222 /* Calc sr entries for one plane configs */ 3820 if (display_wm > display->max_wm) {
 3223	if (sr_hdisplay && (!planea_clock || !planeb_clock)) { 3821		DRM_DEBUG_KMS("display watermark is too large (%d/%ld), disabling\n",
3224 /* self-refresh has much higher latency */ 3822 display_wm, display->max_wm);
3225 static const int sr_latency_ns = 12000; 3823 return false;
3824 }
3226 3825
3227 sr_clock = planea_clock ? planea_clock : planeb_clock; 3826 if (cursor_wm > cursor->max_wm) {
 3228		line_time_us = ((sr_htotal * 1000) / sr_clock); 3827		DRM_DEBUG_KMS("cursor watermark is too large (%d/%ld), disabling\n",
3828 cursor_wm, cursor->max_wm);
3829 return false;
3830 }
 
-		/* Use ns/us then divide to preserve precision */
-		sr_entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
-			pixel_size * sr_hdisplay;
-		sr_entries = DIV_ROUND_UP(sr_entries, cacheline_size);
-
-		entries_required = (((sr_latency_ns / line_time_us) +
-				     1000) / 1000) * pixel_size * 64;
-		entries_required = DIV_ROUND_UP(entries_required,
-						g4x_cursor_wm_info.cacheline_size);
-		cursor_sr = entries_required + g4x_cursor_wm_info.guard_size;
-
-		if (cursor_sr > g4x_cursor_wm_info.max_wm)
-			cursor_sr = g4x_cursor_wm_info.max_wm;
-		DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
-			      "cursor %d\n", sr_entries, cursor_sr);
+	if (!(display_wm || cursor_wm)) {
+		DRM_DEBUG_KMS("SR latency is 0, disabling\n");
+		return false;
+	}
 
-		I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
-	} else {
-		/* Turn off self refresh if both pipes are enabled */
-		I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
-			   & ~FW_BLC_SELF_EN);
+	return true;
+}
+
+static bool g4x_compute_srwm(struct drm_device *dev,
+			     int plane,
+			     int latency_ns,
+			     const struct intel_watermark_params *display,
+			     const struct intel_watermark_params *cursor,
+			     int *display_wm, int *cursor_wm)
+{
+	struct drm_crtc *crtc;
+	int hdisplay, htotal, pixel_size, clock;
+	unsigned long line_time_us;
+	int line_count, line_size;
+	int small, large;
+	int entries;
+
+	if (!latency_ns) {
+		*display_wm = *cursor_wm = 0;
+		return false;
 	}
 
-	DRM_DEBUG("Setting FIFO watermarks - A: %d, B: %d, SR %d\n",
-		  planea_wm, planeb_wm, sr_entries);
+	crtc = intel_get_crtc_for_plane(dev, plane);
+	hdisplay = crtc->mode.hdisplay;
+	htotal = crtc->mode.htotal;
+	clock = crtc->mode.clock;
+	pixel_size = crtc->fb->bits_per_pixel / 8;
+
+	line_time_us = (htotal * 1000) / clock;
+	line_count = (latency_ns / line_time_us + 1000) / 1000;
+	line_size = hdisplay * pixel_size;
+
+	/* Use the minimum of the small and large buffer method for primary */
+	small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
+	large = line_count * line_size;
+
+	entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
+	*display_wm = entries + display->guard_size;
+
+	/* calculate the self-refresh watermark for display cursor */
+	entries = line_count * pixel_size * 64;
+	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
+	*cursor_wm = entries + cursor->guard_size;
+
+	return g4x_check_srwm(dev,
+			      *display_wm, *cursor_wm,
+			      display, cursor);
+}
+
+static inline bool single_plane_enabled(unsigned int mask)
+{
+	return mask && (mask & -mask) == 0;
+}
+
+static void g4x_update_wm(struct drm_device *dev)
+{
+	static const int sr_latency_ns = 12000;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
+	int plane_sr, cursor_sr;
+	unsigned int enabled = 0;
+
+	if (g4x_compute_wm0(dev, 0,
+			    &g4x_wm_info, latency_ns,
+			    &g4x_cursor_wm_info, latency_ns,
+			    &planea_wm, &cursora_wm))
+		enabled |= 1;
+
+	if (g4x_compute_wm0(dev, 1,
+			    &g4x_wm_info, latency_ns,
+			    &g4x_cursor_wm_info, latency_ns,
+			    &planeb_wm, &cursorb_wm))
+		enabled |= 2;
+
+	plane_sr = cursor_sr = 0;
+	if (single_plane_enabled(enabled) &&
+	    g4x_compute_srwm(dev, ffs(enabled) - 1,
+			     sr_latency_ns,
+			     &g4x_wm_info,
+			     &g4x_cursor_wm_info,
+			     &plane_sr, &cursor_sr))
+		I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
+	else
+		I915_WRITE(FW_BLC_SELF,
+			   I915_READ(FW_BLC_SELF) & ~FW_BLC_SELF_EN);
 
-	planea_wm &= 0x3f;
-	planeb_wm &= 0x3f;
+	DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
+		      planea_wm, cursora_wm,
+		      planeb_wm, cursorb_wm,
+		      plane_sr, cursor_sr);
 
-	I915_WRITE(DSPFW1, (sr_entries << DSPFW_SR_SHIFT) |
+	I915_WRITE(DSPFW1,
+		   (plane_sr << DSPFW_SR_SHIFT) |
 		   (cursorb_wm << DSPFW_CURSORB_SHIFT) |
-		   (planeb_wm << DSPFW_PLANEB_SHIFT) | planea_wm);
-	I915_WRITE(DSPFW2, (I915_READ(DSPFW2) & DSPFW_CURSORA_MASK) |
+		   (planeb_wm << DSPFW_PLANEB_SHIFT) |
+		   planea_wm);
+	I915_WRITE(DSPFW2,
+		   (I915_READ(DSPFW2) & DSPFW_CURSORA_MASK) |
 		   (cursora_wm << DSPFW_CURSORA_SHIFT));
 	/* HPLL off in SR has some issues on G4x... disable it */
-	I915_WRITE(DSPFW3, (I915_READ(DSPFW3) & ~DSPFW_HPLL_SR_EN) |
+	I915_WRITE(DSPFW3,
+		   (I915_READ(DSPFW3) & ~DSPFW_HPLL_SR_EN) |
 		   (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
 }
 
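A note on the arithmetic g4x_compute_srwm introduces above: the display watermark is the smaller of two FIFO-entry estimates, one assuming continuous refill at the pixel rate ("small") and one assuming whole scanlines must be buffered for the latency window ("large"). The following standalone sketch runs the same arithmetic outside the kernel; the mode values and the cacheline/guard parameters are made-up examples, not real platform numbers.

/* Standalone sketch of the small/large buffer watermark method.
 * All numbers below are illustrative, not hardware parameters. */
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

static int min_int(int a, int b) { return a < b ? a : b; }

int main(void)
{
	int clock = 148500;	/* pixel clock, kHz (hypothetical mode) */
	int htotal = 2200;
	int hdisplay = 1920;
	int pixel_size = 4;	/* bytes per pixel */
	int latency_ns = 12000;	/* self-refresh exit latency */
	int cacheline_size = 64;	/* hypothetical FIFO line size */
	int guard_size = 2;		/* hypothetical guard entries */

	unsigned long line_time_us = (htotal * 1000) / clock;
	int line_count = (latency_ns / line_time_us + 1000) / 1000;
	int line_size = hdisplay * pixel_size;

	/* "small": refill continuously at the pixel rate for latency_ns;
	 * "large": buffer line_count whole scanlines. */
	int small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
	int large = line_count * line_size;

	int entries = DIV_ROUND_UP(min_int(small, large), cacheline_size);
	printf("small=%d large=%d -> display_wm=%d\n",
	       small, large, entries + guard_size);
	return 0;
}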
-static void i965_update_wm(struct drm_device *dev, int planea_clock,
-			   int planeb_clock, int sr_hdisplay, int sr_htotal,
-			   int pixel_size)
+static void i965_update_wm(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	unsigned long line_time_us;
-	int sr_clock, sr_entries, srwm = 1;
+	struct drm_crtc *crtc;
+	int srwm = 1;
 	int cursor_sr = 16;
 
 	/* Calc sr entries for one plane configs */
-	if (sr_hdisplay && (!planea_clock || !planeb_clock)) {
+	crtc = single_enabled_crtc(dev);
+	if (crtc) {
 		/* self-refresh has much higher latency */
 		static const int sr_latency_ns = 12000;
+		int clock = crtc->mode.clock;
+		int htotal = crtc->mode.htotal;
+		int hdisplay = crtc->mode.hdisplay;
+		int pixel_size = crtc->fb->bits_per_pixel / 8;
+		unsigned long line_time_us;
+		int entries;
 
-		sr_clock = planea_clock ? planea_clock : planeb_clock;
-		line_time_us = ((sr_htotal * 1000) / sr_clock);
+		line_time_us = ((htotal * 1000) / clock);
 
 		/* Use ns/us then divide to preserve precision */
-		sr_entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
-			pixel_size * sr_hdisplay;
-		sr_entries = DIV_ROUND_UP(sr_entries, I915_FIFO_LINE_SIZE);
-		DRM_DEBUG("self-refresh entries: %d\n", sr_entries);
-		srwm = I965_FIFO_SIZE - sr_entries;
+		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
+			pixel_size * hdisplay;
+		entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE);
+		srwm = I965_FIFO_SIZE - entries;
 		if (srwm < 0)
 			srwm = 1;
 		srwm &= 0x1ff;
+		DRM_DEBUG_KMS("self-refresh entries: %d, wm: %d\n",
+			      entries, srwm);
 
-		sr_entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
+		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
 			pixel_size * 64;
-		sr_entries = DIV_ROUND_UP(sr_entries,
+		entries = DIV_ROUND_UP(entries,
 					  i965_cursor_wm_info.cacheline_size);
 		cursor_sr = i965_cursor_wm_info.fifo_size -
-			    (sr_entries + i965_cursor_wm_info.guard_size);
+			    (entries + i965_cursor_wm_info.guard_size);
 
 		if (cursor_sr > i965_cursor_wm_info.max_wm)
 			cursor_sr = i965_cursor_wm_info.max_wm;
@@ -3319,46 +3997,56 @@ static void i965_update_wm(struct drm_device *dev, int planea_clock,
 		      srwm);
 
 	/* 965 has limitations... */
-	I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) | (8 << 16) | (8 << 8) |
-		   (8 << 0));
+	I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) |
+		   (8 << 16) | (8 << 8) | (8 << 0));
 	I915_WRITE(DSPFW2, (8 << 8) | (8 << 0));
 	/* update cursor SR watermark */
 	I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
 }
 
-static void i9xx_update_wm(struct drm_device *dev, int planea_clock,
-			   int planeb_clock, int sr_hdisplay, int sr_htotal,
-			   int pixel_size)
+static void i9xx_update_wm(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	const struct intel_watermark_params *wm_info;
 	uint32_t fwater_lo;
 	uint32_t fwater_hi;
-	int total_size, cacheline_size, cwm, srwm = 1;
+	int cwm, srwm = 1;
+	int fifo_size;
 	int planea_wm, planeb_wm;
-	struct intel_watermark_params planea_params, planeb_params;
-	unsigned long line_time_us;
-	int sr_clock, sr_entries = 0;
+	struct drm_crtc *crtc, *enabled = NULL;
 
-	/* Create copies of the base settings for each pipe */
-	if (IS_CRESTLINE(dev) || IS_I945GM(dev))
-		planea_params = planeb_params = i945_wm_info;
+	if (IS_I945GM(dev))
+		wm_info = &i945_wm_info;
 	else if (!IS_GEN2(dev))
-		planea_params = planeb_params = i915_wm_info;
+		wm_info = &i915_wm_info;
 	else
-		planea_params = planeb_params = i855_wm_info;
+		wm_info = &i855_wm_info;
 
-	/* Grab a couple of global values before we overwrite them */
-	total_size = planea_params.fifo_size;
-	cacheline_size = planea_params.cacheline_size;
-
-	/* Update per-plane FIFO sizes */
-	planea_params.fifo_size = dev_priv->display.get_fifo_size(dev, 0);
-	planeb_params.fifo_size = dev_priv->display.get_fifo_size(dev, 1);
+	fifo_size = dev_priv->display.get_fifo_size(dev, 0);
+	crtc = intel_get_crtc_for_plane(dev, 0);
+	if (crtc->enabled && crtc->fb) {
+		planea_wm = intel_calculate_wm(crtc->mode.clock,
+					       wm_info, fifo_size,
+					       crtc->fb->bits_per_pixel / 8,
+					       latency_ns);
+		enabled = crtc;
+	} else
+		planea_wm = fifo_size - wm_info->guard_size;
+
+	fifo_size = dev_priv->display.get_fifo_size(dev, 1);
+	crtc = intel_get_crtc_for_plane(dev, 1);
+	if (crtc->enabled && crtc->fb) {
+		planeb_wm = intel_calculate_wm(crtc->mode.clock,
+					       wm_info, fifo_size,
+					       crtc->fb->bits_per_pixel / 8,
+					       latency_ns);
+		if (enabled == NULL)
+			enabled = crtc;
+		else
+			enabled = NULL;
+	} else
+		planeb_wm = fifo_size - wm_info->guard_size;
 
-	planea_wm = intel_calculate_wm(planea_clock, &planea_params,
-				       pixel_size, latency_ns);
-	planeb_wm = intel_calculate_wm(planeb_clock, &planeb_params,
-				       pixel_size, latency_ns);
 	DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);
 
 	/*
@@ -3366,39 +4054,39 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock,
 	 */
 	cwm = 2;
 
+	/* Play safe and disable self-refresh before adjusting watermarks. */
+	if (IS_I945G(dev) || IS_I945GM(dev))
+		I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | 0);
+	else if (IS_I915GM(dev))
+		I915_WRITE(INSTPM, I915_READ(INSTPM) & ~INSTPM_SELF_EN);
+
 	/* Calc sr entries for one plane configs */
-	if (HAS_FW_BLC(dev) && sr_hdisplay &&
-	    (!planea_clock || !planeb_clock)) {
+	if (HAS_FW_BLC(dev) && enabled) {
 		/* self-refresh has much higher latency */
 		static const int sr_latency_ns = 6000;
+		int clock = enabled->mode.clock;
+		int htotal = enabled->mode.htotal;
+		int hdisplay = enabled->mode.hdisplay;
+		int pixel_size = enabled->fb->bits_per_pixel / 8;
+		unsigned long line_time_us;
+		int entries;
 
-		sr_clock = planea_clock ? planea_clock : planeb_clock;
-		line_time_us = ((sr_htotal * 1000) / sr_clock);
+		line_time_us = (htotal * 1000) / clock;
 
 		/* Use ns/us then divide to preserve precision */
-		sr_entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
-			pixel_size * sr_hdisplay;
-		sr_entries = DIV_ROUND_UP(sr_entries, cacheline_size);
-		DRM_DEBUG_KMS("self-refresh entries: %d\n", sr_entries);
-		srwm = total_size - sr_entries;
+		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
+			pixel_size * hdisplay;
+		entries = DIV_ROUND_UP(entries, wm_info->cacheline_size);
+		DRM_DEBUG_KMS("self-refresh entries: %d\n", entries);
+		srwm = wm_info->fifo_size - entries;
 		if (srwm < 0)
 			srwm = 1;
 
 		if (IS_I945G(dev) || IS_I945GM(dev))
-			I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
-		else if (IS_I915GM(dev)) {
-			/* 915M has a smaller SRWM field */
+			I915_WRITE(FW_BLC_SELF,
+				   FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
+		else if (IS_I915GM(dev))
 			I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
-			I915_WRITE(INSTPM, I915_READ(INSTPM) | INSTPM_SELF_EN);
-		}
-	} else {
-		/* Turn off self refresh if both pipes are enabled */
-		if (IS_I945G(dev) || IS_I945GM(dev)) {
-			I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
-				   & ~FW_BLC_SELF_EN);
-		} else if (IS_I915GM(dev)) {
-			I915_WRITE(INSTPM, I915_READ(INSTPM) & ~INSTPM_SELF_EN);
-		}
 	}
 
 	DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
@@ -3413,19 +4101,36 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock,
 
 	I915_WRITE(FW_BLC, fwater_lo);
 	I915_WRITE(FW_BLC2, fwater_hi);
+
+	if (HAS_FW_BLC(dev)) {
+		if (enabled) {
+			if (IS_I945G(dev) || IS_I945GM(dev))
+				I915_WRITE(FW_BLC_SELF,
+					   FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN);
+			else if (IS_I915GM(dev))
+				I915_WRITE(INSTPM, I915_READ(INSTPM) | INSTPM_SELF_EN);
+			DRM_DEBUG_KMS("memory self refresh enabled\n");
+		} else
+			DRM_DEBUG_KMS("memory self refresh disabled\n");
+	}
 }
 
-static void i830_update_wm(struct drm_device *dev, int planea_clock, int unused,
-			   int unused2, int unused3, int pixel_size)
+static void i830_update_wm(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	uint32_t fwater_lo = I915_READ(FW_BLC) & ~0xfff;
+	struct drm_crtc *crtc;
+	uint32_t fwater_lo;
 	int planea_wm;
 
-	i830_wm_info.fifo_size = dev_priv->display.get_fifo_size(dev, 0);
+	crtc = single_enabled_crtc(dev);
+	if (crtc == NULL)
+		return;
 
-	planea_wm = intel_calculate_wm(planea_clock, &i830_wm_info,
-				       pixel_size, latency_ns);
+	planea_wm = intel_calculate_wm(crtc->mode.clock, &i830_wm_info,
+				       dev_priv->display.get_fifo_size(dev, 0),
+				       crtc->fb->bits_per_pixel / 8,
+				       latency_ns);
+	fwater_lo = I915_READ(FW_BLC) & ~0xfff;
 	fwater_lo |= (3<<8) | planea_wm;
 
 	DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm);
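The i9xx_update_wm flow above keys self-refresh on whether exactly one plane has an active framebuffer: the `enabled` pointer is set on the first active CRTC and cleared back to NULL when a second one turns up. A minimal sketch of that selection pattern, with a hypothetical two-element array standing in for the driver's per-plane CRTC lookup:

/* Sketch of the "exactly one enabled" selection used by i9xx_update_wm.
 * The array is a stand-in for intel_get_crtc_for_plane(). */
#include <stdio.h>
#include <stddef.h>

struct fake_crtc { int enabled; int clock; };

int main(void)
{
	struct fake_crtc crtcs[2] = { { 1, 148500 }, { 0, 0 } };
	struct fake_crtc *enabled = NULL;
	int i;

	for (i = 0; i < 2; i++) {
		if (!crtcs[i].enabled)
			continue;
		if (enabled == NULL)
			enabled = &crtcs[i];	/* first active plane */
		else
			enabled = NULL;		/* second active plane: no SR */
	}

	if (enabled)
		printf("self-refresh candidate, clock %d kHz\n", enabled->clock);
	else
		printf("zero or two+ planes active: self-refresh disabled\n");
	return 0;
}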
@@ -3534,15 +4239,15 @@ static bool ironlake_check_srwm(struct drm_device *dev, int level,
 /*
  * Compute watermark values of WM[1-3],
  */
-static bool ironlake_compute_srwm(struct drm_device *dev, int level,
-				  int hdisplay, int htotal,
-				  int pixel_size, int clock, int latency_ns,
+static bool ironlake_compute_srwm(struct drm_device *dev, int level, int plane,
+				  int latency_ns,
 				  const struct intel_watermark_params *display,
 				  const struct intel_watermark_params *cursor,
 				  int *fbc_wm, int *display_wm, int *cursor_wm)
 {
-
+	struct drm_crtc *crtc;
 	unsigned long line_time_us;
+	int hdisplay, htotal, pixel_size, clock;
 	int line_count, line_size;
 	int small, large;
 	int entries;
@@ -3552,6 +4257,12 @@ static bool ironlake_compute_srwm(struct drm_device *dev, int level,
 		return false;
 	}
 
+	crtc = intel_get_crtc_for_plane(dev, plane);
+	hdisplay = crtc->mode.hdisplay;
+	htotal = crtc->mode.htotal;
+	clock = crtc->mode.clock;
+	pixel_size = crtc->fb->bits_per_pixel / 8;
+
 	line_time_us = (htotal * 1000) / clock;
 	line_count = (latency_ns / line_time_us + 1000) / 1000;
 	line_size = hdisplay * pixel_size;
@@ -3579,14 +4290,11 @@ static bool ironlake_compute_srwm(struct drm_device *dev, int level,
 				   display, cursor);
 }
 
-static void ironlake_update_wm(struct drm_device *dev,
-			       int planea_clock, int planeb_clock,
-			       int hdisplay, int htotal,
-			       int pixel_size)
+static void ironlake_update_wm(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	int fbc_wm, plane_wm, cursor_wm, enabled;
-	int clock;
+	int fbc_wm, plane_wm, cursor_wm;
+	unsigned int enabled;
 
 	enabled = 0;
 	if (ironlake_compute_wm0(dev, 0,
@@ -3600,7 +4308,7 @@ static void ironlake_update_wm(struct drm_device *dev,
 		DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
 			      " plane %d, " "cursor: %d\n",
 			      plane_wm, cursor_wm);
-		enabled++;
+		enabled |= 1;
 	}
 
 	if (ironlake_compute_wm0(dev, 1,
@@ -3614,7 +4322,7 @@ static void ironlake_update_wm(struct drm_device *dev,
 		DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
 			      " plane %d, cursor: %d\n",
 			      plane_wm, cursor_wm);
-		enabled++;
+		enabled |= 2;
 	}
 
 	/*
@@ -3625,14 +4333,13 @@ static void ironlake_update_wm(struct drm_device *dev,
 	I915_WRITE(WM2_LP_ILK, 0);
 	I915_WRITE(WM1_LP_ILK, 0);
 
-	if (enabled != 1)
+	if (!single_plane_enabled(enabled))
 		return;
-
-	clock = planea_clock ? planea_clock : planeb_clock;
+	enabled = ffs(enabled) - 1;
 
 	/* WM1 */
-	if (!ironlake_compute_srwm(dev, 1, hdisplay, htotal, pixel_size,
-				   clock, ILK_READ_WM1_LATENCY() * 500,
+	if (!ironlake_compute_srwm(dev, 1, enabled,
+				   ILK_READ_WM1_LATENCY() * 500,
 				   &ironlake_display_srwm_info,
 				   &ironlake_cursor_srwm_info,
 				   &fbc_wm, &plane_wm, &cursor_wm))
@@ -3646,8 +4353,8 @@ static void ironlake_update_wm(struct drm_device *dev,
 		      cursor_wm);
 
 	/* WM2 */
-	if (!ironlake_compute_srwm(dev, 2, hdisplay, htotal, pixel_size,
-				   clock, ILK_READ_WM2_LATENCY() * 500,
+	if (!ironlake_compute_srwm(dev, 2, enabled,
+				   ILK_READ_WM2_LATENCY() * 500,
 				   &ironlake_display_srwm_info,
 				   &ironlake_cursor_srwm_info,
 				   &fbc_wm, &plane_wm, &cursor_wm))
@@ -3666,15 +4373,12 @@ static void ironlake_update_wm(struct drm_device *dev,
 	 */
 }
 
-static void sandybridge_update_wm(struct drm_device *dev,
-				  int planea_clock, int planeb_clock,
-				  int hdisplay, int htotal,
-				  int pixel_size)
+static void sandybridge_update_wm(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int latency = SNB_READ_WM0_LATENCY() * 100;	/* In unit 0.1us */
-	int fbc_wm, plane_wm, cursor_wm, enabled;
-	int clock;
+	int fbc_wm, plane_wm, cursor_wm;
+	unsigned int enabled;
 
 	enabled = 0;
 	if (ironlake_compute_wm0(dev, 0,
@@ -3686,7 +4390,7 @@ static void sandybridge_update_wm(struct drm_device *dev,
 		DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
 			      " plane %d, " "cursor: %d\n",
 			      plane_wm, cursor_wm);
-		enabled++;
+		enabled |= 1;
 	}
 
 	if (ironlake_compute_wm0(dev, 1,
@@ -3698,7 +4402,7 @@ static void sandybridge_update_wm(struct drm_device *dev,
 		DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
 			      " plane %d, cursor: %d\n",
 			      plane_wm, cursor_wm);
-		enabled++;
+		enabled |= 2;
 	}
 
 	/*
@@ -3715,14 +4419,13 @@ static void sandybridge_update_wm(struct drm_device *dev,
 	I915_WRITE(WM2_LP_ILK, 0);
 	I915_WRITE(WM1_LP_ILK, 0);
 
-	if (enabled != 1)
+	if (!single_plane_enabled(enabled))
 		return;
-
-	clock = planea_clock ? planea_clock : planeb_clock;
+	enabled = ffs(enabled) - 1;
 
 	/* WM1 */
-	if (!ironlake_compute_srwm(dev, 1, hdisplay, htotal, pixel_size,
-				   clock, SNB_READ_WM1_LATENCY() * 500,
+	if (!ironlake_compute_srwm(dev, 1, enabled,
+				   SNB_READ_WM1_LATENCY() * 500,
 				   &sandybridge_display_srwm_info,
 				   &sandybridge_cursor_srwm_info,
 				   &fbc_wm, &plane_wm, &cursor_wm))
@@ -3736,9 +4439,8 @@ static void sandybridge_update_wm(struct drm_device *dev,
 		      cursor_wm);
 
 	/* WM2 */
-	if (!ironlake_compute_srwm(dev, 2,
-				   hdisplay, htotal, pixel_size,
-				   clock, SNB_READ_WM2_LATENCY() * 500,
+	if (!ironlake_compute_srwm(dev, 2, enabled,
+				   SNB_READ_WM2_LATENCY() * 500,
 				   &sandybridge_display_srwm_info,
 				   &sandybridge_cursor_srwm_info,
 				   &fbc_wm, &plane_wm, &cursor_wm))
@@ -3752,9 +4454,8 @@ static void sandybridge_update_wm(struct drm_device *dev,
 		      cursor_wm);
 
 	/* WM3 */
-	if (!ironlake_compute_srwm(dev, 3,
-				   hdisplay, htotal, pixel_size,
-				   clock, SNB_READ_WM3_LATENCY() * 500,
+	if (!ironlake_compute_srwm(dev, 3, enabled,
+				   SNB_READ_WM3_LATENCY() * 500,
 				   &sandybridge_display_srwm_info,
 				   &sandybridge_cursor_srwm_info,
 				   &fbc_wm, &plane_wm, &cursor_wm))
@@ -3803,44 +4504,9 @@ static void sandybridge_update_wm(struct drm_device *dev,
 static void intel_update_watermarks(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_crtc *crtc;
-	int sr_hdisplay = 0;
-	unsigned long planea_clock = 0, planeb_clock = 0, sr_clock = 0;
-	int enabled = 0, pixel_size = 0;
-	int sr_htotal = 0;
-
-	if (!dev_priv->display.update_wm)
-		return;
-
-	/* Get the clock config from both planes */
-	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-		if (intel_crtc->active) {
-			enabled++;
-			if (intel_crtc->plane == 0) {
-				DRM_DEBUG_KMS("plane A (pipe %d) clock: %d\n",
-					      intel_crtc->pipe, crtc->mode.clock);
-				planea_clock = crtc->mode.clock;
-			} else {
-				DRM_DEBUG_KMS("plane B (pipe %d) clock: %d\n",
-					      intel_crtc->pipe, crtc->mode.clock);
-				planeb_clock = crtc->mode.clock;
-			}
-			sr_hdisplay = crtc->mode.hdisplay;
-			sr_clock = crtc->mode.clock;
-			sr_htotal = crtc->mode.htotal;
-			if (crtc->fb)
-				pixel_size = crtc->fb->bits_per_pixel / 8;
-			else
-				pixel_size = 4; /* by default */
-		}
-	}
-
-	if (enabled <= 0)
-		return;
 
-	dev_priv->display.update_wm(dev, planea_clock, planeb_clock,
-				    sr_hdisplay, sr_htotal, pixel_size);
+	if (dev_priv->display.update_wm)
+		dev_priv->display.update_wm(dev);
 }
 
 static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
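After this change every per-generation update_wm hook derives clocks and framebuffer sizes from CRTC state itself, so the dispatcher shrinks to a NULL-checked indirect call. A generic sketch of that dispatch-table shape follows; the types and names here are invented for illustration and are not the i915 ones.

/* Sketch of a per-generation function-pointer dispatch, in the shape of
 * dev_priv->display.update_wm.  All names here are hypothetical. */
#include <stdio.h>

struct device;

struct display_funcs {
	void (*update_wm)(struct device *dev);	/* may be NULL */
};

static void gen4_update_wm(struct device *dev)
{
	(void)dev;
	printf("gen4 watermark update\n");
}

static void update_watermarks(struct device *dev, const struct display_funcs *f)
{
	if (f->update_wm)
		f->update_wm(dev);
}

int main(void)
{
	struct display_funcs funcs = { .update_wm = gen4_update_wm };

	update_watermarks(NULL, &funcs);
	funcs.update_wm = NULL;		/* unsupported generation: no-op */
	update_watermarks(NULL, &funcs);
	return 0;
}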
@@ -3872,6 +4538,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
 	int ret;
 	struct fdi_m_n m_n = {0};
 	u32 reg, temp;
+	u32 lvds_sync = 0;
 	int target_clock;
 
 	drm_vblank_pre_modeset(dev, pipe);
@@ -4243,9 +4910,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
 		pipeconf &= ~PIPECONF_DOUBLE_WIDE;
 	}
 
-	dspcntr |= DISPLAY_PLANE_ENABLE;
-	pipeconf |= PIPECONF_ENABLE;
-	dpll |= DPLL_VCO_ENABLE;
+	if (!HAS_PCH_SPLIT(dev))
+		dpll |= DPLL_VCO_ENABLE;
 
 	DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
 	drm_mode_debug_printmodeline(mode);
@@ -4271,10 +4937,20 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
 		/* enable transcoder DPLL */
 		if (HAS_PCH_CPT(dev)) {
 			temp = I915_READ(PCH_DPLL_SEL);
-			if (pipe == 0)
+			switch (pipe) {
+			case 0:
 				temp |= TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL;
-			else
+				break;
+			case 1:
 				temp |= TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL;
+				break;
+			case 2:
+				/* FIXME: manage transcoder PLLs? */
+				temp |= TRANSC_DPLL_ENABLE | TRANSC_DPLLB_SEL;
+				break;
+			default:
+				BUG();
+			}
 			I915_WRITE(PCH_DPLL_SEL, temp);
 
 			POSTING_READ(PCH_DPLL_SEL);
@@ -4324,6 +5000,22 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
 			else
 				temp &= ~LVDS_ENABLE_DITHER;
 		}
+		if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
+			lvds_sync |= LVDS_HSYNC_POLARITY;
+		if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
+			lvds_sync |= LVDS_VSYNC_POLARITY;
+		if ((temp & (LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY))
+		    != lvds_sync) {
+			char flags[2] = "-+";
+			DRM_INFO("Changing LVDS panel from "
+				 "(%chsync, %cvsync) to (%chsync, %cvsync)\n",
+				 flags[!(temp & LVDS_HSYNC_POLARITY)],
+				 flags[!(temp & LVDS_VSYNC_POLARITY)],
+				 flags[!(lvds_sync & LVDS_HSYNC_POLARITY)],
+				 flags[!(lvds_sync & LVDS_VSYNC_POLARITY)]);
+			temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
+			temp |= lvds_sync;
+		}
 		I915_WRITE(reg, temp);
 	}
 
@@ -4341,17 +5033,10 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
 		intel_dp_set_m_n(crtc, mode, adjusted_mode);
 	} else if (HAS_PCH_SPLIT(dev)) {
 		/* For non-DP output, clear any trans DP clock recovery setting.*/
-		if (pipe == 0) {
-			I915_WRITE(TRANSA_DATA_M1, 0);
-			I915_WRITE(TRANSA_DATA_N1, 0);
-			I915_WRITE(TRANSA_DP_LINK_M1, 0);
-			I915_WRITE(TRANSA_DP_LINK_N1, 0);
-		} else {
-			I915_WRITE(TRANSB_DATA_M1, 0);
-			I915_WRITE(TRANSB_DATA_N1, 0);
-			I915_WRITE(TRANSB_DP_LINK_M1, 0);
-			I915_WRITE(TRANSB_DP_LINK_N1, 0);
-		}
+		I915_WRITE(TRANSDATA_M1(pipe), 0);
+		I915_WRITE(TRANSDATA_N1(pipe), 0);
+		I915_WRITE(TRANSDPLINK_M1(pipe), 0);
+		I915_WRITE(TRANSDPLINK_N1(pipe), 0);
 	}
 
 	if (!has_edp_encoder || intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
@@ -4454,6 +5139,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
 
 	I915_WRITE(PIPECONF(pipe), pipeconf);
 	POSTING_READ(PIPECONF(pipe));
+	if (!HAS_PCH_SPLIT(dev))
+		intel_enable_pipe(dev_priv, pipe, false);
 
 	intel_wait_for_vblank(dev, pipe);
 
@@ -4464,6 +5151,9 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
 	}
 
 	I915_WRITE(DSPCNTR(plane), dspcntr);
+	POSTING_READ(DSPCNTR(plane));
+	if (!HAS_PCH_SPLIT(dev))
+		intel_enable_plane(dev_priv, plane, pipe);
 
 	ret = intel_pipe_set_base(crtc, x, y, old_fb);
 
@@ -4480,7 +5170,7 @@ void intel_crtc_load_lut(struct drm_crtc *crtc)
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	int palreg = (intel_crtc->pipe == 0) ? PALETTE_A : PALETTE_B;
+	int palreg = PALETTE(intel_crtc->pipe);
 	int i;
 
 	/* The clocks have to be on to load the palette. */
@@ -4489,8 +5179,7 @@ void intel_crtc_load_lut(struct drm_crtc *crtc)
 
 	/* use legacy palette for Ironlake */
 	if (HAS_PCH_SPLIT(dev))
-		palreg = (intel_crtc->pipe == 0) ? LGC_PALETTE_A :
-						   LGC_PALETTE_B;
+		palreg = LGC_PALETTE(intel_crtc->pipe);
 
 	for (i = 0; i < 256; i++) {
 		I915_WRITE(palreg + 4 * i,
@@ -4511,12 +5200,12 @@ static void i845_update_cursor(struct drm_crtc *crtc, u32 base)
 	if (intel_crtc->cursor_visible == visible)
 		return;
 
-	cntl = I915_READ(CURACNTR);
+	cntl = I915_READ(_CURACNTR);
 	if (visible) {
 		/* On these chipsets we can only modify the base whilst
 		 * the cursor is disabled.
 		 */
-		I915_WRITE(CURABASE, base);
+		I915_WRITE(_CURABASE, base);
 
 		cntl &= ~(CURSOR_FORMAT_MASK);
 		/* XXX width must be 64, stride 256 => 0x00 << 28 */
@@ -4525,7 +5214,7 @@ static void i845_update_cursor(struct drm_crtc *crtc, u32 base)
 			CURSOR_FORMAT_ARGB;
 	} else
 		cntl &= ~(CURSOR_ENABLE | CURSOR_GAMMA_ENABLE);
-	I915_WRITE(CURACNTR, cntl);
+	I915_WRITE(_CURACNTR, cntl);
 
 	intel_crtc->cursor_visible = visible;
 }
@@ -4539,7 +5228,7 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
 	bool visible = base != 0;
 
 	if (intel_crtc->cursor_visible != visible) {
-		uint32_t cntl = I915_READ(pipe == 0 ? CURACNTR : CURBCNTR);
+		uint32_t cntl = I915_READ(CURCNTR(pipe));
 		if (base) {
 			cntl &= ~(CURSOR_MODE | MCURSOR_PIPE_SELECT);
 			cntl |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
@@ -4548,12 +5237,12 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
 			cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
 			cntl |= CURSOR_MODE_DISABLE;
 		}
-		I915_WRITE(pipe == 0 ? CURACNTR : CURBCNTR, cntl);
+		I915_WRITE(CURCNTR(pipe), cntl);
 
 		intel_crtc->cursor_visible = visible;
 	}
 	/* and commit changes on next vblank */
-	I915_WRITE(pipe == 0 ? CURABASE : CURBBASE, base);
+	I915_WRITE(CURBASE(pipe), base);
 }
 
 /* If no-part of the cursor is visible on the framebuffer, then the GPU may hang... */
@@ -4603,7 +5292,7 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc,
 	if (!visible && !intel_crtc->cursor_visible)
 		return;
 
-	I915_WRITE(pipe == 0 ? CURAPOS : CURBPOS, pos);
+	I915_WRITE(CURPOS(pipe), pos);
 	if (IS_845G(dev) || IS_I865G(dev))
 		i845_update_cursor(crtc, base);
 	else
@@ -4643,7 +5332,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
 	}
 
 	obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
-	if (!obj)
+	if (&obj->base == NULL)
 		return -ENOENT;
 
 	if (obj->base.size < width * height * 4) {
@@ -4909,14 +5598,14 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	int pipe = intel_crtc->pipe;
-	u32 dpll = I915_READ((pipe == 0) ? DPLL_A : DPLL_B);
+	u32 dpll = I915_READ(DPLL(pipe));
 	u32 fp;
 	intel_clock_t clock;
 
 	if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
-		fp = I915_READ((pipe == 0) ? FPA0 : FPB0);
+		fp = FP0(pipe);
 	else
-		fp = I915_READ((pipe == 0) ? FPA1 : FPB1);
+		fp = FP1(pipe);
 
 	clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
 	if (IS_PINEVIEW(dev)) {
@@ -4998,10 +5687,10 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	int pipe = intel_crtc->pipe;
 	struct drm_display_mode *mode;
-	int htot = I915_READ((pipe == 0) ? HTOTAL_A : HTOTAL_B);
-	int hsync = I915_READ((pipe == 0) ? HSYNC_A : HSYNC_B);
-	int vtot = I915_READ((pipe == 0) ? VTOTAL_A : VTOTAL_B);
-	int vsync = I915_READ((pipe == 0) ? VSYNC_A : VSYNC_B);
+	int htot = I915_READ(HTOTAL(pipe));
+	int hsync = I915_READ(HSYNC(pipe));
+	int vtot = I915_READ(VTOTAL(pipe));
+	int vsync = I915_READ(VSYNC(pipe));
 
 	mode = kzalloc(sizeof(*mode), GFP_KERNEL);
 	if (!mode)
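The recurring `(pipe == 0) ? REG_A : REG_B` selections are folded into parameterized macros throughout this series; the pattern relies on the pipe-B register bank sitting at a fixed stride from the pipe-A bank. A toy version of the idea follows, where the base address and stride are invented for illustration and are not the real i915 offsets (those live in i915_reg.h):

/* Toy model of per-pipe register macros.  Offsets are made up. */
#include <stdio.h>

#define PIPE_A_BASE  0x60000
#define PIPE_STRIDE  0x01000

#define PIPE_REG(pipe, off) (PIPE_A_BASE + (pipe) * PIPE_STRIDE + (off))
#define HTOTAL(pipe) PIPE_REG(pipe, 0x00)
#define VTOTAL(pipe) PIPE_REG(pipe, 0x0c)

int main(void)
{
	int pipe;

	for (pipe = 0; pipe < 2; pipe++)
		printf("pipe %c: HTOTAL=0x%05x VTOTAL=0x%05x\n",
		       'A' + pipe, HTOTAL(pipe), VTOTAL(pipe));
	return 0;
}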
@@ -5110,7 +5799,7 @@ static void intel_decrease_pllclock(struct drm_crtc *crtc)
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	int pipe = intel_crtc->pipe;
-	int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
+	int dpll_reg = DPLL(pipe);
 	int dpll = I915_READ(dpll_reg);
 
 	if (HAS_PCH_SPLIT(dev))
@@ -5158,7 +5847,6 @@ static void intel_idle_update(struct work_struct *work)
 	struct drm_device *dev = dev_priv->dev;
 	struct drm_crtc *crtc;
 	struct intel_crtc *intel_crtc;
-	int enabled = 0;
 
 	if (!i915_powersave)
 		return;
@@ -5172,16 +5860,11 @@ static void intel_idle_update(struct work_struct *work)
 		if (!crtc->fb)
 			continue;
 
-		enabled++;
 		intel_crtc = to_intel_crtc(crtc);
 		if (!intel_crtc->busy)
 			intel_decrease_pllclock(crtc);
 	}
 
-	if ((enabled == 1) && (IS_I945G(dev) || IS_I945GM(dev))) {
-		DRM_DEBUG_DRIVER("enable memory self refresh on 945\n");
-		I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN);
-	}
 
 	mutex_unlock(&dev->struct_mutex);
 }
@@ -5206,17 +5889,9 @@ void intel_mark_busy(struct drm_device *dev, struct drm_i915_gem_object *obj)
 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
 		return;
 
-	if (!dev_priv->busy) {
-		if (IS_I945G(dev) || IS_I945GM(dev)) {
-			u32 fw_blc_self;
-
-			DRM_DEBUG_DRIVER("disable memory self refresh on 945\n");
-			fw_blc_self = I915_READ(FW_BLC_SELF);
-			fw_blc_self &= ~FW_BLC_SELF_EN;
-			I915_WRITE(FW_BLC_SELF, fw_blc_self | FW_BLC_SELF_EN_MASK);
-		}
+	if (!dev_priv->busy)
 		dev_priv->busy = true;
-	} else
+	else
 		mod_timer(&dev_priv->idle_timer, jiffies +
 			  msecs_to_jiffies(GPU_IDLE_TIMEOUT));
 
@@ -5228,14 +5903,6 @@ void intel_mark_busy(struct drm_device *dev, struct drm_i915_gem_object *obj)
 		intel_fb = to_intel_framebuffer(crtc->fb);
 		if (intel_fb->obj == obj) {
 			if (!intel_crtc->busy) {
-				if (IS_I945G(dev) || IS_I945GM(dev)) {
-					u32 fw_blc_self;
-
-					DRM_DEBUG_DRIVER("disable memory self refresh on 945\n");
-					fw_blc_self = I915_READ(FW_BLC_SELF);
-					fw_blc_self &= ~FW_BLC_SELF_EN;
-					I915_WRITE(FW_BLC_SELF, fw_blc_self | FW_BLC_SELF_EN_MASK);
-				}
 				/* Non-busy -> busy, upclock */
 				intel_increase_pllclock(crtc);
 				intel_crtc->busy = true;
@@ -5513,7 +6180,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 		 * pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
 		 */
 		pf = 0;
-		pipesrc = I915_READ(pipe == 0 ? PIPEASRC : PIPEBSRC) & 0x0fff0fff;
+		pipesrc = I915_READ(PIPESRC(pipe)) & 0x0fff0fff;
 		OUT_RING(pf | pipesrc);
 		break;
 
@@ -5523,8 +6190,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 		OUT_RING(fb->pitch | obj->tiling_mode);
 		OUT_RING(obj->gtt_offset);
 
-		pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
-		pipesrc = I915_READ(pipe == 0 ? PIPEASRC : PIPEBSRC) & 0x0fff0fff;
+		pf = I915_READ(PF_CTL(pipe)) & PF_ENABLE;
+		pipesrc = I915_READ(PIPESRC(pipe)) & 0x0fff0fff;
 		OUT_RING(pf | pipesrc);
 		break;
 	}
@@ -5558,9 +6225,7 @@ static void intel_crtc_reset(struct drm_crtc *crtc)
 	/* Reset flags back to the 'unknown' status so that they
 	 * will be correctly set on the initial modeset.
 	 */
-	intel_crtc->cursor_addr = 0;
 	intel_crtc->dpms_mode = -1;
-	intel_crtc->active = true; /* force the pipe off on setup_init_config */
 }
 
 static struct drm_crtc_helper_funcs intel_helper_funcs = {
@@ -5615,22 +6280,8 @@ static void intel_sanitize_modesetting(struct drm_device *dev,
 		pipe = !pipe;
 
 	/* Disable the plane and wait for it to stop reading from the pipe. */
-	I915_WRITE(reg, val & ~DISPLAY_PLANE_ENABLE);
-	intel_flush_display_plane(dev, plane);
-
-	if (IS_GEN2(dev))
-		intel_wait_for_vblank(dev, pipe);
-
-	if (pipe == 0 && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
-		return;
-
-	/* Switch off the pipe. */
-	reg = PIPECONF(pipe);
-	val = I915_READ(reg);
-	if (val & PIPECONF_ENABLE) {
-		I915_WRITE(reg, val & ~PIPECONF_ENABLE);
-		intel_wait_for_pipe_off(dev, pipe);
-	}
+	intel_disable_plane(dev_priv, plane, pipe);
+	intel_disable_pipe(dev_priv, pipe);
 }
 
 static void intel_crtc_init(struct drm_device *dev, int pipe)
@@ -5666,6 +6317,7 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
 	dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;
 
 	intel_crtc_reset(&intel_crtc->base);
+	intel_crtc->active = true; /* force the pipe off on setup_init_config */
 
 	if (HAS_PCH_SPLIT(dev)) {
 		intel_helper_funcs.prepare = ironlake_crtc_prepare;
@@ -5919,7 +6571,7 @@ intel_user_framebuffer_create(struct drm_device *dev,
 	int ret;
 
 	obj = to_intel_bo(drm_gem_object_lookup(dev, filp, mode_cmd->handle));
-	if (!obj)
+	if (&obj->base == NULL)
 		return ERR_PTR(-ENOENT);
 
 	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
@@ -6204,7 +6856,7 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
 	 * userspace...
 	 */
 	I915_WRITE(GEN6_RC_STATE, 0);
-	__gen6_force_wake_get(dev_priv);
+	__gen6_gt_force_wake_get(dev_priv);
 
 	/* disable the counters and set deterministic thresholds */
 	I915_WRITE(GEN6_RC_CONTROL, 0);
@@ -6241,18 +6893,18 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
 	I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
 		   18 << 24 |
 		   6 << 16);
-	I915_WRITE(GEN6_RP_UP_THRESHOLD, 90000);
-	I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 100000);
+	I915_WRITE(GEN6_RP_UP_THRESHOLD, 10000);
+	I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 1000000);
 	I915_WRITE(GEN6_RP_UP_EI, 100000);
-	I915_WRITE(GEN6_RP_DOWN_EI, 300000);
+	I915_WRITE(GEN6_RP_DOWN_EI, 5000000);
 	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
 	I915_WRITE(GEN6_RP_CONTROL,
 		   GEN6_RP_MEDIA_TURBO |
 		   GEN6_RP_USE_NORMAL_FREQ |
 		   GEN6_RP_MEDIA_IS_GFX |
 		   GEN6_RP_ENABLE |
-		   GEN6_RP_UP_BUSY_MAX |
-		   GEN6_RP_DOWN_BUSY_MIN);
+		   GEN6_RP_UP_BUSY_AVG |
+		   GEN6_RP_DOWN_IDLE_CONT);
 
 	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
 		     500))
@@ -6302,12 +6954,13 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
 	/* enable all PM interrupts */
 	I915_WRITE(GEN6_PMINTRMSK, 0);
 
-	__gen6_force_wake_put(dev_priv);
+	__gen6_gt_force_wake_put(dev_priv);
 }
 
 void intel_enable_clock_gating(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	int pipe;
 
 	/*
	 * Disable clock gating reported to work incorrectly according to the
@@ -6417,12 +7070,10 @@ void intel_enable_clock_gating(struct drm_device *dev)
 			   ILK_DPARB_CLK_GATE |
 			   ILK_DPFD_CLK_GATE);
 
-		I915_WRITE(DSPACNTR,
-			   I915_READ(DSPACNTR) |
-			   DISPPLANE_TRICKLE_FEED_DISABLE);
-		I915_WRITE(DSPBCNTR,
-			   I915_READ(DSPBCNTR) |
-			   DISPPLANE_TRICKLE_FEED_DISABLE);
+		for_each_pipe(pipe)
+			I915_WRITE(DSPCNTR(pipe),
+				   I915_READ(DSPCNTR(pipe)) |
+				   DISPPLANE_TRICKLE_FEED_DISABLE);
 		}
 	} else if (IS_G4X(dev)) {
 		uint32_t dspclk_gate;
@@ -6463,52 +7114,60 @@ void intel_enable_clock_gating(struct drm_device *dev)
 	}
 }
 
-void intel_disable_clock_gating(struct drm_device *dev)
+static void ironlake_teardown_rc6(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
 	if (dev_priv->renderctx) {
-		struct drm_i915_gem_object *obj = dev_priv->renderctx;
-
-		I915_WRITE(CCID, 0);
-		POSTING_READ(CCID);
-
-		i915_gem_object_unpin(obj);
-		drm_gem_object_unreference(&obj->base);
+		i915_gem_object_unpin(dev_priv->renderctx);
+		drm_gem_object_unreference(&dev_priv->renderctx->base);
 		dev_priv->renderctx = NULL;
 	}
 
 	if (dev_priv->pwrctx) {
-		struct drm_i915_gem_object *obj = dev_priv->pwrctx;
+		i915_gem_object_unpin(dev_priv->pwrctx);
+		drm_gem_object_unreference(&dev_priv->pwrctx->base);
+		dev_priv->pwrctx = NULL;
+	}
+}
+
+static void ironlake_disable_rc6(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (I915_READ(PWRCTXA)) {
+		/* Wake the GPU, prevent RC6, then restore RSTDBYCTL */
+		I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) | RCX_SW_EXIT);
+		wait_for(((I915_READ(RSTDBYCTL) & RSX_STATUS_MASK) == RSX_STATUS_ON),
+			 50);
 
 		I915_WRITE(PWRCTXA, 0);
 		POSTING_READ(PWRCTXA);
 
-		i915_gem_object_unpin(obj);
-		drm_gem_object_unreference(&obj->base);
-		dev_priv->pwrctx = NULL;
+		I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
+		POSTING_READ(RSTDBYCTL);
 	}
+
+	ironlake_teardown_rc6(dev);
 }
 
-static void ironlake_disable_rc6(struct drm_device *dev)
+static int ironlake_setup_rc6(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	/* Wake the GPU, prevent RC6, then restore RSTDBYCTL */
-	I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) | RCX_SW_EXIT);
-	wait_for(((I915_READ(RSTDBYCTL) & RSX_STATUS_MASK) == RSX_STATUS_ON),
-		 10);
-	POSTING_READ(CCID);
-	I915_WRITE(PWRCTXA, 0);
-	POSTING_READ(PWRCTXA);
-	I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
-	POSTING_READ(RSTDBYCTL);
-	i915_gem_object_unpin(dev_priv->renderctx);
-	drm_gem_object_unreference(&dev_priv->renderctx->base);
-	dev_priv->renderctx = NULL;
-	i915_gem_object_unpin(dev_priv->pwrctx);
-	drm_gem_object_unreference(&dev_priv->pwrctx->base);
-	dev_priv->pwrctx = NULL;
+	if (dev_priv->renderctx == NULL)
+		dev_priv->renderctx = intel_alloc_context_page(dev);
+	if (!dev_priv->renderctx)
+		return -ENOMEM;
+
+	if (dev_priv->pwrctx == NULL)
+		dev_priv->pwrctx = intel_alloc_context_page(dev);
+	if (!dev_priv->pwrctx) {
+		ironlake_teardown_rc6(dev);
+		return -ENOMEM;
+	}
+
+	return 0;
 }
 
 void ironlake_enable_rc6(struct drm_device *dev)
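The refactor above splits the RC6 context handling into an idempotent ironlake_setup_rc6() that allocates lazily and unwinds through ironlake_teardown_rc6() on partial failure, so teardown is safe to call from any state. The general shape, sketched with plain malloc/free standing in for the GEM context-page allocations:

/* Sketch of the setup/teardown pairing used by ironlake_setup_rc6():
 * allocate lazily, and on partial failure release whatever exists. */
#include <stdlib.h>
#include <errno.h>

struct ctx_pages { void *renderctx; void *pwrctx; };

static void teardown(struct ctx_pages *c)
{
	free(c->renderctx);
	c->renderctx = NULL;
	free(c->pwrctx);
	c->pwrctx = NULL;
}

static int setup(struct ctx_pages *c)
{
	if (c->renderctx == NULL)
		c->renderctx = malloc(4096);
	if (!c->renderctx)
		return -ENOMEM;

	if (c->pwrctx == NULL)
		c->pwrctx = malloc(4096);
	if (!c->pwrctx) {
		teardown(c);	/* unwind the first allocation */
		return -ENOMEM;
	}
	return 0;
}

int main(void)
{
	struct ctx_pages c = { NULL, NULL };
	int ret = setup(&c);

	if (ret == 0)
		teardown(&c);
	return ret ? 1 : 0;
}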
@@ -6516,15 +7175,26 @@ void ironlake_enable_rc6(struct drm_device *dev)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int ret;
 
+	/* rc6 disabled by default due to repeated reports of hanging during
+	 * boot and resume.
+	 */
+	if (!i915_enable_rc6)
+		return;
+
+	ret = ironlake_setup_rc6(dev);
+	if (ret)
+		return;
+
 	/*
 	 * GPU can automatically power down the render unit if given a page
 	 * to save state.
 	 */
 	ret = BEGIN_LP_RING(6);
 	if (ret) {
-		ironlake_disable_rc6(dev);
+		ironlake_teardown_rc6(dev);
 		return;
 	}
+
 	OUT_RING(MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN);
 	OUT_RING(MI_SET_CONTEXT);
 	OUT_RING(dev_priv->renderctx->gtt_offset |
@@ -6541,6 +7211,7 @@ void ironlake_enable_rc6(struct drm_device *dev)
 	I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
 }
 
+
 /* Set up chip specific display functions */
 static void intel_init_display(struct drm_device *dev)
 {
@@ -6757,10 +7428,6 @@ void intel_modeset_init(struct drm_device *dev)
 	}
 	dev->mode_config.fb_base = dev->agp->base;
 
-	if (IS_MOBILE(dev) || !IS_GEN2(dev))
-		dev_priv->num_pipe = 2;
-	else
-		dev_priv->num_pipe = 1;
 	DRM_DEBUG_KMS("%d display pipe%s available.\n",
 		      dev_priv->num_pipe, dev_priv->num_pipe > 1 ? "s" : "");
 
@@ -6783,21 +7450,9 @@ void intel_modeset_init(struct drm_device *dev)
 	if (IS_GEN6(dev))
 		gen6_enable_rps(dev_priv);
 
-	if (IS_IRONLAKE_M(dev)) {
-		dev_priv->renderctx = intel_alloc_context_page(dev);
-		if (!dev_priv->renderctx)
-			goto skip_rc6;
-		dev_priv->pwrctx = intel_alloc_context_page(dev);
-		if (!dev_priv->pwrctx) {
-			i915_gem_object_unpin(dev_priv->renderctx);
-			drm_gem_object_unreference(&dev_priv->renderctx->base);
-			dev_priv->renderctx = NULL;
-			goto skip_rc6;
-		}
+	if (IS_IRONLAKE_M(dev))
 		ironlake_enable_rc6(dev);
-	}
 
-skip_rc6:
 	INIT_WORK(&dev_priv->idle_work, intel_idle_update);
 	setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer,
 		    (unsigned long)dev);
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 1f4242b682c8..d29e33f815d7 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -49,6 +49,7 @@ struct intel_dp {
49 uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE]; 49 uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE];
50 bool has_audio; 50 bool has_audio;
51 int force_audio; 51 int force_audio;
52 uint32_t color_range;
52 int dpms_mode; 53 int dpms_mode;
53 uint8_t link_bw; 54 uint8_t link_bw;
54 uint8_t lane_count; 55 uint8_t lane_count;
@@ -685,6 +686,7 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
685 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 686 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
686 int lane_count = 4, bpp = 24; 687 int lane_count = 4, bpp = 24;
687 struct intel_dp_m_n m_n; 688 struct intel_dp_m_n m_n;
689 int pipe = intel_crtc->pipe;
688 690
689 /* 691 /*
690 * Find the lane count in the intel_encoder private 692 * Find the lane count in the intel_encoder private
@@ -715,39 +717,19 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
715 mode->clock, adjusted_mode->clock, &m_n); 717 mode->clock, adjusted_mode->clock, &m_n);
716 718
717 if (HAS_PCH_SPLIT(dev)) { 719 if (HAS_PCH_SPLIT(dev)) {
718 if (intel_crtc->pipe == 0) { 720 I915_WRITE(TRANSDATA_M1(pipe),
719 I915_WRITE(TRANSA_DATA_M1, 721 ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) |
720 ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) | 722 m_n.gmch_m);
721 m_n.gmch_m); 723 I915_WRITE(TRANSDATA_N1(pipe), m_n.gmch_n);
722 I915_WRITE(TRANSA_DATA_N1, m_n.gmch_n); 724 I915_WRITE(TRANSDPLINK_M1(pipe), m_n.link_m);
723 I915_WRITE(TRANSA_DP_LINK_M1, m_n.link_m); 725 I915_WRITE(TRANSDPLINK_N1(pipe), m_n.link_n);
724 I915_WRITE(TRANSA_DP_LINK_N1, m_n.link_n);
725 } else {
726 I915_WRITE(TRANSB_DATA_M1,
727 ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) |
728 m_n.gmch_m);
729 I915_WRITE(TRANSB_DATA_N1, m_n.gmch_n);
730 I915_WRITE(TRANSB_DP_LINK_M1, m_n.link_m);
731 I915_WRITE(TRANSB_DP_LINK_N1, m_n.link_n);
732 }
733 } else { 726 } else {
734 if (intel_crtc->pipe == 0) { 727 I915_WRITE(PIPE_GMCH_DATA_M(pipe),
735 I915_WRITE(PIPEA_GMCH_DATA_M, 728 ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) |
736 ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) | 729 m_n.gmch_m);
737 m_n.gmch_m); 730 I915_WRITE(PIPE_GMCH_DATA_N(pipe), m_n.gmch_n);
738 I915_WRITE(PIPEA_GMCH_DATA_N, 731 I915_WRITE(PIPE_DP_LINK_M(pipe), m_n.link_m);
739 m_n.gmch_n); 732 I915_WRITE(PIPE_DP_LINK_N(pipe), m_n.link_n);
740 I915_WRITE(PIPEA_DP_LINK_M, m_n.link_m);
741 I915_WRITE(PIPEA_DP_LINK_N, m_n.link_n);
742 } else {
743 I915_WRITE(PIPEB_GMCH_DATA_M,
744 ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) |
745 m_n.gmch_m);
746 I915_WRITE(PIPEB_GMCH_DATA_N,
747 m_n.gmch_n);
748 I915_WRITE(PIPEB_DP_LINK_M, m_n.link_m);
749 I915_WRITE(PIPEB_DP_LINK_N, m_n.link_n);
750 }
751 } 733 }
752} 734}
753 735
@@ -760,8 +742,8 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
760 struct drm_crtc *crtc = intel_dp->base.base.crtc; 742 struct drm_crtc *crtc = intel_dp->base.base.crtc;
761 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 743 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
762 744
763 intel_dp->DP = (DP_VOLTAGE_0_4 | 745 intel_dp->DP = DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
764 DP_PRE_EMPHASIS_0); 746 intel_dp->DP |= intel_dp->color_range;
765 747
766 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) 748 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
767 intel_dp->DP |= DP_SYNC_HS_HIGH; 749 intel_dp->DP |= DP_SYNC_HS_HIGH;
@@ -813,6 +795,40 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
813 } 795 }
814} 796}
815 797
798static void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp)
799{
800 struct drm_device *dev = intel_dp->base.base.dev;
801 struct drm_i915_private *dev_priv = dev->dev_private;
802 u32 pp;
803
804 /*
805 * If the panel wasn't on, make sure there's not a currently
806 * active PP sequence before enabling AUX VDD.
807 */
808 if (!(I915_READ(PCH_PP_STATUS) & PP_ON))
809 msleep(dev_priv->panel_t3);
810
811 pp = I915_READ(PCH_PP_CONTROL);
812 pp |= EDP_FORCE_VDD;
813 I915_WRITE(PCH_PP_CONTROL, pp);
814 POSTING_READ(PCH_PP_CONTROL);
815}
816
817static void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp)
818{
819 struct drm_device *dev = intel_dp->base.base.dev;
820 struct drm_i915_private *dev_priv = dev->dev_private;
821 u32 pp;
822
823 pp = I915_READ(PCH_PP_CONTROL);
824 pp &= ~EDP_FORCE_VDD;
825 I915_WRITE(PCH_PP_CONTROL, pp);
826 POSTING_READ(PCH_PP_CONTROL);
827
828 /* Make sure sequencer is idle before allowing subsequent activity */
829 msleep(dev_priv->panel_t12);
830}
831
816/* Returns true if the panel was already on when called */ 832/* Returns true if the panel was already on when called */
817static bool ironlake_edp_panel_on (struct intel_dp *intel_dp) 833static bool ironlake_edp_panel_on (struct intel_dp *intel_dp)
818{ 834{
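The vdd_on/vdd_off helpers added above exist so AUX channel traffic can be bracketed: force VDD on (waiting out the panel's T3 delay if the panel was off), talk over AUX, then release VDD and wait out T12 before anything else touches the power sequencer. A stub sketch of the bracketing pattern, with the PCH_PP_CONTROL writes elided:

    #include <stdio.h>

    /* Stubs standing in for the helpers above; the real bodies toggle
     * EDP_FORCE_VDD in PCH_PP_CONTROL and sleep for T3/T12. */
    static void edp_panel_vdd_on(void)  { printf("VDD on (after T3)\n"); }
    static void edp_panel_vdd_off(void) { printf("VDD off (then T12)\n"); }

    static int edp_aux_transaction(void)
    {
        int ret;

        edp_panel_vdd_on();
        ret = 0;            /* the AUX read/write would happen here */
        edp_panel_vdd_off();
        return ret;
    }

    int main(void)
    {
        return edp_aux_transaction();
    }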
@@ -834,11 +850,6 @@ static bool ironlake_edp_panel_on (struct intel_dp *intel_dp)
834 I915_WRITE(PCH_PP_CONTROL, pp); 850 I915_WRITE(PCH_PP_CONTROL, pp);
835 POSTING_READ(PCH_PP_CONTROL); 851 POSTING_READ(PCH_PP_CONTROL);
836 852
837 /* Ouch. We need to wait here for some panels, like Dell e6510
838 * https://bugs.freedesktop.org/show_bug.cgi?id=29278i
839 */
840 msleep(300);
841
842 if (wait_for((I915_READ(PCH_PP_STATUS) & idle_on_mask) == idle_on_mask, 853 if (wait_for((I915_READ(PCH_PP_STATUS) & idle_on_mask) == idle_on_mask,
843 5000)) 854 5000))
844 DRM_ERROR("panel on wait timed out: 0x%08x\n", 855 DRM_ERROR("panel on wait timed out: 0x%08x\n",
@@ -875,11 +886,6 @@ static void ironlake_edp_panel_off (struct drm_device *dev)
875 pp |= PANEL_POWER_RESET; /* restore panel reset bit */ 886 pp |= PANEL_POWER_RESET; /* restore panel reset bit */
876 I915_WRITE(PCH_PP_CONTROL, pp); 887 I915_WRITE(PCH_PP_CONTROL, pp);
877 POSTING_READ(PCH_PP_CONTROL); 888 POSTING_READ(PCH_PP_CONTROL);
878
879 /* Ouch. We need to wait here for some panels, like Dell e6510
880 * https://bugs.freedesktop.org/show_bug.cgi?id=29278i
881 */
882 msleep(300);
883} 889}
884 890
885static void ironlake_edp_backlight_on (struct drm_device *dev) 891static void ironlake_edp_backlight_on (struct drm_device *dev)
@@ -945,7 +951,7 @@ static void intel_dp_prepare(struct drm_encoder *encoder)
945 951
946 if (is_edp(intel_dp)) { 952 if (is_edp(intel_dp)) {
947 ironlake_edp_backlight_off(dev); 953 ironlake_edp_backlight_off(dev);
948 ironlake_edp_panel_on(intel_dp); 954 ironlake_edp_panel_off(dev);
949 if (!is_pch_edp(intel_dp)) 955 if (!is_pch_edp(intel_dp))
950 ironlake_edp_pll_on(encoder); 956 ironlake_edp_pll_on(encoder);
951 else 957 else
@@ -959,10 +965,15 @@ static void intel_dp_commit(struct drm_encoder *encoder)
959 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 965 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
960 struct drm_device *dev = encoder->dev; 966 struct drm_device *dev = encoder->dev;
961 967
968 if (is_edp(intel_dp))
969 ironlake_edp_panel_vdd_on(intel_dp);
970
962 intel_dp_start_link_train(intel_dp); 971 intel_dp_start_link_train(intel_dp);
963 972
964 if (is_edp(intel_dp)) 973 if (is_edp(intel_dp)) {
965 ironlake_edp_panel_on(intel_dp); 974 ironlake_edp_panel_on(intel_dp);
975 ironlake_edp_panel_vdd_off(intel_dp);
976 }
966 977
967 intel_dp_complete_link_train(intel_dp); 978 intel_dp_complete_link_train(intel_dp);
968 979
@@ -988,9 +999,13 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode)
988 ironlake_edp_pll_off(encoder); 999 ironlake_edp_pll_off(encoder);
989 } else { 1000 } else {
990 if (is_edp(intel_dp)) 1001 if (is_edp(intel_dp))
991 ironlake_edp_panel_on(intel_dp); 1002 ironlake_edp_panel_vdd_on(intel_dp);
992 if (!(dp_reg & DP_PORT_EN)) { 1003 if (!(dp_reg & DP_PORT_EN)) {
993 intel_dp_start_link_train(intel_dp); 1004 intel_dp_start_link_train(intel_dp);
1005 if (is_edp(intel_dp)) {
1006 ironlake_edp_panel_on(intel_dp);
1007 ironlake_edp_panel_vdd_off(intel_dp);
1008 }
994 intel_dp_complete_link_train(intel_dp); 1009 intel_dp_complete_link_train(intel_dp);
995 } 1010 }
996 if (is_edp(intel_dp)) 1011 if (is_edp(intel_dp))
@@ -1508,9 +1523,13 @@ ironlake_dp_detect(struct intel_dp *intel_dp)
1508{ 1523{
1509 enum drm_connector_status status; 1524 enum drm_connector_status status;
1510 1525
1511 /* Can't disconnect eDP */ 1526 /* Can't disconnect eDP, but you can close the lid... */
1512 if (is_edp(intel_dp)) 1527 if (is_edp(intel_dp)) {
1513 return connector_status_connected; 1528 status = intel_panel_detect(intel_dp->base.base.dev);
1529 if (status == connector_status_unknown)
1530 status = connector_status_connected;
1531 return status;
1532 }
1514 1533
1515 status = connector_status_disconnected; 1534 status = connector_status_disconnected;
1516 if (intel_dp_aux_native_read(intel_dp, 1535 if (intel_dp_aux_native_read(intel_dp,
@@ -1639,11 +1658,30 @@ static int intel_dp_get_modes(struct drm_connector *connector)
1639 return 0; 1658 return 0;
1640} 1659}
1641 1660
1661static bool
1662intel_dp_detect_audio(struct drm_connector *connector)
1663{
1664 struct intel_dp *intel_dp = intel_attached_dp(connector);
1665 struct edid *edid;
1666 bool has_audio = false;
1667
1668 edid = drm_get_edid(connector, &intel_dp->adapter);
1669 if (edid) {
1670 has_audio = drm_detect_monitor_audio(edid);
1671
1672 connector->display_info.raw_edid = NULL;
1673 kfree(edid);
1674 }
1675
1676 return has_audio;
1677}
1678
1642static int 1679static int
1643intel_dp_set_property(struct drm_connector *connector, 1680intel_dp_set_property(struct drm_connector *connector,
1644 struct drm_property *property, 1681 struct drm_property *property,
1645 uint64_t val) 1682 uint64_t val)
1646{ 1683{
1684 struct drm_i915_private *dev_priv = connector->dev->dev_private;
1647 struct intel_dp *intel_dp = intel_attached_dp(connector); 1685 struct intel_dp *intel_dp = intel_attached_dp(connector);
1648 int ret; 1686 int ret;
1649 1687
@@ -1652,17 +1690,31 @@ intel_dp_set_property(struct drm_connector *connector,
1652 return ret; 1690 return ret;
1653 1691
1654 if (property == intel_dp->force_audio_property) { 1692 if (property == intel_dp->force_audio_property) {
1655 if (val == intel_dp->force_audio) 1693 int i = val;
1694 bool has_audio;
1695
1696 if (i == intel_dp->force_audio)
1656 return 0; 1697 return 0;
1657 1698
1658 intel_dp->force_audio = val; 1699 intel_dp->force_audio = i;
1659 1700
1660 if (val > 0 && intel_dp->has_audio) 1701 if (i == 0)
1702 has_audio = intel_dp_detect_audio(connector);
1703 else
1704 has_audio = i > 0;
1705
1706 if (has_audio == intel_dp->has_audio)
1661 return 0; 1707 return 0;
1662 if (val < 0 && !intel_dp->has_audio) 1708
1709 intel_dp->has_audio = has_audio;
1710 goto done;
1711 }
1712
1713 if (property == dev_priv->broadcast_rgb_property) {
1714 if (val == !!intel_dp->color_range)
1663 return 0; 1715 return 0;
1664 1716
1665 intel_dp->has_audio = val > 0; 1717 intel_dp->color_range = val ? DP_COLOR_RANGE_16_235 : 0;
1666 goto done; 1718 goto done;
1667 } 1719 }
1668 1720
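The rewritten handler above turns force_audio into a real tri-state: -1 forces audio off, 1 forces it on, and 0 defers to probing the sink's EDID; has_audio is now only rewritten when the effective value actually changes. The policy in isolation (detect_audio() stands in for intel_dp_detect_audio()):

    #include <stdbool.h>
    #include <stdio.h>

    static bool detect_audio(void)      /* stand-in for the EDID probe */
    {
        return true;
    }

    /* -1 = forced off, 0 = auto-detect from EDID, 1 = forced on. */
    static bool resolve_has_audio(int force_audio)
    {
        if (force_audio == 0)
            return detect_audio();
        return force_audio > 0;
    }

    int main(void)
    {
        for (int i = -1; i <= 1; i++)
            printf("force_audio=%2d -> has_audio=%d\n",
                   i, resolve_has_audio(i));
        return 0;
    }

The same pattern is applied to the HDMI connector later in this diff.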
@@ -1785,6 +1837,8 @@ intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connect
1785 intel_dp->force_audio_property->values[1] = 1; 1837 intel_dp->force_audio_property->values[1] = 1;
1786 drm_connector_attach_property(connector, intel_dp->force_audio_property, 0); 1838 drm_connector_attach_property(connector, intel_dp->force_audio_property, 0);
1787 } 1839 }
1840
1841 intel_attach_broadcast_rgb_property(connector);
1788} 1842}
1789 1843
1790void 1844void
@@ -1802,6 +1856,9 @@ intel_dp_init(struct drm_device *dev, int output_reg)
1802 if (!intel_dp) 1856 if (!intel_dp)
1803 return; 1857 return;
1804 1858
1859 intel_dp->output_reg = output_reg;
1860 intel_dp->dpms_mode = -1;
1861
1805 intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL); 1862 intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
1806 if (!intel_connector) { 1863 if (!intel_connector) {
1807 kfree(intel_dp); 1864 kfree(intel_dp);
@@ -1841,10 +1898,6 @@ intel_dp_init(struct drm_device *dev, int output_reg)
1841 connector->interlace_allowed = true; 1898 connector->interlace_allowed = true;
1842 connector->doublescan_allowed = 0; 1899 connector->doublescan_allowed = 0;
1843 1900
1844 intel_dp->output_reg = output_reg;
1845 intel_dp->has_audio = false;
1846 intel_dp->dpms_mode = DRM_MODE_DPMS_ON;
1847
1848 drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs, 1901 drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
1849 DRM_MODE_ENCODER_TMDS); 1902 DRM_MODE_ENCODER_TMDS);
1850 drm_encoder_helper_add(&intel_encoder->base, &intel_dp_helper_funcs); 1903 drm_encoder_helper_add(&intel_encoder->base, &intel_dp_helper_funcs);
@@ -1882,21 +1935,33 @@ intel_dp_init(struct drm_device *dev, int output_reg)
1882 /* Cache some DPCD data in the eDP case */ 1935 /* Cache some DPCD data in the eDP case */
1883 if (is_edp(intel_dp)) { 1936 if (is_edp(intel_dp)) {
1884 int ret; 1937 int ret;
1885 bool was_on; 1938 u32 pp_on, pp_div;
1939
1940 pp_on = I915_READ(PCH_PP_ON_DELAYS);
1941 pp_div = I915_READ(PCH_PP_DIVISOR);
1886 1942
1887 was_on = ironlake_edp_panel_on(intel_dp); 1943 /* Get T3 & T12 values (note: VESA not bspec terminology) */
1944 dev_priv->panel_t3 = (pp_on & 0x1fff0000) >> 16;
1945 dev_priv->panel_t3 /= 10; /* t3 in 100us units */
1946 dev_priv->panel_t12 = pp_div & 0xf;
1947 dev_priv->panel_t12 *= 100; /* t12 in 100ms units */
1948
1949 ironlake_edp_panel_vdd_on(intel_dp);
1888 ret = intel_dp_aux_native_read(intel_dp, DP_DPCD_REV, 1950 ret = intel_dp_aux_native_read(intel_dp, DP_DPCD_REV,
1889 intel_dp->dpcd, 1951 intel_dp->dpcd,
1890 sizeof(intel_dp->dpcd)); 1952 sizeof(intel_dp->dpcd));
1953 ironlake_edp_panel_vdd_off(intel_dp);
1891 if (ret == sizeof(intel_dp->dpcd)) { 1954 if (ret == sizeof(intel_dp->dpcd)) {
1892 if (intel_dp->dpcd[0] >= 0x11) 1955 if (intel_dp->dpcd[0] >= 0x11)
1893 dev_priv->no_aux_handshake = intel_dp->dpcd[3] & 1956 dev_priv->no_aux_handshake = intel_dp->dpcd[3] &
1894 DP_NO_AUX_HANDSHAKE_LINK_TRAINING; 1957 DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
1895 } else { 1958 } else {
1959 /* if this fails, presume the device is a ghost */
1896 DRM_ERROR("failed to retrieve link info\n"); 1960 DRM_ERROR("failed to retrieve link info\n");
1961 intel_dp_destroy(&intel_connector->base);
1962 intel_dp_encoder_destroy(&intel_dp->base.base);
1963 return;
1897 } 1964 }
1898 if (!was_on)
1899 ironlake_edp_panel_off(dev);
1900 } 1965 }
1901 1966
1902 intel_encoder->hot_plug = intel_dp_hot_plug; 1967 intel_encoder->hot_plug = intel_dp_hot_plug;
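The eDP block above reads the panel's T3 (power-up) and T12 (power-cycle) delays out of the PCH power-sequencer registers before the first AUX access, and treats a failed DPCD read as a ghost device worth tearing down. A worked decode of the delay fields, assuming invented register values:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        /* Hypothetical register contents, not read from hardware. */
        uint32_t pp_on  = 0x03e80000;   /* T3 field (bits 28:16) = 1000 */
        uint32_t pp_div = 0x00000005;   /* T12 field (bits 3:0)  = 5    */

        /* T3 is stored in 100us units and T12 in 100ms units; both come
         * out in milliseconds, mirroring the panel_t3/panel_t12
         * arithmetic in the hunk above. */
        unsigned t3  = ((pp_on & 0x1fff0000) >> 16) / 10;
        unsigned t12 = (pp_div & 0xf) * 100;

        printf("T3 = %u ms, T12 = %u ms\n", t3, t12);  /* 100 ms, 500 ms */
        return 0;
    }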
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 74db2557d644..5daa991cb287 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -217,6 +217,13 @@ intel_get_crtc_for_pipe(struct drm_device *dev, int pipe)
217 return dev_priv->pipe_to_crtc_mapping[pipe]; 217 return dev_priv->pipe_to_crtc_mapping[pipe];
218} 218}
219 219
220static inline struct drm_crtc *
221intel_get_crtc_for_plane(struct drm_device *dev, int plane)
222{
223 struct drm_i915_private *dev_priv = dev->dev_private;
224 return dev_priv->plane_to_crtc_mapping[plane];
225}
226
220struct intel_unpin_work { 227struct intel_unpin_work {
221 struct work_struct work; 228 struct work_struct work;
222 struct drm_device *dev; 229 struct drm_device *dev;
@@ -230,6 +237,8 @@ struct intel_unpin_work {
230int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter); 237int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter);
231extern bool intel_ddc_probe(struct intel_encoder *intel_encoder, int ddc_bus); 238extern bool intel_ddc_probe(struct intel_encoder *intel_encoder, int ddc_bus);
232 239
240extern void intel_attach_broadcast_rgb_property(struct drm_connector *connector);
241
233extern void intel_crt_init(struct drm_device *dev); 242extern void intel_crt_init(struct drm_device *dev);
234extern void intel_hdmi_init(struct drm_device *dev, int sdvox_reg); 243extern void intel_hdmi_init(struct drm_device *dev, int sdvox_reg);
235void intel_dip_infoframe_csum(struct dip_infoframe *avi_if); 244void intel_dip_infoframe_csum(struct dip_infoframe *avi_if);
@@ -260,6 +269,7 @@ extern void intel_panel_set_backlight(struct drm_device *dev, u32 level);
260extern void intel_panel_setup_backlight(struct drm_device *dev); 269extern void intel_panel_setup_backlight(struct drm_device *dev);
261extern void intel_panel_enable_backlight(struct drm_device *dev); 270extern void intel_panel_enable_backlight(struct drm_device *dev);
262extern void intel_panel_disable_backlight(struct drm_device *dev); 271extern void intel_panel_disable_backlight(struct drm_device *dev);
272extern enum drm_connector_status intel_panel_detect(struct drm_device *dev);
263 273
264extern void intel_crtc_load_lut(struct drm_crtc *crtc); 274extern void intel_crtc_load_lut(struct drm_crtc *crtc);
265extern void intel_encoder_prepare (struct drm_encoder *encoder); 275extern void intel_encoder_prepare (struct drm_encoder *encoder);
@@ -298,7 +308,6 @@ extern void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
298extern void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green, 308extern void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
299 u16 *blue, int regno); 309 u16 *blue, int regno);
300extern void intel_enable_clock_gating(struct drm_device *dev); 310extern void intel_enable_clock_gating(struct drm_device *dev);
301extern void intel_disable_clock_gating(struct drm_device *dev);
302extern void ironlake_enable_drps(struct drm_device *dev); 311extern void ironlake_enable_drps(struct drm_device *dev);
303extern void ironlake_disable_drps(struct drm_device *dev); 312extern void ironlake_disable_drps(struct drm_device *dev);
304extern void gen6_enable_rps(struct drm_i915_private *dev_priv); 313extern void gen6_enable_rps(struct drm_i915_private *dev_priv);
@@ -322,8 +331,7 @@ extern void intel_finish_page_flip_plane(struct drm_device *dev, int plane);
322 331
323extern void intel_setup_overlay(struct drm_device *dev); 332extern void intel_setup_overlay(struct drm_device *dev);
324extern void intel_cleanup_overlay(struct drm_device *dev); 333extern void intel_cleanup_overlay(struct drm_device *dev);
325extern int intel_overlay_switch_off(struct intel_overlay *overlay, 334extern int intel_overlay_switch_off(struct intel_overlay *overlay);
326 bool interruptible);
327extern int intel_overlay_put_image(struct drm_device *dev, void *data, 335extern int intel_overlay_put_image(struct drm_device *dev, void *data,
328 struct drm_file *file_priv); 336 struct drm_file *file_priv);
329extern int intel_overlay_attrs(struct drm_device *dev, void *data, 337extern int intel_overlay_attrs(struct drm_device *dev, void *data,
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index ea373283c93b..6eda1b51c636 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -178,7 +178,7 @@ static void intel_dvo_mode_set(struct drm_encoder *encoder,
178 int pipe = intel_crtc->pipe; 178 int pipe = intel_crtc->pipe;
179 u32 dvo_val; 179 u32 dvo_val;
180 u32 dvo_reg = intel_dvo->dev.dvo_reg, dvo_srcdim_reg; 180 u32 dvo_reg = intel_dvo->dev.dvo_reg, dvo_srcdim_reg;
181 int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B; 181 int dpll_reg = DPLL(pipe);
182 182
183 switch (dvo_reg) { 183 switch (dvo_reg) {
184 case DVOA: 184 case DVOA:
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 0d0273e7b029..f289b8642976 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -41,6 +41,7 @@ struct intel_hdmi {
41 struct intel_encoder base; 41 struct intel_encoder base;
42 u32 sdvox_reg; 42 u32 sdvox_reg;
43 int ddc_bus; 43 int ddc_bus;
44 uint32_t color_range;
44 bool has_hdmi_sink; 45 bool has_hdmi_sink;
45 bool has_audio; 46 bool has_audio;
46 int force_audio; 47 int force_audio;
@@ -124,6 +125,7 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder,
124 u32 sdvox; 125 u32 sdvox;
125 126
126 sdvox = SDVO_ENCODING_HDMI | SDVO_BORDER_ENABLE; 127 sdvox = SDVO_ENCODING_HDMI | SDVO_BORDER_ENABLE;
128 sdvox |= intel_hdmi->color_range;
127 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) 129 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
128 sdvox |= SDVO_VSYNC_ACTIVE_HIGH; 130 sdvox |= SDVO_VSYNC_ACTIVE_HIGH;
129 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) 131 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
@@ -251,12 +253,34 @@ static int intel_hdmi_get_modes(struct drm_connector *connector)
251 &dev_priv->gmbus[intel_hdmi->ddc_bus].adapter); 253 &dev_priv->gmbus[intel_hdmi->ddc_bus].adapter);
252} 254}
253 255
256static bool
257intel_hdmi_detect_audio(struct drm_connector *connector)
258{
259 struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
260 struct drm_i915_private *dev_priv = connector->dev->dev_private;
261 struct edid *edid;
262 bool has_audio = false;
263
264 edid = drm_get_edid(connector,
265 &dev_priv->gmbus[intel_hdmi->ddc_bus].adapter);
266 if (edid) {
267 if (edid->input & DRM_EDID_INPUT_DIGITAL)
268 has_audio = drm_detect_monitor_audio(edid);
269
270 connector->display_info.raw_edid = NULL;
271 kfree(edid);
272 }
273
274 return has_audio;
275}
276
254static int 277static int
255intel_hdmi_set_property(struct drm_connector *connector, 278intel_hdmi_set_property(struct drm_connector *connector,
256 struct drm_property *property, 279 struct drm_property *property,
257 uint64_t val) 280 uint64_t val)
258{ 281{
259 struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector); 282 struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
283 struct drm_i915_private *dev_priv = connector->dev->dev_private;
260 int ret; 284 int ret;
261 285
262 ret = drm_connector_property_set_value(connector, property, val); 286 ret = drm_connector_property_set_value(connector, property, val);
@@ -264,17 +288,31 @@ intel_hdmi_set_property(struct drm_connector *connector,
264 return ret; 288 return ret;
265 289
266 if (property == intel_hdmi->force_audio_property) { 290 if (property == intel_hdmi->force_audio_property) {
267 if (val == intel_hdmi->force_audio) 291 int i = val;
292 bool has_audio;
293
294 if (i == intel_hdmi->force_audio)
268 return 0; 295 return 0;
269 296
270 intel_hdmi->force_audio = val; 297 intel_hdmi->force_audio = i;
298
299 if (i == 0)
300 has_audio = intel_hdmi_detect_audio(connector);
301 else
302 has_audio = i > 0;
271 303
272 if (val > 0 && intel_hdmi->has_audio) 304 if (has_audio == intel_hdmi->has_audio)
273 return 0; 305 return 0;
274 if (val < 0 && !intel_hdmi->has_audio) 306
307 intel_hdmi->has_audio = has_audio;
308 goto done;
309 }
310
311 if (property == dev_priv->broadcast_rgb_property) {
312 if (val == !!intel_hdmi->color_range)
275 return 0; 313 return 0;
276 314
277 intel_hdmi->has_audio = val > 0; 315 intel_hdmi->color_range = val ? SDVO_COLOR_RANGE_16_235 : 0;
278 goto done; 316 goto done;
279 } 317 }
280 318
@@ -336,6 +374,8 @@ intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *c
336 intel_hdmi->force_audio_property->values[1] = 1; 374 intel_hdmi->force_audio_property->values[1] = 1;
337 drm_connector_attach_property(connector, intel_hdmi->force_audio_property, 0); 375 drm_connector_attach_property(connector, intel_hdmi->force_audio_property, 0);
338 } 376 }
377
378 intel_attach_broadcast_rgb_property(connector);
339} 379}
340 380
341void intel_hdmi_init(struct drm_device *dev, int sdvox_reg) 381void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index 58040f68ed7a..82d04c5899d2 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -384,7 +384,8 @@ int intel_setup_gmbus(struct drm_device *dev)
384 bus->reg0 = i | GMBUS_RATE_100KHZ; 384 bus->reg0 = i | GMBUS_RATE_100KHZ;
385 385
386 /* XXX force bit banging until GMBUS is fully debugged */ 386 /* XXX force bit banging until GMBUS is fully debugged */
387 bus->force_bit = intel_gpio_create(dev_priv, i); 387 if (IS_GEN2(dev))
388 bus->force_bit = intel_gpio_create(dev_priv, i);
388 } 389 }
389 390
390 intel_i2c_reset(dev_priv->dev); 391 intel_i2c_reset(dev_priv->dev);
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index ace8d5d30dd2..1a311ad01116 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -231,6 +231,7 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
231 struct intel_lvds *intel_lvds = to_intel_lvds(encoder); 231 struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
232 struct drm_encoder *tmp_encoder; 232 struct drm_encoder *tmp_encoder;
233 u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0; 233 u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0;
234 int pipe;
234 235
235 /* Should never happen!! */ 236 /* Should never happen!! */
236 if (INTEL_INFO(dev)->gen < 4 && intel_crtc->pipe == 0) { 237 if (INTEL_INFO(dev)->gen < 4 && intel_crtc->pipe == 0) {
@@ -261,12 +262,6 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
261 return true; 262 return true;
262 } 263 }
263 264
264 /* Make sure pre-965s set dither correctly */
265 if (INTEL_INFO(dev)->gen < 4) {
266 if (dev_priv->lvds_dither)
267 pfit_control |= PANEL_8TO6_DITHER_ENABLE;
268 }
269
270 /* Native modes don't need fitting */ 265 /* Native modes don't need fitting */
271 if (adjusted_mode->hdisplay == mode->hdisplay && 266 if (adjusted_mode->hdisplay == mode->hdisplay &&
272 adjusted_mode->vdisplay == mode->vdisplay) 267 adjusted_mode->vdisplay == mode->vdisplay)
@@ -283,8 +278,8 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
283 * to register description and PRM. 278 * to register description and PRM.
284 * Change the value here to see the borders for debugging 279 * Change the value here to see the borders for debugging
285 */ 280 */
286 I915_WRITE(BCLRPAT_A, 0); 281 for_each_pipe(pipe)
287 I915_WRITE(BCLRPAT_B, 0); 282 I915_WRITE(BCLRPAT(pipe), 0);
288 283
289 switch (intel_lvds->fitting_mode) { 284 switch (intel_lvds->fitting_mode) {
290 case DRM_MODE_SCALE_CENTER: 285 case DRM_MODE_SCALE_CENTER:
@@ -374,10 +369,16 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
374 } 369 }
375 370
376out: 371out:
372 /* If not enabling scaling, be consistent and always use 0. */
377 if ((pfit_control & PFIT_ENABLE) == 0) { 373 if ((pfit_control & PFIT_ENABLE) == 0) {
378 pfit_control = 0; 374 pfit_control = 0;
379 pfit_pgm_ratios = 0; 375 pfit_pgm_ratios = 0;
380 } 376 }
377
378 /* Make sure pre-965s set dither correctly */
379 if (INTEL_INFO(dev)->gen < 4 && dev_priv->lvds_dither)
380 pfit_control |= PANEL_8TO6_DITHER_ENABLE;
381
381 if (pfit_control != intel_lvds->pfit_control || 382 if (pfit_control != intel_lvds->pfit_control ||
382 pfit_pgm_ratios != intel_lvds->pfit_pgm_ratios) { 383 pfit_pgm_ratios != intel_lvds->pfit_pgm_ratios) {
383 intel_lvds->pfit_control = pfit_control; 384 intel_lvds->pfit_control = pfit_control;
@@ -474,6 +475,10 @@ intel_lvds_detect(struct drm_connector *connector, bool force)
474 struct drm_device *dev = connector->dev; 475 struct drm_device *dev = connector->dev;
475 enum drm_connector_status status = connector_status_connected; 476 enum drm_connector_status status = connector_status_connected;
476 477
478 status = intel_panel_detect(dev);
479 if (status != connector_status_unknown)
480 return status;
481
477 /* ACPI lid methods were generally unreliable in this generation, so 482 /* ACPI lid methods were generally unreliable in this generation, so
478 * don't even bother. 483 * don't even bother.
479 */ 484 */
@@ -496,7 +501,7 @@ static int intel_lvds_get_modes(struct drm_connector *connector)
496 return drm_add_edid_modes(connector, intel_lvds->edid); 501 return drm_add_edid_modes(connector, intel_lvds->edid);
497 502
498 mode = drm_mode_duplicate(dev, intel_lvds->fixed_mode); 503 mode = drm_mode_duplicate(dev, intel_lvds->fixed_mode);
499 if (mode == 0) 504 if (mode == NULL)
500 return 0; 505 return 0;
501 506
502 drm_mode_probed_add(connector, mode); 507 drm_mode_probed_add(connector, mode);
diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_modes.c
index f70b7cf32bff..9034dd8f33c7 100644
--- a/drivers/gpu/drm/i915/intel_modes.c
+++ b/drivers/gpu/drm/i915/intel_modes.c
@@ -80,3 +80,33 @@ int intel_ddc_get_modes(struct drm_connector *connector,
80 80
81 return ret; 81 return ret;
82} 82}
83
84static const char *broadcast_rgb_names[] = {
85 "Full",
86 "Limited 16:235",
87};
88
89void
90intel_attach_broadcast_rgb_property(struct drm_connector *connector)
91{
92 struct drm_device *dev = connector->dev;
93 struct drm_i915_private *dev_priv = dev->dev_private;
94 struct drm_property *prop;
95 int i;
96
97 prop = dev_priv->broadcast_rgb_property;
98 if (prop == NULL) {
99 prop = drm_property_create(dev, DRM_MODE_PROP_ENUM,
100 "Broadcast RGB",
101 ARRAY_SIZE(broadcast_rgb_names));
102 if (prop == NULL)
103 return;
104
105 for (i = 0; i < ARRAY_SIZE(broadcast_rgb_names); i++)
106 drm_property_add_enum(prop, i, i, broadcast_rgb_names[i]);
107
108 dev_priv->broadcast_rgb_property = prop;
109 }
110
111 drm_connector_attach_property(connector, prop, 0);
112}
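The property attached above exposes two enum values, 0 ("Full") and 1 ("Limited 16:235"). In the connector set_property handlers, the !! collapses the stored hardware bit back to 0/1 so it can be compared against the incoming value before anything is rewritten; a self-contained sketch (the bit position is illustrative, not the real register encoding):

    #include <stdio.h>

    #define DP_COLOR_RANGE_16_235 (1 << 8)  /* illustrative bit only */

    int main(void)
    {
        unsigned color_range = 0;   /* current state: full range */
        int val = 1;                /* userspace wrote "Limited 16:235" */

        if (val != !!color_range)   /* only act on a real change */
            color_range = val ? DP_COLOR_RANGE_16_235 : 0;

        printf("color_range = 0x%x\n", color_range);
        return 0;
    }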
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index 64fd64443ca6..d2c710422908 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -39,6 +39,8 @@
39 39
40#define OPREGION_HEADER_OFFSET 0 40#define OPREGION_HEADER_OFFSET 0
41#define OPREGION_ACPI_OFFSET 0x100 41#define OPREGION_ACPI_OFFSET 0x100
42#define ACPI_CLID 0x01ac /* current lid state indicator */
43#define ACPI_CDCK 0x01b0 /* current docking state indicator */
42#define OPREGION_SWSCI_OFFSET 0x200 44#define OPREGION_SWSCI_OFFSET 0x200
43#define OPREGION_ASLE_OFFSET 0x300 45#define OPREGION_ASLE_OFFSET 0x300
44#define OPREGION_VBT_OFFSET 0x400 46#define OPREGION_VBT_OFFSET 0x400
@@ -489,6 +491,8 @@ int intel_opregion_setup(struct drm_device *dev)
489 opregion->header = base; 491 opregion->header = base;
490 opregion->vbt = base + OPREGION_VBT_OFFSET; 492 opregion->vbt = base + OPREGION_VBT_OFFSET;
491 493
494 opregion->lid_state = base + ACPI_CLID;
495
492 mboxes = opregion->header->mboxes; 496 mboxes = opregion->header->mboxes;
493 if (mboxes & MBOX_ACPI) { 497 if (mboxes & MBOX_ACPI) {
494 DRM_DEBUG_DRIVER("Public ACPI methods supported\n"); 498 DRM_DEBUG_DRIVER("Public ACPI methods supported\n");
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 3fbb98b948d6..a670c006982e 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -213,7 +213,6 @@ static void intel_overlay_unmap_regs(struct intel_overlay *overlay,
213 213
214static int intel_overlay_do_wait_request(struct intel_overlay *overlay, 214static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
215 struct drm_i915_gem_request *request, 215 struct drm_i915_gem_request *request,
216 bool interruptible,
217 void (*tail)(struct intel_overlay *)) 216 void (*tail)(struct intel_overlay *))
218{ 217{
219 struct drm_device *dev = overlay->dev; 218 struct drm_device *dev = overlay->dev;
@@ -221,16 +220,14 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
221 int ret; 220 int ret;
222 221
223 BUG_ON(overlay->last_flip_req); 222 BUG_ON(overlay->last_flip_req);
224 ret = i915_add_request(dev, NULL, request, LP_RING(dev_priv)); 223 ret = i915_add_request(LP_RING(dev_priv), NULL, request);
225 if (ret) { 224 if (ret) {
226 kfree(request); 225 kfree(request);
227 return ret; 226 return ret;
228 } 227 }
229 overlay->last_flip_req = request->seqno; 228 overlay->last_flip_req = request->seqno;
230 overlay->flip_tail = tail; 229 overlay->flip_tail = tail;
231 ret = i915_do_wait_request(dev, 230 ret = i915_wait_request(LP_RING(dev_priv), overlay->last_flip_req);
232 overlay->last_flip_req, true,
233 LP_RING(dev_priv));
234 if (ret) 231 if (ret)
235 return ret; 232 return ret;
236 233
@@ -256,7 +253,7 @@ i830_activate_pipe_a(struct drm_device *dev)
256 return 0; 253 return 0;
257 254
258 /* most i8xx have pipe a forced on, so don't trust dpms mode */ 255 /* most i8xx have pipe a forced on, so don't trust dpms mode */
259 if (I915_READ(PIPEACONF) & PIPECONF_ENABLE) 256 if (I915_READ(_PIPEACONF) & PIPECONF_ENABLE)
260 return 0; 257 return 0;
261 258
262 crtc_funcs = crtc->base.helper_private; 259 crtc_funcs = crtc->base.helper_private;
@@ -322,7 +319,7 @@ static int intel_overlay_on(struct intel_overlay *overlay)
322 OUT_RING(MI_NOOP); 319 OUT_RING(MI_NOOP);
323 ADVANCE_LP_RING(); 320 ADVANCE_LP_RING();
324 321
325 ret = intel_overlay_do_wait_request(overlay, request, true, NULL); 322 ret = intel_overlay_do_wait_request(overlay, request, NULL);
326out: 323out:
327 if (pipe_a_quirk) 324 if (pipe_a_quirk)
328 i830_deactivate_pipe_a(dev); 325 i830_deactivate_pipe_a(dev);
@@ -364,7 +361,7 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
364 OUT_RING(flip_addr); 361 OUT_RING(flip_addr);
365 ADVANCE_LP_RING(); 362 ADVANCE_LP_RING();
366 363
367 ret = i915_add_request(dev, NULL, request, LP_RING(dev_priv)); 364 ret = i915_add_request(LP_RING(dev_priv), NULL, request);
368 if (ret) { 365 if (ret) {
369 kfree(request); 366 kfree(request);
370 return ret; 367 return ret;
@@ -401,8 +398,7 @@ static void intel_overlay_off_tail(struct intel_overlay *overlay)
401} 398}
402 399
403/* overlay needs to be disabled in OCMD reg */ 400/* overlay needs to be disabled in OCMD reg */
404static int intel_overlay_off(struct intel_overlay *overlay, 401static int intel_overlay_off(struct intel_overlay *overlay)
405 bool interruptible)
406{ 402{
407 struct drm_device *dev = overlay->dev; 403 struct drm_device *dev = overlay->dev;
408 struct drm_i915_private *dev_priv = dev->dev_private; 404 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -437,14 +433,13 @@ static int intel_overlay_off(struct intel_overlay *overlay,
437 OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); 433 OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
438 ADVANCE_LP_RING(); 434 ADVANCE_LP_RING();
439 435
440 return intel_overlay_do_wait_request(overlay, request, interruptible, 436 return intel_overlay_do_wait_request(overlay, request,
441 intel_overlay_off_tail); 437 intel_overlay_off_tail);
442} 438}
443 439
444/* recover from an interruption due to a signal 440/* recover from an interruption due to a signal
445 * We have to be careful not to repeat work forever and make forward progress. */ 441 * We have to be careful not to repeat work forever and make forward progress. */
446static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay, 442static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay)
447 bool interruptible)
448{ 443{
449 struct drm_device *dev = overlay->dev; 444 struct drm_device *dev = overlay->dev;
450 drm_i915_private_t *dev_priv = dev->dev_private; 445 drm_i915_private_t *dev_priv = dev->dev_private;
@@ -453,8 +448,7 @@ static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay,
453 if (overlay->last_flip_req == 0) 448 if (overlay->last_flip_req == 0)
454 return 0; 449 return 0;
455 450
456 ret = i915_do_wait_request(dev, overlay->last_flip_req, 451 ret = i915_wait_request(LP_RING(dev_priv), overlay->last_flip_req);
457 interruptible, LP_RING(dev_priv));
458 if (ret) 452 if (ret)
459 return ret; 453 return ret;
460 454
@@ -499,7 +493,7 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
499 OUT_RING(MI_NOOP); 493 OUT_RING(MI_NOOP);
500 ADVANCE_LP_RING(); 494 ADVANCE_LP_RING();
501 495
502 ret = intel_overlay_do_wait_request(overlay, request, true, 496 ret = intel_overlay_do_wait_request(overlay, request,
503 intel_overlay_release_old_vid_tail); 497 intel_overlay_release_old_vid_tail);
504 if (ret) 498 if (ret)
505 return ret; 499 return ret;
@@ -868,8 +862,7 @@ out_unpin:
868 return ret; 862 return ret;
869} 863}
870 864
871int intel_overlay_switch_off(struct intel_overlay *overlay, 865int intel_overlay_switch_off(struct intel_overlay *overlay)
872 bool interruptible)
873{ 866{
874 struct overlay_registers *regs; 867 struct overlay_registers *regs;
875 struct drm_device *dev = overlay->dev; 868 struct drm_device *dev = overlay->dev;
@@ -878,7 +871,7 @@ int intel_overlay_switch_off(struct intel_overlay *overlay,
878 BUG_ON(!mutex_is_locked(&dev->struct_mutex)); 871 BUG_ON(!mutex_is_locked(&dev->struct_mutex));
879 BUG_ON(!mutex_is_locked(&dev->mode_config.mutex)); 872 BUG_ON(!mutex_is_locked(&dev->mode_config.mutex));
880 873
881 ret = intel_overlay_recover_from_interrupt(overlay, interruptible); 874 ret = intel_overlay_recover_from_interrupt(overlay);
882 if (ret != 0) 875 if (ret != 0)
883 return ret; 876 return ret;
884 877
@@ -893,7 +886,7 @@ int intel_overlay_switch_off(struct intel_overlay *overlay,
893 regs->OCMD = 0; 886 regs->OCMD = 0;
894 intel_overlay_unmap_regs(overlay, regs); 887 intel_overlay_unmap_regs(overlay, regs);
895 888
896 ret = intel_overlay_off(overlay, interruptible); 889 ret = intel_overlay_off(overlay);
897 if (ret != 0) 890 if (ret != 0)
898 return ret; 891 return ret;
899 892
@@ -1135,7 +1128,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
1135 mutex_lock(&dev->mode_config.mutex); 1128 mutex_lock(&dev->mode_config.mutex);
1136 mutex_lock(&dev->struct_mutex); 1129 mutex_lock(&dev->struct_mutex);
1137 1130
1138 ret = intel_overlay_switch_off(overlay, true); 1131 ret = intel_overlay_switch_off(overlay);
1139 1132
1140 mutex_unlock(&dev->struct_mutex); 1133 mutex_unlock(&dev->struct_mutex);
1141 mutex_unlock(&dev->mode_config.mutex); 1134 mutex_unlock(&dev->mode_config.mutex);
@@ -1157,7 +1150,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
1157 1150
1158 new_bo = to_intel_bo(drm_gem_object_lookup(dev, file_priv, 1151 new_bo = to_intel_bo(drm_gem_object_lookup(dev, file_priv,
1159 put_image_rec->bo_handle)); 1152 put_image_rec->bo_handle));
1160 if (!new_bo) { 1153 if (&new_bo->base == NULL) {
1161 ret = -ENOENT; 1154 ret = -ENOENT;
1162 goto out_free; 1155 goto out_free;
1163 } 1156 }
@@ -1171,13 +1164,13 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
1171 goto out_unlock; 1164 goto out_unlock;
1172 } 1165 }
1173 1166
1174 ret = intel_overlay_recover_from_interrupt(overlay, true); 1167 ret = intel_overlay_recover_from_interrupt(overlay);
1175 if (ret != 0) 1168 if (ret != 0)
1176 goto out_unlock; 1169 goto out_unlock;
1177 1170
1178 if (overlay->crtc != crtc) { 1171 if (overlay->crtc != crtc) {
1179 struct drm_display_mode *mode = &crtc->base.mode; 1172 struct drm_display_mode *mode = &crtc->base.mode;
1180 ret = intel_overlay_switch_off(overlay, true); 1173 ret = intel_overlay_switch_off(overlay);
1181 if (ret != 0) 1174 if (ret != 0)
1182 goto out_unlock; 1175 goto out_unlock;
1183 1176
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index c65992df458d..18391b3ec2c1 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -30,8 +30,6 @@
30 30
31#include "intel_drv.h" 31#include "intel_drv.h"
32 32
33#define PCI_LBPC 0xf4 /* legacy/combination backlight modes */
34
35void 33void
36intel_fixed_panel_mode(struct drm_display_mode *fixed_mode, 34intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
37 struct drm_display_mode *adjusted_mode) 35 struct drm_display_mode *adjusted_mode)
@@ -112,19 +110,6 @@ done:
112 dev_priv->pch_pf_size = (width << 16) | height; 110 dev_priv->pch_pf_size = (width << 16) | height;
113} 111}
114 112
115static int is_backlight_combination_mode(struct drm_device *dev)
116{
117 struct drm_i915_private *dev_priv = dev->dev_private;
118
119 if (INTEL_INFO(dev)->gen >= 4)
120 return I915_READ(BLC_PWM_CTL2) & BLM_COMBINATION_MODE;
121
122 if (IS_GEN2(dev))
123 return I915_READ(BLC_PWM_CTL) & BLM_LEGACY_MODE;
124
125 return 0;
126}
127
128static u32 i915_read_blc_pwm_ctl(struct drm_i915_private *dev_priv) 113static u32 i915_read_blc_pwm_ctl(struct drm_i915_private *dev_priv)
129{ 114{
130 u32 val; 115 u32 val;
@@ -181,9 +166,6 @@ u32 intel_panel_get_max_backlight(struct drm_device *dev)
181 if (INTEL_INFO(dev)->gen < 4) 166 if (INTEL_INFO(dev)->gen < 4)
182 max &= ~1; 167 max &= ~1;
183 } 168 }
184
185 if (is_backlight_combination_mode(dev))
186 max *= 0xff;
187 } 169 }
188 170
189 DRM_DEBUG_DRIVER("max backlight PWM = %d\n", max); 171 DRM_DEBUG_DRIVER("max backlight PWM = %d\n", max);
@@ -201,15 +183,6 @@ u32 intel_panel_get_backlight(struct drm_device *dev)
201 val = I915_READ(BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK; 183 val = I915_READ(BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
202 if (IS_PINEVIEW(dev)) 184 if (IS_PINEVIEW(dev))
203 val >>= 1; 185 val >>= 1;
204
205 if (is_backlight_combination_mode(dev)){
206 u8 lbpc;
207
208 val &= ~1;
209 pci_read_config_byte(dev->pdev, PCI_LBPC, &lbpc);
210 val *= lbpc;
211 val >>= 1;
212 }
213 } 186 }
214 187
215 DRM_DEBUG_DRIVER("get backlight PWM = %d\n", val); 188 DRM_DEBUG_DRIVER("get backlight PWM = %d\n", val);
@@ -232,16 +205,6 @@ void intel_panel_set_backlight(struct drm_device *dev, u32 level)
232 205
233 if (HAS_PCH_SPLIT(dev)) 206 if (HAS_PCH_SPLIT(dev))
234 return intel_pch_panel_set_backlight(dev, level); 207 return intel_pch_panel_set_backlight(dev, level);
235
236 if (is_backlight_combination_mode(dev)){
237 u32 max = intel_panel_get_max_backlight(dev);
238 u8 lpbc;
239
240 lpbc = level * 0xfe / max + 1;
241 level /= lpbc;
242 pci_write_config_byte(dev->pdev, PCI_LBPC, lpbc);
243 }
244
245 tmp = I915_READ(BLC_PWM_CTL); 208 tmp = I915_READ(BLC_PWM_CTL);
246 if (IS_PINEVIEW(dev)) { 209 if (IS_PINEVIEW(dev)) {
247 tmp &= ~(BACKLIGHT_DUTY_CYCLE_MASK - 1); 210 tmp &= ~(BACKLIGHT_DUTY_CYCLE_MASK - 1);
@@ -281,3 +244,22 @@ void intel_panel_setup_backlight(struct drm_device *dev)
281 dev_priv->backlight_level = intel_panel_get_backlight(dev); 244 dev_priv->backlight_level = intel_panel_get_backlight(dev);
282 dev_priv->backlight_enabled = dev_priv->backlight_level != 0; 245 dev_priv->backlight_enabled = dev_priv->backlight_level != 0;
283} 246}
247
248enum drm_connector_status
249intel_panel_detect(struct drm_device *dev)
250{
251 struct drm_i915_private *dev_priv = dev->dev_private;
252
253 if (i915_panel_ignore_lid)
254 return i915_panel_ignore_lid > 0 ?
255 connector_status_connected :
256 connector_status_disconnected;
257
258 /* Assume that the BIOS does not lie through the OpRegion... */
259 if (dev_priv->opregion.lid_state)
260 return ioread32(dev_priv->opregion.lid_state) & 0x1 ?
261 connector_status_connected :
262 connector_status_disconnected;
263
264 return connector_status_unknown;
265}
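intel_panel_detect() gives every panel connector one shared policy: the i915_panel_ignore_lid module parameter wins outright, then the ACPI OpRegion lid word (bit 0 set = lid open) decides, and with neither source available the caller gets "unknown" and falls back to its own heuristics. A self-contained mirror of that policy:

    #include <stdio.h>

    enum status { DISCONNECTED, CONNECTED, UNKNOWN };

    /* lid_state is NULL when the OpRegion does not expose the field. */
    static enum status panel_detect(int ignore_lid, const unsigned *lid_state)
    {
        if (ignore_lid)
            return ignore_lid > 0 ? CONNECTED : DISCONNECTED;
        if (lid_state)
            return (*lid_state & 1) ? CONNECTED : DISCONNECTED;
        return UNKNOWN;
    }

    int main(void)
    {
        unsigned lid = 1;
        printf("%d %d %d\n",
               panel_detect(0, &lid),   /* lid open  -> CONNECTED    */
               panel_detect(-1, &lid),  /* forced    -> DISCONNECTED */
               panel_detect(0, NULL));  /* no source -> UNKNOWN      */
        return 0;
    }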
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 6218fa97aa1e..789c47801ba8 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -62,18 +62,9 @@ render_ring_flush(struct intel_ring_buffer *ring,
62 u32 flush_domains) 62 u32 flush_domains)
63{ 63{
64 struct drm_device *dev = ring->dev; 64 struct drm_device *dev = ring->dev;
65 drm_i915_private_t *dev_priv = dev->dev_private;
66 u32 cmd; 65 u32 cmd;
67 int ret; 66 int ret;
68 67
69#if WATCH_EXEC
70 DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
71 invalidate_domains, flush_domains);
72#endif
73
74 trace_i915_gem_request_flush(dev, dev_priv->next_seqno,
75 invalidate_domains, flush_domains);
76
77 if ((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) { 68 if ((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) {
78 /* 69 /*
79 * read/write caches: 70 * read/write caches:
@@ -122,9 +113,6 @@ render_ring_flush(struct intel_ring_buffer *ring,
122 (IS_G4X(dev) || IS_GEN5(dev))) 113 (IS_G4X(dev) || IS_GEN5(dev)))
123 cmd |= MI_INVALIDATE_ISP; 114 cmd |= MI_INVALIDATE_ISP;
124 115
125#if WATCH_EXEC
126 DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd);
127#endif
128 ret = intel_ring_begin(ring, 2); 116 ret = intel_ring_begin(ring, 2);
129 if (ret) 117 if (ret)
130 return ret; 118 return ret;
@@ -612,7 +600,6 @@ ring_add_request(struct intel_ring_buffer *ring,
612 intel_ring_emit(ring, MI_USER_INTERRUPT); 600 intel_ring_emit(ring, MI_USER_INTERRUPT);
613 intel_ring_advance(ring); 601 intel_ring_advance(ring);
614 602
615 DRM_DEBUG_DRIVER("%s %d\n", ring->name, seqno);
616 *result = seqno; 603 *result = seqno;
617 return 0; 604 return 0;
618} 605}
@@ -715,11 +702,8 @@ render_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
715 u32 offset, u32 len) 702 u32 offset, u32 len)
716{ 703{
717 struct drm_device *dev = ring->dev; 704 struct drm_device *dev = ring->dev;
718 drm_i915_private_t *dev_priv = dev->dev_private;
719 int ret; 705 int ret;
720 706
721 trace_i915_gem_request_submit(dev, dev_priv->next_seqno + 1);
722
723 if (IS_I830(dev) || IS_845G(dev)) { 707 if (IS_I830(dev) || IS_845G(dev)) {
724 ret = intel_ring_begin(ring, 4); 708 ret = intel_ring_begin(ring, 4);
725 if (ret) 709 if (ret)
@@ -894,6 +878,10 @@ void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
894 /* Disable the ring buffer. The ring must be idle at this point */ 878 /* Disable the ring buffer. The ring must be idle at this point */
895 dev_priv = ring->dev->dev_private; 879 dev_priv = ring->dev->dev_private;
896 ret = intel_wait_ring_buffer(ring, ring->size - 8); 880 ret = intel_wait_ring_buffer(ring, ring->size - 8);
881 if (ret)
882 DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
883 ring->name, ret);
884
897 I915_WRITE_CTL(ring, 0); 885 I915_WRITE_CTL(ring, 0);
898 886
899 drm_core_ioremapfree(&ring->map, ring->dev); 887 drm_core_ioremapfree(&ring->map, ring->dev);
@@ -950,13 +938,13 @@ int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
950 return 0; 938 return 0;
951 } 939 }
952 940
953 trace_i915_ring_wait_begin (dev); 941 trace_i915_ring_wait_begin(ring);
954 end = jiffies + 3 * HZ; 942 end = jiffies + 3 * HZ;
955 do { 943 do {
956 ring->head = I915_READ_HEAD(ring); 944 ring->head = I915_READ_HEAD(ring);
957 ring->space = ring_space(ring); 945 ring->space = ring_space(ring);
958 if (ring->space >= n) { 946 if (ring->space >= n) {
959 trace_i915_ring_wait_end(dev); 947 trace_i915_ring_wait_end(ring);
960 return 0; 948 return 0;
961 } 949 }
962 950
@@ -970,16 +958,20 @@ int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
970 if (atomic_read(&dev_priv->mm.wedged)) 958 if (atomic_read(&dev_priv->mm.wedged))
971 return -EAGAIN; 959 return -EAGAIN;
972 } while (!time_after(jiffies, end)); 960 } while (!time_after(jiffies, end));
973 trace_i915_ring_wait_end (dev); 961 trace_i915_ring_wait_end(ring);
974 return -EBUSY; 962 return -EBUSY;
975} 963}
976 964
977int intel_ring_begin(struct intel_ring_buffer *ring, 965int intel_ring_begin(struct intel_ring_buffer *ring,
978 int num_dwords) 966 int num_dwords)
979{ 967{
968 struct drm_i915_private *dev_priv = ring->dev->dev_private;
980 int n = 4*num_dwords; 969 int n = 4*num_dwords;
981 int ret; 970 int ret;
982 971
972 if (unlikely(atomic_read(&dev_priv->mm.wedged)))
973 return -EIO;
974
983 if (unlikely(ring->tail + n > ring->effective_size)) { 975 if (unlikely(ring->tail + n > ring->effective_size)) {
984 ret = intel_wrap_ring_buffer(ring); 976 ret = intel_wrap_ring_buffer(ring);
985 if (unlikely(ret)) 977 if (unlikely(ret))
@@ -1059,22 +1051,25 @@ static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring,
1059} 1051}
1060 1052
1061static int gen6_ring_flush(struct intel_ring_buffer *ring, 1053static int gen6_ring_flush(struct intel_ring_buffer *ring,
1062 u32 invalidate_domains, 1054 u32 invalidate, u32 flush)
1063 u32 flush_domains)
1064{ 1055{
1056 uint32_t cmd;
1065 int ret; 1057 int ret;
1066 1058
1067 if ((flush_domains & I915_GEM_DOMAIN_RENDER) == 0) 1059 if (((invalidate | flush) & I915_GEM_GPU_DOMAINS) == 0)
1068 return 0; 1060 return 0;
1069 1061
1070 ret = intel_ring_begin(ring, 4); 1062 ret = intel_ring_begin(ring, 4);
1071 if (ret) 1063 if (ret)
1072 return ret; 1064 return ret;
1073 1065
1074 intel_ring_emit(ring, MI_FLUSH_DW); 1066 cmd = MI_FLUSH_DW;
1075 intel_ring_emit(ring, 0); 1067 if (invalidate & I915_GEM_GPU_DOMAINS)
1068 cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD;
1069 intel_ring_emit(ring, cmd);
1076 intel_ring_emit(ring, 0); 1070 intel_ring_emit(ring, 0);
1077 intel_ring_emit(ring, 0); 1071 intel_ring_emit(ring, 0);
1072 intel_ring_emit(ring, MI_NOOP);
1078 intel_ring_advance(ring); 1073 intel_ring_advance(ring);
1079 return 0; 1074 return 0;
1080} 1075}
@@ -1230,22 +1225,25 @@ static int blt_ring_begin(struct intel_ring_buffer *ring,
1230} 1225}
1231 1226
1232static int blt_ring_flush(struct intel_ring_buffer *ring, 1227static int blt_ring_flush(struct intel_ring_buffer *ring,
1233 u32 invalidate_domains, 1228 u32 invalidate, u32 flush)
1234 u32 flush_domains)
1235{ 1229{
1230 uint32_t cmd;
1236 int ret; 1231 int ret;
1237 1232
1238 if ((flush_domains & I915_GEM_DOMAIN_RENDER) == 0) 1233 if (((invalidate | flush) & I915_GEM_DOMAIN_RENDER) == 0)
1239 return 0; 1234 return 0;
1240 1235
1241 ret = blt_ring_begin(ring, 4); 1236 ret = blt_ring_begin(ring, 4);
1242 if (ret) 1237 if (ret)
1243 return ret; 1238 return ret;
1244 1239
1245 intel_ring_emit(ring, MI_FLUSH_DW); 1240 cmd = MI_FLUSH_DW;
1246 intel_ring_emit(ring, 0); 1241 if (invalidate & I915_GEM_DOMAIN_RENDER)
1242 cmd |= MI_INVALIDATE_TLB;
1243 intel_ring_emit(ring, cmd);
1247 intel_ring_emit(ring, 0); 1244 intel_ring_emit(ring, 0);
1248 intel_ring_emit(ring, 0); 1245 intel_ring_emit(ring, 0);
1246 intel_ring_emit(ring, MI_NOOP);
1249 intel_ring_advance(ring); 1247 intel_ring_advance(ring);
1250 return 0; 1248 return 0;
1251} 1249}
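Both flush rewrites above stop ignoring the caller's domains: MI_FLUSH_DW now carries TLB invalidation (plus BSD-cache invalidation on the video ring) only when an invalidate was actually requested, and the spare dword is padded with MI_NOOP. A sketch of the command-word construction, with invented bit positions:

    #include <stdio.h>

    /* Opcode and bit positions are illustrative, not the real encoding. */
    #define MI_FLUSH_DW        (0x26u << 23)
    #define MI_INVALIDATE_TLB  (1u << 18)
    #define MI_INVALIDATE_BSD  (1u << 7)
    #define GPU_DOMAINS        0x3eu

    static unsigned flush_cmd(unsigned invalidate, int bsd_ring)
    {
        unsigned cmd = MI_FLUSH_DW;

        /* Only pay for the invalidation when it was asked for. */
        if (invalidate & GPU_DOMAINS) {
            cmd |= MI_INVALIDATE_TLB;
            if (bsd_ring)
                cmd |= MI_INVALIDATE_BSD;
        }
        return cmd;
    }

    int main(void)
    {
        printf("0x%08x\n", flush_cmd(GPU_DOMAINS, 1));
        printf("0x%08x\n", flush_cmd(0, 1));
        return 0;
    }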
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 6d6fde85a636..f23cc5f037a6 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -14,22 +14,23 @@ struct intel_hw_status_page {
14 struct drm_i915_gem_object *obj; 14 struct drm_i915_gem_object *obj;
15}; 15};
16 16
17#define I915_RING_READ(reg) i915_safe_read(dev_priv, reg) 17#define I915_RING_READ(reg) i915_gt_read(dev_priv, reg)
18#define I915_RING_WRITE(reg, val) i915_gt_write(dev_priv, reg, val)
18 19
19#define I915_READ_TAIL(ring) I915_RING_READ(RING_TAIL((ring)->mmio_base)) 20#define I915_READ_TAIL(ring) I915_RING_READ(RING_TAIL((ring)->mmio_base))
20#define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL((ring)->mmio_base), val) 21#define I915_WRITE_TAIL(ring, val) I915_RING_WRITE(RING_TAIL((ring)->mmio_base), val)
21 22
22#define I915_READ_START(ring) I915_RING_READ(RING_START((ring)->mmio_base)) 23#define I915_READ_START(ring) I915_RING_READ(RING_START((ring)->mmio_base))
23#define I915_WRITE_START(ring, val) I915_WRITE(RING_START((ring)->mmio_base), val) 24#define I915_WRITE_START(ring, val) I915_RING_WRITE(RING_START((ring)->mmio_base), val)
24 25
25#define I915_READ_HEAD(ring) I915_RING_READ(RING_HEAD((ring)->mmio_base)) 26#define I915_READ_HEAD(ring) I915_RING_READ(RING_HEAD((ring)->mmio_base))
26#define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD((ring)->mmio_base), val) 27#define I915_WRITE_HEAD(ring, val) I915_RING_WRITE(RING_HEAD((ring)->mmio_base), val)
27 28
28#define I915_READ_CTL(ring) I915_RING_READ(RING_CTL((ring)->mmio_base)) 29#define I915_READ_CTL(ring) I915_RING_READ(RING_CTL((ring)->mmio_base))
29#define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL((ring)->mmio_base), val) 30#define I915_WRITE_CTL(ring, val) I915_RING_WRITE(RING_CTL((ring)->mmio_base), val)
30 31
31#define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val)
32#define I915_READ_IMR(ring) I915_RING_READ(RING_IMR((ring)->mmio_base)) 32#define I915_READ_IMR(ring) I915_RING_READ(RING_IMR((ring)->mmio_base))
33#define I915_WRITE_IMR(ring, val) I915_RING_WRITE(RING_IMR((ring)->mmio_base), val)
33 34
34#define I915_READ_NOPID(ring) I915_RING_READ(RING_NOPID((ring)->mmio_base)) 35#define I915_READ_NOPID(ring) I915_RING_READ(RING_NOPID((ring)->mmio_base))
35#define I915_READ_SYNC_0(ring) I915_RING_READ(RING_SYNC_0((ring)->mmio_base)) 36#define I915_READ_SYNC_0(ring) I915_RING_READ(RING_SYNC_0((ring)->mmio_base))
@@ -43,7 +44,7 @@ struct intel_ring_buffer {
43 RING_BLT = 0x4, 44 RING_BLT = 0x4,
44 } id; 45 } id;
45 u32 mmio_base; 46 u32 mmio_base;
46 void *virtual_start; 47 void __iomem *virtual_start;
47 struct drm_device *dev; 48 struct drm_device *dev;
48 struct drm_i915_gem_object *obj; 49 struct drm_i915_gem_object *obj;
49 50
@@ -58,6 +59,7 @@ struct intel_ring_buffer {
58 u32 irq_refcount; 59 u32 irq_refcount;
59 u32 irq_mask; 60 u32 irq_mask;
60 u32 irq_seqno; /* last seq seen at irq time */ 61 u32 irq_seqno; /* last seq seen at irq time */
62 u32 trace_irq_seqno;
61 u32 waiting_seqno; 63 u32 waiting_seqno;
62 u32 sync_seqno[I915_NUM_RINGS-1]; 64 u32 sync_seqno[I915_NUM_RINGS-1];
63 bool __must_check (*irq_get)(struct intel_ring_buffer *ring); 65 bool __must_check (*irq_get)(struct intel_ring_buffer *ring);
@@ -141,6 +143,26 @@ intel_read_status_page(struct intel_ring_buffer *ring,
141 return ioread32(ring->status_page.page_addr + reg); 143 return ioread32(ring->status_page.page_addr + reg);
142} 144}
143 145
146/**
147 * Reads a dword out of the status page, which is written to from the command
148 * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
149 * MI_STORE_DATA_IMM.
150 *
151 * The following dwords have a reserved meaning:
152 * 0x00: ISR copy, updated when an ISR bit not set in the HWSTAM changes.
153 * 0x04: ring 0 head pointer
154 * 0x05: ring 1 head pointer (915-class)
155 * 0x06: ring 2 head pointer (915-class)
156 * 0x10-0x1b: Context status DWords (GM45)
157 * 0x1f: Last written status offset. (GM45)
158 *
159 * The area from dword 0x20 to 0x3ff is available for driver usage.
160 */
161#define READ_HWSP(dev_priv, reg) intel_read_status_page(LP_RING(dev_priv), reg)
162#define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX)
163#define I915_GEM_HWS_INDEX 0x20
164#define I915_BREADCRUMB_INDEX 0x21
165
144void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring); 166void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring);
145int __must_check intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n); 167int __must_check intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n);
146int __must_check intel_ring_begin(struct intel_ring_buffer *ring, int n); 168int __must_check intel_ring_begin(struct intel_ring_buffer *ring, int n);
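The comment block added above documents the status-page layout these macros index into: a coherent page the GPU writes and the CPU polls, with dword 0x21 reserved for the DRI breadcrumb. A toy model of the access pattern (the driver's READ_HWSP() goes through ioread32() on a mapped page):

    #include <stdio.h>
    #include <stdint.h>

    #define I915_BREADCRUMB_INDEX 0x21

    /* Stand-in for the hardware status page: an array of dwords the GPU
     * updates and the driver reads without a register round trip. */
    static uint32_t hwsp[1024];

    int main(void)
    {
        hwsp[I915_BREADCRUMB_INDEX] = 42;   /* pretend the GPU wrote it */
        printf("last breadcrumb = %u\n",
               (unsigned)hwsp[I915_BREADCRUMB_INDEX]);
        return 0;
    }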
@@ -166,6 +188,12 @@ int intel_init_blt_ring_buffer(struct drm_device *dev);
166u32 intel_ring_get_active_head(struct intel_ring_buffer *ring); 188u32 intel_ring_get_active_head(struct intel_ring_buffer *ring);
167void intel_ring_setup_status_page(struct intel_ring_buffer *ring); 189void intel_ring_setup_status_page(struct intel_ring_buffer *ring);
168 190
191static inline void i915_trace_irq_get(struct intel_ring_buffer *ring, u32 seqno)
192{
193 if (ring->trace_irq_seqno == 0 && ring->irq_get(ring))
194 ring->trace_irq_seqno = seqno;
195}
196
169/* DRI warts */ 197/* DRI warts */
170int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size); 198int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size);
171 199
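i915_trace_irq_get() above arms the user-interrupt IRQ at most once per trace window, remembering the seqno whose retirement will drop the reference again. The grab-once pattern on its own:

    #include <stdbool.h>
    #include <stdio.h>

    struct ring {
        unsigned trace_irq_seqno;   /* 0 = no trace IRQ outstanding */
        bool (*irq_get)(struct ring *);
    };

    static bool irq_get(struct ring *r) { (void)r; return true; }

    /* Take the IRQ reference only for the first request in a window. */
    static void trace_irq_get(struct ring *r, unsigned seqno)
    {
        if (r->trace_irq_seqno == 0 && r->irq_get(r))
            r->trace_irq_seqno = seqno;
    }

    int main(void)
    {
        struct ring r = { 0, irq_get };
        trace_irq_get(&r, 7);
        trace_irq_get(&r, 9);   /* no-op: already armed at seqno 7 */
        printf("armed at seqno %u\n", r.trace_irq_seqno);
        return 0;
    }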
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 318f398e6b2e..4324f33212d6 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -46,6 +46,7 @@
46 SDVO_TV_MASK) 46 SDVO_TV_MASK)
47 47
48#define IS_TV(c) (c->output_flag & SDVO_TV_MASK) 48#define IS_TV(c) (c->output_flag & SDVO_TV_MASK)
49#define IS_TMDS(c) (c->output_flag & SDVO_TMDS_MASK)
49#define IS_LVDS(c) (c->output_flag & SDVO_LVDS_MASK) 50#define IS_LVDS(c) (c->output_flag & SDVO_LVDS_MASK)
50#define IS_TV_OR_LVDS(c) (c->output_flag & (SDVO_TV_MASK | SDVO_LVDS_MASK)) 51#define IS_TV_OR_LVDS(c) (c->output_flag & (SDVO_TV_MASK | SDVO_LVDS_MASK))
51 52
@@ -92,6 +93,12 @@ struct intel_sdvo {
92 uint16_t attached_output; 93 uint16_t attached_output;
93 94
94 /** 95 /**
96 * This is used to select the color range of RBG outputs in HDMI mode.
97 * It is only valid when using TMDS encoding and 8 bit per color mode.
98 */
99 uint32_t color_range;
100
101 /**
95 * This is set if we're going to treat the device as TV-out. 102 * This is set if we're going to treat the device as TV-out.
96 * 103 *
97 * While we have these nice friendly flags for output types that ought 104 * While we have these nice friendly flags for output types that ought
@@ -584,6 +591,7 @@ static bool intel_sdvo_get_trained_inputs(struct intel_sdvo *intel_sdvo, bool *i
584{ 591{
585 struct intel_sdvo_get_trained_inputs_response response; 592 struct intel_sdvo_get_trained_inputs_response response;
586 593
594 BUILD_BUG_ON(sizeof(response) != 1);
587 if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_TRAINED_INPUTS, 595 if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_TRAINED_INPUTS,
588 &response, sizeof(response))) 596 &response, sizeof(response)))
589 return false; 597 return false;
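The BUILD_BUG_ON() lines added through this file pin each SDVO wire-format struct to the exact size the protocol expects, so a stray padding change breaks the build instead of silently corrupting a transfer. A minimal model of the mechanism:

    #include <stdio.h>

    /* Simplified BUILD_BUG_ON(): a negative array size is a compile-time
     * error, so compilation fails as soon as the condition holds. */
    #define BUILD_BUG_ON(cond) ((void)sizeof(char[1 - 2 * !!(cond)]))

    struct reply { unsigned char status; };  /* hypothetical 1-byte response */

    int main(void)
    {
        BUILD_BUG_ON(sizeof(struct reply) != 1);  /* fine: size really is 1 */
        printf("reply is %zu byte\n", sizeof(struct reply));
        return 0;
    }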
@@ -631,6 +639,7 @@ static bool intel_sdvo_get_input_pixel_clock_range(struct intel_sdvo *intel_sdvo
631{ 639{
632 struct intel_sdvo_pixel_clock_range clocks; 640 struct intel_sdvo_pixel_clock_range clocks;
633 641
642 BUILD_BUG_ON(sizeof(clocks) != 4);
634 if (!intel_sdvo_get_value(intel_sdvo, 643 if (!intel_sdvo_get_value(intel_sdvo,
635 SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE, 644 SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE,
636 &clocks, sizeof(clocks))) 645 &clocks, sizeof(clocks)))
@@ -698,6 +707,8 @@ intel_sdvo_create_preferred_input_timing(struct intel_sdvo *intel_sdvo,
698static bool intel_sdvo_get_preferred_input_timing(struct intel_sdvo *intel_sdvo, 707static bool intel_sdvo_get_preferred_input_timing(struct intel_sdvo *intel_sdvo,
699 struct intel_sdvo_dtd *dtd) 708 struct intel_sdvo_dtd *dtd)
700{ 709{
710 BUILD_BUG_ON(sizeof(dtd->part1) != 8);
711 BUILD_BUG_ON(sizeof(dtd->part2) != 8);
701 return intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1, 712 return intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1,
702 &dtd->part1, sizeof(dtd->part1)) && 713 &dtd->part1, sizeof(dtd->part1)) &&
703 intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2, 714 intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2,
@@ -795,6 +806,7 @@ static bool intel_sdvo_check_supp_encode(struct intel_sdvo *intel_sdvo)
795{ 806{
796 struct intel_sdvo_encode encode; 807 struct intel_sdvo_encode encode;
797 808
809 BUILD_BUG_ON(sizeof(encode) != 2);
798 return intel_sdvo_get_value(intel_sdvo, 810 return intel_sdvo_get_value(intel_sdvo,
799 SDVO_CMD_GET_SUPP_ENCODE, 811 SDVO_CMD_GET_SUPP_ENCODE,
800 &encode, sizeof(encode)); 812 &encode, sizeof(encode));
@@ -1050,6 +1062,8 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
1050 /* Set the SDVO control regs. */ 1062 /* Set the SDVO control regs. */
1051 if (INTEL_INFO(dev)->gen >= 4) { 1063 if (INTEL_INFO(dev)->gen >= 4) {
1052 sdvox = 0; 1064 sdvox = 0;
1065 if (intel_sdvo->is_hdmi)
1066 sdvox |= intel_sdvo->color_range;
1053 if (INTEL_INFO(dev)->gen < 5) 1067 if (INTEL_INFO(dev)->gen < 5)
1054 sdvox |= SDVO_BORDER_ENABLE; 1068 sdvox |= SDVO_BORDER_ENABLE;
1055 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) 1069 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
@@ -1161,6 +1175,7 @@ static int intel_sdvo_mode_valid(struct drm_connector *connector,
1161 1175
1162static bool intel_sdvo_get_capabilities(struct intel_sdvo *intel_sdvo, struct intel_sdvo_caps *caps) 1176static bool intel_sdvo_get_capabilities(struct intel_sdvo *intel_sdvo, struct intel_sdvo_caps *caps)
1163{ 1177{
1178 BUILD_BUG_ON(sizeof(*caps) != 8);
1164 if (!intel_sdvo_get_value(intel_sdvo, 1179 if (!intel_sdvo_get_value(intel_sdvo,
1165 SDVO_CMD_GET_DEVICE_CAPS, 1180 SDVO_CMD_GET_DEVICE_CAPS,
1166 caps, sizeof(*caps))) 1181 caps, sizeof(*caps)))
@@ -1267,33 +1282,9 @@ void intel_sdvo_set_hotplug(struct drm_connector *connector, int on)
1267static bool 1282static bool
1268intel_sdvo_multifunc_encoder(struct intel_sdvo *intel_sdvo) 1283intel_sdvo_multifunc_encoder(struct intel_sdvo *intel_sdvo)
1269{ 1284{
1270 int caps = 0; 1285 /* Is there more than one type of output? */
1271 1286 int caps = intel_sdvo->caps.output_flags & 0xf;
1272 if (intel_sdvo->caps.output_flags & 1287 return caps & (caps - 1);
1273 (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1))
1274 caps++;
1275 if (intel_sdvo->caps.output_flags &
1276 (SDVO_OUTPUT_RGB0 | SDVO_OUTPUT_RGB1))
1277 caps++;
1278 if (intel_sdvo->caps.output_flags &
1279 (SDVO_OUTPUT_SVID0 | SDVO_OUTPUT_SVID1))
1280 caps++;
1281 if (intel_sdvo->caps.output_flags &
1282 (SDVO_OUTPUT_CVBS0 | SDVO_OUTPUT_CVBS1))
1283 caps++;
1284 if (intel_sdvo->caps.output_flags &
1285 (SDVO_OUTPUT_YPRPB0 | SDVO_OUTPUT_YPRPB1))
1286 caps++;
1287
1288 if (intel_sdvo->caps.output_flags &
1289 (SDVO_OUTPUT_SCART0 | SDVO_OUTPUT_SCART1))
1290 caps++;
1291
1292 if (intel_sdvo->caps.output_flags &
1293 (SDVO_OUTPUT_LVDS0 | SDVO_OUTPUT_LVDS1))
1294 caps++;
1295
1296 return (caps > 1);
1297} 1288}
1298 1289
1299static struct edid * 1290static struct edid *
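The rewrite above folds the seven flag tests into one mask of the low output bits and answers with a bit trick. For reference: for unsigned x, x & -x isolates the lowest set bit (nonzero whenever any bit is set), while x & (x - 1) clears the lowest set bit (nonzero only when more than one bit is set). A standalone sketch of both idioms, outside the driver:

    #include <stdbool.h>
    #include <stdint.h>

    /* Lowest set bit of x; nonzero iff at least one bit is set. */
    static inline uint16_t lowest_bit(uint16_t x)
    {
    	return x & (uint16_t)-x;
    }

    /* True iff more than one bit is set (clear the lowest, test the rest). */
    static inline bool has_multiple_bits(uint16_t x)
    {
    	return (x & (x - 1)) != 0;
    }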
@@ -1359,7 +1350,8 @@ intel_sdvo_hdmi_sink_detect(struct drm_connector *connector)
 				intel_sdvo->has_hdmi_monitor = drm_detect_hdmi_monitor(edid);
 				intel_sdvo->has_hdmi_audio = drm_detect_monitor_audio(edid);
 			}
-		}
+		} else
+			status = connector_status_disconnected;
 		connector->display_info.raw_edid = NULL;
 		kfree(edid);
 	}
@@ -1407,10 +1399,25 @@ intel_sdvo_detect(struct drm_connector *connector, bool force)
 
 	if ((intel_sdvo_connector->output_flag & response) == 0)
 		ret = connector_status_disconnected;
-	else if (response & SDVO_TMDS_MASK)
+	else if (IS_TMDS(intel_sdvo_connector))
 		ret = intel_sdvo_hdmi_sink_detect(connector);
-	else
-		ret = connector_status_connected;
+	else {
+		struct edid *edid;
+
+		/* if we have an edid check it matches the connection */
+		edid = intel_sdvo_get_edid(connector);
+		if (edid == NULL)
+			edid = intel_sdvo_get_analog_edid(connector);
+		if (edid != NULL) {
+			if (edid->input & DRM_EDID_INPUT_DIGITAL)
+				ret = connector_status_disconnected;
+			else
+				ret = connector_status_connected;
+			connector->display_info.raw_edid = NULL;
+			kfree(edid);
+		} else
+			ret = connector_status_connected;
+	}
 
 	/* May update encoder flag for like clock for SDVO TV, etc.*/
 	if (ret == connector_status_connected) {
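The new else branch cross-checks an analog connector against whatever monitor actually answered on DDC: bit 7 of the EDID input byte (DRM_EDID_INPUT_DIGITAL) marks a digital sink, so a digital EDID seen through an analog SDVO output is reported as disconnected. A minimal sketch of the test, with a hypothetical helper name:

    #include <drm/drm_edid.h>

    /* Hypothetical helper: should this EDID attach to an analog output? */
    static bool demo_edid_is_analog(const struct edid *edid)
    {
    	/* Bit 7 of the input byte is set for digital sinks. */
    	return (edid->input & DRM_EDID_INPUT_DIGITAL) == 0;
    }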
@@ -1446,10 +1453,15 @@ static void intel_sdvo_get_ddc_modes(struct drm_connector *connector)
 	edid = intel_sdvo_get_analog_edid(connector);
 
 	if (edid != NULL) {
-		if (edid->input & DRM_EDID_INPUT_DIGITAL) {
+		struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
+		bool monitor_is_digital = !!(edid->input & DRM_EDID_INPUT_DIGITAL);
+		bool connector_is_digital = !!IS_TMDS(intel_sdvo_connector);
+
+		if (connector_is_digital == monitor_is_digital) {
 			drm_mode_connector_update_edid_property(connector, edid);
 			drm_add_edid_modes(connector, edid);
 		}
+
 		connector->display_info.raw_edid = NULL;
 		kfree(edid);
 	}
@@ -1668,6 +1680,22 @@ static void intel_sdvo_destroy(struct drm_connector *connector)
 	kfree(connector);
 }
 
+static bool intel_sdvo_detect_hdmi_audio(struct drm_connector *connector)
+{
+	struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
+	struct edid *edid;
+	bool has_audio = false;
+
+	if (!intel_sdvo->is_hdmi)
+		return false;
+
+	edid = intel_sdvo_get_edid(connector);
+	if (edid != NULL && edid->input & DRM_EDID_INPUT_DIGITAL)
+		has_audio = drm_detect_monitor_audio(edid);
+
+	return has_audio;
+}
+
 static int
 intel_sdvo_set_property(struct drm_connector *connector,
 			struct drm_property *property,
@@ -1675,6 +1703,7 @@ intel_sdvo_set_property(struct drm_connector *connector,
 {
 	struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
 	struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
+	struct drm_i915_private *dev_priv = connector->dev->dev_private;
 	uint16_t temp_value;
 	uint8_t cmd;
 	int ret;
@@ -1684,17 +1713,31 @@ intel_sdvo_set_property(struct drm_connector *connector,
 		return ret;
 
 	if (property == intel_sdvo_connector->force_audio_property) {
-		if (val == intel_sdvo_connector->force_audio)
+		int i = val;
+		bool has_audio;
+
+		if (i == intel_sdvo_connector->force_audio)
 			return 0;
 
-		intel_sdvo_connector->force_audio = val;
+		intel_sdvo_connector->force_audio = i;
+
+		if (i == 0)
+			has_audio = intel_sdvo_detect_hdmi_audio(connector);
+		else
+			has_audio = i > 0;
 
-		if (val > 0 && intel_sdvo->has_hdmi_audio)
+		if (has_audio == intel_sdvo->has_hdmi_audio)
 			return 0;
-		if (val < 0 && !intel_sdvo->has_hdmi_audio)
+
+		intel_sdvo->has_hdmi_audio = has_audio;
+		goto done;
+	}
+
+	if (property == dev_priv->broadcast_rgb_property) {
+		if (val == !!intel_sdvo->color_range)
 			return 0;
 
-		intel_sdvo->has_hdmi_audio = val > 0;
+		intel_sdvo->color_range = val ? SDVO_COLOR_RANGE_16_235 : 0;
 		goto done;
 	}
 
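The force_audio property is a tri-state: a negative value forces HDMI audio off, a positive value forces it on, and zero falls back to what the sink's EDID advertises via the new intel_sdvo_detect_hdmi_audio(). A compact restatement of the mapping, using hypothetical names:

    #include <stdbool.h>

    /* Hypothetical sketch of the tri-state audio decision. */
    static bool demo_resolve_audio(int force_audio, bool edid_has_audio)
    {
    	if (force_audio == 0)		/* auto: trust the EDID */
    		return edid_has_audio;
    	return force_audio > 0;		/* < 0 forces off, > 0 forces on */
    }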
@@ -2002,6 +2045,9 @@ intel_sdvo_add_hdmi_properties(struct intel_sdvo_connector *connector)
 		drm_connector_attach_property(&connector->base.base,
 					      connector->force_audio_property, 0);
 	}
+
+	if (INTEL_INFO(dev)->gen >= 4 && IS_MOBILE(dev))
+		intel_attach_broadcast_rgb_property(&connector->base.base);
 }
 
 static bool
@@ -2224,6 +2270,7 @@ static bool intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo,
 	if (!intel_sdvo_set_target_output(intel_sdvo, type))
 		return false;
 
+	BUILD_BUG_ON(sizeof(format) != 6);
 	if (!intel_sdvo_get_value(intel_sdvo,
 				  SDVO_CMD_GET_SUPPORTED_TV_FORMATS,
 				  &format, sizeof(format)))
@@ -2430,6 +2477,8 @@ static bool intel_sdvo_create_enhance_property(struct intel_sdvo *intel_sdvo,
 		uint16_t response;
 	} enhancements;
 
+	BUILD_BUG_ON(sizeof(enhancements) != 2);
+
 	enhancements.response = 0;
 	intel_sdvo_get_value(intel_sdvo,
 			     SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS,
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 93206e4eaa6f..4256b8ef3947 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -1006,6 +1006,7 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
 	const struct video_levels *video_levels;
 	const struct color_conversion *color_conversion;
 	bool burst_ena;
+	int pipe = intel_crtc->pipe;
 
 	if (!tv_mode)
 		return;	/* can't happen (mode_prepare prevents this) */
@@ -1149,14 +1150,11 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
 		   ((video_levels->black << TV_BLACK_LEVEL_SHIFT) |
 		    (video_levels->blank << TV_BLANK_LEVEL_SHIFT)));
 	{
-		int pipeconf_reg = (intel_crtc->pipe == 0) ?
-			PIPEACONF : PIPEBCONF;
-		int dspcntr_reg = (intel_crtc->plane == 0) ?
-			DSPACNTR : DSPBCNTR;
+		int pipeconf_reg = PIPECONF(pipe);
+		int dspcntr_reg = DSPCNTR(pipe);
 		int pipeconf = I915_READ(pipeconf_reg);
 		int dspcntr = I915_READ(dspcntr_reg);
-		int dspbase_reg = (intel_crtc->plane == 0) ?
-			DSPAADDR : DSPBADDR;
+		int dspbase_reg = DSPADDR(pipe);
 		int xpos = 0x0, ypos = 0x0;
 		unsigned int xsize, ysize;
 		/* Pipe must be off here */
@@ -1234,7 +1232,8 @@ static const struct drm_display_mode reported_modes[] = {
  * \return false if TV is disconnected.
  */
 static int
-intel_tv_detect_type (struct intel_tv *intel_tv)
+intel_tv_detect_type (struct intel_tv *intel_tv,
+		      struct drm_connector *connector)
 {
 	struct drm_encoder *encoder = &intel_tv->base.base;
 	struct drm_device *dev = encoder->dev;
@@ -1245,11 +1244,13 @@ intel_tv_detect_type (struct intel_tv *intel_tv)
 	int type;
 
 	/* Disable TV interrupts around load detect or we'll recurse */
-	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
-	i915_disable_pipestat(dev_priv, 0,
-			      PIPE_HOTPLUG_INTERRUPT_ENABLE |
-			      PIPE_HOTPLUG_TV_INTERRUPT_ENABLE);
-	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+	if (connector->polled & DRM_CONNECTOR_POLL_HPD) {
+		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+		i915_disable_pipestat(dev_priv, 0,
+				      PIPE_HOTPLUG_INTERRUPT_ENABLE |
+				      PIPE_HOTPLUG_TV_INTERRUPT_ENABLE);
+		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+	}
 
 	save_tv_dac = tv_dac = I915_READ(TV_DAC);
 	save_tv_ctl = tv_ctl = I915_READ(TV_CTL);
@@ -1302,11 +1303,13 @@ intel_tv_detect_type (struct intel_tv *intel_tv)
 	I915_WRITE(TV_CTL, save_tv_ctl);
 
 	/* Restore interrupt config */
-	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
-	i915_enable_pipestat(dev_priv, 0,
-			     PIPE_HOTPLUG_INTERRUPT_ENABLE |
-			     PIPE_HOTPLUG_TV_INTERRUPT_ENABLE);
-	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+	if (connector->polled & DRM_CONNECTOR_POLL_HPD) {
+		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+		i915_enable_pipestat(dev_priv, 0,
+				     PIPE_HOTPLUG_INTERRUPT_ENABLE |
+				     PIPE_HOTPLUG_TV_INTERRUPT_ENABLE);
+		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+	}
 
 	return type;
 }
@@ -1356,7 +1359,7 @@ intel_tv_detect(struct drm_connector *connector, bool force)
 	drm_mode_set_crtcinfo(&mode, CRTC_INTERLACE_HALVE_V);
 
 	if (intel_tv->base.base.crtc && intel_tv->base.base.crtc->enabled) {
-		type = intel_tv_detect_type(intel_tv);
+		type = intel_tv_detect_type(intel_tv, connector);
 	} else if (force) {
 		struct drm_crtc *crtc;
 		int dpms_mode;
@@ -1364,7 +1367,7 @@ intel_tv_detect(struct drm_connector *connector, bool force)
 		crtc = intel_get_load_detect_pipe(&intel_tv->base, connector,
 						  &mode, &dpms_mode);
 		if (crtc) {
-			type = intel_tv_detect_type(intel_tv);
+			type = intel_tv_detect_type(intel_tv, connector);
 			intel_release_load_detect_pipe(&intel_tv->base, connector,
 						       dpms_mode);
 		} else
@@ -1658,6 +1661,18 @@ intel_tv_init(struct drm_device *dev)
 	intel_encoder = &intel_tv->base;
 	connector = &intel_connector->base;
 
+	/* The documentation, for the older chipsets at least, recommend
+	 * using a polling method rather than hotplug detection for TVs.
+	 * This is because in order to perform the hotplug detection, the PLLs
+	 * for the TV must be kept alive increasing power drain and starving
+	 * bandwidth from other encoders. Notably for instance, it causes
+	 * pipe underruns on Crestline when this encoder is supposedly idle.
+	 *
+	 * More recent chipsets favour HDMI rather than integrated S-Video.
+	 */
+	connector->polled =
+		DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
+
 	drm_connector_init(dev, connector, &intel_tv_connector_funcs,
 			   DRM_MODE_CONNECTOR_SVIDEO);
 
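DRM separates connectors whose hardware can raise a hotplug interrupt (DRM_CONNECTOR_POLL_HPD) from those the output-poll helper must probe on a timer; the hunk above opts the TV connector into timer-based probing for both plug and unplug, which is why intel_tv_detect_type() now only touches the hotplug interrupt mask when POLL_HPD is actually set. The convention in isolation, as a minimal sketch:

    #include <drm/drm_crtc.h>

    /* Sketch: ask the poll helper to probe instead of relying on HPD. */
    static void demo_mark_polled(struct drm_connector *connector)
    {
    	connector->polled = DRM_CONNECTOR_POLL_CONNECT |
    			    DRM_CONNECTOR_POLL_DISCONNECT;
    }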
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 3fcffcf75e35..2ad49cbf7c8b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -49,7 +49,10 @@ nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
 		DRM_ERROR("bo %p still attached to GEM object\n", bo);
 
 	nv10_mem_put_tile_region(dev, nvbo->tile, NULL);
-	nouveau_vm_put(&nvbo->vma);
+	if (nvbo->vma.node) {
+		nouveau_vm_unmap(&nvbo->vma);
+		nouveau_vm_put(&nvbo->vma);
+	}
 	kfree(nvbo);
 }
 
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index d56f08d3cbdc..a2199fe9fa9b 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -48,29 +48,29 @@ static void atombios_overscan_setup(struct drm_crtc *crtc,
 
 	switch (radeon_crtc->rmx_type) {
 	case RMX_CENTER:
-		args.usOverscanTop = (adjusted_mode->crtc_vdisplay - mode->crtc_vdisplay) / 2;
-		args.usOverscanBottom = (adjusted_mode->crtc_vdisplay - mode->crtc_vdisplay) / 2;
-		args.usOverscanLeft = (adjusted_mode->crtc_hdisplay - mode->crtc_hdisplay) / 2;
-		args.usOverscanRight = (adjusted_mode->crtc_hdisplay - mode->crtc_hdisplay) / 2;
+		args.usOverscanTop = cpu_to_le16((adjusted_mode->crtc_vdisplay - mode->crtc_vdisplay) / 2);
+		args.usOverscanBottom = cpu_to_le16((adjusted_mode->crtc_vdisplay - mode->crtc_vdisplay) / 2);
+		args.usOverscanLeft = cpu_to_le16((adjusted_mode->crtc_hdisplay - mode->crtc_hdisplay) / 2);
+		args.usOverscanRight = cpu_to_le16((adjusted_mode->crtc_hdisplay - mode->crtc_hdisplay) / 2);
 		break;
 	case RMX_ASPECT:
 		a1 = mode->crtc_vdisplay * adjusted_mode->crtc_hdisplay;
 		a2 = adjusted_mode->crtc_vdisplay * mode->crtc_hdisplay;
 
 		if (a1 > a2) {
-			args.usOverscanLeft = (adjusted_mode->crtc_hdisplay - (a2 / mode->crtc_vdisplay)) / 2;
-			args.usOverscanRight = (adjusted_mode->crtc_hdisplay - (a2 / mode->crtc_vdisplay)) / 2;
+			args.usOverscanLeft = cpu_to_le16((adjusted_mode->crtc_hdisplay - (a2 / mode->crtc_vdisplay)) / 2);
+			args.usOverscanRight = cpu_to_le16((adjusted_mode->crtc_hdisplay - (a2 / mode->crtc_vdisplay)) / 2);
 		} else if (a2 > a1) {
-			args.usOverscanLeft = (adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2;
-			args.usOverscanRight = (adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2;
+			args.usOverscanLeft = cpu_to_le16((adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2);
+			args.usOverscanRight = cpu_to_le16((adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2);
 		}
 		break;
 	case RMX_FULL:
 	default:
-		args.usOverscanRight = radeon_crtc->h_border;
-		args.usOverscanLeft = radeon_crtc->h_border;
-		args.usOverscanBottom = radeon_crtc->v_border;
-		args.usOverscanTop = radeon_crtc->v_border;
+		args.usOverscanRight = cpu_to_le16(radeon_crtc->h_border);
+		args.usOverscanLeft = cpu_to_le16(radeon_crtc->h_border);
+		args.usOverscanBottom = cpu_to_le16(radeon_crtc->v_border);
+		args.usOverscanTop = cpu_to_le16(radeon_crtc->v_border);
 		break;
 	}
 	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
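AtomBIOS parameter tables are little-endian byte streams, so every multi-byte field must pass through cpu_to_le16()/cpu_to_le32() before atom_execute_table(); the conversions are no-ops on little-endian hosts and byte swaps on big-endian ones. The pattern, sketched against a hypothetical args struct:

    #include <asm/byteorder.h>
    #include <linux/types.h>

    /* Hypothetical AtomBIOS-style table: all fields little-endian. */
    struct demo_args {
    	__le16 us_overscan_top;
    	__le32 ul_clock_10khz;
    };

    static void demo_fill_args(struct demo_args *args, u16 top, u32 clk)
    {
    	args->us_overscan_top = cpu_to_le16(top);
    	args->ul_clock_10khz = cpu_to_le32(clk);
    }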
@@ -419,23 +419,23 @@ static void atombios_crtc_program_ss(struct drm_crtc *crtc,
 	memset(&args, 0, sizeof(args));
 
 	if (ASIC_IS_DCE5(rdev)) {
-		args.v3.usSpreadSpectrumAmountFrac = 0;
+		args.v3.usSpreadSpectrumAmountFrac = cpu_to_le16(0);
 		args.v3.ucSpreadSpectrumType = ss->type;
 		switch (pll_id) {
 		case ATOM_PPLL1:
 			args.v3.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V3_P1PLL;
-			args.v3.usSpreadSpectrumAmount = ss->amount;
-			args.v3.usSpreadSpectrumStep = ss->step;
+			args.v3.usSpreadSpectrumAmount = cpu_to_le16(ss->amount);
+			args.v3.usSpreadSpectrumStep = cpu_to_le16(ss->step);
 			break;
 		case ATOM_PPLL2:
 			args.v3.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V3_P2PLL;
-			args.v3.usSpreadSpectrumAmount = ss->amount;
-			args.v3.usSpreadSpectrumStep = ss->step;
+			args.v3.usSpreadSpectrumAmount = cpu_to_le16(ss->amount);
+			args.v3.usSpreadSpectrumStep = cpu_to_le16(ss->step);
 			break;
 		case ATOM_DCPLL:
 			args.v3.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V3_DCPLL;
-			args.v3.usSpreadSpectrumAmount = 0;
-			args.v3.usSpreadSpectrumStep = 0;
+			args.v3.usSpreadSpectrumAmount = cpu_to_le16(0);
+			args.v3.usSpreadSpectrumStep = cpu_to_le16(0);
 			break;
 		case ATOM_PPLL_INVALID:
 			return;
@@ -447,18 +447,18 @@ static void atombios_crtc_program_ss(struct drm_crtc *crtc,
 		switch (pll_id) {
 		case ATOM_PPLL1:
 			args.v2.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V2_P1PLL;
-			args.v2.usSpreadSpectrumAmount = ss->amount;
-			args.v2.usSpreadSpectrumStep = ss->step;
+			args.v2.usSpreadSpectrumAmount = cpu_to_le16(ss->amount);
+			args.v2.usSpreadSpectrumStep = cpu_to_le16(ss->step);
 			break;
 		case ATOM_PPLL2:
 			args.v2.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V2_P2PLL;
-			args.v2.usSpreadSpectrumAmount = ss->amount;
-			args.v2.usSpreadSpectrumStep = ss->step;
+			args.v2.usSpreadSpectrumAmount = cpu_to_le16(ss->amount);
+			args.v2.usSpreadSpectrumStep = cpu_to_le16(ss->step);
 			break;
 		case ATOM_DCPLL:
 			args.v2.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V2_DCPLL;
-			args.v2.usSpreadSpectrumAmount = 0;
-			args.v2.usSpreadSpectrumStep = 0;
+			args.v2.usSpreadSpectrumAmount = cpu_to_le16(0);
+			args.v2.usSpreadSpectrumStep = cpu_to_le16(0);
 			break;
 		case ATOM_PPLL_INVALID:
 			return;
@@ -538,7 +538,6 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
 			pll->flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
 		else
 			pll->flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
-
 	}
 
 	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
@@ -555,29 +554,28 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
 				dp_clock = dig_connector->dp_clock;
 		}
 	}
-/* this might work properly with the new pll algo */
-#if 0 /* doesn't work properly on some laptops */
+
 	/* use recommended ref_div for ss */
 	if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
 		if (ss_enabled) {
 			if (ss->refdiv) {
+				pll->flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP;
 				pll->flags |= RADEON_PLL_USE_REF_DIV;
 				pll->reference_div = ss->refdiv;
+				if (ASIC_IS_AVIVO(rdev))
+					pll->flags |= RADEON_PLL_USE_FRAC_FB_DIV;
 			}
 		}
 	}
-#endif
+
 	if (ASIC_IS_AVIVO(rdev)) {
 		/* DVO wants 2x pixel clock if the DVO chip is in 12 bit mode */
 		if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1)
 			adjusted_clock = mode->clock * 2;
 		if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
 			pll->flags |= RADEON_PLL_PREFER_CLOSEST_LOWER;
-		/* rv515 needs more testing with this option */
-		if (rdev->family != CHIP_RV515) {
-			if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
-				pll->flags |= RADEON_PLL_IS_LCD;
-		}
+		if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
+			pll->flags |= RADEON_PLL_IS_LCD;
 	} else {
 		if (encoder->encoder_type != DRM_MODE_ENCODER_DAC)
 			pll->flags |= RADEON_PLL_NO_ODD_POST_DIV;
@@ -664,10 +662,12 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
 							index, (uint32_t *)&args);
 			adjusted_clock = le32_to_cpu(args.v3.sOutput.ulDispPllFreq) * 10;
 			if (args.v3.sOutput.ucRefDiv) {
+				pll->flags |= RADEON_PLL_USE_FRAC_FB_DIV;
 				pll->flags |= RADEON_PLL_USE_REF_DIV;
 				pll->reference_div = args.v3.sOutput.ucRefDiv;
 			}
 			if (args.v3.sOutput.ucPostDiv) {
+				pll->flags |= RADEON_PLL_USE_FRAC_FB_DIV;
 				pll->flags |= RADEON_PLL_USE_POST_DIV;
 				pll->post_div = args.v3.sOutput.ucPostDiv;
 			}
@@ -721,14 +721,14 @@ static void atombios_crtc_set_dcpll(struct drm_crtc *crtc,
 		 * SetPixelClock provides the dividers
 		 */
 		args.v5.ucCRTC = ATOM_CRTC_INVALID;
-		args.v5.usPixelClock = dispclk;
+		args.v5.usPixelClock = cpu_to_le16(dispclk);
 		args.v5.ucPpll = ATOM_DCPLL;
 		break;
 	case 6:
 		/* if the default dcpll clock is specified,
 		 * SetPixelClock provides the dividers
 		 */
-		args.v6.ulDispEngClkFreq = dispclk;
+		args.v6.ulDispEngClkFreq = cpu_to_le32(dispclk);
 		args.v6.ucPpll = ATOM_DCPLL;
 		break;
 	default:
@@ -957,11 +957,7 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode
 	/* adjust pixel clock as needed */
 	adjusted_clock = atombios_adjust_pll(crtc, mode, pll, ss_enabled, &ss);
 
-	/* rv515 seems happier with the old algo */
-	if (rdev->family == CHIP_RV515)
-		radeon_compute_pll_legacy(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div,
-					  &ref_div, &post_div);
-	else if (ASIC_IS_AVIVO(rdev))
+	if (ASIC_IS_AVIVO(rdev))
 		radeon_compute_pll_avivo(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div,
 					 &ref_div, &post_div);
 	else
@@ -995,9 +991,9 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode
 	}
 }
 
-static int evergreen_crtc_do_set_base(struct drm_crtc *crtc,
-				      struct drm_framebuffer *fb,
-				      int x, int y, int atomic)
+static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
+				 struct drm_framebuffer *fb,
+				 int x, int y, int atomic)
 {
 	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
 	struct drm_device *dev = crtc->dev;
@@ -1137,12 +1133,6 @@ static int evergreen_crtc_do_set_base(struct drm_crtc *crtc,
 	WREG32(EVERGREEN_VIEWPORT_SIZE + radeon_crtc->crtc_offset,
 	       (crtc->mode.hdisplay << 16) | crtc->mode.vdisplay);
 
-	if (crtc->mode.flags & DRM_MODE_FLAG_INTERLACE)
-		WREG32(EVERGREEN_DATA_FORMAT + radeon_crtc->crtc_offset,
-		       EVERGREEN_INTERLEAVE_EN);
-	else
-		WREG32(EVERGREEN_DATA_FORMAT + radeon_crtc->crtc_offset, 0);
-
 	if (!atomic && fb && fb != crtc->fb) {
 		radeon_fb = to_radeon_framebuffer(fb);
 		rbo = gem_to_radeon_bo(radeon_fb->obj);
@@ -1300,12 +1290,6 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
 	WREG32(AVIVO_D1MODE_VIEWPORT_SIZE + radeon_crtc->crtc_offset,
 	       (crtc->mode.hdisplay << 16) | crtc->mode.vdisplay);
 
-	if (crtc->mode.flags & DRM_MODE_FLAG_INTERLACE)
-		WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset,
-		       AVIVO_D1MODE_INTERLEAVE_EN);
-	else
-		WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset, 0);
-
 	if (!atomic && fb && fb != crtc->fb) {
 		radeon_fb = to_radeon_framebuffer(fb);
 		rbo = gem_to_radeon_bo(radeon_fb->obj);
@@ -1329,7 +1313,7 @@ int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y,
 	struct radeon_device *rdev = dev->dev_private;
 
 	if (ASIC_IS_DCE4(rdev))
-		return evergreen_crtc_do_set_base(crtc, old_fb, x, y, 0);
+		return dce4_crtc_do_set_base(crtc, old_fb, x, y, 0);
 	else if (ASIC_IS_AVIVO(rdev))
 		return avivo_crtc_do_set_base(crtc, old_fb, x, y, 0);
 	else
@@ -1344,7 +1328,7 @@ int atombios_crtc_set_base_atomic(struct drm_crtc *crtc,
 	struct radeon_device *rdev = dev->dev_private;
 
 	if (ASIC_IS_DCE4(rdev))
-		return evergreen_crtc_do_set_base(crtc, fb, x, y, 1);
+		return dce4_crtc_do_set_base(crtc, fb, x, y, 1);
 	else if (ASIC_IS_AVIVO(rdev))
 		return avivo_crtc_do_set_base(crtc, fb, x, y, 1);
 	else
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index d4045223d0ff..789441ed9837 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -1192,7 +1192,11 @@ void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
 	radeon_ring_write(rdev, 1);
 	/* FIXME: implement */
 	radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
-	radeon_ring_write(rdev, ib->gpu_addr & 0xFFFFFFFC);
+	radeon_ring_write(rdev,
+#ifdef __BIG_ENDIAN
+			  (2 << 0) |
+#endif
+			  (ib->gpu_addr & 0xFFFFFFFC));
 	radeon_ring_write(rdev, upper_32_bits(ib->gpu_addr) & 0xFF);
 	radeon_ring_write(rdev, ib->length_dw);
 }
@@ -1207,7 +1211,11 @@ static int evergreen_cp_load_microcode(struct radeon_device *rdev)
 		return -EINVAL;
 
 	r700_cp_stop(rdev);
-	WREG32(CP_RB_CNTL, RB_NO_UPDATE | (15 << 8) | (3 << 0));
+	WREG32(CP_RB_CNTL,
+#ifdef __BIG_ENDIAN
+	       BUF_SWAP_32BIT |
+#endif
+	       RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3));
 
 	fw_data = (const __be32 *)rdev->pfp_fw->data;
 	WREG32(CP_PFP_UCODE_ADDR, 0);
@@ -1326,7 +1334,11 @@ int evergreen_cp_resume(struct radeon_device *rdev)
 	WREG32(CP_RB_WPTR, 0);
 
 	/* set the wb address wether it's enabled or not */
-	WREG32(CP_RB_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC);
+	WREG32(CP_RB_RPTR_ADDR,
+#ifdef __BIG_ENDIAN
+	       RB_RPTR_SWAP(2) |
+#endif
+	       ((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC));
 	WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
 	WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);
 
@@ -2627,8 +2639,8 @@ restart_ih:
 	while (rptr != wptr) {
 		/* wptr/rptr are in bytes! */
 		ring_index = rptr / 4;
-		src_id = rdev->ih.ring[ring_index] & 0xff;
-		src_data = rdev->ih.ring[ring_index + 1] & 0xfffffff;
+		src_id = le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff;
+		src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff;
 
 		switch (src_id) {
 		case 1: /* D1 vblank/vline */
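On a big-endian host the CPU stores ring and indirect-buffer words byte-swapped relative to the GPU's little-endian fetches, so these hunks enable the hardware swap controls (BUF_SWAP_32BIT, RB_RPTR_SWAP(2), the IB swap field) under #ifdef __BIG_ENDIAN, and wrap CPU reads of GPU-written interrupt-ring data in le32_to_cpu(). An equivalent formulation of the CP_RB_CNTL write, building the value first; a sketch, not the patch's exact shape:

    	u32 rb_cntl = RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3);

    #ifdef __BIG_ENDIAN
    	rb_cntl |= BUF_SWAP_32BIT;	/* let the CP swap each 32-bit word */
    #endif
    	WREG32(CP_RB_CNTL, rb_cntl);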
diff --git a/drivers/gpu/drm/radeon/evergreen_blit_kms.c b/drivers/gpu/drm/radeon/evergreen_blit_kms.c
index 2ed930e02f3a..3218287f4c51 100644
--- a/drivers/gpu/drm/radeon/evergreen_blit_kms.c
+++ b/drivers/gpu/drm/radeon/evergreen_blit_kms.c
@@ -55,7 +55,7 @@ set_render_target(struct radeon_device *rdev, int format,
 	if (h < 8)
 		h = 8;
 
-	cb_color_info = ((format << 2) | (1 << 24));
+	cb_color_info = ((format << 2) | (1 << 24) | (1 << 8));
 	pitch = (w / 8) - 1;
 	slice = ((w * h) / 64) - 1;
 
@@ -133,6 +133,9 @@ set_vtx_resource(struct radeon_device *rdev, u64 gpu_addr)
 
 	/* high addr, stride */
 	sq_vtx_constant_word2 = ((upper_32_bits(gpu_addr) & 0xff) | (16 << 8));
+#ifdef __BIG_ENDIAN
+	sq_vtx_constant_word2 |= (2 << 30);
+#endif
 	/* xyzw swizzles */
 	sq_vtx_constant_word3 = (0 << 3) | (1 << 6) | (2 << 9) | (3 << 12);
 
138 141
@@ -173,7 +176,7 @@ set_tex_resource(struct radeon_device *rdev,
173 sq_tex_resource_word0 = (1 << 0); /* 2D */ 176 sq_tex_resource_word0 = (1 << 0); /* 2D */
174 sq_tex_resource_word0 |= ((((pitch >> 3) - 1) << 6) | 177 sq_tex_resource_word0 |= ((((pitch >> 3) - 1) << 6) |
175 ((w - 1) << 18)); 178 ((w - 1) << 18));
176 sq_tex_resource_word1 = ((h - 1) << 0); 179 sq_tex_resource_word1 = ((h - 1) << 0) | (1 << 28);
177 /* xyzw swizzles */ 180 /* xyzw swizzles */
178 sq_tex_resource_word4 = (0 << 16) | (1 << 19) | (2 << 22) | (3 << 25); 181 sq_tex_resource_word4 = (0 << 16) | (1 << 19) | (2 << 22) | (3 << 25);
179 182
@@ -221,7 +224,11 @@ draw_auto(struct radeon_device *rdev)
221 radeon_ring_write(rdev, DI_PT_RECTLIST); 224 radeon_ring_write(rdev, DI_PT_RECTLIST);
222 225
223 radeon_ring_write(rdev, PACKET3(PACKET3_INDEX_TYPE, 0)); 226 radeon_ring_write(rdev, PACKET3(PACKET3_INDEX_TYPE, 0));
224 radeon_ring_write(rdev, DI_INDEX_SIZE_16_BIT); 227 radeon_ring_write(rdev,
228#ifdef __BIG_ENDIAN
229 (2 << 2) |
230#endif
231 DI_INDEX_SIZE_16_BIT);
225 232
226 radeon_ring_write(rdev, PACKET3(PACKET3_NUM_INSTANCES, 0)); 233 radeon_ring_write(rdev, PACKET3(PACKET3_NUM_INSTANCES, 0));
227 radeon_ring_write(rdev, 1); 234 radeon_ring_write(rdev, 1);
@@ -541,7 +548,7 @@ static inline uint32_t i2f(uint32_t input)
 int evergreen_blit_init(struct radeon_device *rdev)
 {
 	u32 obj_size;
-	int r, dwords;
+	int i, r, dwords;
 	void *ptr;
 	u32 packet2s[16];
 	int num_packet2s = 0;
@@ -557,7 +564,7 @@ int evergreen_blit_init(struct radeon_device *rdev)
 
 	dwords = rdev->r600_blit.state_len;
 	while (dwords & 0xf) {
-		packet2s[num_packet2s++] = PACKET2(0);
+		packet2s[num_packet2s++] = cpu_to_le32(PACKET2(0));
 		dwords++;
 	}
 
@@ -598,8 +605,10 @@ int evergreen_blit_init(struct radeon_device *rdev)
 	if (num_packet2s)
 		memcpy_toio(ptr + rdev->r600_blit.state_offset + (rdev->r600_blit.state_len * 4),
 			    packet2s, num_packet2s * 4);
-	memcpy(ptr + rdev->r600_blit.vs_offset, evergreen_vs, evergreen_vs_size * 4);
-	memcpy(ptr + rdev->r600_blit.ps_offset, evergreen_ps, evergreen_ps_size * 4);
+	for (i = 0; i < evergreen_vs_size; i++)
+		*(u32 *)((unsigned long)ptr + rdev->r600_blit.vs_offset + i * 4) = cpu_to_le32(evergreen_vs[i]);
+	for (i = 0; i < evergreen_ps_size; i++)
+		*(u32 *)((unsigned long)ptr + rdev->r600_blit.ps_offset + i * 4) = cpu_to_le32(evergreen_ps[i]);
 	radeon_bo_kunmap(rdev->r600_blit.shader_obj);
 	radeon_bo_unreserve(rdev->r600_blit.shader_obj);
 
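Replacing the two memcpy() calls with per-dword loops lets each 32-bit shader token be stored through cpu_to_le32(), since the CP consumes little-endian words; a bulk memcpy would have preserved host byte order. The shape of the fix, as a sketch with hypothetical names:

    #include <asm/byteorder.h>
    #include <linux/types.h>

    /* Hypothetical sketch: store shader dwords in GPU (LE) byte order. */
    static void demo_copy_shader_le(u32 *dst, const u32 *src, int ndw)
    {
    	int i;

    	for (i = 0; i < ndw; i++)
    		dst[i] = cpu_to_le32(src[i]);
    }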
diff --git a/drivers/gpu/drm/radeon/evergreen_blit_shaders.c b/drivers/gpu/drm/radeon/evergreen_blit_shaders.c
index ef1d28c07fbf..3a10399e0066 100644
--- a/drivers/gpu/drm/radeon/evergreen_blit_shaders.c
+++ b/drivers/gpu/drm/radeon/evergreen_blit_shaders.c
@@ -311,11 +311,19 @@ const u32 evergreen_vs[] =
 	0x00000000,
 	0x3c000000,
 	0x67961001,
+#ifdef __BIG_ENDIAN
+	0x000a0000,
+#else
 	0x00080000,
+#endif
 	0x00000000,
 	0x1c000000,
 	0x67961000,
+#ifdef __BIG_ENDIAN
+	0x00020008,
+#else
 	0x00000008,
+#endif
 	0x00000000,
 };
 
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index 21e839bd20e7..9aaa3f0c9372 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -98,6 +98,7 @@
 #define	BUF_SWAP_32BIT		(2 << 16)
 #define	CP_RB_RPTR		0x8700
 #define	CP_RB_RPTR_ADDR		0xC10C
+#define	RB_RPTR_SWAP(x)		((x) << 0)
 #define	CP_RB_RPTR_ADDR_HI	0xC110
 #define	CP_RB_RPTR_WR		0xC108
 #define	CP_RB_WPTR		0xC114
diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c
index 607241c6a8a9..5a82b6b75849 100644
--- a/drivers/gpu/drm/radeon/mkregtable.c
+++ b/drivers/gpu/drm/radeon/mkregtable.c
@@ -673,8 +673,10 @@ static int parser_auth(struct table *t, const char *filename)
 	last_reg = strtol(last_reg_s, NULL, 16);
 
 	do {
-		if (fgets(buf, 1024, file) == NULL)
+		if (fgets(buf, 1024, file) == NULL) {
+			fclose(file);
 			return -1;
+		}
 		len = strlen(buf);
 		if (ftell(file) == end)
 			done = 1;
@@ -685,6 +687,7 @@ static int parser_auth(struct table *t, const char *filename)
 			fprintf(stderr,
 				"Error matching regular expression %d in %s\n",
 				r, filename);
+			fclose(file);
 			return -1;
 		} else {
 			buf[match[0].rm_eo] = 0;
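Both hunks close the FILE handle on early-return paths; mkregtable runs on the build host, so the leak was short-lived, but every exit from parser_auth() should now release the file. The usual single-cleanup-label shape of such a fix, as a self-contained sketch:

    #include <stdio.h>

    static int demo_parse(const char *path)
    {
    	FILE *file = fopen(path, "r");
    	char buf[1024];
    	int ret = -1;

    	if (file == NULL)
    		return -1;
    	if (fgets(buf, sizeof(buf), file) == NULL)
    		goto out;	/* error path still closes the file */
    	ret = 0;
    out:
    	fclose(file);
    	return ret;
    }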
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 5f15820efe12..93fa735c8c1a 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -1427,6 +1427,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
 		}
 		track->zb.robj = reloc->robj;
 		track->zb.offset = idx_value;
+		track->zb_dirty = true;
 		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
 		break;
 	case RADEON_RB3D_COLOROFFSET:
1432 case RADEON_RB3D_COLOROFFSET: 1433 case RADEON_RB3D_COLOROFFSET:
@@ -1439,6 +1440,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
1439 } 1440 }
1440 track->cb[0].robj = reloc->robj; 1441 track->cb[0].robj = reloc->robj;
1441 track->cb[0].offset = idx_value; 1442 track->cb[0].offset = idx_value;
1443 track->cb_dirty = true;
1442 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); 1444 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
1443 break; 1445 break;
1444 case RADEON_PP_TXOFFSET_0: 1446 case RADEON_PP_TXOFFSET_0:
@@ -1454,6 +1456,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
1454 } 1456 }
1455 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); 1457 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
1456 track->textures[i].robj = reloc->robj; 1458 track->textures[i].robj = reloc->robj;
1459 track->tex_dirty = true;
1457 break; 1460 break;
1458 case RADEON_PP_CUBIC_OFFSET_T0_0: 1461 case RADEON_PP_CUBIC_OFFSET_T0_0:
1459 case RADEON_PP_CUBIC_OFFSET_T0_1: 1462 case RADEON_PP_CUBIC_OFFSET_T0_1:
@@ -1471,6 +1474,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
1471 track->textures[0].cube_info[i].offset = idx_value; 1474 track->textures[0].cube_info[i].offset = idx_value;
1472 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); 1475 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
1473 track->textures[0].cube_info[i].robj = reloc->robj; 1476 track->textures[0].cube_info[i].robj = reloc->robj;
1477 track->tex_dirty = true;
1474 break; 1478 break;
1475 case RADEON_PP_CUBIC_OFFSET_T1_0: 1479 case RADEON_PP_CUBIC_OFFSET_T1_0:
1476 case RADEON_PP_CUBIC_OFFSET_T1_1: 1480 case RADEON_PP_CUBIC_OFFSET_T1_1:
@@ -1488,6 +1492,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
1488 track->textures[1].cube_info[i].offset = idx_value; 1492 track->textures[1].cube_info[i].offset = idx_value;
1489 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); 1493 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
1490 track->textures[1].cube_info[i].robj = reloc->robj; 1494 track->textures[1].cube_info[i].robj = reloc->robj;
1495 track->tex_dirty = true;
1491 break; 1496 break;
1492 case RADEON_PP_CUBIC_OFFSET_T2_0: 1497 case RADEON_PP_CUBIC_OFFSET_T2_0:
1493 case RADEON_PP_CUBIC_OFFSET_T2_1: 1498 case RADEON_PP_CUBIC_OFFSET_T2_1:
@@ -1505,9 +1510,12 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
1505 track->textures[2].cube_info[i].offset = idx_value; 1510 track->textures[2].cube_info[i].offset = idx_value;
1506 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); 1511 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
1507 track->textures[2].cube_info[i].robj = reloc->robj; 1512 track->textures[2].cube_info[i].robj = reloc->robj;
1513 track->tex_dirty = true;
1508 break; 1514 break;
1509 case RADEON_RE_WIDTH_HEIGHT: 1515 case RADEON_RE_WIDTH_HEIGHT:
1510 track->maxy = ((idx_value >> 16) & 0x7FF); 1516 track->maxy = ((idx_value >> 16) & 0x7FF);
1517 track->cb_dirty = true;
1518 track->zb_dirty = true;
1511 break; 1519 break;
1512 case RADEON_RB3D_COLORPITCH: 1520 case RADEON_RB3D_COLORPITCH:
1513 r = r100_cs_packet_next_reloc(p, &reloc); 1521 r = r100_cs_packet_next_reloc(p, &reloc);
@@ -1528,9 +1536,11 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
1528 ib[idx] = tmp; 1536 ib[idx] = tmp;
1529 1537
1530 track->cb[0].pitch = idx_value & RADEON_COLORPITCH_MASK; 1538 track->cb[0].pitch = idx_value & RADEON_COLORPITCH_MASK;
1539 track->cb_dirty = true;
1531 break; 1540 break;
1532 case RADEON_RB3D_DEPTHPITCH: 1541 case RADEON_RB3D_DEPTHPITCH:
1533 track->zb.pitch = idx_value & RADEON_DEPTHPITCH_MASK; 1542 track->zb.pitch = idx_value & RADEON_DEPTHPITCH_MASK;
1543 track->zb_dirty = true;
1534 break; 1544 break;
1535 case RADEON_RB3D_CNTL: 1545 case RADEON_RB3D_CNTL:
1536 switch ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f) { 1546 switch ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f) {
@@ -1555,6 +1565,8 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
 			return -EINVAL;
 		}
 		track->z_enabled = !!(idx_value & RADEON_Z_ENABLE);
+		track->cb_dirty = true;
+		track->zb_dirty = true;
 		break;
 	case RADEON_RB3D_ZSTENCILCNTL:
 		switch (idx_value & 0xf) {
@@ -1572,6 +1584,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
 		default:
 			break;
 		}
+		track->zb_dirty = true;
 		break;
 	case RADEON_RB3D_ZPASS_ADDR:
 		r = r100_cs_packet_next_reloc(p, &reloc);
@@ -1588,6 +1601,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
 			uint32_t temp = idx_value >> 4;
 			for (i = 0; i < track->num_texture; i++)
 				track->textures[i].enabled = !!(temp & (1 << i));
+			track->tex_dirty = true;
 		}
 		break;
 	case RADEON_SE_VF_CNTL:
@@ -1602,12 +1616,14 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
 		i = (reg - RADEON_PP_TEX_SIZE_0) / 8;
 		track->textures[i].width = (idx_value & RADEON_TEX_USIZE_MASK) + 1;
 		track->textures[i].height = ((idx_value & RADEON_TEX_VSIZE_MASK) >> RADEON_TEX_VSIZE_SHIFT) + 1;
+		track->tex_dirty = true;
 		break;
 	case RADEON_PP_TEX_PITCH_0:
 	case RADEON_PP_TEX_PITCH_1:
 	case RADEON_PP_TEX_PITCH_2:
 		i = (reg - RADEON_PP_TEX_PITCH_0) / 8;
 		track->textures[i].pitch = idx_value + 32;
+		track->tex_dirty = true;
 		break;
 	case RADEON_PP_TXFILTER_0:
 	case RADEON_PP_TXFILTER_1:
@@ -1621,6 +1637,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
 		tmp = (idx_value >> 27) & 0x7;
 		if (tmp == 2 || tmp == 6)
 			track->textures[i].roundup_h = false;
+		track->tex_dirty = true;
 		break;
 	case RADEON_PP_TXFORMAT_0:
 	case RADEON_PP_TXFORMAT_1:
@@ -1673,6 +1690,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
 		}
 		track->textures[i].cube_info[4].width = 1 << ((idx_value >> 16) & 0xf);
 		track->textures[i].cube_info[4].height = 1 << ((idx_value >> 20) & 0xf);
+		track->tex_dirty = true;
 		break;
 	case RADEON_PP_CUBIC_FACES_0:
 	case RADEON_PP_CUBIC_FACES_1:
@@ -1683,6 +1701,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
 			track->textures[i].cube_info[face].width = 1 << ((tmp >> (face * 8)) & 0xf);
 			track->textures[i].cube_info[face].height = 1 << ((tmp >> ((face * 8) + 4)) & 0xf);
 		}
+		track->tex_dirty = true;
 		break;
 	default:
 		printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n",
@@ -3318,9 +3337,9 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
 	unsigned long size;
 	unsigned prim_walk;
 	unsigned nverts;
-	unsigned num_cb = track->num_cb;
+	unsigned num_cb = track->cb_dirty ? track->num_cb : 0;
 
-	if (!track->zb_cb_clear && !track->color_channel_mask &&
+	if (num_cb && !track->zb_cb_clear && !track->color_channel_mask &&
 	    !track->blend_read_enable)
 		num_cb = 0;
 
@@ -3341,7 +3360,9 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
 			return -EINVAL;
 		}
 	}
-	if (track->z_enabled) {
+	track->cb_dirty = false;
+
+	if (track->zb_dirty && track->z_enabled) {
 		if (track->zb.robj == NULL) {
 			DRM_ERROR("[drm] No buffer for z buffer !\n");
 			return -EINVAL;
@@ -3358,6 +3379,28 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
 			return -EINVAL;
 		}
 	}
+	track->zb_dirty = false;
+
+	if (track->aa_dirty && track->aaresolve) {
+		if (track->aa.robj == NULL) {
+			DRM_ERROR("[drm] No buffer for AA resolve buffer %d !\n", i);
+			return -EINVAL;
+		}
+		/* I believe the format comes from colorbuffer0. */
+		size = track->aa.pitch * track->cb[0].cpp * track->maxy;
+		size += track->aa.offset;
+		if (size > radeon_bo_size(track->aa.robj)) {
+			DRM_ERROR("[drm] Buffer too small for AA resolve buffer %d "
+				  "(need %lu have %lu) !\n", i, size,
+				  radeon_bo_size(track->aa.robj));
+			DRM_ERROR("[drm] AA resolve buffer %d (%u %u %u %u)\n",
+				  i, track->aa.pitch, track->cb[0].cpp,
+				  track->aa.offset, track->maxy);
+			return -EINVAL;
+		}
+	}
+	track->aa_dirty = false;
+
 	prim_walk = (track->vap_vf_cntl >> 4) & 0x3;
 	if (track->vap_vf_cntl & (1 << 14)) {
 		nverts = track->vap_alt_nverts;
@@ -3417,13 +3460,23 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
 			  prim_walk);
 		return -EINVAL;
 	}
-	return r100_cs_track_texture_check(rdev, track);
+
+	if (track->tex_dirty) {
+		track->tex_dirty = false;
+		return r100_cs_track_texture_check(rdev, track);
+	}
+	return 0;
 }
 
 void r100_cs_track_clear(struct radeon_device *rdev, struct r100_cs_track *track)
 {
 	unsigned i, face;
 
+	track->cb_dirty = true;
+	track->zb_dirty = true;
+	track->tex_dirty = true;
+	track->aa_dirty = true;
+
 	if (rdev->family < CHIP_R300) {
 		track->num_cb = 1;
 		if (rdev->family <= CHIP_RS200)
@@ -3437,6 +3490,8 @@ void r100_cs_track_clear(struct radeon_device *rdev, struct r100_cs_track *track
 		track->num_texture = 16;
 		track->maxy = 4096;
 		track->separate_cube = 0;
+		track->aaresolve = false;
+		track->aa.robj = NULL;
 	}
 
 	for (i = 0; i < track->num_cb; i++) {
@@ -3746,8 +3801,6 @@ static int r100_startup(struct radeon_device *rdev)
 	r100_mc_program(rdev);
 	/* Resume clock */
 	r100_clock_startup(rdev);
-	/* Initialize GPU configuration (# pipes, ...) */
-//	r100_gpu_init(rdev);
 	/* Initialize GART (initialize after TTM so we can allocate
 	 * memory through TTM but finalize after TTM) */
 	r100_enable_bm(rdev);
diff --git a/drivers/gpu/drm/radeon/r100_track.h b/drivers/gpu/drm/radeon/r100_track.h
index af65600e6564..2fef9de7f363 100644
--- a/drivers/gpu/drm/radeon/r100_track.h
+++ b/drivers/gpu/drm/radeon/r100_track.h
@@ -52,14 +52,7 @@ struct r100_cs_track_texture {
 	unsigned		compress_format;
 };
 
-struct r100_cs_track_limits {
-	unsigned num_cb;
-	unsigned num_texture;
-	unsigned max_levels;
-};
-
 struct r100_cs_track {
-	struct radeon_device *rdev;
 	unsigned			num_cb;
 	unsigned			num_texture;
 	unsigned			maxy;
@@ -73,11 +66,17 @@ struct r100_cs_track {
 	struct r100_cs_track_array	arrays[11];
 	struct r100_cs_track_cb		cb[R300_MAX_CB];
 	struct r100_cs_track_cb		zb;
+	struct r100_cs_track_cb		aa;
 	struct r100_cs_track_texture	textures[R300_TRACK_MAX_TEXTURE];
 	bool				z_enabled;
 	bool				separate_cube;
 	bool				zb_cb_clear;
 	bool				blend_read_enable;
+	bool				cb_dirty;
+	bool				zb_dirty;
+	bool				tex_dirty;
+	bool				aa_dirty;
+	bool				aaresolve;
 };
 
 int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track);
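The new cb_dirty/zb_dirty/tex_dirty/aa_dirty flags turn the command-stream checker into dirty-group validation: packet parsing sets a group's flag whenever one of its registers is written, r100_cs_track_check() re-validates only the dirty groups and clears their flags, and r100_cs_track_clear() marks everything dirty so the first check does a full pass. A minimal sketch of the scheme with hypothetical names:

    #include <stdbool.h>

    struct demo_track {
    	bool cb_dirty;	/* color-buffer registers changed */
    	bool zb_dirty;	/* depth-buffer registers changed */
    };

    static int demo_check(struct demo_track *t)
    {
    	if (t->cb_dirty) {
    		/* ... validate color-buffer state here ... */
    		t->cb_dirty = false;
    	}
    	if (t->zb_dirty) {
    		/* ... validate depth-buffer state here ... */
    		t->zb_dirty = false;
    	}
    	return 0;	/* clean groups skip the expensive re-checks */
    }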
diff --git a/drivers/gpu/drm/radeon/r200.c b/drivers/gpu/drm/radeon/r200.c
index d2408c395619..f24058300413 100644
--- a/drivers/gpu/drm/radeon/r200.c
+++ b/drivers/gpu/drm/radeon/r200.c
@@ -184,6 +184,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
 		}
 		track->zb.robj = reloc->robj;
 		track->zb.offset = idx_value;
+		track->zb_dirty = true;
 		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
 		break;
 	case RADEON_RB3D_COLOROFFSET:
@@ -196,6 +197,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
 		}
 		track->cb[0].robj = reloc->robj;
 		track->cb[0].offset = idx_value;
+		track->cb_dirty = true;
 		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
 		break;
 	case R200_PP_TXOFFSET_0:
@@ -214,6 +216,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
 		}
 		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
 		track->textures[i].robj = reloc->robj;
+		track->tex_dirty = true;
 		break;
 	case R200_PP_CUBIC_OFFSET_F1_0:
 	case R200_PP_CUBIC_OFFSET_F2_0:
@@ -257,9 +260,12 @@ int r200_packet0_check(struct radeon_cs_parser *p,
 		track->textures[i].cube_info[face - 1].offset = idx_value;
 		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
 		track->textures[i].cube_info[face - 1].robj = reloc->robj;
+		track->tex_dirty = true;
 		break;
 	case RADEON_RE_WIDTH_HEIGHT:
 		track->maxy = ((idx_value >> 16) & 0x7FF);
+		track->cb_dirty = true;
+		track->zb_dirty = true;
 		break;
 	case RADEON_RB3D_COLORPITCH:
 		r = r100_cs_packet_next_reloc(p, &reloc);
@@ -280,9 +286,11 @@ int r200_packet0_check(struct radeon_cs_parser *p,
 		ib[idx] = tmp;
 
 		track->cb[0].pitch = idx_value & RADEON_COLORPITCH_MASK;
+		track->cb_dirty = true;
 		break;
 	case RADEON_RB3D_DEPTHPITCH:
 		track->zb.pitch = idx_value & RADEON_DEPTHPITCH_MASK;
+		track->zb_dirty = true;
 		break;
 	case RADEON_RB3D_CNTL:
 		switch ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f) {
@@ -312,6 +320,8 @@ int r200_packet0_check(struct radeon_cs_parser *p,
 		}
 
 		track->z_enabled = !!(idx_value & RADEON_Z_ENABLE);
+		track->cb_dirty = true;
+		track->zb_dirty = true;
 		break;
 	case RADEON_RB3D_ZSTENCILCNTL:
 		switch (idx_value & 0xf) {
@@ -329,6 +339,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
 		default:
 			break;
 		}
+		track->zb_dirty = true;
 		break;
 	case RADEON_RB3D_ZPASS_ADDR:
 		r = r100_cs_packet_next_reloc(p, &reloc);
@@ -345,6 +356,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
 			uint32_t temp = idx_value >> 4;
 			for (i = 0; i < track->num_texture; i++)
 				track->textures[i].enabled = !!(temp & (1 << i));
+			track->tex_dirty = true;
 		}
 		break;
 	case RADEON_SE_VF_CNTL:
@@ -369,6 +381,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
 		i = (reg - R200_PP_TXSIZE_0) / 32;
 		track->textures[i].width = (idx_value & RADEON_TEX_USIZE_MASK) + 1;
 		track->textures[i].height = ((idx_value & RADEON_TEX_VSIZE_MASK) >> RADEON_TEX_VSIZE_SHIFT) + 1;
+		track->tex_dirty = true;
 		break;
 	case R200_PP_TXPITCH_0:
 	case R200_PP_TXPITCH_1:
@@ -378,6 +391,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
 	case R200_PP_TXPITCH_5:
 		i = (reg - R200_PP_TXPITCH_0) / 32;
 		track->textures[i].pitch = idx_value + 32;
+		track->tex_dirty = true;
 		break;
 	case R200_PP_TXFILTER_0:
 	case R200_PP_TXFILTER_1:
@@ -394,6 +408,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
 		tmp = (idx_value >> 27) & 0x7;
 		if (tmp == 2 || tmp == 6)
 			track->textures[i].roundup_h = false;
+		track->tex_dirty = true;
 		break;
 	case R200_PP_TXMULTI_CTL_0:
 	case R200_PP_TXMULTI_CTL_1:
@@ -432,6 +447,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
 			track->textures[i].tex_coord_type = 1;
 			break;
 		}
+		track->tex_dirty = true;
 		break;
 	case R200_PP_TXFORMAT_0:
 	case R200_PP_TXFORMAT_1:
@@ -488,6 +504,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
 		}
 		track->textures[i].cube_info[4].width = 1 << ((idx_value >> 16) & 0xf);
 		track->textures[i].cube_info[4].height = 1 << ((idx_value >> 20) & 0xf);
+		track->tex_dirty = true;
 		break;
 	case R200_PP_CUBIC_FACES_0:
 	case R200_PP_CUBIC_FACES_1:
@@ -501,6 +518,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
 			track->textures[i].cube_info[face].width = 1 << ((tmp >> (face * 8)) & 0xf);
 			track->textures[i].cube_info[face].height = 1 << ((tmp >> ((face * 8) + 4)) & 0xf);
 		}
+		track->tex_dirty = true;
 		break;
 	default:
 		printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n",
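
The r200.c hunks above (and the r300.c hunks that follow) all apply one pattern: every register write that can affect color-buffer, depth-buffer, or texture state now also raises the matching dirty flag, so r100_cs_track_check() can skip re-validating state groups no packet in the stream touched. A minimal sketch of the idea (the names below are illustrative, not the driver's exact API):

	#include <stdbool.h>

	/* Sketch: per-group dirty flags let the checker validate only what changed. */
	struct cs_track {
		bool cb_dirty;	/* color buffer registers were written */
		bool zb_dirty;	/* depth buffer registers were written */
		bool tex_dirty;	/* texture registers were written */
	};

	static int cs_track_check(struct cs_track *t)
	{
		if (t->cb_dirty) {
			/* ... validate color buffer setup ... */
			t->cb_dirty = false;	/* clean until the next write */
		}
		if (t->zb_dirty) {
			/* ... validate depth buffer setup ... */
			t->zb_dirty = false;
		}
		if (t->tex_dirty) {
			/* ... validate texture setup ... */
			t->tex_dirty = false;
		}
		return 0;
	}
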
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 55fe5ba7def3..069efa8c8ecf 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -667,6 +667,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
 		}
 		track->cb[i].robj = reloc->robj;
 		track->cb[i].offset = idx_value;
+		track->cb_dirty = true;
 		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
 		break;
 	case R300_ZB_DEPTHOFFSET:
@@ -679,6 +680,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
 		}
 		track->zb.robj = reloc->robj;
 		track->zb.offset = idx_value;
+		track->zb_dirty = true;
 		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
 		break;
 	case R300_TX_OFFSET_0:
@@ -717,6 +719,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
 		tmp |= tile_flags;
 		ib[idx] = tmp;
 		track->textures[i].robj = reloc->robj;
+		track->tex_dirty = true;
 		break;
 	/* Tracked registers */
 	case 0x2084:
@@ -743,6 +746,8 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
 		if (p->rdev->family < CHIP_RV515) {
 			track->maxy -= 1440;
 		}
+		track->cb_dirty = true;
+		track->zb_dirty = true;
 		break;
 	case 0x4E00:
 		/* RB3D_CCTL */
@@ -752,6 +757,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
 			return -EINVAL;
 		}
 		track->num_cb = ((idx_value >> 5) & 0x3) + 1;
+		track->cb_dirty = true;
 		break;
 	case 0x4E38:
 	case 0x4E3C:
@@ -814,6 +820,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
 				  ((idx_value >> 21) & 0xF));
 			return -EINVAL;
 		}
+		track->cb_dirty = true;
 		break;
 	case 0x4F00:
 		/* ZB_CNTL */
@@ -822,6 +829,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
 		} else {
 			track->z_enabled = false;
 		}
+		track->zb_dirty = true;
 		break;
 	case 0x4F10:
 		/* ZB_FORMAT */
@@ -838,6 +846,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
 				  (idx_value & 0xF));
 			return -EINVAL;
 		}
+		track->zb_dirty = true;
 		break;
 	case 0x4F24:
 		/* ZB_DEPTHPITCH */
@@ -861,14 +870,17 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
 		ib[idx] = tmp;
 
 		track->zb.pitch = idx_value & 0x3FFC;
+		track->zb_dirty = true;
 		break;
 	case 0x4104:
+		/* TX_ENABLE */
 		for (i = 0; i < 16; i++) {
 			bool enabled;
 
 			enabled = !!(idx_value & (1 << i));
 			track->textures[i].enabled = enabled;
 		}
+		track->tex_dirty = true;
 		break;
 	case 0x44C0:
 	case 0x44C4:
@@ -898,6 +910,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
 			track->textures[i].compress_format = R100_TRACK_COMP_NONE;
 			break;
 		case R300_TX_FORMAT_X16:
+		case R300_TX_FORMAT_FL_I16:
 		case R300_TX_FORMAT_Y8X8:
 		case R300_TX_FORMAT_Z5Y6X5:
 		case R300_TX_FORMAT_Z6Y5X5:
@@ -910,6 +923,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
 			track->textures[i].compress_format = R100_TRACK_COMP_NONE;
 			break;
 		case R300_TX_FORMAT_Y16X16:
+		case R300_TX_FORMAT_FL_I16A16:
 		case R300_TX_FORMAT_Z11Y11X10:
 		case R300_TX_FORMAT_Z10Y11X11:
 		case R300_TX_FORMAT_W8Z8Y8X8:
@@ -951,8 +965,8 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
 			DRM_ERROR("Invalid texture format %u\n",
 				  (idx_value & 0x1F));
 			return -EINVAL;
-			break;
 		}
+		track->tex_dirty = true;
 		break;
 	case 0x4400:
 	case 0x4404:
@@ -980,6 +994,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
 		if (tmp == 2 || tmp == 4 || tmp == 6) {
 			track->textures[i].roundup_h = false;
 		}
+		track->tex_dirty = true;
 		break;
 	case 0x4500:
 	case 0x4504:
@@ -1017,6 +1032,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
 			DRM_ERROR("Forbidden bit TXFORMAT_MSB\n");
 			return -EINVAL;
 		}
+		track->tex_dirty = true;
 		break;
 	case 0x4480:
 	case 0x4484:
@@ -1046,6 +1062,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
 		track->textures[i].use_pitch = !!tmp;
 		tmp = (idx_value >> 22) & 0xF;
 		track->textures[i].txdepth = tmp;
+		track->tex_dirty = true;
 		break;
 	case R300_ZB_ZPASS_ADDR:
 		r = r100_cs_packet_next_reloc(p, &reloc);
@@ -1060,6 +1077,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
 	case 0x4e0c:
 		/* RB3D_COLOR_CHANNEL_MASK */
 		track->color_channel_mask = idx_value;
+		track->cb_dirty = true;
 		break;
 	case 0x43a4:
 		/* SC_HYPERZ_EN */
@@ -1073,6 +1091,8 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
 	case 0x4f1c:
 		/* ZB_BW_CNTL */
 		track->zb_cb_clear = !!(idx_value & (1 << 5));
+		track->cb_dirty = true;
+		track->zb_dirty = true;
 		if (p->rdev->hyperz_filp != p->filp) {
 			if (idx_value & (R300_HIZ_ENABLE |
 					 R300_RD_COMP_ENABLE |
@@ -1084,8 +1104,28 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
 	case 0x4e04:
 		/* RB3D_BLENDCNTL */
 		track->blend_read_enable = !!(idx_value & (1 << 2));
+		track->cb_dirty = true;
+		break;
+	case R300_RB3D_AARESOLVE_OFFSET:
+		r = r100_cs_packet_next_reloc(p, &reloc);
+		if (r) {
+			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+				  idx, reg);
+			r100_cs_dump_packet(p, pkt);
+			return r;
+		}
+		track->aa.robj = reloc->robj;
+		track->aa.offset = idx_value;
+		track->aa_dirty = true;
+		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
+		break;
+	case R300_RB3D_AARESOLVE_PITCH:
+		track->aa.pitch = idx_value & 0x3FFE;
+		track->aa_dirty = true;
 		break;
-	case 0x4f28: /* ZB_DEPTHCLEARVALUE */
+	case R300_RB3D_AARESOLVE_CTL:
+		track->aaresolve = idx_value & 0x1;
+		track->aa_dirty = true;
 		break;
 	case 0x4f30: /* ZB_MASK_OFFSET */
 	case 0x4f34: /* ZB_ZMASK_PITCH */
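
With the hunk above, r300 tracks the AA resolve target (offset, pitch, enable bit) like any other render target, so the checker can verify the resolve destination fits its backing object before the CS is accepted. The bounds check has the usual shape; a sketch with illustrative names, not the driver's exact check:

	#include <stdbool.h>
	#include <stdint.h>

	/* Sketch: reject a CS if the last scanline written would fall
	 * outside the buffer object backing the AA resolve target. */
	static bool aa_target_fits(uint64_t bo_size, uint32_t offset,
				   uint32_t pitch_bytes, uint32_t maxy)
	{
		uint64_t end = (uint64_t)offset + (uint64_t)pitch_bytes * maxy;

		return end <= bo_size;
	}
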
diff --git a/drivers/gpu/drm/radeon/r300_reg.h b/drivers/gpu/drm/radeon/r300_reg.h
index 1a0d5362cd79..f0bce399c9f3 100644
--- a/drivers/gpu/drm/radeon/r300_reg.h
+++ b/drivers/gpu/drm/radeon/r300_reg.h
@@ -1371,6 +1371,8 @@
 #define R300_RB3D_COLORPITCH2               0x4E40 /* GUESS */
 #define R300_RB3D_COLORPITCH3               0x4E44 /* GUESS */
 
+#define R300_RB3D_AARESOLVE_OFFSET          0x4E80
+#define R300_RB3D_AARESOLVE_PITCH           0x4E84
 #define R300_RB3D_AARESOLVE_CTL             0x4E88
 /* gap */
 
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 1cd56dc8c8ab..b409b24207a1 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -2106,7 +2106,11 @@ static int r600_cp_load_microcode(struct radeon_device *rdev)
 
 	r600_cp_stop(rdev);
 
-	WREG32(CP_RB_CNTL, RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3));
+	WREG32(CP_RB_CNTL,
+#ifdef __BIG_ENDIAN
+	       BUF_SWAP_32BIT |
+#endif
+	       RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3));
 
 	/* Reset cp */
 	WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
@@ -2193,7 +2197,11 @@ int r600_cp_resume(struct radeon_device *rdev)
 	WREG32(CP_RB_WPTR, 0);
 
 	/* set the wb address whether it's enabled or not */
-	WREG32(CP_RB_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC);
+	WREG32(CP_RB_RPTR_ADDR,
+#ifdef __BIG_ENDIAN
+	       RB_RPTR_SWAP(2) |
+#endif
+	       ((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC));
 	WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
 	WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);
 
@@ -2629,7 +2637,11 @@ void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
 {
 	/* FIXME: implement */
 	radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
-	radeon_ring_write(rdev, ib->gpu_addr & 0xFFFFFFFC);
+	radeon_ring_write(rdev,
+#ifdef __BIG_ENDIAN
+			  (2 << 0) |
+#endif
+			  (ib->gpu_addr & 0xFFFFFFFC));
 	radeon_ring_write(rdev, upper_32_bits(ib->gpu_addr) & 0xFF);
 	radeon_ring_write(rdev, ib->length_dw);
 }
@@ -3305,8 +3317,8 @@ restart_ih:
 	while (rptr != wptr) {
 		/* wptr/rptr are in bytes! */
 		ring_index = rptr / 4;
-		src_id = rdev->ih.ring[ring_index] & 0xff;
-		src_data = rdev->ih.ring[ring_index + 1] & 0xfffffff;
+		src_id = le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff;
+		src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff;
 
 		switch (src_id) {
 		case 1: /* D1 vblank/vline */
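
The r600.c hunks are all about big-endian hosts: buffers shared with the GPU hold little-endian DWORDs, so the CP is told to byte-swap its fetches (BUF_SWAP_32BIT, RB_RPTR_SWAP(2)) and CPU-side reads of the interrupt ring go through le32_to_cpu(). A userspace sketch of the read side, with le32toh() standing in for the kernel's le32_to_cpu():

	#include <endian.h>
	#include <stdint.h>

	/* Sketch: decode one IH ring entry written little-endian by the GPU;
	 * on a little-endian host le32toh() compiles away to nothing. */
	static void decode_ih_entry(const uint32_t *ring, uint32_t rptr,
				    uint32_t *src_id, uint32_t *src_data)
	{
		uint32_t ring_index = rptr / 4;	/* rptr is in bytes */

		*src_id = le32toh(ring[ring_index]) & 0xff;
		*src_data = le32toh(ring[ring_index + 1]) & 0xfffffff;
	}

The write direction is the mirror image: the r600_blit changes below store shader DWORDs with cpu_to_le32() instead of a raw memcpy().
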
diff --git a/drivers/gpu/drm/radeon/r600_blit.c b/drivers/gpu/drm/radeon/r600_blit.c
index ca5c29f70779..7f1043448d25 100644
--- a/drivers/gpu/drm/radeon/r600_blit.c
+++ b/drivers/gpu/drm/radeon/r600_blit.c
@@ -137,9 +137,9 @@ set_shaders(struct drm_device *dev)
 	ps = (u32 *) ((char *)dev->agp_buffer_map->handle + dev_priv->blit_vb->offset + 256);
 
 	for (i = 0; i < r6xx_vs_size; i++)
-		vs[i] = r6xx_vs[i];
+		vs[i] = cpu_to_le32(r6xx_vs[i]);
 	for (i = 0; i < r6xx_ps_size; i++)
-		ps[i] = r6xx_ps[i];
+		ps[i] = cpu_to_le32(r6xx_ps[i]);
 
 	dev_priv->blit_vb->used = 512;
 
@@ -192,6 +192,9 @@ set_vtx_resource(drm_radeon_private_t *dev_priv, u64 gpu_addr)
 	DRM_DEBUG("\n");
 
 	sq_vtx_constant_word2 = (((gpu_addr >> 32) & 0xff) | (16 << 8));
+#ifdef __BIG_ENDIAN
+	sq_vtx_constant_word2 |= (2 << 30);
+#endif
 
 	BEGIN_RING(9);
 	OUT_RING(CP_PACKET3(R600_IT_SET_RESOURCE, 7));
@@ -291,7 +294,11 @@ draw_auto(drm_radeon_private_t *dev_priv)
 	OUT_RING(DI_PT_RECTLIST);
 
 	OUT_RING(CP_PACKET3(R600_IT_INDEX_TYPE, 0));
+#ifdef __BIG_ENDIAN
+	OUT_RING((2 << 2) | DI_INDEX_SIZE_16_BIT);
+#else
 	OUT_RING(DI_INDEX_SIZE_16_BIT);
+#endif
 
 	OUT_RING(CP_PACKET3(R600_IT_NUM_INSTANCES, 0));
 	OUT_RING(1);
diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c
index 16e211a614d7..2fed91750126 100644
--- a/drivers/gpu/drm/radeon/r600_blit_kms.c
+++ b/drivers/gpu/drm/radeon/r600_blit_kms.c
@@ -54,7 +54,7 @@ set_render_target(struct radeon_device *rdev, int format,
 	if (h < 8)
 		h = 8;
 
-	cb_color_info = ((format << 2) | (1 << 27));
+	cb_color_info = ((format << 2) | (1 << 27) | (1 << 8));
 	pitch = (w / 8) - 1;
 	slice = ((w * h) / 64) - 1;
 
@@ -165,6 +165,9 @@ set_vtx_resource(struct radeon_device *rdev, u64 gpu_addr)
 	u32 sq_vtx_constant_word2;
 
 	sq_vtx_constant_word2 = ((upper_32_bits(gpu_addr) & 0xff) | (16 << 8));
+#ifdef __BIG_ENDIAN
+	sq_vtx_constant_word2 |= (2 << 30);
+#endif
 
 	radeon_ring_write(rdev, PACKET3(PACKET3_SET_RESOURCE, 7));
 	radeon_ring_write(rdev, 0x460);
@@ -199,7 +202,7 @@ set_tex_resource(struct radeon_device *rdev,
 	if (h < 1)
 		h = 1;
 
-	sq_tex_resource_word0 = (1 << 0);
+	sq_tex_resource_word0 = (1 << 0) | (1 << 3);
 	sq_tex_resource_word0 |= ((((pitch >> 3) - 1) << 8) |
 				  ((w - 1) << 19));
 
@@ -253,7 +256,11 @@ draw_auto(struct radeon_device *rdev)
 	radeon_ring_write(rdev, DI_PT_RECTLIST);
 
 	radeon_ring_write(rdev, PACKET3(PACKET3_INDEX_TYPE, 0));
-	radeon_ring_write(rdev, DI_INDEX_SIZE_16_BIT);
+	radeon_ring_write(rdev,
+#ifdef __BIG_ENDIAN
+			  (2 << 2) |
+#endif
+			  DI_INDEX_SIZE_16_BIT);
 
 	radeon_ring_write(rdev, PACKET3(PACKET3_NUM_INSTANCES, 0));
 	radeon_ring_write(rdev, 1);
@@ -424,7 +431,11 @@ set_default_state(struct radeon_device *rdev)
 	dwords = ALIGN(rdev->r600_blit.state_len, 0x10);
 	gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.state_offset;
 	radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
-	radeon_ring_write(rdev, gpu_addr & 0xFFFFFFFC);
+	radeon_ring_write(rdev,
+#ifdef __BIG_ENDIAN
+			  (2 << 0) |
+#endif
+			  (gpu_addr & 0xFFFFFFFC));
 	radeon_ring_write(rdev, upper_32_bits(gpu_addr) & 0xFF);
 	radeon_ring_write(rdev, dwords);
 
@@ -467,7 +478,7 @@ static inline uint32_t i2f(uint32_t input)
 int r600_blit_init(struct radeon_device *rdev)
 {
 	u32 obj_size;
-	int r, dwords;
+	int i, r, dwords;
 	void *ptr;
 	u32 packet2s[16];
 	int num_packet2s = 0;
@@ -486,7 +497,7 @@ int r600_blit_init(struct radeon_device *rdev)
 
 	dwords = rdev->r600_blit.state_len;
 	while (dwords & 0xf) {
-		packet2s[num_packet2s++] = PACKET2(0);
+		packet2s[num_packet2s++] = cpu_to_le32(PACKET2(0));
 		dwords++;
 	}
 
@@ -529,8 +540,10 @@ int r600_blit_init(struct radeon_device *rdev)
 	if (num_packet2s)
 		memcpy_toio(ptr + rdev->r600_blit.state_offset + (rdev->r600_blit.state_len * 4),
 			    packet2s, num_packet2s * 4);
-	memcpy(ptr + rdev->r600_blit.vs_offset, r6xx_vs, r6xx_vs_size * 4);
-	memcpy(ptr + rdev->r600_blit.ps_offset, r6xx_ps, r6xx_ps_size * 4);
+	for (i = 0; i < r6xx_vs_size; i++)
+		*(u32 *)((unsigned long)ptr + rdev->r600_blit.vs_offset + i * 4) = cpu_to_le32(r6xx_vs[i]);
+	for (i = 0; i < r6xx_ps_size; i++)
+		*(u32 *)((unsigned long)ptr + rdev->r600_blit.ps_offset + i * 4) = cpu_to_le32(r6xx_ps[i]);
 	radeon_bo_kunmap(rdev->r600_blit.shader_obj);
 	radeon_bo_unreserve(rdev->r600_blit.shader_obj);
 
diff --git a/drivers/gpu/drm/radeon/r600_blit_shaders.c b/drivers/gpu/drm/radeon/r600_blit_shaders.c
index e8151c1d55b2..2d1f6c5ee2a7 100644
--- a/drivers/gpu/drm/radeon/r600_blit_shaders.c
+++ b/drivers/gpu/drm/radeon/r600_blit_shaders.c
@@ -684,7 +684,11 @@ const u32 r6xx_vs[] =
 	0x00000000,
 	0x3c000000,
 	0x68cd1000,
+#ifdef __BIG_ENDIAN
+	0x000a0000,
+#else
 	0x00080000,
+#endif
 	0x00000000,
 };
 
diff --git a/drivers/gpu/drm/radeon/r600_cp.c b/drivers/gpu/drm/radeon/r600_cp.c
index 4f4cd8b286d5..c3ab959bdc7c 100644
--- a/drivers/gpu/drm/radeon/r600_cp.c
+++ b/drivers/gpu/drm/radeon/r600_cp.c
@@ -396,6 +396,9 @@ static void r600_cp_load_microcode(drm_radeon_private_t *dev_priv)
 	r600_do_cp_stop(dev_priv);
 
 	RADEON_WRITE(R600_CP_RB_CNTL,
+#ifdef __BIG_ENDIAN
+		     R600_BUF_SWAP_32BIT |
+#endif
 		     R600_RB_NO_UPDATE |
 		     R600_RB_BLKSZ(15) |
 		     R600_RB_BUFSZ(3));
@@ -486,9 +489,12 @@ static void r700_cp_load_microcode(drm_radeon_private_t *dev_priv)
 	r600_do_cp_stop(dev_priv);
 
 	RADEON_WRITE(R600_CP_RB_CNTL,
+#ifdef __BIG_ENDIAN
+		     R600_BUF_SWAP_32BIT |
+#endif
 		     R600_RB_NO_UPDATE |
-		     (15 << 8) |
-		     (3 << 0));
+		     R600_RB_BLKSZ(15) |
+		     R600_RB_BUFSZ(3));
 
 	RADEON_WRITE(R600_GRBM_SOFT_RESET, R600_SOFT_RESET_CP);
 	RADEON_READ(R600_GRBM_SOFT_RESET);
@@ -550,8 +556,12 @@ static void r600_test_writeback(drm_radeon_private_t *dev_priv)
 
 	if (!dev_priv->writeback_works) {
 		/* Disable writeback to avoid unnecessary bus master transfer */
-		RADEON_WRITE(R600_CP_RB_CNTL, RADEON_READ(R600_CP_RB_CNTL) |
-			     RADEON_RB_NO_UPDATE);
+		RADEON_WRITE(R600_CP_RB_CNTL,
+#ifdef __BIG_ENDIAN
+			     R600_BUF_SWAP_32BIT |
+#endif
+			     RADEON_READ(R600_CP_RB_CNTL) |
+			     R600_RB_NO_UPDATE);
 		RADEON_WRITE(R600_SCRATCH_UMSK, 0);
 	}
 }
@@ -575,7 +585,11 @@ int r600_do_engine_reset(struct drm_device *dev)
 
 	RADEON_WRITE(R600_CP_RB_WPTR_DELAY, 0);
 	cp_rb_cntl = RADEON_READ(R600_CP_RB_CNTL);
-	RADEON_WRITE(R600_CP_RB_CNTL, R600_RB_RPTR_WR_ENA);
+	RADEON_WRITE(R600_CP_RB_CNTL,
+#ifdef __BIG_ENDIAN
+		     R600_BUF_SWAP_32BIT |
+#endif
+		     R600_RB_RPTR_WR_ENA);
 
 	RADEON_WRITE(R600_CP_RB_RPTR_WR, cp_ptr);
 	RADEON_WRITE(R600_CP_RB_WPTR, cp_ptr);
@@ -1838,7 +1852,10 @@ static void r600_cp_init_ring_buffer(struct drm_device *dev,
 			+ dev_priv->gart_vm_start;
 	}
 	RADEON_WRITE(R600_CP_RB_RPTR_ADDR,
-		     rptr_addr & 0xffffffff);
+#ifdef __BIG_ENDIAN
+		     (2 << 0) |
+#endif
+		     (rptr_addr & 0xfffffffc));
 	RADEON_WRITE(R600_CP_RB_RPTR_ADDR_HI,
 		     upper_32_bits(rptr_addr));
 
@@ -1889,7 +1906,7 @@ static void r600_cp_init_ring_buffer(struct drm_device *dev,
 	{
 		u64 scratch_addr;
 
-		scratch_addr = RADEON_READ(R600_CP_RB_RPTR_ADDR);
+		scratch_addr = RADEON_READ(R600_CP_RB_RPTR_ADDR) & 0xFFFFFFFC;
 		scratch_addr |= ((u64)RADEON_READ(R600_CP_RB_RPTR_ADDR_HI)) << 32;
 		scratch_addr += R600_SCRATCH_REG_OFFSET;
 		scratch_addr >>= 8;
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index fe0c8eb76010..0a0848f0346d 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -388,17 +388,18 @@ static inline int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
 	}
 
 	if (!IS_ALIGNED(pitch, pitch_align)) {
-		dev_warn(p->dev, "%s:%d cb pitch (%d) invalid\n",
-			 __func__, __LINE__, pitch);
+		dev_warn(p->dev, "%s:%d cb pitch (%d, 0x%x, %d) invalid\n",
+			 __func__, __LINE__, pitch, pitch_align, array_mode);
 		return -EINVAL;
 	}
 	if (!IS_ALIGNED(height, height_align)) {
-		dev_warn(p->dev, "%s:%d cb height (%d) invalid\n",
-			 __func__, __LINE__, height);
+		dev_warn(p->dev, "%s:%d cb height (%d, 0x%x, %d) invalid\n",
+			 __func__, __LINE__, height, height_align, array_mode);
 		return -EINVAL;
 	}
 	if (!IS_ALIGNED(base_offset, base_align)) {
-		dev_warn(p->dev, "%s offset[%d] 0x%llx not aligned\n", __func__, i, base_offset);
+		dev_warn(p->dev, "%s offset[%d] 0x%llx 0x%llx, %d not aligned\n", __func__, i,
+			 base_offset, base_align, array_mode);
 		return -EINVAL;
 	}
 
@@ -413,7 +414,10 @@ static inline int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
 	 * broken userspace.
 	 */
 	} else {
-		dev_warn(p->dev, "%s offset[%d] %d %d %lu too big\n", __func__, i, track->cb_color_bo_offset[i], tmp, radeon_bo_size(track->cb_color_bo[i]));
+		dev_warn(p->dev, "%s offset[%d] %d %d %d %lu too big\n", __func__, i,
+			 array_mode,
+			 track->cb_color_bo_offset[i], tmp,
+			 radeon_bo_size(track->cb_color_bo[i]));
 		return -EINVAL;
 	}
 }
@@ -548,17 +552,18 @@ static int r600_cs_track_check(struct radeon_cs_parser *p)
 	}
 
 	if (!IS_ALIGNED(pitch, pitch_align)) {
-		dev_warn(p->dev, "%s:%d db pitch (%d) invalid\n",
-			 __func__, __LINE__, pitch);
+		dev_warn(p->dev, "%s:%d db pitch (%d, 0x%x, %d) invalid\n",
+			 __func__, __LINE__, pitch, pitch_align, array_mode);
 		return -EINVAL;
 	}
 	if (!IS_ALIGNED(height, height_align)) {
-		dev_warn(p->dev, "%s:%d db height (%d) invalid\n",
-			 __func__, __LINE__, height);
+		dev_warn(p->dev, "%s:%d db height (%d, 0x%x, %d) invalid\n",
+			 __func__, __LINE__, height, height_align, array_mode);
 		return -EINVAL;
 	}
 	if (!IS_ALIGNED(base_offset, base_align)) {
-		dev_warn(p->dev, "%s offset[%d] 0x%llx not aligned\n", __func__, i, base_offset);
+		dev_warn(p->dev, "%s offset[%d] 0x%llx, 0x%llx, %d not aligned\n", __func__, i,
+			 base_offset, base_align, array_mode);
 		return -EINVAL;
 	}
 
@@ -566,9 +571,10 @@ static int r600_cs_track_check(struct radeon_cs_parser *p)
 		nviews = G_028004_SLICE_MAX(track->db_depth_view) + 1;
 		tmp = ntiles * bpe * 64 * nviews;
 		if ((tmp + track->db_offset) > radeon_bo_size(track->db_bo)) {
-			dev_warn(p->dev, "z/stencil buffer too small (0x%08X %d %d %d -> %u have %lu)\n",
-				 track->db_depth_size, ntiles, nviews, bpe, tmp + track->db_offset,
-				 radeon_bo_size(track->db_bo));
+			dev_warn(p->dev, "z/stencil buffer (%d) too small (0x%08X %d %d %d -> %u have %lu)\n",
+				 array_mode,
+				 track->db_depth_size, ntiles, nviews, bpe, tmp + track->db_offset,
+				 radeon_bo_size(track->db_bo));
 			return -EINVAL;
 		}
 	}
@@ -1350,18 +1356,18 @@ static inline int r600_check_texture_resource(struct radeon_cs_parser *p, u32 i
 	/* XXX check height as well... */
 
 	if (!IS_ALIGNED(pitch, pitch_align)) {
-		dev_warn(p->dev, "%s:%d tex pitch (%d) invalid\n",
-			 __func__, __LINE__, pitch);
+		dev_warn(p->dev, "%s:%d tex pitch (%d, 0x%x, %d) invalid\n",
+			 __func__, __LINE__, pitch, pitch_align, G_038000_TILE_MODE(word0));
 		return -EINVAL;
 	}
 	if (!IS_ALIGNED(base_offset, base_align)) {
-		dev_warn(p->dev, "%s:%d tex base offset (0x%llx) invalid\n",
-			 __func__, __LINE__, base_offset);
+		dev_warn(p->dev, "%s:%d tex base offset (0x%llx, 0x%llx, %d) invalid\n",
+			 __func__, __LINE__, base_offset, base_align, G_038000_TILE_MODE(word0));
 		return -EINVAL;
 	}
 	if (!IS_ALIGNED(mip_offset, base_align)) {
-		dev_warn(p->dev, "%s:%d tex mip offset (0x%llx) invalid\n",
-			 __func__, __LINE__, mip_offset);
+		dev_warn(p->dev, "%s:%d tex mip offset (0x%llx, 0x%llx, %d) invalid\n",
+			 __func__, __LINE__, mip_offset, base_align, G_038000_TILE_MODE(word0));
 		return -EINVAL;
 	}
 
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index d1f598663da7..b2b944bcd05a 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -154,13 +154,14 @@
 #define	ROQ_IB2_START(x)				((x) << 8)
 #define	CP_RB_BASE					0xC100
 #define	CP_RB_CNTL					0xC104
-#define		RB_BUFSZ(x)				((x)<<0)
-#define		RB_BLKSZ(x)				((x)<<8)
-#define		RB_NO_UPDATE				(1<<27)
-#define		RB_RPTR_WR_ENA				(1<<31)
+#define		RB_BUFSZ(x)				((x) << 0)
+#define		RB_BLKSZ(x)				((x) << 8)
+#define		RB_NO_UPDATE				(1 << 27)
+#define		RB_RPTR_WR_ENA				(1 << 31)
 #define		BUF_SWAP_32BIT				(2 << 16)
 #define	CP_RB_RPTR					0x8700
 #define	CP_RB_RPTR_ADDR					0xC10C
+#define		RB_RPTR_SWAP(x)				((x) << 0)
 #define	CP_RB_RPTR_ADDR_HI				0xC110
 #define	CP_RB_RPTR_WR					0xC108
 #define	CP_RB_WPTR					0xC114
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 5c1cc7ad9a15..02d5c415f499 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -88,7 +88,7 @@ static inline struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_dev
 		/* some evergreen boards have bad data for this entry */
 		if (ASIC_IS_DCE4(rdev)) {
 			if ((i == 7) &&
-			    (gpio->usClkMaskRegisterIndex == 0x1936) &&
+			    (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1936) &&
 			    (gpio->sucI2cId.ucAccess == 0)) {
 				gpio->sucI2cId.ucAccess = 0x97;
 				gpio->ucDataMaskShift = 8;
@@ -101,7 +101,7 @@ static inline struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_dev
 		/* some DCE3 boards have bad data for this entry */
 		if (ASIC_IS_DCE3(rdev)) {
 			if ((i == 4) &&
-			    (gpio->usClkMaskRegisterIndex == 0x1fda) &&
+			    (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1fda) &&
 			    (gpio->sucI2cId.ucAccess == 0x94))
 				gpio->sucI2cId.ucAccess = 0x14;
 		}
@@ -172,7 +172,7 @@ void radeon_atombios_i2c_init(struct radeon_device *rdev)
 		/* some evergreen boards have bad data for this entry */
 		if (ASIC_IS_DCE4(rdev)) {
 			if ((i == 7) &&
-			    (gpio->usClkMaskRegisterIndex == 0x1936) &&
+			    (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1936) &&
 			    (gpio->sucI2cId.ucAccess == 0)) {
 				gpio->sucI2cId.ucAccess = 0x97;
 				gpio->ucDataMaskShift = 8;
@@ -185,7 +185,7 @@ void radeon_atombios_i2c_init(struct radeon_device *rdev)
 		/* some DCE3 boards have bad data for this entry */
 		if (ASIC_IS_DCE3(rdev)) {
 			if ((i == 4) &&
-			    (gpio->usClkMaskRegisterIndex == 0x1fda) &&
+			    (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1fda) &&
 			    (gpio->sucI2cId.ucAccess == 0x94))
 				gpio->sucI2cId.ucAccess = 0x14;
 		}
@@ -252,7 +252,7 @@ static inline struct radeon_gpio_rec radeon_lookup_gpio(struct radeon_device *rd
 		pin = &gpio_info->asGPIO_Pin[i];
 		if (id == pin->ucGPIO_ID) {
 			gpio.id = pin->ucGPIO_ID;
-			gpio.reg = pin->usGpioPin_AIndex * 4;
+			gpio.reg = le16_to_cpu(pin->usGpioPin_AIndex) * 4;
 			gpio.mask = (1 << pin->ucGpioPinBitShift);
 			gpio.valid = true;
 			break;
@@ -1274,11 +1274,11 @@ bool radeon_atombios_sideport_present(struct radeon_device *rdev)
 			data_offset);
 	switch (crev) {
 	case 1:
-		if (igp_info->info.ulBootUpMemoryClock)
+		if (le32_to_cpu(igp_info->info.ulBootUpMemoryClock))
 			return true;
 		break;
 	case 2:
-		if (igp_info->info_2.ulBootUpSidePortClock)
+		if (le32_to_cpu(igp_info->info_2.ulBootUpSidePortClock))
 			return true;
 		break;
 	default:
@@ -1442,7 +1442,7 @@ bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev,
 
 		for (i = 0; i < num_indices; i++) {
 			if ((ss_info->info.asSpreadSpectrum[i].ucClockIndication == id) &&
-			    (clock <= ss_info->info.asSpreadSpectrum[i].ulTargetClockRange)) {
+			    (clock <= le32_to_cpu(ss_info->info.asSpreadSpectrum[i].ulTargetClockRange))) {
 				ss->percentage =
 					le16_to_cpu(ss_info->info.asSpreadSpectrum[i].usSpreadSpectrumPercentage);
 				ss->type = ss_info->info.asSpreadSpectrum[i].ucSpreadSpectrumMode;
@@ -1456,7 +1456,7 @@ bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev,
 			sizeof(ATOM_ASIC_SS_ASSIGNMENT_V2);
 		for (i = 0; i < num_indices; i++) {
 			if ((ss_info->info_2.asSpreadSpectrum[i].ucClockIndication == id) &&
-			    (clock <= ss_info->info_2.asSpreadSpectrum[i].ulTargetClockRange)) {
+			    (clock <= le32_to_cpu(ss_info->info_2.asSpreadSpectrum[i].ulTargetClockRange))) {
 				ss->percentage =
 					le16_to_cpu(ss_info->info_2.asSpreadSpectrum[i].usSpreadSpectrumPercentage);
 				ss->type = ss_info->info_2.asSpreadSpectrum[i].ucSpreadSpectrumMode;
@@ -1470,7 +1470,7 @@ bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev,
 			sizeof(ATOM_ASIC_SS_ASSIGNMENT_V3);
 		for (i = 0; i < num_indices; i++) {
 			if ((ss_info->info_3.asSpreadSpectrum[i].ucClockIndication == id) &&
-			    (clock <= ss_info->info_3.asSpreadSpectrum[i].ulTargetClockRange)) {
+			    (clock <= le32_to_cpu(ss_info->info_3.asSpreadSpectrum[i].ulTargetClockRange))) {
 				ss->percentage =
 					le16_to_cpu(ss_info->info_3.asSpreadSpectrum[i].usSpreadSpectrumPercentage);
 				ss->type = ss_info->info_3.asSpreadSpectrum[i].ucSpreadSpectrumMode;
@@ -1553,8 +1553,8 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct
 	if (misc & ATOM_DOUBLE_CLOCK_MODE)
 		lvds->native_mode.flags |= DRM_MODE_FLAG_DBLSCAN;
 
-	lvds->native_mode.width_mm = lvds_info->info.sLCDTiming.usImageHSize;
-	lvds->native_mode.height_mm = lvds_info->info.sLCDTiming.usImageVSize;
+	lvds->native_mode.width_mm = le16_to_cpu(lvds_info->info.sLCDTiming.usImageHSize);
+	lvds->native_mode.height_mm = le16_to_cpu(lvds_info->info.sLCDTiming.usImageVSize);
 
 	/* set crtc values */
 	drm_mode_set_crtcinfo(&lvds->native_mode, CRTC_INTERLACE_HALVE_V);
@@ -1569,13 +1569,13 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct
 	lvds->linkb = false;
 
 	/* parse the lcd record table */
-	if (lvds_info->info.usModePatchTableOffset) {
+	if (le16_to_cpu(lvds_info->info.usModePatchTableOffset)) {
 		ATOM_FAKE_EDID_PATCH_RECORD *fake_edid_record;
 		ATOM_PANEL_RESOLUTION_PATCH_RECORD *panel_res_record;
 		bool bad_record = false;
 		u8 *record = (u8 *)(mode_info->atom_context->bios +
 				    data_offset +
-				    lvds_info->info.usModePatchTableOffset);
+				    le16_to_cpu(lvds_info->info.usModePatchTableOffset));
 		while (*record != ATOM_RECORD_END_TYPE) {
 			switch (*record) {
 			case LCD_MODE_PATCH_RECORD_MODE_TYPE:
@@ -2189,7 +2189,7 @@ static u16 radeon_atombios_get_default_vddc(struct radeon_device *rdev)
 		firmware_info =
 			(union firmware_info *)(mode_info->atom_context->bios +
 						data_offset);
-		vddc = firmware_info->info_14.usBootUpVDDCVoltage;
+		vddc = le16_to_cpu(firmware_info->info_14.usBootUpVDDCVoltage);
 	}
 
 	return vddc;
@@ -2284,7 +2284,7 @@ static bool radeon_atombios_parse_pplib_clock_info(struct radeon_device *rdev,
 		rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type =
 			VOLTAGE_SW;
 		rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage =
-			clock_info->evergreen.usVDDC;
+			le16_to_cpu(clock_info->evergreen.usVDDC);
 	} else {
 		sclk = le16_to_cpu(clock_info->r600.usEngineClockLow);
 		sclk |= clock_info->r600.ucEngineClockHigh << 16;
@@ -2295,7 +2295,7 @@ static bool radeon_atombios_parse_pplib_clock_info(struct radeon_device *rdev,
 		rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type =
 			VOLTAGE_SW;
 		rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage =
-			clock_info->r600.usVDDC;
+			le16_to_cpu(clock_info->r600.usVDDC);
 	}
 
 	if (rdev->flags & RADEON_IS_IGP) {
@@ -2408,13 +2408,13 @@ static int radeon_atombios_parse_power_table_6(struct radeon_device *rdev)
 	radeon_atombios_add_pplib_thermal_controller(rdev, &power_info->pplib.sThermalController);
 	state_array = (struct StateArray *)
 		(mode_info->atom_context->bios + data_offset +
-		 power_info->pplib.usStateArrayOffset);
+		 le16_to_cpu(power_info->pplib.usStateArrayOffset));
 	clock_info_array = (struct ClockInfoArray *)
 		(mode_info->atom_context->bios + data_offset +
-		 power_info->pplib.usClockInfoArrayOffset);
+		 le16_to_cpu(power_info->pplib.usClockInfoArrayOffset));
 	non_clock_info_array = (struct NonClockInfoArray *)
 		(mode_info->atom_context->bios + data_offset +
-		 power_info->pplib.usNonClockInfoArrayOffset);
+		 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
 	rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) *
 				       state_array->ucNumEntries, GFP_KERNEL);
 	if (!rdev->pm.power_state)
@@ -2533,7 +2533,7 @@ uint32_t radeon_atom_get_engine_clock(struct radeon_device *rdev)
 	int index = GetIndexIntoMasterTable(COMMAND, GetEngineClock);
 
 	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
-	return args.ulReturnEngineClock;
+	return le32_to_cpu(args.ulReturnEngineClock);
 }
 
 uint32_t radeon_atom_get_memory_clock(struct radeon_device *rdev)
@@ -2542,7 +2542,7 @@ uint32_t radeon_atom_get_memory_clock(struct radeon_device *rdev)
 	int index = GetIndexIntoMasterTable(COMMAND, GetMemoryClock);
 
 	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
-	return args.ulReturnMemoryClock;
+	return le32_to_cpu(args.ulReturnMemoryClock);
 }
 
 void radeon_atom_set_engine_clock(struct radeon_device *rdev,
@@ -2551,7 +2551,7 @@ void radeon_atom_set_engine_clock(struct radeon_device *rdev,
 	SET_ENGINE_CLOCK_PS_ALLOCATION args;
 	int index = GetIndexIntoMasterTable(COMMAND, SetEngineClock);
 
-	args.ulTargetEngineClock = eng_clock;	/* 10 khz */
+	args.ulTargetEngineClock = cpu_to_le32(eng_clock);	/* 10 khz */
 
 	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
 }
@@ -2565,7 +2565,7 @@ void radeon_atom_set_memory_clock(struct radeon_device *rdev,
 	if (rdev->flags & RADEON_IS_IGP)
 		return;
 
-	args.ulTargetMemoryClock = mem_clock;	/* 10 khz */
+	args.ulTargetMemoryClock = cpu_to_le32(mem_clock);	/* 10 khz */
 
 	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
 }
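
The radeon_atombios.c hunks are one fix repeated: AtomBIOS tables are stored little-endian regardless of the host, so every multi-byte field read from them needs le16_to_cpu()/le32_to_cpu(), and values placed into command-table arguments need cpu_to_le16()/cpu_to_le32() on the way in. A userspace sketch with an illustrative (not real) table layout, using le16toh() for the kernel's le16_to_cpu():

	#include <endian.h>
	#include <stdint.h>

	/* Sketch: fields of a little-endian firmware table must be swapped
	 * before use; this struct is illustrative, not an AtomBIOS layout. */
	struct fw_panel_timing {
		uint16_t usImageHSize;	/* little-endian in the table */
		uint16_t usImageVSize;	/* little-endian in the table */
	} __attribute__((packed));

	static void read_panel_size(const struct fw_panel_timing *t,
				    unsigned int *w_mm, unsigned int *h_mm)
	{
		*w_mm = le16toh(t->usImageHSize);
		*h_mm = le16toh(t->usImageVSize);
	}

On little-endian machines these conversions compile to nothing, which is why the missing swaps went unnoticed until big-endian hardware hit them.
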
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index d27ef74590cd..cf7c8d5b4ec2 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -1504,6 +1504,11 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
 		   (rdev->pdev->subsystem_device == 0x4a48)) {
 		/* Mac X800 */
 		rdev->mode_info.connector_table = CT_MAC_X800;
+	} else if ((rdev->pdev->device == 0x4150) &&
+		   (rdev->pdev->subsystem_vendor == 0x1002) &&
+		   (rdev->pdev->subsystem_device == 0x4150)) {
+		/* Mac G5 9600 */
+		rdev->mode_info.connector_table = CT_MAC_G5_9600;
 	} else
 #endif /* CONFIG_PPC_PMAC */
 #ifdef CONFIG_PPC64
@@ -2022,6 +2027,48 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
 					    CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I,
 					    &hpd);
 		break;
+	case CT_MAC_G5_9600:
+		DRM_INFO("Connector Table: %d (mac g5 9600)\n",
+			 rdev->mode_info.connector_table);
+		/* DVI - tv dac, dvo */
+		ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
+		hpd.hpd = RADEON_HPD_1; /* ??? */
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								  ATOM_DEVICE_DFP2_SUPPORT,
+								  0),
+					  ATOM_DEVICE_DFP2_SUPPORT);
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								  ATOM_DEVICE_CRT2_SUPPORT,
+								  2),
+					  ATOM_DEVICE_CRT2_SUPPORT);
+		radeon_add_legacy_connector(dev, 0,
+					    ATOM_DEVICE_DFP2_SUPPORT |
+					    ATOM_DEVICE_CRT2_SUPPORT,
+					    DRM_MODE_CONNECTOR_DVII, &ddc_i2c,
+					    CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I,
+					    &hpd);
+		/* ADC - primary dac, internal tmds */
+		ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
+		hpd.hpd = RADEON_HPD_2; /* ??? */
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								  ATOM_DEVICE_DFP1_SUPPORT,
+								  0),
+					  ATOM_DEVICE_DFP1_SUPPORT);
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								  ATOM_DEVICE_CRT1_SUPPORT,
+								  1),
+					  ATOM_DEVICE_CRT1_SUPPORT);
+		radeon_add_legacy_connector(dev, 1,
+					    ATOM_DEVICE_DFP1_SUPPORT |
+					    ATOM_DEVICE_CRT1_SUPPORT,
+					    DRM_MODE_CONNECTOR_DVII, &ddc_i2c,
+					    CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I,
+					    &hpd);
+		break;
 	default:
 		DRM_INFO("Connector table: %d (invalid)\n",
 			 rdev->mode_info.connector_table);
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 0ca5eb217929..f0209be7a34b 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -937,8 +937,11 @@ int radeon_resume_kms(struct drm_device *dev)
 int radeon_gpu_reset(struct radeon_device *rdev)
 {
 	int r;
+	int resched;
 
 	radeon_save_bios_scratch_regs(rdev);
+	/* block TTM */
+	resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
 	radeon_suspend(rdev);
 
 	r = radeon_asic_reset(rdev);
@@ -947,6 +950,7 @@ int radeon_gpu_reset(struct radeon_device *rdev)
 		radeon_resume(rdev);
 		radeon_restore_bios_scratch_regs(rdev);
 		drm_helper_resume_force_mode(rdev->ddev);
+		ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
 		return 0;
 	}
 	/* bad news, how to tell it to userspace ? */
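
radeon_gpu_reset() now brackets the reset with ttm_bo_lock_delayed_workqueue() and ttm_bo_unlock_delayed_workqueue(), so TTM's delayed buffer teardown cannot touch the device mid-reset. The shape of the pattern, sketched with stand-in helpers (pause_worker()/resume_worker() are not the TTM API):

	#include <stdbool.h>

	struct device_ctx {
		bool wq_armed;	/* stand-in for TTM's delayed-destroy workqueue */
	};

	/* Block delayed work and report whether it was pending. */
	static int pause_worker(struct device_ctx *d)
	{
		int was_armed = d->wq_armed;

		d->wq_armed = false;
		return was_armed;
	}

	/* Re-arm the delayed work only if it was pending before the reset. */
	static void resume_worker(struct device_ctx *d, int resched)
	{
		if (resched)
			d->wq_armed = true;
	}

	static int hw_reset(struct device_ctx *d) { (void)d; return 0; }

	int device_reset(struct device_ctx *dev)
	{
		int resched = pause_worker(dev);
		int r = hw_reset(dev);

		resume_worker(dev, resched);
		return r;
	}

Returning the prior state from the pause call is what lets the unlock side restore exactly the state it found, rather than unconditionally re-arming.
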
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 4409975a363c..4be58793dc17 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -793,6 +793,11 @@ static void avivo_get_fb_div(struct radeon_pll *pll,
 	tmp *= target_clock;
 	*fb_div = tmp / pll->reference_freq;
 	*frac_fb_div = tmp % pll->reference_freq;
+
+	if (*fb_div > pll->max_feedback_div)
+		*fb_div = pll->max_feedback_div;
+	else if (*fb_div < pll->min_feedback_div)
+		*fb_div = pll->min_feedback_div;
 }
 
 static u32 avivo_get_post_div(struct radeon_pll *pll,
@@ -826,6 +831,11 @@ static u32 avivo_get_post_div(struct radeon_pll *pll,
 		post_div--;
 	}
 
+	if (post_div > pll->max_post_div)
+		post_div = pll->max_post_div;
+	else if (post_div < pll->min_post_div)
+		post_div = pll->min_post_div;
+
 	return post_div;
 }
 
@@ -961,7 +971,7 @@ void radeon_compute_pll_legacy(struct radeon_pll *pll,
 		max_fractional_feed_div = pll->max_frac_feedback_div;
 	}
 
-	for (post_div = min_post_div; post_div <= max_post_div; ++post_div) {
+	for (post_div = max_post_div; post_div >= min_post_div; --post_div) {
 		uint32_t ref_div;
 
 		if ((pll->flags & RADEON_PLL_NO_ODD_POST_DIV) && (post_div & 1))
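
Two related fixes to the PLL code: the computed feedback and post dividers are clamped into the range the PLL reports, and the legacy search now walks post_div from the maximum down, so larger post dividers are tried first (for a given output clock the standard relation f_out = f_ref * fb_div / (ref_div * post_div) means a larger post_div keeps the VCO running faster). The clamping step, sketched:

	/* Sketch: keep a computed divider inside the PLL's legal range. */
	static unsigned int clamp_div(unsigned int div,
				      unsigned int min_div, unsigned int max_div)
	{
		if (div > max_div)
			return max_div;
		if (div < min_div)
			return min_div;
		return div;
	}
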
diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
index 448eba89d1e6..5cba46b9779a 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.h
+++ b/drivers/gpu/drm/radeon/radeon_drv.h
@@ -1524,6 +1524,7 @@ extern u32 radeon_get_scratch(drm_radeon_private_t *dev_priv, int index);
 #define R600_CP_RB_CNTL                                   0xc104
 #       define R600_RB_BUFSZ(x)                           ((x) << 0)
 #       define R600_RB_BLKSZ(x)                           ((x) << 8)
+#       define R600_BUF_SWAP_32BIT                        (2 << 16)
 #       define R600_RB_NO_UPDATE                          (1 << 27)
 #       define R600_RB_RPTR_WR_ENA                        (1 << 31)
 #define R600_CP_RB_RPTR_WR                                0xc108
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index d4a542247618..b4274883227f 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -910,7 +910,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
 
 	args.v1.ucAction = action;
 	if (action == ATOM_TRANSMITTER_ACTION_INIT) {
-		args.v1.usInitInfo = connector_object_id;
+		args.v1.usInitInfo = cpu_to_le16(connector_object_id);
 	} else if (action == ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH) {
 		args.v1.asMode.ucLaneSel = lane_num;
 		args.v1.asMode.ucLaneSet = lane_set;
@@ -1140,7 +1140,7 @@ atombios_external_encoder_setup(struct drm_encoder *encoder,
 	case 3:
 		args.v3.sExtEncoder.ucAction = action;
 		if (action == EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT)
-			args.v3.sExtEncoder.usConnectorId = connector_object_id;
+			args.v3.sExtEncoder.usConnectorId = cpu_to_le16(connector_object_id);
 		else
 			args.v3.sExtEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
 		args.v3.sExtEncoder.ucEncoderMode = atombios_get_encoder_mode(encoder);
@@ -1570,11 +1570,21 @@ atombios_apply_encoder_quirks(struct drm_encoder *encoder,
 	}
 
 	/* set scaler clears this on some chips */
-	/* XXX check DCE4 */
-	if (!(radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))) {
-		if (ASIC_IS_AVIVO(rdev) && (mode->flags & DRM_MODE_FLAG_INTERLACE))
-			WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset,
-			       AVIVO_D1MODE_INTERLEAVE_EN);
+	if (ASIC_IS_AVIVO(rdev) &&
+	    (!(radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)))) {
+		if (ASIC_IS_DCE4(rdev)) {
+			if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+				WREG32(EVERGREEN_DATA_FORMAT + radeon_crtc->crtc_offset,
+				       EVERGREEN_INTERLEAVE_EN);
+			else
+				WREG32(EVERGREEN_DATA_FORMAT + radeon_crtc->crtc_offset, 0);
+		} else {
+			if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+				WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset,
+				       AVIVO_D1MODE_INTERLEAVE_EN);
+			else
+				WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset, 0);
+		}
 	}
 }
 
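
The two usInitInfo/usConnectorId hunks above exist because AtomBIOS parameter tables are defined little-endian, so a u16 stored from a big-endian CPU must be swapped on the way in. Below is a minimal, self-contained userspace sketch of what cpu_to_le16() accomplishes; the helper name and test value are illustrative, not the kernel's implementation.

/*
 * Sketch of the byte swap behind cpu_to_le16(): a no-op on little-endian
 * hosts, a 16-bit swap on big-endian ones, so the value lands in memory
 * in AtomBIOS (little-endian) byte order either way.
 */
#include <stdint.h>
#include <stdio.h>

static uint16_t sketch_cpu_to_le16(uint16_t v)
{
	union { uint16_t u; uint8_t b[2]; } probe = { .u = 1 };

	if (probe.b[0] == 1)			/* little-endian host: no-op */
		return v;
	return (uint16_t)((v << 8) | (v >> 8));	/* big-endian host: swap */
}

int main(void)
{
	uint16_t connector_object_id = 0x1234;	/* illustrative value */

	printf("stored as 0x%04x\n", sketch_cpu_to_le16(connector_object_id));
	return 0;
}
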
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index 28431e78ab56..0b7b486c97e8 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -113,11 +113,14 @@ static int radeonfb_create_pinned_object(struct radeon_fbdev *rfbdev,
 	u32 tiling_flags = 0;
 	int ret;
 	int aligned_size, size;
+	int height = mode_cmd->height;
 
 	/* need to align pitch with crtc limits */
 	mode_cmd->pitch = radeon_align_pitch(rdev, mode_cmd->width, mode_cmd->bpp, fb_tiled) * ((mode_cmd->bpp + 1) / 8);
 
-	size = mode_cmd->pitch * mode_cmd->height;
+	if (rdev->family >= CHIP_R600)
+		height = ALIGN(mode_cmd->height, 8);
+	size = mode_cmd->pitch * height;
 	aligned_size = ALIGN(size, PAGE_SIZE);
 	ret = radeon_gem_object_create(rdev, aligned_size, 0,
 				       RADEON_GEM_DOMAIN_VRAM,
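
The height padding above follows the kernel's ALIGN() semantics: round up to the next multiple of a power of two, so on R600 and newer the buffer object is sized for the padded surface height. A self-contained sketch of the size computation, assuming an illustrative 1680x1050 32bpp mode; SKETCH_ALIGN mirrors the kernel macro.

/*
 * ALIGN(x, a) rounds x up to the next multiple of a (a power of two).
 * 1050 lines pad to 1056 on R600+, so the last rows cannot land past
 * the end of the allocation.
 */
#include <stdio.h>

#define SKETCH_ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	int pitch = 1680 * 4;			/* bytes per scanline at 32bpp */
	int height = SKETCH_ALIGN(1050, 8);	/* 1050 -> 1056 */

	printf("height = %d, bo size = %d bytes\n", height, pitch * height);
	return 0;
}
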
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index c3f23f6ff60e..5067d18d0009 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -209,6 +209,7 @@ enum radeon_connector_table {
 	CT_EMAC,
 	CT_RN50_POWER,
 	CT_MAC_X800,
+	CT_MAC_G5_9600,
 };
 
 enum radeon_dvo_chip {
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index df5734d0c4af..e446979e0e0a 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -791,9 +791,9 @@ static int radeon_ttm_debugfs_init(struct radeon_device *rdev)
 		radeon_mem_types_list[i].show = &radeon_mm_dump_table;
 		radeon_mem_types_list[i].driver_features = 0;
 		if (i == 0)
-			radeon_mem_types_list[i].data = &rdev->mman.bdev.man[TTM_PL_VRAM].priv;
+			radeon_mem_types_list[i].data = rdev->mman.bdev.man[TTM_PL_VRAM].priv;
 		else
-			radeon_mem_types_list[i].data = &rdev->mman.bdev.man[TTM_PL_TT].priv;
+			radeon_mem_types_list[i].data = rdev->mman.bdev.man[TTM_PL_TT].priv;
 
 	}
 	/* Add ttm page pool to debugfs */
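
The one-character fix above is a classic pointer bug: "&...priv" yields the address of the priv field itself, while the debugfs dump routine expects the object that priv points to. A minimal sketch of the bug class, using stand-in types rather than the real TTM/drm_mm structures:

/*
 * "&man.priv" is a pointer to the pointer field (old code); "man.priv"
 * is the object it points to (new code). Dereferencing the former as if
 * it were the object is the bug being fixed.
 */
#include <assert.h>

struct mm { int nodes; };		/* stands in for drm_mm */
struct manager { void *priv; };		/* stands in for the TTM manager */

int main(void)
{
	struct mm vram_mm = { .nodes = 42 };
	struct manager man = { .priv = &vram_mm };

	void *wrong = &man.priv;	/* address of the field: old code */
	void *right = man.priv;		/* the mm object itself: new code */

	assert(right == &vram_mm);
	assert(wrong != right);
	return 0;
}
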
diff --git a/drivers/gpu/drm/radeon/reg_srcs/r300 b/drivers/gpu/drm/radeon/reg_srcs/r300
index b506ec1cab4b..e8a1786b6426 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/r300
+++ b/drivers/gpu/drm/radeon/reg_srcs/r300
@@ -683,9 +683,7 @@ r300 0x4f60
 0x4DF4 US_ALU_CONST_G_31
 0x4DF8 US_ALU_CONST_B_31
 0x4DFC US_ALU_CONST_A_31
-0x4E04 RB3D_BLENDCNTL_R3
 0x4E08 RB3D_ABLENDCNTL_R3
-0x4E0C RB3D_COLOR_CHANNEL_MASK
 0x4E10 RB3D_CONSTANT_COLOR
 0x4E14 RB3D_COLOR_CLEAR_VALUE
 0x4E18 RB3D_ROPCNTL_R3
@@ -706,13 +704,11 @@ r300 0x4f60
 0x4E74 RB3D_CMASK_WRINDEX
 0x4E78 RB3D_CMASK_DWORD
 0x4E7C RB3D_CMASK_RDINDEX
-0x4E80 RB3D_AARESOLVE_OFFSET
-0x4E84 RB3D_AARESOLVE_PITCH
-0x4E88 RB3D_AARESOLVE_CTL
 0x4EA0 RB3D_DISCARD_SRC_PIXEL_LTE_THRESHOLD
 0x4EA4 RB3D_DISCARD_SRC_PIXEL_GTE_THRESHOLD
 0x4F04 ZB_ZSTENCILCNTL
 0x4F08 ZB_STENCILREFMASK
 0x4F14 ZB_ZTOP
 0x4F18 ZB_ZCACHE_CTLSTAT
+0x4F28 ZB_DEPTHCLEARVALUE
 0x4F58 ZB_ZPASS_DATA
diff --git a/drivers/gpu/drm/radeon/reg_srcs/r420 b/drivers/gpu/drm/radeon/reg_srcs/r420
index 8c1214c2390f..722074e21e2f 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/r420
+++ b/drivers/gpu/drm/radeon/reg_srcs/r420
@@ -130,7 +130,6 @@ r420 0x4f60
 0x401C GB_SELECT
 0x4020 GB_AA_CONFIG
 0x4024 GB_FIFO_SIZE
-0x4028 GB_Z_PEQ_CONFIG
 0x4100 TX_INVALTAGS
 0x4200 GA_POINT_S0
 0x4204 GA_POINT_T0
@@ -750,9 +749,7 @@ r420 0x4f60
 0x4DF4 US_ALU_CONST_G_31
 0x4DF8 US_ALU_CONST_B_31
 0x4DFC US_ALU_CONST_A_31
-0x4E04 RB3D_BLENDCNTL_R3
 0x4E08 RB3D_ABLENDCNTL_R3
-0x4E0C RB3D_COLOR_CHANNEL_MASK
 0x4E10 RB3D_CONSTANT_COLOR
 0x4E14 RB3D_COLOR_CLEAR_VALUE
 0x4E18 RB3D_ROPCNTL_R3
@@ -773,13 +770,11 @@ r420 0x4f60
 0x4E74 RB3D_CMASK_WRINDEX
 0x4E78 RB3D_CMASK_DWORD
 0x4E7C RB3D_CMASK_RDINDEX
-0x4E80 RB3D_AARESOLVE_OFFSET
-0x4E84 RB3D_AARESOLVE_PITCH
-0x4E88 RB3D_AARESOLVE_CTL
 0x4EA0 RB3D_DISCARD_SRC_PIXEL_LTE_THRESHOLD
 0x4EA4 RB3D_DISCARD_SRC_PIXEL_GTE_THRESHOLD
 0x4F04 ZB_ZSTENCILCNTL
 0x4F08 ZB_STENCILREFMASK
 0x4F14 ZB_ZTOP
 0x4F18 ZB_ZCACHE_CTLSTAT
+0x4F28 ZB_DEPTHCLEARVALUE
 0x4F58 ZB_ZPASS_DATA
diff --git a/drivers/gpu/drm/radeon/reg_srcs/rs600 b/drivers/gpu/drm/radeon/reg_srcs/rs600
index 0828d80396f2..d9f62866bbc1 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/rs600
+++ b/drivers/gpu/drm/radeon/reg_srcs/rs600
@@ -749,9 +749,7 @@ rs600 0x6d40
 0x4DF4 US_ALU_CONST_G_31
 0x4DF8 US_ALU_CONST_B_31
 0x4DFC US_ALU_CONST_A_31
-0x4E04 RB3D_BLENDCNTL_R3
 0x4E08 RB3D_ABLENDCNTL_R3
-0x4E0C RB3D_COLOR_CHANNEL_MASK
 0x4E10 RB3D_CONSTANT_COLOR
 0x4E14 RB3D_COLOR_CLEAR_VALUE
 0x4E18 RB3D_ROPCNTL_R3
@@ -772,13 +770,11 @@ rs600 0x6d40
 0x4E74 RB3D_CMASK_WRINDEX
 0x4E78 RB3D_CMASK_DWORD
 0x4E7C RB3D_CMASK_RDINDEX
-0x4E80 RB3D_AARESOLVE_OFFSET
-0x4E84 RB3D_AARESOLVE_PITCH
-0x4E88 RB3D_AARESOLVE_CTL
 0x4EA0 RB3D_DISCARD_SRC_PIXEL_LTE_THRESHOLD
 0x4EA4 RB3D_DISCARD_SRC_PIXEL_GTE_THRESHOLD
 0x4F04 ZB_ZSTENCILCNTL
 0x4F08 ZB_STENCILREFMASK
 0x4F14 ZB_ZTOP
 0x4F18 ZB_ZCACHE_CTLSTAT
+0x4F28 ZB_DEPTHCLEARVALUE
 0x4F58 ZB_ZPASS_DATA
diff --git a/drivers/gpu/drm/radeon/reg_srcs/rv515 b/drivers/gpu/drm/radeon/reg_srcs/rv515
index ef422bbacfc1..911a8fbd32bb 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/rv515
+++ b/drivers/gpu/drm/radeon/reg_srcs/rv515
@@ -164,7 +164,6 @@ rv515 0x6d40
 0x401C GB_SELECT
 0x4020 GB_AA_CONFIG
 0x4024 GB_FIFO_SIZE
-0x4028 GB_Z_PEQ_CONFIG
 0x4100 TX_INVALTAGS
 0x4114 SU_TEX_WRAP_PS3
 0x4118 PS3_ENABLE
@@ -461,9 +460,7 @@ rv515 0x6d40
 0x4DF4 US_ALU_CONST_G_31
 0x4DF8 US_ALU_CONST_B_31
 0x4DFC US_ALU_CONST_A_31
-0x4E04 RB3D_BLENDCNTL_R3
 0x4E08 RB3D_ABLENDCNTL_R3
-0x4E0C RB3D_COLOR_CHANNEL_MASK
 0x4E10 RB3D_CONSTANT_COLOR
 0x4E14 RB3D_COLOR_CLEAR_VALUE
 0x4E18 RB3D_ROPCNTL_R3
@@ -484,9 +481,6 @@ rv515 0x6d40
 0x4E74 RB3D_CMASK_WRINDEX
 0x4E78 RB3D_CMASK_DWORD
 0x4E7C RB3D_CMASK_RDINDEX
-0x4E80 RB3D_AARESOLVE_OFFSET
-0x4E84 RB3D_AARESOLVE_PITCH
-0x4E88 RB3D_AARESOLVE_CTL
 0x4EA0 RB3D_DISCARD_SRC_PIXEL_LTE_THRESHOLD
 0x4EA4 RB3D_DISCARD_SRC_PIXEL_GTE_THRESHOLD
 0x4EF8 RB3D_CONSTANT_COLOR_AR
@@ -496,4 +490,5 @@ rv515 0x6d40
 0x4F14 ZB_ZTOP
 0x4F18 ZB_ZCACHE_CTLSTAT
 0x4F58 ZB_ZPASS_DATA
+0x4F28 ZB_DEPTHCLEARVALUE
 0x4FD4 ZB_STENCILREFMASK_BF
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index 0137d3e3728d..6638c8e4c81b 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -77,9 +77,9 @@ void rs690_pm_info(struct radeon_device *rdev)
 	switch (crev) {
 	case 1:
 		tmp.full = dfixed_const(100);
-		rdev->pm.igp_sideport_mclk.full = dfixed_const(info->info.ulBootUpMemoryClock);
+		rdev->pm.igp_sideport_mclk.full = dfixed_const(le32_to_cpu(info->info.ulBootUpMemoryClock));
 		rdev->pm.igp_sideport_mclk.full = dfixed_div(rdev->pm.igp_sideport_mclk, tmp);
-		if (info->info.usK8MemoryClock)
+		if (le16_to_cpu(info->info.usK8MemoryClock))
 			rdev->pm.igp_system_mclk.full = dfixed_const(le16_to_cpu(info->info.usK8MemoryClock));
 		else if (rdev->clock.default_mclk) {
 			rdev->pm.igp_system_mclk.full = dfixed_const(rdev->clock.default_mclk);
@@ -91,16 +91,16 @@ void rs690_pm_info(struct radeon_device *rdev)
 		break;
 	case 2:
 		tmp.full = dfixed_const(100);
-		rdev->pm.igp_sideport_mclk.full = dfixed_const(info->info_v2.ulBootUpSidePortClock);
+		rdev->pm.igp_sideport_mclk.full = dfixed_const(le32_to_cpu(info->info_v2.ulBootUpSidePortClock));
 		rdev->pm.igp_sideport_mclk.full = dfixed_div(rdev->pm.igp_sideport_mclk, tmp);
-		if (info->info_v2.ulBootUpUMAClock)
-			rdev->pm.igp_system_mclk.full = dfixed_const(info->info_v2.ulBootUpUMAClock);
+		if (le32_to_cpu(info->info_v2.ulBootUpUMAClock))
+			rdev->pm.igp_system_mclk.full = dfixed_const(le32_to_cpu(info->info_v2.ulBootUpUMAClock));
 		else if (rdev->clock.default_mclk)
 			rdev->pm.igp_system_mclk.full = dfixed_const(rdev->clock.default_mclk);
 		else
 			rdev->pm.igp_system_mclk.full = dfixed_const(66700);
 		rdev->pm.igp_system_mclk.full = dfixed_div(rdev->pm.igp_system_mclk, tmp);
-		rdev->pm.igp_ht_link_clk.full = dfixed_const(info->info_v2.ulHTLinkFreq);
+		rdev->pm.igp_ht_link_clk.full = dfixed_const(le32_to_cpu(info->info_v2.ulHTLinkFreq));
 		rdev->pm.igp_ht_link_clk.full = dfixed_div(rdev->pm.igp_ht_link_clk, tmp);
 		rdev->pm.igp_ht_link_width.full = dfixed_const(le16_to_cpu(info->info_v2.usMinHTLinkWidth));
 		break;
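
Besides the endian conversions, these hunks lean on drm's fixed-point helpers: dfixed_const(100) and dfixed_div() convert the AtomBIOS clocks (stored in 10 kHz units) to MHz without floating point. A userspace sketch of that arithmetic follows, re-implemented under the assumption that dfixed_* is the 20.12 fixed-point format from drm_fixed.h; the clock value is illustrative.

/*
 * 20.12 fixed point: dfixed_const(A) is A << 12, and division keeps the
 * 12 fractional bits via a 64-bit intermediate. Dividing a 10 kHz count
 * by 100 yields MHz.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t sketch_dfixed_const(uint32_t a) { return a << 12; }

static uint32_t sketch_dfixed_div(uint32_t a, uint32_t b)
{
	return (uint32_t)(((uint64_t)a << 12) / b);
}

int main(void)
{
	uint32_t boot_up_mclk = 80000;	/* 10 kHz units = 800 MHz, illustrative */
	uint32_t tmp = sketch_dfixed_const(100);
	uint32_t mclk = sketch_dfixed_div(sketch_dfixed_const(boot_up_mclk), tmp);

	printf("sideport mclk = %u.%03u MHz\n",
	       (unsigned)(mclk >> 12),
	       (unsigned)((mclk & 0xfff) * 1000 / 4096));
	return 0;
}
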
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 3a95999d2fef..ee5541c6a623 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -321,7 +321,11 @@ static int rv770_cp_load_microcode(struct radeon_device *rdev)
 		return -EINVAL;
 
 	r700_cp_stop(rdev);
-	WREG32(CP_RB_CNTL, RB_NO_UPDATE | (15 << 8) | (3 << 0));
+	WREG32(CP_RB_CNTL,
+#ifdef __BIG_ENDIAN
+	       BUF_SWAP_32BIT |
+#endif
+	       RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3));
 
 	/* Reset cp */
 	WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
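
The rewrite above replaces magic shifts with the named field macros and, on big-endian builds, additionally asks the CP to byte-swap its ring-buffer fetches via BUF_SWAP_32BIT. A small sanity-check sketch: macro values are copied from rv770d.h, and the assert just demonstrates that the named form encodes the same CP_RB_CNTL value as the old constant (in the kernel, __BIG_ENDIAN comes from the byteorder headers; here the ifdef merely mirrors the driver's).

#include <assert.h>
#include <stdint.h>

#define RB_BUFSZ(x)	((x) << 0)	/* log2-encoded size field */
#define RB_BLKSZ(x)	((x) << 8)	/* log2-encoded block field */
#define RB_NO_UPDATE	(1 << 27)
#define BUF_SWAP_32BIT	(2 << 16)

int main(void)
{
	uint32_t cntl = RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3);

	/* same register value as the old magic constant */
	assert(cntl == (RB_NO_UPDATE | (15 << 8) | (3 << 0)));
#ifdef __BIG_ENDIAN
	cntl |= BUF_SWAP_32BIT;		/* CP byte-swaps ring fetches */
#endif
	(void)cntl;
	return 0;
}
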
diff --git a/drivers/gpu/drm/radeon/rv770d.h b/drivers/gpu/drm/radeon/rv770d.h
index abc8cf5a3672..79fa588e9ed5 100644
--- a/drivers/gpu/drm/radeon/rv770d.h
+++ b/drivers/gpu/drm/radeon/rv770d.h
@@ -76,10 +76,10 @@
 #define	ROQ_IB1_START(x)	((x) << 0)
 #define	ROQ_IB2_START(x)	((x) << 8)
 #define	CP_RB_CNTL		0xC104
-#define	RB_BUFSZ(x)		((x)<<0)
-#define	RB_BLKSZ(x)		((x)<<8)
-#define	RB_NO_UPDATE		(1<<27)
-#define	RB_RPTR_WR_ENA		(1<<31)
+#define	RB_BUFSZ(x)		((x) << 0)
+#define	RB_BLKSZ(x)		((x) << 8)
+#define	RB_NO_UPDATE		(1 << 27)
+#define	RB_RPTR_WR_ENA		(1 << 31)
 #define	BUF_SWAP_32BIT		(2 << 16)
 #define	CP_RB_RPTR		0x8700
 #define	CP_RB_RPTR_ADDR		0xC10C