Diffstat (limited to 'drivers/gpu')
-rw-r--r--  drivers/gpu/drm/drm_fb_helper.c | 4
-rw-r--r--  drivers/gpu/drm/drm_info.c | 9
-rw-r--r--  drivers/gpu/drm/drm_irq.c | 29
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c | 11
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 22
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 25
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_tiling.c | 21
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 6
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 6
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 196
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c | 36
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h | 1
-rw-r--r--  drivers/gpu/drm/i915/intel_hdmi.c | 39
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c | 12
-rw-r--r--  drivers/gpu/drm/i915/intel_panel.c | 9
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c | 26
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.h | 13
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c | 66
-rw-r--r--  drivers/gpu/drm/i915/intel_tv.c | 43
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bios.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo.c | 18
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_connector.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_dma.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drv.h | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_mem.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_mm.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_notifier.c | 11
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_pm.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_dfp.c | 12
-rw-r--r--  drivers/gpu/drm/nouveau/nv40_graph.c | 46
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_instmem.c | 8
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_vm.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/atombios_crtc.c | 100
-rw-r--r--  drivers/gpu/drm/radeon/evergreen.c | 25
-rw-r--r--  drivers/gpu/drm/radeon/evergreen_blit_kms.c | 27
-rw-r--r--  drivers/gpu/drm/radeon/evergreen_blit_shaders.c | 8
-rw-r--r--  drivers/gpu/drm/radeon/evergreend.h | 1
-rw-r--r--  drivers/gpu/drm/radeon/mkregtable.c | 5
-rw-r--r--  drivers/gpu/drm/radeon/r100.c | 87
-rw-r--r--  drivers/gpu/drm/radeon/r100_track.h | 13
-rw-r--r--  drivers/gpu/drm/radeon/r200.c | 18
-rw-r--r--  drivers/gpu/drm/radeon/r300.c | 44
-rw-r--r--  drivers/gpu/drm/radeon/r300_reg.h | 2
-rw-r--r--  drivers/gpu/drm/radeon/r600.c | 25
-rw-r--r--  drivers/gpu/drm/radeon/r600_blit.c | 11
-rw-r--r--  drivers/gpu/drm/radeon/r600_blit_kms.c | 33
-rw-r--r--  drivers/gpu/drm/radeon/r600_blit_shaders.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/r600_cp.c | 31
-rw-r--r--  drivers/gpu/drm/radeon/r600_cs.c | 46
-rw-r--r--  drivers/gpu/drm/radeon/r600d.h | 9
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.c | 3
-rw-r--r--  drivers/gpu/drm/radeon/radeon_atombios.c | 48
-rw-r--r--  drivers/gpu/drm/radeon/radeon_combios.c | 47
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_display.c | 12
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.h | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_encoders.c | 24
-rw-r--r--  drivers/gpu/drm/radeon/radeon_fb.c | 5
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gem.c | 5
-rw-r--r--  drivers/gpu/drm/radeon/radeon_legacy_crtc.c | 3
-rw-r--r--  drivers/gpu/drm/radeon/radeon_mode.h | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ttm.c | 18
-rw-r--r--  drivers/gpu/drm/radeon/reg_srcs/r300 | 6
-rw-r--r--  drivers/gpu/drm/radeon/reg_srcs/r420 | 7
-rw-r--r--  drivers/gpu/drm/radeon/reg_srcs/rs600 | 6
-rw-r--r--  drivers/gpu/drm/radeon/reg_srcs/rv515 | 7
-rw-r--r--  drivers/gpu/drm/radeon/rs600.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/rs690.c | 13
-rw-r--r--  drivers/gpu/drm/radeon/rv770.c | 9
-rw-r--r--  drivers/gpu/drm/radeon/rv770d.h | 8
74 files changed, 981 insertions(+), 440 deletions(-)
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 6977a1ce9d98..f73ef4390db6 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -672,7 +672,7 @@ int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info)
 	struct drm_crtc_helper_funcs *crtc_funcs;
 	u16 *red, *green, *blue, *transp;
 	struct drm_crtc *crtc;
-	int i, rc = 0;
+	int i, j, rc = 0;
 	int start;
 
 	for (i = 0; i < fb_helper->crtc_count; i++) {
@@ -685,7 +685,7 @@ int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info)
 	transp = cmap->transp;
 	start = cmap->start;
 
-	for (i = 0; i < cmap->len; i++) {
+	for (j = 0; j < cmap->len; j++) {
 		u16 hred, hgreen, hblue, htransp = 0xffff;
 
 		hred = *red++;
diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
index 3cdbaf379bb5..be9a9c07d152 100644
--- a/drivers/gpu/drm/drm_info.c
+++ b/drivers/gpu/drm/drm_info.c
@@ -283,17 +283,18 @@ int drm_vma_info(struct seq_file *m, void *data)
 #endif
 
 	mutex_lock(&dev->struct_mutex);
-	seq_printf(m, "vma use count: %d, high_memory = %p, 0x%08llx\n",
+	seq_printf(m, "vma use count: %d, high_memory = %pK, 0x%pK\n",
 		   atomic_read(&dev->vma_count),
-		   high_memory, (u64)virt_to_phys(high_memory));
+		   high_memory, (void *)virt_to_phys(high_memory));
 
 	list_for_each_entry(pt, &dev->vmalist, head) {
 		vma = pt->vma;
 		if (!vma)
 			continue;
 		seq_printf(m,
-			   "\n%5d 0x%08lx-0x%08lx %c%c%c%c%c%c 0x%08lx000",
-			   pt->pid, vma->vm_start, vma->vm_end,
+			   "\n%5d 0x%pK-0x%pK %c%c%c%c%c%c 0x%08lx000",
+			   pt->pid,
+			   (void *)vma->vm_start, (void *)vma->vm_end,
 			   vma->vm_flags & VM_READ ? 'r' : '-',
 			   vma->vm_flags & VM_WRITE ? 'w' : '-',
 			   vma->vm_flags & VM_EXEC ? 'x' : '-',
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 3dadfa2a8528..28d1d3c24d65 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -164,8 +164,10 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc)
 	 * available. In that case we can't account for this and just
 	 * hope for the best.
 	 */
-	if ((vblrc > 0) && (abs(diff_ns) > 1000000))
+	if ((vblrc > 0) && (abs64(diff_ns) > 1000000)) {
 		atomic_inc(&dev->_vblank_count[crtc]);
+		smp_mb__after_atomic_inc();
+	}
 
 	/* Invalidate all timestamps while vblank irq's are off. */
 	clear_vblank_timestamps(dev, crtc);
@@ -491,6 +493,12 @@ void drm_calc_timestamping_constants(struct drm_crtc *crtc)
 	/* Dot clock in Hz: */
 	dotclock = (u64) crtc->hwmode.clock * 1000;
 
+	/* Fields of interlaced scanout modes are only halve a frame duration.
+	 * Double the dotclock to get halve the frame-/line-/pixelduration.
+	 */
+	if (crtc->hwmode.flags & DRM_MODE_FLAG_INTERLACE)
+		dotclock *= 2;
+
 	/* Valid dotclock? */
 	if (dotclock > 0) {
 		/* Convert scanline length in pixels and video dot clock to
@@ -603,14 +611,6 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
 		return -EAGAIN;
 	}
 
-	/* Don't know yet how to handle interlaced or
-	 * double scan modes. Just no-op for now.
-	 */
-	if (mode->flags & (DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLSCAN)) {
-		DRM_DEBUG("crtc %d: Noop due to unsupported mode.\n", crtc);
-		return -ENOTSUPP;
-	}
-
 	/* Get current scanout position with system timestamp.
 	 * Repeat query up to DRM_TIMESTAMP_MAXRETRIES times
 	 * if single query takes longer than max_error nanoseconds.
@@ -858,10 +858,11 @@ static void drm_update_vblank_count(struct drm_device *dev, int crtc)
 	if (rc) {
 		tslot = atomic_read(&dev->_vblank_count[crtc]) + diff;
 		vblanktimestamp(dev, crtc, tslot) = t_vblank;
-		smp_wmb();
 	}
 
+	smp_mb__before_atomic_inc();
 	atomic_add(diff, &dev->_vblank_count[crtc]);
+	smp_mb__after_atomic_inc();
 }
 
 /**
@@ -1011,7 +1012,8 @@ int drm_modeset_ctl(struct drm_device *dev, void *data,
 		     struct drm_file *file_priv)
 {
 	struct drm_modeset_ctl *modeset = data;
-	int crtc, ret = 0;
+	int ret = 0;
+	unsigned int crtc;
 
 	/* If drm_vblank_init() hasn't been called yet, just no-op */
 	if (!dev->num_crtcs)
@@ -1293,15 +1295,16 @@ bool drm_handle_vblank(struct drm_device *dev, int crtc)
 		 * e.g., due to spurious vblank interrupts. We need to
 		 * ignore those for accounting.
 		 */
-		if (abs(diff_ns) > DRM_REDUNDANT_VBLIRQ_THRESH_NS) {
+		if (abs64(diff_ns) > DRM_REDUNDANT_VBLIRQ_THRESH_NS) {
 			/* Store new timestamp in ringbuffer. */
 			vblanktimestamp(dev, crtc, vblcount + 1) = tvblank;
-			smp_wmb();
 
 			/* Increment cooked vblank count. This also atomically commits
 			 * the timestamp computed above.
 			 */
+			smp_mb__before_atomic_inc();
 			atomic_inc(&dev->_vblank_count[crtc]);
+			smp_mb__after_atomic_inc();
 		} else {
 			DRM_DEBUG("crtc %d: Redundant vblirq ignored. diff_ns = %d\n",
 				  crtc, (int) diff_ns);
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 3601466c5502..4ff9b6cc973f 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -865,7 +865,7 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
 		int max_freq;
 
 		/* RPSTAT1 is in the GT power well */
-		__gen6_force_wake_get(dev_priv);
+		__gen6_gt_force_wake_get(dev_priv);
 
 		seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
 		seq_printf(m, "RPSTAT1: 0x%08x\n", I915_READ(GEN6_RPSTAT1));
@@ -888,7 +888,7 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
888 seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n", 888 seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
889 max_freq * 100); 889 max_freq * 100);
890 890
891 __gen6_force_wake_put(dev_priv); 891 __gen6_gt_force_wake_put(dev_priv);
892 } else { 892 } else {
893 seq_printf(m, "no P-state info available\n"); 893 seq_printf(m, "no P-state info available\n");
894 } 894 }
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 17bd766f2081..e33d9be7df3b 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1895,6 +1895,17 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 	if (IS_GEN2(dev))
 		dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30));
 
+	/* 965GM sometimes incorrectly writes to hardware status page (HWS)
+	 * using 32bit addressing, overwriting memory if HWS is located
+	 * above 4GB.
+	 *
+	 * The documentation also mentions an issue with undefined
+	 * behaviour if any general state is accessed within a page above 4GB,
+	 * which also needs to be handled carefully.
+	 */
+	if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
+		dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32));
+
 	mmio_bar = IS_GEN2(dev) ? 1 : 0;
 	dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, 0);
 	if (!dev_priv->regs) {
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index cfb56d0ff367..22ec066adae6 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -46,6 +46,12 @@ module_param_named(fbpercrtc, i915_fbpercrtc, int, 0400);
 unsigned int i915_powersave = 1;
 module_param_named(powersave, i915_powersave, int, 0600);
 
+unsigned int i915_semaphores = 0;
+module_param_named(semaphores, i915_semaphores, int, 0600);
+
+unsigned int i915_enable_rc6 = 0;
+module_param_named(i915_enable_rc6, i915_enable_rc6, int, 0600);
+
 unsigned int i915_lvds_downclock = 0;
 module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400);
 
@@ -251,7 +257,7 @@ void intel_detect_pch (struct drm_device *dev)
 	}
 }
 
-void __gen6_force_wake_get(struct drm_i915_private *dev_priv)
+void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
 {
 	int count;
 
@@ -267,12 +273,22 @@ void __gen6_force_wake_get(struct drm_i915_private *dev_priv)
 		udelay(10);
 }
 
-void __gen6_force_wake_put(struct drm_i915_private *dev_priv)
+void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
 {
 	I915_WRITE_NOTRACE(FORCEWAKE, 0);
 	POSTING_READ(FORCEWAKE);
 }
 
+void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
+{
+	int loop = 500;
+	u32 fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
+	while (fifo < 20 && loop--) {
+		udelay(10);
+		fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
+	}
+}
+
 static int i915_drm_freeze(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -360,7 +376,7 @@ static int i915_drm_thaw(struct drm_device *dev)
 		/* Resume the modeset for every activated CRTC */
 		drm_helper_resume_force_mode(dev);
 
-		if (dev_priv->renderctx && dev_priv->pwrctx)
+		if (IS_IRONLAKE_M(dev))
 			ironlake_enable_rc6(dev);
 	}
 
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index a0149c619cdd..456f40484838 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -956,8 +956,10 @@ extern struct drm_ioctl_desc i915_ioctls[];
 extern int i915_max_ioctl;
 extern unsigned int i915_fbpercrtc;
 extern unsigned int i915_powersave;
+extern unsigned int i915_semaphores;
 extern unsigned int i915_lvds_downclock;
 extern unsigned int i915_panel_use_ssc;
+extern unsigned int i915_enable_rc6;
 
 extern int i915_suspend(struct drm_device *dev, pm_message_t state);
 extern int i915_resume(struct drm_device *dev);
@@ -1176,6 +1178,9 @@ void i915_gem_detach_phys_object(struct drm_device *dev,
 void i915_gem_free_all_phys_object(struct drm_device *dev);
 void i915_gem_release(struct drm_device *dev, struct drm_file *file);
 
+uint32_t
+i915_gem_get_unfenced_gtt_alignment(struct drm_i915_gem_object *obj);
+
 /* i915_gem_gtt.c */
 void i915_gem_restore_gtt_mappings(struct drm_device *dev);
 int __must_check i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj);
@@ -1352,22 +1357,32 @@ __i915_write(64, q)
  * must be set to prevent GT core from power down and stale values being
  * returned.
  */
-void __gen6_force_wake_get(struct drm_i915_private *dev_priv);
-void __gen6_force_wake_put (struct drm_i915_private *dev_priv);
-static inline u32 i915_safe_read(struct drm_i915_private *dev_priv, u32 reg)
+void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv);
+void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv);
+void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv);
+
+static inline u32 i915_gt_read(struct drm_i915_private *dev_priv, u32 reg)
 {
 	u32 val;
 
 	if (dev_priv->info->gen >= 6) {
-		__gen6_force_wake_get(dev_priv);
+		__gen6_gt_force_wake_get(dev_priv);
 		val = I915_READ(reg);
-		__gen6_force_wake_put(dev_priv);
+		__gen6_gt_force_wake_put(dev_priv);
 	} else
 		val = I915_READ(reg);
 
 	return val;
 }
 
+static inline void i915_gt_write(struct drm_i915_private *dev_priv,
+				 u32 reg, u32 val)
+{
+	if (dev_priv->info->gen >= 6)
+		__gen6_gt_wait_for_fifo(dev_priv);
+	I915_WRITE(reg, val);
+}
+
 static inline void
 i915_write(struct drm_i915_private *dev_priv, u32 reg, u64 val, int len)
 {
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index cf4f74c7c6fb..36e66cc5225e 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1398,7 +1398,7 @@ i915_gem_get_gtt_alignment(struct drm_i915_gem_object *obj)
  * Return the required GTT alignment for an object, only taking into account
  * unfenced tiled surface requirements.
  */
-static uint32_t
+uint32_t
 i915_gem_get_unfenced_gtt_alignment(struct drm_i915_gem_object *obj)
 {
 	struct drm_device *dev = obj->base.dev;
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index d2f445e825f2..50ab1614571c 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -772,8 +772,8 @@ i915_gem_execbuffer_sync_rings(struct drm_i915_gem_object *obj,
 	if (from == NULL || to == from)
 		return 0;
 
-	/* XXX gpu semaphores are currently causing hard hangs on SNB mobile */
-	if (INTEL_INFO(obj->base.dev)->gen < 6 || IS_MOBILE(obj->base.dev))
+	/* XXX gpu semaphores are implicated in various hard hangs on SNB */
+	if (INTEL_INFO(obj->base.dev)->gen < 6 || !i915_semaphores)
 		return i915_gem_object_wait_rendering(obj, true);
 
 	idx = intel_ring_sync_index(from, to);
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 22a32b9932c5..d64843e18df2 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -349,14 +349,27 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
 		    (obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end &&
 		     i915_gem_object_fence_ok(obj, args->tiling_mode));
 
-		obj->tiling_changed = true;
-		obj->tiling_mode = args->tiling_mode;
-		obj->stride = args->stride;
+		/* Rebind if we need a change of alignment */
+		if (!obj->map_and_fenceable) {
+			u32 unfenced_alignment =
+				i915_gem_get_unfenced_gtt_alignment(obj);
+			if (obj->gtt_offset & (unfenced_alignment - 1))
+				ret = i915_gem_object_unbind(obj);
+		}
+
+		if (ret == 0) {
+			obj->tiling_changed = true;
+			obj->tiling_mode = args->tiling_mode;
+			obj->stride = args->stride;
+		}
 	}
+	/* we have to maintain this existing ABI... */
+	args->stride = obj->stride;
+	args->tiling_mode = obj->tiling_mode;
 	drm_gem_object_unreference(&obj->base);
 	mutex_unlock(&dev->struct_mutex);
 
-	return 0;
+	return ret;
 }
 
 /**
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 97f946dcc1aa..8a9e08bf1cf7 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -316,6 +316,8 @@ static void i915_hotplug_work_func(struct work_struct *work)
 	struct drm_mode_config *mode_config = &dev->mode_config;
 	struct intel_encoder *encoder;
 
+	DRM_DEBUG_KMS("running encoder hotplug functions\n");
+
 	list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
 		if (encoder->hot_plug)
 			encoder->hot_plug(encoder);
@@ -1649,9 +1651,7 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
 	} else {
 		hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG |
 			       SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG;
-		hotplug_mask |= SDE_AUX_MASK | SDE_FDI_MASK | SDE_TRANS_MASK;
-		I915_WRITE(FDI_RXA_IMR, 0);
-		I915_WRITE(FDI_RXB_IMR, 0);
+		hotplug_mask |= SDE_AUX_MASK;
 	}
 
 	dev_priv->pch_irq_mask = ~hotplug_mask;
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 5cfc68940f17..2abe240dae58 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -174,7 +174,9 @@
  * address/value pairs. Don't overdue it, though, x <= 2^4 must hold!
  */
 #define MI_LOAD_REGISTER_IMM(x)	MI_INSTR(0x22, 2*x-1)
-#define MI_FLUSH_DW		MI_INSTR(0x26, 2) /* for GEN6 */
+#define MI_FLUSH_DW		MI_INSTR(0x26, 1) /* for GEN6 */
+#define MI_INVALIDATE_TLB	(1<<18)
+#define MI_INVALIDATE_BSD	(1<<7)
 #define MI_BATCH_BUFFER		MI_INSTR(0x30, 1)
 #define MI_BATCH_NON_SECURE	(1)
 #define MI_BATCH_NON_SECURE_I965 (1<<8)
@@ -3269,6 +3271,8 @@
 #define FORCEWAKE				0xA18C
 #define FORCEWAKE_ACK				0x130090
 
+#define GT_FIFO_FREE_ENTRIES			0x120008
+
 #define GEN6_RPNSWREQ				0xA008
 #define GEN6_TURBO_DISABLE			(1<<31)
 #define GEN6_FREQUENCY(x)			((x)<<25)
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 7e42aa586504..49fb54fd9a18 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -1219,7 +1219,7 @@ static void sandybridge_blit_fbc_update(struct drm_device *dev)
 	u32 blt_ecoskpd;
 
 	/* Make sure blitter notifies FBC of writes */
-	__gen6_force_wake_get(dev_priv);
+	__gen6_gt_force_wake_get(dev_priv);
 	blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD);
 	blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY <<
 		GEN6_BLITTER_LOCK_SHIFT;
@@ -1230,7 +1230,7 @@ static void sandybridge_blit_fbc_update(struct drm_device *dev)
 			 GEN6_BLITTER_LOCK_SHIFT);
 	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
 	POSTING_READ(GEN6_BLITTER_ECOSKPD);
-	__gen6_force_wake_put(dev_priv);
+	__gen6_gt_force_wake_put(dev_priv);
 }
 
 static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
@@ -1630,19 +1630,19 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
 		struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj;
 
 		wait_event(dev_priv->pending_flip_queue,
+			   atomic_read(&dev_priv->mm.wedged) ||
 			   atomic_read(&obj->pending_flip) == 0);
 
 		/* Big Hammer, we also need to ensure that any pending
 		 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
 		 * current scanout is retired before unpinning the old
 		 * framebuffer.
+		 *
+		 * This should only fail upon a hung GPU, in which case we
+		 * can safely continue.
 		 */
 		ret = i915_gem_object_flush_gpu(obj, false);
-		if (ret) {
-			i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj);
-			mutex_unlock(&dev->struct_mutex);
-			return ret;
-		}
+		(void) ret;
 	}
 
 	ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y,
@@ -2045,6 +2045,31 @@ static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
 		   atomic_read(&obj->pending_flip) == 0);
 }
 
+static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_mode_config *mode_config = &dev->mode_config;
+	struct intel_encoder *encoder;
+
+	/*
+	 * If there's a non-PCH eDP on this crtc, it must be DP_A, and that
+	 * must be driven by its own crtc; no sharing is possible.
+	 */
+	list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
+		if (encoder->base.crtc != crtc)
+			continue;
+
+		switch (encoder->type) {
+		case INTEL_OUTPUT_EDP:
+			if (!intel_encoder_is_pch_edp(&encoder->base))
+				return false;
+			continue;
+		}
+	}
+
+	return true;
+}
+
 static void ironlake_crtc_enable(struct drm_crtc *crtc)
 {
 	struct drm_device *dev = crtc->dev;
@@ -2053,6 +2078,7 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
 	int pipe = intel_crtc->pipe;
 	int plane = intel_crtc->plane;
 	u32 reg, temp;
+	bool is_pch_port = false;
 
 	if (intel_crtc->active)
 		return;
@@ -2066,7 +2092,56 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
 		I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN);
 	}
 
-	ironlake_fdi_enable(crtc);
+	is_pch_port = intel_crtc_driving_pch(crtc);
+
+	if (is_pch_port)
+		ironlake_fdi_enable(crtc);
+	else {
+		/* disable CPU FDI tx and PCH FDI rx */
+		reg = FDI_TX_CTL(pipe);
+		temp = I915_READ(reg);
+		I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
+		POSTING_READ(reg);
+
+		reg = FDI_RX_CTL(pipe);
+		temp = I915_READ(reg);
+		temp &= ~(0x7 << 16);
+		temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
+		I915_WRITE(reg, temp & ~FDI_RX_ENABLE);
+
+		POSTING_READ(reg);
+		udelay(100);
+
+		/* Ironlake workaround, disable clock pointer after downing FDI */
+		if (HAS_PCH_IBX(dev))
+			I915_WRITE(FDI_RX_CHICKEN(pipe),
+				   I915_READ(FDI_RX_CHICKEN(pipe) &
+					     ~FDI_RX_PHASE_SYNC_POINTER_ENABLE));
+
+		/* still set train pattern 1 */
+		reg = FDI_TX_CTL(pipe);
+		temp = I915_READ(reg);
+		temp &= ~FDI_LINK_TRAIN_NONE;
+		temp |= FDI_LINK_TRAIN_PATTERN_1;
+		I915_WRITE(reg, temp);
+
+		reg = FDI_RX_CTL(pipe);
+		temp = I915_READ(reg);
+		if (HAS_PCH_CPT(dev)) {
+			temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+			temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
+		} else {
+			temp &= ~FDI_LINK_TRAIN_NONE;
+			temp |= FDI_LINK_TRAIN_PATTERN_1;
+		}
+		/* BPC in FDI rx is consistent with that in PIPECONF */
+		temp &= ~(0x07 << 16);
+		temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
+		I915_WRITE(reg, temp);
+
+		POSTING_READ(reg);
+		udelay(100);
+	}
 
 	/* Enable panel fitting for LVDS */
 	if (dev_priv->pch_pf_size &&
@@ -2100,6 +2175,10 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
 		intel_flush_display_plane(dev, plane);
 	}
 
+	/* Skip the PCH stuff if possible */
+	if (!is_pch_port)
+		goto done;
+
 	/* For PCH output, training FDI link */
 	if (IS_GEN6(dev))
 		gen6_fdi_link_train(crtc);
@@ -2184,7 +2263,7 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
 	I915_WRITE(reg, temp | TRANS_ENABLE);
 	if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100))
 		DRM_ERROR("failed to enable transcoder %d\n", pipe);
-
+done:
 	intel_crtc_load_lut(crtc);
 	intel_update_fbc(dev);
 	intel_crtc_update_cursor(crtc, true);
@@ -5558,9 +5637,7 @@ static void intel_crtc_reset(struct drm_crtc *crtc)
 	/* Reset flags back to the 'unknown' status so that they
 	 * will be correctly set on the initial modeset.
 	 */
-	intel_crtc->cursor_addr = 0;
 	intel_crtc->dpms_mode = -1;
-	intel_crtc->active = true; /* force the pipe off on setup_init_config */
 }
 
 static struct drm_crtc_helper_funcs intel_helper_funcs = {
@@ -5666,6 +5743,7 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
 	dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;
 
 	intel_crtc_reset(&intel_crtc->base);
+	intel_crtc->active = true; /* force the pipe off on setup_init_config */
 
 	if (HAS_PCH_SPLIT(dev)) {
 		intel_helper_funcs.prepare = ironlake_crtc_prepare;
@@ -6204,7 +6282,7 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
 	 * userspace...
 	 */
 	I915_WRITE(GEN6_RC_STATE, 0);
-	__gen6_force_wake_get(dev_priv);
+	__gen6_gt_force_wake_get(dev_priv);
 
 	/* disable the counters and set deterministic thresholds */
 	I915_WRITE(GEN6_RC_CONTROL, 0);
@@ -6302,7 +6380,7 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
 	/* enable all PM interrupts */
 	I915_WRITE(GEN6_PMINTRMSK, 0);
 
-	__gen6_force_wake_put(dev_priv);
+	__gen6_gt_force_wake_put(dev_priv);
 }
 
 void intel_enable_clock_gating(struct drm_device *dev)
@@ -6463,52 +6541,60 @@ void intel_enable_clock_gating(struct drm_device *dev)
 	}
 }
 
-void intel_disable_clock_gating(struct drm_device *dev)
+static void ironlake_teardown_rc6(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
 	if (dev_priv->renderctx) {
-		struct drm_i915_gem_object *obj = dev_priv->renderctx;
-
-		I915_WRITE(CCID, 0);
-		POSTING_READ(CCID);
-
-		i915_gem_object_unpin(obj);
-		drm_gem_object_unreference(&obj->base);
+		i915_gem_object_unpin(dev_priv->renderctx);
+		drm_gem_object_unreference(&dev_priv->renderctx->base);
 		dev_priv->renderctx = NULL;
 	}
 
 	if (dev_priv->pwrctx) {
-		struct drm_i915_gem_object *obj = dev_priv->pwrctx;
+		i915_gem_object_unpin(dev_priv->pwrctx);
+		drm_gem_object_unreference(&dev_priv->pwrctx->base);
+		dev_priv->pwrctx = NULL;
+	}
+}
+
+static void ironlake_disable_rc6(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (I915_READ(PWRCTXA)) {
+		/* Wake the GPU, prevent RC6, then restore RSTDBYCTL */
+		I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) | RCX_SW_EXIT);
+		wait_for(((I915_READ(RSTDBYCTL) & RSX_STATUS_MASK) == RSX_STATUS_ON),
+			 50);
 
 		I915_WRITE(PWRCTXA, 0);
 		POSTING_READ(PWRCTXA);
 
-		i915_gem_object_unpin(obj);
-		drm_gem_object_unreference(&obj->base);
-		dev_priv->pwrctx = NULL;
+		I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
+		POSTING_READ(RSTDBYCTL);
 	}
+
+	ironlake_teardown_rc6(dev);
 }
 
-static void ironlake_disable_rc6(struct drm_device *dev)
+static int ironlake_setup_rc6(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	/* Wake the GPU, prevent RC6, then restore RSTDBYCTL */
-	I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) | RCX_SW_EXIT);
-	wait_for(((I915_READ(RSTDBYCTL) & RSX_STATUS_MASK) == RSX_STATUS_ON),
-		 10);
-	POSTING_READ(CCID);
-	I915_WRITE(PWRCTXA, 0);
-	POSTING_READ(PWRCTXA);
-	I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
-	POSTING_READ(RSTDBYCTL);
-	i915_gem_object_unpin(dev_priv->renderctx);
-	drm_gem_object_unreference(&dev_priv->renderctx->base);
-	dev_priv->renderctx = NULL;
-	i915_gem_object_unpin(dev_priv->pwrctx);
-	drm_gem_object_unreference(&dev_priv->pwrctx->base);
-	dev_priv->pwrctx = NULL;
+	if (dev_priv->renderctx == NULL)
+		dev_priv->renderctx = intel_alloc_context_page(dev);
+	if (!dev_priv->renderctx)
+		return -ENOMEM;
+
+	if (dev_priv->pwrctx == NULL)
+		dev_priv->pwrctx = intel_alloc_context_page(dev);
+	if (!dev_priv->pwrctx) {
+		ironlake_teardown_rc6(dev);
+		return -ENOMEM;
+	}
+
+	return 0;
 }
 
 void ironlake_enable_rc6(struct drm_device *dev)
@@ -6516,15 +6602,26 @@ void ironlake_enable_rc6(struct drm_device *dev)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int ret;
 
+	/* rc6 disabled by default due to repeated reports of hanging during
+	 * boot and resume.
+	 */
+	if (!i915_enable_rc6)
+		return;
+
+	ret = ironlake_setup_rc6(dev);
+	if (ret)
+		return;
+
 	/*
 	 * GPU can automatically power down the render unit if given a page
 	 * to save state.
 	 */
 	ret = BEGIN_LP_RING(6);
 	if (ret) {
-		ironlake_disable_rc6(dev);
+		ironlake_teardown_rc6(dev);
 		return;
 	}
+
 	OUT_RING(MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN);
 	OUT_RING(MI_SET_CONTEXT);
 	OUT_RING(dev_priv->renderctx->gtt_offset |
@@ -6541,6 +6638,7 @@ void ironlake_enable_rc6(struct drm_device *dev)
 	I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
 }
 
+
 /* Set up chip specific display functions */
 static void intel_init_display(struct drm_device *dev)
 {
@@ -6783,21 +6881,9 @@ void intel_modeset_init(struct drm_device *dev)
 	if (IS_GEN6(dev))
 		gen6_enable_rps(dev_priv);
 
-	if (IS_IRONLAKE_M(dev)) {
-		dev_priv->renderctx = intel_alloc_context_page(dev);
-		if (!dev_priv->renderctx)
-			goto skip_rc6;
-		dev_priv->pwrctx = intel_alloc_context_page(dev);
-		if (!dev_priv->pwrctx) {
-			i915_gem_object_unpin(dev_priv->renderctx);
-			drm_gem_object_unreference(&dev_priv->renderctx->base);
-			dev_priv->renderctx = NULL;
-			goto skip_rc6;
-		}
+	if (IS_IRONLAKE_M(dev))
 		ironlake_enable_rc6(dev);
-	}
 
-skip_rc6:
 	INIT_WORK(&dev_priv->idle_work, intel_idle_update);
 	setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer,
 		    (unsigned long)dev);
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 1f4242b682c8..51cb4e36997f 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -1639,6 +1639,24 @@ static int intel_dp_get_modes(struct drm_connector *connector)
 	return 0;
 }
 
+static bool
+intel_dp_detect_audio(struct drm_connector *connector)
+{
+	struct intel_dp *intel_dp = intel_attached_dp(connector);
+	struct edid *edid;
+	bool has_audio = false;
+
+	edid = drm_get_edid(connector, &intel_dp->adapter);
+	if (edid) {
+		has_audio = drm_detect_monitor_audio(edid);
+
+		connector->display_info.raw_edid = NULL;
+		kfree(edid);
+	}
+
+	return has_audio;
+}
+
 static int
 intel_dp_set_property(struct drm_connector *connector,
 		      struct drm_property *property,
@@ -1652,17 +1670,23 @@ intel_dp_set_property(struct drm_connector *connector,
 		return ret;
 
 	if (property == intel_dp->force_audio_property) {
-		if (val == intel_dp->force_audio)
+		int i = val;
+		bool has_audio;
+
+		if (i == intel_dp->force_audio)
 			return 0;
 
-		intel_dp->force_audio = val;
+		intel_dp->force_audio = i;
 
-		if (val > 0 && intel_dp->has_audio)
-			return 0;
-		if (val < 0 && !intel_dp->has_audio)
+		if (i == 0)
+			has_audio = intel_dp_detect_audio(connector);
+		else
+			has_audio = i > 0;
+
+		if (has_audio == intel_dp->has_audio)
 			return 0;
 
-		intel_dp->has_audio = val > 0;
+		intel_dp->has_audio = has_audio;
 		goto done;
 	}
 
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 74db2557d644..2c431049963c 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -298,7 +298,6 @@ extern void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
 extern void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
 				    u16 *blue, int regno);
 extern void intel_enable_clock_gating(struct drm_device *dev);
-extern void intel_disable_clock_gating(struct drm_device *dev);
 extern void ironlake_enable_drps(struct drm_device *dev);
 extern void ironlake_disable_drps(struct drm_device *dev);
 extern void gen6_enable_rps(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 0d0273e7b029..c635c9e357b9 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -251,6 +251,27 @@ static int intel_hdmi_get_modes(struct drm_connector *connector)
 			      &dev_priv->gmbus[intel_hdmi->ddc_bus].adapter);
 }
 
+static bool
+intel_hdmi_detect_audio(struct drm_connector *connector)
+{
+	struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
+	struct drm_i915_private *dev_priv = connector->dev->dev_private;
+	struct edid *edid;
+	bool has_audio = false;
+
+	edid = drm_get_edid(connector,
+			    &dev_priv->gmbus[intel_hdmi->ddc_bus].adapter);
+	if (edid) {
+		if (edid->input & DRM_EDID_INPUT_DIGITAL)
+			has_audio = drm_detect_monitor_audio(edid);
+
+		connector->display_info.raw_edid = NULL;
+		kfree(edid);
+	}
+
+	return has_audio;
+}
+
 static int
 intel_hdmi_set_property(struct drm_connector *connector,
 			struct drm_property *property,
@@ -264,17 +285,23 @@ intel_hdmi_set_property(struct drm_connector *connector,
 		return ret;
 
 	if (property == intel_hdmi->force_audio_property) {
-		if (val == intel_hdmi->force_audio)
+		int i = val;
+		bool has_audio;
+
+		if (i == intel_hdmi->force_audio)
 			return 0;
 
-		intel_hdmi->force_audio = val;
+		intel_hdmi->force_audio = i;
 
-		if (val > 0 && intel_hdmi->has_audio)
-			return 0;
-		if (val < 0 && !intel_hdmi->has_audio)
+		if (i == 0)
+			has_audio = intel_hdmi_detect_audio(connector);
+		else
+			has_audio = i > 0;
+
+		if (has_audio == intel_hdmi->has_audio)
 			return 0;
 
-		intel_hdmi->has_audio = val > 0;
+		intel_hdmi->has_audio = has_audio;
 		goto done;
 	}
 
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index ace8d5d30dd2..bcdba7bd5cfa 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -261,12 +261,6 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
 		return true;
 	}
 
-	/* Make sure pre-965s set dither correctly */
-	if (INTEL_INFO(dev)->gen < 4) {
-		if (dev_priv->lvds_dither)
-			pfit_control |= PANEL_8TO6_DITHER_ENABLE;
-	}
-
 	/* Native modes don't need fitting */
 	if (adjusted_mode->hdisplay == mode->hdisplay &&
 	    adjusted_mode->vdisplay == mode->vdisplay)
@@ -374,10 +368,16 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
 	}
 
 out:
+	/* If not enabling scaling, be consistent and always use 0. */
 	if ((pfit_control & PFIT_ENABLE) == 0) {
 		pfit_control = 0;
 		pfit_pgm_ratios = 0;
 	}
+
+	/* Make sure pre-965 set dither correctly */
+	if (INTEL_INFO(dev)->gen < 4 && dev_priv->lvds_dither)
+		pfit_control |= PANEL_8TO6_DITHER_ENABLE;
+
 	if (pfit_control != intel_lvds->pfit_control ||
 	    pfit_pgm_ratios != intel_lvds->pfit_pgm_ratios) {
 		intel_lvds->pfit_control = pfit_control;
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index c65992df458d..f8f86e57df22 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -208,7 +208,6 @@ u32 intel_panel_get_backlight(struct drm_device *dev)
 			val &= ~1;
 			pci_read_config_byte(dev->pdev, PCI_LBPC, &lbpc);
 			val *= lbpc;
-			val >>= 1;
 		}
 	}
 
@@ -235,11 +234,11 @@ void intel_panel_set_backlight(struct drm_device *dev, u32 level)
 
 	if (is_backlight_combination_mode(dev)){
 		u32 max = intel_panel_get_max_backlight(dev);
-		u8 lpbc;
+		u8 lbpc;
 
-		lpbc = level * 0xfe / max + 1;
-		level /= lpbc;
-		pci_write_config_byte(dev->pdev, PCI_LBPC, lpbc);
+		lbpc = level * 0xfe / max + 1;
+		level /= lbpc;
+		pci_write_config_byte(dev->pdev, PCI_LBPC, lbpc);
 	}
 
 	tmp = I915_READ(BLC_PWM_CTL);
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 6218fa97aa1e..445f27efe677 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -1059,22 +1059,25 @@ static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring,
 }
 
 static int gen6_ring_flush(struct intel_ring_buffer *ring,
-			   u32 invalidate_domains,
-			   u32 flush_domains)
+			   u32 invalidate, u32 flush)
 {
+	uint32_t cmd;
 	int ret;
 
-	if ((flush_domains & I915_GEM_DOMAIN_RENDER) == 0)
+	if (((invalidate | flush) & I915_GEM_GPU_DOMAINS) == 0)
 		return 0;
 
 	ret = intel_ring_begin(ring, 4);
 	if (ret)
 		return ret;
 
-	intel_ring_emit(ring, MI_FLUSH_DW);
-	intel_ring_emit(ring, 0);
+	cmd = MI_FLUSH_DW;
+	if (invalidate & I915_GEM_GPU_DOMAINS)
+		cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD;
+	intel_ring_emit(ring, cmd);
 	intel_ring_emit(ring, 0);
 	intel_ring_emit(ring, 0);
+	intel_ring_emit(ring, MI_NOOP);
 	intel_ring_advance(ring);
 	return 0;
 }
@@ -1230,22 +1233,25 @@ static int blt_ring_begin(struct intel_ring_buffer *ring,
 }
 
 static int blt_ring_flush(struct intel_ring_buffer *ring,
-			  u32 invalidate_domains,
-			  u32 flush_domains)
+			  u32 invalidate, u32 flush)
 {
+	uint32_t cmd;
 	int ret;
 
-	if ((flush_domains & I915_GEM_DOMAIN_RENDER) == 0)
+	if (((invalidate | flush) & I915_GEM_DOMAIN_RENDER) == 0)
 		return 0;
 
 	ret = blt_ring_begin(ring, 4);
 	if (ret)
 		return ret;
 
-	intel_ring_emit(ring, MI_FLUSH_DW);
-	intel_ring_emit(ring, 0);
+	cmd = MI_FLUSH_DW;
+	if (invalidate & I915_GEM_DOMAIN_RENDER)
+		cmd |= MI_INVALIDATE_TLB;
+	intel_ring_emit(ring, cmd);
 	intel_ring_emit(ring, 0);
 	intel_ring_emit(ring, 0);
+	intel_ring_emit(ring, MI_NOOP);
 	intel_ring_advance(ring);
 	return 0;
 }
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 6d6fde85a636..34306865a5df 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -14,22 +14,23 @@ struct intel_hw_status_page {
 	struct drm_i915_gem_object *obj;
 };
 
-#define I915_RING_READ(reg) i915_safe_read(dev_priv, reg)
+#define I915_RING_READ(reg) i915_gt_read(dev_priv, reg)
+#define I915_RING_WRITE(reg, val) i915_gt_write(dev_priv, reg, val)
 
 #define I915_READ_TAIL(ring) I915_RING_READ(RING_TAIL((ring)->mmio_base))
-#define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL((ring)->mmio_base), val)
+#define I915_WRITE_TAIL(ring, val) I915_RING_WRITE(RING_TAIL((ring)->mmio_base), val)
 
 #define I915_READ_START(ring) I915_RING_READ(RING_START((ring)->mmio_base))
-#define I915_WRITE_START(ring, val) I915_WRITE(RING_START((ring)->mmio_base), val)
+#define I915_WRITE_START(ring, val) I915_RING_WRITE(RING_START((ring)->mmio_base), val)
 
 #define I915_READ_HEAD(ring) I915_RING_READ(RING_HEAD((ring)->mmio_base))
-#define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD((ring)->mmio_base), val)
+#define I915_WRITE_HEAD(ring, val) I915_RING_WRITE(RING_HEAD((ring)->mmio_base), val)
 
 #define I915_READ_CTL(ring) I915_RING_READ(RING_CTL((ring)->mmio_base))
-#define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL((ring)->mmio_base), val)
+#define I915_WRITE_CTL(ring, val) I915_RING_WRITE(RING_CTL((ring)->mmio_base), val)
 
-#define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val)
 #define I915_READ_IMR(ring) I915_RING_READ(RING_IMR((ring)->mmio_base))
+#define I915_WRITE_IMR(ring, val) I915_RING_WRITE(RING_IMR((ring)->mmio_base), val)
 
 #define I915_READ_NOPID(ring) I915_RING_READ(RING_NOPID((ring)->mmio_base))
 #define I915_READ_SYNC_0(ring) I915_RING_READ(RING_SYNC_0((ring)->mmio_base))
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 6a09c1413d60..7c50cdce84f0 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -46,6 +46,7 @@
 	 SDVO_TV_MASK)
 
 #define IS_TV(c)	(c->output_flag & SDVO_TV_MASK)
+#define IS_TMDS(c)	(c->output_flag & SDVO_TMDS_MASK)
 #define IS_LVDS(c)	(c->output_flag & SDVO_LVDS_MASK)
 #define IS_TV_OR_LVDS(c) (c->output_flag & (SDVO_TV_MASK | SDVO_LVDS_MASK))
 
@@ -1359,7 +1360,8 @@ intel_sdvo_hdmi_sink_detect(struct drm_connector *connector)
 				intel_sdvo->has_hdmi_monitor = drm_detect_hdmi_monitor(edid);
 				intel_sdvo->has_hdmi_audio = drm_detect_monitor_audio(edid);
 			}
-		}
+		} else
+			status = connector_status_disconnected;
 		connector->display_info.raw_edid = NULL;
 		kfree(edid);
 	}
@@ -1407,10 +1409,25 @@ intel_sdvo_detect(struct drm_connector *connector, bool force)
 
 	if ((intel_sdvo_connector->output_flag & response) == 0)
 		ret = connector_status_disconnected;
-	else if (response & SDVO_TMDS_MASK)
+	else if (IS_TMDS(intel_sdvo_connector))
 		ret = intel_sdvo_hdmi_sink_detect(connector);
-	else
-		ret = connector_status_connected;
+	else {
+		struct edid *edid;
+
+		/* if we have an edid check it matches the connection */
+		edid = intel_sdvo_get_edid(connector);
+		if (edid == NULL)
+			edid = intel_sdvo_get_analog_edid(connector);
+		if (edid != NULL) {
+			if (edid->input & DRM_EDID_INPUT_DIGITAL)
+				ret = connector_status_disconnected;
+			else
+				ret = connector_status_connected;
+			connector->display_info.raw_edid = NULL;
+			kfree(edid);
+		} else
+			ret = connector_status_connected;
+	}
 
 	/* May update encoder flag for like clock for SDVO TV, etc.*/
 	if (ret == connector_status_connected) {
@@ -1446,10 +1463,15 @@ static void intel_sdvo_get_ddc_modes(struct drm_connector *connector)
 	edid = intel_sdvo_get_analog_edid(connector);
 
 	if (edid != NULL) {
-		if (edid->input & DRM_EDID_INPUT_DIGITAL) {
+		struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
+		bool monitor_is_digital = !!(edid->input & DRM_EDID_INPUT_DIGITAL);
+		bool connector_is_digital = !!IS_TMDS(intel_sdvo_connector);
+
+		if (connector_is_digital == monitor_is_digital) {
 			drm_mode_connector_update_edid_property(connector, edid);
 			drm_add_edid_modes(connector, edid);
 		}
+
 		connector->display_info.raw_edid = NULL;
 		kfree(edid);
 	}
@@ -1668,6 +1690,22 @@ static void intel_sdvo_destroy(struct drm_connector *connector)
1668 kfree(connector); 1690 kfree(connector);
1669} 1691}
1670 1692
1693static bool intel_sdvo_detect_hdmi_audio(struct drm_connector *connector)
1694{
1695 struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
1696 struct edid *edid;
1697 bool has_audio = false;
1698
1699 if (!intel_sdvo->is_hdmi)
1700 return false;
1701
1702 edid = intel_sdvo_get_edid(connector);
1703 if (edid != NULL && edid->input & DRM_EDID_INPUT_DIGITAL)
1704 has_audio = drm_detect_monitor_audio(edid);
1705
1706 return has_audio;
1707}
1708
1671static int 1709static int
1672intel_sdvo_set_property(struct drm_connector *connector, 1710intel_sdvo_set_property(struct drm_connector *connector,
1673 struct drm_property *property, 1711 struct drm_property *property,
@@ -1684,17 +1722,23 @@ intel_sdvo_set_property(struct drm_connector *connector,
1684 return ret; 1722 return ret;
1685 1723
1686 if (property == intel_sdvo_connector->force_audio_property) { 1724 if (property == intel_sdvo_connector->force_audio_property) {
1687 if (val == intel_sdvo_connector->force_audio) 1725 int i = val;
1726 bool has_audio;
1727
1728 if (i == intel_sdvo_connector->force_audio)
1688 return 0; 1729 return 0;
1689 1730
1690 intel_sdvo_connector->force_audio = val; 1731 intel_sdvo_connector->force_audio = i;
1691 1732
1692 if (val > 0 && intel_sdvo->has_hdmi_audio) 1733 if (i == 0)
1693 return 0; 1734 has_audio = intel_sdvo_detect_hdmi_audio(connector);
1694 if (val < 0 && !intel_sdvo->has_hdmi_audio) 1735 else
1736 has_audio = i > 0;
1737
1738 if (has_audio == intel_sdvo->has_hdmi_audio)
1695 return 0; 1739 return 0;
1696 1740
1697 intel_sdvo->has_hdmi_audio = val > 0; 1741 intel_sdvo->has_hdmi_audio = has_audio;
1698 goto done; 1742 goto done;
1699 } 1743 }
1700 1744
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 93206e4eaa6f..fe4a53a50b83 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -1234,7 +1234,8 @@ static const struct drm_display_mode reported_modes[] = {
  * \return false if TV is disconnected.
  */
 static int
-intel_tv_detect_type (struct intel_tv *intel_tv)
+intel_tv_detect_type (struct intel_tv *intel_tv,
+		      struct drm_connector *connector)
 {
 	struct drm_encoder *encoder = &intel_tv->base.base;
 	struct drm_device *dev = encoder->dev;
@@ -1245,11 +1246,13 @@ intel_tv_detect_type (struct intel_tv *intel_tv)
 	int type;

 	/* Disable TV interrupts around load detect or we'll recurse */
-	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
-	i915_disable_pipestat(dev_priv, 0,
-			      PIPE_HOTPLUG_INTERRUPT_ENABLE |
-			      PIPE_HOTPLUG_TV_INTERRUPT_ENABLE);
-	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+	if (connector->polled & DRM_CONNECTOR_POLL_HPD) {
+		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+		i915_disable_pipestat(dev_priv, 0,
+				      PIPE_HOTPLUG_INTERRUPT_ENABLE |
+				      PIPE_HOTPLUG_TV_INTERRUPT_ENABLE);
+		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+	}

 	save_tv_dac = tv_dac = I915_READ(TV_DAC);
 	save_tv_ctl = tv_ctl = I915_READ(TV_CTL);
@@ -1302,11 +1305,13 @@ intel_tv_detect_type (struct intel_tv *intel_tv)
 	I915_WRITE(TV_CTL, save_tv_ctl);

 	/* Restore interrupt config */
-	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
-	i915_enable_pipestat(dev_priv, 0,
-			     PIPE_HOTPLUG_INTERRUPT_ENABLE |
-			     PIPE_HOTPLUG_TV_INTERRUPT_ENABLE);
-	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+	if (connector->polled & DRM_CONNECTOR_POLL_HPD) {
+		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+		i915_enable_pipestat(dev_priv, 0,
+				     PIPE_HOTPLUG_INTERRUPT_ENABLE |
+				     PIPE_HOTPLUG_TV_INTERRUPT_ENABLE);
+		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+	}

 	return type;
 }
@@ -1356,7 +1361,7 @@ intel_tv_detect(struct drm_connector *connector, bool force)
 	drm_mode_set_crtcinfo(&mode, CRTC_INTERLACE_HALVE_V);

 	if (intel_tv->base.base.crtc && intel_tv->base.base.crtc->enabled) {
-		type = intel_tv_detect_type(intel_tv);
+		type = intel_tv_detect_type(intel_tv, connector);
 	} else if (force) {
 		struct drm_crtc *crtc;
 		int dpms_mode;
@@ -1364,7 +1369,7 @@ intel_tv_detect(struct drm_connector *connector, bool force)
 		crtc = intel_get_load_detect_pipe(&intel_tv->base, connector,
 						  &mode, &dpms_mode);
 		if (crtc) {
-			type = intel_tv_detect_type(intel_tv);
+			type = intel_tv_detect_type(intel_tv, connector);
 			intel_release_load_detect_pipe(&intel_tv->base, connector,
 						       dpms_mode);
 		} else
@@ -1658,6 +1663,18 @@ intel_tv_init(struct drm_device *dev)
 	intel_encoder = &intel_tv->base;
 	connector = &intel_connector->base;

+	/* The documentation, for the older chipsets at least, recommend
+	 * using a polling method rather than hotplug detection for TVs.
+	 * This is because in order to perform the hotplug detection, the PLLs
+	 * for the TV must be kept alive increasing power drain and starving
+	 * bandwidth from other encoders. Notably for instance, it causes
+	 * pipe underruns on Crestline when this encoder is supposedly idle.
+	 *
+	 * More recent chipsets favour HDMI rather than integrated S-Video.
+	 */
+	connector->polled =
+		DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
+
 	drm_connector_init(dev, connector, &intel_tv_connector_funcs,
 			   DRM_MODE_CONNECTOR_SVIDEO);

diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index 49e5e99917e2..6bdab891c64e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -6228,7 +6228,7 @@ parse_dcb15_entry(struct drm_device *dev, struct dcb_table *dcb,
 		entry->tvconf.has_component_output = false;
 		break;
 	case OUTPUT_LVDS:
-		if ((conn & 0x00003f00) != 0x10)
+		if ((conn & 0x00003f00) >> 8 != 0x10)
 			entry->lvdsconf.use_straps_for_mode = true;
 		entry->lvdsconf.use_power_scripts = true;
 		break;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index a7fae26f4654..a52184007f5f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -49,7 +49,10 @@ nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
 		DRM_ERROR("bo %p still attached to GEM object\n", bo);

 	nv10_mem_put_tile_region(dev, nvbo->tile, NULL);
-	nouveau_vm_put(&nvbo->vma);
+	if (nvbo->vma.node) {
+		nouveau_vm_unmap(&nvbo->vma);
+		nouveau_vm_put(&nvbo->vma);
+	}
 	kfree(nvbo);
 }

@@ -128,6 +131,7 @@ nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
 		}
 	}

+	nvbo->bo.mem.num_pages = size >> PAGE_SHIFT;
 	nouveau_bo_placement_set(nvbo, flags, 0);

 	nvbo->channel = chan;
@@ -166,17 +170,17 @@ static void
 set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
 {
 	struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
+	int vram_pages = dev_priv->vram_size >> PAGE_SHIFT;

 	if (dev_priv->card_type == NV_10 &&
-	    nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM)) {
+	    nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM) &&
+	    nvbo->bo.mem.num_pages < vram_pages / 2) {
 		/*
 		 * Make sure that the color and depth buffers are handled
 		 * by independent memory controller units. Up to a 9x
 		 * speed up when alpha-blending and depth-test are enabled
 		 * at the same time.
 		 */
-		int vram_pages = dev_priv->vram_size >> PAGE_SHIFT;
-
 		if (nvbo->tile_flags & NOUVEAU_GEM_TILE_ZETA) {
 			nvbo->placement.fpfn = vram_pages / 2;
 			nvbo->placement.lpfn = ~0;
@@ -785,7 +789,7 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
 	if (ret)
 		goto out;

-	ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
+	ret = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, new_mem);
 out:
 	ttm_bo_mem_put(bo, &tmp_mem);
 	return ret;
@@ -811,11 +815,11 @@ nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
 	if (ret)
 		return ret;

-	ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, &tmp_mem);
+	ret = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, &tmp_mem);
 	if (ret)
 		goto out;

-	ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
+	ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve, no_wait_gpu, new_mem);
 	if (ret)
 		goto out;

diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index a21e00076839..390d82c3c4b0 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -507,6 +507,7 @@ nouveau_connector_native_mode(struct drm_connector *connector)
 	int high_w = 0, high_h = 0, high_v = 0;

 	list_for_each_entry(mode, &nv_connector->base.probed_modes, head) {
+		mode->vrefresh = drm_mode_vrefresh(mode);
 		if (helper->mode_valid(connector, mode) != MODE_OK ||
 		    (mode->flags & DRM_MODE_FLAG_INTERLACE))
 			continue;
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.c b/drivers/gpu/drm/nouveau/nouveau_dma.c
index 65699bfaaaea..b368ed74aad7 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.c
@@ -83,7 +83,8 @@ nouveau_dma_init(struct nouveau_channel *chan)
 		return ret;

 	/* NV_MEMORY_TO_MEMORY_FORMAT requires a notifier object */
-	ret = nouveau_notifier_alloc(chan, NvNotify0, 32, &chan->m2mf_ntfy);
+	ret = nouveau_notifier_alloc(chan, NvNotify0, 32, 0xfd0, 0x1000,
+				     &chan->m2mf_ntfy);
 	if (ret)
 		return ret;

diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 9821fcacc3d2..982d70b12722 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -852,7 +852,8 @@ extern const struct ttm_mem_type_manager_func nouveau_vram_manager;
 extern int  nouveau_notifier_init_channel(struct nouveau_channel *);
 extern void nouveau_notifier_takedown_channel(struct nouveau_channel *);
 extern int  nouveau_notifier_alloc(struct nouveau_channel *, uint32_t handle,
-				   int cout, uint32_t *offset);
+				   int cout, uint32_t start, uint32_t end,
+				   uint32_t *offset);
 extern int  nouveau_notifier_offset(struct nouveau_gpuobj *, uint32_t *);
 extern int  nouveau_ioctl_notifier_alloc(struct drm_device *, void *data,
 					 struct drm_file *);
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
index 26347b7cd872..b0fb9bdcddb7 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -725,8 +725,10 @@ nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
 	ret = vram->get(dev, mem->num_pages << PAGE_SHIFT,
 			mem->page_alignment << PAGE_SHIFT, size_nc,
 			(nvbo->tile_flags >> 8) & 0xff, &node);
-	if (ret)
-		return ret;
+	if (ret) {
+		mem->mm_node = NULL;
+		return (ret == -ENOSPC) ? 0 : ret;
+	}

 	node->page_shift = 12;
 	if (nvbo->vma.node)
diff --git a/drivers/gpu/drm/nouveau/nouveau_mm.c b/drivers/gpu/drm/nouveau/nouveau_mm.c
index 8844b50c3e54..7609756b6faf 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mm.c
@@ -123,7 +123,7 @@ nouveau_mm_get(struct nouveau_mm *rmm, int type, u32 size, u32 size_nc,
 		return 0;
 	}

-	return -ENOMEM;
+	return -ENOSPC;
 }

 int
diff --git a/drivers/gpu/drm/nouveau/nouveau_notifier.c b/drivers/gpu/drm/nouveau/nouveau_notifier.c
index fe29d604b820..5ea167623a82 100644
--- a/drivers/gpu/drm/nouveau/nouveau_notifier.c
+++ b/drivers/gpu/drm/nouveau/nouveau_notifier.c
@@ -96,7 +96,8 @@ nouveau_notifier_gpuobj_dtor(struct drm_device *dev,

 int
 nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle,
-		       int size, uint32_t *b_offset)
+		       int size, uint32_t start, uint32_t end,
+		       uint32_t *b_offset)
 {
 	struct drm_device *dev = chan->dev;
 	struct nouveau_gpuobj *nobj = NULL;
@@ -104,9 +105,10 @@ nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle,
 	uint32_t offset;
 	int target, ret;

-	mem = drm_mm_search_free(&chan->notifier_heap, size, 0, 0);
+	mem = drm_mm_search_free_in_range(&chan->notifier_heap, size, 0,
+					  start, end, 0);
 	if (mem)
-		mem = drm_mm_get_block(mem, size, 0);
+		mem = drm_mm_get_block_range(mem, size, 0, start, end);
 	if (!mem) {
 		NV_ERROR(dev, "Channel %d notifier block full\n", chan->id);
 		return -ENOMEM;
@@ -177,7 +179,8 @@ nouveau_ioctl_notifier_alloc(struct drm_device *dev, void *data,
 	if (IS_ERR(chan))
 		return PTR_ERR(chan);

-	ret = nouveau_notifier_alloc(chan, na->handle, na->size, &na->offset);
+	ret = nouveau_notifier_alloc(chan, na->handle, na->size, 0, 0x1000,
+				     &na->offset);
 	nouveau_channel_put(&chan);
 	return ret;
 }
diff --git a/drivers/gpu/drm/nouveau/nouveau_pm.c b/drivers/gpu/drm/nouveau/nouveau_pm.c
index f05c0cddfeca..4399e2f34db4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_pm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_pm.c
@@ -543,7 +543,7 @@ nouveau_pm_resume(struct drm_device *dev)
 	struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
 	struct nouveau_pm_level *perflvl;

-	if (pm->cur == &pm->boot)
+	if (!pm->cur || pm->cur == &pm->boot)
 		return;

 	perflvl = pm->cur;
diff --git a/drivers/gpu/drm/nouveau/nv04_dfp.c b/drivers/gpu/drm/nouveau/nv04_dfp.c
index ef23550407b5..c82db37d9f41 100644
--- a/drivers/gpu/drm/nouveau/nv04_dfp.c
+++ b/drivers/gpu/drm/nouveau/nv04_dfp.c
@@ -342,8 +342,8 @@ static void nv04_dfp_mode_set(struct drm_encoder *encoder,
 	if (nv_encoder->dcb->type == OUTPUT_LVDS) {
 		bool duallink, dummy;

-		nouveau_bios_parse_lvds_table(dev, nv_connector->native_mode->
-					      clock, &duallink, &dummy);
+		nouveau_bios_parse_lvds_table(dev, output_mode->clock,
+					      &duallink, &dummy);
 		if (duallink)
 			regp->fp_control |= (8 << 28);
 	} else
@@ -518,8 +518,6 @@ static void nv04_lvds_dpms(struct drm_encoder *encoder, int mode)
 		return;

 	if (nv_encoder->dcb->lvdsconf.use_power_scripts) {
-		struct nouveau_connector *nv_connector = nouveau_encoder_connector_get(nv_encoder);
-
 		/* when removing an output, crtc may not be set, but PANEL_OFF
 		 * must still be run
 		 */
@@ -527,12 +525,8 @@ static void nv04_lvds_dpms(struct drm_encoder *encoder, int mode)
 			nv04_dfp_get_bound_head(dev, nv_encoder->dcb);

 		if (mode == DRM_MODE_DPMS_ON) {
-			if (!nv_connector->native_mode) {
-				NV_ERROR(dev, "Not turning on LVDS without native mode\n");
-				return;
-			}
 			call_lvds_script(dev, nv_encoder->dcb, head,
-					 LVDS_PANEL_ON, nv_connector->native_mode->clock);
+					 LVDS_PANEL_ON, nv_encoder->mode.clock);
 		} else
 			/* pxclk of 0 is fine for PANEL_OFF, and for a
 			 * disconnected LVDS encoder there is no native_mode
diff --git a/drivers/gpu/drm/nouveau/nv40_graph.c b/drivers/gpu/drm/nouveau/nv40_graph.c
index 8870d72388c8..18d30c2c1aa6 100644
--- a/drivers/gpu/drm/nouveau/nv40_graph.c
+++ b/drivers/gpu/drm/nouveau/nv40_graph.c
@@ -211,18 +211,32 @@ nv40_graph_set_tile_region(struct drm_device *dev, int i)
 	struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];

 	switch (dev_priv->chipset) {
+	case 0x40:
+	case 0x41: /* guess */
+	case 0x42:
+	case 0x43:
+	case 0x45: /* guess */
+	case 0x4e:
+		nv_wr32(dev, NV20_PGRAPH_TSIZE(i), tile->pitch);
+		nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), tile->limit);
+		nv_wr32(dev, NV20_PGRAPH_TILE(i), tile->addr);
+		nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), tile->pitch);
+		nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tile->limit);
+		nv_wr32(dev, NV40_PGRAPH_TILE1(i), tile->addr);
+		break;
 	case 0x44:
 	case 0x4a:
-	case 0x4e:
 		nv_wr32(dev, NV20_PGRAPH_TSIZE(i), tile->pitch);
 		nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), tile->limit);
 		nv_wr32(dev, NV20_PGRAPH_TILE(i), tile->addr);
 		break;
-
 	case 0x46:
 	case 0x47:
 	case 0x49:
 	case 0x4b:
+	case 0x4c:
+	case 0x67:
+	default:
 		nv_wr32(dev, NV47_PGRAPH_TSIZE(i), tile->pitch);
 		nv_wr32(dev, NV47_PGRAPH_TLIMIT(i), tile->limit);
 		nv_wr32(dev, NV47_PGRAPH_TILE(i), tile->addr);
@@ -230,15 +244,6 @@ nv40_graph_set_tile_region(struct drm_device *dev, int i)
 		nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tile->limit);
 		nv_wr32(dev, NV40_PGRAPH_TILE1(i), tile->addr);
 		break;
-
-	default:
-		nv_wr32(dev, NV20_PGRAPH_TSIZE(i), tile->pitch);
-		nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), tile->limit);
-		nv_wr32(dev, NV20_PGRAPH_TILE(i), tile->addr);
-		nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), tile->pitch);
-		nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tile->limit);
-		nv_wr32(dev, NV40_PGRAPH_TILE1(i), tile->addr);
-		break;
 	}
 }

@@ -396,17 +401,20 @@ nv40_graph_init(struct drm_device *dev)
 		break;
 	default:
 		switch (dev_priv->chipset) {
-		case 0x46:
-		case 0x47:
-		case 0x49:
-		case 0x4b:
-			nv_wr32(dev, 0x400DF0, nv_rd32(dev, NV04_PFB_CFG0));
-			nv_wr32(dev, 0x400DF4, nv_rd32(dev, NV04_PFB_CFG1));
-			break;
-		default:
+		case 0x41:
+		case 0x42:
+		case 0x43:
+		case 0x45:
+		case 0x4e:
+		case 0x44:
+		case 0x4a:
 			nv_wr32(dev, 0x4009F0, nv_rd32(dev, NV04_PFB_CFG0));
 			nv_wr32(dev, 0x4009F4, nv_rd32(dev, NV04_PFB_CFG1));
 			break;
+		default:
+			nv_wr32(dev, 0x400DF0, nv_rd32(dev, NV04_PFB_CFG0));
+			nv_wr32(dev, 0x400DF4, nv_rd32(dev, NV04_PFB_CFG1));
+			break;
 		}
 		nv_wr32(dev, 0x4069F0, nv_rd32(dev, NV04_PFB_CFG0));
 		nv_wr32(dev, 0x4069F4, nv_rd32(dev, NV04_PFB_CFG1));
diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c
index ea0041810ae3..e57caa2a00e3 100644
--- a/drivers/gpu/drm/nouveau/nv50_instmem.c
+++ b/drivers/gpu/drm/nouveau/nv50_instmem.c
@@ -403,16 +403,24 @@ nv50_instmem_unmap(struct nouveau_gpuobj *gpuobj)
 void
 nv50_instmem_flush(struct drm_device *dev)
 {
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+	spin_lock(&dev_priv->ramin_lock);
 	nv_wr32(dev, 0x00330c, 0x00000001);
 	if (!nv_wait(dev, 0x00330c, 0x00000002, 0x00000000))
 		NV_ERROR(dev, "PRAMIN flush timeout\n");
+	spin_unlock(&dev_priv->ramin_lock);
 }

 void
 nv84_instmem_flush(struct drm_device *dev)
 {
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+	spin_lock(&dev_priv->ramin_lock);
 	nv_wr32(dev, 0x070000, 0x00000001);
 	if (!nv_wait(dev, 0x070000, 0x00000002, 0x00000000))
 		NV_ERROR(dev, "PRAMIN flush timeout\n");
+	spin_unlock(&dev_priv->ramin_lock);
 }

diff --git a/drivers/gpu/drm/nouveau/nv50_vm.c b/drivers/gpu/drm/nouveau/nv50_vm.c
index 459ff08241e5..6144156f255a 100644
--- a/drivers/gpu/drm/nouveau/nv50_vm.c
+++ b/drivers/gpu/drm/nouveau/nv50_vm.c
@@ -169,7 +169,11 @@ nv50_vm_flush(struct nouveau_vm *vm)
 void
 nv50_vm_flush_engine(struct drm_device *dev, int engine)
 {
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+	spin_lock(&dev_priv->ramin_lock);
 	nv_wr32(dev, 0x100c80, (engine << 16) | 1);
 	if (!nv_wait(dev, 0x100c80, 0x00000001, 0x00000000))
 		NV_ERROR(dev, "vm flush timeout: engine %d\n", engine);
+	spin_unlock(&dev_priv->ramin_lock);
 }
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index b1537000a104..a4e5e53e0a62 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -48,29 +48,29 @@ static void atombios_overscan_setup(struct drm_crtc *crtc,

 	switch (radeon_crtc->rmx_type) {
 	case RMX_CENTER:
-		args.usOverscanTop = (adjusted_mode->crtc_vdisplay - mode->crtc_vdisplay) / 2;
-		args.usOverscanBottom = (adjusted_mode->crtc_vdisplay - mode->crtc_vdisplay) / 2;
-		args.usOverscanLeft = (adjusted_mode->crtc_hdisplay - mode->crtc_hdisplay) / 2;
-		args.usOverscanRight = (adjusted_mode->crtc_hdisplay - mode->crtc_hdisplay) / 2;
+		args.usOverscanTop = cpu_to_le16((adjusted_mode->crtc_vdisplay - mode->crtc_vdisplay) / 2);
+		args.usOverscanBottom = cpu_to_le16((adjusted_mode->crtc_vdisplay - mode->crtc_vdisplay) / 2);
+		args.usOverscanLeft = cpu_to_le16((adjusted_mode->crtc_hdisplay - mode->crtc_hdisplay) / 2);
+		args.usOverscanRight = cpu_to_le16((adjusted_mode->crtc_hdisplay - mode->crtc_hdisplay) / 2);
 		break;
 	case RMX_ASPECT:
 		a1 = mode->crtc_vdisplay * adjusted_mode->crtc_hdisplay;
 		a2 = adjusted_mode->crtc_vdisplay * mode->crtc_hdisplay;

 		if (a1 > a2) {
-			args.usOverscanLeft = (adjusted_mode->crtc_hdisplay - (a2 / mode->crtc_vdisplay)) / 2;
-			args.usOverscanRight = (adjusted_mode->crtc_hdisplay - (a2 / mode->crtc_vdisplay)) / 2;
+			args.usOverscanLeft = cpu_to_le16((adjusted_mode->crtc_hdisplay - (a2 / mode->crtc_vdisplay)) / 2);
+			args.usOverscanRight = cpu_to_le16((adjusted_mode->crtc_hdisplay - (a2 / mode->crtc_vdisplay)) / 2);
 		} else if (a2 > a1) {
-			args.usOverscanLeft = (adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2;
-			args.usOverscanRight = (adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2;
+			args.usOverscanLeft = cpu_to_le16((adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2);
+			args.usOverscanRight = cpu_to_le16((adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2);
 		}
 		break;
 	case RMX_FULL:
 	default:
-		args.usOverscanRight = radeon_crtc->h_border;
-		args.usOverscanLeft = radeon_crtc->h_border;
-		args.usOverscanBottom = radeon_crtc->v_border;
-		args.usOverscanTop = radeon_crtc->v_border;
+		args.usOverscanRight = cpu_to_le16(radeon_crtc->h_border);
+		args.usOverscanLeft = cpu_to_le16(radeon_crtc->h_border);
+		args.usOverscanBottom = cpu_to_le16(radeon_crtc->v_border);
+		args.usOverscanTop = cpu_to_le16(radeon_crtc->v_border);
 		break;
 	}
 	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
@@ -419,23 +419,23 @@ static void atombios_crtc_program_ss(struct drm_crtc *crtc,
 	memset(&args, 0, sizeof(args));

 	if (ASIC_IS_DCE5(rdev)) {
-		args.v3.usSpreadSpectrumAmountFrac = 0;
+		args.v3.usSpreadSpectrumAmountFrac = cpu_to_le16(0);
 		args.v3.ucSpreadSpectrumType = ss->type;
 		switch (pll_id) {
 		case ATOM_PPLL1:
 			args.v3.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V3_P1PLL;
-			args.v3.usSpreadSpectrumAmount = ss->amount;
-			args.v3.usSpreadSpectrumStep = ss->step;
+			args.v3.usSpreadSpectrumAmount = cpu_to_le16(ss->amount);
+			args.v3.usSpreadSpectrumStep = cpu_to_le16(ss->step);
 			break;
 		case ATOM_PPLL2:
 			args.v3.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V3_P2PLL;
-			args.v3.usSpreadSpectrumAmount = ss->amount;
-			args.v3.usSpreadSpectrumStep = ss->step;
+			args.v3.usSpreadSpectrumAmount = cpu_to_le16(ss->amount);
+			args.v3.usSpreadSpectrumStep = cpu_to_le16(ss->step);
 			break;
 		case ATOM_DCPLL:
 			args.v3.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V3_DCPLL;
-			args.v3.usSpreadSpectrumAmount = 0;
-			args.v3.usSpreadSpectrumStep = 0;
+			args.v3.usSpreadSpectrumAmount = cpu_to_le16(0);
+			args.v3.usSpreadSpectrumStep = cpu_to_le16(0);
 			break;
 		case ATOM_PPLL_INVALID:
 			return;
@@ -447,18 +447,18 @@ static void atombios_crtc_program_ss(struct drm_crtc *crtc,
 		switch (pll_id) {
 		case ATOM_PPLL1:
 			args.v2.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V2_P1PLL;
-			args.v2.usSpreadSpectrumAmount = ss->amount;
-			args.v2.usSpreadSpectrumStep = ss->step;
+			args.v2.usSpreadSpectrumAmount = cpu_to_le16(ss->amount);
+			args.v2.usSpreadSpectrumStep = cpu_to_le16(ss->step);
 			break;
 		case ATOM_PPLL2:
 			args.v2.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V2_P2PLL;
-			args.v2.usSpreadSpectrumAmount = ss->amount;
-			args.v2.usSpreadSpectrumStep = ss->step;
+			args.v2.usSpreadSpectrumAmount = cpu_to_le16(ss->amount);
+			args.v2.usSpreadSpectrumStep = cpu_to_le16(ss->step);
 			break;
 		case ATOM_DCPLL:
 			args.v2.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V2_DCPLL;
-			args.v2.usSpreadSpectrumAmount = 0;
-			args.v2.usSpreadSpectrumStep = 0;
+			args.v2.usSpreadSpectrumAmount = cpu_to_le16(0);
+			args.v2.usSpreadSpectrumStep = cpu_to_le16(0);
 			break;
 		case ATOM_PPLL_INVALID:
 			return;
@@ -538,7 +538,6 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
 			pll->flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
 		else
 			pll->flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
-
 	}

 	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
@@ -555,29 +554,28 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
 			dp_clock = dig_connector->dp_clock;
 		}
 	}
-/* this might work properly with the new pll algo */
-#if 0 /* doesn't work properly on some laptops */
+
 	/* use recommended ref_div for ss */
 	if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
 		if (ss_enabled) {
 			if (ss->refdiv) {
+				pll->flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP;
 				pll->flags |= RADEON_PLL_USE_REF_DIV;
 				pll->reference_div = ss->refdiv;
+				if (ASIC_IS_AVIVO(rdev))
+					pll->flags |= RADEON_PLL_USE_FRAC_FB_DIV;
 			}
 		}
 	}
-#endif
+
 	if (ASIC_IS_AVIVO(rdev)) {
 		/* DVO wants 2x pixel clock if the DVO chip is in 12 bit mode */
 		if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1)
 			adjusted_clock = mode->clock * 2;
 		if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
 			pll->flags |= RADEON_PLL_PREFER_CLOSEST_LOWER;
-		/* rv515 needs more testing with this option */
-		if (rdev->family != CHIP_RV515) {
-			if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
-				pll->flags |= RADEON_PLL_IS_LCD;
-		}
+		if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
+			pll->flags |= RADEON_PLL_IS_LCD;
 	} else {
 		if (encoder->encoder_type != DRM_MODE_ENCODER_DAC)
 			pll->flags |= RADEON_PLL_NO_ODD_POST_DIV;
@@ -664,10 +662,12 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
 						   index, (uint32_t *)&args);
 			adjusted_clock = le32_to_cpu(args.v3.sOutput.ulDispPllFreq) * 10;
 			if (args.v3.sOutput.ucRefDiv) {
+				pll->flags |= RADEON_PLL_USE_FRAC_FB_DIV;
 				pll->flags |= RADEON_PLL_USE_REF_DIV;
 				pll->reference_div = args.v3.sOutput.ucRefDiv;
 			}
 			if (args.v3.sOutput.ucPostDiv) {
+				pll->flags |= RADEON_PLL_USE_FRAC_FB_DIV;
 				pll->flags |= RADEON_PLL_USE_POST_DIV;
 				pll->post_div = args.v3.sOutput.ucPostDiv;
 			}
@@ -721,14 +721,14 @@ static void atombios_crtc_set_dcpll(struct drm_crtc *crtc,
 		 * SetPixelClock provides the dividers
 		 */
 		args.v5.ucCRTC = ATOM_CRTC_INVALID;
-		args.v5.usPixelClock = dispclk;
+		args.v5.usPixelClock = cpu_to_le16(dispclk);
 		args.v5.ucPpll = ATOM_DCPLL;
 		break;
 	case 6:
 		/* if the default dcpll clock is specified,
 		 * SetPixelClock provides the dividers
 		 */
-		args.v6.ulDispEngClkFreq = dispclk;
+		args.v6.ulDispEngClkFreq = cpu_to_le32(dispclk);
 		args.v6.ucPpll = ATOM_DCPLL;
 		break;
 	default:
@@ -957,11 +957,7 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode
 	/* adjust pixel clock as needed */
 	adjusted_clock = atombios_adjust_pll(crtc, mode, pll, ss_enabled, &ss);

-	/* rv515 seems happier with the old algo */
-	if (rdev->family == CHIP_RV515)
-		radeon_compute_pll_legacy(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div,
-					  &ref_div, &post_div);
-	else if (ASIC_IS_AVIVO(rdev))
+	if (ASIC_IS_AVIVO(rdev))
 		radeon_compute_pll_avivo(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div,
 					 &ref_div, &post_div);
 	else
@@ -995,9 +991,9 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode
 	}
 }

-static int evergreen_crtc_do_set_base(struct drm_crtc *crtc,
-				      struct drm_framebuffer *fb,
-				      int x, int y, int atomic)
+static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
+				 struct drm_framebuffer *fb,
+				 int x, int y, int atomic)
 {
 	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
 	struct drm_device *dev = crtc->dev;
@@ -1137,12 +1133,6 @@ static int evergreen_crtc_do_set_base(struct drm_crtc *crtc,
 	WREG32(EVERGREEN_VIEWPORT_SIZE + radeon_crtc->crtc_offset,
 	       (crtc->mode.hdisplay << 16) | crtc->mode.vdisplay);

-	if (crtc->mode.flags & DRM_MODE_FLAG_INTERLACE)
-		WREG32(EVERGREEN_DATA_FORMAT + radeon_crtc->crtc_offset,
-		       EVERGREEN_INTERLEAVE_EN);
-	else
-		WREG32(EVERGREEN_DATA_FORMAT + radeon_crtc->crtc_offset, 0);
-
 	if (!atomic && fb && fb != crtc->fb) {
 		radeon_fb = to_radeon_framebuffer(fb);
 		rbo = radeon_fb->obj->driver_private;
@@ -1300,12 +1290,6 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
 	WREG32(AVIVO_D1MODE_VIEWPORT_SIZE + radeon_crtc->crtc_offset,
 	       (crtc->mode.hdisplay << 16) | crtc->mode.vdisplay);

-	if (crtc->mode.flags & DRM_MODE_FLAG_INTERLACE)
-		WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset,
-		       AVIVO_D1MODE_INTERLEAVE_EN);
-	else
-		WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset, 0);
-
 	if (!atomic && fb && fb != crtc->fb) {
 		radeon_fb = to_radeon_framebuffer(fb);
 		rbo = radeon_fb->obj->driver_private;
@@ -1329,7 +1313,7 @@ int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y,
 	struct radeon_device *rdev = dev->dev_private;

 	if (ASIC_IS_DCE4(rdev))
-		return evergreen_crtc_do_set_base(crtc, old_fb, x, y, 0);
+		return dce4_crtc_do_set_base(crtc, old_fb, x, y, 0);
 	else if (ASIC_IS_AVIVO(rdev))
 		return avivo_crtc_do_set_base(crtc, old_fb, x, y, 0);
 	else
@@ -1344,7 +1328,7 @@ int atombios_crtc_set_base_atomic(struct drm_crtc *crtc,
 	struct radeon_device *rdev = dev->dev_private;

 	if (ASIC_IS_DCE4(rdev))
-		return evergreen_crtc_do_set_base(crtc, fb, x, y, 1);
+		return dce4_crtc_do_set_base(crtc, fb, x, y, 1);
 	else if (ASIC_IS_AVIVO(rdev))
 		return avivo_crtc_do_set_base(crtc, fb, x, y, 1);
 	else
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index ffdc8332b76e..6140ea1de45a 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -1192,7 +1192,11 @@ void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
 	radeon_ring_write(rdev, 1);
 	/* FIXME: implement */
 	radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
-	radeon_ring_write(rdev, ib->gpu_addr & 0xFFFFFFFC);
+	radeon_ring_write(rdev,
+#ifdef __BIG_ENDIAN
+			  (2 << 0) |
+#endif
+			  (ib->gpu_addr & 0xFFFFFFFC));
 	radeon_ring_write(rdev, upper_32_bits(ib->gpu_addr) & 0xFF);
 	radeon_ring_write(rdev, ib->length_dw);
 }
@@ -1207,7 +1211,11 @@ static int evergreen_cp_load_microcode(struct radeon_device *rdev)
 		return -EINVAL;

 	r700_cp_stop(rdev);
-	WREG32(CP_RB_CNTL, RB_NO_UPDATE | (15 << 8) | (3 << 0));
+	WREG32(CP_RB_CNTL,
+#ifdef __BIG_ENDIAN
+	       BUF_SWAP_32BIT |
+#endif
+	       RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3));

 	fw_data = (const __be32 *)rdev->pfp_fw->data;
 	WREG32(CP_PFP_UCODE_ADDR, 0);
@@ -1326,7 +1334,11 @@ int evergreen_cp_resume(struct radeon_device *rdev)
 	WREG32(CP_RB_WPTR, 0);

 	/* set the wb address wether it's enabled or not */
-	WREG32(CP_RB_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC);
+	WREG32(CP_RB_RPTR_ADDR,
+#ifdef __BIG_ENDIAN
+	       RB_RPTR_SWAP(2) |
+#endif
+	       ((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC));
 	WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
 	WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);

@@ -2182,7 +2194,6 @@ int evergreen_mc_init(struct radeon_device *rdev)
 		rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
 	}
 	rdev->mc.visible_vram_size = rdev->mc.aper_size;
-	rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
 	r700_vram_gtt_location(rdev, &rdev->mc);
 	radeon_update_bandwidth_info(rdev);

@@ -2627,8 +2638,8 @@ restart_ih:
 	while (rptr != wptr) {
 		/* wptr/rptr are in bytes! */
 		ring_index = rptr / 4;
-		src_id = rdev->ih.ring[ring_index] & 0xff;
-		src_data = rdev->ih.ring[ring_index + 1] & 0xfffffff;
+		src_id = le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff;
+		src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff;

 		switch (src_id) {
 		case 1: /* D1 vblank/vline */
@@ -2922,7 +2933,7 @@ static int evergreen_startup(struct radeon_device *rdev)
 	/* XXX: ontario has problems blitting to gart at the moment */
 	if (rdev->family == CHIP_PALM) {
 		rdev->asic->copy = NULL;
-		rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
+		radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
 	}

 	/* allocate wb buffer */
diff --git a/drivers/gpu/drm/radeon/evergreen_blit_kms.c b/drivers/gpu/drm/radeon/evergreen_blit_kms.c
index a1ba4b3053d0..2be698e78ff2 100644
--- a/drivers/gpu/drm/radeon/evergreen_blit_kms.c
+++ b/drivers/gpu/drm/radeon/evergreen_blit_kms.c
@@ -55,7 +55,7 @@ set_render_target(struct radeon_device *rdev, int format,
 	if (h < 8)
 		h = 8;

-	cb_color_info = ((format << 2) | (1 << 24));
+	cb_color_info = ((format << 2) | (1 << 24) | (1 << 8));
 	pitch = (w / 8) - 1;
 	slice = ((w * h) / 64) - 1;

@@ -133,6 +133,9 @@ set_vtx_resource(struct radeon_device *rdev, u64 gpu_addr)

 	/* high addr, stride */
 	sq_vtx_constant_word2 = ((upper_32_bits(gpu_addr) & 0xff) | (16 << 8));
+#ifdef __BIG_ENDIAN
+	sq_vtx_constant_word2 |= (2 << 30);
+#endif
 	/* xyzw swizzles */
 	sq_vtx_constant_word3 = (0 << 3) | (1 << 6) | (2 << 9) | (3 << 12);

@@ -173,7 +176,7 @@ set_tex_resource(struct radeon_device *rdev,
 	sq_tex_resource_word0 = (1 << 0); /* 2D */
 	sq_tex_resource_word0 |= ((((pitch >> 3) - 1) << 6) |
 				  ((w - 1) << 18));
-	sq_tex_resource_word1 = ((h - 1) << 0);
+	sq_tex_resource_word1 = ((h - 1) << 0) | (1 << 28);
 	/* xyzw swizzles */
 	sq_tex_resource_word4 = (0 << 16) | (1 << 19) | (2 << 22) | (3 << 25);

@@ -221,7 +224,11 @@ draw_auto(struct radeon_device *rdev)
 	radeon_ring_write(rdev, DI_PT_RECTLIST);

 	radeon_ring_write(rdev, PACKET3(PACKET3_INDEX_TYPE, 0));
-	radeon_ring_write(rdev, DI_INDEX_SIZE_16_BIT);
+	radeon_ring_write(rdev,
+#ifdef __BIG_ENDIAN
+			  (2 << 2) |
+#endif
+			  DI_INDEX_SIZE_16_BIT);

 	radeon_ring_write(rdev, PACKET3(PACKET3_NUM_INSTANCES, 0));
 	radeon_ring_write(rdev, 1);
@@ -541,7 +548,7 @@ static inline uint32_t i2f(uint32_t input)
 int evergreen_blit_init(struct radeon_device *rdev)
 {
 	u32 obj_size;
-	int r, dwords;
+	int i, r, dwords;
 	void *ptr;
 	u32 packet2s[16];
 	int num_packet2s = 0;
@@ -557,7 +564,7 @@ int evergreen_blit_init(struct radeon_device *rdev)

 	dwords = rdev->r600_blit.state_len;
 	while (dwords & 0xf) {
-		packet2s[num_packet2s++] = PACKET2(0);
+		packet2s[num_packet2s++] = cpu_to_le32(PACKET2(0));
 		dwords++;
 	}

@@ -598,8 +605,10 @@ int evergreen_blit_init(struct radeon_device *rdev)
 	if (num_packet2s)
 		memcpy_toio(ptr + rdev->r600_blit.state_offset + (rdev->r600_blit.state_len * 4),
 			    packet2s, num_packet2s * 4);
-	memcpy(ptr + rdev->r600_blit.vs_offset, evergreen_vs, evergreen_vs_size * 4);
-	memcpy(ptr + rdev->r600_blit.ps_offset, evergreen_ps, evergreen_ps_size * 4);
+	for (i = 0; i < evergreen_vs_size; i++)
+		*(u32 *)((unsigned long)ptr + rdev->r600_blit.vs_offset + i * 4) = cpu_to_le32(evergreen_vs[i]);
+	for (i = 0; i < evergreen_ps_size; i++)
+		*(u32 *)((unsigned long)ptr + rdev->r600_blit.ps_offset + i * 4) = cpu_to_le32(evergreen_ps[i]);
 	radeon_bo_kunmap(rdev->r600_blit.shader_obj);
 	radeon_bo_unreserve(rdev->r600_blit.shader_obj);

@@ -614,7 +623,7 @@ done:
 		dev_err(rdev->dev, "(%d) pin blit object failed\n", r);
 		return r;
 	}
-	rdev->mc.active_vram_size = rdev->mc.real_vram_size;
+	radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
 	return 0;
 }

@@ -622,7 +631,7 @@ void evergreen_blit_fini(struct radeon_device *rdev)
 {
 	int r;

-	rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
+	radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
 	if (rdev->r600_blit.shader_obj == NULL)
 		return;
 	/* If we can't reserve the bo, unref should be enough to destroy
diff --git a/drivers/gpu/drm/radeon/evergreen_blit_shaders.c b/drivers/gpu/drm/radeon/evergreen_blit_shaders.c
index ef1d28c07fbf..3a10399e0066 100644
--- a/drivers/gpu/drm/radeon/evergreen_blit_shaders.c
+++ b/drivers/gpu/drm/radeon/evergreen_blit_shaders.c
@@ -311,11 +311,19 @@ const u32 evergreen_vs[] =
 	0x00000000,
 	0x3c000000,
 	0x67961001,
+#ifdef __BIG_ENDIAN
+	0x000a0000,
+#else
 	0x00080000,
+#endif
 	0x00000000,
 	0x1c000000,
 	0x67961000,
+#ifdef __BIG_ENDIAN
+	0x00020008,
+#else
 	0x00000008,
+#endif
 	0x00000000,
 };

diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index afec1aca2a73..eb4acf4528ff 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -98,6 +98,7 @@
 #define		BUF_SWAP_32BIT				(2 << 16)
 #define	CP_RB_RPTR					0x8700
 #define	CP_RB_RPTR_ADDR					0xC10C
+#define		RB_RPTR_SWAP(x)				((x) << 0)
 #define	CP_RB_RPTR_ADDR_HI				0xC110
 #define	CP_RB_RPTR_WR					0xC108
 #define	CP_RB_WPTR					0xC114
diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c
index 607241c6a8a9..5a82b6b75849 100644
--- a/drivers/gpu/drm/radeon/mkregtable.c
+++ b/drivers/gpu/drm/radeon/mkregtable.c
@@ -673,8 +673,10 @@ static int parser_auth(struct table *t, const char *filename)
 	last_reg = strtol(last_reg_s, NULL, 16);

 	do {
-		if (fgets(buf, 1024, file) == NULL)
+		if (fgets(buf, 1024, file) == NULL) {
+			fclose(file);
 			return -1;
+		}
 		len = strlen(buf);
 		if (ftell(file) == end)
 			done = 1;
@@ -685,6 +687,7 @@ static int parser_auth(struct table *t, const char *filename)
 			fprintf(stderr,
 				"Error matching regular expression %d in %s\n",
 				r, filename);
+			fclose(file);
 			return -1;
 		} else {
 			buf[match[0].rm_eo] = 0;
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 5f15820efe12..e372f9e1e5ce 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -70,23 +70,6 @@ MODULE_FIRMWARE(FIRMWARE_R520);

 void r100_pre_page_flip(struct radeon_device *rdev, int crtc)
 {
-	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc];
-	u32 tmp;
-
-	/* make sure flip is at vb rather than hb */
-	tmp = RREG32(RADEON_CRTC_OFFSET_CNTL + radeon_crtc->crtc_offset);
-	tmp &= ~RADEON_CRTC_OFFSET_FLIP_CNTL;
-	/* make sure pending bit is asserted */
-	tmp |= RADEON_CRTC_GUI_TRIG_OFFSET_LEFT_EN;
-	WREG32(RADEON_CRTC_OFFSET_CNTL + radeon_crtc->crtc_offset, tmp);
-
-	/* set pageflip to happen as late as possible in the vblank interval.
-	 * same field for crtc1/2
-	 */
-	tmp = RREG32(RADEON_CRTC_GEN_CNTL);
-	tmp &= ~RADEON_CRTC_VSTAT_MODE_MASK;
-	WREG32(RADEON_CRTC_GEN_CNTL, tmp);
-
 	/* enable the pflip int */
 	radeon_irq_kms_pflip_irq_get(rdev, crtc);
 }
@@ -1041,7 +1024,7 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
 		return r;
 	}
 	rdev->cp.ready = true;
-	rdev->mc.active_vram_size = rdev->mc.real_vram_size;
+	radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
 	return 0;
 }

@@ -1059,7 +1042,7 @@ void r100_cp_fini(struct radeon_device *rdev)
 void r100_cp_disable(struct radeon_device *rdev)
 {
 	/* Disable ring */
-	rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
+	radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
 	rdev->cp.ready = false;
 	WREG32(RADEON_CP_CSQ_MODE, 0);
 	WREG32(RADEON_CP_CSQ_CNTL, 0);
@@ -1427,6 +1410,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
 		}
 		track->zb.robj = reloc->robj;
 		track->zb.offset = idx_value;
+		track->zb_dirty = true;
 		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
 		break;
 	case RADEON_RB3D_COLOROFFSET:
@@ -1439,6 +1423,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
 		}
 		track->cb[0].robj = reloc->robj;
 		track->cb[0].offset = idx_value;
+		track->cb_dirty = true;
 		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
 		break;
 	case RADEON_PP_TXOFFSET_0:
@@ -1454,6 +1439,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
 		}
 		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
 		track->textures[i].robj = reloc->robj;
+		track->tex_dirty = true;
 		break;
 	case RADEON_PP_CUBIC_OFFSET_T0_0:
 	case RADEON_PP_CUBIC_OFFSET_T0_1:
@@ -1471,6 +1457,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
 		track->textures[0].cube_info[i].offset = idx_value;
 		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
 		track->textures[0].cube_info[i].robj = reloc->robj;
+		track->tex_dirty = true;
 		break;
 	case RADEON_PP_CUBIC_OFFSET_T1_0:
 	case RADEON_PP_CUBIC_OFFSET_T1_1:
@@ -1488,6 +1475,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
 		track->textures[1].cube_info[i].offset = idx_value;
 		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
 		track->textures[1].cube_info[i].robj = reloc->robj;
+		track->tex_dirty = true;
 		break;
 	case RADEON_PP_CUBIC_OFFSET_T2_0:
 	case RADEON_PP_CUBIC_OFFSET_T2_1:
@@ -1505,9 +1493,12 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
 		track->textures[2].cube_info[i].offset = idx_value;
 		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
 		track->textures[2].cube_info[i].robj = reloc->robj;
+		track->tex_dirty = true;
 		break;
 	case RADEON_RE_WIDTH_HEIGHT:
 		track->maxy = ((idx_value >> 16) & 0x7FF);
+		track->cb_dirty = true;
+		track->zb_dirty = true;
 		break;
 	case RADEON_RB3D_COLORPITCH:
 		r = r100_cs_packet_next_reloc(p, &reloc);
@@ -1528,9 +1519,11 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
 		ib[idx] = tmp;

 		track->cb[0].pitch = idx_value & RADEON_COLORPITCH_MASK;
+		track->cb_dirty = true;
 		break;
 	case RADEON_RB3D_DEPTHPITCH:
 		track->zb.pitch = idx_value & RADEON_DEPTHPITCH_MASK;
+		track->zb_dirty = true;
 		break;
 	case RADEON_RB3D_CNTL:
 		switch ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f) {
@@ -1555,6 +1548,8 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
 			return -EINVAL;
 		}
 		track->z_enabled = !!(idx_value & RADEON_Z_ENABLE);
+		track->cb_dirty = true;
+		track->zb_dirty = true;
 		break;
 	case RADEON_RB3D_ZSTENCILCNTL:
 		switch (idx_value & 0xf) {
@@ -1572,6 +1567,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
 		default:
 			break;
 		}
+		track->zb_dirty = true;
 		break;
 	case RADEON_RB3D_ZPASS_ADDR:
 		r = r100_cs_packet_next_reloc(p, &reloc);
@@ -1588,6 +1584,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
 			uint32_t temp = idx_value >> 4;
 			for (i = 0; i < track->num_texture; i++)
 				track->textures[i].enabled = !!(temp & (1 << i));
+			track->tex_dirty = true;
 		}
 		break;
 	case RADEON_SE_VF_CNTL:
@@ -1602,12 +1599,14 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
 		i = (reg - RADEON_PP_TEX_SIZE_0) / 8;
 		track->textures[i].width = (idx_value & RADEON_TEX_USIZE_MASK) + 1;
 		track->textures[i].height = ((idx_value & RADEON_TEX_VSIZE_MASK) >> RADEON_TEX_VSIZE_SHIFT) + 1;
+		track->tex_dirty = true;
 		break;
 	case RADEON_PP_TEX_PITCH_0:
 	case RADEON_PP_TEX_PITCH_1:
 	case RADEON_PP_TEX_PITCH_2:
 		i = (reg - RADEON_PP_TEX_PITCH_0) / 8;
 		track->textures[i].pitch = idx_value + 32;
+		track->tex_dirty = true;
 		break;
 	case RADEON_PP_TXFILTER_0:
1613 case RADEON_PP_TXFILTER_1: 1612 case RADEON_PP_TXFILTER_1:
@@ -1621,6 +1620,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
1621 tmp = (idx_value >> 27) & 0x7; 1620 tmp = (idx_value >> 27) & 0x7;
1622 if (tmp == 2 || tmp == 6) 1621 if (tmp == 2 || tmp == 6)
1623 track->textures[i].roundup_h = false; 1622 track->textures[i].roundup_h = false;
1623 track->tex_dirty = true;
1624 break; 1624 break;
1625 case RADEON_PP_TXFORMAT_0: 1625 case RADEON_PP_TXFORMAT_0:
1626 case RADEON_PP_TXFORMAT_1: 1626 case RADEON_PP_TXFORMAT_1:
@@ -1673,6 +1673,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
1673 } 1673 }
1674 track->textures[i].cube_info[4].width = 1 << ((idx_value >> 16) & 0xf); 1674 track->textures[i].cube_info[4].width = 1 << ((idx_value >> 16) & 0xf);
1675 track->textures[i].cube_info[4].height = 1 << ((idx_value >> 20) & 0xf); 1675 track->textures[i].cube_info[4].height = 1 << ((idx_value >> 20) & 0xf);
1676 track->tex_dirty = true;
1676 break; 1677 break;
1677 case RADEON_PP_CUBIC_FACES_0: 1678 case RADEON_PP_CUBIC_FACES_0:
1678 case RADEON_PP_CUBIC_FACES_1: 1679 case RADEON_PP_CUBIC_FACES_1:
@@ -1683,6 +1684,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
1683 track->textures[i].cube_info[face].width = 1 << ((tmp >> (face * 8)) & 0xf); 1684 track->textures[i].cube_info[face].width = 1 << ((tmp >> (face * 8)) & 0xf);
1684 track->textures[i].cube_info[face].height = 1 << ((tmp >> ((face * 8) + 4)) & 0xf); 1685 track->textures[i].cube_info[face].height = 1 << ((tmp >> ((face * 8) + 4)) & 0xf);
1685 } 1686 }
1687 track->tex_dirty = true;
1686 break; 1688 break;
1687 default: 1689 default:
1688 printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n", 1690 printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n",
@@ -2310,7 +2312,6 @@ void r100_vram_init_sizes(struct radeon_device *rdev)
2310 /* FIXME we don't use the second aperture yet when we could use it */ 2312 /* FIXME we don't use the second aperture yet when we could use it */
2311 if (rdev->mc.visible_vram_size > rdev->mc.aper_size) 2313 if (rdev->mc.visible_vram_size > rdev->mc.aper_size)
2312 rdev->mc.visible_vram_size = rdev->mc.aper_size; 2314 rdev->mc.visible_vram_size = rdev->mc.aper_size;
2313 rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
2314 config_aper_size = RREG32(RADEON_CONFIG_APER_SIZE); 2315 config_aper_size = RREG32(RADEON_CONFIG_APER_SIZE);
2315 if (rdev->flags & RADEON_IS_IGP) { 2316 if (rdev->flags & RADEON_IS_IGP) {
2316 uint32_t tom; 2317 uint32_t tom;
@@ -3318,9 +3319,9 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
3318 unsigned long size; 3319 unsigned long size;
3319 unsigned prim_walk; 3320 unsigned prim_walk;
3320 unsigned nverts; 3321 unsigned nverts;
3321 unsigned num_cb = track->num_cb; 3322 unsigned num_cb = track->cb_dirty ? track->num_cb : 0;
3322 3323
3323 if (!track->zb_cb_clear && !track->color_channel_mask && 3324 if (num_cb && !track->zb_cb_clear && !track->color_channel_mask &&
3324 !track->blend_read_enable) 3325 !track->blend_read_enable)
3325 num_cb = 0; 3326 num_cb = 0;
3326 3327
@@ -3341,7 +3342,9 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
3341 return -EINVAL; 3342 return -EINVAL;
3342 } 3343 }
3343 } 3344 }
3344 if (track->z_enabled) { 3345 track->cb_dirty = false;
3346
3347 if (track->zb_dirty && track->z_enabled) {
3345 if (track->zb.robj == NULL) { 3348 if (track->zb.robj == NULL) {
3346 DRM_ERROR("[drm] No buffer for z buffer !\n"); 3349 DRM_ERROR("[drm] No buffer for z buffer !\n");
3347 return -EINVAL; 3350 return -EINVAL;
@@ -3358,6 +3361,28 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
3358 return -EINVAL; 3361 return -EINVAL;
3359 } 3362 }
3360 } 3363 }
3364 track->zb_dirty = false;
3365
3366 if (track->aa_dirty && track->aaresolve) {
3367 if (track->aa.robj == NULL) {
3368 DRM_ERROR("[drm] No buffer for AA resolve buffer %d !\n", i);
3369 return -EINVAL;
3370 }
3371 /* I believe the format comes from colorbuffer0. */
3372 size = track->aa.pitch * track->cb[0].cpp * track->maxy;
3373 size += track->aa.offset;
3374 if (size > radeon_bo_size(track->aa.robj)) {
3375 DRM_ERROR("[drm] Buffer too small for AA resolve buffer %d "
3376 "(need %lu have %lu) !\n", i, size,
3377 radeon_bo_size(track->aa.robj));
3378 DRM_ERROR("[drm] AA resolve buffer %d (%u %u %u %u)\n",
3379 i, track->aa.pitch, track->cb[0].cpp,
3380 track->aa.offset, track->maxy);
3381 return -EINVAL;
3382 }
3383 }
3384 track->aa_dirty = false;
3385
3361 prim_walk = (track->vap_vf_cntl >> 4) & 0x3; 3386 prim_walk = (track->vap_vf_cntl >> 4) & 0x3;
3362 if (track->vap_vf_cntl & (1 << 14)) { 3387 if (track->vap_vf_cntl & (1 << 14)) {
3363 nverts = track->vap_alt_nverts; 3388 nverts = track->vap_alt_nverts;
@@ -3417,13 +3442,23 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
3417 prim_walk); 3442 prim_walk);
3418 return -EINVAL; 3443 return -EINVAL;
3419 } 3444 }
3420 return r100_cs_track_texture_check(rdev, track); 3445
3446 if (track->tex_dirty) {
3447 track->tex_dirty = false;
3448 return r100_cs_track_texture_check(rdev, track);
3449 }
3450 return 0;
3421} 3451}
3422 3452
3423void r100_cs_track_clear(struct radeon_device *rdev, struct r100_cs_track *track) 3453void r100_cs_track_clear(struct radeon_device *rdev, struct r100_cs_track *track)
3424{ 3454{
3425 unsigned i, face; 3455 unsigned i, face;
3426 3456
3457 track->cb_dirty = true;
3458 track->zb_dirty = true;
3459 track->tex_dirty = true;
3460 track->aa_dirty = true;
3461
3427 if (rdev->family < CHIP_R300) { 3462 if (rdev->family < CHIP_R300) {
3428 track->num_cb = 1; 3463 track->num_cb = 1;
3429 if (rdev->family <= CHIP_RS200) 3464 if (rdev->family <= CHIP_RS200)
@@ -3437,6 +3472,8 @@ void r100_cs_track_clear(struct radeon_device *rdev, struct r100_cs_track *track
3437 track->num_texture = 16; 3472 track->num_texture = 16;
3438 track->maxy = 4096; 3473 track->maxy = 4096;
3439 track->separate_cube = 0; 3474 track->separate_cube = 0;
3475 track->aaresolve = false;
3476 track->aa.robj = NULL;
3440 } 3477 }
3441 3478
3442 for (i = 0; i < track->num_cb; i++) { 3479 for (i = 0; i < track->num_cb; i++) {
@@ -3746,8 +3783,6 @@ static int r100_startup(struct radeon_device *rdev)
3746 r100_mc_program(rdev); 3783 r100_mc_program(rdev);
3747 /* Resume clock */ 3784 /* Resume clock */
3748 r100_clock_startup(rdev); 3785 r100_clock_startup(rdev);
3749 /* Initialize GPU configuration (# pipes, ...) */
3750// r100_gpu_init(rdev);
3751 /* Initialize GART (initialize after TTM so we can allocate 3786 /* Initialize GART (initialize after TTM so we can allocate
3752 * memory through TTM but finalize after TTM) */ 3787 * memory through TTM but finalize after TTM) */
3753 r100_enable_bm(rdev); 3788 r100_enable_bm(rdev);
diff --git a/drivers/gpu/drm/radeon/r100_track.h b/drivers/gpu/drm/radeon/r100_track.h
index af65600e6564..2fef9de7f363 100644
--- a/drivers/gpu/drm/radeon/r100_track.h
+++ b/drivers/gpu/drm/radeon/r100_track.h
@@ -52,14 +52,7 @@ struct r100_cs_track_texture {
52 unsigned compress_format; 52 unsigned compress_format;
53}; 53};
54 54
55struct r100_cs_track_limits {
56 unsigned num_cb;
57 unsigned num_texture;
58 unsigned max_levels;
59};
60
61struct r100_cs_track { 55struct r100_cs_track {
62 struct radeon_device *rdev;
63 unsigned num_cb; 56 unsigned num_cb;
64 unsigned num_texture; 57 unsigned num_texture;
65 unsigned maxy; 58 unsigned maxy;
@@ -73,11 +66,17 @@ struct r100_cs_track {
73 struct r100_cs_track_array arrays[11]; 66 struct r100_cs_track_array arrays[11];
74 struct r100_cs_track_cb cb[R300_MAX_CB]; 67 struct r100_cs_track_cb cb[R300_MAX_CB];
75 struct r100_cs_track_cb zb; 68 struct r100_cs_track_cb zb;
69 struct r100_cs_track_cb aa;
76 struct r100_cs_track_texture textures[R300_TRACK_MAX_TEXTURE]; 70 struct r100_cs_track_texture textures[R300_TRACK_MAX_TEXTURE];
77 bool z_enabled; 71 bool z_enabled;
78 bool separate_cube; 72 bool separate_cube;
79 bool zb_cb_clear; 73 bool zb_cb_clear;
80 bool blend_read_enable; 74 bool blend_read_enable;
75 bool cb_dirty;
76 bool zb_dirty;
77 bool tex_dirty;
78 bool aa_dirty;
79 bool aaresolve;
81}; 80};
82 81
83int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track); 82int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track);
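Annotation: the new cb_dirty/zb_dirty/tex_dirty/aa_dirty flags added to struct r100_cs_track let r100_cs_track_check() skip re-validating state that has not been touched since the last check; the packet parsers set the matching flag whenever they record new colorbuffer, depth, texture or AA state, which is what the repeated track->*_dirty = true lines in the hunks above do. A trimmed, stand-alone sketch of that gating pattern (illustrative names, not the full driver code):

#include <stdbool.h>

/* Trimmed illustration of the dirty-flag gating used in r100_cs_track_check(). */
struct cs_track {
        bool cb_dirty;   /* set when a colorbuffer register was written */
        bool zb_dirty;   /* set when a depth-buffer register was written */
        bool tex_dirty;  /* set when a texture register was written */
        bool z_enabled;
};

static int check_cb(struct cs_track *t)  { return 0; } /* stand-ins for the real checks */
static int check_zb(struct cs_track *t)  { return 0; }
static int check_tex(struct cs_track *t) { return 0; }

static int cs_track_check(struct cs_track *t)
{
        int r;

        if (t->cb_dirty) {
                if ((r = check_cb(t)))
                        return r;
                t->cb_dirty = false;    /* validated; skip until it changes again */
        }
        if (t->zb_dirty && t->z_enabled) {
                if ((r = check_zb(t)))
                        return r;
                t->zb_dirty = false;
        }
        if (t->tex_dirty) {
                t->tex_dirty = false;
                return check_tex(t);
        }
        return 0;
}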
diff --git a/drivers/gpu/drm/radeon/r200.c b/drivers/gpu/drm/radeon/r200.c
index d2408c395619..f24058300413 100644
--- a/drivers/gpu/drm/radeon/r200.c
+++ b/drivers/gpu/drm/radeon/r200.c
@@ -184,6 +184,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
184 } 184 }
185 track->zb.robj = reloc->robj; 185 track->zb.robj = reloc->robj;
186 track->zb.offset = idx_value; 186 track->zb.offset = idx_value;
187 track->zb_dirty = true;
187 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); 188 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
188 break; 189 break;
189 case RADEON_RB3D_COLOROFFSET: 190 case RADEON_RB3D_COLOROFFSET:
@@ -196,6 +197,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
196 } 197 }
197 track->cb[0].robj = reloc->robj; 198 track->cb[0].robj = reloc->robj;
198 track->cb[0].offset = idx_value; 199 track->cb[0].offset = idx_value;
200 track->cb_dirty = true;
199 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); 201 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
200 break; 202 break;
201 case R200_PP_TXOFFSET_0: 203 case R200_PP_TXOFFSET_0:
@@ -214,6 +216,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
214 } 216 }
215 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); 217 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
216 track->textures[i].robj = reloc->robj; 218 track->textures[i].robj = reloc->robj;
219 track->tex_dirty = true;
217 break; 220 break;
218 case R200_PP_CUBIC_OFFSET_F1_0: 221 case R200_PP_CUBIC_OFFSET_F1_0:
219 case R200_PP_CUBIC_OFFSET_F2_0: 222 case R200_PP_CUBIC_OFFSET_F2_0:
@@ -257,9 +260,12 @@ int r200_packet0_check(struct radeon_cs_parser *p,
257 track->textures[i].cube_info[face - 1].offset = idx_value; 260 track->textures[i].cube_info[face - 1].offset = idx_value;
258 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); 261 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
259 track->textures[i].cube_info[face - 1].robj = reloc->robj; 262 track->textures[i].cube_info[face - 1].robj = reloc->robj;
263 track->tex_dirty = true;
260 break; 264 break;
261 case RADEON_RE_WIDTH_HEIGHT: 265 case RADEON_RE_WIDTH_HEIGHT:
262 track->maxy = ((idx_value >> 16) & 0x7FF); 266 track->maxy = ((idx_value >> 16) & 0x7FF);
267 track->cb_dirty = true;
268 track->zb_dirty = true;
263 break; 269 break;
264 case RADEON_RB3D_COLORPITCH: 270 case RADEON_RB3D_COLORPITCH:
265 r = r100_cs_packet_next_reloc(p, &reloc); 271 r = r100_cs_packet_next_reloc(p, &reloc);
@@ -280,9 +286,11 @@ int r200_packet0_check(struct radeon_cs_parser *p,
280 ib[idx] = tmp; 286 ib[idx] = tmp;
281 287
282 track->cb[0].pitch = idx_value & RADEON_COLORPITCH_MASK; 288 track->cb[0].pitch = idx_value & RADEON_COLORPITCH_MASK;
289 track->cb_dirty = true;
283 break; 290 break;
284 case RADEON_RB3D_DEPTHPITCH: 291 case RADEON_RB3D_DEPTHPITCH:
285 track->zb.pitch = idx_value & RADEON_DEPTHPITCH_MASK; 292 track->zb.pitch = idx_value & RADEON_DEPTHPITCH_MASK;
293 track->zb_dirty = true;
286 break; 294 break;
287 case RADEON_RB3D_CNTL: 295 case RADEON_RB3D_CNTL:
288 switch ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f) { 296 switch ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f) {
@@ -312,6 +320,8 @@ int r200_packet0_check(struct radeon_cs_parser *p,
312 } 320 }
313 321
314 track->z_enabled = !!(idx_value & RADEON_Z_ENABLE); 322 track->z_enabled = !!(idx_value & RADEON_Z_ENABLE);
323 track->cb_dirty = true;
324 track->zb_dirty = true;
315 break; 325 break;
316 case RADEON_RB3D_ZSTENCILCNTL: 326 case RADEON_RB3D_ZSTENCILCNTL:
317 switch (idx_value & 0xf) { 327 switch (idx_value & 0xf) {
@@ -329,6 +339,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
329 default: 339 default:
330 break; 340 break;
331 } 341 }
342 track->zb_dirty = true;
332 break; 343 break;
333 case RADEON_RB3D_ZPASS_ADDR: 344 case RADEON_RB3D_ZPASS_ADDR:
334 r = r100_cs_packet_next_reloc(p, &reloc); 345 r = r100_cs_packet_next_reloc(p, &reloc);
@@ -345,6 +356,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
345 uint32_t temp = idx_value >> 4; 356 uint32_t temp = idx_value >> 4;
346 for (i = 0; i < track->num_texture; i++) 357 for (i = 0; i < track->num_texture; i++)
347 track->textures[i].enabled = !!(temp & (1 << i)); 358 track->textures[i].enabled = !!(temp & (1 << i));
359 track->tex_dirty = true;
348 } 360 }
349 break; 361 break;
350 case RADEON_SE_VF_CNTL: 362 case RADEON_SE_VF_CNTL:
@@ -369,6 +381,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
369 i = (reg - R200_PP_TXSIZE_0) / 32; 381 i = (reg - R200_PP_TXSIZE_0) / 32;
370 track->textures[i].width = (idx_value & RADEON_TEX_USIZE_MASK) + 1; 382 track->textures[i].width = (idx_value & RADEON_TEX_USIZE_MASK) + 1;
371 track->textures[i].height = ((idx_value & RADEON_TEX_VSIZE_MASK) >> RADEON_TEX_VSIZE_SHIFT) + 1; 383 track->textures[i].height = ((idx_value & RADEON_TEX_VSIZE_MASK) >> RADEON_TEX_VSIZE_SHIFT) + 1;
384 track->tex_dirty = true;
372 break; 385 break;
373 case R200_PP_TXPITCH_0: 386 case R200_PP_TXPITCH_0:
374 case R200_PP_TXPITCH_1: 387 case R200_PP_TXPITCH_1:
@@ -378,6 +391,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
378 case R200_PP_TXPITCH_5: 391 case R200_PP_TXPITCH_5:
379 i = (reg - R200_PP_TXPITCH_0) / 32; 392 i = (reg - R200_PP_TXPITCH_0) / 32;
380 track->textures[i].pitch = idx_value + 32; 393 track->textures[i].pitch = idx_value + 32;
394 track->tex_dirty = true;
381 break; 395 break;
382 case R200_PP_TXFILTER_0: 396 case R200_PP_TXFILTER_0:
383 case R200_PP_TXFILTER_1: 397 case R200_PP_TXFILTER_1:
@@ -394,6 +408,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
394 tmp = (idx_value >> 27) & 0x7; 408 tmp = (idx_value >> 27) & 0x7;
395 if (tmp == 2 || tmp == 6) 409 if (tmp == 2 || tmp == 6)
396 track->textures[i].roundup_h = false; 410 track->textures[i].roundup_h = false;
411 track->tex_dirty = true;
397 break; 412 break;
398 case R200_PP_TXMULTI_CTL_0: 413 case R200_PP_TXMULTI_CTL_0:
399 case R200_PP_TXMULTI_CTL_1: 414 case R200_PP_TXMULTI_CTL_1:
@@ -432,6 +447,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
432 track->textures[i].tex_coord_type = 1; 447 track->textures[i].tex_coord_type = 1;
433 break; 448 break;
434 } 449 }
450 track->tex_dirty = true;
435 break; 451 break;
436 case R200_PP_TXFORMAT_0: 452 case R200_PP_TXFORMAT_0:
437 case R200_PP_TXFORMAT_1: 453 case R200_PP_TXFORMAT_1:
@@ -488,6 +504,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
488 } 504 }
489 track->textures[i].cube_info[4].width = 1 << ((idx_value >> 16) & 0xf); 505 track->textures[i].cube_info[4].width = 1 << ((idx_value >> 16) & 0xf);
490 track->textures[i].cube_info[4].height = 1 << ((idx_value >> 20) & 0xf); 506 track->textures[i].cube_info[4].height = 1 << ((idx_value >> 20) & 0xf);
507 track->tex_dirty = true;
491 break; 508 break;
492 case R200_PP_CUBIC_FACES_0: 509 case R200_PP_CUBIC_FACES_0:
493 case R200_PP_CUBIC_FACES_1: 510 case R200_PP_CUBIC_FACES_1:
@@ -501,6 +518,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
501 track->textures[i].cube_info[face].width = 1 << ((tmp >> (face * 8)) & 0xf); 518 track->textures[i].cube_info[face].width = 1 << ((tmp >> (face * 8)) & 0xf);
502 track->textures[i].cube_info[face].height = 1 << ((tmp >> ((face * 8) + 4)) & 0xf); 519 track->textures[i].cube_info[face].height = 1 << ((tmp >> ((face * 8) + 4)) & 0xf);
503 } 520 }
521 track->tex_dirty = true;
504 break; 522 break;
505 default: 523 default:
506 printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n", 524 printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n",
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 55fe5ba7def3..069efa8c8ecf 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -667,6 +667,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
667 } 667 }
668 track->cb[i].robj = reloc->robj; 668 track->cb[i].robj = reloc->robj;
669 track->cb[i].offset = idx_value; 669 track->cb[i].offset = idx_value;
670 track->cb_dirty = true;
670 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); 671 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
671 break; 672 break;
672 case R300_ZB_DEPTHOFFSET: 673 case R300_ZB_DEPTHOFFSET:
@@ -679,6 +680,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
679 } 680 }
680 track->zb.robj = reloc->robj; 681 track->zb.robj = reloc->robj;
681 track->zb.offset = idx_value; 682 track->zb.offset = idx_value;
683 track->zb_dirty = true;
682 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); 684 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
683 break; 685 break;
684 case R300_TX_OFFSET_0: 686 case R300_TX_OFFSET_0:
@@ -717,6 +719,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
717 tmp |= tile_flags; 719 tmp |= tile_flags;
718 ib[idx] = tmp; 720 ib[idx] = tmp;
719 track->textures[i].robj = reloc->robj; 721 track->textures[i].robj = reloc->robj;
722 track->tex_dirty = true;
720 break; 723 break;
721 /* Tracked registers */ 724 /* Tracked registers */
722 case 0x2084: 725 case 0x2084:
@@ -743,6 +746,8 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
743 if (p->rdev->family < CHIP_RV515) { 746 if (p->rdev->family < CHIP_RV515) {
744 track->maxy -= 1440; 747 track->maxy -= 1440;
745 } 748 }
749 track->cb_dirty = true;
750 track->zb_dirty = true;
746 break; 751 break;
747 case 0x4E00: 752 case 0x4E00:
748 /* RB3D_CCTL */ 753 /* RB3D_CCTL */
@@ -752,6 +757,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
752 return -EINVAL; 757 return -EINVAL;
753 } 758 }
754 track->num_cb = ((idx_value >> 5) & 0x3) + 1; 759 track->num_cb = ((idx_value >> 5) & 0x3) + 1;
760 track->cb_dirty = true;
755 break; 761 break;
756 case 0x4E38: 762 case 0x4E38:
757 case 0x4E3C: 763 case 0x4E3C:
@@ -814,6 +820,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
814 ((idx_value >> 21) & 0xF)); 820 ((idx_value >> 21) & 0xF));
815 return -EINVAL; 821 return -EINVAL;
816 } 822 }
823 track->cb_dirty = true;
817 break; 824 break;
818 case 0x4F00: 825 case 0x4F00:
819 /* ZB_CNTL */ 826 /* ZB_CNTL */
@@ -822,6 +829,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
822 } else { 829 } else {
823 track->z_enabled = false; 830 track->z_enabled = false;
824 } 831 }
832 track->zb_dirty = true;
825 break; 833 break;
826 case 0x4F10: 834 case 0x4F10:
827 /* ZB_FORMAT */ 835 /* ZB_FORMAT */
@@ -838,6 +846,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
838 (idx_value & 0xF)); 846 (idx_value & 0xF));
839 return -EINVAL; 847 return -EINVAL;
840 } 848 }
849 track->zb_dirty = true;
841 break; 850 break;
842 case 0x4F24: 851 case 0x4F24:
843 /* ZB_DEPTHPITCH */ 852 /* ZB_DEPTHPITCH */
@@ -861,14 +870,17 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
861 ib[idx] = tmp; 870 ib[idx] = tmp;
862 871
863 track->zb.pitch = idx_value & 0x3FFC; 872 track->zb.pitch = idx_value & 0x3FFC;
873 track->zb_dirty = true;
864 break; 874 break;
865 case 0x4104: 875 case 0x4104:
876 /* TX_ENABLE */
866 for (i = 0; i < 16; i++) { 877 for (i = 0; i < 16; i++) {
867 bool enabled; 878 bool enabled;
868 879
869 enabled = !!(idx_value & (1 << i)); 880 enabled = !!(idx_value & (1 << i));
870 track->textures[i].enabled = enabled; 881 track->textures[i].enabled = enabled;
871 } 882 }
883 track->tex_dirty = true;
872 break; 884 break;
873 case 0x44C0: 885 case 0x44C0:
874 case 0x44C4: 886 case 0x44C4:
@@ -898,6 +910,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
898 track->textures[i].compress_format = R100_TRACK_COMP_NONE; 910 track->textures[i].compress_format = R100_TRACK_COMP_NONE;
899 break; 911 break;
900 case R300_TX_FORMAT_X16: 912 case R300_TX_FORMAT_X16:
913 case R300_TX_FORMAT_FL_I16:
901 case R300_TX_FORMAT_Y8X8: 914 case R300_TX_FORMAT_Y8X8:
902 case R300_TX_FORMAT_Z5Y6X5: 915 case R300_TX_FORMAT_Z5Y6X5:
903 case R300_TX_FORMAT_Z6Y5X5: 916 case R300_TX_FORMAT_Z6Y5X5:
@@ -910,6 +923,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
910 track->textures[i].compress_format = R100_TRACK_COMP_NONE; 923 track->textures[i].compress_format = R100_TRACK_COMP_NONE;
911 break; 924 break;
912 case R300_TX_FORMAT_Y16X16: 925 case R300_TX_FORMAT_Y16X16:
926 case R300_TX_FORMAT_FL_I16A16:
913 case R300_TX_FORMAT_Z11Y11X10: 927 case R300_TX_FORMAT_Z11Y11X10:
914 case R300_TX_FORMAT_Z10Y11X11: 928 case R300_TX_FORMAT_Z10Y11X11:
915 case R300_TX_FORMAT_W8Z8Y8X8: 929 case R300_TX_FORMAT_W8Z8Y8X8:
@@ -951,8 +965,8 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
951 DRM_ERROR("Invalid texture format %u\n", 965 DRM_ERROR("Invalid texture format %u\n",
952 (idx_value & 0x1F)); 966 (idx_value & 0x1F));
953 return -EINVAL; 967 return -EINVAL;
954 break;
955 } 968 }
969 track->tex_dirty = true;
956 break; 970 break;
957 case 0x4400: 971 case 0x4400:
958 case 0x4404: 972 case 0x4404:
@@ -980,6 +994,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
980 if (tmp == 2 || tmp == 4 || tmp == 6) { 994 if (tmp == 2 || tmp == 4 || tmp == 6) {
981 track->textures[i].roundup_h = false; 995 track->textures[i].roundup_h = false;
982 } 996 }
997 track->tex_dirty = true;
983 break; 998 break;
984 case 0x4500: 999 case 0x4500:
985 case 0x4504: 1000 case 0x4504:
@@ -1017,6 +1032,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
1017 DRM_ERROR("Forbidden bit TXFORMAT_MSB\n"); 1032 DRM_ERROR("Forbidden bit TXFORMAT_MSB\n");
1018 return -EINVAL; 1033 return -EINVAL;
1019 } 1034 }
1035 track->tex_dirty = true;
1020 break; 1036 break;
1021 case 0x4480: 1037 case 0x4480:
1022 case 0x4484: 1038 case 0x4484:
@@ -1046,6 +1062,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
1046 track->textures[i].use_pitch = !!tmp; 1062 track->textures[i].use_pitch = !!tmp;
1047 tmp = (idx_value >> 22) & 0xF; 1063 tmp = (idx_value >> 22) & 0xF;
1048 track->textures[i].txdepth = tmp; 1064 track->textures[i].txdepth = tmp;
1065 track->tex_dirty = true;
1049 break; 1066 break;
1050 case R300_ZB_ZPASS_ADDR: 1067 case R300_ZB_ZPASS_ADDR:
1051 r = r100_cs_packet_next_reloc(p, &reloc); 1068 r = r100_cs_packet_next_reloc(p, &reloc);
@@ -1060,6 +1077,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
1060 case 0x4e0c: 1077 case 0x4e0c:
1061 /* RB3D_COLOR_CHANNEL_MASK */ 1078 /* RB3D_COLOR_CHANNEL_MASK */
1062 track->color_channel_mask = idx_value; 1079 track->color_channel_mask = idx_value;
1080 track->cb_dirty = true;
1063 break; 1081 break;
1064 case 0x43a4: 1082 case 0x43a4:
1065 /* SC_HYPERZ_EN */ 1083 /* SC_HYPERZ_EN */
@@ -1073,6 +1091,8 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
1073 case 0x4f1c: 1091 case 0x4f1c:
1074 /* ZB_BW_CNTL */ 1092 /* ZB_BW_CNTL */
1075 track->zb_cb_clear = !!(idx_value & (1 << 5)); 1093 track->zb_cb_clear = !!(idx_value & (1 << 5));
1094 track->cb_dirty = true;
1095 track->zb_dirty = true;
1076 if (p->rdev->hyperz_filp != p->filp) { 1096 if (p->rdev->hyperz_filp != p->filp) {
1077 if (idx_value & (R300_HIZ_ENABLE | 1097 if (idx_value & (R300_HIZ_ENABLE |
1078 R300_RD_COMP_ENABLE | 1098 R300_RD_COMP_ENABLE |
@@ -1084,8 +1104,28 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
1084 case 0x4e04: 1104 case 0x4e04:
1085 /* RB3D_BLENDCNTL */ 1105 /* RB3D_BLENDCNTL */
1086 track->blend_read_enable = !!(idx_value & (1 << 2)); 1106 track->blend_read_enable = !!(idx_value & (1 << 2));
1107 track->cb_dirty = true;
1108 break;
1109 case R300_RB3D_AARESOLVE_OFFSET:
1110 r = r100_cs_packet_next_reloc(p, &reloc);
1111 if (r) {
1112 DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
1113 idx, reg);
1114 r100_cs_dump_packet(p, pkt);
1115 return r;
1116 }
1117 track->aa.robj = reloc->robj;
1118 track->aa.offset = idx_value;
1119 track->aa_dirty = true;
1120 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
1121 break;
1122 case R300_RB3D_AARESOLVE_PITCH:
1123 track->aa.pitch = idx_value & 0x3FFE;
1124 track->aa_dirty = true;
1087 break; 1125 break;
1088 case 0x4f28: /* ZB_DEPTHCLEARVALUE */ 1126 case R300_RB3D_AARESOLVE_CTL:
1127 track->aaresolve = idx_value & 0x1;
1128 track->aa_dirty = true;
1089 break; 1129 break;
1090 case 0x4f30: /* ZB_MASK_OFFSET */ 1130 case 0x4f30: /* ZB_MASK_OFFSET */
1091 case 0x4f34: /* ZB_ZMASK_PITCH */ 1131 case 0x4f34: /* ZB_ZMASK_PITCH */
diff --git a/drivers/gpu/drm/radeon/r300_reg.h b/drivers/gpu/drm/radeon/r300_reg.h
index 1a0d5362cd79..f0bce399c9f3 100644
--- a/drivers/gpu/drm/radeon/r300_reg.h
+++ b/drivers/gpu/drm/radeon/r300_reg.h
@@ -1371,6 +1371,8 @@
1371#define R300_RB3D_COLORPITCH2 0x4E40 /* GUESS */ 1371#define R300_RB3D_COLORPITCH2 0x4E40 /* GUESS */
1372#define R300_RB3D_COLORPITCH3 0x4E44 /* GUESS */ 1372#define R300_RB3D_COLORPITCH3 0x4E44 /* GUESS */
1373 1373
1374#define R300_RB3D_AARESOLVE_OFFSET 0x4E80
1375#define R300_RB3D_AARESOLVE_PITCH 0x4E84
1374#define R300_RB3D_AARESOLVE_CTL 0x4E88 1376#define R300_RB3D_AARESOLVE_CTL 0x4E88
1375/* gap */ 1377/* gap */
1376 1378
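Annotation: the r300.c and r300_reg.h hunks above start parsing R300_RB3D_AARESOLVE_OFFSET/_PITCH/_CTL so the checker can bound-check the AA resolve buffer like any other render target. A rough stand-alone sketch of the size check, under the same assumption the driver comment makes (pixel size taken from colorbuffer 0):

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: mirrors the AA resolve size check added to r100_cs_track_check(). */
struct aa_state {
        uint32_t pitch;   /* from R300_RB3D_AARESOLVE_PITCH, masked to 0x3FFE */
        uint32_t offset;  /* from the R300_RB3D_AARESOLVE_OFFSET reloc */
        uint32_t cpp;     /* bytes per pixel, assumed to come from colorbuffer 0 */
        uint32_t maxy;    /* scissor height from RE_WIDTH_HEIGHT */
};

static int aa_buffer_fits(const struct aa_state *aa, unsigned long bo_size)
{
        unsigned long need = (unsigned long)aa->pitch * aa->cpp * aa->maxy + aa->offset;

        if (need > bo_size) {
                fprintf(stderr, "AA resolve buffer too small (need %lu have %lu)\n",
                        need, bo_size);
                return -1;
        }
        return 0;
}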
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 650672a0f5ad..9b3fad23b76c 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -1255,7 +1255,6 @@ int r600_mc_init(struct radeon_device *rdev)
1255 rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE); 1255 rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
1256 rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE); 1256 rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
1257 rdev->mc.visible_vram_size = rdev->mc.aper_size; 1257 rdev->mc.visible_vram_size = rdev->mc.aper_size;
1258 rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
1259 r600_vram_gtt_location(rdev, &rdev->mc); 1258 r600_vram_gtt_location(rdev, &rdev->mc);
1260 1259
1261 if (rdev->flags & RADEON_IS_IGP) { 1260 if (rdev->flags & RADEON_IS_IGP) {
@@ -1937,7 +1936,7 @@ void r600_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v)
1937 */ 1936 */
1938void r600_cp_stop(struct radeon_device *rdev) 1937void r600_cp_stop(struct radeon_device *rdev)
1939{ 1938{
1940 rdev->mc.active_vram_size = rdev->mc.visible_vram_size; 1939 radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
1941 WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1)); 1940 WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
1942 WREG32(SCRATCH_UMSK, 0); 1941 WREG32(SCRATCH_UMSK, 0);
1943} 1942}
@@ -2105,7 +2104,11 @@ static int r600_cp_load_microcode(struct radeon_device *rdev)
2105 2104
2106 r600_cp_stop(rdev); 2105 r600_cp_stop(rdev);
2107 2106
2108 WREG32(CP_RB_CNTL, RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3)); 2107 WREG32(CP_RB_CNTL,
2108#ifdef __BIG_ENDIAN
2109 BUF_SWAP_32BIT |
2110#endif
2111 RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3));
2109 2112
2110 /* Reset cp */ 2113 /* Reset cp */
2111 WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP); 2114 WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
@@ -2192,7 +2195,11 @@ int r600_cp_resume(struct radeon_device *rdev)
2192 WREG32(CP_RB_WPTR, 0); 2195 WREG32(CP_RB_WPTR, 0);
2193 2196
2194 /* set the wb address whether it's enabled or not */ 2197 /* set the wb address whether it's enabled or not */
2195 WREG32(CP_RB_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC); 2198 WREG32(CP_RB_RPTR_ADDR,
2199#ifdef __BIG_ENDIAN
2200 RB_RPTR_SWAP(2) |
2201#endif
2202 ((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC));
2196 WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF); 2203 WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
2197 WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF); 2204 WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);
2198 2205
@@ -2628,7 +2635,11 @@ void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
2628{ 2635{
2629 /* FIXME: implement */ 2636 /* FIXME: implement */
2630 radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2)); 2637 radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
2631 radeon_ring_write(rdev, ib->gpu_addr & 0xFFFFFFFC); 2638 radeon_ring_write(rdev,
2639#ifdef __BIG_ENDIAN
2640 (2 << 0) |
2641#endif
2642 (ib->gpu_addr & 0xFFFFFFFC));
2632 radeon_ring_write(rdev, upper_32_bits(ib->gpu_addr) & 0xFF); 2643 radeon_ring_write(rdev, upper_32_bits(ib->gpu_addr) & 0xFF);
2633 radeon_ring_write(rdev, ib->length_dw); 2644 radeon_ring_write(rdev, ib->length_dw);
2634} 2645}
@@ -3297,8 +3308,8 @@ restart_ih:
3297 while (rptr != wptr) { 3308 while (rptr != wptr) {
3298 /* wptr/rptr are in bytes! */ 3309 /* wptr/rptr are in bytes! */
3299 ring_index = rptr / 4; 3310 ring_index = rptr / 4;
3300 src_id = rdev->ih.ring[ring_index] & 0xff; 3311 src_id = le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff;
3301 src_data = rdev->ih.ring[ring_index + 1] & 0xfffffff; 3312 src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff;
3302 3313
3303 switch (src_id) { 3314 switch (src_id) {
3304 case 1: /* D1 vblank/vline */ 3315 case 1: /* D1 vblank/vline */
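Annotation: most of the r600.c changes above are big-endian fixes. CP_RB_CNTL, CP_RB_RPTR_ADDR and the indirect-buffer packet gain explicit byte-swap fields under #ifdef __BIG_ENDIAN, and the interrupt ring is now read through le32_to_cpu() because the GPU writes it little-endian regardless of host byte order. A hedged sketch of the read side (masks follow the hunk above; the swap helper here is a user-space stand-in for le32_to_cpu()):

#include <stdint.h>

/* The IH ring is little-endian in memory, so swap on big-endian hosts before
 * extracting the source id / data fields. */
static inline uint32_t le32_to_host(uint32_t v)
{
#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
        return __builtin_bswap32(v);
#else
        return v;
#endif
}

static void decode_ih_entry(const uint32_t *ring, unsigned ring_index,
                            uint32_t *src_id, uint32_t *src_data)
{
        *src_id   = le32_to_host(ring[ring_index]) & 0xff;
        *src_data = le32_to_host(ring[ring_index + 1]) & 0xfffffff;
}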
diff --git a/drivers/gpu/drm/radeon/r600_blit.c b/drivers/gpu/drm/radeon/r600_blit.c
index ca5c29f70779..7f1043448d25 100644
--- a/drivers/gpu/drm/radeon/r600_blit.c
+++ b/drivers/gpu/drm/radeon/r600_blit.c
@@ -137,9 +137,9 @@ set_shaders(struct drm_device *dev)
137 ps = (u32 *) ((char *)dev->agp_buffer_map->handle + dev_priv->blit_vb->offset + 256); 137 ps = (u32 *) ((char *)dev->agp_buffer_map->handle + dev_priv->blit_vb->offset + 256);
138 138
139 for (i = 0; i < r6xx_vs_size; i++) 139 for (i = 0; i < r6xx_vs_size; i++)
140 vs[i] = r6xx_vs[i]; 140 vs[i] = cpu_to_le32(r6xx_vs[i]);
141 for (i = 0; i < r6xx_ps_size; i++) 141 for (i = 0; i < r6xx_ps_size; i++)
142 ps[i] = r6xx_ps[i]; 142 ps[i] = cpu_to_le32(r6xx_ps[i]);
143 143
144 dev_priv->blit_vb->used = 512; 144 dev_priv->blit_vb->used = 512;
145 145
@@ -192,6 +192,9 @@ set_vtx_resource(drm_radeon_private_t *dev_priv, u64 gpu_addr)
192 DRM_DEBUG("\n"); 192 DRM_DEBUG("\n");
193 193
194 sq_vtx_constant_word2 = (((gpu_addr >> 32) & 0xff) | (16 << 8)); 194 sq_vtx_constant_word2 = (((gpu_addr >> 32) & 0xff) | (16 << 8));
195#ifdef __BIG_ENDIAN
196 sq_vtx_constant_word2 |= (2 << 30);
197#endif
195 198
196 BEGIN_RING(9); 199 BEGIN_RING(9);
197 OUT_RING(CP_PACKET3(R600_IT_SET_RESOURCE, 7)); 200 OUT_RING(CP_PACKET3(R600_IT_SET_RESOURCE, 7));
@@ -291,7 +294,11 @@ draw_auto(drm_radeon_private_t *dev_priv)
291 OUT_RING(DI_PT_RECTLIST); 294 OUT_RING(DI_PT_RECTLIST);
292 295
293 OUT_RING(CP_PACKET3(R600_IT_INDEX_TYPE, 0)); 296 OUT_RING(CP_PACKET3(R600_IT_INDEX_TYPE, 0));
297#ifdef __BIG_ENDIAN
298 OUT_RING((2 << 2) | DI_INDEX_SIZE_16_BIT);
299#else
294 OUT_RING(DI_INDEX_SIZE_16_BIT); 300 OUT_RING(DI_INDEX_SIZE_16_BIT);
301#endif
295 302
296 OUT_RING(CP_PACKET3(R600_IT_NUM_INSTANCES, 0)); 303 OUT_RING(CP_PACKET3(R600_IT_NUM_INSTANCES, 0));
297 OUT_RING(1); 304 OUT_RING(1);
diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c
index 86e5aa07f0db..df68d91e8190 100644
--- a/drivers/gpu/drm/radeon/r600_blit_kms.c
+++ b/drivers/gpu/drm/radeon/r600_blit_kms.c
@@ -54,7 +54,7 @@ set_render_target(struct radeon_device *rdev, int format,
54 if (h < 8) 54 if (h < 8)
55 h = 8; 55 h = 8;
56 56
57 cb_color_info = ((format << 2) | (1 << 27)); 57 cb_color_info = ((format << 2) | (1 << 27) | (1 << 8));
58 pitch = (w / 8) - 1; 58 pitch = (w / 8) - 1;
59 slice = ((w * h) / 64) - 1; 59 slice = ((w * h) / 64) - 1;
60 60
@@ -165,6 +165,9 @@ set_vtx_resource(struct radeon_device *rdev, u64 gpu_addr)
165 u32 sq_vtx_constant_word2; 165 u32 sq_vtx_constant_word2;
166 166
167 sq_vtx_constant_word2 = ((upper_32_bits(gpu_addr) & 0xff) | (16 << 8)); 167 sq_vtx_constant_word2 = ((upper_32_bits(gpu_addr) & 0xff) | (16 << 8));
168#ifdef __BIG_ENDIAN
169 sq_vtx_constant_word2 |= (2 << 30);
170#endif
168 171
169 radeon_ring_write(rdev, PACKET3(PACKET3_SET_RESOURCE, 7)); 172 radeon_ring_write(rdev, PACKET3(PACKET3_SET_RESOURCE, 7));
170 radeon_ring_write(rdev, 0x460); 173 radeon_ring_write(rdev, 0x460);
@@ -199,7 +202,7 @@ set_tex_resource(struct radeon_device *rdev,
199 if (h < 1) 202 if (h < 1)
200 h = 1; 203 h = 1;
201 204
202 sq_tex_resource_word0 = (1 << 0); 205 sq_tex_resource_word0 = (1 << 0) | (1 << 3);
203 sq_tex_resource_word0 |= ((((pitch >> 3) - 1) << 8) | 206 sq_tex_resource_word0 |= ((((pitch >> 3) - 1) << 8) |
204 ((w - 1) << 19)); 207 ((w - 1) << 19));
205 208
@@ -253,7 +256,11 @@ draw_auto(struct radeon_device *rdev)
253 radeon_ring_write(rdev, DI_PT_RECTLIST); 256 radeon_ring_write(rdev, DI_PT_RECTLIST);
254 257
255 radeon_ring_write(rdev, PACKET3(PACKET3_INDEX_TYPE, 0)); 258 radeon_ring_write(rdev, PACKET3(PACKET3_INDEX_TYPE, 0));
256 radeon_ring_write(rdev, DI_INDEX_SIZE_16_BIT); 259 radeon_ring_write(rdev,
260#ifdef __BIG_ENDIAN
261 (2 << 2) |
262#endif
263 DI_INDEX_SIZE_16_BIT);
257 264
258 radeon_ring_write(rdev, PACKET3(PACKET3_NUM_INSTANCES, 0)); 265 radeon_ring_write(rdev, PACKET3(PACKET3_NUM_INSTANCES, 0));
259 radeon_ring_write(rdev, 1); 266 radeon_ring_write(rdev, 1);
@@ -424,7 +431,11 @@ set_default_state(struct radeon_device *rdev)
424 dwords = ALIGN(rdev->r600_blit.state_len, 0x10); 431 dwords = ALIGN(rdev->r600_blit.state_len, 0x10);
425 gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.state_offset; 432 gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.state_offset;
426 radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2)); 433 radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
427 radeon_ring_write(rdev, gpu_addr & 0xFFFFFFFC); 434 radeon_ring_write(rdev,
435#ifdef __BIG_ENDIAN
436 (2 << 0) |
437#endif
438 (gpu_addr & 0xFFFFFFFC));
428 radeon_ring_write(rdev, upper_32_bits(gpu_addr) & 0xFF); 439 radeon_ring_write(rdev, upper_32_bits(gpu_addr) & 0xFF);
429 radeon_ring_write(rdev, dwords); 440 radeon_ring_write(rdev, dwords);
430 441
@@ -467,7 +478,7 @@ static inline uint32_t i2f(uint32_t input)
467int r600_blit_init(struct radeon_device *rdev) 478int r600_blit_init(struct radeon_device *rdev)
468{ 479{
469 u32 obj_size; 480 u32 obj_size;
470 int r, dwords; 481 int i, r, dwords;
471 void *ptr; 482 void *ptr;
472 u32 packet2s[16]; 483 u32 packet2s[16];
473 int num_packet2s = 0; 484 int num_packet2s = 0;
@@ -486,7 +497,7 @@ int r600_blit_init(struct radeon_device *rdev)
486 497
487 dwords = rdev->r600_blit.state_len; 498 dwords = rdev->r600_blit.state_len;
488 while (dwords & 0xf) { 499 while (dwords & 0xf) {
489 packet2s[num_packet2s++] = PACKET2(0); 500 packet2s[num_packet2s++] = cpu_to_le32(PACKET2(0));
490 dwords++; 501 dwords++;
491 } 502 }
492 503
@@ -529,8 +540,10 @@ int r600_blit_init(struct radeon_device *rdev)
529 if (num_packet2s) 540 if (num_packet2s)
530 memcpy_toio(ptr + rdev->r600_blit.state_offset + (rdev->r600_blit.state_len * 4), 541 memcpy_toio(ptr + rdev->r600_blit.state_offset + (rdev->r600_blit.state_len * 4),
531 packet2s, num_packet2s * 4); 542 packet2s, num_packet2s * 4);
532 memcpy(ptr + rdev->r600_blit.vs_offset, r6xx_vs, r6xx_vs_size * 4); 543 for (i = 0; i < r6xx_vs_size; i++)
533 memcpy(ptr + rdev->r600_blit.ps_offset, r6xx_ps, r6xx_ps_size * 4); 544 *(u32 *)((unsigned long)ptr + rdev->r600_blit.vs_offset + i * 4) = cpu_to_le32(r6xx_vs[i]);
545 for (i = 0; i < r6xx_ps_size; i++)
546 *(u32 *)((unsigned long)ptr + rdev->r600_blit.ps_offset + i * 4) = cpu_to_le32(r6xx_ps[i]);
534 radeon_bo_kunmap(rdev->r600_blit.shader_obj); 547 radeon_bo_kunmap(rdev->r600_blit.shader_obj);
535 radeon_bo_unreserve(rdev->r600_blit.shader_obj); 548 radeon_bo_unreserve(rdev->r600_blit.shader_obj);
536 549
@@ -545,7 +558,7 @@ done:
545 dev_err(rdev->dev, "(%d) pin blit object failed\n", r); 558 dev_err(rdev->dev, "(%d) pin blit object failed\n", r);
546 return r; 559 return r;
547 } 560 }
548 rdev->mc.active_vram_size = rdev->mc.real_vram_size; 561 radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
549 return 0; 562 return 0;
550} 563}
551 564
@@ -553,7 +566,7 @@ void r600_blit_fini(struct radeon_device *rdev)
553{ 566{
554 int r; 567 int r;
555 568
556 rdev->mc.active_vram_size = rdev->mc.visible_vram_size; 569 radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
557 if (rdev->r600_blit.shader_obj == NULL) 570 if (rdev->r600_blit.shader_obj == NULL)
558 return; 571 return;
559 /* If we can't reserve the bo, unref should be enough to destroy 572 /* If we can't reserve the bo, unref should be enough to destroy
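Annotation: in r600_blit_kms.c the plain memcpy() of the blit shaders is replaced by per-word cpu_to_le32() stores (and r600_blit.c does the same for the legacy path), since the command processor fetches shader code as little-endian 32-bit words even on big-endian hosts. A small stand-alone sketch of that copy, with cpu_to_le32() approximated by a compiler builtin:

#include <stdint.h>
#include <stddef.h>

static inline uint32_t host_to_le32(uint32_t v)
{
#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
        return __builtin_bswap32(v);
#else
        return v;
#endif
}

/* Copy shader words into a GPU-visible buffer one u32 at a time,
 * swapping on big-endian hosts instead of doing a raw memcpy(). */
static void copy_shader_le(volatile uint32_t *dst, const uint32_t *src, size_t n_words)
{
        size_t i;

        for (i = 0; i < n_words; i++)
                dst[i] = host_to_le32(src[i]);
}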
diff --git a/drivers/gpu/drm/radeon/r600_blit_shaders.c b/drivers/gpu/drm/radeon/r600_blit_shaders.c
index e8151c1d55b2..2d1f6c5ee2a7 100644
--- a/drivers/gpu/drm/radeon/r600_blit_shaders.c
+++ b/drivers/gpu/drm/radeon/r600_blit_shaders.c
@@ -684,7 +684,11 @@ const u32 r6xx_vs[] =
684 0x00000000, 684 0x00000000,
685 0x3c000000, 685 0x3c000000,
686 0x68cd1000, 686 0x68cd1000,
687#ifdef __BIG_ENDIAN
688 0x000a0000,
689#else
687 0x00080000, 690 0x00080000,
691#endif
688 0x00000000, 692 0x00000000,
689}; 693};
690 694
diff --git a/drivers/gpu/drm/radeon/r600_cp.c b/drivers/gpu/drm/radeon/r600_cp.c
index 4f4cd8b286d5..c3ab959bdc7c 100644
--- a/drivers/gpu/drm/radeon/r600_cp.c
+++ b/drivers/gpu/drm/radeon/r600_cp.c
@@ -396,6 +396,9 @@ static void r600_cp_load_microcode(drm_radeon_private_t *dev_priv)
396 r600_do_cp_stop(dev_priv); 396 r600_do_cp_stop(dev_priv);
397 397
398 RADEON_WRITE(R600_CP_RB_CNTL, 398 RADEON_WRITE(R600_CP_RB_CNTL,
399#ifdef __BIG_ENDIAN
400 R600_BUF_SWAP_32BIT |
401#endif
399 R600_RB_NO_UPDATE | 402 R600_RB_NO_UPDATE |
400 R600_RB_BLKSZ(15) | 403 R600_RB_BLKSZ(15) |
401 R600_RB_BUFSZ(3)); 404 R600_RB_BUFSZ(3));
@@ -486,9 +489,12 @@ static void r700_cp_load_microcode(drm_radeon_private_t *dev_priv)
486 r600_do_cp_stop(dev_priv); 489 r600_do_cp_stop(dev_priv);
487 490
488 RADEON_WRITE(R600_CP_RB_CNTL, 491 RADEON_WRITE(R600_CP_RB_CNTL,
492#ifdef __BIG_ENDIAN
493 R600_BUF_SWAP_32BIT |
494#endif
489 R600_RB_NO_UPDATE | 495 R600_RB_NO_UPDATE |
490 (15 << 8) | 496 R600_RB_BLKSZ(15) |
491 (3 << 0)); 497 R600_RB_BUFSZ(3));
492 498
493 RADEON_WRITE(R600_GRBM_SOFT_RESET, R600_SOFT_RESET_CP); 499 RADEON_WRITE(R600_GRBM_SOFT_RESET, R600_SOFT_RESET_CP);
494 RADEON_READ(R600_GRBM_SOFT_RESET); 500 RADEON_READ(R600_GRBM_SOFT_RESET);
@@ -550,8 +556,12 @@ static void r600_test_writeback(drm_radeon_private_t *dev_priv)
550 556
551 if (!dev_priv->writeback_works) { 557 if (!dev_priv->writeback_works) {
552 /* Disable writeback to avoid unnecessary bus master transfer */ 558 /* Disable writeback to avoid unnecessary bus master transfer */
553 RADEON_WRITE(R600_CP_RB_CNTL, RADEON_READ(R600_CP_RB_CNTL) | 559 RADEON_WRITE(R600_CP_RB_CNTL,
554 RADEON_RB_NO_UPDATE); 560#ifdef __BIG_ENDIAN
561 R600_BUF_SWAP_32BIT |
562#endif
563 RADEON_READ(R600_CP_RB_CNTL) |
564 R600_RB_NO_UPDATE);
555 RADEON_WRITE(R600_SCRATCH_UMSK, 0); 565 RADEON_WRITE(R600_SCRATCH_UMSK, 0);
556 } 566 }
557} 567}
@@ -575,7 +585,11 @@ int r600_do_engine_reset(struct drm_device *dev)
575 585
576 RADEON_WRITE(R600_CP_RB_WPTR_DELAY, 0); 586 RADEON_WRITE(R600_CP_RB_WPTR_DELAY, 0);
577 cp_rb_cntl = RADEON_READ(R600_CP_RB_CNTL); 587 cp_rb_cntl = RADEON_READ(R600_CP_RB_CNTL);
578 RADEON_WRITE(R600_CP_RB_CNTL, R600_RB_RPTR_WR_ENA); 588 RADEON_WRITE(R600_CP_RB_CNTL,
589#ifdef __BIG_ENDIAN
590 R600_BUF_SWAP_32BIT |
591#endif
592 R600_RB_RPTR_WR_ENA);
579 593
580 RADEON_WRITE(R600_CP_RB_RPTR_WR, cp_ptr); 594 RADEON_WRITE(R600_CP_RB_RPTR_WR, cp_ptr);
581 RADEON_WRITE(R600_CP_RB_WPTR, cp_ptr); 595 RADEON_WRITE(R600_CP_RB_WPTR, cp_ptr);
@@ -1838,7 +1852,10 @@ static void r600_cp_init_ring_buffer(struct drm_device *dev,
1838 + dev_priv->gart_vm_start; 1852 + dev_priv->gart_vm_start;
1839 } 1853 }
1840 RADEON_WRITE(R600_CP_RB_RPTR_ADDR, 1854 RADEON_WRITE(R600_CP_RB_RPTR_ADDR,
1841 rptr_addr & 0xffffffff); 1855#ifdef __BIG_ENDIAN
1856 (2 << 0) |
1857#endif
1858 (rptr_addr & 0xfffffffc));
1842 RADEON_WRITE(R600_CP_RB_RPTR_ADDR_HI, 1859 RADEON_WRITE(R600_CP_RB_RPTR_ADDR_HI,
1843 upper_32_bits(rptr_addr)); 1860 upper_32_bits(rptr_addr));
1844 1861
@@ -1889,7 +1906,7 @@ static void r600_cp_init_ring_buffer(struct drm_device *dev,
1889 { 1906 {
1890 u64 scratch_addr; 1907 u64 scratch_addr;
1891 1908
1892 scratch_addr = RADEON_READ(R600_CP_RB_RPTR_ADDR); 1909 scratch_addr = RADEON_READ(R600_CP_RB_RPTR_ADDR) & 0xFFFFFFFC;
1893 scratch_addr |= ((u64)RADEON_READ(R600_CP_RB_RPTR_ADDR_HI)) << 32; 1910 scratch_addr |= ((u64)RADEON_READ(R600_CP_RB_RPTR_ADDR_HI)) << 32;
1894 scratch_addr += R600_SCRATCH_REG_OFFSET; 1911 scratch_addr += R600_SCRATCH_REG_OFFSET;
1895 scratch_addr >>= 8; 1912 scratch_addr >>= 8;
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index 7831e0890210..153095fba62f 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -295,17 +295,18 @@ static inline int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
295 } 295 }
296 296
297 if (!IS_ALIGNED(pitch, pitch_align)) { 297 if (!IS_ALIGNED(pitch, pitch_align)) {
298 dev_warn(p->dev, "%s:%d cb pitch (%d) invalid\n", 298 dev_warn(p->dev, "%s:%d cb pitch (%d, 0x%x, %d) invalid\n",
299 __func__, __LINE__, pitch); 299 __func__, __LINE__, pitch, pitch_align, array_mode);
300 return -EINVAL; 300 return -EINVAL;
301 } 301 }
302 if (!IS_ALIGNED(height, height_align)) { 302 if (!IS_ALIGNED(height, height_align)) {
303 dev_warn(p->dev, "%s:%d cb height (%d) invalid\n", 303 dev_warn(p->dev, "%s:%d cb height (%d, 0x%x, %d) invalid\n",
304 __func__, __LINE__, height); 304 __func__, __LINE__, height, height_align, array_mode);
305 return -EINVAL; 305 return -EINVAL;
306 } 306 }
307 if (!IS_ALIGNED(base_offset, base_align)) { 307 if (!IS_ALIGNED(base_offset, base_align)) {
308 dev_warn(p->dev, "%s offset[%d] 0x%llx not aligned\n", __func__, i, base_offset); 308 dev_warn(p->dev, "%s offset[%d] 0x%llx 0x%llx, %d not aligned\n", __func__, i,
309 base_offset, base_align, array_mode);
309 return -EINVAL; 310 return -EINVAL;
310 } 311 }
311 312
@@ -320,7 +321,10 @@ static inline int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
320 * broken userspace. 321 * broken userspace.
321 */ 322 */
322 } else { 323 } else {
323 dev_warn(p->dev, "%s offset[%d] %d %d %lu too big\n", __func__, i, track->cb_color_bo_offset[i], tmp, radeon_bo_size(track->cb_color_bo[i])); 324 dev_warn(p->dev, "%s offset[%d] %d %d %d %lu too big\n", __func__, i,
325 array_mode,
326 track->cb_color_bo_offset[i], tmp,
327 radeon_bo_size(track->cb_color_bo[i]));
324 return -EINVAL; 328 return -EINVAL;
325 } 329 }
326 } 330 }
@@ -455,17 +459,18 @@ static int r600_cs_track_check(struct radeon_cs_parser *p)
455 } 459 }
456 460
457 if (!IS_ALIGNED(pitch, pitch_align)) { 461 if (!IS_ALIGNED(pitch, pitch_align)) {
458 dev_warn(p->dev, "%s:%d db pitch (%d) invalid\n", 462 dev_warn(p->dev, "%s:%d db pitch (%d, 0x%x, %d) invalid\n",
459 __func__, __LINE__, pitch); 463 __func__, __LINE__, pitch, pitch_align, array_mode);
460 return -EINVAL; 464 return -EINVAL;
461 } 465 }
462 if (!IS_ALIGNED(height, height_align)) { 466 if (!IS_ALIGNED(height, height_align)) {
463 dev_warn(p->dev, "%s:%d db height (%d) invalid\n", 467 dev_warn(p->dev, "%s:%d db height (%d, 0x%x, %d) invalid\n",
464 __func__, __LINE__, height); 468 __func__, __LINE__, height, height_align, array_mode);
465 return -EINVAL; 469 return -EINVAL;
466 } 470 }
467 if (!IS_ALIGNED(base_offset, base_align)) { 471 if (!IS_ALIGNED(base_offset, base_align)) {
468 dev_warn(p->dev, "%s offset[%d] 0x%llx not aligned\n", __func__, i, base_offset); 472 dev_warn(p->dev, "%s offset[%d] 0x%llx, 0x%llx, %d not aligned\n", __func__, i,
473 base_offset, base_align, array_mode);
469 return -EINVAL; 474 return -EINVAL;
470 } 475 }
471 476
@@ -473,9 +478,10 @@ static int r600_cs_track_check(struct radeon_cs_parser *p)
473 nviews = G_028004_SLICE_MAX(track->db_depth_view) + 1; 478 nviews = G_028004_SLICE_MAX(track->db_depth_view) + 1;
474 tmp = ntiles * bpe * 64 * nviews; 479 tmp = ntiles * bpe * 64 * nviews;
475 if ((tmp + track->db_offset) > radeon_bo_size(track->db_bo)) { 480 if ((tmp + track->db_offset) > radeon_bo_size(track->db_bo)) {
476 dev_warn(p->dev, "z/stencil buffer too small (0x%08X %d %d %d -> %u have %lu)\n", 481 dev_warn(p->dev, "z/stencil buffer (%d) too small (0x%08X %d %d %d -> %u have %lu)\n",
477 track->db_depth_size, ntiles, nviews, bpe, tmp + track->db_offset, 482 array_mode,
478 radeon_bo_size(track->db_bo)); 483 track->db_depth_size, ntiles, nviews, bpe, tmp + track->db_offset,
484 radeon_bo_size(track->db_bo));
479 return -EINVAL; 485 return -EINVAL;
480 } 486 }
481 } 487 }
@@ -1227,18 +1233,18 @@ static inline int r600_check_texture_resource(struct radeon_cs_parser *p, u32 i
1227 /* XXX check height as well... */ 1233 /* XXX check height as well... */
1228 1234
1229 if (!IS_ALIGNED(pitch, pitch_align)) { 1235 if (!IS_ALIGNED(pitch, pitch_align)) {
1230 dev_warn(p->dev, "%s:%d tex pitch (%d) invalid\n", 1236 dev_warn(p->dev, "%s:%d tex pitch (%d, 0x%x, %d) invalid\n",
1231 __func__, __LINE__, pitch); 1237 __func__, __LINE__, pitch, pitch_align, G_038000_TILE_MODE(word0));
1232 return -EINVAL; 1238 return -EINVAL;
1233 } 1239 }
1234 if (!IS_ALIGNED(base_offset, base_align)) { 1240 if (!IS_ALIGNED(base_offset, base_align)) {
1235 dev_warn(p->dev, "%s:%d tex base offset (0x%llx) invalid\n", 1241 dev_warn(p->dev, "%s:%d tex base offset (0x%llx, 0x%llx, %d) invalid\n",
1236 __func__, __LINE__, base_offset); 1242 __func__, __LINE__, base_offset, base_align, G_038000_TILE_MODE(word0));
1237 return -EINVAL; 1243 return -EINVAL;
1238 } 1244 }
1239 if (!IS_ALIGNED(mip_offset, base_align)) { 1245 if (!IS_ALIGNED(mip_offset, base_align)) {
1240 dev_warn(p->dev, "%s:%d tex mip offset (0x%llx) invalid\n", 1246 dev_warn(p->dev, "%s:%d tex mip offset (0x%llx, 0x%llx, %d) invalid\n",
1241 __func__, __LINE__, mip_offset); 1247 __func__, __LINE__, mip_offset, base_align, G_038000_TILE_MODE(word0));
1242 return -EINVAL; 1248 return -EINVAL;
1243 } 1249 }
1244 1250
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index a5d898b4bad2..04bac0bbd3ec 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -154,13 +154,14 @@
154#define ROQ_IB2_START(x) ((x) << 8) 154#define ROQ_IB2_START(x) ((x) << 8)
155#define CP_RB_BASE 0xC100 155#define CP_RB_BASE 0xC100
156#define CP_RB_CNTL 0xC104 156#define CP_RB_CNTL 0xC104
157#define RB_BUFSZ(x) ((x)<<0) 157#define RB_BUFSZ(x) ((x) << 0)
158#define RB_BLKSZ(x) ((x)<<8) 158#define RB_BLKSZ(x) ((x) << 8)
159#define RB_NO_UPDATE (1<<27) 159#define RB_NO_UPDATE (1 << 27)
160#define RB_RPTR_WR_ENA (1<<31) 160#define RB_RPTR_WR_ENA (1 << 31)
161#define BUF_SWAP_32BIT (2 << 16) 161#define BUF_SWAP_32BIT (2 << 16)
162#define CP_RB_RPTR 0x8700 162#define CP_RB_RPTR 0x8700
163#define CP_RB_RPTR_ADDR 0xC10C 163#define CP_RB_RPTR_ADDR 0xC10C
164#define RB_RPTR_SWAP(x) ((x) << 0)
164#define CP_RB_RPTR_ADDR_HI 0xC110 165#define CP_RB_RPTR_ADDR_HI 0xC110
165#define CP_RB_RPTR_WR 0xC108 166#define CP_RB_RPTR_WR 0xC108
166#define CP_RB_WPTR 0xC114 167#define CP_RB_WPTR 0xC114
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 56c48b67ef3d..6b3429495118 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -345,7 +345,6 @@ struct radeon_mc {
345 * about vram size near mc fb location */ 345 * about vram size near mc fb location */
346 u64 mc_vram_size; 346 u64 mc_vram_size;
347 u64 visible_vram_size; 347 u64 visible_vram_size;
348 u64 active_vram_size;
349 u64 gtt_size; 348 u64 gtt_size;
350 u64 gtt_start; 349 u64 gtt_start;
351 u64 gtt_end; 350 u64 gtt_end;
@@ -1448,6 +1447,7 @@ extern void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *m
1448extern void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc); 1447extern void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc);
1449extern int radeon_resume_kms(struct drm_device *dev); 1448extern int radeon_resume_kms(struct drm_device *dev);
1450extern int radeon_suspend_kms(struct drm_device *dev, pm_message_t state); 1449extern int radeon_suspend_kms(struct drm_device *dev, pm_message_t state);
1450extern void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size);
1451 1451
1452/* r600, rv610, rv630, rv620, rv635, rv670, rs780, rs880 */ 1452/* r600, rv610, rv630, rv620, rv635, rv670, rs780, rs880 */
1453extern bool r600_card_posted(struct radeon_device *rdev); 1453extern bool r600_card_posted(struct radeon_device *rdev);
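Annotation: radeon.h drops the mc.active_vram_size field and declares radeon_ttm_set_active_vram_size() instead; the cp_stop and blit init/fini paths above now call the helper so the TTM VRAM manager is resized directly rather than by mutating a field it never re-read. A rough sketch of what such a helper amounts to, assuming a manager sized in pages (the real implementation lives in radeon_ttm.c and is not part of this hunk):

#include <stdint.h>

#define PAGE_SHIFT 12

/* Toy model: the memory manager sizes a placement domain in pages. */
struct vram_manager {
        uint64_t size_pages;
};

/* Clamp the manager to "size" bytes so buffers are only placed in VRAM the
 * CP/blit engine can currently address. */
static void set_active_vram_size(struct vram_manager *man, uint64_t size)
{
        man->size_pages = size >> PAGE_SHIFT;
}

Usage in the diff above: the CP stop paths shrink the active size back to visible_vram_size while the ring is down, and blit init raises it to real_vram_size once the blit engine can migrate buffers anywhere in VRAM.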
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index e75d63b8e21d..793c5e6026ad 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -834,6 +834,9 @@ static struct radeon_asic sumo_asic = {
834 .pm_finish = &evergreen_pm_finish, 834 .pm_finish = &evergreen_pm_finish,
835 .pm_init_profile = &rs780_pm_init_profile, 835 .pm_init_profile = &rs780_pm_init_profile,
836 .pm_get_dynpm_state = &r600_pm_get_dynpm_state, 836 .pm_get_dynpm_state = &r600_pm_get_dynpm_state,
837 .pre_page_flip = &evergreen_pre_page_flip,
838 .page_flip = &evergreen_page_flip,
839 .post_page_flip = &evergreen_post_page_flip,
837}; 840};
838 841
839static struct radeon_asic btc_asic = { 842static struct radeon_asic btc_asic = {
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 5c1cc7ad9a15..02d5c415f499 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -88,7 +88,7 @@ static inline struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_dev
88 /* some evergreen boards have bad data for this entry */ 88 /* some evergreen boards have bad data for this entry */
89 if (ASIC_IS_DCE4(rdev)) { 89 if (ASIC_IS_DCE4(rdev)) {
90 if ((i == 7) && 90 if ((i == 7) &&
91 (gpio->usClkMaskRegisterIndex == 0x1936) && 91 (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1936) &&
92 (gpio->sucI2cId.ucAccess == 0)) { 92 (gpio->sucI2cId.ucAccess == 0)) {
93 gpio->sucI2cId.ucAccess = 0x97; 93 gpio->sucI2cId.ucAccess = 0x97;
94 gpio->ucDataMaskShift = 8; 94 gpio->ucDataMaskShift = 8;
@@ -101,7 +101,7 @@ static inline struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_dev
101 /* some DCE3 boards have bad data for this entry */ 101 /* some DCE3 boards have bad data for this entry */
102 if (ASIC_IS_DCE3(rdev)) { 102 if (ASIC_IS_DCE3(rdev)) {
103 if ((i == 4) && 103 if ((i == 4) &&
104 (gpio->usClkMaskRegisterIndex == 0x1fda) && 104 (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1fda) &&
105 (gpio->sucI2cId.ucAccess == 0x94)) 105 (gpio->sucI2cId.ucAccess == 0x94))
106 gpio->sucI2cId.ucAccess = 0x14; 106 gpio->sucI2cId.ucAccess = 0x14;
107 } 107 }
@@ -172,7 +172,7 @@ void radeon_atombios_i2c_init(struct radeon_device *rdev)
172 /* some evergreen boards have bad data for this entry */ 172 /* some evergreen boards have bad data for this entry */
173 if (ASIC_IS_DCE4(rdev)) { 173 if (ASIC_IS_DCE4(rdev)) {
174 if ((i == 7) && 174 if ((i == 7) &&
175 (gpio->usClkMaskRegisterIndex == 0x1936) && 175 (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1936) &&
176 (gpio->sucI2cId.ucAccess == 0)) { 176 (gpio->sucI2cId.ucAccess == 0)) {
177 gpio->sucI2cId.ucAccess = 0x97; 177 gpio->sucI2cId.ucAccess = 0x97;
178 gpio->ucDataMaskShift = 8; 178 gpio->ucDataMaskShift = 8;
@@ -185,7 +185,7 @@ void radeon_atombios_i2c_init(struct radeon_device *rdev)
185 /* some DCE3 boards have bad data for this entry */ 185 /* some DCE3 boards have bad data for this entry */
186 if (ASIC_IS_DCE3(rdev)) { 186 if (ASIC_IS_DCE3(rdev)) {
187 if ((i == 4) && 187 if ((i == 4) &&
188 (gpio->usClkMaskRegisterIndex == 0x1fda) && 188 (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1fda) &&
189 (gpio->sucI2cId.ucAccess == 0x94)) 189 (gpio->sucI2cId.ucAccess == 0x94))
190 gpio->sucI2cId.ucAccess = 0x14; 190 gpio->sucI2cId.ucAccess = 0x14;
191 } 191 }
@@ -252,7 +252,7 @@ static inline struct radeon_gpio_rec radeon_lookup_gpio(struct radeon_device *rd
252 pin = &gpio_info->asGPIO_Pin[i]; 252 pin = &gpio_info->asGPIO_Pin[i];
253 if (id == pin->ucGPIO_ID) { 253 if (id == pin->ucGPIO_ID) {
254 gpio.id = pin->ucGPIO_ID; 254 gpio.id = pin->ucGPIO_ID;
255 gpio.reg = pin->usGpioPin_AIndex * 4; 255 gpio.reg = le16_to_cpu(pin->usGpioPin_AIndex) * 4;
256 gpio.mask = (1 << pin->ucGpioPinBitShift); 256 gpio.mask = (1 << pin->ucGpioPinBitShift);
257 gpio.valid = true; 257 gpio.valid = true;
258 break; 258 break;
@@ -1274,11 +1274,11 @@ bool radeon_atombios_sideport_present(struct radeon_device *rdev)
1274 data_offset); 1274 data_offset);
1275 switch (crev) { 1275 switch (crev) {
1276 case 1: 1276 case 1:
1277 if (igp_info->info.ulBootUpMemoryClock) 1277 if (le32_to_cpu(igp_info->info.ulBootUpMemoryClock))
1278 return true; 1278 return true;
1279 break; 1279 break;
1280 case 2: 1280 case 2:
1281 if (igp_info->info_2.ulBootUpSidePortClock) 1281 if (le32_to_cpu(igp_info->info_2.ulBootUpSidePortClock))
1282 return true; 1282 return true;
1283 break; 1283 break;
1284 default: 1284 default:
@@ -1442,7 +1442,7 @@ bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev,
1442 1442
1443 for (i = 0; i < num_indices; i++) { 1443 for (i = 0; i < num_indices; i++) {
1444 if ((ss_info->info.asSpreadSpectrum[i].ucClockIndication == id) && 1444 if ((ss_info->info.asSpreadSpectrum[i].ucClockIndication == id) &&
1445 (clock <= ss_info->info.asSpreadSpectrum[i].ulTargetClockRange)) { 1445 (clock <= le32_to_cpu(ss_info->info.asSpreadSpectrum[i].ulTargetClockRange))) {
1446 ss->percentage = 1446 ss->percentage =
1447 le16_to_cpu(ss_info->info.asSpreadSpectrum[i].usSpreadSpectrumPercentage); 1447 le16_to_cpu(ss_info->info.asSpreadSpectrum[i].usSpreadSpectrumPercentage);
1448 ss->type = ss_info->info.asSpreadSpectrum[i].ucSpreadSpectrumMode; 1448 ss->type = ss_info->info.asSpreadSpectrum[i].ucSpreadSpectrumMode;
@@ -1456,7 +1456,7 @@ bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev,
1456 sizeof(ATOM_ASIC_SS_ASSIGNMENT_V2); 1456 sizeof(ATOM_ASIC_SS_ASSIGNMENT_V2);
1457 for (i = 0; i < num_indices; i++) { 1457 for (i = 0; i < num_indices; i++) {
1458 if ((ss_info->info_2.asSpreadSpectrum[i].ucClockIndication == id) && 1458 if ((ss_info->info_2.asSpreadSpectrum[i].ucClockIndication == id) &&
1459 (clock <= ss_info->info_2.asSpreadSpectrum[i].ulTargetClockRange)) { 1459 (clock <= le32_to_cpu(ss_info->info_2.asSpreadSpectrum[i].ulTargetClockRange))) {
1460 ss->percentage = 1460 ss->percentage =
1461 le16_to_cpu(ss_info->info_2.asSpreadSpectrum[i].usSpreadSpectrumPercentage); 1461 le16_to_cpu(ss_info->info_2.asSpreadSpectrum[i].usSpreadSpectrumPercentage);
1462 ss->type = ss_info->info_2.asSpreadSpectrum[i].ucSpreadSpectrumMode; 1462 ss->type = ss_info->info_2.asSpreadSpectrum[i].ucSpreadSpectrumMode;
@@ -1470,7 +1470,7 @@ bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev,
1470 sizeof(ATOM_ASIC_SS_ASSIGNMENT_V3); 1470 sizeof(ATOM_ASIC_SS_ASSIGNMENT_V3);
1471 for (i = 0; i < num_indices; i++) { 1471 for (i = 0; i < num_indices; i++) {
1472 if ((ss_info->info_3.asSpreadSpectrum[i].ucClockIndication == id) && 1472 if ((ss_info->info_3.asSpreadSpectrum[i].ucClockIndication == id) &&
1473 (clock <= ss_info->info_3.asSpreadSpectrum[i].ulTargetClockRange)) { 1473 (clock <= le32_to_cpu(ss_info->info_3.asSpreadSpectrum[i].ulTargetClockRange))) {
1474 ss->percentage = 1474 ss->percentage =
1475 le16_to_cpu(ss_info->info_3.asSpreadSpectrum[i].usSpreadSpectrumPercentage); 1475 le16_to_cpu(ss_info->info_3.asSpreadSpectrum[i].usSpreadSpectrumPercentage);
1476 ss->type = ss_info->info_3.asSpreadSpectrum[i].ucSpreadSpectrumMode; 1476 ss->type = ss_info->info_3.asSpreadSpectrum[i].ucSpreadSpectrumMode;
@@ -1553,8 +1553,8 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct
1553 if (misc & ATOM_DOUBLE_CLOCK_MODE) 1553 if (misc & ATOM_DOUBLE_CLOCK_MODE)
1554 lvds->native_mode.flags |= DRM_MODE_FLAG_DBLSCAN; 1554 lvds->native_mode.flags |= DRM_MODE_FLAG_DBLSCAN;
1555 1555
1556 lvds->native_mode.width_mm = lvds_info->info.sLCDTiming.usImageHSize; 1556 lvds->native_mode.width_mm = le16_to_cpu(lvds_info->info.sLCDTiming.usImageHSize);
1557 lvds->native_mode.height_mm = lvds_info->info.sLCDTiming.usImageVSize; 1557 lvds->native_mode.height_mm = le16_to_cpu(lvds_info->info.sLCDTiming.usImageVSize);
1558 1558
1559 /* set crtc values */ 1559 /* set crtc values */
1560 drm_mode_set_crtcinfo(&lvds->native_mode, CRTC_INTERLACE_HALVE_V); 1560 drm_mode_set_crtcinfo(&lvds->native_mode, CRTC_INTERLACE_HALVE_V);
@@ -1569,13 +1569,13 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct
1569 lvds->linkb = false; 1569 lvds->linkb = false;
1570 1570
1571 /* parse the lcd record table */ 1571 /* parse the lcd record table */
1572 if (lvds_info->info.usModePatchTableOffset) { 1572 if (le16_to_cpu(lvds_info->info.usModePatchTableOffset)) {
1573 ATOM_FAKE_EDID_PATCH_RECORD *fake_edid_record; 1573 ATOM_FAKE_EDID_PATCH_RECORD *fake_edid_record;
1574 ATOM_PANEL_RESOLUTION_PATCH_RECORD *panel_res_record; 1574 ATOM_PANEL_RESOLUTION_PATCH_RECORD *panel_res_record;
1575 bool bad_record = false; 1575 bool bad_record = false;
1576 u8 *record = (u8 *)(mode_info->atom_context->bios + 1576 u8 *record = (u8 *)(mode_info->atom_context->bios +
1577 data_offset + 1577 data_offset +
1578 lvds_info->info.usModePatchTableOffset); 1578 le16_to_cpu(lvds_info->info.usModePatchTableOffset));
1579 while (*record != ATOM_RECORD_END_TYPE) { 1579 while (*record != ATOM_RECORD_END_TYPE) {
1580 switch (*record) { 1580 switch (*record) {
1581 case LCD_MODE_PATCH_RECORD_MODE_TYPE: 1581 case LCD_MODE_PATCH_RECORD_MODE_TYPE:
@@ -2189,7 +2189,7 @@ static u16 radeon_atombios_get_default_vddc(struct radeon_device *rdev)
2189 firmware_info = 2189 firmware_info =
2190 (union firmware_info *)(mode_info->atom_context->bios + 2190 (union firmware_info *)(mode_info->atom_context->bios +
2191 data_offset); 2191 data_offset);
2192 vddc = firmware_info->info_14.usBootUpVDDCVoltage; 2192 vddc = le16_to_cpu(firmware_info->info_14.usBootUpVDDCVoltage);
2193 } 2193 }
2194 2194
2195 return vddc; 2195 return vddc;
@@ -2284,7 +2284,7 @@ static bool radeon_atombios_parse_pplib_clock_info(struct radeon_device *rdev,
2284 rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type = 2284 rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type =
2285 VOLTAGE_SW; 2285 VOLTAGE_SW;
2286 rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage = 2286 rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage =
2287 clock_info->evergreen.usVDDC; 2287 le16_to_cpu(clock_info->evergreen.usVDDC);
2288 } else { 2288 } else {
2289 sclk = le16_to_cpu(clock_info->r600.usEngineClockLow); 2289 sclk = le16_to_cpu(clock_info->r600.usEngineClockLow);
2290 sclk |= clock_info->r600.ucEngineClockHigh << 16; 2290 sclk |= clock_info->r600.ucEngineClockHigh << 16;
@@ -2295,7 +2295,7 @@ static bool radeon_atombios_parse_pplib_clock_info(struct radeon_device *rdev,
2295 rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type = 2295 rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type =
2296 VOLTAGE_SW; 2296 VOLTAGE_SW;
2297 rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage = 2297 rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage =
2298 clock_info->r600.usVDDC; 2298 le16_to_cpu(clock_info->r600.usVDDC);
2299 } 2299 }
2300 2300
2301 if (rdev->flags & RADEON_IS_IGP) { 2301 if (rdev->flags & RADEON_IS_IGP) {
@@ -2408,13 +2408,13 @@ static int radeon_atombios_parse_power_table_6(struct radeon_device *rdev)
2408 radeon_atombios_add_pplib_thermal_controller(rdev, &power_info->pplib.sThermalController); 2408 radeon_atombios_add_pplib_thermal_controller(rdev, &power_info->pplib.sThermalController);
2409 state_array = (struct StateArray *) 2409 state_array = (struct StateArray *)
2410 (mode_info->atom_context->bios + data_offset + 2410 (mode_info->atom_context->bios + data_offset +
2411 power_info->pplib.usStateArrayOffset); 2411 le16_to_cpu(power_info->pplib.usStateArrayOffset));
2412 clock_info_array = (struct ClockInfoArray *) 2412 clock_info_array = (struct ClockInfoArray *)
2413 (mode_info->atom_context->bios + data_offset + 2413 (mode_info->atom_context->bios + data_offset +
2414 power_info->pplib.usClockInfoArrayOffset); 2414 le16_to_cpu(power_info->pplib.usClockInfoArrayOffset));
2415 non_clock_info_array = (struct NonClockInfoArray *) 2415 non_clock_info_array = (struct NonClockInfoArray *)
2416 (mode_info->atom_context->bios + data_offset + 2416 (mode_info->atom_context->bios + data_offset +
2417 power_info->pplib.usNonClockInfoArrayOffset); 2417 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
2418 rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) * 2418 rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) *
2419 state_array->ucNumEntries, GFP_KERNEL); 2419 state_array->ucNumEntries, GFP_KERNEL);
2420 if (!rdev->pm.power_state) 2420 if (!rdev->pm.power_state)
@@ -2533,7 +2533,7 @@ uint32_t radeon_atom_get_engine_clock(struct radeon_device *rdev)
2533 int index = GetIndexIntoMasterTable(COMMAND, GetEngineClock); 2533 int index = GetIndexIntoMasterTable(COMMAND, GetEngineClock);
2534 2534
2535 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); 2535 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
2536 return args.ulReturnEngineClock; 2536 return le32_to_cpu(args.ulReturnEngineClock);
2537} 2537}
2538 2538
2539uint32_t radeon_atom_get_memory_clock(struct radeon_device *rdev) 2539uint32_t radeon_atom_get_memory_clock(struct radeon_device *rdev)
@@ -2542,7 +2542,7 @@ uint32_t radeon_atom_get_memory_clock(struct radeon_device *rdev)
2542 int index = GetIndexIntoMasterTable(COMMAND, GetMemoryClock); 2542 int index = GetIndexIntoMasterTable(COMMAND, GetMemoryClock);
2543 2543
2544 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); 2544 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
2545 return args.ulReturnMemoryClock; 2545 return le32_to_cpu(args.ulReturnMemoryClock);
2546} 2546}
2547 2547
2548void radeon_atom_set_engine_clock(struct radeon_device *rdev, 2548void radeon_atom_set_engine_clock(struct radeon_device *rdev,
@@ -2551,7 +2551,7 @@ void radeon_atom_set_engine_clock(struct radeon_device *rdev,
2551 SET_ENGINE_CLOCK_PS_ALLOCATION args; 2551 SET_ENGINE_CLOCK_PS_ALLOCATION args;
2552 int index = GetIndexIntoMasterTable(COMMAND, SetEngineClock); 2552 int index = GetIndexIntoMasterTable(COMMAND, SetEngineClock);
2553 2553
2554 args.ulTargetEngineClock = eng_clock; /* 10 khz */ 2554 args.ulTargetEngineClock = cpu_to_le32(eng_clock); /* 10 khz */
2555 2555
2556 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); 2556 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
2557} 2557}
@@ -2565,7 +2565,7 @@ void radeon_atom_set_memory_clock(struct radeon_device *rdev,
2565 if (rdev->flags & RADEON_IS_IGP) 2565 if (rdev->flags & RADEON_IS_IGP)
2566 return; 2566 return;
2567 2567
2568 args.ulTargetMemoryClock = mem_clock; /* 10 khz */ 2568 args.ulTargetMemoryClock = cpu_to_le32(mem_clock); /* 10 khz */
2569 2569
2570 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); 2570 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
2571} 2571}
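[editor's note] The recurring change throughout radeon_atombios.c above is a byte-order fix: AtomBIOS data tables are stored little-endian, so on big-endian hosts (the PPC Macs targeted elsewhere in this series) every multi-byte field must be converted on access, and arguments handed back to the ATOM interpreter must be converted the other way. A minimal sketch of that rule, using a hypothetical struct rather than the real AtomBIOS layout:

#include <linux/kernel.h>
#include <asm/byteorder.h>

/* Hypothetical table entry; the real AtomBIOS structs follow the same
 * little-endian storage convention for their usXxx/ulXxx fields. */
struct example_atom_entry {
	__le16 usClockIndex;
	__le32 ulTargetClock;
};

static u32 example_read_target_clock(const struct example_atom_entry *e)
{
	/* reading "e->ulTargetClock" raw would be wrong on big-endian hosts */
	return le32_to_cpu(e->ulTargetClock);
}

static void example_set_clock_index(struct example_atom_entry *e, u16 idx)
{
	/* values written into ATOM argument structures go the other way */
	e->usClockIndex = cpu_to_le16(idx);
}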
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index d27ef74590cd..cf7c8d5b4ec2 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -1504,6 +1504,11 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
1504 (rdev->pdev->subsystem_device == 0x4a48)) { 1504 (rdev->pdev->subsystem_device == 0x4a48)) {
1505 /* Mac X800 */ 1505 /* Mac X800 */
1506 rdev->mode_info.connector_table = CT_MAC_X800; 1506 rdev->mode_info.connector_table = CT_MAC_X800;
1507 } else if ((rdev->pdev->device == 0x4150) &&
1508 (rdev->pdev->subsystem_vendor == 0x1002) &&
1509 (rdev->pdev->subsystem_device == 0x4150)) {
1510 /* Mac G5 9600 */
1511 rdev->mode_info.connector_table = CT_MAC_G5_9600;
1507 } else 1512 } else
1508#endif /* CONFIG_PPC_PMAC */ 1513#endif /* CONFIG_PPC_PMAC */
1509#ifdef CONFIG_PPC64 1514#ifdef CONFIG_PPC64
@@ -2022,6 +2027,48 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
2022 CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I, 2027 CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I,
2023 &hpd); 2028 &hpd);
2024 break; 2029 break;
2030 case CT_MAC_G5_9600:
2031 DRM_INFO("Connector Table: %d (mac g5 9600)\n",
2032 rdev->mode_info.connector_table);
2033 /* DVI - tv dac, dvo */
2034 ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
2035 hpd.hpd = RADEON_HPD_1; /* ??? */
2036 radeon_add_legacy_encoder(dev,
2037 radeon_get_encoder_enum(dev,
2038 ATOM_DEVICE_DFP2_SUPPORT,
2039 0),
2040 ATOM_DEVICE_DFP2_SUPPORT);
2041 radeon_add_legacy_encoder(dev,
2042 radeon_get_encoder_enum(dev,
2043 ATOM_DEVICE_CRT2_SUPPORT,
2044 2),
2045 ATOM_DEVICE_CRT2_SUPPORT);
2046 radeon_add_legacy_connector(dev, 0,
2047 ATOM_DEVICE_DFP2_SUPPORT |
2048 ATOM_DEVICE_CRT2_SUPPORT,
2049 DRM_MODE_CONNECTOR_DVII, &ddc_i2c,
2050 CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I,
2051 &hpd);
2052 /* ADC - primary dac, internal tmds */
2053 ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
2054 hpd.hpd = RADEON_HPD_2; /* ??? */
2055 radeon_add_legacy_encoder(dev,
2056 radeon_get_encoder_enum(dev,
2057 ATOM_DEVICE_DFP1_SUPPORT,
2058 0),
2059 ATOM_DEVICE_DFP1_SUPPORT);
2060 radeon_add_legacy_encoder(dev,
2061 radeon_get_encoder_enum(dev,
2062 ATOM_DEVICE_CRT1_SUPPORT,
2063 1),
2064 ATOM_DEVICE_CRT1_SUPPORT);
2065 radeon_add_legacy_connector(dev, 1,
2066 ATOM_DEVICE_DFP1_SUPPORT |
2067 ATOM_DEVICE_CRT1_SUPPORT,
2068 DRM_MODE_CONNECTOR_DVII, &ddc_i2c,
2069 CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I,
2070 &hpd);
2071 break;
2025 default: 2072 default:
2026 DRM_INFO("Connector table: %d (invalid)\n", 2073 DRM_INFO("Connector table: %d (invalid)\n",
2027 rdev->mode_info.connector_table); 2074 rdev->mode_info.connector_table);
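[editor's note] For boards whose BIOS carries no usable connector table, radeon_combios.c falls back to hard-coded tables selected by PCI IDs; the hunks above add one such match plus its table. A hedged reading of the magic numbers in the test (device 0x4150 is an RV350 "Radeon 9600", vendor 0x1002 is ATI), shown here only as an illustration of the quirk pattern:

/* illustration only; mirrors the test added in the patch */
if (rdev->pdev->device == 0x4150 &&
    rdev->pdev->subsystem_vendor == 0x1002 &&
    rdev->pdev->subsystem_device == 0x4150)
	rdev->mode_info.connector_table = CT_MAC_G5_9600; /* Mac G5 9600 */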
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 0d478932b1a9..4954e2d6ffa2 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -936,8 +936,11 @@ int radeon_resume_kms(struct drm_device *dev)
936int radeon_gpu_reset(struct radeon_device *rdev) 936int radeon_gpu_reset(struct radeon_device *rdev)
937{ 937{
938 int r; 938 int r;
939 int resched;
939 940
940 radeon_save_bios_scratch_regs(rdev); 941 radeon_save_bios_scratch_regs(rdev);
942 /* block TTM */
943 resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
941 radeon_suspend(rdev); 944 radeon_suspend(rdev);
942 945
943 r = radeon_asic_reset(rdev); 946 r = radeon_asic_reset(rdev);
@@ -946,6 +949,7 @@ int radeon_gpu_reset(struct radeon_device *rdev)
946 radeon_resume(rdev); 949 radeon_resume(rdev);
947 radeon_restore_bios_scratch_regs(rdev); 950 radeon_restore_bios_scratch_regs(rdev);
948 drm_helper_resume_force_mode(rdev->ddev); 951 drm_helper_resume_force_mode(rdev->ddev);
952 ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
949 return 0; 953 return 0;
950 } 954 }
951 /* bad news, how to tell it to userspace ? */ 955 /* bad news, how to tell it to userspace ? */
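[editor's note] The radeon_device.c change brackets the reset path so TTM's delayed-destroy worker cannot touch buffer objects while the GPU is being reset; ttm_bo_lock_delayed_workqueue() returns whether the worker was pending so the unlock can reschedule it. A condensed sketch of the pairing (success path only, mirroring the hunks above):

static int example_gpu_reset(struct radeon_device *rdev)
{
	int resched, r;

	radeon_save_bios_scratch_regs(rdev);
	/* park TTM's delayed-destroy worker before touching the hardware */
	resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
	radeon_suspend(rdev);

	r = radeon_asic_reset(rdev);
	if (r == 0) {
		radeon_resume(rdev);
		radeon_restore_bios_scratch_regs(rdev);
		drm_helper_resume_force_mode(rdev->ddev);
		/* let the worker run again, rescheduling it if it was pending */
		ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
	}
	return r;
}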
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 2eff98cfd728..3e7e7f9eb781 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -793,6 +793,11 @@ static void avivo_get_fb_div(struct radeon_pll *pll,
793 tmp *= target_clock; 793 tmp *= target_clock;
794 *fb_div = tmp / pll->reference_freq; 794 *fb_div = tmp / pll->reference_freq;
795 *frac_fb_div = tmp % pll->reference_freq; 795 *frac_fb_div = tmp % pll->reference_freq;
796
797 if (*fb_div > pll->max_feedback_div)
798 *fb_div = pll->max_feedback_div;
799 else if (*fb_div < pll->min_feedback_div)
800 *fb_div = pll->min_feedback_div;
796} 801}
797 802
798static u32 avivo_get_post_div(struct radeon_pll *pll, 803static u32 avivo_get_post_div(struct radeon_pll *pll,
@@ -826,6 +831,11 @@ static u32 avivo_get_post_div(struct radeon_pll *pll,
826 post_div--; 831 post_div--;
827 } 832 }
828 833
834 if (post_div > pll->max_post_div)
835 post_div = pll->max_post_div;
836 else if (post_div < pll->min_post_div)
837 post_div = pll->min_post_div;
838
829 return post_div; 839 return post_div;
830} 840}
831 841
@@ -961,7 +971,7 @@ void radeon_compute_pll_legacy(struct radeon_pll *pll,
961 max_fractional_feed_div = pll->max_frac_feedback_div; 971 max_fractional_feed_div = pll->max_frac_feedback_div;
962 } 972 }
963 973
964 for (post_div = min_post_div; post_div <= max_post_div; ++post_div) { 974 for (post_div = max_post_div; post_div >= min_post_div; --post_div) {
965 uint32_t ref_div; 975 uint32_t ref_div;
966 976
967 if ((pll->flags & RADEON_PLL_NO_ODD_POST_DIV) && (post_div & 1)) 977 if ((pll->flags & RADEON_PLL_NO_ODD_POST_DIV) && (post_div & 1))
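[editor's note] The radeon_display.c hunks clamp the computed feedback and post dividers to the PLL's legal range, and reverse the legacy loop so larger post dividers are tried first when several combinations hit the target clock. The clamp itself is the standard pattern; a hedged sketch with an illustrative helper name:

static u32 example_clamp_div(u32 div, u32 min_div, u32 max_div)
{
	if (div > max_div)
		return max_div;
	if (div < min_div)
		return min_div;
	return div;
}

/* usage mirroring the hunk in avivo_get_fb_div():
 *   *fb_div = example_clamp_div(*fb_div, pll->min_feedback_div,
 *                               pll->max_feedback_div);
 */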
diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
index 448eba89d1e6..5cba46b9779a 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.h
+++ b/drivers/gpu/drm/radeon/radeon_drv.h
@@ -1524,6 +1524,7 @@ extern u32 radeon_get_scratch(drm_radeon_private_t *dev_priv, int index);
1524#define R600_CP_RB_CNTL 0xc104 1524#define R600_CP_RB_CNTL 0xc104
1525# define R600_RB_BUFSZ(x) ((x) << 0) 1525# define R600_RB_BUFSZ(x) ((x) << 0)
1526# define R600_RB_BLKSZ(x) ((x) << 8) 1526# define R600_RB_BLKSZ(x) ((x) << 8)
1527# define R600_BUF_SWAP_32BIT (2 << 16)
1527# define R600_RB_NO_UPDATE (1 << 27) 1528# define R600_RB_NO_UPDATE (1 << 27)
1528# define R600_RB_RPTR_WR_ENA (1 << 31) 1529# define R600_RB_RPTR_WR_ENA (1 << 31)
1529#define R600_CP_RB_RPTR_WR 0xc108 1530#define R600_CP_RB_RPTR_WR 0xc108
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index d4a542247618..b4274883227f 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -910,7 +910,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
910 910
911 args.v1.ucAction = action; 911 args.v1.ucAction = action;
912 if (action == ATOM_TRANSMITTER_ACTION_INIT) { 912 if (action == ATOM_TRANSMITTER_ACTION_INIT) {
913 args.v1.usInitInfo = connector_object_id; 913 args.v1.usInitInfo = cpu_to_le16(connector_object_id);
914 } else if (action == ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH) { 914 } else if (action == ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH) {
915 args.v1.asMode.ucLaneSel = lane_num; 915 args.v1.asMode.ucLaneSel = lane_num;
916 args.v1.asMode.ucLaneSet = lane_set; 916 args.v1.asMode.ucLaneSet = lane_set;
@@ -1140,7 +1140,7 @@ atombios_external_encoder_setup(struct drm_encoder *encoder,
1140 case 3: 1140 case 3:
1141 args.v3.sExtEncoder.ucAction = action; 1141 args.v3.sExtEncoder.ucAction = action;
1142 if (action == EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT) 1142 if (action == EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT)
1143 args.v3.sExtEncoder.usConnectorId = connector_object_id; 1143 args.v3.sExtEncoder.usConnectorId = cpu_to_le16(connector_object_id);
1144 else 1144 else
1145 args.v3.sExtEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); 1145 args.v3.sExtEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
1146 args.v3.sExtEncoder.ucEncoderMode = atombios_get_encoder_mode(encoder); 1146 args.v3.sExtEncoder.ucEncoderMode = atombios_get_encoder_mode(encoder);
@@ -1570,11 +1570,21 @@ atombios_apply_encoder_quirks(struct drm_encoder *encoder,
1570 } 1570 }
1571 1571
1572 /* set scaler clears this on some chips */ 1572 /* set scaler clears this on some chips */
1573 /* XXX check DCE4 */ 1573 if (ASIC_IS_AVIVO(rdev) &&
1574 if (!(radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))) { 1574 (!(radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)))) {
1575 if (ASIC_IS_AVIVO(rdev) && (mode->flags & DRM_MODE_FLAG_INTERLACE)) 1575 if (ASIC_IS_DCE4(rdev)) {
1576 WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset, 1576 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
1577 AVIVO_D1MODE_INTERLEAVE_EN); 1577 WREG32(EVERGREEN_DATA_FORMAT + radeon_crtc->crtc_offset,
1578 EVERGREEN_INTERLEAVE_EN);
1579 else
1580 WREG32(EVERGREEN_DATA_FORMAT + radeon_crtc->crtc_offset, 0);
1581 } else {
1582 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
1583 WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset,
1584 AVIVO_D1MODE_INTERLEAVE_EN);
1585 else
1586 WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset, 0);
1587 }
1578 } 1588 }
1579} 1589}
1580 1590
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index 66324b5bb5ba..cc44bdfec80f 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -113,11 +113,14 @@ static int radeonfb_create_pinned_object(struct radeon_fbdev *rfbdev,
113 u32 tiling_flags = 0; 113 u32 tiling_flags = 0;
114 int ret; 114 int ret;
115 int aligned_size, size; 115 int aligned_size, size;
116 int height = mode_cmd->height;
116 117
117 /* need to align pitch with crtc limits */ 118 /* need to align pitch with crtc limits */
118 mode_cmd->pitch = radeon_align_pitch(rdev, mode_cmd->width, mode_cmd->bpp, fb_tiled) * ((mode_cmd->bpp + 1) / 8); 119 mode_cmd->pitch = radeon_align_pitch(rdev, mode_cmd->width, mode_cmd->bpp, fb_tiled) * ((mode_cmd->bpp + 1) / 8);
119 120
120 size = mode_cmd->pitch * mode_cmd->height; 121 if (rdev->family >= CHIP_R600)
122 height = ALIGN(mode_cmd->height, 8);
123 size = mode_cmd->pitch * height;
121 aligned_size = ALIGN(size, PAGE_SIZE); 124 aligned_size = ALIGN(size, PAGE_SIZE);
122 ret = radeon_gem_object_create(rdev, aligned_size, 0, 125 ret = radeon_gem_object_create(rdev, aligned_size, 0,
123 RADEON_GEM_DOMAIN_VRAM, 126 RADEON_GEM_DOMAIN_VRAM,
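[editor's note] The radeon_fb.c change sizes the fbdev scanout buffer with the height rounded up to a multiple of 8 lines on R600 and newer, presumably so operations that work in 8-line blocks stay inside the allocation. A small worked example of the arithmetic (EXAMPLE_ALIGN mirrors the kernel's ALIGN macro for power-of-two alignments):

#define EXAMPLE_ALIGN(x, a)  (((x) + (a) - 1) & ~((a) - 1))

/* a 1680x1050 console, pitch already aligned by radeon_align_pitch():
 *   height = EXAMPLE_ALIGN(1050, 8) = 1056
 *   size   = pitch * 1056            (was pitch * 1050)
 * aligned_size then rounds the byte count up to a whole page. */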
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index df95eb83dac6..1fe95dfe48c9 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -156,9 +156,12 @@ int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
156{ 156{
157 struct radeon_device *rdev = dev->dev_private; 157 struct radeon_device *rdev = dev->dev_private;
158 struct drm_radeon_gem_info *args = data; 158 struct drm_radeon_gem_info *args = data;
159 struct ttm_mem_type_manager *man;
160
161 man = &rdev->mman.bdev.man[TTM_PL_VRAM];
159 162
160 args->vram_size = rdev->mc.real_vram_size; 163 args->vram_size = rdev->mc.real_vram_size;
161 args->vram_visible = rdev->mc.real_vram_size; 164 args->vram_visible = (u64)man->size << PAGE_SHIFT;
162 if (rdev->stollen_vga_memory) 165 if (rdev->stollen_vga_memory)
163 args->vram_visible -= radeon_bo_size(rdev->stollen_vga_memory); 166 args->vram_visible -= radeon_bo_size(rdev->stollen_vga_memory);
164 args->vram_visible -= radeon_fbdev_total_size(rdev); 167 args->vram_visible -= radeon_fbdev_total_size(rdev);
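[editor's note] With the GEM info ioctl change above, vram_visible is derived from the TTM VRAM manager rather than the raw VRAM size, so it automatically tracks radeon_ttm_set_active_vram_size() added further down in radeon_ttm.c. man->size counts pages; a quick unit check of the conversion:

/* man->size is in pages, so on a 4 KiB-page kernel
 *   65536 pages << PAGE_SHIFT = 0x10000000 bytes = 256 MiB,
 * which is then reduced by the stolen-VGA and fbdev allocations
 * exactly as before. */
u64 visible = (u64)man->size << PAGE_SHIFT;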
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
index cf0638c3b7c7..78968b738e88 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
@@ -443,7 +443,7 @@ int radeon_crtc_do_set_base(struct drm_crtc *crtc,
443 (target_fb->bits_per_pixel * 8)); 443 (target_fb->bits_per_pixel * 8));
444 crtc_pitch |= crtc_pitch << 16; 444 crtc_pitch |= crtc_pitch << 16;
445 445
446 446 crtc_offset_cntl |= RADEON_CRTC_GUI_TRIG_OFFSET_LEFT_EN;
447 if (tiling_flags & RADEON_TILING_MACRO) { 447 if (tiling_flags & RADEON_TILING_MACRO) {
448 if (ASIC_IS_R300(rdev)) 448 if (ASIC_IS_R300(rdev))
449 crtc_offset_cntl |= (R300_CRTC_X_Y_MODE_EN | 449 crtc_offset_cntl |= (R300_CRTC_X_Y_MODE_EN |
@@ -502,6 +502,7 @@ int radeon_crtc_do_set_base(struct drm_crtc *crtc,
502 gen_cntl_val = RREG32(gen_cntl_reg); 502 gen_cntl_val = RREG32(gen_cntl_reg);
503 gen_cntl_val &= ~(0xf << 8); 503 gen_cntl_val &= ~(0xf << 8);
504 gen_cntl_val |= (format << 8); 504 gen_cntl_val |= (format << 8);
505 gen_cntl_val &= ~RADEON_CRTC_VSTAT_MODE_MASK;
505 WREG32(gen_cntl_reg, gen_cntl_val); 506 WREG32(gen_cntl_reg, gen_cntl_val);
506 507
507 crtc_offset = (u32)base; 508 crtc_offset = (u32)base;
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 6794cdf91f28..a670caaee29e 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -209,6 +209,7 @@ enum radeon_connector_table {
209 CT_EMAC, 209 CT_EMAC,
210 CT_RN50_POWER, 210 CT_RN50_POWER,
211 CT_MAC_X800, 211 CT_MAC_X800,
212 CT_MAC_G5_9600,
212}; 213};
213 214
214enum radeon_dvo_chip { 215enum radeon_dvo_chip {
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 1272e4b6a1d4..8389b4c63d12 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -589,6 +589,20 @@ void radeon_ttm_fini(struct radeon_device *rdev)
589 DRM_INFO("radeon: ttm finalized\n"); 589 DRM_INFO("radeon: ttm finalized\n");
590} 590}
591 591
592/* this should only be called at bootup or when userspace
593 * isn't running */
594void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size)
595{
596 struct ttm_mem_type_manager *man;
597
598 if (!rdev->mman.initialized)
599 return;
600
601 man = &rdev->mman.bdev.man[TTM_PL_VRAM];
602 /* this just adjusts TTM size idea, which sets lpfn to the correct value */
603 man->size = size >> PAGE_SHIFT;
604}
605
592static struct vm_operations_struct radeon_ttm_vm_ops; 606static struct vm_operations_struct radeon_ttm_vm_ops;
593static const struct vm_operations_struct *ttm_vm_ops = NULL; 607static const struct vm_operations_struct *ttm_vm_ops = NULL;
594 608
@@ -787,9 +801,9 @@ static int radeon_ttm_debugfs_init(struct radeon_device *rdev)
787 radeon_mem_types_list[i].show = &radeon_mm_dump_table; 801 radeon_mem_types_list[i].show = &radeon_mm_dump_table;
788 radeon_mem_types_list[i].driver_features = 0; 802 radeon_mem_types_list[i].driver_features = 0;
789 if (i == 0) 803 if (i == 0)
790 radeon_mem_types_list[i].data = &rdev->mman.bdev.man[TTM_PL_VRAM].priv; 804 radeon_mem_types_list[i].data = rdev->mman.bdev.man[TTM_PL_VRAM].priv;
791 else 805 else
792 radeon_mem_types_list[i].data = &rdev->mman.bdev.man[TTM_PL_TT].priv; 806 radeon_mem_types_list[i].data = rdev->mman.bdev.man[TTM_PL_TT].priv;
793 807
794 } 808 }
795 /* Add ttm page pool to debugfs */ 809 /* Add ttm page pool to debugfs */
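[editor's note] radeon_ttm_set_active_vram_size() replaces the per-ASIC mc.active_vram_size bookkeeping that the rest of this series removes (see the rs600/rs690/rv770 hunks below): instead of carrying a shadow value, the driver adjusts TTM's notion of the VRAM zone directly, which bounds where new buffers may be placed. A hedged sketch of the intended calling pattern; the restore on resume is an assumption inferred from r700_cp_stop() below, not a hunk shown here:

static void example_cp_stop(struct radeon_device *rdev)
{
	/* while the CP/blitter is down, only CPU-visible VRAM is safe */
	radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
	/* ... halt the CP ... */
}

static void example_cp_resume(struct radeon_device *rdev)
{
	/* ... bring the ring back up ... */
	/* presumed counterpart: open the full VRAM range again */
	radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
}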
diff --git a/drivers/gpu/drm/radeon/reg_srcs/r300 b/drivers/gpu/drm/radeon/reg_srcs/r300
index b506ec1cab4b..e8a1786b6426 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/r300
+++ b/drivers/gpu/drm/radeon/reg_srcs/r300
@@ -683,9 +683,7 @@ r300 0x4f60
6830x4DF4 US_ALU_CONST_G_31 6830x4DF4 US_ALU_CONST_G_31
6840x4DF8 US_ALU_CONST_B_31 6840x4DF8 US_ALU_CONST_B_31
6850x4DFC US_ALU_CONST_A_31 6850x4DFC US_ALU_CONST_A_31
6860x4E04 RB3D_BLENDCNTL_R3
6870x4E08 RB3D_ABLENDCNTL_R3 6860x4E08 RB3D_ABLENDCNTL_R3
6880x4E0C RB3D_COLOR_CHANNEL_MASK
6890x4E10 RB3D_CONSTANT_COLOR 6870x4E10 RB3D_CONSTANT_COLOR
6900x4E14 RB3D_COLOR_CLEAR_VALUE 6880x4E14 RB3D_COLOR_CLEAR_VALUE
6910x4E18 RB3D_ROPCNTL_R3 6890x4E18 RB3D_ROPCNTL_R3
@@ -706,13 +704,11 @@ r300 0x4f60
7060x4E74 RB3D_CMASK_WRINDEX 7040x4E74 RB3D_CMASK_WRINDEX
7070x4E78 RB3D_CMASK_DWORD 7050x4E78 RB3D_CMASK_DWORD
7080x4E7C RB3D_CMASK_RDINDEX 7060x4E7C RB3D_CMASK_RDINDEX
7090x4E80 RB3D_AARESOLVE_OFFSET
7100x4E84 RB3D_AARESOLVE_PITCH
7110x4E88 RB3D_AARESOLVE_CTL
7120x4EA0 RB3D_DISCARD_SRC_PIXEL_LTE_THRESHOLD 7070x4EA0 RB3D_DISCARD_SRC_PIXEL_LTE_THRESHOLD
7130x4EA4 RB3D_DISCARD_SRC_PIXEL_GTE_THRESHOLD 7080x4EA4 RB3D_DISCARD_SRC_PIXEL_GTE_THRESHOLD
7140x4F04 ZB_ZSTENCILCNTL 7090x4F04 ZB_ZSTENCILCNTL
7150x4F08 ZB_STENCILREFMASK 7100x4F08 ZB_STENCILREFMASK
7160x4F14 ZB_ZTOP 7110x4F14 ZB_ZTOP
7170x4F18 ZB_ZCACHE_CTLSTAT 7120x4F18 ZB_ZCACHE_CTLSTAT
7130x4F28 ZB_DEPTHCLEARVALUE
7180x4F58 ZB_ZPASS_DATA 7140x4F58 ZB_ZPASS_DATA
diff --git a/drivers/gpu/drm/radeon/reg_srcs/r420 b/drivers/gpu/drm/radeon/reg_srcs/r420
index 8c1214c2390f..722074e21e2f 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/r420
+++ b/drivers/gpu/drm/radeon/reg_srcs/r420
@@ -130,7 +130,6 @@ r420 0x4f60
1300x401C GB_SELECT 1300x401C GB_SELECT
1310x4020 GB_AA_CONFIG 1310x4020 GB_AA_CONFIG
1320x4024 GB_FIFO_SIZE 1320x4024 GB_FIFO_SIZE
1330x4028 GB_Z_PEQ_CONFIG
1340x4100 TX_INVALTAGS 1330x4100 TX_INVALTAGS
1350x4200 GA_POINT_S0 1340x4200 GA_POINT_S0
1360x4204 GA_POINT_T0 1350x4204 GA_POINT_T0
@@ -750,9 +749,7 @@ r420 0x4f60
7500x4DF4 US_ALU_CONST_G_31 7490x4DF4 US_ALU_CONST_G_31
7510x4DF8 US_ALU_CONST_B_31 7500x4DF8 US_ALU_CONST_B_31
7520x4DFC US_ALU_CONST_A_31 7510x4DFC US_ALU_CONST_A_31
7530x4E04 RB3D_BLENDCNTL_R3
7540x4E08 RB3D_ABLENDCNTL_R3 7520x4E08 RB3D_ABLENDCNTL_R3
7550x4E0C RB3D_COLOR_CHANNEL_MASK
7560x4E10 RB3D_CONSTANT_COLOR 7530x4E10 RB3D_CONSTANT_COLOR
7570x4E14 RB3D_COLOR_CLEAR_VALUE 7540x4E14 RB3D_COLOR_CLEAR_VALUE
7580x4E18 RB3D_ROPCNTL_R3 7550x4E18 RB3D_ROPCNTL_R3
@@ -773,13 +770,11 @@ r420 0x4f60
7730x4E74 RB3D_CMASK_WRINDEX 7700x4E74 RB3D_CMASK_WRINDEX
7740x4E78 RB3D_CMASK_DWORD 7710x4E78 RB3D_CMASK_DWORD
7750x4E7C RB3D_CMASK_RDINDEX 7720x4E7C RB3D_CMASK_RDINDEX
7760x4E80 RB3D_AARESOLVE_OFFSET
7770x4E84 RB3D_AARESOLVE_PITCH
7780x4E88 RB3D_AARESOLVE_CTL
7790x4EA0 RB3D_DISCARD_SRC_PIXEL_LTE_THRESHOLD 7730x4EA0 RB3D_DISCARD_SRC_PIXEL_LTE_THRESHOLD
7800x4EA4 RB3D_DISCARD_SRC_PIXEL_GTE_THRESHOLD 7740x4EA4 RB3D_DISCARD_SRC_PIXEL_GTE_THRESHOLD
7810x4F04 ZB_ZSTENCILCNTL 7750x4F04 ZB_ZSTENCILCNTL
7820x4F08 ZB_STENCILREFMASK 7760x4F08 ZB_STENCILREFMASK
7830x4F14 ZB_ZTOP 7770x4F14 ZB_ZTOP
7840x4F18 ZB_ZCACHE_CTLSTAT 7780x4F18 ZB_ZCACHE_CTLSTAT
7790x4F28 ZB_DEPTHCLEARVALUE
7850x4F58 ZB_ZPASS_DATA 7800x4F58 ZB_ZPASS_DATA
diff --git a/drivers/gpu/drm/radeon/reg_srcs/rs600 b/drivers/gpu/drm/radeon/reg_srcs/rs600
index 0828d80396f2..d9f62866bbc1 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/rs600
+++ b/drivers/gpu/drm/radeon/reg_srcs/rs600
@@ -749,9 +749,7 @@ rs600 0x6d40
7490x4DF4 US_ALU_CONST_G_31 7490x4DF4 US_ALU_CONST_G_31
7500x4DF8 US_ALU_CONST_B_31 7500x4DF8 US_ALU_CONST_B_31
7510x4DFC US_ALU_CONST_A_31 7510x4DFC US_ALU_CONST_A_31
7520x4E04 RB3D_BLENDCNTL_R3
7530x4E08 RB3D_ABLENDCNTL_R3 7520x4E08 RB3D_ABLENDCNTL_R3
7540x4E0C RB3D_COLOR_CHANNEL_MASK
7550x4E10 RB3D_CONSTANT_COLOR 7530x4E10 RB3D_CONSTANT_COLOR
7560x4E14 RB3D_COLOR_CLEAR_VALUE 7540x4E14 RB3D_COLOR_CLEAR_VALUE
7570x4E18 RB3D_ROPCNTL_R3 7550x4E18 RB3D_ROPCNTL_R3
@@ -772,13 +770,11 @@ rs600 0x6d40
7720x4E74 RB3D_CMASK_WRINDEX 7700x4E74 RB3D_CMASK_WRINDEX
7730x4E78 RB3D_CMASK_DWORD 7710x4E78 RB3D_CMASK_DWORD
7740x4E7C RB3D_CMASK_RDINDEX 7720x4E7C RB3D_CMASK_RDINDEX
7750x4E80 RB3D_AARESOLVE_OFFSET
7760x4E84 RB3D_AARESOLVE_PITCH
7770x4E88 RB3D_AARESOLVE_CTL
7780x4EA0 RB3D_DISCARD_SRC_PIXEL_LTE_THRESHOLD 7730x4EA0 RB3D_DISCARD_SRC_PIXEL_LTE_THRESHOLD
7790x4EA4 RB3D_DISCARD_SRC_PIXEL_GTE_THRESHOLD 7740x4EA4 RB3D_DISCARD_SRC_PIXEL_GTE_THRESHOLD
7800x4F04 ZB_ZSTENCILCNTL 7750x4F04 ZB_ZSTENCILCNTL
7810x4F08 ZB_STENCILREFMASK 7760x4F08 ZB_STENCILREFMASK
7820x4F14 ZB_ZTOP 7770x4F14 ZB_ZTOP
7830x4F18 ZB_ZCACHE_CTLSTAT 7780x4F18 ZB_ZCACHE_CTLSTAT
7790x4F28 ZB_DEPTHCLEARVALUE
7840x4F58 ZB_ZPASS_DATA 7800x4F58 ZB_ZPASS_DATA
diff --git a/drivers/gpu/drm/radeon/reg_srcs/rv515 b/drivers/gpu/drm/radeon/reg_srcs/rv515
index ef422bbacfc1..911a8fbd32bb 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/rv515
+++ b/drivers/gpu/drm/radeon/reg_srcs/rv515
@@ -164,7 +164,6 @@ rv515 0x6d40
1640x401C GB_SELECT 1640x401C GB_SELECT
1650x4020 GB_AA_CONFIG 1650x4020 GB_AA_CONFIG
1660x4024 GB_FIFO_SIZE 1660x4024 GB_FIFO_SIZE
1670x4028 GB_Z_PEQ_CONFIG
1680x4100 TX_INVALTAGS 1670x4100 TX_INVALTAGS
1690x4114 SU_TEX_WRAP_PS3 1680x4114 SU_TEX_WRAP_PS3
1700x4118 PS3_ENABLE 1690x4118 PS3_ENABLE
@@ -461,9 +460,7 @@ rv515 0x6d40
4610x4DF4 US_ALU_CONST_G_31 4600x4DF4 US_ALU_CONST_G_31
4620x4DF8 US_ALU_CONST_B_31 4610x4DF8 US_ALU_CONST_B_31
4630x4DFC US_ALU_CONST_A_31 4620x4DFC US_ALU_CONST_A_31
4640x4E04 RB3D_BLENDCNTL_R3
4650x4E08 RB3D_ABLENDCNTL_R3 4630x4E08 RB3D_ABLENDCNTL_R3
4660x4E0C RB3D_COLOR_CHANNEL_MASK
4670x4E10 RB3D_CONSTANT_COLOR 4640x4E10 RB3D_CONSTANT_COLOR
4680x4E14 RB3D_COLOR_CLEAR_VALUE 4650x4E14 RB3D_COLOR_CLEAR_VALUE
4690x4E18 RB3D_ROPCNTL_R3 4660x4E18 RB3D_ROPCNTL_R3
@@ -484,9 +481,6 @@ rv515 0x6d40
4840x4E74 RB3D_CMASK_WRINDEX 4810x4E74 RB3D_CMASK_WRINDEX
4850x4E78 RB3D_CMASK_DWORD 4820x4E78 RB3D_CMASK_DWORD
4860x4E7C RB3D_CMASK_RDINDEX 4830x4E7C RB3D_CMASK_RDINDEX
4870x4E80 RB3D_AARESOLVE_OFFSET
4880x4E84 RB3D_AARESOLVE_PITCH
4890x4E88 RB3D_AARESOLVE_CTL
4900x4EA0 RB3D_DISCARD_SRC_PIXEL_LTE_THRESHOLD 4840x4EA0 RB3D_DISCARD_SRC_PIXEL_LTE_THRESHOLD
4910x4EA4 RB3D_DISCARD_SRC_PIXEL_GTE_THRESHOLD 4850x4EA4 RB3D_DISCARD_SRC_PIXEL_GTE_THRESHOLD
4920x4EF8 RB3D_CONSTANT_COLOR_AR 4860x4EF8 RB3D_CONSTANT_COLOR_AR
@@ -496,4 +490,5 @@ rv515 0x6d40
4960x4F14 ZB_ZTOP 4900x4F14 ZB_ZTOP
4970x4F18 ZB_ZCACHE_CTLSTAT 4910x4F18 ZB_ZCACHE_CTLSTAT
4980x4F58 ZB_ZPASS_DATA 4920x4F58 ZB_ZPASS_DATA
4930x4F28 ZB_DEPTHCLEARVALUE
4990x4FD4 ZB_STENCILREFMASK_BF 4940x4FD4 ZB_STENCILREFMASK_BF
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index 5afe294ed51f..8af4679db23e 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -751,7 +751,6 @@ void rs600_mc_init(struct radeon_device *rdev)
751 rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE); 751 rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
752 rdev->mc.mc_vram_size = rdev->mc.real_vram_size; 752 rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
753 rdev->mc.visible_vram_size = rdev->mc.aper_size; 753 rdev->mc.visible_vram_size = rdev->mc.aper_size;
754 rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
755 rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev); 754 rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
756 base = RREG32_MC(R_000004_MC_FB_LOCATION); 755 base = RREG32_MC(R_000004_MC_FB_LOCATION);
757 base = G_000004_MC_FB_START(base) << 16; 756 base = G_000004_MC_FB_START(base) << 16;
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index 0137d3e3728d..66c949b7c18c 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -77,9 +77,9 @@ void rs690_pm_info(struct radeon_device *rdev)
77 switch (crev) { 77 switch (crev) {
78 case 1: 78 case 1:
79 tmp.full = dfixed_const(100); 79 tmp.full = dfixed_const(100);
80 rdev->pm.igp_sideport_mclk.full = dfixed_const(info->info.ulBootUpMemoryClock); 80 rdev->pm.igp_sideport_mclk.full = dfixed_const(le32_to_cpu(info->info.ulBootUpMemoryClock));
81 rdev->pm.igp_sideport_mclk.full = dfixed_div(rdev->pm.igp_sideport_mclk, tmp); 81 rdev->pm.igp_sideport_mclk.full = dfixed_div(rdev->pm.igp_sideport_mclk, tmp);
82 if (info->info.usK8MemoryClock) 82 if (le16_to_cpu(info->info.usK8MemoryClock))
83 rdev->pm.igp_system_mclk.full = dfixed_const(le16_to_cpu(info->info.usK8MemoryClock)); 83 rdev->pm.igp_system_mclk.full = dfixed_const(le16_to_cpu(info->info.usK8MemoryClock));
84 else if (rdev->clock.default_mclk) { 84 else if (rdev->clock.default_mclk) {
85 rdev->pm.igp_system_mclk.full = dfixed_const(rdev->clock.default_mclk); 85 rdev->pm.igp_system_mclk.full = dfixed_const(rdev->clock.default_mclk);
@@ -91,16 +91,16 @@ void rs690_pm_info(struct radeon_device *rdev)
91 break; 91 break;
92 case 2: 92 case 2:
93 tmp.full = dfixed_const(100); 93 tmp.full = dfixed_const(100);
94 rdev->pm.igp_sideport_mclk.full = dfixed_const(info->info_v2.ulBootUpSidePortClock); 94 rdev->pm.igp_sideport_mclk.full = dfixed_const(le32_to_cpu(info->info_v2.ulBootUpSidePortClock));
95 rdev->pm.igp_sideport_mclk.full = dfixed_div(rdev->pm.igp_sideport_mclk, tmp); 95 rdev->pm.igp_sideport_mclk.full = dfixed_div(rdev->pm.igp_sideport_mclk, tmp);
96 if (info->info_v2.ulBootUpUMAClock) 96 if (le32_to_cpu(info->info_v2.ulBootUpUMAClock))
97 rdev->pm.igp_system_mclk.full = dfixed_const(info->info_v2.ulBootUpUMAClock); 97 rdev->pm.igp_system_mclk.full = dfixed_const(le32_to_cpu(info->info_v2.ulBootUpUMAClock));
98 else if (rdev->clock.default_mclk) 98 else if (rdev->clock.default_mclk)
99 rdev->pm.igp_system_mclk.full = dfixed_const(rdev->clock.default_mclk); 99 rdev->pm.igp_system_mclk.full = dfixed_const(rdev->clock.default_mclk);
100 else 100 else
101 rdev->pm.igp_system_mclk.full = dfixed_const(66700); 101 rdev->pm.igp_system_mclk.full = dfixed_const(66700);
102 rdev->pm.igp_system_mclk.full = dfixed_div(rdev->pm.igp_system_mclk, tmp); 102 rdev->pm.igp_system_mclk.full = dfixed_div(rdev->pm.igp_system_mclk, tmp);
103 rdev->pm.igp_ht_link_clk.full = dfixed_const(info->info_v2.ulHTLinkFreq); 103 rdev->pm.igp_ht_link_clk.full = dfixed_const(le32_to_cpu(info->info_v2.ulHTLinkFreq));
104 rdev->pm.igp_ht_link_clk.full = dfixed_div(rdev->pm.igp_ht_link_clk, tmp); 104 rdev->pm.igp_ht_link_clk.full = dfixed_div(rdev->pm.igp_ht_link_clk, tmp);
105 rdev->pm.igp_ht_link_width.full = dfixed_const(le16_to_cpu(info->info_v2.usMinHTLinkWidth)); 105 rdev->pm.igp_ht_link_width.full = dfixed_const(le16_to_cpu(info->info_v2.usMinHTLinkWidth));
106 break; 106 break;
@@ -157,7 +157,6 @@ void rs690_mc_init(struct radeon_device *rdev)
157 rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0); 157 rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
158 rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0); 158 rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
159 rdev->mc.visible_vram_size = rdev->mc.aper_size; 159 rdev->mc.visible_vram_size = rdev->mc.aper_size;
160 rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
161 base = RREG32_MC(R_000100_MCCFG_FB_LOCATION); 160 base = RREG32_MC(R_000100_MCCFG_FB_LOCATION);
162 base = G_000100_MC_FB_START(base) << 16; 161 base = G_000100_MC_FB_START(base) << 16;
163 rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev); 162 rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 2211a323db41..714ad45757d0 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -307,7 +307,7 @@ static void rv770_mc_program(struct radeon_device *rdev)
307 */ 307 */
308void r700_cp_stop(struct radeon_device *rdev) 308void r700_cp_stop(struct radeon_device *rdev)
309{ 309{
310 rdev->mc.active_vram_size = rdev->mc.visible_vram_size; 310 radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
311 WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT)); 311 WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT));
312 WREG32(SCRATCH_UMSK, 0); 312 WREG32(SCRATCH_UMSK, 0);
313} 313}
@@ -321,7 +321,11 @@ static int rv770_cp_load_microcode(struct radeon_device *rdev)
321 return -EINVAL; 321 return -EINVAL;
322 322
323 r700_cp_stop(rdev); 323 r700_cp_stop(rdev);
324 WREG32(CP_RB_CNTL, RB_NO_UPDATE | (15 << 8) | (3 << 0)); 324 WREG32(CP_RB_CNTL,
325#ifdef __BIG_ENDIAN
326 BUF_SWAP_32BIT |
327#endif
328 RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3));
325 329
326 /* Reset cp */ 330 /* Reset cp */
327 WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP); 331 WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
@@ -1119,7 +1123,6 @@ int rv770_mc_init(struct radeon_device *rdev)
1119 rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE); 1123 rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
1120 rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE); 1124 rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
1121 rdev->mc.visible_vram_size = rdev->mc.aper_size; 1125 rdev->mc.visible_vram_size = rdev->mc.aper_size;
1122 rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
1123 r700_vram_gtt_location(rdev, &rdev->mc); 1126 r700_vram_gtt_location(rdev, &rdev->mc);
1124 radeon_update_bandwidth_info(rdev); 1127 radeon_update_bandwidth_info(rdev);
1125 1128
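[editor's note] The rv770.c ring setup now sets the byte-swap field when the kernel is built big-endian. Using the CP_RB_CNTL bit definitions from rv770d.h just below, the written value works out as follows (a worked example, not new code):

/*   RB_NO_UPDATE   = 1 << 27 = 0x08000000
 *   RB_BLKSZ(15)   = 15 << 8 = 0x00000F00
 *   RB_BUFSZ(3)    = 3 << 0  = 0x00000003
 *   BUF_SWAP_32BIT = 2 << 16 = 0x00020000   (big-endian builds only)
 *
 * => WREG32(CP_RB_CNTL, 0x08000F03) on little-endian hosts,
 *    WREG32(CP_RB_CNTL, 0x08020F03) on big-endian hosts, which makes
 *    the CP byte-swap its 32-bit ring-buffer reads. */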
diff --git a/drivers/gpu/drm/radeon/rv770d.h b/drivers/gpu/drm/radeon/rv770d.h
index abc8cf5a3672..79fa588e9ed5 100644
--- a/drivers/gpu/drm/radeon/rv770d.h
+++ b/drivers/gpu/drm/radeon/rv770d.h
@@ -76,10 +76,10 @@
76#define ROQ_IB1_START(x) ((x) << 0) 76#define ROQ_IB1_START(x) ((x) << 0)
77#define ROQ_IB2_START(x) ((x) << 8) 77#define ROQ_IB2_START(x) ((x) << 8)
78#define CP_RB_CNTL 0xC104 78#define CP_RB_CNTL 0xC104
79#define RB_BUFSZ(x) ((x)<<0) 79#define RB_BUFSZ(x) ((x) << 0)
80#define RB_BLKSZ(x) ((x)<<8) 80#define RB_BLKSZ(x) ((x) << 8)
81#define RB_NO_UPDATE (1<<27) 81#define RB_NO_UPDATE (1 << 27)
82#define RB_RPTR_WR_ENA (1<<31) 82#define RB_RPTR_WR_ENA (1 << 31)
83#define BUF_SWAP_32BIT (2 << 16) 83#define BUF_SWAP_32BIT (2 << 16)
84#define CP_RB_RPTR 0x8700 84#define CP_RB_RPTR 0x8700
85#define CP_RB_RPTR_ADDR 0xC10C 85#define CP_RB_RPTR_ADDR 0xC10C