Diffstat (limited to 'drivers/gpu')
32 files changed, 271 insertions, 151 deletions
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 6977a1ce9d98..f73ef4390db6 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -672,7 +672,7 @@ int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info) | |||
672 | struct drm_crtc_helper_funcs *crtc_funcs; | 672 | struct drm_crtc_helper_funcs *crtc_funcs; |
673 | u16 *red, *green, *blue, *transp; | 673 | u16 *red, *green, *blue, *transp; |
674 | struct drm_crtc *crtc; | 674 | struct drm_crtc *crtc; |
675 | int i, rc = 0; | 675 | int i, j, rc = 0; |
676 | int start; | 676 | int start; |
677 | 677 | ||
678 | for (i = 0; i < fb_helper->crtc_count; i++) { | 678 | for (i = 0; i < fb_helper->crtc_count; i++) { |
@@ -685,7 +685,7 @@ int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info) | |||
685 | transp = cmap->transp; | 685 | transp = cmap->transp; |
686 | start = cmap->start; | 686 | start = cmap->start; |
687 | 687 | ||
688 | for (i = 0; i < cmap->len; i++) { | 688 | for (j = 0; j < cmap->len; j++) { |
689 | u16 hred, hgreen, hblue, htransp = 0xffff; | 689 | u16 hred, hgreen, hblue, htransp = 0xffff; |
690 | 690 | ||
691 | hred = *red++; | 691 | hred = *red++; |
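The rename above matters because the inner colormap loop previously reused the outer CRTC loop counter, corrupting the per-CRTC iteration after the first pass. A minimal sketch of the shadowing pattern being fixed (illustrative names, not the driver code):

	/* Before: both loops share "i", so walking the cmap entries also
	 * advances the outer CRTC iteration as a side effect. */
	for (i = 0; i < crtc_count; i++)
		for (i = 0; i < cmap_len; i++)
			set_gamma_entry(i);

	/* After: an independent counter "j" keeps the two loops decoupled. */
	for (i = 0; i < crtc_count; i++)
		for (j = 0; j < cmap_len; j++)
			set_gamma_entry(j);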
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 3dadfa2a8528..28d1d3c24d65 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -164,8 +164,10 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc) | |||
164 | * available. In that case we can't account for this and just | 164 | * available. In that case we can't account for this and just |
165 | * hope for the best. | 165 | * hope for the best. |
166 | */ | 166 | */ |
167 | if ((vblrc > 0) && (abs(diff_ns) > 1000000)) | 167 | if ((vblrc > 0) && (abs64(diff_ns) > 1000000)) { |
168 | atomic_inc(&dev->_vblank_count[crtc]); | 168 | atomic_inc(&dev->_vblank_count[crtc]); |
169 | smp_mb__after_atomic_inc(); | ||
170 | } | ||
169 | 171 | ||
170 | /* Invalidate all timestamps while vblank irq's are off. */ | 172 | /* Invalidate all timestamps while vblank irq's are off. */ |
171 | clear_vblank_timestamps(dev, crtc); | 173 | clear_vblank_timestamps(dev, crtc); |
@@ -491,6 +493,12 @@ void drm_calc_timestamping_constants(struct drm_crtc *crtc) | |||
491 | /* Dot clock in Hz: */ | 493 | /* Dot clock in Hz: */ |
492 | dotclock = (u64) crtc->hwmode.clock * 1000; | 494 | dotclock = (u64) crtc->hwmode.clock * 1000; |
493 | 495 | ||
496 | /* Fields of interlaced scanout modes are only halve a frame duration. | ||
497 | * Double the dotclock to get halve the frame-/line-/pixelduration. | ||
498 | */ | ||
499 | if (crtc->hwmode.flags & DRM_MODE_FLAG_INTERLACE) | ||
500 | dotclock *= 2; | ||
501 | |||
494 | /* Valid dotclock? */ | 502 | /* Valid dotclock? */ |
495 | if (dotclock > 0) { | 503 | if (dotclock > 0) { |
496 | /* Convert scanline length in pixels and video dot clock to | 504 | /* Convert scanline length in pixels and video dot clock to |
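To see why doubling the dotclock yields per-field constants, a rough calculation with assumed 1080i-style timings (numbers invented for illustration, not taken from the patch):

	/*
	 * htotal * vtotal = 2200 * 1125 = 2,475,000 pixels per frame
	 * dotclock        = 74,250,000 Hz
	 *
	 * framedur = 2,475,000 / 74.25 MHz  ~= 33.3 ms   (full frame)
	 * framedur = 2,475,000 / 148.5 MHz  ~= 16.7 ms   (one field, after
	 *                                                 doubling the clock)
	 *
	 * 16.7 ms is the actual interval between vblanks on an interlaced
	 * scanout, which is what the timestamping constants need to describe.
	 */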
@@ -603,14 +611,6 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc, | |||
603 | return -EAGAIN; | 611 | return -EAGAIN; |
604 | } | 612 | } |
605 | 613 | ||
606 | /* Don't know yet how to handle interlaced or | ||
607 | * double scan modes. Just no-op for now. | ||
608 | */ | ||
609 | if (mode->flags & (DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLSCAN)) { | ||
610 | DRM_DEBUG("crtc %d: Noop due to unsupported mode.\n", crtc); | ||
611 | return -ENOTSUPP; | ||
612 | } | ||
613 | |||
614 | /* Get current scanout position with system timestamp. | 614 | /* Get current scanout position with system timestamp. |
615 | * Repeat query up to DRM_TIMESTAMP_MAXRETRIES times | 615 | * Repeat query up to DRM_TIMESTAMP_MAXRETRIES times |
616 | * if single query takes longer than max_error nanoseconds. | 616 | * if single query takes longer than max_error nanoseconds. |
@@ -858,10 +858,11 @@ static void drm_update_vblank_count(struct drm_device *dev, int crtc) | |||
858 | if (rc) { | 858 | if (rc) { |
859 | tslot = atomic_read(&dev->_vblank_count[crtc]) + diff; | 859 | tslot = atomic_read(&dev->_vblank_count[crtc]) + diff; |
860 | vblanktimestamp(dev, crtc, tslot) = t_vblank; | 860 | vblanktimestamp(dev, crtc, tslot) = t_vblank; |
861 | smp_wmb(); | ||
862 | } | 861 | } |
863 | 862 | ||
863 | smp_mb__before_atomic_inc(); | ||
864 | atomic_add(diff, &dev->_vblank_count[crtc]); | 864 | atomic_add(diff, &dev->_vblank_count[crtc]); |
865 | smp_mb__after_atomic_inc(); | ||
865 | } | 866 | } |
866 | 867 | ||
867 | /** | 868 | /** |
@@ -1011,7 +1012,8 @@ int drm_modeset_ctl(struct drm_device *dev, void *data, | |||
1011 | struct drm_file *file_priv) | 1012 | struct drm_file *file_priv) |
1012 | { | 1013 | { |
1013 | struct drm_modeset_ctl *modeset = data; | 1014 | struct drm_modeset_ctl *modeset = data; |
1014 | int crtc, ret = 0; | 1015 | int ret = 0; |
1016 | unsigned int crtc; | ||
1015 | 1017 | ||
1016 | /* If drm_vblank_init() hasn't been called yet, just no-op */ | 1018 | /* If drm_vblank_init() hasn't been called yet, just no-op */ |
1017 | if (!dev->num_crtcs) | 1019 | if (!dev->num_crtcs) |
@@ -1293,15 +1295,16 @@ bool drm_handle_vblank(struct drm_device *dev, int crtc) | |||
1293 | * e.g., due to spurious vblank interrupts. We need to | 1295 | * e.g., due to spurious vblank interrupts. We need to |
1294 | * ignore those for accounting. | 1296 | * ignore those for accounting. |
1295 | */ | 1297 | */ |
1296 | if (abs(diff_ns) > DRM_REDUNDANT_VBLIRQ_THRESH_NS) { | 1298 | if (abs64(diff_ns) > DRM_REDUNDANT_VBLIRQ_THRESH_NS) { |
1297 | /* Store new timestamp in ringbuffer. */ | 1299 | /* Store new timestamp in ringbuffer. */ |
1298 | vblanktimestamp(dev, crtc, vblcount + 1) = tvblank; | 1300 | vblanktimestamp(dev, crtc, vblcount + 1) = tvblank; |
1299 | smp_wmb(); | ||
1300 | 1301 | ||
1301 | /* Increment cooked vblank count. This also atomically commits | 1302 | /* Increment cooked vblank count. This also atomically commits |
1302 | * the timestamp computed above. | 1303 | * the timestamp computed above. |
1303 | */ | 1304 | */ |
1305 | smp_mb__before_atomic_inc(); | ||
1304 | atomic_inc(&dev->_vblank_count[crtc]); | 1306 | atomic_inc(&dev->_vblank_count[crtc]); |
1307 | smp_mb__after_atomic_inc(); | ||
1305 | } else { | 1308 | } else { |
1306 | DRM_DEBUG("crtc %d: Redundant vblirq ignored. diff_ns = %d\n", | 1309 | DRM_DEBUG("crtc %d: Redundant vblirq ignored. diff_ns = %d\n", |
1307 | crtc, (int) diff_ns); | 1310 | crtc, (int) diff_ns); |
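The switch from smp_wmb() to full barriers around the atomic increment only makes sense together with the consumer side: a reader that wants a consistent (count, timestamp) pair samples the count on both sides of the timestamp read. Roughly (a sketch of the intended pairing, not a verbatim copy of the DRM reader):

	u32 cur;

	do {
		cur = atomic_read(&dev->_vblank_count[crtc]);
		*vblanktime = vblanktimestamp(dev, crtc, cur);
		smp_rmb();	/* pairs with smp_mb__*_atomic_inc() above */
	} while (cur != atomic_read(&dev->_vblank_count[crtc]));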
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 3601466c5502..4ff9b6cc973f 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -865,7 +865,7 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused) | |||
865 | int max_freq; | 865 | int max_freq; |
866 | 866 | ||
867 | /* RPSTAT1 is in the GT power well */ | 867 | /* RPSTAT1 is in the GT power well */ |
868 | __gen6_force_wake_get(dev_priv); | 868 | __gen6_gt_force_wake_get(dev_priv); |
869 | 869 | ||
870 | seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status); | 870 | seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status); |
871 | seq_printf(m, "RPSTAT1: 0x%08x\n", I915_READ(GEN6_RPSTAT1)); | 871 | seq_printf(m, "RPSTAT1: 0x%08x\n", I915_READ(GEN6_RPSTAT1)); |
@@ -888,7 +888,7 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused) | |||
888 | seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n", | 888 | seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n", |
889 | max_freq * 100); | 889 | max_freq * 100); |
890 | 890 | ||
891 | __gen6_force_wake_put(dev_priv); | 891 | __gen6_gt_force_wake_put(dev_priv); |
892 | } else { | 892 | } else { |
893 | seq_printf(m, "no P-state info available\n"); | 893 | seq_printf(m, "no P-state info available\n"); |
894 | } | 894 | } |
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 17bd766f2081..e33d9be7df3b 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1895,6 +1895,17 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) | |||
1895 | if (IS_GEN2(dev)) | 1895 | if (IS_GEN2(dev)) |
1896 | dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30)); | 1896 | dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30)); |
1897 | 1897 | ||
1898 | /* 965GM sometimes incorrectly writes to hardware status page (HWS) | ||
1899 | * using 32bit addressing, overwriting memory if HWS is located | ||
1900 | * above 4GB. | ||
1901 | * | ||
1902 | * The documentation also mentions an issue with undefined | ||
1903 | * behaviour if any general state is accessed within a page above 4GB, | ||
1904 | * which also needs to be handled carefully. | ||
1905 | */ | ||
1906 | if (IS_BROADWATER(dev) || IS_CRESTLINE(dev)) | ||
1907 | dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32)); | ||
1908 | |||
1898 | mmio_bar = IS_GEN2(dev) ? 1 : 0; | 1909 | mmio_bar = IS_GEN2(dev) ? 1 : 0; |
1899 | dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, 0); | 1910 | dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, 0); |
1900 | if (!dev_priv->regs) { | 1911 | if (!dev_priv->regs) { |
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 0ad533f06af9..22ec066adae6 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -46,6 +46,9 @@ module_param_named(fbpercrtc, i915_fbpercrtc, int, 0400); | |||
46 | unsigned int i915_powersave = 1; | 46 | unsigned int i915_powersave = 1; |
47 | module_param_named(powersave, i915_powersave, int, 0600); | 47 | module_param_named(powersave, i915_powersave, int, 0600); |
48 | 48 | ||
49 | unsigned int i915_semaphores = 0; | ||
50 | module_param_named(semaphores, i915_semaphores, int, 0600); | ||
51 | |||
49 | unsigned int i915_enable_rc6 = 0; | 52 | unsigned int i915_enable_rc6 = 0; |
50 | module_param_named(i915_enable_rc6, i915_enable_rc6, int, 0600); | 53 | module_param_named(i915_enable_rc6, i915_enable_rc6, int, 0600); |
51 | 54 | ||
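Because the new knob is registered with module_param_named(semaphores, ..., 0600), it should be controllable without a rebuild: i915.semaphores=1 on the kernel command line, semaphores=1 as a modprobe option, or by writing to /sys/module/i915/parameters/semaphores at runtime; the default of 0 keeps GPU semaphores disabled as a precaution against the SNB hangs mentioned in the execbuffer hunk later in this patch.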
@@ -254,7 +257,7 @@ void intel_detect_pch (struct drm_device *dev) | |||
254 | } | 257 | } |
255 | } | 258 | } |
256 | 259 | ||
257 | void __gen6_force_wake_get(struct drm_i915_private *dev_priv) | 260 | void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv) |
258 | { | 261 | { |
259 | int count; | 262 | int count; |
260 | 263 | ||
@@ -270,12 +273,22 @@ void __gen6_force_wake_get(struct drm_i915_private *dev_priv) | |||
270 | udelay(10); | 273 | udelay(10); |
271 | } | 274 | } |
272 | 275 | ||
273 | void __gen6_force_wake_put(struct drm_i915_private *dev_priv) | 276 | void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv) |
274 | { | 277 | { |
275 | I915_WRITE_NOTRACE(FORCEWAKE, 0); | 278 | I915_WRITE_NOTRACE(FORCEWAKE, 0); |
276 | POSTING_READ(FORCEWAKE); | 279 | POSTING_READ(FORCEWAKE); |
277 | } | 280 | } |
278 | 281 | ||
282 | void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv) | ||
283 | { | ||
284 | int loop = 500; | ||
285 | u32 fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES); | ||
286 | while (fifo < 20 && loop--) { | ||
287 | udelay(10); | ||
288 | fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES); | ||
289 | } | ||
290 | } | ||
291 | |||
279 | static int i915_drm_freeze(struct drm_device *dev) | 292 | static int i915_drm_freeze(struct drm_device *dev) |
280 | { | 293 | { |
281 | struct drm_i915_private *dev_priv = dev->dev_private; | 294 | struct drm_i915_private *dev_priv = dev->dev_private; |
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 65dfe81d0035..456f40484838 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -956,6 +956,7 @@ extern struct drm_ioctl_desc i915_ioctls[]; | |||
956 | extern int i915_max_ioctl; | 956 | extern int i915_max_ioctl; |
957 | extern unsigned int i915_fbpercrtc; | 957 | extern unsigned int i915_fbpercrtc; |
958 | extern unsigned int i915_powersave; | 958 | extern unsigned int i915_powersave; |
959 | extern unsigned int i915_semaphores; | ||
959 | extern unsigned int i915_lvds_downclock; | 960 | extern unsigned int i915_lvds_downclock; |
960 | extern unsigned int i915_panel_use_ssc; | 961 | extern unsigned int i915_panel_use_ssc; |
961 | extern unsigned int i915_enable_rc6; | 962 | extern unsigned int i915_enable_rc6; |
@@ -1177,6 +1178,9 @@ void i915_gem_detach_phys_object(struct drm_device *dev, | |||
1177 | void i915_gem_free_all_phys_object(struct drm_device *dev); | 1178 | void i915_gem_free_all_phys_object(struct drm_device *dev); |
1178 | void i915_gem_release(struct drm_device *dev, struct drm_file *file); | 1179 | void i915_gem_release(struct drm_device *dev, struct drm_file *file); |
1179 | 1180 | ||
1181 | uint32_t | ||
1182 | i915_gem_get_unfenced_gtt_alignment(struct drm_i915_gem_object *obj); | ||
1183 | |||
1180 | /* i915_gem_gtt.c */ | 1184 | /* i915_gem_gtt.c */ |
1181 | void i915_gem_restore_gtt_mappings(struct drm_device *dev); | 1185 | void i915_gem_restore_gtt_mappings(struct drm_device *dev); |
1182 | int __must_check i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj); | 1186 | int __must_check i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj); |
@@ -1353,22 +1357,32 @@ __i915_write(64, q) | |||
1353 | * must be set to prevent GT core from power down and stale values being | 1357 | * must be set to prevent GT core from power down and stale values being |
1354 | * returned. | 1358 | * returned. |
1355 | */ | 1359 | */ |
1356 | void __gen6_force_wake_get(struct drm_i915_private *dev_priv); | 1360 | void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv); |
1357 | void __gen6_force_wake_put (struct drm_i915_private *dev_priv); | 1361 | void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv); |
1358 | static inline u32 i915_safe_read(struct drm_i915_private *dev_priv, u32 reg) | 1362 | void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv); |
1363 | |||
1364 | static inline u32 i915_gt_read(struct drm_i915_private *dev_priv, u32 reg) | ||
1359 | { | 1365 | { |
1360 | u32 val; | 1366 | u32 val; |
1361 | 1367 | ||
1362 | if (dev_priv->info->gen >= 6) { | 1368 | if (dev_priv->info->gen >= 6) { |
1363 | __gen6_force_wake_get(dev_priv); | 1369 | __gen6_gt_force_wake_get(dev_priv); |
1364 | val = I915_READ(reg); | 1370 | val = I915_READ(reg); |
1365 | __gen6_force_wake_put(dev_priv); | 1371 | __gen6_gt_force_wake_put(dev_priv); |
1366 | } else | 1372 | } else |
1367 | val = I915_READ(reg); | 1373 | val = I915_READ(reg); |
1368 | 1374 | ||
1369 | return val; | 1375 | return val; |
1370 | } | 1376 | } |
1371 | 1377 | ||
1378 | static inline void i915_gt_write(struct drm_i915_private *dev_priv, | ||
1379 | u32 reg, u32 val) | ||
1380 | { | ||
1381 | if (dev_priv->info->gen >= 6) | ||
1382 | __gen6_gt_wait_for_fifo(dev_priv); | ||
1383 | I915_WRITE(reg, val); | ||
1384 | } | ||
1385 | |||
1372 | static inline void | 1386 | static inline void |
1373 | i915_write(struct drm_i915_private *dev_priv, u32 reg, u64 val, int len) | 1387 | i915_write(struct drm_i915_private *dev_priv, u32 reg, u64 val, int len) |
1374 | { | 1388 | { |
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index cf4f74c7c6fb..36e66cc5225e 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1398,7 +1398,7 @@ i915_gem_get_gtt_alignment(struct drm_i915_gem_object *obj) | |||
1398 | * Return the required GTT alignment for an object, only taking into account | 1398 | * Return the required GTT alignment for an object, only taking into account |
1399 | * unfenced tiled surface requirements. | 1399 | * unfenced tiled surface requirements. |
1400 | */ | 1400 | */ |
1401 | static uint32_t | 1401 | uint32_t |
1402 | i915_gem_get_unfenced_gtt_alignment(struct drm_i915_gem_object *obj) | 1402 | i915_gem_get_unfenced_gtt_alignment(struct drm_i915_gem_object *obj) |
1403 | { | 1403 | { |
1404 | struct drm_device *dev = obj->base.dev; | 1404 | struct drm_device *dev = obj->base.dev; |
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index d2f445e825f2..50ab1614571c 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -772,8 +772,8 @@ i915_gem_execbuffer_sync_rings(struct drm_i915_gem_object *obj, | |||
772 | if (from == NULL || to == from) | 772 | if (from == NULL || to == from) |
773 | return 0; | 773 | return 0; |
774 | 774 | ||
775 | /* XXX gpu semaphores are currently causing hard hangs on SNB mobile */ | 775 | /* XXX gpu semaphores are implicated in various hard hangs on SNB */ |
776 | if (INTEL_INFO(obj->base.dev)->gen < 6 || IS_MOBILE(obj->base.dev)) | 776 | if (INTEL_INFO(obj->base.dev)->gen < 6 || !i915_semaphores) |
777 | return i915_gem_object_wait_rendering(obj, true); | 777 | return i915_gem_object_wait_rendering(obj, true); |
778 | 778 | ||
779 | idx = intel_ring_sync_index(from, to); | 779 | idx = intel_ring_sync_index(from, to); |
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 22a32b9932c5..d64843e18df2 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -349,14 +349,27 @@ i915_gem_set_tiling(struct drm_device *dev, void *data, | |||
349 | (obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end && | 349 | (obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end && |
350 | i915_gem_object_fence_ok(obj, args->tiling_mode)); | 350 | i915_gem_object_fence_ok(obj, args->tiling_mode)); |
351 | 351 | ||
352 | obj->tiling_changed = true; | 352 | /* Rebind if we need a change of alignment */ |
353 | obj->tiling_mode = args->tiling_mode; | 353 | if (!obj->map_and_fenceable) { |
354 | obj->stride = args->stride; | 354 | u32 unfenced_alignment = |
355 | i915_gem_get_unfenced_gtt_alignment(obj); | ||
356 | if (obj->gtt_offset & (unfenced_alignment - 1)) | ||
357 | ret = i915_gem_object_unbind(obj); | ||
358 | } | ||
359 | |||
360 | if (ret == 0) { | ||
361 | obj->tiling_changed = true; | ||
362 | obj->tiling_mode = args->tiling_mode; | ||
363 | obj->stride = args->stride; | ||
364 | } | ||
355 | } | 365 | } |
366 | /* we have to maintain this existing ABI... */ | ||
367 | args->stride = obj->stride; | ||
368 | args->tiling_mode = obj->tiling_mode; | ||
356 | drm_gem_object_unreference(&obj->base); | 369 | drm_gem_object_unreference(&obj->base); |
357 | mutex_unlock(&dev->struct_mutex); | 370 | mutex_unlock(&dev->struct_mutex); |
358 | 371 | ||
359 | return 0; | 372 | return ret; |
360 | } | 373 | } |
361 | 374 | ||
362 | /** | 375 | /** |
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 97f946dcc1aa..8a9e08bf1cf7 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -316,6 +316,8 @@ static void i915_hotplug_work_func(struct work_struct *work) | |||
316 | struct drm_mode_config *mode_config = &dev->mode_config; | 316 | struct drm_mode_config *mode_config = &dev->mode_config; |
317 | struct intel_encoder *encoder; | 317 | struct intel_encoder *encoder; |
318 | 318 | ||
319 | DRM_DEBUG_KMS("running encoder hotplug functions\n"); | ||
320 | |||
319 | list_for_each_entry(encoder, &mode_config->encoder_list, base.head) | 321 | list_for_each_entry(encoder, &mode_config->encoder_list, base.head) |
320 | if (encoder->hot_plug) | 322 | if (encoder->hot_plug) |
321 | encoder->hot_plug(encoder); | 323 | encoder->hot_plug(encoder); |
@@ -1649,9 +1651,7 @@ static int ironlake_irq_postinstall(struct drm_device *dev) | |||
1649 | } else { | 1651 | } else { |
1650 | hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG | | 1652 | hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG | |
1651 | SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG; | 1653 | SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG; |
1652 | hotplug_mask |= SDE_AUX_MASK | SDE_FDI_MASK | SDE_TRANS_MASK; | 1654 | hotplug_mask |= SDE_AUX_MASK; |
1653 | I915_WRITE(FDI_RXA_IMR, 0); | ||
1654 | I915_WRITE(FDI_RXB_IMR, 0); | ||
1655 | } | 1655 | } |
1656 | 1656 | ||
1657 | dev_priv->pch_irq_mask = ~hotplug_mask; | 1657 | dev_priv->pch_irq_mask = ~hotplug_mask; |
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 15d94c63918c..3e6f486f4605 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -1553,17 +1553,7 @@ | |||
1553 | 1553 | ||
1554 | /* Backlight control */ | 1554 | /* Backlight control */ |
1555 | #define BLC_PWM_CTL 0x61254 | 1555 | #define BLC_PWM_CTL 0x61254 |
1556 | #define BACKLIGHT_MODULATION_FREQ_SHIFT (17) | ||
1557 | #define BLC_PWM_CTL2 0x61250 /* 965+ only */ | 1556 | #define BLC_PWM_CTL2 0x61250 /* 965+ only */ |
1558 | #define BLM_COMBINATION_MODE (1 << 30) | ||
1559 | /* | ||
1560 | * This is the most significant 15 bits of the number of backlight cycles in a | ||
1561 | * complete cycle of the modulated backlight control. | ||
1562 | * | ||
1563 | * The actual value is this field multiplied by two. | ||
1564 | */ | ||
1565 | #define BACKLIGHT_MODULATION_FREQ_MASK (0x7fff << 17) | ||
1566 | #define BLM_LEGACY_MODE (1 << 16) | ||
1567 | /* | 1557 | /* |
1568 | * This is the number of cycles out of the backlight modulation cycle for which | 1558 | * This is the number of cycles out of the backlight modulation cycle for which |
1569 | * the backlight is on. | 1559 | * the backlight is on. |
@@ -3271,6 +3261,8 @@ | |||
3271 | #define FORCEWAKE 0xA18C | 3261 | #define FORCEWAKE 0xA18C |
3272 | #define FORCEWAKE_ACK 0x130090 | 3262 | #define FORCEWAKE_ACK 0x130090 |
3273 | 3263 | ||
3264 | #define GT_FIFO_FREE_ENTRIES 0x120008 | ||
3265 | |||
3274 | #define GEN6_RPNSWREQ 0xA008 | 3266 | #define GEN6_RPNSWREQ 0xA008 |
3275 | #define GEN6_TURBO_DISABLE (1<<31) | 3267 | #define GEN6_TURBO_DISABLE (1<<31) |
3276 | #define GEN6_FREQUENCY(x) ((x)<<25) | 3268 | #define GEN6_FREQUENCY(x) ((x)<<25) |
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 3b006536b3d2..49fb54fd9a18 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -1219,7 +1219,7 @@ static void sandybridge_blit_fbc_update(struct drm_device *dev) | |||
1219 | u32 blt_ecoskpd; | 1219 | u32 blt_ecoskpd; |
1220 | 1220 | ||
1221 | /* Make sure blitter notifies FBC of writes */ | 1221 | /* Make sure blitter notifies FBC of writes */ |
1222 | __gen6_force_wake_get(dev_priv); | 1222 | __gen6_gt_force_wake_get(dev_priv); |
1223 | blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD); | 1223 | blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD); |
1224 | blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY << | 1224 | blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY << |
1225 | GEN6_BLITTER_LOCK_SHIFT; | 1225 | GEN6_BLITTER_LOCK_SHIFT; |
@@ -1230,7 +1230,7 @@ static void sandybridge_blit_fbc_update(struct drm_device *dev) | |||
1230 | GEN6_BLITTER_LOCK_SHIFT); | 1230 | GEN6_BLITTER_LOCK_SHIFT); |
1231 | I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd); | 1231 | I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd); |
1232 | POSTING_READ(GEN6_BLITTER_ECOSKPD); | 1232 | POSTING_READ(GEN6_BLITTER_ECOSKPD); |
1233 | __gen6_force_wake_put(dev_priv); | 1233 | __gen6_gt_force_wake_put(dev_priv); |
1234 | } | 1234 | } |
1235 | 1235 | ||
1236 | static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval) | 1236 | static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval) |
@@ -1630,19 +1630,19 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, | |||
1630 | struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj; | 1630 | struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj; |
1631 | 1631 | ||
1632 | wait_event(dev_priv->pending_flip_queue, | 1632 | wait_event(dev_priv->pending_flip_queue, |
1633 | atomic_read(&dev_priv->mm.wedged) || | ||
1633 | atomic_read(&obj->pending_flip) == 0); | 1634 | atomic_read(&obj->pending_flip) == 0); |
1634 | 1635 | ||
1635 | /* Big Hammer, we also need to ensure that any pending | 1636 | /* Big Hammer, we also need to ensure that any pending |
1636 | * MI_WAIT_FOR_EVENT inside a user batch buffer on the | 1637 | * MI_WAIT_FOR_EVENT inside a user batch buffer on the |
1637 | * current scanout is retired before unpinning the old | 1638 | * current scanout is retired before unpinning the old |
1638 | * framebuffer. | 1639 | * framebuffer. |
1640 | * | ||
1641 | * This should only fail upon a hung GPU, in which case we | ||
1642 | * can safely continue. | ||
1639 | */ | 1643 | */ |
1640 | ret = i915_gem_object_flush_gpu(obj, false); | 1644 | ret = i915_gem_object_flush_gpu(obj, false); |
1641 | if (ret) { | 1645 | (void) ret; |
1642 | i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj); | ||
1643 | mutex_unlock(&dev->struct_mutex); | ||
1644 | return ret; | ||
1645 | } | ||
1646 | } | 1646 | } |
1647 | 1647 | ||
1648 | ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y, | 1648 | ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y, |
@@ -2045,6 +2045,31 @@ static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc) | |||
2045 | atomic_read(&obj->pending_flip) == 0); | 2045 | atomic_read(&obj->pending_flip) == 0); |
2046 | } | 2046 | } |
2047 | 2047 | ||
2048 | static bool intel_crtc_driving_pch(struct drm_crtc *crtc) | ||
2049 | { | ||
2050 | struct drm_device *dev = crtc->dev; | ||
2051 | struct drm_mode_config *mode_config = &dev->mode_config; | ||
2052 | struct intel_encoder *encoder; | ||
2053 | |||
2054 | /* | ||
2055 | * If there's a non-PCH eDP on this crtc, it must be DP_A, and that | ||
2056 | * must be driven by its own crtc; no sharing is possible. | ||
2057 | */ | ||
2058 | list_for_each_entry(encoder, &mode_config->encoder_list, base.head) { | ||
2059 | if (encoder->base.crtc != crtc) | ||
2060 | continue; | ||
2061 | |||
2062 | switch (encoder->type) { | ||
2063 | case INTEL_OUTPUT_EDP: | ||
2064 | if (!intel_encoder_is_pch_edp(&encoder->base)) | ||
2065 | return false; | ||
2066 | continue; | ||
2067 | } | ||
2068 | } | ||
2069 | |||
2070 | return true; | ||
2071 | } | ||
2072 | |||
2048 | static void ironlake_crtc_enable(struct drm_crtc *crtc) | 2073 | static void ironlake_crtc_enable(struct drm_crtc *crtc) |
2049 | { | 2074 | { |
2050 | struct drm_device *dev = crtc->dev; | 2075 | struct drm_device *dev = crtc->dev; |
@@ -2053,6 +2078,7 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc) | |||
2053 | int pipe = intel_crtc->pipe; | 2078 | int pipe = intel_crtc->pipe; |
2054 | int plane = intel_crtc->plane; | 2079 | int plane = intel_crtc->plane; |
2055 | u32 reg, temp; | 2080 | u32 reg, temp; |
2081 | bool is_pch_port = false; | ||
2056 | 2082 | ||
2057 | if (intel_crtc->active) | 2083 | if (intel_crtc->active) |
2058 | return; | 2084 | return; |
@@ -2066,7 +2092,56 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc) | |||
2066 | I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN); | 2092 | I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN); |
2067 | } | 2093 | } |
2068 | 2094 | ||
2069 | ironlake_fdi_enable(crtc); | 2095 | is_pch_port = intel_crtc_driving_pch(crtc); |
2096 | |||
2097 | if (is_pch_port) | ||
2098 | ironlake_fdi_enable(crtc); | ||
2099 | else { | ||
2100 | /* disable CPU FDI tx and PCH FDI rx */ | ||
2101 | reg = FDI_TX_CTL(pipe); | ||
2102 | temp = I915_READ(reg); | ||
2103 | I915_WRITE(reg, temp & ~FDI_TX_ENABLE); | ||
2104 | POSTING_READ(reg); | ||
2105 | |||
2106 | reg = FDI_RX_CTL(pipe); | ||
2107 | temp = I915_READ(reg); | ||
2108 | temp &= ~(0x7 << 16); | ||
2109 | temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11; | ||
2110 | I915_WRITE(reg, temp & ~FDI_RX_ENABLE); | ||
2111 | |||
2112 | POSTING_READ(reg); | ||
2113 | udelay(100); | ||
2114 | |||
2115 | /* Ironlake workaround, disable clock pointer after downing FDI */ | ||
2116 | if (HAS_PCH_IBX(dev)) | ||
2117 | I915_WRITE(FDI_RX_CHICKEN(pipe), | ||
2118 | I915_READ(FDI_RX_CHICKEN(pipe) & | ||
2119 | ~FDI_RX_PHASE_SYNC_POINTER_ENABLE)); | ||
2120 | |||
2121 | /* still set train pattern 1 */ | ||
2122 | reg = FDI_TX_CTL(pipe); | ||
2123 | temp = I915_READ(reg); | ||
2124 | temp &= ~FDI_LINK_TRAIN_NONE; | ||
2125 | temp |= FDI_LINK_TRAIN_PATTERN_1; | ||
2126 | I915_WRITE(reg, temp); | ||
2127 | |||
2128 | reg = FDI_RX_CTL(pipe); | ||
2129 | temp = I915_READ(reg); | ||
2130 | if (HAS_PCH_CPT(dev)) { | ||
2131 | temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; | ||
2132 | temp |= FDI_LINK_TRAIN_PATTERN_1_CPT; | ||
2133 | } else { | ||
2134 | temp &= ~FDI_LINK_TRAIN_NONE; | ||
2135 | temp |= FDI_LINK_TRAIN_PATTERN_1; | ||
2136 | } | ||
2137 | /* BPC in FDI rx is consistent with that in PIPECONF */ | ||
2138 | temp &= ~(0x07 << 16); | ||
2139 | temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11; | ||
2140 | I915_WRITE(reg, temp); | ||
2141 | |||
2142 | POSTING_READ(reg); | ||
2143 | udelay(100); | ||
2144 | } | ||
2070 | 2145 | ||
2071 | /* Enable panel fitting for LVDS */ | 2146 | /* Enable panel fitting for LVDS */ |
2072 | if (dev_priv->pch_pf_size && | 2147 | if (dev_priv->pch_pf_size && |
@@ -2100,6 +2175,10 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc) | |||
2100 | intel_flush_display_plane(dev, plane); | 2175 | intel_flush_display_plane(dev, plane); |
2101 | } | 2176 | } |
2102 | 2177 | ||
2178 | /* Skip the PCH stuff if possible */ | ||
2179 | if (!is_pch_port) | ||
2180 | goto done; | ||
2181 | |||
2103 | /* For PCH output, training FDI link */ | 2182 | /* For PCH output, training FDI link */ |
2104 | if (IS_GEN6(dev)) | 2183 | if (IS_GEN6(dev)) |
2105 | gen6_fdi_link_train(crtc); | 2184 | gen6_fdi_link_train(crtc); |
@@ -2184,7 +2263,7 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc) | |||
2184 | I915_WRITE(reg, temp | TRANS_ENABLE); | 2263 | I915_WRITE(reg, temp | TRANS_ENABLE); |
2185 | if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100)) | 2264 | if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100)) |
2186 | DRM_ERROR("failed to enable transcoder %d\n", pipe); | 2265 | DRM_ERROR("failed to enable transcoder %d\n", pipe); |
2187 | 2266 | done: | |
2188 | intel_crtc_load_lut(crtc); | 2267 | intel_crtc_load_lut(crtc); |
2189 | intel_update_fbc(dev); | 2268 | intel_update_fbc(dev); |
2190 | intel_crtc_update_cursor(crtc, true); | 2269 | intel_crtc_update_cursor(crtc, true); |
@@ -6203,7 +6282,7 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv) | |||
6203 | * userspace... | 6282 | * userspace... |
6204 | */ | 6283 | */ |
6205 | I915_WRITE(GEN6_RC_STATE, 0); | 6284 | I915_WRITE(GEN6_RC_STATE, 0); |
6206 | __gen6_force_wake_get(dev_priv); | 6285 | __gen6_gt_force_wake_get(dev_priv); |
6207 | 6286 | ||
6208 | /* disable the counters and set deterministic thresholds */ | 6287 | /* disable the counters and set deterministic thresholds */ |
6209 | I915_WRITE(GEN6_RC_CONTROL, 0); | 6288 | I915_WRITE(GEN6_RC_CONTROL, 0); |
@@ -6301,7 +6380,7 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv) | |||
6301 | /* enable all PM interrupts */ | 6380 | /* enable all PM interrupts */ |
6302 | I915_WRITE(GEN6_PMINTRMSK, 0); | 6381 | I915_WRITE(GEN6_PMINTRMSK, 0); |
6303 | 6382 | ||
6304 | __gen6_force_wake_put(dev_priv); | 6383 | __gen6_gt_force_wake_put(dev_priv); |
6305 | } | 6384 | } |
6306 | 6385 | ||
6307 | void intel_enable_clock_gating(struct drm_device *dev) | 6386 | void intel_enable_clock_gating(struct drm_device *dev) |
@@ -6496,7 +6575,7 @@ static void ironlake_disable_rc6(struct drm_device *dev) | |||
6496 | POSTING_READ(RSTDBYCTL); | 6575 | POSTING_READ(RSTDBYCTL); |
6497 | } | 6576 | } |
6498 | 6577 | ||
6499 | ironlake_disable_rc6(dev); | 6578 | ironlake_teardown_rc6(dev); |
6500 | } | 6579 | } |
6501 | 6580 | ||
6502 | static int ironlake_setup_rc6(struct drm_device *dev) | 6581 | static int ironlake_setup_rc6(struct drm_device *dev) |
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index c65992df458d..d860abeda70f 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -30,8 +30,6 @@ | |||
30 | 30 | ||
31 | #include "intel_drv.h" | 31 | #include "intel_drv.h" |
32 | 32 | ||
33 | #define PCI_LBPC 0xf4 /* legacy/combination backlight modes */ | ||
34 | |||
35 | void | 33 | void |
36 | intel_fixed_panel_mode(struct drm_display_mode *fixed_mode, | 34 | intel_fixed_panel_mode(struct drm_display_mode *fixed_mode, |
37 | struct drm_display_mode *adjusted_mode) | 35 | struct drm_display_mode *adjusted_mode) |
@@ -112,19 +110,6 @@ done: | |||
112 | dev_priv->pch_pf_size = (width << 16) | height; | 110 | dev_priv->pch_pf_size = (width << 16) | height; |
113 | } | 111 | } |
114 | 112 | ||
115 | static int is_backlight_combination_mode(struct drm_device *dev) | ||
116 | { | ||
117 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
118 | |||
119 | if (INTEL_INFO(dev)->gen >= 4) | ||
120 | return I915_READ(BLC_PWM_CTL2) & BLM_COMBINATION_MODE; | ||
121 | |||
122 | if (IS_GEN2(dev)) | ||
123 | return I915_READ(BLC_PWM_CTL) & BLM_LEGACY_MODE; | ||
124 | |||
125 | return 0; | ||
126 | } | ||
127 | |||
128 | static u32 i915_read_blc_pwm_ctl(struct drm_i915_private *dev_priv) | 113 | static u32 i915_read_blc_pwm_ctl(struct drm_i915_private *dev_priv) |
129 | { | 114 | { |
130 | u32 val; | 115 | u32 val; |
@@ -181,9 +166,6 @@ u32 intel_panel_get_max_backlight(struct drm_device *dev) | |||
181 | if (INTEL_INFO(dev)->gen < 4) | 166 | if (INTEL_INFO(dev)->gen < 4) |
182 | max &= ~1; | 167 | max &= ~1; |
183 | } | 168 | } |
184 | |||
185 | if (is_backlight_combination_mode(dev)) | ||
186 | max *= 0xff; | ||
187 | } | 169 | } |
188 | 170 | ||
189 | DRM_DEBUG_DRIVER("max backlight PWM = %d\n", max); | 171 | DRM_DEBUG_DRIVER("max backlight PWM = %d\n", max); |
@@ -201,15 +183,6 @@ u32 intel_panel_get_backlight(struct drm_device *dev) | |||
201 | val = I915_READ(BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK; | 183 | val = I915_READ(BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK; |
202 | if (IS_PINEVIEW(dev)) | 184 | if (IS_PINEVIEW(dev)) |
203 | val >>= 1; | 185 | val >>= 1; |
204 | |||
205 | if (is_backlight_combination_mode(dev)){ | ||
206 | u8 lbpc; | ||
207 | |||
208 | val &= ~1; | ||
209 | pci_read_config_byte(dev->pdev, PCI_LBPC, &lbpc); | ||
210 | val *= lbpc; | ||
211 | val >>= 1; | ||
212 | } | ||
213 | } | 186 | } |
214 | 187 | ||
215 | DRM_DEBUG_DRIVER("get backlight PWM = %d\n", val); | 188 | DRM_DEBUG_DRIVER("get backlight PWM = %d\n", val); |
@@ -232,16 +205,6 @@ void intel_panel_set_backlight(struct drm_device *dev, u32 level) | |||
232 | 205 | ||
233 | if (HAS_PCH_SPLIT(dev)) | 206 | if (HAS_PCH_SPLIT(dev)) |
234 | return intel_pch_panel_set_backlight(dev, level); | 207 | return intel_pch_panel_set_backlight(dev, level); |
235 | |||
236 | if (is_backlight_combination_mode(dev)){ | ||
237 | u32 max = intel_panel_get_max_backlight(dev); | ||
238 | u8 lpbc; | ||
239 | |||
240 | lpbc = level * 0xfe / max + 1; | ||
241 | level /= lpbc; | ||
242 | pci_write_config_byte(dev->pdev, PCI_LBPC, lpbc); | ||
243 | } | ||
244 | |||
245 | tmp = I915_READ(BLC_PWM_CTL); | 208 | tmp = I915_READ(BLC_PWM_CTL); |
246 | if (IS_PINEVIEW(dev)) { | 209 | if (IS_PINEVIEW(dev)) { |
247 | tmp &= ~(BACKLIGHT_DUTY_CYCLE_MASK - 1); | 210 | tmp &= ~(BACKLIGHT_DUTY_CYCLE_MASK - 1); |
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 6d6fde85a636..34306865a5df 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -14,22 +14,23 @@ struct intel_hw_status_page { | |||
14 | struct drm_i915_gem_object *obj; | 14 | struct drm_i915_gem_object *obj; |
15 | }; | 15 | }; |
16 | 16 | ||
17 | #define I915_RING_READ(reg) i915_safe_read(dev_priv, reg) | 17 | #define I915_RING_READ(reg) i915_gt_read(dev_priv, reg) |
18 | #define I915_RING_WRITE(reg, val) i915_gt_write(dev_priv, reg, val) | ||
18 | 19 | ||
19 | #define I915_READ_TAIL(ring) I915_RING_READ(RING_TAIL((ring)->mmio_base)) | 20 | #define I915_READ_TAIL(ring) I915_RING_READ(RING_TAIL((ring)->mmio_base)) |
20 | #define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL((ring)->mmio_base), val) | 21 | #define I915_WRITE_TAIL(ring, val) I915_RING_WRITE(RING_TAIL((ring)->mmio_base), val) |
21 | 22 | ||
22 | #define I915_READ_START(ring) I915_RING_READ(RING_START((ring)->mmio_base)) | 23 | #define I915_READ_START(ring) I915_RING_READ(RING_START((ring)->mmio_base)) |
23 | #define I915_WRITE_START(ring, val) I915_WRITE(RING_START((ring)->mmio_base), val) | 24 | #define I915_WRITE_START(ring, val) I915_RING_WRITE(RING_START((ring)->mmio_base), val) |
24 | 25 | ||
25 | #define I915_READ_HEAD(ring) I915_RING_READ(RING_HEAD((ring)->mmio_base)) | 26 | #define I915_READ_HEAD(ring) I915_RING_READ(RING_HEAD((ring)->mmio_base)) |
26 | #define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD((ring)->mmio_base), val) | 27 | #define I915_WRITE_HEAD(ring, val) I915_RING_WRITE(RING_HEAD((ring)->mmio_base), val) |
27 | 28 | ||
28 | #define I915_READ_CTL(ring) I915_RING_READ(RING_CTL((ring)->mmio_base)) | 29 | #define I915_READ_CTL(ring) I915_RING_READ(RING_CTL((ring)->mmio_base)) |
29 | #define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL((ring)->mmio_base), val) | 30 | #define I915_WRITE_CTL(ring, val) I915_RING_WRITE(RING_CTL((ring)->mmio_base), val) |
30 | 31 | ||
31 | #define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val) | ||
32 | #define I915_READ_IMR(ring) I915_RING_READ(RING_IMR((ring)->mmio_base)) | 32 | #define I915_READ_IMR(ring) I915_RING_READ(RING_IMR((ring)->mmio_base)) |
33 | #define I915_WRITE_IMR(ring, val) I915_RING_WRITE(RING_IMR((ring)->mmio_base), val) | ||
33 | 34 | ||
34 | #define I915_READ_NOPID(ring) I915_RING_READ(RING_NOPID((ring)->mmio_base)) | 35 | #define I915_READ_NOPID(ring) I915_RING_READ(RING_NOPID((ring)->mmio_base)) |
35 | #define I915_READ_SYNC_0(ring) I915_RING_READ(RING_SYNC_0((ring)->mmio_base)) | 36 | #define I915_READ_SYNC_0(ring) I915_RING_READ(RING_SYNC_0((ring)->mmio_base)) |
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index 49e5e99917e2..6bdab891c64e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -6228,7 +6228,7 @@ parse_dcb15_entry(struct drm_device *dev, struct dcb_table *dcb, | |||
6228 | entry->tvconf.has_component_output = false; | 6228 | entry->tvconf.has_component_output = false; |
6229 | break; | 6229 | break; |
6230 | case OUTPUT_LVDS: | 6230 | case OUTPUT_LVDS: |
6231 | if ((conn & 0x00003f00) != 0x10) | 6231 | if ((conn & 0x00003f00) >> 8 != 0x10) |
6232 | entry->lvdsconf.use_straps_for_mode = true; | 6232 | entry->lvdsconf.use_straps_for_mode = true; |
6233 | entry->lvdsconf.use_power_scripts = true; | 6233 | entry->lvdsconf.use_power_scripts = true; |
6234 | break; | 6234 | break; |
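The one-line nouveau fix above works because the old comparison tested the still-shifted field: conn & 0x00003f00 is always either zero or a multiple of 0x100, so it could never equal 0x10 and the old test was effectively always true. With a hypothetical entry value:

	/*
	 *   conn                      = 0x00001022   (invented for illustration)
	 *   conn & 0x00003f00         = 0x1000       (old test: != 0x10, always true)
	 *   (conn & 0x00003f00) >> 8  = 0x10         (new test compares the field itself)
	 */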
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index a7fae26f4654..a52184007f5f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -49,7 +49,10 @@ nouveau_bo_del_ttm(struct ttm_buffer_object *bo) | |||
49 | DRM_ERROR("bo %p still attached to GEM object\n", bo); | 49 | DRM_ERROR("bo %p still attached to GEM object\n", bo); |
50 | 50 | ||
51 | nv10_mem_put_tile_region(dev, nvbo->tile, NULL); | 51 | nv10_mem_put_tile_region(dev, nvbo->tile, NULL); |
52 | nouveau_vm_put(&nvbo->vma); | 52 | if (nvbo->vma.node) { |
53 | nouveau_vm_unmap(&nvbo->vma); | ||
54 | nouveau_vm_put(&nvbo->vma); | ||
55 | } | ||
53 | kfree(nvbo); | 56 | kfree(nvbo); |
54 | } | 57 | } |
55 | 58 | ||
@@ -128,6 +131,7 @@ nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan, | |||
128 | } | 131 | } |
129 | } | 132 | } |
130 | 133 | ||
134 | nvbo->bo.mem.num_pages = size >> PAGE_SHIFT; | ||
131 | nouveau_bo_placement_set(nvbo, flags, 0); | 135 | nouveau_bo_placement_set(nvbo, flags, 0); |
132 | 136 | ||
133 | nvbo->channel = chan; | 137 | nvbo->channel = chan; |
@@ -166,17 +170,17 @@ static void | |||
166 | set_placement_range(struct nouveau_bo *nvbo, uint32_t type) | 170 | set_placement_range(struct nouveau_bo *nvbo, uint32_t type) |
167 | { | 171 | { |
168 | struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev); | 172 | struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev); |
173 | int vram_pages = dev_priv->vram_size >> PAGE_SHIFT; | ||
169 | 174 | ||
170 | if (dev_priv->card_type == NV_10 && | 175 | if (dev_priv->card_type == NV_10 && |
171 | nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM)) { | 176 | nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM) && |
177 | nvbo->bo.mem.num_pages < vram_pages / 2) { | ||
172 | /* | 178 | /* |
173 | * Make sure that the color and depth buffers are handled | 179 | * Make sure that the color and depth buffers are handled |
174 | * by independent memory controller units. Up to a 9x | 180 | * by independent memory controller units. Up to a 9x |
175 | * speed up when alpha-blending and depth-test are enabled | 181 | * speed up when alpha-blending and depth-test are enabled |
176 | * at the same time. | 182 | * at the same time. |
177 | */ | 183 | */ |
178 | int vram_pages = dev_priv->vram_size >> PAGE_SHIFT; | ||
179 | |||
180 | if (nvbo->tile_flags & NOUVEAU_GEM_TILE_ZETA) { | 184 | if (nvbo->tile_flags & NOUVEAU_GEM_TILE_ZETA) { |
181 | nvbo->placement.fpfn = vram_pages / 2; | 185 | nvbo->placement.fpfn = vram_pages / 2; |
182 | nvbo->placement.lpfn = ~0; | 186 | nvbo->placement.lpfn = ~0; |
@@ -785,7 +789,7 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr, | |||
785 | if (ret) | 789 | if (ret) |
786 | goto out; | 790 | goto out; |
787 | 791 | ||
788 | ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, new_mem); | 792 | ret = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, new_mem); |
789 | out: | 793 | out: |
790 | ttm_bo_mem_put(bo, &tmp_mem); | 794 | ttm_bo_mem_put(bo, &tmp_mem); |
791 | return ret; | 795 | return ret; |
@@ -811,11 +815,11 @@ nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr, | |||
811 | if (ret) | 815 | if (ret) |
812 | return ret; | 816 | return ret; |
813 | 817 | ||
814 | ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, &tmp_mem); | 818 | ret = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, &tmp_mem); |
815 | if (ret) | 819 | if (ret) |
816 | goto out; | 820 | goto out; |
817 | 821 | ||
818 | ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem); | 822 | ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve, no_wait_gpu, new_mem); |
819 | if (ret) | 823 | if (ret) |
820 | goto out; | 824 | goto out; |
821 | 825 | ||
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index a21e00076839..390d82c3c4b0 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -507,6 +507,7 @@ nouveau_connector_native_mode(struct drm_connector *connector) | |||
507 | int high_w = 0, high_h = 0, high_v = 0; | 507 | int high_w = 0, high_h = 0, high_v = 0; |
508 | 508 | ||
509 | list_for_each_entry(mode, &nv_connector->base.probed_modes, head) { | 509 | list_for_each_entry(mode, &nv_connector->base.probed_modes, head) { |
510 | mode->vrefresh = drm_mode_vrefresh(mode); | ||
510 | if (helper->mode_valid(connector, mode) != MODE_OK || | 511 | if (helper->mode_valid(connector, mode) != MODE_OK || |
511 | (mode->flags & DRM_MODE_FLAG_INTERLACE)) | 512 | (mode->flags & DRM_MODE_FLAG_INTERLACE)) |
512 | continue; | 513 | continue; |
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.c b/drivers/gpu/drm/nouveau/nouveau_dma.c
index 65699bfaaaea..b368ed74aad7 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.c
@@ -83,7 +83,8 @@ nouveau_dma_init(struct nouveau_channel *chan) | |||
83 | return ret; | 83 | return ret; |
84 | 84 | ||
85 | /* NV_MEMORY_TO_MEMORY_FORMAT requires a notifier object */ | 85 | /* NV_MEMORY_TO_MEMORY_FORMAT requires a notifier object */ |
86 | ret = nouveau_notifier_alloc(chan, NvNotify0, 32, &chan->m2mf_ntfy); | 86 | ret = nouveau_notifier_alloc(chan, NvNotify0, 32, 0xfd0, 0x1000, |
87 | &chan->m2mf_ntfy); | ||
87 | if (ret) | 88 | if (ret) |
88 | return ret; | 89 | return ret; |
89 | 90 | ||
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 9821fcacc3d2..982d70b12722 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -852,7 +852,8 @@ extern const struct ttm_mem_type_manager_func nouveau_vram_manager; | |||
852 | extern int nouveau_notifier_init_channel(struct nouveau_channel *); | 852 | extern int nouveau_notifier_init_channel(struct nouveau_channel *); |
853 | extern void nouveau_notifier_takedown_channel(struct nouveau_channel *); | 853 | extern void nouveau_notifier_takedown_channel(struct nouveau_channel *); |
854 | extern int nouveau_notifier_alloc(struct nouveau_channel *, uint32_t handle, | 854 | extern int nouveau_notifier_alloc(struct nouveau_channel *, uint32_t handle, |
855 | int cout, uint32_t *offset); | 855 | int cout, uint32_t start, uint32_t end, |
856 | uint32_t *offset); | ||
856 | extern int nouveau_notifier_offset(struct nouveau_gpuobj *, uint32_t *); | 857 | extern int nouveau_notifier_offset(struct nouveau_gpuobj *, uint32_t *); |
857 | extern int nouveau_ioctl_notifier_alloc(struct drm_device *, void *data, | 858 | extern int nouveau_ioctl_notifier_alloc(struct drm_device *, void *data, |
858 | struct drm_file *); | 859 | struct drm_file *); |
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
index 26347b7cd872..b0fb9bdcddb7 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -725,8 +725,10 @@ nouveau_vram_manager_new(struct ttm_mem_type_manager *man, | |||
725 | ret = vram->get(dev, mem->num_pages << PAGE_SHIFT, | 725 | ret = vram->get(dev, mem->num_pages << PAGE_SHIFT, |
726 | mem->page_alignment << PAGE_SHIFT, size_nc, | 726 | mem->page_alignment << PAGE_SHIFT, size_nc, |
727 | (nvbo->tile_flags >> 8) & 0xff, &node); | 727 | (nvbo->tile_flags >> 8) & 0xff, &node); |
728 | if (ret) | 728 | if (ret) { |
729 | return ret; | 729 | mem->mm_node = NULL; |
730 | return (ret == -ENOSPC) ? 0 : ret; | ||
731 | } | ||
730 | 732 | ||
731 | node->page_shift = 12; | 733 | node->page_shift = 12; |
732 | if (nvbo->vma.node) | 734 | if (nvbo->vma.node) |
diff --git a/drivers/gpu/drm/nouveau/nouveau_mm.c b/drivers/gpu/drm/nouveau/nouveau_mm.c
index 8844b50c3e54..7609756b6faf 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mm.c
@@ -123,7 +123,7 @@ nouveau_mm_get(struct nouveau_mm *rmm, int type, u32 size, u32 size_nc, | |||
123 | return 0; | 123 | return 0; |
124 | } | 124 | } |
125 | 125 | ||
126 | return -ENOMEM; | 126 | return -ENOSPC; |
127 | } | 127 | } |
128 | 128 | ||
129 | int | 129 | int |
diff --git a/drivers/gpu/drm/nouveau/nouveau_notifier.c b/drivers/gpu/drm/nouveau/nouveau_notifier.c
index fe29d604b820..5ea167623a82 100644
--- a/drivers/gpu/drm/nouveau/nouveau_notifier.c
+++ b/drivers/gpu/drm/nouveau/nouveau_notifier.c
@@ -96,7 +96,8 @@ nouveau_notifier_gpuobj_dtor(struct drm_device *dev, | |||
96 | 96 | ||
97 | int | 97 | int |
98 | nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle, | 98 | nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle, |
99 | int size, uint32_t *b_offset) | 99 | int size, uint32_t start, uint32_t end, |
100 | uint32_t *b_offset) | ||
100 | { | 101 | { |
101 | struct drm_device *dev = chan->dev; | 102 | struct drm_device *dev = chan->dev; |
102 | struct nouveau_gpuobj *nobj = NULL; | 103 | struct nouveau_gpuobj *nobj = NULL; |
@@ -104,9 +105,10 @@ nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle, | |||
104 | uint32_t offset; | 105 | uint32_t offset; |
105 | int target, ret; | 106 | int target, ret; |
106 | 107 | ||
107 | mem = drm_mm_search_free(&chan->notifier_heap, size, 0, 0); | 108 | mem = drm_mm_search_free_in_range(&chan->notifier_heap, size, 0, |
109 | start, end, 0); | ||
108 | if (mem) | 110 | if (mem) |
109 | mem = drm_mm_get_block(mem, size, 0); | 111 | mem = drm_mm_get_block_range(mem, size, 0, start, end); |
110 | if (!mem) { | 112 | if (!mem) { |
111 | NV_ERROR(dev, "Channel %d notifier block full\n", chan->id); | 113 | NV_ERROR(dev, "Channel %d notifier block full\n", chan->id); |
112 | return -ENOMEM; | 114 | return -ENOMEM; |
@@ -177,7 +179,8 @@ nouveau_ioctl_notifier_alloc(struct drm_device *dev, void *data, | |||
177 | if (IS_ERR(chan)) | 179 | if (IS_ERR(chan)) |
178 | return PTR_ERR(chan); | 180 | return PTR_ERR(chan); |
179 | 181 | ||
180 | ret = nouveau_notifier_alloc(chan, na->handle, na->size, &na->offset); | 182 | ret = nouveau_notifier_alloc(chan, na->handle, na->size, 0, 0x1000, |
183 | &na->offset); | ||
181 | nouveau_channel_put(&chan); | 184 | nouveau_channel_put(&chan); |
182 | return ret; | 185 | return ret; |
183 | } | 186 | } |
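Taken together with the nouveau_dma.c hunk earlier (which allocates the kernel's 32-byte NvNotify0 within 0xfd0-0x1000) and the ioctl path above (which allocates within 0-0x1000), the point of the new start/end parameters appears to be to pin the kernel's own notifier at the very top of the per-channel notifier block while userspace keeps the low offsets. A rough picture of the assumed layout:

	/*
	 *   0x0000 .. 0x0fcf   userspace notifier allocations
	 *                      (ioctl requests the range [0, 0x1000))
	 *   0x0fd0 .. 0x0fff   kernel's m2mf notifier (NvNotify0)
	 */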
diff --git a/drivers/gpu/drm/nouveau/nouveau_pm.c b/drivers/gpu/drm/nouveau/nouveau_pm.c
index f05c0cddfeca..4399e2f34db4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_pm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_pm.c
@@ -543,7 +543,7 @@ nouveau_pm_resume(struct drm_device *dev) | |||
543 | struct nouveau_pm_engine *pm = &dev_priv->engine.pm; | 543 | struct nouveau_pm_engine *pm = &dev_priv->engine.pm; |
544 | struct nouveau_pm_level *perflvl; | 544 | struct nouveau_pm_level *perflvl; |
545 | 545 | ||
546 | if (pm->cur == &pm->boot) | 546 | if (!pm->cur || pm->cur == &pm->boot) |
547 | return; | 547 | return; |
548 | 548 | ||
549 | perflvl = pm->cur; | 549 | perflvl = pm->cur; |
diff --git a/drivers/gpu/drm/nouveau/nv04_dfp.c b/drivers/gpu/drm/nouveau/nv04_dfp.c
index ef23550407b5..c82db37d9f41 100644
--- a/drivers/gpu/drm/nouveau/nv04_dfp.c
+++ b/drivers/gpu/drm/nouveau/nv04_dfp.c
@@ -342,8 +342,8 @@ static void nv04_dfp_mode_set(struct drm_encoder *encoder, | |||
342 | if (nv_encoder->dcb->type == OUTPUT_LVDS) { | 342 | if (nv_encoder->dcb->type == OUTPUT_LVDS) { |
343 | bool duallink, dummy; | 343 | bool duallink, dummy; |
344 | 344 | ||
345 | nouveau_bios_parse_lvds_table(dev, nv_connector->native_mode-> | 345 | nouveau_bios_parse_lvds_table(dev, output_mode->clock, |
346 | clock, &duallink, &dummy); | 346 | &duallink, &dummy); |
347 | if (duallink) | 347 | if (duallink) |
348 | regp->fp_control |= (8 << 28); | 348 | regp->fp_control |= (8 << 28); |
349 | } else | 349 | } else |
@@ -518,8 +518,6 @@ static void nv04_lvds_dpms(struct drm_encoder *encoder, int mode) | |||
518 | return; | 518 | return; |
519 | 519 | ||
520 | if (nv_encoder->dcb->lvdsconf.use_power_scripts) { | 520 | if (nv_encoder->dcb->lvdsconf.use_power_scripts) { |
521 | struct nouveau_connector *nv_connector = nouveau_encoder_connector_get(nv_encoder); | ||
522 | |||
523 | /* when removing an output, crtc may not be set, but PANEL_OFF | 521 | /* when removing an output, crtc may not be set, but PANEL_OFF |
524 | * must still be run | 522 | * must still be run |
525 | */ | 523 | */ |
@@ -527,12 +525,8 @@ static void nv04_lvds_dpms(struct drm_encoder *encoder, int mode) | |||
527 | nv04_dfp_get_bound_head(dev, nv_encoder->dcb); | 525 | nv04_dfp_get_bound_head(dev, nv_encoder->dcb); |
528 | 526 | ||
529 | if (mode == DRM_MODE_DPMS_ON) { | 527 | if (mode == DRM_MODE_DPMS_ON) { |
530 | if (!nv_connector->native_mode) { | ||
531 | NV_ERROR(dev, "Not turning on LVDS without native mode\n"); | ||
532 | return; | ||
533 | } | ||
534 | call_lvds_script(dev, nv_encoder->dcb, head, | 528 | call_lvds_script(dev, nv_encoder->dcb, head, |
535 | LVDS_PANEL_ON, nv_connector->native_mode->clock); | 529 | LVDS_PANEL_ON, nv_encoder->mode.clock); |
536 | } else | 530 | } else |
537 | /* pxclk of 0 is fine for PANEL_OFF, and for a | 531 | /* pxclk of 0 is fine for PANEL_OFF, and for a |
538 | * disconnected LVDS encoder there is no native_mode | 532 | * disconnected LVDS encoder there is no native_mode |
diff --git a/drivers/gpu/drm/nouveau/nv40_graph.c b/drivers/gpu/drm/nouveau/nv40_graph.c
index 8870d72388c8..18d30c2c1aa6 100644
--- a/drivers/gpu/drm/nouveau/nv40_graph.c
+++ b/drivers/gpu/drm/nouveau/nv40_graph.c
@@ -211,18 +211,32 @@ nv40_graph_set_tile_region(struct drm_device *dev, int i) | |||
211 | struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i]; | 211 | struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i]; |
212 | 212 | ||
213 | switch (dev_priv->chipset) { | 213 | switch (dev_priv->chipset) { |
214 | case 0x40: | ||
215 | case 0x41: /* guess */ | ||
216 | case 0x42: | ||
217 | case 0x43: | ||
218 | case 0x45: /* guess */ | ||
219 | case 0x4e: | ||
220 | nv_wr32(dev, NV20_PGRAPH_TSIZE(i), tile->pitch); | ||
221 | nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), tile->limit); | ||
222 | nv_wr32(dev, NV20_PGRAPH_TILE(i), tile->addr); | ||
223 | nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), tile->pitch); | ||
224 | nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tile->limit); | ||
225 | nv_wr32(dev, NV40_PGRAPH_TILE1(i), tile->addr); | ||
226 | break; | ||
214 | case 0x44: | 227 | case 0x44: |
215 | case 0x4a: | 228 | case 0x4a: |
216 | case 0x4e: | ||
217 | nv_wr32(dev, NV20_PGRAPH_TSIZE(i), tile->pitch); | 229 | nv_wr32(dev, NV20_PGRAPH_TSIZE(i), tile->pitch); |
218 | nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), tile->limit); | 230 | nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), tile->limit); |
219 | nv_wr32(dev, NV20_PGRAPH_TILE(i), tile->addr); | 231 | nv_wr32(dev, NV20_PGRAPH_TILE(i), tile->addr); |
220 | break; | 232 | break; |
221 | |||
222 | case 0x46: | 233 | case 0x46: |
223 | case 0x47: | 234 | case 0x47: |
224 | case 0x49: | 235 | case 0x49: |
225 | case 0x4b: | 236 | case 0x4b: |
237 | case 0x4c: | ||
238 | case 0x67: | ||
239 | default: | ||
226 | nv_wr32(dev, NV47_PGRAPH_TSIZE(i), tile->pitch); | 240 | nv_wr32(dev, NV47_PGRAPH_TSIZE(i), tile->pitch); |
227 | nv_wr32(dev, NV47_PGRAPH_TLIMIT(i), tile->limit); | 241 | nv_wr32(dev, NV47_PGRAPH_TLIMIT(i), tile->limit); |
228 | nv_wr32(dev, NV47_PGRAPH_TILE(i), tile->addr); | 242 | nv_wr32(dev, NV47_PGRAPH_TILE(i), tile->addr); |
@@ -230,15 +244,6 @@ nv40_graph_set_tile_region(struct drm_device *dev, int i) | |||
230 | nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tile->limit); | 244 | nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tile->limit); |
231 | nv_wr32(dev, NV40_PGRAPH_TILE1(i), tile->addr); | 245 | nv_wr32(dev, NV40_PGRAPH_TILE1(i), tile->addr); |
232 | break; | 246 | break; |
233 | |||
234 | default: | ||
235 | nv_wr32(dev, NV20_PGRAPH_TSIZE(i), tile->pitch); | ||
236 | nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), tile->limit); | ||
237 | nv_wr32(dev, NV20_PGRAPH_TILE(i), tile->addr); | ||
238 | nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), tile->pitch); | ||
239 | nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tile->limit); | ||
240 | nv_wr32(dev, NV40_PGRAPH_TILE1(i), tile->addr); | ||
241 | break; | ||
242 | } | 247 | } |
243 | } | 248 | } |
244 | 249 | ||
@@ -396,17 +401,20 @@ nv40_graph_init(struct drm_device *dev) | |||
396 | break; | 401 | break; |
397 | default: | 402 | default: |
398 | switch (dev_priv->chipset) { | 403 | switch (dev_priv->chipset) { |
399 | case 0x46: | 404 | case 0x41: |
400 | case 0x47: | 405 | case 0x42: |
401 | case 0x49: | 406 | case 0x43: |
402 | case 0x4b: | 407 | case 0x45: |
403 | nv_wr32(dev, 0x400DF0, nv_rd32(dev, NV04_PFB_CFG0)); | 408 | case 0x4e: |
404 | nv_wr32(dev, 0x400DF4, nv_rd32(dev, NV04_PFB_CFG1)); | 409 | case 0x44: |
405 | break; | 410 | case 0x4a: |
406 | default: | ||
407 | nv_wr32(dev, 0x4009F0, nv_rd32(dev, NV04_PFB_CFG0)); | 411 | nv_wr32(dev, 0x4009F0, nv_rd32(dev, NV04_PFB_CFG0)); |
408 | nv_wr32(dev, 0x4009F4, nv_rd32(dev, NV04_PFB_CFG1)); | 412 | nv_wr32(dev, 0x4009F4, nv_rd32(dev, NV04_PFB_CFG1)); |
409 | break; | 413 | break; |
414 | default: | ||
415 | nv_wr32(dev, 0x400DF0, nv_rd32(dev, NV04_PFB_CFG0)); | ||
416 | nv_wr32(dev, 0x400DF4, nv_rd32(dev, NV04_PFB_CFG1)); | ||
417 | break; | ||
410 | } | 418 | } |
411 | nv_wr32(dev, 0x4069F0, nv_rd32(dev, NV04_PFB_CFG0)); | 419 | nv_wr32(dev, 0x4069F0, nv_rd32(dev, NV04_PFB_CFG0)); |
412 | nv_wr32(dev, 0x4069F4, nv_rd32(dev, NV04_PFB_CFG1)); | 420 | nv_wr32(dev, 0x4069F4, nv_rd32(dev, NV04_PFB_CFG1)); |
diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c
index ea0041810ae3..e57caa2a00e3 100644
--- a/drivers/gpu/drm/nouveau/nv50_instmem.c
+++ b/drivers/gpu/drm/nouveau/nv50_instmem.c
@@ -403,16 +403,24 @@ nv50_instmem_unmap(struct nouveau_gpuobj *gpuobj) | |||
403 | void | 403 | void |
404 | nv50_instmem_flush(struct drm_device *dev) | 404 | nv50_instmem_flush(struct drm_device *dev) |
405 | { | 405 | { |
406 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
407 | |||
408 | spin_lock(&dev_priv->ramin_lock); | ||
406 | nv_wr32(dev, 0x00330c, 0x00000001); | 409 | nv_wr32(dev, 0x00330c, 0x00000001); |
407 | if (!nv_wait(dev, 0x00330c, 0x00000002, 0x00000000)) | 410 | if (!nv_wait(dev, 0x00330c, 0x00000002, 0x00000000)) |
408 | NV_ERROR(dev, "PRAMIN flush timeout\n"); | 411 | NV_ERROR(dev, "PRAMIN flush timeout\n"); |
412 | spin_unlock(&dev_priv->ramin_lock); | ||
409 | } | 413 | } |
410 | 414 | ||
411 | void | 415 | void |
412 | nv84_instmem_flush(struct drm_device *dev) | 416 | nv84_instmem_flush(struct drm_device *dev) |
413 | { | 417 | { |
418 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
419 | |||
420 | spin_lock(&dev_priv->ramin_lock); | ||
414 | nv_wr32(dev, 0x070000, 0x00000001); | 421 | nv_wr32(dev, 0x070000, 0x00000001); |
415 | if (!nv_wait(dev, 0x070000, 0x00000002, 0x00000000)) | 422 | if (!nv_wait(dev, 0x070000, 0x00000002, 0x00000000)) |
416 | NV_ERROR(dev, "PRAMIN flush timeout\n"); | 423 | NV_ERROR(dev, "PRAMIN flush timeout\n"); |
424 | spin_unlock(&dev_priv->ramin_lock); | ||
417 | } | 425 | } |
418 | 426 | ||
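Both PRAMIN flush entry points above now take dev_priv->ramin_lock around the write-then-poll sequence, so concurrent callers cannot interleave flush requests on the same register. A minimal sketch of that pattern in isolation, with hypothetical names (this is not the nouveau API):

#include <linux/delay.h>
#include <linux/io.h>
#include <linux/printk.h>
#include <linux/spinlock.h>
#include <linux/types.h>

/* Hypothetical stand-ins for dev_priv->ramin_lock and the flush register. */
struct flush_ctx {
	spinlock_t lock;	/* plays the role of ramin_lock */
	void __iomem *mmio;	/* mapped register window */
};

static void flush_serialized(struct flush_ctx *ctx, u32 reg, u32 busy_bit)
{
	unsigned int tries = 1000;

	spin_lock(&ctx->lock);
	writel(1, ctx->mmio + reg);		/* kick the flush */
	while (--tries && (readl(ctx->mmio + reg) & busy_bit))
		udelay(1);			/* poll until the busy bit clears */
	spin_unlock(&ctx->lock);

	if (!tries)
		pr_err("flush timeout on reg 0x%06x\n", reg);
}

In the patch itself the poll is done through nv_wait() and the timeout message through NV_ERROR(); only the lock/unlock bracketing shown here is the point of the change.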
diff --git a/drivers/gpu/drm/nouveau/nv50_vm.c b/drivers/gpu/drm/nouveau/nv50_vm.c index 459ff08241e5..6144156f255a 100644 --- a/drivers/gpu/drm/nouveau/nv50_vm.c +++ b/drivers/gpu/drm/nouveau/nv50_vm.c | |||
@@ -169,7 +169,11 @@ nv50_vm_flush(struct nouveau_vm *vm) | |||
169 | void | 169 | void |
170 | nv50_vm_flush_engine(struct drm_device *dev, int engine) | 170 | nv50_vm_flush_engine(struct drm_device *dev, int engine) |
171 | { | 171 | { |
172 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
173 | |||
174 | spin_lock(&dev_priv->ramin_lock); | ||
172 | nv_wr32(dev, 0x100c80, (engine << 16) | 1); | 175 | nv_wr32(dev, 0x100c80, (engine << 16) | 1); |
173 | if (!nv_wait(dev, 0x100c80, 0x00000001, 0x00000000)) | 176 | if (!nv_wait(dev, 0x100c80, 0x00000001, 0x00000000)) |
174 | NV_ERROR(dev, "vm flush timeout: engine %d\n", engine); | 177 | NV_ERROR(dev, "vm flush timeout: engine %d\n", engine); |
178 | spin_unlock(&dev_priv->ramin_lock); | ||
175 | } | 179 | } |
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index 095bc507fb16..a4e5e53e0a62 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c | |||
@@ -557,9 +557,9 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, | |||
557 | 557 | ||
558 | /* use recommended ref_div for ss */ | 558 | /* use recommended ref_div for ss */ |
559 | if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { | 559 | if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { |
560 | pll->flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP; | ||
561 | if (ss_enabled) { | 560 | if (ss_enabled) { |
562 | if (ss->refdiv) { | 561 | if (ss->refdiv) { |
562 | pll->flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP; | ||
563 | pll->flags |= RADEON_PLL_USE_REF_DIV; | 563 | pll->flags |= RADEON_PLL_USE_REF_DIV; |
564 | pll->reference_div = ss->refdiv; | 564 | pll->reference_div = ss->refdiv; |
565 | if (ASIC_IS_AVIVO(rdev)) | 565 | if (ASIC_IS_AVIVO(rdev)) |
@@ -662,10 +662,12 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, | |||
662 | index, (uint32_t *)&args); | 662 | index, (uint32_t *)&args); |
663 | adjusted_clock = le32_to_cpu(args.v3.sOutput.ulDispPllFreq) * 10; | 663 | adjusted_clock = le32_to_cpu(args.v3.sOutput.ulDispPllFreq) * 10; |
664 | if (args.v3.sOutput.ucRefDiv) { | 664 | if (args.v3.sOutput.ucRefDiv) { |
665 | pll->flags |= RADEON_PLL_USE_FRAC_FB_DIV; | ||
665 | pll->flags |= RADEON_PLL_USE_REF_DIV; | 666 | pll->flags |= RADEON_PLL_USE_REF_DIV; |
666 | pll->reference_div = args.v3.sOutput.ucRefDiv; | 667 | pll->reference_div = args.v3.sOutput.ucRefDiv; |
667 | } | 668 | } |
668 | if (args.v3.sOutput.ucPostDiv) { | 669 | if (args.v3.sOutput.ucPostDiv) { |
670 | pll->flags |= RADEON_PLL_USE_FRAC_FB_DIV; | ||
669 | pll->flags |= RADEON_PLL_USE_POST_DIV; | 671 | pll->flags |= RADEON_PLL_USE_POST_DIV; |
670 | pll->post_div = args.v3.sOutput.ucPostDiv; | 672 | pll->post_div = args.v3.sOutput.ucPostDiv; |
671 | } | 673 | } |
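The two hunks above defer RADEON_PLL_PREFER_MINM_OVER_MAXP and RADEON_PLL_USE_FRAC_FB_DIV until a usable divider is actually reported (ss->refdiv and ucPostDiv respectively). Downstream, a raised USE_*_DIV flag tells the divider search to keep that value fixed instead of scanning its range; a rough, simplified sketch of that consumer side (names are illustrative, not the exact radeon code):

/* Simplified illustration of how USE_REF_DIV / USE_POST_DIV style flags
 * are honoured by a divider search: a raised flag pins the divider to the
 * BIOS-reported value, otherwise the full range is scanned. */
#define PLL_USE_REF_DIV		(1 << 0)
#define PLL_USE_POST_DIV	(1 << 1)

struct pll_params {
	unsigned int flags;
	unsigned int reference_div;	/* meaningful only with PLL_USE_REF_DIV */
	unsigned int post_div;		/* meaningful only with PLL_USE_POST_DIV */
	unsigned int min_ref_div, max_ref_div;
	unsigned int min_post_div, max_post_div;
};

static void pick_div_range(const struct pll_params *pll,
			   unsigned int *ref_lo, unsigned int *ref_hi,
			   unsigned int *post_lo, unsigned int *post_hi)
{
	if (pll->flags & PLL_USE_REF_DIV) {
		*ref_lo = *ref_hi = pll->reference_div;	/* pinned by BIOS */
	} else {
		*ref_lo = pll->min_ref_div;
		*ref_hi = pll->max_ref_div;
	}
	if (pll->flags & PLL_USE_POST_DIV) {
		*post_lo = *post_hi = pll->post_div;	/* pinned by BIOS */
	} else {
		*post_lo = pll->min_post_div;
		*post_hi = pll->max_post_div;
	}
}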
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c index 56deae5bf02e..93fa735c8c1a 100644 --- a/drivers/gpu/drm/radeon/r100.c +++ b/drivers/gpu/drm/radeon/r100.c | |||
@@ -3490,7 +3490,7 @@ void r100_cs_track_clear(struct radeon_device *rdev, struct r100_cs_track *track | |||
3490 | track->num_texture = 16; | 3490 | track->num_texture = 16; |
3491 | track->maxy = 4096; | 3491 | track->maxy = 4096; |
3492 | track->separate_cube = 0; | 3492 | track->separate_cube = 0; |
3493 | track->aaresolve = true; | 3493 | track->aaresolve = false; |
3494 | track->aa.robj = NULL; | 3494 | track->aa.robj = NULL; |
3495 | } | 3495 | } |
3496 | 3496 | ||
@@ -3801,8 +3801,6 @@ static int r100_startup(struct radeon_device *rdev) | |||
3801 | r100_mc_program(rdev); | 3801 | r100_mc_program(rdev); |
3802 | /* Resume clock */ | 3802 | /* Resume clock */ |
3803 | r100_clock_startup(rdev); | 3803 | r100_clock_startup(rdev); |
3804 | /* Initialize GPU configuration (# pipes, ...) */ | ||
3805 | // r100_gpu_init(rdev); | ||
3806 | /* Initialize GART (initialize after TTM so we can allocate | 3804 | /* Initialize GART (initialize after TTM so we can allocate |
3807 | * memory through TTM but finalize after TTM) */ | 3805 | * memory through TTM but finalize after TTM) */ |
3808 | r100_enable_bm(rdev); | 3806 | r100_enable_bm(rdev); |
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c index 768c60ee4ab6..069efa8c8ecf 100644 --- a/drivers/gpu/drm/radeon/r300.c +++ b/drivers/gpu/drm/radeon/r300.c | |||
@@ -910,6 +910,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p, | |||
910 | track->textures[i].compress_format = R100_TRACK_COMP_NONE; | 910 | track->textures[i].compress_format = R100_TRACK_COMP_NONE; |
911 | break; | 911 | break; |
912 | case R300_TX_FORMAT_X16: | 912 | case R300_TX_FORMAT_X16: |
913 | case R300_TX_FORMAT_FL_I16: | ||
913 | case R300_TX_FORMAT_Y8X8: | 914 | case R300_TX_FORMAT_Y8X8: |
914 | case R300_TX_FORMAT_Z5Y6X5: | 915 | case R300_TX_FORMAT_Z5Y6X5: |
915 | case R300_TX_FORMAT_Z6Y5X5: | 916 | case R300_TX_FORMAT_Z6Y5X5: |
@@ -922,6 +923,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p, | |||
922 | track->textures[i].compress_format = R100_TRACK_COMP_NONE; | 923 | track->textures[i].compress_format = R100_TRACK_COMP_NONE; |
923 | break; | 924 | break; |
924 | case R300_TX_FORMAT_Y16X16: | 925 | case R300_TX_FORMAT_Y16X16: |
926 | case R300_TX_FORMAT_FL_I16A16: | ||
925 | case R300_TX_FORMAT_Z11Y11X10: | 927 | case R300_TX_FORMAT_Z11Y11X10: |
926 | case R300_TX_FORMAT_Z10Y11X11: | 928 | case R300_TX_FORMAT_Z10Y11X11: |
927 | case R300_TX_FORMAT_W8Z8Y8X8: | 929 | case R300_TX_FORMAT_W8Z8Y8X8: |
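The two FL_I16* formats join the existing format groups of the texture switch, which the command-stream checker uses to derive bytes per texel for size validation. A condensed sketch of that mapping, assuming the groups assign 2 and 4 bytes per texel as the format names suggest (illustrative, not the full r300 switch):

#include <linux/errno.h>
#include <linux/types.h>
#include "r300_reg.h"	/* provides the R300_TX_FORMAT_* values */

/* Condensed, illustrative mapping from texture format to bytes per texel;
 * the real checker stores this in track->textures[i].cpp.  Grouping mirrors
 * the switch above: FL_I16 with the other 16-bit formats, FL_I16A16 with
 * the 32-bit ones. */
static int tex_bytes_per_texel(u32 format)
{
	switch (format) {
	case R300_TX_FORMAT_X16:
	case R300_TX_FORMAT_FL_I16:
	case R300_TX_FORMAT_Y8X8:
		return 2;
	case R300_TX_FORMAT_Y16X16:
	case R300_TX_FORMAT_FL_I16A16:
	case R300_TX_FORMAT_W8Z8Y8X8:
		return 4;
	default:
		return -EINVAL;	/* unknown format: reject the command stream */
	}
}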
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index 0e657095de7c..3e7e7f9eb781 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c | |||
@@ -971,7 +971,7 @@ void radeon_compute_pll_legacy(struct radeon_pll *pll, | |||
971 | max_fractional_feed_div = pll->max_frac_feedback_div; | 971 | max_fractional_feed_div = pll->max_frac_feedback_div; |
972 | } | 972 | } |
973 | 973 | ||
974 | for (post_div = min_post_div; post_div <= max_post_div; ++post_div) { | 974 | for (post_div = max_post_div; post_div >= min_post_div; --post_div) { |
975 | uint32_t ref_div; | 975 | uint32_t ref_div; |
976 | 976 | ||
977 | if ((pll->flags & RADEON_PLL_NO_ODD_POST_DIV) && (post_div & 1)) | 977 | if ((pll->flags & RADEON_PLL_NO_ODD_POST_DIV) && (post_div & 1)) |
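Reversing the loop makes the legacy PLL search try the largest post divider first, so when several dividers satisfy the constraints the higher one wins. A stripped-down sketch of the reversed ordering, with a hypothetical fits() callback standing in for the inner feedback/reference divider search:

#include <linux/types.h>

/* Walk post dividers high-to-low, mirroring the reversed loop above.
 * Assumes min_post_div >= 1 (as in the radeon code); with 0 the unsigned
 * comparison would never terminate. */
static u32 pick_post_div(u32 min_post_div, u32 max_post_div,
			 bool no_odd_post_div,
			 bool (*fits)(u32 post_div, void *ctx), void *ctx)
{
	u32 post_div;

	for (post_div = max_post_div; post_div >= min_post_div; --post_div) {
		if (no_odd_post_div && (post_div & 1))
			continue;		/* some PLLs cannot use odd post dividers */
		if (fits(post_div, ctx))
			return post_div;	/* prefer the largest workable divider */
	}
	return 0;				/* no divider satisfied the constraints */
}

In the driver the body of the loop goes on to search feedback and reference dividers for each post divider; only the iteration order changed in this hunk.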
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c index 66324b5bb5ba..cc44bdfec80f 100644 --- a/drivers/gpu/drm/radeon/radeon_fb.c +++ b/drivers/gpu/drm/radeon/radeon_fb.c | |||
@@ -113,11 +113,14 @@ static int radeonfb_create_pinned_object(struct radeon_fbdev *rfbdev, | |||
113 | u32 tiling_flags = 0; | 113 | u32 tiling_flags = 0; |
114 | int ret; | 114 | int ret; |
115 | int aligned_size, size; | 115 | int aligned_size, size; |
116 | int height = mode_cmd->height; | ||
116 | 117 | ||
117 | /* need to align pitch with crtc limits */ | 118 | /* need to align pitch with crtc limits */ |
118 | mode_cmd->pitch = radeon_align_pitch(rdev, mode_cmd->width, mode_cmd->bpp, fb_tiled) * ((mode_cmd->bpp + 1) / 8); | 119 | mode_cmd->pitch = radeon_align_pitch(rdev, mode_cmd->width, mode_cmd->bpp, fb_tiled) * ((mode_cmd->bpp + 1) / 8); |
119 | 120 | ||
120 | size = mode_cmd->pitch * mode_cmd->height; | 121 | if (rdev->family >= CHIP_R600) |
122 | height = ALIGN(mode_cmd->height, 8); | ||
123 | size = mode_cmd->pitch * height; | ||
121 | aligned_size = ALIGN(size, PAGE_SIZE); | 124 | aligned_size = ALIGN(size, PAGE_SIZE); |
122 | ret = radeon_gem_object_create(rdev, aligned_size, 0, | 125 | ret = radeon_gem_object_create(rdev, aligned_size, 0, |
123 | RADEON_GEM_DOMAIN_VRAM, | 126 | RADEON_GEM_DOMAIN_VRAM, |