Diffstat (limited to 'drivers/gpu/drm')
 drivers/gpu/drm/drm_crtc_helper.c          |  27
 drivers/gpu/drm/i915/i915_debugfs.c        |   1
 drivers/gpu/drm/i915/i915_dma.c            |  10
 drivers/gpu/drm/i915/i915_drv.c            |  43
 drivers/gpu/drm/i915/i915_drv.h            |  18
 drivers/gpu/drm/i915/i915_gem_execbuffer.c |  19
 drivers/gpu/drm/i915/i915_reg.h            |  30
 drivers/gpu/drm/i915/intel_display.c       |  89
 drivers/gpu/drm/i915/intel_dp.c            | 173
 drivers/gpu/drm/i915/intel_drv.h           |   1
 drivers/gpu/drm/i915/intel_lvds.c          |   8
 drivers/gpu/drm/i915/intel_panel.c         |  16
 drivers/gpu/drm/i915/intel_sdvo.c          |  36
 drivers/gpu/drm/nouveau/nouveau_display.c  |  45
 drivers/gpu/drm/nouveau/nouveau_drv.c      |   4
 drivers/gpu/drm/nouveau/nouveau_drv.h      |   6
 drivers/gpu/drm/nouveau/nouveau_object.c   |   2
 drivers/gpu/drm/nouveau/nouveau_sgdma.c    |   3
 drivers/gpu/drm/nouveau/nv50_display.c     |   4
 drivers/gpu/drm/nouveau/nvc0_graph.c       |   2
 drivers/gpu/drm/nouveau/nvd0_display.c     |   2
 drivers/gpu/drm/radeon/atombios_crtc.c     |  35
 drivers/gpu/drm/radeon/evergreen.c         |   7
 drivers/gpu/drm/radeon/evergreen_cs.c      | 149
 drivers/gpu/drm/radeon/evergreen_reg.h     |  29
 drivers/gpu/drm/radeon/evergreend.h        |  31
 drivers/gpu/drm/radeon/r100.c              |   7
 drivers/gpu/drm/radeon/radeon_acpi.c       |  11
 drivers/gpu/drm/radeon/radeon_encoders.c   |   7
 drivers/gpu/drm/radeon/rs600.c             |   7
 drivers/gpu/drm/radeon/rv770.c             |   7
 drivers/gpu/drm/vmwgfx/vmwgfx_drv.h        |   5
 drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c       |   8
 drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c      |  20
 drivers/gpu/drm/vmwgfx/vmwgfx_kms.c        | 405
 drivers/gpu/drm/vmwgfx/vmwgfx_kms.h        |   5
 drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c        |  22
 drivers/gpu/drm/vmwgfx/vmwgfx_resource.c   |  23
 38 files changed, 1018 insertions(+), 299 deletions(-)
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 3969f7553fe7..d2619d72cece 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -456,6 +456,30 @@ done:
 EXPORT_SYMBOL(drm_crtc_helper_set_mode);
 
 
+static int
+drm_crtc_helper_disable(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_connector *connector;
+	struct drm_encoder *encoder;
+
+	/* Decouple all encoders and their attached connectors from this crtc */
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+		if (encoder->crtc != crtc)
+			continue;
+
+		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+			if (connector->encoder != encoder)
+				continue;
+
+			connector->encoder = NULL;
+		}
+	}
+
+	drm_helper_disable_unused_functions(dev);
+	return 0;
+}
+
 /**
  * drm_crtc_helper_set_config - set a new config from userspace
  * @crtc: CRTC to setup
@@ -510,8 +534,7 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
 			(int)set->num_connectors, set->x, set->y);
 	} else {
 		DRM_DEBUG_KMS("[CRTC:%d] [NOFB]\n", set->crtc->base.id);
-		set->mode = NULL;
-		set->num_connectors = 0;
+		return drm_crtc_helper_disable(set->crtc);
 	}
 
 	dev = set->crtc->dev;
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index d09a6e02dc95..004b048c5192 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -62,6 +62,7 @@ static int i915_capabilities(struct seq_file *m, void *data)
 	const struct intel_device_info *info = INTEL_INFO(dev);
 
 	seq_printf(m, "gen: %d\n", info->gen);
+	seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev));
 #define B(x) seq_printf(m, #x ": %s\n", yesno(info->x))
 	B(is_mobile);
 	B(is_i85x);
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index a9533c54c93c..a9ae374861e7 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1454,6 +1454,14 @@ unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
 
 	diff1 = now - dev_priv->last_time1;
 
+	/* Prevent division-by-zero if we are asking too fast.
+	 * Also, we don't get interesting results if we are polling
+	 * faster than once in 10ms, so just return the saved value
+	 * in such cases.
+	 */
+	if (diff1 <= 10)
+		return dev_priv->chipset_power;
+
 	count1 = I915_READ(DMIEC);
 	count2 = I915_READ(DDREC);
 	count3 = I915_READ(CSIEC);
@@ -1484,6 +1492,8 @@ unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
 	dev_priv->last_count1 = total_count;
 	dev_priv->last_time1 = now;
 
+	dev_priv->chipset_power = ret;
+
 	return ret;
 }
 
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 15bfa9145d2b..a1103fc6597d 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -58,15 +58,15 @@ module_param_named(powersave, i915_powersave, int, 0600);
 MODULE_PARM_DESC(powersave,
 		"Enable powersavings, fbc, downclocking, etc. (default: true)");
 
-unsigned int i915_semaphores __read_mostly = 0;
+int i915_semaphores __read_mostly = -1;
 module_param_named(semaphores, i915_semaphores, int, 0600);
 MODULE_PARM_DESC(semaphores,
-		"Use semaphores for inter-ring sync (default: false)");
+		"Use semaphores for inter-ring sync (default: -1 (use per-chip defaults))");
 
-unsigned int i915_enable_rc6 __read_mostly = 0;
+int i915_enable_rc6 __read_mostly = -1;
 module_param_named(i915_enable_rc6, i915_enable_rc6, int, 0600);
 MODULE_PARM_DESC(i915_enable_rc6,
-		"Enable power-saving render C-state 6 (default: true)");
+		"Enable power-saving render C-state 6 (default: -1 (use per-chip default)");
 
 int i915_enable_fbc __read_mostly = -1;
 module_param_named(i915_enable_fbc, i915_enable_fbc, int, 0600);
@@ -328,7 +328,7 @@ void intel_detect_pch(struct drm_device *dev)
 	}
 }
 
-static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
+void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
 {
 	int count;
 
@@ -344,6 +344,22 @@ static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
 		udelay(10);
 }
 
+void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
+{
+	int count;
+
+	count = 0;
+	while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_MT_ACK) & 1))
+		udelay(10);
+
+	I915_WRITE_NOTRACE(FORCEWAKE_MT, (1<<16) | 1);
+	POSTING_READ(FORCEWAKE_MT);
+
+	count = 0;
+	while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_MT_ACK) & 1) == 0)
+		udelay(10);
+}
+
 /*
  * Generally this is called implicitly by the register read function. However,
  * if some sequence requires the GT to not power down then this function should
@@ -356,15 +372,21 @@ void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
 
 	/* Forcewake is atomic in case we get in here without the lock */
 	if (atomic_add_return(1, &dev_priv->forcewake_count) == 1)
-		__gen6_gt_force_wake_get(dev_priv);
+		dev_priv->display.force_wake_get(dev_priv);
 }
 
-static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
+void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
 {
 	I915_WRITE_NOTRACE(FORCEWAKE, 0);
 	POSTING_READ(FORCEWAKE);
 }
 
+void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv)
+{
+	I915_WRITE_NOTRACE(FORCEWAKE_MT, (1<<16) | 0);
+	POSTING_READ(FORCEWAKE_MT);
+}
+
 /*
  * see gen6_gt_force_wake_get()
  */
@@ -373,7 +395,7 @@ void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
 	WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
 
 	if (atomic_dec_and_test(&dev_priv->forcewake_count))
-		__gen6_gt_force_wake_put(dev_priv);
+		dev_priv->display.force_wake_put(dev_priv);
 }
 
 void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
@@ -903,8 +925,9 @@ MODULE_LICENSE("GPL and additional rights");
 /* We give fast paths for the really cool registers */
 #define NEEDS_FORCE_WAKE(dev_priv, reg) \
 	(((dev_priv)->info->gen >= 6) && \
 	 ((reg) < 0x40000) && \
-	 ((reg) != FORCEWAKE))
+	 ((reg) != FORCEWAKE) && \
+	 ((reg) != ECOBUS))
 
 #define __i915_read(x, y) \
 u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 4a9c1b979804..554bef7a3b9c 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -107,6 +107,7 @@ struct opregion_header;
 struct opregion_acpi;
 struct opregion_swsci;
 struct opregion_asle;
+struct drm_i915_private;
 
 struct intel_opregion {
 	struct opregion_header *header;
@@ -221,6 +222,8 @@ struct drm_i915_display_funcs {
 			  struct drm_i915_gem_object *obj);
 	int (*update_plane)(struct drm_crtc *crtc, struct drm_framebuffer *fb,
 			    int x, int y);
+	void (*force_wake_get)(struct drm_i915_private *dev_priv);
+	void (*force_wake_put)(struct drm_i915_private *dev_priv);
 	/* clock updates for mode set */
 	/* cursor updates */
 	/* render clock increase/decrease */
@@ -710,6 +713,7 @@ typedef struct drm_i915_private {
 
 	u64 last_count1;
 	unsigned long last_time1;
+	unsigned long chipset_power;
 	u64 last_count2;
 	struct timespec last_time2;
 	unsigned long gfx_power;
@@ -998,11 +1002,11 @@ extern int i915_max_ioctl;
 extern unsigned int i915_fbpercrtc __always_unused;
 extern int i915_panel_ignore_lid __read_mostly;
 extern unsigned int i915_powersave __read_mostly;
-extern unsigned int i915_semaphores __read_mostly;
+extern int i915_semaphores __read_mostly;
 extern unsigned int i915_lvds_downclock __read_mostly;
 extern int i915_panel_use_ssc __read_mostly;
 extern int i915_vbt_sdvo_panel_type __read_mostly;
-extern unsigned int i915_enable_rc6 __read_mostly;
+extern int i915_enable_rc6 __read_mostly;
 extern int i915_enable_fbc __read_mostly;
 extern bool i915_enable_hangcheck __read_mostly;
 
@@ -1308,6 +1312,11 @@ extern void gen6_set_rps(struct drm_device *dev, u8 val);
 extern void intel_detect_pch(struct drm_device *dev);
 extern int intel_trans_dp_port_sel(struct drm_crtc *crtc);
 
+extern void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv);
+extern void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv);
+extern void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv);
+extern void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv);
+
 /* overlay */
 #ifdef CONFIG_DEBUG_FS
 extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev);
@@ -1352,8 +1361,9 @@ void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv);
 /* We give fast paths for the really cool registers */
 #define NEEDS_FORCE_WAKE(dev_priv, reg) \
 	(((dev_priv)->info->gen >= 6) && \
 	 ((reg) < 0x40000) && \
-	 ((reg) != FORCEWAKE))
+	 ((reg) != FORCEWAKE) && \
+	 ((reg) != ECOBUS))
 
 #define __i915_read(x, y) \
 	u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg);
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 3693e83a97f3..c681dc149d2a 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -32,6 +32,7 @@
 #include "i915_drv.h"
 #include "i915_trace.h"
 #include "intel_drv.h"
+#include <linux/dma_remapping.h>
 
 struct change_domains {
 	uint32_t invalidate_domains;
@@ -746,6 +747,22 @@ i915_gem_execbuffer_flush(struct drm_device *dev,
 	return 0;
 }
 
+static bool
+intel_enable_semaphores(struct drm_device *dev)
+{
+	if (INTEL_INFO(dev)->gen < 6)
+		return 0;
+
+	if (i915_semaphores >= 0)
+		return i915_semaphores;
+
+	/* Enable semaphores on SNB when IO remapping is off */
+	if (INTEL_INFO(dev)->gen == 6)
+		return !intel_iommu_enabled;
+
+	return 1;
+}
+
 static int
 i915_gem_execbuffer_sync_rings(struct drm_i915_gem_object *obj,
 			       struct intel_ring_buffer *to)
@@ -758,7 +775,7 @@ i915_gem_execbuffer_sync_rings(struct drm_i915_gem_object *obj,
 		return 0;
 
 	/* XXX gpu semaphores are implicated in various hard hangs on SNB */
-	if (INTEL_INFO(obj->base.dev)->gen < 6 || !i915_semaphores)
+	if (!intel_enable_semaphores(obj->base.dev))
 		return i915_gem_object_wait_rendering(obj);
 
 	idx = intel_ring_sync_index(from, to);
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index b080cc824001..a26d5b0a3690 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -3303,10 +3303,10 @@
 /* or SDVOB */
 #define HDMIB	0xe1140
 #define  PORT_ENABLE	(1 << 31)
-#define  TRANSCODER_A	(0)
-#define  TRANSCODER_B	(1 << 30)
-#define  TRANSCODER(pipe)	((pipe) << 30)
-#define  TRANSCODER_MASK	(1 << 30)
+#define  TRANSCODER(pipe)	((pipe) << 30)
+#define  TRANSCODER_CPT(pipe)	((pipe) << 29)
+#define  TRANSCODER_MASK	(1 << 30)
+#define  TRANSCODER_MASK_CPT	(3 << 29)
 #define  COLOR_FORMAT_8bpc	(0)
 #define  COLOR_FORMAT_12bpc	(3 << 26)
 #define  SDVOB_HOTPLUG_ENABLE	(1 << 23)
@@ -3447,8 +3447,30 @@
 #define  EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B	(0x38<<22)
 #define  EDP_LINK_TRAIN_VOL_EMP_MASK_SNB	(0x3f<<22)
 
+/* IVB */
+#define EDP_LINK_TRAIN_400MV_0DB_IVB		(0x24 <<22)
+#define EDP_LINK_TRAIN_400MV_3_5DB_IVB		(0x2a <<22)
+#define EDP_LINK_TRAIN_400MV_6DB_IVB		(0x2f <<22)
+#define EDP_LINK_TRAIN_600MV_0DB_IVB		(0x30 <<22)
+#define EDP_LINK_TRAIN_600MV_3_5DB_IVB		(0x36 <<22)
+#define EDP_LINK_TRAIN_800MV_0DB_IVB		(0x38 <<22)
+#define EDP_LINK_TRAIN_800MV_3_5DB_IVB		(0x33 <<22)
+
+/* legacy values */
+#define EDP_LINK_TRAIN_500MV_0DB_IVB		(0x00 <<22)
+#define EDP_LINK_TRAIN_1000MV_0DB_IVB		(0x20 <<22)
+#define EDP_LINK_TRAIN_500MV_3_5DB_IVB		(0x02 <<22)
+#define EDP_LINK_TRAIN_1000MV_3_5DB_IVB		(0x22 <<22)
+#define EDP_LINK_TRAIN_1000MV_6DB_IVB		(0x23 <<22)
+
+#define  EDP_LINK_TRAIN_VOL_EMP_MASK_IVB	(0x3f<<22)
+
 #define FORCEWAKE				0xA18C
 #define FORCEWAKE_ACK				0x130090
+#define FORCEWAKE_MT				0xa188	/* multi-threaded */
+#define FORCEWAKE_MT_ACK			0x130040
+#define ECOBUS					0xa180
+#define FORCEWAKE_MT_ENABLE			(1<<5)
 
 #define GT_FIFO_FREE_ENTRIES			0x120008
 #define GT_FIFO_NUM_RESERVED_ENTRIES		20
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index e77a863a3833..d809b038ca88 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -38,8 +38,8 @@
 #include "i915_drv.h"
 #include "i915_trace.h"
 #include "drm_dp_helper.h"
-
 #include "drm_crtc_helper.h"
+#include <linux/dma_remapping.h>
 
 #define HAS_eDP (intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))
 
@@ -4670,6 +4670,7 @@ static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
 /**
  * intel_choose_pipe_bpp_dither - figure out what color depth the pipe should send
  * @crtc: CRTC structure
+ * @mode: requested mode
  *
  * A pipe may be connected to one or more outputs. Based on the depth of the
  * attached framebuffer, choose a good color depth to use on the pipe.
@@ -4681,13 +4682,15 @@ static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
  * HDMI supports only 8bpc or 12bpc, so clamp to 8bpc with dither for 10bpc
  * Displays may support a restricted set as well, check EDID and clamp as
  * appropriate.
+ * DP may want to dither down to 6bpc to fit larger modes
  *
  * RETURNS:
  * Dithering requirement (i.e. false if display bpc and pipe bpc match,
  * true if they don't match).
  */
 static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
-					 unsigned int *pipe_bpp)
+					 unsigned int *pipe_bpp,
+					 struct drm_display_mode *mode)
 {
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -4758,6 +4761,11 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
 		}
 	}
 
+	if (mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
+		DRM_DEBUG_KMS("Dithering DP to 6bpc\n");
+		display_bpc = 6;
+	}
+
 	/*
 	 * We could just drive the pipe at the highest bpc all the time and
 	 * enable dithering as needed, but that costs bandwidth. So choose
@@ -5019,6 +5027,16 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
 		pipeconf &= ~PIPECONF_DOUBLE_WIDE;
 	}
 
+	/* default to 8bpc */
+	pipeconf &= ~(PIPECONF_BPP_MASK | PIPECONF_DITHER_EN);
+	if (is_dp) {
+		if (mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
+			pipeconf |= PIPECONF_BPP_6 |
+				    PIPECONF_DITHER_EN |
+				    PIPECONF_DITHER_TYPE_SP;
+		}
+	}
+
 	dpll |= DPLL_VCO_ENABLE;
 
 	DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
@@ -5480,7 +5498,7 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
 	/* determine panel color depth */
 	temp = I915_READ(PIPECONF(pipe));
 	temp &= ~PIPE_BPC_MASK;
-	dither = intel_choose_pipe_bpp_dither(crtc, &pipe_bpp);
+	dither = intel_choose_pipe_bpp_dither(crtc, &pipe_bpp, mode);
 	switch (pipe_bpp) {
 	case 18:
 		temp |= PIPE_6BPC;
@@ -7189,11 +7207,16 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 	work->old_fb_obj = intel_fb->obj;
 	INIT_WORK(&work->work, intel_unpin_work_fn);
 
+	ret = drm_vblank_get(dev, intel_crtc->pipe);
+	if (ret)
+		goto free_work;
+
 	/* We borrow the event spin lock for protecting unpin_work */
 	spin_lock_irqsave(&dev->event_lock, flags);
 	if (intel_crtc->unpin_work) {
 		spin_unlock_irqrestore(&dev->event_lock, flags);
 		kfree(work);
+		drm_vblank_put(dev, intel_crtc->pipe);
 
 		DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
 		return -EBUSY;
@@ -7212,10 +7235,6 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 
 	crtc->fb = fb;
 
-	ret = drm_vblank_get(dev, intel_crtc->pipe);
-	if (ret)
-		goto cleanup_objs;
-
 	work->pending_flip_obj = obj;
 
 	work->enable_stall_check = true;
@@ -7238,7 +7257,6 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 
 cleanup_pending:
 	atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
-cleanup_objs:
 	drm_gem_object_unreference(&work->old_fb_obj->base);
 	drm_gem_object_unreference(&obj->base);
 	mutex_unlock(&dev->struct_mutex);
@@ -7247,6 +7265,8 @@ cleanup_objs:
 	intel_crtc->unpin_work = NULL;
 	spin_unlock_irqrestore(&dev->event_lock, flags);
 
+	drm_vblank_put(dev, intel_crtc->pipe);
+free_work:
 	kfree(work);
 
 	return ret;
@@ -7887,6 +7907,33 @@ void intel_init_emon(struct drm_device *dev)
 	dev_priv->corr = (lcfuse & LCFUSE_HIV_MASK);
 }
 
+static bool intel_enable_rc6(struct drm_device *dev)
+{
+	/*
+	 * Respect the kernel parameter if it is set
+	 */
+	if (i915_enable_rc6 >= 0)
+		return i915_enable_rc6;
+
+	/*
+	 * Disable RC6 on Ironlake
+	 */
+	if (INTEL_INFO(dev)->gen == 5)
+		return 0;
+
+	/*
+	 * Enable rc6 on Sandybridge if DMA remapping is disabled
+	 */
+	if (INTEL_INFO(dev)->gen == 6) {
+		DRM_DEBUG_DRIVER("Sandybridge: intel_iommu_enabled %s -- RC6 %sabled\n",
+				 intel_iommu_enabled ? "true" : "false",
+				 !intel_iommu_enabled ? "en" : "dis");
+		return !intel_iommu_enabled;
+	}
+	DRM_DEBUG_DRIVER("RC6 enabled\n");
+	return 1;
+}
+
 void gen6_enable_rps(struct drm_i915_private *dev_priv)
 {
 	u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
@@ -7923,7 +7970,7 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
 	I915_WRITE(GEN6_RC6p_THRESHOLD, 100000);
 	I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
 
-	if (i915_enable_rc6)
+	if (intel_enable_rc6(dev_priv->dev))
 		rc6_mask = GEN6_RC_CTL_RC6p_ENABLE |
 			   GEN6_RC_CTL_RC6_ENABLE;
 
@@ -8372,7 +8419,7 @@ void ironlake_enable_rc6(struct drm_device *dev)
 	/* rc6 disabled by default due to repeated reports of hanging during
 	 * boot and resume.
 	 */
-	if (!i915_enable_rc6)
+	if (!intel_enable_rc6(dev))
 		return;
 
 	mutex_lock(&dev->struct_mutex);
@@ -8491,6 +8538,28 @@ static void intel_init_display(struct drm_device *dev)
 
 	/* For FIFO watermark updates */
 	if (HAS_PCH_SPLIT(dev)) {
+		dev_priv->display.force_wake_get = __gen6_gt_force_wake_get;
+		dev_priv->display.force_wake_put = __gen6_gt_force_wake_put;
+
+		/* IVB configs may use multi-threaded forcewake */
+		if (IS_IVYBRIDGE(dev)) {
+			u32 ecobus;
+
+			mutex_lock(&dev->struct_mutex);
+			__gen6_gt_force_wake_mt_get(dev_priv);
+			ecobus = I915_READ(ECOBUS);
+			__gen6_gt_force_wake_mt_put(dev_priv);
+			mutex_unlock(&dev->struct_mutex);
+
+			if (ecobus & FORCEWAKE_MT_ENABLE) {
+				DRM_DEBUG_KMS("Using MT version of forcewake\n");
+				dev_priv->display.force_wake_get =
+					__gen6_gt_force_wake_mt_get;
+				dev_priv->display.force_wake_put =
+					__gen6_gt_force_wake_mt_put;
+			}
+		}
+
 		if (HAS_PCH_IBX(dev))
 			dev_priv->display.init_pch_clock_gating = ibx_init_clock_gating;
 		else if (HAS_PCH_CPT(dev))
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 4d0358fad937..92b041b66e49 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -208,13 +208,15 @@ intel_dp_link_clock(uint8_t link_bw)
  */
 
 static int
-intel_dp_link_required(struct intel_dp *intel_dp, int pixel_clock)
+intel_dp_link_required(struct intel_dp *intel_dp, int pixel_clock, int check_bpp)
 {
 	struct drm_crtc *crtc = intel_dp->base.base.crtc;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	int bpp = 24;
 
-	if (intel_crtc)
+	if (check_bpp)
+		bpp = check_bpp;
+	else if (intel_crtc)
 		bpp = intel_crtc->bpp;
 
 	return (pixel_clock * bpp + 9) / 10;
@@ -233,6 +235,7 @@ intel_dp_mode_valid(struct drm_connector *connector,
 	struct intel_dp *intel_dp = intel_attached_dp(connector);
 	int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_dp));
 	int max_lanes = intel_dp_max_lane_count(intel_dp);
+	int max_rate, mode_rate;
 
 	if (is_edp(intel_dp) && intel_dp->panel_fixed_mode) {
 		if (mode->hdisplay > intel_dp->panel_fixed_mode->hdisplay)
@@ -242,9 +245,17 @@ intel_dp_mode_valid(struct drm_connector *connector,
 			return MODE_PANEL;
 	}
 
-	if (intel_dp_link_required(intel_dp, mode->clock)
-	    > intel_dp_max_data_rate(max_link_clock, max_lanes))
-		return MODE_CLOCK_HIGH;
+	mode_rate = intel_dp_link_required(intel_dp, mode->clock, 0);
+	max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
+
+	if (mode_rate > max_rate) {
+		mode_rate = intel_dp_link_required(intel_dp,
+						   mode->clock, 18);
+		if (mode_rate > max_rate)
+			return MODE_CLOCK_HIGH;
+		else
+			mode->private_flags |= INTEL_MODE_DP_FORCE_6BPC;
+	}
 
 	if (mode->clock < 10000)
 		return MODE_CLOCK_LOW;
@@ -362,8 +373,8 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
 	 * clock divider.
 	 */
 	if (is_cpu_edp(intel_dp)) {
-		if (IS_GEN6(dev))
-			aux_clock_divider = 200; /* SNB eDP input clock at 400Mhz */
+		if (IS_GEN6(dev) || IS_GEN7(dev))
+			aux_clock_divider = 200; /* SNB & IVB eDP input clock at 400Mhz */
 		else
 			aux_clock_divider = 225; /* eDP input clock at 450Mhz */
 	} else if (HAS_PCH_SPLIT(dev))
@@ -672,6 +683,7 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
 	int lane_count, clock;
 	int max_lane_count = intel_dp_max_lane_count(intel_dp);
 	int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0;
+	int bpp = mode->private_flags & INTEL_MODE_DP_FORCE_6BPC ? 18 : 0;
 	static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 };
 
 	if (is_edp(intel_dp) && intel_dp->panel_fixed_mode) {
@@ -689,7 +701,7 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
 	for (clock = 0; clock <= max_clock; clock++) {
 		int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count);
 
-		if (intel_dp_link_required(intel_dp, mode->clock)
+		if (intel_dp_link_required(intel_dp, mode->clock, bpp)
 		    <= link_avail) {
 			intel_dp->link_bw = bws[clock];
 			intel_dp->lane_count = lane_count;
@@ -817,10 +829,11 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
 	}
 
 	/*
-	 * There are three kinds of DP registers:
+	 * There are four kinds of DP registers:
 	 *
 	 * 	IBX PCH
-	 * 	CPU
+	 * 	SNB CPU
+	 * 	IVB CPU
 	 * 	CPT PCH
 	 *
 	 * IBX PCH and CPU are the same for almost everything,
@@ -873,7 +886,25 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
 
 	/* Split out the IBX/CPU vs CPT settings */
 
-	if (!HAS_PCH_CPT(dev) || is_cpu_edp(intel_dp)) {
+	if (is_cpu_edp(intel_dp) && IS_GEN7(dev)) {
+		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
+			intel_dp->DP |= DP_SYNC_HS_HIGH;
+		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
+			intel_dp->DP |= DP_SYNC_VS_HIGH;
+		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
+
+		if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN)
+			intel_dp->DP |= DP_ENHANCED_FRAMING;
+
+		intel_dp->DP |= intel_crtc->pipe << 29;
+
+		/* don't miss out required setting for eDP */
+		intel_dp->DP |= DP_PLL_ENABLE;
+		if (adjusted_mode->clock < 200000)
+			intel_dp->DP |= DP_PLL_FREQ_160MHZ;
+		else
+			intel_dp->DP |= DP_PLL_FREQ_270MHZ;
+	} else if (!HAS_PCH_CPT(dev) || is_cpu_edp(intel_dp)) {
 		intel_dp->DP |= intel_dp->color_range;
 
 		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
@@ -1375,34 +1406,59 @@ static char *link_train_names[] = {
  * These are source-specific values; current Intel hardware supports
  * a maximum voltage of 800mV and a maximum pre-emphasis of 6dB
  */
-#define I830_DP_VOLTAGE_MAX	    DP_TRAIN_VOLTAGE_SWING_800
-#define I830_DP_VOLTAGE_MAX_CPT	    DP_TRAIN_VOLTAGE_SWING_1200
 
 static uint8_t
-intel_dp_pre_emphasis_max(uint8_t voltage_swing)
+intel_dp_voltage_max(struct intel_dp *intel_dp)
 {
-	switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
-	case DP_TRAIN_VOLTAGE_SWING_400:
-		return DP_TRAIN_PRE_EMPHASIS_6;
-	case DP_TRAIN_VOLTAGE_SWING_600:
-		return DP_TRAIN_PRE_EMPHASIS_6;
-	case DP_TRAIN_VOLTAGE_SWING_800:
-		return DP_TRAIN_PRE_EMPHASIS_3_5;
-	case DP_TRAIN_VOLTAGE_SWING_1200:
-	default:
-		return DP_TRAIN_PRE_EMPHASIS_0;
+	struct drm_device *dev = intel_dp->base.base.dev;
+
+	if (IS_GEN7(dev) && is_cpu_edp(intel_dp))
+		return DP_TRAIN_VOLTAGE_SWING_800;
+	else if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
+		return DP_TRAIN_VOLTAGE_SWING_1200;
+	else
+		return DP_TRAIN_VOLTAGE_SWING_800;
+}
+
+static uint8_t
+intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
+{
+	struct drm_device *dev = intel_dp->base.base.dev;
+
+	if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) {
+		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
+		case DP_TRAIN_VOLTAGE_SWING_400:
+			return DP_TRAIN_PRE_EMPHASIS_6;
+		case DP_TRAIN_VOLTAGE_SWING_600:
+		case DP_TRAIN_VOLTAGE_SWING_800:
+			return DP_TRAIN_PRE_EMPHASIS_3_5;
+		default:
+			return DP_TRAIN_PRE_EMPHASIS_0;
+		}
+	} else {
+		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
+		case DP_TRAIN_VOLTAGE_SWING_400:
+			return DP_TRAIN_PRE_EMPHASIS_6;
+		case DP_TRAIN_VOLTAGE_SWING_600:
+			return DP_TRAIN_PRE_EMPHASIS_6;
+		case DP_TRAIN_VOLTAGE_SWING_800:
+			return DP_TRAIN_PRE_EMPHASIS_3_5;
+		case DP_TRAIN_VOLTAGE_SWING_1200:
+		default:
+			return DP_TRAIN_PRE_EMPHASIS_0;
+		}
 	}
 }
 
 static void
 intel_get_adjust_train(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
 {
-	struct drm_device *dev = intel_dp->base.base.dev;
 	uint8_t v = 0;
 	uint8_t p = 0;
 	int lane;
 	uint8_t *adjust_request = link_status + (DP_ADJUST_REQUEST_LANE0_1 - DP_LANE0_1_STATUS);
-	int voltage_max;
+	uint8_t voltage_max;
+	uint8_t preemph_max;
 
 	for (lane = 0; lane < intel_dp->lane_count; lane++) {
 		uint8_t this_v = intel_get_adjust_request_voltage(adjust_request, lane);
@@ -1414,15 +1470,13 @@ intel_get_adjust_train(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_ST
 			p = this_p;
 	}
 
-	if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
-		voltage_max = I830_DP_VOLTAGE_MAX_CPT;
-	else
-		voltage_max = I830_DP_VOLTAGE_MAX;
+	voltage_max = intel_dp_voltage_max(intel_dp);
 	if (v >= voltage_max)
 		v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
 
-	if (p >= intel_dp_pre_emphasis_max(v))
-		p = intel_dp_pre_emphasis_max(v) | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
+	preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
+	if (p >= preemph_max)
+		p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
 
 	for (lane = 0; lane < 4; lane++)
 		intel_dp->train_set[lane] = v | p;
@@ -1494,6 +1548,37 @@ intel_gen6_edp_signal_levels(uint8_t train_set)
 	}
 }
 
+/* Gen7's DP voltage swing and pre-emphasis control */
+static uint32_t
+intel_gen7_edp_signal_levels(uint8_t train_set)
+{
+	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
+					 DP_TRAIN_PRE_EMPHASIS_MASK);
+	switch (signal_levels) {
+	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
+		return EDP_LINK_TRAIN_400MV_0DB_IVB;
+	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
+		return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
+	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
+		return EDP_LINK_TRAIN_400MV_6DB_IVB;
+
+	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
+		return EDP_LINK_TRAIN_600MV_0DB_IVB;
+	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
+		return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
+
+	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
+		return EDP_LINK_TRAIN_800MV_0DB_IVB;
+	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
+		return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
+
+	default:
+		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
+			      "0x%x\n", signal_levels);
+		return EDP_LINK_TRAIN_500MV_0DB_IVB;
+	}
+}
+
 static uint8_t
 intel_get_lane_status(uint8_t link_status[DP_LINK_STATUS_SIZE],
 		      int lane)
@@ -1599,7 +1684,8 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
 		    DP_LINK_CONFIGURATION_SIZE);
 
 	DP |= DP_PORT_EN;
-	if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
+
+	if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
 		DP &= ~DP_LINK_TRAIN_MASK_CPT;
 	else
 		DP &= ~DP_LINK_TRAIN_MASK;
@@ -1613,7 +1699,11 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
 		uint8_t	    link_status[DP_LINK_STATUS_SIZE];
 		uint32_t    signal_levels;
 
-		if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
+
+		if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) {
+			signal_levels = intel_gen7_edp_signal_levels(intel_dp->train_set[0]);
+			DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB) | signal_levels;
+		} else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
 			signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]);
 			DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
 		} else {
@@ -1622,7 +1712,7 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
 			DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
 		}
 
-		if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
+		if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
 			reg = DP | DP_LINK_TRAIN_PAT_1_CPT;
 		else
 			reg = DP | DP_LINK_TRAIN_PAT_1;
@@ -1703,7 +1793,10 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
 			break;
 		}
 
-		if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
+		if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) {
+			signal_levels = intel_gen7_edp_signal_levels(intel_dp->train_set[0]);
+			DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB) | signal_levels;
+		} else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
 			signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]);
 			DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
 		} else {
@@ -1711,7 +1804,7 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
 			DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
 		}
 
-		if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
+		if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
 			reg = DP | DP_LINK_TRAIN_PAT_2_CPT;
 		else
 			reg = DP | DP_LINK_TRAIN_PAT_2;
@@ -1752,7 +1845,7 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
 		++tries;
 	}
 
-	if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
+	if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
 		reg = DP | DP_LINK_TRAIN_OFF_CPT;
 	else
 		reg = DP | DP_LINK_TRAIN_OFF;
@@ -1782,7 +1875,7 @@ intel_dp_link_down(struct intel_dp *intel_dp)
 		udelay(100);
 	}
 
-	if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp)) {
+	if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) {
 		DP &= ~DP_LINK_TRAIN_MASK_CPT;
 		I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
 	} else {
@@ -1794,7 +1887,7 @@ intel_dp_link_down(struct intel_dp *intel_dp)
 	msleep(17);
 
 	if (is_edp(intel_dp)) {
-		if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
+		if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
 			DP |= DP_LINK_TRAIN_OFF_CPT;
 		else
 			DP |= DP_LINK_TRAIN_OFF;
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index bd9a604b73da..a1b4343814e8 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -110,6 +110,7 @@
 /* drm_display_mode->private_flags */
 #define INTEL_MODE_PIXEL_MULTIPLIER_SHIFT (0x0)
 #define INTEL_MODE_PIXEL_MULTIPLIER_MASK (0xf << INTEL_MODE_PIXEL_MULTIPLIER_SHIFT)
+#define INTEL_MODE_DP_FORCE_6BPC (0x10)
 
 static inline void
 intel_mode_set_pixel_multiplier(struct drm_display_mode *mode,
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 42f165a520de..e44191132ac4 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -715,6 +715,14 @@ static const struct dmi_system_id intel_no_lvds[] = {
 			DMI_MATCH(DMI_PRODUCT_NAME, "EB1007"),
 		},
 	},
+	{
+		.callback = intel_no_lvds_dmi_callback,
+		.ident = "Asus AT5NM10T-I",
+		.matches = {
+			DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
+			DMI_MATCH(DMI_BOARD_NAME, "AT5NM10T-I"),
+		},
+	},
 
 	{ }	/* terminating entry */
 };
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 21f60b7d69a3..04d79fd1dc9d 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -178,13 +178,10 @@ u32 intel_panel_get_max_backlight(struct drm_device *dev)
 	if (HAS_PCH_SPLIT(dev)) {
 		max >>= 16;
 	} else {
-		if (IS_PINEVIEW(dev)) {
+		if (INTEL_INFO(dev)->gen < 4)
 			max >>= 17;
-		} else {
+		else
 			max >>= 16;
-			if (INTEL_INFO(dev)->gen < 4)
-				max &= ~1;
-		}
 
 		if (is_backlight_combination_mode(dev))
 			max *= 0xff;
@@ -203,13 +200,12 @@ u32 intel_panel_get_backlight(struct drm_device *dev)
 		val = I915_READ(BLC_PWM_CPU_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
 	} else {
 		val = I915_READ(BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
-		if (IS_PINEVIEW(dev))
+		if (INTEL_INFO(dev)->gen < 4)
 			val >>= 1;
 
 		if (is_backlight_combination_mode(dev)) {
 			u8 lbpc;
 
-			val &= ~1;
 			pci_read_config_byte(dev->pdev, PCI_LBPC, &lbpc);
 			val *= lbpc;
 		}
@@ -246,11 +242,9 @@ static void intel_panel_actually_set_backlight(struct drm_device *dev, u32 level
 	}
 
 	tmp = I915_READ(BLC_PWM_CTL);
-	if (IS_PINEVIEW(dev)) {
-		tmp &= ~(BACKLIGHT_DUTY_CYCLE_MASK - 1);
+	if (INTEL_INFO(dev)->gen < 4)
 		level <<= 1;
-	} else
-		tmp &= ~BACKLIGHT_DUTY_CYCLE_MASK;
+	tmp &= ~BACKLIGHT_DUTY_CYCLE_MASK;
 	I915_WRITE(BLC_PWM_CTL, tmp | level);
 }
 
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 3003fb25aefd..f7b9268df266 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -50,6 +50,7 @@
 #define IS_TMDS(c)	(c->output_flag & SDVO_TMDS_MASK)
 #define IS_LVDS(c)	(c->output_flag & SDVO_LVDS_MASK)
 #define IS_TV_OR_LVDS(c) (c->output_flag & (SDVO_TV_MASK | SDVO_LVDS_MASK))
+#define IS_DIGITAL(c) (c->output_flag & (SDVO_TMDS_MASK | SDVO_LVDS_MASK))
 
 
 static const char *tv_format_names[] = {
@@ -1086,8 +1087,12 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
 		}
 		sdvox |= (9 << 19) | SDVO_BORDER_ENABLE;
 	}
-	if (intel_crtc->pipe == 1)
-		sdvox |= SDVO_PIPE_B_SELECT;
+
+	if (INTEL_PCH_TYPE(dev) >= PCH_CPT)
+		sdvox |= TRANSCODER_CPT(intel_crtc->pipe);
+	else
+		sdvox |= TRANSCODER(intel_crtc->pipe);
+
 	if (intel_sdvo->has_hdmi_audio)
 		sdvox |= SDVO_AUDIO_ENABLE;
 
@@ -1314,6 +1319,18 @@ intel_sdvo_tmds_sink_detect(struct drm_connector *connector)
 	return status;
 }
 
+static bool
+intel_sdvo_connector_matches_edid(struct intel_sdvo_connector *sdvo,
+				  struct edid *edid)
+{
+	bool monitor_is_digital = !!(edid->input & DRM_EDID_INPUT_DIGITAL);
+	bool connector_is_digital = !!IS_DIGITAL(sdvo);
+
+	DRM_DEBUG_KMS("connector_is_digital? %d, monitor_is_digital? %d\n",
+		      connector_is_digital, monitor_is_digital);
+	return connector_is_digital == monitor_is_digital;
+}
+
 static enum drm_connector_status
 intel_sdvo_detect(struct drm_connector *connector, bool force)
 {
@@ -1358,10 +1375,12 @@ intel_sdvo_detect(struct drm_connector *connector, bool force)
 	if (edid == NULL)
 		edid = intel_sdvo_get_analog_edid(connector);
 	if (edid != NULL) {
-		if (edid->input & DRM_EDID_INPUT_DIGITAL)
-			ret = connector_status_disconnected;
-		else
+		if (intel_sdvo_connector_matches_edid(intel_sdvo_connector,
+						      edid))
 			ret = connector_status_connected;
+		else
+			ret = connector_status_disconnected;
+
 		connector->display_info.raw_edid = NULL;
 		kfree(edid);
 	} else
@@ -1402,11 +1421,8 @@ static void intel_sdvo_get_ddc_modes(struct drm_connector *connector)
 	edid = intel_sdvo_get_analog_edid(connector);
 
 	if (edid != NULL) {
-		struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
-		bool monitor_is_digital = !!(edid->input & DRM_EDID_INPUT_DIGITAL);
-		bool connector_is_digital = !!IS_TMDS(intel_sdvo_connector);
-
-		if (connector_is_digital == monitor_is_digital) {
+		if (intel_sdvo_connector_matches_edid(to_intel_sdvo_connector(connector),
+						      edid)) {
 			drm_mode_connector_update_edid_property(connector, edid);
 			drm_add_edid_modes(connector, edid);
 		}
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index ddbabefb4273..b12fd2c80812 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -369,3 +369,48 @@ nouveau_finish_page_flip(struct nouveau_channel *chan,
 	spin_unlock_irqrestore(&dev->event_lock, flags);
 	return 0;
 }
+
+int
+nouveau_display_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
+			    struct drm_mode_create_dumb *args)
+{
+	struct nouveau_bo *bo;
+	int ret;
+
+	args->pitch = roundup(args->width * (args->bpp / 8), 256);
+	args->size = args->pitch * args->height;
+	args->size = roundup(args->size, PAGE_SIZE);
+
+	ret = nouveau_gem_new(dev, args->size, 0, TTM_PL_FLAG_VRAM, 0, 0, &bo);
+	if (ret)
+		return ret;
+
+	ret = drm_gem_handle_create(file_priv, bo->gem, &args->handle);
+	drm_gem_object_unreference_unlocked(bo->gem);
+	return ret;
+}
+
+int
+nouveau_display_dumb_destroy(struct drm_file *file_priv, struct drm_device *dev,
+			     uint32_t handle)
+{
+	return drm_gem_handle_delete(file_priv, handle);
+}
+
+int
+nouveau_display_dumb_map_offset(struct drm_file *file_priv,
+				struct drm_device *dev,
+				uint32_t handle, uint64_t *poffset)
+{
+	struct drm_gem_object *gem;
+
+	gem = drm_gem_object_lookup(dev, file_priv, handle);
+	if (gem) {
+		struct nouveau_bo *bo = gem->driver_private;
+		*poffset = bo->bo.addr_space_offset;
+		drm_gem_object_unreference_unlocked(gem);
+		return 0;
+	}
+
+	return -ENOENT;
+}
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c
index 9f7bb1295262..9791d13c9e3b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.c
@@ -433,6 +433,10 @@ static struct drm_driver driver = {
 	.gem_open_object = nouveau_gem_object_open,
 	.gem_close_object = nouveau_gem_object_close,
 
+	.dumb_create = nouveau_display_dumb_create,
+	.dumb_map_offset = nouveau_display_dumb_map_offset,
+	.dumb_destroy = nouveau_display_dumb_destroy,
+
 	.name = DRIVER_NAME,
 	.desc = DRIVER_DESC,
 #ifdef GIT_REVISION
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 29837da1098b..4c0be3a4ed88 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -1418,6 +1418,12 @@ int nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
 			   struct drm_pending_vblank_event *event);
 int nouveau_finish_page_flip(struct nouveau_channel *,
 			     struct nouveau_page_flip_state *);
+int nouveau_display_dumb_create(struct drm_file *, struct drm_device *,
+				struct drm_mode_create_dumb *args);
+int nouveau_display_dumb_map_offset(struct drm_file *, struct drm_device *,
+				    uint32_t handle, uint64_t *offset);
+int nouveau_display_dumb_destroy(struct drm_file *, struct drm_device *,
+				 uint32_t handle);
 
 /* nv10_gpio.c */
 int nv10_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag);
diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c
index 02222c540aee..960c0ae0c0c3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_object.c
+++ b/drivers/gpu/drm/nouveau/nouveau_object.c
@@ -680,7 +680,7 @@ nouveau_gpuobj_channel_init_pramin(struct nouveau_channel *chan)
680 return ret; 680 return ret;
681 } 681 }
682 682
683 ret = drm_mm_init(&chan->ramin_heap, base, size); 683 ret = drm_mm_init(&chan->ramin_heap, base, size - base);
684 if (ret) { 684 if (ret) {
685 NV_ERROR(dev, "Error creating PRAMIN heap: %d\n", ret); 685 NV_ERROR(dev, "Error creating PRAMIN heap: %d\n", ret);
686 nouveau_gpuobj_ref(NULL, &chan->ramin); 686 nouveau_gpuobj_ref(NULL, &chan->ramin);
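
The one-line change above fixes how the PRAMIN heap is initialised: drm_mm_init() takes a start offset and a length, so when the heap begins at base inside a region of size bytes, the manageable length is size - base rather than size. A hedged, self-contained illustration of the same start/length mix-up follows; the range struct, helper and numbers are made up for the example.

#include <assert.h>
#include <stdio.h>

struct range { unsigned long start, len; };

/* Mirrors the drm_mm_init(mm, start, size) convention: the third argument
 * is a length, not an end offset. */
static void range_init(struct range *r, unsigned long start, unsigned long len)
{
	r->start = start;
	r->len = len;
}

int main(void)
{
	unsigned long base = 0x1000, size = 0x10000;   /* illustrative numbers */
	struct range heap;

	/* Wrong: passing the end offset as a length overruns by "base" bytes. */
	/* range_init(&heap, base, size); */

	/* Right: the usable length of [base, size) is size - base. */
	range_init(&heap, base, size - base);
	assert(heap.start + heap.len == size);
	printf("heap: start=0x%lx len=0x%lx\n", heap.start, heap.len);
	return 0;
}
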
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index b75258a9fe44..c8a463b76c89 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -67,7 +67,10 @@ nouveau_sgdma_clear(struct ttm_backend *be)
67 pci_unmap_page(dev->pdev, nvbe->pages[nvbe->nr_pages], 67 pci_unmap_page(dev->pdev, nvbe->pages[nvbe->nr_pages],
68 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); 68 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
69 } 69 }
70 nvbe->unmap_pages = false;
70 } 71 }
72
73 nvbe->pages = NULL;
71} 74}
72 75
73static void 76static void
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index d23ca00e7d62..06de250fe617 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -616,7 +616,7 @@ nv50_display_unk10_handler(struct drm_device *dev)
616 struct drm_nouveau_private *dev_priv = dev->dev_private; 616 struct drm_nouveau_private *dev_priv = dev->dev_private;
617 struct nv50_display *disp = nv50_display(dev); 617 struct nv50_display *disp = nv50_display(dev);
618 u32 unk30 = nv_rd32(dev, 0x610030), mc; 618 u32 unk30 = nv_rd32(dev, 0x610030), mc;
619 int i, crtc, or, type = OUTPUT_ANY; 619 int i, crtc, or = 0, type = OUTPUT_ANY;
620 620
621 NV_DEBUG_KMS(dev, "0x610030: 0x%08x\n", unk30); 621 NV_DEBUG_KMS(dev, "0x610030: 0x%08x\n", unk30);
622 disp->irq.dcb = NULL; 622 disp->irq.dcb = NULL;
@@ -708,7 +708,7 @@ nv50_display_unk20_handler(struct drm_device *dev)
708 struct nv50_display *disp = nv50_display(dev); 708 struct nv50_display *disp = nv50_display(dev);
709 u32 unk30 = nv_rd32(dev, 0x610030), tmp, pclk, script, mc = 0; 709 u32 unk30 = nv_rd32(dev, 0x610030), tmp, pclk, script, mc = 0;
710 struct dcb_entry *dcb; 710 struct dcb_entry *dcb;
711 int i, crtc, or, type = OUTPUT_ANY; 711 int i, crtc, or = 0, type = OUTPUT_ANY;
712 712
713 NV_DEBUG_KMS(dev, "0x610030: 0x%08x\n", unk30); 713 NV_DEBUG_KMS(dev, "0x610030: 0x%08x\n", unk30);
714 dcb = disp->irq.dcb; 714 dcb = disp->irq.dcb;
diff --git a/drivers/gpu/drm/nouveau/nvc0_graph.c b/drivers/gpu/drm/nouveau/nvc0_graph.c
index a74e501afd25..ecfafd70cf0e 100644
--- a/drivers/gpu/drm/nouveau/nvc0_graph.c
+++ b/drivers/gpu/drm/nouveau/nvc0_graph.c
@@ -381,6 +381,8 @@ nvc0_graph_init_gpc_0(struct drm_device *dev)
381 u8 tpnr[GPC_MAX]; 381 u8 tpnr[GPC_MAX];
382 int i, gpc, tpc; 382 int i, gpc, tpc;
383 383
384 nv_wr32(dev, TP_UNIT(0, 0, 0x5c), 1); /* affects TFB offset queries */
385
384 /* 386 /*
385 * TP ROP UNKVAL(magic_not_rop_nr) 387 * TP ROP UNKVAL(magic_not_rop_nr)
386 * 450: 4/0/0/0 2 3 388 * 450: 4/0/0/0 2 3
diff --git a/drivers/gpu/drm/nouveau/nvd0_display.c b/drivers/gpu/drm/nouveau/nvd0_display.c
index 23d63b4b3d77..cb006a718e70 100644
--- a/drivers/gpu/drm/nouveau/nvd0_display.c
+++ b/drivers/gpu/drm/nouveau/nvd0_display.c
@@ -780,7 +780,7 @@ nvd0_sor_dpms(struct drm_encoder *encoder, int mode)
780 continue; 780 continue;
781 781
782 if (nv_partner != nv_encoder && 782 if (nv_partner != nv_encoder &&
783 nv_partner->dcb->or == nv_encoder->or) { 783 nv_partner->dcb->or == nv_encoder->dcb->or) {
784 if (nv_partner->last_dpms == DRM_MODE_DPMS_ON) 784 if (nv_partner->last_dpms == DRM_MODE_DPMS_ON)
785 return; 785 return;
786 break; 786 break;
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 87631fede1f8..2b97262e3ab1 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -1107,9 +1107,40 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
1107 return -EINVAL; 1107 return -EINVAL;
1108 } 1108 }
1109 1109
1110 if (tiling_flags & RADEON_TILING_MACRO) 1110 if (tiling_flags & RADEON_TILING_MACRO) {
1111 if (rdev->family >= CHIP_CAYMAN)
1112 tmp = rdev->config.cayman.tile_config;
1113 else
1114 tmp = rdev->config.evergreen.tile_config;
1115
1116 switch ((tmp & 0xf0) >> 4) {
1117 case 0: /* 4 banks */
1118 fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_4_BANK);
1119 break;
1120 case 1: /* 8 banks */
1121 default:
1122 fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_8_BANK);
1123 break;
1124 case 2: /* 16 banks */
1125 fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_16_BANK);
1126 break;
1127 }
1128
1129 switch ((tmp & 0xf000) >> 12) {
1130 case 0: /* 1KB rows */
1131 default:
1132 fb_format |= EVERGREEN_GRPH_TILE_SPLIT(EVERGREEN_ADDR_SURF_TILE_SPLIT_1KB);
1133 break;
1134 case 1: /* 2KB rows */
1135 fb_format |= EVERGREEN_GRPH_TILE_SPLIT(EVERGREEN_ADDR_SURF_TILE_SPLIT_2KB);
1136 break;
1137 case 2: /* 4KB rows */
1138 fb_format |= EVERGREEN_GRPH_TILE_SPLIT(EVERGREEN_ADDR_SURF_TILE_SPLIT_4KB);
1139 break;
1140 }
1141
1111 fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_2D_TILED_THIN1); 1142 fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_2D_TILED_THIN1);
1112 else if (tiling_flags & RADEON_TILING_MICRO) 1143 } else if (tiling_flags & RADEON_TILING_MICRO)
1113 fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_1D_TILED_THIN1); 1144 fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_1D_TILED_THIN1);
1114 1145
1115 switch (radeon_crtc->crtc_id) { 1146 switch (radeon_crtc->crtc_id) {
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 1d603a3335db..5e00d1670aa9 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -82,6 +82,7 @@ u32 evergreen_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
82{ 82{
83 struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id]; 83 struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
84 u32 tmp = RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset); 84 u32 tmp = RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset);
85 int i;
85 86
86 /* Lock the graphics update lock */ 87 /* Lock the graphics update lock */
87 tmp |= EVERGREEN_GRPH_UPDATE_LOCK; 88 tmp |= EVERGREEN_GRPH_UPDATE_LOCK;
@@ -99,7 +100,11 @@ u32 evergreen_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
99 (u32)crtc_base); 100 (u32)crtc_base);
100 101
101 /* Wait for update_pending to go high. */ 102 /* Wait for update_pending to go high. */
102 while (!(RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset) & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING)); 103 for (i = 0; i < rdev->usec_timeout; i++) {
104 if (RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset) & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING)
105 break;
106 udelay(1);
107 }
103 DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n"); 108 DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");
104 109
105 /* Unlock the lock, so double-buffering can take place inside vblank */ 110 /* Unlock the lock, so double-buffering can take place inside vblank */
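
This hunk, and the matching ones for r100, rs600 and rv770 further down, replace an unbounded busy-wait on the surface-update-pending bit with a loop that polls for at most rdev->usec_timeout microseconds. A standalone sketch of that bounded-polling pattern follows; the register stub, bit value and timeout are illustrative stand-ins for RREG32(), udelay() and the real hardware bit.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SURFACE_UPDATE_PENDING (1u << 2)   /* illustrative bit */

/* Stand-in for RREG32(): pretend the bit goes high after a few polls. */
static uint32_t read_update_reg(void)
{
	static int polls;
	return (++polls >= 3) ? SURFACE_UPDATE_PENDING : 0;
}

static void udelay_stub(unsigned int usec) { (void)usec; /* delay stub */ }

/* Returns true if the pending bit came up within usec_timeout microseconds. */
static bool wait_update_pending(unsigned int usec_timeout)
{
	unsigned int i;

	for (i = 0; i < usec_timeout; i++) {
		if (read_update_reg() & SURFACE_UPDATE_PENDING)
			return true;
		udelay_stub(1);
	}
	return false;   /* give up instead of spinning the CPU forever */
}

int main(void)
{
	printf("update pending: %s\n",
	       wait_update_pending(100000) ? "yes" : "timed out");
	return 0;
}
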
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c
index 38e1bda73d33..cd4590aae154 100644
--- a/drivers/gpu/drm/radeon/evergreen_cs.c
+++ b/drivers/gpu/drm/radeon/evergreen_cs.c
@@ -38,6 +38,7 @@ struct evergreen_cs_track {
38 u32 group_size; 38 u32 group_size;
39 u32 nbanks; 39 u32 nbanks;
40 u32 npipes; 40 u32 npipes;
41 u32 row_size;
41 /* value we track */ 42 /* value we track */
42 u32 nsamples; 43 u32 nsamples;
43 u32 cb_color_base_last[12]; 44 u32 cb_color_base_last[12];
@@ -77,6 +78,44 @@ struct evergreen_cs_track {
77 struct radeon_bo *db_s_write_bo; 78 struct radeon_bo *db_s_write_bo;
78}; 79};
79 80
81static u32 evergreen_cs_get_aray_mode(u32 tiling_flags)
82{
83 if (tiling_flags & RADEON_TILING_MACRO)
84 return ARRAY_2D_TILED_THIN1;
85 else if (tiling_flags & RADEON_TILING_MICRO)
86 return ARRAY_1D_TILED_THIN1;
87 else
88 return ARRAY_LINEAR_GENERAL;
89}
90
91static u32 evergreen_cs_get_num_banks(u32 nbanks)
92{
93 switch (nbanks) {
94 case 2:
95 return ADDR_SURF_2_BANK;
96 case 4:
97 return ADDR_SURF_4_BANK;
98 case 8:
99 default:
100 return ADDR_SURF_8_BANK;
101 case 16:
102 return ADDR_SURF_16_BANK;
103 }
104}
105
106static u32 evergreen_cs_get_tile_split(u32 row_size)
107{
108 switch (row_size) {
109 case 1:
110 default:
111 return ADDR_SURF_TILE_SPLIT_1KB;
112 case 2:
113 return ADDR_SURF_TILE_SPLIT_2KB;
114 case 4:
115 return ADDR_SURF_TILE_SPLIT_4KB;
116 }
117}
118
80static void evergreen_cs_track_init(struct evergreen_cs_track *track) 119static void evergreen_cs_track_init(struct evergreen_cs_track *track)
81{ 120{
82 int i; 121 int i;
@@ -490,12 +529,11 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
490 } 529 }
491 ib[idx] &= ~Z_ARRAY_MODE(0xf); 530 ib[idx] &= ~Z_ARRAY_MODE(0xf);
492 track->db_z_info &= ~Z_ARRAY_MODE(0xf); 531 track->db_z_info &= ~Z_ARRAY_MODE(0xf);
532 ib[idx] |= Z_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
533 track->db_z_info |= Z_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
493 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) { 534 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
494 ib[idx] |= Z_ARRAY_MODE(ARRAY_2D_TILED_THIN1); 535 ib[idx] |= DB_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks));
495 track->db_z_info |= Z_ARRAY_MODE(ARRAY_2D_TILED_THIN1); 536 ib[idx] |= DB_TILE_SPLIT(evergreen_cs_get_tile_split(track->row_size));
496 } else {
497 ib[idx] |= Z_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
498 track->db_z_info |= Z_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
499 } 537 }
500 } 538 }
501 break; 539 break;
@@ -618,13 +656,8 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
618 "0x%04X\n", reg); 656 "0x%04X\n", reg);
619 return -EINVAL; 657 return -EINVAL;
620 } 658 }
621 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) { 659 ib[idx] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
622 ib[idx] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1); 660 track->cb_color_info[tmp] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
623 track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
624 } else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) {
625 ib[idx] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
626 track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
627 }
628 } 661 }
629 break; 662 break;
630 case CB_COLOR8_INFO: 663 case CB_COLOR8_INFO:
@@ -640,13 +673,8 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
640 "0x%04X\n", reg); 673 "0x%04X\n", reg);
641 return -EINVAL; 674 return -EINVAL;
642 } 675 }
643 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) { 676 ib[idx] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
644 ib[idx] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1); 677 track->cb_color_info[tmp] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
645 track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
646 } else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) {
647 ib[idx] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
648 track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
649 }
650 } 678 }
651 break; 679 break;
652 case CB_COLOR0_PITCH: 680 case CB_COLOR0_PITCH:
@@ -701,6 +729,16 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
701 case CB_COLOR9_ATTRIB: 729 case CB_COLOR9_ATTRIB:
702 case CB_COLOR10_ATTRIB: 730 case CB_COLOR10_ATTRIB:
703 case CB_COLOR11_ATTRIB: 731 case CB_COLOR11_ATTRIB:
732 r = evergreen_cs_packet_next_reloc(p, &reloc);
733 if (r) {
734 dev_warn(p->dev, "bad SET_CONTEXT_REG "
735 "0x%04X\n", reg);
736 return -EINVAL;
737 }
738 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
739 ib[idx] |= CB_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks));
740 ib[idx] |= CB_TILE_SPLIT(evergreen_cs_get_tile_split(track->row_size));
741 }
704 break; 742 break;
705 case CB_COLOR0_DIM: 743 case CB_COLOR0_DIM:
706 case CB_COLOR1_DIM: 744 case CB_COLOR1_DIM:
@@ -1318,10 +1356,14 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
1318 } 1356 }
1319 ib[idx+1+(i*8)+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); 1357 ib[idx+1+(i*8)+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1320 if (!p->keep_tiling_flags) { 1358 if (!p->keep_tiling_flags) {
1321 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) 1359 ib[idx+1+(i*8)+1] |=
1322 ib[idx+1+(i*8)+1] |= TEX_ARRAY_MODE(ARRAY_2D_TILED_THIN1); 1360 TEX_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
1323 else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) 1361 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
1324 ib[idx+1+(i*8)+1] |= TEX_ARRAY_MODE(ARRAY_1D_TILED_THIN1); 1362 ib[idx+1+(i*8)+6] |=
1363 TEX_TILE_SPLIT(evergreen_cs_get_tile_split(track->row_size));
1364 ib[idx+1+(i*8)+7] |=
1365 TEX_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks));
1366 }
1325 } 1367 }
1326 texture = reloc->robj; 1368 texture = reloc->robj;
1327 /* tex mip base */ 1369 /* tex mip base */
@@ -1422,6 +1464,7 @@ int evergreen_cs_parse(struct radeon_cs_parser *p)
1422{ 1464{
1423 struct radeon_cs_packet pkt; 1465 struct radeon_cs_packet pkt;
1424 struct evergreen_cs_track *track; 1466 struct evergreen_cs_track *track;
1467 u32 tmp;
1425 int r; 1468 int r;
1426 1469
1427 if (p->track == NULL) { 1470 if (p->track == NULL) {
@@ -1430,9 +1473,63 @@ int evergreen_cs_parse(struct radeon_cs_parser *p)
1430 if (track == NULL) 1473 if (track == NULL)
1431 return -ENOMEM; 1474 return -ENOMEM;
1432 evergreen_cs_track_init(track); 1475 evergreen_cs_track_init(track);
1433 track->npipes = p->rdev->config.evergreen.tiling_npipes; 1476 if (p->rdev->family >= CHIP_CAYMAN)
1434 track->nbanks = p->rdev->config.evergreen.tiling_nbanks; 1477 tmp = p->rdev->config.cayman.tile_config;
1435 track->group_size = p->rdev->config.evergreen.tiling_group_size; 1478 else
1479 tmp = p->rdev->config.evergreen.tile_config;
1480
1481 switch (tmp & 0xf) {
1482 case 0:
1483 track->npipes = 1;
1484 break;
1485 case 1:
1486 default:
1487 track->npipes = 2;
1488 break;
1489 case 2:
1490 track->npipes = 4;
1491 break;
1492 case 3:
1493 track->npipes = 8;
1494 break;
1495 }
1496
1497 switch ((tmp & 0xf0) >> 4) {
1498 case 0:
1499 track->nbanks = 4;
1500 break;
1501 case 1:
1502 default:
1503 track->nbanks = 8;
1504 break;
1505 case 2:
1506 track->nbanks = 16;
1507 break;
1508 }
1509
1510 switch ((tmp & 0xf00) >> 8) {
1511 case 0:
1512 track->group_size = 256;
1513 break;
1514 case 1:
1515 default:
1516 track->group_size = 512;
1517 break;
1518 }
1519
1520 switch ((tmp & 0xf000) >> 12) {
1521 case 0:
1522 track->row_size = 1;
1523 break;
1524 case 1:
1525 default:
1526 track->row_size = 2;
1527 break;
1528 case 2:
1529 track->row_size = 4;
1530 break;
1531 }
1532
1436 p->track = track; 1533 p->track = track;
1437 } 1534 }
1438 do { 1535 do {
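
The parser init above now derives npipes, nbanks, group_size and row_size from four nibbles of the GPU tile_config word (bits 3:0, 7:4, 11:8 and 15:12, per the switch statements in this hunk). A compact standalone decode of the same fields is sketched below; the struct and helper names are illustrative and the defaults mirror the hunk's default: cases.

#include <stdint.h>
#include <stdio.h>

struct tiling {
	uint32_t npipes, nbanks, group_size, row_size;
};

static void decode_tile_config(uint32_t tc, struct tiling *t)
{
	static const uint32_t pipes[] = { 1, 2, 4, 8 };
	static const uint32_t banks[] = { 4, 8, 16 };

	t->npipes = ((tc & 0xf) < 4) ? pipes[tc & 0xf] : 2;                /* default: 2 */
	t->nbanks = (((tc >> 4) & 0xf) < 3) ? banks[(tc >> 4) & 0xf] : 8;  /* default: 8 */
	t->group_size = (((tc >> 8) & 0xf) == 0) ? 256 : 512;              /* default: 512 */

	switch ((tc >> 12) & 0xf) {
	case 0:  t->row_size = 1; break;
	case 2:  t->row_size = 4; break;
	default: t->row_size = 2; break;                                   /* case 1 and others */
	}
}

int main(void)
{
	struct tiling t;

	decode_tile_config(0x1121, &t);   /* illustrative tile_config value */
	printf("pipes=%u banks=%u group=%u row=%uKB\n",
	       t.npipes, t.nbanks, t.group_size, t.row_size);
	return 0;
}
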
diff --git a/drivers/gpu/drm/radeon/evergreen_reg.h b/drivers/gpu/drm/radeon/evergreen_reg.h
index c781c92c3451..7d7f2155e34c 100644
--- a/drivers/gpu/drm/radeon/evergreen_reg.h
+++ b/drivers/gpu/drm/radeon/evergreen_reg.h
@@ -42,6 +42,17 @@
42# define EVERGREEN_GRPH_DEPTH_8BPP 0 42# define EVERGREEN_GRPH_DEPTH_8BPP 0
43# define EVERGREEN_GRPH_DEPTH_16BPP 1 43# define EVERGREEN_GRPH_DEPTH_16BPP 1
44# define EVERGREEN_GRPH_DEPTH_32BPP 2 44# define EVERGREEN_GRPH_DEPTH_32BPP 2
45# define EVERGREEN_GRPH_NUM_BANKS(x) (((x) & 0x3) << 2)
46# define EVERGREEN_ADDR_SURF_2_BANK 0
47# define EVERGREEN_ADDR_SURF_4_BANK 1
48# define EVERGREEN_ADDR_SURF_8_BANK 2
49# define EVERGREEN_ADDR_SURF_16_BANK 3
50# define EVERGREEN_GRPH_Z(x) (((x) & 0x3) << 4)
51# define EVERGREEN_GRPH_BANK_WIDTH(x) (((x) & 0x3) << 6)
52# define EVERGREEN_ADDR_SURF_BANK_WIDTH_1 0
53# define EVERGREEN_ADDR_SURF_BANK_WIDTH_2 1
54# define EVERGREEN_ADDR_SURF_BANK_WIDTH_4 2
55# define EVERGREEN_ADDR_SURF_BANK_WIDTH_8 3
45# define EVERGREEN_GRPH_FORMAT(x) (((x) & 0x7) << 8) 56# define EVERGREEN_GRPH_FORMAT(x) (((x) & 0x7) << 8)
46/* 8 BPP */ 57/* 8 BPP */
47# define EVERGREEN_GRPH_FORMAT_INDEXED 0 58# define EVERGREEN_GRPH_FORMAT_INDEXED 0
@@ -61,6 +72,24 @@
61# define EVERGREEN_GRPH_FORMAT_8B_BGRA1010102 5 72# define EVERGREEN_GRPH_FORMAT_8B_BGRA1010102 5
62# define EVERGREEN_GRPH_FORMAT_RGB111110 6 73# define EVERGREEN_GRPH_FORMAT_RGB111110 6
63# define EVERGREEN_GRPH_FORMAT_BGR101111 7 74# define EVERGREEN_GRPH_FORMAT_BGR101111 7
75# define EVERGREEN_GRPH_BANK_HEIGHT(x) (((x) & 0x3) << 11)
76# define EVERGREEN_ADDR_SURF_BANK_HEIGHT_1 0
77# define EVERGREEN_ADDR_SURF_BANK_HEIGHT_2 1
78# define EVERGREEN_ADDR_SURF_BANK_HEIGHT_4 2
79# define EVERGREEN_ADDR_SURF_BANK_HEIGHT_8 3
80# define EVERGREEN_GRPH_TILE_SPLIT(x) (((x) & 0x7) << 13)
81# define EVERGREEN_ADDR_SURF_TILE_SPLIT_64B 0
82# define EVERGREEN_ADDR_SURF_TILE_SPLIT_128B 1
83# define EVERGREEN_ADDR_SURF_TILE_SPLIT_256B 2
84# define EVERGREEN_ADDR_SURF_TILE_SPLIT_512B 3
85# define EVERGREEN_ADDR_SURF_TILE_SPLIT_1KB 4
86# define EVERGREEN_ADDR_SURF_TILE_SPLIT_2KB 5
87# define EVERGREEN_ADDR_SURF_TILE_SPLIT_4KB 6
88# define EVERGREEN_GRPH_MACRO_TILE_ASPECT(x) (((x) & 0x3) << 18)
89# define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_1 0
90# define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_2 1
91# define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_4 2
92# define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_8 3
64# define EVERGREEN_GRPH_ARRAY_MODE(x) (((x) & 0x7) << 20) 93# define EVERGREEN_GRPH_ARRAY_MODE(x) (((x) & 0x7) << 20)
65# define EVERGREEN_GRPH_ARRAY_LINEAR_GENERAL 0 94# define EVERGREEN_GRPH_ARRAY_LINEAR_GENERAL 0
66# define EVERGREEN_GRPH_ARRAY_LINEAR_ALIGNED 1 95# define EVERGREEN_GRPH_ARRAY_LINEAR_ALIGNED 1
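
The new EVERGREEN_GRPH_NUM_BANKS and EVERGREEN_GRPH_TILE_SPLIT definitions above pack small enums into fixed bit positions of the scanout control word, which is how the atombios_crtc hunk earlier in this diff programs macro-tiled framebuffers. A short self-contained sketch of that field packing follows; the macro values are copied from the definitions above, everything else (the bare fb_format word, the chosen enums) is illustrative.

#include <stdint.h>
#include <stdio.h>

#define GRPH_NUM_BANKS(x)   (((x) & 0x3) << 2)
#define GRPH_TILE_SPLIT(x)  (((x) & 0x7) << 13)
#define ADDR_SURF_8_BANK         2
#define ADDR_SURF_TILE_SPLIT_1KB 4

int main(void)
{
	uint32_t fb_format = 0;

	/* 2D macro tiling: program both the bank count and the tile split. */
	fb_format |= GRPH_NUM_BANKS(ADDR_SURF_8_BANK);
	fb_format |= GRPH_TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB);

	printf("fb_format = 0x%08x\n", fb_format);
	return 0;
}
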
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index b937c49054d9..e00039e59a75 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -899,6 +899,10 @@
899#define DB_HTILE_DATA_BASE 0x28014 899#define DB_HTILE_DATA_BASE 0x28014
900#define DB_Z_INFO 0x28040 900#define DB_Z_INFO 0x28040
901# define Z_ARRAY_MODE(x) ((x) << 4) 901# define Z_ARRAY_MODE(x) ((x) << 4)
902# define DB_TILE_SPLIT(x) (((x) & 0x7) << 8)
903# define DB_NUM_BANKS(x) (((x) & 0x3) << 12)
904# define DB_BANK_WIDTH(x) (((x) & 0x3) << 16)
905# define DB_BANK_HEIGHT(x) (((x) & 0x3) << 20)
902#define DB_STENCIL_INFO 0x28044 906#define DB_STENCIL_INFO 0x28044
903#define DB_Z_READ_BASE 0x28048 907#define DB_Z_READ_BASE 0x28048
904#define DB_STENCIL_READ_BASE 0x2804c 908#define DB_STENCIL_READ_BASE 0x2804c
@@ -951,6 +955,29 @@
951# define CB_SF_EXPORT_FULL 0 955# define CB_SF_EXPORT_FULL 0
952# define CB_SF_EXPORT_NORM 1 956# define CB_SF_EXPORT_NORM 1
953#define CB_COLOR0_ATTRIB 0x28c74 957#define CB_COLOR0_ATTRIB 0x28c74
958# define CB_TILE_SPLIT(x) (((x) & 0x7) << 5)
959# define ADDR_SURF_TILE_SPLIT_64B 0
960# define ADDR_SURF_TILE_SPLIT_128B 1
961# define ADDR_SURF_TILE_SPLIT_256B 2
962# define ADDR_SURF_TILE_SPLIT_512B 3
963# define ADDR_SURF_TILE_SPLIT_1KB 4
964# define ADDR_SURF_TILE_SPLIT_2KB 5
965# define ADDR_SURF_TILE_SPLIT_4KB 6
966# define CB_NUM_BANKS(x) (((x) & 0x3) << 10)
967# define ADDR_SURF_2_BANK 0
968# define ADDR_SURF_4_BANK 1
969# define ADDR_SURF_8_BANK 2
970# define ADDR_SURF_16_BANK 3
971# define CB_BANK_WIDTH(x) (((x) & 0x3) << 13)
972# define ADDR_SURF_BANK_WIDTH_1 0
973# define ADDR_SURF_BANK_WIDTH_2 1
974# define ADDR_SURF_BANK_WIDTH_4 2
975# define ADDR_SURF_BANK_WIDTH_8 3
976# define CB_BANK_HEIGHT(x) (((x) & 0x3) << 16)
977# define ADDR_SURF_BANK_HEIGHT_1 0
978# define ADDR_SURF_BANK_HEIGHT_2 1
979# define ADDR_SURF_BANK_HEIGHT_4 2
980# define ADDR_SURF_BANK_HEIGHT_8 3
954#define CB_COLOR0_DIM 0x28c78 981#define CB_COLOR0_DIM 0x28c78
955/* only CB0-7 blocks have these regs */ 982/* only CB0-7 blocks have these regs */
956#define CB_COLOR0_CMASK 0x28c7c 983#define CB_COLOR0_CMASK 0x28c7c
@@ -1137,7 +1164,11 @@
1137# define SQ_SEL_1 5 1164# define SQ_SEL_1 5
1138#define SQ_TEX_RESOURCE_WORD5_0 0x30014 1165#define SQ_TEX_RESOURCE_WORD5_0 0x30014
1139#define SQ_TEX_RESOURCE_WORD6_0 0x30018 1166#define SQ_TEX_RESOURCE_WORD6_0 0x30018
1167# define TEX_TILE_SPLIT(x) (((x) & 0x7) << 29)
1140#define SQ_TEX_RESOURCE_WORD7_0 0x3001c 1168#define SQ_TEX_RESOURCE_WORD7_0 0x3001c
1169# define TEX_BANK_WIDTH(x) (((x) & 0x3) << 8)
1170# define TEX_BANK_HEIGHT(x) (((x) & 0x3) << 10)
1171# define TEX_NUM_BANKS(x) (((x) & 0x3) << 16)
1141 1172
1142#define SQ_VTX_CONSTANT_WORD0_0 0x30000 1173#define SQ_VTX_CONSTANT_WORD0_0 0x30000
1143#define SQ_VTX_CONSTANT_WORD1_0 0x30004 1174#define SQ_VTX_CONSTANT_WORD1_0 0x30004
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index ad158ea49901..bfc08f6320f8 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -187,13 +187,18 @@ u32 r100_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
187{ 187{
188 struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id]; 188 struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
189 u32 tmp = ((u32)crtc_base) | RADEON_CRTC_OFFSET__OFFSET_LOCK; 189 u32 tmp = ((u32)crtc_base) | RADEON_CRTC_OFFSET__OFFSET_LOCK;
190 int i;
190 191
191 /* Lock the graphics update lock */ 192 /* Lock the graphics update lock */
192 /* update the scanout addresses */ 193 /* update the scanout addresses */
193 WREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset, tmp); 194 WREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset, tmp);
194 195
195 /* Wait for update_pending to go high. */ 196 /* Wait for update_pending to go high. */
196 while (!(RREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset) & RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET)); 197 for (i = 0; i < rdev->usec_timeout; i++) {
198 if (RREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset) & RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET)
199 break;
200 udelay(1);
201 }
197 DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n"); 202 DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");
198 203
199 /* Unlock the lock, so double-buffering can take place inside vblank */ 204 /* Unlock the lock, so double-buffering can take place inside vblank */
diff --git a/drivers/gpu/drm/radeon/radeon_acpi.c b/drivers/gpu/drm/radeon/radeon_acpi.c
index 3f6636bb2d7f..3516a6081dcf 100644
--- a/drivers/gpu/drm/radeon/radeon_acpi.c
+++ b/drivers/gpu/drm/radeon/radeon_acpi.c
@@ -35,7 +35,8 @@ static int radeon_atif_call(acpi_handle handle)
35 35
36 /* Fail only if calling the method fails and ATIF is supported */ 36 /* Fail only if calling the method fails and ATIF is supported */
37 if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) { 37 if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
38 printk(KERN_DEBUG "failed to evaluate ATIF got %s\n", acpi_format_exception(status)); 38 DRM_DEBUG_DRIVER("failed to evaluate ATIF got %s\n",
39 acpi_format_exception(status));
39 kfree(buffer.pointer); 40 kfree(buffer.pointer);
40 return 1; 41 return 1;
41 } 42 }
@@ -50,13 +51,13 @@ int radeon_acpi_init(struct radeon_device *rdev)
50 acpi_handle handle; 51 acpi_handle handle;
51 int ret; 52 int ret;
52 53
53 /* No need to proceed if we're sure that ATIF is not supported */
54 if (!ASIC_IS_AVIVO(rdev) || !rdev->bios)
55 return 0;
56
57 /* Get the device handle */ 54 /* Get the device handle */
58 handle = DEVICE_ACPI_HANDLE(&rdev->pdev->dev); 55 handle = DEVICE_ACPI_HANDLE(&rdev->pdev->dev);
59 56
57 /* No need to proceed if we're sure that ATIF is not supported */
58 if (!ASIC_IS_AVIVO(rdev) || !rdev->bios || !handle)
59 return 0;
60
60 /* Call the ATIF method */ 61 /* Call the ATIF method */
61 ret = radeon_atif_call(handle); 62 ret = radeon_atif_call(handle);
62 if (ret) 63 if (ret)
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index 06e413e6a920..4b27efa4405b 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -233,13 +233,12 @@ u16 radeon_encoder_get_dp_bridge_encoder_id(struct drm_encoder *encoder)
233 switch (radeon_encoder->encoder_id) { 233 switch (radeon_encoder->encoder_id) {
234 case ENCODER_OBJECT_ID_TRAVIS: 234 case ENCODER_OBJECT_ID_TRAVIS:
235 case ENCODER_OBJECT_ID_NUTMEG: 235 case ENCODER_OBJECT_ID_NUTMEG:
236 return true; 236 return radeon_encoder->encoder_id;
237 default: 237 default:
238 return false; 238 return ENCODER_OBJECT_ID_NONE;
239 } 239 }
240 } 240 }
241 241 return ENCODER_OBJECT_ID_NONE;
242 return false;
243} 242}
244 243
245void radeon_panel_mode_fixup(struct drm_encoder *encoder, 244void radeon_panel_mode_fixup(struct drm_encoder *encoder,
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index 481b99e89f65..b1053d640423 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -62,6 +62,7 @@ u32 rs600_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
62{ 62{
63 struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id]; 63 struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
64 u32 tmp = RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset); 64 u32 tmp = RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset);
65 int i;
65 66
66 /* Lock the graphics update lock */ 67 /* Lock the graphics update lock */
67 tmp |= AVIVO_D1GRPH_UPDATE_LOCK; 68 tmp |= AVIVO_D1GRPH_UPDATE_LOCK;
@@ -74,7 +75,11 @@ u32 rs600_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
74 (u32)crtc_base); 75 (u32)crtc_base);
75 76
76 /* Wait for update_pending to go high. */ 77 /* Wait for update_pending to go high. */
77 while (!(RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING)); 78 for (i = 0; i < rdev->usec_timeout; i++) {
79 if (RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING)
80 break;
81 udelay(1);
82 }
78 DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n"); 83 DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");
79 84
80 /* Unlock the lock, so double-buffering can take place inside vblank */ 85 /* Unlock the lock, so double-buffering can take place inside vblank */
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index a983f410ab89..23ae1c60ab3d 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -47,6 +47,7 @@ u32 rv770_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
47{ 47{
48 struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id]; 48 struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
49 u32 tmp = RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset); 49 u32 tmp = RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset);
50 int i;
50 51
51 /* Lock the graphics update lock */ 52 /* Lock the graphics update lock */
52 tmp |= AVIVO_D1GRPH_UPDATE_LOCK; 53 tmp |= AVIVO_D1GRPH_UPDATE_LOCK;
@@ -66,7 +67,11 @@ u32 rv770_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
66 (u32)crtc_base); 67 (u32)crtc_base);
67 68
68 /* Wait for update_pending to go high. */ 69 /* Wait for update_pending to go high. */
69 while (!(RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING)); 70 for (i = 0; i < rdev->usec_timeout; i++) {
71 if (RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING)
72 break;
73 udelay(1);
74 }
70 DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n"); 75 DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");
71 76
72 /* Unlock the lock, so double-buffering can take place inside vblank */ 77 /* Unlock the lock, so double-buffering can take place inside vblank */
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 8cca91a93bde..dc279706ca70 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -390,6 +390,11 @@ extern int vmw_context_check(struct vmw_private *dev_priv,
390 struct ttm_object_file *tfile, 390 struct ttm_object_file *tfile,
391 int id, 391 int id,
392 struct vmw_resource **p_res); 392 struct vmw_resource **p_res);
393extern int vmw_user_lookup_handle(struct vmw_private *dev_priv,
394 struct ttm_object_file *tfile,
395 uint32_t handle,
396 struct vmw_surface **out_surf,
397 struct vmw_dma_buffer **out_buf);
393extern void vmw_surface_res_free(struct vmw_resource *res); 398extern void vmw_surface_res_free(struct vmw_resource *res);
394extern int vmw_surface_init(struct vmw_private *dev_priv, 399extern int vmw_surface_init(struct vmw_private *dev_priv,
395 struct vmw_surface *srf, 400 struct vmw_surface *srf,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
index 03bbc2a6f9a7..a0c2f12b1e1b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
@@ -33,6 +33,7 @@ bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
33{ 33{
34 __le32 __iomem *fifo_mem = dev_priv->mmio_virt; 34 __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
35 uint32_t fifo_min, hwversion; 35 uint32_t fifo_min, hwversion;
36 const struct vmw_fifo_state *fifo = &dev_priv->fifo;
36 37
37 if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO)) 38 if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO))
38 return false; 39 return false;
@@ -41,7 +42,12 @@ bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
41 if (fifo_min <= SVGA_FIFO_3D_HWVERSION * sizeof(unsigned int)) 42 if (fifo_min <= SVGA_FIFO_3D_HWVERSION * sizeof(unsigned int))
42 return false; 43 return false;
43 44
44 hwversion = ioread32(fifo_mem + SVGA_FIFO_3D_HWVERSION); 45 hwversion = ioread32(fifo_mem +
46 ((fifo->capabilities &
47 SVGA_FIFO_CAP_3D_HWVERSION_REVISED) ?
48 SVGA_FIFO_3D_HWVERSION_REVISED :
49 SVGA_FIFO_3D_HWVERSION));
50
45 if (hwversion == 0) 51 if (hwversion == 0)
46 return false; 52 return false;
47 53
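
Both vmw_fifo_have_3d() above and the getparam ioctl below now read the 3D hardware version from SVGA_FIFO_3D_HWVERSION_REVISED when the FIFO advertises SVGA_FIFO_CAP_3D_HWVERSION_REVISED, falling back to the legacy offset otherwise. A standalone sketch of that offset selection over a fake FIFO array follows; the offsets and the capability bit are illustrative, not the real SVGA values.

#include <stdint.h>
#include <stdio.h>

#define FIFO_CAP_3D_HWVERSION_REVISED (1u << 6)  /* illustrative cap bit */
#define FIFO_3D_HWVERSION             32         /* illustrative word offsets */
#define FIFO_3D_HWVERSION_REVISED     48

static uint32_t read_3d_hwversion(const uint32_t *fifo_mem, uint32_t caps)
{
	uint32_t off = (caps & FIFO_CAP_3D_HWVERSION_REVISED) ?
		       FIFO_3D_HWVERSION_REVISED : FIFO_3D_HWVERSION;

	return fifo_mem[off];   /* stands in for ioread32(fifo_mem + off) */
}

int main(void)
{
	uint32_t fifo[64] = { 0 };

	fifo[FIFO_3D_HWVERSION] = 0x00020000;          /* legacy slot */
	fifo[FIFO_3D_HWVERSION_REVISED] = 0x00020005;  /* revised slot */

	printf("legacy:  0x%08x\n", read_3d_hwversion(fifo, 0));
	printf("revised: 0x%08x\n",
	       read_3d_hwversion(fifo, FIFO_CAP_3D_HWVERSION_REVISED));
	return 0;
}
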
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
index 3f6343502d1f..66917c6c3813 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
@@ -58,8 +58,14 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
58 case DRM_VMW_PARAM_FIFO_HW_VERSION: 58 case DRM_VMW_PARAM_FIFO_HW_VERSION:
59 { 59 {
60 __le32 __iomem *fifo_mem = dev_priv->mmio_virt; 60 __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
61 61 const struct vmw_fifo_state *fifo = &dev_priv->fifo;
62 param->value = ioread32(fifo_mem + SVGA_FIFO_3D_HWVERSION); 62
63 param->value =
64 ioread32(fifo_mem +
65 ((fifo->capabilities &
66 SVGA_FIFO_CAP_3D_HWVERSION_REVISED) ?
67 SVGA_FIFO_3D_HWVERSION_REVISED :
68 SVGA_FIFO_3D_HWVERSION));
63 break; 69 break;
64 } 70 }
65 default: 71 default:
@@ -140,7 +146,7 @@ int vmw_present_ioctl(struct drm_device *dev, void *data,
140 goto out_clips; 146 goto out_clips;
141 } 147 }
142 148
143 clips = kzalloc(num_clips * sizeof(*clips), GFP_KERNEL); 149 clips = kcalloc(num_clips, sizeof(*clips), GFP_KERNEL);
144 if (clips == NULL) { 150 if (clips == NULL) {
145 DRM_ERROR("Failed to allocate clip rect list.\n"); 151 DRM_ERROR("Failed to allocate clip rect list.\n");
146 ret = -ENOMEM; 152 ret = -ENOMEM;
@@ -166,13 +172,7 @@ int vmw_present_ioctl(struct drm_device *dev, void *data,
166 ret = -EINVAL; 172 ret = -EINVAL;
167 goto out_no_fb; 173 goto out_no_fb;
168 } 174 }
169
170 vfb = vmw_framebuffer_to_vfb(obj_to_fb(obj)); 175 vfb = vmw_framebuffer_to_vfb(obj_to_fb(obj));
171 if (!vfb->dmabuf) {
172 DRM_ERROR("Framebuffer not dmabuf backed.\n");
173 ret = -EINVAL;
174 goto out_no_fb;
175 }
176 176
177 ret = ttm_read_lock(&vmaster->lock, true); 177 ret = ttm_read_lock(&vmaster->lock, true);
178 if (unlikely(ret != 0)) 178 if (unlikely(ret != 0))
@@ -232,7 +232,7 @@ int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
232 goto out_clips; 232 goto out_clips;
233 } 233 }
234 234
235 clips = kzalloc(num_clips * sizeof(*clips), GFP_KERNEL); 235 clips = kcalloc(num_clips, sizeof(*clips), GFP_KERNEL);
236 if (clips == NULL) { 236 if (clips == NULL) {
237 DRM_ERROR("Failed to allocate clip rect list.\n"); 237 DRM_ERROR("Failed to allocate clip rect list.\n");
238 ret = -ENOMEM; 238 ret = -ENOMEM;
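
The two allocation changes above swap kzalloc(num_clips * sizeof(*clips), ...) for kcalloc(num_clips, sizeof(*clips), ...), which fails cleanly if the multiplication would overflow instead of handing back a short buffer. A userspace sketch of the same overflow check follows, using calloc-style semantics; the wrapper name and sizes are illustrative.

#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>

/* Overflow-checked zeroed array allocation, analogous to kcalloc(). */
static void *calloc_array(size_t n, size_t size)
{
	if (size != 0 && n > SIZE_MAX / size)
		return NULL;            /* n * size would wrap around */
	return calloc(n, size);         /* calloc() also checks, but be explicit */
}

int main(void)
{
	struct clip { int x1, y1, x2, y2; };

	struct clip *ok  = calloc_array(16, sizeof(struct clip));
	struct clip *bad = calloc_array(SIZE_MAX / 2, sizeof(struct clip));

	printf("ok=%p bad=%p\n", (void *)ok, (void *)bad);
	free(ok);
	return 0;
}
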
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 880e285d7578..8aa1dbb45c67 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -31,6 +31,44 @@
31/* Might need a hrtimer here? */ 31/* Might need a hrtimer here? */
32#define VMWGFX_PRESENT_RATE ((HZ / 60 > 0) ? HZ / 60 : 1) 32#define VMWGFX_PRESENT_RATE ((HZ / 60 > 0) ? HZ / 60 : 1)
33 33
34
35struct vmw_clip_rect {
36 int x1, x2, y1, y2;
37};
38
39/**
 40 * Clip @num_rects rects in @rects against @clip, storing the
 41 * surviving rects in @out_rects and their count in @out_num.
42 */
43void vmw_clip_cliprects(struct drm_clip_rect *rects,
44 int num_rects,
45 struct vmw_clip_rect clip,
46 SVGASignedRect *out_rects,
47 int *out_num)
48{
49 int i, k;
50
51 for (i = 0, k = 0; i < num_rects; i++) {
52 int x1 = max_t(int, clip.x1, rects[i].x1);
53 int y1 = max_t(int, clip.y1, rects[i].y1);
54 int x2 = min_t(int, clip.x2, rects[i].x2);
55 int y2 = min_t(int, clip.y2, rects[i].y2);
56
57 if (x1 >= x2)
58 continue;
59 if (y1 >= y2)
60 continue;
61
62 out_rects[k].left = x1;
63 out_rects[k].top = y1;
64 out_rects[k].right = x2;
65 out_rects[k].bottom = y2;
66 k++;
67 }
68
69 *out_num = k;
70}
71
34void vmw_display_unit_cleanup(struct vmw_display_unit *du) 72void vmw_display_unit_cleanup(struct vmw_display_unit *du)
35{ 73{
36 if (du->cursor_surface) 74 if (du->cursor_surface)
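
The helper just added intersects each dirty rect with a per-CRTC clip and drops empty results; the surface-dirty and present paths below feed it rects already translated into the blit's destination space. A small standalone exercise of the same clipping logic follows; the types are renamed to keep it self-contained and the numbers are made up.

#include <stdio.h>

struct clip_rect { int x1, x2, y1, y2; };
struct out_rect  { int left, top, right, bottom; };

static int imax(int a, int b) { return a > b ? a : b; }
static int imin(int a, int b) { return a < b ? a : b; }

/* Same shape as vmw_clip_cliprects(): intersect, skip empty, count hits. */
static void clip_rects(const struct clip_rect *rects, int num,
		       struct clip_rect clip, struct out_rect *out, int *out_num)
{
	int i, k = 0;

	for (i = 0; i < num; i++) {
		int x1 = imax(clip.x1, rects[i].x1);
		int y1 = imax(clip.y1, rects[i].y1);
		int x2 = imin(clip.x2, rects[i].x2);
		int y2 = imin(clip.y2, rects[i].y2);

		if (x1 >= x2 || y1 >= y2)
			continue;       /* rect falls entirely outside the clip */

		out[k].left = x1;
		out[k].top = y1;
		out[k].right = x2;
		out[k].bottom = y2;
		k++;
	}
	*out_num = k;
}

int main(void)
{
	struct clip_rect dirty[2] = { { 10, 50, 10, 50 }, { 200, 300, 200, 300 } };
	struct clip_rect screen = { 0, 100, 0, 100 };   /* CRTC-sized clip */
	struct out_rect out[2];
	int n;

	clip_rects(dirty, 2, screen, out, &n);
	printf("%d rect(s) survive; first: %d,%d..%d,%d\n",
	       n, out[0].left, out[0].top, out[0].right, out[0].bottom);
	return 0;
}
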
@@ -82,6 +120,43 @@ int vmw_cursor_update_image(struct vmw_private *dev_priv,
82 return 0; 120 return 0;
83} 121}
84 122
123int vmw_cursor_update_dmabuf(struct vmw_private *dev_priv,
124 struct vmw_dma_buffer *dmabuf,
125 u32 width, u32 height,
126 u32 hotspotX, u32 hotspotY)
127{
128 struct ttm_bo_kmap_obj map;
129 unsigned long kmap_offset;
130 unsigned long kmap_num;
131 void *virtual;
132 bool dummy;
133 int ret;
134
135 kmap_offset = 0;
136 kmap_num = (width*height*4 + PAGE_SIZE - 1) >> PAGE_SHIFT;
137
138 ret = ttm_bo_reserve(&dmabuf->base, true, false, false, 0);
139 if (unlikely(ret != 0)) {
140 DRM_ERROR("reserve failed\n");
141 return -EINVAL;
142 }
143
144 ret = ttm_bo_kmap(&dmabuf->base, kmap_offset, kmap_num, &map);
145 if (unlikely(ret != 0))
146 goto err_unreserve;
147
148 virtual = ttm_kmap_obj_virtual(&map, &dummy);
149 ret = vmw_cursor_update_image(dev_priv, virtual, width, height,
150 hotspotX, hotspotY);
151
152 ttm_bo_kunmap(&map);
153err_unreserve:
154 ttm_bo_unreserve(&dmabuf->base);
155
156 return ret;
157}
158
159
85void vmw_cursor_update_position(struct vmw_private *dev_priv, 160void vmw_cursor_update_position(struct vmw_private *dev_priv,
86 bool show, int x, int y) 161 bool show, int x, int y)
87{ 162{
@@ -110,24 +185,21 @@ int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
110 return -EINVAL; 185 return -EINVAL;
111 186
112 if (handle) { 187 if (handle) {
113 ret = vmw_user_surface_lookup_handle(dev_priv, tfile, 188 ret = vmw_user_lookup_handle(dev_priv, tfile,
114 handle, &surface); 189 handle, &surface, &dmabuf);
115 if (!ret) { 190 if (ret) {
116 if (!surface->snooper.image) { 191 DRM_ERROR("failed to find surface or dmabuf: %i\n", ret);
117 DRM_ERROR("surface not suitable for cursor\n"); 192 return -EINVAL;
118 vmw_surface_unreference(&surface);
119 return -EINVAL;
120 }
121 } else {
122 ret = vmw_user_dmabuf_lookup(tfile,
123 handle, &dmabuf);
124 if (ret) {
125 DRM_ERROR("failed to find surface or dmabuf: %i\n", ret);
126 return -EINVAL;
127 }
128 } 193 }
129 } 194 }
130 195
196 /* need to do this before taking down old image */
197 if (surface && !surface->snooper.image) {
198 DRM_ERROR("surface not suitable for cursor\n");
199 vmw_surface_unreference(&surface);
200 return -EINVAL;
201 }
202
131 /* takedown old cursor */ 203 /* takedown old cursor */
132 if (du->cursor_surface) { 204 if (du->cursor_surface) {
133 du->cursor_surface->snooper.crtc = NULL; 205 du->cursor_surface->snooper.crtc = NULL;
@@ -146,36 +218,11 @@ int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
146 vmw_cursor_update_image(dev_priv, surface->snooper.image, 218 vmw_cursor_update_image(dev_priv, surface->snooper.image,
147 64, 64, du->hotspot_x, du->hotspot_y); 219 64, 64, du->hotspot_x, du->hotspot_y);
148 } else if (dmabuf) { 220 } else if (dmabuf) {
149 struct ttm_bo_kmap_obj map;
150 unsigned long kmap_offset;
151 unsigned long kmap_num;
152 void *virtual;
153 bool dummy;
154
155 /* vmw_user_surface_lookup takes one reference */ 221 /* vmw_user_surface_lookup takes one reference */
156 du->cursor_dmabuf = dmabuf; 222 du->cursor_dmabuf = dmabuf;
157 223
158 kmap_offset = 0; 224 ret = vmw_cursor_update_dmabuf(dev_priv, dmabuf, width, height,
159 kmap_num = (64*64*4) >> PAGE_SHIFT; 225 du->hotspot_x, du->hotspot_y);
160
161 ret = ttm_bo_reserve(&dmabuf->base, true, false, false, 0);
162 if (unlikely(ret != 0)) {
163 DRM_ERROR("reserve failed\n");
164 return -EINVAL;
165 }
166
167 ret = ttm_bo_kmap(&dmabuf->base, kmap_offset, kmap_num, &map);
168 if (unlikely(ret != 0))
169 goto err_unreserve;
170
171 virtual = ttm_kmap_obj_virtual(&map, &dummy);
172 vmw_cursor_update_image(dev_priv, virtual, 64, 64,
173 du->hotspot_x, du->hotspot_y);
174
175 ttm_bo_kunmap(&map);
176err_unreserve:
177 ttm_bo_unreserve(&dmabuf->base);
178
179 } else { 226 } else {
180 vmw_cursor_update_position(dev_priv, false, 0, 0); 227 vmw_cursor_update_position(dev_priv, false, 0, 0);
181 return 0; 228 return 0;
@@ -377,8 +424,9 @@ static int do_surface_dirty_sou(struct vmw_private *dev_priv,
377 struct drm_clip_rect *clips, 424 struct drm_clip_rect *clips,
378 unsigned num_clips, int inc) 425 unsigned num_clips, int inc)
379{ 426{
380 struct drm_clip_rect *clips_ptr;
381 struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS]; 427 struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
428 struct drm_clip_rect *clips_ptr;
429 struct drm_clip_rect *tmp;
382 struct drm_crtc *crtc; 430 struct drm_crtc *crtc;
383 size_t fifo_size; 431 size_t fifo_size;
384 int i, num_units; 432 int i, num_units;
@@ -391,7 +439,6 @@ static int do_surface_dirty_sou(struct vmw_private *dev_priv,
391 } *cmd; 439 } *cmd;
392 SVGASignedRect *blits; 440 SVGASignedRect *blits;
393 441
394
395 num_units = 0; 442 num_units = 0;
396 list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list, 443 list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list,
397 head) { 444 head) {
@@ -402,13 +449,24 @@ static int do_surface_dirty_sou(struct vmw_private *dev_priv,
402 449
403 BUG_ON(!clips || !num_clips); 450 BUG_ON(!clips || !num_clips);
404 451
452 tmp = kzalloc(sizeof(*tmp) * num_clips, GFP_KERNEL);
453 if (unlikely(tmp == NULL)) {
454 DRM_ERROR("Temporary cliprect memory alloc failed.\n");
455 return -ENOMEM;
456 }
457
405 fifo_size = sizeof(*cmd) + sizeof(SVGASignedRect) * num_clips; 458 fifo_size = sizeof(*cmd) + sizeof(SVGASignedRect) * num_clips;
406 cmd = kzalloc(fifo_size, GFP_KERNEL); 459 cmd = kzalloc(fifo_size, GFP_KERNEL);
407 if (unlikely(cmd == NULL)) { 460 if (unlikely(cmd == NULL)) {
408 DRM_ERROR("Temporary fifo memory alloc failed.\n"); 461 DRM_ERROR("Temporary fifo memory alloc failed.\n");
409 return -ENOMEM; 462 ret = -ENOMEM;
463 goto out_free_tmp;
410 } 464 }
411 465
466 /* setup blits pointer */
467 blits = (SVGASignedRect *)&cmd[1];
468
469 /* initial clip region */
412 left = clips->x1; 470 left = clips->x1;
413 right = clips->x2; 471 right = clips->x2;
414 top = clips->y1; 472 top = clips->y1;
@@ -434,45 +492,60 @@ static int do_surface_dirty_sou(struct vmw_private *dev_priv,
434 cmd->body.srcRect.bottom = bottom; 492 cmd->body.srcRect.bottom = bottom;
435 493
436 clips_ptr = clips; 494 clips_ptr = clips;
437 blits = (SVGASignedRect *)&cmd[1];
438 for (i = 0; i < num_clips; i++, clips_ptr += inc) { 495 for (i = 0; i < num_clips; i++, clips_ptr += inc) {
439 blits[i].left = clips_ptr->x1 - left; 496 tmp[i].x1 = clips_ptr->x1 - left;
440 blits[i].right = clips_ptr->x2 - left; 497 tmp[i].x2 = clips_ptr->x2 - left;
441 blits[i].top = clips_ptr->y1 - top; 498 tmp[i].y1 = clips_ptr->y1 - top;
442 blits[i].bottom = clips_ptr->y2 - top; 499 tmp[i].y2 = clips_ptr->y2 - top;
443 } 500 }
444 501
445 /* do per unit writing, reuse fifo for each */ 502 /* do per unit writing, reuse fifo for each */
446 for (i = 0; i < num_units; i++) { 503 for (i = 0; i < num_units; i++) {
447 struct vmw_display_unit *unit = units[i]; 504 struct vmw_display_unit *unit = units[i];
448 int clip_x1 = left - unit->crtc.x; 505 struct vmw_clip_rect clip;
449 int clip_y1 = top - unit->crtc.y; 506 int num;
450 int clip_x2 = right - unit->crtc.x; 507
451 int clip_y2 = bottom - unit->crtc.y; 508 clip.x1 = left - unit->crtc.x;
509 clip.y1 = top - unit->crtc.y;
510 clip.x2 = right - unit->crtc.x;
511 clip.y2 = bottom - unit->crtc.y;
452 512
453 /* skip any crtcs that misses the clip region */ 513 /* skip any crtcs that misses the clip region */
454 if (clip_x1 >= unit->crtc.mode.hdisplay || 514 if (clip.x1 >= unit->crtc.mode.hdisplay ||
455 clip_y1 >= unit->crtc.mode.vdisplay || 515 clip.y1 >= unit->crtc.mode.vdisplay ||
456 clip_x2 <= 0 || clip_y2 <= 0) 516 clip.x2 <= 0 || clip.y2 <= 0)
457 continue; 517 continue;
458 518
519 /*
520 * In order for the clip rects to be correctly scaled
 521 * the src and dest rects need to be the same size.
522 */
523 cmd->body.destRect.left = clip.x1;
524 cmd->body.destRect.right = clip.x2;
525 cmd->body.destRect.top = clip.y1;
526 cmd->body.destRect.bottom = clip.y2;
527
528 /* create a clip rect of the crtc in dest coords */
529 clip.x2 = unit->crtc.mode.hdisplay - clip.x1;
530 clip.y2 = unit->crtc.mode.vdisplay - clip.y1;
531 clip.x1 = 0 - clip.x1;
532 clip.y1 = 0 - clip.y1;
533
459 /* need to reset sid as it is changed by execbuf */ 534 /* need to reset sid as it is changed by execbuf */
460 cmd->body.srcImage.sid = cpu_to_le32(framebuffer->user_handle); 535 cmd->body.srcImage.sid = cpu_to_le32(framebuffer->user_handle);
461
462 cmd->body.destScreenId = unit->unit; 536 cmd->body.destScreenId = unit->unit;
463 537
464 /* 538 /* clip and write blits to cmd stream */
465 * The blit command is a lot more resilient then the 539 vmw_clip_cliprects(tmp, num_clips, clip, blits, &num);
466 * readback command when it comes to clip rects. So its
467 * okay to go out of bounds.
468 */
469 540
470 cmd->body.destRect.left = clip_x1; 541 /* if no cliprects hit skip this */
471 cmd->body.destRect.right = clip_x2; 542 if (num == 0)
472 cmd->body.destRect.top = clip_y1; 543 continue;
473 cmd->body.destRect.bottom = clip_y2;
474 544
475 545
 546 /* recalculate packet length */
547 fifo_size = sizeof(*cmd) + sizeof(SVGASignedRect) * num;
548 cmd->header.size = cpu_to_le32(fifo_size - sizeof(cmd->header));
476 ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd, 549 ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd,
477 fifo_size, 0, NULL); 550 fifo_size, 0, NULL);
478 551
@@ -480,7 +553,10 @@ static int do_surface_dirty_sou(struct vmw_private *dev_priv,
480 break; 553 break;
481 } 554 }
482 555
556
483 kfree(cmd); 557 kfree(cmd);
558out_free_tmp:
559 kfree(tmp);
484 560
485 return ret; 561 return ret;
486} 562}
@@ -556,6 +632,10 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
556 * Sanity checks. 632 * Sanity checks.
557 */ 633 */
558 634
635 /* Surface must be marked as a scanout. */
636 if (unlikely(!surface->scanout))
637 return -EINVAL;
638
559 if (unlikely(surface->mip_levels[0] != 1 || 639 if (unlikely(surface->mip_levels[0] != 1 ||
560 surface->num_sizes != 1 || 640 surface->num_sizes != 1 ||
561 surface->sizes[0].width < mode_cmd->width || 641 surface->sizes[0].width < mode_cmd->width ||
@@ -782,6 +862,7 @@ static int do_dmabuf_dirty_sou(struct drm_file *file_priv,
782 int clip_y1 = clips_ptr->y1 - unit->crtc.y; 862 int clip_y1 = clips_ptr->y1 - unit->crtc.y;
783 int clip_x2 = clips_ptr->x2 - unit->crtc.x; 863 int clip_x2 = clips_ptr->x2 - unit->crtc.x;
784 int clip_y2 = clips_ptr->y2 - unit->crtc.y; 864 int clip_y2 = clips_ptr->y2 - unit->crtc.y;
865 int move_x, move_y;
785 866
786 /* skip any crtcs that misses the clip region */ 867 /* skip any crtcs that misses the clip region */
787 if (clip_x1 >= unit->crtc.mode.hdisplay || 868 if (clip_x1 >= unit->crtc.mode.hdisplay ||
@@ -789,12 +870,21 @@ static int do_dmabuf_dirty_sou(struct drm_file *file_priv,
789 clip_x2 <= 0 || clip_y2 <= 0) 870 clip_x2 <= 0 || clip_y2 <= 0)
790 continue; 871 continue;
791 872
873 /* clip size to crtc size */
874 clip_x2 = min_t(int, clip_x2, unit->crtc.mode.hdisplay);
875 clip_y2 = min_t(int, clip_y2, unit->crtc.mode.vdisplay);
876
877 /* translate both src and dest to bring clip into screen */
878 move_x = min_t(int, clip_x1, 0);
879 move_y = min_t(int, clip_y1, 0);
880
881 /* actual translate done here */
792 blits[hit_num].header = SVGA_CMD_BLIT_GMRFB_TO_SCREEN; 882 blits[hit_num].header = SVGA_CMD_BLIT_GMRFB_TO_SCREEN;
793 blits[hit_num].body.destScreenId = unit->unit; 883 blits[hit_num].body.destScreenId = unit->unit;
794 blits[hit_num].body.srcOrigin.x = clips_ptr->x1; 884 blits[hit_num].body.srcOrigin.x = clips_ptr->x1 - move_x;
795 blits[hit_num].body.srcOrigin.y = clips_ptr->y1; 885 blits[hit_num].body.srcOrigin.y = clips_ptr->y1 - move_y;
796 blits[hit_num].body.destRect.left = clip_x1; 886 blits[hit_num].body.destRect.left = clip_x1 - move_x;
797 blits[hit_num].body.destRect.top = clip_y1; 887 blits[hit_num].body.destRect.top = clip_y1 - move_y;
798 blits[hit_num].body.destRect.right = clip_x2; 888 blits[hit_num].body.destRect.right = clip_x2;
799 blits[hit_num].body.destRect.bottom = clip_y2; 889 blits[hit_num].body.destRect.bottom = clip_y2;
800 hit_num++; 890 hit_num++;
@@ -1033,46 +1123,29 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
1033 return ERR_PTR(-ENOENT); 1123 return ERR_PTR(-ENOENT);
1034 } 1124 }
1035 1125
1036 /** 1126 /* returns either a dmabuf or surface */
1037 * End conditioned code. 1127 ret = vmw_user_lookup_handle(dev_priv, tfile,
1038 */ 1128 mode_cmd->handle,
1039 1129 &surface, &bo);
1040 ret = vmw_user_surface_lookup_handle(dev_priv, tfile,
1041 mode_cmd->handle, &surface);
1042 if (ret) 1130 if (ret)
1043 goto try_dmabuf; 1131 goto err_out;
1044 1132
1045 if (!surface->scanout) 1133 /* Create the new framebuffer depending one what we got back */
1046 goto err_not_scanout; 1134 if (bo)
1047 1135 ret = vmw_kms_new_framebuffer_dmabuf(dev_priv, bo, &vfb,
1048 ret = vmw_kms_new_framebuffer_surface(dev_priv, file_priv, surface, 1136 mode_cmd);
1049 &vfb, mode_cmd); 1137 else if (surface)
1050 1138 ret = vmw_kms_new_framebuffer_surface(dev_priv, file_priv,
1051 /* vmw_user_surface_lookup takes one ref so does new_fb */ 1139 surface, &vfb, mode_cmd);
1052 vmw_surface_unreference(&surface); 1140 else
1053 1141 BUG();
1054 if (ret) {
1055 DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret);
1056 ttm_base_object_unref(&user_obj);
1057 return ERR_PTR(ret);
1058 } else
1059 vfb->user_obj = user_obj;
1060 return &vfb->base;
1061
1062try_dmabuf:
1063 DRM_INFO("%s: trying buffer\n", __func__);
1064
1065 ret = vmw_user_dmabuf_lookup(tfile, mode_cmd->handle, &bo);
1066 if (ret) {
1067 DRM_ERROR("failed to find buffer: %i\n", ret);
1068 return ERR_PTR(-ENOENT);
1069 }
1070
1071 ret = vmw_kms_new_framebuffer_dmabuf(dev_priv, bo, &vfb,
1072 mode_cmd);
1073 1142
1074 /* vmw_user_dmabuf_lookup takes one ref so does new_fb */ 1143err_out:
1075 vmw_dmabuf_unreference(&bo); 1144 /* vmw_user_lookup_handle takes one ref so does new_fb */
1145 if (bo)
1146 vmw_dmabuf_unreference(&bo);
1147 if (surface)
1148 vmw_surface_unreference(&surface);
1076 1149
1077 if (ret) { 1150 if (ret) {
1078 DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret); 1151 DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret);
@@ -1082,14 +1155,6 @@ try_dmabuf:
1082 vfb->user_obj = user_obj; 1155 vfb->user_obj = user_obj;
1083 1156
1084 return &vfb->base; 1157 return &vfb->base;
1085
1086err_not_scanout:
1087 DRM_ERROR("surface not marked as scanout\n");
1088 /* vmw_user_surface_lookup takes one ref */
1089 vmw_surface_unreference(&surface);
1090 ttm_base_object_unref(&user_obj);
1091
1092 return ERR_PTR(-EINVAL);
1093} 1158}
1094 1159
1095static struct drm_mode_config_funcs vmw_kms_funcs = { 1160static struct drm_mode_config_funcs vmw_kms_funcs = {
@@ -1106,10 +1171,12 @@ int vmw_kms_present(struct vmw_private *dev_priv,
1106 uint32_t num_clips) 1171 uint32_t num_clips)
1107{ 1172{
1108 struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS]; 1173 struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
1174 struct drm_clip_rect *tmp;
1109 struct drm_crtc *crtc; 1175 struct drm_crtc *crtc;
1110 size_t fifo_size; 1176 size_t fifo_size;
1111 int i, k, num_units; 1177 int i, k, num_units;
1112 int ret = 0; /* silence warning */ 1178 int ret = 0; /* silence warning */
1179 int left, right, top, bottom;
1113 1180
1114 struct { 1181 struct {
1115 SVGA3dCmdHeader header; 1182 SVGA3dCmdHeader header;
@@ -1127,60 +1194,95 @@ int vmw_kms_present(struct vmw_private *dev_priv,
1127 BUG_ON(surface == NULL); 1194 BUG_ON(surface == NULL);
1128 BUG_ON(!clips || !num_clips); 1195 BUG_ON(!clips || !num_clips);
1129 1196
1197 tmp = kzalloc(sizeof(*tmp) * num_clips, GFP_KERNEL);
1198 if (unlikely(tmp == NULL)) {
1199 DRM_ERROR("Temporary cliprect memory alloc failed.\n");
1200 return -ENOMEM;
1201 }
1202
1130 fifo_size = sizeof(*cmd) + sizeof(SVGASignedRect) * num_clips; 1203 fifo_size = sizeof(*cmd) + sizeof(SVGASignedRect) * num_clips;
1131 cmd = kmalloc(fifo_size, GFP_KERNEL); 1204 cmd = kmalloc(fifo_size, GFP_KERNEL);
1132 if (unlikely(cmd == NULL)) { 1205 if (unlikely(cmd == NULL)) {
1133 DRM_ERROR("Failed to allocate temporary fifo memory.\n"); 1206 DRM_ERROR("Failed to allocate temporary fifo memory.\n");
1134 return -ENOMEM; 1207 ret = -ENOMEM;
1208 goto out_free_tmp;
1209 }
1210
1211 left = clips->x;
1212 right = clips->x + clips->w;
1213 top = clips->y;
1214 bottom = clips->y + clips->h;
1215
1216 for (i = 1; i < num_clips; i++) {
1217 left = min_t(int, left, (int)clips[i].x);
1218 right = max_t(int, right, (int)clips[i].x + clips[i].w);
1219 top = min_t(int, top, (int)clips[i].y);
1220 bottom = max_t(int, bottom, (int)clips[i].y + clips[i].h);
1135 } 1221 }
1136 1222
1137 /* only need to do this once */ 1223 /* only need to do this once */
1138 memset(cmd, 0, fifo_size); 1224 memset(cmd, 0, fifo_size);
1139 cmd->header.id = cpu_to_le32(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN); 1225 cmd->header.id = cpu_to_le32(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN);
1140 cmd->header.size = cpu_to_le32(fifo_size - sizeof(cmd->header));
1141
1142 cmd->body.srcRect.left = 0;
1143 cmd->body.srcRect.right = surface->sizes[0].width;
1144 cmd->body.srcRect.top = 0;
1145 cmd->body.srcRect.bottom = surface->sizes[0].height;
1146 1226
1147 blits = (SVGASignedRect *)&cmd[1]; 1227 blits = (SVGASignedRect *)&cmd[1];
1228
1229 cmd->body.srcRect.left = left;
1230 cmd->body.srcRect.right = right;
1231 cmd->body.srcRect.top = top;
1232 cmd->body.srcRect.bottom = bottom;
1233
1148 for (i = 0; i < num_clips; i++) { 1234 for (i = 0; i < num_clips; i++) {
1149 blits[i].left = clips[i].x; 1235 tmp[i].x1 = clips[i].x - left;
1150 blits[i].right = clips[i].x + clips[i].w; 1236 tmp[i].x2 = clips[i].x + clips[i].w - left;
1151 blits[i].top = clips[i].y; 1237 tmp[i].y1 = clips[i].y - top;
1152 blits[i].bottom = clips[i].y + clips[i].h; 1238 tmp[i].y2 = clips[i].y + clips[i].h - top;
1153 } 1239 }
1154 1240
1155 for (k = 0; k < num_units; k++) { 1241 for (k = 0; k < num_units; k++) {
1156 struct vmw_display_unit *unit = units[k]; 1242 struct vmw_display_unit *unit = units[k];
1157 int clip_x1 = destX - unit->crtc.x; 1243 struct vmw_clip_rect clip;
1158 int clip_y1 = destY - unit->crtc.y; 1244 int num;
1159 int clip_x2 = clip_x1 + surface->sizes[0].width; 1245
1160 int clip_y2 = clip_y1 + surface->sizes[0].height; 1246 clip.x1 = left + destX - unit->crtc.x;
1247 clip.y1 = top + destY - unit->crtc.y;
1248 clip.x2 = right + destX - unit->crtc.x;
1249 clip.y2 = bottom + destY - unit->crtc.y;
1161 1250
1162 /* skip any crtcs that misses the clip region */ 1251 /* skip any crtcs that misses the clip region */
1163 if (clip_x1 >= unit->crtc.mode.hdisplay || 1252 if (clip.x1 >= unit->crtc.mode.hdisplay ||
1164 clip_y1 >= unit->crtc.mode.vdisplay || 1253 clip.y1 >= unit->crtc.mode.vdisplay ||
1165 clip_x2 <= 0 || clip_y2 <= 0) 1254 clip.x2 <= 0 || clip.y2 <= 0)
1166 continue; 1255 continue;
1167 1256
1257 /*
1258 * In order for the clip rects to be correctly scaled
 1259 * the src and dest rects need to be the same size.
1260 */
1261 cmd->body.destRect.left = clip.x1;
1262 cmd->body.destRect.right = clip.x2;
1263 cmd->body.destRect.top = clip.y1;
1264 cmd->body.destRect.bottom = clip.y2;
1265
1266 /* create a clip rect of the crtc in dest coords */
1267 clip.x2 = unit->crtc.mode.hdisplay - clip.x1;
1268 clip.y2 = unit->crtc.mode.vdisplay - clip.y1;
1269 clip.x1 = 0 - clip.x1;
1270 clip.y1 = 0 - clip.y1;
1271
1168 /* need to reset sid as it is changed by execbuf */ 1272 /* need to reset sid as it is changed by execbuf */
1169 cmd->body.srcImage.sid = sid; 1273 cmd->body.srcImage.sid = sid;
1170
1171 cmd->body.destScreenId = unit->unit; 1274 cmd->body.destScreenId = unit->unit;
1172 1275
1173 /* 1276 /* clip and write blits to cmd stream */
1174 * The blit command is a lot more resilient then the 1277 vmw_clip_cliprects(tmp, num_clips, clip, blits, &num);
1175 * readback command when it comes to clip rects. So its
1176 * okay to go out of bounds.
1177 */
1178 1278
1179 cmd->body.destRect.left = clip_x1; 1279 /* if no cliprects hit skip this */
1180 cmd->body.destRect.right = clip_x2; 1280 if (num == 0)
1181 cmd->body.destRect.top = clip_y1; 1281 continue;
1182 cmd->body.destRect.bottom = clip_y2;
1183 1282
 1283 /* recalculate packet length */
1284 fifo_size = sizeof(*cmd) + sizeof(SVGASignedRect) * num;
1285 cmd->header.size = cpu_to_le32(fifo_size - sizeof(cmd->header));
1184 ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd, 1286 ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd,
1185 fifo_size, 0, NULL); 1287 fifo_size, 0, NULL);
1186 1288
@@ -1189,6 +1291,8 @@ int vmw_kms_present(struct vmw_private *dev_priv,
1189 } 1291 }
1190 1292
1191 kfree(cmd); 1293 kfree(cmd);
1294out_free_tmp:
1295 kfree(tmp);
1192 1296
1193 return ret; 1297 return ret;
1194} 1298}
@@ -1809,7 +1913,8 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
1809 } 1913 }
1810 1914
1811 rects_size = arg->num_outputs * sizeof(struct drm_vmw_rect); 1915 rects_size = arg->num_outputs * sizeof(struct drm_vmw_rect);
1812 rects = kzalloc(rects_size, GFP_KERNEL); 1916 rects = kcalloc(arg->num_outputs, sizeof(struct drm_vmw_rect),
1917 GFP_KERNEL);
1813 if (unlikely(!rects)) { 1918 if (unlikely(!rects)) {
1814 ret = -ENOMEM; 1919 ret = -ENOMEM;
1815 goto out_unlock; 1920 goto out_unlock;
@@ -1824,10 +1929,10 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
1824 } 1929 }
1825 1930
1826 for (i = 0; i < arg->num_outputs; ++i) { 1931 for (i = 0; i < arg->num_outputs; ++i) {
1827 if (rects->x < 0 || 1932 if (rects[i].x < 0 ||
1828 rects->y < 0 || 1933 rects[i].y < 0 ||
1829 rects->x + rects->w > mode_config->max_width || 1934 rects[i].x + rects[i].w > mode_config->max_width ||
1830 rects->y + rects->h > mode_config->max_height) { 1935 rects[i].y + rects[i].h > mode_config->max_height) {
1831 DRM_ERROR("Invalid GUI layout.\n"); 1936 DRM_ERROR("Invalid GUI layout.\n");
1832 ret = -EINVAL; 1937 ret = -EINVAL;
1833 goto out_free; 1938 goto out_free;
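
The layout-validation fix above is an indexing bug: the loop dereferenced rects->x on every iteration instead of rects[i].x, so only the first rect was ever checked against the mode_config limits. A standalone sketch of the corrected bounds check follows; the limits and rect values are made up.

#include <stdbool.h>
#include <stdio.h>

struct out_rect { int x, y, w, h; };

static bool layout_valid(const struct out_rect *rects, int n,
			 int max_w, int max_h)
{
	int i;

	for (i = 0; i < n; i++) {       /* index each rect, not just the first */
		if (rects[i].x < 0 || rects[i].y < 0 ||
		    rects[i].x + rects[i].w > max_w ||
		    rects[i].y + rects[i].h > max_h)
			return false;
	}
	return true;
}

int main(void)
{
	struct out_rect layout[2] = { { 0, 0, 1920, 1080 }, { 1920, 0, 1280, 1024 } };

	printf("layout %s\n",
	       layout_valid(layout, 2, 8192, 8192) ? "ok" : "invalid");
	return 0;
}
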
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
index af8e6e5bd964..e1cb8556355f 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
@@ -62,9 +62,14 @@ struct vmw_framebuffer {
62int vmw_cursor_update_image(struct vmw_private *dev_priv, 62int vmw_cursor_update_image(struct vmw_private *dev_priv,
63 u32 *image, u32 width, u32 height, 63 u32 *image, u32 width, u32 height,
64 u32 hotspotX, u32 hotspotY); 64 u32 hotspotX, u32 hotspotY);
65int vmw_cursor_update_dmabuf(struct vmw_private *dev_priv,
66 struct vmw_dma_buffer *dmabuf,
67 u32 width, u32 height,
68 u32 hotspotX, u32 hotspotY);
65void vmw_cursor_update_position(struct vmw_private *dev_priv, 69void vmw_cursor_update_position(struct vmw_private *dev_priv,
66 bool show, int x, int y); 70 bool show, int x, int y);
67 71
72
68/** 73/**
69 * Base class display unit. 74 * Base class display unit.
70 * 75 *
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index 90c5e3928491..8f8dbd43c33d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -74,9 +74,10 @@ static int vmw_ldu_commit_list(struct vmw_private *dev_priv)
74{ 74{
75 struct vmw_legacy_display *lds = dev_priv->ldu_priv; 75 struct vmw_legacy_display *lds = dev_priv->ldu_priv;
76 struct vmw_legacy_display_unit *entry; 76 struct vmw_legacy_display_unit *entry;
77 struct vmw_display_unit *du = NULL;
77 struct drm_framebuffer *fb = NULL; 78 struct drm_framebuffer *fb = NULL;
78 struct drm_crtc *crtc = NULL; 79 struct drm_crtc *crtc = NULL;
79 int i = 0; 80 int i = 0, ret;
80 81
81 /* If there is no display topology the host just assumes 82 /* If there is no display topology the host just assumes
82 * that the guest will set the same layout as the host. 83 * that the guest will set the same layout as the host.
@@ -129,6 +130,25 @@ static int vmw_ldu_commit_list(struct vmw_private *dev_priv)
129 130
130 lds->last_num_active = lds->num_active; 131 lds->last_num_active = lds->num_active;
131 132
133
134 /* Find the first du with a cursor. */
135 list_for_each_entry(entry, &lds->active, active) {
136 du = &entry->base;
137
138 if (!du->cursor_dmabuf)
139 continue;
140
141 ret = vmw_cursor_update_dmabuf(dev_priv,
142 du->cursor_dmabuf,
143 64, 64,
144 du->hotspot_x,
145 du->hotspot_y);
146 if (ret == 0)
147 break;
148
149 DRM_ERROR("Could not update cursor image\n");
150 }
151
132 return 0; 152 return 0;
133} 153}
134 154
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 86c5e4cceb31..1c7f09e26819 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -1190,6 +1190,29 @@ void vmw_resource_unreserve(struct list_head *list)
1190 write_unlock(lock); 1190 write_unlock(lock);
1191} 1191}
1192 1192
1193/**
 1194 * Helper function that looks up either a surface or a dmabuf.
 1195 *
 1196 * The pointers pointed at by out_surf and out_buf need to be NULL.
1197 */
1198int vmw_user_lookup_handle(struct vmw_private *dev_priv,
1199 struct ttm_object_file *tfile,
1200 uint32_t handle,
1201 struct vmw_surface **out_surf,
1202 struct vmw_dma_buffer **out_buf)
1203{
1204 int ret;
1205
1206 BUG_ON(*out_surf || *out_buf);
1207
1208 ret = vmw_user_surface_lookup_handle(dev_priv, tfile, handle, out_surf);
1209 if (!ret)
1210 return 0;
1211
1212 ret = vmw_user_dmabuf_lookup(tfile, handle, out_buf);
1213 return ret;
1214}
1215
1193 1216
1194int vmw_user_surface_lookup_handle(struct vmw_private *dev_priv, 1217int vmw_user_surface_lookup_handle(struct vmw_private *dev_priv,
1195 struct ttm_object_file *tfile, 1218 struct ttm_object_file *tfile,
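
Callers of the new vmw_user_lookup_handle() (the cursor_set and fb_create paths earlier in this diff) pass two NULL-initialised output pointers, check which one came back non-NULL, and drop the single reference they received on that object. A hedged sketch of that caller contract with stubbed lookups follows; the stubs, types and handle value are illustrative.

#include <stddef.h>
#include <stdio.h>

struct surface { int refs; };
struct dmabuf  { int refs; };

static struct dmabuf the_buf = { 1 };   /* pretend handle 7 is a dmabuf */

/* Stand-ins for the two underlying lookups: surfaces first, then dmabufs. */
static int surface_lookup(unsigned handle, struct surface **out)
{
	(void)handle; (void)out;
	return -2;                      /* not a surface in this sketch */
}

static int dmabuf_lookup(unsigned handle, struct dmabuf **out)
{
	if (handle != 7)
		return -2;
	the_buf.refs++;                 /* a successful lookup hands out one reference */
	*out = &the_buf;
	return 0;
}

/* Same contract as vmw_user_lookup_handle(): *out_surf and *out_buf must be NULL. */
static int lookup_handle(unsigned handle, struct surface **out_surf,
			 struct dmabuf **out_buf)
{
	if (!surface_lookup(handle, out_surf))
		return 0;
	return dmabuf_lookup(handle, out_buf);
}

int main(void)
{
	struct surface *surf = NULL;
	struct dmabuf *buf = NULL;

	if (lookup_handle(7, &surf, &buf))
		return 1;

	printf("got %s\n", buf ? "dmabuf" : "surface");
	if (buf)
		the_buf.refs--;         /* caller drops the reference it was given */
	return 0;
}
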