Diffstat (limited to 'drivers')
 drivers/gpu/drm/i915/i915_drv.c            |  26
 drivers/gpu/drm/i915/i915_drv.h            |  13
 drivers/gpu/drm/i915/i915_gem.c            | 102
 drivers/gpu/drm/i915/i915_gem_execbuffer.c |   9
 drivers/gpu/drm/i915/i915_gem_gtt.c        |  14
 drivers/gpu/drm/i915/i915_reg.h            |   4
 drivers/gpu/drm/i915/i915_suspend.c        |   8
 drivers/gpu/drm/i915/intel_bios.c          |   3
 drivers/gpu/drm/i915/intel_crt.c           |   2
 drivers/gpu/drm/i915/intel_ddi.c           |  11
 drivers/gpu/drm/i915/intel_display.c       |  80
 drivers/gpu/drm/i915/intel_dp.c            |   2
 drivers/gpu/drm/i915/intel_drv.h           |   4
 drivers/gpu/drm/i915/intel_hdmi.c          |   2
 drivers/gpu/drm/i915/intel_lvds.c          |  24
 drivers/gpu/drm/i915/intel_modes.c         |   4
 drivers/gpu/drm/i915/intel_panel.c         |  25
 drivers/gpu/drm/i915/intel_pm.c            |  25
 drivers/gpu/drm/i915/intel_ringbuffer.c    |   7
 drivers/gpu/drm/i915/intel_sdvo.c          |  64
 drivers/gpu/drm/i915/intel_tv.c            |  14
 21 files changed, 264 insertions(+), 179 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index f85e8b0ec00f..6745c7f976db 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -47,11 +47,11 @@ MODULE_PARM_DESC(modeset,
 unsigned int i915_fbpercrtc __always_unused = 0;
 module_param_named(fbpercrtc, i915_fbpercrtc, int, 0400);
 
-int i915_panel_ignore_lid __read_mostly = 0;
+int i915_panel_ignore_lid __read_mostly = 1;
 module_param_named(panel_ignore_lid, i915_panel_ignore_lid, int, 0600);
 MODULE_PARM_DESC(panel_ignore_lid,
-		"Override lid status (0=autodetect [default], 1=lid open, "
-		"-1=lid closed)");
+		"Override lid status (0=autodetect, 1=autodetect disabled [default], "
+		"-1=force lid closed, -2=force lid open)");
 
 unsigned int i915_powersave __read_mostly = 1;
 module_param_named(powersave, i915_powersave, int, 0600);
@@ -396,12 +396,6 @@ static const struct pci_device_id pciidlist[] = { /* aka */
 MODULE_DEVICE_TABLE(pci, pciidlist);
 #endif
 
-#define INTEL_PCH_DEVICE_ID_MASK 0xff00
-#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00
-#define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00
-#define INTEL_PCH_PPT_DEVICE_ID_TYPE 0x1e00
-#define INTEL_PCH_LPT_DEVICE_ID_TYPE 0x8c00
-
 void intel_detect_pch(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -416,8 +410,9 @@ void intel_detect_pch(struct drm_device *dev)
 	pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
 	if (pch) {
 		if (pch->vendor == PCI_VENDOR_ID_INTEL) {
-			int id;
+			unsigned short id;
 			id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
+			dev_priv->pch_id = id;
 
 			if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
 				dev_priv->pch_type = PCH_IBX;
@@ -440,6 +435,11 @@ void intel_detect_pch(struct drm_device *dev)
 				dev_priv->num_pch_pll = 0;
 				DRM_DEBUG_KMS("Found LynxPoint PCH\n");
 				WARN_ON(!IS_HASWELL(dev));
+			} else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
+				dev_priv->pch_type = PCH_LPT;
+				dev_priv->num_pch_pll = 0;
+				DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
+				WARN_ON(!IS_HASWELL(dev));
 			}
 			BUG_ON(dev_priv->num_pch_pll > I915_NUM_PLLS);
 		}
@@ -884,7 +884,7 @@ i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	struct intel_device_info *intel_info =
 		(struct intel_device_info *) ent->driver_data;
 
-	if (intel_info->is_haswell || intel_info->is_valleyview)
+	if (intel_info->is_valleyview)
 		if(!i915_preliminary_hw_support) {
 			DRM_ERROR("Preliminary hardware support disabled\n");
 			return -ENODEV;
@@ -1258,6 +1258,10 @@ void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \
 	} \
 	if (IS_GEN5(dev_priv->dev)) \
 		ilk_dummy_write(dev_priv); \
+	if (IS_HASWELL(dev_priv->dev) && (I915_READ_NOTRACE(GEN7_ERR_INT) & ERR_INT_MMIO_UNCLAIMED)) { \
+		DRM_ERROR("Unknown unclaimed register before writing to %x\n", reg); \
+		I915_WRITE_NOTRACE(GEN7_ERR_INT, ERR_INT_MMIO_UNCLAIMED); \
+	} \
 	if (IS_VALLEYVIEW(dev_priv->dev) && IS_DISPLAYREG(reg)) { \
 		write##y(val, dev_priv->regs + reg + 0x180000); \
 	} else { \
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 4b83e5f4b32e..87c06f97fa89 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -402,7 +402,6 @@ struct i915_suspend_saved_registers {
 	u32 saveDSPACNTR;
 	u32 saveDSPBCNTR;
 	u32 saveDSPARB;
-	u32 saveHWS;
 	u32 savePIPEACONF;
 	u32 savePIPEBCONF;
 	u32 savePIPEASRC;
@@ -738,6 +737,7 @@ typedef struct drm_i915_private {
 
 	/* PCH chipset type */
 	enum intel_pch pch_type;
+	unsigned short pch_id;
 
 	unsigned long quirks;
 
@@ -1161,6 +1161,8 @@ struct drm_i915_file_private {
 #define IS_VALLEYVIEW(dev) (INTEL_INFO(dev)->is_valleyview)
 #define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell)
 #define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile)
+#define IS_ULT(dev) (IS_HASWELL(dev) && \
+		     ((dev)->pci_device & 0xFF00) == 0x0A00)
 
 /*
  * The genX designation typically refers to the render engine, so render
@@ -1206,6 +1208,13 @@ struct drm_i915_file_private {
 
 #define HAS_PIPE_CONTROL(dev) (INTEL_INFO(dev)->gen >= 5)
 
+#define INTEL_PCH_DEVICE_ID_MASK 0xff00
+#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00
+#define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00
+#define INTEL_PCH_PPT_DEVICE_ID_TYPE 0x1e00
+#define INTEL_PCH_LPT_DEVICE_ID_TYPE 0x8c00
+#define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE 0x9c00
+
 #define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type)
 #define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT)
 #define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
@@ -1541,7 +1550,7 @@ void i915_gem_init_global_gtt(struct drm_device *dev,
 				 unsigned long end);
 int i915_gem_gtt_init(struct drm_device *dev);
 void i915_gem_gtt_fini(struct drm_device *dev);
-extern inline void i915_gem_chipset_flush(struct drm_device *dev)
+static inline void i915_gem_chipset_flush(struct drm_device *dev)
 {
 	if (INTEL_INFO(dev)->gen < 6)
 		intel_gtt_chipset_flush();
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index a2f1b8652d68..b0016bb65631 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1345,30 +1345,17 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	trace_i915_gem_object_fault(obj, page_offset, true, write);
 
 	/* Now bind it into the GTT if needed */
-	if (!obj->map_and_fenceable) {
-		ret = i915_gem_object_unbind(obj);
-		if (ret)
-			goto unlock;
-	}
-	if (!obj->gtt_space) {
-		ret = i915_gem_object_bind_to_gtt(obj, 0, true, false);
-		if (ret)
-			goto unlock;
-
-		ret = i915_gem_object_set_to_gtt_domain(obj, write);
-		if (ret)
-			goto unlock;
-	}
+	ret = i915_gem_object_pin(obj, 0, true, false);
+	if (ret)
+		goto unlock;
 
-	if (!obj->has_global_gtt_mapping)
-		i915_gem_gtt_bind_object(obj, obj->cache_level);
+	ret = i915_gem_object_set_to_gtt_domain(obj, write);
+	if (ret)
+		goto unpin;
 
 	ret = i915_gem_object_get_fence(obj);
 	if (ret)
-		goto unlock;
-
-	if (i915_gem_object_is_inactive(obj))
-		list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
+		goto unpin;
 
 	obj->fault_mappable = true;
 
@@ -1377,6 +1364,8 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 
 	/* Finally, remap it using the new GTT offset */
 	ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
+unpin:
+	i915_gem_object_unpin(obj);
 unlock:
 	mutex_unlock(&dev->struct_mutex);
 out:
@@ -2925,13 +2914,14 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
 	if (ret)
 		return ret;
 
+	i915_gem_object_pin_pages(obj);
+
  search_free:
 	if (map_and_fenceable)
-		free_space =
-			drm_mm_search_free_in_range_color(&dev_priv->mm.gtt_space,
-							  size, alignment, obj->cache_level,
-							  0, dev_priv->mm.gtt_mappable_end,
-							  false);
+		free_space = drm_mm_search_free_in_range_color(&dev_priv->mm.gtt_space,
+							       size, alignment, obj->cache_level,
+							       0, dev_priv->mm.gtt_mappable_end,
+							       false);
 	else
 		free_space = drm_mm_search_free_color(&dev_priv->mm.gtt_space,
 						      size, alignment, obj->cache_level,
@@ -2939,60 +2929,60 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
 
 	if (free_space != NULL) {
 		if (map_and_fenceable)
-			obj->gtt_space =
+			free_space =
 				drm_mm_get_block_range_generic(free_space,
 							       size, alignment, obj->cache_level,
 							       0, dev_priv->mm.gtt_mappable_end,
 							       false);
 		else
-			obj->gtt_space =
+			free_space =
 				drm_mm_get_block_generic(free_space,
 							 size, alignment, obj->cache_level,
 							 false);
 	}
-	if (obj->gtt_space == NULL) {
+	if (free_space == NULL) {
 		ret = i915_gem_evict_something(dev, size, alignment,
 					       obj->cache_level,
 					       map_and_fenceable,
 					       nonblocking);
-		if (ret)
+		if (ret) {
+			i915_gem_object_unpin_pages(obj);
 			return ret;
+		}
 
 		goto search_free;
 	}
 	if (WARN_ON(!i915_gem_valid_gtt_space(dev,
-					      obj->gtt_space,
+					      free_space,
 					      obj->cache_level))) {
-		drm_mm_put_block(obj->gtt_space);
-		obj->gtt_space = NULL;
+		i915_gem_object_unpin_pages(obj);
+		drm_mm_put_block(free_space);
 		return -EINVAL;
 	}
 
-
 	ret = i915_gem_gtt_prepare_object(obj);
 	if (ret) {
-		drm_mm_put_block(obj->gtt_space);
-		obj->gtt_space = NULL;
+		i915_gem_object_unpin_pages(obj);
+		drm_mm_put_block(free_space);
 		return ret;
 	}
 
-	if (!dev_priv->mm.aliasing_ppgtt)
-		i915_gem_gtt_bind_object(obj, obj->cache_level);
-
 	list_move_tail(&obj->gtt_list, &dev_priv->mm.bound_list);
 	list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
 
-	obj->gtt_offset = obj->gtt_space->start;
+	obj->gtt_space = free_space;
+	obj->gtt_offset = free_space->start;
 
 	fenceable =
-		obj->gtt_space->size == fence_size &&
-		(obj->gtt_space->start & (fence_alignment - 1)) == 0;
+		free_space->size == fence_size &&
+		(free_space->start & (fence_alignment - 1)) == 0;
 
 	mappable =
 		obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end;
 
 	obj->map_and_fenceable = mappable && fenceable;
 
+	i915_gem_object_unpin_pages(obj);
 	trace_i915_gem_object_bind(obj, map_and_fenceable);
 	i915_gem_verify_gtt(dev);
 	return 0;
@@ -3456,11 +3446,16 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
 	}
 
 	if (obj->gtt_space == NULL) {
+		struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+
 		ret = i915_gem_object_bind_to_gtt(obj, alignment,
 						  map_and_fenceable,
 						  nonblocking);
 		if (ret)
 			return ret;
+
+		if (!dev_priv->mm.aliasing_ppgtt)
+			i915_gem_gtt_bind_object(obj, obj->cache_level);
 	}
 
 	if (!obj->has_global_gtt_mapping && map_and_fenceable)
@@ -4347,6 +4342,19 @@ void i915_gem_release(struct drm_device *dev, struct drm_file *file)
 	spin_unlock(&file_priv->mm.lock);
 }
 
+static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
+{
+	if (!mutex_is_locked(mutex))
+		return false;
+
+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)
+	return mutex->owner == task;
+#else
+	/* Since UP may be pre-empted, we cannot assume that we own the lock */
+	return false;
+#endif
+}
+
 static int
 i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
 {
@@ -4357,10 +4365,15 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
 	struct drm_device *dev = dev_priv->dev;
 	struct drm_i915_gem_object *obj;
 	int nr_to_scan = sc->nr_to_scan;
+	bool unlock = true;
 	int cnt;
 
-	if (!mutex_trylock(&dev->struct_mutex))
-		return 0;
+	if (!mutex_trylock(&dev->struct_mutex)) {
+		if (!mutex_is_locked_by(&dev->struct_mutex, current))
+			return 0;
+
+		unlock = false;
+	}
 
 	if (nr_to_scan) {
 		nr_to_scan -= i915_gem_purge(dev_priv, nr_to_scan);
@@ -4376,6 +4389,7 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
 		if (obj->pin_count == 0 && obj->pages_pin_count == 0)
 			cnt += obj->base.size >> PAGE_SHIFT;
 
-	mutex_unlock(&dev->struct_mutex);
+	if (unlock)
+		mutex_unlock(&dev->struct_mutex);
 	return cnt;
 }
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index d80e9dd00c48..48e4317e72dc 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -128,15 +128,6 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
 						  target_i915_obj->cache_level);
 	}
 
-	/* The target buffer should have appeared before us in the
-	 * exec_object list, so it should have a GTT space bound by now.
-	 */
-	if (unlikely(target_offset == 0)) {
-		DRM_DEBUG("No GTT space found for object %d\n",
-			  reloc->target_handle);
-		return ret;
-	}
-
 	/* Validate that the target is in a valid r/w GPU domain */
 	if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
 		DRM_DEBUG("reloc with multiple write domains: "
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 35fec1e61346..f7ac61ee1504 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -367,8 +367,9 @@ static void i915_ggtt_clear_range(struct drm_device *dev,
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	gtt_pte_t scratch_pte;
-	volatile void __iomem *gtt_base = dev_priv->mm.gtt->gtt + first_entry;
+	gtt_pte_t __iomem *gtt_base = dev_priv->mm.gtt->gtt + first_entry;
 	const int max_entries = dev_priv->mm.gtt->gtt_total_entries - first_entry;
+	int i;
 
 	if (INTEL_INFO(dev)->gen < 6) {
 		intel_gtt_clear_range(first_entry, num_entries);
@@ -381,7 +382,8 @@ static void i915_ggtt_clear_range(struct drm_device *dev,
 		num_entries = max_entries;
 
 	scratch_pte = pte_encode(dev, dev_priv->mm.gtt->scratch_page_dma, I915_CACHE_LLC);
-	memset_io(gtt_base, scratch_pte, num_entries * sizeof(scratch_pte));
+	for (i = 0; i < num_entries; i++)
+		iowrite32(scratch_pte, &gtt_base[i]);
 	readl(gtt_base);
 }
 
@@ -609,7 +611,6 @@ int i915_gem_gtt_init(struct drm_device *dev)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	phys_addr_t gtt_bus_addr;
 	u16 snb_gmch_ctl;
-	u32 tmp;
 	int ret;
 
 	/* On modern platforms we need not worry ourself with the legacy
@@ -638,12 +639,9 @@ int i915_gem_gtt_init(struct drm_device *dev)
 	if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(40)))
 		pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(40));
 
-	pci_read_config_dword(dev->pdev, PCI_BASE_ADDRESS_0, &tmp);
 	/* For GEN6+ the PTEs for the ggtt live at 2MB + BAR0 */
-	gtt_bus_addr = (tmp & PCI_BASE_ADDRESS_MEM_MASK) + (2<<20);
-
-	pci_read_config_dword(dev->pdev, PCI_BASE_ADDRESS_2, &tmp);
-	dev_priv->mm.gtt->gma_bus_addr = tmp & PCI_BASE_ADDRESS_MEM_MASK;
+	gtt_bus_addr = pci_resource_start(dev->pdev, 0) + (2<<20);
+	dev_priv->mm.gtt->gma_bus_addr = pci_resource_start(dev->pdev, 2);
 
 	/* i9xx_setup */
 	pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 9118bd112589..97fbd9d1823b 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -3339,6 +3339,8 @@
 #define _PFA_CTL_1 0x68080
 #define _PFB_CTL_1 0x68880
 #define PF_ENABLE (1<<31)
+#define PF_PIPE_SEL_MASK_IVB (3<<29)
+#define PF_PIPE_SEL_IVB(pipe) ((pipe)<<29)
 #define PF_FILTER_MASK (3<<23)
 #define PF_FILTER_PROGRAMMED (0<<23)
 #define PF_FILTER_MED_3x3 (1<<23)
@@ -3851,6 +3853,7 @@
 
 #define SOUTH_DSPCLK_GATE_D 0xc2020
 #define PCH_DPLSUNIT_CLOCK_GATE_DISABLE (1<<29)
+#define PCH_LP_PARTITION_LEVEL_DISABLE (1<<12)
 
 /* CPU: FDI_TX */
 #define _FDI_TXA_CTL 0x60100
@@ -4514,6 +4517,7 @@
 #define DDI_BUF_EMP_800MV_3_5DB_HSW (8<<24) /* Sel8 */
 #define DDI_BUF_EMP_MASK (0xf<<24)
 #define DDI_BUF_IS_IDLE (1<<7)
+#define DDI_A_4_LANES (1<<4)
 #define DDI_PORT_WIDTH_X1 (0<<1)
 #define DDI_PORT_WIDTH_X2 (1<<1)
 #define DDI_PORT_WIDTH_X4 (3<<1)
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index a818eba7cb66..63d4d30c39de 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -811,10 +811,6 @@ int i915_save_state(struct drm_device *dev)
 
 	mutex_lock(&dev->struct_mutex);
 
-	/* Hardware status page */
-	if (!drm_core_check_feature(dev, DRIVER_MODESET))
-		dev_priv->regfile.saveHWS = I915_READ(HWS_PGA);
-
 	i915_save_display(dev);
 
 	if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
@@ -865,10 +861,6 @@ int i915_restore_state(struct drm_device *dev)
 
 	mutex_lock(&dev->struct_mutex);
 
-	/* Hardware status page */
-	if (!drm_core_check_feature(dev, DRIVER_MODESET))
-		I915_WRITE(HWS_PGA, dev_priv->regfile.saveHWS);
-
 	i915_restore_display(dev);
 
 	if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 0ed6baff4b0c..87e9b92039df 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -762,7 +762,8 @@ void intel_setup_bios(struct drm_device *dev)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
 	/* Set the Panel Power On/Off timings if uninitialized. */
-	if ((I915_READ(PP_ON_DELAYS) == 0) && (I915_READ(PP_OFF_DELAYS) == 0)) {
+	if (!HAS_PCH_SPLIT(dev) &&
+	    I915_READ(PP_ON_DELAYS) == 0 && I915_READ(PP_OFF_DELAYS) == 0) {
 		/* Set T2 to 40ms and T5 to 200ms */
 		I915_WRITE(PP_ON_DELAYS, 0x019007d0);
 
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 62a5b1154762..5c7774396e10 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -751,7 +751,7 @@ void intel_crt_init(struct drm_device *dev)
 
 	crt->base.type = INTEL_OUTPUT_ANALOG;
 	crt->base.cloneable = true;
-	if (IS_HASWELL(dev) || IS_I830(dev))
+	if (IS_I830(dev))
 		crt->base.crtc_mask = (1 << 0);
 	else
 		crt->base.crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 58f50ebdbef6..852012b6fc5b 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -669,6 +669,15 @@ static void intel_ddi_mode_set(struct drm_encoder *encoder,
 			break;
 		}
 
+		if (intel_dp->has_audio) {
+			DRM_DEBUG_DRIVER("DP audio on pipe %c on DDI\n",
+					 pipe_name(intel_crtc->pipe));
+
+			/* write eld */
+			DRM_DEBUG_DRIVER("DP audio: write eld information\n");
+			intel_write_eld(encoder, adjusted_mode);
+		}
+
 		intel_dp_init_link_config(intel_dp);
 
 	} else if (type == INTEL_OUTPUT_HDMI) {
@@ -1300,6 +1309,8 @@ int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv)
 	else if ((I915_READ(LCPLL_CTL) & LCPLL_CLK_FREQ_MASK) ==
 		 LCPLL_CLK_FREQ_450)
 		return 450;
+	else if (IS_ULT(dev_priv->dev))
+		return 338;
 	else
 		return 540;
 }
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 6301d0cb45ee..3f7f62d370cb 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -1149,14 +1149,9 @@ static void assert_fdi_rx(struct drm_i915_private *dev_priv,
 	u32 val;
 	bool cur_state;
 
-	if (IS_HASWELL(dev_priv->dev) && pipe > 0) {
-		DRM_ERROR("Attempting to enable FDI_RX on Haswell pipe > 0\n");
-		return;
-	} else {
-		reg = FDI_RX_CTL(pipe);
-		val = I915_READ(reg);
-		cur_state = !!(val & FDI_RX_ENABLE);
-	}
+	reg = FDI_RX_CTL(pipe);
+	val = I915_READ(reg);
+	cur_state = !!(val & FDI_RX_ENABLE);
 	WARN(cur_state != state,
 	     "FDI RX state assertion failure (expected %s, current %s)\n",
 	     state_string(state), state_string(cur_state));
@@ -1189,10 +1184,6 @@ static void assert_fdi_rx_pll_enabled(struct drm_i915_private *dev_priv,
 	int reg;
 	u32 val;
 
-	if (IS_HASWELL(dev_priv->dev) && pipe > 0) {
-		DRM_ERROR("Attempting to enable FDI on Haswell with pipe > 0\n");
-		return;
-	}
 	reg = FDI_RX_CTL(pipe);
 	val = I915_READ(reg);
 	WARN(!(val & FDI_RX_PLL_ENABLE), "FDI RX PLL assertion failure, should be active but is disabled\n");
@@ -1821,9 +1812,15 @@ static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
 {
 	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
 								       pipe);
+	enum transcoder pch_transcoder;
 	int reg;
 	u32 val;
 
+	if (IS_HASWELL(dev_priv->dev))
+		pch_transcoder = TRANSCODER_A;
+	else
+		pch_transcoder = pipe;
+
 	/*
 	 * A pipe without a PLL won't actually be able to drive bits from
 	 * a plane. On ILK+ the pipe PLLs are integrated, so we don't
@@ -1834,8 +1831,8 @@ static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
 	else {
 		if (pch_port) {
 			/* if driving the PCH, we need FDI enabled */
-			assert_fdi_rx_pll_enabled(dev_priv, pipe);
-			assert_fdi_tx_pll_enabled(dev_priv, pipe);
+			assert_fdi_rx_pll_enabled(dev_priv, pch_transcoder);
+			assert_fdi_tx_pll_enabled(dev_priv, cpu_transcoder);
 		}
 		/* FIXME: assert CPU port conditions for SNB+ */
 	}
@@ -2924,9 +2921,6 @@ static void ironlake_fdi_disable(struct drm_crtc *crtc)
 	/* Ironlake workaround, disable clock pointer after downing FDI */
 	if (HAS_PCH_IBX(dev)) {
 		I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
-		I915_WRITE(FDI_RX_CHICKEN(pipe),
-			   I915_READ(FDI_RX_CHICKEN(pipe) &
-				     ~FDI_RX_PHASE_SYNC_POINTER_EN));
 	} else if (HAS_PCH_CPT(dev)) {
 		cpt_phase_pointer_disable(dev, pipe);
 	}
@@ -3393,7 +3387,11 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
 		 * as some pre-programmed values are broken,
 		 * e.g. x201.
 		 */
-		I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
+		if (IS_IVYBRIDGE(dev))
+			I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
+						 PF_PIPE_SEL_IVB(pipe));
+		else
+			I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
 		I915_WRITE(PF_WIN_POS(pipe), dev_priv->pch_pf_pos);
 		I915_WRITE(PF_WIN_SZ(pipe), dev_priv->pch_pf_size);
 	}
@@ -3469,7 +3467,8 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
 		 * as some pre-programmed values are broken,
 		 * e.g. x201.
 		 */
-		I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
+		I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
+					 PF_PIPE_SEL_IVB(pipe));
 		I915_WRITE(PF_WIN_POS(pipe), dev_priv->pch_pf_pos);
 		I915_WRITE(PF_WIN_SZ(pipe), dev_priv->pch_pf_size);
 	}
@@ -6899,14 +6898,19 @@ static void intel_unpin_work_fn(struct work_struct *__work)
 {
 	struct intel_unpin_work *work =
 		container_of(__work, struct intel_unpin_work, work);
+	struct drm_device *dev = work->crtc->dev;
 
-	mutex_lock(&work->dev->struct_mutex);
+	mutex_lock(&dev->struct_mutex);
 	intel_unpin_fb_obj(work->old_fb_obj);
 	drm_gem_object_unreference(&work->pending_flip_obj->base);
 	drm_gem_object_unreference(&work->old_fb_obj->base);
 
-	intel_update_fbc(work->dev);
-	mutex_unlock(&work->dev->struct_mutex);
+	intel_update_fbc(dev);
+	mutex_unlock(&dev->struct_mutex);
+
+	BUG_ON(atomic_read(&to_intel_crtc(work->crtc)->unpin_work_count) == 0);
+	atomic_dec(&to_intel_crtc(work->crtc)->unpin_work_count);
+
 	kfree(work);
 }
 
@@ -6917,8 +6921,6 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	struct intel_unpin_work *work;
 	struct drm_i915_gem_object *obj;
-	struct drm_pending_vblank_event *e;
-	struct timeval tvbl;
 	unsigned long flags;
 
 	/* Ignore early vblank irqs */
@@ -6934,17 +6936,8 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
 
 	intel_crtc->unpin_work = NULL;
 
-	if (work->event) {
-		e = work->event;
-		e->event.sequence = drm_vblank_count_and_time(dev, intel_crtc->pipe, &tvbl);
-
-		e->event.tv_sec = tvbl.tv_sec;
-		e->event.tv_usec = tvbl.tv_usec;
-
-		list_add_tail(&e->base.link,
-			      &e->base.file_priv->event_list);
-		wake_up_interruptible(&e->base.file_priv->event_wait);
-	}
+	if (work->event)
+		drm_send_vblank_event(dev, intel_crtc->pipe, work->event);
 
 	drm_vblank_put(dev, intel_crtc->pipe);
 
@@ -6954,9 +6947,9 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
 
 	atomic_clear_mask(1 << intel_crtc->plane,
 			  &obj->pending_flip.counter);
-
 	wake_up(&dev_priv->pending_flip_queue);
-	schedule_work(&work->work);
+
+	queue_work(dev_priv->wq, &work->work);
 
 	trace_i915_flip_complete(intel_crtc->plane, work->pending_flip_obj);
 }
@@ -7257,7 +7250,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 		return -ENOMEM;
 
 	work->event = event;
-	work->dev = crtc->dev;
+	work->crtc = crtc;
 	intel_fb = to_intel_framebuffer(crtc->fb);
 	work->old_fb_obj = intel_fb->obj;
 	INIT_WORK(&work->work, intel_unpin_work_fn);
@@ -7282,6 +7275,9 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 	intel_fb = to_intel_framebuffer(fb);
 	obj = intel_fb->obj;
 
+	if (atomic_read(&intel_crtc->unpin_work_count) >= 2)
+		flush_workqueue(dev_priv->wq);
+
 	ret = i915_mutex_lock_interruptible(dev);
 	if (ret)
 		goto cleanup;
@@ -7300,6 +7296,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 	 * the flip occurs and the object is no longer visible.
 	 */
 	atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
+	atomic_inc(&intel_crtc->unpin_work_count);
 
 	ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
 	if (ret)
@@ -7314,6 +7311,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 	return 0;
 
 cleanup_pending:
+	atomic_dec(&intel_crtc->unpin_work_count);
 	atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
 	drm_gem_object_unreference(&work->old_fb_obj->base);
 	drm_gem_object_unreference(&obj->base);
@@ -7609,7 +7607,7 @@ intel_modeset_update_state(struct drm_device *dev, unsigned prepare_pipes)
 			dev->mode_config.dpms_property;
 
 		connector->dpms = DRM_MODE_DPMS_ON;
-		drm_connector_property_set_value(connector,
+		drm_object_property_set_value(&connector->base,
 						 dpms_property,
 						 DRM_MODE_DPMS_ON);
 
@@ -8263,7 +8261,9 @@ static void intel_setup_outputs(struct drm_device *dev)
 		I915_WRITE(PFIT_CONTROL, 0);
 	}
 
-	intel_crt_init(dev);
+	if (!(IS_HASWELL(dev) &&
+	      (I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)))
+		intel_crt_init(dev);
 
 	if (IS_HASWELL(dev)) {
 		int found;
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index a9ba88a9b1ab..d76258dcb8f8 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -2393,7 +2393,7 @@ intel_dp_set_property(struct drm_connector *connector,
 	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
 	int ret;
 
-	ret = drm_connector_property_set_value(connector, property, val);
+	ret = drm_object_property_set_value(&connector->base, property, val);
 	if (ret)
 		return ret;
 
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index bcc52412810f..522061ca0685 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -211,6 +211,8 @@ struct intel_crtc {
 	struct intel_unpin_work *unpin_work;
 	int fdi_lanes;
 
+	atomic_t unpin_work_count;
+
 	/* Display surface base address adjustement for pageflips. Note that on
 	 * gen4+ this only adjusts up to a tile, offsets within a tile are
 	 * handled in the hw itself (with the TILEOFF register). */
@@ -395,7 +397,7 @@ intel_get_crtc_for_plane(struct drm_device *dev, int plane)
 
 struct intel_unpin_work {
 	struct work_struct work;
-	struct drm_device *dev;
+	struct drm_crtc *crtc;
 	struct drm_i915_gem_object *old_fb_obj;
 	struct drm_i915_gem_object *pending_flip_obj;
 	struct drm_pending_vblank_event *event;
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 1dcfd5b6e141..5c279b48df97 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -874,7 +874,7 @@ intel_hdmi_set_property(struct drm_connector *connector,
 	struct drm_i915_private *dev_priv = connector->dev->dev_private;
 	int ret;
 
-	ret = drm_connector_property_set_value(connector, property, val);
+	ret = drm_object_property_set_value(&connector->base, property, val);
 	if (ret)
 		return ret;
 
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index d8318821f37b..81502e8be26b 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -460,13 +460,8 @@ static int intel_lvds_get_modes(struct drm_connector *connector)
 	struct drm_display_mode *mode;
 
 	/* use cached edid if we have one */
-	if (lvds_connector->base.edid) {
-		/* invalid edid */
-		if (IS_ERR(lvds_connector->base.edid))
-			return 0;
-
+	if (!IS_ERR_OR_NULL(lvds_connector->base.edid))
 		return drm_add_edid_modes(connector, lvds_connector->base.edid);
-	}
 
 	mode = drm_mode_duplicate(dev, lvds_connector->base.panel.fixed_mode);
 	if (mode == NULL)
@@ -1016,7 +1011,7 @@ bool intel_lvds_init(struct drm_device *dev)
 
 	/* create the scaling mode property */
 	drm_mode_create_scaling_mode_property(dev);
-	drm_connector_attach_property(&intel_connector->base,
+	drm_object_attach_property(&connector->base,
 				      dev->mode_config.scaling_mode_property,
 				      DRM_MODE_SCALE_ASPECT);
 	intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
@@ -1061,14 +1056,23 @@ bool intel_lvds_init(struct drm_device *dev)
 
 	list_for_each_entry(scan, &connector->probed_modes, head) {
 		if (scan->type & DRM_MODE_TYPE_PREFERRED) {
+			DRM_DEBUG_KMS("using preferred mode from EDID: ");
+			drm_mode_debug_printmodeline(scan);
+
 			fixed_mode = drm_mode_duplicate(dev, scan);
-			intel_find_lvds_downclock(dev, fixed_mode, connector);
-			goto out;
+			if (fixed_mode) {
+				intel_find_lvds_downclock(dev, fixed_mode,
+							  connector);
+				goto out;
+			}
 		}
 	}
 
 	/* Failed to get EDID, what about VBT? */
 	if (dev_priv->lfp_lvds_vbt_mode) {
+		DRM_DEBUG_KMS("using mode from VBT: ");
+		drm_mode_debug_printmodeline(dev_priv->lfp_lvds_vbt_mode);
+
 		fixed_mode = drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode);
 		if (fixed_mode) {
 			fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
@@ -1093,6 +1097,8 @@ bool intel_lvds_init(struct drm_device *dev)
 	if (crtc && (lvds & LVDS_PORT_EN)) {
 		fixed_mode = intel_crtc_mode_get(dev, crtc);
 		if (fixed_mode) {
+			DRM_DEBUG_KMS("using current (BIOS) mode: ");
+			drm_mode_debug_printmodeline(fixed_mode);
 			fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
 			goto out;
 		}
diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_modes.c
index d49985fcb27f..b00f1c83adce 100644
--- a/drivers/gpu/drm/i915/intel_modes.c
+++ b/drivers/gpu/drm/i915/intel_modes.c
@@ -97,7 +97,7 @@ intel_attach_force_audio_property(struct drm_connector *connector)
 
 		dev_priv->force_audio_property = prop;
 	}
-	drm_connector_attach_property(connector, prop, 0);
+	drm_object_attach_property(&connector->base, prop, 0);
 }
 
 static const struct drm_prop_enum_list broadcast_rgb_names[] = {
@@ -124,5 +124,5 @@ intel_attach_broadcast_rgb_property(struct drm_connector *connector)
 		dev_priv->broadcast_rgb_property = prop;
 	}
 
-	drm_connector_attach_property(connector, prop, 0);
+	drm_object_attach_property(&connector->base, prop, 0);
 }
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 41d463573baa..c758ad277473 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -275,7 +275,7 @@ static void intel_panel_actually_set_backlight(struct drm_device *dev, u32 level
 	}
 
 	tmp = I915_READ(BLC_PWM_CTL);
-       if (INTEL_INFO(dev)->gen < 4)
+	if (INTEL_INFO(dev)->gen < 4)
 		level <<= 1;
 	tmp &= ~BACKLIGHT_DUTY_CYCLE_MASK;
 	I915_WRITE(BLC_PWM_CTL, tmp | level);
@@ -374,26 +374,23 @@ static void intel_panel_init_backlight(struct drm_device *dev)
 enum drm_connector_status
 intel_panel_detect(struct drm_device *dev)
 {
-#if 0
 	struct drm_i915_private *dev_priv = dev->dev_private;
-#endif
-
-	if (i915_panel_ignore_lid)
-		return i915_panel_ignore_lid > 0 ?
-			connector_status_connected :
-			connector_status_disconnected;
 
-	/* opregion lid state on HP 2540p is wrong at boot up,
-	 * appears to be either the BIOS or Linux ACPI fault */
-#if 0
 	/* Assume that the BIOS does not lie through the OpRegion... */
-	if (dev_priv->opregion.lid_state)
+	if (!i915_panel_ignore_lid && dev_priv->opregion.lid_state) {
 		return ioread32(dev_priv->opregion.lid_state) & 0x1 ?
 			connector_status_connected :
 			connector_status_disconnected;
-#endif
+	}
 
-	return connector_status_unknown;
+	switch (i915_panel_ignore_lid) {
+	case -2:
+		return connector_status_connected;
+	case -1:
+		return connector_status_disconnected;
+	default:
+		return connector_status_unknown;
+	}
 }
 
 #ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 0cbc0e6402b4..58c2f210154a 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -2552,7 +2552,8 @@ static void gen6_update_ring_freq(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int min_freq = 15;
-	int gpu_freq, ia_freq, max_ia_freq;
+	int gpu_freq;
+	unsigned int ia_freq, max_ia_freq;
 	int scaling_factor = 180;
 
 	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
@@ -3518,6 +3519,7 @@ static void gen6_init_clock_gating(struct drm_device *dev)
 		   ILK_DPARBUNIT_CLOCK_GATE_ENABLE |
 		   ILK_DPFDUNIT_CLOCK_GATE_ENABLE);
 
+	/* WaMbcDriverBootEnable */
 	I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) |
 		   GEN6_MBCTL_ENABLE_BOOT_FETCH);
 
@@ -3548,6 +3550,20 @@ static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
 	I915_WRITE(GEN7_FF_THREAD_MODE, reg);
 }
 
+static void lpt_init_clock_gating(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	/*
+	 * TODO: this bit should only be enabled when really needed, then
+	 * disabled when not needed anymore in order to save power.
+	 */
+	if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE)
+		I915_WRITE(SOUTH_DSPCLK_GATE_D,
+			   I915_READ(SOUTH_DSPCLK_GATE_D) |
+			   PCH_LP_PARTITION_LEVEL_DISABLE);
+}
+
 static void haswell_init_clock_gating(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3590,6 +3606,10 @@ static void haswell_init_clock_gating(struct drm_device *dev)
 	I915_WRITE(CACHE_MODE_1,
 		   _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
 
+	/* WaMbcDriverBootEnable */
+	I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) |
+		   GEN6_MBCTL_ENABLE_BOOT_FETCH);
+
 	/* XXX: This is a workaround for early silicon revisions and should be
 	 * removed later.
 	 */
@@ -3599,6 +3619,7 @@ static void haswell_init_clock_gating(struct drm_device *dev)
 		   WM_DBG_DISALLOW_SPRITE |
 		   WM_DBG_DISALLOW_MAXFIFO);
 
+	lpt_init_clock_gating(dev);
 }
 
 static void ivybridge_init_clock_gating(struct drm_device *dev)
@@ -3680,6 +3701,7 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
 		intel_flush_display_plane(dev_priv, pipe);
 	}
 
+	/* WaMbcDriverBootEnable */
 	I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) |
 		   GEN6_MBCTL_ENABLE_BOOT_FETCH);
 
@@ -3745,6 +3767,7 @@ static void valleyview_init_clock_gating(struct drm_device *dev)
 		   I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
 		   GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
 
+	/* WaMbcDriverBootEnable */
 	I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) |
 		   GEN6_MBCTL_ENABLE_BOOT_FETCH);
 
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 1aa76892a830..987eb5fdaf39 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -558,12 +558,9 @@ update_mboxes(struct intel_ring_buffer *ring,
 	      u32 seqno,
 	      u32 mmio_offset)
 {
-	intel_ring_emit(ring, MI_SEMAPHORE_MBOX |
-			      MI_SEMAPHORE_GLOBAL_GTT |
-			      MI_SEMAPHORE_REGISTER |
-			      MI_SEMAPHORE_UPDATE);
-	intel_ring_emit(ring, seqno);
+	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
 	intel_ring_emit(ring, mmio_offset);
+	intel_ring_emit(ring, seqno);
 }
 
 /**
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index aea64425b1a2..a4bee83df745 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -1228,6 +1228,30 @@ static void intel_disable_sdvo(struct intel_encoder *encoder)
1228 1228
1229 temp = I915_READ(intel_sdvo->sdvo_reg); 1229 temp = I915_READ(intel_sdvo->sdvo_reg);
1230 if ((temp & SDVO_ENABLE) != 0) { 1230 if ((temp & SDVO_ENABLE) != 0) {
1231 /* HW workaround for IBX, we need to move the port to
1232 * transcoder A before disabling it. */
1233 if (HAS_PCH_IBX(encoder->base.dev)) {
1234 struct drm_crtc *crtc = encoder->base.crtc;
1235 int pipe = crtc ? to_intel_crtc(crtc)->pipe : -1;
1236
1237 if (temp & SDVO_PIPE_B_SELECT) {
1238 temp &= ~SDVO_PIPE_B_SELECT;
1239 I915_WRITE(intel_sdvo->sdvo_reg, temp);
1240 POSTING_READ(intel_sdvo->sdvo_reg);
1241
1242 /* Again we need to write this twice. */
1243 I915_WRITE(intel_sdvo->sdvo_reg, temp);
1244 POSTING_READ(intel_sdvo->sdvo_reg);
1245
1246 /* Transcoder selection bits only update
1247 * effectively on vblank. */
1248 if (crtc)
1249 intel_wait_for_vblank(encoder->base.dev, pipe);
1250 else
1251 msleep(50);
1252 }
1253 }
1254
1231 intel_sdvo_write_sdvox(intel_sdvo, temp & ~SDVO_ENABLE); 1255 intel_sdvo_write_sdvox(intel_sdvo, temp & ~SDVO_ENABLE);
1232 } 1256 }
1233} 1257}
@@ -1244,8 +1268,20 @@ static void intel_enable_sdvo(struct intel_encoder *encoder)
1244 u8 status; 1268 u8 status;
1245 1269
1246 temp = I915_READ(intel_sdvo->sdvo_reg); 1270 temp = I915_READ(intel_sdvo->sdvo_reg);
1247 if ((temp & SDVO_ENABLE) == 0) 1271 if ((temp & SDVO_ENABLE) == 0) {
1272 /* HW workaround for IBX, we need to move the port
1273 * to transcoder A before disabling it. */
1274 if (HAS_PCH_IBX(dev)) {
1275 struct drm_crtc *crtc = encoder->base.crtc;
1276 int pipe = crtc ? to_intel_crtc(crtc)->pipe : -1;
1277
1278 /* Restore the transcoder select bit. */
1279 if (pipe == PIPE_B)
1280 temp |= SDVO_PIPE_B_SELECT;
1281 }
1282
1248 intel_sdvo_write_sdvox(intel_sdvo, temp | SDVO_ENABLE); 1283 intel_sdvo_write_sdvox(intel_sdvo, temp | SDVO_ENABLE);
1284 }
1249 for (i = 0; i < 2; i++) 1285 for (i = 0; i < 2; i++)
1250 intel_wait_for_vblank(dev, intel_crtc->pipe); 1286 intel_wait_for_vblank(dev, intel_crtc->pipe);
1251 1287
@@ -1796,7 +1832,7 @@ static void intel_sdvo_destroy(struct drm_connector *connector)
1796 intel_sdvo_destroy_enhance_property(connector); 1832 intel_sdvo_destroy_enhance_property(connector);
1797 drm_sysfs_connector_remove(connector); 1833 drm_sysfs_connector_remove(connector);
1798 drm_connector_cleanup(connector); 1834 drm_connector_cleanup(connector);
1799 kfree(connector); 1835 kfree(intel_sdvo_connector);
1800} 1836}
1801 1837
1802static bool intel_sdvo_detect_hdmi_audio(struct drm_connector *connector) 1838static bool intel_sdvo_detect_hdmi_audio(struct drm_connector *connector)
@@ -1828,7 +1864,7 @@ intel_sdvo_set_property(struct drm_connector *connector,
1828 uint8_t cmd; 1864 uint8_t cmd;
1829 int ret; 1865 int ret;
1830 1866
1831 ret = drm_connector_property_set_value(connector, property, val); 1867 ret = drm_object_property_set_value(&connector->base, property, val);
1832 if (ret) 1868 if (ret)
1833 return ret; 1869 return ret;
1834 1870
@@ -1883,7 +1919,7 @@ intel_sdvo_set_property(struct drm_connector *connector,
1883 } else if (IS_TV_OR_LVDS(intel_sdvo_connector)) { 1919 } else if (IS_TV_OR_LVDS(intel_sdvo_connector)) {
1884 temp_value = val; 1920 temp_value = val;
1885 if (intel_sdvo_connector->left == property) { 1921 if (intel_sdvo_connector->left == property) {
1886 drm_connector_property_set_value(connector, 1922 drm_object_property_set_value(&connector->base,
1887 intel_sdvo_connector->right, val); 1923 intel_sdvo_connector->right, val);
1888 if (intel_sdvo_connector->left_margin == temp_value) 1924 if (intel_sdvo_connector->left_margin == temp_value)
1889 return 0; 1925 return 0;
@@ -1895,7 +1931,7 @@ intel_sdvo_set_property(struct drm_connector *connector,
1895 cmd = SDVO_CMD_SET_OVERSCAN_H; 1931 cmd = SDVO_CMD_SET_OVERSCAN_H;
1896 goto set_value; 1932 goto set_value;
1897 } else if (intel_sdvo_connector->right == property) { 1933 } else if (intel_sdvo_connector->right == property) {
1898 drm_connector_property_set_value(connector, 1934 drm_object_property_set_value(&connector->base,
1899 intel_sdvo_connector->left, val); 1935 intel_sdvo_connector->left, val);
1900 if (intel_sdvo_connector->right_margin == temp_value) 1936 if (intel_sdvo_connector->right_margin == temp_value)
1901 return 0; 1937 return 0;
@@ -1907,7 +1943,7 @@ intel_sdvo_set_property(struct drm_connector *connector,
1907 cmd = SDVO_CMD_SET_OVERSCAN_H; 1943 cmd = SDVO_CMD_SET_OVERSCAN_H;
1908 goto set_value; 1944 goto set_value;
1909 } else if (intel_sdvo_connector->top == property) { 1945 } else if (intel_sdvo_connector->top == property) {
1910 drm_connector_property_set_value(connector, 1946 drm_object_property_set_value(&connector->base,
1911 intel_sdvo_connector->bottom, val); 1947 intel_sdvo_connector->bottom, val);
1912 if (intel_sdvo_connector->top_margin == temp_value) 1948 if (intel_sdvo_connector->top_margin == temp_value)
1913 return 0; 1949 return 0;
@@ -1919,7 +1955,7 @@ intel_sdvo_set_property(struct drm_connector *connector,
1919 cmd = SDVO_CMD_SET_OVERSCAN_V; 1955 cmd = SDVO_CMD_SET_OVERSCAN_V;
1920 goto set_value; 1956 goto set_value;
1921 } else if (intel_sdvo_connector->bottom == property) { 1957 } else if (intel_sdvo_connector->bottom == property) {
1922 drm_connector_property_set_value(connector, 1958 drm_object_property_set_value(&connector->base,
1923 intel_sdvo_connector->top, val); 1959 intel_sdvo_connector->top, val);
1924 if (intel_sdvo_connector->bottom_margin == temp_value) 1960 if (intel_sdvo_connector->bottom_margin == temp_value)
1925 return 0; 1961 return 0;
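The repeated one-line changes in the hunks above (and in the intel_tv.c hunks later in this patch) are a mechanical conversion from the connector-specific property helpers to the generic drm_mode_object ones: the call now takes the connector's embedded drm_mode_object, so the same helper serves connectors, CRTCs and planes alike. A minimal sketch of the converted setter, with the wrapper name purely illustrative:

        #include <drm/drm_crtc.h>       /* struct drm_connector, drm_object_* helpers */

        static int example_set_connector_prop(struct drm_connector *connector,
                                              struct drm_property *property,
                                              uint64_t val)
        {
                /* was: drm_connector_property_set_value(connector, property, val) */
                return drm_object_property_set_value(&connector->base,
                                                     property, val);
        }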
@@ -2429,7 +2465,7 @@ static bool intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo,
2429 i, tv_format_names[intel_sdvo_connector->tv_format_supported[i]]); 2465 i, tv_format_names[intel_sdvo_connector->tv_format_supported[i]]);
2430 2466
2431 intel_sdvo->tv_format_index = intel_sdvo_connector->tv_format_supported[0]; 2467 intel_sdvo->tv_format_index = intel_sdvo_connector->tv_format_supported[0];
2432 drm_connector_attach_property(&intel_sdvo_connector->base.base, 2468 drm_object_attach_property(&intel_sdvo_connector->base.base.base,
2433 intel_sdvo_connector->tv_format, 0); 2469 intel_sdvo_connector->tv_format, 0);
2434 return true; 2470 return true;
2435 2471
@@ -2445,7 +2481,7 @@ static bool intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo,
2445 intel_sdvo_connector->name = \ 2481 intel_sdvo_connector->name = \
2446 drm_property_create_range(dev, 0, #name, 0, data_value[0]); \ 2482 drm_property_create_range(dev, 0, #name, 0, data_value[0]); \
2447 if (!intel_sdvo_connector->name) return false; \ 2483 if (!intel_sdvo_connector->name) return false; \
2448 drm_connector_attach_property(connector, \ 2484 drm_object_attach_property(&connector->base, \
2449 intel_sdvo_connector->name, \ 2485 intel_sdvo_connector->name, \
2450 intel_sdvo_connector->cur_##name); \ 2486 intel_sdvo_connector->cur_##name); \
2451 DRM_DEBUG_KMS(#name ": max %d, default %d, current %d\n", \ 2487 DRM_DEBUG_KMS(#name ": max %d, default %d, current %d\n", \
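The macro changed above creates one range property per SDVO picture enhancement and attaches it through the new call. Roughly what a single expansion boils down to, using brightness as a hypothetical example name, with data_value[0] standing in for the device-reported maximum:

        /* Sketch of one expansion of the enhancement macro. */
        intel_sdvo_connector->brightness =
                drm_property_create_range(dev, 0, "brightness",
                                          0, data_value[0]);
        if (!intel_sdvo_connector->brightness)
                return false;
        drm_object_attach_property(&connector->base,
                                   intel_sdvo_connector->brightness,
                                   intel_sdvo_connector->cur_brightness);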
@@ -2482,7 +2518,7 @@ intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo,
2482 if (!intel_sdvo_connector->left) 2518 if (!intel_sdvo_connector->left)
2483 return false; 2519 return false;
2484 2520
2485 drm_connector_attach_property(connector, 2521 drm_object_attach_property(&connector->base,
2486 intel_sdvo_connector->left, 2522 intel_sdvo_connector->left,
2487 intel_sdvo_connector->left_margin); 2523 intel_sdvo_connector->left_margin);
2488 2524
@@ -2491,7 +2527,7 @@ intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo,
2491 if (!intel_sdvo_connector->right) 2527 if (!intel_sdvo_connector->right)
2492 return false; 2528 return false;
2493 2529
2494 drm_connector_attach_property(connector, 2530 drm_object_attach_property(&connector->base,
2495 intel_sdvo_connector->right, 2531 intel_sdvo_connector->right,
2496 intel_sdvo_connector->right_margin); 2532 intel_sdvo_connector->right_margin);
2497 DRM_DEBUG_KMS("h_overscan: max %d, " 2533 DRM_DEBUG_KMS("h_overscan: max %d, "
@@ -2519,7 +2555,7 @@ intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo,
2519 if (!intel_sdvo_connector->top) 2555 if (!intel_sdvo_connector->top)
2520 return false; 2556 return false;
2521 2557
2522 drm_connector_attach_property(connector, 2558 drm_object_attach_property(&connector->base,
2523 intel_sdvo_connector->top, 2559 intel_sdvo_connector->top,
2524 intel_sdvo_connector->top_margin); 2560 intel_sdvo_connector->top_margin);
2525 2561
@@ -2529,7 +2565,7 @@ intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo,
2529 if (!intel_sdvo_connector->bottom) 2565 if (!intel_sdvo_connector->bottom)
2530 return false; 2566 return false;
2531 2567
2532 drm_connector_attach_property(connector, 2568 drm_object_attach_property(&connector->base,
2533 intel_sdvo_connector->bottom, 2569 intel_sdvo_connector->bottom,
2534 intel_sdvo_connector->bottom_margin); 2570 intel_sdvo_connector->bottom_margin);
2535 DRM_DEBUG_KMS("v_overscan: max %d, " 2571 DRM_DEBUG_KMS("v_overscan: max %d, "
@@ -2561,7 +2597,7 @@ intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo,
2561 if (!intel_sdvo_connector->dot_crawl) 2597 if (!intel_sdvo_connector->dot_crawl)
2562 return false; 2598 return false;
2563 2599
2564 drm_connector_attach_property(connector, 2600 drm_object_attach_property(&connector->base,
2565 intel_sdvo_connector->dot_crawl, 2601 intel_sdvo_connector->dot_crawl,
2566 intel_sdvo_connector->cur_dot_crawl); 2602 intel_sdvo_connector->cur_dot_crawl);
2567 DRM_DEBUG_KMS("dot crawl: current %d\n", response); 2603 DRM_DEBUG_KMS("dot crawl: current %d\n", response);
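The intel_tv.c hunks below apply the same drm_object_* conversion to the TV connector's mode and margin properties. The attach side of the change, in a minimal form (wrapper name illustrative only):

        static void example_attach_connector_prop(struct drm_connector *connector,
                                                  struct drm_property *property,
                                                  uint64_t init_val)
        {
                /* was: drm_connector_attach_property(connector, property, init_val);
                 * the generic helper takes the embedded drm_mode_object instead. */
                drm_object_attach_property(&connector->base, property, init_val);
        }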
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 86d5c20c325a..ea93520c1278 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -1289,7 +1289,7 @@ static void intel_tv_find_better_format(struct drm_connector *connector)
1289 } 1289 }
1290 1290
1291 intel_tv->tv_format = tv_mode->name; 1291 intel_tv->tv_format = tv_mode->name;
1292 drm_connector_property_set_value(connector, 1292 drm_object_property_set_value(&connector->base,
1293 connector->dev->mode_config.tv_mode_property, i); 1293 connector->dev->mode_config.tv_mode_property, i);
1294} 1294}
1295 1295
@@ -1443,7 +1443,7 @@ intel_tv_set_property(struct drm_connector *connector, struct drm_property *prop
1443 int ret = 0; 1443 int ret = 0;
1444 bool changed = false; 1444 bool changed = false;
1445 1445
1446 ret = drm_connector_property_set_value(connector, property, val); 1446 ret = drm_object_property_set_value(&connector->base, property, val);
1447 if (ret < 0) 1447 if (ret < 0)
1448 goto out; 1448 goto out;
1449 1449
@@ -1655,18 +1655,18 @@ intel_tv_init(struct drm_device *dev)
1655 ARRAY_SIZE(tv_modes), 1655 ARRAY_SIZE(tv_modes),
1656 tv_format_names); 1656 tv_format_names);
1657 1657
1658 drm_connector_attach_property(connector, dev->mode_config.tv_mode_property, 1658 drm_object_attach_property(&connector->base, dev->mode_config.tv_mode_property,
1659 initial_mode); 1659 initial_mode);
1660 drm_connector_attach_property(connector, 1660 drm_object_attach_property(&connector->base,
1661 dev->mode_config.tv_left_margin_property, 1661 dev->mode_config.tv_left_margin_property,
1662 intel_tv->margin[TV_MARGIN_LEFT]); 1662 intel_tv->margin[TV_MARGIN_LEFT]);
1663 drm_connector_attach_property(connector, 1663 drm_object_attach_property(&connector->base,
1664 dev->mode_config.tv_top_margin_property, 1664 dev->mode_config.tv_top_margin_property,
1665 intel_tv->margin[TV_MARGIN_TOP]); 1665 intel_tv->margin[TV_MARGIN_TOP]);
1666 drm_connector_attach_property(connector, 1666 drm_object_attach_property(&connector->base,
1667 dev->mode_config.tv_right_margin_property, 1667 dev->mode_config.tv_right_margin_property,
1668 intel_tv->margin[TV_MARGIN_RIGHT]); 1668 intel_tv->margin[TV_MARGIN_RIGHT]);
1669 drm_connector_attach_property(connector, 1669 drm_object_attach_property(&connector->base,
1670 dev->mode_config.tv_bottom_margin_property, 1670 dev->mode_config.tv_bottom_margin_property,
1671 intel_tv->margin[TV_MARGIN_BOTTOM]); 1671 intel_tv->margin[TV_MARGIN_BOTTOM]);
1672 drm_sysfs_connector_add(connector); 1672 drm_sysfs_connector_add(connector);