author    Dave Airlie <airlied@redhat.com>    2014-10-07 00:30:02 -0400
committer Dave Airlie <airlied@redhat.com>    2014-10-07 00:30:02 -0400
commit    436e94a4cb6f60c99edc7e424d32821c454ab8f0 (patch)
tree      8135c911e6611230804f66ce6f205c5507f8ab8b /drivers/gpu
parent    ccb09a8e36b64a4d161dd61d4066a5c54181615b (diff)
parent    ebb69c95175609990af708ec90c46530f5a2c819 (diff)
Merge tag 'drm-intel-next-fixes-2014-10-03' of git://anongit.freedesktop.org/drm-intel into drm-next
Bunch of fixes for 3.18. Major parts:
- ppgtt fixes (but full ppgtt is for 3.19) from Chris, Michel, ...
- hdmi pixel replication fixes (Clint Taylor)
- leftover i830M patches from Ville
- small things all over

* tag 'drm-intel-next-fixes-2014-10-03' of git://anongit.freedesktop.org/drm-intel: (21 commits)
  drm/i915: Enable pixel replicated modes on BDW and HSW.
  drm/i915: Don't spam dmesg with rps messages on vlv/chv
  drm/i915: Do not leak pages when freeing userptr objects
  drm/i915: Do not store the error pointer for a failed userptr registration
  Revert "drm/i915/bdw: BDW Software Turbo"
  drm/i915/bdw: Cleanup pre prod workarounds
  drm/i915: Use EIO instead of EAGAIN for sink CRC error.
  drm/i915: Extend BIOS stolen mem handling to all platform
  drm/i915: Match GTT space sanity checker with implementation
  drm/i915: HSW always use GGTT selector for secure batches
  drm/i915: add cherryview specfic forcewake in execlists_elsp_write
  drm/i915: fix another use-after-free in i915_gem_evict_everything
  drm/i915: Don't reinit hpd interrupts after gpu reset
  drm/i915: Wrap -EIO send-vblank event for failed pageflip in spinlock
  drm/i915: Drop any active reference before unbinding
  drm/i915: Objects on the unbound list may still have an active reference
  drm/i915/edp: use lane count and link rate from DPCD for eDP
  drm/i915/dp: add missing \n in the TPS3 debug message
  drm/i915/hdmi, dp: Do not dereference the encoder in the connector destroy
  drm/i915: Limit the watermark to at least 8 entries on gen2/3
  ...
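The largest cross-cutting change in this pull is the shrinker consolidation: the old i915_gem_purge()/__i915_gem_shrink() pair is replaced by a single i915_gem_shrink() that takes flag bits. A minimal sketch of the new interface and a typical caller, assuming only what the i915_drv.h, i915_gem.c and i915_debugfs.c hunks below show:

    unsigned long i915_gem_shrink(struct drm_i915_private *dev_priv,
                                  long target, unsigned flags);
    #define I915_SHRINK_PURGEABLE 0x1  /* only consider purgeable objects */
    #define I915_SHRINK_UNBOUND   0x2  /* walk mm.unbound_list */
    #define I915_SHRINK_BOUND     0x4  /* walk mm.bound_list, unbinding VMAs */

    /* debugfs drop_caches now simply delegates to the shrinker */
    if (val & DROP_BOUND)
        i915_gem_shrink(dev_priv, LONG_MAX, I915_SHRINK_BOUND);
    if (val & DROP_UNBOUND)
        i915_gem_shrink(dev_priv, LONG_MAX, I915_SHRINK_UNBOUND);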
Diffstat (limited to 'drivers/gpu')
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c       34
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c            2
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h           28
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c          134
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_evict.c      4
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_stolen.c    13
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_userptr.c   31
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c           21
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h           15
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c      73
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c           36
-rw-r--r--  drivers/gpu/drm/i915/intel_hdmi.c          2
-rw-r--r--  drivers/gpu/drm/i915/intel_lrc.c          29
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c          254
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c   20
15 files changed, 297 insertions, 399 deletions
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 2cbc85f3b237..063b44817e08 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -3826,7 +3826,6 @@ i915_drop_caches_set(void *data, u64 val)
 {
 	struct drm_device *dev = data;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_i915_gem_object *obj, *next;
 	int ret;
 
 	DRM_DEBUG("Dropping caches: 0x%08llx\n", val);
@@ -3846,36 +3845,11 @@ i915_drop_caches_set(void *data, u64 val)
 	if (val & (DROP_RETIRE | DROP_ACTIVE))
 		i915_gem_retire_requests(dev);
 
-	if (val & DROP_BOUND) {
-		list_for_each_entry_safe(obj, next, &dev_priv->mm.bound_list,
-					 global_list) {
-			struct i915_vma *vma, *v;
+	if (val & DROP_BOUND)
+		i915_gem_shrink(dev_priv, LONG_MAX, I915_SHRINK_BOUND);
 
-			ret = 0;
-			drm_gem_object_reference(&obj->base);
-			list_for_each_entry_safe(vma, v, &obj->vma_list, vma_link) {
-				if (vma->pin_count)
-					continue;
-
-				ret = i915_vma_unbind(vma);
-				if (ret)
-					break;
-			}
-			drm_gem_object_unreference(&obj->base);
-			if (ret)
-				goto unlock;
-		}
-	}
-
-	if (val & DROP_UNBOUND) {
-		list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list,
-					 global_list)
-			if (obj->pages_pin_count == 0) {
-				ret = i915_gem_object_put_pages(obj);
-				if (ret)
-					goto unlock;
-			}
-	}
+	if (val & DROP_UNBOUND)
+		i915_gem_shrink(dev_priv, LONG_MAX, I915_SHRINK_UNBOUND);
 
 unlock:
 	mutex_unlock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 3870c7359a16..055d5e7fbf12 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -871,8 +871,6 @@ int i915_reset(struct drm_device *dev)
 		 */
 		if (INTEL_INFO(dev)->gen > 5)
 			intel_reset_gt_powersave(dev);
-
-		intel_hpd_init(dev);
 	} else {
 		mutex_unlock(&dev->struct_mutex);
 	}
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 19c0dd8e255e..16a6f6d187a1 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -946,23 +946,6 @@ struct intel_rps_ei {
 	u32 media_c0;
 };
 
-struct intel_rps_bdw_cal {
-	u32 it_threshold_pct; /* interrupt, in percentage */
-	u32 eval_interval; /* evaluation interval, in us */
-	u32 last_ts;
-	u32 last_c0;
-	bool is_up;
-};
-
-struct intel_rps_bdw_turbo {
-	struct intel_rps_bdw_cal up;
-	struct intel_rps_bdw_cal down;
-	struct timer_list flip_timer;
-	u32 timeout;
-	atomic_t flip_received;
-	struct work_struct work_max_freq;
-};
-
 struct intel_gen6_power_mgmt {
 	/* work and pm_iir are protected by dev_priv->irq_lock */
 	struct work_struct work;
@@ -996,9 +979,6 @@ struct intel_gen6_power_mgmt {
 	bool enabled;
 	struct delayed_work delayed_resume_work;
 
-	bool is_bdw_sw_turbo; /* Switch of BDW software turbo */
-	struct intel_rps_bdw_turbo sw_turbo; /* Calculate RP interrupt timing */
-
 	/* manual wa residency calculations */
 	struct intel_rps_ei up_ei, down_ei;
 
@@ -2369,6 +2349,12 @@ int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
 int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
 			struct drm_file *file_priv);
 void i915_gem_load(struct drm_device *dev);
+unsigned long i915_gem_shrink(struct drm_i915_private *dev_priv,
+			      long target,
+			      unsigned flags);
+#define I915_SHRINK_PURGEABLE 0x1
+#define I915_SHRINK_UNBOUND 0x2
+#define I915_SHRINK_BOUND 0x4
 void *i915_gem_object_alloc(struct drm_device *dev);
 void i915_gem_object_free(struct drm_i915_gem_object *obj);
 void i915_gem_object_init(struct drm_i915_gem_object *obj,
@@ -2823,8 +2809,6 @@ extern void intel_disable_fbc(struct drm_device *dev);
 extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
 extern void intel_init_pch_refclk(struct drm_device *dev);
 extern void gen6_set_rps(struct drm_device *dev, u8 val);
-extern void bdw_software_turbo(struct drm_device *dev);
-extern void gen8_flip_interrupt(struct drm_device *dev);
 extern void valleyview_set_rps(struct drm_device *dev, u8 val);
 extern void intel_set_memory_cxsr(struct drm_i915_private *dev_priv,
 				  bool enable);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 4ca3a6dcf10b..28f91df2604d 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -60,7 +60,6 @@ static unsigned long i915_gem_shrinker_scan(struct shrinker *shrinker,
 static int i915_gem_shrinker_oom(struct notifier_block *nb,
 				 unsigned long event,
 				 void *ptr);
-static unsigned long i915_gem_purge(struct drm_i915_private *dev_priv, long target);
 static unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv);
 
 static bool cpu_cache_is_coherent(struct drm_device *dev,
@@ -1741,7 +1740,11 @@ static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
 	 * offsets on purgeable objects by truncating it and marking it purged,
 	 * which prevents userspace from ever using that object again.
 	 */
-	i915_gem_purge(dev_priv, obj->base.size >> PAGE_SHIFT);
+	i915_gem_shrink(dev_priv,
+			obj->base.size >> PAGE_SHIFT,
+			I915_SHRINK_BOUND |
+			I915_SHRINK_UNBOUND |
+			I915_SHRINK_PURGEABLE);
 	ret = drm_gem_create_mmap_offset(&obj->base);
 	if (ret != -ENOSPC)
 		goto out;
@@ -1938,12 +1941,11 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
 	return 0;
 }
 
-static unsigned long
-__i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
-		  bool purgeable_only)
+unsigned long
+i915_gem_shrink(struct drm_i915_private *dev_priv,
+		long target, unsigned flags)
 {
-	struct list_head still_in_list;
-	struct drm_i915_gem_object *obj;
+	const bool purgeable_only = flags & I915_SHRINK_PURGEABLE;
 	unsigned long count = 0;
 
 	/*
@@ -1965,62 +1967,68 @@ __i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
 	 * dev->struct_mutex and so we won't ever be able to observe an
 	 * object on the bound_list with a reference count equals 0.
 	 */
-	INIT_LIST_HEAD(&still_in_list);
-	while (count < target && !list_empty(&dev_priv->mm.unbound_list)) {
-		obj = list_first_entry(&dev_priv->mm.unbound_list,
-				       typeof(*obj), global_list);
-		list_move_tail(&obj->global_list, &still_in_list);
+	if (flags & I915_SHRINK_UNBOUND) {
+		struct list_head still_in_list;
 
-		if (!i915_gem_object_is_purgeable(obj) && purgeable_only)
-			continue;
+		INIT_LIST_HEAD(&still_in_list);
+		while (count < target && !list_empty(&dev_priv->mm.unbound_list)) {
+			struct drm_i915_gem_object *obj;
 
-		drm_gem_object_reference(&obj->base);
+			obj = list_first_entry(&dev_priv->mm.unbound_list,
+					       typeof(*obj), global_list);
+			list_move_tail(&obj->global_list, &still_in_list);
 
-		if (i915_gem_object_put_pages(obj) == 0)
-			count += obj->base.size >> PAGE_SHIFT;
+			if (!i915_gem_object_is_purgeable(obj) && purgeable_only)
+				continue;
+
+			drm_gem_object_reference(&obj->base);
 
-		drm_gem_object_unreference(&obj->base);
+			if (i915_gem_object_put_pages(obj) == 0)
+				count += obj->base.size >> PAGE_SHIFT;
+
+			drm_gem_object_unreference(&obj->base);
+		}
+		list_splice(&still_in_list, &dev_priv->mm.unbound_list);
 	}
-	list_splice(&still_in_list, &dev_priv->mm.unbound_list);
 
-	INIT_LIST_HEAD(&still_in_list);
-	while (count < target && !list_empty(&dev_priv->mm.bound_list)) {
-		struct i915_vma *vma, *v;
+	if (flags & I915_SHRINK_BOUND) {
+		struct list_head still_in_list;
 
-		obj = list_first_entry(&dev_priv->mm.bound_list,
-				       typeof(*obj), global_list);
-		list_move_tail(&obj->global_list, &still_in_list);
+		INIT_LIST_HEAD(&still_in_list);
+		while (count < target && !list_empty(&dev_priv->mm.bound_list)) {
+			struct drm_i915_gem_object *obj;
+			struct i915_vma *vma, *v;
 
-		if (!i915_gem_object_is_purgeable(obj) && purgeable_only)
-			continue;
+			obj = list_first_entry(&dev_priv->mm.bound_list,
+					       typeof(*obj), global_list);
+			list_move_tail(&obj->global_list, &still_in_list);
 
-		drm_gem_object_reference(&obj->base);
+			if (!i915_gem_object_is_purgeable(obj) && purgeable_only)
+				continue;
 
-		list_for_each_entry_safe(vma, v, &obj->vma_list, vma_link)
-			if (i915_vma_unbind(vma))
-				break;
+			drm_gem_object_reference(&obj->base);
 
-		if (i915_gem_object_put_pages(obj) == 0)
-			count += obj->base.size >> PAGE_SHIFT;
+			list_for_each_entry_safe(vma, v, &obj->vma_list, vma_link)
+				if (i915_vma_unbind(vma))
+					break;
+
+			if (i915_gem_object_put_pages(obj) == 0)
+				count += obj->base.size >> PAGE_SHIFT;
 
-		drm_gem_object_unreference(&obj->base);
+			drm_gem_object_unreference(&obj->base);
+		}
+		list_splice(&still_in_list, &dev_priv->mm.bound_list);
 	}
-	list_splice(&still_in_list, &dev_priv->mm.bound_list);
 
 	return count;
 }
 
 static unsigned long
-i915_gem_purge(struct drm_i915_private *dev_priv, long target)
-{
-	return __i915_gem_shrink(dev_priv, target, true);
-}
-
-static unsigned long
 i915_gem_shrink_all(struct drm_i915_private *dev_priv)
 {
 	i915_gem_evict_everything(dev_priv->dev);
-	return __i915_gem_shrink(dev_priv, LONG_MAX, false);
+	return i915_gem_shrink(dev_priv, LONG_MAX,
+			       I915_SHRINK_BOUND | I915_SHRINK_UNBOUND);
 }
 
 static int
@@ -2067,7 +2075,11 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 	for (i = 0; i < page_count; i++) {
 		page = shmem_read_mapping_page_gfp(mapping, i, gfp);
 		if (IS_ERR(page)) {
-			i915_gem_purge(dev_priv, page_count);
+			i915_gem_shrink(dev_priv,
+					page_count,
+					I915_SHRINK_BOUND |
+					I915_SHRINK_UNBOUND |
+					I915_SHRINK_PURGEABLE);
 			page = shmem_read_mapping_page_gfp(mapping, i, gfp);
 		}
 		if (IS_ERR(page)) {
@@ -2944,6 +2956,9 @@ int i915_vma_unbind(struct i915_vma *vma)
 	 * cause memory corruption through use-after-free.
 	 */
 
+	/* Throw away the active reference before moving to the unbound list */
+	i915_gem_object_retire(obj);
+
 	if (i915_is_ggtt(vma->vm)) {
 		i915_gem_object_finish_gtt(obj);
 
@@ -3336,17 +3351,20 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
 	return 0;
 }
 
-static bool i915_gem_valid_gtt_space(struct drm_device *dev,
-				     struct drm_mm_node *gtt_space,
+static bool i915_gem_valid_gtt_space(struct i915_vma *vma,
 				     unsigned long cache_level)
 {
+	struct drm_mm_node *gtt_space = &vma->node;
 	struct drm_mm_node *other;
 
-	/* On non-LLC machines we have to be careful when putting differing
-	 * types of snoopable memory together to avoid the prefetcher
-	 * crossing memory domains and dying.
+	/*
+	 * On some machines we have to be careful when putting differing types
+	 * of snoopable memory together to avoid the prefetcher crossing memory
+	 * domains and dying. During vm initialisation, we decide whether or not
+	 * these constraints apply and set the drm_mm.color_adjust
+	 * appropriately.
 	 */
-	if (HAS_LLC(dev))
+	if (vma->vm->mm.color_adjust == NULL)
 		return true;
 
 	if (!drm_mm_node_allocated(gtt_space))
@@ -3484,8 +3502,7 @@ search_free:
 
 		goto err_free_vma;
 	}
-	if (WARN_ON(!i915_gem_valid_gtt_space(dev, &vma->node,
-					      obj->cache_level))) {
+	if (WARN_ON(!i915_gem_valid_gtt_space(vma, obj->cache_level))) {
 		ret = -EINVAL;
 		goto err_remove_node;
 	}
@@ -3695,7 +3712,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
 	}
 
 	list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
-		if (!i915_gem_valid_gtt_space(dev, &vma->node, cache_level)) {
+		if (!i915_gem_valid_gtt_space(vma, cache_level)) {
 			ret = i915_vma_unbind(vma);
 			if (ret)
 				return ret;
@@ -5261,11 +5278,16 @@ i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
 	if (!i915_gem_shrinker_lock(dev, &unlock))
 		return SHRINK_STOP;
 
-	freed = i915_gem_purge(dev_priv, sc->nr_to_scan);
+	freed = i915_gem_shrink(dev_priv,
+				sc->nr_to_scan,
+				I915_SHRINK_BOUND |
+				I915_SHRINK_UNBOUND |
+				I915_SHRINK_PURGEABLE);
 	if (freed < sc->nr_to_scan)
-		freed += __i915_gem_shrink(dev_priv,
-					   sc->nr_to_scan - freed,
-					   false);
+		freed += i915_gem_shrink(dev_priv,
+					 sc->nr_to_scan - freed,
+					 I915_SHRINK_BOUND |
+					 I915_SHRINK_UNBOUND);
 	if (unlock)
 		mutex_unlock(&dev->struct_mutex);
 
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index bbf4b12d842e..886ff2ee7a28 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -243,7 +243,7 @@ int
 i915_gem_evict_everything(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct i915_address_space *vm;
+	struct i915_address_space *vm, *v;
 	bool lists_empty = true;
 	int ret;
 
@@ -270,7 +270,7 @@ i915_gem_evict_everything(struct drm_device *dev)
 	i915_gem_retire_requests(dev);
 
 	/* Having flushed everything, unbind() should never raise an error */
-	list_for_each_entry(vm, &dev_priv->vm_list, global_link)
+	list_for_each_entry_safe(vm, v, &dev_priv->vm_list, global_link)
 		WARN_ON(i915_gem_evict_vm(vm, false));
 
 	return 0;
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index 21c025a209c0..85fda6b803e4 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -289,6 +289,7 @@ void i915_gem_cleanup_stolen(struct drm_device *dev)
 int i915_gem_init_stolen(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 tmp;
 	int bios_reserved = 0;
 
 #ifdef CONFIG_INTEL_IOMMU
@@ -308,8 +309,16 @@ int i915_gem_init_stolen(struct drm_device *dev)
 	DRM_DEBUG_KMS("found %zd bytes of stolen memory at %08lx\n",
 		      dev_priv->gtt.stolen_size, dev_priv->mm.stolen_base);
 
-	if (IS_VALLEYVIEW(dev))
-		bios_reserved = 1024*1024; /* top 1M on VLV/BYT */
+	if (INTEL_INFO(dev)->gen >= 8) {
+		tmp = I915_READ(GEN7_BIOS_RESERVED);
+		tmp >>= GEN8_BIOS_RESERVED_SHIFT;
+		tmp &= GEN8_BIOS_RESERVED_MASK;
+		bios_reserved = (1024*1024) << tmp;
+	} else if (IS_GEN7(dev)) {
+		tmp = I915_READ(GEN7_BIOS_RESERVED);
+		bios_reserved = tmp & GEN7_BIOS_RESERVED_256K ?
+			256*1024 : 1024*1024;
+	}
 
 	if (WARN_ON(bios_reserved > dev_priv->gtt.stolen_size))
 		return 0;
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index d38413997379..d182058383a9 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -293,15 +293,23 @@ i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
 static struct i915_mmu_notifier *
 i915_mmu_notifier_find(struct i915_mm_struct *mm)
 {
-	if (mm->mn == NULL) {
-		down_write(&mm->mm->mmap_sem);
-		mutex_lock(&to_i915(mm->dev)->mm_lock);
-		if (mm->mn == NULL)
-			mm->mn = i915_mmu_notifier_create(mm->mm);
-		mutex_unlock(&to_i915(mm->dev)->mm_lock);
-		up_write(&mm->mm->mmap_sem);
+	struct i915_mmu_notifier *mn = mm->mn;
+
+	mn = mm->mn;
+	if (mn)
+		return mn;
+
+	down_write(&mm->mm->mmap_sem);
+	mutex_lock(&to_i915(mm->dev)->mm_lock);
+	if ((mn = mm->mn) == NULL) {
+		mn = i915_mmu_notifier_create(mm->mm);
+		if (!IS_ERR(mn))
+			mm->mn = mn;
 	}
-	return mm->mn;
+	mutex_unlock(&to_i915(mm->dev)->mm_lock);
+	up_write(&mm->mm->mmap_sem);
+
+	return mn;
 }
 
 static int
@@ -681,16 +689,15 @@ i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
 static void
 i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj)
 {
-	struct scatterlist *sg;
-	int i;
+	struct sg_page_iter sg_iter;
 
 	BUG_ON(obj->userptr.work != NULL);
 
 	if (obj->madv != I915_MADV_WILLNEED)
 		obj->dirty = 0;
 
-	for_each_sg(obj->pages->sgl, sg, obj->pages->nents, i) {
-		struct page *page = sg_page(sg);
+	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
+		struct page *page = sg_page_iter_page(&sg_iter);
 
 		if (obj->dirty)
 			set_page_dirty(page);
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index c96ddc953531..3201986bf25e 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -1979,27 +1979,6 @@ static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
 					   res1, res2);
 }
 
-void gen8_flip_interrupt(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	if (!dev_priv->rps.is_bdw_sw_turbo)
-		return;
-
-	if(atomic_read(&dev_priv->rps.sw_turbo.flip_received)) {
-		mod_timer(&dev_priv->rps.sw_turbo.flip_timer,
-			  usecs_to_jiffies(dev_priv->rps.sw_turbo.timeout) + jiffies);
-	}
-	else {
-		dev_priv->rps.sw_turbo.flip_timer.expires =
-			usecs_to_jiffies(dev_priv->rps.sw_turbo.timeout) + jiffies;
-		add_timer(&dev_priv->rps.sw_turbo.flip_timer);
-		atomic_set(&dev_priv->rps.sw_turbo.flip_received, true);
-	}
-
-	bdw_software_turbo(dev);
-}
-
 /* The RPS events need forcewake, so we add them to a work queue and mask their
  * IMR bits until the work is done. Other interrupts can be processed without
  * the work queue. */
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index b65bdfc23ccb..c01e5f31430e 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -143,6 +143,14 @@
 #define GAB_CTL				0x24000
 #define   GAB_CTL_CONT_AFTER_PAGEFAULT	(1<<8)
 
+#define GEN7_BIOS_RESERVED		0x1082C0
+#define GEN7_BIOS_RESERVED_1M		(0 << 5)
+#define GEN7_BIOS_RESERVED_256K		(1 << 5)
+#define GEN8_BIOS_RESERVED_SHIFT	7
+#define GEN7_BIOS_RESERVED_MASK		0x1
+#define GEN8_BIOS_RESERVED_MASK		0x3
+
+
 /* VGA stuff */
 
 #define VGA_ST01_MDA 0x3ba
@@ -2435,6 +2443,7 @@ enum punit_power_well {
 #define _PIPEASRC	0x6001c
 #define _BCLRPAT_A	0x60020
 #define _VSYNCSHIFT_A	0x60028
+#define _PIPE_MULT_A	0x6002c
 
 /* Pipe B timing regs */
 #define _HTOTAL_B	0x61000
@@ -2446,6 +2455,7 @@ enum punit_power_well {
 #define _PIPEBSRC	0x6101c
 #define _BCLRPAT_B	0x61020
 #define _VSYNCSHIFT_B	0x61028
+#define _PIPE_MULT_B	0x6102c
 
 #define TRANSCODER_A_OFFSET 0x60000
 #define TRANSCODER_B_OFFSET 0x61000
@@ -2466,6 +2476,7 @@ enum punit_power_well {
 #define BCLRPAT(trans) _TRANSCODER2(trans, _BCLRPAT_A)
 #define VSYNCSHIFT(trans) _TRANSCODER2(trans, _VSYNCSHIFT_A)
 #define PIPESRC(trans) _TRANSCODER2(trans, _PIPEASRC)
+#define PIPE_MULT(trans) _TRANSCODER2(trans, _PIPE_MULT_A)
 
 /* HSW+ eDP PSR registers */
 #define EDP_PSR_BASE(dev) (IS_HASWELL(dev) ? 0x64800 : 0x6f800)
@@ -5577,10 +5588,6 @@ enum punit_power_well {
 #define GEN8_UCGCTL6				0x9430
 #define   GEN8_SDEUNIT_CLOCK_GATE_DISABLE	(1<<14)
 
-#define TIMESTAMP_CTR		0x44070
-#define FREQ_1_28_US(us)	(((us) * 100) >> 7)
-#define MCHBAR_PCU_C0		(MCHBAR_MIRROR_BASE_SNB + 0x5960)
-
 #define GEN6_GFXPAUSE				0xA000
 #define GEN6_RPNSWREQ				0xA008
 #define   GEN6_TURBO_DISABLE			(1<<31)
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 1386086ec245..507370513f3d 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -1612,6 +1612,18 @@ static void chv_enable_pll(struct intel_crtc *crtc)
 	mutex_unlock(&dev_priv->dpio_lock);
 }
 
+static int intel_num_dvo_pipes(struct drm_device *dev)
+{
+	struct intel_crtc *crtc;
+	int count = 0;
+
+	for_each_intel_crtc(dev, crtc)
+		count += crtc->active &&
+			intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DVO);
+
+	return count;
+}
+
 static void i9xx_enable_pll(struct intel_crtc *crtc)
 {
 	struct drm_device *dev = crtc->base.dev;
@@ -1628,7 +1640,18 @@ static void i9xx_enable_pll(struct intel_crtc *crtc)
 	if (IS_MOBILE(dev) && !IS_I830(dev))
 		assert_panel_unlocked(dev_priv, crtc->pipe);
 
-	I915_WRITE(reg, dpll);
+	/* Enable DVO 2x clock on both PLLs if necessary */
+	if (IS_I830(dev) && intel_num_dvo_pipes(dev) > 0) {
+		/*
+		 * It appears to be important that we don't enable this
+		 * for the current pipe before otherwise configuring the
+		 * PLL. No idea how this should be handled if multiple
+		 * DVO outputs are enabled simultaneosly.
+		 */
+		dpll |= DPLL_DVO_2X_MODE;
+		I915_WRITE(DPLL(!crtc->pipe),
+			   I915_READ(DPLL(!crtc->pipe)) | DPLL_DVO_2X_MODE);
+	}
 
 	/* Wait for the clocks to stabilize. */
 	POSTING_READ(reg);
@@ -1667,8 +1690,22 @@ static void i9xx_enable_pll(struct intel_crtc *crtc)
  *
  * Note! This is for pre-ILK only.
  */
-static void i9xx_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
+static void i9xx_disable_pll(struct intel_crtc *crtc)
 {
+	struct drm_device *dev = crtc->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	enum pipe pipe = crtc->pipe;
+
+	/* Disable DVO 2x clock on both PLLs if necessary */
+	if (IS_I830(dev) &&
+	    intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DVO) &&
+	    intel_num_dvo_pipes(dev) == 1) {
+		I915_WRITE(DPLL(PIPE_B),
+			   I915_READ(DPLL(PIPE_B)) & ~DPLL_DVO_2X_MODE);
+		I915_WRITE(DPLL(PIPE_A),
+			   I915_READ(DPLL(PIPE_A)) & ~DPLL_DVO_2X_MODE);
+	}
+
 	/* Don't disable pipe or pipe PLLs if needed */
 	if ((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
 	    (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
@@ -4185,6 +4222,11 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
 
 	intel_set_pipe_timings(intel_crtc);
 
+	if (intel_crtc->config.cpu_transcoder != TRANSCODER_EDP) {
+		I915_WRITE(PIPE_MULT(intel_crtc->config.cpu_transcoder),
+			   intel_crtc->config.pixel_multiplier - 1);
+	}
+
 	if (intel_crtc->config.has_pch_encoder) {
 		intel_cpu_transcoder_set_m_n(intel_crtc,
 				     &intel_crtc->config.fdi_m_n, NULL);
@@ -4941,7 +4983,7 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
 		else if (IS_VALLEYVIEW(dev))
 			vlv_disable_pll(dev_priv, pipe);
 		else
-			i9xx_disable_pll(dev_priv, pipe);
+			i9xx_disable_pll(intel_crtc);
 	}
 
 	if (!IS_GEN2(dev))
@@ -5945,7 +5987,7 @@ static void i8xx_update_pll(struct intel_crtc *crtc,
 			dpll |= PLL_P2_DIVIDE_BY_4;
 	}
 
-	if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DVO))
+	if (!IS_I830(dev) && intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DVO))
 		dpll |= DPLL_DVO_2X_MODE;
 
 	if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
@@ -6451,6 +6493,14 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
 	}
 	pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(crtc->pipe));
 	if (!IS_VALLEYVIEW(dev)) {
+		/*
+		 * DPLL_DVO_2X_MODE must be enabled for both DPLLs
+		 * on 830. Filter it out here so that we don't
+		 * report errors due to that.
+		 */
+		if (IS_I830(dev))
+			pipe_config->dpll_hw_state.dpll &= ~DPLL_DVO_2X_MODE;
+
 		pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(crtc->pipe));
 		pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(crtc->pipe));
 	} else {
@@ -7845,7 +7895,12 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
 	pipe_config->ips_enabled = hsw_crtc_supports_ips(crtc) &&
 		(I915_READ(IPS_CTL) & IPS_ENABLE);
 
-	pipe_config->pixel_multiplier = 1;
+	if (pipe_config->cpu_transcoder != TRANSCODER_EDP) {
+		pipe_config->pixel_multiplier =
+			I915_READ(PIPE_MULT(pipe_config->cpu_transcoder)) + 1;
+	} else {
+		pipe_config->pixel_multiplier = 1;
+	}
 
 	return true;
 }
@@ -9881,9 +9936,6 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 	unsigned long flags;
 	int ret;
 
-	//trigger software GT busyness calculation
-	gen8_flip_interrupt(dev);
-
 	/*
 	 * drm_mode_page_flip_ioctl() should already catch this, but double
 	 * check to be safe. In the future we may enable pageflipping from
@@ -10039,8 +10091,11 @@ free_work:
 out_hang:
 		intel_crtc_wait_for_pending_flips(crtc);
 		ret = intel_pipe_set_base(crtc, crtc->x, crtc->y, fb);
-		if (ret == 0 && event)
+		if (ret == 0 && event) {
+			spin_lock_irqsave(&dev->event_lock, flags);
 			drm_send_vblank_event(dev, pipe, event);
+			spin_unlock_irqrestore(&dev->event_lock, flags);
+		}
 	}
 	return ret;
 }
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 2a26774ddb68..59754451ae50 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -1068,23 +1068,15 @@ intel_dp_compute_config(struct intel_encoder *encoder,
 			bpp = dev_priv->vbt.edp_bpp;
 		}
 
-		if (IS_BROADWELL(dev)) {
-			/* Yes, it's an ugly hack. */
-			min_lane_count = max_lane_count;
-			DRM_DEBUG_KMS("forcing lane count to max (%u) on BDW\n",
-				      min_lane_count);
-		} else if (dev_priv->vbt.edp_lanes) {
-			min_lane_count = min(dev_priv->vbt.edp_lanes,
-					     max_lane_count);
-			DRM_DEBUG_KMS("using min %u lanes per VBT\n",
-				      min_lane_count);
-		}
-
-		if (dev_priv->vbt.edp_rate) {
-			min_clock = min(dev_priv->vbt.edp_rate >> 3, max_clock);
-			DRM_DEBUG_KMS("using min %02x link bw per VBT\n",
-				      bws[min_clock]);
-		}
+		/*
+		 * Use the maximum clock and number of lanes the eDP panel
+		 * advertizes being capable of. The panels are generally
+		 * designed to support only a single clock and lane
+		 * configuration, and typically these values correspond to the
+		 * native resolution of the panel.
+		 */
+		min_lane_count = max_lane_count;
+		min_clock = max_clock;
 	}
 
 	for (; bpp >= 6*3; bpp -= 2*3) {
@@ -3732,7 +3724,7 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
 	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x12 &&
 	    intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED) {
 		intel_dp->use_tps3 = true;
-		DRM_DEBUG_KMS("Displayport TPS3 supported");
+		DRM_DEBUG_KMS("Displayport TPS3 supported\n");
 	} else
 		intel_dp->use_tps3 = false;
 
@@ -3808,21 +3800,21 @@ int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
 	u8 buf[1];
 
 	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, buf) < 0)
-		return -EAGAIN;
+		return -EIO;
 
 	if (!(buf[0] & DP_TEST_CRC_SUPPORTED))
 		return -ENOTTY;
 
 	if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
 			       DP_TEST_SINK_START) < 0)
-		return -EAGAIN;
+		return -EIO;
 
 	/* Wait 2 vblanks to be sure we will have the correct CRC value */
 	intel_wait_for_vblank(dev, intel_crtc->pipe);
 	intel_wait_for_vblank(dev, intel_crtc->pipe);
 
 	if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0)
-		return -EAGAIN;
+		return -EIO;
 
 	drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK, 0);
 	return 0;
@@ -4395,7 +4387,7 @@ intel_dp_connector_destroy(struct drm_connector *connector)
 {
 	struct intel_connector *intel_connector = to_intel_connector(connector);
 
-	intel_dp_unset_edid(intel_attached_dp(connector));
+	kfree(intel_connector->detect_edid);
 
 	if (!IS_ERR_OR_NULL(intel_connector->edid))
 		kfree(intel_connector->edid);
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index c5861736b4b0..7fed5bedc10f 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -1501,7 +1501,7 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder)
 
 static void intel_hdmi_destroy(struct drm_connector *connector)
 {
-	intel_hdmi_unset_edid(connector);
+	kfree(to_intel_connector(connector)->detect_edid);
 	drm_connector_cleanup(connector);
 	kfree(connector);
 }
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index bd1b28d99920..bafd38b5703e 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -300,8 +300,18 @@ static void execlists_elsp_write(struct intel_engine_cs *ring,
 	 * Instead, we do the runtime_pm_get/put when creating/destroying requests.
 	 */
 	spin_lock_irqsave(&dev_priv->uncore.lock, flags);
-	if (dev_priv->uncore.forcewake_count++ == 0)
-		dev_priv->uncore.funcs.force_wake_get(dev_priv, FORCEWAKE_ALL);
+	if (IS_CHERRYVIEW(dev_priv->dev)) {
+		if (dev_priv->uncore.fw_rendercount++ == 0)
+			dev_priv->uncore.funcs.force_wake_get(dev_priv,
+							      FORCEWAKE_RENDER);
+		if (dev_priv->uncore.fw_mediacount++ == 0)
+			dev_priv->uncore.funcs.force_wake_get(dev_priv,
+							      FORCEWAKE_MEDIA);
+	} else {
+		if (dev_priv->uncore.forcewake_count++ == 0)
+			dev_priv->uncore.funcs.force_wake_get(dev_priv,
+							      FORCEWAKE_ALL);
+	}
 	spin_unlock_irqrestore(&dev_priv->uncore.lock, flags);
 
 	I915_WRITE(RING_ELSP(ring), desc[1]);
@@ -315,8 +325,19 @@ static void execlists_elsp_write(struct intel_engine_cs *ring,
 
 	/* Release Force Wakeup (see the big comment above). */
 	spin_lock_irqsave(&dev_priv->uncore.lock, flags);
-	if (--dev_priv->uncore.forcewake_count == 0)
-		dev_priv->uncore.funcs.force_wake_put(dev_priv, FORCEWAKE_ALL);
+	if (IS_CHERRYVIEW(dev_priv->dev)) {
+		if (--dev_priv->uncore.fw_rendercount == 0)
+			dev_priv->uncore.funcs.force_wake_put(dev_priv,
+							      FORCEWAKE_RENDER);
+		if (--dev_priv->uncore.fw_mediacount == 0)
+			dev_priv->uncore.funcs.force_wake_put(dev_priv,
+							      FORCEWAKE_MEDIA);
+	} else {
+		if (--dev_priv->uncore.forcewake_count == 0)
+			dev_priv->uncore.funcs.force_wake_put(dev_priv,
+							      FORCEWAKE_ALL);
+	}
+
 	spin_unlock_irqrestore(&dev_priv->uncore.lock, flags);
 }
 
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 45f71e6dc544..c27b6140bfd1 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -1070,6 +1070,17 @@ static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
 		wm_size = wm->max_wm;
 	if (wm_size <= 0)
 		wm_size = wm->default_wm;
+
+	/*
+	 * Bspec seems to indicate that the value shouldn't be lower than
+	 * 'burst size + 1'. Certainly 830 is quite unhappy with low values.
+	 * Lets go for 8 which is the burst size since certain platforms
+	 * already use a hardcoded 8 (which is what the spec says should be
+	 * done).
+	 */
+	if (wm_size <= 8)
+		wm_size = 8;
+
 	return wm_size;
 }
 
@@ -2274,6 +2285,7 @@ int ilk_wm_max_level(const struct drm_device *dev)
 	else
 		return 2;
 }
+
 static void intel_print_wm_latency(struct drm_device *dev,
 				   const char *name,
 				   const uint16_t wm[5])
@@ -3242,9 +3254,6 @@ static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
 {
 	int new_power;
 
-	if (dev_priv->rps.is_bdw_sw_turbo)
-		return;
-
 	new_power = dev_priv->rps.power;
 	switch (dev_priv->rps.power) {
 	case LOW_POWER:
@@ -3452,11 +3461,8 @@ void gen6_rps_idle(struct drm_i915_private *dev_priv)
 			valleyview_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
 		else if (IS_VALLEYVIEW(dev))
 			vlv_set_rps_idle(dev_priv);
-		else if (!dev_priv->rps.is_bdw_sw_turbo
-				|| atomic_read(&dev_priv->rps.sw_turbo.flip_received)){
+		else
 			gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
-		}
-
 		dev_priv->rps.last_adj = 0;
 	}
 	mutex_unlock(&dev_priv->rps.hw_lock);
@@ -3470,11 +3476,8 @@ void gen6_rps_boost(struct drm_i915_private *dev_priv)
 	if (dev_priv->rps.enabled) {
 		if (IS_VALLEYVIEW(dev))
 			valleyview_set_rps(dev_priv->dev, dev_priv->rps.max_freq_softlimit);
-		else if (!dev_priv->rps.is_bdw_sw_turbo
-				|| atomic_read(&dev_priv->rps.sw_turbo.flip_received)){
+		else
 			gen6_set_rps(dev_priv->dev, dev_priv->rps.max_freq_softlimit);
-		}
-
 		dev_priv->rps.last_adj = 0;
 	}
 	mutex_unlock(&dev_priv->rps.hw_lock);
@@ -3488,17 +3491,18 @@ void valleyview_set_rps(struct drm_device *dev, u8 val)
 	WARN_ON(val > dev_priv->rps.max_freq_softlimit);
 	WARN_ON(val < dev_priv->rps.min_freq_softlimit);
 
-	DRM_DEBUG_DRIVER("GPU freq request from %d MHz (%u) to %d MHz (%u)\n",
-			 vlv_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
-			 dev_priv->rps.cur_freq,
-			 vlv_gpu_freq(dev_priv, val), val);
-
 	if (WARN_ONCE(IS_CHERRYVIEW(dev) && (val & 1),
 		      "Odd GPU freq value\n"))
 		val &= ~1;
 
-	if (val != dev_priv->rps.cur_freq)
+	if (val != dev_priv->rps.cur_freq) {
+		DRM_DEBUG_DRIVER("GPU freq request from %d MHz (%u) to %d MHz (%u)\n",
+				 vlv_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
+				 dev_priv->rps.cur_freq,
+				 vlv_gpu_freq(dev_priv, val), val);
+
 		vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val);
+	}
 
 	I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
 
@@ -3509,26 +3513,21 @@ void valleyview_set_rps(struct drm_device *dev, u8 val)
 static void gen8_disable_rps_interrupts(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	if (IS_BROADWELL(dev) && dev_priv->rps.is_bdw_sw_turbo){
-		if (atomic_read(&dev_priv->rps.sw_turbo.flip_received))
-			del_timer(&dev_priv->rps.sw_turbo.flip_timer);
-		dev_priv-> rps.is_bdw_sw_turbo = false;
-	} else {
-		I915_WRITE(GEN6_PMINTRMSK, ~GEN8_PMINTR_REDIRECT_TO_NON_DISP);
-		I915_WRITE(GEN8_GT_IER(2), I915_READ(GEN8_GT_IER(2)) &
-			   ~dev_priv->pm_rps_events);
-		/* Complete PM interrupt masking here doesn't race with the rps work
-		 * item again unmasking PM interrupts because that is using a different
-		 * register (GEN8_GT_IMR(2)) to mask PM interrupts. The only risk is in
-		 * leaving stale bits in GEN8_GT_IIR(2) and GEN8_GT_IMR(2) which
-		 * gen8_enable_rps will clean up. */
 
-		spin_lock_irq(&dev_priv->irq_lock);
-		dev_priv->rps.pm_iir = 0;
-		spin_unlock_irq(&dev_priv->irq_lock);
+	I915_WRITE(GEN6_PMINTRMSK, ~GEN8_PMINTR_REDIRECT_TO_NON_DISP);
+	I915_WRITE(GEN8_GT_IER(2), I915_READ(GEN8_GT_IER(2)) &
+		   ~dev_priv->pm_rps_events);
+	/* Complete PM interrupt masking here doesn't race with the rps work
+	 * item again unmasking PM interrupts because that is using a different
+	 * register (GEN8_GT_IMR(2)) to mask PM interrupts. The only risk is in
+	 * leaving stale bits in GEN8_GT_IIR(2) and GEN8_GT_IMR(2) which
+	 * gen8_enable_rps will clean up. */
+
+	spin_lock_irq(&dev_priv->irq_lock);
+	dev_priv->rps.pm_iir = 0;
+	spin_unlock_irq(&dev_priv->irq_lock);
 
 	I915_WRITE(GEN8_GT_IIR(2), dev_priv->pm_rps_events);
-	}
 }
 
 static void gen6_disable_rps_interrupts(struct drm_device *dev)
@@ -3686,111 +3685,13 @@ static void parse_rp_state_cap(struct drm_i915_private *dev_priv, u32 rp_state_c
3686 dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq; 3685 dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;
3687} 3686}
3688 3687
3689static void bdw_sw_calculate_freq(struct drm_device *dev,
3690 struct intel_rps_bdw_cal *c, u32 *cur_time, u32 *c0)
3691{
3692 struct drm_i915_private *dev_priv = dev->dev_private;
3693 u64 busy = 0;
3694 u32 busyness_pct = 0;
3695 u32 elapsed_time = 0;
3696 u16 new_freq = 0;
3697
3698 if (!c || !cur_time || !c0)
3699 return;
3700
3701 if (0 == c->last_c0)
3702 goto out;
3703
3704 /* Check Evaluation interval */
3705 elapsed_time = *cur_time - c->last_ts;
3706 if (elapsed_time < c->eval_interval)
3707 return;
3708
3709 mutex_lock(&dev_priv->rps.hw_lock);
3710
3711 /*
3712 * c0 unit in 32*1.28 usec, elapsed_time unit in 1 usec.
3713 * Whole busyness_pct calculation should be
3714 * busy = ((u64)(*c0 - c->last_c0) << 5 << 7) / 100;
3715 * busyness_pct = (u32)(busy * 100 / elapsed_time);
3716 * The final formula is to simplify CPU calculation
3717 */
3718 busy = (u64)(*c0 - c->last_c0) << 12;
3719 do_div(busy, elapsed_time);
3720 busyness_pct = (u32)busy;
3721
3722 if (c->is_up && busyness_pct >= c->it_threshold_pct)
3723 new_freq = (u16)dev_priv->rps.cur_freq + 3;
3724 if (!c->is_up && busyness_pct <= c->it_threshold_pct)
3725 new_freq = (u16)dev_priv->rps.cur_freq - 1;
3726
3727 /* Adjust to new frequency busyness and compare with threshold */
3728 if (0 != new_freq) {
3729 if (new_freq > dev_priv->rps.max_freq_softlimit)
3730 new_freq = dev_priv->rps.max_freq_softlimit;
3731 else if (new_freq < dev_priv->rps.min_freq_softlimit)
3732 new_freq = dev_priv->rps.min_freq_softlimit;
3733
3734 gen6_set_rps(dev, new_freq);
3735 }
3736
3737 mutex_unlock(&dev_priv->rps.hw_lock);
3738
3739out:
3740 c->last_c0 = *c0;
3741 c->last_ts = *cur_time;
3742}
3743
3744static void gen8_set_frequency_RP0(struct work_struct *work)
3745{
3746 struct intel_rps_bdw_turbo *p_bdw_turbo =
3747 container_of(work, struct intel_rps_bdw_turbo, work_max_freq);
3748 struct intel_gen6_power_mgmt *p_power_mgmt =
3749 container_of(p_bdw_turbo, struct intel_gen6_power_mgmt, sw_turbo);
3750 struct drm_i915_private *dev_priv =
3751 container_of(p_power_mgmt, struct drm_i915_private, rps);
3752
3753 mutex_lock(&dev_priv->rps.hw_lock);
3754 gen6_set_rps(dev_priv->dev, dev_priv->rps.rp0_freq);
3755 mutex_unlock(&dev_priv->rps.hw_lock);
3756}
3757
3758static void flip_active_timeout_handler(unsigned long var)
3759{
3760 struct drm_i915_private *dev_priv = (struct drm_i915_private *) var;
3761
3762 del_timer(&dev_priv->rps.sw_turbo.flip_timer);
3763 atomic_set(&dev_priv->rps.sw_turbo.flip_received, false);
3764
3765 queue_work(dev_priv->wq, &dev_priv->rps.sw_turbo.work_max_freq);
3766}
3767
3768void bdw_software_turbo(struct drm_device *dev)
3769{
3770 struct drm_i915_private *dev_priv = dev->dev_private;
3771
3772 u32 current_time = I915_READ(TIMESTAMP_CTR); /* unit in usec */
3773 u32 current_c0 = I915_READ(MCHBAR_PCU_C0); /* unit in 32*1.28 usec */
3774
3775 bdw_sw_calculate_freq(dev, &dev_priv->rps.sw_turbo.up,
3776 &current_time, &current_c0);
3777 bdw_sw_calculate_freq(dev, &dev_priv->rps.sw_turbo.down,
3778 &current_time, &current_c0);
3779}
3780
3781static void gen8_enable_rps(struct drm_device *dev) 3688static void gen8_enable_rps(struct drm_device *dev)
3782{ 3689{
3783 struct drm_i915_private *dev_priv = dev->dev_private; 3690 struct drm_i915_private *dev_priv = dev->dev_private;
3784 struct intel_engine_cs *ring; 3691 struct intel_engine_cs *ring;
3785 uint32_t rc6_mask = 0, rp_state_cap; 3692 uint32_t rc6_mask = 0, rp_state_cap;
3786 uint32_t threshold_up_pct, threshold_down_pct;
3787 uint32_t ei_up, ei_down; /* up and down evaluation interval */
3788 u32 rp_ctl_flag;
3789 int unused; 3693 int unused;
3790 3694
3791 /* Use software Turbo for BDW */
3792 dev_priv->rps.is_bdw_sw_turbo = IS_BROADWELL(dev);
3793
3794 /* 1a: Software RC state - RC0 */ 3695 /* 1a: Software RC state - RC0 */
3795 I915_WRITE(GEN6_RC_STATE, 0); 3696 I915_WRITE(GEN6_RC_STATE, 0);
3796 3697
@@ -3834,74 +3735,35 @@ static void gen8_enable_rps(struct drm_device *dev)
3834 HSW_FREQUENCY(dev_priv->rps.rp1_freq)); 3735 HSW_FREQUENCY(dev_priv->rps.rp1_freq));
3835 I915_WRITE(GEN6_RC_VIDEO_FREQ, 3736 I915_WRITE(GEN6_RC_VIDEO_FREQ,
3836 HSW_FREQUENCY(dev_priv->rps.rp1_freq)); 3737 HSW_FREQUENCY(dev_priv->rps.rp1_freq));
3837 ei_up = 84480; /* 84.48ms */ 3738 /* NB: Docs say 1s, and 1000000 - which aren't equivalent */
3838 ei_down = 448000; 3739 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 100000000 / 128); /* 1 second timeout */
3839 threshold_up_pct = 90; /* x percent busy */
3840 threshold_down_pct = 70;
3841
3842 if (dev_priv->rps.is_bdw_sw_turbo) {
3843 dev_priv->rps.sw_turbo.up.it_threshold_pct = threshold_up_pct;
3844 dev_priv->rps.sw_turbo.up.eval_interval = ei_up;
3845 dev_priv->rps.sw_turbo.up.is_up = true;
3846 dev_priv->rps.sw_turbo.up.last_ts = 0;
3847 dev_priv->rps.sw_turbo.up.last_c0 = 0;
3848
3849 dev_priv->rps.sw_turbo.down.it_threshold_pct = threshold_down_pct;
3850 dev_priv->rps.sw_turbo.down.eval_interval = ei_down;
3851 dev_priv->rps.sw_turbo.down.is_up = false;
3852 dev_priv->rps.sw_turbo.down.last_ts = 0;
3853 dev_priv->rps.sw_turbo.down.last_c0 = 0;
3854
3855 /* Start the timer to track if flip comes*/
3856 dev_priv->rps.sw_turbo.timeout = 200*1000; /* in us */
3857
3858 init_timer(&dev_priv->rps.sw_turbo.flip_timer);
3859 dev_priv->rps.sw_turbo.flip_timer.function = flip_active_timeout_handler;
3860 dev_priv->rps.sw_turbo.flip_timer.data = (unsigned long) dev_priv;
3861 dev_priv->rps.sw_turbo.flip_timer.expires =
3862 usecs_to_jiffies(dev_priv->rps.sw_turbo.timeout) + jiffies;
3863 add_timer(&dev_priv->rps.sw_turbo.flip_timer);
3864 INIT_WORK(&dev_priv->rps.sw_turbo.work_max_freq, gen8_set_frequency_RP0);
3865
3866 atomic_set(&dev_priv->rps.sw_turbo.flip_received, true);
3867 } else {
3868 /* NB: Docs say 1s, and 1000000 - which aren't equivalent
3869 * 1 second timeout*/
3870 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, FREQ_1_28_US(1000000));
3871 3740
3872 /* Docs recommend 900MHz, and 300 MHz respectively */ 3741 /* Docs recommend 900MHz, and 300 MHz respectively */
3873 I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, 3742 I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
3874 dev_priv->rps.max_freq_softlimit << 24 | 3743 dev_priv->rps.max_freq_softlimit << 24 |
3875 dev_priv->rps.min_freq_softlimit << 16); 3744 dev_priv->rps.min_freq_softlimit << 16);
3876 3745
3877 I915_WRITE(GEN6_RP_UP_THRESHOLD, 3746 I915_WRITE(GEN6_RP_UP_THRESHOLD, 7600000 / 128); /* 76ms busyness per EI, 90% */
3878 FREQ_1_28_US(ei_up * threshold_up_pct / 100)); 3747 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 31300000 / 128); /* 313ms busyness per EI, 70%*/
3879 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 3748 I915_WRITE(GEN6_RP_UP_EI, 66000); /* 84.48ms, XXX: random? */
3880 FREQ_1_28_US(ei_down * threshold_down_pct / 100)); 3749 I915_WRITE(GEN6_RP_DOWN_EI, 350000); /* 448ms, XXX: random? */
3881 I915_WRITE(GEN6_RP_UP_EI,
3882 FREQ_1_28_US(ei_up));
3883 I915_WRITE(GEN6_RP_DOWN_EI,
3884 FREQ_1_28_US(ei_down));
3885 3750
3886 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10); 3751 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
3887 }
3888 3752
3889 /* 5: Enable RPS */ 3753 /* 5: Enable RPS */
3890 rp_ctl_flag = GEN6_RP_MEDIA_TURBO | 3754 I915_WRITE(GEN6_RP_CONTROL,
3891 GEN6_RP_MEDIA_HW_NORMAL_MODE | 3755 GEN6_RP_MEDIA_TURBO |
3892 GEN6_RP_MEDIA_IS_GFX | 3756 GEN6_RP_MEDIA_HW_NORMAL_MODE |
3893 GEN6_RP_UP_BUSY_AVG | 3757 GEN6_RP_MEDIA_IS_GFX |
3894 GEN6_RP_DOWN_IDLE_AVG; 3758 GEN6_RP_ENABLE |
3895 if (!dev_priv->rps.is_bdw_sw_turbo) 3759 GEN6_RP_UP_BUSY_AVG |
3896 rp_ctl_flag |= GEN6_RP_ENABLE; 3760 GEN6_RP_DOWN_IDLE_AVG);
3897 3761
3898 I915_WRITE(GEN6_RP_CONTROL, rp_ctl_flag); 3762 /* 6: Ring frequency + overclocking (our driver does this later */
3899 3763
3900 /* 6: Ring frequency + overclocking
3901 * (our driver does this later */
3902 gen6_set_rps(dev, (I915_READ(GEN6_GT_PERF_STATUS) & 0xff00) >> 8); 3764 gen6_set_rps(dev, (I915_READ(GEN6_GT_PERF_STATUS) & 0xff00) >> 8);
3903 if (!dev_priv->rps.is_bdw_sw_turbo) 3765
3904 gen8_enable_rps_interrupts(dev); 3766 gen8_enable_rps_interrupts(dev);
3905 3767
3906 gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL); 3768 gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
3907} 3769}
@@ -5375,8 +5237,6 @@ static void intel_gen6_powersave_work(struct work_struct *work)
 					    rps.delayed_resume_work.work);
 	struct drm_device *dev = dev_priv->dev;
 
-	dev_priv->rps.is_bdw_sw_turbo = false;
-
 	mutex_lock(&dev_priv->rps.hw_lock);
 
 	if (IS_CHERRYVIEW(dev)) {
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 109de2eeb9a8..6dc981f0671e 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -707,7 +707,7 @@ static int bdw_init_workarounds(struct intel_engine_cs *ring)
 	 * update the number of dwords required based on the
 	 * actual number of workarounds applied
 	 */
-	ret = intel_ring_begin(ring, 24);
+	ret = intel_ring_begin(ring, 18);
 	if (ret)
 		return ret;
 
@@ -722,19 +722,8 @@ static int bdw_init_workarounds(struct intel_engine_cs *ring)
 	intel_ring_emit_wa(ring, GEN7_ROW_CHICKEN2,
 			   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
 
-	/*
-	 * This GEN8_CENTROID_PIXEL_OPT_DIS W/A is only needed for
-	 * pre-production hardware
-	 */
 	intel_ring_emit_wa(ring, HALF_SLICE_CHICKEN3,
-			   _MASKED_BIT_ENABLE(GEN8_CENTROID_PIXEL_OPT_DIS
-					      | GEN8_SAMPLER_POWER_BYPASS_DIS));
-
-	intel_ring_emit_wa(ring, GEN7_HALF_SLICE_CHICKEN1,
-			   _MASKED_BIT_ENABLE(GEN7_SINGLE_SUBSCAN_DISPATCH_ENABLE));
-
-	intel_ring_emit_wa(ring, COMMON_SLICE_CHICKEN2,
-			   _MASKED_BIT_ENABLE(GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE));
+			   _MASKED_BIT_ENABLE(GEN8_SAMPLER_POWER_BYPASS_DIS));
 
 	/* Use Force Non-Coherent whenever executing a 3D context. This is a
 	 * workaround for for a possible hang in the unlikely event a TLB
@@ -2203,8 +2192,9 @@ hsw_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
 		return ret;
 
 	intel_ring_emit(ring,
-			MI_BATCH_BUFFER_START | MI_BATCH_PPGTT_HSW |
-			(flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_HSW));
+			MI_BATCH_BUFFER_START |
+			(flags & I915_DISPATCH_SECURE ?
+			 0 : MI_BATCH_PPGTT_HSW | MI_BATCH_NON_SECURE_HSW));
 	/* bit0-7 is the length on GEN6+ */
 	intel_ring_emit(ring, offset);
 	intel_ring_advance(ring);