author     Dave Airlie <airlied@redhat.com>   2014-02-26 23:36:01 -0500
committer  Dave Airlie <airlied@redhat.com>   2014-02-26 23:36:01 -0500
commit     3e09dcd5bde5c1c3bf6aa3f848fe065f0c8fae9c (patch)
tree       4cb4c7b4bbd51fdeac28a6a04b3dbd2eabc9c3fa
parent     6ba6b7cdaf1a48b24c23c303871ffe1640a866e8 (diff)
parent     b8a5ff8d7c676a04e0da5ec16bb068dd39459042 (diff)
Merge tag 'drm-intel-next-2014-02-07' of ssh://git.freedesktop.org/git/drm-intel into drm-next
- Yet more steps towards atomic modeset from Ville.
- DP panel power sequencing improvements from Paulo.
- irq code cleanups from Ville.
- 5.4 GHz dp lane clock support for bdw/hsw from Todd.
- Clock readout support for hsw/bdw (aka fastboot) from Jesse.
- Make pipe underruns report at ERROR level (Ville). This is to check
  our improved watermarks code.
- Full ppgtt support from Ben for gen7.
- More fbc fixes and improvements from Ville all over the place,
  unfortunately not yet enabled by default on more platforms.
- w/a cleanups from Ville.
- HiZ stall optimization settings (Chia-I Wu).
- Display register mmio offset refactor patch from Antti.
- RPS improvements for corner-cases from Jeff McGee.

* tag 'drm-intel-next-2014-02-07' of ssh://git.freedesktop.org/git/drm-intel: (166 commits)
  drm/i915: Update rps interrupt limits
  drm/i915: Restore rps/rc6 on reset
  drm/i915: Prevent recursion by retiring requests when the ring is full
  drm/i915: Generate a hang error code
  drm/i915: unify FLIP_DONE macro names
  drm/i915: vlv: s/spin_lock_irqsave/spin_lock/ in irq handler
  drm/i915: factor out valleyview_pipestat_irq_handler
  drm/i915: vlv: don't unmask IIR[DISPLAY_PIPE_A/B_VBLANK] interrupt
  drm/i915: Reorganize display pipe register accesses
  drm/i915: Treat using a purged buffer as a source of EFAULT
  drm/i915: Convert EFAULT into a silent SIGBUS
  drm/i915: release mutex in i915_gem_init()'s error path
  drm/i915: check for oom when allocating private_default_ctx
  drm/i915/vlv: WA to fix Voltage not getting dropped to Vmin when Gfx is power gated.
  drm/i915: Get rid of acthd based guilty batch search
  drm/i915: Use hangcheck score to find guilty context
  drm/i915: Drop WaDisablePSDDualDispatchEnable:ivb for IVB GT2
  drm/i915: Fix IVB GT2 WaDisableDopClockGating and WaDisablePSDDualDispatchEnable
  drm/i915: Don't access snooped pages through the GTT (even for error capture)
  drm/i915: Only print information for filing bug reports once
  ...

Conflicts:
	drivers/gpu/drm/i915/intel_dp.c
-rw-r--r--  drivers/gpu/drm/i915/Makefile                   1
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c           301
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c                 6
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c               182
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h               415
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c               412
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_context.c       435
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_evict.c          49
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c    164
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c           675
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_tiling.c          2
-rw-r--r--  drivers/gpu/drm/i915/i915_gpu_error.c         442
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c               259
-rw-r--r--  drivers/gpu/drm/i915/i915_params.c            155
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h               336
-rw-r--r--  drivers/gpu/drm/i915/i915_suspend.c            40
-rw-r--r--  drivers/gpu/drm/i915/i915_sysfs.c              10
-rw-r--r--  drivers/gpu/drm/i915/i915_ums.c                 8
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.c               4
-rw-r--r--  drivers/gpu/drm/i915/intel_crt.c                2
-rw-r--r--  drivers/gpu/drm/i915/intel_ddi.c              101
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c          216
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c               365
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h               28
-rw-r--r--  drivers/gpu/drm/i915/intel_fbdev.c              4
-rw-r--r--  drivers/gpu/drm/i915/intel_hdmi.c               6
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c               6
-rw-r--r--  drivers/gpu/drm/i915/intel_overlay.c            8
-rw-r--r--  drivers/gpu/drm/i915/intel_panel.c             17
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c               294
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c        37
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.h         2
-rw-r--r--  drivers/gpu/drm/i915/intel_sprite.c            18
-rw-r--r--  drivers/gpu/drm/i915/intel_uncore.c             8
-rw-r--r--  include/drm/drm_dp_helper.h                    10
35 files changed, 3342 insertions, 1676 deletions
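Of the files above, i915_params.c is new in this series: it gathers the module parameters that were previously declared as individual i915_* globals in i915_drv.c into a single structure, which is why the code below switches from names like i915_semaphores to i915.semaphores. A minimal sketch of that pattern in C, restricted to fields actually referenced in this diff (the real file declares many more parameters and their module_param hookups):

/* Sketch only -- not the full drivers/gpu/drm/i915/i915_params.c. */
struct i915_params {
	int modeset;			/* replaces i915_modeset */
	int semaphores;			/* replaces i915_semaphores */
	int preliminary_hw_support;	/* replaces i915_preliminary_hw_support */
	bool reset;			/* replaces i915_try_reset */
	/* ... remaining parameters elided ... */
};

/* One global instance replaces the old scattered i915_* variables,
 * so callers write i915.semaphores, i915.modeset, and so on. */
extern struct i915_params i915;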
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 9fd44f5f3b3b..f33902ff2c22 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -14,6 +14,7 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o \
 	  i915_gem_gtt.o \
 	  i915_gem_stolen.o \
 	  i915_gem_tiling.o \
+	  i915_params.o \
 	  i915_sysfs.o \
 	  i915_trace_points.o \
 	  i915_ums.o \
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index b2b46c52294c..2dc05c30b800 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -98,7 +98,7 @@ static const char *get_pin_flag(struct drm_i915_gem_object *obj)
 {
 	if (obj->user_pin_count > 0)
 		return "P";
-	else if (obj->pin_count > 0)
+	else if (i915_gem_obj_is_pinned(obj))
 		return "p";
 	else
 		return " ";
@@ -123,6 +123,8 @@ static void
 describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 {
 	struct i915_vma *vma;
+	int pin_count = 0;
+
 	seq_printf(m, "%pK: %s%s%s %8zdKiB %02x %02x %u %u %u%s%s%s",
 		   &obj->base,
 		   get_pin_flag(obj),
@@ -139,8 +141,10 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 		   obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
 	if (obj->base.name)
 		seq_printf(m, " (name: %d)", obj->base.name);
-	if (obj->pin_count)
-		seq_printf(m, " (pinned x %d)", obj->pin_count);
+	list_for_each_entry(vma, &obj->vma_list, vma_link)
+		if (vma->pin_count > 0)
+			pin_count++;
+	seq_printf(m, " (pinned x %d)", pin_count);
 	if (obj->pin_display)
 		seq_printf(m, " (display)");
 	if (obj->fence_reg != I915_FENCE_REG_NONE)
@@ -447,7 +451,7 @@ static int i915_gem_gtt_info(struct seq_file *m, void *data)
 
 	total_obj_size = total_gtt_size = count = 0;
 	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
-		if (list == PINNED_LIST && obj->pin_count == 0)
+		if (list == PINNED_LIST && !i915_gem_obj_is_pinned(obj))
 			continue;
 
 		seq_puts(m, "   ");
@@ -712,8 +716,6 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
 		seq_printf(m, "Graphics Interrupt mask: %08x\n",
 			   I915_READ(GTIMR));
 	}
-	seq_printf(m, "Interrupts received: %d\n",
-		   atomic_read(&dev_priv->irq_received));
 	for_each_ring(ring, dev_priv, i) {
 		if (INTEL_INFO(dev)->gen >= 6) {
 			seq_printf(m,
@@ -1733,6 +1735,17 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
 	return 0;
 }
 
+static int per_file_ctx(int id, void *ptr, void *data)
+{
+	struct i915_hw_context *ctx = ptr;
+	struct seq_file *m = data;
+	struct i915_hw_ppgtt *ppgtt = ctx_to_ppgtt(ctx);
+
+	ppgtt->debug_dump(ppgtt, m);
+
+	return 0;
+}
+
 static void gen8_ppgtt_info(struct seq_file *m, struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1762,6 +1775,7 @@ static void gen6_ppgtt_info(struct seq_file *m, struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_ring_buffer *ring;
+	struct drm_file *file;
 	int i;
 
 	if (INTEL_INFO(dev)->gen == 6)
@@ -1780,6 +1794,20 @@ static void gen6_ppgtt_info(struct seq_file *m, struct drm_device *dev)
 
 		seq_puts(m, "aliasing PPGTT:\n");
 		seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd_offset);
+
+		ppgtt->debug_dump(ppgtt, m);
+	} else
+		return;
+
+	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
+		struct drm_i915_file_private *file_priv = file->driver_priv;
+		struct i915_hw_ppgtt *pvt_ppgtt;
+
+		pvt_ppgtt = ctx_to_ppgtt(file_priv->private_default_ctx);
+		seq_printf(m, "proc: %s\n",
+			   get_pid_task(file->pid, PIDTYPE_PID)->comm);
+		seq_puts(m, "  default context:\n");
+		idr_for_each(&file_priv->context_idr, per_file_ctx, m);
 	}
 	seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK));
 }
@@ -1892,6 +1920,44 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
 	return 0;
 }
 
+static int i915_sink_crc(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct intel_encoder *encoder;
+	struct intel_connector *connector;
+	struct intel_dp *intel_dp = NULL;
+	int ret;
+	u8 crc[6];
+
+	drm_modeset_lock_all(dev);
+	list_for_each_entry(connector, &dev->mode_config.connector_list,
+			    base.head) {
+
+		if (connector->base.dpms != DRM_MODE_DPMS_ON)
+			continue;
+
+		encoder = to_intel_encoder(connector->base.encoder);
+		if (encoder->type != INTEL_OUTPUT_EDP)
+			continue;
+
+		intel_dp = enc_to_intel_dp(&encoder->base);
+
+		ret = intel_dp_sink_crc(intel_dp, crc);
+		if (ret)
+			goto out;
+
+		seq_printf(m, "%02x%02x%02x%02x%02x%02x\n",
+			   crc[0], crc[1], crc[2],
+			   crc[3], crc[4], crc[5]);
+		goto out;
+	}
+	ret = -ENODEV;
+out:
+	drm_modeset_unlock_all(dev);
+	return ret;
+}
+
 static int i915_energy_uJ(struct seq_file *m, void *data)
 {
 	struct drm_info_node *node = m->private;
@@ -2756,6 +2822,174 @@ static const struct file_operations i915_display_crc_ctl_fops = {
 	.write = display_crc_ctl_write
 };
 
+static void wm_latency_show(struct seq_file *m, const uint16_t wm[5])
+{
+	struct drm_device *dev = m->private;
+	int num_levels = IS_HASWELL(dev) || IS_BROADWELL(dev) ? 5 : 4;
+	int level;
+
+	drm_modeset_lock_all(dev);
+
+	for (level = 0; level < num_levels; level++) {
+		unsigned int latency = wm[level];
+
+		/* WM1+ latency values in 0.5us units */
+		if (level > 0)
+			latency *= 5;
+
+		seq_printf(m, "WM%d %u (%u.%u usec)\n",
+			   level, wm[level],
+			   latency / 10, latency % 10);
+	}
+
+	drm_modeset_unlock_all(dev);
+}
+
+static int pri_wm_latency_show(struct seq_file *m, void *data)
+{
+	struct drm_device *dev = m->private;
+
+	wm_latency_show(m, to_i915(dev)->wm.pri_latency);
+
+	return 0;
+}
+
+static int spr_wm_latency_show(struct seq_file *m, void *data)
+{
+	struct drm_device *dev = m->private;
+
+	wm_latency_show(m, to_i915(dev)->wm.spr_latency);
+
+	return 0;
+}
+
+static int cur_wm_latency_show(struct seq_file *m, void *data)
+{
+	struct drm_device *dev = m->private;
+
+	wm_latency_show(m, to_i915(dev)->wm.cur_latency);
+
+	return 0;
+}
+
+static int pri_wm_latency_open(struct inode *inode, struct file *file)
+{
+	struct drm_device *dev = inode->i_private;
+
+	if (!HAS_PCH_SPLIT(dev))
+		return -ENODEV;
+
+	return single_open(file, pri_wm_latency_show, dev);
+}
+
+static int spr_wm_latency_open(struct inode *inode, struct file *file)
+{
+	struct drm_device *dev = inode->i_private;
+
+	if (!HAS_PCH_SPLIT(dev))
+		return -ENODEV;
+
+	return single_open(file, spr_wm_latency_show, dev);
+}
+
+static int cur_wm_latency_open(struct inode *inode, struct file *file)
+{
+	struct drm_device *dev = inode->i_private;
+
+	if (!HAS_PCH_SPLIT(dev))
+		return -ENODEV;
+
+	return single_open(file, cur_wm_latency_show, dev);
+}
+
+static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
+				size_t len, loff_t *offp, uint16_t wm[5])
+{
+	struct seq_file *m = file->private_data;
+	struct drm_device *dev = m->private;
+	uint16_t new[5] = { 0 };
+	int num_levels = IS_HASWELL(dev) || IS_BROADWELL(dev) ? 5 : 4;
+	int level;
+	int ret;
+	char tmp[32];
+
+	if (len >= sizeof(tmp))
+		return -EINVAL;
+
+	if (copy_from_user(tmp, ubuf, len))
+		return -EFAULT;
+
+	tmp[len] = '\0';
+
+	ret = sscanf(tmp, "%hu %hu %hu %hu %hu", &new[0], &new[1], &new[2], &new[3], &new[4]);
+	if (ret != num_levels)
+		return -EINVAL;
+
+	drm_modeset_lock_all(dev);
+
+	for (level = 0; level < num_levels; level++)
+		wm[level] = new[level];
+
+	drm_modeset_unlock_all(dev);
+
+	return len;
+}
+
+
+static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
+				    size_t len, loff_t *offp)
+{
+	struct seq_file *m = file->private_data;
+	struct drm_device *dev = m->private;
+
+	return wm_latency_write(file, ubuf, len, offp, to_i915(dev)->wm.pri_latency);
+}
+
+static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
+				    size_t len, loff_t *offp)
+{
+	struct seq_file *m = file->private_data;
+	struct drm_device *dev = m->private;
+
+	return wm_latency_write(file, ubuf, len, offp, to_i915(dev)->wm.spr_latency);
+}
+
+static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
+				    size_t len, loff_t *offp)
+{
+	struct seq_file *m = file->private_data;
+	struct drm_device *dev = m->private;
+
+	return wm_latency_write(file, ubuf, len, offp, to_i915(dev)->wm.cur_latency);
+}
+
+static const struct file_operations i915_pri_wm_latency_fops = {
+	.owner = THIS_MODULE,
+	.open = pri_wm_latency_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+	.write = pri_wm_latency_write
+};
+
+static const struct file_operations i915_spr_wm_latency_fops = {
+	.owner = THIS_MODULE,
+	.open = spr_wm_latency_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+	.write = spr_wm_latency_write
+};
+
+static const struct file_operations i915_cur_wm_latency_fops = {
+	.owner = THIS_MODULE,
+	.open = cur_wm_latency_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+	.write = cur_wm_latency_write
+};
+
 static int
 i915_wedged_get(void *data, u64 *val)
 {
@@ -2929,7 +3163,7 @@ i915_drop_caches_set(void *data, u64 val)
 	list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
 		list_for_each_entry_safe(vma, x, &vm->inactive_list,
 					 mm_list) {
-			if (vma->obj->pin_count)
+			if (vma->pin_count)
 				continue;
 
 			ret = i915_vma_unbind(vma);
@@ -2989,6 +3223,7 @@ i915_max_freq_set(void *data, u64 val)
 {
 	struct drm_device *dev = data;
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 rp_state_cap, hw_max, hw_min;
 	int ret;
 
 	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
@@ -3007,14 +3242,29 @@ i915_max_freq_set(void *data, u64 val)
 	 */
 	if (IS_VALLEYVIEW(dev)) {
 		val = vlv_freq_opcode(dev_priv, val);
-		dev_priv->rps.max_delay = val;
-		valleyview_set_rps(dev, val);
+
+		hw_max = valleyview_rps_max_freq(dev_priv);
+		hw_min = valleyview_rps_min_freq(dev_priv);
 	} else {
 		do_div(val, GT_FREQUENCY_MULTIPLIER);
-		dev_priv->rps.max_delay = val;
-		gen6_set_rps(dev, val);
+
+		rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
+		hw_max = dev_priv->rps.hw_max;
+		hw_min = (rp_state_cap >> 16) & 0xff;
+	}
+
+	if (val < hw_min || val > hw_max || val < dev_priv->rps.min_delay) {
+		mutex_unlock(&dev_priv->rps.hw_lock);
+		return -EINVAL;
 	}
 
+	dev_priv->rps.max_delay = val;
+
+	if (IS_VALLEYVIEW(dev))
+		valleyview_set_rps(dev, val);
+	else
+		gen6_set_rps(dev, val);
+
 	mutex_unlock(&dev_priv->rps.hw_lock);
 
 	return 0;
@@ -3054,6 +3304,7 @@ i915_min_freq_set(void *data, u64 val)
 {
 	struct drm_device *dev = data;
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 rp_state_cap, hw_max, hw_min;
 	int ret;
 
 	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
@@ -3072,13 +3323,29 @@ i915_min_freq_set(void *data, u64 val)
 	 */
 	if (IS_VALLEYVIEW(dev)) {
 		val = vlv_freq_opcode(dev_priv, val);
-		dev_priv->rps.min_delay = val;
-		valleyview_set_rps(dev, val);
+
+		hw_max = valleyview_rps_max_freq(dev_priv);
+		hw_min = valleyview_rps_min_freq(dev_priv);
 	} else {
 		do_div(val, GT_FREQUENCY_MULTIPLIER);
-		dev_priv->rps.min_delay = val;
-		gen6_set_rps(dev, val);
+
+		rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
+		hw_max = dev_priv->rps.hw_max;
+		hw_min = (rp_state_cap >> 16) & 0xff;
 	}
+
+	if (val < hw_min || val > hw_max || val > dev_priv->rps.max_delay) {
+		mutex_unlock(&dev_priv->rps.hw_lock);
+		return -EINVAL;
+	}
+
+	dev_priv->rps.min_delay = val;
+
+	if (IS_VALLEYVIEW(dev))
+		valleyview_set_rps(dev, val);
+	else
+		gen6_set_rps(dev, val);
+
 	mutex_unlock(&dev_priv->rps.hw_lock);
 
 	return 0;
@@ -3248,6 +3515,7 @@ static const struct drm_info_list i915_debugfs_list[] = {
 	{"i915_dpio", i915_dpio_info, 0},
 	{"i915_llc", i915_llc, 0},
 	{"i915_edp_psr_status", i915_edp_psr_status, 0},
+	{"i915_sink_crc_eDP1", i915_sink_crc, 0},
 	{"i915_energy_uJ", i915_energy_uJ, 0},
 	{"i915_pc8_status", i915_pc8_status, 0},
 	{"i915_power_domain_info", i915_power_domain_info, 0},
@@ -3269,6 +3537,9 @@ static const struct i915_debugfs_files {
 	{"i915_error_state", &i915_error_state_fops},
 	{"i915_next_seqno", &i915_next_seqno_fops},
 	{"i915_display_crc_ctl", &i915_display_crc_ctl_fops},
+	{"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
+	{"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
+	{"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
 };
 
 void intel_display_crc_init(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 15a74f979b4b..258b1be20db3 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -990,7 +990,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
 		value = HAS_WT(dev);
 		break;
 	case I915_PARAM_HAS_ALIASING_PPGTT:
-		value = dev_priv->mm.aliasing_ppgtt ? 1 : 0;
+		value = dev_priv->mm.aliasing_ppgtt || USES_FULL_PPGTT(dev);
 		break;
 	case I915_PARAM_HAS_WAIT_TIMEOUT:
 		value = 1;
@@ -1374,7 +1374,7 @@ cleanup_gem:
 	i915_gem_cleanup_ringbuffer(dev);
 	i915_gem_context_fini(dev);
 	mutex_unlock(&dev->struct_mutex);
-	i915_gem_cleanup_aliasing_ppgtt(dev);
+	WARN_ON(dev_priv->mm.aliasing_ppgtt);
 	drm_mm_takedown(&dev_priv->gtt.base.mm);
 cleanup_power:
 	intel_display_power_put(dev, POWER_DOMAIN_VGA);
@@ -1776,8 +1776,8 @@ int i915_driver_unload(struct drm_device *dev)
 	i915_gem_free_all_phys_object(dev);
 	i915_gem_cleanup_ringbuffer(dev);
 	i915_gem_context_fini(dev);
+	WARN_ON(dev_priv->mm.aliasing_ppgtt);
 	mutex_unlock(&dev->struct_mutex);
-	i915_gem_cleanup_aliasing_ppgtt(dev);
 	i915_gem_cleanup_stolen(dev);
 
 	if (!I915_NEED_GFX_HWS(dev))
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 04f1f02c4019..2d05d7ce4c29 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -38,134 +38,30 @@
 #include <linux/module.h>
 #include <drm/drm_crtc_helper.h>
 
-static int i915_modeset __read_mostly = -1;
-module_param_named(modeset, i915_modeset, int, 0400);
-MODULE_PARM_DESC(modeset,
-		"Use kernel modesetting [KMS] (0=DRM_I915_KMS from .config, "
-		"1=on, -1=force vga console preference [default])");
-
-unsigned int i915_fbpercrtc __always_unused = 0;
-module_param_named(fbpercrtc, i915_fbpercrtc, int, 0400);
-
-int i915_panel_ignore_lid __read_mostly = 1;
-module_param_named(panel_ignore_lid, i915_panel_ignore_lid, int, 0600);
-MODULE_PARM_DESC(panel_ignore_lid,
-		"Override lid status (0=autodetect, 1=autodetect disabled [default], "
-		"-1=force lid closed, -2=force lid open)");
-
-unsigned int i915_powersave __read_mostly = 1;
-module_param_named(powersave, i915_powersave, int, 0600);
-MODULE_PARM_DESC(powersave,
-		"Enable powersavings, fbc, downclocking, etc. (default: true)");
-
-int i915_semaphores __read_mostly = -1;
-module_param_named(semaphores, i915_semaphores, int, 0400);
-MODULE_PARM_DESC(semaphores,
-		"Use semaphores for inter-ring sync (default: -1 (use per-chip defaults))");
-
-int i915_enable_rc6 __read_mostly = -1;
-module_param_named(i915_enable_rc6, i915_enable_rc6, int, 0400);
-MODULE_PARM_DESC(i915_enable_rc6,
-		"Enable power-saving render C-state 6. "
-		"Different stages can be selected via bitmask values "
-		"(0 = disable; 1 = enable rc6; 2 = enable deep rc6; 4 = enable deepest rc6). "
-		"For example, 3 would enable rc6 and deep rc6, and 7 would enable everything. "
-		"default: -1 (use per-chip default)");
-
-int i915_enable_fbc __read_mostly = -1;
-module_param_named(i915_enable_fbc, i915_enable_fbc, int, 0600);
-MODULE_PARM_DESC(i915_enable_fbc,
-		"Enable frame buffer compression for power savings "
-		"(default: -1 (use per-chip default))");
-
-unsigned int i915_lvds_downclock __read_mostly = 0;
-module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400);
-MODULE_PARM_DESC(lvds_downclock,
-		"Use panel (LVDS/eDP) downclocking for power savings "
-		"(default: false)");
-
-int i915_lvds_channel_mode __read_mostly;
-module_param_named(lvds_channel_mode, i915_lvds_channel_mode, int, 0600);
-MODULE_PARM_DESC(lvds_channel_mode,
-		 "Specify LVDS channel mode "
-		 "(0=probe BIOS [default], 1=single-channel, 2=dual-channel)");
-
-int i915_panel_use_ssc __read_mostly = -1;
-module_param_named(lvds_use_ssc, i915_panel_use_ssc, int, 0600);
-MODULE_PARM_DESC(lvds_use_ssc,
-		"Use Spread Spectrum Clock with panels [LVDS/eDP] "
-		"(default: auto from VBT)");
-
-int i915_vbt_sdvo_panel_type __read_mostly = -1;
-module_param_named(vbt_sdvo_panel_type, i915_vbt_sdvo_panel_type, int, 0600);
-MODULE_PARM_DESC(vbt_sdvo_panel_type,
-		"Override/Ignore selection of SDVO panel mode in the VBT "
-		"(-2=ignore, -1=auto [default], index in VBT BIOS table)");
-
-static bool i915_try_reset __read_mostly = true;
-module_param_named(reset, i915_try_reset, bool, 0600);
-MODULE_PARM_DESC(reset, "Attempt GPU resets (default: true)");
-
-bool i915_enable_hangcheck __read_mostly = true;
-module_param_named(enable_hangcheck, i915_enable_hangcheck, bool, 0644);
-MODULE_PARM_DESC(enable_hangcheck,
-		"Periodically check GPU activity for detecting hangs. "
-		"WARNING: Disabling this can cause system wide hangs. "
-		"(default: true)");
-
-int i915_enable_ppgtt __read_mostly = -1;
-module_param_named(i915_enable_ppgtt, i915_enable_ppgtt, int, 0400);
-MODULE_PARM_DESC(i915_enable_ppgtt,
-		"Enable PPGTT (default: true)");
-
-int i915_enable_psr __read_mostly = 0;
-module_param_named(enable_psr, i915_enable_psr, int, 0600);
-MODULE_PARM_DESC(enable_psr, "Enable PSR (default: false)");
-
-unsigned int i915_preliminary_hw_support __read_mostly = IS_ENABLED(CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT);
-module_param_named(preliminary_hw_support, i915_preliminary_hw_support, int, 0600);
-MODULE_PARM_DESC(preliminary_hw_support,
-		"Enable preliminary hardware support.");
-
-int i915_disable_power_well __read_mostly = 1;
-module_param_named(disable_power_well, i915_disable_power_well, int, 0600);
-MODULE_PARM_DESC(disable_power_well,
-		 "Disable the power well when possible (default: true)");
-
-int i915_enable_ips __read_mostly = 1;
-module_param_named(enable_ips, i915_enable_ips, int, 0600);
-MODULE_PARM_DESC(enable_ips, "Enable IPS (default: true)");
-
-bool i915_fastboot __read_mostly = 0;
-module_param_named(fastboot, i915_fastboot, bool, 0600);
-MODULE_PARM_DESC(fastboot, "Try to skip unnecessary mode sets at boot time "
-		 "(default: false)");
-
-int i915_enable_pc8 __read_mostly = 1;
-module_param_named(enable_pc8, i915_enable_pc8, int, 0600);
-MODULE_PARM_DESC(enable_pc8, "Enable support for low power package C states (PC8+) (default: true)");
-
-int i915_pc8_timeout __read_mostly = 5000;
-module_param_named(pc8_timeout, i915_pc8_timeout, int, 0600);
-MODULE_PARM_DESC(pc8_timeout, "Number of msecs of idleness required to enter PC8+ (default: 5000)");
-
-bool i915_prefault_disable __read_mostly;
-module_param_named(prefault_disable, i915_prefault_disable, bool, 0600);
-MODULE_PARM_DESC(prefault_disable,
-		"Disable page prefaulting for pread/pwrite/reloc (default:false). For developers only.");
-
 static struct drm_driver driver;
 
+#define GEN_DEFAULT_PIPEOFFSETS \
+	.pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \
+			  PIPE_C_OFFSET, PIPE_EDP_OFFSET }, \
+	.trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \
+			   TRANSCODER_C_OFFSET, TRANSCODER_EDP_OFFSET }, \
+	.dpll_offsets = { DPLL_A_OFFSET, DPLL_B_OFFSET }, \
+	.dpll_md_offsets = { DPLL_A_MD_OFFSET, DPLL_B_MD_OFFSET }, \
+	.palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET }
+
+
 static const struct intel_device_info intel_i830_info = {
 	.gen = 2, .is_mobile = 1, .cursor_needs_physical = 1, .num_pipes = 2,
 	.has_overlay = 1, .overlay_needs_physical = 1,
 	.ring_mask = RENDER_RING,
+	GEN_DEFAULT_PIPEOFFSETS,
 };
 
 static const struct intel_device_info intel_845g_info = {
 	.gen = 2, .num_pipes = 1,
 	.has_overlay = 1, .overlay_needs_physical = 1,
 	.ring_mask = RENDER_RING,
+	GEN_DEFAULT_PIPEOFFSETS,
 };
 
 static const struct intel_device_info intel_i85x_info = {
@@ -174,18 +70,21 @@ static const struct intel_device_info intel_i85x_info = {
 	.has_overlay = 1, .overlay_needs_physical = 1,
 	.has_fbc = 1,
 	.ring_mask = RENDER_RING,
+	GEN_DEFAULT_PIPEOFFSETS,
 };
 
 static const struct intel_device_info intel_i865g_info = {
 	.gen = 2, .num_pipes = 1,
 	.has_overlay = 1, .overlay_needs_physical = 1,
 	.ring_mask = RENDER_RING,
+	GEN_DEFAULT_PIPEOFFSETS,
 };
 
 static const struct intel_device_info intel_i915g_info = {
 	.gen = 3, .is_i915g = 1, .cursor_needs_physical = 1, .num_pipes = 2,
 	.has_overlay = 1, .overlay_needs_physical = 1,
 	.ring_mask = RENDER_RING,
+	GEN_DEFAULT_PIPEOFFSETS,
 };
 static const struct intel_device_info intel_i915gm_info = {
 	.gen = 3, .is_mobile = 1, .num_pipes = 2,
@@ -194,11 +93,13 @@ static const struct intel_device_info intel_i915gm_info = {
 	.supports_tv = 1,
 	.has_fbc = 1,
 	.ring_mask = RENDER_RING,
+	GEN_DEFAULT_PIPEOFFSETS,
 };
 static const struct intel_device_info intel_i945g_info = {
 	.gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1, .num_pipes = 2,
 	.has_overlay = 1, .overlay_needs_physical = 1,
 	.ring_mask = RENDER_RING,
+	GEN_DEFAULT_PIPEOFFSETS,
 };
 static const struct intel_device_info intel_i945gm_info = {
 	.gen = 3, .is_i945gm = 1, .is_mobile = 1, .num_pipes = 2,
@@ -207,6 +108,7 @@ static const struct intel_device_info intel_i945gm_info = {
 	.supports_tv = 1,
 	.has_fbc = 1,
 	.ring_mask = RENDER_RING,
+	GEN_DEFAULT_PIPEOFFSETS,
 };
 
 static const struct intel_device_info intel_i965g_info = {
@@ -214,6 +116,7 @@ static const struct intel_device_info intel_i965g_info = {
 	.has_hotplug = 1,
 	.has_overlay = 1,
 	.ring_mask = RENDER_RING,
+	GEN_DEFAULT_PIPEOFFSETS,
 };
 
 static const struct intel_device_info intel_i965gm_info = {
@@ -222,6 +125,7 @@ static const struct intel_device_info intel_i965gm_info = {
 	.has_overlay = 1,
 	.supports_tv = 1,
 	.ring_mask = RENDER_RING,
+	GEN_DEFAULT_PIPEOFFSETS,
 };
 
 static const struct intel_device_info intel_g33_info = {
@@ -229,12 +133,14 @@ static const struct intel_device_info intel_g33_info = {
 	.need_gfx_hws = 1, .has_hotplug = 1,
 	.has_overlay = 1,
 	.ring_mask = RENDER_RING,
+	GEN_DEFAULT_PIPEOFFSETS,
 };
 
 static const struct intel_device_info intel_g45_info = {
 	.gen = 4, .is_g4x = 1, .need_gfx_hws = 1, .num_pipes = 2,
 	.has_pipe_cxsr = 1, .has_hotplug = 1,
 	.ring_mask = RENDER_RING | BSD_RING,
+	GEN_DEFAULT_PIPEOFFSETS,
 };
 
 static const struct intel_device_info intel_gm45_info = {
@@ -243,18 +149,21 @@ static const struct intel_device_info intel_gm45_info = {
 	.has_pipe_cxsr = 1, .has_hotplug = 1,
 	.supports_tv = 1,
 	.ring_mask = RENDER_RING | BSD_RING,
+	GEN_DEFAULT_PIPEOFFSETS,
 };
 
 static const struct intel_device_info intel_pineview_info = {
 	.gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .num_pipes = 2,
 	.need_gfx_hws = 1, .has_hotplug = 1,
 	.has_overlay = 1,
+	GEN_DEFAULT_PIPEOFFSETS,
 };
 
 static const struct intel_device_info intel_ironlake_d_info = {
 	.gen = 5, .num_pipes = 2,
 	.need_gfx_hws = 1, .has_hotplug = 1,
 	.ring_mask = RENDER_RING | BSD_RING,
+	GEN_DEFAULT_PIPEOFFSETS,
 };
 
 static const struct intel_device_info intel_ironlake_m_info = {
@@ -262,6 +171,7 @@ static const struct intel_device_info intel_ironlake_m_info = {
 	.need_gfx_hws = 1, .has_hotplug = 1,
 	.has_fbc = 1,
 	.ring_mask = RENDER_RING | BSD_RING,
+	GEN_DEFAULT_PIPEOFFSETS,
 };
 
 static const struct intel_device_info intel_sandybridge_d_info = {
@@ -270,6 +180,7 @@ static const struct intel_device_info intel_sandybridge_d_info = {
 	.has_fbc = 1,
 	.ring_mask = RENDER_RING | BSD_RING | BLT_RING,
 	.has_llc = 1,
+	GEN_DEFAULT_PIPEOFFSETS,
 };
 
 static const struct intel_device_info intel_sandybridge_m_info = {
@@ -278,6 +189,7 @@ static const struct intel_device_info intel_sandybridge_m_info = {
 	.has_fbc = 1,
 	.ring_mask = RENDER_RING | BSD_RING | BLT_RING,
 	.has_llc = 1,
+	GEN_DEFAULT_PIPEOFFSETS,
 };
 
 #define GEN7_FEATURES \
@@ -290,18 +202,21 @@ static const struct intel_device_info intel_sandybridge_m_info = {
 static const struct intel_device_info intel_ivybridge_d_info = {
 	GEN7_FEATURES,
 	.is_ivybridge = 1,
+	GEN_DEFAULT_PIPEOFFSETS,
 };
 
 static const struct intel_device_info intel_ivybridge_m_info = {
 	GEN7_FEATURES,
 	.is_ivybridge = 1,
 	.is_mobile = 1,
+	GEN_DEFAULT_PIPEOFFSETS,
 };
 
 static const struct intel_device_info intel_ivybridge_q_info = {
 	GEN7_FEATURES,
 	.is_ivybridge = 1,
 	.num_pipes = 0, /* legal, last one wins */
+	GEN_DEFAULT_PIPEOFFSETS,
 };
 
 static const struct intel_device_info intel_valleyview_m_info = {
@@ -312,6 +227,7 @@ static const struct intel_device_info intel_valleyview_m_info = {
 	.display_mmio_offset = VLV_DISPLAY_BASE,
 	.has_fbc = 0, /* legal, last one wins */
 	.has_llc = 0, /* legal, last one wins */
+	GEN_DEFAULT_PIPEOFFSETS,
 };
 
 static const struct intel_device_info intel_valleyview_d_info = {
@@ -321,6 +237,7 @@ static const struct intel_device_info intel_valleyview_d_info = {
 	.display_mmio_offset = VLV_DISPLAY_BASE,
 	.has_fbc = 0, /* legal, last one wins */
 	.has_llc = 0, /* legal, last one wins */
+	GEN_DEFAULT_PIPEOFFSETS,
 };
 
 static const struct intel_device_info intel_haswell_d_info = {
@@ -329,6 +246,7 @@ static const struct intel_device_info intel_haswell_d_info = {
 	.has_ddi = 1,
 	.has_fpga_dbg = 1,
 	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
+	GEN_DEFAULT_PIPEOFFSETS,
 };
 
 static const struct intel_device_info intel_haswell_m_info = {
@@ -338,6 +256,7 @@ static const struct intel_device_info intel_haswell_m_info = {
 	.has_ddi = 1,
 	.has_fpga_dbg = 1,
 	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
+	GEN_DEFAULT_PIPEOFFSETS,
 };
 
 static const struct intel_device_info intel_broadwell_d_info = {
@@ -346,6 +265,7 @@ static const struct intel_device_info intel_broadwell_d_info = {
 	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
 	.has_llc = 1,
 	.has_ddi = 1,
+	GEN_DEFAULT_PIPEOFFSETS,
 };
 
 static const struct intel_device_info intel_broadwell_m_info = {
@@ -354,6 +274,7 @@ static const struct intel_device_info intel_broadwell_m_info = {
 	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
 	.has_llc = 1,
 	.has_ddi = 1,
+	GEN_DEFAULT_PIPEOFFSETS,
 };
 
 /*
@@ -482,12 +403,12 @@ bool i915_semaphore_is_enabled(struct drm_device *dev)
 
 	/* Until we get further testing... */
 	if (IS_GEN8(dev)) {
-		WARN_ON(!i915_preliminary_hw_support);
+		WARN_ON(!i915.preliminary_hw_support);
 		return false;
 	}
 
-	if (i915_semaphores >= 0)
-		return i915_semaphores;
+	if (i915.semaphores >= 0)
+		return i915.semaphores;
 
 #ifdef CONFIG_INTEL_IOMMU
 	/* Enable semaphores on SNB when IO remapping is off */
@@ -643,6 +564,7 @@ static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
 	/* KMS EnterVT equivalent */
 	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
 		intel_init_pch_refclk(dev);
+		drm_mode_config_reset(dev);
 
 		mutex_lock(&dev->struct_mutex);
 
@@ -655,7 +577,6 @@ static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
 		intel_modeset_init_hw(dev);
 
 		drm_modeset_lock_all(dev);
-		drm_mode_config_reset(dev);
 		intel_modeset_setup_hw_state(dev, true);
 		drm_modeset_unlock_all(dev);
 
@@ -752,7 +673,7 @@ int i915_reset(struct drm_device *dev)
 	bool simulated;
 	int ret;
 
-	if (!i915_try_reset)
+	if (!i915.reset)
 		return 0;
 
 	mutex_lock(&dev->struct_mutex);
@@ -807,6 +728,17 @@ int i915_reset(struct drm_device *dev)
 
 		drm_irq_uninstall(dev);
 		drm_irq_install(dev);
+
+		/* rps/rc6 re-init is necessary to restore state lost after the
+		 * reset and the re-install of drm irq. Skip for ironlake per
+		 * previous concerns that it doesn't respond well to some forms
+		 * of re-init after reset. */
+		if (INTEL_INFO(dev)->gen > 5) {
+			mutex_lock(&dev->struct_mutex);
+			intel_enable_gt_powersave(dev);
+			mutex_unlock(&dev->struct_mutex);
+		}
+
 		intel_hpd_init(dev);
 	} else {
 		mutex_unlock(&dev->struct_mutex);
@@ -820,7 +752,7 @@ static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	struct intel_device_info *intel_info =
 		(struct intel_device_info *) ent->driver_data;
 
-	if (IS_PRELIMINARY_HW(intel_info) && !i915_preliminary_hw_support) {
+	if (IS_PRELIMINARY_HW(intel_info) && !i915.preliminary_hw_support) {
 		DRM_INFO("This hardware requires preliminary hardware support.\n"
 			 "See CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT, and/or modparam preliminary_hw_support\n");
 		return -ENODEV;
@@ -1051,14 +983,14 @@ static int __init i915_init(void)
 	 * the default behavior.
 	 */
 #if defined(CONFIG_DRM_I915_KMS)
-	if (i915_modeset != 0)
+	if (i915.modeset != 0)
 		driver.driver_features |= DRIVER_MODESET;
 #endif
-	if (i915_modeset == 1)
+	if (i915.modeset == 1)
 		driver.driver_features |= DRIVER_MODESET;
 
 #ifdef CONFIG_VGA_CONSOLE
-	if (vgacon_text_force() && i915_modeset == -1)
+	if (vgacon_text_force() && i915.modeset == -1)
 		driver.driver_features &= ~DRIVER_MODESET;
 #endif
 
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index df77e20e3c3d..9d8ca2a36fde 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -58,7 +58,8 @@ enum pipe {
 	PIPE_A = 0,
 	PIPE_B,
 	PIPE_C,
-	I915_MAX_PIPES
+	_PIPE_EDP,
+	I915_MAX_PIPES = _PIPE_EDP
 };
 #define pipe_name(p) ((p) + 'A')
 
@@ -66,7 +67,8 @@ enum transcoder {
 	TRANSCODER_A = 0,
 	TRANSCODER_B,
 	TRANSCODER_C,
-	TRANSCODER_EDP = 0xF,
+	TRANSCODER_EDP,
+	I915_MAX_TRANSCODERS
 };
 #define transcoder_name(t) ((t) + 'A')
 
@@ -295,53 +297,80 @@ struct intel_display_error_state;
 
 struct drm_i915_error_state {
 	struct kref ref;
+	struct timeval time;
+
+	/* Generic register state */
 	u32 eir;
 	u32 pgtbl_er;
 	u32 ier;
 	u32 ccid;
 	u32 derrmr;
 	u32 forcewake;
-	bool waiting[I915_NUM_RINGS];
-	u32 pipestat[I915_MAX_PIPES];
-	u32 tail[I915_NUM_RINGS];
-	u32 head[I915_NUM_RINGS];
-	u32 ctl[I915_NUM_RINGS];
-	u32 ipeir[I915_NUM_RINGS];
-	u32 ipehr[I915_NUM_RINGS];
-	u32 instdone[I915_NUM_RINGS];
-	u32 acthd[I915_NUM_RINGS];
-	u32 semaphore_mboxes[I915_NUM_RINGS][I915_NUM_RINGS - 1];
-	u32 semaphore_seqno[I915_NUM_RINGS][I915_NUM_RINGS - 1];
-	u32 rc_psmi[I915_NUM_RINGS]; /* sleep state */
-	/* our own tracking of ring head and tail */
-	u32 cpu_ring_head[I915_NUM_RINGS];
-	u32 cpu_ring_tail[I915_NUM_RINGS];
 	u32 error; /* gen6+ */
 	u32 err_int; /* gen7 */
-	u32 bbstate[I915_NUM_RINGS];
-	u32 instpm[I915_NUM_RINGS];
-	u32 instps[I915_NUM_RINGS];
-	u32 extra_instdone[I915_NUM_INSTDONE_REG];
-	u32 seqno[I915_NUM_RINGS];
-	u64 bbaddr[I915_NUM_RINGS];
-	u32 fault_reg[I915_NUM_RINGS];
 	u32 done_reg;
-	u32 faddr[I915_NUM_RINGS];
+	u32 gac_eco;
+	u32 gam_ecochk;
+	u32 gab_ctl;
+	u32 gfx_mode;
+	u32 extra_instdone[I915_NUM_INSTDONE_REG];
+	u32 pipestat[I915_MAX_PIPES];
 	u64 fence[I915_MAX_NUM_FENCES];
-	struct timeval time;
+	struct intel_overlay_error_state *overlay;
+	struct intel_display_error_state *display;
+
 	struct drm_i915_error_ring {
 		bool valid;
+		/* Software tracked state */
+		bool waiting;
+		int hangcheck_score;
+		enum intel_ring_hangcheck_action hangcheck_action;
+		int num_requests;
+
+		/* our own tracking of ring head and tail */
+		u32 cpu_ring_head;
+		u32 cpu_ring_tail;
+
+		u32 semaphore_seqno[I915_NUM_RINGS - 1];
+
+		/* Register state */
+		u32 tail;
+		u32 head;
+		u32 ctl;
+		u32 hws;
+		u32 ipeir;
+		u32 ipehr;
+		u32 instdone;
+		u32 acthd;
+		u32 bbstate;
+		u32 instpm;
+		u32 instps;
+		u32 seqno;
+		u64 bbaddr;
+		u32 fault_reg;
+		u32 faddr;
+		u32 rc_psmi; /* sleep state */
+		u32 semaphore_mboxes[I915_NUM_RINGS - 1];
+
 		struct drm_i915_error_object {
 			int page_count;
 			u32 gtt_offset;
 			u32 *pages[0];
-		} *ringbuffer, *batchbuffer, *ctx;
+		} *ringbuffer, *batchbuffer, *ctx, *hws_page;
+
 		struct drm_i915_error_request {
 			long jiffies;
 			u32 seqno;
 			u32 tail;
 		} *requests;
-		int num_requests;
+
+		struct {
+			u32 gfx_mode;
+			union {
+				u64 pdp[4];
+				u32 pp_dir_base;
+			};
+		} vm_info;
 	} ring[I915_NUM_RINGS];
 	struct drm_i915_error_buffer {
 		u32 size;
@@ -358,11 +387,8 @@ struct drm_i915_error_state {
 		s32 ring:4;
 		u32 cache_level:3;
 	} **active_bo, **pinned_bo;
+
 	u32 *active_bo_count, *pinned_bo_count;
-	struct intel_overlay_error_state *overlay;
-	struct intel_display_error_state *display;
-	int hangcheck_score[I915_NUM_RINGS];
-	enum intel_ring_hangcheck_action hangcheck_action[I915_NUM_RINGS];
 };
 
 struct intel_connector;
@@ -507,6 +533,12 @@ struct intel_device_info {
 	u8 gen;
 	u8 ring_mask; /* Rings supported by the HW */
 	DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG, SEP_SEMICOLON);
+	/* Register offsets for the various display pipes and transcoders */
+	int pipe_offsets[I915_MAX_TRANSCODERS];
+	int trans_offsets[I915_MAX_TRANSCODERS];
+	int dpll_offsets[I915_MAX_PIPES];
+	int dpll_md_offsets[I915_MAX_PIPES];
+	int palette_offsets[I915_MAX_PIPES];
 };
 
 #undef DEFINE_FLAG
@@ -524,6 +556,57 @@ enum i915_cache_level {
 
 typedef uint32_t gen6_gtt_pte_t;
 
+/**
+ * A VMA represents a GEM BO that is bound into an address space. Therefore, a
+ * VMA's presence cannot be guaranteed before binding, or after unbinding the
+ * object into/from the address space.
+ *
+ * To make things as simple as possible (ie. no refcounting), a VMA's lifetime
+ * will always be <= an objects lifetime. So object refcounting should cover us.
+ */
+struct i915_vma {
+	struct drm_mm_node node;
+	struct drm_i915_gem_object *obj;
+	struct i915_address_space *vm;
+
+	/** This object's place on the active/inactive lists */
+	struct list_head mm_list;
+
+	struct list_head vma_link; /* Link in the object's VMA list */
+
+	/** This vma's place in the batchbuffer or on the eviction list */
+	struct list_head exec_list;
+
+	/**
+	 * Used for performing relocations during execbuffer insertion.
+	 */
+	struct hlist_node exec_node;
+	unsigned long exec_handle;
+	struct drm_i915_gem_exec_object2 *exec_entry;
+
+	/**
+	 * How many users have pinned this object in GTT space. The following
+	 * users can each hold at most one reference: pwrite/pread, pin_ioctl
+	 * (via user_pin_count), execbuffer (objects are not allowed multiple
+	 * times for the same batchbuffer), and the framebuffer code. When
+	 * switching/pageflipping, the framebuffer code has at most two buffers
+	 * pinned per crtc.
+	 *
+	 * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3
+	 * bits with absolutely no headroom. So use 4 bits. */
+	unsigned int pin_count:4;
+#define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf
+
+	/** Unmap an object from an address space. This usually consists of
+	 * setting the valid PTE entries to a reserved scratch page. */
+	void (*unbind_vma)(struct i915_vma *vma);
+	/* Map an object into an address space with the given cache flags. */
+#define GLOBAL_BIND (1<<0)
+	void (*bind_vma)(struct i915_vma *vma,
+			 enum i915_cache_level cache_level,
+			 u32 flags);
+};
+
 struct i915_address_space {
 	struct drm_mm mm;
 	struct drm_device *dev;
@@ -605,6 +688,8 @@ struct i915_gtt {
 
 struct i915_hw_ppgtt {
 	struct i915_address_space base;
+	struct kref ref;
+	struct drm_mm_node node;
 	unsigned num_pd_entries;
 	union {
 		struct page **pt_pages;
@@ -621,37 +706,12 @@ struct i915_hw_ppgtt {
 	dma_addr_t *pt_dma_addr;
 	dma_addr_t *gen8_pt_dma_addr[4];
 	};
-	int (*enable)(struct drm_device *dev);
-};
-
-/**
- * A VMA represents a GEM BO that is bound into an address space. Therefore, a
- * VMA's presence cannot be guaranteed before binding, or after unbinding the
- * object into/from the address space.
- *
- * To make things as simple as possible (ie. no refcounting), a VMA's lifetime
- * will always be <= an objects lifetime. So object refcounting should cover us.
- */
-struct i915_vma {
-	struct drm_mm_node node;
-	struct drm_i915_gem_object *obj;
-	struct i915_address_space *vm;
-
-	/** This object's place on the active/inactive lists */
-	struct list_head mm_list;
-
-	struct list_head vma_link; /* Link in the object's VMA list */
-
-	/** This vma's place in the batchbuffer or on the eviction list */
-	struct list_head exec_list;
-
-	/**
-	 * Used for performing relocations during execbuffer insertion.
-	 */
-	struct hlist_node exec_node;
-	unsigned long exec_handle;
-	struct drm_i915_gem_exec_object2 *exec_entry;
 
+	int (*enable)(struct i915_hw_ppgtt *ppgtt);
+	int (*switch_mm)(struct i915_hw_ppgtt *ppgtt,
+			 struct intel_ring_buffer *ring,
+			 bool synchronous);
+	void (*debug_dump)(struct i915_hw_ppgtt *ppgtt, struct seq_file *m);
 };
 
 struct i915_ctx_hang_stats {
@@ -676,9 +736,10 @@ struct i915_hw_context {
 	bool is_initialized;
 	uint8_t remap_slice;
 	struct drm_i915_file_private *file_priv;
-	struct intel_ring_buffer *ring;
+	struct intel_ring_buffer *last_ring;
 	struct drm_i915_gem_object *obj;
 	struct i915_ctx_hang_stats hang_stats;
+	struct i915_address_space *vm;
 
 	struct list_head link;
 };
@@ -831,11 +892,7 @@ struct i915_suspend_saved_registers {
 	u32 savePFIT_CONTROL;
 	u32 save_palette_a[256];
 	u32 save_palette_b[256];
-	u32 saveDPFC_CB_BASE;
-	u32 saveFBC_CFB_BASE;
-	u32 saveFBC_LL_BASE;
 	u32 saveFBC_CONTROL;
-	u32 saveFBC_CONTROL2;
 	u32 saveIER;
 	u32 saveIIR;
 	u32 saveIMR;
@@ -905,8 +962,6 @@ struct intel_gen6_power_mgmt {
 	struct work_struct work;
 	u32 pm_iir;
 
-	/* The below variables an all the rps hw state are protected by
-	 * dev->struct mutext. */
 	u8 cur_delay;
 	u8 min_delay;
 	u8 max_delay;
@@ -915,6 +970,9 @@ struct intel_gen6_power_mgmt {
 	u8 rp0_delay;
 	u8 hw_max;
 
+	bool rp_up_masked;
+	bool rp_down_masked;
+
 	int last_adj;
 	enum { LOW_POWER, BETWEEN, HIGH_POWER } power;
 
@@ -1361,8 +1419,6 @@ typedef struct drm_i915_private {
 	drm_dma_handle_t *status_page_dmah;
 	struct resource mch_res;
 
-	atomic_t irq_received;
-
 	/* protects the irq masks */
 	spinlock_t irq_lock;
 
@@ -1627,18 +1683,6 @@ struct drm_i915_gem_object {
 	 */
 	unsigned int fence_dirty:1;
 
-	/** How many users have pinned this object in GTT space. The following
-	 * users can each hold at most one reference: pwrite/pread, pin_ioctl
-	 * (via user_pin_count), execbuffer (objects are not allowed multiple
-	 * times for the same batchbuffer), and the framebuffer code. When
-	 * switching/pageflipping, the framebuffer code has at most two buffers
-	 * pinned per crtc.
-	 *
-	 * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3
-	 * bits with absolutely no headroom. So use 4 bits. */
-	unsigned int pin_count:4;
-#define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf
-
 	/**
 	 * Is the object at the current location in the gtt mappable and
 	 * fenceable? Used to avoid costly recalculations.
@@ -1751,7 +1795,7 @@ struct drm_i915_file_private {
1751 } mm; 1795 } mm;
1752 struct idr context_idr; 1796 struct idr context_idr;
1753 1797
1754 struct i915_ctx_hang_stats hang_stats; 1798 struct i915_hw_context *private_default_ctx;
1755 atomic_t rps_wait_boost; 1799 atomic_t rps_wait_boost;
1756}; 1800};
1757 1801
@@ -1824,7 +1868,11 @@ struct drm_i915_file_private {
1824#define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws) 1868#define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)
1825 1869
1826#define HAS_HW_CONTEXTS(dev) (INTEL_INFO(dev)->gen >= 6) 1870#define HAS_HW_CONTEXTS(dev) (INTEL_INFO(dev)->gen >= 6)
1827#define HAS_ALIASING_PPGTT(dev) (INTEL_INFO(dev)->gen >=6 && !IS_VALLEYVIEW(dev)) 1871#define HAS_ALIASING_PPGTT(dev) (INTEL_INFO(dev)->gen >= 6 && !IS_VALLEYVIEW(dev))
1872#define HAS_PPGTT(dev) (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev) \
1873 && !IS_BROADWELL(dev))
1874#define USES_PPGTT(dev) intel_enable_ppgtt(dev, false)
1875#define USES_FULL_PPGTT(dev) intel_enable_ppgtt(dev, true)
1828 1876
1829#define HAS_OVERLAY(dev) (INTEL_INFO(dev)->has_overlay) 1877#define HAS_OVERLAY(dev) (INTEL_INFO(dev)->has_overlay)
1830#define OVERLAY_NEEDS_PHYSICAL(dev) (INTEL_INFO(dev)->overlay_needs_physical) 1878#define OVERLAY_NEEDS_PHYSICAL(dev) (INTEL_INFO(dev)->overlay_needs_physical)
@@ -1887,32 +1935,39 @@ struct drm_i915_file_private {
1887 1935
1888extern const struct drm_ioctl_desc i915_ioctls[]; 1936extern const struct drm_ioctl_desc i915_ioctls[];
1889extern int i915_max_ioctl; 1937extern int i915_max_ioctl;
1890extern unsigned int i915_fbpercrtc __always_unused;
1891extern int i915_panel_ignore_lid __read_mostly;
1892extern unsigned int i915_powersave __read_mostly;
1893extern int i915_semaphores __read_mostly;
1894extern unsigned int i915_lvds_downclock __read_mostly;
1895extern int i915_lvds_channel_mode __read_mostly;
1896extern int i915_panel_use_ssc __read_mostly;
1897extern int i915_vbt_sdvo_panel_type __read_mostly;
1898extern int i915_enable_rc6 __read_mostly;
1899extern int i915_enable_fbc __read_mostly;
1900extern bool i915_enable_hangcheck __read_mostly;
1901extern int i915_enable_ppgtt __read_mostly;
1902extern int i915_enable_psr __read_mostly;
1903extern unsigned int i915_preliminary_hw_support __read_mostly;
1904extern int i915_disable_power_well __read_mostly;
1905extern int i915_enable_ips __read_mostly;
1906extern bool i915_fastboot __read_mostly;
1907extern int i915_enable_pc8 __read_mostly;
1908extern int i915_pc8_timeout __read_mostly;
1909extern bool i915_prefault_disable __read_mostly;
1910 1938
1911extern int i915_suspend(struct drm_device *dev, pm_message_t state); 1939extern int i915_suspend(struct drm_device *dev, pm_message_t state);
1912extern int i915_resume(struct drm_device *dev); 1940extern int i915_resume(struct drm_device *dev);
1913extern int i915_master_create(struct drm_device *dev, struct drm_master *master); 1941extern int i915_master_create(struct drm_device *dev, struct drm_master *master);
1914extern void i915_master_destroy(struct drm_device *dev, struct drm_master *master); 1942extern void i915_master_destroy(struct drm_device *dev, struct drm_master *master);
1915 1943
1944/* i915_params.c */
1945struct i915_params {
1946 int modeset;
1947 int panel_ignore_lid;
1948 unsigned int powersave;
1949 int semaphores;
1950 unsigned int lvds_downclock;
1951 int lvds_channel_mode;
1952 int panel_use_ssc;
1953 int vbt_sdvo_panel_type;
1954 int enable_rc6;
1955 int enable_fbc;
1956 bool enable_hangcheck;
1957 int enable_ppgtt;
1958 int enable_psr;
1959 unsigned int preliminary_hw_support;
1960 int disable_power_well;
1961 int enable_ips;
1962 bool fastboot;
1963 int enable_pc8;
1964 int pc8_timeout;
1965 bool prefault_disable;
1966 bool reset;
1967 int invert_brightness;
1968};
1969extern struct i915_params i915 __read_mostly;
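(A minimal sketch of the resulting call-site change: options that used to be standalone externs are now read as fields of the single 'i915' parameter block declared above, as the i915_gem.c hunks further down show for prefault_disable; the function name here is hypothetical.)

	/* Before: if (likely(!i915_prefault_disable)) ...
	 * After:  the same option lives in the i915 parameter struct. */
	static bool example_prefault_allowed(void)
	{
		return !i915.prefault_disable;
	}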
1970
1916 /* i915_dma.c */ 1971 /* i915_dma.c */
1917void i915_update_dri1_breadcrumb(struct drm_device *dev); 1972void i915_update_dri1_breadcrumb(struct drm_device *dev);
1918extern void i915_kernel_lost_context(struct drm_device * dev); 1973extern void i915_kernel_lost_context(struct drm_device * dev);
@@ -1945,6 +2000,8 @@ extern void intel_console_resume(struct work_struct *work);
1945void i915_queue_hangcheck(struct drm_device *dev); 2000void i915_queue_hangcheck(struct drm_device *dev);
1946void i915_handle_error(struct drm_device *dev, bool wedged); 2001void i915_handle_error(struct drm_device *dev, bool wedged);
1947 2002
2003void gen6_set_pm_mask(struct drm_i915_private *dev_priv, u32 pm_iir,
2004 int new_delay);
1948extern void intel_irq_init(struct drm_device *dev); 2005extern void intel_irq_init(struct drm_device *dev);
1949extern void intel_hpd_init(struct drm_device *dev); 2006extern void intel_hpd_init(struct drm_device *dev);
1950 2007
@@ -2014,6 +2071,8 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
2014 const struct drm_i915_gem_object_ops *ops); 2071 const struct drm_i915_gem_object_ops *ops);
2015struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev, 2072struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
2016 size_t size); 2073 size_t size);
2074void i915_init_vm(struct drm_i915_private *dev_priv,
2075 struct i915_address_space *vm);
2017void i915_gem_free_object(struct drm_gem_object *obj); 2076void i915_gem_free_object(struct drm_gem_object *obj);
2018void i915_gem_vma_destroy(struct i915_vma *vma); 2077void i915_gem_vma_destroy(struct i915_vma *vma);
2019 2078
@@ -2022,7 +2081,7 @@ int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj,
2022 uint32_t alignment, 2081 uint32_t alignment,
2023 bool map_and_fenceable, 2082 bool map_and_fenceable,
2024 bool nonblocking); 2083 bool nonblocking);
2025void i915_gem_object_unpin(struct drm_i915_gem_object *obj); 2084void i915_gem_object_ggtt_unpin(struct drm_i915_gem_object *obj);
2026int __must_check i915_vma_unbind(struct i915_vma *vma); 2085int __must_check i915_vma_unbind(struct i915_vma *vma);
2027int __must_check i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj); 2086int __must_check i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj);
2028int i915_gem_object_put_pages(struct drm_i915_gem_object *obj); 2087int i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
@@ -2186,6 +2245,13 @@ i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
2186 struct i915_address_space *vm); 2245 struct i915_address_space *vm);
2187 2246
2188struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj); 2247struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj);
2248static inline bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj) {
2249 struct i915_vma *vma;
2250 list_for_each_entry(vma, &obj->vma_list, vma_link)
2251 if (vma->pin_count > 0)
2252 return true;
2253 return false;
2254}
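(Sketch of a caller, mirroring the aperture-accounting hunk further down: the old obj->pin_count test becomes a walk over the object's VMAs through this helper.)

	/* Count space consumed by objects pinned anywhere in the GGTT. */
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
		if (i915_gem_obj_is_pinned(obj))
			pinned += i915_gem_obj_ggtt_size(obj);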
2189 2255
2190/* Some GGTT VM helpers */ 2256/* Some GGTT VM helpers */
2191#define obj_to_ggtt(obj) \ 2257#define obj_to_ggtt(obj) \
@@ -2225,46 +2291,56 @@ i915_gem_obj_ggtt_pin(struct drm_i915_gem_object *obj,
2225} 2291}
2226 2292
2227/* i915_gem_context.c */ 2293/* i915_gem_context.c */
2294#define ctx_to_ppgtt(ctx) container_of((ctx)->vm, struct i915_hw_ppgtt, base)
2228int __must_check i915_gem_context_init(struct drm_device *dev); 2295int __must_check i915_gem_context_init(struct drm_device *dev);
2229void i915_gem_context_fini(struct drm_device *dev); 2296void i915_gem_context_fini(struct drm_device *dev);
2297void i915_gem_context_reset(struct drm_device *dev);
2298int i915_gem_context_open(struct drm_device *dev, struct drm_file *file);
2299int i915_gem_context_enable(struct drm_i915_private *dev_priv);
2230void i915_gem_context_close(struct drm_device *dev, struct drm_file *file); 2300void i915_gem_context_close(struct drm_device *dev, struct drm_file *file);
2231int i915_switch_context(struct intel_ring_buffer *ring, 2301int i915_switch_context(struct intel_ring_buffer *ring,
2232 struct drm_file *file, int to_id); 2302 struct drm_file *file, struct i915_hw_context *to);
2303struct i915_hw_context *
2304i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id);
2233void i915_gem_context_free(struct kref *ctx_ref); 2305void i915_gem_context_free(struct kref *ctx_ref);
2234static inline void i915_gem_context_reference(struct i915_hw_context *ctx) 2306static inline void i915_gem_context_reference(struct i915_hw_context *ctx)
2235{ 2307{
2236 kref_get(&ctx->ref); 2308 if (ctx->obj && HAS_HW_CONTEXTS(ctx->obj->base.dev))
2309 kref_get(&ctx->ref);
2237} 2310}
2238 2311
2239static inline void i915_gem_context_unreference(struct i915_hw_context *ctx) 2312static inline void i915_gem_context_unreference(struct i915_hw_context *ctx)
2240{ 2313{
2241 kref_put(&ctx->ref, i915_gem_context_free); 2314 if (ctx->obj && HAS_HW_CONTEXTS(ctx->obj->base.dev))
2315 kref_put(&ctx->ref, i915_gem_context_free);
2316}
2317
2318static inline bool i915_gem_context_is_default(const struct i915_hw_context *c)
2319{
2320 return c->id == DEFAULT_CONTEXT_ID;
2242} 2321}
2243 2322
2244struct i915_ctx_hang_stats * __must_check
2245i915_gem_context_get_hang_stats(struct drm_device *dev,
2246 struct drm_file *file,
2247 u32 id);
2248int i915_gem_context_create_ioctl(struct drm_device *dev, void *data, 2323int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
2249 struct drm_file *file); 2324 struct drm_file *file);
2250int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data, 2325int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
2251 struct drm_file *file); 2326 struct drm_file *file);
2252 2327
2253/* i915_gem_gtt.c */ 2328/* i915_gem_evict.c */
2254void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev); 2329int __must_check i915_gem_evict_something(struct drm_device *dev,
2255void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt, 2330 struct i915_address_space *vm,
2256 struct drm_i915_gem_object *obj, 2331 int min_size,
2257 enum i915_cache_level cache_level); 2332 unsigned alignment,
2258void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt, 2333 unsigned cache_level,
2259 struct drm_i915_gem_object *obj); 2334 bool mappable,
2335 bool nonblock);
2336int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle);
2337int i915_gem_evict_everything(struct drm_device *dev);
2260 2338
2339/* i915_gem_gtt.c */
2261void i915_check_and_clear_faults(struct drm_device *dev); 2340void i915_check_and_clear_faults(struct drm_device *dev);
2262void i915_gem_suspend_gtt_mappings(struct drm_device *dev); 2341void i915_gem_suspend_gtt_mappings(struct drm_device *dev);
2263void i915_gem_restore_gtt_mappings(struct drm_device *dev); 2342void i915_gem_restore_gtt_mappings(struct drm_device *dev);
2264int __must_check i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj); 2343int __must_check i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj);
2265void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
2266 enum i915_cache_level cache_level);
2267void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj);
2268void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj); 2344void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj);
2269void i915_gem_init_global_gtt(struct drm_device *dev); 2345void i915_gem_init_global_gtt(struct drm_device *dev);
2270void i915_gem_setup_global_gtt(struct drm_device *dev, unsigned long start, 2346void i915_gem_setup_global_gtt(struct drm_device *dev, unsigned long start,
@@ -2275,18 +2351,64 @@ static inline void i915_gem_chipset_flush(struct drm_device *dev)
2275 if (INTEL_INFO(dev)->gen < 6) 2351 if (INTEL_INFO(dev)->gen < 6)
2276 intel_gtt_chipset_flush(); 2352 intel_gtt_chipset_flush();
2277} 2353}
2354int i915_gem_init_ppgtt(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt);
2355static inline bool intel_enable_ppgtt(struct drm_device *dev, bool full)
2356{
2357 if (i915.enable_ppgtt == 0 || !HAS_ALIASING_PPGTT(dev))
2358 return false;
2278 2359
2360 if (i915.enable_ppgtt == 1 && full)
2361 return false;
2279 2362
2280/* i915_gem_evict.c */ 2363#ifdef CONFIG_INTEL_IOMMU
2281int __must_check i915_gem_evict_something(struct drm_device *dev, 2364 /* Disable ppgtt on SNB if VT-d is on. */
2282 struct i915_address_space *vm, 2365 if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped) {
2283 int min_size, 2366 DRM_INFO("Disabling PPGTT because VT-d is on\n");
2284 unsigned alignment, 2367 return false;
2285 unsigned cache_level, 2368 }
2286 bool mappable, 2369#endif
2287 bool nonblock); 2370
2288int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle); 2371 if (full)
2289int i915_gem_evict_everything(struct drm_device *dev); 2372 return HAS_PPGTT(dev);
2373 else
2374 return HAS_ALIASING_PPGTT(dev);
2375}
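(Rough decision table implied by intel_enable_ppgtt() above and the macro spellings that feed it; a sketch of the policy, not an exhaustive statement.)

	/* Decision table (sketch):
	 *   i915.enable_ppgtt == 0   -> no PPGTT at all
	 *   i915.enable_ppgtt == 1   -> aliasing PPGTT only (full PPGTT off)
	 *   gen6 with VT-d active    -> forced off
	 *   otherwise                -> full PPGTT iff HAS_PPGTT(),
	 *                               aliasing PPGTT iff HAS_ALIASING_PPGTT()
	 */
	bool full_ppgtt = USES_FULL_PPGTT(dev);	/* == intel_enable_ppgtt(dev, true) */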
2376
2377static inline void ppgtt_release(struct kref *kref)
2378{
2379 struct i915_hw_ppgtt *ppgtt = container_of(kref, struct i915_hw_ppgtt, ref);
2380 struct drm_device *dev = ppgtt->base.dev;
2381 struct drm_i915_private *dev_priv = dev->dev_private;
2382 struct i915_address_space *vm = &ppgtt->base;
2383
2384 if (ppgtt == dev_priv->mm.aliasing_ppgtt ||
2385 (list_empty(&vm->active_list) && list_empty(&vm->inactive_list))) {
2386 ppgtt->base.cleanup(&ppgtt->base);
2387 return;
2388 }
2389
2390 /*
2391 * Make sure vmas are unbound before we take down the drm_mm
2392 *
2393 * FIXME: Proper refcounting should take care of this, this shouldn't be
2394 * needed at all.
2395 */
2396 if (!list_empty(&vm->active_list)) {
2397 struct i915_vma *vma;
2398
2399 list_for_each_entry(vma, &vm->active_list, mm_list)
2400 if (WARN_ON(list_empty(&vma->vma_link) ||
2401 list_is_singular(&vma->vma_link)))
2402 break;
2403
2404 i915_gem_evict_vm(&ppgtt->base, true);
2405 } else {
2406 i915_gem_retire_requests(dev);
2407 i915_gem_evict_vm(&ppgtt->base, false);
2408 }
2409
2410 ppgtt->base.cleanup(&ppgtt->base);
2411}
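(Sketch of the reference-counting contract around ppgtt_release(): users of a PPGTT, such as the contexts created later in this series, take a kref and drop it through this release function; the surrounding statements are illustrative only.)

	kref_get(&ppgtt->ref);			/* a context starts sharing this VM    */
	/* ... the context lives its life ... */
	kref_put(&ppgtt->ref, ppgtt_release);	/* last put unbinds VMAs, frees the VM */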
2290 2412
2291/* i915_gem_stolen.c */ 2413/* i915_gem_stolen.c */
2292int i915_gem_init_stolen(struct drm_device *dev); 2414int i915_gem_init_stolen(struct drm_device *dev);
@@ -2566,4 +2688,31 @@ timespec_to_jiffies_timeout(const struct timespec *value)
2566 return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1); 2688 return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1);
2567} 2689}
2568 2690
2691/*
2692 * If you need to wait X milliseconds between events A and B, but event B
2693 * doesn't happen exactly after event A, you record the timestamp (jiffies) of
2694 * when event A happened, then just before event B you call this function and
2695 * pass the timestamp as the first argument, and X as the second argument.
2696 */
2697static inline void
2698wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies, int to_wait_ms)
2699{
2700 unsigned long target_jiffies, tmp_jiffies, remaining_jiffies;
2701
2702 /*
2703 * Don't re-read the value of "jiffies" every time since it may change
2704 * behind our back and break the math.
2705 */
2706 tmp_jiffies = jiffies;
2707 target_jiffies = timestamp_jiffies +
2708 msecs_to_jiffies_timeout(to_wait_ms);
2709
2710 if (time_after(target_jiffies, tmp_jiffies)) {
2711 remaining_jiffies = target_jiffies - tmp_jiffies;
2712 while (remaining_jiffies)
2713 remaining_jiffies =
2714 schedule_timeout_uninterruptible(remaining_jiffies);
2715 }
2716}
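(Usage sketch for the helper above, following its own comment: stamp jiffies when event A happens, then call the helper just before event B; the 500 ms figure is an arbitrary example.)

	unsigned long event_a_stamp;

	event_a_stamp = jiffies;		/* event A has just happened */
	/* ... unrelated work; event B is about to start ... */
	wait_remaining_ms_from_jiffies(event_a_stamp, 500);	/* guarantee >= 500 ms since A */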
2717
2569#endif 2718#endif
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 00c836154725..a8a069f97c56 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -204,7 +204,7 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
204 pinned = 0; 204 pinned = 0;
205 mutex_lock(&dev->struct_mutex); 205 mutex_lock(&dev->struct_mutex);
206 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) 206 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
207 if (obj->pin_count) 207 if (i915_gem_obj_is_pinned(obj))
208 pinned += i915_gem_obj_ggtt_size(obj); 208 pinned += i915_gem_obj_ggtt_size(obj);
209 mutex_unlock(&dev->struct_mutex); 209 mutex_unlock(&dev->struct_mutex);
210 210
@@ -476,7 +476,7 @@ i915_gem_shmem_pread(struct drm_device *dev,
476 476
477 mutex_unlock(&dev->struct_mutex); 477 mutex_unlock(&dev->struct_mutex);
478 478
479 if (likely(!i915_prefault_disable) && !prefaulted) { 479 if (likely(!i915.prefault_disable) && !prefaulted) {
480 ret = fault_in_multipages_writeable(user_data, remain); 480 ret = fault_in_multipages_writeable(user_data, remain);
481 /* Userspace is tricking us, but we've already clobbered 481 /* Userspace is tricking us, but we've already clobbered
482 * its pages with the prefault and promised to write the 482 * its pages with the prefault and promised to write the
@@ -651,7 +651,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
651 } 651 }
652 652
653out_unpin: 653out_unpin:
654 i915_gem_object_unpin(obj); 654 i915_gem_object_ggtt_unpin(obj);
655out: 655out:
656 return ret; 656 return ret;
657} 657}
@@ -868,7 +868,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
868 args->size)) 868 args->size))
869 return -EFAULT; 869 return -EFAULT;
870 870
871 if (likely(!i915_prefault_disable)) { 871 if (likely(!i915.prefault_disable)) {
872 ret = fault_in_multipages_readable(to_user_ptr(args->data_ptr), 872 ret = fault_in_multipages_readable(to_user_ptr(args->data_ptr),
873 args->size); 873 args->size);
874 if (ret) 874 if (ret)
@@ -1420,7 +1420,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1420 /* Finally, remap it using the new GTT offset */ 1420 /* Finally, remap it using the new GTT offset */
1421 ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn); 1421 ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
1422unpin: 1422unpin:
1423 i915_gem_object_unpin(obj); 1423 i915_gem_object_ggtt_unpin(obj);
1424unlock: 1424unlock:
1425 mutex_unlock(&dev->struct_mutex); 1425 mutex_unlock(&dev->struct_mutex);
1426out: 1426out:
@@ -1453,6 +1453,7 @@ out:
1453 ret = VM_FAULT_OOM; 1453 ret = VM_FAULT_OOM;
1454 break; 1454 break;
1455 case -ENOSPC: 1455 case -ENOSPC:
1456 case -EFAULT:
1456 ret = VM_FAULT_SIGBUS; 1457 ret = VM_FAULT_SIGBUS;
1457 break; 1458 break;
1458 default: 1459 default:
@@ -1618,7 +1619,7 @@ i915_gem_mmap_gtt(struct drm_file *file,
1618 1619
1619 if (obj->madv != I915_MADV_WILLNEED) { 1620 if (obj->madv != I915_MADV_WILLNEED) {
1620 DRM_ERROR("Attempting to mmap a purgeable buffer\n"); 1621 DRM_ERROR("Attempting to mmap a purgeable buffer\n");
1621 ret = -EINVAL; 1622 ret = -EFAULT;
1622 goto out; 1623 goto out;
1623 } 1624 }
1624 1625
@@ -1972,7 +1973,7 @@ i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
1972 1973
1973 if (obj->madv != I915_MADV_WILLNEED) { 1974 if (obj->madv != I915_MADV_WILLNEED) {
1974 DRM_ERROR("Attempting to obtain a purgeable object\n"); 1975 DRM_ERROR("Attempting to obtain a purgeable object\n");
1975 return -EINVAL; 1976 return -EFAULT;
1976 } 1977 }
1977 1978
1978 BUG_ON(obj->pages_pin_count); 1979 BUG_ON(obj->pages_pin_count);
@@ -2035,13 +2036,17 @@ static void
2035i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj) 2036i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
2036{ 2037{
2037 struct drm_i915_private *dev_priv = obj->base.dev->dev_private; 2038 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2038 struct i915_address_space *ggtt_vm = &dev_priv->gtt.base; 2039 struct i915_address_space *vm;
2039 struct i915_vma *vma = i915_gem_obj_to_vma(obj, ggtt_vm); 2040 struct i915_vma *vma;
2040 2041
2041 BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS); 2042 BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS);
2042 BUG_ON(!obj->active); 2043 BUG_ON(!obj->active);
2043 2044
2044 list_move_tail(&vma->mm_list, &ggtt_vm->inactive_list); 2045 list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
2046 vma = i915_gem_obj_to_vma(obj, vm);
2047 if (vma && !list_empty(&vma->mm_list))
2048 list_move_tail(&vma->mm_list, &vm->inactive_list);
2049 }
2045 2050
2046 list_del_init(&obj->ring_list); 2051 list_del_init(&obj->ring_list);
2047 obj->ring = NULL; 2052 obj->ring = NULL;
@@ -2237,125 +2242,47 @@ i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
2237 spin_unlock(&file_priv->mm.lock); 2242 spin_unlock(&file_priv->mm.lock);
2238} 2243}
2239 2244
2240static bool i915_head_inside_object(u32 acthd, struct drm_i915_gem_object *obj, 2245static bool i915_context_is_banned(struct drm_i915_private *dev_priv,
2241 struct i915_address_space *vm) 2246 const struct i915_hw_context *ctx)
2242{
2243 if (acthd >= i915_gem_obj_offset(obj, vm) &&
2244 acthd < i915_gem_obj_offset(obj, vm) + obj->base.size)
2245 return true;
2246
2247 return false;
2248}
2249
2250static bool i915_head_inside_request(const u32 acthd_unmasked,
2251 const u32 request_start,
2252 const u32 request_end)
2253{
2254 const u32 acthd = acthd_unmasked & HEAD_ADDR;
2255
2256 if (request_start < request_end) {
2257 if (acthd >= request_start && acthd < request_end)
2258 return true;
2259 } else if (request_start > request_end) {
2260 if (acthd >= request_start || acthd < request_end)
2261 return true;
2262 }
2263
2264 return false;
2265}
2266
2267static struct i915_address_space *
2268request_to_vm(struct drm_i915_gem_request *request)
2269{ 2247{
2270 struct drm_i915_private *dev_priv = request->ring->dev->dev_private; 2248 unsigned long elapsed;
2271 struct i915_address_space *vm;
2272
2273 vm = &dev_priv->gtt.base;
2274
2275 return vm;
2276}
2277 2249
2278static bool i915_request_guilty(struct drm_i915_gem_request *request, 2250 elapsed = get_seconds() - ctx->hang_stats.guilty_ts;
2279 const u32 acthd, bool *inside)
2280{
2281 /* There is a possibility that unmasked head address
2282 * pointing inside the ring, matches the batch_obj address range.
2283 * However this is extremely unlikely.
2284 */
2285 if (request->batch_obj) {
2286 if (i915_head_inside_object(acthd, request->batch_obj,
2287 request_to_vm(request))) {
2288 *inside = true;
2289 return true;
2290 }
2291 }
2292
2293 if (i915_head_inside_request(acthd, request->head, request->tail)) {
2294 *inside = false;
2295 return true;
2296 }
2297
2298 return false;
2299}
2300
2301static bool i915_context_is_banned(const struct i915_ctx_hang_stats *hs)
2302{
2303 const unsigned long elapsed = get_seconds() - hs->guilty_ts;
2304 2251
2305 if (hs->banned) 2252 if (ctx->hang_stats.banned)
2306 return true; 2253 return true;
2307 2254
2308 if (elapsed <= DRM_I915_CTX_BAN_PERIOD) { 2255 if (elapsed <= DRM_I915_CTX_BAN_PERIOD) {
2309 DRM_ERROR("context hanging too fast, declaring banned!\n"); 2256 if (dev_priv->gpu_error.stop_rings == 0 &&
2257 i915_gem_context_is_default(ctx)) {
2258 DRM_ERROR("gpu hanging too fast, banning!\n");
2259 } else {
2260 DRM_DEBUG("context hanging too fast, banning!\n");
2261 }
2262
2310 return true; 2263 return true;
2311 } 2264 }
2312 2265
2313 return false; 2266 return false;
2314} 2267}
2315 2268
2316static void i915_set_reset_status(struct intel_ring_buffer *ring, 2269static void i915_set_reset_status(struct drm_i915_private *dev_priv,
2317 struct drm_i915_gem_request *request, 2270 struct i915_hw_context *ctx,
2318 u32 acthd) 2271 const bool guilty)
2319{ 2272{
2320 struct i915_ctx_hang_stats *hs = NULL; 2273 struct i915_ctx_hang_stats *hs;
2321 bool inside, guilty;
2322 unsigned long offset = 0;
2323
2324 /* Innocent until proven guilty */
2325 guilty = false;
2326
2327 if (request->batch_obj)
2328 offset = i915_gem_obj_offset(request->batch_obj,
2329 request_to_vm(request));
2330 2274
2331 if (ring->hangcheck.action != HANGCHECK_WAIT && 2275 if (WARN_ON(!ctx))
2332 i915_request_guilty(request, acthd, &inside)) { 2276 return;
2333 DRM_DEBUG("%s hung %s bo (0x%lx ctx %d) at 0x%x\n",
2334 ring->name,
2335 inside ? "inside" : "flushing",
2336 offset,
2337 request->ctx ? request->ctx->id : 0,
2338 acthd);
2339 2277
2340 guilty = true; 2278 hs = &ctx->hang_stats;
2341 }
2342 2279
2343 /* If contexts are disabled or this is the default context, use 2280 if (guilty) {
2344 * file_priv->reset_state 2281 hs->banned = i915_context_is_banned(dev_priv, ctx);
2345 */ 2282 hs->batch_active++;
2346 if (request->ctx && request->ctx->id != DEFAULT_CONTEXT_ID) 2283 hs->guilty_ts = get_seconds();
2347 hs = &request->ctx->hang_stats; 2284 } else {
2348 else if (request->file_priv) 2285 hs->batch_pending++;
2349 hs = &request->file_priv->hang_stats;
2350
2351 if (hs) {
2352 if (guilty) {
2353 hs->banned = i915_context_is_banned(hs);
2354 hs->batch_active++;
2355 hs->guilty_ts = get_seconds();
2356 } else {
2357 hs->batch_pending++;
2358 }
2359 } 2286 }
2360} 2287}
2361 2288
@@ -2370,19 +2297,39 @@ static void i915_gem_free_request(struct drm_i915_gem_request *request)
2370 kfree(request); 2297 kfree(request);
2371} 2298}
2372 2299
2373static void i915_gem_reset_ring_status(struct drm_i915_private *dev_priv, 2300static struct drm_i915_gem_request *
2374 struct intel_ring_buffer *ring) 2301i915_gem_find_first_non_complete(struct intel_ring_buffer *ring)
2375{ 2302{
2376 u32 completed_seqno = ring->get_seqno(ring, false);
2377 u32 acthd = intel_ring_get_active_head(ring);
2378 struct drm_i915_gem_request *request; 2303 struct drm_i915_gem_request *request;
2304 const u32 completed_seqno = ring->get_seqno(ring, false);
2379 2305
2380 list_for_each_entry(request, &ring->request_list, list) { 2306 list_for_each_entry(request, &ring->request_list, list) {
2381 if (i915_seqno_passed(completed_seqno, request->seqno)) 2307 if (i915_seqno_passed(completed_seqno, request->seqno))
2382 continue; 2308 continue;
2383 2309
2384 i915_set_reset_status(ring, request, acthd); 2310 return request;
2385 } 2311 }
2312
2313 return NULL;
2314}
2315
2316static void i915_gem_reset_ring_status(struct drm_i915_private *dev_priv,
2317 struct intel_ring_buffer *ring)
2318{
2319 struct drm_i915_gem_request *request;
2320 bool ring_hung;
2321
2322 request = i915_gem_find_first_non_complete(ring);
2323
2324 if (request == NULL)
2325 return;
2326
2327 ring_hung = ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG;
2328
2329 i915_set_reset_status(dev_priv, request->ctx, ring_hung);
2330
2331 list_for_each_entry_continue(request, &ring->request_list, list)
2332 i915_set_reset_status(dev_priv, request->ctx, false);
2386} 2333}
2387 2334
2388static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv, 2335static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
@@ -2456,6 +2403,8 @@ void i915_gem_reset(struct drm_device *dev)
2456 2403
2457 i915_gem_cleanup_ringbuffer(dev); 2404 i915_gem_cleanup_ringbuffer(dev);
2458 2405
2406 i915_gem_context_reset(dev);
2407
2459 i915_gem_restore_fences(dev); 2408 i915_gem_restore_fences(dev);
2460} 2409}
2461 2410
@@ -2474,6 +2423,24 @@ i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
2474 2423
2475 seqno = ring->get_seqno(ring, true); 2424 seqno = ring->get_seqno(ring, true);
2476 2425
2426 /* Move any buffers on the active list that are no longer referenced
2427 * by the ringbuffer to the flushing/inactive lists as appropriate,
2428 * before we free the context associated with the requests.
2429 */
2430 while (!list_empty(&ring->active_list)) {
2431 struct drm_i915_gem_object *obj;
2432
2433 obj = list_first_entry(&ring->active_list,
2434 struct drm_i915_gem_object,
2435 ring_list);
2436
2437 if (!i915_seqno_passed(seqno, obj->last_read_seqno))
2438 break;
2439
2440 i915_gem_object_move_to_inactive(obj);
2441 }
2442
2443
2477 while (!list_empty(&ring->request_list)) { 2444 while (!list_empty(&ring->request_list)) {
2478 struct drm_i915_gem_request *request; 2445 struct drm_i915_gem_request *request;
2479 2446
@@ -2495,22 +2462,6 @@ i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
2495 i915_gem_free_request(request); 2462 i915_gem_free_request(request);
2496 } 2463 }
2497 2464
2498 /* Move any buffers on the active list that are no longer referenced
2499 * by the ringbuffer to the flushing/inactive lists as appropriate.
2500 */
2501 while (!list_empty(&ring->active_list)) {
2502 struct drm_i915_gem_object *obj;
2503
2504 obj = list_first_entry(&ring->active_list,
2505 struct drm_i915_gem_object,
2506 ring_list);
2507
2508 if (!i915_seqno_passed(seqno, obj->last_read_seqno))
2509 break;
2510
2511 i915_gem_object_move_to_inactive(obj);
2512 }
2513
2514 if (unlikely(ring->trace_irq_seqno && 2465 if (unlikely(ring->trace_irq_seqno &&
2515 i915_seqno_passed(seqno, ring->trace_irq_seqno))) { 2466 i915_seqno_passed(seqno, ring->trace_irq_seqno))) {
2516 ring->irq_put(ring); 2467 ring->irq_put(ring);
@@ -2753,9 +2704,6 @@ int i915_vma_unbind(struct i915_vma *vma)
2753 drm_i915_private_t *dev_priv = obj->base.dev->dev_private; 2704 drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
2754 int ret; 2705 int ret;
2755 2706
2756 /* For now we only ever use 1 vma per object */
2757 WARN_ON(!list_is_singular(&obj->vma_list));
2758
2759 if (list_empty(&vma->vma_link)) 2707 if (list_empty(&vma->vma_link))
2760 return 0; 2708 return 0;
2761 2709
@@ -2765,7 +2713,7 @@ int i915_vma_unbind(struct i915_vma *vma)
2765 return 0; 2713 return 0;
2766 } 2714 }
2767 2715
2768 if (obj->pin_count) 2716 if (vma->pin_count)
2769 return -EBUSY; 2717 return -EBUSY;
2770 2718
2771 BUG_ON(obj->pages == NULL); 2719 BUG_ON(obj->pages == NULL);
@@ -2787,12 +2735,8 @@ int i915_vma_unbind(struct i915_vma *vma)
2787 2735
2788 trace_i915_vma_unbind(vma); 2736 trace_i915_vma_unbind(vma);
2789 2737
2790 if (obj->has_global_gtt_mapping) 2738 vma->unbind_vma(vma);
2791 i915_gem_gtt_unbind_object(obj); 2739
2792 if (obj->has_aliasing_ppgtt_mapping) {
2793 i915_ppgtt_unbind_object(dev_priv->mm.aliasing_ppgtt, obj);
2794 obj->has_aliasing_ppgtt_mapping = 0;
2795 }
2796 i915_gem_gtt_finish_object(obj); 2740 i915_gem_gtt_finish_object(obj);
2797 2741
2798 list_del(&vma->mm_list); 2742 list_del(&vma->mm_list);
@@ -2829,7 +2773,7 @@ i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj)
2829 if (!i915_gem_obj_ggtt_bound(obj)) 2773 if (!i915_gem_obj_ggtt_bound(obj))
2830 return 0; 2774 return 0;
2831 2775
2832 if (obj->pin_count) 2776 if (i915_gem_obj_to_ggtt(obj)->pin_count)
2833 return -EBUSY; 2777 return -EBUSY;
2834 2778
2835 BUG_ON(obj->pages == NULL); 2779 BUG_ON(obj->pages == NULL);
@@ -2845,7 +2789,7 @@ int i915_gpu_idle(struct drm_device *dev)
2845 2789
2846 /* Flush everything onto the inactive list. */ 2790 /* Flush everything onto the inactive list. */
2847 for_each_ring(ring, dev_priv, i) { 2791 for_each_ring(ring, dev_priv, i) {
2848 ret = i915_switch_context(ring, NULL, DEFAULT_CONTEXT_ID); 2792 ret = i915_switch_context(ring, NULL, ring->default_context);
2849 if (ret) 2793 if (ret)
2850 return ret; 2794 return ret;
2851 2795
@@ -3312,17 +3256,12 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
3312 3256
3313 i915_gem_object_pin_pages(obj); 3257 i915_gem_object_pin_pages(obj);
3314 3258
3315 BUG_ON(!i915_is_ggtt(vm));
3316
3317 vma = i915_gem_obj_lookup_or_create_vma(obj, vm); 3259 vma = i915_gem_obj_lookup_or_create_vma(obj, vm);
3318 if (IS_ERR(vma)) { 3260 if (IS_ERR(vma)) {
3319 ret = PTR_ERR(vma); 3261 ret = PTR_ERR(vma);
3320 goto err_unpin; 3262 goto err_unpin;
3321 } 3263 }
3322 3264
3323 /* For now we only ever use 1 vma per object */
3324 WARN_ON(!list_is_singular(&obj->vma_list));
3325
3326search_free: 3265search_free:
3327 ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node, 3266 ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node,
3328 size, alignment, 3267 size, alignment,
@@ -3528,14 +3467,13 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
3528 enum i915_cache_level cache_level) 3467 enum i915_cache_level cache_level)
3529{ 3468{
3530 struct drm_device *dev = obj->base.dev; 3469 struct drm_device *dev = obj->base.dev;
3531 drm_i915_private_t *dev_priv = dev->dev_private;
3532 struct i915_vma *vma; 3470 struct i915_vma *vma;
3533 int ret; 3471 int ret;
3534 3472
3535 if (obj->cache_level == cache_level) 3473 if (obj->cache_level == cache_level)
3536 return 0; 3474 return 0;
3537 3475
3538 if (obj->pin_count) { 3476 if (i915_gem_obj_is_pinned(obj)) {
3539 DRM_DEBUG("can not change the cache level of pinned objects\n"); 3477 DRM_DEBUG("can not change the cache level of pinned objects\n");
3540 return -EBUSY; 3478 return -EBUSY;
3541 } 3479 }
@@ -3567,11 +3505,8 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
3567 return ret; 3505 return ret;
3568 } 3506 }
3569 3507
3570 if (obj->has_global_gtt_mapping) 3508 list_for_each_entry(vma, &obj->vma_list, vma_link)
3571 i915_gem_gtt_bind_object(obj, cache_level); 3509 vma->bind_vma(vma, cache_level, 0);
3572 if (obj->has_aliasing_ppgtt_mapping)
3573 i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
3574 obj, cache_level);
3575 } 3510 }
3576 3511
3577 list_for_each_entry(vma, &obj->vma_list, vma_link) 3512 list_for_each_entry(vma, &obj->vma_list, vma_link)
@@ -3695,7 +3630,7 @@ static bool is_pin_display(struct drm_i915_gem_object *obj)
3695 * subtracting the potential reference by the user, any pin_count 3630 * subtracting the potential reference by the user, any pin_count
3696 * remains, it must be due to another use by the display engine. 3631 * remains, it must be due to another use by the display engine.
3697 */ 3632 */
3698 return obj->pin_count - !!obj->user_pin_count; 3633 return i915_gem_obj_to_ggtt(obj)->pin_count - !!obj->user_pin_count;
3699} 3634}
3700 3635
3701/* 3636/*
@@ -3769,7 +3704,7 @@ err_unpin_display:
3769void 3704void
3770i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj) 3705i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj)
3771{ 3706{
3772 i915_gem_object_unpin(obj); 3707 i915_gem_object_ggtt_unpin(obj);
3773 obj->pin_display = is_pin_display(obj); 3708 obj->pin_display = is_pin_display(obj);
3774} 3709}
3775 3710
@@ -3899,21 +3834,22 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
3899 bool map_and_fenceable, 3834 bool map_and_fenceable,
3900 bool nonblocking) 3835 bool nonblocking)
3901{ 3836{
3837 const u32 flags = map_and_fenceable ? GLOBAL_BIND : 0;
3902 struct i915_vma *vma; 3838 struct i915_vma *vma;
3903 int ret; 3839 int ret;
3904 3840
3905 if (WARN_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
3906 return -EBUSY;
3907
3908 WARN_ON(map_and_fenceable && !i915_is_ggtt(vm)); 3841 WARN_ON(map_and_fenceable && !i915_is_ggtt(vm));
3909 3842
3910 vma = i915_gem_obj_to_vma(obj, vm); 3843 vma = i915_gem_obj_to_vma(obj, vm);
3911 3844
3912 if (vma) { 3845 if (vma) {
3846 if (WARN_ON(vma->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
3847 return -EBUSY;
3848
3913 if ((alignment && 3849 if ((alignment &&
3914 vma->node.start & (alignment - 1)) || 3850 vma->node.start & (alignment - 1)) ||
3915 (map_and_fenceable && !obj->map_and_fenceable)) { 3851 (map_and_fenceable && !obj->map_and_fenceable)) {
3916 WARN(obj->pin_count, 3852 WARN(vma->pin_count,
3917 "bo is already pinned with incorrect alignment:" 3853 "bo is already pinned with incorrect alignment:"
3918 " offset=%lx, req.alignment=%x, req.map_and_fenceable=%d," 3854 " offset=%lx, req.alignment=%x, req.map_and_fenceable=%d,"
3919 " obj->map_and_fenceable=%d\n", 3855 " obj->map_and_fenceable=%d\n",
@@ -3927,34 +3863,34 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
3927 } 3863 }
3928 3864
3929 if (!i915_gem_obj_bound(obj, vm)) { 3865 if (!i915_gem_obj_bound(obj, vm)) {
3930 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
3931
3932 ret = i915_gem_object_bind_to_vm(obj, vm, alignment, 3866 ret = i915_gem_object_bind_to_vm(obj, vm, alignment,
3933 map_and_fenceable, 3867 map_and_fenceable,
3934 nonblocking); 3868 nonblocking);
3935 if (ret) 3869 if (ret)
3936 return ret; 3870 return ret;
3937 3871
3938 if (!dev_priv->mm.aliasing_ppgtt)
3939 i915_gem_gtt_bind_object(obj, obj->cache_level);
3940 } 3872 }
3941 3873
3942 if (!obj->has_global_gtt_mapping && map_and_fenceable) 3874 vma = i915_gem_obj_to_vma(obj, vm);
3943 i915_gem_gtt_bind_object(obj, obj->cache_level);
3944 3875
3945 obj->pin_count++; 3876 vma->bind_vma(vma, obj->cache_level, flags);
3877
3878 i915_gem_obj_to_vma(obj, vm)->pin_count++;
3946 obj->pin_mappable |= map_and_fenceable; 3879 obj->pin_mappable |= map_and_fenceable;
3947 3880
3948 return 0; 3881 return 0;
3949} 3882}
3950 3883
3951void 3884void
3952i915_gem_object_unpin(struct drm_i915_gem_object *obj) 3885i915_gem_object_ggtt_unpin(struct drm_i915_gem_object *obj)
3953{ 3886{
3954 BUG_ON(obj->pin_count == 0); 3887 struct i915_vma *vma = i915_gem_obj_to_ggtt(obj);
3955 BUG_ON(!i915_gem_obj_bound_any(obj)); 3888
3889 BUG_ON(!vma);
3890 BUG_ON(vma->pin_count == 0);
3891 BUG_ON(!i915_gem_obj_ggtt_bound(obj));
3956 3892
3957 if (--obj->pin_count == 0) 3893 if (--vma->pin_count == 0)
3958 obj->pin_mappable = false; 3894 obj->pin_mappable = false;
3959} 3895}
3960 3896
@@ -3966,6 +3902,9 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
3966 struct drm_i915_gem_object *obj; 3902 struct drm_i915_gem_object *obj;
3967 int ret; 3903 int ret;
3968 3904
3905 if (INTEL_INFO(dev)->gen >= 6)
3906 return -ENODEV;
3907
3969 ret = i915_mutex_lock_interruptible(dev); 3908 ret = i915_mutex_lock_interruptible(dev);
3970 if (ret) 3909 if (ret)
3971 return ret; 3910 return ret;
@@ -3978,7 +3917,7 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
3978 3917
3979 if (obj->madv != I915_MADV_WILLNEED) { 3918 if (obj->madv != I915_MADV_WILLNEED) {
3980 DRM_ERROR("Attempting to pin a purgeable buffer\n"); 3919 DRM_ERROR("Attempting to pin a purgeable buffer\n");
3981 ret = -EINVAL; 3920 ret = -EFAULT;
3982 goto out; 3921 goto out;
3983 } 3922 }
3984 3923
@@ -4038,7 +3977,7 @@ i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
4038 obj->user_pin_count--; 3977 obj->user_pin_count--;
4039 if (obj->user_pin_count == 0) { 3978 if (obj->user_pin_count == 0) {
4040 obj->pin_filp = NULL; 3979 obj->pin_filp = NULL;
4041 i915_gem_object_unpin(obj); 3980 i915_gem_object_ggtt_unpin(obj);
4042 } 3981 }
4043 3982
4044out: 3983out:
@@ -4118,7 +4057,7 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
4118 goto unlock; 4057 goto unlock;
4119 } 4058 }
4120 4059
4121 if (obj->pin_count) { 4060 if (i915_gem_obj_is_pinned(obj)) {
4122 ret = -EINVAL; 4061 ret = -EINVAL;
4123 goto out; 4062 goto out;
4124 } 4063 }
@@ -4229,12 +4168,11 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
4229 if (obj->phys_obj) 4168 if (obj->phys_obj)
4230 i915_gem_detach_phys_object(dev, obj); 4169 i915_gem_detach_phys_object(dev, obj);
4231 4170
4232 obj->pin_count = 0;
4233 /* NB: 0 or 1 elements */
4234 WARN_ON(!list_empty(&obj->vma_list) &&
4235 !list_is_singular(&obj->vma_list));
4236 list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) { 4171 list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
4237 int ret = i915_vma_unbind(vma); 4172 int ret;
4173
4174 vma->pin_count = 0;
4175 ret = i915_vma_unbind(vma);
4238 if (WARN_ON(ret == -ERESTARTSYS)) { 4176 if (WARN_ON(ret == -ERESTARTSYS)) {
4239 bool was_interruptible; 4177 bool was_interruptible;
4240 4178
@@ -4283,41 +4221,6 @@ struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
4283 return NULL; 4221 return NULL;
4284} 4222}
4285 4223
4286static struct i915_vma *__i915_gem_vma_create(struct drm_i915_gem_object *obj,
4287 struct i915_address_space *vm)
4288{
4289 struct i915_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL);
4290 if (vma == NULL)
4291 return ERR_PTR(-ENOMEM);
4292
4293 INIT_LIST_HEAD(&vma->vma_link);
4294 INIT_LIST_HEAD(&vma->mm_list);
4295 INIT_LIST_HEAD(&vma->exec_list);
4296 vma->vm = vm;
4297 vma->obj = obj;
4298
4299 /* Keep GGTT vmas first to make debug easier */
4300 if (i915_is_ggtt(vm))
4301 list_add(&vma->vma_link, &obj->vma_list);
4302 else
4303 list_add_tail(&vma->vma_link, &obj->vma_list);
4304
4305 return vma;
4306}
4307
4308struct i915_vma *
4309i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
4310 struct i915_address_space *vm)
4311{
4312 struct i915_vma *vma;
4313
4314 vma = i915_gem_obj_to_vma(obj, vm);
4315 if (!vma)
4316 vma = __i915_gem_vma_create(obj, vm);
4317
4318 return vma;
4319}
4320
4321void i915_gem_vma_destroy(struct i915_vma *vma) 4224void i915_gem_vma_destroy(struct i915_vma *vma)
4322{ 4225{
4323 WARN_ON(vma->node.allocated); 4226 WARN_ON(vma->node.allocated);
@@ -4508,9 +4411,15 @@ i915_gem_init_hw(struct drm_device *dev)
4508 LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED); 4411 LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);
4509 4412
4510 if (HAS_PCH_NOP(dev)) { 4413 if (HAS_PCH_NOP(dev)) {
4511 u32 temp = I915_READ(GEN7_MSG_CTL); 4414 if (IS_IVYBRIDGE(dev)) {
4512 temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK); 4415 u32 temp = I915_READ(GEN7_MSG_CTL);
4513 I915_WRITE(GEN7_MSG_CTL, temp); 4416 temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
4417 I915_WRITE(GEN7_MSG_CTL, temp);
4418 } else if (INTEL_INFO(dev)->gen >= 7) {
4419 u32 temp = I915_READ(HSW_NDE_RSTWRN_OPT);
4420 temp &= ~RESET_PCH_HANDSHAKE_ENABLE;
4421 I915_WRITE(HSW_NDE_RSTWRN_OPT, temp);
4422 }
4514 } 4423 }
4515 4424
4516 i915_gem_init_swizzling(dev); 4425 i915_gem_init_swizzling(dev);
@@ -4523,25 +4432,23 @@ i915_gem_init_hw(struct drm_device *dev)
4523 i915_gem_l3_remap(&dev_priv->ring[RCS], i); 4432 i915_gem_l3_remap(&dev_priv->ring[RCS], i);
4524 4433
4525 /* 4434 /*
4526 * XXX: There was some w/a described somewhere suggesting loading 4435 * XXX: Contexts should only be initialized once. Doing a switch to the
4527 * contexts before PPGTT. 4436 * default context, however, is something we'd like to do after
4437 * reset or thaw (the latter may not actually be necessary for HW, but
4438 * goes with our code better). Context switching requires rings (for
4439 * the do_switch), but before enabling PPGTT. So don't move this.
4528 */ 4440 */
4529 ret = i915_gem_context_init(dev); 4441 ret = i915_gem_context_enable(dev_priv);
4530 if (ret) { 4442 if (ret) {
4531 i915_gem_cleanup_ringbuffer(dev); 4443 DRM_ERROR("Context enable failed %d\n", ret);
4532 DRM_ERROR("Context initialization failed %d\n", ret); 4444 goto err_out;
4533 return ret;
4534 }
4535
4536 if (dev_priv->mm.aliasing_ppgtt) {
4537 ret = dev_priv->mm.aliasing_ppgtt->enable(dev);
4538 if (ret) {
4539 i915_gem_cleanup_aliasing_ppgtt(dev);
4540 DRM_INFO("PPGTT enable failed. This is not fatal, but unexpected\n");
4541 }
4542 } 4445 }
4543 4446
4544 return 0; 4447 return 0;
4448
4449err_out:
4450 i915_gem_cleanup_ringbuffer(dev);
4451 return ret;
4545} 4452}
4546 4453
4547int i915_gem_init(struct drm_device *dev) 4454int i915_gem_init(struct drm_device *dev)
@@ -4560,10 +4467,18 @@ int i915_gem_init(struct drm_device *dev)
4560 4467
4561 i915_gem_init_global_gtt(dev); 4468 i915_gem_init_global_gtt(dev);
4562 4469
4470 ret = i915_gem_context_init(dev);
4471 if (ret) {
4472 mutex_unlock(&dev->struct_mutex);
4473 return ret;
4474 }
4475
4563 ret = i915_gem_init_hw(dev); 4476 ret = i915_gem_init_hw(dev);
4564 mutex_unlock(&dev->struct_mutex); 4477 mutex_unlock(&dev->struct_mutex);
4565 if (ret) { 4478 if (ret) {
4566 i915_gem_cleanup_aliasing_ppgtt(dev); 4479 WARN_ON(dev_priv->mm.aliasing_ppgtt);
4480 i915_gem_context_fini(dev);
4481 drm_mm_takedown(&dev_priv->gtt.base.mm);
4567 return ret; 4482 return ret;
4568 } 4483 }
4569 4484
@@ -4658,14 +4573,16 @@ init_ring_lists(struct intel_ring_buffer *ring)
4658 INIT_LIST_HEAD(&ring->request_list); 4573 INIT_LIST_HEAD(&ring->request_list);
4659} 4574}
4660 4575
4661static void i915_init_vm(struct drm_i915_private *dev_priv, 4576void i915_init_vm(struct drm_i915_private *dev_priv,
4662 struct i915_address_space *vm) 4577 struct i915_address_space *vm)
4663{ 4578{
4579 if (!i915_is_ggtt(vm))
4580 drm_mm_init(&vm->mm, vm->start, vm->total);
4664 vm->dev = dev_priv->dev; 4581 vm->dev = dev_priv->dev;
4665 INIT_LIST_HEAD(&vm->active_list); 4582 INIT_LIST_HEAD(&vm->active_list);
4666 INIT_LIST_HEAD(&vm->inactive_list); 4583 INIT_LIST_HEAD(&vm->inactive_list);
4667 INIT_LIST_HEAD(&vm->global_link); 4584 INIT_LIST_HEAD(&vm->global_link);
4668 list_add(&vm->global_link, &dev_priv->vm_list); 4585 list_add_tail(&vm->global_link, &dev_priv->vm_list);
4669} 4586}
4670 4587
4671void 4588void
@@ -4950,6 +4867,7 @@ i915_gem_file_idle_work_handler(struct work_struct *work)
4950int i915_gem_open(struct drm_device *dev, struct drm_file *file) 4867int i915_gem_open(struct drm_device *dev, struct drm_file *file)
4951{ 4868{
4952 struct drm_i915_file_private *file_priv; 4869 struct drm_i915_file_private *file_priv;
4870 int ret;
4953 4871
4954 DRM_DEBUG_DRIVER("\n"); 4872 DRM_DEBUG_DRIVER("\n");
4955 4873
@@ -4965,9 +4883,11 @@ int i915_gem_open(struct drm_device *dev, struct drm_file *file)
4965 INIT_DELAYED_WORK(&file_priv->mm.idle_work, 4883 INIT_DELAYED_WORK(&file_priv->mm.idle_work,
4966 i915_gem_file_idle_work_handler); 4884 i915_gem_file_idle_work_handler);
4967 4885
4968 idr_init(&file_priv->context_idr); 4886 ret = i915_gem_context_open(dev, file);
4887 if (ret)
4888 kfree(file_priv);
4969 4889
4970 return 0; 4890 return ret;
4971} 4891}
4972 4892
4973static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task) 4893static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
@@ -5014,7 +4934,7 @@ i915_gem_inactive_count(struct shrinker *shrinker, struct shrink_control *sc)
5014 if (obj->active) 4934 if (obj->active)
5015 continue; 4935 continue;
5016 4936
5017 if (obj->pin_count == 0 && obj->pages_pin_count == 0) 4937 if (!i915_gem_obj_is_pinned(obj) && obj->pages_pin_count == 0)
5018 count += obj->base.size >> PAGE_SHIFT; 4938 count += obj->base.size >> PAGE_SHIFT;
5019 } 4939 }
5020 4940
@@ -5031,7 +4951,8 @@ unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o,
5031 struct drm_i915_private *dev_priv = o->base.dev->dev_private; 4951 struct drm_i915_private *dev_priv = o->base.dev->dev_private;
5032 struct i915_vma *vma; 4952 struct i915_vma *vma;
5033 4953
5034 if (vm == &dev_priv->mm.aliasing_ppgtt->base) 4954 if (!dev_priv->mm.aliasing_ppgtt ||
4955 vm == &dev_priv->mm.aliasing_ppgtt->base)
5035 vm = &dev_priv->gtt.base; 4956 vm = &dev_priv->gtt.base;
5036 4957
5037 BUG_ON(list_empty(&o->vma_list)); 4958 BUG_ON(list_empty(&o->vma_list));
@@ -5072,7 +4993,8 @@ unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
5072 struct drm_i915_private *dev_priv = o->base.dev->dev_private; 4993 struct drm_i915_private *dev_priv = o->base.dev->dev_private;
5073 struct i915_vma *vma; 4994 struct i915_vma *vma;
5074 4995
5075 if (vm == &dev_priv->mm.aliasing_ppgtt->base) 4996 if (!dev_priv->mm.aliasing_ppgtt ||
4997 vm == &dev_priv->mm.aliasing_ppgtt->base)
5076 vm = &dev_priv->gtt.base; 4998 vm = &dev_priv->gtt.base;
5077 4999
5078 BUG_ON(list_empty(&o->vma_list)); 5000 BUG_ON(list_empty(&o->vma_list));
@@ -5127,7 +5049,7 @@ struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj)
5127 return NULL; 5049 return NULL;
5128 5050
5129 vma = list_first_entry(&obj->vma_list, typeof(*vma), vma_link); 5051 vma = list_first_entry(&obj->vma_list, typeof(*vma), vma_link);
5130 if (WARN_ON(vma->vm != obj_to_ggtt(obj))) 5052 if (vma->vm != obj_to_ggtt(obj))
5131 return NULL; 5053 return NULL;
5132 5054
5133 return vma; 5055 return vma;
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index e08acaba5402..19fd3629795c 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -93,11 +93,19 @@
93 * I've seen in a spec to date, and that was a workaround for a non-shipping 93 * I've seen in a spec to date, and that was a workaround for a non-shipping
94 * part. It should be safe to decrease this, but it's more future proof as is. 94 * part. It should be safe to decrease this, but it's more future proof as is.
95 */ 95 */
96#define CONTEXT_ALIGN (64<<10) 96#define GEN6_CONTEXT_ALIGN (64<<10)
97#define GEN7_CONTEXT_ALIGN 4096
97 98
98static struct i915_hw_context * 99static int do_switch(struct intel_ring_buffer *ring,
99i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id); 100 struct i915_hw_context *to);
100static int do_switch(struct i915_hw_context *to); 101
102static size_t get_context_alignment(struct drm_device *dev)
103{
104 if (IS_GEN6(dev))
105 return GEN6_CONTEXT_ALIGN;
106
107 return GEN7_CONTEXT_ALIGN;
108}
101 109
102static int get_context_size(struct drm_device *dev) 110static int get_context_size(struct drm_device *dev)
103{ 111{
@@ -131,14 +139,43 @@ void i915_gem_context_free(struct kref *ctx_ref)
131{ 139{
132 struct i915_hw_context *ctx = container_of(ctx_ref, 140 struct i915_hw_context *ctx = container_of(ctx_ref,
133 typeof(*ctx), ref); 141 typeof(*ctx), ref);
142 struct i915_hw_ppgtt *ppgtt = NULL;
134 143
135 list_del(&ctx->link); 144 /* We refcount even the aliasing PPGTT to keep the code symmetric */
145 if (USES_PPGTT(ctx->obj->base.dev))
146 ppgtt = ctx_to_ppgtt(ctx);
147
148 /* XXX: Free up the object before tearing down the address space, in
149 * case we're bound in the PPGTT */
136 drm_gem_object_unreference(&ctx->obj->base); 150 drm_gem_object_unreference(&ctx->obj->base);
151
152 if (ppgtt)
153 kref_put(&ppgtt->ref, ppgtt_release);
154 list_del(&ctx->link);
137 kfree(ctx); 155 kfree(ctx);
138} 156}
139 157
158static struct i915_hw_ppgtt *
159create_vm_for_ctx(struct drm_device *dev, struct i915_hw_context *ctx)
160{
161 struct i915_hw_ppgtt *ppgtt;
162 int ret;
163
164 ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
165 if (!ppgtt)
166 return ERR_PTR(-ENOMEM);
167
168 ret = i915_gem_init_ppgtt(dev, ppgtt);
169 if (ret) {
170 kfree(ppgtt);
171 return ERR_PTR(ret);
172 }
173
174 return ppgtt;
175}
176
140static struct i915_hw_context * 177static struct i915_hw_context *
141create_hw_context(struct drm_device *dev, 178__create_hw_context(struct drm_device *dev,
142 struct drm_i915_file_private *file_priv) 179 struct drm_i915_file_private *file_priv)
143{ 180{
144 struct drm_i915_private *dev_priv = dev->dev_private; 181 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -166,18 +203,13 @@ create_hw_context(struct drm_device *dev,
166 goto err_out; 203 goto err_out;
167 } 204 }
168 205
169 /* The ring associated with the context object is handled by the normal
170 * object tracking code. We give an initial ring value simple to pass an
171 * assertion in the context switch code.
172 */
173 ctx->ring = &dev_priv->ring[RCS];
174 list_add_tail(&ctx->link, &dev_priv->context_list); 206 list_add_tail(&ctx->link, &dev_priv->context_list);
175 207
176 /* Default context will never have a file_priv */ 208 /* Default context will never have a file_priv */
177 if (file_priv == NULL) 209 if (file_priv == NULL)
178 return ctx; 210 return ctx;
179 211
180 ret = idr_alloc(&file_priv->context_idr, ctx, DEFAULT_CONTEXT_ID + 1, 0, 212 ret = idr_alloc(&file_priv->context_idr, ctx, DEFAULT_CONTEXT_ID, 0,
181 GFP_KERNEL); 213 GFP_KERNEL);
182 if (ret < 0) 214 if (ret < 0)
183 goto err_out; 215 goto err_out;
@@ -196,67 +228,138 @@ err_out:
196 return ERR_PTR(ret); 228 return ERR_PTR(ret);
197} 229}
198 230
199static inline bool is_default_context(struct i915_hw_context *ctx)
200{
201 return (ctx == ctx->ring->default_context);
202}
203
204/** 231/**
205 * The default context needs to exist per ring that uses contexts. It stores the 232 * The default context needs to exist per ring that uses contexts. It stores the
206 * context state of the GPU for applications that don't utilize HW contexts, as 233 * context state of the GPU for applications that don't utilize HW contexts, as
207 * well as an idle case. 234 * well as an idle case.
208 */ 235 */
209static int create_default_context(struct drm_i915_private *dev_priv) 236static struct i915_hw_context *
237i915_gem_create_context(struct drm_device *dev,
238 struct drm_i915_file_private *file_priv,
239 bool create_vm)
210{ 240{
241 const bool is_global_default_ctx = file_priv == NULL;
242 struct drm_i915_private *dev_priv = dev->dev_private;
211 struct i915_hw_context *ctx; 243 struct i915_hw_context *ctx;
212 int ret; 244 int ret = 0;
213 245
214 BUG_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex)); 246 BUG_ON(!mutex_is_locked(&dev->struct_mutex));
215 247
216 ctx = create_hw_context(dev_priv->dev, NULL); 248 ctx = __create_hw_context(dev, file_priv);
217 if (IS_ERR(ctx)) 249 if (IS_ERR(ctx))
218 return PTR_ERR(ctx); 250 return ctx;
219
220 /* We may need to do things with the shrinker which require us to
221 * immediately switch back to the default context. This can cause a
222 * problem as pinning the default context also requires GTT space which
223 * may not be available. To avoid this we always pin the
224 * default context.
225 */
226 ret = i915_gem_obj_ggtt_pin(ctx->obj, CONTEXT_ALIGN, false, false);
227 if (ret) {
228 DRM_DEBUG_DRIVER("Couldn't pin %d\n", ret);
229 goto err_destroy;
230 }
231 251
232 ret = do_switch(ctx); 252 if (is_global_default_ctx) {
233 if (ret) { 253 /* We may need to do things with the shrinker which
234 DRM_DEBUG_DRIVER("Switch failed %d\n", ret); 254 * require us to immediately switch back to the default
235 goto err_unpin; 255 * context. This can cause a problem as pinning the
256 * default context also requires GTT space which may not
257 * be available. To avoid this we always pin the default
258 * context.
259 */
260 ret = i915_gem_obj_ggtt_pin(ctx->obj,
261 get_context_alignment(dev),
262 false, false);
263 if (ret) {
264 DRM_DEBUG_DRIVER("Couldn't pin %d\n", ret);
265 goto err_destroy;
266 }
236 } 267 }
237 268
238 dev_priv->ring[RCS].default_context = ctx; 269 if (create_vm) {
270 struct i915_hw_ppgtt *ppgtt = create_vm_for_ctx(dev, ctx);
271
272 if (IS_ERR_OR_NULL(ppgtt)) {
273 DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n",
274 PTR_ERR(ppgtt));
275 ret = PTR_ERR(ppgtt);
276 goto err_unpin;
277 } else
278 ctx->vm = &ppgtt->base;
279
280 /* This case is reserved for the global default context and
281 * should only happen once. */
282 if (is_global_default_ctx) {
283 if (WARN_ON(dev_priv->mm.aliasing_ppgtt)) {
284 ret = -EEXIST;
285 goto err_unpin;
286 }
287
288 dev_priv->mm.aliasing_ppgtt = ppgtt;
289 }
290 } else if (USES_PPGTT(dev)) {
291 /* For platforms which only have aliasing PPGTT, we fake the
292 * address space and refcounting. */
293 ctx->vm = &dev_priv->mm.aliasing_ppgtt->base;
294 kref_get(&dev_priv->mm.aliasing_ppgtt->ref);
295 } else
296 ctx->vm = &dev_priv->gtt.base;
239 297
240 DRM_DEBUG_DRIVER("Default HW context loaded\n"); 298 return ctx;
241 return 0;
242 299
243err_unpin: 300err_unpin:
244 i915_gem_object_unpin(ctx->obj); 301 if (is_global_default_ctx)
302 i915_gem_object_ggtt_unpin(ctx->obj);
245err_destroy: 303err_destroy:
246 i915_gem_context_unreference(ctx); 304 i915_gem_context_unreference(ctx);
247 return ret; 305 return ERR_PTR(ret);
306}
307
308void i915_gem_context_reset(struct drm_device *dev)
309{
310 struct drm_i915_private *dev_priv = dev->dev_private;
311 struct intel_ring_buffer *ring;
312 int i;
313
314 if (!HAS_HW_CONTEXTS(dev))
315 return;
316
317 /* Prevent the hardware from restoring the last context (which hung) on
318 * the next switch */
319 for (i = 0; i < I915_NUM_RINGS; i++) {
320 struct i915_hw_context *dctx;
321 if (!(INTEL_INFO(dev)->ring_mask & (1<<i)))
322 continue;
323
324 /* Do a fake switch to the default context */
325 ring = &dev_priv->ring[i];
326 dctx = ring->default_context;
327 if (WARN_ON(!dctx))
328 continue;
329
330 if (!ring->last_context)
331 continue;
332
333 if (ring->last_context == dctx)
334 continue;
335
336 if (i == RCS) {
337 WARN_ON(i915_gem_obj_ggtt_pin(dctx->obj,
338 get_context_alignment(dev),
339 false, false));
340 /* Fake a finish/inactive */
341 dctx->obj->base.write_domain = 0;
342 dctx->obj->active = 0;
343 }
344
345 i915_gem_context_unreference(ring->last_context);
346 i915_gem_context_reference(dctx);
347 ring->last_context = dctx;
348 }
248} 349}
249 350
250int i915_gem_context_init(struct drm_device *dev) 351int i915_gem_context_init(struct drm_device *dev)
251{ 352{
252 struct drm_i915_private *dev_priv = dev->dev_private; 353 struct drm_i915_private *dev_priv = dev->dev_private;
253 int ret; 354 struct intel_ring_buffer *ring;
355 int i;
254 356
255 if (!HAS_HW_CONTEXTS(dev)) 357 if (!HAS_HW_CONTEXTS(dev))
256 return 0; 358 return 0;
257 359
258 /* If called from reset, or thaw... we've been here already */ 360 /* Init should only be called once per module load. Eventually the
259 if (dev_priv->ring[RCS].default_context) 361 * restriction on the context_disabled check can be loosened. */
362 if (WARN_ON(dev_priv->ring[RCS].default_context))
260 return 0; 363 return 0;
261 364
262 dev_priv->hw_context_size = round_up(get_context_size(dev), 4096); 365 dev_priv->hw_context_size = round_up(get_context_size(dev), 4096);
@@ -266,11 +369,23 @@ int i915_gem_context_init(struct drm_device *dev)
266 return -E2BIG; 369 return -E2BIG;
267 } 370 }
268 371
269 ret = create_default_context(dev_priv); 372 dev_priv->ring[RCS].default_context =
270 if (ret) { 373 i915_gem_create_context(dev, NULL, USES_PPGTT(dev));
271 DRM_DEBUG_DRIVER("Disabling HW Contexts; create failed %d\n", 374
272 ret); 375 if (IS_ERR_OR_NULL(dev_priv->ring[RCS].default_context)) {
273 return ret; 376 DRM_DEBUG_DRIVER("Disabling HW Contexts; create failed %ld\n",
377 PTR_ERR(dev_priv->ring[RCS].default_context));
378 return PTR_ERR(dev_priv->ring[RCS].default_context);
379 }
380
381 for (i = RCS + 1; i < I915_NUM_RINGS; i++) {
382 if (!(INTEL_INFO(dev)->ring_mask & (1<<i)))
383 continue;
384
385 ring = &dev_priv->ring[i];
386
387 /* NB: RCS will hold a ref for all rings */
388 ring->default_context = dev_priv->ring[RCS].default_context;
274 } 389 }
275 390
276 DRM_DEBUG_DRIVER("HW context support initialized\n"); 391 DRM_DEBUG_DRIVER("HW context support initialized\n");
@@ -281,6 +396,7 @@ void i915_gem_context_fini(struct drm_device *dev)
281{ 396{
282 struct drm_i915_private *dev_priv = dev->dev_private; 397 struct drm_i915_private *dev_priv = dev->dev_private;
283 struct i915_hw_context *dctx = dev_priv->ring[RCS].default_context; 398 struct i915_hw_context *dctx = dev_priv->ring[RCS].default_context;
399 int i;
284 400
285 if (!HAS_HW_CONTEXTS(dev)) 401 if (!HAS_HW_CONTEXTS(dev))
286 return; 402 return;
@@ -300,59 +416,129 @@ void i915_gem_context_fini(struct drm_device *dev)
300 if (dev_priv->ring[RCS].last_context == dctx) { 416 if (dev_priv->ring[RCS].last_context == dctx) {
301 /* Fake switch to NULL context */ 417 /* Fake switch to NULL context */
302 WARN_ON(dctx->obj->active); 418 WARN_ON(dctx->obj->active);
303 i915_gem_object_unpin(dctx->obj); 419 i915_gem_object_ggtt_unpin(dctx->obj);
304 i915_gem_context_unreference(dctx); 420 i915_gem_context_unreference(dctx);
421 dev_priv->ring[RCS].last_context = NULL;
305 } 422 }
306 423
307 i915_gem_object_unpin(dctx->obj); 424 for (i = 0; i < I915_NUM_RINGS; i++) {
425 struct intel_ring_buffer *ring = &dev_priv->ring[i];
426 if (!(INTEL_INFO(dev)->ring_mask & (1<<i)))
427 continue;
428
429 if (ring->last_context)
430 i915_gem_context_unreference(ring->last_context);
431
432 ring->default_context = NULL;
433 ring->last_context = NULL;
434 }
435
436 i915_gem_object_ggtt_unpin(dctx->obj);
308 i915_gem_context_unreference(dctx); 437 i915_gem_context_unreference(dctx);
309 dev_priv->ring[RCS].default_context = NULL; 438 dev_priv->mm.aliasing_ppgtt = NULL;
310 dev_priv->ring[RCS].last_context = NULL; 439}
440
441int i915_gem_context_enable(struct drm_i915_private *dev_priv)
442{
443 struct intel_ring_buffer *ring;
444 int ret, i;
445
446 if (!HAS_HW_CONTEXTS(dev_priv->dev))
447 return 0;
448
449 /* This is the only place the aliasing PPGTT gets enabled, which means
450 * it has to happen before we bail on reset */
451 if (dev_priv->mm.aliasing_ppgtt) {
452 struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
453 ppgtt->enable(ppgtt);
454 }
455
456 /* FIXME: We should make this work, even in reset */
457 if (i915_reset_in_progress(&dev_priv->gpu_error))
458 return 0;
459
460 BUG_ON(!dev_priv->ring[RCS].default_context);
461
462 for_each_ring(ring, dev_priv, i) {
463 ret = do_switch(ring, ring->default_context);
464 if (ret)
465 return ret;
466 }
467
468 return 0;
311} 469}
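i915_gem_context_enable() splits enabling out of init so it can run on every resume or reset: the aliasing PPGTT is programmed first, and only when no reset is pending does each ring get switched to its default context. A small userspace-style sketch of that ordering, with stub functions standing in for the driver's calls:

/* Sketch only -- toy stubs, not the driver's functions. */
#include <stdbool.h>
#include <stdio.h>

#define NUM_RINGS 4

static void enable_aliasing_ppgtt(void)
{
        printf("aliasing PPGTT enabled\n");
}

static int switch_to_default_context(int ring)
{
        printf("ring %d -> default context\n", ring);
        return 0;
}

static int context_enable(bool have_aliasing_ppgtt, bool reset_in_progress)
{
        int i, ret;

        /* The PPGTT enable must happen even when a reset is pending. */
        if (have_aliasing_ppgtt)
                enable_aliasing_ppgtt();

        /* The driver currently bails here during reset (see the FIXME). */
        if (reset_in_progress)
                return 0;

        for (i = 0; i < NUM_RINGS; i++) {
                ret = switch_to_default_context(i);
                if (ret)
                        return ret;
        }
        return 0;
}

int main(void)
{
        return context_enable(true, false);
}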
312 470
313static int context_idr_cleanup(int id, void *p, void *data) 471static int context_idr_cleanup(int id, void *p, void *data)
314{ 472{
315 struct i915_hw_context *ctx = p; 473 struct i915_hw_context *ctx = p;
316 474
317 BUG_ON(id == DEFAULT_CONTEXT_ID); 475 /* Ignore the default context because close will handle it */
476 if (i915_gem_context_is_default(ctx))
477 return 0;
318 478
319 i915_gem_context_unreference(ctx); 479 i915_gem_context_unreference(ctx);
320 return 0; 480 return 0;
321} 481}
322 482
323struct i915_ctx_hang_stats * 483int i915_gem_context_open(struct drm_device *dev, struct drm_file *file)
324i915_gem_context_get_hang_stats(struct drm_device *dev,
325 struct drm_file *file,
326 u32 id)
327{ 484{
328 struct drm_i915_file_private *file_priv = file->driver_priv; 485 struct drm_i915_file_private *file_priv = file->driver_priv;
329 struct i915_hw_context *ctx; 486 struct drm_i915_private *dev_priv = dev->dev_private;
330 487
331 if (id == DEFAULT_CONTEXT_ID) 488 if (!HAS_HW_CONTEXTS(dev)) {
332 return &file_priv->hang_stats; 489 /* Cheat for hang stats */
490 file_priv->private_default_ctx =
491 kzalloc(sizeof(struct i915_hw_context), GFP_KERNEL);
333 492
334 if (!HAS_HW_CONTEXTS(dev)) 493 if (file_priv->private_default_ctx == NULL)
335 return ERR_PTR(-ENOENT); 494 return -ENOMEM;
336 495
337 ctx = i915_gem_context_get(file->driver_priv, id); 496 file_priv->private_default_ctx->vm = &dev_priv->gtt.base;
338 if (ctx == NULL) 497 return 0;
339 return ERR_PTR(-ENOENT); 498 }
340 499
341 return &ctx->hang_stats; 500 idr_init(&file_priv->context_idr);
501
502 mutex_lock(&dev->struct_mutex);
503 file_priv->private_default_ctx =
504 i915_gem_create_context(dev, file_priv, USES_FULL_PPGTT(dev));
505 mutex_unlock(&dev->struct_mutex);
506
507 if (IS_ERR(file_priv->private_default_ctx)) {
508 idr_destroy(&file_priv->context_idr);
509 return PTR_ERR(file_priv->private_default_ctx);
510 }
511
512 return 0;
342} 513}
343 514
344void i915_gem_context_close(struct drm_device *dev, struct drm_file *file) 515void i915_gem_context_close(struct drm_device *dev, struct drm_file *file)
345{ 516{
346 struct drm_i915_file_private *file_priv = file->driver_priv; 517 struct drm_i915_file_private *file_priv = file->driver_priv;
347 518
519 if (!HAS_HW_CONTEXTS(dev)) {
520 kfree(file_priv->private_default_ctx);
521 return;
522 }
523
348 idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL); 524 idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL);
525 i915_gem_context_unreference(file_priv->private_default_ctx);
349 idr_destroy(&file_priv->context_idr); 526 idr_destroy(&file_priv->context_idr);
350} 527}
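With the per-file default context, every open of the device now owns a context: a real one created via i915_gem_create_context() when hardware contexts exist, or a bare allocation that merely carries hang stats when they do not, and close() tears down whichever was made. A standalone sketch of that lifecycle with toy types; toy_ctx and file_priv are illustrative, not the driver's structs.

/* Sketch only -- toy types, not the driver's structs. */
#include <stdbool.h>
#include <stdlib.h>

struct toy_ctx { int refcount; };

struct file_priv { struct toy_ctx *private_default_ctx; };

static void ctx_unref(struct toy_ctx *c)
{
        if (c && --c->refcount == 0)
                free(c);
}

static int file_open(struct file_priv *fp, bool has_hw_contexts)
{
        fp->private_default_ctx = calloc(1, sizeof(*fp->private_default_ctx));
        if (!fp->private_default_ctx)
                return -1;                              /* -ENOMEM in the driver */
        if (has_hw_contexts)
                fp->private_default_ctx->refcount = 1;  /* real context holds a ref */
        return 0;
}

static void file_close(struct file_priv *fp, bool has_hw_contexts)
{
        if (has_hw_contexts)
                ctx_unref(fp->private_default_ctx);     /* drop the ref taken at open */
        else
                free(fp->private_default_ctx);          /* bare kfree() of the cheat ctx */
}

int main(void)
{
        struct file_priv fp;

        if (file_open(&fp, true) == 0)
                file_close(&fp, true);
        return 0;
}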
351 528
352static struct i915_hw_context * 529struct i915_hw_context *
353i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id) 530i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id)
354{ 531{
355 return (struct i915_hw_context *)idr_find(&file_priv->context_idr, id); 532 struct i915_hw_context *ctx;
533
534 if (!HAS_HW_CONTEXTS(file_priv->dev_priv->dev))
535 return file_priv->private_default_ctx;
536
537 ctx = (struct i915_hw_context *)idr_find(&file_priv->context_idr, id);
538 if (!ctx)
539 return ERR_PTR(-ENOENT);
540
541 return ctx;
356} 542}
357 543
358static inline int 544static inline int
@@ -390,7 +576,10 @@ mi_set_context(struct intel_ring_buffer *ring,
390 MI_SAVE_EXT_STATE_EN | 576 MI_SAVE_EXT_STATE_EN |
391 MI_RESTORE_EXT_STATE_EN | 577 MI_RESTORE_EXT_STATE_EN |
392 hw_flags); 578 hw_flags);
393 /* w/a: MI_SET_CONTEXT must always be followed by MI_NOOP */ 579 /*
580 * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
581 * WaMiSetContext_Hang:snb,ivb,vlv
582 */
394 intel_ring_emit(ring, MI_NOOP); 583 intel_ring_emit(ring, MI_NOOP);
395 584
396 if (IS_GEN7(ring->dev)) 585 if (IS_GEN7(ring->dev))
@@ -403,21 +592,31 @@ mi_set_context(struct intel_ring_buffer *ring,
403 return ret; 592 return ret;
404} 593}
405 594
406static int do_switch(struct i915_hw_context *to) 595static int do_switch(struct intel_ring_buffer *ring,
596 struct i915_hw_context *to)
407{ 597{
408 struct intel_ring_buffer *ring = to->ring; 598 struct drm_i915_private *dev_priv = ring->dev->dev_private;
409 struct i915_hw_context *from = ring->last_context; 599 struct i915_hw_context *from = ring->last_context;
600 struct i915_hw_ppgtt *ppgtt = ctx_to_ppgtt(to);
410 u32 hw_flags = 0; 601 u32 hw_flags = 0;
411 int ret, i; 602 int ret, i;
412 603
413 BUG_ON(from != NULL && from->obj != NULL && from->obj->pin_count == 0); 604 if (from != NULL && ring == &dev_priv->ring[RCS]) {
605 BUG_ON(from->obj == NULL);
606 BUG_ON(!i915_gem_obj_is_pinned(from->obj));
607 }
414 608
415 if (from == to && !to->remap_slice) 609 if (from == to && from->last_ring == ring && !to->remap_slice)
416 return 0; 610 return 0;
417 611
418 ret = i915_gem_obj_ggtt_pin(to->obj, CONTEXT_ALIGN, false, false); 612 /* Trying to pin first makes error handling easier. */
419 if (ret) 613 if (ring == &dev_priv->ring[RCS]) {
420 return ret; 614 ret = i915_gem_obj_ggtt_pin(to->obj,
615 get_context_alignment(ring->dev),
616 false, false);
617 if (ret)
618 return ret;
619 }
421 620
422 /* 621 /*
423 * Pin can switch back to the default context if we end up calling into 622 * Pin can switch back to the default context if we end up calling into
@@ -426,6 +625,18 @@ static int do_switch(struct i915_hw_context *to)
426 */ 625 */
427 from = ring->last_context; 626 from = ring->last_context;
428 627
628 if (USES_FULL_PPGTT(ring->dev)) {
629 ret = ppgtt->switch_mm(ppgtt, ring, false);
630 if (ret)
631 goto unpin_out;
632 }
633
634 if (ring != &dev_priv->ring[RCS]) {
635 if (from)
636 i915_gem_context_unreference(from);
637 goto done;
638 }
639
429 /* 640 /*
430 * Clear this page out of any CPU caches for coherent swap-in/out. Note 641 * Clear this page out of any CPU caches for coherent swap-in/out. Note
431 * that thanks to write = false in this call and us not setting any gpu 642 * that thanks to write = false in this call and us not setting any gpu
@@ -435,22 +646,21 @@ static int do_switch(struct i915_hw_context *to)
435 * XXX: We need a real interface to do this instead of trickery. 646 * XXX: We need a real interface to do this instead of trickery.
436 */ 647 */
437 ret = i915_gem_object_set_to_gtt_domain(to->obj, false); 648 ret = i915_gem_object_set_to_gtt_domain(to->obj, false);
438 if (ret) { 649 if (ret)
439 i915_gem_object_unpin(to->obj); 650 goto unpin_out;
440 return ret;
441 }
442 651
443 if (!to->obj->has_global_gtt_mapping) 652 if (!to->obj->has_global_gtt_mapping) {
444 i915_gem_gtt_bind_object(to->obj, to->obj->cache_level); 653 struct i915_vma *vma = i915_gem_obj_to_vma(to->obj,
654 &dev_priv->gtt.base);
655 vma->bind_vma(vma, to->obj->cache_level, GLOBAL_BIND);
656 }
445 657
446 if (!to->is_initialized || is_default_context(to)) 658 if (!to->is_initialized || i915_gem_context_is_default(to))
447 hw_flags |= MI_RESTORE_INHIBIT; 659 hw_flags |= MI_RESTORE_INHIBIT;
448 660
449 ret = mi_set_context(ring, to, hw_flags); 661 ret = mi_set_context(ring, to, hw_flags);
450 if (ret) { 662 if (ret)
451 i915_gem_object_unpin(to->obj); 663 goto unpin_out;
452 return ret;
453 }
454 664
455 for (i = 0; i < MAX_L3_SLICES; i++) { 665 for (i = 0; i < MAX_L3_SLICES; i++) {
456 if (!(to->remap_slice & (1<<i))) 666 if (!(to->remap_slice & (1<<i)))
@@ -484,15 +694,23 @@ static int do_switch(struct i915_hw_context *to)
484 BUG_ON(from->obj->ring != ring); 694 BUG_ON(from->obj->ring != ring);
485 695
486 /* obj is kept alive until the next request by its active ref */ 696 /* obj is kept alive until the next request by its active ref */
487 i915_gem_object_unpin(from->obj); 697 i915_gem_object_ggtt_unpin(from->obj);
488 i915_gem_context_unreference(from); 698 i915_gem_context_unreference(from);
489 } 699 }
490 700
701 to->is_initialized = true;
702
703done:
491 i915_gem_context_reference(to); 704 i915_gem_context_reference(to);
492 ring->last_context = to; 705 ring->last_context = to;
493 to->is_initialized = true; 706 to->last_ring = ring;
494 707
495 return 0; 708 return 0;
709
710unpin_out:
711 if (ring->id == RCS)
712 i915_gem_object_ggtt_unpin(to->obj);
713 return ret;
496} 714}
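do_switch() now keys its early-out on the ring as well as the context, since a context can be current on several rings at once. A standalone sketch of just that test, with an int ring id and a toy struct standing in for the driver's types:

/* Sketch only -- an int ring id and a toy struct stand in for
 * intel_ring_buffer and i915_hw_context. */
#include <stdbool.h>
#include <stdio.h>

struct toy_ctx {
        int last_ring;                  /* ring the context last ran on */
        unsigned int remap_slice;       /* pending L3 remap bitmask */
};

static bool can_skip_switch(int ring, const struct toy_ctx *from,
                            const struct toy_ctx *to)
{
        return from == to && from && from->last_ring == ring &&
               !to->remap_slice;
}

int main(void)
{
        struct toy_ctx ctx = { .last_ring = 0 /* RCS */, .remap_slice = 0 };

        printf("same ring again: skip=%d\n", can_skip_switch(0, &ctx, &ctx));
        printf("different ring:  skip=%d\n", can_skip_switch(1, &ctx, &ctx));
        return 0;
}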
497 715
498/** 716/**
@@ -508,31 +726,19 @@ static int do_switch(struct i915_hw_context *to)
508 */ 726 */
509int i915_switch_context(struct intel_ring_buffer *ring, 727int i915_switch_context(struct intel_ring_buffer *ring,
510 struct drm_file *file, 728 struct drm_file *file,
511 int to_id) 729 struct i915_hw_context *to)
512{ 730{
513 struct drm_i915_private *dev_priv = ring->dev->dev_private; 731 struct drm_i915_private *dev_priv = ring->dev->dev_private;
514 struct i915_hw_context *to;
515
516 if (!HAS_HW_CONTEXTS(ring->dev))
517 return 0;
518 732
519 WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex)); 733 WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
520 734
521 if (ring != &dev_priv->ring[RCS]) 735 BUG_ON(file && to == NULL);
522 return 0;
523
524 if (to_id == DEFAULT_CONTEXT_ID) {
525 to = ring->default_context;
526 } else {
527 if (file == NULL)
528 return -EINVAL;
529 736
530 to = i915_gem_context_get(file->driver_priv, to_id); 737 /* We have the fake context, but don't support switching. */
531 if (to == NULL) 738 if (!HAS_HW_CONTEXTS(ring->dev))
532 return -ENOENT; 739 return 0;
533 }
534 740
535 return do_switch(to); 741 return do_switch(ring, to);
536} 742}
537 743
538int i915_gem_context_create_ioctl(struct drm_device *dev, void *data, 744int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
@@ -553,7 +759,7 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
553 if (ret) 759 if (ret)
554 return ret; 760 return ret;
555 761
556 ctx = create_hw_context(dev, file_priv); 762 ctx = i915_gem_create_context(dev, file_priv, USES_FULL_PPGTT(dev));
557 mutex_unlock(&dev->struct_mutex); 763 mutex_unlock(&dev->struct_mutex);
558 if (IS_ERR(ctx)) 764 if (IS_ERR(ctx))
559 return PTR_ERR(ctx); 765 return PTR_ERR(ctx);
@@ -575,14 +781,17 @@ int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
575 if (!(dev->driver->driver_features & DRIVER_GEM)) 781 if (!(dev->driver->driver_features & DRIVER_GEM))
576 return -ENODEV; 782 return -ENODEV;
577 783
784 if (args->ctx_id == DEFAULT_CONTEXT_ID)
785 return -ENOENT;
786
578 ret = i915_mutex_lock_interruptible(dev); 787 ret = i915_mutex_lock_interruptible(dev);
579 if (ret) 788 if (ret)
580 return ret; 789 return ret;
581 790
582 ctx = i915_gem_context_get(file_priv, args->ctx_id); 791 ctx = i915_gem_context_get(file_priv, args->ctx_id);
583 if (!ctx) { 792 if (IS_ERR(ctx)) {
584 mutex_unlock(&dev->struct_mutex); 793 mutex_unlock(&dev->struct_mutex);
585 return -ENOENT; 794 return PTR_ERR(ctx);
586 } 795 }
587 796
588 idr_remove(&ctx->file_priv->context_idr, ctx->id); 797 idr_remove(&ctx->file_priv->context_idr, ctx->id);
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 2ca280f9ee53..5168d6a08054 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -36,7 +36,7 @@
36static bool 36static bool
37mark_free(struct i915_vma *vma, struct list_head *unwind) 37mark_free(struct i915_vma *vma, struct list_head *unwind)
38{ 38{
39 if (vma->obj->pin_count) 39 if (vma->pin_count)
40 return false; 40 return false;
41 41
42 if (WARN_ON(!list_empty(&vma->exec_list))) 42 if (WARN_ON(!list_empty(&vma->exec_list)))
@@ -46,6 +46,25 @@ mark_free(struct i915_vma *vma, struct list_head *unwind)
46 return drm_mm_scan_add_block(&vma->node); 46 return drm_mm_scan_add_block(&vma->node);
47} 47}
48 48
49/**
50 * i915_gem_evict_something - Evict vmas to make room for binding a new one
51 * @dev: drm_device
52 * @vm: address space to evict from
53 * @size: size of the desired free space
54 * @alignment: alignment constraint of the desired free space
55 * @cache_level: cache_level for the desired space
56 * @mappable: whether the free space must be mappable
57 * @nonblocking: whether evicting active objects is allowed or not
58 *
59 * This function will try to evict vmas until a free space satisfying the
60 * requirements is found. Callers must check first whether any such hole exists
61 * already before calling this function.
62 *
63 * This function is used by the object/vma binding code.
64 *
65 * To clarify: This is for freeing up virtual address space, not for freeing
66 * memory in e.g. the shrinker.
67 */
49int 68int
50i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm, 69i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,
51 int min_size, unsigned alignment, unsigned cache_level, 70 int min_size, unsigned alignment, unsigned cache_level,
@@ -177,19 +196,19 @@ found:
177} 196}
178 197
179/** 198/**
180 * i915_gem_evict_vm - Try to free up VM space 199 * i915_gem_evict_vm - Evict all idle vmas from a vm
181 * 200 *
182 * @vm: Address space to evict from 201 * @vm: Address space to cleanse
183 * @do_idle: Boolean directing whether to idle first. 202 * @do_idle: Boolean directing whether to idle first.
184 * 203 *
185 * VM eviction is about freeing up virtual address space. If one wants fine 204 * This function evicts all idle vmas from a vm. If all unpinned vmas should be
186 * grained eviction, they should see evict something for more details. In terms 205 * evicted, @do_idle needs to be set to true.
187 * of freeing up actual system memory, this function may not accomplish the
188 * desired result. An object may be shared in multiple address space, and this
189 * function will not assert those objects be freed.
190 * 206 *
191 * Using do_idle will result in a more complete eviction because it retires, and 207 * This is used by the execbuf code as a last-ditch effort to defragment the
192 * inactivates current BOs. 208 * address space.
209 *
210 * To clarify: This is for freeing up virtual address space, not for freeing
211 * memory in e.g. the shrinker.
193 */ 212 */
194int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle) 213int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle)
195{ 214{
@@ -207,12 +226,20 @@ int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle)
207 } 226 }
208 227
209 list_for_each_entry_safe(vma, next, &vm->inactive_list, mm_list) 228 list_for_each_entry_safe(vma, next, &vm->inactive_list, mm_list)
210 if (vma->obj->pin_count == 0) 229 if (vma->pin_count == 0)
211 WARN_ON(i915_vma_unbind(vma)); 230 WARN_ON(i915_vma_unbind(vma));
212 231
213 return 0; 232 return 0;
214} 233}
215 234
235/**
236 * i915_gem_evict_everything - Try to evict all objects
237 * @dev: Device to evict objects for
238 *
239 * This function tries to evict all gem objects from all address spaces. Used
240 * by the shrinker as a last-ditch effort and for suspend, before releasing the
241 * backing storage of all unbound objects.
242 */
216int 243int
217i915_gem_evict_everything(struct drm_device *dev) 244i915_gem_evict_everything(struct drm_device *dev)
218{ 245{
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index d269ecf46e26..032def901f98 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -91,6 +91,7 @@ eb_lookup_vmas(struct eb_vmas *eb,
91 struct i915_address_space *vm, 91 struct i915_address_space *vm,
92 struct drm_file *file) 92 struct drm_file *file)
93{ 93{
94 struct drm_i915_private *dev_priv = vm->dev->dev_private;
94 struct drm_i915_gem_object *obj; 95 struct drm_i915_gem_object *obj;
95 struct list_head objects; 96 struct list_head objects;
96 int i, ret; 97 int i, ret;
@@ -125,6 +126,20 @@ eb_lookup_vmas(struct eb_vmas *eb,
125 i = 0; 126 i = 0;
126 while (!list_empty(&objects)) { 127 while (!list_empty(&objects)) {
127 struct i915_vma *vma; 128 struct i915_vma *vma;
129 struct i915_address_space *bind_vm = vm;
130
131 if (exec[i].flags & EXEC_OBJECT_NEEDS_GTT &&
132 USES_FULL_PPGTT(vm->dev)) {
133 ret = -EINVAL;
134 goto err;
135 }
136
137 /* If we have secure dispatch, or the userspace assures us that
138 * they know what they're doing, use the GGTT VM.
139 */
140 if (((args->flags & I915_EXEC_SECURE) &&
141 (i == (args->buffer_count - 1))))
142 bind_vm = &dev_priv->gtt.base;
128 143
129 obj = list_first_entry(&objects, 144 obj = list_first_entry(&objects,
130 struct drm_i915_gem_object, 145 struct drm_i915_gem_object,
@@ -138,7 +153,7 @@ eb_lookup_vmas(struct eb_vmas *eb,
138 * from the (obj, vm) we don't run the risk of creating 153 * from the (obj, vm) we don't run the risk of creating
139 * duplicated vmas for the same vm. 154 * duplicated vmas for the same vm.
140 */ 155 */
141 vma = i915_gem_obj_lookup_or_create_vma(obj, vm); 156 vma = i915_gem_obj_lookup_or_create_vma(obj, bind_vm);
142 if (IS_ERR(vma)) { 157 if (IS_ERR(vma)) {
143 DRM_DEBUG("Failed to lookup VMA\n"); 158 DRM_DEBUG("Failed to lookup VMA\n");
144 ret = PTR_ERR(vma); 159 ret = PTR_ERR(vma);
@@ -217,7 +232,7 @@ i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma)
217 i915_gem_object_unpin_fence(obj); 232 i915_gem_object_unpin_fence(obj);
218 233
219 if (entry->flags & __EXEC_OBJECT_HAS_PIN) 234 if (entry->flags & __EXEC_OBJECT_HAS_PIN)
220 i915_gem_object_unpin(obj); 235 vma->pin_count--;
221 236
222 entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN); 237 entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
223} 238}
@@ -327,8 +342,7 @@ relocate_entry_gtt(struct drm_i915_gem_object *obj,
327static int 342static int
328i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj, 343i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
329 struct eb_vmas *eb, 344 struct eb_vmas *eb,
330 struct drm_i915_gem_relocation_entry *reloc, 345 struct drm_i915_gem_relocation_entry *reloc)
331 struct i915_address_space *vm)
332{ 346{
333 struct drm_device *dev = obj->base.dev; 347 struct drm_device *dev = obj->base.dev;
334 struct drm_gem_object *target_obj; 348 struct drm_gem_object *target_obj;
@@ -352,8 +366,10 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
352 if (unlikely(IS_GEN6(dev) && 366 if (unlikely(IS_GEN6(dev) &&
353 reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION && 367 reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
354 !target_i915_obj->has_global_gtt_mapping)) { 368 !target_i915_obj->has_global_gtt_mapping)) {
355 i915_gem_gtt_bind_object(target_i915_obj, 369 struct i915_vma *vma =
356 target_i915_obj->cache_level); 370 list_first_entry(&target_i915_obj->vma_list,
371 typeof(*vma), vma_link);
372 vma->bind_vma(vma, target_i915_obj->cache_level, GLOBAL_BIND);
357 } 373 }
358 374
359 /* Validate that the target is in a valid r/w GPU domain */ 375 /* Validate that the target is in a valid r/w GPU domain */
@@ -451,8 +467,7 @@ i915_gem_execbuffer_relocate_vma(struct i915_vma *vma,
451 do { 467 do {
452 u64 offset = r->presumed_offset; 468 u64 offset = r->presumed_offset;
453 469
454 ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, r, 470 ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, r);
455 vma->vm);
456 if (ret) 471 if (ret)
457 return ret; 472 return ret;
458 473
@@ -481,8 +496,7 @@ i915_gem_execbuffer_relocate_vma_slow(struct i915_vma *vma,
481 int i, ret; 496 int i, ret;
482 497
483 for (i = 0; i < entry->relocation_count; i++) { 498 for (i = 0; i < entry->relocation_count; i++) {
484 ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, &relocs[i], 499 ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, &relocs[i]);
485 vma->vm);
486 if (ret) 500 if (ret)
487 return ret; 501 return ret;
488 } 502 }
@@ -527,11 +541,12 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
527 struct intel_ring_buffer *ring, 541 struct intel_ring_buffer *ring,
528 bool *need_reloc) 542 bool *need_reloc)
529{ 543{
530 struct drm_i915_private *dev_priv = ring->dev->dev_private; 544 struct drm_i915_gem_object *obj = vma->obj;
531 struct drm_i915_gem_exec_object2 *entry = vma->exec_entry; 545 struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
532 bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4; 546 bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
533 bool need_fence, need_mappable; 547 bool need_fence, need_mappable;
534 struct drm_i915_gem_object *obj = vma->obj; 548 u32 flags = (entry->flags & EXEC_OBJECT_NEEDS_GTT) &&
549 !vma->obj->has_global_gtt_mapping ? GLOBAL_BIND : 0;
535 int ret; 550 int ret;
536 551
537 need_fence = 552 need_fence =
@@ -560,14 +575,6 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
560 } 575 }
561 } 576 }
562 577
563 /* Ensure ppgtt mapping exists if needed */
564 if (dev_priv->mm.aliasing_ppgtt && !obj->has_aliasing_ppgtt_mapping) {
565 i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
566 obj, obj->cache_level);
567
568 obj->has_aliasing_ppgtt_mapping = 1;
569 }
570
571 if (entry->offset != vma->node.start) { 578 if (entry->offset != vma->node.start) {
572 entry->offset = vma->node.start; 579 entry->offset = vma->node.start;
573 *need_reloc = true; 580 *need_reloc = true;
@@ -578,9 +585,7 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
578 obj->base.pending_write_domain = I915_GEM_DOMAIN_RENDER; 585 obj->base.pending_write_domain = I915_GEM_DOMAIN_RENDER;
579 } 586 }
580 587
581 if (entry->flags & EXEC_OBJECT_NEEDS_GTT && 588 vma->bind_vma(vma, obj->cache_level, flags);
582 !obj->has_global_gtt_mapping)
583 i915_gem_gtt_bind_object(obj, obj->cache_level);
584 589
585 return 0; 590 return 0;
586} 591}
@@ -891,7 +896,7 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
891 if (!access_ok(VERIFY_WRITE, ptr, length)) 896 if (!access_ok(VERIFY_WRITE, ptr, length))
892 return -EFAULT; 897 return -EFAULT;
893 898
894 if (likely(!i915_prefault_disable)) { 899 if (likely(!i915.prefault_disable)) {
895 if (fault_in_multipages_readable(ptr, length)) 900 if (fault_in_multipages_readable(ptr, length))
896 return -EFAULT; 901 return -EFAULT;
897 } 902 }
@@ -900,22 +905,27 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
900 return 0; 905 return 0;
901} 906}
902 907
903static int 908static struct i915_hw_context *
904i915_gem_validate_context(struct drm_device *dev, struct drm_file *file, 909i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
905 const u32 ctx_id) 910 struct intel_ring_buffer *ring, const u32 ctx_id)
906{ 911{
912 struct i915_hw_context *ctx = NULL;
907 struct i915_ctx_hang_stats *hs; 913 struct i915_ctx_hang_stats *hs;
908 914
909 hs = i915_gem_context_get_hang_stats(dev, file, ctx_id); 915 if (ring->id != RCS && ctx_id != DEFAULT_CONTEXT_ID)
910 if (IS_ERR(hs)) 916 return ERR_PTR(-EINVAL);
911 return PTR_ERR(hs); 917
918 ctx = i915_gem_context_get(file->driver_priv, ctx_id);
919 if (IS_ERR(ctx))
920 return ctx;
912 921
922 hs = &ctx->hang_stats;
913 if (hs->banned) { 923 if (hs->banned) {
914 DRM_DEBUG("Context %u tried to submit while banned\n", ctx_id); 924 DRM_DEBUG("Context %u tried to submit while banned\n", ctx_id);
915 return -EIO; 925 return ERR_PTR(-EIO);
916 } 926 }
917 927
918 return 0; 928 return ctx;
919} 929}
920 930
921static void 931static void
@@ -939,7 +949,9 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,
939 if (obj->base.write_domain) { 949 if (obj->base.write_domain) {
940 obj->dirty = 1; 950 obj->dirty = 1;
941 obj->last_write_seqno = intel_ring_get_seqno(ring); 951 obj->last_write_seqno = intel_ring_get_seqno(ring);
942 if (obj->pin_count) /* check for potential scanout */ 952 /* check for potential scanout */
953 if (i915_gem_obj_ggtt_bound(obj) &&
954 i915_gem_obj_to_ggtt(obj)->pin_count)
943 intel_mark_fb_busy(obj, ring); 955 intel_mark_fb_busy(obj, ring);
944 } 956 }
945 957
@@ -989,16 +1001,17 @@ static int
989i915_gem_do_execbuffer(struct drm_device *dev, void *data, 1001i915_gem_do_execbuffer(struct drm_device *dev, void *data,
990 struct drm_file *file, 1002 struct drm_file *file,
991 struct drm_i915_gem_execbuffer2 *args, 1003 struct drm_i915_gem_execbuffer2 *args,
992 struct drm_i915_gem_exec_object2 *exec, 1004 struct drm_i915_gem_exec_object2 *exec)
993 struct i915_address_space *vm)
994{ 1005{
995 drm_i915_private_t *dev_priv = dev->dev_private; 1006 drm_i915_private_t *dev_priv = dev->dev_private;
996 struct eb_vmas *eb; 1007 struct eb_vmas *eb;
997 struct drm_i915_gem_object *batch_obj; 1008 struct drm_i915_gem_object *batch_obj;
998 struct drm_clip_rect *cliprects = NULL; 1009 struct drm_clip_rect *cliprects = NULL;
999 struct intel_ring_buffer *ring; 1010 struct intel_ring_buffer *ring;
1011 struct i915_hw_context *ctx;
1012 struct i915_address_space *vm;
1000 const u32 ctx_id = i915_execbuffer2_get_context_id(*args); 1013 const u32 ctx_id = i915_execbuffer2_get_context_id(*args);
1001 u32 exec_start, exec_len; 1014 u32 exec_start = args->batch_start_offset, exec_len;
1002 u32 mask, flags; 1015 u32 mask, flags;
1003 int ret, mode, i; 1016 int ret, mode, i;
1004 bool need_relocs; 1017 bool need_relocs;
@@ -1020,41 +1033,17 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
1020 if (args->flags & I915_EXEC_IS_PINNED) 1033 if (args->flags & I915_EXEC_IS_PINNED)
1021 flags |= I915_DISPATCH_PINNED; 1034 flags |= I915_DISPATCH_PINNED;
1022 1035
1023 switch (args->flags & I915_EXEC_RING_MASK) { 1036 if ((args->flags & I915_EXEC_RING_MASK) > I915_NUM_RINGS) {
1024 case I915_EXEC_DEFAULT:
1025 case I915_EXEC_RENDER:
1026 ring = &dev_priv->ring[RCS];
1027 break;
1028 case I915_EXEC_BSD:
1029 ring = &dev_priv->ring[VCS];
1030 if (ctx_id != DEFAULT_CONTEXT_ID) {
1031 DRM_DEBUG("Ring %s doesn't support contexts\n",
1032 ring->name);
1033 return -EPERM;
1034 }
1035 break;
1036 case I915_EXEC_BLT:
1037 ring = &dev_priv->ring[BCS];
1038 if (ctx_id != DEFAULT_CONTEXT_ID) {
1039 DRM_DEBUG("Ring %s doesn't support contexts\n",
1040 ring->name);
1041 return -EPERM;
1042 }
1043 break;
1044 case I915_EXEC_VEBOX:
1045 ring = &dev_priv->ring[VECS];
1046 if (ctx_id != DEFAULT_CONTEXT_ID) {
1047 DRM_DEBUG("Ring %s doesn't support contexts\n",
1048 ring->name);
1049 return -EPERM;
1050 }
1051 break;
1052
1053 default:
1054 DRM_DEBUG("execbuf with unknown ring: %d\n", 1037 DRM_DEBUG("execbuf with unknown ring: %d\n",
1055 (int)(args->flags & I915_EXEC_RING_MASK)); 1038 (int)(args->flags & I915_EXEC_RING_MASK));
1056 return -EINVAL; 1039 return -EINVAL;
1057 } 1040 }
1041
1042 if ((args->flags & I915_EXEC_RING_MASK) == I915_EXEC_DEFAULT)
1043 ring = &dev_priv->ring[RCS];
1044 else
1045 ring = &dev_priv->ring[(args->flags & I915_EXEC_RING_MASK) - 1];
1046
1058 if (!intel_ring_initialized(ring)) { 1047 if (!intel_ring_initialized(ring)) {
1059 DRM_DEBUG("execbuf with invalid ring: %d\n", 1048 DRM_DEBUG("execbuf with invalid ring: %d\n",
1060 (int)(args->flags & I915_EXEC_RING_MASK)); 1049 (int)(args->flags & I915_EXEC_RING_MASK));
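The ring-selection switch statement is replaced by direct indexing: the execbuf ring selector maps to a ring array slot as (flag - 1), with the default flag going to the render ring. A standalone sketch of that mapping; the enum values mirror the i915 uapi execbuf flags and the driver's ring numbering, and the 0x7 mask is an assumption standing in for I915_EXEC_RING_MASK.

/* Sketch only -- enum values mirror the i915 uapi and ring numbering; the
 * 0x7 mask is an assumed stand-in for I915_EXEC_RING_MASK. */
#include <stdio.h>

enum { EXEC_DEFAULT, EXEC_RENDER, EXEC_BSD, EXEC_BLT, EXEC_VEBOX };
enum { RCS, VCS, BCS, VECS, NUM_RINGS };

static int ring_for_flags(unsigned int flags)
{
        unsigned int sel = flags & 0x7;

        if (sel > NUM_RINGS)
                return -1;                      /* -EINVAL in the driver */
        return sel == EXEC_DEFAULT ? RCS : (int)sel - 1;
}

int main(void)
{
        printf("EXEC_DEFAULT -> ring %d (RCS=%d)\n", ring_for_flags(EXEC_DEFAULT), RCS);
        printf("EXEC_BLT     -> ring %d (BCS=%d)\n", ring_for_flags(EXEC_BLT), BCS);
        return 0;
}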
@@ -1136,11 +1125,18 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
1136 goto pre_mutex_err; 1125 goto pre_mutex_err;
1137 } 1126 }
1138 1127
1139 ret = i915_gem_validate_context(dev, file, ctx_id); 1128 ctx = i915_gem_validate_context(dev, file, ring, ctx_id);
1140 if (ret) { 1129 if (IS_ERR(ctx)) {
1141 mutex_unlock(&dev->struct_mutex); 1130 mutex_unlock(&dev->struct_mutex);
1131 ret = PTR_ERR(ctx);
1142 goto pre_mutex_err; 1132 goto pre_mutex_err;
1143 } 1133 }
1134
1135 i915_gem_context_reference(ctx);
1136
1137 vm = ctx->vm;
1138 if (!USES_FULL_PPGTT(dev))
1139 vm = &dev_priv->gtt.base;
1144 1140
1145 eb = eb_create(args); 1141 eb = eb_create(args);
1146 if (eb == NULL) { 1142 if (eb == NULL) {
@@ -1187,14 +1183,25 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
1187 /* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure 1183 /* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
1188 * batch" bit. Hence we need to pin secure batches into the global gtt. 1184 * batch" bit. Hence we need to pin secure batches into the global gtt.
1189 * hsw should have this fixed, but bdw mucks it up again. */ 1185 * hsw should have this fixed, but bdw mucks it up again. */
1190 if (flags & I915_DISPATCH_SECURE && !batch_obj->has_global_gtt_mapping) 1186 if (flags & I915_DISPATCH_SECURE &&
1191 i915_gem_gtt_bind_object(batch_obj, batch_obj->cache_level); 1187 !batch_obj->has_global_gtt_mapping) {
1188 /* When we have multiple VMs, we'll need to make sure that we
1189 * allocate space first */
1190 struct i915_vma *vma = i915_gem_obj_to_ggtt(batch_obj);
1191 BUG_ON(!vma);
1192 vma->bind_vma(vma, batch_obj->cache_level, GLOBAL_BIND);
1193 }
1194
1195 if (flags & I915_DISPATCH_SECURE)
1196 exec_start += i915_gem_obj_ggtt_offset(batch_obj);
1197 else
1198 exec_start += i915_gem_obj_offset(batch_obj, vm);
1192 1199
1193 ret = i915_gem_execbuffer_move_to_gpu(ring, &eb->vmas); 1200 ret = i915_gem_execbuffer_move_to_gpu(ring, &eb->vmas);
1194 if (ret) 1201 if (ret)
1195 goto err; 1202 goto err;
1196 1203
1197 ret = i915_switch_context(ring, file, ctx_id); 1204 ret = i915_switch_context(ring, file, ctx);
1198 if (ret) 1205 if (ret)
1199 goto err; 1206 goto err;
1200 1207
@@ -1219,8 +1226,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
1219 goto err; 1226 goto err;
1220 } 1227 }
1221 1228
1222 exec_start = i915_gem_obj_offset(batch_obj, vm) + 1229
1223 args->batch_start_offset;
1224 exec_len = args->batch_len; 1230 exec_len = args->batch_len;
1225 if (cliprects) { 1231 if (cliprects) {
1226 for (i = 0; i < args->num_cliprects; i++) { 1232 for (i = 0; i < args->num_cliprects; i++) {
@@ -1249,6 +1255,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
1249 i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj); 1255 i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);
1250 1256
1251err: 1257err:
1258 /* the request owns the ref now */
1259 i915_gem_context_unreference(ctx);
1252 eb_destroy(eb); 1260 eb_destroy(eb);
1253 1261
1254 mutex_unlock(&dev->struct_mutex); 1262 mutex_unlock(&dev->struct_mutex);
@@ -1270,7 +1278,6 @@ int
1270i915_gem_execbuffer(struct drm_device *dev, void *data, 1278i915_gem_execbuffer(struct drm_device *dev, void *data,
1271 struct drm_file *file) 1279 struct drm_file *file)
1272{ 1280{
1273 struct drm_i915_private *dev_priv = dev->dev_private;
1274 struct drm_i915_gem_execbuffer *args = data; 1281 struct drm_i915_gem_execbuffer *args = data;
1275 struct drm_i915_gem_execbuffer2 exec2; 1282 struct drm_i915_gem_execbuffer2 exec2;
1276 struct drm_i915_gem_exec_object *exec_list = NULL; 1283 struct drm_i915_gem_exec_object *exec_list = NULL;
@@ -1326,8 +1333,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
1326 exec2.flags = I915_EXEC_RENDER; 1333 exec2.flags = I915_EXEC_RENDER;
1327 i915_execbuffer2_set_context_id(exec2, 0); 1334 i915_execbuffer2_set_context_id(exec2, 0);
1328 1335
1329 ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list, 1336 ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list);
1330 &dev_priv->gtt.base);
1331 if (!ret) { 1337 if (!ret) {
1332 /* Copy the new buffer offsets back to the user's exec list. */ 1338 /* Copy the new buffer offsets back to the user's exec list. */
1333 for (i = 0; i < args->buffer_count; i++) 1339 for (i = 0; i < args->buffer_count; i++)
@@ -1353,7 +1359,6 @@ int
1353i915_gem_execbuffer2(struct drm_device *dev, void *data, 1359i915_gem_execbuffer2(struct drm_device *dev, void *data,
1354 struct drm_file *file) 1360 struct drm_file *file)
1355{ 1361{
1356 struct drm_i915_private *dev_priv = dev->dev_private;
1357 struct drm_i915_gem_execbuffer2 *args = data; 1362 struct drm_i915_gem_execbuffer2 *args = data;
1358 struct drm_i915_gem_exec_object2 *exec2_list = NULL; 1363 struct drm_i915_gem_exec_object2 *exec2_list = NULL;
1359 int ret; 1364 int ret;
@@ -1384,8 +1389,7 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
1384 return -EFAULT; 1389 return -EFAULT;
1385 } 1390 }
1386 1391
1387 ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list, 1392 ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list);
1388 &dev_priv->gtt.base);
1389 if (!ret) { 1393 if (!ret) {
1390 /* Copy the new buffer offsets back to the user's exec list. */ 1394 /* Copy the new buffer offsets back to the user's exec list. */
1391 ret = copy_to_user(to_user_ptr(args->buffers_ptr), 1395 ret = copy_to_user(to_user_ptr(args->buffers_ptr),
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 40a2b36b276b..a4364ae1a2d6 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -22,6 +22,7 @@
22 * 22 *
23 */ 23 */
24 24
25#include <linux/seq_file.h>
25#include <drm/drmP.h> 26#include <drm/drmP.h>
26#include <drm/i915_drm.h> 27#include <drm/i915_drm.h>
27#include "i915_drv.h" 28#include "i915_drv.h"
@@ -70,6 +71,12 @@ typedef gen8_gtt_pte_t gen8_ppgtt_pde_t;
70#define PPAT_CACHED_INDEX _PAGE_PAT /* WB LLCeLLC */ 71#define PPAT_CACHED_INDEX _PAGE_PAT /* WB LLCeLLC */
71#define PPAT_DISPLAY_ELLC_INDEX _PAGE_PCD /* WT eLLC */ 72#define PPAT_DISPLAY_ELLC_INDEX _PAGE_PCD /* WT eLLC */
72 73
74static void ppgtt_bind_vma(struct i915_vma *vma,
75 enum i915_cache_level cache_level,
76 u32 flags);
77static void ppgtt_unbind_vma(struct i915_vma *vma);
78static int gen8_ppgtt_enable(struct i915_hw_ppgtt *ppgtt);
79
73static inline gen8_gtt_pte_t gen8_pte_encode(dma_addr_t addr, 80static inline gen8_gtt_pte_t gen8_pte_encode(dma_addr_t addr,
74 enum i915_cache_level level, 81 enum i915_cache_level level,
75 bool valid) 82 bool valid)
@@ -199,12 +206,19 @@ static gen6_gtt_pte_t iris_pte_encode(dma_addr_t addr,
199 206
200/* Broadwell Page Directory Pointer Descriptors */ 207/* Broadwell Page Directory Pointer Descriptors */
201static int gen8_write_pdp(struct intel_ring_buffer *ring, unsigned entry, 208static int gen8_write_pdp(struct intel_ring_buffer *ring, unsigned entry,
202 uint64_t val) 209 uint64_t val, bool synchronous)
203{ 210{
211 struct drm_i915_private *dev_priv = ring->dev->dev_private;
204 int ret; 212 int ret;
205 213
206 BUG_ON(entry >= 4); 214 BUG_ON(entry >= 4);
207 215
216 if (synchronous) {
217 I915_WRITE(GEN8_RING_PDP_UDW(ring, entry), val >> 32);
218 I915_WRITE(GEN8_RING_PDP_LDW(ring, entry), (u32)val);
219 return 0;
220 }
221
208 ret = intel_ring_begin(ring, 6); 222 ret = intel_ring_begin(ring, 6);
209 if (ret) 223 if (ret)
210 return ret; 224 return ret;
@@ -220,36 +234,23 @@ static int gen8_write_pdp(struct intel_ring_buffer *ring, unsigned entry,
220 return 0; 234 return 0;
221} 235}
222 236
223static int gen8_ppgtt_enable(struct drm_device *dev) 237static int gen8_mm_switch(struct i915_hw_ppgtt *ppgtt,
238 struct intel_ring_buffer *ring,
239 bool synchronous)
224{ 240{
225 struct drm_i915_private *dev_priv = dev->dev_private; 241 int i, ret;
226 struct intel_ring_buffer *ring;
227 struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
228 int i, j, ret;
229 242
230 /* bit of a hack to find the actual last used pd */ 243 /* bit of a hack to find the actual last used pd */
231 int used_pd = ppgtt->num_pd_entries / GEN8_PDES_PER_PAGE; 244 int used_pd = ppgtt->num_pd_entries / GEN8_PDES_PER_PAGE;
232 245
233 for_each_ring(ring, dev_priv, j) {
234 I915_WRITE(RING_MODE_GEN7(ring),
235 _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
236 }
237
238 for (i = used_pd - 1; i >= 0; i--) { 246 for (i = used_pd - 1; i >= 0; i--) {
239 dma_addr_t addr = ppgtt->pd_dma_addr[i]; 247 dma_addr_t addr = ppgtt->pd_dma_addr[i];
240 for_each_ring(ring, dev_priv, j) { 248 ret = gen8_write_pdp(ring, i, addr, synchronous);
241 ret = gen8_write_pdp(ring, i, addr); 249 if (ret)
242 if (ret) 250 return ret;
243 goto err_out;
244 }
245 } 251 }
246 return 0;
247 252
248err_out: 253 return 0;
249 for_each_ring(ring, dev_priv, j)
250 I915_WRITE(RING_MODE_GEN7(ring),
251 _MASKED_BIT_DISABLE(GFX_PPGTT_ENABLE));
252 return ret;
253} 254}
254 255
255static void gen8_ppgtt_clear_range(struct i915_address_space *vm, 256static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
@@ -324,6 +325,7 @@ static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
324 container_of(vm, struct i915_hw_ppgtt, base); 325 container_of(vm, struct i915_hw_ppgtt, base);
325 int i, j; 326 int i, j;
326 327
328 list_del(&vm->global_link);
327 drm_mm_takedown(&vm->mm); 329 drm_mm_takedown(&vm->mm);
328 330
329 for (i = 0; i < ppgtt->num_pd_pages ; i++) { 331 for (i = 0; i < ppgtt->num_pd_pages ; i++) {
@@ -386,6 +388,7 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size)
386 ppgtt->num_pt_pages = 1 << get_order(num_pt_pages << PAGE_SHIFT); 388 ppgtt->num_pt_pages = 1 << get_order(num_pt_pages << PAGE_SHIFT);
387 ppgtt->num_pd_entries = max_pdp * GEN8_PDES_PER_PAGE; 389 ppgtt->num_pd_entries = max_pdp * GEN8_PDES_PER_PAGE;
388 ppgtt->enable = gen8_ppgtt_enable; 390 ppgtt->enable = gen8_ppgtt_enable;
391 ppgtt->switch_mm = gen8_mm_switch;
389 ppgtt->base.clear_range = gen8_ppgtt_clear_range; 392 ppgtt->base.clear_range = gen8_ppgtt_clear_range;
390 ppgtt->base.insert_entries = gen8_ppgtt_insert_entries; 393 ppgtt->base.insert_entries = gen8_ppgtt_insert_entries;
391 ppgtt->base.cleanup = gen8_ppgtt_cleanup; 394 ppgtt->base.cleanup = gen8_ppgtt_cleanup;
@@ -458,6 +461,62 @@ err_out:
458 return ret; 461 return ret;
459} 462}
460 463
464static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
465{
466 struct drm_i915_private *dev_priv = ppgtt->base.dev->dev_private;
467 struct i915_address_space *vm = &ppgtt->base;
468 gen6_gtt_pte_t __iomem *pd_addr;
469 gen6_gtt_pte_t scratch_pte;
470 uint32_t pd_entry;
471 int pte, pde;
472
473 scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true);
474
475 pd_addr = (gen6_gtt_pte_t __iomem *)dev_priv->gtt.gsm +
476 ppgtt->pd_offset / sizeof(gen6_gtt_pte_t);
477
478 seq_printf(m, " VM %p (pd_offset %x-%x):\n", vm,
479 ppgtt->pd_offset, ppgtt->pd_offset + ppgtt->num_pd_entries);
480 for (pde = 0; pde < ppgtt->num_pd_entries; pde++) {
481 u32 expected;
482 gen6_gtt_pte_t *pt_vaddr;
483 dma_addr_t pt_addr = ppgtt->pt_dma_addr[pde];
484 pd_entry = readl(pd_addr + pde);
485 expected = (GEN6_PDE_ADDR_ENCODE(pt_addr) | GEN6_PDE_VALID);
486
487 if (pd_entry != expected)
488 seq_printf(m, "\tPDE #%d mismatch: Actual PDE: %x Expected PDE: %x\n",
489 pde,
490 pd_entry,
491 expected);
492 seq_printf(m, "\tPDE: %x\n", pd_entry);
493
494 pt_vaddr = kmap_atomic(ppgtt->pt_pages[pde]);
495 for (pte = 0; pte < I915_PPGTT_PT_ENTRIES; pte+=4) {
496 unsigned long va =
497 (pde * PAGE_SIZE * I915_PPGTT_PT_ENTRIES) +
498 (pte * PAGE_SIZE);
499 int i;
500 bool found = false;
501 for (i = 0; i < 4; i++)
502 if (pt_vaddr[pte + i] != scratch_pte)
503 found = true;
504 if (!found)
505 continue;
506
507 seq_printf(m, "\t\t0x%lx [%03d,%04d]: =", va, pde, pte);
508 for (i = 0; i < 4; i++) {
509 if (pt_vaddr[pte + i] != scratch_pte)
510 seq_printf(m, " %08x", pt_vaddr[pte + i]);
511 else
512 seq_puts(m, " SCRATCH ");
513 }
514 seq_puts(m, "\n");
515 }
516 kunmap_atomic(pt_vaddr);
517 }
518}
519
461static void gen6_write_pdes(struct i915_hw_ppgtt *ppgtt) 520static void gen6_write_pdes(struct i915_hw_ppgtt *ppgtt)
462{ 521{
463 struct drm_i915_private *dev_priv = ppgtt->base.dev->dev_private; 522 struct drm_i915_private *dev_priv = ppgtt->base.dev->dev_private;
@@ -480,61 +539,221 @@ static void gen6_write_pdes(struct i915_hw_ppgtt *ppgtt)
480 readl(pd_addr); 539 readl(pd_addr);
481} 540}
482 541
483static int gen6_ppgtt_enable(struct drm_device *dev) 542static uint32_t get_pd_offset(struct i915_hw_ppgtt *ppgtt)
484{ 543{
485 drm_i915_private_t *dev_priv = dev->dev_private; 544 BUG_ON(ppgtt->pd_offset & 0x3f);
486 uint32_t pd_offset; 545
546 return (ppgtt->pd_offset / 64) << 16;
547}
548
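get_pd_offset() converts the page-directory offset into the value written to PP_DIR_BASE: a count of 64-byte cachelines placed in bits 31:16. Together with the pd_offset computation later in this patch (node.start / PAGE_SIZE * PTE size), the arithmetic can be checked standalone; the sketch below assumes a 4 KiB page and a 4-byte gen6 PTE, matching the diff.

/* Sketch only -- mirrors the math in get_pd_offset() and the pd_offset
 * assignment in gen6_ppgtt_init(); 4 KiB pages and 4-byte gen6 PTEs are
 * assumptions matching this patch. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE_SK   4096u
#define GEN6_PTE_SIZE  4u               /* stand-in for sizeof(gen6_gtt_pte_t) */

/* node_start is the GGTT byte address of the PDE area reserved by drm_mm. */
static uint32_t pd_offset_from_node_start(uint64_t node_start)
{
        return (uint32_t)(node_start / PAGE_SIZE_SK * GEN6_PTE_SIZE);
}

/* PP_DIR_BASE takes the offset in 64-byte cachelines, in bits 31:16. */
static uint32_t pp_dir_base(uint32_t pd_offset)
{
        assert((pd_offset & 0x3f) == 0);        /* BUG_ON() in the driver */
        return (pd_offset / 64) << 16;
}

int main(void)
{
        uint32_t pd_offset = pd_offset_from_node_start(2045ull << 20);

        printf("pd_offset   = 0x%08x\n", (unsigned)pd_offset);
        printf("PP_DIR_BASE = 0x%08x\n", (unsigned)pp_dir_base(pd_offset));
        return 0;
}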
549static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
550 struct intel_ring_buffer *ring,
551 bool synchronous)
552{
553 struct drm_device *dev = ppgtt->base.dev;
554 struct drm_i915_private *dev_priv = dev->dev_private;
555 int ret;
556
557 /* If we're in reset, we can assume the GPU is sufficiently idle to
558 * manually frob these bits. Ideally we could use the ring functions,
559 * except our error handling makes it quite difficult (can't use
560 * intel_ring_begin, ring->flush, or intel_ring_advance)
561 *
562 * FIXME: We should try not to special case reset
563 */
564 if (synchronous ||
565 i915_reset_in_progress(&dev_priv->gpu_error)) {
566 WARN_ON(ppgtt != dev_priv->mm.aliasing_ppgtt);
567 I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
568 I915_WRITE(RING_PP_DIR_BASE(ring), get_pd_offset(ppgtt));
569 POSTING_READ(RING_PP_DIR_BASE(ring));
570 return 0;
571 }
572
573 /* NB: TLBs must be flushed and invalidated before a switch */
574 ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
575 if (ret)
576 return ret;
577
578 ret = intel_ring_begin(ring, 6);
579 if (ret)
580 return ret;
581
582 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2));
583 intel_ring_emit(ring, RING_PP_DIR_DCLV(ring));
584 intel_ring_emit(ring, PP_DIR_DCLV_2G);
585 intel_ring_emit(ring, RING_PP_DIR_BASE(ring));
586 intel_ring_emit(ring, get_pd_offset(ppgtt));
587 intel_ring_emit(ring, MI_NOOP);
588 intel_ring_advance(ring);
589
590 return 0;
591}
592
593static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
594 struct intel_ring_buffer *ring,
595 bool synchronous)
596{
597 struct drm_device *dev = ppgtt->base.dev;
598 struct drm_i915_private *dev_priv = dev->dev_private;
599 int ret;
600
601 /* If we're in reset, we can assume the GPU is sufficiently idle to
602 * manually frob these bits. Ideally we could use the ring functions,
603 * except our error handling makes it quite difficult (can't use
604 * intel_ring_begin, ring->flush, or intel_ring_advance)
605 *
606 * FIXME: We should try not to special case reset
607 */
608 if (synchronous ||
609 i915_reset_in_progress(&dev_priv->gpu_error)) {
610 WARN_ON(ppgtt != dev_priv->mm.aliasing_ppgtt);
611 I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
612 I915_WRITE(RING_PP_DIR_BASE(ring), get_pd_offset(ppgtt));
613 POSTING_READ(RING_PP_DIR_BASE(ring));
614 return 0;
615 }
616
617 /* NB: TLBs must be flushed and invalidated before a switch */
618 ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
619 if (ret)
620 return ret;
621
622 ret = intel_ring_begin(ring, 6);
623 if (ret)
624 return ret;
625
626 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2));
627 intel_ring_emit(ring, RING_PP_DIR_DCLV(ring));
628 intel_ring_emit(ring, PP_DIR_DCLV_2G);
629 intel_ring_emit(ring, RING_PP_DIR_BASE(ring));
630 intel_ring_emit(ring, get_pd_offset(ppgtt));
631 intel_ring_emit(ring, MI_NOOP);
632 intel_ring_advance(ring);
633
634 /* XXX: RCS is the only one to auto invalidate the TLBs? */
635 if (ring->id != RCS) {
636 ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
637 if (ret)
638 return ret;
639 }
640
641 return 0;
642}
643
644static int gen6_mm_switch(struct i915_hw_ppgtt *ppgtt,
645 struct intel_ring_buffer *ring,
646 bool synchronous)
647{
648 struct drm_device *dev = ppgtt->base.dev;
649 struct drm_i915_private *dev_priv = dev->dev_private;
650
651 if (!synchronous)
652 return 0;
653
654 I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
655 I915_WRITE(RING_PP_DIR_BASE(ring), get_pd_offset(ppgtt));
656
657 POSTING_READ(RING_PP_DIR_DCLV(ring));
658
659 return 0;
660}
661
662static int gen8_ppgtt_enable(struct i915_hw_ppgtt *ppgtt)
663{
664 struct drm_device *dev = ppgtt->base.dev;
665 struct drm_i915_private *dev_priv = dev->dev_private;
487 struct intel_ring_buffer *ring; 666 struct intel_ring_buffer *ring;
488 struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt; 667 int j, ret;
489 int i;
490 668
491 BUG_ON(ppgtt->pd_offset & 0x3f); 669 for_each_ring(ring, dev_priv, j) {
670 I915_WRITE(RING_MODE_GEN7(ring),
671 _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
492 672
493 gen6_write_pdes(ppgtt); 673 /* We promise to do a switch later with FULL PPGTT. If this is
674 * aliasing, this is the one and only switch we'll do */
675 if (USES_FULL_PPGTT(dev))
676 continue;
494 677
495 pd_offset = ppgtt->pd_offset; 678 ret = ppgtt->switch_mm(ppgtt, ring, true);
496 pd_offset /= 64; /* in cachelines, */ 679 if (ret)
497 pd_offset <<= 16; 680 goto err_out;
681 }
498 682
499 if (INTEL_INFO(dev)->gen == 6) { 683 return 0;
500 uint32_t ecochk, gab_ctl, ecobits;
501 684
502 ecobits = I915_READ(GAC_ECO_BITS); 685err_out:
503 I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_SNB_BIT | 686 for_each_ring(ring, dev_priv, j)
504 ECOBITS_PPGTT_CACHE64B); 687 I915_WRITE(RING_MODE_GEN7(ring),
688 _MASKED_BIT_DISABLE(GFX_PPGTT_ENABLE));
689 return ret;
690}
505 691
506 gab_ctl = I915_READ(GAB_CTL); 692static int gen7_ppgtt_enable(struct i915_hw_ppgtt *ppgtt)
507 I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT); 693{
694 struct drm_device *dev = ppgtt->base.dev;
695 drm_i915_private_t *dev_priv = dev->dev_private;
696 struct intel_ring_buffer *ring;
697 uint32_t ecochk, ecobits;
698 int i;
508 699
509 ecochk = I915_READ(GAM_ECOCHK); 700 ecobits = I915_READ(GAC_ECO_BITS);
510 I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT | 701 I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B);
511 ECOCHK_PPGTT_CACHE64B);
512 I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
513 } else if (INTEL_INFO(dev)->gen >= 7) {
514 uint32_t ecochk, ecobits;
515 702
516 ecobits = I915_READ(GAC_ECO_BITS); 703 ecochk = I915_READ(GAM_ECOCHK);
517 I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B); 704 if (IS_HASWELL(dev)) {
705 ecochk |= ECOCHK_PPGTT_WB_HSW;
706 } else {
707 ecochk |= ECOCHK_PPGTT_LLC_IVB;
708 ecochk &= ~ECOCHK_PPGTT_GFDT_IVB;
709 }
710 I915_WRITE(GAM_ECOCHK, ecochk);
518 711
519 ecochk = I915_READ(GAM_ECOCHK); 712 for_each_ring(ring, dev_priv, i) {
520 if (IS_HASWELL(dev)) { 713 int ret;
521 ecochk |= ECOCHK_PPGTT_WB_HSW;
522 } else {
523 ecochk |= ECOCHK_PPGTT_LLC_IVB;
524 ecochk &= ~ECOCHK_PPGTT_GFDT_IVB;
525 }
526 I915_WRITE(GAM_ECOCHK, ecochk);
527 /* GFX_MODE is per-ring on gen7+ */ 714 /* GFX_MODE is per-ring on gen7+ */
715 I915_WRITE(RING_MODE_GEN7(ring),
716 _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
717
718 /* We promise to do a switch later with FULL PPGTT. If this is
719 * aliasing, this is the one and only switch we'll do */
720 if (USES_FULL_PPGTT(dev))
721 continue;
722
723 ret = ppgtt->switch_mm(ppgtt, ring, true);
724 if (ret)
725 return ret;
528 } 726 }
529 727
530 for_each_ring(ring, dev_priv, i) { 728 return 0;
531 if (INTEL_INFO(dev)->gen >= 7) 729}
532 I915_WRITE(RING_MODE_GEN7(ring),
533 _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
534 730
535 I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G); 731static int gen6_ppgtt_enable(struct i915_hw_ppgtt *ppgtt)
536 I915_WRITE(RING_PP_DIR_BASE(ring), pd_offset); 732{
733 struct drm_device *dev = ppgtt->base.dev;
734 drm_i915_private_t *dev_priv = dev->dev_private;
735 struct intel_ring_buffer *ring;
736 uint32_t ecochk, gab_ctl, ecobits;
737 int i;
738
739 ecobits = I915_READ(GAC_ECO_BITS);
740 I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_SNB_BIT |
741 ECOBITS_PPGTT_CACHE64B);
742
743 gab_ctl = I915_READ(GAB_CTL);
744 I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT);
745
746 ecochk = I915_READ(GAM_ECOCHK);
747 I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT | ECOCHK_PPGTT_CACHE64B);
748
749 I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
750
751 for_each_ring(ring, dev_priv, i) {
752 int ret = ppgtt->switch_mm(ppgtt, ring, true);
753 if (ret)
754 return ret;
537 } 755 }
756
538 return 0; 757 return 0;
539} 758}
540 759
@@ -608,7 +827,9 @@ static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
608 container_of(vm, struct i915_hw_ppgtt, base); 827 container_of(vm, struct i915_hw_ppgtt, base);
609 int i; 828 int i;
610 829
830 list_del(&vm->global_link);
611 drm_mm_takedown(&ppgtt->base.mm); 831 drm_mm_takedown(&ppgtt->base.mm);
832 drm_mm_remove_node(&ppgtt->node);
612 833
613 if (ppgtt->pt_dma_addr) { 834 if (ppgtt->pt_dma_addr) {
614 for (i = 0; i < ppgtt->num_pd_entries; i++) 835 for (i = 0; i < ppgtt->num_pd_entries; i++)
@@ -626,20 +847,51 @@ static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
626 847
627static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt) 848static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
628{ 849{
850#define GEN6_PD_ALIGN (PAGE_SIZE * 16)
851#define GEN6_PD_SIZE (GEN6_PPGTT_PD_ENTRIES * PAGE_SIZE)
629 struct drm_device *dev = ppgtt->base.dev; 852 struct drm_device *dev = ppgtt->base.dev;
630 struct drm_i915_private *dev_priv = dev->dev_private; 853 struct drm_i915_private *dev_priv = dev->dev_private;
631 unsigned first_pd_entry_in_global_pt; 854 bool retried = false;
632 int i; 855 int i, ret;
633 int ret = -ENOMEM;
634 856
635 /* ppgtt PDEs reside in the global gtt pagetable, which has 512*1024 857 /* PPGTT PDEs reside in the GGTT and consist of 512 entries. The
636 * entries. For aliasing ppgtt support we just steal them at the end for 858 * allocator works in address space sizes, so it's multiplied by page
637 * now. */ 859 * size. We allocate at the top of the GTT to avoid fragmentation.
638 first_pd_entry_in_global_pt = gtt_total_entries(dev_priv->gtt); 860 */
861 BUG_ON(!drm_mm_initialized(&dev_priv->gtt.base.mm));
862alloc:
863 ret = drm_mm_insert_node_in_range_generic(&dev_priv->gtt.base.mm,
864 &ppgtt->node, GEN6_PD_SIZE,
865 GEN6_PD_ALIGN, 0,
866 0, dev_priv->gtt.base.total,
867 DRM_MM_SEARCH_DEFAULT);
868 if (ret == -ENOSPC && !retried) {
869 ret = i915_gem_evict_something(dev, &dev_priv->gtt.base,
870 GEN6_PD_SIZE, GEN6_PD_ALIGN,
871 I915_CACHE_NONE, false, true);
872 if (ret)
873 return ret;
874
875 retried = true;
876 goto alloc;
877 }
878
879 if (ppgtt->node.start < dev_priv->gtt.mappable_end)
880 DRM_DEBUG("Forced to use aperture for PDEs\n");
639 881
640 ppgtt->base.pte_encode = dev_priv->gtt.base.pte_encode; 882 ppgtt->base.pte_encode = dev_priv->gtt.base.pte_encode;
641 ppgtt->num_pd_entries = GEN6_PPGTT_PD_ENTRIES; 883 ppgtt->num_pd_entries = GEN6_PPGTT_PD_ENTRIES;
642 ppgtt->enable = gen6_ppgtt_enable; 884 if (IS_GEN6(dev)) {
885 ppgtt->enable = gen6_ppgtt_enable;
886 ppgtt->switch_mm = gen6_mm_switch;
887 } else if (IS_HASWELL(dev)) {
888 ppgtt->enable = gen7_ppgtt_enable;
889 ppgtt->switch_mm = hsw_mm_switch;
890 } else if (IS_GEN7(dev)) {
891 ppgtt->enable = gen7_ppgtt_enable;
892 ppgtt->switch_mm = gen7_mm_switch;
893 } else
894 BUG();
643 ppgtt->base.clear_range = gen6_ppgtt_clear_range; 895 ppgtt->base.clear_range = gen6_ppgtt_clear_range;
644 ppgtt->base.insert_entries = gen6_ppgtt_insert_entries; 896 ppgtt->base.insert_entries = gen6_ppgtt_insert_entries;
645 ppgtt->base.cleanup = gen6_ppgtt_cleanup; 897 ppgtt->base.cleanup = gen6_ppgtt_cleanup;
@@ -648,8 +900,10 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
648 ppgtt->base.total = GEN6_PPGTT_PD_ENTRIES * I915_PPGTT_PT_ENTRIES * PAGE_SIZE; 900 ppgtt->base.total = GEN6_PPGTT_PD_ENTRIES * I915_PPGTT_PT_ENTRIES * PAGE_SIZE;
649 ppgtt->pt_pages = kcalloc(ppgtt->num_pd_entries, sizeof(struct page *), 901 ppgtt->pt_pages = kcalloc(ppgtt->num_pd_entries, sizeof(struct page *),
650 GFP_KERNEL); 902 GFP_KERNEL);
651 if (!ppgtt->pt_pages) 903 if (!ppgtt->pt_pages) {
904 drm_mm_remove_node(&ppgtt->node);
652 return -ENOMEM; 905 return -ENOMEM;
906 }
653 907
654 for (i = 0; i < ppgtt->num_pd_entries; i++) { 908 for (i = 0; i < ppgtt->num_pd_entries; i++) {
655 ppgtt->pt_pages[i] = alloc_page(GFP_KERNEL); 909 ppgtt->pt_pages[i] = alloc_page(GFP_KERNEL);
@@ -678,8 +932,13 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
678 932
679 ppgtt->base.clear_range(&ppgtt->base, 0, 933 ppgtt->base.clear_range(&ppgtt->base, 0,
680 ppgtt->num_pd_entries * I915_PPGTT_PT_ENTRIES, true); 934 ppgtt->num_pd_entries * I915_PPGTT_PT_ENTRIES, true);
935 ppgtt->debug_dump = gen6_dump_ppgtt;
681 936
682 ppgtt->pd_offset = first_pd_entry_in_global_pt * sizeof(gen6_gtt_pte_t); 937 DRM_DEBUG_DRIVER("Allocated pde space (%ldM) at GTT entry: %lx\n",
938 ppgtt->node.size >> 20,
939 ppgtt->node.start / PAGE_SIZE);
940 ppgtt->pd_offset =
941 ppgtt->node.start / PAGE_SIZE * sizeof(gen6_gtt_pte_t);
683 942
684 return 0; 943 return 0;
685 944
@@ -696,19 +955,15 @@ err_pt_alloc:
696 __free_page(ppgtt->pt_pages[i]); 955 __free_page(ppgtt->pt_pages[i]);
697 } 956 }
698 kfree(ppgtt->pt_pages); 957 kfree(ppgtt->pt_pages);
958 drm_mm_remove_node(&ppgtt->node);
699 959
700 return ret; 960 return ret;
701} 961}
702 962
703static int i915_gem_init_aliasing_ppgtt(struct drm_device *dev) 963int i915_gem_init_ppgtt(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
704{ 964{
705 struct drm_i915_private *dev_priv = dev->dev_private; 965 struct drm_i915_private *dev_priv = dev->dev_private;
706 struct i915_hw_ppgtt *ppgtt; 966 int ret = 0;
707 int ret;
708
709 ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
710 if (!ppgtt)
711 return -ENOMEM;
712 967
713 ppgtt->base.dev = dev; 968 ppgtt->base.dev = dev;
714 969
@@ -719,45 +974,42 @@ static int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
719 else 974 else
720 BUG(); 975 BUG();
721 976
722 if (ret) 977 if (!ret) {
723 kfree(ppgtt); 978 struct drm_i915_private *dev_priv = dev->dev_private;
724 else { 979 kref_init(&ppgtt->ref);
725 dev_priv->mm.aliasing_ppgtt = ppgtt;
726 drm_mm_init(&ppgtt->base.mm, ppgtt->base.start, 980 drm_mm_init(&ppgtt->base.mm, ppgtt->base.start,
727 ppgtt->base.total); 981 ppgtt->base.total);
982 i915_init_vm(dev_priv, &ppgtt->base);
983 if (INTEL_INFO(dev)->gen < 8) {
984 gen6_write_pdes(ppgtt);
985 DRM_DEBUG("Adding PPGTT at offset %x\n",
986 ppgtt->pd_offset << 10);
987 }
728 } 988 }
729 989
730 return ret; 990 return ret;
731} 991}
732 992
733void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev) 993static void
994ppgtt_bind_vma(struct i915_vma *vma,
995 enum i915_cache_level cache_level,
996 u32 flags)
734{ 997{
735 struct drm_i915_private *dev_priv = dev->dev_private; 998 const unsigned long entry = vma->node.start >> PAGE_SHIFT;
736 struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
737 999
738 if (!ppgtt) 1000 WARN_ON(flags);
739 return;
740 1001
741 ppgtt->base.cleanup(&ppgtt->base); 1002 vma->vm->insert_entries(vma->vm, vma->obj->pages, entry, cache_level);
742 dev_priv->mm.aliasing_ppgtt = NULL;
743} 1003}
744 1004
745void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt, 1005static void ppgtt_unbind_vma(struct i915_vma *vma)
746 struct drm_i915_gem_object *obj,
747 enum i915_cache_level cache_level)
748{ 1006{
749 ppgtt->base.insert_entries(&ppgtt->base, obj->pages, 1007 const unsigned long entry = vma->node.start >> PAGE_SHIFT;
750 i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT,
751 cache_level);
752}
753 1008
754void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt, 1009 vma->vm->clear_range(vma->vm,
755 struct drm_i915_gem_object *obj) 1010 entry,
756{ 1011 vma->obj->base.size >> PAGE_SHIFT,
757 ppgtt->base.clear_range(&ppgtt->base, 1012 true);
758 i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT,
759 obj->base.size >> PAGE_SHIFT,
760 true);
761} 1013}
762 1014
763extern int intel_iommu_gfx_mapped; 1015extern int intel_iommu_gfx_mapped;
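
ppgtt_bind_vma() and ppgtt_unbind_vma() above are thin wrappers: they turn vma->node.start into a GTT page index and forward to the address space's insert_entries/clear_range hooks. A simplified standalone sketch of that dispatch shape, with hypothetical struct definitions in place of the real i915 types:

#include <stddef.h>
#include <stdio.h>

#define PAGE_SHIFT 12

/* Simplified, hypothetical stand-ins for struct i915_address_space and
 * struct i915_vma; only the fields needed for the dispatch are kept. */
struct address_space {
    void (*insert_entries)(unsigned long first_entry, unsigned long count);
    void (*clear_range)(unsigned long first_entry, unsigned long count);
};

struct vma {
    struct address_space *vm;
    unsigned long node_start;  /* vma->node.start: GTT address of the binding */
    size_t obj_size;           /* vma->obj->base.size, in bytes */
};

static void bind_vma(struct vma *vma)
{
    vma->vm->insert_entries(vma->node_start >> PAGE_SHIFT,
                            vma->obj_size >> PAGE_SHIFT);
}

static void unbind_vma(struct vma *vma)
{
    vma->vm->clear_range(vma->node_start >> PAGE_SHIFT,
                         vma->obj_size >> PAGE_SHIFT);
}

static void demo_insert(unsigned long e, unsigned long n)
{
    printf("insert %lu PTEs at entry %lu\n", n, e);
}

static void demo_clear(unsigned long e, unsigned long n)
{
    printf("clear  %lu PTEs at entry %lu\n", n, e);
}

int main(void)
{
    struct address_space vm = { demo_insert, demo_clear };
    struct vma v = { &vm, 0x10000, 3 * 4096 };

    bind_vma(&v);
    unbind_vma(&v);
    return 0;
}
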
@@ -849,6 +1101,7 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
849{ 1101{
850 struct drm_i915_private *dev_priv = dev->dev_private; 1102 struct drm_i915_private *dev_priv = dev->dev_private;
851 struct drm_i915_gem_object *obj; 1103 struct drm_i915_gem_object *obj;
1104 struct i915_address_space *vm;
852 1105
853 i915_check_and_clear_faults(dev); 1106 i915_check_and_clear_faults(dev);
854 1107
@@ -859,8 +1112,33 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
859 true); 1112 true);
860 1113
861 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { 1114 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
1115 struct i915_vma *vma = i915_gem_obj_to_vma(obj,
1116 &dev_priv->gtt.base);
1117 if (!vma)
1118 continue;
1119
862 i915_gem_clflush_object(obj, obj->pin_display); 1120 i915_gem_clflush_object(obj, obj->pin_display);
863 i915_gem_gtt_bind_object(obj, obj->cache_level); 1121 /* The bind_vma code tries to be smart about tracking mappings.
1122 * Unfortunately above, we've just wiped out the mappings
1123 * without telling our object about it. So we need to fake it.
1124 */
1125 obj->has_global_gtt_mapping = 0;
1126 vma->bind_vma(vma, obj->cache_level, GLOBAL_BIND);
1127 }
1128
1129
1130 if (INTEL_INFO(dev)->gen >= 8)
1131 return;
1132
1133 list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
1134 /* TODO: Perhaps it shouldn't be gen6 specific */
1135 if (i915_is_ggtt(vm)) {
1136 if (dev_priv->mm.aliasing_ppgtt)
1137 gen6_write_pdes(dev_priv->mm.aliasing_ppgtt);
1138 continue;
1139 }
1140
1141 gen6_write_pdes(container_of(vm, struct i915_hw_ppgtt, base));
864 } 1142 }
865 1143
866 i915_gem_chipset_flush(dev); 1144 i915_gem_chipset_flush(dev);
@@ -1017,16 +1295,18 @@ static void gen6_ggtt_clear_range(struct i915_address_space *vm,
1017 readl(gtt_base); 1295 readl(gtt_base);
1018} 1296}
1019 1297
1020static void i915_ggtt_insert_entries(struct i915_address_space *vm, 1298
1021 struct sg_table *st, 1299static void i915_ggtt_bind_vma(struct i915_vma *vma,
1022 unsigned int pg_start, 1300 enum i915_cache_level cache_level,
1023 enum i915_cache_level cache_level) 1301 u32 unused)
1024{ 1302{
1303 const unsigned long entry = vma->node.start >> PAGE_SHIFT;
1025 unsigned int flags = (cache_level == I915_CACHE_NONE) ? 1304 unsigned int flags = (cache_level == I915_CACHE_NONE) ?
1026 AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY; 1305 AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
1027 1306
1028 intel_gtt_insert_sg_entries(st, pg_start, flags); 1307 BUG_ON(!i915_is_ggtt(vma->vm));
1029 1308 intel_gtt_insert_sg_entries(vma->obj->pages, entry, flags);
1309 vma->obj->has_global_gtt_mapping = 1;
1030} 1310}
1031 1311
1032static void i915_ggtt_clear_range(struct i915_address_space *vm, 1312static void i915_ggtt_clear_range(struct i915_address_space *vm,
@@ -1037,33 +1317,77 @@ static void i915_ggtt_clear_range(struct i915_address_space *vm,
1037 intel_gtt_clear_range(first_entry, num_entries); 1317 intel_gtt_clear_range(first_entry, num_entries);
1038} 1318}
1039 1319
1320static void i915_ggtt_unbind_vma(struct i915_vma *vma)
1321{
1322 const unsigned int first = vma->node.start >> PAGE_SHIFT;
1323 const unsigned int size = vma->obj->base.size >> PAGE_SHIFT;
1324
1325 BUG_ON(!i915_is_ggtt(vma->vm));
1326 vma->obj->has_global_gtt_mapping = 0;
1327 intel_gtt_clear_range(first, size);
1328}
1040 1329
1041void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj, 1330static void ggtt_bind_vma(struct i915_vma *vma,
1042 enum i915_cache_level cache_level) 1331 enum i915_cache_level cache_level,
1332 u32 flags)
1043{ 1333{
1044 struct drm_device *dev = obj->base.dev; 1334 struct drm_device *dev = vma->vm->dev;
1045 struct drm_i915_private *dev_priv = dev->dev_private; 1335 struct drm_i915_private *dev_priv = dev->dev_private;
1046 const unsigned long entry = i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT; 1336 struct drm_i915_gem_object *obj = vma->obj;
1337 const unsigned long entry = vma->node.start >> PAGE_SHIFT;
1047 1338
1048 dev_priv->gtt.base.insert_entries(&dev_priv->gtt.base, obj->pages, 1339 /* If there is no aliasing PPGTT, or the caller needs a global mapping,
1049 entry, 1340 * or we have a global mapping already but the cacheability flags have
1050 cache_level); 1341 * changed, set the global PTEs.
1342 *
1343 * If there is an aliasing PPGTT it is anecdotally faster, so use that
1344 * instead if none of the above hold true.
1345 *
1346 * NB: A global mapping should only be needed for special regions like
1347 * "gtt mappable", SNB errata, or if specified via special execbuf
1348 * flags. At all other times, the GPU will use the aliasing PPGTT.
1349 */
1350 if (!dev_priv->mm.aliasing_ppgtt || flags & GLOBAL_BIND) {
1351 if (!obj->has_global_gtt_mapping ||
1352 (cache_level != obj->cache_level)) {
1353 vma->vm->insert_entries(vma->vm, obj->pages, entry,
1354 cache_level);
1355 obj->has_global_gtt_mapping = 1;
1356 }
1357 }
1051 1358
1052 obj->has_global_gtt_mapping = 1; 1359 if (dev_priv->mm.aliasing_ppgtt &&
1360 (!obj->has_aliasing_ppgtt_mapping ||
1361 (cache_level != obj->cache_level))) {
1362 struct i915_hw_ppgtt *appgtt = dev_priv->mm.aliasing_ppgtt;
1363 appgtt->base.insert_entries(&appgtt->base,
1364 vma->obj->pages, entry, cache_level);
1365 vma->obj->has_aliasing_ppgtt_mapping = 1;
1366 }
1053} 1367}
1054 1368
1055void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj) 1369static void ggtt_unbind_vma(struct i915_vma *vma)
1056{ 1370{
1057 struct drm_device *dev = obj->base.dev; 1371 struct drm_device *dev = vma->vm->dev;
1058 struct drm_i915_private *dev_priv = dev->dev_private; 1372 struct drm_i915_private *dev_priv = dev->dev_private;
1059 const unsigned long entry = i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT; 1373 struct drm_i915_gem_object *obj = vma->obj;
1060 1374 const unsigned long entry = vma->node.start >> PAGE_SHIFT;
1061 dev_priv->gtt.base.clear_range(&dev_priv->gtt.base, 1375
1062 entry, 1376 if (obj->has_global_gtt_mapping) {
1063 obj->base.size >> PAGE_SHIFT, 1377 vma->vm->clear_range(vma->vm, entry,
1064 true); 1378 vma->obj->base.size >> PAGE_SHIFT,
1379 true);
1380 obj->has_global_gtt_mapping = 0;
1381 }
1065 1382
1066 obj->has_global_gtt_mapping = 0; 1383 if (obj->has_aliasing_ppgtt_mapping) {
1384 struct i915_hw_ppgtt *appgtt = dev_priv->mm.aliasing_ppgtt;
1385 appgtt->base.clear_range(&appgtt->base,
1386 entry,
1387 obj->base.size >> PAGE_SHIFT,
1388 true);
1389 obj->has_aliasing_ppgtt_mapping = 0;
1390 }
1067} 1391}
1068 1392
1069void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj) 1393void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
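
The comment block in ggtt_bind_vma() encodes a small decision table: write the global GTT PTEs when there is no aliasing PPGTT or GLOBAL_BIND was requested (and the global mapping is missing or its cache level changed), and additionally write the aliasing-PPGTT PTEs whenever one exists and its mapping is stale. A standalone sketch of just that decision logic, with booleans standing in for the driver state:

#include <stdbool.h>
#include <stdio.h>

#define GLOBAL_BIND (1 << 0)

struct bind_state {
    bool has_aliasing_ppgtt;   /* dev_priv->mm.aliasing_ppgtt != NULL */
    bool has_global_mapping;   /* obj->has_global_gtt_mapping */
    bool has_ppgtt_mapping;    /* obj->has_aliasing_ppgtt_mapping */
    bool cache_level_changed;  /* cache_level != obj->cache_level */
};

static void bind(struct bind_state *s, unsigned flags)
{
    if (!s->has_aliasing_ppgtt || (flags & GLOBAL_BIND)) {
        if (!s->has_global_mapping || s->cache_level_changed) {
            printf("write global GTT PTEs\n");
            s->has_global_mapping = true;
        }
    }

    if (s->has_aliasing_ppgtt &&
        (!s->has_ppgtt_mapping || s->cache_level_changed)) {
        printf("write aliasing PPGTT PTEs\n");
        s->has_ppgtt_mapping = true;
    }
}

int main(void)
{
    struct bind_state s = { .has_aliasing_ppgtt = true };

    bind(&s, 0);           /* normal bind: PPGTT only */
    bind(&s, GLOBAL_BIND); /* e.g. scanout: also write the global PTEs */
    return 0;
}
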
@@ -1155,21 +1479,6 @@ void i915_gem_setup_global_gtt(struct drm_device *dev,
1155 ggtt_vm->clear_range(ggtt_vm, end / PAGE_SIZE - 1, 1, true); 1479 ggtt_vm->clear_range(ggtt_vm, end / PAGE_SIZE - 1, 1, true);
1156} 1480}
1157 1481
1158static bool
1159intel_enable_ppgtt(struct drm_device *dev)
1160{
1161 if (i915_enable_ppgtt >= 0)
1162 return i915_enable_ppgtt;
1163
1164#ifdef CONFIG_INTEL_IOMMU
1165 /* Disable ppgtt on SNB if VT-d is on. */
1166 if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
1167 return false;
1168#endif
1169
1170 return true;
1171}
1172
1173void i915_gem_init_global_gtt(struct drm_device *dev) 1482void i915_gem_init_global_gtt(struct drm_device *dev)
1174{ 1483{
1175 struct drm_i915_private *dev_priv = dev->dev_private; 1484 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1178,26 +1487,6 @@ void i915_gem_init_global_gtt(struct drm_device *dev)
1178 gtt_size = dev_priv->gtt.base.total; 1487 gtt_size = dev_priv->gtt.base.total;
1179 mappable_size = dev_priv->gtt.mappable_end; 1488 mappable_size = dev_priv->gtt.mappable_end;
1180 1489
1181 if (intel_enable_ppgtt(dev) && HAS_ALIASING_PPGTT(dev)) {
1182 int ret;
1183
1184 if (INTEL_INFO(dev)->gen <= 7) {
1185 /* PPGTT pdes are stolen from global gtt ptes, so shrink the
1186 * aperture accordingly when using aliasing ppgtt. */
1187 gtt_size -= GEN6_PPGTT_PD_ENTRIES * PAGE_SIZE;
1188 }
1189
1190 i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size);
1191
1192 ret = i915_gem_init_aliasing_ppgtt(dev);
1193 if (!ret)
1194 return;
1195
1196 DRM_ERROR("Aliased PPGTT setup failed %d\n", ret);
1197 drm_mm_takedown(&dev_priv->gtt.base.mm);
1198 if (INTEL_INFO(dev)->gen < 8)
1199 gtt_size += GEN6_PPGTT_PD_ENTRIES*PAGE_SIZE;
1200 }
1201 i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size); 1490 i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size);
1202} 1491}
1203 1492
@@ -1253,7 +1542,7 @@ static inline unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl)
1253 if (bdw_gmch_ctl) 1542 if (bdw_gmch_ctl)
1254 bdw_gmch_ctl = 1 << bdw_gmch_ctl; 1543 bdw_gmch_ctl = 1 << bdw_gmch_ctl;
1255 if (bdw_gmch_ctl > 4) { 1544 if (bdw_gmch_ctl > 4) {
1256 WARN_ON(!i915_preliminary_hw_support); 1545 WARN_ON(!i915.preliminary_hw_support);
1257 return 4<<20; 1546 return 4<<20;
1258 } 1547 }
1259 1548
@@ -1438,7 +1727,6 @@ static int i915_gmch_probe(struct drm_device *dev,
1438 1727
1439 dev_priv->gtt.do_idle_maps = needs_idle_maps(dev_priv->dev); 1728 dev_priv->gtt.do_idle_maps = needs_idle_maps(dev_priv->dev);
1440 dev_priv->gtt.base.clear_range = i915_ggtt_clear_range; 1729 dev_priv->gtt.base.clear_range = i915_ggtt_clear_range;
1441 dev_priv->gtt.base.insert_entries = i915_ggtt_insert_entries;
1442 1730
1443 if (unlikely(dev_priv->gtt.do_idle_maps)) 1731 if (unlikely(dev_priv->gtt.do_idle_maps))
1444 DRM_INFO("applying Ironlake quirks for intel_iommu\n"); 1732 DRM_INFO("applying Ironlake quirks for intel_iommu\n");
@@ -1493,3 +1781,62 @@ int i915_gem_gtt_init(struct drm_device *dev)
1493 1781
1494 return 0; 1782 return 0;
1495} 1783}
1784
1785static struct i915_vma *__i915_gem_vma_create(struct drm_i915_gem_object *obj,
1786 struct i915_address_space *vm)
1787{
1788 struct i915_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL);
1789 if (vma == NULL)
1790 return ERR_PTR(-ENOMEM);
1791
1792 INIT_LIST_HEAD(&vma->vma_link);
1793 INIT_LIST_HEAD(&vma->mm_list);
1794 INIT_LIST_HEAD(&vma->exec_list);
1795 vma->vm = vm;
1796 vma->obj = obj;
1797
1798 switch (INTEL_INFO(vm->dev)->gen) {
1799 case 8:
1800 case 7:
1801 case 6:
1802 if (i915_is_ggtt(vm)) {
1803 vma->unbind_vma = ggtt_unbind_vma;
1804 vma->bind_vma = ggtt_bind_vma;
1805 } else {
1806 vma->unbind_vma = ppgtt_unbind_vma;
1807 vma->bind_vma = ppgtt_bind_vma;
1808 }
1809 break;
1810 case 5:
1811 case 4:
1812 case 3:
1813 case 2:
1814 BUG_ON(!i915_is_ggtt(vm));
1815 vma->unbind_vma = i915_ggtt_unbind_vma;
1816 vma->bind_vma = i915_ggtt_bind_vma;
1817 break;
1818 default:
1819 BUG();
1820 }
1821
1822 /* Keep GGTT vmas first to make debug easier */
1823 if (i915_is_ggtt(vm))
1824 list_add(&vma->vma_link, &obj->vma_list);
1825 else
1826 list_add_tail(&vma->vma_link, &obj->vma_list);
1827
1828 return vma;
1829}
1830
1831struct i915_vma *
1832i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
1833 struct i915_address_space *vm)
1834{
1835 struct i915_vma *vma;
1836
1837 vma = i915_gem_obj_to_vma(obj, vm);
1838 if (!vma)
1839 vma = __i915_gem_vma_create(obj, vm);
1840
1841 return vma;
1842}
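
i915_gem_obj_lookup_or_create_vma() is the usual lookup-or-create idiom: return the existing (object, address space) binding if there is one, otherwise allocate a new VMA and link it into the object's list (GGTT VMAs are kept at the head, per the comment above). A compact standalone sketch of that idiom with a hypothetical, simplified VMA type:

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical, stripped-down object/vma pair for the lookup-or-create idiom. */
struct vma {
    int vm_id;
    struct vma *next;
};

struct object {
    struct vma *vma_list;
};

static struct vma *lookup_vma(struct object *obj, int vm_id)
{
    struct vma *vma;

    for (vma = obj->vma_list; vma; vma = vma->next)
        if (vma->vm_id == vm_id)
            return vma;
    return NULL;
}

static struct vma *lookup_or_create_vma(struct object *obj, int vm_id)
{
    struct vma *vma = lookup_vma(obj, vm_id);

    if (!vma) {
        vma = calloc(1, sizeof(*vma));
        if (!vma)
            return NULL;
        vma->vm_id = vm_id;
        vma->next = obj->vma_list;  /* prepend, as the GGTT case does */
        obj->vma_list = vma;
    }
    return vma;
}

int main(void)
{
    struct object obj = { NULL };
    struct vma *a = lookup_or_create_vma(&obj, 1);
    struct vma *b = lookup_or_create_vma(&obj, 1);

    printf("same vma returned on second lookup: %s\n", a == b ? "yes" : "no");
    return 0;
}
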
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index b13905348048..eb993584aa6b 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -308,7 +308,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
308 return -EINVAL; 308 return -EINVAL;
309 } 309 }
310 310
311 if (obj->pin_count || obj->framebuffer_references) { 311 if (i915_gem_obj_is_pinned(obj) || obj->framebuffer_references) {
312 drm_gem_object_unreference_unlocked(&obj->base); 312 drm_gem_object_unreference_unlocked(&obj->base);
313 return -EBUSY; 313 return -EBUSY;
314 } 314 }
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 990cf8f43efd..000b3694f349 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -238,50 +238,61 @@ static const char *hangcheck_action_to_str(enum intel_ring_hangcheck_action a)
238 238
239static void i915_ring_error_state(struct drm_i915_error_state_buf *m, 239static void i915_ring_error_state(struct drm_i915_error_state_buf *m,
240 struct drm_device *dev, 240 struct drm_device *dev,
241 struct drm_i915_error_state *error, 241 struct drm_i915_error_ring *ring)
242 unsigned ring)
243{ 242{
244 BUG_ON(ring >= I915_NUM_RINGS); /* shut up confused gcc */ 243 if (!ring->valid)
245 if (!error->ring[ring].valid)
246 return; 244 return;
247 245
248 err_printf(m, "%s command stream:\n", ring_str(ring)); 246 err_printf(m, " HEAD: 0x%08x\n", ring->head);
249 err_printf(m, " HEAD: 0x%08x\n", error->head[ring]); 247 err_printf(m, " TAIL: 0x%08x\n", ring->tail);
250 err_printf(m, " TAIL: 0x%08x\n", error->tail[ring]); 248 err_printf(m, " CTL: 0x%08x\n", ring->ctl);
251 err_printf(m, " CTL: 0x%08x\n", error->ctl[ring]); 249 err_printf(m, " HWS: 0x%08x\n", ring->hws);
252 err_printf(m, " ACTHD: 0x%08x\n", error->acthd[ring]); 250 err_printf(m, " ACTHD: 0x%08x\n", ring->acthd);
253 err_printf(m, " IPEIR: 0x%08x\n", error->ipeir[ring]); 251 err_printf(m, " IPEIR: 0x%08x\n", ring->ipeir);
254 err_printf(m, " IPEHR: 0x%08x\n", error->ipehr[ring]); 252 err_printf(m, " IPEHR: 0x%08x\n", ring->ipehr);
255 err_printf(m, " INSTDONE: 0x%08x\n", error->instdone[ring]); 253 err_printf(m, " INSTDONE: 0x%08x\n", ring->instdone);
256 if (INTEL_INFO(dev)->gen >= 4) { 254 if (INTEL_INFO(dev)->gen >= 4) {
257 err_printf(m, " BBADDR: 0x%08llx\n", error->bbaddr[ring]); 255 err_printf(m, " BBADDR: 0x%08llx\n", ring->bbaddr);
258 err_printf(m, " BB_STATE: 0x%08x\n", error->bbstate[ring]); 256 err_printf(m, " BB_STATE: 0x%08x\n", ring->bbstate);
259 err_printf(m, " INSTPS: 0x%08x\n", error->instps[ring]); 257 err_printf(m, " INSTPS: 0x%08x\n", ring->instps);
260 } 258 }
261 err_printf(m, " INSTPM: 0x%08x\n", error->instpm[ring]); 259 err_printf(m, " INSTPM: 0x%08x\n", ring->instpm);
262 err_printf(m, " FADDR: 0x%08x\n", error->faddr[ring]); 260 err_printf(m, " FADDR: 0x%08x\n", ring->faddr);
263 if (INTEL_INFO(dev)->gen >= 6) { 261 if (INTEL_INFO(dev)->gen >= 6) {
264 err_printf(m, " RC PSMI: 0x%08x\n", error->rc_psmi[ring]); 262 err_printf(m, " RC PSMI: 0x%08x\n", ring->rc_psmi);
265 err_printf(m, " FAULT_REG: 0x%08x\n", error->fault_reg[ring]); 263 err_printf(m, " FAULT_REG: 0x%08x\n", ring->fault_reg);
266 err_printf(m, " SYNC_0: 0x%08x [last synced 0x%08x]\n", 264 err_printf(m, " SYNC_0: 0x%08x [last synced 0x%08x]\n",
267 error->semaphore_mboxes[ring][0], 265 ring->semaphore_mboxes[0],
268 error->semaphore_seqno[ring][0]); 266 ring->semaphore_seqno[0]);
269 err_printf(m, " SYNC_1: 0x%08x [last synced 0x%08x]\n", 267 err_printf(m, " SYNC_1: 0x%08x [last synced 0x%08x]\n",
270 error->semaphore_mboxes[ring][1], 268 ring->semaphore_mboxes[1],
271 error->semaphore_seqno[ring][1]); 269 ring->semaphore_seqno[1]);
272 if (HAS_VEBOX(dev)) { 270 if (HAS_VEBOX(dev)) {
273 err_printf(m, " SYNC_2: 0x%08x [last synced 0x%08x]\n", 271 err_printf(m, " SYNC_2: 0x%08x [last synced 0x%08x]\n",
274 error->semaphore_mboxes[ring][2], 272 ring->semaphore_mboxes[2],
275 error->semaphore_seqno[ring][2]); 273 ring->semaphore_seqno[2]);
276 } 274 }
277 } 275 }
278 err_printf(m, " seqno: 0x%08x\n", error->seqno[ring]); 276 if (USES_PPGTT(dev)) {
279 err_printf(m, " waiting: %s\n", yesno(error->waiting[ring])); 277 err_printf(m, " GFX_MODE: 0x%08x\n", ring->vm_info.gfx_mode);
280 err_printf(m, " ring->head: 0x%08x\n", error->cpu_ring_head[ring]); 278
281 err_printf(m, " ring->tail: 0x%08x\n", error->cpu_ring_tail[ring]); 279 if (INTEL_INFO(dev)->gen >= 8) {
280 int i;
281 for (i = 0; i < 4; i++)
282 err_printf(m, " PDP%d: 0x%016llx\n",
283 i, ring->vm_info.pdp[i]);
284 } else {
285 err_printf(m, " PP_DIR_BASE: 0x%08x\n",
286 ring->vm_info.pp_dir_base);
287 }
288 }
289 err_printf(m, " seqno: 0x%08x\n", ring->seqno);
290 err_printf(m, " waiting: %s\n", yesno(ring->waiting));
291 err_printf(m, " ring->head: 0x%08x\n", ring->cpu_ring_head);
292 err_printf(m, " ring->tail: 0x%08x\n", ring->cpu_ring_tail);
282 err_printf(m, " hangcheck: %s [%d]\n", 293 err_printf(m, " hangcheck: %s [%d]\n",
283 hangcheck_action_to_str(error->hangcheck_action[ring]), 294 hangcheck_action_to_str(ring->hangcheck_action),
284 error->hangcheck_score[ring]); 295 ring->hangcheck_score);
285} 296}
286 297
287void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...) 298void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...)
@@ -333,8 +344,10 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
333 if (INTEL_INFO(dev)->gen == 7) 344 if (INTEL_INFO(dev)->gen == 7)
334 err_printf(m, "ERR_INT: 0x%08x\n", error->err_int); 345 err_printf(m, "ERR_INT: 0x%08x\n", error->err_int);
335 346
336 for (i = 0; i < ARRAY_SIZE(error->ring); i++) 347 for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
337 i915_ring_error_state(m, dev, error, i); 348 err_printf(m, "%s command stream:\n", ring_str(i));
349 i915_ring_error_state(m, dev, &error->ring[i]);
350 }
338 351
339 if (error->active_bo) 352 if (error->active_bo)
340 print_error_buffers(m, "Active", 353 print_error_buffers(m, "Active",
@@ -390,6 +403,22 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
390 } 403 }
391 } 404 }
392 405
406 if ((obj = error->ring[i].hws_page)) {
407 err_printf(m, "%s --- HW Status = 0x%08x\n",
408 dev_priv->ring[i].name,
409 obj->gtt_offset);
410 offset = 0;
411 for (elt = 0; elt < PAGE_SIZE/16; elt += 4) {
412 err_printf(m, "[%04x] %08x %08x %08x %08x\n",
413 offset,
414 obj->pages[0][elt],
415 obj->pages[0][elt+1],
416 obj->pages[0][elt+2],
417 obj->pages[0][elt+3]);
418 offset += 16;
419 }
420 }
421
393 if ((obj = error->ring[i].ctx)) { 422 if ((obj = error->ring[i].ctx)) {
394 err_printf(m, "%s --- HW Context = 0x%08x\n", 423 err_printf(m, "%s --- HW Context = 0x%08x\n",
395 dev_priv->ring[i].name, 424 dev_priv->ring[i].name,
@@ -472,6 +501,7 @@ static void i915_error_state_free(struct kref *error_ref)
472 for (i = 0; i < ARRAY_SIZE(error->ring); i++) { 501 for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
473 i915_error_object_free(error->ring[i].batchbuffer); 502 i915_error_object_free(error->ring[i].batchbuffer);
474 i915_error_object_free(error->ring[i].ringbuffer); 503 i915_error_object_free(error->ring[i].ringbuffer);
504 i915_error_object_free(error->ring[i].hws_page);
475 i915_error_object_free(error->ring[i].ctx); 505 i915_error_object_free(error->ring[i].ctx);
476 kfree(error->ring[i].requests); 506 kfree(error->ring[i].requests);
477 } 507 }
@@ -485,6 +515,7 @@ static void i915_error_state_free(struct kref *error_ref)
485static struct drm_i915_error_object * 515static struct drm_i915_error_object *
486i915_error_object_create_sized(struct drm_i915_private *dev_priv, 516i915_error_object_create_sized(struct drm_i915_private *dev_priv,
487 struct drm_i915_gem_object *src, 517 struct drm_i915_gem_object *src,
518 struct i915_address_space *vm,
488 const int num_pages) 519 const int num_pages)
489{ 520{
490 struct drm_i915_error_object *dst; 521 struct drm_i915_error_object *dst;
@@ -498,7 +529,7 @@ i915_error_object_create_sized(struct drm_i915_private *dev_priv,
498 if (dst == NULL) 529 if (dst == NULL)
499 return NULL; 530 return NULL;
500 531
501 reloc_offset = dst->gtt_offset = i915_gem_obj_ggtt_offset(src); 532 reloc_offset = dst->gtt_offset = i915_gem_obj_offset(src, vm);
502 for (i = 0; i < num_pages; i++) { 533 for (i = 0; i < num_pages; i++) {
503 unsigned long flags; 534 unsigned long flags;
504 void *d; 535 void *d;
@@ -508,8 +539,10 @@ i915_error_object_create_sized(struct drm_i915_private *dev_priv,
508 goto unwind; 539 goto unwind;
509 540
510 local_irq_save(flags); 541 local_irq_save(flags);
511 if (reloc_offset < dev_priv->gtt.mappable_end && 542 if (src->cache_level == I915_CACHE_NONE &&
512 src->has_global_gtt_mapping) { 543 reloc_offset < dev_priv->gtt.mappable_end &&
544 src->has_global_gtt_mapping &&
545 i915_is_ggtt(vm)) {
513 void __iomem *s; 546 void __iomem *s;
514 547
515 /* Simply ignore tiling or any overlapping fence. 548 /* Simply ignore tiling or any overlapping fence.
@@ -559,8 +592,12 @@ unwind:
559 kfree(dst); 592 kfree(dst);
560 return NULL; 593 return NULL;
561} 594}
562#define i915_error_object_create(dev_priv, src) \ 595#define i915_error_object_create(dev_priv, src, vm) \
563 i915_error_object_create_sized((dev_priv), (src), \ 596 i915_error_object_create_sized((dev_priv), (src), (vm), \
597 (src)->base.size>>PAGE_SHIFT)
598
599#define i915_error_ggtt_object_create(dev_priv, src) \
600 i915_error_object_create_sized((dev_priv), (src), &(dev_priv)->gtt.base, \
564 (src)->base.size>>PAGE_SHIFT) 601 (src)->base.size>>PAGE_SHIFT)
565 602
566static void capture_bo(struct drm_i915_error_buffer *err, 603static void capture_bo(struct drm_i915_error_buffer *err,
@@ -575,7 +612,7 @@ static void capture_bo(struct drm_i915_error_buffer *err,
575 err->write_domain = obj->base.write_domain; 612 err->write_domain = obj->base.write_domain;
576 err->fence_reg = obj->fence_reg; 613 err->fence_reg = obj->fence_reg;
577 err->pinned = 0; 614 err->pinned = 0;
578 if (obj->pin_count > 0) 615 if (i915_gem_obj_is_pinned(obj))
579 err->pinned = 1; 616 err->pinned = 1;
580 if (obj->user_pin_count > 0) 617 if (obj->user_pin_count > 0)
581 err->pinned = -1; 618 err->pinned = -1;
@@ -608,7 +645,7 @@ static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
608 int i = 0; 645 int i = 0;
609 646
610 list_for_each_entry(obj, head, global_list) { 647 list_for_each_entry(obj, head, global_list) {
611 if (obj->pin_count == 0) 648 if (!i915_gem_obj_is_pinned(obj))
612 continue; 649 continue;
613 650
614 capture_bo(err++, obj); 651 capture_bo(err++, obj);
@@ -619,6 +656,33 @@ static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
619 return i; 656 return i;
620} 657}
621 658
 659/* Generate a semi-unique error code. The code is not meant to have meaning. The
660 * code's only purpose is to try to prevent false duplicated bug reports by
661 * grossly estimating a GPU error state.
662 *
663 * TODO Ideally, hashing the batchbuffer would be a very nice way to determine
664 * the hang if we could strip the GTT offset information from it.
665 *
666 * It's only a small step better than a random number in its current form.
667 */
668static uint32_t i915_error_generate_code(struct drm_i915_private *dev_priv,
669 struct drm_i915_error_state *error)
670{
671 uint32_t error_code = 0;
672 int i;
673
674 /* IPEHR would be an ideal way to detect errors, as it's the gross
675 * measure of "the command that hung." However, has some very common
676 * synchronization commands which almost always appear in the case
677 * strictly a client bug. Use instdone to differentiate those some.
678 */
679 for (i = 0; i < I915_NUM_RINGS; i++)
680 if (error->ring[i].hangcheck_action == HANGCHECK_HUNG)
681 return error->ring[i].ipehr ^ error->ring[i].instdone;
682
683 return error_code;
684}
685
622static void i915_gem_record_fences(struct drm_device *dev, 686static void i915_gem_record_fences(struct drm_device *dev,
623 struct drm_i915_error_state *error) 687 struct drm_i915_error_state *error)
624{ 688{
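
i915_error_generate_code() above reduces to: find the first ring whose hangcheck verdict is HANGCHECK_HUNG and return ipehr ^ instdone for it, so that reports of the same hang tend to produce the same code. A standalone sketch of that reduction, using made-up register values:

#include <stdint.h>
#include <stdio.h>

#define NUM_RINGS 3

struct ring_error {
    int hung;          /* stand-in for hangcheck_action == HANGCHECK_HUNG */
    uint32_t ipehr;
    uint32_t instdone;
};

static uint32_t generate_code(const struct ring_error *rings, int n)
{
    int i;

    /* First hung ring wins; 0 if nothing was flagged as hung. */
    for (i = 0; i < n; i++)
        if (rings[i].hung)
            return rings[i].ipehr ^ rings[i].instdone;
    return 0;
}

int main(void)
{
    /* Made-up values purely to exercise the XOR. */
    struct ring_error rings[NUM_RINGS] = {
        { 0, 0x02000000, 0xffffffff },
        { 1, 0x0b160001, 0xfffffffe },
        { 0, 0x00000000, 0x00000000 },
    };

    printf("error code: 0x%08x\n", generate_code(rings, NUM_RINGS));
    return 0;
}
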
@@ -652,6 +716,32 @@ static void i915_gem_record_fences(struct drm_device *dev,
652 } 716 }
653} 717}
654 718
719/* This assumes all batchbuffers are executed from the PPGTT. It might have to
720 * change in the future. */
721static bool is_active_vm(struct i915_address_space *vm,
722 struct intel_ring_buffer *ring)
723{
724 struct drm_device *dev = vm->dev;
725 struct drm_i915_private *dev_priv = dev->dev_private;
726 struct i915_hw_ppgtt *ppgtt;
727
728 if (INTEL_INFO(dev)->gen < 7)
729 return i915_is_ggtt(vm);
730
731 /* FIXME: This ignores that the global gtt vm is also on this list. */
732 ppgtt = container_of(vm, struct i915_hw_ppgtt, base);
733
734 if (INTEL_INFO(dev)->gen >= 8) {
735 u64 pdp0 = (u64)I915_READ(GEN8_RING_PDP_UDW(ring, 0)) << 32;
736 pdp0 |= I915_READ(GEN8_RING_PDP_LDW(ring, 0));
737 return pdp0 == ppgtt->pd_dma_addr[0];
738 } else {
739 u32 pp_db;
740 pp_db = I915_READ(RING_PP_DIR_BASE(ring));
741 return (pp_db >> 10) == ppgtt->pd_offset;
742 }
743}
744
655static struct drm_i915_error_object * 745static struct drm_i915_error_object *
656i915_error_first_batchbuffer(struct drm_i915_private *dev_priv, 746i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
657 struct intel_ring_buffer *ring) 747 struct intel_ring_buffer *ring)
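
is_active_vm() decides which address space a ring was actually executing from: on gen6/7 it reads RING_PP_DIR_BASE and compares the value shifted right by 10 against the PPGTT's pd_offset, while gen8 compares the ring's PDP registers against pd_dma_addr[0]. A standalone sketch of the gen6/7-style comparison with a hypothetical pd_offset:

#include <stdint.h>
#include <stdio.h>

/* The PDEs live in the global GTT, so pd_offset is a byte offset into the
 * GTT PTE array; the register holds that offset shifted left by 10. */
static int pp_dir_base_matches(uint32_t pp_dir_base_reg, uint32_t pd_offset)
{
    return (pp_dir_base_reg >> 10) == pd_offset;
}

int main(void)
{
    uint32_t pd_offset = 0x1ff800;     /* hypothetical value, matching the earlier arithmetic */
    uint32_t reg = pd_offset << 10;    /* what the driver would have programmed */

    printf("vm active on this ring: %s\n",
           pp_dir_base_matches(reg, pd_offset) ? "yes" : "no");
    return 0;
}
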
@@ -659,6 +749,7 @@ i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
659 struct i915_address_space *vm; 749 struct i915_address_space *vm;
660 struct i915_vma *vma; 750 struct i915_vma *vma;
661 struct drm_i915_gem_object *obj; 751 struct drm_i915_gem_object *obj;
752 bool found_active = false;
662 u32 seqno; 753 u32 seqno;
663 754
664 if (!ring->get_seqno) 755 if (!ring->get_seqno)
@@ -674,11 +765,16 @@ i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
674 if (obj != NULL && 765 if (obj != NULL &&
675 acthd >= i915_gem_obj_ggtt_offset(obj) && 766 acthd >= i915_gem_obj_ggtt_offset(obj) &&
676 acthd < i915_gem_obj_ggtt_offset(obj) + obj->base.size) 767 acthd < i915_gem_obj_ggtt_offset(obj) + obj->base.size)
677 return i915_error_object_create(dev_priv, obj); 768 return i915_error_ggtt_object_create(dev_priv, obj);
678 } 769 }
679 770
680 seqno = ring->get_seqno(ring, false); 771 seqno = ring->get_seqno(ring, false);
681 list_for_each_entry(vm, &dev_priv->vm_list, global_link) { 772 list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
773 if (!is_active_vm(vm, ring))
774 continue;
775
776 found_active = true;
777
682 list_for_each_entry(vma, &vm->active_list, mm_list) { 778 list_for_each_entry(vma, &vm->active_list, mm_list) {
683 obj = vma->obj; 779 obj = vma->obj;
684 if (obj->ring != ring) 780 if (obj->ring != ring)
@@ -693,66 +789,120 @@ i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
693 /* We need to copy these to an anonymous buffer as the simplest 789 /* We need to copy these to an anonymous buffer as the simplest
694 * method to avoid being overwritten by userspace. 790 * method to avoid being overwritten by userspace.
695 */ 791 */
696 return i915_error_object_create(dev_priv, obj); 792 return i915_error_object_create(dev_priv, obj, vm);
697 } 793 }
698 } 794 }
699 795
796 WARN_ON(!found_active);
700 return NULL; 797 return NULL;
701} 798}
702 799
703static void i915_record_ring_state(struct drm_device *dev, 800static void i915_record_ring_state(struct drm_device *dev,
704 struct drm_i915_error_state *error, 801 struct intel_ring_buffer *ring,
705 struct intel_ring_buffer *ring) 802 struct drm_i915_error_ring *ering)
706{ 803{
707 struct drm_i915_private *dev_priv = dev->dev_private; 804 struct drm_i915_private *dev_priv = dev->dev_private;
708 805
709 if (INTEL_INFO(dev)->gen >= 6) { 806 if (INTEL_INFO(dev)->gen >= 6) {
710 error->rc_psmi[ring->id] = I915_READ(ring->mmio_base + 0x50); 807 ering->rc_psmi = I915_READ(ring->mmio_base + 0x50);
711 error->fault_reg[ring->id] = I915_READ(RING_FAULT_REG(ring)); 808 ering->fault_reg = I915_READ(RING_FAULT_REG(ring));
712 error->semaphore_mboxes[ring->id][0] 809 ering->semaphore_mboxes[0]
713 = I915_READ(RING_SYNC_0(ring->mmio_base)); 810 = I915_READ(RING_SYNC_0(ring->mmio_base));
714 error->semaphore_mboxes[ring->id][1] 811 ering->semaphore_mboxes[1]
715 = I915_READ(RING_SYNC_1(ring->mmio_base)); 812 = I915_READ(RING_SYNC_1(ring->mmio_base));
716 error->semaphore_seqno[ring->id][0] = ring->sync_seqno[0]; 813 ering->semaphore_seqno[0] = ring->sync_seqno[0];
717 error->semaphore_seqno[ring->id][1] = ring->sync_seqno[1]; 814 ering->semaphore_seqno[1] = ring->sync_seqno[1];
718 } 815 }
719 816
720 if (HAS_VEBOX(dev)) { 817 if (HAS_VEBOX(dev)) {
721 error->semaphore_mboxes[ring->id][2] = 818 ering->semaphore_mboxes[2] =
722 I915_READ(RING_SYNC_2(ring->mmio_base)); 819 I915_READ(RING_SYNC_2(ring->mmio_base));
723 error->semaphore_seqno[ring->id][2] = ring->sync_seqno[2]; 820 ering->semaphore_seqno[2] = ring->sync_seqno[2];
724 } 821 }
725 822
726 if (INTEL_INFO(dev)->gen >= 4) { 823 if (INTEL_INFO(dev)->gen >= 4) {
727 error->faddr[ring->id] = I915_READ(RING_DMA_FADD(ring->mmio_base)); 824 ering->faddr = I915_READ(RING_DMA_FADD(ring->mmio_base));
728 error->ipeir[ring->id] = I915_READ(RING_IPEIR(ring->mmio_base)); 825 ering->ipeir = I915_READ(RING_IPEIR(ring->mmio_base));
729 error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base)); 826 ering->ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
730 error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base)); 827 ering->instdone = I915_READ(RING_INSTDONE(ring->mmio_base));
731 error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base)); 828 ering->instps = I915_READ(RING_INSTPS(ring->mmio_base));
732 error->bbaddr[ring->id] = I915_READ(RING_BBADDR(ring->mmio_base)); 829 ering->bbaddr = I915_READ(RING_BBADDR(ring->mmio_base));
733 if (INTEL_INFO(dev)->gen >= 8) 830 if (INTEL_INFO(dev)->gen >= 8)
734 error->bbaddr[ring->id] |= (u64) I915_READ(RING_BBADDR_UDW(ring->mmio_base)) << 32; 831 ering->bbaddr |= (u64) I915_READ(RING_BBADDR_UDW(ring->mmio_base)) << 32;
735 error->bbstate[ring->id] = I915_READ(RING_BBSTATE(ring->mmio_base)); 832 ering->bbstate = I915_READ(RING_BBSTATE(ring->mmio_base));
736 } else { 833 } else {
737 error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX); 834 ering->faddr = I915_READ(DMA_FADD_I8XX);
738 error->ipeir[ring->id] = I915_READ(IPEIR); 835 ering->ipeir = I915_READ(IPEIR);
739 error->ipehr[ring->id] = I915_READ(IPEHR); 836 ering->ipehr = I915_READ(IPEHR);
740 error->instdone[ring->id] = I915_READ(INSTDONE); 837 ering->instdone = I915_READ(INSTDONE);
741 } 838 }
742 839
743 error->waiting[ring->id] = waitqueue_active(&ring->irq_queue); 840 ering->waiting = waitqueue_active(&ring->irq_queue);
744 error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base)); 841 ering->instpm = I915_READ(RING_INSTPM(ring->mmio_base));
745 error->seqno[ring->id] = ring->get_seqno(ring, false); 842 ering->seqno = ring->get_seqno(ring, false);
746 error->acthd[ring->id] = intel_ring_get_active_head(ring); 843 ering->acthd = intel_ring_get_active_head(ring);
747 error->head[ring->id] = I915_READ_HEAD(ring); 844 ering->head = I915_READ_HEAD(ring);
748 error->tail[ring->id] = I915_READ_TAIL(ring); 845 ering->tail = I915_READ_TAIL(ring);
749 error->ctl[ring->id] = I915_READ_CTL(ring); 846 ering->ctl = I915_READ_CTL(ring);
847
848 if (I915_NEED_GFX_HWS(dev)) {
849 int mmio;
850
851 if (IS_GEN7(dev)) {
852 switch (ring->id) {
853 default:
854 case RCS:
855 mmio = RENDER_HWS_PGA_GEN7;
856 break;
857 case BCS:
858 mmio = BLT_HWS_PGA_GEN7;
859 break;
860 case VCS:
861 mmio = BSD_HWS_PGA_GEN7;
862 break;
863 case VECS:
864 mmio = VEBOX_HWS_PGA_GEN7;
865 break;
866 }
867 } else if (IS_GEN6(ring->dev)) {
868 mmio = RING_HWS_PGA_GEN6(ring->mmio_base);
869 } else {
870 /* XXX: gen8 returns to sanity */
871 mmio = RING_HWS_PGA(ring->mmio_base);
872 }
873
874 ering->hws = I915_READ(mmio);
875 }
876
877 ering->cpu_ring_head = ring->head;
878 ering->cpu_ring_tail = ring->tail;
879
880 ering->hangcheck_score = ring->hangcheck.score;
881 ering->hangcheck_action = ring->hangcheck.action;
882
883 if (USES_PPGTT(dev)) {
884 int i;
750 885
751 error->cpu_ring_head[ring->id] = ring->head; 886 ering->vm_info.gfx_mode = I915_READ(RING_MODE_GEN7(ring));
752 error->cpu_ring_tail[ring->id] = ring->tail;
753 887
754 error->hangcheck_score[ring->id] = ring->hangcheck.score; 888 switch (INTEL_INFO(dev)->gen) {
755 error->hangcheck_action[ring->id] = ring->hangcheck.action; 889 case 8:
890 for (i = 0; i < 4; i++) {
891 ering->vm_info.pdp[i] =
892 I915_READ(GEN8_RING_PDP_UDW(ring, i));
893 ering->vm_info.pdp[i] <<= 32;
894 ering->vm_info.pdp[i] |=
895 I915_READ(GEN8_RING_PDP_LDW(ring, i));
896 }
897 break;
898 case 7:
899 ering->vm_info.pp_dir_base = RING_PP_DIR_BASE(ring);
900 break;
901 case 6:
902 ering->vm_info.pp_dir_base = RING_PP_DIR_BASE_READ(ring);
903 break;
904 }
905 }
756} 906}
757 907
758 908
@@ -770,7 +920,9 @@ static void i915_gem_record_active_context(struct intel_ring_buffer *ring,
770 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { 920 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
771 if ((error->ccid & PAGE_MASK) == i915_gem_obj_ggtt_offset(obj)) { 921 if ((error->ccid & PAGE_MASK) == i915_gem_obj_ggtt_offset(obj)) {
772 ering->ctx = i915_error_object_create_sized(dev_priv, 922 ering->ctx = i915_error_object_create_sized(dev_priv,
773 obj, 1); 923 obj,
924 &dev_priv->gtt.base,
925 1);
774 break; 926 break;
775 } 927 }
776 } 928 }
@@ -791,14 +943,17 @@ static void i915_gem_record_rings(struct drm_device *dev,
791 943
792 error->ring[i].valid = true; 944 error->ring[i].valid = true;
793 945
794 i915_record_ring_state(dev, error, ring); 946 i915_record_ring_state(dev, ring, &error->ring[i]);
795 947
796 error->ring[i].batchbuffer = 948 error->ring[i].batchbuffer =
797 i915_error_first_batchbuffer(dev_priv, ring); 949 i915_error_first_batchbuffer(dev_priv, ring);
798 950
799 error->ring[i].ringbuffer = 951 error->ring[i].ringbuffer =
800 i915_error_object_create(dev_priv, ring->obj); 952 i915_error_ggtt_object_create(dev_priv, ring->obj);
801 953
954 if (ring->status_page.obj)
955 error->ring[i].hws_page =
956 i915_error_ggtt_object_create(dev_priv, ring->status_page.obj);
802 957
803 i915_gem_record_active_context(ring, error, &error->ring[i]); 958 i915_gem_record_active_context(ring, error, &error->ring[i]);
804 959
@@ -845,7 +1000,7 @@ static void i915_gem_capture_vm(struct drm_i915_private *dev_priv,
845 i++; 1000 i++;
846 error->active_bo_count[ndx] = i; 1001 error->active_bo_count[ndx] = i;
847 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) 1002 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
848 if (obj->pin_count) 1003 if (i915_gem_obj_is_pinned(obj))
849 i++; 1004 i++;
850 error->pinned_bo_count[ndx] = i - error->active_bo_count[ndx]; 1005 error->pinned_bo_count[ndx] = i - error->active_bo_count[ndx];
851 1006
@@ -879,11 +1034,6 @@ static void i915_gem_capture_buffers(struct drm_i915_private *dev_priv,
879 list_for_each_entry(vm, &dev_priv->vm_list, global_link) 1034 list_for_each_entry(vm, &dev_priv->vm_list, global_link)
880 cnt++; 1035 cnt++;
881 1036
882 if (WARN(cnt > 1, "Multiple VMs not yet supported\n"))
883 cnt = 1;
884
885 vm = &dev_priv->gtt.base;
886
887 error->active_bo = kcalloc(cnt, sizeof(*error->active_bo), GFP_ATOMIC); 1037 error->active_bo = kcalloc(cnt, sizeof(*error->active_bo), GFP_ATOMIC);
888 error->pinned_bo = kcalloc(cnt, sizeof(*error->pinned_bo), GFP_ATOMIC); 1038 error->pinned_bo = kcalloc(cnt, sizeof(*error->pinned_bo), GFP_ATOMIC);
889 error->active_bo_count = kcalloc(cnt, sizeof(*error->active_bo_count), 1039 error->active_bo_count = kcalloc(cnt, sizeof(*error->active_bo_count),
@@ -895,6 +1045,74 @@ static void i915_gem_capture_buffers(struct drm_i915_private *dev_priv,
895 i915_gem_capture_vm(dev_priv, error, vm, i++); 1045 i915_gem_capture_vm(dev_priv, error, vm, i++);
896} 1046}
897 1047
1048/* Capture all registers which don't fit into another category. */
1049static void i915_capture_reg_state(struct drm_i915_private *dev_priv,
1050 struct drm_i915_error_state *error)
1051{
1052 struct drm_device *dev = dev_priv->dev;
1053 int pipe;
1054
1055 /* General organization
1056 * 1. Registers specific to a single generation
1057 * 2. Registers which belong to multiple generations
1058 * 3. Feature specific registers.
1059 * 4. Everything else
1060 * Please try to follow the order.
1061 */
1062
1063 /* 1: Registers specific to a single generation */
1064 if (IS_VALLEYVIEW(dev)) {
1065 error->ier = I915_READ(GTIER) | I915_READ(VLV_IER);
1066 error->forcewake = I915_READ(FORCEWAKE_VLV);
1067 }
1068
1069 if (IS_GEN7(dev))
1070 error->err_int = I915_READ(GEN7_ERR_INT);
1071
1072 if (IS_GEN6(dev)) {
1073 error->forcewake = I915_READ(FORCEWAKE);
1074 error->gab_ctl = I915_READ(GAB_CTL);
1075 error->gfx_mode = I915_READ(GFX_MODE);
1076 }
1077
1078 if (IS_GEN2(dev))
1079 error->ier = I915_READ16(IER);
1080
1081 /* 2: Registers which belong to multiple generations */
1082 if (INTEL_INFO(dev)->gen >= 7)
1083 error->forcewake = I915_READ(FORCEWAKE_MT);
1084
1085 if (INTEL_INFO(dev)->gen >= 6) {
1086 error->derrmr = I915_READ(DERRMR);
1087 error->error = I915_READ(ERROR_GEN6);
1088 error->done_reg = I915_READ(DONE_REG);
1089 }
1090
1091 /* 3: Feature specific registers */
1092 if (IS_GEN6(dev) || IS_GEN7(dev)) {
1093 error->gam_ecochk = I915_READ(GAM_ECOCHK);
1094 error->gac_eco = I915_READ(GAC_ECO_BITS);
1095 }
1096
1097 /* 4: Everything else */
1098 if (HAS_HW_CONTEXTS(dev))
1099 error->ccid = I915_READ(CCID);
1100
1101 if (HAS_PCH_SPLIT(dev))
1102 error->ier = I915_READ(DEIER) | I915_READ(GTIER);
1103 else {
1104 error->ier = I915_READ(IER);
1105 for_each_pipe(pipe)
1106 error->pipestat[pipe] = I915_READ(PIPESTAT(pipe));
1107 }
1108
1109 /* 4: Everything else */
1110 error->eir = I915_READ(EIR);
1111 error->pgtbl_er = I915_READ(PGTBL_ER);
1112
1113 i915_get_extra_instdone(dev, error->extra_instdone);
1114}
1115
898/** 1116/**
899 * i915_capture_error_state - capture an error record for later analysis 1117 * i915_capture_error_state - capture an error record for later analysis
900 * @dev: drm device 1118 * @dev: drm device
@@ -906,10 +1124,11 @@ static void i915_gem_capture_buffers(struct drm_i915_private *dev_priv,
906 */ 1124 */
907void i915_capture_error_state(struct drm_device *dev) 1125void i915_capture_error_state(struct drm_device *dev)
908{ 1126{
1127 static bool warned;
909 struct drm_i915_private *dev_priv = dev->dev_private; 1128 struct drm_i915_private *dev_priv = dev->dev_private;
910 struct drm_i915_error_state *error; 1129 struct drm_i915_error_state *error;
911 unsigned long flags; 1130 unsigned long flags;
912 int pipe; 1131 uint32_t ecode;
913 1132
914 spin_lock_irqsave(&dev_priv->gpu_error.lock, flags); 1133 spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
915 error = dev_priv->gpu_error.first_error; 1134 error = dev_priv->gpu_error.first_error;
@@ -926,53 +1145,22 @@ void i915_capture_error_state(struct drm_device *dev)
926 1145
927 DRM_INFO("GPU crash dump saved to /sys/class/drm/card%d/error\n", 1146 DRM_INFO("GPU crash dump saved to /sys/class/drm/card%d/error\n",
928 dev->primary->index); 1147 dev->primary->index);
929 DRM_INFO("GPU hangs can indicate a bug anywhere in the entire gfx stack, including userspace.\n");
930 DRM_INFO("Please file a _new_ bug report on bugs.freedesktop.org against DRI -> DRM/Intel\n");
931 DRM_INFO("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n");
932 DRM_INFO("The gpu crash dump is required to analyze gpu hangs, so please always attach it.\n");
933
934 kref_init(&error->ref); 1148 kref_init(&error->ref);
935 error->eir = I915_READ(EIR);
936 error->pgtbl_er = I915_READ(PGTBL_ER);
937 if (HAS_HW_CONTEXTS(dev))
938 error->ccid = I915_READ(CCID);
939
940 if (HAS_PCH_SPLIT(dev))
941 error->ier = I915_READ(DEIER) | I915_READ(GTIER);
942 else if (IS_VALLEYVIEW(dev))
943 error->ier = I915_READ(GTIER) | I915_READ(VLV_IER);
944 else if (IS_GEN2(dev))
945 error->ier = I915_READ16(IER);
946 else
947 error->ier = I915_READ(IER);
948
949 if (INTEL_INFO(dev)->gen >= 6)
950 error->derrmr = I915_READ(DERRMR);
951
952 if (IS_VALLEYVIEW(dev))
953 error->forcewake = I915_READ(FORCEWAKE_VLV);
954 else if (INTEL_INFO(dev)->gen >= 7)
955 error->forcewake = I915_READ(FORCEWAKE_MT);
956 else if (INTEL_INFO(dev)->gen == 6)
957 error->forcewake = I915_READ(FORCEWAKE);
958
959 if (!HAS_PCH_SPLIT(dev))
960 for_each_pipe(pipe)
961 error->pipestat[pipe] = I915_READ(PIPESTAT(pipe));
962
963 if (INTEL_INFO(dev)->gen >= 6) {
964 error->error = I915_READ(ERROR_GEN6);
965 error->done_reg = I915_READ(DONE_REG);
966 }
967
968 if (INTEL_INFO(dev)->gen == 7)
969 error->err_int = I915_READ(GEN7_ERR_INT);
970
971 i915_get_extra_instdone(dev, error->extra_instdone);
972 1149
1150 i915_capture_reg_state(dev_priv, error);
973 i915_gem_capture_buffers(dev_priv, error); 1151 i915_gem_capture_buffers(dev_priv, error);
974 i915_gem_record_fences(dev, error); 1152 i915_gem_record_fences(dev, error);
975 i915_gem_record_rings(dev, error); 1153 i915_gem_record_rings(dev, error);
1154 ecode = i915_error_generate_code(dev_priv, error);
1155
1156 if (!warned) {
1157 DRM_INFO("GPU HANG [%x]\n", ecode);
1158 DRM_INFO("GPU hangs can indicate a bug anywhere in the entire gfx stack, including userspace.\n");
1159 DRM_INFO("Please file a _new_ bug report on bugs.freedesktop.org against DRI -> DRM/Intel\n");
1160 DRM_INFO("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n");
1161 DRM_INFO("The gpu crash dump is required to analyze gpu hangs, so please always attach it.\n");
1162 warned = true;
1163 }
976 1164
977 do_gettimeofday(&error->time); 1165 do_gettimeofday(&error->time);
978 1166
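
The bug-reporting banner, now prefixed with the generated error code, is printed only once per boot under a static 'warned' flag. The same print-once shape in miniature:

#include <stdio.h>

static void report_hang(unsigned int ecode)
{
    static int warned;  /* persists across calls, so the banner prints once */

    if (!warned) {
        printf("GPU HANG [%x]\n", ecode);
        printf("Please attach the crash dump when filing a bug report.\n");
        warned = 1;
    }
}

int main(void)
{
    report_hang(0x0b16ffff);
    report_hang(0x12345678);  /* second hang: silent, banner already shown */
    return 0;
}
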
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 9fec71175571..e9c94c91c6a5 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -232,6 +232,18 @@ static bool cpt_can_enable_serr_int(struct drm_device *dev)
232 return true; 232 return true;
233} 233}
234 234
235static void i9xx_clear_fifo_underrun(struct drm_device *dev, enum pipe pipe)
236{
237 struct drm_i915_private *dev_priv = dev->dev_private;
238 u32 reg = PIPESTAT(pipe);
239 u32 pipestat = I915_READ(reg) & 0x7fff0000;
240
241 assert_spin_locked(&dev_priv->irq_lock);
242
243 I915_WRITE(reg, pipestat | PIPE_FIFO_UNDERRUN_STATUS);
244 POSTING_READ(reg);
245}
246
235static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev, 247static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev,
236 enum pipe pipe, bool enable) 248 enum pipe pipe, bool enable)
237{ 249{
@@ -393,7 +405,9 @@ bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
393 405
394 intel_crtc->cpu_fifo_underrun_disabled = !enable; 406 intel_crtc->cpu_fifo_underrun_disabled = !enable;
395 407
396 if (IS_GEN5(dev) || IS_GEN6(dev)) 408 if (enable && (INTEL_INFO(dev)->gen < 5 || IS_VALLEYVIEW(dev)))
409 i9xx_clear_fifo_underrun(dev, pipe);
410 else if (IS_GEN5(dev) || IS_GEN6(dev))
397 ironlake_set_fifo_underrun_reporting(dev, pipe, enable); 411 ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
398 else if (IS_GEN7(dev)) 412 else if (IS_GEN7(dev))
399 ivybridge_set_fifo_underrun_reporting(dev, pipe, enable); 413 ivybridge_set_fifo_underrun_reporting(dev, pipe, enable);
@@ -915,6 +929,11 @@ static void i915_hotplug_work_func(struct work_struct *work)
915 drm_kms_helper_hotplug_event(dev); 929 drm_kms_helper_hotplug_event(dev);
916} 930}
917 931
932static void intel_hpd_irq_uninstall(struct drm_i915_private *dev_priv)
933{
934 del_timer_sync(&dev_priv->hotplug_reenable_timer);
935}
936
918static void ironlake_rps_change_irq_handler(struct drm_device *dev) 937static void ironlake_rps_change_irq_handler(struct drm_device *dev)
919{ 938{
920 drm_i915_private_t *dev_priv = dev->dev_private; 939 drm_i915_private_t *dev_priv = dev->dev_private;
@@ -966,6 +985,43 @@ static void notify_ring(struct drm_device *dev,
966 i915_queue_hangcheck(dev); 985 i915_queue_hangcheck(dev);
967} 986}
968 987
988void gen6_set_pm_mask(struct drm_i915_private *dev_priv,
989 u32 pm_iir, int new_delay)
990{
991 if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
992 if (new_delay >= dev_priv->rps.max_delay) {
993 /* Mask UP THRESHOLD Interrupts */
994 I915_WRITE(GEN6_PMINTRMSK,
995 I915_READ(GEN6_PMINTRMSK) |
996 GEN6_PM_RP_UP_THRESHOLD);
997 dev_priv->rps.rp_up_masked = true;
998 }
999 if (dev_priv->rps.rp_down_masked) {
1000 /* UnMask DOWN THRESHOLD Interrupts */
1001 I915_WRITE(GEN6_PMINTRMSK,
1002 I915_READ(GEN6_PMINTRMSK) &
1003 ~GEN6_PM_RP_DOWN_THRESHOLD);
1004 dev_priv->rps.rp_down_masked = false;
1005 }
1006 } else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
1007 if (new_delay <= dev_priv->rps.min_delay) {
1008 /* Mask DOWN THRESHOLD Interrupts */
1009 I915_WRITE(GEN6_PMINTRMSK,
1010 I915_READ(GEN6_PMINTRMSK) |
1011 GEN6_PM_RP_DOWN_THRESHOLD);
1012 dev_priv->rps.rp_down_masked = true;
1013 }
1014
1015 if (dev_priv->rps.rp_up_masked) {
1016 /* UnMask UP THRESHOLD Interrupts */
1017 I915_WRITE(GEN6_PMINTRMSK,
1018 I915_READ(GEN6_PMINTRMSK) &
1019 ~GEN6_PM_RP_UP_THRESHOLD);
1020 dev_priv->rps.rp_up_masked = false;
1021 }
1022 }
1023}
1024
969static void gen6_pm_rps_work(struct work_struct *work) 1025static void gen6_pm_rps_work(struct work_struct *work)
970{ 1026{
971 drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t, 1027 drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
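
gen6_set_pm_mask() keeps the RPS interrupt mask in step with the new frequency: once new_delay reaches the maximum, further "up threshold" interrupts are masked (and "down" ones unmasked), and symmetrically at the minimum, so the GPU stops raising interrupts the driver can no longer act on. The real code also tracks rp_up_masked/rp_down_masked to avoid redundant register writes; a standalone sketch of just the masking policy, with hypothetical threshold bits:

#include <stdint.h>
#include <stdio.h>

#define PM_UP_THRESHOLD   (1u << 0)  /* hypothetical stand-in bits */
#define PM_DOWN_THRESHOLD (1u << 1)

struct rps {
    uint32_t intr_mask;  /* set bit == interrupt masked */
    int min_delay, max_delay;
};

static void update_pm_mask(struct rps *rps, uint32_t pm_iir, int new_delay)
{
    if (pm_iir & PM_UP_THRESHOLD) {
        if (new_delay >= rps->max_delay)
            rps->intr_mask |= PM_UP_THRESHOLD;   /* can't go any higher */
        rps->intr_mask &= ~PM_DOWN_THRESHOLD;    /* allow going down again */
    } else if (pm_iir & PM_DOWN_THRESHOLD) {
        if (new_delay <= rps->min_delay)
            rps->intr_mask |= PM_DOWN_THRESHOLD; /* can't go any lower */
        rps->intr_mask &= ~PM_UP_THRESHOLD;      /* allow going up again */
    }
}

int main(void)
{
    struct rps rps = { 0, 3, 10 };

    update_pm_mask(&rps, PM_UP_THRESHOLD, 10);   /* hit the ceiling */
    printf("mask after reaching max: 0x%x\n", rps.intr_mask);

    update_pm_mask(&rps, PM_DOWN_THRESHOLD, 7);  /* stepping back down */
    printf("mask after stepping down: 0x%x\n", rps.intr_mask);
    return 0;
}
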
@@ -1023,6 +1079,8 @@ static void gen6_pm_rps_work(struct work_struct *work)
1023 */ 1079 */
1024 new_delay = clamp_t(int, new_delay, 1080 new_delay = clamp_t(int, new_delay,
1025 dev_priv->rps.min_delay, dev_priv->rps.max_delay); 1081 dev_priv->rps.min_delay, dev_priv->rps.max_delay);
1082
1083 gen6_set_pm_mask(dev_priv, pm_iir, new_delay);
1026 dev_priv->rps.last_adj = new_delay - dev_priv->rps.cur_delay; 1084 dev_priv->rps.last_adj = new_delay - dev_priv->rps.cur_delay;
1027 1085
1028 if (IS_VALLEYVIEW(dev_priv->dev)) 1086 if (IS_VALLEYVIEW(dev_priv->dev))
@@ -1236,6 +1294,9 @@ static inline void intel_hpd_irq_handler(struct drm_device *dev,
1236 if (!hotplug_trigger) 1294 if (!hotplug_trigger)
1237 return; 1295 return;
1238 1296
1297 DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
1298 hotplug_trigger);
1299
1239 spin_lock(&dev_priv->irq_lock); 1300 spin_lock(&dev_priv->irq_lock);
1240 for (i = 1; i < HPD_NUM_PINS; i++) { 1301 for (i = 1; i < HPD_NUM_PINS; i++) {
1241 1302
@@ -1415,17 +1476,52 @@ static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
1415 } 1476 }
1416} 1477}
1417 1478
1479static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir)
1480{
1481 struct drm_i915_private *dev_priv = dev->dev_private;
1482 u32 pipe_stats[I915_MAX_PIPES];
1483 int pipe;
1484
1485 spin_lock(&dev_priv->irq_lock);
1486 for_each_pipe(pipe) {
1487 int reg = PIPESTAT(pipe);
1488 pipe_stats[pipe] = I915_READ(reg);
1489
1490 /*
1491 * Clear the PIPE*STAT regs before the IIR
1492 */
1493 if (pipe_stats[pipe] & 0x8000ffff)
1494 I915_WRITE(reg, pipe_stats[pipe]);
1495 }
1496 spin_unlock(&dev_priv->irq_lock);
1497
1498 for_each_pipe(pipe) {
1499 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
1500 drm_handle_vblank(dev, pipe);
1501
1502 if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV) {
1503 intel_prepare_page_flip(dev, pipe);
1504 intel_finish_page_flip(dev, pipe);
1505 }
1506
1507 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
1508 i9xx_pipe_crc_irq_handler(dev, pipe);
1509
1510 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS &&
1511 intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
1512 DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
1513 }
1514
1515 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
1516 gmbus_irq_handler(dev);
1517}
1518
1418static irqreturn_t valleyview_irq_handler(int irq, void *arg) 1519static irqreturn_t valleyview_irq_handler(int irq, void *arg)
1419{ 1520{
1420 struct drm_device *dev = (struct drm_device *) arg; 1521 struct drm_device *dev = (struct drm_device *) arg;
1421 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1522 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1422 u32 iir, gt_iir, pm_iir; 1523 u32 iir, gt_iir, pm_iir;
1423 irqreturn_t ret = IRQ_NONE; 1524 irqreturn_t ret = IRQ_NONE;
1424 unsigned long irqflags;
1425 int pipe;
1426 u32 pipe_stats[I915_MAX_PIPES];
1427
1428 atomic_inc(&dev_priv->irq_received);
1429 1525
1430 while (true) { 1526 while (true) {
1431 iir = I915_READ(VLV_IIR); 1527 iir = I915_READ(VLV_IIR);
@@ -1439,44 +1535,13 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
1439 1535
1440 snb_gt_irq_handler(dev, dev_priv, gt_iir); 1536 snb_gt_irq_handler(dev, dev_priv, gt_iir);
1441 1537
1442 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 1538 valleyview_pipestat_irq_handler(dev, iir);
1443 for_each_pipe(pipe) {
1444 int reg = PIPESTAT(pipe);
1445 pipe_stats[pipe] = I915_READ(reg);
1446
1447 /*
1448 * Clear the PIPE*STAT regs before the IIR
1449 */
1450 if (pipe_stats[pipe] & 0x8000ffff) {
1451 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
1452 DRM_DEBUG_DRIVER("pipe %c underrun\n",
1453 pipe_name(pipe));
1454 I915_WRITE(reg, pipe_stats[pipe]);
1455 }
1456 }
1457 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1458
1459 for_each_pipe(pipe) {
1460 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
1461 drm_handle_vblank(dev, pipe);
1462
1463 if (pipe_stats[pipe] & PLANE_FLIPDONE_INT_STATUS_VLV) {
1464 intel_prepare_page_flip(dev, pipe);
1465 intel_finish_page_flip(dev, pipe);
1466 }
1467
1468 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
1469 i9xx_pipe_crc_irq_handler(dev, pipe);
1470 }
1471 1539
1472 /* Consume port. Then clear IIR or we'll miss events */ 1540 /* Consume port. Then clear IIR or we'll miss events */
1473 if (iir & I915_DISPLAY_PORT_INTERRUPT) { 1541 if (iir & I915_DISPLAY_PORT_INTERRUPT) {
1474 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); 1542 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
1475 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915; 1543 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
1476 1544
1477 DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
1478 hotplug_status);
1479
1480 intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915); 1545 intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915);
1481 1546
1482 if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X) 1547 if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
@@ -1486,8 +1551,6 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
1486 I915_READ(PORT_HOTPLUG_STAT); 1551 I915_READ(PORT_HOTPLUG_STAT);
1487 } 1552 }
1488 1553
1489 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
1490 gmbus_irq_handler(dev);
1491 1554
1492 if (pm_iir) 1555 if (pm_iir)
1493 gen6_rps_irq_handler(dev_priv, pm_iir); 1556 gen6_rps_irq_handler(dev_priv, pm_iir);
@@ -1546,12 +1609,12 @@ static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
1546 if (pch_iir & SDE_TRANSA_FIFO_UNDER) 1609 if (pch_iir & SDE_TRANSA_FIFO_UNDER)
1547 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, 1610 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
1548 false)) 1611 false))
1549 DRM_DEBUG_DRIVER("PCH transcoder A FIFO underrun\n"); 1612 DRM_ERROR("PCH transcoder A FIFO underrun\n");
1550 1613
1551 if (pch_iir & SDE_TRANSB_FIFO_UNDER) 1614 if (pch_iir & SDE_TRANSB_FIFO_UNDER)
1552 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B, 1615 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
1553 false)) 1616 false))
1554 DRM_DEBUG_DRIVER("PCH transcoder B FIFO underrun\n"); 1617 DRM_ERROR("PCH transcoder B FIFO underrun\n");
1555} 1618}
1556 1619
1557static void ivb_err_int_handler(struct drm_device *dev) 1620static void ivb_err_int_handler(struct drm_device *dev)
@@ -1567,8 +1630,8 @@ static void ivb_err_int_handler(struct drm_device *dev)
1567 if (err_int & ERR_INT_FIFO_UNDERRUN(pipe)) { 1630 if (err_int & ERR_INT_FIFO_UNDERRUN(pipe)) {
1568 if (intel_set_cpu_fifo_underrun_reporting(dev, pipe, 1631 if (intel_set_cpu_fifo_underrun_reporting(dev, pipe,
1569 false)) 1632 false))
1570 DRM_DEBUG_DRIVER("Pipe %c FIFO underrun\n", 1633 DRM_ERROR("Pipe %c FIFO underrun\n",
1571 pipe_name(pipe)); 1634 pipe_name(pipe));
1572 } 1635 }
1573 1636
1574 if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) { 1637 if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
@@ -1593,17 +1656,17 @@ static void cpt_serr_int_handler(struct drm_device *dev)
1593 if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN) 1656 if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
1594 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, 1657 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
1595 false)) 1658 false))
1596 DRM_DEBUG_DRIVER("PCH transcoder A FIFO underrun\n"); 1659 DRM_ERROR("PCH transcoder A FIFO underrun\n");
1597 1660
1598 if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN) 1661 if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
1599 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B, 1662 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
1600 false)) 1663 false))
1601 DRM_DEBUG_DRIVER("PCH transcoder B FIFO underrun\n"); 1664 DRM_ERROR("PCH transcoder B FIFO underrun\n");
1602 1665
1603 if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN) 1666 if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
1604 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_C, 1667 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_C,
1605 false)) 1668 false))
1606 DRM_DEBUG_DRIVER("PCH transcoder C FIFO underrun\n"); 1669 DRM_ERROR("PCH transcoder C FIFO underrun\n");
1607 1670
1608 I915_WRITE(SERR_INT, serr_int); 1671 I915_WRITE(SERR_INT, serr_int);
1609} 1672}
@@ -1665,8 +1728,8 @@ static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
1665 1728
1666 if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe)) 1729 if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
1667 if (intel_set_cpu_fifo_underrun_reporting(dev, pipe, false)) 1730 if (intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
1668 DRM_DEBUG_DRIVER("Pipe %c FIFO underrun\n", 1731 DRM_ERROR("Pipe %c FIFO underrun\n",
1669 pipe_name(pipe)); 1732 pipe_name(pipe));
1670 1733
1671 if (de_iir & DE_PIPE_CRC_DONE(pipe)) 1734 if (de_iir & DE_PIPE_CRC_DONE(pipe))
1672 i9xx_pipe_crc_irq_handler(dev, pipe); 1735 i9xx_pipe_crc_irq_handler(dev, pipe);
@@ -1738,8 +1801,6 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
1738 u32 de_iir, gt_iir, de_ier, sde_ier = 0; 1801 u32 de_iir, gt_iir, de_ier, sde_ier = 0;
1739 irqreturn_t ret = IRQ_NONE; 1802 irqreturn_t ret = IRQ_NONE;
1740 1803
1741 atomic_inc(&dev_priv->irq_received);
1742
1743 /* We get interrupts on unclaimed registers, so check for this before we 1804 /* We get interrupts on unclaimed registers, so check for this before we
1744 * do any I915_{READ,WRITE}. */ 1805 * do any I915_{READ,WRITE}. */
1745 intel_uncore_check_errors(dev); 1806 intel_uncore_check_errors(dev);
@@ -1808,8 +1869,6 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
1808 uint32_t tmp = 0; 1869 uint32_t tmp = 0;
1809 enum pipe pipe; 1870 enum pipe pipe;
1810 1871
1811 atomic_inc(&dev_priv->irq_received);
1812
1813 master_ctl = I915_READ(GEN8_MASTER_IRQ); 1872 master_ctl = I915_READ(GEN8_MASTER_IRQ);
1814 master_ctl &= ~GEN8_MASTER_IRQ_CONTROL; 1873 master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
1815 if (!master_ctl) 1874 if (!master_ctl)
@@ -1871,8 +1930,8 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
1871 if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN) { 1930 if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN) {
1872 if (intel_set_cpu_fifo_underrun_reporting(dev, pipe, 1931 if (intel_set_cpu_fifo_underrun_reporting(dev, pipe,
1873 false)) 1932 false))
1874 DRM_DEBUG_DRIVER("Pipe %c FIFO underrun\n", 1933 DRM_ERROR("Pipe %c FIFO underrun\n",
1875 pipe_name(pipe)); 1934 pipe_name(pipe));
1876 } 1935 }
1877 1936
1878 if (pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS) { 1937 if (pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS) {
@@ -2244,18 +2303,11 @@ static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
2244{ 2303{
2245 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2304 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2246 unsigned long irqflags; 2305 unsigned long irqflags;
2247 u32 imr;
2248 2306
2249 if (!i915_pipe_enabled(dev, pipe)) 2307 if (!i915_pipe_enabled(dev, pipe))
2250 return -EINVAL; 2308 return -EINVAL;
2251 2309
2252 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2310 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2253 imr = I915_READ(VLV_IMR);
2254 if (pipe == PIPE_A)
2255 imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
2256 else
2257 imr &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
2258 I915_WRITE(VLV_IMR, imr);
2259 i915_enable_pipestat(dev_priv, pipe, 2311 i915_enable_pipestat(dev_priv, pipe,
2260 PIPE_START_VBLANK_INTERRUPT_ENABLE); 2312 PIPE_START_VBLANK_INTERRUPT_ENABLE);
2261 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2313 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
@@ -2313,17 +2365,10 @@ static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
2313{ 2365{
2314 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2366 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2315 unsigned long irqflags; 2367 unsigned long irqflags;
2316 u32 imr;
2317 2368
2318 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2369 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2319 i915_disable_pipestat(dev_priv, pipe, 2370 i915_disable_pipestat(dev_priv, pipe,
2320 PIPE_START_VBLANK_INTERRUPT_ENABLE); 2371 PIPE_START_VBLANK_INTERRUPT_ENABLE);
2321 imr = I915_READ(VLV_IMR);
2322 if (pipe == PIPE_A)
2323 imr |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
2324 else
2325 imr |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
2326 I915_WRITE(VLV_IMR, imr);
2327 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2372 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2328} 2373}
2329 2374
@@ -2479,9 +2524,8 @@ static void i915_hangcheck_elapsed(unsigned long data)
2479#define BUSY 1 2524#define BUSY 1
2480#define KICK 5 2525#define KICK 5
2481#define HUNG 20 2526#define HUNG 20
2482#define FIRE 30
2483 2527
2484 if (!i915_enable_hangcheck) 2528 if (!i915.enable_hangcheck)
2485 return; 2529 return;
2486 2530
2487 for_each_ring(ring, dev_priv, i) { 2531 for_each_ring(ring, dev_priv, i) {
@@ -2563,7 +2607,7 @@ static void i915_hangcheck_elapsed(unsigned long data)
2563 } 2607 }
2564 2608
2565 for_each_ring(ring, dev_priv, i) { 2609 for_each_ring(ring, dev_priv, i) {
2566 if (ring->hangcheck.score > FIRE) { 2610 if (ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) {
2567 DRM_INFO("%s on %s\n", 2611 DRM_INFO("%s on %s\n",
2568 stuck[i] ? "stuck" : "no progress", 2612 stuck[i] ? "stuck" : "no progress",
2569 ring->name); 2613 ring->name);
@@ -2583,7 +2627,7 @@ static void i915_hangcheck_elapsed(unsigned long data)
2583void i915_queue_hangcheck(struct drm_device *dev) 2627void i915_queue_hangcheck(struct drm_device *dev)
2584{ 2628{
2585 struct drm_i915_private *dev_priv = dev->dev_private; 2629 struct drm_i915_private *dev_priv = dev->dev_private;
2586 if (!i915_enable_hangcheck) 2630 if (!i915.enable_hangcheck)
2587 return; 2631 return;
2588 2632
2589 mod_timer(&dev_priv->gpu_error.hangcheck_timer, 2633 mod_timer(&dev_priv->gpu_error.hangcheck_timer,
@@ -2632,8 +2676,6 @@ static void ironlake_irq_preinstall(struct drm_device *dev)
2632{ 2676{
2633 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2677 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2634 2678
2635 atomic_set(&dev_priv->irq_received, 0);
2636
2637 I915_WRITE(HWSTAM, 0xeffe); 2679 I915_WRITE(HWSTAM, 0xeffe);
2638 2680
2639 I915_WRITE(DEIMR, 0xffffffff); 2681 I915_WRITE(DEIMR, 0xffffffff);
@@ -2650,8 +2692,6 @@ static void valleyview_irq_preinstall(struct drm_device *dev)
2650 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2692 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2651 int pipe; 2693 int pipe;
2652 2694
2653 atomic_set(&dev_priv->irq_received, 0);
2654
2655 /* VLV magic */ 2695 /* VLV magic */
2656 I915_WRITE(VLV_IMR, 0); 2696 I915_WRITE(VLV_IMR, 0);
2657 I915_WRITE(RING_IMR(RENDER_RING_BASE), 0); 2697 I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
@@ -2681,8 +2721,6 @@ static void gen8_irq_preinstall(struct drm_device *dev)
2681 struct drm_i915_private *dev_priv = dev->dev_private; 2721 struct drm_i915_private *dev_priv = dev->dev_private;
2682 int pipe; 2722 int pipe;
2683 2723
2684 atomic_set(&dev_priv->irq_received, 0);
2685
2686 I915_WRITE(GEN8_MASTER_IRQ, 0); 2724 I915_WRITE(GEN8_MASTER_IRQ, 0);
2687 POSTING_READ(GEN8_MASTER_IRQ); 2725 POSTING_READ(GEN8_MASTER_IRQ);
2688 2726
@@ -3007,8 +3045,6 @@ static void gen8_irq_uninstall(struct drm_device *dev)
3007 if (!dev_priv) 3045 if (!dev_priv)
3008 return; 3046 return;
3009 3047
3010 atomic_set(&dev_priv->irq_received, 0);
3011
3012 I915_WRITE(GEN8_MASTER_IRQ, 0); 3048 I915_WRITE(GEN8_MASTER_IRQ, 0);
3013 3049
3014#define GEN8_IRQ_FINI_NDX(type, which) do { \ 3050#define GEN8_IRQ_FINI_NDX(type, which) do { \
@@ -3049,7 +3085,7 @@ static void valleyview_irq_uninstall(struct drm_device *dev)
3049 if (!dev_priv) 3085 if (!dev_priv)
3050 return; 3086 return;
3051 3087
3052 del_timer_sync(&dev_priv->hotplug_reenable_timer); 3088 intel_hpd_irq_uninstall(dev_priv);
3053 3089
3054 for_each_pipe(pipe) 3090 for_each_pipe(pipe)
3055 I915_WRITE(PIPESTAT(pipe), 0xffff); 3091 I915_WRITE(PIPESTAT(pipe), 0xffff);
@@ -3072,7 +3108,7 @@ static void ironlake_irq_uninstall(struct drm_device *dev)
3072 if (!dev_priv) 3108 if (!dev_priv)
3073 return; 3109 return;
3074 3110
3075 del_timer_sync(&dev_priv->hotplug_reenable_timer); 3111 intel_hpd_irq_uninstall(dev_priv);
3076 3112
3077 I915_WRITE(HWSTAM, 0xffffffff); 3113 I915_WRITE(HWSTAM, 0xffffffff);
3078 3114
@@ -3101,8 +3137,6 @@ static void i8xx_irq_preinstall(struct drm_device * dev)
3101 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3137 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
3102 int pipe; 3138 int pipe;
3103 3139
3104 atomic_set(&dev_priv->irq_received, 0);
3105
3106 for_each_pipe(pipe) 3140 for_each_pipe(pipe)
3107 I915_WRITE(PIPESTAT(pipe), 0); 3141 I915_WRITE(PIPESTAT(pipe), 0);
3108 I915_WRITE16(IMR, 0xffff); 3142 I915_WRITE16(IMR, 0xffff);
@@ -3187,8 +3221,6 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
3187 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 3221 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3188 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; 3222 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
3189 3223
3190 atomic_inc(&dev_priv->irq_received);
3191
3192 iir = I915_READ16(IIR); 3224 iir = I915_READ16(IIR);
3193 if (iir == 0) 3225 if (iir == 0)
3194 return IRQ_NONE; 3226 return IRQ_NONE;
@@ -3210,12 +3242,8 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
3210 /* 3242 /*
3211 * Clear the PIPE*STAT regs before the IIR 3243 * Clear the PIPE*STAT regs before the IIR
3212 */ 3244 */
3213 if (pipe_stats[pipe] & 0x8000ffff) { 3245 if (pipe_stats[pipe] & 0x8000ffff)
3214 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
3215 DRM_DEBUG_DRIVER("pipe %c underrun\n",
3216 pipe_name(pipe));
3217 I915_WRITE(reg, pipe_stats[pipe]); 3246 I915_WRITE(reg, pipe_stats[pipe]);
3218 }
3219 } 3247 }
3220 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3248 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3221 3249
@@ -3238,6 +3266,10 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
3238 3266
3239 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 3267 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
3240 i9xx_pipe_crc_irq_handler(dev, pipe); 3268 i9xx_pipe_crc_irq_handler(dev, pipe);
3269
3270 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS &&
3271 intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
3272 DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
3241 } 3273 }
3242 3274
3243 iir = new_iir; 3275 iir = new_iir;
@@ -3266,8 +3298,6 @@ static void i915_irq_preinstall(struct drm_device * dev)
3266 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3298 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
3267 int pipe; 3299 int pipe;
3268 3300
3269 atomic_set(&dev_priv->irq_received, 0);
3270
3271 if (I915_HAS_HOTPLUG(dev)) { 3301 if (I915_HAS_HOTPLUG(dev)) {
3272 I915_WRITE(PORT_HOTPLUG_EN, 0); 3302 I915_WRITE(PORT_HOTPLUG_EN, 0);
3273 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 3303 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
@@ -3373,8 +3403,6 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
3373 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; 3403 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
3374 int pipe, ret = IRQ_NONE; 3404 int pipe, ret = IRQ_NONE;
3375 3405
3376 atomic_inc(&dev_priv->irq_received);
3377
3378 iir = I915_READ(IIR); 3406 iir = I915_READ(IIR);
3379 do { 3407 do {
3380 bool irq_received = (iir & ~flip_mask) != 0; 3408 bool irq_received = (iir & ~flip_mask) != 0;
@@ -3395,9 +3423,6 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
3395 3423
3396 /* Clear the PIPE*STAT regs before the IIR */ 3424 /* Clear the PIPE*STAT regs before the IIR */
3397 if (pipe_stats[pipe] & 0x8000ffff) { 3425 if (pipe_stats[pipe] & 0x8000ffff) {
3398 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
3399 DRM_DEBUG_DRIVER("pipe %c underrun\n",
3400 pipe_name(pipe));
3401 I915_WRITE(reg, pipe_stats[pipe]); 3426 I915_WRITE(reg, pipe_stats[pipe]);
3402 irq_received = true; 3427 irq_received = true;
3403 } 3428 }
@@ -3413,9 +3438,6 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
3413 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); 3438 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
3414 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915; 3439 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
3415 3440
3416 DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
3417 hotplug_status);
3418
3419 intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915); 3441 intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915);
3420 3442
3421 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); 3443 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
@@ -3442,6 +3464,10 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
3442 3464
3443 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 3465 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
3444 i9xx_pipe_crc_irq_handler(dev, pipe); 3466 i9xx_pipe_crc_irq_handler(dev, pipe);
3467
3468 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS &&
3469 intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
3470 DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
3445 } 3471 }
3446 3472
3447 if (blc_event || (iir & I915_ASLE_INTERRUPT)) 3473 if (blc_event || (iir & I915_ASLE_INTERRUPT))
@@ -3476,7 +3502,7 @@ static void i915_irq_uninstall(struct drm_device * dev)
3476 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3502 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
3477 int pipe; 3503 int pipe;
3478 3504
3479 del_timer_sync(&dev_priv->hotplug_reenable_timer); 3505 intel_hpd_irq_uninstall(dev_priv);
3480 3506
3481 if (I915_HAS_HOTPLUG(dev)) { 3507 if (I915_HAS_HOTPLUG(dev)) {
3482 I915_WRITE(PORT_HOTPLUG_EN, 0); 3508 I915_WRITE(PORT_HOTPLUG_EN, 0);
@@ -3500,8 +3526,6 @@ static void i965_irq_preinstall(struct drm_device * dev)
3500 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3526 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
3501 int pipe; 3527 int pipe;
3502 3528
3503 atomic_set(&dev_priv->irq_received, 0);
3504
3505 I915_WRITE(PORT_HOTPLUG_EN, 0); 3529 I915_WRITE(PORT_HOTPLUG_EN, 0);
3506 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 3530 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3507 3531
@@ -3610,21 +3634,17 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
3610 u32 iir, new_iir; 3634 u32 iir, new_iir;
3611 u32 pipe_stats[I915_MAX_PIPES]; 3635 u32 pipe_stats[I915_MAX_PIPES];
3612 unsigned long irqflags; 3636 unsigned long irqflags;
3613 int irq_received;
3614 int ret = IRQ_NONE, pipe; 3637 int ret = IRQ_NONE, pipe;
3615 u32 flip_mask = 3638 u32 flip_mask =
3616 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 3639 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3617 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; 3640 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
3618 3641
3619 atomic_inc(&dev_priv->irq_received);
3620
3621 iir = I915_READ(IIR); 3642 iir = I915_READ(IIR);
3622 3643
3623 for (;;) { 3644 for (;;) {
3645 bool irq_received = (iir & ~flip_mask) != 0;
3624 bool blc_event = false; 3646 bool blc_event = false;
3625 3647
3626 irq_received = (iir & ~flip_mask) != 0;
3627
3628 /* Can't rely on pipestat interrupt bit in iir as it might 3648 /* Can't rely on pipestat interrupt bit in iir as it might
3629 * have been cleared after the pipestat interrupt was received. 3649 * have been cleared after the pipestat interrupt was received.
3630 * It doesn't set the bit in iir again, but it still produces 3650 * It doesn't set the bit in iir again, but it still produces
@@ -3642,11 +3662,8 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
3642 * Clear the PIPE*STAT regs before the IIR 3662 * Clear the PIPE*STAT regs before the IIR
3643 */ 3663 */
3644 if (pipe_stats[pipe] & 0x8000ffff) { 3664 if (pipe_stats[pipe] & 0x8000ffff) {
3645 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
3646 DRM_DEBUG_DRIVER("pipe %c underrun\n",
3647 pipe_name(pipe));
3648 I915_WRITE(reg, pipe_stats[pipe]); 3665 I915_WRITE(reg, pipe_stats[pipe]);
3649 irq_received = 1; 3666 irq_received = true;
3650 } 3667 }
3651 } 3668 }
3652 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3669 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
@@ -3663,9 +3680,6 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
3663 HOTPLUG_INT_STATUS_G4X : 3680 HOTPLUG_INT_STATUS_G4X :
3664 HOTPLUG_INT_STATUS_I915); 3681 HOTPLUG_INT_STATUS_I915);
3665 3682
3666 DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
3667 hotplug_status);
3668
3669 intel_hpd_irq_handler(dev, hotplug_trigger, 3683 intel_hpd_irq_handler(dev, hotplug_trigger,
3670 IS_G4X(dev) ? hpd_status_g4x : hpd_status_i915); 3684 IS_G4X(dev) ? hpd_status_g4x : hpd_status_i915);
3671 3685
@@ -3695,8 +3709,11 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
3695 3709
3696 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 3710 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
3697 i9xx_pipe_crc_irq_handler(dev, pipe); 3711 i9xx_pipe_crc_irq_handler(dev, pipe);
3698 }
3699 3712
3713 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS &&
3714 intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
3715 DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
3716 }
3700 3717
3701 if (blc_event || (iir & I915_ASLE_INTERRUPT)) 3718 if (blc_event || (iir & I915_ASLE_INTERRUPT))
3702 intel_opregion_asle_intr(dev); 3719 intel_opregion_asle_intr(dev);
@@ -3735,7 +3752,7 @@ static void i965_irq_uninstall(struct drm_device * dev)
3735 if (!dev_priv) 3752 if (!dev_priv)
3736 return; 3753 return;
3737 3754
3738 del_timer_sync(&dev_priv->hotplug_reenable_timer); 3755 intel_hpd_irq_uninstall(dev_priv);
3739 3756
3740 I915_WRITE(PORT_HOTPLUG_EN, 0); 3757 I915_WRITE(PORT_HOTPLUG_EN, 0);
3741 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 3758 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
@@ -3752,7 +3769,7 @@ static void i965_irq_uninstall(struct drm_device * dev)
3752 I915_WRITE(IIR, I915_READ(IIR)); 3769 I915_WRITE(IIR, I915_READ(IIR));
3753} 3770}
3754 3771
3755static void i915_reenable_hotplug_timer_func(unsigned long data) 3772static void intel_hpd_irq_reenable(unsigned long data)
3756{ 3773{
3757 drm_i915_private_t *dev_priv = (drm_i915_private_t *)data; 3774 drm_i915_private_t *dev_priv = (drm_i915_private_t *)data;
3758 struct drm_device *dev = dev_priv->dev; 3775 struct drm_device *dev = dev_priv->dev;
@@ -3799,7 +3816,7 @@ void intel_irq_init(struct drm_device *dev)
3799 setup_timer(&dev_priv->gpu_error.hangcheck_timer, 3816 setup_timer(&dev_priv->gpu_error.hangcheck_timer,
3800 i915_hangcheck_elapsed, 3817 i915_hangcheck_elapsed,
3801 (unsigned long) dev); 3818 (unsigned long) dev);
3802 setup_timer(&dev_priv->hotplug_reenable_timer, i915_reenable_hotplug_timer_func, 3819 setup_timer(&dev_priv->hotplug_reenable_timer, intel_hpd_irq_reenable,
3803 (unsigned long) dev_priv); 3820 (unsigned long) dev_priv);
3804 3821
3805 pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE); 3822 pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
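
A side note on the underrun hunks above: every DRM_ERROR is gated on intel_set_cpu_fifo_underrun_reporting(dev, pipe, false), so once the first underrun on a pipe has been reported, later ones stay quiet. The stand-alone model below (plain user-space C, hypothetical names, not driver code) captures just that report-once behaviour, assuming the call returns whether reporting was previously enabled — which is how the handlers above appear to use its return value.

	#include <stdbool.h>
	#include <stdio.h>

	enum pipe { PIPE_A, PIPE_B, PIPE_C, I915_MAX_PIPES };

	/* Hypothetical stand-in for the driver's per-pipe bookkeeping. */
	static bool underrun_reporting[I915_MAX_PIPES] = { true, true, true };

	/* Returns the previous state, so the caller logs only on the first hit. */
	static bool set_underrun_reporting(enum pipe pipe, bool enable)
	{
		bool was_enabled = underrun_reporting[pipe];

		underrun_reporting[pipe] = enable;
		return was_enabled;
	}

	int main(void)
	{
		int i;

		for (i = 0; i < 3; i++)			/* three underruns on pipe A */
			if (set_underrun_reporting(PIPE_A, false))
				printf("pipe A FIFO underrun\n");	/* printed once */
		return 0;
	}

Re-enabling reporting (passing true) would arm the next error message again.
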
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
new file mode 100644
index 000000000000..c743057b6511
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -0,0 +1,155 @@
1/*
2 * Copyright © 2014 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the
6 * "Software"), to deal in the Software without restriction, including
7 * without limitation the rights to use, copy, modify, merge, publish,
8 * distribute, sub license, and/or sell copies of the Software, and to
9 * permit persons to whom the Software is furnished to do so, subject to
10 * the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the
13 * next paragraph) shall be included in all copies or substantial portions
14 * of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
22 * IN THE SOFTWARE.
23 */
24
25#include "i915_drv.h"
26
27struct i915_params i915 __read_mostly = {
28 .modeset = -1,
29 .panel_ignore_lid = 1,
30 .powersave = 1,
31 .semaphores = -1,
32 .lvds_downclock = 0,
33 .lvds_channel_mode = 0,
34 .panel_use_ssc = -1,
35 .vbt_sdvo_panel_type = -1,
36 .enable_rc6 = -1,
37 .enable_fbc = -1,
38 .enable_hangcheck = true,
39 .enable_ppgtt = -1,
40 .enable_psr = 0,
41 .preliminary_hw_support = IS_ENABLED(CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT),
42 .disable_power_well = 1,
43 .enable_ips = 1,
44 .fastboot = 0,
45 .enable_pc8 = 1,
46 .pc8_timeout = 5000,
47 .prefault_disable = 0,
48 .reset = true,
49 .invert_brightness = 0,
50};
51
52module_param_named(modeset, i915.modeset, int, 0400);
53MODULE_PARM_DESC(modeset,
54 "Use kernel modesetting [KMS] (0=DRM_I915_KMS from .config, "
55 "1=on, -1=force vga console preference [default])");
56
57module_param_named(panel_ignore_lid, i915.panel_ignore_lid, int, 0600);
58MODULE_PARM_DESC(panel_ignore_lid,
59 "Override lid status (0=autodetect, 1=autodetect disabled [default], "
60 "-1=force lid closed, -2=force lid open)");
61
62module_param_named(powersave, i915.powersave, int, 0600);
63MODULE_PARM_DESC(powersave,
64 "Enable powersavings, fbc, downclocking, etc. (default: true)");
65
66module_param_named(semaphores, i915.semaphores, int, 0400);
67MODULE_PARM_DESC(semaphores,
68 "Use semaphores for inter-ring sync "
69 "(default: -1 (use per-chip defaults))");
70
71module_param_named(enable_rc6, i915.enable_rc6, int, 0400);
72MODULE_PARM_DESC(enable_rc6,
73 "Enable power-saving render C-state 6. "
74 "Different stages can be selected via bitmask values "
75 "(0 = disable; 1 = enable rc6; 2 = enable deep rc6; 4 = enable deepest rc6). "
76 "For example, 3 would enable rc6 and deep rc6, and 7 would enable everything. "
77 "default: -1 (use per-chip default)");
78
79module_param_named(enable_fbc, i915.enable_fbc, int, 0600);
80MODULE_PARM_DESC(enable_fbc,
81 "Enable frame buffer compression for power savings "
82 "(default: -1 (use per-chip default))");
83
84module_param_named(lvds_downclock, i915.lvds_downclock, int, 0400);
85MODULE_PARM_DESC(lvds_downclock,
86 "Use panel (LVDS/eDP) downclocking for power savings "
87 "(default: false)");
88
89module_param_named(lvds_channel_mode, i915.lvds_channel_mode, int, 0600);
90MODULE_PARM_DESC(lvds_channel_mode,
91 "Specify LVDS channel mode "
92 "(0=probe BIOS [default], 1=single-channel, 2=dual-channel)");
93
94module_param_named(lvds_use_ssc, i915.panel_use_ssc, int, 0600);
95MODULE_PARM_DESC(lvds_use_ssc,
96 "Use Spread Spectrum Clock with panels [LVDS/eDP] "
97 "(default: auto from VBT)");
98
99module_param_named(vbt_sdvo_panel_type, i915.vbt_sdvo_panel_type, int, 0600);
100MODULE_PARM_DESC(vbt_sdvo_panel_type,
101 "Override/Ignore selection of SDVO panel mode in the VBT "
102 "(-2=ignore, -1=auto [default], index in VBT BIOS table)");
103
104module_param_named(reset, i915.reset, bool, 0600);
105MODULE_PARM_DESC(reset, "Attempt GPU resets (default: true)");
106
107module_param_named(enable_hangcheck, i915.enable_hangcheck, bool, 0644);
108MODULE_PARM_DESC(enable_hangcheck,
109 "Periodically check GPU activity for detecting hangs. "
110 "WARNING: Disabling this can cause system wide hangs. "
111 "(default: true)");
112
113module_param_named(enable_ppgtt, i915.enable_ppgtt, int, 0400);
114MODULE_PARM_DESC(enable_ppgtt,
115 "Override PPGTT usage. "
116 "(-1=auto [default], 0=disabled, 1=aliasing, 2=full)");
117
118module_param_named(enable_psr, i915.enable_psr, int, 0600);
119MODULE_PARM_DESC(enable_psr, "Enable PSR (default: false)");
120
121module_param_named(preliminary_hw_support, i915.preliminary_hw_support, int, 0600);
122MODULE_PARM_DESC(preliminary_hw_support,
123 "Enable preliminary hardware support.");
124
125module_param_named(disable_power_well, i915.disable_power_well, int, 0600);
126MODULE_PARM_DESC(disable_power_well,
127 "Disable the power well when possible (default: true)");
128
129module_param_named(enable_ips, i915.enable_ips, int, 0600);
130MODULE_PARM_DESC(enable_ips, "Enable IPS (default: true)");
131
132module_param_named(fastboot, i915.fastboot, bool, 0600);
133MODULE_PARM_DESC(fastboot,
134 "Try to skip unnecessary mode sets at boot time (default: false)");
135
136module_param_named(enable_pc8, i915.enable_pc8, int, 0600);
137MODULE_PARM_DESC(enable_pc8,
138 "Enable support for low power package C states (PC8+) (default: true)");
139
140module_param_named(pc8_timeout, i915.pc8_timeout, int, 0600);
141MODULE_PARM_DESC(pc8_timeout,
142 "Number of msecs of idleness required to enter PC8+ (default: 5000)");
143
144module_param_named(prefault_disable, i915.prefault_disable, bool, 0600);
145MODULE_PARM_DESC(prefault_disable,
146 "Disable page prefaulting for pread/pwrite/reloc (default:false). "
147 "For developers only.");
148
149module_param_named(invert_brightness, i915.invert_brightness, int, 0600);
150MODULE_PARM_DESC(invert_brightness,
151 "Invert backlight brightness "
152 "(-1 force normal, 0 machine defaults, 1 force inversion), please "
153 "report PCI device ID, subsystem vendor and subsystem device ID "
154 "to dri-devel@lists.freedesktop.org, if your machine needs it. "
155 "It will then be included in an upcoming module version.");
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index a48b7cad6f11..cc3ea049269b 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -26,7 +26,6 @@
26#define _I915_REG_H_ 26#define _I915_REG_H_
27 27
28#define _PIPE(pipe, a, b) ((a) + (pipe)*((b)-(a))) 28#define _PIPE(pipe, a, b) ((a) + (pipe)*((b)-(a)))
29#define _PIPE_INC(pipe, base, inc) ((base) + (pipe)*(inc))
30#define _TRANSCODER(tran, a, b) ((a) + (tran)*((b)-(a))) 29#define _TRANSCODER(tran, a, b) ((a) + (tran)*((b)-(a)))
31 30
32#define _PORT(port, a, b) ((a) + (port)*((b)-(a))) 31#define _PORT(port, a, b) ((a) + (port)*((b)-(a)))
@@ -73,7 +72,8 @@
73#define I915_GC_RENDER_CLOCK_166_MHZ (0 << 0) 72#define I915_GC_RENDER_CLOCK_166_MHZ (0 << 0)
74#define I915_GC_RENDER_CLOCK_200_MHZ (1 << 0) 73#define I915_GC_RENDER_CLOCK_200_MHZ (1 << 0)
75#define I915_GC_RENDER_CLOCK_333_MHZ (4 << 0) 74#define I915_GC_RENDER_CLOCK_333_MHZ (4 << 0)
76#define LBB 0xf4 75#define PCI_LBPC 0xf4 /* legacy/combination backlight modes, also called LBB */
76
77 77
78/* Graphics reset regs */ 78/* Graphics reset regs */
79#define I965_GDRST 0xc0 /* PCI config register */ 79#define I965_GDRST 0xc0 /* PCI config register */
@@ -934,6 +934,8 @@
934#define ECO_GATING_CX_ONLY (1<<3) 934#define ECO_GATING_CX_ONLY (1<<3)
935#define ECO_FLIP_DONE (1<<0) 935#define ECO_FLIP_DONE (1<<0)
936 936
937#define CACHE_MODE_0_GEN7 0x7000 /* IVB+ */
938#define HIZ_RAW_STALL_OPT_DISABLE (1<<2)
937#define CACHE_MODE_1 0x7004 /* IVB+ */ 939#define CACHE_MODE_1 0x7004 /* IVB+ */
938#define PIXEL_SUBSPAN_COLLECT_OPT_DISABLE (1<<6) 940#define PIXEL_SUBSPAN_COLLECT_OPT_DISABLE (1<<6)
939 941
@@ -1046,9 +1048,8 @@
1046#define FBC_CTL_IDLE_LINE (2<<2) 1048#define FBC_CTL_IDLE_LINE (2<<2)
1047#define FBC_CTL_IDLE_DEBUG (3<<2) 1049#define FBC_CTL_IDLE_DEBUG (3<<2)
1048#define FBC_CTL_CPU_FENCE (1<<1) 1050#define FBC_CTL_CPU_FENCE (1<<1)
1049#define FBC_CTL_PLANEA (0<<0) 1051#define FBC_CTL_PLANE(plane) ((plane)<<0)
1050#define FBC_CTL_PLANEB (1<<0) 1052#define FBC_FENCE_OFF 0x03218 /* BSpec typo has 321Bh */
1051#define FBC_FENCE_OFF 0x0321b
1052#define FBC_TAG 0x03300 1053#define FBC_TAG 0x03300
1053 1054
1054#define FBC_LL_SIZE (1536) 1055#define FBC_LL_SIZE (1536)
@@ -1057,9 +1058,8 @@
1057#define DPFC_CB_BASE 0x3200 1058#define DPFC_CB_BASE 0x3200
1058#define DPFC_CONTROL 0x3208 1059#define DPFC_CONTROL 0x3208
1059#define DPFC_CTL_EN (1<<31) 1060#define DPFC_CTL_EN (1<<31)
1060#define DPFC_CTL_PLANEA (0<<30) 1061#define DPFC_CTL_PLANE(plane) ((plane)<<30)
1061#define DPFC_CTL_PLANEB (1<<30) 1062#define IVB_DPFC_CTL_PLANE(plane) ((plane)<<29)
1062#define IVB_DPFC_CTL_PLANE_SHIFT (29)
1063#define DPFC_CTL_FENCE_EN (1<<29) 1063#define DPFC_CTL_FENCE_EN (1<<29)
1064#define IVB_DPFC_CTL_FENCE_EN (1<<28) 1064#define IVB_DPFC_CTL_FENCE_EN (1<<28)
1065#define DPFC_CTL_PERSISTENT_MODE (1<<25) 1065#define DPFC_CTL_PERSISTENT_MODE (1<<25)
@@ -1202,6 +1202,10 @@
1202/* 1202/*
1203 * Clock control & power management 1203 * Clock control & power management
1204 */ 1204 */
1205#define DPLL_A_OFFSET 0x6014
1206#define DPLL_B_OFFSET 0x6018
1207#define DPLL(pipe) (dev_priv->info->dpll_offsets[pipe] + \
1208 dev_priv->info->display_mmio_offset)
1205 1209
1206#define VGA0 0x6000 1210#define VGA0 0x6000
1207#define VGA1 0x6004 1211#define VGA1 0x6004
@@ -1214,9 +1218,6 @@
1214#define VGA1_PD_P1_DIV_2 (1 << 13) 1218#define VGA1_PD_P1_DIV_2 (1 << 13)
1215#define VGA1_PD_P1_SHIFT 8 1219#define VGA1_PD_P1_SHIFT 8
1216#define VGA1_PD_P1_MASK (0x1f << 8) 1220#define VGA1_PD_P1_MASK (0x1f << 8)
1217#define _DPLL_A (dev_priv->info->display_mmio_offset + 0x6014)
1218#define _DPLL_B (dev_priv->info->display_mmio_offset + 0x6018)
1219#define DPLL(pipe) _PIPE(pipe, _DPLL_A, _DPLL_B)
1220#define DPLL_VCO_ENABLE (1 << 31) 1221#define DPLL_VCO_ENABLE (1 << 31)
1221#define DPLL_SDVO_HIGH_SPEED (1 << 30) 1222#define DPLL_SDVO_HIGH_SPEED (1 << 30)
1222#define DPLL_DVO_2X_MODE (1 << 30) 1223#define DPLL_DVO_2X_MODE (1 << 30)
@@ -1278,7 +1279,12 @@
1278#define SDVO_MULTIPLIER_MASK 0x000000ff 1279#define SDVO_MULTIPLIER_MASK 0x000000ff
1279#define SDVO_MULTIPLIER_SHIFT_HIRES 4 1280#define SDVO_MULTIPLIER_SHIFT_HIRES 4
1280#define SDVO_MULTIPLIER_SHIFT_VGA 0 1281#define SDVO_MULTIPLIER_SHIFT_VGA 0
1281#define _DPLL_A_MD (dev_priv->info->display_mmio_offset + 0x601c) /* 965+ only */ 1282
1283#define DPLL_A_MD_OFFSET 0x601c /* 965+ only */
1284#define DPLL_B_MD_OFFSET 0x6020 /* 965+ only */
1285#define DPLL_MD(pipe) (dev_priv->info->dpll_md_offsets[pipe] + \
1286 dev_priv->info->display_mmio_offset)
1287
1282/* 1288/*
1283 * UDI pixel divider, controlling how many pixels are stuffed into a packet. 1289 * UDI pixel divider, controlling how many pixels are stuffed into a packet.
1284 * 1290 *
@@ -1315,8 +1321,6 @@
1315 */ 1321 */
1316#define DPLL_MD_VGA_UDI_MULTIPLIER_MASK 0x0000003f 1322#define DPLL_MD_VGA_UDI_MULTIPLIER_MASK 0x0000003f
1317#define DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT 0 1323#define DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT 0
1318#define _DPLL_B_MD (dev_priv->info->display_mmio_offset + 0x6020) /* 965+ only */
1319#define DPLL_MD(pipe) _PIPE(pipe, _DPLL_A_MD, _DPLL_B_MD)
1320 1324
1321#define _FPA0 0x06040 1325#define _FPA0 0x06040
1322#define _FPA1 0x06044 1326#define _FPA1 0x06044
@@ -1472,10 +1476,10 @@
1472/* 1476/*
1473 * Palette regs 1477 * Palette regs
1474 */ 1478 */
1475 1479#define PALETTE_A_OFFSET 0xa000
1476#define _PALETTE_A (dev_priv->info->display_mmio_offset + 0xa000) 1480#define PALETTE_B_OFFSET 0xa800
1477#define _PALETTE_B (dev_priv->info->display_mmio_offset + 0xa800) 1481#define PALETTE(pipe) (dev_priv->info->palette_offsets[pipe] + \
1478#define PALETTE(pipe) _PIPE(pipe, _PALETTE_A, _PALETTE_B) 1482 dev_priv->info->display_mmio_offset)
1479 1483
1480/* MCH MMIO space */ 1484/* MCH MMIO space */
1481 1485
@@ -1862,7 +1866,7 @@
1862 */ 1866 */
1863 1867
1864/* Pipe A CRC regs */ 1868/* Pipe A CRC regs */
1865#define _PIPE_CRC_CTL_A (dev_priv->info->display_mmio_offset + 0x60050) 1869#define _PIPE_CRC_CTL_A 0x60050
1866#define PIPE_CRC_ENABLE (1 << 31) 1870#define PIPE_CRC_ENABLE (1 << 31)
1867/* ivb+ source selection */ 1871/* ivb+ source selection */
1868#define PIPE_CRC_SOURCE_PRIMARY_IVB (0 << 29) 1872#define PIPE_CRC_SOURCE_PRIMARY_IVB (0 << 29)
@@ -1902,11 +1906,11 @@
1902#define _PIPE_CRC_RES_4_A_IVB 0x60070 1906#define _PIPE_CRC_RES_4_A_IVB 0x60070
1903#define _PIPE_CRC_RES_5_A_IVB 0x60074 1907#define _PIPE_CRC_RES_5_A_IVB 0x60074
1904 1908
1905#define _PIPE_CRC_RES_RED_A (dev_priv->info->display_mmio_offset + 0x60060) 1909#define _PIPE_CRC_RES_RED_A 0x60060
1906#define _PIPE_CRC_RES_GREEN_A (dev_priv->info->display_mmio_offset + 0x60064) 1910#define _PIPE_CRC_RES_GREEN_A 0x60064
1907#define _PIPE_CRC_RES_BLUE_A (dev_priv->info->display_mmio_offset + 0x60068) 1911#define _PIPE_CRC_RES_BLUE_A 0x60068
1908#define _PIPE_CRC_RES_RES1_A_I915 (dev_priv->info->display_mmio_offset + 0x6006c) 1912#define _PIPE_CRC_RES_RES1_A_I915 0x6006c
1909#define _PIPE_CRC_RES_RES2_A_G4X (dev_priv->info->display_mmio_offset + 0x60080) 1913#define _PIPE_CRC_RES_RES2_A_G4X 0x60080
1910 1914
1911/* Pipe B CRC regs */ 1915/* Pipe B CRC regs */
1912#define _PIPE_CRC_RES_1_B_IVB 0x61064 1916#define _PIPE_CRC_RES_1_B_IVB 0x61064
@@ -1915,59 +1919,69 @@
1915#define _PIPE_CRC_RES_4_B_IVB 0x61070 1919#define _PIPE_CRC_RES_4_B_IVB 0x61070
1916#define _PIPE_CRC_RES_5_B_IVB 0x61074 1920#define _PIPE_CRC_RES_5_B_IVB 0x61074
1917 1921
1918#define PIPE_CRC_CTL(pipe) _PIPE_INC(pipe, _PIPE_CRC_CTL_A, 0x01000) 1922#define PIPE_CRC_CTL(pipe) _TRANSCODER2(pipe, _PIPE_CRC_CTL_A)
1919#define PIPE_CRC_RES_1_IVB(pipe) \ 1923#define PIPE_CRC_RES_1_IVB(pipe) \
1920 _PIPE(pipe, _PIPE_CRC_RES_1_A_IVB, _PIPE_CRC_RES_1_B_IVB) 1924 _TRANSCODER2(pipe, _PIPE_CRC_RES_1_A_IVB)
1921#define PIPE_CRC_RES_2_IVB(pipe) \ 1925#define PIPE_CRC_RES_2_IVB(pipe) \
1922 _PIPE(pipe, _PIPE_CRC_RES_2_A_IVB, _PIPE_CRC_RES_2_B_IVB) 1926 _TRANSCODER2(pipe, _PIPE_CRC_RES_2_A_IVB)
1923#define PIPE_CRC_RES_3_IVB(pipe) \ 1927#define PIPE_CRC_RES_3_IVB(pipe) \
1924 _PIPE(pipe, _PIPE_CRC_RES_3_A_IVB, _PIPE_CRC_RES_3_B_IVB) 1928 _TRANSCODER2(pipe, _PIPE_CRC_RES_3_A_IVB)
1925#define PIPE_CRC_RES_4_IVB(pipe) \ 1929#define PIPE_CRC_RES_4_IVB(pipe) \
1926 _PIPE(pipe, _PIPE_CRC_RES_4_A_IVB, _PIPE_CRC_RES_4_B_IVB) 1930 _TRANSCODER2(pipe, _PIPE_CRC_RES_4_A_IVB)
1927#define PIPE_CRC_RES_5_IVB(pipe) \ 1931#define PIPE_CRC_RES_5_IVB(pipe) \
1928 _PIPE(pipe, _PIPE_CRC_RES_5_A_IVB, _PIPE_CRC_RES_5_B_IVB) 1932 _TRANSCODER2(pipe, _PIPE_CRC_RES_5_A_IVB)
1929 1933
1930#define PIPE_CRC_RES_RED(pipe) \ 1934#define PIPE_CRC_RES_RED(pipe) \
1931 _PIPE_INC(pipe, _PIPE_CRC_RES_RED_A, 0x01000) 1935 _TRANSCODER2(pipe, _PIPE_CRC_RES_RED_A)
1932#define PIPE_CRC_RES_GREEN(pipe) \ 1936#define PIPE_CRC_RES_GREEN(pipe) \
1933 _PIPE_INC(pipe, _PIPE_CRC_RES_GREEN_A, 0x01000) 1937 _TRANSCODER2(pipe, _PIPE_CRC_RES_GREEN_A)
1934#define PIPE_CRC_RES_BLUE(pipe) \ 1938#define PIPE_CRC_RES_BLUE(pipe) \
1935 _PIPE_INC(pipe, _PIPE_CRC_RES_BLUE_A, 0x01000) 1939 _TRANSCODER2(pipe, _PIPE_CRC_RES_BLUE_A)
1936#define PIPE_CRC_RES_RES1_I915(pipe) \ 1940#define PIPE_CRC_RES_RES1_I915(pipe) \
1937 _PIPE_INC(pipe, _PIPE_CRC_RES_RES1_A_I915, 0x01000) 1941 _TRANSCODER2(pipe, _PIPE_CRC_RES_RES1_A_I915)
1938#define PIPE_CRC_RES_RES2_G4X(pipe) \ 1942#define PIPE_CRC_RES_RES2_G4X(pipe) \
1939 _PIPE_INC(pipe, _PIPE_CRC_RES_RES2_A_G4X, 0x01000) 1943 _TRANSCODER2(pipe, _PIPE_CRC_RES_RES2_A_G4X)
1940 1944
1941/* Pipe A timing regs */ 1945/* Pipe A timing regs */
1942#define _HTOTAL_A (dev_priv->info->display_mmio_offset + 0x60000) 1946#define _HTOTAL_A 0x60000
1943#define _HBLANK_A (dev_priv->info->display_mmio_offset + 0x60004) 1947#define _HBLANK_A 0x60004
1944#define _HSYNC_A (dev_priv->info->display_mmio_offset + 0x60008) 1948#define _HSYNC_A 0x60008
1945#define _VTOTAL_A (dev_priv->info->display_mmio_offset + 0x6000c) 1949#define _VTOTAL_A 0x6000c
1946#define _VBLANK_A (dev_priv->info->display_mmio_offset + 0x60010) 1950#define _VBLANK_A 0x60010
1947#define _VSYNC_A (dev_priv->info->display_mmio_offset + 0x60014) 1951#define _VSYNC_A 0x60014
1948#define _PIPEASRC (dev_priv->info->display_mmio_offset + 0x6001c) 1952#define _PIPEASRC 0x6001c
1949#define _BCLRPAT_A (dev_priv->info->display_mmio_offset + 0x60020) 1953#define _BCLRPAT_A 0x60020
1950#define _VSYNCSHIFT_A (dev_priv->info->display_mmio_offset + 0x60028) 1954#define _VSYNCSHIFT_A 0x60028
1951 1955
1952/* Pipe B timing regs */ 1956/* Pipe B timing regs */
1953#define _HTOTAL_B (dev_priv->info->display_mmio_offset + 0x61000) 1957#define _HTOTAL_B 0x61000
1954#define _HBLANK_B (dev_priv->info->display_mmio_offset + 0x61004) 1958#define _HBLANK_B 0x61004
1955#define _HSYNC_B (dev_priv->info->display_mmio_offset + 0x61008) 1959#define _HSYNC_B 0x61008
1956#define _VTOTAL_B (dev_priv->info->display_mmio_offset + 0x6100c) 1960#define _VTOTAL_B 0x6100c
1957#define _VBLANK_B (dev_priv->info->display_mmio_offset + 0x61010) 1961#define _VBLANK_B 0x61010
1958#define _VSYNC_B (dev_priv->info->display_mmio_offset + 0x61014) 1962#define _VSYNC_B 0x61014
1959#define _PIPEBSRC (dev_priv->info->display_mmio_offset + 0x6101c) 1963#define _PIPEBSRC 0x6101c
1960#define _BCLRPAT_B (dev_priv->info->display_mmio_offset + 0x61020) 1964#define _BCLRPAT_B 0x61020
1961#define _VSYNCSHIFT_B (dev_priv->info->display_mmio_offset + 0x61028) 1965#define _VSYNCSHIFT_B 0x61028
1962 1966
1963#define HTOTAL(trans) _TRANSCODER(trans, _HTOTAL_A, _HTOTAL_B) 1967#define TRANSCODER_A_OFFSET 0x60000
1964#define HBLANK(trans) _TRANSCODER(trans, _HBLANK_A, _HBLANK_B) 1968#define TRANSCODER_B_OFFSET 0x61000
1965#define HSYNC(trans) _TRANSCODER(trans, _HSYNC_A, _HSYNC_B) 1969#define TRANSCODER_C_OFFSET 0x62000
1966#define VTOTAL(trans) _TRANSCODER(trans, _VTOTAL_A, _VTOTAL_B) 1970#define TRANSCODER_EDP_OFFSET 0x6f000
1967#define VBLANK(trans) _TRANSCODER(trans, _VBLANK_A, _VBLANK_B) 1971
1968#define VSYNC(trans) _TRANSCODER(trans, _VSYNC_A, _VSYNC_B) 1972#define _TRANSCODER2(pipe, reg) (dev_priv->info->trans_offsets[(pipe)] - \
1969#define BCLRPAT(pipe) _PIPE(pipe, _BCLRPAT_A, _BCLRPAT_B) 1973 dev_priv->info->trans_offsets[TRANSCODER_A] + (reg) + \
1970#define VSYNCSHIFT(trans) _TRANSCODER(trans, _VSYNCSHIFT_A, _VSYNCSHIFT_B) 1974 dev_priv->info->display_mmio_offset)
1975
1976#define HTOTAL(trans) _TRANSCODER2(trans, _HTOTAL_A)
1977#define HBLANK(trans) _TRANSCODER2(trans, _HBLANK_A)
1978#define HSYNC(trans) _TRANSCODER2(trans, _HSYNC_A)
1979#define VTOTAL(trans) _TRANSCODER2(trans, _VTOTAL_A)
1980#define VBLANK(trans) _TRANSCODER2(trans, _VBLANK_A)
1981#define VSYNC(trans) _TRANSCODER2(trans, _VSYNC_A)
1982#define BCLRPAT(trans) _TRANSCODER2(trans, _BCLRPAT_A)
1983#define VSYNCSHIFT(trans) _TRANSCODER2(trans, _VSYNCSHIFT_A)
1984#define PIPESRC(trans) _TRANSCODER2(trans, _PIPEASRC)
1971 1985
1972/* HSW+ eDP PSR registers */ 1986/* HSW+ eDP PSR registers */
1973#define EDP_PSR_BASE(dev) (IS_HASWELL(dev) ? 0x64800 : 0x6f800) 1987#define EDP_PSR_BASE(dev) (IS_HASWELL(dev) ? 0x64800 : 0x6f800)
@@ -3178,10 +3192,10 @@
3178/* Display & cursor control */ 3192/* Display & cursor control */
3179 3193
3180/* Pipe A */ 3194/* Pipe A */
3181#define _PIPEADSL (dev_priv->info->display_mmio_offset + 0x70000) 3195#define _PIPEADSL 0x70000
3182#define DSL_LINEMASK_GEN2 0x00000fff 3196#define DSL_LINEMASK_GEN2 0x00000fff
3183#define DSL_LINEMASK_GEN3 0x00001fff 3197#define DSL_LINEMASK_GEN3 0x00001fff
3184#define _PIPEACONF (dev_priv->info->display_mmio_offset + 0x70008) 3198#define _PIPEACONF 0x70008
3185#define PIPECONF_ENABLE (1<<31) 3199#define PIPECONF_ENABLE (1<<31)
3186#define PIPECONF_DISABLE 0 3200#define PIPECONF_DISABLE 0
3187#define PIPECONF_DOUBLE_WIDE (1<<30) 3201#define PIPECONF_DOUBLE_WIDE (1<<30)
@@ -3224,9 +3238,9 @@
3224#define PIPECONF_DITHER_TYPE_ST1 (1<<2) 3238#define PIPECONF_DITHER_TYPE_ST1 (1<<2)
3225#define PIPECONF_DITHER_TYPE_ST2 (2<<2) 3239#define PIPECONF_DITHER_TYPE_ST2 (2<<2)
3226#define PIPECONF_DITHER_TYPE_TEMP (3<<2) 3240#define PIPECONF_DITHER_TYPE_TEMP (3<<2)
3227#define _PIPEASTAT (dev_priv->info->display_mmio_offset + 0x70024) 3241#define _PIPEASTAT 0x70024
3228#define PIPE_FIFO_UNDERRUN_STATUS (1UL<<31) 3242#define PIPE_FIFO_UNDERRUN_STATUS (1UL<<31)
3229#define SPRITE1_FLIPDONE_INT_EN_VLV (1UL<<30) 3243#define SPRITE1_FLIP_DONE_INT_EN_VLV (1UL<<30)
3230#define PIPE_CRC_ERROR_ENABLE (1UL<<29) 3244#define PIPE_CRC_ERROR_ENABLE (1UL<<29)
3231#define PIPE_CRC_DONE_ENABLE (1UL<<28) 3245#define PIPE_CRC_DONE_ENABLE (1UL<<28)
3232#define PIPE_GMBUS_EVENT_ENABLE (1UL<<27) 3246#define PIPE_GMBUS_EVENT_ENABLE (1UL<<27)
@@ -3244,12 +3258,12 @@
3244#define PIPE_VBLANK_INTERRUPT_ENABLE (1UL<<17) 3258#define PIPE_VBLANK_INTERRUPT_ENABLE (1UL<<17)
3245#define PIPEA_HBLANK_INT_EN_VLV (1UL<<16) 3259#define PIPEA_HBLANK_INT_EN_VLV (1UL<<16)
3246#define PIPE_OVERLAY_UPDATED_ENABLE (1UL<<16) 3260#define PIPE_OVERLAY_UPDATED_ENABLE (1UL<<16)
3247#define SPRITE1_FLIPDONE_INT_STATUS_VLV (1UL<<15) 3261#define SPRITE1_FLIP_DONE_INT_STATUS_VLV (1UL<<15)
3248#define SPRITE0_FLIPDONE_INT_STATUS_VLV (1UL<<14) 3262#define SPRITE0_FLIP_DONE_INT_STATUS_VLV (1UL<<14)
3249#define PIPE_CRC_ERROR_INTERRUPT_STATUS (1UL<<13) 3263#define PIPE_CRC_ERROR_INTERRUPT_STATUS (1UL<<13)
3250#define PIPE_CRC_DONE_INTERRUPT_STATUS (1UL<<12) 3264#define PIPE_CRC_DONE_INTERRUPT_STATUS (1UL<<12)
3251#define PIPE_GMBUS_INTERRUPT_STATUS (1UL<<11) 3265#define PIPE_GMBUS_INTERRUPT_STATUS (1UL<<11)
3252#define PLANE_FLIPDONE_INT_STATUS_VLV (1UL<<10) 3266#define PLANE_FLIP_DONE_INT_STATUS_VLV (1UL<<10)
3253#define PIPE_HOTPLUG_INTERRUPT_STATUS (1UL<<10) 3267#define PIPE_HOTPLUG_INTERRUPT_STATUS (1UL<<10)
3254#define PIPE_VSYNC_INTERRUPT_STATUS (1UL<<9) 3268#define PIPE_VSYNC_INTERRUPT_STATUS (1UL<<9)
3255#define PIPE_DISPLAY_LINE_COMPARE_STATUS (1UL<<8) 3269#define PIPE_DISPLAY_LINE_COMPARE_STATUS (1UL<<8)
@@ -3262,12 +3276,26 @@
3262#define PIPE_VBLANK_INTERRUPT_STATUS (1UL<<1) 3276#define PIPE_VBLANK_INTERRUPT_STATUS (1UL<<1)
3263#define PIPE_OVERLAY_UPDATED_STATUS (1UL<<0) 3277#define PIPE_OVERLAY_UPDATED_STATUS (1UL<<0)
3264 3278
3265#define PIPESRC(pipe) _PIPE(pipe, _PIPEASRC, _PIPEBSRC) 3279#define PIPE_A_OFFSET 0x70000
3266#define PIPECONF(tran) _TRANSCODER(tran, _PIPEACONF, _PIPEBCONF) 3280#define PIPE_B_OFFSET 0x71000
3267#define PIPEDSL(pipe) _PIPE(pipe, _PIPEADSL, _PIPEBDSL) 3281#define PIPE_C_OFFSET 0x72000
3268#define PIPEFRAME(pipe) _PIPE(pipe, _PIPEAFRAMEHIGH, _PIPEBFRAMEHIGH) 3282/*
3269#define PIPEFRAMEPIXEL(pipe) _PIPE(pipe, _PIPEAFRAMEPIXEL, _PIPEBFRAMEPIXEL) 3283 * There's actually no pipe EDP. Some pipe registers have
3270#define PIPESTAT(pipe) _PIPE(pipe, _PIPEASTAT, _PIPEBSTAT) 3284 * simply shifted from the pipe to the transcoder, while
3285 * keeping their original offset. Thus we need PIPE_EDP_OFFSET
3286 * to access such registers in transcoder EDP.
3287 */
3288#define PIPE_EDP_OFFSET 0x7f000
3289
3290#define _PIPE2(pipe, reg) (dev_priv->info->pipe_offsets[pipe] - \
3291 dev_priv->info->pipe_offsets[PIPE_A] + (reg) + \
3292 dev_priv->info->display_mmio_offset)
3293
3294#define PIPECONF(pipe) _PIPE2(pipe, _PIPEACONF)
3295#define PIPEDSL(pipe) _PIPE2(pipe, _PIPEADSL)
3296#define PIPEFRAME(pipe) _PIPE2(pipe, _PIPEAFRAMEHIGH)
3297#define PIPEFRAMEPIXEL(pipe) _PIPE2(pipe, _PIPEAFRAMEPIXEL)
3298#define PIPESTAT(pipe) _PIPE2(pipe, _PIPEASTAT)
3271 3299
3272#define _PIPE_MISC_A 0x70030 3300#define _PIPE_MISC_A 0x70030
3273#define _PIPE_MISC_B 0x71030 3301#define _PIPE_MISC_B 0x71030
@@ -3279,20 +3307,20 @@
3279#define PIPEMISC_DITHER_ENABLE (1<<4) 3307#define PIPEMISC_DITHER_ENABLE (1<<4)
3280#define PIPEMISC_DITHER_TYPE_MASK (3<<2) 3308#define PIPEMISC_DITHER_TYPE_MASK (3<<2)
3281#define PIPEMISC_DITHER_TYPE_SP (0<<2) 3309#define PIPEMISC_DITHER_TYPE_SP (0<<2)
3282#define PIPEMISC(pipe) _PIPE(pipe, _PIPE_MISC_A, _PIPE_MISC_B) 3310#define PIPEMISC(pipe) _PIPE2(pipe, _PIPE_MISC_A)
3283 3311
3284#define VLV_DPFLIPSTAT (VLV_DISPLAY_BASE + 0x70028) 3312#define VLV_DPFLIPSTAT (VLV_DISPLAY_BASE + 0x70028)
3285#define PIPEB_LINE_COMPARE_INT_EN (1<<29) 3313#define PIPEB_LINE_COMPARE_INT_EN (1<<29)
3286#define PIPEB_HLINE_INT_EN (1<<28) 3314#define PIPEB_HLINE_INT_EN (1<<28)
3287#define PIPEB_VBLANK_INT_EN (1<<27) 3315#define PIPEB_VBLANK_INT_EN (1<<27)
3288#define SPRITED_FLIPDONE_INT_EN (1<<26) 3316#define SPRITED_FLIP_DONE_INT_EN (1<<26)
3289#define SPRITEC_FLIPDONE_INT_EN (1<<25) 3317#define SPRITEC_FLIP_DONE_INT_EN (1<<25)
3290#define PLANEB_FLIPDONE_INT_EN (1<<24) 3318#define PLANEB_FLIP_DONE_INT_EN (1<<24)
3291#define PIPEA_LINE_COMPARE_INT_EN (1<<21) 3319#define PIPEA_LINE_COMPARE_INT_EN (1<<21)
3292#define PIPEA_HLINE_INT_EN (1<<20) 3320#define PIPEA_HLINE_INT_EN (1<<20)
3293#define PIPEA_VBLANK_INT_EN (1<<19) 3321#define PIPEA_VBLANK_INT_EN (1<<19)
3294#define SPRITEB_FLIPDONE_INT_EN (1<<18) 3322#define SPRITEB_FLIP_DONE_INT_EN (1<<18)
3295#define SPRITEA_FLIPDONE_INT_EN (1<<17) 3323#define SPRITEA_FLIP_DONE_INT_EN (1<<17)
3296#define PLANEA_FLIPDONE_INT_EN (1<<16) 3324#define PLANEA_FLIPDONE_INT_EN (1<<16)
3297 3325
3298#define DPINVGTT (VLV_DISPLAY_BASE + 0x7002c) /* VLV only */ 3326#define DPINVGTT (VLV_DISPLAY_BASE + 0x7002c) /* VLV only */
@@ -3520,7 +3548,7 @@
3520#define CURPOS_IVB(pipe) _PIPE(pipe, _CURAPOS, _CURBPOS_IVB) 3548#define CURPOS_IVB(pipe) _PIPE(pipe, _CURAPOS, _CURBPOS_IVB)
3521 3549
3522/* Display A control */ 3550/* Display A control */
3523#define _DSPACNTR (dev_priv->info->display_mmio_offset + 0x70180) 3551#define _DSPACNTR 0x70180
3524#define DISPLAY_PLANE_ENABLE (1<<31) 3552#define DISPLAY_PLANE_ENABLE (1<<31)
3525#define DISPLAY_PLANE_DISABLE 0 3553#define DISPLAY_PLANE_DISABLE 0
3526#define DISPPLANE_GAMMA_ENABLE (1<<30) 3554#define DISPPLANE_GAMMA_ENABLE (1<<30)
@@ -3554,25 +3582,25 @@
3554#define DISPPLANE_STEREO_POLARITY_SECOND (1<<18) 3582#define DISPPLANE_STEREO_POLARITY_SECOND (1<<18)
3555#define DISPPLANE_TRICKLE_FEED_DISABLE (1<<14) /* Ironlake */ 3583#define DISPPLANE_TRICKLE_FEED_DISABLE (1<<14) /* Ironlake */
3556#define DISPPLANE_TILED (1<<10) 3584#define DISPPLANE_TILED (1<<10)
3557#define _DSPAADDR (dev_priv->info->display_mmio_offset + 0x70184) 3585#define _DSPAADDR 0x70184
3558#define _DSPASTRIDE (dev_priv->info->display_mmio_offset + 0x70188) 3586#define _DSPASTRIDE 0x70188
3559#define _DSPAPOS (dev_priv->info->display_mmio_offset + 0x7018C) /* reserved */ 3587#define _DSPAPOS 0x7018C /* reserved */
3560#define _DSPASIZE (dev_priv->info->display_mmio_offset + 0x70190) 3588#define _DSPASIZE 0x70190
3561#define _DSPASURF (dev_priv->info->display_mmio_offset + 0x7019C) /* 965+ only */ 3589#define _DSPASURF 0x7019C /* 965+ only */
3562#define _DSPATILEOFF (dev_priv->info->display_mmio_offset + 0x701A4) /* 965+ only */ 3590#define _DSPATILEOFF 0x701A4 /* 965+ only */
3563#define _DSPAOFFSET (dev_priv->info->display_mmio_offset + 0x701A4) /* HSW */ 3591#define _DSPAOFFSET 0x701A4 /* HSW */
3564#define _DSPASURFLIVE (dev_priv->info->display_mmio_offset + 0x701AC) 3592#define _DSPASURFLIVE 0x701AC
3565 3593
3566#define DSPCNTR(plane) _PIPE(plane, _DSPACNTR, _DSPBCNTR) 3594#define DSPCNTR(plane) _PIPE2(plane, _DSPACNTR)
3567#define DSPADDR(plane) _PIPE(plane, _DSPAADDR, _DSPBADDR) 3595#define DSPADDR(plane) _PIPE2(plane, _DSPAADDR)
3568#define DSPSTRIDE(plane) _PIPE(plane, _DSPASTRIDE, _DSPBSTRIDE) 3596#define DSPSTRIDE(plane) _PIPE2(plane, _DSPASTRIDE)
3569#define DSPPOS(plane) _PIPE(plane, _DSPAPOS, _DSPBPOS) 3597#define DSPPOS(plane) _PIPE2(plane, _DSPAPOS)
3570#define DSPSIZE(plane) _PIPE(plane, _DSPASIZE, _DSPBSIZE) 3598#define DSPSIZE(plane) _PIPE2(plane, _DSPASIZE)
3571#define DSPSURF(plane) _PIPE(plane, _DSPASURF, _DSPBSURF) 3599#define DSPSURF(plane) _PIPE2(plane, _DSPASURF)
3572#define DSPTILEOFF(plane) _PIPE(plane, _DSPATILEOFF, _DSPBTILEOFF) 3600#define DSPTILEOFF(plane) _PIPE2(plane, _DSPATILEOFF)
3573#define DSPLINOFF(plane) DSPADDR(plane) 3601#define DSPLINOFF(plane) DSPADDR(plane)
3574#define DSPOFFSET(plane) _PIPE(plane, _DSPAOFFSET, _DSPBOFFSET) 3602#define DSPOFFSET(plane) _PIPE2(plane, _DSPAOFFSET)
3575#define DSPSURFLIVE(plane) _PIPE(plane, _DSPASURFLIVE, _DSPBSURFLIVE) 3603#define DSPSURFLIVE(plane) _PIPE2(plane, _DSPASURFLIVE)
3576 3604
3577/* Display/Sprite base address macros */ 3605/* Display/Sprite base address macros */
3578#define DISP_BASEADDR_MASK (0xfffff000) 3606#define DISP_BASEADDR_MASK (0xfffff000)
@@ -3866,48 +3894,45 @@
3866#define FDI_PLL_FREQ_DISABLE_COUNT_LIMIT_MASK 0xff 3894#define FDI_PLL_FREQ_DISABLE_COUNT_LIMIT_MASK 0xff
3867 3895
3868 3896
3869#define _PIPEA_DATA_M1 (dev_priv->info->display_mmio_offset + 0x60030) 3897#define _PIPEA_DATA_M1 0x60030
3870#define PIPE_DATA_M1_OFFSET 0 3898#define PIPE_DATA_M1_OFFSET 0
3871#define _PIPEA_DATA_N1 (dev_priv->info->display_mmio_offset + 0x60034) 3899#define _PIPEA_DATA_N1 0x60034
3872#define PIPE_DATA_N1_OFFSET 0 3900#define PIPE_DATA_N1_OFFSET 0
3873 3901
3874#define _PIPEA_DATA_M2 (dev_priv->info->display_mmio_offset + 0x60038) 3902#define _PIPEA_DATA_M2 0x60038
3875#define PIPE_DATA_M2_OFFSET 0 3903#define PIPE_DATA_M2_OFFSET 0
3876#define _PIPEA_DATA_N2 (dev_priv->info->display_mmio_offset + 0x6003c) 3904#define _PIPEA_DATA_N2 0x6003c
3877#define PIPE_DATA_N2_OFFSET 0 3905#define PIPE_DATA_N2_OFFSET 0
3878 3906
3879#define _PIPEA_LINK_M1 (dev_priv->info->display_mmio_offset + 0x60040) 3907#define _PIPEA_LINK_M1 0x60040
3880#define PIPE_LINK_M1_OFFSET 0 3908#define PIPE_LINK_M1_OFFSET 0
3881#define _PIPEA_LINK_N1 (dev_priv->info->display_mmio_offset + 0x60044) 3909#define _PIPEA_LINK_N1 0x60044
3882#define PIPE_LINK_N1_OFFSET 0 3910#define PIPE_LINK_N1_OFFSET 0
3883 3911
3884#define _PIPEA_LINK_M2 (dev_priv->info->display_mmio_offset + 0x60048) 3912#define _PIPEA_LINK_M2 0x60048
3885#define PIPE_LINK_M2_OFFSET 0 3913#define PIPE_LINK_M2_OFFSET 0
3886#define _PIPEA_LINK_N2 (dev_priv->info->display_mmio_offset + 0x6004c) 3914#define _PIPEA_LINK_N2 0x6004c
3887#define PIPE_LINK_N2_OFFSET 0 3915#define PIPE_LINK_N2_OFFSET 0
3888 3916
3889/* PIPEB timing regs are same start from 0x61000 */ 3917/* PIPEB timing regs are same start from 0x61000 */
3890 3918
3891#define _PIPEB_DATA_M1 (dev_priv->info->display_mmio_offset + 0x61030) 3919#define _PIPEB_DATA_M1 0x61030
3892#define _PIPEB_DATA_N1 (dev_priv->info->display_mmio_offset + 0x61034) 3920#define _PIPEB_DATA_N1 0x61034
3893 3921#define _PIPEB_DATA_M2 0x61038
3894#define _PIPEB_DATA_M2 (dev_priv->info->display_mmio_offset + 0x61038) 3922#define _PIPEB_DATA_N2 0x6103c
3895#define _PIPEB_DATA_N2 (dev_priv->info->display_mmio_offset + 0x6103c) 3923#define _PIPEB_LINK_M1 0x61040
3896 3924#define _PIPEB_LINK_N1 0x61044
3897#define _PIPEB_LINK_M1 (dev_priv->info->display_mmio_offset + 0x61040) 3925#define _PIPEB_LINK_M2 0x61048
3898#define _PIPEB_LINK_N1 (dev_priv->info->display_mmio_offset + 0x61044) 3926#define _PIPEB_LINK_N2 0x6104c
3899 3927
3900#define _PIPEB_LINK_M2 (dev_priv->info->display_mmio_offset + 0x61048) 3928#define PIPE_DATA_M1(tran) _TRANSCODER2(tran, _PIPEA_DATA_M1)
3901#define _PIPEB_LINK_N2 (dev_priv->info->display_mmio_offset + 0x6104c) 3929#define PIPE_DATA_N1(tran) _TRANSCODER2(tran, _PIPEA_DATA_N1)
3902 3930#define PIPE_DATA_M2(tran) _TRANSCODER2(tran, _PIPEA_DATA_M2)
3903#define PIPE_DATA_M1(tran) _TRANSCODER(tran, _PIPEA_DATA_M1, _PIPEB_DATA_M1) 3931#define PIPE_DATA_N2(tran) _TRANSCODER2(tran, _PIPEA_DATA_N2)
3904#define PIPE_DATA_N1(tran) _TRANSCODER(tran, _PIPEA_DATA_N1, _PIPEB_DATA_N1) 3932#define PIPE_LINK_M1(tran) _TRANSCODER2(tran, _PIPEA_LINK_M1)
3905#define PIPE_DATA_M2(tran) _TRANSCODER(tran, _PIPEA_DATA_M2, _PIPEB_DATA_M2) 3933#define PIPE_LINK_N1(tran) _TRANSCODER2(tran, _PIPEA_LINK_N1)
3906#define PIPE_DATA_N2(tran) _TRANSCODER(tran, _PIPEA_DATA_N2, _PIPEB_DATA_N2) 3934#define PIPE_LINK_M2(tran) _TRANSCODER2(tran, _PIPEA_LINK_M2)
3907#define PIPE_LINK_M1(tran) _TRANSCODER(tran, _PIPEA_LINK_M1, _PIPEB_LINK_M1) 3935#define PIPE_LINK_N2(tran) _TRANSCODER2(tran, _PIPEA_LINK_N2)
3908#define PIPE_LINK_N1(tran) _TRANSCODER(tran, _PIPEA_LINK_N1, _PIPEB_LINK_N1)
3909#define PIPE_LINK_M2(tran) _TRANSCODER(tran, _PIPEA_LINK_M2, _PIPEB_LINK_M2)
3910#define PIPE_LINK_N2(tran) _TRANSCODER(tran, _PIPEA_LINK_N2, _PIPEB_LINK_N2)
3911 3936
3912/* CPU panel fitter */ 3937/* CPU panel fitter */
3913/* IVB+ has 3 fitters, 0 is 7x5 capable, the other two only 3x3 */ 3938/* IVB+ has 3 fitters, 0 is 7x5 capable, the other two only 3x3 */
@@ -4120,6 +4145,8 @@
4120#define GEN7_MSG_CTL 0x45010 4145#define GEN7_MSG_CTL 0x45010
4121#define WAIT_FOR_PCH_RESET_ACK (1<<1) 4146#define WAIT_FOR_PCH_RESET_ACK (1<<1)
4122#define WAIT_FOR_PCH_FLR_ACK (1<<0) 4147#define WAIT_FOR_PCH_FLR_ACK (1<<0)
4148#define HSW_NDE_RSTWRN_OPT 0x46408
4149#define RESET_PCH_HANDSHAKE_ENABLE (1<<4)
4123 4150
4124/* GEN7 chicken */ 4151/* GEN7 chicken */
4125#define GEN7_COMMON_SLICE_CHICKEN1 0x7010 4152#define GEN7_COMMON_SLICE_CHICKEN1 0x7010
@@ -4127,6 +4154,9 @@
4127#define COMMON_SLICE_CHICKEN2 0x7014 4154#define COMMON_SLICE_CHICKEN2 0x7014
4128# define GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE (1<<0) 4155# define GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE (1<<0)
4129 4156
4157#define GEN7_L3SQCREG1 0xB010
4158#define VLV_B0_WA_L3SQCREG1_VALUE 0x00D30000
4159
4130#define GEN7_L3CNTLREG1 0xB01C 4160#define GEN7_L3CNTLREG1 0xB01C
4131#define GEN7_WA_FOR_GEN7_L3_CONTROL 0x3C4FFF8C 4161#define GEN7_WA_FOR_GEN7_L3_CONTROL 0x3C4FFF8C
4132#define GEN7_L3AGDIS (1<<19) 4162#define GEN7_L3AGDIS (1<<19)
@@ -4436,24 +4466,24 @@
4436#define HSW_VIDEO_DIP_GCP_B 0x61210 4466#define HSW_VIDEO_DIP_GCP_B 0x61210
4437 4467
4438#define HSW_TVIDEO_DIP_CTL(trans) \ 4468#define HSW_TVIDEO_DIP_CTL(trans) \
4439 _TRANSCODER(trans, HSW_VIDEO_DIP_CTL_A, HSW_VIDEO_DIP_CTL_B) 4469 _TRANSCODER2(trans, HSW_VIDEO_DIP_CTL_A)
4440#define HSW_TVIDEO_DIP_AVI_DATA(trans) \ 4470#define HSW_TVIDEO_DIP_AVI_DATA(trans) \
4441 _TRANSCODER(trans, HSW_VIDEO_DIP_AVI_DATA_A, HSW_VIDEO_DIP_AVI_DATA_B) 4471 _TRANSCODER2(trans, HSW_VIDEO_DIP_AVI_DATA_A)
4442#define HSW_TVIDEO_DIP_VS_DATA(trans) \ 4472#define HSW_TVIDEO_DIP_VS_DATA(trans) \
4443 _TRANSCODER(trans, HSW_VIDEO_DIP_VS_DATA_A, HSW_VIDEO_DIP_VS_DATA_B) 4473 _TRANSCODER2(trans, HSW_VIDEO_DIP_VS_DATA_A)
4444#define HSW_TVIDEO_DIP_SPD_DATA(trans) \ 4474#define HSW_TVIDEO_DIP_SPD_DATA(trans) \
4445 _TRANSCODER(trans, HSW_VIDEO_DIP_SPD_DATA_A, HSW_VIDEO_DIP_SPD_DATA_B) 4475 _TRANSCODER2(trans, HSW_VIDEO_DIP_SPD_DATA_A)
4446#define HSW_TVIDEO_DIP_GCP(trans) \ 4476#define HSW_TVIDEO_DIP_GCP(trans) \
4447 _TRANSCODER(trans, HSW_VIDEO_DIP_GCP_A, HSW_VIDEO_DIP_GCP_B) 4477 _TRANSCODER2(trans, HSW_VIDEO_DIP_GCP_A)
4448#define HSW_TVIDEO_DIP_VSC_DATA(trans) \ 4478#define HSW_TVIDEO_DIP_VSC_DATA(trans) \
4449 _TRANSCODER(trans, HSW_VIDEO_DIP_VSC_DATA_A, HSW_VIDEO_DIP_VSC_DATA_B) 4479 _TRANSCODER2(trans, HSW_VIDEO_DIP_VSC_DATA_A)
4450 4480
4451#define HSW_STEREO_3D_CTL_A 0x70020 4481#define HSW_STEREO_3D_CTL_A 0x70020
4452#define S3D_ENABLE (1<<31) 4482#define S3D_ENABLE (1<<31)
4453#define HSW_STEREO_3D_CTL_B 0x71020 4483#define HSW_STEREO_3D_CTL_B 0x71020
4454 4484
4455#define HSW_STEREO_3D_CTL(trans) \ 4485#define HSW_STEREO_3D_CTL(trans) \
4456 _TRANSCODER(trans, HSW_STEREO_3D_CTL_A, HSW_STEREO_3D_CTL_A) 4486 _PIPE2(trans, HSW_STEREO_3D_CTL_A)
4457 4487
4458#define _PCH_TRANS_HTOTAL_B 0xe1000 4488#define _PCH_TRANS_HTOTAL_B 0xe1000
4459#define _PCH_TRANS_HBLANK_B 0xe1004 4489#define _PCH_TRANS_HBLANK_B 0xe1004
@@ -4945,6 +4975,10 @@
4945 GEN6_PM_RP_DOWN_THRESHOLD | \ 4975 GEN6_PM_RP_DOWN_THRESHOLD | \
4946 GEN6_PM_RP_DOWN_TIMEOUT) 4976 GEN6_PM_RP_DOWN_TIMEOUT)
4947 4977
4978#define VLV_GTLC_SURVIVABILITY_REG 0x130098
4979#define VLV_GFX_CLK_STATUS_BIT (1<<3)
4980#define VLV_GFX_CLK_FORCE_ON_BIT (1<<2)
4981
4948#define GEN6_GT_GFX_RC6_LOCKED 0x138104 4982#define GEN6_GT_GFX_RC6_LOCKED 0x138104
4949#define VLV_COUNTER_CONTROL 0x138104 4983#define VLV_COUNTER_CONTROL 0x138104
4950#define VLV_COUNT_RANGE_HIGH (1<<15) 4984#define VLV_COUNT_RANGE_HIGH (1<<15)
@@ -5178,8 +5212,8 @@
5178#define TRANS_DDI_FUNC_CTL_B 0x61400 5212#define TRANS_DDI_FUNC_CTL_B 0x61400
5179#define TRANS_DDI_FUNC_CTL_C 0x62400 5213#define TRANS_DDI_FUNC_CTL_C 0x62400
5180#define TRANS_DDI_FUNC_CTL_EDP 0x6F400 5214#define TRANS_DDI_FUNC_CTL_EDP 0x6F400
5181#define TRANS_DDI_FUNC_CTL(tran) _TRANSCODER(tran, TRANS_DDI_FUNC_CTL_A, \ 5215#define TRANS_DDI_FUNC_CTL(tran) _TRANSCODER2(tran, TRANS_DDI_FUNC_CTL_A)
5182 TRANS_DDI_FUNC_CTL_B) 5216
5183#define TRANS_DDI_FUNC_ENABLE (1<<31) 5217#define TRANS_DDI_FUNC_ENABLE (1<<31)
5184/* Those bits are ignored by pipe EDP since it can only connect to DDI A */ 5218/* Those bits are ignored by pipe EDP since it can only connect to DDI A */
5185#define TRANS_DDI_PORT_MASK (7<<28) 5219#define TRANS_DDI_PORT_MASK (7<<28)
@@ -5311,8 +5345,12 @@
5311#define SPLL_PLL_ENABLE (1<<31) 5345#define SPLL_PLL_ENABLE (1<<31)
5312#define SPLL_PLL_SSC (1<<28) 5346#define SPLL_PLL_SSC (1<<28)
5313#define SPLL_PLL_NON_SSC (2<<28) 5347#define SPLL_PLL_NON_SSC (2<<28)
5348#define SPLL_PLL_LCPLL (3<<28)
5349#define SPLL_PLL_REF_MASK (3<<28)
5314#define SPLL_PLL_FREQ_810MHz (0<<26) 5350#define SPLL_PLL_FREQ_810MHz (0<<26)
5315#define SPLL_PLL_FREQ_1350MHz (1<<26) 5351#define SPLL_PLL_FREQ_1350MHz (1<<26)
5352#define SPLL_PLL_FREQ_2700MHz (2<<26)
5353#define SPLL_PLL_FREQ_MASK (3<<26)
5316 5354
5317/* WRPLL */ 5355/* WRPLL */
5318#define WRPLL_CTL1 0x46040 5356#define WRPLL_CTL1 0x46040
@@ -5323,8 +5361,13 @@
5323#define WRPLL_PLL_SELECT_LCPLL_2700 (0x03<<28) 5361#define WRPLL_PLL_SELECT_LCPLL_2700 (0x03<<28)
5324/* WRPLL divider programming */ 5362/* WRPLL divider programming */
5325#define WRPLL_DIVIDER_REFERENCE(x) ((x)<<0) 5363#define WRPLL_DIVIDER_REFERENCE(x) ((x)<<0)
5364#define WRPLL_DIVIDER_REF_MASK (0xff)
5326#define WRPLL_DIVIDER_POST(x) ((x)<<8) 5365#define WRPLL_DIVIDER_POST(x) ((x)<<8)
5366#define WRPLL_DIVIDER_POST_MASK (0x3f<<8)
5367#define WRPLL_DIVIDER_POST_SHIFT 8
5327#define WRPLL_DIVIDER_FEEDBACK(x) ((x)<<16) 5368#define WRPLL_DIVIDER_FEEDBACK(x) ((x)<<16)
5369#define WRPLL_DIVIDER_FB_SHIFT 16
5370#define WRPLL_DIVIDER_FB_MASK (0xff<<16)
5328 5371
5329/* Port clock selection */ 5372/* Port clock selection */
5330#define PORT_CLK_SEL_A 0x46100 5373#define PORT_CLK_SEL_A 0x46100
@@ -5337,6 +5380,7 @@
5337#define PORT_CLK_SEL_WRPLL1 (4<<29) 5380#define PORT_CLK_SEL_WRPLL1 (4<<29)
5338#define PORT_CLK_SEL_WRPLL2 (5<<29) 5381#define PORT_CLK_SEL_WRPLL2 (5<<29)
5339#define PORT_CLK_SEL_NONE (7<<29) 5382#define PORT_CLK_SEL_NONE (7<<29)
5383#define PORT_CLK_SEL_MASK (7<<29)
5340 5384
5341/* Transcoder clock selection */ 5385/* Transcoder clock selection */
5342#define TRANS_CLK_SEL_A 0x46140 5386#define TRANS_CLK_SEL_A 0x46140
@@ -5346,10 +5390,12 @@
5346#define TRANS_CLK_SEL_DISABLED (0x0<<29) 5390#define TRANS_CLK_SEL_DISABLED (0x0<<29)
5347#define TRANS_CLK_SEL_PORT(x) ((x+1)<<29) 5391#define TRANS_CLK_SEL_PORT(x) ((x+1)<<29)
5348 5392
5349#define _TRANSA_MSA_MISC 0x60410 5393#define TRANSA_MSA_MISC 0x60410
5350#define _TRANSB_MSA_MISC 0x61410 5394#define TRANSB_MSA_MISC 0x61410
5351#define TRANS_MSA_MISC(tran) _TRANSCODER(tran, _TRANSA_MSA_MISC, \ 5395#define TRANSC_MSA_MISC 0x62410
5352 _TRANSB_MSA_MISC) 5396#define TRANS_EDP_MSA_MISC 0x6f410
5397#define TRANS_MSA_MISC(tran) _TRANSCODER2(tran, TRANSA_MSA_MISC)
5398
5353#define TRANS_MSA_SYNC_CLK (1<<0) 5399#define TRANS_MSA_SYNC_CLK (1<<0)
5354#define TRANS_MSA_6_BPC (0<<5) 5400#define TRANS_MSA_6_BPC (0<<5)
5355#define TRANS_MSA_8_BPC (1<<5) 5401#define TRANS_MSA_8_BPC (1<<5)
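Several hunks above switch from the two-register _TRANSCODER(tran, a, b) form to a single-argument _TRANSCODER2()/_PIPE2() form, so transcoder C and the eDP transcoder resolve to valid addresses as well. The real definitions sit earlier in i915_reg.h, outside this excerpt; their general shape is a per-transcoder mmio-offset table anchored at the transcoder-A register, roughly as below (field names are approximate, not the verbatim definition):

#define _TRANSCODER2_SKETCH(tran, reg_a) \
        (dev_priv->info->trans_offsets[(tran)] - \
         dev_priv->info->trans_offsets[TRANSCODER_A] + \
         (reg_a) + dev_priv->info->display_mmio_offset)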
@@ -5857,4 +5903,12 @@
5857#define MIPI_READ_DATA_VALID(pipe) _PIPE(pipe, _MIPIA_READ_DATA_VALID, _MIPIB_READ_DATA_VALID) 5903#define MIPI_READ_DATA_VALID(pipe) _PIPE(pipe, _MIPIA_READ_DATA_VALID, _MIPIB_READ_DATA_VALID)
5858#define READ_DATA_VALID(n) (1 << (n)) 5904#define READ_DATA_VALID(n) (1 << (n))
5859 5905
5906/* For UMS only (deprecated): */
5907#define _PALETTE_A (dev_priv->info->display_mmio_offset + 0xa000)
5908#define _PALETTE_B (dev_priv->info->display_mmio_offset + 0xa800)
5909#define _DPLL_A (dev_priv->info->display_mmio_offset + 0x6014)
5910#define _DPLL_B (dev_priv->info->display_mmio_offset + 0x6018)
5911#define _DPLL_A_MD (dev_priv->info->display_mmio_offset + 0x601c)
5912#define _DPLL_B_MD (dev_priv->info->display_mmio_offset + 0x6020)
5913
5860#endif /* _I915_REG_H_ */ 5914#endif /* _I915_REG_H_ */
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 8150fdc08d49..56785e8fb2eb 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -236,19 +236,9 @@ static void i915_save_display(struct drm_device *dev)
236 dev_priv->regfile.savePP_DIVISOR = I915_READ(PP_DIVISOR); 236 dev_priv->regfile.savePP_DIVISOR = I915_READ(PP_DIVISOR);
237 } 237 }
238 238
239 /* Only regfile.save FBC state on the platform that supports FBC */ 239 /* save FBC interval */
240 if (HAS_FBC(dev)) { 240 if (HAS_FBC(dev) && INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev))
241 if (HAS_PCH_SPLIT(dev)) { 241 dev_priv->regfile.saveFBC_CONTROL = I915_READ(FBC_CONTROL);
242 dev_priv->regfile.saveDPFC_CB_BASE = I915_READ(ILK_DPFC_CB_BASE);
243 } else if (IS_GM45(dev)) {
244 dev_priv->regfile.saveDPFC_CB_BASE = I915_READ(DPFC_CB_BASE);
245 } else {
246 dev_priv->regfile.saveFBC_CFB_BASE = I915_READ(FBC_CFB_BASE);
247 dev_priv->regfile.saveFBC_LL_BASE = I915_READ(FBC_LL_BASE);
248 dev_priv->regfile.saveFBC_CONTROL2 = I915_READ(FBC_CONTROL2);
249 dev_priv->regfile.saveFBC_CONTROL = I915_READ(FBC_CONTROL);
250 }
251 }
252 242
253 if (!drm_core_check_feature(dev, DRIVER_MODESET)) 243 if (!drm_core_check_feature(dev, DRIVER_MODESET))
254 i915_save_vga(dev); 244 i915_save_vga(dev);
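The compressed condition that replaces the old per-platform branches reads densely; expressed as a standalone predicate (illustrative only, not part of the patch), it says that only the legacy i8xx-style FBC, pre-gen5 and not the G4X/GM45 DPFC variant, keeps its compression interval in FBC_CONTROL, so that is the only register still worth saving here.

/* illustrative helper, not in the patch */
static bool i8xx_fbc_control_needs_save(struct drm_device *dev)
{
        return HAS_FBC(dev) && INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev);
}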
@@ -300,18 +290,10 @@ static void i915_restore_display(struct drm_device *dev)
300 290
301 /* only restore FBC info on the platform that supports FBC*/ 291 /* only restore FBC info on the platform that supports FBC*/
302 intel_disable_fbc(dev); 292 intel_disable_fbc(dev);
303 if (HAS_FBC(dev)) { 293
304 if (HAS_PCH_SPLIT(dev)) { 294 /* restore FBC interval */
305 I915_WRITE(ILK_DPFC_CB_BASE, dev_priv->regfile.saveDPFC_CB_BASE); 295 if (HAS_FBC(dev) && INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev))
306 } else if (IS_GM45(dev)) { 296 I915_WRITE(FBC_CONTROL, dev_priv->regfile.saveFBC_CONTROL);
307 I915_WRITE(DPFC_CB_BASE, dev_priv->regfile.saveDPFC_CB_BASE);
308 } else {
309 I915_WRITE(FBC_CFB_BASE, dev_priv->regfile.saveFBC_CFB_BASE);
310 I915_WRITE(FBC_LL_BASE, dev_priv->regfile.saveFBC_LL_BASE);
311 I915_WRITE(FBC_CONTROL2, dev_priv->regfile.saveFBC_CONTROL2);
312 I915_WRITE(FBC_CONTROL, dev_priv->regfile.saveFBC_CONTROL);
313 }
314 }
315 297
316 if (!drm_core_check_feature(dev, DRIVER_MODESET)) 298 if (!drm_core_check_feature(dev, DRIVER_MODESET))
317 i915_restore_vga(dev); 299 i915_restore_vga(dev);
@@ -324,10 +306,6 @@ int i915_save_state(struct drm_device *dev)
324 struct drm_i915_private *dev_priv = dev->dev_private; 306 struct drm_i915_private *dev_priv = dev->dev_private;
325 int i; 307 int i;
326 308
327 if (INTEL_INFO(dev)->gen <= 4)
328 pci_read_config_byte(dev->pdev, LBB,
329 &dev_priv->regfile.saveLBB);
330
331 mutex_lock(&dev->struct_mutex); 309 mutex_lock(&dev->struct_mutex);
332 310
333 i915_save_display(dev); 311 i915_save_display(dev);
@@ -377,10 +355,6 @@ int i915_restore_state(struct drm_device *dev)
377 struct drm_i915_private *dev_priv = dev->dev_private; 355 struct drm_i915_private *dev_priv = dev->dev_private;
378 int i; 356 int i;
379 357
380 if (INTEL_INFO(dev)->gen <= 4)
381 pci_write_config_byte(dev->pdev, LBB,
382 dev_priv->regfile.saveLBB);
383
384 mutex_lock(&dev->struct_mutex); 358 mutex_lock(&dev->struct_mutex);
385 359
386 i915_gem_restore_fences(dev); 360 i915_gem_restore_fences(dev);
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index 33bcae314bf8..0c741f4eefb0 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -357,6 +357,11 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
357 else 357 else
358 gen6_set_rps(dev, val); 358 gen6_set_rps(dev, val);
359 } 359 }
360 else if (!IS_VALLEYVIEW(dev))
361 /* We still need gen6_set_rps to process the new max_delay
362 and update the interrupt limits even though frequency
363 request is unchanged. */
364 gen6_set_rps(dev, dev_priv->rps.cur_delay);
360 365
361 mutex_unlock(&dev_priv->rps.hw_lock); 366 mutex_unlock(&dev_priv->rps.hw_lock);
362 367
@@ -426,6 +431,11 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
426 else 431 else
427 gen6_set_rps(dev, val); 432 gen6_set_rps(dev, val);
428 } 433 }
434 else if (!IS_VALLEYVIEW(dev))
435 /* We still need gen6_set_rps to process the new min_delay
436 and update the interrupt limits even though frequency
437 request is unchanged. */
438 gen6_set_rps(dev, dev_priv->rps.cur_delay);
429 439
430 mutex_unlock(&dev_priv->rps.hw_lock); 440 mutex_unlock(&dev_priv->rps.hw_lock);
431 441
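The same comment appears in both the max and min store paths: even when the requested frequency itself is unchanged, gen6_set_rps() must still run so the RPS up/down interrupt limits pick up the new bounds. Roughly what that reprogramming looks like inside gen6_set_rps() of this era is sketched below; the register name and shift positions are recalled from memory and should be treated as approximate.

/* approximate -- see gen6_set_rps() in intel_pm.c for the real code */
static void gen6_update_rps_interrupt_limits_sketch(struct drm_i915_private *dev_priv)
{
        u32 limits = dev_priv->rps.max_delay << 24 | dev_priv->rps.min_delay << 16;

        I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, limits);
}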
diff --git a/drivers/gpu/drm/i915/i915_ums.c b/drivers/gpu/drm/i915/i915_ums.c
index caa18e855815..480da593e6c0 100644
--- a/drivers/gpu/drm/i915/i915_ums.c
+++ b/drivers/gpu/drm/i915/i915_ums.c
@@ -271,6 +271,10 @@ void i915_save_display_reg(struct drm_device *dev)
271 /* FIXME: regfile.save TV & SDVO state */ 271 /* FIXME: regfile.save TV & SDVO state */
272 272
273 /* Backlight */ 273 /* Backlight */
274 if (INTEL_INFO(dev)->gen <= 4)
275 pci_read_config_byte(dev->pdev, PCI_LBPC,
276 &dev_priv->regfile.saveLBB);
277
274 if (HAS_PCH_SPLIT(dev)) { 278 if (HAS_PCH_SPLIT(dev)) {
275 dev_priv->regfile.saveBLC_PWM_CTL = I915_READ(BLC_PWM_PCH_CTL1); 279 dev_priv->regfile.saveBLC_PWM_CTL = I915_READ(BLC_PWM_PCH_CTL1);
276 dev_priv->regfile.saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_PCH_CTL2); 280 dev_priv->regfile.saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_PCH_CTL2);
@@ -293,6 +297,10 @@ void i915_restore_display_reg(struct drm_device *dev)
293 int i; 297 int i;
294 298
295 /* Backlight */ 299 /* Backlight */
300 if (INTEL_INFO(dev)->gen <= 4)
301 pci_write_config_byte(dev->pdev, PCI_LBPC,
302 dev_priv->regfile.saveLBB);
303
296 if (HAS_PCH_SPLIT(dev)) { 304 if (HAS_PCH_SPLIT(dev)) {
297 I915_WRITE(BLC_PWM_PCH_CTL1, dev_priv->regfile.saveBLC_PWM_CTL); 305 I915_WRITE(BLC_PWM_PCH_CTL1, dev_priv->regfile.saveBLC_PWM_CTL);
298 I915_WRITE(BLC_PWM_PCH_CTL2, dev_priv->regfile.saveBLC_PWM_CTL2); 306 I915_WRITE(BLC_PWM_PCH_CTL2, dev_priv->regfile.saveBLC_PWM_CTL2);
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index f22041973f3a..86b95ca413d1 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -259,7 +259,7 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
259 downclock = dvo_timing->clock; 259 downclock = dvo_timing->clock;
260 } 260 }
261 261
262 if (downclock < panel_dvo_timing->clock && i915_lvds_downclock) { 262 if (downclock < panel_dvo_timing->clock && i915.lvds_downclock) {
263 dev_priv->lvds_downclock_avail = 1; 263 dev_priv->lvds_downclock_avail = 1;
264 dev_priv->lvds_downclock = downclock * 10; 264 dev_priv->lvds_downclock = downclock * 10;
265 DRM_DEBUG_KMS("LVDS downclock is found in VBT. " 265 DRM_DEBUG_KMS("LVDS downclock is found in VBT. "
@@ -318,7 +318,7 @@ parse_sdvo_panel_data(struct drm_i915_private *dev_priv,
318 struct drm_display_mode *panel_fixed_mode; 318 struct drm_display_mode *panel_fixed_mode;
319 int index; 319 int index;
320 320
321 index = i915_vbt_sdvo_panel_type; 321 index = i915.vbt_sdvo_panel_type;
322 if (index == -2) { 322 if (index == -2) {
323 DRM_DEBUG_KMS("Ignore SDVO panel mode from BIOS VBT tables.\n"); 323 DRM_DEBUG_KMS("Ignore SDVO panel mode from BIOS VBT tables.\n");
324 return; 324 return;
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index e2e39e65f109..5b444a4b625c 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -857,4 +857,6 @@ void intel_crt_init(struct drm_device *dev)
857 857
858 dev_priv->fdi_rx_config = I915_READ(_FDI_RXA_CTL) & fdi_config; 858 dev_priv->fdi_rx_config = I915_READ(_FDI_RXA_CTL) & fdi_config;
859 } 859 }
860
861 intel_crt_reset(connector);
860} 862}
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index e06b9e017d6b..cd65dd04ba20 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -633,6 +633,97 @@ static void wrpll_update_rnp(uint64_t freq2k, unsigned budget,
633 /* Otherwise a < c && b >= d, do nothing */ 633 /* Otherwise a < c && b >= d, do nothing */
634} 634}
635 635
636static int intel_ddi_calc_wrpll_link(struct drm_i915_private *dev_priv,
637 int reg)
638{
639 int refclk = LC_FREQ;
640 int n, p, r;
641 u32 wrpll;
642
643 wrpll = I915_READ(reg);
644 switch (wrpll & SPLL_PLL_REF_MASK) {
645 case SPLL_PLL_SSC:
646 case SPLL_PLL_NON_SSC:
647 /*
648 * We could calculate spread here, but our checking
649 * code only cares about 5% accuracy, and spread is a max of
650 * 0.5% downspread.
651 */
652 refclk = 135;
653 break;
654 case SPLL_PLL_LCPLL:
655 refclk = LC_FREQ;
656 break;
657 default:
658 WARN(1, "bad wrpll refclk\n");
659 return 0;
660 }
661
662 r = wrpll & WRPLL_DIVIDER_REF_MASK;
663 p = (wrpll & WRPLL_DIVIDER_POST_MASK) >> WRPLL_DIVIDER_POST_SHIFT;
664 n = (wrpll & WRPLL_DIVIDER_FB_MASK) >> WRPLL_DIVIDER_FB_SHIFT;
665
666 /* Convert to KHz, p & r have a fixed point portion */
667 return (refclk * n * 100) / (p * r);
668}
669
670static void intel_ddi_clock_get(struct intel_encoder *encoder,
671 struct intel_crtc_config *pipe_config)
672{
673 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
674 enum port port = intel_ddi_get_encoder_port(encoder);
675 int link_clock = 0;
676 u32 val, pll;
677
678 val = I915_READ(PORT_CLK_SEL(port));
679 switch (val & PORT_CLK_SEL_MASK) {
680 case PORT_CLK_SEL_LCPLL_810:
681 link_clock = 81000;
682 break;
683 case PORT_CLK_SEL_LCPLL_1350:
684 link_clock = 135000;
685 break;
686 case PORT_CLK_SEL_LCPLL_2700:
687 link_clock = 270000;
688 break;
689 case PORT_CLK_SEL_WRPLL1:
690 link_clock = intel_ddi_calc_wrpll_link(dev_priv, WRPLL_CTL1);
691 break;
692 case PORT_CLK_SEL_WRPLL2:
693 link_clock = intel_ddi_calc_wrpll_link(dev_priv, WRPLL_CTL2);
694 break;
695 case PORT_CLK_SEL_SPLL:
696 pll = I915_READ(SPLL_CTL) & SPLL_PLL_FREQ_MASK;
697 if (pll == SPLL_PLL_FREQ_810MHz)
698 link_clock = 81000;
699 else if (pll == SPLL_PLL_FREQ_1350MHz)
700 link_clock = 135000;
701 else if (pll == SPLL_PLL_FREQ_2700MHz)
702 link_clock = 270000;
703 else {
704 WARN(1, "bad spll freq\n");
705 return;
706 }
707 break;
708 default:
709 WARN(1, "bad port clock sel\n");
710 return;
711 }
712
713 pipe_config->port_clock = link_clock * 2;
714
715 if (pipe_config->has_pch_encoder)
716 pipe_config->adjusted_mode.crtc_clock =
717 intel_dotclock_calculate(pipe_config->port_clock,
718 &pipe_config->fdi_m_n);
719 else if (pipe_config->has_dp_encoder)
720 pipe_config->adjusted_mode.crtc_clock =
721 intel_dotclock_calculate(pipe_config->port_clock,
722 &pipe_config->dp_m_n);
723 else
724 pipe_config->adjusted_mode.crtc_clock = pipe_config->port_clock;
725}
726
636static void 727static void
637intel_ddi_calculate_wrpll(int clock /* in Hz */, 728intel_ddi_calculate_wrpll(int clock /* in Hz */,
638 unsigned *r2_out, unsigned *n2_out, unsigned *p_out) 729 unsigned *r2_out, unsigned *n2_out, unsigned *p_out)
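For the fastboot/clock-readout thread: once intel_ddi_clock_get() has recovered port_clock, the dot clock follows from the link M/N ratio via intel_dotclock_calculate(). The relationship is sketched below; the helper's internals are recalled from memory and the numbers in the comment are arithmetic examples only, not measured panel values.

/*
 * pixel_clock = port_clock * link_m / link_n
 * e.g. a 2.7 GHz DP link (port_clock = 270000 kHz) with link_m = 107 and
 * link_n = 320 gives roughly 90281 kHz.
 * (div_u64 comes from linux/math64.h; struct intel_link_m_n is the i915
 * M/N bookkeeping type.)
 */
static int dotclock_from_link_sketch(int link_clock_khz,
                                     const struct intel_link_m_n *m_n)
{
        return div_u64((u64)m_n->link_m * link_clock_khz, m_n->link_n);
}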
@@ -1200,7 +1291,7 @@ static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
1200 1291
1201 if (type == INTEL_OUTPUT_EDP) { 1292 if (type == INTEL_OUTPUT_EDP) {
1202 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 1293 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
1203 ironlake_edp_panel_on(intel_dp); 1294 intel_edp_panel_on(intel_dp);
1204 } 1295 }
1205 1296
1206 WARN_ON(intel_crtc->ddi_pll_sel == PORT_CLK_SEL_NONE); 1297 WARN_ON(intel_crtc->ddi_pll_sel == PORT_CLK_SEL_NONE);
@@ -1244,7 +1335,7 @@ static void intel_ddi_post_disable(struct intel_encoder *intel_encoder)
1244 if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) { 1335 if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
1245 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 1336 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
1246 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF); 1337 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
1247 ironlake_edp_panel_off(intel_dp); 1338 intel_edp_panel_off(intel_dp);
1248 } 1339 }
1249 1340
1250 I915_WRITE(PORT_CLK_SEL(port), PORT_CLK_SEL_NONE); 1341 I915_WRITE(PORT_CLK_SEL(port), PORT_CLK_SEL_NONE);
@@ -1279,7 +1370,7 @@ static void intel_enable_ddi(struct intel_encoder *intel_encoder)
1279 if (port == PORT_A) 1370 if (port == PORT_A)
1280 intel_dp_stop_link_train(intel_dp); 1371 intel_dp_stop_link_train(intel_dp);
1281 1372
1282 ironlake_edp_backlight_on(intel_dp); 1373 intel_edp_backlight_on(intel_dp);
1283 intel_edp_psr_enable(intel_dp); 1374 intel_edp_psr_enable(intel_dp);
1284 } 1375 }
1285 1376
@@ -1312,7 +1403,7 @@ static void intel_disable_ddi(struct intel_encoder *intel_encoder)
1312 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 1403 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
1313 1404
1314 intel_edp_psr_disable(intel_dp); 1405 intel_edp_psr_disable(intel_dp);
1315 ironlake_edp_backlight_off(intel_dp); 1406 intel_edp_backlight_off(intel_dp);
1316 } 1407 }
1317} 1408}
1318 1409
@@ -1509,6 +1600,8 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
1509 pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp); 1600 pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
1510 dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp; 1601 dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
1511 } 1602 }
1603
1604 intel_ddi_clock_get(encoder, pipe_config);
1512} 1605}
1513 1606
1514static void intel_ddi_destroy(struct drm_encoder *encoder) 1607static void intel_ddi_destroy(struct drm_encoder *encoder)
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 4c1672809493..0f4cbd0aa59e 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -2372,7 +2372,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
2372 * whether the platform allows pfit disable with pipe active, and only 2372 * whether the platform allows pfit disable with pipe active, and only
2373 * then update the pipesrc and pfit state, even on the flip path. 2373 * then update the pipesrc and pfit state, even on the flip path.
2374 */ 2374 */
2375 if (i915_fastboot) { 2375 if (i915.fastboot) {
2376 const struct drm_display_mode *adjusted_mode = 2376 const struct drm_display_mode *adjusted_mode =
2377 &intel_crtc->config.adjusted_mode; 2377 &intel_crtc->config.adjusted_mode;
2378 2378
@@ -4088,9 +4088,8 @@ static int valleyview_calc_cdclk(struct drm_i915_private *dev_priv,
4088 /* Looks like the 200MHz CDclk freq doesn't work on some configs */ 4088 /* Looks like the 200MHz CDclk freq doesn't work on some configs */
4089} 4089}
4090 4090
4091static int intel_mode_max_pixclk(struct drm_i915_private *dev_priv, 4091/* compute the max pixel clock for new configuration */
4092 unsigned modeset_pipes, 4092static int intel_mode_max_pixclk(struct drm_i915_private *dev_priv)
4093 struct intel_crtc_config *pipe_config)
4094{ 4093{
4095 struct drm_device *dev = dev_priv->dev; 4094 struct drm_device *dev = dev_priv->dev;
4096 struct intel_crtc *intel_crtc; 4095 struct intel_crtc *intel_crtc;
@@ -4098,31 +4097,26 @@ static int intel_mode_max_pixclk(struct drm_i915_private *dev_priv,
4098 4097
4099 list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, 4098 list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list,
4100 base.head) { 4099 base.head) {
4101 if (modeset_pipes & (1 << intel_crtc->pipe)) 4100 if (intel_crtc->new_enabled)
4102 max_pixclk = max(max_pixclk,
4103 pipe_config->adjusted_mode.crtc_clock);
4104 else if (intel_crtc->base.enabled)
4105 max_pixclk = max(max_pixclk, 4101 max_pixclk = max(max_pixclk,
4106 intel_crtc->config.adjusted_mode.crtc_clock); 4102 intel_crtc->new_config->adjusted_mode.crtc_clock);
4107 } 4103 }
4108 4104
4109 return max_pixclk; 4105 return max_pixclk;
4110} 4106}
4111 4107
4112static void valleyview_modeset_global_pipes(struct drm_device *dev, 4108static void valleyview_modeset_global_pipes(struct drm_device *dev,
4113 unsigned *prepare_pipes, 4109 unsigned *prepare_pipes)
4114 unsigned modeset_pipes,
4115 struct intel_crtc_config *pipe_config)
4116{ 4110{
4117 struct drm_i915_private *dev_priv = dev->dev_private; 4111 struct drm_i915_private *dev_priv = dev->dev_private;
4118 struct intel_crtc *intel_crtc; 4112 struct intel_crtc *intel_crtc;
4119 int max_pixclk = intel_mode_max_pixclk(dev_priv, modeset_pipes, 4113 int max_pixclk = intel_mode_max_pixclk(dev_priv);
4120 pipe_config);
4121 int cur_cdclk = valleyview_cur_cdclk(dev_priv); 4114 int cur_cdclk = valleyview_cur_cdclk(dev_priv);
4122 4115
4123 if (valleyview_calc_cdclk(dev_priv, max_pixclk) == cur_cdclk) 4116 if (valleyview_calc_cdclk(dev_priv, max_pixclk) == cur_cdclk)
4124 return; 4117 return;
4125 4118
4119 /* disable/enable all currently active pipes while we change cdclk */
4126 list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, 4120 list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list,
4127 base.head) 4121 base.head)
4128 if (intel_crtc->base.enabled) 4122 if (intel_crtc->base.enabled)
@@ -4132,7 +4126,7 @@ static void valleyview_modeset_global_pipes(struct drm_device *dev,
4132static void valleyview_modeset_global_resources(struct drm_device *dev) 4126static void valleyview_modeset_global_resources(struct drm_device *dev)
4133{ 4127{
4134 struct drm_i915_private *dev_priv = dev->dev_private; 4128 struct drm_i915_private *dev_priv = dev->dev_private;
4135 int max_pixclk = intel_mode_max_pixclk(dev_priv, 0, NULL); 4129 int max_pixclk = intel_mode_max_pixclk(dev_priv);
4136 int cur_cdclk = valleyview_cur_cdclk(dev_priv); 4130 int cur_cdclk = valleyview_cur_cdclk(dev_priv);
4137 int req_cdclk = valleyview_calc_cdclk(dev_priv, max_pixclk); 4131 int req_cdclk = valleyview_calc_cdclk(dev_priv, max_pixclk);
4138 4132
@@ -4176,6 +4170,7 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
4176 4170
4177 intel_update_watermarks(crtc); 4171 intel_update_watermarks(crtc);
4178 intel_enable_pipe(dev_priv, pipe, false, is_dsi); 4172 intel_enable_pipe(dev_priv, pipe, false, is_dsi);
4173 intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
4179 intel_enable_primary_plane(dev_priv, plane, pipe); 4174 intel_enable_primary_plane(dev_priv, plane, pipe);
4180 intel_enable_planes(crtc); 4175 intel_enable_planes(crtc);
4181 intel_crtc_update_cursor(crtc, true); 4176 intel_crtc_update_cursor(crtc, true);
@@ -4214,6 +4209,7 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
4214 4209
4215 intel_update_watermarks(crtc); 4210 intel_update_watermarks(crtc);
4216 intel_enable_pipe(dev_priv, pipe, false, false); 4211 intel_enable_pipe(dev_priv, pipe, false, false);
4212 intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
4217 intel_enable_primary_plane(dev_priv, plane, pipe); 4213 intel_enable_primary_plane(dev_priv, plane, pipe);
4218 intel_enable_planes(crtc); 4214 intel_enable_planes(crtc);
4219 /* The fixup needs to happen before cursor is enabled */ 4215 /* The fixup needs to happen before cursor is enabled */
@@ -4272,6 +4268,7 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
4272 intel_disable_planes(crtc); 4268 intel_disable_planes(crtc);
4273 intel_disable_primary_plane(dev_priv, plane, pipe); 4269 intel_disable_primary_plane(dev_priv, plane, pipe);
4274 4270
4271 intel_set_cpu_fifo_underrun_reporting(dev, pipe, false);
4275 intel_disable_pipe(dev_priv, pipe); 4272 intel_disable_pipe(dev_priv, pipe);
4276 4273
4277 i9xx_pfit_disable(intel_crtc); 4274 i9xx_pfit_disable(intel_crtc);
@@ -4583,7 +4580,7 @@ retry:
4583static void hsw_compute_ips_config(struct intel_crtc *crtc, 4580static void hsw_compute_ips_config(struct intel_crtc *crtc,
4584 struct intel_crtc_config *pipe_config) 4581 struct intel_crtc_config *pipe_config)
4585{ 4582{
4586 pipe_config->ips_enabled = i915_enable_ips && 4583 pipe_config->ips_enabled = i915.enable_ips &&
4587 hsw_crtc_supports_ips(crtc) && 4584 hsw_crtc_supports_ips(crtc) &&
4588 pipe_config->pipe_bpp <= 24; 4585 pipe_config->pipe_bpp <= 24;
4589} 4586}
@@ -4784,8 +4781,8 @@ intel_link_compute_m_n(int bits_per_pixel, int nlanes,
4784 4781
4785static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv) 4782static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
4786{ 4783{
4787 if (i915_panel_use_ssc >= 0) 4784 if (i915.panel_use_ssc >= 0)
4788 return i915_panel_use_ssc != 0; 4785 return i915.panel_use_ssc != 0;
4789 return dev_priv->vbt.lvds_use_ssc 4786 return dev_priv->vbt.lvds_use_ssc
4790 && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE); 4787 && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
4791} 4788}
@@ -4844,7 +4841,7 @@ static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
4844 4841
4845 crtc->lowfreq_avail = false; 4842 crtc->lowfreq_avail = false;
4846 if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) && 4843 if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
4847 reduced_clock && i915_powersave) { 4844 reduced_clock && i915.powersave) {
4848 I915_WRITE(FP1(pipe), fp2); 4845 I915_WRITE(FP1(pipe), fp2);
4849 crtc->config.dpll_hw_state.fp1 = fp2; 4846 crtc->config.dpll_hw_state.fp1 = fp2;
4850 crtc->lowfreq_avail = true; 4847 crtc->lowfreq_avail = true;
@@ -6348,7 +6345,7 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
6348 if (intel_crtc->config.has_dp_encoder) 6345 if (intel_crtc->config.has_dp_encoder)
6349 intel_dp_set_m_n(intel_crtc); 6346 intel_dp_set_m_n(intel_crtc);
6350 6347
6351 if (is_lvds && has_reduced_clock && i915_powersave) 6348 if (is_lvds && has_reduced_clock && i915.powersave)
6352 intel_crtc->lowfreq_avail = true; 6349 intel_crtc->lowfreq_avail = true;
6353 else 6350 else
6354 intel_crtc->lowfreq_avail = false; 6351 intel_crtc->lowfreq_avail = false;
@@ -6716,7 +6713,7 @@ static void __hsw_enable_package_c8(struct drm_i915_private *dev_priv)
6716 return; 6713 return;
6717 6714
6718 schedule_delayed_work(&dev_priv->pc8.enable_work, 6715 schedule_delayed_work(&dev_priv->pc8.enable_work,
6719 msecs_to_jiffies(i915_pc8_timeout)); 6716 msecs_to_jiffies(i915.pc8_timeout));
6720} 6717}
6721 6718
6722static void __hsw_disable_package_c8(struct drm_i915_private *dev_priv) 6719static void __hsw_disable_package_c8(struct drm_i915_private *dev_priv)
@@ -6815,7 +6812,7 @@ static void hsw_update_package_c8(struct drm_device *dev)
6815 if (!HAS_PC8(dev_priv->dev)) 6812 if (!HAS_PC8(dev_priv->dev))
6816 return; 6813 return;
6817 6814
6818 if (!i915_enable_pc8) 6815 if (!i915.enable_pc8)
6819 return; 6816 return;
6820 6817
6821 mutex_lock(&dev_priv->pc8.lock); 6818 mutex_lock(&dev_priv->pc8.lock);
@@ -7855,6 +7852,8 @@ bool intel_get_load_detect_pipe(struct drm_connector *connector,
7855 to_intel_connector(connector)->new_encoder = intel_encoder; 7852 to_intel_connector(connector)->new_encoder = intel_encoder;
7856 7853
7857 intel_crtc = to_intel_crtc(crtc); 7854 intel_crtc = to_intel_crtc(crtc);
7855 intel_crtc->new_enabled = true;
7856 intel_crtc->new_config = &intel_crtc->config;
7858 old->dpms_mode = connector->dpms; 7857 old->dpms_mode = connector->dpms;
7859 old->load_detect_temp = true; 7858 old->load_detect_temp = true;
7860 old->release_fb = NULL; 7859 old->release_fb = NULL;
@@ -7878,21 +7877,28 @@ bool intel_get_load_detect_pipe(struct drm_connector *connector,
7878 DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n"); 7877 DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n");
7879 if (IS_ERR(fb)) { 7878 if (IS_ERR(fb)) {
7880 DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n"); 7879 DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n");
7881 mutex_unlock(&crtc->mutex); 7880 goto fail;
7882 return false;
7883 } 7881 }
7884 7882
7885 if (intel_set_mode(crtc, mode, 0, 0, fb)) { 7883 if (intel_set_mode(crtc, mode, 0, 0, fb)) {
7886 DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n"); 7884 DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
7887 if (old->release_fb) 7885 if (old->release_fb)
7888 old->release_fb->funcs->destroy(old->release_fb); 7886 old->release_fb->funcs->destroy(old->release_fb);
7889 mutex_unlock(&crtc->mutex); 7887 goto fail;
7890 return false;
7891 } 7888 }
7892 7889
7893 /* let the connector get through one full cycle before testing */ 7890 /* let the connector get through one full cycle before testing */
7894 intel_wait_for_vblank(dev, intel_crtc->pipe); 7891 intel_wait_for_vblank(dev, intel_crtc->pipe);
7895 return true; 7892 return true;
7893
7894 fail:
7895 intel_crtc->new_enabled = crtc->enabled;
7896 if (intel_crtc->new_enabled)
7897 intel_crtc->new_config = &intel_crtc->config;
7898 else
7899 intel_crtc->new_config = NULL;
7900 mutex_unlock(&crtc->mutex);
7901 return false;
7896} 7902}
7897 7903
7898void intel_release_load_detect_pipe(struct drm_connector *connector, 7904void intel_release_load_detect_pipe(struct drm_connector *connector,
@@ -7902,6 +7908,7 @@ void intel_release_load_detect_pipe(struct drm_connector *connector,
7902 intel_attached_encoder(connector); 7908 intel_attached_encoder(connector);
7903 struct drm_encoder *encoder = &intel_encoder->base; 7909 struct drm_encoder *encoder = &intel_encoder->base;
7904 struct drm_crtc *crtc = encoder->crtc; 7910 struct drm_crtc *crtc = encoder->crtc;
7911 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7905 7912
7906 DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n", 7913 DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
7907 connector->base.id, drm_get_connector_name(connector), 7914 connector->base.id, drm_get_connector_name(connector),
@@ -7910,6 +7917,8 @@ void intel_release_load_detect_pipe(struct drm_connector *connector,
7910 if (old->load_detect_temp) { 7917 if (old->load_detect_temp) {
7911 to_intel_connector(connector)->new_encoder = NULL; 7918 to_intel_connector(connector)->new_encoder = NULL;
7912 intel_encoder->new_crtc = NULL; 7919 intel_encoder->new_crtc = NULL;
7920 intel_crtc->new_enabled = false;
7921 intel_crtc->new_config = NULL;
7913 intel_set_mode(crtc, NULL, 0, 0, NULL); 7922 intel_set_mode(crtc, NULL, 0, 0, NULL);
7914 7923
7915 if (old->release_fb) { 7924 if (old->release_fb) {
@@ -8201,7 +8210,7 @@ void intel_mark_idle(struct drm_device *dev)
8201 8210
8202 hsw_package_c8_gpu_idle(dev_priv); 8211 hsw_package_c8_gpu_idle(dev_priv);
8203 8212
8204 if (!i915_powersave) 8213 if (!i915.powersave)
8205 return; 8214 return;
8206 8215
8207 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 8216 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
@@ -8221,7 +8230,7 @@ void intel_mark_fb_busy(struct drm_i915_gem_object *obj,
8221 struct drm_device *dev = obj->base.dev; 8230 struct drm_device *dev = obj->base.dev;
8222 struct drm_crtc *crtc; 8231 struct drm_crtc *crtc;
8223 8232
8224 if (!i915_powersave) 8233 if (!i915.powersave)
8225 return; 8234 return;
8226 8235
8227 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 8236 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
@@ -8766,6 +8775,7 @@ static struct drm_crtc_helper_funcs intel_helper_funcs = {
8766 */ 8775 */
8767static void intel_modeset_update_staged_output_state(struct drm_device *dev) 8776static void intel_modeset_update_staged_output_state(struct drm_device *dev)
8768{ 8777{
8778 struct intel_crtc *crtc;
8769 struct intel_encoder *encoder; 8779 struct intel_encoder *encoder;
8770 struct intel_connector *connector; 8780 struct intel_connector *connector;
8771 8781
@@ -8780,6 +8790,16 @@ static void intel_modeset_update_staged_output_state(struct drm_device *dev)
8780 encoder->new_crtc = 8790 encoder->new_crtc =
8781 to_intel_crtc(encoder->base.crtc); 8791 to_intel_crtc(encoder->base.crtc);
8782 } 8792 }
8793
8794 list_for_each_entry(crtc, &dev->mode_config.crtc_list,
8795 base.head) {
8796 crtc->new_enabled = crtc->base.enabled;
8797
8798 if (crtc->new_enabled)
8799 crtc->new_config = &crtc->config;
8800 else
8801 crtc->new_config = NULL;
8802 }
8783} 8803}
8784 8804
8785/** 8805/**
@@ -8789,6 +8809,7 @@ static void intel_modeset_update_staged_output_state(struct drm_device *dev)
8789 */ 8809 */
8790static void intel_modeset_commit_output_state(struct drm_device *dev) 8810static void intel_modeset_commit_output_state(struct drm_device *dev)
8791{ 8811{
8812 struct intel_crtc *crtc;
8792 struct intel_encoder *encoder; 8813 struct intel_encoder *encoder;
8793 struct intel_connector *connector; 8814 struct intel_connector *connector;
8794 8815
@@ -8801,6 +8822,11 @@ static void intel_modeset_commit_output_state(struct drm_device *dev)
8801 base.head) { 8822 base.head) {
8802 encoder->base.crtc = &encoder->new_crtc->base; 8823 encoder->base.crtc = &encoder->new_crtc->base;
8803 } 8824 }
8825
8826 list_for_each_entry(crtc, &dev->mode_config.crtc_list,
8827 base.head) {
8828 crtc->base.enabled = crtc->new_enabled;
8829 }
8804} 8830}
8805 8831
8806static void 8832static void
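Taken together, the two helpers above implement a simple stage/commit split for the new per-crtc state; compressed into one view (a summary of the hunks above, not code from the patch):

/*
 *   stage:    crtc->new_enabled  = <computed from the staged encoders>;
 *             crtc->new_config   = crtc->new_enabled ? &crtc->config : NULL;
 *   commit:   crtc->base.enabled = crtc->new_enabled;
 *   re-stage: crtc->new_enabled  = crtc->base.enabled;
 *             crtc->new_config   = crtc->new_enabled ? &crtc->config : NULL;
 */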
@@ -9127,29 +9153,22 @@ intel_modeset_affected_pipes(struct drm_crtc *crtc, unsigned *modeset_pipes,
9127 *prepare_pipes |= 1 << encoder->new_crtc->pipe; 9153 *prepare_pipes |= 1 << encoder->new_crtc->pipe;
9128 } 9154 }
9129 9155
9130 /* Check for any pipes that will be fully disabled ... */ 9156 /* Check for pipes that will be enabled/disabled ... */
9131 list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, 9157 list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list,
9132 base.head) { 9158 base.head) {
9133 bool used = false; 9159 if (intel_crtc->base.enabled == intel_crtc->new_enabled)
9134
9135 /* Don't try to disable disabled crtcs. */
9136 if (!intel_crtc->base.enabled)
9137 continue; 9160 continue;
9138 9161
9139 list_for_each_entry(encoder, &dev->mode_config.encoder_list, 9162 if (!intel_crtc->new_enabled)
9140 base.head) {
9141 if (encoder->new_crtc == intel_crtc)
9142 used = true;
9143 }
9144
9145 if (!used)
9146 *disable_pipes |= 1 << intel_crtc->pipe; 9163 *disable_pipes |= 1 << intel_crtc->pipe;
9164 else
9165 *prepare_pipes |= 1 << intel_crtc->pipe;
9147 } 9166 }
9148 9167
9149 9168
9150 /* set_mode is also used to update properties on life display pipes. */ 9169 /* set_mode is also used to update properties on life display pipes. */
9151 intel_crtc = to_intel_crtc(crtc); 9170 intel_crtc = to_intel_crtc(crtc);
9152 if (crtc->enabled) 9171 if (intel_crtc->new_enabled)
9153 *prepare_pipes |= 1 << intel_crtc->pipe; 9172 *prepare_pipes |= 1 << intel_crtc->pipe;
9154 9173
9155 /* 9174 /*
@@ -9208,10 +9227,13 @@ intel_modeset_update_state(struct drm_device *dev, unsigned prepare_pipes)
9208 9227
9209 intel_modeset_commit_output_state(dev); 9228 intel_modeset_commit_output_state(dev);
9210 9229
9211 /* Update computed state. */ 9230 /* Double check state. */
9212 list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, 9231 list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list,
9213 base.head) { 9232 base.head) {
9214 intel_crtc->base.enabled = intel_crtc_in_use(&intel_crtc->base); 9233 WARN_ON(intel_crtc->base.enabled != intel_crtc_in_use(&intel_crtc->base));
9234 WARN_ON(intel_crtc->new_config &&
9235 intel_crtc->new_config != &intel_crtc->config);
9236 WARN_ON(intel_crtc->base.enabled != !!intel_crtc->new_config);
9215 } 9237 }
9216 9238
9217 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 9239 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
@@ -9380,10 +9402,8 @@ intel_pipe_config_compare(struct drm_device *dev,
9380 if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) 9402 if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5)
9381 PIPE_CONF_CHECK_I(pipe_bpp); 9403 PIPE_CONF_CHECK_I(pipe_bpp);
9382 9404
9383 if (!HAS_DDI(dev)) { 9405 PIPE_CONF_CHECK_CLOCK_FUZZY(adjusted_mode.crtc_clock);
9384 PIPE_CONF_CHECK_CLOCK_FUZZY(adjusted_mode.crtc_clock); 9406 PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
9385 PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
9386 }
9387 9407
9388#undef PIPE_CONF_CHECK_X 9408#undef PIPE_CONF_CHECK_X
9389#undef PIPE_CONF_CHECK_I 9409#undef PIPE_CONF_CHECK_I
@@ -9643,6 +9663,7 @@ static int __intel_set_mode(struct drm_crtc *crtc,
9643 } 9663 }
9644 intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config, 9664 intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config,
9645 "[modeset]"); 9665 "[modeset]");
9666 to_intel_crtc(crtc)->new_config = pipe_config;
9646 } 9667 }
9647 9668
9648 /* 9669 /*
@@ -9653,8 +9674,7 @@ static int __intel_set_mode(struct drm_crtc *crtc,
9653 * adjusted_mode bits in the crtc directly. 9674 * adjusted_mode bits in the crtc directly.
9654 */ 9675 */
9655 if (IS_VALLEYVIEW(dev)) { 9676 if (IS_VALLEYVIEW(dev)) {
9656 valleyview_modeset_global_pipes(dev, &prepare_pipes, 9677 valleyview_modeset_global_pipes(dev, &prepare_pipes);
9657 modeset_pipes, pipe_config);
9658 9678
9659 /* may have added more to prepare_pipes than we should */ 9679 /* may have added more to prepare_pipes than we should */
9660 prepare_pipes &= ~disable_pipes; 9680 prepare_pipes &= ~disable_pipes;
@@ -9676,6 +9696,7 @@ static int __intel_set_mode(struct drm_crtc *crtc,
9676 /* mode_set/enable/disable functions rely on a correct pipe 9696 /* mode_set/enable/disable functions rely on a correct pipe
9677 * config. */ 9697 * config. */
9678 to_intel_crtc(crtc)->config = *pipe_config; 9698 to_intel_crtc(crtc)->config = *pipe_config;
9699 to_intel_crtc(crtc)->new_config = &to_intel_crtc(crtc)->config;
9679 9700
9680 /* 9701 /*
9681 * Calculate and store various constants which 9702 * Calculate and store various constants which
@@ -9746,16 +9767,24 @@ static void intel_set_config_free(struct intel_set_config *config)
9746 9767
9747 kfree(config->save_connector_encoders); 9768 kfree(config->save_connector_encoders);
9748 kfree(config->save_encoder_crtcs); 9769 kfree(config->save_encoder_crtcs);
9770 kfree(config->save_crtc_enabled);
9749 kfree(config); 9771 kfree(config);
9750} 9772}
9751 9773
9752static int intel_set_config_save_state(struct drm_device *dev, 9774static int intel_set_config_save_state(struct drm_device *dev,
9753 struct intel_set_config *config) 9775 struct intel_set_config *config)
9754{ 9776{
9777 struct drm_crtc *crtc;
9755 struct drm_encoder *encoder; 9778 struct drm_encoder *encoder;
9756 struct drm_connector *connector; 9779 struct drm_connector *connector;
9757 int count; 9780 int count;
9758 9781
9782 config->save_crtc_enabled =
9783 kcalloc(dev->mode_config.num_crtc,
9784 sizeof(bool), GFP_KERNEL);
9785 if (!config->save_crtc_enabled)
9786 return -ENOMEM;
9787
9759 config->save_encoder_crtcs = 9788 config->save_encoder_crtcs =
9760 kcalloc(dev->mode_config.num_encoder, 9789 kcalloc(dev->mode_config.num_encoder,
9761 sizeof(struct drm_crtc *), GFP_KERNEL); 9790 sizeof(struct drm_crtc *), GFP_KERNEL);
@@ -9773,6 +9802,11 @@ static int intel_set_config_save_state(struct drm_device *dev,
9773 * restored, not the drivers personal bookkeeping. 9802 * restored, not the drivers personal bookkeeping.
9774 */ 9803 */
9775 count = 0; 9804 count = 0;
9805 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
9806 config->save_crtc_enabled[count++] = crtc->enabled;
9807 }
9808
9809 count = 0;
9776 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { 9810 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
9777 config->save_encoder_crtcs[count++] = encoder->crtc; 9811 config->save_encoder_crtcs[count++] = encoder->crtc;
9778 } 9812 }
@@ -9788,11 +9822,22 @@ static int intel_set_config_save_state(struct drm_device *dev,
9788static void intel_set_config_restore_state(struct drm_device *dev, 9822static void intel_set_config_restore_state(struct drm_device *dev,
9789 struct intel_set_config *config) 9823 struct intel_set_config *config)
9790{ 9824{
9825 struct intel_crtc *crtc;
9791 struct intel_encoder *encoder; 9826 struct intel_encoder *encoder;
9792 struct intel_connector *connector; 9827 struct intel_connector *connector;
9793 int count; 9828 int count;
9794 9829
9795 count = 0; 9830 count = 0;
9831 list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
9832 crtc->new_enabled = config->save_crtc_enabled[count++];
9833
9834 if (crtc->new_enabled)
9835 crtc->new_config = &crtc->config;
9836 else
9837 crtc->new_config = NULL;
9838 }
9839
9840 count = 0;
9796 list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) { 9841 list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
9797 encoder->new_crtc = 9842 encoder->new_crtc =
9798 to_intel_crtc(config->save_encoder_crtcs[count++]); 9843 to_intel_crtc(config->save_encoder_crtcs[count++]);
@@ -9840,7 +9885,7 @@ intel_set_config_compute_mode_changes(struct drm_mode_set *set,
9840 struct intel_crtc *intel_crtc = 9885 struct intel_crtc *intel_crtc =
9841 to_intel_crtc(set->crtc); 9886 to_intel_crtc(set->crtc);
9842 9887
9843 if (intel_crtc->active && i915_fastboot) { 9888 if (intel_crtc->active && i915.fastboot) {
9844 DRM_DEBUG_KMS("crtc has no fb, will flip\n"); 9889 DRM_DEBUG_KMS("crtc has no fb, will flip\n");
9845 config->fb_changed = true; 9890 config->fb_changed = true;
9846 } else { 9891 } else {
@@ -9876,9 +9921,9 @@ intel_modeset_stage_output_state(struct drm_device *dev,
9876 struct drm_mode_set *set, 9921 struct drm_mode_set *set,
9877 struct intel_set_config *config) 9922 struct intel_set_config *config)
9878{ 9923{
9879 struct drm_crtc *new_crtc;
9880 struct intel_connector *connector; 9924 struct intel_connector *connector;
9881 struct intel_encoder *encoder; 9925 struct intel_encoder *encoder;
9926 struct intel_crtc *crtc;
9882 int ro; 9927 int ro;
9883 9928
9884 /* The upper layers ensure that we either disable a crtc or have a list 9929 /* The upper layers ensure that we either disable a crtc or have a list
@@ -9921,6 +9966,8 @@ intel_modeset_stage_output_state(struct drm_device *dev,
9921 /* Update crtc of enabled connectors. */ 9966 /* Update crtc of enabled connectors. */
9922 list_for_each_entry(connector, &dev->mode_config.connector_list, 9967 list_for_each_entry(connector, &dev->mode_config.connector_list,
9923 base.head) { 9968 base.head) {
9969 struct drm_crtc *new_crtc;
9970
9924 if (!connector->new_encoder) 9971 if (!connector->new_encoder)
9925 continue; 9972 continue;
9926 9973
@@ -9971,9 +10018,58 @@ intel_modeset_stage_output_state(struct drm_device *dev,
9971 } 10018 }
9972 /* Now we've also updated encoder->new_crtc for all encoders. */ 10019 /* Now we've also updated encoder->new_crtc for all encoders. */
9973 10020
10021 list_for_each_entry(crtc, &dev->mode_config.crtc_list,
10022 base.head) {
10023 crtc->new_enabled = false;
10024
10025 list_for_each_entry(encoder,
10026 &dev->mode_config.encoder_list,
10027 base.head) {
10028 if (encoder->new_crtc == crtc) {
10029 crtc->new_enabled = true;
10030 break;
10031 }
10032 }
10033
10034 if (crtc->new_enabled != crtc->base.enabled) {
10035 DRM_DEBUG_KMS("crtc %sabled, full mode switch\n",
10036 crtc->new_enabled ? "en" : "dis");
10037 config->mode_changed = true;
10038 }
10039
10040 if (crtc->new_enabled)
10041 crtc->new_config = &crtc->config;
10042 else
10043 crtc->new_config = NULL;
10044 }
10045
9974 return 0; 10046 return 0;
9975} 10047}
9976 10048
10049static void disable_crtc_nofb(struct intel_crtc *crtc)
10050{
10051 struct drm_device *dev = crtc->base.dev;
10052 struct intel_encoder *encoder;
10053 struct intel_connector *connector;
10054
10055 DRM_DEBUG_KMS("Trying to restore without FB -> disabling pipe %c\n",
10056 pipe_name(crtc->pipe));
10057
10058 list_for_each_entry(connector, &dev->mode_config.connector_list, base.head) {
10059 if (connector->new_encoder &&
10060 connector->new_encoder->new_crtc == crtc)
10061 connector->new_encoder = NULL;
10062 }
10063
10064 list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
10065 if (encoder->new_crtc == crtc)
10066 encoder->new_crtc = NULL;
10067 }
10068
10069 crtc->new_enabled = false;
10070 crtc->new_config = NULL;
10071}
10072
9977static int intel_crtc_set_config(struct drm_mode_set *set) 10073static int intel_crtc_set_config(struct drm_mode_set *set)
9978{ 10074{
9979 struct drm_device *dev; 10075 struct drm_device *dev;
@@ -10040,7 +10136,7 @@ static int intel_crtc_set_config(struct drm_mode_set *set)
10040 * flipping, so increasing its cost here shouldn't be a big 10136 * flipping, so increasing its cost here shouldn't be a big
10041 * deal). 10137 * deal).
10042 */ 10138 */
10043 if (i915_fastboot && ret == 0) 10139 if (i915.fastboot && ret == 0)
10044 intel_modeset_check_state(set->crtc->dev); 10140 intel_modeset_check_state(set->crtc->dev);
10045 } 10141 }
10046 10142
@@ -10050,6 +10146,15 @@ static int intel_crtc_set_config(struct drm_mode_set *set)
10050fail: 10146fail:
10051 intel_set_config_restore_state(dev, config); 10147 intel_set_config_restore_state(dev, config);
10052 10148
10149 /*
10150 * HACK: if the pipe was on, but we didn't have a framebuffer,
10151 * force the pipe off to avoid oopsing in the modeset code
10152 * due to fb==NULL. This should only happen during boot since
10153 * we don't yet reconstruct the FB from the hardware state.
10154 */
10155 if (to_intel_crtc(save_set.crtc)->new_enabled && !save_set.fb)
10156 disable_crtc_nofb(to_intel_crtc(save_set.crtc));
10157
10053 /* Try to restore the config */ 10158 /* Try to restore the config */
10054 if (config->mode_changed && 10159 if (config->mode_changed &&
10055 intel_set_mode(save_set.crtc, save_set.mode, 10160 intel_set_mode(save_set.crtc, save_set.mode,
@@ -10839,6 +10944,9 @@ static struct intel_quirk intel_quirks[] = {
10839 10944
10840 /* Acer Aspire 4736Z */ 10945 /* Acer Aspire 4736Z */
10841 { 0x2a42, 0x1025, 0x0260, quirk_invert_brightness }, 10946 { 0x2a42, 0x1025, 0x0260, quirk_invert_brightness },
10947
10948 /* Acer Aspire 5336 */
10949 { 0x2a42, 0x1025, 0x048a, quirk_invert_brightness },
10842}; 10950};
10843 10951
10844static void intel_init_quirks(struct drm_device *dev) 10952static void intel_init_quirks(struct drm_device *dev)
@@ -10869,6 +10977,7 @@ static void i915_disable_vga(struct drm_device *dev)
10869 u8 sr1; 10977 u8 sr1;
10870 u32 vga_reg = i915_vgacntrl_reg(dev); 10978 u32 vga_reg = i915_vgacntrl_reg(dev);
10871 10979
10980 /* WaEnableVGAAccessThroughIOPort:ctg,elk,ilk,snb,ivb,vlv,hsw */
10872 vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO); 10981 vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
10873 outb(SR01, VGA_SR_INDEX); 10982 outb(SR01, VGA_SR_INDEX);
10874 sr1 = inb(VGA_SR_DATA); 10983 sr1 = inb(VGA_SR_DATA);
@@ -11265,7 +11374,7 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
11265 */ 11374 */
11266 list_for_each_entry(crtc, &dev->mode_config.crtc_list, 11375 list_for_each_entry(crtc, &dev->mode_config.crtc_list,
11267 base.head) { 11376 base.head) {
11268 if (crtc->active && i915_fastboot) { 11377 if (crtc->active && i915.fastboot) {
11269 intel_crtc_mode_from_pipe_config(crtc, &crtc->config); 11378 intel_crtc_mode_from_pipe_config(crtc, &crtc->config);
11270 11379
11271 DRM_DEBUG_KMS("[CRTC:%d] found active mode: ", 11380 DRM_DEBUG_KMS("[CRTC:%d] found active mode: ",
@@ -11329,7 +11438,6 @@ void intel_modeset_gem_init(struct drm_device *dev)
11329 intel_setup_overlay(dev); 11438 intel_setup_overlay(dev);
11330 11439
11331 mutex_lock(&dev->mode_config.mutex); 11440 mutex_lock(&dev->mode_config.mutex);
11332 drm_mode_config_reset(dev);
11333 intel_modeset_setup_hw_state(dev, false); 11441 intel_modeset_setup_hw_state(dev, false);
11334 mutex_unlock(&dev->mode_config.mutex); 11442 mutex_unlock(&dev->mode_config.mutex);
11335} 11443}
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 57552eb386b0..bd1df502bc34 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -91,18 +91,25 @@ static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
91} 91}
92 92
93static void intel_dp_link_down(struct intel_dp *intel_dp); 93static void intel_dp_link_down(struct intel_dp *intel_dp);
94static void edp_panel_vdd_on(struct intel_dp *intel_dp);
95static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
94 96
95static int 97static int
96intel_dp_max_link_bw(struct intel_dp *intel_dp) 98intel_dp_max_link_bw(struct intel_dp *intel_dp)
97{ 99{
98 int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE]; 100 int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
101 struct drm_device *dev = intel_dp->attached_connector->base.dev;
99 102
100 switch (max_link_bw) { 103 switch (max_link_bw) {
101 case DP_LINK_BW_1_62: 104 case DP_LINK_BW_1_62:
102 case DP_LINK_BW_2_7: 105 case DP_LINK_BW_2_7:
103 break; 106 break;
104 case DP_LINK_BW_5_4: /* 1.2 capable displays may advertise higher bw */ 107 case DP_LINK_BW_5_4: /* 1.2 capable displays may advertise higher bw */
105 max_link_bw = DP_LINK_BW_2_7; 108 if ((IS_HASWELL(dev) || INTEL_INFO(dev)->gen >= 8) &&
109 intel_dp->dpcd[DP_DPCD_REV] >= 0x12)
110 max_link_bw = DP_LINK_BW_5_4;
111 else
112 max_link_bw = DP_LINK_BW_2_7;
106 break; 113 break;
107 default: 114 default:
108 WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n", 115 WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
@@ -294,7 +301,7 @@ static u32 _pp_stat_reg(struct intel_dp *intel_dp)
294 return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp)); 301 return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
295} 302}
296 303
297static bool ironlake_edp_have_panel_power(struct intel_dp *intel_dp) 304static bool edp_have_panel_power(struct intel_dp *intel_dp)
298{ 305{
299 struct drm_device *dev = intel_dp_to_dev(intel_dp); 306 struct drm_device *dev = intel_dp_to_dev(intel_dp);
300 struct drm_i915_private *dev_priv = dev->dev_private; 307 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -302,7 +309,7 @@ static bool ironlake_edp_have_panel_power(struct intel_dp *intel_dp)
302 return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0; 309 return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
303} 310}
304 311
305static bool ironlake_edp_have_panel_vdd(struct intel_dp *intel_dp) 312static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
306{ 313{
307 struct drm_device *dev = intel_dp_to_dev(intel_dp); 314 struct drm_device *dev = intel_dp_to_dev(intel_dp);
308 struct drm_i915_private *dev_priv = dev->dev_private; 315 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -319,7 +326,7 @@ intel_dp_check_edp(struct intel_dp *intel_dp)
319 if (!is_edp(intel_dp)) 326 if (!is_edp(intel_dp))
320 return; 327 return;
321 328
322 if (!ironlake_edp_have_panel_power(intel_dp) && !ironlake_edp_have_panel_vdd(intel_dp)) { 329 if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
323 WARN(1, "eDP powered off while attempting aux channel communication.\n"); 330 WARN(1, "eDP powered off while attempting aux channel communication.\n");
324 DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n", 331 DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
325 I915_READ(_pp_stat_reg(intel_dp)), 332 I915_READ(_pp_stat_reg(intel_dp)),
@@ -351,31 +358,46 @@ intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
351 return status; 358 return status;
352} 359}
353 360
354static uint32_t get_aux_clock_divider(struct intel_dp *intel_dp, 361static uint32_t i9xx_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
355 int index)
356{ 362{
357 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 363 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
358 struct drm_device *dev = intel_dig_port->base.base.dev; 364 struct drm_device *dev = intel_dig_port->base.base.dev;
359 struct drm_i915_private *dev_priv = dev->dev_private;
360 365
361 /* The clock divider is based off the hrawclk, 366 /*
362 * and would like to run at 2MHz. So, take the 367 * The clock divider is based off the hrawclk, and would like to run at
363 * hrawclk value and divide by 2 and use that 368 * 2MHz. So, take the hrawclk value and divide by 2 and use that
364 *
365 * Note that PCH attached eDP panels should use a 125MHz input
366 * clock divider.
367 */ 369 */
368 if (IS_VALLEYVIEW(dev)) { 370 return index ? 0 : intel_hrawclk(dev) / 2;
369 return index ? 0 : 100; 371}
370 } else if (intel_dig_port->port == PORT_A) { 372
371 if (index) 373static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
372 return 0; 374{
373 if (HAS_DDI(dev)) 375 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
374 return DIV_ROUND_CLOSEST(intel_ddi_get_cdclk_freq(dev_priv), 2000); 376 struct drm_device *dev = intel_dig_port->base.base.dev;
375 else if (IS_GEN6(dev) || IS_GEN7(dev)) 377
378 if (index)
379 return 0;
380
381 if (intel_dig_port->port == PORT_A) {
382 if (IS_GEN6(dev) || IS_GEN7(dev))
376 return 200; /* SNB & IVB eDP input clock at 400Mhz */ 383 return 200; /* SNB & IVB eDP input clock at 400Mhz */
377 else 384 else
378 return 225; /* eDP input clock at 450Mhz */ 385 return 225; /* eDP input clock at 450Mhz */
386 } else {
387 return DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
388 }
389}
390
391static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
392{
393 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
394 struct drm_device *dev = intel_dig_port->base.base.dev;
395 struct drm_i915_private *dev_priv = dev->dev_private;
396
397 if (intel_dig_port->port == PORT_A) {
398 if (index)
399 return 0;
400 return DIV_ROUND_CLOSEST(intel_ddi_get_cdclk_freq(dev_priv), 2000);
379 } else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) { 401 } else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
380 /* Workaround for non-ULT HSW */ 402 /* Workaround for non-ULT HSW */
381 switch (index) { 403 switch (index) {
@@ -383,13 +405,46 @@ static uint32_t get_aux_clock_divider(struct intel_dp *intel_dp,
383 case 1: return 72; 405 case 1: return 72;
384 default: return 0; 406 default: return 0;
385 } 407 }
386 } else if (HAS_PCH_SPLIT(dev)) { 408 } else {
387 return index ? 0 : DIV_ROUND_UP(intel_pch_rawclk(dev), 2); 409 return index ? 0 : DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
388 } else {
389 return index ? 0 :intel_hrawclk(dev) / 2;
390 } 410 }
391} 411}
392 412
413static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
414{
415 return index ? 0 : 100;
416}
417
418static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
419 bool has_aux_irq,
420 int send_bytes,
421 uint32_t aux_clock_divider)
422{
423 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
424 struct drm_device *dev = intel_dig_port->base.base.dev;
425 uint32_t precharge, timeout;
426
427 if (IS_GEN6(dev))
428 precharge = 3;
429 else
430 precharge = 5;
431
432 if (IS_BROADWELL(dev) && intel_dp->aux_ch_ctl_reg == DPA_AUX_CH_CTL)
433 timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
434 else
435 timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
436
437 return DP_AUX_CH_CTL_SEND_BUSY |
438 DP_AUX_CH_CTL_DONE |
439 (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
440 DP_AUX_CH_CTL_TIME_OUT_ERROR |
441 timeout |
442 DP_AUX_CH_CTL_RECEIVE_ERROR |
443 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
444 (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
445 (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
446}
447
393static int 448static int
394intel_dp_aux_ch(struct intel_dp *intel_dp, 449intel_dp_aux_ch(struct intel_dp *intel_dp,
395 uint8_t *send, int send_bytes, 450 uint8_t *send, int send_bytes,
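The call site below goes through intel_dp->get_aux_clock_divider and ->get_aux_send_ctl, so the per-platform variants introduced above have to be wired up during encoder init. That assignment is not part of this excerpt; the dispatch below is only a plausible reconstruction of it, not the patch's code.

/* plausible reconstruction only -- the real assignment lives in the
 * intel_dp init path, outside this excerpt */
if (HAS_DDI(dev))
        intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
else if (HAS_PCH_SPLIT(dev))
        intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
else if (IS_VALLEYVIEW(dev))
        intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider;
else
        intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider;

intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;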
@@ -403,9 +458,8 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
403 uint32_t aux_clock_divider; 458 uint32_t aux_clock_divider;
404 int i, ret, recv_bytes; 459 int i, ret, recv_bytes;
405 uint32_t status; 460 uint32_t status;
406 int try, precharge, clock = 0; 461 int try, clock = 0;
407 bool has_aux_irq = HAS_AUX_IRQ(dev); 462 bool has_aux_irq = HAS_AUX_IRQ(dev);
408 uint32_t timeout;
409 463
410 /* dp aux is extremely sensitive to irq latency, hence request the 464 /* dp aux is extremely sensitive to irq latency, hence request the
411 * lowest possible wakeup latency and so prevent the cpu from going into 465 * lowest possible wakeup latency and so prevent the cpu from going into
@@ -415,16 +469,6 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
415 469
416 intel_dp_check_edp(intel_dp); 470 intel_dp_check_edp(intel_dp);
417 471
418 if (IS_GEN6(dev))
419 precharge = 3;
420 else
421 precharge = 5;
422
423 if (IS_BROADWELL(dev) && ch_ctl == DPA_AUX_CH_CTL)
424 timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
425 else
426 timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
427
428 intel_aux_display_runtime_get(dev_priv); 472 intel_aux_display_runtime_get(dev_priv);
429 473
430 /* Try to wait for any previous AUX channel activity */ 474 /* Try to wait for any previous AUX channel activity */
@@ -448,7 +492,12 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
448 goto out; 492 goto out;
449 } 493 }
450 494
451 while ((aux_clock_divider = get_aux_clock_divider(intel_dp, clock++))) { 495 while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
496 u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
497 has_aux_irq,
498 send_bytes,
499 aux_clock_divider);
500
452 /* Must try at least 3 times according to DP spec */ 501 /* Must try at least 3 times according to DP spec */
453 for (try = 0; try < 5; try++) { 502 for (try = 0; try < 5; try++) {
454 /* Load the send data into the aux channel data registers */ 503 /* Load the send data into the aux channel data registers */
@@ -457,16 +506,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
457 pack_aux(send + i, send_bytes - i)); 506 pack_aux(send + i, send_bytes - i));
458 507
459 /* Send the command and wait for it to complete */ 508 /* Send the command and wait for it to complete */
460 I915_WRITE(ch_ctl, 509 I915_WRITE(ch_ctl, send_ctl);
461 DP_AUX_CH_CTL_SEND_BUSY |
462 (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
463 timeout |
464 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
465 (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
466 (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT) |
467 DP_AUX_CH_CTL_DONE |
468 DP_AUX_CH_CTL_TIME_OUT_ERROR |
469 DP_AUX_CH_CTL_RECEIVE_ERROR);
470 510
471 status = intel_dp_aux_wait_done(intel_dp, has_aux_irq); 511 status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);
472 512
@@ -637,7 +677,7 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
637 int reply_bytes; 677 int reply_bytes;
638 int ret; 678 int ret;
639 679
640 ironlake_edp_panel_vdd_on(intel_dp); 680 edp_panel_vdd_on(intel_dp);
641 intel_dp_check_edp(intel_dp); 681 intel_dp_check_edp(intel_dp);
642 /* Set up the command byte */ 682 /* Set up the command byte */
643 if (mode & MODE_I2C_READ) 683 if (mode & MODE_I2C_READ)
@@ -740,7 +780,7 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
740 ret = -EREMOTEIO; 780 ret = -EREMOTEIO;
741 781
742out: 782out:
743 ironlake_edp_panel_vdd_off(intel_dp, false); 783 edp_panel_vdd_off(intel_dp, false);
744 return ret; 784 return ret;
745} 785}
746 786
@@ -812,9 +852,10 @@ intel_dp_compute_config(struct intel_encoder *encoder,
812 struct intel_connector *intel_connector = intel_dp->attached_connector; 852 struct intel_connector *intel_connector = intel_dp->attached_connector;
813 int lane_count, clock; 853 int lane_count, clock;
814 int max_lane_count = drm_dp_max_lane_count(intel_dp->dpcd); 854 int max_lane_count = drm_dp_max_lane_count(intel_dp->dpcd);
815 int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0; 855 /* Conveniently, the link BW constants become indices with a shift...*/
856 int max_clock = intel_dp_max_link_bw(intel_dp) >> 3;
816 int bpp, mode_rate; 857 int bpp, mode_rate;
817 static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 }; 858 static int bws[] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7, DP_LINK_BW_5_4 };
818 int link_avail, link_clock; 859 int link_avail, link_clock;
819 860
820 if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A) 861 if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
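The "indices with a shift" comment checks out against the DPCD link-rate codes (values assumed from drm_dp_helper.h):

/* DP_LINK_BW_1_62 = 0x06  ->  0x06 >> 3 = 0  ->  bws[0]
 * DP_LINK_BW_2_7  = 0x0a  ->  0x0a >> 3 = 1  ->  bws[1]
 * DP_LINK_BW_5_4  = 0x14  ->  0x14 >> 3 = 2  ->  bws[2]
 */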
@@ -1015,16 +1056,16 @@ static void intel_dp_mode_set(struct intel_encoder *encoder)
1015 ironlake_set_pll_cpu_edp(intel_dp); 1056 ironlake_set_pll_cpu_edp(intel_dp);
1016} 1057}
1017 1058
1018#define IDLE_ON_MASK (PP_ON | 0 | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK) 1059#define IDLE_ON_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
1019#define IDLE_ON_VALUE (PP_ON | 0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE) 1060#define IDLE_ON_VALUE (PP_ON | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE)
1020 1061
1021#define IDLE_OFF_MASK (PP_ON | 0 | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK) 1062#define IDLE_OFF_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | 0)
1022#define IDLE_OFF_VALUE (0 | 0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE) 1063#define IDLE_OFF_VALUE (0 | PP_SEQUENCE_NONE | 0 | 0)
1023 1064
1024#define IDLE_CYCLE_MASK (PP_ON | 0 | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK) 1065#define IDLE_CYCLE_MASK (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
1025#define IDLE_CYCLE_VALUE (0 | 0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE) 1066#define IDLE_CYCLE_VALUE (0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)
1026 1067
1027static void ironlake_wait_panel_status(struct intel_dp *intel_dp, 1068static void wait_panel_status(struct intel_dp *intel_dp,
1028 u32 mask, 1069 u32 mask,
1029 u32 value) 1070 u32 value)
1030{ 1071{
@@ -1049,24 +1090,41 @@ static void ironlake_wait_panel_status(struct intel_dp *intel_dp,
1049 DRM_DEBUG_KMS("Wait complete\n"); 1090 DRM_DEBUG_KMS("Wait complete\n");
1050} 1091}
1051 1092
1052static void ironlake_wait_panel_on(struct intel_dp *intel_dp) 1093static void wait_panel_on(struct intel_dp *intel_dp)
1053{ 1094{
1054 DRM_DEBUG_KMS("Wait for panel power on\n"); 1095 DRM_DEBUG_KMS("Wait for panel power on\n");
1055 ironlake_wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE); 1096 wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
1056} 1097}
1057 1098
1058static void ironlake_wait_panel_off(struct intel_dp *intel_dp) 1099static void wait_panel_off(struct intel_dp *intel_dp)
1059{ 1100{
1060 DRM_DEBUG_KMS("Wait for panel power off time\n"); 1101 DRM_DEBUG_KMS("Wait for panel power off time\n");
1061 ironlake_wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE); 1102 wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
1062} 1103}
1063 1104
1064static void ironlake_wait_panel_power_cycle(struct intel_dp *intel_dp) 1105static void wait_panel_power_cycle(struct intel_dp *intel_dp)
1065{ 1106{
1066 DRM_DEBUG_KMS("Wait for panel power cycle\n"); 1107 DRM_DEBUG_KMS("Wait for panel power cycle\n");
1067 ironlake_wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE); 1108
1109 /* When we disable the VDD override bit last we have to do the manual
1110 * wait. */
1111 wait_remaining_ms_from_jiffies(intel_dp->last_power_cycle,
1112 intel_dp->panel_power_cycle_delay);
1113
1114 wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
1115}
1116
1117static void wait_backlight_on(struct intel_dp *intel_dp)
1118{
1119 wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
1120 intel_dp->backlight_on_delay);
1068} 1121}
1069 1122
1123static void edp_wait_backlight_off(struct intel_dp *intel_dp)
1124{
1125 wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
1126 intel_dp->backlight_off_delay);
1127}
1070 1128
1071/* Read the current pp_control value, unlocking the register if it 1129/* Read the current pp_control value, unlocking the register if it
1072 * is locked 1130 * is locked
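The three new waits above lean on wait_remaining_ms_from_jiffies(). The real inline helper lives in intel_drv.h; its intended semantics are simply "sleep only for whatever part of the delay has not already elapsed since the recorded timestamp", roughly:

static void wait_remaining_ms_from_jiffies(unsigned long timestamp, int to_wait_ms)
{
	/* Semantic sketch; the real inline helper in intel_drv.h differs in detail. */
	unsigned long target = timestamp + msecs_to_jiffies(to_wait_ms);

	if (time_after(target, jiffies))
		msleep(jiffies_to_msecs(target - jiffies));
}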
@@ -1084,7 +1142,7 @@ static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
1084 return control; 1142 return control;
1085} 1143}
1086 1144
1087void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp) 1145static void edp_panel_vdd_on(struct intel_dp *intel_dp)
1088{ 1146{
1089 struct drm_device *dev = intel_dp_to_dev(intel_dp); 1147 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1090 struct drm_i915_private *dev_priv = dev->dev_private; 1148 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1099,15 +1157,15 @@ void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp)
1099 1157
1100 intel_dp->want_panel_vdd = true; 1158 intel_dp->want_panel_vdd = true;
1101 1159
1102 if (ironlake_edp_have_panel_vdd(intel_dp)) 1160 if (edp_have_panel_vdd(intel_dp))
1103 return; 1161 return;
1104 1162
1105 intel_runtime_pm_get(dev_priv); 1163 intel_runtime_pm_get(dev_priv);
1106 1164
1107 DRM_DEBUG_KMS("Turning eDP VDD on\n"); 1165 DRM_DEBUG_KMS("Turning eDP VDD on\n");
1108 1166
1109 if (!ironlake_edp_have_panel_power(intel_dp)) 1167 if (!edp_have_panel_power(intel_dp))
1110 ironlake_wait_panel_power_cycle(intel_dp); 1168 wait_panel_power_cycle(intel_dp);
1111 1169
1112 pp = ironlake_get_pp_control(intel_dp); 1170 pp = ironlake_get_pp_control(intel_dp);
1113 pp |= EDP_FORCE_VDD; 1171 pp |= EDP_FORCE_VDD;
@@ -1122,13 +1180,13 @@ void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp)
1122 /* 1180 /*
1123 * If the panel wasn't on, delay before accessing aux channel 1181 * If the panel wasn't on, delay before accessing aux channel
1124 */ 1182 */
1125 if (!ironlake_edp_have_panel_power(intel_dp)) { 1183 if (!edp_have_panel_power(intel_dp)) {
1126 DRM_DEBUG_KMS("eDP was not running\n"); 1184 DRM_DEBUG_KMS("eDP was not running\n");
1127 msleep(intel_dp->panel_power_up_delay); 1185 msleep(intel_dp->panel_power_up_delay);
1128 } 1186 }
1129} 1187}
1130 1188
1131static void ironlake_panel_vdd_off_sync(struct intel_dp *intel_dp) 1189static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
1132{ 1190{
1133 struct drm_device *dev = intel_dp_to_dev(intel_dp); 1191 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1134 struct drm_i915_private *dev_priv = dev->dev_private; 1192 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1137,7 +1195,7 @@ static void ironlake_panel_vdd_off_sync(struct intel_dp *intel_dp)
1137 1195
1138 WARN_ON(!mutex_is_locked(&dev->mode_config.mutex)); 1196 WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
1139 1197
1140 if (!intel_dp->want_panel_vdd && ironlake_edp_have_panel_vdd(intel_dp)) { 1198 if (!intel_dp->want_panel_vdd && edp_have_panel_vdd(intel_dp)) {
1141 DRM_DEBUG_KMS("Turning eDP VDD off\n"); 1199 DRM_DEBUG_KMS("Turning eDP VDD off\n");
1142 1200
1143 pp = ironlake_get_pp_control(intel_dp); 1201 pp = ironlake_get_pp_control(intel_dp);
@@ -1154,24 +1212,24 @@ static void ironlake_panel_vdd_off_sync(struct intel_dp *intel_dp)
1154 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg)); 1212 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
1155 1213
1156 if ((pp & POWER_TARGET_ON) == 0) 1214 if ((pp & POWER_TARGET_ON) == 0)
1157 msleep(intel_dp->panel_power_cycle_delay); 1215 intel_dp->last_power_cycle = jiffies;
1158 1216
1159 intel_runtime_pm_put(dev_priv); 1217 intel_runtime_pm_put(dev_priv);
1160 } 1218 }
1161} 1219}
1162 1220
1163static void ironlake_panel_vdd_work(struct work_struct *__work) 1221static void edp_panel_vdd_work(struct work_struct *__work)
1164{ 1222{
1165 struct intel_dp *intel_dp = container_of(to_delayed_work(__work), 1223 struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
1166 struct intel_dp, panel_vdd_work); 1224 struct intel_dp, panel_vdd_work);
1167 struct drm_device *dev = intel_dp_to_dev(intel_dp); 1225 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1168 1226
1169 mutex_lock(&dev->mode_config.mutex); 1227 mutex_lock(&dev->mode_config.mutex);
1170 ironlake_panel_vdd_off_sync(intel_dp); 1228 edp_panel_vdd_off_sync(intel_dp);
1171 mutex_unlock(&dev->mode_config.mutex); 1229 mutex_unlock(&dev->mode_config.mutex);
1172} 1230}
1173 1231
1174void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync) 1232static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
1175{ 1233{
1176 if (!is_edp(intel_dp)) 1234 if (!is_edp(intel_dp))
1177 return; 1235 return;
@@ -1181,7 +1239,7 @@ void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
1181 intel_dp->want_panel_vdd = false; 1239 intel_dp->want_panel_vdd = false;
1182 1240
1183 if (sync) { 1241 if (sync) {
1184 ironlake_panel_vdd_off_sync(intel_dp); 1242 edp_panel_vdd_off_sync(intel_dp);
1185 } else { 1243 } else {
1186 /* 1244 /*
1187 * Queue the timer to fire a long 1245 * Queue the timer to fire a long
@@ -1193,7 +1251,7 @@ void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
1193 } 1251 }
1194} 1252}
1195 1253
1196void ironlake_edp_panel_on(struct intel_dp *intel_dp) 1254void intel_edp_panel_on(struct intel_dp *intel_dp)
1197{ 1255{
1198 struct drm_device *dev = intel_dp_to_dev(intel_dp); 1256 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1199 struct drm_i915_private *dev_priv = dev->dev_private; 1257 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1205,12 +1263,12 @@ void ironlake_edp_panel_on(struct intel_dp *intel_dp)
1205 1263
1206 DRM_DEBUG_KMS("Turn eDP power on\n"); 1264 DRM_DEBUG_KMS("Turn eDP power on\n");
1207 1265
1208 if (ironlake_edp_have_panel_power(intel_dp)) { 1266 if (edp_have_panel_power(intel_dp)) {
1209 DRM_DEBUG_KMS("eDP power already on\n"); 1267 DRM_DEBUG_KMS("eDP power already on\n");
1210 return; 1268 return;
1211 } 1269 }
1212 1270
1213 ironlake_wait_panel_power_cycle(intel_dp); 1271 wait_panel_power_cycle(intel_dp);
1214 1272
1215 pp_ctrl_reg = _pp_ctrl_reg(intel_dp); 1273 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1216 pp = ironlake_get_pp_control(intel_dp); 1274 pp = ironlake_get_pp_control(intel_dp);
@@ -1228,7 +1286,8 @@ void ironlake_edp_panel_on(struct intel_dp *intel_dp)
1228 I915_WRITE(pp_ctrl_reg, pp); 1286 I915_WRITE(pp_ctrl_reg, pp);
1229 POSTING_READ(pp_ctrl_reg); 1287 POSTING_READ(pp_ctrl_reg);
1230 1288
1231 ironlake_wait_panel_on(intel_dp); 1289 wait_panel_on(intel_dp);
1290 intel_dp->last_power_on = jiffies;
1232 1291
1233 if (IS_GEN5(dev)) { 1292 if (IS_GEN5(dev)) {
1234 pp |= PANEL_POWER_RESET; /* restore panel reset bit */ 1293 pp |= PANEL_POWER_RESET; /* restore panel reset bit */
@@ -1237,7 +1296,7 @@ void ironlake_edp_panel_on(struct intel_dp *intel_dp)
1237 } 1296 }
1238} 1297}
1239 1298
1240void ironlake_edp_panel_off(struct intel_dp *intel_dp) 1299void intel_edp_panel_off(struct intel_dp *intel_dp)
1241{ 1300{
1242 struct drm_device *dev = intel_dp_to_dev(intel_dp); 1301 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1243 struct drm_i915_private *dev_priv = dev->dev_private; 1302 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1249,6 +1308,8 @@ void ironlake_edp_panel_off(struct intel_dp *intel_dp)
1249 1308
1250 DRM_DEBUG_KMS("Turn eDP power off\n"); 1309 DRM_DEBUG_KMS("Turn eDP power off\n");
1251 1310
1311 edp_wait_backlight_off(intel_dp);
1312
1252 pp = ironlake_get_pp_control(intel_dp); 1313 pp = ironlake_get_pp_control(intel_dp);
1253 /* We need to switch off panel power _and_ force vdd, for otherwise some 1314 /* We need to switch off panel power _and_ force vdd, for otherwise some
1254 * panels get very unhappy and cease to work. */ 1315 * panels get very unhappy and cease to work. */
@@ -1259,10 +1320,11 @@ void ironlake_edp_panel_off(struct intel_dp *intel_dp)
1259 I915_WRITE(pp_ctrl_reg, pp); 1320 I915_WRITE(pp_ctrl_reg, pp);
1260 POSTING_READ(pp_ctrl_reg); 1321 POSTING_READ(pp_ctrl_reg);
1261 1322
1262 ironlake_wait_panel_off(intel_dp); 1323 intel_dp->last_power_cycle = jiffies;
1324 wait_panel_off(intel_dp);
1263} 1325}
1264 1326
1265void ironlake_edp_backlight_on(struct intel_dp *intel_dp) 1327void intel_edp_backlight_on(struct intel_dp *intel_dp)
1266{ 1328{
1267 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 1329 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1268 struct drm_device *dev = intel_dig_port->base.base.dev; 1330 struct drm_device *dev = intel_dig_port->base.base.dev;
@@ -1280,7 +1342,7 @@ void ironlake_edp_backlight_on(struct intel_dp *intel_dp)
1280 * link. So delay a bit to make sure the image is solid before 1342 * link. So delay a bit to make sure the image is solid before
1281 * allowing it to appear. 1343 * allowing it to appear.
1282 */ 1344 */
1283 msleep(intel_dp->backlight_on_delay); 1345 wait_backlight_on(intel_dp);
1284 pp = ironlake_get_pp_control(intel_dp); 1346 pp = ironlake_get_pp_control(intel_dp);
1285 pp |= EDP_BLC_ENABLE; 1347 pp |= EDP_BLC_ENABLE;
1286 1348
@@ -1292,7 +1354,7 @@ void ironlake_edp_backlight_on(struct intel_dp *intel_dp)
1292 intel_panel_enable_backlight(intel_dp->attached_connector); 1354 intel_panel_enable_backlight(intel_dp->attached_connector);
1293} 1355}
1294 1356
1295void ironlake_edp_backlight_off(struct intel_dp *intel_dp) 1357void intel_edp_backlight_off(struct intel_dp *intel_dp)
1296{ 1358{
1297 struct drm_device *dev = intel_dp_to_dev(intel_dp); 1359 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1298 struct drm_i915_private *dev_priv = dev->dev_private; 1360 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1312,7 +1374,7 @@ void ironlake_edp_backlight_off(struct intel_dp *intel_dp)
1312 1374
1313 I915_WRITE(pp_ctrl_reg, pp); 1375 I915_WRITE(pp_ctrl_reg, pp);
1314 POSTING_READ(pp_ctrl_reg); 1376 POSTING_READ(pp_ctrl_reg);
1315 msleep(intel_dp->backlight_off_delay); 1377 intel_dp->last_backlight_off = jiffies;
1316} 1378}
1317 1379
1318static void ironlake_edp_pll_on(struct intel_dp *intel_dp) 1380static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
@@ -1597,10 +1659,12 @@ static void intel_edp_psr_enable_sink(struct intel_dp *intel_dp)
1597{ 1659{
1598 struct drm_device *dev = intel_dp_to_dev(intel_dp); 1660 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1599 struct drm_i915_private *dev_priv = dev->dev_private; 1661 struct drm_i915_private *dev_priv = dev->dev_private;
1600 uint32_t aux_clock_divider = get_aux_clock_divider(intel_dp, 0); 1662 uint32_t aux_clock_divider;
1601 int precharge = 0x3; 1663 int precharge = 0x3;
1602 int msg_size = 5; /* Header(4) + Message(1) */ 1664 int msg_size = 5; /* Header(4) + Message(1) */
1603 1665
1666 aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);
1667
1604 /* Enable PSR in sink */ 1668 /* Enable PSR in sink */
1605 if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT) 1669 if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT)
1606 intel_dp_aux_native_write_1(intel_dp, DP_PSR_EN_CFG, 1670 intel_dp_aux_native_write_1(intel_dp, DP_PSR_EN_CFG,
@@ -1668,7 +1732,7 @@ static bool intel_edp_psr_match_conditions(struct intel_dp *intel_dp)
1668 return false; 1732 return false;
1669 } 1733 }
1670 1734
1671 if (!i915_enable_psr) { 1735 if (!i915.enable_psr) {
1672 DRM_DEBUG_KMS("PSR disable by flag\n"); 1736 DRM_DEBUG_KMS("PSR disable by flag\n");
1673 return false; 1737 return false;
1674 } 1738 }
@@ -1784,9 +1848,9 @@ static void intel_disable_dp(struct intel_encoder *encoder)
1784 1848
1785 /* Make sure the panel is off before trying to change the mode. But also 1849 /* Make sure the panel is off before trying to change the mode. But also
1786 * ensure that we have vdd while we switch off the panel. */ 1850 * ensure that we have vdd while we switch off the panel. */
1787 ironlake_edp_backlight_off(intel_dp); 1851 intel_edp_backlight_off(intel_dp);
1788 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF); 1852 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
1789 ironlake_edp_panel_off(intel_dp); 1853 intel_edp_panel_off(intel_dp);
1790 1854
 1791 /* cpu edp may only be disabled _after_ the cpu pipe/plane is disabled. */ 1855
1792 if (!(port == PORT_A || IS_VALLEYVIEW(dev))) 1856 if (!(port == PORT_A || IS_VALLEYVIEW(dev)))
@@ -1816,11 +1880,11 @@ static void intel_enable_dp(struct intel_encoder *encoder)
1816 if (WARN_ON(dp_reg & DP_PORT_EN)) 1880 if (WARN_ON(dp_reg & DP_PORT_EN))
1817 return; 1881 return;
1818 1882
1819 ironlake_edp_panel_vdd_on(intel_dp); 1883 edp_panel_vdd_on(intel_dp);
1820 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON); 1884 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
1821 intel_dp_start_link_train(intel_dp); 1885 intel_dp_start_link_train(intel_dp);
1822 ironlake_edp_panel_on(intel_dp); 1886 intel_edp_panel_on(intel_dp);
1823 ironlake_edp_panel_vdd_off(intel_dp, true); 1887 edp_panel_vdd_off(intel_dp, true);
1824 intel_dp_complete_link_train(intel_dp); 1888 intel_dp_complete_link_train(intel_dp);
1825 intel_dp_stop_link_train(intel_dp); 1889 intel_dp_stop_link_train(intel_dp);
1826} 1890}
@@ -1830,14 +1894,14 @@ static void g4x_enable_dp(struct intel_encoder *encoder)
1830 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); 1894 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1831 1895
1832 intel_enable_dp(encoder); 1896 intel_enable_dp(encoder);
1833 ironlake_edp_backlight_on(intel_dp); 1897 intel_edp_backlight_on(intel_dp);
1834} 1898}
1835 1899
1836static void vlv_enable_dp(struct intel_encoder *encoder) 1900static void vlv_enable_dp(struct intel_encoder *encoder)
1837{ 1901{
1838 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); 1902 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1839 1903
1840 ironlake_edp_backlight_on(intel_dp); 1904 intel_edp_backlight_on(intel_dp);
1841} 1905}
1842 1906
1843static void g4x_pre_enable_dp(struct intel_encoder *encoder) 1907static void g4x_pre_enable_dp(struct intel_encoder *encoder)
@@ -2630,10 +2694,15 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
2630 bool channel_eq = false; 2694 bool channel_eq = false;
2631 int tries, cr_tries; 2695 int tries, cr_tries;
2632 uint32_t DP = intel_dp->DP; 2696 uint32_t DP = intel_dp->DP;
2697 uint32_t training_pattern = DP_TRAINING_PATTERN_2;
2698
 2699 /* Training Pattern 3 for HBR2 or 1.2 devices that support it */
2700 if (intel_dp->link_bw == DP_LINK_BW_5_4 || intel_dp->use_tps3)
2701 training_pattern = DP_TRAINING_PATTERN_3;
2633 2702
2634 /* channel equalization */ 2703 /* channel equalization */
2635 if (!intel_dp_set_link_train(intel_dp, &DP, 2704 if (!intel_dp_set_link_train(intel_dp, &DP,
2636 DP_TRAINING_PATTERN_2 | 2705 training_pattern |
2637 DP_LINK_SCRAMBLING_DISABLE)) { 2706 DP_LINK_SCRAMBLING_DISABLE)) {
2638 DRM_ERROR("failed to start channel equalization\n"); 2707 DRM_ERROR("failed to start channel equalization\n");
2639 return; 2708 return;
@@ -2660,7 +2729,7 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
2660 if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) { 2729 if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
2661 intel_dp_start_link_train(intel_dp); 2730 intel_dp_start_link_train(intel_dp);
2662 intel_dp_set_link_train(intel_dp, &DP, 2731 intel_dp_set_link_train(intel_dp, &DP,
2663 DP_TRAINING_PATTERN_2 | 2732 training_pattern |
2664 DP_LINK_SCRAMBLING_DISABLE); 2733 DP_LINK_SCRAMBLING_DISABLE);
2665 cr_tries++; 2734 cr_tries++;
2666 continue; 2735 continue;
@@ -2676,7 +2745,7 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
2676 intel_dp_link_down(intel_dp); 2745 intel_dp_link_down(intel_dp);
2677 intel_dp_start_link_train(intel_dp); 2746 intel_dp_start_link_train(intel_dp);
2678 intel_dp_set_link_train(intel_dp, &DP, 2747 intel_dp_set_link_train(intel_dp, &DP,
2679 DP_TRAINING_PATTERN_2 | 2748 training_pattern |
2680 DP_LINK_SCRAMBLING_DISABLE); 2749 DP_LINK_SCRAMBLING_DISABLE);
2681 tries = 0; 2750 tries = 0;
2682 cr_tries++; 2751 cr_tries++;
@@ -2818,6 +2887,14 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
2818 } 2887 }
2819 } 2888 }
2820 2889
2890 /* Training Pattern 3 support */
2891 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x12 &&
2892 intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED) {
2893 intel_dp->use_tps3 = true;
2894 DRM_DEBUG_KMS("Displayport TPS3 supported");
2895 } else
2896 intel_dp->use_tps3 = false;
2897
2821 if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] & 2898 if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
2822 DP_DWN_STRM_PORT_PRESENT)) 2899 DP_DWN_STRM_PORT_PRESENT))
2823 return true; /* native DP sink */ 2900 return true; /* native DP sink */
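For context on the bit being tested, assuming the standard drm_dp_helper.h definitions:

/* DPCD byte 0x002 (DP_MAX_LANE_COUNT) on a DP 1.2 sink:
 *   bits 4:0  max lane count
 *   bit 6     TPS3 supported       (DP_TPS3_SUPPORTED == 1 << 6)
 *   bit 7     enhanced framing cap (DP_ENHANCED_FRAME_CAP)
 * so the check above amounts to: DPCD revision >= 1.2 and bit 6 set.
 */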
@@ -2841,7 +2918,7 @@ intel_dp_probe_oui(struct intel_dp *intel_dp)
2841 if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT)) 2918 if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
2842 return; 2919 return;
2843 2920
2844 ironlake_edp_panel_vdd_on(intel_dp); 2921 edp_panel_vdd_on(intel_dp);
2845 2922
2846 if (intel_dp_aux_native_read_retry(intel_dp, DP_SINK_OUI, buf, 3)) 2923 if (intel_dp_aux_native_read_retry(intel_dp, DP_SINK_OUI, buf, 3))
2847 DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n", 2924 DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
@@ -2851,7 +2928,36 @@ intel_dp_probe_oui(struct intel_dp *intel_dp)
2851 DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n", 2928 DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
2852 buf[0], buf[1], buf[2]); 2929 buf[0], buf[1], buf[2]);
2853 2930
2854 ironlake_edp_panel_vdd_off(intel_dp, false); 2931 edp_panel_vdd_off(intel_dp, false);
2932}
2933
2934int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
2935{
2936 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2937 struct drm_device *dev = intel_dig_port->base.base.dev;
2938 struct intel_crtc *intel_crtc =
2939 to_intel_crtc(intel_dig_port->base.base.crtc);
2940 u8 buf[1];
2941
2942 if (!intel_dp_aux_native_read(intel_dp, DP_TEST_SINK_MISC, buf, 1))
2943 return -EAGAIN;
2944
2945 if (!(buf[0] & DP_TEST_CRC_SUPPORTED))
2946 return -ENOTTY;
2947
2948 if (!intel_dp_aux_native_write_1(intel_dp, DP_TEST_SINK,
2949 DP_TEST_SINK_START))
2950 return -EAGAIN;
2951
2952 /* Wait 2 vblanks to be sure we will have the correct CRC value */
2953 intel_wait_for_vblank(dev, intel_crtc->pipe);
2954 intel_wait_for_vblank(dev, intel_crtc->pipe);
2955
2956 if (!intel_dp_aux_native_read(intel_dp, DP_TEST_CRC_R_CR, crc, 6))
2957 return -EAGAIN;
2958
2959 intel_dp_aux_native_write_1(intel_dp, DP_TEST_SINK, 0);
2960 return 0;
2855} 2961}
2856 2962
2857static bool 2963static bool
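A hypothetical caller of the new intel_dp_sink_crc() helper (the series wires it up through debugfs; this is just an illustrative sketch, with intel_dp assumed in scope):

u8 crc[6];
int ret = intel_dp_sink_crc(intel_dp, crc);

if (ret == 0)
	DRM_DEBUG_KMS("sink CRC: %02x %02x %02x %02x %02x %02x\n",
		      crc[0], crc[1], crc[2], crc[3], crc[4], crc[5]);
else if (ret == -ENOTTY)
	DRM_DEBUG_KMS("sink does not expose a CRC test\n");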
@@ -3295,7 +3401,7 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder)
3295 if (is_edp(intel_dp)) { 3401 if (is_edp(intel_dp)) {
3296 cancel_delayed_work_sync(&intel_dp->panel_vdd_work); 3402 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
3297 mutex_lock(&dev->mode_config.mutex); 3403 mutex_lock(&dev->mode_config.mutex);
3298 ironlake_panel_vdd_off_sync(intel_dp); 3404 edp_panel_vdd_off_sync(intel_dp);
3299 mutex_unlock(&dev->mode_config.mutex); 3405 mutex_unlock(&dev->mode_config.mutex);
3300 } 3406 }
3301 kfree(intel_dig_port); 3407 kfree(intel_dig_port);
@@ -3394,6 +3500,13 @@ intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connect
3394 } 3500 }
3395} 3501}
3396 3502
3503static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
3504{
3505 intel_dp->last_power_cycle = jiffies;
3506 intel_dp->last_power_on = jiffies;
3507 intel_dp->last_backlight_off = jiffies;
3508}
3509
3397static void 3510static void
3398intel_dp_init_panel_power_sequencer(struct drm_device *dev, 3511intel_dp_init_panel_power_sequencer(struct drm_device *dev,
3399 struct intel_dp *intel_dp, 3512 struct intel_dp *intel_dp,
@@ -3516,10 +3629,17 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
3516 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe); 3629 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
3517 } 3630 }
3518 3631
3519 /* And finally store the new values in the power sequencer. */ 3632 /*
3633 * And finally store the new values in the power sequencer. The
3634 * backlight delays are set to 1 because we do manual waits on them. For
3635 * T8, even BSpec recommends doing it. For T9, if we don't do this,
3636 * we'll end up waiting for the backlight off delay twice: once when we
3637 * do the manual sleep, and once when we disable the panel and wait for
3638 * the PP_STATUS bit to become zero.
3639 */
3520 pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) | 3640 pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
3521 (seq->t8 << PANEL_LIGHT_ON_DELAY_SHIFT); 3641 (1 << PANEL_LIGHT_ON_DELAY_SHIFT);
3522 pp_off = (seq->t9 << PANEL_LIGHT_OFF_DELAY_SHIFT) | 3642 pp_off = (1 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
3523 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT); 3643 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
3524 /* Compute the divisor for the pp clock, simply match the Bspec 3644 /* Compute the divisor for the pp clock, simply match the Bspec
3525 * formula. */ 3645 * formula. */
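Concretely, assuming the usual 100 us units of the PP_ON/PP_OFF delay fields, a panel with T9 = 50 ms (500 units) behaves as follows:

/* old: pp_off = (500 << PANEL_LIGHT_OFF_DELAY_SHIFT) | ...  -> hw waits 50 ms
 * new: pp_off = (1   << PANEL_LIGHT_OFF_DELAY_SHIFT) | ...  -> hw waits ~0.1 ms,
 *      and the driver does the real wait in edp_wait_backlight_off()
 *      using intel_dp->last_backlight_off
 */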
@@ -3554,14 +3674,14 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
3554} 3674}
3555 3675
3556static bool intel_edp_init_connector(struct intel_dp *intel_dp, 3676static bool intel_edp_init_connector(struct intel_dp *intel_dp,
3557 struct intel_connector *intel_connector) 3677 struct intel_connector *intel_connector,
3678 struct edp_power_seq *power_seq)
3558{ 3679{
3559 struct drm_connector *connector = &intel_connector->base; 3680 struct drm_connector *connector = &intel_connector->base;
3560 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 3681 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3561 struct drm_device *dev = intel_dig_port->base.base.dev; 3682 struct drm_device *dev = intel_dig_port->base.base.dev;
3562 struct drm_i915_private *dev_priv = dev->dev_private; 3683 struct drm_i915_private *dev_priv = dev->dev_private;
3563 struct drm_display_mode *fixed_mode = NULL; 3684 struct drm_display_mode *fixed_mode = NULL;
3564 struct edp_power_seq power_seq = { 0 };
3565 bool has_dpcd; 3685 bool has_dpcd;
3566 struct drm_display_mode *scan; 3686 struct drm_display_mode *scan;
3567 struct edid *edid; 3687 struct edid *edid;
@@ -3569,12 +3689,10 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
3569 if (!is_edp(intel_dp)) 3689 if (!is_edp(intel_dp))
3570 return true; 3690 return true;
3571 3691
3572 intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);
3573
3574 /* Cache DPCD and EDID for edp. */ 3692 /* Cache DPCD and EDID for edp. */
3575 ironlake_edp_panel_vdd_on(intel_dp); 3693 edp_panel_vdd_on(intel_dp);
3576 has_dpcd = intel_dp_get_dpcd(intel_dp); 3694 has_dpcd = intel_dp_get_dpcd(intel_dp);
3577 ironlake_edp_panel_vdd_off(intel_dp, false); 3695 edp_panel_vdd_off(intel_dp, false);
3578 3696
3579 if (has_dpcd) { 3697 if (has_dpcd) {
3580 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) 3698 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
@@ -3588,8 +3706,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
3588 } 3706 }
3589 3707
3590 /* We now know it's not a ghost, init power sequence regs. */ 3708 /* We now know it's not a ghost, init power sequence regs. */
3591 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, 3709 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, power_seq);
3592 &power_seq);
3593 3710
3594 edid = drm_get_edid(connector, &intel_dp->adapter); 3711 edid = drm_get_edid(connector, &intel_dp->adapter);
3595 if (edid) { 3712 if (edid) {
@@ -3638,9 +3755,22 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
3638 struct drm_device *dev = intel_encoder->base.dev; 3755 struct drm_device *dev = intel_encoder->base.dev;
3639 struct drm_i915_private *dev_priv = dev->dev_private; 3756 struct drm_i915_private *dev_priv = dev->dev_private;
3640 enum port port = intel_dig_port->port; 3757 enum port port = intel_dig_port->port;
3758 struct edp_power_seq power_seq = { 0 };
3641 const char *name = NULL; 3759 const char *name = NULL;
3642 int type, error; 3760 int type, error;
3643 3761
3762 /* intel_dp vfuncs */
3763 if (IS_VALLEYVIEW(dev))
3764 intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider;
3765 else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
3766 intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
3767 else if (HAS_PCH_SPLIT(dev))
3768 intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
3769 else
3770 intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider;
3771
3772 intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;
3773
3644 /* Preserve the current hw state. */ 3774 /* Preserve the current hw state. */
3645 intel_dp->DP = I915_READ(intel_dp->output_reg); 3775 intel_dp->DP = I915_READ(intel_dp->output_reg);
3646 intel_dp->attached_connector = intel_connector; 3776 intel_dp->attached_connector = intel_connector;
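One contract worth spelling out for the get_aux_clock_divider hook, visible from the while loop in intel_dp_aux_ch() earlier in this diff: it is called with index 0, 1, 2, ... and must eventually return 0, which is what terminates the retry loop. A minimal hypothetical implementation satisfying that contract:

static uint32_t example_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	return index ? 0 : 63;	/* hypothetical: offer one divider, then stop */
}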
@@ -3669,7 +3799,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
3669 connector->doublescan_allowed = 0; 3799 connector->doublescan_allowed = 0;
3670 3800
3671 INIT_DELAYED_WORK(&intel_dp->panel_vdd_work, 3801 INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
3672 ironlake_panel_vdd_work); 3802 edp_panel_vdd_work);
3673 3803
3674 intel_connector_attach_encoder(intel_connector, intel_encoder); 3804 intel_connector_attach_encoder(intel_connector, intel_encoder);
3675 drm_sysfs_connector_add(connector); 3805 drm_sysfs_connector_add(connector);
@@ -3721,18 +3851,23 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
3721 BUG(); 3851 BUG();
3722 } 3852 }
3723 3853
3854 if (is_edp(intel_dp)) {
3855 intel_dp_init_panel_power_timestamps(intel_dp);
3856 intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);
3857 }
3858
3724 error = intel_dp_i2c_init(intel_dp, intel_connector, name); 3859 error = intel_dp_i2c_init(intel_dp, intel_connector, name);
3725 WARN(error, "intel_dp_i2c_init failed with error %d for port %c\n", 3860 WARN(error, "intel_dp_i2c_init failed with error %d for port %c\n",
3726 error, port_name(port)); 3861 error, port_name(port));
3727 3862
3728 intel_dp->psr_setup_done = false; 3863 intel_dp->psr_setup_done = false;
3729 3864
3730 if (!intel_edp_init_connector(intel_dp, intel_connector)) { 3865 if (!intel_edp_init_connector(intel_dp, intel_connector, &power_seq)) {
3731 i2c_del_adapter(&intel_dp->adapter); 3866 i2c_del_adapter(&intel_dp->adapter);
3732 if (is_edp(intel_dp)) { 3867 if (is_edp(intel_dp)) {
3733 cancel_delayed_work_sync(&intel_dp->panel_vdd_work); 3868 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
3734 mutex_lock(&dev->mode_config.mutex); 3869 mutex_lock(&dev->mode_config.mutex);
3735 ironlake_panel_vdd_off_sync(intel_dp); 3870 edp_panel_vdd_off_sync(intel_dp);
3736 mutex_unlock(&dev->mode_config.mutex); 3871 mutex_unlock(&dev->mode_config.mutex);
3737 } 3872 }
3738 drm_sysfs_connector_remove(connector); 3873 drm_sysfs_connector_remove(connector);
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index fbfaaba5cc3b..44067bce5e04 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -359,6 +359,8 @@ struct intel_crtc {
359 bool cursor_visible; 359 bool cursor_visible;
360 360
361 struct intel_crtc_config config; 361 struct intel_crtc_config config;
362 struct intel_crtc_config *new_config;
363 bool new_enabled;
362 364
363 uint32_t ddi_pll_sel; 365 uint32_t ddi_pll_sel;
364 366
@@ -485,8 +487,22 @@ struct intel_dp {
485 int backlight_off_delay; 487 int backlight_off_delay;
486 struct delayed_work panel_vdd_work; 488 struct delayed_work panel_vdd_work;
487 bool want_panel_vdd; 489 bool want_panel_vdd;
490 unsigned long last_power_cycle;
491 unsigned long last_power_on;
492 unsigned long last_backlight_off;
488 bool psr_setup_done; 493 bool psr_setup_done;
494 bool use_tps3;
489 struct intel_connector *attached_connector; 495 struct intel_connector *attached_connector;
496
497 uint32_t (*get_aux_clock_divider)(struct intel_dp *dp, int index);
498 /*
499 * This function returns the value we have to program the AUX_CTL
500 * register with to kick off an AUX transaction.
501 */
502 uint32_t (*get_aux_send_ctl)(struct intel_dp *dp,
503 bool has_aux_irq,
504 int send_bytes,
505 uint32_t aux_clock_divider);
490}; 506};
491 507
492struct intel_digital_port { 508struct intel_digital_port {
@@ -540,6 +556,7 @@ struct intel_unpin_work {
540struct intel_set_config { 556struct intel_set_config {
541 struct drm_encoder **save_connector_encoders; 557 struct drm_encoder **save_connector_encoders;
542 struct drm_crtc **save_encoder_crtcs; 558 struct drm_crtc **save_encoder_crtcs;
559 bool *save_crtc_enabled;
543 560
544 bool fb_changed; 561 bool fb_changed;
545 bool mode_changed; 562 bool mode_changed;
@@ -721,15 +738,14 @@ void intel_dp_stop_link_train(struct intel_dp *intel_dp);
721void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode); 738void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode);
722void intel_dp_encoder_destroy(struct drm_encoder *encoder); 739void intel_dp_encoder_destroy(struct drm_encoder *encoder);
723void intel_dp_check_link_status(struct intel_dp *intel_dp); 740void intel_dp_check_link_status(struct intel_dp *intel_dp);
741int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc);
724bool intel_dp_compute_config(struct intel_encoder *encoder, 742bool intel_dp_compute_config(struct intel_encoder *encoder,
725 struct intel_crtc_config *pipe_config); 743 struct intel_crtc_config *pipe_config);
726bool intel_dp_is_edp(struct drm_device *dev, enum port port); 744bool intel_dp_is_edp(struct drm_device *dev, enum port port);
727void ironlake_edp_backlight_on(struct intel_dp *intel_dp); 745void intel_edp_backlight_on(struct intel_dp *intel_dp);
728void ironlake_edp_backlight_off(struct intel_dp *intel_dp); 746void intel_edp_backlight_off(struct intel_dp *intel_dp);
729void ironlake_edp_panel_on(struct intel_dp *intel_dp); 747void intel_edp_panel_on(struct intel_dp *intel_dp);
730void ironlake_edp_panel_off(struct intel_dp *intel_dp); 748void intel_edp_panel_off(struct intel_dp *intel_dp);
731void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp);
732void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
733void intel_edp_psr_enable(struct intel_dp *intel_dp); 749void intel_edp_psr_enable(struct intel_dp *intel_dp);
734void intel_edp_psr_disable(struct intel_dp *intel_dp); 750void intel_edp_psr_disable(struct intel_dp *intel_dp);
735void intel_edp_psr_update(struct drm_device *dev); 751void intel_edp_psr_update(struct drm_device *dev);
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index 39eac9937a4a..d6a8a716018d 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -104,7 +104,7 @@ static int intelfb_alloc(struct drm_fb_helper *helper,
104 return 0; 104 return 0;
105 105
106out_unpin: 106out_unpin:
107 i915_gem_object_unpin(obj); 107 i915_gem_object_ggtt_unpin(obj);
108out_unref: 108out_unref:
109 drm_gem_object_unreference(&obj->base); 109 drm_gem_object_unreference(&obj->base);
110out: 110out:
@@ -208,7 +208,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
208 return 0; 208 return 0;
209 209
210out_unpin: 210out_unpin:
211 i915_gem_object_unpin(obj); 211 i915_gem_object_ggtt_unpin(obj);
212 drm_gem_object_unreference(&obj->base); 212 drm_gem_object_unreference(&obj->base);
213out_unlock: 213out_unlock:
214 mutex_unlock(&dev->struct_mutex); 214 mutex_unlock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 6db0d9d17f47..43872f00822a 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -113,7 +113,8 @@ static u32 hsw_infoframe_enable(enum hdmi_infoframe_type type)
113} 113}
114 114
115static u32 hsw_infoframe_data_reg(enum hdmi_infoframe_type type, 115static u32 hsw_infoframe_data_reg(enum hdmi_infoframe_type type,
116 enum transcoder cpu_transcoder) 116 enum transcoder cpu_transcoder,
117 struct drm_i915_private *dev_priv)
117{ 118{
118 switch (type) { 119 switch (type) {
119 case HDMI_INFOFRAME_TYPE_AVI: 120 case HDMI_INFOFRAME_TYPE_AVI:
@@ -296,7 +297,8 @@ static void hsw_write_infoframe(struct drm_encoder *encoder,
296 u32 val = I915_READ(ctl_reg); 297 u32 val = I915_READ(ctl_reg);
297 298
298 data_reg = hsw_infoframe_data_reg(type, 299 data_reg = hsw_infoframe_data_reg(type,
299 intel_crtc->config.cpu_transcoder); 300 intel_crtc->config.cpu_transcoder,
301 dev_priv);
300 if (data_reg == 0) 302 if (data_reg == 0)
301 return; 303 return;
302 304
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 8bcb93a2a9f6..3f3043b4ff26 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -848,8 +848,8 @@ static bool compute_is_dual_link_lvds(struct intel_lvds_encoder *lvds_encoder)
848 struct drm_i915_private *dev_priv = dev->dev_private; 848 struct drm_i915_private *dev_priv = dev->dev_private;
849 849
850 /* use the module option value if specified */ 850 /* use the module option value if specified */
851 if (i915_lvds_channel_mode > 0) 851 if (i915.lvds_channel_mode > 0)
852 return i915_lvds_channel_mode == 2; 852 return i915.lvds_channel_mode == 2;
853 853
854 if (dmi_check_system(intel_dual_link_lvds)) 854 if (dmi_check_system(intel_dual_link_lvds))
855 return true; 855 return true;
@@ -1036,7 +1036,7 @@ void intel_lvds_init(struct drm_device *dev)
1036 intel_find_panel_downclock(dev, 1036 intel_find_panel_downclock(dev,
1037 fixed_mode, connector); 1037 fixed_mode, connector);
1038 if (intel_connector->panel.downclock_mode != 1038 if (intel_connector->panel.downclock_mode !=
1039 NULL && i915_lvds_downclock) { 1039 NULL && i915.lvds_downclock) {
1040 /* We found the downclock for LVDS. */ 1040 /* We found the downclock for LVDS. */
1041 dev_priv->lvds_downclock_avail = true; 1041 dev_priv->lvds_downclock_avail = true;
1042 dev_priv->lvds_downclock = 1042 dev_priv->lvds_downclock =
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index a759ecdb7a6e..424f0946d8c4 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -293,7 +293,7 @@ static void intel_overlay_release_old_vid_tail(struct intel_overlay *overlay)
293{ 293{
294 struct drm_i915_gem_object *obj = overlay->old_vid_bo; 294 struct drm_i915_gem_object *obj = overlay->old_vid_bo;
295 295
296 i915_gem_object_unpin(obj); 296 i915_gem_object_ggtt_unpin(obj);
297 drm_gem_object_unreference(&obj->base); 297 drm_gem_object_unreference(&obj->base);
298 298
299 overlay->old_vid_bo = NULL; 299 overlay->old_vid_bo = NULL;
@@ -306,7 +306,7 @@ static void intel_overlay_off_tail(struct intel_overlay *overlay)
306 /* never have the overlay hw on without showing a frame */ 306 /* never have the overlay hw on without showing a frame */
307 BUG_ON(!overlay->vid_bo); 307 BUG_ON(!overlay->vid_bo);
308 308
309 i915_gem_object_unpin(obj); 309 i915_gem_object_ggtt_unpin(obj);
310 drm_gem_object_unreference(&obj->base); 310 drm_gem_object_unreference(&obj->base);
311 overlay->vid_bo = NULL; 311 overlay->vid_bo = NULL;
312 312
@@ -782,7 +782,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
782 return 0; 782 return 0;
783 783
784out_unpin: 784out_unpin:
785 i915_gem_object_unpin(new_bo); 785 i915_gem_object_ggtt_unpin(new_bo);
786 return ret; 786 return ret;
787} 787}
788 788
@@ -1386,7 +1386,7 @@ void intel_setup_overlay(struct drm_device *dev)
1386 1386
1387out_unpin_bo: 1387out_unpin_bo:
1388 if (!OVERLAY_NEEDS_PHYSICAL(dev)) 1388 if (!OVERLAY_NEEDS_PHYSICAL(dev))
1389 i915_gem_object_unpin(reg_bo); 1389 i915_gem_object_ggtt_unpin(reg_bo);
1390out_free_bo: 1390out_free_bo:
1391 drm_gem_object_unreference(&reg_bo->base); 1391 drm_gem_object_unreference(&reg_bo->base);
1392out_free: 1392out_free:
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 350de359123a..f1ee2c4d282e 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -33,8 +33,6 @@
33#include <linux/moduleparam.h> 33#include <linux/moduleparam.h>
34#include "intel_drv.h" 34#include "intel_drv.h"
35 35
36#define PCI_LBPC 0xf4 /* legacy/combination backlight modes */
37
38void 36void
39intel_fixed_panel_mode(const struct drm_display_mode *fixed_mode, 37intel_fixed_panel_mode(const struct drm_display_mode *fixed_mode,
40 struct drm_display_mode *adjusted_mode) 38 struct drm_display_mode *adjusted_mode)
@@ -325,13 +323,6 @@ out:
325 pipe_config->gmch_pfit.lvds_border_bits = border; 323 pipe_config->gmch_pfit.lvds_border_bits = border;
326} 324}
327 325
328static int i915_panel_invert_brightness;
329MODULE_PARM_DESC(invert_brightness, "Invert backlight brightness "
330 "(-1 force normal, 0 machine defaults, 1 force inversion), please "
331 "report PCI device ID, subsystem vendor and subsystem device ID "
332 "to dri-devel@lists.freedesktop.org, if your machine needs it. "
333 "It will then be included in an upcoming module version.");
334module_param_named(invert_brightness, i915_panel_invert_brightness, int, 0600);
335static u32 intel_panel_compute_brightness(struct intel_connector *connector, 326static u32 intel_panel_compute_brightness(struct intel_connector *connector,
336 u32 val) 327 u32 val)
337{ 328{
@@ -341,10 +332,10 @@ static u32 intel_panel_compute_brightness(struct intel_connector *connector,
341 332
342 WARN_ON(panel->backlight.max == 0); 333 WARN_ON(panel->backlight.max == 0);
343 334
344 if (i915_panel_invert_brightness < 0) 335 if (i915.invert_brightness < 0)
345 return val; 336 return val;
346 337
347 if (i915_panel_invert_brightness > 0 || 338 if (i915.invert_brightness > 0 ||
348 dev_priv->quirks & QUIRK_INVERT_BRIGHTNESS) { 339 dev_priv->quirks & QUIRK_INVERT_BRIGHTNESS) {
349 return panel->backlight.max - val; 340 return panel->backlight.max - val;
350 } 341 }
@@ -810,13 +801,13 @@ intel_panel_detect(struct drm_device *dev)
810 struct drm_i915_private *dev_priv = dev->dev_private; 801 struct drm_i915_private *dev_priv = dev->dev_private;
811 802
812 /* Assume that the BIOS does not lie through the OpRegion... */ 803 /* Assume that the BIOS does not lie through the OpRegion... */
813 if (!i915_panel_ignore_lid && dev_priv->opregion.lid_state) { 804 if (!i915.panel_ignore_lid && dev_priv->opregion.lid_state) {
814 return ioread32(dev_priv->opregion.lid_state) & 0x1 ? 805 return ioread32(dev_priv->opregion.lid_state) & 0x1 ?
815 connector_status_connected : 806 connector_status_connected :
816 connector_status_disconnected; 807 connector_status_disconnected;
817 } 808 }
818 809
819 switch (i915_panel_ignore_lid) { 810 switch (i915.panel_ignore_lid) {
820 case -2: 811 case -2:
821 return connector_status_connected; 812 return connector_status_connected;
822 case -1: 813 case -1:
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index d77cc81900f9..f74d7f506aa9 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -97,7 +97,7 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc)
97 struct drm_i915_gem_object *obj = intel_fb->obj; 97 struct drm_i915_gem_object *obj = intel_fb->obj;
98 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 98 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
99 int cfb_pitch; 99 int cfb_pitch;
100 int plane, i; 100 int i;
101 u32 fbc_ctl; 101 u32 fbc_ctl;
102 102
103 cfb_pitch = dev_priv->fbc.size / FBC_LL_SIZE; 103 cfb_pitch = dev_priv->fbc.size / FBC_LL_SIZE;
@@ -109,7 +109,6 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc)
109 cfb_pitch = (cfb_pitch / 32) - 1; 109 cfb_pitch = (cfb_pitch / 32) - 1;
110 else 110 else
111 cfb_pitch = (cfb_pitch / 64) - 1; 111 cfb_pitch = (cfb_pitch / 64) - 1;
112 plane = intel_crtc->plane == 0 ? FBC_CTL_PLANEA : FBC_CTL_PLANEB;
113 112
114 /* Clear old tags */ 113 /* Clear old tags */
115 for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++) 114 for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
@@ -120,7 +119,7 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc)
120 119
121 /* Set it up... */ 120 /* Set it up... */
122 fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE; 121 fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
123 fbc_ctl2 |= plane; 122 fbc_ctl2 |= FBC_CTL_PLANE(intel_crtc->plane);
124 I915_WRITE(FBC_CONTROL2, fbc_ctl2); 123 I915_WRITE(FBC_CONTROL2, fbc_ctl2);
125 I915_WRITE(FBC_FENCE_OFF, crtc->y); 124 I915_WRITE(FBC_FENCE_OFF, crtc->y);
126 } 125 }
@@ -135,7 +134,7 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc)
135 fbc_ctl |= obj->fence_reg; 134 fbc_ctl |= obj->fence_reg;
136 I915_WRITE(FBC_CONTROL, fbc_ctl); 135 I915_WRITE(FBC_CONTROL, fbc_ctl);
137 136
138 DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %c, ", 137 DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %c\n",
139 cfb_pitch, crtc->y, plane_name(intel_crtc->plane)); 138 cfb_pitch, crtc->y, plane_name(intel_crtc->plane));
140} 139}
141 140
@@ -154,17 +153,19 @@ static void g4x_enable_fbc(struct drm_crtc *crtc)
154 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); 153 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
155 struct drm_i915_gem_object *obj = intel_fb->obj; 154 struct drm_i915_gem_object *obj = intel_fb->obj;
156 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 155 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
157 int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
158 u32 dpfc_ctl; 156 u32 dpfc_ctl;
159 157
160 dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X; 158 dpfc_ctl = DPFC_CTL_PLANE(intel_crtc->plane) | DPFC_SR_EN;
159 if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
160 dpfc_ctl |= DPFC_CTL_LIMIT_2X;
161 else
162 dpfc_ctl |= DPFC_CTL_LIMIT_1X;
161 dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg; 163 dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg;
162 I915_WRITE(DPFC_CHICKEN, DPFC_HT_MODIFY);
163 164
164 I915_WRITE(DPFC_FENCE_YOFF, crtc->y); 165 I915_WRITE(DPFC_FENCE_YOFF, crtc->y);
165 166
166 /* enable it... */ 167 /* enable it... */
167 I915_WRITE(DPFC_CONTROL, I915_READ(DPFC_CONTROL) | DPFC_CTL_EN); 168 I915_WRITE(DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
168 169
169 DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane)); 170 DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
170} 171}
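For reference on the new compression-limit selection: drm_format_plane_cpp() returns the bytes per pixel of the scanout format, so (assuming the usual fourcc definitions) the mapping is:

/* DRM_FORMAT_RGB565   -> cpp == 2 -> DPFC_CTL_LIMIT_2X
 * DRM_FORMAT_XRGB8888 -> cpp == 4 -> DPFC_CTL_LIMIT_1X
 */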
@@ -224,18 +225,16 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc)
224 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); 225 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
225 struct drm_i915_gem_object *obj = intel_fb->obj; 226 struct drm_i915_gem_object *obj = intel_fb->obj;
226 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 227 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
227 int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
228 u32 dpfc_ctl; 228 u32 dpfc_ctl;
229 229
230 dpfc_ctl = I915_READ(ILK_DPFC_CONTROL); 230 dpfc_ctl = DPFC_CTL_PLANE(intel_crtc->plane);
231 dpfc_ctl &= DPFC_RESERVED; 231 if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
232 dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X); 232 dpfc_ctl |= DPFC_CTL_LIMIT_2X;
233 /* Set persistent mode for front-buffer rendering, ala X. */ 233 else
234 dpfc_ctl |= DPFC_CTL_PERSISTENT_MODE; 234 dpfc_ctl |= DPFC_CTL_LIMIT_1X;
235 dpfc_ctl |= DPFC_CTL_FENCE_EN; 235 dpfc_ctl |= DPFC_CTL_FENCE_EN;
236 if (IS_GEN5(dev)) 236 if (IS_GEN5(dev))
237 dpfc_ctl |= obj->fence_reg; 237 dpfc_ctl |= obj->fence_reg;
238 I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY);
239 238
240 I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y); 239 I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
241 I915_WRITE(ILK_FBC_RT_BASE, i915_gem_obj_ggtt_offset(obj) | ILK_FBC_RT_VALID); 240 I915_WRITE(ILK_FBC_RT_BASE, i915_gem_obj_ggtt_offset(obj) | ILK_FBC_RT_VALID);
@@ -282,12 +281,16 @@ static void gen7_enable_fbc(struct drm_crtc *crtc)
282 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); 281 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
283 struct drm_i915_gem_object *obj = intel_fb->obj; 282 struct drm_i915_gem_object *obj = intel_fb->obj;
284 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 283 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
284 u32 dpfc_ctl;
285 285
286 I915_WRITE(IVB_FBC_RT_BASE, i915_gem_obj_ggtt_offset(obj)); 286 dpfc_ctl = IVB_DPFC_CTL_PLANE(intel_crtc->plane);
287 if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
288 dpfc_ctl |= DPFC_CTL_LIMIT_2X;
289 else
290 dpfc_ctl |= DPFC_CTL_LIMIT_1X;
291 dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN;
287 292
288 I915_WRITE(ILK_DPFC_CONTROL, DPFC_CTL_EN | DPFC_CTL_LIMIT_1X | 293 I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
289 IVB_DPFC_CTL_FENCE_EN |
290 intel_crtc->plane << IVB_DPFC_CTL_PLANE_SHIFT);
291 294
292 if (IS_IVYBRIDGE(dev)) { 295 if (IS_IVYBRIDGE(dev)) {
293 /* WaFbcAsynchFlipDisableFbcQueue:ivb */ 296 /* WaFbcAsynchFlipDisableFbcQueue:ivb */
@@ -466,7 +469,7 @@ void intel_update_fbc(struct drm_device *dev)
466 return; 469 return;
467 } 470 }
468 471
469 if (!i915_powersave) { 472 if (!i915.powersave) {
470 if (set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM)) 473 if (set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM))
471 DRM_DEBUG_KMS("fbc disabled per module param\n"); 474 DRM_DEBUG_KMS("fbc disabled per module param\n");
472 return; 475 return;
@@ -505,13 +508,13 @@ void intel_update_fbc(struct drm_device *dev)
505 obj = intel_fb->obj; 508 obj = intel_fb->obj;
506 adjusted_mode = &intel_crtc->config.adjusted_mode; 509 adjusted_mode = &intel_crtc->config.adjusted_mode;
507 510
508 if (i915_enable_fbc < 0 && 511 if (i915.enable_fbc < 0 &&
509 INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev)) { 512 INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev)) {
510 if (set_no_fbc_reason(dev_priv, FBC_CHIP_DEFAULT)) 513 if (set_no_fbc_reason(dev_priv, FBC_CHIP_DEFAULT))
511 DRM_DEBUG_KMS("disabled per chip default\n"); 514 DRM_DEBUG_KMS("disabled per chip default\n");
512 goto out_disable; 515 goto out_disable;
513 } 516 }
514 if (!i915_enable_fbc) { 517 if (!i915.enable_fbc) {
515 if (set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM)) 518 if (set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM))
516 DRM_DEBUG_KMS("fbc disabled per module param\n"); 519 DRM_DEBUG_KMS("fbc disabled per module param\n");
517 goto out_disable; 520 goto out_disable;
@@ -1886,7 +1889,7 @@ static unsigned int ilk_cursor_wm_max(const struct drm_device *dev,
1886} 1889}
1887 1890
1888/* Calculate the maximum FBC watermark */ 1891/* Calculate the maximum FBC watermark */
1889static unsigned int ilk_fbc_wm_max(struct drm_device *dev) 1892static unsigned int ilk_fbc_wm_max(const struct drm_device *dev)
1890{ 1893{
1891 /* max that registers can hold */ 1894 /* max that registers can hold */
1892 if (INTEL_INFO(dev)->gen >= 8) 1895 if (INTEL_INFO(dev)->gen >= 8)
@@ -1895,7 +1898,7 @@ static unsigned int ilk_fbc_wm_max(struct drm_device *dev)
1895 return 15; 1898 return 15;
1896} 1899}
1897 1900
1898static void ilk_compute_wm_maximums(struct drm_device *dev, 1901static void ilk_compute_wm_maximums(const struct drm_device *dev,
1899 int level, 1902 int level,
1900 const struct intel_wm_config *config, 1903 const struct intel_wm_config *config,
1901 enum intel_ddb_partitioning ddb_partitioning, 1904 enum intel_ddb_partitioning ddb_partitioning,
@@ -1948,7 +1951,7 @@ static bool ilk_validate_wm_level(int level,
1948 return ret; 1951 return ret;
1949} 1952}
1950 1953
1951static void ilk_compute_wm_level(struct drm_i915_private *dev_priv, 1954static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
1952 int level, 1955 int level,
1953 const struct ilk_pipe_wm_parameters *p, 1956 const struct ilk_pipe_wm_parameters *p,
1954 struct intel_wm_level *result) 1957 struct intel_wm_level *result)
@@ -2140,7 +2143,7 @@ static bool intel_compute_pipe_wm(struct drm_crtc *crtc,
2140 struct intel_pipe_wm *pipe_wm) 2143 struct intel_pipe_wm *pipe_wm)
2141{ 2144{
2142 struct drm_device *dev = crtc->dev; 2145 struct drm_device *dev = crtc->dev;
2143 struct drm_i915_private *dev_priv = dev->dev_private; 2146 const struct drm_i915_private *dev_priv = dev->dev_private;
2144 int level, max_level = ilk_wm_max_level(dev); 2147 int level, max_level = ilk_wm_max_level(dev);
2145 /* LP0 watermark maximums depend on this pipe alone */ 2148 /* LP0 watermark maximums depend on this pipe alone */
2146 struct intel_wm_config config = { 2149 struct intel_wm_config config = {
@@ -2753,7 +2756,7 @@ intel_alloc_context_page(struct drm_device *dev)
2753 return ctx; 2756 return ctx;
2754 2757
2755err_unpin: 2758err_unpin:
2756 i915_gem_object_unpin(ctx); 2759 i915_gem_object_ggtt_unpin(ctx);
2757err_unref: 2760err_unref:
2758 drm_gem_object_unreference(&ctx->base); 2761 drm_gem_object_unreference(&ctx->base);
2759 return NULL; 2762 return NULL;
@@ -3000,6 +3003,9 @@ static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
3000 dev_priv->rps.last_adj = 0; 3003 dev_priv->rps.last_adj = 0;
3001} 3004}
3002 3005
3006/* gen6_set_rps is called to update the frequency request, but should also be
3007 * called when the range (min_delay and max_delay) is modified so that we can
3008 * update the GEN6_RP_INTERRUPT_LIMITS register accordingly. */
3003void gen6_set_rps(struct drm_device *dev, u8 val) 3009void gen6_set_rps(struct drm_device *dev, u8 val)
3004{ 3010{
3005 struct drm_i915_private *dev_priv = dev->dev_private; 3011 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3008,8 +3014,14 @@ void gen6_set_rps(struct drm_device *dev, u8 val)
3008 WARN_ON(val > dev_priv->rps.max_delay); 3014 WARN_ON(val > dev_priv->rps.max_delay);
3009 WARN_ON(val < dev_priv->rps.min_delay); 3015 WARN_ON(val < dev_priv->rps.min_delay);
3010 3016
3011 if (val == dev_priv->rps.cur_delay) 3017 if (val == dev_priv->rps.cur_delay) {
3018 /* min/max delay may still have been modified so be sure to
3019 * write the limits value */
3020 I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
3021 gen6_rps_limits(dev_priv, val));
3022
3012 return; 3023 return;
3024 }
3013 3025
3014 gen6_set_rps_thresholds(dev_priv, val); 3026 gen6_set_rps_thresholds(dev_priv, val);
3015 3027
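The reason the val == cur_delay path now rewrites GEN6_RP_INTERRUPT_LIMITS: a caller may change the allowed range without changing the current frequency, and the limits register still has to pick up the new bounds. A hypothetical, simplified sysfs-style caller:

/* Hypothetical, simplified; assumes dev_priv->rps.hw_lock is held. */
static void example_set_max_freq(struct drm_device *dev, u8 new_max)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev_priv->rps.max_delay = new_max;
	gen6_set_rps(dev, clamp_t(u8, dev_priv->rps.cur_delay,
				  dev_priv->rps.min_delay,
				  dev_priv->rps.max_delay));
}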
@@ -3035,6 +3047,58 @@ void gen6_set_rps(struct drm_device *dev, u8 val)
3035 trace_intel_gpu_freq_change(val * 50); 3047 trace_intel_gpu_freq_change(val * 50);
3036} 3048}
3037 3049
3050/* vlv_set_rps_idle: Set the frequency to Rpn if Gfx clocks are down
3051 *
 3052 * If Gfx is Idle, then
 3053 * 1. Mask Turbo interrupts
 3054 * 2. Bring up Gfx clock
 3055 * 3. Change the freq to Rpn and wait till P-Unit updates freq
 3056 * 4. Clear the Force GFX CLK ON bit so that Gfx can clock down
 3057 * 5. Unmask Turbo interrupts
 3058 */
3059static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
3060{
3061 /*
 3062 * When we are idle, drop to the min voltage state.
3063 */
3064
3065 if (dev_priv->rps.cur_delay <= dev_priv->rps.min_delay)
3066 return;
3067
3068 /* Mask turbo interrupt so that they will not come in between */
3069 I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
3070
3071 /* Bring up the Gfx clock */
3072 I915_WRITE(VLV_GTLC_SURVIVABILITY_REG,
3073 I915_READ(VLV_GTLC_SURVIVABILITY_REG) |
3074 VLV_GFX_CLK_FORCE_ON_BIT);
3075
3076 if (wait_for(((VLV_GFX_CLK_STATUS_BIT &
3077 I915_READ(VLV_GTLC_SURVIVABILITY_REG)) != 0), 5)) {
3078 DRM_ERROR("GFX_CLK_ON request timed out\n");
3079 return;
3080 }
3081
3082 dev_priv->rps.cur_delay = dev_priv->rps.min_delay;
3083
3084 vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ,
3085 dev_priv->rps.min_delay);
3086
3087 if (wait_for(((vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS))
3088 & GENFREQSTATUS) == 0, 5))
3089 DRM_ERROR("timed out waiting for Punit\n");
3090
3091 /* Release the Gfx clock */
3092 I915_WRITE(VLV_GTLC_SURVIVABILITY_REG,
3093 I915_READ(VLV_GTLC_SURVIVABILITY_REG) &
3094 ~VLV_GFX_CLK_FORCE_ON_BIT);
3095
3096 /* Unmask Up interrupts */
3097 dev_priv->rps.rp_up_masked = true;
3098 gen6_set_pm_mask(dev_priv, GEN6_PM_RP_DOWN_THRESHOLD,
3099 dev_priv->rps.min_delay);
3100}
3101
3038void gen6_rps_idle(struct drm_i915_private *dev_priv) 3102void gen6_rps_idle(struct drm_i915_private *dev_priv)
3039{ 3103{
3040 struct drm_device *dev = dev_priv->dev; 3104 struct drm_device *dev = dev_priv->dev;
@@ -3042,7 +3106,7 @@ void gen6_rps_idle(struct drm_i915_private *dev_priv)
3042 mutex_lock(&dev_priv->rps.hw_lock); 3106 mutex_lock(&dev_priv->rps.hw_lock);
3043 if (dev_priv->rps.enabled) { 3107 if (dev_priv->rps.enabled) {
3044 if (IS_VALLEYVIEW(dev)) 3108 if (IS_VALLEYVIEW(dev))
3045 valleyview_set_rps(dev_priv->dev, dev_priv->rps.min_delay); 3109 vlv_set_rps_idle(dev_priv);
3046 else 3110 else
3047 gen6_set_rps(dev_priv->dev, dev_priv->rps.min_delay); 3111 gen6_set_rps(dev_priv->dev, dev_priv->rps.min_delay);
3048 dev_priv->rps.last_adj = 0; 3112 dev_priv->rps.last_adj = 0;
@@ -3151,8 +3215,8 @@ int intel_enable_rc6(const struct drm_device *dev)
3151 return 0; 3215 return 0;
3152 3216
3153 /* Respect the kernel parameter if it is set */ 3217 /* Respect the kernel parameter if it is set */
3154 if (i915_enable_rc6 >= 0) 3218 if (i915.enable_rc6 >= 0)
3155 return i915_enable_rc6; 3219 return i915.enable_rc6;
3156 3220
3157 /* Disable RC6 on Ironlake */ 3221 /* Disable RC6 on Ironlake */
3158 if (INTEL_INFO(dev)->gen == 5) 3222 if (INTEL_INFO(dev)->gen == 5)
@@ -3267,7 +3331,7 @@ static void gen6_enable_rps(struct drm_device *dev)
3267{ 3331{
3268 struct drm_i915_private *dev_priv = dev->dev_private; 3332 struct drm_i915_private *dev_priv = dev->dev_private;
3269 struct intel_ring_buffer *ring; 3333 struct intel_ring_buffer *ring;
3270 u32 rp_state_cap; 3334 u32 rp_state_cap, hw_max, hw_min;
3271 u32 gt_perf_status; 3335 u32 gt_perf_status;
3272 u32 rc6vids, pcu_mbox, rc6_mask = 0; 3336 u32 rc6vids, pcu_mbox, rc6_mask = 0;
3273 u32 gtfifodbg; 3337 u32 gtfifodbg;
@@ -3296,13 +3360,20 @@ static void gen6_enable_rps(struct drm_device *dev)
3296 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS); 3360 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
3297 3361
3298 /* In units of 50MHz */ 3362 /* In units of 50MHz */
3299 dev_priv->rps.hw_max = dev_priv->rps.max_delay = rp_state_cap & 0xff; 3363 dev_priv->rps.hw_max = hw_max = rp_state_cap & 0xff;
3300 dev_priv->rps.min_delay = (rp_state_cap >> 16) & 0xff; 3364 hw_min = (rp_state_cap >> 16) & 0xff;
3301 dev_priv->rps.rp1_delay = (rp_state_cap >> 8) & 0xff; 3365 dev_priv->rps.rp1_delay = (rp_state_cap >> 8) & 0xff;
3302 dev_priv->rps.rp0_delay = (rp_state_cap >> 0) & 0xff; 3366 dev_priv->rps.rp0_delay = (rp_state_cap >> 0) & 0xff;
3303 dev_priv->rps.rpe_delay = dev_priv->rps.rp1_delay; 3367 dev_priv->rps.rpe_delay = dev_priv->rps.rp1_delay;
3304 dev_priv->rps.cur_delay = 0; 3368 dev_priv->rps.cur_delay = 0;
3305 3369
3370 /* Preserve min/max settings in case of re-init */
3371 if (dev_priv->rps.max_delay == 0)
3372 dev_priv->rps.max_delay = hw_max;
3373
3374 if (dev_priv->rps.min_delay == 0)
3375 dev_priv->rps.min_delay = hw_min;
3376
3306 /* disable the counters and set deterministic thresholds */ 3377 /* disable the counters and set deterministic thresholds */
3307 I915_WRITE(GEN6_RC_CONTROL, 0); 3378 I915_WRITE(GEN6_RC_CONTROL, 0);
3308 3379
@@ -3531,7 +3602,7 @@ static void valleyview_enable_rps(struct drm_device *dev)
3531{ 3602{
3532 struct drm_i915_private *dev_priv = dev->dev_private; 3603 struct drm_i915_private *dev_priv = dev->dev_private;
3533 struct intel_ring_buffer *ring; 3604 struct intel_ring_buffer *ring;
3534 u32 gtfifodbg, val, rc6_mode = 0; 3605 u32 gtfifodbg, val, hw_max, hw_min, rc6_mode = 0;
3535 int i; 3606 int i;
3536 3607
3537 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); 3608 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
@@ -3593,21 +3664,27 @@ static void valleyview_enable_rps(struct drm_device *dev)
3593 vlv_gpu_freq(dev_priv, dev_priv->rps.cur_delay), 3664 vlv_gpu_freq(dev_priv, dev_priv->rps.cur_delay),
3594 dev_priv->rps.cur_delay); 3665 dev_priv->rps.cur_delay);
3595 3666
3596 dev_priv->rps.max_delay = valleyview_rps_max_freq(dev_priv); 3667 dev_priv->rps.hw_max = hw_max = valleyview_rps_max_freq(dev_priv);
3597 dev_priv->rps.hw_max = dev_priv->rps.max_delay;
3598 DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n", 3668 DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
3599 vlv_gpu_freq(dev_priv, dev_priv->rps.max_delay), 3669 vlv_gpu_freq(dev_priv, hw_max),
3600 dev_priv->rps.max_delay); 3670 hw_max);
3601 3671
3602 dev_priv->rps.rpe_delay = valleyview_rps_rpe_freq(dev_priv); 3672 dev_priv->rps.rpe_delay = valleyview_rps_rpe_freq(dev_priv);
3603 DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n", 3673 DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
3604 vlv_gpu_freq(dev_priv, dev_priv->rps.rpe_delay), 3674 vlv_gpu_freq(dev_priv, dev_priv->rps.rpe_delay),
3605 dev_priv->rps.rpe_delay); 3675 dev_priv->rps.rpe_delay);
3606 3676
3607 dev_priv->rps.min_delay = valleyview_rps_min_freq(dev_priv); 3677 hw_min = valleyview_rps_min_freq(dev_priv);
3608 DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n", 3678 DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
3609 vlv_gpu_freq(dev_priv, dev_priv->rps.min_delay), 3679 vlv_gpu_freq(dev_priv, hw_min),
3610 dev_priv->rps.min_delay); 3680 hw_min);
3681
3682 /* Preserve min/max settings in case of re-init */
3683 if (dev_priv->rps.max_delay == 0)
3684 dev_priv->rps.max_delay = hw_max;
3685
3686 if (dev_priv->rps.min_delay == 0)
3687 dev_priv->rps.min_delay = hw_min;
3611 3688
3612 DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n", 3689 DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n",
3613 vlv_gpu_freq(dev_priv, dev_priv->rps.rpe_delay), 3690 vlv_gpu_freq(dev_priv, dev_priv->rps.rpe_delay),
@@ -3615,6 +3692,9 @@ static void valleyview_enable_rps(struct drm_device *dev)
3615 3692
3616 valleyview_set_rps(dev_priv->dev, dev_priv->rps.rpe_delay); 3693 valleyview_set_rps(dev_priv->dev, dev_priv->rps.rpe_delay);
3617 3694
3695 dev_priv->rps.rp_up_masked = false;
3696 dev_priv->rps.rp_down_masked = false;
3697
3618 gen6_enable_rps_interrupts(dev); 3698 gen6_enable_rps_interrupts(dev);
3619 3699
3620 gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL); 3700 gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
@@ -3625,13 +3705,13 @@ void ironlake_teardown_rc6(struct drm_device *dev)
3625 struct drm_i915_private *dev_priv = dev->dev_private; 3705 struct drm_i915_private *dev_priv = dev->dev_private;
3626 3706
3627 if (dev_priv->ips.renderctx) { 3707 if (dev_priv->ips.renderctx) {
3628 i915_gem_object_unpin(dev_priv->ips.renderctx); 3708 i915_gem_object_ggtt_unpin(dev_priv->ips.renderctx);
3629 drm_gem_object_unreference(&dev_priv->ips.renderctx->base); 3709 drm_gem_object_unreference(&dev_priv->ips.renderctx->base);
3630 dev_priv->ips.renderctx = NULL; 3710 dev_priv->ips.renderctx = NULL;
3631 } 3711 }
3632 3712
3633 if (dev_priv->ips.pwrctx) { 3713 if (dev_priv->ips.pwrctx) {
3634 i915_gem_object_unpin(dev_priv->ips.pwrctx); 3714 i915_gem_object_ggtt_unpin(dev_priv->ips.pwrctx);
3635 drm_gem_object_unreference(&dev_priv->ips.pwrctx->base); 3715 drm_gem_object_unreference(&dev_priv->ips.pwrctx->base);
3636 dev_priv->ips.pwrctx = NULL; 3716 dev_priv->ips.pwrctx = NULL;
3637 } 3717 }
@@ -4270,6 +4350,7 @@ void intel_gpu_ips_teardown(void)
4270 i915_mch_dev = NULL; 4350 i915_mch_dev = NULL;
4271 spin_unlock_irq(&mchdev_lock); 4351 spin_unlock_irq(&mchdev_lock);
4272} 4352}
4353
4273static void intel_init_emon(struct drm_device *dev) 4354static void intel_init_emon(struct drm_device *dev)
4274{ 4355{
4275 struct drm_i915_private *dev_priv = dev->dev_private; 4356 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -4605,11 +4686,10 @@ static void gen6_init_clock_gating(struct drm_device *dev)
4605 * According to the spec, bit 11 (RCCUNIT) must also be set, 4686 * According to the spec, bit 11 (RCCUNIT) must also be set,
4606 * but we didn't debug actual testcases to find it out. 4687 * but we didn't debug actual testcases to find it out.
4607 * 4688 *
4608 * Also apply WaDisableVDSUnitClockGating:snb and 4689 * WaDisableRCCUnitClockGating:snb
4609 * WaDisableRCPBUnitClockGating:snb. 4690 * WaDisableRCPBUnitClockGating:snb
4610 */ 4691 */
4611 I915_WRITE(GEN6_UCGCTL2, 4692 I915_WRITE(GEN6_UCGCTL2,
4612 GEN7_VDSUNIT_CLOCK_GATE_DISABLE |
4613 GEN6_RCPBUNIT_CLOCK_GATE_DISABLE | 4693 GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
4614 GEN6_RCCUNIT_CLOCK_GATE_DISABLE); 4694 GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
4615 4695
@@ -4655,14 +4735,17 @@ static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
4655{ 4735{
4656 uint32_t reg = I915_READ(GEN7_FF_THREAD_MODE); 4736 uint32_t reg = I915_READ(GEN7_FF_THREAD_MODE);
4657 4737
4738 /*
4739 * WaVSThreadDispatchOverride:ivb,vlv
4740 *
4741 * This actually overrides the dispatch
4742 * mode for all thread types.
4743 */
4658 reg &= ~GEN7_FF_SCHED_MASK; 4744 reg &= ~GEN7_FF_SCHED_MASK;
4659 reg |= GEN7_FF_TS_SCHED_HW; 4745 reg |= GEN7_FF_TS_SCHED_HW;
4660 reg |= GEN7_FF_VS_SCHED_HW; 4746 reg |= GEN7_FF_VS_SCHED_HW;
4661 reg |= GEN7_FF_DS_SCHED_HW; 4747 reg |= GEN7_FF_DS_SCHED_HW;
4662 4748
4663 if (IS_HASWELL(dev_priv->dev))
4664 reg &= ~GEN7_FF_VS_REF_CNT_FFME;
4665
4666 I915_WRITE(GEN7_FF_THREAD_MODE, reg); 4749 I915_WRITE(GEN7_FF_THREAD_MODE, reg);
4667} 4750}
4668 4751
@@ -4709,8 +4792,10 @@ static void gen8_init_clock_gating(struct drm_device *dev)
4709 /* FIXME(BDW): Check all the w/a, some might only apply to 4792 /* FIXME(BDW): Check all the w/a, some might only apply to
4710 * pre-production hw. */ 4793 * pre-production hw. */
4711 4794
4712 WARN(!i915_preliminary_hw_support, 4795 /*
4713 "GEN8_CENTROID_PIXEL_OPT_DIS not be needed for production\n"); 4796 * This GEN8_CENTROID_PIXEL_OPT_DIS W/A is only needed for
4797 * pre-production hardware
4798 */
4714 I915_WRITE(HALF_SLICE_CHICKEN3, 4799 I915_WRITE(HALF_SLICE_CHICKEN3,
4715 _MASKED_BIT_ENABLE(GEN8_CENTROID_PIXEL_OPT_DIS)); 4800 _MASKED_BIT_ENABLE(GEN8_CENTROID_PIXEL_OPT_DIS));
4716 I915_WRITE(HALF_SLICE_CHICKEN3, 4801 I915_WRITE(HALF_SLICE_CHICKEN3,
@@ -4761,21 +4846,6 @@ static void haswell_init_clock_gating(struct drm_device *dev)
4761 4846
4762 ilk_init_lp_watermarks(dev); 4847 ilk_init_lp_watermarks(dev);
4763 4848
4764 /* According to the spec, bit 13 (RCZUNIT) must be set on IVB.
4765 * This implements the WaDisableRCZUnitClockGating:hsw workaround.
4766 */
4767 I915_WRITE(GEN6_UCGCTL2, GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
4768
4769 /* Apply the WaDisableRHWOOptimizationForRenderHang:hsw workaround. */
4770 I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
4771 GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
4772
4773 /* WaApplyL3ControlAndL3ChickenMode:hsw */
4774 I915_WRITE(GEN7_L3CNTLREG1,
4775 GEN7_WA_FOR_GEN7_L3_CONTROL);
4776 I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
4777 GEN7_WA_L3_CHICKEN_MODE);
4778
4779 /* L3 caching of data atomics doesn't work -- disable it. */ 4849 /* L3 caching of data atomics doesn't work -- disable it. */
4780 I915_WRITE(HSW_SCRATCH1, HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE); 4850 I915_WRITE(HSW_SCRATCH1, HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE);
4781 I915_WRITE(HSW_ROW_CHICKEN3, 4851 I915_WRITE(HSW_ROW_CHICKEN3,
@@ -4787,7 +4857,12 @@ static void haswell_init_clock_gating(struct drm_device *dev)
4787 GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB); 4857 GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
4788 4858
4789 /* WaVSRefCountFullforceMissDisable:hsw */ 4859 /* WaVSRefCountFullforceMissDisable:hsw */
4790 gen7_setup_fixed_func_scheduler(dev_priv); 4860 I915_WRITE(GEN7_FF_THREAD_MODE,
4861 I915_READ(GEN7_FF_THREAD_MODE) & ~GEN7_FF_VS_REF_CNT_FFME);
4862
4863 /* enable HiZ Raw Stall Optimization */
4864 I915_WRITE(CACHE_MODE_0_GEN7,
4865 _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE));
4791 4866
4792 /* WaDisable4x2SubspanOptimization:hsw */ 4867 /* WaDisable4x2SubspanOptimization:hsw */
4793 I915_WRITE(CACHE_MODE_1, 4868 I915_WRITE(CACHE_MODE_1,
@@ -4825,9 +4900,6 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
4825 if (IS_IVB_GT1(dev)) 4900 if (IS_IVB_GT1(dev))
4826 I915_WRITE(GEN7_HALF_SLICE_CHICKEN1, 4901 I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
4827 _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE)); 4902 _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
4828 else
4829 I915_WRITE(GEN7_HALF_SLICE_CHICKEN1_GT2,
4830 _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
4831 4903
4832 /* Apply the WaDisableRHWOOptimizationForRenderHang:ivb workaround. */ 4904 /* Apply the WaDisableRHWOOptimizationForRenderHang:ivb workaround. */
4833 I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1, 4905 I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
@@ -4841,31 +4913,24 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
4841 if (IS_IVB_GT1(dev)) 4913 if (IS_IVB_GT1(dev))
4842 I915_WRITE(GEN7_ROW_CHICKEN2, 4914 I915_WRITE(GEN7_ROW_CHICKEN2,
4843 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE)); 4915 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
4844 else 4916 else {
4917 /* must write both registers */
4918 I915_WRITE(GEN7_ROW_CHICKEN2,
4919 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
4845 I915_WRITE(GEN7_ROW_CHICKEN2_GT2, 4920 I915_WRITE(GEN7_ROW_CHICKEN2_GT2,
4846 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE)); 4921 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
4847 4922 }
4848 4923
4849 /* WaForceL3Serialization:ivb */ 4924 /* WaForceL3Serialization:ivb */
4850 I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) & 4925 I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
4851 ~L3SQ_URB_READ_CAM_MATCH_DISABLE); 4926 ~L3SQ_URB_READ_CAM_MATCH_DISABLE);
4852 4927
4853 /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock 4928 /*
4854 * gating disable must be set. Failure to set it results in
4855 * flickering pixels due to Z write ordering failures after
4856 * some amount of runtime in the Mesa "fire" demo, and Unigine
4857 * Sanctuary and Tropics, and apparently anything else with
4858 * alpha test or pixel discard.
4859 *
4860 * According to the spec, bit 11 (RCCUNIT) must also be set,
4861 * but we didn't debug actual testcases to find it out.
4862 *
4863 * According to the spec, bit 13 (RCZUNIT) must be set on IVB. 4929 * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
4864 * This implements the WaDisableRCZUnitClockGating:ivb workaround. 4930 * This implements the WaDisableRCZUnitClockGating:ivb workaround.
4865 */ 4931 */
4866 I915_WRITE(GEN6_UCGCTL2, 4932 I915_WRITE(GEN6_UCGCTL2,
4867 GEN6_RCZUNIT_CLOCK_GATE_DISABLE | 4933 GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
4868 GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
4869 4934
4870 /* This is required by WaCatErrorRejectionIssue:ivb */ 4935 /* This is required by WaCatErrorRejectionIssue:ivb */
4871 I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG, 4936 I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
@@ -4874,9 +4939,12 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
4874 4939
4875 g4x_disable_trickle_feed(dev); 4940 g4x_disable_trickle_feed(dev);
4876 4941
4877 /* WaVSRefCountFullforceMissDisable:ivb */
4878 gen7_setup_fixed_func_scheduler(dev_priv); 4942 gen7_setup_fixed_func_scheduler(dev_priv);
4879 4943
4944 /* enable HiZ Raw Stall Optimization */
4945 I915_WRITE(CACHE_MODE_0_GEN7,
4946 _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE));
4947
4880 /* WaDisable4x2SubspanOptimization:ivb */ 4948 /* WaDisable4x2SubspanOptimization:ivb */
4881 I915_WRITE(CACHE_MODE_1, 4949 I915_WRITE(CACHE_MODE_1,
4882 _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE)); 4950 _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
@@ -4927,18 +4995,14 @@ static void valleyview_init_clock_gating(struct drm_device *dev)
4927 CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE | 4995 CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
4928 CHICKEN3_DGMG_DONE_FIX_DISABLE); 4996 CHICKEN3_DGMG_DONE_FIX_DISABLE);
4929 4997
4998 /* WaPsdDispatchEnable:vlv */
4930 /* WaDisablePSDDualDispatchEnable:vlv */ 4999 /* WaDisablePSDDualDispatchEnable:vlv */
4931 I915_WRITE(GEN7_HALF_SLICE_CHICKEN1, 5000 I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
4932 _MASKED_BIT_ENABLE(GEN7_MAX_PS_THREAD_DEP | 5001 _MASKED_BIT_ENABLE(GEN7_MAX_PS_THREAD_DEP |
4933 GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE)); 5002 GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
4934 5003
4935 /* Apply the WaDisableRHWOOptimizationForRenderHang:vlv workaround. */ 5004 /* WaDisableL3CacheAging:vlv */
4936 I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
4937 GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
4938
4939 /* WaApplyL3ControlAndL3ChickenMode:vlv */
4940 I915_WRITE(GEN7_L3CNTLREG1, I915_READ(GEN7_L3CNTLREG1) | GEN7_L3AGDIS); 5005 I915_WRITE(GEN7_L3CNTLREG1, I915_READ(GEN7_L3CNTLREG1) | GEN7_L3AGDIS);
4941 I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER, GEN7_WA_L3_CHICKEN_MODE);
4942 5006
4943 /* WaForceL3Serialization:vlv */ 5007 /* WaForceL3Serialization:vlv */
4944 I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) & 5008 I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
@@ -4953,51 +5017,39 @@ static void valleyview_init_clock_gating(struct drm_device *dev)
4953 I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) | 5017 I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
4954 GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB); 5018 GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
4955 5019
4956 /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock 5020 gen7_setup_fixed_func_scheduler(dev_priv);
4957 * gating disable must be set. Failure to set it results in 5021
4958 * flickering pixels due to Z write ordering failures after 5022 /*
4959 * some amount of runtime in the Mesa "fire" demo, and Unigine
4960 * Sanctuary and Tropics, and apparently anything else with
4961 * alpha test or pixel discard.
4962 *
4963 * According to the spec, bit 11 (RCCUNIT) must also be set,
4964 * but we didn't debug actual testcases to find it out.
4965 *
4966 * According to the spec, bit 13 (RCZUNIT) must be set on IVB. 5023 * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
4967 * This implements the WaDisableRCZUnitClockGating:vlv workaround. 5024 * This implements the WaDisableRCZUnitClockGating:vlv workaround.
4968 *
4969 * Also apply WaDisableVDSUnitClockGating:vlv and
4970 * WaDisableRCPBUnitClockGating:vlv.
4971 */ 5025 */
4972 I915_WRITE(GEN6_UCGCTL2, 5026 I915_WRITE(GEN6_UCGCTL2,
4973 GEN7_VDSUNIT_CLOCK_GATE_DISABLE | 5027 GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
4974 GEN7_TDLUNIT_CLOCK_GATE_DISABLE |
4975 GEN6_RCZUNIT_CLOCK_GATE_DISABLE |
4976 GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
4977 GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
4978 5028
5029 /* WaDisableL3Bank2xClockGate:vlv */
4979 I915_WRITE(GEN7_UCGCTL4, GEN7_L3BANK2X_CLOCK_GATE_DISABLE); 5030 I915_WRITE(GEN7_UCGCTL4, GEN7_L3BANK2X_CLOCK_GATE_DISABLE);
4980 5031
4981 I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE); 5032 I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
4982 5033
5034 /*
5035 * BSpec says this must be set, even though
5036 * WaDisable4x2SubspanOptimization isn't listed for VLV.
5037 */
4983 I915_WRITE(CACHE_MODE_1, 5038 I915_WRITE(CACHE_MODE_1,
4984 _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE)); 5039 _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
4985 5040
4986 /* 5041 /*
5042 * WaIncreaseL3CreditsForVLVB0:vlv
5043 * This is the hardware default actually.
5044 */
5045 I915_WRITE(GEN7_L3SQCREG1, VLV_B0_WA_L3SQCREG1_VALUE);
5046
5047 /*
4987 * WaDisableVLVClockGating_VBIIssue:vlv 5048 * WaDisableVLVClockGating_VBIIssue:vlv
4988 * Disable clock gating on the GCFG unit to prevent a delay 5049 * Disable clock gating on the GCFG unit to prevent a delay
4989 * in the reporting of vblank events. 5050 * in the reporting of vblank events.
4990 */ 5051 */
4991 I915_WRITE(VLV_GUNIT_CLOCK_GATE, 0xffffffff); 5052 I915_WRITE(VLV_GUNIT_CLOCK_GATE, GCFG_DIS);
4992
4993 /* Conservative clock gating settings for now */
4994 I915_WRITE(0x9400, 0xffffffff);
4995 I915_WRITE(0x9404, 0xffffffff);
4996 I915_WRITE(0x9408, 0xffffffff);
4997 I915_WRITE(0x940c, 0xffffffff);
4998 I915_WRITE(0x9410, 0xffffffff);
4999 I915_WRITE(0x9414, 0xffffffff);
5000 I915_WRITE(0x9418, 0xffffffff);
5001} 5053}
5002 5054
5003static void g4x_init_clock_gating(struct drm_device *dev) 5055static void g4x_init_clock_gating(struct drm_device *dev)
@@ -5272,7 +5324,7 @@ static void __intel_power_well_put(struct drm_device *dev,
5272 WARN_ON(!power_well->count); 5324 WARN_ON(!power_well->count);
5273 5325
5274 if (!--power_well->count && power_well->set && 5326 if (!--power_well->count && power_well->set &&
5275 i915_disable_power_well) { 5327 i915.disable_power_well) {
5276 power_well->set(dev, power_well, false); 5328 power_well->set(dev, power_well, false);
5277 hsw_enable_package_c8(dev_priv); 5329 hsw_enable_package_c8(dev_priv);
5278 } 5330 }
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 31b36c5ac894..8c1c0bc3e630 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -549,7 +549,7 @@ init_pipe_control(struct intel_ring_buffer *ring)
549 return 0; 549 return 0;
550 550
551err_unpin: 551err_unpin:
552 i915_gem_object_unpin(ring->scratch.obj); 552 i915_gem_object_ggtt_unpin(ring->scratch.obj);
553err_unref: 553err_unref:
554 drm_gem_object_unreference(&ring->scratch.obj->base); 554 drm_gem_object_unreference(&ring->scratch.obj->base);
555err: 555err:
@@ -625,7 +625,7 @@ static void render_ring_cleanup(struct intel_ring_buffer *ring)
625 625
626 if (INTEL_INFO(dev)->gen >= 5) { 626 if (INTEL_INFO(dev)->gen >= 5) {
627 kunmap(sg_page(ring->scratch.obj->pages->sgl)); 627 kunmap(sg_page(ring->scratch.obj->pages->sgl));
628 i915_gem_object_unpin(ring->scratch.obj); 628 i915_gem_object_ggtt_unpin(ring->scratch.obj);
629 } 629 }
630 630
631 drm_gem_object_unreference(&ring->scratch.obj->base); 631 drm_gem_object_unreference(&ring->scratch.obj->base);
@@ -1253,7 +1253,7 @@ static void cleanup_status_page(struct intel_ring_buffer *ring)
1253 return; 1253 return;
1254 1254
1255 kunmap(sg_page(obj->pages->sgl)); 1255 kunmap(sg_page(obj->pages->sgl));
1256 i915_gem_object_unpin(obj); 1256 i915_gem_object_ggtt_unpin(obj);
1257 drm_gem_object_unreference(&obj->base); 1257 drm_gem_object_unreference(&obj->base);
1258 ring->status_page.obj = NULL; 1258 ring->status_page.obj = NULL;
1259} 1259}
@@ -1293,7 +1293,7 @@ static int init_status_page(struct intel_ring_buffer *ring)
1293 return 0; 1293 return 0;
1294 1294
1295err_unpin: 1295err_unpin:
1296 i915_gem_object_unpin(obj); 1296 i915_gem_object_ggtt_unpin(obj);
1297err_unref: 1297err_unref:
1298 drm_gem_object_unreference(&obj->base); 1298 drm_gem_object_unreference(&obj->base);
1299err: 1299err:
@@ -1390,7 +1390,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
1390err_unmap: 1390err_unmap:
1391 iounmap(ring->virtual_start); 1391 iounmap(ring->virtual_start);
1392err_unpin: 1392err_unpin:
1393 i915_gem_object_unpin(obj); 1393 i915_gem_object_ggtt_unpin(obj);
1394err_unref: 1394err_unref:
1395 drm_gem_object_unreference(&obj->base); 1395 drm_gem_object_unreference(&obj->base);
1396 ring->obj = NULL; 1396 ring->obj = NULL;
@@ -1418,7 +1418,7 @@ void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
1418 1418
1419 iounmap(ring->virtual_start); 1419 iounmap(ring->virtual_start);
1420 1420
1421 i915_gem_object_unpin(ring->obj); 1421 i915_gem_object_ggtt_unpin(ring->obj);
1422 drm_gem_object_unreference(&ring->obj->base); 1422 drm_gem_object_unreference(&ring->obj->base);
1423 ring->obj = NULL; 1423 ring->obj = NULL;
1424 ring->preallocated_lazy_request = NULL; 1424 ring->preallocated_lazy_request = NULL;
@@ -1430,28 +1430,16 @@ void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
1430 cleanup_status_page(ring); 1430 cleanup_status_page(ring);
1431} 1431}
1432 1432
1433static int intel_ring_wait_seqno(struct intel_ring_buffer *ring, u32 seqno)
1434{
1435 int ret;
1436
1437 ret = i915_wait_seqno(ring, seqno);
1438 if (!ret)
1439 i915_gem_retire_requests_ring(ring);
1440
1441 return ret;
1442}
1443
1444static int intel_ring_wait_request(struct intel_ring_buffer *ring, int n) 1433static int intel_ring_wait_request(struct intel_ring_buffer *ring, int n)
1445{ 1434{
1446 struct drm_i915_gem_request *request; 1435 struct drm_i915_gem_request *request;
1447 u32 seqno = 0; 1436 u32 seqno = 0, tail;
1448 int ret; 1437 int ret;
1449 1438
1450 i915_gem_retire_requests_ring(ring);
1451
1452 if (ring->last_retired_head != -1) { 1439 if (ring->last_retired_head != -1) {
1453 ring->head = ring->last_retired_head; 1440 ring->head = ring->last_retired_head;
1454 ring->last_retired_head = -1; 1441 ring->last_retired_head = -1;
1442
1455 ring->space = ring_space(ring); 1443 ring->space = ring_space(ring);
1456 if (ring->space >= n) 1444 if (ring->space >= n)
1457 return 0; 1445 return 0;
@@ -1468,6 +1456,7 @@ static int intel_ring_wait_request(struct intel_ring_buffer *ring, int n)
1468 space += ring->size; 1456 space += ring->size;
1469 if (space >= n) { 1457 if (space >= n) {
1470 seqno = request->seqno; 1458 seqno = request->seqno;
1459 tail = request->tail;
1471 break; 1460 break;
1472 } 1461 }
1473 1462
@@ -1482,15 +1471,11 @@ static int intel_ring_wait_request(struct intel_ring_buffer *ring, int n)
1482 if (seqno == 0) 1471 if (seqno == 0)
1483 return -ENOSPC; 1472 return -ENOSPC;
1484 1473
1485 ret = intel_ring_wait_seqno(ring, seqno); 1474 ret = i915_wait_seqno(ring, seqno);
1486 if (ret) 1475 if (ret)
1487 return ret; 1476 return ret;
1488 1477
1489 if (WARN_ON(ring->last_retired_head == -1)) 1478 ring->head = tail;
1490 return -ENOSPC;
1491
1492 ring->head = ring->last_retired_head;
1493 ring->last_retired_head = -1;
1494 ring->space = ring_space(ring); 1479 ring->space = ring_space(ring);
1495 if (WARN_ON(ring->space < n)) 1480 if (WARN_ON(ring->space < n))
1496 return -ENOSPC; 1481 return -ENOSPC;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 0b243ce33714..08b91c6ac70a 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -41,6 +41,8 @@ enum intel_ring_hangcheck_action {
41 HANGCHECK_HUNG, 41 HANGCHECK_HUNG,
42}; 42};
43 43
44#define HANGCHECK_SCORE_RING_HUNG 31
45
44struct intel_ring_hangcheck { 46struct intel_ring_hangcheck {
45 bool deadlock; 47 bool deadlock;
46 u32 seqno; 48 u32 seqno;
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 716a3c9c0751..336ae6c602f2 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -124,9 +124,6 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc,
124 crtc_w--; 124 crtc_w--;
125 crtc_h--; 125 crtc_h--;
126 126
127 I915_WRITE(SPSTRIDE(pipe, plane), fb->pitches[0]);
128 I915_WRITE(SPPOS(pipe, plane), (crtc_y << 16) | crtc_x);
129
130 linear_offset = y * fb->pitches[0] + x * pixel_size; 127 linear_offset = y * fb->pitches[0] + x * pixel_size;
131 sprsurf_offset = intel_gen4_compute_page_offset(&x, &y, 128 sprsurf_offset = intel_gen4_compute_page_offset(&x, &y,
132 obj->tiling_mode, 129 obj->tiling_mode,
@@ -134,6 +131,9 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc,
134 fb->pitches[0]); 131 fb->pitches[0]);
135 linear_offset -= sprsurf_offset; 132 linear_offset -= sprsurf_offset;
136 133
134 I915_WRITE(SPSTRIDE(pipe, plane), fb->pitches[0]);
135 I915_WRITE(SPPOS(pipe, plane), (crtc_y << 16) | crtc_x);
136
137 if (obj->tiling_mode != I915_TILING_NONE) 137 if (obj->tiling_mode != I915_TILING_NONE)
138 I915_WRITE(SPTILEOFF(pipe, plane), (y << 16) | x); 138 I915_WRITE(SPTILEOFF(pipe, plane), (y << 16) | x);
139 else 139 else
@@ -293,15 +293,15 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
293 if (crtc_w != src_w || crtc_h != src_h) 293 if (crtc_w != src_w || crtc_h != src_h)
294 sprscale = SPRITE_SCALE_ENABLE | (src_w << 16) | src_h; 294 sprscale = SPRITE_SCALE_ENABLE | (src_w << 16) | src_h;
295 295
296 I915_WRITE(SPRSTRIDE(pipe), fb->pitches[0]);
297 I915_WRITE(SPRPOS(pipe), (crtc_y << 16) | crtc_x);
298
299 linear_offset = y * fb->pitches[0] + x * pixel_size; 296 linear_offset = y * fb->pitches[0] + x * pixel_size;
300 sprsurf_offset = 297 sprsurf_offset =
301 intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode, 298 intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
302 pixel_size, fb->pitches[0]); 299 pixel_size, fb->pitches[0]);
303 linear_offset -= sprsurf_offset; 300 linear_offset -= sprsurf_offset;
304 301
302 I915_WRITE(SPRSTRIDE(pipe), fb->pitches[0]);
303 I915_WRITE(SPRPOS(pipe), (crtc_y << 16) | crtc_x);
304
305 /* HSW consolidates SPRTILEOFF and SPRLINOFF into a single SPROFFSET 305 /* HSW consolidates SPRTILEOFF and SPRLINOFF into a single SPROFFSET
306 * register */ 306 * register */
307 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) 307 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
@@ -472,15 +472,15 @@ ilk_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
472 if (crtc_w != src_w || crtc_h != src_h) 472 if (crtc_w != src_w || crtc_h != src_h)
473 dvsscale = DVS_SCALE_ENABLE | (src_w << 16) | src_h; 473 dvsscale = DVS_SCALE_ENABLE | (src_w << 16) | src_h;
474 474
475 I915_WRITE(DVSSTRIDE(pipe), fb->pitches[0]);
476 I915_WRITE(DVSPOS(pipe), (crtc_y << 16) | crtc_x);
477
478 linear_offset = y * fb->pitches[0] + x * pixel_size; 475 linear_offset = y * fb->pitches[0] + x * pixel_size;
479 dvssurf_offset = 476 dvssurf_offset =
480 intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode, 477 intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
481 pixel_size, fb->pitches[0]); 478 pixel_size, fb->pitches[0]);
482 linear_offset -= dvssurf_offset; 479 linear_offset -= dvssurf_offset;
483 480
481 I915_WRITE(DVSSTRIDE(pipe), fb->pitches[0]);
482 I915_WRITE(DVSPOS(pipe), (crtc_y << 16) | crtc_x);
483
484 if (obj->tiling_mode != I915_TILING_NONE) 484 if (obj->tiling_mode != I915_TILING_NONE)
485 I915_WRITE(DVSTILEOFF(pipe), (y << 16) | x); 485 I915_WRITE(DVSTILEOFF(pipe), (y << 16) | x);
486 else 486 else
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 87df68f5f504..c62841404c82 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -852,6 +852,7 @@ int i915_get_reset_stats_ioctl(struct drm_device *dev,
852 struct drm_i915_private *dev_priv = dev->dev_private; 852 struct drm_i915_private *dev_priv = dev->dev_private;
853 struct drm_i915_reset_stats *args = data; 853 struct drm_i915_reset_stats *args = data;
854 struct i915_ctx_hang_stats *hs; 854 struct i915_ctx_hang_stats *hs;
855 struct i915_hw_context *ctx;
855 int ret; 856 int ret;
856 857
857 if (args->flags || args->pad) 858 if (args->flags || args->pad)
@@ -864,11 +865,12 @@ int i915_get_reset_stats_ioctl(struct drm_device *dev,
864 if (ret) 865 if (ret)
865 return ret; 866 return ret;
866 867
867 hs = i915_gem_context_get_hang_stats(dev, file, args->ctx_id); 868 ctx = i915_gem_context_get(file->driver_priv, args->ctx_id);
868 if (IS_ERR(hs)) { 869 if (IS_ERR(ctx)) {
869 mutex_unlock(&dev->struct_mutex); 870 mutex_unlock(&dev->struct_mutex);
870 return PTR_ERR(hs); 871 return PTR_ERR(ctx);
871 } 872 }
873 hs = &ctx->hang_stats;
872 874
873 if (capable(CAP_SYS_ADMIN)) 875 if (capable(CAP_SYS_ADMIN))
874 args->reset_count = i915_reset_count(&dev_priv->gpu_error); 876 args->reset_count = i915_reset_count(&dev_priv->gpu_error);
diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h
index b7488c9849ad..42947566e755 100644
--- a/include/drm/drm_dp_helper.h
+++ b/include/drm/drm_dp_helper.h
@@ -279,11 +279,21 @@
279 279
280#define DP_TEST_PATTERN 0x221 280#define DP_TEST_PATTERN 0x221
281 281
282#define DP_TEST_CRC_R_CR 0x240
283#define DP_TEST_CRC_G_Y 0x242
284#define DP_TEST_CRC_B_CB 0x244
285
286#define DP_TEST_SINK_MISC 0x246
287#define DP_TEST_CRC_SUPPORTED (1 << 5)
288
282#define DP_TEST_RESPONSE 0x260 289#define DP_TEST_RESPONSE 0x260
283# define DP_TEST_ACK (1 << 0) 290# define DP_TEST_ACK (1 << 0)
284# define DP_TEST_NAK (1 << 1) 291# define DP_TEST_NAK (1 << 1)
285# define DP_TEST_EDID_CHECKSUM_WRITE (1 << 2) 292# define DP_TEST_EDID_CHECKSUM_WRITE (1 << 2)
286 293
294#define DP_TEST_SINK 0x270
295#define DP_TEST_SINK_START (1 << 0)
296
287#define DP_SOURCE_OUI 0x300 297#define DP_SOURCE_OUI 0x300
288#define DP_SINK_OUI 0x400 298#define DP_SINK_OUI 0x400
289#define DP_BRANCH_OUI 0x500 299#define DP_BRANCH_OUI 0x500