author		Dave Airlie <airlied@redhat.com>	2018-12-05 17:09:33 -0500
committer	Dave Airlie <airlied@redhat.com>	2018-12-05 18:17:51 -0500
commit		467e8a516dcf922d1ea343cebb0e751f81f0dca3 (patch)
tree		c22b6d971e922223286a4a181d6701c7d32e523d
parent		818182dd1097fdc492aaef9b08755ea13274352d (diff)
parent		4377d4e0d3d511986033ba7b4182d5a80b7f9ea2 (diff)
Merge tag 'drm-intel-next-2018-12-04' of git://anongit.freedesktop.org/drm/drm-intel into drm-next
Final drm/i915 changes for v4.21:
- ICL DSI video mode enabling (Madhav, Vandita, Jani, Imre)
- eDP sink count fix (José)
- PSR fixes (José)
- DRM DP helper and i915 DSC enabling (Manasi, Gaurav, Anusha)
- DP FEC enabling (Anusha)
- SKL+ watermark/ddb programming improvements (Ville)
- Pixel format fixes (Ville)
- Selftest updates (Chris, Tvrtko)
- GT and engine workaround improvements (Tvrtko)
Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Jani Nikula <jani.nikula@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/87va496uoe.fsf@intel.com
61 files changed, 5364 insertions, 1972 deletions
diff --git a/Documentation/gpu/drm-kms-helpers.rst b/Documentation/gpu/drm-kms-helpers.rst
index 4b4dc236ef6f..b422eb8edf16 100644
--- a/Documentation/gpu/drm-kms-helpers.rst
+++ b/Documentation/gpu/drm-kms-helpers.rst
@@ -232,6 +232,18 @@ MIPI DSI Helper Functions Reference
 .. kernel-doc:: drivers/gpu/drm/drm_mipi_dsi.c
    :export:
 
+Display Stream Compression Helper Functions Reference
+=====================================================
+
+.. kernel-doc:: drivers/gpu/drm/drm_dsc.c
+   :doc: dsc helpers
+
+.. kernel-doc:: include/drm/drm_dsc.h
+   :internal:
+
+.. kernel-doc:: drivers/gpu/drm/drm_dsc.c
+   :export:
+
 Output Probing Helper Functions Reference
 =========================================
 
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 1fafc2f8e8f9..e490fe2687db 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -32,7 +32,7 @@ drm-$(CONFIG_AGP) += drm_agpsupport.o
 drm-$(CONFIG_DEBUG_FS) += drm_debugfs.o drm_debugfs_crc.o
 drm-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o
 
-drm_kms_helper-y := drm_crtc_helper.o drm_dp_helper.o drm_probe_helper.o \
+drm_kms_helper-y := drm_crtc_helper.o drm_dp_helper.o drm_dsc.o drm_probe_helper.o \
		drm_plane_helper.o drm_dp_mst_topology.o drm_atomic_helper.o \
		drm_kms_helper_common.o drm_dp_dual_mode_helper.o \
		drm_simple_kms_helper.o drm_modeset_helper.o \
diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c
index 6d483487f2b4..2d6c491a0542 100644
--- a/drivers/gpu/drm/drm_dp_helper.c
+++ b/drivers/gpu/drm/drm_dp_helper.c
@@ -1428,17 +1428,19 @@ u8 drm_dp_dsc_sink_line_buf_depth(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE])
 }
 EXPORT_SYMBOL(drm_dp_dsc_sink_line_buf_depth);
 
-u8 drm_dp_dsc_sink_max_color_depth(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE])
+int drm_dp_dsc_sink_supported_input_bpcs(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE],
+					 u8 dsc_bpc[3])
 {
+	int num_bpc = 0;
 	u8 color_depth = dsc_dpcd[DP_DSC_DEC_COLOR_DEPTH_CAP - DP_DSC_SUPPORT];
 
 	if (color_depth & DP_DSC_12_BPC)
-		return 12;
+		dsc_bpc[num_bpc++] = 12;
 	if (color_depth & DP_DSC_10_BPC)
-		return 10;
+		dsc_bpc[num_bpc++] = 10;
 	if (color_depth & DP_DSC_8_BPC)
-		return 8;
+		dsc_bpc[num_bpc++] = 8;
 
-	return 0;
+	return num_bpc;
 }
-EXPORT_SYMBOL(drm_dp_dsc_sink_max_color_depth);
+EXPORT_SYMBOL(drm_dp_dsc_sink_supported_input_bpcs);
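
Note on the API change above: drm_dp_dsc_sink_max_color_depth() returned only the single deepest supported bpc, while the new drm_dp_dsc_sink_supported_input_bpcs() fills an array with every supported depth (in descending order) and returns how many entries are valid. A minimal caller sketch; pick_dsc_input_bpc() and the pipe_bpc limit are illustrative, only the helper itself comes from this series:

    #include <drm/drm_dp_helper.h>

    /* Pick the deepest sink-supported input bpc the source can also feed. */
    static int pick_dsc_input_bpc(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE],
                                  int pipe_bpc)
    {
            u8 dsc_bpc[3];
            int num_bpc, i, best = 0;

            /* Returns the number of valid entries written to dsc_bpc[] */
            num_bpc = drm_dp_dsc_sink_supported_input_bpcs(dsc_dpcd, dsc_bpc);
            for (i = 0; i < num_bpc; i++) {
                    if (dsc_bpc[i] <= pipe_bpc && dsc_bpc[i] > best)
                            best = dsc_bpc[i];
            }

            return best; /* 0 means no usable depth */
    }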
diff --git a/drivers/gpu/drm/drm_dsc.c b/drivers/gpu/drm/drm_dsc.c
new file mode 100644
index 000000000000..bc2b23adb072
--- /dev/null
+++ b/drivers/gpu/drm/drm_dsc.c
@@ -0,0 +1,228 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2018 Intel Corp
+ *
+ * Author:
+ * Manasi Navare <manasi.d.navare@intel.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/byteorder/generic.h>
+#include <drm/drm_dp_helper.h>
+#include <drm/drm_dsc.h>
+
+/**
+ * DOC: dsc helpers
+ *
+ * These functions contain some common logic and helpers to deal with VESA
+ * Display Stream Compression standard required for DSC on Display Port/eDP or
+ * MIPI display interfaces.
+ */
+
+/**
+ * drm_dsc_dp_pps_header_init() - Initializes the PPS Header
+ * for DisplayPort as per the DP 1.4 spec.
+ * @pps_sdp: Secondary data packet for DSC Picture Parameter Set
+ */
+void drm_dsc_dp_pps_header_init(struct drm_dsc_pps_infoframe *pps_sdp)
+{
+	memset(&pps_sdp->pps_header, 0, sizeof(pps_sdp->pps_header));
+
+	pps_sdp->pps_header.HB1 = DP_SDP_PPS;
+	pps_sdp->pps_header.HB2 = DP_SDP_PPS_HEADER_PAYLOAD_BYTES_MINUS_1;
+}
+EXPORT_SYMBOL(drm_dsc_dp_pps_header_init);
+
+/**
+ * drm_dsc_pps_infoframe_pack() - Populates the DSC PPS infoframe
+ * using the DSC configuration parameters in the order expected
+ * by the DSC Display Sink device. For the DSC, the sink device
+ * expects the PPS payload in the big endian format for the fields
+ * that span more than 1 byte.
+ *
+ * @pps_sdp:
+ * Secondary data packet for DSC Picture Parameter Set
+ * @dsc_cfg:
+ * DSC Configuration data filled by driver
+ */
+void drm_dsc_pps_infoframe_pack(struct drm_dsc_pps_infoframe *pps_sdp,
+				const struct drm_dsc_config *dsc_cfg)
+{
+	int i;
+
+	/* Protect against someone accidentally changing struct size */
+	BUILD_BUG_ON(sizeof(pps_sdp->pps_payload) !=
+		     DP_SDP_PPS_HEADER_PAYLOAD_BYTES_MINUS_1 + 1);
+
+	memset(&pps_sdp->pps_payload, 0, sizeof(pps_sdp->pps_payload));
+
+	/* PPS 0 */
+	pps_sdp->pps_payload.dsc_version =
+		dsc_cfg->dsc_version_minor |
+		dsc_cfg->dsc_version_major << DSC_PPS_VERSION_MAJOR_SHIFT;
+
+	/* PPS 1, 2 is 0 */
+
+	/* PPS 3 */
+	pps_sdp->pps_payload.pps_3 =
+		dsc_cfg->line_buf_depth |
+		dsc_cfg->bits_per_component << DSC_PPS_BPC_SHIFT;
+
+	/* PPS 4 */
+	pps_sdp->pps_payload.pps_4 =
+		((dsc_cfg->bits_per_pixel & DSC_PPS_BPP_HIGH_MASK) >>
+		 DSC_PPS_MSB_SHIFT) |
+		dsc_cfg->vbr_enable << DSC_PPS_VBR_EN_SHIFT |
+		dsc_cfg->enable422 << DSC_PPS_SIMPLE422_SHIFT |
+		dsc_cfg->convert_rgb << DSC_PPS_CONVERT_RGB_SHIFT |
+		dsc_cfg->block_pred_enable << DSC_PPS_BLOCK_PRED_EN_SHIFT;
+
+	/* PPS 5 */
+	pps_sdp->pps_payload.bits_per_pixel_low =
+		(dsc_cfg->bits_per_pixel & DSC_PPS_LSB_MASK);
+
+	/*
+	 * The DSC panel expects the PPS packet to have big endian format
+	 * for data spanning 2 bytes. Use a macro cpu_to_be16() to convert
+	 * to big endian format. If format is little endian, it will swap
+	 * bytes to convert to Big endian else keep it unchanged.
+	 */
+
+	/* PPS 6, 7 */
+	pps_sdp->pps_payload.pic_height = cpu_to_be16(dsc_cfg->pic_height);
+
+	/* PPS 8, 9 */
+	pps_sdp->pps_payload.pic_width = cpu_to_be16(dsc_cfg->pic_width);
+
+	/* PPS 10, 11 */
+	pps_sdp->pps_payload.slice_height = cpu_to_be16(dsc_cfg->slice_height);
+
+	/* PPS 12, 13 */
+	pps_sdp->pps_payload.slice_width = cpu_to_be16(dsc_cfg->slice_width);
+
+	/* PPS 14, 15 */
+	pps_sdp->pps_payload.chunk_size = cpu_to_be16(dsc_cfg->slice_chunk_size);
+
+	/* PPS 16 */
+	pps_sdp->pps_payload.initial_xmit_delay_high =
+		((dsc_cfg->initial_xmit_delay &
+		  DSC_PPS_INIT_XMIT_DELAY_HIGH_MASK) >>
+		 DSC_PPS_MSB_SHIFT);
+
+	/* PPS 17 */
+	pps_sdp->pps_payload.initial_xmit_delay_low =
+		(dsc_cfg->initial_xmit_delay & DSC_PPS_LSB_MASK);
+
+	/* PPS 18, 19 */
+	pps_sdp->pps_payload.initial_dec_delay =
+		cpu_to_be16(dsc_cfg->initial_dec_delay);
+
+	/* PPS 20 is 0 */
+
+	/* PPS 21 */
+	pps_sdp->pps_payload.initial_scale_value =
+		dsc_cfg->initial_scale_value;
+
+	/* PPS 22, 23 */
+	pps_sdp->pps_payload.scale_increment_interval =
+		cpu_to_be16(dsc_cfg->scale_increment_interval);
+
+	/* PPS 24 */
+	pps_sdp->pps_payload.scale_decrement_interval_high =
+		((dsc_cfg->scale_decrement_interval &
+		  DSC_PPS_SCALE_DEC_INT_HIGH_MASK) >>
+		 DSC_PPS_MSB_SHIFT);
+
+	/* PPS 25 */
+	pps_sdp->pps_payload.scale_decrement_interval_low =
+		(dsc_cfg->scale_decrement_interval & DSC_PPS_LSB_MASK);
+
+	/* PPS 26[7:0], PPS 27[7:5] RESERVED */
+
+	/* PPS 27 */
+	pps_sdp->pps_payload.first_line_bpg_offset =
+		dsc_cfg->first_line_bpg_offset;
+
+	/* PPS 28, 29 */
+	pps_sdp->pps_payload.nfl_bpg_offset =
+		cpu_to_be16(dsc_cfg->nfl_bpg_offset);
+
+	/* PPS 30, 31 */
+	pps_sdp->pps_payload.slice_bpg_offset =
+		cpu_to_be16(dsc_cfg->slice_bpg_offset);
+
+	/* PPS 32, 33 */
+	pps_sdp->pps_payload.initial_offset =
+		cpu_to_be16(dsc_cfg->initial_offset);
+
+	/* PPS 34, 35 */
+	pps_sdp->pps_payload.final_offset = cpu_to_be16(dsc_cfg->final_offset);
+
+	/* PPS 36 */
+	pps_sdp->pps_payload.flatness_min_qp = dsc_cfg->flatness_min_qp;
+
+	/* PPS 37 */
+	pps_sdp->pps_payload.flatness_max_qp = dsc_cfg->flatness_max_qp;
+
+	/* PPS 38, 39 */
+	pps_sdp->pps_payload.rc_model_size =
+		cpu_to_be16(DSC_RC_MODEL_SIZE_CONST);
+
+	/* PPS 40 */
+	pps_sdp->pps_payload.rc_edge_factor = DSC_RC_EDGE_FACTOR_CONST;
+
+	/* PPS 41 */
+	pps_sdp->pps_payload.rc_quant_incr_limit0 =
+		dsc_cfg->rc_quant_incr_limit0;
+
+	/* PPS 42 */
+	pps_sdp->pps_payload.rc_quant_incr_limit1 =
+		dsc_cfg->rc_quant_incr_limit1;
+
+	/* PPS 43 */
+	pps_sdp->pps_payload.rc_tgt_offset = DSC_RC_TGT_OFFSET_LO_CONST |
+		DSC_RC_TGT_OFFSET_HI_CONST << DSC_PPS_RC_TGT_OFFSET_HI_SHIFT;
+
+	/* PPS 44 - 57 */
+	for (i = 0; i < DSC_NUM_BUF_RANGES - 1; i++)
+		pps_sdp->pps_payload.rc_buf_thresh[i] =
+			dsc_cfg->rc_buf_thresh[i];
+
+	/* PPS 58 - 87 */
+	/*
+	 * For DSC sink programming the RC Range parameter fields
+	 * are as follows: Min_qp[15:11], max_qp[10:6], offset[5:0]
+	 */
+	for (i = 0; i < DSC_NUM_BUF_RANGES; i++) {
+		pps_sdp->pps_payload.rc_range_parameters[i] =
+			((dsc_cfg->rc_range_params[i].range_min_qp <<
+			  DSC_PPS_RC_RANGE_MINQP_SHIFT) |
+			 (dsc_cfg->rc_range_params[i].range_max_qp <<
+			  DSC_PPS_RC_RANGE_MAXQP_SHIFT) |
+			 (dsc_cfg->rc_range_params[i].range_bpg_offset));
+		pps_sdp->pps_payload.rc_range_parameters[i] =
+			cpu_to_be16(pps_sdp->pps_payload.rc_range_parameters[i]);
+	}
+
+	/* PPS 88 */
+	pps_sdp->pps_payload.native_422_420 = dsc_cfg->native_422 |
+		dsc_cfg->native_420 << DSC_PPS_NATIVE_420_SHIFT;
+
+	/* PPS 89 */
+	pps_sdp->pps_payload.second_line_bpg_offset =
+		dsc_cfg->second_line_bpg_offset;
+
+	/* PPS 90, 91 */
+	pps_sdp->pps_payload.nsl_bpg_offset =
+		cpu_to_be16(dsc_cfg->nsl_bpg_offset);
+
+	/* PPS 92, 93 */
+	pps_sdp->pps_payload.second_line_offset_adj =
+		cpu_to_be16(dsc_cfg->second_line_offset_adj);
+
+	/* PPS 94 - 127 are 0 */
+}
+EXPORT_SYMBOL(drm_dsc_pps_infoframe_pack);
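
Putting the two exported helpers together: a driver first initializes the DP SDP header, then packs its computed DSC configuration into the big-endian PPS payload before handing the infoframe to hardware. A hedged sketch; write_dsc_pps_sdp() is a hypothetical encoder-specific write, not part of this patch:

    #include <drm/drm_dsc.h>

    /* Hypothetical hardware write, e.g. via the encoder's DIP registers. */
    static void write_dsc_pps_sdp(const struct drm_dsc_pps_infoframe *pps_sdp);

    static void send_dsc_pps(struct drm_dsc_pps_infoframe *pps_sdp,
                             const struct drm_dsc_config *dsc_cfg)
    {
            /* Fill HB1/HB2 of the secondary data packet header (DP 1.4) */
            drm_dsc_dp_pps_header_init(pps_sdp);

            /*
             * Serialize dsc_cfg into the 128-byte PPS payload; fields wider
             * than one byte are converted to big endian for the sink.
             */
            drm_dsc_pps_infoframe_pack(pps_sdp, dsc_cfg);

            write_dsc_pps_sdp(pps_sdp);
    }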
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 0ff878c994e2..19b5fe5016bf 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -157,14 +157,17 @@ i915-y += dvo_ch7017.o \
	  intel_sdvo.o \
	  intel_tv.o \
	  vlv_dsi.o \
-	  vlv_dsi_pll.o
+	  vlv_dsi_pll.o \
+	  intel_vdsc.o
 
 # Post-mortem debug and GPU hang state capture
 i915-$(CONFIG_DRM_I915_CAPTURE_ERROR) += i915_gpu_error.o
 i915-$(CONFIG_DRM_I915_SELFTEST) += \
	selftests/i915_random.o \
	selftests/i915_selftest.o \
-	selftests/igt_flush_test.o
+	selftests/igt_flush_test.o \
+	selftests/igt_reset.o \
+	selftests/igt_spinner.o
 
 # virtual gpu code
 i915-y += i915_vgpu.o
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 7f455bca528e..38dcee1ca062 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -943,30 +943,30 @@ static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
 static ssize_t gpu_state_read(struct file *file, char __user *ubuf,
			      size_t count, loff_t *pos)
 {
-	struct i915_gpu_state *error = file->private_data;
-	struct drm_i915_error_state_buf str;
+	struct i915_gpu_state *error;
	ssize_t ret;
-	loff_t tmp;
+	void *buf;
 
+	error = file->private_data;
	if (!error)
		return 0;
 
-	ret = i915_error_state_buf_init(&str, error->i915, count, *pos);
-	if (ret)
-		return ret;
+	/* Bounce buffer required because of kernfs __user API convenience. */
+	buf = kmalloc(count, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
 
-	ret = i915_error_state_to_str(&str, error);
-	if (ret)
+	ret = i915_gpu_state_copy_to_buffer(error, buf, *pos, count);
+	if (ret <= 0)
		goto out;
 
-	tmp = 0;
-	ret = simple_read_from_buffer(ubuf, count, &tmp, str.buf, str.bytes);
-	if (ret < 0)
-		goto out;
+	if (!copy_to_user(ubuf, buf, ret))
+		*pos += ret;
+	else
+		ret = -EFAULT;
 
-	*pos = str.start + ret;
 out:
-	i915_error_state_buf_release(&str);
+	kfree(buf);
	return ret;
 }
 
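The rewritten gpu_state_read() above is an instance of the common bounce-buffer pattern: stage the data in a kernel buffer, do a single copy_to_user(), and advance *pos only on success. A generic sketch of that pattern; fill_data() is a hypothetical producer callback, not an i915 function:

    #include <linux/slab.h>
    #include <linux/uaccess.h>

    static ssize_t bounce_read(char __user *ubuf, size_t count, loff_t *pos,
                               ssize_t (*fill_data)(void *buf, loff_t pos,
                                                    size_t count))
    {
            void *buf;
            ssize_t ret;

            buf = kmalloc(count, GFP_KERNEL);
            if (!buf)
                    return -ENOMEM;

            ret = fill_data(buf, *pos, count);
            if (ret > 0) {
                    /* copy_to_user() returns the number of bytes NOT copied */
                    if (!copy_to_user(ubuf, buf, ret))
                            *pos += ret;
                    else
                            ret = -EFAULT;
            }

            kfree(buf);
            return ret;
    }
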
@@ -3375,13 +3375,15 @@ static int i915_shared_dplls_info(struct seq_file *m, void *unused)
 
 static int i915_wa_registers(struct seq_file *m, void *unused)
 {
-	struct i915_workarounds *wa = &node_to_i915(m->private)->workarounds;
-	int i;
+	struct drm_i915_private *i915 = node_to_i915(m->private);
+	const struct i915_wa_list *wal = &i915->engine[RCS]->ctx_wa_list;
+	struct i915_wa *wa;
+	unsigned int i;
 
-	seq_printf(m, "Workarounds applied: %d\n", wa->count);
-	for (i = 0; i < wa->count; ++i)
+	seq_printf(m, "Workarounds applied: %u\n", wal->count);
+	for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
		seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X\n",
-			   wa->reg[i].addr, wa->reg[i].value, wa->reg[i].mask);
+			   i915_mmio_reg_offset(wa->reg), wa->val, wa->mask);
 
	return 0;
 }
@@ -3441,31 +3443,32 @@ static int i915_ddb_info(struct seq_file *m, void *unused)
 {
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
-	struct skl_ddb_allocation *ddb;
	struct skl_ddb_entry *entry;
-	enum pipe pipe;
-	int plane;
+	struct intel_crtc *crtc;
 
	if (INTEL_GEN(dev_priv) < 9)
		return -ENODEV;
 
	drm_modeset_lock_all(dev);
 
-	ddb = &dev_priv->wm.skl_hw.ddb;
-
	seq_printf(m, "%-15s%8s%8s%8s\n", "", "Start", "End", "Size");
 
-	for_each_pipe(dev_priv, pipe) {
+	for_each_intel_crtc(&dev_priv->drm, crtc) {
+		struct intel_crtc_state *crtc_state =
+			to_intel_crtc_state(crtc->base.state);
+		enum pipe pipe = crtc->pipe;
+		enum plane_id plane_id;
+
		seq_printf(m, "Pipe %c\n", pipe_name(pipe));
 
-		for_each_universal_plane(dev_priv, pipe, plane) {
-			entry = &ddb->plane[pipe][plane];
-			seq_printf(m, "  Plane%-8d%8u%8u%8u\n", plane + 1,
+		for_each_plane_id_on_crtc(crtc, plane_id) {
+			entry = &crtc_state->wm.skl.plane_ddb_y[plane_id];
+			seq_printf(m, "  Plane%-8d%8u%8u%8u\n", plane_id + 1,
				   entry->start, entry->end,
				   skl_ddb_entry_size(entry));
		}
 
-		entry = &ddb->plane[pipe][PLANE_CURSOR];
+		entry = &crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR];
		seq_printf(m, "  %-13s%8u%8u%8u\n", "Cursor", entry->start,
			   entry->end, skl_ddb_entry_size(entry));
	}
@@ -4592,6 +4595,13 @@ static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data)
	struct drm_i915_private *dev_priv = m->private;
	struct i915_hotplug *hotplug = &dev_priv->hotplug;
 
+	/* Synchronize with everything first in case there's been an HPD
+	 * storm, but we haven't finished handling it in the kernel yet
+	 */
+	synchronize_irq(dev_priv->drm.irq);
+	flush_work(&dev_priv->hotplug.dig_port_work);
+	flush_work(&dev_priv->hotplug.hotplug_work);
+
	seq_printf(m, "Threshold: %d\n", hotplug->hpd_storm_threshold);
	seq_printf(m, "Detected: %s\n",
		   yesno(delayed_work_pending(&hotplug->reenable_work)));
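
The synchronize_irq()/flush_work() calls added above follow the usual quiesce-before-read pattern: make sure the interrupt handler and any work it queued have finished before sampling state they update. A generic sketch under that assumption; struct my_dev is illustrative, not i915 code:

    #include <linux/interrupt.h>
    #include <linux/workqueue.h>

    struct my_dev {                         /* hypothetical device context */
            int irq;
            struct work_struct hotplug_work;
    };

    static void quiesce_hotplug_state(struct my_dev *dev)
    {
            synchronize_irq(dev->irq);      /* no IRQ handler still running */
            flush_work(&dev->hotplug_work); /* queued work has completed */
    }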
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index b1d23c73c147..b310a897a4ad 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -53,6 +53,7 @@
 #include "i915_vgpu.h"
 #include "intel_drv.h"
 #include "intel_uc.h"
+#include "intel_workarounds.h"
 
 static struct drm_driver driver;
 
@@ -287,7 +288,7 @@ static void intel_detect_pch(struct drm_i915_private *dev_priv)
	 * Use PCH_NOP (PCH but no South Display) for PCH platforms without
	 * display.
	 */
-	if (pch && INTEL_INFO(dev_priv)->num_pipes == 0) {
+	if (pch && !HAS_DISPLAY(dev_priv)) {
		DRM_DEBUG_KMS("Display disabled, reverting to NOP PCH\n");
		dev_priv->pch_type = PCH_NOP;
		dev_priv->pch_id = 0;
@@ -645,7 +646,7 @@ static int i915_load_modeset_init(struct drm_device *dev)
	if (i915_inject_load_failure())
		return -ENODEV;
 
-	if (INTEL_INFO(dev_priv)->num_pipes) {
+	if (HAS_DISPLAY(dev_priv)) {
		ret = drm_vblank_init(&dev_priv->drm,
				      INTEL_INFO(dev_priv)->num_pipes);
		if (ret)
@@ -696,7 +697,7 @@ static int i915_load_modeset_init(struct drm_device *dev)
 
	intel_overlay_setup(dev_priv);
 
-	if (INTEL_INFO(dev_priv)->num_pipes == 0)
+	if (!HAS_DISPLAY(dev_priv))
		return 0;
 
	ret = intel_fbdev_init(dev);
@@ -868,6 +869,7 @@ static void intel_detect_preproduction_hw(struct drm_i915_private *dev_priv)
	pre |= IS_HSW_EARLY_SDV(dev_priv);
	pre |= IS_SKL_REVID(dev_priv, 0, SKL_REVID_F0);
	pre |= IS_BXT_REVID(dev_priv, 0, BXT_REVID_B_LAST);
+	pre |= IS_KBL_REVID(dev_priv, 0, KBL_REVID_A0);
 
	if (pre) {
		DRM_ERROR("This is a pre-production stepping. "
@@ -1383,6 +1385,20 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
		}
	}
 
+	if (HAS_EXECLISTS(dev_priv)) {
+		/*
+		 * Older GVT emulation depends upon intercepting CSB mmio,
+		 * which we no longer use, preferring to use the HWSP cache
+		 * instead.
+		 */
+		if (intel_vgpu_active(dev_priv) &&
+		    !intel_vgpu_has_hwsp_emulation(dev_priv)) {
+			i915_report_error(dev_priv,
+					  "old vGPU host found, support for HWSP emulation required\n");
+			return -ENXIO;
+		}
+	}
+
	intel_sanitize_options(dev_priv);
 
	i915_perf_init(dev_priv);
@@ -1452,6 +1468,7 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
 
	intel_uncore_sanitize(dev_priv);
 
+	intel_gt_init_workarounds(dev_priv);
	i915_gem_load_init_fences(dev_priv);
 
	/* On the 945G/GM, the chipset reports the MSI capability on the
@@ -1551,7 +1568,7 @@ static void i915_driver_register(struct drm_i915_private *dev_priv)
	} else
		DRM_ERROR("Failed to register driver for userspace access!\n");
 
-	if (INTEL_INFO(dev_priv)->num_pipes) {
+	if (HAS_DISPLAY(dev_priv)) {
		/* Must be done after probing outputs */
		intel_opregion_register(dev_priv);
		acpi_video_register();
@@ -1575,7 +1592,7 @@ static void i915_driver_register(struct drm_i915_private *dev_priv)
	 * We need to coordinate the hotplugs with the asynchronous fbdev
	 * configuration, for which we use the fbdev->async_cookie.
	 */
-	if (INTEL_INFO(dev_priv)->num_pipes)
+	if (HAS_DISPLAY(dev_priv))
		drm_kms_helper_poll_init(dev);
 
	intel_power_domains_enable(dev_priv);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 4064e49dbf70..b1c31967194b 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -53,6 +53,7 @@
 #include <drm/drm_auth.h>
 #include <drm/drm_cache.h>
 #include <drm/drm_util.h>
+#include <drm/drm_dsc.h>
 
 #include "i915_fixed.h"
 #include "i915_params.h"
@@ -68,6 +69,7 @@
 #include "intel_ringbuffer.h"
 #include "intel_uncore.h"
 #include "intel_wopcm.h"
+#include "intel_workarounds.h"
 #include "intel_uc.h"
 
 #include "i915_gem.h"
@@ -88,8 +90,8 @@
 
 #define DRIVER_NAME		"i915"
 #define DRIVER_DESC		"Intel Graphics"
-#define DRIVER_DATE		"20181122"
-#define DRIVER_TIMESTAMP	1542898187
+#define DRIVER_DATE		"20181204"
+#define DRIVER_TIMESTAMP	1543944377
 
 /* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and
  * WARN_ON()) for hw state sanity checks to check for unexpected conditions
@@ -494,6 +496,7 @@ struct i915_psr {
	bool sink_support;
	bool prepared, enabled;
	struct intel_dp *dp;
+	enum pipe pipe;
	bool active;
	struct work_struct work;
	unsigned busy_frontbuffer_bits;
@@ -504,6 +507,8 @@
	u8 sink_sync_latency;
	ktime_t last_entry_attempt;
	ktime_t last_exit;
+	bool sink_not_reliable;
+	bool irq_aux_error;
 };
 
 enum intel_pch {
@@ -1093,9 +1098,6 @@ static inline bool skl_ddb_entry_equal(const struct skl_ddb_entry *e1,
 }
 
 struct skl_ddb_allocation {
-	/* packed/y */
-	struct skl_ddb_entry plane[I915_MAX_PIPES][I915_MAX_PLANES];
-	struct skl_ddb_entry uv_plane[I915_MAX_PIPES][I915_MAX_PLANES];
	u8 enabled_slices; /* GEN11 has configurable 2 slices */
 };
 
@@ -1188,20 +1190,6 @@ struct i915_frontbuffer_tracking {
	unsigned flip_bits;
 };
 
-struct i915_wa_reg {
-	u32 addr;
-	u32 value;
-	/* bitmask representing WA bits */
-	u32 mask;
-};
-
-#define I915_MAX_WA_REGS 16
-
-struct i915_workarounds {
-	struct i915_wa_reg reg[I915_MAX_WA_REGS];
-	u32 count;
-};
-
 struct i915_virtual_gpu {
	bool active;
	u32 caps;
@@ -1651,7 +1639,7 @@
 
	int dpio_phy_iosf_port[I915_NUM_PHYS_VLV];
 
-	struct i915_workarounds workarounds;
+	struct i915_wa_list gt_wa_list;
 
	struct i915_frontbuffer_tracking fb_tracking;
 
@@ -1995,6 +1983,8 @@
		struct delayed_work idle_work;
 
		ktime_t last_init_time;
+
+		struct i915_vma *scratch;
	} gt;
 
	/* perform PHY state sanity checks? */
@@ -2448,9 +2438,9 @@ intel_info(const struct drm_i915_private *dev_priv)
	((sizes) & ~(dev_priv)->info.page_sizes) == 0; \
 })
 
-#define HAS_OVERLAY(dev_priv)		 ((dev_priv)->info.has_overlay)
+#define HAS_OVERLAY(dev_priv)		 ((dev_priv)->info.display.has_overlay)
 #define OVERLAY_NEEDS_PHYSICAL(dev_priv) \
-		((dev_priv)->info.overlay_needs_physical)
+		((dev_priv)->info.display.overlay_needs_physical)
 
 /* Early gen2 have a totally busted CS tlb and require pinned batches. */
 #define HAS_BROKEN_CS_TLB(dev_priv)	(IS_I830(dev_priv) || IS_I845G(dev_priv))
@@ -2471,31 +2461,31 @@
 #define HAS_128_BYTE_Y_TILING(dev_priv) (!IS_GEN2(dev_priv) && \
					 !(IS_I915G(dev_priv) || \
					   IS_I915GM(dev_priv)))
-#define SUPPORTS_TV(dev_priv)		((dev_priv)->info.supports_tv)
-#define I915_HAS_HOTPLUG(dev_priv)	((dev_priv)->info.has_hotplug)
+#define SUPPORTS_TV(dev_priv)		((dev_priv)->info.display.supports_tv)
+#define I915_HAS_HOTPLUG(dev_priv)	((dev_priv)->info.display.has_hotplug)
 
 #define HAS_FW_BLC(dev_priv)	(INTEL_GEN(dev_priv) > 2)
-#define HAS_FBC(dev_priv)	((dev_priv)->info.has_fbc)
+#define HAS_FBC(dev_priv)	((dev_priv)->info.display.has_fbc)
 #define HAS_CUR_FBC(dev_priv)	(!HAS_GMCH_DISPLAY(dev_priv) && INTEL_GEN(dev_priv) >= 7)
 
 #define HAS_IPS(dev_priv)	(IS_HSW_ULT(dev_priv) || IS_BROADWELL(dev_priv))
 
-#define HAS_DP_MST(dev_priv)	((dev_priv)->info.has_dp_mst)
+#define HAS_DP_MST(dev_priv)	((dev_priv)->info.display.has_dp_mst)
 
-#define HAS_DDI(dev_priv)		 ((dev_priv)->info.has_ddi)
+#define HAS_DDI(dev_priv)		 ((dev_priv)->info.display.has_ddi)
 #define HAS_FPGA_DBG_UNCLAIMED(dev_priv) ((dev_priv)->info.has_fpga_dbg)
-#define HAS_PSR(dev_priv)		 ((dev_priv)->info.has_psr)
+#define HAS_PSR(dev_priv)		 ((dev_priv)->info.display.has_psr)
 
 #define HAS_RC6(dev_priv)		 ((dev_priv)->info.has_rc6)
 #define HAS_RC6p(dev_priv)		 ((dev_priv)->info.has_rc6p)
 #define HAS_RC6pp(dev_priv)		 (false) /* HW was never validated */
 
-#define HAS_CSR(dev_priv)	((dev_priv)->info.has_csr)
+#define HAS_CSR(dev_priv)	((dev_priv)->info.display.has_csr)
 
 #define HAS_RUNTIME_PM(dev_priv) ((dev_priv)->info.has_runtime_pm)
 #define HAS_64BIT_RELOC(dev_priv) ((dev_priv)->info.has_64bit_reloc)
 
-#define HAS_IPC(dev_priv)		 ((dev_priv)->info.has_ipc)
+#define HAS_IPC(dev_priv)		 ((dev_priv)->info.display.has_ipc)
 
 /*
  * For now, anything with a GuC requires uCode loading, and then supports
@@ -2556,7 +2546,7 @@ intel_info(const struct drm_i915_private *dev_priv)
 #define HAS_PCH_NOP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_NOP)
 #define HAS_PCH_SPLIT(dev_priv) (INTEL_PCH_TYPE(dev_priv) != PCH_NONE)
 
-#define HAS_GMCH_DISPLAY(dev_priv) ((dev_priv)->info.has_gmch_display)
+#define HAS_GMCH_DISPLAY(dev_priv) ((dev_priv)->info.display.has_gmch_display)
 
 #define HAS_LSPCON(dev_priv) (INTEL_GEN(dev_priv) >= 9)
 
@@ -2568,6 +2558,8 @@ intel_info(const struct drm_i915_private *dev_priv)
 #define GT_FREQUENCY_MULTIPLIER 50
 #define GEN9_FREQ_SCALER 3
 
+#define HAS_DISPLAY(dev_priv) (INTEL_INFO(dev_priv)->num_pipes > 0)
+
 #include "i915_trace.h"
 
 static inline bool intel_vtd_active(void)
@@ -3340,6 +3332,9 @@ extern void intel_rps_mark_interactive(struct drm_i915_private *i915,
				       bool interactive);
 extern bool intel_set_memory_cxsr(struct drm_i915_private *dev_priv,
				  bool enable);
+void intel_dsc_enable(struct intel_encoder *encoder,
+		      const struct intel_crtc_state *crtc_state);
+void intel_dsc_disable(const struct intel_crtc_state *crtc_state);
 
 int i915_reg_read_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file);
@@ -3720,4 +3715,9 @@ static inline int intel_hws_csb_write_index(struct drm_i915_private *i915)
	return I915_HWS_CSB_WRITE_INDEX;
 }
 
+static inline u32 i915_scratch_offset(const struct drm_i915_private *i915)
+{
+	return i915_ggtt_offset(i915->gt.scratch);
+}
+
 #endif
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index c55b1f75c980..d36a9755ad91 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -3309,16 +3309,6 @@ void i915_gem_reset_finish(struct drm_i915_private *dev_priv)
 
 static void nop_submit_request(struct i915_request *request)
 {
-	GEM_TRACE("%s fence %llx:%d -> -EIO\n",
-		  request->engine->name,
-		  request->fence.context, request->fence.seqno);
-	dma_fence_set_error(&request->fence, -EIO);
-
-	i915_request_submit(request);
-}
-
-static void nop_complete_submit_request(struct i915_request *request)
-{
	unsigned long flags;
 
	GEM_TRACE("%s fence %llx:%d -> -EIO\n",
@@ -3354,57 +3344,33 @@ void i915_gem_set_wedged(struct drm_i915_private *i915)
	 * rolling the global seqno forward (since this would complete requests
	 * for which we haven't set the fence error to EIO yet).
	 */
-	for_each_engine(engine, i915, id) {
+	for_each_engine(engine, i915, id)
		i915_gem_reset_prepare_engine(engine);
 
-		engine->submit_request = nop_submit_request;
-		engine->schedule = NULL;
-	}
-	i915->caps.scheduler = 0;
-
	/* Even if the GPU reset fails, it should still stop the engines */
	if (INTEL_GEN(i915) >= 5)
		intel_gpu_reset(i915, ALL_ENGINES);
 
-	/*
-	 * Make sure no one is running the old callback before we proceed with
-	 * cancelling requests and resetting the completion tracking. Otherwise
-	 * we might submit a request to the hardware which never completes.
-	 */
-	synchronize_rcu();
-
	for_each_engine(engine, i915, id) {
-		/* Mark all executing requests as skipped */
-		engine->cancel_requests(engine);
-
-		/*
-		 * Only once we've force-cancelled all in-flight requests can we
-		 * start to complete all requests.
-		 */
-		engine->submit_request = nop_complete_submit_request;
+		engine->submit_request = nop_submit_request;
+		engine->schedule = NULL;
	}
+	i915->caps.scheduler = 0;
 
	/*
	 * Make sure no request can slip through without getting completed by
	 * either this call here to intel_engine_init_global_seqno, or the one
-	 * in nop_complete_submit_request.
+	 * in nop_submit_request.
	 */
	synchronize_rcu();
 
-	for_each_engine(engine, i915, id) {
-		unsigned long flags;
-
-		/*
-		 * Mark all pending requests as complete so that any concurrent
-		 * (lockless) lookup doesn't try and wait upon the request as we
-		 * reset it.
-		 */
-		spin_lock_irqsave(&engine->timeline.lock, flags);
-		intel_engine_init_global_seqno(engine,
-					       intel_engine_last_submit(engine));
-		spin_unlock_irqrestore(&engine->timeline.lock, flags);
+	/* Mark all executing requests as skipped */
+	for_each_engine(engine, i915, id)
+		engine->cancel_requests(engine);
 
+	for_each_engine(engine, i915, id) {
		i915_gem_reset_finish_engine(engine);
+		intel_engine_wakeup(engine);
	}
 
 out:
@@ -5334,7 +5300,10 @@ int i915_gem_init_hw(struct drm_i915_private *dev_priv)
	I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev_priv) ?
		   LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);
 
-	intel_gt_workarounds_apply(dev_priv);
+	/* Apply the GT workarounds... */
+	intel_gt_apply_workarounds(dev_priv);
+	/* ...and determine whether they are sticking. */
+	intel_gt_verify_workarounds(dev_priv, "init");
 
	i915_gem_init_swizzling(dev_priv);
 
@@ -5529,6 +5498,44 @@ err_active:
		goto out_ctx;
 }
 
+static int
+i915_gem_init_scratch(struct drm_i915_private *i915, unsigned int size)
+{
+	struct drm_i915_gem_object *obj;
+	struct i915_vma *vma;
+	int ret;
+
+	obj = i915_gem_object_create_stolen(i915, size);
+	if (!obj)
+		obj = i915_gem_object_create_internal(i915, size);
+	if (IS_ERR(obj)) {
+		DRM_ERROR("Failed to allocate scratch page\n");
+		return PTR_ERR(obj);
+	}
+
+	vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
+	if (IS_ERR(vma)) {
+		ret = PTR_ERR(vma);
+		goto err_unref;
+	}
+
+	ret = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
+	if (ret)
+		goto err_unref;
+
+	i915->gt.scratch = vma;
+	return 0;
+
+err_unref:
+	i915_gem_object_put(obj);
+	return ret;
+}
+
+static void i915_gem_fini_scratch(struct drm_i915_private *i915)
+{
+	i915_vma_unpin_and_release(&i915->gt.scratch, 0);
+}
+
 int i915_gem_init(struct drm_i915_private *dev_priv)
 {
	int ret;
@@ -5575,12 +5582,19 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
		goto err_unlock;
	}
 
-	ret = i915_gem_contexts_init(dev_priv);
+	ret = i915_gem_init_scratch(dev_priv,
+				    IS_GEN2(dev_priv) ? SZ_256K : PAGE_SIZE);
	if (ret) {
		GEM_BUG_ON(ret == -EIO);
		goto err_ggtt;
	}
 
+	ret = i915_gem_contexts_init(dev_priv);
+	if (ret) {
+		GEM_BUG_ON(ret == -EIO);
+		goto err_scratch;
+	}
+
	ret = intel_engines_init(dev_priv);
	if (ret) {
		GEM_BUG_ON(ret == -EIO);
@@ -5653,6 +5667,8 @@ err_pm:
 err_context:
	if (ret != -EIO)
		i915_gem_contexts_fini(dev_priv);
+err_scratch:
+	i915_gem_fini_scratch(dev_priv);
 err_ggtt:
 err_unlock:
	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
@@ -5704,8 +5720,11 @@ void i915_gem_fini(struct drm_i915_private *dev_priv)
	intel_uc_fini(dev_priv);
	i915_gem_cleanup_engines(dev_priv);
	i915_gem_contexts_fini(dev_priv);
+	i915_gem_fini_scratch(dev_priv);
	mutex_unlock(&dev_priv->drm.struct_mutex);
 
+	intel_wa_list_free(&dev_priv->gt_wa_list);
+
	intel_cleanup_gt_powersave(dev_priv);
 
	intel_uc_fini_misc(dev_priv);
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index b97963db0287..371c07087095 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -535,16 +535,12 @@ static bool needs_preempt_context(struct drm_i915_private *i915)
 int i915_gem_contexts_init(struct drm_i915_private *dev_priv)
 {
	struct i915_gem_context *ctx;
-	int ret;
 
	/* Reassure ourselves we are only called once */
	GEM_BUG_ON(dev_priv->kernel_context);
	GEM_BUG_ON(dev_priv->preempt_context);
 
-	ret = intel_ctx_workarounds_init(dev_priv);
-	if (ret)
-		return ret;
-
+	intel_engine_init_ctx_wa(dev_priv->engine[RCS]);
	init_contexts(dev_priv);
 
	/* lowest priority; idle task */
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 8123bf0e4807..07465123c166 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -27,11 +27,14 @@
  *
  */
 
-#include <linux/utsname.h>
+#include <linux/ascii85.h>
+#include <linux/nmi.h>
+#include <linux/scatterlist.h>
 #include <linux/stop_machine.h>
+#include <linux/utsname.h>
 #include <linux/zlib.h>
+
 #include <drm/drm_print.h>
-#include <linux/ascii85.h>
 
 #include "i915_gpu_error.h"
 #include "i915_drv.h"
@@ -77,112 +80,110 @@ static const char *purgeable_flag(int purgeable)
	return purgeable ? " purgeable" : "";
 }
 
-static bool __i915_error_ok(struct drm_i915_error_state_buf *e)
+static void __sg_set_buf(struct scatterlist *sg,
+			 void *addr, unsigned int len, loff_t it)
 {
-
-	if (!e->err && WARN(e->bytes > (e->size - 1), "overflow")) {
-		e->err = -ENOSPC;
-		return false;
-	}
-
-	if (e->bytes == e->size - 1 || e->err)
-		return false;
-
-	return true;
+	sg->page_link = (unsigned long)virt_to_page(addr);
+	sg->offset = offset_in_page(addr);
+	sg->length = len;
+	sg->dma_address = it;
 }
 
-static bool __i915_error_seek(struct drm_i915_error_state_buf *e,
-			      unsigned len)
+static bool __i915_error_grow(struct drm_i915_error_state_buf *e, size_t len)
 {
-	if (e->pos + len <= e->start) {
-		e->pos += len;
+	if (!len)
		return false;
-	}
 
-	/* First vsnprintf needs to fit in its entirety for memmove */
-	if (len >= e->size) {
-		e->err = -EIO;
-		return false;
-	}
+	if (e->bytes + len + 1 <= e->size)
+		return true;
 
-	return true;
-}
+	if (e->bytes) {
+		__sg_set_buf(e->cur++, e->buf, e->bytes, e->iter);
+		e->iter += e->bytes;
+		e->buf = NULL;
+		e->bytes = 0;
+	}
 
-static void __i915_error_advance(struct drm_i915_error_state_buf *e,
-				 unsigned len)
-{
-	/* If this is first printf in this window, adjust it so that
-	 * start position matches start of the buffer
-	 */
+	if (e->cur == e->end) {
+		struct scatterlist *sgl;
 
-	if (e->pos < e->start) {
-		const size_t off = e->start - e->pos;
+		sgl = (typeof(sgl))__get_free_page(GFP_KERNEL);
+		if (!sgl) {
+			e->err = -ENOMEM;
+			return false;
+		}
 
-		/* Should not happen but be paranoid */
-		if (off > len || e->bytes) {
-			e->err = -EIO;
-			return;
+		if (e->cur) {
+			e->cur->offset = 0;
+			e->cur->length = 0;
+			e->cur->page_link =
+				(unsigned long)sgl | SG_CHAIN;
+		} else {
+			e->sgl = sgl;
		}
 
-		memmove(e->buf, e->buf + off, len - off);
-		e->bytes = len - off;
-		e->pos = e->start;
-		return;
+		e->cur = sgl;
+		e->end = sgl + SG_MAX_SINGLE_ALLOC - 1;
	}
 
-	e->bytes += len;
-	e->pos += len;
+	e->size = ALIGN(len + 1, SZ_64K);
+	e->buf = kmalloc(e->size, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
+	if (!e->buf) {
+		e->size = PAGE_ALIGN(len + 1);
+		e->buf = kmalloc(e->size, GFP_KERNEL);
+	}
+	if (!e->buf) {
+		e->err = -ENOMEM;
+		return false;
+	}
+
+	return true;
 }
 
 __printf(2, 0)
 static void i915_error_vprintf(struct drm_i915_error_state_buf *e,
-			       const char *f, va_list args)
+			       const char *fmt, va_list args)
 {
-	unsigned len;
+	va_list ap;
+	int len;
 
-	if (!__i915_error_ok(e))
+	if (e->err)
		return;
 
-	/* Seek the first printf which is hits start position */
-	if (e->pos < e->start) {
-		va_list tmp;
-
-		va_copy(tmp, args);
-		len = vsnprintf(NULL, 0, f, tmp);
-		va_end(tmp);
-
-		if (!__i915_error_seek(e, len))
-			return;
+	va_copy(ap, args);
+	len = vsnprintf(NULL, 0, fmt, ap);
+	va_end(ap);
+	if (len <= 0) {
+		e->err = len;
+		return;
	}
 
-	len = vsnprintf(e->buf + e->bytes, e->size - e->bytes, f, args);
-	if (len >= e->size - e->bytes)
-		len = e->size - e->bytes - 1;
+	if (!__i915_error_grow(e, len))
+		return;
 
-	__i915_error_advance(e, len);
+	GEM_BUG_ON(e->bytes >= e->size);
+	len = vscnprintf(e->buf + e->bytes, e->size - e->bytes, fmt, args);
+	if (len < 0) {
+		e->err = len;
+		return;
+	}
+	e->bytes += len;
 }
 
-static void i915_error_puts(struct drm_i915_error_state_buf *e,
-			    const char *str)
+static void i915_error_puts(struct drm_i915_error_state_buf *e, const char *str)
 {
	unsigned len;
 
-	if (!__i915_error_ok(e))
+	if (e->err || !str)
		return;
 
	len = strlen(str);
+	if (!__i915_error_grow(e, len))
+		return;
 
-	/* Seek the first printf which is hits start position */
-	if (e->pos < e->start) {
-		if (!__i915_error_seek(e, len))
-			return;
-	}
-
-	if (len >= e->size - e->bytes)
-		len = e->size - e->bytes - 1;
+	GEM_BUG_ON(e->bytes + len > e->size);
	memcpy(e->buf + e->bytes, str, len);
-
-	__i915_error_advance(e, len);
+	e->bytes += len;
 }
 
 #define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
@@ -268,6 +269,8 @@ static int compress_page(struct compress *c,
 
		if (zlib_deflate(zstream, Z_NO_FLUSH) != Z_OK)
			return -EIO;
+
+		touch_nmi_watchdog();
	} while (zstream->avail_in);
 
	/* Fallback to uncompressed if we increase size? */
@@ -635,21 +638,29 @@ static void err_print_uc(struct drm_i915_error_state_buf *m,
	print_error_obj(m, NULL, "GuC log buffer", error_uc->guc_log);
 }
 
-int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
-			    const struct i915_gpu_state *error)
+static void err_free_sgl(struct scatterlist *sgl)
 {
-	struct drm_i915_private *dev_priv = m->i915;
-	struct drm_i915_error_object *obj;
-	struct timespec64 ts;
-	int i, j;
+	while (sgl) {
+		struct scatterlist *sg;
 
-	if (!error) {
-		err_printf(m, "No error state collected\n");
-		return 0;
+		for (sg = sgl; !sg_is_chain(sg); sg++) {
+			kfree(sg_virt(sg));
+			if (sg_is_last(sg))
+				break;
+		}
+
+		sg = sg_is_last(sg) ? NULL : sg_chain_ptr(sg);
+		free_page((unsigned long)sgl);
+		sgl = sg;
	}
+}
 
-	if (IS_ERR(error))
-		return PTR_ERR(error);
+static void __err_print_to_sgl(struct drm_i915_error_state_buf *m,
+			       struct i915_gpu_state *error)
+{
+	struct drm_i915_error_object *obj;
+	struct timespec64 ts;
+	int i, j;
 
	if (*error->error_msg)
		err_printf(m, "%s\n", error->error_msg);
@@ -683,12 +694,12 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
	err_printf(m, "Reset count: %u\n", error->reset_count);
	err_printf(m, "Suspend count: %u\n", error->suspend_count);
	err_printf(m, "Platform: %s\n", intel_platform_name(error->device_info.platform));
-	err_print_pciid(m, error->i915);
+	err_print_pciid(m, m->i915);
 
	err_printf(m, "IOMMU enabled?: %d\n", error->iommu);
 
-	if (HAS_CSR(dev_priv)) {
-		struct intel_csr *csr = &dev_priv->csr;
+	if (HAS_CSR(m->i915)) {
+		struct intel_csr *csr = &m->i915->csr;
 
		err_printf(m, "DMC loaded: %s\n",
			   yesno(csr->dmc_payload != NULL));
@@ -708,22 +719,23 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m, | |||
708 | err_printf(m, "FORCEWAKE: 0x%08x\n", error->forcewake); | 719 | err_printf(m, "FORCEWAKE: 0x%08x\n", error->forcewake); |
709 | err_printf(m, "DERRMR: 0x%08x\n", error->derrmr); | 720 | err_printf(m, "DERRMR: 0x%08x\n", error->derrmr); |
710 | err_printf(m, "CCID: 0x%08x\n", error->ccid); | 721 | err_printf(m, "CCID: 0x%08x\n", error->ccid); |
711 | err_printf(m, "Missed interrupts: 0x%08lx\n", dev_priv->gpu_error.missed_irq_rings); | 722 | err_printf(m, "Missed interrupts: 0x%08lx\n", |
723 | m->i915->gpu_error.missed_irq_rings); | ||
712 | 724 | ||
713 | for (i = 0; i < error->nfence; i++) | 725 | for (i = 0; i < error->nfence; i++) |
714 | err_printf(m, " fence[%d] = %08llx\n", i, error->fence[i]); | 726 | err_printf(m, " fence[%d] = %08llx\n", i, error->fence[i]); |
715 | 727 | ||
716 | if (INTEL_GEN(dev_priv) >= 6) { | 728 | if (INTEL_GEN(m->i915) >= 6) { |
717 | err_printf(m, "ERROR: 0x%08x\n", error->error); | 729 | err_printf(m, "ERROR: 0x%08x\n", error->error); |
718 | 730 | ||
719 | if (INTEL_GEN(dev_priv) >= 8) | 731 | if (INTEL_GEN(m->i915) >= 8) |
720 | err_printf(m, "FAULT_TLB_DATA: 0x%08x 0x%08x\n", | 732 | err_printf(m, "FAULT_TLB_DATA: 0x%08x 0x%08x\n", |
721 | error->fault_data1, error->fault_data0); | 733 | error->fault_data1, error->fault_data0); |
722 | 734 | ||
723 | err_printf(m, "DONE_REG: 0x%08x\n", error->done_reg); | 735 | err_printf(m, "DONE_REG: 0x%08x\n", error->done_reg); |
724 | } | 736 | } |
725 | 737 | ||
726 | if (IS_GEN7(dev_priv)) | 738 | if (IS_GEN7(m->i915)) |
727 | err_printf(m, "ERR_INT: 0x%08x\n", error->err_int); | 739 | err_printf(m, "ERR_INT: 0x%08x\n", error->err_int); |
728 | 740 | ||
729 | for (i = 0; i < ARRAY_SIZE(error->engine); i++) { | 741 | for (i = 0; i < ARRAY_SIZE(error->engine); i++) { |
@@ -745,7 +757,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m, | |||
745 | 757 | ||
746 | len += scnprintf(buf + len, sizeof(buf), "%s%s", | 758 | len += scnprintf(buf + len, sizeof(buf), "%s%s", |
747 | first ? "" : ", ", | 759 | first ? "" : ", ", |
748 | dev_priv->engine[j]->name); | 760 | m->i915->engine[j]->name); |
749 | first = 0; | 761 | first = 0; |
750 | } | 762 | } |
751 | scnprintf(buf + len, sizeof(buf), ")"); | 763 | scnprintf(buf + len, sizeof(buf), ")"); |
@@ -763,7 +775,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m, | |||
763 | 775 | ||
764 | obj = ee->batchbuffer; | 776 | obj = ee->batchbuffer; |
765 | if (obj) { | 777 | if (obj) { |
766 | err_puts(m, dev_priv->engine[i]->name); | 778 | err_puts(m, m->i915->engine[i]->name); |
767 | if (ee->context.pid) | 779 | if (ee->context.pid) |
768 | err_printf(m, " (submitted by %s [%d], ctx %d [%d], score %d%s)", | 780 | err_printf(m, " (submitted by %s [%d], ctx %d [%d], score %d%s)", |
769 | ee->context.comm, | 781 | ee->context.comm, |
@@ -775,16 +787,16 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m, | |||
775 | err_printf(m, " --- gtt_offset = 0x%08x %08x\n", | 787 | err_printf(m, " --- gtt_offset = 0x%08x %08x\n", |
776 | upper_32_bits(obj->gtt_offset), | 788 | upper_32_bits(obj->gtt_offset), |
777 | lower_32_bits(obj->gtt_offset)); | 789 | lower_32_bits(obj->gtt_offset)); |
778 | print_error_obj(m, dev_priv->engine[i], NULL, obj); | 790 | print_error_obj(m, m->i915->engine[i], NULL, obj); |
779 | } | 791 | } |
780 | 792 | ||
781 | for (j = 0; j < ee->user_bo_count; j++) | 793 | for (j = 0; j < ee->user_bo_count; j++) |
782 | print_error_obj(m, dev_priv->engine[i], | 794 | print_error_obj(m, m->i915->engine[i], |
783 | "user", ee->user_bo[j]); | 795 | "user", ee->user_bo[j]); |
784 | 796 | ||
785 | if (ee->num_requests) { | 797 | if (ee->num_requests) { |
786 | err_printf(m, "%s --- %d requests\n", | 798 | err_printf(m, "%s --- %d requests\n", |
787 | dev_priv->engine[i]->name, | 799 | m->i915->engine[i]->name, |
788 | ee->num_requests); | 800 | ee->num_requests); |
789 | for (j = 0; j < ee->num_requests; j++) | 801 | for (j = 0; j < ee->num_requests; j++) |
790 | error_print_request(m, " ", | 802 | error_print_request(m, " ", |
@@ -794,10 +806,10 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m, | |||
794 | 806 | ||
795 | if (IS_ERR(ee->waiters)) { | 807 | if (IS_ERR(ee->waiters)) { |
796 | err_printf(m, "%s --- ? waiters [unable to acquire spinlock]\n", | 808 | err_printf(m, "%s --- ? waiters [unable to acquire spinlock]\n", |
797 | dev_priv->engine[i]->name); | 809 | m->i915->engine[i]->name); |
798 | } else if (ee->num_waiters) { | 810 | } else if (ee->num_waiters) { |
799 | err_printf(m, "%s --- %d waiters\n", | 811 | err_printf(m, "%s --- %d waiters\n", |
800 | dev_priv->engine[i]->name, | 812 | m->i915->engine[i]->name, |
801 | ee->num_waiters); | 813 | ee->num_waiters); |
802 | for (j = 0; j < ee->num_waiters; j++) { | 814 | for (j = 0; j < ee->num_waiters; j++) { |
803 | err_printf(m, " seqno 0x%08x for %s [%d]\n", | 815 | err_printf(m, " seqno 0x%08x for %s [%d]\n", |
@@ -807,22 +819,22 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m, | |||
807 | } | 819 | } |
808 | } | 820 | } |
809 | 821 | ||
810 | print_error_obj(m, dev_priv->engine[i], | 822 | print_error_obj(m, m->i915->engine[i], |
811 | "ringbuffer", ee->ringbuffer); | 823 | "ringbuffer", ee->ringbuffer); |
812 | 824 | ||
813 | print_error_obj(m, dev_priv->engine[i], | 825 | print_error_obj(m, m->i915->engine[i], |
814 | "HW Status", ee->hws_page); | 826 | "HW Status", ee->hws_page); |
815 | 827 | ||
816 | print_error_obj(m, dev_priv->engine[i], | 828 | print_error_obj(m, m->i915->engine[i], |
817 | "HW context", ee->ctx); | 829 | "HW context", ee->ctx); |
818 | 830 | ||
819 | print_error_obj(m, dev_priv->engine[i], | 831 | print_error_obj(m, m->i915->engine[i], |
820 | "WA context", ee->wa_ctx); | 832 | "WA context", ee->wa_ctx); |
821 | 833 | ||
822 | print_error_obj(m, dev_priv->engine[i], | 834 | print_error_obj(m, m->i915->engine[i], |
823 | "WA batchbuffer", ee->wa_batchbuffer); | 835 | "WA batchbuffer", ee->wa_batchbuffer); |
824 | 836 | ||
825 | print_error_obj(m, dev_priv->engine[i], | 837 | print_error_obj(m, m->i915->engine[i], |
826 | "NULL context", ee->default_state); | 838 | "NULL context", ee->default_state); |
827 | } | 839 | } |
828 | 840 | ||
@@ -835,43 +847,107 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m, | |||
835 | err_print_capabilities(m, &error->device_info, &error->driver_caps); | 847 | err_print_capabilities(m, &error->device_info, &error->driver_caps); |
836 | err_print_params(m, &error->params); | 848 | err_print_params(m, &error->params); |
837 | err_print_uc(m, &error->uc); | 849 | err_print_uc(m, &error->uc); |
850 | } | ||
851 | |||
852 | static int err_print_to_sgl(struct i915_gpu_state *error) | ||
853 | { | ||
854 | struct drm_i915_error_state_buf m; | ||
855 | |||
856 | if (IS_ERR(error)) | ||
857 | return PTR_ERR(error); | ||
858 | |||
859 | if (READ_ONCE(error->sgl)) | ||
860 | return 0; | ||
861 | |||
862 | memset(&m, 0, sizeof(m)); | ||
863 | m.i915 = error->i915; | ||
864 | |||
865 | __err_print_to_sgl(&m, error); | ||
866 | |||
867 | if (m.buf) { | ||
868 | __sg_set_buf(m.cur++, m.buf, m.bytes, m.iter); | ||
869 | m.bytes = 0; | ||
870 | m.buf = NULL; | ||
871 | } | ||
872 | if (m.cur) { | ||
873 | GEM_BUG_ON(m.end < m.cur); | ||
874 | sg_mark_end(m.cur - 1); | ||
875 | } | ||
876 | GEM_BUG_ON(m.sgl && !m.cur); | ||
877 | |||
878 | if (m.err) { | ||
879 | err_free_sgl(m.sgl); | ||
880 | return m.err; | ||
881 | } | ||
838 | 882 | ||
839 | if (m->bytes == 0 && m->err) | 883 | if (cmpxchg(&error->sgl, NULL, m.sgl)) |
840 | return m->err; | 884 | err_free_sgl(m.sgl); |
841 | 885 | ||
842 | return 0; | 886 | return 0; |
843 | } | 887 | } |
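Reviewer note: the closing cmpxchg() publishes the rendered error text at most once: the winning thread installs its list, and any racing loser frees its duplicate. The same install-or-discard idiom in portable C11, with build-and-cache names that are purely illustrative:

    #include <stdatomic.h>
    #include <stdlib.h>

    struct thing { int payload; };

    static _Atomic(struct thing *) cache;

    static struct thing *get_thing(void)
    {
        struct thing *cur = atomic_load(&cache);
        if (cur)
            return cur;

        struct thing *fresh = malloc(sizeof(*fresh));   /* "render" it */
        if (!fresh)
            return NULL;
        fresh->payload = 42;

        struct thing *expected = NULL;
        if (!atomic_compare_exchange_strong(&cache, &expected, fresh)) {
            free(fresh);        /* somebody else won the race */
            return expected;    /* use the published copy instead */
        }
        return fresh;
    }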
844 | 888 | ||
845 | int i915_error_state_buf_init(struct drm_i915_error_state_buf *ebuf, | 889 | ssize_t i915_gpu_state_copy_to_buffer(struct i915_gpu_state *error, |
846 | struct drm_i915_private *i915, | 890 | char *buf, loff_t off, size_t rem) |
847 | size_t count, loff_t pos) | ||
848 | { | 891 | { |
849 | memset(ebuf, 0, sizeof(*ebuf)); | 892 | struct scatterlist *sg; |
850 | ebuf->i915 = i915; | 893 | size_t count; |
894 | loff_t pos; | ||
895 | int err; | ||
851 | 896 | ||
852 | /* We need to have enough room to store any i915_error_state printf | 897 | if (!error || !rem) |
853 | * so that we can move it to the start position. | 898 | return 0; |
854 | */ | ||
855 | ebuf->size = count + 1 > PAGE_SIZE ? count + 1 : PAGE_SIZE; | ||
856 | ebuf->buf = kmalloc(ebuf->size, | ||
857 | GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN); | ||
858 | 899 | ||
859 | if (ebuf->buf == NULL) { | 900 | err = err_print_to_sgl(error); |
860 | ebuf->size = PAGE_SIZE; | 901 | if (err) |
861 | ebuf->buf = kmalloc(ebuf->size, GFP_KERNEL); | 902 | return err; |
862 | } | ||
863 | 903 | ||
864 | if (ebuf->buf == NULL) { | 904 | sg = READ_ONCE(error->fit); |
865 | ebuf->size = 128; | 905 | if (!sg || off < sg->dma_address) |
866 | ebuf->buf = kmalloc(ebuf->size, GFP_KERNEL); | 906 | sg = error->sgl; |
867 | } | 907 | if (!sg) |
908 | return 0; | ||
868 | 909 | ||
869 | if (ebuf->buf == NULL) | 910 | pos = sg->dma_address; |
870 | return -ENOMEM; | 911 | count = 0; |
912 | do { | ||
913 | size_t len, start; | ||
914 | |||
915 | if (sg_is_chain(sg)) { | ||
916 | sg = sg_chain_ptr(sg); | ||
917 | GEM_BUG_ON(sg_is_chain(sg)); | ||
918 | } | ||
919 | |||
920 | len = sg->length; | ||
921 | if (pos + len <= off) { | ||
922 | pos += len; | ||
923 | continue; | ||
924 | } | ||
871 | 925 | ||
872 | ebuf->start = pos; | 926 | start = sg->offset; |
927 | if (pos < off) { | ||
928 | GEM_BUG_ON(off - pos > len); | ||
929 | len -= off - pos; | ||
930 | start += off - pos; | ||
931 | pos = off; | ||
932 | } | ||
873 | 933 | ||
874 | return 0; | 934 | len = min(len, rem); |
935 | GEM_BUG_ON(!len || len > sg->length); | ||
936 | |||
937 | memcpy(buf, page_address(sg_page(sg)) + start, len); | ||
938 | |||
939 | count += len; | ||
940 | pos += len; | ||
941 | |||
942 | buf += len; | ||
943 | rem -= len; | ||
944 | if (!rem) { | ||
945 | WRITE_ONCE(error->fit, sg); | ||
946 | break; | ||
947 | } | ||
948 | } while (!sg_is_last(sg++)); | ||
949 | |||
950 | return count; | ||
875 | } | 951 | } |
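Reviewer note: the new copy_to_buffer API lets consumers drain a capture in chunks instead of sizing one huge buffer up front, and the cached error->fit entry lets a sequential reader resume without rewalking the chain from the start. A hedged caller sketch (CHUNK and consume() are hypothetical):

    /* hedged sketch: drain an i915_gpu_state through the chunked API */
    #define CHUNK 4096      /* hypothetical transfer size */

    static void drain_error_state(struct i915_gpu_state *error)
    {
        char *buf;
        loff_t off = 0;
        ssize_t got;

        buf = kmalloc(CHUNK, GFP_KERNEL);
        if (!buf)
            return;

        while ((got = i915_gpu_state_copy_to_buffer(error, buf,
                                                    off, CHUNK)) > 0) {
            consume(buf, got);  /* hypothetical sink */
            off += got;
        }

        kfree(buf);
    }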
876 | 952 | ||
877 | static void i915_error_object_free(struct drm_i915_error_object *obj) | 953 | static void i915_error_object_free(struct drm_i915_error_object *obj) |
@@ -944,6 +1020,7 @@ void __i915_gpu_state_free(struct kref *error_ref) | |||
944 | cleanup_params(error); | 1020 | cleanup_params(error); |
945 | cleanup_uc_state(error); | 1021 | cleanup_uc_state(error); |
946 | 1022 | ||
1023 | err_free_sgl(error->sgl); | ||
947 | kfree(error); | 1024 | kfree(error); |
948 | } | 1025 | } |
949 | 1026 | ||
@@ -1494,7 +1571,7 @@ static void gem_record_rings(struct i915_gpu_state *error) | |||
1494 | if (HAS_BROKEN_CS_TLB(i915)) | 1571 | if (HAS_BROKEN_CS_TLB(i915)) |
1495 | ee->wa_batchbuffer = | 1572 | ee->wa_batchbuffer = |
1496 | i915_error_object_create(i915, | 1573 | i915_error_object_create(i915, |
1497 | engine->scratch); | 1574 | i915->gt.scratch); |
1498 | request_record_user_bo(request, ee); | 1575 | request_record_user_bo(request, ee); |
1499 | 1576 | ||
1500 | ee->ctx = | 1577 | ee->ctx = |
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h index 3ec89a504de5..ff2652bbb0b0 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.h +++ b/drivers/gpu/drm/i915/i915_gpu_error.h | |||
@@ -192,6 +192,8 @@ struct i915_gpu_state { | |||
192 | } *active_bo[I915_NUM_ENGINES], *pinned_bo; | 192 | } *active_bo[I915_NUM_ENGINES], *pinned_bo; |
193 | u32 active_bo_count[I915_NUM_ENGINES], pinned_bo_count; | 193 | u32 active_bo_count[I915_NUM_ENGINES], pinned_bo_count; |
194 | struct i915_address_space *active_vm[I915_NUM_ENGINES]; | 194 | struct i915_address_space *active_vm[I915_NUM_ENGINES]; |
195 | |||
196 | struct scatterlist *sgl, *fit; | ||
195 | }; | 197 | }; |
196 | 198 | ||
197 | struct i915_gpu_error { | 199 | struct i915_gpu_error { |
@@ -298,29 +300,20 @@ struct i915_gpu_error { | |||
298 | 300 | ||
299 | struct drm_i915_error_state_buf { | 301 | struct drm_i915_error_state_buf { |
300 | struct drm_i915_private *i915; | 302 | struct drm_i915_private *i915; |
301 | unsigned int bytes; | 303 | struct scatterlist *sgl, *cur, *end; |
302 | unsigned int size; | 304 | |
305 | char *buf; | ||
306 | size_t bytes; | ||
307 | size_t size; | ||
308 | loff_t iter; | ||
309 | |||
303 | int err; | 310 | int err; |
304 | u8 *buf; | ||
305 | loff_t start; | ||
306 | loff_t pos; | ||
307 | }; | 311 | }; |
308 | 312 | ||
309 | #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR) | 313 | #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR) |
310 | 314 | ||
311 | __printf(2, 3) | 315 | __printf(2, 3) |
312 | void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...); | 316 | void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...); |
313 | int i915_error_state_to_str(struct drm_i915_error_state_buf *estr, | ||
314 | const struct i915_gpu_state *gpu); | ||
315 | int i915_error_state_buf_init(struct drm_i915_error_state_buf *eb, | ||
316 | struct drm_i915_private *i915, | ||
317 | size_t count, loff_t pos); | ||
318 | |||
319 | static inline void | ||
320 | i915_error_state_buf_release(struct drm_i915_error_state_buf *eb) | ||
321 | { | ||
322 | kfree(eb->buf); | ||
323 | } | ||
324 | 317 | ||
325 | struct i915_gpu_state *i915_capture_gpu_state(struct drm_i915_private *i915); | 318 | struct i915_gpu_state *i915_capture_gpu_state(struct drm_i915_private *i915); |
326 | void i915_capture_error_state(struct drm_i915_private *dev_priv, | 319 | void i915_capture_error_state(struct drm_i915_private *dev_priv, |
@@ -334,6 +327,9 @@ i915_gpu_state_get(struct i915_gpu_state *gpu) | |||
334 | return gpu; | 327 | return gpu; |
335 | } | 328 | } |
336 | 329 | ||
330 | ssize_t i915_gpu_state_copy_to_buffer(struct i915_gpu_state *error, | ||
331 | char *buf, loff_t offset, size_t count); | ||
332 | |||
337 | void __i915_gpu_state_free(struct kref *kref); | 333 | void __i915_gpu_state_free(struct kref *kref); |
338 | static inline void i915_gpu_state_put(struct i915_gpu_state *gpu) | 334 | static inline void i915_gpu_state_put(struct i915_gpu_state *gpu) |
339 | { | 335 | { |
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c index 1b81d7cb209e..6350db5503cd 100644 --- a/drivers/gpu/drm/i915/i915_pci.c +++ b/drivers/gpu/drm/i915/i915_pci.c | |||
@@ -79,8 +79,9 @@ | |||
79 | #define GEN2_FEATURES \ | 79 | #define GEN2_FEATURES \ |
80 | GEN(2), \ | 80 | GEN(2), \ |
81 | .num_pipes = 1, \ | 81 | .num_pipes = 1, \ |
82 | .has_overlay = 1, .overlay_needs_physical = 1, \ | 82 | .display.has_overlay = 1, \ |
83 | .has_gmch_display = 1, \ | 83 | .display.overlay_needs_physical = 1, \ |
84 | .display.has_gmch_display = 1, \ | ||
84 | .hws_needs_physical = 1, \ | 85 | .hws_needs_physical = 1, \ |
85 | .unfenced_needs_alignment = 1, \ | 86 | .unfenced_needs_alignment = 1, \ |
86 | .ring_mask = RENDER_RING, \ | 87 | .ring_mask = RENDER_RING, \ |
@@ -93,7 +94,8 @@ | |||
93 | static const struct intel_device_info intel_i830_info = { | 94 | static const struct intel_device_info intel_i830_info = { |
94 | GEN2_FEATURES, | 95 | GEN2_FEATURES, |
95 | PLATFORM(INTEL_I830), | 96 | PLATFORM(INTEL_I830), |
96 | .is_mobile = 1, .cursor_needs_physical = 1, | 97 | .is_mobile = 1, |
98 | .display.cursor_needs_physical = 1, | ||
97 | .num_pipes = 2, /* legal, last one wins */ | 99 | .num_pipes = 2, /* legal, last one wins */ |
98 | }; | 100 | }; |
99 | 101 | ||
@@ -107,8 +109,8 @@ static const struct intel_device_info intel_i85x_info = { | |||
107 | PLATFORM(INTEL_I85X), | 109 | PLATFORM(INTEL_I85X), |
108 | .is_mobile = 1, | 110 | .is_mobile = 1, |
109 | .num_pipes = 2, /* legal, last one wins */ | 111 | .num_pipes = 2, /* legal, last one wins */ |
110 | .cursor_needs_physical = 1, | 112 | .display.cursor_needs_physical = 1, |
111 | .has_fbc = 1, | 113 | .display.has_fbc = 1, |
112 | }; | 114 | }; |
113 | 115 | ||
114 | static const struct intel_device_info intel_i865g_info = { | 116 | static const struct intel_device_info intel_i865g_info = { |
@@ -119,7 +121,7 @@ static const struct intel_device_info intel_i865g_info = { | |||
119 | #define GEN3_FEATURES \ | 121 | #define GEN3_FEATURES \ |
120 | GEN(3), \ | 122 | GEN(3), \ |
121 | .num_pipes = 2, \ | 123 | .num_pipes = 2, \ |
122 | .has_gmch_display = 1, \ | 124 | .display.has_gmch_display = 1, \ |
123 | .ring_mask = RENDER_RING, \ | 125 | .ring_mask = RENDER_RING, \ |
124 | .has_snoop = true, \ | 126 | .has_snoop = true, \ |
125 | .has_coherent_ggtt = true, \ | 127 | .has_coherent_ggtt = true, \ |
@@ -131,8 +133,9 @@ static const struct intel_device_info intel_i915g_info = { | |||
131 | GEN3_FEATURES, | 133 | GEN3_FEATURES, |
132 | PLATFORM(INTEL_I915G), | 134 | PLATFORM(INTEL_I915G), |
133 | .has_coherent_ggtt = false, | 135 | .has_coherent_ggtt = false, |
134 | .cursor_needs_physical = 1, | 136 | .display.cursor_needs_physical = 1, |
135 | .has_overlay = 1, .overlay_needs_physical = 1, | 137 | .display.has_overlay = 1, |
138 | .display.overlay_needs_physical = 1, | ||
136 | .hws_needs_physical = 1, | 139 | .hws_needs_physical = 1, |
137 | .unfenced_needs_alignment = 1, | 140 | .unfenced_needs_alignment = 1, |
138 | }; | 141 | }; |
@@ -141,10 +144,11 @@ static const struct intel_device_info intel_i915gm_info = { | |||
141 | GEN3_FEATURES, | 144 | GEN3_FEATURES, |
142 | PLATFORM(INTEL_I915GM), | 145 | PLATFORM(INTEL_I915GM), |
143 | .is_mobile = 1, | 146 | .is_mobile = 1, |
144 | .cursor_needs_physical = 1, | 147 | .display.cursor_needs_physical = 1, |
145 | .has_overlay = 1, .overlay_needs_physical = 1, | 148 | .display.has_overlay = 1, |
146 | .supports_tv = 1, | 149 | .display.overlay_needs_physical = 1, |
147 | .has_fbc = 1, | 150 | .display.supports_tv = 1, |
151 | .display.has_fbc = 1, | ||
148 | .hws_needs_physical = 1, | 152 | .hws_needs_physical = 1, |
149 | .unfenced_needs_alignment = 1, | 153 | .unfenced_needs_alignment = 1, |
150 | }; | 154 | }; |
@@ -152,8 +156,10 @@ static const struct intel_device_info intel_i915gm_info = { | |||
152 | static const struct intel_device_info intel_i945g_info = { | 156 | static const struct intel_device_info intel_i945g_info = { |
153 | GEN3_FEATURES, | 157 | GEN3_FEATURES, |
154 | PLATFORM(INTEL_I945G), | 158 | PLATFORM(INTEL_I945G), |
155 | .has_hotplug = 1, .cursor_needs_physical = 1, | 159 | .display.has_hotplug = 1, |
156 | .has_overlay = 1, .overlay_needs_physical = 1, | 160 | .display.cursor_needs_physical = 1, |
161 | .display.has_overlay = 1, | ||
162 | .display.overlay_needs_physical = 1, | ||
157 | .hws_needs_physical = 1, | 163 | .hws_needs_physical = 1, |
158 | .unfenced_needs_alignment = 1, | 164 | .unfenced_needs_alignment = 1, |
159 | }; | 165 | }; |
@@ -162,10 +168,12 @@ static const struct intel_device_info intel_i945gm_info = { | |||
162 | GEN3_FEATURES, | 168 | GEN3_FEATURES, |
163 | PLATFORM(INTEL_I945GM), | 169 | PLATFORM(INTEL_I945GM), |
164 | .is_mobile = 1, | 170 | .is_mobile = 1, |
165 | .has_hotplug = 1, .cursor_needs_physical = 1, | 171 | .display.has_hotplug = 1, |
166 | .has_overlay = 1, .overlay_needs_physical = 1, | 172 | .display.cursor_needs_physical = 1, |
167 | .supports_tv = 1, | 173 | .display.has_overlay = 1, |
168 | .has_fbc = 1, | 174 | .display.overlay_needs_physical = 1, |
175 | .display.supports_tv = 1, | ||
176 | .display.has_fbc = 1, | ||
169 | .hws_needs_physical = 1, | 177 | .hws_needs_physical = 1, |
170 | .unfenced_needs_alignment = 1, | 178 | .unfenced_needs_alignment = 1, |
171 | }; | 179 | }; |
@@ -173,23 +181,23 @@ static const struct intel_device_info intel_i945gm_info = { | |||
173 | static const struct intel_device_info intel_g33_info = { | 181 | static const struct intel_device_info intel_g33_info = { |
174 | GEN3_FEATURES, | 182 | GEN3_FEATURES, |
175 | PLATFORM(INTEL_G33), | 183 | PLATFORM(INTEL_G33), |
176 | .has_hotplug = 1, | 184 | .display.has_hotplug = 1, |
177 | .has_overlay = 1, | 185 | .display.has_overlay = 1, |
178 | }; | 186 | }; |
179 | 187 | ||
180 | static const struct intel_device_info intel_pineview_info = { | 188 | static const struct intel_device_info intel_pineview_info = { |
181 | GEN3_FEATURES, | 189 | GEN3_FEATURES, |
182 | PLATFORM(INTEL_PINEVIEW), | 190 | PLATFORM(INTEL_PINEVIEW), |
183 | .is_mobile = 1, | 191 | .is_mobile = 1, |
184 | .has_hotplug = 1, | 192 | .display.has_hotplug = 1, |
185 | .has_overlay = 1, | 193 | .display.has_overlay = 1, |
186 | }; | 194 | }; |
187 | 195 | ||
188 | #define GEN4_FEATURES \ | 196 | #define GEN4_FEATURES \ |
189 | GEN(4), \ | 197 | GEN(4), \ |
190 | .num_pipes = 2, \ | 198 | .num_pipes = 2, \ |
191 | .has_hotplug = 1, \ | 199 | .display.has_hotplug = 1, \ |
192 | .has_gmch_display = 1, \ | 200 | .display.has_gmch_display = 1, \ |
193 | .ring_mask = RENDER_RING, \ | 201 | .ring_mask = RENDER_RING, \ |
194 | .has_snoop = true, \ | 202 | .has_snoop = true, \ |
195 | .has_coherent_ggtt = true, \ | 203 | .has_coherent_ggtt = true, \ |
@@ -200,7 +208,7 @@ static const struct intel_device_info intel_pineview_info = { | |||
200 | static const struct intel_device_info intel_i965g_info = { | 208 | static const struct intel_device_info intel_i965g_info = { |
201 | GEN4_FEATURES, | 209 | GEN4_FEATURES, |
202 | PLATFORM(INTEL_I965G), | 210 | PLATFORM(INTEL_I965G), |
203 | .has_overlay = 1, | 211 | .display.has_overlay = 1, |
204 | .hws_needs_physical = 1, | 212 | .hws_needs_physical = 1, |
205 | .has_snoop = false, | 213 | .has_snoop = false, |
206 | }; | 214 | }; |
@@ -208,9 +216,10 @@ static const struct intel_device_info intel_i965g_info = { | |||
208 | static const struct intel_device_info intel_i965gm_info = { | 216 | static const struct intel_device_info intel_i965gm_info = { |
209 | GEN4_FEATURES, | 217 | GEN4_FEATURES, |
210 | PLATFORM(INTEL_I965GM), | 218 | PLATFORM(INTEL_I965GM), |
211 | .is_mobile = 1, .has_fbc = 1, | 219 | .is_mobile = 1, |
212 | .has_overlay = 1, | 220 | .display.has_fbc = 1, |
213 | .supports_tv = 1, | 221 | .display.has_overlay = 1, |
222 | .display.supports_tv = 1, | ||
214 | .hws_needs_physical = 1, | 223 | .hws_needs_physical = 1, |
215 | .has_snoop = false, | 224 | .has_snoop = false, |
216 | }; | 225 | }; |
@@ -224,15 +233,16 @@ static const struct intel_device_info intel_g45_info = { | |||
224 | static const struct intel_device_info intel_gm45_info = { | 233 | static const struct intel_device_info intel_gm45_info = { |
225 | GEN4_FEATURES, | 234 | GEN4_FEATURES, |
226 | PLATFORM(INTEL_GM45), | 235 | PLATFORM(INTEL_GM45), |
227 | .is_mobile = 1, .has_fbc = 1, | 236 | .is_mobile = 1, |
228 | .supports_tv = 1, | 237 | .display.has_fbc = 1, |
238 | .display.supports_tv = 1, | ||
229 | .ring_mask = RENDER_RING | BSD_RING, | 239 | .ring_mask = RENDER_RING | BSD_RING, |
230 | }; | 240 | }; |
231 | 241 | ||
232 | #define GEN5_FEATURES \ | 242 | #define GEN5_FEATURES \ |
233 | GEN(5), \ | 243 | GEN(5), \ |
234 | .num_pipes = 2, \ | 244 | .num_pipes = 2, \ |
235 | .has_hotplug = 1, \ | 245 | .display.has_hotplug = 1, \ |
236 | .ring_mask = RENDER_RING | BSD_RING, \ | 246 | .ring_mask = RENDER_RING | BSD_RING, \ |
237 | .has_snoop = true, \ | 247 | .has_snoop = true, \ |
238 | .has_coherent_ggtt = true, \ | 248 | .has_coherent_ggtt = true, \ |
@@ -250,14 +260,15 @@ static const struct intel_device_info intel_ironlake_d_info = { | |||
250 | static const struct intel_device_info intel_ironlake_m_info = { | 260 | static const struct intel_device_info intel_ironlake_m_info = { |
251 | GEN5_FEATURES, | 261 | GEN5_FEATURES, |
252 | PLATFORM(INTEL_IRONLAKE), | 262 | PLATFORM(INTEL_IRONLAKE), |
253 | .is_mobile = 1, .has_fbc = 1, | 263 | .is_mobile = 1, |
264 | .display.has_fbc = 1, | ||
254 | }; | 265 | }; |
255 | 266 | ||
256 | #define GEN6_FEATURES \ | 267 | #define GEN6_FEATURES \ |
257 | GEN(6), \ | 268 | GEN(6), \ |
258 | .num_pipes = 2, \ | 269 | .num_pipes = 2, \ |
259 | .has_hotplug = 1, \ | 270 | .display.has_hotplug = 1, \ |
260 | .has_fbc = 1, \ | 271 | .display.has_fbc = 1, \ |
261 | .ring_mask = RENDER_RING | BSD_RING | BLT_RING, \ | 272 | .ring_mask = RENDER_RING | BSD_RING | BLT_RING, \ |
262 | .has_coherent_ggtt = true, \ | 273 | .has_coherent_ggtt = true, \ |
263 | .has_llc = 1, \ | 274 | .has_llc = 1, \ |
@@ -301,8 +312,8 @@ static const struct intel_device_info intel_sandybridge_m_gt2_info = { | |||
301 | #define GEN7_FEATURES \ | 312 | #define GEN7_FEATURES \ |
302 | GEN(7), \ | 313 | GEN(7), \ |
303 | .num_pipes = 3, \ | 314 | .num_pipes = 3, \ |
304 | .has_hotplug = 1, \ | 315 | .display.has_hotplug = 1, \ |
305 | .has_fbc = 1, \ | 316 | .display.has_fbc = 1, \ |
306 | .ring_mask = RENDER_RING | BSD_RING | BLT_RING, \ | 317 | .ring_mask = RENDER_RING | BSD_RING | BLT_RING, \ |
307 | .has_coherent_ggtt = true, \ | 318 | .has_coherent_ggtt = true, \ |
308 | .has_llc = 1, \ | 319 | .has_llc = 1, \ |
@@ -359,8 +370,8 @@ static const struct intel_device_info intel_valleyview_info = { | |||
359 | .num_pipes = 2, | 370 | .num_pipes = 2, |
360 | .has_runtime_pm = 1, | 371 | .has_runtime_pm = 1, |
361 | .has_rc6 = 1, | 372 | .has_rc6 = 1, |
362 | .has_gmch_display = 1, | 373 | .display.has_gmch_display = 1, |
363 | .has_hotplug = 1, | 374 | .display.has_hotplug = 1, |
364 | .ppgtt = INTEL_PPGTT_FULL, | 375 | .ppgtt = INTEL_PPGTT_FULL, |
365 | .has_snoop = true, | 376 | .has_snoop = true, |
366 | .has_coherent_ggtt = false, | 377 | .has_coherent_ggtt = false, |
@@ -374,10 +385,10 @@ static const struct intel_device_info intel_valleyview_info = { | |||
374 | #define G75_FEATURES \ | 385 | #define G75_FEATURES \ |
375 | GEN7_FEATURES, \ | 386 | GEN7_FEATURES, \ |
376 | .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, \ | 387 | .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, \ |
377 | .has_ddi = 1, \ | 388 | .display.has_ddi = 1, \ |
378 | .has_fpga_dbg = 1, \ | 389 | .has_fpga_dbg = 1, \ |
379 | .has_psr = 1, \ | 390 | .display.has_psr = 1, \ |
380 | .has_dp_mst = 1, \ | 391 | .display.has_dp_mst = 1, \ |
381 | .has_rc6p = 0 /* RC6p removed by HSW */, \ | 392 | .has_rc6p = 0 /* RC6p removed by HSW */, \ |
382 | .has_runtime_pm = 1 | 393 | .has_runtime_pm = 1 |
383 | 394 | ||
@@ -444,14 +455,14 @@ static const struct intel_device_info intel_cherryview_info = { | |||
444 | PLATFORM(INTEL_CHERRYVIEW), | 455 | PLATFORM(INTEL_CHERRYVIEW), |
445 | GEN(8), | 456 | GEN(8), |
446 | .num_pipes = 3, | 457 | .num_pipes = 3, |
447 | .has_hotplug = 1, | 458 | .display.has_hotplug = 1, |
448 | .is_lp = 1, | 459 | .is_lp = 1, |
449 | .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, | 460 | .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, |
450 | .has_64bit_reloc = 1, | 461 | .has_64bit_reloc = 1, |
451 | .has_runtime_pm = 1, | 462 | .has_runtime_pm = 1, |
452 | .has_rc6 = 1, | 463 | .has_rc6 = 1, |
453 | .has_logical_ring_contexts = 1, | 464 | .has_logical_ring_contexts = 1, |
454 | .has_gmch_display = 1, | 465 | .display.has_gmch_display = 1, |
455 | .ppgtt = INTEL_PPGTT_FULL, | 466 | .ppgtt = INTEL_PPGTT_FULL, |
456 | .has_reset_engine = 1, | 467 | .has_reset_engine = 1, |
457 | .has_snoop = true, | 468 | .has_snoop = true, |
@@ -473,15 +484,15 @@ static const struct intel_device_info intel_cherryview_info = { | |||
473 | GEN(9), \ | 484 | GEN(9), \ |
474 | GEN9_DEFAULT_PAGE_SIZES, \ | 485 | GEN9_DEFAULT_PAGE_SIZES, \ |
475 | .has_logical_ring_preemption = 1, \ | 486 | .has_logical_ring_preemption = 1, \ |
476 | .has_csr = 1, \ | 487 | .display.has_csr = 1, \ |
477 | .has_guc = 1, \ | 488 | .has_guc = 1, \ |
478 | .has_ipc = 1, \ | 489 | .display.has_ipc = 1, \ |
479 | .ddb_size = 896 | 490 | .ddb_size = 896 |
480 | 491 | ||
481 | #define SKL_PLATFORM \ | 492 | #define SKL_PLATFORM \ |
482 | GEN9_FEATURES, \ | 493 | GEN9_FEATURES, \ |
483 | /* Display WA #0477 WaDisableIPC: skl */ \ | 494 | /* Display WA #0477 WaDisableIPC: skl */ \ |
484 | .has_ipc = 0, \ | 495 | .display.has_ipc = 0, \ |
485 | PLATFORM(INTEL_SKYLAKE) | 496 | PLATFORM(INTEL_SKYLAKE) |
486 | 497 | ||
487 | static const struct intel_device_info intel_skylake_gt1_info = { | 498 | static const struct intel_device_info intel_skylake_gt1_info = { |
@@ -512,19 +523,19 @@ static const struct intel_device_info intel_skylake_gt4_info = { | |||
512 | #define GEN9_LP_FEATURES \ | 523 | #define GEN9_LP_FEATURES \ |
513 | GEN(9), \ | 524 | GEN(9), \ |
514 | .is_lp = 1, \ | 525 | .is_lp = 1, \ |
515 | .has_hotplug = 1, \ | 526 | .display.has_hotplug = 1, \ |
516 | .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, \ | 527 | .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, \ |
517 | .num_pipes = 3, \ | 528 | .num_pipes = 3, \ |
518 | .has_64bit_reloc = 1, \ | 529 | .has_64bit_reloc = 1, \ |
519 | .has_ddi = 1, \ | 530 | .display.has_ddi = 1, \ |
520 | .has_fpga_dbg = 1, \ | 531 | .has_fpga_dbg = 1, \ |
521 | .has_fbc = 1, \ | 532 | .display.has_fbc = 1, \ |
522 | .has_psr = 1, \ | 533 | .display.has_psr = 1, \ |
523 | .has_runtime_pm = 1, \ | 534 | .has_runtime_pm = 1, \ |
524 | .has_pooled_eu = 0, \ | 535 | .has_pooled_eu = 0, \ |
525 | .has_csr = 1, \ | 536 | .display.has_csr = 1, \ |
526 | .has_rc6 = 1, \ | 537 | .has_rc6 = 1, \ |
527 | .has_dp_mst = 1, \ | 538 | .display.has_dp_mst = 1, \ |
528 | .has_logical_ring_contexts = 1, \ | 539 | .has_logical_ring_contexts = 1, \ |
529 | .has_logical_ring_preemption = 1, \ | 540 | .has_logical_ring_preemption = 1, \ |
530 | .has_guc = 1, \ | 541 | .has_guc = 1, \ |
@@ -532,7 +543,7 @@ static const struct intel_device_info intel_skylake_gt4_info = { | |||
532 | .has_reset_engine = 1, \ | 543 | .has_reset_engine = 1, \ |
533 | .has_snoop = true, \ | 544 | .has_snoop = true, \ |
534 | .has_coherent_ggtt = false, \ | 545 | .has_coherent_ggtt = false, \ |
535 | .has_ipc = 1, \ | 546 | .display.has_ipc = 1, \ |
536 | GEN9_DEFAULT_PAGE_SIZES, \ | 547 | GEN9_DEFAULT_PAGE_SIZES, \ |
537 | GEN_DEFAULT_PIPEOFFSETS, \ | 548 | GEN_DEFAULT_PIPEOFFSETS, \ |
538 | IVB_CURSOR_OFFSETS, \ | 549 | IVB_CURSOR_OFFSETS, \ |
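Reviewer note: the i915_pci.c churn is mechanical: display-only feature flags move into a nested display sub-struct, so every designated initializer gains a .display. prefix. Plain C shows why no extra braces are needed, since designators can name nested members directly:

    #include <stdio.h>

    struct device_info {
        int is_mobile;
        struct {
            int has_fbc;
            int has_hotplug;
        } display;
    };

    static const struct device_info example = {
        .is_mobile = 1,
        .display.has_fbc = 1,       /* nested designator, no extra braces */
        .display.has_hotplug = 1,
    };

    int main(void)
    {
        printf("fbc=%d hotplug=%d\n",
               example.display.has_fbc, example.display.has_hotplug);
        return 0;
    }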
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 47baf2fe8f71..0a7d60509ca7 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h | |||
@@ -4570,6 +4570,7 @@ enum { | |||
4570 | * of the infoframe structure specified by CEA-861. */ | 4570 | * of the infoframe structure specified by CEA-861. */ |
4571 | #define VIDEO_DIP_DATA_SIZE 32 | 4571 | #define VIDEO_DIP_DATA_SIZE 32 |
4572 | #define VIDEO_DIP_VSC_DATA_SIZE 36 | 4572 | #define VIDEO_DIP_VSC_DATA_SIZE 36 |
4573 | #define VIDEO_DIP_PPS_DATA_SIZE 132 | ||
4573 | #define VIDEO_DIP_CTL _MMIO(0x61170) | 4574 | #define VIDEO_DIP_CTL _MMIO(0x61170) |
4574 | /* Pre HSW: */ | 4575 | /* Pre HSW: */ |
4575 | #define VIDEO_DIP_ENABLE (1 << 31) | 4576 | #define VIDEO_DIP_ENABLE (1 << 31) |
@@ -4617,6 +4618,17 @@ enum { | |||
4617 | #define _PP_STATUS 0x61200 | 4618 | #define _PP_STATUS 0x61200 |
4618 | #define PP_STATUS(pps_idx) _MMIO_PPS(pps_idx, _PP_STATUS) | 4619 | #define PP_STATUS(pps_idx) _MMIO_PPS(pps_idx, _PP_STATUS) |
4619 | #define PP_ON (1 << 31) | 4620 | #define PP_ON (1 << 31) |
4621 | |||
4622 | #define _PP_CONTROL_1 0xc7204 | ||
4623 | #define _PP_CONTROL_2 0xc7304 | ||
4624 | #define ICP_PP_CONTROL(x) _MMIO(((x) == 1) ? _PP_CONTROL_1 : \ | ||
4625 | _PP_CONTROL_2) | ||
4626 | #define POWER_CYCLE_DELAY_MASK (0x1f << 4) | ||
4627 | #define POWER_CYCLE_DELAY_SHIFT 4 | ||
4628 | #define VDD_OVERRIDE_FORCE (1 << 3) | ||
4629 | #define BACKLIGHT_ENABLE (1 << 2) | ||
4630 | #define PWR_DOWN_ON_RESET (1 << 1) | ||
4631 | #define PWR_STATE_TARGET (1 << 0) | ||
4620 | /* | 4632 | /* |
4621 | * Indicates that all dependencies of the panel are on: | 4633 | * Indicates that all dependencies of the panel are on: |
4622 | * | 4634 | * |
@@ -7750,6 +7762,7 @@ enum { | |||
7750 | #define ICP_DDIB_HPD_LONG_DETECT (2 << 4) | 7762 | #define ICP_DDIB_HPD_LONG_DETECT (2 << 4) |
7751 | #define ICP_DDIB_HPD_SHORT_LONG_DETECT (3 << 4) | 7763 | #define ICP_DDIB_HPD_SHORT_LONG_DETECT (3 << 4) |
7752 | #define ICP_DDIA_HPD_ENABLE (1 << 3) | 7764 | #define ICP_DDIA_HPD_ENABLE (1 << 3) |
7765 | #define ICP_DDIA_HPD_OP_DRIVE_1 (1 << 2) | ||
7753 | #define ICP_DDIA_HPD_STATUS_MASK (3 << 0) | 7766 | #define ICP_DDIA_HPD_STATUS_MASK (3 << 0) |
7754 | #define ICP_DDIA_HPD_NO_DETECT (0 << 0) | 7767 | #define ICP_DDIA_HPD_NO_DETECT (0 << 0) |
7755 | #define ICP_DDIA_HPD_SHORT_DETECT (1 << 0) | 7768 | #define ICP_DDIA_HPD_SHORT_DETECT (1 << 0) |
@@ -9197,6 +9210,7 @@ enum skl_power_gate { | |||
9197 | #define _DP_TP_CTL_B 0x64140 | 9210 | #define _DP_TP_CTL_B 0x64140 |
9198 | #define DP_TP_CTL(port) _MMIO_PORT(port, _DP_TP_CTL_A, _DP_TP_CTL_B) | 9211 | #define DP_TP_CTL(port) _MMIO_PORT(port, _DP_TP_CTL_A, _DP_TP_CTL_B) |
9199 | #define DP_TP_CTL_ENABLE (1 << 31) | 9212 | #define DP_TP_CTL_ENABLE (1 << 31) |
9213 | #define DP_TP_CTL_FEC_ENABLE (1 << 30) | ||
9200 | #define DP_TP_CTL_MODE_SST (0 << 27) | 9214 | #define DP_TP_CTL_MODE_SST (0 << 27) |
9201 | #define DP_TP_CTL_MODE_MST (1 << 27) | 9215 | #define DP_TP_CTL_MODE_MST (1 << 27) |
9202 | #define DP_TP_CTL_FORCE_ACT (1 << 25) | 9216 | #define DP_TP_CTL_FORCE_ACT (1 << 25) |
@@ -9215,6 +9229,7 @@ enum skl_power_gate { | |||
9215 | #define _DP_TP_STATUS_A 0x64044 | 9229 | #define _DP_TP_STATUS_A 0x64044 |
9216 | #define _DP_TP_STATUS_B 0x64144 | 9230 | #define _DP_TP_STATUS_B 0x64144 |
9217 | #define DP_TP_STATUS(port) _MMIO_PORT(port, _DP_TP_STATUS_A, _DP_TP_STATUS_B) | 9231 | #define DP_TP_STATUS(port) _MMIO_PORT(port, _DP_TP_STATUS_A, _DP_TP_STATUS_B) |
9232 | #define DP_TP_STATUS_FEC_ENABLE_LIVE (1 << 28) | ||
9218 | #define DP_TP_STATUS_IDLE_DONE (1 << 25) | 9233 | #define DP_TP_STATUS_IDLE_DONE (1 << 25) |
9219 | #define DP_TP_STATUS_ACT_SENT (1 << 24) | 9234 | #define DP_TP_STATUS_ACT_SENT (1 << 24) |
9220 | #define DP_TP_STATUS_MODE_STATUS_MST (1 << 23) | 9235 | #define DP_TP_STATUS_MODE_STATUS_MST (1 << 23) |
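Reviewer note: the two new register bits split Forward Error Correction into a request (DP_TP_CTL_FEC_ENABLE) and a hardware acknowledgement (DP_TP_STATUS_FEC_ENABLE_LIVE). A hedged fragment of how an enable path might pair them, assuming dev_priv and port in scope; the polling helper and the 1 ms timeout are assumptions, not code from this series:

    /* hedged sketch, not the driver's actual FEC enable sequence */
    u32 val = I915_READ(DP_TP_CTL(port));
    val |= DP_TP_CTL_FEC_ENABLE;            /* ask the port to start FEC */
    I915_WRITE(DP_TP_CTL(port), val);

    /* wait for the status register to report FEC actually running */
    if (intel_wait_for_register(dev_priv, DP_TP_STATUS(port),
                                DP_TP_STATUS_FEC_ENABLE_LIVE,
                                DP_TP_STATUS_FEC_ENABLE_LIVE, 1))
        DRM_ERROR("timed out waiting for FEC enable live\n");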
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c index 71107540581d..ca95ab2f4cfa 100644 --- a/drivers/gpu/drm/i915/i915_request.c +++ b/drivers/gpu/drm/i915/i915_request.c | |||
@@ -136,6 +136,9 @@ static int reset_all_global_seqno(struct drm_i915_private *i915, u32 seqno) | |||
136 | intel_engine_get_seqno(engine), | 136 | intel_engine_get_seqno(engine), |
137 | seqno); | 137 | seqno); |
138 | 138 | ||
139 | if (seqno == engine->timeline.seqno) | ||
140 | continue; | ||
141 | |||
139 | kthread_park(engine->breadcrumbs.signaler); | 142 | kthread_park(engine->breadcrumbs.signaler); |
140 | 143 | ||
141 | if (!i915_seqno_passed(seqno, engine->timeline.seqno)) { | 144 | if (!i915_seqno_passed(seqno, engine->timeline.seqno)) { |
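Reviewer note: the added guard skips engines whose timeline already sits at the target seqno. Seqno comparisons in this path are wraparound-safe; if memory serves, i915_seqno_passed() is a signed-difference test, which this standalone demo illustrates (verify against i915_drv.h):

    #include <stdint.h>
    #include <stdio.h>

    /* wraparound-safe "a is at or after b" for 32-bit sequence numbers */
    static int seqno_passed(uint32_t a, uint32_t b)
    {
        return (int32_t)(a - b) >= 0;
    }

    int main(void)
    {
        printf("%d\n", seqno_passed(5, 3));             /* 1 */
        printf("%d\n", seqno_passed(3, 5));             /* 0 */
        printf("%d\n", seqno_passed(2, 0xfffffffeu));   /* 1: wrapped */
        return 0;
    }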
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c index 6dbeed079ae5..fc2eeab823b7 100644 --- a/drivers/gpu/drm/i915/i915_sw_fence.c +++ b/drivers/gpu/drm/i915/i915_sw_fence.c | |||
@@ -1,10 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * (C) Copyright 2016 Intel Corporation | 2 | * SPDX-License-Identifier: MIT |
3 | * | 3 | * |
4 | * This program is free software; you can redistribute it and/or | 4 | * (C) Copyright 2016 Intel Corporation |
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation; version 2 | ||
7 | * of the License. | ||
8 | */ | 5 | */ |
9 | 6 | ||
10 | #include <linux/slab.h> | 7 | #include <linux/slab.h> |
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.h b/drivers/gpu/drm/i915/i915_sw_fence.h index fe2ef4dadfc6..0e055ea0179f 100644 --- a/drivers/gpu/drm/i915/i915_sw_fence.h +++ b/drivers/gpu/drm/i915/i915_sw_fence.h | |||
@@ -1,10 +1,9 @@ | |||
1 | /* | 1 | /* |
2 | * SPDX-License-Identifier: MIT | ||
3 | * | ||
2 | * i915_sw_fence.h - library routines for N:M synchronisation points | 4 | * i915_sw_fence.h - library routines for N:M synchronisation points |
3 | * | 5 | * |
4 | * Copyright (C) 2016 Intel Corporation | 6 | * Copyright (C) 2016 Intel Corporation |
5 | * | ||
6 | * This file is released under the GPLv2. | ||
7 | * | ||
8 | */ | 7 | */ |
9 | 8 | ||
10 | #ifndef _I915_SW_FENCE_H_ | 9 | #ifndef _I915_SW_FENCE_H_ |
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c index e5e6f6bb2b05..535caebd9813 100644 --- a/drivers/gpu/drm/i915/i915_sysfs.c +++ b/drivers/gpu/drm/i915/i915_sysfs.c | |||
@@ -483,7 +483,7 @@ static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr | |||
483 | return snprintf(buf, PAGE_SIZE, "%d\n", val); | 483 | return snprintf(buf, PAGE_SIZE, "%d\n", val); |
484 | } | 484 | } |
485 | 485 | ||
486 | static const struct attribute *gen6_attrs[] = { | 486 | static const struct attribute * const gen6_attrs[] = { |
487 | &dev_attr_gt_act_freq_mhz.attr, | 487 | &dev_attr_gt_act_freq_mhz.attr, |
488 | &dev_attr_gt_cur_freq_mhz.attr, | 488 | &dev_attr_gt_cur_freq_mhz.attr, |
489 | &dev_attr_gt_boost_freq_mhz.attr, | 489 | &dev_attr_gt_boost_freq_mhz.attr, |
@@ -495,7 +495,7 @@ static const struct attribute *gen6_attrs[] = { | |||
495 | NULL, | 495 | NULL, |
496 | }; | 496 | }; |
497 | 497 | ||
498 | static const struct attribute *vlv_attrs[] = { | 498 | static const struct attribute * const vlv_attrs[] = { |
499 | &dev_attr_gt_act_freq_mhz.attr, | 499 | &dev_attr_gt_act_freq_mhz.attr, |
500 | &dev_attr_gt_cur_freq_mhz.attr, | 500 | &dev_attr_gt_cur_freq_mhz.attr, |
501 | &dev_attr_gt_boost_freq_mhz.attr, | 501 | &dev_attr_gt_boost_freq_mhz.attr, |
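Reviewer note: the sysfs tables go from an array of const pointers to a const array of const pointers, allowing the compiler to place them in read-only data. The two const levels in miniature:

    static const char *a[] = { "x", NULL };             /* a[0] may be reassigned */
    static const char * const b[] = { "y", NULL };      /* b[0] may not */

    void demo(void)
    {
        a[0] = "z";     /* fine: the array itself is writable */
        /* b[0] = "z";     would not compile: array contents are const */
    }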
@@ -516,26 +516,21 @@ static ssize_t error_state_read(struct file *filp, struct kobject *kobj, | |||
516 | { | 516 | { |
517 | 517 | ||
518 | struct device *kdev = kobj_to_dev(kobj); | 518 | struct device *kdev = kobj_to_dev(kobj); |
519 | struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev); | 519 | struct drm_i915_private *i915 = kdev_minor_to_i915(kdev); |
520 | struct drm_i915_error_state_buf error_str; | ||
521 | struct i915_gpu_state *gpu; | 520 | struct i915_gpu_state *gpu; |
522 | ssize_t ret; | 521 | ssize_t ret; |
523 | 522 | ||
524 | ret = i915_error_state_buf_init(&error_str, dev_priv, count, off); | 523 | gpu = i915_first_error_state(i915); |
525 | if (ret) | 524 | if (gpu) { |
526 | return ret; | 525 | ret = i915_gpu_state_copy_to_buffer(gpu, buf, off, count); |
527 | 526 | i915_gpu_state_put(gpu); | |
528 | gpu = i915_first_error_state(dev_priv); | 527 | } else { |
529 | ret = i915_error_state_to_str(&error_str, gpu); | 528 | const char *str = "No error state collected\n"; |
530 | if (ret) | 529 | size_t len = strlen(str); |
531 | goto out; | ||
532 | |||
533 | ret = count < error_str.bytes ? count : error_str.bytes; | ||
534 | memcpy(buf, error_str.buf, ret); | ||
535 | 530 | ||
536 | out: | 531 | ret = min_t(size_t, count, len - off); |
537 | i915_gpu_state_put(gpu); | 532 | memcpy(buf, str + off, ret); |
538 | i915_error_state_buf_release(&error_str); | 533 | } |
539 | 534 | ||
540 | return ret; | 535 | return ret; |
541 | } | 536 | } |
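Reviewer note: error_state_read() now open-codes standard bin-attribute read semantics for the fallback string: clamp the requested count to whatever remains past the offset. A standalone demo of the clamp, with an explicit guard for offsets past the end (which sysfs itself normally prevents):

    #include <stdio.h>
    #include <string.h>

    /* copy at most count bytes of src starting at off; return bytes copied */
    static size_t read_at(char *dst, size_t count,
                          const char *src, size_t len, size_t off)
    {
        if (off >= len)
            return 0;           /* nothing left */
        size_t n = len - off;
        if (n > count)
            n = count;          /* the min_t() clamp */
        memcpy(dst, src + off, n);
        return n;
    }

    int main(void)
    {
        const char *msg = "No error state collected\n";
        char buf[8];
        size_t got = read_at(buf, sizeof(buf), msg, strlen(msg), 8);
        printf("got %zu bytes: %.*s\n", got, (int)got, buf);
        return 0;
    }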
diff --git a/drivers/gpu/drm/i915/i915_utils.h b/drivers/gpu/drm/i915/i915_utils.h index 5858a43e19da..9726df37c4c4 100644 --- a/drivers/gpu/drm/i915/i915_utils.h +++ b/drivers/gpu/drm/i915/i915_utils.h | |||
@@ -44,16 +44,19 @@ | |||
44 | __stringify(x), (long)(x)) | 44 | __stringify(x), (long)(x)) |
45 | 45 | ||
46 | #if defined(GCC_VERSION) && GCC_VERSION >= 70000 | 46 | #if defined(GCC_VERSION) && GCC_VERSION >= 70000 |
47 | #define add_overflows(A, B) \ | 47 | #define add_overflows_t(T, A, B) \ |
48 | __builtin_add_overflow_p((A), (B), (typeof((A) + (B)))0) | 48 | __builtin_add_overflow_p((A), (B), (T)0) |
49 | #else | 49 | #else |
50 | #define add_overflows(A, B) ({ \ | 50 | #define add_overflows_t(T, A, B) ({ \ |
51 | typeof(A) a = (A); \ | 51 | typeof(A) a = (A); \ |
52 | typeof(B) b = (B); \ | 52 | typeof(B) b = (B); \ |
53 | a + b < a; \ | 53 | (T)(a + b) < a; \ |
54 | }) | 54 | }) |
55 | #endif | 55 | #endif |
56 | 56 | ||
57 | #define add_overflows(A, B) \ | ||
58 | add_overflows_t(typeof((A) + (B)), (A), (B)) | ||
59 | |||
57 | #define range_overflows(start, size, max) ({ \ | 60 | #define range_overflows(start, size, max) ({ \ |
58 | typeof(start) start__ = (start); \ | 61 | typeof(start) start__ = (start); \ |
59 | typeof(size) size__ = (size); \ | 62 | typeof(size) size__ = (size); \ |
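Reviewer note: add_overflows_t() generalizes the old macro by letting the caller name the result type, which matters when the sum will be stored into something narrower than the promoted arithmetic type. A small GCC >= 7 demo (the non-builtin fallback behaves the same way):

    #include <stdio.h>

    typedef unsigned char u8;

    #define add_overflows_t(T, A, B) \
        __builtin_add_overflow_p((A), (B), (T)0)

    int main(void)
    {
        /* 200 + 100 fits in an int, but truncates in a u8 */
        printf("%d\n", add_overflows_t(int, 200, 100));     /* 0 */
        printf("%d\n", add_overflows_t(u8, 200, 100));      /* 1 */
        return 0;
    }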
diff --git a/drivers/gpu/drm/i915/icl_dsi.c b/drivers/gpu/drm/i915/icl_dsi.c index 01f422df8c23..4dd793b78996 100644 --- a/drivers/gpu/drm/i915/icl_dsi.c +++ b/drivers/gpu/drm/i915/icl_dsi.c | |||
@@ -26,6 +26,7 @@ | |||
26 | */ | 26 | */ |
27 | 27 | ||
28 | #include <drm/drm_mipi_dsi.h> | 28 | #include <drm/drm_mipi_dsi.h> |
29 | #include <drm/drm_atomic_helper.h> | ||
29 | #include "intel_dsi.h" | 30 | #include "intel_dsi.h" |
30 | 31 | ||
31 | static inline int header_credits_available(struct drm_i915_private *dev_priv, | 32 | static inline int header_credits_available(struct drm_i915_private *dev_priv, |
@@ -107,6 +108,90 @@ static void wait_for_cmds_dispatched_to_panel(struct intel_encoder *encoder) | |||
107 | } | 108 | } |
108 | } | 109 | } |
109 | 110 | ||
111 | static bool add_payld_to_queue(struct intel_dsi_host *host, const u8 *data, | ||
112 | u32 len) | ||
113 | { | ||
114 | struct intel_dsi *intel_dsi = host->intel_dsi; | ||
115 | struct drm_i915_private *dev_priv = to_i915(intel_dsi->base.base.dev); | ||
116 | enum transcoder dsi_trans = dsi_port_to_transcoder(host->port); | ||
117 | int free_credits; | ||
118 | int i, j; | ||
119 | |||
120 | for (i = 0; i < len; i += 4) { | ||
121 | u32 tmp = 0; | ||
122 | |||
123 | free_credits = payload_credits_available(dev_priv, dsi_trans); | ||
124 | if (free_credits < 1) { | ||
125 | DRM_ERROR("Payload credit not available\n"); | ||
126 | return false; | ||
127 | } | ||
128 | |||
129 | for (j = 0; j < min_t(u32, len - i, 4); j++) | ||
130 | tmp |= *data++ << 8 * j; | ||
131 | |||
132 | I915_WRITE(DSI_CMD_TXPYLD(dsi_trans), tmp); | ||
133 | } | ||
134 | |||
135 | return true; | ||
136 | } | ||
137 | |||
138 | static int dsi_send_pkt_hdr(struct intel_dsi_host *host, | ||
139 | struct mipi_dsi_packet pkt, bool enable_lpdt) | ||
140 | { | ||
141 | struct intel_dsi *intel_dsi = host->intel_dsi; | ||
142 | struct drm_i915_private *dev_priv = to_i915(intel_dsi->base.base.dev); | ||
143 | enum transcoder dsi_trans = dsi_port_to_transcoder(host->port); | ||
144 | u32 tmp; | ||
145 | int free_credits; | ||
146 | |||
147 | /* check if a header credit is available */ | ||
148 | free_credits = header_credits_available(dev_priv, dsi_trans); | ||
149 | if (free_credits < 1) { | ||
150 | DRM_ERROR("send pkt header failed, not enough hdr credits\n"); | ||
151 | return -1; | ||
152 | } | ||
153 | |||
154 | tmp = I915_READ(DSI_CMD_TXHDR(dsi_trans)); | ||
155 | |||
156 | if (pkt.payload) | ||
157 | tmp |= PAYLOAD_PRESENT; | ||
158 | else | ||
159 | tmp &= ~PAYLOAD_PRESENT; | ||
160 | |||
161 | tmp &= ~VBLANK_FENCE; | ||
162 | |||
163 | if (enable_lpdt) | ||
164 | tmp |= LP_DATA_TRANSFER; | ||
165 | |||
166 | tmp &= ~(PARAM_WC_MASK | VC_MASK | DT_MASK); | ||
167 | tmp |= ((pkt.header[0] & VC_MASK) << VC_SHIFT); | ||
168 | tmp |= ((pkt.header[0] & DT_MASK) << DT_SHIFT); | ||
169 | tmp |= (pkt.header[1] << PARAM_WC_LOWER_SHIFT); | ||
170 | tmp |= (pkt.header[2] << PARAM_WC_UPPER_SHIFT); | ||
171 | I915_WRITE(DSI_CMD_TXHDR(dsi_trans), tmp); | ||
172 | |||
173 | return 0; | ||
174 | } | ||
175 | |||
176 | static int dsi_send_pkt_payld(struct intel_dsi_host *host, | ||
177 | struct mipi_dsi_packet pkt) | ||
178 | { | ||
179 | /* payload queue can accept 256 bytes at most, check limit */ | ||
180 | if (pkt.payload_length > MAX_PLOAD_CREDIT * 4) { | ||
181 | DRM_ERROR("payload size exceeds max queue limit\n"); | ||
182 | return -1; | ||
183 | } | ||
184 | |||
185 | /* load data into command payload queue */ | ||
186 | if (!add_payld_to_queue(host, pkt.payload, | ||
187 | pkt.payload_length)) { | ||
188 | DRM_ERROR("adding payload to queue failed\n"); | ||
189 | return -1; | ||
190 | } | ||
191 | |||
192 | return 0; | ||
193 | } | ||
194 | |||
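Reviewer note: dsi_send_pkt_hdr() and dsi_send_pkt_payld() split a MIPI DSI packet across the hardware's header and payload FIFOs. A hedged sketch of the caller side using the generic mipi_dsi_create_packet() helper; the real wiring lands in gen11_dsi_host_transfer() further down:

    #include <drm/drm_mipi_dsi.h>

    /* hedged sketch of sending one DSI message through the two FIFOs */
    static ssize_t send_dsi_msg(struct intel_dsi_host *host,
                                const struct mipi_dsi_msg *msg, bool lpdt)
    {
        struct mipi_dsi_packet pkt;
        int ret;

        ret = mipi_dsi_create_packet(&pkt, msg);    /* header + payload */
        if (ret < 0)
            return ret;

        ret = dsi_send_pkt_hdr(host, pkt, lpdt);    /* push the header */
        if (ret < 0)
            return ret;

        /* only long packets carry a payload */
        if (mipi_dsi_packet_format_is_long(msg->type)) {
            ret = dsi_send_pkt_payld(host, pkt);    /* fill payload FIFO */
            if (ret < 0)
                return ret;
        }

        return sizeof(pkt.header) + pkt.payload_length;
    }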
110 | static void dsi_program_swing_and_deemphasis(struct intel_encoder *encoder) | 195 | static void dsi_program_swing_and_deemphasis(struct intel_encoder *encoder) |
111 | { | 196 | { |
112 | struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); | 197 | struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); |
@@ -172,6 +257,45 @@ static void dsi_program_swing_and_deemphasis(struct intel_encoder *encoder) | |||
172 | } | 257 | } |
173 | } | 258 | } |
174 | 259 | ||
260 | static void configure_dual_link_mode(struct intel_encoder *encoder, | ||
261 | const struct intel_crtc_state *pipe_config) | ||
262 | { | ||
263 | struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); | ||
264 | struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); | ||
265 | u32 dss_ctl1; | ||
266 | |||
267 | dss_ctl1 = I915_READ(DSS_CTL1); | ||
268 | dss_ctl1 |= SPLITTER_ENABLE; | ||
269 | dss_ctl1 &= ~OVERLAP_PIXELS_MASK; | ||
270 | dss_ctl1 |= OVERLAP_PIXELS(intel_dsi->pixel_overlap); | ||
271 | |||
272 | if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK) { | ||
273 | const struct drm_display_mode *adjusted_mode = | ||
274 | &pipe_config->base.adjusted_mode; | ||
275 | u32 dss_ctl2; | ||
276 | u16 hactive = adjusted_mode->crtc_hdisplay; | ||
277 | u16 dl_buffer_depth; | ||
278 | |||
279 | dss_ctl1 &= ~DUAL_LINK_MODE_INTERLEAVE; | ||
280 | dl_buffer_depth = hactive / 2 + intel_dsi->pixel_overlap; | ||
281 | |||
282 | if (dl_buffer_depth > MAX_DL_BUFFER_TARGET_DEPTH) | ||
283 | DRM_ERROR("DL buffer depth exceed max value\n"); | ||
284 | |||
285 | dss_ctl1 &= ~LEFT_DL_BUF_TARGET_DEPTH_MASK; | ||
286 | dss_ctl1 |= LEFT_DL_BUF_TARGET_DEPTH(dl_buffer_depth); | ||
287 | dss_ctl2 = I915_READ(DSS_CTL2); | ||
288 | dss_ctl2 &= ~RIGHT_DL_BUF_TARGET_DEPTH_MASK; | ||
289 | dss_ctl2 |= RIGHT_DL_BUF_TARGET_DEPTH(dl_buffer_depth); | ||
290 | I915_WRITE(DSS_CTL2, dss_ctl2); | ||
291 | } else { | ||
292 | /* Interleave */ | ||
293 | dss_ctl1 |= DUAL_LINK_MODE_INTERLEAVE; | ||
294 | } | ||
295 | |||
296 | I915_WRITE(DSS_CTL1, dss_ctl1); | ||
297 | } | ||
298 | |||
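Reviewer note: for front-back dual link, each half's buffer must hold half the active line plus the configured pixel overlap. Worked through as a fragment with hypothetical numbers (not from this patch):

    /* hypothetical numbers, not from this patch */
    u16 hactive = 1920, pixel_overlap = 4;
    u16 dl_buffer_depth = hactive / 2 + pixel_overlap;  /* 960 + 4 = 964 */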
175 | static void gen11_dsi_program_esc_clk_div(struct intel_encoder *encoder) | 299 | static void gen11_dsi_program_esc_clk_div(struct intel_encoder *encoder) |
176 | { | 300 | { |
177 | struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); | 301 | struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); |
@@ -412,6 +536,62 @@ static void gen11_dsi_setup_dphy_timings(struct intel_encoder *encoder) | |||
412 | } | 536 | } |
413 | } | 537 | } |
414 | 538 | ||
539 | static void gen11_dsi_gate_clocks(struct intel_encoder *encoder) | ||
540 | { | ||
541 | struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); | ||
542 | struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); | ||
543 | u32 tmp; | ||
544 | enum port port; | ||
545 | |||
546 | mutex_lock(&dev_priv->dpll_lock); | ||
547 | tmp = I915_READ(DPCLKA_CFGCR0_ICL); | ||
548 | for_each_dsi_port(port, intel_dsi->ports) { | ||
549 | tmp |= DPCLKA_CFGCR0_DDI_CLK_OFF(port); | ||
550 | } | ||
551 | |||
552 | I915_WRITE(DPCLKA_CFGCR0_ICL, tmp); | ||
553 | mutex_unlock(&dev_priv->dpll_lock); | ||
554 | } | ||
555 | |||
556 | static void gen11_dsi_ungate_clocks(struct intel_encoder *encoder) | ||
557 | { | ||
558 | struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); | ||
559 | struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); | ||
560 | u32 tmp; | ||
561 | enum port port; | ||
562 | |||
563 | mutex_lock(&dev_priv->dpll_lock); | ||
564 | tmp = I915_READ(DPCLKA_CFGCR0_ICL); | ||
565 | for_each_dsi_port(port, intel_dsi->ports) { | ||
566 | tmp &= ~DPCLKA_CFGCR0_DDI_CLK_OFF(port); | ||
567 | } | ||
568 | |||
569 | I915_WRITE(DPCLKA_CFGCR0_ICL, tmp); | ||
570 | mutex_unlock(&dev_priv->dpll_lock); | ||
571 | } | ||
572 | |||
573 | static void gen11_dsi_map_pll(struct intel_encoder *encoder, | ||
574 | const struct intel_crtc_state *crtc_state) | ||
575 | { | ||
576 | struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); | ||
577 | struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); | ||
578 | struct intel_shared_dpll *pll = crtc_state->shared_dpll; | ||
579 | enum port port; | ||
580 | u32 val; | ||
581 | |||
582 | mutex_lock(&dev_priv->dpll_lock); | ||
583 | |||
584 | val = I915_READ(DPCLKA_CFGCR0_ICL); | ||
585 | for_each_dsi_port(port, intel_dsi->ports) { | ||
586 | val &= ~DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port); | ||
587 | val |= DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, port); | ||
588 | } | ||
589 | I915_WRITE(DPCLKA_CFGCR0_ICL, val); | ||
590 | POSTING_READ(DPCLKA_CFGCR0_ICL); | ||
591 | |||
592 | mutex_unlock(&dev_priv->dpll_lock); | ||
593 | } | ||
594 | |||
415 | static void | 595 | static void |
416 | gen11_dsi_configure_transcoder(struct intel_encoder *encoder, | 596 | gen11_dsi_configure_transcoder(struct intel_encoder *encoder, |
417 | const struct intel_crtc_state *pipe_config) | 597 | const struct intel_crtc_state *pipe_config) |
@@ -506,7 +686,8 @@ gen11_dsi_configure_transcoder(struct intel_encoder *encoder, | |||
506 | I915_WRITE(TRANS_DDI_FUNC_CTL2(dsi_trans), tmp); | 686 | I915_WRITE(TRANS_DDI_FUNC_CTL2(dsi_trans), tmp); |
507 | } | 687 | } |
508 | 688 | ||
509 | //TODO: configure DSS_CTL1 | 689 | /* configure stream splitting */ |
690 | configure_dual_link_mode(encoder, pipe_config); | ||
510 | } | 691 | } |
511 | 692 | ||
512 | for_each_dsi_port(port, intel_dsi->ports) { | 693 | for_each_dsi_port(port, intel_dsi->ports) { |
@@ -758,6 +939,9 @@ gen11_dsi_enable_port_and_phy(struct intel_encoder *encoder, | |||
758 | 939 | ||
759 | /* Step (4h, 4i, 4j, 4k): Configure transcoder */ | 940 | /* Step (4h, 4i, 4j, 4k): Configure transcoder */ |
760 | gen11_dsi_configure_transcoder(encoder, pipe_config); | 941 | gen11_dsi_configure_transcoder(encoder, pipe_config); |
942 | |||
943 | /* Step 4l: Gate DDI clocks */ | ||
944 | gen11_dsi_gate_clocks(encoder); | ||
761 | } | 945 | } |
762 | 946 | ||
763 | static void gen11_dsi_powerup_panel(struct intel_encoder *encoder) | 947 | static void gen11_dsi_powerup_panel(struct intel_encoder *encoder) |
@@ -799,18 +983,25 @@ static void gen11_dsi_powerup_panel(struct intel_encoder *encoder) | |||
799 | wait_for_cmds_dispatched_to_panel(encoder); | 983 | wait_for_cmds_dispatched_to_panel(encoder); |
800 | } | 984 | } |
801 | 985 | ||
802 | static void __attribute__((unused)) | 986 | static void gen11_dsi_pre_pll_enable(struct intel_encoder *encoder, |
803 | gen11_dsi_pre_enable(struct intel_encoder *encoder, | 987 | const struct intel_crtc_state *pipe_config, |
804 | const struct intel_crtc_state *pipe_config, | 988 | const struct drm_connector_state *conn_state) |
805 | const struct drm_connector_state *conn_state) | ||
806 | { | 989 | { |
807 | struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); | ||
808 | |||
809 | /* step2: enable IO power */ | 990 | /* step2: enable IO power */ |
810 | gen11_dsi_enable_io_power(encoder); | 991 | gen11_dsi_enable_io_power(encoder); |
811 | 992 | ||
812 | /* step3: enable DSI PLL */ | 993 | /* step3: enable DSI PLL */ |
813 | gen11_dsi_program_esc_clk_div(encoder); | 994 | gen11_dsi_program_esc_clk_div(encoder); |
995 | } | ||
996 | |||
997 | static void gen11_dsi_pre_enable(struct intel_encoder *encoder, | ||
998 | const struct intel_crtc_state *pipe_config, | ||
999 | const struct drm_connector_state *conn_state) | ||
1000 | { | ||
1001 | struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); | ||
1002 | |||
1003 | /* step3b */ | ||
1004 | gen11_dsi_map_pll(encoder, pipe_config); | ||
814 | 1005 | ||
815 | /* step4: enable DSI port and DPHY */ | 1006 | /* step4: enable DSI port and DPHY */ |
816 | gen11_dsi_enable_port_and_phy(encoder, pipe_config); | 1007 | gen11_dsi_enable_port_and_phy(encoder, pipe_config); |
@@ -912,6 +1103,7 @@ static void gen11_dsi_disable_port(struct intel_encoder *encoder) | |||
912 | u32 tmp; | 1103 | u32 tmp; |
913 | enum port port; | 1104 | enum port port; |
914 | 1105 | ||
1106 | gen11_dsi_ungate_clocks(encoder); | ||
915 | for_each_dsi_port(port, intel_dsi->ports) { | 1107 | for_each_dsi_port(port, intel_dsi->ports) { |
916 | tmp = I915_READ(DDI_BUF_CTL(port)); | 1108 | tmp = I915_READ(DDI_BUF_CTL(port)); |
917 | tmp &= ~DDI_BUF_CTL_ENABLE; | 1109 | tmp &= ~DDI_BUF_CTL_ENABLE; |
@@ -923,6 +1115,7 @@ static void gen11_dsi_disable_port(struct intel_encoder *encoder) | |||
923 | DRM_ERROR("DDI port:%c buffer not idle\n", | 1115 | DRM_ERROR("DDI port:%c buffer not idle\n", |
924 | port_name(port)); | 1116 | port_name(port)); |
925 | } | 1117 | } |
1118 | gen11_dsi_ungate_clocks(encoder); | ||
926 | } | 1119 | } |
927 | 1120 | ||
928 | static void gen11_dsi_disable_io_power(struct intel_encoder *encoder) | 1121 | static void gen11_dsi_disable_io_power(struct intel_encoder *encoder) |
@@ -945,10 +1138,9 @@ static void gen11_dsi_disable_io_power(struct intel_encoder *encoder) | |||
945 | } | 1138 | } |
946 | } | 1139 | } |
947 | 1140 | ||
948 | static void __attribute__((unused)) gen11_dsi_disable( | 1141 | static void gen11_dsi_disable(struct intel_encoder *encoder, |
949 | struct intel_encoder *encoder, | 1142 | const struct intel_crtc_state *old_crtc_state, |
950 | const struct intel_crtc_state *old_crtc_state, | 1143 | const struct drm_connector_state *old_conn_state) |
951 | const struct drm_connector_state *old_conn_state) | ||
952 | { | 1144 | { |
953 | struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); | 1145 | struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); |
954 | 1146 | ||
@@ -972,10 +1164,289 @@ static void __attribute__((unused)) gen11_dsi_disable( | |||
972 | gen11_dsi_disable_io_power(encoder); | 1164 | gen11_dsi_disable_io_power(encoder); |
973 | } | 1165 | } |
974 | 1166 | ||
1167 | static void gen11_dsi_get_config(struct intel_encoder *encoder, | ||
1168 | struct intel_crtc_state *pipe_config) | ||
1169 | { | ||
1170 | struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); | ||
1171 | struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); | ||
1172 | u32 pll_id; | ||
1173 | |||
1174 | /* FIXME: adapt icl_ddi_clock_get() for DSI and use that? */ | ||
1175 | pll_id = intel_get_shared_dpll_id(dev_priv, pipe_config->shared_dpll); | ||
1176 | pipe_config->port_clock = cnl_calc_wrpll_link(dev_priv, pll_id); | ||
1177 | pipe_config->base.adjusted_mode.crtc_clock = intel_dsi->pclk; | ||
1178 | pipe_config->output_types |= BIT(INTEL_OUTPUT_DSI); | ||
1179 | } | ||
1180 | |||
1181 | static bool gen11_dsi_compute_config(struct intel_encoder *encoder, | ||
1182 | struct intel_crtc_state *pipe_config, | ||
1183 | struct drm_connector_state *conn_state) | ||
1184 | { | ||
1185 | struct intel_dsi *intel_dsi = container_of(encoder, struct intel_dsi, | ||
1186 | base); | ||
1187 | struct intel_connector *intel_connector = intel_dsi->attached_connector; | ||
1188 | struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc); | ||
1189 | const struct drm_display_mode *fixed_mode = | ||
1190 | intel_connector->panel.fixed_mode; | ||
1191 | struct drm_display_mode *adjusted_mode = | ||
1192 | &pipe_config->base.adjusted_mode; | ||
1193 | |||
1194 | intel_fixed_panel_mode(fixed_mode, adjusted_mode); | ||
1195 | intel_pch_panel_fitting(crtc, pipe_config, conn_state->scaling_mode); | ||
1196 | |||
1197 | adjusted_mode->flags = 0; | ||
1198 | |||
1199 | /* Dual link goes to transcoder DSI '0' */ | ||
1200 | if (intel_dsi->ports == BIT(PORT_B)) | ||
1201 | pipe_config->cpu_transcoder = TRANSCODER_DSI_1; | ||
1202 | else | ||
1203 | pipe_config->cpu_transcoder = TRANSCODER_DSI_0; | ||
1204 | |||
1205 | pipe_config->clock_set = true; | ||
1206 | pipe_config->port_clock = intel_dsi_bitrate(intel_dsi) / 5; | ||
1207 | |||
1208 | return true; | ||
1209 | } | ||
1210 | |||
1211 | static u64 gen11_dsi_get_power_domains(struct intel_encoder *encoder, | ||
1212 | struct intel_crtc_state *crtc_state) | ||
1213 | { | ||
1214 | struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); | ||
1215 | u64 domains = 0; | ||
1216 | enum port port; | ||
1217 | |||
1218 | for_each_dsi_port(port, intel_dsi->ports) | ||
1219 | if (port == PORT_A) | ||
1220 | domains |= BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO); | ||
1221 | else | ||
1222 | domains |= BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO); | ||
1223 | |||
1224 | return domains; | ||
1225 | } | ||
1226 | |||
1227 | static bool gen11_dsi_get_hw_state(struct intel_encoder *encoder, | ||
1228 | enum pipe *pipe) | ||
1229 | { | ||
1230 | struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); | ||
1231 | struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); | ||
1232 | u32 tmp; | ||
1233 | enum port port; | ||
1234 | enum transcoder dsi_trans; | ||
1235 | bool ret = false; | ||
1236 | |||
1237 | if (!intel_display_power_get_if_enabled(dev_priv, | ||
1238 | encoder->power_domain)) | ||
1239 | return false; | ||
1240 | |||
1241 | for_each_dsi_port(port, intel_dsi->ports) { | ||
1242 | dsi_trans = dsi_port_to_transcoder(port); | ||
1243 | tmp = I915_READ(TRANS_DDI_FUNC_CTL(dsi_trans)); | ||
1244 | switch (tmp & TRANS_DDI_EDP_INPUT_MASK) { | ||
1245 | case TRANS_DDI_EDP_INPUT_A_ON: | ||
1246 | *pipe = PIPE_A; | ||
1247 | break; | ||
1248 | case TRANS_DDI_EDP_INPUT_B_ONOFF: | ||
1249 | *pipe = PIPE_B; | ||
1250 | break; | ||
1251 | case TRANS_DDI_EDP_INPUT_C_ONOFF: | ||
1252 | *pipe = PIPE_C; | ||
1253 | break; | ||
1254 | default: | ||
1255 | DRM_ERROR("Invalid PIPE input\n"); | ||
1256 | goto out; | ||
1257 | } | ||
1258 | |||
1259 | tmp = I915_READ(PIPECONF(dsi_trans)); | ||
1260 | ret = tmp & PIPECONF_ENABLE; | ||
1261 | } | ||
1262 | out: | ||
1263 | intel_display_power_put(dev_priv, encoder->power_domain); | ||
1264 | return ret; | ||
1265 | } | ||
1266 | |||
1267 | static void gen11_dsi_encoder_destroy(struct drm_encoder *encoder) | ||
1268 | { | ||
1269 | intel_encoder_destroy(encoder); | ||
1270 | } | ||
1271 | |||
1272 | static const struct drm_encoder_funcs gen11_dsi_encoder_funcs = { | ||
1273 | .destroy = gen11_dsi_encoder_destroy, | ||
1274 | }; | ||
1275 | |||
1276 | static const struct drm_connector_funcs gen11_dsi_connector_funcs = { | ||
1277 | .late_register = intel_connector_register, | ||
1278 | .early_unregister = intel_connector_unregister, | ||
1279 | .destroy = intel_connector_destroy, | ||
1280 | .fill_modes = drm_helper_probe_single_connector_modes, | ||
1281 | .atomic_get_property = intel_digital_connector_atomic_get_property, | ||
1282 | .atomic_set_property = intel_digital_connector_atomic_set_property, | ||
1283 | .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, | ||
1284 | .atomic_duplicate_state = intel_digital_connector_duplicate_state, | ||
1285 | }; | ||
1286 | |||
1287 | static const struct drm_connector_helper_funcs gen11_dsi_connector_helper_funcs = { | ||
1288 | .get_modes = intel_dsi_get_modes, | ||
1289 | .mode_valid = intel_dsi_mode_valid, | ||
1290 | .atomic_check = intel_digital_connector_atomic_check, | ||
1291 | }; | ||
1292 | |||
1293 | static int gen11_dsi_host_attach(struct mipi_dsi_host *host, | ||
1294 | struct mipi_dsi_device *dsi) | ||
1295 | { | ||
1296 | return 0; | ||
1297 | } | ||
1298 | |||
1299 | static int gen11_dsi_host_detach(struct mipi_dsi_host *host, | ||
1300 | struct mipi_dsi_device *dsi) | ||
1301 | { | ||
1302 | return 0; | ||
1303 | } | ||
1304 | |||
1305 | static ssize_t gen11_dsi_host_transfer(struct mipi_dsi_host *host, | ||
1306 | const struct mipi_dsi_msg *msg) | ||
1307 | { | ||
1308 | struct intel_dsi_host *intel_dsi_host = to_intel_dsi_host(host); | ||
1309 | struct mipi_dsi_packet dsi_pkt; | ||
1310 | ssize_t ret; | ||
1311 | bool enable_lpdt = false; | ||
1312 | |||
1313 | ret = mipi_dsi_create_packet(&dsi_pkt, msg); | ||
1314 | if (ret < 0) | ||
1315 | return ret; | ||
1316 | |||
1317 | if (msg->flags & MIPI_DSI_MSG_USE_LPM) | ||
1318 | enable_lpdt = true; | ||
1319 | |||
1320 | /* send packet header */ | ||
1321 | ret = dsi_send_pkt_hdr(intel_dsi_host, dsi_pkt, enable_lpdt); | ||
1322 | if (ret < 0) | ||
1323 | return ret; | ||
1324 | |||
1325 | /* only long packets carry a payload */ | ||
1326 | if (mipi_dsi_packet_format_is_long(msg->type)) { | ||
1327 | ret = dsi_send_pkt_payld(intel_dsi_host, dsi_pkt); | ||
1328 | if (ret < 0) | ||
1329 | return ret; | ||
1330 | } | ||
1331 | |||
1332 | //TODO: add payload receive code if needed | ||
1333 | |||
1334 | ret = sizeof(dsi_pkt.header) + dsi_pkt.payload_length; | ||
1335 | |||
1336 | return ret; | ||
1337 | } | ||
1338 | |||
1339 | static const struct mipi_dsi_host_ops gen11_dsi_host_ops = { | ||
1340 | .attach = gen11_dsi_host_attach, | ||
1341 | .detach = gen11_dsi_host_detach, | ||
1342 | .transfer = gen11_dsi_host_transfer, | ||
1343 | }; | ||
1344 | |||
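These host ops plug gen11 DSI into the common drm_mipi_dsi framework: the core DCS helpers build a packet via mipi_dsi_create_packet() and end up in gen11_dsi_host_transfer() above. A minimal sketch of the calling side, using only core helpers from drm_mipi_dsi.h:

    #include <drm/drm_mipi_dsi.h>

    /* Sketch: a DCS write that is routed through the .transfer hook above. */
    static int example_panel_on(struct mipi_dsi_device *dsi)
    {
            /* request low-power transmission; the core turns this into
             * MIPI_DSI_MSG_USE_LPM on each message */
            dsi->mode_flags |= MIPI_DSI_MODE_LPM;

            /* short write -> header only; long write -> header + payload */
            return mipi_dsi_dcs_set_display_on(dsi);
    }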
975 | void icl_dsi_init(struct drm_i915_private *dev_priv) | 1345 | void icl_dsi_init(struct drm_i915_private *dev_priv) |
976 | { | 1346 | { |
1347 | struct drm_device *dev = &dev_priv->drm; | ||
1348 | struct intel_dsi *intel_dsi; | ||
1349 | struct intel_encoder *encoder; | ||
1350 | struct intel_connector *intel_connector; | ||
1351 | struct drm_connector *connector; | ||
1352 | struct drm_display_mode *scan, *fixed_mode = NULL; | ||
977 | enum port port; | 1353 | enum port port; |
978 | 1354 | ||
979 | if (!intel_bios_is_dsi_present(dev_priv, &port)) | 1355 | if (!intel_bios_is_dsi_present(dev_priv, &port)) |
980 | return; | 1356 | return; |
1357 | |||
1358 | intel_dsi = kzalloc(sizeof(*intel_dsi), GFP_KERNEL); | ||
1359 | if (!intel_dsi) | ||
1360 | return; | ||
1361 | |||
1362 | intel_connector = intel_connector_alloc(); | ||
1363 | if (!intel_connector) { | ||
1364 | kfree(intel_dsi); | ||
1365 | return; | ||
1366 | } | ||
1367 | |||
1368 | encoder = &intel_dsi->base; | ||
1369 | intel_dsi->attached_connector = intel_connector; | ||
1370 | connector = &intel_connector->base; | ||
1371 | |||
1372 | /* register DSI encoder with DRM subsystem */ | ||
1373 | drm_encoder_init(dev, &encoder->base, &gen11_dsi_encoder_funcs, | ||
1374 | DRM_MODE_ENCODER_DSI, "DSI %c", port_name(port)); | ||
1375 | |||
1376 | encoder->pre_pll_enable = gen11_dsi_pre_pll_enable; | ||
1377 | encoder->pre_enable = gen11_dsi_pre_enable; | ||
1378 | encoder->disable = gen11_dsi_disable; | ||
1379 | encoder->port = port; | ||
1380 | encoder->get_config = gen11_dsi_get_config; | ||
1381 | encoder->compute_config = gen11_dsi_compute_config; | ||
1382 | encoder->get_hw_state = gen11_dsi_get_hw_state; | ||
1383 | encoder->type = INTEL_OUTPUT_DSI; | ||
1384 | encoder->cloneable = 0; | ||
1385 | encoder->crtc_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C); | ||
1386 | encoder->power_domain = POWER_DOMAIN_PORT_DSI; | ||
1387 | encoder->get_power_domains = gen11_dsi_get_power_domains; | ||
1388 | |||
1389 | /* register DSI connector with DRM subsystem */ | ||
1390 | drm_connector_init(dev, connector, &gen11_dsi_connector_funcs, | ||
1391 | DRM_MODE_CONNECTOR_DSI); | ||
1392 | drm_connector_helper_add(connector, &gen11_dsi_connector_helper_funcs); | ||
1393 | connector->display_info.subpixel_order = SubPixelHorizontalRGB; | ||
1394 | connector->interlace_allowed = false; | ||
1395 | connector->doublescan_allowed = false; | ||
1396 | intel_connector->get_hw_state = intel_connector_get_hw_state; | ||
1397 | |||
1398 | /* attach connector to encoder */ | ||
1399 | intel_connector_attach_encoder(intel_connector, encoder); | ||
1400 | |||
1401 | /* fill mode info from VBT */ | ||
1402 | mutex_lock(&dev->mode_config.mutex); | ||
1403 | intel_dsi_vbt_get_modes(intel_dsi); | ||
1404 | list_for_each_entry(scan, &connector->probed_modes, head) { | ||
1405 | if (scan->type & DRM_MODE_TYPE_PREFERRED) { | ||
1406 | fixed_mode = drm_mode_duplicate(dev, scan); | ||
1407 | break; | ||
1408 | } | ||
1409 | } | ||
1410 | mutex_unlock(&dev->mode_config.mutex); | ||
1411 | |||
1412 | if (!fixed_mode) { | ||
1413 | DRM_ERROR("DSI fixed mode info missing\n"); | ||
1414 | goto err; | ||
1415 | } | ||
1416 | |||
1417 | connector->display_info.width_mm = fixed_mode->width_mm; | ||
1418 | connector->display_info.height_mm = fixed_mode->height_mm; | ||
1419 | intel_panel_init(&intel_connector->panel, fixed_mode, NULL); | ||
1420 | intel_panel_setup_backlight(connector, INVALID_PIPE); | ||
1421 | |||
1422 | |||
1423 | if (dev_priv->vbt.dsi.config->dual_link) | ||
1424 | intel_dsi->ports = BIT(PORT_A) | BIT(PORT_B); | ||
1425 | else | ||
1426 | intel_dsi->ports = BIT(port); | ||
1427 | |||
1428 | intel_dsi->dcs_backlight_ports = dev_priv->vbt.dsi.bl_ports; | ||
1429 | intel_dsi->dcs_cabc_ports = dev_priv->vbt.dsi.cabc_ports; | ||
1430 | |||
1431 | for_each_dsi_port(port, intel_dsi->ports) { | ||
1432 | struct intel_dsi_host *host; | ||
1433 | |||
1434 | host = intel_dsi_host_init(intel_dsi, &gen11_dsi_host_ops, port); | ||
1435 | if (!host) | ||
1436 | goto err; | ||
1437 | |||
1438 | intel_dsi->dsi_hosts[port] = host; | ||
1439 | } | ||
1440 | |||
1441 | if (!intel_dsi_vbt_init(intel_dsi, MIPI_DSI_GENERIC_PANEL_ID)) { | ||
1442 | DRM_DEBUG_KMS("no device found\n"); | ||
1443 | goto err; | ||
1444 | } | ||
1445 | |||
1446 | return; | ||
1447 | |||
1448 | err: | ||
1449 | drm_encoder_cleanup(&encoder->base); | ||
1450 | kfree(intel_dsi); | ||
1451 | kfree(intel_connector); | ||
981 | } | 1452 | } |
diff --git a/drivers/gpu/drm/i915/intel_atomic.c b/drivers/gpu/drm/i915/intel_atomic.c index a5a2c8fe58a7..8cb02f28d30c 100644 --- a/drivers/gpu/drm/i915/intel_atomic.c +++ b/drivers/gpu/drm/i915/intel_atomic.c | |||
@@ -184,6 +184,7 @@ intel_crtc_duplicate_state(struct drm_crtc *crtc) | |||
184 | crtc_state->fifo_changed = false; | 184 | crtc_state->fifo_changed = false; |
185 | crtc_state->wm.need_postvbl_update = false; | 185 | crtc_state->wm.need_postvbl_update = false; |
186 | crtc_state->fb_bits = 0; | 186 | crtc_state->fb_bits = 0; |
187 | crtc_state->update_planes = 0; | ||
187 | 188 | ||
188 | return &crtc_state->base; | 189 | return &crtc_state->base; |
189 | } | 190 | } |
diff --git a/drivers/gpu/drm/i915/intel_atomic_plane.c b/drivers/gpu/drm/i915/intel_atomic_plane.c index 905f8ef3ba4f..0a73e6e65c20 100644 --- a/drivers/gpu/drm/i915/intel_atomic_plane.c +++ b/drivers/gpu/drm/i915/intel_atomic_plane.c | |||
@@ -139,6 +139,9 @@ int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_ | |||
139 | if (state->visible && state->fb->format->format == DRM_FORMAT_NV12) | 139 | if (state->visible && state->fb->format->format == DRM_FORMAT_NV12) |
140 | crtc_state->nv12_planes |= BIT(intel_plane->id); | 140 | crtc_state->nv12_planes |= BIT(intel_plane->id); |
141 | 141 | ||
142 | if (state->visible || old_plane_state->base.visible) | ||
143 | crtc_state->update_planes |= BIT(intel_plane->id); | ||
144 | |||
142 | return intel_plane_atomic_calc_changes(old_crtc_state, | 145 | return intel_plane_atomic_calc_changes(old_crtc_state, |
143 | &crtc_state->base, | 146 | &crtc_state->base, |
144 | old_plane_state, | 147 | old_plane_state, |
@@ -168,27 +171,75 @@ static int intel_plane_atomic_check(struct drm_plane *plane, | |||
168 | to_intel_plane_state(new_plane_state)); | 171 | to_intel_plane_state(new_plane_state)); |
169 | } | 172 | } |
170 | 173 | ||
171 | void intel_update_planes_on_crtc(struct intel_atomic_state *old_state, | 174 | static struct intel_plane * |
172 | struct intel_crtc *crtc, | 175 | skl_next_plane_to_commit(struct intel_atomic_state *state, |
173 | struct intel_crtc_state *old_crtc_state, | 176 | struct intel_crtc *crtc, |
174 | struct intel_crtc_state *new_crtc_state) | 177 | struct skl_ddb_entry entries_y[I915_MAX_PLANES], |
178 | struct skl_ddb_entry entries_uv[I915_MAX_PLANES], | ||
179 | unsigned int *update_mask) | ||
175 | { | 180 | { |
176 | struct intel_plane_state *new_plane_state; | 181 | struct intel_crtc_state *crtc_state = |
182 | intel_atomic_get_new_crtc_state(state, crtc); | ||
183 | struct intel_plane_state *plane_state; | ||
177 | struct intel_plane *plane; | 184 | struct intel_plane *plane; |
178 | u32 update_mask; | ||
179 | int i; | 185 | int i; |
180 | 186 | ||
181 | update_mask = old_crtc_state->active_planes; | 187 | if (*update_mask == 0) |
182 | update_mask |= new_crtc_state->active_planes; | 188 | return NULL; |
189 | |||
190 | for_each_new_intel_plane_in_state(state, plane, plane_state, i) { | ||
191 | enum plane_id plane_id = plane->id; | ||
183 | 192 | ||
184 | for_each_new_intel_plane_in_state(old_state, plane, new_plane_state, i) { | ||
185 | if (crtc->pipe != plane->pipe || | 193 | if (crtc->pipe != plane->pipe || |
186 | !(update_mask & BIT(plane->id))) | 194 | !(*update_mask & BIT(plane_id))) |
195 | continue; | ||
196 | |||
197 | if (skl_ddb_allocation_overlaps(&crtc_state->wm.skl.plane_ddb_y[plane_id], | ||
198 | entries_y, | ||
199 | I915_MAX_PLANES, plane_id) || | ||
200 | skl_ddb_allocation_overlaps(&crtc_state->wm.skl.plane_ddb_uv[plane_id], | ||
201 | entries_uv, | ||
202 | I915_MAX_PLANES, plane_id)) | ||
187 | continue; | 203 | continue; |
188 | 204 | ||
205 | *update_mask &= ~BIT(plane_id); | ||
206 | entries_y[plane_id] = crtc_state->wm.skl.plane_ddb_y[plane_id]; | ||
207 | entries_uv[plane_id] = crtc_state->wm.skl.plane_ddb_uv[plane_id]; | ||
208 | |||
209 | return plane; | ||
210 | } | ||
211 | |||
212 | /* should never happen */ | ||
213 | WARN_ON(1); | ||
214 | |||
215 | return NULL; | ||
216 | } | ||
217 | |||
218 | void skl_update_planes_on_crtc(struct intel_atomic_state *state, | ||
219 | struct intel_crtc *crtc) | ||
220 | { | ||
221 | struct intel_crtc_state *old_crtc_state = | ||
222 | intel_atomic_get_old_crtc_state(state, crtc); | ||
223 | struct intel_crtc_state *new_crtc_state = | ||
224 | intel_atomic_get_new_crtc_state(state, crtc); | ||
225 | struct skl_ddb_entry entries_y[I915_MAX_PLANES]; | ||
226 | struct skl_ddb_entry entries_uv[I915_MAX_PLANES]; | ||
227 | u32 update_mask = new_crtc_state->update_planes; | ||
228 | struct intel_plane *plane; | ||
229 | |||
230 | memcpy(entries_y, old_crtc_state->wm.skl.plane_ddb_y, | ||
231 | sizeof(old_crtc_state->wm.skl.plane_ddb_y)); | ||
232 | memcpy(entries_uv, old_crtc_state->wm.skl.plane_ddb_uv, | ||
233 | sizeof(old_crtc_state->wm.skl.plane_ddb_uv)); | ||
234 | |||
235 | while ((plane = skl_next_plane_to_commit(state, crtc, | ||
236 | entries_y, entries_uv, | ||
237 | &update_mask))) { | ||
238 | struct intel_plane_state *new_plane_state = | ||
239 | intel_atomic_get_new_plane_state(state, plane); | ||
240 | |||
189 | if (new_plane_state->base.visible) { | 241 | if (new_plane_state->base.visible) { |
190 | trace_intel_update_plane(&plane->base, crtc); | 242 | trace_intel_update_plane(&plane->base, crtc); |
191 | |||
192 | plane->update_plane(plane, new_crtc_state, new_plane_state); | 243 | plane->update_plane(plane, new_crtc_state, new_plane_state); |
193 | } else if (new_plane_state->slave) { | 244 | } else if (new_plane_state->slave) { |
194 | struct intel_plane *master = | 245 | struct intel_plane *master = |
@@ -204,15 +255,38 @@ void intel_update_planes_on_crtc(struct intel_atomic_state *old_state, | |||
204 | * plane_state. | 255 | * plane_state. |
205 | */ | 256 | */ |
206 | new_plane_state = | 257 | new_plane_state = |
207 | intel_atomic_get_new_plane_state(old_state, master); | 258 | intel_atomic_get_new_plane_state(state, master); |
208 | 259 | ||
209 | trace_intel_update_plane(&plane->base, crtc); | 260 | trace_intel_update_plane(&plane->base, crtc); |
210 | |||
211 | plane->update_slave(plane, new_crtc_state, new_plane_state); | 261 | plane->update_slave(plane, new_crtc_state, new_plane_state); |
212 | } else { | 262 | } else { |
213 | trace_intel_disable_plane(&plane->base, crtc); | 263 | trace_intel_disable_plane(&plane->base, crtc); |
264 | plane->disable_plane(plane, new_crtc_state); | ||
265 | } | ||
266 | } | ||
267 | } | ||
268 | |||
269 | void i9xx_update_planes_on_crtc(struct intel_atomic_state *state, | ||
270 | struct intel_crtc *crtc) | ||
271 | { | ||
272 | struct intel_crtc_state *new_crtc_state = | ||
273 | intel_atomic_get_new_crtc_state(state, crtc); | ||
274 | u32 update_mask = new_crtc_state->update_planes; | ||
275 | struct intel_plane_state *new_plane_state; | ||
276 | struct intel_plane *plane; | ||
277 | int i; | ||
214 | 278 | ||
215 | plane->disable_plane(plane, crtc); | 279 | for_each_new_intel_plane_in_state(state, plane, new_plane_state, i) { |
280 | if (crtc->pipe != plane->pipe || | ||
281 | !(update_mask & BIT(plane->id))) | ||
282 | continue; | ||
283 | |||
284 | if (new_plane_state->base.visible) { | ||
285 | trace_intel_update_plane(&plane->base, crtc); | ||
286 | plane->update_plane(plane, new_crtc_state, new_plane_state); | ||
287 | } else { | ||
288 | trace_intel_disable_plane(&plane->base, crtc); | ||
289 | plane->disable_plane(plane, new_crtc_state); | ||
216 | } | 290 | } |
217 | } | 291 | } |
218 | } | 292 | } |
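skl_next_plane_to_commit() encodes an ordering constraint rather than a fixed order: each pass picks a plane whose new DDB allocation does not overlap any allocation still recorded in entries_y/entries_uv, commits it, and records the new allocation, so no two planes ever occupy the same DDB blocks mid-update; the WARN_ON(1) fires only if the allocator produced an unsatisfiable set. A standalone sketch of the same idea over plain ranges — an illustration, not the i915 code:

    #include <stdbool.h>

    struct ddb_range { unsigned int start, end; };  /* [start, end) */

    static bool ranges_overlap(const struct ddb_range *a,
                               const struct ddb_range *b)
    {
            return a->start < b->end && b->start < a->end;
    }

    /* pick the next plane in *mask whose new allocation clashes with
     * nothing still committed in cur[]; -1 mirrors the WARN_ON case */
    static int next_safe_plane(const struct ddb_range *new_ddb,
                               struct ddb_range *cur, int nplanes,
                               unsigned int *mask)
    {
            for (int i = 0; i < nplanes; i++) {
                    bool clash = false;

                    if (!(*mask & (1u << i)))
                            continue;

                    for (int j = 0; j < nplanes; j++)
                            if (j != i && ranges_overlap(&new_ddb[i], &cur[j]))
                                    clash = true;

                    if (clash)
                            continue;

                    cur[i] = new_ddb[i];    /* plane i is now committed */
                    *mask &= ~(1u << i);
                    return i;
            }

            return -1;
    }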
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c index 0694aa8bb9bc..6d3e0260d49c 100644 --- a/drivers/gpu/drm/i915/intel_bios.c +++ b/drivers/gpu/drm/i915/intel_bios.c | |||
@@ -1752,7 +1752,7 @@ void intel_bios_init(struct drm_i915_private *dev_priv) | |||
1752 | const struct bdb_header *bdb; | 1752 | const struct bdb_header *bdb; |
1753 | u8 __iomem *bios = NULL; | 1753 | u8 __iomem *bios = NULL; |
1754 | 1754 | ||
1755 | if (INTEL_INFO(dev_priv)->num_pipes == 0) { | 1755 | if (!HAS_DISPLAY(dev_priv)) { |
1756 | DRM_DEBUG_KMS("Skipping VBT init due to disabled display.\n"); | 1756 | DRM_DEBUG_KMS("Skipping VBT init due to disabled display.\n"); |
1757 | return; | 1757 | return; |
1758 | } | 1758 | } |
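HAS_DISPLAY() replaces the open-coded num_pipes tests here and in intel_device_info.c below; the macro itself is introduced elsewhere in this series, presumably as a plain pipe-count check:

    /* assumed definition (i915_drv.h), matching the checks it replaces */
    #define HAS_DISPLAY(dev_priv) (INTEL_INFO(dev_priv)->num_pipes > 0)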
diff --git a/drivers/gpu/drm/i915/intel_breadcrumbs.c b/drivers/gpu/drm/i915/intel_breadcrumbs.c index 84bf8d827136..447c5256f63a 100644 --- a/drivers/gpu/drm/i915/intel_breadcrumbs.c +++ b/drivers/gpu/drm/i915/intel_breadcrumbs.c | |||
@@ -27,11 +27,7 @@ | |||
27 | 27 | ||
28 | #include "i915_drv.h" | 28 | #include "i915_drv.h" |
29 | 29 | ||
30 | #ifdef CONFIG_SMP | 30 | #define task_asleep(tsk) ((tsk)->state & TASK_NORMAL && !(tsk)->on_rq) |
31 | #define task_asleep(tsk) ((tsk)->state & TASK_NORMAL && !(tsk)->on_cpu) | ||
32 | #else | ||
33 | #define task_asleep(tsk) ((tsk)->state & TASK_NORMAL) | ||
34 | #endif | ||
35 | 31 | ||
36 | static unsigned int __intel_breadcrumbs_wakeup(struct intel_breadcrumbs *b) | 32 | static unsigned int __intel_breadcrumbs_wakeup(struct intel_breadcrumbs *b) |
37 | { | 33 | { |
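Dropping the CONFIG_SMP split works because ->on_rq, unlike ->on_cpu, is maintained on UP builds as well; it also avoids misreading a task that is off-CPU but still queued as asleep. The states the new predicate distinguishes, as a sketch:

    /*
     * state & TASK_NORMAL   on_rq   task_asleep()
     * ---------------------------------------------------------------
     *          0              1     false  (running or runnable)
     *          1              1     false  (heading to sleep, still queued)
     *          1              0     true   (blocked in schedule())
     */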
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index ad11540ac436..f3e1d6a0b7dd 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c | |||
@@ -28,6 +28,7 @@ | |||
28 | #include <drm/drm_scdc_helper.h> | 28 | #include <drm/drm_scdc_helper.h> |
29 | #include "i915_drv.h" | 29 | #include "i915_drv.h" |
30 | #include "intel_drv.h" | 30 | #include "intel_drv.h" |
31 | #include "intel_dsi.h" | ||
31 | 32 | ||
32 | struct ddi_buf_trans { | 33 | struct ddi_buf_trans { |
33 | u32 trans1; /* balance leg enable, de-emph level */ | 34 | u32 trans1; /* balance leg enable, de-emph level */ |
@@ -1363,8 +1364,8 @@ static int skl_calc_wrpll_link(struct drm_i915_private *dev_priv, | |||
1363 | return dco_freq / (p0 * p1 * p2 * 5); | 1364 | return dco_freq / (p0 * p1 * p2 * 5); |
1364 | } | 1365 | } |
1365 | 1366 | ||
1366 | static int cnl_calc_wrpll_link(struct drm_i915_private *dev_priv, | 1367 | int cnl_calc_wrpll_link(struct drm_i915_private *dev_priv, |
1367 | enum intel_dpll_id pll_id) | 1368 | enum intel_dpll_id pll_id) |
1368 | { | 1369 | { |
1369 | uint32_t cfgcr0, cfgcr1; | 1370 | uint32_t cfgcr0, cfgcr1; |
1370 | uint32_t p0, p1, p2, dco_freq, ref_clock; | 1371 | uint32_t p0, p1, p2, dco_freq, ref_clock; |
@@ -2154,6 +2155,12 @@ static u64 intel_ddi_get_power_domains(struct intel_encoder *encoder, | |||
2154 | intel_port_is_tc(dev_priv, encoder->port)) | 2155 | intel_port_is_tc(dev_priv, encoder->port)) |
2155 | domains |= BIT_ULL(intel_ddi_main_link_aux_domain(dig_port)); | 2156 | domains |= BIT_ULL(intel_ddi_main_link_aux_domain(dig_port)); |
2156 | 2157 | ||
2158 | /* | ||
2159 | * VDSC power is needed when DSC is enabled | ||
2160 | */ | ||
2161 | if (crtc_state->dsc_params.compression_enable) | ||
2162 | domains |= BIT_ULL(intel_dsc_power_domain(crtc_state)); | ||
2163 | |||
2157 | return domains; | 2164 | return domains; |
2158 | } | 2165 | } |
2159 | 2166 | ||
@@ -2785,77 +2792,54 @@ uint32_t icl_dpclka_cfgcr0_clk_off(struct drm_i915_private *dev_priv, | |||
2785 | return 0; | 2792 | return 0; |
2786 | } | 2793 | } |
2787 | 2794 | ||
2788 | void icl_map_plls_to_ports(struct drm_crtc *crtc, | 2795 | static void icl_map_plls_to_ports(struct intel_encoder *encoder, |
2789 | struct intel_crtc_state *crtc_state, | 2796 | const struct intel_crtc_state *crtc_state) |
2790 | struct drm_atomic_state *old_state) | ||
2791 | { | 2797 | { |
2798 | struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); | ||
2792 | struct intel_shared_dpll *pll = crtc_state->shared_dpll; | 2799 | struct intel_shared_dpll *pll = crtc_state->shared_dpll; |
2793 | struct drm_i915_private *dev_priv = to_i915(crtc->dev); | 2800 | enum port port = encoder->port; |
2794 | struct drm_connector_state *conn_state; | 2801 | u32 val; |
2795 | struct drm_connector *conn; | ||
2796 | int i; | ||
2797 | |||
2798 | for_each_new_connector_in_state(old_state, conn, conn_state, i) { | ||
2799 | struct intel_encoder *encoder = | ||
2800 | to_intel_encoder(conn_state->best_encoder); | ||
2801 | enum port port; | ||
2802 | uint32_t val; | ||
2803 | |||
2804 | if (conn_state->crtc != crtc) | ||
2805 | continue; | ||
2806 | |||
2807 | port = encoder->port; | ||
2808 | mutex_lock(&dev_priv->dpll_lock); | ||
2809 | 2802 | ||
2810 | val = I915_READ(DPCLKA_CFGCR0_ICL); | 2803 | mutex_lock(&dev_priv->dpll_lock); |
2811 | WARN_ON((val & icl_dpclka_cfgcr0_clk_off(dev_priv, port)) == 0); | ||
2812 | 2804 | ||
2813 | if (intel_port_is_combophy(dev_priv, port)) { | 2805 | val = I915_READ(DPCLKA_CFGCR0_ICL); |
2814 | val &= ~DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port); | 2806 | WARN_ON((val & icl_dpclka_cfgcr0_clk_off(dev_priv, port)) == 0); |
2815 | val |= DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, port); | ||
2816 | I915_WRITE(DPCLKA_CFGCR0_ICL, val); | ||
2817 | POSTING_READ(DPCLKA_CFGCR0_ICL); | ||
2818 | } | ||
2819 | 2807 | ||
2820 | val &= ~icl_dpclka_cfgcr0_clk_off(dev_priv, port); | 2808 | if (intel_port_is_combophy(dev_priv, port)) { |
2809 | val &= ~DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port); | ||
2810 | val |= DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, port); | ||
2821 | I915_WRITE(DPCLKA_CFGCR0_ICL, val); | 2811 | I915_WRITE(DPCLKA_CFGCR0_ICL, val); |
2822 | 2812 | POSTING_READ(DPCLKA_CFGCR0_ICL); | |
2823 | mutex_unlock(&dev_priv->dpll_lock); | ||
2824 | } | 2813 | } |
2814 | |||
2815 | val &= ~icl_dpclka_cfgcr0_clk_off(dev_priv, port); | ||
2816 | I915_WRITE(DPCLKA_CFGCR0_ICL, val); | ||
2817 | |||
2818 | mutex_unlock(&dev_priv->dpll_lock); | ||
2825 | } | 2819 | } |
2826 | 2820 | ||
2827 | void icl_unmap_plls_to_ports(struct drm_crtc *crtc, | 2821 | static void icl_unmap_plls_to_ports(struct intel_encoder *encoder) |
2828 | struct intel_crtc_state *crtc_state, | ||
2829 | struct drm_atomic_state *old_state) | ||
2830 | { | 2822 | { |
2831 | struct drm_i915_private *dev_priv = to_i915(crtc->dev); | 2823 | struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); |
2832 | struct drm_connector_state *old_conn_state; | 2824 | enum port port = encoder->port; |
2833 | struct drm_connector *conn; | 2825 | u32 val; |
2834 | int i; | ||
2835 | 2826 | ||
2836 | for_each_old_connector_in_state(old_state, conn, old_conn_state, i) { | 2827 | mutex_lock(&dev_priv->dpll_lock); |
2837 | struct intel_encoder *encoder = | ||
2838 | to_intel_encoder(old_conn_state->best_encoder); | ||
2839 | enum port port; | ||
2840 | 2828 | ||
2841 | if (old_conn_state->crtc != crtc) | 2829 | val = I915_READ(DPCLKA_CFGCR0_ICL); |
2842 | continue; | 2830 | val |= icl_dpclka_cfgcr0_clk_off(dev_priv, port); |
2831 | I915_WRITE(DPCLKA_CFGCR0_ICL, val); | ||
2843 | 2832 | ||
2844 | port = encoder->port; | 2833 | mutex_unlock(&dev_priv->dpll_lock); |
2845 | mutex_lock(&dev_priv->dpll_lock); | ||
2846 | I915_WRITE(DPCLKA_CFGCR0_ICL, | ||
2847 | I915_READ(DPCLKA_CFGCR0_ICL) | | ||
2848 | icl_dpclka_cfgcr0_clk_off(dev_priv, port)); | ||
2849 | mutex_unlock(&dev_priv->dpll_lock); | ||
2850 | } | ||
2851 | } | 2834 | } |
2852 | 2835 | ||
2853 | void icl_sanitize_encoder_pll_mapping(struct intel_encoder *encoder) | 2836 | void icl_sanitize_encoder_pll_mapping(struct intel_encoder *encoder) |
2854 | { | 2837 | { |
2855 | struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); | 2838 | struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); |
2856 | u32 val; | 2839 | u32 val; |
2857 | enum port port = encoder->port; | 2840 | enum port port; |
2858 | bool clk_enabled; | 2841 | u32 port_mask; |
2842 | bool ddi_clk_needed; | ||
2859 | 2843 | ||
2860 | /* | 2844 | /* |
2861 | * In case of DP MST, we sanitize the primary encoder only, not the | 2845 | * In case of DP MST, we sanitize the primary encoder only, not the |
@@ -2864,9 +2848,6 @@ void icl_sanitize_encoder_pll_mapping(struct intel_encoder *encoder) | |||
2864 | if (encoder->type == INTEL_OUTPUT_DP_MST) | 2848 | if (encoder->type == INTEL_OUTPUT_DP_MST) |
2865 | return; | 2849 | return; |
2866 | 2850 | ||
2867 | val = I915_READ(DPCLKA_CFGCR0_ICL); | ||
2868 | clk_enabled = !(val & icl_dpclka_cfgcr0_clk_off(dev_priv, port)); | ||
2869 | |||
2870 | if (!encoder->base.crtc && intel_encoder_is_dp(encoder)) { | 2851 | if (!encoder->base.crtc && intel_encoder_is_dp(encoder)) { |
2871 | u8 pipe_mask; | 2852 | u8 pipe_mask; |
2872 | bool is_mst; | 2853 | bool is_mst; |
@@ -2880,20 +2861,52 @@ void icl_sanitize_encoder_pll_mapping(struct intel_encoder *encoder) | |||
2880 | return; | 2861 | return; |
2881 | } | 2862 | } |
2882 | 2863 | ||
2883 | if (clk_enabled == !!encoder->base.crtc) | 2864 | port_mask = BIT(encoder->port); |
2884 | return; | 2865 | ddi_clk_needed = encoder->base.crtc; |
2885 | 2866 | ||
2886 | /* | 2867 | if (encoder->type == INTEL_OUTPUT_DSI) { |
2887 | * Punt on the case now where clock is disabled, but the encoder is | 2868 | struct intel_encoder *other_encoder; |
2888 | * enabled, something else is really broken then. | ||
2889 | */ | ||
2890 | if (WARN_ON(!clk_enabled)) | ||
2891 | return; | ||
2892 | 2869 | ||
2893 | DRM_NOTE("Port %c is disabled but it has a mapped PLL, unmap it\n", | 2870 | port_mask = intel_dsi_encoder_ports(encoder); |
2894 | port_name(port)); | 2871 | /* |
2895 | val |= icl_dpclka_cfgcr0_clk_off(dev_priv, port); | 2872 | * Sanity check that we haven't incorrectly registered another |
2896 | I915_WRITE(DPCLKA_CFGCR0_ICL, val); | 2873 | * encoder using any of the ports of this DSI encoder. |
2874 | */ | ||
2875 | for_each_intel_encoder(&dev_priv->drm, other_encoder) { | ||
2876 | if (other_encoder == encoder) | ||
2877 | continue; | ||
2878 | |||
2879 | if (WARN_ON(port_mask & BIT(other_encoder->port))) | ||
2880 | return; | ||
2881 | } | ||
2882 | /* | ||
2883 | * DSI ports should have their DDI clock ungated when disabled | ||
2884 | * and gated when enabled. | ||
2885 | */ | ||
2886 | ddi_clk_needed = !encoder->base.crtc; | ||
2887 | } | ||
2888 | |||
2889 | val = I915_READ(DPCLKA_CFGCR0_ICL); | ||
2890 | for_each_port_masked(port, port_mask) { | ||
2891 | bool ddi_clk_ungated = !(val & | ||
2892 | icl_dpclka_cfgcr0_clk_off(dev_priv, | ||
2893 | port)); | ||
2894 | |||
2895 | if (ddi_clk_needed == ddi_clk_ungated) | ||
2896 | continue; | ||
2897 | |||
2898 | /* | ||
2899 | * Punt for now on the case where the clock is gated but the | ||
2900 | * port needs it; something else is really broken then. | ||
2901 | */ | ||
2902 | if (WARN_ON(ddi_clk_needed)) | ||
2903 | continue; | ||
2904 | |||
2905 | DRM_NOTE("Port %c is disabled/in DSI mode with an ungated DDI clock, gate it\n", | ||
2906 | port_name(port)); | ||
2907 | val |= icl_dpclka_cfgcr0_clk_off(dev_priv, port); | ||
2908 | I915_WRITE(DPCLKA_CFGCR0_ICL, val); | ||
2909 | } | ||
2897 | } | 2910 | } |
2898 | 2911 | ||
2899 | static void intel_ddi_clk_select(struct intel_encoder *encoder, | 2912 | static void intel_ddi_clk_select(struct intel_encoder *encoder, |
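The rewritten sanitize pass has to cope with DSI's inverted gating convention; per port, as I read the loop above:

    /*
     * bound to a crtc | DSI encoder | expected DDI clock | on mismatch
     * ----------------+-------------+--------------------+-------------------
     *       yes       |     no      |      ungated       | gated: WARN, punt
     *       no        |     no      |      gated         | ungated: gate it
     *       yes       |     yes     |      gated         | ungated: gate it
     *       no        |     yes     |      ungated       | gated: WARN, punt
     */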
@@ -3096,6 +3109,53 @@ static void icl_program_mg_dp_mode(struct intel_digital_port *intel_dig_port) | |||
3096 | I915_WRITE(MG_DP_MODE(port, 1), ln1); | 3109 | I915_WRITE(MG_DP_MODE(port, 1), ln1); |
3097 | } | 3110 | } |
3098 | 3111 | ||
3112 | static void intel_dp_sink_set_fec_ready(struct intel_dp *intel_dp, | ||
3113 | const struct intel_crtc_state *crtc_state) | ||
3114 | { | ||
3115 | if (!crtc_state->fec_enable) | ||
3116 | return; | ||
3117 | |||
3118 | if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_FEC_CONFIGURATION, DP_FEC_READY) <= 0) | ||
3119 | DRM_DEBUG_KMS("Failed to set FEC_READY in the sink\n"); | ||
3120 | } | ||
3121 | |||
3122 | static void intel_ddi_enable_fec(struct intel_encoder *encoder, | ||
3123 | const struct intel_crtc_state *crtc_state) | ||
3124 | { | ||
3125 | struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); | ||
3126 | enum port port = encoder->port; | ||
3127 | u32 val; | ||
3128 | |||
3129 | if (!crtc_state->fec_enable) | ||
3130 | return; | ||
3131 | |||
3132 | val = I915_READ(DP_TP_CTL(port)); | ||
3133 | val |= DP_TP_CTL_FEC_ENABLE; | ||
3134 | I915_WRITE(DP_TP_CTL(port), val); | ||
3135 | |||
3136 | if (intel_wait_for_register(dev_priv, DP_TP_STATUS(port), | ||
3137 | DP_TP_STATUS_FEC_ENABLE_LIVE, | ||
3138 | DP_TP_STATUS_FEC_ENABLE_LIVE, | ||
3139 | 1)) | ||
3140 | DRM_ERROR("Timed out waiting for FEC Enable Status\n"); | ||
3141 | } | ||
3142 | |||
3143 | static void intel_ddi_disable_fec_state(struct intel_encoder *encoder, | ||
3144 | const struct intel_crtc_state *crtc_state) | ||
3145 | { | ||
3146 | struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); | ||
3147 | enum port port = encoder->port; | ||
3148 | u32 val; | ||
3149 | |||
3150 | if (!crtc_state->fec_enable) | ||
3151 | return; | ||
3152 | |||
3153 | val = I915_READ(DP_TP_CTL(port)); | ||
3154 | val &= ~DP_TP_CTL_FEC_ENABLE; | ||
3155 | I915_WRITE(DP_TP_CTL(port), val); | ||
3156 | POSTING_READ(DP_TP_CTL(port)); | ||
3157 | } | ||
3158 | |||
3099 | static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder, | 3159 | static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder, |
3100 | const struct intel_crtc_state *crtc_state, | 3160 | const struct intel_crtc_state *crtc_state, |
3101 | const struct drm_connector_state *conn_state) | 3161 | const struct drm_connector_state *conn_state) |
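Together with the pre-enable hook below, the FEC bring-up follows the DP 1.4 ordering: arm the sink over DPCD before link training, then switch on the source-side encoder after training and poll until it reports live. Condensed (all names appear in the hunks above and below):

    /* 1: sink ready, before link training (intel_dp_sink_set_fec_ready()) */
    drm_dp_dpcd_writeb(&intel_dp->aux, DP_FEC_CONFIGURATION, DP_FEC_READY);

    /* 2: train the link as usual */
    intel_dp_start_link_train(intel_dp);

    /*
     * 3: source on — intel_ddi_enable_fec() sets DP_TP_CTL_FEC_ENABLE and
     *    polls DP_TP_STATUS for FEC_ENABLE_LIVE (1 ms timeout).
     */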
@@ -3134,14 +3194,21 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder, | |||
3134 | intel_ddi_init_dp_buf_reg(encoder); | 3194 | intel_ddi_init_dp_buf_reg(encoder); |
3135 | if (!is_mst) | 3195 | if (!is_mst) |
3136 | intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON); | 3196 | intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON); |
3197 | intel_dp_sink_set_decompression_state(intel_dp, crtc_state, | ||
3198 | true); | ||
3199 | intel_dp_sink_set_fec_ready(intel_dp, crtc_state); | ||
3137 | intel_dp_start_link_train(intel_dp); | 3200 | intel_dp_start_link_train(intel_dp); |
3138 | if (port != PORT_A || INTEL_GEN(dev_priv) >= 9) | 3201 | if (port != PORT_A || INTEL_GEN(dev_priv) >= 9) |
3139 | intel_dp_stop_link_train(intel_dp); | 3202 | intel_dp_stop_link_train(intel_dp); |
3140 | 3203 | ||
3204 | intel_ddi_enable_fec(encoder, crtc_state); | ||
3205 | |||
3141 | icl_enable_phy_clock_gating(dig_port); | 3206 | icl_enable_phy_clock_gating(dig_port); |
3142 | 3207 | ||
3143 | if (!is_mst) | 3208 | if (!is_mst) |
3144 | intel_ddi_enable_pipe_clock(crtc_state); | 3209 | intel_ddi_enable_pipe_clock(crtc_state); |
3210 | |||
3211 | intel_dsc_enable(encoder, crtc_state); | ||
3145 | } | 3212 | } |
3146 | 3213 | ||
3147 | static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder, | 3214 | static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder, |
@@ -3208,6 +3275,9 @@ static void intel_ddi_pre_enable(struct intel_encoder *encoder, | |||
3208 | 3275 | ||
3209 | WARN_ON(crtc_state->has_pch_encoder); | 3276 | WARN_ON(crtc_state->has_pch_encoder); |
3210 | 3277 | ||
3278 | if (INTEL_GEN(dev_priv) >= 11) | ||
3279 | icl_map_plls_to_ports(encoder, crtc_state); | ||
3280 | |||
3211 | intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); | 3281 | intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); |
3212 | 3282 | ||
3213 | if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) { | 3283 | if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) { |
@@ -3228,7 +3298,8 @@ static void intel_ddi_pre_enable(struct intel_encoder *encoder, | |||
3228 | } | 3298 | } |
3229 | } | 3299 | } |
3230 | 3300 | ||
3231 | static void intel_disable_ddi_buf(struct intel_encoder *encoder) | 3301 | static void intel_disable_ddi_buf(struct intel_encoder *encoder, |
3302 | const struct intel_crtc_state *crtc_state) | ||
3232 | { | 3303 | { |
3233 | struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); | 3304 | struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); |
3234 | enum port port = encoder->port; | 3305 | enum port port = encoder->port; |
@@ -3247,6 +3318,9 @@ static void intel_disable_ddi_buf(struct intel_encoder *encoder) | |||
3247 | val |= DP_TP_CTL_LINK_TRAIN_PAT1; | 3318 | val |= DP_TP_CTL_LINK_TRAIN_PAT1; |
3248 | I915_WRITE(DP_TP_CTL(port), val); | 3319 | I915_WRITE(DP_TP_CTL(port), val); |
3249 | 3320 | ||
3321 | /* Disable source-side FEC */ | ||
3322 | intel_ddi_disable_fec_state(encoder, crtc_state); | ||
3323 | |||
3250 | if (wait) | 3324 | if (wait) |
3251 | intel_wait_ddi_buf_idle(dev_priv, port); | 3325 | intel_wait_ddi_buf_idle(dev_priv, port); |
3252 | } | 3326 | } |
@@ -3270,7 +3344,7 @@ static void intel_ddi_post_disable_dp(struct intel_encoder *encoder, | |||
3270 | intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF); | 3344 | intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF); |
3271 | } | 3345 | } |
3272 | 3346 | ||
3273 | intel_disable_ddi_buf(encoder); | 3347 | intel_disable_ddi_buf(encoder, old_crtc_state); |
3274 | 3348 | ||
3275 | intel_edp_panel_vdd_on(intel_dp); | 3349 | intel_edp_panel_vdd_on(intel_dp); |
3276 | intel_edp_panel_off(intel_dp); | 3350 | intel_edp_panel_off(intel_dp); |
@@ -3293,7 +3367,7 @@ static void intel_ddi_post_disable_hdmi(struct intel_encoder *encoder, | |||
3293 | 3367 | ||
3294 | intel_ddi_disable_pipe_clock(old_crtc_state); | 3368 | intel_ddi_disable_pipe_clock(old_crtc_state); |
3295 | 3369 | ||
3296 | intel_disable_ddi_buf(encoder); | 3370 | intel_disable_ddi_buf(encoder, old_crtc_state); |
3297 | 3371 | ||
3298 | intel_display_power_put(dev_priv, dig_port->ddi_io_power_domain); | 3372 | intel_display_power_put(dev_priv, dig_port->ddi_io_power_domain); |
3299 | 3373 | ||
@@ -3306,6 +3380,8 @@ static void intel_ddi_post_disable(struct intel_encoder *encoder, | |||
3306 | const struct intel_crtc_state *old_crtc_state, | 3380 | const struct intel_crtc_state *old_crtc_state, |
3307 | const struct drm_connector_state *old_conn_state) | 3381 | const struct drm_connector_state *old_conn_state) |
3308 | { | 3382 | { |
3383 | struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); | ||
3384 | |||
3309 | /* | 3385 | /* |
3310 | * When called from DP MST code: | 3386 | * When called from DP MST code: |
3311 | * - old_conn_state will be NULL | 3387 | * - old_conn_state will be NULL |
@@ -3325,6 +3401,9 @@ static void intel_ddi_post_disable(struct intel_encoder *encoder, | |||
3325 | else | 3401 | else |
3326 | intel_ddi_post_disable_dp(encoder, | 3402 | intel_ddi_post_disable_dp(encoder, |
3327 | old_crtc_state, old_conn_state); | 3403 | old_crtc_state, old_conn_state); |
3404 | |||
3405 | if (INTEL_GEN(dev_priv) >= 11) | ||
3406 | icl_unmap_plls_to_ports(encoder); | ||
3328 | } | 3407 | } |
3329 | 3408 | ||
3330 | void intel_ddi_fdi_post_disable(struct intel_encoder *encoder, | 3409 | void intel_ddi_fdi_post_disable(struct intel_encoder *encoder, |
@@ -3344,7 +3423,7 @@ void intel_ddi_fdi_post_disable(struct intel_encoder *encoder, | |||
3344 | val &= ~FDI_RX_ENABLE; | 3423 | val &= ~FDI_RX_ENABLE; |
3345 | I915_WRITE(FDI_RX_CTL(PIPE_A), val); | 3424 | I915_WRITE(FDI_RX_CTL(PIPE_A), val); |
3346 | 3425 | ||
3347 | intel_disable_ddi_buf(encoder); | 3426 | intel_disable_ddi_buf(encoder, old_crtc_state); |
3348 | intel_ddi_clk_disable(encoder); | 3427 | intel_ddi_clk_disable(encoder); |
3349 | 3428 | ||
3350 | val = I915_READ(FDI_RX_MISC(PIPE_A)); | 3429 | val = I915_READ(FDI_RX_MISC(PIPE_A)); |
@@ -3491,6 +3570,9 @@ static void intel_disable_ddi_dp(struct intel_encoder *encoder, | |||
3491 | intel_edp_drrs_disable(intel_dp, old_crtc_state); | 3570 | intel_edp_drrs_disable(intel_dp, old_crtc_state); |
3492 | intel_psr_disable(intel_dp, old_crtc_state); | 3571 | intel_psr_disable(intel_dp, old_crtc_state); |
3493 | intel_edp_backlight_off(old_conn_state); | 3572 | intel_edp_backlight_off(old_conn_state); |
3573 | /* Disable decompression in the DP sink */ | ||
3574 | intel_dp_sink_set_decompression_state(intel_dp, old_crtc_state, | ||
3575 | false); | ||
3494 | } | 3576 | } |
3495 | 3577 | ||
3496 | static void intel_disable_ddi_hdmi(struct intel_encoder *encoder, | 3578 | static void intel_disable_ddi_hdmi(struct intel_encoder *encoder, |
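intel_dp_sink_set_decompression_state() is added elsewhere in this pull; presumably it toggles the sink's DSC decoder over DPCD roughly like this (DP_DSC_ENABLE and DP_DECOMPRESSION_EN are standard drm_dp_helper.h definitions; the helper's exact body is not taken from this diff):

    /* assumed shape of the helper, for illustration only */
    static void example_sink_set_decompression(struct intel_dp *intel_dp,
                                               bool enable)
    {
            if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_DSC_ENABLE,
                                   enable ? DP_DECOMPRESSION_EN : 0) < 0)
                    DRM_DEBUG_KMS("Failed to %s sink decompression\n",
                                  enable ? "enable" : "disable");
    }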
diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c index ceecb5bd5226..1e56319334f3 100644 --- a/drivers/gpu/drm/i915/intel_device_info.c +++ b/drivers/gpu/drm/i915/intel_device_info.c | |||
@@ -77,6 +77,10 @@ void intel_device_info_dump_flags(const struct intel_device_info *info, | |||
77 | #define PRINT_FLAG(name) drm_printf(p, "%s: %s\n", #name, yesno(info->name)); | 77 | #define PRINT_FLAG(name) drm_printf(p, "%s: %s\n", #name, yesno(info->name)); |
78 | DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG); | 78 | DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG); |
79 | #undef PRINT_FLAG | 79 | #undef PRINT_FLAG |
80 | |||
81 | #define PRINT_FLAG(name) drm_printf(p, "%s: %s\n", #name, yesno(info->display.name)); | ||
82 | DEV_INFO_DISPLAY_FOR_EACH_FLAG(PRINT_FLAG); | ||
83 | #undef PRINT_FLAG | ||
80 | } | 84 | } |
81 | 85 | ||
82 | static void sseu_dump(const struct sseu_dev_info *sseu, struct drm_printer *p) | 86 | static void sseu_dump(const struct sseu_dev_info *sseu, struct drm_printer *p) |
@@ -782,7 +786,7 @@ void intel_device_info_runtime_init(struct intel_device_info *info) | |||
782 | if (i915_modparams.disable_display) { | 786 | if (i915_modparams.disable_display) { |
783 | DRM_INFO("Display disabled (module parameter)\n"); | 787 | DRM_INFO("Display disabled (module parameter)\n"); |
784 | info->num_pipes = 0; | 788 | info->num_pipes = 0; |
785 | } else if (info->num_pipes > 0 && | 789 | } else if (HAS_DISPLAY(dev_priv) && |
786 | (IS_GEN7(dev_priv) || IS_GEN8(dev_priv)) && | 790 | (IS_GEN7(dev_priv) || IS_GEN8(dev_priv)) && |
787 | HAS_PCH_SPLIT(dev_priv)) { | 791 | HAS_PCH_SPLIT(dev_priv)) { |
788 | u32 fuse_strap = I915_READ(FUSE_STRAP); | 792 | u32 fuse_strap = I915_READ(FUSE_STRAP); |
@@ -807,7 +811,7 @@ void intel_device_info_runtime_init(struct intel_device_info *info) | |||
807 | DRM_INFO("PipeC fused off\n"); | 811 | DRM_INFO("PipeC fused off\n"); |
808 | info->num_pipes -= 1; | 812 | info->num_pipes -= 1; |
809 | } | 813 | } |
810 | } else if (info->num_pipes > 0 && IS_GEN9(dev_priv)) { | 814 | } else if (HAS_DISPLAY(dev_priv) && IS_GEN9(dev_priv)) { |
811 | u32 dfsm = I915_READ(SKL_DFSM); | 815 | u32 dfsm = I915_READ(SKL_DFSM); |
812 | u8 disabled_mask = 0; | 816 | u8 disabled_mask = 0; |
813 | bool invalid; | 817 | bool invalid; |
diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h index 88f97210dc49..1caf24e2cf0b 100644 --- a/drivers/gpu/drm/i915/intel_device_info.h +++ b/drivers/gpu/drm/i915/intel_device_info.h | |||
@@ -89,35 +89,38 @@ enum intel_ppgtt { | |||
89 | func(is_alpha_support); \ | 89 | func(is_alpha_support); \ |
90 | /* Keep has_* in alphabetical order */ \ | 90 | /* Keep has_* in alphabetical order */ \ |
91 | func(has_64bit_reloc); \ | 91 | func(has_64bit_reloc); \ |
92 | func(has_csr); \ | ||
93 | func(has_ddi); \ | ||
94 | func(has_dp_mst); \ | ||
95 | func(has_reset_engine); \ | 92 | func(has_reset_engine); \ |
96 | func(has_fbc); \ | ||
97 | func(has_fpga_dbg); \ | 93 | func(has_fpga_dbg); \ |
98 | func(has_gmch_display); \ | ||
99 | func(has_guc); \ | 94 | func(has_guc); \ |
100 | func(has_guc_ct); \ | 95 | func(has_guc_ct); \ |
101 | func(has_hotplug); \ | ||
102 | func(has_l3_dpf); \ | 96 | func(has_l3_dpf); \ |
103 | func(has_llc); \ | 97 | func(has_llc); \ |
104 | func(has_logical_ring_contexts); \ | 98 | func(has_logical_ring_contexts); \ |
105 | func(has_logical_ring_elsq); \ | 99 | func(has_logical_ring_elsq); \ |
106 | func(has_logical_ring_preemption); \ | 100 | func(has_logical_ring_preemption); \ |
107 | func(has_overlay); \ | ||
108 | func(has_pooled_eu); \ | 101 | func(has_pooled_eu); \ |
109 | func(has_psr); \ | ||
110 | func(has_rc6); \ | 102 | func(has_rc6); \ |
111 | func(has_rc6p); \ | 103 | func(has_rc6p); \ |
112 | func(has_runtime_pm); \ | 104 | func(has_runtime_pm); \ |
113 | func(has_snoop); \ | 105 | func(has_snoop); \ |
114 | func(has_coherent_ggtt); \ | 106 | func(has_coherent_ggtt); \ |
115 | func(unfenced_needs_alignment); \ | 107 | func(unfenced_needs_alignment); \ |
108 | func(hws_needs_physical); | ||
109 | |||
110 | #define DEV_INFO_DISPLAY_FOR_EACH_FLAG(func) \ | ||
111 | /* Keep in alphabetical order */ \ | ||
116 | func(cursor_needs_physical); \ | 112 | func(cursor_needs_physical); \ |
117 | func(hws_needs_physical); \ | 113 | func(has_csr); \ |
114 | func(has_ddi); \ | ||
115 | func(has_dp_mst); \ | ||
116 | func(has_fbc); \ | ||
117 | func(has_gmch_display); \ | ||
118 | func(has_hotplug); \ | ||
119 | func(has_ipc); \ | ||
120 | func(has_overlay); \ | ||
121 | func(has_psr); \ | ||
118 | func(overlay_needs_physical); \ | 122 | func(overlay_needs_physical); \ |
119 | func(supports_tv); \ | 123 | func(supports_tv); |
120 | func(has_ipc); | ||
121 | 124 | ||
122 | #define GEN_MAX_SLICES (6) /* CNL upper bound */ | 125 | #define GEN_MAX_SLICES (6) /* CNL upper bound */ |
123 | #define GEN_MAX_SUBSLICES (8) /* ICL upper bound */ | 126 | #define GEN_MAX_SUBSLICES (8) /* ICL upper bound */ |
@@ -172,6 +175,13 @@ struct intel_device_info { | |||
172 | #define DEFINE_FLAG(name) u8 name:1 | 175 | #define DEFINE_FLAG(name) u8 name:1 |
173 | DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG); | 176 | DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG); |
174 | #undef DEFINE_FLAG | 177 | #undef DEFINE_FLAG |
178 | |||
179 | struct { | ||
180 | #define DEFINE_FLAG(name) u8 name:1 | ||
181 | DEV_INFO_DISPLAY_FOR_EACH_FLAG(DEFINE_FLAG); | ||
182 | #undef DEFINE_FLAG | ||
183 | } display; | ||
184 | |||
175 | u16 ddb_size; /* in blocks */ | 185 | u16 ddb_size; /* in blocks */ |
176 | 186 | ||
177 | /* Register offsets for the various display pipes and transcoders */ | 187 | /* Register offsets for the various display pipes and transcoders */ |
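The split keeps one X-macro list per domain, each expanded twice: once into 1-bit fields and once into the flag printer. What the preprocessor produces for, e.g., has_ddi:

    /* inside struct intel_device_info, from DEFINE_FLAG: */
    struct {
            u8 cursor_needs_physical:1;
            u8 has_csr:1;
            u8 has_ddi:1;
            /* ... one bit per DEV_INFO_DISPLAY_FOR_EACH_FLAG entry ... */
    } display;

    /* in intel_device_info_dump_flags(), from PRINT_FLAG: */
    drm_printf(p, "has_ddi: %s\n", yesno(info->display.has_ddi));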
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 812ec5ae5c7b..07c861884c70 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
@@ -2341,10 +2341,26 @@ static int intel_fb_offset_to_xy(int *x, int *y, | |||
2341 | int color_plane) | 2341 | int color_plane) |
2342 | { | 2342 | { |
2343 | struct drm_i915_private *dev_priv = to_i915(fb->dev); | 2343 | struct drm_i915_private *dev_priv = to_i915(fb->dev); |
2344 | unsigned int height; | ||
2344 | 2345 | ||
2345 | if (fb->modifier != DRM_FORMAT_MOD_LINEAR && | 2346 | if (fb->modifier != DRM_FORMAT_MOD_LINEAR && |
2346 | fb->offsets[color_plane] % intel_tile_size(dev_priv)) | 2347 | fb->offsets[color_plane] % intel_tile_size(dev_priv)) { |
2348 | DRM_DEBUG_KMS("Misaligned offset 0x%08x for color plane %d\n", | ||
2349 | fb->offsets[color_plane], color_plane); | ||
2347 | return -EINVAL; | 2350 | return -EINVAL; |
2351 | } | ||
2352 | |||
2353 | height = drm_framebuffer_plane_height(fb->height, fb, color_plane); | ||
2354 | height = ALIGN(height, intel_tile_height(fb, color_plane)); | ||
2355 | |||
2356 | /* Catch potential overflows early */ | ||
2357 | if (add_overflows_t(u32, mul_u32_u32(height, fb->pitches[color_plane]), | ||
2358 | fb->offsets[color_plane])) { | ||
2359 | DRM_DEBUG_KMS("Bad offset 0x%08x or pitch %d for color plane %d\n", | ||
2360 | fb->offsets[color_plane], fb->pitches[color_plane], | ||
2361 | color_plane); | ||
2362 | return -ERANGE; | ||
2363 | } | ||
2348 | 2364 | ||
2349 | *x = 0; | 2365 | *x = 0; |
2350 | *y = 0; | 2366 | *y = 0; |
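The new -ERANGE path rejects a framebuffer whose plane extent (aligned height × pitch + offset) would wrap a u32 before any offset math consumes it; mul_u32_u32() widens the product to 64 bits, and add_overflows_t() is, presumably, the usual __builtin_add_overflow-style wrapper. An equivalent freestanding check:

    #include <stdbool.h>
    #include <stdint.h>

    /* sketch: true if height * pitch + offset does not fit in a u32 */
    static bool example_fb_plane_overflows(uint32_t height, uint32_t pitch,
                                           uint32_t offset)
    {
            uint64_t extent = (uint64_t)height * pitch;  /* mul_u32_u32() */

            return extent + offset > (uint64_t)UINT32_MAX;
    }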
@@ -2767,7 +2783,7 @@ static void intel_plane_disable_noatomic(struct intel_crtc *crtc, | |||
2767 | intel_pre_disable_primary_noatomic(&crtc->base); | 2783 | intel_pre_disable_primary_noatomic(&crtc->base); |
2768 | 2784 | ||
2769 | trace_intel_disable_plane(&plane->base, crtc); | 2785 | trace_intel_disable_plane(&plane->base, crtc); |
2770 | plane->disable_plane(plane, crtc); | 2786 | plane->disable_plane(plane, crtc_state); |
2771 | } | 2787 | } |
2772 | 2788 | ||
2773 | static void | 2789 | static void |
@@ -3315,7 +3331,6 @@ static void i9xx_update_plane(struct intel_plane *plane, | |||
3315 | enum i9xx_plane_id i9xx_plane = plane->i9xx_plane; | 3331 | enum i9xx_plane_id i9xx_plane = plane->i9xx_plane; |
3316 | u32 linear_offset; | 3332 | u32 linear_offset; |
3317 | u32 dspcntr = plane_state->ctl; | 3333 | u32 dspcntr = plane_state->ctl; |
3318 | i915_reg_t reg = DSPCNTR(i9xx_plane); | ||
3319 | int x = plane_state->color_plane[0].x; | 3334 | int x = plane_state->color_plane[0].x; |
3320 | int y = plane_state->color_plane[0].y; | 3335 | int y = plane_state->color_plane[0].y; |
3321 | unsigned long irqflags; | 3336 | unsigned long irqflags; |
@@ -3330,47 +3345,51 @@ static void i9xx_update_plane(struct intel_plane *plane, | |||
3330 | 3345 | ||
3331 | spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); | 3346 | spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); |
3332 | 3347 | ||
3348 | I915_WRITE_FW(DSPSTRIDE(i9xx_plane), plane_state->color_plane[0].stride); | ||
3349 | |||
3333 | if (INTEL_GEN(dev_priv) < 4) { | 3350 | if (INTEL_GEN(dev_priv) < 4) { |
3334 | /* pipesrc and dspsize control the size that is scaled from, | 3351 | /* pipesrc and dspsize control the size that is scaled from, |
3335 | * which should always be the user's requested size. | 3352 | * which should always be the user's requested size. |
3336 | */ | 3353 | */ |
3354 | I915_WRITE_FW(DSPPOS(i9xx_plane), 0); | ||
3337 | I915_WRITE_FW(DSPSIZE(i9xx_plane), | 3355 | I915_WRITE_FW(DSPSIZE(i9xx_plane), |
3338 | ((crtc_state->pipe_src_h - 1) << 16) | | 3356 | ((crtc_state->pipe_src_h - 1) << 16) | |
3339 | (crtc_state->pipe_src_w - 1)); | 3357 | (crtc_state->pipe_src_w - 1)); |
3340 | I915_WRITE_FW(DSPPOS(i9xx_plane), 0); | ||
3341 | } else if (IS_CHERRYVIEW(dev_priv) && i9xx_plane == PLANE_B) { | 3358 | } else if (IS_CHERRYVIEW(dev_priv) && i9xx_plane == PLANE_B) { |
3359 | I915_WRITE_FW(PRIMPOS(i9xx_plane), 0); | ||
3342 | I915_WRITE_FW(PRIMSIZE(i9xx_plane), | 3360 | I915_WRITE_FW(PRIMSIZE(i9xx_plane), |
3343 | ((crtc_state->pipe_src_h - 1) << 16) | | 3361 | ((crtc_state->pipe_src_h - 1) << 16) | |
3344 | (crtc_state->pipe_src_w - 1)); | 3362 | (crtc_state->pipe_src_w - 1)); |
3345 | I915_WRITE_FW(PRIMPOS(i9xx_plane), 0); | ||
3346 | I915_WRITE_FW(PRIMCNSTALPHA(i9xx_plane), 0); | 3363 | I915_WRITE_FW(PRIMCNSTALPHA(i9xx_plane), 0); |
3347 | } | 3364 | } |
3348 | 3365 | ||
3349 | I915_WRITE_FW(reg, dspcntr); | ||
3350 | |||
3351 | I915_WRITE_FW(DSPSTRIDE(i9xx_plane), plane_state->color_plane[0].stride); | ||
3352 | if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { | 3366 | if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { |
3353 | I915_WRITE_FW(DSPSURF(i9xx_plane), | ||
3354 | intel_plane_ggtt_offset(plane_state) + | ||
3355 | dspaddr_offset); | ||
3356 | I915_WRITE_FW(DSPOFFSET(i9xx_plane), (y << 16) | x); | 3367 | I915_WRITE_FW(DSPOFFSET(i9xx_plane), (y << 16) | x); |
3357 | } else if (INTEL_GEN(dev_priv) >= 4) { | 3368 | } else if (INTEL_GEN(dev_priv) >= 4) { |
3369 | I915_WRITE_FW(DSPLINOFF(i9xx_plane), linear_offset); | ||
3370 | I915_WRITE_FW(DSPTILEOFF(i9xx_plane), (y << 16) | x); | ||
3371 | } | ||
3372 | |||
3373 | /* | ||
3374 | * The control register self-arms if the plane was previously | ||
3375 | * disabled. Try to make the plane enable atomic by writing | ||
3376 | * the control register just before the surface register. | ||
3377 | */ | ||
3378 | I915_WRITE_FW(DSPCNTR(i9xx_plane), dspcntr); | ||
3379 | if (INTEL_GEN(dev_priv) >= 4) | ||
3358 | I915_WRITE_FW(DSPSURF(i9xx_plane), | 3380 | I915_WRITE_FW(DSPSURF(i9xx_plane), |
3359 | intel_plane_ggtt_offset(plane_state) + | 3381 | intel_plane_ggtt_offset(plane_state) + |
3360 | dspaddr_offset); | 3382 | dspaddr_offset); |
3361 | I915_WRITE_FW(DSPTILEOFF(i9xx_plane), (y << 16) | x); | 3383 | else |
3362 | I915_WRITE_FW(DSPLINOFF(i9xx_plane), linear_offset); | ||
3363 | } else { | ||
3364 | I915_WRITE_FW(DSPADDR(i9xx_plane), | 3384 | I915_WRITE_FW(DSPADDR(i9xx_plane), |
3365 | intel_plane_ggtt_offset(plane_state) + | 3385 | intel_plane_ggtt_offset(plane_state) + |
3366 | dspaddr_offset); | 3386 | dspaddr_offset); |
3367 | } | ||
3368 | 3387 | ||
3369 | spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); | 3388 | spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); |
3370 | } | 3389 | } |
3371 | 3390 | ||
3372 | static void i9xx_disable_plane(struct intel_plane *plane, | 3391 | static void i9xx_disable_plane(struct intel_plane *plane, |
3373 | struct intel_crtc *crtc) | 3392 | const struct intel_crtc_state *crtc_state) |
3374 | { | 3393 | { |
3375 | struct drm_i915_private *dev_priv = to_i915(plane->base.dev); | 3394 | struct drm_i915_private *dev_priv = to_i915(plane->base.dev); |
3376 | enum i9xx_plane_id i9xx_plane = plane->i9xx_plane; | 3395 | enum i9xx_plane_id i9xx_plane = plane->i9xx_plane; |
@@ -3456,6 +3475,21 @@ static void skl_detach_scalers(const struct intel_crtc_state *crtc_state) | |||
3456 | } | 3475 | } |
3457 | } | 3476 | } |
3458 | 3477 | ||
3478 | static unsigned int skl_plane_stride_mult(const struct drm_framebuffer *fb, | ||
3479 | int color_plane, unsigned int rotation) | ||
3480 | { | ||
3481 | /* | ||
3482 | * The stride is expressed either in 64-byte chunks for linear | ||
3483 | * buffers or in number of tiles for tiled buffers. | ||
3484 | */ | ||
3485 | if (fb->modifier == DRM_FORMAT_MOD_LINEAR) | ||
3486 | return 64; | ||
3487 | else if (drm_rotation_90_or_270(rotation)) | ||
3488 | return intel_tile_height(fb, color_plane); | ||
3489 | else | ||
3490 | return intel_tile_width_bytes(fb, color_plane); | ||
3491 | } | ||
3492 | |||
3459 | u32 skl_plane_stride(const struct intel_plane_state *plane_state, | 3493 | u32 skl_plane_stride(const struct intel_plane_state *plane_state, |
3460 | int color_plane) | 3494 | int color_plane) |
3461 | { | 3495 | { |
@@ -3466,16 +3500,7 @@ u32 skl_plane_stride(const struct intel_plane_state *plane_state, | |||
3466 | if (color_plane >= fb->format->num_planes) | 3500 | if (color_plane >= fb->format->num_planes) |
3467 | return 0; | 3501 | return 0; |
3468 | 3502 | ||
3469 | /* | 3503 | return stride / skl_plane_stride_mult(fb, color_plane, rotation); |
3470 | * The stride is either expressed as a multiple of 64 bytes chunks for | ||
3471 | * linear buffers or in number of tiles for tiled buffers. | ||
3472 | */ | ||
3473 | if (drm_rotation_90_or_270(rotation)) | ||
3474 | stride /= intel_tile_height(fb, color_plane); | ||
3475 | else | ||
3476 | stride /= intel_fb_stride_alignment(fb, color_plane); | ||
3477 | |||
3478 | return stride; | ||
3479 | } | 3504 | } |
3480 | 3505 | ||
3481 | static u32 skl_plane_ctl_format(uint32_t pixel_format) | 3506 | static u32 skl_plane_ctl_format(uint32_t pixel_format) |
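PLANE_STRIDE is programmed in units, not bytes, and the factored-out helper now also serves the inverse direction in skylake_get_initial_plane_config() below. A worked example for a 3840-pixel-wide XRGB8888 plane, assuming the usual gen9 512-byte-wide X-tile:

    static void example_stride_units(void)
    {
            unsigned int pitch = 15360;        /* 3840 px * 4 bytes (XRGB8888) */

            unsigned int linear = pitch / 64;  /* = 240: 64-byte chunks       */
            unsigned int xtiled = pitch / 512; /* = 30: X tiles across a row  */

            (void)linear;
            (void)xtiled;
    }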
@@ -5403,23 +5428,32 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state, | |||
5403 | intel_update_watermarks(crtc); | 5428 | intel_update_watermarks(crtc); |
5404 | } | 5429 | } |
5405 | 5430 | ||
5406 | static void intel_crtc_disable_planes(struct intel_crtc *crtc, unsigned plane_mask) | 5431 | static void intel_crtc_disable_planes(struct intel_atomic_state *state, |
5432 | struct intel_crtc *crtc) | ||
5407 | { | 5433 | { |
5408 | struct drm_device *dev = crtc->base.dev; | 5434 | struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); |
5435 | const struct intel_crtc_state *new_crtc_state = | ||
5436 | intel_atomic_get_new_crtc_state(state, crtc); | ||
5437 | unsigned int update_mask = new_crtc_state->update_planes; | ||
5438 | const struct intel_plane_state *old_plane_state; | ||
5409 | struct intel_plane *plane; | 5439 | struct intel_plane *plane; |
5410 | unsigned fb_bits = 0; | 5440 | unsigned fb_bits = 0; |
5441 | int i; | ||
5411 | 5442 | ||
5412 | intel_crtc_dpms_overlay_disable(crtc); | 5443 | intel_crtc_dpms_overlay_disable(crtc); |
5413 | 5444 | ||
5414 | for_each_intel_plane_on_crtc(dev, crtc, plane) { | 5445 | for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) { |
5415 | if (plane_mask & BIT(plane->id)) { | 5446 | if (crtc->pipe != plane->pipe || |
5416 | plane->disable_plane(plane, crtc); | 5447 | !(update_mask & BIT(plane->id))) |
5448 | continue; | ||
5417 | 5449 | ||
5450 | plane->disable_plane(plane, new_crtc_state); | ||
5451 | |||
5452 | if (old_plane_state->base.visible) | ||
5418 | fb_bits |= plane->frontbuffer_bit; | 5453 | fb_bits |= plane->frontbuffer_bit; |
5419 | } | ||
5420 | } | 5454 | } |
5421 | 5455 | ||
5422 | intel_frontbuffer_flip(to_i915(dev), fb_bits); | 5456 | intel_frontbuffer_flip(dev_priv, fb_bits); |
5423 | } | 5457 | } |
5424 | 5458 | ||
5425 | static void intel_encoders_pre_pll_enable(struct drm_crtc *crtc, | 5459 | static void intel_encoders_pre_pll_enable(struct drm_crtc *crtc, |
@@ -5692,9 +5726,6 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config, | |||
5692 | if (pipe_config->shared_dpll) | 5726 | if (pipe_config->shared_dpll) |
5693 | intel_enable_shared_dpll(pipe_config); | 5727 | intel_enable_shared_dpll(pipe_config); |
5694 | 5728 | ||
5695 | if (INTEL_GEN(dev_priv) >= 11) | ||
5696 | icl_map_plls_to_ports(crtc, pipe_config, old_state); | ||
5697 | |||
5698 | intel_encoders_pre_enable(crtc, pipe_config, old_state); | 5729 | intel_encoders_pre_enable(crtc, pipe_config, old_state); |
5699 | 5730 | ||
5700 | if (intel_crtc_has_dp_encoder(pipe_config)) | 5731 | if (intel_crtc_has_dp_encoder(pipe_config)) |
@@ -5889,6 +5920,8 @@ static void haswell_crtc_disable(struct intel_crtc_state *old_crtc_state, | |||
5889 | if (!transcoder_is_dsi(cpu_transcoder)) | 5920 | if (!transcoder_is_dsi(cpu_transcoder)) |
5890 | intel_ddi_disable_transcoder_func(old_crtc_state); | 5921 | intel_ddi_disable_transcoder_func(old_crtc_state); |
5891 | 5922 | ||
5923 | intel_dsc_disable(old_crtc_state); | ||
5924 | |||
5892 | if (INTEL_GEN(dev_priv) >= 9) | 5925 | if (INTEL_GEN(dev_priv) >= 9) |
5893 | skylake_scaler_disable(intel_crtc); | 5926 | skylake_scaler_disable(intel_crtc); |
5894 | else | 5927 | else |
@@ -5896,9 +5929,6 @@ static void haswell_crtc_disable(struct intel_crtc_state *old_crtc_state, | |||
5896 | 5929 | ||
5897 | intel_encoders_post_disable(crtc, old_crtc_state, old_state); | 5930 | intel_encoders_post_disable(crtc, old_crtc_state, old_state); |
5898 | 5931 | ||
5899 | if (INTEL_GEN(dev_priv) >= 11) | ||
5900 | icl_unmap_plls_to_ports(crtc, old_crtc_state, old_state); | ||
5901 | |||
5902 | intel_encoders_post_pll_disable(crtc, old_crtc_state, old_state); | 5932 | intel_encoders_post_pll_disable(crtc, old_crtc_state, old_state); |
5903 | } | 5933 | } |
5904 | 5934 | ||
@@ -6724,7 +6754,7 @@ static void compute_m_n(unsigned int m, unsigned int n, | |||
6724 | } | 6754 | } |
6725 | 6755 | ||
6726 | void | 6756 | void |
6727 | intel_link_compute_m_n(int bits_per_pixel, int nlanes, | 6757 | intel_link_compute_m_n(u16 bits_per_pixel, int nlanes, |
6728 | int pixel_clock, int link_clock, | 6758 | int pixel_clock, int link_clock, |
6729 | struct intel_link_m_n *m_n, | 6759 | struct intel_link_m_n *m_n, |
6730 | bool constant_n) | 6760 | bool constant_n) |
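The switch to a u16 bits_per_pixel lines up with the DSC work in this pull, which presumably routes the compressed bpp through this helper. For reference, the two ratios the function reduces, as I read compute_m_n() and its callers (a sketch, not the exact code):

    /* data ratio: fraction of link bandwidth carrying pixel data */
    data_m = bits_per_pixel * pixel_clock;
    data_n = nlanes * link_clock * 8;   /* 8 bits per byte per lane */

    /* link ratio: stream clock vs. link symbol clock */
    link_m = pixel_clock;
    link_n = link_clock;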
@@ -8939,7 +8969,7 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc, | |||
8939 | fb->width = ((val >> 0) & 0x1fff) + 1; | 8969 | fb->width = ((val >> 0) & 0x1fff) + 1; |
8940 | 8970 | ||
8941 | val = I915_READ(PLANE_STRIDE(pipe, plane_id)); | 8971 | val = I915_READ(PLANE_STRIDE(pipe, plane_id)); |
8942 | stride_mult = intel_fb_stride_alignment(fb, 0); | 8972 | stride_mult = skl_plane_stride_mult(fb, 0, DRM_MODE_ROTATE_0); |
8943 | fb->pitches[0] = (val & 0x3ff) * stride_mult; | 8973 | fb->pitches[0] = (val & 0x3ff) * stride_mult; |
8944 | 8974 | ||
8945 | aligned_height = intel_fb_align_height(fb, 0, fb->height); | 8975 | aligned_height = intel_fb_align_height(fb, 0, fb->height); |
@@ -9303,10 +9333,12 @@ void hsw_disable_pc8(struct drm_i915_private *dev_priv) | |||
9303 | static int haswell_crtc_compute_clock(struct intel_crtc *crtc, | 9333 | static int haswell_crtc_compute_clock(struct intel_crtc *crtc, |
9304 | struct intel_crtc_state *crtc_state) | 9334 | struct intel_crtc_state *crtc_state) |
9305 | { | 9335 | { |
9336 | struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); | ||
9306 | struct intel_atomic_state *state = | 9337 | struct intel_atomic_state *state = |
9307 | to_intel_atomic_state(crtc_state->base.state); | 9338 | to_intel_atomic_state(crtc_state->base.state); |
9308 | 9339 | ||
9309 | if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI)) { | 9340 | if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI) || |
9341 | IS_ICELAKE(dev_priv)) { | ||
9310 | struct intel_encoder *encoder = | 9342 | struct intel_encoder *encoder = |
9311 | intel_get_crtc_new_encoder(state, crtc_state); | 9343 | intel_get_crtc_new_encoder(state, crtc_state); |
9312 | 9344 | ||
@@ -9444,11 +9476,18 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc, | |||
9444 | struct drm_device *dev = crtc->base.dev; | 9476 | struct drm_device *dev = crtc->base.dev; |
9445 | struct drm_i915_private *dev_priv = to_i915(dev); | 9477 | struct drm_i915_private *dev_priv = to_i915(dev); |
9446 | enum intel_display_power_domain power_domain; | 9478 | enum intel_display_power_domain power_domain; |
9479 | unsigned long panel_transcoder_mask = BIT(TRANSCODER_EDP); | ||
9480 | unsigned long enabled_panel_transcoders = 0; | ||
9481 | enum transcoder panel_transcoder; | ||
9447 | u32 tmp; | 9482 | u32 tmp; |
9448 | 9483 | ||
9484 | if (IS_ICELAKE(dev_priv)) | ||
9485 | panel_transcoder_mask |= | ||
9486 | BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1); | ||
9487 | |||
9449 | /* | 9488 | /* |
9450 | * The pipe->transcoder mapping is fixed with the exception of the eDP | 9489 | * The pipe->transcoder mapping is fixed with the exception of the eDP |
9451 | * transcoder handled below. | 9490 | * and DSI transcoders handled below. |
9452 | */ | 9491 | */ |
9453 | pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe; | 9492 | pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe; |
9454 | 9493 | ||
@@ -9456,29 +9495,49 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc, | |||
9456 | * XXX: Do intel_display_power_get_if_enabled before reading this (for | 9495 | * XXX: Do intel_display_power_get_if_enabled before reading this (for |
9457 | * consistency and less surprising code; it's in always-on power). | 9496 | * consistency and less surprising code; it's in always-on power). |
9458 | */ | 9497 | */ |
9459 | tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP)); | 9498 | for_each_set_bit(panel_transcoder, &panel_transcoder_mask, 32) { |
9460 | if (tmp & TRANS_DDI_FUNC_ENABLE) { | 9499 | enum pipe trans_pipe; |
9461 | enum pipe trans_edp_pipe; | 9500 | |
9501 | tmp = I915_READ(TRANS_DDI_FUNC_CTL(panel_transcoder)); | ||
9502 | if (!(tmp & TRANS_DDI_FUNC_ENABLE)) | ||
9503 | continue; | ||
9504 | |||
9505 | /* | ||
9506 | * Log all enabled ones, only use the first one. | ||
9507 | * | ||
9508 | * FIXME: This won't work for two separate DSI displays. | ||
9509 | */ | ||
9510 | enabled_panel_transcoders |= BIT(panel_transcoder); | ||
9511 | if (enabled_panel_transcoders != BIT(panel_transcoder)) | ||
9512 | continue; | ||
9513 | |||
9462 | switch (tmp & TRANS_DDI_EDP_INPUT_MASK) { | 9514 | switch (tmp & TRANS_DDI_EDP_INPUT_MASK) { |
9463 | default: | 9515 | default: |
9464 | WARN(1, "unknown pipe linked to edp transcoder\n"); | 9516 | WARN(1, "unknown pipe linked to transcoder %s\n", |
9517 | transcoder_name(panel_transcoder)); | ||
9465 | /* fall through */ | 9518 | /* fall through */ |
9466 | case TRANS_DDI_EDP_INPUT_A_ONOFF: | 9519 | case TRANS_DDI_EDP_INPUT_A_ONOFF: |
9467 | case TRANS_DDI_EDP_INPUT_A_ON: | 9520 | case TRANS_DDI_EDP_INPUT_A_ON: |
9468 | trans_edp_pipe = PIPE_A; | 9521 | trans_pipe = PIPE_A; |
9469 | break; | 9522 | break; |
9470 | case TRANS_DDI_EDP_INPUT_B_ONOFF: | 9523 | case TRANS_DDI_EDP_INPUT_B_ONOFF: |
9471 | trans_edp_pipe = PIPE_B; | 9524 | trans_pipe = PIPE_B; |
9472 | break; | 9525 | break; |
9473 | case TRANS_DDI_EDP_INPUT_C_ONOFF: | 9526 | case TRANS_DDI_EDP_INPUT_C_ONOFF: |
9474 | trans_edp_pipe = PIPE_C; | 9527 | trans_pipe = PIPE_C; |
9475 | break; | 9528 | break; |
9476 | } | 9529 | } |
9477 | 9530 | ||
9478 | if (trans_edp_pipe == crtc->pipe) | 9531 | if (trans_pipe == crtc->pipe) |
9479 | pipe_config->cpu_transcoder = TRANSCODER_EDP; | 9532 | pipe_config->cpu_transcoder = panel_transcoder; |
9480 | } | 9533 | } |
9481 | 9534 | ||
9535 | /* | ||
9536 | * Valid combos: none, eDP, DSI0, DSI1, DSI0+DSI1 | ||
9537 | */ | ||
9538 | WARN_ON((enabled_panel_transcoders & BIT(TRANSCODER_EDP)) && | ||
9539 | enabled_panel_transcoders != BIT(TRANSCODER_EDP)); | ||
9540 | |||
9482 | power_domain = POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder); | 9541 | power_domain = POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder); |
9483 | if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) | 9542 | if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) |
9484 | return false; | 9543 | return false; |
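The readout above scans a mask of panel transcoders (eDP, plus the two DSI transcoders on Icelake), logging every enabled one but mapping only the first onto the pipe. A rough standalone sketch of that first-one-wins scan, with invented transcoder names:

#include <stdio.h>

enum transcoder { TRANS_EDP, TRANS_DSI_0, TRANS_DSI_1, TRANS_COUNT };

int main(void)
{
	unsigned long mask = (1UL << TRANS_EDP) | (1UL << TRANS_DSI_0) |
			     (1UL << TRANS_DSI_1);
	unsigned long hw_enabled = (1UL << TRANS_DSI_0) | (1UL << TRANS_DSI_1); /* pretend readout */
	unsigned long enabled = 0;
	int t;

	for (t = 0; t < TRANS_COUNT; t++) {
		if (!(mask & (1UL << t)) || !(hw_enabled & (1UL << t)))
			continue;
		enabled |= 1UL << t;
		if (enabled != (1UL << t))
			continue; /* not the first enabled one: logged only */
		printf("using transcoder %d as cpu_transcoder\n", t);
	}
	printf("enabled mask: %#lx\n", enabled);
	return 0;
}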
@@ -9611,7 +9670,8 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc, | |||
9611 | if (!active) | 9670 | if (!active) |
9612 | goto out; | 9671 | goto out; |
9613 | 9672 | ||
9614 | if (!transcoder_is_dsi(pipe_config->cpu_transcoder)) { | 9673 | if (!transcoder_is_dsi(pipe_config->cpu_transcoder) || |
9674 | IS_ICELAKE(dev_priv)) { | ||
9615 | haswell_get_ddi_port_state(crtc, pipe_config); | 9675 | haswell_get_ddi_port_state(crtc, pipe_config); |
9616 | intel_get_pipe_timings(crtc, pipe_config); | 9676 | intel_get_pipe_timings(crtc, pipe_config); |
9617 | } | 9677 | } |
@@ -9667,7 +9727,7 @@ static u32 intel_cursor_base(const struct intel_plane_state *plane_state) | |||
9667 | const struct drm_i915_gem_object *obj = intel_fb_obj(fb); | 9727 | const struct drm_i915_gem_object *obj = intel_fb_obj(fb); |
9668 | u32 base; | 9728 | u32 base; |
9669 | 9729 | ||
9670 | if (INTEL_INFO(dev_priv)->cursor_needs_physical) | 9730 | if (INTEL_INFO(dev_priv)->display.cursor_needs_physical) |
9671 | base = obj->phys_handle->busaddr; | 9731 | base = obj->phys_handle->busaddr; |
9672 | else | 9732 | else |
9673 | base = intel_plane_ggtt_offset(plane_state); | 9733 | base = intel_plane_ggtt_offset(plane_state); |
@@ -9894,9 +9954,9 @@ static void i845_update_cursor(struct intel_plane *plane, | |||
9894 | } | 9954 | } |
9895 | 9955 | ||
9896 | static void i845_disable_cursor(struct intel_plane *plane, | 9956 | static void i845_disable_cursor(struct intel_plane *plane, |
9897 | struct intel_crtc *crtc) | 9957 | const struct intel_crtc_state *crtc_state) |
9898 | { | 9958 | { |
9899 | i845_update_cursor(plane, NULL, NULL); | 9959 | i845_update_cursor(plane, crtc_state, NULL); |
9900 | } | 9960 | } |
9901 | 9961 | ||
9902 | static bool i845_cursor_get_hw_state(struct intel_plane *plane, | 9962 | static bool i845_cursor_get_hw_state(struct intel_plane *plane, |
@@ -10087,8 +10147,8 @@ static void i9xx_update_cursor(struct intel_plane *plane, | |||
10087 | * On some platforms writing CURCNTR first will also | 10147 | * On some platforms writing CURCNTR first will also |
10088 | * cause CURPOS to be armed by the CURBASE write. | 10148 | * cause CURPOS to be armed by the CURBASE write. |
10089 | * Without the CURCNTR write the CURPOS write would | 10149 | * Without the CURCNTR write the CURPOS write would |
10090 | * arm itself. Thus we always start the full update | 10150 | * arm itself. Thus we always update CURCNTR before |
10091 | * with a CURCNTR write. | 10151 | * CURPOS. |
10092 | * | 10152 | * |
10093 | * On other platforms CURPOS always requires the | 10153 | * On other platforms CURPOS always requires the |
10094 | * CURBASE write to arm the update. Additionally | 10154 | * CURBASE write to arm the update. Additionally |
@@ -10098,15 +10158,20 @@ static void i9xx_update_cursor(struct intel_plane *plane, | |||
10098 | * cursor that doesn't appear to move, or even change | 10158 | * cursor that doesn't appear to move, or even change |
10099 | * shape. Thus we always write CURBASE. | 10159 | * shape. Thus we always write CURBASE. |
10100 | * | 10160 | * |
10101 | * CURCNTR and CUR_FBC_CTL are always | 10161 | * The other registers are armed by the CURBASE write, |
10102 | * armed by the CURBASE write only. | 10162 | * except when the plane is being enabled, at which time |
10163 | * the CURCNTR write arms the update. | ||
10103 | */ | 10164 | */ |
10165 | |||
10166 | if (INTEL_GEN(dev_priv) >= 9) | ||
10167 | skl_write_cursor_wm(plane, crtc_state); | ||
10168 | |||
10104 | if (plane->cursor.base != base || | 10169 | if (plane->cursor.base != base || |
10105 | plane->cursor.size != fbc_ctl || | 10170 | plane->cursor.size != fbc_ctl || |
10106 | plane->cursor.cntl != cntl) { | 10171 | plane->cursor.cntl != cntl) { |
10107 | I915_WRITE_FW(CURCNTR(pipe), cntl); | ||
10108 | if (HAS_CUR_FBC(dev_priv)) | 10172 | if (HAS_CUR_FBC(dev_priv)) |
10109 | I915_WRITE_FW(CUR_FBC_CTL(pipe), fbc_ctl); | 10173 | I915_WRITE_FW(CUR_FBC_CTL(pipe), fbc_ctl); |
10174 | I915_WRITE_FW(CURCNTR(pipe), cntl); | ||
10110 | I915_WRITE_FW(CURPOS(pipe), pos); | 10175 | I915_WRITE_FW(CURPOS(pipe), pos); |
10111 | I915_WRITE_FW(CURBASE(pipe), base); | 10176 | I915_WRITE_FW(CURBASE(pipe), base); |
10112 | 10177 | ||
@@ -10122,9 +10187,9 @@ static void i9xx_update_cursor(struct intel_plane *plane, | |||
10122 | } | 10187 | } |
10123 | 10188 | ||
10124 | static void i9xx_disable_cursor(struct intel_plane *plane, | 10189 | static void i9xx_disable_cursor(struct intel_plane *plane, |
10125 | struct intel_crtc *crtc) | 10190 | const struct intel_crtc_state *crtc_state) |
10126 | { | 10191 | { |
10127 | i9xx_update_cursor(plane, NULL, NULL); | 10192 | i9xx_update_cursor(plane, crtc_state, NULL); |
10128 | } | 10193 | } |
10129 | 10194 | ||
10130 | static bool i9xx_cursor_get_hw_state(struct intel_plane *plane, | 10195 | static bool i9xx_cursor_get_hw_state(struct intel_plane *plane, |
@@ -10835,8 +10900,10 @@ static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state) | |||
10835 | continue; | 10900 | continue; |
10836 | 10901 | ||
10837 | plane_state->linked_plane = NULL; | 10902 | plane_state->linked_plane = NULL; |
10838 | if (plane_state->slave && !plane_state->base.visible) | 10903 | if (plane_state->slave && !plane_state->base.visible) { |
10839 | crtc_state->active_planes &= ~BIT(plane->id); | 10904 | crtc_state->active_planes &= ~BIT(plane->id); |
10905 | crtc_state->update_planes |= BIT(plane->id); | ||
10906 | } | ||
10840 | 10907 | ||
10841 | plane_state->slave = false; | 10908 | plane_state->slave = false; |
10842 | } | 10909 | } |
@@ -10877,6 +10944,7 @@ static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state) | |||
10877 | linked_state->slave = true; | 10944 | linked_state->slave = true; |
10878 | linked_state->linked_plane = plane; | 10945 | linked_state->linked_plane = plane; |
10879 | crtc_state->active_planes |= BIT(linked->id); | 10946 | crtc_state->active_planes |= BIT(linked->id); |
10947 | crtc_state->update_planes |= BIT(linked->id); | ||
10880 | DRM_DEBUG_KMS("Using %s as Y plane for %s\n", linked->base.name, plane->base.name); | 10948 | DRM_DEBUG_KMS("Using %s as Y plane for %s\n", linked->base.name, plane->base.name); |
10881 | } | 10949 | } |
10882 | 10950 | ||
@@ -11887,6 +11955,8 @@ static void verify_wm_state(struct drm_crtc *crtc, | |||
11887 | struct skl_pipe_wm hw_wm, *sw_wm; | 11955 | struct skl_pipe_wm hw_wm, *sw_wm; |
11888 | struct skl_plane_wm *hw_plane_wm, *sw_plane_wm; | 11956 | struct skl_plane_wm *hw_plane_wm, *sw_plane_wm; |
11889 | struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry; | 11957 | struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry; |
11958 | struct skl_ddb_entry hw_ddb_y[I915_MAX_PLANES]; | ||
11959 | struct skl_ddb_entry hw_ddb_uv[I915_MAX_PLANES]; | ||
11890 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 11960 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
11891 | const enum pipe pipe = intel_crtc->pipe; | 11961 | const enum pipe pipe = intel_crtc->pipe; |
11892 | int plane, level, max_level = ilk_wm_max_level(dev_priv); | 11962 | int plane, level, max_level = ilk_wm_max_level(dev_priv); |
@@ -11897,6 +11967,8 @@ static void verify_wm_state(struct drm_crtc *crtc, | |||
11897 | skl_pipe_wm_get_hw_state(crtc, &hw_wm); | 11967 | skl_pipe_wm_get_hw_state(crtc, &hw_wm); |
11898 | sw_wm = &to_intel_crtc_state(new_state)->wm.skl.optimal; | 11968 | sw_wm = &to_intel_crtc_state(new_state)->wm.skl.optimal; |
11899 | 11969 | ||
11970 | skl_pipe_ddb_get_hw_state(intel_crtc, hw_ddb_y, hw_ddb_uv); | ||
11971 | |||
11900 | skl_ddb_get_hw_state(dev_priv, &hw_ddb); | 11972 | skl_ddb_get_hw_state(dev_priv, &hw_ddb); |
11901 | sw_ddb = &dev_priv->wm.skl_hw.ddb; | 11973 | sw_ddb = &dev_priv->wm.skl_hw.ddb; |
11902 | 11974 | ||
@@ -11939,8 +12011,8 @@ static void verify_wm_state(struct drm_crtc *crtc, | |||
11939 | } | 12011 | } |
11940 | 12012 | ||
11941 | /* DDB */ | 12013 | /* DDB */ |
11942 | hw_ddb_entry = &hw_ddb.plane[pipe][plane]; | 12014 | hw_ddb_entry = &hw_ddb_y[plane]; |
11943 | sw_ddb_entry = &sw_ddb->plane[pipe][plane]; | 12015 | sw_ddb_entry = &to_intel_crtc_state(new_state)->wm.skl.plane_ddb_y[plane]; |
11944 | 12016 | ||
11945 | if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) { | 12017 | if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) { |
11946 | DRM_ERROR("mismatch in DDB state pipe %c plane %d (expected (%u,%u), found (%u,%u))\n", | 12018 | DRM_ERROR("mismatch in DDB state pipe %c plane %d (expected (%u,%u), found (%u,%u))\n", |
@@ -11989,8 +12061,8 @@ static void verify_wm_state(struct drm_crtc *crtc, | |||
11989 | } | 12061 | } |
11990 | 12062 | ||
11991 | /* DDB */ | 12063 | /* DDB */ |
11992 | hw_ddb_entry = &hw_ddb.plane[pipe][PLANE_CURSOR]; | 12064 | hw_ddb_entry = &hw_ddb_y[PLANE_CURSOR]; |
11993 | sw_ddb_entry = &sw_ddb->plane[pipe][PLANE_CURSOR]; | 12065 | sw_ddb_entry = &to_intel_crtc_state(new_state)->wm.skl.plane_ddb_y[PLANE_CURSOR]; |
11994 | 12066 | ||
11995 | if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) { | 12067 | if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) { |
11996 | DRM_ERROR("mismatch in DDB state pipe %c cursor (expected (%u,%u), found (%u,%u))\n", | 12068 | DRM_ERROR("mismatch in DDB state pipe %c cursor (expected (%u,%u), found (%u,%u))\n", |
@@ -12668,7 +12740,6 @@ static void intel_update_crtc(struct drm_crtc *crtc, | |||
12668 | struct drm_device *dev = crtc->dev; | 12740 | struct drm_device *dev = crtc->dev; |
12669 | struct drm_i915_private *dev_priv = to_i915(dev); | 12741 | struct drm_i915_private *dev_priv = to_i915(dev); |
12670 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 12742 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
12671 | struct intel_crtc_state *old_intel_cstate = to_intel_crtc_state(old_crtc_state); | ||
12672 | struct intel_crtc_state *pipe_config = to_intel_crtc_state(new_crtc_state); | 12743 | struct intel_crtc_state *pipe_config = to_intel_crtc_state(new_crtc_state); |
12673 | bool modeset = needs_modeset(new_crtc_state); | 12744 | bool modeset = needs_modeset(new_crtc_state); |
12674 | struct intel_plane_state *new_plane_state = | 12745 | struct intel_plane_state *new_plane_state = |
@@ -12691,8 +12762,10 @@ static void intel_update_crtc(struct drm_crtc *crtc, | |||
12691 | 12762 | ||
12692 | intel_begin_crtc_commit(crtc, old_crtc_state); | 12763 | intel_begin_crtc_commit(crtc, old_crtc_state); |
12693 | 12764 | ||
12694 | intel_update_planes_on_crtc(to_intel_atomic_state(state), intel_crtc, | 12765 | if (INTEL_GEN(dev_priv) >= 9) |
12695 | old_intel_cstate, pipe_config); | 12766 | skl_update_planes_on_crtc(to_intel_atomic_state(state), intel_crtc); |
12767 | else | ||
12768 | i9xx_update_planes_on_crtc(to_intel_atomic_state(state), intel_crtc); | ||
12696 | 12769 | ||
12697 | intel_finish_crtc_commit(crtc, old_crtc_state); | 12770 | intel_finish_crtc_commit(crtc, old_crtc_state); |
12698 | } | 12771 | } |
@@ -12885,7 +12958,7 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state) | |||
12885 | intel_pre_plane_update(old_intel_crtc_state, new_intel_crtc_state); | 12958 | intel_pre_plane_update(old_intel_crtc_state, new_intel_crtc_state); |
12886 | 12959 | ||
12887 | if (old_crtc_state->active) { | 12960 | if (old_crtc_state->active) { |
12888 | intel_crtc_disable_planes(intel_crtc, old_intel_crtc_state->active_planes); | 12961 | intel_crtc_disable_planes(intel_state, intel_crtc); |
12889 | 12962 | ||
12890 | /* | 12963 | /* |
12891 | * We need to disable pipe CRC before disabling the pipe, | 12964 | * We need to disable pipe CRC before disabling the pipe, |
@@ -13240,7 +13313,7 @@ static int intel_plane_pin_fb(struct intel_plane_state *plane_state) | |||
13240 | struct i915_vma *vma; | 13313 | struct i915_vma *vma; |
13241 | 13314 | ||
13242 | if (plane->id == PLANE_CURSOR && | 13315 | if (plane->id == PLANE_CURSOR && |
13243 | INTEL_INFO(dev_priv)->cursor_needs_physical) { | 13316 | INTEL_INFO(dev_priv)->display.cursor_needs_physical) { |
13244 | struct drm_i915_gem_object *obj = intel_fb_obj(fb); | 13317 | struct drm_i915_gem_object *obj = intel_fb_obj(fb); |
13245 | const int align = intel_cursor_alignment(dev_priv); | 13318 | const int align = intel_cursor_alignment(dev_priv); |
13246 | int err; | 13319 | int err; |
@@ -13735,7 +13808,7 @@ intel_legacy_cursor_update(struct drm_plane *plane, | |||
13735 | to_intel_plane_state(plane->state)); | 13808 | to_intel_plane_state(plane->state)); |
13736 | } else { | 13809 | } else { |
13737 | trace_intel_disable_plane(plane, to_intel_crtc(crtc)); | 13810 | trace_intel_disable_plane(plane, to_intel_crtc(crtc)); |
13738 | intel_plane->disable_plane(intel_plane, to_intel_crtc(crtc)); | 13811 | intel_plane->disable_plane(intel_plane, crtc_state); |
13739 | } | 13812 | } |
13740 | 13813 | ||
13741 | intel_plane_unpin_fb(to_intel_plane_state(old_plane_state)); | 13814 | intel_plane_unpin_fb(to_intel_plane_state(old_plane_state)); |
@@ -14186,7 +14259,7 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv) | |||
14186 | 14259 | ||
14187 | intel_pps_init(dev_priv); | 14260 | intel_pps_init(dev_priv); |
14188 | 14261 | ||
14189 | if (INTEL_INFO(dev_priv)->num_pipes == 0) | 14262 | if (!HAS_DISPLAY(dev_priv)) |
14190 | return; | 14263 | return; |
14191 | 14264 | ||
14192 | /* | 14265 | /* |
@@ -14451,7 +14524,6 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb, | |||
14451 | { | 14524 | { |
14452 | struct drm_i915_private *dev_priv = to_i915(obj->base.dev); | 14525 | struct drm_i915_private *dev_priv = to_i915(obj->base.dev); |
14453 | struct drm_framebuffer *fb = &intel_fb->base; | 14526 | struct drm_framebuffer *fb = &intel_fb->base; |
14454 | struct drm_format_name_buf format_name; | ||
14455 | u32 pitch_limit; | 14527 | u32 pitch_limit; |
14456 | unsigned int tiling, stride; | 14528 | unsigned int tiling, stride; |
14457 | int ret = -EINVAL; | 14529 | int ret = -EINVAL; |
@@ -14482,39 +14554,14 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb, | |||
14482 | } | 14554 | } |
14483 | } | 14555 | } |
14484 | 14556 | ||
14485 | /* Passed in modifier sanity checking. */ | 14557 | if (!drm_any_plane_has_format(&dev_priv->drm, |
14486 | switch (mode_cmd->modifier[0]) { | 14558 | mode_cmd->pixel_format, |
14487 | case I915_FORMAT_MOD_Y_TILED_CCS: | 14559 | mode_cmd->modifier[0])) { |
14488 | case I915_FORMAT_MOD_Yf_TILED_CCS: | 14560 | struct drm_format_name_buf format_name; |
14489 | switch (mode_cmd->pixel_format) { | 14561 | |
14490 | case DRM_FORMAT_XBGR8888: | 14562 | DRM_DEBUG_KMS("unsupported pixel format %s / modifier 0x%llx\n", |
14491 | case DRM_FORMAT_ABGR8888: | 14563 | drm_get_format_name(mode_cmd->pixel_format, |
14492 | case DRM_FORMAT_XRGB8888: | 14564 | &format_name), |
14493 | case DRM_FORMAT_ARGB8888: | ||
14494 | break; | ||
14495 | default: | ||
14496 | DRM_DEBUG_KMS("RC supported only with RGB8888 formats\n"); | ||
14497 | goto err; | ||
14498 | } | ||
14499 | /* fall through */ | ||
14500 | case I915_FORMAT_MOD_Yf_TILED: | ||
14501 | if (mode_cmd->pixel_format == DRM_FORMAT_C8) { | ||
14502 | DRM_DEBUG_KMS("Indexed format does not support Yf tiling\n"); | ||
14503 | goto err; | ||
14504 | } | ||
14505 | /* fall through */ | ||
14506 | case I915_FORMAT_MOD_Y_TILED: | ||
14507 | if (INTEL_GEN(dev_priv) < 9) { | ||
14508 | DRM_DEBUG_KMS("Unsupported tiling 0x%llx!\n", | ||
14509 | mode_cmd->modifier[0]); | ||
14510 | goto err; | ||
14511 | } | ||
14512 | break; | ||
14513 | case DRM_FORMAT_MOD_LINEAR: | ||
14514 | case I915_FORMAT_MOD_X_TILED: | ||
14515 | break; | ||
14516 | default: | ||
14517 | DRM_DEBUG_KMS("Unsupported fb modifier 0x%llx!\n", | ||
14518 | mode_cmd->modifier[0]); | 14565 | mode_cmd->modifier[0]); |
14519 | goto err; | 14566 | goto err; |
14520 | } | 14567 | } |
@@ -14549,69 +14596,6 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb, | |||
14549 | goto err; | 14596 | goto err; |
14550 | } | 14597 | } |
14551 | 14598 | ||
14552 | /* Reject formats not supported by any plane early. */ | ||
14553 | switch (mode_cmd->pixel_format) { | ||
14554 | case DRM_FORMAT_C8: | ||
14555 | case DRM_FORMAT_RGB565: | ||
14556 | case DRM_FORMAT_XRGB8888: | ||
14557 | case DRM_FORMAT_ARGB8888: | ||
14558 | break; | ||
14559 | case DRM_FORMAT_XRGB1555: | ||
14560 | if (INTEL_GEN(dev_priv) > 3) { | ||
14561 | DRM_DEBUG_KMS("unsupported pixel format: %s\n", | ||
14562 | drm_get_format_name(mode_cmd->pixel_format, &format_name)); | ||
14563 | goto err; | ||
14564 | } | ||
14565 | break; | ||
14566 | case DRM_FORMAT_ABGR8888: | ||
14567 | if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) && | ||
14568 | INTEL_GEN(dev_priv) < 9) { | ||
14569 | DRM_DEBUG_KMS("unsupported pixel format: %s\n", | ||
14570 | drm_get_format_name(mode_cmd->pixel_format, &format_name)); | ||
14571 | goto err; | ||
14572 | } | ||
14573 | break; | ||
14574 | case DRM_FORMAT_XBGR8888: | ||
14575 | case DRM_FORMAT_XRGB2101010: | ||
14576 | case DRM_FORMAT_XBGR2101010: | ||
14577 | if (INTEL_GEN(dev_priv) < 4) { | ||
14578 | DRM_DEBUG_KMS("unsupported pixel format: %s\n", | ||
14579 | drm_get_format_name(mode_cmd->pixel_format, &format_name)); | ||
14580 | goto err; | ||
14581 | } | ||
14582 | break; | ||
14583 | case DRM_FORMAT_ABGR2101010: | ||
14584 | if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) { | ||
14585 | DRM_DEBUG_KMS("unsupported pixel format: %s\n", | ||
14586 | drm_get_format_name(mode_cmd->pixel_format, &format_name)); | ||
14587 | goto err; | ||
14588 | } | ||
14589 | break; | ||
14590 | case DRM_FORMAT_YUYV: | ||
14591 | case DRM_FORMAT_UYVY: | ||
14592 | case DRM_FORMAT_YVYU: | ||
14593 | case DRM_FORMAT_VYUY: | ||
14594 | if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv)) { | ||
14595 | DRM_DEBUG_KMS("unsupported pixel format: %s\n", | ||
14596 | drm_get_format_name(mode_cmd->pixel_format, &format_name)); | ||
14597 | goto err; | ||
14598 | } | ||
14599 | break; | ||
14600 | case DRM_FORMAT_NV12: | ||
14601 | if (INTEL_GEN(dev_priv) < 9 || IS_SKYLAKE(dev_priv) || | ||
14602 | IS_BROXTON(dev_priv)) { | ||
14603 | DRM_DEBUG_KMS("unsupported pixel format: %s\n", | ||
14604 | drm_get_format_name(mode_cmd->pixel_format, | ||
14605 | &format_name)); | ||
14606 | goto err; | ||
14607 | } | ||
14608 | break; | ||
14609 | default: | ||
14610 | DRM_DEBUG_KMS("unsupported pixel format: %s\n", | ||
14611 | drm_get_format_name(mode_cmd->pixel_format, &format_name)); | ||
14612 | goto err; | ||
14613 | } | ||
14614 | |||
14615 | /* FIXME need to adjust LINOFF/TILEOFF accordingly. */ | 14599 | /* FIXME need to adjust LINOFF/TILEOFF accordingly. */ |
14616 | if (mode_cmd->offsets[0] != 0) | 14600 | if (mode_cmd->offsets[0] != 0) |
14617 | goto err; | 14601 | goto err; |
@@ -16066,7 +16050,7 @@ intel_display_capture_error_state(struct drm_i915_private *dev_priv) | |||
16066 | }; | 16050 | }; |
16067 | int i; | 16051 | int i; |
16068 | 16052 | ||
16069 | if (INTEL_INFO(dev_priv)->num_pipes == 0) | 16053 | if (!HAS_DISPLAY(dev_priv)) |
16070 | return NULL; | 16054 | return NULL; |
16071 | 16055 | ||
16072 | error = kzalloc(sizeof(*error), GFP_ATOMIC); | 16056 | error = kzalloc(sizeof(*error), GFP_ATOMIC); |
diff --git a/drivers/gpu/drm/i915/intel_display.h b/drivers/gpu/drm/i915/intel_display.h index 5f2955b944da..4262452963b3 100644 --- a/drivers/gpu/drm/i915/intel_display.h +++ b/drivers/gpu/drm/i915/intel_display.h | |||
@@ -242,6 +242,7 @@ enum intel_display_power_domain { | |||
242 | POWER_DOMAIN_TRANSCODER_B, | 242 | POWER_DOMAIN_TRANSCODER_B, |
243 | POWER_DOMAIN_TRANSCODER_C, | 243 | POWER_DOMAIN_TRANSCODER_C, |
244 | POWER_DOMAIN_TRANSCODER_EDP, | 244 | POWER_DOMAIN_TRANSCODER_EDP, |
245 | POWER_DOMAIN_TRANSCODER_EDP_VDSC, | ||
245 | POWER_DOMAIN_TRANSCODER_DSI_A, | 246 | POWER_DOMAIN_TRANSCODER_DSI_A, |
246 | POWER_DOMAIN_TRANSCODER_DSI_C, | 247 | POWER_DOMAIN_TRANSCODER_DSI_C, |
247 | POWER_DOMAIN_PORT_DDI_A_LANES, | 248 | POWER_DOMAIN_PORT_DDI_A_LANES, |
@@ -398,6 +399,14 @@ struct intel_link_m_n { | |||
398 | for_each_power_well_reverse(__dev_priv, __power_well) \ | 399 | for_each_power_well_reverse(__dev_priv, __power_well) \ |
399 | for_each_if((__power_well)->desc->domains & (__domain_mask)) | 400 | for_each_if((__power_well)->desc->domains & (__domain_mask)) |
400 | 401 | ||
402 | #define for_each_old_intel_plane_in_state(__state, plane, old_plane_state, __i) \ | ||
403 | for ((__i) = 0; \ | ||
404 | (__i) < (__state)->base.dev->mode_config.num_total_plane && \ | ||
405 | ((plane) = to_intel_plane((__state)->base.planes[__i].ptr), \ | ||
406 | (old_plane_state) = to_intel_plane_state((__state)->base.planes[__i].old_state), 1); \ | ||
407 | (__i)++) \ | ||
408 | for_each_if(plane) | ||
409 | |||
401 | #define for_each_new_intel_plane_in_state(__state, plane, new_plane_state, __i) \ | 410 | #define for_each_new_intel_plane_in_state(__state, plane, new_plane_state, __i) \ |
402 | for ((__i) = 0; \ | 411 | for ((__i) = 0; \ |
403 | (__i) < (__state)->base.dev->mode_config.num_total_plane && \ | 412 | (__i) < (__state)->base.dev->mode_config.num_total_plane && \ |
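The new iterators follow the usual drm pattern: a comma expression loads the index/pointer pair each step, and a trailing for_each_if() filters empty slots without breaking a block that follows the macro. A simplified, self-contained imitation of that construction (struct and macro names invented):

#include <stdio.h>

/* Same trick as the kernel's for_each_if(): "if (!(cond)) {} else"
 * filters entries while still binding a following statement or block.
 */
#define for_each_if(cond) if (!(cond)) {} else

struct slot { const char *name; };

#define for_each_named_slot(__arr, __n, ptr, __i)		\
	for ((__i) = 0;						\
	     (__i) < (__n) && ((ptr) = (__arr)[__i].name, 1);	\
	     (__i)++)						\
		for_each_if(ptr)

int main(void)
{
	struct slot slots[] = { { "primary" }, { NULL }, { "cursor" } };
	const char *name;
	int i;

	for_each_named_slot(slots, 3, name, i)
		printf("slot %d: %s\n", i, name);
	return 0;
}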
@@ -423,10 +432,18 @@ struct intel_link_m_n { | |||
423 | (__i)++) \ | 432 | (__i)++) \ |
424 | for_each_if(plane) | 433 | for_each_if(plane) |
425 | 434 | ||
426 | void intel_link_compute_m_n(int bpp, int nlanes, | 435 | #define for_each_oldnew_intel_crtc_in_state(__state, crtc, old_crtc_state, new_crtc_state, __i) \ |
436 | for ((__i) = 0; \ | ||
437 | (__i) < (__state)->base.dev->mode_config.num_crtc && \ | ||
438 | ((crtc) = to_intel_crtc((__state)->base.crtcs[__i].ptr), \ | ||
439 | (old_crtc_state) = to_intel_crtc_state((__state)->base.crtcs[__i].old_state), \ | ||
440 | (new_crtc_state) = to_intel_crtc_state((__state)->base.crtcs[__i].new_state), 1); \ | ||
441 | (__i)++) \ | ||
442 | for_each_if(crtc) | ||
443 | |||
444 | void intel_link_compute_m_n(u16 bpp, int nlanes, | ||
427 | int pixel_clock, int link_clock, | 445 | int pixel_clock, int link_clock, |
428 | struct intel_link_m_n *m_n, | 446 | struct intel_link_m_n *m_n, |
429 | bool constant_n); | 447 | bool constant_n); |
430 | |||
431 | bool is_ccs_modifier(u64 modifier); | 448 | bool is_ccs_modifier(u64 modifier); |
432 | #endif | 449 | #endif |
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 7699f9b7b2d2..fdd2cbc56fa3 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c | |||
@@ -47,6 +47,8 @@ | |||
47 | 47 | ||
48 | /* DP DSC small joiner has 2 FIFOs each of 640 x 6 bytes */ | 48 | /* DP DSC small joiner has 2 FIFOs each of 640 x 6 bytes */ |
49 | #define DP_DSC_MAX_SMALL_JOINER_RAM_BUFFER 61440 | 49 | #define DP_DSC_MAX_SMALL_JOINER_RAM_BUFFER 61440 |
50 | #define DP_DSC_MIN_SUPPORTED_BPC 8 | ||
51 | #define DP_DSC_MAX_SUPPORTED_BPC 10 | ||
50 | 52 | ||
51 | /* DP DSC throughput values used for slice count calculations KPixels/s */ | 53 | /* DP DSC throughput values used for slice count calculations KPixels/s */ |
52 | #define DP_DSC_PEAK_PIXEL_RATE 2720000 | 54 | #define DP_DSC_PEAK_PIXEL_RATE 2720000 |
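For reference, the small-joiner constant above appears to be the RAM size expressed in bits: 2 FIFOs of 640 entries x 6 bytes is 7680 bytes, i.e. 61440 bits. A trivial check of that arithmetic:

#include <stdio.h>

int main(void)
{
	/* 2 FIFOs x 640 entries x 6 bytes = 7680 bytes = 61440 bits */
	int bits = 2 * 640 * 6 * 8;

	printf("small joiner RAM: %d bits\n", bits); /* 61440 */
	return 0;
}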
@@ -543,7 +545,7 @@ intel_dp_mode_valid(struct drm_connector *connector, | |||
543 | dsc_slice_count = | 545 | dsc_slice_count = |
544 | drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd, | 546 | drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd, |
545 | true); | 547 | true); |
546 | } else { | 548 | } else if (drm_dp_sink_supports_fec(intel_dp->fec_capable)) { |
547 | dsc_max_output_bpp = | 549 | dsc_max_output_bpp = |
548 | intel_dp_dsc_get_output_bpp(max_link_clock, | 550 | intel_dp_dsc_get_output_bpp(max_link_clock, |
549 | max_lanes, | 551 | max_lanes, |
@@ -1708,6 +1710,41 @@ struct link_config_limits { | |||
1708 | int min_bpp, max_bpp; | 1710 | int min_bpp, max_bpp; |
1709 | }; | 1711 | }; |
1710 | 1712 | ||
1713 | static bool intel_dp_source_supports_fec(struct intel_dp *intel_dp, | ||
1714 | const struct intel_crtc_state *pipe_config) | ||
1715 | { | ||
1716 | struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); | ||
1717 | |||
1718 | return INTEL_GEN(dev_priv) >= 11 && | ||
1719 | pipe_config->cpu_transcoder != TRANSCODER_A; | ||
1720 | } | ||
1721 | |||
1722 | static bool intel_dp_supports_fec(struct intel_dp *intel_dp, | ||
1723 | const struct intel_crtc_state *pipe_config) | ||
1724 | { | ||
1725 | return intel_dp_source_supports_fec(intel_dp, pipe_config) && | ||
1726 | drm_dp_sink_supports_fec(intel_dp->fec_capable); | ||
1727 | } | ||
1728 | |||
1729 | static bool intel_dp_source_supports_dsc(struct intel_dp *intel_dp, | ||
1730 | const struct intel_crtc_state *pipe_config) | ||
1731 | { | ||
1732 | struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); | ||
1733 | |||
1734 | return INTEL_GEN(dev_priv) >= 10 && | ||
1735 | pipe_config->cpu_transcoder != TRANSCODER_A; | ||
1736 | } | ||
1737 | |||
1738 | static bool intel_dp_supports_dsc(struct intel_dp *intel_dp, | ||
1739 | const struct intel_crtc_state *pipe_config) | ||
1740 | { | ||
1741 | if (!intel_dp_is_edp(intel_dp) && !pipe_config->fec_enable) | ||
1742 | return false; | ||
1743 | |||
1744 | return intel_dp_source_supports_dsc(intel_dp, pipe_config) && | ||
1745 | drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd); | ||
1746 | } | ||
1747 | |||
1711 | static int intel_dp_compute_bpp(struct intel_dp *intel_dp, | 1748 | static int intel_dp_compute_bpp(struct intel_dp *intel_dp, |
1712 | struct intel_crtc_state *pipe_config) | 1749 | struct intel_crtc_state *pipe_config) |
1713 | { | 1750 | { |
@@ -1842,14 +1879,122 @@ intel_dp_compute_link_config_fast(struct intel_dp *intel_dp, | |||
1842 | return false; | 1879 | return false; |
1843 | } | 1880 | } |
1844 | 1881 | ||
1882 | static int intel_dp_dsc_compute_bpp(struct intel_dp *intel_dp, u8 dsc_max_bpc) | ||
1883 | { | ||
1884 | int i, num_bpc; | ||
1885 | u8 dsc_bpc[3] = {0}; | ||
1886 | |||
1887 | num_bpc = drm_dp_dsc_sink_supported_input_bpcs(intel_dp->dsc_dpcd, | ||
1888 | dsc_bpc); | ||
1889 | for (i = 0; i < num_bpc; i++) { | ||
1890 | if (dsc_max_bpc >= dsc_bpc[i]) | ||
1891 | return dsc_bpc[i] * 3; | ||
1892 | } | ||
1893 | |||
1894 | return 0; | ||
1895 | } | ||
1896 | |||
1897 | static bool intel_dp_dsc_compute_config(struct intel_dp *intel_dp, | ||
1898 | struct intel_crtc_state *pipe_config, | ||
1899 | struct drm_connector_state *conn_state, | ||
1900 | struct link_config_limits *limits) | ||
1901 | { | ||
1902 | struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); | ||
1903 | struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); | ||
1904 | struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; | ||
1905 | u8 dsc_max_bpc; | ||
1906 | int pipe_bpp; | ||
1907 | |||
1908 | if (!intel_dp_supports_dsc(intel_dp, pipe_config)) | ||
1909 | return false; | ||
1910 | |||
1911 | dsc_max_bpc = min_t(u8, DP_DSC_MAX_SUPPORTED_BPC, | ||
1912 | conn_state->max_requested_bpc); | ||
1913 | |||
1914 | pipe_bpp = intel_dp_dsc_compute_bpp(intel_dp, dsc_max_bpc); | ||
1915 | if (pipe_bpp < DP_DSC_MIN_SUPPORTED_BPC * 3) { | ||
1916 | DRM_DEBUG_KMS("No DSC support for less than 8bpc\n"); | ||
1917 | return false; | ||
1918 | } | ||
1919 | |||
1920 | /* | ||
1921 | * For now enable DSC for max bpp, max link rate, max lane count. | ||
1922 | * Optimize this later for the minimum possible link rate/lane count | ||
1923 | * with DSC enabled for the requested mode. | ||
1924 | */ | ||
1925 | pipe_config->pipe_bpp = pipe_bpp; | ||
1926 | pipe_config->port_clock = intel_dp->common_rates[limits->max_clock]; | ||
1927 | pipe_config->lane_count = limits->max_lane_count; | ||
1928 | |||
1929 | if (intel_dp_is_edp(intel_dp)) { | ||
1930 | pipe_config->dsc_params.compressed_bpp = | ||
1931 | min_t(u16, drm_edp_dsc_sink_output_bpp(intel_dp->dsc_dpcd) >> 4, | ||
1932 | pipe_config->pipe_bpp); | ||
1933 | pipe_config->dsc_params.slice_count = | ||
1934 | drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd, | ||
1935 | true); | ||
1936 | } else { | ||
1937 | u16 dsc_max_output_bpp; | ||
1938 | u8 dsc_dp_slice_count; | ||
1939 | |||
1940 | dsc_max_output_bpp = | ||
1941 | intel_dp_dsc_get_output_bpp(pipe_config->port_clock, | ||
1942 | pipe_config->lane_count, | ||
1943 | adjusted_mode->crtc_clock, | ||
1944 | adjusted_mode->crtc_hdisplay); | ||
1945 | dsc_dp_slice_count = | ||
1946 | intel_dp_dsc_get_slice_count(intel_dp, | ||
1947 | adjusted_mode->crtc_clock, | ||
1948 | adjusted_mode->crtc_hdisplay); | ||
1949 | if (!dsc_max_output_bpp || !dsc_dp_slice_count) { | ||
1950 | DRM_DEBUG_KMS("Compressed BPP/Slice Count not supported\n"); | ||
1951 | return false; | ||
1952 | } | ||
1953 | pipe_config->dsc_params.compressed_bpp = min_t(u16, | ||
1954 | dsc_max_output_bpp >> 4, | ||
1955 | pipe_config->pipe_bpp); | ||
1956 | pipe_config->dsc_params.slice_count = dsc_dp_slice_count; | ||
1957 | } | ||
1958 | /* | ||
1959 | * The VDSC engine operates at 1 pixel per clock, so if the peak pixel | ||
1960 | * rate is greater than the maximum Cdclock we must split the stream | ||
1961 | * across 2 VDSC instances, which requires more than one slice. | ||
1962 | */ | ||
1963 | if (adjusted_mode->crtc_clock > dev_priv->max_cdclk_freq) { | ||
1964 | if (pipe_config->dsc_params.slice_count > 1) { | ||
1965 | pipe_config->dsc_params.dsc_split = true; | ||
1966 | } else { | ||
1967 | DRM_DEBUG_KMS("Cannot split stream to use 2 VDSC instances\n"); | ||
1968 | return false; | ||
1969 | } | ||
1970 | } | ||
1971 | if (intel_dp_compute_dsc_params(intel_dp, pipe_config) < 0) { | ||
1972 | DRM_DEBUG_KMS("Cannot compute valid DSC parameters for Input Bpp = %d " | ||
1973 | "Compressed BPP = %d\n", | ||
1974 | pipe_config->pipe_bpp, | ||
1975 | pipe_config->dsc_params.compressed_bpp); | ||
1976 | return false; | ||
1977 | } | ||
1978 | pipe_config->dsc_params.compression_enable = true; | ||
1979 | DRM_DEBUG_KMS("DP DSC computed with Input Bpp = %d " | ||
1980 | "Compressed Bpp = %d Slice Count = %d\n", | ||
1981 | pipe_config->pipe_bpp, | ||
1982 | pipe_config->dsc_params.compressed_bpp, | ||
1983 | pipe_config->dsc_params.slice_count); | ||
1984 | |||
1985 | return true; | ||
1986 | } | ||
1987 | |||
1845 | static bool | 1988 | static bool |
1846 | intel_dp_compute_link_config(struct intel_encoder *encoder, | 1989 | intel_dp_compute_link_config(struct intel_encoder *encoder, |
1847 | struct intel_crtc_state *pipe_config) | 1990 | struct intel_crtc_state *pipe_config, |
1991 | struct drm_connector_state *conn_state) | ||
1848 | { | 1992 | { |
1849 | struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; | 1993 | struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; |
1850 | struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); | 1994 | struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); |
1851 | struct link_config_limits limits; | 1995 | struct link_config_limits limits; |
1852 | int common_len; | 1996 | int common_len; |
1997 | bool ret; | ||
1853 | 1998 | ||
1854 | common_len = intel_dp_common_len_rate_limit(intel_dp, | 1999 | common_len = intel_dp_common_len_rate_limit(intel_dp, |
1855 | intel_dp->max_link_rate); | 2000 | intel_dp->max_link_rate); |
@@ -1888,7 +2033,7 @@ intel_dp_compute_link_config(struct intel_encoder *encoder, | |||
1888 | intel_dp->common_rates[limits.max_clock], | 2033 | intel_dp->common_rates[limits.max_clock], |
1889 | limits.max_bpp, adjusted_mode->crtc_clock); | 2034 | limits.max_bpp, adjusted_mode->crtc_clock); |
1890 | 2035 | ||
1891 | if (intel_dp_is_edp(intel_dp)) { | 2036 | if (intel_dp_is_edp(intel_dp)) |
1892 | /* | 2037 | /* |
1893 | * Optimize for fast and narrow. eDP 1.3 section 3.3 and eDP 1.4 | 2038 | * Optimize for fast and narrow. eDP 1.3 section 3.3 and eDP 1.4 |
1894 | * section A.1: "It is recommended that the minimum number of | 2039 | * section A.1: "It is recommended that the minimum number of |
@@ -1898,26 +2043,42 @@ intel_dp_compute_link_config(struct intel_encoder *encoder, | |||
1898 | * Note that we use the max clock and lane count for eDP 1.3 and | 2043 | * Note that we use the max clock and lane count for eDP 1.3 and |
1899 | * earlier, and fast vs. wide is irrelevant. | 2044 | * earlier, and fast vs. wide is irrelevant. |
1900 | */ | 2045 | */ |
1901 | if (!intel_dp_compute_link_config_fast(intel_dp, pipe_config, | 2046 | ret = intel_dp_compute_link_config_fast(intel_dp, pipe_config, |
1902 | &limits)) | 2047 | &limits); |
1903 | return false; | 2048 | else |
1904 | } else { | ||
1905 | /* Optimize for slow and wide. */ | 2049 | /* Optimize for slow and wide. */ |
1906 | if (!intel_dp_compute_link_config_wide(intel_dp, pipe_config, | 2050 | ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config, |
1907 | &limits)) | 2051 | &limits); |
2052 | |||
2053 | /* enable compression if the mode doesn't fit available BW */ | ||
2054 | if (!ret) { | ||
2055 | if (!intel_dp_dsc_compute_config(intel_dp, pipe_config, | ||
2056 | conn_state, &limits)) | ||
1908 | return false; | 2057 | return false; |
1909 | } | 2058 | } |
1910 | 2059 | ||
1911 | DRM_DEBUG_KMS("DP lane count %d clock %d bpp %d\n", | 2060 | if (pipe_config->dsc_params.compression_enable) { |
1912 | pipe_config->lane_count, pipe_config->port_clock, | 2061 | DRM_DEBUG_KMS("DP lane count %d clock %d Input bpp %d Compressed bpp %d\n", |
1913 | pipe_config->pipe_bpp); | 2062 | pipe_config->lane_count, pipe_config->port_clock, |
2063 | pipe_config->pipe_bpp, | ||
2064 | pipe_config->dsc_params.compressed_bpp); | ||
1914 | 2065 | ||
1915 | DRM_DEBUG_KMS("DP link rate required %i available %i\n", | 2066 | DRM_DEBUG_KMS("DP link rate required %i available %i\n", |
1916 | intel_dp_link_required(adjusted_mode->crtc_clock, | 2067 | intel_dp_link_required(adjusted_mode->crtc_clock, |
1917 | pipe_config->pipe_bpp), | 2068 | pipe_config->dsc_params.compressed_bpp), |
1918 | intel_dp_max_data_rate(pipe_config->port_clock, | 2069 | intel_dp_max_data_rate(pipe_config->port_clock, |
1919 | pipe_config->lane_count)); | 2070 | pipe_config->lane_count)); |
2071 | } else { | ||
2072 | DRM_DEBUG_KMS("DP lane count %d clock %d bpp %d\n", | ||
2073 | pipe_config->lane_count, pipe_config->port_clock, | ||
2074 | pipe_config->pipe_bpp); | ||
1920 | 2075 | ||
2076 | DRM_DEBUG_KMS("DP link rate required %i available %i\n", | ||
2077 | intel_dp_link_required(adjusted_mode->crtc_clock, | ||
2078 | pipe_config->pipe_bpp), | ||
2079 | intel_dp_max_data_rate(pipe_config->port_clock, | ||
2080 | pipe_config->lane_count)); | ||
2081 | } | ||
1921 | return true; | 2082 | return true; |
1922 | } | 2083 | } |
1923 | 2084 | ||
@@ -1983,7 +2144,10 @@ intel_dp_compute_config(struct intel_encoder *encoder, | |||
1983 | if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK) | 2144 | if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK) |
1984 | return false; | 2145 | return false; |
1985 | 2146 | ||
1986 | if (!intel_dp_compute_link_config(encoder, pipe_config)) | 2147 | pipe_config->fec_enable = !intel_dp_is_edp(intel_dp) && |
2148 | intel_dp_supports_fec(intel_dp, pipe_config); | ||
2149 | |||
2150 | if (!intel_dp_compute_link_config(encoder, pipe_config, conn_state)) | ||
1987 | return false; | 2151 | return false; |
1988 | 2152 | ||
1989 | if (intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_AUTO) { | 2153 | if (intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_AUTO) { |
@@ -2001,11 +2165,20 @@ intel_dp_compute_config(struct intel_encoder *encoder, | |||
2001 | intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_LIMITED; | 2165 | intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_LIMITED; |
2002 | } | 2166 | } |
2003 | 2167 | ||
2004 | intel_link_compute_m_n(pipe_config->pipe_bpp, pipe_config->lane_count, | 2168 | if (!pipe_config->dsc_params.compression_enable) |
2005 | adjusted_mode->crtc_clock, | 2169 | intel_link_compute_m_n(pipe_config->pipe_bpp, |
2006 | pipe_config->port_clock, | 2170 | pipe_config->lane_count, |
2007 | &pipe_config->dp_m_n, | 2171 | adjusted_mode->crtc_clock, |
2008 | constant_n); | 2172 | pipe_config->port_clock, |
2173 | &pipe_config->dp_m_n, | ||
2174 | constant_n); | ||
2175 | else | ||
2176 | intel_link_compute_m_n(pipe_config->dsc_params.compressed_bpp, | ||
2177 | pipe_config->lane_count, | ||
2178 | adjusted_mode->crtc_clock, | ||
2179 | pipe_config->port_clock, | ||
2180 | &pipe_config->dp_m_n, | ||
2181 | constant_n); | ||
2009 | 2182 | ||
2010 | if (intel_connector->panel.downclock_mode != NULL && | 2183 | if (intel_connector->panel.downclock_mode != NULL && |
2011 | dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) { | 2184 | dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) { |
@@ -2702,6 +2875,22 @@ static bool downstream_hpd_needs_d0(struct intel_dp *intel_dp) | |||
2702 | intel_dp->downstream_ports[0] & DP_DS_PORT_HPD; | 2875 | intel_dp->downstream_ports[0] & DP_DS_PORT_HPD; |
2703 | } | 2876 | } |
2704 | 2877 | ||
2878 | void intel_dp_sink_set_decompression_state(struct intel_dp *intel_dp, | ||
2879 | const struct intel_crtc_state *crtc_state, | ||
2880 | bool enable) | ||
2881 | { | ||
2882 | int ret; | ||
2883 | |||
2884 | if (!crtc_state->dsc_params.compression_enable) | ||
2885 | return; | ||
2886 | |||
2887 | ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_DSC_ENABLE, | ||
2888 | enable ? DP_DECOMPRESSION_EN : 0); | ||
2889 | if (ret < 0) | ||
2890 | DRM_DEBUG_KMS("Failed to %s sink decompression state\n", | ||
2891 | enable ? "enable" : "disable"); | ||
2892 | } | ||
2893 | |||
2705 | /* If the sink supports it, try to set the power state appropriately */ | 2894 | /* If the sink supports it, try to set the power state appropriately */ |
2706 | void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode) | 2895 | void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode) |
2707 | { | 2896 | { |
@@ -3837,15 +4026,14 @@ static void intel_dp_get_dsc_sink_cap(struct intel_dp *intel_dp) | |||
3837 | DRM_DEBUG_KMS("DSC DPCD: %*ph\n", | 4026 | DRM_DEBUG_KMS("DSC DPCD: %*ph\n", |
3838 | (int)sizeof(intel_dp->dsc_dpcd), | 4027 | (int)sizeof(intel_dp->dsc_dpcd), |
3839 | intel_dp->dsc_dpcd); | 4028 | intel_dp->dsc_dpcd); |
4029 | |||
3840 | /* FEC is supported only on DP 1.4 */ | 4030 | /* FEC is supported only on DP 1.4 */ |
3841 | if (!intel_dp_is_edp(intel_dp)) { | 4031 | if (!intel_dp_is_edp(intel_dp) && |
3842 | if (drm_dp_dpcd_readb(&intel_dp->aux, DP_FEC_CAPABILITY, | 4032 | drm_dp_dpcd_readb(&intel_dp->aux, DP_FEC_CAPABILITY, |
3843 | &intel_dp->fec_capable) < 0) | 4033 | &intel_dp->fec_capable) < 0) |
3844 | DRM_ERROR("Failed to read FEC DPCD register\n"); | 4034 | DRM_ERROR("Failed to read FEC DPCD register\n"); |
3845 | 4035 | ||
3846 | DRM_DEBUG_KMS("FEC CAPABILITY: %x\n", | 4036 | DRM_DEBUG_KMS("FEC CAPABILITY: %x\n", intel_dp->fec_capable); |
3847 | intel_dp->fec_capable); | ||
3848 | } | ||
3849 | } | 4037 | } |
3850 | } | 4038 | } |
3851 | 4039 | ||
@@ -3936,8 +4124,6 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp) | |||
3936 | static bool | 4124 | static bool |
3937 | intel_dp_get_dpcd(struct intel_dp *intel_dp) | 4125 | intel_dp_get_dpcd(struct intel_dp *intel_dp) |
3938 | { | 4126 | { |
3939 | u8 sink_count; | ||
3940 | |||
3941 | if (!intel_dp_read_dpcd(intel_dp)) | 4127 | if (!intel_dp_read_dpcd(intel_dp)) |
3942 | return false; | 4128 | return false; |
3943 | 4129 | ||
@@ -3947,25 +4133,35 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp) | |||
3947 | intel_dp_set_common_rates(intel_dp); | 4133 | intel_dp_set_common_rates(intel_dp); |
3948 | } | 4134 | } |
3949 | 4135 | ||
3950 | if (drm_dp_dpcd_readb(&intel_dp->aux, DP_SINK_COUNT, &sink_count) <= 0) | ||
3951 | return false; | ||
3952 | |||
3953 | /* | 4136 | /* |
3954 | * Sink count can change between short pulse hpd hence | 4137 | * Some eDP panels do not set a valid value for sink count, which is
3955 | * a member variable in intel_dp will track any changes | 4138 | * why we don't read it here or in intel_edp_init_dpcd().
3956 | * between short pulse interrupts. | ||
3957 | */ | 4139 | */ |
3958 | intel_dp->sink_count = DP_GET_SINK_COUNT(sink_count); | 4140 | if (!intel_dp_is_edp(intel_dp)) { |
4141 | u8 count; | ||
4142 | ssize_t r; | ||
3959 | 4143 | ||
3960 | /* | 4144 | r = drm_dp_dpcd_readb(&intel_dp->aux, DP_SINK_COUNT, &count); |
3961 | * SINK_COUNT == 0 and DOWNSTREAM_PORT_PRESENT == 1 implies that | 4145 | if (r < 1) |
3962 | * a dongle is present but no display. Unless we require to know | 4146 | return false; |
3963 | * if a dongle is present or not, we don't need to update | 4147 | |
3964 | * downstream port information. So, an early return here saves | 4148 | /* |
3965 | * time from performing other operations which are not required. | 4149 | * Sink count can change between short pulse hpd hence |
3966 | */ | 4150 | * a member variable in intel_dp will track any changes |
3967 | if (!intel_dp_is_edp(intel_dp) && !intel_dp->sink_count) | 4151 | * between short pulse interrupts. |
3968 | return false; | 4152 | */ |
4153 | intel_dp->sink_count = DP_GET_SINK_COUNT(count); | ||
4154 | |||
4155 | /* | ||
4156 | * SINK_COUNT == 0 and DOWNSTREAM_PORT_PRESENT == 1 implies that | ||
4157 | * a dongle is present but no display. Unless we require to know | ||
4158 | * if a dongle is present or not, we don't need to update | ||
4159 | * downstream port information. So, an early return here saves | ||
4160 | * time from performing other operations which are not required. | ||
4161 | */ | ||
4162 | if (!intel_dp->sink_count) | ||
4163 | return false; | ||
4164 | } | ||
3969 | 4165 | ||
3970 | if (!drm_dp_is_branch(intel_dp->dpcd)) | 4166 | if (!drm_dp_is_branch(intel_dp->dpcd)) |
3971 | return true; /* native DP sink */ | 4167 | return true; /* native DP sink */ |
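The sink count read is now skipped for eDP entirely; for external DP the raw DP_SINK_COUNT byte still goes through DP_GET_SINK_COUNT(), which masks out CP_READY (bit 6) and folds the high count bit back in. A quick demonstration of the macro as defined in drm_dp_helper.h:

#include <stdio.h>

/* SINK_COUNT keeps its low 6 bits in bits 5:0, bit 6 is CP_READY
 * (masked out), and bit 7 holds count bit 6.
 */
#define DP_GET_SINK_COUNT(x) ((((x) & 0x80) >> 1) | ((x) & 0x3f))

int main(void)
{
	unsigned char raw = 0x41; /* CP_READY set, one sink attached */

	printf("sink count = %d\n", DP_GET_SINK_COUNT(raw)); /* 1 */
	return 0;
}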
@@ -4375,6 +4571,17 @@ intel_dp_needs_link_retrain(struct intel_dp *intel_dp) | |||
4375 | if (!intel_dp->link_trained) | 4571 | if (!intel_dp->link_trained) |
4376 | return false; | 4572 | return false; |
4377 | 4573 | ||
4574 | /* | ||
4575 | * While PSR source HW is enabled, it controls the main link, sending | ||
4576 | * frames and enabling/disabling the link itself, so attempting a | ||
4577 | * retrain will fail: the link may not be on, or training patterns | ||
4578 | * could get mixed with frame data, causing the retrain to fail. | ||
4579 | * Also, when exiting PSR, the HW will retrain the link anyway, fixing | ||
4580 | * any link status error. | ||
4581 | */ | ||
4582 | if (intel_psr_enabled(intel_dp)) | ||
4583 | return false; | ||
4584 | |||
4378 | if (!intel_dp_get_link_status(intel_dp, link_status)) | 4585 | if (!intel_dp_get_link_status(intel_dp, link_status)) |
4379 | return false; | 4586 | return false; |
4380 | 4587 | ||
diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.c b/drivers/gpu/drm/i915/intel_dpll_mgr.c index 901e15063b24..d513ca875c67 100644 --- a/drivers/gpu/drm/i915/intel_dpll_mgr.c +++ b/drivers/gpu/drm/i915/intel_dpll_mgr.c | |||
@@ -2523,7 +2523,8 @@ static bool icl_calc_dpll_state(struct intel_crtc_state *crtc_state, | |||
2523 | 2523 | ||
2524 | if (intel_port_is_tc(dev_priv, encoder->port)) | 2524 | if (intel_port_is_tc(dev_priv, encoder->port)) |
2525 | ret = icl_calc_tbt_pll(dev_priv, clock, &pll_params); | 2525 | ret = icl_calc_tbt_pll(dev_priv, clock, &pll_params); |
2526 | else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) | 2526 | else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) || |
2527 | intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI)) | ||
2527 | ret = cnl_ddi_calculate_wrpll(clock, dev_priv, &pll_params); | 2528 | ret = cnl_ddi_calculate_wrpll(clock, dev_priv, &pll_params); |
2528 | else | 2529 | else |
2529 | ret = icl_calc_dp_combo_pll(dev_priv, clock, &pll_params); | 2530 | ret = icl_calc_dp_combo_pll(dev_priv, clock, &pll_params); |
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index a7d9ac912125..f94a04b4ad87 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h | |||
@@ -706,6 +706,8 @@ struct intel_crtc_wm_state { | |||
706 | /* gen9+ only needs 1-step wm programming */ | 706 | /* gen9+ only needs 1-step wm programming */ |
707 | struct skl_pipe_wm optimal; | 707 | struct skl_pipe_wm optimal; |
708 | struct skl_ddb_entry ddb; | 708 | struct skl_ddb_entry ddb; |
709 | struct skl_ddb_entry plane_ddb_y[I915_MAX_PLANES]; | ||
710 | struct skl_ddb_entry plane_ddb_uv[I915_MAX_PLANES]; | ||
709 | } skl; | 711 | } skl; |
710 | 712 | ||
711 | struct { | 713 | struct { |
@@ -926,6 +928,9 @@ struct intel_crtc_state { | |||
926 | u8 active_planes; | 928 | u8 active_planes; |
927 | u8 nv12_planes; | 929 | u8 nv12_planes; |
928 | 930 | ||
931 | /* bitmask of planes that will be updated during the commit */ | ||
932 | u8 update_planes; | ||
933 | |||
929 | /* HDMI scrambling status */ | 934 | /* HDMI scrambling status */ |
930 | bool hdmi_scrambling; | 935 | bool hdmi_scrambling; |
931 | 936 | ||
@@ -937,6 +942,18 @@ struct intel_crtc_state { | |||
937 | 942 | ||
938 | /* Output down scaling is done in LSPCON device */ | 943 | /* Output down scaling is done in LSPCON device */ |
939 | bool lspcon_downsampling; | 944 | bool lspcon_downsampling; |
945 | |||
946 | /* Display Stream compression state */ | ||
947 | struct { | ||
948 | bool compression_enable; | ||
949 | bool dsc_split; | ||
950 | u16 compressed_bpp; | ||
951 | u8 slice_count; | ||
952 | } dsc_params; | ||
953 | struct drm_dsc_config dp_dsc_cfg; | ||
954 | |||
955 | /* Forward Error correction State */ | ||
956 | bool fec_enable; | ||
940 | }; | 957 | }; |
941 | 958 | ||
942 | struct intel_crtc { | 959 | struct intel_crtc { |
@@ -1013,7 +1030,7 @@ struct intel_plane { | |||
1013 | const struct intel_crtc_state *crtc_state, | 1030 | const struct intel_crtc_state *crtc_state, |
1014 | const struct intel_plane_state *plane_state); | 1031 | const struct intel_plane_state *plane_state); |
1015 | void (*disable_plane)(struct intel_plane *plane, | 1032 | void (*disable_plane)(struct intel_plane *plane, |
1016 | struct intel_crtc *crtc); | 1033 | const struct intel_crtc_state *crtc_state); |
1017 | bool (*get_hw_state)(struct intel_plane *plane, enum pipe *pipe); | 1034 | bool (*get_hw_state)(struct intel_plane *plane, enum pipe *pipe); |
1018 | int (*check_plane)(struct intel_crtc_state *crtc_state, | 1035 | int (*check_plane)(struct intel_crtc_state *crtc_state, |
1019 | struct intel_plane_state *plane_state); | 1036 | struct intel_plane_state *plane_state); |
@@ -1517,13 +1534,9 @@ u8 intel_ddi_dp_pre_emphasis_max(struct intel_encoder *encoder, | |||
1517 | u8 voltage_swing); | 1534 | u8 voltage_swing); |
1518 | int intel_ddi_toggle_hdcp_signalling(struct intel_encoder *intel_encoder, | 1535 | int intel_ddi_toggle_hdcp_signalling(struct intel_encoder *intel_encoder, |
1519 | bool enable); | 1536 | bool enable); |
1520 | void icl_map_plls_to_ports(struct drm_crtc *crtc, | ||
1521 | struct intel_crtc_state *crtc_state, | ||
1522 | struct drm_atomic_state *old_state); | ||
1523 | void icl_unmap_plls_to_ports(struct drm_crtc *crtc, | ||
1524 | struct intel_crtc_state *crtc_state, | ||
1525 | struct drm_atomic_state *old_state); | ||
1526 | void icl_sanitize_encoder_pll_mapping(struct intel_encoder *encoder); | 1537 | void icl_sanitize_encoder_pll_mapping(struct intel_encoder *encoder); |
1538 | int cnl_calc_wrpll_link(struct drm_i915_private *dev_priv, | ||
1539 | enum intel_dpll_id pll_id); | ||
1527 | 1540 | ||
1528 | unsigned int intel_fb_align_height(const struct drm_framebuffer *fb, | 1541 | unsigned int intel_fb_align_height(const struct drm_framebuffer *fb, |
1529 | int color_plane, unsigned int height); | 1542 | int color_plane, unsigned int height); |
@@ -1788,6 +1801,9 @@ void intel_dp_stop_link_train(struct intel_dp *intel_dp); | |||
1788 | int intel_dp_retrain_link(struct intel_encoder *encoder, | 1801 | int intel_dp_retrain_link(struct intel_encoder *encoder, |
1789 | struct drm_modeset_acquire_ctx *ctx); | 1802 | struct drm_modeset_acquire_ctx *ctx); |
1790 | void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode); | 1803 | void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode); |
1804 | void intel_dp_sink_set_decompression_state(struct intel_dp *intel_dp, | ||
1805 | const struct intel_crtc_state *crtc_state, | ||
1806 | bool enable); | ||
1791 | void intel_dp_encoder_reset(struct drm_encoder *encoder); | 1807 | void intel_dp_encoder_reset(struct drm_encoder *encoder); |
1792 | void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder); | 1808 | void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder); |
1793 | void intel_dp_encoder_destroy(struct drm_encoder *encoder); | 1809 | void intel_dp_encoder_destroy(struct drm_encoder *encoder); |
@@ -1843,6 +1859,12 @@ uint16_t intel_dp_dsc_get_output_bpp(int link_clock, uint8_t lane_count, | |||
1843 | uint8_t intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp, int mode_clock, | 1859 | uint8_t intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp, int mode_clock, |
1844 | int mode_hdisplay); | 1860 | int mode_hdisplay); |
1845 | 1861 | ||
1862 | /* intel_vdsc.c */ | ||
1863 | int intel_dp_compute_dsc_params(struct intel_dp *intel_dp, | ||
1864 | struct intel_crtc_state *pipe_config); | ||
1865 | enum intel_display_power_domain | ||
1866 | intel_dsc_power_domain(const struct intel_crtc_state *crtc_state); | ||
1867 | |||
1846 | static inline unsigned int intel_dp_unused_lane_mask(int lane_count) | 1868 | static inline unsigned int intel_dp_unused_lane_mask(int lane_count) |
1847 | { | 1869 | { |
1848 | return ~((1 << lane_count) - 1) & 0xf; | 1870 | return ~((1 << lane_count) - 1) & 0xf; |
@@ -2047,6 +2069,7 @@ void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir); | |||
2047 | void intel_psr_short_pulse(struct intel_dp *intel_dp); | 2069 | void intel_psr_short_pulse(struct intel_dp *intel_dp); |
2048 | int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state, | 2070 | int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state, |
2049 | u32 *out_value); | 2071 | u32 *out_value); |
2072 | bool intel_psr_enabled(struct intel_dp *intel_dp); | ||
2050 | 2073 | ||
2051 | /* intel_quirks.c */ | 2074 | /* intel_quirks.c */ |
2052 | void intel_init_quirks(struct drm_i915_private *dev_priv); | 2075 | void intel_init_quirks(struct drm_i915_private *dev_priv); |
@@ -2181,6 +2204,9 @@ void g4x_wm_get_hw_state(struct drm_device *dev); | |||
2181 | void vlv_wm_get_hw_state(struct drm_device *dev); | 2204 | void vlv_wm_get_hw_state(struct drm_device *dev); |
2182 | void ilk_wm_get_hw_state(struct drm_device *dev); | 2205 | void ilk_wm_get_hw_state(struct drm_device *dev); |
2183 | void skl_wm_get_hw_state(struct drm_device *dev); | 2206 | void skl_wm_get_hw_state(struct drm_device *dev); |
2207 | void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc, | ||
2208 | struct skl_ddb_entry *ddb_y, | ||
2209 | struct skl_ddb_entry *ddb_uv); | ||
2184 | void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv, | 2210 | void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv, |
2185 | struct skl_ddb_allocation *ddb /* out */); | 2211 | struct skl_ddb_allocation *ddb /* out */); |
2186 | void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc, | 2212 | void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc, |
@@ -2195,6 +2221,10 @@ bool skl_wm_level_equals(const struct skl_wm_level *l1, | |||
2195 | bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry *ddb, | 2221 | bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry *ddb, |
2196 | const struct skl_ddb_entry entries[], | 2222 | const struct skl_ddb_entry entries[], |
2197 | int num_entries, int ignore_idx); | 2223 | int num_entries, int ignore_idx); |
2224 | void skl_write_plane_wm(struct intel_plane *plane, | ||
2225 | const struct intel_crtc_state *crtc_state); | ||
2226 | void skl_write_cursor_wm(struct intel_plane *plane, | ||
2227 | const struct intel_crtc_state *crtc_state); | ||
2198 | bool ilk_disable_lp_wm(struct drm_device *dev); | 2228 | bool ilk_disable_lp_wm(struct drm_device *dev); |
2199 | int skl_check_pipe_max_pixel_rate(struct intel_crtc *intel_crtc, | 2229 | int skl_check_pipe_max_pixel_rate(struct intel_crtc *intel_crtc, |
2200 | struct intel_crtc_state *cstate); | 2230 | struct intel_crtc_state *cstate); |
@@ -2287,10 +2317,10 @@ struct drm_plane_state *intel_plane_duplicate_state(struct drm_plane *plane); | |||
2287 | void intel_plane_destroy_state(struct drm_plane *plane, | 2317 | void intel_plane_destroy_state(struct drm_plane *plane, |
2288 | struct drm_plane_state *state); | 2318 | struct drm_plane_state *state); |
2289 | extern const struct drm_plane_helper_funcs intel_plane_helper_funcs; | 2319 | extern const struct drm_plane_helper_funcs intel_plane_helper_funcs; |
2290 | void intel_update_planes_on_crtc(struct intel_atomic_state *old_state, | 2320 | void skl_update_planes_on_crtc(struct intel_atomic_state *state, |
2291 | struct intel_crtc *crtc, | 2321 | struct intel_crtc *crtc); |
2292 | struct intel_crtc_state *old_crtc_state, | 2322 | void i9xx_update_planes_on_crtc(struct intel_atomic_state *state, |
2293 | struct intel_crtc_state *new_crtc_state); | 2323 | struct intel_crtc *crtc); |
2294 | int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_state, | 2324 | int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_state, |
2295 | struct intel_crtc_state *crtc_state, | 2325 | struct intel_crtc_state *crtc_state, |
2296 | const struct intel_plane_state *old_plane_state, | 2326 | const struct intel_plane_state *old_plane_state, |
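[note] intel_update_planes_on_crtc() is split into skl_ and i9xx_ variants that take only the atomic state and the crtc; the old/new crtc states the previous signature hauled around can be looked up from the state itself. The caller-side dispatch presumably reduces to a gen check (illustrative sketch, not visible in this hunk):

    if (INTEL_GEN(dev_priv) >= 9)
            skl_update_planes_on_crtc(state, crtc);
    else
            i9xx_update_planes_on_crtc(state, crtc);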
diff --git a/drivers/gpu/drm/i915/intel_dsi.h b/drivers/gpu/drm/i915/intel_dsi.h index ee93137f4433..d968f1f13e09 100644 --- a/drivers/gpu/drm/i915/intel_dsi.h +++ b/drivers/gpu/drm/i915/intel_dsi.h | |||
@@ -146,6 +146,11 @@ static inline bool is_cmd_mode(struct intel_dsi *intel_dsi) | |||
146 | return intel_dsi->operation_mode == INTEL_DSI_COMMAND_MODE; | 146 | return intel_dsi->operation_mode == INTEL_DSI_COMMAND_MODE; |
147 | } | 147 | } |
148 | 148 | ||
149 | static inline u16 intel_dsi_encoder_ports(struct intel_encoder *encoder) | ||
150 | { | ||
151 | return enc_to_intel_dsi(&encoder->base)->ports; | ||
152 | } | ||
153 | |||
149 | /* intel_dsi.c */ | 154 | /* intel_dsi.c */ |
150 | int intel_dsi_bitrate(const struct intel_dsi *intel_dsi); | 155 | int intel_dsi_bitrate(const struct intel_dsi *intel_dsi); |
151 | int intel_dsi_tlpx_ns(const struct intel_dsi *intel_dsi); | 156 | int intel_dsi_tlpx_ns(const struct intel_dsi *intel_dsi); |
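[note] The new inline saves callers that only hold an intel_encoder from open-coding the enc_to_intel_dsi() lookup; a hedged usage sketch (for_each_dsi_port() is the iteration helper this header already provides; the loop body is hypothetical):

    enum port port;

    for_each_dsi_port(port, intel_dsi_encoder_ports(encoder)) {
            /* program per-port DSI registers here */
    }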
diff --git a/drivers/gpu/drm/i915/intel_dsi_vbt.c b/drivers/gpu/drm/i915/intel_dsi_vbt.c index a72de81f4832..a1a8b3790e61 100644 --- a/drivers/gpu/drm/i915/intel_dsi_vbt.c +++ b/drivers/gpu/drm/i915/intel_dsi_vbt.c | |||
@@ -103,6 +103,18 @@ static struct gpio_map vlv_gpio_table[] = { | |||
103 | #define CHV_GPIO_PAD_CFG1(f, i) (0x4400 + (f) * 0x400 + (i) * 8 + 4) | 103 | #define CHV_GPIO_PAD_CFG1(f, i) (0x4400 + (f) * 0x400 + (i) * 8 + 4) |
104 | #define CHV_GPIO_CFGLOCK (1 << 31) | 104 | #define CHV_GPIO_CFGLOCK (1 << 31) |
105 | 105 | ||
106 | /* ICL DSI Display GPIO Pins */ | ||
107 | #define ICL_GPIO_DDSP_HPD_A 0 | ||
108 | #define ICL_GPIO_L_VDDEN_1 1 | ||
109 | #define ICL_GPIO_L_BKLTEN_1 2 | ||
110 | #define ICL_GPIO_DDPA_CTRLCLK_1 3 | ||
111 | #define ICL_GPIO_DDPA_CTRLDATA_1 4 | ||
112 | #define ICL_GPIO_DDSP_HPD_B 5 | ||
113 | #define ICL_GPIO_L_VDDEN_2 6 | ||
114 | #define ICL_GPIO_L_BKLTEN_2 7 | ||
115 | #define ICL_GPIO_DDPA_CTRLCLK_2 8 | ||
116 | #define ICL_GPIO_DDPA_CTRLDATA_2 9 | ||
117 | |||
106 | static inline enum port intel_dsi_seq_port_to_port(u8 port) | 118 | static inline enum port intel_dsi_seq_port_to_port(u8 port) |
107 | { | 119 | { |
108 | return port ? PORT_C : PORT_A; | 120 | return port ? PORT_C : PORT_A; |
@@ -324,6 +336,12 @@ static void bxt_exec_gpio(struct drm_i915_private *dev_priv, | |||
324 | gpiod_set_value(gpio_desc, value); | 336 | gpiod_set_value(gpio_desc, value); |
325 | } | 337 | } |
326 | 338 | ||
339 | static void icl_exec_gpio(struct drm_i915_private *dev_priv, | ||
340 | u8 gpio_source, u8 gpio_index, bool value) | ||
341 | { | ||
342 | DRM_DEBUG_KMS("Skipping ICL GPIO element execution\n"); | ||
343 | } | ||
344 | |||
327 | static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data) | 345 | static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data) |
328 | { | 346 | { |
329 | struct drm_device *dev = intel_dsi->base.base.dev; | 347 | struct drm_device *dev = intel_dsi->base.base.dev; |
@@ -347,7 +365,9 @@ static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data) | |||
347 | /* pull up/down */ | 365 | /* pull up/down */ |
348 | value = *data++ & 1; | 366 | value = *data++ & 1; |
349 | 367 | ||
350 | if (IS_VALLEYVIEW(dev_priv)) | 368 | if (IS_ICELAKE(dev_priv)) |
369 | icl_exec_gpio(dev_priv, gpio_source, gpio_index, value); | ||
370 | else if (IS_VALLEYVIEW(dev_priv)) | ||
351 | vlv_exec_gpio(dev_priv, gpio_source, gpio_number, value); | 371 | vlv_exec_gpio(dev_priv, gpio_source, gpio_number, value); |
352 | else if (IS_CHERRYVIEW(dev_priv)) | 372 | else if (IS_CHERRYVIEW(dev_priv)) |
353 | chv_exec_gpio(dev_priv, gpio_source, gpio_number, value); | 373 | chv_exec_gpio(dev_priv, gpio_source, gpio_number, value); |
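[note] ICL gains named GPIO indices and, for now, only a logging stub, so VBT power sequences that toggle GPIOs are parsed but not acted on yet. For orientation, the element the dispatcher consumes is a few bytes pulled off the sequence stream; the first two reads below happen earlier in the function and their exact layout (VBT sequence-version dependent) is an assumption here:

    u8 gpio_index  = *data++;     /* assumed: logical index (newer VBT) */
    u8 gpio_number = *data++;     /* assumed: raw GPIO number (legacy) */
    bool value     = *data++ & 1; /* pull up/down, as in the hunk above */

    if (IS_ICELAKE(dev_priv))
            icl_exec_gpio(dev_priv, gpio_source, gpio_index, value); /* stub */
    else if (IS_VALLEYVIEW(dev_priv))
            vlv_exec_gpio(dev_priv, gpio_source, gpio_number, value);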
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c index 759c0fd58f8c..af2873403009 100644 --- a/drivers/gpu/drm/i915/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/intel_engine_cs.c | |||
@@ -493,46 +493,6 @@ void intel_engine_setup_common(struct intel_engine_cs *engine) | |||
493 | intel_engine_init_cmd_parser(engine); | 493 | intel_engine_init_cmd_parser(engine); |
494 | } | 494 | } |
495 | 495 | ||
496 | int intel_engine_create_scratch(struct intel_engine_cs *engine, | ||
497 | unsigned int size) | ||
498 | { | ||
499 | struct drm_i915_gem_object *obj; | ||
500 | struct i915_vma *vma; | ||
501 | int ret; | ||
502 | |||
503 | WARN_ON(engine->scratch); | ||
504 | |||
505 | obj = i915_gem_object_create_stolen(engine->i915, size); | ||
506 | if (!obj) | ||
507 | obj = i915_gem_object_create_internal(engine->i915, size); | ||
508 | if (IS_ERR(obj)) { | ||
509 | DRM_ERROR("Failed to allocate scratch page\n"); | ||
510 | return PTR_ERR(obj); | ||
511 | } | ||
512 | |||
513 | vma = i915_vma_instance(obj, &engine->i915->ggtt.vm, NULL); | ||
514 | if (IS_ERR(vma)) { | ||
515 | ret = PTR_ERR(vma); | ||
516 | goto err_unref; | ||
517 | } | ||
518 | |||
519 | ret = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH); | ||
520 | if (ret) | ||
521 | goto err_unref; | ||
522 | |||
523 | engine->scratch = vma; | ||
524 | return 0; | ||
525 | |||
526 | err_unref: | ||
527 | i915_gem_object_put(obj); | ||
528 | return ret; | ||
529 | } | ||
530 | |||
531 | void intel_engine_cleanup_scratch(struct intel_engine_cs *engine) | ||
532 | { | ||
533 | i915_vma_unpin_and_release(&engine->scratch, 0); | ||
534 | } | ||
535 | |||
536 | static void cleanup_status_page(struct intel_engine_cs *engine) | 496 | static void cleanup_status_page(struct intel_engine_cs *engine) |
537 | { | 497 | { |
538 | if (HWS_NEEDS_PHYSICAL(engine->i915)) { | 498 | if (HWS_NEEDS_PHYSICAL(engine->i915)) { |
@@ -707,8 +667,6 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine) | |||
707 | { | 667 | { |
708 | struct drm_i915_private *i915 = engine->i915; | 668 | struct drm_i915_private *i915 = engine->i915; |
709 | 669 | ||
710 | intel_engine_cleanup_scratch(engine); | ||
711 | |||
712 | cleanup_status_page(engine); | 670 | cleanup_status_page(engine); |
713 | 671 | ||
714 | intel_engine_fini_breadcrumbs(engine); | 672 | intel_engine_fini_breadcrumbs(engine); |
@@ -723,6 +681,10 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine) | |||
723 | __intel_context_unpin(i915->kernel_context, engine); | 681 | __intel_context_unpin(i915->kernel_context, engine); |
724 | 682 | ||
725 | i915_timeline_fini(&engine->timeline); | 683 | i915_timeline_fini(&engine->timeline); |
684 | |||
685 | intel_wa_list_free(&engine->ctx_wa_list); | ||
686 | intel_wa_list_free(&engine->wa_list); | ||
687 | intel_wa_list_free(&engine->whitelist); | ||
726 | } | 688 | } |
727 | 689 | ||
728 | u64 intel_engine_get_active_head(const struct intel_engine_cs *engine) | 690 | u64 intel_engine_get_active_head(const struct intel_engine_cs *engine) |
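[note] Engine workarounds now live in dynamically grown i915_wa_list arrays (ctx_wa_list, wa_list, whitelist), so common cleanup has to release them alongside the timeline. The freeing helper is not shown in this hunk; a sketch of its assumed shape:

    /* Assumed shape: kfree the grown entry array, then reset the list. */
    static void intel_wa_list_free(struct i915_wa_list *wal)
    {
            kfree(wal->list);
            memset(wal, 0, sizeof(*wal));
    }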
diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c index 14cbaf4a0e93..f23570c44323 100644 --- a/drivers/gpu/drm/i915/intel_fbc.c +++ b/drivers/gpu/drm/i915/intel_fbc.c | |||
@@ -1309,7 +1309,7 @@ void intel_fbc_init(struct drm_i915_private *dev_priv) | |||
1309 | fbc->active = false; | 1309 | fbc->active = false; |
1310 | 1310 | ||
1311 | if (need_fbc_vtd_wa(dev_priv)) | 1311 | if (need_fbc_vtd_wa(dev_priv)) |
1312 | mkwrite_device_info(dev_priv)->has_fbc = false; | 1312 | mkwrite_device_info(dev_priv)->display.has_fbc = false; |
1313 | 1313 | ||
1314 | i915_modparams.enable_fbc = intel_sanitize_fbc_option(dev_priv); | 1314 | i915_modparams.enable_fbc = intel_sanitize_fbc_option(dev_priv); |
1315 | DRM_DEBUG_KMS("Sanitized enable_fbc value: %d\n", | 1315 | DRM_DEBUG_KMS("Sanitized enable_fbc value: %d\n", |
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c index 2480c7d6edee..fb5bb5b32a60 100644 --- a/drivers/gpu/drm/i915/intel_fbdev.c +++ b/drivers/gpu/drm/i915/intel_fbdev.c | |||
@@ -672,7 +672,7 @@ int intel_fbdev_init(struct drm_device *dev) | |||
672 | struct intel_fbdev *ifbdev; | 672 | struct intel_fbdev *ifbdev; |
673 | int ret; | 673 | int ret; |
674 | 674 | ||
675 | if (WARN_ON(INTEL_INFO(dev_priv)->num_pipes == 0)) | 675 | if (WARN_ON(!HAS_DISPLAY(dev_priv))) |
676 | return -ENODEV; | 676 | return -ENODEV; |
677 | 677 | ||
678 | ifbdev = kzalloc(sizeof(struct intel_fbdev), GFP_KERNEL); | 678 | ifbdev = kzalloc(sizeof(struct intel_fbdev), GFP_KERNEL); |
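[note] The open-coded num_pipes test gives way to HAS_DISPLAY(). The macro itself is not in this hunk; assuming it is defined the obvious way, the two checks are equivalent:

    /* Assumed definition; only the call sites appear in the diff. */
    #define HAS_DISPLAY(dev_priv) (INTEL_INFO(dev_priv)->num_pipes > 0)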
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index e2c6a2b3e8f2..07e803a604bd 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c | |||
@@ -115,6 +115,8 @@ static u32 hsw_infoframe_enable(unsigned int type) | |||
115 | switch (type) { | 115 | switch (type) { |
116 | case DP_SDP_VSC: | 116 | case DP_SDP_VSC: |
117 | return VIDEO_DIP_ENABLE_VSC_HSW; | 117 | return VIDEO_DIP_ENABLE_VSC_HSW; |
118 | case DP_SDP_PPS: | ||
119 | return VDIP_ENABLE_PPS; | ||
118 | case HDMI_INFOFRAME_TYPE_AVI: | 120 | case HDMI_INFOFRAME_TYPE_AVI: |
119 | return VIDEO_DIP_ENABLE_AVI_HSW; | 121 | return VIDEO_DIP_ENABLE_AVI_HSW; |
120 | case HDMI_INFOFRAME_TYPE_SPD: | 122 | case HDMI_INFOFRAME_TYPE_SPD: |
@@ -136,6 +138,8 @@ hsw_dip_data_reg(struct drm_i915_private *dev_priv, | |||
136 | switch (type) { | 138 | switch (type) { |
137 | case DP_SDP_VSC: | 139 | case DP_SDP_VSC: |
138 | return HSW_TVIDEO_DIP_VSC_DATA(cpu_transcoder, i); | 140 | return HSW_TVIDEO_DIP_VSC_DATA(cpu_transcoder, i); |
141 | case DP_SDP_PPS: | ||
142 | return ICL_VIDEO_DIP_PPS_DATA(cpu_transcoder, i); | ||
139 | case HDMI_INFOFRAME_TYPE_AVI: | 143 | case HDMI_INFOFRAME_TYPE_AVI: |
140 | return HSW_TVIDEO_DIP_AVI_DATA(cpu_transcoder, i); | 144 | return HSW_TVIDEO_DIP_AVI_DATA(cpu_transcoder, i); |
141 | case HDMI_INFOFRAME_TYPE_SPD: | 145 | case HDMI_INFOFRAME_TYPE_SPD: |
@@ -148,6 +152,18 @@ hsw_dip_data_reg(struct drm_i915_private *dev_priv, | |||
148 | } | 152 | } |
149 | } | 153 | } |
150 | 154 | ||
155 | static int hsw_dip_data_size(unsigned int type) | ||
156 | { | ||
157 | switch (type) { | ||
158 | case DP_SDP_VSC: | ||
159 | return VIDEO_DIP_VSC_DATA_SIZE; | ||
160 | case DP_SDP_PPS: | ||
161 | return VIDEO_DIP_PPS_DATA_SIZE; | ||
162 | default: | ||
163 | return VIDEO_DIP_DATA_SIZE; | ||
164 | } | ||
165 | } | ||
166 | |||
151 | static void g4x_write_infoframe(struct intel_encoder *encoder, | 167 | static void g4x_write_infoframe(struct intel_encoder *encoder, |
152 | const struct intel_crtc_state *crtc_state, | 168 | const struct intel_crtc_state *crtc_state, |
153 | unsigned int type, | 169 | unsigned int type, |
@@ -382,11 +398,12 @@ static void hsw_write_infoframe(struct intel_encoder *encoder, | |||
382 | struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); | 398 | struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); |
383 | enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; | 399 | enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; |
384 | i915_reg_t ctl_reg = HSW_TVIDEO_DIP_CTL(cpu_transcoder); | 400 | i915_reg_t ctl_reg = HSW_TVIDEO_DIP_CTL(cpu_transcoder); |
385 | int data_size = type == DP_SDP_VSC ? | 401 | int data_size; |
386 | VIDEO_DIP_VSC_DATA_SIZE : VIDEO_DIP_DATA_SIZE; | ||
387 | int i; | 402 | int i; |
388 | u32 val = I915_READ(ctl_reg); | 403 | u32 val = I915_READ(ctl_reg); |
389 | 404 | ||
405 | data_size = hsw_dip_data_size(type); | ||
406 | |||
390 | val &= ~hsw_infoframe_enable(type); | 407 | val &= ~hsw_infoframe_enable(type); |
391 | I915_WRITE(ctl_reg, val); | 408 | I915_WRITE(ctl_reg, val); |
392 | 409 | ||
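[note] With DSC, the Picture Parameter Set travels as a DP SDP, so the DIP plumbing grows a third buffer size and hsw_write_infoframe() asks hsw_dip_data_size() instead of special-casing VSC. The payload write loop is elided from the hunk; it presumably steps a dword at a time up to data_size, roughly as below (a sketch; the buffer name and exact helper signature are assumptions):

    for (i = 0; i < data_size; i += 4)
            I915_WRITE(hsw_dip_data_reg(dev_priv, cpu_transcoder, type, i >> 2),
                       *(const u32 *)(frame + i));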
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c index 33d87ab93fdd..802d0394ccc4 100644 --- a/drivers/gpu/drm/i915/intel_i2c.c +++ b/drivers/gpu/drm/i915/intel_i2c.c | |||
@@ -817,7 +817,7 @@ int intel_setup_gmbus(struct drm_i915_private *dev_priv) | |||
817 | unsigned int pin; | 817 | unsigned int pin; |
818 | int ret; | 818 | int ret; |
819 | 819 | ||
820 | if (INTEL_INFO(dev_priv)->num_pipes == 0) | 820 | if (!HAS_DISPLAY(dev_priv)) |
821 | return 0; | 821 | return 0; |
822 | 822 | ||
823 | if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) | 823 | if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) |
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c index 08fd9b12e4d7..d7fa301b5ec7 100644 --- a/drivers/gpu/drm/i915/intel_lrc.c +++ b/drivers/gpu/drm/i915/intel_lrc.c | |||
@@ -767,6 +767,8 @@ execlists_cancel_port_requests(struct intel_engine_execlists * const execlists) | |||
767 | 767 | ||
768 | static void reset_csb_pointers(struct intel_engine_execlists *execlists) | 768 | static void reset_csb_pointers(struct intel_engine_execlists *execlists) |
769 | { | 769 | { |
770 | const unsigned int reset_value = GEN8_CSB_ENTRIES - 1; | ||
771 | |||
770 | /* | 772 | /* |
771 | * After a reset, the HW starts writing into CSB entry [0]. We | 773 | * After a reset, the HW starts writing into CSB entry [0]. We |
772 | * therefore have to set our HEAD pointer back one entry so that | 774 | * therefore have to set our HEAD pointer back one entry so that |
@@ -776,8 +778,8 @@ static void reset_csb_pointers(struct intel_engine_execlists *execlists) | |||
776 | * inline comparison of our cached head position against the last HW | 778 | * inline comparison of our cached head position against the last HW |
777 | * write works even before the first interrupt. | 779 | * write works even before the first interrupt. |
778 | */ | 780 | */ |
779 | execlists->csb_head = execlists->csb_write_reset; | 781 | execlists->csb_head = reset_value; |
780 | WRITE_ONCE(*execlists->csb_write, execlists->csb_write_reset); | 782 | WRITE_ONCE(*execlists->csb_write, reset_value); |
781 | } | 783 | } |
782 | 784 | ||
783 | static void nop_submission_tasklet(unsigned long data) | 785 | static void nop_submission_tasklet(unsigned long data) |
@@ -818,8 +820,11 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine) | |||
818 | /* Mark all executing requests as skipped. */ | 820 | /* Mark all executing requests as skipped. */ |
819 | list_for_each_entry(rq, &engine->timeline.requests, link) { | 821 | list_for_each_entry(rq, &engine->timeline.requests, link) { |
820 | GEM_BUG_ON(!rq->global_seqno); | 822 | GEM_BUG_ON(!rq->global_seqno); |
821 | if (!i915_request_completed(rq)) | 823 | |
822 | dma_fence_set_error(&rq->fence, -EIO); | 824 | if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags)) |
825 | continue; | ||
826 | |||
827 | dma_fence_set_error(&rq->fence, -EIO); | ||
823 | } | 828 | } |
824 | 829 | ||
825 | /* Flush the queued requests to the timeline list (for retiring). */ | 830 | /* Flush the queued requests to the timeline list (for retiring). */ |
@@ -839,6 +844,10 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine) | |||
839 | kmem_cache_free(engine->i915->priorities, p); | 844 | kmem_cache_free(engine->i915->priorities, p); |
840 | } | 845 | } |
841 | 846 | ||
847 | intel_write_status_page(engine, | ||
848 | I915_GEM_HWS_INDEX, | ||
849 | intel_engine_last_submit(engine)); | ||
850 | |||
842 | /* Remaining _unready_ requests will be nop'ed when submitted */ | 851 | /* Remaining _unready_ requests will be nop'ed when submitted */ |
843 | 852 | ||
844 | execlists->queue_priority = INT_MIN; | 853 | execlists->queue_priority = INT_MIN; |
@@ -1279,9 +1288,10 @@ static int execlists_request_alloc(struct i915_request *request) | |||
1279 | static u32 * | 1288 | static u32 * |
1280 | gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine, u32 *batch) | 1289 | gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine, u32 *batch) |
1281 | { | 1290 | { |
1291 | /* NB no one else is allowed to scribble over scratch + 256! */ | ||
1282 | *batch++ = MI_STORE_REGISTER_MEM_GEN8 | MI_SRM_LRM_GLOBAL_GTT; | 1292 | *batch++ = MI_STORE_REGISTER_MEM_GEN8 | MI_SRM_LRM_GLOBAL_GTT; |
1283 | *batch++ = i915_mmio_reg_offset(GEN8_L3SQCREG4); | 1293 | *batch++ = i915_mmio_reg_offset(GEN8_L3SQCREG4); |
1284 | *batch++ = i915_ggtt_offset(engine->scratch) + 256; | 1294 | *batch++ = i915_scratch_offset(engine->i915) + 256; |
1285 | *batch++ = 0; | 1295 | *batch++ = 0; |
1286 | 1296 | ||
1287 | *batch++ = MI_LOAD_REGISTER_IMM(1); | 1297 | *batch++ = MI_LOAD_REGISTER_IMM(1); |
@@ -1295,7 +1305,7 @@ gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine, u32 *batch) | |||
1295 | 1305 | ||
1296 | *batch++ = MI_LOAD_REGISTER_MEM_GEN8 | MI_SRM_LRM_GLOBAL_GTT; | 1306 | *batch++ = MI_LOAD_REGISTER_MEM_GEN8 | MI_SRM_LRM_GLOBAL_GTT; |
1297 | *batch++ = i915_mmio_reg_offset(GEN8_L3SQCREG4); | 1307 | *batch++ = i915_mmio_reg_offset(GEN8_L3SQCREG4); |
1298 | *batch++ = i915_ggtt_offset(engine->scratch) + 256; | 1308 | *batch++ = i915_scratch_offset(engine->i915) + 256; |
1299 | *batch++ = 0; | 1309 | *batch++ = 0; |
1300 | 1310 | ||
1301 | return batch; | 1311 | return batch; |
@@ -1332,7 +1342,7 @@ static u32 *gen8_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch) | |||
1332 | PIPE_CONTROL_GLOBAL_GTT_IVB | | 1342 | PIPE_CONTROL_GLOBAL_GTT_IVB | |
1333 | PIPE_CONTROL_CS_STALL | | 1343 | PIPE_CONTROL_CS_STALL | |
1334 | PIPE_CONTROL_QW_WRITE, | 1344 | PIPE_CONTROL_QW_WRITE, |
1335 | i915_ggtt_offset(engine->scratch) + | 1345 | i915_scratch_offset(engine->i915) + |
1336 | 2 * CACHELINE_BYTES); | 1346 | 2 * CACHELINE_BYTES); |
1337 | 1347 | ||
1338 | *batch++ = MI_ARB_ON_OFF | MI_ARB_ENABLE; | 1348 | *batch++ = MI_ARB_ON_OFF | MI_ARB_ENABLE; |
@@ -1401,18 +1411,6 @@ static u32 *gen9_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch) | |||
1401 | 1411 | ||
1402 | batch = emit_lri(batch, lri, ARRAY_SIZE(lri)); | 1412 | batch = emit_lri(batch, lri, ARRAY_SIZE(lri)); |
1403 | 1413 | ||
1404 | /* WaClearSlmSpaceAtContextSwitch:kbl */ | ||
1405 | /* Actual scratch location is at 128 bytes offset */ | ||
1406 | if (IS_KBL_REVID(engine->i915, 0, KBL_REVID_A0)) { | ||
1407 | batch = gen8_emit_pipe_control(batch, | ||
1408 | PIPE_CONTROL_FLUSH_L3 | | ||
1409 | PIPE_CONTROL_GLOBAL_GTT_IVB | | ||
1410 | PIPE_CONTROL_CS_STALL | | ||
1411 | PIPE_CONTROL_QW_WRITE, | ||
1412 | i915_ggtt_offset(engine->scratch) | ||
1413 | + 2 * CACHELINE_BYTES); | ||
1414 | } | ||
1415 | |||
1416 | /* WaMediaPoolStateCmdInWABB:bxt,glk */ | 1414 | /* WaMediaPoolStateCmdInWABB:bxt,glk */ |
1417 | if (HAS_POOLED_EU(engine->i915)) { | 1415 | if (HAS_POOLED_EU(engine->i915)) { |
1418 | /* | 1416 | /* |
@@ -1629,6 +1627,8 @@ static bool unexpected_starting_state(struct intel_engine_cs *engine) | |||
1629 | 1627 | ||
1630 | static int gen8_init_common_ring(struct intel_engine_cs *engine) | 1628 | static int gen8_init_common_ring(struct intel_engine_cs *engine) |
1631 | { | 1629 | { |
1630 | intel_engine_apply_workarounds(engine); | ||
1631 | |||
1632 | intel_mocs_init_engine(engine); | 1632 | intel_mocs_init_engine(engine); |
1633 | 1633 | ||
1634 | intel_engine_reset_breadcrumbs(engine); | 1634 | intel_engine_reset_breadcrumbs(engine); |
@@ -1653,7 +1653,7 @@ static int gen8_init_render_ring(struct intel_engine_cs *engine) | |||
1653 | if (ret) | 1653 | if (ret) |
1654 | return ret; | 1654 | return ret; |
1655 | 1655 | ||
1656 | intel_whitelist_workarounds_apply(engine); | 1656 | intel_engine_apply_whitelist(engine); |
1657 | 1657 | ||
1658 | /* We need to disable the AsyncFlip performance optimisations in order | 1658 | /* We need to disable the AsyncFlip performance optimisations in order |
1659 | * to use MI_WAIT_FOR_EVENT within the CS. It should already be | 1659 | * to use MI_WAIT_FOR_EVENT within the CS. It should already be |
@@ -1676,7 +1676,7 @@ static int gen9_init_render_ring(struct intel_engine_cs *engine) | |||
1676 | if (ret) | 1676 | if (ret) |
1677 | return ret; | 1677 | return ret; |
1678 | 1678 | ||
1679 | intel_whitelist_workarounds_apply(engine); | 1679 | intel_engine_apply_whitelist(engine); |
1680 | 1680 | ||
1681 | return 0; | 1681 | return 0; |
1682 | } | 1682 | } |
@@ -1974,7 +1974,7 @@ static int gen8_emit_flush_render(struct i915_request *request, | |||
1974 | { | 1974 | { |
1975 | struct intel_engine_cs *engine = request->engine; | 1975 | struct intel_engine_cs *engine = request->engine; |
1976 | u32 scratch_addr = | 1976 | u32 scratch_addr = |
1977 | i915_ggtt_offset(engine->scratch) + 2 * CACHELINE_BYTES; | 1977 | i915_scratch_offset(engine->i915) + 2 * CACHELINE_BYTES; |
1978 | bool vf_flush_wa = false, dc_flush_wa = false; | 1978 | bool vf_flush_wa = false, dc_flush_wa = false; |
1979 | u32 *cs, flags = 0; | 1979 | u32 *cs, flags = 0; |
1980 | int len; | 1980 | int len; |
@@ -2088,7 +2088,7 @@ static int gen8_init_rcs_context(struct i915_request *rq) | |||
2088 | { | 2088 | { |
2089 | int ret; | 2089 | int ret; |
2090 | 2090 | ||
2091 | ret = intel_ctx_workarounds_emit(rq); | 2091 | ret = intel_engine_emit_ctx_wa(rq); |
2092 | if (ret) | 2092 | if (ret) |
2093 | return ret; | 2093 | return ret; |
2094 | 2094 | ||
@@ -2229,12 +2229,6 @@ logical_ring_setup(struct intel_engine_cs *engine) | |||
2229 | logical_ring_default_irqs(engine); | 2229 | logical_ring_default_irqs(engine); |
2230 | } | 2230 | } |
2231 | 2231 | ||
2232 | static bool csb_force_mmio(struct drm_i915_private *i915) | ||
2233 | { | ||
2234 | /* Older GVT emulation depends upon intercepting CSB mmio */ | ||
2235 | return intel_vgpu_active(i915) && !intel_vgpu_has_hwsp_emulation(i915); | ||
2236 | } | ||
2237 | |||
2238 | static int logical_ring_init(struct intel_engine_cs *engine) | 2232 | static int logical_ring_init(struct intel_engine_cs *engine) |
2239 | { | 2233 | { |
2240 | struct drm_i915_private *i915 = engine->i915; | 2234 | struct drm_i915_private *i915 = engine->i915; |
@@ -2264,24 +2258,12 @@ static int logical_ring_init(struct intel_engine_cs *engine) | |||
2264 | upper_32_bits(ce->lrc_desc); | 2258 | upper_32_bits(ce->lrc_desc); |
2265 | } | 2259 | } |
2266 | 2260 | ||
2267 | execlists->csb_read = | 2261 | execlists->csb_status = |
2268 | i915->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_PTR(engine)); | 2262 | &engine->status_page.page_addr[I915_HWS_CSB_BUF0_INDEX]; |
2269 | if (csb_force_mmio(i915)) { | ||
2270 | execlists->csb_status = (u32 __force *) | ||
2271 | (i915->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_BUF_LO(engine, 0))); | ||
2272 | 2263 | ||
2273 | execlists->csb_write = (u32 __force *)execlists->csb_read; | 2264 | execlists->csb_write = |
2274 | execlists->csb_write_reset = | 2265 | &engine->status_page.page_addr[intel_hws_csb_write_index(i915)]; |
2275 | _MASKED_FIELD(GEN8_CSB_WRITE_PTR_MASK, | ||
2276 | GEN8_CSB_ENTRIES - 1); | ||
2277 | } else { | ||
2278 | execlists->csb_status = | ||
2279 | &engine->status_page.page_addr[I915_HWS_CSB_BUF0_INDEX]; | ||
2280 | 2266 | ||
2281 | execlists->csb_write = | ||
2282 | &engine->status_page.page_addr[intel_hws_csb_write_index(i915)]; | ||
2283 | execlists->csb_write_reset = GEN8_CSB_ENTRIES - 1; | ||
2284 | } | ||
2285 | reset_csb_pointers(execlists); | 2267 | reset_csb_pointers(execlists); |
2286 | 2268 | ||
2287 | return 0; | 2269 | return 0; |
@@ -2311,10 +2293,6 @@ int logical_render_ring_init(struct intel_engine_cs *engine) | |||
2311 | if (ret) | 2293 | if (ret) |
2312 | return ret; | 2294 | return ret; |
2313 | 2295 | ||
2314 | ret = intel_engine_create_scratch(engine, PAGE_SIZE); | ||
2315 | if (ret) | ||
2316 | goto err_cleanup_common; | ||
2317 | |||
2318 | ret = intel_init_workaround_bb(engine); | 2296 | ret = intel_init_workaround_bb(engine); |
2319 | if (ret) { | 2297 | if (ret) { |
2320 | /* | 2298 | /* |
@@ -2326,11 +2304,10 @@ int logical_render_ring_init(struct intel_engine_cs *engine) | |||
2326 | ret); | 2304 | ret); |
2327 | } | 2305 | } |
2328 | 2306 | ||
2329 | return 0; | 2307 | intel_engine_init_whitelist(engine); |
2308 | intel_engine_init_workarounds(engine); | ||
2330 | 2309 | ||
2331 | err_cleanup_common: | 2310 | return 0; |
2332 | intel_engine_cleanup_common(engine); | ||
2333 | return ret; | ||
2334 | } | 2311 | } |
2335 | 2312 | ||
2336 | int logical_xcs_ring_init(struct intel_engine_cs *engine) | 2313 | int logical_xcs_ring_init(struct intel_engine_cs *engine) |
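[note] With the GVT mmio-interception path dropped, the CSB ring is always read through the status page, and the reset position is simply the last entry index. Parking head at GEN8_CSB_ENTRIES - 1 makes the very first head != write comparison fire as soon as the hardware writes entry 0; a toy model of that wrap (names hypothetical; GEN8_CSB_ENTRIES is 6 on these parts):

    unsigned int head = CSB_ENTRIES - 1; /* the reset_value above */

    while (head != write) {
            head = (head + 1) % CSB_ENTRIES; /* first step lands on entry 0 */
            process(csb[head]);
    }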
diff --git a/drivers/gpu/drm/i915/intel_opregion.h b/drivers/gpu/drm/i915/intel_opregion.h index d84b6d2d2fae..4aa68ffbd30e 100644 --- a/drivers/gpu/drm/i915/intel_opregion.h +++ b/drivers/gpu/drm/i915/intel_opregion.h | |||
@@ -87,12 +87,12 @@ static inline void intel_opregion_unregister(struct drm_i915_private *dev_priv) | |||
87 | { | 87 | { |
88 | } | 88 | } |
89 | 89 | ||
90 | void intel_opregion_resume(struct drm_i915_private *dev_priv) | 90 | static inline void intel_opregion_resume(struct drm_i915_private *dev_priv) |
91 | { | 91 | { |
92 | } | 92 | } |
93 | 93 | ||
94 | void intel_opregion_suspend(struct drm_i915_private *dev_priv, | 94 | static inline void intel_opregion_suspend(struct drm_i915_private *dev_priv, |
95 | pci_power_t state) | 95 | pci_power_t state) |
96 | { | 96 | { |
97 | } | 97 | } |
98 | 98 | ||
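[note] The !ACPI stubs were plain definitions in a header, so every translation unit including it emitted an external symbol and the link failed with multiple-definition errors; static inline is the standard header-stub idiom:

    /* One harmless copy per TU, no external symbol. */
    static inline void intel_opregion_resume(struct drm_i915_private *dev_priv)
    {
    }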
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 897a791662c5..a26b4eddda25 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c | |||
@@ -3951,68 +3951,68 @@ static void | |||
3951 | skl_ddb_get_hw_plane_state(struct drm_i915_private *dev_priv, | 3951 | skl_ddb_get_hw_plane_state(struct drm_i915_private *dev_priv, |
3952 | const enum pipe pipe, | 3952 | const enum pipe pipe, |
3953 | const enum plane_id plane_id, | 3953 | const enum plane_id plane_id, |
3954 | struct skl_ddb_allocation *ddb /* out */) | 3954 | struct skl_ddb_entry *ddb_y, |
3955 | struct skl_ddb_entry *ddb_uv) | ||
3955 | { | 3956 | { |
3956 | u32 val, val2 = 0; | 3957 | u32 val, val2; |
3957 | int fourcc, pixel_format; | 3958 | u32 fourcc = 0; |
3958 | 3959 | ||
3959 | /* Cursor doesn't support NV12/planar, so no extra calculation needed */ | 3960 | /* Cursor doesn't support NV12/planar, so no extra calculation needed */ |
3960 | if (plane_id == PLANE_CURSOR) { | 3961 | if (plane_id == PLANE_CURSOR) { |
3961 | val = I915_READ(CUR_BUF_CFG(pipe)); | 3962 | val = I915_READ(CUR_BUF_CFG(pipe)); |
3962 | skl_ddb_entry_init_from_hw(dev_priv, | 3963 | skl_ddb_entry_init_from_hw(dev_priv, ddb_y, val); |
3963 | &ddb->plane[pipe][plane_id], val); | ||
3964 | return; | 3964 | return; |
3965 | } | 3965 | } |
3966 | 3966 | ||
3967 | val = I915_READ(PLANE_CTL(pipe, plane_id)); | 3967 | val = I915_READ(PLANE_CTL(pipe, plane_id)); |
3968 | 3968 | ||
3969 | /* No DDB allocated for disabled planes */ | 3969 | /* No DDB allocated for disabled planes */ |
3970 | if (!(val & PLANE_CTL_ENABLE)) | 3970 | if (val & PLANE_CTL_ENABLE) |
3971 | return; | 3971 | fourcc = skl_format_to_fourcc(val & PLANE_CTL_FORMAT_MASK, |
3972 | 3972 | val & PLANE_CTL_ORDER_RGBX, | |
3973 | pixel_format = val & PLANE_CTL_FORMAT_MASK; | 3973 | val & PLANE_CTL_ALPHA_MASK); |
3974 | fourcc = skl_format_to_fourcc(pixel_format, | ||
3975 | val & PLANE_CTL_ORDER_RGBX, | ||
3976 | val & PLANE_CTL_ALPHA_MASK); | ||
3977 | 3974 | ||
3978 | val = I915_READ(PLANE_BUF_CFG(pipe, plane_id)); | 3975 | if (INTEL_GEN(dev_priv) >= 11) { |
3979 | if (fourcc == DRM_FORMAT_NV12 && INTEL_GEN(dev_priv) < 11) { | 3976 | val = I915_READ(PLANE_BUF_CFG(pipe, plane_id)); |
3977 | skl_ddb_entry_init_from_hw(dev_priv, ddb_y, val); | ||
3978 | } else { | ||
3979 | val = I915_READ(PLANE_BUF_CFG(pipe, plane_id)); | ||
3980 | val2 = I915_READ(PLANE_NV12_BUF_CFG(pipe, plane_id)); | 3980 | val2 = I915_READ(PLANE_NV12_BUF_CFG(pipe, plane_id)); |
3981 | 3981 | ||
3982 | skl_ddb_entry_init_from_hw(dev_priv, | 3982 | if (fourcc == DRM_FORMAT_NV12) |
3983 | &ddb->plane[pipe][plane_id], val2); | 3983 | swap(val, val2); |
3984 | skl_ddb_entry_init_from_hw(dev_priv, | 3984 | |
3985 | &ddb->uv_plane[pipe][plane_id], val); | 3985 | skl_ddb_entry_init_from_hw(dev_priv, ddb_y, val); |
3986 | } else { | 3986 | skl_ddb_entry_init_from_hw(dev_priv, ddb_uv, val2); |
3987 | skl_ddb_entry_init_from_hw(dev_priv, | ||
3988 | &ddb->plane[pipe][plane_id], val); | ||
3989 | } | 3987 | } |
3990 | } | 3988 | } |
3991 | 3989 | ||
3992 | void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv, | 3990 | void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc, |
3993 | struct skl_ddb_allocation *ddb /* out */) | 3991 | struct skl_ddb_entry *ddb_y, |
3992 | struct skl_ddb_entry *ddb_uv) | ||
3994 | { | 3993 | { |
3995 | struct intel_crtc *crtc; | 3994 | struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); |
3996 | 3995 | enum intel_display_power_domain power_domain; | |
3997 | memset(ddb, 0, sizeof(*ddb)); | 3996 | enum pipe pipe = crtc->pipe; |
3998 | 3997 | enum plane_id plane_id; | |
3999 | ddb->enabled_slices = intel_enabled_dbuf_slices_num(dev_priv); | ||
4000 | 3998 | ||
4001 | for_each_intel_crtc(&dev_priv->drm, crtc) { | 3999 | power_domain = POWER_DOMAIN_PIPE(pipe); |
4002 | enum intel_display_power_domain power_domain; | 4000 | if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) |
4003 | enum plane_id plane_id; | 4001 | return; |
4004 | enum pipe pipe = crtc->pipe; | ||
4005 | 4002 | ||
4006 | power_domain = POWER_DOMAIN_PIPE(pipe); | 4003 | for_each_plane_id_on_crtc(crtc, plane_id) |
4007 | if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) | 4004 | skl_ddb_get_hw_plane_state(dev_priv, pipe, |
4008 | continue; | 4005 | plane_id, |
4006 | &ddb_y[plane_id], | ||
4007 | &ddb_uv[plane_id]); | ||
4009 | 4008 | ||
4010 | for_each_plane_id_on_crtc(crtc, plane_id) | 4009 | intel_display_power_put(dev_priv, power_domain); |
4011 | skl_ddb_get_hw_plane_state(dev_priv, pipe, | 4010 | } |
4012 | plane_id, ddb); | ||
4013 | 4011 | ||
4014 | intel_display_power_put(dev_priv, power_domain); | 4012 | void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv, |
4015 | } | 4013 | struct skl_ddb_allocation *ddb /* out */) |
4014 | { | ||
4015 | ddb->enabled_slices = intel_enabled_dbuf_slices_num(dev_priv); | ||
4016 | } | 4016 | } |
4017 | 4017 | ||
4018 | /* | 4018 | /* |
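[note] DDB readout becomes per-crtc and per-plane, returning the Y/RGB and UV entries separately, and it brackets the register reads in the usual power-domain get-if-enabled/put pair so a powered-down pipe is skipped instead of read. Condensed from the hunk above:

    power_domain = POWER_DOMAIN_PIPE(pipe);
    if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
            return; /* pipe is off: nothing to read, no wakeref held */

    /* ... PLANE_BUF_CFG / PLANE_NV12_BUF_CFG reads per plane ... */

    intel_display_power_put(dev_priv, power_domain);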
@@ -4410,7 +4410,6 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate, | |||
4410 | struct drm_crtc *crtc = cstate->base.crtc; | 4410 | struct drm_crtc *crtc = cstate->base.crtc; |
4411 | struct drm_i915_private *dev_priv = to_i915(crtc->dev); | 4411 | struct drm_i915_private *dev_priv = to_i915(crtc->dev); |
4412 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 4412 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
4413 | enum pipe pipe = intel_crtc->pipe; | ||
4414 | struct skl_ddb_entry *alloc = &cstate->wm.skl.ddb; | 4413 | struct skl_ddb_entry *alloc = &cstate->wm.skl.ddb; |
4415 | uint16_t alloc_size, start; | 4414 | uint16_t alloc_size, start; |
4416 | uint16_t minimum[I915_MAX_PLANES] = {}; | 4415 | uint16_t minimum[I915_MAX_PLANES] = {}; |
@@ -4423,8 +4422,8 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate, | |||
4423 | uint16_t total_min_blocks = 0; | 4422 | uint16_t total_min_blocks = 0; |
4424 | 4423 | ||
4425 | /* Clear the partitioning for disabled planes. */ | 4424 | /* Clear the partitioning for disabled planes. */ |
4426 | memset(ddb->plane[pipe], 0, sizeof(ddb->plane[pipe])); | 4425 | memset(cstate->wm.skl.plane_ddb_y, 0, sizeof(cstate->wm.skl.plane_ddb_y)); |
4427 | memset(ddb->uv_plane[pipe], 0, sizeof(ddb->uv_plane[pipe])); | 4426 | memset(cstate->wm.skl.plane_ddb_uv, 0, sizeof(cstate->wm.skl.plane_ddb_uv)); |
4428 | 4427 | ||
4429 | if (WARN_ON(!state)) | 4428 | if (WARN_ON(!state)) |
4430 | return 0; | 4429 | return 0; |
@@ -4471,8 +4470,8 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate, | |||
4471 | } | 4470 | } |
4472 | 4471 | ||
4473 | alloc_size -= total_min_blocks; | 4472 | alloc_size -= total_min_blocks; |
4474 | ddb->plane[pipe][PLANE_CURSOR].start = alloc->end - minimum[PLANE_CURSOR]; | 4473 | cstate->wm.skl.plane_ddb_y[PLANE_CURSOR].start = alloc->end - minimum[PLANE_CURSOR]; |
4475 | ddb->plane[pipe][PLANE_CURSOR].end = alloc->end; | 4474 | cstate->wm.skl.plane_ddb_y[PLANE_CURSOR].end = alloc->end; |
4476 | 4475 | ||
4477 | /* | 4476 | /* |
4478 | * 2. Distribute the remaining space in proportion to the amount of | 4477 | * 2. Distribute the remaining space in proportion to the amount of |
@@ -4503,8 +4502,8 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate, | |||
4503 | 4502 | ||
4504 | /* Leave disabled planes at (0,0) */ | 4503 | /* Leave disabled planes at (0,0) */ |
4505 | if (data_rate) { | 4504 | if (data_rate) { |
4506 | ddb->plane[pipe][plane_id].start = start; | 4505 | cstate->wm.skl.plane_ddb_y[plane_id].start = start; |
4507 | ddb->plane[pipe][plane_id].end = start + plane_blocks; | 4506 | cstate->wm.skl.plane_ddb_y[plane_id].end = start + plane_blocks; |
4508 | } | 4507 | } |
4509 | 4508 | ||
4510 | start += plane_blocks; | 4509 | start += plane_blocks; |
@@ -4519,8 +4518,8 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate, | |||
4519 | WARN_ON(INTEL_GEN(dev_priv) >= 11 && uv_plane_blocks); | 4518 | WARN_ON(INTEL_GEN(dev_priv) >= 11 && uv_plane_blocks); |
4520 | 4519 | ||
4521 | if (uv_data_rate) { | 4520 | if (uv_data_rate) { |
4522 | ddb->uv_plane[pipe][plane_id].start = start; | 4521 | cstate->wm.skl.plane_ddb_uv[plane_id].start = start; |
4523 | ddb->uv_plane[pipe][plane_id].end = | 4522 | cstate->wm.skl.plane_ddb_uv[plane_id].end = |
4524 | start + uv_plane_blocks; | 4523 | start + uv_plane_blocks; |
4525 | } | 4524 | } |
4526 | 4525 | ||
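[note] DDB allocations now live in the crtc state (wm.skl.plane_ddb_y / plane_ddb_uv) rather than in one device-wide per-pipe array. An entry is just a [start, end) range of blocks, so the size helper used throughout these hunks amounts to a one-liner (shown with its assumed shape):

    /* skl_ddb_entry is a [start, end) range of DDB blocks. */
    static inline u16 skl_ddb_entry_size(const struct skl_ddb_entry *entry)
    {
            return entry->end - entry->start;
    }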
@@ -4617,12 +4616,12 @@ skl_adjusted_plane_pixel_rate(const struct intel_crtc_state *cstate, | |||
4617 | } | 4616 | } |
4618 | 4617 | ||
4619 | static int | 4618 | static int |
4620 | skl_compute_plane_wm_params(const struct drm_i915_private *dev_priv, | 4619 | skl_compute_plane_wm_params(const struct intel_crtc_state *cstate, |
4621 | const struct intel_crtc_state *cstate, | ||
4622 | const struct intel_plane_state *intel_pstate, | 4620 | const struct intel_plane_state *intel_pstate, |
4623 | struct skl_wm_params *wp, int plane_id) | 4621 | struct skl_wm_params *wp, int color_plane) |
4624 | { | 4622 | { |
4625 | struct intel_plane *plane = to_intel_plane(intel_pstate->base.plane); | 4623 | struct intel_plane *plane = to_intel_plane(intel_pstate->base.plane); |
4624 | struct drm_i915_private *dev_priv = to_i915(plane->base.dev); | ||
4626 | const struct drm_plane_state *pstate = &intel_pstate->base; | 4625 | const struct drm_plane_state *pstate = &intel_pstate->base; |
4627 | const struct drm_framebuffer *fb = pstate->fb; | 4626 | const struct drm_framebuffer *fb = pstate->fb; |
4628 | uint32_t interm_pbpl; | 4627 | uint32_t interm_pbpl; |
@@ -4630,11 +4629,8 @@ skl_compute_plane_wm_params(const struct drm_i915_private *dev_priv, | |||
4630 | to_intel_atomic_state(cstate->base.state); | 4629 | to_intel_atomic_state(cstate->base.state); |
4631 | bool apply_memory_bw_wa = skl_needs_memory_bw_wa(state); | 4630 | bool apply_memory_bw_wa = skl_needs_memory_bw_wa(state); |
4632 | 4631 | ||
4633 | if (!intel_wm_plane_visible(cstate, intel_pstate)) | ||
4634 | return 0; | ||
4635 | |||
4636 | /* only NV12 format has two planes */ | 4632 | /* only NV12 format has two planes */ |
4637 | if (plane_id == 1 && fb->format->format != DRM_FORMAT_NV12) { | 4633 | if (color_plane == 1 && fb->format->format != DRM_FORMAT_NV12) { |
4638 | DRM_DEBUG_KMS("Non NV12 format have single plane\n"); | 4634 | DRM_DEBUG_KMS("Non NV12 format have single plane\n"); |
4639 | return -EINVAL; | 4635 | return -EINVAL; |
4640 | } | 4636 | } |
@@ -4659,10 +4655,10 @@ skl_compute_plane_wm_params(const struct drm_i915_private *dev_priv, | |||
4659 | wp->width = drm_rect_width(&intel_pstate->base.src) >> 16; | 4655 | wp->width = drm_rect_width(&intel_pstate->base.src) >> 16; |
4660 | } | 4656 | } |
4661 | 4657 | ||
4662 | if (plane_id == 1 && wp->is_planar) | 4658 | if (color_plane == 1 && wp->is_planar) |
4663 | wp->width /= 2; | 4659 | wp->width /= 2; |
4664 | 4660 | ||
4665 | wp->cpp = fb->format->cpp[plane_id]; | 4661 | wp->cpp = fb->format->cpp[color_plane]; |
4666 | wp->plane_pixel_rate = skl_adjusted_plane_pixel_rate(cstate, | 4662 | wp->plane_pixel_rate = skl_adjusted_plane_pixel_rate(cstate, |
4667 | intel_pstate); | 4663 | intel_pstate); |
4668 | 4664 | ||
@@ -4724,8 +4720,7 @@ skl_compute_plane_wm_params(const struct drm_i915_private *dev_priv, | |||
4724 | return 0; | 4720 | return 0; |
4725 | } | 4721 | } |
4726 | 4722 | ||
4727 | static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv, | 4723 | static int skl_compute_plane_wm(const struct intel_crtc_state *cstate, |
4728 | const struct intel_crtc_state *cstate, | ||
4729 | const struct intel_plane_state *intel_pstate, | 4724 | const struct intel_plane_state *intel_pstate, |
4730 | uint16_t ddb_allocation, | 4725 | uint16_t ddb_allocation, |
4731 | int level, | 4726 | int level, |
@@ -4733,6 +4728,8 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv, | |||
4733 | const struct skl_wm_level *result_prev, | 4728 | const struct skl_wm_level *result_prev, |
4734 | struct skl_wm_level *result /* out */) | 4729 | struct skl_wm_level *result /* out */) |
4735 | { | 4730 | { |
4731 | struct drm_i915_private *dev_priv = | ||
4732 | to_i915(intel_pstate->base.plane->dev); | ||
4736 | const struct drm_plane_state *pstate = &intel_pstate->base; | 4733 | const struct drm_plane_state *pstate = &intel_pstate->base; |
4737 | uint32_t latency = dev_priv->wm.skl_latency[level]; | 4734 | uint32_t latency = dev_priv->wm.skl_latency[level]; |
4738 | uint_fixed_16_16_t method1, method2; | 4735 | uint_fixed_16_16_t method1, method2; |
@@ -4743,11 +4740,8 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv, | |||
4743 | bool apply_memory_bw_wa = skl_needs_memory_bw_wa(state); | 4740 | bool apply_memory_bw_wa = skl_needs_memory_bw_wa(state); |
4744 | uint32_t min_disp_buf_needed; | 4741 | uint32_t min_disp_buf_needed; |
4745 | 4742 | ||
4746 | if (latency == 0 || | 4743 | if (latency == 0) |
4747 | !intel_wm_plane_visible(cstate, intel_pstate)) { | 4744 | return level == 0 ? -EINVAL : 0; |
4748 | result->plane_en = false; | ||
4749 | return 0; | ||
4750 | } | ||
4751 | 4745 | ||
4752 | /* Display WA #1141: kbl,cfl */ | 4746 | /* Display WA #1141: kbl,cfl */ |
4753 | if ((IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv) || | 4747 | if ((IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv) || |
@@ -4844,8 +4838,6 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv, | |||
4844 | if ((level > 0 && res_lines > 31) || | 4838 | if ((level > 0 && res_lines > 31) || |
4845 | res_blocks >= ddb_allocation || | 4839 | res_blocks >= ddb_allocation || |
4846 | min_disp_buf_needed >= ddb_allocation) { | 4840 | min_disp_buf_needed >= ddb_allocation) { |
4847 | result->plane_en = false; | ||
4848 | |||
4849 | /* | 4841 | /* |
4850 | * If there are no valid level 0 watermarks, then we can't | 4842 | * If there are no valid level 0 watermarks, then we can't |
4851 | * support this display configuration. | 4843 | * support this display configuration. |
@@ -4872,27 +4864,22 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv, | |||
4872 | } | 4864 | } |
4873 | 4865 | ||
4874 | static int | 4866 | static int |
4875 | skl_compute_wm_levels(const struct drm_i915_private *dev_priv, | 4867 | skl_compute_wm_levels(const struct intel_crtc_state *cstate, |
4876 | struct skl_ddb_allocation *ddb, | ||
4877 | const struct intel_crtc_state *cstate, | ||
4878 | const struct intel_plane_state *intel_pstate, | 4868 | const struct intel_plane_state *intel_pstate, |
4879 | uint16_t ddb_blocks, | 4869 | uint16_t ddb_blocks, |
4880 | const struct skl_wm_params *wm_params, | 4870 | const struct skl_wm_params *wm_params, |
4881 | struct skl_plane_wm *wm, | ||
4882 | struct skl_wm_level *levels) | 4871 | struct skl_wm_level *levels) |
4883 | { | 4872 | { |
4873 | struct drm_i915_private *dev_priv = | ||
4874 | to_i915(intel_pstate->base.plane->dev); | ||
4884 | int level, max_level = ilk_wm_max_level(dev_priv); | 4875 | int level, max_level = ilk_wm_max_level(dev_priv); |
4885 | struct skl_wm_level *result_prev = &levels[0]; | 4876 | struct skl_wm_level *result_prev = &levels[0]; |
4886 | int ret; | 4877 | int ret; |
4887 | 4878 | ||
4888 | if (WARN_ON(!intel_pstate->base.fb)) | ||
4889 | return -EINVAL; | ||
4890 | |||
4891 | for (level = 0; level <= max_level; level++) { | 4879 | for (level = 0; level <= max_level; level++) { |
4892 | struct skl_wm_level *result = &levels[level]; | 4880 | struct skl_wm_level *result = &levels[level]; |
4893 | 4881 | ||
4894 | ret = skl_compute_plane_wm(dev_priv, | 4882 | ret = skl_compute_plane_wm(cstate, |
4895 | cstate, | ||
4896 | intel_pstate, | 4883 | intel_pstate, |
4897 | ddb_blocks, | 4884 | ddb_blocks, |
4898 | level, | 4885 | level, |
@@ -4905,9 +4892,6 @@ skl_compute_wm_levels(const struct drm_i915_private *dev_priv, | |||
4905 | result_prev = result; | 4892 | result_prev = result; |
4906 | } | 4893 | } |
4907 | 4894 | ||
4908 | if (intel_pstate->base.fb->format->format == DRM_FORMAT_NV12) | ||
4909 | wm->is_planar = true; | ||
4910 | |||
4911 | return 0; | 4895 | return 0; |
4912 | } | 4896 | } |
4913 | 4897 | ||
@@ -4935,10 +4919,9 @@ skl_compute_linetime_wm(const struct intel_crtc_state *cstate) | |||
4935 | } | 4919 | } |
4936 | 4920 | ||
4937 | static void skl_compute_transition_wm(const struct intel_crtc_state *cstate, | 4921 | static void skl_compute_transition_wm(const struct intel_crtc_state *cstate, |
4938 | struct skl_wm_params *wp, | 4922 | const struct skl_wm_params *wp, |
4939 | struct skl_wm_level *wm_l0, | 4923 | struct skl_plane_wm *wm, |
4940 | uint16_t ddb_allocation, | 4924 | uint16_t ddb_allocation) |
4941 | struct skl_wm_level *trans_wm /* out */) | ||
4942 | { | 4925 | { |
4943 | struct drm_device *dev = cstate->base.crtc->dev; | 4926 | struct drm_device *dev = cstate->base.crtc->dev; |
4944 | const struct drm_i915_private *dev_priv = to_i915(dev); | 4927 | const struct drm_i915_private *dev_priv = to_i915(dev); |
@@ -4946,16 +4929,13 @@ static void skl_compute_transition_wm(const struct intel_crtc_state *cstate, | |||
4946 | const uint16_t trans_amount = 10; /* This is configurable amount */ | 4929 | const uint16_t trans_amount = 10; /* This is configurable amount */ |
4947 | uint16_t wm0_sel_res_b, trans_offset_b, res_blocks; | 4930 | uint16_t wm0_sel_res_b, trans_offset_b, res_blocks; |
4948 | 4931 | ||
4949 | if (!cstate->base.active) | ||
4950 | goto exit; | ||
4951 | |||
4952 | /* Transition WM are not recommended by HW team for GEN9 */ | 4932 | /* Transition WM are not recommended by HW team for GEN9 */ |
4953 | if (INTEL_GEN(dev_priv) <= 9) | 4933 | if (INTEL_GEN(dev_priv) <= 9) |
4954 | goto exit; | 4934 | return; |
4955 | 4935 | ||
4956 | /* Transition WM don't make any sense if ipc is disabled */ | 4936 | /* Transition WM don't make any sense if ipc is disabled */ |
4957 | if (!dev_priv->ipc_enabled) | 4937 | if (!dev_priv->ipc_enabled) |
4958 | goto exit; | 4938 | return; |
4959 | 4939 | ||
4960 | trans_min = 14; | 4940 | trans_min = 14; |
4961 | if (INTEL_GEN(dev_priv) >= 11) | 4941 | if (INTEL_GEN(dev_priv) >= 11) |
@@ -4973,7 +4953,7 @@ static void skl_compute_transition_wm(const struct intel_crtc_state *cstate, | |||
4973 | * Result Blocks is Result Blocks minus 1 and it should work for the | 4953 | * Result Blocks is Result Blocks minus 1 and it should work for the |
4974 | * current platforms. | 4954 | * current platforms. |
4975 | */ | 4955 | */ |
4976 | wm0_sel_res_b = wm_l0->plane_res_b - 1; | 4956 | wm0_sel_res_b = wm->wm[0].plane_res_b - 1; |
4977 | 4957 | ||
4978 | if (wp->y_tiled) { | 4958 | if (wp->y_tiled) { |
4979 | trans_y_tile_min = (uint16_t) mul_round_up_u32_fixed16(2, | 4959 | trans_y_tile_min = (uint16_t) mul_round_up_u32_fixed16(2, |
@@ -4992,107 +4972,129 @@ static void skl_compute_transition_wm(const struct intel_crtc_state *cstate, | |||
4992 | res_blocks += 1; | 4972 | res_blocks += 1; |
4993 | 4973 | ||
4994 | if (res_blocks < ddb_allocation) { | 4974 | if (res_blocks < ddb_allocation) { |
4995 | trans_wm->plane_res_b = res_blocks; | 4975 | wm->trans_wm.plane_res_b = res_blocks; |
4996 | trans_wm->plane_en = true; | 4976 | wm->trans_wm.plane_en = true; |
4997 | return; | ||
4998 | } | 4977 | } |
4999 | |||
5000 | exit: | ||
5001 | trans_wm->plane_en = false; | ||
5002 | } | 4978 | } |
5003 | 4979 | ||
5004 | static int __skl_build_plane_wm_single(struct skl_ddb_allocation *ddb, | 4980 | static int skl_build_plane_wm_single(struct intel_crtc_state *crtc_state, |
5005 | struct skl_pipe_wm *pipe_wm, | 4981 | const struct intel_plane_state *plane_state, |
5006 | enum plane_id plane_id, | 4982 | enum plane_id plane_id, int color_plane) |
5007 | const struct intel_crtc_state *cstate, | ||
5008 | const struct intel_plane_state *pstate, | ||
5009 | int color_plane) | ||
5010 | { | 4983 | { |
5011 | struct drm_i915_private *dev_priv = to_i915(pstate->base.plane->dev); | 4984 | struct skl_plane_wm *wm = &crtc_state->wm.skl.optimal.planes[plane_id]; |
5012 | struct skl_plane_wm *wm = &pipe_wm->planes[plane_id]; | 4985 | u16 ddb_blocks = skl_ddb_entry_size(&crtc_state->wm.skl.plane_ddb_y[plane_id]); |
5013 | enum pipe pipe = to_intel_plane(pstate->base.plane)->pipe; | ||
5014 | struct skl_wm_params wm_params; | 4986 | struct skl_wm_params wm_params; |
5015 | uint16_t ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][plane_id]); | ||
5016 | int ret; | 4987 | int ret; |
5017 | 4988 | ||
5018 | ret = skl_compute_plane_wm_params(dev_priv, cstate, pstate, | 4989 | ret = skl_compute_plane_wm_params(crtc_state, plane_state, |
5019 | &wm_params, color_plane); | 4990 | &wm_params, color_plane); |
5020 | if (ret) | 4991 | if (ret) |
5021 | return ret; | 4992 | return ret; |
5022 | 4993 | ||
5023 | ret = skl_compute_wm_levels(dev_priv, ddb, cstate, pstate, | 4994 | ret = skl_compute_wm_levels(crtc_state, plane_state, |
5024 | ddb_blocks, &wm_params, wm, wm->wm); | 4995 | ddb_blocks, &wm_params, wm->wm); |
5025 | |||
5026 | if (ret) | 4996 | if (ret) |
5027 | return ret; | 4997 | return ret; |
5028 | 4998 | ||
5029 | skl_compute_transition_wm(cstate, &wm_params, &wm->wm[0], | 4999 | skl_compute_transition_wm(crtc_state, &wm_params, wm, ddb_blocks); |
5030 | ddb_blocks, &wm->trans_wm); | ||
5031 | 5000 | ||
5032 | return 0; | 5001 | return 0; |
5033 | } | 5002 | } |
5034 | 5003 | ||
5035 | static int skl_build_plane_wm_single(struct skl_ddb_allocation *ddb, | 5004 | static int skl_build_plane_wm_uv(struct intel_crtc_state *crtc_state, |
5036 | struct skl_pipe_wm *pipe_wm, | 5005 | const struct intel_plane_state *plane_state, |
5037 | const struct intel_crtc_state *cstate, | 5006 | enum plane_id plane_id) |
5038 | const struct intel_plane_state *pstate) | ||
5039 | { | ||
5040 | enum plane_id plane_id = to_intel_plane(pstate->base.plane)->id; | ||
5041 | |||
5042 | return __skl_build_plane_wm_single(ddb, pipe_wm, plane_id, cstate, pstate, 0); | ||
5043 | } | ||
5044 | |||
5045 | static int skl_build_plane_wm_planar(struct skl_ddb_allocation *ddb, | ||
5046 | struct skl_pipe_wm *pipe_wm, | ||
5047 | const struct intel_crtc_state *cstate, | ||
5048 | const struct intel_plane_state *pstate) | ||
5049 | { | 5007 | { |
5050 | struct intel_plane *plane = to_intel_plane(pstate->base.plane); | 5008 | struct skl_plane_wm *wm = &crtc_state->wm.skl.optimal.planes[plane_id]; |
5051 | struct drm_i915_private *dev_priv = to_i915(plane->base.dev); | 5009 | u16 ddb_blocks = skl_ddb_entry_size(&crtc_state->wm.skl.plane_ddb_uv[plane_id]); |
5052 | enum plane_id plane_id = plane->id; | ||
5053 | struct skl_plane_wm *wm = &pipe_wm->planes[plane_id]; | ||
5054 | struct skl_wm_params wm_params; | 5010 | struct skl_wm_params wm_params; |
5055 | enum pipe pipe = plane->pipe; | ||
5056 | uint16_t ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][plane_id]); | ||
5057 | int ret; | 5011 | int ret; |
5058 | 5012 | ||
5059 | ret = __skl_build_plane_wm_single(ddb, pipe_wm, plane_id, cstate, pstate, 0); | 5013 | wm->is_planar = true; |
5060 | if (ret) | ||
5061 | return ret; | ||
5062 | 5014 | ||
5063 | /* uv plane watermarks must also be validated for NV12/Planar */ | 5015 | /* uv plane watermarks must also be validated for NV12/Planar */ |
5064 | ddb_blocks = skl_ddb_entry_size(&ddb->uv_plane[pipe][plane_id]); | 5016 | ret = skl_compute_plane_wm_params(crtc_state, plane_state, |
5017 | &wm_params, 1); | ||
5018 | if (ret) | ||
5019 | return ret; | ||
5065 | 5020 | ||
5066 | ret = skl_compute_plane_wm_params(dev_priv, cstate, pstate, &wm_params, 1); | 5021 | ret = skl_compute_wm_levels(crtc_state, plane_state, |
5022 | ddb_blocks, &wm_params, wm->uv_wm); | ||
5067 | if (ret) | 5023 | if (ret) |
5068 | return ret; | 5024 | return ret; |
5069 | 5025 | ||
5070 | return skl_compute_wm_levels(dev_priv, ddb, cstate, pstate, | 5026 | return 0; |
5071 | ddb_blocks, &wm_params, wm, wm->uv_wm); | ||
5072 | } | 5027 | } |
5073 | 5028 | ||
5074 | static int icl_build_plane_wm_planar(struct skl_ddb_allocation *ddb, | 5029 | static int skl_build_plane_wm(struct skl_pipe_wm *pipe_wm, |
5075 | struct skl_pipe_wm *pipe_wm, | 5030 | struct intel_crtc_state *crtc_state, |
5076 | const struct intel_crtc_state *cstate, | 5031 | const struct intel_plane_state *plane_state) |
5077 | const struct intel_plane_state *pstate) | ||
5078 | { | 5032 | { |
5033 | struct intel_plane *plane = to_intel_plane(plane_state->base.plane); | ||
5034 | const struct drm_framebuffer *fb = plane_state->base.fb; | ||
5035 | enum plane_id plane_id = plane->id; | ||
5079 | int ret; | 5036 | int ret; |
5080 | enum plane_id y_plane_id = pstate->linked_plane->id; | ||
5081 | enum plane_id uv_plane_id = to_intel_plane(pstate->base.plane)->id; | ||
5082 | 5037 | ||
5083 | ret = __skl_build_plane_wm_single(ddb, pipe_wm, y_plane_id, | 5038 | if (!intel_wm_plane_visible(crtc_state, plane_state)) |
5084 | cstate, pstate, 0); | 5039 | return 0; |
5040 | |||
5041 | ret = skl_build_plane_wm_single(crtc_state, plane_state, | ||
5042 | plane_id, 0); | ||
5085 | if (ret) | 5043 | if (ret) |
5086 | return ret; | 5044 | return ret; |
5087 | 5045 | ||
5088 | return __skl_build_plane_wm_single(ddb, pipe_wm, uv_plane_id, | 5046 | if (fb->format->is_yuv && fb->format->num_planes > 1) { |
5089 | cstate, pstate, 1); | 5047 | ret = skl_build_plane_wm_uv(crtc_state, plane_state, |
5048 | plane_id); | ||
5049 | if (ret) | ||
5050 | return ret; | ||
5051 | } | ||
5052 | |||
5053 | return 0; | ||
5054 | } | ||
5055 | |||
5056 | static int icl_build_plane_wm(struct skl_pipe_wm *pipe_wm, | ||
5057 | struct intel_crtc_state *crtc_state, | ||
5058 | const struct intel_plane_state *plane_state) | ||
5059 | { | ||
5060 | enum plane_id plane_id = to_intel_plane(plane_state->base.plane)->id; | ||
5061 | int ret; | ||
5062 | |||
5063 | /* Watermarks calculated in master */ | ||
5064 | if (plane_state->slave) | ||
5065 | return 0; | ||
5066 | |||
5067 | if (plane_state->linked_plane) { | ||
5068 | const struct drm_framebuffer *fb = plane_state->base.fb; | ||
5069 | enum plane_id y_plane_id = plane_state->linked_plane->id; | ||
5070 | |||
5071 | WARN_ON(!intel_wm_plane_visible(crtc_state, plane_state)); | ||
5072 | WARN_ON(!fb->format->is_yuv || | ||
5073 | fb->format->num_planes == 1); | ||
5074 | |||
5075 | ret = skl_build_plane_wm_single(crtc_state, plane_state, | ||
5076 | y_plane_id, 0); | ||
5077 | if (ret) | ||
5078 | return ret; | ||
5079 | |||
5080 | ret = skl_build_plane_wm_single(crtc_state, plane_state, | ||
5081 | plane_id, 1); | ||
5082 | if (ret) | ||
5083 | return ret; | ||
5084 | } else if (intel_wm_plane_visible(crtc_state, plane_state)) { | ||
5085 | ret = skl_build_plane_wm_single(crtc_state, plane_state, | ||
5086 | plane_id, 0); | ||
5087 | if (ret) | ||
5088 | return ret; | ||
5089 | } | ||
5090 | |||
5091 | return 0; | ||
5090 | } | 5092 | } |
5091 | 5093 | ||
5092 | static int skl_build_pipe_wm(struct intel_crtc_state *cstate, | 5094 | static int skl_build_pipe_wm(struct intel_crtc_state *cstate, |
5093 | struct skl_ddb_allocation *ddb, | ||
5094 | struct skl_pipe_wm *pipe_wm) | 5095 | struct skl_pipe_wm *pipe_wm) |
5095 | { | 5096 | { |
5097 | struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev); | ||
5096 | struct drm_crtc_state *crtc_state = &cstate->base; | 5098 | struct drm_crtc_state *crtc_state = &cstate->base; |
5097 | struct drm_plane *plane; | 5099 | struct drm_plane *plane; |
5098 | const struct drm_plane_state *pstate; | 5100 | const struct drm_plane_state *pstate; |
@@ -5108,18 +5110,12 @@ static int skl_build_pipe_wm(struct intel_crtc_state *cstate, | |||
5108 | const struct intel_plane_state *intel_pstate = | 5110 | const struct intel_plane_state *intel_pstate = |
5109 | to_intel_plane_state(pstate); | 5111 | to_intel_plane_state(pstate); |
5110 | 5112 | ||
5111 | /* Watermarks calculated in master */ | 5113 | if (INTEL_GEN(dev_priv) >= 11) |
5112 | if (intel_pstate->slave) | 5114 | ret = icl_build_plane_wm(pipe_wm, |
5113 | continue; | 5115 | cstate, intel_pstate); |
5114 | |||
5115 | if (intel_pstate->linked_plane) | ||
5116 | ret = icl_build_plane_wm_planar(ddb, pipe_wm, cstate, intel_pstate); | ||
5117 | else if (intel_pstate->base.fb && | ||
5118 | intel_pstate->base.fb->format->format == DRM_FORMAT_NV12) | ||
5119 | ret = skl_build_plane_wm_planar(ddb, pipe_wm, cstate, intel_pstate); | ||
5120 | else | 5116 | else |
5121 | ret = skl_build_plane_wm_single(ddb, pipe_wm, cstate, intel_pstate); | 5117 | ret = skl_build_plane_wm(pipe_wm, |
5122 | 5118 | cstate, intel_pstate); | |
5123 | if (ret) | 5119 | if (ret) |
5124 | return ret; | 5120 | return ret; |
5125 | } | 5121 | } |
@@ -5134,9 +5130,9 @@ static void skl_ddb_entry_write(struct drm_i915_private *dev_priv, | |||
5134 | const struct skl_ddb_entry *entry) | 5130 | const struct skl_ddb_entry *entry) |
5135 | { | 5131 | { |
5136 | if (entry->end) | 5132 | if (entry->end) |
5137 | I915_WRITE(reg, (entry->end - 1) << 16 | entry->start); | 5133 | I915_WRITE_FW(reg, (entry->end - 1) << 16 | entry->start); |
5138 | else | 5134 | else |
5139 | I915_WRITE(reg, 0); | 5135 | I915_WRITE_FW(reg, 0); |
5140 | } | 5136 | } |
5141 | 5137 | ||
5142 | static void skl_write_wm_level(struct drm_i915_private *dev_priv, | 5138 | static void skl_write_wm_level(struct drm_i915_private *dev_priv, |
@@ -5151,19 +5147,22 @@ static void skl_write_wm_level(struct drm_i915_private *dev_priv, | |||
5151 | val |= level->plane_res_l << PLANE_WM_LINES_SHIFT; | 5147 | val |= level->plane_res_l << PLANE_WM_LINES_SHIFT; |
5152 | } | 5148 | } |
5153 | 5149 | ||
5154 | I915_WRITE(reg, val); | 5150 | I915_WRITE_FW(reg, val); |
5155 | } | 5151 | } |
5156 | 5152 | ||
5157 | static void skl_write_plane_wm(struct intel_crtc *intel_crtc, | 5153 | void skl_write_plane_wm(struct intel_plane *plane, |
5158 | const struct skl_plane_wm *wm, | 5154 | const struct intel_crtc_state *crtc_state) |
5159 | const struct skl_ddb_allocation *ddb, | ||
5160 | enum plane_id plane_id) | ||
5161 | { | 5155 | { |
5162 | struct drm_crtc *crtc = &intel_crtc->base; | 5156 | struct drm_i915_private *dev_priv = to_i915(plane->base.dev); |
5163 | struct drm_device *dev = crtc->dev; | ||
5164 | struct drm_i915_private *dev_priv = to_i915(dev); | ||
5165 | int level, max_level = ilk_wm_max_level(dev_priv); | 5157 | int level, max_level = ilk_wm_max_level(dev_priv); |
5166 | enum pipe pipe = intel_crtc->pipe; | 5158 | enum plane_id plane_id = plane->id; |
5159 | enum pipe pipe = plane->pipe; | ||
5160 | const struct skl_plane_wm *wm = | ||
5161 | &crtc_state->wm.skl.optimal.planes[plane_id]; | ||
5162 | const struct skl_ddb_entry *ddb_y = | ||
5163 | &crtc_state->wm.skl.plane_ddb_y[plane_id]; | ||
5164 | const struct skl_ddb_entry *ddb_uv = | ||
5165 | &crtc_state->wm.skl.plane_ddb_uv[plane_id]; | ||
5167 | 5166 | ||
5168 | for (level = 0; level <= max_level; level++) { | 5167 | for (level = 0; level <= max_level; level++) { |
5169 | skl_write_wm_level(dev_priv, PLANE_WM(pipe, plane_id, level), | 5168 | skl_write_wm_level(dev_priv, PLANE_WM(pipe, plane_id, level), |
@@ -5172,29 +5171,32 @@ static void skl_write_plane_wm(struct intel_crtc *intel_crtc, | |||
5172 | skl_write_wm_level(dev_priv, PLANE_WM_TRANS(pipe, plane_id), | 5171 | skl_write_wm_level(dev_priv, PLANE_WM_TRANS(pipe, plane_id), |
5173 | &wm->trans_wm); | 5172 | &wm->trans_wm); |
5174 | 5173 | ||
5175 | if (wm->is_planar && INTEL_GEN(dev_priv) < 11) { | 5174 | if (INTEL_GEN(dev_priv) >= 11) { |
5176 | skl_ddb_entry_write(dev_priv, PLANE_BUF_CFG(pipe, plane_id), | ||
5177 | &ddb->uv_plane[pipe][plane_id]); | ||
5178 | skl_ddb_entry_write(dev_priv, | 5175 | skl_ddb_entry_write(dev_priv, |
5179 | PLANE_NV12_BUF_CFG(pipe, plane_id), | 5176 | PLANE_BUF_CFG(pipe, plane_id), ddb_y); |
5180 | &ddb->plane[pipe][plane_id]); | 5177 | return; |
5181 | } else { | ||
5182 | skl_ddb_entry_write(dev_priv, PLANE_BUF_CFG(pipe, plane_id), | ||
5183 | &ddb->plane[pipe][plane_id]); | ||
5184 | if (INTEL_GEN(dev_priv) < 11) | ||
5185 | I915_WRITE(PLANE_NV12_BUF_CFG(pipe, plane_id), 0x0); | ||
5186 | } | 5178 | } |
5179 | |||
5180 | if (wm->is_planar) | ||
5181 | swap(ddb_y, ddb_uv); | ||
5182 | |||
5183 | skl_ddb_entry_write(dev_priv, | ||
5184 | PLANE_BUF_CFG(pipe, plane_id), ddb_y); | ||
5185 | skl_ddb_entry_write(dev_priv, | ||
5186 | PLANE_NV12_BUF_CFG(pipe, plane_id), ddb_uv); | ||
5187 | } | 5187 | } |
5188 | 5188 | ||
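[editor's note] On gen11 the UV surface no longer needs its own buffer config, so the function above returns after programming PLANE_BUF_CFG alone; on earlier gens a planar format scans out with the UV data as the "main" plane, which is why the Y and UV allocations trade places before the two register writes. A compilable sketch of that swap, with printf() standing in for the skl_ddb_entry_write() MMIO writes:

#include <stdio.h>

struct ddb_entry { unsigned int start, end; };

static void write_plane_ddb(int is_planar,
			    struct ddb_entry ddb_y, struct ddb_entry ddb_uv)
{
	if (is_planar) {
		/* UV is the "main" plane pre-gen11, so the entries swap */
		struct ddb_entry tmp = ddb_y;

		ddb_y = ddb_uv;
		ddb_uv = tmp;
	}

	printf("PLANE_BUF_CFG      = %u-%u\n", ddb_y.start, ddb_y.end);
	printf("PLANE_NV12_BUF_CFG = %u-%u\n", ddb_uv.start, ddb_uv.end);
}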
5189 | static void skl_write_cursor_wm(struct intel_crtc *intel_crtc, | 5189 | void skl_write_cursor_wm(struct intel_plane *plane, |
5190 | const struct skl_plane_wm *wm, | 5190 | const struct intel_crtc_state *crtc_state) |
5191 | const struct skl_ddb_allocation *ddb) | ||
5192 | { | 5191 | { |
5193 | struct drm_crtc *crtc = &intel_crtc->base; | 5192 | struct drm_i915_private *dev_priv = to_i915(plane->base.dev); |
5194 | struct drm_device *dev = crtc->dev; | ||
5195 | struct drm_i915_private *dev_priv = to_i915(dev); | ||
5196 | int level, max_level = ilk_wm_max_level(dev_priv); | 5193 | int level, max_level = ilk_wm_max_level(dev_priv); |
5197 | enum pipe pipe = intel_crtc->pipe; | 5194 | enum plane_id plane_id = plane->id; |
5195 | enum pipe pipe = plane->pipe; | ||
5196 | const struct skl_plane_wm *wm = | ||
5197 | &crtc_state->wm.skl.optimal.planes[plane_id]; | ||
5198 | const struct skl_ddb_entry *ddb = | ||
5199 | &crtc_state->wm.skl.plane_ddb_y[plane_id]; | ||
5198 | 5200 | ||
5199 | for (level = 0; level <= max_level; level++) { | 5201 | for (level = 0; level <= max_level; level++) { |
5200 | skl_write_wm_level(dev_priv, CUR_WM(pipe, level), | 5202 | skl_write_wm_level(dev_priv, CUR_WM(pipe, level), |
@@ -5202,22 +5204,30 @@ static void skl_write_cursor_wm(struct intel_crtc *intel_crtc, | |||
5202 | } | 5204 | } |
5203 | skl_write_wm_level(dev_priv, CUR_WM_TRANS(pipe), &wm->trans_wm); | 5205 | skl_write_wm_level(dev_priv, CUR_WM_TRANS(pipe), &wm->trans_wm); |
5204 | 5206 | ||
5205 | skl_ddb_entry_write(dev_priv, CUR_BUF_CFG(pipe), | 5207 | skl_ddb_entry_write(dev_priv, CUR_BUF_CFG(pipe), ddb); |
5206 | &ddb->plane[pipe][PLANE_CURSOR]); | ||
5207 | } | 5208 | } |
5208 | 5209 | ||
5209 | bool skl_wm_level_equals(const struct skl_wm_level *l1, | 5210 | bool skl_wm_level_equals(const struct skl_wm_level *l1, |
5210 | const struct skl_wm_level *l2) | 5211 | const struct skl_wm_level *l2) |
5211 | { | 5212 | { |
5212 | if (l1->plane_en != l2->plane_en) | 5213 | return l1->plane_en == l2->plane_en && |
5213 | return false; | 5214 | l1->plane_res_l == l2->plane_res_l && |
5215 | l1->plane_res_b == l2->plane_res_b; | ||
5216 | } | ||
5214 | 5217 | ||
5215 | /* If both planes aren't enabled, the rest shouldn't matter */ | 5218 | static bool skl_plane_wm_equals(struct drm_i915_private *dev_priv, |
5216 | if (!l1->plane_en) | 5219 | const struct skl_plane_wm *wm1, |
5217 | return true; | 5220 | const struct skl_plane_wm *wm2) |
5221 | { | ||
5222 | int level, max_level = ilk_wm_max_level(dev_priv); | ||
5218 | 5223 | ||
5219 | return (l1->plane_res_l == l2->plane_res_l && | 5224 | for (level = 0; level <= max_level; level++) { |
5220 | l1->plane_res_b == l2->plane_res_b); | 5225 | if (!skl_wm_level_equals(&wm1->wm[level], &wm2->wm[level]) || |
5226 | !skl_wm_level_equals(&wm1->uv_wm[level], &wm2->uv_wm[level])) | ||
5227 | return false; | ||
5228 | } | ||
5229 | |||
5230 | return skl_wm_level_equals(&wm1->trans_wm, &wm2->trans_wm); | ||
5221 | } | 5231 | } |
5222 | 5232 | ||
5223 | static inline bool skl_ddb_entries_overlap(const struct skl_ddb_entry *a, | 5233 | static inline bool skl_ddb_entries_overlap(const struct skl_ddb_entry *a, |
@@ -5244,13 +5254,12 @@ bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry *ddb, | |||
5244 | static int skl_update_pipe_wm(struct drm_crtc_state *cstate, | 5254 | static int skl_update_pipe_wm(struct drm_crtc_state *cstate, |
5245 | const struct skl_pipe_wm *old_pipe_wm, | 5255 | const struct skl_pipe_wm *old_pipe_wm, |
5246 | struct skl_pipe_wm *pipe_wm, /* out */ | 5256 | struct skl_pipe_wm *pipe_wm, /* out */ |
5247 | struct skl_ddb_allocation *ddb, /* out */ | ||
5248 | bool *changed /* out */) | 5257 | bool *changed /* out */) |
5249 | { | 5258 | { |
5250 | struct intel_crtc_state *intel_cstate = to_intel_crtc_state(cstate); | 5259 | struct intel_crtc_state *intel_cstate = to_intel_crtc_state(cstate); |
5251 | int ret; | 5260 | int ret; |
5252 | 5261 | ||
5253 | ret = skl_build_pipe_wm(intel_cstate, ddb, pipe_wm); | 5262 | ret = skl_build_pipe_wm(intel_cstate, pipe_wm); |
5254 | if (ret) | 5263 | if (ret) |
5255 | return ret; | 5264 | return ret; |
5256 | 5265 | ||
@@ -5276,42 +5285,29 @@ pipes_modified(struct drm_atomic_state *state) | |||
5276 | } | 5285 | } |
5277 | 5286 | ||
5278 | static int | 5287 | static int |
5279 | skl_ddb_add_affected_planes(struct intel_crtc_state *cstate) | 5288 | skl_ddb_add_affected_planes(const struct intel_crtc_state *old_crtc_state, |
5289 | struct intel_crtc_state *new_crtc_state) | ||
5280 | { | 5290 | { |
5281 | struct drm_atomic_state *state = cstate->base.state; | 5291 | struct intel_atomic_state *state = to_intel_atomic_state(new_crtc_state->base.state); |
5282 | struct drm_device *dev = state->dev; | 5292 | struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc); |
5283 | struct drm_crtc *crtc = cstate->base.crtc; | 5293 | struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); |
5284 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 5294 | struct intel_plane *plane; |
5285 | struct drm_i915_private *dev_priv = to_i915(dev); | ||
5286 | struct intel_atomic_state *intel_state = to_intel_atomic_state(state); | ||
5287 | struct skl_ddb_allocation *new_ddb = &intel_state->wm_results.ddb; | ||
5288 | struct skl_ddb_allocation *cur_ddb = &dev_priv->wm.skl_hw.ddb; | ||
5289 | struct drm_plane *plane; | ||
5290 | enum pipe pipe = intel_crtc->pipe; | ||
5291 | 5295 | ||
5292 | drm_for_each_plane_mask(plane, dev, cstate->base.plane_mask) { | 5296 | for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) { |
5293 | struct drm_plane_state *plane_state; | 5297 | struct intel_plane_state *plane_state; |
5294 | struct intel_plane *linked; | 5298 | enum plane_id plane_id = plane->id; |
5295 | enum plane_id plane_id = to_intel_plane(plane)->id; | ||
5296 | 5299 | ||
5297 | if (skl_ddb_entry_equal(&cur_ddb->plane[pipe][plane_id], | 5300 | if (skl_ddb_entry_equal(&old_crtc_state->wm.skl.plane_ddb_y[plane_id], |
5298 | &new_ddb->plane[pipe][plane_id]) && | 5301 | &new_crtc_state->wm.skl.plane_ddb_y[plane_id]) && |
5299 | skl_ddb_entry_equal(&cur_ddb->uv_plane[pipe][plane_id], | 5302 | skl_ddb_entry_equal(&old_crtc_state->wm.skl.plane_ddb_uv[plane_id], |
5300 | &new_ddb->uv_plane[pipe][plane_id])) | 5303 | &new_crtc_state->wm.skl.plane_ddb_uv[plane_id])) |
5301 | continue; | 5304 | continue; |
5302 | 5305 | ||
5303 | plane_state = drm_atomic_get_plane_state(state, plane); | 5306 | plane_state = intel_atomic_get_plane_state(state, plane); |
5304 | if (IS_ERR(plane_state)) | 5307 | if (IS_ERR(plane_state)) |
5305 | return PTR_ERR(plane_state); | 5308 | return PTR_ERR(plane_state); |
5306 | 5309 | ||
5307 | /* Make sure linked plane is updated too */ | 5310 | new_crtc_state->update_planes |= BIT(plane_id); |
5308 | linked = to_intel_plane_state(plane_state)->linked_plane; | ||
5309 | if (!linked) | ||
5310 | continue; | ||
5311 | |||
5312 | plane_state = drm_atomic_get_plane_state(state, &linked->base); | ||
5313 | if (IS_ERR(plane_state)) | ||
5314 | return PTR_ERR(plane_state); | ||
5315 | } | 5311 | } |
5316 | 5312 | ||
5317 | return 0; | 5313 | return 0; |
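[editor's note] Instead of chasing a linked slave plane, the helper now pulls any plane whose DDB entry changed into the atomic state and records it in the new update_planes bitmask, so the commit phase only has to walk that mask. A sketch of how such a mask can be consumed, using the kernel's for_each_set_bit(); the commit_plane() callback is a stand-in, not a driver function:

#include <linux/bitops.h>

static void commit_dirty_planes(unsigned long update_planes,
				void (*commit_plane)(unsigned int plane_id))
{
	unsigned int plane_id;

	/* visit only the planes whose ddb or wm state changed */
	for_each_set_bit(plane_id, &update_planes, BITS_PER_LONG)
		commit_plane(plane_id);
}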
@@ -5323,18 +5319,21 @@ skl_compute_ddb(struct drm_atomic_state *state) | |||
5323 | const struct drm_i915_private *dev_priv = to_i915(state->dev); | 5319 | const struct drm_i915_private *dev_priv = to_i915(state->dev); |
5324 | struct intel_atomic_state *intel_state = to_intel_atomic_state(state); | 5320 | struct intel_atomic_state *intel_state = to_intel_atomic_state(state); |
5325 | struct skl_ddb_allocation *ddb = &intel_state->wm_results.ddb; | 5321 | struct skl_ddb_allocation *ddb = &intel_state->wm_results.ddb; |
5322 | struct intel_crtc_state *old_crtc_state; | ||
5323 | struct intel_crtc_state *new_crtc_state; | ||
5326 | struct intel_crtc *crtc; | 5324 | struct intel_crtc *crtc; |
5327 | struct intel_crtc_state *cstate; | ||
5328 | int ret, i; | 5325 | int ret, i; |
5329 | 5326 | ||
5330 | memcpy(ddb, &dev_priv->wm.skl_hw.ddb, sizeof(*ddb)); | 5327 | memcpy(ddb, &dev_priv->wm.skl_hw.ddb, sizeof(*ddb)); |
5331 | 5328 | ||
5332 | for_each_new_intel_crtc_in_state(intel_state, crtc, cstate, i) { | 5329 | for_each_oldnew_intel_crtc_in_state(intel_state, crtc, old_crtc_state, |
5333 | ret = skl_allocate_pipe_ddb(cstate, ddb); | 5330 | new_crtc_state, i) { |
5331 | ret = skl_allocate_pipe_ddb(new_crtc_state, ddb); | ||
5334 | if (ret) | 5332 | if (ret) |
5335 | return ret; | 5333 | return ret; |
5336 | 5334 | ||
5337 | ret = skl_ddb_add_affected_planes(cstate); | 5335 | ret = skl_ddb_add_affected_planes(old_crtc_state, |
5336 | new_crtc_state); | ||
5338 | if (ret) | 5337 | if (ret) |
5339 | return ret; | 5338 | return ret; |
5340 | } | 5339 | } |
@@ -5343,36 +5342,29 @@ skl_compute_ddb(struct drm_atomic_state *state) | |||
5343 | } | 5342 | } |
5344 | 5343 | ||
5345 | static void | 5344 | static void |
5346 | skl_print_wm_changes(const struct drm_atomic_state *state) | 5345 | skl_print_wm_changes(struct intel_atomic_state *state) |
5347 | { | 5346 | { |
5348 | const struct drm_device *dev = state->dev; | 5347 | struct drm_i915_private *dev_priv = to_i915(state->base.dev); |
5349 | const struct drm_i915_private *dev_priv = to_i915(dev); | 5348 | const struct intel_crtc_state *old_crtc_state; |
5350 | const struct intel_atomic_state *intel_state = | 5349 | const struct intel_crtc_state *new_crtc_state; |
5351 | to_intel_atomic_state(state); | 5350 | struct intel_plane *plane; |
5352 | const struct drm_crtc *crtc; | 5351 | struct intel_crtc *crtc; |
5353 | const struct drm_crtc_state *cstate; | ||
5354 | const struct intel_plane *intel_plane; | ||
5355 | const struct skl_ddb_allocation *old_ddb = &dev_priv->wm.skl_hw.ddb; | ||
5356 | const struct skl_ddb_allocation *new_ddb = &intel_state->wm_results.ddb; | ||
5357 | int i; | 5352 | int i; |
5358 | 5353 | ||
5359 | for_each_new_crtc_in_state(state, crtc, cstate, i) { | 5354 | for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, |
5360 | const struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 5355 | new_crtc_state, i) { |
5361 | enum pipe pipe = intel_crtc->pipe; | 5356 | for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) { |
5362 | 5357 | enum plane_id plane_id = plane->id; | |
5363 | for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) { | ||
5364 | enum plane_id plane_id = intel_plane->id; | ||
5365 | const struct skl_ddb_entry *old, *new; | 5358 | const struct skl_ddb_entry *old, *new; |
5366 | 5359 | ||
5367 | old = &old_ddb->plane[pipe][plane_id]; | 5360 | old = &old_crtc_state->wm.skl.plane_ddb_y[plane_id]; |
5368 | new = &new_ddb->plane[pipe][plane_id]; | 5361 | new = &new_crtc_state->wm.skl.plane_ddb_y[plane_id]; |
5369 | 5362 | ||
5370 | if (skl_ddb_entry_equal(old, new)) | 5363 | if (skl_ddb_entry_equal(old, new)) |
5371 | continue; | 5364 | continue; |
5372 | 5365 | ||
5373 | DRM_DEBUG_KMS("[PLANE:%d:%s] ddb (%d - %d) -> (%d - %d)\n", | 5366 | DRM_DEBUG_KMS("[PLANE:%d:%s] ddb (%d - %d) -> (%d - %d)\n", |
5374 | intel_plane->base.base.id, | 5367 | plane->base.base.id, plane->base.name, |
5375 | intel_plane->base.name, | ||
5376 | old->start, old->end, | 5368 | old->start, old->end, |
5377 | new->start, new->end); | 5369 | new->start, new->end); |
5378 | } | 5370 | } |
@@ -5469,6 +5461,66 @@ skl_ddb_add_affected_pipes(struct drm_atomic_state *state, bool *changed) | |||
5469 | return 0; | 5461 | return 0; |
5470 | } | 5462 | } |
5471 | 5463 | ||
5464 | /* | ||
5465 | * To make sure the cursor watermark registers are always consistent | ||
5466 | * with our computed state, the following scenario needs special | ||
5467 | * treatment: | ||
5468 | * | ||
5469 | * 1. enable cursor | ||
5470 | * 2. move cursor entirely offscreen | ||
5471 | * 3. disable cursor | ||
5472 | * | ||
5473 | * Step 2. does call .disable_plane() but does not zero the watermarks | ||
5474 | * (since we consider an offscreen cursor still active for the purposes | ||
5475 | * of watermarks). Step 3. would not normally call .disable_plane() | ||
5476 | * because the actual plane visibility isn't changing, and we don't | ||
5477 | * deallocate the cursor ddb until the pipe gets disabled. So we must | ||
5478 | * force step 3. to call .disable_plane() to update the watermark | ||
5479 | * registers properly. | ||
5480 | * | ||
5481 | * Other planes do not suffer from this issue as their watermarks are | ||
5482 | * calculated based on the actual plane visibility. The only time this | ||
5483 | * can trigger for the other planes is during the initial readout, as the | ||
5484 | * default value of the watermark registers is not zero. | ||
5485 | */ | ||
5486 | static int skl_wm_add_affected_planes(struct intel_atomic_state *state, | ||
5487 | struct intel_crtc *crtc) | ||
5488 | { | ||
5489 | struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); | ||
5490 | const struct intel_crtc_state *old_crtc_state = | ||
5491 | intel_atomic_get_old_crtc_state(state, crtc); | ||
5492 | struct intel_crtc_state *new_crtc_state = | ||
5493 | intel_atomic_get_new_crtc_state(state, crtc); | ||
5494 | struct intel_plane *plane; | ||
5495 | |||
5496 | for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) { | ||
5497 | struct intel_plane_state *plane_state; | ||
5498 | enum plane_id plane_id = plane->id; | ||
5499 | |||
5500 | /* | ||
5501 | * Force a full wm update for every plane on modeset. | ||
5502 | * Required because the reset value of the wm registers | ||
5503 | * is non-zero, whereas we want all disabled planes to | ||
5504 | * have zero watermarks. So if we turn off the relevant | ||
5505 | * power well, the hardware state will go out of sync | ||
5506 | * with the software state. | ||
5507 | */ | ||
5508 | if (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) && | ||
5509 | skl_plane_wm_equals(dev_priv, | ||
5510 | &old_crtc_state->wm.skl.optimal.planes[plane_id], | ||
5511 | &new_crtc_state->wm.skl.optimal.planes[plane_id])) | ||
5512 | continue; | ||
5513 | |||
5514 | plane_state = intel_atomic_get_plane_state(state, plane); | ||
5515 | if (IS_ERR(plane_state)) | ||
5516 | return PTR_ERR(plane_state); | ||
5517 | |||
5518 | new_crtc_state->update_planes |= BIT(plane_id); | ||
5519 | } | ||
5520 | |||
5521 | return 0; | ||
5522 | } | ||
5523 | |||
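[editor's note] The comment above is subtle, so here is the same three-step cursor sequence as a toy model; the fields are illustrative, not driver structures. The point is that step 3 leaves a stale non-zero watermark in hardware unless the plane is forced into the commit as the new helper does:

#include <stdbool.h>

struct toy_cursor {
	bool visible;		/* actual plane visibility */
	unsigned int wm_reg;	/* last value written to PLANE_WM (toy) */
};

static void toy_cursor_sequence(struct toy_cursor *c)
{
	/* 1. enable cursor: visible, watermarks programmed */
	c->visible = true;
	c->wm_reg = 0x42;

	/*
	 * 2. move entirely offscreen: .disable_plane() runs, but the
	 * watermarks stay programmed since an offscreen cursor still
	 * counts as active for watermark purposes.
	 */
	c->visible = false;

	/*
	 * 3. disable cursor: visibility does not change, so nothing
	 * would call .disable_plane() again -- wm_reg is stuck at 0x42
	 * while the computed state says 0, unless the plane is forced
	 * into the commit.
	 */
}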
5472 | static int | 5524 | static int |
5473 | skl_compute_wm(struct drm_atomic_state *state) | 5525 | skl_compute_wm(struct drm_atomic_state *state) |
5474 | { | 5526 | { |
@@ -5508,8 +5560,12 @@ skl_compute_wm(struct drm_atomic_state *state) | |||
5508 | &to_intel_crtc_state(crtc->state)->wm.skl.optimal; | 5560 | &to_intel_crtc_state(crtc->state)->wm.skl.optimal; |
5509 | 5561 | ||
5510 | pipe_wm = &intel_cstate->wm.skl.optimal; | 5562 | pipe_wm = &intel_cstate->wm.skl.optimal; |
5511 | ret = skl_update_pipe_wm(cstate, old_pipe_wm, pipe_wm, | 5563 | ret = skl_update_pipe_wm(cstate, old_pipe_wm, pipe_wm, &changed); |
5512 | &results->ddb, &changed); | 5564 | if (ret) |
5565 | return ret; | ||
5566 | |||
5567 | ret = skl_wm_add_affected_planes(intel_state, | ||
5568 | to_intel_crtc(crtc)); | ||
5513 | if (ret) | 5569 | if (ret) |
5514 | return ret; | 5570 | return ret; |
5515 | 5571 | ||
@@ -5523,7 +5579,7 @@ skl_compute_wm(struct drm_atomic_state *state) | |||
5523 | intel_cstate->update_wm_pre = true; | 5579 | intel_cstate->update_wm_pre = true; |
5524 | } | 5580 | } |
5525 | 5581 | ||
5526 | skl_print_wm_changes(state); | 5582 | skl_print_wm_changes(intel_state); |
5527 | 5583 | ||
5528 | return 0; | 5584 | return 0; |
5529 | } | 5585 | } |
@@ -5534,23 +5590,12 @@ static void skl_atomic_update_crtc_wm(struct intel_atomic_state *state, | |||
5534 | struct intel_crtc *crtc = to_intel_crtc(cstate->base.crtc); | 5590 | struct intel_crtc *crtc = to_intel_crtc(cstate->base.crtc); |
5535 | struct drm_i915_private *dev_priv = to_i915(state->base.dev); | 5591 | struct drm_i915_private *dev_priv = to_i915(state->base.dev); |
5536 | struct skl_pipe_wm *pipe_wm = &cstate->wm.skl.optimal; | 5592 | struct skl_pipe_wm *pipe_wm = &cstate->wm.skl.optimal; |
5537 | const struct skl_ddb_allocation *ddb = &state->wm_results.ddb; | ||
5538 | enum pipe pipe = crtc->pipe; | 5593 | enum pipe pipe = crtc->pipe; |
5539 | enum plane_id plane_id; | ||
5540 | 5594 | ||
5541 | if (!(state->wm_results.dirty_pipes & drm_crtc_mask(&crtc->base))) | 5595 | if (!(state->wm_results.dirty_pipes & drm_crtc_mask(&crtc->base))) |
5542 | return; | 5596 | return; |
5543 | 5597 | ||
5544 | I915_WRITE(PIPE_WM_LINETIME(pipe), pipe_wm->linetime); | 5598 | I915_WRITE(PIPE_WM_LINETIME(pipe), pipe_wm->linetime); |
5545 | |||
5546 | for_each_plane_id_on_crtc(crtc, plane_id) { | ||
5547 | if (plane_id != PLANE_CURSOR) | ||
5548 | skl_write_plane_wm(crtc, &pipe_wm->planes[plane_id], | ||
5549 | ddb, plane_id); | ||
5550 | else | ||
5551 | skl_write_cursor_wm(crtc, &pipe_wm->planes[plane_id], | ||
5552 | ddb); | ||
5553 | } | ||
5554 | } | 5599 | } |
5555 | 5600 | ||
5556 | static void skl_initial_wm(struct intel_atomic_state *state, | 5601 | static void skl_initial_wm(struct intel_atomic_state *state, |
@@ -5560,8 +5605,6 @@ static void skl_initial_wm(struct intel_atomic_state *state, | |||
5560 | struct drm_device *dev = intel_crtc->base.dev; | 5605 | struct drm_device *dev = intel_crtc->base.dev; |
5561 | struct drm_i915_private *dev_priv = to_i915(dev); | 5606 | struct drm_i915_private *dev_priv = to_i915(dev); |
5562 | struct skl_ddb_values *results = &state->wm_results; | 5607 | struct skl_ddb_values *results = &state->wm_results; |
5563 | struct skl_ddb_values *hw_vals = &dev_priv->wm.skl_hw; | ||
5564 | enum pipe pipe = intel_crtc->pipe; | ||
5565 | 5608 | ||
5566 | if ((results->dirty_pipes & drm_crtc_mask(&intel_crtc->base)) == 0) | 5609 | if ((results->dirty_pipes & drm_crtc_mask(&intel_crtc->base)) == 0) |
5567 | return; | 5610 | return; |
@@ -5571,11 +5614,6 @@ static void skl_initial_wm(struct intel_atomic_state *state, | |||
5571 | if (cstate->base.active_changed) | 5614 | if (cstate->base.active_changed) |
5572 | skl_atomic_update_crtc_wm(state, cstate); | 5615 | skl_atomic_update_crtc_wm(state, cstate); |
5573 | 5616 | ||
5574 | memcpy(hw_vals->ddb.uv_plane[pipe], results->ddb.uv_plane[pipe], | ||
5575 | sizeof(hw_vals->ddb.uv_plane[pipe])); | ||
5576 | memcpy(hw_vals->ddb.plane[pipe], results->ddb.plane[pipe], | ||
5577 | sizeof(hw_vals->ddb.plane[pipe])); | ||
5578 | |||
5579 | mutex_unlock(&dev_priv->wm.wm_mutex); | 5617 | mutex_unlock(&dev_priv->wm.wm_mutex); |
5580 | } | 5618 | } |
5581 | 5619 | ||
@@ -5726,13 +5764,6 @@ void skl_wm_get_hw_state(struct drm_device *dev) | |||
5726 | if (dev_priv->active_crtcs) { | 5764 | if (dev_priv->active_crtcs) { |
5727 | /* Fully recompute DDB on first atomic commit */ | 5765 | /* Fully recompute DDB on first atomic commit */ |
5728 | dev_priv->wm.distrust_bios_wm = true; | 5766 | dev_priv->wm.distrust_bios_wm = true; |
5729 | } else { | ||
5730 | /* | ||
5731 | * Easy/common case; just sanitize DDB now if everything off | ||
5732 | * Keep dbuf slice info intact | ||
5733 | */ | ||
5734 | memset(ddb->plane, 0, sizeof(ddb->plane)); | ||
5735 | memset(ddb->uv_plane, 0, sizeof(ddb->uv_plane)); | ||
5736 | } | 5767 | } |
5737 | } | 5768 | } |
5738 | 5769 | ||
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c index 54fa17a5596a..419e56342523 100644 --- a/drivers/gpu/drm/i915/intel_psr.c +++ b/drivers/gpu/drm/i915/intel_psr.c | |||
@@ -75,6 +75,10 @@ static bool intel_psr2_enabled(struct drm_i915_private *dev_priv, | |||
75 | if (i915_modparams.enable_psr == -1) | 75 | if (i915_modparams.enable_psr == -1) |
76 | return false; | 76 | return false; |
77 | 77 | ||
78 | /* Cannot enable DSC and PSR2 simultaneously */ | ||
79 | WARN_ON(crtc_state->dsc_params.compression_enable && | ||
80 | crtc_state->has_psr2); | ||
81 | |||
78 | switch (dev_priv->psr.debug & I915_PSR_DEBUG_MODE_MASK) { | 82 | switch (dev_priv->psr.debug & I915_PSR_DEBUG_MODE_MASK) { |
79 | case I915_PSR_DEBUG_FORCE_PSR1: | 83 | case I915_PSR_DEBUG_FORCE_PSR1: |
80 | return false; | 84 | return false; |
@@ -169,6 +173,7 @@ void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir) | |||
169 | u32 transcoders = BIT(TRANSCODER_EDP); | 173 | u32 transcoders = BIT(TRANSCODER_EDP); |
170 | enum transcoder cpu_transcoder; | 174 | enum transcoder cpu_transcoder; |
171 | ktime_t time_ns = ktime_get(); | 175 | ktime_t time_ns = ktime_get(); |
176 | u32 mask = 0; | ||
172 | 177 | ||
173 | if (INTEL_GEN(dev_priv) >= 8) | 178 | if (INTEL_GEN(dev_priv) >= 8) |
174 | transcoders |= BIT(TRANSCODER_A) | | 179 | transcoders |= BIT(TRANSCODER_A) | |
@@ -178,10 +183,22 @@ void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir) | |||
178 | for_each_cpu_transcoder_masked(dev_priv, cpu_transcoder, transcoders) { | 183 | for_each_cpu_transcoder_masked(dev_priv, cpu_transcoder, transcoders) { |
179 | int shift = edp_psr_shift(cpu_transcoder); | 184 | int shift = edp_psr_shift(cpu_transcoder); |
180 | 185 | ||
181 | /* FIXME: Exit PSR and link train manually when this happens. */ | 186 | if (psr_iir & EDP_PSR_ERROR(shift)) { |
182 | if (psr_iir & EDP_PSR_ERROR(shift)) | 187 | DRM_WARN("[transcoder %s] PSR aux error\n", |
183 | DRM_DEBUG_KMS("[transcoder %s] PSR aux error\n", | 188 | transcoder_name(cpu_transcoder)); |
184 | transcoder_name(cpu_transcoder)); | 189 | |
190 | dev_priv->psr.irq_aux_error = true; | ||
191 | |||
192 | /* | ||
193 | * If this interruption is not masked it will keep | ||
194 | * interrupting so fast that it prevents the scheduled | ||
195 | * work to run. | ||
196 | * Also after a PSR error, we don't want to arm PSR | ||
197 | * again so we don't care about unmask the interruption | ||
198 | * or unset irq_aux_error. | ||
199 | */ | ||
200 | mask |= EDP_PSR_ERROR(shift); | ||
201 | } | ||
185 | 202 | ||
186 | if (psr_iir & EDP_PSR_PRE_ENTRY(shift)) { | 203 | if (psr_iir & EDP_PSR_PRE_ENTRY(shift)) { |
187 | dev_priv->psr.last_entry_attempt = time_ns; | 204 | dev_priv->psr.last_entry_attempt = time_ns; |
@@ -203,6 +220,13 @@ void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir) | |||
203 | } | 220 | } |
204 | } | 221 | } |
205 | } | 222 | } |
223 | |||
224 | if (mask) { | ||
225 | mask |= I915_READ(EDP_PSR_IMR); | ||
226 | I915_WRITE(EDP_PSR_IMR, mask); | ||
227 | |||
228 | schedule_work(&dev_priv->psr.work); | ||
229 | } | ||
206 | } | 230 | } |
207 | 231 | ||
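[editor's note] On an AUX error the handler now latches irq_aux_error, masks the offending bit in EDP_PSR_IMR so the interrupt cannot storm and starve the worker, and defers the actual recovery (PSR disable plus sink wake) to process context. The read-modify-write-and-defer shape, as a sketch; read_imr()/write_imr()/queue_recovery() stand in for I915_READ(EDP_PSR_IMR), I915_WRITE(EDP_PSR_IMR, ...) and schedule_work():

#include <stdint.h>

static void psr_mask_and_defer(uint32_t error_bits,
			       uint32_t (*read_imr)(void),
			       void (*write_imr)(uint32_t),
			       void (*queue_recovery)(void))
{
	if (!error_bits)
		return;

	/* keep whatever was already masked, add the new error bits */
	write_imr(read_imr() | error_bits);

	/* PSR disable + sink wake runs later from the PSR worker */
	queue_recovery();
}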
208 | static bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp) | 232 | static bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp) |
@@ -482,6 +506,16 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp, | |||
482 | if (!dev_priv->psr.sink_psr2_support) | 506 | if (!dev_priv->psr.sink_psr2_support) |
483 | return false; | 507 | return false; |
484 | 508 | ||
509 | /* | ||
510 | * DSC and PSR2 cannot be enabled simultaneously. If a requested | ||
511 | * resolution requires DSC to be enabled, priority is given to DSC | ||
512 | * over PSR2. | ||
513 | */ | ||
514 | if (crtc_state->dsc_params.compression_enable) { | ||
515 | DRM_DEBUG_KMS("PSR2 cannot be enabled since DSC is enabled\n"); | ||
516 | return false; | ||
517 | } | ||
518 | |||
485 | if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) { | 519 | if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) { |
486 | psr_max_h = 4096; | 520 | psr_max_h = 4096; |
487 | psr_max_v = 2304; | 521 | psr_max_v = 2304; |
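[editor's note] The precedence expressed by the new check, reduced to a predicate as a sketch; plain booleans stand in for the sink PSR2 capability and the crtc state's dsc_params.compression_enable:

#include <stdbool.h>

/* DSC wins over PSR2: if the mode needed compression, reject PSR2 */
static bool psr2_allowed(bool sink_supports_psr2, bool dsc_enabled)
{
	return sink_supports_psr2 && !dsc_enabled;
}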
@@ -527,10 +561,8 @@ void intel_psr_compute_config(struct intel_dp *intel_dp, | |||
527 | return; | 561 | return; |
528 | } | 562 | } |
529 | 563 | ||
530 | if (IS_HASWELL(dev_priv) && | 564 | if (dev_priv->psr.sink_not_reliable) { |
531 | I915_READ(HSW_STEREO_3D_CTL(crtc_state->cpu_transcoder)) & | 565 | DRM_DEBUG_KMS("PSR sink implementation is not reliable\n"); |
532 | S3D_ENABLE) { | ||
533 | DRM_DEBUG_KMS("PSR condition failed: Stereo 3D is Enabled\n"); | ||
534 | return; | 566 | return; |
535 | } | 567 | } |
536 | 568 | ||
@@ -687,6 +719,7 @@ void intel_psr_enable(struct intel_dp *intel_dp, | |||
687 | dev_priv->psr.psr2_enabled = intel_psr2_enabled(dev_priv, crtc_state); | 719 | dev_priv->psr.psr2_enabled = intel_psr2_enabled(dev_priv, crtc_state); |
688 | dev_priv->psr.busy_frontbuffer_bits = 0; | 720 | dev_priv->psr.busy_frontbuffer_bits = 0; |
689 | dev_priv->psr.prepared = true; | 721 | dev_priv->psr.prepared = true; |
722 | dev_priv->psr.pipe = to_intel_crtc(crtc_state->base.crtc)->pipe; | ||
690 | 723 | ||
691 | if (psr_global_enabled(dev_priv->psr.debug)) | 724 | if (psr_global_enabled(dev_priv->psr.debug)) |
692 | intel_psr_enable_locked(dev_priv, crtc_state); | 725 | intel_psr_enable_locked(dev_priv, crtc_state); |
@@ -933,6 +966,16 @@ int intel_psr_set_debugfs_mode(struct drm_i915_private *dev_priv, | |||
933 | return ret; | 966 | return ret; |
934 | } | 967 | } |
935 | 968 | ||
969 | static void intel_psr_handle_irq(struct drm_i915_private *dev_priv) | ||
970 | { | ||
971 | struct i915_psr *psr = &dev_priv->psr; | ||
972 | |||
973 | intel_psr_disable_locked(psr->dp); | ||
974 | psr->sink_not_reliable = true; | ||
975 | /* let's make sure that the sink is awake */ | ||
976 | drm_dp_dpcd_writeb(&psr->dp->aux, DP_SET_POWER, DP_SET_POWER_D0); | ||
977 | } | ||
978 | |||
936 | static void intel_psr_work(struct work_struct *work) | 979 | static void intel_psr_work(struct work_struct *work) |
937 | { | 980 | { |
938 | struct drm_i915_private *dev_priv = | 981 | struct drm_i915_private *dev_priv = |
@@ -943,6 +986,9 @@ static void intel_psr_work(struct work_struct *work) | |||
943 | if (!dev_priv->psr.enabled) | 986 | if (!dev_priv->psr.enabled) |
944 | goto unlock; | 987 | goto unlock; |
945 | 988 | ||
989 | if (READ_ONCE(dev_priv->psr.irq_aux_error)) | ||
990 | intel_psr_handle_irq(dev_priv); | ||
991 | |||
946 | /* | 992 | /* |
947 | * We have to make sure PSR is ready for re-enable | 993 | * We have to make sure PSR is ready for re-enable |
948 | * otherwise it keeps disabled until next full enable/disable cycle. | 994 | * otherwise it keeps disabled until next full enable/disable cycle. |
@@ -981,9 +1027,6 @@ unlock: | |||
981 | void intel_psr_invalidate(struct drm_i915_private *dev_priv, | 1027 | void intel_psr_invalidate(struct drm_i915_private *dev_priv, |
982 | unsigned frontbuffer_bits, enum fb_op_origin origin) | 1028 | unsigned frontbuffer_bits, enum fb_op_origin origin) |
983 | { | 1029 | { |
984 | struct drm_crtc *crtc; | ||
985 | enum pipe pipe; | ||
986 | |||
987 | if (!CAN_PSR(dev_priv)) | 1030 | if (!CAN_PSR(dev_priv)) |
988 | return; | 1031 | return; |
989 | 1032 | ||
@@ -996,10 +1039,7 @@ void intel_psr_invalidate(struct drm_i915_private *dev_priv, | |||
996 | return; | 1039 | return; |
997 | } | 1040 | } |
998 | 1041 | ||
999 | crtc = dp_to_dig_port(dev_priv->psr.dp)->base.base.crtc; | 1042 | frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(dev_priv->psr.pipe); |
1000 | pipe = to_intel_crtc(crtc)->pipe; | ||
1001 | |||
1002 | frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe); | ||
1003 | dev_priv->psr.busy_frontbuffer_bits |= frontbuffer_bits; | 1043 | dev_priv->psr.busy_frontbuffer_bits |= frontbuffer_bits; |
1004 | 1044 | ||
1005 | if (frontbuffer_bits) | 1045 | if (frontbuffer_bits) |
@@ -1024,9 +1064,6 @@ void intel_psr_invalidate(struct drm_i915_private *dev_priv, | |||
1024 | void intel_psr_flush(struct drm_i915_private *dev_priv, | 1064 | void intel_psr_flush(struct drm_i915_private *dev_priv, |
1025 | unsigned frontbuffer_bits, enum fb_op_origin origin) | 1065 | unsigned frontbuffer_bits, enum fb_op_origin origin) |
1026 | { | 1066 | { |
1027 | struct drm_crtc *crtc; | ||
1028 | enum pipe pipe; | ||
1029 | |||
1030 | if (!CAN_PSR(dev_priv)) | 1067 | if (!CAN_PSR(dev_priv)) |
1031 | return; | 1068 | return; |
1032 | 1069 | ||
@@ -1039,10 +1076,7 @@ void intel_psr_flush(struct drm_i915_private *dev_priv, | |||
1039 | return; | 1076 | return; |
1040 | } | 1077 | } |
1041 | 1078 | ||
1042 | crtc = dp_to_dig_port(dev_priv->psr.dp)->base.base.crtc; | 1079 | frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(dev_priv->psr.pipe); |
1043 | pipe = to_intel_crtc(crtc)->pipe; | ||
1044 | |||
1045 | frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe); | ||
1046 | dev_priv->psr.busy_frontbuffer_bits &= ~frontbuffer_bits; | 1080 | dev_priv->psr.busy_frontbuffer_bits &= ~frontbuffer_bits; |
1047 | 1081 | ||
1048 | /* By definition flush = invalidate + flush */ | 1082 | /* By definition flush = invalidate + flush */ |
@@ -1056,7 +1090,7 @@ void intel_psr_flush(struct drm_i915_private *dev_priv, | |||
1056 | * but it makes more sense to write to the current active | 1090 | * but it makes more sense to write to the current active |
1057 | * pipe. | 1091 | * pipe. |
1058 | */ | 1092 | */ |
1059 | I915_WRITE(CURSURFLIVE(pipe), 0); | 1093 | I915_WRITE(CURSURFLIVE(dev_priv->psr.pipe), 0); |
1060 | } | 1094 | } |
1061 | 1095 | ||
1062 | if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits) | 1096 | if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits) |
@@ -1073,6 +1107,8 @@ void intel_psr_flush(struct drm_i915_private *dev_priv, | |||
1073 | */ | 1107 | */ |
1074 | void intel_psr_init(struct drm_i915_private *dev_priv) | 1108 | void intel_psr_init(struct drm_i915_private *dev_priv) |
1075 | { | 1109 | { |
1110 | u32 val; | ||
1111 | |||
1076 | if (!HAS_PSR(dev_priv)) | 1112 | if (!HAS_PSR(dev_priv)) |
1077 | return; | 1113 | return; |
1078 | 1114 | ||
@@ -1086,6 +1122,22 @@ void intel_psr_init(struct drm_i915_private *dev_priv) | |||
1086 | if (INTEL_GEN(dev_priv) < 9 || !dev_priv->vbt.psr.enable) | 1122 | if (INTEL_GEN(dev_priv) < 9 || !dev_priv->vbt.psr.enable) |
1087 | i915_modparams.enable_psr = 0; | 1123 | i915_modparams.enable_psr = 0; |
1088 | 1124 | ||
1125 | /* | ||
1126 | * If a PSR error happened and the driver is reloaded, the EDP_PSR_IIR | ||
1127 | * will still keep the error set even after the reset done in the | ||
1128 | * irq_preinstall and irq_uninstall hooks. | ||
1129 | * Enabling PSR in this situation causes the screen to freeze the | ||
1130 | * first time the PSR HW tries to activate, so let's keep PSR disabled | ||
1131 | * to avoid any rendering problems. | ||
1132 | */ | ||
1133 | val = I915_READ(EDP_PSR_IIR); | ||
1134 | val &= EDP_PSR_ERROR(edp_psr_shift(TRANSCODER_EDP)); | ||
1135 | if (val) { | ||
1136 | DRM_DEBUG_KMS("PSR interruption error set\n"); | ||
1137 | dev_priv->psr.sink_not_reliable = true; | ||
1138 | return; | ||
1139 | } | ||
1140 | |||
1089 | /* Set link_standby x link_off defaults */ | 1141 | /* Set link_standby x link_off defaults */ |
1090 | if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) | 1142 | if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) |
1091 | /* HSW and BDW require workarounds that we don't implement. */ | 1143 | /* HSW and BDW require workarounds that we don't implement. */ |
@@ -1123,6 +1175,7 @@ void intel_psr_short_pulse(struct intel_dp *intel_dp) | |||
1123 | if ((val & DP_PSR_SINK_STATE_MASK) == DP_PSR_SINK_INTERNAL_ERROR) { | 1175 | if ((val & DP_PSR_SINK_STATE_MASK) == DP_PSR_SINK_INTERNAL_ERROR) { |
1124 | DRM_DEBUG_KMS("PSR sink internal error, disabling PSR\n"); | 1176 | DRM_DEBUG_KMS("PSR sink internal error, disabling PSR\n"); |
1125 | intel_psr_disable_locked(intel_dp); | 1177 | intel_psr_disable_locked(intel_dp); |
1178 | psr->sink_not_reliable = true; | ||
1126 | } | 1179 | } |
1127 | 1180 | ||
1128 | if (drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_ERROR_STATUS, &val) != 1) { | 1181 | if (drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_ERROR_STATUS, &val) != 1) { |
@@ -1140,10 +1193,27 @@ void intel_psr_short_pulse(struct intel_dp *intel_dp) | |||
1140 | if (val & ~errors) | 1193 | if (val & ~errors) |
1141 | DRM_ERROR("PSR_ERROR_STATUS unhandled errors %x\n", | 1194 | DRM_ERROR("PSR_ERROR_STATUS unhandled errors %x\n", |
1142 | val & ~errors); | 1195 | val & ~errors); |
1143 | if (val & errors) | 1196 | if (val & errors) { |
1144 | intel_psr_disable_locked(intel_dp); | 1197 | intel_psr_disable_locked(intel_dp); |
1198 | psr->sink_not_reliable = true; | ||
1199 | } | ||
1145 | /* clear status register */ | 1200 | /* clear status register */ |
1146 | drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ERROR_STATUS, val); | 1201 | drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ERROR_STATUS, val); |
1147 | exit: | 1202 | exit: |
1148 | mutex_unlock(&psr->lock); | 1203 | mutex_unlock(&psr->lock); |
1149 | } | 1204 | } |
1205 | |||
1206 | bool intel_psr_enabled(struct intel_dp *intel_dp) | ||
1207 | { | ||
1208 | struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); | ||
1209 | bool ret; | ||
1210 | |||
1211 | if (!CAN_PSR(dev_priv) || !intel_dp_is_edp(intel_dp)) | ||
1212 | return false; | ||
1213 | |||
1214 | mutex_lock(&dev_priv->psr.lock); | ||
1215 | ret = (dev_priv->psr.dp == intel_dp && dev_priv->psr.enabled); | ||
1216 | mutex_unlock(&dev_priv->psr.lock); | ||
1217 | |||
1218 | return ret; | ||
1219 | } | ||
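[editor's note] The new intel_psr_enabled() helper takes the PSR lock and reports whether PSR is currently armed for this specific eDP sink. A hypothetical caller sketch -- only intel_psr_enabled() is real here; the surrounding function name and the skipped operation are invented for illustration:

static void maybe_do_link_work(struct intel_dp *intel_dp)
{
	if (intel_psr_enabled(intel_dp))
		return;	/* e.g. leave the link alone while PSR owns it */

	/* ... proceed with the link operation ... */
}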
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 87eebc13c0d8..c5eb26a7ee79 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c | |||
@@ -150,8 +150,7 @@ gen4_render_ring_flush(struct i915_request *rq, u32 mode) | |||
150 | */ | 150 | */ |
151 | if (mode & EMIT_INVALIDATE) { | 151 | if (mode & EMIT_INVALIDATE) { |
152 | *cs++ = GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE; | 152 | *cs++ = GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE; |
153 | *cs++ = i915_ggtt_offset(rq->engine->scratch) | | 153 | *cs++ = i915_scratch_offset(rq->i915) | PIPE_CONTROL_GLOBAL_GTT; |
154 | PIPE_CONTROL_GLOBAL_GTT; | ||
155 | *cs++ = 0; | 154 | *cs++ = 0; |
156 | *cs++ = 0; | 155 | *cs++ = 0; |
157 | 156 | ||
@@ -159,8 +158,7 @@ gen4_render_ring_flush(struct i915_request *rq, u32 mode) | |||
159 | *cs++ = MI_FLUSH; | 158 | *cs++ = MI_FLUSH; |
160 | 159 | ||
161 | *cs++ = GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE; | 160 | *cs++ = GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE; |
162 | *cs++ = i915_ggtt_offset(rq->engine->scratch) | | 161 | *cs++ = i915_scratch_offset(rq->i915) | PIPE_CONTROL_GLOBAL_GTT; |
163 | PIPE_CONTROL_GLOBAL_GTT; | ||
164 | *cs++ = 0; | 162 | *cs++ = 0; |
165 | *cs++ = 0; | 163 | *cs++ = 0; |
166 | } | 164 | } |
@@ -212,8 +210,7 @@ gen4_render_ring_flush(struct i915_request *rq, u32 mode) | |||
212 | static int | 210 | static int |
213 | intel_emit_post_sync_nonzero_flush(struct i915_request *rq) | 211 | intel_emit_post_sync_nonzero_flush(struct i915_request *rq) |
214 | { | 212 | { |
215 | u32 scratch_addr = | 213 | u32 scratch_addr = i915_scratch_offset(rq->i915) + 2 * CACHELINE_BYTES; |
216 | i915_ggtt_offset(rq->engine->scratch) + 2 * CACHELINE_BYTES; | ||
217 | u32 *cs; | 214 | u32 *cs; |
218 | 215 | ||
219 | cs = intel_ring_begin(rq, 6); | 216 | cs = intel_ring_begin(rq, 6); |
@@ -246,8 +243,7 @@ intel_emit_post_sync_nonzero_flush(struct i915_request *rq) | |||
246 | static int | 243 | static int |
247 | gen6_render_ring_flush(struct i915_request *rq, u32 mode) | 244 | gen6_render_ring_flush(struct i915_request *rq, u32 mode) |
248 | { | 245 | { |
249 | u32 scratch_addr = | 246 | u32 scratch_addr = i915_scratch_offset(rq->i915) + 2 * CACHELINE_BYTES; |
250 | i915_ggtt_offset(rq->engine->scratch) + 2 * CACHELINE_BYTES; | ||
251 | u32 *cs, flags = 0; | 247 | u32 *cs, flags = 0; |
252 | int ret; | 248 | int ret; |
253 | 249 | ||
@@ -316,8 +312,7 @@ gen7_render_ring_cs_stall_wa(struct i915_request *rq) | |||
316 | static int | 312 | static int |
317 | gen7_render_ring_flush(struct i915_request *rq, u32 mode) | 313 | gen7_render_ring_flush(struct i915_request *rq, u32 mode) |
318 | { | 314 | { |
319 | u32 scratch_addr = | 315 | u32 scratch_addr = i915_scratch_offset(rq->i915) + 2 * CACHELINE_BYTES; |
320 | i915_ggtt_offset(rq->engine->scratch) + 2 * CACHELINE_BYTES; | ||
321 | u32 *cs, flags = 0; | 316 | u32 *cs, flags = 0; |
322 | 317 | ||
323 | /* | 318 | /* |
@@ -529,6 +524,13 @@ static int init_ring_common(struct intel_engine_cs *engine) | |||
529 | 524 | ||
530 | intel_engine_reset_breadcrumbs(engine); | 525 | intel_engine_reset_breadcrumbs(engine); |
531 | 526 | ||
527 | if (HAS_LEGACY_SEMAPHORES(engine->i915)) { | ||
528 | I915_WRITE(RING_SYNC_0(engine->mmio_base), 0); | ||
529 | I915_WRITE(RING_SYNC_1(engine->mmio_base), 0); | ||
530 | if (HAS_VEBOX(dev_priv)) | ||
531 | I915_WRITE(RING_SYNC_2(engine->mmio_base), 0); | ||
532 | } | ||
533 | |||
532 | /* Enforce ordering by reading HEAD register back */ | 534 | /* Enforce ordering by reading HEAD register back */ |
533 | I915_READ_HEAD(engine); | 535 | I915_READ_HEAD(engine); |
534 | 536 | ||
@@ -546,10 +548,11 @@ static int init_ring_common(struct intel_engine_cs *engine) | |||
546 | /* Check that the ring offsets point within the ring! */ | 548 | /* Check that the ring offsets point within the ring! */ |
547 | GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->head)); | 549 | GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->head)); |
548 | GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->tail)); | 550 | GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->tail)); |
549 | |||
550 | intel_ring_update_space(ring); | 551 | intel_ring_update_space(ring); |
552 | |||
553 | /* First wake the ring up to an empty/idle ring */ | ||
551 | I915_WRITE_HEAD(engine, ring->head); | 554 | I915_WRITE_HEAD(engine, ring->head); |
552 | I915_WRITE_TAIL(engine, ring->tail); | 555 | I915_WRITE_TAIL(engine, ring->head); |
553 | (void)I915_READ_TAIL(engine); | 556 | (void)I915_READ_TAIL(engine); |
554 | 557 | ||
555 | I915_WRITE_CTL(engine, RING_CTL_SIZE(ring->size) | RING_VALID); | 558 | I915_WRITE_CTL(engine, RING_CTL_SIZE(ring->size) | RING_VALID); |
@@ -574,6 +577,12 @@ static int init_ring_common(struct intel_engine_cs *engine) | |||
574 | if (INTEL_GEN(dev_priv) > 2) | 577 | if (INTEL_GEN(dev_priv) > 2) |
575 | I915_WRITE_MODE(engine, _MASKED_BIT_DISABLE(STOP_RING)); | 578 | I915_WRITE_MODE(engine, _MASKED_BIT_DISABLE(STOP_RING)); |
576 | 579 | ||
580 | /* Now awake, let it get started */ | ||
581 | if (ring->tail != ring->head) { | ||
582 | I915_WRITE_TAIL(engine, ring->tail); | ||
583 | (void)I915_READ_TAIL(engine); | ||
584 | } | ||
585 | |||
577 | /* Papering over lost _interrupts_ immediately following the restart */ | 586 | /* Papering over lost _interrupts_ immediately following the restart */ |
578 | intel_engine_wakeup(engine); | 587 | intel_engine_wakeup(engine); |
579 | out: | 588 | out: |
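[editor's note] The reordering above splits the ring restart in two: the ring is first woken as empty (HEAD and TAIL both set to ring->head) and validated, and only once it is awake is the real tail published, which is what actually starts execution. A sketch of that shape; write_head()/write_tail()/enable_ring() stand in for I915_WRITE_HEAD, I915_WRITE_TAIL and the RING_VALID write:

static void restart_ring(unsigned int head, unsigned int tail,
			 void (*write_head)(unsigned int),
			 void (*write_tail)(unsigned int),
			 void (*enable_ring)(void))
{
	/* phase 1: wake the ring to an empty/idle state (head == tail) */
	write_head(head);
	write_tail(head);
	enable_ring();

	/* phase 2: now awake, publish the real tail to start execution */
	if (tail != head)
		write_tail(tail);
}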
@@ -642,7 +651,7 @@ static int intel_rcs_ctx_init(struct i915_request *rq) | |||
642 | { | 651 | { |
643 | int ret; | 652 | int ret; |
644 | 653 | ||
645 | ret = intel_ctx_workarounds_emit(rq); | 654 | ret = intel_engine_emit_ctx_wa(rq); |
646 | if (ret != 0) | 655 | if (ret != 0) |
647 | return ret; | 656 | return ret; |
648 | 657 | ||
@@ -660,8 +669,6 @@ static int init_render_ring(struct intel_engine_cs *engine) | |||
660 | if (ret) | 669 | if (ret) |
661 | return ret; | 670 | return ret; |
662 | 671 | ||
663 | intel_whitelist_workarounds_apply(engine); | ||
664 | |||
665 | /* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */ | 672 | /* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */ |
666 | if (IS_GEN(dev_priv, 4, 6)) | 673 | if (IS_GEN(dev_priv, 4, 6)) |
667 | I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH)); | 674 | I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH)); |
@@ -743,9 +750,18 @@ static void cancel_requests(struct intel_engine_cs *engine) | |||
743 | /* Mark all submitted requests as skipped. */ | 750 | /* Mark all submitted requests as skipped. */ |
744 | list_for_each_entry(request, &engine->timeline.requests, link) { | 751 | list_for_each_entry(request, &engine->timeline.requests, link) { |
745 | GEM_BUG_ON(!request->global_seqno); | 752 | GEM_BUG_ON(!request->global_seqno); |
746 | if (!i915_request_completed(request)) | 753 | |
747 | dma_fence_set_error(&request->fence, -EIO); | 754 | if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, |
755 | &request->fence.flags)) | ||
756 | continue; | ||
757 | |||
758 | dma_fence_set_error(&request->fence, -EIO); | ||
748 | } | 759 | } |
760 | |||
761 | intel_write_status_page(engine, | ||
762 | I915_GEM_HWS_INDEX, | ||
763 | intel_engine_last_submit(engine)); | ||
764 | |||
749 | /* Remaining _unready_ requests will be nop'ed when submitted */ | 765 | /* Remaining _unready_ requests will be nop'ed when submitted */ |
750 | 766 | ||
751 | spin_unlock_irqrestore(&engine->timeline.lock, flags); | 767 | spin_unlock_irqrestore(&engine->timeline.lock, flags); |
@@ -973,7 +989,7 @@ i965_emit_bb_start(struct i915_request *rq, | |||
973 | } | 989 | } |
974 | 990 | ||
975 | /* Just userspace ABI convention to limit the wa batch bo to a reasonable size */ | 991 | /* Just userspace ABI convention to limit the wa batch bo to a reasonable size */ |
976 | #define I830_BATCH_LIMIT (256*1024) | 992 | #define I830_BATCH_LIMIT SZ_256K |
977 | #define I830_TLB_ENTRIES (2) | 993 | #define I830_TLB_ENTRIES (2) |
978 | #define I830_WA_SIZE max(I830_TLB_ENTRIES*4096, I830_BATCH_LIMIT) | 994 | #define I830_WA_SIZE max(I830_TLB_ENTRIES*4096, I830_BATCH_LIMIT) |
979 | static int | 995 | static int |
@@ -981,7 +997,9 @@ i830_emit_bb_start(struct i915_request *rq, | |||
981 | u64 offset, u32 len, | 997 | u64 offset, u32 len, |
982 | unsigned int dispatch_flags) | 998 | unsigned int dispatch_flags) |
983 | { | 999 | { |
984 | u32 *cs, cs_offset = i915_ggtt_offset(rq->engine->scratch); | 1000 | u32 *cs, cs_offset = i915_scratch_offset(rq->i915); |
1001 | |||
1002 | GEM_BUG_ON(rq->i915->gt.scratch->size < I830_WA_SIZE); | ||
985 | 1003 | ||
986 | cs = intel_ring_begin(rq, 6); | 1004 | cs = intel_ring_begin(rq, 6); |
987 | if (IS_ERR(cs)) | 1005 | if (IS_ERR(cs)) |
@@ -1438,7 +1456,6 @@ static int intel_init_ring_buffer(struct intel_engine_cs *engine) | |||
1438 | { | 1456 | { |
1439 | struct i915_timeline *timeline; | 1457 | struct i915_timeline *timeline; |
1440 | struct intel_ring *ring; | 1458 | struct intel_ring *ring; |
1441 | unsigned int size; | ||
1442 | int err; | 1459 | int err; |
1443 | 1460 | ||
1444 | intel_engine_setup_common(engine); | 1461 | intel_engine_setup_common(engine); |
@@ -1463,21 +1480,12 @@ static int intel_init_ring_buffer(struct intel_engine_cs *engine) | |||
1463 | GEM_BUG_ON(engine->buffer); | 1480 | GEM_BUG_ON(engine->buffer); |
1464 | engine->buffer = ring; | 1481 | engine->buffer = ring; |
1465 | 1482 | ||
1466 | size = PAGE_SIZE; | ||
1467 | if (HAS_BROKEN_CS_TLB(engine->i915)) | ||
1468 | size = I830_WA_SIZE; | ||
1469 | err = intel_engine_create_scratch(engine, size); | ||
1470 | if (err) | ||
1471 | goto err_unpin; | ||
1472 | |||
1473 | err = intel_engine_init_common(engine); | 1483 | err = intel_engine_init_common(engine); |
1474 | if (err) | 1484 | if (err) |
1475 | goto err_scratch; | 1485 | goto err_unpin; |
1476 | 1486 | ||
1477 | return 0; | 1487 | return 0; |
1478 | 1488 | ||
1479 | err_scratch: | ||
1480 | intel_engine_cleanup_scratch(engine); | ||
1481 | err_unpin: | 1489 | err_unpin: |
1482 | intel_ring_unpin(ring); | 1490 | intel_ring_unpin(ring); |
1483 | err_ring: | 1491 | err_ring: |
@@ -1551,7 +1559,7 @@ static int flush_pd_dir(struct i915_request *rq) | |||
1551 | /* Stall until the page table load is complete */ | 1559 | /* Stall until the page table load is complete */ |
1552 | *cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT; | 1560 | *cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT; |
1553 | *cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine)); | 1561 | *cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine)); |
1554 | *cs++ = i915_ggtt_offset(engine->scratch); | 1562 | *cs++ = i915_scratch_offset(rq->i915); |
1555 | *cs++ = MI_NOOP; | 1563 | *cs++ = MI_NOOP; |
1556 | 1564 | ||
1557 | intel_ring_advance(rq, cs); | 1565 | intel_ring_advance(rq, cs); |
@@ -1660,7 +1668,7 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags) | |||
1660 | /* Insert a delay before the next switch! */ | 1668 | /* Insert a delay before the next switch! */ |
1661 | *cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT; | 1669 | *cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT; |
1662 | *cs++ = i915_mmio_reg_offset(last_reg); | 1670 | *cs++ = i915_mmio_reg_offset(last_reg); |
1663 | *cs++ = i915_ggtt_offset(engine->scratch); | 1671 | *cs++ = i915_scratch_offset(rq->i915); |
1664 | *cs++ = MI_NOOP; | 1672 | *cs++ = MI_NOOP; |
1665 | } | 1673 | } |
1666 | *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE; | 1674 | *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE; |
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index 8a2270b209b0..72edaa7ff411 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h | |||
@@ -15,6 +15,7 @@ | |||
15 | #include "i915_selftest.h" | 15 | #include "i915_selftest.h" |
16 | #include "i915_timeline.h" | 16 | #include "i915_timeline.h" |
17 | #include "intel_gpu_commands.h" | 17 | #include "intel_gpu_commands.h" |
18 | #include "intel_workarounds.h" | ||
18 | 19 | ||
19 | struct drm_printer; | 20 | struct drm_printer; |
20 | struct i915_sched_attr; | 21 | struct i915_sched_attr; |
@@ -313,13 +314,6 @@ struct intel_engine_execlists { | |||
313 | struct rb_root_cached queue; | 314 | struct rb_root_cached queue; |
314 | 315 | ||
315 | /** | 316 | /** |
316 | * @csb_read: control register for Context Switch buffer | ||
317 | * | ||
318 | * Note this register is always in mmio. | ||
319 | */ | ||
320 | u32 __iomem *csb_read; | ||
321 | |||
322 | /** | ||
323 | * @csb_write: control register for Context Switch buffer | 317 | * @csb_write: control register for Context Switch buffer |
324 | * | 318 | * |
325 | * Note this register may be either mmio or HWSP shadow. | 319 | * Note this register may be either mmio or HWSP shadow. |
@@ -339,15 +333,6 @@ struct intel_engine_execlists { | |||
339 | u32 preempt_complete_status; | 333 | u32 preempt_complete_status; |
340 | 334 | ||
341 | /** | 335 | /** |
342 | * @csb_write_reset: reset value for CSB write pointer | ||
343 | * | ||
344 | * As the CSB write pointer maybe either in HWSP or as a field | ||
345 | * inside an mmio register, we want to reprogram it slightly | ||
346 | * differently to avoid later confusion. | ||
347 | */ | ||
348 | u32 csb_write_reset; | ||
349 | |||
350 | /** | ||
351 | * @csb_head: context status buffer head | 336 | * @csb_head: context status buffer head |
352 | */ | 337 | */ |
353 | u8 csb_head; | 338 | u8 csb_head; |
@@ -451,7 +436,9 @@ struct intel_engine_cs { | |||
451 | 436 | ||
452 | struct intel_hw_status_page status_page; | 437 | struct intel_hw_status_page status_page; |
453 | struct i915_ctx_workarounds wa_ctx; | 438 | struct i915_ctx_workarounds wa_ctx; |
454 | struct i915_vma *scratch; | 439 | struct i915_wa_list ctx_wa_list; |
440 | struct i915_wa_list wa_list; | ||
441 | struct i915_wa_list whitelist; | ||
455 | 442 | ||
456 | u32 irq_keep_mask; /* always keep these interrupts */ | 443 | u32 irq_keep_mask; /* always keep these interrupts */ |
457 | u32 irq_enable_mask; /* bitmask to enable ring interrupt */ | 444 | u32 irq_enable_mask; /* bitmask to enable ring interrupt */ |
@@ -908,10 +895,6 @@ void intel_engine_setup_common(struct intel_engine_cs *engine); | |||
908 | int intel_engine_init_common(struct intel_engine_cs *engine); | 895 | int intel_engine_init_common(struct intel_engine_cs *engine); |
909 | void intel_engine_cleanup_common(struct intel_engine_cs *engine); | 896 | void intel_engine_cleanup_common(struct intel_engine_cs *engine); |
910 | 897 | ||
911 | int intel_engine_create_scratch(struct intel_engine_cs *engine, | ||
912 | unsigned int size); | ||
913 | void intel_engine_cleanup_scratch(struct intel_engine_cs *engine); | ||
914 | |||
915 | int intel_init_render_ring_buffer(struct intel_engine_cs *engine); | 898 | int intel_init_render_ring_buffer(struct intel_engine_cs *engine); |
916 | int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine); | 899 | int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine); |
917 | int intel_init_blt_ring_buffer(struct intel_engine_cs *engine); | 900 | int intel_init_blt_ring_buffer(struct intel_engine_cs *engine); |
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c index 1c2de9b69a19..4350a5270423 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.c +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c | |||
@@ -76,6 +76,8 @@ intel_display_power_domain_str(enum intel_display_power_domain domain) | |||
76 | return "TRANSCODER_C"; | 76 | return "TRANSCODER_C"; |
77 | case POWER_DOMAIN_TRANSCODER_EDP: | 77 | case POWER_DOMAIN_TRANSCODER_EDP: |
78 | return "TRANSCODER_EDP"; | 78 | return "TRANSCODER_EDP"; |
79 | case POWER_DOMAIN_TRANSCODER_EDP_VDSC: | ||
80 | return "TRANSCODER_EDP_VDSC"; | ||
79 | case POWER_DOMAIN_TRANSCODER_DSI_A: | 81 | case POWER_DOMAIN_TRANSCODER_DSI_A: |
80 | return "TRANSCODER_DSI_A"; | 82 | return "TRANSCODER_DSI_A"; |
81 | case POWER_DOMAIN_TRANSCODER_DSI_C: | 83 | case POWER_DOMAIN_TRANSCODER_DSI_C: |
@@ -2028,9 +2030,9 @@ void intel_display_power_put(struct drm_i915_private *dev_priv, | |||
2028 | */ | 2030 | */ |
2029 | #define ICL_PW_2_POWER_DOMAINS ( \ | 2031 | #define ICL_PW_2_POWER_DOMAINS ( \ |
2030 | ICL_PW_3_POWER_DOMAINS | \ | 2032 | ICL_PW_3_POWER_DOMAINS | \ |
2033 | BIT_ULL(POWER_DOMAIN_TRANSCODER_EDP_VDSC) | \ | ||
2031 | BIT_ULL(POWER_DOMAIN_INIT)) | 2034 | BIT_ULL(POWER_DOMAIN_INIT)) |
2032 | /* | 2035 | /* |
2033 | * - eDP/DSI VDSC | ||
2034 | * - KVMR (HW control) | 2036 | * - KVMR (HW control) |
2035 | */ | 2037 | */ |
2036 | #define ICL_DISPLAY_DC_OFF_POWER_DOMAINS ( \ | 2038 | #define ICL_DISPLAY_DC_OFF_POWER_DOMAINS ( \ |
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c index abe193815ccc..d2e003d8f3db 100644 --- a/drivers/gpu/drm/i915/intel_sprite.c +++ b/drivers/gpu/drm/i915/intel_sprite.c | |||
@@ -373,14 +373,12 @@ skl_program_scaler(struct intel_plane *plane, | |||
373 | #define BOFF(x) (((x) & 0xffff) << 16) | 373 | #define BOFF(x) (((x) & 0xffff) << 16) |
374 | 374 | ||
375 | static void | 375 | static void |
376 | icl_program_input_csc_coeff(const struct intel_crtc_state *crtc_state, | 376 | icl_program_input_csc(struct intel_plane *plane, |
377 | const struct intel_plane_state *plane_state) | 377 | const struct intel_crtc_state *crtc_state, |
378 | const struct intel_plane_state *plane_state) | ||
378 | { | 379 | { |
379 | struct drm_i915_private *dev_priv = | 380 | struct drm_i915_private *dev_priv = to_i915(plane->base.dev); |
380 | to_i915(plane_state->base.plane->dev); | 381 | enum pipe pipe = plane->pipe; |
381 | struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); | ||
382 | enum pipe pipe = crtc->pipe; | ||
383 | struct intel_plane *plane = to_intel_plane(plane_state->base.plane); | ||
384 | enum plane_id plane_id = plane->id; | 382 | enum plane_id plane_id = plane->id; |
385 | 383 | ||
386 | static const u16 input_csc_matrix[][9] = { | 384 | static const u16 input_csc_matrix[][9] = { |
@@ -508,28 +506,12 @@ skl_program_plane(struct intel_plane *plane, | |||
508 | 506 | ||
509 | spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); | 507 | spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); |
510 | 508 | ||
511 | if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) | ||
512 | I915_WRITE_FW(PLANE_COLOR_CTL(pipe, plane_id), | ||
513 | plane_state->color_ctl); | ||
514 | |||
515 | if (fb->format->is_yuv && icl_is_hdr_plane(plane)) | ||
516 | icl_program_input_csc_coeff(crtc_state, plane_state); | ||
517 | |||
518 | I915_WRITE_FW(PLANE_KEYVAL(pipe, plane_id), key->min_value); | ||
519 | I915_WRITE_FW(PLANE_KEYMAX(pipe, plane_id), keymax); | ||
520 | I915_WRITE_FW(PLANE_KEYMSK(pipe, plane_id), keymsk); | ||
521 | |||
522 | I915_WRITE_FW(PLANE_OFFSET(pipe, plane_id), (y << 16) | x); | ||
523 | I915_WRITE_FW(PLANE_STRIDE(pipe, plane_id), stride); | 509 | I915_WRITE_FW(PLANE_STRIDE(pipe, plane_id), stride); |
510 | I915_WRITE_FW(PLANE_POS(pipe, plane_id), (crtc_y << 16) | crtc_x); | ||
524 | I915_WRITE_FW(PLANE_SIZE(pipe, plane_id), (src_h << 16) | src_w); | 511 | I915_WRITE_FW(PLANE_SIZE(pipe, plane_id), (src_h << 16) | src_w); |
525 | I915_WRITE_FW(PLANE_AUX_DIST(pipe, plane_id), | 512 | I915_WRITE_FW(PLANE_AUX_DIST(pipe, plane_id), |
526 | (plane_state->color_plane[1].offset - surf_addr) | aux_stride); | 513 | (plane_state->color_plane[1].offset - surf_addr) | aux_stride); |
527 | 514 | ||
528 | if (INTEL_GEN(dev_priv) < 11) | ||
529 | I915_WRITE_FW(PLANE_AUX_OFFSET(pipe, plane_id), | ||
530 | (plane_state->color_plane[1].y << 16) | | ||
531 | plane_state->color_plane[1].x); | ||
532 | |||
533 | if (icl_is_hdr_plane(plane)) { | 515 | if (icl_is_hdr_plane(plane)) { |
534 | u32 cus_ctl = 0; | 516 | u32 cus_ctl = 0; |
535 | 517 | ||
@@ -551,15 +533,38 @@ skl_program_plane(struct intel_plane *plane, | |||
551 | I915_WRITE_FW(PLANE_CUS_CTL(pipe, plane_id), cus_ctl); | 533 | I915_WRITE_FW(PLANE_CUS_CTL(pipe, plane_id), cus_ctl); |
552 | } | 534 | } |
553 | 535 | ||
554 | if (!slave && plane_state->scaler_id >= 0) | 536 | if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) |
555 | skl_program_scaler(plane, crtc_state, plane_state); | 537 | I915_WRITE_FW(PLANE_COLOR_CTL(pipe, plane_id), |
538 | plane_state->color_ctl); | ||
556 | 539 | ||
557 | I915_WRITE_FW(PLANE_POS(pipe, plane_id), (crtc_y << 16) | crtc_x); | 540 | if (fb->format->is_yuv && icl_is_hdr_plane(plane)) |
541 | icl_program_input_csc(plane, crtc_state, plane_state); | ||
542 | |||
543 | skl_write_plane_wm(plane, crtc_state); | ||
558 | 544 | ||
545 | I915_WRITE_FW(PLANE_KEYVAL(pipe, plane_id), key->min_value); | ||
546 | I915_WRITE_FW(PLANE_KEYMSK(pipe, plane_id), keymsk); | ||
547 | I915_WRITE_FW(PLANE_KEYMAX(pipe, plane_id), keymax); | ||
548 | |||
549 | I915_WRITE_FW(PLANE_OFFSET(pipe, plane_id), (y << 16) | x); | ||
550 | |||
551 | if (INTEL_GEN(dev_priv) < 11) | ||
552 | I915_WRITE_FW(PLANE_AUX_OFFSET(pipe, plane_id), | ||
553 | (plane_state->color_plane[1].y << 16) | | ||
554 | plane_state->color_plane[1].x); | ||
555 | |||
556 | /* | ||
557 | * The control register self-arms if the plane was previously | ||
558 | * disabled. Try to make the plane enable atomic by writing | ||
559 | * the control register just before the surface register. | ||
560 | */ | ||
559 | I915_WRITE_FW(PLANE_CTL(pipe, plane_id), plane_ctl); | 561 | I915_WRITE_FW(PLANE_CTL(pipe, plane_id), plane_ctl); |
560 | I915_WRITE_FW(PLANE_SURF(pipe, plane_id), | 562 | I915_WRITE_FW(PLANE_SURF(pipe, plane_id), |
561 | intel_plane_ggtt_offset(plane_state) + surf_addr); | 563 | intel_plane_ggtt_offset(plane_state) + surf_addr); |
562 | 564 | ||
565 | if (!slave && plane_state->scaler_id >= 0) | ||
566 | skl_program_scaler(plane, crtc_state, plane_state); | ||
567 | |||
563 | spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); | 568 | spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); |
564 | } | 569 | } |
565 | 570 | ||
@@ -589,7 +594,8 @@ icl_update_slave(struct intel_plane *plane, | |||
589 | } | 594 | } |
590 | 595 | ||
591 | static void | 596 | static void |
592 | skl_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc) | 597 | skl_disable_plane(struct intel_plane *plane, |
598 | const struct intel_crtc_state *crtc_state) | ||
593 | { | 599 | { |
594 | struct drm_i915_private *dev_priv = to_i915(plane->base.dev); | 600 | struct drm_i915_private *dev_priv = to_i915(plane->base.dev); |
595 | enum plane_id plane_id = plane->id; | 601 | enum plane_id plane_id = plane->id; |
@@ -598,6 +604,8 @@ skl_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc) | |||
598 | 604 | ||
599 | spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); | 605 | spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); |
600 | 606 | ||
607 | skl_write_plane_wm(plane, crtc_state); | ||
608 | |||
601 | I915_WRITE_FW(PLANE_CTL(pipe, plane_id), 0); | 609 | I915_WRITE_FW(PLANE_CTL(pipe, plane_id), 0); |
602 | I915_WRITE_FW(PLANE_SURF(pipe, plane_id), 0); | 610 | I915_WRITE_FW(PLANE_SURF(pipe, plane_id), 0); |
603 | 611 | ||
@@ -819,35 +827,41 @@ vlv_update_plane(struct intel_plane *plane, | |||
819 | 827 | ||
820 | spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); | 828 | spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); |
821 | 829 | ||
822 | vlv_update_clrc(plane_state); | 830 | I915_WRITE_FW(SPSTRIDE(pipe, plane_id), |
831 | plane_state->color_plane[0].stride); | ||
832 | I915_WRITE_FW(SPPOS(pipe, plane_id), (crtc_y << 16) | crtc_x); | ||
833 | I915_WRITE_FW(SPSIZE(pipe, plane_id), (crtc_h << 16) | crtc_w); | ||
834 | I915_WRITE_FW(SPCONSTALPHA(pipe, plane_id), 0); | ||
823 | 835 | ||
824 | if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) | 836 | if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) |
825 | chv_update_csc(plane_state); | 837 | chv_update_csc(plane_state); |
826 | 838 | ||
827 | if (key->flags) { | 839 | if (key->flags) { |
828 | I915_WRITE_FW(SPKEYMINVAL(pipe, plane_id), key->min_value); | 840 | I915_WRITE_FW(SPKEYMINVAL(pipe, plane_id), key->min_value); |
829 | I915_WRITE_FW(SPKEYMAXVAL(pipe, plane_id), key->max_value); | ||
830 | I915_WRITE_FW(SPKEYMSK(pipe, plane_id), key->channel_mask); | 841 | I915_WRITE_FW(SPKEYMSK(pipe, plane_id), key->channel_mask); |
842 | I915_WRITE_FW(SPKEYMAXVAL(pipe, plane_id), key->max_value); | ||
831 | } | 843 | } |
832 | I915_WRITE_FW(SPSTRIDE(pipe, plane_id), | ||
833 | plane_state->color_plane[0].stride); | ||
834 | I915_WRITE_FW(SPPOS(pipe, plane_id), (crtc_y << 16) | crtc_x); | ||
835 | 844 | ||
836 | I915_WRITE_FW(SPTILEOFF(pipe, plane_id), (y << 16) | x); | ||
837 | I915_WRITE_FW(SPLINOFF(pipe, plane_id), linear_offset); | 845 | I915_WRITE_FW(SPLINOFF(pipe, plane_id), linear_offset); |
846 | I915_WRITE_FW(SPTILEOFF(pipe, plane_id), (y << 16) | x); | ||
838 | 847 | ||
839 | I915_WRITE_FW(SPCONSTALPHA(pipe, plane_id), 0); | 848 | /* |
840 | 849 | * The control register self-arms if the plane was previously | |
841 | I915_WRITE_FW(SPSIZE(pipe, plane_id), (crtc_h << 16) | crtc_w); | 850 | * disabled. Try to make the plane enable atomic by writing |
851 | * the control register just before the surface register. | ||
852 | */ | ||
842 | I915_WRITE_FW(SPCNTR(pipe, plane_id), sprctl); | 853 | I915_WRITE_FW(SPCNTR(pipe, plane_id), sprctl); |
843 | I915_WRITE_FW(SPSURF(pipe, plane_id), | 854 | I915_WRITE_FW(SPSURF(pipe, plane_id), |
844 | intel_plane_ggtt_offset(plane_state) + sprsurf_offset); | 855 | intel_plane_ggtt_offset(plane_state) + sprsurf_offset); |
845 | 856 | ||
857 | vlv_update_clrc(plane_state); | ||
858 | |||
846 | spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); | 859 | spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); |
847 | } | 860 | } |
848 | 861 | ||
849 | static void | 862 | static void |
850 | vlv_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc) | 863 | vlv_disable_plane(struct intel_plane *plane, |
864 | const struct intel_crtc_state *crtc_state) | ||
851 | { | 865 | { |
852 | struct drm_i915_private *dev_priv = to_i915(plane->base.dev); | 866 | struct drm_i915_private *dev_priv = to_i915(plane->base.dev); |
853 | enum pipe pipe = plane->pipe; | 867 | enum pipe pipe = plane->pipe; |
@@ -980,27 +994,32 @@ ivb_update_plane(struct intel_plane *plane, | |||
980 | 994 | ||
981 | spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); | 995 | spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); |
982 | 996 | ||
997 | I915_WRITE_FW(SPRSTRIDE(pipe), plane_state->color_plane[0].stride); | ||
998 | I915_WRITE_FW(SPRPOS(pipe), (crtc_y << 16) | crtc_x); | ||
999 | I915_WRITE_FW(SPRSIZE(pipe), (crtc_h << 16) | crtc_w); | ||
1000 | if (IS_IVYBRIDGE(dev_priv)) | ||
1001 | I915_WRITE_FW(SPRSCALE(pipe), sprscale); | ||
1002 | |||
983 | if (key->flags) { | 1003 | if (key->flags) { |
984 | I915_WRITE_FW(SPRKEYVAL(pipe), key->min_value); | 1004 | I915_WRITE_FW(SPRKEYVAL(pipe), key->min_value); |
985 | I915_WRITE_FW(SPRKEYMAX(pipe), key->max_value); | ||
986 | I915_WRITE_FW(SPRKEYMSK(pipe), key->channel_mask); | 1005 | I915_WRITE_FW(SPRKEYMSK(pipe), key->channel_mask); |
1006 | I915_WRITE_FW(SPRKEYMAX(pipe), key->max_value); | ||
987 | } | 1007 | } |
988 | 1008 | ||
989 | I915_WRITE_FW(SPRSTRIDE(pipe), plane_state->color_plane[0].stride); | ||
990 | I915_WRITE_FW(SPRPOS(pipe), (crtc_y << 16) | crtc_x); | ||
991 | |||
992 | /* HSW consolidates SPRTILEOFF and SPRLINOFF into a single SPROFFSET | 1009 | /* HSW consolidates SPRTILEOFF and SPRLINOFF into a single SPROFFSET |
993 | * register */ | 1010 | * register */ |
994 | if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { | 1011 | if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { |
995 | I915_WRITE_FW(SPROFFSET(pipe), (y << 16) | x); | 1012 | I915_WRITE_FW(SPROFFSET(pipe), (y << 16) | x); |
996 | } else { | 1013 | } else { |
997 | I915_WRITE_FW(SPRTILEOFF(pipe), (y << 16) | x); | ||
998 | I915_WRITE_FW(SPRLINOFF(pipe), linear_offset); | 1014 | I915_WRITE_FW(SPRLINOFF(pipe), linear_offset); |
1015 | I915_WRITE_FW(SPRTILEOFF(pipe), (y << 16) | x); | ||
999 | } | 1016 | } |
1000 | 1017 | ||
1001 | I915_WRITE_FW(SPRSIZE(pipe), (crtc_h << 16) | crtc_w); | 1018 | /* |
1002 | if (IS_IVYBRIDGE(dev_priv)) | 1019 | * The control register self-arms if the plane was previously |
1003 | I915_WRITE_FW(SPRSCALE(pipe), sprscale); | 1020 | * disabled. Try to make the plane enable atomic by writing |
1021 | * the control register just before the surface register. | ||
1022 | */ | ||
1004 | I915_WRITE_FW(SPRCTL(pipe), sprctl); | 1023 | I915_WRITE_FW(SPRCTL(pipe), sprctl); |
1005 | I915_WRITE_FW(SPRSURF(pipe), | 1024 | I915_WRITE_FW(SPRSURF(pipe), |
1006 | intel_plane_ggtt_offset(plane_state) + sprsurf_offset); | 1025 | intel_plane_ggtt_offset(plane_state) + sprsurf_offset); |
@@ -1009,7 +1028,8 @@ ivb_update_plane(struct intel_plane *plane, | |||
1009 | } | 1028 | } |
1010 | 1029 | ||
1011 | static void | 1030 | static void |
1012 | ivb_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc) | 1031 | ivb_disable_plane(struct intel_plane *plane, |
1032 | const struct intel_crtc_state *crtc_state) | ||
1013 | { | 1033 | { |
1014 | struct drm_i915_private *dev_priv = to_i915(plane->base.dev); | 1034 | struct drm_i915_private *dev_priv = to_i915(plane->base.dev); |
1015 | enum pipe pipe = plane->pipe; | 1035 | enum pipe pipe = plane->pipe; |
@@ -1018,7 +1038,7 @@ ivb_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc) | |||
1018 | spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); | 1038 | spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); |
1019 | 1039 | ||
1020 | I915_WRITE_FW(SPRCTL(pipe), 0); | 1040 | I915_WRITE_FW(SPRCTL(pipe), 0); |
1021 | /* Can't leave the scaler enabled... */ | 1041 | /* Disable the scaler */ |
1022 | if (IS_IVYBRIDGE(dev_priv)) | 1042 | if (IS_IVYBRIDGE(dev_priv)) |
1023 | I915_WRITE_FW(SPRSCALE(pipe), 0); | 1043 | I915_WRITE_FW(SPRSCALE(pipe), 0); |
1024 | I915_WRITE_FW(SPRSURF(pipe), 0); | 1044 | I915_WRITE_FW(SPRSURF(pipe), 0); |
@@ -1148,20 +1168,25 @@ g4x_update_plane(struct intel_plane *plane, | |||
1148 | 1168 | ||
1149 | spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); | 1169 | spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); |
1150 | 1170 | ||
1171 | I915_WRITE_FW(DVSSTRIDE(pipe), plane_state->color_plane[0].stride); | ||
1172 | I915_WRITE_FW(DVSPOS(pipe), (crtc_y << 16) | crtc_x); | ||
1173 | I915_WRITE_FW(DVSSIZE(pipe), (crtc_h << 16) | crtc_w); | ||
1174 | I915_WRITE_FW(DVSSCALE(pipe), dvsscale); | ||
1175 | |||
1151 | if (key->flags) { | 1176 | if (key->flags) { |
1152 | I915_WRITE_FW(DVSKEYVAL(pipe), key->min_value); | 1177 | I915_WRITE_FW(DVSKEYVAL(pipe), key->min_value); |
1153 | I915_WRITE_FW(DVSKEYMAX(pipe), key->max_value); | ||
1154 | I915_WRITE_FW(DVSKEYMSK(pipe), key->channel_mask); | 1178 | I915_WRITE_FW(DVSKEYMSK(pipe), key->channel_mask); |
1179 | I915_WRITE_FW(DVSKEYMAX(pipe), key->max_value); | ||
1155 | } | 1180 | } |
1156 | 1181 | ||
1157 | I915_WRITE_FW(DVSSTRIDE(pipe), plane_state->color_plane[0].stride); | ||
1158 | I915_WRITE_FW(DVSPOS(pipe), (crtc_y << 16) | crtc_x); | ||
1159 | |||
1160 | I915_WRITE_FW(DVSTILEOFF(pipe), (y << 16) | x); | ||
1161 | I915_WRITE_FW(DVSLINOFF(pipe), linear_offset); | 1182 | I915_WRITE_FW(DVSLINOFF(pipe), linear_offset); |
1183 | I915_WRITE_FW(DVSTILEOFF(pipe), (y << 16) | x); | ||
1162 | 1184 | ||
1163 | I915_WRITE_FW(DVSSIZE(pipe), (crtc_h << 16) | crtc_w); | 1185 | /* |
1164 | I915_WRITE_FW(DVSSCALE(pipe), dvsscale); | 1186 | * The control register self-arms if the plane was previously |
1187 | * disabled. Try to make the plane enable atomic by writing | ||
1188 | * the control register just before the surface register. | ||
1189 | */ | ||
1165 | I915_WRITE_FW(DVSCNTR(pipe), dvscntr); | 1190 | I915_WRITE_FW(DVSCNTR(pipe), dvscntr); |
1166 | I915_WRITE_FW(DVSSURF(pipe), | 1191 | I915_WRITE_FW(DVSSURF(pipe), |
1167 | intel_plane_ggtt_offset(plane_state) + dvssurf_offset); | 1192 | intel_plane_ggtt_offset(plane_state) + dvssurf_offset); |
@@ -1170,7 +1195,8 @@ g4x_update_plane(struct intel_plane *plane, | |||
1170 | } | 1195 | } |
1171 | 1196 | ||
1172 | static void | 1197 | static void |
1173 | g4x_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc) | 1198 | g4x_disable_plane(struct intel_plane *plane, |
1199 | const struct intel_crtc_state *crtc_state) | ||
1174 | { | 1200 | { |
1175 | struct drm_i915_private *dev_priv = to_i915(plane->base.dev); | 1201 | struct drm_i915_private *dev_priv = to_i915(plane->base.dev); |
1176 | enum pipe pipe = plane->pipe; | 1202 | enum pipe pipe = plane->pipe; |
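The sprite and plane hunks above all apply the same reordering: the double-buffered registers are written first, and the control register is written immediately before the surface register, whose write latches the whole update. A minimal user-space sketch of that ordering, with a hypothetical write_reg() standing in for I915_WRITE_FW() (illustrative only, not kernel code):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for I915_WRITE_FW(); just logs the access. */
static void write_reg(const char *name, uint32_t val)
{
	printf("%-6s <- 0x%08x\n", name, val);
}

static void update_plane(uint32_t ctl, uint32_t surf,
			 uint32_t pos, uint32_t size)
{
	/* Double-buffered registers first; they only latch on the SURF write. */
	write_reg("POS", pos);
	write_reg("SIZE", size);
	/*
	 * The control register self-arms if the plane was previously
	 * disabled, so it goes just before the surface register to keep
	 * the plane enable as close to atomic as possible.
	 */
	write_reg("CTL", ctl);
	write_reg("SURF", surf);
}

int main(void)
{
	update_plane(0x80000000u, 0x00100000u,
		     (100u << 16) | 200u, (719u << 16) | 1279u);
	return 0;
}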
diff --git a/drivers/gpu/drm/i915/intel_vdsc.c b/drivers/gpu/drm/i915/intel_vdsc.c new file mode 100644 index 000000000000..c56ba0e04044 --- /dev/null +++ b/drivers/gpu/drm/i915/intel_vdsc.c | |||
@@ -0,0 +1,1088 @@ | |||
1 | // SPDX-License-Identifier: MIT | ||
2 | /* | ||
3 | * Copyright © 2018 Intel Corporation | ||
4 | * | ||
5 | * Author: Gaurav K Singh <gaurav.k.singh@intel.com> | ||
6 | * Manasi Navare <manasi.d.navare@intel.com> | ||
7 | */ | ||
8 | |||
9 | #include <drm/drmP.h> | ||
10 | #include <drm/i915_drm.h> | ||
11 | #include "i915_drv.h" | ||
12 | #include "intel_drv.h" | ||
13 | |||
14 | enum ROW_INDEX_BPP { | ||
15 | ROW_INDEX_6BPP = 0, | ||
16 | ROW_INDEX_8BPP, | ||
17 | ROW_INDEX_10BPP, | ||
18 | ROW_INDEX_12BPP, | ||
19 | ROW_INDEX_15BPP, | ||
20 | MAX_ROW_INDEX | ||
21 | }; | ||
22 | |||
23 | enum COLUMN_INDEX_BPC { | ||
24 | COLUMN_INDEX_8BPC = 0, | ||
25 | COLUMN_INDEX_10BPC, | ||
26 | COLUMN_INDEX_12BPC, | ||
27 | COLUMN_INDEX_14BPC, | ||
28 | COLUMN_INDEX_16BPC, | ||
29 | MAX_COLUMN_INDEX | ||
30 | }; | ||
31 | |||
32 | #define DSC_SUPPORTED_VERSION_MIN 1 | ||
33 | |||
34 | /* From DSC_v1.11 spec, the rc_parameter_set syntax element is typically constant */ | ||
35 | static u16 rc_buf_thresh[] = { | ||
36 | 896, 1792, 2688, 3584, 4480, 5376, 6272, 6720, 7168, 7616, | ||
37 | 7744, 7872, 8000, 8064 | ||
38 | }; | ||
39 | |||
40 | struct rc_parameters { | ||
41 | u16 initial_xmit_delay; | ||
42 | u8 first_line_bpg_offset; | ||
43 | u16 initial_offset; | ||
44 | u8 flatness_min_qp; | ||
45 | u8 flatness_max_qp; | ||
46 | u8 rc_quant_incr_limit0; | ||
47 | u8 rc_quant_incr_limit1; | ||
48 | struct drm_dsc_rc_range_parameters rc_range_params[DSC_NUM_BUF_RANGES]; | ||
49 | }; | ||
50 | |||
51 | /* | ||
52 | * Selected Rate Control Related Parameter Recommended Values | ||
53 | * from DSC_v1.11 spec & C Model release: DSC_model_20161212 | ||
54 | */ | ||
55 | static struct rc_parameters rc_params[][MAX_COLUMN_INDEX] = { | ||
56 | { | ||
57 | /* 6BPP/8BPC */ | ||
58 | { 768, 15, 6144, 3, 13, 11, 11, { | ||
59 | { 0, 4, 0 }, { 1, 6, -2 }, { 3, 8, -2 }, { 4, 8, -4 }, | ||
60 | { 5, 9, -6 }, { 5, 9, -6 }, { 6, 9, -6 }, { 6, 10, -8 }, | ||
61 | { 7, 11, -8 }, { 8, 12, -10 }, { 9, 12, -10 }, { 10, 12, -12 }, | ||
62 | { 10, 12, -12 }, { 11, 12, -12 }, { 13, 14, -12 } | ||
63 | } | ||
64 | }, | ||
65 | /* 6BPP/10BPC */ | ||
66 | { 768, 15, 6144, 7, 17, 15, 15, { | ||
67 | { 0, 8, 0 }, { 3, 10, -2 }, { 7, 12, -2 }, { 8, 12, -4 }, | ||
68 | { 9, 13, -6 }, { 9, 13, -6 }, { 10, 13, -6 }, { 10, 14, -8 }, | ||
69 | { 11, 15, -8 }, { 12, 16, -10 }, { 13, 16, -10 }, | ||
70 | { 14, 16, -12 }, { 14, 16, -12 }, { 15, 16, -12 }, | ||
71 | { 17, 18, -12 } | ||
72 | } | ||
73 | }, | ||
74 | /* 6BPP/12BPC */ | ||
75 | { 768, 15, 6144, 11, 21, 19, 19, { | ||
76 | { 0, 12, 0 }, { 5, 14, -2 }, { 11, 16, -2 }, { 12, 16, -4 }, | ||
77 | { 13, 17, -6 }, { 13, 17, -6 }, { 14, 17, -6 }, { 14, 18, -8 }, | ||
78 | { 15, 19, -8 }, { 16, 20, -10 }, { 17, 20, -10 }, | ||
79 | { 18, 20, -12 }, { 18, 20, -12 }, { 19, 20, -12 }, | ||
80 | { 21, 22, -12 } | ||
81 | } | ||
82 | }, | ||
83 | /* 6BPP/14BPC */ | ||
84 | { 768, 15, 6144, 15, 25, 23, 27, { | ||
85 | { 0, 16, 0 }, { 7, 18, -2 }, { 15, 20, -2 }, { 16, 20, -4 }, | ||
86 | { 17, 21, -6 }, { 17, 21, -6 }, { 18, 21, -6 }, { 18, 22, -8 }, | ||
87 | { 19, 23, -8 }, { 20, 24, -10 }, { 21, 24, -10 }, | ||
88 | { 22, 24, -12 }, { 22, 24, -12 }, { 23, 24, -12 }, | ||
89 | { 25, 26, -12 } | ||
90 | } | ||
91 | }, | ||
92 | /* 6BPP/16BPC */ | ||
93 | { 768, 15, 6144, 19, 29, 27, 27, { | ||
94 | { 0, 20, 0 }, { 9, 22, -2 }, { 19, 24, -2 }, { 20, 24, -4 }, | ||
95 | { 21, 25, -6 }, { 21, 25, -6 }, { 22, 25, -6 }, { 22, 26, -8 }, | ||
96 | { 23, 27, -8 }, { 24, 28, -10 }, { 25, 28, -10 }, | ||
97 | { 26, 28, -12 }, { 26, 28, -12 }, { 27, 28, -12 }, | ||
98 | { 29, 30, -12 } | ||
99 | } | ||
100 | }, | ||
101 | }, | ||
102 | { | ||
103 | /* 8BPP/8BPC */ | ||
104 | { 512, 12, 6144, 3, 12, 11, 11, { | ||
105 | { 0, 4, 2 }, { 0, 4, 0 }, { 1, 5, 0 }, { 1, 6, -2 }, | ||
106 | { 3, 7, -4 }, { 3, 7, -6 }, { 3, 7, -8 }, { 3, 8, -8 }, | ||
107 | { 3, 9, -8 }, { 3, 10, -10 }, { 5, 11, -10 }, { 5, 12, -12 }, | ||
108 | { 5, 13, -12 }, { 7, 13, -12 }, { 13, 15, -12 } | ||
109 | } | ||
110 | }, | ||
111 | /* 8BPP/10BPC */ | ||
112 | { 512, 12, 6144, 7, 16, 15, 15, { | ||
113 | { 0, 4, 2 }, { 4, 8, 0 }, { 5, 9, 0 }, { 5, 10, -2 }, | ||
114 | { 7, 11, -4 }, { 7, 11, -6 }, { 7, 11, -8 }, { 7, 12, -8 }, | ||
115 | { 7, 13, -8 }, { 7, 14, -10 }, { 9, 15, -10 }, { 9, 16, -12 }, | ||
116 | { 9, 17, -12 }, { 11, 17, -12 }, { 17, 19, -12 } | ||
117 | } | ||
118 | }, | ||
119 | /* 8BPP/12BPC */ | ||
120 | { 512, 12, 6144, 11, 20, 19, 19, { | ||
121 | { 0, 12, 2 }, { 4, 12, 0 }, { 9, 13, 0 }, { 9, 14, -2 }, | ||
122 | { 11, 15, -4 }, { 11, 15, -6 }, { 11, 15, -8 }, { 11, 16, -8 }, | ||
123 | { 11, 17, -8 }, { 11, 18, -10 }, { 13, 19, -10 }, | ||
124 | { 13, 20, -12 }, { 13, 21, -12 }, { 15, 21, -12 }, | ||
125 | { 21, 23, -12 } | ||
126 | } | ||
127 | }, | ||
128 | /* 8BPP/14BPC */ | ||
129 | { 512, 12, 6144, 15, 24, 23, 23, { | ||
130 | { 0, 12, 0 }, { 5, 13, 0 }, { 11, 15, 0 }, { 12, 17, -2 }, | ||
131 | { 15, 19, -4 }, { 15, 19, -6 }, { 15, 19, -8 }, { 15, 20, -8 }, | ||
132 | { 15, 21, -8 }, { 15, 22, -10 }, { 17, 22, -10 }, | ||
133 | { 17, 23, -12 }, { 17, 23, -12 }, { 21, 24, -12 }, | ||
134 | { 24, 25, -12 } | ||
135 | } | ||
136 | }, | ||
137 | /* 8BPP/16BPC */ | ||
138 | { 512, 12, 6144, 19, 28, 27, 27, { | ||
139 | { 0, 12, 2 }, { 6, 14, 0 }, { 13, 17, 0 }, { 15, 20, -2 }, | ||
140 | { 19, 23, -4 }, { 19, 23, -6 }, { 19, 23, -8 }, { 19, 24, -8 }, | ||
141 | { 19, 25, -8 }, { 19, 26, -10 }, { 21, 26, -10 }, | ||
142 | { 21, 27, -12 }, { 21, 27, -12 }, { 25, 28, -12 }, | ||
143 | { 28, 29, -12 } | ||
144 | } | ||
145 | }, | ||
146 | }, | ||
147 | { | ||
148 | /* 10BPP/8BPC */ | ||
149 | { 410, 15, 5632, 3, 12, 11, 11, { | ||
150 | { 0, 3, 2 }, { 0, 4, 0 }, { 1, 5, 0 }, { 2, 6, -2 }, | ||
151 | { 3, 7, -4 }, { 3, 7, -6 }, { 3, 7, -8 }, { 3, 8, -8 }, | ||
152 | { 3, 9, -8 }, { 3, 9, -10 }, { 5, 10, -10 }, { 5, 10, -10 }, | ||
153 | { 5, 11, -12 }, { 7, 11, -12 }, { 11, 12, -12 } | ||
154 | } | ||
155 | }, | ||
156 | /* 10BPP/10BPC */ | ||
157 | { 410, 15, 5632, 7, 16, 15, 15, { | ||
158 | { 0, 7, 2 }, { 4, 8, 0 }, { 5, 9, 0 }, { 6, 10, -2 }, | ||
159 | { 7, 11, -4 }, { 7, 11, -6 }, { 7, 11, -8 }, { 7, 12, -8 }, | ||
160 | { 7, 13, -8 }, { 7, 13, -10 }, { 9, 14, -10 }, { 9, 14, -10 }, | ||
161 | { 9, 15, -12 }, { 11, 15, -12 }, { 15, 16, -12 } | ||
162 | } | ||
163 | }, | ||
164 | /* 10BPP/12BPC */ | ||
165 | { 410, 15, 5632, 11, 20, 19, 19, { | ||
166 | { 0, 11, 2 }, { 4, 12, 0 }, { 9, 13, 0 }, { 10, 14, -2 }, | ||
167 | { 11, 15, -4 }, { 11, 15, -6 }, { 11, 15, -8 }, { 11, 16, -8 }, | ||
168 | { 11, 17, -8 }, { 11, 17, -10 }, { 13, 18, -10 }, | ||
169 | { 13, 18, -10 }, { 13, 19, -12 }, { 15, 19, -12 }, | ||
170 | { 19, 20, -12 } | ||
171 | } | ||
172 | }, | ||
173 | /* 10BPP/14BPC */ | ||
174 | { 410, 15, 5632, 15, 24, 23, 23, { | ||
175 | { 0, 11, 2 }, { 5, 13, 0 }, { 11, 15, 0 }, { 13, 18, -2 }, | ||
176 | { 15, 19, -4 }, { 15, 19, -6 }, { 15, 19, -8 }, { 15, 20, -8 }, | ||
177 | { 15, 21, -8 }, { 15, 21, -10 }, { 17, 22, -10 }, | ||
178 | { 17, 22, -10 }, { 17, 23, -12 }, { 19, 23, -12 }, | ||
179 | { 23, 24, -12 } | ||
180 | } | ||
181 | }, | ||
182 | /* 10BPP/16BPC */ | ||
183 | { 410, 15, 5632, 19, 28, 27, 27, { | ||
184 | { 0, 11, 2 }, { 6, 14, 0 }, { 13, 17, 0 }, { 16, 20, -2 }, | ||
185 | { 19, 23, -4 }, { 19, 23, -6 }, { 19, 23, -8 }, { 19, 24, -8 }, | ||
186 | { 19, 25, -8 }, { 19, 25, -10 }, { 21, 26, -10 }, | ||
187 | { 21, 26, -10 }, { 21, 27, -12 }, { 23, 27, -12 }, | ||
188 | { 27, 28, -12 } | ||
189 | } | ||
190 | }, | ||
191 | }, | ||
192 | { | ||
193 | /* 12BPP/8BPC */ | ||
194 | { 341, 15, 2048, 3, 12, 11, 11, { | ||
195 | { 0, 2, 2 }, { 0, 4, 0 }, { 1, 5, 0 }, { 1, 6, -2 }, | ||
196 | { 3, 7, -4 }, { 3, 7, -6 }, { 3, 7, -8 }, { 3, 8, -8 }, | ||
197 | { 3, 9, -8 }, { 3, 10, -10 }, { 5, 11, -10 }, | ||
198 | { 5, 12, -12 }, { 5, 13, -12 }, { 7, 13, -12 }, { 13, 15, -12 } | ||
199 | } | ||
200 | }, | ||
201 | /* 12BPP/10BPC */ | ||
202 | { 341, 15, 2048, 7, 16, 15, 15, { | ||
203 | { 0, 2, 2 }, { 2, 5, 0 }, { 3, 7, 0 }, { 4, 8, -2 }, | ||
204 | { 6, 9, -4 }, { 7, 10, -6 }, { 7, 11, -8 }, { 7, 12, -8 }, | ||
205 | { 7, 13, -8 }, { 7, 14, -10 }, { 9, 15, -10 }, { 9, 16, -12 }, | ||
206 | { 9, 17, -12 }, { 11, 17, -12 }, { 17, 19, -12 } | ||
207 | } | ||
208 | }, | ||
209 | /* 12BPP/12BPC */ | ||
210 | { 341, 15, 2048, 11, 20, 19, 19, { | ||
211 | { 0, 6, 2 }, { 4, 9, 0 }, { 7, 11, 0 }, { 8, 12, -2 }, | ||
212 | { 10, 13, -4 }, { 11, 14, -6 }, { 11, 15, -8 }, { 11, 16, -8 }, | ||
213 | { 11, 17, -8 }, { 11, 18, -10 }, { 13, 19, -10 }, | ||
214 | { 13, 20, -12 }, { 13, 21, -12 }, { 15, 21, -12 }, | ||
215 | { 21, 23, -12 } | ||
216 | } | ||
217 | }, | ||
218 | /* 12BPP/14BPC */ | ||
219 | { 341, 15, 2048, 15, 24, 23, 23, { | ||
220 | { 0, 6, 2 }, { 7, 10, 0 }, { 9, 13, 0 }, { 11, 16, -2 }, | ||
221 | { 14, 17, -4 }, { 15, 18, -6 }, { 15, 19, -8 }, { 15, 20, -8 }, | ||
222 | { 15, 20, -8 }, { 15, 21, -10 }, { 17, 21, -10 }, | ||
223 | { 17, 21, -12 }, { 17, 21, -12 }, { 19, 22, -12 }, | ||
224 | { 22, 23, -12 } | ||
225 | } | ||
226 | }, | ||
227 | /* 12BPP/16BPC */ | ||
228 | { 341, 15, 2048, 19, 28, 27, 27, { | ||
229 | { 0, 6, 2 }, { 6, 11, 0 }, { 11, 15, 0 }, { 14, 18, -2 }, | ||
230 | { 18, 21, -4 }, { 19, 22, -6 }, { 19, 23, -8 }, { 19, 24, -8 }, | ||
231 | { 19, 24, -8 }, { 19, 25, -10 }, { 21, 25, -10 }, | ||
232 | { 21, 25, -12 }, { 21, 25, -12 }, { 23, 26, -12 }, | ||
233 | { 26, 27, -12 } | ||
234 | } | ||
235 | }, | ||
236 | }, | ||
237 | { | ||
238 | /* 15BPP/8BPC */ | ||
239 | { 273, 15, 2048, 3, 12, 11, 11, { | ||
240 | { 0, 0, 10 }, { 0, 1, 8 }, { 0, 1, 6 }, { 0, 2, 4 }, | ||
241 | { 1, 2, 2 }, { 1, 3, 0 }, { 1, 3, -2 }, { 2, 4, -4 }, | ||
242 | { 2, 5, -6 }, { 3, 5, -8 }, { 4, 6, -10 }, { 4, 7, -10 }, | ||
243 | { 5, 7, -12 }, { 7, 8, -12 }, { 8, 9, -12 } | ||
244 | } | ||
245 | }, | ||
246 | /* 15BPP/10BPC */ | ||
247 | { 273, 15, 2048, 7, 16, 15, 15, { | ||
248 | { 0, 2, 10 }, { 2, 5, 8 }, { 3, 5, 6 }, { 4, 6, 4 }, | ||
249 | { 5, 6, 2 }, { 5, 7, 0 }, { 5, 7, -2 }, { 6, 8, -4 }, | ||
250 | { 6, 9, -6 }, { 7, 9, -8 }, { 8, 10, -10 }, { 8, 11, -10 }, | ||
251 | { 9, 11, -12 }, { 11, 12, -12 }, { 12, 13, -12 } | ||
252 | } | ||
253 | }, | ||
254 | /* 15BPP/12BPC */ | ||
255 | { 273, 15, 2048, 11, 20, 19, 19, { | ||
256 | { 0, 4, 10 }, { 2, 7, 8 }, { 4, 9, 6 }, { 6, 11, 4 }, | ||
257 | { 9, 11, 2 }, { 9, 11, 0 }, { 9, 12, -2 }, { 10, 12, -4 }, | ||
258 | { 11, 13, -6 }, { 11, 13, -8 }, { 12, 14, -10 }, | ||
259 | { 13, 15, -10 }, { 13, 15, -12 }, { 15, 16, -12 }, | ||
260 | { 16, 17, -12 } | ||
261 | } | ||
262 | }, | ||
263 | /* 15BPP/14BPC */ | ||
264 | { 273, 15, 2048, 15, 24, 23, 23, { | ||
265 | { 0, 4, 10 }, { 3, 8, 8 }, { 6, 11, 6 }, { 9, 14, 4 }, | ||
266 | { 13, 15, 2 }, { 13, 15, 0 }, { 13, 16, -2 }, { 14, 16, -4 }, | ||
267 | { 15, 17, -6 }, { 15, 17, -8 }, { 16, 18, -10 }, | ||
268 | { 17, 19, -10 }, { 17, 19, -12 }, { 19, 20, -12 }, | ||
269 | { 20, 21, -12 } | ||
270 | } | ||
271 | }, | ||
272 | /* 15BPP/16BPC */ | ||
273 | { 273, 15, 2048, 19, 28, 27, 27, { | ||
274 | { 0, 4, 10 }, { 4, 9, 8 }, { 8, 13, 6 }, { 12, 17, 4 }, | ||
275 | { 17, 19, 2 }, { 17, 20, 0 }, { 17, 20, -2 }, { 18, 20, -4 }, | ||
276 | { 19, 21, -6 }, { 19, 21, -8 }, { 20, 22, -10 }, | ||
277 | { 21, 23, -10 }, { 21, 23, -12 }, { 23, 24, -12 }, | ||
278 | { 24, 25, -12 } | ||
279 | } | ||
280 | } | ||
281 | } | ||
282 | |||
283 | }; | ||
284 | |||
285 | static int get_row_index_for_rc_params(u16 compressed_bpp) | ||
286 | { | ||
287 | switch (compressed_bpp) { | ||
288 | case 6: | ||
289 | return ROW_INDEX_6BPP; | ||
290 | case 8: | ||
291 | return ROW_INDEX_8BPP; | ||
292 | case 10: | ||
293 | return ROW_INDEX_10BPP; | ||
294 | case 12: | ||
295 | return ROW_INDEX_12BPP; | ||
296 | case 15: | ||
297 | return ROW_INDEX_15BPP; | ||
298 | default: | ||
299 | return -EINVAL; | ||
300 | } | ||
301 | } | ||
302 | |||
303 | static int get_column_index_for_rc_params(u8 bits_per_component) | ||
304 | { | ||
305 | switch (bits_per_component) { | ||
306 | case 8: | ||
307 | return COLUMN_INDEX_8BPC; | ||
308 | case 10: | ||
309 | return COLUMN_INDEX_10BPC; | ||
310 | case 12: | ||
311 | return COLUMN_INDEX_12BPC; | ||
312 | case 14: | ||
313 | return COLUMN_INDEX_14BPC; | ||
314 | case 16: | ||
315 | return COLUMN_INDEX_16BPC; | ||
316 | default: | ||
317 | return -EINVAL; | ||
318 | } | ||
319 | } | ||
320 | |||
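Together the two helpers above pick a row (by compressed bpp) and a column (by bits per component) into the rc_params table. A standalone sketch of the lookup, quoting only the first two scalar fields of the 8BPP/10BPC entry from the table above:

#include <stdio.h>

enum row { ROW_6BPP, ROW_8BPP, ROW_10BPP, ROW_12BPP, ROW_15BPP, MAX_ROW };
enum col { COL_8BPC, COL_10BPC, COL_12BPC, COL_14BPC, COL_16BPC, MAX_COL };

/* Reduced to the two leading scalar fields for brevity. */
struct rc_entry {
	unsigned short initial_xmit_delay;
	unsigned char first_line_bpg_offset;
};

int main(void)
{
	/* Sparse stand-in for rc_params[][]; values from the 8BPP/10BPC entry. */
	static const struct rc_entry params[MAX_ROW][MAX_COL] = {
		[ROW_8BPP][COL_10BPC] = { 512, 12 },
	};
	const struct rc_entry *e = &params[ROW_8BPP][COL_10BPC];

	printf("8bpp/10bpc: initial_xmit_delay=%u first_line_bpg_offset=%u\n",
	       e->initial_xmit_delay, e->first_line_bpg_offset);
	return 0;
}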
321 | static int intel_compute_rc_parameters(struct drm_dsc_config *vdsc_cfg) | ||
322 | { | ||
323 | unsigned long groups_per_line = 0; | ||
324 | unsigned long groups_total = 0; | ||
325 | unsigned long num_extra_mux_bits = 0; | ||
326 | unsigned long slice_bits = 0; | ||
327 | unsigned long hrd_delay = 0; | ||
328 | unsigned long final_scale = 0; | ||
329 | unsigned long rbs_min = 0; | ||
330 | |||
331 | /* Number of groups used to code each line of a slice */ | ||
332 | groups_per_line = DIV_ROUND_UP(vdsc_cfg->slice_width, | ||
333 | DSC_RC_PIXELS_PER_GROUP); | ||
334 | |||
335 | /* Chunk size in bytes */ | ||
336 | vdsc_cfg->slice_chunk_size = DIV_ROUND_UP(vdsc_cfg->slice_width * | ||
337 | vdsc_cfg->bits_per_pixel, | ||
338 | (8 * 16)); | ||
339 | |||
340 | if (vdsc_cfg->convert_rgb) | ||
341 | num_extra_mux_bits = 3 * (vdsc_cfg->mux_word_size + | ||
342 | (4 * vdsc_cfg->bits_per_component + 4) | ||
343 | - 2); | ||
344 | else | ||
345 | num_extra_mux_bits = 3 * vdsc_cfg->mux_word_size + | ||
346 | (4 * vdsc_cfg->bits_per_component + 4) + | ||
347 | 2 * (4 * vdsc_cfg->bits_per_component) - 2; | ||
348 | /* Number of bits in one Slice */ | ||
349 | slice_bits = 8 * vdsc_cfg->slice_chunk_size * vdsc_cfg->slice_height; | ||
350 | |||
351 | while ((num_extra_mux_bits > 0) && | ||
352 | ((slice_bits - num_extra_mux_bits) % vdsc_cfg->mux_word_size)) | ||
353 | num_extra_mux_bits--; | ||
354 | |||
355 | if (groups_per_line < vdsc_cfg->initial_scale_value - 8) | ||
356 | vdsc_cfg->initial_scale_value = groups_per_line + 8; | ||
357 | |||
358 | /* scale_decrement_interval calculation according to DSC spec 1.11 */ | ||
359 | if (vdsc_cfg->initial_scale_value > 8) | ||
360 | vdsc_cfg->scale_decrement_interval = groups_per_line / | ||
361 | (vdsc_cfg->initial_scale_value - 8); | ||
362 | else | ||
363 | vdsc_cfg->scale_decrement_interval = DSC_SCALE_DECREMENT_INTERVAL_MAX; | ||
364 | |||
365 | vdsc_cfg->final_offset = vdsc_cfg->rc_model_size - | ||
366 | (vdsc_cfg->initial_xmit_delay * | ||
367 | vdsc_cfg->bits_per_pixel + 8) / 16 + num_extra_mux_bits; | ||
368 | |||
369 | if (vdsc_cfg->final_offset >= vdsc_cfg->rc_model_size) { | ||
370 | DRM_DEBUG_KMS("FinalOfs < RcModelSze for this InitialXmitDelay\n"); | ||
371 | return -ERANGE; | ||
372 | } | ||
373 | |||
374 | final_scale = (vdsc_cfg->rc_model_size * 8) / | ||
375 | (vdsc_cfg->rc_model_size - vdsc_cfg->final_offset); | ||
376 | if (vdsc_cfg->slice_height > 1) | ||
377 | /* | ||
378 | * NflBpgOffset is 16 bit value with 11 fractional bits | ||
379 | * hence we multiply by 2^11 for preserving the | ||
380 | * fractional part | ||
381 | */ | ||
382 | vdsc_cfg->nfl_bpg_offset = DIV_ROUND_UP((vdsc_cfg->first_line_bpg_offset << 11), | ||
383 | (vdsc_cfg->slice_height - 1)); | ||
384 | else | ||
385 | vdsc_cfg->nfl_bpg_offset = 0; | ||
386 | |||
387 | /* 2^16 - 1 */ | ||
388 | if (vdsc_cfg->nfl_bpg_offset > 65535) { | ||
389 | DRM_DEBUG_KMS("NflBpgOffset is too large for this slice height\n"); | ||
390 | return -ERANGE; | ||
391 | } | ||
392 | |||
393 | /* Number of groups used to code the entire slice */ | ||
394 | groups_total = groups_per_line * vdsc_cfg->slice_height; | ||
395 | |||
396 | /* slice_bpg_offset is 16 bit value with 11 fractional bits */ | ||
397 | vdsc_cfg->slice_bpg_offset = DIV_ROUND_UP(((vdsc_cfg->rc_model_size - | ||
398 | vdsc_cfg->initial_offset + | ||
399 | num_extra_mux_bits) << 11), | ||
400 | groups_total); | ||
401 | |||
402 | if (final_scale > 9) { | ||
403 | /* | ||
404 | * ScaleIncrementInterval = | ||
405 | * finaloffset / ((NflBpgOffset + SliceBpgOffset) * 8 * (finalscale - 1.125)) | ||
406 | * As (NflBpgOffset + SliceBpgOffset) carries an 11 bit fractional | ||
407 | * value, the numerator is scaled by 2^11 to compensate. | ||
408 | */ | ||
409 | vdsc_cfg->scale_increment_interval = | ||
410 | (vdsc_cfg->final_offset * (1 << 11)) / | ||
411 | ((vdsc_cfg->nfl_bpg_offset + | ||
412 | vdsc_cfg->slice_bpg_offset) * | ||
413 | (final_scale - 9)); | ||
414 | } else { | ||
415 | /* | ||
416 | * If finalScaleValue is less than or equal to 9, a value of 0 should | ||
417 | * be used to disable the scale increment at the end of the slice | ||
418 | */ | ||
419 | vdsc_cfg->scale_increment_interval = 0; | ||
420 | } | ||
421 | |||
422 | if (vdsc_cfg->scale_increment_interval > 65535) { | ||
423 | DRM_DEBUG_KMS("ScaleIncrementInterval is large for slice height\n"); | ||
424 | return -ERANGE; | ||
425 | } | ||
426 | |||
427 | /* | ||
428 | * DSC spec mentions that bits_per_pixel specifies the target | ||
429 | * bits/pixel (bpp) rate that is used by the encoder, | ||
430 | * in steps of 1/16 of a bit per pixel | ||
431 | */ | ||
432 | rbs_min = vdsc_cfg->rc_model_size - vdsc_cfg->initial_offset + | ||
433 | DIV_ROUND_UP(vdsc_cfg->initial_xmit_delay * | ||
434 | vdsc_cfg->bits_per_pixel, 16) + | ||
435 | groups_per_line * vdsc_cfg->first_line_bpg_offset; | ||
436 | |||
437 | hrd_delay = DIV_ROUND_UP((rbs_min * 16), vdsc_cfg->bits_per_pixel); | ||
438 | vdsc_cfg->rc_bits = (hrd_delay * vdsc_cfg->bits_per_pixel) / 16; | ||
439 | vdsc_cfg->initial_dec_delay = hrd_delay - vdsc_cfg->initial_xmit_delay; | ||
440 | |||
441 | return 0; | ||
442 | } | ||
443 | |||
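The fixed-point conventions in intel_compute_rc_parameters() deserve a worked example: bits_per_pixel is carried in 1/16 bpp units, and the chunk-size expression divides by 8 * 16 to land back in bytes. A standalone sketch with assumed inputs (a 1920-wide picture split into four slices at a target of 8 bpp):

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	/* Assumed inputs: 1920-wide picture, 4 slices, 8 bpp target. */
	unsigned int slice_width = DIV_ROUND_UP(1920, 4);  /* 480 */
	unsigned int bits_per_pixel = 8 << 4;              /* 8 bpp in 1/16 units = 128 */

	/* Same expression as the chunk-size computation above, in bytes. */
	unsigned int chunk = DIV_ROUND_UP(slice_width * bits_per_pixel, 8 * 16);

	printf("slice_chunk_size = %u bytes\n", chunk);    /* prints 480 */
	return 0;
}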
444 | int intel_dp_compute_dsc_params(struct intel_dp *intel_dp, | ||
445 | struct intel_crtc_state *pipe_config) | ||
446 | { | ||
447 | struct drm_dsc_config *vdsc_cfg = &pipe_config->dp_dsc_cfg; | ||
448 | u16 compressed_bpp = pipe_config->dsc_params.compressed_bpp; | ||
449 | u8 i = 0; | ||
450 | int row_index = 0; | ||
451 | int column_index = 0; | ||
452 | u8 line_buf_depth = 0; | ||
453 | |||
454 | vdsc_cfg->pic_width = pipe_config->base.adjusted_mode.crtc_hdisplay; | ||
455 | vdsc_cfg->pic_height = pipe_config->base.adjusted_mode.crtc_vdisplay; | ||
456 | vdsc_cfg->slice_width = DIV_ROUND_UP(vdsc_cfg->pic_width, | ||
457 | pipe_config->dsc_params.slice_count); | ||
458 | /* | ||
459 | * Slice Height of 8 works for all currently available panels. So start | ||
460 | * with that if pic_height is an integral multiple of 8. | ||
461 | * Eventually add logic to try multiple slice heights. | ||
462 | */ | ||
463 | if (vdsc_cfg->pic_height % 8 == 0) | ||
464 | vdsc_cfg->slice_height = 8; | ||
465 | else if (vdsc_cfg->pic_height % 4 == 0) | ||
466 | vdsc_cfg->slice_height = 4; | ||
467 | else | ||
468 | vdsc_cfg->slice_height = 2; | ||
469 | |||
470 | /* Values filled from DSC Sink DPCD */ | ||
471 | vdsc_cfg->dsc_version_major = | ||
472 | (intel_dp->dsc_dpcd[DP_DSC_REV - DP_DSC_SUPPORT] & | ||
473 | DP_DSC_MAJOR_MASK) >> DP_DSC_MAJOR_SHIFT; | ||
474 | vdsc_cfg->dsc_version_minor = | ||
475 | min(DSC_SUPPORTED_VERSION_MIN, | ||
476 | (intel_dp->dsc_dpcd[DP_DSC_REV - DP_DSC_SUPPORT] & | ||
477 | DP_DSC_MINOR_MASK) >> DP_DSC_MINOR_SHIFT); | ||
478 | |||
479 | vdsc_cfg->convert_rgb = intel_dp->dsc_dpcd[DP_DSC_DEC_COLOR_FORMAT_CAP - DP_DSC_SUPPORT] & | ||
480 | DP_DSC_RGB; | ||
481 | |||
482 | line_buf_depth = drm_dp_dsc_sink_line_buf_depth(intel_dp->dsc_dpcd); | ||
483 | if (!line_buf_depth) { | ||
484 | DRM_DEBUG_KMS("DSC Sink Line Buffer Depth invalid\n"); | ||
485 | return -EINVAL; | ||
486 | } | ||
487 | if (vdsc_cfg->dsc_version_minor == 2) | ||
488 | vdsc_cfg->line_buf_depth = (line_buf_depth == DSC_1_2_MAX_LINEBUF_DEPTH_BITS) ? | ||
489 | DSC_1_2_MAX_LINEBUF_DEPTH_VAL : line_buf_depth; | ||
490 | else | ||
491 | vdsc_cfg->line_buf_depth = (line_buf_depth > DSC_1_1_MAX_LINEBUF_DEPTH_BITS) ? | ||
492 | DSC_1_1_MAX_LINEBUF_DEPTH_BITS : line_buf_depth; | ||
493 | |||
494 | /* Gen 11 does not support YCbCr */ | ||
495 | vdsc_cfg->enable422 = false; | ||
496 | /* Gen 11 does not support VBR */ | ||
497 | vdsc_cfg->vbr_enable = false; | ||
498 | vdsc_cfg->block_pred_enable = | ||
499 | intel_dp->dsc_dpcd[DP_DSC_BLK_PREDICTION_SUPPORT - DP_DSC_SUPPORT] & | ||
500 | DP_DSC_BLK_PREDICTION_IS_SUPPORTED; | ||
501 | |||
502 | /* Gen 11 only supports integral values of bpp */ | ||
503 | vdsc_cfg->bits_per_pixel = compressed_bpp << 4; | ||
504 | vdsc_cfg->bits_per_component = pipe_config->pipe_bpp / 3; | ||
505 | |||
506 | for (i = 0; i < DSC_NUM_BUF_RANGES - 1; i++) { | ||
507 | /* | ||
508 | * Six 0s are appended to the LSB of each threshold value | ||
509 | * internally in h/w, so only 8 bits are available for | ||
510 | * programming RcBufThreshold. | ||
511 | */ | ||
512 | vdsc_cfg->rc_buf_thresh[i] = rc_buf_thresh[i] >> 6; | ||
513 | } | ||
514 | |||
515 | /* | ||
516 | * For 6bpp, RC Buffer threshold 12 and 13 need a different value | ||
517 | * as per C Model | ||
518 | */ | ||
519 | if (compressed_bpp == 6) { | ||
520 | vdsc_cfg->rc_buf_thresh[12] = 0x7C; | ||
521 | vdsc_cfg->rc_buf_thresh[13] = 0x7D; | ||
522 | } | ||
523 | |||
524 | row_index = get_row_index_for_rc_params(compressed_bpp); | ||
525 | column_index = | ||
526 | get_column_index_for_rc_params(vdsc_cfg->bits_per_component); | ||
527 | |||
528 | if (row_index < 0 || column_index < 0) | ||
529 | return -EINVAL; | ||
530 | |||
531 | vdsc_cfg->first_line_bpg_offset = | ||
532 | rc_params[row_index][column_index].first_line_bpg_offset; | ||
533 | vdsc_cfg->initial_xmit_delay = | ||
534 | rc_params[row_index][column_index].initial_xmit_delay; | ||
535 | vdsc_cfg->initial_offset = | ||
536 | rc_params[row_index][column_index].initial_offset; | ||
537 | vdsc_cfg->flatness_min_qp = | ||
538 | rc_params[row_index][column_index].flatness_min_qp; | ||
539 | vdsc_cfg->flatness_max_qp = | ||
540 | rc_params[row_index][column_index].flatness_max_qp; | ||
541 | vdsc_cfg->rc_quant_incr_limit0 = | ||
542 | rc_params[row_index][column_index].rc_quant_incr_limit0; | ||
543 | vdsc_cfg->rc_quant_incr_limit1 = | ||
544 | rc_params[row_index][column_index].rc_quant_incr_limit1; | ||
545 | |||
546 | for (i = 0; i < DSC_NUM_BUF_RANGES; i++) { | ||
547 | vdsc_cfg->rc_range_params[i].range_min_qp = | ||
548 | rc_params[row_index][column_index].rc_range_params[i].range_min_qp; | ||
549 | vdsc_cfg->rc_range_params[i].range_max_qp = | ||
550 | rc_params[row_index][column_index].rc_range_params[i].range_max_qp; | ||
551 | /* | ||
552 | * Range BPG Offset uses 2's complement and is only 6 bits wide, | ||
553 | * so mask it down to 6 bits. | ||
554 | */ | ||
555 | vdsc_cfg->rc_range_params[i].range_bpg_offset = | ||
556 | rc_params[row_index][column_index].rc_range_params[i].range_bpg_offset & | ||
557 | DSC_RANGE_BPG_OFFSET_MASK; | ||
558 | } | ||
559 | |||
560 | /* | ||
561 | * BitsPerComponent value determines mux_word_size: | ||
562 | * When BitsPerComponent is 12bpc, muxWordSize will be equal to 64 bits | ||
563 | * When BitsPerComponent is 8 or 10bpc, muxWordSize will be equal to | ||
564 | * 48 bits | ||
565 | */ | ||
566 | if (vdsc_cfg->bits_per_component == 8 || | ||
567 | vdsc_cfg->bits_per_component == 10) | ||
568 | vdsc_cfg->mux_word_size = DSC_MUX_WORD_SIZE_8_10_BPC; | ||
569 | else if (vdsc_cfg->bits_per_component == 12) | ||
570 | vdsc_cfg->mux_word_size = DSC_MUX_WORD_SIZE_12_BPC; | ||
571 | |||
572 | /* RC_MODEL_SIZE is a constant across all configurations */ | ||
573 | vdsc_cfg->rc_model_size = DSC_RC_MODEL_SIZE_CONST; | ||
574 | /* InitialScaleValue is a 6 bit value with 3 fractional bits (U3.3) */ | ||
575 | vdsc_cfg->initial_scale_value = (vdsc_cfg->rc_model_size << 3) / | ||
576 | (vdsc_cfg->rc_model_size - vdsc_cfg->initial_offset); | ||
577 | |||
578 | return intel_compute_rc_parameters(vdsc_cfg); | ||
579 | } | ||
580 | |||
581 | enum intel_display_power_domain | ||
582 | intel_dsc_power_domain(const struct intel_crtc_state *crtc_state) | ||
583 | { | ||
584 | enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; | ||
585 | |||
586 | /* | ||
587 | * On ICL VDSC/joining for eDP transcoder uses a separate power well PW2 | ||
588 | * This requires POWER_DOMAIN_TRANSCODER_EDP_VDSC power domain. | ||
589 | * For any other transcoder, VDSC/joining uses the power well associated | ||
590 | * with the pipe/transcoder in use. Hence another reference on the | ||
591 | * transcoder power domain will suffice. | ||
592 | */ | ||
593 | if (cpu_transcoder == TRANSCODER_EDP) | ||
594 | return POWER_DOMAIN_TRANSCODER_EDP_VDSC; | ||
595 | else | ||
596 | return POWER_DOMAIN_TRANSCODER(cpu_transcoder); | ||
597 | } | ||
598 | |||
599 | static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder, | ||
600 | const struct intel_crtc_state *crtc_state) | ||
601 | { | ||
602 | struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); | ||
603 | struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); | ||
604 | const struct drm_dsc_config *vdsc_cfg = &crtc_state->dp_dsc_cfg; | ||
605 | enum pipe pipe = crtc->pipe; | ||
606 | enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; | ||
607 | u32 pps_val = 0; | ||
608 | u32 rc_buf_thresh_dword[4]; | ||
609 | u32 rc_range_params_dword[8]; | ||
610 | u8 num_vdsc_instances = (crtc_state->dsc_params.dsc_split) ? 2 : 1; | ||
611 | int i = 0; | ||
612 | |||
613 | /* Populate PICTURE_PARAMETER_SET_0 registers */ | ||
614 | pps_val = DSC_VER_MAJ | vdsc_cfg->dsc_version_minor << | ||
615 | DSC_VER_MIN_SHIFT | | ||
616 | vdsc_cfg->bits_per_component << DSC_BPC_SHIFT | | ||
617 | vdsc_cfg->line_buf_depth << DSC_LINE_BUF_DEPTH_SHIFT; | ||
618 | if (vdsc_cfg->block_pred_enable) | ||
619 | pps_val |= DSC_BLOCK_PREDICTION; | ||
620 | if (vdsc_cfg->convert_rgb) | ||
621 | pps_val |= DSC_COLOR_SPACE_CONVERSION; | ||
622 | if (vdsc_cfg->enable422) | ||
623 | pps_val |= DSC_422_ENABLE; | ||
624 | if (vdsc_cfg->vbr_enable) | ||
625 | pps_val |= DSC_VBR_ENABLE; | ||
626 | DRM_INFO("PPS0 = 0x%08x\n", pps_val); | ||
627 | if (cpu_transcoder == TRANSCODER_EDP) { | ||
628 | I915_WRITE(DSCA_PICTURE_PARAMETER_SET_0, pps_val); | ||
629 | /* | ||
630 | * If 2 VDSC instances are needed, configure PPS for second | ||
631 | * VDSC | ||
632 | */ | ||
633 | if (crtc_state->dsc_params.dsc_split) | ||
634 | I915_WRITE(DSCC_PICTURE_PARAMETER_SET_0, pps_val); | ||
635 | } else { | ||
636 | I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_0(pipe), pps_val); | ||
637 | if (crtc_state->dsc_params.dsc_split) | ||
638 | I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_0(pipe), | ||
639 | pps_val); | ||
640 | } | ||
641 | |||
642 | /* Populate PICTURE_PARAMETER_SET_1 registers */ | ||
643 | pps_val = 0; | ||
644 | pps_val |= DSC_BPP(vdsc_cfg->bits_per_pixel); | ||
645 | DRM_INFO("PPS1 = 0x%08x\n", pps_val); | ||
646 | if (cpu_transcoder == TRANSCODER_EDP) { | ||
647 | I915_WRITE(DSCA_PICTURE_PARAMETER_SET_1, pps_val); | ||
648 | /* | ||
649 | * If 2 VDSC instances are needed, configure PPS for second | ||
650 | * VDSC | ||
651 | */ | ||
652 | if (crtc_state->dsc_params.dsc_split) | ||
653 | I915_WRITE(DSCC_PICTURE_PARAMETER_SET_1, pps_val); | ||
654 | } else { | ||
655 | I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_1(pipe), pps_val); | ||
656 | if (crtc_state->dsc_params.dsc_split) | ||
657 | I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_1(pipe), | ||
658 | pps_val); | ||
659 | } | ||
660 | |||
661 | /* Populate PICTURE_PARAMETER_SET_2 registers */ | ||
662 | pps_val = 0; | ||
663 | pps_val |= DSC_PIC_HEIGHT(vdsc_cfg->pic_height) | | ||
664 | DSC_PIC_WIDTH(vdsc_cfg->pic_width / num_vdsc_instances); | ||
665 | DRM_INFO("PPS2 = 0x%08x\n", pps_val); | ||
666 | if (cpu_transcoder == TRANSCODER_EDP) { | ||
667 | I915_WRITE(DSCA_PICTURE_PARAMETER_SET_2, pps_val); | ||
668 | /* | ||
669 | * If 2 VDSC instances are needed, configure PPS for second | ||
670 | * VDSC | ||
671 | */ | ||
672 | if (crtc_state->dsc_params.dsc_split) | ||
673 | I915_WRITE(DSCC_PICTURE_PARAMETER_SET_2, pps_val); | ||
674 | } else { | ||
675 | I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_2(pipe), pps_val); | ||
676 | if (crtc_state->dsc_params.dsc_split) | ||
677 | I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_2(pipe), | ||
678 | pps_val); | ||
679 | } | ||
680 | |||
681 | /* Populate PICTURE_PARAMETER_SET_3 registers */ | ||
682 | pps_val = 0; | ||
683 | pps_val |= DSC_SLICE_HEIGHT(vdsc_cfg->slice_height) | | ||
684 | DSC_SLICE_WIDTH(vdsc_cfg->slice_width); | ||
685 | DRM_INFO("PPS3 = 0x%08x\n", pps_val); | ||
686 | if (cpu_transcoder == TRANSCODER_EDP) { | ||
687 | I915_WRITE(DSCA_PICTURE_PARAMETER_SET_3, pps_val); | ||
688 | /* | ||
689 | * If 2 VDSC instances are needed, configure PPS for second | ||
690 | * VDSC | ||
691 | */ | ||
692 | if (crtc_state->dsc_params.dsc_split) | ||
693 | I915_WRITE(DSCC_PICTURE_PARAMETER_SET_3, pps_val); | ||
694 | } else { | ||
695 | I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_3(pipe), pps_val); | ||
696 | if (crtc_state->dsc_params.dsc_split) | ||
697 | I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_3(pipe), | ||
698 | pps_val); | ||
699 | } | ||
700 | |||
701 | /* Populate PICTURE_PARAMETER_SET_4 registers */ | ||
702 | pps_val = 0; | ||
703 | pps_val |= DSC_INITIAL_XMIT_DELAY(vdsc_cfg->initial_xmit_delay) | | ||
704 | DSC_INITIAL_DEC_DELAY(vdsc_cfg->initial_dec_delay); | ||
705 | DRM_INFO("PPS4 = 0x%08x\n", pps_val); | ||
706 | if (cpu_transcoder == TRANSCODER_EDP) { | ||
707 | I915_WRITE(DSCA_PICTURE_PARAMETER_SET_4, pps_val); | ||
708 | /* | ||
709 | * If 2 VDSC instances are needed, configure PPS for second | ||
710 | * VDSC | ||
711 | */ | ||
712 | if (crtc_state->dsc_params.dsc_split) | ||
713 | I915_WRITE(DSCC_PICTURE_PARAMETER_SET_4, pps_val); | ||
714 | } else { | ||
715 | I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_4(pipe), pps_val); | ||
716 | if (crtc_state->dsc_params.dsc_split) | ||
717 | I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_4(pipe), | ||
718 | pps_val); | ||
719 | } | ||
720 | |||
721 | /* Populate PICTURE_PARAMETER_SET_5 registers */ | ||
722 | pps_val = 0; | ||
723 | pps_val |= DSC_SCALE_INC_INT(vdsc_cfg->scale_increment_interval) | | ||
724 | DSC_SCALE_DEC_INT(vdsc_cfg->scale_decrement_interval); | ||
725 | DRM_INFO("PPS5 = 0x%08x\n", pps_val); | ||
726 | if (cpu_transcoder == TRANSCODER_EDP) { | ||
727 | I915_WRITE(DSCA_PICTURE_PARAMETER_SET_5, pps_val); | ||
728 | /* | ||
729 | * If 2 VDSC instances are needed, configure PPS for second | ||
730 | * VDSC | ||
731 | */ | ||
732 | if (crtc_state->dsc_params.dsc_split) | ||
733 | I915_WRITE(DSCC_PICTURE_PARAMETER_SET_5, pps_val); | ||
734 | } else { | ||
735 | I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_5(pipe), pps_val); | ||
736 | if (crtc_state->dsc_params.dsc_split) | ||
737 | I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_5(pipe), | ||
738 | pps_val); | ||
739 | } | ||
740 | |||
741 | /* Populate PICTURE_PARAMETER_SET_6 registers */ | ||
742 | pps_val = 0; | ||
743 | pps_val |= DSC_INITIAL_SCALE_VALUE(vdsc_cfg->initial_scale_value) | | ||
744 | DSC_FIRST_LINE_BPG_OFFSET(vdsc_cfg->first_line_bpg_offset) | | ||
745 | DSC_FLATNESS_MIN_QP(vdsc_cfg->flatness_min_qp) | | ||
746 | DSC_FLATNESS_MAX_QP(vdsc_cfg->flatness_max_qp); | ||
747 | DRM_INFO("PPS6 = 0x%08x\n", pps_val); | ||
748 | if (cpu_transcoder == TRANSCODER_EDP) { | ||
749 | I915_WRITE(DSCA_PICTURE_PARAMETER_SET_6, pps_val); | ||
750 | /* | ||
751 | * If 2 VDSC instances are needed, configure PPS for second | ||
752 | * VDSC | ||
753 | */ | ||
754 | if (crtc_state->dsc_params.dsc_split) | ||
755 | I915_WRITE(DSCC_PICTURE_PARAMETER_SET_6, pps_val); | ||
756 | } else { | ||
757 | I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_6(pipe), pps_val); | ||
758 | if (crtc_state->dsc_params.dsc_split) | ||
759 | I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_6(pipe), | ||
760 | pps_val); | ||
761 | } | ||
762 | |||
763 | /* Populate PICTURE_PARAMETER_SET_7 registers */ | ||
764 | pps_val = 0; | ||
765 | pps_val |= DSC_SLICE_BPG_OFFSET(vdsc_cfg->slice_bpg_offset) | | ||
766 | DSC_NFL_BPG_OFFSET(vdsc_cfg->nfl_bpg_offset); | ||
767 | DRM_INFO("PPS7 = 0x%08x\n", pps_val); | ||
768 | if (cpu_transcoder == TRANSCODER_EDP) { | ||
769 | I915_WRITE(DSCA_PICTURE_PARAMETER_SET_7, pps_val); | ||
770 | /* | ||
771 | * If 2 VDSC instances are needed, configure PPS for second | ||
772 | * VDSC | ||
773 | */ | ||
774 | if (crtc_state->dsc_params.dsc_split) | ||
775 | I915_WRITE(DSCC_PICTURE_PARAMETER_SET_7, pps_val); | ||
776 | } else { | ||
777 | I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_7(pipe), pps_val); | ||
778 | if (crtc_state->dsc_params.dsc_split) | ||
779 | I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_7(pipe), | ||
780 | pps_val); | ||
781 | } | ||
782 | |||
783 | /* Populate PICTURE_PARAMETER_SET_8 registers */ | ||
784 | pps_val = 0; | ||
785 | pps_val |= DSC_FINAL_OFFSET(vdsc_cfg->final_offset) | | ||
786 | DSC_INITIAL_OFFSET(vdsc_cfg->initial_offset); | ||
787 | DRM_INFO("PPS8 = 0x%08x\n", pps_val); | ||
788 | if (cpu_transcoder == TRANSCODER_EDP) { | ||
789 | I915_WRITE(DSCA_PICTURE_PARAMETER_SET_8, pps_val); | ||
790 | /* | ||
791 | * If 2 VDSC instances are needed, configure PPS for second | ||
792 | * VDSC | ||
793 | */ | ||
794 | if (crtc_state->dsc_params.dsc_split) | ||
795 | I915_WRITE(DSCC_PICTURE_PARAMETER_SET_8, pps_val); | ||
796 | } else { | ||
797 | I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_8(pipe), pps_val); | ||
798 | if (crtc_state->dsc_params.dsc_split) | ||
799 | I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_8(pipe), | ||
800 | pps_val); | ||
801 | } | ||
802 | |||
803 | /* Populate PICTURE_PARAMETER_SET_9 registers */ | ||
804 | pps_val = 0; | ||
805 | pps_val |= DSC_RC_MODEL_SIZE(DSC_RC_MODEL_SIZE_CONST) | | ||
806 | DSC_RC_EDGE_FACTOR(DSC_RC_EDGE_FACTOR_CONST); | ||
807 | DRM_INFO("PPS9 = 0x%08x\n", pps_val); | ||
808 | if (cpu_transcoder == TRANSCODER_EDP) { | ||
809 | I915_WRITE(DSCA_PICTURE_PARAMETER_SET_9, pps_val); | ||
810 | /* | ||
811 | * If 2 VDSC instances are needed, configure PPS for second | ||
812 | * VDSC | ||
813 | */ | ||
814 | if (crtc_state->dsc_params.dsc_split) | ||
815 | I915_WRITE(DSCC_PICTURE_PARAMETER_SET_9, pps_val); | ||
816 | } else { | ||
817 | I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_9(pipe), pps_val); | ||
818 | if (crtc_state->dsc_params.dsc_split) | ||
819 | I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_9(pipe), | ||
820 | pps_val); | ||
821 | } | ||
822 | |||
823 | /* Populate PICTURE_PARAMETER_SET_10 registers */ | ||
824 | pps_val = 0; | ||
825 | pps_val |= DSC_RC_QUANT_INC_LIMIT0(vdsc_cfg->rc_quant_incr_limit0) | | ||
826 | DSC_RC_QUANT_INC_LIMIT1(vdsc_cfg->rc_quant_incr_limit1) | | ||
827 | DSC_RC_TARGET_OFF_HIGH(DSC_RC_TGT_OFFSET_HI_CONST) | | ||
828 | DSC_RC_TARGET_OFF_LOW(DSC_RC_TGT_OFFSET_LO_CONST); | ||
829 | DRM_INFO("PPS10 = 0x%08x\n", pps_val); | ||
830 | if (cpu_transcoder == TRANSCODER_EDP) { | ||
831 | I915_WRITE(DSCA_PICTURE_PARAMETER_SET_10, pps_val); | ||
832 | /* | ||
833 | * If 2 VDSC instances are needed, configure PPS for second | ||
834 | * VDSC | ||
835 | */ | ||
836 | if (crtc_state->dsc_params.dsc_split) | ||
837 | I915_WRITE(DSCC_PICTURE_PARAMETER_SET_10, pps_val); | ||
838 | } else { | ||
839 | I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_10(pipe), pps_val); | ||
840 | if (crtc_state->dsc_params.dsc_split) | ||
841 | I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_10(pipe), | ||
842 | pps_val); | ||
843 | } | ||
844 | |||
845 | /* Populate PICTURE_PARAMETER_SET_16 registers */ | ||
846 | pps_val = 0; | ||
847 | pps_val |= DSC_SLICE_CHUNK_SIZE(vdsc_cfg->slice_chunk_size) | | ||
848 | DSC_SLICE_PER_LINE((vdsc_cfg->pic_width / num_vdsc_instances) / | ||
849 | vdsc_cfg->slice_width) | | ||
850 | DSC_SLICE_ROW_PER_FRAME(vdsc_cfg->pic_height / | ||
851 | vdsc_cfg->slice_height); | ||
852 | DRM_INFO("PPS16 = 0x%08x\n", pps_val); | ||
853 | if (cpu_transcoder == TRANSCODER_EDP) { | ||
854 | I915_WRITE(DSCA_PICTURE_PARAMETER_SET_16, pps_val); | ||
855 | /* | ||
856 | * If 2 VDSC instances are needed, configure PPS for second | ||
857 | * VDSC | ||
858 | */ | ||
859 | if (crtc_state->dsc_params.dsc_split) | ||
860 | I915_WRITE(DSCC_PICTURE_PARAMETER_SET_16, pps_val); | ||
861 | } else { | ||
862 | I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_16(pipe), pps_val); | ||
863 | if (crtc_state->dsc_params.dsc_split) | ||
864 | I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_16(pipe), | ||
865 | pps_val); | ||
866 | } | ||
867 | |||
868 | /* Populate the RC_BUF_THRESH registers */ | ||
869 | memset(rc_buf_thresh_dword, 0, sizeof(rc_buf_thresh_dword)); | ||
870 | for (i = 0; i < DSC_NUM_BUF_RANGES - 1; i++) { | ||
871 | rc_buf_thresh_dword[i / 4] |= | ||
872 | (u32)(vdsc_cfg->rc_buf_thresh[i] << | ||
873 | BITS_PER_BYTE * (i % 4)); | ||
874 | DRM_INFO(" RC_BUF_THRESH%d = 0x%08x\n", i, | ||
875 | rc_buf_thresh_dword[i / 4]); | ||
876 | } | ||
877 | if (cpu_transcoder == TRANSCODER_EDP) { | ||
878 | I915_WRITE(DSCA_RC_BUF_THRESH_0, rc_buf_thresh_dword[0]); | ||
879 | I915_WRITE(DSCA_RC_BUF_THRESH_0_UDW, rc_buf_thresh_dword[1]); | ||
880 | I915_WRITE(DSCA_RC_BUF_THRESH_1, rc_buf_thresh_dword[2]); | ||
881 | I915_WRITE(DSCA_RC_BUF_THRESH_1_UDW, rc_buf_thresh_dword[3]); | ||
882 | if (crtc_state->dsc_params.dsc_split) { | ||
883 | I915_WRITE(DSCC_RC_BUF_THRESH_0, | ||
884 | rc_buf_thresh_dword[0]); | ||
885 | I915_WRITE(DSCC_RC_BUF_THRESH_0_UDW, | ||
886 | rc_buf_thresh_dword[1]); | ||
887 | I915_WRITE(DSCC_RC_BUF_THRESH_1, | ||
888 | rc_buf_thresh_dword[2]); | ||
889 | I915_WRITE(DSCC_RC_BUF_THRESH_1_UDW, | ||
890 | rc_buf_thresh_dword[3]); | ||
891 | } | ||
892 | } else { | ||
893 | I915_WRITE(ICL_DSC0_RC_BUF_THRESH_0(pipe), | ||
894 | rc_buf_thresh_dword[0]); | ||
895 | I915_WRITE(ICL_DSC0_RC_BUF_THRESH_0_UDW(pipe), | ||
896 | rc_buf_thresh_dword[1]); | ||
897 | I915_WRITE(ICL_DSC0_RC_BUF_THRESH_1(pipe), | ||
898 | rc_buf_thresh_dword[2]); | ||
899 | I915_WRITE(ICL_DSC0_RC_BUF_THRESH_1_UDW(pipe), | ||
900 | rc_buf_thresh_dword[3]); | ||
901 | if (crtc_state->dsc_params.dsc_split) { | ||
902 | I915_WRITE(ICL_DSC1_RC_BUF_THRESH_0(pipe), | ||
903 | rc_buf_thresh_dword[0]); | ||
904 | I915_WRITE(ICL_DSC1_RC_BUF_THRESH_0_UDW(pipe), | ||
905 | rc_buf_thresh_dword[1]); | ||
906 | I915_WRITE(ICL_DSC1_RC_BUF_THRESH_1(pipe), | ||
907 | rc_buf_thresh_dword[2]); | ||
908 | I915_WRITE(ICL_DSC1_RC_BUF_THRESH_1_UDW(pipe), | ||
909 | rc_buf_thresh_dword[3]); | ||
910 | } | ||
911 | } | ||
912 | |||
913 | /* Populate the RC_RANGE_PARAMETERS registers */ | ||
914 | memset(rc_range_params_dword, 0, sizeof(rc_range_params_dword)); | ||
915 | for (i = 0; i < DSC_NUM_BUF_RANGES; i++) { | ||
916 | rc_range_params_dword[i / 2] |= | ||
917 | (u32)(((vdsc_cfg->rc_range_params[i].range_bpg_offset << | ||
918 | RC_BPG_OFFSET_SHIFT) | | ||
919 | (vdsc_cfg->rc_range_params[i].range_max_qp << | ||
920 | RC_MAX_QP_SHIFT) | | ||
921 | (vdsc_cfg->rc_range_params[i].range_min_qp << | ||
922 | RC_MIN_QP_SHIFT)) << 16 * (i % 2)); | ||
923 | DRM_INFO(" RC_RANGE_PARAM_%d = 0x%08x\n", i, | ||
924 | rc_range_params_dword[i / 2]); | ||
925 | } | ||
926 | if (cpu_transcoder == TRANSCODER_EDP) { | ||
927 | I915_WRITE(DSCA_RC_RANGE_PARAMETERS_0, | ||
928 | rc_range_params_dword[0]); | ||
929 | I915_WRITE(DSCA_RC_RANGE_PARAMETERS_0_UDW, | ||
930 | rc_range_params_dword[1]); | ||
931 | I915_WRITE(DSCA_RC_RANGE_PARAMETERS_1, | ||
932 | rc_range_params_dword[2]); | ||
933 | I915_WRITE(DSCA_RC_RANGE_PARAMETERS_1_UDW, | ||
934 | rc_range_params_dword[3]); | ||
935 | I915_WRITE(DSCA_RC_RANGE_PARAMETERS_2, | ||
936 | rc_range_params_dword[4]); | ||
937 | I915_WRITE(DSCA_RC_RANGE_PARAMETERS_2_UDW, | ||
938 | rc_range_params_dword[5]); | ||
939 | I915_WRITE(DSCA_RC_RANGE_PARAMETERS_3, | ||
940 | rc_range_params_dword[6]); | ||
941 | I915_WRITE(DSCA_RC_RANGE_PARAMETERS_3_UDW, | ||
942 | rc_range_params_dword[7]); | ||
943 | if (crtc_state->dsc_params.dsc_split) { | ||
944 | I915_WRITE(DSCC_RC_RANGE_PARAMETERS_0, | ||
945 | rc_range_params_dword[0]); | ||
946 | I915_WRITE(DSCC_RC_RANGE_PARAMETERS_0_UDW, | ||
947 | rc_range_params_dword[1]); | ||
948 | I915_WRITE(DSCC_RC_RANGE_PARAMETERS_1, | ||
949 | rc_range_params_dword[2]); | ||
950 | I915_WRITE(DSCC_RC_RANGE_PARAMETERS_1_UDW, | ||
951 | rc_range_params_dword[3]); | ||
952 | I915_WRITE(DSCC_RC_RANGE_PARAMETERS_2, | ||
953 | rc_range_params_dword[4]); | ||
954 | I915_WRITE(DSCC_RC_RANGE_PARAMETERS_2_UDW, | ||
955 | rc_range_params_dword[5]); | ||
956 | I915_WRITE(DSCC_RC_RANGE_PARAMETERS_3, | ||
957 | rc_range_params_dword[6]); | ||
958 | I915_WRITE(DSCC_RC_RANGE_PARAMETERS_3_UDW, | ||
959 | rc_range_params_dword[7]); | ||
960 | } | ||
961 | } else { | ||
962 | I915_WRITE(ICL_DSC0_RC_RANGE_PARAMETERS_0(pipe), | ||
963 | rc_range_params_dword[0]); | ||
964 | I915_WRITE(ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW(pipe), | ||
965 | rc_range_params_dword[1]); | ||
966 | I915_WRITE(ICL_DSC0_RC_RANGE_PARAMETERS_1(pipe), | ||
967 | rc_range_params_dword[2]); | ||
968 | I915_WRITE(ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW(pipe), | ||
969 | rc_range_params_dword[3]); | ||
970 | I915_WRITE(ICL_DSC0_RC_RANGE_PARAMETERS_2(pipe), | ||
971 | rc_range_params_dword[4]); | ||
972 | I915_WRITE(ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW(pipe), | ||
973 | rc_range_params_dword[5]); | ||
974 | I915_WRITE(ICL_DSC0_RC_RANGE_PARAMETERS_3(pipe), | ||
975 | rc_range_params_dword[6]); | ||
976 | I915_WRITE(ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW(pipe), | ||
977 | rc_range_params_dword[7]); | ||
978 | if (crtc_state->dsc_params.dsc_split) { | ||
979 | I915_WRITE(ICL_DSC1_RC_RANGE_PARAMETERS_0(pipe), | ||
980 | rc_range_params_dword[0]); | ||
981 | I915_WRITE(ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW(pipe), | ||
982 | rc_range_params_dword[1]); | ||
983 | I915_WRITE(ICL_DSC1_RC_RANGE_PARAMETERS_1(pipe), | ||
984 | rc_range_params_dword[2]); | ||
985 | I915_WRITE(ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW(pipe), | ||
986 | rc_range_params_dword[3]); | ||
987 | I915_WRITE(ICL_DSC1_RC_RANGE_PARAMETERS_2(pipe), | ||
988 | rc_range_params_dword[4]); | ||
989 | I915_WRITE(ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW(pipe), | ||
990 | rc_range_params_dword[5]); | ||
991 | I915_WRITE(ICL_DSC1_RC_RANGE_PARAMETERS_3(pipe), | ||
992 | rc_range_params_dword[6]); | ||
993 | I915_WRITE(ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW(pipe), | ||
994 | rc_range_params_dword[7]); | ||
995 | } | ||
996 | } | ||
997 | } | ||
998 | |||
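Each PPS write in intel_configure_pps_for_dsc_encoder() repeats the same fan-out: the eDP transcoder uses the DSCA/DSCC instances, any other transcoder uses the per-pipe DSC0/DSC1 instances, and a split engages the second instance in either case. A sketch of how that selection could be expressed once (register offsets below are made up; the real ones live in i915_reg.h):

#include <stdint.h>
#include <stdio.h>

/* Fake register offsets; the real definitions live in i915_reg.h. */
#define DSCA_PPS0      0x6B200u  /* eDP transcoder, first VDSC */
#define DSCC_PPS0      0x6BA00u  /* eDP transcoder, second VDSC */
#define PIPE_DSC0_PPS0 0x78070u  /* per-pipe, first VDSC */
#define PIPE_DSC1_PPS0 0x78170u  /* per-pipe, second VDSC */

static void mmio_write(uint32_t reg, uint32_t val)
{
	printf("reg 0x%05x <- 0x%08x\n", reg, val);
}

/* One fan-out for all PPS registers instead of one copy per register. */
static void write_pps(int is_edp_transcoder, int dsc_split, uint32_t val)
{
	uint32_t first  = is_edp_transcoder ? DSCA_PPS0 : PIPE_DSC0_PPS0;
	uint32_t second = is_edp_transcoder ? DSCC_PPS0 : PIPE_DSC1_PPS0;

	mmio_write(first, val);
	if (dsc_split)		/* second VDSC instance mirrors the PPS */
		mmio_write(second, val);
}

int main(void)
{
	write_pps(1, 1, 0x12345678u);	/* eDP, split: DSCA then DSCC */
	return 0;
}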
999 | static void intel_dp_write_dsc_pps_sdp(struct intel_encoder *encoder, | ||
1000 | const struct intel_crtc_state *crtc_state) | ||
1001 | { | ||
1002 | struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); | ||
1003 | struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); | ||
1004 | const struct drm_dsc_config *vdsc_cfg = &crtc_state->dp_dsc_cfg; | ||
1005 | struct drm_dsc_pps_infoframe dp_dsc_pps_sdp; | ||
1006 | |||
1007 | /* Prepare DP SDP PPS header as per DP 1.4 spec, Table 2-123 */ | ||
1008 | drm_dsc_dp_pps_header_init(&dp_dsc_pps_sdp); | ||
1009 | |||
1010 | /* Fill the PPS payload bytes as per DSC spec 1.2 Table 4-1 */ | ||
1011 | drm_dsc_pps_infoframe_pack(&dp_dsc_pps_sdp, vdsc_cfg); | ||
1012 | |||
1013 | intel_dig_port->write_infoframe(encoder, crtc_state, | ||
1014 | DP_SDP_PPS, &dp_dsc_pps_sdp, | ||
1015 | sizeof(dp_dsc_pps_sdp)); | ||
1016 | } | ||
1017 | |||
1018 | void intel_dsc_enable(struct intel_encoder *encoder, | ||
1019 | const struct intel_crtc_state *crtc_state) | ||
1020 | { | ||
1021 | struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); | ||
1022 | struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); | ||
1023 | enum pipe pipe = crtc->pipe; | ||
1024 | i915_reg_t dss_ctl1_reg, dss_ctl2_reg; | ||
1025 | u32 dss_ctl1_val = 0; | ||
1026 | u32 dss_ctl2_val = 0; | ||
1027 | |||
1028 | if (!crtc_state->dsc_params.compression_enable) | ||
1029 | return; | ||
1030 | |||
1031 | /* Enable Power wells for VDSC/joining */ | ||
1032 | intel_display_power_get(dev_priv, | ||
1033 | intel_dsc_power_domain(crtc_state)); | ||
1034 | |||
1035 | intel_configure_pps_for_dsc_encoder(encoder, crtc_state); | ||
1036 | |||
1037 | intel_dp_write_dsc_pps_sdp(encoder, crtc_state); | ||
1038 | |||
1039 | if (crtc_state->cpu_transcoder == TRANSCODER_EDP) { | ||
1040 | dss_ctl1_reg = DSS_CTL1; | ||
1041 | dss_ctl2_reg = DSS_CTL2; | ||
1042 | } else { | ||
1043 | dss_ctl1_reg = ICL_PIPE_DSS_CTL1(pipe); | ||
1044 | dss_ctl2_reg = ICL_PIPE_DSS_CTL2(pipe); | ||
1045 | } | ||
1046 | dss_ctl2_val |= LEFT_BRANCH_VDSC_ENABLE; | ||
1047 | if (crtc_state->dsc_params.dsc_split) { | ||
1048 | dss_ctl2_val |= RIGHT_BRANCH_VDSC_ENABLE; | ||
1049 | dss_ctl1_val |= JOINER_ENABLE; | ||
1050 | } | ||
1051 | I915_WRITE(dss_ctl1_reg, dss_ctl1_val); | ||
1052 | I915_WRITE(dss_ctl2_reg, dss_ctl2_val); | ||
1053 | } | ||
1054 | |||
1055 | void intel_dsc_disable(const struct intel_crtc_state *old_crtc_state) | ||
1056 | { | ||
1057 | struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc); | ||
1058 | struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); | ||
1059 | enum pipe pipe = crtc->pipe; | ||
1060 | i915_reg_t dss_ctl1_reg, dss_ctl2_reg; | ||
1061 | u32 dss_ctl1_val = 0, dss_ctl2_val = 0; | ||
1062 | |||
1063 | if (!old_crtc_state->dsc_params.compression_enable) | ||
1064 | return; | ||
1065 | |||
1066 | if (old_crtc_state->cpu_transcoder == TRANSCODER_EDP) { | ||
1067 | dss_ctl1_reg = DSS_CTL1; | ||
1068 | dss_ctl2_reg = DSS_CTL2; | ||
1069 | } else { | ||
1070 | dss_ctl1_reg = ICL_PIPE_DSS_CTL1(pipe); | ||
1071 | dss_ctl2_reg = ICL_PIPE_DSS_CTL2(pipe); | ||
1072 | } | ||
1073 | dss_ctl1_val = I915_READ(dss_ctl1_reg); | ||
1074 | if (dss_ctl1_val & JOINER_ENABLE) | ||
1075 | dss_ctl1_val &= ~JOINER_ENABLE; | ||
1076 | I915_WRITE(dss_ctl1_reg, dss_ctl1_val); | ||
1077 | |||
1078 | dss_ctl2_val = I915_READ(dss_ctl2_reg); | ||
1079 | if (dss_ctl2_val & LEFT_BRANCH_VDSC_ENABLE || | ||
1080 | dss_ctl2_val & RIGHT_BRANCH_VDSC_ENABLE) | ||
1081 | dss_ctl2_val &= ~(LEFT_BRANCH_VDSC_ENABLE | | ||
1082 | RIGHT_BRANCH_VDSC_ENABLE); | ||
1083 | I915_WRITE(dss_ctl2_reg, dss_ctl2_val); | ||
1084 | |||
1085 | /* Disable power wells for VDSC/joining */ | ||
1086 | intel_display_power_put(dev_priv, | ||
1087 | intel_dsc_power_domain(old_crtc_state)); | ||
1088 | } | ||
diff --git a/drivers/gpu/drm/i915/intel_workarounds.c b/drivers/gpu/drm/i915/intel_workarounds.c index ca1f78a42b17..4f41e326f3f3 100644 --- a/drivers/gpu/drm/i915/intel_workarounds.c +++ b/drivers/gpu/drm/i915/intel_workarounds.c | |||
@@ -48,58 +48,112 @@ | |||
48 | * - Public functions to init or apply the given workaround type. | 48 | * - Public functions to init or apply the given workaround type. |
49 | */ | 49 | */ |
50 | 50 | ||
51 | static void wa_add(struct drm_i915_private *i915, | 51 | static void wa_init_start(struct i915_wa_list *wal, const char *name) |
52 | i915_reg_t reg, const u32 mask, const u32 val) | ||
53 | { | 52 | { |
54 | struct i915_workarounds *wa = &i915->workarounds; | 53 | wal->name = name; |
55 | unsigned int start = 0, end = wa->count; | 54 | } |
56 | unsigned int addr = i915_mmio_reg_offset(reg); | 55 | |
57 | struct i915_wa_reg *r; | 56 | #define WA_LIST_CHUNK (1 << 4) |
57 | |||
58 | static void wa_init_finish(struct i915_wa_list *wal) | ||
59 | { | ||
60 | /* Trim unused entries. */ | ||
61 | if (!IS_ALIGNED(wal->count, WA_LIST_CHUNK)) { | ||
62 | struct i915_wa *list = kmemdup(wal->list, | ||
63 | wal->count * sizeof(*list), | ||
64 | GFP_KERNEL); | ||
65 | |||
66 | if (list) { | ||
67 | kfree(wal->list); | ||
68 | wal->list = list; | ||
69 | } | ||
70 | } | ||
71 | |||
72 | if (!wal->count) | ||
73 | return; | ||
74 | |||
75 | DRM_DEBUG_DRIVER("Initialized %u %s workarounds\n", | ||
76 | wal->wa_count, wal->name); | ||
77 | } | ||
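[Editor's note] wa_init_finish() exists because _wa_add() grows the list in WA_LIST_CHUNK-sized steps, so the final allocation usually carries trailing slack; the kmemdup() simply reallocates down to the exact element count, and keeps the oversized buffer if the copy fails, which is safe if wasteful. The same trim-to-fit idiom in plain C, as a hedged sketch:

    #include <stdlib.h>
    #include <string.h>

    #define CHUNK 16  /* mirrors WA_LIST_CHUNK (1 << 4) */

    /* Shrink a chunk-grown buffer to exactly count elements. */
    static void *trim_to_fit(void *buf, size_t count, size_t elem_size)
    {
            void *exact;

            if (count % CHUNK == 0)  /* chunk-aligned: buffer is full */
                    return buf;

            exact = malloc(count * elem_size);
            if (!exact)              /* keep the oversized buffer on failure */
                    return buf;

            memcpy(exact, buf, count * elem_size);
            free(buf);
            return exact;
    }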
78 | |||
79 | static void _wa_add(struct i915_wa_list *wal, const struct i915_wa *wa) | ||
80 | { | ||
81 | unsigned int addr = i915_mmio_reg_offset(wa->reg); | ||
82 | unsigned int start = 0, end = wal->count; | ||
83 | const unsigned int grow = WA_LIST_CHUNK; | ||
84 | struct i915_wa *wa_; | ||
85 | |||
86 | GEM_BUG_ON(!is_power_of_2(grow)); | ||
87 | |||
88 | if (IS_ALIGNED(wal->count, grow)) { /* Either uninitialized or full. */ | ||
89 | struct i915_wa *list; | ||
90 | |||
91 | list = kmalloc_array(ALIGN(wal->count + 1, grow), sizeof(*wa), | ||
92 | GFP_KERNEL); | ||
93 | if (!list) { | ||
94 | DRM_ERROR("No space for workaround init!\n"); | ||
95 | return; | ||
96 | } | ||
97 | |||
98 | if (wal->list) | ||
99 | memcpy(list, wal->list, sizeof(*wa) * wal->count); | ||
100 | |||
101 | wal->list = list; | ||
102 | } | ||
58 | 103 | ||
59 | while (start < end) { | 104 | while (start < end) { |
60 | unsigned int mid = start + (end - start) / 2; | 105 | unsigned int mid = start + (end - start) / 2; |
61 | 106 | ||
62 | if (wa->reg[mid].addr < addr) { | 107 | if (i915_mmio_reg_offset(wal->list[mid].reg) < addr) { |
63 | start = mid + 1; | 108 | start = mid + 1; |
64 | } else if (wa->reg[mid].addr > addr) { | 109 | } else if (i915_mmio_reg_offset(wal->list[mid].reg) > addr) { |
65 | end = mid; | 110 | end = mid; |
66 | } else { | 111 | } else { |
67 | r = &wa->reg[mid]; | 112 | wa_ = &wal->list[mid]; |
68 | 113 | ||
69 | if ((mask & ~r->mask) == 0) { | 114 | if ((wa->mask & ~wa_->mask) == 0) { |
70 | DRM_ERROR("Discarding overwritten w/a for reg %04x (mask: %08x, value: %08x)\n", | 115 | DRM_ERROR("Discarding overwritten w/a for reg %04x (mask: %08x, value: %08x)\n", |
71 | addr, r->mask, r->value); | 116 | i915_mmio_reg_offset(wa_->reg), |
117 | wa_->mask, wa_->val); | ||
72 | 118 | ||
73 | r->value &= ~mask; | 119 | wa_->val &= ~wa->mask; |
74 | } | 120 | } |
75 | 121 | ||
76 | r->value |= val; | 122 | wal->wa_count++; |
77 | r->mask |= mask; | 123 | wa_->val |= wa->val; |
124 | wa_->mask |= wa->mask; | ||
78 | return; | 125 | return; |
79 | } | 126 | } |
80 | } | 127 | } |
81 | 128 | ||
82 | if (WARN_ON_ONCE(wa->count >= I915_MAX_WA_REGS)) { | 129 | wal->wa_count++; |
83 | DRM_ERROR("Dropping w/a for reg %04x (mask: %08x, value: %08x)\n", | 130 | wa_ = &wal->list[wal->count++]; |
84 | addr, mask, val); | 131 | *wa_ = *wa; |
85 | return; | ||
86 | } | ||
87 | |||
88 | r = &wa->reg[wa->count++]; | ||
89 | r->addr = addr; | ||
90 | r->value = val; | ||
91 | r->mask = mask; | ||
92 | 132 | ||
93 | while (r-- > wa->reg) { | 133 | while (wa_-- > wal->list) { |
94 | GEM_BUG_ON(r[0].addr == r[1].addr); | 134 | GEM_BUG_ON(i915_mmio_reg_offset(wa_[0].reg) == |
95 | if (r[1].addr > r[0].addr) | 135 | i915_mmio_reg_offset(wa_[1].reg)); |
136 | if (i915_mmio_reg_offset(wa_[1].reg) > | ||
137 | i915_mmio_reg_offset(wa_[0].reg)) | ||
96 | break; | 138 | break; |
97 | 139 | ||
98 | swap(r[1], r[0]); | 140 | swap(wa_[1], wa_[0]); |
99 | } | 141 | } |
100 | } | 142 | } |
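[Editor's note] _wa_add() keeps the list sorted by mmio offset so lookups stay logarithmic: a binary search either finds an existing entry to merge with (warning when the new mask fully shadows the old one) or falls through to an append, after which the new entry is bubbled backwards into position. The ordering scheme reduced to a plain-C sketch, with illustrative types and the chunked growth step omitted (the caller is assumed to have ensured capacity):

    #include <stddef.h>

    struct entry { unsigned int key, mask, val; };

    static void sorted_add(struct entry *list, size_t *count,
                           const struct entry *e)
    {
            size_t start = 0, end = *count, i;

            while (start < end) {   /* binary search by key */
                    size_t mid = start + (end - start) / 2;

                    if (list[mid].key < e->key) {
                            start = mid + 1;
                    } else if (list[mid].key > e->key) {
                            end = mid;
                    } else {        /* duplicate: merge masks and values */
                            list[mid].val = (list[mid].val & ~e->mask) | e->val;
                            list[mid].mask |= e->mask;
                            return;
                    }
            }

            i = (*count)++;
            list[i] = *e;           /* append, then restore the ordering */
            while (i > 0 && list[i - 1].key > list[i].key) {
                    struct entry tmp = list[i - 1];

                    list[i - 1] = list[i];
                    list[i] = tmp;
                    i--;
            }
    }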
101 | 143 | ||
102 | #define WA_REG(addr, mask, val) wa_add(dev_priv, (addr), (mask), (val)) | 144 | static void |
145 | __wa_add(struct i915_wa_list *wal, i915_reg_t reg, u32 mask, u32 val) | ||
146 | { | ||
147 | struct i915_wa wa = { | ||
148 | .reg = reg, | ||
149 | .mask = mask, | ||
150 | .val = val | ||
151 | }; | ||
152 | |||
153 | _wa_add(wal, &wa); | ||
154 | } | ||
155 | |||
156 | #define WA_REG(addr, mask, val) __wa_add(wal, (addr), (mask), (val)) | ||
103 | 157 | ||
104 | #define WA_SET_BIT_MASKED(addr, mask) \ | 158 | #define WA_SET_BIT_MASKED(addr, mask) \ |
105 | WA_REG(addr, (mask), _MASKED_BIT_ENABLE(mask)) | 159 | WA_REG(addr, (mask), _MASKED_BIT_ENABLE(mask)) |
@@ -110,8 +164,10 @@ static void wa_add(struct drm_i915_private *i915, | |||
110 | #define WA_SET_FIELD_MASKED(addr, mask, value) \ | 164 | #define WA_SET_FIELD_MASKED(addr, mask, value) \ |
111 | WA_REG(addr, (mask), _MASKED_FIELD(mask, value)) | 165 | WA_REG(addr, (mask), _MASKED_FIELD(mask, value)) |
112 | 166 | ||
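[Editor's note] These macros target the hardware's "masked" registers, where the upper 16 bits of a written value select which of the lower 16 bits actually change; _MASKED_BIT_ENABLE and friends (from i915_reg.h) just build that encoding. A sketch of the encoding with local stand-ins for the real macros:

    /* Mirrors the i915_reg.h masked-write encoding as of this series. */
    #define MASKED_FIELD(mask, value)  (((mask) << 16) | (value))
    #define MASKED_BIT_ENABLE(a)       MASKED_FIELD((a), (a))
    #define MASKED_BIT_DISABLE(a)      MASKED_FIELD((a), 0)

    /*
     * e.g. MASKED_BIT_ENABLE(0x0004) == 0x00040004: set bit 2 and touch
     * only bit 2, so no read-modify-write cycle is needed by the writer.
     */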
113 | static int gen8_ctx_workarounds_init(struct drm_i915_private *dev_priv) | 167 | static void gen8_ctx_workarounds_init(struct intel_engine_cs *engine) |
114 | { | 168 | { |
169 | struct i915_wa_list *wal = &engine->ctx_wa_list; | ||
170 | |||
115 | WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING); | 171 | WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING); |
116 | 172 | ||
117 | /* WaDisableAsyncFlipPerfMode:bdw,chv */ | 173 | /* WaDisableAsyncFlipPerfMode:bdw,chv */ |
@@ -155,17 +211,14 @@ static int gen8_ctx_workarounds_init(struct drm_i915_private *dev_priv) | |||
155 | WA_SET_FIELD_MASKED(GEN7_GT_MODE, | 211 | WA_SET_FIELD_MASKED(GEN7_GT_MODE, |
156 | GEN6_WIZ_HASHING_MASK, | 212 | GEN6_WIZ_HASHING_MASK, |
157 | GEN6_WIZ_HASHING_16x4); | 213 | GEN6_WIZ_HASHING_16x4); |
158 | |||
159 | return 0; | ||
160 | } | 214 | } |
161 | 215 | ||
162 | static int bdw_ctx_workarounds_init(struct drm_i915_private *dev_priv) | 216 | static void bdw_ctx_workarounds_init(struct intel_engine_cs *engine) |
163 | { | 217 | { |
164 | int ret; | 218 | struct drm_i915_private *i915 = engine->i915; |
219 | struct i915_wa_list *wal = &engine->ctx_wa_list; | ||
165 | 220 | ||
166 | ret = gen8_ctx_workarounds_init(dev_priv); | 221 | gen8_ctx_workarounds_init(engine); |
167 | if (ret) | ||
168 | return ret; | ||
169 | 222 | ||
170 | /* WaDisableThreadStallDopClockGating:bdw (pre-production) */ | 223 | /* WaDisableThreadStallDopClockGating:bdw (pre-production) */ |
171 | WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE); | 224 | WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE); |
@@ -185,31 +238,28 @@ static int bdw_ctx_workarounds_init(struct drm_i915_private *dev_priv) | |||
185 | /* WaForceContextSaveRestoreNonCoherent:bdw */ | 238 | /* WaForceContextSaveRestoreNonCoherent:bdw */ |
186 | HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT | | 239 | HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT | |
187 | /* WaDisableFenceDestinationToSLM:bdw (pre-prod) */ | 240 | /* WaDisableFenceDestinationToSLM:bdw (pre-prod) */ |
188 | (IS_BDW_GT3(dev_priv) ? HDC_FENCE_DEST_SLM_DISABLE : 0)); | 241 | (IS_BDW_GT3(i915) ? HDC_FENCE_DEST_SLM_DISABLE : 0)); |
189 | |||
190 | return 0; | ||
191 | } | 242 | } |
192 | 243 | ||
193 | static int chv_ctx_workarounds_init(struct drm_i915_private *dev_priv) | 244 | static void chv_ctx_workarounds_init(struct intel_engine_cs *engine) |
194 | { | 245 | { |
195 | int ret; | 246 | struct i915_wa_list *wal = &engine->ctx_wa_list; |
196 | 247 | ||
197 | ret = gen8_ctx_workarounds_init(dev_priv); | 248 | gen8_ctx_workarounds_init(engine); |
198 | if (ret) | ||
199 | return ret; | ||
200 | 249 | ||
201 | /* WaDisableThreadStallDopClockGating:chv */ | 250 | /* WaDisableThreadStallDopClockGating:chv */ |
202 | WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE); | 251 | WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE); |
203 | 252 | ||
204 | /* Improve HiZ throughput on CHV. */ | 253 | /* Improve HiZ throughput on CHV. */ |
205 | WA_SET_BIT_MASKED(HIZ_CHICKEN, CHV_HZ_8X8_MODE_IN_1X); | 254 | WA_SET_BIT_MASKED(HIZ_CHICKEN, CHV_HZ_8X8_MODE_IN_1X); |
206 | |||
207 | return 0; | ||
208 | } | 255 | } |
209 | 256 | ||
210 | static int gen9_ctx_workarounds_init(struct drm_i915_private *dev_priv) | 257 | static void gen9_ctx_workarounds_init(struct intel_engine_cs *engine) |
211 | { | 258 | { |
212 | if (HAS_LLC(dev_priv)) { | 259 | struct drm_i915_private *i915 = engine->i915; |
260 | struct i915_wa_list *wal = &engine->ctx_wa_list; | ||
261 | |||
262 | if (HAS_LLC(i915)) { | ||
213 | /* WaCompressedResourceSamplerPbeMediaNewHashMode:skl,kbl | 263 | /* WaCompressedResourceSamplerPbeMediaNewHashMode:skl,kbl |
214 | * | 264 | * |
215 | * Must match Display Engine. See | 265 | * Must match Display Engine. See |
@@ -228,7 +278,7 @@ static int gen9_ctx_workarounds_init(struct drm_i915_private *dev_priv) | |||
228 | PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE); | 278 | PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE); |
229 | 279 | ||
230 | /* Syncing dependencies between camera and graphics:skl,bxt,kbl */ | 280 | /* Syncing dependencies between camera and graphics:skl,bxt,kbl */ |
231 | if (!IS_COFFEELAKE(dev_priv)) | 281 | if (!IS_COFFEELAKE(i915)) |
232 | WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3, | 282 | WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3, |
233 | GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC); | 283 | GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC); |
234 | 284 | ||
@@ -271,9 +321,7 @@ static int gen9_ctx_workarounds_init(struct drm_i915_private *dev_priv) | |||
271 | HDC_FORCE_NON_COHERENT); | 321 | HDC_FORCE_NON_COHERENT); |
272 | 322 | ||
273 | /* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt,kbl,cfl */ | 323 | /* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt,kbl,cfl */ |
274 | if (IS_SKYLAKE(dev_priv) || | 324 | if (IS_SKYLAKE(i915) || IS_KABYLAKE(i915) || IS_COFFEELAKE(i915)) |
275 | IS_KABYLAKE(dev_priv) || | ||
276 | IS_COFFEELAKE(dev_priv)) | ||
277 | WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3, | 325 | WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3, |
278 | GEN8_SAMPLER_POWER_BYPASS_DIS); | 326 | GEN8_SAMPLER_POWER_BYPASS_DIS); |
279 | 327 | ||
@@ -300,14 +348,14 @@ static int gen9_ctx_workarounds_init(struct drm_i915_private *dev_priv) | |||
300 | GEN9_PREEMPT_GPGPU_COMMAND_LEVEL); | 348 | GEN9_PREEMPT_GPGPU_COMMAND_LEVEL); |
301 | 349 | ||
302 | /* WaClearHIZ_WM_CHICKEN3:bxt,glk */ | 350 | /* WaClearHIZ_WM_CHICKEN3:bxt,glk */ |
303 | if (IS_GEN9_LP(dev_priv)) | 351 | if (IS_GEN9_LP(i915)) |
304 | WA_SET_BIT_MASKED(GEN9_WM_CHICKEN3, GEN9_FACTOR_IN_CLR_VAL_HIZ); | 352 | WA_SET_BIT_MASKED(GEN9_WM_CHICKEN3, GEN9_FACTOR_IN_CLR_VAL_HIZ); |
305 | |||
306 | return 0; | ||
307 | } | 353 | } |
308 | 354 | ||
309 | static int skl_tune_iz_hashing(struct drm_i915_private *dev_priv) | 355 | static void skl_tune_iz_hashing(struct intel_engine_cs *engine) |
310 | { | 356 | { |
357 | struct drm_i915_private *i915 = engine->i915; | ||
358 | struct i915_wa_list *wal = &engine->ctx_wa_list; | ||
311 | u8 vals[3] = { 0, 0, 0 }; | 359 | u8 vals[3] = { 0, 0, 0 }; |
312 | unsigned int i; | 360 | unsigned int i; |
313 | 361 | ||
@@ -318,7 +366,7 @@ static int skl_tune_iz_hashing(struct drm_i915_private *dev_priv) | |||
318 | * Only consider slices where one, and only one, subslice has 7 | 366 | * Only consider slices where one, and only one, subslice has 7 |
319 | * EUs | 367 | * EUs |
320 | */ | 368 | */ |
321 | if (!is_power_of_2(INTEL_INFO(dev_priv)->sseu.subslice_7eu[i])) | 369 | if (!is_power_of_2(INTEL_INFO(i915)->sseu.subslice_7eu[i])) |
322 | continue; | 370 | continue; |
323 | 371 | ||
324 | /* | 372 | /* |
@@ -327,12 +375,12 @@ static int skl_tune_iz_hashing(struct drm_i915_private *dev_priv) | |||
327 | * | 375 | * |
328 | * -> 0 <= ss <= 3; | 376 | * -> 0 <= ss <= 3; |
329 | */ | 377 | */ |
330 | ss = ffs(INTEL_INFO(dev_priv)->sseu.subslice_7eu[i]) - 1; | 378 | ss = ffs(INTEL_INFO(i915)->sseu.subslice_7eu[i]) - 1; |
331 | vals[i] = 3 - ss; | 379 | vals[i] = 3 - ss; |
332 | } | 380 | } |
333 | 381 | ||
334 | if (vals[0] == 0 && vals[1] == 0 && vals[2] == 0) | 382 | if (vals[0] == 0 && vals[1] == 0 && vals[2] == 0) |
335 | return 0; | 383 | return; |
336 | 384 | ||
337 | /* Tune IZ hashing. See intel_device_info_runtime_init() */ | 385 | /* Tune IZ hashing. See intel_device_info_runtime_init() */ |
338 | WA_SET_FIELD_MASKED(GEN7_GT_MODE, | 386 | WA_SET_FIELD_MASKED(GEN7_GT_MODE, |
@@ -342,28 +390,19 @@ static int skl_tune_iz_hashing(struct drm_i915_private *dev_priv) | |||
342 | GEN9_IZ_HASHING(2, vals[2]) | | 390 | GEN9_IZ_HASHING(2, vals[2]) | |
343 | GEN9_IZ_HASHING(1, vals[1]) | | 391 | GEN9_IZ_HASHING(1, vals[1]) | |
344 | GEN9_IZ_HASHING(0, vals[0])); | 392 | GEN9_IZ_HASHING(0, vals[0])); |
345 | |||
346 | return 0; | ||
347 | } | 393 | } |
348 | 394 | ||
349 | static int skl_ctx_workarounds_init(struct drm_i915_private *dev_priv) | 395 | static void skl_ctx_workarounds_init(struct intel_engine_cs *engine) |
350 | { | 396 | { |
351 | int ret; | 397 | gen9_ctx_workarounds_init(engine); |
352 | 398 | skl_tune_iz_hashing(engine); | |
353 | ret = gen9_ctx_workarounds_init(dev_priv); | ||
354 | if (ret) | ||
355 | return ret; | ||
356 | |||
357 | return skl_tune_iz_hashing(dev_priv); | ||
358 | } | 399 | } |
359 | 400 | ||
360 | static int bxt_ctx_workarounds_init(struct drm_i915_private *dev_priv) | 401 | static void bxt_ctx_workarounds_init(struct intel_engine_cs *engine) |
361 | { | 402 | { |
362 | int ret; | 403 | struct i915_wa_list *wal = &engine->ctx_wa_list; |
363 | 404 | ||
364 | ret = gen9_ctx_workarounds_init(dev_priv); | 405 | gen9_ctx_workarounds_init(engine); |
365 | if (ret) | ||
366 | return ret; | ||
367 | 406 | ||
368 | /* WaDisableThreadStallDopClockGating:bxt */ | 407 | /* WaDisableThreadStallDopClockGating:bxt */ |
369 | WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, | 408 | WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, |
@@ -372,57 +411,41 @@ static int bxt_ctx_workarounds_init(struct drm_i915_private *dev_priv) | |||
372 | /* WaToEnableHwFixForPushConstHWBug:bxt */ | 411 | /* WaToEnableHwFixForPushConstHWBug:bxt */ |
373 | WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2, | 412 | WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2, |
374 | GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION); | 413 | GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION); |
375 | |||
376 | return 0; | ||
377 | } | 414 | } |
378 | 415 | ||
379 | static int kbl_ctx_workarounds_init(struct drm_i915_private *dev_priv) | 416 | static void kbl_ctx_workarounds_init(struct intel_engine_cs *engine) |
380 | { | 417 | { |
381 | int ret; | 418 | struct drm_i915_private *i915 = engine->i915; |
382 | 419 | struct i915_wa_list *wal = &engine->ctx_wa_list; | |
383 | ret = gen9_ctx_workarounds_init(dev_priv); | ||
384 | if (ret) | ||
385 | return ret; | ||
386 | 420 | ||
387 | /* WaDisableFenceDestinationToSLM:kbl (pre-prod) */ | 421 | gen9_ctx_workarounds_init(engine); |
388 | if (IS_KBL_REVID(dev_priv, KBL_REVID_A0, KBL_REVID_A0)) | ||
389 | WA_SET_BIT_MASKED(HDC_CHICKEN0, | ||
390 | HDC_FENCE_DEST_SLM_DISABLE); | ||
391 | 422 | ||
392 | /* WaToEnableHwFixForPushConstHWBug:kbl */ | 423 | /* WaToEnableHwFixForPushConstHWBug:kbl */ |
393 | if (IS_KBL_REVID(dev_priv, KBL_REVID_C0, REVID_FOREVER)) | 424 | if (IS_KBL_REVID(i915, KBL_REVID_C0, REVID_FOREVER)) |
394 | WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2, | 425 | WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2, |
395 | GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION); | 426 | GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION); |
396 | 427 | ||
397 | /* WaDisableSbeCacheDispatchPortSharing:kbl */ | 428 | /* WaDisableSbeCacheDispatchPortSharing:kbl */ |
398 | WA_SET_BIT_MASKED(GEN7_HALF_SLICE_CHICKEN1, | 429 | WA_SET_BIT_MASKED(GEN7_HALF_SLICE_CHICKEN1, |
399 | GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE); | 430 | GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE); |
400 | |||
401 | return 0; | ||
402 | } | 431 | } |
403 | 432 | ||
404 | static int glk_ctx_workarounds_init(struct drm_i915_private *dev_priv) | 433 | static void glk_ctx_workarounds_init(struct intel_engine_cs *engine) |
405 | { | 434 | { |
406 | int ret; | 435 | struct i915_wa_list *wal = &engine->ctx_wa_list; |
407 | 436 | ||
408 | ret = gen9_ctx_workarounds_init(dev_priv); | 437 | gen9_ctx_workarounds_init(engine); |
409 | if (ret) | ||
410 | return ret; | ||
411 | 438 | ||
412 | /* WaToEnableHwFixForPushConstHWBug:glk */ | 439 | /* WaToEnableHwFixForPushConstHWBug:glk */ |
413 | WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2, | 440 | WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2, |
414 | GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION); | 441 | GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION); |
415 | |||
416 | return 0; | ||
417 | } | 442 | } |
418 | 443 | ||
419 | static int cfl_ctx_workarounds_init(struct drm_i915_private *dev_priv) | 444 | static void cfl_ctx_workarounds_init(struct intel_engine_cs *engine) |
420 | { | 445 | { |
421 | int ret; | 446 | struct i915_wa_list *wal = &engine->ctx_wa_list; |
422 | 447 | ||
423 | ret = gen9_ctx_workarounds_init(dev_priv); | 448 | gen9_ctx_workarounds_init(engine); |
424 | if (ret) | ||
425 | return ret; | ||
426 | 449 | ||
427 | /* WaToEnableHwFixForPushConstHWBug:cfl */ | 450 | /* WaToEnableHwFixForPushConstHWBug:cfl */ |
428 | WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2, | 451 | WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2, |
@@ -431,18 +454,19 @@ static int cfl_ctx_workarounds_init(struct drm_i915_private *dev_priv) | |||
431 | /* WaDisableSbeCacheDispatchPortSharing:cfl */ | 454 | /* WaDisableSbeCacheDispatchPortSharing:cfl */ |
432 | WA_SET_BIT_MASKED(GEN7_HALF_SLICE_CHICKEN1, | 455 | WA_SET_BIT_MASKED(GEN7_HALF_SLICE_CHICKEN1, |
433 | GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE); | 456 | GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE); |
434 | |||
435 | return 0; | ||
436 | } | 457 | } |
437 | 458 | ||
438 | static int cnl_ctx_workarounds_init(struct drm_i915_private *dev_priv) | 459 | static void cnl_ctx_workarounds_init(struct intel_engine_cs *engine) |
439 | { | 460 | { |
461 | struct drm_i915_private *i915 = engine->i915; | ||
462 | struct i915_wa_list *wal = &engine->ctx_wa_list; | ||
463 | |||
440 | /* WaForceContextSaveRestoreNonCoherent:cnl */ | 464 | /* WaForceContextSaveRestoreNonCoherent:cnl */ |
441 | WA_SET_BIT_MASKED(CNL_HDC_CHICKEN0, | 465 | WA_SET_BIT_MASKED(CNL_HDC_CHICKEN0, |
442 | HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT); | 466 | HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT); |
443 | 467 | ||
444 | /* WaThrottleEUPerfToAvoidTDBackPressure:cnl(pre-prod) */ | 468 | /* WaThrottleEUPerfToAvoidTDBackPressure:cnl(pre-prod) */ |
445 | if (IS_CNL_REVID(dev_priv, CNL_REVID_B0, CNL_REVID_B0)) | 469 | if (IS_CNL_REVID(i915, CNL_REVID_B0, CNL_REVID_B0)) |
446 | WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, THROTTLE_12_5); | 470 | WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, THROTTLE_12_5); |
447 | 471 | ||
448 | /* WaDisableReplayBufferBankArbitrationOptimization:cnl */ | 472 | /* WaDisableReplayBufferBankArbitrationOptimization:cnl */ |
@@ -450,7 +474,7 @@ static int cnl_ctx_workarounds_init(struct drm_i915_private *dev_priv) | |||
450 | GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION); | 474 | GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION); |
451 | 475 | ||
452 | /* WaDisableEnhancedSBEVertexCaching:cnl (pre-prod) */ | 476 | /* WaDisableEnhancedSBEVertexCaching:cnl (pre-prod) */ |
453 | if (IS_CNL_REVID(dev_priv, 0, CNL_REVID_B0)) | 477 | if (IS_CNL_REVID(i915, 0, CNL_REVID_B0)) |
454 | WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2, | 478 | WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2, |
455 | GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE); | 479 | GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE); |
456 | 480 | ||
@@ -470,16 +494,17 @@ static int cnl_ctx_workarounds_init(struct drm_i915_private *dev_priv) | |||
470 | 494 | ||
471 | /* WaDisableEarlyEOT:cnl */ | 495 | /* WaDisableEarlyEOT:cnl */ |
472 | WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, DISABLE_EARLY_EOT); | 496 | WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, DISABLE_EARLY_EOT); |
473 | |||
474 | return 0; | ||
475 | } | 497 | } |
476 | 498 | ||
477 | static int icl_ctx_workarounds_init(struct drm_i915_private *dev_priv) | 499 | static void icl_ctx_workarounds_init(struct intel_engine_cs *engine) |
478 | { | 500 | { |
501 | struct drm_i915_private *i915 = engine->i915; | ||
502 | struct i915_wa_list *wal = &engine->ctx_wa_list; | ||
503 | |||
479 | /* Wa_1604370585:icl (pre-prod) | 504 | /* Wa_1604370585:icl (pre-prod) |
480 | * Formerly known as WaPushConstantDereferenceHoldDisable | 505 | * Formerly known as WaPushConstantDereferenceHoldDisable |
481 | */ | 506 | */ |
482 | if (IS_ICL_REVID(dev_priv, ICL_REVID_A0, ICL_REVID_B0)) | 507 | if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_B0)) |
483 | WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2, | 508 | WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2, |
484 | PUSH_CONSTANT_DEREF_DISABLE); | 509 | PUSH_CONSTANT_DEREF_DISABLE); |
485 | 510 | ||
@@ -495,7 +520,7 @@ static int icl_ctx_workarounds_init(struct drm_i915_private *dev_priv) | |||
495 | /* Wa_2006611047:icl (pre-prod) | 520 | /* Wa_2006611047:icl (pre-prod) |
496 | * Formerly known as WaDisableImprovedTdlClkGating | 521 | * Formerly known as WaDisableImprovedTdlClkGating |
497 | */ | 522 | */ |
498 | if (IS_ICL_REVID(dev_priv, ICL_REVID_A0, ICL_REVID_A0)) | 523 | if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_A0)) |
499 | WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2, | 524 | WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2, |
500 | GEN11_TDL_CLOCK_GATING_FIX_DISABLE); | 525 | GEN11_TDL_CLOCK_GATING_FIX_DISABLE); |
501 | 526 | ||
@@ -504,70 +529,67 @@ static int icl_ctx_workarounds_init(struct drm_i915_private *dev_priv) | |||
504 | GEN11_STATE_CACHE_REDIRECT_TO_CS); | 529 | GEN11_STATE_CACHE_REDIRECT_TO_CS); |
505 | 530 | ||
506 | /* Wa_2006665173:icl (pre-prod) */ | 531 | /* Wa_2006665173:icl (pre-prod) */ |
507 | if (IS_ICL_REVID(dev_priv, ICL_REVID_A0, ICL_REVID_A0)) | 532 | if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_A0)) |
508 | WA_SET_BIT_MASKED(GEN11_COMMON_SLICE_CHICKEN3, | 533 | WA_SET_BIT_MASKED(GEN11_COMMON_SLICE_CHICKEN3, |
509 | GEN11_BLEND_EMB_FIX_DISABLE_IN_RCC); | 534 | GEN11_BLEND_EMB_FIX_DISABLE_IN_RCC); |
510 | |||
511 | return 0; | ||
512 | } | 535 | } |
513 | 536 | ||
514 | int intel_ctx_workarounds_init(struct drm_i915_private *dev_priv) | 537 | void intel_engine_init_ctx_wa(struct intel_engine_cs *engine) |
515 | { | 538 | { |
516 | int err = 0; | 539 | struct drm_i915_private *i915 = engine->i915; |
517 | 540 | struct i915_wa_list *wal = &engine->ctx_wa_list; | |
518 | dev_priv->workarounds.count = 0; | 541 | |
519 | 542 | wa_init_start(wal, "context"); | |
520 | if (INTEL_GEN(dev_priv) < 8) | 543 | |
521 | err = 0; | 544 | if (INTEL_GEN(i915) < 8) |
522 | else if (IS_BROADWELL(dev_priv)) | 545 | return; |
523 | err = bdw_ctx_workarounds_init(dev_priv); | 546 | else if (IS_BROADWELL(i915)) |
524 | else if (IS_CHERRYVIEW(dev_priv)) | 547 | bdw_ctx_workarounds_init(engine); |
525 | err = chv_ctx_workarounds_init(dev_priv); | 548 | else if (IS_CHERRYVIEW(i915)) |
526 | else if (IS_SKYLAKE(dev_priv)) | 549 | chv_ctx_workarounds_init(engine); |
527 | err = skl_ctx_workarounds_init(dev_priv); | 550 | else if (IS_SKYLAKE(i915)) |
528 | else if (IS_BROXTON(dev_priv)) | 551 | skl_ctx_workarounds_init(engine); |
529 | err = bxt_ctx_workarounds_init(dev_priv); | 552 | else if (IS_BROXTON(i915)) |
530 | else if (IS_KABYLAKE(dev_priv)) | 553 | bxt_ctx_workarounds_init(engine); |
531 | err = kbl_ctx_workarounds_init(dev_priv); | 554 | else if (IS_KABYLAKE(i915)) |
532 | else if (IS_GEMINILAKE(dev_priv)) | 555 | kbl_ctx_workarounds_init(engine); |
533 | err = glk_ctx_workarounds_init(dev_priv); | 556 | else if (IS_GEMINILAKE(i915)) |
534 | else if (IS_COFFEELAKE(dev_priv)) | 557 | glk_ctx_workarounds_init(engine); |
535 | err = cfl_ctx_workarounds_init(dev_priv); | 558 | else if (IS_COFFEELAKE(i915)) |
536 | else if (IS_CANNONLAKE(dev_priv)) | 559 | cfl_ctx_workarounds_init(engine); |
537 | err = cnl_ctx_workarounds_init(dev_priv); | 560 | else if (IS_CANNONLAKE(i915)) |
538 | else if (IS_ICELAKE(dev_priv)) | 561 | cnl_ctx_workarounds_init(engine); |
539 | err = icl_ctx_workarounds_init(dev_priv); | 562 | else if (IS_ICELAKE(i915)) |
563 | icl_ctx_workarounds_init(engine); | ||
540 | else | 564 | else |
541 | MISSING_CASE(INTEL_GEN(dev_priv)); | 565 | MISSING_CASE(INTEL_GEN(i915)); |
542 | if (err) | ||
543 | return err; | ||
544 | 566 | ||
545 | DRM_DEBUG_DRIVER("Number of context specific w/a: %d\n", | 567 | wa_init_finish(wal); |
546 | dev_priv->workarounds.count); | ||
547 | return 0; | ||
548 | } | 568 | } |
549 | 569 | ||
550 | int intel_ctx_workarounds_emit(struct i915_request *rq) | 570 | int intel_engine_emit_ctx_wa(struct i915_request *rq) |
551 | { | 571 | { |
552 | struct i915_workarounds *w = &rq->i915->workarounds; | 572 | struct i915_wa_list *wal = &rq->engine->ctx_wa_list; |
573 | struct i915_wa *wa; | ||
574 | unsigned int i; | ||
553 | u32 *cs; | 575 | u32 *cs; |
554 | int ret, i; | 576 | int ret; |
555 | 577 | ||
556 | if (w->count == 0) | 578 | if (wal->count == 0) |
557 | return 0; | 579 | return 0; |
558 | 580 | ||
559 | ret = rq->engine->emit_flush(rq, EMIT_BARRIER); | 581 | ret = rq->engine->emit_flush(rq, EMIT_BARRIER); |
560 | if (ret) | 582 | if (ret) |
561 | return ret; | 583 | return ret; |
562 | 584 | ||
563 | cs = intel_ring_begin(rq, (w->count * 2 + 2)); | 585 | cs = intel_ring_begin(rq, (wal->count * 2 + 2)); |
564 | if (IS_ERR(cs)) | 586 | if (IS_ERR(cs)) |
565 | return PTR_ERR(cs); | 587 | return PTR_ERR(cs); |
566 | 588 | ||
567 | *cs++ = MI_LOAD_REGISTER_IMM(w->count); | 589 | *cs++ = MI_LOAD_REGISTER_IMM(wal->count); |
568 | for (i = 0; i < w->count; i++) { | 590 | for (i = 0, wa = wal->list; i < wal->count; i++, wa++) { |
569 | *cs++ = w->reg[i].addr; | 591 | *cs++ = i915_mmio_reg_offset(wa->reg); |
570 | *cs++ = w->reg[i].value; | 592 | *cs++ = wa->val; |
571 | } | 593 | } |
572 | *cs++ = MI_NOOP; | 594 | *cs++ = MI_NOOP; |
573 | 595 | ||
@@ -580,160 +602,149 @@ int intel_ctx_workarounds_emit(struct i915_request *rq) | |||
580 | return 0; | 602 | return 0; |
581 | } | 603 | } |
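[Editor's note] The ring reservation of wal->count * 2 + 2 dwords above accounts for exactly one MI_LOAD_REGISTER_IMM header, an (offset, value) dword pair per workaround, and a trailing MI_NOOP; the masked-register encoding from the previous hunk is what lets a pure immediate load suffice, since no readback is needed. The arithmetic as a sketch:

    /* Dword budget for the LRI batch emitted above. */
    static unsigned int ctx_wa_dwords(unsigned int count)
    {
            return 1              /* MI_LOAD_REGISTER_IMM(count) header */
                 + 2 * count      /* one (offset, value) pair per register */
                 + 1;             /* trailing MI_NOOP for padding */
    }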
582 | 604 | ||
583 | static void bdw_gt_workarounds_apply(struct drm_i915_private *dev_priv) | 605 | static void |
606 | wa_masked_en(struct i915_wa_list *wal, i915_reg_t reg, u32 val) | ||
607 | { | ||
608 | struct i915_wa wa = { | ||
609 | .reg = reg, | ||
610 | .mask = val, | ||
611 | .val = _MASKED_BIT_ENABLE(val) | ||
612 | }; | ||
613 | |||
614 | _wa_add(wal, &wa); | ||
615 | } | ||
616 | |||
617 | static void | ||
618 | wa_write_masked_or(struct i915_wa_list *wal, i915_reg_t reg, u32 mask, | ||
619 | u32 val) | ||
584 | { | 620 | { |
621 | struct i915_wa wa = { | ||
622 | .reg = reg, | ||
623 | .mask = mask, | ||
624 | .val = val | ||
625 | }; | ||
626 | |||
627 | _wa_add(wal, &wa); | ||
585 | } | 628 | } |
586 | 629 | ||
587 | static void chv_gt_workarounds_apply(struct drm_i915_private *dev_priv) | 630 | static void |
631 | wa_write(struct i915_wa_list *wal, i915_reg_t reg, u32 val) | ||
588 | { | 632 | { |
633 | wa_write_masked_or(wal, reg, ~0, val); | ||
589 | } | 634 | } |
590 | 635 | ||
591 | static void gen9_gt_workarounds_apply(struct drm_i915_private *dev_priv) | 636 | static void |
637 | wa_write_or(struct i915_wa_list *wal, i915_reg_t reg, u32 val) | ||
592 | { | 638 | { |
593 | /* WaContextSwitchWithConcurrentTLBInvalidate:skl,bxt,kbl,glk,cfl */ | 639 | wa_write_masked_or(wal, reg, val, val); |
594 | I915_WRITE(GEN9_CSFE_CHICKEN1_RCS, | 640 | } |
595 | _MASKED_BIT_ENABLE(GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE)); | ||
596 | 641 | ||
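[Editor's note] All three write helpers funnel into a single (mask, val) pair; they differ only in what that pair means once the apply loop later computes val = (read(reg) & ~mask) | val. A sketch of the arithmetic and the resulting semantics, under the names introduced above:

    /*
     * How a (mask, val) pair is consumed by wa_list_apply() further down:
     *
     *   wa_write(reg, v)               mask = ~0,   val = v     full overwrite
     *   wa_write_or(reg, bits)         mask = bits, val = bits  set bits only
     *   wa_write_masked_or(reg, m, v)  mask = m,    val = v     replace field m
     */
    static unsigned int apply_rmw(unsigned int cur, unsigned int mask,
                                  unsigned int val)
    {
            return (cur & ~mask) | val;  /* clear masked bits, OR in value */
    }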
597 | /* WaEnableLbsSlaRetryTimerDecrement:skl,bxt,kbl,glk,cfl */ | 642 | static void gen9_gt_workarounds_init(struct drm_i915_private *i915) |
598 | I915_WRITE(BDW_SCRATCH1, I915_READ(BDW_SCRATCH1) | | 643 | { |
599 | GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE); | 644 | struct i915_wa_list *wal = &i915->gt_wa_list; |
600 | 645 | ||
601 | /* WaDisableKillLogic:bxt,skl,kbl */ | 646 | /* WaDisableKillLogic:bxt,skl,kbl */ |
602 | if (!IS_COFFEELAKE(dev_priv)) | 647 | if (!IS_COFFEELAKE(i915)) |
603 | I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | | 648 | wa_write_or(wal, |
604 | ECOCHK_DIS_TLB); | 649 | GAM_ECOCHK, |
650 | ECOCHK_DIS_TLB); | ||
605 | 651 | ||
606 | if (HAS_LLC(dev_priv)) { | 652 | if (HAS_LLC(i915)) { |
607 | /* WaCompressedResourceSamplerPbeMediaNewHashMode:skl,kbl | 653 | /* WaCompressedResourceSamplerPbeMediaNewHashMode:skl,kbl |
608 | * | 654 | * |
609 | * Must match Display Engine. See | 655 | * Must match Display Engine. See |
610 | * WaCompressedResourceDisplayNewHashMode. | 656 | * WaCompressedResourceDisplayNewHashMode. |
611 | */ | 657 | */ |
612 | I915_WRITE(MMCD_MISC_CTRL, | 658 | wa_write_or(wal, |
613 | I915_READ(MMCD_MISC_CTRL) | | 659 | MMCD_MISC_CTRL, |
614 | MMCD_PCLA | | 660 | MMCD_PCLA | MMCD_HOTSPOT_EN); |
615 | MMCD_HOTSPOT_EN); | ||
616 | } | 661 | } |
617 | 662 | ||
618 | /* WaDisableHDCInvalidation:skl,bxt,kbl,cfl */ | 663 | /* WaDisableHDCInvalidation:skl,bxt,kbl,cfl */ |
619 | I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | | 664 | wa_write_or(wal, |
620 | BDW_DISABLE_HDC_INVALIDATION); | 665 | GAM_ECOCHK, |
621 | 666 | BDW_DISABLE_HDC_INVALIDATION); | |
622 | /* WaProgramL3SqcReg1DefaultForPerf:bxt,glk */ | ||
623 | if (IS_GEN9_LP(dev_priv)) { | ||
624 | u32 val = I915_READ(GEN8_L3SQCREG1); | ||
625 | |||
626 | val &= ~L3_PRIO_CREDITS_MASK; | ||
627 | val |= L3_GENERAL_PRIO_CREDITS(62) | L3_HIGH_PRIO_CREDITS(2); | ||
628 | I915_WRITE(GEN8_L3SQCREG1, val); | ||
629 | } | ||
630 | |||
631 | /* WaOCLCoherentLineFlush:skl,bxt,kbl,cfl */ | ||
632 | I915_WRITE(GEN8_L3SQCREG4, | ||
633 | I915_READ(GEN8_L3SQCREG4) | GEN8_LQSC_FLUSH_COHERENT_LINES); | ||
634 | |||
635 | /* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl,cfl,[cnl] */ | ||
636 | I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1, | ||
637 | _MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL)); | ||
638 | } | 667 | } |
639 | 668 | ||
640 | static void skl_gt_workarounds_apply(struct drm_i915_private *dev_priv) | 669 | static void skl_gt_workarounds_init(struct drm_i915_private *i915) |
641 | { | 670 | { |
642 | gen9_gt_workarounds_apply(dev_priv); | 671 | struct i915_wa_list *wal = &i915->gt_wa_list; |
643 | 672 | ||
644 | /* WaEnableGapsTsvCreditFix:skl */ | 673 | gen9_gt_workarounds_init(i915); |
645 | I915_WRITE(GEN8_GARBCNTL, | ||
646 | I915_READ(GEN8_GARBCNTL) | GEN9_GAPS_TSV_CREDIT_DISABLE); | ||
647 | 674 | ||
648 | /* WaDisableGafsUnitClkGating:skl */ | 675 | /* WaDisableGafsUnitClkGating:skl */ |
649 | I915_WRITE(GEN7_UCGCTL4, | 676 | wa_write_or(wal, |
650 | I915_READ(GEN7_UCGCTL4) | GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE); | 677 | GEN7_UCGCTL4, |
678 | GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE); | ||
651 | 679 | ||
652 | /* WaInPlaceDecompressionHang:skl */ | 680 | /* WaInPlaceDecompressionHang:skl */ |
653 | if (IS_SKL_REVID(dev_priv, SKL_REVID_H0, REVID_FOREVER)) | 681 | if (IS_SKL_REVID(i915, SKL_REVID_H0, REVID_FOREVER)) |
654 | I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA, | 682 | wa_write_or(wal, |
655 | I915_READ(GEN9_GAMT_ECO_REG_RW_IA) | | 683 | GEN9_GAMT_ECO_REG_RW_IA, |
656 | GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS); | 684 | GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS); |
657 | } | 685 | } |
658 | 686 | ||
659 | static void bxt_gt_workarounds_apply(struct drm_i915_private *dev_priv) | 687 | static void bxt_gt_workarounds_init(struct drm_i915_private *i915) |
660 | { | 688 | { |
661 | gen9_gt_workarounds_apply(dev_priv); | 689 | struct i915_wa_list *wal = &i915->gt_wa_list; |
662 | 690 | ||
663 | /* WaDisablePooledEuLoadBalancingFix:bxt */ | 691 | gen9_gt_workarounds_init(i915); |
664 | I915_WRITE(FF_SLICE_CS_CHICKEN2, | ||
665 | _MASKED_BIT_ENABLE(GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE)); | ||
666 | 692 | ||
667 | /* WaInPlaceDecompressionHang:bxt */ | 693 | /* WaInPlaceDecompressionHang:bxt */ |
668 | I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA, | 694 | wa_write_or(wal, |
669 | I915_READ(GEN9_GAMT_ECO_REG_RW_IA) | | 695 | GEN9_GAMT_ECO_REG_RW_IA, |
670 | GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS); | 696 | GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS); |
671 | } | 697 | } |
672 | 698 | ||
673 | static void kbl_gt_workarounds_apply(struct drm_i915_private *dev_priv) | 699 | static void kbl_gt_workarounds_init(struct drm_i915_private *i915) |
674 | { | 700 | { |
675 | gen9_gt_workarounds_apply(dev_priv); | 701 | struct i915_wa_list *wal = &i915->gt_wa_list; |
676 | 702 | ||
677 | /* WaEnableGapsTsvCreditFix:kbl */ | 703 | gen9_gt_workarounds_init(i915); |
678 | I915_WRITE(GEN8_GARBCNTL, | ||
679 | I915_READ(GEN8_GARBCNTL) | GEN9_GAPS_TSV_CREDIT_DISABLE); | ||
680 | 704 | ||
681 | /* WaDisableDynamicCreditSharing:kbl */ | 705 | /* WaDisableDynamicCreditSharing:kbl */ |
682 | if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0)) | 706 | if (IS_KBL_REVID(i915, 0, KBL_REVID_B0)) |
683 | I915_WRITE(GAMT_CHKN_BIT_REG, | 707 | wa_write_or(wal, |
684 | I915_READ(GAMT_CHKN_BIT_REG) | | 708 | GAMT_CHKN_BIT_REG, |
685 | GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING); | 709 | GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING); |
686 | 710 | ||
687 | /* WaDisableGafsUnitClkGating:kbl */ | 711 | /* WaDisableGafsUnitClkGating:kbl */ |
688 | I915_WRITE(GEN7_UCGCTL4, | 712 | wa_write_or(wal, |
689 | I915_READ(GEN7_UCGCTL4) | GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE); | 713 | GEN7_UCGCTL4, |
714 | GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE); | ||
690 | 715 | ||
691 | /* WaInPlaceDecompressionHang:kbl */ | 716 | /* WaInPlaceDecompressionHang:kbl */ |
692 | I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA, | 717 | wa_write_or(wal, |
693 | I915_READ(GEN9_GAMT_ECO_REG_RW_IA) | | 718 | GEN9_GAMT_ECO_REG_RW_IA, |
694 | GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS); | 719 | GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS); |
695 | |||
696 | /* WaKBLVECSSemaphoreWaitPoll:kbl */ | ||
697 | if (IS_KBL_REVID(dev_priv, KBL_REVID_A0, KBL_REVID_E0)) { | ||
698 | struct intel_engine_cs *engine; | ||
699 | unsigned int tmp; | ||
700 | |||
701 | for_each_engine(engine, dev_priv, tmp) { | ||
702 | if (engine->id == RCS) | ||
703 | continue; | ||
704 | |||
705 | I915_WRITE(RING_SEMA_WAIT_POLL(engine->mmio_base), 1); | ||
706 | } | ||
707 | } | ||
708 | } | 720 | } |
709 | 721 | ||
710 | static void glk_gt_workarounds_apply(struct drm_i915_private *dev_priv) | 722 | static void glk_gt_workarounds_init(struct drm_i915_private *i915) |
711 | { | 723 | { |
712 | gen9_gt_workarounds_apply(dev_priv); | 724 | gen9_gt_workarounds_init(i915); |
713 | } | 725 | } |
714 | 726 | ||
715 | static void cfl_gt_workarounds_apply(struct drm_i915_private *dev_priv) | 727 | static void cfl_gt_workarounds_init(struct drm_i915_private *i915) |
716 | { | 728 | { |
717 | gen9_gt_workarounds_apply(dev_priv); | 729 | struct i915_wa_list *wal = &i915->gt_wa_list; |
718 | 730 | ||
719 | /* WaEnableGapsTsvCreditFix:cfl */ | 731 | gen9_gt_workarounds_init(i915); |
720 | I915_WRITE(GEN8_GARBCNTL, | ||
721 | I915_READ(GEN8_GARBCNTL) | GEN9_GAPS_TSV_CREDIT_DISABLE); | ||
722 | 732 | ||
723 | /* WaDisableGafsUnitClkGating:cfl */ | 733 | /* WaDisableGafsUnitClkGating:cfl */ |
724 | I915_WRITE(GEN7_UCGCTL4, | 734 | wa_write_or(wal, |
725 | I915_READ(GEN7_UCGCTL4) | GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE); | 735 | GEN7_UCGCTL4, |
736 | GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE); | ||
726 | 737 | ||
727 | /* WaInPlaceDecompressionHang:cfl */ | 738 | /* WaInPlaceDecompressionHang:cfl */ |
728 | I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA, | 739 | wa_write_or(wal, |
729 | I915_READ(GEN9_GAMT_ECO_REG_RW_IA) | | 740 | GEN9_GAMT_ECO_REG_RW_IA, |
730 | GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS); | 741 | GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS); |
731 | } | 742 | } |
732 | 743 | ||
733 | static void wa_init_mcr(struct drm_i915_private *dev_priv) | 744 | static void wa_init_mcr(struct drm_i915_private *dev_priv) |
734 | { | 745 | { |
735 | const struct sseu_dev_info *sseu = &(INTEL_INFO(dev_priv)->sseu); | 746 | const struct sseu_dev_info *sseu = &(INTEL_INFO(dev_priv)->sseu); |
736 | u32 mcr; | 747 | struct i915_wa_list *wal = &dev_priv->gt_wa_list; |
737 | u32 mcr_slice_subslice_mask; | 748 | u32 mcr_slice_subslice_mask; |
738 | 749 | ||
739 | /* | 750 | /* |
@@ -770,8 +781,6 @@ static void wa_init_mcr(struct drm_i915_private *dev_priv) | |||
770 | WARN_ON((enabled_mask & disabled_mask) != enabled_mask); | 781 | WARN_ON((enabled_mask & disabled_mask) != enabled_mask); |
771 | } | 782 | } |
772 | 783 | ||
773 | mcr = I915_READ(GEN8_MCR_SELECTOR); | ||
774 | |||
775 | if (INTEL_GEN(dev_priv) >= 11) | 784 | if (INTEL_GEN(dev_priv) >= 11) |
776 | mcr_slice_subslice_mask = GEN11_MCR_SLICE_MASK | | 785 | mcr_slice_subslice_mask = GEN11_MCR_SLICE_MASK | |
777 | GEN11_MCR_SUBSLICE_MASK; | 786 | GEN11_MCR_SUBSLICE_MASK; |
@@ -789,186 +798,220 @@ static void wa_init_mcr(struct drm_i915_private *dev_priv) | |||
789 | * occasions, such as INSTDONE, where this value is dependent | 798 | * occasions, such as INSTDONE, where this value is dependent |
790 | * on s/ss combo, the read should be done with read_subslice_reg. | 799 | * on s/ss combo, the read should be done with read_subslice_reg. |
791 | */ | 800 | */ |
792 | mcr &= ~mcr_slice_subslice_mask; | 801 | wa_write_masked_or(wal, |
793 | mcr |= intel_calculate_mcr_s_ss_select(dev_priv); | 802 | GEN8_MCR_SELECTOR, |
794 | I915_WRITE(GEN8_MCR_SELECTOR, mcr); | 803 | mcr_slice_subslice_mask, |
804 | intel_calculate_mcr_s_ss_select(dev_priv)); | ||
795 | } | 805 | } |
796 | 806 | ||
797 | static void cnl_gt_workarounds_apply(struct drm_i915_private *dev_priv) | 807 | static void cnl_gt_workarounds_init(struct drm_i915_private *i915) |
798 | { | 808 | { |
799 | wa_init_mcr(dev_priv); | 809 | struct i915_wa_list *wal = &i915->gt_wa_list; |
810 | |||
811 | wa_init_mcr(i915); | ||
800 | 812 | ||
801 | /* WaDisableI2mCycleOnWRPort:cnl (pre-prod) */ | 813 | /* WaDisableI2mCycleOnWRPort:cnl (pre-prod) */ |
802 | if (IS_CNL_REVID(dev_priv, CNL_REVID_B0, CNL_REVID_B0)) | 814 | if (IS_CNL_REVID(i915, CNL_REVID_B0, CNL_REVID_B0)) |
803 | I915_WRITE(GAMT_CHKN_BIT_REG, | 815 | wa_write_or(wal, |
804 | I915_READ(GAMT_CHKN_BIT_REG) | | 816 | GAMT_CHKN_BIT_REG, |
805 | GAMT_CHKN_DISABLE_I2M_CYCLE_ON_WR_PORT); | 817 | GAMT_CHKN_DISABLE_I2M_CYCLE_ON_WR_PORT); |
806 | 818 | ||
807 | /* WaInPlaceDecompressionHang:cnl */ | 819 | /* WaInPlaceDecompressionHang:cnl */ |
808 | I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA, | 820 | wa_write_or(wal, |
809 | I915_READ(GEN9_GAMT_ECO_REG_RW_IA) | | 821 | GEN9_GAMT_ECO_REG_RW_IA, |
810 | GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS); | 822 | GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS); |
811 | |||
812 | /* WaEnablePreemptionGranularityControlByUMD:cnl */ | ||
813 | I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1, | ||
814 | _MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL)); | ||
815 | } | 823 | } |
816 | 824 | ||
817 | static void icl_gt_workarounds_apply(struct drm_i915_private *dev_priv) | 825 | static void icl_gt_workarounds_init(struct drm_i915_private *i915) |
818 | { | 826 | { |
819 | wa_init_mcr(dev_priv); | 827 | struct i915_wa_list *wal = &i915->gt_wa_list; |
820 | 828 | ||
821 | /* This is not a Wa. Enable for better image quality */ | 829 | wa_init_mcr(i915); |
822 | I915_WRITE(_3D_CHICKEN3, | ||
823 | _MASKED_BIT_ENABLE(_3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE)); | ||
824 | 830 | ||
825 | /* WaInPlaceDecompressionHang:icl */ | 831 | /* WaInPlaceDecompressionHang:icl */ |
826 | I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA, | 832 | wa_write_or(wal, |
827 | I915_READ(GEN9_GAMT_ECO_REG_RW_IA) | | 833 | GEN9_GAMT_ECO_REG_RW_IA, |
828 | GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS); | 834 | GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS); |
829 | |||
830 | /* WaPipelineFlushCoherentLines:icl */ | ||
831 | I915_WRITE(GEN8_L3SQCREG4, | ||
832 | I915_READ(GEN8_L3SQCREG4) | | ||
833 | GEN8_LQSC_FLUSH_COHERENT_LINES); | ||
834 | |||
835 | /* Wa_1405543622:icl | ||
836 | * Formerly known as WaGAPZPriorityScheme | ||
837 | */ | ||
838 | I915_WRITE(GEN8_GARBCNTL, | ||
839 | I915_READ(GEN8_GARBCNTL) | | ||
840 | GEN11_ARBITRATION_PRIO_ORDER_MASK); | ||
841 | |||
842 | /* Wa_1604223664:icl | ||
843 | * Formerly known as WaL3BankAddressHashing | ||
844 | */ | ||
845 | I915_WRITE(GEN8_GARBCNTL, | ||
846 | (I915_READ(GEN8_GARBCNTL) & ~GEN11_HASH_CTRL_EXCL_MASK) | | ||
847 | GEN11_HASH_CTRL_EXCL_BIT0); | ||
848 | I915_WRITE(GEN11_GLBLINVL, | ||
849 | (I915_READ(GEN11_GLBLINVL) & ~GEN11_BANK_HASH_ADDR_EXCL_MASK) | | ||
850 | GEN11_BANK_HASH_ADDR_EXCL_BIT0); | ||
851 | 835 | ||
852 | /* WaModifyGamTlbPartitioning:icl */ | 836 | /* WaModifyGamTlbPartitioning:icl */ |
853 | I915_WRITE(GEN11_GACB_PERF_CTRL, | 837 | wa_write_masked_or(wal, |
854 | (I915_READ(GEN11_GACB_PERF_CTRL) & ~GEN11_HASH_CTRL_MASK) | | 838 | GEN11_GACB_PERF_CTRL, |
855 | GEN11_HASH_CTRL_BIT0 | GEN11_HASH_CTRL_BIT4); | 839 | GEN11_HASH_CTRL_MASK, |
856 | 840 | GEN11_HASH_CTRL_BIT0 | GEN11_HASH_CTRL_BIT4); | |
857 | /* Wa_1405733216:icl | ||
858 | * Formerly known as WaDisableCleanEvicts | ||
859 | */ | ||
860 | I915_WRITE(GEN8_L3SQCREG4, | ||
861 | I915_READ(GEN8_L3SQCREG4) | | ||
862 | GEN11_LQSC_CLEAN_EVICT_DISABLE); | ||
863 | 841 | ||
864 | /* Wa_1405766107:icl | 842 | /* Wa_1405766107:icl |
865 | * Formerly known as WaCL2SFHalfMaxAlloc | 843 | * Formerly known as WaCL2SFHalfMaxAlloc |
866 | */ | 844 | */ |
867 | I915_WRITE(GEN11_LSN_UNSLCVC, | 845 | wa_write_or(wal, |
868 | I915_READ(GEN11_LSN_UNSLCVC) | | 846 | GEN11_LSN_UNSLCVC, |
869 | GEN11_LSN_UNSLCVC_GAFS_HALF_SF_MAXALLOC | | 847 | GEN11_LSN_UNSLCVC_GAFS_HALF_SF_MAXALLOC | |
870 | GEN11_LSN_UNSLCVC_GAFS_HALF_CL2_MAXALLOC); | 848 | GEN11_LSN_UNSLCVC_GAFS_HALF_CL2_MAXALLOC); |
871 | 849 | ||
872 | /* Wa_220166154:icl | 850 | /* Wa_220166154:icl |
873 | * Formerly known as WaDisCtxReload | 851 | * Formerly known as WaDisCtxReload |
874 | */ | 852 | */ |
875 | I915_WRITE(GEN8_GAMW_ECO_DEV_RW_IA, | 853 | wa_write_or(wal, |
876 | I915_READ(GEN8_GAMW_ECO_DEV_RW_IA) | | 854 | GEN8_GAMW_ECO_DEV_RW_IA, |
877 | GAMW_ECO_DEV_CTX_RELOAD_DISABLE); | 855 | GAMW_ECO_DEV_CTX_RELOAD_DISABLE); |
878 | 856 | ||
879 | /* Wa_1405779004:icl (pre-prod) */ | 857 | /* Wa_1405779004:icl (pre-prod) */ |
880 | if (IS_ICL_REVID(dev_priv, ICL_REVID_A0, ICL_REVID_A0)) | 858 | if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_A0)) |
881 | I915_WRITE(SLICE_UNIT_LEVEL_CLKGATE, | 859 | wa_write_or(wal, |
882 | I915_READ(SLICE_UNIT_LEVEL_CLKGATE) | | 860 | SLICE_UNIT_LEVEL_CLKGATE, |
883 | MSCUNIT_CLKGATE_DIS); | 861 | MSCUNIT_CLKGATE_DIS); |
884 | 862 | ||
885 | /* Wa_1406680159:icl */ | 863 | /* Wa_1406680159:icl */ |
886 | I915_WRITE(SUBSLICE_UNIT_LEVEL_CLKGATE, | 864 | wa_write_or(wal, |
887 | I915_READ(SUBSLICE_UNIT_LEVEL_CLKGATE) | | 865 | SUBSLICE_UNIT_LEVEL_CLKGATE, |
888 | GWUNIT_CLKGATE_DIS); | 866 | GWUNIT_CLKGATE_DIS); |
889 | |||
890 | /* Wa_1604302699:icl */ | ||
891 | I915_WRITE(GEN10_L3_CHICKEN_MODE_REGISTER, | ||
892 | I915_READ(GEN10_L3_CHICKEN_MODE_REGISTER) | | ||
893 | GEN11_I2M_WRITE_DISABLE); | ||
894 | 867 | ||
895 | /* Wa_1406838659:icl (pre-prod) */ | 868 | /* Wa_1406838659:icl (pre-prod) */ |
896 | if (IS_ICL_REVID(dev_priv, ICL_REVID_A0, ICL_REVID_B0)) | 869 | if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_B0)) |
897 | I915_WRITE(INF_UNIT_LEVEL_CLKGATE, | 870 | wa_write_or(wal, |
898 | I915_READ(INF_UNIT_LEVEL_CLKGATE) | | 871 | INF_UNIT_LEVEL_CLKGATE, |
899 | CGPSF_CLKGATE_DIS); | 872 | CGPSF_CLKGATE_DIS); |
900 | |||
901 | /* WaForwardProgressSoftReset:icl */ | ||
902 | I915_WRITE(GEN10_SCRATCH_LNCF2, | ||
903 | I915_READ(GEN10_SCRATCH_LNCF2) | | ||
904 | PMFLUSHDONE_LNICRSDROP | | ||
905 | PMFLUSH_GAPL3UNBLOCK | | ||
906 | PMFLUSHDONE_LNEBLK); | ||
907 | 873 | ||
908 | /* Wa_1406463099:icl | 874 | /* Wa_1406463099:icl |
909 | * Formerly known as WaGamTlbPendError | 875 | * Formerly known as WaGamTlbPendError |
910 | */ | 876 | */ |
911 | I915_WRITE(GAMT_CHKN_BIT_REG, | 877 | wa_write_or(wal, |
912 | I915_READ(GAMT_CHKN_BIT_REG) | | 878 | GAMT_CHKN_BIT_REG, |
913 | GAMT_CHKN_DISABLE_L3_COH_PIPE); | 879 | GAMT_CHKN_DISABLE_L3_COH_PIPE); |
914 | |||
915 | /* Wa_1406609255:icl (pre-prod) */ | ||
916 | if (IS_ICL_REVID(dev_priv, ICL_REVID_A0, ICL_REVID_B0)) | ||
917 | I915_WRITE(GEN7_SARCHKMD, | ||
918 | I915_READ(GEN7_SARCHKMD) | | ||
919 | GEN7_DISABLE_DEMAND_PREFETCH | | ||
920 | GEN7_DISABLE_SAMPLER_PREFETCH); | ||
921 | } | 880 | } |
922 | 881 | ||
923 | void intel_gt_workarounds_apply(struct drm_i915_private *dev_priv) | 882 | void intel_gt_init_workarounds(struct drm_i915_private *i915) |
924 | { | 883 | { |
925 | if (INTEL_GEN(dev_priv) < 8) | 884 | struct i915_wa_list *wal = &i915->gt_wa_list; |
885 | |||
886 | wa_init_start(wal, "GT"); | ||
887 | |||
888 | if (INTEL_GEN(i915) < 8) | ||
889 | return; | ||
890 | else if (IS_BROADWELL(i915)) | ||
926 | return; | 891 | return; |
927 | else if (IS_BROADWELL(dev_priv)) | 892 | else if (IS_CHERRYVIEW(i915)) |
928 | bdw_gt_workarounds_apply(dev_priv); | 893 | return; |
929 | else if (IS_CHERRYVIEW(dev_priv)) | 894 | else if (IS_SKYLAKE(i915)) |
930 | chv_gt_workarounds_apply(dev_priv); | 895 | skl_gt_workarounds_init(i915); |
931 | else if (IS_SKYLAKE(dev_priv)) | 896 | else if (IS_BROXTON(i915)) |
932 | skl_gt_workarounds_apply(dev_priv); | 897 | bxt_gt_workarounds_init(i915); |
933 | else if (IS_BROXTON(dev_priv)) | 898 | else if (IS_KABYLAKE(i915)) |
934 | bxt_gt_workarounds_apply(dev_priv); | 899 | kbl_gt_workarounds_init(i915); |
935 | else if (IS_KABYLAKE(dev_priv)) | 900 | else if (IS_GEMINILAKE(i915)) |
936 | kbl_gt_workarounds_apply(dev_priv); | 901 | glk_gt_workarounds_init(i915); |
937 | else if (IS_GEMINILAKE(dev_priv)) | 902 | else if (IS_COFFEELAKE(i915)) |
938 | glk_gt_workarounds_apply(dev_priv); | 903 | cfl_gt_workarounds_init(i915); |
939 | else if (IS_COFFEELAKE(dev_priv)) | 904 | else if (IS_CANNONLAKE(i915)) |
940 | cfl_gt_workarounds_apply(dev_priv); | 905 | cnl_gt_workarounds_init(i915); |
941 | else if (IS_CANNONLAKE(dev_priv)) | 906 | else if (IS_ICELAKE(i915)) |
942 | cnl_gt_workarounds_apply(dev_priv); | 907 | icl_gt_workarounds_init(i915); |
943 | else if (IS_ICELAKE(dev_priv)) | ||
944 | icl_gt_workarounds_apply(dev_priv); | ||
945 | else | 908 | else |
946 | MISSING_CASE(INTEL_GEN(dev_priv)); | 909 | MISSING_CASE(INTEL_GEN(i915)); |
910 | |||
911 | wa_init_finish(wal); | ||
947 | } | 912 | } |
948 | 913 | ||
949 | struct whitelist { | 914 | static enum forcewake_domains |
950 | i915_reg_t reg[RING_MAX_NONPRIV_SLOTS]; | 915 | wal_get_fw_for_rmw(struct drm_i915_private *dev_priv, |
951 | unsigned int count; | 916 | const struct i915_wa_list *wal) |
952 | u32 nopid; | 917 | { |
953 | }; | 918 | enum forcewake_domains fw = 0; |
919 | struct i915_wa *wa; | ||
920 | unsigned int i; | ||
954 | 921 | ||
955 | static void whitelist_reg(struct whitelist *w, i915_reg_t reg) | 922 | for (i = 0, wa = wal->list; i < wal->count; i++, wa++) |
923 | fw |= intel_uncore_forcewake_for_reg(dev_priv, | ||
924 | wa->reg, | ||
925 | FW_REG_READ | | ||
926 | FW_REG_WRITE); | ||
927 | |||
928 | return fw; | ||
929 | } | ||
930 | |||
931 | static void | ||
932 | wa_list_apply(struct drm_i915_private *dev_priv, const struct i915_wa_list *wal) | ||
956 | { | 933 | { |
957 | if (GEM_DEBUG_WARN_ON(w->count >= RING_MAX_NONPRIV_SLOTS)) | 934 | enum forcewake_domains fw; |
935 | unsigned long flags; | ||
936 | struct i915_wa *wa; | ||
937 | unsigned int i; | ||
938 | |||
939 | if (!wal->count) | ||
958 | return; | 940 | return; |
959 | 941 | ||
960 | w->reg[w->count++] = reg; | 942 | fw = wal_get_fw_for_rmw(dev_priv, wal); |
943 | |||
944 | spin_lock_irqsave(&dev_priv->uncore.lock, flags); | ||
945 | intel_uncore_forcewake_get__locked(dev_priv, fw); | ||
946 | |||
947 | for (i = 0, wa = wal->list; i < wal->count; i++, wa++) { | ||
948 | u32 val = I915_READ_FW(wa->reg); | ||
949 | |||
950 | val &= ~wa->mask; | ||
951 | val |= wa->val; | ||
952 | |||
953 | I915_WRITE_FW(wa->reg, val); | ||
954 | } | ||
955 | |||
956 | intel_uncore_forcewake_put__locked(dev_priv, fw); | ||
957 | spin_unlock_irqrestore(&dev_priv->uncore.lock, flags); | ||
958 | |||
959 | DRM_DEBUG_DRIVER("Applied %u %s workarounds\n", wal->count, wal->name); | ||
960 | } | ||
961 | |||
962 | void intel_gt_apply_workarounds(struct drm_i915_private *dev_priv) | ||
963 | { | ||
964 | wa_list_apply(dev_priv, &dev_priv->gt_wa_list); | ||
965 | } | ||
966 | |||
967 | static bool | ||
968 | wa_verify(const struct i915_wa *wa, u32 cur, const char *name, const char *from) | ||
969 | { | ||
970 | if ((cur ^ wa->val) & wa->mask) { | ||
971 | DRM_ERROR("%s workaround lost on %s! (%x=%x/%x, expected %x, mask=%x)\n", | ||
972 | name, from, i915_mmio_reg_offset(wa->reg), cur, | ||
973 | cur & wa->mask, wa->val, wa->mask); | ||
974 | |||
975 | return false; | ||
976 | } | ||
977 | |||
978 | return true; | ||
961 | } | 979 | } |
962 | 980 | ||
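[Editor's note] The verification test reads cleanly once unpacked: cur ^ wa->val lights up every bit that differs from the expected value, and ANDing with wa->mask restricts the comparison to the bits the workaround actually owns, so unrelated register fields cannot raise false alarms. As a one-function sketch:

    #include <stdbool.h>

    /* True iff any bit covered by mask differs from the expected value. */
    static bool masked_mismatch(unsigned int cur, unsigned int expected,
                                unsigned int mask)
    {
            return ((cur ^ expected) & mask) != 0;
    }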
963 | static void bdw_whitelist_build(struct whitelist *w) | 981 | static bool wa_list_verify(struct drm_i915_private *dev_priv, |
982 | const struct i915_wa_list *wal, | ||
983 | const char *from) | ||
964 | { | 984 | { |
985 | struct i915_wa *wa; | ||
986 | unsigned int i; | ||
987 | bool ok = true; | ||
988 | |||
989 | for (i = 0, wa = wal->list; i < wal->count; i++, wa++) | ||
990 | ok &= wa_verify(wa, I915_READ(wa->reg), wal->name, from); | ||
991 | |||
992 | return ok; | ||
965 | } | 993 | } |
966 | 994 | ||
967 | static void chv_whitelist_build(struct whitelist *w) | 995 | bool intel_gt_verify_workarounds(struct drm_i915_private *dev_priv, |
996 | const char *from) | ||
968 | { | 997 | { |
998 | return wa_list_verify(dev_priv, &dev_priv->gt_wa_list, from); | ||
969 | } | 999 | } |
970 | 1000 | ||
971 | static void gen9_whitelist_build(struct whitelist *w) | 1001 | static void |
1002 | whitelist_reg(struct i915_wa_list *wal, i915_reg_t reg) | ||
1003 | { | ||
1004 | struct i915_wa wa = { | ||
1005 | .reg = reg | ||
1006 | }; | ||
1007 | |||
1008 | if (GEM_DEBUG_WARN_ON(wal->count >= RING_MAX_NONPRIV_SLOTS)) | ||
1009 | return; | ||
1010 | |||
1011 | _wa_add(wal, &wa); | ||
1012 | } | ||
1013 | |||
1014 | static void gen9_whitelist_build(struct i915_wa_list *w) | ||
972 | { | 1015 | { |
973 | /* WaVFEStateAfterPipeControlwithMediaStateClear:skl,bxt,glk,cfl */ | 1016 | /* WaVFEStateAfterPipeControlwithMediaStateClear:skl,bxt,glk,cfl */ |
974 | whitelist_reg(w, GEN9_CTX_PREEMPT_REG); | 1017 | whitelist_reg(w, GEN9_CTX_PREEMPT_REG); |
@@ -980,7 +1023,7 @@ static void gen9_whitelist_build(struct whitelist *w) | |||
980 | whitelist_reg(w, GEN8_HDC_CHICKEN1); | 1023 | whitelist_reg(w, GEN8_HDC_CHICKEN1); |
981 | } | 1024 | } |
982 | 1025 | ||
983 | static void skl_whitelist_build(struct whitelist *w) | 1026 | static void skl_whitelist_build(struct i915_wa_list *w) |
984 | { | 1027 | { |
985 | gen9_whitelist_build(w); | 1028 | gen9_whitelist_build(w); |
986 | 1029 | ||
@@ -988,12 +1031,12 @@ static void skl_whitelist_build(struct whitelist *w) | |||
988 | whitelist_reg(w, GEN8_L3SQCREG4); | 1031 | whitelist_reg(w, GEN8_L3SQCREG4); |
989 | } | 1032 | } |
990 | 1033 | ||
991 | static void bxt_whitelist_build(struct whitelist *w) | 1034 | static void bxt_whitelist_build(struct i915_wa_list *w) |
992 | { | 1035 | { |
993 | gen9_whitelist_build(w); | 1036 | gen9_whitelist_build(w); |
994 | } | 1037 | } |
995 | 1038 | ||
996 | static void kbl_whitelist_build(struct whitelist *w) | 1039 | static void kbl_whitelist_build(struct i915_wa_list *w) |
997 | { | 1040 | { |
998 | gen9_whitelist_build(w); | 1041 | gen9_whitelist_build(w); |
999 | 1042 | ||
@@ -1001,7 +1044,7 @@ static void kbl_whitelist_build(struct whitelist *w) | |||
1001 | whitelist_reg(w, GEN8_L3SQCREG4); | 1044 | whitelist_reg(w, GEN8_L3SQCREG4); |
1002 | } | 1045 | } |
1003 | 1046 | ||
1004 | static void glk_whitelist_build(struct whitelist *w) | 1047 | static void glk_whitelist_build(struct i915_wa_list *w) |
1005 | { | 1048 | { |
1006 | gen9_whitelist_build(w); | 1049 | gen9_whitelist_build(w); |
1007 | 1050 | ||
@@ -1009,18 +1052,18 @@ static void glk_whitelist_build(struct whitelist *w) | |||
1009 | whitelist_reg(w, GEN9_SLICE_COMMON_ECO_CHICKEN1); | 1052 | whitelist_reg(w, GEN9_SLICE_COMMON_ECO_CHICKEN1); |
1010 | } | 1053 | } |
1011 | 1054 | ||
1012 | static void cfl_whitelist_build(struct whitelist *w) | 1055 | static void cfl_whitelist_build(struct i915_wa_list *w) |
1013 | { | 1056 | { |
1014 | gen9_whitelist_build(w); | 1057 | gen9_whitelist_build(w); |
1015 | } | 1058 | } |
1016 | 1059 | ||
1017 | static void cnl_whitelist_build(struct whitelist *w) | 1060 | static void cnl_whitelist_build(struct i915_wa_list *w) |
1018 | { | 1061 | { |
1019 | /* WaEnablePreemptionGranularityControlByUMD:cnl */ | 1062 | /* WaEnablePreemptionGranularityControlByUMD:cnl */ |
1020 | whitelist_reg(w, GEN8_CS_CHICKEN1); | 1063 | whitelist_reg(w, GEN8_CS_CHICKEN1); |
1021 | } | 1064 | } |
1022 | 1065 | ||
1023 | static void icl_whitelist_build(struct whitelist *w) | 1066 | static void icl_whitelist_build(struct i915_wa_list *w) |
1024 | { | 1067 | { |
1025 | /* WaAllowUMDToModifyHalfSliceChicken7:icl */ | 1068 | /* WaAllowUMDToModifyHalfSliceChicken7:icl */ |
1026 | whitelist_reg(w, GEN9_HALF_SLICE_CHICKEN7); | 1069 | whitelist_reg(w, GEN9_HALF_SLICE_CHICKEN7); |
@@ -1029,22 +1072,21 @@ static void icl_whitelist_build(struct whitelist *w) | |||
1029 | whitelist_reg(w, GEN10_SAMPLER_MODE); | 1072 | whitelist_reg(w, GEN10_SAMPLER_MODE); |
1030 | } | 1073 | } |
1031 | 1074 | ||
1032 | static struct whitelist *whitelist_build(struct intel_engine_cs *engine, | 1075 | void intel_engine_init_whitelist(struct intel_engine_cs *engine) |
1033 | struct whitelist *w) | ||
1034 | { | 1076 | { |
1035 | struct drm_i915_private *i915 = engine->i915; | 1077 | struct drm_i915_private *i915 = engine->i915; |
1078 | struct i915_wa_list *w = &engine->whitelist; | ||
1036 | 1079 | ||
1037 | GEM_BUG_ON(engine->id != RCS); | 1080 | GEM_BUG_ON(engine->id != RCS); |
1038 | 1081 | ||
1039 | w->count = 0; | 1082 | wa_init_start(w, "whitelist"); |
1040 | w->nopid = i915_mmio_reg_offset(RING_NOPID(engine->mmio_base)); | ||
1041 | 1083 | ||
1042 | if (INTEL_GEN(i915) < 8) | 1084 | if (INTEL_GEN(i915) < 8) |
1043 | return NULL; | 1085 | return; |
1044 | else if (IS_BROADWELL(i915)) | 1086 | else if (IS_BROADWELL(i915)) |
1045 | bdw_whitelist_build(w); | 1087 | return; |
1046 | else if (IS_CHERRYVIEW(i915)) | 1088 | else if (IS_CHERRYVIEW(i915)) |
1047 | chv_whitelist_build(w); | 1089 | return; |
1048 | else if (IS_SKYLAKE(i915)) | 1090 | else if (IS_SKYLAKE(i915)) |
1049 | skl_whitelist_build(w); | 1091 | skl_whitelist_build(w); |
1050 | else if (IS_BROXTON(i915)) | 1092 | else if (IS_BROXTON(i915)) |
@@ -1062,39 +1104,180 @@ static struct whitelist *whitelist_build(struct intel_engine_cs *engine, | |||
1062 | else | 1104 | else |
1063 | MISSING_CASE(INTEL_GEN(i915)); | 1105 | MISSING_CASE(INTEL_GEN(i915)); |
1064 | 1106 | ||
1065 | return w; | 1107 | wa_init_finish(w); |
1066 | } | 1108 | } |
1067 | 1109 | ||
1068 | static void whitelist_apply(struct intel_engine_cs *engine, | 1110 | void intel_engine_apply_whitelist(struct intel_engine_cs *engine) |
1069 | const struct whitelist *w) | ||
1070 | { | 1111 | { |
1071 | struct drm_i915_private *dev_priv = engine->i915; | 1112 | struct drm_i915_private *dev_priv = engine->i915; |
1113 | const struct i915_wa_list *wal = &engine->whitelist; | ||
1072 | const u32 base = engine->mmio_base; | 1114 | const u32 base = engine->mmio_base; |
1115 | struct i915_wa *wa; | ||
1073 | unsigned int i; | 1116 | unsigned int i; |
1074 | 1117 | ||
1075 | if (!w) | 1118 | if (!wal->count) |
1076 | return; | 1119 | return; |
1077 | 1120 | ||
1078 | intel_uncore_forcewake_get(engine->i915, FORCEWAKE_ALL); | 1121 | for (i = 0, wa = wal->list; i < wal->count; i++, wa++) |
1079 | 1122 | I915_WRITE(RING_FORCE_TO_NONPRIV(base, i), | |
1080 | for (i = 0; i < w->count; i++) | 1123 | i915_mmio_reg_offset(wa->reg)); |
1081 | I915_WRITE_FW(RING_FORCE_TO_NONPRIV(base, i), | ||
1082 | i915_mmio_reg_offset(w->reg[i])); | ||
1083 | 1124 | ||
1084 | /* And clear the rest just in case of garbage */ | 1125 | /* And clear the rest just in case of garbage */ |
1085 | for (; i < RING_MAX_NONPRIV_SLOTS; i++) | 1126 | for (; i < RING_MAX_NONPRIV_SLOTS; i++) |
1086 | I915_WRITE_FW(RING_FORCE_TO_NONPRIV(base, i), w->nopid); | 1127 | I915_WRITE(RING_FORCE_TO_NONPRIV(base, i), |
1128 | i915_mmio_reg_offset(RING_NOPID(base))); | ||
1087 | 1129 | ||
1088 | intel_uncore_forcewake_put(engine->i915, FORCEWAKE_ALL); | 1130 | DRM_DEBUG_DRIVER("Applied %u %s workarounds\n", wal->count, wal->name); |
1089 | } | 1131 | } |
1090 | 1132 | ||
1091 | void intel_whitelist_workarounds_apply(struct intel_engine_cs *engine) | 1133 | static void rcs_engine_wa_init(struct intel_engine_cs *engine) |
1092 | { | 1134 | { |
1093 | struct whitelist w; | 1135 | struct drm_i915_private *i915 = engine->i915; |
1136 | struct i915_wa_list *wal = &engine->wa_list; | ||
1137 | |||
1138 | if (IS_ICELAKE(i915)) { | ||
1139 | /* This is not a Wa. Enable for better image quality */ | ||
1140 | wa_masked_en(wal, | ||
1141 | _3D_CHICKEN3, | ||
1142 | _3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE); | ||
1143 | |||
1144 | /* WaPipelineFlushCoherentLines:icl */ | ||
1145 | wa_write_or(wal, | ||
1146 | GEN8_L3SQCREG4, | ||
1147 | GEN8_LQSC_FLUSH_COHERENT_LINES); | ||
1148 | |||
1149 | /* | ||
1150 | * Wa_1405543622:icl | ||
1151 | * Formerly known as WaGAPZPriorityScheme | ||
1152 | */ | ||
1153 | wa_write_or(wal, | ||
1154 | GEN8_GARBCNTL, | ||
1155 | GEN11_ARBITRATION_PRIO_ORDER_MASK); | ||
1156 | |||
1157 | /* | ||
1158 | * Wa_1604223664:icl | ||
1159 | * Formerly known as WaL3BankAddressHashing | ||
1160 | */ | ||
1161 | wa_write_masked_or(wal, | ||
1162 | GEN8_GARBCNTL, | ||
1163 | GEN11_HASH_CTRL_EXCL_MASK, | ||
1164 | GEN11_HASH_CTRL_EXCL_BIT0); | ||
1165 | wa_write_masked_or(wal, | ||
1166 | GEN11_GLBLINVL, | ||
1167 | GEN11_BANK_HASH_ADDR_EXCL_MASK, | ||
1168 | GEN11_BANK_HASH_ADDR_EXCL_BIT0); | ||
1094 | 1169 | ||
1095 | whitelist_apply(engine, whitelist_build(engine, &w)); | 1170 | /* |
1171 | * Wa_1405733216:icl | ||
1172 | * Formerly known as WaDisableCleanEvicts | ||
1173 | */ | ||
1174 | wa_write_or(wal, | ||
1175 | GEN8_L3SQCREG4, | ||
1176 | GEN11_LQSC_CLEAN_EVICT_DISABLE); | ||
1177 | |||
1178 | /* WaForwardProgressSoftReset:icl */ | ||
1179 | wa_write_or(wal, | ||
1180 | GEN10_SCRATCH_LNCF2, | ||
1181 | PMFLUSHDONE_LNICRSDROP | | ||
1182 | PMFLUSH_GAPL3UNBLOCK | | ||
1183 | PMFLUSHDONE_LNEBLK); | ||
1184 | |||
1185 | /* Wa_1406609255:icl (pre-prod) */ | ||
1186 | if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_B0)) | ||
1187 | wa_write_or(wal, | ||
1188 | GEN7_SARCHKMD, | ||
1189 | GEN7_DISABLE_DEMAND_PREFETCH | | ||
1190 | GEN7_DISABLE_SAMPLER_PREFETCH); | ||
1191 | } | ||
1192 | |||
1193 | if (IS_GEN9(i915) || IS_CANNONLAKE(i915)) { | ||
1194 | /* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl,cfl,cnl */ | ||
1195 | wa_masked_en(wal, | ||
1196 | GEN7_FF_SLICE_CS_CHICKEN1, | ||
1197 | GEN9_FFSC_PERCTX_PREEMPT_CTRL); | ||
1198 | } | ||
1199 | |||
1200 | if (IS_SKYLAKE(i915) || IS_KABYLAKE(i915) || IS_COFFEELAKE(i915)) { | ||
1201 | /* WaEnableGapsTsvCreditFix:skl,kbl,cfl */ | ||
1202 | wa_write_or(wal, | ||
1203 | GEN8_GARBCNTL, | ||
1204 | GEN9_GAPS_TSV_CREDIT_DISABLE); | ||
1205 | } | ||
1206 | |||
1207 | if (IS_BROXTON(i915)) { | ||
1208 | /* WaDisablePooledEuLoadBalancingFix:bxt */ | ||
1209 | wa_masked_en(wal, | ||
1210 | FF_SLICE_CS_CHICKEN2, | ||
1211 | GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE); | ||
1212 | } | ||
1213 | |||
1214 | if (IS_GEN9(i915)) { | ||
1215 | /* WaContextSwitchWithConcurrentTLBInvalidate:skl,bxt,kbl,glk,cfl */ | ||
1216 | wa_masked_en(wal, | ||
1217 | GEN9_CSFE_CHICKEN1_RCS, | ||
1218 | GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE); | ||
1219 | |||
1220 | /* WaEnableLbsSlaRetryTimerDecrement:skl,bxt,kbl,glk,cfl */ | ||
1221 | wa_write_or(wal, | ||
1222 | BDW_SCRATCH1, | ||
1223 | GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE); | ||
1224 | |||
1225 | /* WaProgramL3SqcReg1DefaultForPerf:bxt,glk */ | ||
1226 | if (IS_GEN9_LP(i915)) | ||
1227 | wa_write_masked_or(wal, | ||
1228 | GEN8_L3SQCREG1, | ||
1229 | L3_PRIO_CREDITS_MASK, | ||
1230 | L3_GENERAL_PRIO_CREDITS(62) | | ||
1231 | L3_HIGH_PRIO_CREDITS(2)); | ||
1232 | |||
1233 | /* WaOCLCoherentLineFlush:skl,bxt,kbl,cfl */ | ||
1234 | wa_write_or(wal, | ||
1235 | GEN8_L3SQCREG4, | ||
1236 | GEN8_LQSC_FLUSH_COHERENT_LINES); | ||
1237 | } | ||
1238 | } | ||
1239 | |||
1240 | static void xcs_engine_wa_init(struct intel_engine_cs *engine) | ||
1241 | { | ||
1242 | struct drm_i915_private *i915 = engine->i915; | ||
1243 | struct i915_wa_list *wal = &engine->wa_list; | ||
1244 | |||
1245 | /* WaKBLVECSSemaphoreWaitPoll:kbl */ | ||
1246 | if (IS_KBL_REVID(i915, KBL_REVID_A0, KBL_REVID_E0)) { | ||
1247 | wa_write(wal, | ||
1248 | RING_SEMA_WAIT_POLL(engine->mmio_base), | ||
1249 | 1); | ||
1250 | } | ||
1251 | } | ||
1252 | |||
1253 | void intel_engine_init_workarounds(struct intel_engine_cs *engine) | ||
1254 | { | ||
1255 | struct i915_wa_list *wal = &engine->wa_list; | ||
1256 | |||
1257 | if (GEM_WARN_ON(INTEL_GEN(engine->i915) < 8)) | ||
1258 | return; | ||
1259 | |||
1260 | wa_init_start(wal, engine->name); | ||
1261 | |||
1262 | if (engine->id == RCS) | ||
1263 | rcs_engine_wa_init(engine); | ||
1264 | else | ||
1265 | xcs_engine_wa_init(engine); | ||
1266 | |||
1267 | wa_init_finish(wal); | ||
1268 | } | ||
1269 | |||
1270 | void intel_engine_apply_workarounds(struct intel_engine_cs *engine) | ||
1271 | { | ||
1272 | wa_list_apply(engine->i915, &engine->wa_list); | ||
1096 | } | 1273 | } |
1097 | 1274 | ||
1098 | #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) | 1275 | #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) |
1276 | static bool intel_engine_verify_workarounds(struct intel_engine_cs *engine, | ||
1277 | const char *from) | ||
1278 | { | ||
1279 | return wa_list_verify(engine->i915, &engine->wa_list, from); | ||
1280 | } | ||
1281 | |||
1099 | #include "selftests/intel_workarounds.c" | 1282 | #include "selftests/intel_workarounds.c" |
1100 | #endif | 1283 | #endif |
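
The refactor above splits engine workaround handling into a build phase and an apply phase: each engine records its registers into an i915_wa_list once, and the recorded list is replayed whenever the engine is (re)initialised, for example after a reset. A minimal sketch of the intended call flow, assuming a hypothetical setup_engine() caller (the wa_init_start()/wa_init_finish() list helpers are static to intel_workarounds.c and only partially visible in the hunks above):

    /* Sketch only: setup_engine() is a hypothetical caller illustrating
     * the init-once / apply-on-demand split of the new API. */
    static int setup_engine(struct intel_engine_cs *engine)
    {
            /* Build phase: run once at driver load; records the
             * workarounds into engine->wa_list / engine->whitelist. */
            intel_engine_init_workarounds(engine);
            intel_engine_init_whitelist(engine);

            /* Apply phase: replay the recorded lists; safe to repeat
             * whenever the engine is reinitialised. */
            intel_engine_apply_workarounds(engine);
            intel_engine_apply_whitelist(engine);

            return 0;
    }
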
diff --git a/drivers/gpu/drm/i915/intel_workarounds.h b/drivers/gpu/drm/i915/intel_workarounds.h index b11d0623e626..7c734714b05e 100644 --- a/drivers/gpu/drm/i915/intel_workarounds.h +++ b/drivers/gpu/drm/i915/intel_workarounds.h | |||
@@ -7,11 +7,39 @@ | |||
7 | #ifndef _I915_WORKAROUNDS_H_ | 7 | #ifndef _I915_WORKAROUNDS_H_ |
8 | #define _I915_WORKAROUNDS_H_ | 8 | #define _I915_WORKAROUNDS_H_ |
9 | 9 | ||
10 | int intel_ctx_workarounds_init(struct drm_i915_private *dev_priv); | 10 | #include <linux/slab.h> |
11 | int intel_ctx_workarounds_emit(struct i915_request *rq); | ||
12 | 11 | ||
13 | void intel_gt_workarounds_apply(struct drm_i915_private *dev_priv); | 12 | struct i915_wa { |
13 | i915_reg_t reg; | ||
14 | u32 mask; | ||
15 | u32 val; | ||
16 | }; | ||
14 | 17 | ||
15 | void intel_whitelist_workarounds_apply(struct intel_engine_cs *engine); | 18 | struct i915_wa_list { |
19 | const char *name; | ||
20 | struct i915_wa *list; | ||
21 | unsigned int count; | ||
22 | unsigned int wa_count; | ||
23 | }; | ||
24 | |||
25 | static inline void intel_wa_list_free(struct i915_wa_list *wal) | ||
26 | { | ||
27 | kfree(wal->list); | ||
28 | memset(wal, 0, sizeof(*wal)); | ||
29 | } | ||
30 | |||
31 | void intel_engine_init_ctx_wa(struct intel_engine_cs *engine); | ||
32 | int intel_engine_emit_ctx_wa(struct i915_request *rq); | ||
33 | |||
34 | void intel_gt_init_workarounds(struct drm_i915_private *dev_priv); | ||
35 | void intel_gt_apply_workarounds(struct drm_i915_private *dev_priv); | ||
36 | bool intel_gt_verify_workarounds(struct drm_i915_private *dev_priv, | ||
37 | const char *from); | ||
38 | |||
39 | void intel_engine_init_whitelist(struct intel_engine_cs *engine); | ||
40 | void intel_engine_apply_whitelist(struct intel_engine_cs *engine); | ||
41 | |||
42 | void intel_engine_init_workarounds(struct intel_engine_cs *engine); | ||
43 | void intel_engine_apply_workarounds(struct intel_engine_cs *engine); | ||
16 | 44 | ||
17 | #endif | 45 | #endif |
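
Each i915_wa entry pairs a register with a mask and a value, so one list format can express plain writes, masked-bit writes and read-modify-write updates. The apply routine itself (wa_list_apply()) sits outside the hunks shown here, so the following is a sketch of the per-entry semantics under the natural reading of mask/val, not the exact kernel code:

    /* Sketch: apply one i915_wa entry, assuming read-modify-write
     * semantics: clear the bits in wa->mask, then OR in wa->val. */
    static void wa_apply_one(struct drm_i915_private *dev_priv,
                             const struct i915_wa *wa)
    {
            u32 val = I915_READ(wa->reg);

            val &= ~wa->mask;
            val |= wa->val;

            I915_WRITE(wa->reg, val);
    }
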
diff --git a/drivers/gpu/drm/i915/selftests/igt_reset.c b/drivers/gpu/drm/i915/selftests/igt_reset.c new file mode 100644 index 000000000000..208a966da8ca --- /dev/null +++ b/drivers/gpu/drm/i915/selftests/igt_reset.c | |||
@@ -0,0 +1,44 @@ | |||
1 | /* | ||
2 | * SPDX-License-Identifier: MIT | ||
3 | * | ||
4 | * Copyright © 2018 Intel Corporation | ||
5 | */ | ||
6 | |||
7 | #include "igt_reset.h" | ||
8 | |||
9 | #include "../i915_drv.h" | ||
10 | #include "../intel_ringbuffer.h" | ||
11 | |||
12 | void igt_global_reset_lock(struct drm_i915_private *i915) | ||
13 | { | ||
14 | struct intel_engine_cs *engine; | ||
15 | enum intel_engine_id id; | ||
16 | |||
17 | pr_debug("%s: current gpu_error=%08lx\n", | ||
18 | __func__, i915->gpu_error.flags); | ||
19 | |||
20 | while (test_and_set_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags)) | ||
21 | wait_event(i915->gpu_error.reset_queue, | ||
22 | !test_bit(I915_RESET_BACKOFF, | ||
23 | &i915->gpu_error.flags)); | ||
24 | |||
25 | for_each_engine(engine, i915, id) { | ||
26 | while (test_and_set_bit(I915_RESET_ENGINE + id, | ||
27 | &i915->gpu_error.flags)) | ||
28 | wait_on_bit(&i915->gpu_error.flags, | ||
29 | I915_RESET_ENGINE + id, | ||
30 | TASK_UNINTERRUPTIBLE); | ||
31 | } | ||
32 | } | ||
33 | |||
34 | void igt_global_reset_unlock(struct drm_i915_private *i915) | ||
35 | { | ||
36 | struct intel_engine_cs *engine; | ||
37 | enum intel_engine_id id; | ||
38 | |||
39 | for_each_engine(engine, i915, id) | ||
40 | clear_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags); | ||
41 | |||
42 | clear_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags); | ||
43 | wake_up_all(&i915->gpu_error.reset_queue); | ||
44 | } | ||
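
igt_global_reset_lock() serialises a selftest against both the full-GPU and per-engine reset paths by claiming every reset bit in gpu_error.flags, waiting out any reset already in flight; igt_global_reset_unlock() drops the bits and wakes waiters on the reset queue. The callers below bracket their reset exercises with it, roughly as follows (sketch, error handling and struct_mutex elided):

    /* Sketch of the reset-lock bracket used by the reset selftests. */
    igt_global_reset_lock(i915);

    set_bit(I915_RESET_HANDOFF, &i915->gpu_error.flags);
    i915_reset(i915, ALL_ENGINES, NULL);    /* exercise a full reset */

    igt_global_reset_unlock(i915);
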
diff --git a/drivers/gpu/drm/i915/selftests/igt_reset.h b/drivers/gpu/drm/i915/selftests/igt_reset.h new file mode 100644 index 000000000000..5f0234d045d5 --- /dev/null +++ b/drivers/gpu/drm/i915/selftests/igt_reset.h | |||
@@ -0,0 +1,15 @@ | |||
1 | /* | ||
2 | * SPDX-License-Identifier: MIT | ||
3 | * | ||
4 | * Copyright © 2018 Intel Corporation | ||
5 | */ | ||
6 | |||
7 | #ifndef __I915_SELFTESTS_IGT_RESET_H__ | ||
8 | #define __I915_SELFTESTS_IGT_RESET_H__ | ||
9 | |||
10 | #include "../i915_drv.h" | ||
11 | |||
12 | void igt_global_reset_lock(struct drm_i915_private *i915); | ||
13 | void igt_global_reset_unlock(struct drm_i915_private *i915); | ||
14 | |||
15 | #endif | ||
diff --git a/drivers/gpu/drm/i915/selftests/igt_spinner.c b/drivers/gpu/drm/i915/selftests/igt_spinner.c new file mode 100644 index 000000000000..8cd34f6e6859 --- /dev/null +++ b/drivers/gpu/drm/i915/selftests/igt_spinner.c | |||
@@ -0,0 +1,199 @@ | |||
1 | /* | ||
2 | * SPDX-License-Identifier: MIT | ||
3 | * | ||
4 | * Copyright © 2018 Intel Corporation | ||
5 | */ | ||
6 | |||
7 | #include "igt_spinner.h" | ||
8 | |||
9 | int igt_spinner_init(struct igt_spinner *spin, struct drm_i915_private *i915) | ||
10 | { | ||
11 | unsigned int mode; | ||
12 | void *vaddr; | ||
13 | int err; | ||
14 | |||
15 | GEM_BUG_ON(INTEL_GEN(i915) < 8); | ||
16 | |||
17 | memset(spin, 0, sizeof(*spin)); | ||
18 | spin->i915 = i915; | ||
19 | |||
20 | spin->hws = i915_gem_object_create_internal(i915, PAGE_SIZE); | ||
21 | if (IS_ERR(spin->hws)) { | ||
22 | err = PTR_ERR(spin->hws); | ||
23 | goto err; | ||
24 | } | ||
25 | |||
26 | spin->obj = i915_gem_object_create_internal(i915, PAGE_SIZE); | ||
27 | if (IS_ERR(spin->obj)) { | ||
28 | err = PTR_ERR(spin->obj); | ||
29 | goto err_hws; | ||
30 | } | ||
31 | |||
32 | i915_gem_object_set_cache_level(spin->hws, I915_CACHE_LLC); | ||
33 | vaddr = i915_gem_object_pin_map(spin->hws, I915_MAP_WB); | ||
34 | if (IS_ERR(vaddr)) { | ||
35 | err = PTR_ERR(vaddr); | ||
36 | goto err_obj; | ||
37 | } | ||
38 | spin->seqno = memset(vaddr, 0xff, PAGE_SIZE); | ||
39 | |||
40 | mode = i915_coherent_map_type(i915); | ||
41 | vaddr = i915_gem_object_pin_map(spin->obj, mode); | ||
42 | if (IS_ERR(vaddr)) { | ||
43 | err = PTR_ERR(vaddr); | ||
44 | goto err_unpin_hws; | ||
45 | } | ||
46 | spin->batch = vaddr; | ||
47 | |||
48 | return 0; | ||
49 | |||
50 | err_unpin_hws: | ||
51 | i915_gem_object_unpin_map(spin->hws); | ||
52 | err_obj: | ||
53 | i915_gem_object_put(spin->obj); | ||
54 | err_hws: | ||
55 | i915_gem_object_put(spin->hws); | ||
56 | err: | ||
57 | return err; | ||
58 | } | ||
59 | |||
60 | static unsigned int seqno_offset(u64 fence) | ||
61 | { | ||
62 | return offset_in_page(sizeof(u32) * fence); | ||
63 | } | ||
64 | |||
65 | static u64 hws_address(const struct i915_vma *hws, | ||
66 | const struct i915_request *rq) | ||
67 | { | ||
68 | return hws->node.start + seqno_offset(rq->fence.context); | ||
69 | } | ||
70 | |||
71 | static int emit_recurse_batch(struct igt_spinner *spin, | ||
72 | struct i915_request *rq, | ||
73 | u32 arbitration_command) | ||
74 | { | ||
75 | struct i915_address_space *vm = &rq->gem_context->ppgtt->vm; | ||
76 | struct i915_vma *hws, *vma; | ||
77 | u32 *batch; | ||
78 | int err; | ||
79 | |||
80 | vma = i915_vma_instance(spin->obj, vm, NULL); | ||
81 | if (IS_ERR(vma)) | ||
82 | return PTR_ERR(vma); | ||
83 | |||
84 | hws = i915_vma_instance(spin->hws, vm, NULL); | ||
85 | if (IS_ERR(hws)) | ||
86 | return PTR_ERR(hws); | ||
87 | |||
88 | err = i915_vma_pin(vma, 0, 0, PIN_USER); | ||
89 | if (err) | ||
90 | return err; | ||
91 | |||
92 | err = i915_vma_pin(hws, 0, 0, PIN_USER); | ||
93 | if (err) | ||
94 | goto unpin_vma; | ||
95 | |||
96 | err = i915_vma_move_to_active(vma, rq, 0); | ||
97 | if (err) | ||
98 | goto unpin_hws; | ||
99 | |||
100 | if (!i915_gem_object_has_active_reference(vma->obj)) { | ||
101 | i915_gem_object_get(vma->obj); | ||
102 | i915_gem_object_set_active_reference(vma->obj); | ||
103 | } | ||
104 | |||
105 | err = i915_vma_move_to_active(hws, rq, 0); | ||
106 | if (err) | ||
107 | goto unpin_hws; | ||
108 | |||
109 | if (!i915_gem_object_has_active_reference(hws->obj)) { | ||
110 | i915_gem_object_get(hws->obj); | ||
111 | i915_gem_object_set_active_reference(hws->obj); | ||
112 | } | ||
113 | |||
114 | batch = spin->batch; | ||
115 | |||
116 | *batch++ = MI_STORE_DWORD_IMM_GEN4; | ||
117 | *batch++ = lower_32_bits(hws_address(hws, rq)); | ||
118 | *batch++ = upper_32_bits(hws_address(hws, rq)); | ||
119 | *batch++ = rq->fence.seqno; | ||
120 | |||
121 | *batch++ = arbitration_command; | ||
122 | |||
123 | *batch++ = MI_BATCH_BUFFER_START | 1 << 8 | 1; | ||
124 | *batch++ = lower_32_bits(vma->node.start); | ||
125 | *batch++ = upper_32_bits(vma->node.start); | ||
126 | *batch++ = MI_BATCH_BUFFER_END; /* not reached */ | ||
127 | |||
128 | i915_gem_chipset_flush(spin->i915); | ||
129 | |||
130 | err = rq->engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, 0); | ||
131 | |||
132 | unpin_hws: | ||
133 | i915_vma_unpin(hws); | ||
134 | unpin_vma: | ||
135 | i915_vma_unpin(vma); | ||
136 | return err; | ||
137 | } | ||
138 | |||
139 | struct i915_request * | ||
140 | igt_spinner_create_request(struct igt_spinner *spin, | ||
141 | struct i915_gem_context *ctx, | ||
142 | struct intel_engine_cs *engine, | ||
143 | u32 arbitration_command) | ||
144 | { | ||
145 | struct i915_request *rq; | ||
146 | int err; | ||
147 | |||
148 | rq = i915_request_alloc(engine, ctx); | ||
149 | if (IS_ERR(rq)) | ||
150 | return rq; | ||
151 | |||
152 | err = emit_recurse_batch(spin, rq, arbitration_command); | ||
153 | if (err) { | ||
154 | i915_request_add(rq); | ||
155 | return ERR_PTR(err); | ||
156 | } | ||
157 | |||
158 | return rq; | ||
159 | } | ||
160 | |||
161 | static u32 | ||
162 | hws_seqno(const struct igt_spinner *spin, const struct i915_request *rq) | ||
163 | { | ||
164 | u32 *seqno = spin->seqno + seqno_offset(rq->fence.context); | ||
165 | |||
166 | return READ_ONCE(*seqno); | ||
167 | } | ||
168 | |||
169 | void igt_spinner_end(struct igt_spinner *spin) | ||
170 | { | ||
171 | *spin->batch = MI_BATCH_BUFFER_END; | ||
172 | i915_gem_chipset_flush(spin->i915); | ||
173 | } | ||
174 | |||
175 | void igt_spinner_fini(struct igt_spinner *spin) | ||
176 | { | ||
177 | igt_spinner_end(spin); | ||
178 | |||
179 | i915_gem_object_unpin_map(spin->obj); | ||
180 | i915_gem_object_put(spin->obj); | ||
181 | |||
182 | i915_gem_object_unpin_map(spin->hws); | ||
183 | i915_gem_object_put(spin->hws); | ||
184 | } | ||
185 | |||
186 | bool igt_wait_for_spinner(struct igt_spinner *spin, struct i915_request *rq) | ||
187 | { | ||
188 | if (!wait_event_timeout(rq->execute, | ||
189 | READ_ONCE(rq->global_seqno), | ||
190 | msecs_to_jiffies(10))) | ||
191 | return false; | ||
192 | |||
193 | return !(wait_for_us(i915_seqno_passed(hws_seqno(spin, rq), | ||
194 | rq->fence.seqno), | ||
195 | 10) && | ||
196 | wait_for(i915_seqno_passed(hws_seqno(spin, rq), | ||
197 | rq->fence.seqno), | ||
198 | 1000)); | ||
199 | } | ||
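
The batch emitted by emit_recurse_batch() stores the request's seqno into a per-context slot of the HWS page (which the CPU polls via hws_seqno()), executes the caller-chosen arbitration command, then branches back to its own start. igt_spinner_end() stops it by rewriting the first dword so the next pass through the loop terminates. Annotated layout, derived from the code above:

    /* dword 0: MI_STORE_DWORD_IMM_GEN4   -- rewritten to
     *                                       MI_BATCH_BUFFER_END by
     *                                       igt_spinner_end()
     * dword 1: lower_32_bits(hws addr)   -- per-context seqno slot
     * dword 2: upper_32_bits(hws addr)
     * dword 3: rq->fence.seqno           -- breadcrumb polled by
     *                                       hws_seqno()
     * dword 4: arbitration command       -- MI_ARB_CHECK adds a
     *                                       preemption point, MI_NOOP
     *                                       does not
     * dword 5: MI_BATCH_BUFFER_START | 1 << 8 | 1
     * dword 6: lower_32_bits(batch)      -- self-branch: spin forever
     * dword 7: upper_32_bits(batch)
     * dword 8: MI_BATCH_BUFFER_END       -- not reached while spinning
     */
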
diff --git a/drivers/gpu/drm/i915/selftests/igt_spinner.h b/drivers/gpu/drm/i915/selftests/igt_spinner.h new file mode 100644 index 000000000000..391777c76dc7 --- /dev/null +++ b/drivers/gpu/drm/i915/selftests/igt_spinner.h | |||
@@ -0,0 +1,37 @@ | |||
1 | /* | ||
2 | * SPDX-License-Identifier: MIT | ||
3 | * | ||
4 | * Copyright © 2018 Intel Corporation | ||
5 | */ | ||
6 | |||
7 | #ifndef __I915_SELFTESTS_IGT_SPINNER_H__ | ||
8 | #define __I915_SELFTESTS_IGT_SPINNER_H__ | ||
9 | |||
10 | #include "../i915_selftest.h" | ||
11 | |||
12 | #include "../i915_drv.h" | ||
13 | #include "../i915_request.h" | ||
14 | #include "../intel_ringbuffer.h" | ||
15 | #include "../i915_gem_context.h" | ||
16 | |||
17 | struct igt_spinner { | ||
18 | struct drm_i915_private *i915; | ||
19 | struct drm_i915_gem_object *hws; | ||
20 | struct drm_i915_gem_object *obj; | ||
21 | u32 *batch; | ||
22 | void *seqno; | ||
23 | }; | ||
24 | |||
25 | int igt_spinner_init(struct igt_spinner *spin, struct drm_i915_private *i915); | ||
26 | void igt_spinner_fini(struct igt_spinner *spin); | ||
27 | |||
28 | struct i915_request * | ||
29 | igt_spinner_create_request(struct igt_spinner *spin, | ||
30 | struct i915_gem_context *ctx, | ||
31 | struct intel_engine_cs *engine, | ||
32 | u32 arbitration_command); | ||
33 | void igt_spinner_end(struct igt_spinner *spin); | ||
34 | |||
35 | bool igt_wait_for_spinner(struct igt_spinner *spin, struct i915_request *rq); | ||
36 | |||
37 | #endif | ||
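
Canonical lifecycle of the igt_spinner API, matching how the live_* tests below consume it (minimal sketch; assumes the caller holds struct_mutex and has ctx, engine and i915 in scope, as the real callers do):

    struct igt_spinner spin;
    struct i915_request *rq;
    int err;

    err = igt_spinner_init(&spin, i915);
    if (err)
            return err;

    rq = igt_spinner_create_request(&spin, ctx, engine, MI_ARB_CHECK);
    if (IS_ERR(rq)) {
            igt_spinner_fini(&spin);
            return PTR_ERR(rq);
    }

    i915_request_add(rq);
    if (!igt_wait_for_spinner(&spin, rq))   /* failed to start? */
            i915_gem_set_wedged(i915);

    igt_spinner_end(&spin);         /* unblock the batch */
    igt_spinner_fini(&spin);        /* unpin and release both objects */
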
diff --git a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c index defe671130ab..40efbed611de 100644 --- a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c +++ b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c | |||
@@ -27,6 +27,7 @@ | |||
27 | #include "../i915_selftest.h" | 27 | #include "../i915_selftest.h" |
28 | #include "i915_random.h" | 28 | #include "i915_random.h" |
29 | #include "igt_flush_test.h" | 29 | #include "igt_flush_test.h" |
30 | #include "igt_reset.h" | ||
30 | #include "igt_wedge_me.h" | 31 | #include "igt_wedge_me.h" |
31 | 32 | ||
32 | #include "mock_context.h" | 33 | #include "mock_context.h" |
@@ -308,6 +309,7 @@ static int igt_hang_sanitycheck(void *arg) | |||
308 | goto unlock; | 309 | goto unlock; |
309 | 310 | ||
310 | for_each_engine(engine, i915, id) { | 311 | for_each_engine(engine, i915, id) { |
312 | struct igt_wedge_me w; | ||
311 | long timeout; | 313 | long timeout; |
312 | 314 | ||
313 | if (!intel_engine_can_store_dword(engine)) | 315 | if (!intel_engine_can_store_dword(engine)) |
@@ -328,9 +330,14 @@ static int igt_hang_sanitycheck(void *arg) | |||
328 | 330 | ||
329 | i915_request_add(rq); | 331 | i915_request_add(rq); |
330 | 332 | ||
331 | timeout = i915_request_wait(rq, | 333 | timeout = 0; |
332 | I915_WAIT_LOCKED, | 334 | igt_wedge_on_timeout(&w, i915, HZ / 10 /* 100ms timeout */) |
333 | MAX_SCHEDULE_TIMEOUT); | 335 | timeout = i915_request_wait(rq, |
336 | I915_WAIT_LOCKED, | ||
337 | MAX_SCHEDULE_TIMEOUT); | ||
338 | if (i915_terminally_wedged(&i915->gpu_error)) | ||
339 | timeout = -EIO; | ||
340 | |||
334 | i915_request_put(rq); | 341 | i915_request_put(rq); |
335 | 342 | ||
336 | if (timeout < 0) { | 343 | if (timeout < 0) { |
@@ -348,40 +355,6 @@ unlock: | |||
348 | return err; | 355 | return err; |
349 | } | 356 | } |
350 | 357 | ||
351 | static void global_reset_lock(struct drm_i915_private *i915) | ||
352 | { | ||
353 | struct intel_engine_cs *engine; | ||
354 | enum intel_engine_id id; | ||
355 | |||
356 | pr_debug("%s: current gpu_error=%08lx\n", | ||
357 | __func__, i915->gpu_error.flags); | ||
358 | |||
359 | while (test_and_set_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags)) | ||
360 | wait_event(i915->gpu_error.reset_queue, | ||
361 | !test_bit(I915_RESET_BACKOFF, | ||
362 | &i915->gpu_error.flags)); | ||
363 | |||
364 | for_each_engine(engine, i915, id) { | ||
365 | while (test_and_set_bit(I915_RESET_ENGINE + id, | ||
366 | &i915->gpu_error.flags)) | ||
367 | wait_on_bit(&i915->gpu_error.flags, | ||
368 | I915_RESET_ENGINE + id, | ||
369 | TASK_UNINTERRUPTIBLE); | ||
370 | } | ||
371 | } | ||
372 | |||
373 | static void global_reset_unlock(struct drm_i915_private *i915) | ||
374 | { | ||
375 | struct intel_engine_cs *engine; | ||
376 | enum intel_engine_id id; | ||
377 | |||
378 | for_each_engine(engine, i915, id) | ||
379 | clear_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags); | ||
380 | |||
381 | clear_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags); | ||
382 | wake_up_all(&i915->gpu_error.reset_queue); | ||
383 | } | ||
384 | |||
385 | static int igt_global_reset(void *arg) | 358 | static int igt_global_reset(void *arg) |
386 | { | 359 | { |
387 | struct drm_i915_private *i915 = arg; | 360 | struct drm_i915_private *i915 = arg; |
@@ -390,7 +363,7 @@ static int igt_global_reset(void *arg) | |||
390 | 363 | ||
391 | /* Check that we can issue a global GPU reset */ | 364 | /* Check that we can issue a global GPU reset */ |
392 | 365 | ||
393 | global_reset_lock(i915); | 366 | igt_global_reset_lock(i915); |
394 | set_bit(I915_RESET_HANDOFF, &i915->gpu_error.flags); | 367 | set_bit(I915_RESET_HANDOFF, &i915->gpu_error.flags); |
395 | 368 | ||
396 | mutex_lock(&i915->drm.struct_mutex); | 369 | mutex_lock(&i915->drm.struct_mutex); |
@@ -405,7 +378,7 @@ static int igt_global_reset(void *arg) | |||
405 | mutex_unlock(&i915->drm.struct_mutex); | 378 | mutex_unlock(&i915->drm.struct_mutex); |
406 | 379 | ||
407 | GEM_BUG_ON(test_bit(I915_RESET_HANDOFF, &i915->gpu_error.flags)); | 380 | GEM_BUG_ON(test_bit(I915_RESET_HANDOFF, &i915->gpu_error.flags)); |
408 | global_reset_unlock(i915); | 381 | igt_global_reset_unlock(i915); |
409 | 382 | ||
410 | if (i915_terminally_wedged(&i915->gpu_error)) | 383 | if (i915_terminally_wedged(&i915->gpu_error)) |
411 | err = -EIO; | 384 | err = -EIO; |
@@ -936,7 +909,7 @@ static int igt_reset_wait(void *arg) | |||
936 | 909 | ||
937 | /* Check that we detect a stuck waiter and issue a reset */ | 910 | /* Check that we detect a stuck waiter and issue a reset */ |
938 | 911 | ||
939 | global_reset_lock(i915); | 912 | igt_global_reset_lock(i915); |
940 | 913 | ||
941 | mutex_lock(&i915->drm.struct_mutex); | 914 | mutex_lock(&i915->drm.struct_mutex); |
942 | err = hang_init(&h, i915); | 915 | err = hang_init(&h, i915); |
@@ -988,7 +961,7 @@ fini: | |||
988 | hang_fini(&h); | 961 | hang_fini(&h); |
989 | unlock: | 962 | unlock: |
990 | mutex_unlock(&i915->drm.struct_mutex); | 963 | mutex_unlock(&i915->drm.struct_mutex); |
991 | global_reset_unlock(i915); | 964 | igt_global_reset_unlock(i915); |
992 | 965 | ||
993 | if (i915_terminally_wedged(&i915->gpu_error)) | 966 | if (i915_terminally_wedged(&i915->gpu_error)) |
994 | return -EIO; | 967 | return -EIO; |
@@ -1066,7 +1039,7 @@ static int __igt_reset_evict_vma(struct drm_i915_private *i915, | |||
1066 | 1039 | ||
1067 | /* Check that we can recover an unbind stuck on a hanging request */ | 1040 | /* Check that we can recover an unbind stuck on a hanging request */ |
1068 | 1041 | ||
1069 | global_reset_lock(i915); | 1042 | igt_global_reset_lock(i915); |
1070 | 1043 | ||
1071 | mutex_lock(&i915->drm.struct_mutex); | 1044 | mutex_lock(&i915->drm.struct_mutex); |
1072 | err = hang_init(&h, i915); | 1045 | err = hang_init(&h, i915); |
@@ -1186,7 +1159,7 @@ fini: | |||
1186 | hang_fini(&h); | 1159 | hang_fini(&h); |
1187 | unlock: | 1160 | unlock: |
1188 | mutex_unlock(&i915->drm.struct_mutex); | 1161 | mutex_unlock(&i915->drm.struct_mutex); |
1189 | global_reset_unlock(i915); | 1162 | igt_global_reset_unlock(i915); |
1190 | 1163 | ||
1191 | if (i915_terminally_wedged(&i915->gpu_error)) | 1164 | if (i915_terminally_wedged(&i915->gpu_error)) |
1192 | return -EIO; | 1165 | return -EIO; |
@@ -1266,7 +1239,7 @@ static int igt_reset_queue(void *arg) | |||
1266 | 1239 | ||
1267 | /* Check that we replay pending requests following a hang */ | 1240 | /* Check that we replay pending requests following a hang */ |
1268 | 1241 | ||
1269 | global_reset_lock(i915); | 1242 | igt_global_reset_lock(i915); |
1270 | 1243 | ||
1271 | mutex_lock(&i915->drm.struct_mutex); | 1244 | mutex_lock(&i915->drm.struct_mutex); |
1272 | err = hang_init(&h, i915); | 1245 | err = hang_init(&h, i915); |
@@ -1397,7 +1370,7 @@ fini: | |||
1397 | hang_fini(&h); | 1370 | hang_fini(&h); |
1398 | unlock: | 1371 | unlock: |
1399 | mutex_unlock(&i915->drm.struct_mutex); | 1372 | mutex_unlock(&i915->drm.struct_mutex); |
1400 | global_reset_unlock(i915); | 1373 | igt_global_reset_unlock(i915); |
1401 | 1374 | ||
1402 | if (i915_terminally_wedged(&i915->gpu_error)) | 1375 | if (i915_terminally_wedged(&i915->gpu_error)) |
1403 | return -EIO; | 1376 | return -EIO; |
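
The igt_hang_sanitycheck() hunk above also bounds the request wait with igt_wedge_on_timeout() from igt_wedge_me.h (not part of this diff). Assuming the guard behaves as its other users suggest (it wedges the device if the enclosed statement has not finished within the timeout), the pattern is:

    /* Sketch: a MAX_SCHEDULE_TIMEOUT wait that cannot hang the test.
     * If the request is not done within 100ms the guard wedges the
     * GPU, the blocking wait returns, and the wedged state is folded
     * into the error code. */
    struct igt_wedge_me w;
    long timeout = 0;

    igt_wedge_on_timeout(&w, i915, HZ / 10)
            timeout = i915_request_wait(rq, I915_WAIT_LOCKED,
                                        MAX_SCHEDULE_TIMEOUT);
    if (i915_terminally_wedged(&i915->gpu_error))
            timeout = -EIO;
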
diff --git a/drivers/gpu/drm/i915/selftests/intel_lrc.c b/drivers/gpu/drm/i915/selftests/intel_lrc.c index 94fc0e5c8766..ca461e3a5f27 100644 --- a/drivers/gpu/drm/i915/selftests/intel_lrc.c +++ b/drivers/gpu/drm/i915/selftests/intel_lrc.c | |||
@@ -6,216 +6,18 @@ | |||
6 | 6 | ||
7 | #include "../i915_selftest.h" | 7 | #include "../i915_selftest.h" |
8 | #include "igt_flush_test.h" | 8 | #include "igt_flush_test.h" |
9 | #include "igt_spinner.h" | ||
9 | #include "i915_random.h" | 10 | #include "i915_random.h" |
10 | 11 | ||
11 | #include "mock_context.h" | 12 | #include "mock_context.h" |
12 | 13 | ||
13 | struct spinner { | ||
14 | struct drm_i915_private *i915; | ||
15 | struct drm_i915_gem_object *hws; | ||
16 | struct drm_i915_gem_object *obj; | ||
17 | u32 *batch; | ||
18 | void *seqno; | ||
19 | }; | ||
20 | |||
21 | static int spinner_init(struct spinner *spin, struct drm_i915_private *i915) | ||
22 | { | ||
23 | unsigned int mode; | ||
24 | void *vaddr; | ||
25 | int err; | ||
26 | |||
27 | GEM_BUG_ON(INTEL_GEN(i915) < 8); | ||
28 | |||
29 | memset(spin, 0, sizeof(*spin)); | ||
30 | spin->i915 = i915; | ||
31 | |||
32 | spin->hws = i915_gem_object_create_internal(i915, PAGE_SIZE); | ||
33 | if (IS_ERR(spin->hws)) { | ||
34 | err = PTR_ERR(spin->hws); | ||
35 | goto err; | ||
36 | } | ||
37 | |||
38 | spin->obj = i915_gem_object_create_internal(i915, PAGE_SIZE); | ||
39 | if (IS_ERR(spin->obj)) { | ||
40 | err = PTR_ERR(spin->obj); | ||
41 | goto err_hws; | ||
42 | } | ||
43 | |||
44 | i915_gem_object_set_cache_level(spin->hws, I915_CACHE_LLC); | ||
45 | vaddr = i915_gem_object_pin_map(spin->hws, I915_MAP_WB); | ||
46 | if (IS_ERR(vaddr)) { | ||
47 | err = PTR_ERR(vaddr); | ||
48 | goto err_obj; | ||
49 | } | ||
50 | spin->seqno = memset(vaddr, 0xff, PAGE_SIZE); | ||
51 | |||
52 | mode = i915_coherent_map_type(i915); | ||
53 | vaddr = i915_gem_object_pin_map(spin->obj, mode); | ||
54 | if (IS_ERR(vaddr)) { | ||
55 | err = PTR_ERR(vaddr); | ||
56 | goto err_unpin_hws; | ||
57 | } | ||
58 | spin->batch = vaddr; | ||
59 | |||
60 | return 0; | ||
61 | |||
62 | err_unpin_hws: | ||
63 | i915_gem_object_unpin_map(spin->hws); | ||
64 | err_obj: | ||
65 | i915_gem_object_put(spin->obj); | ||
66 | err_hws: | ||
67 | i915_gem_object_put(spin->hws); | ||
68 | err: | ||
69 | return err; | ||
70 | } | ||
71 | |||
72 | static unsigned int seqno_offset(u64 fence) | ||
73 | { | ||
74 | return offset_in_page(sizeof(u32) * fence); | ||
75 | } | ||
76 | |||
77 | static u64 hws_address(const struct i915_vma *hws, | ||
78 | const struct i915_request *rq) | ||
79 | { | ||
80 | return hws->node.start + seqno_offset(rq->fence.context); | ||
81 | } | ||
82 | |||
83 | static int emit_recurse_batch(struct spinner *spin, | ||
84 | struct i915_request *rq, | ||
85 | u32 arbitration_command) | ||
86 | { | ||
87 | struct i915_address_space *vm = &rq->gem_context->ppgtt->vm; | ||
88 | struct i915_vma *hws, *vma; | ||
89 | u32 *batch; | ||
90 | int err; | ||
91 | |||
92 | vma = i915_vma_instance(spin->obj, vm, NULL); | ||
93 | if (IS_ERR(vma)) | ||
94 | return PTR_ERR(vma); | ||
95 | |||
96 | hws = i915_vma_instance(spin->hws, vm, NULL); | ||
97 | if (IS_ERR(hws)) | ||
98 | return PTR_ERR(hws); | ||
99 | |||
100 | err = i915_vma_pin(vma, 0, 0, PIN_USER); | ||
101 | if (err) | ||
102 | return err; | ||
103 | |||
104 | err = i915_vma_pin(hws, 0, 0, PIN_USER); | ||
105 | if (err) | ||
106 | goto unpin_vma; | ||
107 | |||
108 | err = i915_vma_move_to_active(vma, rq, 0); | ||
109 | if (err) | ||
110 | goto unpin_hws; | ||
111 | |||
112 | if (!i915_gem_object_has_active_reference(vma->obj)) { | ||
113 | i915_gem_object_get(vma->obj); | ||
114 | i915_gem_object_set_active_reference(vma->obj); | ||
115 | } | ||
116 | |||
117 | err = i915_vma_move_to_active(hws, rq, 0); | ||
118 | if (err) | ||
119 | goto unpin_hws; | ||
120 | |||
121 | if (!i915_gem_object_has_active_reference(hws->obj)) { | ||
122 | i915_gem_object_get(hws->obj); | ||
123 | i915_gem_object_set_active_reference(hws->obj); | ||
124 | } | ||
125 | |||
126 | batch = spin->batch; | ||
127 | |||
128 | *batch++ = MI_STORE_DWORD_IMM_GEN4; | ||
129 | *batch++ = lower_32_bits(hws_address(hws, rq)); | ||
130 | *batch++ = upper_32_bits(hws_address(hws, rq)); | ||
131 | *batch++ = rq->fence.seqno; | ||
132 | |||
133 | *batch++ = arbitration_command; | ||
134 | |||
135 | *batch++ = MI_BATCH_BUFFER_START | 1 << 8 | 1; | ||
136 | *batch++ = lower_32_bits(vma->node.start); | ||
137 | *batch++ = upper_32_bits(vma->node.start); | ||
138 | *batch++ = MI_BATCH_BUFFER_END; /* not reached */ | ||
139 | |||
140 | i915_gem_chipset_flush(spin->i915); | ||
141 | |||
142 | err = rq->engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, 0); | ||
143 | |||
144 | unpin_hws: | ||
145 | i915_vma_unpin(hws); | ||
146 | unpin_vma: | ||
147 | i915_vma_unpin(vma); | ||
148 | return err; | ||
149 | } | ||
150 | |||
151 | static struct i915_request * | ||
152 | spinner_create_request(struct spinner *spin, | ||
153 | struct i915_gem_context *ctx, | ||
154 | struct intel_engine_cs *engine, | ||
155 | u32 arbitration_command) | ||
156 | { | ||
157 | struct i915_request *rq; | ||
158 | int err; | ||
159 | |||
160 | rq = i915_request_alloc(engine, ctx); | ||
161 | if (IS_ERR(rq)) | ||
162 | return rq; | ||
163 | |||
164 | err = emit_recurse_batch(spin, rq, arbitration_command); | ||
165 | if (err) { | ||
166 | i915_request_add(rq); | ||
167 | return ERR_PTR(err); | ||
168 | } | ||
169 | |||
170 | return rq; | ||
171 | } | ||
172 | |||
173 | static u32 hws_seqno(const struct spinner *spin, const struct i915_request *rq) | ||
174 | { | ||
175 | u32 *seqno = spin->seqno + seqno_offset(rq->fence.context); | ||
176 | |||
177 | return READ_ONCE(*seqno); | ||
178 | } | ||
179 | |||
180 | static void spinner_end(struct spinner *spin) | ||
181 | { | ||
182 | *spin->batch = MI_BATCH_BUFFER_END; | ||
183 | i915_gem_chipset_flush(spin->i915); | ||
184 | } | ||
185 | |||
186 | static void spinner_fini(struct spinner *spin) | ||
187 | { | ||
188 | spinner_end(spin); | ||
189 | |||
190 | i915_gem_object_unpin_map(spin->obj); | ||
191 | i915_gem_object_put(spin->obj); | ||
192 | |||
193 | i915_gem_object_unpin_map(spin->hws); | ||
194 | i915_gem_object_put(spin->hws); | ||
195 | } | ||
196 | |||
197 | static bool wait_for_spinner(struct spinner *spin, struct i915_request *rq) | ||
198 | { | ||
199 | if (!wait_event_timeout(rq->execute, | ||
200 | READ_ONCE(rq->global_seqno), | ||
201 | msecs_to_jiffies(10))) | ||
202 | return false; | ||
203 | |||
204 | return !(wait_for_us(i915_seqno_passed(hws_seqno(spin, rq), | ||
205 | rq->fence.seqno), | ||
206 | 10) && | ||
207 | wait_for(i915_seqno_passed(hws_seqno(spin, rq), | ||
208 | rq->fence.seqno), | ||
209 | 1000)); | ||
210 | } | ||
211 | |||
212 | static int live_sanitycheck(void *arg) | 14 | static int live_sanitycheck(void *arg) |
213 | { | 15 | { |
214 | struct drm_i915_private *i915 = arg; | 16 | struct drm_i915_private *i915 = arg; |
215 | struct intel_engine_cs *engine; | 17 | struct intel_engine_cs *engine; |
216 | struct i915_gem_context *ctx; | 18 | struct i915_gem_context *ctx; |
217 | enum intel_engine_id id; | 19 | enum intel_engine_id id; |
218 | struct spinner spin; | 20 | struct igt_spinner spin; |
219 | int err = -ENOMEM; | 21 | int err = -ENOMEM; |
220 | 22 | ||
221 | if (!HAS_LOGICAL_RING_CONTEXTS(i915)) | 23 | if (!HAS_LOGICAL_RING_CONTEXTS(i915)) |
@@ -224,7 +26,7 @@ static int live_sanitycheck(void *arg) | |||
224 | mutex_lock(&i915->drm.struct_mutex); | 26 | mutex_lock(&i915->drm.struct_mutex); |
225 | intel_runtime_pm_get(i915); | 27 | intel_runtime_pm_get(i915); |
226 | 28 | ||
227 | if (spinner_init(&spin, i915)) | 29 | if (igt_spinner_init(&spin, i915)) |
228 | goto err_unlock; | 30 | goto err_unlock; |
229 | 31 | ||
230 | ctx = kernel_context(i915); | 32 | ctx = kernel_context(i915); |
@@ -234,14 +36,14 @@ static int live_sanitycheck(void *arg) | |||
234 | for_each_engine(engine, i915, id) { | 36 | for_each_engine(engine, i915, id) { |
235 | struct i915_request *rq; | 37 | struct i915_request *rq; |
236 | 38 | ||
237 | rq = spinner_create_request(&spin, ctx, engine, MI_NOOP); | 39 | rq = igt_spinner_create_request(&spin, ctx, engine, MI_NOOP); |
238 | if (IS_ERR(rq)) { | 40 | if (IS_ERR(rq)) { |
239 | err = PTR_ERR(rq); | 41 | err = PTR_ERR(rq); |
240 | goto err_ctx; | 42 | goto err_ctx; |
241 | } | 43 | } |
242 | 44 | ||
243 | i915_request_add(rq); | 45 | i915_request_add(rq); |
244 | if (!wait_for_spinner(&spin, rq)) { | 46 | if (!igt_wait_for_spinner(&spin, rq)) { |
245 | GEM_TRACE("spinner failed to start\n"); | 47 | GEM_TRACE("spinner failed to start\n"); |
246 | GEM_TRACE_DUMP(); | 48 | GEM_TRACE_DUMP(); |
247 | i915_gem_set_wedged(i915); | 49 | i915_gem_set_wedged(i915); |
@@ -249,7 +51,7 @@ static int live_sanitycheck(void *arg) | |||
249 | goto err_ctx; | 51 | goto err_ctx; |
250 | } | 52 | } |
251 | 53 | ||
252 | spinner_end(&spin); | 54 | igt_spinner_end(&spin); |
253 | if (igt_flush_test(i915, I915_WAIT_LOCKED)) { | 55 | if (igt_flush_test(i915, I915_WAIT_LOCKED)) { |
254 | err = -EIO; | 56 | err = -EIO; |
255 | goto err_ctx; | 57 | goto err_ctx; |
@@ -260,7 +62,7 @@ static int live_sanitycheck(void *arg) | |||
260 | err_ctx: | 62 | err_ctx: |
261 | kernel_context_close(ctx); | 63 | kernel_context_close(ctx); |
262 | err_spin: | 64 | err_spin: |
263 | spinner_fini(&spin); | 65 | igt_spinner_fini(&spin); |
264 | err_unlock: | 66 | err_unlock: |
265 | igt_flush_test(i915, I915_WAIT_LOCKED); | 67 | igt_flush_test(i915, I915_WAIT_LOCKED); |
266 | intel_runtime_pm_put(i915); | 68 | intel_runtime_pm_put(i915); |
@@ -272,7 +74,7 @@ static int live_preempt(void *arg) | |||
272 | { | 74 | { |
273 | struct drm_i915_private *i915 = arg; | 75 | struct drm_i915_private *i915 = arg; |
274 | struct i915_gem_context *ctx_hi, *ctx_lo; | 76 | struct i915_gem_context *ctx_hi, *ctx_lo; |
275 | struct spinner spin_hi, spin_lo; | 77 | struct igt_spinner spin_hi, spin_lo; |
276 | struct intel_engine_cs *engine; | 78 | struct intel_engine_cs *engine; |
277 | enum intel_engine_id id; | 79 | enum intel_engine_id id; |
278 | int err = -ENOMEM; | 80 | int err = -ENOMEM; |
@@ -283,10 +85,10 @@ static int live_preempt(void *arg) | |||
283 | mutex_lock(&i915->drm.struct_mutex); | 85 | mutex_lock(&i915->drm.struct_mutex); |
284 | intel_runtime_pm_get(i915); | 86 | intel_runtime_pm_get(i915); |
285 | 87 | ||
286 | if (spinner_init(&spin_hi, i915)) | 88 | if (igt_spinner_init(&spin_hi, i915)) |
287 | goto err_unlock; | 89 | goto err_unlock; |
288 | 90 | ||
289 | if (spinner_init(&spin_lo, i915)) | 91 | if (igt_spinner_init(&spin_lo, i915)) |
290 | goto err_spin_hi; | 92 | goto err_spin_hi; |
291 | 93 | ||
292 | ctx_hi = kernel_context(i915); | 94 | ctx_hi = kernel_context(i915); |
@@ -304,15 +106,15 @@ static int live_preempt(void *arg) | |||
304 | for_each_engine(engine, i915, id) { | 106 | for_each_engine(engine, i915, id) { |
305 | struct i915_request *rq; | 107 | struct i915_request *rq; |
306 | 108 | ||
307 | rq = spinner_create_request(&spin_lo, ctx_lo, engine, | 109 | rq = igt_spinner_create_request(&spin_lo, ctx_lo, engine, |
308 | MI_ARB_CHECK); | 110 | MI_ARB_CHECK); |
309 | if (IS_ERR(rq)) { | 111 | if (IS_ERR(rq)) { |
310 | err = PTR_ERR(rq); | 112 | err = PTR_ERR(rq); |
311 | goto err_ctx_lo; | 113 | goto err_ctx_lo; |
312 | } | 114 | } |
313 | 115 | ||
314 | i915_request_add(rq); | 116 | i915_request_add(rq); |
315 | if (!wait_for_spinner(&spin_lo, rq)) { | 117 | if (!igt_wait_for_spinner(&spin_lo, rq)) { |
316 | GEM_TRACE("lo spinner failed to start\n"); | 118 | GEM_TRACE("lo spinner failed to start\n"); |
317 | GEM_TRACE_DUMP(); | 119 | GEM_TRACE_DUMP(); |
318 | i915_gem_set_wedged(i915); | 120 | i915_gem_set_wedged(i915); |
@@ -320,16 +122,16 @@ static int live_preempt(void *arg) | |||
320 | goto err_ctx_lo; | 122 | goto err_ctx_lo; |
321 | } | 123 | } |
322 | 124 | ||
323 | rq = spinner_create_request(&spin_hi, ctx_hi, engine, | 125 | rq = igt_spinner_create_request(&spin_hi, ctx_hi, engine, |
324 | MI_ARB_CHECK); | 126 | MI_ARB_CHECK); |
325 | if (IS_ERR(rq)) { | 127 | if (IS_ERR(rq)) { |
326 | spinner_end(&spin_lo); | 128 | igt_spinner_end(&spin_lo); |
327 | err = PTR_ERR(rq); | 129 | err = PTR_ERR(rq); |
328 | goto err_ctx_lo; | 130 | goto err_ctx_lo; |
329 | } | 131 | } |
330 | 132 | ||
331 | i915_request_add(rq); | 133 | i915_request_add(rq); |
332 | if (!wait_for_spinner(&spin_hi, rq)) { | 134 | if (!igt_wait_for_spinner(&spin_hi, rq)) { |
333 | GEM_TRACE("hi spinner failed to start\n"); | 135 | GEM_TRACE("hi spinner failed to start\n"); |
334 | GEM_TRACE_DUMP(); | 136 | GEM_TRACE_DUMP(); |
335 | i915_gem_set_wedged(i915); | 137 | i915_gem_set_wedged(i915); |
@@ -337,8 +139,8 @@ static int live_preempt(void *arg) | |||
337 | goto err_ctx_lo; | 139 | goto err_ctx_lo; |
338 | } | 140 | } |
339 | 141 | ||
340 | spinner_end(&spin_hi); | 142 | igt_spinner_end(&spin_hi); |
341 | spinner_end(&spin_lo); | 143 | igt_spinner_end(&spin_lo); |
342 | if (igt_flush_test(i915, I915_WAIT_LOCKED)) { | 144 | if (igt_flush_test(i915, I915_WAIT_LOCKED)) { |
343 | err = -EIO; | 145 | err = -EIO; |
344 | goto err_ctx_lo; | 146 | goto err_ctx_lo; |
@@ -351,9 +153,9 @@ err_ctx_lo: | |||
351 | err_ctx_hi: | 153 | err_ctx_hi: |
352 | kernel_context_close(ctx_hi); | 154 | kernel_context_close(ctx_hi); |
353 | err_spin_lo: | 155 | err_spin_lo: |
354 | spinner_fini(&spin_lo); | 156 | igt_spinner_fini(&spin_lo); |
355 | err_spin_hi: | 157 | err_spin_hi: |
356 | spinner_fini(&spin_hi); | 158 | igt_spinner_fini(&spin_hi); |
357 | err_unlock: | 159 | err_unlock: |
358 | igt_flush_test(i915, I915_WAIT_LOCKED); | 160 | igt_flush_test(i915, I915_WAIT_LOCKED); |
359 | intel_runtime_pm_put(i915); | 161 | intel_runtime_pm_put(i915); |
@@ -365,7 +167,7 @@ static int live_late_preempt(void *arg) | |||
365 | { | 167 | { |
366 | struct drm_i915_private *i915 = arg; | 168 | struct drm_i915_private *i915 = arg; |
367 | struct i915_gem_context *ctx_hi, *ctx_lo; | 169 | struct i915_gem_context *ctx_hi, *ctx_lo; |
368 | struct spinner spin_hi, spin_lo; | 170 | struct igt_spinner spin_hi, spin_lo; |
369 | struct intel_engine_cs *engine; | 171 | struct intel_engine_cs *engine; |
370 | struct i915_sched_attr attr = {}; | 172 | struct i915_sched_attr attr = {}; |
371 | enum intel_engine_id id; | 173 | enum intel_engine_id id; |
@@ -377,10 +179,10 @@ static int live_late_preempt(void *arg) | |||
377 | mutex_lock(&i915->drm.struct_mutex); | 179 | mutex_lock(&i915->drm.struct_mutex); |
378 | intel_runtime_pm_get(i915); | 180 | intel_runtime_pm_get(i915); |
379 | 181 | ||
380 | if (spinner_init(&spin_hi, i915)) | 182 | if (igt_spinner_init(&spin_hi, i915)) |
381 | goto err_unlock; | 183 | goto err_unlock; |
382 | 184 | ||
383 | if (spinner_init(&spin_lo, i915)) | 185 | if (igt_spinner_init(&spin_lo, i915)) |
384 | goto err_spin_hi; | 186 | goto err_spin_hi; |
385 | 187 | ||
386 | ctx_hi = kernel_context(i915); | 188 | ctx_hi = kernel_context(i915); |
@@ -394,28 +196,29 @@ static int live_late_preempt(void *arg) | |||
394 | for_each_engine(engine, i915, id) { | 196 | for_each_engine(engine, i915, id) { |
395 | struct i915_request *rq; | 197 | struct i915_request *rq; |
396 | 198 | ||
397 | rq = spinner_create_request(&spin_lo, ctx_lo, engine, | 199 | rq = igt_spinner_create_request(&spin_lo, ctx_lo, engine, |
398 | MI_ARB_CHECK); | 200 | MI_ARB_CHECK); |
399 | if (IS_ERR(rq)) { | 201 | if (IS_ERR(rq)) { |
400 | err = PTR_ERR(rq); | 202 | err = PTR_ERR(rq); |
401 | goto err_ctx_lo; | 203 | goto err_ctx_lo; |
402 | } | 204 | } |
403 | 205 | ||
404 | i915_request_add(rq); | 206 | i915_request_add(rq); |
405 | if (!wait_for_spinner(&spin_lo, rq)) { | 207 | if (!igt_wait_for_spinner(&spin_lo, rq)) { |
406 | pr_err("First context failed to start\n"); | 208 | pr_err("First context failed to start\n"); |
407 | goto err_wedged; | 209 | goto err_wedged; |
408 | } | 210 | } |
409 | 211 | ||
410 | rq = spinner_create_request(&spin_hi, ctx_hi, engine, MI_NOOP); | 212 | rq = igt_spinner_create_request(&spin_hi, ctx_hi, engine, |
213 | MI_NOOP); | ||
411 | if (IS_ERR(rq)) { | 214 | if (IS_ERR(rq)) { |
412 | spinner_end(&spin_lo); | 215 | igt_spinner_end(&spin_lo); |
413 | err = PTR_ERR(rq); | 216 | err = PTR_ERR(rq); |
414 | goto err_ctx_lo; | 217 | goto err_ctx_lo; |
415 | } | 218 | } |
416 | 219 | ||
417 | i915_request_add(rq); | 220 | i915_request_add(rq); |
418 | if (wait_for_spinner(&spin_hi, rq)) { | 221 | if (igt_wait_for_spinner(&spin_hi, rq)) { |
419 | pr_err("Second context overtook first?\n"); | 222 | pr_err("Second context overtook first?\n"); |
420 | goto err_wedged; | 223 | goto err_wedged; |
421 | } | 224 | } |
@@ -423,14 +226,14 @@ static int live_late_preempt(void *arg) | |||
423 | attr.priority = I915_USER_PRIORITY(I915_PRIORITY_MAX); | 226 | attr.priority = I915_USER_PRIORITY(I915_PRIORITY_MAX); |
424 | engine->schedule(rq, &attr); | 227 | engine->schedule(rq, &attr); |
425 | 228 | ||
426 | if (!wait_for_spinner(&spin_hi, rq)) { | 229 | if (!igt_wait_for_spinner(&spin_hi, rq)) { |
427 | pr_err("High priority context failed to preempt the low priority context\n"); | 230 | pr_err("High priority context failed to preempt the low priority context\n"); |
428 | GEM_TRACE_DUMP(); | 231 | GEM_TRACE_DUMP(); |
429 | goto err_wedged; | 232 | goto err_wedged; |
430 | } | 233 | } |
431 | 234 | ||
432 | spinner_end(&spin_hi); | 235 | igt_spinner_end(&spin_hi); |
433 | spinner_end(&spin_lo); | 236 | igt_spinner_end(&spin_lo); |
434 | if (igt_flush_test(i915, I915_WAIT_LOCKED)) { | 237 | if (igt_flush_test(i915, I915_WAIT_LOCKED)) { |
435 | err = -EIO; | 238 | err = -EIO; |
436 | goto err_ctx_lo; | 239 | goto err_ctx_lo; |
@@ -443,9 +246,9 @@ err_ctx_lo: | |||
443 | err_ctx_hi: | 246 | err_ctx_hi: |
444 | kernel_context_close(ctx_hi); | 247 | kernel_context_close(ctx_hi); |
445 | err_spin_lo: | 248 | err_spin_lo: |
446 | spinner_fini(&spin_lo); | 249 | igt_spinner_fini(&spin_lo); |
447 | err_spin_hi: | 250 | err_spin_hi: |
448 | spinner_fini(&spin_hi); | 251 | igt_spinner_fini(&spin_hi); |
449 | err_unlock: | 252 | err_unlock: |
450 | igt_flush_test(i915, I915_WAIT_LOCKED); | 253 | igt_flush_test(i915, I915_WAIT_LOCKED); |
451 | intel_runtime_pm_put(i915); | 254 | intel_runtime_pm_put(i915); |
@@ -453,8 +256,8 @@ err_unlock: | |||
453 | return err; | 256 | return err; |
454 | 257 | ||
455 | err_wedged: | 258 | err_wedged: |
456 | spinner_end(&spin_hi); | 259 | igt_spinner_end(&spin_hi); |
457 | spinner_end(&spin_lo); | 260 | igt_spinner_end(&spin_lo); |
458 | i915_gem_set_wedged(i915); | 261 | i915_gem_set_wedged(i915); |
459 | err = -EIO; | 262 | err = -EIO; |
460 | goto err_ctx_lo; | 263 | goto err_ctx_lo; |
@@ -464,7 +267,7 @@ static int live_preempt_hang(void *arg) | |||
464 | { | 267 | { |
465 | struct drm_i915_private *i915 = arg; | 268 | struct drm_i915_private *i915 = arg; |
466 | struct i915_gem_context *ctx_hi, *ctx_lo; | 269 | struct i915_gem_context *ctx_hi, *ctx_lo; |
467 | struct spinner spin_hi, spin_lo; | 270 | struct igt_spinner spin_hi, spin_lo; |
468 | struct intel_engine_cs *engine; | 271 | struct intel_engine_cs *engine; |
469 | enum intel_engine_id id; | 272 | enum intel_engine_id id; |
470 | int err = -ENOMEM; | 273 | int err = -ENOMEM; |
@@ -478,10 +281,10 @@ static int live_preempt_hang(void *arg) | |||
478 | mutex_lock(&i915->drm.struct_mutex); | 281 | mutex_lock(&i915->drm.struct_mutex); |
479 | intel_runtime_pm_get(i915); | 282 | intel_runtime_pm_get(i915); |
480 | 283 | ||
481 | if (spinner_init(&spin_hi, i915)) | 284 | if (igt_spinner_init(&spin_hi, i915)) |
482 | goto err_unlock; | 285 | goto err_unlock; |
483 | 286 | ||
484 | if (spinner_init(&spin_lo, i915)) | 287 | if (igt_spinner_init(&spin_lo, i915)) |
485 | goto err_spin_hi; | 288 | goto err_spin_hi; |
486 | 289 | ||
487 | ctx_hi = kernel_context(i915); | 290 | ctx_hi = kernel_context(i915); |
@@ -500,15 +303,15 @@ static int live_preempt_hang(void *arg) | |||
500 | if (!intel_engine_has_preemption(engine)) | 303 | if (!intel_engine_has_preemption(engine)) |
501 | continue; | 304 | continue; |
502 | 305 | ||
503 | rq = spinner_create_request(&spin_lo, ctx_lo, engine, | 306 | rq = igt_spinner_create_request(&spin_lo, ctx_lo, engine, |
504 | MI_ARB_CHECK); | 307 | MI_ARB_CHECK); |
505 | if (IS_ERR(rq)) { | 308 | if (IS_ERR(rq)) { |
506 | err = PTR_ERR(rq); | 309 | err = PTR_ERR(rq); |
507 | goto err_ctx_lo; | 310 | goto err_ctx_lo; |
508 | } | 311 | } |
509 | 312 | ||
510 | i915_request_add(rq); | 313 | i915_request_add(rq); |
511 | if (!wait_for_spinner(&spin_lo, rq)) { | 314 | if (!igt_wait_for_spinner(&spin_lo, rq)) { |
512 | GEM_TRACE("lo spinner failed to start\n"); | 315 | GEM_TRACE("lo spinner failed to start\n"); |
513 | GEM_TRACE_DUMP(); | 316 | GEM_TRACE_DUMP(); |
514 | i915_gem_set_wedged(i915); | 317 | i915_gem_set_wedged(i915); |
@@ -516,10 +319,10 @@ static int live_preempt_hang(void *arg) | |||
516 | goto err_ctx_lo; | 319 | goto err_ctx_lo; |
517 | } | 320 | } |
518 | 321 | ||
519 | rq = spinner_create_request(&spin_hi, ctx_hi, engine, | 322 | rq = igt_spinner_create_request(&spin_hi, ctx_hi, engine, |
520 | MI_ARB_CHECK); | 323 | MI_ARB_CHECK); |
521 | if (IS_ERR(rq)) { | 324 | if (IS_ERR(rq)) { |
522 | spinner_end(&spin_lo); | 325 | igt_spinner_end(&spin_lo); |
523 | err = PTR_ERR(rq); | 326 | err = PTR_ERR(rq); |
524 | goto err_ctx_lo; | 327 | goto err_ctx_lo; |
525 | } | 328 | } |
@@ -544,7 +347,7 @@ static int live_preempt_hang(void *arg) | |||
544 | 347 | ||
545 | engine->execlists.preempt_hang.inject_hang = false; | 348 | engine->execlists.preempt_hang.inject_hang = false; |
546 | 349 | ||
547 | if (!wait_for_spinner(&spin_hi, rq)) { | 350 | if (!igt_wait_for_spinner(&spin_hi, rq)) { |
548 | GEM_TRACE("hi spinner failed to start\n"); | 351 | GEM_TRACE("hi spinner failed to start\n"); |
549 | GEM_TRACE_DUMP(); | 352 | GEM_TRACE_DUMP(); |
550 | i915_gem_set_wedged(i915); | 353 | i915_gem_set_wedged(i915); |
@@ -552,8 +355,8 @@ static int live_preempt_hang(void *arg) | |||
552 | goto err_ctx_lo; | 355 | goto err_ctx_lo; |
553 | } | 356 | } |
554 | 357 | ||
555 | spinner_end(&spin_hi); | 358 | igt_spinner_end(&spin_hi); |
556 | spinner_end(&spin_lo); | 359 | igt_spinner_end(&spin_lo); |
557 | if (igt_flush_test(i915, I915_WAIT_LOCKED)) { | 360 | if (igt_flush_test(i915, I915_WAIT_LOCKED)) { |
558 | err = -EIO; | 361 | err = -EIO; |
559 | goto err_ctx_lo; | 362 | goto err_ctx_lo; |
@@ -566,9 +369,9 @@ err_ctx_lo: | |||
566 | err_ctx_hi: | 369 | err_ctx_hi: |
567 | kernel_context_close(ctx_hi); | 370 | kernel_context_close(ctx_hi); |
568 | err_spin_lo: | 371 | err_spin_lo: |
569 | spinner_fini(&spin_lo); | 372 | igt_spinner_fini(&spin_lo); |
570 | err_spin_hi: | 373 | err_spin_hi: |
571 | spinner_fini(&spin_hi); | 374 | igt_spinner_fini(&spin_hi); |
572 | err_unlock: | 375 | err_unlock: |
573 | igt_flush_test(i915, I915_WAIT_LOCKED); | 376 | igt_flush_test(i915, I915_WAIT_LOCKED); |
574 | intel_runtime_pm_put(i915); | 377 | intel_runtime_pm_put(i915); |
diff --git a/drivers/gpu/drm/i915/selftests/intel_workarounds.c b/drivers/gpu/drm/i915/selftests/intel_workarounds.c index d1a0923d2f38..67017d5175b8 100644 --- a/drivers/gpu/drm/i915/selftests/intel_workarounds.c +++ b/drivers/gpu/drm/i915/selftests/intel_workarounds.c | |||
@@ -6,6 +6,9 @@ | |||
6 | 6 | ||
7 | #include "../i915_selftest.h" | 7 | #include "../i915_selftest.h" |
8 | 8 | ||
9 | #include "igt_flush_test.h" | ||
10 | #include "igt_reset.h" | ||
11 | #include "igt_spinner.h" | ||
9 | #include "igt_wedge_me.h" | 12 | #include "igt_wedge_me.h" |
10 | #include "mock_context.h" | 13 | #include "mock_context.h" |
11 | 14 | ||
@@ -91,17 +94,23 @@ err_obj: | |||
91 | return ERR_PTR(err); | 94 | return ERR_PTR(err); |
92 | } | 95 | } |
93 | 96 | ||
94 | static u32 get_whitelist_reg(const struct whitelist *w, unsigned int i) | 97 | static u32 |
98 | get_whitelist_reg(const struct intel_engine_cs *engine, unsigned int i) | ||
95 | { | 99 | { |
96 | return i < w->count ? i915_mmio_reg_offset(w->reg[i]) : w->nopid; | 100 | i915_reg_t reg = i < engine->whitelist.count ? |
101 | engine->whitelist.list[i].reg : | ||
102 | RING_NOPID(engine->mmio_base); | ||
103 | |||
104 | return i915_mmio_reg_offset(reg); | ||
97 | } | 105 | } |
98 | 106 | ||
99 | static void print_results(const struct whitelist *w, const u32 *results) | 107 | static void |
108 | print_results(const struct intel_engine_cs *engine, const u32 *results) | ||
100 | { | 109 | { |
101 | unsigned int i; | 110 | unsigned int i; |
102 | 111 | ||
103 | for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) { | 112 | for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) { |
104 | u32 expected = get_whitelist_reg(w, i); | 113 | u32 expected = get_whitelist_reg(engine, i); |
105 | u32 actual = results[i]; | 114 | u32 actual = results[i]; |
106 | 115 | ||
107 | pr_info("RING_NONPRIV[%d]: expected 0x%08x, found 0x%08x\n", | 116 | pr_info("RING_NONPRIV[%d]: expected 0x%08x, found 0x%08x\n", |
@@ -109,8 +118,7 @@ static void print_results(const struct whitelist *w, const u32 *results) | |||
109 | } | 118 | } |
110 | } | 119 | } |
111 | 120 | ||
112 | static int check_whitelist(const struct whitelist *w, | 121 | static int check_whitelist(struct i915_gem_context *ctx, |
113 | struct i915_gem_context *ctx, | ||
114 | struct intel_engine_cs *engine) | 122 | struct intel_engine_cs *engine) |
115 | { | 123 | { |
116 | struct drm_i915_gem_object *results; | 124 | struct drm_i915_gem_object *results; |
@@ -138,11 +146,11 @@ static int check_whitelist(const struct whitelist *w, | |||
138 | } | 146 | } |
139 | 147 | ||
140 | for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) { | 148 | for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) { |
141 | u32 expected = get_whitelist_reg(w, i); | 149 | u32 expected = get_whitelist_reg(engine, i); |
142 | u32 actual = vaddr[i]; | 150 | u32 actual = vaddr[i]; |
143 | 151 | ||
144 | if (expected != actual) { | 152 | if (expected != actual) { |
145 | print_results(w, vaddr); | 153 | print_results(engine, vaddr); |
146 | pr_err("Invalid RING_NONPRIV[%d], expected 0x%08x, found 0x%08x\n", | 154 | pr_err("Invalid RING_NONPRIV[%d], expected 0x%08x, found 0x%08x\n", |
147 | i, expected, actual); | 155 | i, expected, actual); |
148 | 156 | ||
@@ -159,66 +167,107 @@ out_put: | |||
159 | 167 | ||
160 | static int do_device_reset(struct intel_engine_cs *engine) | 168 | static int do_device_reset(struct intel_engine_cs *engine) |
161 | { | 169 | { |
162 | i915_reset(engine->i915, ENGINE_MASK(engine->id), NULL); | 170 | set_bit(I915_RESET_HANDOFF, &engine->i915->gpu_error.flags); |
171 | i915_reset(engine->i915, ENGINE_MASK(engine->id), "live_workarounds"); | ||
163 | return 0; | 172 | return 0; |
164 | } | 173 | } |
165 | 174 | ||
166 | static int do_engine_reset(struct intel_engine_cs *engine) | 175 | static int do_engine_reset(struct intel_engine_cs *engine) |
167 | { | 176 | { |
168 | return i915_reset_engine(engine, NULL); | 177 | return i915_reset_engine(engine, "live_workarounds"); |
169 | } | 178 | } |
170 | 179 | ||
171 | static int switch_to_scratch_context(struct intel_engine_cs *engine) | 180 | static int |
181 | switch_to_scratch_context(struct intel_engine_cs *engine, | ||
182 | struct igt_spinner *spin) | ||
172 | { | 183 | { |
173 | struct i915_gem_context *ctx; | 184 | struct i915_gem_context *ctx; |
174 | struct i915_request *rq; | 185 | struct i915_request *rq; |
186 | int err = 0; | ||
175 | 187 | ||
176 | ctx = kernel_context(engine->i915); | 188 | ctx = kernel_context(engine->i915); |
177 | if (IS_ERR(ctx)) | 189 | if (IS_ERR(ctx)) |
178 | return PTR_ERR(ctx); | 190 | return PTR_ERR(ctx); |
179 | 191 | ||
180 | intel_runtime_pm_get(engine->i915); | 192 | intel_runtime_pm_get(engine->i915); |
181 | rq = i915_request_alloc(engine, ctx); | 193 | |
194 | if (spin) | ||
195 | rq = igt_spinner_create_request(spin, ctx, engine, MI_NOOP); | ||
196 | else | ||
197 | rq = i915_request_alloc(engine, ctx); | ||
198 | |||
182 | intel_runtime_pm_put(engine->i915); | 199 | intel_runtime_pm_put(engine->i915); |
183 | 200 | ||
184 | kernel_context_close(ctx); | 201 | kernel_context_close(ctx); |
185 | if (IS_ERR(rq)) | 202 | |
186 | return PTR_ERR(rq); | 203 | if (IS_ERR(rq)) { |
204 | spin = NULL; | ||
205 | err = PTR_ERR(rq); | ||
206 | goto err; | ||
207 | } | ||
187 | 208 | ||
188 | i915_request_add(rq); | 209 | i915_request_add(rq); |
189 | 210 | ||
190 | return 0; | 211 | if (spin && !igt_wait_for_spinner(spin, rq)) { |
212 | pr_err("Spinner failed to start\n"); | ||
213 | err = -ETIMEDOUT; | ||
214 | } | ||
215 | |||
216 | err: | ||
217 | if (err && spin) | ||
218 | igt_spinner_end(spin); | ||
219 | |||
220 | return err; | ||
191 | } | 221 | } |
192 | 222 | ||
193 | static int check_whitelist_across_reset(struct intel_engine_cs *engine, | 223 | static int check_whitelist_across_reset(struct intel_engine_cs *engine, |
194 | int (*reset)(struct intel_engine_cs *), | 224 | int (*reset)(struct intel_engine_cs *), |
195 | const struct whitelist *w, | ||
196 | const char *name) | 225 | const char *name) |
197 | { | 226 | { |
227 | struct drm_i915_private *i915 = engine->i915; | ||
228 | bool want_spin = reset == do_engine_reset; | ||
198 | struct i915_gem_context *ctx; | 229 | struct i915_gem_context *ctx; |
230 | struct igt_spinner spin; | ||
199 | int err; | 231 | int err; |
200 | 232 | ||
201 | ctx = kernel_context(engine->i915); | 233 | pr_info("Checking %d whitelisted registers (RING_NONPRIV) [%s]\n", |
234 | engine->whitelist.count, name); | ||
235 | |||
236 | if (want_spin) { | ||
237 | err = igt_spinner_init(&spin, i915); | ||
238 | if (err) | ||
239 | return err; | ||
240 | } | ||
241 | |||
242 | ctx = kernel_context(i915); | ||
202 | if (IS_ERR(ctx)) | 243 | if (IS_ERR(ctx)) |
203 | return PTR_ERR(ctx); | 244 | return PTR_ERR(ctx); |
204 | 245 | ||
205 | err = check_whitelist(w, ctx, engine); | 246 | err = check_whitelist(ctx, engine); |
206 | if (err) { | 247 | if (err) { |
207 | pr_err("Invalid whitelist *before* %s reset!\n", name); | 248 | pr_err("Invalid whitelist *before* %s reset!\n", name); |
208 | goto out; | 249 | goto out; |
209 | } | 250 | } |
210 | 251 | ||
211 | err = switch_to_scratch_context(engine); | 252 | err = switch_to_scratch_context(engine, want_spin ? &spin : NULL); |
212 | if (err) | 253 | if (err) |
213 | goto out; | 254 | goto out; |
214 | 255 | ||
256 | intel_runtime_pm_get(i915); | ||
215 | err = reset(engine); | 257 | err = reset(engine); |
258 | intel_runtime_pm_put(i915); | ||
259 | |||
260 | if (want_spin) { | ||
261 | igt_spinner_end(&spin); | ||
262 | igt_spinner_fini(&spin); | ||
263 | } | ||
264 | |||
216 | if (err) { | 265 | if (err) { |
217 | pr_err("%s reset failed\n", name); | 266 | pr_err("%s reset failed\n", name); |
218 | goto out; | 267 | goto out; |
219 | } | 268 | } |
220 | 269 | ||
221 | err = check_whitelist(w, ctx, engine); | 270 | err = check_whitelist(ctx, engine); |
222 | if (err) { | 271 | if (err) { |
223 | pr_err("Whitelist not preserved in context across %s reset!\n", | 272 | pr_err("Whitelist not preserved in context across %s reset!\n", |
224 | name); | 273 | name); |
@@ -227,11 +276,11 @@ static int check_whitelist_across_reset(struct intel_engine_cs *engine, | |||
227 | 276 | ||
228 | kernel_context_close(ctx); | 277 | kernel_context_close(ctx); |
229 | 278 | ||
230 | ctx = kernel_context(engine->i915); | 279 | ctx = kernel_context(i915); |
231 | if (IS_ERR(ctx)) | 280 | if (IS_ERR(ctx)) |
232 | return PTR_ERR(ctx); | 281 | return PTR_ERR(ctx); |
233 | 282 | ||
234 | err = check_whitelist(w, ctx, engine); | 283 | err = check_whitelist(ctx, engine); |
235 | if (err) { | 284 | if (err) { |
236 | pr_err("Invalid whitelist *after* %s reset in fresh context!\n", | 285 | pr_err("Invalid whitelist *after* %s reset in fresh context!\n", |
237 | name); | 286 | name); |
@@ -247,26 +296,18 @@ static int live_reset_whitelist(void *arg) | |||
247 | { | 296 | { |
248 | struct drm_i915_private *i915 = arg; | 297 | struct drm_i915_private *i915 = arg; |
249 | struct intel_engine_cs *engine = i915->engine[RCS]; | 298 | struct intel_engine_cs *engine = i915->engine[RCS]; |
250 | struct i915_gpu_error *error = &i915->gpu_error; | ||
251 | struct whitelist w; | ||
252 | int err = 0; | 299 | int err = 0; |
253 | 300 | ||
254 | /* If we reset the gpu, we should not lose the RING_NONPRIV */ | 301 | /* If we reset the gpu, we should not lose the RING_NONPRIV */ |
255 | 302 | ||
256 | if (!engine) | 303 | if (!engine || engine->whitelist.count == 0) |
257 | return 0; | ||
258 | |||
259 | if (!whitelist_build(engine, &w)) | ||
260 | return 0; | 304 | return 0; |
261 | 305 | ||
262 | pr_info("Checking %d whitelisted registers (RING_NONPRIV)\n", w.count); | 306 | igt_global_reset_lock(i915); |
263 | |||
264 | set_bit(I915_RESET_BACKOFF, &error->flags); | ||
265 | set_bit(I915_RESET_ENGINE + engine->id, &error->flags); | ||
266 | 307 | ||
267 | if (intel_has_reset_engine(i915)) { | 308 | if (intel_has_reset_engine(i915)) { |
268 | err = check_whitelist_across_reset(engine, | 309 | err = check_whitelist_across_reset(engine, |
269 | do_engine_reset, &w, | 310 | do_engine_reset, |
270 | "engine"); | 311 | "engine"); |
271 | if (err) | 312 | if (err) |
272 | goto out; | 313 | goto out; |
@@ -274,22 +315,156 @@ static int live_reset_whitelist(void *arg) | |||
274 | 315 | ||
275 | if (intel_has_gpu_reset(i915)) { | 316 | if (intel_has_gpu_reset(i915)) { |
276 | err = check_whitelist_across_reset(engine, | 317 | err = check_whitelist_across_reset(engine, |
277 | do_device_reset, &w, | 318 | do_device_reset, |
278 | "device"); | 319 | "device"); |
279 | if (err) | 320 | if (err) |
280 | goto out; | 321 | goto out; |
281 | } | 322 | } |
282 | 323 | ||
283 | out: | 324 | out: |
284 | clear_bit(I915_RESET_ENGINE + engine->id, &error->flags); | 325 | igt_global_reset_unlock(i915); |
285 | clear_bit(I915_RESET_BACKOFF, &error->flags); | ||
286 | return err; | 326 | return err; |
287 | } | 327 | } |
288 | 328 | ||
329 | static bool verify_gt_engine_wa(struct drm_i915_private *i915, const char *str) | ||
330 | { | ||
331 | struct intel_engine_cs *engine; | ||
332 | enum intel_engine_id id; | ||
333 | bool ok = true; | ||
334 | |||
335 | ok &= intel_gt_verify_workarounds(i915, str); | ||
336 | |||
337 | for_each_engine(engine, i915, id) | ||
338 | ok &= intel_engine_verify_workarounds(engine, str); | ||
339 | |||
340 | return ok; | ||
341 | } | ||
342 | |||
343 | static int | ||
344 | live_gpu_reset_gt_engine_workarounds(void *arg) | ||
345 | { | ||
346 | struct drm_i915_private *i915 = arg; | ||
347 | struct i915_gpu_error *error = &i915->gpu_error; | ||
348 | bool ok; | ||
349 | |||
350 | if (!intel_has_gpu_reset(i915)) | ||
351 | return 0; | ||
352 | |||
353 | pr_info("Verifying after GPU reset...\n"); | ||
354 | |||
355 | igt_global_reset_lock(i915); | ||
356 | |||
357 | ok = verify_gt_engine_wa(i915, "before reset"); | ||
358 | if (!ok) | ||
359 | goto out; | ||
360 | |||
361 | intel_runtime_pm_get(i915); | ||
362 | set_bit(I915_RESET_HANDOFF, &error->flags); | ||
363 | i915_reset(i915, ALL_ENGINES, "live_workarounds"); | ||
364 | intel_runtime_pm_put(i915); | ||
365 | |||
366 | ok = verify_gt_engine_wa(i915, "after reset"); | ||
367 | |||
368 | out: | ||
369 | igt_global_reset_unlock(i915); | ||
370 | |||
371 | return ok ? 0 : -ESRCH; | ||
372 | } | ||
373 | |||
374 | static int | ||
375 | live_engine_reset_gt_engine_workarounds(void *arg) | ||
376 | { | ||
377 | struct drm_i915_private *i915 = arg; | ||
378 | struct intel_engine_cs *engine; | ||
379 | struct i915_gem_context *ctx; | ||
380 | struct igt_spinner spin; | ||
381 | enum intel_engine_id id; | ||
382 | struct i915_request *rq; | ||
383 | int ret = 0; | ||
384 | |||
385 | if (!intel_has_reset_engine(i915)) | ||
386 | return 0; | ||
387 | |||
388 | ctx = kernel_context(i915); | ||
389 | if (IS_ERR(ctx)) | ||
390 | return PTR_ERR(ctx); | ||
391 | |||
392 | igt_global_reset_lock(i915); | ||
393 | |||
394 | for_each_engine(engine, i915, id) { | ||
395 | bool ok; | ||
396 | |||
397 | pr_info("Verifying after %s reset...\n", engine->name); | ||
398 | |||
399 | ok = verify_gt_engine_wa(i915, "before reset"); | ||
400 | if (!ok) { | ||
401 | ret = -ESRCH; | ||
402 | goto err; | ||
403 | } | ||
404 | |||
405 | intel_runtime_pm_get(i915); | ||
406 | i915_reset_engine(engine, "live_workarounds"); | ||
407 | intel_runtime_pm_put(i915); | ||
408 | |||
409 | ok = verify_gt_engine_wa(i915, "after idle reset"); | ||
410 | if (!ok) { | ||
411 | ret = -ESRCH; | ||
412 | goto err; | ||
413 | } | ||
414 | |||
415 | ret = igt_spinner_init(&spin, i915); | ||
416 | if (ret) | ||
417 | goto err; | ||
418 | |||
419 | intel_runtime_pm_get(i915); | ||
420 | |||
421 | rq = igt_spinner_create_request(&spin, ctx, engine, MI_NOOP); | ||
422 | if (IS_ERR(rq)) { | ||
423 | ret = PTR_ERR(rq); | ||
424 | igt_spinner_fini(&spin); | ||
425 | intel_runtime_pm_put(i915); | ||
426 | goto err; | ||
427 | } | ||
428 | |||
429 | i915_request_add(rq); | ||
430 | |||
431 | if (!igt_wait_for_spinner(&spin, rq)) { | ||
432 | pr_err("Spinner failed to start\n"); | ||
433 | igt_spinner_fini(&spin); | ||
434 | intel_runtime_pm_put(i915); | ||
435 | ret = -ETIMEDOUT; | ||
436 | goto err; | ||
437 | } | ||
438 | |||
439 | i915_reset_engine(engine, "live_workarounds"); | ||
440 | |||
441 | intel_runtime_pm_put(i915); | ||
442 | |||
443 | igt_spinner_end(&spin); | ||
444 | igt_spinner_fini(&spin); | ||
445 | |||
446 | ok = verify_gt_engine_wa(i915, "after busy reset"); | ||
447 | if (!ok) { | ||
448 | ret = -ESRCH; | ||
449 | goto err; | ||
450 | } | ||
451 | } | ||
452 | |||
453 | err: | ||
454 | igt_global_reset_unlock(i915); | ||
455 | kernel_context_close(ctx); | ||
456 | |||
457 | igt_flush_test(i915, I915_WAIT_LOCKED); | ||
458 | |||
459 | return ret; | ||
460 | } | ||
461 | |||
289 | int intel_workarounds_live_selftests(struct drm_i915_private *i915) | 462 | int intel_workarounds_live_selftests(struct drm_i915_private *i915) |
290 | { | 463 | { |
291 | static const struct i915_subtest tests[] = { | 464 | static const struct i915_subtest tests[] = { |
292 | SUBTEST(live_reset_whitelist), | 465 | SUBTEST(live_reset_whitelist), |
466 | SUBTEST(live_gpu_reset_gt_engine_workarounds), | ||
467 | SUBTEST(live_engine_reset_gt_engine_workarounds), | ||
293 | }; | 468 | }; |
294 | int err; | 469 | int err; |
295 | 470 | ||
diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c index 0a7252aecfa5..bb71db63c99c 100644 --- a/fs/sysfs/file.c +++ b/fs/sysfs/file.c | |||
@@ -334,7 +334,7 @@ int sysfs_create_file_ns(struct kobject *kobj, const struct attribute *attr, | |||
334 | } | 334 | } |
335 | EXPORT_SYMBOL_GPL(sysfs_create_file_ns); | 335 | EXPORT_SYMBOL_GPL(sysfs_create_file_ns); |
336 | 336 | ||
337 | int sysfs_create_files(struct kobject *kobj, const struct attribute **ptr) | 337 | int sysfs_create_files(struct kobject *kobj, const struct attribute * const *ptr) |
338 | { | 338 | { |
339 | int err = 0; | 339 | int err = 0; |
340 | int i; | 340 | int i; |
@@ -493,7 +493,7 @@ bool sysfs_remove_file_self(struct kobject *kobj, const struct attribute *attr) | |||
493 | return ret; | 493 | return ret; |
494 | } | 494 | } |
495 | 495 | ||
496 | void sysfs_remove_files(struct kobject *kobj, const struct attribute **ptr) | 496 | void sysfs_remove_files(struct kobject *kobj, const struct attribute * const *ptr) |
497 | { | 497 | { |
498 | int i; | 498 | int i; |
499 | for (i = 0; ptr[i]; i++) | 499 | for (i = 0; ptr[i]; i++) |
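Editorial note: the constification of sysfs_create_files()/sysfs_remove_files() above lets callers keep not just the attributes but the pointer array itself in read-only memory. A minimal sketch of a caller, assuming two hypothetical struct kobj_attribute objects attr_foo and attr_bar (not from this patch):

    /* The array itself can now be const; it stays NULL-terminated as before. */
    static const struct attribute * const my_attrs[] = {
            &attr_foo.attr,
            &attr_bar.attr,
            NULL,   /* both helpers walk the array until this terminator */
    };

    err = sysfs_create_files(kobj, my_attrs);   /* no cast needed */
    /* ... later, on teardown ... */
    sysfs_remove_files(kobj, my_attrs);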
diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h index 3314e91f6eb3..5736c942c85b 100644 --- a/include/drm/drm_dp_helper.h +++ b/include/drm/drm_dp_helper.h | |||
@@ -1123,7 +1123,8 @@ drm_dp_is_branch(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) | |||
1123 | u8 drm_dp_dsc_sink_max_slice_count(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE], | 1123 | u8 drm_dp_dsc_sink_max_slice_count(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE], |
1124 | bool is_edp); | 1124 | bool is_edp); |
1125 | u8 drm_dp_dsc_sink_line_buf_depth(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE]); | 1125 | u8 drm_dp_dsc_sink_line_buf_depth(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE]); |
1126 | u8 drm_dp_dsc_sink_max_color_depth(const u8 dsc_dpc[DP_DSC_RECEIVER_CAP_SIZE]); | 1126 | int drm_dp_dsc_sink_supported_input_bpcs(const u8 dsc_dpc[DP_DSC_RECEIVER_CAP_SIZE], |
1127 | u8 dsc_bpc[3]); | ||
1127 | 1128 | ||
1128 | static inline bool | 1129 | static inline bool |
1129 | drm_dp_sink_supports_dsc(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE]) | 1130 | drm_dp_sink_supports_dsc(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE]) |
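Editorial note: drm_dp_dsc_sink_supported_input_bpcs() replaces the old max-color-depth helper: it fills dsc_bpc[] with the input bits-per-component values the DSC sink advertises and returns how many entries are valid (at most 3). A hedged sketch of a caller, where sink_dsc_dpcd stands for the raw DSC receiver caps already read from DPCD:

    u8 dsc_bpc[3];
    int i, num_bpc, max_bpc = 0;

    num_bpc = drm_dp_dsc_sink_supported_input_bpcs(sink_dsc_dpcd, dsc_bpc);
    for (i = 0; i < num_bpc; i++)
            max_bpc = max(max_bpc, (int)dsc_bpc[i]);
    /* max_bpc == 0 here means the sink reported no supported input bpc */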
diff --git a/include/drm/drm_dsc.h b/include/drm/drm_dsc.h new file mode 100644 index 000000000000..d03f1b83421a --- /dev/null +++ b/include/drm/drm_dsc.h | |||
@@ -0,0 +1,485 @@ | |||
1 | /* SPDX-License-Identifier: MIT | ||
2 | * Copyright (C) 2018 Intel Corp. | ||
3 | * | ||
4 | * Authors: | ||
5 | * Manasi Navare <manasi.d.navare@intel.com> | ||
6 | */ | ||
7 | |||
8 | #ifndef DRM_DSC_H_ | ||
9 | #define DRM_DSC_H_ | ||
10 | |||
11 | #include <drm/drm_dp_helper.h> | ||
12 | |||
13 | /* VESA Display Stream Compression DSC 1.2 constants */ | ||
14 | #define DSC_NUM_BUF_RANGES 15 | ||
15 | #define DSC_MUX_WORD_SIZE_8_10_BPC 48 | ||
16 | #define DSC_MUX_WORD_SIZE_12_BPC 64 | ||
17 | #define DSC_RC_PIXELS_PER_GROUP 3 | ||
18 | #define DSC_SCALE_DECREMENT_INTERVAL_MAX 4095 | ||
19 | #define DSC_RANGE_BPG_OFFSET_MASK 0x3f | ||
20 | |||
21 | /* DSC Rate Control Constants */ | ||
22 | #define DSC_RC_MODEL_SIZE_CONST 8192 | ||
23 | #define DSC_RC_EDGE_FACTOR_CONST 6 | ||
24 | #define DSC_RC_TGT_OFFSET_HI_CONST 3 | ||
25 | #define DSC_RC_TGT_OFFSET_LO_CONST 3 | ||
26 | |||
27 | /* DSC PPS constants and macros */ | ||
28 | #define DSC_PPS_VERSION_MAJOR_SHIFT 4 | ||
29 | #define DSC_PPS_BPC_SHIFT 4 | ||
30 | #define DSC_PPS_MSB_SHIFT 8 | ||
31 | #define DSC_PPS_LSB_MASK (0xFF << 0) | ||
32 | #define DSC_PPS_BPP_HIGH_MASK (0x3 << 8) | ||
33 | #define DSC_PPS_VBR_EN_SHIFT 2 | ||
34 | #define DSC_PPS_SIMPLE422_SHIFT 3 | ||
35 | #define DSC_PPS_CONVERT_RGB_SHIFT 4 | ||
36 | #define DSC_PPS_BLOCK_PRED_EN_SHIFT 5 | ||
37 | #define DSC_PPS_INIT_XMIT_DELAY_HIGH_MASK (0x3 << 8) | ||
38 | #define DSC_PPS_SCALE_DEC_INT_HIGH_MASK (0xF << 8) | ||
39 | #define DSC_PPS_RC_TGT_OFFSET_HI_SHIFT 4 | ||
40 | #define DSC_PPS_RC_RANGE_MINQP_SHIFT 11 | ||
41 | #define DSC_PPS_RC_RANGE_MAXQP_SHIFT 6 | ||
42 | #define DSC_PPS_NATIVE_420_SHIFT 1 | ||
43 | #define DSC_1_2_MAX_LINEBUF_DEPTH_BITS 16 | ||
44 | #define DSC_1_2_MAX_LINEBUF_DEPTH_VAL 0 | ||
45 | #define DSC_1_1_MAX_LINEBUF_DEPTH_BITS 13 | ||
46 | |||
47 | /* Configuration for a single Rate Control model range */ | ||
48 | struct drm_dsc_rc_range_parameters { | ||
49 | /* Min Quantization Parameters allowed for this range */ | ||
50 | u8 range_min_qp; | ||
51 | /* Max Quantization Parameters allowed for this range */ | ||
52 | u8 range_max_qp; | ||
53 | /* Bits/group offset to apply to target for this group */ | ||
54 | u8 range_bpg_offset; | ||
55 | }; | ||
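Editorial note: each RC range is eventually packed into a single big-endian 16-bit word of the PPS (see rc_range_parameters in the PPS structure below) using the shift/mask constants defined in this header. An illustrative sketch of that packing, assuming rc points at one drm_dsc_rc_range_parameters entry and pps at the PPS payload:

    u16 word;

    word = rc->range_min_qp << DSC_PPS_RC_RANGE_MINQP_SHIFT |       /* bits 15:11 */
           rc->range_max_qp << DSC_PPS_RC_RANGE_MAXQP_SHIFT |       /* bits 10:6 */
           (rc->range_bpg_offset & DSC_RANGE_BPG_OFFSET_MASK);      /* bits 5:0 */
    pps->rc_range_parameters[i] = cpu_to_be16(word);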
56 | |||
57 | struct drm_dsc_config { | ||
58 | /* Bits / component for previous reconstructed line buffer */ | ||
59 | u8 line_buf_depth; | ||
60 | /* Bits per component to code (must be 8, 10, or 12) */ | ||
61 | u8 bits_per_component; | ||
62 | /* | ||
63 | * Flag indicating whether to do RGB to YCoCg conversion | ||
64 | * and back (should be 1 for RGB input) | ||
65 | */ | ||
66 | bool convert_rgb; | ||
67 | u8 slice_count; | ||
68 | /* Slice Width */ | ||
69 | u16 slice_width; | ||
70 | /* Slice Height */ | ||
71 | u16 slice_height; | ||
72 | /* | ||
73 | * 4:2:2 enable mode (from PPS, 4:2:2 conversion happens | ||
74 | * outside of DSC encode/decode algorithm) | ||
75 | */ | ||
76 | bool enable422; | ||
77 | /* Picture Width */ | ||
78 | u16 pic_width; | ||
79 | /* Picture Height */ | ||
80 | u16 pic_height; | ||
81 | /* Offset to bits/group used by RC to determine QP adjustment */ | ||
82 | u8 rc_tgt_offset_high; | ||
83 | /* Offset to bits/group used by RC to determine QP adjustment */ | ||
84 | u8 rc_tgt_offset_low; | ||
85 | /* Bits/pixel target << 4 (i.e., 4 fractional bits) */ | ||
86 | u16 bits_per_pixel; | ||
87 | /* | ||
88 | * Factor to determine if an edge is present based | ||
89 | * on the bits produced | ||
90 | */ | ||
91 | u8 rc_edge_factor; | ||
92 | /* Slow down incrementing once the range reaches this value */ | ||
93 | u8 rc_quant_incr_limit1; | ||
94 | /* Slow down incrementing once the range reaches this value */ | ||
95 | u8 rc_quant_incr_limit0; | ||
96 | /* Number of pixels to delay the initial transmission */ | ||
97 | u16 initial_xmit_delay; | ||
98 | /* Number of pixels to delay the VLD on the decoder, not including SSM */ | ||
99 | u16 initial_dec_delay; | ||
100 | /* Block prediction enable */ | ||
101 | bool block_pred_enable; | ||
102 | /* Bits/group offset to use for first line of the slice */ | ||
103 | u8 first_line_bpg_offset; | ||
104 | /* Value to use for RC model offset at slice start */ | ||
105 | u16 initial_offset; | ||
106 | /* Thresholds defining each of the buffer ranges */ | ||
107 | u16 rc_buf_thresh[DSC_NUM_BUF_RANGES - 1]; | ||
108 | /* Parameters for each of the RC ranges */ | ||
109 | struct drm_dsc_rc_range_parameters rc_range_params[DSC_NUM_BUF_RANGES]; | ||
110 | /* Total size of RC model */ | ||
111 | u16 rc_model_size; | ||
112 | /* Minimum QP where flatness information is sent */ | ||
113 | u8 flatness_min_qp; | ||
114 | /* Maximum QP where flatness information is sent */ | ||
115 | u8 flatness_max_qp; | ||
116 | /* Initial value for scale factor */ | ||
117 | u8 initial_scale_value; | ||
118 | /* Decrement scale factor every scale_decrement_interval groups */ | ||
119 | u16 scale_decrement_interval; | ||
120 | /* Increment scale factor every scale_increment_interval groups */ | ||
121 | u16 scale_increment_interval; | ||
122 | /* Non-first line BPG offset to use */ | ||
123 | u16 nfl_bpg_offset; | ||
124 | /* BPG offset used to enforce slice bit */ | ||
125 | u16 slice_bpg_offset; | ||
126 | /* Final RC linear transformation offset value */ | ||
127 | u16 final_offset; | ||
129 | /* Enable on-off VBR (i.e., disable stuffing bits) */ | ||
129 | bool vbr_enable; | ||
130 | /* Mux word size (in bits) for SSM mode */ | ||
131 | u8 mux_word_size; | ||
132 | /* | ||
133 | * The (max) size in bytes of the "chunks" that are | ||
134 | * used in slice multiplexing | ||
135 | */ | ||
136 | u16 slice_chunk_size; | ||
137 | /* Rate Control buffer size in bits */ | ||
138 | u16 rc_bits; | ||
139 | /* DSC Minor Version */ | ||
140 | u8 dsc_version_minor; | ||
141 | /* DSC Major version */ | ||
142 | u8 dsc_version_major; | ||
143 | /* Native 4:2:2 support */ | ||
144 | bool native_422; | ||
145 | /* Native 4:2:0 support */ | ||
146 | bool native_420; | ||
147 | /* Additional bits/group for the second line of a slice for native 4:2:0 */ | ||
148 | u8 second_line_bpg_offset; | ||
149 | /* Number of bits deallocated for each group not in the second line of a slice */ | ||
150 | u16 nsl_bpg_offset; | ||
151 | /* Offset adjustment for the second line in native 4:2:0 mode */ | ||
152 | u16 second_line_offset_adj; | ||
153 | }; | ||
154 | |||
155 | /** | ||
156 | * struct drm_dsc_picture_parameter_set - Represents 128 bytes of Picture Parameter Set | ||
157 | * | ||
158 | * The VESA DSC standard defines the picture parameter set (PPS) which display | ||
159 | * stream compression encoders must communicate to decoders. | ||
160 | * The PPS is encapsulated in 128 bytes (PPS 0 through PPS 127). The fields in | ||
161 | * this structure are as per Table 4.1 in the VESA DSC specification v1.1/v1.2. | ||
162 | * The PPS fields that span more than one byte must be stored in big-endian | ||
163 | * format. | ||
164 | */ | ||
165 | struct drm_dsc_picture_parameter_set { | ||
166 | /** | ||
167 | * @dsc_version: | ||
168 | * PPS0[3:0] - dsc_version_minor: Contains Minor version of DSC | ||
169 | * PPS0[7:4] - dsc_version_major: Contains major version of DSC | ||
170 | */ | ||
171 | u8 dsc_version; | ||
172 | /** | ||
173 | * @pps_identifier: | ||
174 | * PPS1[7:0] - Application specific identifier that can be | ||
175 | * used to differentiate between different PPS tables. | ||
176 | */ | ||
177 | u8 pps_identifier; | ||
178 | /** | ||
179 | * @pps_reserved: | ||
180 | * PPS2[7:0]- RESERVED Byte | ||
181 | */ | ||
182 | u8 pps_reserved; | ||
183 | /** | ||
184 | * @pps_3: | ||
185 | * PPS3[3:0] - linebuf_depth: Contains linebuffer bit depth used to | ||
186 | * generate the bitstream. (0x0 - 16 bits for DSC 1.2, 0x8 - 8 bits, | ||
187 | * 0xA - 10 bits, 0xB - 11 bits, 0xC - 12 bits, 0xD - 13 bits, | ||
188 | * 0xE - 14 bits for DSC 1.2, 0xF - 15 bits for DSC 1.2). | ||
189 | * PPS3[7:4] - bits_per_component: Bits per component for the original | ||
190 | * pixels of the encoded picture. | ||
191 | * 0x0 = 16bpc (allowed only when dsc_version_minor = 0x2) | ||
192 | * 0x8 = 8bpc, 0xA = 10bpc, 0xC = 12bpc, 0xE = 14bpc (also | ||
193 | * allowed only when dsc_version_minor = 0x2) | ||
194 | */ | ||
195 | u8 pps_3; | ||
196 | /** | ||
197 | * @pps_4: | ||
198 | * PPS4[1:0] - These are the most significant 2 bits of the | ||
199 | * compressed BPP bits_per_pixel[9:0] syntax element. | ||
200 | * PPS4[2] - vbr_enable: 0 = VBR disabled, 1 = VBR enabled | ||
201 | * PPS4[3] - simple_422: Indicates if decoder drops samples to | ||
202 | * reconstruct the 4:2:2 picture. | ||
203 | * PPS4[4] - Convert_rgb: Indicates if DSC color space conversion is | ||
204 | * active. | ||
205 | * PPS4[5] - block_pred_enable: Indicates if BP is used to code any | ||
206 | * groups in picture | ||
207 | * PPS4[7:6] - Reserved bits | ||
208 | */ | ||
209 | u8 pps_4; | ||
210 | /** | ||
211 | * @bits_per_pixel_low: | ||
212 | * PPS5[7:0] - This indicates the lower significant 8 bits of | ||
213 | * the compressed BPP bits_per_pixel[9:0] element. | ||
214 | */ | ||
215 | u8 bits_per_pixel_low; | ||
216 | /** | ||
217 | * @pic_height: | ||
218 | * PPS6[7:0], PPS7[7:0] - pic_height: Specifies the number of pixel rows | ||
219 | * within the raster. | ||
220 | */ | ||
221 | __be16 pic_height; | ||
222 | /** | ||
223 | * @pic_width: | ||
224 | * PPS8[7:0], PPS9[7:0] - pic_width: Number of pixel columns within | ||
225 | * the raster. | ||
226 | */ | ||
227 | __be16 pic_width; | ||
228 | /** | ||
229 | * @slice_height: | ||
230 | * PPS10[7:0], PPS11[7:0] - Slice height in units of pixels. | ||
231 | */ | ||
232 | __be16 slice_height; | ||
233 | /** | ||
234 | * @slice_width: | ||
235 | * PPS12[7:0], PPS13[7:0] - Slice width in terms of pixels. | ||
236 | */ | ||
237 | __be16 slice_width; | ||
238 | /** | ||
239 | * @chunk_size: | ||
240 | * PPS14[7:0], PPS15[7:0] - Size in units of bytes of the chunks | ||
241 | * that are used for slice multiplexing. | ||
242 | */ | ||
243 | __be16 chunk_size; | ||
244 | /** | ||
245 | * @initial_xmit_delay_high: | ||
246 | * PPS16[1:0] - Most Significant two bits of initial transmission delay. | ||
247 | * It specifies the number of pixel times that the encoder waits before | ||
248 | * transmitting data from its rate buffer. | ||
249 | * PPS16[7:2] - Reserved | ||
250 | */ | ||
251 | u8 initial_xmit_delay_high; | ||
252 | /** | ||
253 | * @initial_xmit_delay_low: | ||
254 | * PPS17[7:0] - Least significant 8 bits of initial transmission delay. | ||
255 | */ | ||
256 | u8 initial_xmit_delay_low; | ||
257 | /** | ||
258 | * @initial_dec_delay: | ||
259 | * | ||
260 | * PPS18[7:0], PPS19[7:0] - Initial decoding delay which is the number | ||
261 | * of pixel times that the decoder accumulates data in its rate buffer | ||
262 | * before starting to decode and output pixels. | ||
263 | */ | ||
264 | __be16 initial_dec_delay; | ||
265 | /** | ||
266 | * @pps20_reserved: | ||
267 | * | ||
268 | * PPS20[7:0] - Reserved | ||
269 | */ | ||
270 | u8 pps20_reserved; | ||
271 | /** | ||
272 | * @initial_scale_value: | ||
273 | * PPS21[5:0] - Initial rcXformScale factor used at beginning | ||
274 | * of a slice. | ||
275 | * PPS21[7:6] - Reserved | ||
276 | */ | ||
277 | u8 initial_scale_value; | ||
278 | /** | ||
279 | * @scale_increment_interval: | ||
280 | * PPS22[7:0], PPS23[7:0] - Number of group times between incrementing | ||
281 | * the rcXformScale factor at end of a slice. | ||
282 | */ | ||
283 | __be16 scale_increment_interval; | ||
284 | /** | ||
285 | * @scale_decrement_interval_high: | ||
286 | * PPS24[3:0] - Higher 4 bits indicating number of group times between | ||
287 | * decrementing the rcXformScale factor at beginning of a slice. | ||
288 | * PPS24[7:4] - Reserved | ||
289 | */ | ||
290 | u8 scale_decrement_interval_high; | ||
291 | /** | ||
292 | * @scale_decrement_interval_low: | ||
293 | * PPS25[7:0] - Lower 8 bits of scale decrement interval | ||
294 | */ | ||
295 | u8 scale_decrement_interval_low; | ||
296 | /** | ||
297 | * @pps26_reserved: | ||
298 | * PPS26[7:0] | ||
299 | */ | ||
300 | u8 pps26_reserved; | ||
301 | /** | ||
302 | * @first_line_bpg_offset: | ||
303 | * PPS27[4:0] - Number of additional bits that are allocated | ||
304 | * for each group on the first line of a slice. | ||
305 | * PPS27[7:5] - Reserved | ||
306 | */ | ||
307 | u8 first_line_bpg_offset; | ||
308 | /** | ||
309 | * @nfl_bpg_offset: | ||
310 | * PPS28[7:0], PPS29[7:0] - Number of bits including frac bits | ||
311 | * deallocated for each group for groups after the first line of slice. | ||
312 | */ | ||
313 | __be16 nfl_bpg_offset; | ||
314 | /** | ||
315 | * @slice_bpg_offset: | ||
316 | * PPS30, PPS31[7:0] - Number of bits that are deallocated for each | ||
317 | * group to enforce the slice constraint. | ||
318 | */ | ||
319 | __be16 slice_bpg_offset; | ||
320 | /** | ||
321 | * @initial_offset: | ||
322 | * PPS32,33[7:0] - Initial value for rcXformOffset | ||
323 | */ | ||
324 | __be16 initial_offset; | ||
325 | /** | ||
326 | * @final_offset: | ||
327 | * PPS34,35[7:0] - Maximum end-of-slice value for rcXformOffset | ||
328 | */ | ||
329 | __be16 final_offset; | ||
330 | /** | ||
331 | * @flatness_min_qp: | ||
332 | * PPS36[4:0] - Minimum QP at which flatness is signaled and | ||
333 | * flatness QP adjustment is made. | ||
334 | * PPS36[7:5] - Reserved | ||
335 | */ | ||
336 | u8 flatness_min_qp; | ||
337 | /** | ||
338 | * @flatness_max_qp: | ||
339 | * PPS37[4:0] - Max QP at which flatness is signaled and | ||
340 | * the flatness adjustment is made. | ||
341 | * PPS37[7:5] - Reserved | ||
342 | */ | ||
343 | u8 flatness_max_qp; | ||
344 | /** | ||
345 | * @rc_model_size: | ||
346 | * PPS38,39[7:0] - Number of bits within RC Model. | ||
347 | */ | ||
348 | __be16 rc_model_size; | ||
349 | /** | ||
350 | * @rc_edge_factor: | ||
351 | * PPS40[3:0] - Ratio of current activity vs. previous | ||
352 | * activity to determine presence of edge. | ||
353 | * PPS40[7:4] - Reserved | ||
354 | */ | ||
355 | u8 rc_edge_factor; | ||
356 | /** | ||
357 | * @rc_quant_incr_limit0: | ||
358 | * PPS41[4:0] - QP threshold used in short term RC | ||
359 | * PPS41[7:5] - Reserved | ||
360 | */ | ||
361 | u8 rc_quant_incr_limit0; | ||
362 | /** | ||
363 | * @rc_quant_incr_limit1: | ||
364 | * PPS42[4:0] - QP threshold used in short term RC | ||
365 | * PPS42[7:5] - Reserved | ||
366 | */ | ||
367 | u8 rc_quant_incr_limit1; | ||
368 | /** | ||
369 | * @rc_tgt_offset: | ||
370 | * PPS43[3:0] - Lower end of the variability range around the target | ||
371 | * bits per group that is allowed by short term RC. | ||
372 | * PPS43[7:4] - Upper end of the variability range around the target | ||
373 | * bits per group that is allowed by short term RC. | ||
374 | */ | ||
375 | u8 rc_tgt_offset; | ||
376 | /** | ||
377 | * @rc_buf_thresh: | ||
378 | * PPS44[7:0] - PPS57[7:0] - Specifies the thresholds in RC model for | ||
379 | * the 15 ranges defined by 14 thresholds. | ||
380 | */ | ||
381 | u8 rc_buf_thresh[DSC_NUM_BUF_RANGES - 1]; | ||
382 | /** | ||
383 | * @rc_range_parameters: | ||
384 | * PPS58[7:0] - PPS87[7:0] | ||
385 | * Parameters that correspond to each of the 15 ranges. | ||
386 | */ | ||
387 | __be16 rc_range_parameters[DSC_NUM_BUF_RANGES]; | ||
388 | /** | ||
389 | * @native_422_420: | ||
390 | * PPS88[0] - 0 = Native 4:2:2 not used | ||
391 | * 1 = Native 4:2:2 used | ||
392 | * PPS88[1] - 0 = Native 4:2:0 not used | ||
393 | * 1 = Native 4:2:0 used | ||
394 | * PPS88[7:2] - Reserved 6 bits | ||
395 | */ | ||
396 | u8 native_422_420; | ||
397 | /** | ||
398 | * @second_line_bpg_offset: | ||
399 | * PPS89[4:0] - Additional bits/group budget for the | ||
400 | * second line of a slice in Native 4:2:0 mode. | ||
401 | * Set to 0 if DSC minor version is 1 or native420 is 0. | ||
402 | * PPS89[7:5] - Reserved | ||
403 | */ | ||
404 | u8 second_line_bpg_offset; | ||
405 | /** | ||
406 | * @nsl_bpg_offset: | ||
407 | * PPS90[7:0], PPS91[7:0] - Number of bits that are deallocated | ||
408 | * for each group that is not in the second line of a slice. | ||
409 | */ | ||
410 | __be16 nsl_bpg_offset; | ||
411 | /** | ||
412 | * @second_line_offset_adj: | ||
413 | * PPS92[7:0], PPS93[7:0] - Used as offset adjustment for the second | ||
414 | * line in Native 4:2:0 mode. | ||
415 | */ | ||
416 | __be16 second_line_offset_adj; | ||
417 | /** | ||
418 | * @pps_long_94_reserved: | ||
419 | * PPS 94, 95, 96, 97 - Reserved | ||
420 | */ | ||
421 | u32 pps_long_94_reserved; | ||
422 | /** | ||
423 | * @pps_long_98_reserved: | ||
424 | * PPS 98, 99, 100, 101 - Reserved | ||
425 | */ | ||
426 | u32 pps_long_98_reserved; | ||
427 | /** | ||
428 | * @pps_long_102_reserved: | ||
429 | * PPS 102, 103, 104, 105 - Reserved | ||
430 | */ | ||
431 | u32 pps_long_102_reserved; | ||
432 | /** | ||
433 | * @pps_long_106_reserved: | ||
434 | * PPS 106, 107, 108, 109 - reserved | ||
435 | */ | ||
436 | u32 pps_long_106_reserved; | ||
437 | /** | ||
438 | * @pps_long_110_reserved: | ||
439 | * PPS 110, 111, 112, 113 - reserved | ||
440 | */ | ||
441 | u32 pps_long_110_reserved; | ||
442 | /** | ||
443 | * @pps_long_114_reserved: | ||
444 | * PPS 114 - 117 - reserved | ||
445 | */ | ||
446 | u32 pps_long_114_reserved; | ||
447 | /** | ||
448 | * @pps_long_118_reserved: | ||
449 | * PPS 118 - 121 - reserved | ||
450 | */ | ||
451 | u32 pps_long_118_reserved; | ||
452 | /** | ||
453 | * @pps_long_122_reserved: | ||
454 | * PPS 122- 125 - reserved | ||
455 | */ | ||
456 | u32 pps_long_122_reserved; | ||
457 | /** | ||
458 | * @pps_short_126_reserved: | ||
459 | * PPS 126, 127 - reserved | ||
460 | */ | ||
461 | __be16 pps_short_126_reserved; | ||
462 | } __packed; | ||
463 | |||
464 | /** | ||
465 | * struct drm_dsc_pps_infoframe - DSC infoframe carrying the Picture Parameter | ||
466 | * Set Metadata | ||
467 | * | ||
468 | * This structure represents the DSC PPS infoframe required to send the Picture | ||
469 | * Parameter Set metadata required before enabling VESA Display Stream | ||
470 | * Compression. This is based on the DP Secondary Data Packet structure and | ||
471 | * comprises of SDP Header as defined in drm_dp_helper.h and PPS payload. | ||
472 | * | ||
473 | * @pps_header: Header for PPS as per DP SDP header format | ||
474 | * @pps_payload: PPS payload fields as per DSC specification Table 4-1 | ||
475 | */ | ||
476 | struct drm_dsc_pps_infoframe { | ||
477 | struct dp_sdp_header pps_header; | ||
478 | struct drm_dsc_picture_parameter_set pps_payload; | ||
479 | } __packed; | ||
480 | |||
481 | void drm_dsc_dp_pps_header_init(struct drm_dsc_pps_infoframe *pps_sdp); | ||
482 | void drm_dsc_pps_infoframe_pack(struct drm_dsc_pps_infoframe *pps_sdp, | ||
483 | const struct drm_dsc_config *dsc_cfg); | ||
484 | |||
485 | #endif /* DRM_DSC_H_ */ | ||
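Editorial note: taken together, a source driver would fill a struct drm_dsc_config, pack it into the SDP-wrapped PPS using the two helpers declared above, and then write the result out through its hardware-specific infoframe path. A minimal sketch, assuming dsc_cfg is an already-computed configuration (the final write-out step is driver-specific and not shown):

    struct drm_dsc_pps_infoframe pps_sdp;

    /* Fill the DP SDP header fields for a PPS packet. */
    drm_dsc_dp_pps_header_init(&pps_sdp);

    /* Pack the 128-byte PPS payload from the DSC configuration. */
    drm_dsc_pps_infoframe_pack(&pps_sdp, &dsc_cfg);

    /* pps_sdp is now ready to be programmed into the sink via the
     * encoder's secondary data packet registers. */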
diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h index 987cefa337de..786816cf4aa5 100644 --- a/include/linux/sysfs.h +++ b/include/linux/sysfs.h | |||
@@ -234,7 +234,7 @@ int __must_check sysfs_create_file_ns(struct kobject *kobj, | |||
234 | const struct attribute *attr, | 234 | const struct attribute *attr, |
235 | const void *ns); | 235 | const void *ns); |
236 | int __must_check sysfs_create_files(struct kobject *kobj, | 236 | int __must_check sysfs_create_files(struct kobject *kobj, |
237 | const struct attribute **attr); | 237 | const struct attribute * const *attr); |
238 | int __must_check sysfs_chmod_file(struct kobject *kobj, | 238 | int __must_check sysfs_chmod_file(struct kobject *kobj, |
239 | const struct attribute *attr, umode_t mode); | 239 | const struct attribute *attr, umode_t mode); |
240 | struct kernfs_node *sysfs_break_active_protection(struct kobject *kobj, | 240 | struct kernfs_node *sysfs_break_active_protection(struct kobject *kobj, |
@@ -243,7 +243,7 @@ void sysfs_unbreak_active_protection(struct kernfs_node *kn); | |||
243 | void sysfs_remove_file_ns(struct kobject *kobj, const struct attribute *attr, | 243 | void sysfs_remove_file_ns(struct kobject *kobj, const struct attribute *attr, |
244 | const void *ns); | 244 | const void *ns); |
245 | bool sysfs_remove_file_self(struct kobject *kobj, const struct attribute *attr); | 245 | bool sysfs_remove_file_self(struct kobject *kobj, const struct attribute *attr); |
246 | void sysfs_remove_files(struct kobject *kobj, const struct attribute **attr); | 246 | void sysfs_remove_files(struct kobject *kobj, const struct attribute * const *attr); |
247 | 247 | ||
248 | int __must_check sysfs_create_bin_file(struct kobject *kobj, | 248 | int __must_check sysfs_create_bin_file(struct kobject *kobj, |
249 | const struct bin_attribute *attr); | 249 | const struct bin_attribute *attr); |
@@ -342,7 +342,7 @@ static inline int sysfs_create_file_ns(struct kobject *kobj, | |||
342 | } | 342 | } |
343 | 343 | ||
344 | static inline int sysfs_create_files(struct kobject *kobj, | 344 | static inline int sysfs_create_files(struct kobject *kobj, |
345 | const struct attribute **attr) | 345 | const struct attribute * const *attr) |
346 | { | 346 | { |
347 | return 0; | 347 | return 0; |
348 | } | 348 | } |
@@ -377,7 +377,7 @@ static inline bool sysfs_remove_file_self(struct kobject *kobj, | |||
377 | } | 377 | } |
378 | 378 | ||
379 | static inline void sysfs_remove_files(struct kobject *kobj, | 379 | static inline void sysfs_remove_files(struct kobject *kobj, |
380 | const struct attribute **attr) | 380 | const struct attribute * const *attr) |
381 | { | 381 | { |
382 | } | 382 | } |
383 | 383 | ||