 drivers/gpu/drm/i915/i915_debugfs.c                 |  11
 drivers/gpu/drm/i915/i915_drv.c                     | 300
 drivers/gpu/drm/i915/i915_drv.h                     |  37
 drivers/gpu/drm/i915/i915_gem.c                     |  28
 drivers/gpu/drm/i915/i915_gem_context.c             |  20
 drivers/gpu/drm/i915/i915_gem_context.h             |  33
 drivers/gpu/drm/i915/i915_gem_execbuffer.c          |   7
 drivers/gpu/drm/i915/i915_gem_gtt.c                 |  50
 drivers/gpu/drm/i915/i915_gem_stolen.c              |   4
 drivers/gpu/drm/i915/i915_gpu_error.c               |  20
 drivers/gpu/drm/i915/i915_perf.c                    | 125
 drivers/gpu/drm/i915/i915_reg.h                     |  54
 drivers/gpu/drm/i915/i915_request.c                 |  14
 drivers/gpu/drm/i915/i915_request.h                 |   8
 drivers/gpu/drm/i915/i915_sw_fence.c                |  13
 drivers/gpu/drm/i915/intel_atomic_plane.c           |  53
 drivers/gpu/drm/i915/intel_csr.c                    |   7
 drivers/gpu/drm/i915/intel_ddi.c                    |   2
 drivers/gpu/drm/i915/intel_display.c                | 632
 drivers/gpu/drm/i915/intel_drv.h                    |  45
 drivers/gpu/drm/i915/intel_engine_cs.c              |   3
 drivers/gpu/drm/i915/intel_fbc.c                    |   4
 drivers/gpu/drm/i915/intel_fbdev.c                  |   6
 drivers/gpu/drm/i915/intel_guc_submission.c         |  80
 drivers/gpu/drm/i915/intel_lrc.c                    |  57
 drivers/gpu/drm/i915/intel_overlay.c                | 147
 drivers/gpu/drm/i915/intel_pm.c                     |  19
 drivers/gpu/drm/i915/intel_ringbuffer.c             |  23
 drivers/gpu/drm/i915/intel_runtime_pm.c             |   4
 drivers/gpu/drm/i915/intel_sdvo.c                   | 101
 drivers/gpu/drm/i915/intel_sprite.c                 | 494
 drivers/gpu/drm/i915/intel_uc.c                     |   4
 drivers/gpu/drm/i915/selftests/huge_pages.c         |   2
 drivers/gpu/drm/i915/selftests/i915_gem_coherency.c |   2
 drivers/gpu/drm/i915/selftests/i915_gem_context.c   | 202
 drivers/gpu/drm/i915/selftests/i915_request.c       |   8
 drivers/gpu/drm/i915/selftests/intel_guc.c          |   4
 drivers/gpu/drm/i915/selftests/intel_lrc.c          |   8
 drivers/gpu/drm/i915/selftests/intel_workarounds.c  |   5
 39 files changed, 1686 insertions(+), 950 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 1f7051e97afb..b4744a68cd88 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -4117,6 +4117,17 @@ i915_ring_test_irq_set(void *data, u64 val)
 {
 	struct drm_i915_private *i915 = data;
 
+	/* GuC keeps the user interrupt permanently enabled for submission */
+	if (USES_GUC_SUBMISSION(i915))
+		return -ENODEV;
+
+	/*
+	 * From icl, we can no longer individually mask interrupt generation
+	 * from each engine.
+	 */
+	if (INTEL_GEN(i915) >= 11)
+		return -ENODEV;
+
 	val &= INTEL_INFO(i915)->ring_mask;
 	DRM_DEBUG_DRIVER("Masking interrupts on rings 0x%08llx\n", val);
 
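Note: with this change a write to the ring-test-irq knob can now fail where it
used to be accepted. A minimal user-space sketch (the debugfs path and mask
value here are assumptions for illustration, not part of the patch):

	#include <errno.h>
	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		/* hypothetical debugfs node backed by i915_ring_test_irq_set() */
		int fd = open("/sys/kernel/debug/dri/0/i915_ring_test_irq", O_WRONLY);
		if (fd < 0)
			return 1;
		if (write(fd, "0xff", 4) < 0 && errno == ENODEV)
			printf("per-engine irq masking unavailable (GuC or gen11+)\n");
		close(fd);
		return 0;
	}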
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 2ddf8538cb47..44e2c0f5ec50 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -1063,6 +1063,300 @@ static void intel_sanitize_options(struct drm_i915_private *dev_priv)
 	intel_gvt_sanitize_options(dev_priv);
 }
 
+static enum dram_rank skl_get_dimm_rank(u8 size, u32 rank)
+{
+	if (size == 0)
+		return I915_DRAM_RANK_INVALID;
+	if (rank == SKL_DRAM_RANK_SINGLE)
+		return I915_DRAM_RANK_SINGLE;
+	else if (rank == SKL_DRAM_RANK_DUAL)
+		return I915_DRAM_RANK_DUAL;
+
+	return I915_DRAM_RANK_INVALID;
+}
+
+static bool
+skl_is_16gb_dimm(enum dram_rank rank, u8 size, u8 width)
+{
+	if (rank == I915_DRAM_RANK_SINGLE && width == 8 && size == 16)
+		return true;
+	else if (rank == I915_DRAM_RANK_DUAL && width == 8 && size == 32)
+		return true;
+	else if (rank == I915_DRAM_RANK_SINGLE && width == 16 && size == 8)
+		return true;
+	else if (rank == I915_DRAM_RANK_DUAL && width == 16 && size == 16)
+		return true;
+
+	return false;
+}
+
+static int
+skl_dram_get_channel_info(struct dram_channel_info *ch, u32 val)
+{
+	u32 tmp_l, tmp_s;
+	u32 s_val = val >> SKL_DRAM_S_SHIFT;
+
+	if (!val)
+		return -EINVAL;
+
+	tmp_l = val & SKL_DRAM_SIZE_MASK;
+	tmp_s = s_val & SKL_DRAM_SIZE_MASK;
+
+	if (tmp_l == 0 && tmp_s == 0)
+		return -EINVAL;
+
+	ch->l_info.size = tmp_l;
+	ch->s_info.size = tmp_s;
+
+	tmp_l = (val & SKL_DRAM_WIDTH_MASK) >> SKL_DRAM_WIDTH_SHIFT;
+	tmp_s = (s_val & SKL_DRAM_WIDTH_MASK) >> SKL_DRAM_WIDTH_SHIFT;
+	ch->l_info.width = (1 << tmp_l) * 8;
+	ch->s_info.width = (1 << tmp_s) * 8;
+
+	tmp_l = val & SKL_DRAM_RANK_MASK;
+	tmp_s = s_val & SKL_DRAM_RANK_MASK;
+	ch->l_info.rank = skl_get_dimm_rank(ch->l_info.size, tmp_l);
+	ch->s_info.rank = skl_get_dimm_rank(ch->s_info.size, tmp_s);
+
+	if (ch->l_info.rank == I915_DRAM_RANK_DUAL ||
+	    ch->s_info.rank == I915_DRAM_RANK_DUAL)
+		ch->rank = I915_DRAM_RANK_DUAL;
+	else if (ch->l_info.rank == I915_DRAM_RANK_SINGLE &&
+		 ch->s_info.rank == I915_DRAM_RANK_SINGLE)
+		ch->rank = I915_DRAM_RANK_DUAL;
+	else
+		ch->rank = I915_DRAM_RANK_SINGLE;
+
+	ch->is_16gb_dimm = skl_is_16gb_dimm(ch->l_info.rank, ch->l_info.size,
+					    ch->l_info.width) ||
+			   skl_is_16gb_dimm(ch->s_info.rank, ch->s_info.size,
+					    ch->s_info.width);
+
+	DRM_DEBUG_KMS("(size:width:rank) L(%dGB:X%d:%s) S(%dGB:X%d:%s)\n",
+		      ch->l_info.size, ch->l_info.width,
+		      ch->l_info.rank == I915_DRAM_RANK_DUAL ? "dual" : "single",
+		      ch->s_info.size, ch->s_info.width,
+		      ch->s_info.rank == I915_DRAM_RANK_DUAL ? "dual" : "single");
+
+	return 0;
+}
+
+static bool
+intel_is_dram_symmetric(u32 val_ch0, u32 val_ch1,
+			struct dram_channel_info *ch0)
+{
+	return (val_ch0 == val_ch1 &&
+		(ch0->s_info.size == 0 ||
+		 (ch0->l_info.size == ch0->s_info.size &&
+		  ch0->l_info.width == ch0->s_info.width &&
+		  ch0->l_info.rank == ch0->s_info.rank)));
+}
+
+static int
+skl_dram_get_channels_info(struct drm_i915_private *dev_priv)
+{
+	struct dram_info *dram_info = &dev_priv->dram_info;
+	struct dram_channel_info ch0, ch1;
+	u32 val_ch0, val_ch1;
+	int ret;
+
+	val_ch0 = I915_READ(SKL_MAD_DIMM_CH0_0_0_0_MCHBAR_MCMAIN);
+	ret = skl_dram_get_channel_info(&ch0, val_ch0);
+	if (ret == 0)
+		dram_info->num_channels++;
+
+	val_ch1 = I915_READ(SKL_MAD_DIMM_CH1_0_0_0_MCHBAR_MCMAIN);
+	ret = skl_dram_get_channel_info(&ch1, val_ch1);
+	if (ret == 0)
+		dram_info->num_channels++;
+
+	if (dram_info->num_channels == 0) {
+		DRM_INFO("Number of memory channels is zero\n");
+		return -EINVAL;
+	}
+
+	dram_info->valid_dimm = true;
+
+	/*
+	 * If any of the channels is single rank, the worst-case output
+	 * will be the same as for single-rank memory, so treat the
+	 * system as single rank.
+	 */
+	if (ch0.rank == I915_DRAM_RANK_SINGLE ||
+	    ch1.rank == I915_DRAM_RANK_SINGLE)
+		dram_info->rank = I915_DRAM_RANK_SINGLE;
+	else
+		dram_info->rank = max(ch0.rank, ch1.rank);
+
+	if (dram_info->rank == I915_DRAM_RANK_INVALID) {
+		DRM_INFO("couldn't get memory rank information\n");
+		return -EINVAL;
+	}
+
+	if (ch0.is_16gb_dimm || ch1.is_16gb_dimm)
+		dram_info->is_16gb_dimm = true;
+
+	dev_priv->dram_info.symmetric_memory = intel_is_dram_symmetric(val_ch0,
+								       val_ch1,
+								       &ch0);
+
+	DRM_DEBUG_KMS("memory configuration is %ssymmetric\n",
+		      dev_priv->dram_info.symmetric_memory ? "" : "not ");
+	return 0;
+}
+
+static int
+skl_get_dram_info(struct drm_i915_private *dev_priv)
+{
+	struct dram_info *dram_info = &dev_priv->dram_info;
+	u32 mem_freq_khz, val;
+	int ret;
+
+	ret = skl_dram_get_channels_info(dev_priv);
+	if (ret)
+		return ret;
+
+	val = I915_READ(SKL_MC_BIOS_DATA_0_0_0_MCHBAR_PCU);
+	mem_freq_khz = DIV_ROUND_UP((val & SKL_REQ_DATA_MASK) *
+				    SKL_MEMORY_FREQ_MULTIPLIER_HZ, 1000);
+
+	dram_info->bandwidth_kbps = dram_info->num_channels *
+				    mem_freq_khz * 8;
+
+	if (dram_info->bandwidth_kbps == 0) {
+		DRM_INFO("Couldn't get system memory bandwidth\n");
+		return -EINVAL;
+	}
+
+	dram_info->valid = true;
+	return 0;
+}
+
+static int
+bxt_get_dram_info(struct drm_i915_private *dev_priv)
+{
+	struct dram_info *dram_info = &dev_priv->dram_info;
+	u32 dram_channels;
+	u32 mem_freq_khz, val;
+	u8 num_active_channels;
+	int i;
+
+	val = I915_READ(BXT_P_CR_MC_BIOS_REQ_0_0_0);
+	mem_freq_khz = DIV_ROUND_UP((val & BXT_REQ_DATA_MASK) *
+				    BXT_MEMORY_FREQ_MULTIPLIER_HZ, 1000);
+
+	dram_channels = val & BXT_DRAM_CHANNEL_ACTIVE_MASK;
+	num_active_channels = hweight32(dram_channels);
+
+	/* Each active bit represents a 4-byte-wide channel */
+	dram_info->bandwidth_kbps = (mem_freq_khz * num_active_channels * 4);
+
+	if (dram_info->bandwidth_kbps == 0) {
+		DRM_INFO("Couldn't get system memory bandwidth\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * Now read each DUNIT8/9/10/11 to check the rank of each DIMM.
+	 */
+	for (i = BXT_D_CR_DRP0_DUNIT_START; i <= BXT_D_CR_DRP0_DUNIT_END; i++) {
+		u8 size, width;
+		enum dram_rank rank;
+		u32 tmp;
+
+		val = I915_READ(BXT_D_CR_DRP0_DUNIT(i));
+		if (val == 0xFFFFFFFF)
+			continue;
+
+		dram_info->num_channels++;
+		tmp = val & BXT_DRAM_RANK_MASK;
+
+		if (tmp == BXT_DRAM_RANK_SINGLE)
+			rank = I915_DRAM_RANK_SINGLE;
+		else if (tmp == BXT_DRAM_RANK_DUAL)
+			rank = I915_DRAM_RANK_DUAL;
+		else
+			rank = I915_DRAM_RANK_INVALID;
+
+		tmp = val & BXT_DRAM_SIZE_MASK;
+		if (tmp == BXT_DRAM_SIZE_4GB)
+			size = 4;
+		else if (tmp == BXT_DRAM_SIZE_6GB)
+			size = 6;
+		else if (tmp == BXT_DRAM_SIZE_8GB)
+			size = 8;
+		else if (tmp == BXT_DRAM_SIZE_12GB)
+			size = 12;
+		else if (tmp == BXT_DRAM_SIZE_16GB)
+			size = 16;
+		else
+			size = 0;
+
+		tmp = (val & BXT_DRAM_WIDTH_MASK) >> BXT_DRAM_WIDTH_SHIFT;
+		width = (1 << tmp) * 8;
+		DRM_DEBUG_KMS("dram size:%dGB width:X%d rank:%s\n", size,
+			      width, rank == I915_DRAM_RANK_SINGLE ? "single" :
+			      rank == I915_DRAM_RANK_DUAL ? "dual" : "unknown");
+
+		/*
+		 * If any of the channels is single rank, the worst-case
+		 * output will be the same as for single-rank memory, so
+		 * treat the system as single rank.
+		 */
+		if (dram_info->rank == I915_DRAM_RANK_INVALID)
+			dram_info->rank = rank;
+		else if (rank == I915_DRAM_RANK_SINGLE)
+			dram_info->rank = I915_DRAM_RANK_SINGLE;
+	}
+
+	if (dram_info->rank == I915_DRAM_RANK_INVALID) {
+		DRM_INFO("couldn't get memory rank information\n");
+		return -EINVAL;
+	}
+
+	dram_info->valid_dimm = true;
+	dram_info->valid = true;
+	return 0;
+}
+
+static void
+intel_get_dram_info(struct drm_i915_private *dev_priv)
+{
+	struct dram_info *dram_info = &dev_priv->dram_info;
+	char bandwidth_str[32];
+	int ret;
+
+	dram_info->valid = false;
+	dram_info->valid_dimm = false;
+	dram_info->is_16gb_dimm = false;
+	dram_info->rank = I915_DRAM_RANK_INVALID;
+	dram_info->bandwidth_kbps = 0;
+	dram_info->num_channels = 0;
+
+	if (INTEL_GEN(dev_priv) < 9 || IS_GEMINILAKE(dev_priv))
+		return;
+
+	/* Need to calculate bandwidth only for Gen9 */
+	if (IS_BROXTON(dev_priv))
+		ret = bxt_get_dram_info(dev_priv);
+	else if (INTEL_GEN(dev_priv) == 9)
+		ret = skl_get_dram_info(dev_priv);
+	else
+		ret = skl_dram_get_channels_info(dev_priv);
+	if (ret)
+		return;
+
+	if (dram_info->bandwidth_kbps)
+		sprintf(bandwidth_str, "%d KBps", dram_info->bandwidth_kbps);
+	else
+		sprintf(bandwidth_str, "unknown");
+	DRM_DEBUG_KMS("DRAM bandwidth:%s, total-channels: %u\n",
+		      bandwidth_str, dram_info->num_channels);
+	DRM_DEBUG_KMS("DRAM rank: %s rank 16GB-dimm:%s\n",
+		      (dram_info->rank == I915_DRAM_RANK_DUAL) ?
+		      "dual" : "single", yesno(dram_info->is_16gb_dimm));
+}
+
 /**
  * i915_driver_init_hw - setup state requiring device access
  * @dev_priv: device private
@@ -1180,6 +1474,12 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
 		goto err_msi;
 
 	intel_opregion_setup(dev_priv);
+	/*
+	 * Fill the dram structure to get the system's raw memory bandwidth
+	 * and DRAM info; this is used for the memory latency calculation.
+	 */
+	intel_get_dram_info(dev_priv);
+
 
 	return 0;
 
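Note: a worked example of the bandwidth arithmetic in skl_get_dram_info()
(standalone sketch; the register value 0x8 is an assumption for illustration):

	#include <stdio.h>

	#define SKL_REQ_DATA_MASK		0xF
	#define SKL_MEMORY_FREQ_MULTIPLIER_HZ	266666666ULL
	#define DIV_ROUND_UP(n, d)		(((n) + (d) - 1) / (d))

	int main(void)
	{
		unsigned long long val = 0x8;	/* assumed BIOS-requested ratio */
		unsigned long long num_channels = 2;
		unsigned long long mem_freq_khz =
			DIV_ROUND_UP((val & SKL_REQ_DATA_MASK) *
				     SKL_MEMORY_FREQ_MULTIPLIER_HZ, 1000);
		/* each SKL channel is 8 bytes wide, hence the "* 8" in the driver */
		unsigned long long bw_kbps = num_channels * mem_freq_khz * 8;

		/* 8 * 266.67MHz ~= 2133MT/s; two channels ~= 34 GB/s */
		printf("%llu kHz -> %llu kBps\n", mem_freq_khz, bw_kbps);
		return 0;
	}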
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 7ea442033a57..8624b4bdc242 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -87,8 +87,8 @@
 
 #define DRIVER_NAME		"i915"
 #define DRIVER_DESC		"Intel Graphics"
-#define DRIVER_DATE		"20180906"
-#define DRIVER_TIMESTAMP	1536242083
+#define DRIVER_DATE		"20180921"
+#define DRIVER_TIMESTAMP	1537521997
 
 /* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and
  * WARN_ON()) for hw state sanity checks to check for unexpected conditions
@@ -1946,6 +1946,20 @@ struct drm_i915_private {
 		bool distrust_bios_wm;
 	} wm;
 
+	struct dram_info {
+		bool valid;
+		bool valid_dimm;
+		bool is_16gb_dimm;
+		u8 num_channels;
+		enum dram_rank {
+			I915_DRAM_RANK_INVALID = 0,
+			I915_DRAM_RANK_SINGLE,
+			I915_DRAM_RANK_DUAL
+		} rank;
+		u32 bandwidth_kbps;
+		bool symmetric_memory;
+	} dram_info;
+
 	struct i915_runtime_pm runtime_pm;
 
 	struct {
@@ -2159,6 +2173,15 @@ struct drm_i915_private {
 	 */
 };
 
+struct dram_channel_info {
+	struct info {
+		u8 size, width;
+		enum dram_rank rank;
+	} l_info, s_info;
+	enum dram_rank rank;
+	bool is_16gb_dimm;
+};
+
 static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
 {
 	return container_of(dev, struct drm_i915_private, drm);
@@ -2284,7 +2307,7 @@ static inline struct scatterlist *__sg_next(struct scatterlist *sg)
 #define for_each_sgt_dma(__dmap, __iter, __sgt) \
 	for ((__iter) = __sgt_iter((__sgt)->sgl, true); \
 	     ((__dmap) = (__iter).dma + (__iter).curr); \
-	     (((__iter).curr += PAGE_SIZE) >= (__iter).max) ? \
+	     (((__iter).curr += I915_GTT_PAGE_SIZE) >= (__iter).max) ? \
 	     (__iter) = __sgt_iter(__sg_next((__iter).sgp), true), 0 : 0)
 
 /**
@@ -3074,6 +3097,12 @@ enum i915_map_type {
 	I915_MAP_FORCE_WC = I915_MAP_WC | I915_MAP_OVERRIDE,
 };
 
+static inline enum i915_map_type
+i915_coherent_map_type(struct drm_i915_private *i915)
+{
+	return HAS_LLC(i915) ? I915_MAP_WB : I915_MAP_WC;
+}
+
 /**
  * i915_gem_object_pin_map - return a contiguous mapping of the entire object
  * @obj: the object to map into kernel address space
@@ -3311,7 +3340,7 @@ int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv,
 void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,
 				 struct drm_mm_node *node);
 int i915_gem_init_stolen(struct drm_i915_private *dev_priv);
-void i915_gem_cleanup_stolen(struct drm_device *dev);
+void i915_gem_cleanup_stolen(struct drm_i915_private *dev_priv);
 struct drm_i915_gem_object *
 i915_gem_object_create_stolen(struct drm_i915_private *dev_priv,
 			      resource_size_t size);
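Note: the new i915_coherent_map_type() helper centralises the LLC check that
callers previously open-coded. A hedged sketch of a typical caller (this
function is illustrative, not part of the patch):

	static void *map_image_coherently(struct drm_i915_private *i915,
					  struct drm_i915_gem_object *obj)
	{
		/* WB on LLC parts, WC elsewhere, so CPU writes reach the GPU */
		return i915_gem_object_pin_map(obj, i915_coherent_map_type(i915));
	}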
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 89834ce19acd..db9688d14912 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2506,7 +2506,9 @@ static bool i915_sg_trim(struct sg_table *orig_st)
 	new_sg = new_st.sgl;
 	for_each_sg(orig_st->sgl, sg, orig_st->nents, i) {
 		sg_set_page(new_sg, sg_page(sg), sg->length, 0);
-		/* called before being DMA mapped, no need to copy sg->dma_* */
+		sg_dma_address(new_sg) = sg_dma_address(sg);
+		sg_dma_len(new_sg) = sg_dma_len(sg);
+
 		new_sg = sg_next(new_sg);
 	}
 	GEM_BUG_ON(new_sg); /* Should walk exactly nents and hit the end */
@@ -3438,6 +3440,9 @@ bool i915_gem_unset_wedged(struct drm_i915_private *i915)
 	i915_retire_requests(i915);
 	GEM_BUG_ON(i915->gt.active_requests);
 
+	if (!intel_gpu_reset(i915, ALL_ENGINES))
+		intel_engines_sanitize(i915);
+
 	/*
 	 * Undo nop_submit_request. We prevent all new i915 requests from
 	 * being queued (by disallowing execbuf whilst wedged) so having
@@ -5414,8 +5419,19 @@ static int __intel_engines_record_defaults(struct drm_i915_private *i915)
 
 	assert_kernel_context_is_current(i915);
 
+	/*
+	 * Immediately park the GPU so that we enable powersaving and
+	 * treat it as idle. The next time we issue a request, we will
+	 * unpark and start using the engine->pinned_default_state, otherwise
+	 * it is in limbo and an early reset may fail.
+	 */
+	__i915_gem_park(i915);
+
 	for_each_engine(engine, i915, id) {
 		struct i915_vma *state;
+		void *vaddr;
+
+		GEM_BUG_ON(to_intel_context(ctx, engine)->pin_count);
 
 		state = to_intel_context(ctx, engine)->state;
 		if (!state)
@@ -5438,6 +5454,16 @@ static int __intel_engines_record_defaults(struct drm_i915_private *i915)
 			goto err_active;
 
 		engine->default_state = i915_gem_object_get(state->obj);
+
+		/* Check we can acquire the image of the context state */
+		vaddr = i915_gem_object_pin_map(engine->default_state,
+						I915_MAP_FORCE_WB);
+		if (IS_ERR(vaddr)) {
+			err = PTR_ERR(vaddr);
+			goto err_active;
+		}
+
+		i915_gem_object_unpin_map(engine->default_state);
 	}
 
 	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) {
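Note: recording the default context image now probes the mapping up front, so
a failure surfaces at driver load rather than at the first reset. A minimal
sketch of the probe-then-release idiom (illustrative helper, not in the patch):

	static int probe_object_mapping(struct drm_i915_gem_object *obj)
	{
		void *vaddr = i915_gem_object_pin_map(obj, I915_MAP_FORCE_WB);

		if (IS_ERR(vaddr))
			return PTR_ERR(vaddr);	/* fail early, not mid-reset */

		i915_gem_object_unpin_map(obj);
		return 0;
	}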
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 747b8170a15a..f772593b99ab 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -862,7 +862,7 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
 		ret = -EINVAL;
 		break;
 	case I915_CONTEXT_PARAM_NO_ZEROMAP:
-		args->value = ctx->flags & CONTEXT_NO_ZEROMAP;
+		args->value = test_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags);
 		break;
 	case I915_CONTEXT_PARAM_GTT_SIZE:
 		if (ctx->ppgtt)
@@ -896,27 +896,23 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
 	struct drm_i915_file_private *file_priv = file->driver_priv;
 	struct drm_i915_gem_context_param *args = data;
 	struct i915_gem_context *ctx;
-	int ret;
+	int ret = 0;
 
 	ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
 	if (!ctx)
 		return -ENOENT;
 
-	ret = i915_mutex_lock_interruptible(dev);
-	if (ret)
-		goto out;
-
 	switch (args->param) {
 	case I915_CONTEXT_PARAM_BAN_PERIOD:
 		ret = -EINVAL;
 		break;
 	case I915_CONTEXT_PARAM_NO_ZEROMAP:
-		if (args->size) {
+		if (args->size)
 			ret = -EINVAL;
-		} else {
-			ctx->flags &= ~CONTEXT_NO_ZEROMAP;
-			ctx->flags |= args->value ? CONTEXT_NO_ZEROMAP : 0;
-		}
+		else if (args->value)
+			set_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags);
+		else
+			clear_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags);
 		break;
 	case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
 		if (args->size)
@@ -960,9 +956,7 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
 		ret = -EINVAL;
 		break;
 	}
-	mutex_unlock(&dev->struct_mutex);
 
-out:
 	i915_gem_context_put(ctx);
 	return ret;
 }
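Note: setparam no longer takes struct_mutex, which is why the user-controlled
bits move to atomic set_bit()/clear_bit(); the non-atomic __set_bit() variants
would now be a data race between two concurrent ioctls on the same context. A
hedged sketch of the pattern (illustrative helper only):

	static void set_user_flag(unsigned long *flags, int bit, bool value)
	{
		/* atomic read-modify-write: safe without any external lock */
		if (value)
			set_bit(bit, flags);
		else
			clear_bit(bit, flags);
	}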
diff --git a/drivers/gpu/drm/i915/i915_gem_context.h b/drivers/gpu/drm/i915/i915_gem_context.h
index e09673ca731d..08165f6a0a84 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.h
+++ b/drivers/gpu/drm/i915/i915_gem_context.h
@@ -117,15 +117,20 @@ struct i915_gem_context {
 	struct rcu_head rcu;
 
 	/**
+	 * @user_flags: small set of booleans controlled by the user
+	 */
+	unsigned long user_flags;
+#define UCONTEXT_NO_ZEROMAP		0
+#define UCONTEXT_NO_ERROR_CAPTURE	1
+#define UCONTEXT_BANNABLE		2
+
+	/**
 	 * @flags: small set of booleans
 	 */
 	unsigned long flags;
-#define CONTEXT_NO_ZEROMAP		BIT(0)
-#define CONTEXT_NO_ERROR_CAPTURE	1
-#define CONTEXT_CLOSED			2
-#define CONTEXT_BANNABLE		3
-#define CONTEXT_BANNED			4
-#define CONTEXT_FORCE_SINGLE_SUBMISSION	5
+#define CONTEXT_BANNED			0
+#define CONTEXT_CLOSED			1
+#define CONTEXT_FORCE_SINGLE_SUBMISSION	2
 
 	/**
 	 * @hw_id: - unique identifier for the context
@@ -209,37 +214,37 @@ static inline bool i915_gem_context_is_closed(const struct i915_gem_context *ctx
 static inline void i915_gem_context_set_closed(struct i915_gem_context *ctx)
 {
 	GEM_BUG_ON(i915_gem_context_is_closed(ctx));
-	__set_bit(CONTEXT_CLOSED, &ctx->flags);
+	set_bit(CONTEXT_CLOSED, &ctx->flags);
 }
 
 static inline bool i915_gem_context_no_error_capture(const struct i915_gem_context *ctx)
 {
-	return test_bit(CONTEXT_NO_ERROR_CAPTURE, &ctx->flags);
+	return test_bit(UCONTEXT_NO_ERROR_CAPTURE, &ctx->user_flags);
 }
 
 static inline void i915_gem_context_set_no_error_capture(struct i915_gem_context *ctx)
 {
-	__set_bit(CONTEXT_NO_ERROR_CAPTURE, &ctx->flags);
+	set_bit(UCONTEXT_NO_ERROR_CAPTURE, &ctx->user_flags);
 }
 
 static inline void i915_gem_context_clear_no_error_capture(struct i915_gem_context *ctx)
 {
-	__clear_bit(CONTEXT_NO_ERROR_CAPTURE, &ctx->flags);
+	clear_bit(UCONTEXT_NO_ERROR_CAPTURE, &ctx->user_flags);
 }
 
 static inline bool i915_gem_context_is_bannable(const struct i915_gem_context *ctx)
 {
-	return test_bit(CONTEXT_BANNABLE, &ctx->flags);
+	return test_bit(UCONTEXT_BANNABLE, &ctx->user_flags);
 }
 
 static inline void i915_gem_context_set_bannable(struct i915_gem_context *ctx)
 {
-	__set_bit(CONTEXT_BANNABLE, &ctx->flags);
+	set_bit(UCONTEXT_BANNABLE, &ctx->user_flags);
 }
 
 static inline void i915_gem_context_clear_bannable(struct i915_gem_context *ctx)
 {
-	__clear_bit(CONTEXT_BANNABLE, &ctx->flags);
+	clear_bit(UCONTEXT_BANNABLE, &ctx->user_flags);
 }
 
 static inline bool i915_gem_context_is_banned(const struct i915_gem_context *ctx)
@@ -249,7 +254,7 @@ static inline bool i915_gem_context_is_banned(const struct i915_gem_context *ctx
 
 static inline void i915_gem_context_set_banned(struct i915_gem_context *ctx)
 {
-	__set_bit(CONTEXT_BANNED, &ctx->flags);
+	set_bit(CONTEXT_BANNED, &ctx->flags);
 }
 
 static inline bool i915_gem_context_force_single_submission(const struct i915_gem_context *ctx)
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 22b4cb775576..09187286d346 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -693,9 +693,14 @@ static int eb_reserve(struct i915_execbuffer *eb)
 			eb_unreserve_vma(vma, &eb->flags[i]);
 
 			if (flags & EXEC_OBJECT_PINNED)
+				/* Pinned must have their slot */
 				list_add(&vma->exec_link, &eb->unbound);
 			else if (flags & __EXEC_OBJECT_NEEDS_MAP)
+				/* Maps require the lowest 256MiB (aperture) */
 				list_add_tail(&vma->exec_link, &eb->unbound);
+			else if (!(flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS))
+				/* Prioritise 4GiB region for restricted bo */
+				list_add(&vma->exec_link, &last);
 			else
 				list_add_tail(&vma->exec_link, &last);
 		}
@@ -743,7 +748,7 @@ static int eb_select_context(struct i915_execbuffer *eb)
 	}
 
 	eb->context_flags = 0;
-	if (ctx->flags & CONTEXT_NO_ZEROMAP)
+	if (test_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags))
 		eb->context_flags |= __EXEC_OBJECT_NEEDS_BIAS;
 
 	return 0;
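Note: eb_reserve() now bins objects into an explicit bind order. A hedged
summary of the resulting priorities (the enum and its names are illustrative,
not driver code):

	enum eb_bind_priority {
		BIND_PINNED,	/* must land in its exact, pre-chosen slot */
		BIND_NEEDS_MAP,	/* must fit the low 256MiB mappable aperture */
		BIND_DMA32,	/* !SUPPORTS_48B: confined to the first 4GiB */
		BIND_ANY,	/* can go anywhere in the GTT, placed last */
	};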
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index eb0e446d6482..56c7f8637311 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -1050,7 +1050,7 @@ gen8_ppgtt_insert_pte_entries(struct i915_hw_ppgtt *ppgtt,
 	do {
 		vaddr[idx->pte] = pte_encode | iter->dma;
 
-		iter->dma += PAGE_SIZE;
+		iter->dma += I915_GTT_PAGE_SIZE;
 		if (iter->dma >= iter->max) {
 			iter->sg = __sg_next(iter->sg);
 			if (!iter->sg) {
@@ -1144,7 +1144,7 @@ static void gen8_ppgtt_insert_huge_entries(struct i915_vma *vma,
 		    vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K &&
 		    IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_64K) &&
 		    (IS_ALIGNED(rem, I915_GTT_PAGE_SIZE_64K) ||
-		     rem >= (max - index) << PAGE_SHIFT))
+		     rem >= (max - index) * I915_GTT_PAGE_SIZE))
 			maybe_64K = true;
 
 		vaddr = kmap_atomic_px(pt);
@@ -1169,7 +1169,7 @@ static void gen8_ppgtt_insert_huge_entries(struct i915_vma *vma,
 			if (maybe_64K && index < max &&
 			    !(IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_64K) &&
 			      (IS_ALIGNED(rem, I915_GTT_PAGE_SIZE_64K) ||
-			       rem >= (max - index) << PAGE_SHIFT)))
+			       rem >= (max - index) * I915_GTT_PAGE_SIZE)))
 				maybe_64K = false;
 
 			if (unlikely(!IS_ALIGNED(iter->dma, page_size)))
@@ -1759,7 +1759,7 @@ static void gen6_dump_ppgtt(struct i915_hw_ppgtt *base, struct seq_file *m)
 
 		seq_printf(m, "\t\t(%03d, %04d) %08lx: ",
 			   pde, pte,
-			   (pde * GEN6_PTES + pte) * PAGE_SIZE);
+			   (pde * GEN6_PTES + pte) * I915_GTT_PAGE_SIZE);
 		for (i = 0; i < 4; i++) {
 			if (vaddr[pte + i] != scratch_pte)
 				seq_printf(m, " %08x", vaddr[pte + i]);
@@ -1842,10 +1842,10 @@ static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
 				   u64 start, u64 length)
 {
 	struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm));
-	unsigned int first_entry = start >> PAGE_SHIFT;
+	unsigned int first_entry = start / I915_GTT_PAGE_SIZE;
 	unsigned int pde = first_entry / GEN6_PTES;
 	unsigned int pte = first_entry % GEN6_PTES;
-	unsigned int num_entries = length >> PAGE_SHIFT;
+	unsigned int num_entries = length / I915_GTT_PAGE_SIZE;
 	const gen6_pte_t scratch_pte = ppgtt->scratch_pte;
 
 	while (num_entries) {
@@ -1886,7 +1886,7 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
 				      u32 flags)
 {
 	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
-	unsigned first_entry = vma->node.start >> PAGE_SHIFT;
+	unsigned first_entry = vma->node.start / I915_GTT_PAGE_SIZE;
 	unsigned act_pt = first_entry / GEN6_PTES;
 	unsigned act_pte = first_entry % GEN6_PTES;
 	const u32 pte_encode = vm->pte_encode(0, cache_level, flags);
@@ -1899,7 +1899,7 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
 	do {
 		vaddr[act_pte] = pte_encode | GEN6_PTE_ADDR_ENCODE(iter.dma);
 
-		iter.dma += PAGE_SIZE;
+		iter.dma += I915_GTT_PAGE_SIZE;
 		if (iter.dma == iter.max) {
 			iter.sg = __sg_next(iter.sg);
 			if (!iter.sg)
@@ -2037,7 +2037,7 @@ static int pd_vma_bind(struct i915_vma *vma,
 {
 	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vma->vm);
 	struct gen6_hw_ppgtt *ppgtt = vma->private;
-	u32 ggtt_offset = i915_ggtt_offset(vma) / PAGE_SIZE;
+	u32 ggtt_offset = i915_ggtt_offset(vma) / I915_GTT_PAGE_SIZE;
 	struct i915_page_table *pt;
 	unsigned int pde;
 
@@ -2163,7 +2163,7 @@ static struct i915_hw_ppgtt *gen6_ppgtt_create(struct drm_i915_private *i915)
 	ppgtt->base.vm.i915 = i915;
 	ppgtt->base.vm.dma = &i915->drm.pdev->dev;
 
-	ppgtt->base.vm.total = I915_PDES * GEN6_PTES * PAGE_SIZE;
+	ppgtt->base.vm.total = I915_PDES * GEN6_PTES * I915_GTT_PAGE_SIZE;
 
 	i915_address_space_init(&ppgtt->base.vm, i915);
 
@@ -2456,7 +2456,7 @@ static void gen8_ggtt_insert_page(struct i915_address_space *vm,
 {
 	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
 	gen8_pte_t __iomem *pte =
-		(gen8_pte_t __iomem *)ggtt->gsm + (offset >> PAGE_SHIFT);
+		(gen8_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE;
 
 	gen8_set_pte(pte, gen8_pte_encode(addr, level, 0));
 
@@ -2480,7 +2480,7 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
 	 */
 
 	gtt_entries = (gen8_pte_t __iomem *)ggtt->gsm;
-	gtt_entries += vma->node.start >> PAGE_SHIFT;
+	gtt_entries += vma->node.start / I915_GTT_PAGE_SIZE;
 	for_each_sgt_dma(addr, sgt_iter, vma->pages)
 		gen8_set_pte(gtt_entries++, pte_encode | addr);
 
@@ -2499,7 +2499,7 @@ static void gen6_ggtt_insert_page(struct i915_address_space *vm,
 {
 	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
 	gen6_pte_t __iomem *pte =
-		(gen6_pte_t __iomem *)ggtt->gsm + (offset >> PAGE_SHIFT);
+		(gen6_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE;
 
 	iowrite32(vm->pte_encode(addr, level, flags), pte);
 
@@ -2519,7 +2519,7 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
 {
 	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
 	gen6_pte_t __iomem *entries = (gen6_pte_t __iomem *)ggtt->gsm;
-	unsigned int i = vma->node.start >> PAGE_SHIFT;
+	unsigned int i = vma->node.start / I915_GTT_PAGE_SIZE;
 	struct sgt_iter iter;
 	dma_addr_t addr;
 	for_each_sgt_dma(addr, iter, vma->pages)
@@ -2541,8 +2541,8 @@ static void gen8_ggtt_clear_range(struct i915_address_space *vm,
 				  u64 start, u64 length)
 {
 	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
-	unsigned first_entry = start >> PAGE_SHIFT;
-	unsigned num_entries = length >> PAGE_SHIFT;
+	unsigned first_entry = start / I915_GTT_PAGE_SIZE;
+	unsigned num_entries = length / I915_GTT_PAGE_SIZE;
 	const gen8_pte_t scratch_pte =
 		gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0);
 	gen8_pte_t __iomem *gtt_base =
@@ -2657,8 +2657,8 @@ static void gen6_ggtt_clear_range(struct i915_address_space *vm,
 				  u64 start, u64 length)
 {
 	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
-	unsigned first_entry = start >> PAGE_SHIFT;
-	unsigned num_entries = length >> PAGE_SHIFT;
+	unsigned first_entry = start / I915_GTT_PAGE_SIZE;
+	unsigned num_entries = length / I915_GTT_PAGE_SIZE;
 	gen6_pte_t scratch_pte, __iomem *gtt_base =
 		(gen6_pte_t __iomem *)ggtt->gsm + first_entry;
 	const int max_entries = ggtt_total_entries(ggtt) - first_entry;
@@ -3005,7 +3005,7 @@ void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv)
 	arch_phys_wc_del(ggtt->mtrr);
 	io_mapping_fini(&ggtt->iomap);
 
-	i915_gem_cleanup_stolen(&dev_priv->drm);
+	i915_gem_cleanup_stolen(dev_priv);
 }
 
 static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
@@ -3023,7 +3023,7 @@ static unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl)
 	bdw_gmch_ctl = 1 << bdw_gmch_ctl;
 
 #ifdef CONFIG_X86_32
-	/* Limit 32b platforms to a 2GB GGTT: 4 << 20 / pte size * PAGE_SIZE */
+	/* Limit 32b platforms to a 2GB GGTT: 4 << 20 / pte size * I915_GTT_PAGE_SIZE */
 	if (bdw_gmch_ctl > 4)
 		bdw_gmch_ctl = 4;
 #endif
@@ -3398,7 +3398,7 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
 	else
 		size = gen8_get_total_gtt_size(snb_gmch_ctl);
 
-	ggtt->vm.total = (size / sizeof(gen8_pte_t)) << PAGE_SHIFT;
+	ggtt->vm.total = (size / sizeof(gen8_pte_t)) * I915_GTT_PAGE_SIZE;
 	ggtt->vm.cleanup = gen6_gmch_remove;
 	ggtt->vm.insert_page = gen8_ggtt_insert_page;
 	ggtt->vm.clear_range = nop_clear_range;
@@ -3456,7 +3456,7 @@ static int gen6_gmch_probe(struct i915_ggtt *ggtt)
 	pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
 
 	size = gen6_get_total_gtt_size(snb_gmch_ctl);
-	ggtt->vm.total = (size / sizeof(gen6_pte_t)) << PAGE_SHIFT;
+	ggtt->vm.total = (size / sizeof(gen6_pte_t)) * I915_GTT_PAGE_SIZE;
 
 	ggtt->vm.clear_range = gen6_ggtt_clear_range;
 	ggtt->vm.insert_page = gen6_ggtt_insert_page;
@@ -3727,9 +3727,9 @@ rotate_pages(const dma_addr_t *in, unsigned int offset,
 			 * the entries so the sg list can be happily traversed.
 			 * The only thing we need are DMA addresses.
 			 */
-			sg_set_page(sg, NULL, PAGE_SIZE, 0);
+			sg_set_page(sg, NULL, I915_GTT_PAGE_SIZE, 0);
 			sg_dma_address(sg) = in[offset + src_idx];
-			sg_dma_len(sg) = PAGE_SIZE;
+			sg_dma_len(sg) = I915_GTT_PAGE_SIZE;
 			sg = sg_next(sg);
 			src_idx -= stride;
 		}
@@ -3742,7 +3742,7 @@ static noinline struct sg_table *
 intel_rotate_pages(struct intel_rotation_info *rot_info,
 		   struct drm_i915_gem_object *obj)
 {
-	const unsigned long n_pages = obj->base.size / PAGE_SIZE;
+	const unsigned long n_pages = obj->base.size / I915_GTT_PAGE_SIZE;
 	unsigned int size = intel_rotation_info_size(rot_info);
 	struct sgt_iter sgt_iter;
 	dma_addr_t dma_addr;
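Note: the PAGE_SIZE -> I915_GTT_PAGE_SIZE substitutions decouple GTT layout
from the CPU page size: GTT pages stay 4KiB even on a kernel built with, say,
64KiB pages. A hedged sketch of the distinction (the constant's value is
assumed from the driver's 4K GTT page definition):

	#define I915_GTT_PAGE_SIZE (1ULL << 12)	/* fixed by the hardware */

	static inline unsigned long long gtt_pte_index(unsigned long long offset)
	{
		/* was "offset >> PAGE_SHIFT", which silently assumed 4K pages */
		return offset / I915_GTT_PAGE_SIZE;
	}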
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index 53440bf87650..f29a7ff7c362 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -167,10 +167,8 @@ static int i915_adjust_stolen(struct drm_i915_private *dev_priv,
 	return 0;
 }
 
-void i915_gem_cleanup_stolen(struct drm_device *dev)
+void i915_gem_cleanup_stolen(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
-
 	if (!drm_mm_initialized(&dev_priv->mm.stolen))
 		return;
 
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index f7f2aa71d8d9..2835cacd0d08 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -1365,15 +1365,20 @@ static void request_record_user_bo(struct i915_request *request,
 {
 	struct i915_capture_list *c;
 	struct drm_i915_error_object **bo;
-	long count;
+	long count, max;
 
-	count = 0;
+	max = 0;
 	for (c = request->capture_list; c; c = c->next)
-		count++;
+		max++;
+	if (!max)
+		return;
 
-	bo = NULL;
-	if (count)
-		bo = kcalloc(count, sizeof(*bo), GFP_ATOMIC);
+	bo = kmalloc_array(max, sizeof(*bo), GFP_ATOMIC);
+	if (!bo) {
+		/* If we can't capture everything, try to capture something. */
+		max = min_t(long, max, PAGE_SIZE / sizeof(*bo));
+		bo = kmalloc_array(max, sizeof(*bo), GFP_ATOMIC);
+	}
 	if (!bo)
 		return;
 
@@ -1382,7 +1387,8 @@ static void request_record_user_bo(struct i915_request *request,
 		bo[count] = i915_error_object_create(request->i915, c->vma);
 		if (!bo[count])
 			break;
-		count++;
+		if (++count == max)
+			break;
 	}
 
 	ee->user_bo = bo;
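Note: the capture array now degrades gracefully under memory pressure, capping
the fallback at one page of pointers. A hedged sketch of the pattern
(illustrative helper, not the driver function):

	static void **alloc_capture_array(long *pmax)
	{
		void **arr = kmalloc_array(*pmax, sizeof(*arr), GFP_ATOMIC);

		if (!arr) {
			/* capture something rather than nothing */
			*pmax = min_t(long, *pmax, PAGE_SIZE / sizeof(*arr));
			arr = kmalloc_array(*pmax, sizeof(*arr), GFP_ATOMIC);
		}
		return arr;
	}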
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index ccb20230df2c..664b96bb65a3 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -1680,107 +1680,6 @@ static void gen8_update_reg_state_unlocked(struct i915_gem_context *ctx,
 }
 
 /*
- * Same as gen8_update_reg_state_unlocked only through the batchbuffer. This
- * is only used by the kernel context.
- */
-static int gen8_emit_oa_config(struct i915_request *rq,
-			       const struct i915_oa_config *oa_config)
-{
-	struct drm_i915_private *dev_priv = rq->i915;
-	/* The MMIO offsets for Flex EU registers aren't contiguous */
-	u32 flex_mmio[] = {
-		i915_mmio_reg_offset(EU_PERF_CNTL0),
-		i915_mmio_reg_offset(EU_PERF_CNTL1),
-		i915_mmio_reg_offset(EU_PERF_CNTL2),
-		i915_mmio_reg_offset(EU_PERF_CNTL3),
-		i915_mmio_reg_offset(EU_PERF_CNTL4),
-		i915_mmio_reg_offset(EU_PERF_CNTL5),
-		i915_mmio_reg_offset(EU_PERF_CNTL6),
-	};
-	u32 *cs;
-	int i;
-
-	cs = intel_ring_begin(rq, ARRAY_SIZE(flex_mmio) * 2 + 4);
-	if (IS_ERR(cs))
-		return PTR_ERR(cs);
-
-	*cs++ = MI_LOAD_REGISTER_IMM(ARRAY_SIZE(flex_mmio) + 1);
-
-	*cs++ = i915_mmio_reg_offset(GEN8_OACTXCONTROL);
-	*cs++ = (dev_priv->perf.oa.period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) |
-		(dev_priv->perf.oa.periodic ? GEN8_OA_TIMER_ENABLE : 0) |
-		GEN8_OA_COUNTER_RESUME;
-
-	for (i = 0; i < ARRAY_SIZE(flex_mmio); i++) {
-		u32 mmio = flex_mmio[i];
-
-		/*
-		 * This arbitrary default will select the 'EU FPU0 Pipeline
-		 * Active' event. In the future it's anticipated that there
-		 * will be an explicit 'No Event' we can select, but not
-		 * yet...
-		 */
-		u32 value = 0;
-
-		if (oa_config) {
-			u32 j;
-
-			for (j = 0; j < oa_config->flex_regs_len; j++) {
-				if (i915_mmio_reg_offset(oa_config->flex_regs[j].addr) == mmio) {
-					value = oa_config->flex_regs[j].value;
-					break;
-				}
-			}
-		}
-
-		*cs++ = mmio;
-		*cs++ = value;
-	}
-
-	*cs++ = MI_NOOP;
-	intel_ring_advance(rq, cs);
-
-	return 0;
-}
-
-static int gen8_switch_to_updated_kernel_context(struct drm_i915_private *dev_priv,
-						 const struct i915_oa_config *oa_config)
-{
-	struct intel_engine_cs *engine = dev_priv->engine[RCS];
-	struct i915_timeline *timeline;
-	struct i915_request *rq;
-	int ret;
-
-	lockdep_assert_held(&dev_priv->drm.struct_mutex);
-
-	i915_retire_requests(dev_priv);
-
-	rq = i915_request_alloc(engine, dev_priv->kernel_context);
-	if (IS_ERR(rq))
-		return PTR_ERR(rq);
-
-	ret = gen8_emit_oa_config(rq, oa_config);
-	if (ret) {
-		i915_request_add(rq);
-		return ret;
-	}
-
-	/* Queue this switch after all other activity */
-	list_for_each_entry(timeline, &dev_priv->gt.timelines, link) {
-		struct i915_request *prev;
-
-		prev = i915_gem_active_raw(&timeline->last_request,
-					   &dev_priv->drm.struct_mutex);
-		if (prev)
-			i915_request_await_dma_fence(rq, &prev->fence);
-	}
-
-	i915_request_add(rq);
-
-	return 0;
-}
-
-/*
  * Manages updating the per-context aspects of the OA stream
  * configuration across all contexts.
  *
@@ -1808,17 +1707,13 @@ static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv,
 			       const struct i915_oa_config *oa_config)
 {
 	struct intel_engine_cs *engine = dev_priv->engine[RCS];
+	unsigned int map_type = i915_coherent_map_type(dev_priv);
 	struct i915_gem_context *ctx;
+	struct i915_request *rq;
 	int ret;
-	unsigned int wait_flags = I915_WAIT_LOCKED;
 
 	lockdep_assert_held(&dev_priv->drm.struct_mutex);
 
-	/* Switch away from any user context. */
-	ret = gen8_switch_to_updated_kernel_context(dev_priv, oa_config);
-	if (ret)
-		return ret;
-
 	/*
 	 * The OA register config is setup through the context image. This image
 	 * might be written to by the GPU on context switch (in particular on
@@ -1833,7 +1728,7 @@ static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv,
 	 * the GPU from any submitted work.
 	 */
 	ret = i915_gem_wait_for_idle(dev_priv,
-				     wait_flags,
+				     I915_WAIT_LOCKED,
 				     MAX_SCHEDULE_TIMEOUT);
 	if (ret)
 		return ret;
@@ -1847,7 +1742,7 @@ static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv,
 		if (!ce->state)
 			continue;
 
-		regs = i915_gem_object_pin_map(ce->state->obj, I915_MAP_WB);
+		regs = i915_gem_object_pin_map(ce->state->obj, map_type);
 		if (IS_ERR(regs))
 			return PTR_ERR(regs);
 
@@ -1859,7 +1754,17 @@ static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv,
 		i915_gem_object_unpin_map(ce->state->obj);
 	}
 
-	return ret;
+	/*
+	 * Apply the configuration by doing one context restore of the edited
+	 * context image.
+	 */
+	rq = i915_request_alloc(engine, dev_priv->kernel_context);
+	if (IS_ERR(rq))
+		return PTR_ERR(rq);
+
+	i915_request_add(rq);
+
+	return 0;
 }
 
 static int gen8_enable_metric_set(struct drm_i915_private *dev_priv,
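Note: the OA path no longer emits MI_LOAD_REGISTER_IMM from a batch; it idles
the GPU, edits each pinned context image on the CPU, then queues one empty
kernel-context request so the next submission restores the edited image. A
hedged outline of that flow (step comments only, not the literal driver code):

	static int reconfigure_oa_outline(struct drm_i915_private *i915)
	{
		int err;

		/* 1. quiesce: no context may be live while its image is edited */
		err = i915_gem_wait_for_idle(i915, I915_WAIT_LOCKED,
					     MAX_SCHEDULE_TIMEOUT);
		if (err)
			return err;

		/* 2. CPU-edit the OA registers in every pinned context image */

		/* 3. an otherwise-empty request forces a context restore */
		return 0;
	}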
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 09bc8e730ee1..4948b352bf4c 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -9583,6 +9583,54 @@ enum skl_power_gate {
 #define  DC_STATE_DEBUG_MASK_CORES	(1 << 0)
 #define  DC_STATE_DEBUG_MASK_MEMORY_UP	(1 << 1)
 
+#define BXT_P_CR_MC_BIOS_REQ_0_0_0	_MMIO(MCHBAR_MIRROR_BASE_SNB + 0x7114)
+#define  BXT_REQ_DATA_MASK		0x3F
+#define  BXT_DRAM_CHANNEL_ACTIVE_SHIFT	12
+#define  BXT_DRAM_CHANNEL_ACTIVE_MASK	(0xF << 12)
+#define  BXT_MEMORY_FREQ_MULTIPLIER_HZ	133333333
+
+#define BXT_D_CR_DRP0_DUNIT8		0x1000
+#define BXT_D_CR_DRP0_DUNIT9		0x1200
+#define  BXT_D_CR_DRP0_DUNIT_START	8
+#define  BXT_D_CR_DRP0_DUNIT_END	11
+#define BXT_D_CR_DRP0_DUNIT(x)	_MMIO(MCHBAR_MIRROR_BASE_SNB + \
+				      _PICK_EVEN((x) - 8, BXT_D_CR_DRP0_DUNIT8,\
+						 BXT_D_CR_DRP0_DUNIT9))
+#define  BXT_DRAM_RANK_MASK		0x3
+#define  BXT_DRAM_RANK_SINGLE		0x1
+#define  BXT_DRAM_RANK_DUAL		0x3
+#define  BXT_DRAM_WIDTH_MASK		(0x3 << 4)
+#define  BXT_DRAM_WIDTH_SHIFT		4
+#define  BXT_DRAM_WIDTH_X8		(0x0 << 4)
+#define  BXT_DRAM_WIDTH_X16		(0x1 << 4)
+#define  BXT_DRAM_WIDTH_X32		(0x2 << 4)
+#define  BXT_DRAM_WIDTH_X64		(0x3 << 4)
+#define  BXT_DRAM_SIZE_MASK		(0x7 << 6)
+#define  BXT_DRAM_SIZE_SHIFT		6
+#define  BXT_DRAM_SIZE_4GB		(0x0 << 6)
+#define  BXT_DRAM_SIZE_6GB		(0x1 << 6)
+#define  BXT_DRAM_SIZE_8GB		(0x2 << 6)
+#define  BXT_DRAM_SIZE_12GB		(0x3 << 6)
+#define  BXT_DRAM_SIZE_16GB		(0x4 << 6)
+
+#define SKL_MEMORY_FREQ_MULTIPLIER_HZ		266666666
+#define SKL_MC_BIOS_DATA_0_0_0_MCHBAR_PCU	_MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5E04)
+#define  SKL_REQ_DATA_MASK			(0xF << 0)
+
+#define SKL_MAD_DIMM_CH0_0_0_0_MCHBAR_MCMAIN	_MMIO(MCHBAR_MIRROR_BASE_SNB + 0x500C)
+#define SKL_MAD_DIMM_CH1_0_0_0_MCHBAR_MCMAIN	_MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5010)
+#define  SKL_DRAM_S_SHIFT			16
+#define  SKL_DRAM_SIZE_MASK			0x3F
+#define  SKL_DRAM_WIDTH_MASK			(0x3 << 8)
+#define  SKL_DRAM_WIDTH_SHIFT			8
+#define  SKL_DRAM_WIDTH_X8			(0x0 << 8)
+#define  SKL_DRAM_WIDTH_X16			(0x1 << 8)
+#define  SKL_DRAM_WIDTH_X32			(0x2 << 8)
+#define  SKL_DRAM_RANK_MASK			(0x1 << 10)
+#define  SKL_DRAM_RANK_SHIFT			10
+#define  SKL_DRAM_RANK_SINGLE			(0x0 << 10)
+#define  SKL_DRAM_RANK_DUAL			(0x1 << 10)
+
 /* Please see hsw_read_dcomp() and hsw_write_dcomp() before using this register,
  * since on HSW we can't write to it using I915_WRITE. */
 #define D_COMP_HSW	_MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5F0C)
@@ -10231,6 +10279,12 @@ enum skl_power_gate {
 #define  PREPARE_COUNT_SHIFT	0
 #define  PREPARE_COUNT_MASK	(0x3f << 0)
 
+#define _ICL_DSI_T_INIT_MASTER_0	0x6b088
+#define _ICL_DSI_T_INIT_MASTER_1	0x6b888
+#define ICL_DSI_T_INIT_MASTER(port)	_MMIO_PORT(port, \
+						   _ICL_DSI_T_INIT_MASTER_0,\
+						   _ICL_DSI_T_INIT_MASTER_1)
+
 /* bits 31:0 */
 #define _MIPIA_DBI_BW_CTRL	(dev_priv->mipi_mmio_base + 0xb084)
 #define _MIPIC_DBI_BW_CTRL	(dev_priv->mipi_mmio_base + 0xb884)
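Note: a hedged decode of the SKL_MAD_DIMM_CH* layout added above, using an
invented register value (0x80408) purely for illustration:

	#include <stdio.h>

	int main(void)
	{
		unsigned int val = 0x80408;	/* assumed raw register value */
		unsigned int l = val & 0xffff;	/* L-DIMM fields */
		unsigned int s = val >> 16;	/* SKL_DRAM_S_SHIFT */

		/* size[5:0] in GB, width[9:8] = log2(width/8), rank = bit 10 */
		printf("L: %uGB x%u %s\n", l & 0x3f, 8 << ((l >> 8) & 3),
		       (l >> 10) & 1 ? "dual" : "single");
		printf("S: %uGB x%u %s\n", s & 0x3f, 8 << ((s >> 8) & 3),
		       (s >> 10) & 1 ? "dual" : "single");
		return 0;
	}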
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 09ed48833b54..a492385b2089 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -732,13 +732,13 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
 	rq = kmem_cache_alloc(i915->requests,
 			      GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
 	if (unlikely(!rq)) {
+		i915_retire_requests(i915);
+
 		/* Ratelimit ourselves to prevent oom from malicious clients */
-		ret = i915_gem_wait_for_idle(i915,
-					     I915_WAIT_LOCKED |
-					     I915_WAIT_INTERRUPTIBLE,
-					     MAX_SCHEDULE_TIMEOUT);
-		if (ret)
-			goto err_unreserve;
+		rq = i915_gem_active_raw(&ce->ring->timeline->last_request,
+					 &i915->drm.struct_mutex);
+		if (rq)
+			cond_synchronize_rcu(rq->rcustate);
 
 		/*
 		 * We've forced the client to stall and catch up with whatever
@@ -758,6 +758,8 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
 		}
 	}
 
+	rq->rcustate = get_state_synchronize_rcu();
+
 	INIT_LIST_HEAD(&rq->active_list);
 	rq->i915 = i915;
 	rq->engine = engine;
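Note: rq->rcustate implements targeted backpressure: the cookie taken at
allocation lets a later allocation failure wait only until that RCU grace
period has elapsed (enough for RCU to free the slabs pinned by old requests)
instead of idling the whole GPU. A hedged sketch of the cookie pattern:

	struct tracked { unsigned long rcustate; };

	static void tracked_init(struct tracked *t)
	{
		/* snapshot the current RCU grace period at allocation */
		t->rcustate = get_state_synchronize_rcu();
	}

	static void on_alloc_failure(struct tracked *oldest)
	{
		/* block only until that grace period completes */
		if (oldest)
			cond_synchronize_rcu(oldest->rcustate);
	}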
diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
index 9898301ab7ef..7fa94b024968 100644
--- a/drivers/gpu/drm/i915/i915_request.h
+++ b/drivers/gpu/drm/i915/i915_request.h
@@ -101,6 +101,14 @@ struct i915_request {
 	struct intel_signal_node signaling;
 
 	/*
+	 * The rcu epoch of when this request was allocated. Used to
+	 * judiciously apply backpressure on future allocations to ensure
+	 * that under mempressure there are sufficient RCU ticks for us
+	 * to reclaim our RCU protected slabs.
+	 */
+	unsigned long rcustate;
+
+	/*
 	 * Fences for the various phases in the request's lifetime.
 	 *
 	 * The submit fence is used to await upon all of the request's
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c
index 1de5173e53a2..6dbeed079ae5 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence.c
+++ b/drivers/gpu/drm/i915/i915_sw_fence.c
@@ -24,13 +24,13 @@ enum {
24 DEBUG_FENCE_NOTIFY, 24 DEBUG_FENCE_NOTIFY,
25}; 25};
26 26
27#ifdef CONFIG_DRM_I915_SW_FENCE_DEBUG_OBJECTS
28
29static void *i915_sw_fence_debug_hint(void *addr) 27static void *i915_sw_fence_debug_hint(void *addr)
30{ 28{
31 return (void *)(((struct i915_sw_fence *)addr)->flags & I915_SW_FENCE_MASK); 29 return (void *)(((struct i915_sw_fence *)addr)->flags & I915_SW_FENCE_MASK);
32} 30}
33 31
32#ifdef CONFIG_DRM_I915_SW_FENCE_DEBUG_OBJECTS
33
34static struct debug_obj_descr i915_sw_fence_debug_descr = { 34static struct debug_obj_descr i915_sw_fence_debug_descr = {
35 .name = "i915_sw_fence", 35 .name = "i915_sw_fence",
36 .debug_hint = i915_sw_fence_debug_hint, 36 .debug_hint = i915_sw_fence_debug_hint,
@@ -393,10 +393,11 @@ static void timer_i915_sw_fence_wake(struct timer_list *t)
393 if (!fence) 393 if (!fence)
394 return; 394 return;
395 395
396 pr_warn("asynchronous wait on fence %s:%s:%x timed out\n", 396 pr_notice("Asynchronous wait on fence %s:%s:%x timed out (hint:%pS)\n",
397 cb->dma->ops->get_driver_name(cb->dma), 397 cb->dma->ops->get_driver_name(cb->dma),
398 cb->dma->ops->get_timeline_name(cb->dma), 398 cb->dma->ops->get_timeline_name(cb->dma),
399 cb->dma->seqno); 399 cb->dma->seqno,
400 i915_sw_fence_debug_hint(fence));
400 401
401 i915_sw_fence_complete(fence); 402 i915_sw_fence_complete(fence);
402} 403}
diff --git a/drivers/gpu/drm/i915/intel_atomic_plane.c b/drivers/gpu/drm/i915/intel_atomic_plane.c
index fa7df5fe154b..aabebe0d2e9b 100644
--- a/drivers/gpu/drm/i915/intel_atomic_plane.c
+++ b/drivers/gpu/drm/i915/intel_atomic_plane.c
@@ -113,71 +113,18 @@ int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_
113 struct intel_plane_state *intel_state) 113 struct intel_plane_state *intel_state)
114{ 114{
115 struct drm_plane *plane = intel_state->base.plane; 115 struct drm_plane *plane = intel_state->base.plane;
116 struct drm_i915_private *dev_priv = to_i915(plane->dev);
117 struct drm_plane_state *state = &intel_state->base; 116 struct drm_plane_state *state = &intel_state->base;
118 struct intel_plane *intel_plane = to_intel_plane(plane); 117 struct intel_plane *intel_plane = to_intel_plane(plane);
119 const struct drm_display_mode *adjusted_mode =
120 &crtc_state->base.adjusted_mode;
121 int ret; 118 int ret;
122 119
123 if (!intel_state->base.crtc && !old_plane_state->base.crtc) 120 if (!intel_state->base.crtc && !old_plane_state->base.crtc)
124 return 0; 121 return 0;
125 122
126 if (state->fb && drm_rotation_90_or_270(state->rotation)) {
127 struct drm_format_name_buf format_name;
128
129 if (state->fb->modifier != I915_FORMAT_MOD_Y_TILED &&
130 state->fb->modifier != I915_FORMAT_MOD_Yf_TILED) {
131 DRM_DEBUG_KMS("Y/Yf tiling required for 90/270!\n");
132 return -EINVAL;
133 }
134
135 /*
136 * 90/270 is not allowed with RGB64 16:16:16:16,
137 * RGB 16-bit 5:6:5, and Indexed 8-bit.
138 * TBD: Add RGB64 case once its added in supported format list.
139 */
140 switch (state->fb->format->format) {
141 case DRM_FORMAT_C8:
142 case DRM_FORMAT_RGB565:
143 DRM_DEBUG_KMS("Unsupported pixel format %s for 90/270!\n",
144 drm_get_format_name(state->fb->format->format,
145 &format_name));
146 return -EINVAL;
147
148 default:
149 break;
150 }
151 }
152
153 /* CHV ignores the mirror bit when the rotate bit is set :( */
154 if (IS_CHERRYVIEW(dev_priv) &&
155 state->rotation & DRM_MODE_ROTATE_180 &&
156 state->rotation & DRM_MODE_REFLECT_X) {
157 DRM_DEBUG_KMS("Cannot rotate and reflect at the same time\n");
158 return -EINVAL;
159 }
160
161 intel_state->base.visible = false; 123 intel_state->base.visible = false;
162 ret = intel_plane->check_plane(crtc_state, intel_state); 124 ret = intel_plane->check_plane(crtc_state, intel_state);
163 if (ret) 125 if (ret)
164 return ret; 126 return ret;
165 127
166 /*
167 * Y-tiling is not supported in IF-ID Interlace mode in
168 * GEN9 and above.
169 */
170 if (state->fb && INTEL_GEN(dev_priv) >= 9 && crtc_state->base.enable &&
171 adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
172 if (state->fb->modifier == I915_FORMAT_MOD_Y_TILED ||
173 state->fb->modifier == I915_FORMAT_MOD_Yf_TILED ||
174 state->fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
175 state->fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS) {
176 DRM_DEBUG_KMS("Y/Yf tiling not supported in IF-ID mode\n");
177 return -EINVAL;
178 }
179 }
180
181 /* FIXME pre-g4x don't work like this */ 128 /* FIXME pre-g4x don't work like this */
182 if (state->visible) 129 if (state->visible)
183 crtc_state->active_planes |= BIT(intel_plane->id); 130 crtc_state->active_planes |= BIT(intel_plane->id);
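[Note: the checks deleted here are all hardware specific (Y/Yf tiling for
90/270 rotation, the CHV rotate-plus-reflect restriction, the gen9+ IF-ID
interlace limit), so they move out of the shared atomic check and into the
per-plane ->check_plane() paths; the new i9xx_plane_check() later in this
diff calls a chv_plane_check_rotation() helper, for example. A sketch of
what that helper plausibly contains, reconstructed from the check removed
above rather than copied from the driver:]

static int chv_plane_check_rotation(const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->base.plane->dev);
	unsigned int rotation = plane_state->base.rotation;

	/* CHV ignores the mirror bit when the rotate bit is set */
	if (IS_CHERRYVIEW(dev_priv) &&
	    rotation & DRM_MODE_ROTATE_180 &&
	    rotation & DRM_MODE_REFLECT_X) {
		DRM_DEBUG_KMS("Cannot rotate and reflect at the same time\n");
		return -EINVAL;
	}

	return 0;
}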
diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c
index 14cf4c367e36..4aa8f3d6b64c 100644
--- a/drivers/gpu/drm/i915/intel_csr.c
+++ b/drivers/gpu/drm/i915/intel_csr.c
@@ -34,6 +34,9 @@
34 * low-power state and comes back to normal. 34 * low-power state and comes back to normal.
35 */ 35 */
36 36
37#define I915_CSR_ICL "i915/icl_dmc_ver1_07.bin"
38#define ICL_CSR_VERSION_REQUIRED CSR_VERSION(1, 7)
39
37#define I915_CSR_GLK "i915/glk_dmc_ver1_04.bin" 40#define I915_CSR_GLK "i915/glk_dmc_ver1_04.bin"
38MODULE_FIRMWARE(I915_CSR_GLK); 41MODULE_FIRMWARE(I915_CSR_GLK);
39#define GLK_CSR_VERSION_REQUIRED CSR_VERSION(1, 4) 42#define GLK_CSR_VERSION_REQUIRED CSR_VERSION(1, 4)
@@ -304,6 +307,8 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
304 if (csr->fw_path == i915_modparams.dmc_firmware_path) { 307 if (csr->fw_path == i915_modparams.dmc_firmware_path) {
305 /* Bypass version check for firmware override. */ 308 /* Bypass version check for firmware override. */
306 required_version = csr->version; 309 required_version = csr->version;
310 } else if (IS_ICELAKE(dev_priv)) {
311 required_version = ICL_CSR_VERSION_REQUIRED;
307 } else if (IS_CANNONLAKE(dev_priv)) { 312 } else if (IS_CANNONLAKE(dev_priv)) {
308 required_version = CNL_CSR_VERSION_REQUIRED; 313 required_version = CNL_CSR_VERSION_REQUIRED;
309 } else if (IS_GEMINILAKE(dev_priv)) { 314 } else if (IS_GEMINILAKE(dev_priv)) {
@@ -471,6 +476,8 @@ void intel_csr_ucode_init(struct drm_i915_private *dev_priv)
471 476
472 if (i915_modparams.dmc_firmware_path) 477 if (i915_modparams.dmc_firmware_path)
473 csr->fw_path = i915_modparams.dmc_firmware_path; 478 csr->fw_path = i915_modparams.dmc_firmware_path;
479 else if (IS_ICELAKE(dev_priv))
480 csr->fw_path = I915_CSR_ICL;
474 else if (IS_CANNONLAKE(dev_priv)) 481 else if (IS_CANNONLAKE(dev_priv))
475 csr->fw_path = I915_CSR_CNL; 482 csr->fw_path = I915_CSR_CNL;
476 else if (IS_GEMINILAKE(dev_priv)) 483 else if (IS_GEMINILAKE(dev_priv))
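[Note: the Icelake entries follow the established pattern: a firmware path
plus a minimum version built with CSR_VERSION(). Assuming CSR_VERSION()
packs major/minor into a single u32 as elsewhere in i915, the version gate
reduces to an integer comparison:]

#include <stdint.h>

/* Assumed layout, mirroring i915's CSR_VERSION() macro family. */
#define CSR_VERSION(major, minor)	((major) << 16 | (minor))
#define CSR_VERSION_MAJOR(version)	((version) >> 16)
#define CSR_VERSION_MINOR(version)	((version) & 0xffff)

static int csr_version_ok(uint32_t found, uint32_t required)
{
	/* The driver wants an exact match unless the firmware path was
	 * overridden via the dmc_firmware_path module parameter, in which
	 * case the check is bypassed (see the hunk above). */
	return found == required;
}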
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index cd01a09c5e0f..b6910c8b4e08 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -2077,7 +2077,7 @@ out:
2077static inline enum intel_display_power_domain 2077static inline enum intel_display_power_domain
2078intel_ddi_main_link_aux_domain(struct intel_dp *intel_dp) 2078intel_ddi_main_link_aux_domain(struct intel_dp *intel_dp)
2079{ 2079{
2080 /* CNL HW requires corresponding AUX IOs to be powered up for PSR with 2080 /* CNL+ HW requires corresponding AUX IOs to be powered up for PSR with
2081 * DC states enabled at the same time, while for driver initiated AUX 2081 * DC states enabled at the same time, while for driver initiated AUX
2082 * transfers we need the same AUX IOs to be powered but with DC states 2082 * transfers we need the same AUX IOs to be powered but with DC states
2083 * disabled. Accordingly use the AUX power domain here which leaves DC 2083 * disabled. Accordingly use the AUX power domain here which leaves DC
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 5711cb701760..fbcc56caffb6 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -1917,10 +1917,10 @@ static unsigned int intel_tile_size(const struct drm_i915_private *dev_priv)
1917} 1917}
1918 1918
1919static unsigned int 1919static unsigned int
1920intel_tile_width_bytes(const struct drm_framebuffer *fb, int plane) 1920intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane)
1921{ 1921{
1922 struct drm_i915_private *dev_priv = to_i915(fb->dev); 1922 struct drm_i915_private *dev_priv = to_i915(fb->dev);
1923 unsigned int cpp = fb->format->cpp[plane]; 1923 unsigned int cpp = fb->format->cpp[color_plane];
1924 1924
1925 switch (fb->modifier) { 1925 switch (fb->modifier) {
1926 case DRM_FORMAT_MOD_LINEAR: 1926 case DRM_FORMAT_MOD_LINEAR:
@@ -1931,7 +1931,7 @@ intel_tile_width_bytes(const struct drm_framebuffer *fb, int plane)
1931 else 1931 else
1932 return 512; 1932 return 512;
1933 case I915_FORMAT_MOD_Y_TILED_CCS: 1933 case I915_FORMAT_MOD_Y_TILED_CCS:
1934 if (plane == 1) 1934 if (color_plane == 1)
1935 return 128; 1935 return 128;
1936 /* fall through */ 1936 /* fall through */
1937 case I915_FORMAT_MOD_Y_TILED: 1937 case I915_FORMAT_MOD_Y_TILED:
@@ -1940,7 +1940,7 @@ intel_tile_width_bytes(const struct drm_framebuffer *fb, int plane)
1940 else 1940 else
1941 return 512; 1941 return 512;
1942 case I915_FORMAT_MOD_Yf_TILED_CCS: 1942 case I915_FORMAT_MOD_Yf_TILED_CCS:
1943 if (plane == 1) 1943 if (color_plane == 1)
1944 return 128; 1944 return 128;
1945 /* fall through */ 1945 /* fall through */
1946 case I915_FORMAT_MOD_Yf_TILED: 1946 case I915_FORMAT_MOD_Yf_TILED:
@@ -1965,22 +1965,22 @@ intel_tile_width_bytes(const struct drm_framebuffer *fb, int plane)
1965} 1965}
1966 1966
1967static unsigned int 1967static unsigned int
1968intel_tile_height(const struct drm_framebuffer *fb, int plane) 1968intel_tile_height(const struct drm_framebuffer *fb, int color_plane)
1969{ 1969{
1970 if (fb->modifier == DRM_FORMAT_MOD_LINEAR) 1970 if (fb->modifier == DRM_FORMAT_MOD_LINEAR)
1971 return 1; 1971 return 1;
1972 else 1972 else
1973 return intel_tile_size(to_i915(fb->dev)) / 1973 return intel_tile_size(to_i915(fb->dev)) /
1974 intel_tile_width_bytes(fb, plane); 1974 intel_tile_width_bytes(fb, color_plane);
1975} 1975}
1976 1976
1977/* Return the tile dimensions in pixel units */ 1977/* Return the tile dimensions in pixel units */
1978static void intel_tile_dims(const struct drm_framebuffer *fb, int plane, 1978static void intel_tile_dims(const struct drm_framebuffer *fb, int color_plane,
1979 unsigned int *tile_width, 1979 unsigned int *tile_width,
1980 unsigned int *tile_height) 1980 unsigned int *tile_height)
1981{ 1981{
1982 unsigned int tile_width_bytes = intel_tile_width_bytes(fb, plane); 1982 unsigned int tile_width_bytes = intel_tile_width_bytes(fb, color_plane);
1983 unsigned int cpp = fb->format->cpp[plane]; 1983 unsigned int cpp = fb->format->cpp[color_plane];
1984 1984
1985 *tile_width = tile_width_bytes / cpp; 1985 *tile_width = tile_width_bytes / cpp;
1986 *tile_height = intel_tile_size(to_i915(fb->dev)) / tile_width_bytes; 1986 *tile_height = intel_tile_size(to_i915(fb->dev)) / tile_width_bytes;
@@ -1988,9 +1988,9 @@ static void intel_tile_dims(const struct drm_framebuffer *fb, int plane,
1988 1988
1989unsigned int 1989unsigned int
1990intel_fb_align_height(const struct drm_framebuffer *fb, 1990intel_fb_align_height(const struct drm_framebuffer *fb,
1991 int plane, unsigned int height) 1991 int color_plane, unsigned int height)
1992{ 1992{
1993 unsigned int tile_height = intel_tile_height(fb, plane); 1993 unsigned int tile_height = intel_tile_height(fb, color_plane);
1994 1994
1995 return ALIGN(height, tile_height); 1995 return ALIGN(height, tile_height);
1996} 1996}
@@ -2044,12 +2044,12 @@ static unsigned int intel_linear_alignment(const struct drm_i915_private *dev_pr
2044} 2044}
2045 2045
2046static unsigned int intel_surf_alignment(const struct drm_framebuffer *fb, 2046static unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
2047 int plane) 2047 int color_plane)
2048{ 2048{
2049 struct drm_i915_private *dev_priv = to_i915(fb->dev); 2049 struct drm_i915_private *dev_priv = to_i915(fb->dev);
2050 2050
2051 /* AUX_DIST needs only 4K alignment */ 2051 /* AUX_DIST needs only 4K alignment */
2052 if (plane == 1) 2052 if (color_plane == 1)
2053 return 4096; 2053 return 4096;
2054 2054
2055 switch (fb->modifier) { 2055 switch (fb->modifier) {
@@ -2080,14 +2080,13 @@ static bool intel_plane_uses_fence(const struct intel_plane_state *plane_state)
2080 2080
2081struct i915_vma * 2081struct i915_vma *
2082intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, 2082intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
2083 unsigned int rotation, 2083 const struct i915_ggtt_view *view,
2084 bool uses_fence, 2084 bool uses_fence,
2085 unsigned long *out_flags) 2085 unsigned long *out_flags)
2086{ 2086{
2087 struct drm_device *dev = fb->dev; 2087 struct drm_device *dev = fb->dev;
2088 struct drm_i915_private *dev_priv = to_i915(dev); 2088 struct drm_i915_private *dev_priv = to_i915(dev);
2089 struct drm_i915_gem_object *obj = intel_fb_obj(fb); 2089 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
2090 struct i915_ggtt_view view;
2091 struct i915_vma *vma; 2090 struct i915_vma *vma;
2092 unsigned int pinctl; 2091 unsigned int pinctl;
2093 u32 alignment; 2092 u32 alignment;
@@ -2096,8 +2095,6 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
2096 2095
2097 alignment = intel_surf_alignment(fb, 0); 2096 alignment = intel_surf_alignment(fb, 0);
2098 2097
2099 intel_fill_fb_ggtt_view(&view, fb, rotation);
2100
2101 /* Note that the w/a also requires 64 PTE of padding following the 2098 /* Note that the w/a also requires 64 PTE of padding following the
2102 * bo. We currently fill all unused PTE with the shadow page and so 2099 * bo. We currently fill all unused PTE with the shadow page and so
2103 * we should always have valid PTE following the scanout preventing 2100 * we should always have valid PTE following the scanout preventing
@@ -2130,7 +2127,7 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
2130 pinctl |= PIN_MAPPABLE; 2127 pinctl |= PIN_MAPPABLE;
2131 2128
2132 vma = i915_gem_object_pin_to_display_plane(obj, 2129 vma = i915_gem_object_pin_to_display_plane(obj,
2133 alignment, &view, pinctl); 2130 alignment, view, pinctl);
2134 if (IS_ERR(vma)) 2131 if (IS_ERR(vma))
2135 goto err; 2132 goto err;
2136 2133
@@ -2182,13 +2179,13 @@ void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags)
2182 i915_vma_put(vma); 2179 i915_vma_put(vma);
2183} 2180}
2184 2181
2185static int intel_fb_pitch(const struct drm_framebuffer *fb, int plane, 2182static int intel_fb_pitch(const struct drm_framebuffer *fb, int color_plane,
2186 unsigned int rotation) 2183 unsigned int rotation)
2187{ 2184{
2188 if (drm_rotation_90_or_270(rotation)) 2185 if (drm_rotation_90_or_270(rotation))
2189 return to_intel_framebuffer(fb)->rotated[plane].pitch; 2186 return to_intel_framebuffer(fb)->rotated[color_plane].pitch;
2190 else 2187 else
2191 return fb->pitches[plane]; 2188 return fb->pitches[color_plane];
2192} 2189}
2193 2190
2194/* 2191/*
@@ -2199,11 +2196,11 @@ static int intel_fb_pitch(const struct drm_framebuffer *fb, int plane,
2199 */ 2196 */
2200u32 intel_fb_xy_to_linear(int x, int y, 2197u32 intel_fb_xy_to_linear(int x, int y,
2201 const struct intel_plane_state *state, 2198 const struct intel_plane_state *state,
2202 int plane) 2199 int color_plane)
2203{ 2200{
2204 const struct drm_framebuffer *fb = state->base.fb; 2201 const struct drm_framebuffer *fb = state->base.fb;
2205 unsigned int cpp = fb->format->cpp[plane]; 2202 unsigned int cpp = fb->format->cpp[color_plane];
2206 unsigned int pitch = fb->pitches[plane]; 2203 unsigned int pitch = state->color_plane[color_plane].stride;
2207 2204
2208 return y * pitch + x * cpp; 2205 return y * pitch + x * cpp;
2209} 2206}
@@ -2215,28 +2212,28 @@ u32 intel_fb_xy_to_linear(int x, int y,
2215 */ 2212 */
2216void intel_add_fb_offsets(int *x, int *y, 2213void intel_add_fb_offsets(int *x, int *y,
2217 const struct intel_plane_state *state, 2214 const struct intel_plane_state *state,
2218 int plane) 2215 int color_plane)
2219 2216
2220{ 2217{
2221 const struct intel_framebuffer *intel_fb = to_intel_framebuffer(state->base.fb); 2218 const struct intel_framebuffer *intel_fb = to_intel_framebuffer(state->base.fb);
2222 unsigned int rotation = state->base.rotation; 2219 unsigned int rotation = state->base.rotation;
2223 2220
2224 if (drm_rotation_90_or_270(rotation)) { 2221 if (drm_rotation_90_or_270(rotation)) {
2225 *x += intel_fb->rotated[plane].x; 2222 *x += intel_fb->rotated[color_plane].x;
2226 *y += intel_fb->rotated[plane].y; 2223 *y += intel_fb->rotated[color_plane].y;
2227 } else { 2224 } else {
2228 *x += intel_fb->normal[plane].x; 2225 *x += intel_fb->normal[color_plane].x;
2229 *y += intel_fb->normal[plane].y; 2226 *y += intel_fb->normal[color_plane].y;
2230 } 2227 }
2231} 2228}
2232 2229
2233static u32 __intel_adjust_tile_offset(int *x, int *y, 2230static u32 intel_adjust_tile_offset(int *x, int *y,
2234 unsigned int tile_width, 2231 unsigned int tile_width,
2235 unsigned int tile_height, 2232 unsigned int tile_height,
2236 unsigned int tile_size, 2233 unsigned int tile_size,
2237 unsigned int pitch_tiles, 2234 unsigned int pitch_tiles,
2238 u32 old_offset, 2235 u32 old_offset,
2239 u32 new_offset) 2236 u32 new_offset)
2240{ 2237{
2241 unsigned int pitch_pixels = pitch_tiles * tile_width; 2238 unsigned int pitch_pixels = pitch_tiles * tile_width;
2242 unsigned int tiles; 2239 unsigned int tiles;
@@ -2257,14 +2254,15 @@ static u32 __intel_adjust_tile_offset(int *x, int *y,
2257 return new_offset; 2254 return new_offset;
2258} 2255}
2259 2256
2260static u32 _intel_adjust_tile_offset(int *x, int *y, 2257static u32 intel_adjust_aligned_offset(int *x, int *y,
2261 const struct drm_framebuffer *fb, int plane, 2258 const struct drm_framebuffer *fb,
2262 unsigned int rotation, 2259 int color_plane,
2263 u32 old_offset, u32 new_offset) 2260 unsigned int rotation,
2261 unsigned int pitch,
2262 u32 old_offset, u32 new_offset)
2264{ 2263{
2265 const struct drm_i915_private *dev_priv = to_i915(fb->dev); 2264 struct drm_i915_private *dev_priv = to_i915(fb->dev);
2266 unsigned int cpp = fb->format->cpp[plane]; 2265 unsigned int cpp = fb->format->cpp[color_plane];
2267 unsigned int pitch = intel_fb_pitch(fb, plane, rotation);
2268 2266
2269 WARN_ON(new_offset > old_offset); 2267 WARN_ON(new_offset > old_offset);
2270 2268
@@ -2273,7 +2271,7 @@ static u32 _intel_adjust_tile_offset(int *x, int *y,
2273 unsigned int pitch_tiles; 2271 unsigned int pitch_tiles;
2274 2272
2275 tile_size = intel_tile_size(dev_priv); 2273 tile_size = intel_tile_size(dev_priv);
2276 intel_tile_dims(fb, plane, &tile_width, &tile_height); 2274 intel_tile_dims(fb, color_plane, &tile_width, &tile_height);
2277 2275
2278 if (drm_rotation_90_or_270(rotation)) { 2276 if (drm_rotation_90_or_270(rotation)) {
2279 pitch_tiles = pitch / tile_height; 2277 pitch_tiles = pitch / tile_height;
@@ -2282,9 +2280,9 @@ static u32 _intel_adjust_tile_offset(int *x, int *y,
2282 pitch_tiles = pitch / (tile_width * cpp); 2280 pitch_tiles = pitch / (tile_width * cpp);
2283 } 2281 }
2284 2282
2285 __intel_adjust_tile_offset(x, y, tile_width, tile_height, 2283 intel_adjust_tile_offset(x, y, tile_width, tile_height,
2286 tile_size, pitch_tiles, 2284 tile_size, pitch_tiles,
2287 old_offset, new_offset); 2285 old_offset, new_offset);
2288 } else { 2286 } else {
2289 old_offset += *y * pitch + *x * cpp; 2287 old_offset += *y * pitch + *x * cpp;
2290 2288
@@ -2299,17 +2297,19 @@ static u32 _intel_adjust_tile_offset(int *x, int *y,
2299 * Adjust the tile offset by moving the difference into 2297 * Adjust the tile offset by moving the difference into
2300 * the x/y offsets. 2298 * the x/y offsets.
2301 */ 2299 */
2302static u32 intel_adjust_tile_offset(int *x, int *y, 2300static u32 intel_plane_adjust_aligned_offset(int *x, int *y,
2303 const struct intel_plane_state *state, int plane, 2301 const struct intel_plane_state *state,
2304 u32 old_offset, u32 new_offset) 2302 int color_plane,
2305{ 2303 u32 old_offset, u32 new_offset)
2306 return _intel_adjust_tile_offset(x, y, state->base.fb, plane, 2304{
2307 state->base.rotation, 2305 return intel_adjust_aligned_offset(x, y, state->base.fb, color_plane,
2308 old_offset, new_offset); 2306 state->base.rotation,
2307 state->color_plane[color_plane].stride,
2308 old_offset, new_offset);
2309} 2309}
2310 2310
2311/* 2311/*
2312 * Computes the linear offset to the base tile and adjusts 2312 * Computes the aligned offset to the base tile and adjusts
2313 * x, y. bytes per pixel is assumed to be a power-of-two. 2313 * x, y. bytes per pixel is assumed to be a power-of-two.
2314 * 2314 *
2315 * In the 90/270 rotated case, x and y are assumed 2315 * In the 90/270 rotated case, x and y are assumed
@@ -2322,15 +2322,16 @@ static u32 intel_adjust_tile_offset(int *x, int *y,
2322 * used. This is why the user has to pass in the pitch since it 2322 * used. This is why the user has to pass in the pitch since it
2323 * is specified in the rotated orientation. 2323 * is specified in the rotated orientation.
2324 */ 2324 */
2325static u32 _intel_compute_tile_offset(const struct drm_i915_private *dev_priv, 2325static u32 intel_compute_aligned_offset(struct drm_i915_private *dev_priv,
2326 int *x, int *y, 2326 int *x, int *y,
2327 const struct drm_framebuffer *fb, int plane, 2327 const struct drm_framebuffer *fb,
2328 unsigned int pitch, 2328 int color_plane,
2329 unsigned int rotation, 2329 unsigned int pitch,
2330 u32 alignment) 2330 unsigned int rotation,
2331 u32 alignment)
2331{ 2332{
2332 uint64_t fb_modifier = fb->modifier; 2333 uint64_t fb_modifier = fb->modifier;
2333 unsigned int cpp = fb->format->cpp[plane]; 2334 unsigned int cpp = fb->format->cpp[color_plane];
2334 u32 offset, offset_aligned; 2335 u32 offset, offset_aligned;
2335 2336
2336 if (alignment) 2337 if (alignment)
@@ -2341,7 +2342,7 @@ static u32 _intel_compute_tile_offset(const struct drm_i915_private *dev_priv,
2341 unsigned int tile_rows, tiles, pitch_tiles; 2342 unsigned int tile_rows, tiles, pitch_tiles;
2342 2343
2343 tile_size = intel_tile_size(dev_priv); 2344 tile_size = intel_tile_size(dev_priv);
2344 intel_tile_dims(fb, plane, &tile_width, &tile_height); 2345 intel_tile_dims(fb, color_plane, &tile_width, &tile_height);
2345 2346
2346 if (drm_rotation_90_or_270(rotation)) { 2347 if (drm_rotation_90_or_270(rotation)) {
2347 pitch_tiles = pitch / tile_height; 2348 pitch_tiles = pitch / tile_height;
@@ -2359,9 +2360,9 @@ static u32 _intel_compute_tile_offset(const struct drm_i915_private *dev_priv,
2359 offset = (tile_rows * pitch_tiles + tiles) * tile_size; 2360 offset = (tile_rows * pitch_tiles + tiles) * tile_size;
2360 offset_aligned = offset & ~alignment; 2361 offset_aligned = offset & ~alignment;
2361 2362
2362 __intel_adjust_tile_offset(x, y, tile_width, tile_height, 2363 intel_adjust_tile_offset(x, y, tile_width, tile_height,
2363 tile_size, pitch_tiles, 2364 tile_size, pitch_tiles,
2364 offset, offset_aligned); 2365 offset, offset_aligned);
2365 } else { 2366 } else {
2366 offset = *y * pitch + *x * cpp; 2367 offset = *y * pitch + *x * cpp;
2367 offset_aligned = offset & ~alignment; 2368 offset_aligned = offset & ~alignment;
@@ -2373,42 +2374,44 @@ static u32 _intel_compute_tile_offset(const struct drm_i915_private *dev_priv,
2373 return offset_aligned; 2374 return offset_aligned;
2374} 2375}
2375 2376
2376u32 intel_compute_tile_offset(int *x, int *y, 2377static u32 intel_plane_compute_aligned_offset(int *x, int *y,
2377 const struct intel_plane_state *state, 2378 const struct intel_plane_state *state,
2378 int plane) 2379 int color_plane)
2379{ 2380{
2380 struct intel_plane *intel_plane = to_intel_plane(state->base.plane); 2381 struct intel_plane *intel_plane = to_intel_plane(state->base.plane);
2381 struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev); 2382 struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev);
2382 const struct drm_framebuffer *fb = state->base.fb; 2383 const struct drm_framebuffer *fb = state->base.fb;
2383 unsigned int rotation = state->base.rotation; 2384 unsigned int rotation = state->base.rotation;
2384 int pitch = intel_fb_pitch(fb, plane, rotation); 2385 int pitch = state->color_plane[color_plane].stride;
2385 u32 alignment; 2386 u32 alignment;
2386 2387
2387 if (intel_plane->id == PLANE_CURSOR) 2388 if (intel_plane->id == PLANE_CURSOR)
2388 alignment = intel_cursor_alignment(dev_priv); 2389 alignment = intel_cursor_alignment(dev_priv);
2389 else 2390 else
2390 alignment = intel_surf_alignment(fb, plane); 2391 alignment = intel_surf_alignment(fb, color_plane);
2391 2392
2392 return _intel_compute_tile_offset(dev_priv, x, y, fb, plane, pitch, 2393 return intel_compute_aligned_offset(dev_priv, x, y, fb, color_plane,
2393 rotation, alignment); 2394 pitch, rotation, alignment);
2394} 2395}
2395 2396
2396/* Convert the fb->offset[] into x/y offsets */ 2397/* Convert the fb->offset[] into x/y offsets */
2397static int intel_fb_offset_to_xy(int *x, int *y, 2398static int intel_fb_offset_to_xy(int *x, int *y,
2398 const struct drm_framebuffer *fb, int plane) 2399 const struct drm_framebuffer *fb,
2400 int color_plane)
2399{ 2401{
2400 struct drm_i915_private *dev_priv = to_i915(fb->dev); 2402 struct drm_i915_private *dev_priv = to_i915(fb->dev);
2401 2403
2402 if (fb->modifier != DRM_FORMAT_MOD_LINEAR && 2404 if (fb->modifier != DRM_FORMAT_MOD_LINEAR &&
2403 fb->offsets[plane] % intel_tile_size(dev_priv)) 2405 fb->offsets[color_plane] % intel_tile_size(dev_priv))
2404 return -EINVAL; 2406 return -EINVAL;
2405 2407
2406 *x = 0; 2408 *x = 0;
2407 *y = 0; 2409 *y = 0;
2408 2410
2409 _intel_adjust_tile_offset(x, y, 2411 intel_adjust_aligned_offset(x, y,
2410 fb, plane, DRM_MODE_ROTATE_0, 2412 fb, color_plane, DRM_MODE_ROTATE_0,
2411 fb->offsets[plane], 0); 2413 fb->pitches[color_plane],
2414 fb->offsets[color_plane], 0);
2412 2415
2413 return 0; 2416 return 0;
2414} 2417}
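[Note: the renamed helpers above (intel_adjust_aligned_offset(),
intel_compute_aligned_offset(), and their plane-state wrappers) all share one
piece of arithmetic: split (x, y) into whole tiles plus a residue, count
tiles row-major through the surface, and return a tile-aligned byte offset
while pushing the remainder back into x/y. A self-contained sketch of the
unrotated case, simplified so the alignment equals the tile size:]

#include <stdint.h>

static uint32_t compute_aligned_offset(int *x, int *y,
				       unsigned int pitch_bytes,
				       unsigned int cpp,	  /* bytes/px */
				       unsigned int tile_width,	  /* pixels */
				       unsigned int tile_height,  /* rows */
				       unsigned int tile_size)	  /* bytes */
{
	unsigned int pitch_tiles = pitch_bytes / (tile_width * cpp);
	unsigned int tile_rows = *y / tile_height;
	unsigned int tiles = *x / tile_width;
	uint32_t offset = (tile_rows * pitch_tiles + tiles) * tile_size;

	/* The remainder stays in x/y; the returned offset is a whole
	 * number of tiles and therefore tile_size aligned. */
	*y %= tile_height;
	*x %= tile_width;

	return offset;
}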
@@ -2565,9 +2568,10 @@ intel_fill_fb_info(struct drm_i915_private *dev_priv,
2565 intel_fb->normal[i].x = x; 2568 intel_fb->normal[i].x = x;
2566 intel_fb->normal[i].y = y; 2569 intel_fb->normal[i].y = y;
2567 2570
2568 offset = _intel_compute_tile_offset(dev_priv, &x, &y, 2571 offset = intel_compute_aligned_offset(dev_priv, &x, &y, fb, i,
2569 fb, i, fb->pitches[i], 2572 fb->pitches[i],
2570 DRM_MODE_ROTATE_0, tile_size); 2573 DRM_MODE_ROTATE_0,
2574 tile_size);
2571 offset /= tile_size; 2575 offset /= tile_size;
2572 2576
2573 if (fb->modifier != DRM_FORMAT_MOD_LINEAR) { 2577 if (fb->modifier != DRM_FORMAT_MOD_LINEAR) {
@@ -2614,10 +2618,10 @@ intel_fill_fb_info(struct drm_i915_private *dev_priv,
2614 * We only keep the x/y offsets, so push all of the 2618 * We only keep the x/y offsets, so push all of the
2615 * gtt offset into the x/y offsets. 2619 * gtt offset into the x/y offsets.
2616 */ 2620 */
2617 __intel_adjust_tile_offset(&x, &y, 2621 intel_adjust_tile_offset(&x, &y,
2618 tile_width, tile_height, 2622 tile_width, tile_height,
2619 tile_size, pitch_tiles, 2623 tile_size, pitch_tiles,
2620 gtt_offset_rotated * tile_size, 0); 2624 gtt_offset_rotated * tile_size, 0);
2621 2625
2622 gtt_offset_rotated += rot_info->plane[i].width * rot_info->plane[i].height; 2626 gtt_offset_rotated += rot_info->plane[i].width * rot_info->plane[i].height;
2623 2627
@@ -2636,9 +2640,9 @@ intel_fill_fb_info(struct drm_i915_private *dev_priv,
2636 max_size = max(max_size, offset + size); 2640 max_size = max(max_size, offset + size);
2637 } 2641 }
2638 2642
2639 if (max_size * tile_size > obj->base.size) { 2643 if (mul_u32_u32(max_size, tile_size) > obj->base.size) {
2640 DRM_DEBUG_KMS("fb too big for bo (need %u bytes, have %zu bytes)\n", 2644 DRM_DEBUG_KMS("fb too big for bo (need %llu bytes, have %zu bytes)\n",
2641 max_size * tile_size, obj->base.size); 2645 mul_u32_u32(max_size, tile_size), obj->base.size);
2642 return -EINVAL; 2646 return -EINVAL;
2643 } 2647 }
2644 2648
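[Note: the mul_u32_u32() change above is an overflow fix. max_size and
tile_size are both 32-bit, so the old max_size * tile_size multiplied in 32
bits and could wrap before the comparison against obj->base.size;
mul_u32_u32() widens to 64 bits first. A runnable userspace demonstration of
the difference:]

#include <stdint.h>
#include <stdio.h>

/* Widening multiply, same idea as the kernel's mul_u32_u32(). */
static inline uint64_t mul_u32_u32(uint32_t a, uint32_t b)
{
	return (uint64_t)a * b;
}

int main(void)
{
	uint32_t max_size = 0x200000;	/* 2M tiles */
	uint32_t tile_size = 4096;	/* 4 KiB per tile */

	/* 32-bit multiply wraps: 2^21 * 2^12 = 2^33, truncated to 0. */
	printf("32-bit: %u\n", max_size * tile_size);
	printf("64-bit: %llu\n",
	       (unsigned long long)mul_u32_u32(max_size, tile_size));
	return 0;
}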
@@ -2853,10 +2857,15 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
2853 return; 2857 return;
2854 2858
2855valid_fb: 2859valid_fb:
2860 intel_fill_fb_ggtt_view(&intel_state->view, fb,
2861 intel_state->base.rotation);
2862 intel_state->color_plane[0].stride =
2863 intel_fb_pitch(fb, 0, intel_state->base.rotation);
2864
2856 mutex_lock(&dev->struct_mutex); 2865 mutex_lock(&dev->struct_mutex);
2857 intel_state->vma = 2866 intel_state->vma =
2858 intel_pin_and_fence_fb_obj(fb, 2867 intel_pin_and_fence_fb_obj(fb,
2859 primary->state->rotation, 2868 &intel_state->view,
2860 intel_plane_uses_fence(intel_state), 2869 intel_plane_uses_fence(intel_state),
2861 &intel_state->flags); 2870 &intel_state->flags);
2862 mutex_unlock(&dev->struct_mutex); 2871 mutex_unlock(&dev->struct_mutex);
@@ -2899,10 +2908,11 @@ valid_fb:
2899 &obj->frontbuffer_bits); 2908 &obj->frontbuffer_bits);
2900} 2909}
2901 2910
2902static int skl_max_plane_width(const struct drm_framebuffer *fb, int plane, 2911static int skl_max_plane_width(const struct drm_framebuffer *fb,
2912 int color_plane,
2903 unsigned int rotation) 2913 unsigned int rotation)
2904{ 2914{
2905 int cpp = fb->format->cpp[plane]; 2915 int cpp = fb->format->cpp[color_plane];
2906 2916
2907 switch (fb->modifier) { 2917 switch (fb->modifier) {
2908 case DRM_FORMAT_MOD_LINEAR: 2918 case DRM_FORMAT_MOD_LINEAR:
@@ -2950,9 +2960,9 @@ static bool skl_check_main_ccs_coordinates(struct intel_plane_state *plane_state
2950 const struct drm_framebuffer *fb = plane_state->base.fb; 2960 const struct drm_framebuffer *fb = plane_state->base.fb;
2951 int hsub = fb->format->hsub; 2961 int hsub = fb->format->hsub;
2952 int vsub = fb->format->vsub; 2962 int vsub = fb->format->vsub;
2953 int aux_x = plane_state->aux.x; 2963 int aux_x = plane_state->color_plane[1].x;
2954 int aux_y = plane_state->aux.y; 2964 int aux_y = plane_state->color_plane[1].y;
2955 u32 aux_offset = plane_state->aux.offset; 2965 u32 aux_offset = plane_state->color_plane[1].offset;
2956 u32 alignment = intel_surf_alignment(fb, 1); 2966 u32 alignment = intel_surf_alignment(fb, 1);
2957 2967
2958 while (aux_offset >= main_offset && aux_y <= main_y) { 2968 while (aux_offset >= main_offset && aux_y <= main_y) {
@@ -2966,8 +2976,8 @@ static bool skl_check_main_ccs_coordinates(struct intel_plane_state *plane_state
2966 2976
2967 x = aux_x / hsub; 2977 x = aux_x / hsub;
2968 y = aux_y / vsub; 2978 y = aux_y / vsub;
2969 aux_offset = intel_adjust_tile_offset(&x, &y, plane_state, 1, 2979 aux_offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 1,
2970 aux_offset, aux_offset - alignment); 2980 aux_offset, aux_offset - alignment);
2971 aux_x = x * hsub + aux_x % hsub; 2981 aux_x = x * hsub + aux_x % hsub;
2972 aux_y = y * vsub + aux_y % vsub; 2982 aux_y = y * vsub + aux_y % vsub;
2973 } 2983 }
@@ -2975,30 +2985,24 @@ static bool skl_check_main_ccs_coordinates(struct intel_plane_state *plane_state
2975 if (aux_x != main_x || aux_y != main_y) 2985 if (aux_x != main_x || aux_y != main_y)
2976 return false; 2986 return false;
2977 2987
2978 plane_state->aux.offset = aux_offset; 2988 plane_state->color_plane[1].offset = aux_offset;
2979 plane_state->aux.x = aux_x; 2989 plane_state->color_plane[1].x = aux_x;
2980 plane_state->aux.y = aux_y; 2990 plane_state->color_plane[1].y = aux_y;
2981 2991
2982 return true; 2992 return true;
2983} 2993}
2984 2994
2985static int skl_check_main_surface(const struct intel_crtc_state *crtc_state, 2995static int skl_check_main_surface(struct intel_plane_state *plane_state)
2986 struct intel_plane_state *plane_state)
2987{ 2996{
2988 struct drm_i915_private *dev_priv =
2989 to_i915(plane_state->base.plane->dev);
2990 const struct drm_framebuffer *fb = plane_state->base.fb; 2997 const struct drm_framebuffer *fb = plane_state->base.fb;
2991 unsigned int rotation = plane_state->base.rotation; 2998 unsigned int rotation = plane_state->base.rotation;
2992 int x = plane_state->base.src.x1 >> 16; 2999 int x = plane_state->base.src.x1 >> 16;
2993 int y = plane_state->base.src.y1 >> 16; 3000 int y = plane_state->base.src.y1 >> 16;
2994 int w = drm_rect_width(&plane_state->base.src) >> 16; 3001 int w = drm_rect_width(&plane_state->base.src) >> 16;
2995 int h = drm_rect_height(&plane_state->base.src) >> 16; 3002 int h = drm_rect_height(&plane_state->base.src) >> 16;
2996 int dst_x = plane_state->base.dst.x1;
2997 int dst_w = drm_rect_width(&plane_state->base.dst);
2998 int pipe_src_w = crtc_state->pipe_src_w;
2999 int max_width = skl_max_plane_width(fb, 0, rotation); 3003 int max_width = skl_max_plane_width(fb, 0, rotation);
3000 int max_height = 4096; 3004 int max_height = 4096;
3001 u32 alignment, offset, aux_offset = plane_state->aux.offset; 3005 u32 alignment, offset, aux_offset = plane_state->color_plane[1].offset;
3002 3006
3003 if (w > max_width || h > max_height) { 3007 if (w > max_width || h > max_height) {
3004 DRM_DEBUG_KMS("requested Y/RGB source size %dx%d too big (limit %dx%d)\n", 3008 DRM_DEBUG_KMS("requested Y/RGB source size %dx%d too big (limit %dx%d)\n",
@@ -3006,26 +3010,8 @@ static int skl_check_main_surface(const struct intel_crtc_state *crtc_state,
3006 return -EINVAL; 3010 return -EINVAL;
3007 } 3011 }
3008 3012
3009 /*
3010 * Display WA #1175: cnl,glk
3011 * Planes other than the cursor may cause FIFO underflow and display
3012 * corruption if starting less than 4 pixels from the right edge of
3013 * the screen.
3014 * Besides the above WA fix the similar problem, where planes other
3015 * than the cursor ending less than 4 pixels from the left edge of the
3016 * screen may cause FIFO underflow and display corruption.
3017 */
3018 if ((IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) &&
3019 (dst_x + dst_w < 4 || dst_x > pipe_src_w - 4)) {
3020 DRM_DEBUG_KMS("requested plane X %s position %d invalid (valid range %d-%d)\n",
3021 dst_x + dst_w < 4 ? "end" : "start",
3022 dst_x + dst_w < 4 ? dst_x + dst_w : dst_x,
3023 4, pipe_src_w - 4);
3024 return -ERANGE;
3025 }
3026
3027 intel_add_fb_offsets(&x, &y, plane_state, 0); 3013 intel_add_fb_offsets(&x, &y, plane_state, 0);
3028 offset = intel_compute_tile_offset(&x, &y, plane_state, 0); 3014 offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 0);
3029 alignment = intel_surf_alignment(fb, 0); 3015 alignment = intel_surf_alignment(fb, 0);
3030 3016
3031 /* 3017 /*
@@ -3034,8 +3020,8 @@ static int skl_check_main_surface(const struct intel_crtc_state *crtc_state,
3034 * sure that is what we will get. 3020 * sure that is what we will get.
3035 */ 3021 */
3036 if (offset > aux_offset) 3022 if (offset > aux_offset)
3037 offset = intel_adjust_tile_offset(&x, &y, plane_state, 0, 3023 offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
3038 offset, aux_offset & ~(alignment - 1)); 3024 offset, aux_offset & ~(alignment - 1));
3039 3025
3040 /* 3026 /*
3041 * When using an X-tiled surface, the plane blows up 3027 * When using an X-tiled surface, the plane blows up
@@ -3046,14 +3032,14 @@ static int skl_check_main_surface(const struct intel_crtc_state *crtc_state,
3046 if (fb->modifier == I915_FORMAT_MOD_X_TILED) { 3032 if (fb->modifier == I915_FORMAT_MOD_X_TILED) {
3047 int cpp = fb->format->cpp[0]; 3033 int cpp = fb->format->cpp[0];
3048 3034
3049 while ((x + w) * cpp > fb->pitches[0]) { 3035 while ((x + w) * cpp > plane_state->color_plane[0].stride) {
3050 if (offset == 0) { 3036 if (offset == 0) {
3051 DRM_DEBUG_KMS("Unable to find suitable display surface offset due to X-tiling\n"); 3037 DRM_DEBUG_KMS("Unable to find suitable display surface offset due to X-tiling\n");
3052 return -EINVAL; 3038 return -EINVAL;
3053 } 3039 }
3054 3040
3055 offset = intel_adjust_tile_offset(&x, &y, plane_state, 0, 3041 offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
3056 offset, offset - alignment); 3042 offset, offset - alignment);
3057 } 3043 }
3058 } 3044 }
3059 3045
@@ -3066,26 +3052,25 @@ static int skl_check_main_surface(const struct intel_crtc_state *crtc_state,
3066 if (offset == 0) 3052 if (offset == 0)
3067 break; 3053 break;
3068 3054
3069 offset = intel_adjust_tile_offset(&x, &y, plane_state, 0, 3055 offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
3070 offset, offset - alignment); 3056 offset, offset - alignment);
3071 } 3057 }
3072 3058
3073 if (x != plane_state->aux.x || y != plane_state->aux.y) { 3059 if (x != plane_state->color_plane[1].x || y != plane_state->color_plane[1].y) {
3074 DRM_DEBUG_KMS("Unable to find suitable display surface offset due to CCS\n"); 3060 DRM_DEBUG_KMS("Unable to find suitable display surface offset due to CCS\n");
3075 return -EINVAL; 3061 return -EINVAL;
3076 } 3062 }
3077 } 3063 }
3078 3064
3079 plane_state->main.offset = offset; 3065 plane_state->color_plane[0].offset = offset;
3080 plane_state->main.x = x; 3066 plane_state->color_plane[0].x = x;
3081 plane_state->main.y = y; 3067 plane_state->color_plane[0].y = y;
3082 3068
3083 return 0; 3069 return 0;
3084} 3070}
3085 3071
3086static int 3072static int
3087skl_check_nv12_surface(const struct intel_crtc_state *crtc_state, 3073skl_check_nv12_surface(struct intel_plane_state *plane_state)
3088 struct intel_plane_state *plane_state)
3089{ 3074{
3090 /* Display WA #1106 */ 3075 /* Display WA #1106 */
3091 if (plane_state->base.rotation != 3076 if (plane_state->base.rotation !=
@@ -3119,7 +3104,7 @@ static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state)
3119 u32 offset; 3104 u32 offset;
3120 3105
3121 intel_add_fb_offsets(&x, &y, plane_state, 1); 3106 intel_add_fb_offsets(&x, &y, plane_state, 1);
3122 offset = intel_compute_tile_offset(&x, &y, plane_state, 1); 3107 offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 1);
3123 3108
3124 /* FIXME not quite sure how/if these apply to the chroma plane */ 3109 /* FIXME not quite sure how/if these apply to the chroma plane */
3125 if (w > max_width || h > max_height) { 3110 if (w > max_width || h > max_height) {
@@ -3128,9 +3113,9 @@ static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state)
3128 return -EINVAL; 3113 return -EINVAL;
3129 } 3114 }
3130 3115
3131 plane_state->aux.offset = offset; 3116 plane_state->color_plane[1].offset = offset;
3132 plane_state->aux.x = x; 3117 plane_state->color_plane[1].x = x;
3133 plane_state->aux.y = y; 3118 plane_state->color_plane[1].y = y;
3134 3119
3135 return 0; 3120 return 0;
3136} 3121}
@@ -3146,34 +3131,25 @@ static int skl_check_ccs_aux_surface(struct intel_plane_state *plane_state)
3146 int y = src_y / vsub; 3131 int y = src_y / vsub;
3147 u32 offset; 3132 u32 offset;
3148 3133
3149 if (plane_state->base.rotation & ~(DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180)) {
3150 DRM_DEBUG_KMS("RC support only with 0/180 degree rotation %x\n",
3151 plane_state->base.rotation);
3152 return -EINVAL;
3153 }
3154
3155 intel_add_fb_offsets(&x, &y, plane_state, 1); 3134 intel_add_fb_offsets(&x, &y, plane_state, 1);
3156 offset = intel_compute_tile_offset(&x, &y, plane_state, 1); 3135 offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 1);
3157 3136
3158 plane_state->aux.offset = offset; 3137 plane_state->color_plane[1].offset = offset;
3159 plane_state->aux.x = x * hsub + src_x % hsub; 3138 plane_state->color_plane[1].x = x * hsub + src_x % hsub;
3160 plane_state->aux.y = y * vsub + src_y % vsub; 3139 plane_state->color_plane[1].y = y * vsub + src_y % vsub;
3161 3140
3162 return 0; 3141 return 0;
3163} 3142}
3164 3143
3165int skl_check_plane_surface(const struct intel_crtc_state *crtc_state, 3144int skl_check_plane_surface(struct intel_plane_state *plane_state)
3166 struct intel_plane_state *plane_state)
3167{ 3145{
3168 const struct drm_framebuffer *fb = plane_state->base.fb; 3146 const struct drm_framebuffer *fb = plane_state->base.fb;
3169 unsigned int rotation = plane_state->base.rotation; 3147 unsigned int rotation = plane_state->base.rotation;
3170 int ret; 3148 int ret;
3171 3149
3172 if (rotation & DRM_MODE_REFLECT_X && 3150 intel_fill_fb_ggtt_view(&plane_state->view, fb, rotation);
3173 fb->modifier == DRM_FORMAT_MOD_LINEAR) { 3151 plane_state->color_plane[0].stride = intel_fb_pitch(fb, 0, rotation);
3174 DRM_DEBUG_KMS("horizontal flip is not supported with linear surface formats\n"); 3152 plane_state->color_plane[1].stride = intel_fb_pitch(fb, 1, rotation);
3175 return -EINVAL;
3176 }
3177 3153
3178 if (!plane_state->base.visible) 3154 if (!plane_state->base.visible)
3179 return 0; 3155 return 0;
@@ -3189,7 +3165,7 @@ int skl_check_plane_surface(const struct intel_crtc_state *crtc_state,
3189 * the main surface setup depends on it. 3165 * the main surface setup depends on it.
3190 */ 3166 */
3191 if (fb->format->format == DRM_FORMAT_NV12) { 3167 if (fb->format->format == DRM_FORMAT_NV12) {
3192 ret = skl_check_nv12_surface(crtc_state, plane_state); 3168 ret = skl_check_nv12_surface(plane_state);
3193 if (ret) 3169 if (ret)
3194 return ret; 3170 return ret;
3195 ret = skl_check_nv12_aux_surface(plane_state); 3171 ret = skl_check_nv12_aux_surface(plane_state);
@@ -3200,18 +3176,45 @@ int skl_check_plane_surface(const struct intel_crtc_state *crtc_state,
3200 if (ret) 3176 if (ret)
3201 return ret; 3177 return ret;
3202 } else { 3178 } else {
3203 plane_state->aux.offset = ~0xfff; 3179 plane_state->color_plane[1].offset = ~0xfff;
3204 plane_state->aux.x = 0; 3180 plane_state->color_plane[1].x = 0;
3205 plane_state->aux.y = 0; 3181 plane_state->color_plane[1].y = 0;
3206 } 3182 }
3207 3183
3208 ret = skl_check_main_surface(crtc_state, plane_state); 3184 ret = skl_check_main_surface(plane_state);
3209 if (ret) 3185 if (ret)
3210 return ret; 3186 return ret;
3211 3187
3212 return 0; 3188 return 0;
3213} 3189}
3214 3190
3191unsigned int
3192i9xx_plane_max_stride(struct intel_plane *plane,
3193 u32 pixel_format, u64 modifier,
3194 unsigned int rotation)
3195{
3196 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
3197
3198 if (!HAS_GMCH_DISPLAY(dev_priv)) {
3199 return 32*1024;
3200 } else if (INTEL_GEN(dev_priv) >= 4) {
3201 if (modifier == I915_FORMAT_MOD_X_TILED)
3202 return 16*1024;
3203 else
3204 return 32*1024;
3205 } else if (INTEL_GEN(dev_priv) >= 3) {
3206 if (modifier == I915_FORMAT_MOD_X_TILED)
3207 return 8*1024;
3208 else
3209 return 16*1024;
3210 } else {
3211 if (plane->i9xx_plane == PLANE_C)
3212 return 4*1024;
3213 else
3214 return 8*1024;
3215 }
3216}
3217
3215static u32 i9xx_plane_ctl(const struct intel_crtc_state *crtc_state, 3218static u32 i9xx_plane_ctl(const struct intel_crtc_state *crtc_state,
3216 const struct intel_plane_state *plane_state) 3219 const struct intel_plane_state *plane_state)
3217{ 3220{
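[Note: two related changes land here. skl_check_plane_surface() loses its
crtc_state argument and instead caches the GGTT view and both color-plane
strides in the plane state up front, and the new i9xx_plane_max_stride()
centralizes the pre-skl stride limits. Reading those limits out of the
function above:

	no GMCH display (PCH platforms):  32 KiB regardless of tiling
	gen4+ GMCH:   16 KiB X-tiled, 32 KiB otherwise
	gen3:          8 KiB X-tiled, 16 KiB otherwise
	gen2:          4 KiB on plane C,  8 KiB on planes A and B

The identical signatures on the *_max_stride() functions in this diff suggest
a new per-plane hook; a hedged usage sketch, with the hook name assumed:]

/* Reject framebuffers wider than the plane can scan out. */
if (fb->pitches[0] > plane->max_stride(plane, fb->format->format,
				       fb->modifier, DRM_MODE_ROTATE_0)) {
	DRM_DEBUG_KMS("pitch %u exceeds max stride\n", fb->pitches[0]);
	return -EINVAL;
}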
@@ -3278,21 +3281,25 @@ int i9xx_check_plane_surface(struct intel_plane_state *plane_state)
3278{ 3281{
3279 struct drm_i915_private *dev_priv = 3282 struct drm_i915_private *dev_priv =
3280 to_i915(plane_state->base.plane->dev); 3283 to_i915(plane_state->base.plane->dev);
3284 const struct drm_framebuffer *fb = plane_state->base.fb;
3285 unsigned int rotation = plane_state->base.rotation;
3281 int src_x = plane_state->base.src.x1 >> 16; 3286 int src_x = plane_state->base.src.x1 >> 16;
3282 int src_y = plane_state->base.src.y1 >> 16; 3287 int src_y = plane_state->base.src.y1 >> 16;
3283 u32 offset; 3288 u32 offset;
3284 3289
3290 intel_fill_fb_ggtt_view(&plane_state->view, fb, rotation);
3291 plane_state->color_plane[0].stride = intel_fb_pitch(fb, 0, rotation);
3292
3285 intel_add_fb_offsets(&src_x, &src_y, plane_state, 0); 3293 intel_add_fb_offsets(&src_x, &src_y, plane_state, 0);
3286 3294
3287 if (INTEL_GEN(dev_priv) >= 4) 3295 if (INTEL_GEN(dev_priv) >= 4)
3288 offset = intel_compute_tile_offset(&src_x, &src_y, 3296 offset = intel_plane_compute_aligned_offset(&src_x, &src_y,
3289 plane_state, 0); 3297 plane_state, 0);
3290 else 3298 else
3291 offset = 0; 3299 offset = 0;
3292 3300
3293 /* HSW/BDW do this automagically in hardware */ 3301 /* HSW/BDW do this automagically in hardware */
3294 if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv)) { 3302 if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv)) {
3295 unsigned int rotation = plane_state->base.rotation;
3296 int src_w = drm_rect_width(&plane_state->base.src) >> 16; 3303 int src_w = drm_rect_width(&plane_state->base.src) >> 16;
3297 int src_h = drm_rect_height(&plane_state->base.src) >> 16; 3304 int src_h = drm_rect_height(&plane_state->base.src) >> 16;
3298 3305
@@ -3304,9 +3311,43 @@ int i9xx_check_plane_surface(struct intel_plane_state *plane_state)
3304 } 3311 }
3305 } 3312 }
3306 3313
3307 plane_state->main.offset = offset; 3314 plane_state->color_plane[0].offset = offset;
3308 plane_state->main.x = src_x; 3315 plane_state->color_plane[0].x = src_x;
3309 plane_state->main.y = src_y; 3316 plane_state->color_plane[0].y = src_y;
3317
3318 return 0;
3319}
3320
3321static int
3322i9xx_plane_check(struct intel_crtc_state *crtc_state,
3323 struct intel_plane_state *plane_state)
3324{
3325 int ret;
3326
3327 ret = chv_plane_check_rotation(plane_state);
3328 if (ret)
3329 return ret;
3330
3331 ret = drm_atomic_helper_check_plane_state(&plane_state->base,
3332 &crtc_state->base,
3333 DRM_PLANE_HELPER_NO_SCALING,
3334 DRM_PLANE_HELPER_NO_SCALING,
3335 false, true);
3336 if (ret)
3337 return ret;
3338
3339 if (!plane_state->base.visible)
3340 return 0;
3341
3342 ret = intel_plane_check_src_coordinates(plane_state);
3343 if (ret)
3344 return ret;
3345
3346 ret = i9xx_check_plane_surface(plane_state);
3347 if (ret)
3348 return ret;
3349
3350 plane_state->ctl = i9xx_plane_ctl(crtc_state, plane_state);
3310 3351
3311 return 0; 3352 return 0;
3312} 3353}
@@ -3316,20 +3357,19 @@ static void i9xx_update_plane(struct intel_plane *plane,
3316 const struct intel_plane_state *plane_state) 3357 const struct intel_plane_state *plane_state)
3317{ 3358{
3318 struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 3359 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
3319 const struct drm_framebuffer *fb = plane_state->base.fb;
3320 enum i9xx_plane_id i9xx_plane = plane->i9xx_plane; 3360 enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
3321 u32 linear_offset; 3361 u32 linear_offset;
3322 u32 dspcntr = plane_state->ctl; 3362 u32 dspcntr = plane_state->ctl;
3323 i915_reg_t reg = DSPCNTR(i9xx_plane); 3363 i915_reg_t reg = DSPCNTR(i9xx_plane);
3324 int x = plane_state->main.x; 3364 int x = plane_state->color_plane[0].x;
3325 int y = plane_state->main.y; 3365 int y = plane_state->color_plane[0].y;
3326 unsigned long irqflags; 3366 unsigned long irqflags;
3327 u32 dspaddr_offset; 3367 u32 dspaddr_offset;
3328 3368
3329 linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0); 3369 linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);
3330 3370
3331 if (INTEL_GEN(dev_priv) >= 4) 3371 if (INTEL_GEN(dev_priv) >= 4)
3332 dspaddr_offset = plane_state->main.offset; 3372 dspaddr_offset = plane_state->color_plane[0].offset;
3333 else 3373 else
3334 dspaddr_offset = linear_offset; 3374 dspaddr_offset = linear_offset;
3335 3375
@@ -3353,7 +3393,7 @@ static void i9xx_update_plane(struct intel_plane *plane,
3353 3393
3354 I915_WRITE_FW(reg, dspcntr); 3394 I915_WRITE_FW(reg, dspcntr);
3355 3395
3356 I915_WRITE_FW(DSPSTRIDE(i9xx_plane), fb->pitches[0]); 3396 I915_WRITE_FW(DSPSTRIDE(i9xx_plane), plane_state->color_plane[0].stride);
3357 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { 3397 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
3358 I915_WRITE_FW(DSPSURF(i9xx_plane), 3398 I915_WRITE_FW(DSPSURF(i9xx_plane),
3359 intel_plane_ggtt_offset(plane_state) + 3399 intel_plane_ggtt_offset(plane_state) +
@@ -3428,12 +3468,12 @@ static bool i9xx_plane_get_hw_state(struct intel_plane *plane,
3428} 3468}
3429 3469
3430static u32 3470static u32
3431intel_fb_stride_alignment(const struct drm_framebuffer *fb, int plane) 3471intel_fb_stride_alignment(const struct drm_framebuffer *fb, int color_plane)
3432{ 3472{
3433 if (fb->modifier == DRM_FORMAT_MOD_LINEAR) 3473 if (fb->modifier == DRM_FORMAT_MOD_LINEAR)
3434 return 64; 3474 return 64;
3435 else 3475 else
3436 return intel_tile_width_bytes(fb, plane); 3476 return intel_tile_width_bytes(fb, color_plane);
3437} 3477}
3438 3478
3439static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id) 3479static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
@@ -3463,24 +3503,24 @@ static void skl_detach_scalers(struct intel_crtc *intel_crtc)
3463 } 3503 }
3464} 3504}
3465 3505
3466u32 skl_plane_stride(const struct drm_framebuffer *fb, int plane, 3506u32 skl_plane_stride(const struct intel_plane_state *plane_state,
3467 unsigned int rotation) 3507 int color_plane)
3468{ 3508{
3469 u32 stride; 3509 const struct drm_framebuffer *fb = plane_state->base.fb;
3510 unsigned int rotation = plane_state->base.rotation;
3511 u32 stride = plane_state->color_plane[color_plane].stride;
3470 3512
3471 if (plane >= fb->format->num_planes) 3513 if (color_plane >= fb->format->num_planes)
3472 return 0; 3514 return 0;
3473 3515
3474 stride = intel_fb_pitch(fb, plane, rotation);
3475
3476 /* 3516 /*
3477 * The stride is either expressed as a multiple of 64 bytes chunks for 3517 * The stride is either expressed as a multiple of 64 bytes chunks for
3478 * linear buffers or in number of tiles for tiled buffers. 3518 * linear buffers or in number of tiles for tiled buffers.
3479 */ 3519 */
3480 if (drm_rotation_90_or_270(rotation)) 3520 if (drm_rotation_90_or_270(rotation))
3481 stride /= intel_tile_height(fb, plane); 3521 stride /= intel_tile_height(fb, color_plane);
3482 else 3522 else
3483 stride /= intel_fb_stride_alignment(fb, plane); 3523 stride /= intel_fb_stride_alignment(fb, color_plane);
3484 3524
3485 return stride; 3525 return stride;
3486} 3526}
@@ -6014,6 +6054,8 @@ static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config,
6014 6054
6015 i9xx_set_pipeconf(intel_crtc); 6055 i9xx_set_pipeconf(intel_crtc);
6016 6056
6057 intel_color_set_csc(&pipe_config->base);
6058
6017 intel_crtc->active = true; 6059 intel_crtc->active = true;
6018 6060
6019 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); 6061 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
@@ -6113,8 +6155,8 @@ static void i9xx_pfit_disable(struct intel_crtc *crtc)
6113 6155
6114 assert_pipe_disabled(dev_priv, crtc->pipe); 6156 assert_pipe_disabled(dev_priv, crtc->pipe);
6115 6157
6116 DRM_DEBUG_DRIVER("disabling pfit, current: 0x%08x\n", 6158 DRM_DEBUG_KMS("disabling pfit, current: 0x%08x\n",
6117 I915_READ(PFIT_CONTROL)); 6159 I915_READ(PFIT_CONTROL));
6118 I915_WRITE(PFIT_CONTROL, 0); 6160 I915_WRITE(PFIT_CONTROL, 0);
6119} 6161}
6120 6162
@@ -8634,8 +8676,8 @@ static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
8634 ironlake_compute_dpll(crtc, crtc_state, NULL); 8676 ironlake_compute_dpll(crtc, crtc_state, NULL);
8635 8677
8636 if (!intel_get_shared_dpll(crtc, crtc_state, NULL)) { 8678 if (!intel_get_shared_dpll(crtc, crtc_state, NULL)) {
8637 DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n", 8679 DRM_DEBUG_KMS("failed to find PLL for pipe %c\n",
8638 pipe_name(crtc->pipe)); 8680 pipe_name(crtc->pipe));
8639 return -EINVAL; 8681 return -EINVAL;
8640 } 8682 }
8641 8683
@@ -9202,8 +9244,8 @@ static int haswell_crtc_compute_clock(struct intel_crtc *crtc,
9202 intel_get_crtc_new_encoder(state, crtc_state); 9244 intel_get_crtc_new_encoder(state, crtc_state);
9203 9245
9204 if (!intel_get_shared_dpll(crtc, crtc_state, encoder)) { 9246 if (!intel_get_shared_dpll(crtc, crtc_state, encoder)) {
9205 DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n", 9247 DRM_DEBUG_KMS("failed to find PLL for pipe %c\n",
9206 pipe_name(crtc->pipe)); 9248 pipe_name(crtc->pipe));
9207 return -EINVAL; 9249 return -EINVAL;
9208 } 9250 }
9209 } 9251 }
@@ -9592,7 +9634,7 @@ static u32 intel_cursor_base(const struct intel_plane_state *plane_state)
9592 else 9634 else
9593 base = intel_plane_ggtt_offset(plane_state); 9635 base = intel_plane_ggtt_offset(plane_state);
9594 9636
9595 base += plane_state->main.offset; 9637 base += plane_state->color_plane[0].offset;
9596 9638
9597 /* ILK+ do this automagically */ 9639 /* ILK+ do this automagically */
9598 if (HAS_GMCH_DISPLAY(dev_priv) && 9640 if (HAS_GMCH_DISPLAY(dev_priv) &&
@@ -9635,14 +9677,44 @@ static bool intel_cursor_size_ok(const struct intel_plane_state *plane_state)
9635 height > 0 && height <= config->cursor_height; 9677 height > 0 && height <= config->cursor_height;
9636} 9678}
9637 9679
9638static int intel_check_cursor(struct intel_crtc_state *crtc_state, 9680static int intel_cursor_check_surface(struct intel_plane_state *plane_state)
9639 struct intel_plane_state *plane_state)
9640{ 9681{
9641 const struct drm_framebuffer *fb = plane_state->base.fb; 9682 const struct drm_framebuffer *fb = plane_state->base.fb;
9683 unsigned int rotation = plane_state->base.rotation;
9642 int src_x, src_y; 9684 int src_x, src_y;
9643 u32 offset; 9685 u32 offset;
9686
9687 intel_fill_fb_ggtt_view(&plane_state->view, fb, rotation);
9688 plane_state->color_plane[0].stride = intel_fb_pitch(fb, 0, rotation);
9689
9690 src_x = plane_state->base.src_x >> 16;
9691 src_y = plane_state->base.src_y >> 16;
9692
9693 intel_add_fb_offsets(&src_x, &src_y, plane_state, 0);
9694 offset = intel_plane_compute_aligned_offset(&src_x, &src_y,
9695 plane_state, 0);
9696
9697 if (src_x != 0 || src_y != 0) {
9698 DRM_DEBUG_KMS("Arbitrary cursor panning not supported\n");
9699 return -EINVAL;
9700 }
9701
9702 plane_state->color_plane[0].offset = offset;
9703
9704 return 0;
9705}
9706
9707static int intel_check_cursor(struct intel_crtc_state *crtc_state,
9708 struct intel_plane_state *plane_state)
9709{
9710 const struct drm_framebuffer *fb = plane_state->base.fb;
9644 int ret; 9711 int ret;
9645 9712
9713 if (fb && fb->modifier != DRM_FORMAT_MOD_LINEAR) {
9714 DRM_DEBUG_KMS("cursor cannot be tiled\n");
9715 return -EINVAL;
9716 }
9717
9646 ret = drm_atomic_helper_check_plane_state(&plane_state->base, 9718 ret = drm_atomic_helper_check_plane_state(&plane_state->base,
9647 &crtc_state->base, 9719 &crtc_state->base,
9648 DRM_PLANE_HELPER_NO_SCALING, 9720 DRM_PLANE_HELPER_NO_SCALING,
@@ -9651,39 +9723,35 @@ static int intel_check_cursor(struct intel_crtc_state *crtc_state,
9651 if (ret) 9723 if (ret)
9652 return ret; 9724 return ret;
9653 9725
9654 if (!fb) 9726 if (!plane_state->base.visible)
9655 return 0; 9727 return 0;
9656 9728
9657 if (fb->modifier != DRM_FORMAT_MOD_LINEAR) { 9729 ret = intel_plane_check_src_coordinates(plane_state);
9658 DRM_DEBUG_KMS("cursor cannot be tiled\n"); 9730 if (ret)
9659 return -EINVAL; 9731 return ret;
9660 }
9661
9662 src_x = plane_state->base.src_x >> 16;
9663 src_y = plane_state->base.src_y >> 16;
9664
9665 intel_add_fb_offsets(&src_x, &src_y, plane_state, 0);
9666 offset = intel_compute_tile_offset(&src_x, &src_y, plane_state, 0);
9667
9668 if (src_x != 0 || src_y != 0) {
9669 DRM_DEBUG_KMS("Arbitrary cursor panning not supported\n");
9670 return -EINVAL;
9671 }
9672 9732
9673 plane_state->main.offset = offset; 9733 ret = intel_cursor_check_surface(plane_state);
9734 if (ret)
9735 return ret;
9674 9736
9675 return 0; 9737 return 0;
9676} 9738}
9677 9739
9740static unsigned int
9741i845_cursor_max_stride(struct intel_plane *plane,
9742 u32 pixel_format, u64 modifier,
9743 unsigned int rotation)
9744{
9745 return 2048;
9746}
9747
9678static u32 i845_cursor_ctl(const struct intel_crtc_state *crtc_state, 9748static u32 i845_cursor_ctl(const struct intel_crtc_state *crtc_state,
9679 const struct intel_plane_state *plane_state) 9749 const struct intel_plane_state *plane_state)
9680{ 9750{
9681 const struct drm_framebuffer *fb = plane_state->base.fb;
9682
9683 return CURSOR_ENABLE | 9751 return CURSOR_ENABLE |
9684 CURSOR_GAMMA_ENABLE | 9752 CURSOR_GAMMA_ENABLE |
9685 CURSOR_FORMAT_ARGB | 9753 CURSOR_FORMAT_ARGB |
9686 CURSOR_STRIDE(fb->pitches[0]); 9754 CURSOR_STRIDE(plane_state->color_plane[0].stride);
9687} 9755}
9688 9756
9689static bool i845_cursor_size_ok(const struct intel_plane_state *plane_state) 9757static bool i845_cursor_size_ok(const struct intel_plane_state *plane_state)
@@ -9719,6 +9787,9 @@ static int i845_check_cursor(struct intel_crtc_state *crtc_state,
9719 return -EINVAL; 9787 return -EINVAL;
9720 } 9788 }
9721 9789
9790 WARN_ON(plane_state->base.visible &&
9791 plane_state->color_plane[0].stride != fb->pitches[0]);
9792
9722 switch (fb->pitches[0]) { 9793 switch (fb->pitches[0]) {
9723 case 256: 9794 case 256:
9724 case 512: 9795 case 512:
@@ -9807,6 +9878,14 @@ static bool i845_cursor_get_hw_state(struct intel_plane *plane,
9807 return ret; 9878 return ret;
9808} 9879}
9809 9880
9881static unsigned int
9882i9xx_cursor_max_stride(struct intel_plane *plane,
9883 u32 pixel_format, u64 modifier,
9884 unsigned int rotation)
9885{
9886 return plane->base.dev->mode_config.cursor_width * 4;
9887}
9888
9810static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state, 9889static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state,
9811 const struct intel_plane_state *plane_state) 9890 const struct intel_plane_state *plane_state)
9812{ 9891{
@@ -9912,6 +9991,9 @@ static int i9xx_check_cursor(struct intel_crtc_state *crtc_state,
9912 return -EINVAL; 9991 return -EINVAL;
9913 } 9992 }
9914 9993
9994 WARN_ON(plane_state->base.visible &&
9995 plane_state->color_plane[0].stride != fb->pitches[0]);
9996
9915 if (fb->pitches[0] != plane_state->base.crtc_w * fb->format->cpp[0]) { 9997 if (fb->pitches[0] != plane_state->base.crtc_w * fb->format->cpp[0]) {
9916 DRM_DEBUG_KMS("Invalid cursor stride (%u) (cursor width %d)\n", 9998 DRM_DEBUG_KMS("Invalid cursor stride (%u) (cursor width %d)\n",
9917 fb->pitches[0], plane_state->base.crtc_w); 9999 fb->pitches[0], plane_state->base.crtc_w);
@@ -12982,7 +13064,7 @@ static int intel_plane_pin_fb(struct intel_plane_state *plane_state)
12982 } 13064 }
12983 13065
12984 vma = intel_pin_and_fence_fb_obj(fb, 13066 vma = intel_pin_and_fence_fb_obj(fb,
12985 plane_state->base.rotation, 13067 &plane_state->view,
12986 intel_plane_uses_fence(plane_state), 13068 intel_plane_uses_fence(plane_state),
12987 &plane_state->flags); 13069 &plane_state->flags);
12988 if (IS_ERR(vma)) 13070 if (IS_ERR(vma))
@@ -13160,19 +13242,17 @@ intel_cleanup_plane_fb(struct drm_plane *plane,
13160} 13242}
13161 13243
13162int 13244int
13163skl_max_scale(struct intel_crtc *intel_crtc, 13245skl_max_scale(const struct intel_crtc_state *crtc_state,
13164 struct intel_crtc_state *crtc_state, 13246 u32 pixel_format)
13165 uint32_t pixel_format)
13166{ 13247{
13167 struct drm_i915_private *dev_priv; 13248 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
13249 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
13168 int max_scale, mult; 13250 int max_scale, mult;
13169 int crtc_clock, max_dotclk, tmpclk1, tmpclk2; 13251 int crtc_clock, max_dotclk, tmpclk1, tmpclk2;
13170 13252
13171 if (!intel_crtc || !crtc_state->base.enable) 13253 if (!crtc_state->base.enable)
13172 return DRM_PLANE_HELPER_NO_SCALING; 13254 return DRM_PLANE_HELPER_NO_SCALING;
13173 13255
13174 dev_priv = to_i915(intel_crtc->base.dev);
13175
13176 crtc_clock = crtc_state->base.adjusted_mode.crtc_clock; 13256 crtc_clock = crtc_state->base.adjusted_mode.crtc_clock;
13177 max_dotclk = to_intel_atomic_state(crtc_state->base.state)->cdclk.logical.cdclk; 13257 max_dotclk = to_intel_atomic_state(crtc_state->base.state)->cdclk.logical.cdclk;
13178 13258
@@ -13196,61 +13276,6 @@ skl_max_scale(struct intel_crtc *intel_crtc,
13196 return max_scale; 13276 return max_scale;
13197} 13277}
13198 13278
13199static int
13200intel_check_primary_plane(struct intel_crtc_state *crtc_state,
13201 struct intel_plane_state *state)
13202{
13203 struct intel_plane *plane = to_intel_plane(state->base.plane);
13204 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
13205 struct drm_crtc *crtc = state->base.crtc;
13206 int min_scale = DRM_PLANE_HELPER_NO_SCALING;
13207 int max_scale = DRM_PLANE_HELPER_NO_SCALING;
13208 bool can_position = false;
13209 int ret;
13210 uint32_t pixel_format = 0;
13211
13212 if (INTEL_GEN(dev_priv) >= 9) {
13213 /* use scaler when colorkey is not required */
13214 if (!state->ckey.flags) {
13215 min_scale = 1;
13216 if (state->base.fb)
13217 pixel_format = state->base.fb->format->format;
13218 max_scale = skl_max_scale(to_intel_crtc(crtc),
13219 crtc_state, pixel_format);
13220 }
13221 can_position = true;
13222 }
13223
13224 ret = drm_atomic_helper_check_plane_state(&state->base,
13225 &crtc_state->base,
13226 min_scale, max_scale,
13227 can_position, true);
13228 if (ret)
13229 return ret;
13230
13231 if (!state->base.fb)
13232 return 0;
13233
13234 if (INTEL_GEN(dev_priv) >= 9) {
13235 ret = skl_check_plane_surface(crtc_state, state);
13236 if (ret)
13237 return ret;
13238
13239 state->ctl = skl_plane_ctl(crtc_state, state);
13240 } else {
13241 ret = i9xx_check_plane_surface(state);
13242 if (ret)
13243 return ret;
13244
13245 state->ctl = i9xx_plane_ctl(crtc_state, state);
13246 }
13247
13248 if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
13249 state->color_ctl = glk_plane_color_ctl(crtc_state, state);
13250
13251 return 0;
13252}
13253
13254static void intel_begin_crtc_commit(struct drm_crtc *crtc, 13279static void intel_begin_crtc_commit(struct drm_crtc *crtc,
13255 struct drm_crtc_state *old_crtc_state) 13280 struct drm_crtc_state *old_crtc_state)
13256{ 13281{
@@ -13672,12 +13697,8 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
13672 13697
13673 primary->base.state = &state->base; 13698 primary->base.state = &state->base;
13674 13699
13675 primary->can_scale = false; 13700 if (INTEL_GEN(dev_priv) >= 9)
13676 primary->max_downscale = 1;
13677 if (INTEL_GEN(dev_priv) >= 9) {
13678 primary->can_scale = true;
13679 state->scaler_id = -1; 13701 state->scaler_id = -1;
13680 }
13681 primary->pipe = pipe; 13702 primary->pipe = pipe;
13682 /* 13703 /*
13683 * On gen2/3 only plane A can do FBC, but the panel fitter and LVDS 13704 * On gen2/3 only plane A can do FBC, but the panel fitter and LVDS
@@ -13704,8 +13725,6 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
13704 fbc->possible_framebuffer_bits |= primary->frontbuffer_bit; 13725 fbc->possible_framebuffer_bits |= primary->frontbuffer_bit;
13705 } 13726 }
13706 13727
13707 primary->check_plane = intel_check_primary_plane;
13708
13709 if (INTEL_GEN(dev_priv) >= 9) { 13728 if (INTEL_GEN(dev_priv) >= 9) {
13710 primary->has_ccs = skl_plane_has_ccs(dev_priv, pipe, 13729 primary->has_ccs = skl_plane_has_ccs(dev_priv, pipe,
13711 PLANE_PRIMARY); 13730 PLANE_PRIMARY);
@@ -13723,9 +13742,11 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
13723 else 13742 else
13724 modifiers = skl_format_modifiers_noccs; 13743 modifiers = skl_format_modifiers_noccs;
13725 13744
13745 primary->max_stride = skl_plane_max_stride;
13726 primary->update_plane = skl_update_plane; 13746 primary->update_plane = skl_update_plane;
13727 primary->disable_plane = skl_disable_plane; 13747 primary->disable_plane = skl_disable_plane;
13728 primary->get_hw_state = skl_plane_get_hw_state; 13748 primary->get_hw_state = skl_plane_get_hw_state;
13749 primary->check_plane = skl_plane_check;
13729 13750
13730 plane_funcs = &skl_plane_funcs; 13751 plane_funcs = &skl_plane_funcs;
13731 } else if (INTEL_GEN(dev_priv) >= 4) { 13752 } else if (INTEL_GEN(dev_priv) >= 4) {
@@ -13733,9 +13754,11 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
13733 num_formats = ARRAY_SIZE(i965_primary_formats); 13754 num_formats = ARRAY_SIZE(i965_primary_formats);
13734 modifiers = i9xx_format_modifiers; 13755 modifiers = i9xx_format_modifiers;
13735 13756
13757 primary->max_stride = i9xx_plane_max_stride;
13736 primary->update_plane = i9xx_update_plane; 13758 primary->update_plane = i9xx_update_plane;
13737 primary->disable_plane = i9xx_disable_plane; 13759 primary->disable_plane = i9xx_disable_plane;
13738 primary->get_hw_state = i9xx_plane_get_hw_state; 13760 primary->get_hw_state = i9xx_plane_get_hw_state;
13761 primary->check_plane = i9xx_plane_check;
13739 13762
13740 plane_funcs = &i965_plane_funcs; 13763 plane_funcs = &i965_plane_funcs;
13741 } else { 13764 } else {
@@ -13743,9 +13766,11 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
13743 num_formats = ARRAY_SIZE(i8xx_primary_formats); 13766 num_formats = ARRAY_SIZE(i8xx_primary_formats);
13744 modifiers = i9xx_format_modifiers; 13767 modifiers = i9xx_format_modifiers;
13745 13768
13769 primary->max_stride = i9xx_plane_max_stride;
13746 primary->update_plane = i9xx_update_plane; 13770 primary->update_plane = i9xx_update_plane;
13747 primary->disable_plane = i9xx_disable_plane; 13771 primary->disable_plane = i9xx_disable_plane;
13748 primary->get_hw_state = i9xx_plane_get_hw_state; 13772 primary->get_hw_state = i9xx_plane_get_hw_state;
13773 primary->check_plane = i9xx_plane_check;
13749 13774
13750 plane_funcs = &i8xx_plane_funcs; 13775 plane_funcs = &i8xx_plane_funcs;
13751 } 13776 }
@@ -13842,19 +13867,19 @@ intel_cursor_plane_create(struct drm_i915_private *dev_priv,
13842 13867
13843 cursor->base.state = &state->base; 13868 cursor->base.state = &state->base;
13844 13869
13845 cursor->can_scale = false;
13846 cursor->max_downscale = 1;
13847 cursor->pipe = pipe; 13870 cursor->pipe = pipe;
13848 cursor->i9xx_plane = (enum i9xx_plane_id) pipe; 13871 cursor->i9xx_plane = (enum i9xx_plane_id) pipe;
13849 cursor->id = PLANE_CURSOR; 13872 cursor->id = PLANE_CURSOR;
13850 cursor->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, cursor->id); 13873 cursor->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, cursor->id);
13851 13874
13852 if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) { 13875 if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) {
13876 cursor->max_stride = i845_cursor_max_stride;
13853 cursor->update_plane = i845_update_cursor; 13877 cursor->update_plane = i845_update_cursor;
13854 cursor->disable_plane = i845_disable_cursor; 13878 cursor->disable_plane = i845_disable_cursor;
13855 cursor->get_hw_state = i845_cursor_get_hw_state; 13879 cursor->get_hw_state = i845_cursor_get_hw_state;
13856 cursor->check_plane = i845_check_cursor; 13880 cursor->check_plane = i845_check_cursor;
13857 } else { 13881 } else {
13882 cursor->max_stride = i9xx_cursor_max_stride;
13858 cursor->update_plane = i9xx_update_cursor; 13883 cursor->update_plane = i9xx_update_cursor;
13859 cursor->disable_plane = i9xx_disable_cursor; 13884 cursor->disable_plane = i9xx_disable_cursor;
13860 cursor->get_hw_state = i9xx_cursor_get_hw_state; 13885 cursor->get_hw_state = i9xx_cursor_get_hw_state;
@@ -14380,31 +14405,18 @@ static
14380u32 intel_fb_pitch_limit(struct drm_i915_private *dev_priv, 14405u32 intel_fb_pitch_limit(struct drm_i915_private *dev_priv,
14381 uint64_t fb_modifier, uint32_t pixel_format) 14406 uint64_t fb_modifier, uint32_t pixel_format)
14382{ 14407{
14383 u32 gen = INTEL_GEN(dev_priv); 14408 struct intel_crtc *crtc;
14409 struct intel_plane *plane;
14384 14410
14385 if (gen >= 9) { 14411 /*
14386 int cpp = drm_format_plane_cpp(pixel_format, 0); 14412 * We assume the primary plane for pipe A has
14413 * the highest stride limits of them all.
14414 */
14415 crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_A);
14416 plane = to_intel_plane(crtc->base.primary);
14387 14417
14388 /* "The stride in bytes must not exceed the of the size of 8K 14418 return plane->max_stride(plane, pixel_format, fb_modifier,
14389 * pixels and 32K bytes." 14419 DRM_MODE_ROTATE_0);
14390 */
14391 return min(8192 * cpp, 32768);
14392 } else if (gen >= 5 && !HAS_GMCH_DISPLAY(dev_priv)) {
14393 return 32*1024;
14394 } else if (gen >= 4) {
14395 if (fb_modifier == I915_FORMAT_MOD_X_TILED)
14396 return 16*1024;
14397 else
14398 return 32*1024;
14399 } else if (gen >= 3) {
14400 if (fb_modifier == I915_FORMAT_MOD_X_TILED)
14401 return 8*1024;
14402 else
14403 return 16*1024;
14404 } else {
14405 /* XXX DSPC is limited to 4k tiled */
14406 return 8*1024;
14407 }
14408} 14420}
14409 14421
14410static int intel_framebuffer_init(struct intel_framebuffer *intel_fb, 14422static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
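Note on the hunk above: the gen-based pitch-limit ladder is replaced by a query through the new per-plane ->max_stride() vfunc. The skl/i9xx bodies live in intel_sprite.c and are not part of this diff, so the following is only a sketch of what the skl variant plausibly looks like, reconstructed from the deleted gen9 branch; the 90/270 rotation handling in particular is an assumption.

#include <linux/kernel.h>	/* min() */
#include <drm/drm_blend.h>	/* drm_rotation_90_or_270() */
#include <drm/drm_fourcc.h>	/* drm_format_plane_cpp() */

/*
 * Sketch only: mirrors the deleted gen9 branch of intel_fb_pitch_limit().
 * The limit is the smaller of 8K pixels and 32K bytes; for 90/270
 * rotation the stride is counted in pixels rather than bytes.
 */
static unsigned int skl_plane_max_stride_sketch(u32 pixel_format,
						u64 modifier,
						unsigned int rotation)
{
	int cpp = drm_format_plane_cpp(pixel_format, 0);

	if (drm_rotation_90_or_270(rotation))
		return min(8192, 32768 / cpp);	/* limit in pixels */
	else
		return min(8192 * cpp, 32768);	/* limit in bytes */
}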
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index f5731215210a..bf1c38728a59 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -497,18 +497,21 @@ struct intel_atomic_state {
497 497
498struct intel_plane_state { 498struct intel_plane_state {
499 struct drm_plane_state base; 499 struct drm_plane_state base;
500 struct i915_ggtt_view view;
500 struct i915_vma *vma; 501 struct i915_vma *vma;
501 unsigned long flags; 502 unsigned long flags;
502#define PLANE_HAS_FENCE BIT(0) 503#define PLANE_HAS_FENCE BIT(0)
503 504
504 struct { 505 struct {
505 u32 offset; 506 u32 offset;
507 /*
508 * Plane stride in:
509 * bytes for 0/180 degree rotation
510 * pixels for 90/270 degree rotation
511 */
512 u32 stride;
506 int x, y; 513 int x, y;
507 } main; 514 } color_plane[2];
508 struct {
509 u32 offset;
510 int x, y;
511 } aux;
512 515
513 /* plane control register */ 516 /* plane control register */
514 u32 ctl; 517 u32 ctl;
@@ -950,10 +953,8 @@ struct intel_plane {
950 enum i9xx_plane_id i9xx_plane; 953 enum i9xx_plane_id i9xx_plane;
951 enum plane_id id; 954 enum plane_id id;
952 enum pipe pipe; 955 enum pipe pipe;
953 bool can_scale;
954 bool has_fbc; 956 bool has_fbc;
955 bool has_ccs; 957 bool has_ccs;
956 int max_downscale;
957 uint32_t frontbuffer_bit; 958 uint32_t frontbuffer_bit;
958 959
959 struct { 960 struct {
@@ -966,6 +967,9 @@ struct intel_plane {
966 * the intel_plane_state structure and accessed via plane_state. 967 * the intel_plane_state structure and accessed via plane_state.
967 */ 968 */
968 969
970 unsigned int (*max_stride)(struct intel_plane *plane,
971 u32 pixel_format, u64 modifier,
972 unsigned int rotation);
969 void (*update_plane)(struct intel_plane *plane, 973 void (*update_plane)(struct intel_plane *plane,
970 const struct intel_crtc_state *crtc_state, 974 const struct intel_crtc_state *crtc_state,
971 const struct intel_plane_state *plane_state); 975 const struct intel_plane_state *plane_state);
@@ -1442,7 +1446,7 @@ void icl_unmap_plls_to_ports(struct drm_crtc *crtc,
1442 struct drm_atomic_state *old_state); 1446 struct drm_atomic_state *old_state);
1443 1447
1444unsigned int intel_fb_align_height(const struct drm_framebuffer *fb, 1448unsigned int intel_fb_align_height(const struct drm_framebuffer *fb,
1445 int plane, unsigned int height); 1449 int color_plane, unsigned int height);
1446 1450
1447/* intel_audio.c */ 1451/* intel_audio.c */
1448void intel_init_audio_hooks(struct drm_i915_private *dev_priv); 1452void intel_init_audio_hooks(struct drm_i915_private *dev_priv);
@@ -1565,7 +1569,7 @@ void intel_release_load_detect_pipe(struct drm_connector *connector,
1565 struct drm_modeset_acquire_ctx *ctx); 1569 struct drm_modeset_acquire_ctx *ctx);
1566struct i915_vma * 1570struct i915_vma *
1567intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, 1571intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
1568 unsigned int rotation, 1572 const struct i915_ggtt_view *view,
1569 bool uses_fence, 1573 bool uses_fence,
1570 unsigned long *out_flags); 1574 unsigned long *out_flags);
1571void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags); 1575void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags);
@@ -1614,8 +1618,6 @@ void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
1614void assert_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, bool state); 1618void assert_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, bool state);
1615#define assert_pipe_enabled(d, p) assert_pipe(d, p, true) 1619#define assert_pipe_enabled(d, p) assert_pipe(d, p, true)
1616#define assert_pipe_disabled(d, p) assert_pipe(d, p, false) 1620#define assert_pipe_disabled(d, p) assert_pipe(d, p, false)
1617u32 intel_compute_tile_offset(int *x, int *y,
1618 const struct intel_plane_state *state, int plane);
1619void intel_prepare_reset(struct drm_i915_private *dev_priv); 1621void intel_prepare_reset(struct drm_i915_private *dev_priv);
1620void intel_finish_reset(struct drm_i915_private *dev_priv); 1622void intel_finish_reset(struct drm_i915_private *dev_priv);
1621void hsw_enable_pc8(struct drm_i915_private *dev_priv); 1623void hsw_enable_pc8(struct drm_i915_private *dev_priv);
@@ -1645,8 +1647,8 @@ void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
1645 1647
1646u16 skl_scaler_calc_phase(int sub, bool chroma_center); 1648u16 skl_scaler_calc_phase(int sub, bool chroma_center);
1647int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state); 1649int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state);
1648int skl_max_scale(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state, 1650int skl_max_scale(const struct intel_crtc_state *crtc_state,
1649 uint32_t pixel_format); 1651 u32 pixel_format);
1650 1652
1651static inline u32 intel_plane_ggtt_offset(const struct intel_plane_state *state) 1653static inline u32 intel_plane_ggtt_offset(const struct intel_plane_state *state)
1652{ 1654{
@@ -1658,12 +1660,14 @@ u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
1658u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state, 1660u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
1659 const struct intel_plane_state *plane_state); 1661 const struct intel_plane_state *plane_state);
1660u32 glk_color_ctl(const struct intel_plane_state *plane_state); 1662u32 glk_color_ctl(const struct intel_plane_state *plane_state);
1661u32 skl_plane_stride(const struct drm_framebuffer *fb, int plane, 1663u32 skl_plane_stride(const struct intel_plane_state *plane_state,
1662 unsigned int rotation); 1664 int plane);
1663int skl_check_plane_surface(const struct intel_crtc_state *crtc_state, 1665int skl_check_plane_surface(struct intel_plane_state *plane_state);
1664 struct intel_plane_state *plane_state);
1665int i9xx_check_plane_surface(struct intel_plane_state *plane_state); 1666int i9xx_check_plane_surface(struct intel_plane_state *plane_state);
1666int skl_format_to_fourcc(int format, bool rgb_order, bool alpha); 1667int skl_format_to_fourcc(int format, bool rgb_order, bool alpha);
1668unsigned int i9xx_plane_max_stride(struct intel_plane *plane,
1669 u32 pixel_format, u64 modifier,
1670 unsigned int rotation);
1667 1671
1668/* intel_csr.c */ 1672/* intel_csr.c */
1669void intel_csr_ucode_init(struct drm_i915_private *); 1673void intel_csr_ucode_init(struct drm_i915_private *);
@@ -2131,6 +2135,13 @@ bool skl_plane_has_ccs(struct drm_i915_private *dev_priv,
2131 enum pipe pipe, enum plane_id plane_id); 2135 enum pipe pipe, enum plane_id plane_id);
2132bool skl_plane_has_planar(struct drm_i915_private *dev_priv, 2136bool skl_plane_has_planar(struct drm_i915_private *dev_priv,
2133 enum pipe pipe, enum plane_id plane_id); 2137 enum pipe pipe, enum plane_id plane_id);
2138unsigned int skl_plane_max_stride(struct intel_plane *plane,
2139 u32 pixel_format, u64 modifier,
2140 unsigned int rotation);
2141int skl_plane_check(struct intel_crtc_state *crtc_state,
2142 struct intel_plane_state *plane_state);
2143int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state);
2144int chv_plane_check_rotation(const struct intel_plane_state *plane_state);
2134 2145
2135/* intel_tv.c */ 2146/* intel_tv.c */
2136void intel_tv_init(struct drm_i915_private *dev_priv); 2147void intel_tv_init(struct drm_i915_private *dev_priv);
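Aside on the intel_plane_state change above: the separate main/aux structs collapse into color_plane[0]/[1], and the new stride field is in bytes for 0/180 rotation but in pixels for 90/270. A minimal sketch of a consumer normalizing that, using a hypothetical helper that is not part of the patch:

#include <drm/drm_blend.h>	/* drm_rotation_90_or_270() */
#include <drm/drm_framebuffer.h>
#include "intel_drv.h"		/* i915-internal; intel_plane_state per the hunk above */

/* Hypothetical helper: convert the stored per-color-plane stride to
 * bytes, per the rule documented in the new color_plane[] comment. */
static u32 plane_stride_bytes(const struct intel_plane_state *state,
			      int color_plane)
{
	const struct drm_framebuffer *fb = state->base.fb;
	u32 stride = state->color_plane[color_plane].stride;

	if (drm_rotation_90_or_270(state->base.rotation))
		stride *= fb->format->cpp[color_plane]; /* pixels -> bytes */

	return stride;
}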
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index 10cd051ba29e..217ed3ee1cab 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -990,6 +990,9 @@ bool intel_engine_is_idle(struct intel_engine_cs *engine)
990 } 990 }
991 local_bh_enable(); 991 local_bh_enable();
992 992
993 /* Otherwise flush the tasklet if it was on another cpu */
994 tasklet_unlock_wait(t);
995
993 if (READ_ONCE(engine->execlists.active)) 996 if (READ_ONCE(engine->execlists.active))
994 return false; 997 return false;
995 } 998 }
diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c
index 01d1d2088f04..74d425c700ef 100644
--- a/drivers/gpu/drm/i915/intel_fbc.c
+++ b/drivers/gpu/drm/i915/intel_fbc.c
@@ -670,8 +670,8 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
670 cache->plane.src_w = drm_rect_width(&plane_state->base.src) >> 16; 670 cache->plane.src_w = drm_rect_width(&plane_state->base.src) >> 16;
671 cache->plane.src_h = drm_rect_height(&plane_state->base.src) >> 16; 671 cache->plane.src_h = drm_rect_height(&plane_state->base.src) >> 16;
672 cache->plane.visible = plane_state->base.visible; 672 cache->plane.visible = plane_state->base.visible;
673 cache->plane.adjusted_x = plane_state->main.x; 673 cache->plane.adjusted_x = plane_state->color_plane[0].x;
674 cache->plane.adjusted_y = plane_state->main.y; 674 cache->plane.adjusted_y = plane_state->color_plane[0].y;
675 cache->plane.y = plane_state->base.src.y1 >> 16; 675 cache->plane.y = plane_state->base.src.y1 >> 16;
676 676
677 if (!cache->plane.visible) 677 if (!cache->plane.visible)
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index fb2f9fce34cd..f99332972b7a 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -175,6 +175,9 @@ static int intelfb_create(struct drm_fb_helper *helper,
175 struct drm_i915_private *dev_priv = to_i915(dev); 175 struct drm_i915_private *dev_priv = to_i915(dev);
176 struct pci_dev *pdev = dev_priv->drm.pdev; 176 struct pci_dev *pdev = dev_priv->drm.pdev;
177 struct i915_ggtt *ggtt = &dev_priv->ggtt; 177 struct i915_ggtt *ggtt = &dev_priv->ggtt;
178 const struct i915_ggtt_view view = {
179 .type = I915_GGTT_VIEW_NORMAL,
180 };
178 struct fb_info *info; 181 struct fb_info *info;
179 struct drm_framebuffer *fb; 182 struct drm_framebuffer *fb;
180 struct i915_vma *vma; 183 struct i915_vma *vma;
@@ -214,8 +217,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
214 * BIOS is suitable for own access. 217 * BIOS is suitable for own access.
215 */ 218 */
216 vma = intel_pin_and_fence_fb_obj(&ifbdev->fb->base, 219 vma = intel_pin_and_fence_fb_obj(&ifbdev->fb->base,
217 DRM_MODE_ROTATE_0, 220 &view, false, &flags);
218 false, &flags);
219 if (IS_ERR(vma)) { 221 if (IS_ERR(vma)) {
220 ret = PTR_ERR(vma); 222 ret = PTR_ERR(vma);
221 goto out_unlock; 223 goto out_unlock;
diff --git a/drivers/gpu/drm/i915/intel_guc_submission.c b/drivers/gpu/drm/i915/intel_guc_submission.c
index 07b9d313b019..a81f04d46e87 100644
--- a/drivers/gpu/drm/i915/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/intel_guc_submission.c
@@ -557,16 +557,36 @@ static void inject_preempt_context(struct work_struct *work)
557 preempt_work[engine->id]); 557 preempt_work[engine->id]);
558 struct intel_guc_client *client = guc->preempt_client; 558 struct intel_guc_client *client = guc->preempt_client;
559 struct guc_stage_desc *stage_desc = __get_stage_desc(client); 559 struct guc_stage_desc *stage_desc = __get_stage_desc(client);
560 u32 ctx_desc = lower_32_bits(to_intel_context(client->owner, 560 struct intel_context *ce = to_intel_context(client->owner, engine);
561 engine)->lrc_desc);
562 u32 data[7]; 561 u32 data[7];
563 562
564 /* 563 if (!ce->ring->emit) { /* recreate upon load/resume */
565 * The ring contains commands to write GUC_PREEMPT_FINISHED into HWSP. 564 u32 addr = intel_hws_preempt_done_address(engine);
566 * See guc_fill_preempt_context(). 565 u32 *cs;
567 */ 566
567 cs = ce->ring->vaddr;
568 if (engine->id == RCS) {
569 cs = gen8_emit_ggtt_write_rcs(cs,
570 GUC_PREEMPT_FINISHED,
571 addr);
572 } else {
573 cs = gen8_emit_ggtt_write(cs,
574 GUC_PREEMPT_FINISHED,
575 addr);
576 *cs++ = MI_NOOP;
577 *cs++ = MI_NOOP;
578 }
579 *cs++ = MI_USER_INTERRUPT;
580 *cs++ = MI_NOOP;
581
582 ce->ring->emit = GUC_PREEMPT_BREADCRUMB_BYTES;
583 GEM_BUG_ON((void *)cs - ce->ring->vaddr != ce->ring->emit);
584
585 flush_ggtt_writes(ce->ring->vma);
586 }
587
568 spin_lock_irq(&client->wq_lock); 588 spin_lock_irq(&client->wq_lock);
569 guc_wq_item_append(client, engine->guc_id, ctx_desc, 589 guc_wq_item_append(client, engine->guc_id, lower_32_bits(ce->lrc_desc),
570 GUC_PREEMPT_BREADCRUMB_BYTES / sizeof(u64), 0); 590 GUC_PREEMPT_BREADCRUMB_BYTES / sizeof(u64), 0);
571 spin_unlock_irq(&client->wq_lock); 591 spin_unlock_irq(&client->wq_lock);
572 592
@@ -1044,50 +1064,6 @@ static inline bool ctx_save_restore_disabled(struct intel_context *ce)
1044#undef SR_DISABLED 1064#undef SR_DISABLED
1045} 1065}
1046 1066
1047static void guc_fill_preempt_context(struct intel_guc *guc)
1048{
1049 struct drm_i915_private *dev_priv = guc_to_i915(guc);
1050 struct intel_guc_client *client = guc->preempt_client;
1051 struct intel_engine_cs *engine;
1052 enum intel_engine_id id;
1053
1054 for_each_engine(engine, dev_priv, id) {
1055 struct intel_context *ce =
1056 to_intel_context(client->owner, engine);
1057 u32 addr = intel_hws_preempt_done_address(engine);
1058 u32 *cs;
1059
1060 GEM_BUG_ON(!ce->pin_count);
1061
1062 /*
1063 * We rely on this context image *not* being saved after
1064 * preemption. This ensures that the RING_HEAD / RING_TAIL
1065 * remain pointing at initial values forever.
1066 */
1067 GEM_BUG_ON(!ctx_save_restore_disabled(ce));
1068
1069 cs = ce->ring->vaddr;
1070 if (id == RCS) {
1071 cs = gen8_emit_ggtt_write_rcs(cs,
1072 GUC_PREEMPT_FINISHED,
1073 addr);
1074 } else {
1075 cs = gen8_emit_ggtt_write(cs,
1076 GUC_PREEMPT_FINISHED,
1077 addr);
1078 *cs++ = MI_NOOP;
1079 *cs++ = MI_NOOP;
1080 }
1081 *cs++ = MI_USER_INTERRUPT;
1082 *cs++ = MI_NOOP;
1083
1084 GEM_BUG_ON((void *)cs - ce->ring->vaddr !=
1085 GUC_PREEMPT_BREADCRUMB_BYTES);
1086
1087 flush_ggtt_writes(ce->ring->vma);
1088 }
1089}
1090
1091static int guc_clients_create(struct intel_guc *guc) 1067static int guc_clients_create(struct intel_guc *guc)
1092{ 1068{
1093 struct drm_i915_private *dev_priv = guc_to_i915(guc); 1069 struct drm_i915_private *dev_priv = guc_to_i915(guc);
@@ -1118,8 +1094,6 @@ static int guc_clients_create(struct intel_guc *guc)
1118 return PTR_ERR(client); 1094 return PTR_ERR(client);
1119 } 1095 }
1120 guc->preempt_client = client; 1096 guc->preempt_client = client;
1121
1122 guc_fill_preempt_context(guc);
1123 } 1097 }
1124 1098
1125 return 0; 1099 return 0;
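The two guc_submission hunks above move the preempt breadcrumb setup from client creation (guc_fill_preempt_context, deleted) into inject_preempt_context, regenerating it on first use after load/resume. The pattern in isolation, as a generic sketch in plain C rather than the i915 API:

struct ring {
	void *vaddr;		/* CPU mapping of the ring buffer */
	unsigned int emit;	/* bytes emitted; 0 => needs (re)init */
};

/* Generic lazy-init sketch: rebuild a fixed command sequence the first
 * time it is needed, using the emit offset as the "already done" flag.
 * The flag is cleared wherever the backing state is thrown away
 * (e.g. across suspend/resume), so the sequence is recreated on the
 * next use instead of at client creation. */
static void ensure_fixed_commands(struct ring *ring,
				  unsigned int (*emit)(void *vaddr))
{
	if (ring->emit)		/* still valid from a previous use */
		return;

	ring->emit = emit(ring->vaddr);
}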
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 9b1f0e5211a0..43957bb37a42 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -1294,7 +1294,7 @@ static int __context_pin(struct i915_gem_context *ctx, struct i915_vma *vma)
1294 * on an active context (which by nature is already on the GPU). 1294 * on an active context (which by nature is already on the GPU).
1295 */ 1295 */
1296 if (!(vma->flags & I915_VMA_GLOBAL_BIND)) { 1296 if (!(vma->flags & I915_VMA_GLOBAL_BIND)) {
1297 err = i915_gem_object_set_to_gtt_domain(vma->obj, true); 1297 err = i915_gem_object_set_to_wc_domain(vma->obj, true);
1298 if (err) 1298 if (err)
1299 return err; 1299 return err;
1300 } 1300 }
@@ -1322,7 +1322,9 @@ __execlists_context_pin(struct intel_engine_cs *engine,
1322 if (ret) 1322 if (ret)
1323 goto err; 1323 goto err;
1324 1324
1325 vaddr = i915_gem_object_pin_map(ce->state->obj, I915_MAP_WB); 1325 vaddr = i915_gem_object_pin_map(ce->state->obj,
1326 i915_coherent_map_type(ctx->i915) |
1327 I915_MAP_OVERRIDE);
1326 if (IS_ERR(vaddr)) { 1328 if (IS_ERR(vaddr)) {
1327 ret = PTR_ERR(vaddr); 1329 ret = PTR_ERR(vaddr);
1328 goto unpin_vma; 1330 goto unpin_vma;
@@ -1338,11 +1340,13 @@ __execlists_context_pin(struct intel_engine_cs *engine,
1338 1340
1339 intel_lr_context_descriptor_update(ctx, engine, ce); 1341 intel_lr_context_descriptor_update(ctx, engine, ce);
1340 1342
1343 GEM_BUG_ON(!intel_ring_offset_valid(ce->ring, ce->ring->head));
1344
1341 ce->lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE; 1345 ce->lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
1342 ce->lrc_reg_state[CTX_RING_BUFFER_START+1] = 1346 ce->lrc_reg_state[CTX_RING_BUFFER_START+1] =
1343 i915_ggtt_offset(ce->ring->vma); 1347 i915_ggtt_offset(ce->ring->vma);
1344 GEM_BUG_ON(!intel_ring_offset_valid(ce->ring, ce->ring->head)); 1348 ce->lrc_reg_state[CTX_RING_HEAD + 1] = ce->ring->head;
1345 ce->lrc_reg_state[CTX_RING_HEAD+1] = ce->ring->head; 1349 ce->lrc_reg_state[CTX_RING_TAIL + 1] = ce->ring->tail;
1346 1350
1347 ce->state->obj->pin_global++; 1351 ce->state->obj->pin_global++;
1348 i915_gem_context_get(ctx); 1352 i915_gem_context_get(ctx);
@@ -2392,7 +2396,7 @@ static int logical_ring_init(struct intel_engine_cs *engine)
2392 2396
2393 ret = intel_engine_init_common(engine); 2397 ret = intel_engine_init_common(engine);
2394 if (ret) 2398 if (ret)
2395 goto error; 2399 return ret;
2396 2400
2397 if (HAS_LOGICAL_RING_ELSQ(i915)) { 2401 if (HAS_LOGICAL_RING_ELSQ(i915)) {
2398 execlists->submit_reg = i915->regs + 2402 execlists->submit_reg = i915->regs +
@@ -2434,10 +2438,6 @@ static int logical_ring_init(struct intel_engine_cs *engine)
2434 reset_csb_pointers(execlists); 2438 reset_csb_pointers(execlists);
2435 2439
2436 return 0; 2440 return 0;
2437
2438error:
2439 intel_logical_ring_cleanup(engine);
2440 return ret;
2441} 2441}
2442 2442
2443int logical_render_ring_init(struct intel_engine_cs *engine) 2443int logical_render_ring_init(struct intel_engine_cs *engine)
@@ -2460,10 +2460,14 @@ int logical_render_ring_init(struct intel_engine_cs *engine)
2460 engine->emit_breadcrumb = gen8_emit_breadcrumb_rcs; 2460 engine->emit_breadcrumb = gen8_emit_breadcrumb_rcs;
2461 engine->emit_breadcrumb_sz = gen8_emit_breadcrumb_rcs_sz; 2461 engine->emit_breadcrumb_sz = gen8_emit_breadcrumb_rcs_sz;
2462 2462
2463 ret = intel_engine_create_scratch(engine, PAGE_SIZE); 2463 ret = logical_ring_init(engine);
2464 if (ret) 2464 if (ret)
2465 return ret; 2465 return ret;
2466 2466
2467 ret = intel_engine_create_scratch(engine, PAGE_SIZE);
2468 if (ret)
2469 goto err_cleanup_common;
2470
2467 ret = intel_init_workaround_bb(engine); 2471 ret = intel_init_workaround_bb(engine);
2468 if (ret) { 2472 if (ret) {
2469 /* 2473 /*
@@ -2475,7 +2479,11 @@ int logical_render_ring_init(struct intel_engine_cs *engine)
2475 ret); 2479 ret);
2476 } 2480 }
2477 2481
2478 return logical_ring_init(engine); 2482 return 0;
2483
2484err_cleanup_common:
2485 intel_engine_cleanup_common(engine);
2486 return ret;
2479} 2487}
2480 2488
2481int logical_xcs_ring_init(struct intel_engine_cs *engine) 2489int logical_xcs_ring_init(struct intel_engine_cs *engine)
@@ -2841,13 +2849,14 @@ error_deref_obj:
2841 return ret; 2849 return ret;
2842} 2850}
2843 2851
2844void intel_lr_context_resume(struct drm_i915_private *dev_priv) 2852void intel_lr_context_resume(struct drm_i915_private *i915)
2845{ 2853{
2846 struct intel_engine_cs *engine; 2854 struct intel_engine_cs *engine;
2847 struct i915_gem_context *ctx; 2855 struct i915_gem_context *ctx;
2848 enum intel_engine_id id; 2856 enum intel_engine_id id;
2849 2857
2850 /* Because we emit WA_TAIL_DWORDS there may be a disparity 2858 /*
2859 * Because we emit WA_TAIL_DWORDS there may be a disparity
2851 * between our bookkeeping in ce->ring->head and ce->ring->tail and 2860 * between our bookkeeping in ce->ring->head and ce->ring->tail and
2852 * that stored in context. As we only write new commands from 2861 * that stored in context. As we only write new commands from
2853 * ce->ring->tail onwards, everything before that is junk. If the GPU 2862 * ce->ring->tail onwards, everything before that is junk. If the GPU
@@ -2857,28 +2866,22 @@ void intel_lr_context_resume(struct drm_i915_private *dev_priv)
2857 * So to avoid that we reset the context images upon resume. For 2866 * So to avoid that we reset the context images upon resume. For
2858 * simplicity, we just zero everything out. 2867 * simplicity, we just zero everything out.
2859 */ 2868 */
2860 list_for_each_entry(ctx, &dev_priv->contexts.list, link) { 2869 list_for_each_entry(ctx, &i915->contexts.list, link) {
2861 for_each_engine(engine, dev_priv, id) { 2870 for_each_engine(engine, i915, id) {
2862 struct intel_context *ce = 2871 struct intel_context *ce =
2863 to_intel_context(ctx, engine); 2872 to_intel_context(ctx, engine);
2864 u32 *reg;
2865 2873
2866 if (!ce->state) 2874 if (!ce->state)
2867 continue; 2875 continue;
2868 2876
2869 reg = i915_gem_object_pin_map(ce->state->obj, 2877 intel_ring_reset(ce->ring, 0);
2870 I915_MAP_WB);
2871 if (WARN_ON(IS_ERR(reg)))
2872 continue;
2873
2874 reg += LRC_STATE_PN * PAGE_SIZE / sizeof(*reg);
2875 reg[CTX_RING_HEAD+1] = 0;
2876 reg[CTX_RING_TAIL+1] = 0;
2877 2878
2878 ce->state->obj->mm.dirty = true; 2879 if (ce->pin_count) { /* otherwise done in context_pin */
2879 i915_gem_object_unpin_map(ce->state->obj); 2880 u32 *regs = ce->lrc_reg_state;
2880 2881
2881 intel_ring_reset(ce->ring, 0); 2882 regs[CTX_RING_HEAD + 1] = ce->ring->head;
2883 regs[CTX_RING_TAIL + 1] = ce->ring->tail;
2884 }
2882 } 2885 }
2883 } 2886 }
2884} 2887}
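On the intel_lr_context_resume() rewrite above: instead of pinning every context image just to zero the saved HEAD/TAIL, the ring bookkeeping is reset and, only for contexts that are still pinned (and thus will not pass through __execlists_context_pin() again), the mapped register state is patched directly. A reduced sketch of that invariant, with field layout and offsets shown purely for illustration:

#include <linux/types.h>

/* Illustrative offsets only; the real values come from intel_lrc_reg.h. */
#define CTX_RING_HEAD 0x04
#define CTX_RING_TAIL 0x06

struct ring_sketch { u32 head, tail; };

struct context_sketch {
	struct ring_sketch ring;
	u32 *lrc_reg_state;	/* valid only while pinned */
	unsigned int pin_count;
};

static void lr_context_resume_sketch(struct context_sketch *ce)
{
	/* Everything in the ring from before suspend is junk: restart. */
	ce->ring.head = ce->ring.tail = 0;

	if (ce->pin_count) {
		/* Pinned contexts keep their mapping and won't be
		 * re-pinned, so refresh the register image here ... */
		ce->lrc_reg_state[CTX_RING_HEAD + 1] = ce->ring.head;
		ce->lrc_reg_state[CTX_RING_TAIL + 1] = ce->ring.tail;
	}
	/* ... otherwise context_pin writes them when next pinned. */
}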
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 443dfaefd7a6..72eb7e48e8bc 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -487,23 +487,6 @@ void intel_overlay_reset(struct drm_i915_private *dev_priv)
487 overlay->active = false; 487 overlay->active = false;
488} 488}
489 489
490struct put_image_params {
491 int format;
492 short dst_x;
493 short dst_y;
494 short dst_w;
495 short dst_h;
496 short src_w;
497 short src_scan_h;
498 short src_scan_w;
499 short src_h;
500 short stride_Y;
501 short stride_UV;
502 int offset_Y;
503 int offset_U;
504 int offset_V;
505};
506
507static int packed_depth_bytes(u32 format) 490static int packed_depth_bytes(u32 format)
508{ 491{
509 switch (format & I915_OVERLAY_DEPTH_MASK) { 492 switch (format & I915_OVERLAY_DEPTH_MASK) {
@@ -618,25 +601,25 @@ static void update_polyphase_filter(struct overlay_registers __iomem *regs)
618 601
619static bool update_scaling_factors(struct intel_overlay *overlay, 602static bool update_scaling_factors(struct intel_overlay *overlay,
620 struct overlay_registers __iomem *regs, 603 struct overlay_registers __iomem *regs,
621 struct put_image_params *params) 604 struct drm_intel_overlay_put_image *params)
622{ 605{
623 /* fixed point with a 12 bit shift */ 606 /* fixed point with a 12 bit shift */
624 u32 xscale, yscale, xscale_UV, yscale_UV; 607 u32 xscale, yscale, xscale_UV, yscale_UV;
625#define FP_SHIFT 12 608#define FP_SHIFT 12
626#define FRACT_MASK 0xfff 609#define FRACT_MASK 0xfff
627 bool scale_changed = false; 610 bool scale_changed = false;
628 int uv_hscale = uv_hsubsampling(params->format); 611 int uv_hscale = uv_hsubsampling(params->flags);
629 int uv_vscale = uv_vsubsampling(params->format); 612 int uv_vscale = uv_vsubsampling(params->flags);
630 613
631 if (params->dst_w > 1) 614 if (params->dst_width > 1)
632 xscale = ((params->src_scan_w - 1) << FP_SHIFT) 615 xscale = ((params->src_scan_width - 1) << FP_SHIFT) /
633 /(params->dst_w); 616 params->dst_width;
634 else 617 else
635 xscale = 1 << FP_SHIFT; 618 xscale = 1 << FP_SHIFT;
636 619
637 if (params->dst_h > 1) 620 if (params->dst_height > 1)
638 yscale = ((params->src_scan_h - 1) << FP_SHIFT) 621 yscale = ((params->src_scan_height - 1) << FP_SHIFT) /
639 /(params->dst_h); 622 params->dst_height;
640 else 623 else
641 yscale = 1 << FP_SHIFT; 624 yscale = 1 << FP_SHIFT;
642 625
@@ -713,12 +696,12 @@ static void update_colorkey(struct intel_overlay *overlay,
713 iowrite32(flags, &regs->DCLRKM); 696 iowrite32(flags, &regs->DCLRKM);
714} 697}
715 698
716static u32 overlay_cmd_reg(struct put_image_params *params) 699static u32 overlay_cmd_reg(struct drm_intel_overlay_put_image *params)
717{ 700{
718 u32 cmd = OCMD_ENABLE | OCMD_BUF_TYPE_FRAME | OCMD_BUFFER0; 701 u32 cmd = OCMD_ENABLE | OCMD_BUF_TYPE_FRAME | OCMD_BUFFER0;
719 702
720 if (params->format & I915_OVERLAY_YUV_PLANAR) { 703 if (params->flags & I915_OVERLAY_YUV_PLANAR) {
721 switch (params->format & I915_OVERLAY_DEPTH_MASK) { 704 switch (params->flags & I915_OVERLAY_DEPTH_MASK) {
722 case I915_OVERLAY_YUV422: 705 case I915_OVERLAY_YUV422:
723 cmd |= OCMD_YUV_422_PLANAR; 706 cmd |= OCMD_YUV_422_PLANAR;
724 break; 707 break;
@@ -731,7 +714,7 @@ static u32 overlay_cmd_reg(struct put_image_params *params)
731 break; 714 break;
732 } 715 }
733 } else { /* YUV packed */ 716 } else { /* YUV packed */
734 switch (params->format & I915_OVERLAY_DEPTH_MASK) { 717 switch (params->flags & I915_OVERLAY_DEPTH_MASK) {
735 case I915_OVERLAY_YUV422: 718 case I915_OVERLAY_YUV422:
736 cmd |= OCMD_YUV_422_PACKED; 719 cmd |= OCMD_YUV_422_PACKED;
737 break; 720 break;
@@ -740,7 +723,7 @@ static u32 overlay_cmd_reg(struct put_image_params *params)
740 break; 723 break;
741 } 724 }
742 725
743 switch (params->format & I915_OVERLAY_SWAP_MASK) { 726 switch (params->flags & I915_OVERLAY_SWAP_MASK) {
744 case I915_OVERLAY_NO_SWAP: 727 case I915_OVERLAY_NO_SWAP:
745 break; 728 break;
746 case I915_OVERLAY_UV_SWAP: 729 case I915_OVERLAY_UV_SWAP:
@@ -760,7 +743,7 @@ static u32 overlay_cmd_reg(struct put_image_params *params)
760 743
761static int intel_overlay_do_put_image(struct intel_overlay *overlay, 744static int intel_overlay_do_put_image(struct intel_overlay *overlay,
762 struct drm_i915_gem_object *new_bo, 745 struct drm_i915_gem_object *new_bo,
763 struct put_image_params *params) 746 struct drm_intel_overlay_put_image *params)
764{ 747{
765 struct overlay_registers __iomem *regs = overlay->regs; 748 struct overlay_registers __iomem *regs = overlay->regs;
766 struct drm_i915_private *dev_priv = overlay->i915; 749 struct drm_i915_private *dev_priv = overlay->i915;
@@ -806,35 +789,40 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
806 goto out_unpin; 789 goto out_unpin;
807 } 790 }
808 791
809 iowrite32((params->dst_y << 16) | params->dst_x, &regs->DWINPOS); 792 iowrite32(params->dst_y << 16 | params->dst_x, &regs->DWINPOS);
810 iowrite32((params->dst_h << 16) | params->dst_w, &regs->DWINSZ); 793 iowrite32(params->dst_height << 16 | params->dst_width, &regs->DWINSZ);
811 794
812 if (params->format & I915_OVERLAY_YUV_PACKED) 795 if (params->flags & I915_OVERLAY_YUV_PACKED)
813 tmp_width = packed_width_bytes(params->format, params->src_w); 796 tmp_width = packed_width_bytes(params->flags,
797 params->src_width);
814 else 798 else
815 tmp_width = params->src_w; 799 tmp_width = params->src_width;
816 800
817 swidth = params->src_w; 801 swidth = params->src_width;
818 swidthsw = calc_swidthsw(dev_priv, params->offset_Y, tmp_width); 802 swidthsw = calc_swidthsw(dev_priv, params->offset_Y, tmp_width);
819 sheight = params->src_h; 803 sheight = params->src_height;
820 iowrite32(i915_ggtt_offset(vma) + params->offset_Y, &regs->OBUF_0Y); 804 iowrite32(i915_ggtt_offset(vma) + params->offset_Y, &regs->OBUF_0Y);
821 ostride = params->stride_Y; 805 ostride = params->stride_Y;
822 806
823 if (params->format & I915_OVERLAY_YUV_PLANAR) { 807 if (params->flags & I915_OVERLAY_YUV_PLANAR) {
824 int uv_hscale = uv_hsubsampling(params->format); 808 int uv_hscale = uv_hsubsampling(params->flags);
825 int uv_vscale = uv_vsubsampling(params->format); 809 int uv_vscale = uv_vsubsampling(params->flags);
826 u32 tmp_U, tmp_V; 810 u32 tmp_U, tmp_V;
827 swidth |= (params->src_w/uv_hscale) << 16; 811
812 swidth |= (params->src_width / uv_hscale) << 16;
813 sheight |= (params->src_height / uv_vscale) << 16;
814
828 tmp_U = calc_swidthsw(dev_priv, params->offset_U, 815 tmp_U = calc_swidthsw(dev_priv, params->offset_U,
829 params->src_w/uv_hscale); 816 params->src_width / uv_hscale);
830 tmp_V = calc_swidthsw(dev_priv, params->offset_V, 817 tmp_V = calc_swidthsw(dev_priv, params->offset_V,
831 params->src_w/uv_hscale); 818 params->src_width / uv_hscale);
832 swidthsw |= max_t(u32, tmp_U, tmp_V) << 16; 819 swidthsw |= max(tmp_U, tmp_V) << 16;
833 sheight |= (params->src_h/uv_vscale) << 16; 820
834 iowrite32(i915_ggtt_offset(vma) + params->offset_U, 821 iowrite32(i915_ggtt_offset(vma) + params->offset_U,
835 &regs->OBUF_0U); 822 &regs->OBUF_0U);
836 iowrite32(i915_ggtt_offset(vma) + params->offset_V, 823 iowrite32(i915_ggtt_offset(vma) + params->offset_V,
837 &regs->OBUF_0V); 824 &regs->OBUF_0V);
825
838 ostride |= params->stride_UV << 16; 826 ostride |= params->stride_UV << 16;
839 } 827 }
840 828
@@ -938,15 +926,16 @@ static int check_overlay_dst(struct intel_overlay *overlay,
938 return -EINVAL; 926 return -EINVAL;
939} 927}
940 928
941static int check_overlay_scaling(struct put_image_params *rec) 929static int check_overlay_scaling(struct drm_intel_overlay_put_image *rec)
942{ 930{
943 u32 tmp; 931 u32 tmp;
944 932
945 /* downscaling limit is 8.0 */ 933 /* downscaling limit is 8.0 */
946 tmp = ((rec->src_scan_h << 16) / rec->dst_h) >> 16; 934 tmp = ((rec->src_scan_height << 16) / rec->dst_height) >> 16;
947 if (tmp > 7) 935 if (tmp > 7)
948 return -EINVAL; 936 return -EINVAL;
949 tmp = ((rec->src_scan_w << 16) / rec->dst_w) >> 16; 937
938 tmp = ((rec->src_scan_width << 16) / rec->dst_width) >> 16;
950 if (tmp > 7) 939 if (tmp > 7)
951 return -EINVAL; 940 return -EINVAL;
952 941
@@ -1067,13 +1056,12 @@ static int check_overlay_src(struct drm_i915_private *dev_priv,
1067int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data, 1056int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data,
1068 struct drm_file *file_priv) 1057 struct drm_file *file_priv)
1069{ 1058{
1070 struct drm_intel_overlay_put_image *put_image_rec = data; 1059 struct drm_intel_overlay_put_image *params = data;
1071 struct drm_i915_private *dev_priv = to_i915(dev); 1060 struct drm_i915_private *dev_priv = to_i915(dev);
1072 struct intel_overlay *overlay; 1061 struct intel_overlay *overlay;
1073 struct drm_crtc *drmmode_crtc; 1062 struct drm_crtc *drmmode_crtc;
1074 struct intel_crtc *crtc; 1063 struct intel_crtc *crtc;
1075 struct drm_i915_gem_object *new_bo; 1064 struct drm_i915_gem_object *new_bo;
1076 struct put_image_params *params;
1077 int ret; 1065 int ret;
1078 1066
1079 overlay = dev_priv->overlay; 1067 overlay = dev_priv->overlay;
@@ -1082,7 +1070,7 @@ int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data,
1082 return -ENODEV; 1070 return -ENODEV;
1083 } 1071 }
1084 1072
1085 if (!(put_image_rec->flags & I915_OVERLAY_ENABLE)) { 1073 if (!(params->flags & I915_OVERLAY_ENABLE)) {
1086 drm_modeset_lock_all(dev); 1074 drm_modeset_lock_all(dev);
1087 mutex_lock(&dev->struct_mutex); 1075 mutex_lock(&dev->struct_mutex);
1088 1076
@@ -1094,22 +1082,14 @@ int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data,
1094 return ret; 1082 return ret;
1095 } 1083 }
1096 1084
1097 params = kmalloc(sizeof(*params), GFP_KERNEL); 1085 drmmode_crtc = drm_crtc_find(dev, file_priv, params->crtc_id);
1098 if (!params) 1086 if (!drmmode_crtc)
1099 return -ENOMEM; 1087 return -ENOENT;
1100
1101 drmmode_crtc = drm_crtc_find(dev, file_priv, put_image_rec->crtc_id);
1102 if (!drmmode_crtc) {
1103 ret = -ENOENT;
1104 goto out_free;
1105 }
1106 crtc = to_intel_crtc(drmmode_crtc); 1088 crtc = to_intel_crtc(drmmode_crtc);
1107 1089
1108 new_bo = i915_gem_object_lookup(file_priv, put_image_rec->bo_handle); 1090 new_bo = i915_gem_object_lookup(file_priv, params->bo_handle);
1109 if (!new_bo) { 1091 if (!new_bo)
1110 ret = -ENOENT; 1092 return -ENOENT;
1111 goto out_free;
1112 }
1113 1093
1114 drm_modeset_lock_all(dev); 1094 drm_modeset_lock_all(dev);
1115 mutex_lock(&dev->struct_mutex); 1095 mutex_lock(&dev->struct_mutex);
@@ -1145,42 +1125,27 @@ int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data,
1145 overlay->pfit_active = false; 1125 overlay->pfit_active = false;
1146 } 1126 }
1147 1127
1148 ret = check_overlay_dst(overlay, put_image_rec); 1128 ret = check_overlay_dst(overlay, params);
1149 if (ret != 0) 1129 if (ret != 0)
1150 goto out_unlock; 1130 goto out_unlock;
1151 1131
1152 if (overlay->pfit_active) { 1132 if (overlay->pfit_active) {
1153 params->dst_y = ((((u32)put_image_rec->dst_y) << 12) / 1133 params->dst_y = (((u32)params->dst_y << 12) /
1154 overlay->pfit_vscale_ratio); 1134 overlay->pfit_vscale_ratio);
1155 /* shifting right rounds downwards, so add 1 */ 1135 /* shifting right rounds downwards, so add 1 */
1156 params->dst_h = ((((u32)put_image_rec->dst_height) << 12) / 1136 params->dst_height = (((u32)params->dst_height << 12) /
1157 overlay->pfit_vscale_ratio) + 1; 1137 overlay->pfit_vscale_ratio) + 1;
1158 } else {
1159 params->dst_y = put_image_rec->dst_y;
1160 params->dst_h = put_image_rec->dst_height;
1161 } 1138 }
1162 params->dst_x = put_image_rec->dst_x; 1139
1163 params->dst_w = put_image_rec->dst_width; 1140 if (params->src_scan_height > params->src_height ||
1164 1141 params->src_scan_width > params->src_width) {
1165 params->src_w = put_image_rec->src_width;
1166 params->src_h = put_image_rec->src_height;
1167 params->src_scan_w = put_image_rec->src_scan_width;
1168 params->src_scan_h = put_image_rec->src_scan_height;
1169 if (params->src_scan_h > params->src_h ||
1170 params->src_scan_w > params->src_w) {
1171 ret = -EINVAL; 1142 ret = -EINVAL;
1172 goto out_unlock; 1143 goto out_unlock;
1173 } 1144 }
1174 1145
1175 ret = check_overlay_src(dev_priv, put_image_rec, new_bo); 1146 ret = check_overlay_src(dev_priv, params, new_bo);
1176 if (ret != 0) 1147 if (ret != 0)
1177 goto out_unlock; 1148 goto out_unlock;
1178 params->format = put_image_rec->flags & ~I915_OVERLAY_FLAGS_MASK;
1179 params->stride_Y = put_image_rec->stride_Y;
1180 params->stride_UV = put_image_rec->stride_UV;
1181 params->offset_Y = put_image_rec->offset_Y;
1182 params->offset_U = put_image_rec->offset_U;
1183 params->offset_V = put_image_rec->offset_V;
1184 1149
1185 /* Check scaling after src size to prevent a divide-by-zero. */ 1150 /* Check scaling after src size to prevent a divide-by-zero. */
1186 ret = check_overlay_scaling(params); 1151 ret = check_overlay_scaling(params);
@@ -1195,16 +1160,12 @@ int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data,
1195 drm_modeset_unlock_all(dev); 1160 drm_modeset_unlock_all(dev);
1196 i915_gem_object_put(new_bo); 1161 i915_gem_object_put(new_bo);
1197 1162
1198 kfree(params);
1199
1200 return 0; 1163 return 0;
1201 1164
1202out_unlock: 1165out_unlock:
1203 mutex_unlock(&dev->struct_mutex); 1166 mutex_unlock(&dev->struct_mutex);
1204 drm_modeset_unlock_all(dev); 1167 drm_modeset_unlock_all(dev);
1205 i915_gem_object_put(new_bo); 1168 i915_gem_object_put(new_bo);
1206out_free:
1207 kfree(params);
1208 1169
1209 return ret; 1170 return ret;
1210} 1171}
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index d99e5fabe93c..1db9b8328275 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -2875,6 +2875,16 @@ static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
2875 } 2875 }
2876 } 2876 }
2877 2877
2878 /*
2879 * WA Level-0 adjustment for 16GB DIMMs: SKL+
2880 * If we could not get the DIMM info, assume 16GB DIMMs
2881 * and enable this WA, bumping the level-0 latency to
2882 * prevent any underruns.
2883 */
2884 if (!dev_priv->dram_info.valid_dimm ||
2885 dev_priv->dram_info.is_16gb_dimm)
2886 wm[0] += 1;
2887
2878 } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { 2888 } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
2879 uint64_t sskpd = I915_READ64(MCH_SSKPD); 2889 uint64_t sskpd = I915_READ64(MCH_SSKPD);
2880 2890
@@ -6108,10 +6118,13 @@ void intel_enable_ipc(struct drm_i915_private *dev_priv)
6108 u32 val; 6118 u32 val;
6109 6119
6110 /* Display WA #0477 WaDisableIPC: skl */ 6120 /* Display WA #0477 WaDisableIPC: skl */
6111 if (IS_SKYLAKE(dev_priv)) { 6121 if (IS_SKYLAKE(dev_priv))
6122 dev_priv->ipc_enabled = false;
6123
6124 /* Display WA #1141: SKL:all KBL:all CFL */
6125 if ((IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) &&
6126 !dev_priv->dram_info.symmetric_memory)
6112 dev_priv->ipc_enabled = false; 6127 dev_priv->ipc_enabled = false;
6113 return;
6114 }
6115 6128
6116 val = I915_READ(DISP_ARB_CTL2); 6129 val = I915_READ(DISP_ARB_CTL2);
6117 6130
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 472939f5c18f..d0ef50bf930a 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -1677,9 +1677,26 @@ static int switch_context(struct i915_request *rq)
1677 GEM_BUG_ON(HAS_EXECLISTS(rq->i915)); 1677 GEM_BUG_ON(HAS_EXECLISTS(rq->i915));
1678 1678
1679 if (ppgtt) { 1679 if (ppgtt) {
1680 ret = load_pd_dir(rq, ppgtt); 1680 int loops;
1681 if (ret) 1681
1682 goto err; 1682 /*
1683 * Baytrail takes a little more convincing that it really needs
1684 * to reload the PD between contexts. It is not just a little
1685 * longer, as adding more stalls after the load_pd_dir (i.e.
1686 * adding a long loop around flush_pd_dir) is not as effective
1687 * as reloading the PD umpteen times. 32 is derived from
1688 * experimentation (gem_exec_parallel/fds) and has no good
1689 * explanation.
1690 */
1691 loops = 1;
1692 if (engine->id == BCS && IS_VALLEYVIEW(engine->i915))
1693 loops = 32;
1694
1695 do {
1696 ret = load_pd_dir(rq, ppgtt);
1697 if (ret)
1698 goto err;
1699 } while (--loops);
1683 1700
1684 if (intel_engine_flag(engine) & ppgtt->pd_dirty_rings) { 1701 if (intel_engine_flag(engine) & ppgtt->pd_dirty_rings) {
1685 unwind_mm = intel_engine_flag(engine); 1702 unwind_mm = intel_engine_flag(engine);
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index 480dadb1047b..0fdabce647ab 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -1996,6 +1996,7 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
1996 BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO)) 1996 BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO))
1997 1997
1998#define ICL_AUX_A_IO_POWER_DOMAINS ( \ 1998#define ICL_AUX_A_IO_POWER_DOMAINS ( \
1999 BIT_ULL(POWER_DOMAIN_AUX_IO_A) | \
1999 BIT_ULL(POWER_DOMAIN_AUX_A)) 2000 BIT_ULL(POWER_DOMAIN_AUX_A))
2000#define ICL_AUX_B_IO_POWER_DOMAINS ( \ 2001#define ICL_AUX_B_IO_POWER_DOMAINS ( \
2001 BIT_ULL(POWER_DOMAIN_AUX_B)) 2002 BIT_ULL(POWER_DOMAIN_AUX_B))
@@ -3563,6 +3564,9 @@ static void icl_display_core_init(struct drm_i915_private *dev_priv,
3563 3564
3564 /* 7. Setup MBUS. */ 3565 /* 7. Setup MBUS. */
3565 icl_mbus_init(dev_priv); 3566 icl_mbus_init(dev_priv);
3567
3568 if (resume && dev_priv->csr.dmc_payload)
3569 intel_csr_load_program(dev_priv);
3566} 3570}
3567 3571
3568static void icl_display_core_uninit(struct drm_i915_private *dev_priv) 3572static void icl_display_core_uninit(struct drm_i915_private *dev_priv)
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 812fe7b06f87..701372e512a8 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -99,32 +99,13 @@ struct intel_sdvo {
99 */ 99 */
100 uint16_t hotplug_active; 100 uint16_t hotplug_active;
101 101
102 /**
103 * This is set if we're going to treat the device as TV-out.
104 *
105 * While we have these nice friendly flags for output types that ought
106 * to decide this for us, the S-Video output on our HDMI+S-Video card
107 * shows up as RGB1 (VGA).
108 */
109 bool is_tv;
110
111 enum port port; 102 enum port port;
112 103
113 /**
114 * This is set if we treat the device as HDMI, instead of DVI.
115 */
116 bool is_hdmi;
117 bool has_hdmi_monitor; 104 bool has_hdmi_monitor;
118 bool has_hdmi_audio; 105 bool has_hdmi_audio;
119 bool rgb_quant_range_selectable; 106 bool rgb_quant_range_selectable;
120 107
121 /** 108 /**
122 * This is set if we detect output of sdvo device as LVDS and
123 * have a valid fixed mode to use with the panel.
124 */
125 bool is_lvds;
126
127 /**
128 * This is the sdvo fixed panel mode pointer 109
129 */ 110 */
130 struct drm_display_mode *sdvo_lvds_fixed_mode; 111 struct drm_display_mode *sdvo_lvds_fixed_mode;
@@ -172,6 +153,11 @@ struct intel_sdvo_connector {
172 153
173 /* this is to get the range of margin.*/ 154 /* this is to get the range of margin.*/
174 u32 max_hscan, max_vscan; 155 u32 max_hscan, max_vscan;
156
157 /**
158 * This is set if we treat the device as HDMI, instead of DVI.
159 */
160 bool is_hdmi;
175}; 161};
176 162
177struct intel_sdvo_connector_state { 163struct intel_sdvo_connector_state {
@@ -766,6 +752,7 @@ static bool intel_sdvo_get_input_timing(struct intel_sdvo *intel_sdvo,
766 752
767static bool 753static bool
768intel_sdvo_create_preferred_input_timing(struct intel_sdvo *intel_sdvo, 754intel_sdvo_create_preferred_input_timing(struct intel_sdvo *intel_sdvo,
755 struct intel_sdvo_connector *intel_sdvo_connector,
769 uint16_t clock, 756 uint16_t clock,
770 uint16_t width, 757 uint16_t width,
771 uint16_t height) 758 uint16_t height)
@@ -778,7 +765,7 @@ intel_sdvo_create_preferred_input_timing(struct intel_sdvo *intel_sdvo,
778 args.height = height; 765 args.height = height;
779 args.interlace = 0; 766 args.interlace = 0;
780 767
781 if (intel_sdvo->is_lvds && 768 if (IS_LVDS(intel_sdvo_connector) &&
782 (intel_sdvo->sdvo_lvds_fixed_mode->hdisplay != width || 769 (intel_sdvo->sdvo_lvds_fixed_mode->hdisplay != width ||
783 intel_sdvo->sdvo_lvds_fixed_mode->vdisplay != height)) 770 intel_sdvo->sdvo_lvds_fixed_mode->vdisplay != height))
784 args.scaled = 1; 771 args.scaled = 1;
@@ -1067,6 +1054,7 @@ intel_sdvo_set_output_timings_from_mode(struct intel_sdvo *intel_sdvo,
1067 */ 1054 */
1068static bool 1055static bool
1069intel_sdvo_get_preferred_input_mode(struct intel_sdvo *intel_sdvo, 1056intel_sdvo_get_preferred_input_mode(struct intel_sdvo *intel_sdvo,
1057 struct intel_sdvo_connector *intel_sdvo_connector,
1070 const struct drm_display_mode *mode, 1058 const struct drm_display_mode *mode,
1071 struct drm_display_mode *adjusted_mode) 1059 struct drm_display_mode *adjusted_mode)
1072{ 1060{
@@ -1077,6 +1065,7 @@ intel_sdvo_get_preferred_input_mode(struct intel_sdvo *intel_sdvo,
1077 return false; 1065 return false;
1078 1066
1079 if (!intel_sdvo_create_preferred_input_timing(intel_sdvo, 1067 if (!intel_sdvo_create_preferred_input_timing(intel_sdvo,
1068 intel_sdvo_connector,
1080 mode->clock / 10, 1069 mode->clock / 10,
1081 mode->hdisplay, 1070 mode->hdisplay,
1082 mode->vdisplay)) 1071 mode->vdisplay))
@@ -1127,6 +1116,8 @@ static bool intel_sdvo_compute_config(struct intel_encoder *encoder,
1127 struct intel_sdvo *intel_sdvo = to_sdvo(encoder); 1116 struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
1128 struct intel_sdvo_connector_state *intel_sdvo_state = 1117 struct intel_sdvo_connector_state *intel_sdvo_state =
1129 to_intel_sdvo_connector_state(conn_state); 1118 to_intel_sdvo_connector_state(conn_state);
1119 struct intel_sdvo_connector *intel_sdvo_connector =
1120 to_intel_sdvo_connector(conn_state->connector);
1130 struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; 1121 struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
1131 struct drm_display_mode *mode = &pipe_config->base.mode; 1122 struct drm_display_mode *mode = &pipe_config->base.mode;
1132 1123
@@ -1142,20 +1133,22 @@ static bool intel_sdvo_compute_config(struct intel_encoder *encoder,
1142 * timings, even though this isn't really the right place in 1133 * timings, even though this isn't really the right place in
1143 * the sequence to do it. Oh well. 1134 * the sequence to do it. Oh well.
1144 */ 1135 */
1145 if (intel_sdvo->is_tv) { 1136 if (IS_TV(intel_sdvo_connector)) {
1146 if (!intel_sdvo_set_output_timings_from_mode(intel_sdvo, mode)) 1137 if (!intel_sdvo_set_output_timings_from_mode(intel_sdvo, mode))
1147 return false; 1138 return false;
1148 1139
1149 (void) intel_sdvo_get_preferred_input_mode(intel_sdvo, 1140 (void) intel_sdvo_get_preferred_input_mode(intel_sdvo,
1141 intel_sdvo_connector,
1150 mode, 1142 mode,
1151 adjusted_mode); 1143 adjusted_mode);
1152 pipe_config->sdvo_tv_clock = true; 1144 pipe_config->sdvo_tv_clock = true;
1153 } else if (intel_sdvo->is_lvds) { 1145 } else if (IS_LVDS(intel_sdvo_connector)) {
1154 if (!intel_sdvo_set_output_timings_from_mode(intel_sdvo, 1146 if (!intel_sdvo_set_output_timings_from_mode(intel_sdvo,
1155 intel_sdvo->sdvo_lvds_fixed_mode)) 1147 intel_sdvo->sdvo_lvds_fixed_mode))
1156 return false; 1148 return false;
1157 1149
1158 (void) intel_sdvo_get_preferred_input_mode(intel_sdvo, 1150 (void) intel_sdvo_get_preferred_input_mode(intel_sdvo,
1151 intel_sdvo_connector,
1159 mode, 1152 mode,
1160 adjusted_mode); 1153 adjusted_mode);
1161 } 1154 }
@@ -1194,11 +1187,11 @@ static bool intel_sdvo_compute_config(struct intel_encoder *encoder,
1194 } 1187 }
1195 1188
1196 /* Clock computation needs to happen after pixel multiplier. */ 1189 /* Clock computation needs to happen after pixel multiplier. */
1197 if (intel_sdvo->is_tv) 1190 if (IS_TV(intel_sdvo_connector))
1198 i9xx_adjust_sdvo_tv_clock(pipe_config); 1191 i9xx_adjust_sdvo_tv_clock(pipe_config);
1199 1192
1200 /* Set user selected PAR to incoming mode's member */ 1193 /* Set user selected PAR to incoming mode's member */
1201 if (intel_sdvo->is_hdmi) 1194 if (intel_sdvo_connector->is_hdmi)
1202 adjusted_mode->picture_aspect_ratio = conn_state->picture_aspect_ratio; 1195 adjusted_mode->picture_aspect_ratio = conn_state->picture_aspect_ratio;
1203 1196
1204 return true; 1197 return true;
@@ -1275,6 +1268,8 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder,
1275 const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode; 1268 const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode;
1276 const struct intel_sdvo_connector_state *sdvo_state = 1269 const struct intel_sdvo_connector_state *sdvo_state =
1277 to_intel_sdvo_connector_state(conn_state); 1270 to_intel_sdvo_connector_state(conn_state);
1271 const struct intel_sdvo_connector *intel_sdvo_connector =
1272 to_intel_sdvo_connector(conn_state->connector);
1278 const struct drm_display_mode *mode = &crtc_state->base.mode; 1273 const struct drm_display_mode *mode = &crtc_state->base.mode;
1279 struct intel_sdvo *intel_sdvo = to_sdvo(intel_encoder); 1274 struct intel_sdvo *intel_sdvo = to_sdvo(intel_encoder);
1280 u32 sdvox; 1275 u32 sdvox;
@@ -1304,7 +1299,7 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder,
1304 return; 1299 return;
1305 1300
1306 /* lvds has a special fixed output timing. */ 1301 /* lvds has a special fixed output timing. */
1307 if (intel_sdvo->is_lvds) 1302 if (IS_LVDS(intel_sdvo_connector))
1308 intel_sdvo_get_dtd_from_mode(&output_dtd, 1303 intel_sdvo_get_dtd_from_mode(&output_dtd,
1309 intel_sdvo->sdvo_lvds_fixed_mode); 1304 intel_sdvo->sdvo_lvds_fixed_mode);
1310 else 1305 else
@@ -1325,13 +1320,13 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder,
1325 } else 1320 } else
1326 intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_DVI); 1321 intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_DVI);
1327 1322
1328 if (intel_sdvo->is_tv && 1323 if (IS_TV(intel_sdvo_connector) &&
1329 !intel_sdvo_set_tv_format(intel_sdvo, conn_state)) 1324 !intel_sdvo_set_tv_format(intel_sdvo, conn_state))
1330 return; 1325 return;
1331 1326
1332 intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode); 1327 intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode);
1333 1328
1334 if (intel_sdvo->is_tv || intel_sdvo->is_lvds) 1329 if (IS_TV(intel_sdvo_connector) || IS_LVDS(intel_sdvo_connector))
1335 input_dtd.part2.sdvo_flags = intel_sdvo->dtd_sdvo_flags; 1330 input_dtd.part2.sdvo_flags = intel_sdvo->dtd_sdvo_flags;
1336 if (!intel_sdvo_set_input_timing(intel_sdvo, &input_dtd)) 1331 if (!intel_sdvo_set_input_timing(intel_sdvo, &input_dtd))
1337 DRM_INFO("Setting input timings on %s failed\n", 1332 DRM_INFO("Setting input timings on %s failed\n",
@@ -1630,6 +1625,8 @@ intel_sdvo_mode_valid(struct drm_connector *connector,
1630 struct drm_display_mode *mode) 1625 struct drm_display_mode *mode)
1631{ 1626{
1632 struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector); 1627 struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
1628 struct intel_sdvo_connector *intel_sdvo_connector =
1629 to_intel_sdvo_connector(connector);
1633 int max_dotclk = to_i915(connector->dev)->max_dotclk_freq; 1630 int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
1634 1631
1635 if (mode->flags & DRM_MODE_FLAG_DBLSCAN) 1632 if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
@@ -1644,7 +1641,7 @@ intel_sdvo_mode_valid(struct drm_connector *connector,
1644 if (mode->clock > max_dotclk) 1641 if (mode->clock > max_dotclk)
1645 return MODE_CLOCK_HIGH; 1642 return MODE_CLOCK_HIGH;
1646 1643
1647 if (intel_sdvo->is_lvds) { 1644 if (IS_LVDS(intel_sdvo_connector)) {
1648 if (mode->hdisplay > intel_sdvo->sdvo_lvds_fixed_mode->hdisplay) 1645 if (mode->hdisplay > intel_sdvo->sdvo_lvds_fixed_mode->hdisplay)
1649 return MODE_PANEL; 1646 return MODE_PANEL;
1650 1647
@@ -1759,6 +1756,8 @@ static enum drm_connector_status
1759intel_sdvo_tmds_sink_detect(struct drm_connector *connector) 1756intel_sdvo_tmds_sink_detect(struct drm_connector *connector)
1760{ 1757{
1761 struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector); 1758 struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
1759 struct intel_sdvo_connector *intel_sdvo_connector =
1760 to_intel_sdvo_connector(connector);
1762 enum drm_connector_status status; 1761 enum drm_connector_status status;
1763 struct edid *edid; 1762 struct edid *edid;
1764 1763
@@ -1797,7 +1796,7 @@ intel_sdvo_tmds_sink_detect(struct drm_connector *connector)
1797 /* DDC bus is shared, match EDID to connector type */ 1796 /* DDC bus is shared, match EDID to connector type */
1798 if (edid->input & DRM_EDID_INPUT_DIGITAL) { 1797 if (edid->input & DRM_EDID_INPUT_DIGITAL) {
1799 status = connector_status_connected; 1798 status = connector_status_connected;
1800 if (intel_sdvo->is_hdmi) { 1799 if (intel_sdvo_connector->is_hdmi) {
1801 intel_sdvo->has_hdmi_monitor = drm_detect_hdmi_monitor(edid); 1800 intel_sdvo->has_hdmi_monitor = drm_detect_hdmi_monitor(edid);
1802 intel_sdvo->has_hdmi_audio = drm_detect_monitor_audio(edid); 1801 intel_sdvo->has_hdmi_audio = drm_detect_monitor_audio(edid);
1803 intel_sdvo->rgb_quant_range_selectable = 1802 intel_sdvo->rgb_quant_range_selectable =
@@ -1875,17 +1874,6 @@ intel_sdvo_detect(struct drm_connector *connector, bool force)
1875 ret = connector_status_connected; 1874 ret = connector_status_connected;
1876 } 1875 }
1877 1876
1878 /* May update encoder flag for like clock for SDVO TV, etc.*/
1879 if (ret == connector_status_connected) {
1880 intel_sdvo->is_tv = false;
1881 intel_sdvo->is_lvds = false;
1882
1883 if (response & SDVO_TV_MASK)
1884 intel_sdvo->is_tv = true;
1885 if (response & SDVO_LVDS_MASK)
1886 intel_sdvo->is_lvds = intel_sdvo->sdvo_lvds_fixed_mode != NULL;
1887 }
1888
1889 return ret; 1877 return ret;
1890} 1878}
1891 1879
@@ -2054,16 +2042,6 @@ static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
2054 * arranged in priority order. 2042 * arranged in priority order.
2055 */ 2043 */
2056 intel_ddc_get_modes(connector, &intel_sdvo->ddc); 2044 intel_ddc_get_modes(connector, &intel_sdvo->ddc);
2057
2058 list_for_each_entry(newmode, &connector->probed_modes, head) {
2059 if (newmode->type & DRM_MODE_TYPE_PREFERRED) {
2060 intel_sdvo->sdvo_lvds_fixed_mode =
2061 drm_mode_duplicate(connector->dev, newmode);
2062
2063 intel_sdvo->is_lvds = true;
2064 break;
2065 }
2066 }
2067} 2045}
2068 2046
2069static int intel_sdvo_get_modes(struct drm_connector *connector) 2047static int intel_sdvo_get_modes(struct drm_connector *connector)
@@ -2555,7 +2533,7 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
2555 if (INTEL_GEN(dev_priv) >= 4 && 2533 if (INTEL_GEN(dev_priv) >= 4 &&
2556 intel_sdvo_is_hdmi_connector(intel_sdvo, device)) { 2534 intel_sdvo_is_hdmi_connector(intel_sdvo, device)) {
2557 connector->connector_type = DRM_MODE_CONNECTOR_HDMIA; 2535 connector->connector_type = DRM_MODE_CONNECTOR_HDMIA;
2558 intel_sdvo->is_hdmi = true; 2536 intel_sdvo_connector->is_hdmi = true;
2559 } 2537 }
2560 2538
2561 if (intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo) < 0) { 2539 if (intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo) < 0) {
@@ -2563,7 +2541,7 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
2563 return false; 2541 return false;
2564 } 2542 }
2565 2543
2566 if (intel_sdvo->is_hdmi) 2544 if (intel_sdvo_connector->is_hdmi)
2567 intel_sdvo_add_hdmi_properties(intel_sdvo, intel_sdvo_connector); 2545 intel_sdvo_add_hdmi_properties(intel_sdvo, intel_sdvo_connector);
2568 2546
2569 return true; 2547 return true;
@@ -2591,8 +2569,6 @@ intel_sdvo_tv_init(struct intel_sdvo *intel_sdvo, int type)
2591 intel_sdvo->controlled_output |= type; 2569 intel_sdvo->controlled_output |= type;
2592 intel_sdvo_connector->output_flag = type; 2570 intel_sdvo_connector->output_flag = type;
2593 2571
2594 intel_sdvo->is_tv = true;
2595
2596 if (intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo) < 0) { 2572 if (intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo) < 0) {
2597 kfree(intel_sdvo_connector); 2573 kfree(intel_sdvo_connector);
2598 return false; 2574 return false;
@@ -2654,6 +2630,7 @@ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device)
2654 struct drm_connector *connector; 2630 struct drm_connector *connector;
2655 struct intel_connector *intel_connector; 2631 struct intel_connector *intel_connector;
2656 struct intel_sdvo_connector *intel_sdvo_connector; 2632 struct intel_sdvo_connector *intel_sdvo_connector;
2633 struct drm_display_mode *mode;
2657 2634
2658 DRM_DEBUG_KMS("initialising LVDS device %d\n", device); 2635 DRM_DEBUG_KMS("initialising LVDS device %d\n", device);
2659 2636
@@ -2682,6 +2659,19 @@ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device)
2682 if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector)) 2659 if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector))
2683 goto err; 2660 goto err;
2684 2661
2662 intel_sdvo_get_lvds_modes(connector);
2663
2664 list_for_each_entry(mode, &connector->probed_modes, head) {
2665 if (mode->type & DRM_MODE_TYPE_PREFERRED) {
2666 intel_sdvo->sdvo_lvds_fixed_mode =
2667 drm_mode_duplicate(connector->dev, mode);
2668 break;
2669 }
2670 }
2671
2672 if (!intel_sdvo->sdvo_lvds_fixed_mode)
2673 goto err;
2674
2685 return true; 2675 return true;
2686 2676
2687err: 2677err:
@@ -2692,9 +2682,6 @@ err:
2692static bool 2682static bool
2693intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo, uint16_t flags) 2683intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo, uint16_t flags)
2694{ 2684{
2695 intel_sdvo->is_tv = false;
2696 intel_sdvo->is_lvds = false;
2697
2698 /* SDVO requires XXX1 function may not exist unless it has XXX0 function.*/ 2685 /* SDVO requires XXX1 function may not exist unless it has XXX0 function.*/
2699 2686
2700 if (flags & SDVO_OUTPUT_TMDS0) 2687 if (flags & SDVO_OUTPUT_TMDS0)
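The hunks above replace the encoder-wide intel_sdvo->is_tv/is_lvds/is_hdmi booleans with per-connector state, so a multi-output SDVO encoder no longer mutates shared flags during detect(). A minimal sketch of the IS_TV()/IS_LVDS() helpers the new code keys on; their real definitions sit outside the quoted hunks, so treat the exact form as an assumption:

	/*
	 * Hypothetical reconstruction: classify a connector by its own
	 * output_flag instead of a boolean cached on the shared encoder.
	 */
	#define IS_TV(c)   ((c)->output_flag & SDVO_TV_MASK)
	#define IS_LVDS(c) ((c)->output_flag & SDVO_LVDS_MASK)

With the type derived from conn_state->connector, the detect-time block that used to recompute is_tv/is_lvds (deleted above) becomes unnecessary.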
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 9600ccfc5b76..d4c8e10fc90b 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -230,6 +230,56 @@ void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state)
230#endif 230#endif
231} 231}
232 232
233int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state)
234{
235 const struct drm_framebuffer *fb = plane_state->base.fb;
236 struct drm_rect *src = &plane_state->base.src;
237 u32 src_x, src_y, src_w, src_h;
238
239 /*
240 * Hardware doesn't handle subpixel coordinates.
241 * Adjust to (macro)pixel boundary, but be careful not to
242 * increase the source viewport size, because that could
243 * push the downscaling factor out of bounds.
244 */
245 src_x = src->x1 >> 16;
246 src_w = drm_rect_width(src) >> 16;
247 src_y = src->y1 >> 16;
248 src_h = drm_rect_height(src) >> 16;
249
250 src->x1 = src_x << 16;
251 src->x2 = (src_x + src_w) << 16;
252 src->y1 = src_y << 16;
253 src->y2 = (src_y + src_h) << 16;
254
255 if (fb->format->is_yuv &&
256 fb->format->format != DRM_FORMAT_NV12 &&
257 (src_x & 1 || src_w & 1)) {
258 DRM_DEBUG_KMS("src x/w (%u, %u) must be a multiple of 2 for YUV planes\n",
259 src_x, src_w);
260 return -EINVAL;
261 }
262
263 return 0;
264}
265
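intel_plane_check_src_coordinates() operates on 16.16 fixed-point source coordinates. A standalone arithmetic sketch of the truncation it performs, using assumed sample values rather than driver code:

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		/* src x1 = 3.5, width = 2.5 pixels, in 16.16 fixed point */
		uint32_t x1 = (3u << 16) | 0x8000;
		uint32_t w  = (2u << 16) | 0x8000;

		uint32_t src_x = x1 >> 16;	/* 3: truncated, never rounded up */
		uint32_t src_w = w >> 16;	/* 2: the viewport may shrink */

		/*
		 * Written back as [3.0, 5.0): the source never grows, so the
		 * downscale factor cannot be pushed out of bounds.
		 */
		assert((src_x << 16) == (3u << 16));
		assert(((src_x + src_w) << 16) == (5u << 16));
		return 0;
	}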
266unsigned int
267skl_plane_max_stride(struct intel_plane *plane,
268 u32 pixel_format, u64 modifier,
269 unsigned int rotation)
270{
271 int cpp = drm_format_plane_cpp(pixel_format, 0);
272
273 /*
274 * "The stride in bytes must not exceed the
275 * of the size of 8K pixels and 32K bytes."
276 */
277 if (drm_rotation_90_or_270(rotation))
278 return min(8192, 32768 / cpp);
279 else
280 return min(8192 * cpp, 32768);
281}
282
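Worked numbers for skl_plane_max_stride(), assuming a 64bpp format (cpp = 8); note that the returned unit follows the rotation, pixels for 90/270 and bytes otherwise:

	#include <assert.h>

	#define min(a, b) ((a) < (b) ? (a) : (b))

	int main(void)
	{
		int cpp = 8;	/* assumed: 16:16:16:16 RGB, 8 bytes per pixel */

		/* 90/270: the 32K-byte bound, converted to pixels, wins */
		assert(min(8192, 32768 / cpp) == 4096);

		/* 0/180: the 32K-byte bound wins again, returned in bytes */
		assert(min(8192 * cpp, 32768) == 32768);
		return 0;
	}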
233void 283void
234skl_update_plane(struct intel_plane *plane, 284skl_update_plane(struct intel_plane *plane,
235 const struct intel_crtc_state *crtc_state, 285 const struct intel_crtc_state *crtc_state,
@@ -241,16 +291,15 @@ skl_update_plane(struct intel_plane *plane,
241 enum pipe pipe = plane->pipe; 291 enum pipe pipe = plane->pipe;
242 u32 plane_ctl = plane_state->ctl; 292 u32 plane_ctl = plane_state->ctl;
243 const struct drm_intel_sprite_colorkey *key = &plane_state->ckey; 293 const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
244 u32 surf_addr = plane_state->main.offset; 294 u32 surf_addr = plane_state->color_plane[0].offset;
245 unsigned int rotation = plane_state->base.rotation; 295 u32 stride = skl_plane_stride(plane_state, 0);
246 u32 stride = skl_plane_stride(fb, 0, rotation); 296 u32 aux_stride = skl_plane_stride(plane_state, 1);
247 u32 aux_stride = skl_plane_stride(fb, 1, rotation);
248 int crtc_x = plane_state->base.dst.x1; 297 int crtc_x = plane_state->base.dst.x1;
249 int crtc_y = plane_state->base.dst.y1; 298 int crtc_y = plane_state->base.dst.y1;
250 uint32_t crtc_w = drm_rect_width(&plane_state->base.dst); 299 uint32_t crtc_w = drm_rect_width(&plane_state->base.dst);
251 uint32_t crtc_h = drm_rect_height(&plane_state->base.dst); 300 uint32_t crtc_h = drm_rect_height(&plane_state->base.dst);
252 uint32_t x = plane_state->main.x; 301 uint32_t x = plane_state->color_plane[0].x;
253 uint32_t y = plane_state->main.y; 302 uint32_t y = plane_state->color_plane[0].y;
254 uint32_t src_w = drm_rect_width(&plane_state->base.src) >> 16; 303 uint32_t src_w = drm_rect_width(&plane_state->base.src) >> 16;
255 uint32_t src_h = drm_rect_height(&plane_state->base.src) >> 16; 304 uint32_t src_h = drm_rect_height(&plane_state->base.src) >> 16;
256 unsigned long irqflags; 305 unsigned long irqflags;
@@ -277,9 +326,10 @@ skl_update_plane(struct intel_plane *plane,
277 I915_WRITE_FW(PLANE_STRIDE(pipe, plane_id), stride); 326 I915_WRITE_FW(PLANE_STRIDE(pipe, plane_id), stride);
278 I915_WRITE_FW(PLANE_SIZE(pipe, plane_id), (src_h << 16) | src_w); 327 I915_WRITE_FW(PLANE_SIZE(pipe, plane_id), (src_h << 16) | src_w);
279 I915_WRITE_FW(PLANE_AUX_DIST(pipe, plane_id), 328 I915_WRITE_FW(PLANE_AUX_DIST(pipe, plane_id),
280 (plane_state->aux.offset - surf_addr) | aux_stride); 329 (plane_state->color_plane[1].offset - surf_addr) | aux_stride);
281 I915_WRITE_FW(PLANE_AUX_OFFSET(pipe, plane_id), 330 I915_WRITE_FW(PLANE_AUX_OFFSET(pipe, plane_id),
282 (plane_state->aux.y << 16) | plane_state->aux.x); 331 (plane_state->color_plane[1].y << 16) |
332 plane_state->color_plane[1].x);
283 333
284 /* program plane scaler */ 334 /* program plane scaler */
285 if (plane_state->scaler_id >= 0) { 335 if (plane_state->scaler_id >= 0) {
@@ -545,15 +595,15 @@ vlv_update_plane(struct intel_plane *plane,
545 enum pipe pipe = plane->pipe; 595 enum pipe pipe = plane->pipe;
546 enum plane_id plane_id = plane->id; 596 enum plane_id plane_id = plane->id;
547 u32 sprctl = plane_state->ctl; 597 u32 sprctl = plane_state->ctl;
548 u32 sprsurf_offset = plane_state->main.offset; 598 u32 sprsurf_offset = plane_state->color_plane[0].offset;
549 u32 linear_offset; 599 u32 linear_offset;
550 const struct drm_intel_sprite_colorkey *key = &plane_state->ckey; 600 const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
551 int crtc_x = plane_state->base.dst.x1; 601 int crtc_x = plane_state->base.dst.x1;
552 int crtc_y = plane_state->base.dst.y1; 602 int crtc_y = plane_state->base.dst.y1;
553 uint32_t crtc_w = drm_rect_width(&plane_state->base.dst); 603 uint32_t crtc_w = drm_rect_width(&plane_state->base.dst);
554 uint32_t crtc_h = drm_rect_height(&plane_state->base.dst); 604 uint32_t crtc_h = drm_rect_height(&plane_state->base.dst);
555 uint32_t x = plane_state->main.x; 605 uint32_t x = plane_state->color_plane[0].x;
556 uint32_t y = plane_state->main.y; 606 uint32_t y = plane_state->color_plane[0].y;
557 unsigned long irqflags; 607 unsigned long irqflags;
558 608
559 /* Sizes are 0 based */ 609 /* Sizes are 0 based */
@@ -574,7 +624,8 @@ vlv_update_plane(struct intel_plane *plane,
574 I915_WRITE_FW(SPKEYMAXVAL(pipe, plane_id), key->max_value); 624 I915_WRITE_FW(SPKEYMAXVAL(pipe, plane_id), key->max_value);
575 I915_WRITE_FW(SPKEYMSK(pipe, plane_id), key->channel_mask); 625 I915_WRITE_FW(SPKEYMSK(pipe, plane_id), key->channel_mask);
576 } 626 }
577 I915_WRITE_FW(SPSTRIDE(pipe, plane_id), fb->pitches[0]); 627 I915_WRITE_FW(SPSTRIDE(pipe, plane_id),
628 plane_state->color_plane[0].stride);
578 I915_WRITE_FW(SPPOS(pipe, plane_id), (crtc_y << 16) | crtc_x); 629 I915_WRITE_FW(SPPOS(pipe, plane_id), (crtc_y << 16) | crtc_x);
579 630
580 if (fb->modifier == I915_FORMAT_MOD_X_TILED) 631 if (fb->modifier == I915_FORMAT_MOD_X_TILED)
@@ -704,15 +755,15 @@ ivb_update_plane(struct intel_plane *plane,
704 const struct drm_framebuffer *fb = plane_state->base.fb; 755 const struct drm_framebuffer *fb = plane_state->base.fb;
705 enum pipe pipe = plane->pipe; 756 enum pipe pipe = plane->pipe;
706 u32 sprctl = plane_state->ctl, sprscale = 0; 757 u32 sprctl = plane_state->ctl, sprscale = 0;
707 u32 sprsurf_offset = plane_state->main.offset; 758 u32 sprsurf_offset = plane_state->color_plane[0].offset;
708 u32 linear_offset; 759 u32 linear_offset;
709 const struct drm_intel_sprite_colorkey *key = &plane_state->ckey; 760 const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
710 int crtc_x = plane_state->base.dst.x1; 761 int crtc_x = plane_state->base.dst.x1;
711 int crtc_y = plane_state->base.dst.y1; 762 int crtc_y = plane_state->base.dst.y1;
712 uint32_t crtc_w = drm_rect_width(&plane_state->base.dst); 763 uint32_t crtc_w = drm_rect_width(&plane_state->base.dst);
713 uint32_t crtc_h = drm_rect_height(&plane_state->base.dst); 764 uint32_t crtc_h = drm_rect_height(&plane_state->base.dst);
714 uint32_t x = plane_state->main.x; 765 uint32_t x = plane_state->color_plane[0].x;
715 uint32_t y = plane_state->main.y; 766 uint32_t y = plane_state->color_plane[0].y;
716 uint32_t src_w = drm_rect_width(&plane_state->base.src) >> 16; 767 uint32_t src_w = drm_rect_width(&plane_state->base.src) >> 16;
717 uint32_t src_h = drm_rect_height(&plane_state->base.src) >> 16; 768 uint32_t src_h = drm_rect_height(&plane_state->base.src) >> 16;
718 unsigned long irqflags; 769 unsigned long irqflags;
@@ -736,7 +787,7 @@ ivb_update_plane(struct intel_plane *plane,
736 I915_WRITE_FW(SPRKEYMSK(pipe), key->channel_mask); 787 I915_WRITE_FW(SPRKEYMSK(pipe), key->channel_mask);
737 } 788 }
738 789
739 I915_WRITE_FW(SPRSTRIDE(pipe), fb->pitches[0]); 790 I915_WRITE_FW(SPRSTRIDE(pipe), plane_state->color_plane[0].stride);
740 I915_WRITE_FW(SPRPOS(pipe), (crtc_y << 16) | crtc_x); 791 I915_WRITE_FW(SPRPOS(pipe), (crtc_y << 16) | crtc_x);
741 792
742 /* HSW consolidates SPRTILEOFF and SPRLINOFF into a single SPROFFSET 793 /* HSW consolidates SPRTILEOFF and SPRLINOFF into a single SPROFFSET
@@ -749,7 +800,7 @@ ivb_update_plane(struct intel_plane *plane,
749 I915_WRITE_FW(SPRLINOFF(pipe), linear_offset); 800 I915_WRITE_FW(SPRLINOFF(pipe), linear_offset);
750 801
751 I915_WRITE_FW(SPRSIZE(pipe), (crtc_h << 16) | crtc_w); 802 I915_WRITE_FW(SPRSIZE(pipe), (crtc_h << 16) | crtc_w);
752 if (plane->can_scale) 803 if (IS_IVYBRIDGE(dev_priv))
753 I915_WRITE_FW(SPRSCALE(pipe), sprscale); 804 I915_WRITE_FW(SPRSCALE(pipe), sprscale);
754 I915_WRITE_FW(SPRCTL(pipe), sprctl); 805 I915_WRITE_FW(SPRCTL(pipe), sprctl);
755 I915_WRITE_FW(SPRSURF(pipe), 806 I915_WRITE_FW(SPRSURF(pipe),
@@ -770,7 +821,7 @@ ivb_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc)
770 821
771 I915_WRITE_FW(SPRCTL(pipe), 0); 822 I915_WRITE_FW(SPRCTL(pipe), 0);
772 /* Can't leave the scaler enabled... */ 823 /* Can't leave the scaler enabled... */
773 if (plane->can_scale) 824 if (IS_IVYBRIDGE(dev_priv))
774 I915_WRITE_FW(SPRSCALE(pipe), 0); 825 I915_WRITE_FW(SPRSCALE(pipe), 0);
775 826
776 I915_WRITE_FW(SPRSURF(pipe), 0); 827 I915_WRITE_FW(SPRSURF(pipe), 0);
@@ -800,6 +851,14 @@ ivb_plane_get_hw_state(struct intel_plane *plane,
800 return ret; 851 return ret;
801} 852}
802 853
854static unsigned int
855g4x_sprite_max_stride(struct intel_plane *plane,
856 u32 pixel_format, u64 modifier,
857 unsigned int rotation)
858{
859 return 16384;
860}
861
803static u32 g4x_sprite_ctl(const struct intel_crtc_state *crtc_state, 862static u32 g4x_sprite_ctl(const struct intel_crtc_state *crtc_state,
804 const struct intel_plane_state *plane_state) 863 const struct intel_plane_state *plane_state)
805{ 864{
@@ -868,15 +927,15 @@ g4x_update_plane(struct intel_plane *plane,
868 const struct drm_framebuffer *fb = plane_state->base.fb; 927 const struct drm_framebuffer *fb = plane_state->base.fb;
869 enum pipe pipe = plane->pipe; 928 enum pipe pipe = plane->pipe;
870 u32 dvscntr = plane_state->ctl, dvsscale = 0; 929 u32 dvscntr = plane_state->ctl, dvsscale = 0;
871 u32 dvssurf_offset = plane_state->main.offset; 930 u32 dvssurf_offset = plane_state->color_plane[0].offset;
872 u32 linear_offset; 931 u32 linear_offset;
873 const struct drm_intel_sprite_colorkey *key = &plane_state->ckey; 932 const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
874 int crtc_x = plane_state->base.dst.x1; 933 int crtc_x = plane_state->base.dst.x1;
875 int crtc_y = plane_state->base.dst.y1; 934 int crtc_y = plane_state->base.dst.y1;
876 uint32_t crtc_w = drm_rect_width(&plane_state->base.dst); 935 uint32_t crtc_w = drm_rect_width(&plane_state->base.dst);
877 uint32_t crtc_h = drm_rect_height(&plane_state->base.dst); 936 uint32_t crtc_h = drm_rect_height(&plane_state->base.dst);
878 uint32_t x = plane_state->main.x; 937 uint32_t x = plane_state->color_plane[0].x;
879 uint32_t y = plane_state->main.y; 938 uint32_t y = plane_state->color_plane[0].y;
880 uint32_t src_w = drm_rect_width(&plane_state->base.src) >> 16; 939 uint32_t src_w = drm_rect_width(&plane_state->base.src) >> 16;
881 uint32_t src_h = drm_rect_height(&plane_state->base.src) >> 16; 940 uint32_t src_h = drm_rect_height(&plane_state->base.src) >> 16;
882 unsigned long irqflags; 941 unsigned long irqflags;
@@ -900,7 +959,7 @@ g4x_update_plane(struct intel_plane *plane,
900 I915_WRITE_FW(DVSKEYMSK(pipe), key->channel_mask); 959 I915_WRITE_FW(DVSKEYMSK(pipe), key->channel_mask);
901 } 960 }
902 961
903 I915_WRITE_FW(DVSSTRIDE(pipe), fb->pitches[0]); 962 I915_WRITE_FW(DVSSTRIDE(pipe), plane_state->color_plane[0].stride);
904 I915_WRITE_FW(DVSPOS(pipe), (crtc_y << 16) | crtc_x); 963 I915_WRITE_FW(DVSPOS(pipe), (crtc_y << 16) | crtc_x);
905 964
906 if (fb->modifier == I915_FORMAT_MOD_X_TILED) 965 if (fb->modifier == I915_FORMAT_MOD_X_TILED)
@@ -959,144 +1018,309 @@ g4x_plane_get_hw_state(struct intel_plane *plane,
959} 1018}
960 1019
961static int 1020static int
962intel_check_sprite_plane(struct intel_crtc_state *crtc_state, 1021g4x_sprite_check_scaling(struct intel_crtc_state *crtc_state,
963 struct intel_plane_state *state) 1022 struct intel_plane_state *plane_state)
964{ 1023{
965 struct intel_plane *plane = to_intel_plane(state->base.plane); 1024 const struct drm_framebuffer *fb = plane_state->base.fb;
966 struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 1025 const struct drm_rect *src = &plane_state->base.src;
967 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 1026 const struct drm_rect *dst = &plane_state->base.dst;
968 struct drm_framebuffer *fb = state->base.fb; 1027 int src_x, src_y, src_w, src_h, crtc_w, crtc_h;
969 int max_stride = INTEL_GEN(dev_priv) >= 9 ? 32768 : 16384; 1028 const struct drm_display_mode *adjusted_mode =
970 int max_scale, min_scale; 1029 &crtc_state->base.adjusted_mode;
971 bool can_scale; 1030 unsigned int cpp = fb->format->cpp[0];
972 int ret; 1031 unsigned int width_bytes;
973 uint32_t pixel_format = 0; 1032 int min_width, min_height;
974 1033
975 if (!fb) { 1034 crtc_w = drm_rect_width(dst);
976 state->base.visible = false; 1035 crtc_h = drm_rect_height(dst);
1036
1037 src_x = src->x1 >> 16;
1038 src_y = src->y1 >> 16;
1039 src_w = drm_rect_width(src) >> 16;
1040 src_h = drm_rect_height(src) >> 16;
1041
1042 if (src_w == crtc_w && src_h == crtc_h)
977 return 0; 1043 return 0;
1044
1045 min_width = 3;
1046
1047 if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
1048 if (src_h & 1) {
1049 DRM_DEBUG_KMS("Source height must be even with interlaced modes\n");
1050 return -EINVAL;
1051 }
1052 min_height = 6;
1053 } else {
1054 min_height = 3;
978 } 1055 }
979 1056
980 /* Don't modify another pipe's plane */ 1057 width_bytes = ((src_x * cpp) & 63) + src_w * cpp;
981 if (plane->pipe != crtc->pipe) { 1058
982 DRM_DEBUG_KMS("Wrong plane <-> crtc mapping\n"); 1059 if (src_w < min_width || src_h < min_height ||
1060 src_w > 2048 || src_h > 2048) {
1061 DRM_DEBUG_KMS("Source dimensions (%dx%d) exceed hardware limits (%dx%d - %dx%d)\n",
1062 src_w, src_h, min_width, min_height, 2048, 2048);
983 return -EINVAL; 1063 return -EINVAL;
984 } 1064 }
985 1065
986 /* FIXME check all gen limits */ 1066 if (width_bytes > 4096) {
987 if (fb->width < 3 || fb->height < 3 || fb->pitches[0] > max_stride) { 1067 DRM_DEBUG_KMS("Fetch width (%d) exceeds hardware max with scaling (%u)\n",
988 DRM_DEBUG_KMS("Unsuitable framebuffer for plane\n"); 1068 width_bytes, 4096);
989 return -EINVAL; 1069 return -EINVAL;
990 } 1070 }
991 1071
992 /* setup can_scale, min_scale, max_scale */ 1072 if (width_bytes > 4096 || fb->pitches[0] > 4096) {
993 if (INTEL_GEN(dev_priv) >= 9) { 1073 DRM_DEBUG_KMS("Stride (%u) exceeds hardware max with scaling (%u)\n",
994 if (state->base.fb) 1074 fb->pitches[0], 4096);
995 pixel_format = state->base.fb->format->format; 1075 return -EINVAL;
996 /* use scaler when colorkey is not required */ 1076 }
997 if (!state->ckey.flags) { 1077
998 can_scale = 1; 1078 return 0;
999 min_scale = 1; 1079}
1000 max_scale = 1080
1001 skl_max_scale(crtc, crtc_state, pixel_format); 1081static int
1002 } else { 1082g4x_sprite_check(struct intel_crtc_state *crtc_state,
1003 can_scale = 0; 1083 struct intel_plane_state *plane_state)
1004 min_scale = DRM_PLANE_HELPER_NO_SCALING; 1084{
1005 max_scale = DRM_PLANE_HELPER_NO_SCALING; 1085 struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
1006 } 1086 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
1087 int max_scale, min_scale;
1088 int ret;
1089
1090 if (INTEL_GEN(dev_priv) < 7) {
1091 min_scale = 1;
1092 max_scale = 16 << 16;
1093 } else if (IS_IVYBRIDGE(dev_priv)) {
1094 min_scale = 1;
1095 max_scale = 2 << 16;
1007 } else { 1096 } else {
1008 can_scale = plane->can_scale; 1097 min_scale = DRM_PLANE_HELPER_NO_SCALING;
1009 max_scale = plane->max_downscale << 16; 1098 max_scale = DRM_PLANE_HELPER_NO_SCALING;
1010 min_scale = plane->can_scale ? 1 : (1 << 16);
1011 } 1099 }
1012 1100
1013 ret = drm_atomic_helper_check_plane_state(&state->base, 1101 ret = drm_atomic_helper_check_plane_state(&plane_state->base,
1014 &crtc_state->base, 1102 &crtc_state->base,
1015 min_scale, max_scale, 1103 min_scale, max_scale,
1016 true, true); 1104 true, true);
1017 if (ret) 1105 if (ret)
1018 return ret; 1106 return ret;
1019 1107
1020 if (state->base.visible) { 1108 if (!plane_state->base.visible)
1021 struct drm_rect *src = &state->base.src; 1109 return 0;
1022 struct drm_rect *dst = &state->base.dst; 1110
1023 unsigned int crtc_w = drm_rect_width(dst); 1111 ret = intel_plane_check_src_coordinates(plane_state);
1024 unsigned int crtc_h = drm_rect_height(dst); 1112 if (ret)
1025 uint32_t src_x, src_y, src_w, src_h; 1113 return ret;
1114
1115 ret = g4x_sprite_check_scaling(crtc_state, plane_state);
1116 if (ret)
1117 return ret;
1118
1119 ret = i9xx_check_plane_surface(plane_state);
1120 if (ret)
1121 return ret;
1122
1123 if (INTEL_GEN(dev_priv) >= 7)
1124 plane_state->ctl = ivb_sprite_ctl(crtc_state, plane_state);
1125 else
1126 plane_state->ctl = g4x_sprite_ctl(crtc_state, plane_state);
1127
1128 return 0;
1129}
1130
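In g4x_sprite_check_scaling() above, width_bytes models the fetch starting on a 64-byte boundary: the lead-in between that boundary and the first pixel is charged against the 4096-byte scaled-fetch limit. A standalone check with assumed values:

	#include <assert.h>

	int main(void)
	{
		unsigned int cpp = 4, src_x = 100, src_w = 1000;	/* assumed */

		/* (400 & 63) = 16 bytes of lead-in + 4000 bytes of pixels */
		unsigned int width_bytes = ((src_x * cpp) & 63) + src_w * cpp;

		assert(width_bytes == 4016);	/* just under the 4096 limit */
		return 0;
	}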
1131int chv_plane_check_rotation(const struct intel_plane_state *plane_state)
1132{
1133 struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
1134 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
1135 unsigned int rotation = plane_state->base.rotation;
1136
1137 /* CHV ignores the mirror bit when the rotate bit is set :( */
1138 if (IS_CHERRYVIEW(dev_priv) &&
1139 rotation & DRM_MODE_ROTATE_180 &&
1140 rotation & DRM_MODE_REFLECT_X) {
1141 DRM_DEBUG_KMS("Cannot rotate and reflect at the same time\n");
1142 return -EINVAL;
1143 }
1144
1145 return 0;
1146}
1147
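chv_plane_check_rotation() rejects exactly one combination. A hedged fragment showing a plane rotation that would fail it:

	/*
	 * CHV ignores the mirror bit once 180-degree rotation is set, so
	 * requesting both must be refused rather than silently dropped:
	 */
	unsigned int rotation = DRM_MODE_ROTATE_180 | DRM_MODE_REFLECT_X;
	/* -> chv_plane_check_rotation() returns -EINVAL for this state */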
1148static int
1149vlv_sprite_check(struct intel_crtc_state *crtc_state,
1150 struct intel_plane_state *plane_state)
1151{
1152 int ret;
1153
1154 ret = chv_plane_check_rotation(plane_state);
1155 if (ret)
1156 return ret;
1157
1158 ret = drm_atomic_helper_check_plane_state(&plane_state->base,
1159 &crtc_state->base,
1160 DRM_PLANE_HELPER_NO_SCALING,
1161 DRM_PLANE_HELPER_NO_SCALING,
1162 true, true);
1163 if (ret)
1164 return ret;
1165
1166 if (!plane_state->base.visible)
1167 return 0;
1168
1169 ret = intel_plane_check_src_coordinates(plane_state);
1170 if (ret)
1171 return ret;
1172
1173 ret = i9xx_check_plane_surface(plane_state);
1174 if (ret)
1175 return ret;
1176
1177 plane_state->ctl = vlv_sprite_ctl(crtc_state, plane_state);
1178
1179 return 0;
1180}
1181
1182static int skl_plane_check_fb(const struct intel_crtc_state *crtc_state,
1183 const struct intel_plane_state *plane_state)
1184{
1185 const struct drm_framebuffer *fb = plane_state->base.fb;
1186 unsigned int rotation = plane_state->base.rotation;
1187 struct drm_format_name_buf format_name;
1188
1189 if (!fb)
1190 return 0;
1191
1192 if (rotation & ~(DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180) &&
1193 is_ccs_modifier(fb->modifier)) {
1194 DRM_DEBUG_KMS("RC support only with 0/180 degree rotation (%x)\n",
1195 rotation);
1196 return -EINVAL;
1197 }
1198
1199 if (rotation & DRM_MODE_REFLECT_X &&
1200 fb->modifier == DRM_FORMAT_MOD_LINEAR) {
1201 DRM_DEBUG_KMS("horizontal flip is not supported with linear surface formats\n");
1202 return -EINVAL;
1203 }
1204
1205 if (drm_rotation_90_or_270(rotation)) {
1206 if (fb->modifier != I915_FORMAT_MOD_Y_TILED &&
1207 fb->modifier != I915_FORMAT_MOD_Yf_TILED) {
1208 DRM_DEBUG_KMS("Y/Yf tiling required for 90/270!\n");
1209 return -EINVAL;
1210 }
1026 1211
1027 /* 1212 /*
1028 * Hardware doesn't handle subpixel coordinates. 1213 * 90/270 is not allowed with RGB64 16:16:16:16,
1029 * Adjust to (macro)pixel boundary, but be careful not to 1214 * RGB 16-bit 5:6:5, and Indexed 8-bit.
1030 * increase the source viewport size, because that could 1215 * TBD: Add RGB64 case once its added in supported format list.
1031 * push the downscaling factor out of bounds.
1032 */ 1216 */
1033 src_x = src->x1 >> 16; 1217 switch (fb->format->format) {
1034 src_w = drm_rect_width(src) >> 16; 1218 case DRM_FORMAT_C8:
1035 src_y = src->y1 >> 16; 1219 case DRM_FORMAT_RGB565:
1036 src_h = drm_rect_height(src) >> 16; 1220 DRM_DEBUG_KMS("Unsupported pixel format %s for 90/270!\n",
1037 1221 drm_get_format_name(fb->format->format,
1038 src->x1 = src_x << 16; 1222 &format_name));
1039 src->x2 = (src_x + src_w) << 16;
1040 src->y1 = src_y << 16;
1041 src->y2 = (src_y + src_h) << 16;
1042
1043 if (fb->format->is_yuv &&
1044 fb->format->format != DRM_FORMAT_NV12 &&
1045 (src_x % 2 || src_w % 2)) {
1046 DRM_DEBUG_KMS("src x/w (%u, %u) must be a multiple of 2 for YUV planes\n",
1047 src_x, src_w);
1048 return -EINVAL; 1223 return -EINVAL;
1224 default:
1225 break;
1049 } 1226 }
1227 }
1050 1228
1051 /* Check size restrictions when scaling */ 1229 /* Y-tiling is not supported in IF-ID Interlace mode */
1052 if (src_w != crtc_w || src_h != crtc_h) { 1230 if (crtc_state->base.enable &&
1053 unsigned int width_bytes; 1231 crtc_state->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE &&
1054 int cpp = fb->format->cpp[0]; 1232 (fb->modifier == I915_FORMAT_MOD_Y_TILED ||
1233 fb->modifier == I915_FORMAT_MOD_Yf_TILED ||
1234 fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
1235 fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS)) {
1236 DRM_DEBUG_KMS("Y/Yf tiling not supported in IF-ID mode\n");
1237 return -EINVAL;
1238 }
1055 1239
1056 WARN_ON(!can_scale); 1240 return 0;
1241}
1057 1242
1058 width_bytes = ((src_x * cpp) & 63) + src_w * cpp; 1243static int skl_plane_check_dst_coordinates(const struct intel_crtc_state *crtc_state,
1244 const struct intel_plane_state *plane_state)
1245{
1246 struct drm_i915_private *dev_priv =
1247 to_i915(plane_state->base.plane->dev);
1248 int crtc_x = plane_state->base.dst.x1;
1249 int crtc_w = drm_rect_width(&plane_state->base.dst);
1250 int pipe_src_w = crtc_state->pipe_src_w;
1059 1251
1060 /* FIXME interlacing min height is 6 */ 1252 /*
1061 if (INTEL_GEN(dev_priv) < 9 && ( 1253 * Display WA #1175: cnl,glk
1062 src_w < 3 || src_h < 3 || 1254 * Planes other than the cursor may cause FIFO underflow and display
1063 src_w > 2048 || src_h > 2048 || 1255 * corruption if starting less than 4 pixels from the right edge of
1064 crtc_w < 3 || crtc_h < 3 || 1256 * the screen.
1065 width_bytes > 4096 || fb->pitches[0] > 4096)) { 1257 * Besides the above WA fix the similar problem, where planes other
1066 DRM_DEBUG_KMS("Source dimensions exceed hardware limits\n"); 1258 * than the cursor ending less than 4 pixels from the left edge of the
1067 return -EINVAL; 1259 * screen may cause FIFO underflow and display corruption.
1068 } 1260 */
1069 } 1261 if ((IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) &&
1262 (crtc_x + crtc_w < 4 || crtc_x > pipe_src_w - 4)) {
1263 DRM_DEBUG_KMS("requested plane X %s position %d invalid (valid range %d-%d)\n",
1264 crtc_x + crtc_w < 4 ? "end" : "start",
1265 crtc_x + crtc_w < 4 ? crtc_x + crtc_w : crtc_x,
1266 4, pipe_src_w - 4);
1267 return -ERANGE;
1070 } 1268 }
1071 1269
1072 if (INTEL_GEN(dev_priv) >= 9) { 1270 return 0;
1073 ret = skl_check_plane_surface(crtc_state, state); 1271}
1074 if (ret)
1075 return ret;
1076 1272
1077 state->ctl = skl_plane_ctl(crtc_state, state); 1273int skl_plane_check(struct intel_crtc_state *crtc_state,
1078 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { 1274 struct intel_plane_state *plane_state)
1079 ret = i9xx_check_plane_surface(state); 1275{
1080 if (ret) 1276 struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
1081 return ret; 1277 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
1278 int max_scale, min_scale;
1279 int ret;
1082 1280
1083 state->ctl = vlv_sprite_ctl(crtc_state, state); 1281 ret = skl_plane_check_fb(crtc_state, plane_state);
1084 } else if (INTEL_GEN(dev_priv) >= 7) { 1282 if (ret)
1085 ret = i9xx_check_plane_surface(state); 1283 return ret;
1086 if (ret)
1087 return ret;
1088 1284
1089 state->ctl = ivb_sprite_ctl(crtc_state, state); 1285 /* use scaler when colorkey is not required */
1090 } else { 1286 if (!plane_state->ckey.flags) {
1091 ret = i9xx_check_plane_surface(state); 1287 const struct drm_framebuffer *fb = plane_state->base.fb;
1092 if (ret)
1093 return ret;
1094 1288
1095 state->ctl = g4x_sprite_ctl(crtc_state, state); 1289 min_scale = 1;
1290 max_scale = skl_max_scale(crtc_state,
1291 fb ? fb->format->format : 0);
1292 } else {
1293 min_scale = DRM_PLANE_HELPER_NO_SCALING;
1294 max_scale = DRM_PLANE_HELPER_NO_SCALING;
1096 } 1295 }
1097 1296
1297 ret = drm_atomic_helper_check_plane_state(&plane_state->base,
1298 &crtc_state->base,
1299 min_scale, max_scale,
1300 true, true);
1301 if (ret)
1302 return ret;
1303
1304 if (!plane_state->base.visible)
1305 return 0;
1306
1307 ret = skl_plane_check_dst_coordinates(crtc_state, plane_state);
1308 if (ret)
1309 return ret;
1310
1311 ret = intel_plane_check_src_coordinates(plane_state);
1312 if (ret)
1313 return ret;
1314
1315 ret = skl_check_plane_surface(plane_state);
1316 if (ret)
1317 return ret;
1318
1319 plane_state->ctl = skl_plane_ctl(crtc_state, plane_state);
1320
1098 if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) 1321 if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
1099 state->color_ctl = glk_plane_color_ctl(crtc_state, state); 1322 plane_state->color_ctl = glk_plane_color_ctl(crtc_state,
1323 plane_state);
1100 1324
1101 return 0; 1325 return 0;
1102} 1326}
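skl_plane_check_dst_coordinates() encodes Display WA #1175 for GLK/CNL: a non-cursor plane may neither end within 4 pixels of the left screen edge nor start within 4 pixels of the right one. A fragment with assumed numbers:

	int pipe_src_w = 1920;			/* assumed pipe width */
	int crtc_x = 1918, crtc_w = 100;	/* starts 2 px from the right edge */

	/*
	 * crtc_x > pipe_src_w - 4, i.e. 1918 > 1916, so this placement is
	 * rejected with -ERANGE; valid X start positions run up to 1916.
	 */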
@@ -1523,15 +1747,16 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
1523 intel_plane->base.state = &state->base; 1747 intel_plane->base.state = &state->base;
1524 1748
1525 if (INTEL_GEN(dev_priv) >= 9) { 1749 if (INTEL_GEN(dev_priv) >= 9) {
1526 intel_plane->can_scale = true;
1527 state->scaler_id = -1; 1750 state->scaler_id = -1;
1528 1751
1529 intel_plane->has_ccs = skl_plane_has_ccs(dev_priv, pipe, 1752 intel_plane->has_ccs = skl_plane_has_ccs(dev_priv, pipe,
1530 PLANE_SPRITE0 + plane); 1753 PLANE_SPRITE0 + plane);
1531 1754
1755 intel_plane->max_stride = skl_plane_max_stride;
1532 intel_plane->update_plane = skl_update_plane; 1756 intel_plane->update_plane = skl_update_plane;
1533 intel_plane->disable_plane = skl_disable_plane; 1757 intel_plane->disable_plane = skl_disable_plane;
1534 intel_plane->get_hw_state = skl_plane_get_hw_state; 1758 intel_plane->get_hw_state = skl_plane_get_hw_state;
1759 intel_plane->check_plane = skl_plane_check;
1535 1760
1536 if (skl_plane_has_planar(dev_priv, pipe, 1761 if (skl_plane_has_planar(dev_priv, pipe,
1537 PLANE_SPRITE0 + plane)) { 1762 PLANE_SPRITE0 + plane)) {
@@ -1549,12 +1774,11 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
1549 1774
1550 plane_funcs = &skl_plane_funcs; 1775 plane_funcs = &skl_plane_funcs;
1551 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { 1776 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
1552 intel_plane->can_scale = false; 1777 intel_plane->max_stride = i9xx_plane_max_stride;
1553 intel_plane->max_downscale = 1;
1554
1555 intel_plane->update_plane = vlv_update_plane; 1778 intel_plane->update_plane = vlv_update_plane;
1556 intel_plane->disable_plane = vlv_disable_plane; 1779 intel_plane->disable_plane = vlv_disable_plane;
1557 intel_plane->get_hw_state = vlv_plane_get_hw_state; 1780 intel_plane->get_hw_state = vlv_plane_get_hw_state;
1781 intel_plane->check_plane = vlv_sprite_check;
1558 1782
1559 plane_formats = vlv_plane_formats; 1783 plane_formats = vlv_plane_formats;
1560 num_plane_formats = ARRAY_SIZE(vlv_plane_formats); 1784 num_plane_formats = ARRAY_SIZE(vlv_plane_formats);
@@ -1562,17 +1786,11 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
1562 1786
1563 plane_funcs = &vlv_sprite_funcs; 1787 plane_funcs = &vlv_sprite_funcs;
1564 } else if (INTEL_GEN(dev_priv) >= 7) { 1788 } else if (INTEL_GEN(dev_priv) >= 7) {
1565 if (IS_IVYBRIDGE(dev_priv)) { 1789 intel_plane->max_stride = g4x_sprite_max_stride;
1566 intel_plane->can_scale = true;
1567 intel_plane->max_downscale = 2;
1568 } else {
1569 intel_plane->can_scale = false;
1570 intel_plane->max_downscale = 1;
1571 }
1572
1573 intel_plane->update_plane = ivb_update_plane; 1790 intel_plane->update_plane = ivb_update_plane;
1574 intel_plane->disable_plane = ivb_disable_plane; 1791 intel_plane->disable_plane = ivb_disable_plane;
1575 intel_plane->get_hw_state = ivb_plane_get_hw_state; 1792 intel_plane->get_hw_state = ivb_plane_get_hw_state;
1793 intel_plane->check_plane = g4x_sprite_check;
1576 1794
1577 plane_formats = snb_plane_formats; 1795 plane_formats = snb_plane_formats;
1578 num_plane_formats = ARRAY_SIZE(snb_plane_formats); 1796 num_plane_formats = ARRAY_SIZE(snb_plane_formats);
@@ -1580,12 +1798,11 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
1580 1798
1581 plane_funcs = &snb_sprite_funcs; 1799 plane_funcs = &snb_sprite_funcs;
1582 } else { 1800 } else {
1583 intel_plane->can_scale = true; 1801 intel_plane->max_stride = g4x_sprite_max_stride;
1584 intel_plane->max_downscale = 16;
1585
1586 intel_plane->update_plane = g4x_update_plane; 1802 intel_plane->update_plane = g4x_update_plane;
1587 intel_plane->disable_plane = g4x_disable_plane; 1803 intel_plane->disable_plane = g4x_disable_plane;
1588 intel_plane->get_hw_state = g4x_plane_get_hw_state; 1804 intel_plane->get_hw_state = g4x_plane_get_hw_state;
1805 intel_plane->check_plane = g4x_sprite_check;
1589 1806
1590 modifiers = i9xx_plane_format_modifiers; 1807 modifiers = i9xx_plane_format_modifiers;
1591 if (IS_GEN6(dev_priv)) { 1808 if (IS_GEN6(dev_priv)) {
@@ -1618,7 +1835,6 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
1618 intel_plane->i9xx_plane = plane; 1835 intel_plane->i9xx_plane = plane;
1619 intel_plane->id = PLANE_SPRITE0 + plane; 1836 intel_plane->id = PLANE_SPRITE0 + plane;
1620 intel_plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, intel_plane->id); 1837 intel_plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, intel_plane->id);
1621 intel_plane->check_plane = intel_check_sprite_plane;
1622 1838
1623 possible_crtcs = (1 << pipe); 1839 possible_crtcs = (1 << pipe);
1624 1840
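The creation hunks above retire the can_scale/max_downscale fields in favour of per-plane max_stride() and check_plane() hooks. A hedged sketch of how a caller might consume the new vfunc; the actual call site is outside this diff:

	/* Hypothetical caller: validate an fb pitch against the plane limit. */
	unsigned int max_stride =
		plane->max_stride(plane, fb->format->format,
				  fb->modifier, DRM_MODE_ROTATE_0);
	if (fb->pitches[0] > max_stride)
		return -EINVAL;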
diff --git a/drivers/gpu/drm/i915/intel_uc.c b/drivers/gpu/drm/i915/intel_uc.c
index 7c95697e1a35..b1b3e81b6e24 100644
--- a/drivers/gpu/drm/i915/intel_uc.c
+++ b/drivers/gpu/drm/i915/intel_uc.c
@@ -401,6 +401,10 @@ int intel_uc_init_hw(struct drm_i915_private *i915)
401 ret = intel_guc_submission_enable(guc); 401 ret = intel_guc_submission_enable(guc);
402 if (ret) 402 if (ret)
403 goto err_communication; 403 goto err_communication;
404 } else if (INTEL_GEN(i915) < 11) {
405 ret = intel_guc_sample_forcewake(guc);
406 if (ret)
407 goto err_communication;
404 } 408 }
405 409
406 dev_info(i915->drm.dev, "GuC firmware version %u.%u\n", 410 dev_info(i915->drm.dev, "GuC firmware version %u.%u\n",
diff --git a/drivers/gpu/drm/i915/selftests/huge_pages.c b/drivers/gpu/drm/i915/selftests/huge_pages.c
index e272127783fe..8d03f64eabd7 100644
--- a/drivers/gpu/drm/i915/selftests/huge_pages.c
+++ b/drivers/gpu/drm/i915/selftests/huge_pages.c
@@ -235,6 +235,8 @@ static int fake_get_huge_pages(struct drm_i915_gem_object *obj)
235 sg = sg_next(sg); 235 sg = sg_next(sg);
236 } while (1); 236 } while (1);
237 237
238 i915_sg_trim(st);
239
238 obj->mm.madv = I915_MADV_DONTNEED; 240 obj->mm.madv = I915_MADV_DONTNEED;
239 241
240 __i915_gem_object_set_pages(obj, st, sg_page_sizes); 242 __i915_gem_object_set_pages(obj, st, sg_page_sizes);
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c b/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c
index 4e6a221063ac..f7392c1ffe75 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c
@@ -298,6 +298,7 @@ static int igt_gem_coherency(void *arg)
298 values = offsets + ncachelines; 298 values = offsets + ncachelines;
299 299
300 mutex_lock(&i915->drm.struct_mutex); 300 mutex_lock(&i915->drm.struct_mutex);
301 intel_runtime_pm_get(i915);
301 for (over = igt_coherency_mode; over->name; over++) { 302 for (over = igt_coherency_mode; over->name; over++) {
302 if (!over->set) 303 if (!over->set)
303 continue; 304 continue;
@@ -375,6 +376,7 @@ static int igt_gem_coherency(void *arg)
375 } 376 }
376 } 377 }
377unlock: 378unlock:
379 intel_runtime_pm_put(i915);
378 mutex_unlock(&i915->drm.struct_mutex); 380 mutex_unlock(&i915->drm.struct_mutex);
379 kfree(offsets); 381 kfree(offsets);
380 return err; 382 return err;
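This hunk and the selftest hunks that follow all add the same bracketing: hold an explicit runtime-PM wakeref for the whole hardware-touching section instead of relying on whatever wakerefs happen to be live. The pattern, as a minimal sketch:

	mutex_lock(&i915->drm.struct_mutex);
	intel_runtime_pm_get(i915);	/* keep the device awake */

	/* ... submit requests, poke registers ... */

	intel_runtime_pm_put(i915);	/* drop the wakeref on every exit path */
	mutex_unlock(&i915->drm.struct_mutex);

Note how each hunk below places the put on the common unwind label so that early error returns still release the wakeref.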
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
index 1c92560d35da..76df25aa90c9 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
@@ -22,6 +22,8 @@
22 * 22 *
23 */ 23 */
24 24
25#include <linux/prime_numbers.h>
26
25#include "../i915_selftest.h" 27#include "../i915_selftest.h"
26#include "i915_random.h" 28#include "i915_random.h"
27#include "igt_flush_test.h" 29#include "igt_flush_test.h"
@@ -32,6 +34,200 @@
32 34
33#define DW_PER_PAGE (PAGE_SIZE / sizeof(u32)) 35#define DW_PER_PAGE (PAGE_SIZE / sizeof(u32))
34 36
37struct live_test {
38 struct drm_i915_private *i915;
39 const char *func;
40 const char *name;
41
42 unsigned int reset_count;
43};
44
45static int begin_live_test(struct live_test *t,
46 struct drm_i915_private *i915,
47 const char *func,
48 const char *name)
49{
50 int err;
51
52 t->i915 = i915;
53 t->func = func;
54 t->name = name;
55
56 err = i915_gem_wait_for_idle(i915,
57 I915_WAIT_LOCKED,
58 MAX_SCHEDULE_TIMEOUT);
59 if (err) {
60 pr_err("%s(%s): failed to idle before, with err=%d!",
61 func, name, err);
62 return err;
63 }
64
65 i915->gpu_error.missed_irq_rings = 0;
66 t->reset_count = i915_reset_count(&i915->gpu_error);
67
68 return 0;
69}
70
71static int end_live_test(struct live_test *t)
72{
73 struct drm_i915_private *i915 = t->i915;
74
75 if (igt_flush_test(i915, I915_WAIT_LOCKED))
76 return -EIO;
77
78 if (t->reset_count != i915_reset_count(&i915->gpu_error)) {
79 pr_err("%s(%s): GPU was reset %d times!\n",
80 t->func, t->name,
81 i915_reset_count(&i915->gpu_error) - t->reset_count);
82 return -EIO;
83 }
84
85 if (i915->gpu_error.missed_irq_rings) {
86 pr_err("%s(%s): Missed interrupts on engines %lx\n",
87 t->func, t->name, i915->gpu_error.missed_irq_rings);
88 return -EIO;
89 }
90
91 return 0;
92}
93
94static int live_nop_switch(void *arg)
95{
96 const unsigned int nctx = 1024;
97 struct drm_i915_private *i915 = arg;
98 struct intel_engine_cs *engine;
99 struct i915_gem_context **ctx;
100 enum intel_engine_id id;
101 struct drm_file *file;
102 struct live_test t;
103 unsigned long n;
104 int err = -ENODEV;
105
106 /*
107 * Create as many contexts as we can feasibly get away with
108 * and check we can switch between them rapidly.
109 *
 110 * Serves as a very simple stress test for submission and HW switching
111 * between contexts.
112 */
113
114 if (!DRIVER_CAPS(i915)->has_logical_contexts)
115 return 0;
116
117 file = mock_file(i915);
118 if (IS_ERR(file))
119 return PTR_ERR(file);
120
121 mutex_lock(&i915->drm.struct_mutex);
122 intel_runtime_pm_get(i915);
123
124 ctx = kcalloc(nctx, sizeof(*ctx), GFP_KERNEL);
125 if (!ctx) {
126 err = -ENOMEM;
127 goto out_unlock;
128 }
129
130 for (n = 0; n < nctx; n++) {
131 ctx[n] = i915_gem_create_context(i915, file->driver_priv);
132 if (IS_ERR(ctx[n])) {
133 err = PTR_ERR(ctx[n]);
134 goto out_unlock;
135 }
136 }
137
138 for_each_engine(engine, i915, id) {
139 struct i915_request *rq;
140 unsigned long end_time, prime;
141 ktime_t times[2] = {};
142
143 times[0] = ktime_get_raw();
144 for (n = 0; n < nctx; n++) {
145 rq = i915_request_alloc(engine, ctx[n]);
146 if (IS_ERR(rq)) {
147 err = PTR_ERR(rq);
148 goto out_unlock;
149 }
150 i915_request_add(rq);
151 }
152 if (i915_request_wait(rq,
153 I915_WAIT_LOCKED,
154 HZ / 5) < 0) {
155 pr_err("Failed to populated %d contexts\n", nctx);
156 i915_gem_set_wedged(i915);
157 err = -EIO;
158 goto out_unlock;
159 }
160
161 times[1] = ktime_get_raw();
162
163 pr_info("Populated %d contexts on %s in %lluns\n",
164 nctx, engine->name, ktime_to_ns(times[1] - times[0]));
165
166 err = begin_live_test(&t, i915, __func__, engine->name);
167 if (err)
168 goto out_unlock;
169
170 end_time = jiffies + i915_selftest.timeout_jiffies;
171 for_each_prime_number_from(prime, 2, 8192) {
172 times[1] = ktime_get_raw();
173
174 for (n = 0; n < prime; n++) {
175 rq = i915_request_alloc(engine, ctx[n % nctx]);
176 if (IS_ERR(rq)) {
177 err = PTR_ERR(rq);
178 goto out_unlock;
179 }
180
181 /*
182 * This space is left intentionally blank.
183 *
184 * We do not actually want to perform any
185 * action with this request, we just want
186 * to measure the latency in allocation
187 * and submission of our breadcrumbs -
188 * ensuring that the bare request is sufficient
189 * for the system to work (i.e. proper HEAD
190 * tracking of the rings, interrupt handling,
191 * etc). It also gives us the lowest bounds
192 * for latency.
193 */
194
195 i915_request_add(rq);
196 }
197 if (i915_request_wait(rq,
198 I915_WAIT_LOCKED,
199 HZ / 5) < 0) {
200 pr_err("Switching between %ld contexts timed out\n",
201 prime);
202 i915_gem_set_wedged(i915);
203 break;
204 }
205
206 times[1] = ktime_sub(ktime_get_raw(), times[1]);
207 if (prime == 2)
208 times[0] = times[1];
209
210 if (__igt_timeout(end_time, NULL))
211 break;
212 }
213
214 err = end_live_test(&t);
215 if (err)
216 goto out_unlock;
217
218 pr_info("Switch latencies on %s: 1 = %lluns, %lu = %lluns\n",
219 engine->name,
220 ktime_to_ns(times[0]),
221 prime - 1, div64_u64(ktime_to_ns(times[1]), prime - 1));
222 }
223
224out_unlock:
225 intel_runtime_pm_put(i915);
226 mutex_unlock(&i915->drm.struct_mutex);
227 mock_file_free(i915, file);
228 return err;
229}
230
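The report at the end of live_nop_switch() converts the last timed burst into a per-switch figure by dividing the burst time by prime - 1 switches; times[0] holds the single-switch baseline from the prime == 2 pass. A fragment with assumed numbers:

	/* assumed: the final burst of prime = 8191 requests took ~41 ms */
	u64 total_ns = 40955000;
	unsigned long prime = 8191;

	u64 per_switch = div64_u64(total_ns, prime - 1);	/* ~5000 ns */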
35static struct i915_vma * 231static struct i915_vma *
36gpu_fill_dw(struct i915_vma *vma, u64 offset, unsigned long count, u32 value) 232gpu_fill_dw(struct i915_vma *vma, u64 offset, unsigned long count, u32 value)
37{ 233{
@@ -195,6 +391,7 @@ err_request:
195 i915_request_add(rq); 391 i915_request_add(rq);
196err_batch: 392err_batch:
197 i915_vma_unpin(batch); 393 i915_vma_unpin(batch);
394 i915_vma_put(batch);
198err_vma: 395err_vma:
199 i915_vma_unpin(vma); 396 i915_vma_unpin(vma);
200 return err; 397 return err;
@@ -636,6 +833,8 @@ static int igt_switch_to_kernel_context(void *arg)
636 */ 833 */
637 834
638 mutex_lock(&i915->drm.struct_mutex); 835 mutex_lock(&i915->drm.struct_mutex);
836 intel_runtime_pm_get(i915);
837
639 ctx = kernel_context(i915); 838 ctx = kernel_context(i915);
640 if (IS_ERR(ctx)) { 839 if (IS_ERR(ctx)) {
641 mutex_unlock(&i915->drm.struct_mutex); 840 mutex_unlock(&i915->drm.struct_mutex);
@@ -658,6 +857,8 @@ out_unlock:
658 GEM_TRACE_DUMP_ON(err); 857 GEM_TRACE_DUMP_ON(err);
659 if (igt_flush_test(i915, I915_WAIT_LOCKED)) 858 if (igt_flush_test(i915, I915_WAIT_LOCKED))
660 err = -EIO; 859 err = -EIO;
860
861 intel_runtime_pm_put(i915);
661 mutex_unlock(&i915->drm.struct_mutex); 862 mutex_unlock(&i915->drm.struct_mutex);
662 863
663 kernel_context_close(ctx); 864 kernel_context_close(ctx);
@@ -713,6 +914,7 @@ int i915_gem_context_live_selftests(struct drm_i915_private *dev_priv)
713{ 914{
714 static const struct i915_subtest tests[] = { 915 static const struct i915_subtest tests[] = {
715 SUBTEST(igt_switch_to_kernel_context), 916 SUBTEST(igt_switch_to_kernel_context),
917 SUBTEST(live_nop_switch),
716 SUBTEST(igt_ctx_exec), 918 SUBTEST(igt_ctx_exec),
717 SUBTEST(igt_ctx_readonly), 919 SUBTEST(igt_ctx_readonly),
718 }; 920 };
diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c
index c4aac6141e04..07e557815308 100644
--- a/drivers/gpu/drm/i915/selftests/i915_request.c
+++ b/drivers/gpu/drm/i915/selftests/i915_request.c
@@ -342,6 +342,7 @@ static int live_nop_request(void *arg)
342 */ 342 */
343 343
344 mutex_lock(&i915->drm.struct_mutex); 344 mutex_lock(&i915->drm.struct_mutex);
345 intel_runtime_pm_get(i915);
345 346
346 for_each_engine(engine, i915, id) { 347 for_each_engine(engine, i915, id) {
347 struct i915_request *request = NULL; 348 struct i915_request *request = NULL;
@@ -402,6 +403,7 @@ static int live_nop_request(void *arg)
402 } 403 }
403 404
404out_unlock: 405out_unlock:
406 intel_runtime_pm_put(i915);
405 mutex_unlock(&i915->drm.struct_mutex); 407 mutex_unlock(&i915->drm.struct_mutex);
406 return err; 408 return err;
407} 409}
@@ -487,6 +489,7 @@ static int live_empty_request(void *arg)
487 */ 489 */
488 490
489 mutex_lock(&i915->drm.struct_mutex); 491 mutex_lock(&i915->drm.struct_mutex);
492 intel_runtime_pm_get(i915);
490 493
491 batch = empty_batch(i915); 494 batch = empty_batch(i915);
492 if (IS_ERR(batch)) { 495 if (IS_ERR(batch)) {
@@ -550,6 +553,7 @@ out_batch:
550 i915_vma_unpin(batch); 553 i915_vma_unpin(batch);
551 i915_vma_put(batch); 554 i915_vma_put(batch);
552out_unlock: 555out_unlock:
556 intel_runtime_pm_put(i915);
553 mutex_unlock(&i915->drm.struct_mutex); 557 mutex_unlock(&i915->drm.struct_mutex);
554 return err; 558 return err;
555} 559}
@@ -644,6 +648,7 @@ static int live_all_engines(void *arg)
644 */ 648 */
645 649
646 mutex_lock(&i915->drm.struct_mutex); 650 mutex_lock(&i915->drm.struct_mutex);
651 intel_runtime_pm_get(i915);
647 652
648 err = begin_live_test(&t, i915, __func__, ""); 653 err = begin_live_test(&t, i915, __func__, "");
649 if (err) 654 if (err)
@@ -726,6 +731,7 @@ out_request:
726 i915_vma_unpin(batch); 731 i915_vma_unpin(batch);
727 i915_vma_put(batch); 732 i915_vma_put(batch);
728out_unlock: 733out_unlock:
734 intel_runtime_pm_put(i915);
729 mutex_unlock(&i915->drm.struct_mutex); 735 mutex_unlock(&i915->drm.struct_mutex);
730 return err; 736 return err;
731} 737}
@@ -747,6 +753,7 @@ static int live_sequential_engines(void *arg)
747 */ 753 */
748 754
749 mutex_lock(&i915->drm.struct_mutex); 755 mutex_lock(&i915->drm.struct_mutex);
756 intel_runtime_pm_get(i915);
750 757
751 err = begin_live_test(&t, i915, __func__, ""); 758 err = begin_live_test(&t, i915, __func__, "");
752 if (err) 759 if (err)
@@ -853,6 +860,7 @@ out_request:
853 i915_request_put(request[id]); 860 i915_request_put(request[id]);
854 } 861 }
855out_unlock: 862out_unlock:
863 intel_runtime_pm_put(i915);
856 mutex_unlock(&i915->drm.struct_mutex); 864 mutex_unlock(&i915->drm.struct_mutex);
857 return err; 865 return err;
858} 866}
diff --git a/drivers/gpu/drm/i915/selftests/intel_guc.c b/drivers/gpu/drm/i915/selftests/intel_guc.c
index 90ba88c972cf..0c0ab82b6228 100644
--- a/drivers/gpu/drm/i915/selftests/intel_guc.c
+++ b/drivers/gpu/drm/i915/selftests/intel_guc.c
@@ -142,6 +142,7 @@ static int igt_guc_clients(void *args)
142 142
143 GEM_BUG_ON(!HAS_GUC(dev_priv)); 143 GEM_BUG_ON(!HAS_GUC(dev_priv));
144 mutex_lock(&dev_priv->drm.struct_mutex); 144 mutex_lock(&dev_priv->drm.struct_mutex);
145 intel_runtime_pm_get(dev_priv);
145 146
146 guc = &dev_priv->guc; 147 guc = &dev_priv->guc;
147 if (!guc) { 148 if (!guc) {
@@ -269,6 +270,7 @@ out:
269 guc_clients_create(guc); 270 guc_clients_create(guc);
270 guc_clients_doorbell_init(guc); 271 guc_clients_doorbell_init(guc);
271unlock: 272unlock:
273 intel_runtime_pm_put(dev_priv);
272 mutex_unlock(&dev_priv->drm.struct_mutex); 274 mutex_unlock(&dev_priv->drm.struct_mutex);
273 return err; 275 return err;
274} 276}
@@ -287,6 +289,7 @@ static int igt_guc_doorbells(void *arg)
287 289
288 GEM_BUG_ON(!HAS_GUC(dev_priv)); 290 GEM_BUG_ON(!HAS_GUC(dev_priv));
289 mutex_lock(&dev_priv->drm.struct_mutex); 291 mutex_lock(&dev_priv->drm.struct_mutex);
292 intel_runtime_pm_get(dev_priv);
290 293
291 guc = &dev_priv->guc; 294 guc = &dev_priv->guc;
292 if (!guc) { 295 if (!guc) {
@@ -379,6 +382,7 @@ out:
379 guc_client_free(clients[i]); 382 guc_client_free(clients[i]);
380 } 383 }
381unlock: 384unlock:
385 intel_runtime_pm_put(dev_priv);
382 mutex_unlock(&dev_priv->drm.struct_mutex); 386 mutex_unlock(&dev_priv->drm.struct_mutex);
383 return err; 387 return err;
384} 388}
diff --git a/drivers/gpu/drm/i915/selftests/intel_lrc.c b/drivers/gpu/drm/i915/selftests/intel_lrc.c
index 582566faef09..1aea7a8f2224 100644
--- a/drivers/gpu/drm/i915/selftests/intel_lrc.c
+++ b/drivers/gpu/drm/i915/selftests/intel_lrc.c
@@ -221,6 +221,7 @@ static int live_sanitycheck(void *arg)
221 return 0; 221 return 0;
222 222
223 mutex_lock(&i915->drm.struct_mutex); 223 mutex_lock(&i915->drm.struct_mutex);
224 intel_runtime_pm_get(i915);
224 225
225 if (spinner_init(&spin, i915)) 226 if (spinner_init(&spin, i915))
226 goto err_unlock; 227 goto err_unlock;
@@ -261,6 +262,7 @@ err_spin:
261 spinner_fini(&spin); 262 spinner_fini(&spin);
262err_unlock: 263err_unlock:
263 igt_flush_test(i915, I915_WAIT_LOCKED); 264 igt_flush_test(i915, I915_WAIT_LOCKED);
265 intel_runtime_pm_put(i915);
264 mutex_unlock(&i915->drm.struct_mutex); 266 mutex_unlock(&i915->drm.struct_mutex);
265 return err; 267 return err;
266} 268}
@@ -278,6 +280,7 @@ static int live_preempt(void *arg)
278 return 0; 280 return 0;
279 281
280 mutex_lock(&i915->drm.struct_mutex); 282 mutex_lock(&i915->drm.struct_mutex);
283 intel_runtime_pm_get(i915);
281 284
282 if (spinner_init(&spin_hi, i915)) 285 if (spinner_init(&spin_hi, i915))
283 goto err_unlock; 286 goto err_unlock;
@@ -350,6 +353,7 @@ err_spin_hi:
350 spinner_fini(&spin_hi); 353 spinner_fini(&spin_hi);
351err_unlock: 354err_unlock:
352 igt_flush_test(i915, I915_WAIT_LOCKED); 355 igt_flush_test(i915, I915_WAIT_LOCKED);
356 intel_runtime_pm_put(i915);
353 mutex_unlock(&i915->drm.struct_mutex); 357 mutex_unlock(&i915->drm.struct_mutex);
354 return err; 358 return err;
355} 359}
@@ -368,6 +372,7 @@ static int live_late_preempt(void *arg)
368 return 0; 372 return 0;
369 373
370 mutex_lock(&i915->drm.struct_mutex); 374 mutex_lock(&i915->drm.struct_mutex);
375 intel_runtime_pm_get(i915);
371 376
372 if (spinner_init(&spin_hi, i915)) 377 if (spinner_init(&spin_hi, i915))
373 goto err_unlock; 378 goto err_unlock;
@@ -440,6 +445,7 @@ err_spin_hi:
440 spinner_fini(&spin_hi); 445 spinner_fini(&spin_hi);
441err_unlock: 446err_unlock:
442 igt_flush_test(i915, I915_WAIT_LOCKED); 447 igt_flush_test(i915, I915_WAIT_LOCKED);
448 intel_runtime_pm_put(i915);
443 mutex_unlock(&i915->drm.struct_mutex); 449 mutex_unlock(&i915->drm.struct_mutex);
444 return err; 450 return err;
445 451
@@ -467,6 +473,7 @@ static int live_preempt_hang(void *arg)
467 return 0; 473 return 0;
468 474
469 mutex_lock(&i915->drm.struct_mutex); 475 mutex_lock(&i915->drm.struct_mutex);
476 intel_runtime_pm_get(i915);
470 477
471 if (spinner_init(&spin_hi, i915)) 478 if (spinner_init(&spin_hi, i915))
472 goto err_unlock; 479 goto err_unlock;
@@ -561,6 +568,7 @@ err_spin_hi:
561 spinner_fini(&spin_hi); 568 spinner_fini(&spin_hi);
562err_unlock: 569err_unlock:
563 igt_flush_test(i915, I915_WAIT_LOCKED); 570 igt_flush_test(i915, I915_WAIT_LOCKED);
571 intel_runtime_pm_put(i915);
564 mutex_unlock(&i915->drm.struct_mutex); 572 mutex_unlock(&i915->drm.struct_mutex);
565 return err; 573 return err;
566} 574}
diff --git a/drivers/gpu/drm/i915/selftests/intel_workarounds.c b/drivers/gpu/drm/i915/selftests/intel_workarounds.c
index 0d39b3bf0c0d..d1a0923d2f38 100644
--- a/drivers/gpu/drm/i915/selftests/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/selftests/intel_workarounds.c
@@ -44,7 +44,9 @@ read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
44 if (err) 44 if (err)
45 goto err_obj; 45 goto err_obj;
46 46
47 intel_runtime_pm_get(engine->i915);
47 rq = i915_request_alloc(engine, ctx); 48 rq = i915_request_alloc(engine, ctx);
49 intel_runtime_pm_put(engine->i915);
48 if (IS_ERR(rq)) { 50 if (IS_ERR(rq)) {
49 err = PTR_ERR(rq); 51 err = PTR_ERR(rq);
50 goto err_pin; 52 goto err_pin;
@@ -175,7 +177,10 @@ static int switch_to_scratch_context(struct intel_engine_cs *engine)
175 if (IS_ERR(ctx)) 177 if (IS_ERR(ctx))
176 return PTR_ERR(ctx); 178 return PTR_ERR(ctx);
177 179
180 intel_runtime_pm_get(engine->i915);
178 rq = i915_request_alloc(engine, ctx); 181 rq = i915_request_alloc(engine, ctx);
182 intel_runtime_pm_put(engine->i915);
183
179 kernel_context_close(ctx); 184 kernel_context_close(ctx);
180 if (IS_ERR(rq)) 185 if (IS_ERR(rq))
181 return PTR_ERR(rq); 186 return PTR_ERR(rq);