author     Maarten Lankhorst <maarten.lankhorst@linux.intel.com>   2016-05-17 09:07:51 -0400
committer  Maarten Lankhorst <maarten.lankhorst@linux.intel.com>   2016-05-19 08:37:37 -0400
commit     aa420ddd8eeaa5df579894a412289e4d07c2fee9 (patch)
tree       2b8a13d714dcbca1ecf2cd7a9564742a6e77c948
parent     afee4d8707ab1f21b7668de995be3a5961e83582 (diff)
drm/i915: Allow mmio updates on all platforms, v2.
With intel_pipe_update begin/end we ensure that the mmio updates don't run
during the vblank interrupt; using the hw counter we can be sure that when
the current vblank count != the vblank count at the time of pipe_update_end,
the mmio update is complete. This allows us to use mmio updates on all
platforms, using the update_plane call.

With Chris Wilson's patch to skip waiting for vblanks for legacy_cursor_update
this potentially leaves a small race condition, in which update_plane can be
called with a freed crtc_state. Because of this, commit acf4e84d61673
("drm/i915: Avoid stalling on pending flips for legacy cursor updates") is
temporarily reverted.

Changes since v1:
- Split out the flip_work rename.

Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/1463490484-19540-9-git-send-email-maarten.lankhorst@linux.intel.com
Reviewed-by: Patrik Jakobsson <patrik.jakobsson@linux.intel.com>
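The mechanism described in the commit message boils down to one invariant: sample the hardware vblank counter when the mmio writes are submitted, and treat the update as latched once the counter has moved past that value. The stand-alone C sketch below illustrates only that idea; it is not kernel code, and read_hw_vblank_counter(), write_plane_registers() and the simulated counter are hypothetical stand-ins for the real hardware accessors.

/*
 * Minimal user-space sketch of the vblank-counter completion check.
 * Not kernel code; all helpers here are hypothetical stand-ins.
 */
#include <stdbool.h>
#include <stdio.h>

static unsigned int hw_vblank;          /* simulated hardware frame counter */

static unsigned int read_hw_vblank_counter(void)
{
        return hw_vblank;
}

static void write_plane_registers(void)
{
        /* stand-in for the plane control/stride/surface mmio writes */
}

struct flip_work {
        unsigned int flip_queued_vblank; /* counter value sampled at queue time */
};

static void do_mmio_update(struct flip_work *work)
{
        write_plane_registers();
        /* remember which frame the update was queued in */
        work->flip_queued_vblank = read_hw_vblank_counter();
}

static bool mmio_update_complete(const struct flip_work *work)
{
        /*
         * Once the hardware counter differs from the value sampled at queue
         * time, at least one vblank has passed and the registers have latched.
         */
        return read_hw_vblank_counter() != work->flip_queued_vblank;
}

int main(void)
{
        struct flip_work work;

        do_mmio_update(&work);
        printf("complete before vblank: %d\n", mmio_update_complete(&work));

        hw_vblank++;                     /* simulate a vblank interrupt */
        printf("complete after vblank:  %d\n", mmio_update_complete(&work));
        return 0;
}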
-rw-r--r--   drivers/gpu/drm/i915/intel_display.c   99
-rw-r--r--   drivers/gpu/drm/i915/intel_drv.h        1
2 files changed, 7 insertions, 93 deletions
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index a432fdc6fe68..9c57736fc521 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -11269,9 +11269,6 @@ static bool use_mmio_flip(struct intel_engine_cs *engine,
         if (engine == NULL)
                 return true;
 
-        if (INTEL_GEN(engine->i915) < 5)
-                return false;
-
         if (i915.use_mmio_flip < 0)
                 return false;
         else if (i915.use_mmio_flip > 0)
@@ -11286,92 +11283,15 @@ static bool use_mmio_flip(struct intel_engine_cs *engine,
         return engine != i915_gem_request_get_engine(obj->last_write_req);
 }
 
-static void skl_do_mmio_flip(struct intel_crtc *intel_crtc,
-                             unsigned int rotation,
-                             struct intel_flip_work *work)
-{
-        struct drm_device *dev = intel_crtc->base.dev;
-        struct drm_i915_private *dev_priv = dev->dev_private;
-        struct drm_framebuffer *fb = intel_crtc->base.primary->fb;
-        const enum pipe pipe = intel_crtc->pipe;
-        u32 ctl, stride, tile_height;
-
-        ctl = I915_READ(PLANE_CTL(pipe, 0));
-        ctl &= ~PLANE_CTL_TILED_MASK;
-        switch (fb->modifier[0]) {
-        case DRM_FORMAT_MOD_NONE:
-                break;
-        case I915_FORMAT_MOD_X_TILED:
-                ctl |= PLANE_CTL_TILED_X;
-                break;
-        case I915_FORMAT_MOD_Y_TILED:
-                ctl |= PLANE_CTL_TILED_Y;
-                break;
-        case I915_FORMAT_MOD_Yf_TILED:
-                ctl |= PLANE_CTL_TILED_YF;
-                break;
-        default:
-                MISSING_CASE(fb->modifier[0]);
-        }
-
-        /*
-         * The stride is either expressed as a multiple of 64 bytes chunks for
-         * linear buffers or in number of tiles for tiled buffers.
-         */
-        if (intel_rotation_90_or_270(rotation)) {
-                /* stride = Surface height in tiles */
-                tile_height = intel_tile_height(dev_priv, fb->modifier[0], 0);
-                stride = DIV_ROUND_UP(fb->height, tile_height);
-        } else {
-                stride = fb->pitches[0] /
-                        intel_fb_stride_alignment(dev_priv, fb->modifier[0],
-                                                  fb->pixel_format);
-        }
-
-        /*
-         * Both PLANE_CTL and PLANE_STRIDE are not updated on vblank but on
-         * PLANE_SURF updates, the update is then guaranteed to be atomic.
-         */
-        I915_WRITE(PLANE_CTL(pipe, 0), ctl);
-        I915_WRITE(PLANE_STRIDE(pipe, 0), stride);
-
-        I915_WRITE(PLANE_SURF(pipe, 0), work->gtt_offset);
-        POSTING_READ(PLANE_SURF(pipe, 0));
-}
-
-static void ilk_do_mmio_flip(struct intel_crtc *intel_crtc,
-                             struct intel_flip_work *work)
-{
-        struct drm_device *dev = intel_crtc->base.dev;
-        struct drm_i915_private *dev_priv = dev->dev_private;
-        struct intel_framebuffer *intel_fb =
-                to_intel_framebuffer(intel_crtc->base.primary->fb);
-        struct drm_i915_gem_object *obj = intel_fb->obj;
-        i915_reg_t reg = DSPCNTR(intel_crtc->plane);
-        u32 dspcntr;
-
-        dspcntr = I915_READ(reg);
-
-        if (obj->tiling_mode != I915_TILING_NONE)
-                dspcntr |= DISPPLANE_TILED;
-        else
-                dspcntr &= ~DISPPLANE_TILED;
-
-        I915_WRITE(reg, dspcntr);
-
-        I915_WRITE(DSPSURF(intel_crtc->plane), work->gtt_offset);
-        POSTING_READ(DSPSURF(intel_crtc->plane));
-}
-
 static void intel_mmio_flip_work_func(struct work_struct *w)
 {
         struct intel_flip_work *work =
                 container_of(w, struct intel_flip_work, mmio_work);
         struct intel_crtc *crtc = to_intel_crtc(work->crtc);
-        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-        struct intel_framebuffer *intel_fb =
-                to_intel_framebuffer(crtc->base.primary->fb);
-        struct drm_i915_gem_object *obj = intel_fb->obj;
+        struct drm_device *dev = crtc->base.dev;
+        struct drm_i915_private *dev_priv = dev->dev_private;
+        struct intel_plane *primary = to_intel_plane(crtc->base.primary);
+        struct drm_i915_gem_object *obj = intel_fb_obj(primary->base.state->fb);
 
         if (work->flip_queued_req)
                 WARN_ON(__i915_wait_request(work->flip_queued_req,
@@ -11385,13 +11305,9 @@ static void intel_mmio_flip_work_func(struct work_struct *w)
                                             MAX_SCHEDULE_TIMEOUT) < 0);
 
         intel_pipe_update_start(crtc);
-
-        if (INTEL_GEN(dev_priv) >= 9)
-                skl_do_mmio_flip(crtc, work->rotation, work);
-        else
-                /* use_mmio_flip() retricts MMIO flips to ilk+ */
-                ilk_do_mmio_flip(crtc, work);
-
+        primary->update_plane(&primary->base,
+                              crtc->config,
+                              to_intel_plane_state(primary->base.state));
         intel_pipe_update_end(crtc, work);
 }
 
@@ -11616,7 +11532,6 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
         work->gtt_offset = intel_plane_obj_offset(to_intel_plane(primary),
                                                   obj, 0);
         work->gtt_offset += intel_crtc->dspaddr_offset;
-        work->rotation = crtc->primary->state->rotation;
 
         if (mmio_flip) {
                 INIT_WORK(&work->mmio_work, intel_mmio_flip_work_func);
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 97de5e05890a..9502e79b2f5b 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -983,7 +983,6 @@ struct intel_flip_work {
         struct drm_i915_gem_request *flip_queued_req;
         u32 flip_queued_vblank;
         u32 flip_ready_vblank;
-        unsigned int rotation;
 };
 
 struct intel_load_detect_pipe {