author     Chris Wilson <chris@chris-wilson.co.uk>      2012-09-26 08:47:30 -0400
committer  Daniel Vetter <daniel.vetter@ffwll.ch>       2012-10-12 04:59:09 -0400
commit     acb868d3d710b09a356d848e0cd44d9713a9e274
tree       ace446247ba28f770c8be9493e8be7f16852dbf8 /drivers/gpu/drm/i915/intel_overlay.c
parent     26b6e44afb58432a5e998da0343757404f9de9ee
drm/i915: Disallow preallocation of requests
The intention was to allow the caller to avoid a failure to queue a
request after having already written commands to the ring. However, this
is a moot point: i915_add_request() can fail for reasons other than a
mere allocation failure, and those failure cases are more likely than
ENOMEM. So the overlay code already had to handle i915_add_request()
failures, and due to
commit 3bb73aba1ed5198a2c1dfaac4f3c95459930d84a
Author: Chris Wilson <chris@chris-wilson.co.uk>
Date: Fri Jul 20 12:40:59 2012 +0100
drm/i915: Allow late allocation of request for i915_add_request()
the error handling code in intel_overlay.c was prone to double-frees,
as found by Coverity.
Rather than further complicate i915_add_request() and callers, realise
the battle is lost and adapt intel_overlay.c to take advantage of the
late allocation of requests.
v2: Handle callers passing in a NULL seqno.
v3: Ditto. This time for sure.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Jani Nikula <jani.nikula@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
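
As context for the diff below, here is a minimal sketch of the calling-convention change. These are hypothetical fragments distilled from the intel_overlay_continue() hunks that follow, not complete functions: previously the caller preallocated the request and had to free it on every error path; now i915_add_request() allocates the request itself and writes the new seqno through the supplied pointer.

        /* Old pattern: the caller preallocates the request and must free it
         * on every error path, which is what made double-frees possible. */
        request = kzalloc(sizeof(*request), GFP_KERNEL);
        if (request == NULL)
                return -ENOMEM;
        ret = i915_add_request(ring, NULL, request);
        if (ret) {
                kfree(request);
                return ret;
        }
        overlay->last_flip_req = request->seqno;

        /* New pattern: i915_add_request() allocates the request internally
         * and stores the new seqno directly into overlay->last_flip_req. */
        ret = i915_add_request(ring, NULL, &overlay->last_flip_req);
        if (ret)
                return ret;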
Diffstat (limited to 'drivers/gpu/drm/i915/intel_overlay.c')
-rw-r--r--   drivers/gpu/drm/i915/intel_overlay.c   72
1 file changed, 15 insertions(+), 57 deletions(-)
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index afd0f30ab882..555912f510a9 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -210,7 +210,6 @@ static void intel_overlay_unmap_regs(struct intel_overlay *overlay,
 }
 
 static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
-                                         struct drm_i915_gem_request *request,
                                          void (*tail)(struct intel_overlay *))
 {
         struct drm_device *dev = overlay->dev;
@@ -219,12 +218,10 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
         int ret;
 
         BUG_ON(overlay->last_flip_req);
-        ret = i915_add_request(ring, NULL, request);
-        if (ret) {
-                kfree(request);
-                return ret;
-        }
-        overlay->last_flip_req = request->seqno;
+        ret = i915_add_request(ring, NULL, &overlay->last_flip_req);
+        if (ret)
+                return ret;
+
         overlay->flip_tail = tail;
         ret = i915_wait_seqno(ring, overlay->last_flip_req);
         if (ret)
@@ -241,7 +238,6 @@ static int intel_overlay_on(struct intel_overlay *overlay)
         struct drm_device *dev = overlay->dev;
         struct drm_i915_private *dev_priv = dev->dev_private;
         struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
-        struct drm_i915_gem_request *request;
         int ret;
 
         BUG_ON(overlay->active);
@@ -249,17 +245,9 @@ static int intel_overlay_on(struct intel_overlay *overlay)
 
         WARN_ON(IS_I830(dev) && !(dev_priv->quirks & QUIRK_PIPEA_FORCE));
 
-        request = kzalloc(sizeof(*request), GFP_KERNEL);
-        if (request == NULL) {
-                ret = -ENOMEM;
-                goto out;
-        }
-
         ret = intel_ring_begin(ring, 4);
-        if (ret) {
-                kfree(request);
-                goto out;
-        }
+        if (ret)
+                return ret;
 
         intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_ON);
         intel_ring_emit(ring, overlay->flip_addr | OFC_UPDATE);
@@ -267,9 +255,7 @@ static int intel_overlay_on(struct intel_overlay *overlay)
         intel_ring_emit(ring, MI_NOOP);
         intel_ring_advance(ring);
 
-        ret = intel_overlay_do_wait_request(overlay, request, NULL);
-out:
-        return ret;
+        return intel_overlay_do_wait_request(overlay, NULL);
 }
 
 /* overlay needs to be enabled in OCMD reg */
@@ -279,17 +265,12 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
         struct drm_device *dev = overlay->dev;
         drm_i915_private_t *dev_priv = dev->dev_private;
         struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
-        struct drm_i915_gem_request *request;
         u32 flip_addr = overlay->flip_addr;
         u32 tmp;
         int ret;
 
         BUG_ON(!overlay->active);
 
-        request = kzalloc(sizeof(*request), GFP_KERNEL);
-        if (request == NULL)
-                return -ENOMEM;
-
         if (load_polyphase_filter)
                 flip_addr |= OFC_UPDATE;
 
@@ -299,22 +280,14 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
                 DRM_DEBUG("overlay underrun, DOVSTA: %x\n", tmp);
 
         ret = intel_ring_begin(ring, 2);
-        if (ret) {
-                kfree(request);
+        if (ret)
                 return ret;
-        }
+
         intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
         intel_ring_emit(ring, flip_addr);
         intel_ring_advance(ring);
 
-        ret = i915_add_request(ring, NULL, request);
-        if (ret) {
-                kfree(request);
-                return ret;
-        }
-
-        overlay->last_flip_req = request->seqno;
-        return 0;
+        return i915_add_request(ring, NULL, &overlay->last_flip_req);
 }
 
 static void intel_overlay_release_old_vid_tail(struct intel_overlay *overlay)
@@ -350,15 +323,10 @@ static int intel_overlay_off(struct intel_overlay *overlay)
         struct drm_i915_private *dev_priv = dev->dev_private;
         struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
         u32 flip_addr = overlay->flip_addr;
-        struct drm_i915_gem_request *request;
         int ret;
 
         BUG_ON(!overlay->active);
 
-        request = kzalloc(sizeof(*request), GFP_KERNEL);
-        if (request == NULL)
-                return -ENOMEM;
-
         /* According to intel docs the overlay hw may hang (when switching
          * off) without loading the filter coeffs. It is however unclear whether
          * this applies to the disabling of the overlay or to the switching off
@@ -366,10 +334,9 @@ static int intel_overlay_off(struct intel_overlay *overlay)
         flip_addr |= OFC_UPDATE;
 
         ret = intel_ring_begin(ring, 6);
-        if (ret) {
-                kfree(request);
+        if (ret)
                 return ret;
-        }
+
         /* wait for overlay to go idle */
         intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
         intel_ring_emit(ring, flip_addr);
@@ -380,8 +347,7 @@ static int intel_overlay_off(struct intel_overlay *overlay)
         intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
         intel_ring_advance(ring);
 
-        return intel_overlay_do_wait_request(overlay, request,
-                                             intel_overlay_off_tail);
+        return intel_overlay_do_wait_request(overlay, intel_overlay_off_tail);
 }
 
 /* recover from an interruption due to a signal
@@ -426,24 +392,16 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
                 return 0;
 
         if (I915_READ(ISR) & I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT) {
-                struct drm_i915_gem_request *request;
-
                 /* synchronous slowpath */
-                request = kzalloc(sizeof(*request), GFP_KERNEL);
-                if (request == NULL)
-                        return -ENOMEM;
-
                 ret = intel_ring_begin(ring, 2);
-                if (ret) {
-                        kfree(request);
+                if (ret)
                         return ret;
-                }
 
                 intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
                 intel_ring_emit(ring, MI_NOOP);
                 intel_ring_advance(ring);
 
-                ret = intel_overlay_do_wait_request(overlay, request,
+                ret = intel_overlay_do_wait_request(overlay,
                                                     intel_overlay_release_old_vid_tail);
                 if (ret)
                         return ret;