Diffstat (limited to 'drivers')
38 files changed, 1043 insertions, 509 deletions
diff --git a/drivers/dma-buf/dma-fence.c b/drivers/dma-buf/dma-fence.c
index 1551ca7df394..136ec04d683f 100644
--- a/drivers/dma-buf/dma-fence.c
+++ b/drivers/dma-buf/dma-fence.c
| @@ -30,13 +30,16 @@ | |||
| 30 | EXPORT_TRACEPOINT_SYMBOL(dma_fence_emit); | 30 | EXPORT_TRACEPOINT_SYMBOL(dma_fence_emit); |
| 31 | EXPORT_TRACEPOINT_SYMBOL(dma_fence_enable_signal); | 31 | EXPORT_TRACEPOINT_SYMBOL(dma_fence_enable_signal); |
| 32 | 32 | ||
| 33 | static DEFINE_SPINLOCK(dma_fence_stub_lock); | ||
| 34 | static struct dma_fence dma_fence_stub; | ||
| 35 | |||
| 33 | /* | 36 | /* |
| 34 | * fence context counter: each execution context should have its own | 37 | * fence context counter: each execution context should have its own |
| 35 | * fence context, this allows checking if fences belong to the same | 38 | * fence context, this allows checking if fences belong to the same |
| 36 | * context or not. One device can have multiple separate contexts, | 39 | * context or not. One device can have multiple separate contexts, |
| 37 | * and they're used if some engine can run independently of another. | 40 | * and they're used if some engine can run independently of another. |
| 38 | */ | 41 | */ |
| 39 | static atomic64_t dma_fence_context_counter = ATOMIC64_INIT(0); | 42 | static atomic64_t dma_fence_context_counter = ATOMIC64_INIT(1); |
| 40 | 43 | ||
| 41 | /** | 44 | /** |
| 42 | * DOC: DMA fences overview | 45 | * DOC: DMA fences overview |
| @@ -68,6 +71,37 @@ static atomic64_t dma_fence_context_counter = ATOMIC64_INIT(0); | |||
| 68 | * &dma_buf.resv pointer. | 71 | * &dma_buf.resv pointer. |
| 69 | */ | 72 | */ |
| 70 | 73 | ||
| 74 | static const char *dma_fence_stub_get_name(struct dma_fence *fence) | ||
| 75 | { | ||
| 76 | return "stub"; | ||
| 77 | } | ||
| 78 | |||
| 79 | static const struct dma_fence_ops dma_fence_stub_ops = { | ||
| 80 | .get_driver_name = dma_fence_stub_get_name, | ||
| 81 | .get_timeline_name = dma_fence_stub_get_name, | ||
| 82 | }; | ||
| 83 | |||
| 84 | /** | ||
| 85 | * dma_fence_get_stub - return a signaled fence | ||
| 86 | * | ||
| 87 | * Return a stub fence which is already signaled. | ||
| 88 | */ | ||
| 89 | struct dma_fence *dma_fence_get_stub(void) | ||
| 90 | { | ||
| 91 | spin_lock(&dma_fence_stub_lock); | ||
| 92 | if (!dma_fence_stub.ops) { | ||
| 93 | dma_fence_init(&dma_fence_stub, | ||
| 94 | &dma_fence_stub_ops, | ||
| 95 | &dma_fence_stub_lock, | ||
| 96 | 0, 0); | ||
| 97 | dma_fence_signal_locked(&dma_fence_stub); | ||
| 98 | } | ||
| 99 | spin_unlock(&dma_fence_stub_lock); | ||
| 100 | |||
| 101 | return dma_fence_get(&dma_fence_stub); | ||
| 102 | } | ||
| 103 | EXPORT_SYMBOL(dma_fence_get_stub); | ||
| 104 | |||
| 71 | /** | 105 | /** |
| 72 | * dma_fence_context_alloc - allocate an array of fence contexts | 106 | * dma_fence_context_alloc - allocate an array of fence contexts |
| 73 | * @num: amount of contexts to allocate | 107 | * @num: amount of contexts to allocate |
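The new dma_fence_get_stub() gives every caller a shared, statically allocated fence that is already signalled, instead of each user allocating its own stub (as drm_syncobj did before this series; see the drm_syncobj.c hunks below). A minimal sketch of a caller, assuming only what this hunk exports (a referenced, signalled fence that the caller must release with dma_fence_put()):

    #include <linux/dma-fence.h>

    /*
     * Hypothetical helper: always hand back something to wait on, even if
     * no real work was ever submitted. The stub is already signalled, so
     * waiters return immediately; the reference still has to be dropped.
     */
    static struct dma_fence *example_last_fence(struct dma_fence *last_job)
    {
            if (last_job)
                    return dma_fence_get(last_job);

            return dma_fence_get_stub();
    }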
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 024dfbd87f11..dc54e9efd910 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
| @@ -1193,7 +1193,7 @@ static void amdgpu_cs_post_dependencies(struct amdgpu_cs_parser *p) | |||
| 1193 | int i; | 1193 | int i; |
| 1194 | 1194 | ||
| 1195 | for (i = 0; i < p->num_post_dep_syncobjs; ++i) | 1195 | for (i = 0; i < p->num_post_dep_syncobjs; ++i) |
| 1196 | drm_syncobj_replace_fence(p->post_dep_syncobjs[i], 0, p->fence); | 1196 | drm_syncobj_replace_fence(p->post_dep_syncobjs[i], p->fence); |
| 1197 | } | 1197 | } |
| 1198 | 1198 | ||
| 1199 | static int amdgpu_cs_submit(struct amdgpu_cs_parser *p, | 1199 | static int amdgpu_cs_submit(struct amdgpu_cs_parser *p, |
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 5ed12144ceb7..54e2ae614dcc 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
| @@ -3131,27 +3131,104 @@ void drm_atomic_helper_shutdown(struct drm_device *dev) | |||
| 3131 | struct drm_modeset_acquire_ctx ctx; | 3131 | struct drm_modeset_acquire_ctx ctx; |
| 3132 | int ret; | 3132 | int ret; |
| 3133 | 3133 | ||
| 3134 | drm_modeset_acquire_init(&ctx, 0); | 3134 | DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, ret); |
| 3135 | while (1) { | ||
| 3136 | ret = drm_modeset_lock_all_ctx(dev, &ctx); | ||
| 3137 | if (!ret) | ||
| 3138 | ret = __drm_atomic_helper_disable_all(dev, &ctx, true); | ||
| 3139 | |||
| 3140 | if (ret != -EDEADLK) | ||
| 3141 | break; | ||
| 3142 | |||
| 3143 | drm_modeset_backoff(&ctx); | ||
| 3144 | } | ||
| 3145 | 3135 | ||
| 3136 | ret = __drm_atomic_helper_disable_all(dev, &ctx, true); | ||
| 3146 | if (ret) | 3137 | if (ret) |
| 3147 | DRM_ERROR("Disabling all crtc's during unload failed with %i\n", ret); | 3138 | DRM_ERROR("Disabling all crtc's during unload failed with %i\n", ret); |
| 3148 | 3139 | ||
| 3149 | drm_modeset_drop_locks(&ctx); | 3140 | DRM_MODESET_LOCK_ALL_END(ctx, ret); |
| 3150 | drm_modeset_acquire_fini(&ctx); | ||
| 3151 | } | 3141 | } |
| 3152 | EXPORT_SYMBOL(drm_atomic_helper_shutdown); | 3142 | EXPORT_SYMBOL(drm_atomic_helper_shutdown); |
| 3153 | 3143 | ||
| 3154 | /** | 3144 | /** |
| 3145 | * drm_atomic_helper_duplicate_state - duplicate an atomic state object | ||
| 3146 | * @dev: DRM device | ||
| 3147 | * @ctx: lock acquisition context | ||
| 3148 | * | ||
| 3149 | * Makes a copy of the current atomic state by looping over all objects and | ||
| 3150 | * duplicating their respective states. This is used for example by suspend/ | ||
| 3151 | * resume support code to save the state prior to suspend such that it can | ||
| 3152 | * be restored upon resume. | ||
| 3153 | * | ||
| 3154 | * Note that this treats atomic state as persistent between save and restore. | ||
| 3155 | * Drivers must make sure that this is possible and won't result in confusion | ||
| 3156 | * or erroneous behaviour. | ||
| 3157 | * | ||
| 3158 | * Note that if callers haven't already acquired all modeset locks this might | ||
| 3159 | * return -EDEADLK, which must be handled by calling drm_modeset_backoff(). | ||
| 3160 | * | ||
| 3161 | * Returns: | ||
| 3162 | * A pointer to the copy of the atomic state object on success or an | ||
| 3163 | * ERR_PTR()-encoded error code on failure. | ||
| 3164 | * | ||
| 3165 | * See also: | ||
| 3166 | * drm_atomic_helper_suspend(), drm_atomic_helper_resume() | ||
| 3167 | */ | ||
| 3168 | struct drm_atomic_state * | ||
| 3169 | drm_atomic_helper_duplicate_state(struct drm_device *dev, | ||
| 3170 | struct drm_modeset_acquire_ctx *ctx) | ||
| 3171 | { | ||
| 3172 | struct drm_atomic_state *state; | ||
| 3173 | struct drm_connector *conn; | ||
| 3174 | struct drm_connector_list_iter conn_iter; | ||
| 3175 | struct drm_plane *plane; | ||
| 3176 | struct drm_crtc *crtc; | ||
| 3177 | int err = 0; | ||
| 3178 | |||
| 3179 | state = drm_atomic_state_alloc(dev); | ||
| 3180 | if (!state) | ||
| 3181 | return ERR_PTR(-ENOMEM); | ||
| 3182 | |||
| 3183 | state->acquire_ctx = ctx; | ||
| 3184 | |||
| 3185 | drm_for_each_crtc(crtc, dev) { | ||
| 3186 | struct drm_crtc_state *crtc_state; | ||
| 3187 | |||
| 3188 | crtc_state = drm_atomic_get_crtc_state(state, crtc); | ||
| 3189 | if (IS_ERR(crtc_state)) { | ||
| 3190 | err = PTR_ERR(crtc_state); | ||
| 3191 | goto free; | ||
| 3192 | } | ||
| 3193 | } | ||
| 3194 | |||
| 3195 | drm_for_each_plane(plane, dev) { | ||
| 3196 | struct drm_plane_state *plane_state; | ||
| 3197 | |||
| 3198 | plane_state = drm_atomic_get_plane_state(state, plane); | ||
| 3199 | if (IS_ERR(plane_state)) { | ||
| 3200 | err = PTR_ERR(plane_state); | ||
| 3201 | goto free; | ||
| 3202 | } | ||
| 3203 | } | ||
| 3204 | |||
| 3205 | drm_connector_list_iter_begin(dev, &conn_iter); | ||
| 3206 | drm_for_each_connector_iter(conn, &conn_iter) { | ||
| 3207 | struct drm_connector_state *conn_state; | ||
| 3208 | |||
| 3209 | conn_state = drm_atomic_get_connector_state(state, conn); | ||
| 3210 | if (IS_ERR(conn_state)) { | ||
| 3211 | err = PTR_ERR(conn_state); | ||
| 3212 | drm_connector_list_iter_end(&conn_iter); | ||
| 3213 | goto free; | ||
| 3214 | } | ||
| 3215 | } | ||
| 3216 | drm_connector_list_iter_end(&conn_iter); | ||
| 3217 | |||
| 3218 | /* clear the acquire context so that it isn't accidentally reused */ | ||
| 3219 | state->acquire_ctx = NULL; | ||
| 3220 | |||
| 3221 | free: | ||
| 3222 | if (err < 0) { | ||
| 3223 | drm_atomic_state_put(state); | ||
| 3224 | state = ERR_PTR(err); | ||
| 3225 | } | ||
| 3226 | |||
| 3227 | return state; | ||
| 3228 | } | ||
| 3229 | EXPORT_SYMBOL(drm_atomic_helper_duplicate_state); | ||
| 3230 | |||
| 3231 | /** | ||
| 3155 | * drm_atomic_helper_suspend - subsystem-level suspend helper | 3232 | * drm_atomic_helper_suspend - subsystem-level suspend helper |
| 3156 | * @dev: DRM device | 3233 | * @dev: DRM device |
| 3157 | * | 3234 | * |
| @@ -3182,14 +3259,10 @@ struct drm_atomic_state *drm_atomic_helper_suspend(struct drm_device *dev) | |||
| 3182 | struct drm_atomic_state *state; | 3259 | struct drm_atomic_state *state; |
| 3183 | int err; | 3260 | int err; |
| 3184 | 3261 | ||
| 3185 | drm_modeset_acquire_init(&ctx, 0); | 3262 | /* This can never be returned, but it makes the compiler happy */ |
| 3263 | state = ERR_PTR(-EINVAL); | ||
| 3186 | 3264 | ||
| 3187 | retry: | 3265 | DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, err); |
| 3188 | err = drm_modeset_lock_all_ctx(dev, &ctx); | ||
| 3189 | if (err < 0) { | ||
| 3190 | state = ERR_PTR(err); | ||
| 3191 | goto unlock; | ||
| 3192 | } | ||
| 3193 | 3266 | ||
| 3194 | state = drm_atomic_helper_duplicate_state(dev, &ctx); | 3267 | state = drm_atomic_helper_duplicate_state(dev, &ctx); |
| 3195 | if (IS_ERR(state)) | 3268 | if (IS_ERR(state)) |
| @@ -3203,13 +3276,10 @@ retry: | |||
| 3203 | } | 3276 | } |
| 3204 | 3277 | ||
| 3205 | unlock: | 3278 | unlock: |
| 3206 | if (PTR_ERR(state) == -EDEADLK) { | 3279 | DRM_MODESET_LOCK_ALL_END(ctx, err); |
| 3207 | drm_modeset_backoff(&ctx); | 3280 | if (err) |
| 3208 | goto retry; | 3281 | return ERR_PTR(err); |
| 3209 | } | ||
| 3210 | 3282 | ||
| 3211 | drm_modeset_drop_locks(&ctx); | ||
| 3212 | drm_modeset_acquire_fini(&ctx); | ||
| 3213 | return state; | 3283 | return state; |
| 3214 | } | 3284 | } |
| 3215 | EXPORT_SYMBOL(drm_atomic_helper_suspend); | 3285 | EXPORT_SYMBOL(drm_atomic_helper_suspend); |
| @@ -3232,7 +3302,7 @@ EXPORT_SYMBOL(drm_atomic_helper_suspend); | |||
| 3232 | int drm_atomic_helper_commit_duplicated_state(struct drm_atomic_state *state, | 3302 | int drm_atomic_helper_commit_duplicated_state(struct drm_atomic_state *state, |
| 3233 | struct drm_modeset_acquire_ctx *ctx) | 3303 | struct drm_modeset_acquire_ctx *ctx) |
| 3234 | { | 3304 | { |
| 3235 | int i; | 3305 | int i, ret; |
| 3236 | struct drm_plane *plane; | 3306 | struct drm_plane *plane; |
| 3237 | struct drm_plane_state *new_plane_state; | 3307 | struct drm_plane_state *new_plane_state; |
| 3238 | struct drm_connector *connector; | 3308 | struct drm_connector *connector; |
| @@ -3251,7 +3321,11 @@ int drm_atomic_helper_commit_duplicated_state(struct drm_atomic_state *state, | |||
| 3251 | for_each_new_connector_in_state(state, connector, new_conn_state, i) | 3321 | for_each_new_connector_in_state(state, connector, new_conn_state, i) |
| 3252 | state->connectors[i].old_state = connector->state; | 3322 | state->connectors[i].old_state = connector->state; |
| 3253 | 3323 | ||
| 3254 | return drm_atomic_commit(state); | 3324 | ret = drm_atomic_commit(state); |
| 3325 | |||
| 3326 | state->acquire_ctx = NULL; | ||
| 3327 | |||
| 3328 | return ret; | ||
| 3255 | } | 3329 | } |
| 3256 | EXPORT_SYMBOL(drm_atomic_helper_commit_duplicated_state); | 3330 | EXPORT_SYMBOL(drm_atomic_helper_commit_duplicated_state); |
| 3257 | 3331 | ||
| @@ -3279,23 +3353,12 @@ int drm_atomic_helper_resume(struct drm_device *dev, | |||
| 3279 | 3353 | ||
| 3280 | drm_mode_config_reset(dev); | 3354 | drm_mode_config_reset(dev); |
| 3281 | 3355 | ||
| 3282 | drm_modeset_acquire_init(&ctx, 0); | 3356 | DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, err); |
| 3283 | while (1) { | ||
| 3284 | err = drm_modeset_lock_all_ctx(dev, &ctx); | ||
| 3285 | if (err) | ||
| 3286 | goto out; | ||
| 3287 | 3357 | ||
| 3288 | err = drm_atomic_helper_commit_duplicated_state(state, &ctx); | 3358 | err = drm_atomic_helper_commit_duplicated_state(state, &ctx); |
| 3289 | out: | ||
| 3290 | if (err != -EDEADLK) | ||
| 3291 | break; | ||
| 3292 | |||
| 3293 | drm_modeset_backoff(&ctx); | ||
| 3294 | } | ||
| 3295 | 3359 | ||
| 3360 | DRM_MODESET_LOCK_ALL_END(ctx, err); | ||
| 3296 | drm_atomic_state_put(state); | 3361 | drm_atomic_state_put(state); |
| 3297 | drm_modeset_drop_locks(&ctx); | ||
| 3298 | drm_modeset_acquire_fini(&ctx); | ||
| 3299 | 3362 | ||
| 3300 | return err; | 3363 | return err; |
| 3301 | } | 3364 | } |
| @@ -3434,3 +3497,73 @@ fail: | |||
| 3434 | return ret; | 3497 | return ret; |
| 3435 | } | 3498 | } |
| 3436 | EXPORT_SYMBOL(drm_atomic_helper_page_flip_target); | 3499 | EXPORT_SYMBOL(drm_atomic_helper_page_flip_target); |
| 3500 | |||
| 3501 | /** | ||
| 3502 | * drm_atomic_helper_legacy_gamma_set - set the legacy gamma correction table | ||
| 3503 | * @crtc: CRTC object | ||
| 3504 | * @red: red correction table | ||
| 3505 | * @green: green correction table | ||
| 3506 | * @blue: green correction table | ||
| 3507 | * @size: size of the tables | ||
| 3508 | * @ctx: lock acquire context | ||
| 3509 | * | ||
| 3510 | * Implements support for legacy gamma correction table for drivers | ||
| 3511 | * that support color management through the DEGAMMA_LUT/GAMMA_LUT | ||
| 3512 | * properties. See drm_crtc_enable_color_mgmt() and the containing chapter for | ||
| 3513 | * how the atomic color management and gamma tables work. | ||
| 3514 | */ | ||
| 3515 | int drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc, | ||
| 3516 | u16 *red, u16 *green, u16 *blue, | ||
| 3517 | uint32_t size, | ||
| 3518 | struct drm_modeset_acquire_ctx *ctx) | ||
| 3519 | { | ||
| 3520 | struct drm_device *dev = crtc->dev; | ||
| 3521 | struct drm_atomic_state *state; | ||
| 3522 | struct drm_crtc_state *crtc_state; | ||
| 3523 | struct drm_property_blob *blob = NULL; | ||
| 3524 | struct drm_color_lut *blob_data; | ||
| 3525 | int i, ret = 0; | ||
| 3526 | bool replaced; | ||
| 3527 | |||
| 3528 | state = drm_atomic_state_alloc(crtc->dev); | ||
| 3529 | if (!state) | ||
| 3530 | return -ENOMEM; | ||
| 3531 | |||
| 3532 | blob = drm_property_create_blob(dev, | ||
| 3533 | sizeof(struct drm_color_lut) * size, | ||
| 3534 | NULL); | ||
| 3535 | if (IS_ERR(blob)) { | ||
| 3536 | ret = PTR_ERR(blob); | ||
| 3537 | blob = NULL; | ||
| 3538 | goto fail; | ||
| 3539 | } | ||
| 3540 | |||
| 3541 | /* Prepare GAMMA_LUT with the legacy values. */ | ||
| 3542 | blob_data = blob->data; | ||
| 3543 | for (i = 0; i < size; i++) { | ||
| 3544 | blob_data[i].red = red[i]; | ||
| 3545 | blob_data[i].green = green[i]; | ||
| 3546 | blob_data[i].blue = blue[i]; | ||
| 3547 | } | ||
| 3548 | |||
| 3549 | state->acquire_ctx = ctx; | ||
| 3550 | crtc_state = drm_atomic_get_crtc_state(state, crtc); | ||
| 3551 | if (IS_ERR(crtc_state)) { | ||
| 3552 | ret = PTR_ERR(crtc_state); | ||
| 3553 | goto fail; | ||
| 3554 | } | ||
| 3555 | |||
| 3556 | /* Reset DEGAMMA_LUT and CTM properties. */ | ||
| 3557 | replaced = drm_property_replace_blob(&crtc_state->degamma_lut, NULL); | ||
| 3558 | replaced |= drm_property_replace_blob(&crtc_state->ctm, NULL); | ||
| 3559 | replaced |= drm_property_replace_blob(&crtc_state->gamma_lut, blob); | ||
| 3560 | crtc_state->color_mgmt_changed |= replaced; | ||
| 3561 | |||
| 3562 | ret = drm_atomic_commit(state); | ||
| 3563 | |||
| 3564 | fail: | ||
| 3565 | drm_atomic_state_put(state); | ||
| 3566 | drm_property_blob_put(blob); | ||
| 3567 | return ret; | ||
| 3568 | } | ||
| 3569 | EXPORT_SYMBOL(drm_atomic_helper_legacy_gamma_set); | ||
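drm_atomic_helper_legacy_gamma_set() (moved here from drm_atomic_state_helper.c, see below) is the helper atomic drivers plug into the legacy gamma ioctl path. A sketch of the usual wiring, assuming the long-standing &drm_crtc_funcs hooks rather than anything introduced by this patch:

    static const struct drm_crtc_funcs example_crtc_funcs = {
            /* route the legacy SETGAMMA ioctl through the atomic helper */
            .gamma_set              = drm_atomic_helper_legacy_gamma_set,
            .set_config             = drm_atomic_helper_set_config,
            .page_flip              = drm_atomic_helper_page_flip,
            .reset                  = drm_atomic_helper_crtc_reset,
            .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
            .atomic_destroy_state   = drm_atomic_helper_crtc_destroy_state,
            .destroy                = drm_crtc_cleanup,
    };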
diff --git a/drivers/gpu/drm/drm_atomic_state_helper.c b/drivers/gpu/drm/drm_atomic_state_helper.c
index 3ba996069d69..60bd7d708e35 100644
--- a/drivers/gpu/drm/drm_atomic_state_helper.c
+++ b/drivers/gpu/drm/drm_atomic_state_helper.c
| @@ -394,93 +394,6 @@ drm_atomic_helper_connector_duplicate_state(struct drm_connector *connector) | |||
| 394 | EXPORT_SYMBOL(drm_atomic_helper_connector_duplicate_state); | 394 | EXPORT_SYMBOL(drm_atomic_helper_connector_duplicate_state); |
| 395 | 395 | ||
| 396 | /** | 396 | /** |
| 397 | * drm_atomic_helper_duplicate_state - duplicate an atomic state object | ||
| 398 | * @dev: DRM device | ||
| 399 | * @ctx: lock acquisition context | ||
| 400 | * | ||
| 401 | * Makes a copy of the current atomic state by looping over all objects and | ||
| 402 | * duplicating their respective states. This is used for example by suspend/ | ||
| 403 | * resume support code to save the state prior to suspend such that it can | ||
| 404 | * be restored upon resume. | ||
| 405 | * | ||
| 406 | * Note that this treats atomic state as persistent between save and restore. | ||
| 407 | * Drivers must make sure that this is possible and won't result in confusion | ||
| 408 | * or erroneous behaviour. | ||
| 409 | * | ||
| 410 | * Note that if callers haven't already acquired all modeset locks this might | ||
| 411 | * return -EDEADLK, which must be handled by calling drm_modeset_backoff(). | ||
| 412 | * | ||
| 413 | * Returns: | ||
| 414 | * A pointer to the copy of the atomic state object on success or an | ||
| 415 | * ERR_PTR()-encoded error code on failure. | ||
| 416 | * | ||
| 417 | * See also: | ||
| 418 | * drm_atomic_helper_suspend(), drm_atomic_helper_resume() | ||
| 419 | */ | ||
| 420 | struct drm_atomic_state * | ||
| 421 | drm_atomic_helper_duplicate_state(struct drm_device *dev, | ||
| 422 | struct drm_modeset_acquire_ctx *ctx) | ||
| 423 | { | ||
| 424 | struct drm_atomic_state *state; | ||
| 425 | struct drm_connector *conn; | ||
| 426 | struct drm_connector_list_iter conn_iter; | ||
| 427 | struct drm_plane *plane; | ||
| 428 | struct drm_crtc *crtc; | ||
| 429 | int err = 0; | ||
| 430 | |||
| 431 | state = drm_atomic_state_alloc(dev); | ||
| 432 | if (!state) | ||
| 433 | return ERR_PTR(-ENOMEM); | ||
| 434 | |||
| 435 | state->acquire_ctx = ctx; | ||
| 436 | |||
| 437 | drm_for_each_crtc(crtc, dev) { | ||
| 438 | struct drm_crtc_state *crtc_state; | ||
| 439 | |||
| 440 | crtc_state = drm_atomic_get_crtc_state(state, crtc); | ||
| 441 | if (IS_ERR(crtc_state)) { | ||
| 442 | err = PTR_ERR(crtc_state); | ||
| 443 | goto free; | ||
| 444 | } | ||
| 445 | } | ||
| 446 | |||
| 447 | drm_for_each_plane(plane, dev) { | ||
| 448 | struct drm_plane_state *plane_state; | ||
| 449 | |||
| 450 | plane_state = drm_atomic_get_plane_state(state, plane); | ||
| 451 | if (IS_ERR(plane_state)) { | ||
| 452 | err = PTR_ERR(plane_state); | ||
| 453 | goto free; | ||
| 454 | } | ||
| 455 | } | ||
| 456 | |||
| 457 | drm_connector_list_iter_begin(dev, &conn_iter); | ||
| 458 | drm_for_each_connector_iter(conn, &conn_iter) { | ||
| 459 | struct drm_connector_state *conn_state; | ||
| 460 | |||
| 461 | conn_state = drm_atomic_get_connector_state(state, conn); | ||
| 462 | if (IS_ERR(conn_state)) { | ||
| 463 | err = PTR_ERR(conn_state); | ||
| 464 | drm_connector_list_iter_end(&conn_iter); | ||
| 465 | goto free; | ||
| 466 | } | ||
| 467 | } | ||
| 468 | drm_connector_list_iter_end(&conn_iter); | ||
| 469 | |||
| 470 | /* clear the acquire context so that it isn't accidentally reused */ | ||
| 471 | state->acquire_ctx = NULL; | ||
| 472 | |||
| 473 | free: | ||
| 474 | if (err < 0) { | ||
| 475 | drm_atomic_state_put(state); | ||
| 476 | state = ERR_PTR(err); | ||
| 477 | } | ||
| 478 | |||
| 479 | return state; | ||
| 480 | } | ||
| 481 | EXPORT_SYMBOL(drm_atomic_helper_duplicate_state); | ||
| 482 | |||
| 483 | /** | ||
| 484 | * __drm_atomic_helper_connector_destroy_state - release connector state | 397 | * __drm_atomic_helper_connector_destroy_state - release connector state |
| 485 | * @state: connector state object to release | 398 | * @state: connector state object to release |
| 486 | * | 399 | * |
| @@ -516,76 +429,6 @@ void drm_atomic_helper_connector_destroy_state(struct drm_connector *connector, | |||
| 516 | EXPORT_SYMBOL(drm_atomic_helper_connector_destroy_state); | 429 | EXPORT_SYMBOL(drm_atomic_helper_connector_destroy_state); |
| 517 | 430 | ||
| 518 | /** | 431 | /** |
| 519 | * drm_atomic_helper_legacy_gamma_set - set the legacy gamma correction table | ||
| 520 | * @crtc: CRTC object | ||
| 521 | * @red: red correction table | ||
| 522 | * @green: green correction table | ||
| 523 | * @blue: green correction table | ||
| 524 | * @size: size of the tables | ||
| 525 | * @ctx: lock acquire context | ||
| 526 | * | ||
| 527 | * Implements support for legacy gamma correction table for drivers | ||
| 528 | * that support color management through the DEGAMMA_LUT/GAMMA_LUT | ||
| 529 | * properties. See drm_crtc_enable_color_mgmt() and the containing chapter for | ||
| 530 | * how the atomic color management and gamma tables work. | ||
| 531 | */ | ||
| 532 | int drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc, | ||
| 533 | u16 *red, u16 *green, u16 *blue, | ||
| 534 | uint32_t size, | ||
| 535 | struct drm_modeset_acquire_ctx *ctx) | ||
| 536 | { | ||
| 537 | struct drm_device *dev = crtc->dev; | ||
| 538 | struct drm_atomic_state *state; | ||
| 539 | struct drm_crtc_state *crtc_state; | ||
| 540 | struct drm_property_blob *blob = NULL; | ||
| 541 | struct drm_color_lut *blob_data; | ||
| 542 | int i, ret = 0; | ||
| 543 | bool replaced; | ||
| 544 | |||
| 545 | state = drm_atomic_state_alloc(crtc->dev); | ||
| 546 | if (!state) | ||
| 547 | return -ENOMEM; | ||
| 548 | |||
| 549 | blob = drm_property_create_blob(dev, | ||
| 550 | sizeof(struct drm_color_lut) * size, | ||
| 551 | NULL); | ||
| 552 | if (IS_ERR(blob)) { | ||
| 553 | ret = PTR_ERR(blob); | ||
| 554 | blob = NULL; | ||
| 555 | goto fail; | ||
| 556 | } | ||
| 557 | |||
| 558 | /* Prepare GAMMA_LUT with the legacy values. */ | ||
| 559 | blob_data = blob->data; | ||
| 560 | for (i = 0; i < size; i++) { | ||
| 561 | blob_data[i].red = red[i]; | ||
| 562 | blob_data[i].green = green[i]; | ||
| 563 | blob_data[i].blue = blue[i]; | ||
| 564 | } | ||
| 565 | |||
| 566 | state->acquire_ctx = ctx; | ||
| 567 | crtc_state = drm_atomic_get_crtc_state(state, crtc); | ||
| 568 | if (IS_ERR(crtc_state)) { | ||
| 569 | ret = PTR_ERR(crtc_state); | ||
| 570 | goto fail; | ||
| 571 | } | ||
| 572 | |||
| 573 | /* Reset DEGAMMA_LUT and CTM properties. */ | ||
| 574 | replaced = drm_property_replace_blob(&crtc_state->degamma_lut, NULL); | ||
| 575 | replaced |= drm_property_replace_blob(&crtc_state->ctm, NULL); | ||
| 576 | replaced |= drm_property_replace_blob(&crtc_state->gamma_lut, blob); | ||
| 577 | crtc_state->color_mgmt_changed |= replaced; | ||
| 578 | |||
| 579 | ret = drm_atomic_commit(state); | ||
| 580 | |||
| 581 | fail: | ||
| 582 | drm_atomic_state_put(state); | ||
| 583 | drm_property_blob_put(blob); | ||
| 584 | return ret; | ||
| 585 | } | ||
| 586 | EXPORT_SYMBOL(drm_atomic_helper_legacy_gamma_set); | ||
| 587 | |||
| 588 | /** | ||
| 589 | * __drm_atomic_helper_private_duplicate_state - copy atomic private state | 432 | * __drm_atomic_helper_private_duplicate_state - copy atomic private state |
| 590 | * @obj: CRTC object | 433 | * @obj: CRTC object |
| 591 | * @state: new private object state | 434 | * @state: new private object state |
diff --git a/drivers/gpu/drm/drm_color_mgmt.c b/drivers/gpu/drm/drm_color_mgmt.c
index 581cc3788223..07dcf47daafe 100644
--- a/drivers/gpu/drm/drm_color_mgmt.c
+++ b/drivers/gpu/drm/drm_color_mgmt.c
| @@ -255,11 +255,7 @@ int drm_mode_gamma_set_ioctl(struct drm_device *dev, | |||
| 255 | if (crtc_lut->gamma_size != crtc->gamma_size) | 255 | if (crtc_lut->gamma_size != crtc->gamma_size) |
| 256 | return -EINVAL; | 256 | return -EINVAL; |
| 257 | 257 | ||
| 258 | drm_modeset_acquire_init(&ctx, 0); | 258 | DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, ret); |
| 259 | retry: | ||
| 260 | ret = drm_modeset_lock_all_ctx(dev, &ctx); | ||
| 261 | if (ret) | ||
| 262 | goto out; | ||
| 263 | 259 | ||
| 264 | size = crtc_lut->gamma_size * (sizeof(uint16_t)); | 260 | size = crtc_lut->gamma_size * (sizeof(uint16_t)); |
| 265 | r_base = crtc->gamma_store; | 261 | r_base = crtc->gamma_store; |
| @@ -284,13 +280,7 @@ retry: | |||
| 284 | crtc->gamma_size, &ctx); | 280 | crtc->gamma_size, &ctx); |
| 285 | 281 | ||
| 286 | out: | 282 | out: |
| 287 | if (ret == -EDEADLK) { | 283 | DRM_MODESET_LOCK_ALL_END(ctx, ret); |
| 288 | drm_modeset_backoff(&ctx); | ||
| 289 | goto retry; | ||
| 290 | } | ||
| 291 | drm_modeset_drop_locks(&ctx); | ||
| 292 | drm_modeset_acquire_fini(&ctx); | ||
| 293 | |||
| 294 | return ret; | 284 | return ret; |
| 295 | 285 | ||
| 296 | } | 286 | } |
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 6f8ddfcfaba5..1593dd6cdfb7 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
| @@ -572,9 +572,9 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data, | |||
| 572 | struct drm_mode_crtc *crtc_req = data; | 572 | struct drm_mode_crtc *crtc_req = data; |
| 573 | struct drm_crtc *crtc; | 573 | struct drm_crtc *crtc; |
| 574 | struct drm_plane *plane; | 574 | struct drm_plane *plane; |
| 575 | struct drm_connector **connector_set, *connector; | 575 | struct drm_connector **connector_set = NULL, *connector; |
| 576 | struct drm_framebuffer *fb; | 576 | struct drm_framebuffer *fb = NULL; |
| 577 | struct drm_display_mode *mode; | 577 | struct drm_display_mode *mode = NULL; |
| 578 | struct drm_mode_set set; | 578 | struct drm_mode_set set; |
| 579 | uint32_t __user *set_connectors_ptr; | 579 | uint32_t __user *set_connectors_ptr; |
| 580 | struct drm_modeset_acquire_ctx ctx; | 580 | struct drm_modeset_acquire_ctx ctx; |
| @@ -601,15 +601,8 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data, | |||
| 601 | plane = crtc->primary; | 601 | plane = crtc->primary; |
| 602 | 602 | ||
| 603 | mutex_lock(&crtc->dev->mode_config.mutex); | 603 | mutex_lock(&crtc->dev->mode_config.mutex); |
| 604 | drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE); | 604 | DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, |
| 605 | retry: | 605 | DRM_MODESET_ACQUIRE_INTERRUPTIBLE, ret); |
| 606 | connector_set = NULL; | ||
| 607 | fb = NULL; | ||
| 608 | mode = NULL; | ||
| 609 | |||
| 610 | ret = drm_modeset_lock_all_ctx(crtc->dev, &ctx); | ||
| 611 | if (ret) | ||
| 612 | goto out; | ||
| 613 | 606 | ||
| 614 | if (crtc_req->mode_valid) { | 607 | if (crtc_req->mode_valid) { |
| 615 | /* If we have a mode we need a framebuffer. */ | 608 | /* If we have a mode we need a framebuffer. */ |
| @@ -768,13 +761,13 @@ out: | |||
| 768 | } | 761 | } |
| 769 | kfree(connector_set); | 762 | kfree(connector_set); |
| 770 | drm_mode_destroy(dev, mode); | 763 | drm_mode_destroy(dev, mode); |
| 771 | if (ret == -EDEADLK) { | 764 | |
| 772 | ret = drm_modeset_backoff(&ctx); | 765 | /* In case we need to retry... */ |
| 773 | if (!ret) | 766 | connector_set = NULL; |
| 774 | goto retry; | 767 | fb = NULL; |
| 775 | } | 768 | mode = NULL; |
| 776 | drm_modeset_drop_locks(&ctx); | 769 | |
| 777 | drm_modeset_acquire_fini(&ctx); | 770 | DRM_MODESET_LOCK_ALL_END(ctx, ret); |
| 778 | mutex_unlock(&crtc->dev->mode_config.mutex); | 771 | mutex_unlock(&crtc->dev->mode_config.mutex); |
| 779 | 772 | ||
| 780 | return ret; | 773 | return ret; |
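The re-initialisation of connector_set, fb and mode just before DRM_MODESET_LOCK_ALL_END() is needed because the END macro can jump back to where the BEGIN macro took the locks when a -EDEADLK backoff succeeds; anything the out: path has already freed must be reset so the retry neither double-frees nor reuses stale pointers. Condensed, the resulting shape of drm_mode_setcrtc() is:

    DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE, ret);
    /* ... build connector_set/fb/mode and apply the configuration ... */
    out:
            kfree(connector_set);
            drm_mode_destroy(dev, mode);
            /* reset locals: END may loop back to BEGIN on -EDEADLK */
            connector_set = NULL;
            fb = NULL;
            mode = NULL;
    DRM_MODESET_LOCK_ALL_END(ctx, ret);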
diff --git a/drivers/gpu/drm/drm_modeset_lock.c b/drivers/gpu/drm/drm_modeset_lock.c
index 8a5100685875..51f534db9107 100644
--- a/drivers/gpu/drm/drm_modeset_lock.c
+++ b/drivers/gpu/drm/drm_modeset_lock.c
| @@ -56,6 +56,10 @@ | |||
| 56 | * drm_modeset_drop_locks(ctx); | 56 | * drm_modeset_drop_locks(ctx); |
| 57 | * drm_modeset_acquire_fini(ctx); | 57 | * drm_modeset_acquire_fini(ctx); |
| 58 | * | 58 | * |
| 59 | * For convenience this control flow is implemented in | ||
| 60 | * DRM_MODESET_LOCK_ALL_BEGIN() and DRM_MODESET_LOCK_ALL_END() for the case | ||
| 61 | * where all modeset locks need to be taken through drm_modeset_lock_all_ctx(). | ||
| 62 | * | ||
| 59 | * If all that is needed is a single modeset lock, then the &struct | 63 | * If all that is needed is a single modeset lock, then the &struct |
| 60 | * drm_modeset_acquire_ctx is not needed and the locking can be simplified | 64 | * drm_modeset_acquire_ctx is not needed and the locking can be simplified |
| 61 | * by passing a NULL instead of ctx in the drm_modeset_lock() call or | 65 | * by passing a NULL instead of ctx in the drm_modeset_lock() call or |
| @@ -383,6 +387,8 @@ EXPORT_SYMBOL(drm_modeset_unlock); | |||
| 383 | * Locks acquired with this function should be released by calling the | 387 | * Locks acquired with this function should be released by calling the |
| 384 | * drm_modeset_drop_locks() function on @ctx. | 388 | * drm_modeset_drop_locks() function on @ctx. |
| 385 | * | 389 | * |
| 390 | * See also: DRM_MODESET_LOCK_ALL_BEGIN() and DRM_MODESET_LOCK_ALL_END() | ||
| 391 | * | ||
| 386 | * Returns: 0 on success or a negative error-code on failure. | 392 | * Returns: 0 on success or a negative error-code on failure. |
| 387 | */ | 393 | */ |
| 388 | int drm_modeset_lock_all_ctx(struct drm_device *dev, | 394 | int drm_modeset_lock_all_ctx(struct drm_device *dev, |
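DRM_MODESET_LOCK_ALL_BEGIN()/DRM_MODESET_LOCK_ALL_END() themselves live in include/drm/drm_modeset_lock.h and are not part of this diff. As a rough sketch of the control flow they wrap, not a verbatim copy of the header, the pair folds the acquire/retry/backoff loop that the call sites above used to open-code into two macros:

    /* approximate shape of the lock-all helpers, for illustration only */
    #define EXAMPLE_LOCK_ALL_BEGIN(dev, ctx, flags, ret)            \
            drm_modeset_acquire_init(&(ctx), flags);                \
    example_retry:                                                  \
            ret = drm_modeset_lock_all_ctx(dev, &(ctx));            \
            if (ret)                                                \
                    goto example_fail;

    #define EXAMPLE_LOCK_ALL_END(ctx, ret)                          \
    example_fail:                                                   \
            if (ret == -EDEADLK) {                                  \
                    ret = drm_modeset_backoff(&(ctx));              \
                    if (!ret)                                       \
                            goto example_retry;                     \
            }                                                       \
            drm_modeset_drop_locks(&(ctx));                         \
            drm_modeset_acquire_fini(&(ctx));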
diff --git a/drivers/gpu/drm/drm_plane.c b/drivers/gpu/drm/drm_plane.c
index 679455e36829..5f650d8fc66b 100644
--- a/drivers/gpu/drm/drm_plane.c
+++ b/drivers/gpu/drm/drm_plane.c
| @@ -767,11 +767,8 @@ static int setplane_internal(struct drm_plane *plane, | |||
| 767 | struct drm_modeset_acquire_ctx ctx; | 767 | struct drm_modeset_acquire_ctx ctx; |
| 768 | int ret; | 768 | int ret; |
| 769 | 769 | ||
| 770 | drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE); | 770 | DRM_MODESET_LOCK_ALL_BEGIN(plane->dev, ctx, |
| 771 | retry: | 771 | DRM_MODESET_ACQUIRE_INTERRUPTIBLE, ret); |
| 772 | ret = drm_modeset_lock_all_ctx(plane->dev, &ctx); | ||
| 773 | if (ret) | ||
| 774 | goto fail; | ||
| 775 | 772 | ||
| 776 | if (drm_drv_uses_atomic_modeset(plane->dev)) | 773 | if (drm_drv_uses_atomic_modeset(plane->dev)) |
| 777 | ret = __setplane_atomic(plane, crtc, fb, | 774 | ret = __setplane_atomic(plane, crtc, fb, |
| @@ -782,14 +779,7 @@ retry: | |||
| 782 | crtc_x, crtc_y, crtc_w, crtc_h, | 779 | crtc_x, crtc_y, crtc_w, crtc_h, |
| 783 | src_x, src_y, src_w, src_h, &ctx); | 780 | src_x, src_y, src_w, src_h, &ctx); |
| 784 | 781 | ||
| 785 | fail: | 782 | DRM_MODESET_LOCK_ALL_END(ctx, ret); |
| 786 | if (ret == -EDEADLK) { | ||
| 787 | ret = drm_modeset_backoff(&ctx); | ||
| 788 | if (!ret) | ||
| 789 | goto retry; | ||
| 790 | } | ||
| 791 | drm_modeset_drop_locks(&ctx); | ||
| 792 | drm_modeset_acquire_fini(&ctx); | ||
| 793 | 783 | ||
| 794 | return ret; | 784 | return ret; |
| 795 | } | 785 | } |
diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c
index e2c5b3ca4824..db30a0e89db8 100644
--- a/drivers/gpu/drm/drm_syncobj.c
+++ b/drivers/gpu/drm/drm_syncobj.c
| @@ -56,22 +56,6 @@ | |||
| 56 | #include "drm_internal.h" | 56 | #include "drm_internal.h" |
| 57 | #include <drm/drm_syncobj.h> | 57 | #include <drm/drm_syncobj.h> |
| 58 | 58 | ||
| 59 | struct drm_syncobj_stub_fence { | ||
| 60 | struct dma_fence base; | ||
| 61 | spinlock_t lock; | ||
| 62 | }; | ||
| 63 | |||
| 64 | static const char *drm_syncobj_stub_fence_get_name(struct dma_fence *fence) | ||
| 65 | { | ||
| 66 | return "syncobjstub"; | ||
| 67 | } | ||
| 68 | |||
| 69 | static const struct dma_fence_ops drm_syncobj_stub_fence_ops = { | ||
| 70 | .get_driver_name = drm_syncobj_stub_fence_get_name, | ||
| 71 | .get_timeline_name = drm_syncobj_stub_fence_get_name, | ||
| 72 | }; | ||
| 73 | |||
| 74 | |||
| 75 | /** | 59 | /** |
| 76 | * drm_syncobj_find - lookup and reference a sync object. | 60 | * drm_syncobj_find - lookup and reference a sync object. |
| 77 | * @file_private: drm file private pointer | 61 | * @file_private: drm file private pointer |
| @@ -156,13 +140,11 @@ void drm_syncobj_remove_callback(struct drm_syncobj *syncobj, | |||
| 156 | /** | 140 | /** |
| 157 | * drm_syncobj_replace_fence - replace fence in a sync object. | 141 | * drm_syncobj_replace_fence - replace fence in a sync object. |
| 158 | * @syncobj: Sync object to replace fence in | 142 | * @syncobj: Sync object to replace fence in |
| 159 | * @point: timeline point | ||
| 160 | * @fence: fence to install in sync file. | 143 | * @fence: fence to install in sync file. |
| 161 | * | 144 | * |
| 162 | * This replaces the fence on a sync object, or a timeline point fence. | 145 | * This replaces the fence on a sync object. |
| 163 | */ | 146 | */ |
| 164 | void drm_syncobj_replace_fence(struct drm_syncobj *syncobj, | 147 | void drm_syncobj_replace_fence(struct drm_syncobj *syncobj, |
| 165 | u64 point, | ||
| 166 | struct dma_fence *fence) | 148 | struct dma_fence *fence) |
| 167 | { | 149 | { |
| 168 | struct dma_fence *old_fence; | 150 | struct dma_fence *old_fence; |
| @@ -190,23 +172,18 @@ void drm_syncobj_replace_fence(struct drm_syncobj *syncobj, | |||
| 190 | } | 172 | } |
| 191 | EXPORT_SYMBOL(drm_syncobj_replace_fence); | 173 | EXPORT_SYMBOL(drm_syncobj_replace_fence); |
| 192 | 174 | ||
| 193 | static int drm_syncobj_assign_null_handle(struct drm_syncobj *syncobj) | 175 | /** |
| 176 | * drm_syncobj_assign_null_handle - assign a stub fence to the sync object | ||
| 177 | * @syncobj: sync object to assign the fence on | ||
| 178 | * | ||
| 179 | * Assign a already signaled stub fence to the sync object. | ||
| 180 | */ | ||
| 181 | static void drm_syncobj_assign_null_handle(struct drm_syncobj *syncobj) | ||
| 194 | { | 182 | { |
| 195 | struct drm_syncobj_stub_fence *fence; | 183 | struct dma_fence *fence = dma_fence_get_stub(); |
| 196 | fence = kzalloc(sizeof(*fence), GFP_KERNEL); | ||
| 197 | if (fence == NULL) | ||
| 198 | return -ENOMEM; | ||
| 199 | |||
| 200 | spin_lock_init(&fence->lock); | ||
| 201 | dma_fence_init(&fence->base, &drm_syncobj_stub_fence_ops, | ||
| 202 | &fence->lock, 0, 0); | ||
| 203 | dma_fence_signal(&fence->base); | ||
| 204 | |||
| 205 | drm_syncobj_replace_fence(syncobj, 0, &fence->base); | ||
| 206 | 184 | ||
| 207 | dma_fence_put(&fence->base); | 185 | drm_syncobj_replace_fence(syncobj, fence); |
| 208 | 186 | dma_fence_put(fence); | |
| 209 | return 0; | ||
| 210 | } | 187 | } |
| 211 | 188 | ||
| 212 | /** | 189 | /** |
| @@ -254,7 +231,7 @@ void drm_syncobj_free(struct kref *kref) | |||
| 254 | struct drm_syncobj *syncobj = container_of(kref, | 231 | struct drm_syncobj *syncobj = container_of(kref, |
| 255 | struct drm_syncobj, | 232 | struct drm_syncobj, |
| 256 | refcount); | 233 | refcount); |
| 257 | drm_syncobj_replace_fence(syncobj, 0, NULL); | 234 | drm_syncobj_replace_fence(syncobj, NULL); |
| 258 | kfree(syncobj); | 235 | kfree(syncobj); |
| 259 | } | 236 | } |
| 260 | EXPORT_SYMBOL(drm_syncobj_free); | 237 | EXPORT_SYMBOL(drm_syncobj_free); |
| @@ -274,7 +251,6 @@ EXPORT_SYMBOL(drm_syncobj_free); | |||
| 274 | int drm_syncobj_create(struct drm_syncobj **out_syncobj, uint32_t flags, | 251 | int drm_syncobj_create(struct drm_syncobj **out_syncobj, uint32_t flags, |
| 275 | struct dma_fence *fence) | 252 | struct dma_fence *fence) |
| 276 | { | 253 | { |
| 277 | int ret; | ||
| 278 | struct drm_syncobj *syncobj; | 254 | struct drm_syncobj *syncobj; |
| 279 | 255 | ||
| 280 | syncobj = kzalloc(sizeof(struct drm_syncobj), GFP_KERNEL); | 256 | syncobj = kzalloc(sizeof(struct drm_syncobj), GFP_KERNEL); |
| @@ -285,16 +261,11 @@ int drm_syncobj_create(struct drm_syncobj **out_syncobj, uint32_t flags, | |||
| 285 | INIT_LIST_HEAD(&syncobj->cb_list); | 261 | INIT_LIST_HEAD(&syncobj->cb_list); |
| 286 | spin_lock_init(&syncobj->lock); | 262 | spin_lock_init(&syncobj->lock); |
| 287 | 263 | ||
| 288 | if (flags & DRM_SYNCOBJ_CREATE_SIGNALED) { | 264 | if (flags & DRM_SYNCOBJ_CREATE_SIGNALED) |
| 289 | ret = drm_syncobj_assign_null_handle(syncobj); | 265 | drm_syncobj_assign_null_handle(syncobj); |
| 290 | if (ret < 0) { | ||
| 291 | drm_syncobj_put(syncobj); | ||
| 292 | return ret; | ||
| 293 | } | ||
| 294 | } | ||
| 295 | 266 | ||
| 296 | if (fence) | 267 | if (fence) |
| 297 | drm_syncobj_replace_fence(syncobj, 0, fence); | 268 | drm_syncobj_replace_fence(syncobj, fence); |
| 298 | 269 | ||
| 299 | *out_syncobj = syncobj; | 270 | *out_syncobj = syncobj; |
| 300 | return 0; | 271 | return 0; |
| @@ -479,7 +450,7 @@ static int drm_syncobj_import_sync_file_fence(struct drm_file *file_private, | |||
| 479 | return -ENOENT; | 450 | return -ENOENT; |
| 480 | } | 451 | } |
| 481 | 452 | ||
| 482 | drm_syncobj_replace_fence(syncobj, 0, fence); | 453 | drm_syncobj_replace_fence(syncobj, fence); |
| 483 | dma_fence_put(fence); | 454 | dma_fence_put(fence); |
| 484 | drm_syncobj_put(syncobj); | 455 | drm_syncobj_put(syncobj); |
| 485 | return 0; | 456 | return 0; |
| @@ -950,7 +921,7 @@ drm_syncobj_reset_ioctl(struct drm_device *dev, void *data, | |||
| 950 | return ret; | 921 | return ret; |
| 951 | 922 | ||
| 952 | for (i = 0; i < args->count_handles; i++) | 923 | for (i = 0; i < args->count_handles; i++) |
| 953 | drm_syncobj_replace_fence(syncobjs[i], 0, NULL); | 924 | drm_syncobj_replace_fence(syncobjs[i], NULL); |
| 954 | 925 | ||
| 955 | drm_syncobj_array_free(syncobjs, args->count_handles); | 926 | drm_syncobj_array_free(syncobjs, args->count_handles); |
| 956 | 927 | ||
| @@ -982,11 +953,8 @@ drm_syncobj_signal_ioctl(struct drm_device *dev, void *data, | |||
| 982 | if (ret < 0) | 953 | if (ret < 0) |
| 983 | return ret; | 954 | return ret; |
| 984 | 955 | ||
| 985 | for (i = 0; i < args->count_handles; i++) { | 956 | for (i = 0; i < args->count_handles; i++) |
| 986 | ret = drm_syncobj_assign_null_handle(syncobjs[i]); | 957 | drm_syncobj_assign_null_handle(syncobjs[i]); |
| 987 | if (ret < 0) | ||
| 988 | break; | ||
| 989 | } | ||
| 990 | 958 | ||
| 991 | drm_syncobj_array_free(syncobjs, args->count_handles); | 959 | drm_syncobj_array_free(syncobjs, args->count_handles); |
| 992 | 960 | ||
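Besides dropping the unused timeline point from drm_syncobj_replace_fence(), this makes drm_syncobj_assign_null_handle() infallible: it now borrows the shared stub fence instead of kzalloc'ing a private one, so the -ENOMEM handling at every call site goes away. A sketch of the effect for creators of pre-signalled syncobjs, using only the functions whose signatures appear in this diff:

    struct drm_syncobj *syncobj;
    int ret;

    /* the signalled fence can no longer fail to allocate; the only
     * remaining failure is the syncobj allocation itself */
    ret = drm_syncobj_create(&syncobj, DRM_SYNCOBJ_CREATE_SIGNALED, NULL);
    if (ret)
            return ret;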
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index d4fac09095f8..10a4afb4f235 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
| @@ -2191,7 +2191,7 @@ signal_fence_array(struct i915_execbuffer *eb, | |||
| 2191 | if (!(flags & I915_EXEC_FENCE_SIGNAL)) | 2191 | if (!(flags & I915_EXEC_FENCE_SIGNAL)) |
| 2192 | continue; | 2192 | continue; |
| 2193 | 2193 | ||
| 2194 | drm_syncobj_replace_fence(syncobj, 0, fence); | 2194 | drm_syncobj_replace_fence(syncobj, fence); |
| 2195 | } | 2195 | } |
| 2196 | } | 2196 | } |
| 2197 | 2197 | ||
diff --git a/drivers/gpu/drm/meson/meson_plane.c b/drivers/gpu/drm/meson/meson_plane.c
index 8ee2cf9e47cd..6119a0224278 100644
--- a/drivers/gpu/drm/meson/meson_plane.c
+++ b/drivers/gpu/drm/meson/meson_plane.c
| @@ -80,6 +80,7 @@ | |||
| 80 | struct meson_plane { | 80 | struct meson_plane { |
| 81 | struct drm_plane base; | 81 | struct drm_plane base; |
| 82 | struct meson_drm *priv; | 82 | struct meson_drm *priv; |
| 83 | bool enabled; | ||
| 83 | }; | 84 | }; |
| 84 | #define to_meson_plane(x) container_of(x, struct meson_plane, base) | 85 | #define to_meson_plane(x) container_of(x, struct meson_plane, base) |
| 85 | 86 | ||
| @@ -304,6 +305,15 @@ static void meson_plane_atomic_update(struct drm_plane *plane, | |||
| 304 | priv->viu.osd1_stride = fb->pitches[0]; | 305 | priv->viu.osd1_stride = fb->pitches[0]; |
| 305 | priv->viu.osd1_height = fb->height; | 306 | priv->viu.osd1_height = fb->height; |
| 306 | 307 | ||
| 308 | if (!meson_plane->enabled) { | ||
| 309 | /* Reset OSD1 before enabling it on GXL+ SoCs */ | ||
| 310 | if (meson_vpu_is_compatible(priv, "amlogic,meson-gxm-vpu") || | ||
| 311 | meson_vpu_is_compatible(priv, "amlogic,meson-gxl-vpu")) | ||
| 312 | meson_viu_osd1_reset(priv); | ||
| 313 | |||
| 314 | meson_plane->enabled = true; | ||
| 315 | } | ||
| 316 | |||
| 307 | spin_unlock_irqrestore(&priv->drm->event_lock, flags); | 317 | spin_unlock_irqrestore(&priv->drm->event_lock, flags); |
| 308 | } | 318 | } |
| 309 | 319 | ||
| @@ -317,6 +327,8 @@ static void meson_plane_atomic_disable(struct drm_plane *plane, | |||
| 317 | writel_bits_relaxed(VPP_OSD1_POSTBLEND, 0, | 327 | writel_bits_relaxed(VPP_OSD1_POSTBLEND, 0, |
| 318 | priv->io_base + _REG(VPP_MISC)); | 328 | priv->io_base + _REG(VPP_MISC)); |
| 319 | 329 | ||
| 330 | meson_plane->enabled = false; | ||
| 331 | |||
| 320 | } | 332 | } |
| 321 | 333 | ||
| 322 | static const struct drm_plane_helper_funcs meson_plane_helper_funcs = { | 334 | static const struct drm_plane_helper_funcs meson_plane_helper_funcs = { |
diff --git a/drivers/gpu/drm/meson/meson_viu.c b/drivers/gpu/drm/meson/meson_viu.c
index 2dffb987ec65..0ba87ff95530 100644
--- a/drivers/gpu/drm/meson/meson_viu.c
+++ b/drivers/gpu/drm/meson/meson_viu.c
| @@ -296,6 +296,33 @@ static void meson_viu_load_matrix(struct meson_drm *priv) | |||
| 296 | true); | 296 | true); |
| 297 | } | 297 | } |
| 298 | 298 | ||
| 299 | /* VIU OSD1 Reset as workaround for GXL+ Alpha OSD Bug */ | ||
| 300 | void meson_viu_osd1_reset(struct meson_drm *priv) | ||
| 301 | { | ||
| 302 | uint32_t osd1_fifo_ctrl_stat, osd1_ctrl_stat2; | ||
| 303 | |||
| 304 | /* Save these 2 registers state */ | ||
| 305 | osd1_fifo_ctrl_stat = readl_relaxed( | ||
| 306 | priv->io_base + _REG(VIU_OSD1_FIFO_CTRL_STAT)); | ||
| 307 | osd1_ctrl_stat2 = readl_relaxed( | ||
| 308 | priv->io_base + _REG(VIU_OSD1_CTRL_STAT2)); | ||
| 309 | |||
| 310 | /* Reset OSD1 */ | ||
| 311 | writel_bits_relaxed(BIT(0), BIT(0), | ||
| 312 | priv->io_base + _REG(VIU_SW_RESET)); | ||
| 313 | writel_bits_relaxed(BIT(0), 0, | ||
| 314 | priv->io_base + _REG(VIU_SW_RESET)); | ||
| 315 | |||
| 316 | /* Rewrite these registers state lost in the reset */ | ||
| 317 | writel_relaxed(osd1_fifo_ctrl_stat, | ||
| 318 | priv->io_base + _REG(VIU_OSD1_FIFO_CTRL_STAT)); | ||
| 319 | writel_relaxed(osd1_ctrl_stat2, | ||
| 320 | priv->io_base + _REG(VIU_OSD1_CTRL_STAT2)); | ||
| 321 | |||
| 322 | /* Reload the conversion matrix */ | ||
| 323 | meson_viu_load_matrix(priv); | ||
| 324 | } | ||
| 325 | |||
| 299 | void meson_viu_init(struct meson_drm *priv) | 326 | void meson_viu_init(struct meson_drm *priv) |
| 300 | { | 327 | { |
| 301 | uint32_t reg; | 328 | uint32_t reg; |
diff --git a/drivers/gpu/drm/meson/meson_viu.h b/drivers/gpu/drm/meson/meson_viu.h
index 073b1910bd1b..0f84bddd2ff0 100644
--- a/drivers/gpu/drm/meson/meson_viu.h
+++ b/drivers/gpu/drm/meson/meson_viu.h
| @@ -59,6 +59,7 @@ | |||
| 59 | #define OSD_REPLACE_EN BIT(14) | 59 | #define OSD_REPLACE_EN BIT(14) |
| 60 | #define OSD_REPLACE_SHIFT 6 | 60 | #define OSD_REPLACE_SHIFT 6 |
| 61 | 61 | ||
| 62 | void meson_viu_osd1_reset(struct meson_drm *priv); | ||
| 62 | void meson_viu_init(struct meson_drm *priv); | 63 | void meson_viu_init(struct meson_drm *priv); |
| 63 | 64 | ||
| 64 | #endif /* __MESON_VIU_H */ | 65 | #endif /* __MESON_VIU_H */ |
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index 5fbee837b0db..9c69e739a524 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
| @@ -618,6 +618,30 @@ static const struct panel_desc auo_g070vvn01 = { | |||
| 618 | }, | 618 | }, |
| 619 | }; | 619 | }; |
| 620 | 620 | ||
| 621 | static const struct drm_display_mode auo_g101evn010_mode = { | ||
| 622 | .clock = 68930, | ||
| 623 | .hdisplay = 1280, | ||
| 624 | .hsync_start = 1280 + 82, | ||
| 625 | .hsync_end = 1280 + 82 + 2, | ||
| 626 | .htotal = 1280 + 82 + 2 + 84, | ||
| 627 | .vdisplay = 800, | ||
| 628 | .vsync_start = 800 + 8, | ||
| 629 | .vsync_end = 800 + 8 + 2, | ||
| 630 | .vtotal = 800 + 8 + 2 + 6, | ||
| 631 | .vrefresh = 60, | ||
| 632 | }; | ||
| 633 | |||
| 634 | static const struct panel_desc auo_g101evn010 = { | ||
| 635 | .modes = &auo_g101evn010_mode, | ||
| 636 | .num_modes = 1, | ||
| 637 | .bpc = 6, | ||
| 638 | .size = { | ||
| 639 | .width = 216, | ||
| 640 | .height = 135, | ||
| 641 | }, | ||
| 642 | .bus_format = MEDIA_BUS_FMT_RGB666_1X18, | ||
| 643 | }; | ||
| 644 | |||
| 621 | static const struct drm_display_mode auo_g104sn02_mode = { | 645 | static const struct drm_display_mode auo_g104sn02_mode = { |
| 622 | .clock = 40000, | 646 | .clock = 40000, |
| 623 | .hdisplay = 800, | 647 | .hdisplay = 800, |
| @@ -2494,6 +2518,9 @@ static const struct of_device_id platform_of_match[] = { | |||
| 2494 | .compatible = "auo,g070vvn01", | 2518 | .compatible = "auo,g070vvn01", |
| 2495 | .data = &auo_g070vvn01, | 2519 | .data = &auo_g070vvn01, |
| 2496 | }, { | 2520 | }, { |
| 2521 | .compatible = "auo,g101evn010", | ||
| 2522 | .data = &auo_g101evn010, | ||
| 2523 | }, { | ||
| 2497 | .compatible = "auo,g104sn02", | 2524 | .compatible = "auo,g104sn02", |
| 2498 | .data = &auo_g104sn02, | 2525 | .data = &auo_g104sn02, |
| 2499 | }, { | 2526 | }, { |
diff --git a/drivers/gpu/drm/pl111/pl111_vexpress.c b/drivers/gpu/drm/pl111/pl111_vexpress.c
index 5fa0441bb6df..38c938c9adda 100644
--- a/drivers/gpu/drm/pl111/pl111_vexpress.c
+++ b/drivers/gpu/drm/pl111/pl111_vexpress.c
| @@ -55,6 +55,8 @@ int pl111_vexpress_clcd_init(struct device *dev, | |||
| 55 | } | 55 | } |
| 56 | } | 56 | } |
| 57 | 57 | ||
| 58 | of_node_put(root); | ||
| 59 | |||
| 58 | /* | 60 | /* |
| 59 | * If there is a coretile HDLCD and it has a driver, | 61 | * If there is a coretile HDLCD and it has a driver, |
| 60 | * do not mux the CLCD on the motherboard to the DVI. | 62 | * do not mux the CLCD on the motherboard to the DVI. |
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
index 94f055186b95..f50a3b1864bb 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
| @@ -21,6 +21,7 @@ | |||
| 21 | #include <drm/drm_atomic_helper.h> | 21 | #include <drm/drm_atomic_helper.h> |
| 22 | #include <drm/drm_crtc_helper.h> | 22 | #include <drm/drm_crtc_helper.h> |
| 23 | #include <drm/drm_fb_cma_helper.h> | 23 | #include <drm/drm_fb_cma_helper.h> |
| 24 | #include <drm/drm_fb_helper.h> | ||
| 24 | #include <drm/drm_gem_cma_helper.h> | 25 | #include <drm/drm_gem_cma_helper.h> |
| 25 | 26 | ||
| 26 | #include "rcar_du_drv.h" | 27 | #include "rcar_du_drv.h" |
| @@ -392,19 +393,11 @@ MODULE_DEVICE_TABLE(of, rcar_du_of_table); | |||
| 392 | * DRM operations | 393 | * DRM operations |
| 393 | */ | 394 | */ |
| 394 | 395 | ||
| 395 | static void rcar_du_lastclose(struct drm_device *dev) | ||
| 396 | { | ||
| 397 | struct rcar_du_device *rcdu = dev->dev_private; | ||
| 398 | |||
| 399 | drm_fbdev_cma_restore_mode(rcdu->fbdev); | ||
| 400 | } | ||
| 401 | |||
| 402 | DEFINE_DRM_GEM_CMA_FOPS(rcar_du_fops); | 396 | DEFINE_DRM_GEM_CMA_FOPS(rcar_du_fops); |
| 403 | 397 | ||
| 404 | static struct drm_driver rcar_du_driver = { | 398 | static struct drm_driver rcar_du_driver = { |
| 405 | .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | 399 | .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME |
| 406 | | DRIVER_ATOMIC, | 400 | | DRIVER_ATOMIC, |
| 407 | .lastclose = rcar_du_lastclose, | ||
| 408 | .gem_free_object_unlocked = drm_gem_cma_free_object, | 401 | .gem_free_object_unlocked = drm_gem_cma_free_object, |
| 409 | .gem_vm_ops = &drm_gem_cma_vm_ops, | 402 | .gem_vm_ops = &drm_gem_cma_vm_ops, |
| 410 | .prime_handle_to_fd = drm_gem_prime_handle_to_fd, | 403 | .prime_handle_to_fd = drm_gem_prime_handle_to_fd, |
| @@ -460,9 +453,6 @@ static int rcar_du_remove(struct platform_device *pdev) | |||
| 460 | 453 | ||
| 461 | drm_dev_unregister(ddev); | 454 | drm_dev_unregister(ddev); |
| 462 | 455 | ||
| 463 | if (rcdu->fbdev) | ||
| 464 | drm_fbdev_cma_fini(rcdu->fbdev); | ||
| 465 | |||
| 466 | drm_kms_helper_poll_fini(ddev); | 456 | drm_kms_helper_poll_fini(ddev); |
| 467 | drm_mode_config_cleanup(ddev); | 457 | drm_mode_config_cleanup(ddev); |
| 468 | 458 | ||
| @@ -522,6 +512,8 @@ static int rcar_du_probe(struct platform_device *pdev) | |||
| 522 | 512 | ||
| 523 | DRM_INFO("Device %s probed\n", dev_name(&pdev->dev)); | 513 | DRM_INFO("Device %s probed\n", dev_name(&pdev->dev)); |
| 524 | 514 | ||
| 515 | drm_fbdev_generic_setup(ddev, 32); | ||
| 516 | |||
| 525 | return 0; | 517 | return 0; |
| 526 | 518 | ||
| 527 | error: | 519 | error: |
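Together with the rcar_du_drv.h and rcar_du_kms.c hunks below, this converts rcar-du from driver-managed CMA fbdev emulation to the generic helper: the .lastclose handler, the .output_poll_changed hook and the explicit drm_fbdev_cma_init()/fini() calls all go away, and one drm_fbdev_generic_setup() call after drm_dev_register() handles restore-on-lastclose and hotplug internally. The resulting probe tail looks roughly like this, matching the registration order already used by the driver:

    ret = drm_dev_register(ddev, 0);
    if (ret)
            goto error;

    /* generic fbdev emulation; 32 is the preferred bpp */
    drm_fbdev_generic_setup(ddev, 32);

    return 0;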
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.h b/drivers/gpu/drm/rcar-du/rcar_du_drv.h
index 9f5563296c5a..a68da79b424e 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.h
| @@ -20,7 +20,6 @@ | |||
| 20 | struct clk; | 20 | struct clk; |
| 21 | struct device; | 21 | struct device; |
| 22 | struct drm_device; | 22 | struct drm_device; |
| 23 | struct drm_fbdev_cma; | ||
| 24 | struct rcar_du_device; | 23 | struct rcar_du_device; |
| 25 | 24 | ||
| 26 | #define RCAR_DU_FEATURE_CRTC_IRQ_CLOCK BIT(0) /* Per-CRTC IRQ and clock */ | 25 | #define RCAR_DU_FEATURE_CRTC_IRQ_CLOCK BIT(0) /* Per-CRTC IRQ and clock */ |
| @@ -78,7 +77,6 @@ struct rcar_du_device { | |||
| 78 | void __iomem *mmio; | 77 | void __iomem *mmio; |
| 79 | 78 | ||
| 80 | struct drm_device *ddev; | 79 | struct drm_device *ddev; |
| 81 | struct drm_fbdev_cma *fbdev; | ||
| 82 | 80 | ||
| 83 | struct rcar_du_crtc crtcs[RCAR_DU_MAX_CRTCS]; | 81 | struct rcar_du_crtc crtcs[RCAR_DU_MAX_CRTCS]; |
| 84 | unsigned int num_crtcs; | 82 | unsigned int num_crtcs; |
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
index fe6f65c94eef..9c7007d45408 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
| @@ -255,13 +255,6 @@ rcar_du_fb_create(struct drm_device *dev, struct drm_file *file_priv, | |||
| 255 | return drm_gem_fb_create(dev, file_priv, mode_cmd); | 255 | return drm_gem_fb_create(dev, file_priv, mode_cmd); |
| 256 | } | 256 | } |
| 257 | 257 | ||
| 258 | static void rcar_du_output_poll_changed(struct drm_device *dev) | ||
| 259 | { | ||
| 260 | struct rcar_du_device *rcdu = dev->dev_private; | ||
| 261 | |||
| 262 | drm_fbdev_cma_hotplug_event(rcdu->fbdev); | ||
| 263 | } | ||
| 264 | |||
| 265 | /* ----------------------------------------------------------------------------- | 258 | /* ----------------------------------------------------------------------------- |
| 266 | * Atomic Check and Update | 259 | * Atomic Check and Update |
| 267 | */ | 260 | */ |
| @@ -308,7 +301,6 @@ static const struct drm_mode_config_helper_funcs rcar_du_mode_config_helper = { | |||
| 308 | 301 | ||
| 309 | static const struct drm_mode_config_funcs rcar_du_mode_config_funcs = { | 302 | static const struct drm_mode_config_funcs rcar_du_mode_config_funcs = { |
| 310 | .fb_create = rcar_du_fb_create, | 303 | .fb_create = rcar_du_fb_create, |
| 311 | .output_poll_changed = rcar_du_output_poll_changed, | ||
| 312 | .atomic_check = rcar_du_atomic_check, | 304 | .atomic_check = rcar_du_atomic_check, |
| 313 | .atomic_commit = drm_atomic_helper_commit, | 305 | .atomic_commit = drm_atomic_helper_commit, |
| 314 | }; | 306 | }; |
| @@ -543,7 +535,6 @@ int rcar_du_modeset_init(struct rcar_du_device *rcdu) | |||
| 543 | 535 | ||
| 544 | struct drm_device *dev = rcdu->ddev; | 536 | struct drm_device *dev = rcdu->ddev; |
| 545 | struct drm_encoder *encoder; | 537 | struct drm_encoder *encoder; |
| 546 | struct drm_fbdev_cma *fbdev; | ||
| 547 | unsigned int dpad0_sources; | 538 | unsigned int dpad0_sources; |
| 548 | unsigned int num_encoders; | 539 | unsigned int num_encoders; |
| 549 | unsigned int num_groups; | 540 | unsigned int num_groups; |
| @@ -682,17 +673,5 @@ int rcar_du_modeset_init(struct rcar_du_device *rcdu) | |||
| 682 | 673 | ||
| 683 | drm_kms_helper_poll_init(dev); | 674 | drm_kms_helper_poll_init(dev); |
| 684 | 675 | ||
| 685 | if (dev->mode_config.num_connector) { | ||
| 686 | fbdev = drm_fbdev_cma_init(dev, 32, | ||
| 687 | dev->mode_config.num_connector); | ||
| 688 | if (IS_ERR(fbdev)) | ||
| 689 | return PTR_ERR(fbdev); | ||
| 690 | |||
| 691 | rcdu->fbdev = fbdev; | ||
| 692 | } else { | ||
| 693 | dev_info(rcdu->dev, | ||
| 694 | "no connector found, disabling fbdev emulation\n"); | ||
| 695 | } | ||
| 696 | |||
| 697 | return 0; | 676 | return 0; |
| 698 | } | 677 | } |
diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c
index ccdeae6299eb..9e4c375ccc96 100644
--- a/drivers/gpu/drm/sun4i/sun4i_drv.c
+++ b/drivers/gpu/drm/sun4i/sun4i_drv.c
| @@ -410,6 +410,7 @@ static const struct of_device_id sun4i_drv_of_table[] = { | |||
| 410 | { .compatible = "allwinner,sun8i-v3s-display-engine" }, | 410 | { .compatible = "allwinner,sun8i-v3s-display-engine" }, |
| 411 | { .compatible = "allwinner,sun9i-a80-display-engine" }, | 411 | { .compatible = "allwinner,sun9i-a80-display-engine" }, |
| 412 | { .compatible = "allwinner,sun50i-a64-display-engine" }, | 412 | { .compatible = "allwinner,sun50i-a64-display-engine" }, |
| 413 | { .compatible = "allwinner,sun50i-h6-display-engine" }, | ||
| 413 | { } | 414 | { } |
| 414 | }; | 415 | }; |
| 415 | MODULE_DEVICE_TABLE(of, sun4i_drv_of_table); | 416 | MODULE_DEVICE_TABLE(of, sun4i_drv_of_table); |
diff --git a/drivers/gpu/drm/tinydrm/repaper.c b/drivers/gpu/drm/tinydrm/repaper.c
index 07f45a008a0f..54d6fe0f37ce 100644
--- a/drivers/gpu/drm/tinydrm/repaper.c
+++ b/drivers/gpu/drm/tinydrm/repaper.c
| @@ -108,12 +108,11 @@ static int repaper_spi_transfer(struct spi_device *spi, u8 header, | |||
| 108 | 108 | ||
| 109 | /* Stack allocated tx? */ | 109 | /* Stack allocated tx? */ |
| 110 | if (tx && len <= 32) { | 110 | if (tx && len <= 32) { |
| 111 | txbuf = kmalloc(len, GFP_KERNEL); | 111 | txbuf = kmemdup(tx, len, GFP_KERNEL); |
| 112 | if (!txbuf) { | 112 | if (!txbuf) { |
| 113 | ret = -ENOMEM; | 113 | ret = -ENOMEM; |
| 114 | goto out_free; | 114 | goto out_free; |
| 115 | } | 115 | } |
| 116 | memcpy(txbuf, tx, len); | ||
| 117 | } | 116 | } |
| 118 | 117 | ||
| 119 | if (rx) { | 118 | if (rx) { |
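The repaper change is a plain kmalloc()+memcpy() to kmemdup() conversion; behaviour is unchanged, the allocate-and-copy just happens in one call:

    /* before */
    dst = kmalloc(len, GFP_KERNEL);
    if (dst)
            memcpy(dst, src, len);

    /* after: same result in one call */
    dst = kmemdup(src, len, GFP_KERNEL);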
diff --git a/drivers/gpu/drm/v3d/v3d_bo.c b/drivers/gpu/drm/v3d/v3d_bo.c
index 54d96518a131..a08766d39eab 100644
--- a/drivers/gpu/drm/v3d/v3d_bo.c
+++ b/drivers/gpu/drm/v3d/v3d_bo.c
| @@ -293,6 +293,7 @@ v3d_prime_import_sg_table(struct drm_device *dev, | |||
| 293 | bo->resv = attach->dmabuf->resv; | 293 | bo->resv = attach->dmabuf->resv; |
| 294 | 294 | ||
| 295 | bo->sgt = sgt; | 295 | bo->sgt = sgt; |
| 296 | obj->import_attach = attach; | ||
| 296 | v3d_bo_get_pages(bo); | 297 | v3d_bo_get_pages(bo); |
| 297 | 298 | ||
| 298 | v3d_mmu_insert_ptes(bo); | 299 | v3d_mmu_insert_ptes(bo); |
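Recording the attachment on the GEM object matters because the common teardown path keys off it. The v3d free function is not part of this hunk, so treat the following as an assumption about the usual drm_prime pattern rather than a quote of v3d code; the release side typically looks like:

    /* in the BO free path: only imported objects carry import_attach */
    if (obj->import_attach)
            drm_prime_gem_destroy(obj, bo->sgt);    /* unmap sgt, detach dma-buf */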
diff --git a/drivers/gpu/drm/v3d/v3d_drv.c b/drivers/gpu/drm/v3d/v3d_drv.c
index 2a85fa68ffea..f0afcec72c34 100644
--- a/drivers/gpu/drm/v3d/v3d_drv.c
+++ b/drivers/gpu/drm/v3d/v3d_drv.c
| @@ -112,10 +112,15 @@ static int v3d_get_param_ioctl(struct drm_device *dev, void *data, | |||
| 112 | return 0; | 112 | return 0; |
| 113 | } | 113 | } |
| 114 | 114 | ||
| 115 | /* Any params that aren't just register reads would go here. */ | ||
| 116 | 115 | ||
| 117 | DRM_DEBUG("Unknown parameter %d\n", args->param); | 116 | switch (args->param) { |
| 118 | return -EINVAL; | 117 | case DRM_V3D_PARAM_SUPPORTS_TFU: |
| 118 | args->value = 1; | ||
| 119 | return 0; | ||
| 120 | default: | ||
| 121 | DRM_DEBUG("Unknown parameter %d\n", args->param); | ||
| 122 | return -EINVAL; | ||
| 123 | } | ||
| 119 | } | 124 | } |
| 120 | 125 | ||
| 121 | static int | 126 | static int |
| @@ -170,7 +175,8 @@ static const struct file_operations v3d_drm_fops = { | |||
| 170 | /* DRM_AUTH is required on SUBMIT_CL for now, while we don't have GMP | 175 | /* DRM_AUTH is required on SUBMIT_CL for now, while we don't have GMP |
| 171 | * protection between clients. Note that render nodes would be be | 176 | * protection between clients. Note that render nodes would be be |
| 172 | * able to submit CLs that could access BOs from clients authenticated | 177 | * able to submit CLs that could access BOs from clients authenticated |
| 173 | * with the master node. | 178 | * with the master node. The TFU doesn't use the GMP, so it would |
| 179 | * need to stay DRM_AUTH until we do buffer size/offset validation. | ||
| 174 | */ | 180 | */ |
| 175 | static const struct drm_ioctl_desc v3d_drm_ioctls[] = { | 181 | static const struct drm_ioctl_desc v3d_drm_ioctls[] = { |
| 176 | DRM_IOCTL_DEF_DRV(V3D_SUBMIT_CL, v3d_submit_cl_ioctl, DRM_RENDER_ALLOW | DRM_AUTH), | 182 | DRM_IOCTL_DEF_DRV(V3D_SUBMIT_CL, v3d_submit_cl_ioctl, DRM_RENDER_ALLOW | DRM_AUTH), |
| @@ -179,6 +185,7 @@ static const struct drm_ioctl_desc v3d_drm_ioctls[] = { | |||
| 179 | DRM_IOCTL_DEF_DRV(V3D_MMAP_BO, v3d_mmap_bo_ioctl, DRM_RENDER_ALLOW), | 185 | DRM_IOCTL_DEF_DRV(V3D_MMAP_BO, v3d_mmap_bo_ioctl, DRM_RENDER_ALLOW), |
| 180 | DRM_IOCTL_DEF_DRV(V3D_GET_PARAM, v3d_get_param_ioctl, DRM_RENDER_ALLOW), | 186 | DRM_IOCTL_DEF_DRV(V3D_GET_PARAM, v3d_get_param_ioctl, DRM_RENDER_ALLOW), |
| 181 | DRM_IOCTL_DEF_DRV(V3D_GET_BO_OFFSET, v3d_get_bo_offset_ioctl, DRM_RENDER_ALLOW), | 187 | DRM_IOCTL_DEF_DRV(V3D_GET_BO_OFFSET, v3d_get_bo_offset_ioctl, DRM_RENDER_ALLOW), |
| 188 | DRM_IOCTL_DEF_DRV(V3D_SUBMIT_TFU, v3d_submit_tfu_ioctl, DRM_RENDER_ALLOW | DRM_AUTH), | ||
| 182 | }; | 189 | }; |
| 183 | 190 | ||
| 184 | static const struct vm_operations_struct v3d_vm_ops = { | 191 | static const struct vm_operations_struct v3d_vm_ops = { |
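
The new DRM_V3D_PARAM_SUPPORTS_TFU parameter lets userspace probe for the TFU queue before trying the new submit ioctl. A hedged userspace-side sketch, assuming libdrm's drmIoctl() and an already-open v3d fd (the helper name is made up):

    #include <stdbool.h>
    #include <drm/v3d_drm.h>
    #include <xf86drm.h>

    static bool v3d_has_tfu(int fd)
    {
            struct drm_v3d_get_param p = {
                    .param = DRM_V3D_PARAM_SUPPORTS_TFU,
            };

            /* Kernels without this patch reject the unknown param with
             * -EINVAL, which simply reads as "no TFU support" here.
             */
            if (drmIoctl(fd, DRM_IOCTL_V3D_GET_PARAM, &p) != 0)
                    return false;

            return p.value != 0;
    }
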
diff --git a/drivers/gpu/drm/v3d/v3d_drv.h b/drivers/gpu/drm/v3d/v3d_drv.h index cbe5be0c47eb..dcb772a19191 100644 --- a/drivers/gpu/drm/v3d/v3d_drv.h +++ b/drivers/gpu/drm/v3d/v3d_drv.h | |||
| @@ -7,19 +7,18 @@ | |||
| 7 | #include <drm/drm_encoder.h> | 7 | #include <drm/drm_encoder.h> |
| 8 | #include <drm/drm_gem.h> | 8 | #include <drm/drm_gem.h> |
| 9 | #include <drm/gpu_scheduler.h> | 9 | #include <drm/gpu_scheduler.h> |
| 10 | #include "uapi/drm/v3d_drm.h" | ||
| 10 | 11 | ||
| 11 | #define GMP_GRANULARITY (128 * 1024) | 12 | #define GMP_GRANULARITY (128 * 1024) |
| 12 | 13 | ||
| 13 | /* Enum for each of the V3D queues. We maintain various queue | 14 | /* Enum for each of the V3D queues. */ |
| 14 | * tracking as an array because at some point we'll want to support | ||
| 15 | * the TFU (texture formatting unit) as another queue. | ||
| 16 | */ | ||
| 17 | enum v3d_queue { | 15 | enum v3d_queue { |
| 18 | V3D_BIN, | 16 | V3D_BIN, |
| 19 | V3D_RENDER, | 17 | V3D_RENDER, |
| 18 | V3D_TFU, | ||
| 20 | }; | 19 | }; |
| 21 | 20 | ||
| 22 | #define V3D_MAX_QUEUES (V3D_RENDER + 1) | 21 | #define V3D_MAX_QUEUES (V3D_TFU + 1) |
| 23 | 22 | ||
| 24 | struct v3d_queue_state { | 23 | struct v3d_queue_state { |
| 25 | struct drm_gpu_scheduler sched; | 24 | struct drm_gpu_scheduler sched; |
| @@ -68,6 +67,7 @@ struct v3d_dev { | |||
| 68 | 67 | ||
| 69 | struct v3d_exec_info *bin_job; | 68 | struct v3d_exec_info *bin_job; |
| 70 | struct v3d_exec_info *render_job; | 69 | struct v3d_exec_info *render_job; |
| 70 | struct v3d_tfu_job *tfu_job; | ||
| 71 | 71 | ||
| 72 | struct v3d_queue_state queue[V3D_MAX_QUEUES]; | 72 | struct v3d_queue_state queue[V3D_MAX_QUEUES]; |
| 73 | 73 | ||
| @@ -218,6 +218,25 @@ struct v3d_exec_info { | |||
| 218 | u32 qma, qms, qts; | 218 | u32 qma, qms, qts; |
| 219 | }; | 219 | }; |
| 220 | 220 | ||
| 221 | struct v3d_tfu_job { | ||
| 222 | struct drm_sched_job base; | ||
| 223 | |||
| 224 | struct drm_v3d_submit_tfu args; | ||
| 225 | |||
| 226 | /* An optional fence userspace can pass in for the job to depend on. */ | ||
| 227 | struct dma_fence *in_fence; | ||
| 228 | |||
| 229 | /* v3d fence to be signaled by IRQ handler when the job is complete. */ | ||
| 230 | struct dma_fence *done_fence; | ||
| 231 | |||
| 232 | struct v3d_dev *v3d; | ||
| 233 | |||
| 234 | struct kref refcount; | ||
| 235 | |||
| 236 | /* This is the array of BOs that were looked up at the start of exec. */ | ||
| 237 | struct v3d_bo *bo[4]; | ||
| 238 | }; | ||
| 239 | |||
| 221 | /** | 240 | /** |
| 222 | * _wait_for - magic (register) wait macro | 241 | * _wait_for - magic (register) wait macro |
| 223 | * | 242 | * |
| @@ -281,9 +300,12 @@ int v3d_gem_init(struct drm_device *dev); | |||
| 281 | void v3d_gem_destroy(struct drm_device *dev); | 300 | void v3d_gem_destroy(struct drm_device *dev); |
| 282 | int v3d_submit_cl_ioctl(struct drm_device *dev, void *data, | 301 | int v3d_submit_cl_ioctl(struct drm_device *dev, void *data, |
| 283 | struct drm_file *file_priv); | 302 | struct drm_file *file_priv); |
| 303 | int v3d_submit_tfu_ioctl(struct drm_device *dev, void *data, | ||
| 304 | struct drm_file *file_priv); | ||
| 284 | int v3d_wait_bo_ioctl(struct drm_device *dev, void *data, | 305 | int v3d_wait_bo_ioctl(struct drm_device *dev, void *data, |
| 285 | struct drm_file *file_priv); | 306 | struct drm_file *file_priv); |
| 286 | void v3d_exec_put(struct v3d_exec_info *exec); | 307 | void v3d_exec_put(struct v3d_exec_info *exec); |
| 308 | void v3d_tfu_job_put(struct v3d_tfu_job *exec); | ||
| 287 | void v3d_reset(struct v3d_dev *v3d); | 309 | void v3d_reset(struct v3d_dev *v3d); |
| 288 | void v3d_invalidate_caches(struct v3d_dev *v3d); | 310 | void v3d_invalidate_caches(struct v3d_dev *v3d); |
| 289 | void v3d_flush_caches(struct v3d_dev *v3d); | 311 | void v3d_flush_caches(struct v3d_dev *v3d); |
diff --git a/drivers/gpu/drm/v3d/v3d_fence.c b/drivers/gpu/drm/v3d/v3d_fence.c index 50bfcf9a8a1a..b0a2a1ae2eb1 100644 --- a/drivers/gpu/drm/v3d/v3d_fence.c +++ b/drivers/gpu/drm/v3d/v3d_fence.c | |||
| @@ -29,10 +29,16 @@ static const char *v3d_fence_get_timeline_name(struct dma_fence *fence) | |||
| 29 | { | 29 | { |
| 30 | struct v3d_fence *f = to_v3d_fence(fence); | 30 | struct v3d_fence *f = to_v3d_fence(fence); |
| 31 | 31 | ||
| 32 | if (f->queue == V3D_BIN) | 32 | switch (f->queue) { |
| 33 | case V3D_BIN: | ||
| 33 | return "v3d-bin"; | 34 | return "v3d-bin"; |
| 34 | else | 35 | case V3D_RENDER: |
| 35 | return "v3d-render"; | 36 | return "v3d-render"; |
| 37 | case V3D_TFU: | ||
| 38 | return "v3d-tfu"; | ||
| 39 | default: | ||
| 40 | return NULL; | ||
| 41 | } | ||
| 36 | } | 42 | } |
| 37 | 43 | ||
| 38 | const struct dma_fence_ops v3d_fence_ops = { | 44 | const struct dma_fence_ops v3d_fence_ops = { |
diff --git a/drivers/gpu/drm/v3d/v3d_gem.c b/drivers/gpu/drm/v3d/v3d_gem.c index 1e8947c7d954..05ca6319065e 100644 --- a/drivers/gpu/drm/v3d/v3d_gem.c +++ b/drivers/gpu/drm/v3d/v3d_gem.c | |||
| @@ -207,26 +207,26 @@ v3d_flush_caches(struct v3d_dev *v3d) | |||
| 207 | } | 207 | } |
| 208 | 208 | ||
| 209 | static void | 209 | static void |
| 210 | v3d_attach_object_fences(struct v3d_exec_info *exec) | 210 | v3d_attach_object_fences(struct v3d_bo **bos, int bo_count, |
| 211 | struct dma_fence *fence) | ||
| 211 | { | 212 | { |
| 212 | struct dma_fence *out_fence = exec->render_done_fence; | ||
| 213 | int i; | 213 | int i; |
| 214 | 214 | ||
| 215 | for (i = 0; i < exec->bo_count; i++) { | 215 | for (i = 0; i < bo_count; i++) { |
| 216 | /* XXX: Use shared fences for read-only objects. */ | 216 | /* XXX: Use shared fences for read-only objects. */ |
| 217 | reservation_object_add_excl_fence(exec->bo[i]->resv, out_fence); | 217 | reservation_object_add_excl_fence(bos[i]->resv, fence); |
| 218 | } | 218 | } |
| 219 | } | 219 | } |
| 220 | 220 | ||
| 221 | static void | 221 | static void |
| 222 | v3d_unlock_bo_reservations(struct drm_device *dev, | 222 | v3d_unlock_bo_reservations(struct v3d_bo **bos, |
| 223 | struct v3d_exec_info *exec, | 223 | int bo_count, |
| 224 | struct ww_acquire_ctx *acquire_ctx) | 224 | struct ww_acquire_ctx *acquire_ctx) |
| 225 | { | 225 | { |
| 226 | int i; | 226 | int i; |
| 227 | 227 | ||
| 228 | for (i = 0; i < exec->bo_count; i++) | 228 | for (i = 0; i < bo_count; i++) |
| 229 | ww_mutex_unlock(&exec->bo[i]->resv->lock); | 229 | ww_mutex_unlock(&bos[i]->resv->lock); |
| 230 | 230 | ||
| 231 | ww_acquire_fini(acquire_ctx); | 231 | ww_acquire_fini(acquire_ctx); |
| 232 | } | 232 | } |
| @@ -239,8 +239,8 @@ v3d_unlock_bo_reservations(struct drm_device *dev, | |||
| 239 | * to v3d, so we don't attach dma-buf fences to them. | 239 | * to v3d, so we don't attach dma-buf fences to them. |
| 240 | */ | 240 | */ |
| 241 | static int | 241 | static int |
| 242 | v3d_lock_bo_reservations(struct drm_device *dev, | 242 | v3d_lock_bo_reservations(struct v3d_bo **bos, |
| 243 | struct v3d_exec_info *exec, | 243 | int bo_count, |
| 244 | struct ww_acquire_ctx *acquire_ctx) | 244 | struct ww_acquire_ctx *acquire_ctx) |
| 245 | { | 245 | { |
| 246 | int contended_lock = -1; | 246 | int contended_lock = -1; |
| @@ -250,7 +250,7 @@ v3d_lock_bo_reservations(struct drm_device *dev, | |||
| 250 | 250 | ||
| 251 | retry: | 251 | retry: |
| 252 | if (contended_lock != -1) { | 252 | if (contended_lock != -1) { |
| 253 | struct v3d_bo *bo = exec->bo[contended_lock]; | 253 | struct v3d_bo *bo = bos[contended_lock]; |
| 254 | 254 | ||
| 255 | ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock, | 255 | ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock, |
| 256 | acquire_ctx); | 256 | acquire_ctx); |
| @@ -260,20 +260,20 @@ retry: | |||
| 260 | } | 260 | } |
| 261 | } | 261 | } |
| 262 | 262 | ||
| 263 | for (i = 0; i < exec->bo_count; i++) { | 263 | for (i = 0; i < bo_count; i++) { |
| 264 | if (i == contended_lock) | 264 | if (i == contended_lock) |
| 265 | continue; | 265 | continue; |
| 266 | 266 | ||
| 267 | ret = ww_mutex_lock_interruptible(&exec->bo[i]->resv->lock, | 267 | ret = ww_mutex_lock_interruptible(&bos[i]->resv->lock, |
| 268 | acquire_ctx); | 268 | acquire_ctx); |
| 269 | if (ret) { | 269 | if (ret) { |
| 270 | int j; | 270 | int j; |
| 271 | 271 | ||
| 272 | for (j = 0; j < i; j++) | 272 | for (j = 0; j < i; j++) |
| 273 | ww_mutex_unlock(&exec->bo[j]->resv->lock); | 273 | ww_mutex_unlock(&bos[j]->resv->lock); |
| 274 | 274 | ||
| 275 | if (contended_lock != -1 && contended_lock >= i) { | 275 | if (contended_lock != -1 && contended_lock >= i) { |
| 276 | struct v3d_bo *bo = exec->bo[contended_lock]; | 276 | struct v3d_bo *bo = bos[contended_lock]; |
| 277 | 277 | ||
| 278 | ww_mutex_unlock(&bo->resv->lock); | 278 | ww_mutex_unlock(&bo->resv->lock); |
| 279 | } | 279 | } |
| @@ -293,10 +293,11 @@ retry: | |||
| 293 | /* Reserve space for our shared (read-only) fence references, | 293 | /* Reserve space for our shared (read-only) fence references, |
| 294 | * before we commit the CL to the hardware. | 294 | * before we commit the CL to the hardware. |
| 295 | */ | 295 | */ |
| 296 | for (i = 0; i < exec->bo_count; i++) { | 296 | for (i = 0; i < bo_count; i++) { |
| 297 | ret = reservation_object_reserve_shared(exec->bo[i]->resv, 1); | 297 | ret = reservation_object_reserve_shared(bos[i]->resv, 1); |
| 298 | if (ret) { | 298 | if (ret) { |
| 299 | v3d_unlock_bo_reservations(dev, exec, acquire_ctx); | 299 | v3d_unlock_bo_reservations(bos, bo_count, |
| 300 | acquire_ctx); | ||
| 300 | return ret; | 301 | return ret; |
| 301 | } | 302 | } |
| 302 | } | 303 | } |
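
Taking a plain BO array instead of a v3d_exec_info lets the CL and TFU submit paths share the same wound/wait locking helpers. For readers unfamiliar with the idiom, a condensed, self-contained sketch of the ww_mutex pattern those helpers implement; it locks only two reservation objects and gives up after the backoff instead of retrying, so it is illustrative rather than equivalent:

    #include <linux/reservation.h>

    static int lock_two_resvs(struct reservation_object *a,
                              struct reservation_object *b,
                              struct ww_acquire_ctx *ctx)
    {
            int ret;

            ww_acquire_init(ctx, &reservation_ww_class);

            ret = ww_mutex_lock_interruptible(&a->lock, ctx);
            if (ret)
                    goto out_fini;

            ret = ww_mutex_lock_interruptible(&b->lock, ctx);
            if (ret == -EDEADLK) {
                    /* Wounded: drop what we hold, sleep on the contended
                     * lock, then (in real code) retry the other locks.
                     */
                    ww_mutex_unlock(&a->lock);
                    ww_mutex_lock_slow(&b->lock, ctx);
                    ww_mutex_unlock(&b->lock);
                    goto out_fini;
            } else if (ret) {
                    ww_mutex_unlock(&a->lock);
                    goto out_fini;
            }

            ww_acquire_done(ctx);
            return 0;

    out_fini:
            ww_acquire_fini(ctx);
            return ret;
    }
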
| @@ -419,6 +420,33 @@ void v3d_exec_put(struct v3d_exec_info *exec) | |||
| 419 | kref_put(&exec->refcount, v3d_exec_cleanup); | 420 | kref_put(&exec->refcount, v3d_exec_cleanup); |
| 420 | } | 421 | } |
| 421 | 422 | ||
| 423 | static void | ||
| 424 | v3d_tfu_job_cleanup(struct kref *ref) | ||
| 425 | { | ||
| 426 | struct v3d_tfu_job *job = container_of(ref, struct v3d_tfu_job, | ||
| 427 | refcount); | ||
| 428 | struct v3d_dev *v3d = job->v3d; | ||
| 429 | unsigned int i; | ||
| 430 | |||
| 431 | dma_fence_put(job->in_fence); | ||
| 432 | dma_fence_put(job->done_fence); | ||
| 433 | |||
| 434 | for (i = 0; i < ARRAY_SIZE(job->bo); i++) { | ||
| 435 | if (job->bo[i]) | ||
| 436 | drm_gem_object_put_unlocked(&job->bo[i]->base); | ||
| 437 | } | ||
| 438 | |||
| 439 | pm_runtime_mark_last_busy(v3d->dev); | ||
| 440 | pm_runtime_put_autosuspend(v3d->dev); | ||
| 441 | |||
| 442 | kfree(job); | ||
| 443 | } | ||
| 444 | |||
| 445 | void v3d_tfu_job_put(struct v3d_tfu_job *job) | ||
| 446 | { | ||
| 447 | kref_put(&job->refcount, v3d_tfu_job_cleanup); | ||
| 448 | } | ||
| 449 | |||
| 422 | int | 450 | int |
| 423 | v3d_wait_bo_ioctl(struct drm_device *dev, void *data, | 451 | v3d_wait_bo_ioctl(struct drm_device *dev, void *data, |
| 424 | struct drm_file *file_priv) | 452 | struct drm_file *file_priv) |
| @@ -493,6 +521,8 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data, | |||
| 493 | struct drm_syncobj *sync_out; | 521 | struct drm_syncobj *sync_out; |
| 494 | int ret = 0; | 522 | int ret = 0; |
| 495 | 523 | ||
| 524 | trace_v3d_submit_cl_ioctl(&v3d->drm, args->rcl_start, args->rcl_end); | ||
| 525 | |||
| 496 | if (args->pad != 0) { | 526 | if (args->pad != 0) { |
| 497 | DRM_INFO("pad must be zero: %d\n", args->pad); | 527 | DRM_INFO("pad must be zero: %d\n", args->pad); |
| 498 | return -EINVAL; | 528 | return -EINVAL; |
| @@ -536,7 +566,8 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data, | |||
| 536 | if (ret) | 566 | if (ret) |
| 537 | goto fail; | 567 | goto fail; |
| 538 | 568 | ||
| 539 | ret = v3d_lock_bo_reservations(dev, exec, &acquire_ctx); | 569 | ret = v3d_lock_bo_reservations(exec->bo, exec->bo_count, |
| 570 | &acquire_ctx); | ||
| 540 | if (ret) | 571 | if (ret) |
| 541 | goto fail; | 572 | goto fail; |
| 542 | 573 | ||
| @@ -570,15 +601,15 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data, | |||
| 570 | &v3d_priv->sched_entity[V3D_RENDER]); | 601 | &v3d_priv->sched_entity[V3D_RENDER]); |
| 571 | mutex_unlock(&v3d->sched_lock); | 602 | mutex_unlock(&v3d->sched_lock); |
| 572 | 603 | ||
| 573 | v3d_attach_object_fences(exec); | 604 | v3d_attach_object_fences(exec->bo, exec->bo_count, |
| 605 | exec->render_done_fence); | ||
| 574 | 606 | ||
| 575 | v3d_unlock_bo_reservations(dev, exec, &acquire_ctx); | 607 | v3d_unlock_bo_reservations(exec->bo, exec->bo_count, &acquire_ctx); |
| 576 | 608 | ||
| 577 | /* Update the return sync object for the */ | 609 | /* Update the return sync object for the */ |
| 578 | sync_out = drm_syncobj_find(file_priv, args->out_sync); | 610 | sync_out = drm_syncobj_find(file_priv, args->out_sync); |
| 579 | if (sync_out) { | 611 | if (sync_out) { |
| 580 | drm_syncobj_replace_fence(sync_out, 0, | 612 | drm_syncobj_replace_fence(sync_out, exec->render_done_fence); |
| 581 | exec->render_done_fence); | ||
| 582 | drm_syncobj_put(sync_out); | 613 | drm_syncobj_put(sync_out); |
| 583 | } | 614 | } |
| 584 | 615 | ||
| @@ -588,13 +619,121 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data, | |||
| 588 | 619 | ||
| 589 | fail_unreserve: | 620 | fail_unreserve: |
| 590 | mutex_unlock(&v3d->sched_lock); | 621 | mutex_unlock(&v3d->sched_lock); |
| 591 | v3d_unlock_bo_reservations(dev, exec, &acquire_ctx); | 622 | v3d_unlock_bo_reservations(exec->bo, exec->bo_count, &acquire_ctx); |
| 592 | fail: | 623 | fail: |
| 593 | v3d_exec_put(exec); | 624 | v3d_exec_put(exec); |
| 594 | 625 | ||
| 595 | return ret; | 626 | return ret; |
| 596 | } | 627 | } |
| 597 | 628 | ||
| 629 | /** | ||
| 630 | * v3d_submit_tfu_ioctl() - Submits a TFU (texture formatting) job to the V3D. | ||
| 631 | * @dev: DRM device | ||
| 632 | * @data: ioctl argument | ||
| 633 | * @file_priv: DRM file for this fd | ||
| 634 | * | ||
| 635 | * Userspace provides the register setup for the TFU, which we don't | ||
| 636 | * need to validate since the TFU is behind the MMU. | ||
| 637 | */ | ||
| 638 | int | ||
| 639 | v3d_submit_tfu_ioctl(struct drm_device *dev, void *data, | ||
| 640 | struct drm_file *file_priv) | ||
| 641 | { | ||
| 642 | struct v3d_dev *v3d = to_v3d_dev(dev); | ||
| 643 | struct v3d_file_priv *v3d_priv = file_priv->driver_priv; | ||
| 644 | struct drm_v3d_submit_tfu *args = data; | ||
| 645 | struct v3d_tfu_job *job; | ||
| 646 | struct ww_acquire_ctx acquire_ctx; | ||
| 647 | struct drm_syncobj *sync_out; | ||
| 648 | struct dma_fence *sched_done_fence; | ||
| 649 | int ret = 0; | ||
| 650 | int bo_count; | ||
| 651 | |||
| 652 | trace_v3d_submit_tfu_ioctl(&v3d->drm, args->iia); | ||
| 653 | |||
| 654 | job = kcalloc(1, sizeof(*job), GFP_KERNEL); | ||
| 655 | if (!job) | ||
| 656 | return -ENOMEM; | ||
| 657 | |||
| 658 | ret = pm_runtime_get_sync(v3d->dev); | ||
| 659 | if (ret < 0) { | ||
| 660 | kfree(job); | ||
| 661 | return ret; | ||
| 662 | } | ||
| 663 | |||
| 664 | kref_init(&job->refcount); | ||
| 665 | |||
| 666 | ret = drm_syncobj_find_fence(file_priv, args->in_sync, | ||
| 667 | 0, 0, &job->in_fence); | ||
| 668 | if (ret == -EINVAL) | ||
| 669 | goto fail; | ||
| 670 | |||
| 671 | job->args = *args; | ||
| 672 | job->v3d = v3d; | ||
| 673 | |||
| 674 | spin_lock(&file_priv->table_lock); | ||
| 675 | for (bo_count = 0; bo_count < ARRAY_SIZE(job->bo); bo_count++) { | ||
| 676 | struct drm_gem_object *bo; | ||
| 677 | |||
| 678 | if (!args->bo_handles[bo_count]) | ||
| 679 | break; | ||
| 680 | |||
| 681 | bo = idr_find(&file_priv->object_idr, | ||
| 682 | args->bo_handles[bo_count]); | ||
| 683 | if (!bo) { | ||
| 684 | DRM_DEBUG("Failed to look up GEM BO %d: %d\n", | ||
| 685 | bo_count, args->bo_handles[bo_count]); | ||
| 686 | ret = -ENOENT; | ||
| 687 | spin_unlock(&file_priv->table_lock); | ||
| 688 | goto fail; | ||
| 689 | } | ||
| 690 | drm_gem_object_get(bo); | ||
| 691 | job->bo[bo_count] = to_v3d_bo(bo); | ||
| 692 | } | ||
| 693 | spin_unlock(&file_priv->table_lock); | ||
| 694 | |||
| 695 | ret = v3d_lock_bo_reservations(job->bo, bo_count, &acquire_ctx); | ||
| 696 | if (ret) | ||
| 697 | goto fail; | ||
| 698 | |||
| 699 | mutex_lock(&v3d->sched_lock); | ||
| 700 | ret = drm_sched_job_init(&job->base, | ||
| 701 | &v3d_priv->sched_entity[V3D_TFU], | ||
| 702 | v3d_priv); | ||
| 703 | if (ret) | ||
| 704 | goto fail_unreserve; | ||
| 705 | |||
| 706 | sched_done_fence = dma_fence_get(&job->base.s_fence->finished); | ||
| 707 | |||
| 708 | kref_get(&job->refcount); /* put by scheduler job completion */ | ||
| 709 | drm_sched_entity_push_job(&job->base, &v3d_priv->sched_entity[V3D_TFU]); | ||
| 710 | mutex_unlock(&v3d->sched_lock); | ||
| 711 | |||
| 712 | v3d_attach_object_fences(job->bo, bo_count, sched_done_fence); | ||
| 713 | |||
| 714 | v3d_unlock_bo_reservations(job->bo, bo_count, &acquire_ctx); | ||
| 715 | |||
| 716 | /* Update the return sync object */ | ||
| 717 | sync_out = drm_syncobj_find(file_priv, args->out_sync); | ||
| 718 | if (sync_out) { | ||
| 719 | drm_syncobj_replace_fence(sync_out, sched_done_fence); | ||
| 720 | drm_syncobj_put(sync_out); | ||
| 721 | } | ||
| 722 | dma_fence_put(sched_done_fence); | ||
| 723 | |||
| 724 | v3d_tfu_job_put(job); | ||
| 725 | |||
| 726 | return 0; | ||
| 727 | |||
| 728 | fail_unreserve: | ||
| 729 | mutex_unlock(&v3d->sched_lock); | ||
| 730 | v3d_unlock_bo_reservations(job->bo, bo_count, &acquire_ctx); | ||
| 731 | fail: | ||
| 732 | v3d_tfu_job_put(job); | ||
| 733 | |||
| 734 | return ret; | ||
| 735 | } | ||
| 736 | |||
| 598 | int | 737 | int |
| 599 | v3d_gem_init(struct drm_device *dev) | 738 | v3d_gem_init(struct drm_device *dev) |
| 600 | { | 739 | { |
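
v3d_submit_tfu_ioctl() above is the kernel half of the new DRM_IOCTL_V3D_SUBMIT_TFU interface. A hedged userspace sketch of a submission, assuming libdrm; every register value below is a placeholder the caller's driver code must compute for its actual formats, and the BO-array ordering is only a convention since the kernel fences every handle listed:

    #include <stdint.h>
    #include <string.h>
    #include <drm/v3d_drm.h>
    #include <xf86drm.h>

    static int submit_tfu_conversion(int fd, uint32_t dst_bo, uint32_t src_bo,
                                     uint32_t in_syncobj, uint32_t out_syncobj)
    {
            struct drm_v3d_submit_tfu tfu;

            memset(&tfu, 0, sizeof(tfu));
            tfu.bo_handles[0] = dst_bo;   /* buffer the TFU writes       */
            tfu.bo_handles[1] = src_bo;   /* buffer the TFU reads        */
            tfu.in_sync = in_syncobj;     /* 0 = nothing to wait on      */
            tfu.out_sync = out_syncobj;   /* signaled when the job ends  */

            /* Raw TFU register values; zero is only a placeholder here. */
            tfu.iia = 0;                  /* input image address         */
            tfu.iis = 0;                  /* input image stride          */
            tfu.ioa = 0;                  /* output image address        */
            tfu.ios = 0;                  /* output image size           */
            tfu.icfg = 0;                 /* format/configuration bits   */

            return drmIoctl(fd, DRM_IOCTL_V3D_SUBMIT_TFU, &tfu);
    }
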
diff --git a/drivers/gpu/drm/v3d/v3d_irq.c b/drivers/gpu/drm/v3d/v3d_irq.c index e07514eb11b5..69338da70ddc 100644 --- a/drivers/gpu/drm/v3d/v3d_irq.c +++ b/drivers/gpu/drm/v3d/v3d_irq.c | |||
| @@ -4,8 +4,8 @@ | |||
| 4 | /** | 4 | /** |
| 5 | * DOC: Interrupt management for the V3D engine | 5 | * DOC: Interrupt management for the V3D engine |
| 6 | * | 6 | * |
| 7 | * When we take a binning or rendering flush done interrupt, we need | 7 | * When we take a bin, render, or TFU done interrupt, we need to |
| 8 | * to signal the fence for that job so that the scheduler can queue up | 8 | * signal the fence for that job so that the scheduler can queue up |
| 9 | * the next one and unblock any waiters. | 9 | * the next one and unblock any waiters. |
| 10 | * | 10 | * |
| 11 | * When we take the binner out of memory interrupt, we need to | 11 | * When we take the binner out of memory interrupt, we need to |
| @@ -15,6 +15,7 @@ | |||
| 15 | 15 | ||
| 16 | #include "v3d_drv.h" | 16 | #include "v3d_drv.h" |
| 17 | #include "v3d_regs.h" | 17 | #include "v3d_regs.h" |
| 18 | #include "v3d_trace.h" | ||
| 18 | 19 | ||
| 19 | #define V3D_CORE_IRQS ((u32)(V3D_INT_OUTOMEM | \ | 20 | #define V3D_CORE_IRQS ((u32)(V3D_INT_OUTOMEM | \ |
| 20 | V3D_INT_FLDONE | \ | 21 | V3D_INT_FLDONE | \ |
| @@ -23,7 +24,8 @@ | |||
| 23 | 24 | ||
| 24 | #define V3D_HUB_IRQS ((u32)(V3D_HUB_INT_MMU_WRV | \ | 25 | #define V3D_HUB_IRQS ((u32)(V3D_HUB_INT_MMU_WRV | \ |
| 25 | V3D_HUB_INT_MMU_PTI | \ | 26 | V3D_HUB_INT_MMU_PTI | \ |
| 26 | V3D_HUB_INT_MMU_CAP)) | 27 | V3D_HUB_INT_MMU_CAP | \ |
| 28 | V3D_HUB_INT_TFUC)) | ||
| 27 | 29 | ||
| 28 | static void | 30 | static void |
| 29 | v3d_overflow_mem_work(struct work_struct *work) | 31 | v3d_overflow_mem_work(struct work_struct *work) |
| @@ -87,12 +89,20 @@ v3d_irq(int irq, void *arg) | |||
| 87 | } | 89 | } |
| 88 | 90 | ||
| 89 | if (intsts & V3D_INT_FLDONE) { | 91 | if (intsts & V3D_INT_FLDONE) { |
| 90 | dma_fence_signal(v3d->bin_job->bin.done_fence); | 92 | struct v3d_fence *fence = |
| 93 | to_v3d_fence(v3d->bin_job->bin.done_fence); | ||
| 94 | |||
| 95 | trace_v3d_bcl_irq(&v3d->drm, fence->seqno); | ||
| 96 | dma_fence_signal(&fence->base); | ||
| 91 | status = IRQ_HANDLED; | 97 | status = IRQ_HANDLED; |
| 92 | } | 98 | } |
| 93 | 99 | ||
| 94 | if (intsts & V3D_INT_FRDONE) { | 100 | if (intsts & V3D_INT_FRDONE) { |
| 95 | dma_fence_signal(v3d->render_job->render.done_fence); | 101 | struct v3d_fence *fence = |
| 102 | to_v3d_fence(v3d->render_job->render.done_fence); | ||
| 103 | |||
| 104 | trace_v3d_rcl_irq(&v3d->drm, fence->seqno); | ||
| 105 | dma_fence_signal(&fence->base); | ||
| 96 | status = IRQ_HANDLED; | 106 | status = IRQ_HANDLED; |
| 97 | } | 107 | } |
| 98 | 108 | ||
| @@ -117,6 +127,15 @@ v3d_hub_irq(int irq, void *arg) | |||
| 117 | /* Acknowledge the interrupts we're handling here. */ | 127 | /* Acknowledge the interrupts we're handling here. */ |
| 118 | V3D_WRITE(V3D_HUB_INT_CLR, intsts); | 128 | V3D_WRITE(V3D_HUB_INT_CLR, intsts); |
| 119 | 129 | ||
| 130 | if (intsts & V3D_HUB_INT_TFUC) { | ||
| 131 | struct v3d_fence *fence = | ||
| 132 | to_v3d_fence(v3d->tfu_job->done_fence); | ||
| 133 | |||
| 134 | trace_v3d_tfu_irq(&v3d->drm, fence->seqno); | ||
| 135 | dma_fence_signal(&fence->base); | ||
| 136 | status = IRQ_HANDLED; | ||
| 137 | } | ||
| 138 | |||
| 120 | if (intsts & (V3D_HUB_INT_MMU_WRV | | 139 | if (intsts & (V3D_HUB_INT_MMU_WRV | |
| 121 | V3D_HUB_INT_MMU_PTI | | 140 | V3D_HUB_INT_MMU_PTI | |
| 122 | V3D_HUB_INT_MMU_CAP)) { | 141 | V3D_HUB_INT_MMU_CAP)) { |
diff --git a/drivers/gpu/drm/v3d/v3d_regs.h b/drivers/gpu/drm/v3d/v3d_regs.h index c3a5e4e44f73..6ccdee9d47bd 100644 --- a/drivers/gpu/drm/v3d/v3d_regs.h +++ b/drivers/gpu/drm/v3d/v3d_regs.h | |||
| @@ -86,6 +86,55 @@ | |||
| 86 | # define V3D_TOP_GR_BRIDGE_SW_INIT_1 0x0000c | 86 | # define V3D_TOP_GR_BRIDGE_SW_INIT_1 0x0000c |
| 87 | # define V3D_TOP_GR_BRIDGE_SW_INIT_1_V3D_CLK_108_SW_INIT BIT(0) | 87 | # define V3D_TOP_GR_BRIDGE_SW_INIT_1_V3D_CLK_108_SW_INIT BIT(0) |
| 88 | 88 | ||
| 89 | #define V3D_TFU_CS 0x00400 | ||
| 90 | /* Stops current job, empties input fifo. */ | ||
| 91 | # define V3D_TFU_CS_TFURST BIT(31) | ||
| 92 | # define V3D_TFU_CS_CVTCT_MASK V3D_MASK(23, 16) | ||
| 93 | # define V3D_TFU_CS_CVTCT_SHIFT 16 | ||
| 94 | # define V3D_TFU_CS_NFREE_MASK V3D_MASK(13, 8) | ||
| 95 | # define V3D_TFU_CS_NFREE_SHIFT 8 | ||
| 96 | # define V3D_TFU_CS_BUSY BIT(0) | ||
| 97 | |||
| 98 | #define V3D_TFU_SU 0x00404 | ||
| 99 | /* Interrupt when FINTTHR input slots are free (0 = disabled) */ | ||
| 100 | # define V3D_TFU_SU_FINTTHR_MASK V3D_MASK(13, 8) | ||
| 101 | # define V3D_TFU_SU_FINTTHR_SHIFT 8 | ||
| 102 | /* Skips resetting the CRC at the start of CRC generation. */ | ||
| 103 | # define V3D_TFU_SU_CRCCHAIN BIT(4) | ||
| 104 | /* skips writes, computes CRC of the image. miplevels must be 0. */ | ||
| 105 | # define V3D_TFU_SU_CRC BIT(3) | ||
| 106 | # define V3D_TFU_SU_THROTTLE_MASK V3D_MASK(1, 0) | ||
| 107 | # define V3D_TFU_SU_THROTTLE_SHIFT 0 | ||
| 108 | |||
| 109 | #define V3D_TFU_ICFG 0x00408 | ||
| 110 | /* Interrupt when the conversion is complete. */ | ||
| 111 | # define V3D_TFU_ICFG_IOC BIT(0) | ||
| 112 | |||
| 113 | /* Input Image Address */ | ||
| 114 | #define V3D_TFU_IIA 0x0040c | ||
| 115 | /* Input Chroma Address */ | ||
| 116 | #define V3D_TFU_ICA 0x00410 | ||
| 117 | /* Input Image Stride */ | ||
| 118 | #define V3D_TFU_IIS 0x00414 | ||
| 119 | /* Input Image U-Plane Address */ | ||
| 120 | #define V3D_TFU_IUA 0x00418 | ||
| 121 | /* Output Image Address */ | ||
| 122 | #define V3D_TFU_IOA 0x0041c | ||
| 123 | /* Image Output Size */ | ||
| 124 | #define V3D_TFU_IOS 0x00420 | ||
| 125 | /* TFU YUV Coefficient 0 */ | ||
| 126 | #define V3D_TFU_COEF0 0x00424 | ||
| 127 | /* Use these regs instead of the defaults. */ | ||
| 128 | # define V3D_TFU_COEF0_USECOEF BIT(31) | ||
| 129 | /* TFU YUV Coefficient 1 */ | ||
| 130 | #define V3D_TFU_COEF1 0x00428 | ||
| 131 | /* TFU YUV Coefficient 2 */ | ||
| 132 | #define V3D_TFU_COEF2 0x0042c | ||
| 133 | /* TFU YUV Coefficient 3 */ | ||
| 134 | #define V3D_TFU_COEF3 0x00430 | ||
| 135 | |||
| 136 | #define V3D_TFU_CRC 0x00434 | ||
| 137 | |||
| 89 | /* Per-MMU registers. */ | 138 | /* Per-MMU registers. */ |
| 90 | 139 | ||
| 91 | #define V3D_MMUC_CONTROL 0x01000 | 140 | #define V3D_MMUC_CONTROL 0x01000 |
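
The TFU register block above follows the driver's existing MASK/SHIFT conventions, so fields are extracted the usual way. A purely illustrative helper (the function name is made up; nothing in the submit path needs to poll these bits, since completion is reported through the V3D_HUB_INT_TFUC interrupt):

    static bool v3d_tfu_fifo_has_space(struct v3d_dev *v3d)
    {
            u32 cs = V3D_READ(V3D_TFU_CS);

            /* NFREE is the number of free slots in the TFU input FIFO. */
            return ((cs & V3D_TFU_CS_NFREE_MASK) >> V3D_TFU_CS_NFREE_SHIFT) > 0;
    }
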
diff --git a/drivers/gpu/drm/v3d/v3d_sched.c b/drivers/gpu/drm/v3d/v3d_sched.c index c66d0ce21435..f7508e907536 100644 --- a/drivers/gpu/drm/v3d/v3d_sched.c +++ b/drivers/gpu/drm/v3d/v3d_sched.c | |||
| @@ -30,6 +30,12 @@ to_v3d_job(struct drm_sched_job *sched_job) | |||
| 30 | return container_of(sched_job, struct v3d_job, base); | 30 | return container_of(sched_job, struct v3d_job, base); |
| 31 | } | 31 | } |
| 32 | 32 | ||
| 33 | static struct v3d_tfu_job * | ||
| 34 | to_tfu_job(struct drm_sched_job *sched_job) | ||
| 35 | { | ||
| 36 | return container_of(sched_job, struct v3d_tfu_job, base); | ||
| 37 | } | ||
| 38 | |||
| 33 | static void | 39 | static void |
| 34 | v3d_job_free(struct drm_sched_job *sched_job) | 40 | v3d_job_free(struct drm_sched_job *sched_job) |
| 35 | { | 41 | { |
| @@ -40,6 +46,16 @@ v3d_job_free(struct drm_sched_job *sched_job) | |||
| 40 | v3d_exec_put(job->exec); | 46 | v3d_exec_put(job->exec); |
| 41 | } | 47 | } |
| 42 | 48 | ||
| 49 | static void | ||
| 50 | v3d_tfu_job_free(struct drm_sched_job *sched_job) | ||
| 51 | { | ||
| 52 | struct v3d_tfu_job *job = to_tfu_job(sched_job); | ||
| 53 | |||
| 54 | drm_sched_job_cleanup(sched_job); | ||
| 55 | |||
| 56 | v3d_tfu_job_put(job); | ||
| 57 | } | ||
| 58 | |||
| 43 | /** | 59 | /** |
| 44 | * Returns the fences that the bin or render job depends on, one by one. | 60 | * Returns the fences that the bin or render job depends on, one by one. |
| 45 | * v3d_job_run() won't be called until all of them have been signaled. | 61 | * v3d_job_run() won't be called until all of them have been signaled. |
| @@ -78,6 +94,27 @@ v3d_job_dependency(struct drm_sched_job *sched_job, | |||
| 78 | return fence; | 94 | return fence; |
| 79 | } | 95 | } |
| 80 | 96 | ||
| 97 | /** | ||
| 98 | * Returns the fences that the TFU job depends on, one by one. | ||
| 99 | * v3d_tfu_job_run() won't be called until all of them have been | ||
| 100 | * signaled. | ||
| 101 | */ | ||
| 102 | static struct dma_fence * | ||
| 103 | v3d_tfu_job_dependency(struct drm_sched_job *sched_job, | ||
| 104 | struct drm_sched_entity *s_entity) | ||
| 105 | { | ||
| 106 | struct v3d_tfu_job *job = to_tfu_job(sched_job); | ||
| 107 | struct dma_fence *fence; | ||
| 108 | |||
| 109 | fence = job->in_fence; | ||
| 110 | if (fence) { | ||
| 111 | job->in_fence = NULL; | ||
| 112 | return fence; | ||
| 113 | } | ||
| 114 | |||
| 115 | return NULL; | ||
| 116 | } | ||
| 117 | |||
| 81 | static struct dma_fence *v3d_job_run(struct drm_sched_job *sched_job) | 118 | static struct dma_fence *v3d_job_run(struct drm_sched_job *sched_job) |
| 82 | { | 119 | { |
| 83 | struct v3d_job *job = to_v3d_job(sched_job); | 120 | struct v3d_job *job = to_v3d_job(sched_job); |
| @@ -149,28 +186,47 @@ static struct dma_fence *v3d_job_run(struct drm_sched_job *sched_job) | |||
| 149 | return fence; | 186 | return fence; |
| 150 | } | 187 | } |
| 151 | 188 | ||
| 152 | static void | 189 | static struct dma_fence * |
| 153 | v3d_job_timedout(struct drm_sched_job *sched_job) | 190 | v3d_tfu_job_run(struct drm_sched_job *sched_job) |
| 154 | { | 191 | { |
| 155 | struct v3d_job *job = to_v3d_job(sched_job); | 192 | struct v3d_tfu_job *job = to_tfu_job(sched_job); |
| 156 | struct v3d_exec_info *exec = job->exec; | 193 | struct v3d_dev *v3d = job->v3d; |
| 157 | struct v3d_dev *v3d = exec->v3d; | 194 | struct drm_device *dev = &v3d->drm; |
| 158 | enum v3d_queue job_q = job == &exec->bin ? V3D_BIN : V3D_RENDER; | 195 | struct dma_fence *fence; |
| 159 | enum v3d_queue q; | ||
| 160 | u32 ctca = V3D_CORE_READ(0, V3D_CLE_CTNCA(job_q)); | ||
| 161 | u32 ctra = V3D_CORE_READ(0, V3D_CLE_CTNRA(job_q)); | ||
| 162 | 196 | ||
| 163 | /* If the current address or return address have changed, then | 197 | fence = v3d_fence_create(v3d, V3D_TFU); |
| 164 | * the GPU has probably made progress and we should delay the | 198 | if (IS_ERR(fence)) |
| 165 | * reset. This could fail if the GPU got in an infinite loop | 199 | return NULL; |
| 166 | * in the CL, but that is pretty unlikely outside of an i-g-t | 200 | |
| 167 | * testcase. | 201 | v3d->tfu_job = job; |
| 168 | */ | 202 | if (job->done_fence) |
| 169 | if (job->timedout_ctca != ctca || job->timedout_ctra != ctra) { | 203 | dma_fence_put(job->done_fence); |
| 170 | job->timedout_ctca = ctca; | 204 | job->done_fence = dma_fence_get(fence); |
| 171 | job->timedout_ctra = ctra; | 205 | |
| 172 | return; | 206 | trace_v3d_submit_tfu(dev, to_v3d_fence(fence)->seqno); |
| 207 | |||
| 208 | V3D_WRITE(V3D_TFU_IIA, job->args.iia); | ||
| 209 | V3D_WRITE(V3D_TFU_IIS, job->args.iis); | ||
| 210 | V3D_WRITE(V3D_TFU_ICA, job->args.ica); | ||
| 211 | V3D_WRITE(V3D_TFU_IUA, job->args.iua); | ||
| 212 | V3D_WRITE(V3D_TFU_IOA, job->args.ioa); | ||
| 213 | V3D_WRITE(V3D_TFU_IOS, job->args.ios); | ||
| 214 | V3D_WRITE(V3D_TFU_COEF0, job->args.coef[0]); | ||
| 215 | if (job->args.coef[0] & V3D_TFU_COEF0_USECOEF) { | ||
| 216 | V3D_WRITE(V3D_TFU_COEF1, job->args.coef[1]); | ||
| 217 | V3D_WRITE(V3D_TFU_COEF2, job->args.coef[2]); | ||
| 218 | V3D_WRITE(V3D_TFU_COEF3, job->args.coef[3]); | ||
| 173 | } | 219 | } |
| 220 | /* ICFG kicks off the job. */ | ||
| 221 | V3D_WRITE(V3D_TFU_ICFG, job->args.icfg | V3D_TFU_ICFG_IOC); | ||
| 222 | |||
| 223 | return fence; | ||
| 224 | } | ||
| 225 | |||
| 226 | static void | ||
| 227 | v3d_gpu_reset_for_timeout(struct v3d_dev *v3d, struct drm_sched_job *sched_job) | ||
| 228 | { | ||
| 229 | enum v3d_queue q; | ||
| 174 | 230 | ||
| 175 | mutex_lock(&v3d->reset_lock); | 231 | mutex_lock(&v3d->reset_lock); |
| 176 | 232 | ||
| @@ -195,6 +251,39 @@ v3d_job_timedout(struct drm_sched_job *sched_job) | |||
| 195 | mutex_unlock(&v3d->reset_lock); | 251 | mutex_unlock(&v3d->reset_lock); |
| 196 | } | 252 | } |
| 197 | 253 | ||
| 254 | static void | ||
| 255 | v3d_job_timedout(struct drm_sched_job *sched_job) | ||
| 256 | { | ||
| 257 | struct v3d_job *job = to_v3d_job(sched_job); | ||
| 258 | struct v3d_exec_info *exec = job->exec; | ||
| 259 | struct v3d_dev *v3d = exec->v3d; | ||
| 260 | enum v3d_queue job_q = job == &exec->bin ? V3D_BIN : V3D_RENDER; | ||
| 261 | u32 ctca = V3D_CORE_READ(0, V3D_CLE_CTNCA(job_q)); | ||
| 262 | u32 ctra = V3D_CORE_READ(0, V3D_CLE_CTNRA(job_q)); | ||
| 263 | |||
| 264 | /* If the current address or return address have changed, then | ||
| 265 | * the GPU has probably made progress and we should delay the | ||
| 266 | * reset. This could fail if the GPU got in an infinite loop | ||
| 267 | * in the CL, but that is pretty unlikely outside of an i-g-t | ||
| 268 | * testcase. | ||
| 269 | */ | ||
| 270 | if (job->timedout_ctca != ctca || job->timedout_ctra != ctra) { | ||
| 271 | job->timedout_ctca = ctca; | ||
| 272 | job->timedout_ctra = ctra; | ||
| 273 | return; | ||
| 274 | } | ||
| 275 | |||
| 276 | v3d_gpu_reset_for_timeout(v3d, sched_job); | ||
| 277 | } | ||
| 278 | |||
| 279 | static void | ||
| 280 | v3d_tfu_job_timedout(struct drm_sched_job *sched_job) | ||
| 281 | { | ||
| 282 | struct v3d_tfu_job *job = to_tfu_job(sched_job); | ||
| 283 | |||
| 284 | v3d_gpu_reset_for_timeout(job->v3d, sched_job); | ||
| 285 | } | ||
| 286 | |||
| 198 | static const struct drm_sched_backend_ops v3d_sched_ops = { | 287 | static const struct drm_sched_backend_ops v3d_sched_ops = { |
| 199 | .dependency = v3d_job_dependency, | 288 | .dependency = v3d_job_dependency, |
| 200 | .run_job = v3d_job_run, | 289 | .run_job = v3d_job_run, |
| @@ -202,6 +291,13 @@ static const struct drm_sched_backend_ops v3d_sched_ops = { | |||
| 202 | .free_job = v3d_job_free | 291 | .free_job = v3d_job_free |
| 203 | }; | 292 | }; |
| 204 | 293 | ||
| 294 | static const struct drm_sched_backend_ops v3d_tfu_sched_ops = { | ||
| 295 | .dependency = v3d_tfu_job_dependency, | ||
| 296 | .run_job = v3d_tfu_job_run, | ||
| 297 | .timedout_job = v3d_tfu_job_timedout, | ||
| 298 | .free_job = v3d_tfu_job_free | ||
| 299 | }; | ||
| 300 | |||
| 205 | int | 301 | int |
| 206 | v3d_sched_init(struct v3d_dev *v3d) | 302 | v3d_sched_init(struct v3d_dev *v3d) |
| 207 | { | 303 | { |
| @@ -232,6 +328,19 @@ v3d_sched_init(struct v3d_dev *v3d) | |||
| 232 | return ret; | 328 | return ret; |
| 233 | } | 329 | } |
| 234 | 330 | ||
| 331 | ret = drm_sched_init(&v3d->queue[V3D_TFU].sched, | ||
| 332 | &v3d_tfu_sched_ops, | ||
| 333 | hw_jobs_limit, job_hang_limit, | ||
| 334 | msecs_to_jiffies(hang_limit_ms), | ||
| 335 | "v3d_tfu"); | ||
| 336 | if (ret) { | ||
| 337 | dev_err(v3d->dev, "Failed to create TFU scheduler: %d.", | ||
| 338 | ret); | ||
| 339 | drm_sched_fini(&v3d->queue[V3D_RENDER].sched); | ||
| 340 | drm_sched_fini(&v3d->queue[V3D_BIN].sched); | ||
| 341 | return ret; | ||
| 342 | } | ||
| 343 | |||
| 235 | return 0; | 344 | return 0; |
| 236 | } | 345 | } |
| 237 | 346 | ||
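
With a third scheduler in play, the error paths above have to unwind the ones already created by hand; the matching teardown just walks every queue, which is roughly what v3d_sched_fini() does (sketch, assuming all schedulers were initialized):

    void v3d_sched_fini(struct v3d_dev *v3d)
    {
            enum v3d_queue q;

            for (q = 0; q < V3D_MAX_QUEUES; q++)
                    drm_sched_fini(&v3d->queue[q].sched);
    }
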
diff --git a/drivers/gpu/drm/v3d/v3d_trace.h b/drivers/gpu/drm/v3d/v3d_trace.h index 85dd351e1e09..edd984afa33f 100644 --- a/drivers/gpu/drm/v3d/v3d_trace.h +++ b/drivers/gpu/drm/v3d/v3d_trace.h | |||
| @@ -12,6 +12,28 @@ | |||
| 12 | #define TRACE_SYSTEM v3d | 12 | #define TRACE_SYSTEM v3d |
| 13 | #define TRACE_INCLUDE_FILE v3d_trace | 13 | #define TRACE_INCLUDE_FILE v3d_trace |
| 14 | 14 | ||
| 15 | TRACE_EVENT(v3d_submit_cl_ioctl, | ||
| 16 | TP_PROTO(struct drm_device *dev, u32 ct1qba, u32 ct1qea), | ||
| 17 | TP_ARGS(dev, ct1qba, ct1qea), | ||
| 18 | |||
| 19 | TP_STRUCT__entry( | ||
| 20 | __field(u32, dev) | ||
| 21 | __field(u32, ct1qba) | ||
| 22 | __field(u32, ct1qea) | ||
| 23 | ), | ||
| 24 | |||
| 25 | TP_fast_assign( | ||
| 26 | __entry->dev = dev->primary->index; | ||
| 27 | __entry->ct1qba = ct1qba; | ||
| 28 | __entry->ct1qea = ct1qea; | ||
| 29 | ), | ||
| 30 | |||
| 31 | TP_printk("dev=%u, RCL 0x%08x..0x%08x", | ||
| 32 | __entry->dev, | ||
| 33 | __entry->ct1qba, | ||
| 34 | __entry->ct1qea) | ||
| 35 | ); | ||
| 36 | |||
| 15 | TRACE_EVENT(v3d_submit_cl, | 37 | TRACE_EVENT(v3d_submit_cl, |
| 16 | TP_PROTO(struct drm_device *dev, bool is_render, | 38 | TP_PROTO(struct drm_device *dev, bool is_render, |
| 17 | uint64_t seqno, | 39 | uint64_t seqno, |
| @@ -42,6 +64,105 @@ TRACE_EVENT(v3d_submit_cl, | |||
| 42 | __entry->ctnqea) | 64 | __entry->ctnqea) |
| 43 | ); | 65 | ); |
| 44 | 66 | ||
| 67 | TRACE_EVENT(v3d_bcl_irq, | ||
| 68 | TP_PROTO(struct drm_device *dev, | ||
| 69 | uint64_t seqno), | ||
| 70 | TP_ARGS(dev, seqno), | ||
| 71 | |||
| 72 | TP_STRUCT__entry( | ||
| 73 | __field(u32, dev) | ||
| 74 | __field(u64, seqno) | ||
| 75 | ), | ||
| 76 | |||
| 77 | TP_fast_assign( | ||
| 78 | __entry->dev = dev->primary->index; | ||
| 79 | __entry->seqno = seqno; | ||
| 80 | ), | ||
| 81 | |||
| 82 | TP_printk("dev=%u, seqno=%llu", | ||
| 83 | __entry->dev, | ||
| 84 | __entry->seqno) | ||
| 85 | ); | ||
| 86 | |||
| 87 | TRACE_EVENT(v3d_rcl_irq, | ||
| 88 | TP_PROTO(struct drm_device *dev, | ||
| 89 | uint64_t seqno), | ||
| 90 | TP_ARGS(dev, seqno), | ||
| 91 | |||
| 92 | TP_STRUCT__entry( | ||
| 93 | __field(u32, dev) | ||
| 94 | __field(u64, seqno) | ||
| 95 | ), | ||
| 96 | |||
| 97 | TP_fast_assign( | ||
| 98 | __entry->dev = dev->primary->index; | ||
| 99 | __entry->seqno = seqno; | ||
| 100 | ), | ||
| 101 | |||
| 102 | TP_printk("dev=%u, seqno=%llu", | ||
| 103 | __entry->dev, | ||
| 104 | __entry->seqno) | ||
| 105 | ); | ||
| 106 | |||
| 107 | TRACE_EVENT(v3d_tfu_irq, | ||
| 108 | TP_PROTO(struct drm_device *dev, | ||
| 109 | uint64_t seqno), | ||
| 110 | TP_ARGS(dev, seqno), | ||
| 111 | |||
| 112 | TP_STRUCT__entry( | ||
| 113 | __field(u32, dev) | ||
| 114 | __field(u64, seqno) | ||
| 115 | ), | ||
| 116 | |||
| 117 | TP_fast_assign( | ||
| 118 | __entry->dev = dev->primary->index; | ||
| 119 | __entry->seqno = seqno; | ||
| 120 | ), | ||
| 121 | |||
| 122 | TP_printk("dev=%u, seqno=%llu", | ||
| 123 | __entry->dev, | ||
| 124 | __entry->seqno) | ||
| 125 | ); | ||
| 126 | |||
| 127 | TRACE_EVENT(v3d_submit_tfu_ioctl, | ||
| 128 | TP_PROTO(struct drm_device *dev, u32 iia), | ||
| 129 | TP_ARGS(dev, iia), | ||
| 130 | |||
| 131 | TP_STRUCT__entry( | ||
| 132 | __field(u32, dev) | ||
| 133 | __field(u32, iia) | ||
| 134 | ), | ||
| 135 | |||
| 136 | TP_fast_assign( | ||
| 137 | __entry->dev = dev->primary->index; | ||
| 138 | __entry->iia = iia; | ||
| 139 | ), | ||
| 140 | |||
| 141 | TP_printk("dev=%u, IIA 0x%08x", | ||
| 142 | __entry->dev, | ||
| 143 | __entry->iia) | ||
| 144 | ); | ||
| 145 | |||
| 146 | TRACE_EVENT(v3d_submit_tfu, | ||
| 147 | TP_PROTO(struct drm_device *dev, | ||
| 148 | uint64_t seqno), | ||
| 149 | TP_ARGS(dev, seqno), | ||
| 150 | |||
| 151 | TP_STRUCT__entry( | ||
| 152 | __field(u32, dev) | ||
| 153 | __field(u64, seqno) | ||
| 154 | ), | ||
| 155 | |||
| 156 | TP_fast_assign( | ||
| 157 | __entry->dev = dev->primary->index; | ||
| 158 | __entry->seqno = seqno; | ||
| 159 | ), | ||
| 160 | |||
| 161 | TP_printk("dev=%u, seqno=%llu", | ||
| 162 | __entry->dev, | ||
| 163 | __entry->seqno) | ||
| 164 | ); | ||
| 165 | |||
| 45 | TRACE_EVENT(v3d_reset_begin, | 166 | TRACE_EVENT(v3d_reset_begin, |
| 46 | TP_PROTO(struct drm_device *dev), | 167 | TP_PROTO(struct drm_device *dev), |
| 47 | TP_ARGS(dev), | 168 | TP_ARGS(dev), |
diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h index bd6ef1f31822..4f87b03f837d 100644 --- a/drivers/gpu/drm/vc4/vc4_drv.h +++ b/drivers/gpu/drm/vc4/vc4_drv.h | |||
| @@ -338,6 +338,7 @@ struct vc4_plane_state { | |||
| 338 | u32 pos0_offset; | 338 | u32 pos0_offset; |
| 339 | u32 pos2_offset; | 339 | u32 pos2_offset; |
| 340 | u32 ptr0_offset; | 340 | u32 ptr0_offset; |
| 341 | u32 lbm_offset; | ||
| 341 | 342 | ||
| 342 | /* Offset where the plane's dlist was last stored in the | 343 | /* Offset where the plane's dlist was last stored in the |
| 343 | * hardware at vc4_crtc_atomic_flush() time. | 344 | * hardware at vc4_crtc_atomic_flush() time. |
| @@ -369,6 +370,11 @@ struct vc4_plane_state { | |||
| 369 | * to enable background color fill. | 370 | * to enable background color fill. |
| 370 | */ | 371 | */ |
| 371 | bool needs_bg_fill; | 372 | bool needs_bg_fill; |
| 373 | |||
| 374 | /* Mark the dlist as initialized. Useful to avoid initializing it twice | ||
| 375 | * when async update is not possible. | ||
| 376 | */ | ||
| 377 | bool dlist_initialized; | ||
| 372 | }; | 378 | }; |
| 373 | 379 | ||
| 374 | static inline struct vc4_plane_state * | 380 | static inline struct vc4_plane_state * |
diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c index 41881ce4132d..aea2b8dfec17 100644 --- a/drivers/gpu/drm/vc4/vc4_gem.c +++ b/drivers/gpu/drm/vc4/vc4_gem.c | |||
| @@ -681,7 +681,7 @@ vc4_queue_submit(struct drm_device *dev, struct vc4_exec_info *exec, | |||
| 681 | exec->fence = &fence->base; | 681 | exec->fence = &fence->base; |
| 682 | 682 | ||
| 683 | if (out_sync) | 683 | if (out_sync) |
| 684 | drm_syncobj_replace_fence(out_sync, 0, exec->fence); | 684 | drm_syncobj_replace_fence(out_sync, exec->fence); |
| 685 | 685 | ||
| 686 | vc4_update_bo_seqnos(exec, seqno); | 686 | vc4_update_bo_seqnos(exec, seqno); |
| 687 | 687 | ||
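
The vc4 hunk above is fallout from a DRM core interface change: drm_syncobj_replace_fence() dropped its timeline-point argument, so callers lose the middle 0. Before and after, for reference:

    drm_syncobj_replace_fence(out_sync, 0, exec->fence);   /* old signature */
    drm_syncobj_replace_fence(out_sync, exec->fence);      /* new signature */
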
diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c index c3ded0ba0441..75db62cbe468 100644 --- a/drivers/gpu/drm/vc4/vc4_plane.c +++ b/drivers/gpu/drm/vc4/vc4_plane.c | |||
| @@ -154,6 +154,7 @@ static struct drm_plane_state *vc4_plane_duplicate_state(struct drm_plane *plane | |||
| 154 | return NULL; | 154 | return NULL; |
| 155 | 155 | ||
| 156 | memset(&vc4_state->lbm, 0, sizeof(vc4_state->lbm)); | 156 | memset(&vc4_state->lbm, 0, sizeof(vc4_state->lbm)); |
| 157 | vc4_state->dlist_initialized = 0; | ||
| 157 | 158 | ||
| 158 | __drm_atomic_helper_plane_duplicate_state(plane, &vc4_state->base); | 159 | __drm_atomic_helper_plane_duplicate_state(plane, &vc4_state->base); |
| 159 | 160 | ||
| @@ -259,14 +260,12 @@ static u32 vc4_get_scl_field(struct drm_plane_state *state, int plane) | |||
| 259 | 260 | ||
| 260 | static int vc4_plane_setup_clipping_and_scaling(struct drm_plane_state *state) | 261 | static int vc4_plane_setup_clipping_and_scaling(struct drm_plane_state *state) |
| 261 | { | 262 | { |
| 262 | struct drm_plane *plane = state->plane; | ||
| 263 | struct vc4_plane_state *vc4_state = to_vc4_plane_state(state); | 263 | struct vc4_plane_state *vc4_state = to_vc4_plane_state(state); |
| 264 | struct drm_framebuffer *fb = state->fb; | 264 | struct drm_framebuffer *fb = state->fb; |
| 265 | struct drm_gem_cma_object *bo = drm_fb_cma_get_gem_obj(fb, 0); | 265 | struct drm_gem_cma_object *bo = drm_fb_cma_get_gem_obj(fb, 0); |
| 266 | u32 subpixel_src_mask = (1 << 16) - 1; | 266 | u32 subpixel_src_mask = (1 << 16) - 1; |
| 267 | u32 format = fb->format->format; | 267 | u32 format = fb->format->format; |
| 268 | int num_planes = fb->format->num_planes; | 268 | int num_planes = fb->format->num_planes; |
| 269 | int min_scale = 1, max_scale = INT_MAX; | ||
| 270 | struct drm_crtc_state *crtc_state; | 269 | struct drm_crtc_state *crtc_state; |
| 271 | u32 h_subsample, v_subsample; | 270 | u32 h_subsample, v_subsample; |
| 272 | int i, ret; | 271 | int i, ret; |
| @@ -278,21 +277,8 @@ static int vc4_plane_setup_clipping_and_scaling(struct drm_plane_state *state) | |||
| 278 | return -EINVAL; | 277 | return -EINVAL; |
| 279 | } | 278 | } |
| 280 | 279 | ||
| 281 | /* No configuring scaling on the cursor plane, since it gets | 280 | ret = drm_atomic_helper_check_plane_state(state, crtc_state, 1, |
| 282 | * non-vblank-synced updates, and scaling requires LBM changes which | 281 | INT_MAX, true, true); |
| 283 | * have to be vblank-synced. | ||
| 284 | */ | ||
| 285 | if (plane->type == DRM_PLANE_TYPE_CURSOR) { | ||
| 286 | min_scale = DRM_PLANE_HELPER_NO_SCALING; | ||
| 287 | max_scale = DRM_PLANE_HELPER_NO_SCALING; | ||
| 288 | } else { | ||
| 289 | min_scale = 1; | ||
| 290 | max_scale = INT_MAX; | ||
| 291 | } | ||
| 292 | |||
| 293 | ret = drm_atomic_helper_check_plane_state(state, crtc_state, | ||
| 294 | min_scale, max_scale, | ||
| 295 | true, true); | ||
| 296 | if (ret) | 282 | if (ret) |
| 297 | return ret; | 283 | return ret; |
| 298 | 284 | ||
| @@ -395,10 +381,13 @@ static u32 vc4_lbm_size(struct drm_plane_state *state) | |||
| 395 | u32 pix_per_line = max(vc4_state->src_w[0], (u32)vc4_state->crtc_w); | 381 | u32 pix_per_line = max(vc4_state->src_w[0], (u32)vc4_state->crtc_w); |
| 396 | u32 lbm; | 382 | u32 lbm; |
| 397 | 383 | ||
| 384 | /* LBM is not needed when there's no vertical scaling. */ | ||
| 385 | if (vc4_state->y_scaling[0] == VC4_SCALING_NONE && | ||
| 386 | vc4_state->y_scaling[1] == VC4_SCALING_NONE) | ||
| 387 | return 0; | ||
| 388 | |||
| 398 | if (!vc4_state->is_yuv) { | 389 | if (!vc4_state->is_yuv) { |
| 399 | if (vc4_state->is_unity) | 390 | if (vc4_state->y_scaling[0] == VC4_SCALING_TPZ) |
| 400 | return 0; | ||
| 401 | else if (vc4_state->y_scaling[0] == VC4_SCALING_TPZ) | ||
| 402 | lbm = pix_per_line * 8; | 391 | lbm = pix_per_line * 8; |
| 403 | else { | 392 | else { |
| 404 | /* In special cases, this multiplier might be 12. */ | 393 | /* In special cases, this multiplier might be 12. */ |
| @@ -449,6 +438,43 @@ static void vc4_write_scaling_parameters(struct drm_plane_state *state, | |||
| 449 | } | 438 | } |
| 450 | } | 439 | } |
| 451 | 440 | ||
| 441 | static int vc4_plane_allocate_lbm(struct drm_plane_state *state) | ||
| 442 | { | ||
| 443 | struct vc4_dev *vc4 = to_vc4_dev(state->plane->dev); | ||
| 444 | struct vc4_plane_state *vc4_state = to_vc4_plane_state(state); | ||
| 445 | unsigned long irqflags; | ||
| 446 | u32 lbm_size; | ||
| 447 | |||
| 448 | lbm_size = vc4_lbm_size(state); | ||
| 449 | if (!lbm_size) | ||
| 450 | return 0; | ||
| 451 | |||
| 452 | if (WARN_ON(!vc4_state->lbm_offset)) | ||
| 453 | return -EINVAL; | ||
| 454 | |||
| 455 | /* Allocate the LBM memory that the HVS will use for temporary | ||
| 456 | * storage due to our scaling/format conversion. | ||
| 457 | */ | ||
| 458 | if (!vc4_state->lbm.allocated) { | ||
| 459 | int ret; | ||
| 460 | |||
| 461 | spin_lock_irqsave(&vc4->hvs->mm_lock, irqflags); | ||
| 462 | ret = drm_mm_insert_node_generic(&vc4->hvs->lbm_mm, | ||
| 463 | &vc4_state->lbm, | ||
| 464 | lbm_size, 32, 0, 0); | ||
| 465 | spin_unlock_irqrestore(&vc4->hvs->mm_lock, irqflags); | ||
| 466 | |||
| 467 | if (ret) | ||
| 468 | return ret; | ||
| 469 | } else { | ||
| 470 | WARN_ON_ONCE(lbm_size != vc4_state->lbm.size); | ||
| 471 | } | ||
| 472 | |||
| 473 | vc4_state->dlist[vc4_state->lbm_offset] = vc4_state->lbm.start; | ||
| 474 | |||
| 475 | return 0; | ||
| 476 | } | ||
| 477 | |||
| 452 | /* Writes out a full display list for an active plane to the plane's | 478 | /* Writes out a full display list for an active plane to the plane's |
| 453 | * private dlist state. | 479 | * private dlist state. |
| 454 | */ | 480 | */ |
| @@ -466,31 +492,14 @@ static int vc4_plane_mode_set(struct drm_plane *plane, | |||
| 466 | bool mix_plane_alpha; | 492 | bool mix_plane_alpha; |
| 467 | bool covers_screen; | 493 | bool covers_screen; |
| 468 | u32 scl0, scl1, pitch0; | 494 | u32 scl0, scl1, pitch0; |
| 469 | u32 lbm_size, tiling; | 495 | u32 tiling; |
| 470 | unsigned long irqflags; | ||
| 471 | u32 hvs_format = format->hvs; | 496 | u32 hvs_format = format->hvs; |
| 472 | int ret, i; | 497 | int ret, i; |
| 473 | 498 | ||
| 474 | ret = vc4_plane_setup_clipping_and_scaling(state); | 499 | if (vc4_state->dlist_initialized) |
| 475 | if (ret) | 500 | return 0; |
| 476 | return ret; | ||
| 477 | |||
| 478 | /* Allocate the LBM memory that the HVS will use for temporary | ||
| 479 | * storage due to our scaling/format conversion. | ||
| 480 | */ | ||
| 481 | lbm_size = vc4_lbm_size(state); | ||
| 482 | if (lbm_size) { | ||
| 483 | if (!vc4_state->lbm.allocated) { | ||
| 484 | spin_lock_irqsave(&vc4->hvs->mm_lock, irqflags); | ||
| 485 | ret = drm_mm_insert_node_generic(&vc4->hvs->lbm_mm, | ||
| 486 | &vc4_state->lbm, | ||
| 487 | lbm_size, 32, 0, 0); | ||
| 488 | spin_unlock_irqrestore(&vc4->hvs->mm_lock, irqflags); | ||
| 489 | } else { | ||
| 490 | WARN_ON_ONCE(lbm_size != vc4_state->lbm.size); | ||
| 491 | } | ||
| 492 | } | ||
| 493 | 501 | ||
| 502 | ret = vc4_plane_setup_clipping_and_scaling(state); | ||
| 494 | if (ret) | 503 | if (ret) |
| 495 | return ret; | 504 | return ret; |
| 496 | 505 | ||
| @@ -714,15 +723,18 @@ static int vc4_plane_mode_set(struct drm_plane *plane, | |||
| 714 | vc4_dlist_write(vc4_state, SCALER_CSC2_ITR_R_601_5); | 723 | vc4_dlist_write(vc4_state, SCALER_CSC2_ITR_R_601_5); |
| 715 | } | 724 | } |
| 716 | 725 | ||
| 726 | vc4_state->lbm_offset = 0; | ||
| 727 | |||
| 717 | if (vc4_state->x_scaling[0] != VC4_SCALING_NONE || | 728 | if (vc4_state->x_scaling[0] != VC4_SCALING_NONE || |
| 718 | vc4_state->x_scaling[1] != VC4_SCALING_NONE || | 729 | vc4_state->x_scaling[1] != VC4_SCALING_NONE || |
| 719 | vc4_state->y_scaling[0] != VC4_SCALING_NONE || | 730 | vc4_state->y_scaling[0] != VC4_SCALING_NONE || |
| 720 | vc4_state->y_scaling[1] != VC4_SCALING_NONE) { | 731 | vc4_state->y_scaling[1] != VC4_SCALING_NONE) { |
| 721 | /* LBM Base Address. */ | 732 | /* Reserve a slot for the LBM Base Address. The real value will |
| 733 | * be set when calling vc4_plane_allocate_lbm(). | ||
| 734 | */ | ||
| 722 | if (vc4_state->y_scaling[0] != VC4_SCALING_NONE || | 735 | if (vc4_state->y_scaling[0] != VC4_SCALING_NONE || |
| 723 | vc4_state->y_scaling[1] != VC4_SCALING_NONE) { | 736 | vc4_state->y_scaling[1] != VC4_SCALING_NONE) |
| 724 | vc4_dlist_write(vc4_state, vc4_state->lbm.start); | 737 | vc4_state->lbm_offset = vc4_state->dlist_count++; |
| 725 | } | ||
| 726 | 738 | ||
| 727 | if (num_planes > 1) { | 739 | if (num_planes > 1) { |
| 728 | /* Emit Cb/Cr as channel 0 and Y as channel | 740 | /* Emit Cb/Cr as channel 0 and Y as channel |
| @@ -768,6 +780,13 @@ static int vc4_plane_mode_set(struct drm_plane *plane, | |||
| 768 | vc4_state->needs_bg_fill = fb->format->has_alpha || !covers_screen || | 780 | vc4_state->needs_bg_fill = fb->format->has_alpha || !covers_screen || |
| 769 | state->alpha != DRM_BLEND_ALPHA_OPAQUE; | 781 | state->alpha != DRM_BLEND_ALPHA_OPAQUE; |
| 770 | 782 | ||
| 783 | /* Flag the dlist as initialized to avoid checking it twice in case | ||
| 784 | * the async update check already called vc4_plane_mode_set() and | ||
| 785 | * decided to fallback to sync update because async update was not | ||
| 786 | * possible. | ||
| 787 | */ | ||
| 788 | vc4_state->dlist_initialized = 1; | ||
| 789 | |||
| 771 | return 0; | 790 | return 0; |
| 772 | } | 791 | } |
| 773 | 792 | ||
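
Read together with the vc4_plane_allocate_lbm() hunk earlier, this is a reserve-then-patch idiom: while the dlist is being emitted, only the index of the slot that will carry the LBM base address is recorded, and the slot is filled in once the drm_mm allocation has actually succeeded. The two sides, condensed (both lines appear verbatim in the hunks above):

    /* While emitting the dlist: remember the slot, value comes later. */
    vc4_state->lbm_offset = vc4_state->dlist_count++;

    /* In vc4_plane_allocate_lbm(), once the LBM allocation succeeded: */
    vc4_state->dlist[vc4_state->lbm_offset] = vc4_state->lbm.start;
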
| @@ -782,13 +801,18 @@ static int vc4_plane_atomic_check(struct drm_plane *plane, | |||
| 782 | struct drm_plane_state *state) | 801 | struct drm_plane_state *state) |
| 783 | { | 802 | { |
| 784 | struct vc4_plane_state *vc4_state = to_vc4_plane_state(state); | 803 | struct vc4_plane_state *vc4_state = to_vc4_plane_state(state); |
| 804 | int ret; | ||
| 785 | 805 | ||
| 786 | vc4_state->dlist_count = 0; | 806 | vc4_state->dlist_count = 0; |
| 787 | 807 | ||
| 788 | if (plane_enabled(state)) | 808 | if (!plane_enabled(state)) |
| 789 | return vc4_plane_mode_set(plane, state); | ||
| 790 | else | ||
| 791 | return 0; | 809 | return 0; |
| 810 | |||
| 811 | ret = vc4_plane_mode_set(plane, state); | ||
| 812 | if (ret) | ||
| 813 | return ret; | ||
| 814 | |||
| 815 | return vc4_plane_allocate_lbm(state); | ||
| 792 | } | 816 | } |
| 793 | 817 | ||
| 794 | static void vc4_plane_atomic_update(struct drm_plane *plane, | 818 | static void vc4_plane_atomic_update(struct drm_plane *plane, |
| @@ -856,30 +880,50 @@ static void vc4_plane_atomic_async_update(struct drm_plane *plane, | |||
| 856 | { | 880 | { |
| 857 | struct vc4_plane_state *vc4_state, *new_vc4_state; | 881 | struct vc4_plane_state *vc4_state, *new_vc4_state; |
| 858 | 882 | ||
| 859 | if (plane->state->fb != state->fb) { | 883 | drm_atomic_set_fb_for_plane(plane->state, state->fb); |
| 860 | vc4_plane_async_set_fb(plane, state->fb); | ||
| 861 | drm_atomic_set_fb_for_plane(plane->state, state->fb); | ||
| 862 | } | ||
| 863 | |||
| 864 | /* Set the cursor's position on the screen. This is the | ||
| 865 | * expected change from the drm_mode_cursor_universal() | ||
| 866 | * helper. | ||
| 867 | */ | ||
| 868 | plane->state->crtc_x = state->crtc_x; | 884 | plane->state->crtc_x = state->crtc_x; |
| 869 | plane->state->crtc_y = state->crtc_y; | 885 | plane->state->crtc_y = state->crtc_y; |
| 870 | 886 | plane->state->crtc_w = state->crtc_w; | |
| 871 | /* Allow changing the start position within the cursor BO, if | 887 | plane->state->crtc_h = state->crtc_h; |
| 872 | * that matters. | ||
| 873 | */ | ||
| 874 | plane->state->src_x = state->src_x; | 888 | plane->state->src_x = state->src_x; |
| 875 | plane->state->src_y = state->src_y; | 889 | plane->state->src_y = state->src_y; |
| 876 | 890 | plane->state->src_w = state->src_w; | |
| 877 | /* Update the display list based on the new crtc_x/y. */ | 891 | plane->state->src_h = state->src_h; |
| 878 | vc4_plane_atomic_check(plane, state); | 892 | plane->state->src_h = state->src_h; |
| 893 | plane->state->alpha = state->alpha; | ||
| 894 | plane->state->pixel_blend_mode = state->pixel_blend_mode; | ||
| 895 | plane->state->rotation = state->rotation; | ||
| 896 | plane->state->zpos = state->zpos; | ||
| 897 | plane->state->normalized_zpos = state->normalized_zpos; | ||
| 898 | plane->state->color_encoding = state->color_encoding; | ||
| 899 | plane->state->color_range = state->color_range; | ||
| 900 | plane->state->src = state->src; | ||
| 901 | plane->state->dst = state->dst; | ||
| 902 | plane->state->visible = state->visible; | ||
| 879 | 903 | ||
| 880 | new_vc4_state = to_vc4_plane_state(state); | 904 | new_vc4_state = to_vc4_plane_state(state); |
| 881 | vc4_state = to_vc4_plane_state(plane->state); | 905 | vc4_state = to_vc4_plane_state(plane->state); |
| 882 | 906 | ||
| 907 | vc4_state->crtc_x = new_vc4_state->crtc_x; | ||
| 908 | vc4_state->crtc_y = new_vc4_state->crtc_y; | ||
| 909 | vc4_state->crtc_h = new_vc4_state->crtc_h; | ||
| 910 | vc4_state->crtc_w = new_vc4_state->crtc_w; | ||
| 911 | vc4_state->src_x = new_vc4_state->src_x; | ||
| 912 | vc4_state->src_y = new_vc4_state->src_y; | ||
| 913 | memcpy(vc4_state->src_w, new_vc4_state->src_w, | ||
| 914 | sizeof(vc4_state->src_w)); | ||
| 915 | memcpy(vc4_state->src_h, new_vc4_state->src_h, | ||
| 916 | sizeof(vc4_state->src_h)); | ||
| 917 | memcpy(vc4_state->x_scaling, new_vc4_state->x_scaling, | ||
| 918 | sizeof(vc4_state->x_scaling)); | ||
| 919 | memcpy(vc4_state->y_scaling, new_vc4_state->y_scaling, | ||
| 920 | sizeof(vc4_state->y_scaling)); | ||
| 921 | vc4_state->is_unity = new_vc4_state->is_unity; | ||
| 922 | vc4_state->is_yuv = new_vc4_state->is_yuv; | ||
| 923 | memcpy(vc4_state->offsets, new_vc4_state->offsets, | ||
| 924 | sizeof(vc4_state->offsets)); | ||
| 925 | vc4_state->needs_bg_fill = new_vc4_state->needs_bg_fill; | ||
| 926 | |||
| 883 | /* Update the current vc4_state pos0, pos2 and ptr0 dlist entries. */ | 927 | /* Update the current vc4_state pos0, pos2 and ptr0 dlist entries. */ |
| 884 | vc4_state->dlist[vc4_state->pos0_offset] = | 928 | vc4_state->dlist[vc4_state->pos0_offset] = |
| 885 | new_vc4_state->dlist[vc4_state->pos0_offset]; | 929 | new_vc4_state->dlist[vc4_state->pos0_offset]; |
| @@ -903,13 +947,38 @@ static void vc4_plane_atomic_async_update(struct drm_plane *plane, | |||
| 903 | static int vc4_plane_atomic_async_check(struct drm_plane *plane, | 947 | static int vc4_plane_atomic_async_check(struct drm_plane *plane, |
| 904 | struct drm_plane_state *state) | 948 | struct drm_plane_state *state) |
| 905 | { | 949 | { |
| 906 | /* No configuring new scaling in the fast path. */ | 950 | struct vc4_plane_state *old_vc4_state, *new_vc4_state; |
| 907 | if (plane->state->crtc_w != state->crtc_w || | 951 | int ret; |
| 908 | plane->state->crtc_h != state->crtc_h || | 952 | u32 i; |
| 909 | plane->state->src_w != state->src_w || | 953 | |
| 910 | plane->state->src_h != state->src_h) | 954 | ret = vc4_plane_mode_set(plane, state); |
| 955 | if (ret) | ||
| 956 | return ret; | ||
| 957 | |||
| 958 | old_vc4_state = to_vc4_plane_state(plane->state); | ||
| 959 | new_vc4_state = to_vc4_plane_state(state); | ||
| 960 | if (old_vc4_state->dlist_count != new_vc4_state->dlist_count || | ||
| 961 | old_vc4_state->pos0_offset != new_vc4_state->pos0_offset || | ||
| 962 | old_vc4_state->pos2_offset != new_vc4_state->pos2_offset || | ||
| 963 | old_vc4_state->ptr0_offset != new_vc4_state->ptr0_offset || | ||
| 964 | vc4_lbm_size(plane->state) != vc4_lbm_size(state)) | ||
| 911 | return -EINVAL; | 965 | return -EINVAL; |
| 912 | 966 | ||
| 967 | /* Only pos0, pos2 and ptr0 DWORDS can be updated in an async update | ||
| 968 | * if anything else has changed, fallback to a sync update. | ||
| 969 | */ | ||
| 970 | for (i = 0; i < new_vc4_state->dlist_count; i++) { | ||
| 971 | if (i == new_vc4_state->pos0_offset || | ||
| 972 | i == new_vc4_state->pos2_offset || | ||
| 973 | i == new_vc4_state->ptr0_offset || | ||
| 974 | (new_vc4_state->lbm_offset && | ||
| 975 | i == new_vc4_state->lbm_offset)) | ||
| 976 | continue; | ||
| 977 | |||
| 978 | if (new_vc4_state->dlist[i] != old_vc4_state->dlist[i]) | ||
| 979 | return -EINVAL; | ||
| 980 | } | ||
| 981 | |||
| 913 | return 0; | 982 | return 0; |
| 914 | } | 983 | } |
| 915 | 984 | ||
| @@ -1026,7 +1095,6 @@ struct drm_plane *vc4_plane_init(struct drm_device *dev, | |||
| 1026 | struct drm_plane *plane = NULL; | 1095 | struct drm_plane *plane = NULL; |
| 1027 | struct vc4_plane *vc4_plane; | 1096 | struct vc4_plane *vc4_plane; |
| 1028 | u32 formats[ARRAY_SIZE(hvs_formats)]; | 1097 | u32 formats[ARRAY_SIZE(hvs_formats)]; |
| 1029 | u32 num_formats = 0; | ||
| 1030 | int ret = 0; | 1098 | int ret = 0; |
| 1031 | unsigned i; | 1099 | unsigned i; |
| 1032 | static const uint64_t modifiers[] = { | 1100 | static const uint64_t modifiers[] = { |
| @@ -1043,20 +1111,13 @@ struct drm_plane *vc4_plane_init(struct drm_device *dev, | |||
| 1043 | if (!vc4_plane) | 1111 | if (!vc4_plane) |
| 1044 | return ERR_PTR(-ENOMEM); | 1112 | return ERR_PTR(-ENOMEM); |
| 1045 | 1113 | ||
| 1046 | for (i = 0; i < ARRAY_SIZE(hvs_formats); i++) { | 1114 | for (i = 0; i < ARRAY_SIZE(hvs_formats); i++) |
| 1047 | /* Don't allow YUV in cursor planes, since that means | 1115 | formats[i] = hvs_formats[i].drm; |
| 1048 | * turning on the scaler, which we don't allow for the | 1116 | |
| 1049 | * cursor. | ||
| 1050 | */ | ||
| 1051 | if (type != DRM_PLANE_TYPE_CURSOR || | ||
| 1052 | hvs_formats[i].hvs < HVS_PIXEL_FORMAT_YCBCR_YUV420_3PLANE) { | ||
| 1053 | formats[num_formats++] = hvs_formats[i].drm; | ||
| 1054 | } | ||
| 1055 | } | ||
| 1056 | plane = &vc4_plane->base; | 1117 | plane = &vc4_plane->base; |
| 1057 | ret = drm_universal_plane_init(dev, plane, 0, | 1118 | ret = drm_universal_plane_init(dev, plane, 0, |
| 1058 | &vc4_plane_funcs, | 1119 | &vc4_plane_funcs, |
| 1059 | formats, num_formats, | 1120 | formats, ARRAY_SIZE(formats), |
| 1060 | modifiers, type, NULL); | 1121 | modifiers, type, NULL); |
| 1061 | 1122 | ||
| 1062 | drm_plane_helper_add(plane, &vc4_plane_helper_funcs); | 1123 | drm_plane_helper_add(plane, &vc4_plane_helper_funcs); |
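The async check added above rejects the fast path unless the old and new display lists have the same size, keep the same pos0/pos2/ptr0 offsets, and differ only in the words an async update is allowed to rewrite (plus the LBM entry, omitted below for brevity). A minimal, self-contained sketch of that comparison pattern follows; the names here (dlist_state, async_update_allowed) are illustrative stand-ins, not the real vc4 types.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for a plane state's display list. */
struct dlist_state {
	uint32_t *dlist;
	size_t count;
	size_t pos0_offset;
	size_t pos2_offset;
	size_t ptr0_offset;
};

/* Return true when only the async-updatable words differ. */
static bool async_update_allowed(const struct dlist_state *old,
				 const struct dlist_state *new)
{
	size_t i;

	if (old->count != new->count ||
	    old->pos0_offset != new->pos0_offset ||
	    old->pos2_offset != new->pos2_offset ||
	    old->ptr0_offset != new->ptr0_offset)
		return false;

	for (i = 0; i < new->count; i++) {
		/* These words may legitimately change in an async update. */
		if (i == new->pos0_offset || i == new->pos2_offset ||
		    i == new->ptr0_offset)
			continue;
		if (old->dlist[i] != new->dlist[i])
			return false;
	}
	return true;
}

int main(void)
{
	uint32_t a[] = { 1, 2, 3, 4 }, b[] = { 1, 9, 3, 7 };
	struct dlist_state old = { a, 4, 1, 2, 3 };
	struct dlist_state new = { b, 4, 1, 2, 3 };

	/* Differences only at pos0 and ptr0, so the fast path is allowed. */
	printf("async ok: %d\n", async_update_allowed(&old, &new));
	return 0;
}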
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h index f7e877857c1f..1deb41d42ea4 100644 --- a/drivers/gpu/drm/virtio/virtgpu_drv.h +++ b/drivers/gpu/drm/virtio/virtgpu_drv.h | |||
| @@ -270,7 +270,7 @@ void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev, | |||
| 270 | uint64_t offset, | 270 | uint64_t offset, |
| 271 | __le32 width, __le32 height, | 271 | __le32 width, __le32 height, |
| 272 | __le32 x, __le32 y, | 272 | __le32 x, __le32 y, |
| 273 | struct virtio_gpu_fence **fence); | 273 | struct virtio_gpu_fence *fence); |
| 274 | void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev, | 274 | void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev, |
| 275 | uint32_t resource_id, | 275 | uint32_t resource_id, |
| 276 | uint32_t x, uint32_t y, | 276 | uint32_t x, uint32_t y, |
| @@ -281,7 +281,7 @@ void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev, | |||
| 281 | uint32_t x, uint32_t y); | 281 | uint32_t x, uint32_t y); |
| 282 | int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev, | 282 | int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev, |
| 283 | struct virtio_gpu_object *obj, | 283 | struct virtio_gpu_object *obj, |
| 284 | struct virtio_gpu_fence **fence); | 284 | struct virtio_gpu_fence *fence); |
| 285 | void virtio_gpu_object_detach(struct virtio_gpu_device *vgdev, | 285 | void virtio_gpu_object_detach(struct virtio_gpu_device *vgdev, |
| 286 | struct virtio_gpu_object *obj); | 286 | struct virtio_gpu_object *obj); |
| 287 | int virtio_gpu_attach_status_page(struct virtio_gpu_device *vgdev); | 287 | int virtio_gpu_attach_status_page(struct virtio_gpu_device *vgdev); |
| @@ -306,23 +306,22 @@ void virtio_gpu_cmd_context_detach_resource(struct virtio_gpu_device *vgdev, | |||
| 306 | uint32_t resource_id); | 306 | uint32_t resource_id); |
| 307 | void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev, | 307 | void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev, |
| 308 | void *data, uint32_t data_size, | 308 | void *data, uint32_t data_size, |
| 309 | uint32_t ctx_id, struct virtio_gpu_fence **fence); | 309 | uint32_t ctx_id, struct virtio_gpu_fence *fence); |
| 310 | void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev, | 310 | void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev, |
| 311 | uint32_t resource_id, uint32_t ctx_id, | 311 | uint32_t resource_id, uint32_t ctx_id, |
| 312 | uint64_t offset, uint32_t level, | 312 | uint64_t offset, uint32_t level, |
| 313 | struct virtio_gpu_box *box, | 313 | struct virtio_gpu_box *box, |
| 314 | struct virtio_gpu_fence **fence); | 314 | struct virtio_gpu_fence *fence); |
| 315 | void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev, | 315 | void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev, |
| 316 | struct virtio_gpu_object *bo, | 316 | struct virtio_gpu_object *bo, |
| 317 | uint32_t ctx_id, | 317 | uint32_t ctx_id, |
| 318 | uint64_t offset, uint32_t level, | 318 | uint64_t offset, uint32_t level, |
| 319 | struct virtio_gpu_box *box, | 319 | struct virtio_gpu_box *box, |
| 320 | struct virtio_gpu_fence **fence); | 320 | struct virtio_gpu_fence *fence); |
| 321 | void | 321 | void |
| 322 | virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev, | 322 | virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev, |
| 323 | struct virtio_gpu_object *bo, | 323 | struct virtio_gpu_object *bo, |
| 324 | struct virtio_gpu_resource_create_3d *rc_3d, | 324 | struct virtio_gpu_resource_create_3d *rc_3d); |
| 325 | struct virtio_gpu_fence **fence); | ||
| 326 | void virtio_gpu_ctrl_ack(struct virtqueue *vq); | 325 | void virtio_gpu_ctrl_ack(struct virtqueue *vq); |
| 327 | void virtio_gpu_cursor_ack(struct virtqueue *vq); | 326 | void virtio_gpu_cursor_ack(struct virtqueue *vq); |
| 328 | void virtio_gpu_fence_ack(struct virtqueue *vq); | 327 | void virtio_gpu_fence_ack(struct virtqueue *vq); |
| @@ -355,7 +354,7 @@ struct virtio_gpu_fence *virtio_gpu_fence_alloc( | |||
| 355 | void virtio_gpu_fence_cleanup(struct virtio_gpu_fence *fence); | 354 | void virtio_gpu_fence_cleanup(struct virtio_gpu_fence *fence); |
| 356 | int virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev, | 355 | int virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev, |
| 357 | struct virtio_gpu_ctrl_hdr *cmd_hdr, | 356 | struct virtio_gpu_ctrl_hdr *cmd_hdr, |
| 358 | struct virtio_gpu_fence **fence); | 357 | struct virtio_gpu_fence *fence); |
| 359 | void virtio_gpu_fence_event_process(struct virtio_gpu_device *vdev, | 358 | void virtio_gpu_fence_event_process(struct virtio_gpu_device *vdev, |
| 360 | u64 last_seq); | 359 | u64 last_seq); |
| 361 | 360 | ||
diff --git a/drivers/gpu/drm/virtio/virtgpu_fence.c b/drivers/gpu/drm/virtio/virtgpu_fence.c index 6b5d92215cfb..4d6826b27814 100644 --- a/drivers/gpu/drm/virtio/virtgpu_fence.c +++ b/drivers/gpu/drm/virtio/virtgpu_fence.c | |||
| @@ -91,19 +91,19 @@ void virtio_gpu_fence_cleanup(struct virtio_gpu_fence *fence) | |||
| 91 | 91 | ||
| 92 | int virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev, | 92 | int virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev, |
| 93 | struct virtio_gpu_ctrl_hdr *cmd_hdr, | 93 | struct virtio_gpu_ctrl_hdr *cmd_hdr, |
| 94 | struct virtio_gpu_fence **fence) | 94 | struct virtio_gpu_fence *fence) |
| 95 | { | 95 | { |
| 96 | struct virtio_gpu_fence_driver *drv = &vgdev->fence_drv; | 96 | struct virtio_gpu_fence_driver *drv = &vgdev->fence_drv; |
| 97 | unsigned long irq_flags; | 97 | unsigned long irq_flags; |
| 98 | 98 | ||
| 99 | spin_lock_irqsave(&drv->lock, irq_flags); | 99 | spin_lock_irqsave(&drv->lock, irq_flags); |
| 100 | (*fence)->seq = ++drv->sync_seq; | 100 | fence->seq = ++drv->sync_seq; |
| 101 | dma_fence_get(&(*fence)->f); | 101 | dma_fence_get(&fence->f); |
| 102 | list_add_tail(&(*fence)->node, &drv->fences); | 102 | list_add_tail(&fence->node, &drv->fences); |
| 103 | spin_unlock_irqrestore(&drv->lock, irq_flags); | 103 | spin_unlock_irqrestore(&drv->lock, irq_flags); |
| 104 | 104 | ||
| 105 | cmd_hdr->flags |= cpu_to_le32(VIRTIO_GPU_FLAG_FENCE); | 105 | cmd_hdr->flags |= cpu_to_le32(VIRTIO_GPU_FLAG_FENCE); |
| 106 | cmd_hdr->fence_id = cpu_to_le64((*fence)->seq); | 106 | cmd_hdr->fence_id = cpu_to_le64(fence->seq); |
| 107 | return 0; | 107 | return 0; |
| 108 | } | 108 | } |
| 109 | 109 | ||
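With virtio_gpu_fence_emit now taking the fence as a plain pointer, the sequence number is assigned with a direct member access under the driver lock instead of through (*fence)->. A rough userspace sketch of that assign-the-next-seq-under-a-lock pattern, with a pthread mutex standing in for the spinlock and deliberately simplified structures:

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

struct fence {
	uint64_t seq;
};

struct fence_driver {
	pthread_mutex_t lock;
	uint64_t sync_seq;
};

/* Assign the next sequence number to the fence under the driver lock. */
static void fence_emit(struct fence_driver *drv, struct fence *fence)
{
	pthread_mutex_lock(&drv->lock);
	fence->seq = ++drv->sync_seq;	/* direct access, no (*fence)-> */
	pthread_mutex_unlock(&drv->lock);
}

int main(void)
{
	struct fence_driver drv = { PTHREAD_MUTEX_INITIALIZER, 0 };
	struct fence f = { 0 };

	fence_emit(&drv, &f);
	printf("seq = %llu\n", (unsigned long long)f.seq);
	return 0;
}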
diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c index 340f2513d829..161b80fee492 100644 --- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c +++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c | |||
| @@ -221,7 +221,7 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data, | |||
| 221 | } | 221 | } |
| 222 | 222 | ||
| 223 | virtio_gpu_cmd_submit(vgdev, buf, exbuf->size, | 223 | virtio_gpu_cmd_submit(vgdev, buf, exbuf->size, |
| 224 | vfpriv->ctx_id, &out_fence); | 224 | vfpriv->ctx_id, out_fence); |
| 225 | 225 | ||
| 226 | ttm_eu_fence_buffer_objects(&ticket, &validate_list, &out_fence->f); | 226 | ttm_eu_fence_buffer_objects(&ticket, &validate_list, &out_fence->f); |
| 227 | 227 | ||
| @@ -348,8 +348,8 @@ static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data, | |||
| 348 | goto fail_backoff; | 348 | goto fail_backoff; |
| 349 | } | 349 | } |
| 350 | 350 | ||
| 351 | virtio_gpu_cmd_resource_create_3d(vgdev, qobj, &rc_3d, NULL); | 351 | virtio_gpu_cmd_resource_create_3d(vgdev, qobj, &rc_3d); |
| 352 | ret = virtio_gpu_object_attach(vgdev, qobj, &fence); | 352 | ret = virtio_gpu_object_attach(vgdev, qobj, fence); |
| 353 | if (ret) { | 353 | if (ret) { |
| 354 | virtio_gpu_fence_cleanup(fence); | 354 | virtio_gpu_fence_cleanup(fence); |
| 355 | goto fail_backoff; | 355 | goto fail_backoff; |
| @@ -450,7 +450,7 @@ static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev, | |||
| 450 | virtio_gpu_cmd_transfer_from_host_3d | 450 | virtio_gpu_cmd_transfer_from_host_3d |
| 451 | (vgdev, qobj->hw_res_handle, | 451 | (vgdev, qobj->hw_res_handle, |
| 452 | vfpriv->ctx_id, offset, args->level, | 452 | vfpriv->ctx_id, offset, args->level, |
| 453 | &box, &fence); | 453 | &box, fence); |
| 454 | reservation_object_add_excl_fence(qobj->tbo.resv, | 454 | reservation_object_add_excl_fence(qobj->tbo.resv, |
| 455 | &fence->f); | 455 | &fence->f); |
| 456 | 456 | ||
| @@ -504,7 +504,7 @@ static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data, | |||
| 504 | virtio_gpu_cmd_transfer_to_host_3d | 504 | virtio_gpu_cmd_transfer_to_host_3d |
| 505 | (vgdev, qobj, | 505 | (vgdev, qobj, |
| 506 | vfpriv ? vfpriv->ctx_id : 0, offset, | 506 | vfpriv ? vfpriv->ctx_id : 0, offset, |
| 507 | args->level, &box, &fence); | 507 | args->level, &box, fence); |
| 508 | reservation_object_add_excl_fence(qobj->tbo.resv, | 508 | reservation_object_add_excl_fence(qobj->tbo.resv, |
| 509 | &fence->f); | 509 | &fence->f); |
| 510 | dma_fence_put(&fence->f); | 510 | dma_fence_put(&fence->f); |
diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c b/drivers/gpu/drm/virtio/virtgpu_plane.c index b84ac8c25856..ead5c53d4e21 100644 --- a/drivers/gpu/drm/virtio/virtgpu_plane.c +++ b/drivers/gpu/drm/virtio/virtgpu_plane.c | |||
| @@ -204,7 +204,7 @@ static void virtio_gpu_cursor_plane_update(struct drm_plane *plane, | |||
| 204 | (vgdev, bo, 0, | 204 | (vgdev, bo, 0, |
| 205 | cpu_to_le32(plane->state->crtc_w), | 205 | cpu_to_le32(plane->state->crtc_w), |
| 206 | cpu_to_le32(plane->state->crtc_h), | 206 | cpu_to_le32(plane->state->crtc_h), |
| 207 | 0, 0, &vgfb->fence); | 207 | 0, 0, vgfb->fence); |
| 208 | ret = virtio_gpu_object_reserve(bo, false); | 208 | ret = virtio_gpu_object_reserve(bo, false); |
| 209 | if (!ret) { | 209 | if (!ret) { |
| 210 | reservation_object_add_excl_fence(bo->tbo.resv, | 210 | reservation_object_add_excl_fence(bo->tbo.resv, |
diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c index 2c6764f08f18..e27c4aedb809 100644 --- a/drivers/gpu/drm/virtio/virtgpu_vq.c +++ b/drivers/gpu/drm/virtio/virtgpu_vq.c | |||
| @@ -298,7 +298,7 @@ static int virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev, | |||
| 298 | static int virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev, | 298 | static int virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev, |
| 299 | struct virtio_gpu_vbuffer *vbuf, | 299 | struct virtio_gpu_vbuffer *vbuf, |
| 300 | struct virtio_gpu_ctrl_hdr *hdr, | 300 | struct virtio_gpu_ctrl_hdr *hdr, |
| 301 | struct virtio_gpu_fence **fence) | 301 | struct virtio_gpu_fence *fence) |
| 302 | { | 302 | { |
| 303 | struct virtqueue *vq = vgdev->ctrlq.vq; | 303 | struct virtqueue *vq = vgdev->ctrlq.vq; |
| 304 | int rc; | 304 | int rc; |
| @@ -405,7 +405,7 @@ void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev, | |||
| 405 | 405 | ||
| 406 | static void virtio_gpu_cmd_resource_inval_backing(struct virtio_gpu_device *vgdev, | 406 | static void virtio_gpu_cmd_resource_inval_backing(struct virtio_gpu_device *vgdev, |
| 407 | uint32_t resource_id, | 407 | uint32_t resource_id, |
| 408 | struct virtio_gpu_fence **fence) | 408 | struct virtio_gpu_fence *fence) |
| 409 | { | 409 | { |
| 410 | struct virtio_gpu_resource_detach_backing *cmd_p; | 410 | struct virtio_gpu_resource_detach_backing *cmd_p; |
| 411 | struct virtio_gpu_vbuffer *vbuf; | 411 | struct virtio_gpu_vbuffer *vbuf; |
| @@ -467,7 +467,7 @@ void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev, | |||
| 467 | uint64_t offset, | 467 | uint64_t offset, |
| 468 | __le32 width, __le32 height, | 468 | __le32 width, __le32 height, |
| 469 | __le32 x, __le32 y, | 469 | __le32 x, __le32 y, |
| 470 | struct virtio_gpu_fence **fence) | 470 | struct virtio_gpu_fence *fence) |
| 471 | { | 471 | { |
| 472 | struct virtio_gpu_transfer_to_host_2d *cmd_p; | 472 | struct virtio_gpu_transfer_to_host_2d *cmd_p; |
| 473 | struct virtio_gpu_vbuffer *vbuf; | 473 | struct virtio_gpu_vbuffer *vbuf; |
| @@ -497,7 +497,7 @@ virtio_gpu_cmd_resource_attach_backing(struct virtio_gpu_device *vgdev, | |||
| 497 | uint32_t resource_id, | 497 | uint32_t resource_id, |
| 498 | struct virtio_gpu_mem_entry *ents, | 498 | struct virtio_gpu_mem_entry *ents, |
| 499 | uint32_t nents, | 499 | uint32_t nents, |
| 500 | struct virtio_gpu_fence **fence) | 500 | struct virtio_gpu_fence *fence) |
| 501 | { | 501 | { |
| 502 | struct virtio_gpu_resource_attach_backing *cmd_p; | 502 | struct virtio_gpu_resource_attach_backing *cmd_p; |
| 503 | struct virtio_gpu_vbuffer *vbuf; | 503 | struct virtio_gpu_vbuffer *vbuf; |
| @@ -820,8 +820,7 @@ void virtio_gpu_cmd_context_detach_resource(struct virtio_gpu_device *vgdev, | |||
| 820 | void | 820 | void |
| 821 | virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev, | 821 | virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev, |
| 822 | struct virtio_gpu_object *bo, | 822 | struct virtio_gpu_object *bo, |
| 823 | struct virtio_gpu_resource_create_3d *rc_3d, | 823 | struct virtio_gpu_resource_create_3d *rc_3d) |
| 824 | struct virtio_gpu_fence **fence) | ||
| 825 | { | 824 | { |
| 826 | struct virtio_gpu_resource_create_3d *cmd_p; | 825 | struct virtio_gpu_resource_create_3d *cmd_p; |
| 827 | struct virtio_gpu_vbuffer *vbuf; | 826 | struct virtio_gpu_vbuffer *vbuf; |
| @@ -833,7 +832,7 @@ virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev, | |||
| 833 | cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_3D); | 832 | cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_3D); |
| 834 | cmd_p->hdr.flags = 0; | 833 | cmd_p->hdr.flags = 0; |
| 835 | 834 | ||
| 836 | virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence); | 835 | virtio_gpu_queue_ctrl_buffer(vgdev, vbuf); |
| 837 | bo->created = true; | 836 | bo->created = true; |
| 838 | } | 837 | } |
| 839 | 838 | ||
| @@ -842,7 +841,7 @@ void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev, | |||
| 842 | uint32_t ctx_id, | 841 | uint32_t ctx_id, |
| 843 | uint64_t offset, uint32_t level, | 842 | uint64_t offset, uint32_t level, |
| 844 | struct virtio_gpu_box *box, | 843 | struct virtio_gpu_box *box, |
| 845 | struct virtio_gpu_fence **fence) | 844 | struct virtio_gpu_fence *fence) |
| 846 | { | 845 | { |
| 847 | struct virtio_gpu_transfer_host_3d *cmd_p; | 846 | struct virtio_gpu_transfer_host_3d *cmd_p; |
| 848 | struct virtio_gpu_vbuffer *vbuf; | 847 | struct virtio_gpu_vbuffer *vbuf; |
| @@ -870,7 +869,7 @@ void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev, | |||
| 870 | uint32_t resource_id, uint32_t ctx_id, | 869 | uint32_t resource_id, uint32_t ctx_id, |
| 871 | uint64_t offset, uint32_t level, | 870 | uint64_t offset, uint32_t level, |
| 872 | struct virtio_gpu_box *box, | 871 | struct virtio_gpu_box *box, |
| 873 | struct virtio_gpu_fence **fence) | 872 | struct virtio_gpu_fence *fence) |
| 874 | { | 873 | { |
| 875 | struct virtio_gpu_transfer_host_3d *cmd_p; | 874 | struct virtio_gpu_transfer_host_3d *cmd_p; |
| 876 | struct virtio_gpu_vbuffer *vbuf; | 875 | struct virtio_gpu_vbuffer *vbuf; |
| @@ -890,7 +889,7 @@ void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev, | |||
| 890 | 889 | ||
| 891 | void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev, | 890 | void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev, |
| 892 | void *data, uint32_t data_size, | 891 | void *data, uint32_t data_size, |
| 893 | uint32_t ctx_id, struct virtio_gpu_fence **fence) | 892 | uint32_t ctx_id, struct virtio_gpu_fence *fence) |
| 894 | { | 893 | { |
| 895 | struct virtio_gpu_cmd_submit *cmd_p; | 894 | struct virtio_gpu_cmd_submit *cmd_p; |
| 896 | struct virtio_gpu_vbuffer *vbuf; | 895 | struct virtio_gpu_vbuffer *vbuf; |
| @@ -910,7 +909,7 @@ void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev, | |||
| 910 | 909 | ||
| 911 | int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev, | 910 | int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev, |
| 912 | struct virtio_gpu_object *obj, | 911 | struct virtio_gpu_object *obj, |
| 913 | struct virtio_gpu_fence **fence) | 912 | struct virtio_gpu_fence *fence) |
| 914 | { | 913 | { |
| 915 | bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev); | 914 | bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev); |
| 916 | struct virtio_gpu_mem_entry *ents; | 915 | struct virtio_gpu_mem_entry *ents; |
| @@ -967,7 +966,7 @@ void virtio_gpu_object_detach(struct virtio_gpu_device *vgdev, | |||
| 967 | if (use_dma_api && obj->mapped) { | 966 | if (use_dma_api && obj->mapped) { |
| 968 | struct virtio_gpu_fence *fence = virtio_gpu_fence_alloc(vgdev); | 967 | struct virtio_gpu_fence *fence = virtio_gpu_fence_alloc(vgdev); |
| 969 | /* detach backing and wait for the host to process it ... */ | 968 | /* detach backing and wait for the host to process it ... */ |
| 970 | virtio_gpu_cmd_resource_inval_backing(vgdev, obj->hw_res_handle, &fence); | 969 | virtio_gpu_cmd_resource_inval_backing(vgdev, obj->hw_res_handle, fence); |
| 971 | dma_fence_wait(&fence->f, true); | 970 | dma_fence_wait(&fence->f, true); |
| 972 | dma_fence_put(&fence->f); | 971 | dma_fence_put(&fence->f); |
| 973 | 972 | ||
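The detach path above allocates a fence, queues the invalidate-backing command carrying it, and only proceeds after dma_fence_wait() returns, i.e. once the host has processed the command, before dropping the fence reference. A toy sketch of that allocate/queue/wait/put ordering; every name here (fence_alloc, queue_detach_backing, and so on) is a placeholder, and the "host" completes synchronously for the sake of the example:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Toy fence: a refcount plus a signaled flag. */
struct fence {
	int refcount;
	bool signaled;
};

static struct fence *fence_alloc(void)
{
	struct fence *f = calloc(1, sizeof(*f));

	if (f)
		f->refcount = 1;
	return f;
}

static void fence_put(struct fence *f)
{
	if (--f->refcount == 0)
		free(f);
}

/* Stand-in for queuing the detach command; the "host" processes it
 * immediately here and signals the fence. */
static void queue_detach_backing(struct fence *f)
{
	f->signaled = true;
}

static void fence_wait(struct fence *f)
{
	while (!f->signaled)
		;	/* a real implementation would sleep, not spin */
}

static int object_detach(void)
{
	struct fence *fence = fence_alloc();

	if (!fence)
		return -1;

	/* detach backing and wait for the host to process it ... */
	queue_detach_backing(fence);
	fence_wait(fence);
	fence_put(fence);
	return 0;
}

int main(void)
{
	if (object_detach() == 0)
		printf("backing detached\n");
	return 0;
}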
diff --git a/drivers/gpu/drm/vkms/vkms_plane.c b/drivers/gpu/drm/vkms/vkms_plane.c index 7041007396ae..418817600ad1 100644 --- a/drivers/gpu/drm/vkms/vkms_plane.c +++ b/drivers/gpu/drm/vkms/vkms_plane.c | |||
| @@ -23,8 +23,11 @@ vkms_plane_duplicate_state(struct drm_plane *plane) | |||
| 23 | return NULL; | 23 | return NULL; |
| 24 | 24 | ||
| 25 | crc_data = kzalloc(sizeof(*crc_data), GFP_KERNEL); | 25 | crc_data = kzalloc(sizeof(*crc_data), GFP_KERNEL); |
| 26 | if (WARN_ON(!crc_data)) | 26 | if (!crc_data) { |
| 27 | DRM_INFO("Couldn't allocate crc_data"); | 27 | DRM_DEBUG_KMS("Couldn't allocate crc_data\n"); |
| 28 | kfree(vkms_state); | ||
| 29 | return NULL; | ||
| 30 | } | ||
| 28 | 31 | ||
| 29 | vkms_state->crc_data = crc_data; | 32 | vkms_state->crc_data = crc_data; |
| 30 | 33 | ||
| @@ -138,14 +141,12 @@ static int vkms_prepare_fb(struct drm_plane *plane, | |||
| 138 | struct drm_plane_state *state) | 141 | struct drm_plane_state *state) |
| 139 | { | 142 | { |
| 140 | struct drm_gem_object *gem_obj; | 143 | struct drm_gem_object *gem_obj; |
| 141 | struct vkms_gem_object *vkms_obj; | ||
| 142 | int ret; | 144 | int ret; |
| 143 | 145 | ||
| 144 | if (!state->fb) | 146 | if (!state->fb) |
| 145 | return 0; | 147 | return 0; |
| 146 | 148 | ||
| 147 | gem_obj = drm_gem_fb_get_obj(state->fb, 0); | 149 | gem_obj = drm_gem_fb_get_obj(state->fb, 0); |
| 148 | vkms_obj = drm_gem_to_vkms_gem(gem_obj); | ||
| 149 | ret = vkms_gem_vmap(gem_obj); | 150 | ret = vkms_gem_vmap(gem_obj); |
| 150 | if (ret) | 151 | if (ret) |
| 151 | DRM_ERROR("vmap failed: %d\n", ret); | 152 | DRM_ERROR("vmap failed: %d\n", ret); |
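The vkms change above makes duplicate_state unwind completely when the crc_data allocation fails: rather than warning and continuing with a NULL pointer, it frees the partially duplicated state and returns NULL. A small sketch of that two-stage allocate-with-rollback pattern, using made-up structure names rather than the vkms types:

#include <stdio.h>
#include <stdlib.h>

struct crc_data {
	unsigned int crc;
};

struct plane_state {
	struct crc_data *crc_data;
};

/* Duplicate a state; on any failure, free what was already allocated
 * and return NULL so the caller sees a single, clean error. */
static struct plane_state *duplicate_state(void)
{
	struct plane_state *state = calloc(1, sizeof(*state));

	if (!state)
		return NULL;

	state->crc_data = calloc(1, sizeof(*state->crc_data));
	if (!state->crc_data) {
		free(state);	/* roll back the first allocation */
		return NULL;
	}
	return state;
}

int main(void)
{
	struct plane_state *state = duplicate_state();

	if (!state)
		return 1;
	printf("state duplicated\n");
	free(state->crc_data);
	free(state);
	return 0;
}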
