23 files changed, 276 insertions, 197 deletions
diff --git a/drivers/gpu/drm/drm_context.c b/drivers/gpu/drm/drm_context.c
index b4fb86d89850..224ff965bcf7 100644
--- a/drivers/gpu/drm/drm_context.c
+++ b/drivers/gpu/drm/drm_context.c
@@ -42,6 +42,10 @@
 
 #include <drm/drmP.h>
 
+/******************************************************************/
+/** \name Context bitmap support */
+/*@{*/
+
 /**
  * Free a handle from the context bitmap.
  *
@@ -52,48 +56,13 @@
  * in drm_device::ctx_idr, while holding the drm_device::struct_mutex
  * lock.
  */
-static void drm_ctxbitmap_free(struct drm_device * dev, int ctx_handle)
+void drm_ctxbitmap_free(struct drm_device * dev, int ctx_handle)
 {
-	if (drm_core_check_feature(dev, DRIVER_MODESET))
-		return;
-
 	mutex_lock(&dev->struct_mutex);
 	idr_remove(&dev->ctx_idr, ctx_handle);
 	mutex_unlock(&dev->struct_mutex);
 }
 
-/******************************************************************/
-/** \name Context bitmap support */
-/*@{*/
-
-void drm_legacy_ctxbitmap_release(struct drm_device *dev,
-				  struct drm_file *file_priv)
-{
-	if (drm_core_check_feature(dev, DRIVER_MODESET))
-		return;
-
-	mutex_lock(&dev->ctxlist_mutex);
-	if (!list_empty(&dev->ctxlist)) {
-		struct drm_ctx_list *pos, *n;
-
-		list_for_each_entry_safe(pos, n, &dev->ctxlist, head) {
-			if (pos->tag == file_priv &&
-			    pos->handle != DRM_KERNEL_CONTEXT) {
-				if (dev->driver->context_dtor)
-					dev->driver->context_dtor(dev,
-								  pos->handle);
-
-				drm_ctxbitmap_free(dev, pos->handle);
-
-				list_del(&pos->head);
-				kfree(pos);
-				--dev->ctx_count;
-			}
-		}
-	}
-	mutex_unlock(&dev->ctxlist_mutex);
-}
-
 /**
  * Context bitmap allocation.
  *
@@ -121,12 +90,10 @@ static int drm_ctxbitmap_next(struct drm_device * dev)
  *
  * Initialise the drm_device::ctx_idr
  */
-void drm_legacy_ctxbitmap_init(struct drm_device * dev)
+int drm_ctxbitmap_init(struct drm_device * dev)
 {
-	if (drm_core_check_feature(dev, DRIVER_MODESET))
-		return;
-
 	idr_init(&dev->ctx_idr);
+	return 0;
 }
 
 /**
@@ -137,7 +104,7 @@ void drm_legacy_ctxbitmap_init(struct drm_device * dev)
  * Free all idr members using drm_ctx_sarea_free helper function
  * while holding the drm_device::struct_mutex lock.
  */
-void drm_legacy_ctxbitmap_cleanup(struct drm_device * dev)
+void drm_ctxbitmap_cleanup(struct drm_device * dev)
 {
 	mutex_lock(&dev->struct_mutex);
 	idr_destroy(&dev->ctx_idr);
@@ -169,9 +136,6 @@ int drm_getsareactx(struct drm_device *dev, void *data,
 	struct drm_local_map *map;
 	struct drm_map_list *_entry;
 
-	if (drm_core_check_feature(dev, DRIVER_MODESET))
-		return -EINVAL;
-
 	mutex_lock(&dev->struct_mutex);
 
 	map = idr_find(&dev->ctx_idr, request->ctx_id);
@@ -216,9 +180,6 @@ int drm_setsareactx(struct drm_device *dev, void *data,
 	struct drm_local_map *map = NULL;
 	struct drm_map_list *r_list = NULL;
 
-	if (drm_core_check_feature(dev, DRIVER_MODESET))
-		return -EINVAL;
-
 	mutex_lock(&dev->struct_mutex);
 	list_for_each_entry(r_list, &dev->maplist, head) {
 		if (r_list->map
@@ -319,9 +280,6 @@ int drm_resctx(struct drm_device *dev, void *data,
 	struct drm_ctx ctx;
 	int i;
 
-	if (drm_core_check_feature(dev, DRIVER_MODESET))
-		return -EINVAL;
-
 	if (res->count >= DRM_RESERVED_CONTEXTS) {
 		memset(&ctx, 0, sizeof(ctx));
 		for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) {
@@ -352,9 +310,6 @@ int drm_addctx(struct drm_device *dev, void *data,
 	struct drm_ctx_list *ctx_entry;
 	struct drm_ctx *ctx = data;
 
-	if (drm_core_check_feature(dev, DRIVER_MODESET))
-		return -EINVAL;
-
 	ctx->handle = drm_ctxbitmap_next(dev);
 	if (ctx->handle == DRM_KERNEL_CONTEXT) {
 		/* Skip kernel's context and get a new one. */
@@ -398,9 +353,6 @@ int drm_getctx(struct drm_device *dev, void *data, struct drm_file *file_priv)
 {
 	struct drm_ctx *ctx = data;
 
-	if (drm_core_check_feature(dev, DRIVER_MODESET))
-		return -EINVAL;
-
 	/* This is 0, because we don't handle any context flags */
 	ctx->flags = 0;
 
@@ -423,9 +375,6 @@ int drm_switchctx(struct drm_device *dev, void *data,
 {
 	struct drm_ctx *ctx = data;
 
-	if (drm_core_check_feature(dev, DRIVER_MODESET))
-		return -EINVAL;
-
 	DRM_DEBUG("%d\n", ctx->handle);
 	return drm_context_switch(dev, dev->last_context, ctx->handle);
 }
@@ -446,9 +395,6 @@ int drm_newctx(struct drm_device *dev, void *data,
 {
 	struct drm_ctx *ctx = data;
 
-	if (drm_core_check_feature(dev, DRIVER_MODESET))
-		return -EINVAL;
-
 	DRM_DEBUG("%d\n", ctx->handle);
 	drm_context_switch_complete(dev, file_priv, ctx->handle);
 
@@ -471,9 +417,6 @@ int drm_rmctx(struct drm_device *dev, void *data,
 {
 	struct drm_ctx *ctx = data;
 
-	if (drm_core_check_feature(dev, DRIVER_MODESET))
-		return -EINVAL;
-
 	DRM_DEBUG("%d\n", ctx->handle);
 	if (ctx->handle != DRM_KERNEL_CONTEXT) {
 		if (dev->driver->context_dtor)
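Note: this change restores the pre-"legacy" names (drm_ctxbitmap_*) and drops the DRIVER_MODESET early-outs, so the context-bitmap setup runs unconditionally again. The "bitmap" is really an idr handle allocator. A minimal sketch of the same idr pattern, with hypothetical names (idr_alloc() assumed, available since 3.9):

    #include <linux/idr.h>
    #include <linux/gfp.h>
    #include <linux/mutex.h>

    static DEFINE_IDR(ctx_idr);
    static DEFINE_MUTEX(ctx_lock);

    /* Allocate the lowest free handle, as drm_ctxbitmap_next() does. */
    static int ctx_handle_alloc(void *ctx)
    {
    	int handle;

    	mutex_lock(&ctx_lock);
    	handle = idr_alloc(&ctx_idr, ctx, 0, 0, GFP_KERNEL);
    	mutex_unlock(&ctx_lock);
    	return handle;	/* new id, or negative errno on failure */
    }

    /* Return a handle to the pool, as drm_ctxbitmap_free() does. */
    static void ctx_handle_free(int handle)
    {
    	mutex_lock(&ctx_lock);
    	idr_remove(&ctx_idr, handle);
    	mutex_unlock(&ctx_lock);
    }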
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index 4be8e09a32ef..3f84277d7036 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -439,7 +439,26 @@ int drm_release(struct inode *inode, struct file *filp)
 	if (dev->driver->driver_features & DRIVER_GEM)
 		drm_gem_release(dev, file_priv);
 
-	drm_legacy_ctxbitmap_release(dev, file_priv);
+	mutex_lock(&dev->ctxlist_mutex);
+	if (!list_empty(&dev->ctxlist)) {
+		struct drm_ctx_list *pos, *n;
+
+		list_for_each_entry_safe(pos, n, &dev->ctxlist, head) {
+			if (pos->tag == file_priv &&
+			    pos->handle != DRM_KERNEL_CONTEXT) {
+				if (dev->driver->context_dtor)
+					dev->driver->context_dtor(dev,
+								  pos->handle);
+
+				drm_ctxbitmap_free(dev, pos->handle);
+
+				list_del(&pos->head);
+				kfree(pos);
+				--dev->ctx_count;
+			}
+		}
+	}
+	mutex_unlock(&dev->ctxlist_mutex);
 
 	mutex_lock(&dev->struct_mutex);
 
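Note: the release path deletes entries while walking the list, which is why it uses list_for_each_entry_safe(): the variant caches the next element ('n') so list_del() plus kfree() on the current one cannot break the traversal. The pattern in isolation, with a hypothetical item type:

    #include <linux/list.h>
    #include <linux/slab.h>

    struct item {
    	struct list_head head;
    	int tag;
    };

    /* Delete every entry matching 'tag'; the _safe variant keeps a
     * next-pointer so freeing 'pos' cannot corrupt the walk. */
    static void purge(struct list_head *list, int tag)
    {
    	struct item *pos, *n;

    	list_for_each_entry_safe(pos, n, list, head) {
    		if (pos->tag == tag) {
    			list_del(&pos->head);
    			kfree(pos);
    		}
    	}
    }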
diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
index e7eb0276f7f1..39d864576be4 100644
--- a/drivers/gpu/drm/drm_stub.c
+++ b/drivers/gpu/drm/drm_stub.c
@@ -292,7 +292,13 @@ int drm_fill_in_dev(struct drm_device *dev,
 		goto error_out_unreg;
 	}
 
-	drm_legacy_ctxbitmap_init(dev);
+
+
+	retcode = drm_ctxbitmap_init(dev);
+	if (retcode) {
+		DRM_ERROR("Cannot allocate memory for context bitmap.\n");
+		goto error_out_unreg;
+	}
 
 	if (driver->driver_features & DRIVER_GEM) {
 		retcode = drm_gem_init(dev);
@@ -446,7 +452,7 @@ void drm_put_dev(struct drm_device *dev)
 		drm_rmmap(dev, r_list->map);
 	drm_ht_remove(&dev->map_hash);
 
-	drm_legacy_ctxbitmap_cleanup(dev);
+	drm_ctxbitmap_cleanup(dev);
 
 	if (drm_core_check_feature(dev, DRIVER_MODESET))
 		drm_put_minor(&dev->control);
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index 4752f223e5b2..45b6ef595965 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -56,7 +56,7 @@ config DRM_EXYNOS_IPP
 
 config DRM_EXYNOS_FIMC
 	bool "Exynos DRM FIMC"
-	depends on DRM_EXYNOS_IPP && MFD_SYSCON && OF
+	depends on DRM_EXYNOS_IPP && MFD_SYSCON
 	help
 	  Choose this option if you want to use Exynos FIMC for DRM.
 
diff --git a/drivers/gpu/drm/exynos/exynos_drm_buf.c b/drivers/gpu/drm/exynos/exynos_drm_buf.c
index 3445a0f3a6b2..9c8088462c26 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_buf.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_buf.c
@@ -63,7 +63,8 @@ static int lowlevel_buffer_allocate(struct drm_device *dev,
 			return -ENOMEM;
 		}
 
-		buf->kvaddr = dma_alloc_attrs(dev->dev, buf->size,
+		buf->kvaddr = (void __iomem *)dma_alloc_attrs(dev->dev,
+					buf->size,
 					&buf->dma_addr, GFP_KERNEL,
 					&buf->dma_attrs);
 		if (!buf->kvaddr) {
@@ -90,9 +91,9 @@ static int lowlevel_buffer_allocate(struct drm_device *dev,
 	}
 
 	buf->sgt = drm_prime_pages_to_sg(buf->pages, nr_pages);
-	if (!buf->sgt) {
+	if (IS_ERR(buf->sgt)) {
 		DRM_ERROR("failed to get sg table.\n");
-		ret = -ENOMEM;
+		ret = PTR_ERR(buf->sgt);
 		goto err_free_attrs;
 	}
 
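Note: the second hunk is not cosmetic. drm_prime_pages_to_sg() signals failure with an ERR_PTR()-encoded pointer rather than NULL, so the old `if (!buf->sgt)` test could never fire, and the errno was being invented as -ENOMEM instead of propagated. The error-pointer convention in miniature (hypothetical thing_create(), not the exynos code):

    #include <linux/err.h>
    #include <linux/slab.h>

    struct thing { int x; };

    /* On failure, encode the errno in the returned pointer with
     * ERR_PTR() instead of returning NULL. */
    static struct thing *thing_create(void)
    {
    	struct thing *t = kzalloc(sizeof(*t), GFP_KERNEL);

    	if (!t)
    		return ERR_PTR(-ENOMEM);	/* not NULL! */
    	return t;
    }

    static int caller(void)
    {
    	struct thing *t = thing_create();

    	/* IS_ERR() is true only for ERR_PTR()-encoded values; a NULL
    	 * check silently accepts the error pointer. */
    	if (IS_ERR(t))
    		return PTR_ERR(t);	/* recover the original errno */
    	kfree(t);
    	return 0;
    }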
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
index 78e868bcf1ec..e7c2f2d07f19 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
@@ -99,12 +99,13 @@ static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
 	if (is_drm_iommu_supported(dev)) {
 		unsigned int nr_pages = buffer->size >> PAGE_SHIFT;
 
-		buffer->kvaddr = vmap(buffer->pages, nr_pages, VM_MAP,
+		buffer->kvaddr = (void __iomem *) vmap(buffer->pages,
+				nr_pages, VM_MAP,
 				pgprot_writecombine(PAGE_KERNEL));
 	} else {
 		phys_addr_t dma_addr = buffer->dma_addr;
 		if (dma_addr)
-			buffer->kvaddr = phys_to_virt(dma_addr);
+			buffer->kvaddr = (void __iomem *)phys_to_virt(dma_addr);
 		else
 			buffer->kvaddr = (void __iomem *)NULL;
 	}
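Note: both exynos hunks add casts to satisfy sparse, which tracks the __iomem address space on pointers; kvaddr is declared void __iomem * while dma_alloc_attrs(), vmap(), and phys_to_virt() return plain kernel pointers. A minimal illustration of the annotation, assuming a sparse-checked build:

    #include <linux/compiler.h>

    /* sparse flags any mixing of address spaces; crossing them
     * requires an explicit cast, as the patch does above. */
    static void __iomem *as_iomem(void *p)
    {
    	return (void __iomem *)p;
    }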
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 8507c6d1e642..df9253d890ee 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1392,14 +1392,11 @@ out:
 		if (i915_terminally_wedged(&dev_priv->gpu_error))
 			return VM_FAULT_SIGBUS;
 	case -EAGAIN:
-		/* Give the error handler a chance to run and move the
-		 * objects off the GPU active list. Next time we service the
-		 * fault, we should be able to transition the page into the
-		 * GTT without touching the GPU (and so avoid further
-		 * EIO/EGAIN). If the GPU is wedged, then there is no issue
-		 * with coherency, just lost writes.
+		/*
+		 * EAGAIN means the gpu is hung and we'll wait for the error
+		 * handler to reset everything when re-faulting in
+		 * i915_mutex_lock_interruptible.
 		 */
-		set_need_resched();
 	case 0:
 	case -ERESTARTSYS:
 	case -EINTR:
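Note: with set_need_resched() dropped (this cycle removed it from the GPU fault handlers generally; the same deletion appears in msm_gem.c below), -EAGAIN simply falls through to the VM_FAULT_NOPAGE return and the fault is retried, at which point i915_mutex_lock_interruptible() blocks until the reset handler has run. A simplified sketch of the errno-to-VM_FAULT mapping such a .fault handler uses (not the exact i915 code):

    #include <linux/mm.h>
    #include <linux/errno.h>

    static int demo_fault_status(int ret)
    {
    	switch (ret) {
    	case -EAGAIN:		/* reset pending: just retry the fault */
    	case 0:
    	case -ERESTARTSYS:
    	case -EINTR:		/* fallthrough for all of the above */
    		return VM_FAULT_NOPAGE;	/* re-run the faulting access */
    	case -ENOMEM:
    		return VM_FAULT_OOM;
    	default:
    		return VM_FAULT_SIGBUS;
    	}
    }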
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 83cce0cdb769..4b91228fd9bd 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -1469,6 +1469,34 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
 	return ret;
 }
 
+static void i915_error_wake_up(struct drm_i915_private *dev_priv,
+			       bool reset_completed)
+{
+	struct intel_ring_buffer *ring;
+	int i;
+
+	/*
+	 * Notify all waiters for GPU completion events that reset state has
+	 * been changed, and that they need to restart their wait after
+	 * checking for potential errors (and bail out to drop locks if there is
+	 * a gpu reset pending so that i915_error_work_func can acquire them).
+	 */
+
+	/* Wake up __wait_seqno, potentially holding dev->struct_mutex. */
+	for_each_ring(ring, dev_priv, i)
+		wake_up_all(&ring->irq_queue);
+
+	/* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */
+	wake_up_all(&dev_priv->pending_flip_queue);
+
+	/*
+	 * Signal tasks blocked in i915_gem_wait_for_error that the pending
+	 * reset state is cleared.
+	 */
+	if (reset_completed)
+		wake_up_all(&dev_priv->gpu_error.reset_queue);
+}
+
 /**
  * i915_error_work_func - do process context error handling work
  * @work: work struct
@@ -1483,11 +1511,10 @@ static void i915_error_work_func(struct work_struct *work)
 	drm_i915_private_t *dev_priv = container_of(error, drm_i915_private_t,
 						    gpu_error);
 	struct drm_device *dev = dev_priv->dev;
-	struct intel_ring_buffer *ring;
 	char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
 	char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
 	char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
-	int i, ret;
+	int ret;
 
 	kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event);
 
@@ -1506,8 +1533,16 @@ static void i915_error_work_func(struct work_struct *work)
 		kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE,
 				   reset_event);
 
+		/*
+		 * All state reset _must_ be completed before we update the
+		 * reset counter, for otherwise waiters might miss the reset
+		 * pending state and not properly drop locks, resulting in
+		 * deadlocks with the reset work.
+		 */
 		ret = i915_reset(dev);
 
+		intel_display_handle_reset(dev);
+
 		if (ret == 0) {
 			/*
 			 * After all the gem state is reset, increment the reset
@@ -1528,12 +1563,11 @@
 			atomic_set(&error->reset_counter, I915_WEDGED);
 		}
 
-		for_each_ring(ring, dev_priv, i)
-			wake_up_all(&ring->irq_queue);
-
-		intel_display_handle_reset(dev);
-
-		wake_up_all(&dev_priv->gpu_error.reset_queue);
+		/*
+		 * Note: The wake_up also serves as a memory barrier so that
+		 * waiters see the update value of the reset counter atomic_t.
+		 */
+		i915_error_wake_up(dev_priv, true);
 	}
 }
 
@@ -1642,8 +1676,6 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
 void i915_handle_error(struct drm_device *dev, bool wedged)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_ring_buffer *ring;
-	int i;
 
 	i915_capture_error_state(dev);
 	i915_report_and_clear_eir(dev);
@@ -1653,11 +1685,19 @@ void i915_handle_error(struct drm_device *dev, bool wedged)
 				&dev_priv->gpu_error.reset_counter);
 
 		/*
-		 * Wakeup waiting processes so that the reset work item
-		 * doesn't deadlock trying to grab various locks.
+		 * Wakeup waiting processes so that the reset work function
+		 * i915_error_work_func doesn't deadlock trying to grab various
+		 * locks. By bumping the reset counter first, the woken
+		 * processes will see a reset in progress and back off,
+		 * releasing their locks and then wait for the reset completion.
+		 * We must do this for _all_ gpu waiters that might hold locks
+		 * that the reset work needs to acquire.
+		 *
+		 * Note: The wake_up serves as the required memory barrier to
+		 * ensure that the waiters see the updated value of the reset
+		 * counter atomic_t.
 		 */
-		for_each_ring(ring, dev_priv, i)
-			wake_up_all(&ring->irq_queue);
+		i915_error_wake_up(dev_priv, false);
 	}
 
 /*
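Note: the ordering contract spelled out in the new comments is: bump gpu_error.reset_counter first, then wake every waiter that might hold a lock the reset work needs (ring->irq_queue, pending_flip_queue); the wake_up doubles as the memory barrier that publishes the counter update. The waiter side of that handshake, in a reduced form (assumed helper shape, not the real __wait_seqno):

    #include <linux/wait.h>
    #include <linux/atomic.h>
    #include <linux/errno.h>

    static atomic_t reset_counter = ATOMIC_INIT(0);
    static DECLARE_WAIT_QUEUE_HEAD(irq_queue);

    /* Sample the counter before sleeping, and treat "counter changed"
     * as a wakeup reason so the caller can drop its locks while the
     * reset work runs, then retry. */
    static int wait_or_back_off(bool (*done)(void))
    {
    	unsigned int pre = atomic_read(&reset_counter);
    	int ret;

    	ret = wait_event_interruptible(irq_queue,
    			done() || atomic_read(&reset_counter) != pre);
    	if (ret)
    		return ret;			/* signal */
    	if (atomic_read(&reset_counter) != pre)
    		return -EAGAIN;			/* reset pending: release locks */
    	return 0;
    }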
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 63aca49d11a8..63de2701b974 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -778,7 +778,7 @@ void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc)
 		/* Can only use the always-on power well for eDP when
 		 * not using the panel fitter, and when not using motion
		 * blur mitigation (which we don't support). */
-		if (intel_crtc->config.pch_pfit.size)
+		if (intel_crtc->config.pch_pfit.enabled)
 			temp |= TRANS_DDI_EDP_INPUT_A_ONOFF;
 		else
 			temp |= TRANS_DDI_EDP_INPUT_A_ON;
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 2489d0b4c7d2..d8a1d98693e7 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -2249,7 +2249,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
 	I915_WRITE(PIPESRC(intel_crtc->pipe),
 		   ((crtc->mode.hdisplay - 1) << 16) |
 		   (crtc->mode.vdisplay - 1));
-	if (!intel_crtc->config.pch_pfit.size &&
+	if (!intel_crtc->config.pch_pfit.enabled &&
 	    (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) ||
 	     intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
 		I915_WRITE(PF_CTL(intel_crtc->pipe), 0);
@@ -3203,7 +3203,7 @@ static void ironlake_pfit_enable(struct intel_crtc *crtc)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int pipe = crtc->pipe;
 
-	if (crtc->config.pch_pfit.size) {
+	if (crtc->config.pch_pfit.enabled) {
 		/* Force use of hard-coded filter coefficients
 		 * as some pre-programmed values are broken,
 		 * e.g. x201.
@@ -3428,7 +3428,7 @@ static void ironlake_pfit_disable(struct intel_crtc *crtc)
 
 	/* To avoid upsetting the power well on haswell only disable the pfit if
 	 * it's in use. The hw state code will make sure we get this right. */
-	if (crtc->config.pch_pfit.size) {
+	if (crtc->config.pch_pfit.enabled) {
 		I915_WRITE(PF_CTL(pipe), 0);
 		I915_WRITE(PF_WIN_POS(pipe), 0);
 		I915_WRITE(PF_WIN_SZ(pipe), 0);
@@ -4877,9 +4877,6 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
 		return -EINVAL;
 	}
 
-	/* Ensure that the cursor is valid for the new mode before changing... */
-	intel_crtc_update_cursor(crtc, true);
-
 	if (is_lvds && dev_priv->lvds_downclock_avail) {
 		/*
 		 * Ensure we match the reduced clock's P to the target clock.
@@ -5768,9 +5765,6 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
 		intel_crtc->config.dpll.p2 = clock.p2;
 	}
 
-	/* Ensure that the cursor is valid for the new mode before changing... */
-	intel_crtc_update_cursor(crtc, true);
-
 	/* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
 	if (intel_crtc->config.has_pch_encoder) {
 		fp = i9xx_dpll_compute_fp(&intel_crtc->config.dpll);
@@ -5859,6 +5853,7 @@ static void ironlake_get_pfit_config(struct intel_crtc *crtc,
 	tmp = I915_READ(PF_CTL(crtc->pipe));
 
 	if (tmp & PF_ENABLE) {
+		pipe_config->pch_pfit.enabled = true;
 		pipe_config->pch_pfit.pos = I915_READ(PF_WIN_POS(crtc->pipe));
 		pipe_config->pch_pfit.size = I915_READ(PF_WIN_SZ(crtc->pipe));
 
@@ -6236,7 +6231,7 @@ static void haswell_modeset_global_resources(struct drm_device *dev)
 		if (!crtc->base.enabled)
 			continue;
 
-		if (crtc->pipe != PIPE_A || crtc->config.pch_pfit.size ||
+		if (crtc->pipe != PIPE_A || crtc->config.pch_pfit.enabled ||
 		    crtc->config.cpu_transcoder != TRANSCODER_EDP)
 			enable = true;
 	}
@@ -6259,9 +6254,6 @@ static int haswell_crtc_mode_set(struct drm_crtc *crtc,
 	if (!intel_ddi_pll_mode_set(crtc))
 		return -EINVAL;
 
-	/* Ensure that the cursor is valid for the new mode before changing... */
-	intel_crtc_update_cursor(crtc, true);
-
 	if (intel_crtc->config.has_dp_encoder)
 		intel_dp_set_m_n(intel_crtc);
 
@@ -6494,15 +6486,15 @@ static void haswell_write_eld(struct drm_connector *connector,
 
 	/* Set ELD valid state */
 	tmp = I915_READ(aud_cntrl_st2);
-	DRM_DEBUG_DRIVER("HDMI audio: pin eld vld status=0x%8x\n", tmp);
+	DRM_DEBUG_DRIVER("HDMI audio: pin eld vld status=0x%08x\n", tmp);
 	tmp |= (AUDIO_ELD_VALID_A << (pipe * 4));
 	I915_WRITE(aud_cntrl_st2, tmp);
 	tmp = I915_READ(aud_cntrl_st2);
-	DRM_DEBUG_DRIVER("HDMI audio: eld vld status=0x%8x\n", tmp);
+	DRM_DEBUG_DRIVER("HDMI audio: eld vld status=0x%08x\n", tmp);
 
 	/* Enable HDMI mode */
 	tmp = I915_READ(aud_config);
-	DRM_DEBUG_DRIVER("HDMI audio: audio conf: 0x%8x\n", tmp);
+	DRM_DEBUG_DRIVER("HDMI audio: audio conf: 0x%08x\n", tmp);
 	/* clear N_programing_enable and N_value_index */
 	tmp &= ~(AUD_CONFIG_N_VALUE_INDEX | AUD_CONFIG_N_PROG_ENABLE);
 	I915_WRITE(aud_config, tmp);
@@ -6937,7 +6929,8 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
 	intel_crtc->cursor_width = width;
 	intel_crtc->cursor_height = height;
 
-	intel_crtc_update_cursor(crtc, intel_crtc->cursor_bo != NULL);
+	if (intel_crtc->active)
+		intel_crtc_update_cursor(crtc, intel_crtc->cursor_bo != NULL);
 
 	return 0;
 fail_unpin:
@@ -6956,7 +6949,8 @@ static int intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
 	intel_crtc->cursor_x = x;
 	intel_crtc->cursor_y = y;
 
-	intel_crtc_update_cursor(crtc, intel_crtc->cursor_bo != NULL);
+	if (intel_crtc->active)
+		intel_crtc_update_cursor(crtc, intel_crtc->cursor_bo != NULL);
 
 	return 0;
 }
@@ -8205,9 +8199,10 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
 		      pipe_config->gmch_pfit.control,
 		      pipe_config->gmch_pfit.pgm_ratios,
 		      pipe_config->gmch_pfit.lvds_border_bits);
-	DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x\n",
+	DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x, %s\n",
 		      pipe_config->pch_pfit.pos,
-		      pipe_config->pch_pfit.size);
+		      pipe_config->pch_pfit.size,
+		      pipe_config->pch_pfit.enabled ? "enabled" : "disabled");
 	DRM_DEBUG_KMS("ips: %i\n", pipe_config->ips_enabled);
 }
 
@@ -8603,8 +8598,11 @@
 	if (INTEL_INFO(dev)->gen < 4)
 		PIPE_CONF_CHECK_I(gmch_pfit.pgm_ratios);
 	PIPE_CONF_CHECK_I(gmch_pfit.lvds_border_bits);
-	PIPE_CONF_CHECK_I(pch_pfit.pos);
-	PIPE_CONF_CHECK_I(pch_pfit.size);
+	PIPE_CONF_CHECK_I(pch_pfit.enabled);
+	if (current_config->pch_pfit.enabled) {
+		PIPE_CONF_CHECK_I(pch_pfit.pos);
+		PIPE_CONF_CHECK_I(pch_pfit.size);
+	}
 
 	PIPE_CONF_CHECK_I(ips_enabled);
 
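Note: with explicit enable tracking, pos/size are only cross-checked when the fitter is actually on, so whatever the PF_WIN_* registers happen to contain while it is off can no longer trip the state checker. PIPE_CONF_CHECK_I is roughly the following macro (approximate shape; the real definition lives earlier in intel_pipe_config_compare() and relies on its current_config/pipe_config locals):

    /* Approximate sketch of the i915 state-checker compare macro. */
    #define PIPE_CONF_CHECK_I(name) do { \
    	if (current_config->name != pipe_config->name) { \
    		DRM_ERROR("mismatch in " #name \
    			  " (expected %i, found %i)\n", \
    			  current_config->name, \
    			  pipe_config->name); \
    		return false; \
    	} \
    } while (0)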
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index a47799e832c6..28cae80495e2 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -280,6 +280,7 @@ struct intel_crtc_config {
 	struct {
 		u32 pos;
 		u32 size;
+		bool enabled;
 	} pch_pfit;
 
 	/* FDI configuration, only valid if has_pch_encoder is set. */
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index 406303b509c1..7fa7df546c1e 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -263,6 +263,8 @@ static bool intel_dvo_compute_config(struct intel_encoder *encoder,
 		C(vtotal);
 		C(clock);
 #undef C
+
+		drm_mode_set_crtcinfo(adjusted_mode, 0);
 	}
 
 	if (intel_dvo->dev.dev_ops->mode_fixup)
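Note: the C(x) copies only fill the logical timing fields; the crtc_* variants (crtc_hdisplay, crtc_vtotal, ...) that the mode-set code actually programs must be derived separately, which is what the added drm_mode_set_crtcinfo(adjusted_mode, 0) does (0 means no CRTC_INTERLACE_HALVE_V-style adjustment flags). Usage in isolation, with made-up 1024x768 timings:

    #include <drm/drm_crtc.h>

    /* After filling the user-visible timings, derive the crtc_* fields
     * the hardware programming uses. */
    static void finish_mode(struct drm_display_mode *mode)
    {
    	mode->hdisplay = 1024;
    	mode->hsync_start = 1048;
    	mode->hsync_end = 1184;
    	mode->htotal = 1344;
    	mode->vdisplay = 768;
    	mode->vsync_start = 771;
    	mode->vsync_end = 777;
    	mode->vtotal = 806;
    	mode->clock = 65000;	/* kHz */

    	drm_mode_set_crtcinfo(mode, 0);
    	/* mode->crtc_hdisplay etc. are now valid */
    }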
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 42114ecbae0e..293564a2896a 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -112,6 +112,7 @@ intel_pch_panel_fitting(struct intel_crtc *intel_crtc,
 done:
 	pipe_config->pch_pfit.pos = (x << 16) | y;
 	pipe_config->pch_pfit.size = (width << 16) | height;
+	pipe_config->pch_pfit.enabled = pipe_config->pch_pfit.size != 0;
 }
 
 static void
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 0c115cc4899f..dd176b7296c1 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -2096,16 +2096,16 @@ static uint32_t ilk_pipe_pixel_rate(struct drm_device *dev,
 					struct drm_crtc *crtc)
 {
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	uint32_t pixel_rate, pfit_size;
+	uint32_t pixel_rate;
 
 	pixel_rate = intel_crtc->config.adjusted_mode.clock;
 
 	/* We only use IF-ID interlacing. If we ever use PF-ID we'll need to
 	 * adjust the pixel_rate here. */
 
-	pfit_size = intel_crtc->config.pch_pfit.size;
-	if (pfit_size) {
+	if (intel_crtc->config.pch_pfit.enabled) {
 		uint64_t pipe_w, pipe_h, pfit_w, pfit_h;
+		uint32_t pfit_size = intel_crtc->config.pch_pfit.size;
 
 		pipe_w = intel_crtc->config.requested_mode.hdisplay;
 		pipe_h = intel_crtc->config.requested_mode.vdisplay;
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 85037b9d4934..49482fd5b76c 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -788,6 +788,8 @@ static void intel_sdvo_get_dtd_from_mode(struct intel_sdvo_dtd *dtd,
 	uint16_t h_sync_offset, v_sync_offset;
 	int mode_clock;
 
+	memset(dtd, 0, sizeof(*dtd));
+
 	width = mode->hdisplay;
 	height = mode->vdisplay;
 
@@ -830,44 +832,51 @@
 	if (mode->flags & DRM_MODE_FLAG_PVSYNC)
 		dtd->part2.dtd_flags |= DTD_FLAG_VSYNC_POSITIVE;
 
-	dtd->part2.sdvo_flags = 0;
 	dtd->part2.v_sync_off_high = v_sync_offset & 0xc0;
-	dtd->part2.reserved = 0;
 }
 
-static void intel_sdvo_get_mode_from_dtd(struct drm_display_mode * mode,
+static void intel_sdvo_get_mode_from_dtd(struct drm_display_mode *pmode,
 					 const struct intel_sdvo_dtd *dtd)
 {
-	mode->hdisplay = dtd->part1.h_active;
-	mode->hdisplay += ((dtd->part1.h_high >> 4) & 0x0f) << 8;
-	mode->hsync_start = mode->hdisplay + dtd->part2.h_sync_off;
-	mode->hsync_start += (dtd->part2.sync_off_width_high & 0xc0) << 2;
-	mode->hsync_end = mode->hsync_start + dtd->part2.h_sync_width;
-	mode->hsync_end += (dtd->part2.sync_off_width_high & 0x30) << 4;
-	mode->htotal = mode->hdisplay + dtd->part1.h_blank;
-	mode->htotal += (dtd->part1.h_high & 0xf) << 8;
-
-	mode->vdisplay = dtd->part1.v_active;
-	mode->vdisplay += ((dtd->part1.v_high >> 4) & 0x0f) << 8;
-	mode->vsync_start = mode->vdisplay;
-	mode->vsync_start += (dtd->part2.v_sync_off_width >> 4) & 0xf;
-	mode->vsync_start += (dtd->part2.sync_off_width_high & 0x0c) << 2;
-	mode->vsync_start += dtd->part2.v_sync_off_high & 0xc0;
-	mode->vsync_end = mode->vsync_start +
+	struct drm_display_mode mode = {};
+
+	mode.hdisplay = dtd->part1.h_active;
+	mode.hdisplay += ((dtd->part1.h_high >> 4) & 0x0f) << 8;
+	mode.hsync_start = mode.hdisplay + dtd->part2.h_sync_off;
+	mode.hsync_start += (dtd->part2.sync_off_width_high & 0xc0) << 2;
+	mode.hsync_end = mode.hsync_start + dtd->part2.h_sync_width;
+	mode.hsync_end += (dtd->part2.sync_off_width_high & 0x30) << 4;
+	mode.htotal = mode.hdisplay + dtd->part1.h_blank;
+	mode.htotal += (dtd->part1.h_high & 0xf) << 8;
+
+	mode.vdisplay = dtd->part1.v_active;
+	mode.vdisplay += ((dtd->part1.v_high >> 4) & 0x0f) << 8;
+	mode.vsync_start = mode.vdisplay;
+	mode.vsync_start += (dtd->part2.v_sync_off_width >> 4) & 0xf;
+	mode.vsync_start += (dtd->part2.sync_off_width_high & 0x0c) << 2;
+	mode.vsync_start += dtd->part2.v_sync_off_high & 0xc0;
+	mode.vsync_end = mode.vsync_start +
 		(dtd->part2.v_sync_off_width & 0xf);
-	mode->vsync_end += (dtd->part2.sync_off_width_high & 0x3) << 4;
-	mode->vtotal = mode->vdisplay + dtd->part1.v_blank;
-	mode->vtotal += (dtd->part1.v_high & 0xf) << 8;
+	mode.vsync_end += (dtd->part2.sync_off_width_high & 0x3) << 4;
+	mode.vtotal = mode.vdisplay + dtd->part1.v_blank;
+	mode.vtotal += (dtd->part1.v_high & 0xf) << 8;
 
-	mode->clock = dtd->part1.clock * 10;
+	mode.clock = dtd->part1.clock * 10;
 
-	mode->flags &= ~(DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC);
 	if (dtd->part2.dtd_flags & DTD_FLAG_INTERLACE)
-		mode->flags |= DRM_MODE_FLAG_INTERLACE;
+		mode.flags |= DRM_MODE_FLAG_INTERLACE;
 	if (dtd->part2.dtd_flags & DTD_FLAG_HSYNC_POSITIVE)
-		mode->flags |= DRM_MODE_FLAG_PHSYNC;
+		mode.flags |= DRM_MODE_FLAG_PHSYNC;
+	else
+		mode.flags |= DRM_MODE_FLAG_NHSYNC;
 	if (dtd->part2.dtd_flags & DTD_FLAG_VSYNC_POSITIVE)
-		mode->flags |= DRM_MODE_FLAG_PVSYNC;
+		mode.flags |= DRM_MODE_FLAG_PVSYNC;
+	else
+		mode.flags |= DRM_MODE_FLAG_NVSYNC;
+
+	drm_mode_set_crtcinfo(&mode, 0);
+
+	drm_mode_copy(pmode, &mode);
 }
 
 static bool intel_sdvo_check_supp_encode(struct intel_sdvo *intel_sdvo)
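Note: two cleanups ride along with the completed sync-flag translation (the new NHSYNC/NVSYNC else branches): the DTD is zeroed up front with memset() instead of clearing sdvo_flags/reserved piecemeal, and the mode is rebuilt in a zeroed stack-local then drm_mode_copy()'d over the caller's structure, so no stale flag bits survive (the old code only ever cleared PHSYNC|PVSYNC). drm_mode_copy() preserves the destination's base.id and list linkage, which a plain struct assignment would clobber; a reduced sketch:

    #include <drm/drm_crtc.h>

    /* Fill 'out' from scratch: build in a zeroed local, then copy.
     * drm_mode_copy() keeps out->base.id and the list head intact. */
    static void rebuild_mode(struct drm_display_mode *out)
    {
    	struct drm_display_mode mode = {};

    	mode.hdisplay = 800;	/* ... remaining timings elided ... */
    	mode.flags |= DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC;

    	drm_mode_set_crtcinfo(&mode, 0);
    	drm_mode_copy(out, &mode);
    }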
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index a60584763b61..a0b9d8a95b16 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -124,6 +124,8 @@ void adreno_recover(struct msm_gpu *gpu)
 
 	/* reset completed fence seqno, just discard anything pending: */
 	adreno_gpu->memptrs->fence = gpu->submitted_fence;
+	adreno_gpu->memptrs->rptr = 0;
+	adreno_gpu->memptrs->wptr = 0;
 
 	gpu->funcs->pm_resume(gpu);
 	ret = gpu->funcs->hw_init(gpu);
@@ -229,7 +231,7 @@
 			return;
 	} while(time_before(jiffies, t));
 
-	DRM_ERROR("timeout waiting for %s to drain ringbuffer!\n", gpu->name);
+	DRM_ERROR("%s: timeout waiting to drain ringbuffer!\n", gpu->name);
 
 	/* TODO maybe we need to reset GPU here to recover from hang? */
 }
@@ -256,11 +258,17 @@ void adreno_wait_ring(struct msm_gpu *gpu, uint32_t ndwords)
 {
 	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
 	uint32_t freedwords;
+	unsigned long t = jiffies + ADRENO_IDLE_TIMEOUT;
 	do {
 		uint32_t size = gpu->rb->size / 4;
 		uint32_t wptr = get_wptr(gpu->rb);
 		uint32_t rptr = adreno_gpu->memptrs->rptr;
 		freedwords = (rptr + (size - 1) - wptr) % size;
+
+		if (time_after(jiffies, t)) {
+			DRM_ERROR("%s: timeout waiting for ringbuffer space\n", gpu->name);
+			break;
+		}
 	} while(freedwords < ndwords);
 }
 
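Note: the added deadline turns a potentially unbounded busy-wait for ringbuffer space into a bounded one, and it uses the wraparound-safe jiffies helpers; a naive `jiffies > t` comparison misbehaves when the counter wraps. The loop shape in isolation (hypothetical done() predicate; ADRENO_IDLE_TIMEOUT is presumably a jiffies interval):

    #include <linux/jiffies.h>

    /* Poll with a deadline: time_before()/time_after() compare jiffies
     * values safely across counter wraparound. */
    static bool poll_until_done(bool (*done)(void))
    {
    	unsigned long t = jiffies + msecs_to_jiffies(1000);

    	do {
    		if (done())
    			return true;
    	} while (time_before(jiffies, t));

    	return false;	/* timed out */
    }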
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 864c9773636b..008d772384c7 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -499,25 +499,41 @@ int msm_wait_fence_interruptable(struct drm_device *dev, uint32_t fence,
 		struct timespec *timeout)
 {
 	struct msm_drm_private *priv = dev->dev_private;
-	unsigned long timeout_jiffies = timespec_to_jiffies(timeout);
-	unsigned long start_jiffies = jiffies;
-	unsigned long remaining_jiffies;
 	int ret;
 
-	if (time_after(start_jiffies, timeout_jiffies))
-		remaining_jiffies = 0;
-	else
-		remaining_jiffies = timeout_jiffies - start_jiffies;
-
-	ret = wait_event_interruptible_timeout(priv->fence_event,
-			priv->completed_fence >= fence,
-			remaining_jiffies);
-	if (ret == 0) {
-		DBG("timeout waiting for fence: %u (completed: %u)",
-				fence, priv->completed_fence);
-		ret = -ETIMEDOUT;
-	} else if (ret != -ERESTARTSYS) {
-		ret = 0;
+	if (!priv->gpu)
+		return 0;
+
+	if (fence > priv->gpu->submitted_fence) {
+		DRM_ERROR("waiting on invalid fence: %u (of %u)\n",
+				fence, priv->gpu->submitted_fence);
+		return -EINVAL;
+	}
+
+	if (!timeout) {
+		/* no-wait: */
+		ret = fence_completed(dev, fence) ? 0 : -EBUSY;
+	} else {
+		unsigned long timeout_jiffies = timespec_to_jiffies(timeout);
+		unsigned long start_jiffies = jiffies;
+		unsigned long remaining_jiffies;
+
+		if (time_after(start_jiffies, timeout_jiffies))
+			remaining_jiffies = 0;
+		else
+			remaining_jiffies = timeout_jiffies - start_jiffies;
+
+		ret = wait_event_interruptible_timeout(priv->fence_event,
+				fence_completed(dev, fence),
+				remaining_jiffies);
+
+		if (ret == 0) {
+			DBG("timeout waiting for fence: %u (completed: %u)",
+					fence, priv->completed_fence);
+			ret = -ETIMEDOUT;
+		} else if (ret != -ERESTARTSYS) {
+			ret = 0;
+		}
 	}
 
 	return ret;
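Note: the rework hinges on wait_event_interruptible_timeout()'s return convention: 0 means the timeout elapsed with the condition still false, a positive value is the jiffies remaining when the condition became true, and -ERESTARTSYS means a signal interrupted the sleep; hence the -ETIMEDOUT/0 mapping at the bottom. In isolation (hypothetical cond() predicate):

    #include <linux/wait.h>
    #include <linux/jiffies.h>
    #include <linux/errno.h>

    static DECLARE_WAIT_QUEUE_HEAD(wq);

    static int wait_for(bool (*cond)(void), unsigned long timeout_j)
    {
    	long ret = wait_event_interruptible_timeout(wq, cond(), timeout_j);

    	if (ret == 0)
    		return -ETIMEDOUT;	/* timed out, condition still false */
    	if (ret == -ERESTARTSYS)
    		return ret;		/* interrupted by a signal */
    	return 0;			/* ret > 0: condition became true */
    }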
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 80d75094bf0a..df8f1d084bc1 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -153,7 +153,7 @@ void *msm_gem_vaddr(struct drm_gem_object *obj);
 int msm_gem_queue_inactive_work(struct drm_gem_object *obj,
 		struct work_struct *work);
 void msm_gem_move_to_active(struct drm_gem_object *obj,
-		struct msm_gpu *gpu, uint32_t fence);
+		struct msm_gpu *gpu, bool write, uint32_t fence);
 void msm_gem_move_to_inactive(struct drm_gem_object *obj);
 int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op,
 		struct timespec *timeout);
@@ -191,6 +191,12 @@ u32 msm_readl(const void __iomem *addr);
 #define DBG(fmt, ...) DRM_DEBUG(fmt"\n", ##__VA_ARGS__)
 #define VERB(fmt, ...) if (0) DRM_DEBUG(fmt"\n", ##__VA_ARGS__)
 
+static inline bool fence_completed(struct drm_device *dev, uint32_t fence)
+{
+	struct msm_drm_private *priv = dev->dev_private;
+	return priv->completed_fence >= fence;
+}
+
 static inline int align_pitch(int width, int bpp)
 {
 	int bytespp = (bpp + 7) / 8;
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 6b5a6c8c7658..29eacfa29cfb 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -40,9 +40,9 @@ static struct page **get_pages(struct drm_gem_object *obj)
 	}
 
 	msm_obj->sgt = drm_prime_pages_to_sg(p, npages);
-	if (!msm_obj->sgt) {
+	if (IS_ERR(msm_obj->sgt)) {
 		dev_err(dev->dev, "failed to allocate sgt\n");
-		return ERR_PTR(-ENOMEM);
+		return ERR_CAST(msm_obj->sgt);
 	}
 
 	msm_obj->pages = p;
@@ -159,7 +159,6 @@ out_unlock:
 out:
 	switch (ret) {
 	case -EAGAIN:
-		set_need_resched();
 	case 0:
 	case -ERESTARTSYS:
 	case -EINTR:
@@ -393,11 +392,14 @@ int msm_gem_queue_inactive_work(struct drm_gem_object *obj,
 }
 
 void msm_gem_move_to_active(struct drm_gem_object *obj,
-		struct msm_gpu *gpu, uint32_t fence)
+		struct msm_gpu *gpu, bool write, uint32_t fence)
 {
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 	msm_obj->gpu = gpu;
-	msm_obj->fence = fence;
+	if (write)
+		msm_obj->write_fence = fence;
+	else
+		msm_obj->read_fence = fence;
 	list_del_init(&msm_obj->mm_list);
 	list_add_tail(&msm_obj->mm_list, &gpu->active_list);
 }
@@ -411,7 +413,8 @@ void msm_gem_move_to_inactive(struct drm_gem_object *obj)
 	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
 
 	msm_obj->gpu = NULL;
-	msm_obj->fence = 0;
+	msm_obj->read_fence = 0;
+	msm_obj->write_fence = 0;
 	list_del_init(&msm_obj->mm_list);
 	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
 
@@ -433,8 +436,18 @@ int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op,
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 	int ret = 0;
 
-	if (is_active(msm_obj) && !(op & MSM_PREP_NOSYNC))
-		ret = msm_wait_fence_interruptable(dev, msm_obj->fence, timeout);
+	if (is_active(msm_obj)) {
+		uint32_t fence = 0;
+
+		if (op & MSM_PREP_READ)
+			fence = msm_obj->write_fence;
+		if (op & MSM_PREP_WRITE)
+			fence = max(fence, msm_obj->read_fence);
+		if (op & MSM_PREP_NOSYNC)
+			timeout = NULL;
+
+		ret = msm_wait_fence_interruptable(dev, fence, timeout);
+	}
 
 	/* TODO cache maintenance */
 
@@ -455,9 +468,10 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
 	uint64_t off = drm_vma_node_start(&obj->vma_node);
 
 	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
-	seq_printf(m, "%08x: %c(%d) %2d (%2d) %08llx %p %d\n",
+	seq_printf(m, "%08x: %c(r=%u,w=%u) %2d (%2d) %08llx %p %d\n",
 			msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
-			msm_obj->fence, obj->name, obj->refcount.refcount.counter,
+			msm_obj->read_fence, msm_obj->write_fence,
+			obj->name, obj->refcount.refcount.counter,
 			off, msm_obj->vaddr, obj->size);
 }
 
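Note: splitting the single fence into read_fence/write_fence encodes the usual GPU/CPU coherency rule: a CPU read only needs the last GPU *write* to have retired, while a CPU write must also wait out outstanding GPU *reads*; MSM_PREP_NOSYNC now degenerates into a NULL timeout, i.e. a poll. Going by the new signature, the expected call shape after a submit is roughly (hypothetical bo/fence names):

    /* buffers referenced by a just-submitted batch, tagged per access: */
    msm_gem_move_to_active(src_bo, gpu, false, submit_fence); /* GPU reads */
    msm_gem_move_to_active(dst_bo, gpu, true, submit_fence);  /* GPU writes */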
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index d746f13d283c..0676f32e2c6a 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -36,7 +36,7 @@ struct msm_gem_object {
 	 */
 	struct list_head mm_list;
 	struct msm_gpu *gpu;     /* non-null if active */
-	uint32_t fence;
+	uint32_t read_fence, write_fence;
 
 	/* Transiently in the process of submit ioctl, objects associated
 	 * with the submit are on submit->bo_list.. this only lasts for
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c index 3e1ef3a00f60..5281d4bc37f7 100644 --- a/drivers/gpu/drm/msm/msm_gem_submit.c +++ b/drivers/gpu/drm/msm/msm_gem_submit.c | |||
| @@ -78,7 +78,7 @@ static int submit_lookup_objects(struct msm_gem_submit *submit, | |||
| 78 | } | 78 | } |
| 79 | 79 | ||
| 80 | if (submit_bo.flags & BO_INVALID_FLAGS) { | 80 | if (submit_bo.flags & BO_INVALID_FLAGS) { |
| 81 | DBG("invalid flags: %x", submit_bo.flags); | 81 | DRM_ERROR("invalid flags: %x\n", submit_bo.flags); |
| 82 | ret = -EINVAL; | 82 | ret = -EINVAL; |
| 83 | goto out_unlock; | 83 | goto out_unlock; |
| 84 | } | 84 | } |
| @@ -92,7 +92,7 @@ static int submit_lookup_objects(struct msm_gem_submit *submit, | |||
| 92 | */ | 92 | */ |
| 93 | obj = idr_find(&file->object_idr, submit_bo.handle); | 93 | obj = idr_find(&file->object_idr, submit_bo.handle); |
| 94 | if (!obj) { | 94 | if (!obj) { |
| 95 | DBG("invalid handle %u at index %u", submit_bo.handle, i); | 95 | DRM_ERROR("invalid handle %u at index %u\n", submit_bo.handle, i); |
| 96 | ret = -EINVAL; | 96 | ret = -EINVAL; |
| 97 | goto out_unlock; | 97 | goto out_unlock; |
| 98 | } | 98 | } |
| @@ -100,7 +100,7 @@ static int submit_lookup_objects(struct msm_gem_submit *submit, | |||
| 100 | msm_obj = to_msm_bo(obj); | 100 | msm_obj = to_msm_bo(obj); |
| 101 | 101 | ||
| 102 | if (!list_empty(&msm_obj->submit_entry)) { | 102 | if (!list_empty(&msm_obj->submit_entry)) { |
| 103 | DBG("handle %u at index %u already on submit list", | 103 | DRM_ERROR("handle %u at index %u already on submit list\n", |
| 104 | submit_bo.handle, i); | 104 | submit_bo.handle, i); |
| 105 | ret = -EINVAL; | 105 | ret = -EINVAL; |
| 106 | goto out_unlock; | 106 | goto out_unlock; |
| @@ -216,8 +216,9 @@ static int submit_bo(struct msm_gem_submit *submit, uint32_t idx, | |||
| 216 | struct msm_gem_object **obj, uint32_t *iova, bool *valid) | 216 | struct msm_gem_object **obj, uint32_t *iova, bool *valid) |
| 217 | { | 217 | { |
| 218 | if (idx >= submit->nr_bos) { | 218 | if (idx >= submit->nr_bos) { |
| 219 | DBG("invalid buffer index: %u (out of %u)", idx, submit->nr_bos); | 219 | DRM_ERROR("invalid buffer index: %u (out of %u)\n", |
| 220 | return EINVAL; | 220 | idx, submit->nr_bos); |
| 221 | return -EINVAL; | ||
| 221 | } | 222 | } |
| 222 | 223 | ||
| 223 | if (obj) | 224 | if (obj) |
| @@ -239,7 +240,7 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob | |||
| 239 | int ret; | 240 | int ret; |
| 240 | 241 | ||
| 241 | if (offset % 4) { | 242 | if (offset % 4) { |
| 242 | DBG("non-aligned cmdstream buffer: %u", offset); | 243 | DRM_ERROR("non-aligned cmdstream buffer: %u\n", offset); |
| 243 | return -EINVAL; | 244 | return -EINVAL; |
| 244 | } | 245 | } |
| 245 | 246 | ||
| @@ -266,7 +267,7 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob | |||
| 266 | return -EFAULT; | 267 | return -EFAULT; |
| 267 | 268 | ||
| 268 | if (submit_reloc.submit_offset % 4) { | 269 | if (submit_reloc.submit_offset % 4) { |
| 269 | DBG("non-aligned reloc offset: %u", | 270 | DRM_ERROR("non-aligned reloc offset: %u\n", |
| 270 | submit_reloc.submit_offset); | 271 | submit_reloc.submit_offset); |
| 271 | return -EINVAL; | 272 | return -EINVAL; |
| 272 | } | 273 | } |
| @@ -276,7 +277,7 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob | |||
| 276 | 277 | ||
| 277 | if ((off >= (obj->base.size / 4)) || | 278 | if ((off >= (obj->base.size / 4)) || |
| 278 | (off < last_offset)) { | 279 | (off < last_offset)) { |
| 279 | DBG("invalid offset %u at reloc %u", off, i); | 280 | DRM_ERROR("invalid offset %u at reloc %u\n", off, i); |
| 280 | return -EINVAL; | 281 | return -EINVAL; |
| 281 | } | 282 | } |
| 282 | 283 | ||
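
The two alignment hunks enforce the same invariant in submit_reloc(): the command stream is consumed as 32-bit words, so byte offsets into it must be dword-aligned before being divided by four (visible above in the off >= obj->base.size / 4 comparison). The rule, distilled into a hypothetical helper:

#include <stdbool.h>
#include <stdint.h>

/* Offsets into the cmdstream index 32-bit words, so the byte offset
 * must be a multiple of four.
 */
static inline bool dword_aligned(uint32_t byte_offset)
{
	return (byte_offset % 4) == 0;	/* i.e. !(byte_offset & 0x3) */
}
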
| @@ -374,14 +375,15 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, | |||
| 374 | goto out; | 375 | goto out; |
| 375 | 376 | ||
| 376 | if (submit_cmd.size % 4) { | 377 | if (submit_cmd.size % 4) { |
| 377 | DBG("non-aligned cmdstream buffer size: %u", | 378 | DRM_ERROR("non-aligned cmdstream buffer size: %u\n", |
| 378 | submit_cmd.size); | 379 | submit_cmd.size); |
| 379 | ret = -EINVAL; | 380 | ret = -EINVAL; |
| 380 | goto out; | 381 | goto out; |
| 381 | } | 382 | } |
| 382 | 383 | ||
| 383 | if (submit_cmd.size >= msm_obj->base.size) { | 384 | if ((submit_cmd.size + submit_cmd.submit_offset) >= |
| 384 | DBG("invalid cmdstream size: %u", submit_cmd.size); | 385 | msm_obj->base.size) { |
| 386 | DRM_ERROR("invalid cmdstream size: %u\n", submit_cmd.size); | ||
| 385 | ret = -EINVAL; | 387 | ret = -EINVAL; |
| 386 | goto out; | 388 | goto out; |
| 387 | } | 389 | } |
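
The last hunk closes a real hole: the old check compared only submit_cmd.size against the BO size, so a stream starting at a nonzero submit_offset could still run past the end of the buffer. One caveat with the new form is that a 32-bit offset + size addition can in principle wrap; a wrap-proof way to express the same range check is sketched below (hypothetical helper, not from the patch):

#include <stdbool.h>
#include <stdint.h>

static bool cmdstream_fits(uint32_t offset, uint32_t size,
			   uint32_t obj_size)
{
	/* equivalent to offset + size < obj_size, minus the wraparound:
	 * size < obj_size guarantees obj_size - size cannot underflow.
	 */
	return size < obj_size && offset < obj_size - size;
}
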
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c index e1e1ec9321ff..3bab937965d1 100644 --- a/drivers/gpu/drm/msm/msm_gpu.c +++ b/drivers/gpu/drm/msm/msm_gpu.c | |||
| @@ -29,13 +29,14 @@ | |||
| 29 | static void bs_init(struct msm_gpu *gpu, struct platform_device *pdev) | 29 | static void bs_init(struct msm_gpu *gpu, struct platform_device *pdev) |
| 30 | { | 30 | { |
| 31 | struct drm_device *dev = gpu->dev; | 31 | struct drm_device *dev = gpu->dev; |
| 32 | struct kgsl_device_platform_data *pdata = pdev->dev.platform_data; | 32 | struct kgsl_device_platform_data *pdata; |
| 33 | 33 | ||
| 34 | if (!pdev) { | 34 | if (!pdev) { |
| 35 | dev_err(dev->dev, "could not find dtv pdata\n"); | 35 | dev_err(dev->dev, "could not find dtv pdata\n"); |
| 36 | return; | 36 | return; |
| 37 | } | 37 | } |
| 38 | 38 | ||
| 39 | pdata = pdev->dev.platform_data; | ||
| 39 | if (pdata->bus_scale_table) { | 40 | if (pdata->bus_scale_table) { |
| 40 | gpu->bsc = msm_bus_scale_register_client(pdata->bus_scale_table); | 41 | gpu->bsc = msm_bus_scale_register_client(pdata->bus_scale_table); |
| 41 | DBG("bus scale client: %08x", gpu->bsc); | 42 | DBG("bus scale client: %08x", gpu->bsc); |
| @@ -230,6 +231,8 @@ static void hangcheck_timer_reset(struct msm_gpu *gpu) | |||
| 230 | static void hangcheck_handler(unsigned long data) | 231 | static void hangcheck_handler(unsigned long data) |
| 231 | { | 232 | { |
| 232 | struct msm_gpu *gpu = (struct msm_gpu *)data; | 233 | struct msm_gpu *gpu = (struct msm_gpu *)data; |
| 234 | struct drm_device *dev = gpu->dev; | ||
| 235 | struct msm_drm_private *priv = dev->dev_private; | ||
| 233 | uint32_t fence = gpu->funcs->last_fence(gpu); | 236 | uint32_t fence = gpu->funcs->last_fence(gpu); |
| 234 | 237 | ||
| 235 | if (fence != gpu->hangcheck_fence) { | 238 | if (fence != gpu->hangcheck_fence) { |
| @@ -237,14 +240,22 @@ static void hangcheck_handler(unsigned long data) | |||
| 237 | gpu->hangcheck_fence = fence; | 240 | gpu->hangcheck_fence = fence; |
| 238 | } else if (fence < gpu->submitted_fence) { | 241 | } else if (fence < gpu->submitted_fence) { |
| 239 | /* no progress and not done.. hung! */ | 242 | /* no progress and not done.. hung! */ |
| 240 | struct msm_drm_private *priv = gpu->dev->dev_private; | ||
| 241 | gpu->hangcheck_fence = fence; | 243 | gpu->hangcheck_fence = fence; |
| 244 | dev_err(dev->dev, "%s: hangcheck detected gpu lockup!\n", | ||
| 245 | gpu->name); | ||
| 246 | dev_err(dev->dev, "%s: completed fence: %u\n", | ||
| 247 | gpu->name, fence); | ||
| 248 | dev_err(dev->dev, "%s: submitted fence: %u\n", | ||
| 249 | gpu->name, gpu->submitted_fence); | ||
| 242 | queue_work(priv->wq, &gpu->recover_work); | 250 | queue_work(priv->wq, &gpu->recover_work); |
| 243 | } | 251 | } |
| 244 | 252 | ||
| 245 | /* if still more pending work, reset the hangcheck timer: */ | 253 | /* if still more pending work, reset the hangcheck timer: */ |
| 246 | if (gpu->submitted_fence > gpu->hangcheck_fence) | 254 | if (gpu->submitted_fence > gpu->hangcheck_fence) |
| 247 | hangcheck_timer_reset(gpu); | 255 | hangcheck_timer_reset(gpu); |
| 256 | |||
| 257 | /* workaround for missing irq: */ | ||
| 258 | queue_work(priv->wq, &gpu->retire_work); | ||
| 248 | } | 259 | } |
| 249 | 260 | ||
| 250 | /* | 261 | /* |
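
Two things happen in the hangcheck hunk: lockups now log the completed vs. submitted fence values before scheduling recovery, and retire_work is queued on every timer tick. The second part matters because a lost completion IRQ would otherwise leave already-finished BOs parked on active_list indefinitely; the periodic timer puts an upper bound on that. A kernel-style sketch of how such a timer is typically re-armed (the period value is an assumption, not taken from this diff):

#define DRM_MSM_HANGCHECK_PERIOD 500 /* ms; assumed, not from the diff */

static void hangcheck_timer_reset(struct msm_gpu *gpu)
{
	mod_timer(&gpu->hangcheck_timer,
		  round_jiffies_up(jiffies +
			msecs_to_jiffies(DRM_MSM_HANGCHECK_PERIOD)));
}
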
| @@ -265,7 +276,8 @@ static void retire_worker(struct work_struct *work) | |||
| 265 | obj = list_first_entry(&gpu->active_list, | 276 | obj = list_first_entry(&gpu->active_list, |
| 266 | struct msm_gem_object, mm_list); | 277 | struct msm_gem_object, mm_list); |
| 267 | 278 | ||
| 268 | if (obj->fence <= fence) { | 279 | if ((obj->read_fence <= fence) && |
| 280 | (obj->write_fence <= fence)) { | ||
| 269 | /* move to inactive: */ | 281 | /* move to inactive: */ |
| 270 | msm_gem_move_to_inactive(&obj->base); | 282 | msm_gem_move_to_inactive(&obj->base); |
| 271 | msm_gem_put_iova(&obj->base, gpu->id); | 283 | msm_gem_put_iova(&obj->base, gpu->id); |
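
With split fences, retire_worker's idle test becomes the conjunction shown above: an object leaves active_list only once the GPU has passed both its last read and its last write. Because objects are appended in submission order, the walk can stop at the first still-busy entry; the else/break branch falls outside the quoted context, so the full loop shape below is an assumption (hypothetical helper name):

static void retire_bos(struct msm_gpu *gpu, uint32_t fence)
{
	while (!list_empty(&gpu->active_list)) {
		struct msm_gem_object *obj;

		obj = list_first_entry(&gpu->active_list,
				struct msm_gem_object, mm_list);

		if (obj->read_fence > fence || obj->write_fence > fence)
			break;	/* rest of the list is newer; stop */

		msm_gem_move_to_inactive(&obj->base);
		msm_gem_put_iova(&obj->base, gpu->id);
		drm_gem_object_unreference(&obj->base);
	}
}
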
| @@ -321,7 +333,11 @@ int msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, | |||
| 321 | submit->gpu->id, &iova); | 333 | submit->gpu->id, &iova); |
| 322 | } | 334 | } |
| 323 | 335 | ||
| 324 | msm_gem_move_to_active(&msm_obj->base, gpu, submit->fence); | 336 | if (submit->bos[i].flags & MSM_SUBMIT_BO_READ) |
| 337 | msm_gem_move_to_active(&msm_obj->base, gpu, false, submit->fence); | ||
| 338 | |||
| 339 | if (submit->bos[i].flags & MSM_SUBMIT_BO_WRITE) | ||
| 340 | msm_gem_move_to_active(&msm_obj->base, gpu, true, submit->fence); | ||
| 325 | } | 341 | } |
| 326 | hangcheck_timer_reset(gpu); | 342 | hangcheck_timer_reset(gpu); |
| 327 | mutex_unlock(&dev->struct_mutex); | 343 | mutex_unlock(&dev->struct_mutex); |
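
The submit path now stamps fences according to the per-BO flags userspace supplied: MSM_SUBMIT_BO_READ updates read_fence, MSM_SUBMIT_BO_WRITE updates write_fence, and a BO flagged both ways takes both calls. A sketch of the updated helper, reconstructed from these call sites (the body is an assumption, not quoted from the patch):

void msm_gem_move_to_active(struct drm_gem_object *obj,
		struct msm_gpu *gpu, bool write, uint32_t fence)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	msm_obj->gpu = gpu;
	if (write)
		msm_obj->write_fence = fence;
	else
		msm_obj->read_fence = fence;

	/* move (or keep) the object on the GPU's active list */
	list_del(&msm_obj->mm_list);
	list_add_tail(&msm_obj->mm_list, &gpu->active_list);
}
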
diff --git a/include/drm/drmP.h b/include/drm/drmP.h index 290734191f72..b46fb45f2cca 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h | |||
| @@ -1322,10 +1322,9 @@ extern int drm_newctx(struct drm_device *dev, void *data, | |||
| 1322 | extern int drm_rmctx(struct drm_device *dev, void *data, | 1322 | extern int drm_rmctx(struct drm_device *dev, void *data, |
| 1323 | struct drm_file *file_priv); | 1323 | struct drm_file *file_priv); |
| 1324 | 1324 | ||
| 1325 | extern void drm_legacy_ctxbitmap_init(struct drm_device *dev); | 1325 | extern int drm_ctxbitmap_init(struct drm_device *dev); |
| 1326 | extern void drm_legacy_ctxbitmap_cleanup(struct drm_device *dev); | 1326 | extern void drm_ctxbitmap_cleanup(struct drm_device *dev); |
| 1327 | extern void drm_legacy_ctxbitmap_release(struct drm_device *dev, | 1327 | extern void drm_ctxbitmap_free(struct drm_device *dev, int ctx_handle); |
| 1328 | struct drm_file *file_priv); | ||
| 1329 | 1328 | ||
| 1330 | extern int drm_setsareactx(struct drm_device *dev, void *data, | 1329 | extern int drm_setsareactx(struct drm_device *dev, void *data, |
| 1331 | struct drm_file *file_priv); | 1330 | struct drm_file *file_priv); |
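
The drmP.h hunk re-exports the ctxbitmap API under its old names, with drm_ctxbitmap_init() returning int again so device setup can propagate an allocation failure instead of ignoring it. The presumed call site pattern, for illustration only (not part of this diff):

ret = drm_ctxbitmap_init(dev);
if (ret) {
	DRM_ERROR("Cannot allocate memory for context bitmap.\n");
	goto err_out;
}
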
