diff options
author | Dave Airlie <airlied@redhat.com> | 2016-10-20 23:26:58 -0400 |
---|---|---|
committer | Dave Airlie <airlied@redhat.com> | 2016-10-20 23:26:58 -0400 |
commit | 96ebf7cb9fe6cf356fe455ee159a2cea06ff9014 (patch) | |
tree | bdec0d1d437d826a6e8f1de7eb567dfbedcc1059 | |
parent | e947f03d9367d8fdce1054c7df2387662a9daa50 (diff) | |
parent | 862f6157d176c9db5a7ed423245108d9bb3d7038 (diff) |
Merge branch 'drm-vmwgfx-fixes' of ssh://people.freedesktop.org/~syeh/repos_linux into drm-fixes
vmwgfx cleanups and fixes.
* 'drm-vmwgfx-fixes' of ssh://people.freedesktop.org/~syeh/repos_linux:
drm/vmwgfx: Adjust checks for null pointers in 13 functions
drm/vmwgfx: Use memdup_user() rather than duplicating its implementation
drm/vmwgfx: Use kmalloc_array() in vmw_surface_define_ioctl()
drm/vmwgfx: Avoid validating views on view destruction
drm/vmwgfx: Limit the user-space command buffer size
drm/vmwgfx: Remove a leftover debug printout
drm/vmwgfx: Allow resource relocations on byte boundaries
drm/vmwgfx: Enable SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER command
drm/vmwgfx: Remove call to reservation_object_test_signaled_rcu before wait
drm/vmwgfx: Replace numeric parameter like 0444 with macro
-rw-r--r-- | drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 10 | ||||
-rw-r--r-- | drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | 2 | ||||
-rw-r--r-- | drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 145 | ||||
-rw-r--r-- | drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | 6 | ||||
-rw-r--r-- | drivers/gpu/drm/vmwgfx/vmwgfx_surface.c | 56 |
5 files changed, 146 insertions(+), 73 deletions(-)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index e8ae3dc476d1..18061a4bc2f2 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | |||
@@ -241,15 +241,15 @@ static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val, | |||
241 | void *ptr); | 241 | void *ptr); |
242 | 242 | ||
243 | MODULE_PARM_DESC(enable_fbdev, "Enable vmwgfx fbdev"); | 243 | MODULE_PARM_DESC(enable_fbdev, "Enable vmwgfx fbdev"); |
244 | module_param_named(enable_fbdev, enable_fbdev, int, 0600); | 244 | module_param_named(enable_fbdev, enable_fbdev, int, S_IRUSR | S_IWUSR); |
245 | MODULE_PARM_DESC(force_dma_api, "Force using the DMA API for TTM pages"); | 245 | MODULE_PARM_DESC(force_dma_api, "Force using the DMA API for TTM pages"); |
246 | module_param_named(force_dma_api, vmw_force_iommu, int, 0600); | 246 | module_param_named(force_dma_api, vmw_force_iommu, int, S_IRUSR | S_IWUSR); |
247 | MODULE_PARM_DESC(restrict_iommu, "Try to limit IOMMU usage for TTM pages"); | 247 | MODULE_PARM_DESC(restrict_iommu, "Try to limit IOMMU usage for TTM pages"); |
248 | module_param_named(restrict_iommu, vmw_restrict_iommu, int, 0600); | 248 | module_param_named(restrict_iommu, vmw_restrict_iommu, int, S_IRUSR | S_IWUSR); |
249 | MODULE_PARM_DESC(force_coherent, "Force coherent TTM pages"); | 249 | MODULE_PARM_DESC(force_coherent, "Force coherent TTM pages"); |
250 | module_param_named(force_coherent, vmw_force_coherent, int, 0600); | 250 | module_param_named(force_coherent, vmw_force_coherent, int, S_IRUSR | S_IWUSR); |
251 | MODULE_PARM_DESC(restrict_dma_mask, "Restrict DMA mask to 44 bits with IOMMU"); | 251 | MODULE_PARM_DESC(restrict_dma_mask, "Restrict DMA mask to 44 bits with IOMMU"); |
252 | module_param_named(restrict_dma_mask, vmw_restrict_dma_mask, int, 0600); | 252 | module_param_named(restrict_dma_mask, vmw_restrict_dma_mask, int, S_IRUSR | S_IWUSR); |
253 | MODULE_PARM_DESC(assume_16bpp, "Assume 16-bpp when filtering modes"); | 253 | MODULE_PARM_DESC(assume_16bpp, "Assume 16-bpp when filtering modes"); |
254 | module_param_named(assume_16bpp, vmw_assume_16bpp, int, 0600); | 254 | module_param_named(assume_16bpp, vmw_assume_16bpp, int, 0600); |
255 | 255 | ||
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h index 070d750af16d..1e59a486bba8 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | |||
@@ -43,7 +43,7 @@ | |||
43 | 43 | ||
44 | #define VMWGFX_DRIVER_DATE "20160210" | 44 | #define VMWGFX_DRIVER_DATE "20160210" |
45 | #define VMWGFX_DRIVER_MAJOR 2 | 45 | #define VMWGFX_DRIVER_MAJOR 2 |
46 | #define VMWGFX_DRIVER_MINOR 10 | 46 | #define VMWGFX_DRIVER_MINOR 11 |
47 | #define VMWGFX_DRIVER_PATCHLEVEL 0 | 47 | #define VMWGFX_DRIVER_PATCHLEVEL 0 |
48 | #define VMWGFX_FILE_PAGE_OFFSET 0x00100000 | 48 | #define VMWGFX_FILE_PAGE_OFFSET 0x00100000 |
49 | #define VMWGFX_FIFO_STATIC_SIZE (1024*1024) | 49 | #define VMWGFX_FIFO_STATIC_SIZE (1024*1024) |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c index dc5beff2b4aa..c7b53d987f06 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | |||
@@ -35,17 +35,37 @@ | |||
35 | #define VMW_RES_HT_ORDER 12 | 35 | #define VMW_RES_HT_ORDER 12 |
36 | 36 | ||
37 | /** | 37 | /** |
38 | * enum vmw_resource_relocation_type - Relocation type for resources | ||
39 | * | ||
40 | * @vmw_res_rel_normal: Traditional relocation. The resource id in the | ||
41 | * command stream is replaced with the actual id after validation. | ||
42 | * @vmw_res_rel_nop: NOP relocation. The command is unconditionally replaced | ||
43 | * with a NOP. | ||
44 | * @vmw_res_rel_cond_nop: Conditional NOP relocation. If the resource id | ||
45 | * after validation is -1, the command is replaced with a NOP. Otherwise no | ||
46 | * action. | ||
47 | */ | ||
48 | enum vmw_resource_relocation_type { | ||
49 | vmw_res_rel_normal, | ||
50 | vmw_res_rel_nop, | ||
51 | vmw_res_rel_cond_nop, | ||
52 | vmw_res_rel_max | ||
53 | }; | ||
54 | |||
55 | /** | ||
38 | * struct vmw_resource_relocation - Relocation info for resources | 56 | * struct vmw_resource_relocation - Relocation info for resources |
39 | * | 57 | * |
40 | * @head: List head for the software context's relocation list. | 58 | * @head: List head for the software context's relocation list. |
41 | * @res: Non-ref-counted pointer to the resource. | 59 | * @res: Non-ref-counted pointer to the resource. |
42 | * @offset: Offset of 4 byte entries into the command buffer where the | 60 | * @offset: Offset of single byte entries into the command buffer where the |
43 | * id that needs fixup is located. | 61 | * id that needs fixup is located. |
62 | * @rel_type: Type of relocation. | ||
44 | */ | 63 | */ |
45 | struct vmw_resource_relocation { | 64 | struct vmw_resource_relocation { |
46 | struct list_head head; | 65 | struct list_head head; |
47 | const struct vmw_resource *res; | 66 | const struct vmw_resource *res; |
48 | unsigned long offset; | 67 | u32 offset:29; |
68 | enum vmw_resource_relocation_type rel_type:3; | ||
49 | }; | 69 | }; |
50 | 70 | ||
51 | /** | 71 | /** |
@@ -109,7 +129,18 @@ static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context, | |||
109 | struct vmw_dma_buffer *vbo, | 129 | struct vmw_dma_buffer *vbo, |
110 | bool validate_as_mob, | 130 | bool validate_as_mob, |
111 | uint32_t *p_val_node); | 131 | uint32_t *p_val_node); |
112 | 132 | /** | |
133 | * vmw_ptr_diff - Compute the offset from a to b in bytes | ||
134 | * | ||
135 | * @a: A starting pointer. | ||
136 | * @b: A pointer offset in the same address space. | ||
137 | * | ||
138 | * Returns: The offset in bytes between the two pointers. | ||
139 | */ | ||
140 | static size_t vmw_ptr_diff(void *a, void *b) | ||
141 | { | ||
142 | return (unsigned long) b - (unsigned long) a; | ||
143 | } | ||
113 | 144 | ||
114 | /** | 145 | /** |
115 | * vmw_resources_unreserve - unreserve resources previously reserved for | 146 | * vmw_resources_unreserve - unreserve resources previously reserved for |
@@ -409,11 +440,14 @@ static int vmw_resource_context_res_add(struct vmw_private *dev_priv, | |||
409 | * @list: Pointer to head of relocation list. | 440 | * @list: Pointer to head of relocation list. |
410 | * @res: The resource. | 441 | * @res: The resource. |
411 | * @offset: Offset into the command buffer currently being parsed where the | 442 | * @offset: Offset into the command buffer currently being parsed where the |
412 | * id that needs fixup is located. Granularity is 4 bytes. | 443 | * id that needs fixup is located. Granularity is one byte. |
444 | * @rel_type: Relocation type. | ||
413 | */ | 445 | */ |
414 | static int vmw_resource_relocation_add(struct list_head *list, | 446 | static int vmw_resource_relocation_add(struct list_head *list, |
415 | const struct vmw_resource *res, | 447 | const struct vmw_resource *res, |
416 | unsigned long offset) | 448 | unsigned long offset, |
449 | enum vmw_resource_relocation_type | ||
450 | rel_type) | ||
417 | { | 451 | { |
418 | struct vmw_resource_relocation *rel; | 452 | struct vmw_resource_relocation *rel; |
419 | 453 | ||
@@ -425,6 +459,7 @@ static int vmw_resource_relocation_add(struct list_head *list, | |||
425 | 459 | ||
426 | rel->res = res; | 460 | rel->res = res; |
427 | rel->offset = offset; | 461 | rel->offset = offset; |
462 | rel->rel_type = rel_type; | ||
428 | list_add_tail(&rel->head, list); | 463 | list_add_tail(&rel->head, list); |
429 | 464 | ||
430 | return 0; | 465 | return 0; |
@@ -459,11 +494,24 @@ static void vmw_resource_relocations_apply(uint32_t *cb, | |||
459 | { | 494 | { |
460 | struct vmw_resource_relocation *rel; | 495 | struct vmw_resource_relocation *rel; |
461 | 496 | ||
497 | /* Validate the struct vmw_resource_relocation member size */ | ||
498 | BUILD_BUG_ON(SVGA_CB_MAX_SIZE >= (1 << 29)); | ||
499 | BUILD_BUG_ON(vmw_res_rel_max >= (1 << 3)); | ||
500 | |||
462 | list_for_each_entry(rel, list, head) { | 501 | list_for_each_entry(rel, list, head) { |
463 | if (likely(rel->res != NULL)) | 502 | u32 *addr = (u32 *)((unsigned long) cb + rel->offset); |
464 | cb[rel->offset] = rel->res->id; | 503 | switch (rel->rel_type) { |
465 | else | 504 | case vmw_res_rel_normal: |
466 | cb[rel->offset] = SVGA_3D_CMD_NOP; | 505 | *addr = rel->res->id; |
506 | break; | ||
507 | case vmw_res_rel_nop: | ||
508 | *addr = SVGA_3D_CMD_NOP; | ||
509 | break; | ||
510 | default: | ||
511 | if (rel->res->id == -1) | ||
512 | *addr = SVGA_3D_CMD_NOP; | ||
513 | break; | ||
514 | } | ||
467 | } | 515 | } |
468 | } | 516 | } |
469 | 517 | ||
@@ -655,7 +703,9 @@ static int vmw_cmd_res_reloc_add(struct vmw_private *dev_priv, | |||
655 | *p_val = NULL; | 703 | *p_val = NULL; |
656 | ret = vmw_resource_relocation_add(&sw_context->res_relocations, | 704 | ret = vmw_resource_relocation_add(&sw_context->res_relocations, |
657 | res, | 705 | res, |
658 | id_loc - sw_context->buf_start); | 706 | vmw_ptr_diff(sw_context->buf_start, |
707 | id_loc), | ||
708 | vmw_res_rel_normal); | ||
659 | if (unlikely(ret != 0)) | 709 | if (unlikely(ret != 0)) |
660 | return ret; | 710 | return ret; |
661 | 711 | ||
@@ -721,7 +771,8 @@ vmw_cmd_res_check(struct vmw_private *dev_priv, | |||
721 | 771 | ||
722 | return vmw_resource_relocation_add | 772 | return vmw_resource_relocation_add |
723 | (&sw_context->res_relocations, res, | 773 | (&sw_context->res_relocations, res, |
724 | id_loc - sw_context->buf_start); | 774 | vmw_ptr_diff(sw_context->buf_start, id_loc), |
775 | vmw_res_rel_normal); | ||
725 | } | 776 | } |
726 | 777 | ||
727 | ret = vmw_user_resource_lookup_handle(dev_priv, | 778 | ret = vmw_user_resource_lookup_handle(dev_priv, |
@@ -2143,10 +2194,10 @@ static int vmw_cmd_shader_define(struct vmw_private *dev_priv, | |||
2143 | return ret; | 2194 | return ret; |
2144 | 2195 | ||
2145 | return vmw_resource_relocation_add(&sw_context->res_relocations, | 2196 | return vmw_resource_relocation_add(&sw_context->res_relocations, |
2146 | NULL, &cmd->header.id - | 2197 | NULL, |
2147 | sw_context->buf_start); | 2198 | vmw_ptr_diff(sw_context->buf_start, |
2148 | 2199 | &cmd->header.id), | |
2149 | return 0; | 2200 | vmw_res_rel_nop); |
2150 | } | 2201 | } |
2151 | 2202 | ||
2152 | /** | 2203 | /** |
@@ -2188,10 +2239,10 @@ static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv, | |||
2188 | return ret; | 2239 | return ret; |
2189 | 2240 | ||
2190 | return vmw_resource_relocation_add(&sw_context->res_relocations, | 2241 | return vmw_resource_relocation_add(&sw_context->res_relocations, |
2191 | NULL, &cmd->header.id - | 2242 | NULL, |
2192 | sw_context->buf_start); | 2243 | vmw_ptr_diff(sw_context->buf_start, |
2193 | 2244 | &cmd->header.id), | |
2194 | return 0; | 2245 | vmw_res_rel_nop); |
2195 | } | 2246 | } |
2196 | 2247 | ||
2197 | /** | 2248 | /** |
@@ -2848,8 +2899,7 @@ static int vmw_cmd_dx_cid_check(struct vmw_private *dev_priv, | |||
2848 | * @header: Pointer to the command header in the command stream. | 2899 | * @header: Pointer to the command header in the command stream. |
2849 | * | 2900 | * |
2850 | * Check that the view exists, and if it was not created using this | 2901 | * Check that the view exists, and if it was not created using this |
2851 | * command batch, make sure it's validated (present in the device) so that | 2902 | * command batch, conditionally make this command a NOP. |
2852 | * the remove command will not confuse the device. | ||
2853 | */ | 2903 | */ |
2854 | static int vmw_cmd_dx_view_remove(struct vmw_private *dev_priv, | 2904 | static int vmw_cmd_dx_view_remove(struct vmw_private *dev_priv, |
2855 | struct vmw_sw_context *sw_context, | 2905 | struct vmw_sw_context *sw_context, |
@@ -2877,10 +2927,16 @@ static int vmw_cmd_dx_view_remove(struct vmw_private *dev_priv, | |||
2877 | return ret; | 2927 | return ret; |
2878 | 2928 | ||
2879 | /* | 2929 | /* |
2880 | * Add view to the validate list iff it was not created using this | 2930 | * If the view wasn't created during this command batch, it might |
2881 | * command batch. | 2931 | * have been removed due to a context swapout, so add a |
2932 | * relocation to conditionally make this command a NOP to avoid | ||
2933 | * device errors. | ||
2882 | */ | 2934 | */ |
2883 | return vmw_view_res_val_add(sw_context, view); | 2935 | return vmw_resource_relocation_add(&sw_context->res_relocations, |
2936 | view, | ||
2937 | vmw_ptr_diff(sw_context->buf_start, | ||
2938 | &cmd->header.id), | ||
2939 | vmw_res_rel_cond_nop); | ||
2884 | } | 2940 | } |
2885 | 2941 | ||
2886 | /** | 2942 | /** |
@@ -3029,6 +3085,35 @@ static int vmw_cmd_dx_genmips(struct vmw_private *dev_priv, | |||
3029 | cmd->body.shaderResourceViewId); | 3085 | cmd->body.shaderResourceViewId); |
3030 | } | 3086 | } |
3031 | 3087 | ||
3088 | /** | ||
3089 | * vmw_cmd_dx_transfer_from_buffer - | ||
3090 | * Validate an SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER command | ||
3091 | * | ||
3092 | * @dev_priv: Pointer to a device private struct. | ||
3093 | * @sw_context: The software context being used for this batch. | ||
3094 | * @header: Pointer to the command header in the command stream. | ||
3095 | */ | ||
3096 | static int vmw_cmd_dx_transfer_from_buffer(struct vmw_private *dev_priv, | ||
3097 | struct vmw_sw_context *sw_context, | ||
3098 | SVGA3dCmdHeader *header) | ||
3099 | { | ||
3100 | struct { | ||
3101 | SVGA3dCmdHeader header; | ||
3102 | SVGA3dCmdDXTransferFromBuffer body; | ||
3103 | } *cmd = container_of(header, typeof(*cmd), header); | ||
3104 | int ret; | ||
3105 | |||
3106 | ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, | ||
3107 | user_surface_converter, | ||
3108 | &cmd->body.srcSid, NULL); | ||
3109 | if (ret != 0) | ||
3110 | return ret; | ||
3111 | |||
3112 | return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, | ||
3113 | user_surface_converter, | ||
3114 | &cmd->body.destSid, NULL); | ||
3115 | } | ||
3116 | |||
3032 | static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv, | 3117 | static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv, |
3033 | struct vmw_sw_context *sw_context, | 3118 | struct vmw_sw_context *sw_context, |
3034 | void *buf, uint32_t *size) | 3119 | void *buf, uint32_t *size) |
@@ -3379,6 +3464,9 @@ static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = { | |||
3379 | &vmw_cmd_buffer_copy_check, true, false, true), | 3464 | &vmw_cmd_buffer_copy_check, true, false, true), |
3380 | VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY_REGION, | 3465 | VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY_REGION, |
3381 | &vmw_cmd_pred_copy_check, true, false, true), | 3466 | &vmw_cmd_pred_copy_check, true, false, true), |
3467 | VMW_CMD_DEF(SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER, | ||
3468 | &vmw_cmd_dx_transfer_from_buffer, | ||
3469 | true, false, true), | ||
3382 | }; | 3470 | }; |
3383 | 3471 | ||
3384 | static int vmw_cmd_check(struct vmw_private *dev_priv, | 3472 | static int vmw_cmd_check(struct vmw_private *dev_priv, |
@@ -3848,14 +3936,14 @@ static void *vmw_execbuf_cmdbuf(struct vmw_private *dev_priv, | |||
3848 | int ret; | 3936 | int ret; |
3849 | 3937 | ||
3850 | *header = NULL; | 3938 | *header = NULL; |
3851 | if (!dev_priv->cman || kernel_commands) | ||
3852 | return kernel_commands; | ||
3853 | |||
3854 | if (command_size > SVGA_CB_MAX_SIZE) { | 3939 | if (command_size > SVGA_CB_MAX_SIZE) { |
3855 | DRM_ERROR("Command buffer is too large.\n"); | 3940 | DRM_ERROR("Command buffer is too large.\n"); |
3856 | return ERR_PTR(-EINVAL); | 3941 | return ERR_PTR(-EINVAL); |
3857 | } | 3942 | } |
3858 | 3943 | ||
3944 | if (!dev_priv->cman || kernel_commands) | ||
3945 | return kernel_commands; | ||
3946 | |||
3859 | /* If possible, add a little space for fencing. */ | 3947 | /* If possible, add a little space for fencing. */ |
3860 | cmdbuf_size = command_size + 512; | 3948 | cmdbuf_size = command_size + 512; |
3861 | cmdbuf_size = min_t(size_t, cmdbuf_size, SVGA_CB_MAX_SIZE); | 3949 | cmdbuf_size = min_t(size_t, cmdbuf_size, SVGA_CB_MAX_SIZE); |
@@ -4232,9 +4320,6 @@ void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv, | |||
4232 | ttm_bo_unref(&query_val.bo); | 4320 | ttm_bo_unref(&query_val.bo); |
4233 | ttm_bo_unref(&pinned_val.bo); | 4321 | ttm_bo_unref(&pinned_val.bo); |
4234 | vmw_dmabuf_unreference(&dev_priv->pinned_bo); | 4322 | vmw_dmabuf_unreference(&dev_priv->pinned_bo); |
4235 | DRM_INFO("Dummy query bo pin count: %d\n", | ||
4236 | dev_priv->dummy_query_bo->pin_count); | ||
4237 | |||
4238 | out_unlock: | 4323 | out_unlock: |
4239 | return; | 4324 | return; |
4240 | 4325 | ||
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c index 6a328d507a28..52ca1c9d070e 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | |||
@@ -574,10 +574,8 @@ static int vmw_user_dmabuf_synccpu_grab(struct vmw_user_dma_buffer *user_bo, | |||
574 | bool nonblock = !!(flags & drm_vmw_synccpu_dontblock); | 574 | bool nonblock = !!(flags & drm_vmw_synccpu_dontblock); |
575 | long lret; | 575 | long lret; |
576 | 576 | ||
577 | if (nonblock) | 577 | lret = reservation_object_wait_timeout_rcu(bo->resv, true, true, |
578 | return reservation_object_test_signaled_rcu(bo->resv, true) ? 0 : -EBUSY; | 578 | nonblock ? 0 : MAX_SCHEDULE_TIMEOUT); |
579 | |||
580 | lret = reservation_object_wait_timeout_rcu(bo->resv, true, true, MAX_SCHEDULE_TIMEOUT); | ||
581 | if (!lret) | 579 | if (!lret) |
582 | return -EBUSY; | 580 | return -EBUSY; |
583 | else if (lret < 0) | 581 | else if (lret < 0) |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c index c2a721a8cef9..b445ce9b9757 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c | |||
@@ -324,7 +324,7 @@ static void vmw_hw_surface_destroy(struct vmw_resource *res) | |||
324 | if (res->id != -1) { | 324 | if (res->id != -1) { |
325 | 325 | ||
326 | cmd = vmw_fifo_reserve(dev_priv, vmw_surface_destroy_size()); | 326 | cmd = vmw_fifo_reserve(dev_priv, vmw_surface_destroy_size()); |
327 | if (unlikely(cmd == NULL)) { | 327 | if (unlikely(!cmd)) { |
328 | DRM_ERROR("Failed reserving FIFO space for surface " | 328 | DRM_ERROR("Failed reserving FIFO space for surface " |
329 | "destruction.\n"); | 329 | "destruction.\n"); |
330 | return; | 330 | return; |
@@ -397,7 +397,7 @@ static int vmw_legacy_srf_create(struct vmw_resource *res) | |||
397 | 397 | ||
398 | submit_size = vmw_surface_define_size(srf); | 398 | submit_size = vmw_surface_define_size(srf); |
399 | cmd = vmw_fifo_reserve(dev_priv, submit_size); | 399 | cmd = vmw_fifo_reserve(dev_priv, submit_size); |
400 | if (unlikely(cmd == NULL)) { | 400 | if (unlikely(!cmd)) { |
401 | DRM_ERROR("Failed reserving FIFO space for surface " | 401 | DRM_ERROR("Failed reserving FIFO space for surface " |
402 | "creation.\n"); | 402 | "creation.\n"); |
403 | ret = -ENOMEM; | 403 | ret = -ENOMEM; |
@@ -446,11 +446,10 @@ static int vmw_legacy_srf_dma(struct vmw_resource *res, | |||
446 | uint8_t *cmd; | 446 | uint8_t *cmd; |
447 | struct vmw_private *dev_priv = res->dev_priv; | 447 | struct vmw_private *dev_priv = res->dev_priv; |
448 | 448 | ||
449 | BUG_ON(val_buf->bo == NULL); | 449 | BUG_ON(!val_buf->bo); |
450 | |||
451 | submit_size = vmw_surface_dma_size(srf); | 450 | submit_size = vmw_surface_dma_size(srf); |
452 | cmd = vmw_fifo_reserve(dev_priv, submit_size); | 451 | cmd = vmw_fifo_reserve(dev_priv, submit_size); |
453 | if (unlikely(cmd == NULL)) { | 452 | if (unlikely(!cmd)) { |
454 | DRM_ERROR("Failed reserving FIFO space for surface " | 453 | DRM_ERROR("Failed reserving FIFO space for surface " |
455 | "DMA.\n"); | 454 | "DMA.\n"); |
456 | return -ENOMEM; | 455 | return -ENOMEM; |
@@ -538,7 +537,7 @@ static int vmw_legacy_srf_destroy(struct vmw_resource *res) | |||
538 | 537 | ||
539 | submit_size = vmw_surface_destroy_size(); | 538 | submit_size = vmw_surface_destroy_size(); |
540 | cmd = vmw_fifo_reserve(dev_priv, submit_size); | 539 | cmd = vmw_fifo_reserve(dev_priv, submit_size); |
541 | if (unlikely(cmd == NULL)) { | 540 | if (unlikely(!cmd)) { |
542 | DRM_ERROR("Failed reserving FIFO space for surface " | 541 | DRM_ERROR("Failed reserving FIFO space for surface " |
543 | "eviction.\n"); | 542 | "eviction.\n"); |
544 | return -ENOMEM; | 543 | return -ENOMEM; |
@@ -578,7 +577,7 @@ static int vmw_surface_init(struct vmw_private *dev_priv, | |||
578 | int ret; | 577 | int ret; |
579 | struct vmw_resource *res = &srf->res; | 578 | struct vmw_resource *res = &srf->res; |
580 | 579 | ||
581 | BUG_ON(res_free == NULL); | 580 | BUG_ON(!res_free); |
582 | if (!dev_priv->has_mob) | 581 | if (!dev_priv->has_mob) |
583 | vmw_fifo_resource_inc(dev_priv); | 582 | vmw_fifo_resource_inc(dev_priv); |
584 | ret = vmw_resource_init(dev_priv, res, true, res_free, | 583 | ret = vmw_resource_init(dev_priv, res, true, res_free, |
@@ -700,7 +699,6 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data, | |||
700 | struct drm_vmw_surface_create_req *req = &arg->req; | 699 | struct drm_vmw_surface_create_req *req = &arg->req; |
701 | struct drm_vmw_surface_arg *rep = &arg->rep; | 700 | struct drm_vmw_surface_arg *rep = &arg->rep; |
702 | struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; | 701 | struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; |
703 | struct drm_vmw_size __user *user_sizes; | ||
704 | int ret; | 702 | int ret; |
705 | int i, j; | 703 | int i, j; |
706 | uint32_t cur_bo_offset; | 704 | uint32_t cur_bo_offset; |
@@ -748,7 +746,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data, | |||
748 | } | 746 | } |
749 | 747 | ||
750 | user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL); | 748 | user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL); |
751 | if (unlikely(user_srf == NULL)) { | 749 | if (unlikely(!user_srf)) { |
752 | ret = -ENOMEM; | 750 | ret = -ENOMEM; |
753 | goto out_no_user_srf; | 751 | goto out_no_user_srf; |
754 | } | 752 | } |
@@ -763,29 +761,21 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data, | |||
763 | memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels)); | 761 | memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels)); |
764 | srf->num_sizes = num_sizes; | 762 | srf->num_sizes = num_sizes; |
765 | user_srf->size = size; | 763 | user_srf->size = size; |
766 | 764 | srf->sizes = memdup_user((struct drm_vmw_size __user *)(unsigned long) | |
767 | srf->sizes = kmalloc(srf->num_sizes * sizeof(*srf->sizes), GFP_KERNEL); | 765 | req->size_addr, |
768 | if (unlikely(srf->sizes == NULL)) { | 766 | sizeof(*srf->sizes) * srf->num_sizes); |
769 | ret = -ENOMEM; | 767 | if (IS_ERR(srf->sizes)) { |
768 | ret = PTR_ERR(srf->sizes); | ||
770 | goto out_no_sizes; | 769 | goto out_no_sizes; |
771 | } | 770 | } |
772 | srf->offsets = kmalloc(srf->num_sizes * sizeof(*srf->offsets), | 771 | srf->offsets = kmalloc_array(srf->num_sizes, |
773 | GFP_KERNEL); | 772 | sizeof(*srf->offsets), |
774 | if (unlikely(srf->offsets == NULL)) { | 773 | GFP_KERNEL); |
774 | if (unlikely(!srf->offsets)) { | ||
775 | ret = -ENOMEM; | 775 | ret = -ENOMEM; |
776 | goto out_no_offsets; | 776 | goto out_no_offsets; |
777 | } | 777 | } |
778 | 778 | ||
779 | user_sizes = (struct drm_vmw_size __user *)(unsigned long) | ||
780 | req->size_addr; | ||
781 | |||
782 | ret = copy_from_user(srf->sizes, user_sizes, | ||
783 | srf->num_sizes * sizeof(*srf->sizes)); | ||
784 | if (unlikely(ret != 0)) { | ||
785 | ret = -EFAULT; | ||
786 | goto out_no_copy; | ||
787 | } | ||
788 | |||
789 | srf->base_size = *srf->sizes; | 779 | srf->base_size = *srf->sizes; |
790 | srf->autogen_filter = SVGA3D_TEX_FILTER_NONE; | 780 | srf->autogen_filter = SVGA3D_TEX_FILTER_NONE; |
791 | srf->multisample_count = 0; | 781 | srf->multisample_count = 0; |
@@ -923,7 +913,7 @@ vmw_surface_handle_reference(struct vmw_private *dev_priv, | |||
923 | 913 | ||
924 | ret = -EINVAL; | 914 | ret = -EINVAL; |
925 | base = ttm_base_object_lookup_for_ref(dev_priv->tdev, handle); | 915 | base = ttm_base_object_lookup_for_ref(dev_priv->tdev, handle); |
926 | if (unlikely(base == NULL)) { | 916 | if (unlikely(!base)) { |
927 | DRM_ERROR("Could not find surface to reference.\n"); | 917 | DRM_ERROR("Could not find surface to reference.\n"); |
928 | goto out_no_lookup; | 918 | goto out_no_lookup; |
929 | } | 919 | } |
@@ -1069,7 +1059,7 @@ static int vmw_gb_surface_create(struct vmw_resource *res) | |||
1069 | 1059 | ||
1070 | cmd = vmw_fifo_reserve(dev_priv, submit_len); | 1060 | cmd = vmw_fifo_reserve(dev_priv, submit_len); |
1071 | cmd2 = (typeof(cmd2))cmd; | 1061 | cmd2 = (typeof(cmd2))cmd; |
1072 | if (unlikely(cmd == NULL)) { | 1062 | if (unlikely(!cmd)) { |
1073 | DRM_ERROR("Failed reserving FIFO space for surface " | 1063 | DRM_ERROR("Failed reserving FIFO space for surface " |
1074 | "creation.\n"); | 1064 | "creation.\n"); |
1075 | ret = -ENOMEM; | 1065 | ret = -ENOMEM; |
@@ -1135,7 +1125,7 @@ static int vmw_gb_surface_bind(struct vmw_resource *res, | |||
1135 | submit_size = sizeof(*cmd1) + (res->backup_dirty ? sizeof(*cmd2) : 0); | 1125 | submit_size = sizeof(*cmd1) + (res->backup_dirty ? sizeof(*cmd2) : 0); |
1136 | 1126 | ||
1137 | cmd1 = vmw_fifo_reserve(dev_priv, submit_size); | 1127 | cmd1 = vmw_fifo_reserve(dev_priv, submit_size); |
1138 | if (unlikely(cmd1 == NULL)) { | 1128 | if (unlikely(!cmd1)) { |
1139 | DRM_ERROR("Failed reserving FIFO space for surface " | 1129 | DRM_ERROR("Failed reserving FIFO space for surface " |
1140 | "binding.\n"); | 1130 | "binding.\n"); |
1141 | return -ENOMEM; | 1131 | return -ENOMEM; |
@@ -1185,7 +1175,7 @@ static int vmw_gb_surface_unbind(struct vmw_resource *res, | |||
1185 | 1175 | ||
1186 | submit_size = sizeof(*cmd3) + (readback ? sizeof(*cmd1) : sizeof(*cmd2)); | 1176 | submit_size = sizeof(*cmd3) + (readback ? sizeof(*cmd1) : sizeof(*cmd2)); |
1187 | cmd = vmw_fifo_reserve(dev_priv, submit_size); | 1177 | cmd = vmw_fifo_reserve(dev_priv, submit_size); |
1188 | if (unlikely(cmd == NULL)) { | 1178 | if (unlikely(!cmd)) { |
1189 | DRM_ERROR("Failed reserving FIFO space for surface " | 1179 | DRM_ERROR("Failed reserving FIFO space for surface " |
1190 | "unbinding.\n"); | 1180 | "unbinding.\n"); |
1191 | return -ENOMEM; | 1181 | return -ENOMEM; |
@@ -1244,7 +1234,7 @@ static int vmw_gb_surface_destroy(struct vmw_resource *res) | |||
1244 | vmw_binding_res_list_scrub(&res->binding_head); | 1234 | vmw_binding_res_list_scrub(&res->binding_head); |
1245 | 1235 | ||
1246 | cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); | 1236 | cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); |
1247 | if (unlikely(cmd == NULL)) { | 1237 | if (unlikely(!cmd)) { |
1248 | DRM_ERROR("Failed reserving FIFO space for surface " | 1238 | DRM_ERROR("Failed reserving FIFO space for surface " |
1249 | "destruction.\n"); | 1239 | "destruction.\n"); |
1250 | mutex_unlock(&dev_priv->binding_mutex); | 1240 | mutex_unlock(&dev_priv->binding_mutex); |
@@ -1410,7 +1400,7 @@ int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data, | |||
1410 | 1400 | ||
1411 | user_srf = container_of(base, struct vmw_user_surface, prime.base); | 1401 | user_srf = container_of(base, struct vmw_user_surface, prime.base); |
1412 | srf = &user_srf->srf; | 1402 | srf = &user_srf->srf; |
1413 | if (srf->res.backup == NULL) { | 1403 | if (!srf->res.backup) { |
1414 | DRM_ERROR("Shared GB surface is missing a backup buffer.\n"); | 1404 | DRM_ERROR("Shared GB surface is missing a backup buffer.\n"); |
1415 | goto out_bad_resource; | 1405 | goto out_bad_resource; |
1416 | } | 1406 | } |
@@ -1524,7 +1514,7 @@ int vmw_surface_gb_priv_define(struct drm_device *dev, | |||
1524 | } | 1514 | } |
1525 | 1515 | ||
1526 | user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL); | 1516 | user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL); |
1527 | if (unlikely(user_srf == NULL)) { | 1517 | if (unlikely(!user_srf)) { |
1528 | ret = -ENOMEM; | 1518 | ret = -ENOMEM; |
1529 | goto out_no_user_srf; | 1519 | goto out_no_user_srf; |
1530 | } | 1520 | } |