author     Thomas Hellstrom <thellstrom@vmware.com>  2012-11-20 07:19:36 -0500
committer  Dave Airlie <airlied@redhat.com>          2012-11-20 16:47:08 -0500
commit     543831cfc976669b8da963b3e94933e21e051846 (patch)
tree       d48ddcdaa3909f9295c24922ee953f44bbdb85de
parent     c0951b797e7d0f2c6b0df2c0e18185c72d0cf1a1 (diff)
drm/vmwgfx: Break out surface and context management to separate files
Add a resource-private header for common resource definitions
Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Reviewed-by: Brian Paul <brianp@vmware.com>
Reviewed-by: Dmitry Torokhov <dtor@vmware.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
 drivers/gpu/drm/vmwgfx/Makefile               |    3
 drivers/gpu/drm/vmwgfx/vmwgfx_context.c       |  274
 drivers/gpu/drm/vmwgfx/vmwgfx_resource.c      | 1243
 drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h |   84
 drivers/gpu/drm/vmwgfx/vmwgfx_surface.c       |  968
 5 files changed, 1337 insertions(+), 1235 deletions(-)
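
The new drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h (84 lines in the diffstat) is not itself shown in this excerpt. Judging from the definitions this patch removes from vmwgfx_resource.c and the resource functions it un-staticizes below, the header plausibly carries declarations along these lines; this is a reconstruction for orientation, not the verbatim file:

/* Sketch of vmwgfx_resource_priv.h, reconstructed from this patch. */
struct vmw_user_resource_conv {
	enum ttm_object_type object_type;
	struct vmw_resource *(*base_obj_to_res)(struct ttm_base_object *base);
	void (*res_free) (struct vmw_resource *res);
};

struct vmw_res_func {
	enum vmw_res_type res_type;
	bool needs_backup;
	const char *type_name;
	struct ttm_placement *backup_placement;
	bool may_evict;

	int (*create) (struct vmw_resource *res);
	int (*destroy) (struct vmw_resource *res);
	int (*bind) (struct vmw_resource *res,
		     struct ttm_validate_buffer *val_buf);
	int (*unbind) (struct vmw_resource *res,
		       bool readback,
		       struct ttm_validate_buffer *val_buf);
};

int vmw_resource_alloc_id(struct vmw_resource *res);
void vmw_resource_release_id(struct vmw_resource *res);
int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
		      bool delay_id,
		      void (*res_free) (struct vmw_resource *res),
		      const struct vmw_res_func *func);
void vmw_resource_activate(struct vmw_resource *res,
			   void (*hw_destroy) (struct vmw_resource *));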
diff --git a/drivers/gpu/drm/vmwgfx/Makefile b/drivers/gpu/drm/vmwgfx/Makefile
index 586869c8c11f..2cc6cd91ac11 100644
--- a/drivers/gpu/drm/vmwgfx/Makefile
+++ b/drivers/gpu/drm/vmwgfx/Makefile
@@ -5,6 +5,7 @@ vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
 	vmwgfx_fb.o vmwgfx_ioctl.o vmwgfx_resource.o vmwgfx_buffer.o \
 	vmwgfx_fifo.o vmwgfx_irq.o vmwgfx_ldu.o vmwgfx_ttm_glue.o \
 	vmwgfx_overlay.o vmwgfx_marker.o vmwgfx_gmrid_manager.o \
-	vmwgfx_fence.o vmwgfx_dmabuf.o vmwgfx_scrn.o
+	vmwgfx_fence.o vmwgfx_dmabuf.o vmwgfx_scrn.o vmwgfx_context.o \
+	vmwgfx_surface.o
 
 obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
new file mode 100644
index 000000000000..00ae0925aca8
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
@@ -0,0 +1,274 @@
+/**************************************************************************
+ *
+ * Copyright © 2009-2012 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "vmwgfx_drv.h"
+#include "vmwgfx_resource_priv.h"
+#include "ttm/ttm_placement.h"
+
+struct vmw_user_context {
+	struct ttm_base_object base;
+	struct vmw_resource res;
+};
+
+static void vmw_user_context_free(struct vmw_resource *res);
+static struct vmw_resource *
+vmw_user_context_base_to_res(struct ttm_base_object *base);
+
+static uint64_t vmw_user_context_size;
+
+static const struct vmw_user_resource_conv user_context_conv = {
+	.object_type = VMW_RES_CONTEXT,
+	.base_obj_to_res = vmw_user_context_base_to_res,
+	.res_free = vmw_user_context_free
+};
+
+const struct vmw_user_resource_conv *user_context_converter =
+	&user_context_conv;
+
+
+static const struct vmw_res_func vmw_legacy_context_func = {
+	.res_type = vmw_res_context,
+	.needs_backup = false,
+	.may_evict = false,
+	.type_name = "legacy contexts",
+	.backup_placement = NULL,
+	.create = NULL,
+	.destroy = NULL,
+	.bind = NULL,
+	.unbind = NULL
+};
+
+/**
+ * Context management:
+ */
+
+static void vmw_hw_context_destroy(struct vmw_resource *res)
+{
+
+	struct vmw_private *dev_priv = res->dev_priv;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDestroyContext body;
+	} *cmd;
+
+
+	vmw_execbuf_release_pinned_bo(dev_priv);
+	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Failed reserving FIFO space for context "
+			  "destruction.\n");
+		return;
+	}
+
+	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DESTROY);
+	cmd->header.size = cpu_to_le32(sizeof(cmd->body));
+	cmd->body.cid = cpu_to_le32(res->id);
+
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+	vmw_3d_resource_dec(dev_priv, false);
+}
+
+static int vmw_context_init(struct vmw_private *dev_priv,
+			    struct vmw_resource *res,
+			    void (*res_free) (struct vmw_resource *res))
+{
+	int ret;
+
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDefineContext body;
+	} *cmd;
+
+	ret = vmw_resource_init(dev_priv, res, false,
+				res_free, &vmw_legacy_context_func);
+
+	if (unlikely(ret != 0)) {
+		DRM_ERROR("Failed to allocate a resource id.\n");
+		goto out_early;
+	}
+
+	if (unlikely(res->id >= SVGA3D_MAX_CONTEXT_IDS)) {
+		DRM_ERROR("Out of hw context ids.\n");
+		vmw_resource_unreference(&res);
+		return -ENOMEM;
+	}
+
+	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Fifo reserve failed.\n");
+		vmw_resource_unreference(&res);
+		return -ENOMEM;
+	}
+
+	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DEFINE);
+	cmd->header.size = cpu_to_le32(sizeof(cmd->body));
+	cmd->body.cid = cpu_to_le32(res->id);
+
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+	(void) vmw_3d_resource_inc(dev_priv, false);
+	vmw_resource_activate(res, vmw_hw_context_destroy);
+	return 0;
+
+out_early:
+	if (res_free == NULL)
+		kfree(res);
+	else
+		res_free(res);
+	return ret;
+}
+
+struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv)
+{
+	struct vmw_resource *res = kmalloc(sizeof(*res), GFP_KERNEL);
+	int ret;
+
+	if (unlikely(res == NULL))
+		return NULL;
+
+	ret = vmw_context_init(dev_priv, res, NULL);
+
+	return (ret == 0) ? res : NULL;
+}
+
+/**
+ * User-space context management:
+ */
+
+static struct vmw_resource *
+vmw_user_context_base_to_res(struct ttm_base_object *base)
+{
+	return &(container_of(base, struct vmw_user_context, base)->res);
+}
+
+static void vmw_user_context_free(struct vmw_resource *res)
+{
+	struct vmw_user_context *ctx =
+	    container_of(res, struct vmw_user_context, res);
+	struct vmw_private *dev_priv = res->dev_priv;
+
+	ttm_base_object_kfree(ctx, base);
+	ttm_mem_global_free(vmw_mem_glob(dev_priv),
+			    vmw_user_context_size);
+}
+
+/**
+ * This function is called when user space has no more references on the
+ * base object. It releases the base-object's reference on the resource object.
+ */
+
+static void vmw_user_context_base_release(struct ttm_base_object **p_base)
+{
+	struct ttm_base_object *base = *p_base;
+	struct vmw_user_context *ctx =
+	    container_of(base, struct vmw_user_context, base);
+	struct vmw_resource *res = &ctx->res;
+
+	*p_base = NULL;
+	vmw_resource_unreference(&res);
+}
+
+int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
+			      struct drm_file *file_priv)
+{
+	struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
+	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+
+	return ttm_ref_object_base_unref(tfile, arg->cid, TTM_REF_USAGE);
+}
+
+int vmw_context_define_ioctl(struct drm_device *dev, void *data,
+			     struct drm_file *file_priv)
+{
+	struct vmw_private *dev_priv = vmw_priv(dev);
+	struct vmw_user_context *ctx;
+	struct vmw_resource *res;
+	struct vmw_resource *tmp;
+	struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
+	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+	struct vmw_master *vmaster = vmw_master(file_priv->master);
+	int ret;
+
+
+	/*
+	 * Approximate idr memory usage with 128 bytes. It will be limited
+	 * by maximum number of contexts anyway.
+	 */
+
+	if (unlikely(vmw_user_context_size == 0))
+		vmw_user_context_size = ttm_round_pot(sizeof(*ctx)) + 128;
+
+	ret = ttm_read_lock(&vmaster->lock, true);
+	if (unlikely(ret != 0))
+		return ret;
+
+	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
+				   vmw_user_context_size,
+				   false, true);
+	if (unlikely(ret != 0)) {
+		if (ret != -ERESTARTSYS)
+			DRM_ERROR("Out of graphics memory for context"
+				  " creation.\n");
+		goto out_unlock;
+	}
+
+	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+	if (unlikely(ctx == NULL)) {
+		ttm_mem_global_free(vmw_mem_glob(dev_priv),
+				    vmw_user_context_size);
+		ret = -ENOMEM;
+		goto out_unlock;
+	}
+
+	res = &ctx->res;
+	ctx->base.shareable = false;
+	ctx->base.tfile = NULL;
+
+	/*
+	 * From here on, the destructor takes over resource freeing.
+	 */
+
+	ret = vmw_context_init(dev_priv, res, vmw_user_context_free);
+	if (unlikely(ret != 0))
+		goto out_unlock;
+
+	tmp = vmw_resource_reference(&ctx->res);
+	ret = ttm_base_object_init(tfile, &ctx->base, false, VMW_RES_CONTEXT,
+				   &vmw_user_context_base_release, NULL);
+
+	if (unlikely(ret != 0)) {
+		vmw_resource_unreference(&tmp);
+		goto out_err;
+	}
+
+	arg->cid = ctx->base.hash.key;
+out_err:
+	vmw_resource_unreference(&res);
+out_unlock:
+	ttm_read_unlock(&vmaster->lock);
+	return ret;
+
+}
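
Both vmw_hw_context_destroy() and vmw_context_init() above follow the driver's reserve/encode/commit FIFO idiom. A minimal distilled sketch of that pattern follows; SVGA3dCmdFoo and SVGA_3D_CMD_FOO are hypothetical placeholders, and only vmw_fifo_reserve(), vmw_fifo_commit() and cpu_to_le32() are taken from the code above:

/* Sketch of the FIFO submission idiom used throughout vmwgfx_context.c.
 * SVGA3dCmdFoo / SVGA_3D_CMD_FOO are illustrative, not real commands. */
static int vmw_send_simple_cmd(struct vmw_private *dev_priv, uint32_t id)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdFoo body;	/* hypothetical command body */
	} *cmd;

	/* Reserve space in the command FIFO; returns a pointer to fill in. */
	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL))
		return -ENOMEM;	/* nothing was reserved, so nothing to commit */

	/* Encode the command in place, in device (little-endian) byte order. */
	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_FOO);
	cmd->header.size = cpu_to_le32(sizeof(cmd->body));
	cmd->body.id = cpu_to_le32(id);

	/* Commit exactly what was reserved, making it visible to the device. */
	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	return 0;
}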
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 44ac46bb5629..88b6f921ee94 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -30,49 +30,7 @@
 #include <drm/ttm/ttm_object.h>
 #include <drm/ttm/ttm_placement.h>
 #include <drm/drmP.h>
-
-/**
- * struct vmw_user_resource_conv - Identify a derived user-exported resource
- * type and provide a function to convert its ttm_base_object pointer to
- * a struct vmw_resource
- */
-struct vmw_user_resource_conv {
-	enum ttm_object_type object_type;
-	struct vmw_resource *(*base_obj_to_res)(struct ttm_base_object *base);
-	void (*res_free) (struct vmw_resource *res);
-};
-
-/**
- * struct vmw_res_func - members and functions common for a resource type
- *
- * @res_type: Enum that identifies the lru list to use for eviction.
- * @needs_backup: Whether the resource is guest-backed and needs
- * persistent buffer storage.
- * @type_name: String that identifies the resource type.
- * @backup_placement: TTM placement for backup buffers.
- * @may_evict Whether the resource may be evicted.
- * @create: Create a hardware resource.
- * @destroy: Destroy a hardware resource.
- * @bind: Bind a hardware resource to persistent buffer storage.
- * @unbind: Unbind a hardware resource from persistent
- * buffer storage.
- */
-
-struct vmw_res_func {
-	enum vmw_res_type res_type;
-	bool needs_backup;
-	const char *type_name;
-	struct ttm_placement *backup_placement;
-	bool may_evict;
-
-	int (*create) (struct vmw_resource *res);
-	int (*destroy) (struct vmw_resource *res);
-	int (*bind) (struct vmw_resource *res,
-		     struct ttm_validate_buffer *val_buf);
-	int (*unbind) (struct vmw_resource *res,
-		       bool readback,
-		       struct ttm_validate_buffer *val_buf);
-};
+#include "vmwgfx_resource_priv.h"
 
 struct vmw_user_dma_buffer {
 	struct ttm_base_object base;
@@ -109,104 +67,6 @@ static const struct vmw_res_func vmw_stream_func = {
 	.unbind = NULL
 };
 
-struct vmw_user_context {
-	struct ttm_base_object base;
-	struct vmw_resource res;
-};
-
-static void vmw_user_context_free(struct vmw_resource *res);
-static struct vmw_resource *
-vmw_user_context_base_to_res(struct ttm_base_object *base);
-
-static uint64_t vmw_user_context_size;
-
-static const struct vmw_user_resource_conv user_context_conv = {
-	.object_type = VMW_RES_CONTEXT,
-	.base_obj_to_res = vmw_user_context_base_to_res,
-	.res_free = vmw_user_context_free
-};
-
-const struct vmw_user_resource_conv *user_context_converter =
-	&user_context_conv;
-
-
-static const struct vmw_res_func vmw_legacy_context_func = {
-	.res_type = vmw_res_context,
-	.needs_backup = false,
-	.may_evict = false,
-	.type_name = "legacy contexts",
-	.backup_placement = NULL,
-	.create = NULL,
-	.destroy = NULL,
-	.bind = NULL,
-	.unbind = NULL
-};
-
-
-/**
- * struct vmw_user_surface - User-space visible surface resource
- *
- * @base: The TTM base object handling user-space visibility.
- * @srf: The surface metadata.
- * @size: TTM accounting size for the surface.
- */
-struct vmw_user_surface {
-	struct ttm_base_object base;
-	struct vmw_surface srf;
-	uint32_t size;
-	uint32_t backup_handle;
-};
-
-/**
- * struct vmw_surface_offset - Backing store mip level offset info
- *
- * @face: Surface face.
- * @mip: Mip level.
- * @bo_offset: Offset into backing store of this mip level.
- *
- */
-struct vmw_surface_offset {
-	uint32_t face;
-	uint32_t mip;
-	uint32_t bo_offset;
-};
-
-static void vmw_user_surface_free(struct vmw_resource *res);
-static struct vmw_resource *
-vmw_user_surface_base_to_res(struct ttm_base_object *base);
-static int vmw_legacy_srf_bind(struct vmw_resource *res,
-			       struct ttm_validate_buffer *val_buf);
-static int vmw_legacy_srf_unbind(struct vmw_resource *res,
-				 bool readback,
-				 struct ttm_validate_buffer *val_buf);
-static int vmw_legacy_srf_create(struct vmw_resource *res);
-static int vmw_legacy_srf_destroy(struct vmw_resource *res);
-
-static const struct vmw_user_resource_conv user_surface_conv = {
-	.object_type = VMW_RES_SURFACE,
-	.base_obj_to_res = vmw_user_surface_base_to_res,
-	.res_free = vmw_user_surface_free
-};
-
-const struct vmw_user_resource_conv *user_surface_converter =
-	&user_surface_conv;
-
-
-static uint64_t vmw_user_surface_size;
-
-static const struct vmw_res_func vmw_legacy_surface_func = {
-	.res_type = vmw_res_surface,
-	.needs_backup = false,
-	.may_evict = true,
-	.type_name = "legacy surfaces",
-	.backup_placement = &vmw_srf_placement,
-	.create = &vmw_legacy_srf_create,
-	.destroy = &vmw_legacy_srf_destroy,
-	.bind = &vmw_legacy_srf_bind,
-	.unbind = &vmw_legacy_srf_unbind
-};
-
-
 static inline struct vmw_dma_buffer *
 vmw_dma_buffer(struct ttm_buffer_object *bo)
 {
@@ -234,7 +94,7 @@ struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
  *
  * Release the resource id to the resource id manager and set it to -1
  */
-static void vmw_resource_release_id(struct vmw_resource *res)
+void vmw_resource_release_id(struct vmw_resource *res)
 {
 	struct vmw_private *dev_priv = res->dev_priv;
 	struct idr *idr = &dev_priv->res_idr[res->func->res_type];
@@ -309,7 +169,7 @@ void vmw_resource_unreference(struct vmw_resource **p_res)
  * Allocate the lowest free resource from the resource manager, and set
  * @res->id to that id. Returns 0 on success and -ENOMEM on failure.
  */
-static int vmw_resource_alloc_id(struct vmw_resource *res)
+int vmw_resource_alloc_id(struct vmw_resource *res)
 {
 	struct vmw_private *dev_priv = res->dev_priv;
 	int ret;
@@ -341,11 +201,10 @@ static int vmw_resource_alloc_id(struct vmw_resource *res)
  * @res_free: Resource destructor.
  * @func: Resource function table.
  */
-static int vmw_resource_init(struct vmw_private *dev_priv,
-			     struct vmw_resource *res,
-			     bool delay_id,
-			     void (*res_free) (struct vmw_resource *res),
-			     const struct vmw_res_func *func)
+int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
+		      bool delay_id,
+		      void (*res_free) (struct vmw_resource *res),
+		      const struct vmw_res_func *func)
 {
 	kref_init(&res->kref);
 	res->hw_destroy = NULL;
@@ -378,8 +237,8 @@ static int vmw_resource_init(struct vmw_private *dev_priv,
  * Activate basically means that the function vmw_resource_lookup will
  * find it.
  */
-static void vmw_resource_activate(struct vmw_resource *res,
-				  void (*hw_destroy) (struct vmw_resource *))
+void vmw_resource_activate(struct vmw_resource *res,
+			   void (*hw_destroy) (struct vmw_resource *))
 {
 	struct vmw_private *dev_priv = res->dev_priv;
 
@@ -409,1090 +268,6 @@ struct vmw_resource *vmw_resource_lookup(struct vmw_private *dev_priv,
 }
 
 /**
- * Context management:
- */
-
-static void vmw_hw_context_destroy(struct vmw_resource *res)
-{
-
-	struct vmw_private *dev_priv = res->dev_priv;
-	struct {
-		SVGA3dCmdHeader header;
-		SVGA3dCmdDestroyContext body;
-	} *cmd;
-
-
-	vmw_execbuf_release_pinned_bo(dev_priv);
-	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
-	if (unlikely(cmd == NULL)) {
-		DRM_ERROR("Failed reserving FIFO space for context "
-			  "destruction.\n");
-		return;
-	}
-
-	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DESTROY);
-	cmd->header.size = cpu_to_le32(sizeof(cmd->body));
-	cmd->body.cid = cpu_to_le32(res->id);
-
-	vmw_fifo_commit(dev_priv, sizeof(*cmd));
-	vmw_3d_resource_dec(dev_priv, false);
-}
-
-static int vmw_context_init(struct vmw_private *dev_priv,
-			    struct vmw_resource *res,
-			    void (*res_free) (struct vmw_resource *res))
-{
-	int ret;
-
-	struct {
-		SVGA3dCmdHeader header;
-		SVGA3dCmdDefineContext body;
-	} *cmd;
-
-	ret = vmw_resource_init(dev_priv, res, false,
-				res_free, &vmw_legacy_context_func);
-
-	if (unlikely(ret != 0)) {
-		DRM_ERROR("Failed to allocate a resource id.\n");
-		goto out_early;
-	}
-
-	if (unlikely(res->id >= SVGA3D_MAX_CONTEXT_IDS)) {
-		DRM_ERROR("Out of hw context ids.\n");
-		vmw_resource_unreference(&res);
-		return -ENOMEM;
-	}
-
-	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
-	if (unlikely(cmd == NULL)) {
-		DRM_ERROR("Fifo reserve failed.\n");
-		vmw_resource_unreference(&res);
-		return -ENOMEM;
-	}
-
-	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DEFINE);
-	cmd->header.size = cpu_to_le32(sizeof(cmd->body));
-	cmd->body.cid = cpu_to_le32(res->id);
-
-	vmw_fifo_commit(dev_priv, sizeof(*cmd));
-	(void) vmw_3d_resource_inc(dev_priv, false);
-	vmw_resource_activate(res, vmw_hw_context_destroy);
-	return 0;
-
-out_early:
-	if (res_free == NULL)
-		kfree(res);
-	else
-		res_free(res);
-	return ret;
-}
-
-struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv)
-{
-	struct vmw_resource *res = kmalloc(sizeof(*res), GFP_KERNEL);
-	int ret;
-
-	if (unlikely(res == NULL))
-		return NULL;
-
-	ret = vmw_context_init(dev_priv, res, NULL);
-
-	return (ret == 0) ? res : NULL;
-}
-
-/**
- * User-space context management:
- */
-
-static struct vmw_resource *
-vmw_user_context_base_to_res(struct ttm_base_object *base)
-{
-	return &(container_of(base, struct vmw_user_context, base)->res);
-}
-
-static void vmw_user_context_free(struct vmw_resource *res)
-{
-	struct vmw_user_context *ctx =
-	    container_of(res, struct vmw_user_context, res);
-	struct vmw_private *dev_priv = res->dev_priv;
-
-	ttm_base_object_kfree(ctx, base);
-	ttm_mem_global_free(vmw_mem_glob(dev_priv),
-			    vmw_user_context_size);
-}
-
-/**
- * This function is called when user space has no more references on the
- * base object. It releases the base-object's reference on the resource object.
- */
-
-static void vmw_user_context_base_release(struct ttm_base_object **p_base)
-{
-	struct ttm_base_object *base = *p_base;
-	struct vmw_user_context *ctx =
-	    container_of(base, struct vmw_user_context, base);
-	struct vmw_resource *res = &ctx->res;
-
-	*p_base = NULL;
-	vmw_resource_unreference(&res);
-}
-
-int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
-			      struct drm_file *file_priv)
-{
-	struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
-	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
-
-	return ttm_ref_object_base_unref(tfile, arg->cid, TTM_REF_USAGE);
-}
-
-int vmw_context_define_ioctl(struct drm_device *dev, void *data,
-			     struct drm_file *file_priv)
-{
-	struct vmw_private *dev_priv = vmw_priv(dev);
-	struct vmw_user_context *ctx;
-	struct vmw_resource *res;
-	struct vmw_resource *tmp;
-	struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
-	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
-	struct vmw_master *vmaster = vmw_master(file_priv->master);
-	int ret;
-
-
-	/*
-	 * Approximate idr memory usage with 128 bytes. It will be limited
-	 * by maximum number of contexts anyway.
-	 */
-
-	if (unlikely(vmw_user_context_size == 0))
-		vmw_user_context_size = ttm_round_pot(sizeof(*ctx)) + 128;
-
-	ret = ttm_read_lock(&vmaster->lock, true);
-	if (unlikely(ret != 0))
-		return ret;
-
-	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
-				   vmw_user_context_size,
-				   false, true);
-	if (unlikely(ret != 0)) {
-		if (ret != -ERESTARTSYS)
-			DRM_ERROR("Out of graphics memory for context"
-				  " creation.\n");
-		goto out_unlock;
-	}
-
-	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
-	if (unlikely(ctx == NULL)) {
-		ttm_mem_global_free(vmw_mem_glob(dev_priv),
-				    vmw_user_context_size);
-		ret = -ENOMEM;
-		goto out_unlock;
-	}
-
-	res = &ctx->res;
-	ctx->base.shareable = false;
-	ctx->base.tfile = NULL;
-
-	/*
-	 * From here on, the destructor takes over resource freeing.
-	 */
-
-	ret = vmw_context_init(dev_priv, res, vmw_user_context_free);
-	if (unlikely(ret != 0))
-		goto out_unlock;
-
-	tmp = vmw_resource_reference(&ctx->res);
-	ret = ttm_base_object_init(tfile, &ctx->base, false, VMW_RES_CONTEXT,
-				   &vmw_user_context_base_release, NULL);
-
-	if (unlikely(ret != 0)) {
-		vmw_resource_unreference(&tmp);
-		goto out_err;
-	}
-
-	arg->cid = ctx->base.hash.key;
-out_err:
-	vmw_resource_unreference(&res);
-out_unlock:
-	ttm_read_unlock(&vmaster->lock);
-	return ret;
-
-}
-
-/**
- * struct vmw_bpp - Bits per pixel info for surface storage size computation.
- *
- * @bpp: Bits per pixel.
- * @s_bpp: Stride bits per pixel. See definition below.
- *
- */
-struct vmw_bpp {
-	uint8_t bpp;
-	uint8_t s_bpp;
-};
-
-/*
- * Size table for the supported SVGA3D surface formats. It consists of
- * two values: the bpp value and the s_bpp value, which is short for
- * "stride bits per pixel". The values are given in such a way that the
- * minimum stride for the image is calculated using
- *
- *   min_stride = w*s_bpp
- *
- * and the total memory requirement for the image is
- *
- *   h*min_stride*bpp/s_bpp
- *
- */
-static const struct vmw_bpp vmw_sf_bpp[] = {
-	[SVGA3D_FORMAT_INVALID] = {0, 0},
-	[SVGA3D_X8R8G8B8] = {32, 32},
-	[SVGA3D_A8R8G8B8] = {32, 32},
-	[SVGA3D_R5G6B5] = {16, 16},
-	[SVGA3D_X1R5G5B5] = {16, 16},
-	[SVGA3D_A1R5G5B5] = {16, 16},
-	[SVGA3D_A4R4G4B4] = {16, 16},
-	[SVGA3D_Z_D32] = {32, 32},
-	[SVGA3D_Z_D16] = {16, 16},
-	[SVGA3D_Z_D24S8] = {32, 32},
-	[SVGA3D_Z_D15S1] = {16, 16},
-	[SVGA3D_LUMINANCE8] = {8, 8},
-	[SVGA3D_LUMINANCE4_ALPHA4] = {8, 8},
-	[SVGA3D_LUMINANCE16] = {16, 16},
-	[SVGA3D_LUMINANCE8_ALPHA8] = {16, 16},
-	[SVGA3D_DXT1] = {4, 16},
-	[SVGA3D_DXT2] = {8, 32},
-	[SVGA3D_DXT3] = {8, 32},
-	[SVGA3D_DXT4] = {8, 32},
-	[SVGA3D_DXT5] = {8, 32},
-	[SVGA3D_BUMPU8V8] = {16, 16},
-	[SVGA3D_BUMPL6V5U5] = {16, 16},
-	[SVGA3D_BUMPX8L8V8U8] = {32, 32},
-	[SVGA3D_ARGB_S10E5] = {16, 16},
-	[SVGA3D_ARGB_S23E8] = {32, 32},
-	[SVGA3D_A2R10G10B10] = {32, 32},
-	[SVGA3D_V8U8] = {16, 16},
-	[SVGA3D_Q8W8V8U8] = {32, 32},
-	[SVGA3D_CxV8U8] = {16, 16},
-	[SVGA3D_X8L8V8U8] = {32, 32},
-	[SVGA3D_A2W10V10U10] = {32, 32},
-	[SVGA3D_ALPHA8] = {8, 8},
-	[SVGA3D_R_S10E5] = {16, 16},
-	[SVGA3D_R_S23E8] = {32, 32},
-	[SVGA3D_RG_S10E5] = {16, 16},
-	[SVGA3D_RG_S23E8] = {32, 32},
-	[SVGA3D_BUFFER] = {8, 8},
-	[SVGA3D_Z_D24X8] = {32, 32},
-	[SVGA3D_V16U16] = {32, 32},
-	[SVGA3D_G16R16] = {32, 32},
-	[SVGA3D_A16B16G16R16] = {64, 64},
-	[SVGA3D_UYVY] = {12, 12},
-	[SVGA3D_YUY2] = {12, 12},
-	[SVGA3D_NV12] = {12, 8},
-	[SVGA3D_AYUV] = {32, 32},
-	[SVGA3D_BC4_UNORM] = {4, 16},
-	[SVGA3D_BC5_UNORM] = {8, 32},
-	[SVGA3D_Z_DF16] = {16, 16},
-	[SVGA3D_Z_DF24] = {24, 24},
-	[SVGA3D_Z_D24S8_INT] = {32, 32}
-};
-
-
-/**
- * struct vmw_surface_dma - SVGA3D DMA command
- */
-struct vmw_surface_dma {
-	SVGA3dCmdHeader header;
-	SVGA3dCmdSurfaceDMA body;
-	SVGA3dCopyBox cb;
-	SVGA3dCmdSurfaceDMASuffix suffix;
-};
-
-/**
- * struct vmw_surface_define - SVGA3D Surface Define command
- */
-struct vmw_surface_define {
-	SVGA3dCmdHeader header;
-	SVGA3dCmdDefineSurface body;
-};
-
-/**
- * struct vmw_surface_destroy - SVGA3D Surface Destroy command
- */
-struct vmw_surface_destroy {
-	SVGA3dCmdHeader header;
-	SVGA3dCmdDestroySurface body;
-};
-
-
-/**
- * vmw_surface_dma_size - Compute fifo size for a dma command.
- *
- * @srf: Pointer to a struct vmw_surface
- *
- * Computes the required size for a surface dma command for backup or
- * restoration of the surface represented by @srf.
- */
-static inline uint32_t vmw_surface_dma_size(const struct vmw_surface *srf)
-{
-	return srf->num_sizes * sizeof(struct vmw_surface_dma);
-}
-
-
-/**
- * vmw_surface_define_size - Compute fifo size for a surface define command.
- *
- * @srf: Pointer to a struct vmw_surface
- *
- * Computes the required size for a surface define command for the definition
- * of the surface represented by @srf.
- */
-static inline uint32_t vmw_surface_define_size(const struct vmw_surface *srf)
-{
-	return sizeof(struct vmw_surface_define) + srf->num_sizes *
-		sizeof(SVGA3dSize);
-}
-
-
-/**
- * vmw_surface_destroy_size - Compute fifo size for a surface destroy command.
- *
- * Computes the required size for a surface destroy command for the destruction
- * of a hw surface.
- */
-static inline uint32_t vmw_surface_destroy_size(void)
-{
-	return sizeof(struct vmw_surface_destroy);
-}
-
-/**
- * vmw_surface_destroy_encode - Encode a surface_destroy command.
- *
- * @id: The surface id
- * @cmd_space: Pointer to memory area in which the commands should be encoded.
- */
-static void vmw_surface_destroy_encode(uint32_t id,
-				       void *cmd_space)
-{
-	struct vmw_surface_destroy *cmd = (struct vmw_surface_destroy *)
-		cmd_space;
-
-	cmd->header.id = SVGA_3D_CMD_SURFACE_DESTROY;
-	cmd->header.size = sizeof(cmd->body);
-	cmd->body.sid = id;
-}
-
-/**
- * vmw_surface_define_encode - Encode a surface_define command.
- *
- * @srf: Pointer to a struct vmw_surface object.
- * @cmd_space: Pointer to memory area in which the commands should be encoded.
- */
-static void vmw_surface_define_encode(const struct vmw_surface *srf,
-				      void *cmd_space)
-{
-	struct vmw_surface_define *cmd = (struct vmw_surface_define *)
-		cmd_space;
-	struct drm_vmw_size *src_size;
-	SVGA3dSize *cmd_size;
-	uint32_t cmd_len;
-	int i;
-
-	cmd_len = sizeof(cmd->body) + srf->num_sizes * sizeof(SVGA3dSize);
-
-	cmd->header.id = SVGA_3D_CMD_SURFACE_DEFINE;
-	cmd->header.size = cmd_len;
-	cmd->body.sid = srf->res.id;
-	cmd->body.surfaceFlags = srf->flags;
-	cmd->body.format = cpu_to_le32(srf->format);
-	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
-		cmd->body.face[i].numMipLevels = srf->mip_levels[i];
-
-	cmd += 1;
-	cmd_size = (SVGA3dSize *) cmd;
-	src_size = srf->sizes;
-
-	for (i = 0; i < srf->num_sizes; ++i, cmd_size++, src_size++) {
-		cmd_size->width = src_size->width;
-		cmd_size->height = src_size->height;
-		cmd_size->depth = src_size->depth;
-	}
-}
-
-/**
- * vmw_surface_dma_encode - Encode a surface_dma command.
- *
- * @srf: Pointer to a struct vmw_surface object.
- * @cmd_space: Pointer to memory area in which the commands should be encoded.
- * @ptr: Pointer to an SVGAGuestPtr indicating where the surface contents
- * should be placed or read from.
- * @to_surface: Boolean whether to DMA to the surface or from the surface.
- */
-static void vmw_surface_dma_encode(struct vmw_surface *srf,
-				   void *cmd_space,
-				   const SVGAGuestPtr *ptr,
-				   bool to_surface)
-{
-	uint32_t i;
-	uint32_t bpp = vmw_sf_bpp[srf->format].bpp;
-	uint32_t stride_bpp = vmw_sf_bpp[srf->format].s_bpp;
-	struct vmw_surface_dma *cmd = (struct vmw_surface_dma *)cmd_space;
-
-	for (i = 0; i < srf->num_sizes; ++i) {
-		SVGA3dCmdHeader *header = &cmd->header;
-		SVGA3dCmdSurfaceDMA *body = &cmd->body;
-		SVGA3dCopyBox *cb = &cmd->cb;
-		SVGA3dCmdSurfaceDMASuffix *suffix = &cmd->suffix;
-		const struct vmw_surface_offset *cur_offset = &srf->offsets[i];
-		const struct drm_vmw_size *cur_size = &srf->sizes[i];
-
-		header->id = SVGA_3D_CMD_SURFACE_DMA;
-		header->size = sizeof(*body) + sizeof(*cb) + sizeof(*suffix);
-
-		body->guest.ptr = *ptr;
-		body->guest.ptr.offset += cur_offset->bo_offset;
-		body->guest.pitch = (cur_size->width * stride_bpp + 7) >> 3;
-		body->host.sid = srf->res.id;
-		body->host.face = cur_offset->face;
-		body->host.mipmap = cur_offset->mip;
-		body->transfer = ((to_surface) ? SVGA3D_WRITE_HOST_VRAM :
-				  SVGA3D_READ_HOST_VRAM);
-		cb->x = 0;
-		cb->y = 0;
-		cb->z = 0;
-		cb->srcx = 0;
-		cb->srcy = 0;
-		cb->srcz = 0;
-		cb->w = cur_size->width;
-		cb->h = cur_size->height;
-		cb->d = cur_size->depth;
-
-		suffix->suffixSize = sizeof(*suffix);
-		suffix->maximumOffset = body->guest.pitch*cur_size->height*
-			cur_size->depth*bpp / stride_bpp;
-		suffix->flags.discard = 0;
-		suffix->flags.unsynchronized = 0;
-		suffix->flags.reserved = 0;
-		++cmd;
-	}
-};
-
-
-/**
- * vmw_hw_surface_destroy - destroy a device surface
- *
- * @res: Pointer to a struct vmw_resource embedded in a struct
- * vmw_surface.
- *
- * Destroys the device surface associated with a struct vmw_surface, if
- * any, and adjusts accounting and resource count accordingly.
- */
-static void vmw_hw_surface_destroy(struct vmw_resource *res)
-{
-
-	struct vmw_private *dev_priv = res->dev_priv;
-	struct vmw_surface *srf;
-	void *cmd;
-
-	if (res->id != -1) {
-
-		cmd = vmw_fifo_reserve(dev_priv, vmw_surface_destroy_size());
-		if (unlikely(cmd == NULL)) {
-			DRM_ERROR("Failed reserving FIFO space for surface "
-				  "destruction.\n");
-			return;
-		}
-
-		vmw_surface_destroy_encode(res->id, cmd);
-		vmw_fifo_commit(dev_priv, vmw_surface_destroy_size());
-
-		/*
-		 * used_memory_size_atomic, or separate lock
-		 * to avoid taking dev_priv::cmdbuf_mutex in
-		 * the destroy path.
-		 */
-
-		mutex_lock(&dev_priv->cmdbuf_mutex);
-		srf = vmw_res_to_srf(res);
-		dev_priv->used_memory_size -= res->backup_size;
-		mutex_unlock(&dev_priv->cmdbuf_mutex);
-	}
-	vmw_3d_resource_dec(dev_priv, false);
-}
-
-/**
- * vmw_legacy_srf_create - Create a device surface as part of the
- * resource validation process.
- *
- * @res: Pointer to a struct vmw_surface.
- *
- * Creates the device surface if the resource doesn't yet have a hw id.
- *
- * Returns -EBUSY if there weren't sufficient device resources to
- * complete the validation. Retry after freeing up resources.
- *
- * May return other errors if the kernel is out of guest resources.
- */
-static int vmw_legacy_srf_create(struct vmw_resource *res)
-{
-	struct vmw_private *dev_priv = res->dev_priv;
-	struct vmw_surface *srf;
-	uint32_t submit_size;
-	uint8_t *cmd;
-	int ret;
-
-	if (likely(res->id != -1))
-		return 0;
-
-	srf = vmw_res_to_srf(res);
-	if (unlikely(dev_priv->used_memory_size + res->backup_size >=
-		     dev_priv->memory_size))
-		return -EBUSY;
-
-	/*
-	 * Alloc id for the resource.
-	 */
-
-	ret = vmw_resource_alloc_id(res);
-	if (unlikely(ret != 0)) {
-		DRM_ERROR("Failed to allocate a surface id.\n");
-		goto out_no_id;
-	}
-
-	if (unlikely(res->id >= SVGA3D_MAX_SURFACE_IDS)) {
-		ret = -EBUSY;
-		goto out_no_fifo;
-	}
-
-	/*
-	 * Encode surface define- commands.
-	 */
-
-	submit_size = vmw_surface_define_size(srf);
-	cmd = vmw_fifo_reserve(dev_priv, submit_size);
-	if (unlikely(cmd == NULL)) {
-		DRM_ERROR("Failed reserving FIFO space for surface "
-			  "creation.\n");
-		ret = -ENOMEM;
-		goto out_no_fifo;
-	}
-
-	vmw_surface_define_encode(srf, cmd);
-	vmw_fifo_commit(dev_priv, submit_size);
-	/*
-	 * Surface memory usage accounting.
-	 */
-
-	dev_priv->used_memory_size += res->backup_size;
-	return 0;
-
-out_no_fifo:
-	vmw_resource_release_id(res);
-out_no_id:
-	return ret;
-}
-
-/**
- * vmw_legacy_srf_dma - Copy backup data to or from a legacy surface.
- *
- * @res: Pointer to a struct vmw_res embedded in a struct
- * vmw_surface.
- * @val_buf: Pointer to a struct ttm_validate_buffer containing
- * information about the backup buffer.
- * @bind: Boolean whether to DMA to the surface.
- *
- * Transfer backup data to or from a legacy surface as part of the
- * validation process.
- * May return other errors if the kernel is out of guest resources.
- * The backup buffer will be fenced or idle upon successful completion,
- * and if the surface needs persistent backup storage, the backup buffer
- * will also be returned reserved iff @bind is true.
- */
-static int vmw_legacy_srf_dma(struct vmw_resource *res,
-			      struct ttm_validate_buffer *val_buf,
-			      bool bind)
-{
-	SVGAGuestPtr ptr;
-	struct vmw_fence_obj *fence;
-	uint32_t submit_size;
-	struct vmw_surface *srf = vmw_res_to_srf(res);
-	uint8_t *cmd;
-	struct vmw_private *dev_priv = res->dev_priv;
-
-	BUG_ON(val_buf->bo == NULL);
-
-	submit_size = vmw_surface_dma_size(srf);
-	cmd = vmw_fifo_reserve(dev_priv, submit_size);
-	if (unlikely(cmd == NULL)) {
-		DRM_ERROR("Failed reserving FIFO space for surface "
-			  "DMA.\n");
-		return -ENOMEM;
-	}
-	vmw_bo_get_guest_ptr(val_buf->bo, &ptr);
-	vmw_surface_dma_encode(srf, cmd, &ptr, bind);
-
-	vmw_fifo_commit(dev_priv, submit_size);
-
-	/*
-	 * Create a fence object and fence the backup buffer.
-	 */
-
-	(void) vmw_execbuf_fence_commands(NULL, dev_priv,
-					  &fence, NULL);
-
-	vmw_fence_single_bo(val_buf->bo, fence);
-
-	if (likely(fence != NULL))
-		vmw_fence_obj_unreference(&fence);
-
-	return 0;
-}
-
-/**
- * vmw_legacy_srf_bind - Perform a legacy surface bind as part of the
- * surface validation process.
- *
- * @res: Pointer to a struct vmw_res embedded in a struct
- * vmw_surface.
- * @val_buf: Pointer to a struct ttm_validate_buffer containing
- * information about the backup buffer.
- *
- * This function will copy backup data to the surface if the
- * backup buffer is dirty.
- */
-static int vmw_legacy_srf_bind(struct vmw_resource *res,
-			       struct ttm_validate_buffer *val_buf)
-{
-	if (!res->backup_dirty)
-		return 0;
-
-	return vmw_legacy_srf_dma(res, val_buf, true);
-}
-
-
-/**
- * vmw_legacy_srf_unbind - Perform a legacy surface unbind as part of the
- * surface eviction process.
- *
- * @res: Pointer to a struct vmw_res embedded in a struct
- * vmw_surface.
- * @val_buf: Pointer to a struct ttm_validate_buffer containing
- * information about the backup buffer.
- *
- * This function will copy backup data from the surface.
- */
-static int vmw_legacy_srf_unbind(struct vmw_resource *res,
-				 bool readback,
-				 struct ttm_validate_buffer *val_buf)
-{
-	if (unlikely(readback))
-		return vmw_legacy_srf_dma(res, val_buf, false);
-	return 0;
-}
-
-/**
- * vmw_legacy_srf_destroy - Destroy a device surface as part of a
- * resource eviction process.
- *
- * @res: Pointer to a struct vmw_res embedded in a struct
- * vmw_surface.
- */
-static int vmw_legacy_srf_destroy(struct vmw_resource *res)
-{
-	struct vmw_private *dev_priv = res->dev_priv;
-	uint32_t submit_size;
-	uint8_t *cmd;
-
-	BUG_ON(res->id == -1);
-
-	/*
-	 * Encode the surface destroy command.
-	 */
-
-	submit_size = vmw_surface_destroy_size();
-	cmd = vmw_fifo_reserve(dev_priv, submit_size);
-	if (unlikely(cmd == NULL)) {
-		DRM_ERROR("Failed reserving FIFO space for surface "
-			  "eviction.\n");
-		return -ENOMEM;
-	}
-
-	vmw_surface_destroy_encode(res->id, cmd);
-	vmw_fifo_commit(dev_priv, submit_size);
-
-	/*
-	 * Surface memory usage accounting.
-	 */
-
-	dev_priv->used_memory_size -= res->backup_size;
-
-	/*
-	 * Release the surface ID.
-	 */
-
-	vmw_resource_release_id(res);
-
-	return 0;
-}
-
-
-/**
- * vmw_surface_init - initialize a struct vmw_surface
- *
- * @dev_priv: Pointer to a device private struct.
- * @srf: Pointer to the struct vmw_surface to initialize.
- * @res_free: Pointer to a resource destructor used to free
- * the object.
- */
-static int vmw_surface_init(struct vmw_private *dev_priv,
-			    struct vmw_surface *srf,
-			    void (*res_free) (struct vmw_resource *res))
-{
-	int ret;
-	struct vmw_resource *res = &srf->res;
-
-	BUG_ON(res_free == NULL);
-	(void) vmw_3d_resource_inc(dev_priv, false);
-	ret = vmw_resource_init(dev_priv, res, true, res_free,
-				&vmw_legacy_surface_func);
-
-	if (unlikely(ret != 0)) {
-		vmw_3d_resource_dec(dev_priv, false);
-		res_free(res);
-		return ret;
-	}
-
-	/*
-	 * The surface won't be visible to hardware until a
-	 * surface validate.
-	 */
-
-	vmw_resource_activate(res, vmw_hw_surface_destroy);
-	return ret;
-}
-
-/**
- * vmw_user_surface_base_to_res - TTM base object to resource converter for
- * user visible surfaces
- *
- * @base: Pointer to a TTM base object
- *
- * Returns the struct vmw_resource embedded in a struct vmw_surface
- * for the user-visible object identified by the TTM base object @base.
- */
-static struct vmw_resource *
-vmw_user_surface_base_to_res(struct ttm_base_object *base)
-{
-	return &(container_of(base, struct vmw_user_surface, base)->srf.res);
-}
-
-/**
- * vmw_user_surface_free - User visible surface resource destructor
- *
- * @res: A struct vmw_resource embedded in a struct vmw_surface.
- */
-static void vmw_user_surface_free(struct vmw_resource *res)
-{
-	struct vmw_surface *srf = vmw_res_to_srf(res);
-	struct vmw_user_surface *user_srf =
-	    container_of(srf, struct vmw_user_surface, srf);
-	struct vmw_private *dev_priv = srf->res.dev_priv;
-	uint32_t size = user_srf->size;
-
-	kfree(srf->offsets);
-	kfree(srf->sizes);
-	kfree(srf->snooper.image);
-	ttm_base_object_kfree(user_srf, base);
-	ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
-}
-
-/**
- * vmw_user_surface_base_release - User visible surface TTM base object
- * destructor
- *
- * @p_base: Pointer to a pointer to a TTM base object
- * embedded in a struct vmw_user_surface.
- *
- * Drops the base object's reference on its resource, and the
- * pointer pointed to by *p_base is set to NULL.
- */
-static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
-{
-	struct ttm_base_object *base = *p_base;
-	struct vmw_user_surface *user_srf =
-	    container_of(base, struct vmw_user_surface, base);
-	struct vmw_resource *res = &user_srf->srf.res;
-
-	*p_base = NULL;
-	vmw_resource_unreference(&res);
-}
-
-/**
- * vmw_surface_destroy_ioctl - Ioctl function implementing
- * the user surface destroy functionality.
- *
- * @dev: Pointer to a struct drm_device.
- * @data: Pointer to data copied from / to user-space.
- * @file_priv: Pointer to a drm file private structure.
- */
-int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
-			      struct drm_file *file_priv)
-{
-	struct drm_vmw_surface_arg *arg = (struct drm_vmw_surface_arg *)data;
-	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
-
-	return ttm_ref_object_base_unref(tfile, arg->sid, TTM_REF_USAGE);
-}
-
-/**
- * vmw_surface_define_ioctl - Ioctl function implementing
- * the user surface define functionality.
- *
- * @dev: Pointer to a struct drm_device.
- * @data: Pointer to data copied from / to user-space.
- * @file_priv: Pointer to a drm file private structure.
- */
-int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
-			     struct drm_file *file_priv)
-{
-	struct vmw_private *dev_priv = vmw_priv(dev);
-	struct vmw_user_surface *user_srf;
-	struct vmw_surface *srf;
-	struct vmw_resource *res;
-	struct vmw_resource *tmp;
-	union drm_vmw_surface_create_arg *arg =
-	    (union drm_vmw_surface_create_arg *)data;
-	struct drm_vmw_surface_create_req *req = &arg->req;
-	struct drm_vmw_surface_arg *rep = &arg->rep;
-	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
-	struct drm_vmw_size __user *user_sizes;
-	int ret;
-	int i, j;
-	uint32_t cur_bo_offset;
-	struct drm_vmw_size *cur_size;
-	struct vmw_surface_offset *cur_offset;
-	uint32_t stride_bpp;
-	uint32_t bpp;
-	uint32_t num_sizes;
-	uint32_t size;
-	struct vmw_master *vmaster = vmw_master(file_priv->master);
-
-	if (unlikely(vmw_user_surface_size == 0))
-		vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) +
-			128;
-
-	num_sizes = 0;
-	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
-		num_sizes += req->mip_levels[i];
-
-	if (num_sizes > DRM_VMW_MAX_SURFACE_FACES *
-	    DRM_VMW_MAX_MIP_LEVELS)
-		return -EINVAL;
-
-	size = vmw_user_surface_size + 128 +
-		ttm_round_pot(num_sizes * sizeof(struct drm_vmw_size)) +
-		ttm_round_pot(num_sizes * sizeof(struct vmw_surface_offset));
-
-
-	ret = ttm_read_lock(&vmaster->lock, true);
-	if (unlikely(ret != 0))
-		return ret;
-
-	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
-				   size, false, true);
-	if (unlikely(ret != 0)) {
-		if (ret != -ERESTARTSYS)
-			DRM_ERROR("Out of graphics memory for surface"
-				  " creation.\n");
-		goto out_unlock;
-	}
-
-	user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL);
-	if (unlikely(user_srf == NULL)) {
-		ret = -ENOMEM;
-		goto out_no_user_srf;
-	}
-
-	srf = &user_srf->srf;
-	res = &srf->res;
-
-	srf->flags = req->flags;
-	srf->format = req->format;
-	srf->scanout = req->scanout;
-
-	memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels));
-	srf->num_sizes = num_sizes;
-	user_srf->size = size;
-
-	srf->sizes = kmalloc(srf->num_sizes * sizeof(*srf->sizes), GFP_KERNEL);
-	if (unlikely(srf->sizes == NULL)) {
-		ret = -ENOMEM;
-		goto out_no_sizes;
-	}
-	srf->offsets = kmalloc(srf->num_sizes * sizeof(*srf->offsets),
-			       GFP_KERNEL);
-	if (unlikely(srf->offsets == NULL)) {
-		ret = -ENOMEM;
-		goto out_no_offsets;
-	}
1337 | |||
1338 | user_sizes = (struct drm_vmw_size __user *)(unsigned long) | ||
1339 | req->size_addr; | ||
1340 | |||
1341 | ret = copy_from_user(srf->sizes, user_sizes, | ||
1342 | srf->num_sizes * sizeof(*srf->sizes)); | ||
1343 | if (unlikely(ret != 0)) { | ||
1344 | ret = -EFAULT; | ||
1345 | goto out_no_copy; | ||
1346 | } | ||
1347 | |||
1348 | srf->base_size = *srf->sizes; | ||
1349 | srf->autogen_filter = SVGA3D_TEX_FILTER_NONE; | ||
1350 | srf->multisample_count = 1; | ||
1351 | |||
1352 | cur_bo_offset = 0; | ||
1353 | cur_offset = srf->offsets; | ||
1354 | cur_size = srf->sizes; | ||
1355 | |||
1356 | bpp = vmw_sf_bpp[srf->format].bpp; | ||
1357 | stride_bpp = vmw_sf_bpp[srf->format].s_bpp; | ||
1358 | |||
1359 | for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) { | ||
1360 | for (j = 0; j < srf->mip_levels[i]; ++j) { | ||
1361 | uint32_t stride = | ||
1362 | (cur_size->width * stride_bpp + 7) >> 3; | ||
1363 | |||
1364 | cur_offset->face = i; | ||
1365 | cur_offset->mip = j; | ||
1366 | cur_offset->bo_offset = cur_bo_offset; | ||
1367 | cur_bo_offset += stride * cur_size->height * | ||
1368 | cur_size->depth * bpp / stride_bpp; | ||
1369 | ++cur_offset; | ||
1370 | ++cur_size; | ||
1371 | } | ||
1372 | } | ||
1373 | res->backup_size = cur_bo_offset; | ||
1374 | |||
1375 | if (srf->scanout && | ||
1376 | srf->num_sizes == 1 && | ||
1377 | srf->sizes[0].width == 64 && | ||
1378 | srf->sizes[0].height == 64 && | ||
1379 | srf->format == SVGA3D_A8R8G8B8) { | ||
1380 | |||
1381 | /* allocate image area and clear it */ | ||
1382 | srf->snooper.image = kzalloc(64 * 64 * 4, GFP_KERNEL); | ||
1383 | if (!srf->snooper.image) { | ||
1384 | DRM_ERROR("Failed to allocate cursor_image\n"); | ||
1385 | ret = -ENOMEM; | ||
1386 | goto out_no_copy; | ||
1387 | } | ||
1388 | } else { | ||
1389 | srf->snooper.image = NULL; | ||
1390 | } | ||
1391 | srf->snooper.crtc = NULL; | ||
1392 | |||
1393 | user_srf->base.shareable = false; | ||
1394 | user_srf->base.tfile = NULL; | ||
1395 | |||
1396 | /* | ||
1397 | * From this point, the generic resource management functions | ||
1398 | * destroy the object on failure. | ||
1399 | */ | ||
1400 | |||
1401 | ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free); | ||
1402 | if (unlikely(ret != 0)) | ||
1403 | goto out_unlock; | ||
1404 | |||
1405 | tmp = vmw_resource_reference(&srf->res); | ||
1406 | ret = ttm_base_object_init(tfile, &user_srf->base, | ||
1407 | req->shareable, VMW_RES_SURFACE, | ||
1408 | &vmw_user_surface_base_release, NULL); | ||
1409 | |||
1410 | if (unlikely(ret != 0)) { | ||
1411 | vmw_resource_unreference(&tmp); | ||
1412 | vmw_resource_unreference(&res); | ||
1413 | goto out_unlock; | ||
1414 | } | ||
1415 | |||
1416 | rep->sid = user_srf->base.hash.key; | ||
1417 | vmw_resource_unreference(&res); | ||
1418 | |||
1419 | ttm_read_unlock(&vmaster->lock); | ||
1420 | return 0; | ||
1421 | out_no_copy: | ||
1422 | kfree(srf->offsets); | ||
1423 | out_no_offsets: | ||
1424 | kfree(srf->sizes); | ||
1425 | out_no_sizes: | ||
1426 | kfree(user_srf); | ||
1427 | out_no_user_srf: | ||
1428 | ttm_mem_global_free(vmw_mem_glob(dev_priv), size); | ||
1429 | out_unlock: | ||
1430 | ttm_read_unlock(&vmaster->lock); | ||
1431 | return ret; | ||
1432 | } | ||
1433 | |||
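To make the mip-chain bookkeeping in the loop above concrete, a worked example (an illustrative sketch, not part of the patch): a one-face A8R8G8B8 surface with two mip levels, 64x64 and 32x32, depth 1, so bpp = s_bpp = 32:

    mip 0: stride = (64 * 32 + 7) >> 3 = 256 bytes, bo_offset = 0,
           level size = 256 * 64 * 1 * 32 / 32 = 16384 bytes
    mip 1: stride = (32 * 32 + 7) >> 3 = 128 bytes, bo_offset = 16384,
           level size = 128 * 32 * 1 * 32 / 32 = 4096 bytes

    res->backup_size = 16384 + 4096 = 20480 bytes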
1434 | /** | ||
1435 | * vmw_surface_reference_ioctl - Ioctl function implementing | ||
1436 | * the user surface reference functionality. | ||
1437 | * | ||
1438 | * @dev: Pointer to a struct drm_device. | ||
1439 | * @data: Pointer to data copied from / to user-space. | ||
1440 | * @file_priv: Pointer to a drm file private structure. | ||
1441 | */ | ||
1442 | int vmw_surface_reference_ioctl(struct drm_device *dev, void *data, | ||
1443 | struct drm_file *file_priv) | ||
1444 | { | ||
1445 | union drm_vmw_surface_reference_arg *arg = | ||
1446 | (union drm_vmw_surface_reference_arg *)data; | ||
1447 | struct drm_vmw_surface_arg *req = &arg->req; | ||
1448 | struct drm_vmw_surface_create_req *rep = &arg->rep; | ||
1449 | struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; | ||
1450 | struct vmw_surface *srf; | ||
1451 | struct vmw_user_surface *user_srf; | ||
1452 | struct drm_vmw_size __user *user_sizes; | ||
1453 | struct ttm_base_object *base; | ||
1454 | int ret = -EINVAL; | ||
1455 | |||
1456 | base = ttm_base_object_lookup(tfile, req->sid); | ||
1457 | if (unlikely(base == NULL)) { | ||
1458 | DRM_ERROR("Could not find surface to reference.\n"); | ||
1459 | return -EINVAL; | ||
1460 | } | ||
1461 | |||
1462 | if (unlikely(base->object_type != VMW_RES_SURFACE)) | ||
1463 | goto out_bad_resource; | ||
1464 | |||
1465 | user_srf = container_of(base, struct vmw_user_surface, base); | ||
1466 | srf = &user_srf->srf; | ||
1467 | |||
1468 | ret = ttm_ref_object_add(tfile, &user_srf->base, TTM_REF_USAGE, NULL); | ||
1469 | if (unlikely(ret != 0)) { | ||
1470 | DRM_ERROR("Could not add a reference to a surface.\n"); | ||
1471 | goto out_no_reference; | ||
1472 | } | ||
1473 | |||
1474 | rep->flags = srf->flags; | ||
1475 | rep->format = srf->format; | ||
1476 | memcpy(rep->mip_levels, srf->mip_levels, sizeof(srf->mip_levels)); | ||
1477 | user_sizes = (struct drm_vmw_size __user *)(unsigned long) | ||
1478 | rep->size_addr; | ||
1479 | |||
1480 | if (user_sizes) | ||
1481 | ret = copy_to_user(user_sizes, srf->sizes, | ||
1482 | srf->num_sizes * sizeof(*srf->sizes)); | ||
1483 | if (unlikely(ret != 0)) { | ||
1484 | DRM_ERROR("copy_to_user failed %p %u\n", | ||
1485 | user_sizes, srf->num_sizes); | ||
1486 | ret = -EFAULT; | ||
1487 | } | ||
1488 | out_bad_resource: | ||
1489 | out_no_reference: | ||
1490 | ttm_base_object_unref(&base); | ||
1491 | |||
1492 | return ret; | ||
1493 | } | ||
1494 | |||
1495 | /** | ||
1496 | * vmw_user_resource_lookup_handle - lookup a struct resource from a | 271 | * vmw_user_resource_lookup_handle - lookup a struct resource from a |
1497 | * TTM user-space handle and perform basic type checks | 272 | * TTM user-space handle and perform basic type checks |
1498 | * | 273 | * |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h new file mode 100644 index 000000000000..f3adeed2854c --- /dev/null +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h | |||
@@ -0,0 +1,84 @@ | |||
1 | /************************************************************************** | ||
2 | * | ||
3 | * Copyright © 2012 VMware, Inc., Palo Alto, CA., USA | ||
4 | * All Rights Reserved. | ||
5 | * | ||
6 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
7 | * copy of this software and associated documentation files (the | ||
8 | * "Software"), to deal in the Software without restriction, including | ||
9 | * without limitation the rights to use, copy, modify, merge, publish, | ||
10 | * distribute, sub license, and/or sell copies of the Software, and to | ||
11 | * permit persons to whom the Software is furnished to do so, subject to | ||
12 | * the following conditions: | ||
13 | * | ||
14 | * The above copyright notice and this permission notice (including the | ||
15 | * next paragraph) shall be included in all copies or substantial portions | ||
16 | * of the Software. | ||
17 | * | ||
18 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
19 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
20 | * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL | ||
21 | * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, | ||
22 | * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR | ||
23 | * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE | ||
24 | * USE OR OTHER DEALINGS IN THE SOFTWARE. | ||
25 | * | ||
26 | **************************************************************************/ | ||
27 | |||
28 | #ifndef _VMWGFX_RESOURCE_PRIV_H_ | ||
29 | #define _VMWGFX_RESOURCE_PRIV_H_ | ||
30 | |||
31 | #include "vmwgfx_drv.h" | ||
32 | |||
33 | /** | ||
34 | * struct vmw_user_resource_conv - Identify a derived user-exported resource | ||
35 | * type and provide a function to convert its ttm_base_object pointer to | ||
36 | * a struct vmw_resource | ||
37 | */ | ||
38 | struct vmw_user_resource_conv { | ||
39 | enum ttm_object_type object_type; | ||
40 | struct vmw_resource *(*base_obj_to_res)(struct ttm_base_object *base); | ||
41 | void (*res_free) (struct vmw_resource *res); | ||
42 | }; | ||
43 | |||
44 | /** | ||
45 | * struct vmw_res_func - members and functions common for a resource type | ||
46 | * | ||
47 | * @res_type: Enum that identifies the lru list to use for eviction. | ||
48 | * @needs_backup: Whether the resource is guest-backed and needs | ||
49 | * persistent buffer storage. | ||
50 | * @type_name: String that identifies the resource type. | ||
51 | * @backup_placement: TTM placement for backup buffers. | ||
52 | * @may_evict: Whether the resource may be evicted. | ||
53 | * @create: Create a hardware resource. | ||
54 | * @destroy: Destroy a hardware resource. | ||
55 | * @bind: Bind a hardware resource to persistent buffer storage. | ||
56 | * @unbind: Unbind a hardware resource from persistent | ||
57 | * buffer storage. | ||
58 | */ | ||
59 | |||
60 | struct vmw_res_func { | ||
61 | enum vmw_res_type res_type; | ||
62 | bool needs_backup; | ||
63 | const char *type_name; | ||
64 | struct ttm_placement *backup_placement; | ||
65 | bool may_evict; | ||
66 | |||
67 | int (*create) (struct vmw_resource *res); | ||
68 | int (*destroy) (struct vmw_resource *res); | ||
69 | int (*bind) (struct vmw_resource *res, | ||
70 | struct ttm_validate_buffer *val_buf); | ||
71 | int (*unbind) (struct vmw_resource *res, | ||
72 | bool readback, | ||
73 | struct ttm_validate_buffer *val_buf); | ||
74 | }; | ||
75 | |||
76 | int vmw_resource_alloc_id(struct vmw_resource *res); | ||
77 | void vmw_resource_release_id(struct vmw_resource *res); | ||
78 | int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res, | ||
79 | bool delay_id, | ||
80 | void (*res_free) (struct vmw_resource *res), | ||
81 | const struct vmw_res_func *func); | ||
82 | void vmw_resource_activate(struct vmw_resource *res, | ||
83 | void (*hw_destroy) (struct vmw_resource *)); | ||
84 | #endif | ||
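The new vmwgfx_surface.c below is the first user of this interface. As a hedged sketch of the intended pattern (the vmw_foo_* names are invented for illustration; vmw_res_surface, vmw_srf_placement and the functions declared above are existing driver symbols):

    static int vmw_foo_create(struct vmw_resource *res)
    {
            /* Emit the FIFO commands that define the hardware object. */
            return 0;
    }

    static int vmw_foo_destroy(struct vmw_resource *res)
    {
            /* Emit the FIFO commands that destroy the hardware object. */
            return 0;
    }

    static const struct vmw_res_func vmw_foo_func = {
            .res_type = vmw_res_surface,    /* lru list used for eviction */
            .needs_backup = false,          /* no persistent backup storage */
            .may_evict = true,
            .type_name = "foo resources",
            .backup_placement = &vmw_srf_placement,
            .create = vmw_foo_create,
            .destroy = vmw_foo_destroy,
            .bind = NULL,                   /* only used when needs_backup */
            .unbind = NULL,
    };

    /* In the constructor of the resource type: */
    ret = vmw_resource_init(dev_priv, res, true, vmw_foo_free, &vmw_foo_func);
    if (ret == 0)
            vmw_resource_activate(res, vmw_foo_hw_destroy);

vmw_foo_free and vmw_foo_hw_destroy stand in for a type-specific destructor and hardware-destroy callback; the real surface versions follow in vmwgfx_surface.c.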
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c new file mode 100644 index 000000000000..c4a7bcdabd48 --- /dev/null +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c | |||
@@ -0,0 +1,968 @@ | |||
1 | /************************************************************************** | ||
2 | * | ||
3 | * Copyright © 2009-2012 VMware, Inc., Palo Alto, CA., USA | ||
4 | * All Rights Reserved. | ||
5 | * | ||
6 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
7 | * copy of this software and associated documentation files (the | ||
8 | * "Software"), to deal in the Software without restriction, including | ||
9 | * without limitation the rights to use, copy, modify, merge, publish, | ||
10 | * distribute, sub license, and/or sell copies of the Software, and to | ||
11 | * permit persons to whom the Software is furnished to do so, subject to | ||
12 | * the following conditions: | ||
13 | * | ||
14 | * The above copyright notice and this permission notice (including the | ||
15 | * next paragraph) shall be included in all copies or substantial portions | ||
16 | * of the Software. | ||
17 | * | ||
18 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
19 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
20 | * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL | ||
21 | * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, | ||
22 | * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR | ||
23 | * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE | ||
24 | * USE OR OTHER DEALINGS IN THE SOFTWARE. | ||
25 | * | ||
26 | **************************************************************************/ | ||
27 | |||
28 | #include "vmwgfx_drv.h" | ||
29 | #include "vmwgfx_resource_priv.h" | ||
30 | #include <ttm/ttm_placement.h> | ||
31 | |||
32 | /** | ||
33 | * struct vmw_user_surface - User-space visible surface resource | ||
34 | * | ||
35 | * @base: The TTM base object handling user-space visibility. | ||
36 | * @srf: The surface metadata. | ||
37 | * @size: TTM accounting size for the surface. | ||
38 | */ | ||
39 | struct vmw_user_surface { | ||
40 | struct ttm_base_object base; | ||
41 | struct vmw_surface srf; | ||
42 | uint32_t size; | ||
43 | uint32_t backup_handle; | ||
44 | }; | ||
45 | |||
46 | /** | ||
47 | * struct vmw_surface_offset - Backing store mip level offset info | ||
48 | * | ||
49 | * @face: Surface face. | ||
50 | * @mip: Mip level. | ||
51 | * @bo_offset: Offset into backing store of this mip level. | ||
52 | * | ||
53 | */ | ||
54 | struct vmw_surface_offset { | ||
55 | uint32_t face; | ||
56 | uint32_t mip; | ||
57 | uint32_t bo_offset; | ||
58 | }; | ||
59 | |||
60 | static void vmw_user_surface_free(struct vmw_resource *res); | ||
61 | static struct vmw_resource * | ||
62 | vmw_user_surface_base_to_res(struct ttm_base_object *base); | ||
63 | static int vmw_legacy_srf_bind(struct vmw_resource *res, | ||
64 | struct ttm_validate_buffer *val_buf); | ||
65 | static int vmw_legacy_srf_unbind(struct vmw_resource *res, | ||
66 | bool readback, | ||
67 | struct ttm_validate_buffer *val_buf); | ||
68 | static int vmw_legacy_srf_create(struct vmw_resource *res); | ||
69 | static int vmw_legacy_srf_destroy(struct vmw_resource *res); | ||
70 | |||
71 | static const struct vmw_user_resource_conv user_surface_conv = { | ||
72 | .object_type = VMW_RES_SURFACE, | ||
73 | .base_obj_to_res = vmw_user_surface_base_to_res, | ||
74 | .res_free = vmw_user_surface_free | ||
75 | }; | ||
76 | |||
77 | const struct vmw_user_resource_conv *user_surface_converter = | ||
78 | &user_surface_conv; | ||
79 | |||
80 | |||
81 | static uint64_t vmw_user_surface_size; | ||
82 | |||
83 | static const struct vmw_res_func vmw_legacy_surface_func = { | ||
84 | .res_type = vmw_res_surface, | ||
85 | .needs_backup = false, | ||
86 | .may_evict = true, | ||
87 | .type_name = "legacy surfaces", | ||
88 | .backup_placement = &vmw_srf_placement, | ||
89 | .create = &vmw_legacy_srf_create, | ||
90 | .destroy = &vmw_legacy_srf_destroy, | ||
91 | .bind = &vmw_legacy_srf_bind, | ||
92 | .unbind = &vmw_legacy_srf_unbind | ||
93 | }; | ||
94 | |||
95 | /** | ||
96 | * struct vmw_bpp - Bits per pixel info for surface storage size computation. | ||
97 | * | ||
98 | * @bpp: Bits per pixel. | ||
99 | * @s_bpp: Stride bits per pixel. See definition below. | ||
100 | * | ||
101 | */ | ||
102 | struct vmw_bpp { | ||
103 | uint8_t bpp; | ||
104 | uint8_t s_bpp; | ||
105 | }; | ||
106 | |||
107 | /* | ||
108 | * Size table for the supported SVGA3D surface formats. Each entry | ||
109 | * consists of two values: the bpp value and the s_bpp value, which is | ||
110 | * short for "stride bits per pixel". The values are chosen so that the | ||
111 | * minimum stride of the image, in bytes, can be calculated as | ||
112 | * | ||
113 | * min_stride = (w*s_bpp + 7) >> 3 | ||
114 | * | ||
115 | * and the total memory requirement for the image is | ||
116 | * | ||
117 | * h*min_stride*bpp/s_bpp | ||
118 | * | ||
119 | */ | ||
120 | static const struct vmw_bpp vmw_sf_bpp[] = { | ||
121 | [SVGA3D_FORMAT_INVALID] = {0, 0}, | ||
122 | [SVGA3D_X8R8G8B8] = {32, 32}, | ||
123 | [SVGA3D_A8R8G8B8] = {32, 32}, | ||
124 | [SVGA3D_R5G6B5] = {16, 16}, | ||
125 | [SVGA3D_X1R5G5B5] = {16, 16}, | ||
126 | [SVGA3D_A1R5G5B5] = {16, 16}, | ||
127 | [SVGA3D_A4R4G4B4] = {16, 16}, | ||
128 | [SVGA3D_Z_D32] = {32, 32}, | ||
129 | [SVGA3D_Z_D16] = {16, 16}, | ||
130 | [SVGA3D_Z_D24S8] = {32, 32}, | ||
131 | [SVGA3D_Z_D15S1] = {16, 16}, | ||
132 | [SVGA3D_LUMINANCE8] = {8, 8}, | ||
133 | [SVGA3D_LUMINANCE4_ALPHA4] = {8, 8}, | ||
134 | [SVGA3D_LUMINANCE16] = {16, 16}, | ||
135 | [SVGA3D_LUMINANCE8_ALPHA8] = {16, 16}, | ||
136 | [SVGA3D_DXT1] = {4, 16}, | ||
137 | [SVGA3D_DXT2] = {8, 32}, | ||
138 | [SVGA3D_DXT3] = {8, 32}, | ||
139 | [SVGA3D_DXT4] = {8, 32}, | ||
140 | [SVGA3D_DXT5] = {8, 32}, | ||
141 | [SVGA3D_BUMPU8V8] = {16, 16}, | ||
142 | [SVGA3D_BUMPL6V5U5] = {16, 16}, | ||
143 | [SVGA3D_BUMPX8L8V8U8] = {32, 32}, | ||
144 | [SVGA3D_ARGB_S10E5] = {16, 16}, | ||
145 | [SVGA3D_ARGB_S23E8] = {32, 32}, | ||
146 | [SVGA3D_A2R10G10B10] = {32, 32}, | ||
147 | [SVGA3D_V8U8] = {16, 16}, | ||
148 | [SVGA3D_Q8W8V8U8] = {32, 32}, | ||
149 | [SVGA3D_CxV8U8] = {16, 16}, | ||
150 | [SVGA3D_X8L8V8U8] = {32, 32}, | ||
151 | [SVGA3D_A2W10V10U10] = {32, 32}, | ||
152 | [SVGA3D_ALPHA8] = {8, 8}, | ||
153 | [SVGA3D_R_S10E5] = {16, 16}, | ||
154 | [SVGA3D_R_S23E8] = {32, 32}, | ||
155 | [SVGA3D_RG_S10E5] = {16, 16}, | ||
156 | [SVGA3D_RG_S23E8] = {32, 32}, | ||
157 | [SVGA3D_BUFFER] = {8, 8}, | ||
158 | [SVGA3D_Z_D24X8] = {32, 32}, | ||
159 | [SVGA3D_V16U16] = {32, 32}, | ||
160 | [SVGA3D_G16R16] = {32, 32}, | ||
161 | [SVGA3D_A16B16G16R16] = {64, 64}, | ||
162 | [SVGA3D_UYVY] = {12, 12}, | ||
163 | [SVGA3D_YUY2] = {12, 12}, | ||
164 | [SVGA3D_NV12] = {12, 8}, | ||
165 | [SVGA3D_AYUV] = {32, 32}, | ||
166 | [SVGA3D_BC4_UNORM] = {4, 16}, | ||
167 | [SVGA3D_BC5_UNORM] = {8, 32}, | ||
168 | [SVGA3D_Z_DF16] = {16, 16}, | ||
169 | [SVGA3D_Z_DF24] = {24, 24}, | ||
170 | [SVGA3D_Z_D24S8_INT] = {32, 32} | ||
171 | }; | ||
172 | |||
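A quick sanity check of the two formulas (an editor's worked example, not part of the file):

    SVGA3D_DXT1 {4, 16} at 64x64x1:
        min_stride = (64 * 16 + 7) >> 3 = 128 bytes
        size = 64 * 128 * 4 / 16 = 2048 bytes, i.e. 8 bytes per 4x4 block
    SVGA3D_NV12 {12, 8} at 64x64x1:
        min_stride = (64 * 8 + 7) >> 3 = 64 bytes
        size = 64 * 64 * 12 / 8 = 6144 bytes (4096 luma + 2048 interleaved chroma)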
173 | |||
174 | /** | ||
175 | * struct vmw_surface_dma - SVGA3D DMA command | ||
176 | */ | ||
177 | struct vmw_surface_dma { | ||
178 | SVGA3dCmdHeader header; | ||
179 | SVGA3dCmdSurfaceDMA body; | ||
180 | SVGA3dCopyBox cb; | ||
181 | SVGA3dCmdSurfaceDMASuffix suffix; | ||
182 | }; | ||
183 | |||
184 | /** | ||
185 | * struct vmw_surface_define - SVGA3D Surface Define command | ||
186 | */ | ||
187 | struct vmw_surface_define { | ||
188 | SVGA3dCmdHeader header; | ||
189 | SVGA3dCmdDefineSurface body; | ||
190 | }; | ||
191 | |||
192 | /** | ||
193 | * struct vmw_surface_destroy - SVGA3D Surface Destroy command | ||
194 | */ | ||
195 | struct vmw_surface_destroy { | ||
196 | SVGA3dCmdHeader header; | ||
197 | SVGA3dCmdDestroySurface body; | ||
198 | }; | ||
199 | |||
200 | |||
201 | /** | ||
202 | * vmw_surface_dma_size - Compute fifo size for a dma command. | ||
203 | * | ||
204 | * @srf: Pointer to a struct vmw_surface | ||
205 | * | ||
206 | * Computes the required size for a surface dma command for backup or | ||
207 | * restoration of the surface represented by @srf. | ||
208 | */ | ||
209 | static inline uint32_t vmw_surface_dma_size(const struct vmw_surface *srf) | ||
210 | { | ||
211 | return srf->num_sizes * sizeof(struct vmw_surface_dma); | ||
212 | } | ||
213 | |||
214 | |||
215 | /** | ||
216 | * vmw_surface_define_size - Compute fifo size for a surface define command. | ||
217 | * | ||
218 | * @srf: Pointer to a struct vmw_surface | ||
219 | * | ||
220 | * Computes the required size for a surface define command for the definition | ||
221 | * of the surface represented by @srf. | ||
222 | */ | ||
223 | static inline uint32_t vmw_surface_define_size(const struct vmw_surface *srf) | ||
224 | { | ||
225 | return sizeof(struct vmw_surface_define) + srf->num_sizes * | ||
226 | sizeof(SVGA3dSize); | ||
227 | } | ||
228 | |||
229 | |||
230 | /** | ||
231 | * vmw_surface_destroy_size - Compute fifo size for a surface destroy command. | ||
232 | * | ||
233 | * Computes the required size for a surface destroy command for the destruction | ||
234 | * of a hw surface. | ||
235 | */ | ||
236 | static inline uint32_t vmw_surface_destroy_size(void) | ||
237 | { | ||
238 | return sizeof(struct vmw_surface_destroy); | ||
239 | } | ||
240 | |||
241 | /** | ||
242 | * vmw_surface_destroy_encode - Encode a surface_destroy command. | ||
243 | * | ||
244 | * @id: The surface id | ||
245 | * @cmd_space: Pointer to memory area in which the commands should be encoded. | ||
246 | */ | ||
247 | static void vmw_surface_destroy_encode(uint32_t id, | ||
248 | void *cmd_space) | ||
249 | { | ||
250 | struct vmw_surface_destroy *cmd = (struct vmw_surface_destroy *) | ||
251 | cmd_space; | ||
252 | |||
253 | cmd->header.id = SVGA_3D_CMD_SURFACE_DESTROY; | ||
254 | cmd->header.size = sizeof(cmd->body); | ||
255 | cmd->body.sid = id; | ||
256 | } | ||
257 | |||
258 | /** | ||
259 | * vmw_surface_define_encode - Encode a surface_define command. | ||
260 | * | ||
261 | * @srf: Pointer to a struct vmw_surface object. | ||
262 | * @cmd_space: Pointer to memory area in which the commands should be encoded. | ||
263 | */ | ||
264 | static void vmw_surface_define_encode(const struct vmw_surface *srf, | ||
265 | void *cmd_space) | ||
266 | { | ||
267 | struct vmw_surface_define *cmd = (struct vmw_surface_define *) | ||
268 | cmd_space; | ||
269 | struct drm_vmw_size *src_size; | ||
270 | SVGA3dSize *cmd_size; | ||
271 | uint32_t cmd_len; | ||
272 | int i; | ||
273 | |||
274 | cmd_len = sizeof(cmd->body) + srf->num_sizes * sizeof(SVGA3dSize); | ||
275 | |||
276 | cmd->header.id = SVGA_3D_CMD_SURFACE_DEFINE; | ||
277 | cmd->header.size = cmd_len; | ||
278 | cmd->body.sid = srf->res.id; | ||
279 | cmd->body.surfaceFlags = srf->flags; | ||
280 | cmd->body.format = cpu_to_le32(srf->format); | ||
281 | for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) | ||
282 | cmd->body.face[i].numMipLevels = srf->mip_levels[i]; | ||
283 | |||
284 | cmd += 1; | ||
285 | cmd_size = (SVGA3dSize *) cmd; | ||
286 | src_size = srf->sizes; | ||
287 | |||
288 | for (i = 0; i < srf->num_sizes; ++i, cmd_size++, src_size++) { | ||
289 | cmd_size->width = src_size->width; | ||
290 | cmd_size->height = src_size->height; | ||
291 | cmd_size->depth = src_size->depth; | ||
292 | } | ||
293 | } | ||
294 | |||
295 | /** | ||
296 | * vmw_surface_dma_encode - Encode a surface_dma command. | ||
297 | * | ||
298 | * @srf: Pointer to a struct vmw_surface object. | ||
299 | * @cmd_space: Pointer to memory area in which the commands should be encoded. | ||
300 | * @ptr: Pointer to an SVGAGuestPtr indicating where the surface contents | ||
301 | * should be placed or read from. | ||
302 | * @to_surface: Boolean whether to DMA to the surface or from the surface. | ||
303 | */ | ||
304 | static void vmw_surface_dma_encode(struct vmw_surface *srf, | ||
305 | void *cmd_space, | ||
306 | const SVGAGuestPtr *ptr, | ||
307 | bool to_surface) | ||
308 | { | ||
309 | uint32_t i; | ||
310 | uint32_t bpp = vmw_sf_bpp[srf->format].bpp; | ||
311 | uint32_t stride_bpp = vmw_sf_bpp[srf->format].s_bpp; | ||
312 | struct vmw_surface_dma *cmd = (struct vmw_surface_dma *)cmd_space; | ||
313 | |||
314 | for (i = 0; i < srf->num_sizes; ++i) { | ||
315 | SVGA3dCmdHeader *header = &cmd->header; | ||
316 | SVGA3dCmdSurfaceDMA *body = &cmd->body; | ||
317 | SVGA3dCopyBox *cb = &cmd->cb; | ||
318 | SVGA3dCmdSurfaceDMASuffix *suffix = &cmd->suffix; | ||
319 | const struct vmw_surface_offset *cur_offset = &srf->offsets[i]; | ||
320 | const struct drm_vmw_size *cur_size = &srf->sizes[i]; | ||
321 | |||
322 | header->id = SVGA_3D_CMD_SURFACE_DMA; | ||
323 | header->size = sizeof(*body) + sizeof(*cb) + sizeof(*suffix); | ||
324 | |||
325 | body->guest.ptr = *ptr; | ||
326 | body->guest.ptr.offset += cur_offset->bo_offset; | ||
327 | body->guest.pitch = (cur_size->width * stride_bpp + 7) >> 3; | ||
328 | body->host.sid = srf->res.id; | ||
329 | body->host.face = cur_offset->face; | ||
330 | body->host.mipmap = cur_offset->mip; | ||
331 | body->transfer = ((to_surface) ? SVGA3D_WRITE_HOST_VRAM : | ||
332 | SVGA3D_READ_HOST_VRAM); | ||
333 | cb->x = 0; | ||
334 | cb->y = 0; | ||
335 | cb->z = 0; | ||
336 | cb->srcx = 0; | ||
337 | cb->srcy = 0; | ||
338 | cb->srcz = 0; | ||
339 | cb->w = cur_size->width; | ||
340 | cb->h = cur_size->height; | ||
341 | cb->d = cur_size->depth; | ||
342 | |||
343 | suffix->suffixSize = sizeof(*suffix); | ||
344 | suffix->maximumOffset = body->guest.pitch*cur_size->height* | ||
345 | cur_size->depth*bpp / stride_bpp; | ||
346 | suffix->flags.discard = 0; | ||
347 | suffix->flags.unsynchronized = 0; | ||
348 | suffix->flags.reserved = 0; | ||
349 | ++cmd; | ||
350 | } | ||
351 | } | ||
352 | |||
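Each size entry thus yields one self-contained DMA command, so for num_sizes == 2 the reserved FIFO space is filled back to back (sketch):

    [ header | body | copy box | suffix ]   face 0, mip 0
    [ header | body | copy box | suffix ]   face 0, mip 1

which is exactly the num_sizes * sizeof(struct vmw_surface_dma) bytes accounted for by vmw_surface_dma_size() above.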
353 | |||
354 | /** | ||
355 | * vmw_hw_surface_destroy - destroy a device surface | ||
356 | * | ||
357 | * @res: Pointer to a struct vmw_resource embedded in a struct | ||
358 | * vmw_surface. | ||
359 | * | ||
360 | * Destroys the device surface associated with a struct vmw_surface, if | ||
361 | * any, and adjusts accounting and resource count accordingly. | ||
362 | */ | ||
363 | static void vmw_hw_surface_destroy(struct vmw_resource *res) | ||
364 | { | ||
365 | |||
366 | struct vmw_private *dev_priv = res->dev_priv; | ||
367 | struct vmw_surface *srf; | ||
368 | void *cmd; | ||
369 | |||
370 | if (res->id != -1) { | ||
371 | |||
372 | cmd = vmw_fifo_reserve(dev_priv, vmw_surface_destroy_size()); | ||
373 | if (unlikely(cmd == NULL)) { | ||
374 | DRM_ERROR("Failed reserving FIFO space for surface " | ||
375 | "destruction.\n"); | ||
376 | return; | ||
377 | } | ||
378 | |||
379 | vmw_surface_destroy_encode(res->id, cmd); | ||
380 | vmw_fifo_commit(dev_priv, vmw_surface_destroy_size()); | ||
381 | |||
382 | /* | ||
383 | * TODO: Make used_memory_size an atomic, or use a separate lock, | ||
384 | * to avoid taking dev_priv::cmdbuf_mutex in | ||
385 | * the destroy path. | ||
386 | */ | ||
387 | |||
388 | mutex_lock(&dev_priv->cmdbuf_mutex); | ||
389 | srf = vmw_res_to_srf(res); | ||
390 | dev_priv->used_memory_size -= res->backup_size; | ||
391 | mutex_unlock(&dev_priv->cmdbuf_mutex); | ||
392 | } | ||
393 | vmw_3d_resource_dec(dev_priv, false); | ||
394 | } | ||
395 | |||
396 | /** | ||
397 | * vmw_legacy_srf_create - Create a device surface as part of the | ||
398 | * resource validation process. | ||
399 | * | ||
400 | * @res: Pointer to a struct vmw_surface. | ||
401 | * | ||
402 | * If the surface doesn't have a hw id, one is allocated and the surface is defined. | ||
403 | * | ||
404 | * Returns -EBUSY if there weren't sufficient device resources to | ||
405 | * complete the validation. Retry after freeing up resources. | ||
406 | * | ||
407 | * May return other errors if the kernel is out of guest resources. | ||
408 | */ | ||
409 | static int vmw_legacy_srf_create(struct vmw_resource *res) | ||
410 | { | ||
411 | struct vmw_private *dev_priv = res->dev_priv; | ||
412 | struct vmw_surface *srf; | ||
413 | uint32_t submit_size; | ||
414 | uint8_t *cmd; | ||
415 | int ret; | ||
416 | |||
417 | if (likely(res->id != -1)) | ||
418 | return 0; | ||
419 | |||
420 | srf = vmw_res_to_srf(res); | ||
421 | if (unlikely(dev_priv->used_memory_size + res->backup_size >= | ||
422 | dev_priv->memory_size)) | ||
423 | return -EBUSY; | ||
424 | |||
425 | /* | ||
426 | * Alloc id for the resource. | ||
427 | */ | ||
428 | |||
429 | ret = vmw_resource_alloc_id(res); | ||
430 | if (unlikely(ret != 0)) { | ||
431 | DRM_ERROR("Failed to allocate a surface id.\n"); | ||
432 | goto out_no_id; | ||
433 | } | ||
434 | |||
435 | if (unlikely(res->id >= SVGA3D_MAX_SURFACE_IDS)) { | ||
436 | ret = -EBUSY; | ||
437 | goto out_no_fifo; | ||
438 | } | ||
439 | |||
440 | /* | ||
441 | * Encode the surface define command. | ||
442 | */ | ||
443 | |||
444 | submit_size = vmw_surface_define_size(srf); | ||
445 | cmd = vmw_fifo_reserve(dev_priv, submit_size); | ||
446 | if (unlikely(cmd == NULL)) { | ||
447 | DRM_ERROR("Failed reserving FIFO space for surface " | ||
448 | "creation.\n"); | ||
449 | ret = -ENOMEM; | ||
450 | goto out_no_fifo; | ||
451 | } | ||
452 | |||
453 | vmw_surface_define_encode(srf, cmd); | ||
454 | vmw_fifo_commit(dev_priv, submit_size); | ||
455 | /* | ||
456 | * Surface memory usage accounting. | ||
457 | */ | ||
458 | |||
459 | dev_priv->used_memory_size += res->backup_size; | ||
460 | return 0; | ||
461 | |||
462 | out_no_fifo: | ||
463 | vmw_resource_release_id(res); | ||
464 | out_no_id: | ||
465 | return ret; | ||
466 | } | ||
467 | |||
468 | /** | ||
469 | * vmw_legacy_srf_dma - Copy backup data to or from a legacy surface. | ||
470 | * | ||
471 | * @res: Pointer to a struct vmw_resource embedded in a struct | ||
472 | * vmw_surface. | ||
473 | * @val_buf: Pointer to a struct ttm_validate_buffer containing | ||
474 | * information about the backup buffer. | ||
475 | * @bind: Boolean whether to DMA to the surface. | ||
476 | * | ||
477 | * Transfer backup data to or from a legacy surface as part of the | ||
478 | * validation process. | ||
479 | * May return other errors if the kernel is out of guest resources. | ||
480 | * The backup buffer will be fenced or idle upon successful completion, | ||
481 | * and if the surface needs persistent backup storage, the backup buffer | ||
482 | * will also be returned reserved iff @bind is true. | ||
483 | */ | ||
484 | static int vmw_legacy_srf_dma(struct vmw_resource *res, | ||
485 | struct ttm_validate_buffer *val_buf, | ||
486 | bool bind) | ||
487 | { | ||
488 | SVGAGuestPtr ptr; | ||
489 | struct vmw_fence_obj *fence; | ||
490 | uint32_t submit_size; | ||
491 | struct vmw_surface *srf = vmw_res_to_srf(res); | ||
492 | uint8_t *cmd; | ||
493 | struct vmw_private *dev_priv = res->dev_priv; | ||
494 | |||
495 | BUG_ON(val_buf->bo == NULL); | ||
496 | |||
497 | submit_size = vmw_surface_dma_size(srf); | ||
498 | cmd = vmw_fifo_reserve(dev_priv, submit_size); | ||
499 | if (unlikely(cmd == NULL)) { | ||
500 | DRM_ERROR("Failed reserving FIFO space for surface " | ||
501 | "DMA.\n"); | ||
502 | return -ENOMEM; | ||
503 | } | ||
504 | vmw_bo_get_guest_ptr(val_buf->bo, &ptr); | ||
505 | vmw_surface_dma_encode(srf, cmd, &ptr, bind); | ||
506 | |||
507 | vmw_fifo_commit(dev_priv, submit_size); | ||
508 | |||
509 | /* | ||
510 | * Create a fence object and fence the backup buffer. | ||
511 | */ | ||
512 | |||
513 | (void) vmw_execbuf_fence_commands(NULL, dev_priv, | ||
514 | &fence, NULL); | ||
515 | |||
516 | vmw_fence_single_bo(val_buf->bo, fence); | ||
517 | |||
518 | if (likely(fence != NULL)) | ||
519 | vmw_fence_obj_unreference(&fence); | ||
520 | |||
521 | return 0; | ||
522 | } | ||
523 | |||
524 | /** | ||
525 | * vmw_legacy_srf_bind - Perform a legacy surface bind as part of the | ||
526 | * surface validation process. | ||
527 | * | ||
528 | * @res: Pointer to a struct vmw_resource embedded in a struct | ||
529 | * vmw_surface. | ||
530 | * @val_buf: Pointer to a struct ttm_validate_buffer containing | ||
531 | * information about the backup buffer. | ||
532 | * | ||
533 | * This function will copy backup data to the surface if the | ||
534 | * backup buffer is dirty. | ||
535 | */ | ||
536 | static int vmw_legacy_srf_bind(struct vmw_resource *res, | ||
537 | struct ttm_validate_buffer *val_buf) | ||
538 | { | ||
539 | if (!res->backup_dirty) | ||
540 | return 0; | ||
541 | |||
542 | return vmw_legacy_srf_dma(res, val_buf, true); | ||
543 | } | ||
544 | |||
545 | |||
546 | /** | ||
547 | * vmw_legacy_srf_unbind - Perform a legacy surface unbind as part of the | ||
548 | * surface eviction process. | ||
549 | * | ||
550 | * @res: Pointer to a struct vmw_resource embedded in a struct | ||
551 | * vmw_surface. | ||
552 | * @val_buf: Pointer to a struct ttm_validate_buffer containing | ||
553 | * information about the backup buffer. | ||
554 | * | ||
555 | * This function will copy backup data from the surface if @readback is true. | ||
556 | */ | ||
557 | static int vmw_legacy_srf_unbind(struct vmw_resource *res, | ||
558 | bool readback, | ||
559 | struct ttm_validate_buffer *val_buf) | ||
560 | { | ||
561 | if (unlikely(readback)) | ||
562 | return vmw_legacy_srf_dma(res, val_buf, false); | ||
563 | return 0; | ||
564 | } | ||
565 | |||
566 | /** | ||
567 | * vmw_legacy_srf_destroy - Destroy a device surface as part of a | ||
568 | * resource eviction process. | ||
569 | * | ||
570 | * @res: Pointer to a struct vmw_resource embedded in a struct | ||
571 | * vmw_surface. | ||
572 | */ | ||
573 | static int vmw_legacy_srf_destroy(struct vmw_resource *res) | ||
574 | { | ||
575 | struct vmw_private *dev_priv = res->dev_priv; | ||
576 | uint32_t submit_size; | ||
577 | uint8_t *cmd; | ||
578 | |||
579 | BUG_ON(res->id == -1); | ||
580 | |||
581 | /* | ||
582 | * Encode the surface destroy command. | ||
583 | */ | ||
584 | |||
585 | submit_size = vmw_surface_destroy_size(); | ||
586 | cmd = vmw_fifo_reserve(dev_priv, submit_size); | ||
587 | if (unlikely(cmd == NULL)) { | ||
588 | DRM_ERROR("Failed reserving FIFO space for surface " | ||
589 | "eviction.\n"); | ||
590 | return -ENOMEM; | ||
591 | } | ||
592 | |||
593 | vmw_surface_destroy_encode(res->id, cmd); | ||
594 | vmw_fifo_commit(dev_priv, submit_size); | ||
595 | |||
596 | /* | ||
597 | * Surface memory usage accounting. | ||
598 | */ | ||
599 | |||
600 | dev_priv->used_memory_size -= res->backup_size; | ||
601 | |||
602 | /* | ||
603 | * Release the surface ID. | ||
604 | */ | ||
605 | |||
606 | vmw_resource_release_id(res); | ||
607 | |||
608 | return 0; | ||
609 | } | ||
610 | |||
611 | |||
612 | /** | ||
613 | * vmw_surface_init - initialize a struct vmw_surface | ||
614 | * | ||
615 | * @dev_priv: Pointer to a device private struct. | ||
616 | * @srf: Pointer to the struct vmw_surface to initialize. | ||
617 | * @res_free: Pointer to a resource destructor used to free | ||
618 | * the object. | ||
619 | */ | ||
620 | static int vmw_surface_init(struct vmw_private *dev_priv, | ||
621 | struct vmw_surface *srf, | ||
622 | void (*res_free) (struct vmw_resource *res)) | ||
623 | { | ||
624 | int ret; | ||
625 | struct vmw_resource *res = &srf->res; | ||
626 | |||
627 | BUG_ON(res_free == NULL); | ||
628 | (void) vmw_3d_resource_inc(dev_priv, false); | ||
629 | ret = vmw_resource_init(dev_priv, res, true, res_free, | ||
630 | &vmw_legacy_surface_func); | ||
631 | |||
632 | if (unlikely(ret != 0)) { | ||
633 | vmw_3d_resource_dec(dev_priv, false); | ||
634 | res_free(res); | ||
635 | return ret; | ||
636 | } | ||
637 | |||
638 | /* | ||
639 | * The surface won't be visible to hardware until a | ||
640 | * surface validate. | ||
641 | */ | ||
642 | |||
643 | vmw_resource_activate(res, vmw_hw_surface_destroy); | ||
644 | return ret; | ||
645 | } | ||
646 | |||
647 | /** | ||
648 | * vmw_user_surface_base_to_res - TTM base object to resource converter for | ||
649 | * user visible surfaces | ||
650 | * | ||
651 | * @base: Pointer to a TTM base object | ||
652 | * | ||
653 | * Returns the struct vmw_resource embedded in a struct vmw_surface | ||
654 | * for the user-visible object identified by the TTM base object @base. | ||
655 | */ | ||
656 | static struct vmw_resource * | ||
657 | vmw_user_surface_base_to_res(struct ttm_base_object *base) | ||
658 | { | ||
659 | return &(container_of(base, struct vmw_user_surface, base)->srf.res); | ||
660 | } | ||
661 | |||
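Expanded into steps, the conversion walks the double embedding of struct vmw_resource inside struct vmw_surface inside struct vmw_user_surface (an equivalent sketch of the one-liner above, assuming the declarations earlier in this file):

    static struct vmw_resource *
    vmw_user_surface_base_to_res_expanded(struct ttm_base_object *base)
    {
            struct vmw_user_surface *user_srf =
                    container_of(base, struct vmw_user_surface, base);
            struct vmw_surface *srf = &user_srf->srf;  /* embedded surface */

            return &srf->res;                          /* embedded resource */
    }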
662 | /** | ||
663 | * vmw_user_surface_free - User visible surface resource destructor | ||
664 | * | ||
665 | * @res: A struct vmw_resource embedded in a struct vmw_surface. | ||
666 | */ | ||
667 | static void vmw_user_surface_free(struct vmw_resource *res) | ||
668 | { | ||
669 | struct vmw_surface *srf = vmw_res_to_srf(res); | ||
670 | struct vmw_user_surface *user_srf = | ||
671 | container_of(srf, struct vmw_user_surface, srf); | ||
672 | struct vmw_private *dev_priv = srf->res.dev_priv; | ||
673 | uint32_t size = user_srf->size; | ||
674 | |||
675 | kfree(srf->offsets); | ||
676 | kfree(srf->sizes); | ||
677 | kfree(srf->snooper.image); | ||
678 | ttm_base_object_kfree(user_srf, base); | ||
679 | ttm_mem_global_free(vmw_mem_glob(dev_priv), size); | ||
680 | } | ||
681 | |||
682 | /** | ||
683 | * vmw_user_surface_base_release - User visible surface TTM base object destructor | ||
684 | * | ||
685 | * @p_base: Pointer to a pointer to a TTM base object | ||
686 | * embedded in a struct vmw_user_surface. | ||
687 | * | ||
688 | * Drops the base object's reference on its resource, and the | ||
689 | * pointer pointed to by *p_base is set to NULL. | ||
690 | */ | ||
691 | static void vmw_user_surface_base_release(struct ttm_base_object **p_base) | ||
692 | { | ||
693 | struct ttm_base_object *base = *p_base; | ||
694 | struct vmw_user_surface *user_srf = | ||
695 | container_of(base, struct vmw_user_surface, base); | ||
696 | struct vmw_resource *res = &user_srf->srf.res; | ||
697 | |||
698 | *p_base = NULL; | ||
699 | vmw_resource_unreference(&res); | ||
700 | } | ||
701 | |||
702 | /** | ||
703 | * vmw_user_surface_destroy_ioctl - Ioctl function implementing | ||
704 | * the user surface destroy functionality. | ||
705 | * | ||
706 | * @dev: Pointer to a struct drm_device. | ||
707 | * @data: Pointer to data copied from / to user-space. | ||
708 | * @file_priv: Pointer to a drm file private structure. | ||
709 | */ | ||
710 | int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data, | ||
711 | struct drm_file *file_priv) | ||
712 | { | ||
713 | struct drm_vmw_surface_arg *arg = (struct drm_vmw_surface_arg *)data; | ||
714 | struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; | ||
715 | |||
716 | return ttm_ref_object_base_unref(tfile, arg->sid, TTM_REF_USAGE); | ||
717 | } | ||
718 | |||
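From user-space this is a single ioctl; a hedged sketch using libdrm's command helper (assumes xf86drm.h and vmwgfx_drm.h are included, with fd and sid supplied by the caller):

    struct drm_vmw_surface_arg s_arg = { .sid = sid };

    /* Drops this file's TTM_REF_USAGE reference on the surface. */
    ret = drmCommandWrite(fd, DRM_VMW_UNREF_SURFACE, &s_arg, sizeof(s_arg));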
719 | /** | ||
720 | * vmw_user_surface_define_ioctl - Ioctl function implementing | ||
721 | * the user surface define functionality. | ||
722 | * | ||
723 | * @dev: Pointer to a struct drm_device. | ||
724 | * @data: Pointer to data copied from / to user-space. | ||
725 | * @file_priv: Pointer to a drm file private structure. | ||
726 | */ | ||
727 | int vmw_surface_define_ioctl(struct drm_device *dev, void *data, | ||
728 | struct drm_file *file_priv) | ||
729 | { | ||
730 | struct vmw_private *dev_priv = vmw_priv(dev); | ||
731 | struct vmw_user_surface *user_srf; | ||
732 | struct vmw_surface *srf; | ||
733 | struct vmw_resource *res; | ||
734 | struct vmw_resource *tmp; | ||
735 | union drm_vmw_surface_create_arg *arg = | ||
736 | (union drm_vmw_surface_create_arg *)data; | ||
737 | struct drm_vmw_surface_create_req *req = &arg->req; | ||
738 | struct drm_vmw_surface_arg *rep = &arg->rep; | ||
739 | struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; | ||
740 | struct drm_vmw_size __user *user_sizes; | ||
741 | int ret; | ||
742 | int i, j; | ||
743 | uint32_t cur_bo_offset; | ||
744 | struct drm_vmw_size *cur_size; | ||
745 | struct vmw_surface_offset *cur_offset; | ||
746 | uint32_t stride_bpp; | ||
747 | uint32_t bpp; | ||
748 | uint32_t num_sizes; | ||
749 | uint32_t size; | ||
750 | struct vmw_master *vmaster = vmw_master(file_priv->master); | ||
751 | |||
752 | if (unlikely(vmw_user_surface_size == 0)) | ||
753 | vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) + | ||
754 | 128; | ||
755 | |||
756 | num_sizes = 0; | ||
757 | for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) | ||
758 | num_sizes += req->mip_levels[i]; | ||
759 | |||
760 | if (num_sizes > DRM_VMW_MAX_SURFACE_FACES * | ||
761 | DRM_VMW_MAX_MIP_LEVELS || num_sizes == 0) | ||
762 | return -EINVAL; | ||
763 | |||
764 | size = vmw_user_surface_size + 128 + | ||
765 | ttm_round_pot(num_sizes * sizeof(struct drm_vmw_size)) + | ||
766 | ttm_round_pot(num_sizes * sizeof(struct vmw_surface_offset)); | ||
767 | |||
768 | |||
769 | ret = ttm_read_lock(&vmaster->lock, true); | ||
770 | if (unlikely(ret != 0)) | ||
771 | return ret; | ||
772 | |||
773 | ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), | ||
774 | size, false, true); | ||
775 | if (unlikely(ret != 0)) { | ||
776 | if (ret != -ERESTARTSYS) | ||
777 | DRM_ERROR("Out of graphics memory for surface" | ||
778 | " creation.\n"); | ||
779 | goto out_unlock; | ||
780 | } | ||
781 | |||
782 | user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL); | ||
783 | if (unlikely(user_srf == NULL)) { | ||
784 | ret = -ENOMEM; | ||
785 | goto out_no_user_srf; | ||
786 | } | ||
787 | |||
788 | srf = &user_srf->srf; | ||
789 | res = &srf->res; | ||
790 | |||
791 | srf->flags = req->flags; | ||
792 | srf->format = req->format; | ||
793 | srf->scanout = req->scanout; | ||
794 | |||
795 | memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels)); | ||
796 | srf->num_sizes = num_sizes; | ||
797 | user_srf->size = size; | ||
798 | |||
799 | srf->sizes = kmalloc(srf->num_sizes * sizeof(*srf->sizes), GFP_KERNEL); | ||
800 | if (unlikely(srf->sizes == NULL)) { | ||
801 | ret = -ENOMEM; | ||
802 | goto out_no_sizes; | ||
803 | } | ||
804 | srf->offsets = kmalloc(srf->num_sizes * sizeof(*srf->offsets), | ||
805 | GFP_KERNEL); | ||
806 | if (unlikely(srf->offsets == NULL)) { | ||
807 | ret = -ENOMEM; | ||
808 | goto out_no_offsets; | ||
809 | } | ||
810 | |||
811 | user_sizes = (struct drm_vmw_size __user *)(unsigned long) | ||
812 | req->size_addr; | ||
813 | |||
814 | ret = copy_from_user(srf->sizes, user_sizes, | ||
815 | srf->num_sizes * sizeof(*srf->sizes)); | ||
816 | if (unlikely(ret != 0)) { | ||
817 | ret = -EFAULT; | ||
818 | goto out_no_copy; | ||
819 | } | ||
820 | |||
821 | srf->base_size = *srf->sizes; | ||
822 | srf->autogen_filter = SVGA3D_TEX_FILTER_NONE; | ||
823 | srf->multisample_count = 1; | ||
824 | |||
825 | cur_bo_offset = 0; | ||
826 | cur_offset = srf->offsets; | ||
827 | cur_size = srf->sizes; | ||
828 | |||
829 | bpp = vmw_sf_bpp[srf->format].bpp; | ||
830 | stride_bpp = vmw_sf_bpp[srf->format].s_bpp; | ||
831 | |||
832 | for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) { | ||
833 | for (j = 0; j < srf->mip_levels[i]; ++j) { | ||
834 | uint32_t stride = | ||
835 | (cur_size->width * stride_bpp + 7) >> 3; | ||
836 | |||
837 | cur_offset->face = i; | ||
838 | cur_offset->mip = j; | ||
839 | cur_offset->bo_offset = cur_bo_offset; | ||
840 | cur_bo_offset += stride * cur_size->height * | ||
841 | cur_size->depth * bpp / stride_bpp; | ||
842 | ++cur_offset; | ||
843 | ++cur_size; | ||
844 | } | ||
845 | } | ||
846 | res->backup_size = cur_bo_offset; | ||
847 | |||
848 | if (srf->scanout && | ||
849 | srf->num_sizes == 1 && | ||
850 | srf->sizes[0].width == 64 && | ||
851 | srf->sizes[0].height == 64 && | ||
852 | srf->format == SVGA3D_A8R8G8B8) { | ||
853 | |||
854 | srf->snooper.image = kmalloc(64 * 64 * 4, GFP_KERNEL); | ||
855 | if (srf->snooper.image) { | ||
856 | /* clear the cursor image */ | ||
857 | memset(srf->snooper.image, 0x00, 64 * 64 * 4); | ||
858 | } else { | ||
859 | DRM_ERROR("Failed to allocate cursor_image\n"); | ||
860 | ret = -ENOMEM; | ||
861 | goto out_no_copy; | ||
862 | } | ||
863 | } else { | ||
864 | srf->snooper.image = NULL; | ||
865 | } | ||
866 | srf->snooper.crtc = NULL; | ||
867 | |||
868 | user_srf->base.shareable = false; | ||
869 | user_srf->base.tfile = NULL; | ||
870 | |||
871 | /* | ||
872 | * From this point, the generic resource management functions | ||
873 | * destroy the object on failure. | ||
874 | */ | ||
875 | |||
876 | ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free); | ||
877 | if (unlikely(ret != 0)) | ||
878 | goto out_unlock; | ||
879 | |||
880 | tmp = vmw_resource_reference(&srf->res); | ||
881 | ret = ttm_base_object_init(tfile, &user_srf->base, | ||
882 | req->shareable, VMW_RES_SURFACE, | ||
883 | &vmw_user_surface_base_release, NULL); | ||
884 | |||
885 | if (unlikely(ret != 0)) { | ||
886 | vmw_resource_unreference(&tmp); | ||
887 | vmw_resource_unreference(&res); | ||
888 | goto out_unlock; | ||
889 | } | ||
890 | |||
891 | rep->sid = user_srf->base.hash.key; | ||
892 | vmw_resource_unreference(&res); | ||
893 | |||
894 | ttm_read_unlock(&vmaster->lock); | ||
895 | return 0; | ||
896 | out_no_copy: | ||
897 | kfree(srf->offsets); | ||
898 | out_no_offsets: | ||
899 | kfree(srf->sizes); | ||
900 | out_no_sizes: | ||
901 | ttm_base_object_kfree(user_srf, base); | ||
902 | out_no_user_srf: | ||
903 | ttm_mem_global_free(vmw_mem_glob(dev_priv), size); | ||
904 | out_unlock: | ||
905 | ttm_read_unlock(&vmaster->lock); | ||
906 | return ret; | ||
907 | } | ||
908 | |||
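Seen from user-space, a minimal use of this ioctl might look as follows (a sketch assuming libdrm's drmCommandWriteRead(), an open vmwgfx fd, and the SVGA3D_A8R8G8B8 format value from the SVGA headers; error handling omitted):

    struct drm_vmw_size size = { .width = 64, .height = 64, .depth = 1 };
    union drm_vmw_surface_create_arg arg;

    memset(&arg, 0, sizeof(arg));
    arg.req.format = SVGA3D_A8R8G8B8;
    arg.req.scanout = 1;
    arg.req.mip_levels[0] = 1;              /* one face, one mip level */
    arg.req.size_addr = (unsigned long)&size;

    ret = drmCommandWriteRead(fd, DRM_VMW_CREATE_SURFACE,
                              &arg, sizeof(arg));
    /* On success, arg.rep.sid holds the new surface handle. */

With these parameters the surface also matches the 64x64 A8R8G8B8 scanout case above, so the kernel allocates the cursor snooper image for it.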
909 | /** | ||
911 | * vmw_surface_reference_ioctl - Ioctl function implementing | ||
911 | * the user surface reference functionality. | ||
912 | * | ||
913 | * @dev: Pointer to a struct drm_device. | ||
914 | * @data: Pointer to data copied from / to user-space. | ||
915 | * @file_priv: Pointer to a drm file private structure. | ||
916 | */ | ||
917 | int vmw_surface_reference_ioctl(struct drm_device *dev, void *data, | ||
918 | struct drm_file *file_priv) | ||
919 | { | ||
920 | union drm_vmw_surface_reference_arg *arg = | ||
921 | (union drm_vmw_surface_reference_arg *)data; | ||
922 | struct drm_vmw_surface_arg *req = &arg->req; | ||
923 | struct drm_vmw_surface_create_req *rep = &arg->rep; | ||
924 | struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; | ||
925 | struct vmw_surface *srf; | ||
926 | struct vmw_user_surface *user_srf; | ||
927 | struct drm_vmw_size __user *user_sizes; | ||
928 | struct ttm_base_object *base; | ||
929 | int ret = -EINVAL; | ||
930 | |||
931 | base = ttm_base_object_lookup(tfile, req->sid); | ||
932 | if (unlikely(base == NULL)) { | ||
933 | DRM_ERROR("Could not find surface to reference.\n"); | ||
934 | return -EINVAL; | ||
935 | } | ||
936 | |||
937 | if (unlikely(base->object_type != VMW_RES_SURFACE)) | ||
938 | goto out_bad_resource; | ||
939 | |||
940 | user_srf = container_of(base, struct vmw_user_surface, base); | ||
941 | srf = &user_srf->srf; | ||
942 | |||
943 | ret = ttm_ref_object_add(tfile, &user_srf->base, TTM_REF_USAGE, NULL); | ||
944 | if (unlikely(ret != 0)) { | ||
945 | DRM_ERROR("Could not add a reference to a surface.\n"); | ||
946 | goto out_no_reference; | ||
947 | } | ||
948 | |||
949 | rep->flags = srf->flags; | ||
950 | rep->format = srf->format; | ||
951 | memcpy(rep->mip_levels, srf->mip_levels, sizeof(srf->mip_levels)); | ||
952 | user_sizes = (struct drm_vmw_size __user *)(unsigned long) | ||
953 | rep->size_addr; | ||
954 | |||
955 | if (user_sizes) | ||
956 | ret = copy_to_user(user_sizes, srf->sizes, | ||
957 | srf->num_sizes * sizeof(*srf->sizes)); | ||
958 | if (unlikely(ret != 0)) { | ||
959 | DRM_ERROR("copy_to_user failed %p %u\n", | ||
960 | user_sizes, srf->num_sizes); | ||
961 | ret = -EFAULT; | ||
962 | } | ||
963 | out_bad_resource: | ||
964 | out_no_reference: | ||
965 | ttm_base_object_unref(&base); | ||
966 | |||
967 | return ret; | ||
968 | } | ||
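The matching user-space call passes the sid in through req and a buffer for the sizes out through rep of the same union (a sketch under the same libdrm assumptions as the create example above):

    struct drm_vmw_size sizes[DRM_VMW_MAX_SURFACE_FACES *
                              DRM_VMW_MAX_MIP_LEVELS];
    union drm_vmw_surface_reference_arg arg;

    memset(&arg, 0, sizeof(arg));
    arg.req.sid = sid;
    arg.rep.size_addr = (unsigned long)sizes;

    ret = drmCommandWriteRead(fd, DRM_VMW_REF_SURFACE,
                              &arg, sizeof(arg));
    /* On success, arg.rep holds flags, format and mip_levels[], and
     * sizes[] contains one drm_vmw_size per defined mip level. */

Note that size_addr is deliberately placed in (and read back from) the rep half of the union, matching the rep->size_addr read in the kernel code above.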