author		Dan Carpenter <dan.carpenter@oracle.com>	2015-09-23 06:59:28 -0400
committer	Alex Deucher <alexander.deucher@amd.com>	2015-09-23 17:23:43 -0400
commit		1d263474c4416efb6d0feca98fe6d462b0d28f56 (patch)
tree		7853d95fdbadc7427fa9e5ae40ae1145db30c847
parent		5a6adfa20b622a273205e33b20c12332aa7eb724 (diff)
drm/amdgpu: unwind properly in amdgpu_cs_parser_init()
The amdgpu_cs_parser_init() function doesn't clean up after itself; instead,
on failure the caller uses a free-everything function, amdgpu_cs_parser_fini().
This style of error handling is often buggy.  In this example, we call
"drm_free_large(parser->chunks[i].kdata);" when it is an uninitialized
pointer or when "parser->chunks" is NULL.

I fixed this bug by adding unwind code so that the function frees everything
that it allocates.

I also made some other very minor changes:
1) Renamed "r" to "ret".
2) Moved the chunk_array allocation to the start of the function.
3) Removed some initializers which are no longer needed.

Reviewed-by: Christian König <christian.koenig@amd.com>
Reported-by: Ilja Van Sprundel <ivansprundel@ioactive.com>
Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
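In miniature, the unwind idiom the patch switches to looks like this (a
sketch with a hypothetical struct and member names, not code from the patch;
it assumes normal kernel context, i.e. linux/slab.h for kmalloc/kfree):

	int setup(struct parser *p)
	{
		int ret;

		p->a = kmalloc(sizeof(*p->a), GFP_KERNEL);
		if (!p->a)
			return -ENOMEM;

		p->b = kmalloc(sizeof(*p->b), GFP_KERNEL);
		if (!p->b) {
			ret = -ENOMEM;
			goto free_a;
		}

		p->c = kmalloc(sizeof(*p->c), GFP_KERNEL);
		if (!p->c) {
			ret = -ENOMEM;
			goto free_b;
		}

		return 0;

	free_b:
		/* labels unwind in reverse allocation order */
		kfree(p->b);
	free_a:
		kfree(p->a);
		return ret;
	}

Each error path jumps to a label that frees only what was successfully
allocated before the failure, so kfree() is never handed an uninitialized
pointer.  That is exactly the property the free-everything
amdgpu_cs_parser_fini() approach lacked.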
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c	85
1 file changed, 51 insertions(+), 34 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index b74b6a8e80a6..749420f1ea6f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -154,42 +154,41 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
 {
 	union drm_amdgpu_cs *cs = data;
 	uint64_t *chunk_array_user;
-	uint64_t *chunk_array = NULL;
+	uint64_t *chunk_array;
 	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
 	unsigned size, i;
-	int r = 0;
+	int ret;
 
-	if (!cs->in.num_chunks)
-		goto out;
+	if (cs->in.num_chunks == 0)
+		return 0;
+
+	chunk_array = kmalloc_array(cs->in.num_chunks, sizeof(uint64_t), GFP_KERNEL);
+	if (!chunk_array)
+		return -ENOMEM;
 
 	p->ctx = amdgpu_ctx_get(fpriv, cs->in.ctx_id);
 	if (!p->ctx) {
-		r = -EINVAL;
-		goto out;
+		ret = -EINVAL;
+		goto free_chunk;
 	}
+
 	p->bo_list = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle);
 
 	/* get chunks */
 	INIT_LIST_HEAD(&p->validated);
-	chunk_array = kmalloc_array(cs->in.num_chunks, sizeof(uint64_t), GFP_KERNEL);
-	if (chunk_array == NULL) {
-		r = -ENOMEM;
-		goto out;
-	}
-
 	chunk_array_user = (uint64_t __user *)(cs->in.chunks);
 	if (copy_from_user(chunk_array, chunk_array_user,
 			   sizeof(uint64_t)*cs->in.num_chunks)) {
-		r = -EFAULT;
-		goto out;
+		ret = -EFAULT;
+		goto put_bo_list;
 	}
 
 	p->nchunks = cs->in.num_chunks;
 	p->chunks = kmalloc_array(p->nchunks, sizeof(struct amdgpu_cs_chunk),
 			    GFP_KERNEL);
-	if (p->chunks == NULL) {
-		r = -ENOMEM;
-		goto out;
+	if (!p->chunks) {
+		ret = -ENOMEM;
+		goto put_bo_list;
 	}
 
 	for (i = 0; i < p->nchunks; i++) {
@@ -200,8 +199,9 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
 		chunk_ptr = (void __user *)chunk_array[i];
 		if (copy_from_user(&user_chunk, chunk_ptr,
 				       sizeof(struct drm_amdgpu_cs_chunk))) {
-			r = -EFAULT;
-			goto out;
+			ret = -EFAULT;
+			i--;
+			goto free_partial_kdata;
 		}
 		p->chunks[i].chunk_id = user_chunk.chunk_id;
 		p->chunks[i].length_dw = user_chunk.length_dw;
@@ -212,13 +212,14 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
 
 		p->chunks[i].kdata = drm_malloc_ab(size, sizeof(uint32_t));
 		if (p->chunks[i].kdata == NULL) {
-			r = -ENOMEM;
-			goto out;
+			ret = -ENOMEM;
+			i--;
+			goto free_partial_kdata;
 		}
 		size *= sizeof(uint32_t);
 		if (copy_from_user(p->chunks[i].kdata, cdata, size)) {
-			r = -EFAULT;
-			goto out;
+			ret = -EFAULT;
+			goto free_partial_kdata;
 		}
 
 		switch (p->chunks[i].chunk_id) {
@@ -238,15 +239,15 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
 				gobj = drm_gem_object_lookup(p->adev->ddev,
 							     p->filp, handle);
 				if (gobj == NULL) {
-					r = -EINVAL;
-					goto out;
+					ret = -EINVAL;
+					goto free_partial_kdata;
 				}
 
 				p->uf.bo = gem_to_amdgpu_bo(gobj);
 				p->uf.offset = fence_data->offset;
 			} else {
-				r = -EINVAL;
-				goto out;
+				ret = -EINVAL;
+				goto free_partial_kdata;
 			}
 			break;
 
@@ -254,19 +255,35 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
 			break;
 
 		default:
-			r = -EINVAL;
-			goto out;
+			ret = -EINVAL;
+			goto free_partial_kdata;
 		}
 	}
 
 
 	p->ibs = kcalloc(p->num_ibs, sizeof(struct amdgpu_ib), GFP_KERNEL);
-	if (!p->ibs)
-		r = -ENOMEM;
+	if (!p->ibs) {
+		ret = -ENOMEM;
+		goto free_all_kdata;
+	}
 
-out:
 	kfree(chunk_array);
-	return r;
+	return 0;
+
+free_all_kdata:
+	i = p->nchunks - 1;
+free_partial_kdata:
+	for (; i >= 0; i--)
+		drm_free_large(p->chunks[i].kdata);
+	kfree(p->chunks);
+put_bo_list:
+	if (p->bo_list)
+		amdgpu_bo_list_put(p->bo_list);
+	amdgpu_ctx_put(p->ctx);
+free_chunk:
+	kfree(chunk_array);
+
+	return ret;
 }
 
 /* Returns how many bytes TTM can move per IB.
@@ -810,7 +827,7 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 	r = amdgpu_cs_parser_init(parser, data);
 	if (r) {
 		DRM_ERROR("Failed to initialize parser !\n");
-		amdgpu_cs_parser_fini(parser, r, false);
+		kfree(parser);
 		up_read(&adev->exclusive_lock);
 		r = amdgpu_cs_handle_lockup(adev, r);
 		return r;