author      Thomas Hellstrom <thellstrom@vmware.com>    2013-12-18 08:13:29 -0500
committer   Thomas Hellstrom <thellstrom@vmware.com>    2014-01-08 04:11:57 -0500
commit      05efb1abecce6e36457ae1a7be29ded7ac52292a (patch)
tree        59be0d416b8811b273bc1b64f34fa4f0f4cb7b04 /drivers/gpu/drm/ttm
parent      859ae233cd0ee76b6143f948ba1cb6b0b4c342f8 (diff)
drm/ttm: ttm object security fixes for render nodes
When a client looks up a ttm object, don't look it up through the device
hash table, but rather through the file hash table. That makes sure that
the client has indeed put a reference on the object or, in gem terms, has
opened the object, either using prime or using the global "name".

To avoid a performance loss, make sure the file hash table entries can be
looked up under an RCU lock, and as a consequence replace the rwlock with
a spinlock, since we now never need to take it in read mode only.

Finally, add a ttm object lookup function for the device hash table,
intended to be used when we put a ref object on a base object or, in gem
terms, when we open the object.
Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Reviewed-by: Brian Paul <brianp@vmware.com>
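
The pattern the message describes, stripped to its core, is: readers walk the per-file reference table under rcu_read_lock() and only hand the object back if kref_get_unless_zero() still succeeds, while writers serialize on a plain spinlock and free entries through kfree_rcu(). Below is a minimal sketch of that pattern using an ordinary RCU hlist; the types and names (my_base_object, my_ref, my_file, my_lookup, my_ref_retire) are made up for illustration and are not the drm_open_hash code in the patch further down.

#include <linux/kref.h>
#include <linux/rculist.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

/* Simplified stand-ins for the TTM structures; all names are illustrative. */
struct my_base_object {
        struct kref refcount;
};

struct my_ref {                         /* one entry per (file, object) reference */
        struct hlist_node node;
        u32 key;                        /* the handle the client passed in */
        struct my_base_object *obj;
        struct rcu_head rcu_head;
};

struct my_file {
        spinlock_t lock;                /* serializes writers; readers use RCU */
        struct hlist_head ref_hash[64];
};

/*
 * Reader side: look the handle up in the per-file table, so the lookup can
 * only succeed for objects this client has already opened, and take the
 * reference with kref_get_unless_zero() because the entry may be on its way
 * out.
 */
static struct my_base_object *my_lookup(struct my_file *f, u32 key)
{
        struct my_base_object *base = NULL;
        struct my_ref *ref;

        rcu_read_lock();
        hlist_for_each_entry_rcu(ref, &f->ref_hash[key & 63], node) {
                if (ref->key != key)
                        continue;
                base = ref->obj;
                if (!kref_get_unless_zero(&base->refcount))
                        base = NULL;
                break;
        }
        rcu_read_unlock();

        return base;
}

/*
 * Writer side: insertion and removal always take the lock exclusively, which
 * is why the rwlock can be demoted to a spinlock.  Freeing is deferred with
 * kfree_rcu() so lockless readers never touch freed memory; this is what the
 * rcu_head added to struct ttm_ref_object is for.
 */
static void my_ref_retire(struct my_file *f, struct my_ref *ref)
{
        spin_lock(&f->lock);
        hlist_del_rcu(&ref->node);
        spin_unlock(&f->lock);

        kfree_rcu(ref, rcu_head);
}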
Diffstat (limited to 'drivers/gpu/drm/ttm')
-rw-r--r--   drivers/gpu/drm/ttm/ttm_object.c   90
1 file changed, 54 insertions(+), 36 deletions(-)
diff --git a/drivers/gpu/drm/ttm/ttm_object.c b/drivers/gpu/drm/ttm/ttm_object.c
index 6fe7b92a82d1..37079859afc8 100644
--- a/drivers/gpu/drm/ttm/ttm_object.c
+++ b/drivers/gpu/drm/ttm/ttm_object.c
@@ -68,7 +68,7 @@
 
 struct ttm_object_file {
         struct ttm_object_device *tdev;
-        rwlock_t lock;
+        spinlock_t lock;
         struct list_head ref_list;
         struct drm_open_hash ref_hash[TTM_REF_NUM];
         struct kref refcount;
@@ -118,6 +118,7 @@ struct ttm_object_device {
  */
 
 struct ttm_ref_object {
+        struct rcu_head rcu_head;
         struct drm_hash_item hash;
         struct list_head head;
         struct kref kref;
@@ -210,10 +211,9 @@ static void ttm_release_base(struct kref *kref)
          * call_rcu() or ttm_base_object_kfree().
          */
 
-        if (base->refcount_release) {
-                ttm_object_file_unref(&base->tfile);
+        ttm_object_file_unref(&base->tfile);
+        if (base->refcount_release)
                 base->refcount_release(&base);
-        }
 }
 
 void ttm_base_object_unref(struct ttm_base_object **p_base)
@@ -229,32 +229,46 @@ EXPORT_SYMBOL(ttm_base_object_unref);
 struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile,
                                                uint32_t key)
 {
-        struct ttm_object_device *tdev = tfile->tdev;
-        struct ttm_base_object *uninitialized_var(base);
+        struct ttm_base_object *base = NULL;
         struct drm_hash_item *hash;
+        struct drm_open_hash *ht = &tfile->ref_hash[TTM_REF_USAGE];
         int ret;
 
         rcu_read_lock();
-        ret = drm_ht_find_item_rcu(&tdev->object_hash, key, &hash);
+        ret = drm_ht_find_item_rcu(ht, key, &hash);
 
         if (likely(ret == 0)) {
-                base = drm_hash_entry(hash, struct ttm_base_object, hash);
-                ret = kref_get_unless_zero(&base->refcount) ? 0 : -EINVAL;
+                base = drm_hash_entry(hash, struct ttm_ref_object, hash)->obj;
+                if (!kref_get_unless_zero(&base->refcount))
+                        base = NULL;
         }
         rcu_read_unlock();
 
-        if (unlikely(ret != 0))
-                return NULL;
+        return base;
+}
+EXPORT_SYMBOL(ttm_base_object_lookup);
 
-        if (tfile != base->tfile && !base->shareable) {
-                pr_err("Attempted access of non-shareable object\n");
-                ttm_base_object_unref(&base);
-                return NULL;
+struct ttm_base_object *
+ttm_base_object_lookup_for_ref(struct ttm_object_device *tdev, uint32_t key)
+{
+        struct ttm_base_object *base = NULL;
+        struct drm_hash_item *hash;
+        struct drm_open_hash *ht = &tdev->object_hash;
+        int ret;
+
+        rcu_read_lock();
+        ret = drm_ht_find_item_rcu(ht, key, &hash);
+
+        if (likely(ret == 0)) {
+                base = drm_hash_entry(hash, struct ttm_base_object, hash);
+                if (!kref_get_unless_zero(&base->refcount))
+                        base = NULL;
         }
+        rcu_read_unlock();
 
         return base;
 }
-EXPORT_SYMBOL(ttm_base_object_lookup);
+EXPORT_SYMBOL(ttm_base_object_lookup_for_ref);
 
 int ttm_ref_object_add(struct ttm_object_file *tfile,
                        struct ttm_base_object *base,
@@ -266,21 +280,25 @@ int ttm_ref_object_add(struct ttm_object_file *tfile,
         struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob;
         int ret = -EINVAL;
 
+        if (base->tfile != tfile && !base->shareable)
+                return -EPERM;
+
         if (existed != NULL)
                 *existed = true;
 
         while (ret == -EINVAL) {
-                read_lock(&tfile->lock);
-                ret = drm_ht_find_item(ht, base->hash.key, &hash);
+                rcu_read_lock();
+                ret = drm_ht_find_item_rcu(ht, base->hash.key, &hash);
 
                 if (ret == 0) {
                         ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
-                        kref_get(&ref->kref);
-                        read_unlock(&tfile->lock);
-                        break;
+                        if (kref_get_unless_zero(&ref->kref)) {
+                                rcu_read_unlock();
+                                break;
+                        }
                 }
 
-                read_unlock(&tfile->lock);
+                rcu_read_unlock();
                 ret = ttm_mem_global_alloc(mem_glob, sizeof(*ref),
                                            false, false);
                 if (unlikely(ret != 0))
@@ -297,19 +315,19 @@ int ttm_ref_object_add(struct ttm_object_file *tfile,
                 ref->ref_type = ref_type;
                 kref_init(&ref->kref);
 
-                write_lock(&tfile->lock);
-                ret = drm_ht_insert_item(ht, &ref->hash);
+                spin_lock(&tfile->lock);
+                ret = drm_ht_insert_item_rcu(ht, &ref->hash);
 
                 if (likely(ret == 0)) {
                         list_add_tail(&ref->head, &tfile->ref_list);
                         kref_get(&base->refcount);
-                        write_unlock(&tfile->lock);
+                        spin_unlock(&tfile->lock);
                         if (existed != NULL)
                                 *existed = false;
                         break;
                 }
 
-                write_unlock(&tfile->lock);
+                spin_unlock(&tfile->lock);
                 BUG_ON(ret != -EINVAL);
 
                 ttm_mem_global_free(mem_glob, sizeof(*ref));
@@ -330,17 +348,17 @@ static void ttm_ref_object_release(struct kref *kref)
         struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob;
 
         ht = &tfile->ref_hash[ref->ref_type];
-        (void)drm_ht_remove_item(ht, &ref->hash);
+        (void)drm_ht_remove_item_rcu(ht, &ref->hash);
         list_del(&ref->head);
-        write_unlock(&tfile->lock);
+        spin_unlock(&tfile->lock);
 
         if (ref->ref_type != TTM_REF_USAGE && base->ref_obj_release)
                 base->ref_obj_release(base, ref->ref_type);
 
         ttm_base_object_unref(&ref->obj);
         ttm_mem_global_free(mem_glob, sizeof(*ref));
-        kfree(ref);
-        write_lock(&tfile->lock);
+        kfree_rcu(ref, rcu_head);
+        spin_lock(&tfile->lock);
 }
 
 int ttm_ref_object_base_unref(struct ttm_object_file *tfile,
@@ -351,15 +369,15 @@ int ttm_ref_object_base_unref(struct ttm_object_file *tfile,
         struct drm_hash_item *hash;
         int ret;
 
-        write_lock(&tfile->lock);
+        spin_lock(&tfile->lock);
         ret = drm_ht_find_item(ht, key, &hash);
         if (unlikely(ret != 0)) {
-                write_unlock(&tfile->lock);
+                spin_unlock(&tfile->lock);
                 return -EINVAL;
         }
         ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
         kref_put(&ref->kref, ttm_ref_object_release);
-        write_unlock(&tfile->lock);
+        spin_unlock(&tfile->lock);
         return 0;
 }
 EXPORT_SYMBOL(ttm_ref_object_base_unref);
@@ -372,7 +390,7 @@ void ttm_object_file_release(struct ttm_object_file **p_tfile)
         struct ttm_object_file *tfile = *p_tfile;
 
         *p_tfile = NULL;
-        write_lock(&tfile->lock);
+        spin_lock(&tfile->lock);
 
         /*
          * Since we release the lock within the loop, we have to
@@ -388,7 +406,7 @@ void ttm_object_file_release(struct ttm_object_file **p_tfile)
         for (i = 0; i < TTM_REF_NUM; ++i)
                 drm_ht_remove(&tfile->ref_hash[i]);
 
-        write_unlock(&tfile->lock);
+        spin_unlock(&tfile->lock);
         ttm_object_file_unref(&tfile);
 }
 EXPORT_SYMBOL(ttm_object_file_release);
@@ -404,7 +422,7 @@ struct ttm_object_file *ttm_object_file_init(struct ttm_object_device *tdev,
         if (unlikely(tfile == NULL))
                 return NULL;
 
-        rwlock_init(&tfile->lock);
+        spin_lock_init(&tfile->lock);
         tfile->tdev = tdev;
         kref_init(&tfile->refcount);
         INIT_LIST_HEAD(&tfile->ref_list);
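
Seen from a caller, the patch splits object lookup into two entry points: ttm_base_object_lookup_for_ref() searches the device-wide hash and is meant only for the point where a handle is opened and a TTM_REF_USAGE reference is added, while ttm_base_object_lookup() now searches the per-file reference table and therefore only finds objects the client has already opened. A rough sketch of how a driver ioctl path might use the two after this change; the helpers my_open_object_ioctl() and my_use_object_ioctl() are invented names for illustration, not vmwgfx code.

#include <drm/ttm/ttm_object.h>

/* Opening a handle: the only place the device-wide hash is consulted. */
static int my_open_object_ioctl(struct ttm_object_device *tdev,
                                struct ttm_object_file *tfile, uint32_t handle)
{
        struct ttm_base_object *base;
        bool existed;
        int ret;

        base = ttm_base_object_lookup_for_ref(tdev, handle);
        if (unlikely(base == NULL))
                return -EINVAL;

        /*
         * ttm_ref_object_add() now also rejects non-shareable objects that
         * belong to another client (-EPERM), the check that used to live in
         * ttm_base_object_lookup().
         */
        ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, &existed);
        ttm_base_object_unref(&base);
        return ret;
}

/* Every later use of the handle goes through the per-file table only. */
static int my_use_object_ioctl(struct ttm_object_file *tfile, uint32_t handle)
{
        struct ttm_base_object *base = ttm_base_object_lookup(tfile, handle);

        if (unlikely(base == NULL))
                return -EINVAL;

        /* ... operate on the object ... */

        ttm_base_object_unref(&base);
        return 0;
}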