author | Dave Airlie <airlied@redhat.com> | 2014-03-30 21:29:38 -0400
committer | Dave Airlie <airlied@redhat.com> | 2014-03-30 21:29:38 -0400
commit | c32fc9c803f8ed90a7548810de48ca33a3020168 (patch)
tree | 646c8e4993463ebea6deca76e4b4f7fd9a8da4b6
parent | 60f2b4af1258c05e6b037af866be81abc24438f7 (diff)
parent | 03c5b8f077218bec50f1355b76dea405a7112878 (diff)
Merge tag 'vmwgfx-next-2014-03-28' of git://people.freedesktop.org/~thomash/linux into drm-next
vmwgfx render-node support and drm + ttm changes it depends upon.
Pull request of 2014-03-28
* tag 'vmwgfx-next-2014-03-28' of git://people.freedesktop.org/~thomash/linux:
drm/vmwgfx: Bump driver minor and date
drm/vmwgfx: Enable render nodes
drm/vmwgfx: Tighten the security around buffer maps
drm/ttm: Add a ttm_ref_object_exists function
drm/vmwgfx: Tighten security around surface sharing v2
drm/vmwgfx: Allow prime fds in the surface reference ioctls
drm/vmwgfx: Drop authentication requirement on UNREF ioctls
drm/vmwgfx: Reinstate and tighten security around legacy master model
drm/vmwgfx: Use a per-device semaphore for reservation protection
drm: Add a function to get the ioctl flags
drm: Protect the master management with a drm_device::master_mutex v3
drm: Remove the minor master list
drm: Improve on minor type helpers v3
drm: Make control nodes master-less v3
drm: Break out ioctl permission check to a separate function v2
drm: Have the crtc code only reference master from legacy nodes v2
-rw-r--r-- | drivers/gpu/drm/drm_crtc.c | 14
-rw-r--r-- | drivers/gpu/drm/drm_drv.c | 132
-rw-r--r-- | drivers/gpu/drm/drm_fops.c | 26
-rw-r--r-- | drivers/gpu/drm/drm_stub.c | 48
-rw-r--r-- | drivers/gpu/drm/ttm/ttm_object.c | 46
-rw-r--r-- | drivers/gpu/drm/vmwgfx/vmwgfx_context.c | 5
-rw-r--r-- | drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c | 15
-rw-r--r-- | drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 143
-rw-r--r-- | drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | 9
-rw-r--r-- | drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 5
-rw-r--r-- | drivers/gpu/drm/vmwgfx/vmwgfx_fb.c | 9
-rw-r--r-- | drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c | 10
-rw-r--r-- | drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 15
-rw-r--r-- | drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | 24
-rw-r--r-- | drivers/gpu/drm/vmwgfx/vmwgfx_shader.c | 5
-rw-r--r-- | drivers/gpu/drm/vmwgfx/vmwgfx_surface.c | 143
-rw-r--r-- | include/drm/drmP.h | 61
-rw-r--r-- | include/drm/ttm/ttm_object.h | 4
-rw-r--r-- | include/uapi/drm/vmwgfx_drm.h | 12
19 files changed, 500 insertions, 226 deletions
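For readers new to render nodes: the DRIVER_RENDER feature enabled for vmwgfx further down makes the driver expose an additional /dev/dri/renderD&lt;N&gt; device node, on which only DRM_RENDER_ALLOW ioctls are accepted and no master/authentication handshake takes place. A minimal userspace sketch of what such a client looks like (the device path, include path and error handling are illustrative assumptions, not part of this series):

```c
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <drm/drm.h>		/* libdrm's include directory assumed on the path */

int main(void)
{
	struct drm_version ver;
	int fd = open("/dev/dri/renderD128", O_RDWR);	/* render node, not card0 */

	if (fd < 0) {
		perror("open render node");
		return 1;
	}

	/* With zeroed lengths, DRM_IOCTL_VERSION only reports version numbers
	 * and the string lengths; no authentication is needed on a render node. */
	memset(&ver, 0, sizeof(ver));
	if (ioctl(fd, DRM_IOCTL_VERSION, &ver) == 0)
		printf("driver version %d.%d.%d\n", ver.version_major,
		       ver.version_minor, ver.version_patchlevel);

	close(fd);
	return 0;
}
```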
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 16ca28ed5ee8..960ca987c20f 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -1492,9 +1492,9 @@ int drm_mode_getresources(struct drm_device *dev, void *data, | |||
1492 | mutex_unlock(&file_priv->fbs_lock); | 1492 | mutex_unlock(&file_priv->fbs_lock); |
1493 | 1493 | ||
1494 | drm_modeset_lock_all(dev); | 1494 | drm_modeset_lock_all(dev); |
1495 | mode_group = &file_priv->master->minor->mode_group; | 1495 | if (!drm_is_primary_client(file_priv)) { |
1496 | if (file_priv->master->minor->type == DRM_MINOR_CONTROL) { | ||
1497 | 1496 | ||
1497 | mode_group = NULL; | ||
1498 | list_for_each(lh, &dev->mode_config.crtc_list) | 1498 | list_for_each(lh, &dev->mode_config.crtc_list) |
1499 | crtc_count++; | 1499 | crtc_count++; |
1500 | 1500 | ||
@@ -1505,6 +1505,7 @@ int drm_mode_getresources(struct drm_device *dev, void *data, | |||
1505 | encoder_count++; | 1505 | encoder_count++; |
1506 | } else { | 1506 | } else { |
1507 | 1507 | ||
1508 | mode_group = &file_priv->master->minor->mode_group; | ||
1508 | crtc_count = mode_group->num_crtcs; | 1509 | crtc_count = mode_group->num_crtcs; |
1509 | connector_count = mode_group->num_connectors; | 1510 | connector_count = mode_group->num_connectors; |
1510 | encoder_count = mode_group->num_encoders; | 1511 | encoder_count = mode_group->num_encoders; |
@@ -1519,7 +1520,7 @@ int drm_mode_getresources(struct drm_device *dev, void *data, | |||
1519 | if (card_res->count_crtcs >= crtc_count) { | 1520 | if (card_res->count_crtcs >= crtc_count) { |
1520 | copied = 0; | 1521 | copied = 0; |
1521 | crtc_id = (uint32_t __user *)(unsigned long)card_res->crtc_id_ptr; | 1522 | crtc_id = (uint32_t __user *)(unsigned long)card_res->crtc_id_ptr; |
1522 | if (file_priv->master->minor->type == DRM_MINOR_CONTROL) { | 1523 | if (!mode_group) { |
1523 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, | 1524 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, |
1524 | head) { | 1525 | head) { |
1525 | DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id); | 1526 | DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id); |
@@ -1546,7 +1547,7 @@ int drm_mode_getresources(struct drm_device *dev, void *data, | |||
1546 | if (card_res->count_encoders >= encoder_count) { | 1547 | if (card_res->count_encoders >= encoder_count) { |
1547 | copied = 0; | 1548 | copied = 0; |
1548 | encoder_id = (uint32_t __user *)(unsigned long)card_res->encoder_id_ptr; | 1549 | encoder_id = (uint32_t __user *)(unsigned long)card_res->encoder_id_ptr; |
1549 | if (file_priv->master->minor->type == DRM_MINOR_CONTROL) { | 1550 | if (!mode_group) { |
1550 | list_for_each_entry(encoder, | 1551 | list_for_each_entry(encoder, |
1551 | &dev->mode_config.encoder_list, | 1552 | &dev->mode_config.encoder_list, |
1552 | head) { | 1553 | head) { |
@@ -1577,7 +1578,7 @@ int drm_mode_getresources(struct drm_device *dev, void *data, | |||
1577 | if (card_res->count_connectors >= connector_count) { | 1578 | if (card_res->count_connectors >= connector_count) { |
1578 | copied = 0; | 1579 | copied = 0; |
1579 | connector_id = (uint32_t __user *)(unsigned long)card_res->connector_id_ptr; | 1580 | connector_id = (uint32_t __user *)(unsigned long)card_res->connector_id_ptr; |
1580 | if (file_priv->master->minor->type == DRM_MINOR_CONTROL) { | 1581 | if (!mode_group) { |
1581 | list_for_each_entry(connector, | 1582 | list_for_each_entry(connector, |
1582 | &dev->mode_config.connector_list, | 1583 | &dev->mode_config.connector_list, |
1583 | head) { | 1584 | head) { |
@@ -2846,7 +2847,8 @@ int drm_mode_getfb(struct drm_device *dev, | |||
2846 | r->bpp = fb->bits_per_pixel; | 2847 | r->bpp = fb->bits_per_pixel; |
2847 | r->pitch = fb->pitches[0]; | 2848 | r->pitch = fb->pitches[0]; |
2848 | if (fb->funcs->create_handle) { | 2849 | if (fb->funcs->create_handle) { |
2849 | if (file_priv->is_master || capable(CAP_SYS_ADMIN)) { | 2850 | if (file_priv->is_master || capable(CAP_SYS_ADMIN) || |
2851 | drm_is_control_client(file_priv)) { | ||
2850 | ret = fb->funcs->create_handle(fb, file_priv, | 2852 | ret = fb->funcs->create_handle(fb, file_priv, |
2851 | &r->handle); | 2853 | &r->handle); |
2852 | } else { | 2854 | } else { |
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index ec651be2f3cb..03711d00aaae 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -286,6 +286,45 @@ static int drm_version(struct drm_device *dev, void *data, | |||
286 | } | 286 | } |
287 | 287 | ||
288 | /** | 288 | /** |
289 | * drm_ioctl_permit - Check ioctl permissions against caller | ||
290 | * | ||
291 | * @flags: ioctl permission flags. | ||
292 | * @file_priv: Pointer to struct drm_file identifying the caller. | ||
293 | * | ||
294 | * Checks whether the caller is allowed to run an ioctl with the | ||
295 | * indicated permissions. If so, returns zero. Otherwise returns an | ||
296 | * error code suitable for ioctl return. | ||
297 | */ | ||
298 | static int drm_ioctl_permit(u32 flags, struct drm_file *file_priv) | ||
299 | { | ||
300 | /* ROOT_ONLY is only for CAP_SYS_ADMIN */ | ||
301 | if (unlikely((flags & DRM_ROOT_ONLY) && !capable(CAP_SYS_ADMIN))) | ||
302 | return -EACCES; | ||
303 | |||
304 | /* AUTH is only for authenticated or render client */ | ||
305 | if (unlikely((flags & DRM_AUTH) && !drm_is_render_client(file_priv) && | ||
306 | !file_priv->authenticated)) | ||
307 | return -EACCES; | ||
308 | |||
309 | /* MASTER is only for master or control clients */ | ||
310 | if (unlikely((flags & DRM_MASTER) && !file_priv->is_master && | ||
311 | !drm_is_control_client(file_priv))) | ||
312 | return -EACCES; | ||
313 | |||
314 | /* Control clients must be explicitly allowed */ | ||
315 | if (unlikely(!(flags & DRM_CONTROL_ALLOW) && | ||
316 | drm_is_control_client(file_priv))) | ||
317 | return -EACCES; | ||
318 | |||
319 | /* Render clients must be explicitly allowed */ | ||
320 | if (unlikely(!(flags & DRM_RENDER_ALLOW) && | ||
321 | drm_is_render_client(file_priv))) | ||
322 | return -EACCES; | ||
323 | |||
324 | return 0; | ||
325 | } | ||
326 | |||
327 | /** | ||
289 | * Called whenever a process performs an ioctl on /dev/drm. | 328 | * Called whenever a process performs an ioctl on /dev/drm. |
290 | * | 329 | * |
291 | * \param inode device inode. | 330 | * \param inode device inode. |
@@ -350,52 +389,51 @@ long drm_ioctl(struct file *filp, | |||
350 | /* Do not trust userspace, use our own definition */ | 389 | /* Do not trust userspace, use our own definition */ |
351 | func = ioctl->func; | 390 | func = ioctl->func; |
352 | 391 | ||
353 | if (!func) { | 392 | if (unlikely(!func)) { |
354 | DRM_DEBUG("no function\n"); | 393 | DRM_DEBUG("no function\n"); |
355 | retcode = -EINVAL; | 394 | retcode = -EINVAL; |
356 | } else if (((ioctl->flags & DRM_ROOT_ONLY) && !capable(CAP_SYS_ADMIN)) || | 395 | goto err_i1; |
357 | ((ioctl->flags & DRM_AUTH) && !drm_is_render_client(file_priv) && !file_priv->authenticated) || | 396 | } |
358 | ((ioctl->flags & DRM_MASTER) && !file_priv->is_master) || | ||
359 | (!(ioctl->flags & DRM_CONTROL_ALLOW) && (file_priv->minor->type == DRM_MINOR_CONTROL)) || | ||
360 | (!(ioctl->flags & DRM_RENDER_ALLOW) && drm_is_render_client(file_priv))) { | ||
361 | retcode = -EACCES; | ||
362 | } else { | ||
363 | if (cmd & (IOC_IN | IOC_OUT)) { | ||
364 | if (asize <= sizeof(stack_kdata)) { | ||
365 | kdata = stack_kdata; | ||
366 | } else { | ||
367 | kdata = kmalloc(asize, GFP_KERNEL); | ||
368 | if (!kdata) { | ||
369 | retcode = -ENOMEM; | ||
370 | goto err_i1; | ||
371 | } | ||
372 | } | ||
373 | if (asize > usize) | ||
374 | memset(kdata + usize, 0, asize - usize); | ||
375 | } | ||
376 | 397 | ||
377 | if (cmd & IOC_IN) { | 398 | retcode = drm_ioctl_permit(ioctl->flags, file_priv); |
378 | if (copy_from_user(kdata, (void __user *)arg, | 399 | if (unlikely(retcode)) |
379 | usize) != 0) { | 400 | goto err_i1; |
380 | retcode = -EFAULT; | 401 | |
402 | if (cmd & (IOC_IN | IOC_OUT)) { | ||
403 | if (asize <= sizeof(stack_kdata)) { | ||
404 | kdata = stack_kdata; | ||
405 | } else { | ||
406 | kdata = kmalloc(asize, GFP_KERNEL); | ||
407 | if (!kdata) { | ||
408 | retcode = -ENOMEM; | ||
381 | goto err_i1; | 409 | goto err_i1; |
382 | } | 410 | } |
383 | } else | ||
384 | memset(kdata, 0, usize); | ||
385 | |||
386 | if (ioctl->flags & DRM_UNLOCKED) | ||
387 | retcode = func(dev, kdata, file_priv); | ||
388 | else { | ||
389 | mutex_lock(&drm_global_mutex); | ||
390 | retcode = func(dev, kdata, file_priv); | ||
391 | mutex_unlock(&drm_global_mutex); | ||
392 | } | 411 | } |
412 | if (asize > usize) | ||
413 | memset(kdata + usize, 0, asize - usize); | ||
414 | } | ||
393 | 415 | ||
394 | if (cmd & IOC_OUT) { | 416 | if (cmd & IOC_IN) { |
395 | if (copy_to_user((void __user *)arg, kdata, | 417 | if (copy_from_user(kdata, (void __user *)arg, |
396 | usize) != 0) | 418 | usize) != 0) { |
397 | retcode = -EFAULT; | 419 | retcode = -EFAULT; |
420 | goto err_i1; | ||
398 | } | 421 | } |
422 | } else | ||
423 | memset(kdata, 0, usize); | ||
424 | |||
425 | if (ioctl->flags & DRM_UNLOCKED) | ||
426 | retcode = func(dev, kdata, file_priv); | ||
427 | else { | ||
428 | mutex_lock(&drm_global_mutex); | ||
429 | retcode = func(dev, kdata, file_priv); | ||
430 | mutex_unlock(&drm_global_mutex); | ||
431 | } | ||
432 | |||
433 | if (cmd & IOC_OUT) { | ||
434 | if (copy_to_user((void __user *)arg, kdata, | ||
435 | usize) != 0) | ||
436 | retcode = -EFAULT; | ||
399 | } | 437 | } |
400 | 438 | ||
401 | err_i1: | 439 | err_i1: |
@@ -412,3 +450,21 @@ long drm_ioctl(struct file *filp, | |||
412 | return retcode; | 450 | return retcode; |
413 | } | 451 | } |
414 | EXPORT_SYMBOL(drm_ioctl); | 452 | EXPORT_SYMBOL(drm_ioctl); |
453 | |||
454 | /** | ||
455 | * drm_ioctl_flags - Check for core ioctl and return ioctl permission flags | ||
456 | * | ||
457 | * @nr: Ioctl number. | ||
458 | * @flags: Where to return the ioctl permission flags | ||
459 | */ | ||
460 | bool drm_ioctl_flags(unsigned int nr, unsigned int *flags) | ||
461 | { | ||
462 | if ((nr >= DRM_COMMAND_END && nr < DRM_CORE_IOCTL_COUNT) || | ||
463 | (nr < DRM_COMMAND_BASE)) { | ||
464 | *flags = drm_ioctls[nr].flags; | ||
465 | return true; | ||
466 | } | ||
467 | |||
468 | return false; | ||
469 | } | ||
470 | EXPORT_SYMBOL(drm_ioctl_flags); | ||
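The two helpers added above are what the vmwgfx changes further down build on: drm_ioctl_permit() centralizes the permission check, and drm_ioctl_flags() lets a driver look up a core ioctl's permission flags before dispatching. A condensed sketch of the intended usage pattern is below; the my_driver_* names, the MY_DRIVER_NUM_IOCTLS bound and the lock helpers are hypothetical, and the structure mirrors vmw_generic_ioctl() later in this series rather than being code from the patch.

```c
#include <drm/drmP.h>

/* Hypothetical driver ioctl table and its size. */
extern const struct drm_ioctl_desc my_driver_ioctls[];
#define MY_DRIVER_NUM_IOCTLS 16

static long my_driver_unlocked_ioctl(struct file *filp, unsigned int cmd,
				     unsigned long arg)
{
	unsigned int nr = DRM_IOCTL_NR(cmd);
	unsigned int flags;
	long ret;

	if (nr >= DRM_COMMAND_BASE &&
	    nr < DRM_COMMAND_BASE + MY_DRIVER_NUM_IOCTLS)
		flags = my_driver_ioctls[nr - DRM_COMMAND_BASE].flags;
	else if (!drm_ioctl_flags(nr, &flags))
		return -EINVAL;		/* not a core ioctl we recognize */

	/* Only wrap ioctls that need it, e.g. authenticated ones. */
	if (flags & DRM_AUTH)
		my_driver_take_hw_lock();	/* hypothetical helper */

	ret = drm_ioctl(filp, cmd, arg);

	if (flags & DRM_AUTH)
		my_driver_release_hw_lock();	/* hypothetical helper */

	return ret;
}
```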
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index 9b02f126fb0d..a0ce39c96f8e 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -231,12 +231,11 @@ static int drm_open_helper(struct inode *inode, struct file *filp, | |||
231 | 231 | ||
232 | /* if there is no current master make this fd it, but do not create | 232 | /* if there is no current master make this fd it, but do not create |
233 | * any master object for render clients */ | 233 | * any master object for render clients */ |
234 | mutex_lock(&dev->struct_mutex); | 234 | mutex_lock(&dev->master_mutex); |
235 | if (!priv->minor->master && !drm_is_render_client(priv)) { | 235 | if (drm_is_primary_client(priv) && !priv->minor->master) { |
236 | /* create a new master */ | 236 | /* create a new master */ |
237 | priv->minor->master = drm_master_create(priv->minor); | 237 | priv->minor->master = drm_master_create(priv->minor); |
238 | if (!priv->minor->master) { | 238 | if (!priv->minor->master) { |
239 | mutex_unlock(&dev->struct_mutex); | ||
240 | ret = -ENOMEM; | 239 | ret = -ENOMEM; |
241 | goto out_close; | 240 | goto out_close; |
242 | } | 241 | } |
@@ -244,37 +243,31 @@ static int drm_open_helper(struct inode *inode, struct file *filp, | |||
244 | priv->is_master = 1; | 243 | priv->is_master = 1; |
245 | /* take another reference for the copy in the local file priv */ | 244 | /* take another reference for the copy in the local file priv */ |
246 | priv->master = drm_master_get(priv->minor->master); | 245 | priv->master = drm_master_get(priv->minor->master); |
247 | |||
248 | priv->authenticated = 1; | 246 | priv->authenticated = 1; |
249 | 247 | ||
250 | mutex_unlock(&dev->struct_mutex); | ||
251 | if (dev->driver->master_create) { | 248 | if (dev->driver->master_create) { |
252 | ret = dev->driver->master_create(dev, priv->master); | 249 | ret = dev->driver->master_create(dev, priv->master); |
253 | if (ret) { | 250 | if (ret) { |
254 | mutex_lock(&dev->struct_mutex); | ||
255 | /* drop both references if this fails */ | 251 | /* drop both references if this fails */ |
256 | drm_master_put(&priv->minor->master); | 252 | drm_master_put(&priv->minor->master); |
257 | drm_master_put(&priv->master); | 253 | drm_master_put(&priv->master); |
258 | mutex_unlock(&dev->struct_mutex); | ||
259 | goto out_close; | 254 | goto out_close; |
260 | } | 255 | } |
261 | } | 256 | } |
262 | mutex_lock(&dev->struct_mutex); | ||
263 | if (dev->driver->master_set) { | 257 | if (dev->driver->master_set) { |
264 | ret = dev->driver->master_set(dev, priv, true); | 258 | ret = dev->driver->master_set(dev, priv, true); |
265 | if (ret) { | 259 | if (ret) { |
266 | /* drop both references if this fails */ | 260 | /* drop both references if this fails */ |
267 | drm_master_put(&priv->minor->master); | 261 | drm_master_put(&priv->minor->master); |
268 | drm_master_put(&priv->master); | 262 | drm_master_put(&priv->master); |
269 | mutex_unlock(&dev->struct_mutex); | ||
270 | goto out_close; | 263 | goto out_close; |
271 | } | 264 | } |
272 | } | 265 | } |
273 | } else if (!drm_is_render_client(priv)) { | 266 | } else if (drm_is_primary_client(priv)) { |
274 | /* get a reference to the master */ | 267 | /* get a reference to the master */ |
275 | priv->master = drm_master_get(priv->minor->master); | 268 | priv->master = drm_master_get(priv->minor->master); |
276 | } | 269 | } |
277 | mutex_unlock(&dev->struct_mutex); | 270 | mutex_unlock(&dev->master_mutex); |
278 | 271 | ||
279 | mutex_lock(&dev->struct_mutex); | 272 | mutex_lock(&dev->struct_mutex); |
280 | list_add(&priv->lhead, &dev->filelist); | 273 | list_add(&priv->lhead, &dev->filelist); |
@@ -302,6 +295,7 @@ static int drm_open_helper(struct inode *inode, struct file *filp, | |||
302 | return 0; | 295 | return 0; |
303 | 296 | ||
304 | out_close: | 297 | out_close: |
298 | mutex_unlock(&dev->master_mutex); | ||
305 | if (dev->driver->postclose) | 299 | if (dev->driver->postclose) |
306 | dev->driver->postclose(dev, priv); | 300 | dev->driver->postclose(dev, priv); |
307 | out_prime_destroy: | 301 | out_prime_destroy: |
@@ -489,11 +483,13 @@ int drm_release(struct inode *inode, struct file *filp) | |||
489 | } | 483 | } |
490 | mutex_unlock(&dev->ctxlist_mutex); | 484 | mutex_unlock(&dev->ctxlist_mutex); |
491 | 485 | ||
492 | mutex_lock(&dev->struct_mutex); | 486 | mutex_lock(&dev->master_mutex); |
493 | 487 | ||
494 | if (file_priv->is_master) { | 488 | if (file_priv->is_master) { |
495 | struct drm_master *master = file_priv->master; | 489 | struct drm_master *master = file_priv->master; |
496 | struct drm_file *temp; | 490 | struct drm_file *temp; |
491 | |||
492 | mutex_lock(&dev->struct_mutex); | ||
497 | list_for_each_entry(temp, &dev->filelist, lhead) { | 493 | list_for_each_entry(temp, &dev->filelist, lhead) { |
498 | if ((temp->master == file_priv->master) && | 494 | if ((temp->master == file_priv->master) && |
499 | (temp != file_priv)) | 495 | (temp != file_priv)) |
@@ -512,6 +508,7 @@ int drm_release(struct inode *inode, struct file *filp) | |||
512 | master->lock.file_priv = NULL; | 508 | master->lock.file_priv = NULL; |
513 | wake_up_interruptible_all(&master->lock.lock_queue); | 509 | wake_up_interruptible_all(&master->lock.lock_queue); |
514 | } | 510 | } |
511 | mutex_unlock(&dev->struct_mutex); | ||
515 | 512 | ||
516 | if (file_priv->minor->master == file_priv->master) { | 513 | if (file_priv->minor->master == file_priv->master) { |
517 | /* drop the reference held my the minor */ | 514 | /* drop the reference held my the minor */ |
@@ -521,10 +518,13 @@ int drm_release(struct inode *inode, struct file *filp) | |||
521 | } | 518 | } |
522 | } | 519 | } |
523 | 520 | ||
524 | /* drop the reference held my the file priv */ | 521 | /* drop the master reference held by the file priv */ |
525 | if (file_priv->master) | 522 | if (file_priv->master) |
526 | drm_master_put(&file_priv->master); | 523 | drm_master_put(&file_priv->master); |
527 | file_priv->is_master = 0; | 524 | file_priv->is_master = 0; |
525 | mutex_unlock(&dev->master_mutex); | ||
526 | |||
527 | mutex_lock(&dev->struct_mutex); | ||
528 | list_del(&file_priv->lhead); | 528 | list_del(&file_priv->lhead); |
529 | mutex_unlock(&dev->struct_mutex); | 529 | mutex_unlock(&dev->struct_mutex); |
530 | 530 | ||
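The net effect of this hunk is a new locking rule: dev->master_mutex now guards file_priv->is_master, file_priv->master and minor->master, and it nests outside dev->struct_mutex whenever both are needed. Schematically (an illustrative sketch, not code from the patch):

```c
static void example_master_teardown(struct drm_device *dev,
				    struct drm_file *file_priv)
{
	mutex_lock(&dev->master_mutex);		/* master bookkeeping */
	if (file_priv->is_master) {
		mutex_lock(&dev->struct_mutex);	/* filelist walk, map teardown */
		/* ... per-master cleanup ... */
		mutex_unlock(&dev->struct_mutex);
	}
	mutex_unlock(&dev->master_mutex);
}
```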
diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
index ed8a99576df3..fac6f9834257 100644
--- a/drivers/gpu/drm/drm_stub.c
+++ b/drivers/gpu/drm/drm_stub.c
@@ -127,8 +127,6 @@ struct drm_master *drm_master_create(struct drm_minor *minor) | |||
127 | INIT_LIST_HEAD(&master->magicfree); | 127 | INIT_LIST_HEAD(&master->magicfree); |
128 | master->minor = minor; | 128 | master->minor = minor; |
129 | 129 | ||
130 | list_add_tail(&master->head, &minor->master_list); | ||
131 | |||
132 | return master; | 130 | return master; |
133 | } | 131 | } |
134 | 132 | ||
@@ -146,8 +144,7 @@ static void drm_master_destroy(struct kref *kref) | |||
146 | struct drm_device *dev = master->minor->dev; | 144 | struct drm_device *dev = master->minor->dev; |
147 | struct drm_map_list *r_list, *list_temp; | 145 | struct drm_map_list *r_list, *list_temp; |
148 | 146 | ||
149 | list_del(&master->head); | 147 | mutex_lock(&dev->struct_mutex); |
150 | |||
151 | if (dev->driver->master_destroy) | 148 | if (dev->driver->master_destroy) |
152 | dev->driver->master_destroy(dev, master); | 149 | dev->driver->master_destroy(dev, master); |
153 | 150 | ||
@@ -175,6 +172,7 @@ static void drm_master_destroy(struct kref *kref) | |||
175 | 172 | ||
176 | drm_ht_remove(&master->magiclist); | 173 | drm_ht_remove(&master->magiclist); |
177 | 174 | ||
175 | mutex_unlock(&dev->struct_mutex); | ||
178 | kfree(master); | 176 | kfree(master); |
179 | } | 177 | } |
180 | 178 | ||
@@ -190,19 +188,20 @@ int drm_setmaster_ioctl(struct drm_device *dev, void *data, | |||
190 | { | 188 | { |
191 | int ret = 0; | 189 | int ret = 0; |
192 | 190 | ||
191 | mutex_lock(&dev->master_mutex); | ||
193 | if (file_priv->is_master) | 192 | if (file_priv->is_master) |
194 | return 0; | 193 | goto out_unlock; |
195 | 194 | ||
196 | if (file_priv->minor->master && file_priv->minor->master != file_priv->master) | 195 | if (file_priv->minor->master) { |
197 | return -EINVAL; | 196 | ret = -EINVAL; |
198 | 197 | goto out_unlock; | |
199 | if (!file_priv->master) | 198 | } |
200 | return -EINVAL; | ||
201 | 199 | ||
202 | if (file_priv->minor->master) | 200 | if (!file_priv->master) { |
203 | return -EINVAL; | 201 | ret = -EINVAL; |
202 | goto out_unlock; | ||
203 | } | ||
204 | 204 | ||
205 | mutex_lock(&dev->struct_mutex); | ||
206 | file_priv->minor->master = drm_master_get(file_priv->master); | 205 | file_priv->minor->master = drm_master_get(file_priv->master); |
207 | file_priv->is_master = 1; | 206 | file_priv->is_master = 1; |
208 | if (dev->driver->master_set) { | 207 | if (dev->driver->master_set) { |
@@ -212,27 +211,33 @@ int drm_setmaster_ioctl(struct drm_device *dev, void *data, | |||
212 | drm_master_put(&file_priv->minor->master); | 211 | drm_master_put(&file_priv->minor->master); |
213 | } | 212 | } |
214 | } | 213 | } |
215 | mutex_unlock(&dev->struct_mutex); | ||
216 | 214 | ||
215 | out_unlock: | ||
216 | mutex_unlock(&dev->master_mutex); | ||
217 | return ret; | 217 | return ret; |
218 | } | 218 | } |
219 | 219 | ||
220 | int drm_dropmaster_ioctl(struct drm_device *dev, void *data, | 220 | int drm_dropmaster_ioctl(struct drm_device *dev, void *data, |
221 | struct drm_file *file_priv) | 221 | struct drm_file *file_priv) |
222 | { | 222 | { |
223 | int ret = -EINVAL; | ||
224 | |||
225 | mutex_lock(&dev->master_mutex); | ||
223 | if (!file_priv->is_master) | 226 | if (!file_priv->is_master) |
224 | return -EINVAL; | 227 | goto out_unlock; |
225 | 228 | ||
226 | if (!file_priv->minor->master) | 229 | if (!file_priv->minor->master) |
227 | return -EINVAL; | 230 | goto out_unlock; |
228 | 231 | ||
229 | mutex_lock(&dev->struct_mutex); | 232 | ret = 0; |
230 | if (dev->driver->master_drop) | 233 | if (dev->driver->master_drop) |
231 | dev->driver->master_drop(dev, file_priv, false); | 234 | dev->driver->master_drop(dev, file_priv, false); |
232 | drm_master_put(&file_priv->minor->master); | 235 | drm_master_put(&file_priv->minor->master); |
233 | file_priv->is_master = 0; | 236 | file_priv->is_master = 0; |
234 | mutex_unlock(&dev->struct_mutex); | 237 | |
235 | return 0; | 238 | out_unlock: |
239 | mutex_unlock(&dev->master_mutex); | ||
240 | return ret; | ||
236 | } | 241 | } |
237 | 242 | ||
238 | /* | 243 | /* |
@@ -273,7 +278,6 @@ static int drm_minor_alloc(struct drm_device *dev, unsigned int type) | |||
273 | 278 | ||
274 | minor->type = type; | 279 | minor->type = type; |
275 | minor->dev = dev; | 280 | minor->dev = dev; |
276 | INIT_LIST_HEAD(&minor->master_list); | ||
277 | 281 | ||
278 | *drm_minor_get_slot(dev, type) = minor; | 282 | *drm_minor_get_slot(dev, type) = minor; |
279 | return 0; | 283 | return 0; |
@@ -564,6 +568,7 @@ struct drm_device *drm_dev_alloc(struct drm_driver *driver, | |||
564 | spin_lock_init(&dev->event_lock); | 568 | spin_lock_init(&dev->event_lock); |
565 | mutex_init(&dev->struct_mutex); | 569 | mutex_init(&dev->struct_mutex); |
566 | mutex_init(&dev->ctxlist_mutex); | 570 | mutex_init(&dev->ctxlist_mutex); |
571 | mutex_init(&dev->master_mutex); | ||
567 | 572 | ||
568 | dev->anon_inode = drm_fs_inode_new(); | 573 | dev->anon_inode = drm_fs_inode_new(); |
569 | if (IS_ERR(dev->anon_inode)) { | 574 | if (IS_ERR(dev->anon_inode)) { |
@@ -617,6 +622,7 @@ err_minors: | |||
617 | drm_minor_free(dev, DRM_MINOR_CONTROL); | 622 | drm_minor_free(dev, DRM_MINOR_CONTROL); |
618 | drm_fs_inode_free(dev->anon_inode); | 623 | drm_fs_inode_free(dev->anon_inode); |
619 | err_free: | 624 | err_free: |
625 | mutex_destroy(&dev->master_mutex); | ||
620 | kfree(dev); | 626 | kfree(dev); |
621 | return NULL; | 627 | return NULL; |
622 | } | 628 | } |
@@ -638,6 +644,8 @@ static void drm_dev_release(struct kref *ref) | |||
638 | drm_minor_free(dev, DRM_MINOR_CONTROL); | 644 | drm_minor_free(dev, DRM_MINOR_CONTROL); |
639 | 645 | ||
640 | kfree(dev->devname); | 646 | kfree(dev->devname); |
647 | |||
648 | mutex_destroy(&dev->master_mutex); | ||
641 | kfree(dev); | 649 | kfree(dev); |
642 | } | 650 | } |
643 | 651 | ||
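For reference, the set-master/drop-master ioctls whose entry points were just re-locked are driven from userspace roughly as follows (illustrative only; at this point in the series both ioctls are still marked DRM_ROOT_ONLY, so the caller needs CAP_SYS_ADMIN):

```c
#include <sys/ioctl.h>
#include <drm/drm.h>		/* libdrm include path assumed */

/* Claim the DRM master role on a primary-node fd. */
static int become_master(int fd)
{
	/* Fails with errno EINVAL if another fd already owns the master. */
	return ioctl(fd, DRM_IOCTL_SET_MASTER, 0);
}

/* Give the master role back; only valid while this fd actually holds it. */
static int drop_master(int fd)
{
	return ioctl(fd, DRM_IOCTL_DROP_MASTER, 0);
}
```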
diff --git a/drivers/gpu/drm/ttm/ttm_object.c b/drivers/gpu/drm/ttm/ttm_object.c
index 53b51c4e671a..d2a053352789 100644
--- a/drivers/gpu/drm/ttm/ttm_object.c
+++ b/drivers/gpu/drm/ttm/ttm_object.c
@@ -270,6 +270,52 @@ ttm_base_object_lookup_for_ref(struct ttm_object_device *tdev, uint32_t key) | |||
270 | } | 270 | } |
271 | EXPORT_SYMBOL(ttm_base_object_lookup_for_ref); | 271 | EXPORT_SYMBOL(ttm_base_object_lookup_for_ref); |
272 | 272 | ||
273 | /** | ||
274 | * ttm_ref_object_exists - Check whether a caller has a valid ref object | ||
275 | * (has opened) a base object. | ||
276 | * | ||
277 | * @tfile: Pointer to a struct ttm_object_file identifying the caller. | ||
278 | * @base: Pointer to a struct base object. | ||
279 | * | ||
280 | * Checks wether the caller identified by @tfile has put a valid USAGE | ||
281 | * reference object on the base object identified by @base. | ||
282 | */ | ||
283 | bool ttm_ref_object_exists(struct ttm_object_file *tfile, | ||
284 | struct ttm_base_object *base) | ||
285 | { | ||
286 | struct drm_open_hash *ht = &tfile->ref_hash[TTM_REF_USAGE]; | ||
287 | struct drm_hash_item *hash; | ||
288 | struct ttm_ref_object *ref; | ||
289 | |||
290 | rcu_read_lock(); | ||
291 | if (unlikely(drm_ht_find_item_rcu(ht, base->hash.key, &hash) != 0)) | ||
292 | goto out_false; | ||
293 | |||
294 | /* | ||
295 | * Verify that the ref object is really pointing to our base object. | ||
296 | * Our base object could actually be dead, and the ref object pointing | ||
297 | * to another base object with the same handle. | ||
298 | */ | ||
299 | ref = drm_hash_entry(hash, struct ttm_ref_object, hash); | ||
300 | if (unlikely(base != ref->obj)) | ||
301 | goto out_false; | ||
302 | |||
303 | /* | ||
304 | * Verify that the ref->obj pointer was actually valid! | ||
305 | */ | ||
306 | rmb(); | ||
307 | if (unlikely(atomic_read(&ref->kref.refcount) == 0)) | ||
308 | goto out_false; | ||
309 | |||
310 | rcu_read_unlock(); | ||
311 | return true; | ||
312 | |||
313 | out_false: | ||
314 | rcu_read_unlock(); | ||
315 | return false; | ||
316 | } | ||
317 | EXPORT_SYMBOL(ttm_ref_object_exists); | ||
318 | |||
273 | int ttm_ref_object_add(struct ttm_object_file *tfile, | 319 | int ttm_ref_object_add(struct ttm_object_file *tfile, |
274 | struct ttm_base_object *base, | 320 | struct ttm_base_object *base, |
275 | enum ttm_ref_type ref_type, bool *existed) | 321 | enum ttm_ref_type ref_type, bool *existed) |
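ttm_ref_object_exists() gives drivers a cheap way to ask "has this client actually opened that object?". A simplified example of the kind of check vmwgfx builds on it later in this patch (illustrative, not verbatim from the series):

```c
/*
 * Refuse access unless the calling client holds a TTM_REF_USAGE
 * reference on (i.e. has opened) the base object.
 */
static int example_verify_access(struct ttm_object_file *tfile,
				 struct ttm_base_object *base)
{
	return ttm_ref_object_exists(tfile, base) ? 0 : -EPERM;
}
```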
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
index 1e80152674b5..701d5207def6 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
@@ -462,7 +462,6 @@ int vmw_context_define_ioctl(struct drm_device *dev, void *data, | |||
462 | struct vmw_resource *tmp; | 462 | struct vmw_resource *tmp; |
463 | struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data; | 463 | struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data; |
464 | struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; | 464 | struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; |
465 | struct vmw_master *vmaster = vmw_master(file_priv->master); | ||
466 | int ret; | 465 | int ret; |
467 | 466 | ||
468 | 467 | ||
@@ -474,7 +473,7 @@ int vmw_context_define_ioctl(struct drm_device *dev, void *data, | |||
474 | if (unlikely(vmw_user_context_size == 0)) | 473 | if (unlikely(vmw_user_context_size == 0)) |
475 | vmw_user_context_size = ttm_round_pot(sizeof(*ctx)) + 128; | 474 | vmw_user_context_size = ttm_round_pot(sizeof(*ctx)) + 128; |
476 | 475 | ||
477 | ret = ttm_read_lock(&vmaster->lock, true); | 476 | ret = ttm_read_lock(&dev_priv->reservation_sem, true); |
478 | if (unlikely(ret != 0)) | 477 | if (unlikely(ret != 0)) |
479 | return ret; | 478 | return ret; |
480 | 479 | ||
@@ -521,7 +520,7 @@ int vmw_context_define_ioctl(struct drm_device *dev, void *data, | |||
521 | out_err: | 520 | out_err: |
522 | vmw_resource_unreference(&res); | 521 | vmw_resource_unreference(&res); |
523 | out_unlock: | 522 | out_unlock: |
524 | ttm_read_unlock(&vmaster->lock); | 523 | ttm_read_unlock(&dev_priv->reservation_sem); |
525 | return ret; | 524 | return ret; |
526 | 525 | ||
527 | } | 526 | } |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
index a75840211b3c..70ddce8358b0 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
@@ -52,11 +52,10 @@ int vmw_dmabuf_to_placement(struct vmw_private *dev_priv, | |||
52 | struct ttm_placement *placement, | 52 | struct ttm_placement *placement, |
53 | bool interruptible) | 53 | bool interruptible) |
54 | { | 54 | { |
55 | struct vmw_master *vmaster = dev_priv->active_master; | ||
56 | struct ttm_buffer_object *bo = &buf->base; | 55 | struct ttm_buffer_object *bo = &buf->base; |
57 | int ret; | 56 | int ret; |
58 | 57 | ||
59 | ret = ttm_write_lock(&vmaster->lock, interruptible); | 58 | ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible); |
60 | if (unlikely(ret != 0)) | 59 | if (unlikely(ret != 0)) |
61 | return ret; | 60 | return ret; |
62 | 61 | ||
@@ -71,7 +70,7 @@ int vmw_dmabuf_to_placement(struct vmw_private *dev_priv, | |||
71 | ttm_bo_unreserve(bo); | 70 | ttm_bo_unreserve(bo); |
72 | 71 | ||
73 | err: | 72 | err: |
74 | ttm_write_unlock(&vmaster->lock); | 73 | ttm_write_unlock(&dev_priv->reservation_sem); |
75 | return ret; | 74 | return ret; |
76 | } | 75 | } |
77 | 76 | ||
@@ -95,12 +94,11 @@ int vmw_dmabuf_to_vram_or_gmr(struct vmw_private *dev_priv, | |||
95 | struct vmw_dma_buffer *buf, | 94 | struct vmw_dma_buffer *buf, |
96 | bool pin, bool interruptible) | 95 | bool pin, bool interruptible) |
97 | { | 96 | { |
98 | struct vmw_master *vmaster = dev_priv->active_master; | ||
99 | struct ttm_buffer_object *bo = &buf->base; | 97 | struct ttm_buffer_object *bo = &buf->base; |
100 | struct ttm_placement *placement; | 98 | struct ttm_placement *placement; |
101 | int ret; | 99 | int ret; |
102 | 100 | ||
103 | ret = ttm_write_lock(&vmaster->lock, interruptible); | 101 | ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible); |
104 | if (unlikely(ret != 0)) | 102 | if (unlikely(ret != 0)) |
105 | return ret; | 103 | return ret; |
106 | 104 | ||
@@ -143,7 +141,7 @@ int vmw_dmabuf_to_vram_or_gmr(struct vmw_private *dev_priv, | |||
143 | err_unreserve: | 141 | err_unreserve: |
144 | ttm_bo_unreserve(bo); | 142 | ttm_bo_unreserve(bo); |
145 | err: | 143 | err: |
146 | ttm_write_unlock(&vmaster->lock); | 144 | ttm_write_unlock(&dev_priv->reservation_sem); |
147 | return ret; | 145 | return ret; |
148 | } | 146 | } |
149 | 147 | ||
@@ -198,7 +196,6 @@ int vmw_dmabuf_to_start_of_vram(struct vmw_private *dev_priv, | |||
198 | struct vmw_dma_buffer *buf, | 196 | struct vmw_dma_buffer *buf, |
199 | bool pin, bool interruptible) | 197 | bool pin, bool interruptible) |
200 | { | 198 | { |
201 | struct vmw_master *vmaster = dev_priv->active_master; | ||
202 | struct ttm_buffer_object *bo = &buf->base; | 199 | struct ttm_buffer_object *bo = &buf->base; |
203 | struct ttm_placement placement; | 200 | struct ttm_placement placement; |
204 | int ret = 0; | 201 | int ret = 0; |
@@ -209,7 +206,7 @@ int vmw_dmabuf_to_start_of_vram(struct vmw_private *dev_priv, | |||
209 | placement = vmw_vram_placement; | 206 | placement = vmw_vram_placement; |
210 | placement.lpfn = bo->num_pages; | 207 | placement.lpfn = bo->num_pages; |
211 | 208 | ||
212 | ret = ttm_write_lock(&vmaster->lock, interruptible); | 209 | ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible); |
213 | if (unlikely(ret != 0)) | 210 | if (unlikely(ret != 0)) |
214 | return ret; | 211 | return ret; |
215 | 212 | ||
@@ -232,7 +229,7 @@ int vmw_dmabuf_to_start_of_vram(struct vmw_private *dev_priv, | |||
232 | 229 | ||
233 | ttm_bo_unreserve(bo); | 230 | ttm_bo_unreserve(bo); |
234 | err_unlock: | 231 | err_unlock: |
235 | ttm_write_unlock(&vmaster->lock); | 232 | ttm_write_unlock(&dev_priv->reservation_sem); |
236 | 233 | ||
237 | return ret; | 234 | return ret; |
238 | } | 235 | } |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index c35715f26f40..c7009581bb23 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -142,11 +142,11 @@ | |||
142 | 142 | ||
143 | static const struct drm_ioctl_desc vmw_ioctls[] = { | 143 | static const struct drm_ioctl_desc vmw_ioctls[] = { |
144 | VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl, | 144 | VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl, |
145 | DRM_AUTH | DRM_UNLOCKED), | 145 | DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW), |
146 | VMW_IOCTL_DEF(VMW_ALLOC_DMABUF, vmw_dmabuf_alloc_ioctl, | 146 | VMW_IOCTL_DEF(VMW_ALLOC_DMABUF, vmw_dmabuf_alloc_ioctl, |
147 | DRM_AUTH | DRM_UNLOCKED), | 147 | DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW), |
148 | VMW_IOCTL_DEF(VMW_UNREF_DMABUF, vmw_dmabuf_unref_ioctl, | 148 | VMW_IOCTL_DEF(VMW_UNREF_DMABUF, vmw_dmabuf_unref_ioctl, |
149 | DRM_AUTH | DRM_UNLOCKED), | 149 | DRM_UNLOCKED | DRM_RENDER_ALLOW), |
150 | VMW_IOCTL_DEF(VMW_CURSOR_BYPASS, | 150 | VMW_IOCTL_DEF(VMW_CURSOR_BYPASS, |
151 | vmw_kms_cursor_bypass_ioctl, | 151 | vmw_kms_cursor_bypass_ioctl, |
152 | DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED), | 152 | DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED), |
@@ -159,29 +159,28 @@ static const struct drm_ioctl_desc vmw_ioctls[] = { | |||
159 | DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED), | 159 | DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED), |
160 | 160 | ||
161 | VMW_IOCTL_DEF(VMW_CREATE_CONTEXT, vmw_context_define_ioctl, | 161 | VMW_IOCTL_DEF(VMW_CREATE_CONTEXT, vmw_context_define_ioctl, |
162 | DRM_AUTH | DRM_UNLOCKED), | 162 | DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW), |
163 | VMW_IOCTL_DEF(VMW_UNREF_CONTEXT, vmw_context_destroy_ioctl, | 163 | VMW_IOCTL_DEF(VMW_UNREF_CONTEXT, vmw_context_destroy_ioctl, |
164 | DRM_AUTH | DRM_UNLOCKED), | 164 | DRM_UNLOCKED | DRM_RENDER_ALLOW), |
165 | VMW_IOCTL_DEF(VMW_CREATE_SURFACE, vmw_surface_define_ioctl, | 165 | VMW_IOCTL_DEF(VMW_CREATE_SURFACE, vmw_surface_define_ioctl, |
166 | DRM_AUTH | DRM_UNLOCKED), | 166 | DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW), |
167 | VMW_IOCTL_DEF(VMW_UNREF_SURFACE, vmw_surface_destroy_ioctl, | 167 | VMW_IOCTL_DEF(VMW_UNREF_SURFACE, vmw_surface_destroy_ioctl, |
168 | DRM_AUTH | DRM_UNLOCKED), | 168 | DRM_UNLOCKED | DRM_RENDER_ALLOW), |
169 | VMW_IOCTL_DEF(VMW_REF_SURFACE, vmw_surface_reference_ioctl, | 169 | VMW_IOCTL_DEF(VMW_REF_SURFACE, vmw_surface_reference_ioctl, |
170 | DRM_AUTH | DRM_UNLOCKED), | 170 | DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW), |
171 | VMW_IOCTL_DEF(VMW_EXECBUF, vmw_execbuf_ioctl, | 171 | VMW_IOCTL_DEF(VMW_EXECBUF, vmw_execbuf_ioctl, |
172 | DRM_AUTH | DRM_UNLOCKED), | 172 | DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW), |
173 | VMW_IOCTL_DEF(VMW_FENCE_WAIT, vmw_fence_obj_wait_ioctl, | 173 | VMW_IOCTL_DEF(VMW_FENCE_WAIT, vmw_fence_obj_wait_ioctl, |
174 | DRM_AUTH | DRM_UNLOCKED), | 174 | DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW), |
175 | VMW_IOCTL_DEF(VMW_FENCE_SIGNALED, | 175 | VMW_IOCTL_DEF(VMW_FENCE_SIGNALED, |
176 | vmw_fence_obj_signaled_ioctl, | 176 | vmw_fence_obj_signaled_ioctl, |
177 | DRM_AUTH | DRM_UNLOCKED), | 177 | DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW), |
178 | VMW_IOCTL_DEF(VMW_FENCE_UNREF, vmw_fence_obj_unref_ioctl, | 178 | VMW_IOCTL_DEF(VMW_FENCE_UNREF, vmw_fence_obj_unref_ioctl, |
179 | DRM_AUTH | DRM_UNLOCKED), | 179 | DRM_UNLOCKED | DRM_RENDER_ALLOW), |
180 | VMW_IOCTL_DEF(VMW_FENCE_EVENT, | 180 | VMW_IOCTL_DEF(VMW_FENCE_EVENT, vmw_fence_event_ioctl, |
181 | vmw_fence_event_ioctl, | 181 | DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW), |
182 | DRM_AUTH | DRM_UNLOCKED), | ||
183 | VMW_IOCTL_DEF(VMW_GET_3D_CAP, vmw_get_cap_3d_ioctl, | 182 | VMW_IOCTL_DEF(VMW_GET_3D_CAP, vmw_get_cap_3d_ioctl, |
184 | DRM_AUTH | DRM_UNLOCKED), | 183 | DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW), |
185 | 184 | ||
186 | /* these allow direct access to the framebuffers mark as master only */ | 185 | /* these allow direct access to the framebuffers mark as master only */ |
187 | VMW_IOCTL_DEF(VMW_PRESENT, vmw_present_ioctl, | 186 | VMW_IOCTL_DEF(VMW_PRESENT, vmw_present_ioctl, |
@@ -194,19 +193,19 @@ static const struct drm_ioctl_desc vmw_ioctls[] = { | |||
194 | DRM_MASTER | DRM_UNLOCKED), | 193 | DRM_MASTER | DRM_UNLOCKED), |
195 | VMW_IOCTL_DEF(VMW_CREATE_SHADER, | 194 | VMW_IOCTL_DEF(VMW_CREATE_SHADER, |
196 | vmw_shader_define_ioctl, | 195 | vmw_shader_define_ioctl, |
197 | DRM_AUTH | DRM_UNLOCKED), | 196 | DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW), |
198 | VMW_IOCTL_DEF(VMW_UNREF_SHADER, | 197 | VMW_IOCTL_DEF(VMW_UNREF_SHADER, |
199 | vmw_shader_destroy_ioctl, | 198 | vmw_shader_destroy_ioctl, |
200 | DRM_AUTH | DRM_UNLOCKED), | 199 | DRM_UNLOCKED | DRM_RENDER_ALLOW), |
201 | VMW_IOCTL_DEF(VMW_GB_SURFACE_CREATE, | 200 | VMW_IOCTL_DEF(VMW_GB_SURFACE_CREATE, |
202 | vmw_gb_surface_define_ioctl, | 201 | vmw_gb_surface_define_ioctl, |
203 | DRM_AUTH | DRM_UNLOCKED), | 202 | DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW), |
204 | VMW_IOCTL_DEF(VMW_GB_SURFACE_REF, | 203 | VMW_IOCTL_DEF(VMW_GB_SURFACE_REF, |
205 | vmw_gb_surface_reference_ioctl, | 204 | vmw_gb_surface_reference_ioctl, |
206 | DRM_AUTH | DRM_UNLOCKED), | 205 | DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW), |
207 | VMW_IOCTL_DEF(VMW_SYNCCPU, | 206 | VMW_IOCTL_DEF(VMW_SYNCCPU, |
208 | vmw_user_dmabuf_synccpu_ioctl, | 207 | vmw_user_dmabuf_synccpu_ioctl, |
209 | DRM_AUTH | DRM_UNLOCKED), | 208 | DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW), |
210 | }; | 209 | }; |
211 | 210 | ||
212 | static struct pci_device_id vmw_pci_id_list[] = { | 211 | static struct pci_device_id vmw_pci_id_list[] = { |
@@ -606,6 +605,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) | |||
606 | mutex_init(&dev_priv->release_mutex); | 605 | mutex_init(&dev_priv->release_mutex); |
607 | mutex_init(&dev_priv->binding_mutex); | 606 | mutex_init(&dev_priv->binding_mutex); |
608 | rwlock_init(&dev_priv->resource_lock); | 607 | rwlock_init(&dev_priv->resource_lock); |
608 | ttm_lock_init(&dev_priv->reservation_sem); | ||
609 | 609 | ||
610 | for (i = vmw_res_context; i < vmw_res_max; ++i) { | 610 | for (i = vmw_res_context; i < vmw_res_max; ++i) { |
611 | idr_init(&dev_priv->res_idr[i]); | 611 | idr_init(&dev_priv->res_idr[i]); |
@@ -981,12 +981,70 @@ out_no_tfile: | |||
981 | return ret; | 981 | return ret; |
982 | } | 982 | } |
983 | 983 | ||
984 | static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd, | 984 | static struct vmw_master *vmw_master_check(struct drm_device *dev, |
985 | unsigned long arg) | 985 | struct drm_file *file_priv, |
986 | unsigned int flags) | ||
987 | { | ||
988 | int ret; | ||
989 | struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv); | ||
990 | struct vmw_master *vmaster; | ||
991 | |||
992 | if (file_priv->minor->type != DRM_MINOR_LEGACY || | ||
993 | !(flags & DRM_AUTH)) | ||
994 | return NULL; | ||
995 | |||
996 | ret = mutex_lock_interruptible(&dev->master_mutex); | ||
997 | if (unlikely(ret != 0)) | ||
998 | return ERR_PTR(-ERESTARTSYS); | ||
999 | |||
1000 | if (file_priv->is_master) { | ||
1001 | mutex_unlock(&dev->master_mutex); | ||
1002 | return NULL; | ||
1003 | } | ||
1004 | |||
1005 | /* | ||
1006 | * Check if we were previously master, but now dropped. | ||
1007 | */ | ||
1008 | if (vmw_fp->locked_master) { | ||
1009 | mutex_unlock(&dev->master_mutex); | ||
1010 | DRM_ERROR("Dropped master trying to access ioctl that " | ||
1011 | "requires authentication.\n"); | ||
1012 | return ERR_PTR(-EACCES); | ||
1013 | } | ||
1014 | mutex_unlock(&dev->master_mutex); | ||
1015 | |||
1016 | /* | ||
1017 | * Taking the drm_global_mutex after the TTM lock might deadlock | ||
1018 | */ | ||
1019 | if (!(flags & DRM_UNLOCKED)) { | ||
1020 | DRM_ERROR("Refusing locked ioctl access.\n"); | ||
1021 | return ERR_PTR(-EDEADLK); | ||
1022 | } | ||
1023 | |||
1024 | /* | ||
1025 | * Take the TTM lock. Possibly sleep waiting for the authenticating | ||
1026 | * master to become master again, or for a SIGTERM if the | ||
1027 | * authenticating master exits. | ||
1028 | */ | ||
1029 | vmaster = vmw_master(file_priv->master); | ||
1030 | ret = ttm_read_lock(&vmaster->lock, true); | ||
1031 | if (unlikely(ret != 0)) | ||
1032 | vmaster = ERR_PTR(ret); | ||
1033 | |||
1034 | return vmaster; | ||
1035 | } | ||
1036 | |||
1037 | static long vmw_generic_ioctl(struct file *filp, unsigned int cmd, | ||
1038 | unsigned long arg, | ||
1039 | long (*ioctl_func)(struct file *, unsigned int, | ||
1040 | unsigned long)) | ||
986 | { | 1041 | { |
987 | struct drm_file *file_priv = filp->private_data; | 1042 | struct drm_file *file_priv = filp->private_data; |
988 | struct drm_device *dev = file_priv->minor->dev; | 1043 | struct drm_device *dev = file_priv->minor->dev; |
989 | unsigned int nr = DRM_IOCTL_NR(cmd); | 1044 | unsigned int nr = DRM_IOCTL_NR(cmd); |
1045 | struct vmw_master *vmaster; | ||
1046 | unsigned int flags; | ||
1047 | long ret; | ||
990 | 1048 | ||
991 | /* | 1049 | /* |
992 | * Do extra checking on driver private ioctls. | 1050 | * Do extra checking on driver private ioctls. |
@@ -995,18 +1053,44 @@ static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd, | |||
995 | if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END) | 1053 | if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END) |
996 | && (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) { | 1054 | && (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) { |
997 | const struct drm_ioctl_desc *ioctl = | 1055 | const struct drm_ioctl_desc *ioctl = |
998 | &vmw_ioctls[nr - DRM_COMMAND_BASE]; | 1056 | &vmw_ioctls[nr - DRM_COMMAND_BASE]; |
999 | 1057 | ||
1000 | if (unlikely(ioctl->cmd_drv != cmd)) { | 1058 | if (unlikely(ioctl->cmd_drv != cmd)) { |
1001 | DRM_ERROR("Invalid command format, ioctl %d\n", | 1059 | DRM_ERROR("Invalid command format, ioctl %d\n", |
1002 | nr - DRM_COMMAND_BASE); | 1060 | nr - DRM_COMMAND_BASE); |
1003 | return -EINVAL; | 1061 | return -EINVAL; |
1004 | } | 1062 | } |
1063 | flags = ioctl->flags; | ||
1064 | } else if (!drm_ioctl_flags(nr, &flags)) | ||
1065 | return -EINVAL; | ||
1066 | |||
1067 | vmaster = vmw_master_check(dev, file_priv, flags); | ||
1068 | if (unlikely(IS_ERR(vmaster))) { | ||
1069 | DRM_INFO("IOCTL ERROR %d\n", nr); | ||
1070 | return PTR_ERR(vmaster); | ||
1005 | } | 1071 | } |
1006 | 1072 | ||
1007 | return drm_ioctl(filp, cmd, arg); | 1073 | ret = ioctl_func(filp, cmd, arg); |
1074 | if (vmaster) | ||
1075 | ttm_read_unlock(&vmaster->lock); | ||
1076 | |||
1077 | return ret; | ||
1078 | } | ||
1079 | |||
1080 | static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd, | ||
1081 | unsigned long arg) | ||
1082 | { | ||
1083 | return vmw_generic_ioctl(filp, cmd, arg, &drm_ioctl); | ||
1008 | } | 1084 | } |
1009 | 1085 | ||
1086 | #ifdef CONFIG_COMPAT | ||
1087 | static long vmw_compat_ioctl(struct file *filp, unsigned int cmd, | ||
1088 | unsigned long arg) | ||
1089 | { | ||
1090 | return vmw_generic_ioctl(filp, cmd, arg, &drm_compat_ioctl); | ||
1091 | } | ||
1092 | #endif | ||
1093 | |||
1010 | static void vmw_lastclose(struct drm_device *dev) | 1094 | static void vmw_lastclose(struct drm_device *dev) |
1011 | { | 1095 | { |
1012 | struct drm_crtc *crtc; | 1096 | struct drm_crtc *crtc; |
@@ -1175,12 +1259,11 @@ static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val, | |||
1175 | { | 1259 | { |
1176 | struct vmw_private *dev_priv = | 1260 | struct vmw_private *dev_priv = |
1177 | container_of(nb, struct vmw_private, pm_nb); | 1261 | container_of(nb, struct vmw_private, pm_nb); |
1178 | struct vmw_master *vmaster = dev_priv->active_master; | ||
1179 | 1262 | ||
1180 | switch (val) { | 1263 | switch (val) { |
1181 | case PM_HIBERNATION_PREPARE: | 1264 | case PM_HIBERNATION_PREPARE: |
1182 | case PM_SUSPEND_PREPARE: | 1265 | case PM_SUSPEND_PREPARE: |
1183 | ttm_suspend_lock(&vmaster->lock); | 1266 | ttm_suspend_lock(&dev_priv->reservation_sem); |
1184 | 1267 | ||
1185 | /** | 1268 | /** |
1186 | * This empties VRAM and unbinds all GMR bindings. | 1269 | * This empties VRAM and unbinds all GMR bindings. |
@@ -1194,7 +1277,7 @@ static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val, | |||
1194 | case PM_POST_HIBERNATION: | 1277 | case PM_POST_HIBERNATION: |
1195 | case PM_POST_SUSPEND: | 1278 | case PM_POST_SUSPEND: |
1196 | case PM_POST_RESTORE: | 1279 | case PM_POST_RESTORE: |
1197 | ttm_suspend_unlock(&vmaster->lock); | 1280 | ttm_suspend_unlock(&dev_priv->reservation_sem); |
1198 | 1281 | ||
1199 | break; | 1282 | break; |
1200 | case PM_RESTORE_PREPARE: | 1283 | case PM_RESTORE_PREPARE: |
@@ -1315,14 +1398,14 @@ static const struct file_operations vmwgfx_driver_fops = { | |||
1315 | .poll = vmw_fops_poll, | 1398 | .poll = vmw_fops_poll, |
1316 | .read = vmw_fops_read, | 1399 | .read = vmw_fops_read, |
1317 | #if defined(CONFIG_COMPAT) | 1400 | #if defined(CONFIG_COMPAT) |
1318 | .compat_ioctl = drm_compat_ioctl, | 1401 | .compat_ioctl = vmw_compat_ioctl, |
1319 | #endif | 1402 | #endif |
1320 | .llseek = noop_llseek, | 1403 | .llseek = noop_llseek, |
1321 | }; | 1404 | }; |
1322 | 1405 | ||
1323 | static struct drm_driver driver = { | 1406 | static struct drm_driver driver = { |
1324 | .driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | | 1407 | .driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | |
1325 | DRIVER_MODESET | DRIVER_PRIME, | 1408 | DRIVER_MODESET | DRIVER_PRIME | DRIVER_RENDER, |
1326 | .load = vmw_driver_load, | 1409 | .load = vmw_driver_load, |
1327 | .unload = vmw_driver_unload, | 1410 | .unload = vmw_driver_unload, |
1328 | .lastclose = vmw_lastclose, | 1411 | .lastclose = vmw_lastclose, |
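The wrapper pair introduced above replaces drm_ioctl()/drm_compat_ioctl() directly in the driver's file_operations. Condensed, the wiring looks roughly like this (a sketch with unchanged members elided, not the full structure from the patch):

```c
static const struct file_operations vmwgfx_fops_sketch = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= vmw_unlocked_ioctl,
#if defined(CONFIG_COMPAT)
	.compat_ioctl	= vmw_compat_ioctl,	/* was drm_compat_ioctl */
#endif
	.llseek		= noop_llseek,
	/* open/release/mmap/poll/read members elided */
};
```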
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 07831554dad7..6b252a887ae2 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -40,9 +40,9 @@ | |||
40 | #include <drm/ttm/ttm_module.h> | 40 | #include <drm/ttm/ttm_module.h> |
41 | #include "vmwgfx_fence.h" | 41 | #include "vmwgfx_fence.h" |
42 | 42 | ||
43 | #define VMWGFX_DRIVER_DATE "20140228" | 43 | #define VMWGFX_DRIVER_DATE "20140325" |
44 | #define VMWGFX_DRIVER_MAJOR 2 | 44 | #define VMWGFX_DRIVER_MAJOR 2 |
45 | #define VMWGFX_DRIVER_MINOR 5 | 45 | #define VMWGFX_DRIVER_MINOR 6 |
46 | #define VMWGFX_DRIVER_PATCHLEVEL 0 | 46 | #define VMWGFX_DRIVER_PATCHLEVEL 0 |
47 | #define VMWGFX_FILE_PAGE_OFFSET 0x00100000 | 47 | #define VMWGFX_FILE_PAGE_OFFSET 0x00100000 |
48 | #define VMWGFX_FIFO_STATIC_SIZE (1024*1024) | 48 | #define VMWGFX_FIFO_STATIC_SIZE (1024*1024) |
@@ -487,6 +487,11 @@ struct vmw_private { | |||
487 | uint32_t num_3d_resources; | 487 | uint32_t num_3d_resources; |
488 | 488 | ||
489 | /* | 489 | /* |
490 | * Replace this with an rwsem as soon as we have down_xx_interruptible() | ||
491 | */ | ||
492 | struct ttm_lock reservation_sem; | ||
493 | |||
494 | /* | ||
490 | * Query processing. These members | 495 | * Query processing. These members |
491 | * are protected by the cmdbuf mutex. | 496 | * are protected by the cmdbuf mutex. |
492 | */ | 497 | */ |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index efb575a7996c..931490b9cfed 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -2712,7 +2712,6 @@ int vmw_execbuf_ioctl(struct drm_device *dev, void *data, | |||
2712 | { | 2712 | { |
2713 | struct vmw_private *dev_priv = vmw_priv(dev); | 2713 | struct vmw_private *dev_priv = vmw_priv(dev); |
2714 | struct drm_vmw_execbuf_arg *arg = (struct drm_vmw_execbuf_arg *)data; | 2714 | struct drm_vmw_execbuf_arg *arg = (struct drm_vmw_execbuf_arg *)data; |
2715 | struct vmw_master *vmaster = vmw_master(file_priv->master); | ||
2716 | int ret; | 2715 | int ret; |
2717 | 2716 | ||
2718 | /* | 2717 | /* |
@@ -2729,7 +2728,7 @@ int vmw_execbuf_ioctl(struct drm_device *dev, void *data, | |||
2729 | return -EINVAL; | 2728 | return -EINVAL; |
2730 | } | 2729 | } |
2731 | 2730 | ||
2732 | ret = ttm_read_lock(&vmaster->lock, true); | 2731 | ret = ttm_read_lock(&dev_priv->reservation_sem, true); |
2733 | if (unlikely(ret != 0)) | 2732 | if (unlikely(ret != 0)) |
2734 | return ret; | 2733 | return ret; |
2735 | 2734 | ||
@@ -2745,6 +2744,6 @@ int vmw_execbuf_ioctl(struct drm_device *dev, void *data, | |||
2745 | vmw_kms_cursor_post_execbuf(dev_priv); | 2744 | vmw_kms_cursor_post_execbuf(dev_priv); |
2746 | 2745 | ||
2747 | out_unlock: | 2746 | out_unlock: |
2748 | ttm_read_unlock(&vmaster->lock); | 2747 | ttm_read_unlock(&dev_priv->reservation_sem); |
2749 | return ret; | 2748 | return ret; |
2750 | } | 2749 | } |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
index ed5ce2a41bbf..9699bd174ae4 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -377,14 +377,13 @@ static int vmw_fb_create_bo(struct vmw_private *vmw_priv, | |||
377 | 377 | ||
378 | ne_placement.lpfn = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; | 378 | ne_placement.lpfn = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; |
379 | 379 | ||
380 | /* interuptable? */ | 380 | (void) ttm_write_lock(&vmw_priv->reservation_sem, false); |
381 | ret = ttm_write_lock(&vmw_priv->fbdev_master.lock, false); | ||
382 | if (unlikely(ret != 0)) | ||
383 | return ret; | ||
384 | 381 | ||
385 | vmw_bo = kmalloc(sizeof(*vmw_bo), GFP_KERNEL); | 382 | vmw_bo = kmalloc(sizeof(*vmw_bo), GFP_KERNEL); |
386 | if (!vmw_bo) | 383 | if (!vmw_bo) { |
384 | ret = -ENOMEM; | ||
387 | goto err_unlock; | 385 | goto err_unlock; |
386 | } | ||
388 | 387 | ||
389 | ret = vmw_dmabuf_init(vmw_priv, vmw_bo, size, | 388 | ret = vmw_dmabuf_init(vmw_priv, vmw_bo, size, |
390 | &ne_placement, | 389 | &ne_placement, |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
index 47b70949bf3a..37881ecf5d7a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
@@ -226,7 +226,6 @@ int vmw_present_ioctl(struct drm_device *dev, void *data, | |||
226 | struct drm_vmw_present_arg *arg = | 226 | struct drm_vmw_present_arg *arg = |
227 | (struct drm_vmw_present_arg *)data; | 227 | (struct drm_vmw_present_arg *)data; |
228 | struct vmw_surface *surface; | 228 | struct vmw_surface *surface; |
229 | struct vmw_master *vmaster = vmw_master(file_priv->master); | ||
230 | struct drm_vmw_rect __user *clips_ptr; | 229 | struct drm_vmw_rect __user *clips_ptr; |
231 | struct drm_vmw_rect *clips = NULL; | 230 | struct drm_vmw_rect *clips = NULL; |
232 | struct drm_framebuffer *fb; | 231 | struct drm_framebuffer *fb; |
@@ -271,7 +270,7 @@ int vmw_present_ioctl(struct drm_device *dev, void *data, | |||
271 | } | 270 | } |
272 | vfb = vmw_framebuffer_to_vfb(fb); | 271 | vfb = vmw_framebuffer_to_vfb(fb); |
273 | 272 | ||
274 | ret = ttm_read_lock(&vmaster->lock, true); | 273 | ret = ttm_read_lock(&dev_priv->reservation_sem, true); |
275 | if (unlikely(ret != 0)) | 274 | if (unlikely(ret != 0)) |
276 | goto out_no_ttm_lock; | 275 | goto out_no_ttm_lock; |
277 | 276 | ||
@@ -291,7 +290,7 @@ int vmw_present_ioctl(struct drm_device *dev, void *data, | |||
291 | vmw_surface_unreference(&surface); | 290 | vmw_surface_unreference(&surface); |
292 | 291 | ||
293 | out_no_surface: | 292 | out_no_surface: |
294 | ttm_read_unlock(&vmaster->lock); | 293 | ttm_read_unlock(&dev_priv->reservation_sem); |
295 | out_no_ttm_lock: | 294 | out_no_ttm_lock: |
296 | drm_framebuffer_unreference(fb); | 295 | drm_framebuffer_unreference(fb); |
297 | out_no_fb: | 296 | out_no_fb: |
@@ -311,7 +310,6 @@ int vmw_present_readback_ioctl(struct drm_device *dev, void *data, | |||
311 | struct drm_vmw_fence_rep __user *user_fence_rep = | 310 | struct drm_vmw_fence_rep __user *user_fence_rep = |
312 | (struct drm_vmw_fence_rep __user *) | 311 | (struct drm_vmw_fence_rep __user *) |
313 | (unsigned long)arg->fence_rep; | 312 | (unsigned long)arg->fence_rep; |
314 | struct vmw_master *vmaster = vmw_master(file_priv->master); | ||
315 | struct drm_vmw_rect __user *clips_ptr; | 313 | struct drm_vmw_rect __user *clips_ptr; |
316 | struct drm_vmw_rect *clips = NULL; | 314 | struct drm_vmw_rect *clips = NULL; |
317 | struct drm_framebuffer *fb; | 315 | struct drm_framebuffer *fb; |
@@ -361,7 +359,7 @@ int vmw_present_readback_ioctl(struct drm_device *dev, void *data, | |||
361 | goto out_no_ttm_lock; | 359 | goto out_no_ttm_lock; |
362 | } | 360 | } |
363 | 361 | ||
364 | ret = ttm_read_lock(&vmaster->lock, true); | 362 | ret = ttm_read_lock(&dev_priv->reservation_sem, true); |
365 | if (unlikely(ret != 0)) | 363 | if (unlikely(ret != 0)) |
366 | goto out_no_ttm_lock; | 364 | goto out_no_ttm_lock; |
367 | 365 | ||
@@ -369,7 +367,7 @@ int vmw_present_readback_ioctl(struct drm_device *dev, void *data, | |||
369 | vfb, user_fence_rep, | 367 | vfb, user_fence_rep, |
370 | clips, num_clips); | 368 | clips, num_clips); |
371 | 369 | ||
372 | ttm_read_unlock(&vmaster->lock); | 370 | ttm_read_unlock(&dev_priv->reservation_sem); |
373 | out_no_ttm_lock: | 371 | out_no_ttm_lock: |
374 | drm_framebuffer_unreference(fb); | 372 | drm_framebuffer_unreference(fb); |
375 | out_no_fb: | 373 | out_no_fb: |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 8a650413dea5..159af7ee111f 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -596,7 +596,6 @@ static int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer, | |||
596 | unsigned num_clips) | 596 | unsigned num_clips) |
597 | { | 597 | { |
598 | struct vmw_private *dev_priv = vmw_priv(framebuffer->dev); | 598 | struct vmw_private *dev_priv = vmw_priv(framebuffer->dev); |
599 | struct vmw_master *vmaster = vmw_master(file_priv->master); | ||
600 | struct vmw_framebuffer_surface *vfbs = | 599 | struct vmw_framebuffer_surface *vfbs = |
601 | vmw_framebuffer_to_vfbs(framebuffer); | 600 | vmw_framebuffer_to_vfbs(framebuffer); |
602 | struct drm_clip_rect norect; | 601 | struct drm_clip_rect norect; |
@@ -611,7 +610,7 @@ static int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer, | |||
611 | 610 | ||
612 | drm_modeset_lock_all(dev_priv->dev); | 611 | drm_modeset_lock_all(dev_priv->dev); |
613 | 612 | ||
614 | ret = ttm_read_lock(&vmaster->lock, true); | 613 | ret = ttm_read_lock(&dev_priv->reservation_sem, true); |
615 | if (unlikely(ret != 0)) { | 614 | if (unlikely(ret != 0)) { |
616 | drm_modeset_unlock_all(dev_priv->dev); | 615 | drm_modeset_unlock_all(dev_priv->dev); |
617 | return ret; | 616 | return ret; |
@@ -632,7 +631,7 @@ static int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer, | |||
632 | flags, color, | 631 | flags, color, |
633 | clips, num_clips, inc, NULL); | 632 | clips, num_clips, inc, NULL); |
634 | 633 | ||
635 | ttm_read_unlock(&vmaster->lock); | 634 | ttm_read_unlock(&dev_priv->reservation_sem); |
636 | 635 | ||
637 | drm_modeset_unlock_all(dev_priv->dev); | 636 | drm_modeset_unlock_all(dev_priv->dev); |
638 | 637 | ||
@@ -954,7 +953,6 @@ static int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer, | |||
954 | unsigned num_clips) | 953 | unsigned num_clips) |
955 | { | 954 | { |
956 | struct vmw_private *dev_priv = vmw_priv(framebuffer->dev); | 955 | struct vmw_private *dev_priv = vmw_priv(framebuffer->dev); |
957 | struct vmw_master *vmaster = vmw_master(file_priv->master); | ||
958 | struct vmw_framebuffer_dmabuf *vfbd = | 956 | struct vmw_framebuffer_dmabuf *vfbd = |
959 | vmw_framebuffer_to_vfbd(framebuffer); | 957 | vmw_framebuffer_to_vfbd(framebuffer); |
960 | struct drm_clip_rect norect; | 958 | struct drm_clip_rect norect; |
@@ -962,7 +960,7 @@ static int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer, | |||
962 | 960 | ||
963 | drm_modeset_lock_all(dev_priv->dev); | 961 | drm_modeset_lock_all(dev_priv->dev); |
964 | 962 | ||
965 | ret = ttm_read_lock(&vmaster->lock, true); | 963 | ret = ttm_read_lock(&dev_priv->reservation_sem, true); |
966 | if (unlikely(ret != 0)) { | 964 | if (unlikely(ret != 0)) { |
967 | drm_modeset_unlock_all(dev_priv->dev); | 965 | drm_modeset_unlock_all(dev_priv->dev); |
968 | return ret; | 966 | return ret; |
@@ -989,7 +987,7 @@ static int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer, | |||
989 | clips, num_clips, increment, NULL); | 987 | clips, num_clips, increment, NULL); |
990 | } | 988 | } |
991 | 989 | ||
992 | ttm_read_unlock(&vmaster->lock); | 990 | ttm_read_unlock(&dev_priv->reservation_sem); |
993 | 991 | ||
994 | drm_modeset_unlock_all(dev_priv->dev); | 992 | drm_modeset_unlock_all(dev_priv->dev); |
995 | 993 | ||
@@ -2022,7 +2020,6 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data, | |||
2022 | struct vmw_private *dev_priv = vmw_priv(dev); | 2020 | struct vmw_private *dev_priv = vmw_priv(dev); |
2023 | struct drm_vmw_update_layout_arg *arg = | 2021 | struct drm_vmw_update_layout_arg *arg = |
2024 | (struct drm_vmw_update_layout_arg *)data; | 2022 | (struct drm_vmw_update_layout_arg *)data; |
2025 | struct vmw_master *vmaster = vmw_master(file_priv->master); | ||
2026 | void __user *user_rects; | 2023 | void __user *user_rects; |
2027 | struct drm_vmw_rect *rects; | 2024 | struct drm_vmw_rect *rects; |
2028 | unsigned rects_size; | 2025 | unsigned rects_size; |
@@ -2030,7 +2027,7 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data, | |||
2030 | int i; | 2027 | int i; |
2031 | struct drm_mode_config *mode_config = &dev->mode_config; | 2028 | struct drm_mode_config *mode_config = &dev->mode_config; |
2032 | 2029 | ||
2033 | ret = ttm_read_lock(&vmaster->lock, true); | 2030 | ret = ttm_read_lock(&dev_priv->reservation_sem, true); |
2034 | if (unlikely(ret != 0)) | 2031 | if (unlikely(ret != 0)) |
2035 | return ret; | 2032 | return ret; |
2036 | 2033 | ||
@@ -2072,6 +2069,6 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data, | |||
2072 | out_free: | 2069 | out_free: |
2073 | kfree(rects); | 2070 | kfree(rects); |
2074 | out_unlock: | 2071 | out_unlock: |
2075 | ttm_read_unlock(&vmaster->lock); | 2072 | ttm_read_unlock(&dev_priv->reservation_sem); |
2076 | return ret; | 2073 | return ret; |
2077 | } | 2074 | } |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c index 9757b57f8388..01d68f0a69dc 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | |||
@@ -538,8 +538,13 @@ int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo, | |||
538 | return -EPERM; | 538 | return -EPERM; |
539 | 539 | ||
540 | vmw_user_bo = vmw_user_dma_buffer(bo); | 540 | vmw_user_bo = vmw_user_dma_buffer(bo); |
541 | return (vmw_user_bo->prime.base.tfile == tfile || | 541 | |
542 | vmw_user_bo->prime.base.shareable) ? 0 : -EPERM; | 542 | /* Check that the caller has opened the object. */ |
543 | if (likely(ttm_ref_object_exists(tfile, &vmw_user_bo->prime.base))) | ||
544 | return 0; | ||
545 | |||
546 | DRM_ERROR("Could not grant buffer access.\n"); | ||
547 | return -EPERM; | ||
543 | } | 548 | } |
544 | 549 | ||
545 | /** | 550 | /** |
@@ -676,10 +681,9 @@ int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data, | |||
676 | struct drm_vmw_dmabuf_rep *rep = &arg->rep; | 681 | struct drm_vmw_dmabuf_rep *rep = &arg->rep; |
677 | struct vmw_dma_buffer *dma_buf; | 682 | struct vmw_dma_buffer *dma_buf; |
678 | uint32_t handle; | 683 | uint32_t handle; |
679 | struct vmw_master *vmaster = vmw_master(file_priv->master); | ||
680 | int ret; | 684 | int ret; |
681 | 685 | ||
682 | ret = ttm_read_lock(&vmaster->lock, true); | 686 | ret = ttm_read_lock(&dev_priv->reservation_sem, true); |
683 | if (unlikely(ret != 0)) | 687 | if (unlikely(ret != 0)) |
684 | return ret; | 688 | return ret; |
685 | 689 | ||
@@ -696,7 +700,7 @@ int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data, | |||
696 | vmw_dmabuf_unreference(&dma_buf); | 700 | vmw_dmabuf_unreference(&dma_buf); |
697 | 701 | ||
698 | out_no_dmabuf: | 702 | out_no_dmabuf: |
699 | ttm_read_unlock(&vmaster->lock); | 703 | ttm_read_unlock(&dev_priv->reservation_sem); |
700 | 704 | ||
701 | return ret; | 705 | return ret; |
702 | } | 706 | } |
@@ -873,7 +877,6 @@ int vmw_stream_claim_ioctl(struct drm_device *dev, void *data, | |||
873 | struct vmw_resource *tmp; | 877 | struct vmw_resource *tmp; |
874 | struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data; | 878 | struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data; |
875 | struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; | 879 | struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; |
876 | struct vmw_master *vmaster = vmw_master(file_priv->master); | ||
877 | int ret; | 880 | int ret; |
878 | 881 | ||
879 | /* | 882 | /* |
@@ -884,7 +887,7 @@ int vmw_stream_claim_ioctl(struct drm_device *dev, void *data, | |||
884 | if (unlikely(vmw_user_stream_size == 0)) | 887 | if (unlikely(vmw_user_stream_size == 0)) |
885 | vmw_user_stream_size = ttm_round_pot(sizeof(*stream)) + 128; | 888 | vmw_user_stream_size = ttm_round_pot(sizeof(*stream)) + 128; |
886 | 889 | ||
887 | ret = ttm_read_lock(&vmaster->lock, true); | 890 | ret = ttm_read_lock(&dev_priv->reservation_sem, true); |
888 | if (unlikely(ret != 0)) | 891 | if (unlikely(ret != 0)) |
889 | return ret; | 892 | return ret; |
890 | 893 | ||
@@ -932,7 +935,7 @@ int vmw_stream_claim_ioctl(struct drm_device *dev, void *data, | |||
932 | out_err: | 935 | out_err: |
933 | vmw_resource_unreference(&res); | 936 | vmw_resource_unreference(&res); |
934 | out_unlock: | 937 | out_unlock: |
935 | ttm_read_unlock(&vmaster->lock); | 938 | ttm_read_unlock(&dev_priv->reservation_sem); |
936 | return ret; | 939 | return ret; |
937 | } | 940 | } |
938 | 941 | ||
@@ -985,14 +988,13 @@ int vmw_dumb_create(struct drm_file *file_priv, | |||
985 | struct drm_mode_create_dumb *args) | 988 | struct drm_mode_create_dumb *args) |
986 | { | 989 | { |
987 | struct vmw_private *dev_priv = vmw_priv(dev); | 990 | struct vmw_private *dev_priv = vmw_priv(dev); |
988 | struct vmw_master *vmaster = vmw_master(file_priv->master); | ||
989 | struct vmw_dma_buffer *dma_buf; | 991 | struct vmw_dma_buffer *dma_buf; |
990 | int ret; | 992 | int ret; |
991 | 993 | ||
992 | args->pitch = args->width * ((args->bpp + 7) / 8); | 994 | args->pitch = args->width * ((args->bpp + 7) / 8); |
993 | args->size = args->pitch * args->height; | 995 | args->size = args->pitch * args->height; |
994 | 996 | ||
995 | ret = ttm_read_lock(&vmaster->lock, true); | 997 | ret = ttm_read_lock(&dev_priv->reservation_sem, true); |
996 | if (unlikely(ret != 0)) | 998 | if (unlikely(ret != 0)) |
997 | return ret; | 999 | return ret; |
998 | 1000 | ||
@@ -1004,7 +1006,7 @@ int vmw_dumb_create(struct drm_file *file_priv, | |||
1004 | 1006 | ||
1005 | vmw_dmabuf_unreference(&dma_buf); | 1007 | vmw_dmabuf_unreference(&dma_buf); |
1006 | out_no_dmabuf: | 1008 | out_no_dmabuf: |
1007 | ttm_read_unlock(&vmaster->lock); | 1009 | ttm_read_unlock(&dev_priv->reservation_sem); |
1008 | return ret; | 1010 | return ret; |
1009 | } | 1011 | } |
1010 | 1012 | ||
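
The vmw_user_dmabuf_verify_access() change above tightens buffer maps: access is now granted only if the calling file has actually opened the buffer (holds a TTM_REF_USAGE reference on it), rather than accepting any buffer flagged shareable. A hedged sketch of the same check factored into a helper; vmw_bo_map_allowed() is hypothetical, while ttm_ref_object_exists() is the function declared in the ttm_object.h hunk further down.

    /* Hypothetical helper mirroring the tightened verify_access check. */
    static int vmw_bo_map_allowed(struct ttm_object_file *tfile,
                                  struct vmw_user_dma_buffer *vmw_user_bo)
    {
            /* Only files that have opened the object may map it. */
            if (likely(ttm_ref_object_exists(tfile, &vmw_user_bo->prime.base)))
                    return 0;

            DRM_ERROR("Could not grant buffer access.\n");
            return -EPERM;
    }
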
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c index ee3856578a12..c1559eeaffe9 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c | |||
@@ -449,7 +449,6 @@ int vmw_shader_define_ioctl(struct drm_device *dev, void *data, | |||
449 | struct drm_vmw_shader_create_arg *arg = | 449 | struct drm_vmw_shader_create_arg *arg = |
450 | (struct drm_vmw_shader_create_arg *)data; | 450 | (struct drm_vmw_shader_create_arg *)data; |
451 | struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; | 451 | struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; |
452 | struct vmw_master *vmaster = vmw_master(file_priv->master); | ||
453 | struct vmw_dma_buffer *buffer = NULL; | 452 | struct vmw_dma_buffer *buffer = NULL; |
454 | SVGA3dShaderType shader_type; | 453 | SVGA3dShaderType shader_type; |
455 | int ret; | 454 | int ret; |
@@ -487,14 +486,14 @@ int vmw_shader_define_ioctl(struct drm_device *dev, void *data, | |||
487 | goto out_bad_arg; | 486 | goto out_bad_arg; |
488 | } | 487 | } |
489 | 488 | ||
490 | ret = ttm_read_lock(&vmaster->lock, true); | 489 | ret = ttm_read_lock(&dev_priv->reservation_sem, true); |
491 | if (unlikely(ret != 0)) | 490 | if (unlikely(ret != 0)) |
492 | goto out_bad_arg; | 491 | goto out_bad_arg; |
493 | 492 | ||
494 | ret = vmw_shader_alloc(dev_priv, buffer, arg->size, arg->offset, | 493 | ret = vmw_shader_alloc(dev_priv, buffer, arg->size, arg->offset, |
495 | shader_type, tfile, &arg->shader_handle); | 494 | shader_type, tfile, &arg->shader_handle); |
496 | 495 | ||
497 | ttm_read_unlock(&vmaster->lock); | 496 | ttm_read_unlock(&dev_priv->reservation_sem); |
498 | out_bad_arg: | 497 | out_bad_arg: |
499 | vmw_dmabuf_unreference(&buffer); | 498 | vmw_dmabuf_unreference(&buffer); |
500 | return ret; | 499 | return ret; |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c index e7af580ab977..4ecdbf3e59da 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c | |||
@@ -36,11 +36,13 @@ | |||
36 | * @base: The TTM base object handling user-space visibility. | 36 | * @base: The TTM base object handling user-space visibility. |
37 | * @srf: The surface metadata. | 37 | * @srf: The surface metadata. |
38 | * @size: TTM accounting size for the surface. | 38 | * @size: TTM accounting size for the surface. |
39 | * @master: master of the creating client. Used for security check. | ||
39 | */ | 40 | */ |
40 | struct vmw_user_surface { | 41 | struct vmw_user_surface { |
41 | struct ttm_prime_object prime; | 42 | struct ttm_prime_object prime; |
42 | struct vmw_surface srf; | 43 | struct vmw_surface srf; |
43 | uint32_t size; | 44 | uint32_t size; |
45 | struct drm_master *master; | ||
44 | }; | 46 | }; |
45 | 47 | ||
46 | /** | 48 | /** |
@@ -624,6 +626,8 @@ static void vmw_user_surface_free(struct vmw_resource *res) | |||
624 | struct vmw_private *dev_priv = srf->res.dev_priv; | 626 | struct vmw_private *dev_priv = srf->res.dev_priv; |
625 | uint32_t size = user_srf->size; | 627 | uint32_t size = user_srf->size; |
626 | 628 | ||
629 | if (user_srf->master) | ||
630 | drm_master_put(&user_srf->master); | ||
627 | kfree(srf->offsets); | 631 | kfree(srf->offsets); |
628 | kfree(srf->sizes); | 632 | kfree(srf->sizes); |
629 | kfree(srf->snooper.image); | 633 | kfree(srf->snooper.image); |
@@ -697,7 +701,6 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data, | |||
697 | struct vmw_surface_offset *cur_offset; | 701 | struct vmw_surface_offset *cur_offset; |
698 | uint32_t num_sizes; | 702 | uint32_t num_sizes; |
699 | uint32_t size; | 703 | uint32_t size; |
700 | struct vmw_master *vmaster = vmw_master(file_priv->master); | ||
701 | const struct svga3d_surface_desc *desc; | 704 | const struct svga3d_surface_desc *desc; |
702 | 705 | ||
703 | if (unlikely(vmw_user_surface_size == 0)) | 706 | if (unlikely(vmw_user_surface_size == 0)) |
@@ -723,7 +726,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data, | |||
723 | return -EINVAL; | 726 | return -EINVAL; |
724 | } | 727 | } |
725 | 728 | ||
726 | ret = ttm_read_lock(&vmaster->lock, true); | 729 | ret = ttm_read_lock(&dev_priv->reservation_sem, true); |
727 | if (unlikely(ret != 0)) | 730 | if (unlikely(ret != 0)) |
728 | return ret; | 731 | return ret; |
729 | 732 | ||
@@ -820,6 +823,8 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data, | |||
820 | 823 | ||
821 | user_srf->prime.base.shareable = false; | 824 | user_srf->prime.base.shareable = false; |
822 | user_srf->prime.base.tfile = NULL; | 825 | user_srf->prime.base.tfile = NULL; |
826 | if (drm_is_primary_client(file_priv)) | ||
827 | user_srf->master = drm_master_get(file_priv->master); | ||
823 | 828 | ||
824 | /** | 829 | /** |
825 | * From this point, the generic resource management functions | 830 | * From this point, the generic resource management functions |
@@ -862,7 +867,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data, | |||
862 | rep->sid = user_srf->prime.base.hash.key; | 867 | rep->sid = user_srf->prime.base.hash.key; |
863 | vmw_resource_unreference(&res); | 868 | vmw_resource_unreference(&res); |
864 | 869 | ||
865 | ttm_read_unlock(&vmaster->lock); | 870 | ttm_read_unlock(&dev_priv->reservation_sem); |
866 | return 0; | 871 | return 0; |
867 | out_no_copy: | 872 | out_no_copy: |
868 | kfree(srf->offsets); | 873 | kfree(srf->offsets); |
@@ -873,7 +878,81 @@ out_no_sizes: | |||
873 | out_no_user_srf: | 878 | out_no_user_srf: |
874 | ttm_mem_global_free(vmw_mem_glob(dev_priv), size); | 879 | ttm_mem_global_free(vmw_mem_glob(dev_priv), size); |
875 | out_unlock: | 880 | out_unlock: |
876 | ttm_read_unlock(&vmaster->lock); | 881 | ttm_read_unlock(&dev_priv->reservation_sem); |
882 | return ret; | ||
883 | } | ||
884 | |||
885 | |||
886 | static int | ||
887 | vmw_surface_handle_reference(struct vmw_private *dev_priv, | ||
888 | struct drm_file *file_priv, | ||
889 | uint32_t u_handle, | ||
890 | enum drm_vmw_handle_type handle_type, | ||
891 | struct ttm_base_object **base_p) | ||
892 | { | ||
893 | struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; | ||
894 | struct vmw_user_surface *user_srf; | ||
895 | uint32_t handle; | ||
896 | struct ttm_base_object *base; | ||
897 | int ret; | ||
898 | |||
899 | if (handle_type == DRM_VMW_HANDLE_PRIME) { | ||
900 | ret = ttm_prime_fd_to_handle(tfile, u_handle, &handle); | ||
901 | if (unlikely(ret != 0)) | ||
902 | return ret; | ||
903 | } else { | ||
904 | if (unlikely(drm_is_render_client(file_priv))) { | ||
905 | DRM_ERROR("Render client refused legacy " | ||
906 | "surface reference.\n"); | ||
907 | return -EACCES; | ||
908 | } | ||
909 | handle = u_handle; | ||
910 | } | ||
911 | |||
912 | ret = -EINVAL; | ||
913 | base = ttm_base_object_lookup_for_ref(dev_priv->tdev, handle); | ||
914 | if (unlikely(base == NULL)) { | ||
915 | DRM_ERROR("Could not find surface to reference.\n"); | ||
916 | goto out_no_lookup; | ||
917 | } | ||
918 | |||
919 | if (unlikely(ttm_base_object_type(base) != VMW_RES_SURFACE)) { | ||
920 | DRM_ERROR("Referenced object is not a surface.\n"); | ||
921 | goto out_bad_resource; | ||
922 | } | ||
923 | |||
924 | if (handle_type != DRM_VMW_HANDLE_PRIME) { | ||
925 | user_srf = container_of(base, struct vmw_user_surface, | ||
926 | prime.base); | ||
927 | |||
928 | /* | ||
929 | * Make sure the surface creator has the same | ||
930 | * authenticating master. | ||
931 | */ | ||
932 | if (drm_is_primary_client(file_priv) && | ||
933 | user_srf->master != file_priv->master) { | ||
934 | DRM_ERROR("Trying to reference surface outside of" | ||
935 | " master domain.\n"); | ||
936 | ret = -EACCES; | ||
937 | goto out_bad_resource; | ||
938 | } | ||
939 | |||
940 | ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL); | ||
941 | if (unlikely(ret != 0)) { | ||
942 | DRM_ERROR("Could not add a reference to a surface.\n"); | ||
943 | goto out_bad_resource; | ||
944 | } | ||
945 | } | ||
946 | |||
947 | *base_p = base; | ||
948 | return 0; | ||
949 | |||
950 | out_bad_resource: | ||
951 | ttm_base_object_unref(&base); | ||
952 | out_no_lookup: | ||
953 | if (handle_type == DRM_VMW_HANDLE_PRIME) | ||
954 | (void) ttm_ref_object_base_unref(tfile, handle, TTM_REF_USAGE); | ||
955 | |||
877 | return ret; | 956 | return ret; |
878 | } | 957 | } |
879 | 958 | ||
@@ -898,27 +977,16 @@ int vmw_surface_reference_ioctl(struct drm_device *dev, void *data, | |||
898 | struct vmw_user_surface *user_srf; | 977 | struct vmw_user_surface *user_srf; |
899 | struct drm_vmw_size __user *user_sizes; | 978 | struct drm_vmw_size __user *user_sizes; |
900 | struct ttm_base_object *base; | 979 | struct ttm_base_object *base; |
901 | int ret = -EINVAL; | 980 | int ret; |
902 | |||
903 | base = ttm_base_object_lookup_for_ref(dev_priv->tdev, req->sid); | ||
904 | if (unlikely(base == NULL)) { | ||
905 | DRM_ERROR("Could not find surface to reference.\n"); | ||
906 | return -EINVAL; | ||
907 | } | ||
908 | 981 | ||
909 | if (unlikely(ttm_base_object_type(base) != VMW_RES_SURFACE)) | 982 | ret = vmw_surface_handle_reference(dev_priv, file_priv, req->sid, |
910 | goto out_bad_resource; | 983 | req->handle_type, &base); |
984 | if (unlikely(ret != 0)) | ||
985 | return ret; | ||
911 | 986 | ||
912 | user_srf = container_of(base, struct vmw_user_surface, prime.base); | 987 | user_srf = container_of(base, struct vmw_user_surface, prime.base); |
913 | srf = &user_srf->srf; | 988 | srf = &user_srf->srf; |
914 | 989 | ||
915 | ret = ttm_ref_object_add(tfile, &user_srf->prime.base, | ||
916 | TTM_REF_USAGE, NULL); | ||
917 | if (unlikely(ret != 0)) { | ||
918 | DRM_ERROR("Could not add a reference to a surface.\n"); | ||
919 | goto out_no_reference; | ||
920 | } | ||
921 | |||
922 | rep->flags = srf->flags; | 990 | rep->flags = srf->flags; |
923 | rep->format = srf->format; | 991 | rep->format = srf->format; |
924 | memcpy(rep->mip_levels, srf->mip_levels, sizeof(srf->mip_levels)); | 992 | memcpy(rep->mip_levels, srf->mip_levels, sizeof(srf->mip_levels)); |
@@ -931,10 +999,10 @@ int vmw_surface_reference_ioctl(struct drm_device *dev, void *data, | |||
931 | if (unlikely(ret != 0)) { | 999 | if (unlikely(ret != 0)) { |
932 | DRM_ERROR("copy_to_user failed %p %u\n", | 1000 | DRM_ERROR("copy_to_user failed %p %u\n", |
933 | user_sizes, srf->num_sizes); | 1001 | user_sizes, srf->num_sizes); |
1002 | ttm_ref_object_base_unref(tfile, base->hash.key, TTM_REF_USAGE); | ||
934 | ret = -EFAULT; | 1003 | ret = -EFAULT; |
935 | } | 1004 | } |
936 | out_bad_resource: | 1005 | |
937 | out_no_reference: | ||
938 | ttm_base_object_unref(&base); | 1006 | ttm_base_object_unref(&base); |
939 | 1007 | ||
940 | return ret; | 1008 | return ret; |
@@ -1173,7 +1241,6 @@ int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data, | |||
1173 | struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; | 1241 | struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; |
1174 | int ret; | 1242 | int ret; |
1175 | uint32_t size; | 1243 | uint32_t size; |
1176 | struct vmw_master *vmaster = vmw_master(file_priv->master); | ||
1177 | const struct svga3d_surface_desc *desc; | 1244 | const struct svga3d_surface_desc *desc; |
1178 | uint32_t backup_handle; | 1245 | uint32_t backup_handle; |
1179 | 1246 | ||
@@ -1189,7 +1256,7 @@ int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data, | |||
1189 | return -EINVAL; | 1256 | return -EINVAL; |
1190 | } | 1257 | } |
1191 | 1258 | ||
1192 | ret = ttm_read_lock(&vmaster->lock, true); | 1259 | ret = ttm_read_lock(&dev_priv->reservation_sem, true); |
1193 | if (unlikely(ret != 0)) | 1260 | if (unlikely(ret != 0)) |
1194 | return ret; | 1261 | return ret; |
1195 | 1262 | ||
@@ -1228,6 +1295,8 @@ int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data, | |||
1228 | 1295 | ||
1229 | user_srf->prime.base.shareable = false; | 1296 | user_srf->prime.base.shareable = false; |
1230 | user_srf->prime.base.tfile = NULL; | 1297 | user_srf->prime.base.tfile = NULL; |
1298 | if (drm_is_primary_client(file_priv)) | ||
1299 | user_srf->master = drm_master_get(file_priv->master); | ||
1231 | 1300 | ||
1232 | /** | 1301 | /** |
1233 | * From this point, the generic resource management functions | 1302 | * From this point, the generic resource management functions |
@@ -1283,12 +1352,12 @@ int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data, | |||
1283 | 1352 | ||
1284 | vmw_resource_unreference(&res); | 1353 | vmw_resource_unreference(&res); |
1285 | 1354 | ||
1286 | ttm_read_unlock(&vmaster->lock); | 1355 | ttm_read_unlock(&dev_priv->reservation_sem); |
1287 | return 0; | 1356 | return 0; |
1288 | out_no_user_srf: | 1357 | out_no_user_srf: |
1289 | ttm_mem_global_free(vmw_mem_glob(dev_priv), size); | 1358 | ttm_mem_global_free(vmw_mem_glob(dev_priv), size); |
1290 | out_unlock: | 1359 | out_unlock: |
1291 | ttm_read_unlock(&vmaster->lock); | 1360 | ttm_read_unlock(&dev_priv->reservation_sem); |
1292 | return ret; | 1361 | return ret; |
1293 | } | 1362 | } |
1294 | 1363 | ||
@@ -1315,14 +1384,10 @@ int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data, | |||
1315 | uint32_t backup_handle; | 1384 | uint32_t backup_handle; |
1316 | int ret = -EINVAL; | 1385 | int ret = -EINVAL; |
1317 | 1386 | ||
1318 | base = ttm_base_object_lookup_for_ref(dev_priv->tdev, req->sid); | 1387 | ret = vmw_surface_handle_reference(dev_priv, file_priv, req->sid, |
1319 | if (unlikely(base == NULL)) { | 1388 | req->handle_type, &base); |
1320 | DRM_ERROR("Could not find surface to reference.\n"); | 1389 | if (unlikely(ret != 0)) |
1321 | return -EINVAL; | 1390 | return ret; |
1322 | } | ||
1323 | |||
1324 | if (unlikely(ttm_base_object_type(base) != VMW_RES_SURFACE)) | ||
1325 | goto out_bad_resource; | ||
1326 | 1391 | ||
1327 | user_srf = container_of(base, struct vmw_user_surface, prime.base); | 1392 | user_srf = container_of(base, struct vmw_user_surface, prime.base); |
1328 | srf = &user_srf->srf; | 1393 | srf = &user_srf->srf; |
@@ -1331,13 +1396,6 @@ int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data, | |||
1331 | goto out_bad_resource; | 1396 | goto out_bad_resource; |
1332 | } | 1397 | } |
1333 | 1398 | ||
1334 | ret = ttm_ref_object_add(tfile, &user_srf->prime.base, | ||
1335 | TTM_REF_USAGE, NULL); | ||
1336 | if (unlikely(ret != 0)) { | ||
1337 | DRM_ERROR("Could not add a reference to a GB surface.\n"); | ||
1338 | goto out_bad_resource; | ||
1339 | } | ||
1340 | |||
1341 | mutex_lock(&dev_priv->cmdbuf_mutex); /* Protect res->backup */ | 1399 | mutex_lock(&dev_priv->cmdbuf_mutex); /* Protect res->backup */ |
1342 | ret = vmw_user_dmabuf_reference(tfile, srf->res.backup, | 1400 | ret = vmw_user_dmabuf_reference(tfile, srf->res.backup, |
1343 | &backup_handle); | 1401 | &backup_handle); |
@@ -1346,8 +1404,7 @@ int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data, | |||
1346 | if (unlikely(ret != 0)) { | 1404 | if (unlikely(ret != 0)) { |
1347 | DRM_ERROR("Could not add a reference to a GB surface " | 1405 | DRM_ERROR("Could not add a reference to a GB surface " |
1348 | "backup buffer.\n"); | 1406 | "backup buffer.\n"); |
1349 | (void) ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile, | 1407 | (void) ttm_ref_object_base_unref(tfile, base->hash.key, |
1350 | req->sid, | ||
1351 | TTM_REF_USAGE); | 1408 | TTM_REF_USAGE); |
1352 | goto out_bad_resource; | 1409 | goto out_bad_resource; |
1353 | } | 1410 | } |
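
Taken together, the vmwgfx_surface.c changes bind each legacy-created surface to the master that authenticated its creator: a master reference is taken at define time, compared at reference time, and dropped when the surface is freed. A condensed sketch of that lifecycle using the fields and helpers introduced above; user_srf stands for the struct vmw_user_surface being handled at each step.

    /* 1. Define: primary (legacy) clients record their master. */
    if (drm_is_primary_client(file_priv))
            user_srf->master = drm_master_get(file_priv->master);

    /* 2. Reference: clients under a different master are refused. */
    if (drm_is_primary_client(file_priv) &&
        user_srf->master != file_priv->master)
            return -EACCES;

    /* 3. Free: drop the reference taken at define time. */
    if (user_srf->master)
            drm_master_put(&user_srf->master);
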
diff --git a/include/drm/drmP.h b/include/drm/drmP.h index 2242968e7deb..e97fc998374c 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h | |||
@@ -405,7 +405,8 @@ struct drm_prime_file_private { | |||
405 | struct drm_file { | 405 | struct drm_file { |
406 | unsigned always_authenticated :1; | 406 | unsigned always_authenticated :1; |
407 | unsigned authenticated :1; | 407 | unsigned authenticated :1; |
408 | unsigned is_master :1; /* this file private is a master for a minor */ | 408 | /* Whether we're master for a minor. Protected by master_mutex */ |
409 | unsigned is_master :1; | ||
409 | /* true when the client has asked us to expose stereo 3D mode flags */ | 410 | /* true when the client has asked us to expose stereo 3D mode flags */ |
410 | unsigned stereo_allowed :1; | 411 | unsigned stereo_allowed :1; |
411 | 412 | ||
@@ -684,29 +685,29 @@ struct drm_gem_object { | |||
684 | 685 | ||
685 | #include <drm/drm_crtc.h> | 686 | #include <drm/drm_crtc.h> |
686 | 687 | ||
687 | /* per-master structure */ | 688 | /** |
689 | * struct drm_master - drm master structure | ||
690 | * | ||
691 | * @refcount: Refcount for this master object. | ||
692 | * @minor: Link back to minor char device we are master for. Immutable. | ||
693 | * @unique: Unique identifier: e.g. busid. Protected by drm_global_mutex. | ||
694 | * @unique_len: Length of unique field. Protected by drm_global_mutex. | ||
695 | * @unique_size: Amount allocated. Protected by drm_global_mutex. | ||
696 | * @magiclist: Hash of used authentication tokens. Protected by struct_mutex. | ||
697 | * @magicfree: List of used authentication tokens. Protected by struct_mutex. | ||
698 | * @lock: DRI lock information. | ||
699 | * @driver_priv: Pointer to driver-private information. | ||
700 | */ | ||
688 | struct drm_master { | 701 | struct drm_master { |
689 | 702 | struct kref refcount; | |
690 | struct kref refcount; /* refcount for this master */ | 703 | struct drm_minor *minor; |
691 | 704 | char *unique; | |
692 | struct list_head head; /**< each minor contains a list of masters */ | 705 | int unique_len; |
693 | struct drm_minor *minor; /**< link back to minor we are a master for */ | 706 | int unique_size; |
694 | |||
695 | char *unique; /**< Unique identifier: e.g., busid */ | ||
696 | int unique_len; /**< Length of unique field */ | ||
697 | int unique_size; /**< amount allocated */ | ||
698 | |||
699 | int blocked; /**< Blocked due to VC switch? */ | ||
700 | |||
701 | /** \name Authentication */ | ||
702 | /*@{ */ | ||
703 | struct drm_open_hash magiclist; | 707 | struct drm_open_hash magiclist; |
704 | struct list_head magicfree; | 708 | struct list_head magicfree; |
705 | /*@} */ | 709 | struct drm_lock_data lock; |
706 | 710 | void *driver_priv; | |
707 | struct drm_lock_data lock; /**< Information on hardware lock */ | ||
708 | |||
709 | void *driver_priv; /**< Private structure for driver to use */ | ||
710 | }; | 711 | }; |
711 | 712 | ||
712 | /* Size of ringbuffer for vblank timestamps. Just double-buffer | 713 | /* Size of ringbuffer for vblank timestamps. Just double-buffer |
@@ -1021,8 +1022,8 @@ struct drm_minor { | |||
1021 | struct list_head debugfs_list; | 1022 | struct list_head debugfs_list; |
1022 | struct mutex debugfs_lock; /* Protects debugfs_list. */ | 1023 | struct mutex debugfs_lock; /* Protects debugfs_list. */ |
1023 | 1024 | ||
1024 | struct drm_master *master; /* currently active master for this node */ | 1025 | /* currently active master for this node. Protected by master_mutex */ |
1025 | struct list_head master_list; | 1026 | struct drm_master *master; |
1026 | struct drm_mode_group mode_group; | 1027 | struct drm_mode_group mode_group; |
1027 | }; | 1028 | }; |
1028 | 1029 | ||
@@ -1072,6 +1073,7 @@ struct drm_device { | |||
1072 | /*@{ */ | 1073 | /*@{ */ |
1073 | spinlock_t count_lock; /**< For inuse, drm_device::open_count, drm_device::buf_use */ | 1074 | spinlock_t count_lock; /**< For inuse, drm_device::open_count, drm_device::buf_use */ |
1074 | struct mutex struct_mutex; /**< For others */ | 1075 | struct mutex struct_mutex; /**< For others */ |
1076 | struct mutex master_mutex; /**< For drm_minor::master and drm_file::is_master */ | ||
1075 | /*@} */ | 1077 | /*@} */ |
1076 | 1078 | ||
1077 | /** \name Usage Counters */ | 1079 | /** \name Usage Counters */ |
@@ -1202,11 +1204,21 @@ static inline bool drm_modeset_is_locked(struct drm_device *dev) | |||
1202 | return mutex_is_locked(&dev->mode_config.mutex); | 1204 | return mutex_is_locked(&dev->mode_config.mutex); |
1203 | } | 1205 | } |
1204 | 1206 | ||
1205 | static inline bool drm_is_render_client(struct drm_file *file_priv) | 1207 | static inline bool drm_is_render_client(const struct drm_file *file_priv) |
1206 | { | 1208 | { |
1207 | return file_priv->minor->type == DRM_MINOR_RENDER; | 1209 | return file_priv->minor->type == DRM_MINOR_RENDER; |
1208 | } | 1210 | } |
1209 | 1211 | ||
1212 | static inline bool drm_is_control_client(const struct drm_file *file_priv) | ||
1213 | { | ||
1214 | return file_priv->minor->type == DRM_MINOR_CONTROL; | ||
1215 | } | ||
1216 | |||
1217 | static inline bool drm_is_primary_client(const struct drm_file *file_priv) | ||
1218 | { | ||
1219 | return file_priv->minor->type == DRM_MINOR_LEGACY; | ||
1220 | } | ||
1221 | |||
1210 | /******************************************************************/ | 1222 | /******************************************************************/ |
1211 | /** \name Internal function definitions */ | 1223 | /** \name Internal function definitions */ |
1212 | /*@{*/ | 1224 | /*@{*/ |
@@ -1217,6 +1229,7 @@ extern long drm_ioctl(struct file *filp, | |||
1217 | extern long drm_compat_ioctl(struct file *filp, | 1229 | extern long drm_compat_ioctl(struct file *filp, |
1218 | unsigned int cmd, unsigned long arg); | 1230 | unsigned int cmd, unsigned long arg); |
1219 | extern int drm_lastclose(struct drm_device *dev); | 1231 | extern int drm_lastclose(struct drm_device *dev); |
1232 | extern bool drm_ioctl_flags(unsigned int nr, unsigned int *flags); | ||
1220 | 1233 | ||
1221 | /* Device support (drm_fops.h) */ | 1234 | /* Device support (drm_fops.h) */ |
1222 | extern struct mutex drm_global_mutex; | 1235 | extern struct mutex drm_global_mutex; |
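
The new minor-type helpers above let code branch on the kind of node a client opened instead of inspecting file_priv->master directly. A small hedged example of how driver code might use them; vmw_client_may_render() is hypothetical, the three helpers are the ones defined in this hunk.

    /* Hypothetical policy check built on the new helpers. */
    static bool vmw_client_may_render(const struct drm_file *file_priv)
    {
            if (drm_is_render_client(file_priv))
                    return true;             /* master-less render node */

            if (drm_is_control_client(file_priv))
                    return false;            /* control node: modeset only */

            /* Primary (legacy) node: master/auth checks apply elsewhere. */
            return drm_is_primary_client(file_priv);
    }
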
diff --git a/include/drm/ttm/ttm_object.h b/include/drm/ttm/ttm_object.h index 0097cc03034e..ed953f98f0e1 100644 --- a/include/drm/ttm/ttm_object.h +++ b/include/drm/ttm/ttm_object.h | |||
@@ -244,6 +244,10 @@ extern void ttm_base_object_unref(struct ttm_base_object **p_base); | |||
244 | extern int ttm_ref_object_add(struct ttm_object_file *tfile, | 244 | extern int ttm_ref_object_add(struct ttm_object_file *tfile, |
245 | struct ttm_base_object *base, | 245 | struct ttm_base_object *base, |
246 | enum ttm_ref_type ref_type, bool *existed); | 246 | enum ttm_ref_type ref_type, bool *existed); |
247 | |||
248 | extern bool ttm_ref_object_exists(struct ttm_object_file *tfile, | ||
249 | struct ttm_base_object *base); | ||
250 | |||
247 | /** | 251 | /** |
248 | * ttm_ref_object_base_unref | 252 | * ttm_ref_object_base_unref |
249 | * | 253 | * |
diff --git a/include/uapi/drm/vmwgfx_drm.h b/include/uapi/drm/vmwgfx_drm.h index 87792a5fee3b..4fc66f6b12ce 100644 --- a/include/uapi/drm/vmwgfx_drm.h +++ b/include/uapi/drm/vmwgfx_drm.h | |||
@@ -90,6 +90,15 @@ | |||
90 | #define DRM_VMW_PARAM_MAX_MOB_SIZE 10 | 90 | #define DRM_VMW_PARAM_MAX_MOB_SIZE 10 |
91 | 91 | ||
92 | /** | 92 | /** |
93 | * enum drm_vmw_handle_type - handle type for ref ioctls | ||
94 | * | ||
95 | */ | ||
96 | enum drm_vmw_handle_type { | ||
97 | DRM_VMW_HANDLE_LEGACY = 0, | ||
98 | DRM_VMW_HANDLE_PRIME = 1 | ||
99 | }; | ||
100 | |||
101 | /** | ||
93 | * struct drm_vmw_getparam_arg | 102 | * struct drm_vmw_getparam_arg |
94 | * | 103 | * |
95 | * @value: Returned value. //Out | 104 | * @value: Returned value. //Out |
@@ -177,6 +186,7 @@ struct drm_vmw_surface_create_req { | |||
177 | * struct drm_wmv_surface_arg | 186 | * struct drm_wmv_surface_arg |
178 | * | 187 | * |
179 | * @sid: Surface id of created surface or surface to destroy or reference. | 188 | * @sid: Surface id of created surface or surface to destroy or reference. |
189 | * @handle_type: Handle type for DRM_VMW_REF_SURFACE Ioctl. | ||
180 | * | 190 | * |
181 | * Output data from the DRM_VMW_CREATE_SURFACE Ioctl. | 191 | * Output data from the DRM_VMW_CREATE_SURFACE Ioctl. |
182 | * Input argument to the DRM_VMW_UNREF_SURFACE Ioctl. | 192 | * Input argument to the DRM_VMW_UNREF_SURFACE Ioctl. |
@@ -185,7 +195,7 @@ struct drm_vmw_surface_create_req { | |||
185 | 195 | ||
186 | struct drm_vmw_surface_arg { | 196 | struct drm_vmw_surface_arg { |
187 | int32_t sid; | 197 | int32_t sid; |
188 | uint32_t pad64; | 198 | enum drm_vmw_handle_type handle_type; |
189 | }; | 199 | }; |
190 | 200 | ||
191 | /** | 201 | /** |
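
With the new handle_type member, user space can reference a surface either by its legacy sid or, as of this series, by a prime file descriptor that the kernel converts back to a handle via vmw_surface_handle_reference(). A minimal user-space sketch of filling the request; prime_fd is an assumed descriptor obtained via PRIME export, and the enclosing reference-ioctl wrapper belongs to the existing vmwgfx UAPI and is not shown in this diff.

    int prime_fd = -1;   /* assumed: fd obtained via PRIME export */
    struct drm_vmw_surface_arg req = {
            .sid         = prime_fd,              /* carries the fd in prime mode */
            .handle_type = DRM_VMW_HANDLE_PRIME,  /* 0 == DRM_VMW_HANDLE_LEGACY */
    };
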