author     Tejun Heo <tj@kernel.org>  2013-02-27 20:04:08 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>  2013-02-27 22:10:15 -0500
commit     2e928815c1886fe628ed54623aa98d0889cf5509 (patch)
tree       50327f66bfd641e3c809db6c69af59956c8554df /drivers/gpu/drm
parent     62f516b8d6b0610c257b4f92264e00a8dee77a0b (diff)
drm: convert to idr_alloc()
Convert to the much saner new idr interface.

* drm_ctxbitmap_next() error handling in drm_addctx() seems broken:
  drm_ctxbitmap_next() returns -errno on failure, not -1.

[artem.savkov@gmail.com: missing idr_preload_end in drm_gem_flink_ioctl]
[jslaby@suse.cz: fix drm_gem_flink_ioctl() return value]
Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: David Airlie <airlied@linux.ie>
Signed-off-by: Artem Savkov <artem.savkov@gmail.com>
Signed-off-by: Jiri Slaby <jslaby@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
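As a reader's aid (not part of the patch): a minimal before/after sketch of the two idr interfaces the commit message contrasts, using a made-up example_idr/example_lock pair. The real call sites in the hunks below differ only in which lock they take and which id range they request.

#include <linux/idr.h>
#include <linux/mutex.h>

static DEFINE_IDR(example_idr);		/* hypothetical idr, for illustration only */
static DEFINE_MUTEX(example_lock);

/* Old interface: preallocate with idr_pre_get(), then retry the whole
 * sequence whenever idr_get_new_above() reports -EAGAIN. */
static int example_alloc_old(void *ptr)
{
	int new_id, ret;

again:
	if (idr_pre_get(&example_idr, GFP_KERNEL) == 0)
		return -ENOMEM;
	mutex_lock(&example_lock);
	ret = idr_get_new_above(&example_idr, ptr, 1, &new_id);
	mutex_unlock(&example_lock);
	if (ret == -EAGAIN)
		goto again;
	else if (ret)
		return ret;
	return new_id;
}

/* New interface: a single idr_alloc() call.  It returns the allocated id
 * (here anything >= 1; end == 0 means no upper bound) or a negative errno,
 * so no retry loop and no output parameter are needed. */
static int example_alloc_new(void *ptr)
{
	int id;

	mutex_lock(&example_lock);
	id = idr_alloc(&example_idr, ptr, 1, 0, GFP_KERNEL);
	mutex_unlock(&example_lock);
	return id;
}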
Diffstat (limited to 'drivers/gpu/drm')
-rw-r--r--  drivers/gpu/drm/drm_context.c | 17
-rw-r--r--  drivers/gpu/drm/drm_crtc.c    | 19
-rw-r--r--  drivers/gpu/drm/drm_gem.c     | 38
-rw-r--r--  drivers/gpu/drm/drm_stub.c    | 19
4 files changed, 25 insertions, 68 deletions
diff --git a/drivers/gpu/drm/drm_context.c b/drivers/gpu/drm/drm_context.c
index 75f62c5e2a6b..725968d38976 100644
--- a/drivers/gpu/drm/drm_context.c
+++ b/drivers/gpu/drm/drm_context.c
@@ -74,24 +74,13 @@ void drm_ctxbitmap_free(struct drm_device * dev, int ctx_handle)
  */
 static int drm_ctxbitmap_next(struct drm_device * dev)
 {
-	int new_id;
 	int ret;
 
-again:
-	if (idr_pre_get(&dev->ctx_idr, GFP_KERNEL) == 0) {
-		DRM_ERROR("Out of memory expanding drawable idr\n");
-		return -ENOMEM;
-	}
 	mutex_lock(&dev->struct_mutex);
-	ret = idr_get_new_above(&dev->ctx_idr, NULL,
-				DRM_RESERVED_CONTEXTS, &new_id);
+	ret = idr_alloc(&dev->ctx_idr, NULL, DRM_RESERVED_CONTEXTS, 0,
+			GFP_KERNEL);
 	mutex_unlock(&dev->struct_mutex);
-	if (ret == -EAGAIN)
-		goto again;
-	else if (ret)
-		return ret;
-
-	return new_id;
+	return ret;
 }
 
 /**
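The commit message's note about drm_addctx() follows from the hunk above: drm_ctxbitmap_next() now passes idr_alloc()'s result straight through, so failure is a negative errno (-ENOMEM or -ENOSPC), never -1. A hypothetical caller sketch, not taken from the patch; the field names mirror drm_context.c but the caller itself is invented for illustration.

/* Assumes <drm/drmP.h> context, i.e. the same file as drm_ctxbitmap_next(). */
static int example_take_ctx(struct drm_device *dev, struct drm_ctx *ctx)
{
	int handle = drm_ctxbitmap_next(dev);

	if (handle < 0)
		return handle;		/* propagate -ENOMEM/-ENOSPC, don't test for -1 */
	ctx->handle = handle;
	return 0;
}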
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 99928b933b16..792c3e3795ca 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -266,32 +266,21 @@ char *drm_get_connector_status_name(enum drm_connector_status status)
 static int drm_mode_object_get(struct drm_device *dev,
 			       struct drm_mode_object *obj, uint32_t obj_type)
 {
-	int new_id = 0;
 	int ret;
 
-again:
-	if (idr_pre_get(&dev->mode_config.crtc_idr, GFP_KERNEL) == 0) {
-		DRM_ERROR("Ran out memory getting a mode number\n");
-		return -ENOMEM;
-	}
-
 	mutex_lock(&dev->mode_config.idr_mutex);
-	ret = idr_get_new_above(&dev->mode_config.crtc_idr, obj, 1, &new_id);
-
-	if (!ret) {
+	ret = idr_alloc(&dev->mode_config.crtc_idr, obj, 1, 0, GFP_KERNEL);
+	if (ret >= 0) {
 		/*
 		 * Set up the object linking under the protection of the idr
 		 * lock so that other users can't see inconsistent state.
 		 */
-		obj->id = new_id;
+		obj->id = ret;
 		obj->type = obj_type;
 	}
 	mutex_unlock(&dev->mode_config.idr_mutex);
 
-	if (ret == -EAGAIN)
-		goto again;
-
-	return ret;
+	return ret < 0 ? ret : 0;
 }
 
 /**
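For drm_mode_object_get() the interesting detail is that idr_alloc()'s return value doubles as the new object id: it is published into the object while the idr mutex is still held, and only afterwards normalized to the usual 0/-errno convention. A self-contained sketch of that shape with stand-in names; the example_* identifiers are invented for illustration.

#include <linux/idr.h>
#include <linux/mutex.h>
#include <linux/types.h>

struct example_mode_object {			/* stand-in for struct drm_mode_object */
	uint32_t id;
	uint32_t type;
};

static DEFINE_IDR(example_crtc_idr);		/* stand-in for dev->mode_config.crtc_idr */
static DEFINE_MUTEX(example_idr_mutex);		/* stand-in for dev->mode_config.idr_mutex */

static int example_mode_object_get(struct example_mode_object *obj, uint32_t obj_type)
{
	int ret;

	mutex_lock(&example_idr_mutex);
	ret = idr_alloc(&example_crtc_idr, obj, 1, 0, GFP_KERNEL);
	if (ret >= 0) {
		/* publish id/type while the idr mutex is still held */
		obj->id = ret;
		obj->type = obj_type;
	}
	mutex_unlock(&example_idr_mutex);

	return ret < 0 ? ret : 0;	/* report only success or -errno */
}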
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index e775859f0a83..af779ae19ebf 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -270,21 +270,19 @@ drm_gem_handle_create(struct drm_file *file_priv,
 	int ret;
 
 	/*
-	 * Get the user-visible handle using idr.
+	 * Get the user-visible handle using idr.  Preload and perform
+	 * allocation under our spinlock.
 	 */
-again:
-	/* ensure there is space available to allocate a handle */
-	if (idr_pre_get(&file_priv->object_idr, GFP_KERNEL) == 0)
-		return -ENOMEM;
-
-	/* do the allocation under our spinlock */
+	idr_preload(GFP_KERNEL);
 	spin_lock(&file_priv->table_lock);
-	ret = idr_get_new_above(&file_priv->object_idr, obj, 1, (int *)handlep);
+
+	ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT);
+
 	spin_unlock(&file_priv->table_lock);
-	if (ret == -EAGAIN)
-		goto again;
-	else if (ret)
+	idr_preload_end();
+	if (ret < 0)
 		return ret;
+	*handlep = ret;
 
 	drm_gem_object_handle_reference(obj);
 
@@ -451,29 +449,25 @@ drm_gem_flink_ioctl(struct drm_device *dev, void *data,
 	if (obj == NULL)
 		return -ENOENT;
 
-again:
-	if (idr_pre_get(&dev->object_name_idr, GFP_KERNEL) == 0) {
-		ret = -ENOMEM;
-		goto err;
-	}
-
+	idr_preload(GFP_KERNEL);
 	spin_lock(&dev->object_name_lock);
 	if (!obj->name) {
-		ret = idr_get_new_above(&dev->object_name_idr, obj, 1,
-				&obj->name);
+		ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_NOWAIT);
+		obj->name = ret;
 		args->name = (uint64_t) obj->name;
 		spin_unlock(&dev->object_name_lock);
+		idr_preload_end();
 
-		if (ret == -EAGAIN)
-			goto again;
-		else if (ret)
+		if (ret < 0)
 			goto err;
+		ret = 0;
 
 		/* Allocate a reference for the name table. */
 		drm_gem_object_reference(obj);
 	} else {
 		args->name = (uint64_t) obj->name;
 		spin_unlock(&dev->object_name_lock);
+		idr_preload_end();
 		ret = 0;
 	}
 
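Both GEM hunks allocate under a spinlock, where a sleeping GFP_KERNEL allocation is not allowed, which is why they switch to the preload pattern rather than a bare idr_alloc(): idr_preload(GFP_KERNEL) fills the per-cpu preallocation cache (and disables preemption), the allocation itself runs with GFP_NOWAIT under the lock, and idr_preload_end() re-enables preemption. A self-contained sketch with stand-in names; the example_* identifiers are invented, while the real code uses file_priv->object_idr and dev->object_name_idr.

#include <linux/idr.h>
#include <linux/spinlock.h>
#include <linux/types.h>

static DEFINE_IDR(example_object_idr);		/* stand-in for file_priv->object_idr */
static DEFINE_SPINLOCK(example_table_lock);	/* stand-in for file_priv->table_lock */

static int example_handle_create(void *obj, u32 *handlep)
{
	int ret;

	idr_preload(GFP_KERNEL);		/* prefill idr cache; disables preemption */
	spin_lock(&example_table_lock);

	/* must not sleep under the spinlock, hence GFP_NOWAIT */
	ret = idr_alloc(&example_object_idr, obj, 1, 0, GFP_NOWAIT);

	spin_unlock(&example_table_lock);
	idr_preload_end();			/* re-enable preemption */
	if (ret < 0)
		return ret;

	*handlep = ret;				/* the id is the user-visible handle */
	return 0;
}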
diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
index 200e104f1fa0..7d30802a018f 100644
--- a/drivers/gpu/drm/drm_stub.c
+++ b/drivers/gpu/drm/drm_stub.c
@@ -109,7 +109,6 @@ EXPORT_SYMBOL(drm_ut_debug_printk);
 
 static int drm_minor_get_id(struct drm_device *dev, int type)
 {
-	int new_id;
 	int ret;
 	int base = 0, limit = 63;
 
@@ -121,25 +120,11 @@ static int drm_minor_get_id(struct drm_device *dev, int type)
 		limit = base + 255;
 	}
 
-again:
-	if (idr_pre_get(&drm_minors_idr, GFP_KERNEL) == 0) {
-		DRM_ERROR("Out of memory expanding drawable idr\n");
-		return -ENOMEM;
-	}
 	mutex_lock(&dev->struct_mutex);
-	ret = idr_get_new_above(&drm_minors_idr, NULL,
-				base, &new_id);
+	ret = idr_alloc(&drm_minors_idr, NULL, base, limit, GFP_KERNEL);
 	mutex_unlock(&dev->struct_mutex);
-	if (ret == -EAGAIN)
-		goto again;
-	else if (ret)
-		return ret;
 
-	if (new_id >= limit) {
-		idr_remove(&drm_minors_idr, new_id);
-		return -EINVAL;
-	}
-	return new_id;
+	return ret == -ENOSPC ? -EINVAL : ret;
 }
 
 struct drm_master *drm_master_create(struct drm_minor *minor)
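drm_minor_get_id() is the one bounded allocation in the patch: idr_alloc()'s end argument is an exclusive upper limit, so passing limit subsumes the old "new_id >= limit" check and its idr_remove() cleanup, and the -ENOSPC returned when the range [base, limit) is exhausted is translated back to the -EINVAL the old code reported. A self-contained sketch with stand-in names; the example_* identifiers are invented for illustration.

#include <linux/idr.h>
#include <linux/mutex.h>

static DEFINE_IDR(example_minors_idr);		/* stand-in for drm_minors_idr */
static DEFINE_MUTEX(example_struct_mutex);	/* stand-in for dev->struct_mutex */

static int example_minor_get_id(int base, int limit)
{
	int ret;

	mutex_lock(&example_struct_mutex);
	/* allocate an id in [base, limit); end is exclusive */
	ret = idr_alloc(&example_minors_idr, NULL, base, limit, GFP_KERNEL);
	mutex_unlock(&example_struct_mutex);

	/* keep the old error convention for a full range */
	return ret == -ENOSPC ? -EINVAL : ret;
}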