Diffstat (limited to 'drivers/char/drm/drm_context.c')
-rw-r--r-- | drivers/char/drm/drm_context.c | 304 |
1 file changed, 153 insertions, 151 deletions
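The hunks below are a pure reformatting of drm_context.c, which hands out per-device context handles from a page-sized bitmap: drm_ctxbitmap_next() finds and sets the first zero bit while holding dev->struct_sem, and drm_ctxbitmap_free() clears it again. For reference, a minimal userspace sketch of that allocate/free pattern follows; the names, the fixed MAX_CTX, and the pthread mutex standing in for the kernel's semaphore and bitops helpers are illustrative assumptions, not the kernel's own API.

```c
/*
 * Illustrative sketch of the handle-allocation pattern implemented by
 * drm_ctxbitmap_next() and drm_ctxbitmap_free(): find the first clear bit
 * in a fixed-size bitmap while holding a lock, set it, and hand it out as
 * the context handle.  Everything here (the names, MAX_CTX, the pthread
 * mutex in place of dev->struct_sem and the kernel bitops) is a stand-in.
 */
#include <limits.h>
#include <pthread.h>
#include <stdio.h>

#define MAX_CTX        1024
#define BITS_PER_WORD  (sizeof(unsigned long) * CHAR_BIT)
#define BITMAP_WORDS   ((MAX_CTX + BITS_PER_WORD - 1) / BITS_PER_WORD)

static unsigned long ctx_bitmap[BITMAP_WORDS];
static pthread_mutex_t ctx_lock = PTHREAD_MUTEX_INITIALIZER;

/* Return the lowest free handle, or -1 if every bit is already set. */
static int ctx_alloc(void)
{
	int bit;

	pthread_mutex_lock(&ctx_lock);
	for (bit = 0; bit < MAX_CTX; bit++) {
		unsigned long mask = 1UL << (bit % BITS_PER_WORD);

		if (!(ctx_bitmap[bit / BITS_PER_WORD] & mask)) {
			ctx_bitmap[bit / BITS_PER_WORD] |= mask;
			pthread_mutex_unlock(&ctx_lock);
			return bit;
		}
	}
	pthread_mutex_unlock(&ctx_lock);
	return -1;
}

/* Release a handle previously returned by ctx_alloc(); ignore bad input. */
static void ctx_free(int handle)
{
	if (handle < 0 || handle >= MAX_CTX)
		return;
	pthread_mutex_lock(&ctx_lock);
	ctx_bitmap[handle / BITS_PER_WORD] &= ~(1UL << (handle % BITS_PER_WORD));
	pthread_mutex_unlock(&ctx_lock);
}

int main(void)
{
	int a = ctx_alloc();
	int b = ctx_alloc();

	printf("allocated handles %d and %d\n", a, b);
	ctx_free(b);
	ctx_free(a);
	return 0;
}
```

In the real code, drm_ctxbitmap_next() additionally grows dev->context_sareas with drm_realloc() whenever a new handle exceeds max_context; that bookkeeping is omitted from the sketch.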
diff --git a/drivers/char/drm/drm_context.c b/drivers/char/drm/drm_context.c
index 502892794c16..6380127be281 100644
--- a/drivers/char/drm/drm_context.c
+++ b/drivers/char/drm/drm_context.c
@@ -1,7 +1,7 @@ | |||
1 | /** | 1 | /** |
2 | * \file drm_context.h | 2 | * \file drm_context.c |
3 | * IOCTLs for generic contexts | 3 | * IOCTLs for generic contexts |
4 | * | 4 | * |
5 | * \author Rickard E. (Rik) Faith <faith@valinux.com> | 5 | * \author Rickard E. (Rik) Faith <faith@valinux.com> |
6 | * \author Gareth Hughes <gareth@valinux.com> | 6 | * \author Gareth Hughes <gareth@valinux.com> |
7 | */ | 7 | */ |
@@ -56,25 +56,26 @@ | |||
56 | * in drm_device::context_sareas, while holding the drm_device::struct_sem | 56 | * in drm_device::context_sareas, while holding the drm_device::struct_sem |
57 | * lock. | 57 | * lock. |
58 | */ | 58 | */ |
59 | void drm_ctxbitmap_free( drm_device_t *dev, int ctx_handle ) | 59 | void drm_ctxbitmap_free(drm_device_t * dev, int ctx_handle) |
60 | { | 60 | { |
61 | if ( ctx_handle < 0 ) goto failed; | 61 | if (ctx_handle < 0) |
62 | if ( !dev->ctx_bitmap ) goto failed; | 62 | goto failed; |
63 | if (!dev->ctx_bitmap) | ||
64 | goto failed; | ||
63 | 65 | ||
64 | if ( ctx_handle < DRM_MAX_CTXBITMAP ) { | 66 | if (ctx_handle < DRM_MAX_CTXBITMAP) { |
65 | down(&dev->struct_sem); | 67 | down(&dev->struct_sem); |
66 | clear_bit( ctx_handle, dev->ctx_bitmap ); | 68 | clear_bit(ctx_handle, dev->ctx_bitmap); |
67 | dev->context_sareas[ctx_handle] = NULL; | 69 | dev->context_sareas[ctx_handle] = NULL; |
68 | up(&dev->struct_sem); | 70 | up(&dev->struct_sem); |
69 | return; | 71 | return; |
70 | } | 72 | } |
71 | failed: | 73 | failed: |
72 | DRM_ERROR( "Attempt to free invalid context handle: %d\n", | 74 | DRM_ERROR("Attempt to free invalid context handle: %d\n", ctx_handle); |
73 | ctx_handle ); | 75 | return; |
74 | return; | ||
75 | } | 76 | } |
76 | 77 | ||
77 | /** | 78 | /** |
78 | * Context bitmap allocation. | 79 | * Context bitmap allocation. |
79 | * | 80 | * |
80 | * \param dev DRM device. | 81 | * \param dev DRM device. |
@@ -84,29 +85,33 @@ failed: | |||
84 | * drm_device::context_sareas to accommodate the new entry while holding the | 85 | * drm_device::context_sareas to accommodate the new entry while holding the |
85 | * drm_device::struct_sem lock. | 86 | * drm_device::struct_sem lock. |
86 | */ | 87 | */ |
87 | static int drm_ctxbitmap_next( drm_device_t *dev ) | 88 | static int drm_ctxbitmap_next(drm_device_t * dev) |
88 | { | 89 | { |
89 | int bit; | 90 | int bit; |
90 | 91 | ||
91 | if(!dev->ctx_bitmap) return -1; | 92 | if (!dev->ctx_bitmap) |
93 | return -1; | ||
92 | 94 | ||
93 | down(&dev->struct_sem); | 95 | down(&dev->struct_sem); |
94 | bit = find_first_zero_bit( dev->ctx_bitmap, DRM_MAX_CTXBITMAP ); | 96 | bit = find_first_zero_bit(dev->ctx_bitmap, DRM_MAX_CTXBITMAP); |
95 | if ( bit < DRM_MAX_CTXBITMAP ) { | 97 | if (bit < DRM_MAX_CTXBITMAP) { |
96 | set_bit( bit, dev->ctx_bitmap ); | 98 | set_bit(bit, dev->ctx_bitmap); |
97 | DRM_DEBUG( "drm_ctxbitmap_next bit : %d\n", bit ); | 99 | DRM_DEBUG("drm_ctxbitmap_next bit : %d\n", bit); |
98 | if((bit+1) > dev->max_context) { | 100 | if ((bit + 1) > dev->max_context) { |
99 | dev->max_context = (bit+1); | 101 | dev->max_context = (bit + 1); |
100 | if(dev->context_sareas) { | 102 | if (dev->context_sareas) { |
101 | drm_map_t **ctx_sareas; | 103 | drm_map_t **ctx_sareas; |
102 | 104 | ||
103 | ctx_sareas = drm_realloc(dev->context_sareas, | 105 | ctx_sareas = drm_realloc(dev->context_sareas, |
104 | (dev->max_context - 1) * | 106 | (dev->max_context - |
105 | sizeof(*dev->context_sareas), | 107 | 1) * |
106 | dev->max_context * | 108 | sizeof(*dev-> |
107 | sizeof(*dev->context_sareas), | 109 | context_sareas), |
108 | DRM_MEM_MAPS); | 110 | dev->max_context * |
109 | if(!ctx_sareas) { | 111 | sizeof(*dev-> |
112 | context_sareas), | ||
113 | DRM_MEM_MAPS); | ||
114 | if (!ctx_sareas) { | ||
110 | clear_bit(bit, dev->ctx_bitmap); | 115 | clear_bit(bit, dev->ctx_bitmap); |
111 | up(&dev->struct_sem); | 116 | up(&dev->struct_sem); |
112 | return -1; | 117 | return -1; |
@@ -115,11 +120,11 @@ static int drm_ctxbitmap_next( drm_device_t *dev ) | |||
115 | dev->context_sareas[bit] = NULL; | 120 | dev->context_sareas[bit] = NULL; |
116 | } else { | 121 | } else { |
117 | /* max_context == 1 at this point */ | 122 | /* max_context == 1 at this point */ |
118 | dev->context_sareas = drm_alloc( | 123 | dev->context_sareas = |
119 | dev->max_context * | 124 | drm_alloc(dev->max_context * |
120 | sizeof(*dev->context_sareas), | 125 | sizeof(*dev->context_sareas), |
121 | DRM_MEM_MAPS); | 126 | DRM_MEM_MAPS); |
122 | if(!dev->context_sareas) { | 127 | if (!dev->context_sareas) { |
123 | clear_bit(bit, dev->ctx_bitmap); | 128 | clear_bit(bit, dev->ctx_bitmap); |
124 | up(&dev->struct_sem); | 129 | up(&dev->struct_sem); |
125 | return -1; | 130 | return -1; |
@@ -142,26 +147,26 @@ static int drm_ctxbitmap_next( drm_device_t *dev ) | |||
142 | * Allocates and initialize drm_device::ctx_bitmap and drm_device::context_sareas, while holding | 147 | * Allocates and initialize drm_device::ctx_bitmap and drm_device::context_sareas, while holding |
143 | * the drm_device::struct_sem lock. | 148 | * the drm_device::struct_sem lock. |
144 | */ | 149 | */ |
145 | int drm_ctxbitmap_init( drm_device_t *dev ) | 150 | int drm_ctxbitmap_init(drm_device_t * dev) |
146 | { | 151 | { |
147 | int i; | 152 | int i; |
148 | int temp; | 153 | int temp; |
149 | 154 | ||
150 | down(&dev->struct_sem); | 155 | down(&dev->struct_sem); |
151 | dev->ctx_bitmap = (unsigned long *) drm_alloc( PAGE_SIZE, | 156 | dev->ctx_bitmap = (unsigned long *)drm_alloc(PAGE_SIZE, |
152 | DRM_MEM_CTXBITMAP ); | 157 | DRM_MEM_CTXBITMAP); |
153 | if ( dev->ctx_bitmap == NULL ) { | 158 | if (dev->ctx_bitmap == NULL) { |
154 | up(&dev->struct_sem); | 159 | up(&dev->struct_sem); |
155 | return -ENOMEM; | 160 | return -ENOMEM; |
156 | } | 161 | } |
157 | memset( (void *)dev->ctx_bitmap, 0, PAGE_SIZE ); | 162 | memset((void *)dev->ctx_bitmap, 0, PAGE_SIZE); |
158 | dev->context_sareas = NULL; | 163 | dev->context_sareas = NULL; |
159 | dev->max_context = -1; | 164 | dev->max_context = -1; |
160 | up(&dev->struct_sem); | 165 | up(&dev->struct_sem); |
161 | 166 | ||
162 | for ( i = 0 ; i < DRM_RESERVED_CONTEXTS ; i++ ) { | 167 | for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) { |
163 | temp = drm_ctxbitmap_next( dev ); | 168 | temp = drm_ctxbitmap_next(dev); |
164 | DRM_DEBUG( "drm_ctxbitmap_init : %d\n", temp ); | 169 | DRM_DEBUG("drm_ctxbitmap_init : %d\n", temp); |
165 | } | 170 | } |
166 | 171 | ||
167 | return 0; | 172 | return 0; |
@@ -175,14 +180,14 @@ int drm_ctxbitmap_init( drm_device_t *dev ) | |||
175 | * Frees drm_device::ctx_bitmap and drm_device::context_sareas, while holding | 180 | * Frees drm_device::ctx_bitmap and drm_device::context_sareas, while holding |
176 | * the drm_device::struct_sem lock. | 181 | * the drm_device::struct_sem lock. |
177 | */ | 182 | */ |
178 | void drm_ctxbitmap_cleanup( drm_device_t *dev ) | 183 | void drm_ctxbitmap_cleanup(drm_device_t * dev) |
179 | { | 184 | { |
180 | down(&dev->struct_sem); | 185 | down(&dev->struct_sem); |
181 | if( dev->context_sareas ) drm_free( dev->context_sareas, | 186 | if (dev->context_sareas) |
182 | sizeof(*dev->context_sareas) * | 187 | drm_free(dev->context_sareas, |
183 | dev->max_context, | 188 | sizeof(*dev->context_sareas) * |
184 | DRM_MEM_MAPS ); | 189 | dev->max_context, DRM_MEM_MAPS); |
185 | drm_free( (void *)dev->ctx_bitmap, PAGE_SIZE, DRM_MEM_CTXBITMAP ); | 190 | drm_free((void *)dev->ctx_bitmap, PAGE_SIZE, DRM_MEM_CTXBITMAP); |
186 | up(&dev->struct_sem); | 191 | up(&dev->struct_sem); |
187 | } | 192 | } |
188 | 193 | ||
@@ -194,7 +199,7 @@ void drm_ctxbitmap_cleanup( drm_device_t *dev ) | |||
194 | 199 | ||
195 | /** | 200 | /** |
196 | * Get per-context SAREA. | 201 | * Get per-context SAREA. |
197 | * | 202 | * |
198 | * \param inode device inode. | 203 | * \param inode device inode. |
199 | * \param filp file pointer. | 204 | * \param filp file pointer. |
200 | * \param cmd command. | 205 | * \param cmd command. |
@@ -205,10 +210,10 @@ void drm_ctxbitmap_cleanup( drm_device_t *dev ) | |||
205 | * returns its handle. | 210 | * returns its handle. |
206 | */ | 211 | */ |
207 | int drm_getsareactx(struct inode *inode, struct file *filp, | 212 | int drm_getsareactx(struct inode *inode, struct file *filp, |
208 | unsigned int cmd, unsigned long arg) | 213 | unsigned int cmd, unsigned long arg) |
209 | { | 214 | { |
210 | drm_file_t *priv = filp->private_data; | 215 | drm_file_t *priv = filp->private_data; |
211 | drm_device_t *dev = priv->head->dev; | 216 | drm_device_t *dev = priv->head->dev; |
212 | drm_ctx_priv_map_t __user *argp = (void __user *)arg; | 217 | drm_ctx_priv_map_t __user *argp = (void __user *)arg; |
213 | drm_ctx_priv_map_t request; | 218 | drm_ctx_priv_map_t request; |
214 | drm_map_t *map; | 219 | drm_map_t *map; |
@@ -218,7 +223,8 @@ int drm_getsareactx(struct inode *inode, struct file *filp, | |||
218 | return -EFAULT; | 223 | return -EFAULT; |
219 | 224 | ||
220 | down(&dev->struct_sem); | 225 | down(&dev->struct_sem); |
221 | if (dev->max_context < 0 || request.ctx_id >= (unsigned) dev->max_context) { | 226 | if (dev->max_context < 0 |
227 | || request.ctx_id >= (unsigned)dev->max_context) { | ||
222 | up(&dev->struct_sem); | 228 | up(&dev->struct_sem); |
223 | return -EINVAL; | 229 | return -EINVAL; |
224 | } | 230 | } |
@@ -227,16 +233,16 @@ int drm_getsareactx(struct inode *inode, struct file *filp, | |||
227 | up(&dev->struct_sem); | 233 | up(&dev->struct_sem); |
228 | 234 | ||
229 | request.handle = 0; | 235 | request.handle = 0; |
230 | list_for_each_entry(_entry, &dev->maplist->head,head) { | 236 | list_for_each_entry(_entry, &dev->maplist->head, head) { |
231 | if (_entry->map == map) { | 237 | if (_entry->map == map) { |
232 | request.handle = (void *)(unsigned long)_entry->user_token; | 238 | request.handle = |
239 | (void *)(unsigned long)_entry->user_token; | ||
233 | break; | 240 | break; |
234 | } | 241 | } |
235 | } | 242 | } |
236 | if (request.handle == 0) | 243 | if (request.handle == 0) |
237 | return -EINVAL; | 244 | return -EINVAL; |
238 | 245 | ||
239 | |||
240 | if (copy_to_user(argp, &request, sizeof(request))) | 246 | if (copy_to_user(argp, &request, sizeof(request))) |
241 | return -EFAULT; | 247 | return -EFAULT; |
242 | return 0; | 248 | return 0; |
@@ -244,7 +250,7 @@ int drm_getsareactx(struct inode *inode, struct file *filp, | |||
244 | 250 | ||
245 | /** | 251 | /** |
246 | * Set per-context SAREA. | 252 | * Set per-context SAREA. |
247 | * | 253 | * |
248 | * \param inode device inode. | 254 | * \param inode device inode. |
249 | * \param filp file pointer. | 255 | * \param filp file pointer. |
250 | * \param cmd command. | 256 | * \param cmd command. |
@@ -255,37 +261,37 @@ int drm_getsareactx(struct inode *inode, struct file *filp, | |||
255 | * drm_device::context_sareas with it. | 261 | * drm_device::context_sareas with it. |
256 | */ | 262 | */ |
257 | int drm_setsareactx(struct inode *inode, struct file *filp, | 263 | int drm_setsareactx(struct inode *inode, struct file *filp, |
258 | unsigned int cmd, unsigned long arg) | 264 | unsigned int cmd, unsigned long arg) |
259 | { | 265 | { |
260 | drm_file_t *priv = filp->private_data; | 266 | drm_file_t *priv = filp->private_data; |
261 | drm_device_t *dev = priv->head->dev; | 267 | drm_device_t *dev = priv->head->dev; |
262 | drm_ctx_priv_map_t request; | 268 | drm_ctx_priv_map_t request; |
263 | drm_map_t *map = NULL; | 269 | drm_map_t *map = NULL; |
264 | drm_map_list_t *r_list = NULL; | 270 | drm_map_list_t *r_list = NULL; |
265 | struct list_head *list; | 271 | struct list_head *list; |
266 | 272 | ||
267 | if (copy_from_user(&request, | 273 | if (copy_from_user(&request, |
268 | (drm_ctx_priv_map_t __user *)arg, | 274 | (drm_ctx_priv_map_t __user *) arg, sizeof(request))) |
269 | sizeof(request))) | ||
270 | return -EFAULT; | 275 | return -EFAULT; |
271 | 276 | ||
272 | down(&dev->struct_sem); | 277 | down(&dev->struct_sem); |
273 | list_for_each(list, &dev->maplist->head) { | 278 | list_for_each(list, &dev->maplist->head) { |
274 | r_list = list_entry(list, drm_map_list_t, head); | 279 | r_list = list_entry(list, drm_map_list_t, head); |
275 | if (r_list->map | 280 | if (r_list->map |
276 | && r_list->user_token == (unsigned long) request.handle) | 281 | && r_list->user_token == (unsigned long)request.handle) |
277 | goto found; | 282 | goto found; |
278 | } | 283 | } |
279 | bad: | 284 | bad: |
280 | up(&dev->struct_sem); | 285 | up(&dev->struct_sem); |
281 | return -EINVAL; | 286 | return -EINVAL; |
282 | 287 | ||
283 | found: | 288 | found: |
284 | map = r_list->map; | 289 | map = r_list->map; |
285 | if (!map) goto bad; | 290 | if (!map) |
291 | goto bad; | ||
286 | if (dev->max_context < 0) | 292 | if (dev->max_context < 0) |
287 | goto bad; | 293 | goto bad; |
288 | if (request.ctx_id >= (unsigned) dev->max_context) | 294 | if (request.ctx_id >= (unsigned)dev->max_context) |
289 | goto bad; | 295 | goto bad; |
290 | dev->context_sareas[request.ctx_id] = map; | 296 | dev->context_sareas[request.ctx_id] = map; |
291 | up(&dev->struct_sem); | 297 | up(&dev->struct_sem); |
@@ -308,22 +314,21 @@ found: | |||
308 | * | 314 | * |
309 | * Attempt to set drm_device::context_flag. | 315 | * Attempt to set drm_device::context_flag. |
310 | */ | 316 | */ |
311 | static int drm_context_switch( drm_device_t *dev, int old, int new ) | 317 | static int drm_context_switch(drm_device_t * dev, int old, int new) |
312 | { | 318 | { |
313 | if ( test_and_set_bit( 0, &dev->context_flag ) ) { | 319 | if (test_and_set_bit(0, &dev->context_flag)) { |
314 | DRM_ERROR( "Reentering -- FIXME\n" ); | 320 | DRM_ERROR("Reentering -- FIXME\n"); |
315 | return -EBUSY; | 321 | return -EBUSY; |
316 | } | 322 | } |
317 | |||
318 | 323 | ||
319 | DRM_DEBUG( "Context switch from %d to %d\n", old, new ); | 324 | DRM_DEBUG("Context switch from %d to %d\n", old, new); |
320 | 325 | ||
321 | if ( new == dev->last_context ) { | 326 | if (new == dev->last_context) { |
322 | clear_bit( 0, &dev->context_flag ); | 327 | clear_bit(0, &dev->context_flag); |
323 | return 0; | 328 | return 0; |
324 | } | 329 | } |
325 | 330 | ||
326 | return 0; | 331 | return 0; |
327 | } | 332 | } |
328 | 333 | ||
329 | /** | 334 | /** |
@@ -337,22 +342,22 @@ static int drm_context_switch( drm_device_t *dev, int old, int new ) | |||
337 | * hardware lock is held, clears the drm_device::context_flag and wakes up | 342 | * hardware lock is held, clears the drm_device::context_flag and wakes up |
338 | * drm_device::context_wait. | 343 | * drm_device::context_wait. |
339 | */ | 344 | */ |
340 | static int drm_context_switch_complete( drm_device_t *dev, int new ) | 345 | static int drm_context_switch_complete(drm_device_t * dev, int new) |
341 | { | 346 | { |
342 | dev->last_context = new; /* PRE/POST: This is the _only_ writer. */ | 347 | dev->last_context = new; /* PRE/POST: This is the _only_ writer. */ |
343 | dev->last_switch = jiffies; | 348 | dev->last_switch = jiffies; |
344 | 349 | ||
345 | if ( !_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) ) { | 350 | if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) { |
346 | DRM_ERROR( "Lock isn't held after context switch\n" ); | 351 | DRM_ERROR("Lock isn't held after context switch\n"); |
347 | } | 352 | } |
348 | 353 | ||
349 | /* If a context switch is ever initiated | 354 | /* If a context switch is ever initiated |
350 | when the kernel holds the lock, release | 355 | when the kernel holds the lock, release |
351 | that lock here. */ | 356 | that lock here. */ |
352 | clear_bit( 0, &dev->context_flag ); | 357 | clear_bit(0, &dev->context_flag); |
353 | wake_up( &dev->context_wait ); | 358 | wake_up(&dev->context_wait); |
354 | 359 | ||
355 | return 0; | 360 | return 0; |
356 | } | 361 | } |
357 | 362 | ||
358 | /** | 363 | /** |
@@ -364,29 +369,28 @@ static int drm_context_switch_complete( drm_device_t *dev, int new ) | |||
364 | * \param arg user argument pointing to a drm_ctx_res structure. | 369 | * \param arg user argument pointing to a drm_ctx_res structure. |
365 | * \return zero on success or a negative number on failure. | 370 | * \return zero on success or a negative number on failure. |
366 | */ | 371 | */ |
367 | int drm_resctx( struct inode *inode, struct file *filp, | 372 | int drm_resctx(struct inode *inode, struct file *filp, |
368 | unsigned int cmd, unsigned long arg ) | 373 | unsigned int cmd, unsigned long arg) |
369 | { | 374 | { |
370 | drm_ctx_res_t res; | 375 | drm_ctx_res_t res; |
371 | drm_ctx_t __user *argp = (void __user *)arg; | 376 | drm_ctx_t __user *argp = (void __user *)arg; |
372 | drm_ctx_t ctx; | 377 | drm_ctx_t ctx; |
373 | int i; | 378 | int i; |
374 | 379 | ||
375 | if ( copy_from_user( &res, argp, sizeof(res) ) ) | 380 | if (copy_from_user(&res, argp, sizeof(res))) |
376 | return -EFAULT; | 381 | return -EFAULT; |
377 | 382 | ||
378 | if ( res.count >= DRM_RESERVED_CONTEXTS ) { | 383 | if (res.count >= DRM_RESERVED_CONTEXTS) { |
379 | memset( &ctx, 0, sizeof(ctx) ); | 384 | memset(&ctx, 0, sizeof(ctx)); |
380 | for ( i = 0 ; i < DRM_RESERVED_CONTEXTS ; i++ ) { | 385 | for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) { |
381 | ctx.handle = i; | 386 | ctx.handle = i; |
382 | if ( copy_to_user( &res.contexts[i], | 387 | if (copy_to_user(&res.contexts[i], &ctx, sizeof(ctx))) |
383 | &ctx, sizeof(ctx) ) ) | ||
384 | return -EFAULT; | 388 | return -EFAULT; |
385 | } | 389 | } |
386 | } | 390 | } |
387 | res.count = DRM_RESERVED_CONTEXTS; | 391 | res.count = DRM_RESERVED_CONTEXTS; |
388 | 392 | ||
389 | if ( copy_to_user( argp, &res, sizeof(res) ) ) | 393 | if (copy_to_user(argp, &res, sizeof(res))) |
390 | return -EFAULT; | 394 | return -EFAULT; |
391 | return 0; | 395 | return 0; |
392 | } | 396 | } |
@@ -402,58 +406,57 @@ int drm_resctx( struct inode *inode, struct file *filp, | |||
402 | * | 406 | * |
403 | * Get a new handle for the context and copy to userspace. | 407 | * Get a new handle for the context and copy to userspace. |
404 | */ | 408 | */ |
405 | int drm_addctx( struct inode *inode, struct file *filp, | 409 | int drm_addctx(struct inode *inode, struct file *filp, |
406 | unsigned int cmd, unsigned long arg ) | 410 | unsigned int cmd, unsigned long arg) |
407 | { | 411 | { |
408 | drm_file_t *priv = filp->private_data; | 412 | drm_file_t *priv = filp->private_data; |
409 | drm_device_t *dev = priv->head->dev; | 413 | drm_device_t *dev = priv->head->dev; |
410 | drm_ctx_list_t * ctx_entry; | 414 | drm_ctx_list_t *ctx_entry; |
411 | drm_ctx_t __user *argp = (void __user *)arg; | 415 | drm_ctx_t __user *argp = (void __user *)arg; |
412 | drm_ctx_t ctx; | 416 | drm_ctx_t ctx; |
413 | 417 | ||
414 | if ( copy_from_user( &ctx, argp, sizeof(ctx) ) ) | 418 | if (copy_from_user(&ctx, argp, sizeof(ctx))) |
415 | return -EFAULT; | 419 | return -EFAULT; |
416 | 420 | ||
417 | ctx.handle = drm_ctxbitmap_next( dev ); | 421 | ctx.handle = drm_ctxbitmap_next(dev); |
418 | if ( ctx.handle == DRM_KERNEL_CONTEXT ) { | 422 | if (ctx.handle == DRM_KERNEL_CONTEXT) { |
419 | /* Skip kernel's context and get a new one. */ | 423 | /* Skip kernel's context and get a new one. */ |
420 | ctx.handle = drm_ctxbitmap_next( dev ); | 424 | ctx.handle = drm_ctxbitmap_next(dev); |
421 | } | 425 | } |
422 | DRM_DEBUG( "%d\n", ctx.handle ); | 426 | DRM_DEBUG("%d\n", ctx.handle); |
423 | if ( ctx.handle == -1 ) { | 427 | if (ctx.handle == -1) { |
424 | DRM_DEBUG( "Not enough free contexts.\n" ); | 428 | DRM_DEBUG("Not enough free contexts.\n"); |
425 | /* Should this return -EBUSY instead? */ | 429 | /* Should this return -EBUSY instead? */ |
426 | return -ENOMEM; | 430 | return -ENOMEM; |
427 | } | 431 | } |
428 | 432 | ||
429 | if ( ctx.handle != DRM_KERNEL_CONTEXT ) | 433 | if (ctx.handle != DRM_KERNEL_CONTEXT) { |
430 | { | ||
431 | if (dev->driver->context_ctor) | 434 | if (dev->driver->context_ctor) |
432 | dev->driver->context_ctor(dev, ctx.handle); | 435 | dev->driver->context_ctor(dev, ctx.handle); |
433 | } | 436 | } |
434 | 437 | ||
435 | ctx_entry = drm_alloc( sizeof(*ctx_entry), DRM_MEM_CTXLIST ); | 438 | ctx_entry = drm_alloc(sizeof(*ctx_entry), DRM_MEM_CTXLIST); |
436 | if ( !ctx_entry ) { | 439 | if (!ctx_entry) { |
437 | DRM_DEBUG("out of memory\n"); | 440 | DRM_DEBUG("out of memory\n"); |
438 | return -ENOMEM; | 441 | return -ENOMEM; |
439 | } | 442 | } |
440 | 443 | ||
441 | INIT_LIST_HEAD( &ctx_entry->head ); | 444 | INIT_LIST_HEAD(&ctx_entry->head); |
442 | ctx_entry->handle = ctx.handle; | 445 | ctx_entry->handle = ctx.handle; |
443 | ctx_entry->tag = priv; | 446 | ctx_entry->tag = priv; |
444 | 447 | ||
445 | down( &dev->ctxlist_sem ); | 448 | down(&dev->ctxlist_sem); |
446 | list_add( &ctx_entry->head, &dev->ctxlist->head ); | 449 | list_add(&ctx_entry->head, &dev->ctxlist->head); |
447 | ++dev->ctx_count; | 450 | ++dev->ctx_count; |
448 | up( &dev->ctxlist_sem ); | 451 | up(&dev->ctxlist_sem); |
449 | 452 | ||
450 | if ( copy_to_user( argp, &ctx, sizeof(ctx) ) ) | 453 | if (copy_to_user(argp, &ctx, sizeof(ctx))) |
451 | return -EFAULT; | 454 | return -EFAULT; |
452 | return 0; | 455 | return 0; |
453 | } | 456 | } |
454 | 457 | ||
455 | int drm_modctx( struct inode *inode, struct file *filp, | 458 | int drm_modctx(struct inode *inode, struct file *filp, |
456 | unsigned int cmd, unsigned long arg ) | 459 | unsigned int cmd, unsigned long arg) |
457 | { | 460 | { |
458 | /* This does nothing */ | 461 | /* This does nothing */ |
459 | return 0; | 462 | return 0; |
@@ -468,19 +471,19 @@ int drm_modctx( struct inode *inode, struct file *filp, | |||
468 | * \param arg user argument pointing to a drm_ctx structure. | 471 | * \param arg user argument pointing to a drm_ctx structure. |
469 | * \return zero on success or a negative number on failure. | 472 | * \return zero on success or a negative number on failure. |
470 | */ | 473 | */ |
471 | int drm_getctx( struct inode *inode, struct file *filp, | 474 | int drm_getctx(struct inode *inode, struct file *filp, |
472 | unsigned int cmd, unsigned long arg ) | 475 | unsigned int cmd, unsigned long arg) |
473 | { | 476 | { |
474 | drm_ctx_t __user *argp = (void __user *)arg; | 477 | drm_ctx_t __user *argp = (void __user *)arg; |
475 | drm_ctx_t ctx; | 478 | drm_ctx_t ctx; |
476 | 479 | ||
477 | if ( copy_from_user( &ctx, argp, sizeof(ctx) ) ) | 480 | if (copy_from_user(&ctx, argp, sizeof(ctx))) |
478 | return -EFAULT; | 481 | return -EFAULT; |
479 | 482 | ||
480 | /* This is 0, because we don't handle any context flags */ | 483 | /* This is 0, because we don't handle any context flags */ |
481 | ctx.flags = 0; | 484 | ctx.flags = 0; |
482 | 485 | ||
483 | if ( copy_to_user( argp, &ctx, sizeof(ctx) ) ) | 486 | if (copy_to_user(argp, &ctx, sizeof(ctx))) |
484 | return -EFAULT; | 487 | return -EFAULT; |
485 | return 0; | 488 | return 0; |
486 | } | 489 | } |
@@ -496,18 +499,18 @@ int drm_getctx( struct inode *inode, struct file *filp, | |||
496 | * | 499 | * |
497 | * Calls context_switch(). | 500 | * Calls context_switch(). |
498 | */ | 501 | */ |
499 | int drm_switchctx( struct inode *inode, struct file *filp, | 502 | int drm_switchctx(struct inode *inode, struct file *filp, |
500 | unsigned int cmd, unsigned long arg ) | 503 | unsigned int cmd, unsigned long arg) |
501 | { | 504 | { |
502 | drm_file_t *priv = filp->private_data; | 505 | drm_file_t *priv = filp->private_data; |
503 | drm_device_t *dev = priv->head->dev; | 506 | drm_device_t *dev = priv->head->dev; |
504 | drm_ctx_t ctx; | 507 | drm_ctx_t ctx; |
505 | 508 | ||
506 | if ( copy_from_user( &ctx, (drm_ctx_t __user *)arg, sizeof(ctx) ) ) | 509 | if (copy_from_user(&ctx, (drm_ctx_t __user *) arg, sizeof(ctx))) |
507 | return -EFAULT; | 510 | return -EFAULT; |
508 | 511 | ||
509 | DRM_DEBUG( "%d\n", ctx.handle ); | 512 | DRM_DEBUG("%d\n", ctx.handle); |
510 | return drm_context_switch( dev, dev->last_context, ctx.handle ); | 513 | return drm_context_switch(dev, dev->last_context, ctx.handle); |
511 | } | 514 | } |
512 | 515 | ||
513 | /** | 516 | /** |
@@ -521,18 +524,18 @@ int drm_switchctx( struct inode *inode, struct file *filp, | |||
521 | * | 524 | * |
522 | * Calls context_switch_complete(). | 525 | * Calls context_switch_complete(). |
523 | */ | 526 | */ |
524 | int drm_newctx( struct inode *inode, struct file *filp, | 527 | int drm_newctx(struct inode *inode, struct file *filp, |
525 | unsigned int cmd, unsigned long arg ) | 528 | unsigned int cmd, unsigned long arg) |
526 | { | 529 | { |
527 | drm_file_t *priv = filp->private_data; | 530 | drm_file_t *priv = filp->private_data; |
528 | drm_device_t *dev = priv->head->dev; | 531 | drm_device_t *dev = priv->head->dev; |
529 | drm_ctx_t ctx; | 532 | drm_ctx_t ctx; |
530 | 533 | ||
531 | if ( copy_from_user( &ctx, (drm_ctx_t __user *)arg, sizeof(ctx) ) ) | 534 | if (copy_from_user(&ctx, (drm_ctx_t __user *) arg, sizeof(ctx))) |
532 | return -EFAULT; | 535 | return -EFAULT; |
533 | 536 | ||
534 | DRM_DEBUG( "%d\n", ctx.handle ); | 537 | DRM_DEBUG("%d\n", ctx.handle); |
535 | drm_context_switch_complete( dev, ctx.handle ); | 538 | drm_context_switch_complete(dev, ctx.handle); |
536 | 539 | ||
537 | return 0; | 540 | return 0; |
538 | } | 541 | } |
@@ -548,42 +551,41 @@ int drm_newctx( struct inode *inode, struct file *filp, | |||
548 | * | 551 | * |
549 | * If not the special kernel context, calls ctxbitmap_free() to free the specified context. | 552 | * If not the special kernel context, calls ctxbitmap_free() to free the specified context. |
550 | */ | 553 | */ |
551 | int drm_rmctx( struct inode *inode, struct file *filp, | 554 | int drm_rmctx(struct inode *inode, struct file *filp, |
552 | unsigned int cmd, unsigned long arg ) | 555 | unsigned int cmd, unsigned long arg) |
553 | { | 556 | { |
554 | drm_file_t *priv = filp->private_data; | 557 | drm_file_t *priv = filp->private_data; |
555 | drm_device_t *dev = priv->head->dev; | 558 | drm_device_t *dev = priv->head->dev; |
556 | drm_ctx_t ctx; | 559 | drm_ctx_t ctx; |
557 | 560 | ||
558 | if ( copy_from_user( &ctx, (drm_ctx_t __user *)arg, sizeof(ctx) ) ) | 561 | if (copy_from_user(&ctx, (drm_ctx_t __user *) arg, sizeof(ctx))) |
559 | return -EFAULT; | 562 | return -EFAULT; |
560 | 563 | ||
561 | DRM_DEBUG( "%d\n", ctx.handle ); | 564 | DRM_DEBUG("%d\n", ctx.handle); |
562 | if ( ctx.handle == DRM_KERNEL_CONTEXT + 1 ) { | 565 | if (ctx.handle == DRM_KERNEL_CONTEXT + 1) { |
563 | priv->remove_auth_on_close = 1; | 566 | priv->remove_auth_on_close = 1; |
564 | } | 567 | } |
565 | if ( ctx.handle != DRM_KERNEL_CONTEXT ) { | 568 | if (ctx.handle != DRM_KERNEL_CONTEXT) { |
566 | if (dev->driver->context_dtor) | 569 | if (dev->driver->context_dtor) |
567 | dev->driver->context_dtor(dev, ctx.handle); | 570 | dev->driver->context_dtor(dev, ctx.handle); |
568 | drm_ctxbitmap_free( dev, ctx.handle ); | 571 | drm_ctxbitmap_free(dev, ctx.handle); |
569 | } | 572 | } |
570 | 573 | ||
571 | down( &dev->ctxlist_sem ); | 574 | down(&dev->ctxlist_sem); |
572 | if ( !list_empty( &dev->ctxlist->head ) ) { | 575 | if (!list_empty(&dev->ctxlist->head)) { |
573 | drm_ctx_list_t *pos, *n; | 576 | drm_ctx_list_t *pos, *n; |
574 | 577 | ||
575 | list_for_each_entry_safe( pos, n, &dev->ctxlist->head, head ) { | 578 | list_for_each_entry_safe(pos, n, &dev->ctxlist->head, head) { |
576 | if ( pos->handle == ctx.handle ) { | 579 | if (pos->handle == ctx.handle) { |
577 | list_del( &pos->head ); | 580 | list_del(&pos->head); |
578 | drm_free( pos, sizeof(*pos), DRM_MEM_CTXLIST ); | 581 | drm_free(pos, sizeof(*pos), DRM_MEM_CTXLIST); |
579 | --dev->ctx_count; | 582 | --dev->ctx_count; |
580 | } | 583 | } |
581 | } | 584 | } |
582 | } | 585 | } |
583 | up( &dev->ctxlist_sem ); | 586 | up(&dev->ctxlist_sem); |
584 | 587 | ||
585 | return 0; | 588 | return 0; |
586 | } | 589 | } |
587 | 590 | ||
588 | /*@}*/ | 591 | /*@}*/ |
589 | |||
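The ioctl handlers reformatted above (drm_addctx(), drm_rmctx(), drm_resctx(), and friends) are reached from userspace through the DRM character device. A hypothetical caller is sketched below; the <drm/drm.h> include path, the /dev/dri/card0 node, and the assumption that the caller is permitted to create contexts (historically a DRM-master/authenticated client) are assumptions for illustration, not something this commit establishes.

```c
/*
 * Hypothetical userspace caller for the drm_addctx()/drm_rmctx() ioctls.
 * Assumes the DRM uapi header is available as <drm/drm.h> (the path may
 * differ per distribution) and that the device node below exists.
 */
#include <drm/drm.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	struct drm_ctx ctx;
	int fd = open("/dev/dri/card0", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	memset(&ctx, 0, sizeof(ctx));
	if (ioctl(fd, DRM_IOCTL_ADD_CTX, &ctx) == 0) {
		/* The kernel fills in ctx.handle, skipping the kernel context. */
		printf("got context handle %u\n", ctx.handle);

		/* Release the handle again via drm_rmctx(). */
		if (ioctl(fd, DRM_IOCTL_RM_CTX, &ctx) != 0)
			perror("DRM_IOCTL_RM_CTX");
	} else {
		perror("DRM_IOCTL_ADD_CTX");
	}

	close(fd);
	return 0;
}
```

DRM_IOCTL_ADD_CTX round-trips the drm_ctx structure, which is why drm_addctx() both copies the argument in and copies the freshly allocated handle back out to userspace.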