author     Daniel De Graaf <dgdegra@tycho.nsa.gov>        2011-10-27 17:58:48 -0400
committer  Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>  2011-11-21 17:14:47 -0500
commit     8ca19a8937ad91703cfefccf13bd8017b39510cd
tree       c522460f48b7e7412048750b3874010d18fc36cc /drivers/xen
parent     80df46494846e857399618c54df30ce294dc1edd

xen/gntalloc: Change gref_lock to a mutex
The event channel release function cannot be called under a spinlock
because it can attempt to acquire a mutex due to the event channel
reference acquired when setting up unmap notifications.
Signed-off-by: Daniel De Graaf <dgdegra@tycho.nsa.gov>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
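
For readers unfamiliar with the constraint the commit message describes: acquiring a mutex can sleep, and sleeping is not allowed while a spinlock is held (the holder is in atomic context). A release path that may take a mutex internally therefore cannot run under a spinlock, so the outer lock itself has to become a mutex. The sketch below only illustrates that general pattern; apart from the lock primitives, the names are hypothetical stand-ins, not the actual gntalloc code.

#include <linux/mutex.h>
#include <linux/spinlock.h>

/* Before: static DEFINE_SPINLOCK(obj_lock);  -- holding it means atomic context */
static DEFINE_MUTEX(obj_mutex);               /* after: the holder may sleep */

/* Hypothetical release helper that can sleep, e.g. because it acquires a
 * mutex internally (as the event channel release path does per the commit
 * message above). */
static void release_obj_may_sleep(void *obj);

static void put_obj(void *obj)
{
        /*
         * With the old spinlock this call chain was invalid:
         *
         *     spin_lock(&obj_lock);
         *     release_obj_may_sleep(obj);   // may sleep -> "sleeping while atomic"
         *     spin_unlock(&obj_lock);
         *
         * With a mutex the callee is free to block:
         */
        mutex_lock(&obj_mutex);
        release_obj_may_sleep(obj);
        mutex_unlock(&obj_mutex);
}
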
Diffstat (limited to 'drivers/xen')

-rw-r--r--  drivers/xen/gntalloc.c | 41
1 file changed, 21 insertions(+), 20 deletions(-)
diff --git a/drivers/xen/gntalloc.c b/drivers/xen/gntalloc.c
index f6832f46aea4..439352d094db 100644
--- a/drivers/xen/gntalloc.c
+++ b/drivers/xen/gntalloc.c
@@ -74,7 +74,7 @@ MODULE_PARM_DESC(limit, "Maximum number of grants that may be allocated by "
                 "the gntalloc device");
 
 static LIST_HEAD(gref_list);
-static DEFINE_SPINLOCK(gref_lock);
+static DEFINE_MUTEX(gref_mutex);
 static int gref_size;
 
 struct notify_info {
@@ -143,15 +143,15 @@ static int add_grefs(struct ioctl_gntalloc_alloc_gref *op,
         }
 
         /* Add to gref lists. */
-        spin_lock(&gref_lock);
+        mutex_lock(&gref_mutex);
         list_splice_tail(&queue_gref, &gref_list);
         list_splice_tail(&queue_file, &priv->list);
-        spin_unlock(&gref_lock);
+        mutex_unlock(&gref_mutex);
 
         return 0;
 
 undo:
-        spin_lock(&gref_lock);
+        mutex_lock(&gref_mutex);
         gref_size -= (op->count - i);
 
         list_for_each_entry(gref, &queue_file, next_file) {
@@ -167,7 +167,7 @@ undo:
          */
         if (unlikely(!list_empty(&queue_gref)))
                 list_splice_tail(&queue_gref, &gref_list);
-        spin_unlock(&gref_lock);
+        mutex_unlock(&gref_mutex);
         return rc;
 }
 
@@ -251,7 +251,7 @@ static int gntalloc_release(struct inode *inode, struct file *filp)
 
         pr_debug("%s: priv %p\n", __func__, priv);
 
-        spin_lock(&gref_lock);
+        mutex_lock(&gref_mutex);
         while (!list_empty(&priv->list)) {
                 gref = list_entry(priv->list.next,
                         struct gntalloc_gref, next_file);
@@ -261,7 +261,7 @@ static int gntalloc_release(struct inode *inode, struct file *filp)
                         __del_gref(gref);
         }
         kfree(priv);
-        spin_unlock(&gref_lock);
+        mutex_unlock(&gref_mutex);
 
         return 0;
 }
@@ -286,21 +286,21 @@ static long gntalloc_ioctl_alloc(struct gntalloc_file_private_data *priv,
                 goto out;
         }
 
-        spin_lock(&gref_lock);
+        mutex_lock(&gref_mutex);
         /* Clean up pages that were at zero (local) users but were still mapped
          * by remote domains. Since those pages count towards the limit that we
          * are about to enforce, removing them here is a good idea.
          */
         do_cleanup();
         if (gref_size + op.count > limit) {
-                spin_unlock(&gref_lock);
+                mutex_unlock(&gref_mutex);
                 rc = -ENOSPC;
                 goto out_free;
         }
         gref_size += op.count;
         op.index = priv->index;
         priv->index += op.count * PAGE_SIZE;
-        spin_unlock(&gref_lock);
+        mutex_unlock(&gref_mutex);
 
         rc = add_grefs(&op, gref_ids, priv);
         if (rc < 0)
@@ -343,7 +343,7 @@ static long gntalloc_ioctl_dealloc(struct gntalloc_file_private_data *priv,
                 goto dealloc_grant_out;
         }
 
-        spin_lock(&gref_lock);
+        mutex_lock(&gref_mutex);
         gref = find_grefs(priv, op.index, op.count);
         if (gref) {
                 /* Remove from the file list only, and decrease reference count.
@@ -363,7 +363,7 @@ static long gntalloc_ioctl_dealloc(struct gntalloc_file_private_data *priv,
 
         do_cleanup();
 
-        spin_unlock(&gref_lock);
+        mutex_unlock(&gref_mutex);
 dealloc_grant_out:
         return rc;
 }
@@ -383,7 +383,7 @@ static long gntalloc_ioctl_unmap_notify(struct gntalloc_file_private_data *priv,
         index = op.index & ~(PAGE_SIZE - 1);
         pgoff = op.index & (PAGE_SIZE - 1);
 
-        spin_lock(&gref_lock);
+        mutex_lock(&gref_mutex);
 
         gref = find_grefs(priv, index, 1);
         if (!gref) {
@@ -400,8 +400,9 @@ static long gntalloc_ioctl_unmap_notify(struct gntalloc_file_private_data *priv,
         gref->notify.pgoff = pgoff;
         gref->notify.event = op.event_channel_port;
         rc = 0;
+
 unlock_out:
-        spin_unlock(&gref_lock);
+        mutex_unlock(&gref_mutex);
         return rc;
 }
 
@@ -433,9 +434,9 @@ static void gntalloc_vma_open(struct vm_area_struct *vma)
         if (!gref)
                 return;
 
-        spin_lock(&gref_lock);
+        mutex_lock(&gref_mutex);
         gref->users++;
-        spin_unlock(&gref_lock);
+        mutex_unlock(&gref_mutex);
 }
 
 static void gntalloc_vma_close(struct vm_area_struct *vma)
@@ -444,11 +445,11 @@ static void gntalloc_vma_close(struct vm_area_struct *vma)
         if (!gref)
                 return;
 
-        spin_lock(&gref_lock);
+        mutex_lock(&gref_mutex);
         gref->users--;
         if (gref->users == 0)
                 __del_gref(gref);
-        spin_unlock(&gref_lock);
+        mutex_unlock(&gref_mutex);
 }
 
 static struct vm_operations_struct gntalloc_vmops = {
@@ -471,7 +472,7 @@ static int gntalloc_mmap(struct file *filp, struct vm_area_struct *vma)
                 return -EINVAL;
         }
 
-        spin_lock(&gref_lock);
+        mutex_lock(&gref_mutex);
         gref = find_grefs(priv, vma->vm_pgoff << PAGE_SHIFT, count);
         if (gref == NULL) {
                 rv = -ENOENT;
@@ -499,7 +500,7 @@ static int gntalloc_mmap(struct file *filp, struct vm_area_struct *vma)
         rv = 0;
 
 out_unlock:
-        spin_unlock(&gref_lock);
+        mutex_unlock(&gref_mutex);
         return rv;
 }
 