path: root/net/sunrpc/sched.c
author	Trond Myklebust <Trond.Myklebust@netapp.com>	2012-01-17 22:57:37 -0500
committer	Trond Myklebust <Trond.Myklebust@netapp.com>	2012-01-31 19:28:08 -0500
commit	961a828df64979d2a9faeeeee043391670a193b9 (patch)
tree	4b7ddaf1a19c589e3c8ec96b6c732faa507f2899 /net/sunrpc/sched.c
parent	2aeb98f498ce37742b743080fdc6c8cf64053599 (diff)
SUNRPC: Fix potential races in xprt_lock_write_next()
We have to ensure that the wake-up from the waitqueue and the assignment
of xprt->snd_task are atomic. We can do this by assigning the snd_task
while still holding the waitqueue's spinlock.

Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
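Before this change, __rpc_wake_up_next_priority() woke the chosen task while
holding queue->lock, but the caller could only assign xprt->snd_task after the
lock was dropped, leaving a window in which another task could claim the
transport. The new rpc_wake_up_first() closes that window by running a
caller-supplied callback under the lock and waking the task only if the
callback succeeds. A minimal sketch of such a callback, modeled on the
commit's companion xprt.c change (which is outside this diffstat, so the
names here are illustrative, not part of the diff below):

/*
 * Sketch only: a wake-up callback in the style of the xprt.c half of
 * this commit. rpc_wake_up_first() invokes it with queue->lock held,
 * so claiming the transport and waking the task form one atomic step.
 */
static bool __xprt_lock_write_func(struct rpc_task *task, void *data)
{
	struct rpc_xprt *xprt = data;

	/* Assign snd_task while still under the waitqueue spinlock. */
	xprt->snd_task = task;
	return true;	/* true => rpc_wake_up_first() wakes @task */
}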
Diffstat (limited to 'net/sunrpc/sched.c')
-rw-r--r--	net/sunrpc/sched.c	42
1 file changed, 33 insertions(+), 9 deletions(-)
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index 3341d8962786..f982dfe53993 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -422,7 +422,7 @@ EXPORT_SYMBOL_GPL(rpc_wake_up_queued_task);
 /*
  * Wake up the next task on a priority queue.
  */
-static struct rpc_task * __rpc_wake_up_next_priority(struct rpc_wait_queue *queue)
+static struct rpc_task *__rpc_find_next_queued_priority(struct rpc_wait_queue *queue)
 {
 	struct list_head *q;
 	struct rpc_task *task;
@@ -467,30 +467,54 @@ new_queue:
 new_owner:
 	rpc_set_waitqueue_owner(queue, task->tk_owner);
 out:
-	rpc_wake_up_task_queue_locked(queue, task);
 	return task;
 }
 
+static struct rpc_task *__rpc_find_next_queued(struct rpc_wait_queue *queue)
+{
+	if (RPC_IS_PRIORITY(queue))
+		return __rpc_find_next_queued_priority(queue);
+	if (!list_empty(&queue->tasks[0]))
+		return list_first_entry(&queue->tasks[0], struct rpc_task, u.tk_wait.list);
+	return NULL;
+}
+
 /*
- * Wake up the next task on the wait queue.
+ * Wake up the first task on the wait queue.
  */
-struct rpc_task * rpc_wake_up_next(struct rpc_wait_queue *queue)
+struct rpc_task *rpc_wake_up_first(struct rpc_wait_queue *queue,
+		bool (*func)(struct rpc_task *, void *), void *data)
 {
 	struct rpc_task	*task = NULL;
 
-	dprintk("RPC:       wake_up_next(%p \"%s\")\n",
+	dprintk("RPC:       wake_up_first(%p \"%s\")\n",
 			queue, rpc_qname(queue));
 	spin_lock_bh(&queue->lock);
-	if (RPC_IS_PRIORITY(queue))
-		task = __rpc_wake_up_next_priority(queue);
-	else {
-		task_for_first(task, &queue->tasks[0])
+	task = __rpc_find_next_queued(queue);
+	if (task != NULL) {
+		if (func(task, data))
 			rpc_wake_up_task_queue_locked(queue, task);
+		else
+			task = NULL;
 	}
 	spin_unlock_bh(&queue->lock);
 
 	return task;
 }
+EXPORT_SYMBOL_GPL(rpc_wake_up_first);
+
+static bool rpc_wake_up_next_func(struct rpc_task *task, void *data)
+{
+	return true;
+}
+
+/*
+ * Wake up the next task on the wait queue.
+ */
+struct rpc_task *rpc_wake_up_next(struct rpc_wait_queue *queue)
+{
+	return rpc_wake_up_first(queue, rpc_wake_up_next_func, NULL);
+}
 EXPORT_SYMBOL_GPL(rpc_wake_up_next);
 
 /**
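For completeness, the consumer side of the new hook (the xprt.c portion of
this commit, not covered by this diffstat) pairs the callback sketched above
with rpc_wake_up_first() roughly as follows; this is a hedged sketch rather
than the verbatim change:

static void xprt_lock_write_next(struct rpc_xprt *xprt)
{
	/* Only proceed if nobody currently holds the transport lock. */
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		return;

	/*
	 * Find, claim and wake the next sender in one step under
	 * queue->lock, so no other task can slip in between the
	 * wake-up and the xprt->snd_task assignment.
	 */
	if (rpc_wake_up_first(&xprt->sending, __xprt_lock_write_func, xprt))
		return;
	xprt_clear_locked(xprt);
}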