Diffstat (limited to 'net/sunrpc')
-rw-r--r--   net/sunrpc/clnt.c  |  2
-rw-r--r--   net/sunrpc/sched.c | 64
2 files changed, 31 insertions, 35 deletions
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index 8c6a7f1a25e9..fe95bd0ab1e9 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -1535,7 +1535,7 @@ void rpc_show_tasks(void)
 		proc = -1;
 
 		if (RPC_IS_QUEUED(t))
-			rpc_waitq = rpc_qname(t->u.tk_wait.rpc_waitq);
+			rpc_waitq = rpc_qname(t->tk_waitqueue);
 
 		printk("%5u %04d %04x %6d %8p %6d %8p %8ld %8s %8p %8p\n",
 			t->tk_pid, proc,
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index 9433a113862c..9233ace076aa 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -148,7 +148,7 @@ static void __rpc_add_wait_queue(struct rpc_wait_queue *queue, struct rpc_task *
 		list_add(&task->u.tk_wait.list, &queue->tasks[0]);
 	else
 		list_add_tail(&task->u.tk_wait.list, &queue->tasks[0]);
-	task->u.tk_wait.rpc_waitq = queue;
+	task->tk_waitqueue = queue;
 	queue->qlen++;
 	rpc_set_queued(task);
 
@@ -175,11 +175,8 @@ static void __rpc_remove_wait_queue_priority(struct rpc_task *task)
  * Remove request from queue.
  * Note: must be called with spin lock held.
  */
-static void __rpc_remove_wait_queue(struct rpc_task *task)
+static void __rpc_remove_wait_queue(struct rpc_wait_queue *queue, struct rpc_task *task)
 {
-	struct rpc_wait_queue *queue;
-	queue = task->u.tk_wait.rpc_waitq;
-
 	if (RPC_IS_PRIORITY(queue))
 		__rpc_remove_wait_queue_priority(task);
 	else
@@ -364,11 +361,12 @@ EXPORT_SYMBOL_GPL(rpc_sleep_on);
 
 /**
  * __rpc_do_wake_up_task - wake up a single rpc_task
+ * @queue: wait queue
  * @task: task to be woken up
  *
  * Caller must hold queue->lock, and have cleared the task queued flag.
  */
-static void __rpc_do_wake_up_task(struct rpc_task *task)
+static void __rpc_do_wake_up_task(struct rpc_wait_queue *queue, struct rpc_task *task)
 {
 	dprintk("RPC: %5u __rpc_wake_up_task (now %lu)\n",
 			task->tk_pid, jiffies);
@@ -383,7 +381,7 @@ static void __rpc_do_wake_up_task(struct rpc_task *task)
 	}
 
 	__rpc_disable_timer(task);
-	__rpc_remove_wait_queue(task);
+	__rpc_remove_wait_queue(queue, task);
 
 	rpc_make_runnable(task);
 
@@ -391,36 +389,38 @@ static void __rpc_do_wake_up_task(struct rpc_task *task)
 }
 
 /*
- * Wake up the specified task
+ * Wake up a queued task while the queue lock is being held
  */
-static void __rpc_wake_up_task(struct rpc_task *task)
+static void rpc_wake_up_task_queue_locked(struct rpc_wait_queue *queue, struct rpc_task *task)
 {
+	if (!RPC_IS_QUEUED(task) || task->tk_waitqueue != queue)
+		return;
 	if (rpc_start_wakeup(task)) {
-		if (RPC_IS_QUEUED(task))
-			__rpc_do_wake_up_task(task);
+		__rpc_do_wake_up_task(queue, task);
 		rpc_finish_wakeup(task);
 	}
 }
 
 /*
- * Wake up the specified task
+ * Wake up a task on a specific queue
  */
-void rpc_wake_up_task(struct rpc_task *task)
+void rpc_wake_up_queued_task(struct rpc_wait_queue *queue, struct rpc_task *task)
 {
 	rcu_read_lock_bh();
-	if (rpc_start_wakeup(task)) {
-		if (RPC_IS_QUEUED(task)) {
-			struct rpc_wait_queue *queue = task->u.tk_wait.rpc_waitq;
-
-			/* Note: we're already in a bh-safe context */
-			spin_lock(&queue->lock);
-			__rpc_do_wake_up_task(task);
-			spin_unlock(&queue->lock);
-		}
-		rpc_finish_wakeup(task);
-	}
+	spin_lock(&queue->lock);
+	rpc_wake_up_task_queue_locked(queue, task);
+	spin_unlock(&queue->lock);
 	rcu_read_unlock_bh();
 }
+EXPORT_SYMBOL_GPL(rpc_wake_up_queued_task);
+
+/*
+ * Wake up the specified task
+ */
+void rpc_wake_up_task(struct rpc_task *task)
+{
+	rpc_wake_up_queued_task(task->tk_waitqueue, task);
+}
 EXPORT_SYMBOL_GPL(rpc_wake_up_task);
 
 /*
@@ -471,7 +471,7 @@ new_queue:
 new_owner:
 	rpc_set_waitqueue_owner(queue, task->tk_owner);
 out:
-	__rpc_wake_up_task(task);
+	rpc_wake_up_task_queue_locked(queue, task);
 	return task;
 }
 
@@ -490,7 +490,7 @@ struct rpc_task * rpc_wake_up_next(struct rpc_wait_queue *queue)
 		task = __rpc_wake_up_next_priority(queue);
 	else {
 		task_for_first(task, &queue->tasks[0])
-			__rpc_wake_up_task(task);
+			rpc_wake_up_task_queue_locked(queue, task);
 	}
 	spin_unlock(&queue->lock);
 	rcu_read_unlock_bh();
@@ -515,7 +515,7 @@ void rpc_wake_up(struct rpc_wait_queue *queue)
 	head = &queue->tasks[queue->maxpriority];
 	for (;;) {
 		list_for_each_entry_safe(task, next, head, u.tk_wait.list)
-			__rpc_wake_up_task(task);
+			rpc_wake_up_task_queue_locked(queue, task);
 		if (head == &queue->tasks[0])
 			break;
 		head--;
@@ -543,7 +543,7 @@ void rpc_wake_up_status(struct rpc_wait_queue *queue, int status)
 	for (;;) {
 		list_for_each_entry_safe(task, next, head, u.tk_wait.list) {
 			task->tk_status = status;
-			__rpc_wake_up_task(task);
+			rpc_wake_up_task_queue_locked(queue, task);
 		}
 		if (head == &queue->tasks[0])
 			break;
@@ -562,10 +562,8 @@ static void rpc_run_timer(unsigned long ptr)
 	struct rpc_task *task = (struct rpc_task *)ptr;
 	void (*callback)(struct rpc_task *);
 
-	if (!rpc_start_wakeup(task))
-		goto out;
 	if (RPC_IS_QUEUED(task)) {
-		struct rpc_wait_queue *queue = task->u.tk_wait.rpc_waitq;
+		struct rpc_wait_queue *queue = task->tk_waitqueue;
 		callback = task->tk_timeout_fn;
 
 		dprintk("RPC: %5u running timer\n", task->tk_pid);
@@ -573,11 +571,9 @@ static void rpc_run_timer(unsigned long ptr)
 			callback(task);
 		/* Note: we're already in a bh-safe context */
 		spin_lock(&queue->lock);
-		__rpc_do_wake_up_task(task);
+		rpc_wake_up_task_queue_locked(queue, task);
 		spin_unlock(&queue->lock);
 	}
-	rpc_finish_wakeup(task);
-out:
 	smp_mb__before_clear_bit();
 	clear_bit(RPC_TASK_HAS_TIMER, &task->tk_runstate);
 	smp_mb__after_clear_bit();
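
The patch above records the owning wait queue directly in task->tk_waitqueue and makes every wake-up path take that queue as an explicit argument, so the "is this task still queued on this queue?" check and the dequeue both happen under the same queue->lock. The sketch below is a minimal user-space illustration of that locking pattern only; it is not kernel code, all names (fake_queue, fake_task, wake_queued_task, ...) are invented for this example, and a pthread mutex stands in for the queue spinlock.

/*
 * NOT kernel code: a stand-alone sketch of the wake-up pattern above.
 * The task remembers which queue it sleeps on, and the waker re-checks
 * that association under the queue's own lock before dequeueing it.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_queue;

struct fake_task {
	bool queued;                  /* plays the role of RPC_IS_QUEUED()    */
	struct fake_queue *waitqueue; /* plays the role of task->tk_waitqueue */
};

struct fake_queue {
	pthread_mutex_t lock;         /* plays the role of queue->lock        */
	const char *name;
};

/* Caller holds queue->lock; mirrors rpc_wake_up_task_queue_locked(). */
static void wake_task_queue_locked(struct fake_queue *queue, struct fake_task *task)
{
	/* Re-check under the lock: the task may already have been woken,
	 * or moved to another queue since the caller last looked at it. */
	if (!task->queued || task->waitqueue != queue)
		return;
	task->queued = false;
	task->waitqueue = NULL;
	printf("woke task that was sleeping on %s\n", queue->name);
}

/* Unlocked entry point; mirrors rpc_wake_up_queued_task(). */
static void wake_queued_task(struct fake_queue *queue, struct fake_task *task)
{
	pthread_mutex_lock(&queue->lock);
	wake_task_queue_locked(queue, task);
	pthread_mutex_unlock(&queue->lock);
}

int main(void)
{
	struct fake_queue q = { .lock = PTHREAD_MUTEX_INITIALIZER, .name = "demo queue" };
	struct fake_task t = { .queued = true, .waitqueue = &q };

	wake_queued_task(&q, &t); /* wakes the task: it is queued on q */
	wake_queued_task(&q, &t); /* harmless no-op: no longer queued  */
	return 0;
}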