author		Trond Myklebust <trond.myklebust@hammerspace.com>	2019-06-11 14:19:07 -0400
committer	Trond Myklebust <trond.myklebust@hammerspace.com>	2019-07-06 14:54:51 -0400
commit		41adafa02eeb622ffc1f085c9a862a6554ed667e (patch)
tree		af93595de5f6b108ad67ce451ed4685e8092bd90 /net/sunrpc
parent		a332518fda4731c07394164b3edcbb6efaf4c4d7 (diff)
parent		c049f8ea9a0db11d87bc8cb4c106be65fe06b70b (diff)

Merge branch 'bh-remove'
Diffstat (limited to 'net/sunrpc')
-rw-r--r--	net/sunrpc/sched.c	72
-rw-r--r--	net/sunrpc/xprt.c	61
-rw-r--r--	net/sunrpc/xprtrdma/rpc_rdma.c	4
-rw-r--r--	net/sunrpc/xprtrdma/svc_rdma_backchannel.c	4
-rw-r--r--	net/sunrpc/xprtrdma/svc_rdma_transport.c	8
-rw-r--r--	net/sunrpc/xprtsock.c	101
6 files changed, 160 insertions, 90 deletions
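
Two conversions run through this merge: every bh-safe spinlock guarding transport and wait-queue state (spin_lock_bh/spin_unlock_bh) becomes a plain spin_lock/spin_unlock, and the rpc_wait_queue timeout timer becomes a deferrable delayed work item on rpciod_workqueue. The sketch below illustrates the second conversion in isolation; it is a minimal example under stated assumptions, not the kernel code — the demo_* names and the use of system_wq instead of rpciod_workqueue are invented for illustration.

#include <linux/jiffies.h>
#include <linux/list.h>
#include <linux/workqueue.h>

/* Hypothetical wait queue mirroring the rpc_wait_queue change: a
 * deferrable delayed work replaces the struct timer_list, and the
 * armed absolute expiry is cached so callers can tell whether
 * re-arming is needed at all. */
struct demo_queue {
	struct delayed_work dwork;
	unsigned long expires;		/* absolute time, in jiffies */
	struct list_head waiters;
};

/* Delayed work takes a relative delay, so convert from the absolute
 * jiffies value; an expiry already in the past becomes "run now". */
static void demo_set_queue_timer(struct demo_queue *q, unsigned long expires)
{
	unsigned long now = jiffies;

	q->expires = expires;
	mod_delayed_work(system_wq, &q->dwork,
			 time_before_eq(expires, now) ? 0 : expires - now);
}

/* timer_reduce() only ever moved the old timer earlier as a side
 * effect; mod_delayed_work() re-arms unconditionally, so the
 * "only move earlier" test now lives in the caller. */
static void demo_add_timer(struct demo_queue *q, unsigned long timeout)
{
	if (list_empty(&q->waiters) || time_before(timeout, q->expires))
		demo_set_queue_timer(q, timeout);
}
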
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index f8ea362fae91..f820780280b5 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -47,7 +47,7 @@ static mempool_t *rpc_buffer_mempool __read_mostly;
 
 static void rpc_async_schedule(struct work_struct *);
 static void rpc_release_task(struct rpc_task *task);
-static void __rpc_queue_timer_fn(struct timer_list *t);
+static void __rpc_queue_timer_fn(struct work_struct *);
 
 /*
  * RPC tasks sit here while waiting for conditions to improve.
@@ -88,13 +88,19 @@ __rpc_disable_timer(struct rpc_wait_queue *queue, struct rpc_task *task)
 	task->tk_timeout = 0;
 	list_del(&task->u.tk_wait.timer_list);
 	if (list_empty(&queue->timer_list.list))
-		del_timer(&queue->timer_list.timer);
+		cancel_delayed_work(&queue->timer_list.dwork);
 }
 
 static void
 rpc_set_queue_timer(struct rpc_wait_queue *queue, unsigned long expires)
 {
-	timer_reduce(&queue->timer_list.timer, expires);
+	unsigned long now = jiffies;
+	queue->timer_list.expires = expires;
+	if (time_before_eq(expires, now))
+		expires = 0;
+	else
+		expires -= now;
+	mod_delayed_work(rpciod_workqueue, &queue->timer_list.dwork, expires);
 }
 
 /*
@@ -108,7 +114,8 @@ __rpc_add_timer(struct rpc_wait_queue *queue, struct rpc_task *task,
 			task->tk_pid, jiffies_to_msecs(timeout - jiffies));
 
 	task->tk_timeout = timeout;
-	rpc_set_queue_timer(queue, timeout);
+	if (list_empty(&queue->timer_list.list) || time_before(timeout, queue->timer_list.expires))
+		rpc_set_queue_timer(queue, timeout);
 	list_add(&task->u.tk_wait.timer_list, &queue->timer_list.list);
 }
 
@@ -251,7 +258,8 @@ static void __rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const c
 	queue->maxpriority = nr_queues - 1;
 	rpc_reset_waitqueue_priority(queue);
 	queue->qlen = 0;
-	timer_setup(&queue->timer_list.timer, __rpc_queue_timer_fn, 0);
+	queue->timer_list.expires = 0;
+	INIT_DEFERRABLE_WORK(&queue->timer_list.dwork, __rpc_queue_timer_fn);
 	INIT_LIST_HEAD(&queue->timer_list.list);
 	rpc_assign_waitqueue_name(queue, qname);
 }
@@ -270,7 +278,7 @@ EXPORT_SYMBOL_GPL(rpc_init_wait_queue);
 
 void rpc_destroy_wait_queue(struct rpc_wait_queue *queue)
 {
-	del_timer_sync(&queue->timer_list.timer);
+	cancel_delayed_work_sync(&queue->timer_list.dwork);
 }
 EXPORT_SYMBOL_GPL(rpc_destroy_wait_queue);
 
@@ -425,9 +433,9 @@ void rpc_sleep_on_timeout(struct rpc_wait_queue *q, struct rpc_task *task,
 	/*
 	 * Protect the queue operations.
 	 */
-	spin_lock_bh(&q->lock);
+	spin_lock(&q->lock);
 	__rpc_sleep_on_priority_timeout(q, task, timeout, task->tk_priority);
-	spin_unlock_bh(&q->lock);
+	spin_unlock(&q->lock);
 }
 EXPORT_SYMBOL_GPL(rpc_sleep_on_timeout);
 
@@ -443,9 +451,9 @@ void rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
 	/*
 	 * Protect the queue operations.
 	 */
-	spin_lock_bh(&q->lock);
+	spin_lock(&q->lock);
 	__rpc_sleep_on_priority(q, task, task->tk_priority);
-	spin_unlock_bh(&q->lock);
+	spin_unlock(&q->lock);
 }
 EXPORT_SYMBOL_GPL(rpc_sleep_on);
 
@@ -459,9 +467,9 @@ void rpc_sleep_on_priority_timeout(struct rpc_wait_queue *q,
 	/*
 	 * Protect the queue operations.
 	 */
-	spin_lock_bh(&q->lock);
+	spin_lock(&q->lock);
 	__rpc_sleep_on_priority_timeout(q, task, timeout, priority);
-	spin_unlock_bh(&q->lock);
+	spin_unlock(&q->lock);
 }
 EXPORT_SYMBOL_GPL(rpc_sleep_on_priority_timeout);
 
@@ -476,9 +484,9 @@ void rpc_sleep_on_priority(struct rpc_wait_queue *q, struct rpc_task *task,
 	/*
 	 * Protect the queue operations.
 	 */
-	spin_lock_bh(&q->lock);
+	spin_lock(&q->lock);
 	__rpc_sleep_on_priority(q, task, priority);
-	spin_unlock_bh(&q->lock);
+	spin_unlock(&q->lock);
 }
 EXPORT_SYMBOL_GPL(rpc_sleep_on_priority);
 
@@ -556,9 +564,9 @@ void rpc_wake_up_queued_task_on_wq(struct workqueue_struct *wq,
 {
 	if (!RPC_IS_QUEUED(task))
 		return;
-	spin_lock_bh(&queue->lock);
+	spin_lock(&queue->lock);
 	rpc_wake_up_task_on_wq_queue_locked(wq, queue, task);
-	spin_unlock_bh(&queue->lock);
+	spin_unlock(&queue->lock);
 }
 
 /*
@@ -568,9 +576,9 @@ void rpc_wake_up_queued_task(struct rpc_wait_queue *queue, struct rpc_task *task
 {
 	if (!RPC_IS_QUEUED(task))
 		return;
-	spin_lock_bh(&queue->lock);
+	spin_lock(&queue->lock);
 	rpc_wake_up_task_queue_locked(queue, task);
-	spin_unlock_bh(&queue->lock);
+	spin_unlock(&queue->lock);
 }
 EXPORT_SYMBOL_GPL(rpc_wake_up_queued_task);
 
@@ -603,9 +611,9 @@ rpc_wake_up_queued_task_set_status(struct rpc_wait_queue *queue,
 {
 	if (!RPC_IS_QUEUED(task))
 		return;
-	spin_lock_bh(&queue->lock);
+	spin_lock(&queue->lock);
 	rpc_wake_up_task_queue_set_status_locked(queue, task, status);
-	spin_unlock_bh(&queue->lock);
+	spin_unlock(&queue->lock);
 }
 
 /*
@@ -668,12 +676,12 @@ struct rpc_task *rpc_wake_up_first_on_wq(struct workqueue_struct *wq,
 
 	dprintk("RPC:       wake_up_first(%p \"%s\")\n",
 			queue, rpc_qname(queue));
-	spin_lock_bh(&queue->lock);
+	spin_lock(&queue->lock);
 	task = __rpc_find_next_queued(queue);
 	if (task != NULL)
 		task = rpc_wake_up_task_on_wq_queue_action_locked(wq, queue,
 				task, func, data);
-	spin_unlock_bh(&queue->lock);
+	spin_unlock(&queue->lock);
 
 	return task;
 }
@@ -712,7 +720,7 @@ void rpc_wake_up(struct rpc_wait_queue *queue)
 {
 	struct list_head *head;
 
-	spin_lock_bh(&queue->lock);
+	spin_lock(&queue->lock);
 	head = &queue->tasks[queue->maxpriority];
 	for (;;) {
 		while (!list_empty(head)) {
@@ -726,7 +734,7 @@ void rpc_wake_up(struct rpc_wait_queue *queue)
 			break;
 		head--;
 	}
-	spin_unlock_bh(&queue->lock);
+	spin_unlock(&queue->lock);
 }
 EXPORT_SYMBOL_GPL(rpc_wake_up);
 
@@ -741,7 +749,7 @@ void rpc_wake_up_status(struct rpc_wait_queue *queue, int status)
 {
 	struct list_head *head;
 
-	spin_lock_bh(&queue->lock);
+	spin_lock(&queue->lock);
 	head = &queue->tasks[queue->maxpriority];
 	for (;;) {
 		while (!list_empty(head)) {
@@ -756,13 +764,15 @@ void rpc_wake_up_status(struct rpc_wait_queue *queue, int status)
 			break;
 		head--;
 	}
-	spin_unlock_bh(&queue->lock);
+	spin_unlock(&queue->lock);
 }
 EXPORT_SYMBOL_GPL(rpc_wake_up_status);
 
-static void __rpc_queue_timer_fn(struct timer_list *t)
+static void __rpc_queue_timer_fn(struct work_struct *work)
 {
-	struct rpc_wait_queue *queue = from_timer(queue, t, timer_list.timer);
+	struct rpc_wait_queue *queue = container_of(work,
+			struct rpc_wait_queue,
+			timer_list.dwork.work);
 	struct rpc_task *task, *n;
 	unsigned long expires, now, timeo;
 
@@ -932,13 +942,13 @@ static void __rpc_execute(struct rpc_task *task)
 		 * rpc_task pointer may still be dereferenced.
 		 */
 		queue = task->tk_waitqueue;
-		spin_lock_bh(&queue->lock);
+		spin_lock(&queue->lock);
 		if (!RPC_IS_QUEUED(task)) {
-			spin_unlock_bh(&queue->lock);
+			spin_unlock(&queue->lock);
 			continue;
 		}
 		rpc_clear_running(task);
-		spin_unlock_bh(&queue->lock);
+		spin_unlock(&queue->lock);
 		if (task_is_async)
 			return;
 
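
On the callback side of the same conversion, the work function has to recover its queue by hand: from_timer() is timer-specific, and a delayed_work embeds its work_struct in the .work member, so the container_of() walk goes through dwork.work. A minimal sketch with invented names:

#include <linux/kernel.h>
#include <linux/workqueue.h>

struct demo_queue {
	struct delayed_work dwork;
	/* ... the tasks being timed, etc. ... */
};

/* Work callback: map the bare work_struct back to its container,
 * going through the delayed_work's embedded .work member. */
static void demo_queue_timer_fn(struct work_struct *work)
{
	struct demo_queue *queue = container_of(work, struct demo_queue,
						dwork.work);

	/* ... wake up the tasks whose deadline has passed ... */
	(void)queue;
}
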
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index ab6b4c729ca5..c4d138202abb 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -302,9 +302,9 @@ static inline int xprt_lock_write(struct rpc_xprt *xprt, struct rpc_task *task)
 
 	if (test_bit(XPRT_LOCKED, &xprt->state) && xprt->snd_task == task)
 		return 1;
-	spin_lock_bh(&xprt->transport_lock);
+	spin_lock(&xprt->transport_lock);
 	retval = xprt->ops->reserve_xprt(xprt, task);
-	spin_unlock_bh(&xprt->transport_lock);
+	spin_unlock(&xprt->transport_lock);
 	return retval;
 }
 
@@ -381,9 +381,9 @@ static inline void xprt_release_write(struct rpc_xprt *xprt, struct rpc_task *ta
 {
 	if (xprt->snd_task != task)
 		return;
-	spin_lock_bh(&xprt->transport_lock);
+	spin_lock(&xprt->transport_lock);
 	xprt->ops->release_xprt(xprt, task);
-	spin_unlock_bh(&xprt->transport_lock);
+	spin_unlock(&xprt->transport_lock);
 }
 
 /*
@@ -435,9 +435,9 @@ xprt_request_get_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
 
 	if (req->rq_cong)
 		return true;
-	spin_lock_bh(&xprt->transport_lock);
+	spin_lock(&xprt->transport_lock);
 	ret = __xprt_get_cong(xprt, req) != 0;
-	spin_unlock_bh(&xprt->transport_lock);
+	spin_unlock(&xprt->transport_lock);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(xprt_request_get_cong);
@@ -464,9 +464,9 @@ static void
 xprt_clear_congestion_window_wait(struct rpc_xprt *xprt)
 {
 	if (test_and_clear_bit(XPRT_CWND_WAIT, &xprt->state)) {
-		spin_lock_bh(&xprt->transport_lock);
+		spin_lock(&xprt->transport_lock);
 		__xprt_lock_write_next_cong(xprt);
-		spin_unlock_bh(&xprt->transport_lock);
+		spin_unlock(&xprt->transport_lock);
 	}
 }
 
@@ -563,9 +563,9 @@ bool xprt_write_space(struct rpc_xprt *xprt)
 
 	if (!test_bit(XPRT_WRITE_SPACE, &xprt->state))
 		return false;
-	spin_lock_bh(&xprt->transport_lock);
+	spin_lock(&xprt->transport_lock);
 	ret = xprt_clear_write_space_locked(xprt);
-	spin_unlock_bh(&xprt->transport_lock);
+	spin_unlock(&xprt->transport_lock);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(xprt_write_space);
@@ -634,9 +634,9 @@ int xprt_adjust_timeout(struct rpc_rqst *req)
 		req->rq_retries = 0;
 		xprt_reset_majortimeo(req);
 		/* Reset the RTT counters == "slow start" */
-		spin_lock_bh(&xprt->transport_lock);
+		spin_lock(&xprt->transport_lock);
 		rpc_init_rtt(req->rq_task->tk_client->cl_rtt, to->to_initval);
-		spin_unlock_bh(&xprt->transport_lock);
+		spin_unlock(&xprt->transport_lock);
 		status = -ETIMEDOUT;
 	}
 
@@ -668,11 +668,11 @@ static void xprt_autoclose(struct work_struct *work)
 void xprt_disconnect_done(struct rpc_xprt *xprt)
 {
 	dprintk("RPC:       disconnected transport %p\n", xprt);
-	spin_lock_bh(&xprt->transport_lock);
+	spin_lock(&xprt->transport_lock);
 	xprt_clear_connected(xprt);
 	xprt_clear_write_space_locked(xprt);
 	xprt_wake_pending_tasks(xprt, -ENOTCONN);
-	spin_unlock_bh(&xprt->transport_lock);
+	spin_unlock(&xprt->transport_lock);
 }
 EXPORT_SYMBOL_GPL(xprt_disconnect_done);
 
@@ -684,7 +684,7 @@ EXPORT_SYMBOL_GPL(xprt_disconnect_done);
 void xprt_force_disconnect(struct rpc_xprt *xprt)
 {
 	/* Don't race with the test_bit() in xprt_clear_locked() */
-	spin_lock_bh(&xprt->transport_lock);
+	spin_lock(&xprt->transport_lock);
 	set_bit(XPRT_CLOSE_WAIT, &xprt->state);
 	/* Try to schedule an autoclose RPC call */
 	if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0)
@@ -692,7 +692,7 @@ void xprt_force_disconnect(struct rpc_xprt *xprt)
 	else if (xprt->snd_task)
 		rpc_wake_up_queued_task_set_status(&xprt->pending,
 				xprt->snd_task, -ENOTCONN);
-	spin_unlock_bh(&xprt->transport_lock);
+	spin_unlock(&xprt->transport_lock);
 }
 EXPORT_SYMBOL_GPL(xprt_force_disconnect);
 
@@ -726,7 +726,7 @@ xprt_request_retransmit_after_disconnect(struct rpc_task *task)
 void xprt_conditional_disconnect(struct rpc_xprt *xprt, unsigned int cookie)
 {
 	/* Don't race with the test_bit() in xprt_clear_locked() */
-	spin_lock_bh(&xprt->transport_lock);
+	spin_lock(&xprt->transport_lock);
 	if (cookie != xprt->connect_cookie)
 		goto out;
 	if (test_bit(XPRT_CLOSING, &xprt->state))
@@ -737,7 +737,7 @@ void xprt_conditional_disconnect(struct rpc_xprt *xprt, unsigned int cookie)
 	queue_work(xprtiod_workqueue, &xprt->task_cleanup);
 	xprt_wake_pending_tasks(xprt, -EAGAIN);
 out:
-	spin_unlock_bh(&xprt->transport_lock);
+	spin_unlock(&xprt->transport_lock);
 }
 
 static bool
@@ -759,18 +759,13 @@ xprt_init_autodisconnect(struct timer_list *t)
 {
 	struct rpc_xprt *xprt = from_timer(xprt, t, timer);
 
-	spin_lock(&xprt->transport_lock);
 	if (!RB_EMPTY_ROOT(&xprt->recv_queue))
-		goto out_abort;
+		return;
 	/* Reset xprt->last_used to avoid connect/autodisconnect cycling */
 	xprt->last_used = jiffies;
 	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
-		goto out_abort;
-	spin_unlock(&xprt->transport_lock);
+		return;
 	queue_work(xprtiod_workqueue, &xprt->task_cleanup);
-	return;
-out_abort:
-	spin_unlock(&xprt->transport_lock);
 }
 
 bool xprt_lock_connect(struct rpc_xprt *xprt,
@@ -779,7 +774,7 @@ bool xprt_lock_connect(struct rpc_xprt *xprt,
 {
 	bool ret = false;
 
-	spin_lock_bh(&xprt->transport_lock);
+	spin_lock(&xprt->transport_lock);
 	if (!test_bit(XPRT_LOCKED, &xprt->state))
 		goto out;
 	if (xprt->snd_task != task)
@@ -787,13 +782,13 @@ bool xprt_lock_connect(struct rpc_xprt *xprt,
 	xprt->snd_task = cookie;
 	ret = true;
 out:
-	spin_unlock_bh(&xprt->transport_lock);
+	spin_unlock(&xprt->transport_lock);
 	return ret;
 }
 
 void xprt_unlock_connect(struct rpc_xprt *xprt, void *cookie)
 {
-	spin_lock_bh(&xprt->transport_lock);
+	spin_lock(&xprt->transport_lock);
 	if (xprt->snd_task != cookie)
 		goto out;
 	if (!test_bit(XPRT_LOCKED, &xprt->state))
@@ -802,7 +797,7 @@ void xprt_unlock_connect(struct rpc_xprt *xprt, void *cookie)
 	xprt->ops->release_xprt(xprt, NULL);
 	xprt_schedule_autodisconnect(xprt);
 out:
-	spin_unlock_bh(&xprt->transport_lock);
+	spin_unlock(&xprt->transport_lock);
 	wake_up_bit(&xprt->state, XPRT_LOCKED);
 }
 
@@ -1412,14 +1407,14 @@ xprt_request_transmit(struct rpc_rqst *req, struct rpc_task *snd_task)
 	xprt_inject_disconnect(xprt);
 
 	task->tk_flags |= RPC_TASK_SENT;
-	spin_lock_bh(&xprt->transport_lock);
+	spin_lock(&xprt->transport_lock);
 
 	xprt->stat.sends++;
 	xprt->stat.req_u += xprt->stat.sends - xprt->stat.recvs;
 	xprt->stat.bklog_u += xprt->backlog.qlen;
 	xprt->stat.sending_u += xprt->sending.qlen;
 	xprt->stat.pending_u += xprt->pending.qlen;
-	spin_unlock_bh(&xprt->transport_lock);
+	spin_unlock(&xprt->transport_lock);
 
 	req->rq_connect_cookie = connect_cookie;
 out_dequeue:
@@ -1766,13 +1761,13 @@ void xprt_release(struct rpc_task *task)
 
 	xprt = req->rq_xprt;
 	xprt_request_dequeue_all(task, req);
-	spin_lock_bh(&xprt->transport_lock);
+	spin_lock(&xprt->transport_lock);
 	xprt->ops->release_xprt(xprt, task);
 	if (xprt->ops->release_request)
 		xprt->ops->release_request(task);
 	xprt->last_used = jiffies;
 	xprt_schedule_autodisconnect(xprt);
-	spin_unlock_bh(&xprt->transport_lock);
+	spin_unlock(&xprt->transport_lock);
 	if (req->rq_buffer)
 		xprt->ops->buf_free(task);
 	xprt_inject_disconnect(xprt);
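
The xprt_init_autodisconnect() hunk is the one place where a lock is dropped outright rather than converted: the function still runs from a timer, i.e. in softirq context, so once transport_lock stops being bh-safe it may not be taken there at all, and the callback is reduced to atomic bit operations that hand the real work to a workqueue. A sketch of that shape, with invented demo_* names:

#include <linux/timer.h>
#include <linux/workqueue.h>

#define DEMO_LOCKED	0	/* stand-in for XPRT_LOCKED */

struct demo_xprt {
	struct timer_list timer;
	unsigned long state;
	struct work_struct cleanup_work;
};

/* Timer callback in softirq context: takes no spinlock shared with
 * process context; an atomic test_and_set gates the transport and the
 * heavy lifting is punted to the workqueue. */
static void demo_autodisconnect(struct timer_list *t)
{
	struct demo_xprt *x = from_timer(x, t, timer);

	if (test_and_set_bit(DEMO_LOCKED, &x->state))
		return;		/* transport already owned; try again later */
	queue_work(system_wq, &x->cleanup_work);
}
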
diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c
index 85115a2e2639..7dc62e55f526 100644
--- a/net/sunrpc/xprtrdma/rpc_rdma.c
+++ b/net/sunrpc/xprtrdma/rpc_rdma.c
@@ -1360,10 +1360,10 @@ void rpcrdma_reply_handler(struct rpcrdma_rep *rep)
 	else if (credits > buf->rb_max_requests)
 		credits = buf->rb_max_requests;
 	if (buf->rb_credits != credits) {
-		spin_lock_bh(&xprt->transport_lock);
+		spin_lock(&xprt->transport_lock);
 		buf->rb_credits = credits;
 		xprt->cwnd = credits << RPC_CWNDSHIFT;
-		spin_unlock_bh(&xprt->transport_lock);
+		spin_unlock(&xprt->transport_lock);
 	}
 
 	req = rpcr_to_rdmar(rqst);
diff --git a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c
index bed57d8b5c19..d1fcc41d5eb5 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c
@@ -72,9 +72,9 @@ int svc_rdma_handle_bc_reply(struct rpc_xprt *xprt, __be32 *rdma_resp,
 	else if (credits > r_xprt->rx_buf.rb_bc_max_requests)
 		credits = r_xprt->rx_buf.rb_bc_max_requests;
 
-	spin_lock_bh(&xprt->transport_lock);
+	spin_lock(&xprt->transport_lock);
 	xprt->cwnd = credits << RPC_CWNDSHIFT;
-	spin_unlock_bh(&xprt->transport_lock);
+	spin_unlock(&xprt->transport_lock);
 
 	spin_lock(&xprt->queue_lock);
 	ret = 0;
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
index 0004535c0188..3fe665152d95 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
@@ -226,9 +226,9 @@ static void handle_connect_req(struct rdma_cm_id *new_cma_id,
 	 * Enqueue the new transport on the accept queue of the listening
 	 * transport
 	 */
-	spin_lock_bh(&listen_xprt->sc_lock);
+	spin_lock(&listen_xprt->sc_lock);
 	list_add_tail(&newxprt->sc_accept_q, &listen_xprt->sc_accept_q);
-	spin_unlock_bh(&listen_xprt->sc_lock);
+	spin_unlock(&listen_xprt->sc_lock);
 
 	set_bit(XPT_CONN, &listen_xprt->sc_xprt.xpt_flags);
 	svc_xprt_enqueue(&listen_xprt->sc_xprt);
@@ -401,7 +401,7 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
 	listen_rdma = container_of(xprt, struct svcxprt_rdma, sc_xprt);
 	clear_bit(XPT_CONN, &xprt->xpt_flags);
 	/* Get the next entry off the accept list */
-	spin_lock_bh(&listen_rdma->sc_lock);
+	spin_lock(&listen_rdma->sc_lock);
 	if (!list_empty(&listen_rdma->sc_accept_q)) {
 		newxprt = list_entry(listen_rdma->sc_accept_q.next,
 				     struct svcxprt_rdma, sc_accept_q);
@@ -409,7 +409,7 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
 	}
 	if (!list_empty(&listen_rdma->sc_accept_q))
 		set_bit(XPT_CONN, &listen_rdma->sc_xprt.xpt_flags);
-	spin_unlock_bh(&listen_rdma->sc_lock);
+	spin_unlock(&listen_rdma->sc_lock);
 	if (!newxprt)
 		return NULL;
 
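
In both RDMA hunks the critical section is the same few lines: fold the credit grant advertised by the peer into the RPC congestion window, which neither sleeps nor depends on softirq state, so the plain spinlock suffices. A sketch of the computation; DEMO_CWNDSHIFT and the struct are invented, standing in for RPC_CWNDSHIFT and the real transport:

#include <linux/spinlock.h>

#define DEMO_CWNDSHIFT	8	/* stand-in for RPC_CWNDSHIFT */

struct demo_rdma_xprt {
	spinlock_t transport_lock;
	unsigned long cwnd;
	unsigned int rb_credits;
};

/* Publish a new credit grant: the congestion window is simply the
 * credit count scaled into cwnd units. */
static void demo_update_cwnd(struct demo_rdma_xprt *x, unsigned int credits)
{
	spin_lock(&x->transport_lock);
	x->rb_credits = credits;
	x->cwnd = (unsigned long)credits << DEMO_CWNDSHIFT;
	spin_unlock(&x->transport_lock);
}
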
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 36652352a38c..97c15d47f343 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -880,7 +880,7 @@ static int xs_nospace(struct rpc_rqst *req)
 			req->rq_slen);
 
 	/* Protect against races with write_space */
-	spin_lock_bh(&xprt->transport_lock);
+	spin_lock(&xprt->transport_lock);
 
 	/* Don't race with disconnect */
 	if (xprt_connected(xprt)) {
@@ -890,7 +890,7 @@ static int xs_nospace(struct rpc_rqst *req)
 	} else
 		ret = -ENOTCONN;
 
-	spin_unlock_bh(&xprt->transport_lock);
+	spin_unlock(&xprt->transport_lock);
 
 	/* Race breaker in case memory is freed before above code is called */
 	if (ret == -EAGAIN) {
@@ -1211,6 +1211,15 @@ static void xs_sock_reset_state_flags(struct rpc_xprt *xprt)
 	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
 
 	clear_bit(XPRT_SOCK_DATA_READY, &transport->sock_state);
+	clear_bit(XPRT_SOCK_WAKE_ERROR, &transport->sock_state);
+	clear_bit(XPRT_SOCK_WAKE_WRITE, &transport->sock_state);
+	clear_bit(XPRT_SOCK_WAKE_DISCONNECT, &transport->sock_state);
+}
+
+static void xs_run_error_worker(struct sock_xprt *transport, unsigned int nr)
+{
+	set_bit(nr, &transport->sock_state);
+	queue_work(xprtiod_workqueue, &transport->error_worker);
 }
 
 static void xs_sock_reset_connection_flags(struct rpc_xprt *xprt)
@@ -1231,6 +1240,7 @@ static void xs_sock_reset_connection_flags(struct rpc_xprt *xprt)
  */
 static void xs_error_report(struct sock *sk)
 {
+	struct sock_xprt *transport;
 	struct rpc_xprt *xprt;
 	int err;
 
@@ -1238,13 +1248,14 @@ static void xs_error_report(struct sock *sk)
 	if (!(xprt = xprt_from_sock(sk)))
 		goto out;
 
+	transport = container_of(xprt, struct sock_xprt, xprt);
 	err = -sk->sk_err;
 	if (err == 0)
 		goto out;
 	dprintk("RPC:       xs_error_report client %p, error=%d...\n",
 			xprt, -err);
 	trace_rpc_socket_error(xprt, sk->sk_socket, err);
-	xprt_wake_pending_tasks(xprt, err);
+	xs_run_error_worker(transport, XPRT_SOCK_WAKE_ERROR);
 out:
 	read_unlock_bh(&sk->sk_callback_lock);
 }
@@ -1333,6 +1344,7 @@ static void xs_destroy(struct rpc_xprt *xprt)
 	cancel_delayed_work_sync(&transport->connect_worker);
 	xs_close(xprt);
 	cancel_work_sync(&transport->recv_worker);
+	cancel_work_sync(&transport->error_worker);
 	xs_xprt_free(xprt);
 	module_put(THIS_MODULE);
 }
@@ -1386,9 +1398,9 @@ static void xs_udp_data_read_skb(struct rpc_xprt *xprt,
 	}
 
 
-	spin_lock_bh(&xprt->transport_lock);
+	spin_lock(&xprt->transport_lock);
 	xprt_adjust_cwnd(xprt, task, copied);
-	spin_unlock_bh(&xprt->transport_lock);
+	spin_unlock(&xprt->transport_lock);
 	spin_lock(&xprt->queue_lock);
 	xprt_complete_rqst(task, copied);
 	__UDPX_INC_STATS(sk, UDP_MIB_INDATAGRAMS);
@@ -1498,7 +1510,6 @@ static void xs_tcp_state_change(struct sock *sk)
 	trace_rpc_socket_state_change(xprt, sk->sk_socket);
 	switch (sk->sk_state) {
 	case TCP_ESTABLISHED:
-		spin_lock(&xprt->transport_lock);
 		if (!xprt_test_and_set_connected(xprt)) {
 			xprt->connect_cookie++;
 			clear_bit(XPRT_SOCK_CONNECTING, &transport->sock_state);
@@ -1507,9 +1518,8 @@ static void xs_tcp_state_change(struct sock *sk)
 			xprt->stat.connect_count++;
 			xprt->stat.connect_time += (long)jiffies -
 						   xprt->stat.connect_start;
-			xprt_wake_pending_tasks(xprt, -EAGAIN);
+			xs_run_error_worker(transport, XPRT_SOCK_WAKE_PENDING);
 		}
-		spin_unlock(&xprt->transport_lock);
 		break;
 	case TCP_FIN_WAIT1:
 		/* The client initiated a shutdown of the socket */
@@ -1525,7 +1535,7 @@ static void xs_tcp_state_change(struct sock *sk)
 		/* The server initiated a shutdown of the socket */
 		xprt->connect_cookie++;
 		clear_bit(XPRT_CONNECTED, &xprt->state);
-		xs_tcp_force_close(xprt);
+		xs_run_error_worker(transport, XPRT_SOCK_WAKE_DISCONNECT);
 		/* fall through */
 	case TCP_CLOSING:
 		/*
@@ -1547,7 +1557,7 @@ static void xs_tcp_state_change(struct sock *sk)
 		xprt_clear_connecting(xprt);
 		clear_bit(XPRT_CLOSING, &xprt->state);
 		/* Trigger the socket release */
-		xs_tcp_force_close(xprt);
+		xs_run_error_worker(transport, XPRT_SOCK_WAKE_DISCONNECT);
 	}
 out:
 	read_unlock_bh(&sk->sk_callback_lock);
@@ -1556,6 +1566,7 @@ static void xs_tcp_state_change(struct sock *sk)
 static void xs_write_space(struct sock *sk)
 {
 	struct socket_wq *wq;
+	struct sock_xprt *transport;
 	struct rpc_xprt *xprt;
 
 	if (!sk->sk_socket)
@@ -1564,13 +1575,14 @@ static void xs_write_space(struct sock *sk)
 
 	if (unlikely(!(xprt = xprt_from_sock(sk))))
 		return;
+	transport = container_of(xprt, struct sock_xprt, xprt);
 	rcu_read_lock();
 	wq = rcu_dereference(sk->sk_wq);
 	if (!wq || test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &wq->flags) == 0)
 		goto out;
 
-	if (xprt_write_space(xprt))
-		sk->sk_write_pending--;
+	xs_run_error_worker(transport, XPRT_SOCK_WAKE_WRITE);
+	sk->sk_write_pending--;
 out:
 	rcu_read_unlock();
 }
@@ -1664,9 +1676,9 @@ static void xs_udp_set_buffer_size(struct rpc_xprt *xprt, size_t sndsize, size_t
  */
 static void xs_udp_timer(struct rpc_xprt *xprt, struct rpc_task *task)
 {
-	spin_lock_bh(&xprt->transport_lock);
+	spin_lock(&xprt->transport_lock);
 	xprt_adjust_cwnd(xprt, task, -ETIMEDOUT);
-	spin_unlock_bh(&xprt->transport_lock);
+	spin_unlock(&xprt->transport_lock);
 }
 
 static int xs_get_random_port(void)
@@ -2201,13 +2213,13 @@ static void xs_tcp_set_socket_timeouts(struct rpc_xprt *xprt,
 	unsigned int opt_on = 1;
 	unsigned int timeo;
 
-	spin_lock_bh(&xprt->transport_lock);
+	spin_lock(&xprt->transport_lock);
 	keepidle = DIV_ROUND_UP(xprt->timeout->to_initval, HZ);
 	keepcnt = xprt->timeout->to_retries + 1;
 	timeo = jiffies_to_msecs(xprt->timeout->to_initval) *
 		(xprt->timeout->to_retries + 1);
 	clear_bit(XPRT_SOCK_UPD_TIMEOUT, &transport->sock_state);
-	spin_unlock_bh(&xprt->transport_lock);
+	spin_unlock(&xprt->transport_lock);
 
 	/* TCP Keepalive options */
 	kernel_setsockopt(sock, SOL_SOCKET, SO_KEEPALIVE,
@@ -2232,7 +2244,7 @@ static void xs_tcp_set_connect_timeout(struct rpc_xprt *xprt,
 	struct rpc_timeout to;
 	unsigned long initval;
 
-	spin_lock_bh(&xprt->transport_lock);
+	spin_lock(&xprt->transport_lock);
 	if (reconnect_timeout < xprt->max_reconnect_timeout)
 		xprt->max_reconnect_timeout = reconnect_timeout;
 	if (connect_timeout < xprt->connect_timeout) {
@@ -2249,7 +2261,7 @@ static void xs_tcp_set_connect_timeout(struct rpc_xprt *xprt,
 		xprt->connect_timeout = connect_timeout;
 	}
 	set_bit(XPRT_SOCK_UPD_TIMEOUT, &transport->sock_state);
-	spin_unlock_bh(&xprt->transport_lock);
+	spin_unlock(&xprt->transport_lock);
 }
 
 static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
@@ -2461,6 +2473,56 @@ static void xs_connect(struct rpc_xprt *xprt, struct rpc_task *task)
 			delay);
 }
 
+static void xs_wake_disconnect(struct sock_xprt *transport)
+{
+	if (test_and_clear_bit(XPRT_SOCK_WAKE_DISCONNECT, &transport->sock_state))
+		xs_tcp_force_close(&transport->xprt);
+}
+
+static void xs_wake_write(struct sock_xprt *transport)
+{
+	if (test_and_clear_bit(XPRT_SOCK_WAKE_WRITE, &transport->sock_state))
+		xprt_write_space(&transport->xprt);
+}
+
+static void xs_wake_error(struct sock_xprt *transport)
+{
+	int sockerr;
+	int sockerr_len = sizeof(sockerr);
+
+	if (!test_bit(XPRT_SOCK_WAKE_ERROR, &transport->sock_state))
+		return;
+	mutex_lock(&transport->recv_mutex);
+	if (transport->sock == NULL)
+		goto out;
+	if (!test_and_clear_bit(XPRT_SOCK_WAKE_ERROR, &transport->sock_state))
+		goto out;
+	if (kernel_getsockopt(transport->sock, SOL_SOCKET, SO_ERROR,
+				(char *)&sockerr, &sockerr_len) != 0)
+		goto out;
+	if (sockerr < 0)
+		xprt_wake_pending_tasks(&transport->xprt, sockerr);
+out:
+	mutex_unlock(&transport->recv_mutex);
+}
+
+static void xs_wake_pending(struct sock_xprt *transport)
+{
+	if (test_and_clear_bit(XPRT_SOCK_WAKE_PENDING, &transport->sock_state))
+		xprt_wake_pending_tasks(&transport->xprt, -EAGAIN);
+}
+
+static void xs_error_handle(struct work_struct *work)
+{
+	struct sock_xprt *transport = container_of(work,
+			struct sock_xprt, error_worker);
+
+	xs_wake_disconnect(transport);
+	xs_wake_write(transport);
+	xs_wake_error(transport);
+	xs_wake_pending(transport);
+}
+
 /**
  * xs_local_print_stats - display AF_LOCAL socket-specifc stats
  * @xprt: rpc_xprt struct containing statistics
@@ -2873,6 +2935,7 @@ static struct rpc_xprt *xs_setup_local(struct xprt_create *args)
 	xprt->timeout = &xs_local_default_timeout;
 
 	INIT_WORK(&transport->recv_worker, xs_stream_data_receive_workfn);
+	INIT_WORK(&transport->error_worker, xs_error_handle);
 	INIT_DELAYED_WORK(&transport->connect_worker, xs_dummy_setup_socket);
 
 	switch (sun->sun_family) {
@@ -2943,6 +3006,7 @@ static struct rpc_xprt *xs_setup_udp(struct xprt_create *args)
 	xprt->timeout = &xs_udp_default_timeout;
 
 	INIT_WORK(&transport->recv_worker, xs_udp_data_receive_workfn);
+	INIT_WORK(&transport->error_worker, xs_error_handle);
 	INIT_DELAYED_WORK(&transport->connect_worker, xs_udp_setup_socket);
 
 	switch (addr->sa_family) {
@@ -3024,6 +3088,7 @@ static struct rpc_xprt *xs_setup_tcp(struct xprt_create *args)
 		(xprt->timeout->to_retries + 1);
 
 	INIT_WORK(&transport->recv_worker, xs_stream_data_receive_workfn);
+	INIT_WORK(&transport->error_worker, xs_error_handle);
 	INIT_DELAYED_WORK(&transport->connect_worker, xs_tcp_setup_socket);
 
 	switch (addr->sa_family) {
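
The xprtsock.c changes all follow one pattern: the sk_* callbacks still run in softirq context with sk_callback_lock held, so instead of waking RPC tasks directly (which would now require the no-longer-bh-safe queue locks), they set an atomic flag and queue a single work item; xs_error_handle() then drains every pending flag in process context. A condensed sketch of the pattern, with invented demo_* names standing in for sock_xprt, xs_run_error_worker() and xs_error_handle():

#include <linux/printk.h>
#include <linux/workqueue.h>

/* Flag bits, one per deferred wakeup reason. */
enum {
	DEMO_WAKE_ERROR,
	DEMO_WAKE_WRITE,
	DEMO_WAKE_DISCONNECT,
};

struct demo_transport {
	unsigned long sock_state;
	struct work_struct error_worker;
};

/* Called from a socket callback (softirq): record the reason and make
 * sure the worker runs; queue_work() is a no-op if already queued. */
static void demo_run_error_worker(struct demo_transport *t, unsigned int nr)
{
	set_bit(nr, &t->sock_state);
	queue_work(system_wq, &t->error_worker);
}

/* Runs in process context, free to take ordinary spinlocks and wake
 * RPC tasks; each flag is consumed with test_and_clear_bit() so a
 * reason raised while we run is not lost. */
static void demo_error_handle(struct work_struct *work)
{
	struct demo_transport *t =
		container_of(work, struct demo_transport, error_worker);

	if (test_and_clear_bit(DEMO_WAKE_DISCONNECT, &t->sock_state))
		pr_debug("demo: would force-close the transport\n");
	if (test_and_clear_bit(DEMO_WAKE_WRITE, &t->sock_state))
		pr_debug("demo: would wake write-space waiters\n");
	if (test_and_clear_bit(DEMO_WAKE_ERROR, &t->sock_state))
		pr_debug("demo: would report the socket error\n");
}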