author		Trond Myklebust <trond.myklebust@hammerspace.com>	2019-05-01 10:49:27 -0400
committer	Trond Myklebust <trond.myklebust@hammerspace.com>	2019-07-06 14:54:48 -0400
commit		7e0a0e38fcfea47e74b0ff6da6266f00bcd2af43 (patch)
tree		7128a333d8563e59c542b7a7725c7bf0928080b8 /net/sunrpc
parent		44942b4e457beda00981f616402a1a791e8c616e (diff)
SUNRPC: Replace the queue timer with a delayed work function
The queue timer function, which walks the RPC queue in order to locate
candidates for waking up, is one of the current constraints against
removing the bh-safe queue spin locks. Replace it with a delayed work
queue, so that we can do the actual rpc task wake-ups from an ordinary
process context.
Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
Diffstat (limited to 'net/sunrpc')
-rw-r--r--	net/sunrpc/sched.c	30
1 file changed, 20 insertions(+), 10 deletions(-)
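The conversion follows a standard kernel recipe: the struct timer_list becomes a struct delayed_work, the timer callback becomes a work function that recovers its container via container_of(), and the absolute jiffies deadline is translated into the relative delay that mod_delayed_work() expects. Below is a minimal sketch of that pattern; all names (my_queue, my_timeout_fn, ...) are hypothetical illustrations, not code from this patch, and system_wq stands in for the rpciod_workqueue the patch actually uses.

#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/workqueue.h>

struct my_queue {
	struct delayed_work dwork;	/* replaces struct timer_list */
	struct list_head pending;	/* waiters, like timer_list.list */
	unsigned long expires;		/* cached absolute deadline (jiffies) */
};

static void my_timeout_fn(struct work_struct *work)
{
	struct my_queue *q = container_of(work, struct my_queue, dwork.work);

	/* Runs in ordinary process context rather than softirq context,
	 * so the wake-up work is free of the bh-safe locking constraint. */
}

static void my_queue_init(struct my_queue *q)
{
	/* Deferrable work may fire a little late on an idle CPU,
	 * which saves needless wakeups for a coarse timeout. */
	INIT_DEFERRABLE_WORK(&q->dwork, my_timeout_fn);
	INIT_LIST_HEAD(&q->pending);
	q->expires = 0;
}

static void my_queue_set_timer(struct my_queue *q, unsigned long expires)
{
	unsigned long now = jiffies;

	q->expires = expires;
	/* Timers took an absolute expiry; mod_delayed_work() wants a
	 * relative delay, clamped to zero if already in the past. */
	mod_delayed_work(system_wq, &q->dwork,
			 time_before_eq(expires, now) ? 0 : expires - now);
}

static void my_queue_destroy(struct my_queue *q)
{
	cancel_delayed_work_sync(&q->dwork);	/* was del_timer_sync() */
}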
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index a2c114812717..e0a0cf381eba 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -46,7 +46,7 @@ static mempool_t *rpc_buffer_mempool __read_mostly;
 
 static void rpc_async_schedule(struct work_struct *);
 static void rpc_release_task(struct rpc_task *task);
-static void __rpc_queue_timer_fn(struct timer_list *t);
+static void __rpc_queue_timer_fn(struct work_struct *);
 
 /*
  * RPC tasks sit here while waiting for conditions to improve.
@@ -87,13 +87,19 @@ __rpc_disable_timer(struct rpc_wait_queue *queue, struct rpc_task *task)
 	task->tk_timeout = 0;
 	list_del(&task->u.tk_wait.timer_list);
 	if (list_empty(&queue->timer_list.list))
-		del_timer(&queue->timer_list.timer);
+		cancel_delayed_work(&queue->timer_list.dwork);
 }
 
 static void
 rpc_set_queue_timer(struct rpc_wait_queue *queue, unsigned long expires)
 {
-	timer_reduce(&queue->timer_list.timer, expires);
+	unsigned long now = jiffies;
+	queue->timer_list.expires = expires;
+	if (time_before_eq(expires, now))
+		expires = 0;
+	else
+		expires -= now;
+	mod_delayed_work(rpciod_workqueue, &queue->timer_list.dwork, expires);
 }
 
 /*
@@ -107,7 +113,8 @@ __rpc_add_timer(struct rpc_wait_queue *queue, struct rpc_task *task,
 			task->tk_pid, jiffies_to_msecs(timeout - jiffies));
 
 	task->tk_timeout = timeout;
-	rpc_set_queue_timer(queue, timeout);
+	if (list_empty(&queue->timer_list.list) || time_before(timeout, queue->timer_list.expires))
+		rpc_set_queue_timer(queue, timeout);
 	list_add(&task->u.tk_wait.timer_list, &queue->timer_list.list);
 }
 
@@ -250,7 +257,8 @@ static void __rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const c
 	queue->maxpriority = nr_queues - 1;
 	rpc_reset_waitqueue_priority(queue);
 	queue->qlen = 0;
-	timer_setup(&queue->timer_list.timer, __rpc_queue_timer_fn, 0);
+	queue->timer_list.expires = 0;
+	INIT_DEFERRABLE_WORK(&queue->timer_list.dwork, __rpc_queue_timer_fn);
 	INIT_LIST_HEAD(&queue->timer_list.list);
 	rpc_assign_waitqueue_name(queue, qname);
 }
@@ -269,7 +277,7 @@ EXPORT_SYMBOL_GPL(rpc_init_wait_queue);
 
 void rpc_destroy_wait_queue(struct rpc_wait_queue *queue)
 {
-	del_timer_sync(&queue->timer_list.timer);
+	cancel_delayed_work_sync(&queue->timer_list.dwork);
 }
 EXPORT_SYMBOL_GPL(rpc_destroy_wait_queue);
 
@@ -759,13 +767,15 @@ void rpc_wake_up_status(struct rpc_wait_queue *queue, int status)
 }
 EXPORT_SYMBOL_GPL(rpc_wake_up_status);
 
-static void __rpc_queue_timer_fn(struct timer_list *t)
+static void __rpc_queue_timer_fn(struct work_struct *work)
 {
-	struct rpc_wait_queue *queue = from_timer(queue, t, timer_list.timer);
+	struct rpc_wait_queue *queue = container_of(work,
+			struct rpc_wait_queue,
+			timer_list.dwork.work);
 	struct rpc_task *task, *n;
 	unsigned long expires, now, timeo;
 
-	spin_lock(&queue->lock);
+	spin_lock_bh(&queue->lock);
 	expires = now = jiffies;
 	list_for_each_entry_safe(task, n, &queue->timer_list.list, u.tk_wait.timer_list) {
 		timeo = task->tk_timeout;
@@ -780,7 +790,7 @@ static void __rpc_queue_timer_fn(struct timer_list *t)
 	}
 	if (!list_empty(&queue->timer_list.list))
 		rpc_set_queue_timer(queue, expires);
-	spin_unlock(&queue->lock);
+	spin_unlock_bh(&queue->lock);
 }
 
 static void __rpc_atrun(struct rpc_task *task)
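Two details of the change are easy to miss. First, timer_reduce() only ever moved a pending timer earlier, so the old code could call rpc_set_queue_timer() unconditionally; mod_delayed_work() reprograms the work item whatever the new delay is, which is why __rpc_add_timer() now compares against the cached queue->timer_list.expires itself. Continuing the hypothetical sketch above:

/* Caller-side guard: rearm only when the queue is idle or the new
 * deadline is earlier, since mod_delayed_work(), unlike timer_reduce(),
 * reprograms unconditionally. Hypothetical names, as above. */
static void my_queue_add_timer(struct my_queue *q, unsigned long timeout)
{
	if (list_empty(&q->pending) || time_before(timeout, q->expires))
		my_queue_set_timer(q, timeout);
	/* ...then link the new waiter onto q->pending... */
}

Second, the switch from spin_lock() to spin_lock_bh() in __rpc_queue_timer_fn() is the flip side of leaving softirq context: the old timer callback already ran with bottom halves disabled, while the work function runs in process context and must disable them itself for as long as other paths still take queue->lock bh-safe. Dropping that requirement altogether is the follow-up work the commit message points to.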