Diffstat (limited to 'net/sunrpc/sched.c')
-rw-r--r--	net/sunrpc/sched.c | 117
1 file changed, 83 insertions(+), 34 deletions(-)
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index 6357fcb00c7e..fb20f25ddec9 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -98,6 +98,39 @@ __rpc_add_timer(struct rpc_wait_queue *queue, struct rpc_task *task)
 	list_add(&task->u.tk_wait.timer_list, &queue->timer_list.list);
 }
 
+static void rpc_rotate_queue_owner(struct rpc_wait_queue *queue)
+{
+	struct list_head *q = &queue->tasks[queue->priority];
+	struct rpc_task *task;
+
+	if (!list_empty(q)) {
+		task = list_first_entry(q, struct rpc_task, u.tk_wait.list);
+		if (task->tk_owner == queue->owner)
+			list_move_tail(&task->u.tk_wait.list, q);
+	}
+}
+
+static void rpc_set_waitqueue_priority(struct rpc_wait_queue *queue, int priority)
+{
+	if (queue->priority != priority) {
+		/* Fairness: rotate the list when changing priority */
+		rpc_rotate_queue_owner(queue);
+		queue->priority = priority;
+	}
+}
+
+static void rpc_set_waitqueue_owner(struct rpc_wait_queue *queue, pid_t pid)
+{
+	queue->owner = pid;
+	queue->nr = RPC_BATCH_COUNT;
+}
+
+static void rpc_reset_waitqueue_priority(struct rpc_wait_queue *queue)
+{
+	rpc_set_waitqueue_priority(queue, queue->maxpriority);
+	rpc_set_waitqueue_owner(queue, 0);
+}
+
 /*
  * Add new request to a priority queue.
  */
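Note: the fairness rotation in rpc_rotate_queue_owner() hinges on list_move_tail() on a circular doubly-linked list: moving the current owner's task from the head of the active priority list to its tail means the next dequeue picks a different owner. A minimal userspace sketch of that primitive, with list_head and its helpers re-implemented so the example compiles standalone (the kernel versions live in <linux/list.h>):

	#include <stdio.h>

	struct list_head { struct list_head *next, *prev; };

	static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }

	static void list_add_tail(struct list_head *e, struct list_head *h)
	{
		e->prev = h->prev;
		e->next = h;
		h->prev->next = e;
		h->prev = e;
	}

	static void list_del(struct list_head *e)
	{
		e->prev->next = e->next;
		e->next->prev = e->prev;
	}

	/* the kernel's list_move_tail(): unlink, then re-insert at the tail */
	static void list_move_tail(struct list_head *e, struct list_head *h)
	{
		list_del(e);
		list_add_tail(e, h);
	}

	int main(void)
	{
		struct list_head queue, a, b, c;

		INIT_LIST_HEAD(&queue);
		list_add_tail(&a, &queue);	/* queue: a, b, c */
		list_add_tail(&b, &queue);
		list_add_tail(&c, &queue);

		list_move_tail(&a, &queue);	/* rotate: queue is now b, c, a */
		printf("head is b: %s\n", queue.next == &b ? "yes" : "no");
		return 0;
	}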
@@ -109,9 +142,11 @@ static void __rpc_add_wait_queue_priority(struct rpc_wait_queue *queue,
 	struct rpc_task *t;
 
 	INIT_LIST_HEAD(&task->u.tk_wait.links);
-	q = &queue->tasks[queue_priority];
 	if (unlikely(queue_priority > queue->maxpriority))
-		q = &queue->tasks[queue->maxpriority];
+		queue_priority = queue->maxpriority;
+	if (queue_priority > queue->priority)
+		rpc_set_waitqueue_priority(queue, queue_priority);
+	q = &queue->tasks[queue_priority];
 	list_for_each_entry(t, q, u.tk_wait.list) {
 		if (t->tk_owner == task->tk_owner) {
 			list_add_tail(&task->u.tk_wait.list, &t->u.tk_wait.links);
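Note: with this change, enqueueing a task at a higher level than the queue is currently serving raises queue->priority on the spot, so newly queued high-priority work cannot be starved behind the level being served; out-of-range requests are clamped to maxpriority first. A toy sketch of the clamp-then-bump decision, with a hypothetical struct standing in for struct rpc_wait_queue:

	/* toy_queue is a hypothetical stand-in for struct rpc_wait_queue */
	struct toy_queue {
		int priority;		/* level the queue is currently serving */
		int maxpriority;	/* highest valid index into tasks[] */
	};

	static int toy_pick_level(struct toy_queue *q, int requested)
	{
		if (requested > q->maxpriority)	/* clamp bogus requests */
			requested = q->maxpriority;
		if (requested > q->priority)	/* raise, never lower, here */
			q->priority = requested;
		return requested;		/* index of the list to use */
	}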
@@ -133,7 +168,9 @@ static void __rpc_add_wait_queue(struct rpc_wait_queue *queue,
 		struct rpc_task *task,
 		unsigned char queue_priority)
 {
-	BUG_ON (RPC_IS_QUEUED(task));
+	WARN_ON_ONCE(RPC_IS_QUEUED(task));
+	if (RPC_IS_QUEUED(task))
+		return;
 
 	if (RPC_IS_PRIORITY(queue))
 		__rpc_add_wait_queue_priority(queue, task, queue_priority);
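Note: this is the recurring pattern of the series. BUG_ON(), which panics the machine on a "cannot happen" state, becomes WARN_ON_ONCE() plus an early return: one stack trace is logged and the operation is refused. A userspace approximation of the macro; the kernel version in <asm-generic/bug.h> additionally dumps a stack trace and taints the kernel:

	#include <stdio.h>

	/* evaluates to the (boolean) condition, warning on first trigger only */
	#define WARN_ON_ONCE(cond) ({					\
		static int __warned;					\
		int __ret = !!(cond);					\
		if (__ret && !__warned) {				\
			__warned = 1;					\
			fprintf(stderr, "WARNING at %s:%d\n",		\
				__FILE__, __LINE__);			\
		}							\
		__ret;							\
	})

A caller can then bail out instead of crashing, e.g. `if (WARN_ON_ONCE(already_queued)) return;` (the diff spells the same thing as a separate test after the warning).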
@@ -178,24 +215,6 @@ static void __rpc_remove_wait_queue(struct rpc_wait_queue *queue, struct rpc_tas
 			task->tk_pid, queue, rpc_qname(queue));
 }
 
-static inline void rpc_set_waitqueue_priority(struct rpc_wait_queue *queue, int priority)
-{
-	queue->priority = priority;
-	queue->count = 1 << (priority * 2);
-}
-
-static inline void rpc_set_waitqueue_owner(struct rpc_wait_queue *queue, pid_t pid)
-{
-	queue->owner = pid;
-	queue->nr = RPC_BATCH_COUNT;
-}
-
-static inline void rpc_reset_waitqueue_priority(struct rpc_wait_queue *queue)
-{
-	rpc_set_waitqueue_priority(queue, queue->maxpriority);
-	rpc_set_waitqueue_owner(queue, 0);
-}
-
 static void __rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname, unsigned char nr_queues)
 {
 	int i;
@@ -334,7 +353,7 @@ static void __rpc_sleep_on_priority(struct rpc_wait_queue *q,
 
 	__rpc_add_wait_queue(q, task, queue_priority);
 
-	BUG_ON(task->tk_callback != NULL);
+	WARN_ON_ONCE(task->tk_callback != NULL);
 	task->tk_callback = action;
 	__rpc_add_timer(q, task);
 }
@@ -343,7 +362,12 @@ void rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
 		rpc_action action)
 {
 	/* We shouldn't ever put an inactive task to sleep */
-	BUG_ON(!RPC_IS_ACTIVATED(task));
+	WARN_ON_ONCE(!RPC_IS_ACTIVATED(task));
+	if (!RPC_IS_ACTIVATED(task)) {
+		task->tk_status = -EIO;
+		rpc_put_task_async(task);
+		return;
+	}
 
 	/*
 	 * Protect the queue operations.
@@ -358,7 +382,12 @@ void rpc_sleep_on_priority(struct rpc_wait_queue *q, struct rpc_task *task,
 		rpc_action action, int priority)
 {
 	/* We shouldn't ever put an inactive task to sleep */
-	BUG_ON(!RPC_IS_ACTIVATED(task));
+	WARN_ON_ONCE(!RPC_IS_ACTIVATED(task));
+	if (!RPC_IS_ACTIVATED(task)) {
+		task->tk_status = -EIO;
+		rpc_put_task_async(task);
+		return;
+	}
 
 	/*
 	 * Protect the queue operations.
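Note: in both rpc_sleep_on() variants the fallback does more than return early: the task is failed with -EIO and its reference is dropped via rpc_put_task_async(), which defers the final put to rpciod so the caller's context never has to sleep. The guard could be factored out; a sketch with a hypothetical helper name:

	/* rpc_fail_inactive_task() is a hypothetical refactoring, not in the diff */
	static bool rpc_fail_inactive_task(struct rpc_task *task)
	{
		if (likely(RPC_IS_ACTIVATED(task)))
			return false;
		WARN_ON_ONCE(1);		/* one stack trace, no panic */
		task->tk_status = -EIO;		/* fail the call with an I/O error */
		rpc_put_task_async(task);	/* drop the reference without sleeping */
		return true;
	}

Each rpc_sleep_on() variant would then open with `if (rpc_fail_inactive_task(task)) return;`.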
@@ -367,6 +396,7 @@ void rpc_sleep_on_priority(struct rpc_wait_queue *q, struct rpc_task *task,
 	__rpc_sleep_on_priority(q, task, action, priority - RPC_PRIORITY_LOW);
 	spin_unlock_bh(&q->lock);
 }
+EXPORT_SYMBOL_GPL(rpc_sleep_on_priority);
 
 /**
  * __rpc_do_wake_up_task - wake up a single rpc_task
@@ -451,8 +481,7 @@ static struct rpc_task *__rpc_find_next_queued_priority(struct rpc_wait_queue *q
 		/*
 		 * Check if we need to switch queues.
 		 */
-		if (--queue->count)
-			goto new_owner;
+		goto new_owner;
 	}
 
 	/*
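Note: queue->count was a per-level quota (set to 1 << (priority * 2) above): each dequeue decremented it, and once it expired the scheduler fell through to service a lower-priority list. Removing it makes the queue strictly priority-ordered, with only the owner rotating. A rough toy model of the behaviour change, ignoring the wrap-around scan, all names hypothetical:

	struct toy_sched {
		int priority;	/* level currently being served */
		int count;	/* old per-level quota */
	};

	static int old_next_level(struct toy_sched *s)
	{
		if (--s->count)
			return s->priority;	/* quota left: stay at this level */
		return s->priority - 1;		/* quota spent: serve a lower list */
	}

	static int new_next_level(struct toy_sched *s)
	{
		return s->priority;		/* strict priority; only the owner rotates */
	}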
@@ -697,7 +726,9 @@ static void __rpc_execute(struct rpc_task *task)
 	dprintk("RPC: %5u __rpc_execute flags=0x%x\n",
 			task->tk_pid, task->tk_flags);
 
-	BUG_ON(RPC_IS_QUEUED(task));
+	WARN_ON_ONCE(RPC_IS_QUEUED(task));
+	if (RPC_IS_QUEUED(task))
+		return;
 
 	for (;;) {
 		void (*do_action)(struct rpc_task *);
@@ -919,16 +950,35 @@ struct rpc_task *rpc_new_task(const struct rpc_task_setup *setup_data)
 	return task;
 }
 
+/*
+ * rpc_free_task - release rpc task and perform cleanups
+ *
+ * Note that we free up the rpc_task _after_ rpc_release_calldata()
+ * in order to work around a workqueue dependency issue.
+ *
+ * Tejun Heo states:
+ * "Workqueue currently considers two work items to be the same if they're
+ * on the same address and won't execute them concurrently - ie. it
+ * makes a work item which is queued again while being executed wait
+ * for the previous execution to complete.
+ *
+ * If a work function frees the work item, and then waits for an event
+ * which should be performed by another work item and *that* work item
+ * recycles the freed work item, it can create a false dependency loop.
+ * There really is no reliable way to detect this short of verifying
+ * every memory free."
+ *
+ */
 static void rpc_free_task(struct rpc_task *task)
 {
-	const struct rpc_call_ops *tk_ops = task->tk_ops;
-	void *calldata = task->tk_calldata;
+	unsigned short tk_flags = task->tk_flags;
+
+	rpc_release_calldata(task->tk_ops, task->tk_calldata);
 
-	if (task->tk_flags & RPC_TASK_DYNAMIC) {
+	if (tk_flags & RPC_TASK_DYNAMIC) {
 		dprintk("RPC: %5u freeing task\n", task->tk_pid);
 		mempool_free(task, rpc_task_mempool);
 	}
-	rpc_release_calldata(tk_ops, calldata);
 }
 
 static void rpc_async_release(struct work_struct *work)
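Note: the reordering matters because the workqueue keys work-item identity on the work_struct address, as the quoted explanation says. A kernel-style sketch of the hazard being avoided, all names hypothetical (this is an illustration of the quote, not code from the diff):

	static struct workqueue_struct *wq;	/* some shared workqueue */
	static DECLARE_COMPLETION(done);

	static void done_fn(struct work_struct *work)
	{
		complete(&done);		/* would unblock work_a */
	}

	static void work_a(struct work_struct *work)
	{
		kfree(work);			/* frees its own work item... */
		wait_for_completion(&done);	/* ...then blocks until done_fn runs */
	}

	static void work_b(struct work_struct *work)
	{
		struct work_struct *w = kmalloc(sizeof(*w), GFP_KERNEL);

		/* kmalloc() may hand back the address work_a just freed; the
		 * workqueue then treats w as "work_a queued again" and makes
		 * it wait for work_a to finish - while work_a is itself
		 * waiting for done_fn: a false dependency loop. */
		INIT_WORK(w, done_fn);
		queue_work(wq, w);
	}

Freeing the rpc_task (which embeds its work item) only after rpc_release_calldata() has finished removes the window in which the address can be recycled under a still-running work function.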
@@ -938,8 +988,7 @@ static void rpc_async_release(struct work_struct *work)
 
 static void rpc_release_resources_task(struct rpc_task *task)
 {
-	if (task->tk_rqstp)
-		xprt_release(task);
+	xprt_release(task);
 	if (task->tk_msg.rpc_cred) {
 		put_rpccred(task->tk_msg.rpc_cred);
 		task->tk_msg.rpc_cred = NULL;
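Note: dropping the tk_rqstp test moves the responsibility into the callee: for this to be safe, xprt_release() must itself tolerate a task that holds no request slot. A sketch of the kind of guard assumed here (the real function lives in net/sunrpc/xprt.c and does more in the NULL case):

	void xprt_release(struct rpc_task *task)
	{
		struct rpc_rqst *req = task->tk_rqstp;

		if (req == NULL)	/* no request slot: nothing to tear down */
			return;
		/* ... release the slot, congestion state, buffers, etc. ... */
	}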
@@ -981,7 +1030,7 @@ static void rpc_release_task(struct rpc_task *task)
 {
 	dprintk("RPC: %5u release task\n", task->tk_pid);
 
-	BUG_ON (RPC_IS_QUEUED(task));
+	WARN_ON_ONCE(RPC_IS_QUEUED(task));
 
 	rpc_release_resources_task(task);
 