|  |  |  |
|---|---|---|
| author | Trond Myklebust <Trond.Myklebust@netapp.com> | 2011-07-17 18:11:34 -0400 |
| committer | Trond Myklebust <Trond.Myklebust@netapp.com> | 2011-07-17 18:11:34 -0400 |
| commit | 3b27bad7f7ceacca6d6c0ef647ffb38aa55a8336 (patch) | |
| tree | ccf8bcd5f7276655c33133685a1be3f7281d8278 | |
| parent | d9ba131d8f58c0d2ff5029e7002ab43f913b36f9 (diff) | |
SUNRPC: Allow caller of rpc_sleep_on() to select priority levels
Currently, the caller has to change the value of task->tk_priority if
it wants to select the priority level on which the task will sleep.
This patch allows the caller to select a priority level at sleep time
rather than always using task->tk_priority.
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
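
To make the change concrete, here is a minimal, hypothetical sketch of a caller before and after this patch. It is not part of the patch: `demo_queue`, `demo_wake_action()`, and the `demo_sleep_*()` functions are invented names, the wait queue is assumed to have been initialized elsewhere with rpc_init_wait_queue(), and the tk_priority offset shown in the comment follows from the `priority - RPC_PRIORITY_LOW` conversion that rpc_sleep_on_priority() performs in the diff below.

```c
#include <linux/sunrpc/sched.h>

/* Hypothetical wait queue; assume rpc_init_wait_queue(&demo_queue, "demo")
 * was called during setup. */
static struct rpc_wait_queue demo_queue;

/* Callback run when the task is later woken from demo_queue. */
static void demo_wake_action(struct rpc_task *task)
{
}

/* Before this patch: overwrite the task's own priority first.
 * tk_priority stores a queue index, i.e. an RPC_PRIORITY_* value already
 * offset by RPC_PRIORITY_LOW. */
static void demo_sleep_old_style(struct rpc_task *task)
{
	task->tk_priority = RPC_PRIORITY_HIGH - RPC_PRIORITY_LOW;
	rpc_sleep_on(&demo_queue, task, demo_wake_action);
}

/* After this patch: choose the priority level at sleep time and leave
 * task->tk_priority alone. */
static void demo_sleep_new_style(struct rpc_task *task)
{
	rpc_sleep_on_priority(&demo_queue, task, demo_wake_action,
			      RPC_PRIORITY_HIGH);
}
```

Passing the priority as an explicit argument keeps the task's configured tk_priority intact for subsequent sleeps on other queues.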
|  |  |  |
|---|---|---|
| -rw-r--r-- | include/linux/sunrpc/sched.h | 4 |
| -rw-r--r-- | net/sunrpc/sched.c | 38 |

2 files changed, 33 insertions, 9 deletions
diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
index fe2d8e6b923b..e7756896f3ca 100644
--- a/include/linux/sunrpc/sched.h
+++ b/include/linux/sunrpc/sched.h
@@ -227,6 +227,10 @@ void rpc_init_wait_queue(struct rpc_wait_queue *, const char *);
 void		rpc_destroy_wait_queue(struct rpc_wait_queue *);
 void		rpc_sleep_on(struct rpc_wait_queue *, struct rpc_task *,
 					rpc_action action);
+void		rpc_sleep_on_priority(struct rpc_wait_queue *,
+					struct rpc_task *,
+					rpc_action action,
+					int priority);
 void		rpc_wake_up_queued_task(struct rpc_wait_queue *,
 					struct rpc_task *);
 void		rpc_wake_up(struct rpc_wait_queue *);
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index 4814e246a874..d12ffa545811 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -97,14 +97,16 @@ __rpc_add_timer(struct rpc_wait_queue *queue, struct rpc_task *task)
 /*
  * Add new request to a priority queue.
  */
-static void __rpc_add_wait_queue_priority(struct rpc_wait_queue *queue, struct rpc_task *task)
+static void __rpc_add_wait_queue_priority(struct rpc_wait_queue *queue,
+		struct rpc_task *task,
+		unsigned char queue_priority)
 {
 	struct list_head *q;
 	struct rpc_task *t;
 
 	INIT_LIST_HEAD(&task->u.tk_wait.links);
-	q = &queue->tasks[task->tk_priority];
-	if (unlikely(task->tk_priority > queue->maxpriority))
+	q = &queue->tasks[queue_priority];
+	if (unlikely(queue_priority > queue->maxpriority))
 		q = &queue->tasks[queue->maxpriority];
 	list_for_each_entry(t, q, u.tk_wait.list) {
 		if (t->tk_owner == task->tk_owner) {
@@ -123,12 +125,14 @@ static void __rpc_add_wait_queue_priority(struct rpc_wait_queue *queue, struct r
  * improve overall performance.
  * Everyone else gets appended to the queue to ensure proper FIFO behavior.
  */
-static void __rpc_add_wait_queue(struct rpc_wait_queue *queue, struct rpc_task *task)
+static void __rpc_add_wait_queue(struct rpc_wait_queue *queue,
+		struct rpc_task *task,
+		unsigned char queue_priority)
 {
 	BUG_ON (RPC_IS_QUEUED(task));
 
 	if (RPC_IS_PRIORITY(queue))
-		__rpc_add_wait_queue_priority(queue, task);
+		__rpc_add_wait_queue_priority(queue, task, queue_priority);
 	else if (RPC_IS_SWAPPER(task))
 		list_add(&task->u.tk_wait.list, &queue->tasks[0]);
 	else
@@ -311,13 +315,15 @@ static void rpc_make_runnable(struct rpc_task *task)
  * NB: An RPC task will only receive interrupt-driven events as long
  * as it's on a wait queue.
  */
-static void __rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
-			rpc_action action)
+static void __rpc_sleep_on_priority(struct rpc_wait_queue *q,
+		struct rpc_task *task,
+		rpc_action action,
+		unsigned char queue_priority)
 {
 	dprintk("RPC: %5u sleep_on(queue \"%s\" time %lu)\n",
 			task->tk_pid, rpc_qname(q), jiffies);
 
-	__rpc_add_wait_queue(q, task);
+	__rpc_add_wait_queue(q, task, queue_priority);
 
 	BUG_ON(task->tk_callback != NULL);
 	task->tk_callback = action;
@@ -334,11 +340,25 @@ void rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
 	 * Protect the queue operations.
 	 */
 	spin_lock_bh(&q->lock);
-	__rpc_sleep_on(q, task, action);
+	__rpc_sleep_on_priority(q, task, action, task->tk_priority);
 	spin_unlock_bh(&q->lock);
 }
 EXPORT_SYMBOL_GPL(rpc_sleep_on);
 
+void rpc_sleep_on_priority(struct rpc_wait_queue *q, struct rpc_task *task,
+		rpc_action action, int priority)
+{
+	/* We shouldn't ever put an inactive task to sleep */
+	BUG_ON(!RPC_IS_ACTIVATED(task));
+
+	/*
+	 * Protect the queue operations.
+	 */
+	spin_lock_bh(&q->lock);
+	__rpc_sleep_on_priority(q, task, action, priority - RPC_PRIORITY_LOW);
+	spin_unlock_bh(&q->lock);
+}
+
 /**
  * __rpc_do_wake_up_task - wake up a single rpc_task
  * @queue: wait queue
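
A note on the final hunk: rpc_sleep_on_priority() accepts a public RPC_PRIORITY_* value and subtracts RPC_PRIORITY_LOW to turn it into an index into queue->tasks[], which is the unit that task->tk_priority and the new queue_priority parameters use internally. Assuming the usual values from include/linux/sunrpc/sched.h (RPC_PRIORITY_LOW = -1, RPC_PRIORITY_NORMAL = 0, RPC_PRIORITY_HIGH = 1), the mapping looks like this; `demo_priority_to_index()` is an invented name that only mirrors the in-function arithmetic, not a real helper:

```c
/* RPC_PRIORITY_LOW -> tasks[0], RPC_PRIORITY_NORMAL -> tasks[1],
 * RPC_PRIORITY_HIGH -> tasks[2] */
static inline unsigned char demo_priority_to_index(int priority)
{
	return priority - RPC_PRIORITY_LOW;
}
```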