-rw-r--r--   fs/nfs/nfs4_fs.h              |  1
-rw-r--r--   fs/nfs/nfs4proc.c             | 13
-rw-r--r--   fs/nfs/nfs4state.c            | 17
-rw-r--r--   include/linux/sunrpc/sched.h  |  3
-rw-r--r--   net/sunrpc/sched.c            | 42
-rw-r--r--   net/sunrpc/xprt.c             | 49
6 files changed, 79 insertions(+), 46 deletions(-)
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index df3d02c3e8cb..c45c21a5470f 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -222,6 +222,7 @@ static inline struct nfs4_session *nfs4_get_session(const struct nfs_server *ser
 	return server->nfs_client->cl_session;
 }
 
+extern bool nfs4_set_task_privileged(struct rpc_task *task, void *dummy);
 extern int nfs4_setup_sequence(const struct nfs_server *server,
 		struct nfs4_sequence_args *args, struct nfs4_sequence_res *res,
 		struct rpc_task *task);
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 360240cc1e9b..828a76590af9 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -385,17 +385,20 @@ nfs4_free_slot(struct nfs4_slot_table *tbl, u8 free_slotid)
 		free_slotid, tbl->highest_used_slotid);
 }
 
+bool nfs4_set_task_privileged(struct rpc_task *task, void *dummy)
+{
+	rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
+	return true;
+}
+
 /*
  * Signal state manager thread if session fore channel is drained
  */
 static void nfs4_check_drain_fc_complete(struct nfs4_session *ses)
 {
-	struct rpc_task *task;
-
 	if (!test_bit(NFS4_SESSION_DRAINING, &ses->session_state)) {
-		task = rpc_wake_up_next(&ses->fc_slot_table.slot_tbl_waitq);
-		if (task)
-			rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
+		rpc_wake_up_first(&ses->fc_slot_table.slot_tbl_waitq,
+				nfs4_set_task_privileged, NULL);
 		return;
 	}
 
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index a42e60d3ee50..f0e9881c2aa2 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -190,23 +190,22 @@ static int nfs41_setup_state_renewal(struct nfs_client *clp)
 static void nfs4_end_drain_session(struct nfs_client *clp)
 {
 	struct nfs4_session *ses = clp->cl_session;
+	struct nfs4_slot_table *tbl;
 	int max_slots;
 
 	if (ses == NULL)
 		return;
+	tbl = &ses->fc_slot_table;
 	if (test_and_clear_bit(NFS4_SESSION_DRAINING, &ses->session_state)) {
-		spin_lock(&ses->fc_slot_table.slot_tbl_lock);
-		max_slots = ses->fc_slot_table.max_slots;
+		spin_lock(&tbl->slot_tbl_lock);
+		max_slots = tbl->max_slots;
 		while (max_slots--) {
-			struct rpc_task *task;
-
-			task = rpc_wake_up_next(&ses->fc_slot_table.
-						slot_tbl_waitq);
-			if (!task)
+			if (rpc_wake_up_first(&tbl->slot_tbl_waitq,
+					nfs4_set_task_privileged,
+					NULL) == NULL)
 				break;
-			rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
 		}
-		spin_unlock(&ses->fc_slot_table.slot_tbl_lock);
+		spin_unlock(&tbl->slot_tbl_lock);
 	}
 }
 
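For context, the nfs4_end_drain_session() conversion above relies on rpc_wake_up_first() returning NULL once the wait queue is empty, which is what terminates the loop early. A minimal sketch of that drain-until-empty pattern follows; example_wake_any, example_drain_waiters, waitq and budget are illustrative names, not part of the patch, and unlike the NFS code the sketch does not bump each task to RPC_PRIORITY_PRIVILEGED.

#include <linux/sunrpc/sched.h>

/* Always agree to wake the task that rpc_wake_up_first() selected. */
static bool example_wake_any(struct rpc_task *task, void *data)
{
	return true;
}

/* Wake up to 'budget' queued tasks, stopping early once
 * rpc_wake_up_first() reports an empty queue. */
static void example_drain_waiters(struct rpc_wait_queue *waitq, int budget)
{
	while (budget--) {
		if (rpc_wake_up_first(waitq, example_wake_any, NULL) == NULL)
			break;
	}
}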
diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
index b16243a35f0b..bd337f990a41 100644
--- a/include/linux/sunrpc/sched.h
+++ b/include/linux/sunrpc/sched.h
@@ -235,6 +235,9 @@ void rpc_wake_up_queued_task(struct rpc_wait_queue *,
 					struct rpc_task *);
 void rpc_wake_up(struct rpc_wait_queue *);
 struct rpc_task *rpc_wake_up_next(struct rpc_wait_queue *);
+struct rpc_task *rpc_wake_up_first(struct rpc_wait_queue *,
+					bool (*)(struct rpc_task *, void *),
+					void *);
 void rpc_wake_up_status(struct rpc_wait_queue *, int);
 int rpc_queue_empty(struct rpc_wait_queue *);
 void rpc_delay(struct rpc_task *, unsigned long);
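The new declaration takes a caller-supplied predicate that is invoked, under the queue's lock, on the single task the queue would hand out next (the list head, or the priority scheduler's pick on a priority queue). A hypothetical usage sketch is shown below; example_task_uses_client and example_wake_head_for_client are illustrative names, not part of the patch. Note that only that one candidate task is considered: if it does not match, it stays queued and NULL is returned.

#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/sched.h>

/* Runs with the queue lock held: keep it non-blocking. */
static bool example_task_uses_client(struct rpc_task *task, void *data)
{
	return task->tk_client == data;
}

/* Wake the queue's next task only if it was submitted through 'clnt'.
 * Returns the woken task, or NULL if the queue is empty or the candidate
 * task belongs to some other client (in which case it is left asleep). */
static struct rpc_task *example_wake_head_for_client(struct rpc_wait_queue *waitq,
						     struct rpc_clnt *clnt)
{
	return rpc_wake_up_first(waitq, example_task_uses_client, clnt);
}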
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index 3341d8962786..f982dfe53993 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -422,7 +422,7 @@ EXPORT_SYMBOL_GPL(rpc_wake_up_queued_task);
 /*
  * Wake up the next task on a priority queue.
  */
-static struct rpc_task * __rpc_wake_up_next_priority(struct rpc_wait_queue *queue)
+static struct rpc_task *__rpc_find_next_queued_priority(struct rpc_wait_queue *queue)
 {
 	struct list_head *q;
 	struct rpc_task *task;
@@ -467,30 +467,54 @@ new_queue:
 new_owner:
 	rpc_set_waitqueue_owner(queue, task->tk_owner);
 out:
-	rpc_wake_up_task_queue_locked(queue, task);
 	return task;
 }
 
+static struct rpc_task *__rpc_find_next_queued(struct rpc_wait_queue *queue)
+{
+	if (RPC_IS_PRIORITY(queue))
+		return __rpc_find_next_queued_priority(queue);
+	if (!list_empty(&queue->tasks[0]))
+		return list_first_entry(&queue->tasks[0], struct rpc_task, u.tk_wait.list);
+	return NULL;
+}
+
 /*
- * Wake up the next task on the wait queue.
+ * Wake up the first task on the wait queue.
  */
-struct rpc_task * rpc_wake_up_next(struct rpc_wait_queue *queue)
+struct rpc_task *rpc_wake_up_first(struct rpc_wait_queue *queue,
+		bool (*func)(struct rpc_task *, void *), void *data)
 {
 	struct rpc_task *task = NULL;
 
-	dprintk("RPC: wake_up_next(%p \"%s\")\n",
+	dprintk("RPC: wake_up_first(%p \"%s\")\n",
 			queue, rpc_qname(queue));
 	spin_lock_bh(&queue->lock);
-	if (RPC_IS_PRIORITY(queue))
-		task = __rpc_wake_up_next_priority(queue);
-	else {
-		task_for_first(task, &queue->tasks[0])
+	task = __rpc_find_next_queued(queue);
+	if (task != NULL) {
+		if (func(task, data))
 			rpc_wake_up_task_queue_locked(queue, task);
+		else
+			task = NULL;
 	}
 	spin_unlock_bh(&queue->lock);
 
 	return task;
 }
+EXPORT_SYMBOL_GPL(rpc_wake_up_first);
+
+static bool rpc_wake_up_next_func(struct rpc_task *task, void *data)
+{
+	return true;
+}
+
+/*
+ * Wake up the next task on the wait queue.
+ */
+struct rpc_task *rpc_wake_up_next(struct rpc_wait_queue *queue)
+{
+	return rpc_wake_up_first(queue, rpc_wake_up_next_func, NULL);
+}
 EXPORT_SYMBOL_GPL(rpc_wake_up_next);
 
 /**
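Two properties of the implementation are worth spelling out: the callback runs under spin_lock_bh(&queue->lock), so it must not sleep, and returning false leaves the selected task asleep on the queue while rpc_wake_up_first() returns NULL. A hypothetical sketch of that decline path follows; example_free_slots, example_reserve_slot and example_kick_one_waiter are illustrative names, not part of the patch.

#include <linux/atomic.h>
#include <linux/sunrpc/sched.h>

static atomic_t example_free_slots = ATOMIC_INIT(4);

/* Non-blocking: agree to wake the task only if a slot can be reserved.
 * atomic_add_unless() returns 0 when the counter is already zero. */
static bool example_reserve_slot(struct rpc_task *task, void *data)
{
	return atomic_add_unless(&example_free_slots, -1, 0) != 0;
}

/* Returns true if a waiter was woken (and now owns one slot). A NULL
 * result means either the queue was empty or no slot was available, in
 * which case the waiter stays queued for a later kick. */
static bool example_kick_one_waiter(struct rpc_wait_queue *waitq)
{
	return rpc_wake_up_first(waitq, example_reserve_slot, NULL) != NULL;
}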
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index c64c0ef519b5..839f6ef2326b 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -292,54 +292,57 @@ static inline int xprt_lock_write(struct rpc_xprt *xprt, struct rpc_task *task)
 	return retval;
 }
 
-static void __xprt_lock_write_next(struct rpc_xprt *xprt)
+static bool __xprt_lock_write_func(struct rpc_task *task, void *data)
 {
-	struct rpc_task *task;
+	struct rpc_xprt *xprt = data;
 	struct rpc_rqst *req;
 
-	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
-		return;
-
-	task = rpc_wake_up_next(&xprt->sending);
-	if (task == NULL)
-		goto out_unlock;
-
 	req = task->tk_rqstp;
 	xprt->snd_task = task;
 	if (req) {
 		req->rq_bytes_sent = 0;
 		req->rq_ntrans++;
 	}
-	return;
+	return true;
+}
 
-out_unlock:
+static void __xprt_lock_write_next(struct rpc_xprt *xprt)
+{
+	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
+		return;
+
+	if (rpc_wake_up_first(&xprt->sending, __xprt_lock_write_func, xprt))
+		return;
 	xprt_clear_locked(xprt);
 }
 
-static void __xprt_lock_write_next_cong(struct rpc_xprt *xprt)
+static bool __xprt_lock_write_cong_func(struct rpc_task *task, void *data)
 {
-	struct rpc_task *task;
+	struct rpc_xprt *xprt = data;
 	struct rpc_rqst *req;
 
-	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
-		return;
-	if (RPCXPRT_CONGESTED(xprt))
-		goto out_unlock;
-	task = rpc_wake_up_next(&xprt->sending);
-	if (task == NULL)
-		goto out_unlock;
-
 	req = task->tk_rqstp;
 	if (req == NULL) {
 		xprt->snd_task = task;
-		return;
+		return true;
 	}
 	if (__xprt_get_cong(xprt, task)) {
 		xprt->snd_task = task;
 		req->rq_bytes_sent = 0;
 		req->rq_ntrans++;
-		return;
+		return true;
 	}
+	return false;
+}
+
+static void __xprt_lock_write_next_cong(struct rpc_xprt *xprt)
+{
+	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
+		return;
+	if (RPCXPRT_CONGESTED(xprt))
+		goto out_unlock;
+	if (rpc_wake_up_first(&xprt->sending, __xprt_lock_write_cong_func, xprt))
+		return;
 out_unlock:
 	xprt_clear_locked(xprt);
 }
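The visible effect of this conversion is that xprt->snd_task is now assigned inside the callback, while rpc_wake_up_first() still holds the wait queue lock and before the task is made runnable, rather than after rpc_wake_up_next() had already woken it. A condensed sketch of that handoff-under-lock idea follows, with hypothetical names (example_resource, example_grant_to_task, example_release; EXAMPLE_LOCKED stands in for XPRT_LOCKED) and none of the congestion or retry handling the real transport code needs.

#include <linux/bitops.h>
#include <linux/sunrpc/sched.h>

#define EXAMPLE_LOCKED	0

struct example_resource {
	unsigned long		state;		/* EXAMPLE_LOCKED bit */
	struct rpc_task		*owner;
	struct rpc_wait_queue	waiting;
};

/* Called under the wait queue lock: record the new owner before the
 * task is woken, so it can never run and find 'owner' still unset. */
static bool example_grant_to_task(struct rpc_task *task, void *data)
{
	struct example_resource *res = data;

	res->owner = task;
	return true;
}

static void example_release(struct example_resource *res)
{
	res->owner = NULL;
	/* Hand the resource straight to the next waiter, if any;
	 * otherwise drop the lock bit so later callers can take it. */
	if (rpc_wake_up_first(&res->waiting, example_grant_to_task, res) == NULL)
		clear_bit(EXAMPLE_LOCKED, &res->state);
}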