author     Trond Myklebust <Trond.Myklebust@netapp.com>  2005-06-22 13:16:28 -0400
committer  Trond Myklebust <Trond.Myklebust@netapp.com>  2005-06-22 16:07:30 -0400
commit     14b218a8e4f110206c46e586a3da372f665631e7 (patch)
tree       4a021ff7df8e4e6e4abdb4961575226db7d5452f
parent     455a396710b71a743b28da2ed2185e5a9b38e26f (diff)
[PATCH] RPC: Ensure rpc calls respect the RPC_NOINTR flag
For internal purposes, the rpc_clnt_sigmask() call is replaced by a call to
rpc_task_sigmask(), which ensures that the current task sigmask respects both
the client cl_intr flag and the per-task NOINTR flag.

Problem noted by Jiaying Zhang.

Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
-rw-r--r--  net/sunrpc/clnt.c | 71
1 file changed, 37 insertions(+), 34 deletions(-)
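As an aside for readers unfamiliar with the pattern: the helpers introduced below block every signal while an RPC call sleeps and re-allow SIGINT, SIGQUIT and SIGKILL only when the caller is interruptible. The following is a minimal userspace sketch of that pattern, not the kernel code; save_sigmask()/restore_sigmask() are illustrative stand-ins for rpc_save_sigmask()/rpc_restore_sigmask(), and the hard-coded "interruptible" argument stands in for the decision the patch derives from the client's cl_intr flag and the per-task NOINTR flag via !RPC_TASK_UNINTERRUPTIBLE(task).

/* Userspace illustration only -- not the kernel implementation. */
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

/* Block all signals; re-allow the interrupt signals only when intr is set,
 * mirroring what rpc_save_sigmask() does with siginitsetinv()/sigprocmask(). */
static void save_sigmask(sigset_t *oldset, int intr)
{
        sigset_t mask;

        sigfillset(&mask);
        if (intr) {
                sigdelset(&mask, SIGINT);
                sigdelset(&mask, SIGQUIT);
                sigdelset(&mask, SIGKILL);  /* cannot really be blocked anyway */
        }
        sigprocmask(SIG_BLOCK, &mask, oldset);
}

static void restore_sigmask(const sigset_t *oldset)
{
        sigprocmask(SIG_SETMASK, oldset, NULL);
}

int main(void)
{
        sigset_t oldset;

        save_sigmask(&oldset, 1);  /* 1 = interruptible, for illustration */
        printf("signals masked while the (pretend) RPC call sleeps\n");
        sleep(1);
        restore_sigmask(&oldset);
        return 0;
}

rpc_call_sync() and rpc_call_async() in the diff use the real helpers the same way: mask once the task exists (so GSS_AUTH upcalls are covered too), restore before returning.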
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index c979fcf88798..f17e6153b688 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -378,38 +378,41 @@ rpc_default_callback(struct rpc_task *task)
 }
 
 /*
- * Export the signal mask handling for aysnchronous code that
+ * Export the signal mask handling for synchronous code that
  * sleeps on RPC calls
  */
+#define RPC_INTR_SIGNALS (sigmask(SIGINT) | sigmask(SIGQUIT) | sigmask(SIGKILL))
 
+static void rpc_save_sigmask(sigset_t *oldset, int intr)
+{
+        unsigned long sigallow = 0;
+        sigset_t sigmask;
+
+        /* Block all signals except those listed in sigallow */
+        if (intr)
+                sigallow |= RPC_INTR_SIGNALS;
+        siginitsetinv(&sigmask, sigallow);
+        sigprocmask(SIG_BLOCK, &sigmask, oldset);
+}
+
+static inline void rpc_task_sigmask(struct rpc_task *task, sigset_t *oldset)
+{
+        rpc_save_sigmask(oldset, !RPC_TASK_UNINTERRUPTIBLE(task));
+}
+
+static inline void rpc_restore_sigmask(sigset_t *oldset)
+{
+        sigprocmask(SIG_SETMASK, oldset, NULL);
+}
+
 void rpc_clnt_sigmask(struct rpc_clnt *clnt, sigset_t *oldset)
 {
-        unsigned long sigallow = sigmask(SIGKILL);
-        unsigned long irqflags;
-
-        /* Turn off various signals */
-        if (clnt->cl_intr) {
-                struct k_sigaction *action = current->sighand->action;
-                if (action[SIGINT-1].sa.sa_handler == SIG_DFL)
-                        sigallow |= sigmask(SIGINT);
-                if (action[SIGQUIT-1].sa.sa_handler == SIG_DFL)
-                        sigallow |= sigmask(SIGQUIT);
-        }
-        spin_lock_irqsave(&current->sighand->siglock, irqflags);
-        *oldset = current->blocked;
-        siginitsetinv(&current->blocked, sigallow & ~oldset->sig[0]);
-        recalc_sigpending();
-        spin_unlock_irqrestore(&current->sighand->siglock, irqflags);
+        rpc_save_sigmask(oldset, clnt->cl_intr);
 }
 
 void rpc_clnt_sigunmask(struct rpc_clnt *clnt, sigset_t *oldset)
 {
-        unsigned long irqflags;
-
-        spin_lock_irqsave(&current->sighand->siglock, irqflags);
-        current->blocked = *oldset;
-        recalc_sigpending();
-        spin_unlock_irqrestore(&current->sighand->siglock, irqflags);
+        rpc_restore_sigmask(oldset);
 }
 
 /*
@@ -427,26 +430,26 @@ int rpc_call_sync(struct rpc_clnt *clnt, struct rpc_message *msg, int flags)
 
         BUG_ON(flags & RPC_TASK_ASYNC);
 
-        rpc_clnt_sigmask(clnt, &oldset);
-
         status = -ENOMEM;
         task = rpc_new_task(clnt, NULL, flags);
         if (task == NULL)
                 goto out;
 
+        /* Mask signals on RPC calls _and_ GSS_AUTH upcalls */
+        rpc_task_sigmask(task, &oldset);
+
         rpc_call_setup(task, msg, 0);
 
         /* Set up the call info struct and execute the task */
-        if (task->tk_status == 0)
+        if (task->tk_status == 0) {
                 status = rpc_execute(task);
-        else {
+        } else {
                 status = task->tk_status;
                 rpc_release_task(task);
         }
 
+        rpc_restore_sigmask(&oldset);
 out:
-        rpc_clnt_sigunmask(clnt, &oldset);
-
         return status;
 }
 
@@ -467,8 +470,6 @@ rpc_call_async(struct rpc_clnt *clnt, struct rpc_message *msg, int flags,
 
         flags |= RPC_TASK_ASYNC;
 
-        rpc_clnt_sigmask(clnt, &oldset);
-
         /* Create/initialize a new RPC task */
         if (!callback)
                 callback = rpc_default_callback;
@@ -477,6 +478,9 @@ rpc_call_async(struct rpc_clnt *clnt, struct rpc_message *msg, int flags,
                 goto out;
         task->tk_calldata = data;
 
+        /* Mask signals on GSS_AUTH upcalls */
+        rpc_task_sigmask(task, &oldset);
+
         rpc_call_setup(task, msg, 0);
 
         /* Set up the call info struct and execute the task */
@@ -486,9 +490,8 @@ rpc_call_async(struct rpc_clnt *clnt, struct rpc_message *msg, int flags,
         else
                 rpc_release_task(task);
 
+        rpc_restore_sigmask(&oldset);
 out:
-        rpc_clnt_sigunmask(clnt, &oldset);
-
         return status;
 }
 
@@ -666,7 +669,7 @@ call_allocate(struct rpc_task *task)
                 return;
         printk(KERN_INFO "RPC: buffer allocation failed for task %p\n", task);
 
-        if (RPC_IS_ASYNC(task) || !(task->tk_client->cl_intr && signalled())) {
+        if (RPC_IS_ASYNC(task) || !signalled()) {
                 xprt_release(task);
                 task->tk_action = call_reserve;
                 rpc_delay(task, HZ>>4);