author      Linus Torvalds <torvalds@linux-foundation.org>  2008-01-31 19:45:47 -0500
committer   Linus Torvalds <torvalds@linux-foundation.org>  2008-01-31 19:45:47 -0500
commit      75659ca0c10992dcb39258518368a0f6f56e935d (patch)
tree        5d014ceb2f10158061a23d0d976f9a613d85e659 /kernel
parent      fbdde7bd274d74729954190f99afcb1e3d9bbfba (diff)
parent      2dfe485a2c8afa54cb069fcf48476f6c90ea3fdf (diff)
Merge branch 'task_killable' of git://git.kernel.org/pub/scm/linux/kernel/git/willy/misc
* 'task_killable' of git://git.kernel.org/pub/scm/linux/kernel/git/willy/misc: (22 commits)
  Remove commented-out code copied from NFS
  NFS: Switch from intr mount option to TASK_KILLABLE
  Add wait_for_completion_killable
  Add wait_event_killable
  Add schedule_timeout_killable
  Use mutex_lock_killable in vfs_readdir
  Add mutex_lock_killable
  Use lock_page_killable
  Add lock_page_killable
  Add fatal_signal_pending
  Add TASK_WAKEKILL
  exit: Use task_is_*
  signal: Use task_is_*
  sched: Use task_contributes_to_load, TASK_ALL and TASK_NORMAL
  ptrace: Use task_is_*
  power: Use task_is_*
  wait: Use TASK_NORMAL
  proc/base.c: Use task_is_*
  proc/array.c: Use TASK_REPORT
  perfmon: Use task_is_*
  ...

Fixed up conflicts in NFS/sunrpc manually..
Diffstat (limited to 'kernel')
 -rw-r--r--  kernel/exit.c           88
 -rw-r--r--  kernel/mutex.c          36
 -rw-r--r--  kernel/power/process.c   6
 -rw-r--r--  kernel/ptrace.c          8
 -rw-r--r--  kernel/sched.c          28
 -rw-r--r--  kernel/signal.c         19
 -rw-r--r--  kernel/timer.c           7
 -rw-r--r--  kernel/wait.c            2
8 files changed, 117 insertions(+), 77 deletions(-)
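
The series' common theme: waits that were TASK_UNINTERRUPTIBLE (and therefore survive even SIGKILL) can now use the new TASK_KILLABLE state, which ignores ordinary signals but lets a fatal one end the sleep. A minimal sketch of the resulting calling convention in a driver read path; wait_event_killable() is added by this series, while the frob_* names and the device structure are hypothetical:

	/* Hypothetical device: a wait queue plus a data-ready test. */
	static ssize_t frob_read(struct file *file, char __user *buf,
				 size_t len, loff_t *ppos)
	{
		struct frob_dev *dev = file->private_data;
		int err;

		/* Sleeps like TASK_UNINTERRUPTIBLE, but SIGKILL still works. */
		err = wait_event_killable(dev->wq, frob_data_ready(dev));
		if (err)
			return err;	/* -ERESTARTSYS: a fatal signal arrived */

		return frob_copy_out(dev, buf, len);
	}
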
diff --git a/kernel/exit.c b/kernel/exit.c
index 549c0558ba6..bfb1c0e940e 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -249,7 +249,7 @@ static int has_stopped_jobs(struct pid *pgrp)
 	struct task_struct *p;
 
 	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
-		if (p->state != TASK_STOPPED)
+		if (!task_is_stopped(p))
 			continue;
 		retval = 1;
 		break;
@@ -614,7 +614,7 @@ reparent_thread(struct task_struct *p, struct task_struct *father, int traced)
 	p->parent = p->real_parent;
 	add_parent(p);
 
-	if (p->state == TASK_TRACED) {
+	if (task_is_traced(p)) {
 		/*
 		 * If it was at a trace stop, turn it into
 		 * a normal stop since it's no longer being
@@ -1563,60 +1563,51 @@ repeat:
 		}
 		allowed = 1;
 
-		switch (p->state) {
-		case TASK_TRACED:
-			/*
-			 * When we hit the race with PTRACE_ATTACH,
-			 * we will not report this child. But the
-			 * race means it has not yet been moved to
-			 * our ptrace_children list, so we need to
-			 * set the flag here to avoid a spurious ECHILD
-			 * when the race happens with the only child.
-			 */
-			flag = 1;
-			if (!my_ptrace_child(p))
-				continue;
-			/*FALLTHROUGH*/
-		case TASK_STOPPED:
+		if (task_is_stopped_or_traced(p)) {
 			/*
 			 * It's stopped now, so it might later
 			 * continue, exit, or stop again.
+			 *
+			 * When we hit the race with PTRACE_ATTACH, we
+			 * will not report this child. But the race
+			 * means it has not yet been moved to our
+			 * ptrace_children list, so we need to set the
+			 * flag here to avoid a spurious ECHILD when
+			 * the race happens with the only child.
 			 */
 			flag = 1;
-			if (!(options & WUNTRACED) &&
-			    !my_ptrace_child(p))
-				continue;
+
+			if (!my_ptrace_child(p)) {
+				if (task_is_traced(p))
+					continue;
+				if (!(options & WUNTRACED))
+					continue;
+			}
+
 			retval = wait_task_stopped(p, ret == 2,
-						   (options & WNOWAIT),
-						   infop,
-						   stat_addr, ru);
+					(options & WNOWAIT), infop,
+					stat_addr, ru);
 			if (retval == -EAGAIN)
 				goto repeat;
 			if (retval != 0) /* He released the lock. */
 				goto end;
-			break;
-		default:
-		// case EXIT_DEAD:
-			if (p->exit_state == EXIT_DEAD)
-				continue;
-		// case EXIT_ZOMBIE:
-			if (p->exit_state == EXIT_ZOMBIE) {
-				/*
-				 * Eligible but we cannot release
-				 * it yet:
-				 */
-				if (ret == 2)
-					goto check_continued;
-				if (!likely(options & WEXITED))
-					continue;
-				retval = wait_task_zombie(
-					p, (options & WNOWAIT),
-					infop, stat_addr, ru);
-				/* He released the lock. */
-				if (retval != 0)
-					goto end;
-				break;
-			}
+		} else if (p->exit_state == EXIT_DEAD) {
+			continue;
+		} else if (p->exit_state == EXIT_ZOMBIE) {
+			/*
+			 * Eligible but we cannot release it yet:
+			 */
+			if (ret == 2)
+				goto check_continued;
+			if (!likely(options & WEXITED))
+				continue;
+			retval = wait_task_zombie(p,
+					(options & WNOWAIT), infop,
+					stat_addr, ru);
+			/* He released the lock. */
+			if (retval != 0)
+				goto end;
+		} else {
 check_continued:
 			/*
 			 * It's running now, so it might later
@@ -1625,12 +1616,11 @@ check_continued:
 			flag = 1;
 			if (!unlikely(options & WCONTINUED))
 				continue;
-			retval = wait_task_continued(
-				p, (options & WNOWAIT),
-				infop, stat_addr, ru);
+			retval = wait_task_continued(p,
+					(options & WNOWAIT), infop,
+					stat_addr, ru);
 			if (retval != 0) /* He released the lock. */
 				goto end;
-			break;
 		}
 	}
 	if (!flag) {
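
The task_is_*() and task_contributes_to_load() predicates used above come from include/linux/sched.h, added earlier in this series, not from this diff. Because TASK_STOPPED and TASK_TRACED now also carry the TASK_WAKEKILL bit, direct equality tests like p->state == TASK_STOPPED would no longer match; the helpers mask against the raw __TASK_* bits instead. Approximately:

	#define task_is_traced(task)	((task->state & __TASK_TRACED) != 0)
	#define task_is_stopped(task)	((task->state & __TASK_STOPPED) != 0)
	#define task_is_stopped_or_traced(task) \
			((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
	#define task_contributes_to_load(task) \
			((task->state & TASK_UNINTERRUPTIBLE) != 0)
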
diff --git a/kernel/mutex.c b/kernel/mutex.c
index d7fe50cc556..d9ec9b66625 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -166,9 +166,12 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 		 * got a signal? (This code gets eliminated in the
 		 * TASK_UNINTERRUPTIBLE case.)
 		 */
-		if (unlikely(state == TASK_INTERRUPTIBLE &&
-					signal_pending(task))) {
-			mutex_remove_waiter(lock, &waiter, task_thread_info(task));
+		if (unlikely((state == TASK_INTERRUPTIBLE &&
+					signal_pending(task)) ||
+			     (state == TASK_KILLABLE &&
+					fatal_signal_pending(task)))) {
+			mutex_remove_waiter(lock, &waiter,
+					    task_thread_info(task));
 			mutex_release(&lock->dep_map, 1, ip);
 			spin_unlock_mutex(&lock->wait_lock, flags);
 
@@ -211,6 +214,14 @@ mutex_lock_nested(struct mutex *lock, unsigned int subclass)
 EXPORT_SYMBOL_GPL(mutex_lock_nested);
 
 int __sched
+mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
+{
+	might_sleep();
+	return __mutex_lock_common(lock, TASK_KILLABLE, subclass, _RET_IP_);
+}
+EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);
+
+int __sched
 mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
 {
 	might_sleep();
@@ -272,6 +283,9 @@ __mutex_unlock_slowpath(atomic_t *lock_count)
  * mutex_lock_interruptible() and mutex_trylock().
  */
 static int fastcall noinline __sched
+__mutex_lock_killable_slowpath(atomic_t *lock_count);
+
+static noinline int fastcall __sched
 __mutex_lock_interruptible_slowpath(atomic_t *lock_count);
 
 /***
@@ -294,6 +308,14 @@ int fastcall __sched mutex_lock_interruptible(struct mutex *lock)
 
 EXPORT_SYMBOL(mutex_lock_interruptible);
 
+int fastcall __sched mutex_lock_killable(struct mutex *lock)
+{
+	might_sleep();
+	return __mutex_fastpath_lock_retval
+			(&lock->count, __mutex_lock_killable_slowpath);
+}
+EXPORT_SYMBOL(mutex_lock_killable);
+
 static void fastcall noinline __sched
 __mutex_lock_slowpath(atomic_t *lock_count)
 {
@@ -303,6 +325,14 @@ __mutex_lock_slowpath(atomic_t *lock_count)
 }
 
 static int fastcall noinline __sched
+__mutex_lock_killable_slowpath(atomic_t *lock_count)
+{
+	struct mutex *lock = container_of(lock_count, struct mutex, count);
+
+	return __mutex_lock_common(lock, TASK_KILLABLE, 0, _RET_IP_);
+}
+
+static noinline int fastcall __sched
 __mutex_lock_interruptible_slowpath(atomic_t *lock_count)
 {
 	struct mutex *lock = container_of(lock_count, struct mutex, count);
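
mutex_lock_killable() keeps the calling convention of mutex_lock_interruptible(): zero on success, nonzero when the sleep was broken, here only by a fatal signal. The series converts vfs_readdir() to it; a hedged sketch of a typical caller, with hypothetical frob_* names:

	static int frob_ioctl_locked(struct frob_dev *dev, unsigned long arg)
	{
		int err = mutex_lock_killable(&dev->lock);

		if (err)
			return err;	/* fatal signal while waiting for the lock */
		err = frob_do_ioctl(dev, arg);
		mutex_unlock(&dev->lock);
		return err;
	}
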
diff --git a/kernel/power/process.c b/kernel/power/process.c
index 6533923e711..7c2118f9597 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -86,9 +86,9 @@ static void fake_signal_wake_up(struct task_struct *p, int resume)
 
 static void send_fake_signal(struct task_struct *p)
 {
-	if (p->state == TASK_STOPPED)
+	if (task_is_stopped(p))
 		force_sig_specific(SIGSTOP, p);
-	fake_signal_wake_up(p, p->state == TASK_STOPPED);
+	fake_signal_wake_up(p, task_is_stopped(p));
 }
 
 static int has_mm(struct task_struct *p)
@@ -182,7 +182,7 @@ static int try_to_freeze_tasks(int freeze_user_space)
 		if (frozen(p) || !freezeable(p))
 			continue;
 
-		if (p->state == TASK_TRACED && frozen(p->parent)) {
+		if (task_is_traced(p) && frozen(p->parent)) {
 			cancel_freezing(p);
 			continue;
 		}
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index e6e9b8be4b0..b0d4ab4dfd3 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -51,7 +51,7 @@ void __ptrace_link(struct task_struct *child, struct task_struct *new_parent)
 void ptrace_untrace(struct task_struct *child)
 {
 	spin_lock(&child->sighand->siglock);
-	if (child->state == TASK_TRACED) {
+	if (task_is_traced(child)) {
 		if (child->signal->flags & SIGNAL_STOP_STOPPED) {
 			child->state = TASK_STOPPED;
 		} else {
@@ -79,7 +79,7 @@ void __ptrace_unlink(struct task_struct *child)
 		add_parent(child);
 	}
 
-	if (child->state == TASK_TRACED)
+	if (task_is_traced(child))
 		ptrace_untrace(child);
 }
 
@@ -103,9 +103,9 @@ int ptrace_check_attach(struct task_struct *child, int kill)
 	    && child->signal != NULL) {
 		ret = 0;
 		spin_lock_irq(&child->sighand->siglock);
-		if (child->state == TASK_STOPPED) {
+		if (task_is_stopped(child)) {
 			child->state = TASK_TRACED;
-		} else if (child->state != TASK_TRACED && !kill) {
+		} else if (!task_is_traced(child) && !kill) {
 			ret = -ESRCH;
 		}
 		spin_unlock_irq(&child->sighand->siglock);
diff --git a/kernel/sched.c b/kernel/sched.c
index 8355e007e02..9474b23c28b 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1350,7 +1350,7 @@ static int effective_prio(struct task_struct *p)
  */
 static void activate_task(struct rq *rq, struct task_struct *p, int wakeup)
 {
-	if (p->state == TASK_UNINTERRUPTIBLE)
+	if (task_contributes_to_load(p))
 		rq->nr_uninterruptible--;
 
 	enqueue_task(rq, p, wakeup);
@@ -1362,7 +1362,7 @@ static void activate_task(struct rq *rq, struct task_struct *p, int wakeup)
  */
 static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep)
 {
-	if (p->state == TASK_UNINTERRUPTIBLE)
+	if (task_contributes_to_load(p))
 		rq->nr_uninterruptible++;
 
 	dequeue_task(rq, p, sleep);
@@ -1895,8 +1895,7 @@ out:
 
 int fastcall wake_up_process(struct task_struct *p)
 {
-	return try_to_wake_up(p, TASK_STOPPED | TASK_TRACED |
-			      TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE, 0);
+	return try_to_wake_up(p, TASK_ALL, 0);
 }
 EXPORT_SYMBOL(wake_up_process);
 
@@ -4124,8 +4123,7 @@ void complete(struct completion *x)
 
 	spin_lock_irqsave(&x->wait.lock, flags);
 	x->done++;
-	__wake_up_common(&x->wait, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE,
-			 1, 0, NULL);
+	__wake_up_common(&x->wait, TASK_NORMAL, 1, 0, NULL);
 	spin_unlock_irqrestore(&x->wait.lock, flags);
 }
 EXPORT_SYMBOL(complete);
@@ -4136,8 +4134,7 @@ void complete_all(struct completion *x)
 
 	spin_lock_irqsave(&x->wait.lock, flags);
 	x->done += UINT_MAX/2;
-	__wake_up_common(&x->wait, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE,
-			 0, 0, NULL);
+	__wake_up_common(&x->wait, TASK_NORMAL, 0, 0, NULL);
 	spin_unlock_irqrestore(&x->wait.lock, flags);
 }
 EXPORT_SYMBOL(complete_all);
@@ -4151,8 +4148,10 @@ do_wait_for_common(struct completion *x, long timeout, int state)
 		wait.flags |= WQ_FLAG_EXCLUSIVE;
 		__add_wait_queue_tail(&x->wait, &wait);
 		do {
-			if (state == TASK_INTERRUPTIBLE &&
-			    signal_pending(current)) {
+			if ((state == TASK_INTERRUPTIBLE &&
+			     signal_pending(current)) ||
+			    (state == TASK_KILLABLE &&
+			     fatal_signal_pending(current))) {
 				__remove_wait_queue(&x->wait, &wait);
 				return -ERESTARTSYS;
 			}
@@ -4212,6 +4211,15 @@ wait_for_completion_interruptible_timeout(struct completion *x,
 }
 EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
 
+int __sched wait_for_completion_killable(struct completion *x)
+{
+	long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_KILLABLE);
+	if (t == -ERESTARTSYS)
+		return t;
+	return 0;
+}
+EXPORT_SYMBOL(wait_for_completion_killable);
+
 static long __sched
 sleep_on_common(wait_queue_head_t *q, int state, long timeout)
 {
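
wait_for_completion_killable() above maps any fatal-signal interruption to -ERESTARTSYS and otherwise behaves like wait_for_completion(). A usage sketch against a hypothetical device whose IRQ handler calls complete(&dev->ready):

	static int frob_wait_ready(struct frob_dev *dev)
	{
		/* Uninterruptible for ordinary signals; SIGKILL ends the wait. */
		return wait_for_completion_killable(&dev->ready);
	}
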
diff --git a/kernel/signal.c b/kernel/signal.c
index bf49ce6f016..8054dd4e2d7 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -456,15 +456,15 @@ void signal_wake_up(struct task_struct *t, int resume)
 	set_tsk_thread_flag(t, TIF_SIGPENDING);
 
 	/*
-	 * For SIGKILL, we want to wake it up in the stopped/traced case.
-	 * We don't check t->state here because there is a race with it
+	 * For SIGKILL, we want to wake it up in the stopped/traced/killable
+	 * case. We don't check t->state here because there is a race with it
 	 * executing another processor and just now entering stopped state.
 	 * By using wake_up_state, we ensure the process will wake up and
 	 * handle its death signal.
 	 */
 	mask = TASK_INTERRUPTIBLE;
 	if (resume)
-		mask |= TASK_STOPPED | TASK_TRACED;
+		mask |= TASK_WAKEKILL;
 	if (!wake_up_state(t, mask))
 		kick_process(t);
 }
@@ -620,7 +620,7 @@ static void handle_stop_signal(int sig, struct task_struct *p)
 		 * Wake up the stopped thread _after_ setting
 		 * TIF_SIGPENDING
 		 */
-		state = TASK_STOPPED;
+		state = __TASK_STOPPED;
 		if (sig_user_defined(t, SIGCONT) && !sigismember(&t->blocked, SIGCONT)) {
 			set_tsk_thread_flag(t, TIF_SIGPENDING);
 			state |= TASK_INTERRUPTIBLE;
@@ -838,7 +838,7 @@ static inline int wants_signal(int sig, struct task_struct *p)
 		return 0;
 	if (sig == SIGKILL)
 		return 1;
-	if (p->state & (TASK_STOPPED | TASK_TRACED))
+	if (task_is_stopped_or_traced(p))
 		return 0;
 	return task_curr(p) || !signal_pending(p);
 }
@@ -994,6 +994,11 @@ void zap_other_threads(struct task_struct *p)
 	}
 }
 
+int fastcall __fatal_signal_pending(struct task_struct *tsk)
+{
+	return sigismember(&tsk->pending.signal, SIGKILL);
+}
+
 /*
  * Must be called under rcu_read_lock() or with tasklist_lock read-held.
  */
@@ -1441,7 +1446,7 @@ void do_notify_parent(struct task_struct *tsk, int sig)
 	BUG_ON(sig == -1);
 
 	/* do_notify_parent_cldstop should have been called instead. */
-	BUG_ON(tsk->state & (TASK_STOPPED|TASK_TRACED));
+	BUG_ON(task_is_stopped_or_traced(tsk));
 
 	BUG_ON(!tsk->ptrace &&
 	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));
@@ -1729,7 +1734,7 @@ static int do_signal_stop(int signr)
 		 * so this check has no races.
 		 */
 		if (!t->exit_state &&
-		    !(t->state & (TASK_STOPPED|TASK_TRACED))) {
+		    !task_is_stopped_or_traced(t)) {
 			stop_count++;
 			signal_wake_up(t, 0);
 		}
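
The exported __fatal_signal_pending() added above only tests the per-task pending set for SIGKILL. The fatal_signal_pending() form used by __mutex_lock_common() and do_wait_for_common() is a static inline in include/linux/sched.h from the same series, roughly:

	static inline int fatal_signal_pending(struct task_struct *p)
	{
		/* Cheap TIF_SIGPENDING test first, then the SIGKILL bit. */
		return signal_pending(p) && __fatal_signal_pending(p);
	}
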
diff --git a/kernel/timer.c b/kernel/timer.c
index 23f7ead78fa..9fbb472b8cf 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -1099,6 +1099,13 @@ signed long __sched schedule_timeout_interruptible(signed long timeout)
 }
 EXPORT_SYMBOL(schedule_timeout_interruptible);
 
+signed long __sched schedule_timeout_killable(signed long timeout)
+{
+	__set_current_state(TASK_KILLABLE);
+	return schedule_timeout(timeout);
+}
+EXPORT_SYMBOL(schedule_timeout_killable);
+
 signed long __sched schedule_timeout_uninterruptible(signed long timeout)
 {
 	__set_current_state(TASK_UNINTERRUPTIBLE);
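
schedule_timeout_killable() mirrors the existing interruptible/uninterruptible wrappers. A sketch of a polling loop that ignores ordinary signals but aborts promptly on SIGKILL; frob_hw_done() and the 10ms period are made up for illustration:

	while (!frob_hw_done(dev)) {
		if (fatal_signal_pending(current))
			return -EINTR;
		schedule_timeout_killable(msecs_to_jiffies(10));
	}
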
diff --git a/kernel/wait.c b/kernel/wait.c
index 444ddbfaefc..f9876888a56 100644
--- a/kernel/wait.c
+++ b/kernel/wait.c
@@ -215,7 +215,7 @@ void fastcall __wake_up_bit(wait_queue_head_t *wq, void *word, int bit)
 {
 	struct wait_bit_key key = __WAIT_BIT_KEY_INITIALIZER(word, bit);
 	if (waitqueue_active(wq))
-		__wake_up(wq, TASK_INTERRUPTIBLE|TASK_UNINTERRUPTIBLE, 1, &key);
+		__wake_up(wq, TASK_NORMAL, 1, &key);
 }
 EXPORT_SYMBOL(__wake_up_bit);
 
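
For reference, the state masks behind TASK_NORMAL, TASK_ALL and TASK_KILLABLE are defined by the TASK_WAKEKILL patch in include/linux/sched.h, not in this diff; approximately, as they appear in the resulting 2.6.25 tree:

	#define TASK_WAKEKILL		128
	#define TASK_KILLABLE		(TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
	#define TASK_STOPPED		(TASK_WAKEKILL | __TASK_STOPPED)
	#define TASK_TRACED		(TASK_WAKEKILL | __TASK_TRACED)
	#define TASK_NORMAL		(TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
	#define TASK_ALL		(TASK_NORMAL | __TASK_STOPPED | __TASK_TRACED)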