author     John W. Linville <linville@tuxdriver.com>  2006-08-02 14:15:21 -0400
committer  John W. Linville <linville@tuxdriver.com>  2006-08-02 14:15:21 -0400
commit     ebf572b448757190027d8ee34e73deb989ec7b60 (patch)
tree       2320a92b1aed2c9475e291d75c92616af75e195a /kernel
parent     48c86da1a211ef13bbfb1c8f2e35dda44a66b8a1 (diff)
parent     49b1e3ea19b1c95c2f012b8331ffb3b169e4c042 (diff)
Merge branch 'from-linus' into bcm43xx
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/delayacct.c      8
-rw-r--r--  kernel/futex.c          121
-rw-r--r--  kernel/futex_compat.c   34
-rw-r--r--  kernel/hrtimer.c        4
-rw-r--r--  kernel/irq/manage.c     28
-rw-r--r--  kernel/kprobes.c        1
-rw-r--r--  kernel/rcupdate.c       4
-rw-r--r--  kernel/rtmutex.c        2
-rw-r--r--  kernel/sched.c          22
-rw-r--r--  kernel/softirq.c        22
-rw-r--r--  kernel/softlockup.c     4
-rw-r--r--  kernel/taskstats.c      32
-rw-r--r--  kernel/timer.c          8
-rw-r--r--  kernel/workqueue.c      58
14 files changed, 257 insertions, 91 deletions
diff --git a/kernel/delayacct.c b/kernel/delayacct.c
index f05392d64267..57ca3730205d 100644
--- a/kernel/delayacct.c
+++ b/kernel/delayacct.c
@@ -19,15 +19,15 @@
19#include <linux/sysctl.h> 19#include <linux/sysctl.h>
20#include <linux/delayacct.h> 20#include <linux/delayacct.h>
21 21
22int delayacct_on __read_mostly; /* Delay accounting turned on/off */ 22int delayacct_on __read_mostly = 1; /* Delay accounting turned on/off */
23kmem_cache_t *delayacct_cache; 23kmem_cache_t *delayacct_cache;
24 24
25static int __init delayacct_setup_enable(char *str) 25static int __init delayacct_setup_disable(char *str)
26{ 26{
27 delayacct_on = 1; 27 delayacct_on = 0;
28 return 1; 28 return 1;
29} 29}
30__setup("delayacct", delayacct_setup_enable); 30__setup("nodelayacct", delayacct_setup_disable);
31 31
32void delayacct_init(void) 32void delayacct_init(void)
33{ 33{
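
[Editor's note] The delayacct hunk above flips delay accounting to on by default and replaces the "delayacct" enable switch with a "nodelayacct" opt-out registered via __setup(). A minimal userspace sketch of the same default-on/opt-out pattern (the argument parsing and names are illustrative, not part of the patch):

#include <stdio.h>
#include <string.h>

/* On by default; only an explicit "nodelayacct" argument turns it off,
 * mirroring the switch from __setup("delayacct", ...) to
 * __setup("nodelayacct", ...). */
static int delayacct_on = 1;

static void parse_args(int argc, char **argv)
{
	for (int i = 1; i < argc; i++)
		if (strcmp(argv[i], "nodelayacct") == 0)
			delayacct_on = 0;
}

int main(int argc, char **argv)
{
	parse_args(argc, argv);
	printf("delay accounting %s\n", delayacct_on ? "on" : "off");
	return 0;
}

Running it with a "nodelayacct" argument exercises the opt-out path; with no arguments the feature stays enabled.
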
diff --git a/kernel/futex.c b/kernel/futex.c
index cf0c8e21d1ab..dda2049692a2 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -415,15 +415,15 @@ out_unlock:
415 */ 415 */
416void exit_pi_state_list(struct task_struct *curr) 416void exit_pi_state_list(struct task_struct *curr)
417{ 417{
418 struct futex_hash_bucket *hb;
419 struct list_head *next, *head = &curr->pi_state_list; 418 struct list_head *next, *head = &curr->pi_state_list;
420 struct futex_pi_state *pi_state; 419 struct futex_pi_state *pi_state;
420 struct futex_hash_bucket *hb;
421 union futex_key key; 421 union futex_key key;
422 422
423 /* 423 /*
424 * We are a ZOMBIE and nobody can enqueue itself on 424 * We are a ZOMBIE and nobody can enqueue itself on
425 * pi_state_list anymore, but we have to be careful 425 * pi_state_list anymore, but we have to be careful
426 * versus waiters unqueueing themselfs 426 * versus waiters unqueueing themselves:
427 */ 427 */
428 spin_lock_irq(&curr->pi_lock); 428 spin_lock_irq(&curr->pi_lock);
429 while (!list_empty(head)) { 429 while (!list_empty(head)) {
@@ -431,21 +431,24 @@ void exit_pi_state_list(struct task_struct *curr)
431 next = head->next; 431 next = head->next;
432 pi_state = list_entry(next, struct futex_pi_state, list); 432 pi_state = list_entry(next, struct futex_pi_state, list);
433 key = pi_state->key; 433 key = pi_state->key;
434 hb = hash_futex(&key);
434 spin_unlock_irq(&curr->pi_lock); 435 spin_unlock_irq(&curr->pi_lock);
435 436
436 hb = hash_futex(&key);
437 spin_lock(&hb->lock); 437 spin_lock(&hb->lock);
438 438
439 spin_lock_irq(&curr->pi_lock); 439 spin_lock_irq(&curr->pi_lock);
440 /*
441 * We dropped the pi-lock, so re-check whether this
442 * task still owns the PI-state:
443 */
440 if (head->next != next) { 444 if (head->next != next) {
441 spin_unlock(&hb->lock); 445 spin_unlock(&hb->lock);
442 continue; 446 continue;
443 } 447 }
444 448
445 list_del_init(&pi_state->list);
446
447 WARN_ON(pi_state->owner != curr); 449 WARN_ON(pi_state->owner != curr);
448 450 WARN_ON(list_empty(&pi_state->list));
451 list_del_init(&pi_state->list);
449 pi_state->owner = NULL; 452 pi_state->owner = NULL;
450 spin_unlock_irq(&curr->pi_lock); 453 spin_unlock_irq(&curr->pi_lock);
451 454
@@ -470,7 +473,7 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb, struct futex_q *me)
470 head = &hb->chain; 473 head = &hb->chain;
471 474
472 list_for_each_entry_safe(this, next, head, list) { 475 list_for_each_entry_safe(this, next, head, list) {
473 if (match_futex (&this->key, &me->key)) { 476 if (match_futex(&this->key, &me->key)) {
474 /* 477 /*
475 * Another waiter already exists - bump up 478 * Another waiter already exists - bump up
476 * the refcount and return its pi_state: 479 * the refcount and return its pi_state:
@@ -482,6 +485,8 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb, struct futex_q *me)
482 if (unlikely(!pi_state)) 485 if (unlikely(!pi_state))
483 return -EINVAL; 486 return -EINVAL;
484 487
488 WARN_ON(!atomic_read(&pi_state->refcount));
489
485 atomic_inc(&pi_state->refcount); 490 atomic_inc(&pi_state->refcount);
486 me->pi_state = pi_state; 491 me->pi_state = pi_state;
487 492
@@ -490,10 +495,13 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb, struct futex_q *me)
490 } 495 }
491 496
492 /* 497 /*
493 * We are the first waiter - try to look up the real owner and 498 * We are the first waiter - try to look up the real owner and attach
494 * attach the new pi_state to it: 499 * the new pi_state to it, but bail out when the owner died bit is set
500 * and TID = 0:
495 */ 501 */
496 pid = uval & FUTEX_TID_MASK; 502 pid = uval & FUTEX_TID_MASK;
503 if (!pid && (uval & FUTEX_OWNER_DIED))
504 return -ESRCH;
497 p = futex_find_get_task(pid); 505 p = futex_find_get_task(pid);
498 if (!p) 506 if (!p)
499 return -ESRCH; 507 return -ESRCH;
@@ -510,6 +518,7 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb, struct futex_q *me)
510 pi_state->key = me->key; 518 pi_state->key = me->key;
511 519
512 spin_lock_irq(&p->pi_lock); 520 spin_lock_irq(&p->pi_lock);
521 WARN_ON(!list_empty(&pi_state->list));
513 list_add(&pi_state->list, &p->pi_state_list); 522 list_add(&pi_state->list, &p->pi_state_list);
514 pi_state->owner = p; 523 pi_state->owner = p;
515 spin_unlock_irq(&p->pi_lock); 524 spin_unlock_irq(&p->pi_lock);
@@ -573,20 +582,29 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this)
573 * kept enabled while there is PI state around. We must also 582 * kept enabled while there is PI state around. We must also
574 * preserve the owner died bit.) 583 * preserve the owner died bit.)
575 */ 584 */
576 newval = (uval & FUTEX_OWNER_DIED) | FUTEX_WAITERS | new_owner->pid; 585 if (!(uval & FUTEX_OWNER_DIED)) {
586 newval = FUTEX_WAITERS | new_owner->pid;
577 587
578 inc_preempt_count(); 588 inc_preempt_count();
579 curval = futex_atomic_cmpxchg_inatomic(uaddr, uval, newval); 589 curval = futex_atomic_cmpxchg_inatomic(uaddr, uval, newval);
580 dec_preempt_count(); 590 dec_preempt_count();
591 if (curval == -EFAULT)
592 return -EFAULT;
593 if (curval != uval)
594 return -EINVAL;
595 }
581 596
582 if (curval == -EFAULT) 597 spin_lock_irq(&pi_state->owner->pi_lock);
583 return -EFAULT; 598 WARN_ON(list_empty(&pi_state->list));
584 if (curval != uval) 599 list_del_init(&pi_state->list);
585 return -EINVAL; 600 spin_unlock_irq(&pi_state->owner->pi_lock);
586 601
587 list_del_init(&pi_state->owner->pi_state_list); 602 spin_lock_irq(&new_owner->pi_lock);
603 WARN_ON(!list_empty(&pi_state->list));
588 list_add(&pi_state->list, &new_owner->pi_state_list); 604 list_add(&pi_state->list, &new_owner->pi_state_list);
589 pi_state->owner = new_owner; 605 pi_state->owner = new_owner;
606 spin_unlock_irq(&new_owner->pi_lock);
607
590 rt_mutex_unlock(&pi_state->pi_mutex); 608 rt_mutex_unlock(&pi_state->pi_mutex);
591 609
592 return 0; 610 return 0;
@@ -1236,6 +1254,7 @@ static int do_futex_lock_pi(u32 __user *uaddr, int detect, int trylock,
1236 /* Owner died? */ 1254 /* Owner died? */
1237 if (q.pi_state->owner != NULL) { 1255 if (q.pi_state->owner != NULL) {
1238 spin_lock_irq(&q.pi_state->owner->pi_lock); 1256 spin_lock_irq(&q.pi_state->owner->pi_lock);
1257 WARN_ON(list_empty(&q.pi_state->list));
1239 list_del_init(&q.pi_state->list); 1258 list_del_init(&q.pi_state->list);
1240 spin_unlock_irq(&q.pi_state->owner->pi_lock); 1259 spin_unlock_irq(&q.pi_state->owner->pi_lock);
1241 } else 1260 } else
@@ -1244,6 +1263,7 @@ static int do_futex_lock_pi(u32 __user *uaddr, int detect, int trylock,
1244 q.pi_state->owner = current; 1263 q.pi_state->owner = current;
1245 1264
1246 spin_lock_irq(&current->pi_lock); 1265 spin_lock_irq(&current->pi_lock);
1266 WARN_ON(!list_empty(&q.pi_state->list));
1247 list_add(&q.pi_state->list, &current->pi_state_list); 1267 list_add(&q.pi_state->list, &current->pi_state_list);
1248 spin_unlock_irq(&current->pi_lock); 1268 spin_unlock_irq(&current->pi_lock);
1249 1269
@@ -1427,9 +1447,11 @@ retry_locked:
1427 * again. If it succeeds then we can return without waking 1447 * again. If it succeeds then we can return without waking
1428 * anyone else up: 1448 * anyone else up:
1429 */ 1449 */
1430 inc_preempt_count(); 1450 if (!(uval & FUTEX_OWNER_DIED)) {
1431 uval = futex_atomic_cmpxchg_inatomic(uaddr, current->pid, 0); 1451 inc_preempt_count();
1432 dec_preempt_count(); 1452 uval = futex_atomic_cmpxchg_inatomic(uaddr, current->pid, 0);
1453 dec_preempt_count();
1454 }
1433 1455
1434 if (unlikely(uval == -EFAULT)) 1456 if (unlikely(uval == -EFAULT))
1435 goto pi_faulted; 1457 goto pi_faulted;
@@ -1462,9 +1484,11 @@ retry_locked:
1462 /* 1484 /*
1463 * No waiters - kernel unlocks the futex: 1485 * No waiters - kernel unlocks the futex:
1464 */ 1486 */
1465 ret = unlock_futex_pi(uaddr, uval); 1487 if (!(uval & FUTEX_OWNER_DIED)) {
1466 if (ret == -EFAULT) 1488 ret = unlock_futex_pi(uaddr, uval);
1467 goto pi_faulted; 1489 if (ret == -EFAULT)
1490 goto pi_faulted;
1491 }
1468 1492
1469out_unlock: 1493out_unlock:
1470 spin_unlock(&hb->lock); 1494 spin_unlock(&hb->lock);
@@ -1683,9 +1707,9 @@ err_unlock:
1683 * Process a futex-list entry, check whether it's owned by the 1707 * Process a futex-list entry, check whether it's owned by the
1684 * dying task, and do notification if so: 1708 * dying task, and do notification if so:
1685 */ 1709 */
1686int handle_futex_death(u32 __user *uaddr, struct task_struct *curr) 1710int handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi)
1687{ 1711{
1688 u32 uval, nval; 1712 u32 uval, nval, mval;
1689 1713
1690retry: 1714retry:
1691 if (get_user(uval, uaddr)) 1715 if (get_user(uval, uaddr))
@@ -1702,21 +1726,45 @@ retry:
1702 * thread-death.) The rest of the cleanup is done in 1726 * thread-death.) The rest of the cleanup is done in
1703 * userspace. 1727 * userspace.
1704 */ 1728 */
1705 nval = futex_atomic_cmpxchg_inatomic(uaddr, uval, 1729 mval = (uval & FUTEX_WAITERS) | FUTEX_OWNER_DIED;
1706 uval | FUTEX_OWNER_DIED); 1730 nval = futex_atomic_cmpxchg_inatomic(uaddr, uval, mval);
1731
1707 if (nval == -EFAULT) 1732 if (nval == -EFAULT)
1708 return -1; 1733 return -1;
1709 1734
1710 if (nval != uval) 1735 if (nval != uval)
1711 goto retry; 1736 goto retry;
1712 1737
1713 if (uval & FUTEX_WAITERS) 1738 /*
1714 futex_wake(uaddr, 1); 1739 * Wake robust non-PI futexes here. The wakeup of
1740 * PI futexes happens in exit_pi_state():
1741 */
1742 if (!pi) {
1743 if (uval & FUTEX_WAITERS)
1744 futex_wake(uaddr, 1);
1745 }
1715 } 1746 }
1716 return 0; 1747 return 0;
1717} 1748}
1718 1749
1719/* 1750/*
1751 * Fetch a robust-list pointer. Bit 0 signals PI futexes:
1752 */
1753static inline int fetch_robust_entry(struct robust_list __user **entry,
1754 struct robust_list __user **head, int *pi)
1755{
1756 unsigned long uentry;
1757
1758 if (get_user(uentry, (unsigned long *)head))
1759 return -EFAULT;
1760
1761 *entry = (void *)(uentry & ~1UL);
1762 *pi = uentry & 1;
1763
1764 return 0;
1765}
1766
1767/*
1720 * Walk curr->robust_list (very carefully, it's a userspace list!) 1768 * Walk curr->robust_list (very carefully, it's a userspace list!)
1721 * and mark any locks found there dead, and notify any waiters. 1769 * and mark any locks found there dead, and notify any waiters.
1722 * 1770 *
@@ -1726,14 +1774,14 @@ void exit_robust_list(struct task_struct *curr)
1726{ 1774{
1727 struct robust_list_head __user *head = curr->robust_list; 1775 struct robust_list_head __user *head = curr->robust_list;
1728 struct robust_list __user *entry, *pending; 1776 struct robust_list __user *entry, *pending;
1729 unsigned int limit = ROBUST_LIST_LIMIT; 1777 unsigned int limit = ROBUST_LIST_LIMIT, pi, pip;
1730 unsigned long futex_offset; 1778 unsigned long futex_offset;
1731 1779
1732 /* 1780 /*
1733 * Fetch the list head (which was registered earlier, via 1781 * Fetch the list head (which was registered earlier, via
1734 * sys_set_robust_list()): 1782 * sys_set_robust_list()):
1735 */ 1783 */
1736 if (get_user(entry, &head->list.next)) 1784 if (fetch_robust_entry(&entry, &head->list.next, &pi))
1737 return; 1785 return;
1738 /* 1786 /*
1739 * Fetch the relative futex offset: 1787 * Fetch the relative futex offset:
@@ -1744,10 +1792,11 @@ void exit_robust_list(struct task_struct *curr)
1744 * Fetch any possibly pending lock-add first, and handle it 1792 * Fetch any possibly pending lock-add first, and handle it
1745 * if it exists: 1793 * if it exists:
1746 */ 1794 */
1747 if (get_user(pending, &head->list_op_pending)) 1795 if (fetch_robust_entry(&pending, &head->list_op_pending, &pip))
1748 return; 1796 return;
1797
1749 if (pending) 1798 if (pending)
1750 handle_futex_death((void *)pending + futex_offset, curr); 1799 handle_futex_death((void *)pending + futex_offset, curr, pip);
1751 1800
1752 while (entry != &head->list) { 1801 while (entry != &head->list) {
1753 /* 1802 /*
@@ -1756,12 +1805,12 @@ void exit_robust_list(struct task_struct *curr)
1756 */ 1805 */
1757 if (entry != pending) 1806 if (entry != pending)
1758 if (handle_futex_death((void *)entry + futex_offset, 1807 if (handle_futex_death((void *)entry + futex_offset,
1759 curr)) 1808 curr, pi))
1760 return; 1809 return;
1761 /* 1810 /*
1762 * Fetch the next entry in the list: 1811 * Fetch the next entry in the list:
1763 */ 1812 */
1764 if (get_user(entry, &entry->next)) 1813 if (fetch_robust_entry(&entry, &entry->next, &pi))
1765 return; 1814 return;
1766 /* 1815 /*
1767 * Avoid excessively long or circular lists: 1816 * Avoid excessively long or circular lists:
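
[Editor's note] The new fetch_robust_entry() above relies on robust-list entries being at least 2-byte aligned, so bit 0 of the user-space pointer is free to carry the "this is a PI futex" flag. A small standalone sketch of that tagging scheme (names are illustrative; the real helper additionally copies the word from user space with get_user()):

#include <stdint.h>
#include <stdio.h>

struct robust_entry { struct robust_entry *next; };

/* Split a tagged pointer into the aligned entry address and the PI bit. */
static void decode_entry(uintptr_t uentry, struct robust_entry **entry, int *pi)
{
	*entry = (struct robust_entry *)(uentry & ~(uintptr_t)1);
	*pi = (int)(uentry & 1);
}

int main(void)
{
	struct robust_entry node = { NULL };
	struct robust_entry *e;
	int pi;

	/* User space sets bit 0 when it registers a PI lock in the list. */
	uintptr_t tagged = (uintptr_t)&node | 1UL;

	decode_entry(tagged, &e, &pi);
	printf("entry matches: %d, pi: %d\n", e == &node, pi);
	return 0;
}
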
diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
index d1d92b441fb7..d1aab1a452cc 100644
--- a/kernel/futex_compat.c
+++ b/kernel/futex_compat.c
@@ -12,6 +12,23 @@
12 12
13#include <asm/uaccess.h> 13#include <asm/uaccess.h>
14 14
15
16/*
17 * Fetch a robust-list pointer. Bit 0 signals PI futexes:
18 */
19static inline int
20fetch_robust_entry(compat_uptr_t *uentry, struct robust_list __user **entry,
21 compat_uptr_t *head, int *pi)
22{
23 if (get_user(*uentry, head))
24 return -EFAULT;
25
26 *entry = compat_ptr((*uentry) & ~1);
27 *pi = (unsigned int)(*uentry) & 1;
28
29 return 0;
30}
31
15/* 32/*
16 * Walk curr->robust_list (very carefully, it's a userspace list!) 33 * Walk curr->robust_list (very carefully, it's a userspace list!)
17 * and mark any locks found there dead, and notify any waiters. 34 * and mark any locks found there dead, and notify any waiters.
@@ -22,17 +39,16 @@ void compat_exit_robust_list(struct task_struct *curr)
22{ 39{
23 struct compat_robust_list_head __user *head = curr->compat_robust_list; 40 struct compat_robust_list_head __user *head = curr->compat_robust_list;
24 struct robust_list __user *entry, *pending; 41 struct robust_list __user *entry, *pending;
42 unsigned int limit = ROBUST_LIST_LIMIT, pi;
25 compat_uptr_t uentry, upending; 43 compat_uptr_t uentry, upending;
26 unsigned int limit = ROBUST_LIST_LIMIT;
27 compat_long_t futex_offset; 44 compat_long_t futex_offset;
28 45
29 /* 46 /*
30 * Fetch the list head (which was registered earlier, via 47 * Fetch the list head (which was registered earlier, via
31 * sys_set_robust_list()): 48 * sys_set_robust_list()):
32 */ 49 */
33 if (get_user(uentry, &head->list.next)) 50 if (fetch_robust_entry(&uentry, &entry, &head->list.next, &pi))
34 return; 51 return;
35 entry = compat_ptr(uentry);
36 /* 52 /*
37 * Fetch the relative futex offset: 53 * Fetch the relative futex offset:
38 */ 54 */
@@ -42,11 +58,11 @@ void compat_exit_robust_list(struct task_struct *curr)
42 * Fetch any possibly pending lock-add first, and handle it 58 * Fetch any possibly pending lock-add first, and handle it
43 * if it exists: 59 * if it exists:
44 */ 60 */
45 if (get_user(upending, &head->list_op_pending)) 61 if (fetch_robust_entry(&upending, &pending,
62 &head->list_op_pending, &pi))
46 return; 63 return;
47 pending = compat_ptr(upending);
48 if (upending) 64 if (upending)
49 handle_futex_death((void *)pending + futex_offset, curr); 65 handle_futex_death((void *)pending + futex_offset, curr, pi);
50 66
51 while (compat_ptr(uentry) != &head->list) { 67 while (compat_ptr(uentry) != &head->list) {
52 /* 68 /*
@@ -55,15 +71,15 @@ void compat_exit_robust_list(struct task_struct *curr)
55 */ 71 */
56 if (entry != pending) 72 if (entry != pending)
57 if (handle_futex_death((void *)entry + futex_offset, 73 if (handle_futex_death((void *)entry + futex_offset,
58 curr)) 74 curr, pi))
59 return; 75 return;
60 76
61 /* 77 /*
62 * Fetch the next entry in the list: 78 * Fetch the next entry in the list:
63 */ 79 */
64 if (get_user(uentry, (compat_uptr_t *)&entry->next)) 80 if (fetch_robust_entry(&uentry, &entry,
81 (compat_uptr_t *)&entry->next, &pi))
65 return; 82 return;
66 entry = compat_ptr(uentry);
67 /* 83 /*
68 * Avoid excessively long or circular lists: 84 * Avoid excessively long or circular lists:
69 */ 85 */
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index d17766d40dab..be989efc7856 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -835,7 +835,7 @@ static void migrate_hrtimers(int cpu)
835} 835}
836#endif /* CONFIG_HOTPLUG_CPU */ 836#endif /* CONFIG_HOTPLUG_CPU */
837 837
838static int __devinit hrtimer_cpu_notify(struct notifier_block *self, 838static int __cpuinit hrtimer_cpu_notify(struct notifier_block *self,
839 unsigned long action, void *hcpu) 839 unsigned long action, void *hcpu)
840{ 840{
841 long cpu = (long)hcpu; 841 long cpu = (long)hcpu;
@@ -859,7 +859,7 @@ static int __devinit hrtimer_cpu_notify(struct notifier_block *self,
859 return NOTIFY_OK; 859 return NOTIFY_OK;
860} 860}
861 861
862static struct notifier_block __devinitdata hrtimers_nb = { 862static struct notifier_block __cpuinitdata hrtimers_nb = {
863 .notifier_call = hrtimer_cpu_notify, 863 .notifier_call = hrtimer_cpu_notify,
864}; 864};
865 865
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 4e461438e48b..92be519eff26 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -137,16 +137,40 @@ EXPORT_SYMBOL(enable_irq);
137 * @irq: interrupt to control 137 * @irq: interrupt to control
138 * @on: enable/disable power management wakeup 138 * @on: enable/disable power management wakeup
139 * 139 *
140 * Enable/disable power management wakeup mode 140 * Enable/disable power management wakeup mode, which is
141 * disabled by default. Enables and disables must match,
142 * just as they match for non-wakeup mode support.
143 *
144 * Wakeup mode lets this IRQ wake the system from sleep
145 * states like "suspend to RAM".
141 */ 146 */
142int set_irq_wake(unsigned int irq, unsigned int on) 147int set_irq_wake(unsigned int irq, unsigned int on)
143{ 148{
144 struct irq_desc *desc = irq_desc + irq; 149 struct irq_desc *desc = irq_desc + irq;
145 unsigned long flags; 150 unsigned long flags;
146 int ret = -ENXIO; 151 int ret = -ENXIO;
152 int (*set_wake)(unsigned, unsigned) = desc->chip->set_wake;
147 153
154 /* wakeup-capable irqs can be shared between drivers that
155 * don't need to have the same sleep mode behaviors.
156 */
148 spin_lock_irqsave(&desc->lock, flags); 157 spin_lock_irqsave(&desc->lock, flags);
149 if (desc->chip->set_wake) 158 if (on) {
159 if (desc->wake_depth++ == 0)
160 desc->status |= IRQ_WAKEUP;
161 else
162 set_wake = NULL;
163 } else {
164 if (desc->wake_depth == 0) {
165 printk(KERN_WARNING "Unbalanced IRQ %d "
166 "wake disable\n", irq);
167 WARN_ON(1);
168 } else if (--desc->wake_depth == 0)
169 desc->status &= ~IRQ_WAKEUP;
170 else
171 set_wake = NULL;
172 }
173 if (set_wake)
150 ret = desc->chip->set_wake(irq, on); 174 ret = desc->chip->set_wake(irq, on);
151 spin_unlock_irqrestore(&desc->lock, flags); 175 spin_unlock_irqrestore(&desc->lock, flags);
152 return ret; 176 return ret;
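
[Editor's note] The set_irq_wake() rework above turns a blind chip callback into a depth-counted enable/disable, so shared wakeup-capable IRQs only program the hardware on the 0->1 and 1->0 transitions, and an unbalanced disable is warned about instead of underflowing the counter. A rough userspace sketch of that refcounting pattern (illustrative names only):

#include <stdio.h>

struct wake_desc {
	unsigned int wake_depth;
	int hw_wake_enabled;	/* stands in for desc->chip->set_wake() */
};

static void set_wake(struct wake_desc *d, int on)
{
	if (on) {
		if (d->wake_depth++ == 0)
			d->hw_wake_enabled = 1;	/* first user programs the hardware */
	} else {
		if (d->wake_depth == 0)
			fprintf(stderr, "unbalanced wake disable\n");
		else if (--d->wake_depth == 0)
			d->hw_wake_enabled = 0;	/* last user undoes it */
	}
}

int main(void)
{
	struct wake_desc d = { 0, 0 };

	set_wake(&d, 1);	/* depth 0 -> 1: hardware enabled  */
	set_wake(&d, 1);	/* depth 1 -> 2: no hardware call  */
	set_wake(&d, 0);	/* depth 2 -> 1: no hardware call  */
	set_wake(&d, 0);	/* depth 1 -> 0: hardware disabled */
	printf("depth=%u hw=%d\n", d.wake_depth, d.hw_wake_enabled);
	return 0;
}
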
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 64aab081153b..3f57dfdc8f92 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -393,6 +393,7 @@ static int __kprobes add_new_kprobe(struct kprobe *old_p, struct kprobe *p)
393static inline void add_aggr_kprobe(struct kprobe *ap, struct kprobe *p) 393static inline void add_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
394{ 394{
395 copy_kprobe(p, ap); 395 copy_kprobe(p, ap);
396 flush_insn_slot(ap);
396 ap->addr = p->addr; 397 ap->addr = p->addr;
397 ap->pre_handler = aggr_pre_handler; 398 ap->pre_handler = aggr_pre_handler;
398 ap->fault_handler = aggr_fault_handler; 399 ap->fault_handler = aggr_fault_handler;
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index 759805c9859a..436ab35f6fa7 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -548,7 +548,7 @@ static void __devinit rcu_online_cpu(int cpu)
548 tasklet_init(&per_cpu(rcu_tasklet, cpu), rcu_process_callbacks, 0UL); 548 tasklet_init(&per_cpu(rcu_tasklet, cpu), rcu_process_callbacks, 0UL);
549} 549}
550 550
551static int __devinit rcu_cpu_notify(struct notifier_block *self, 551static int __cpuinit rcu_cpu_notify(struct notifier_block *self,
552 unsigned long action, void *hcpu) 552 unsigned long action, void *hcpu)
553{ 553{
554 long cpu = (long)hcpu; 554 long cpu = (long)hcpu;
@@ -565,7 +565,7 @@ static int __devinit rcu_cpu_notify(struct notifier_block *self,
565 return NOTIFY_OK; 565 return NOTIFY_OK;
566} 566}
567 567
568static struct notifier_block __devinitdata rcu_nb = { 568static struct notifier_block __cpuinitdata rcu_nb = {
569 .notifier_call = rcu_cpu_notify, 569 .notifier_call = rcu_cpu_notify,
570}; 570};
571 571
diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c
index d2ef13b485e7..3e13a1e5856f 100644
--- a/kernel/rtmutex.c
+++ b/kernel/rtmutex.c
@@ -7,6 +7,8 @@
7 * Copyright (C) 2005-2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com> 7 * Copyright (C) 2005-2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
8 * Copyright (C) 2005 Kihon Technologies Inc., Steven Rostedt 8 * Copyright (C) 2005 Kihon Technologies Inc., Steven Rostedt
9 * Copyright (C) 2006 Esben Nielsen 9 * Copyright (C) 2006 Esben Nielsen
10 *
11 * See Documentation/rt-mutex-design.txt for details.
10 */ 12 */
11#include <linux/spinlock.h> 13#include <linux/spinlock.h>
12#include <linux/module.h> 14#include <linux/module.h>
diff --git a/kernel/sched.c b/kernel/sched.c
index b44b9a43b0fc..a2be2d055299 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -4456,9 +4456,9 @@ asmlinkage long sys_sched_yield(void)
4456 return 0; 4456 return 0;
4457} 4457}
4458 4458
4459static inline int __resched_legal(void) 4459static inline int __resched_legal(int expected_preempt_count)
4460{ 4460{
4461 if (unlikely(preempt_count())) 4461 if (unlikely(preempt_count() != expected_preempt_count))
4462 return 0; 4462 return 0;
4463 if (unlikely(system_state != SYSTEM_RUNNING)) 4463 if (unlikely(system_state != SYSTEM_RUNNING))
4464 return 0; 4464 return 0;
@@ -4484,7 +4484,7 @@ static void __cond_resched(void)
4484 4484
4485int __sched cond_resched(void) 4485int __sched cond_resched(void)
4486{ 4486{
4487 if (need_resched() && __resched_legal()) { 4487 if (need_resched() && __resched_legal(0)) {
4488 __cond_resched(); 4488 __cond_resched();
4489 return 1; 4489 return 1;
4490 } 4490 }
@@ -4510,7 +4510,7 @@ int cond_resched_lock(spinlock_t *lock)
4510 ret = 1; 4510 ret = 1;
4511 spin_lock(lock); 4511 spin_lock(lock);
4512 } 4512 }
4513 if (need_resched() && __resched_legal()) { 4513 if (need_resched() && __resched_legal(1)) {
4514 spin_release(&lock->dep_map, 1, _THIS_IP_); 4514 spin_release(&lock->dep_map, 1, _THIS_IP_);
4515 _raw_spin_unlock(lock); 4515 _raw_spin_unlock(lock);
4516 preempt_enable_no_resched(); 4516 preempt_enable_no_resched();
@@ -4526,7 +4526,7 @@ int __sched cond_resched_softirq(void)
4526{ 4526{
4527 BUG_ON(!in_softirq()); 4527 BUG_ON(!in_softirq());
4528 4528
4529 if (need_resched() && __resched_legal()) { 4529 if (need_resched() && __resched_legal(0)) {
4530 raw_local_irq_disable(); 4530 raw_local_irq_disable();
4531 _local_bh_enable(); 4531 _local_bh_enable();
4532 raw_local_irq_enable(); 4532 raw_local_irq_enable();
@@ -6494,7 +6494,12 @@ static int build_sched_domains(const cpumask_t *cpu_map)
6494 for (i = 0; i < MAX_NUMNODES; i++) 6494 for (i = 0; i < MAX_NUMNODES; i++)
6495 init_numa_sched_groups_power(sched_group_nodes[i]); 6495 init_numa_sched_groups_power(sched_group_nodes[i]);
6496 6496
6497 init_numa_sched_groups_power(sched_group_allnodes); 6497 if (sched_group_allnodes) {
6498 int group = cpu_to_allnodes_group(first_cpu(*cpu_map));
6499 struct sched_group *sg = &sched_group_allnodes[group];
6500
6501 init_numa_sched_groups_power(sg);
6502 }
6498#endif 6503#endif
6499 6504
6500 /* Attach the domains */ 6505 /* Attach the domains */
@@ -6761,6 +6766,11 @@ void __init sched_init(void)
6761 } 6766 }
6762 6767
6763 set_load_weight(&init_task); 6768 set_load_weight(&init_task);
6769
6770#ifdef CONFIG_RT_MUTEXES
6771 plist_head_init(&init_task.pi_waiters, &init_task.pi_lock);
6772#endif
6773
6764 /* 6774 /*
6765 * The boot idle thread does lazy MMU switching as well: 6775 * The boot idle thread does lazy MMU switching as well:
6766 */ 6776 */
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 0f08a84ae307..3789ca98197c 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -65,6 +65,7 @@ static inline void wakeup_softirqd(void)
65 * This one is for softirq.c-internal use, 65 * This one is for softirq.c-internal use,
66 * where hardirqs are disabled legitimately: 66 * where hardirqs are disabled legitimately:
67 */ 67 */
68#ifdef CONFIG_TRACE_IRQFLAGS
68static void __local_bh_disable(unsigned long ip) 69static void __local_bh_disable(unsigned long ip)
69{ 70{
70 unsigned long flags; 71 unsigned long flags;
@@ -80,6 +81,13 @@ static void __local_bh_disable(unsigned long ip)
80 trace_softirqs_off(ip); 81 trace_softirqs_off(ip);
81 raw_local_irq_restore(flags); 82 raw_local_irq_restore(flags);
82} 83}
84#else /* !CONFIG_TRACE_IRQFLAGS */
85static inline void __local_bh_disable(unsigned long ip)
86{
87 add_preempt_count(SOFTIRQ_OFFSET);
88 barrier();
89}
90#endif /* CONFIG_TRACE_IRQFLAGS */
83 91
84void local_bh_disable(void) 92void local_bh_disable(void)
85{ 93{
@@ -121,12 +129,16 @@ EXPORT_SYMBOL(_local_bh_enable);
121 129
122void local_bh_enable(void) 130void local_bh_enable(void)
123{ 131{
132#ifdef CONFIG_TRACE_IRQFLAGS
124 unsigned long flags; 133 unsigned long flags;
125 134
126 WARN_ON_ONCE(in_irq()); 135 WARN_ON_ONCE(in_irq());
136#endif
127 WARN_ON_ONCE(irqs_disabled()); 137 WARN_ON_ONCE(irqs_disabled());
128 138
139#ifdef CONFIG_TRACE_IRQFLAGS
129 local_irq_save(flags); 140 local_irq_save(flags);
141#endif
130 /* 142 /*
131 * Are softirqs going to be turned on now: 143 * Are softirqs going to be turned on now:
132 */ 144 */
@@ -142,18 +154,22 @@ void local_bh_enable(void)
142 do_softirq(); 154 do_softirq();
143 155
144 dec_preempt_count(); 156 dec_preempt_count();
157#ifdef CONFIG_TRACE_IRQFLAGS
145 local_irq_restore(flags); 158 local_irq_restore(flags);
159#endif
146 preempt_check_resched(); 160 preempt_check_resched();
147} 161}
148EXPORT_SYMBOL(local_bh_enable); 162EXPORT_SYMBOL(local_bh_enable);
149 163
150void local_bh_enable_ip(unsigned long ip) 164void local_bh_enable_ip(unsigned long ip)
151{ 165{
166#ifdef CONFIG_TRACE_IRQFLAGS
152 unsigned long flags; 167 unsigned long flags;
153 168
154 WARN_ON_ONCE(in_irq()); 169 WARN_ON_ONCE(in_irq());
155 170
156 local_irq_save(flags); 171 local_irq_save(flags);
172#endif
157 /* 173 /*
158 * Are softirqs going to be turned on now: 174 * Are softirqs going to be turned on now:
159 */ 175 */
@@ -169,7 +185,9 @@ void local_bh_enable_ip(unsigned long ip)
169 do_softirq(); 185 do_softirq();
170 186
171 dec_preempt_count(); 187 dec_preempt_count();
188#ifdef CONFIG_TRACE_IRQFLAGS
172 local_irq_restore(flags); 189 local_irq_restore(flags);
190#endif
173 preempt_check_resched(); 191 preempt_check_resched();
174} 192}
175EXPORT_SYMBOL(local_bh_enable_ip); 193EXPORT_SYMBOL(local_bh_enable_ip);
@@ -547,7 +565,7 @@ static void takeover_tasklets(unsigned int cpu)
547} 565}
548#endif /* CONFIG_HOTPLUG_CPU */ 566#endif /* CONFIG_HOTPLUG_CPU */
549 567
550static int __devinit cpu_callback(struct notifier_block *nfb, 568static int __cpuinit cpu_callback(struct notifier_block *nfb,
551 unsigned long action, 569 unsigned long action,
552 void *hcpu) 570 void *hcpu)
553{ 571{
@@ -587,7 +605,7 @@ static int __devinit cpu_callback(struct notifier_block *nfb,
587 return NOTIFY_OK; 605 return NOTIFY_OK;
588} 606}
589 607
590static struct notifier_block __devinitdata cpu_nfb = { 608static struct notifier_block __cpuinitdata cpu_nfb = {
591 .notifier_call = cpu_callback 609 .notifier_call = cpu_callback
592}; 610};
593 611
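
[Editor's note] The softirq.c hunks above wrap the flag save/restore and the in_irq() warning in #ifdef CONFIG_TRACE_IRQFLAGS, so the non-tracing build keeps only the bare preempt-count manipulation. A compressed sketch of that compile-time split (the config symbol and counter here are stand-ins, not the kernel's):

#include <stdio.h>

static int preempt_count;

#ifdef TRACE_IRQFLAGS
/* Tracing build: record the event alongside the count manipulation. */
static void bh_disable(void)
{
	printf("trace: softirqs off\n");
	preempt_count++;
}
#else
/* Non-tracing build: only the bare increment remains. */
static inline void bh_disable(void)
{
	preempt_count++;
}
#endif

int main(void)
{
	bh_disable();
	printf("preempt_count=%d\n", preempt_count);
	return 0;
}

Compiling with -DTRACE_IRQFLAGS selects the verbose variant; without it the stub reduces to the increment, which is the cost the patch removes from non-tracing kernels.
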
diff --git a/kernel/softlockup.c b/kernel/softlockup.c
index 6b76caa22981..03e6a2b0b787 100644
--- a/kernel/softlockup.c
+++ b/kernel/softlockup.c
@@ -104,7 +104,7 @@ static int watchdog(void * __bind_cpu)
104/* 104/*
105 * Create/destroy watchdog threads as CPUs come and go: 105 * Create/destroy watchdog threads as CPUs come and go:
106 */ 106 */
107static int __devinit 107static int __cpuinit
108cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) 108cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
109{ 109{
110 int hotcpu = (unsigned long)hcpu; 110 int hotcpu = (unsigned long)hcpu;
@@ -142,7 +142,7 @@ cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
142 return NOTIFY_OK; 142 return NOTIFY_OK;
143} 143}
144 144
145static struct notifier_block __devinitdata cpu_nfb = { 145static struct notifier_block __cpuinitdata cpu_nfb = {
146 .notifier_call = cpu_callback 146 .notifier_call = cpu_callback
147}; 147};
148 148
diff --git a/kernel/taskstats.c b/kernel/taskstats.c
index f45179ce028e..e78187657330 100644
--- a/kernel/taskstats.c
+++ b/kernel/taskstats.c
@@ -121,46 +121,45 @@ static int send_reply(struct sk_buff *skb, pid_t pid)
121/* 121/*
122 * Send taskstats data in @skb to listeners registered for @cpu's exit data 122 * Send taskstats data in @skb to listeners registered for @cpu's exit data
123 */ 123 */
124static int send_cpu_listeners(struct sk_buff *skb, unsigned int cpu) 124static void send_cpu_listeners(struct sk_buff *skb, unsigned int cpu)
125{ 125{
126 struct genlmsghdr *genlhdr = nlmsg_data((struct nlmsghdr *)skb->data); 126 struct genlmsghdr *genlhdr = nlmsg_data((struct nlmsghdr *)skb->data);
127 struct listener_list *listeners; 127 struct listener_list *listeners;
128 struct listener *s, *tmp; 128 struct listener *s, *tmp;
129 struct sk_buff *skb_next, *skb_cur = skb; 129 struct sk_buff *skb_next, *skb_cur = skb;
130 void *reply = genlmsg_data(genlhdr); 130 void *reply = genlmsg_data(genlhdr);
131 int rc, ret, delcount = 0; 131 int rc, delcount = 0;
132 132
133 rc = genlmsg_end(skb, reply); 133 rc = genlmsg_end(skb, reply);
134 if (rc < 0) { 134 if (rc < 0) {
135 nlmsg_free(skb); 135 nlmsg_free(skb);
136 return rc; 136 return;
137 } 137 }
138 138
139 rc = 0; 139 rc = 0;
140 listeners = &per_cpu(listener_array, cpu); 140 listeners = &per_cpu(listener_array, cpu);
141 down_read(&listeners->sem); 141 down_read(&listeners->sem);
142 list_for_each_entry_safe(s, tmp, &listeners->list, list) { 142 list_for_each_entry(s, &listeners->list, list) {
143 skb_next = NULL; 143 skb_next = NULL;
144 if (!list_is_last(&s->list, &listeners->list)) { 144 if (!list_is_last(&s->list, &listeners->list)) {
145 skb_next = skb_clone(skb_cur, GFP_KERNEL); 145 skb_next = skb_clone(skb_cur, GFP_KERNEL);
146 if (!skb_next) { 146 if (!skb_next)
147 nlmsg_free(skb_cur);
148 rc = -ENOMEM;
149 break; 147 break;
150 }
151 } 148 }
152 ret = genlmsg_unicast(skb_cur, s->pid); 149 rc = genlmsg_unicast(skb_cur, s->pid);
153 if (ret == -ECONNREFUSED) { 150 if (rc == -ECONNREFUSED) {
154 s->valid = 0; 151 s->valid = 0;
155 delcount++; 152 delcount++;
156 rc = ret;
157 } 153 }
158 skb_cur = skb_next; 154 skb_cur = skb_next;
159 } 155 }
160 up_read(&listeners->sem); 156 up_read(&listeners->sem);
161 157
158 if (skb_cur)
159 nlmsg_free(skb_cur);
160
162 if (!delcount) 161 if (!delcount)
163 return rc; 162 return;
164 163
165 /* Delete invalidated entries */ 164 /* Delete invalidated entries */
166 down_write(&listeners->sem); 165 down_write(&listeners->sem);
@@ -171,13 +170,12 @@ static int send_cpu_listeners(struct sk_buff *skb, unsigned int cpu)
171 } 170 }
172 } 171 }
173 up_write(&listeners->sem); 172 up_write(&listeners->sem);
174 return rc;
175} 173}
176 174
177static int fill_pid(pid_t pid, struct task_struct *pidtsk, 175static int fill_pid(pid_t pid, struct task_struct *pidtsk,
178 struct taskstats *stats) 176 struct taskstats *stats)
179{ 177{
180 int rc; 178 int rc = 0;
181 struct task_struct *tsk = pidtsk; 179 struct task_struct *tsk = pidtsk;
182 180
183 if (!pidtsk) { 181 if (!pidtsk) {
@@ -196,12 +194,10 @@ static int fill_pid(pid_t pid, struct task_struct *pidtsk,
196 * Each accounting subsystem adds calls to its functions to 194 * Each accounting subsystem adds calls to its functions to
197 * fill in relevant parts of struct taskstsats as follows 195 * fill in relevant parts of struct taskstsats as follows
198 * 196 *
199 * rc = per-task-foo(stats, tsk); 197 * per-task-foo(stats, tsk);
200 * if (rc)
201 * goto err;
202 */ 198 */
203 199
204 rc = delayacct_add_tsk(stats, tsk); 200 delayacct_add_tsk(stats, tsk);
205 stats->version = TASKSTATS_VERSION; 201 stats->version = TASKSTATS_VERSION;
206 202
207 /* Define err: label here if needed */ 203 /* Define err: label here if needed */
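
[Editor's note] send_cpu_listeners() above now clones the message once per remaining listener, lets each unicast consume the current buffer, and frees whatever clone is left over when the loop stops early, instead of propagating an error code no caller used. A userspace analogue of that clone-and-hand-off loop (heap buffers stand in for skbs; names are illustrative):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void send_to_listeners(const char *msg, int nr_listeners)
{
	char *cur = strdup(msg);

	for (int i = 0; i < nr_listeners && cur; i++) {
		char *next = NULL;

		/* Clone only while another listener still follows. */
		if (i < nr_listeners - 1) {
			next = strdup(cur);
			if (!next)
				break;	/* bail out; cur is freed below */
		}
		printf("listener %d gets: %s\n", i, cur);
		free(cur);	/* the "unicast" consumes the current buffer */
		cur = next;
	}
	free(cur);	/* leftover clone when the loop exited early */
}

int main(void)
{
	send_to_listeners("exit stats", 3);
	return 0;
}
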
diff --git a/kernel/timer.c b/kernel/timer.c
index 05809c2e2fd6..b650f04888ed 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -84,7 +84,7 @@ typedef struct tvec_t_base_s tvec_base_t;
84 84
85tvec_base_t boot_tvec_bases; 85tvec_base_t boot_tvec_bases;
86EXPORT_SYMBOL(boot_tvec_bases); 86EXPORT_SYMBOL(boot_tvec_bases);
87static DEFINE_PER_CPU(tvec_base_t *, tvec_bases) = { &boot_tvec_bases }; 87static DEFINE_PER_CPU(tvec_base_t *, tvec_bases) = &boot_tvec_bases;
88 88
89static inline void set_running_timer(tvec_base_t *base, 89static inline void set_running_timer(tvec_base_t *base,
90 struct timer_list *timer) 90 struct timer_list *timer)
@@ -408,7 +408,7 @@ static int cascade(tvec_base_t *base, tvec_t *tv, int index)
408 * This function cascades all vectors and executes all expired timer 408 * This function cascades all vectors and executes all expired timer
409 * vectors. 409 * vectors.
410 */ 410 */
411#define INDEX(N) (base->timer_jiffies >> (TVR_BITS + N * TVN_BITS)) & TVN_MASK 411#define INDEX(N) ((base->timer_jiffies >> (TVR_BITS + (N) * TVN_BITS)) & TVN_MASK)
412 412
413static inline void __run_timers(tvec_base_t *base) 413static inline void __run_timers(tvec_base_t *base)
414{ 414{
@@ -1688,7 +1688,7 @@ static void __devinit migrate_timers(int cpu)
1688} 1688}
1689#endif /* CONFIG_HOTPLUG_CPU */ 1689#endif /* CONFIG_HOTPLUG_CPU */
1690 1690
1691static int __devinit timer_cpu_notify(struct notifier_block *self, 1691static int __cpuinit timer_cpu_notify(struct notifier_block *self,
1692 unsigned long action, void *hcpu) 1692 unsigned long action, void *hcpu)
1693{ 1693{
1694 long cpu = (long)hcpu; 1694 long cpu = (long)hcpu;
@@ -1708,7 +1708,7 @@ static int __devinit timer_cpu_notify(struct notifier_block *self,
1708 return NOTIFY_OK; 1708 return NOTIFY_OK;
1709} 1709}
1710 1710
1711static struct notifier_block __devinitdata timers_nb = { 1711static struct notifier_block __cpuinitdata timers_nb = {
1712 .notifier_call = timer_cpu_notify, 1712 .notifier_call = timer_cpu_notify,
1713}; 1713};
1714 1714
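
[Editor's note] The INDEX() change in timer.c is a classic macro-hygiene fix: both the argument N and the whole expression get parenthesised so the macro still evaluates correctly with compound arguments and inside larger expressions. A small demonstration of why the old form misbehaves (the jiffies value is made up; only the macro shapes match the patch):

#include <stdio.h>

#define TVR_BITS 8
#define TVN_BITS 6
#define TVN_MASK ((1 << TVN_BITS) - 1)

/* Old form: neither the argument nor the result is parenthesised. */
#define INDEX_OLD(N) (jiffies >> (TVR_BITS + N * TVN_BITS)) & TVN_MASK
/* New form from the patch. */
#define INDEX_NEW(N) ((jiffies >> (TVR_BITS + (N) * TVN_BITS)) & TVN_MASK)

int main(void)
{
	unsigned long jiffies = 0x12345678UL;
	int i = 1;

	/* With "i + 1" the old macro shifts by TVR_BITS + i + 1*TVN_BITS = 15
	 * bits instead of the intended TVR_BITS + 2*TVN_BITS = 20 bits. */
	printf("old: %lu\n", (unsigned long)(INDEX_OLD(i + 1)));
	printf("new: %lu\n", (unsigned long)(INDEX_NEW(i + 1)));
	return 0;
}
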
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index eebb1d839235..448e8f7b342d 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -93,9 +93,12 @@ static void __queue_work(struct cpu_workqueue_struct *cwq,
93 spin_unlock_irqrestore(&cwq->lock, flags); 93 spin_unlock_irqrestore(&cwq->lock, flags);
94} 94}
95 95
96/* 96/**
97 * Queue work on a workqueue. Return non-zero if it was successfully 97 * queue_work - queue work on a workqueue
98 * added. 98 * @wq: workqueue to use
99 * @work: work to queue
100 *
101 * Returns non-zero if it was successfully added.
99 * 102 *
100 * We queue the work to the CPU it was submitted, but there is no 103 * We queue the work to the CPU it was submitted, but there is no
101 * guarantee that it will be processed by that CPU. 104 * guarantee that it will be processed by that CPU.
@@ -128,6 +131,14 @@ static void delayed_work_timer_fn(unsigned long __data)
128 __queue_work(per_cpu_ptr(wq->cpu_wq, cpu), work); 131 __queue_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
129} 132}
130 133
134/**
135 * queue_delayed_work - queue work on a workqueue after delay
136 * @wq: workqueue to use
137 * @work: work to queue
138 * @delay: number of jiffies to wait before queueing
139 *
140 * Returns non-zero if it was successfully added.
141 */
131int fastcall queue_delayed_work(struct workqueue_struct *wq, 142int fastcall queue_delayed_work(struct workqueue_struct *wq,
132 struct work_struct *work, unsigned long delay) 143 struct work_struct *work, unsigned long delay)
133{ 144{
@@ -150,6 +161,15 @@ int fastcall queue_delayed_work(struct workqueue_struct *wq,
150} 161}
151EXPORT_SYMBOL_GPL(queue_delayed_work); 162EXPORT_SYMBOL_GPL(queue_delayed_work);
152 163
164/**
165 * queue_delayed_work_on - queue work on specific CPU after delay
166 * @cpu: CPU number to execute work on
167 * @wq: workqueue to use
168 * @work: work to queue
169 * @delay: number of jiffies to wait before queueing
170 *
171 * Returns non-zero if it was successfully added.
172 */
153int queue_delayed_work_on(int cpu, struct workqueue_struct *wq, 173int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
154 struct work_struct *work, unsigned long delay) 174 struct work_struct *work, unsigned long delay)
155{ 175{
@@ -275,8 +295,9 @@ static void flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
275 } 295 }
276} 296}
277 297
278/* 298/**
279 * flush_workqueue - ensure that any scheduled work has run to completion. 299 * flush_workqueue - ensure that any scheduled work has run to completion.
300 * @wq: workqueue to flush
280 * 301 *
281 * Forces execution of the workqueue and blocks until its completion. 302 * Forces execution of the workqueue and blocks until its completion.
282 * This is typically used in driver shutdown handlers. 303 * This is typically used in driver shutdown handlers.
@@ -400,6 +421,12 @@ static void cleanup_workqueue_thread(struct workqueue_struct *wq, int cpu)
400 kthread_stop(p); 421 kthread_stop(p);
401} 422}
402 423
424/**
425 * destroy_workqueue - safely terminate a workqueue
426 * @wq: target workqueue
427 *
428 * Safely destroy a workqueue. All work currently pending will be done first.
429 */
403void destroy_workqueue(struct workqueue_struct *wq) 430void destroy_workqueue(struct workqueue_struct *wq)
404{ 431{
405 int cpu; 432 int cpu;
@@ -425,18 +452,41 @@ EXPORT_SYMBOL_GPL(destroy_workqueue);
425 452
426static struct workqueue_struct *keventd_wq; 453static struct workqueue_struct *keventd_wq;
427 454
455/**
456 * schedule_work - put work task in global workqueue
457 * @work: job to be done
458 *
459 * This puts a job in the kernel-global workqueue.
460 */
428int fastcall schedule_work(struct work_struct *work) 461int fastcall schedule_work(struct work_struct *work)
429{ 462{
430 return queue_work(keventd_wq, work); 463 return queue_work(keventd_wq, work);
431} 464}
432EXPORT_SYMBOL(schedule_work); 465EXPORT_SYMBOL(schedule_work);
433 466
467/**
468 * schedule_delayed_work - put work task in global workqueue after delay
469 * @work: job to be done
470 * @delay: number of jiffies to wait
471 *
472 * After waiting for a given time this puts a job in the kernel-global
473 * workqueue.
474 */
434int fastcall schedule_delayed_work(struct work_struct *work, unsigned long delay) 475int fastcall schedule_delayed_work(struct work_struct *work, unsigned long delay)
435{ 476{
436 return queue_delayed_work(keventd_wq, work, delay); 477 return queue_delayed_work(keventd_wq, work, delay);
437} 478}
438EXPORT_SYMBOL(schedule_delayed_work); 479EXPORT_SYMBOL(schedule_delayed_work);
439 480
481/**
482 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
483 * @cpu: cpu to use
484 * @work: job to be done
485 * @delay: number of jiffies to wait
486 *
487 * After waiting for a given time this puts a job in the kernel-global
488 * workqueue on the specified CPU.
489 */
440int schedule_delayed_work_on(int cpu, 490int schedule_delayed_work_on(int cpu,
441 struct work_struct *work, unsigned long delay) 491 struct work_struct *work, unsigned long delay)
442{ 492{