Diffstat (limited to 'kernel')
36 files changed, 428 insertions, 243 deletions
diff --git a/kernel/async.c b/kernel/async.c
index 608b32b4281..67a2be71f51 100644
--- a/kernel/async.c
+++ b/kernel/async.c
@@ -138,15 +138,18 @@ static void run_one_entry(void)
 
 	/* 3) run it (and print duration)*/
 	if (initcall_debug && system_state == SYSTEM_BOOTING) {
-		printk("calling %lli_%pF @ %i\n", entry->cookie, entry->func, task_pid_nr(current));
+		printk("calling %lli_%pF @ %i\n", (long long)entry->cookie,
+			entry->func, task_pid_nr(current));
 		calltime = ktime_get();
 	}
 	entry->func(entry->data, entry->cookie);
 	if (initcall_debug && system_state == SYSTEM_BOOTING) {
 		rettime = ktime_get();
 		delta = ktime_sub(rettime, calltime);
-		printk("initcall %lli_%pF returned 0 after %lld usecs\n", entry->cookie,
-			entry->func, ktime_to_ns(delta) >> 10);
+		printk("initcall %lli_%pF returned 0 after %lld usecs\n",
+			(long long)entry->cookie,
+			entry->func,
+			(long long)ktime_to_ns(delta) >> 10);
 	}
 
 	/* 4) remove it from the running queue */
@@ -247,7 +250,8 @@ void async_synchronize_cookie_special(async_cookie_t cookie, struct list_head *r
 		delta = ktime_sub(endtime, starttime);
 
 		printk("async_continuing @ %i after %lli usec\n",
-			task_pid_nr(current), ktime_to_ns(delta) >> 10);
+			task_pid_nr(current),
+			(long long)ktime_to_ns(delta) >> 10);
 	}
 }
 EXPORT_SYMBOL_GPL(async_synchronize_cookie_special);
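The async.c hunks are pure printk-format fixes: `async_cookie_t` is a 64-bit typedef and `ktime_to_ns()` returns `s64`, so each argument is cast to `long long` to match the `%lli`/`%lld` specifiers. A minimal userspace sketch of the same varargs rule (hypothetical values, not kernel code):

```c
/*
 * A value passed through "..." must match its printf/printk format
 * exactly. A 64-bit typedef may be `unsigned long` on 64-bit targets,
 * which format checking rejects for %lli, so the argument is cast to
 * the spelled-out type.
 */
#include <stdio.h>
#include <stdint.h>

typedef uint64_t async_cookie_t;	/* stand-in for the kernel typedef */

int main(void)
{
	async_cookie_t cookie = 42;

	printf("calling %lli @ %i\n", (long long)cookie, 1234);
	return 0;
}
```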
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index c29831076e7..5a54ff42874 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -1115,8 +1115,10 @@ static void cgroup_kill_sb(struct super_block *sb) {
 	}
 	write_unlock(&css_set_lock);
 
-	list_del(&root->root_list);
-	root_count--;
+	if (!list_empty(&root->root_list)) {
+		list_del(&root->root_list);
+		root_count--;
+	}
 
 	mutex_unlock(&cgroup_mutex);
 
@@ -2434,7 +2436,9 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
 
  err_remove:
 
+	cgroup_lock_hierarchy(root);
 	list_del(&cgrp->sibling);
+	cgroup_unlock_hierarchy(root);
 	root->number_of_cgroups--;
 
  err_destroy:
@@ -2507,7 +2511,7 @@ static int cgroup_clear_css_refs(struct cgroup *cgrp)
 	for_each_subsys(cgrp->root, ss) {
 		struct cgroup_subsys_state *css = cgrp->subsys[ss->subsys_id];
 		int refcnt;
-		do {
+		while (1) {
 			/* We can only remove a CSS with a refcnt==1 */
 			refcnt = atomic_read(&css->refcnt);
 			if (refcnt > 1) {
@@ -2521,7 +2525,10 @@ static int cgroup_clear_css_refs(struct cgroup *cgrp)
 			 * css_tryget() to spin until we set the
 			 * CSS_REMOVED bits or abort
 			 */
-		} while (atomic_cmpxchg(&css->refcnt, refcnt, 0) != refcnt);
+			if (atomic_cmpxchg(&css->refcnt, refcnt, 0) == refcnt)
+				break;
+			cpu_relax();
+		}
 	}
  done:
 	for_each_subsys(cgrp->root, ss) {
@@ -2991,20 +2998,21 @@ int cgroup_clone(struct task_struct *tsk, struct cgroup_subsys *subsys,
 		mutex_unlock(&cgroup_mutex);
 		return 0;
 	}
-	task_lock(tsk);
-	cg = tsk->cgroups;
-	parent = task_cgroup(tsk, subsys->subsys_id);
 
 	/* Pin the hierarchy */
-	if (!atomic_inc_not_zero(&parent->root->sb->s_active)) {
+	if (!atomic_inc_not_zero(&root->sb->s_active)) {
 		/* We race with the final deactivate_super() */
 		mutex_unlock(&cgroup_mutex);
 		return 0;
 	}
 
 	/* Keep the cgroup alive */
+	task_lock(tsk);
+	parent = task_cgroup(tsk, subsys->subsys_id);
+	cg = tsk->cgroups;
 	get_css_set(cg);
 	task_unlock(tsk);
+
 	mutex_unlock(&cgroup_mutex);
 
 	/* Now do the VFS work to create a cgroup */
@@ -3043,7 +3051,7 @@ int cgroup_clone(struct task_struct *tsk, struct cgroup_subsys *subsys,
 		mutex_unlock(&inode->i_mutex);
 		put_css_set(cg);
 
-		deactivate_super(parent->root->sb);
+		deactivate_super(root->sb);
 		/* The cgroup is still accessible in the VFS, but
 		 * we're not going to try to rmdir() it at this
 		 * point. */
@@ -3069,7 +3077,7 @@ int cgroup_clone(struct task_struct *tsk, struct cgroup_subsys *subsys,
 	mutex_lock(&cgroup_mutex);
 	put_css_set(cg);
 	mutex_unlock(&cgroup_mutex);
-	deactivate_super(parent->root->sb);
+	deactivate_super(root->sb);
 	return ret;
 }
 
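The `cgroup_clear_css_refs()` hunk rewrites a `do { } while (atomic_cmpxchg(...) != refcnt)` loop as an explicit `while (1)` so that a failed compare-exchange can `cpu_relax()` before retrying instead of spinning hot on the cache line. A standalone sketch of the same drop-to-zero pattern, using C11 atomics rather than the kernel's `atomic_t` API:

```c
#include <stdatomic.h>
#include <stdbool.h>

/*
 * Try to retire an object: succeed only if the refcount can be moved
 * from exactly 1 (our base reference) to 0. On a lost race, pause
 * (the kernel's cpu_relax()) and re-read instead of retrying blindly.
 */
static bool try_retire(atomic_int *refcnt)
{
	for (;;) {
		int old = atomic_load(refcnt);

		if (old > 1)
			return false;	/* someone else holds a reference */
		if (atomic_compare_exchange_weak(refcnt, &old, 0))
			return true;	/* we committed the 1 -> 0 step */
		/* lost the race: back off (cpu_relax() in the kernel) */
	}
}
```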
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index a85678865c5..f76db9dcaa0 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -61,6 +61,14 @@
 #include <linux/cgroup.h>
 
 /*
+ * Workqueue for cpuset related tasks.
+ *
+ * Using kevent workqueue may cause deadlock when memory_migrate
+ * is set. So we create a separate workqueue thread for cpuset.
+ */
+static struct workqueue_struct *cpuset_wq;
+
+/*
  * Tracks how many cpusets are currently defined in system.
  * When there is only one cpuset (the root cpuset) we can
  * short circuit some hooks.
@@ -831,7 +839,7 @@ static DECLARE_WORK(rebuild_sched_domains_work, do_rebuild_sched_domains);
 */
 static void async_rebuild_sched_domains(void)
 {
-	schedule_work(&rebuild_sched_domains_work);
+	queue_work(cpuset_wq, &rebuild_sched_domains_work);
 }
 
 /*
@@ -2111,6 +2119,9 @@ void __init cpuset_init_smp(void)
 
 	hotcpu_notifier(cpuset_track_online_cpus, 0);
 	hotplug_memory_notifier(cpuset_track_online_nodes, 10);
+
+	cpuset_wq = create_singlethread_workqueue("cpuset");
+	BUG_ON(!cpuset_wq);
 }
 
 /**
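The cpuset change swaps `schedule_work()` (which runs on the shared kevent workqueue) for a private single-threaded workqueue, so a cpuset job that blocks on memory migration cannot deadlock against unrelated kevent work. A sketch of the same pattern with the workqueue API of this era; `my_wq`, `my_work_fn` and the init hook are illustrative names, not kernel symbols:

```c
#include <linux/workqueue.h>
#include <linux/init.h>
#include <linux/errno.h>

static struct workqueue_struct *my_wq;

static void my_work_fn(struct work_struct *work)
{
	/* long-running or possibly blocking work goes here */
}
static DECLARE_WORK(my_work, my_work_fn);

static int __init my_setup(void)
{
	/* a private thread cannot be wedged by unrelated kevent work */
	my_wq = create_singlethread_workqueue("my_wq");
	if (!my_wq)
		return -ENOMEM;
	queue_work(my_wq, &my_work);	/* instead of schedule_work() */
	return 0;
}
```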
diff --git a/kernel/dma-coherent.c b/kernel/dma-coherent.c
index 038707404b7..962a3b574f2 100644
--- a/kernel/dma-coherent.c
+++ b/kernel/dma-coherent.c
@@ -98,7 +98,7 @@ EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
  * @size:	size of requested memory area
  * @dma_handle:	This will be filled with the correct dma handle
  * @ret:	This pointer will be filled with the virtual address
- * to allocated area.
+ *		to allocated area.
  *
  * This function should be only called from per-arch dma_alloc_coherent()
  * to support allocation from per-device coherent memory pools.
@@ -118,31 +118,32 @@ int dma_alloc_from_coherent(struct device *dev, ssize_t size,
 	mem = dev->dma_mem;
 	if (!mem)
 		return 0;
-	if (unlikely(size > mem->size))
-		return 0;
+
+	*ret = NULL;
+
+	if (unlikely(size > (mem->size << PAGE_SHIFT)))
+		goto err;
 
 	pageno = bitmap_find_free_region(mem->bitmap, mem->size, order);
-	if (pageno >= 0) {
-		/*
-		 * Memory was found in the per-device arena.
-		 */
-		*dma_handle = mem->device_base + (pageno << PAGE_SHIFT);
-		*ret = mem->virt_base + (pageno << PAGE_SHIFT);
-		memset(*ret, 0, size);
-	} else if (mem->flags & DMA_MEMORY_EXCLUSIVE) {
-		/*
-		 * The per-device arena is exhausted and we are not
-		 * permitted to fall back to generic memory.
-		 */
-		*ret = NULL;
-	} else {
-		/*
-		 * The per-device arena is exhausted and we are
-		 * permitted to fall back to generic memory.
-		 */
-		return 0;
-	}
+	if (unlikely(pageno < 0))
+		goto err;
+
+	/*
+	 * Memory was found in the per-device area.
+	 */
+	*dma_handle = mem->device_base + (pageno << PAGE_SHIFT);
+	*ret = mem->virt_base + (pageno << PAGE_SHIFT);
+	memset(*ret, 0, size);
+
 	return 1;
+
+err:
+	/*
+	 * In the case where the allocation can not be satisfied from the
+	 * per-device area, try to fall back to generic memory if the
+	 * constraints allow it.
+	 */
+	return mem->flags & DMA_MEMORY_EXCLUSIVE;
 }
 EXPORT_SYMBOL(dma_alloc_from_coherent);
 
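The key fix here is a units bug: `mem->size` counts pages while `size` is in bytes, so the old `size > mem->size` test compared bytes against a page count. Illustrative arithmetic, assuming 4 KiB pages (`PAGE_SHIFT == 12`):

```c
#include <stdio.h>
#include <stddef.h>

int main(void)
{
	size_t pool_pages = 16;			/* what mem->size stores */
	size_t pool_bytes = pool_pages << 12;	/* 16 pages = 65536 bytes */
	size_t request    = 20000;		/* request size in bytes */

	/* old test compared bytes to pages and wrongly rejected this */
	printf("old: oversized? %d\n", request > pool_pages);	/* 1 */
	/* new test compares bytes to bytes; the request fits */
	printf("new: oversized? %d\n", request > pool_bytes);	/* 0 */
	return 0;
}
```

On failure the function now returns `mem->flags & DMA_MEMORY_EXCLUSIVE` with `*ret = NULL`, so exclusive pools report the request as handled (blocking any fallback) while non-exclusive pools let the caller fall back to generic memory.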
diff --git a/kernel/fork.c b/kernel/fork.c
index bf0cef8bbdf..6d5dbb7a13e 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -817,17 +817,17 @@ static void posix_cpu_timers_init_group(struct signal_struct *sig)
 static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
 {
 	struct signal_struct *sig;
-	int ret;
 
 	if (clone_flags & CLONE_THREAD) {
-		ret = thread_group_cputime_clone_thread(current);
-		if (likely(!ret)) {
-			atomic_inc(&current->signal->count);
-			atomic_inc(&current->signal->live);
-		}
-		return ret;
+		atomic_inc(&current->signal->count);
+		atomic_inc(&current->signal->live);
+		return 0;
 	}
 	sig = kmem_cache_alloc(signal_cachep, GFP_KERNEL);
+
+	if (sig)
+		posix_cpu_timers_init_group(sig);
+
 	tsk->signal = sig;
 	if (!sig)
 		return -ENOMEM;
@@ -864,8 +864,6 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
 	memcpy(sig->rlim, current->signal->rlim, sizeof sig->rlim);
 	task_unlock(current->group_leader);
 
-	posix_cpu_timers_init_group(sig);
-
 	acct_init_pacct(&sig->pacct);
 
 	tty_audit_fork(sig);
@@ -1007,6 +1005,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	 * triggers too late. This doesn't hurt, the check is only there
 	 * to stop root fork bombs.
 	 */
+	retval = -EAGAIN;
 	if (nr_threads >= max_threads)
 		goto bad_fork_cleanup_count;
 
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index 2dc30c59c5f..f394d2a42ca 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -501,6 +501,13 @@ static void hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base)
 			continue;
 		timer = rb_entry(base->first, struct hrtimer, node);
 		expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
+		/*
+		 * clock_was_set() has changed base->offset so the
+		 * result might be negative. Fix it up to prevent a
+		 * false positive in clockevents_program_event()
+		 */
+		if (expires.tv64 < 0)
+			expires.tv64 = 0;
 		if (expires.tv64 < cpu_base->expires_next.tv64)
 			cpu_base->expires_next = expires;
 	}
@@ -614,7 +621,9 @@ void clock_was_set(void)
  */
 void hres_timers_resume(void)
 {
-	/* Retrigger the CPU local events: */
+	WARN_ONCE(!irqs_disabled(),
+		  KERN_INFO "hres_timers_resume() called with IRQs enabled!");
+
 	retrigger_next_event(NULL);
 }
 
@@ -1156,6 +1165,29 @@ static void __run_hrtimer(struct hrtimer *timer)
 
 #ifdef CONFIG_HIGH_RES_TIMERS
 
+static int force_clock_reprogram;
+
+/*
+ * After 5 iteration's attempts, we consider that hrtimer_interrupt()
+ * is hanging, which could happen with something that slows the interrupt
+ * such as the tracing. Then we force the clock reprogramming for each future
+ * hrtimer interrupts to avoid infinite loops and use the min_delta_ns
+ * threshold that we will overwrite.
+ * The next tick event will be scheduled to 3 times we currently spend on
+ * hrtimer_interrupt(). This gives a good compromise, the cpus will spend
+ * 1/4 of their time to process the hrtimer interrupts. This is enough to
+ * let it running without serious starvation.
+ */
+
+static inline void
+hrtimer_interrupt_hanging(struct clock_event_device *dev,
+			ktime_t try_time)
+{
+	force_clock_reprogram = 1;
+	dev->min_delta_ns = (unsigned long)try_time.tv64 * 3;
+	printk(KERN_WARNING "hrtimer: interrupt too slow, "
+		"forcing clock min delta to %lu ns\n", dev->min_delta_ns);
+}
 /*
  * High resolution timer interrupt
  * Called with interrupts disabled
@@ -1165,6 +1197,7 @@ void hrtimer_interrupt(struct clock_event_device *dev)
 	struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
 	struct hrtimer_clock_base *base;
 	ktime_t expires_next, now;
+	int nr_retries = 0;
 	int i;
 
 	BUG_ON(!cpu_base->hres_active);
@@ -1172,6 +1205,10 @@ void hrtimer_interrupt(struct clock_event_device *dev)
 	dev->next_event.tv64 = KTIME_MAX;
 
  retry:
+	/* 5 retries is enough to notice a hang */
+	if (!(++nr_retries % 5))
+		hrtimer_interrupt_hanging(dev, ktime_sub(ktime_get(), now));
+
 	now = ktime_get();
 
 	expires_next.tv64 = KTIME_MAX;
@@ -1224,7 +1261,7 @@ void hrtimer_interrupt(struct clock_event_device *dev)
 
 	/* Reprogramming necessary ? */
 	if (expires_next.tv64 != KTIME_MAX) {
-		if (tick_program_event(expires_next, 0))
+		if (tick_program_event(expires_next, force_clock_reprogram))
 			goto retry;
 	}
 }
@@ -1578,6 +1615,10 @@ static int __cpuinit hrtimer_cpu_notify(struct notifier_block *self,
 		break;
 
 #ifdef CONFIG_HOTPLUG_CPU
+	case CPU_DYING:
+	case CPU_DYING_FROZEN:
+		clockevents_notify(CLOCK_EVT_NOTIFY_CPU_DYING, &scpu);
+		break;
 	case CPU_DEAD:
 	case CPU_DEAD_FROZEN:
 	{
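One step worth making explicit in the hang-detection comment above: the 1/4 duty cycle follows directly from the forced minimum delta. If a pass through hrtimer_interrupt() took t ns, the next event is programmed at least 3t later, so the handler runs for t out of every t + 3t = 4t of wall time, i.e. at most 25% of the CPU.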
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index f63c706d25e..7de11bd64df 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -383,6 +383,7 @@ handle_level_irq(unsigned int irq, struct irq_desc *desc)
 out_unlock:
 	spin_unlock(&desc->lock);
 }
+EXPORT_SYMBOL_GPL(handle_level_irq);
 
 /**
  * handle_fasteoi_irq - irq handler for transparent controllers
@@ -593,6 +594,7 @@ __set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
 	}
 	spin_unlock_irqrestore(&desc->lock, flags);
 }
+EXPORT_SYMBOL_GPL(__set_irq_handler);
 
 void
 set_irq_chip_and_handler(unsigned int irq, struct irq_chip *chip,
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index c20db0be917..3aba8d12f32 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -39,6 +39,18 @@ void handle_bad_irq(unsigned int irq, struct irq_desc *desc)
 	ack_bad_irq(irq);
 }
 
+#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS)
+static void __init init_irq_default_affinity(void)
+{
+	alloc_bootmem_cpumask_var(&irq_default_affinity);
+	cpumask_setall(irq_default_affinity);
+}
+#else
+static void __init init_irq_default_affinity(void)
+{
+}
+#endif
+
 /*
  * Linux has a controller-independent interrupt architecture.
  * Every controller has a 'controller-template', that is used
@@ -134,6 +146,8 @@ int __init early_irq_init(void)
 	int legacy_count;
 	int i;
 
+	init_irq_default_affinity();
+
 	desc = irq_desc_legacy;
 	legacy_count = ARRAY_SIZE(irq_desc_legacy);
 
@@ -219,6 +233,8 @@ int __init early_irq_init(void)
 	int count;
 	int i;
 
+	init_irq_default_affinity();
+
 	desc = irq_desc;
 	count = ARRAY_SIZE(irq_desc);
 
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index cd0cd8dcb34..291f0366455 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -15,17 +15,9 @@
 
 #include "internals.h"
 
-#ifdef CONFIG_SMP
+#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS)
 cpumask_var_t irq_default_affinity;
 
-static int init_irq_default_affinity(void)
-{
-	alloc_cpumask_var(&irq_default_affinity, GFP_KERNEL);
-	cpumask_setall(irq_default_affinity);
-	return 0;
-}
-core_initcall(init_irq_default_affinity);
-
 /**
  *	synchronize_irq - wait for pending IRQ handlers (on other CPUs)
  *	@irq: interrupt number to wait for
diff --git a/kernel/irq/numa_migrate.c b/kernel/irq/numa_migrate.c
index ecf765c6a77..acd88356ac7 100644
--- a/kernel/irq/numa_migrate.c
+++ b/kernel/irq/numa_migrate.c
@@ -71,7 +71,7 @@ static struct irq_desc *__real_move_irq_desc(struct irq_desc *old_desc,
 	desc = irq_desc_ptrs[irq];
 
 	if (desc && old_desc != desc)
-			goto out_unlock;
+		goto out_unlock;
 
 	node = cpu_to_node(cpu);
 	desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node);
@@ -84,10 +84,15 @@ static struct irq_desc *__real_move_irq_desc(struct irq_desc *old_desc,
 	init_copy_one_irq_desc(irq, old_desc, desc, cpu);
 
 	irq_desc_ptrs[irq] = desc;
+	spin_unlock_irqrestore(&sparse_irq_lock, flags);
 
 	/* free the old one */
 	free_one_irq_desc(old_desc, desc);
+	spin_unlock(&old_desc->lock);
 	kfree(old_desc);
+	spin_lock(&desc->lock);
+
+	return desc;
 
 out_unlock:
 	spin_unlock_irqrestore(&sparse_irq_lock, flags);
diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
index e694afa0eb8..7b8b0f21a5b 100644
--- a/kernel/kallsyms.c
+++ b/kernel/kallsyms.c
@@ -30,19 +30,20 @@
 #define all_var 0
 #endif
 
-extern const unsigned long kallsyms_addresses[];
-extern const u8 kallsyms_names[];
+/* These will be re-linked against their real values during the second link stage */
+extern const unsigned long kallsyms_addresses[] __attribute__((weak));
+extern const u8 kallsyms_names[] __attribute__((weak));
 
 /* tell the compiler that the count isn't in the small data section if the arch
  * has one (eg: FRV)
  */
 extern const unsigned long kallsyms_num_syms
-__attribute__((__section__(".rodata")));
+__attribute__((weak, section(".rodata")));
 
-extern const u8 kallsyms_token_table[];
-extern const u16 kallsyms_token_index[];
+extern const u8 kallsyms_token_table[] __attribute__((weak));
+extern const u16 kallsyms_token_index[] __attribute__((weak));
 
-extern const unsigned long kallsyms_markers[];
+extern const unsigned long kallsyms_markers[] __attribute__((weak));
 
 static inline int is_kernel_inittext(unsigned long addr)
 {
@@ -167,6 +168,9 @@ static unsigned long get_symbol_pos(unsigned long addr,
 	unsigned long symbol_start = 0, symbol_end = 0;
 	unsigned long i, low, high, mid;
 
+	/* This kernel should never had been booted. */
+	BUG_ON(!kallsyms_addresses);
+
 	/* do a binary search on the sorted kallsyms_addresses array */
 	low = 0;
 	high = kallsyms_num_syms;
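Marking the arrays `__attribute__((weak))` lets the first-stage link of vmlinux succeed before the kallsyms tables have been generated: an undefined weak symbol simply resolves to address 0, which is exactly what the added `BUG_ON(!kallsyms_addresses)` tests for. A userspace sketch of the mechanism:

```c
/*
 * If nothing defines `table`, the weak reference resolves to address 0
 * instead of failing the link, so its presence can be tested at run time.
 */
#include <stddef.h>

extern const unsigned long table[] __attribute__((weak));

int table_present(void)
{
	return table != NULL;	/* the array name decays to a pointer */
}
```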
diff --git a/kernel/module.c b/kernel/module.c
index e8b51d41dd7..ba22484a987 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -573,13 +573,13 @@ static char last_unloaded_module[MODULE_NAME_LEN+1];
 /* Init the unload section of the module. */
 static void module_unload_init(struct module *mod)
 {
-	unsigned int i;
+	int cpu;
 
 	INIT_LIST_HEAD(&mod->modules_which_use_me);
-	for (i = 0; i < NR_CPUS; i++)
-		local_set(&mod->ref[i].count, 0);
+	for_each_possible_cpu(cpu)
+		local_set(__module_ref_addr(mod, cpu), 0);
 	/* Hold reference count during initialization. */
-	local_set(&mod->ref[raw_smp_processor_id()].count, 1);
+	local_set(__module_ref_addr(mod, raw_smp_processor_id()), 1);
 	/* Backwards compatibility macros put refcount during init. */
 	mod->waiter = current;
 }
@@ -717,10 +717,11 @@ static int try_stop_module(struct module *mod, int flags, int *forced)
 
 unsigned int module_refcount(struct module *mod)
 {
-	unsigned int i, total = 0;
+	unsigned int total = 0;
+	int cpu;
 
-	for (i = 0; i < NR_CPUS; i++)
-		total += local_read(&mod->ref[i].count);
+	for_each_possible_cpu(cpu)
+		total += local_read(__module_ref_addr(mod, cpu));
 	return total;
 }
 EXPORT_SYMBOL(module_refcount);
@@ -894,7 +895,7 @@ void module_put(struct module *module)
 {
 	if (module) {
 		unsigned int cpu = get_cpu();
-		local_dec(&module->ref[cpu].count);
+		local_dec(__module_ref_addr(module, cpu));
 		/* Maybe they're waiting for us to drop reference? */
 		if (unlikely(!module_is_live(module)))
 			wake_up_process(module->waiter);
@@ -1464,7 +1465,10 @@ static void free_module(struct module *mod)
 	kfree(mod->args);
 	if (mod->percpu)
 		percpu_modfree(mod->percpu);
-
+#if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP)
+	if (mod->refptr)
+		percpu_modfree(mod->refptr);
+#endif
 	/* Free lock-classes: */
 	lockdep_free_key_range(mod->module_core, mod->core_size);
 
@@ -2011,6 +2015,14 @@ static noinline struct module *load_module(void __user *umod,
 	if (err < 0)
 		goto free_mod;
 
+#if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP)
+	mod->refptr = percpu_modalloc(sizeof(local_t), __alignof__(local_t),
+				      mod->name);
+	if (!mod->refptr) {
+		err = -ENOMEM;
+		goto free_mod;
+	}
+#endif
 	if (pcpuindex) {
 		/* We have a special allocation for this section. */
 		percpu = percpu_modalloc(sechdrs[pcpuindex].sh_size,
@@ -2018,7 +2030,7 @@ static noinline struct module *load_module(void __user *umod,
 					 mod->name);
 		if (!percpu) {
 			err = -ENOMEM;
-			goto free_mod;
+			goto free_percpu;
 		}
 		sechdrs[pcpuindex].sh_flags &= ~(unsigned long)SHF_ALLOC;
 		mod->percpu = percpu;
@@ -2282,6 +2294,9 @@ static noinline struct module *load_module(void __user *umod,
 free_percpu:
 	if (percpu)
 		percpu_modfree(percpu);
+#if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP)
+	percpu_modfree(mod->refptr);
+#endif
 free_mod:
 	kfree(args);
 free_hdr:
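These module.c hunks move the per-CPU reference counters from a fixed `NR_CPUS`-sized array embedded in `struct module` to a `percpu_modalloc()`'d region reached through `__module_ref_addr()`. That helper lives in include/linux/module.h, outside this kernel/ diffstat; from the call sites above it is roughly the following (a hedged reconstruction, not part of this diff):

```c
/*
 * SMP case: mod->refptr points at a per-cpu allocation holding one
 * local_t; adding the CPU's per-cpu offset reaches that CPU's counter.
 */
static inline local_t *__module_ref_addr(struct module *mod, int cpu)
{
#ifdef CONFIG_SMP
	return (local_t *) (mod->refptr + per_cpu_offset(cpu));
#else
	return &mod->ref;	/* UP keeps a single embedded counter */
#endif
}
```

Iterating with `for_each_possible_cpu()` instead of `0..NR_CPUS` also stops touching counters for CPUs that can never exist on this machine.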
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index 157de3a4783..fa07da94d7b 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -10,76 +10,6 @@
 #include <linux/kernel_stat.h>
 
 /*
- * Allocate the thread_group_cputime structure appropriately and fill in the
- * current values of the fields. Called from copy_signal() via
- * thread_group_cputime_clone_thread() when adding a second or subsequent
- * thread to a thread group. Assumes interrupts are enabled when called.
- */
-int thread_group_cputime_alloc(struct task_struct *tsk)
-{
-	struct signal_struct *sig = tsk->signal;
-	struct task_cputime *cputime;
-
-	/*
-	 * If we have multiple threads and we don't already have a
-	 * per-CPU task_cputime struct (checked in the caller), allocate
-	 * one and fill it in with the times accumulated so far. We may
-	 * race with another thread so recheck after we pick up the sighand
-	 * lock.
-	 */
-	cputime = alloc_percpu(struct task_cputime);
-	if (cputime == NULL)
-		return -ENOMEM;
-	spin_lock_irq(&tsk->sighand->siglock);
-	if (sig->cputime.totals) {
-		spin_unlock_irq(&tsk->sighand->siglock);
-		free_percpu(cputime);
-		return 0;
-	}
-	sig->cputime.totals = cputime;
-	cputime = per_cpu_ptr(sig->cputime.totals, smp_processor_id());
-	cputime->utime = tsk->utime;
-	cputime->stime = tsk->stime;
-	cputime->sum_exec_runtime = tsk->se.sum_exec_runtime;
-	spin_unlock_irq(&tsk->sighand->siglock);
-	return 0;
-}
-
-/**
- * thread_group_cputime - Sum the thread group time fields across all CPUs.
- *
- * @tsk:	The task we use to identify the thread group.
- * @times:	task_cputime structure in which we return the summed fields.
- *
- * Walk the list of CPUs to sum the per-CPU time fields in the thread group
- * time structure.
- */
-void thread_group_cputime(
-	struct task_struct *tsk,
-	struct task_cputime *times)
-{
-	struct task_cputime *totals, *tot;
-	int i;
-
-	totals = tsk->signal->cputime.totals;
-	if (!totals) {
-		times->utime = tsk->utime;
-		times->stime = tsk->stime;
-		times->sum_exec_runtime = tsk->se.sum_exec_runtime;
-		return;
-	}
-
-	times->stime = times->utime = cputime_zero;
-	times->sum_exec_runtime = 0;
-	for_each_possible_cpu(i) {
-		tot = per_cpu_ptr(totals, i);
-		times->utime = cputime_add(times->utime, tot->utime);
-		times->stime = cputime_add(times->stime, tot->stime);
-		times->sum_exec_runtime += tot->sum_exec_runtime;
-	}
-}
-
-/*
  * Called after updating RLIMIT_CPU to set timer expiration if necessary.
  */
 void update_rlimit_cpu(unsigned long rlim_new)
diff --git a/kernel/power/disk.c b/kernel/power/disk.c
index 45e8541ab7e..432ee575c9e 100644
--- a/kernel/power/disk.c
+++ b/kernel/power/disk.c
@@ -71,6 +71,14 @@ void hibernation_set_ops(struct platform_hibernation_ops *ops)
 	mutex_unlock(&pm_mutex);
 }
 
+static bool entering_platform_hibernation;
+
+bool system_entering_hibernation(void)
+{
+	return entering_platform_hibernation;
+}
+EXPORT_SYMBOL(system_entering_hibernation);
+
 #ifdef CONFIG_PM_DEBUG
 static void hibernation_debug_sleep(void)
 {
@@ -411,6 +419,7 @@ int hibernation_platform_enter(void)
 	if (error)
 		goto Close;
 
+	entering_platform_hibernation = true;
 	suspend_console();
 	error = device_suspend(PMSG_HIBERNATE);
 	if (error) {
@@ -445,6 +454,7 @@ int hibernation_platform_enter(void)
 Finish:
 	hibernation_ops->finish();
 Resume_devices:
+	entering_platform_hibernation = false;
 	device_resume(PMSG_RESTORE);
 	resume_console();
 Close:
diff --git a/kernel/power/main.c b/kernel/power/main.c
index 23998887397..b4d219016b6 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -57,16 +57,6 @@ int pm_notifier_call_chain(unsigned long val)
 #ifdef CONFIG_PM_DEBUG
 int pm_test_level = TEST_NONE;
 
-static int suspend_test(int level)
-{
-	if (pm_test_level == level) {
-		printk(KERN_INFO "suspend debug: Waiting for 5 seconds.\n");
-		mdelay(5000);
-		return 1;
-	}
-	return 0;
-}
-
 static const char * const pm_tests[__TEST_AFTER_LAST] = {
 	[TEST_NONE] = "none",
 	[TEST_CORE] = "core",
@@ -125,14 +115,24 @@ static ssize_t pm_test_store(struct kobject *kobj, struct kobj_attribute *attr,
 }
 
 power_attr(pm_test);
-#else /* !CONFIG_PM_DEBUG */
-static inline int suspend_test(int level) { return 0; }
-#endif /* !CONFIG_PM_DEBUG */
+#endif /* CONFIG_PM_DEBUG */
 
 #endif /* CONFIG_PM_SLEEP */
 
 #ifdef CONFIG_SUSPEND
 
+static int suspend_test(int level)
+{
+#ifdef CONFIG_PM_DEBUG
+	if (pm_test_level == level) {
+		printk(KERN_INFO "suspend debug: Waiting for 5 seconds.\n");
+		mdelay(5000);
+		return 1;
+	}
+#endif /* !CONFIG_PM_DEBUG */
+	return 0;
+}
+
 #ifdef CONFIG_PM_TEST_SUSPEND
 
 /*
diff --git a/kernel/rcuclassic.c b/kernel/rcuclassic.c
index 490934fc7ac..bd5a9003497 100644
--- a/kernel/rcuclassic.c
+++ b/kernel/rcuclassic.c
@@ -716,7 +716,7 @@ void rcu_check_callbacks(int cpu, int user)
 		raise_rcu_softirq();
 }
 
-static void rcu_init_percpu_data(int cpu, struct rcu_ctrlblk *rcp,
+static void __cpuinit rcu_init_percpu_data(int cpu, struct rcu_ctrlblk *rcp,
 						struct rcu_data *rdp)
 {
 	unsigned long flags;
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index f2d8638e6c6..b2fd602a6f6 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -1314,7 +1314,7 @@ int rcu_needs_cpu(int cpu)
  * access due to the fact that this CPU cannot possibly have any RCU
  * callbacks in flight yet.
  */
-static void
+static void __cpuinit
 rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
 {
 	unsigned long flags;
diff --git a/kernel/relay.c b/kernel/relay.c
index 09ac2008f77..9d79b7854fa 100644
--- a/kernel/relay.c
+++ b/kernel/relay.c
@@ -663,8 +663,10 @@ int relay_late_setup_files(struct rchan *chan,
 
 	mutex_lock(&relay_channels_mutex);
 	/* Is chan already set up? */
-	if (unlikely(chan->has_base_filename))
+	if (unlikely(chan->has_base_filename)) {
+		mutex_unlock(&relay_channels_mutex);
 		return -EEXIST;
+	}
 	chan->has_base_filename = 1;
 	chan->parent = parent;
 	curr_cpu = get_cpu();
diff --git a/kernel/sched.c b/kernel/sched.c
index 52bbf1c842a..8ee437a5ec1 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2266,6 +2266,16 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)
 	if (!sched_feat(SYNC_WAKEUPS))
 		sync = 0;
 
+	if (!sync) {
+		if (current->se.avg_overlap < sysctl_sched_migration_cost &&
+			  p->se.avg_overlap < sysctl_sched_migration_cost)
+			sync = 1;
+	} else {
+		if (current->se.avg_overlap >= sysctl_sched_migration_cost ||
+			  p->se.avg_overlap >= sysctl_sched_migration_cost)
+			sync = 0;
+	}
+
 #ifdef CONFIG_SMP
 	if (sched_feat(LB_WAKEUP_UPDATE)) {
 		struct sched_domain *sd;
@@ -4687,8 +4697,8 @@ EXPORT_SYMBOL(default_wake_function);
 * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
 * zero in this (rare) case, and we handle it by continuing to scan the queue.
 */
-static void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
-			     int nr_exclusive, int sync, void *key)
+void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
+			int nr_exclusive, int sync, void *key)
 {
 	wait_queue_t *curr, *next;
 
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 5cc1c162044..a7e50ba185a 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -719,7 +719,7 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int wakeup)
 		__enqueue_entity(cfs_rq, se);
 }
 
-static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
+static void __clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
 	if (cfs_rq->last == se)
 		cfs_rq->last = NULL;
@@ -728,6 +728,12 @@ static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
 		cfs_rq->next = NULL;
 }
 
+static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
+{
+	for_each_sched_entity(se)
+		__clear_buddies(cfs_rq_of(se), se);
+}
+
 static void
 dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
 {
@@ -768,8 +774,14 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
 
 	ideal_runtime = sched_slice(cfs_rq, curr);
 	delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
-	if (delta_exec > ideal_runtime)
+	if (delta_exec > ideal_runtime) {
 		resched_task(rq_of(cfs_rq)->curr);
+		/*
+		 * The current task ran long enough, ensure it doesn't get
+		 * re-elected due to buddy favours.
+		 */
+		clear_buddies(cfs_rq, curr);
+	}
 }
 
 static void
@@ -1179,20 +1191,15 @@ wake_affine(struct sched_domain *this_sd, struct rq *this_rq,
 	    int idx, unsigned long load, unsigned long this_load,
 	    unsigned int imbalance)
 {
-	struct task_struct *curr = this_rq->curr;
-	struct task_group *tg;
 	unsigned long tl = this_load;
 	unsigned long tl_per_task;
+	struct task_group *tg;
 	unsigned long weight;
 	int balanced;
 
 	if (!(this_sd->flags & SD_WAKE_AFFINE) || !sched_feat(AFFINE_WAKEUPS))
 		return 0;
 
-	if (sync && (curr->se.avg_overlap > sysctl_sched_migration_cost ||
-			p->se.avg_overlap > sysctl_sched_migration_cost))
-		sync = 0;
-
 	/*
 	 * If sync wakeup then subtract the (maximum possible)
 	 * effect of the currently running task from the load
@@ -1419,9 +1426,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int sync)
 	if (!sched_feat(WAKEUP_PREEMPT))
 		return;
 
-	if (sched_feat(WAKEUP_OVERLAP) && (sync ||
-			(se->avg_overlap < sysctl_sched_migration_cost &&
-			 pse->avg_overlap < sysctl_sched_migration_cost))) {
+	if (sched_feat(WAKEUP_OVERLAP) && sync) {
 		resched_task(curr);
 		return;
 	}
@@ -1452,6 +1457,11 @@ static struct task_struct *pick_next_task_fair(struct rq *rq)
 
 	do {
 		se = pick_next_entity(cfs_rq);
+		/*
+		 * If se was a buddy, clear it so that it will have to earn
+		 * the favour again.
+		 */
+		__clear_buddies(cfs_rq, se);
 		set_next_entity(cfs_rq, se);
 		cfs_rq = group_cfs_rq(se);
 	} while (cfs_rq);
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 954e1a81b79..bac1061cea2 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -968,8 +968,8 @@ static inline int pick_optimal_cpu(int this_cpu, cpumask_t *mask)
 	if ((this_cpu != -1) && cpu_isset(this_cpu, *mask))
 		return this_cpu;
 
-	first = first_cpu(*mask);
-	if (first != NR_CPUS)
+	first = cpumask_first(mask);
+	if (first < nr_cpu_ids)
 		return first;
 
 	return -1;
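This is part of the cpumask API conversion: the old `first_cpu()` signalled an empty mask by returning `NR_CPUS`, while `cpumask_first()` returns a value `>= nr_cpu_ids`, and `nr_cpu_ids` can be smaller than `NR_CPUS` at runtime, so the sentinel test must change together with the call. The converted idiom in sketch form (illustrative helper name):

```c
#include <linux/cpumask.h>

/* Returns the first CPU in the mask, or -1 if the mask is empty. */
static int pick_first(const struct cpumask *mask)
{
	int cpu = cpumask_first(mask);

	/* compare against nr_cpu_ids, not NR_CPUS */
	return (cpu < nr_cpu_ids) ? cpu : -1;
}
```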
diff --git a/kernel/sched_stats.h b/kernel/sched_stats.h
index f2773b5d122..8ab0cef8eca 100644
--- a/kernel/sched_stats.h
+++ b/kernel/sched_stats.h
@@ -296,6 +296,7 @@ sched_info_switch(struct task_struct *prev, struct task_struct *next)
 static inline void account_group_user_time(struct task_struct *tsk,
 					   cputime_t cputime)
 {
+	struct task_cputime *times;
 	struct signal_struct *sig;
 
 	/* tsk == current, ensure it is safe to use ->signal */
@@ -303,13 +304,11 @@ static inline void account_group_user_time(struct task_struct *tsk,
 		return;
 
 	sig = tsk->signal;
-	if (sig->cputime.totals) {
-		struct task_cputime *times;
+	times = &sig->cputime.totals;
 
-		times = per_cpu_ptr(sig->cputime.totals, get_cpu());
-		times->utime = cputime_add(times->utime, cputime);
-		put_cpu_no_resched();
-	}
+	spin_lock(&times->lock);
+	times->utime = cputime_add(times->utime, cputime);
+	spin_unlock(&times->lock);
 }
 
 /**
@@ -325,6 +324,7 @@ static inline void account_group_user_time(struct task_struct *tsk,
 static inline void account_group_system_time(struct task_struct *tsk,
 					     cputime_t cputime)
 {
+	struct task_cputime *times;
 	struct signal_struct *sig;
 
 	/* tsk == current, ensure it is safe to use ->signal */
@@ -332,13 +332,11 @@ static inline void account_group_system_time(struct task_struct *tsk,
 		return;
 
 	sig = tsk->signal;
-	if (sig->cputime.totals) {
-		struct task_cputime *times;
+	times = &sig->cputime.totals;
 
-		times = per_cpu_ptr(sig->cputime.totals, get_cpu());
-		times->stime = cputime_add(times->stime, cputime);
-		put_cpu_no_resched();
-	}
+	spin_lock(&times->lock);
+	times->stime = cputime_add(times->stime, cputime);
+	spin_unlock(&times->lock);
 }
 
 /**
@@ -354,6 +352,7 @@ static inline void account_group_system_time(struct task_struct *tsk,
 static inline void account_group_exec_runtime(struct task_struct *tsk,
 					      unsigned long long ns)
 {
+	struct task_cputime *times;
 	struct signal_struct *sig;
 
 	sig = tsk->signal;
@@ -362,11 +361,9 @@ static inline void account_group_exec_runtime(struct task_struct *tsk,
 	if (unlikely(!sig))
 		return;
 
-	if (sig->cputime.totals) {
-		struct task_cputime *times;
+	times = &sig->cputime.totals;
 
-		times = per_cpu_ptr(sig->cputime.totals, get_cpu());
-		times->sum_exec_runtime += ns;
-		put_cpu_no_resched();
-	}
+	spin_lock(&times->lock);
+	times->sum_exec_runtime += ns;
+	spin_unlock(&times->lock);
 }
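All three accounting helpers change the same way: `sig->cputime.totals` is no longer a dynamically allocated per-CPU pointer (the `if (sig->cputime.totals)` guard, `per_cpu_ptr()` and `put_cpu_no_resched()` disappear, as does the allocator removed from posix-cpu-timers.c above) but a single embedded structure guarded by its own spinlock. The matching `struct task_cputime` change lives in sched.h, outside this diffstat; a hedged reconstruction of the shape these helpers assume:

```c
/* One shared accumulator per thread group, protected by times->lock. */
struct task_cputime {
	cputime_t		utime;
	cputime_t		stime;
	unsigned long long	sum_exec_runtime;
	spinlock_t		lock;
};
```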
diff --git a/kernel/signal.c b/kernel/signal.c
index e73759783dc..b6b36768b75 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -909,7 +909,9 @@ static void print_fatal_signal(struct pt_regs *regs, int signr)
 	}
 #endif
 	printk("\n");
+	preempt_disable();
 	show_regs(regs);
+	preempt_enable();
 }
 
 static int __init setup_print_fatal_signals(char *str)
diff --git a/kernel/smp.c b/kernel/smp.c
index 5cfa0e5e3e8..bbedbb7efe3 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -18,6 +18,7 @@ __cacheline_aligned_in_smp DEFINE_SPINLOCK(call_function_lock);
 enum {
 	CSD_FLAG_WAIT		= 0x01,
 	CSD_FLAG_ALLOC		= 0x02,
+	CSD_FLAG_LOCK		= 0x04,
 };
 
 struct call_function_data {
@@ -186,6 +187,9 @@ void generic_smp_call_function_single_interrupt(void)
 			if (data_flags & CSD_FLAG_WAIT) {
 				smp_wmb();
 				data->flags &= ~CSD_FLAG_WAIT;
+			} else if (data_flags & CSD_FLAG_LOCK) {
+				smp_wmb();
+				data->flags &= ~CSD_FLAG_LOCK;
 			} else if (data_flags & CSD_FLAG_ALLOC)
 				kfree(data);
 		}
@@ -196,6 +200,8 @@ void generic_smp_call_function_single_interrupt(void)
 	}
 }
 
+static DEFINE_PER_CPU(struct call_single_data, csd_data);
+
 /*
  * smp_call_function_single - Run a function on a specific CPU
  * @func: The function to run. This must be fast and non-blocking.
@@ -224,14 +230,38 @@ int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
 		func(info);
 		local_irq_restore(flags);
 	} else if ((unsigned)cpu < nr_cpu_ids && cpu_online(cpu)) {
-		struct call_single_data *data = NULL;
+		struct call_single_data *data;
 
 		if (!wait) {
+			/*
+			 * We are calling a function on a single CPU
+			 * and we are not going to wait for it to finish.
+			 * We first try to allocate the data, but if we
+			 * fail, we fall back to use a per cpu data to pass
+			 * the information to that CPU. Since all callers
+			 * of this code will use the same data, we must
+			 * synchronize the callers to prevent a new caller
+			 * from corrupting the data before the callee
+			 * can access it.
+			 *
+			 * The CSD_FLAG_LOCK is used to let us know when
+			 * the IPI handler is done with the data.
+			 * The first caller will set it, and the callee
+			 * will clear it. The next caller must wait for
+			 * it to clear before we set it again. This
+			 * will make sure the callee is done with the
+			 * data before a new caller will use it.
+			 */
 			data = kmalloc(sizeof(*data), GFP_ATOMIC);
 			if (data)
 				data->flags = CSD_FLAG_ALLOC;
-		}
-		if (!data) {
+			else {
+				data = &per_cpu(csd_data, me);
+				while (data->flags & CSD_FLAG_LOCK)
+					cpu_relax();
+				data->flags = CSD_FLAG_LOCK;
+			}
+		} else {
 			data = &d;
 			data->flags = CSD_FLAG_WAIT;
 		}
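The long comment above describes a two-party handshake around one static per-CPU slot: the sender sets `CSD_FLAG_LOCK` before handing the slot to the IPI, and the receiver clears it when done, so the next sender spins until the slot is free. A userspace sketch of the same flag protocol with C11 atomics (illustrative, not the kernel code):

```c
#include <stdatomic.h>

#define FLAG_LOCK 0x04

static atomic_int slot_flags;	/* stands in for csd_data.flags */

void sender_claim_slot(void)
{
	/* Wait until the previous receiver released the slot... */
	while (atomic_load(&slot_flags) & FLAG_LOCK)
		;	/* the kernel inserts cpu_relax() here */
	/* ...then claim it for our message and fire the IPI. */
	atomic_store(&slot_flags, FLAG_LOCK);
}

void receiver_finish_slot(void)
{
	/* Consume the message first, then publish the slot as free. */
	atomic_fetch_and(&slot_flags, ~FLAG_LOCK);
}
```

Note that in the kernel the slot is per *calling* CPU and claimed with preemption disabled, so there is never more than one concurrent producer per slot; the flag only orders the producer against the IPI handler.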
diff --git a/kernel/softlockup.c b/kernel/softlockup.c
index d9188c66278..85d5a245510 100644
--- a/kernel/softlockup.c
+++ b/kernel/softlockup.c
@@ -16,6 +16,7 @@
 #include <linux/lockdep.h>
 #include <linux/notifier.h>
 #include <linux/module.h>
+#include <linux/sysctl.h>
 
 #include <asm/irq_regs.h>
 
@@ -88,6 +89,14 @@ void touch_all_softlockup_watchdogs(void)
 }
 EXPORT_SYMBOL(touch_all_softlockup_watchdogs);
 
+int proc_dosoftlockup_thresh(struct ctl_table *table, int write,
+			     struct file *filp, void __user *buffer,
+			     size_t *lenp, loff_t *ppos)
+{
+	touch_all_softlockup_watchdogs();
+	return proc_dointvec_minmax(table, write, filp, buffer, lenp, ppos);
+}
+
 /*
  * This callback runs from the timer interrupt, and checks
  * whether the watchdog thread has hung or not:
diff --git a/kernel/sys.c b/kernel/sys.c index e7dc0e10a48..f145c415bc1 100644 --- a/kernel/sys.c +++ b/kernel/sys.c | |||
@@ -1525,22 +1525,14 @@ SYSCALL_DEFINE2(setrlimit, unsigned int, resource, struct rlimit __user *, rlim) | |||
1525 | return -EINVAL; | 1525 | return -EINVAL; |
1526 | if (copy_from_user(&new_rlim, rlim, sizeof(*rlim))) | 1526 | if (copy_from_user(&new_rlim, rlim, sizeof(*rlim))) |
1527 | return -EFAULT; | 1527 | return -EFAULT; |
1528 | if (new_rlim.rlim_cur > new_rlim.rlim_max) | ||
1529 | return -EINVAL; | ||
1528 | old_rlim = current->signal->rlim + resource; | 1530 | old_rlim = current->signal->rlim + resource; |
1529 | if ((new_rlim.rlim_max > old_rlim->rlim_max) && | 1531 | if ((new_rlim.rlim_max > old_rlim->rlim_max) && |
1530 | !capable(CAP_SYS_RESOURCE)) | 1532 | !capable(CAP_SYS_RESOURCE)) |
1531 | return -EPERM; | 1533 | return -EPERM; |
1532 | 1534 | if (resource == RLIMIT_NOFILE && new_rlim.rlim_max > sysctl_nr_open) | |
1533 | if (resource == RLIMIT_NOFILE) { | 1535 | return -EPERM; |
1534 | if (new_rlim.rlim_max == RLIM_INFINITY) | ||
1535 | new_rlim.rlim_max = sysctl_nr_open; | ||
1536 | if (new_rlim.rlim_cur == RLIM_INFINITY) | ||
1537 | new_rlim.rlim_cur = sysctl_nr_open; | ||
1538 | if (new_rlim.rlim_max > sysctl_nr_open) | ||
1539 | return -EPERM; | ||
1540 | } | ||
1541 | |||
1542 | if (new_rlim.rlim_cur > new_rlim.rlim_max) | ||
1543 | return -EINVAL; | ||
1544 | 1536 | ||
1545 | retval = security_task_setrlimit(resource, &new_rlim); | 1537 | retval = security_task_setrlimit(resource, &new_rlim); |
1546 | if (retval) | 1538 | if (retval) |
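
The net effect of the reordering is user visible for RLIMIT_NOFILE: a
hard limit above sysctl_nr_open (including RLIM_INFINITY, which used to
be silently clamped) is now rejected with EPERM. A minimal user-space
sketch of the new behavior, assuming nr_open is finite and the caller
lacks CAP_SYS_RESOURCE:

    #include <sys/resource.h>
    #include <stdio.h>

    int main(void)
    {
            struct rlimit rl = { .rlim_cur = 1024, .rlim_max = RLIM_INFINITY };

            /* previously clamped to sysctl_nr_open; now fails outright */
            if (setrlimit(RLIMIT_NOFILE, &rl) < 0)
                    perror("setrlimit");    /* expect EPERM */
            return 0;
    }
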
diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 368d1638ee7..790f9d78566 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c | |||
@@ -809,7 +809,7 @@ static struct ctl_table kern_table[] = { | |||
809 | .data = &softlockup_thresh, | 809 | .data = &softlockup_thresh, |
810 | .maxlen = sizeof(int), | 810 | .maxlen = sizeof(int), |
811 | .mode = 0644, | 811 | .mode = 0644, |
812 | .proc_handler = &proc_dointvec_minmax, | 812 | .proc_handler = &proc_dosoftlockup_thresh, |
813 | .strategy = &sysctl_intvec, | 813 | .strategy = &sysctl_intvec, |
814 | .extra1 = &neg_one, | 814 | .extra1 = &neg_one, |
815 | .extra2 = &sixty, | 815 | .extra2 = &sixty, |
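
With the handler swapped in, any write to the threshold re-arms every
watchdog before the new value is stored, which avoids spurious lockup
reports from stale timestamps when the threshold is lowered. A small
user-space sketch (the proc path follows from the sysctl name above):

    #include <stdio.h>

    int main(void)
    {
            FILE *f = fopen("/proc/sys/kernel/softlockup_thresh", "w");

            if (!f)
                    return 1;
            /* proc_dosoftlockup_thresh() touches all watchdogs, then
             * proc_dointvec_minmax() range-checks and stores the value */
            fprintf(f, "60\n");
            fclose(f);
            return 0;
    }
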
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c index 63e05d423a0..21a5ca84951 100644 --- a/kernel/time/tick-common.c +++ b/kernel/time/tick-common.c | |||
@@ -274,6 +274,21 @@ out_bc: | |||
274 | } | 274 | } |
275 | 275 | ||
276 | /* | 276 | /* |
277 | * Transfer the do_timer job away from a dying cpu. | ||
278 | * | ||
279 | * Called with interrupts disabled. | ||
280 | */ | ||
281 | static void tick_handover_do_timer(int *cpup) | ||
282 | { | ||
283 | if (*cpup == tick_do_timer_cpu) { | ||
284 | int cpu = cpumask_first(cpu_online_mask); | ||
285 | |||
286 | tick_do_timer_cpu = (cpu < nr_cpu_ids) ? cpu : | ||
287 | TICK_DO_TIMER_NONE; | ||
288 | } | ||
289 | } | ||
290 | |||
291 | /* | ||
277 | * Shutdown an event device on a given cpu: | 292 | * Shutdown an event device on a given cpu: |
278 | * | 293 | * |
279 | * This is called on a live CPU, when a CPU is dead. So we cannot | 294 | * This is called on a live CPU, when a CPU is dead. So we cannot |
@@ -297,13 +312,6 @@ static void tick_shutdown(unsigned int *cpup) | |||
297 | clockevents_exchange_device(dev, NULL); | 312 | clockevents_exchange_device(dev, NULL); |
298 | td->evtdev = NULL; | 313 | td->evtdev = NULL; |
299 | } | 314 | } |
300 | /* Transfer the do_timer job away from this cpu */ | ||
301 | if (*cpup == tick_do_timer_cpu) { | ||
302 | int cpu = cpumask_first(cpu_online_mask); | ||
303 | |||
304 | tick_do_timer_cpu = (cpu < nr_cpu_ids) ? cpu : | ||
305 | TICK_DO_TIMER_NONE; | ||
306 | } | ||
307 | spin_unlock_irqrestore(&tick_device_lock, flags); | 315 | spin_unlock_irqrestore(&tick_device_lock, flags); |
308 | } | 316 | } |
309 | 317 | ||
@@ -357,6 +365,10 @@ static int tick_notify(struct notifier_block *nb, unsigned long reason, | |||
357 | tick_broadcast_oneshot_control(reason); | 365 | tick_broadcast_oneshot_control(reason); |
358 | break; | 366 | break; |
359 | 367 | ||
368 | case CLOCK_EVT_NOTIFY_CPU_DYING: | ||
369 | tick_handover_do_timer(dev); | ||
370 | break; | ||
371 | |||
360 | case CLOCK_EVT_NOTIFY_CPU_DEAD: | 372 | case CLOCK_EVT_NOTIFY_CPU_DEAD: |
361 | tick_shutdown_broadcast_oneshot(dev); | 373 | tick_shutdown_broadcast_oneshot(dev); |
362 | tick_shutdown_broadcast(dev); | 374 | tick_shutdown_broadcast(dev); |
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c index 1b6c05bd0d0..d3f1ef4d5cb 100644 --- a/kernel/time/tick-sched.c +++ b/kernel/time/tick-sched.c | |||
@@ -134,7 +134,7 @@ __setup("nohz=", setup_tick_nohz); | |||
134 | * value. We do this unconditionally on any cpu, as we don't know whether the | 134 | * value. We do this unconditionally on any cpu, as we don't know whether the |
135 | * cpu, which has the update task assigned is in a long sleep. | 135 | * cpu, which has the update task assigned is in a long sleep. |
136 | */ | 136 | */ |
137 | void tick_nohz_update_jiffies(void) | 137 | static void tick_nohz_update_jiffies(void) |
138 | { | 138 | { |
139 | int cpu = smp_processor_id(); | 139 | int cpu = smp_processor_id(); |
140 | struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu); | 140 | struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu); |
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 2f32969c09d..9a236ffe2aa 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c | |||
@@ -17,6 +17,7 @@ | |||
17 | #include <linux/clocksource.h> | 17 | #include <linux/clocksource.h> |
18 | #include <linux/kallsyms.h> | 18 | #include <linux/kallsyms.h> |
19 | #include <linux/seq_file.h> | 19 | #include <linux/seq_file.h> |
20 | #include <linux/suspend.h> | ||
20 | #include <linux/debugfs.h> | 21 | #include <linux/debugfs.h> |
21 | #include <linux/hardirq.h> | 22 | #include <linux/hardirq.h> |
22 | #include <linux/kthread.h> | 23 | #include <linux/kthread.h> |
@@ -1736,9 +1737,12 @@ static void clear_ftrace_pid(struct pid *pid) | |||
1736 | { | 1737 | { |
1737 | struct task_struct *p; | 1738 | struct task_struct *p; |
1738 | 1739 | ||
1740 | rcu_read_lock(); | ||
1739 | do_each_pid_task(pid, PIDTYPE_PID, p) { | 1741 | do_each_pid_task(pid, PIDTYPE_PID, p) { |
1740 | clear_tsk_trace_trace(p); | 1742 | clear_tsk_trace_trace(p); |
1741 | } while_each_pid_task(pid, PIDTYPE_PID, p); | 1743 | } while_each_pid_task(pid, PIDTYPE_PID, p); |
1744 | rcu_read_unlock(); | ||
1745 | |||
1742 | put_pid(pid); | 1746 | put_pid(pid); |
1743 | } | 1747 | } |
1744 | 1748 | ||
@@ -1746,9 +1750,11 @@ static void set_ftrace_pid(struct pid *pid) | |||
1746 | { | 1750 | { |
1747 | struct task_struct *p; | 1751 | struct task_struct *p; |
1748 | 1752 | ||
1753 | rcu_read_lock(); | ||
1749 | do_each_pid_task(pid, PIDTYPE_PID, p) { | 1754 | do_each_pid_task(pid, PIDTYPE_PID, p) { |
1750 | set_tsk_trace_trace(p); | 1755 | set_tsk_trace_trace(p); |
1751 | } while_each_pid_task(pid, PIDTYPE_PID, p); | 1756 | } while_each_pid_task(pid, PIDTYPE_PID, p); |
1757 | rcu_read_unlock(); | ||
1752 | } | 1758 | } |
1753 | 1759 | ||
1754 | static void clear_ftrace_pid_task(struct pid **pid) | 1760 | static void clear_ftrace_pid_task(struct pid **pid) |
@@ -1965,6 +1971,7 @@ ftrace_enable_sysctl(struct ctl_table *table, int write, | |||
1965 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | 1971 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
1966 | 1972 | ||
1967 | static atomic_t ftrace_graph_active; | 1973 | static atomic_t ftrace_graph_active; |
1974 | static struct notifier_block ftrace_suspend_notifier; | ||
1968 | 1975 | ||
1969 | int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace) | 1976 | int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace) |
1970 | { | 1977 | { |
@@ -2043,6 +2050,27 @@ static int start_graph_tracing(void) | |||
2043 | return ret; | 2050 | return ret; |
2044 | } | 2051 | } |
2045 | 2052 | ||
2053 | /* | ||
2054 | * Hibernation protection. | ||
2055 | * The state of the current task is too unstable during | ||
2056 | * suspend/restore to disk. We want to protect against that. | ||
2057 | */ | ||
2058 | static int | ||
2059 | ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state, | ||
2060 | void *unused) | ||
2061 | { | ||
2062 | switch (state) { | ||
2063 | case PM_HIBERNATION_PREPARE: | ||
2064 | pause_graph_tracing(); | ||
2065 | break; | ||
2066 | |||
2067 | case PM_POST_HIBERNATION: | ||
2068 | unpause_graph_tracing(); | ||
2069 | break; | ||
2070 | } | ||
2071 | return NOTIFY_DONE; | ||
2072 | } | ||
2073 | |||
2046 | int register_ftrace_graph(trace_func_graph_ret_t retfunc, | 2074 | int register_ftrace_graph(trace_func_graph_ret_t retfunc, |
2047 | trace_func_graph_ent_t entryfunc) | 2075 | trace_func_graph_ent_t entryfunc) |
2048 | { | 2076 | { |
@@ -2050,6 +2078,9 @@ int register_ftrace_graph(trace_func_graph_ret_t retfunc, | |||
2050 | 2078 | ||
2051 | mutex_lock(&ftrace_sysctl_lock); | 2079 | mutex_lock(&ftrace_sysctl_lock); |
2052 | 2080 | ||
2081 | ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call; | ||
2082 | register_pm_notifier(&ftrace_suspend_notifier); | ||
2083 | |||
2053 | atomic_inc(&ftrace_graph_active); | 2084 | atomic_inc(&ftrace_graph_active); |
2054 | ret = start_graph_tracing(); | 2085 | ret = start_graph_tracing(); |
2055 | if (ret) { | 2086 | if (ret) { |
@@ -2075,6 +2106,7 @@ void unregister_ftrace_graph(void) | |||
2075 | ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub; | 2106 | ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub; |
2076 | ftrace_graph_entry = ftrace_graph_entry_stub; | 2107 | ftrace_graph_entry = ftrace_graph_entry_stub; |
2077 | ftrace_shutdown(FTRACE_STOP_FUNC_RET); | 2108 | ftrace_shutdown(FTRACE_STOP_FUNC_RET); |
2109 | unregister_pm_notifier(&ftrace_suspend_notifier); | ||
2078 | 2110 | ||
2079 | mutex_unlock(&ftrace_sysctl_lock); | 2111 | mutex_unlock(&ftrace_sysctl_lock); |
2080 | } | 2112 | } |
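
The suspend notifier used above follows the generic PM-notifier pattern;
a self-contained sketch with illustrative my_* names (the ftrace code
wires its callback into a static notifier_block at registration time,
exactly as in the hunk above):

    #include <linux/suspend.h>
    #include <linux/notifier.h>

    static int my_pm_callback(struct notifier_block *nb, unsigned long state,
                              void *unused)
    {
            switch (state) {
            case PM_HIBERNATION_PREPARE:
                    /* quiesce fragile state before the image is written */
                    break;
            case PM_POST_HIBERNATION:
                    /* resume normal operation after restore */
                    break;
            }
            return NOTIFY_DONE;
    }

    static struct notifier_block my_pm_nb = {
            .notifier_call = my_pm_callback,
    };

    /* register_pm_notifier(&my_pm_nb) at init,
     * unregister_pm_notifier(&my_pm_nb) at teardown */
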
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index 8b0daf0662e..bd38c5cfd8a 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c | |||
@@ -246,7 +246,7 @@ static inline int test_time_stamp(u64 delta) | |||
246 | return 0; | 246 | return 0; |
247 | } | 247 | } |
248 | 248 | ||
249 | #define BUF_PAGE_SIZE (PAGE_SIZE - sizeof(struct buffer_data_page)) | 249 | #define BUF_PAGE_SIZE (PAGE_SIZE - offsetof(struct buffer_data_page, data)) |
250 | 250 | ||
251 | /* | 251 | /* |
252 | * head_page == tail_page && head == tail then buffer is empty. | 252 | * head_page == tail_page && head == tail then buffer is empty. |
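
The sizeof() to offsetof() change matters because sizeof() includes any
trailing padding the compiler adds after the header fields, while
offsetof() of the payload does not, so the old macro could understate
the usable page space. A stand-alone demonstration with a hypothetical
stand-in struct:

    #include <stdio.h>
    #include <stddef.h>

    struct demo_page {
            unsigned long long time_stamp;  /* 8 bytes, 8-byte aligned */
            int commit;                     /* 4 bytes */
            unsigned char data[];           /* payload starts here */
    };

    int main(void)
    {
            /* offsetof(..., data) is 12, but sizeof() rounds up to 16 */
            printf("sizeof   = %zu\n", sizeof(struct demo_page));
            printf("offsetof = %zu\n", offsetof(struct demo_page, data));
            return 0;
    }
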
@@ -1025,12 +1025,8 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer, | |||
1025 | } | 1025 | } |
1026 | 1026 | ||
1027 | if (next_page == head_page) { | 1027 | if (next_page == head_page) { |
1028 | if (!(buffer->flags & RB_FL_OVERWRITE)) { | 1028 | if (!(buffer->flags & RB_FL_OVERWRITE)) |
1029 | /* reset write */ | ||
1030 | if (tail <= BUF_PAGE_SIZE) | ||
1031 | local_set(&tail_page->write, tail); | ||
1032 | goto out_unlock; | 1029 | goto out_unlock; |
1033 | } | ||
1034 | 1030 | ||
1035 | /* tail_page has not moved yet? */ | 1031 | /* tail_page has not moved yet? */ |
1036 | if (tail_page == cpu_buffer->tail_page) { | 1032 | if (tail_page == cpu_buffer->tail_page) { |
@@ -1105,6 +1101,10 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer, | |||
1105 | return event; | 1101 | return event; |
1106 | 1102 | ||
1107 | out_unlock: | 1103 | out_unlock: |
1104 | /* reset write */ | ||
1105 | if (tail <= BUF_PAGE_SIZE) | ||
1106 | local_set(&tail_page->write, tail); | ||
1107 | |||
1108 | __raw_spin_unlock(&cpu_buffer->lock); | 1108 | __raw_spin_unlock(&cpu_buffer->lock); |
1109 | local_irq_restore(flags); | 1109 | local_irq_restore(flags); |
1110 | return NULL; | 1110 | return NULL; |
@@ -2174,6 +2174,9 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer) | |||
2174 | 2174 | ||
2175 | cpu_buffer->overrun = 0; | 2175 | cpu_buffer->overrun = 0; |
2176 | cpu_buffer->entries = 0; | 2176 | cpu_buffer->entries = 0; |
2177 | |||
2178 | cpu_buffer->write_stamp = 0; | ||
2179 | cpu_buffer->read_stamp = 0; | ||
2177 | } | 2180 | } |
2178 | 2181 | ||
2179 | /** | 2182 | /** |
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index c580233add9..17bb88d86ac 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c | |||
@@ -40,7 +40,7 @@ | |||
40 | 40 | ||
41 | #define TRACE_BUFFER_FLAGS (RB_FL_OVERWRITE) | 41 | #define TRACE_BUFFER_FLAGS (RB_FL_OVERWRITE) |
42 | 42 | ||
43 | unsigned long __read_mostly tracing_max_latency = (cycle_t)ULONG_MAX; | 43 | unsigned long __read_mostly tracing_max_latency; |
44 | unsigned long __read_mostly tracing_thresh; | 44 | unsigned long __read_mostly tracing_thresh; |
45 | 45 | ||
46 | /* | 46 | /* |
@@ -3736,7 +3736,7 @@ static struct notifier_block trace_die_notifier = { | |||
3736 | * it if we decide to change what log level the ftrace dump | 3736 | * it if we decide to change what log level the ftrace dump |
3737 | * should be at. | 3737 | * should be at. |
3738 | */ | 3738 | */ |
3739 | #define KERN_TRACE KERN_INFO | 3739 | #define KERN_TRACE KERN_EMERG |
3740 | 3740 | ||
3741 | static void | 3741 | static void |
3742 | trace_printk_seq(struct trace_seq *s) | 3742 | trace_printk_seq(struct trace_seq *s) |
@@ -3770,6 +3770,7 @@ void ftrace_dump(void) | |||
3770 | dump_ran = 1; | 3770 | dump_ran = 1; |
3771 | 3771 | ||
3772 | /* No turning back! */ | 3772 | /* No turning back! */ |
3773 | tracing_off(); | ||
3773 | ftrace_kill(); | 3774 | ftrace_kill(); |
3774 | 3775 | ||
3775 | for_each_tracing_cpu(cpu) { | 3776 | for_each_tracing_cpu(cpu) { |
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c index 7c2e326bbc8..62a78d94353 100644 --- a/kernel/trace/trace_irqsoff.c +++ b/kernel/trace/trace_irqsoff.c | |||
@@ -380,6 +380,7 @@ static void stop_irqsoff_tracer(struct trace_array *tr) | |||
380 | 380 | ||
381 | static void __irqsoff_tracer_init(struct trace_array *tr) | 381 | static void __irqsoff_tracer_init(struct trace_array *tr) |
382 | { | 382 | { |
383 | tracing_max_latency = 0; | ||
383 | irqsoff_trace = tr; | 384 | irqsoff_trace = tr; |
384 | /* make sure that the tracer is visible */ | 385 | /* make sure that the tracer is visible */ |
385 | smp_wmb(); | 386 | smp_wmb(); |
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c index 43586b689e3..42ae1e77b6b 100644 --- a/kernel/trace/trace_sched_wakeup.c +++ b/kernel/trace/trace_sched_wakeup.c | |||
@@ -333,6 +333,7 @@ static void stop_wakeup_tracer(struct trace_array *tr) | |||
333 | 333 | ||
334 | static int wakeup_tracer_init(struct trace_array *tr) | 334 | static int wakeup_tracer_init(struct trace_array *tr) |
335 | { | 335 | { |
336 | tracing_max_latency = 0; | ||
336 | wakeup_trace = tr; | 337 | wakeup_trace = tr; |
337 | start_wakeup_tracer(tr); | 338 | start_wakeup_tracer(tr); |
338 | return 0; | 339 | return 0; |
diff --git a/kernel/wait.c b/kernel/wait.c index cd87131f2fc..42a2dbc181c 100644 --- a/kernel/wait.c +++ b/kernel/wait.c | |||
@@ -91,6 +91,15 @@ prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state) | |||
91 | } | 91 | } |
92 | EXPORT_SYMBOL(prepare_to_wait_exclusive); | 92 | EXPORT_SYMBOL(prepare_to_wait_exclusive); |
93 | 93 | ||
94 | /* | ||
95 | * finish_wait - clean up after waiting in a queue | ||
96 | * @q: waitqueue waited on | ||
97 | * @wait: wait descriptor | ||
98 | * | ||
99 | * Sets current thread back to running state and removes | ||
100 | * the wait descriptor from the given waitqueue if still | ||
101 | * queued. | ||
102 | */ | ||
94 | void finish_wait(wait_queue_head_t *q, wait_queue_t *wait) | 103 | void finish_wait(wait_queue_head_t *q, wait_queue_t *wait) |
95 | { | 104 | { |
96 | unsigned long flags; | 105 | unsigned long flags; |
@@ -117,6 +126,39 @@ void finish_wait(wait_queue_head_t *q, wait_queue_t *wait) | |||
117 | } | 126 | } |
118 | EXPORT_SYMBOL(finish_wait); | 127 | EXPORT_SYMBOL(finish_wait); |
119 | 128 | ||
129 | /* | ||
130 | * abort_exclusive_wait - abort exclusive waiting in a queue | ||
131 | * @q: waitqueue waited on | ||
132 | * @wait: wait descriptor | ||
133 | * @state: runstate of the waiter to be woken | ||
134 | * @key: key to identify a wait bit queue or %NULL | ||
135 | * | ||
136 | * Sets current thread back to running state and removes | ||
137 | * the wait descriptor from the given waitqueue if still | ||
138 | * queued. | ||
139 | * | ||
140 | * Wakes up the next waiter if the caller is concurrently | ||
141 | * woken up through the queue. | ||
142 | * | ||
143 | * This prevents waiter starvation where an exclusive waiter | ||
144 | * aborts and is woken up concurrently and no one wakes up | ||
145 | * the next waiter. | ||
146 | */ | ||
147 | void abort_exclusive_wait(wait_queue_head_t *q, wait_queue_t *wait, | ||
148 | unsigned int mode, void *key) | ||
149 | { | ||
150 | unsigned long flags; | ||
151 | |||
152 | __set_current_state(TASK_RUNNING); | ||
153 | spin_lock_irqsave(&q->lock, flags); | ||
154 | if (!list_empty(&wait->task_list)) | ||
155 | list_del_init(&wait->task_list); | ||
156 | else if (waitqueue_active(q)) | ||
157 | __wake_up_common(q, mode, 1, 0, key); | ||
158 | spin_unlock_irqrestore(&q->lock, flags); | ||
159 | } | ||
160 | EXPORT_SYMBOL(abort_exclusive_wait); | ||
161 | |||
120 | int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key) | 162 | int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key) |
121 | { | 163 | { |
122 | int ret = default_wake_function(wait, mode, sync, key); | 164 | int ret = default_wake_function(wait, mode, sync, key); |
@@ -177,17 +219,20 @@ int __sched | |||
177 | __wait_on_bit_lock(wait_queue_head_t *wq, struct wait_bit_queue *q, | 219 | __wait_on_bit_lock(wait_queue_head_t *wq, struct wait_bit_queue *q, |
178 | int (*action)(void *), unsigned mode) | 220 | int (*action)(void *), unsigned mode) |
179 | { | 221 | { |
180 | int ret = 0; | ||
181 | |||
182 | do { | 222 | do { |
223 | int ret; | ||
224 | |||
183 | prepare_to_wait_exclusive(wq, &q->wait, mode); | 225 | prepare_to_wait_exclusive(wq, &q->wait, mode); |
184 | if (test_bit(q->key.bit_nr, q->key.flags)) { | 226 | if (!test_bit(q->key.bit_nr, q->key.flags)) |
185 | if ((ret = (*action)(q->key.flags))) | 227 | continue; |
186 | break; | 228 | ret = action(q->key.flags); |
187 | } | 229 | if (!ret) |
230 | continue; | ||
231 | abort_exclusive_wait(wq, &q->wait, mode, &q->key); | ||
232 | return ret; | ||
188 | } while (test_and_set_bit(q->key.bit_nr, q->key.flags)); | 233 | } while (test_and_set_bit(q->key.bit_nr, q->key.flags)); |
189 | finish_wait(wq, &q->wait); | 234 | finish_wait(wq, &q->wait); |
190 | return ret; | 235 | return 0; |
191 | } | 236 | } |
192 | EXPORT_SYMBOL(__wait_on_bit_lock); | 237 | EXPORT_SYMBOL(__wait_on_bit_lock); |
193 | 238 | ||
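
__wait_on_bit_lock() is the backend of the wait_on_bit_lock() API, which
treats a bit in a flags word as a sleeping lock. A hedged usage sketch of
the lock/unlock pairing (my_flags, MY_LOCK_BIT and my_action are
illustrative names):

    #include <linux/wait.h>
    #include <linux/sched.h>
    #include <linux/bitops.h>

    static unsigned long my_flags;
    #define MY_LOCK_BIT 0

    static int my_action(void *word)
    {
            schedule();     /* sleep; returning 0 means keep waiting */
            return 0;
    }

    static void my_lock(void)
    {
            wait_on_bit_lock(&my_flags, MY_LOCK_BIT, my_action,
                             TASK_UNINTERRUPTIBLE);
    }

    static void my_unlock(void)
    {
            clear_bit_unlock(MY_LOCK_BIT, &my_flags);
            smp_mb__after_clear_bit();
            wake_up_bit(&my_flags, MY_LOCK_BIT);
    }
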
diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 2f445833ae3..1f0c509b40d 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c | |||
@@ -971,6 +971,8 @@ undo: | |||
971 | } | 971 | } |
972 | 972 | ||
973 | #ifdef CONFIG_SMP | 973 | #ifdef CONFIG_SMP |
974 | static struct workqueue_struct *work_on_cpu_wq __read_mostly; | ||
975 | |||
974 | struct work_for_cpu { | 976 | struct work_for_cpu { |
975 | struct work_struct work; | 977 | struct work_struct work; |
976 | long (*fn)(void *); | 978 | long (*fn)(void *); |
@@ -991,8 +993,8 @@ static void do_work_for_cpu(struct work_struct *w) | |||
991 | * @fn: the function to run | 993 | * @fn: the function to run |
992 | * @arg: the function arg | 994 | * @arg: the function arg |
993 | * | 995 | * |
994 | * This will return -EINVAL if the cpu is not online, or the return value | 996 | * This will return the value @fn returns. |
995 | * of @fn otherwise. | 997 | * It is up to the caller to ensure that the cpu doesn't go offline. |
996 | */ | 998 | */ |
997 | long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg) | 999 | long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg) |
998 | { | 1000 | { |
@@ -1001,14 +1003,8 @@ long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg) | |||
1001 | INIT_WORK(&wfc.work, do_work_for_cpu); | 1003 | INIT_WORK(&wfc.work, do_work_for_cpu); |
1002 | wfc.fn = fn; | 1004 | wfc.fn = fn; |
1003 | wfc.arg = arg; | 1005 | wfc.arg = arg; |
1004 | get_online_cpus(); | 1006 | queue_work_on(cpu, work_on_cpu_wq, &wfc.work); |
1005 | if (unlikely(!cpu_online(cpu))) | 1007 | flush_work(&wfc.work); |
1006 | wfc.ret = -EINVAL; | ||
1007 | else { | ||
1008 | schedule_work_on(cpu, &wfc.work); | ||
1009 | flush_work(&wfc.work); | ||
1010 | } | ||
1011 | put_online_cpus(); | ||
1012 | 1008 | ||
1013 | return wfc.ret; | 1009 | return wfc.ret; |
1014 | } | 1010 | } |
@@ -1025,4 +1021,8 @@ void __init init_workqueues(void) | |||
1025 | hotcpu_notifier(workqueue_cpu_callback, 0); | 1021 | hotcpu_notifier(workqueue_cpu_callback, 0); |
1026 | keventd_wq = create_workqueue("events"); | 1022 | keventd_wq = create_workqueue("events"); |
1027 | BUG_ON(!keventd_wq); | 1023 | BUG_ON(!keventd_wq); |
1024 | #ifdef CONFIG_SMP | ||
1025 | work_on_cpu_wq = create_workqueue("work_on_cpu"); | ||
1026 | BUG_ON(!work_on_cpu_wq); | ||
1027 | #endif | ||
1028 | } | 1028 | } |
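
Since work_on_cpu() no longer takes the hotplug lock itself, the comment
change above shifts that burden to callers. A hedged caller-side sketch
(read_cpu_state and sample_cpu are illustrative names):

    #include <linux/workqueue.h>
    #include <linux/cpu.h>
    #include <linux/smp.h>
    #include <linux/errno.h>

    static long read_cpu_state(void *arg)
    {
            /* runs on the requested cpu via the work_on_cpu workqueue */
            return raw_smp_processor_id();
    }

    static long sample_cpu(unsigned int cpu)
    {
            long ret;

            get_online_cpus();      /* keep @cpu from going offline */
            if (cpu_online(cpu))
                    ret = work_on_cpu(cpu, read_cpu_state, NULL);
            else
                    ret = -ENODEV;
            put_online_cpus();
            return ret;
    }
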