Diffstat:
 -rw-r--r--  arch/powerpc/platforms/powermac/pic.c |  3
 -rw-r--r--  arch/x86/kernel/Makefile              |  2
 -rw-r--r--  arch/x86/kernel/process.c             |  2
 -rw-r--r--  arch/x86/kernel/smpboot.c             |  2
 -rw-r--r--  fs/autofs4/root.c                     |  2
 -rw-r--r--  fs/namei.c                            |  3
 -rw-r--r--  kernel/rcutree.c                      | 54
 -rw-r--r--  kernel/rcutree_plugin.h               | 11
 8 files changed, 65 insertions(+), 14 deletions(-)
diff --git a/arch/powerpc/platforms/powermac/pic.c b/arch/powerpc/platforms/powermac/pic.c
index 9089b0421191..7667db448aa7 100644
--- a/arch/powerpc/platforms/powermac/pic.c
+++ b/arch/powerpc/platforms/powermac/pic.c
@@ -715,7 +715,8 @@ static struct syscore_ops pmacpic_syscore_ops = {
 
 static int __init init_pmacpic_syscore(void)
 {
-	register_syscore_ops(&pmacpic_syscore_ops);
+	if (pmac_irq_hw[0])
+		register_syscore_ops(&pmacpic_syscore_ops);
 	return 0;
 }
 
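Note on the pic.c hunk: pmac_irq_hw[0] stays NULL on machines that never probed the old PowerMac PIC, so the added guard keeps the suspend/resume hooks from being registered (and later poking absent hardware). A minimal sketch of the same guard pattern -- struct syscore_ops and register_syscore_ops() are the real kernel API, while the my_* names and the probe detail are hypothetical:

#include <linux/init.h>
#include <linux/syscore_ops.h>

static void __iomem *my_hw_base;	/* set by probe; NULL if hardware absent */

static int my_suspend(void)
{
	/* touches my_hw_base, so it must never run when unprobed */
	return 0;
}

static struct syscore_ops my_syscore_ops = {
	.suspend = my_suspend,
};

static int __init my_syscore_init(void)
{
	if (my_hw_base)		/* same idea as the pmac_irq_hw[0] check */
		register_syscore_ops(&my_syscore_ops);
	return 0;
}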
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index f5abe3a245b8..90b06d4daee2 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -8,6 +8,7 @@ CPPFLAGS_vmlinux.lds += -U$(UTS_MACHINE)
 
 ifdef CONFIG_FUNCTION_TRACER
 # Do not profile debug and lowlevel utilities
+CFLAGS_REMOVE_tsc.o = -pg
 CFLAGS_REMOVE_rtc.o = -pg
 CFLAGS_REMOVE_paravirt-spinlocks.o = -pg
 CFLAGS_REMOVE_pvclock.o = -pg
@@ -28,6 +29,7 @@ CFLAGS_paravirt.o := $(nostackp)
 GCOV_PROFILE_vsyscall_64.o := n
 GCOV_PROFILE_hpet.o := n
 GCOV_PROFILE_tsc.o := n
+GCOV_PROFILE_vread_tsc_64.o := n
 GCOV_PROFILE_paravirt.o := n
 
 # vread_tsc_64 is hot and should be fully optimized:
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 426a5b66f7e4..2e4928d45a2d 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -642,7 +642,7 @@ static int __init idle_setup(char *str)
 		boot_option_idle_override = IDLE_POLL;
 	} else if (!strcmp(str, "mwait")) {
 		boot_option_idle_override = IDLE_FORCE_MWAIT;
-		WARN_ONCE(1, "\"idle=mwait\" will be removed in 2012\"\n");
+		WARN_ONCE(1, "\"idle=mwait\" will be removed in 2012\n");
 	} else if (!strcmp(str, "halt")) {
 		/*
 		 * When the boot option of idle=halt is added, halt is
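Note on the process.c hunk: the old format string carried a stray \" after the year, so the boot warning printed a dangling quote. A throwaway userspace illustration of the two strings (plain printf, not kernel code):

#include <stdio.h>

int main(void)
{
	/* old string: the trailing \" prints a spurious quote after 2012 */
	printf("\"idle=mwait\" will be removed in 2012\"\n");
	/* fixed string */
	printf("\"idle=mwait\" will be removed in 2012\n");
	return 0;
}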
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index eefd96765e79..33a0c11797de 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -1332,7 +1332,7 @@ static inline void mwait_play_dead(void)
 	void *mwait_ptr;
 	struct cpuinfo_x86 *c = __this_cpu_ptr(&cpu_info);
 
-	if (!this_cpu_has(X86_FEATURE_MWAIT) && mwait_usable(c))
+	if (!(this_cpu_has(X86_FEATURE_MWAIT) && mwait_usable(c)))
 		return;
 	if (!this_cpu_has(X86_FEATURE_CLFLSH))
 		return;
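Note on the smpboot.c hunk: this is an operator-precedence fix. The old condition negated only the feature test, so a CPU without MWAIT (when mwait_usable() was false) or with an unusable MWAIT fell through and tried to MWAIT anyway; the intent is to bail out unless both hold, i.e. !(a && b), equivalently !a || !b by De Morgan. A quick truth-table check in plain C:

#include <stdio.h>

int main(void)
{
	for (int has = 0; has <= 1; has++)
		for (int usable = 0; usable <= 1; usable++)
			printf("has=%d usable=%d old_bail=%d new_bail=%d\n",
			       has, usable,
			       !has && usable,	/* old: bails only for (0,1) */
			       !(has && usable));	/* new: proceeds only for (1,1) */
	return 0;
}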
diff --git a/fs/autofs4/root.c b/fs/autofs4/root.c
index 87d95a8cddbc..f55ae23b137e 100644
--- a/fs/autofs4/root.c
+++ b/fs/autofs4/root.c
@@ -583,8 +583,6 @@ static int autofs4_dir_unlink(struct inode *dir, struct dentry *dentry)
 	if (!autofs4_oz_mode(sbi) && !capable(CAP_SYS_ADMIN))
 		return -EACCES;
 
-	dentry_unhash(dentry);
-
 	if (atomic_dec_and_test(&ino->count)) {
 		p_ino = autofs4_dentry_ino(dentry->d_parent);
 		if (p_ino && dentry->d_parent != dentry)
diff --git a/fs/namei.c b/fs/namei.c
index 1ab641f2e78e..e2e4e8d032ee 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -2579,6 +2579,7 @@ int vfs_rmdir(struct inode *dir, struct dentry *dentry)
 	if (error)
 		goto out;
 
+	shrink_dcache_parent(dentry);
 	error = dir->i_op->rmdir(dir, dentry);
 	if (error)
 		goto out;
@@ -2993,6 +2994,8 @@ static int vfs_rename_dir(struct inode *old_dir, struct dentry *old_dentry,
 	if (d_mountpoint(old_dentry) || d_mountpoint(new_dentry))
 		goto out;
 
+	if (target)
+		shrink_dcache_parent(new_dentry);
 	error = old_dir->i_op->rename(old_dir, old_dentry, new_dir, new_dentry);
 	if (error)
 		goto out;
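Note on the autofs4 and namei.c hunks: they belong to one series that moves dentry pruning out of individual filesystems (autofs4 drops its dentry_unhash() call above) and into the VFS, which now runs shrink_dcache_parent() once, right before ->rmdir() and, when a rename target exists, before ->rename(). A paraphrased sketch of the resulting ordering inside vfs_rmdir() -- not the literal function body; permission checks, security hooks, and some error handling are elided, and it assumes the kernel-internal types available in fs/namei.c:

int vfs_rmdir_sketch(struct inode *dir, struct dentry *dentry)
{
	int error = -EBUSY;

	mutex_lock(&dentry->d_inode->i_mutex);
	if (d_mountpoint(dentry))
		goto out;

	/* prune unused child dentries in one central place, instead of
	 * every filesystem unhashing on its own */
	shrink_dcache_parent(dentry);
	error = dir->i_op->rmdir(dir, dentry);
out:
	mutex_unlock(&dentry->d_inode->i_mutex);
	return error;
}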
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 77a7671dd147..89419ff92e99 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -1648,7 +1648,6 @@ static int __cpuinit rcu_spawn_one_cpu_kthread(int cpu)
 	if (IS_ERR(t))
 		return PTR_ERR(t);
 	kthread_bind(t, cpu);
-	set_task_state(t, TASK_INTERRUPTIBLE);
 	per_cpu(rcu_cpu_kthread_cpu, cpu) = cpu;
 	WARN_ON_ONCE(per_cpu(rcu_cpu_kthread_task, cpu) != NULL);
 	per_cpu(rcu_cpu_kthread_task, cpu) = t;
@@ -1756,7 +1755,6 @@ static int __cpuinit rcu_spawn_one_node_kthread(struct rcu_state *rsp,
 	if (IS_ERR(t))
 		return PTR_ERR(t);
 	raw_spin_lock_irqsave(&rnp->lock, flags);
-	set_task_state(t, TASK_INTERRUPTIBLE);
 	rnp->node_kthread_task = t;
 	raw_spin_unlock_irqrestore(&rnp->lock, flags);
 	sp.sched_priority = 99;
@@ -1765,6 +1763,8 @@ static int __cpuinit rcu_spawn_one_node_kthread(struct rcu_state *rsp,
 	return rcu_spawn_one_boost_kthread(rsp, rnp, rnp_index);
 }
 
+static void rcu_wake_one_boost_kthread(struct rcu_node *rnp);
+
 /*
  * Spawn all kthreads -- called as soon as the scheduler is running.
  */
@@ -1772,18 +1772,30 @@ static int __init rcu_spawn_kthreads(void)
 {
 	int cpu;
 	struct rcu_node *rnp;
+	struct task_struct *t;
 
 	rcu_kthreads_spawnable = 1;
 	for_each_possible_cpu(cpu) {
 		per_cpu(rcu_cpu_has_work, cpu) = 0;
-		if (cpu_online(cpu))
+		if (cpu_online(cpu)) {
 			(void)rcu_spawn_one_cpu_kthread(cpu);
+			t = per_cpu(rcu_cpu_kthread_task, cpu);
+			if (t)
+				wake_up_process(t);
+		}
 	}
 	rnp = rcu_get_root(rcu_state);
 	(void)rcu_spawn_one_node_kthread(rcu_state, rnp);
+	if (rnp->node_kthread_task)
+		wake_up_process(rnp->node_kthread_task);
 	if (NUM_RCU_NODES > 1) {
-		rcu_for_each_leaf_node(rcu_state, rnp)
+		rcu_for_each_leaf_node(rcu_state, rnp) {
 			(void)rcu_spawn_one_node_kthread(rcu_state, rnp);
+			t = rnp->node_kthread_task;
+			if (t)
+				wake_up_process(t);
+			rcu_wake_one_boost_kthread(rnp);
+		}
 	}
 	return 0;
 }
@@ -2188,14 +2200,14 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptible)
 	raw_spin_unlock_irqrestore(&rsp->onofflock, flags);
 }
 
-static void __cpuinit rcu_online_cpu(int cpu)
+static void __cpuinit rcu_prepare_cpu(int cpu)
 {
 	rcu_init_percpu_data(cpu, &rcu_sched_state, 0);
 	rcu_init_percpu_data(cpu, &rcu_bh_state, 0);
 	rcu_preempt_init_percpu_data(cpu);
 }
 
-static void __cpuinit rcu_online_kthreads(int cpu)
+static void __cpuinit rcu_prepare_kthreads(int cpu)
 {
 	struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, cpu);
 	struct rcu_node *rnp = rdp->mynode;
@@ -2209,6 +2221,31 @@ static void __cpuinit rcu_online_kthreads(int cpu)
 }
 
 /*
+ * kthread_create() creates threads in TASK_UNINTERRUPTIBLE state,
+ * but the RCU threads are woken on demand, and if demand is low this
+ * could be a while triggering the hung task watchdog.
+ *
+ * In order to avoid this, poke all tasks once the CPU is fully
+ * up and running.
+ */
+static void __cpuinit rcu_online_kthreads(int cpu)
+{
+	struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, cpu);
+	struct rcu_node *rnp = rdp->mynode;
+	struct task_struct *t;
+
+	t = per_cpu(rcu_cpu_kthread_task, cpu);
+	if (t)
+		wake_up_process(t);
+
+	t = rnp->node_kthread_task;
+	if (t)
+		wake_up_process(t);
+
+	rcu_wake_one_boost_kthread(rnp);
+}
+
+/*
  * Handle CPU online/offline notification events.
  */
 static int __cpuinit rcu_cpu_notify(struct notifier_block *self,
@@ -2221,10 +2258,11 @@ static int __cpuinit rcu_cpu_notify(struct notifier_block *self,
 	switch (action) {
 	case CPU_UP_PREPARE:
 	case CPU_UP_PREPARE_FROZEN:
-		rcu_online_cpu(cpu);
-		rcu_online_kthreads(cpu);
+		rcu_prepare_cpu(cpu);
+		rcu_prepare_kthreads(cpu);
 		break;
 	case CPU_ONLINE:
+		rcu_online_kthreads(cpu);
 	case CPU_DOWN_FAILED:
 		rcu_node_kthread_setaffinity(rnp, -1);
 		rcu_cpu_kthread_setrt(cpu, 1);
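Note on the rcutree.c hunks: kthread_create() hands back a thread that sleeps until its first wake_up_process(); the dropped set_task_state(t, TASK_INTERRUPTIBLE) calls poked the sleeping thread's state from outside instead of waking it, which (per the new comment above) could leave it unwoken long enough to trip the hung-task watchdog. The patch switches to an explicit create-now/wake-later pattern. A minimal sketch of that pattern -- kthread_create(), wake_up_process(), and kthread_should_stop() are the real APIs, while my_thread_fn and the timing of the wake are illustrative:

#include <linux/init.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/err.h>

static struct task_struct *my_task;

static int my_thread_fn(void *unused)
{
	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		schedule();			/* sleep until woken */
		__set_current_state(TASK_RUNNING);
		/* ... do the on-demand work here ... */
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}

static int __init my_spawn(void)
{
	/* created asleep; it stays parked until wake_up_process() */
	my_task = kthread_create(my_thread_fn, NULL, "my_kthread");
	if (IS_ERR(my_task))
		return PTR_ERR(my_task);
	/* wake it explicitly once setup is complete -- the equivalent
	 * of rcu_online_kthreads() poking each RCU thread */
	wake_up_process(my_task);
	return 0;
}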
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index a767b7dac365..c8bff3099a89 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -1295,7 +1295,6 @@ static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
 	if (IS_ERR(t))
 		return PTR_ERR(t);
 	raw_spin_lock_irqsave(&rnp->lock, flags);
-	set_task_state(t, TASK_INTERRUPTIBLE);
 	rnp->boost_kthread_task = t;
 	raw_spin_unlock_irqrestore(&rnp->lock, flags);
 	sp.sched_priority = RCU_KTHREAD_PRIO;
@@ -1303,6 +1302,12 @@ static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
 	return 0;
 }
 
+static void __cpuinit rcu_wake_one_boost_kthread(struct rcu_node *rnp)
+{
+	if (rnp->boost_kthread_task)
+		wake_up_process(rnp->boost_kthread_task);
+}
+
 #else /* #ifdef CONFIG_RCU_BOOST */
 
 static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
@@ -1326,6 +1331,10 @@ static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
 	return 0;
 }
 
+static void __cpuinit rcu_wake_one_boost_kthread(struct rcu_node *rnp)
+{
+}
+
 #endif /* #else #ifdef CONFIG_RCU_BOOST */
 
 #ifndef CONFIG_SMP
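Note on the rcutree_plugin.h hunks: the new rcu_wake_one_boost_kthread() gets a real body under CONFIG_RCU_BOOST and an empty stub otherwise, so the callers added in rcutree.c compile without any #ifdefs of their own. The general idiom, with hypothetical CONFIG_MY_FEATURE/my_* names (only wake_up_process() and struct task_struct are the real API):

#include <linux/sched.h>

struct my_ctx {
	struct task_struct *task;	/* NULL when never spawned */
};

#ifdef CONFIG_MY_FEATURE
static void my_wake_helper(struct my_ctx *ctx)
{
	if (ctx->task)
		wake_up_process(ctx->task);
}
#else /* #ifdef CONFIG_MY_FEATURE */
static void my_wake_helper(struct my_ctx *ctx)
{
	/* feature compiled out: nothing to wake */
}
#endif /* #else #ifdef CONFIG_MY_FEATURE */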