-rw-r--r--  fs/dcache.c             | 51
-rw-r--r--  fs/namei.c              |  2
-rw-r--r--  kernel/rcutree.c        | 26
-rw-r--r--  kernel/rcutree_plugin.h | 15
-rw-r--r--  kernel/sched.c          |  3
5 files changed, 75 insertions(+), 22 deletions(-)
diff --git a/fs/dcache.c b/fs/dcache.c
index 37f72ee5bf7c..6e4ea6d87774 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -2213,14 +2213,15 @@ static void dentry_unlock_parents_for_move(struct dentry *dentry,
  * The hash value has to match the hash queue that the dentry is on..
  */
 /*
- * d_move - move a dentry
+ * __d_move - move a dentry
  * @dentry: entry to move
  * @target: new dentry
  *
  * Update the dcache to reflect the move of a file name. Negative
- * dcache entries should not be moved in this way.
+ * dcache entries should not be moved in this way. Caller must hold
+ * rename_lock.
  */
-void d_move(struct dentry * dentry, struct dentry * target)
+static void __d_move(struct dentry * dentry, struct dentry * target)
 {
 	if (!dentry->d_inode)
 		printk(KERN_WARNING "VFS: moving negative dcache entry\n");
@@ -2228,8 +2229,6 @@ void d_move(struct dentry * dentry, struct dentry * target)
 	BUG_ON(d_ancestor(dentry, target));
 	BUG_ON(d_ancestor(target, dentry));
 
-	write_seqlock(&rename_lock);
-
 	dentry_lock_for_move(dentry, target);
 
 	write_seqcount_begin(&dentry->d_seq);
@@ -2275,6 +2274,20 @@ void d_move(struct dentry * dentry, struct dentry * target)
 	spin_unlock(&target->d_lock);
 	fsnotify_d_move(dentry);
 	spin_unlock(&dentry->d_lock);
+}
+
+/*
+ * d_move - move a dentry
+ * @dentry: entry to move
+ * @target: new dentry
+ *
+ * Update the dcache to reflect the move of a file name. Negative
+ * dcache entries should not be moved in this way.
+ */
+void d_move(struct dentry *dentry, struct dentry *target)
+{
+	write_seqlock(&rename_lock);
+	__d_move(dentry, target);
 	write_sequnlock(&rename_lock);
 }
 EXPORT_SYMBOL(d_move);
@@ -2302,7 +2315,7 @@ struct dentry *d_ancestor(struct dentry *p1, struct dentry *p2)
  * This helper attempts to cope with remotely renamed directories
  *
  * It assumes that the caller is already holding
- * dentry->d_parent->d_inode->i_mutex and the inode->i_lock
+ * dentry->d_parent->d_inode->i_mutex, inode->i_lock and rename_lock
  *
  * Note: If ever the locking in lock_rename() changes, then please
  * remember to update this too...
@@ -2317,11 +2330,6 @@ static struct dentry *__d_unalias(struct inode *inode,
 	if (alias->d_parent == dentry->d_parent)
 		goto out_unalias;
 
-	/* Check for loops */
-	ret = ERR_PTR(-ELOOP);
-	if (d_ancestor(alias, dentry))
-		goto out_err;
-
 	/* See lock_rename() */
 	ret = ERR_PTR(-EBUSY);
 	if (!mutex_trylock(&dentry->d_sb->s_vfs_rename_mutex))
@@ -2331,7 +2339,7 @@ static struct dentry *__d_unalias(struct inode *inode,
 		goto out_err;
 	m2 = &alias->d_parent->d_inode->i_mutex;
 out_unalias:
-	d_move(alias, dentry);
+	__d_move(alias, dentry);
 	ret = alias;
 out_err:
 	spin_unlock(&inode->i_lock);
@@ -2416,15 +2424,24 @@ struct dentry *d_materialise_unique(struct dentry *dentry, struct inode *inode)
 	alias = __d_find_alias(inode, 0);
 	if (alias) {
 		actual = alias;
-		/* Is this an anonymous mountpoint that we could splice
-		 * into our tree? */
-		if (IS_ROOT(alias)) {
+		write_seqlock(&rename_lock);
+
+		if (d_ancestor(alias, dentry)) {
+			/* Check for loops */
+			actual = ERR_PTR(-ELOOP);
+		} else if (IS_ROOT(alias)) {
+			/* Is this an anonymous mountpoint that we
+			 * could splice into our tree? */
 			__d_materialise_dentry(dentry, alias);
+			write_sequnlock(&rename_lock);
 			__d_drop(alias);
 			goto found;
+		} else {
+			/* Nope, but we must(!) avoid directory
+			 * aliasing */
+			actual = __d_unalias(inode, dentry, alias);
 		}
-		/* Nope, but we must(!) avoid directory aliasing */
-		actual = __d_unalias(inode, dentry, alias);
+		write_sequnlock(&rename_lock);
 		if (IS_ERR(actual))
 			dput(alias);
 		goto out_nolock;
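
The fs/dcache.c changes above follow a common locking refactor: the public d_move() becomes a thin wrapper that takes rename_lock, and the real work moves into __d_move(), which assumes the lock is already held. That lets __d_unalias() and d_materialise_unique() take rename_lock once, perform the -ELOOP ancestry check, and do the move inside the same critical section, closing the window in which a concurrent rename could invalidate the check. A minimal userspace sketch of the wrapper/worker pattern follows; the names are illustrative and a pthread mutex stands in for the kernel seqlock, so this is not kernel code.

#include <pthread.h>

static pthread_mutex_t rename_lock = PTHREAD_MUTEX_INITIALIZER;

struct dentry;				/* opaque for the sketch */

/* Worker: caller must hold rename_lock. */
static void __do_move(struct dentry *dentry, struct dentry *target)
{
	(void)dentry;			/* the real rename work goes here, */
	(void)target;			/* all of it under rename_lock     */
}

/* Public wrapper: takes the lock, delegates to the worker. */
void do_move(struct dentry *dentry, struct dentry *target)
{
	pthread_mutex_lock(&rename_lock);
	__do_move(dentry, target);
	pthread_mutex_unlock(&rename_lock);
}

/* A caller that needs a check and the move to be atomic takes the lock
 * once and calls the worker directly, as __d_unalias() now does. */
int move_unless_loop(struct dentry *alias, struct dentry *dentry,
		     int (*is_ancestor)(struct dentry *, struct dentry *))
{
	int err = 0;

	pthread_mutex_lock(&rename_lock);
	if (is_ancestor(alias, dentry))
		err = -1;		/* moving would create a loop */
	else
		__do_move(alias, dentry);
	pthread_mutex_unlock(&rename_lock);
	return err;
}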
diff --git a/fs/namei.c b/fs/namei.c
index 0223c41fb114..5c867dd1c0b3 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -433,6 +433,8 @@ static int unlazy_walk(struct nameidata *nd, struct dentry *dentry)
 			goto err_parent;
 		BUG_ON(nd->inode != parent->d_inode);
 	} else {
+		if (dentry->d_parent != parent)
+			goto err_parent;
 		spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
 		if (!__d_rcu_to_refcount(dentry, nd->seq))
 			goto err_child;
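
The two-line fs/namei.c hunk is the matching fix on the lookup side: a dentry found during RCU walk may be moved by a concurrent rename before unlazy_walk() takes its d_lock, so the parent pointer has to be rechecked once the lock is held. A hedged sketch of that revalidate-after-locking pattern, with illustrative names rather than the kernel API:

#include <errno.h>
#include <pthread.h>

struct node {
	pthread_mutex_t lock;
	struct node *parent;
};

/* Lockless code saw 'expected_parent'; confirm it still holds once the
 * lock is taken, otherwise tell the caller to fall back and retry. */
int pin_child(struct node *child, struct node *expected_parent)
{
	pthread_mutex_lock(&child->lock);
	if (child->parent != expected_parent) {
		pthread_mutex_unlock(&child->lock);
		return -ECHILD;		/* concurrent move: retry lookup */
	}
	/* ...safe to take a reference on child here... */
	pthread_mutex_unlock(&child->lock);
	return 0;
}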
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 7e59ffb3d0ba..ba06207b1dd3 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -84,9 +84,32 @@ DEFINE_PER_CPU(struct rcu_data, rcu_bh_data);
 
 static struct rcu_state *rcu_state;
 
+/*
+ * The rcu_scheduler_active variable transitions from zero to one just
+ * before the first task is spawned.  So when this variable is zero, RCU
+ * can assume that there is but one task, allowing RCU to (for example)
+ * optimize synchronize_sched() to a simple barrier().  When this variable
+ * is one, RCU must actually do all the hard work required to detect real
+ * grace periods.  This variable is also used to suppress boot-time false
+ * positives from lockdep-RCU error checking.
+ */
 int rcu_scheduler_active __read_mostly;
 EXPORT_SYMBOL_GPL(rcu_scheduler_active);
 
+/*
+ * The rcu_scheduler_fully_active variable transitions from zero to one
+ * during the early_initcall() processing, which is after the scheduler
+ * is capable of creating new tasks.  So RCU processing (for example,
+ * creating tasks for RCU priority boosting) must be delayed until after
+ * rcu_scheduler_fully_active transitions from zero to one.  We also
+ * currently delay invocation of any RCU callbacks until after this point.
+ *
+ * It might later prove better for people registering RCU callbacks during
+ * early boot to take responsibility for these callbacks, but one step at
+ * a time.
+ */
+static int rcu_scheduler_fully_active __read_mostly;
+
 #ifdef CONFIG_RCU_BOOST
 
 /*
@@ -98,7 +121,6 @@ DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_status);
 DEFINE_PER_CPU(int, rcu_cpu_kthread_cpu);
 DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_loops);
 DEFINE_PER_CPU(char, rcu_cpu_has_work);
-static char rcu_kthreads_spawnable;
 
 #endif /* #ifdef CONFIG_RCU_BOOST */
 
@@ -1467,6 +1489,8 @@ static void rcu_process_callbacks(struct softirq_action *unused)
  */
 static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
 {
+	if (unlikely(!ACCESS_ONCE(rcu_scheduler_fully_active)))
+		return;
 	if (likely(!rsp->boost)) {
 		rcu_do_batch(rsp, rdp);
 		return;
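
The kernel/rcutree.c hunks introduce rcu_scheduler_fully_active as a boot-phase gate: until the flag is set, invoke_rcu_callbacks() returns without doing anything, leaving callbacks queued for a later pass. The ACCESS_ONCE() read keeps the compiler from caching the flag across calls. A small sketch of the gating idiom under those assumptions, in userspace C with illustrative names:

/* ACCESS_ONCE() modeled with a volatile cast, as in kernels of this era. */
#define ACCESS_ONCE(x) (*(volatile __typeof__(x) *)&(x))

static int fully_active;		/* 0 at boot, set once when ready */

/* Work is silently deferred until the gate opens; callers simply retry
 * later (for RCU, the next softirq pass picks the callbacks up). */
void process_pending(void (*batch)(void))
{
	if (!ACCESS_ONCE(fully_active))
		return;
	batch();
}

void mark_fully_active(void)
{
	ACCESS_ONCE(fully_active) = 1;
}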
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index 14dc7dd00902..75113cb7c4fb 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -1532,7 +1532,7 @@ static int __cpuinit rcu_spawn_one_cpu_kthread(int cpu)
 	struct sched_param sp;
 	struct task_struct *t;
 
-	if (!rcu_kthreads_spawnable ||
+	if (!rcu_scheduler_fully_active ||
 	    per_cpu(rcu_cpu_kthread_task, cpu) != NULL)
 		return 0;
 	t = kthread_create(rcu_cpu_kthread, (void *)(long)cpu, "rcuc%d", cpu);
@@ -1639,7 +1639,7 @@ static int __cpuinit rcu_spawn_one_node_kthread(struct rcu_state *rsp,
 	struct sched_param sp;
 	struct task_struct *t;
 
-	if (!rcu_kthreads_spawnable ||
+	if (!rcu_scheduler_fully_active ||
 	    rnp->qsmaskinit == 0)
 		return 0;
 	if (rnp->node_kthread_task == NULL) {
@@ -1665,7 +1665,7 @@ static int __init rcu_spawn_kthreads(void)
 	int cpu;
 	struct rcu_node *rnp;
 
-	rcu_kthreads_spawnable = 1;
+	rcu_scheduler_fully_active = 1;
 	for_each_possible_cpu(cpu) {
 		per_cpu(rcu_cpu_has_work, cpu) = 0;
 		if (cpu_online(cpu))
@@ -1687,7 +1687,7 @@ static void __cpuinit rcu_prepare_kthreads(int cpu)
 	struct rcu_node *rnp = rdp->mynode;
 
 	/* Fire up the incoming CPU's kthread and leaf rcu_node kthread. */
-	if (rcu_kthreads_spawnable) {
+	if (rcu_scheduler_fully_active) {
 		(void)rcu_spawn_one_cpu_kthread(cpu);
 		if (rnp->node_kthread_task == NULL)
 			(void)rcu_spawn_one_node_kthread(rcu_state, rnp);
@@ -1726,6 +1726,13 @@ static void rcu_cpu_kthread_setrt(int cpu, int to_rt)
 {
 }
 
+static int __init rcu_scheduler_really_started(void)
+{
+	rcu_scheduler_fully_active = 1;
+	return 0;
+}
+early_initcall(rcu_scheduler_really_started);
+
 static void __cpuinit rcu_prepare_kthreads(int cpu)
 {
 }
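
The kernel/rcutree_plugin.h hunks retire the boost-only rcu_kthreads_spawnable flag in favor of the shared rcu_scheduler_fully_active, so one variable now gates both kthread spawning and callback invocation. In the !CONFIG_RCU_BOOST stub section, the new early_initcall() supplies the zero-to-one transition that rcu_spawn_kthreads() provides when boosting is enabled. A sketch of that one-shot init hook, approximating early_initcall() with a GCC constructor; the names are illustrative:

#include <stdio.h>

static int scheduler_fully_active;

/* Stand-in for early_initcall(): runs once, after enough of the system
 * is up that spawning helpers is safe. */
__attribute__((constructor))
static void scheduler_really_started(void)
{
	scheduler_fully_active = 1;
}

static int spawn_one_helper(int cpu)
{
	if (!scheduler_fully_active)
		return 0;		/* too early: skip, retried later */
	printf("spawning helper for CPU %d\n", cpu);
	return 1;
}

int main(void)
{
	return spawn_one_helper(0) ? 0 : 1;
}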
diff --git a/kernel/sched.c b/kernel/sched.c
index 9769c756ad66..3dc716f6d8ad 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -7757,6 +7757,9 @@ static void init_cfs_rq(struct cfs_rq *cfs_rq, struct rq *rq)
 #endif
 #endif
 	cfs_rq->min_vruntime = (u64)(-(1LL << 20));
+#ifndef CONFIG_64BIT
+	cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
+#endif
 }
 
 static void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq)
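
The kernel/sched.c hunk initializes min_vruntime_copy on 32-bit builds, where a u64 cannot be loaded atomically. The copy belongs to a double-write scheme: the updater stores the value, issues a write barrier, then stores the copy; a lockless reader loads the copy first and retries until the two agree. Initializing both fields together keeps a reader from ever seeing a pre-boot mismatch. A hedged userspace sketch of that scheme, with GCC builtins standing in for smp_wmb()/smp_rmb() and illustrative names:

#include <stdint.h>

struct shared {
	uint64_t val;			/* updated under a lock */
	uint64_t val_copy;		/* trailing copy for lockless readers */
};

static void publish(struct shared *s, uint64_t v)
{
	s->val = v;
	__sync_synchronize();		/* order val before val_copy */
	s->val_copy = v;
}

static uint64_t read_lockless(struct shared *s)
{
	uint64_t copy, v;

	do {
		copy = *(volatile uint64_t *)&s->val_copy;
		__sync_synchronize();	/* order val_copy before val */
		v = *(volatile uint64_t *)&s->val;
	} while (v != copy);		/* update in flight: retry */
	return v;
}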