Diffstat (limited to 'kernel')
-rw-r--r--   kernel/irq/spurious.c    2
-rw-r--r--   kernel/rcutree.c        16
-rw-r--r--   kernel/rcutree.h         7
-rw-r--r--   kernel/sched.c           5
-rw-r--r--   kernel/user.c            2
-rw-r--r--   kernel/workqueue.c      28
6 files changed, 34 insertions(+), 26 deletions(-)
diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c
index 114e704760fe..bd7273e6282e 100644
--- a/kernel/irq/spurious.c
+++ b/kernel/irq/spurious.c
@@ -121,7 +121,9 @@ static void poll_all_shared_irqs(void)
 		if (!(status & IRQ_SPURIOUS_DISABLED))
 			continue;
 
+		local_irq_disable();
 		try_one_irq(i, desc);
+		local_irq_enable();
 	}
 }
 
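try_one_irq() is written to run with interrupts off (it takes desc->lock without saving interrupt state), while this polling loop runs in process context with interrupts enabled; an interrupt arriving mid-poll on the same CPU could then spin on a lock that CPU already holds. The hunk restores the assumed context by bracketing the call with local_irq_disable()/local_irq_enable(). A compilable userspace analogue of the same pattern, offered only as a sketch: the mutex stands in for desc->lock and SIGUSR1 for the hardware interrupt.

#include <pthread.h>
#include <signal.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static void handler(int sig)
{
	(void)sig;
	/* Models the hard-irq path taking desc->lock: if this ran while
	 * main() held the lock, the thread would deadlock on itself. */
	pthread_mutex_lock(&lock);
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	sigset_t set;

	signal(SIGUSR1, handler);
	sigemptyset(&set);
	sigaddset(&set, SIGUSR1);

	pthread_sigmask(SIG_BLOCK, &set, NULL);   /* ~ local_irq_disable() */
	pthread_mutex_lock(&lock);
	raise(SIGUSR1);           /* "interrupt" arrives but stays pending */
	pthread_mutex_unlock(&lock);
	pthread_sigmask(SIG_UNBLOCK, &set, NULL); /* ~ local_irq_enable();
						     handler runs only now,
						     after the unlock */
	puts("no self-deadlock");
	return 0;
}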
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 0536125b0497..f3077c0ab181 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -59,7 +59,7 @@
 		NUM_RCU_LVL_2, \
 		NUM_RCU_LVL_3, /* == MAX_RCU_LVLS */ \
 	}, \
-	.signaled = RCU_SIGNAL_INIT, \
+	.signaled = RCU_GP_IDLE, \
 	.gpnum = -300, \
 	.completed = -300, \
 	.onofflock = __SPIN_LOCK_UNLOCKED(&name.onofflock), \
@@ -657,14 +657,17 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
 	 * irqs disabled.
 	 */
 	rcu_for_each_node_breadth_first(rsp, rnp) {
 		spin_lock(&rnp->lock);	/* irqs already disabled. */
 		rcu_preempt_check_blocked_tasks(rnp);
 		rnp->qsmask = rnp->qsmaskinit;
 		rnp->gpnum = rsp->gpnum;
-		spin_unlock(&rnp->lock);	/* irqs already disabled. */
+		spin_unlock(&rnp->lock);	/* irqs remain disabled. */
 	}
 
+	rnp = rcu_get_root(rsp);
+	spin_lock(&rnp->lock);		/* irqs already disabled. */
 	rsp->signaled = RCU_SIGNAL_INIT; /* force_quiescent_state now OK. */
+	spin_unlock(&rnp->lock);	/* irqs remain disabled. */
 	spin_unlock_irqrestore(&rsp->onofflock, flags);
 }
 
@@ -706,6 +709,7 @@ static void cpu_quiet_msk_finish(struct rcu_state *rsp, unsigned long flags)
 {
 	WARN_ON_ONCE(!rcu_gp_in_progress(rsp));
 	rsp->completed = rsp->gpnum;
+	rsp->signaled = RCU_GP_IDLE;
 	rcu_process_gp_end(rsp, rsp->rda[smp_processor_id()]);
 	rcu_start_gp(rsp, flags); /* releases root node's rnp->lock. */
 }
@@ -1162,9 +1166,10 @@ static void force_quiescent_state(struct rcu_state *rsp, int relaxed)
 	}
 	spin_unlock(&rnp->lock);
 	switch (signaled) {
+	case RCU_GP_IDLE:
 	case RCU_GP_INIT:
 
-		break; /* grace period still initializing, ignore. */
+		break; /* grace period idle or initializing, ignore. */
 
 	case RCU_SAVE_DYNTICK:
 
@@ -1178,7 +1183,8 @@ static void force_quiescent_state(struct rcu_state *rsp, int relaxed)
 
 		/* Update state, record completion counter. */
 		spin_lock(&rnp->lock);
-		if (lastcomp == rsp->completed) {
+		if (lastcomp == rsp->completed &&
+		    rsp->signaled == RCU_SAVE_DYNTICK) {
 			rsp->signaled = RCU_FORCE_QS;
 			dyntick_record_completed(rsp, lastcomp);
 		}
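Seen together, the rcutree.c hunks tighten the grace-period state machine against races with force_quiescent_state(): rcu_start_gp() now sets rsp->signaled to RCU_SIGNAL_INIT only while holding the root rcu_node lock, cpu_quiet_msk_finish() drops the state back to the new RCU_GP_IDLE when a grace period ends, and force_quiescent_state() both ignores the idle state and re-checks for RCU_SAVE_DYNTICK before advancing to RCU_FORCE_QS. A minimal single-threaded model of that cycle (an illustrative sketch using the values rcutree.h renumbers below, not kernel code; in the kernel the re-check matters because the state can change between the dyntick scan and the update):

#include <assert.h>
#include <stdio.h>

enum { RCU_GP_IDLE, RCU_GP_INIT, RCU_SAVE_DYNTICK, RCU_FORCE_QS };

static int signaled = RCU_GP_IDLE;

static void rcu_start_gp(void)		/* under root rnp->lock in the kernel */
{
	signaled = RCU_SAVE_DYNTICK;	/* RCU_SIGNAL_INIT with CONFIG_NO_HZ */
}

static void force_quiescent_state(void)
{
	switch (signaled) {
	case RCU_GP_IDLE:
	case RCU_GP_INIT:
		return;			/* idle or initializing: ignore */
	case RCU_SAVE_DYNTICK:
		/* scan dyntick state, then re-validate before advancing,
		 * as the new condition in the hunk above does */
		if (signaled == RCU_SAVE_DYNTICK)
			signaled = RCU_FORCE_QS;
		return;
	case RCU_FORCE_QS:
		return;			/* would force quiescent states */
	}
}

static void cpu_quiet_msk_finish(void)
{
	signaled = RCU_GP_IDLE;		/* grace period is over */
}

int main(void)
{
	force_quiescent_state();	/* no-op while idle */
	assert(signaled == RCU_GP_IDLE);
	rcu_start_gp();
	force_quiescent_state();
	assert(signaled == RCU_FORCE_QS);
	cpu_quiet_msk_finish();
	assert(signaled == RCU_GP_IDLE);
	puts("state cycle ok");
	return 0;
}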
diff --git a/kernel/rcutree.h b/kernel/rcutree.h
index 1823c6e20609..1899023b0962 100644
--- a/kernel/rcutree.h
+++ b/kernel/rcutree.h
@@ -201,9 +201,10 @@ struct rcu_data {
 };
 
 /* Values for signaled field in struct rcu_state. */
-#define RCU_GP_INIT		0	/* Grace period being initialized. */
-#define RCU_SAVE_DYNTICK	1	/* Need to scan dyntick state. */
-#define RCU_FORCE_QS		2	/* Need to force quiescent state. */
+#define RCU_GP_IDLE		0	/* No grace period in progress. */
+#define RCU_GP_INIT		1	/* Grace period being initialized. */
+#define RCU_SAVE_DYNTICK	2	/* Need to scan dyntick state. */
+#define RCU_FORCE_QS		3	/* Need to force quiescent state. */
 #ifdef CONFIG_NO_HZ
 #define RCU_SIGNAL_INIT RCU_SAVE_DYNTICK
 #else /* #ifdef CONFIG_NO_HZ */
diff --git a/kernel/sched.c b/kernel/sched.c
index 28dd4f490bfc..3c11ae0a948d 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -309,6 +309,8 @@ static DEFINE_PER_CPU_SHARED_ALIGNED(struct rt_rq, init_rt_rq);
  */
 static DEFINE_SPINLOCK(task_group_lock);
 
+#ifdef CONFIG_FAIR_GROUP_SCHED
+
 #ifdef CONFIG_SMP
 static int root_task_group_empty(void)
 {
@@ -316,7 +318,6 @@ static int root_task_group_empty(void)
 }
 #endif
 
-#ifdef CONFIG_FAIR_GROUP_SCHED
 #ifdef CONFIG_USER_SCHED
 # define INIT_TASK_GROUP_LOAD	(2*NICE_0_LOAD)
 #else /* !CONFIG_USER_SCHED */
@@ -1994,7 +1995,7 @@ static inline void check_class_changed(struct rq *rq, struct task_struct *p,
 
 /**
  * kthread_bind - bind a just-created kthread to a cpu.
- * @k: thread created by kthread_create().
+ * @p: thread created by kthread_create().
  * @cpu: cpu (might not be online, must be possible) for @k to run on.
  *
  * Description: This function is equivalent to set_cpus_allowed(),
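The sched.c changes are two small cleanups: root_task_group_empty() is now compiled only under CONFIG_FAIR_GROUP_SCHED, where its callers live (presumably to avoid a defined-but-unused warning in other configurations), and the kthread_bind() kerneldoc tag is corrected from @k to @p to match the function's actual parameter name, which keeps scripts/kernel-doc from reporting an undescribed parameter. A hypothetical example of the convention (widget_bind and struct widget are made up for illustration):

struct widget;			/* hypothetical type */

/**
 * widget_bind - bind a widget to a cpu.
 * @w: widget being bound.
 * @cpu: target cpu.
 *
 * Each @-tag must name a real parameter; a stale tag such as @k for
 * a parameter renamed to p is the mismatch the hunk above fixes.
 */
void widget_bind(struct widget *w, int cpu);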
diff --git a/kernel/user.c b/kernel/user.c
index 2c000e7132ac..46d0165ca70c 100644
--- a/kernel/user.c
+++ b/kernel/user.c
@@ -330,9 +330,9 @@ done:
  */
 static void free_user(struct user_struct *up, unsigned long flags)
 {
-	spin_unlock_irqrestore(&uidhash_lock, flags);
 	INIT_DELAYED_WORK(&up->work, cleanup_user_struct);
 	schedule_delayed_work(&up->work, msecs_to_jiffies(1000));
+	spin_unlock_irqrestore(&uidhash_lock, flags);
 }
 
 #else /* CONFIG_USER_SCHED && CONFIG_SYSFS */
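The user.c fix is pure lock ordering: free_user() is entered with uidhash_lock held, and previously dropped it before setting up and queueing the deferred cleanup, leaving a window in which a concurrent uid-hash lookup could observe the user_struct mid-teardown. Queueing the work first and unlocking last makes the hand-off atomic with respect to anyone serialized by uidhash_lock. A compilable userspace sketch of that rule (the mutex and the flag field are stand-ins):

#include <pthread.h>

struct user_struct {
	int cleanup_queued;		/* stands in for up->work */
};

static pthread_mutex_t uidhash_lock = PTHREAD_MUTEX_INITIALIZER;

/* Called with uidhash_lock held, mirroring the kernel function. */
static void free_user(struct user_struct *up)
{
	/* Buggy order would unlock first, letting a lookup race with
	 * this setup.  Fixed order: finish everything observers may
	 * see, then drop the lock that serializes those observers. */
	up->cleanup_queued = 1;		/* ~ INIT_DELAYED_WORK + schedule */
	pthread_mutex_unlock(&uidhash_lock);
}

int main(void)
{
	struct user_struct u = { 0 };

	pthread_mutex_lock(&uidhash_lock);
	free_user(&u);
	return 0;
}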
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 12328147132c..67e526b6ae81 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -692,31 +692,29 @@ int schedule_on_each_cpu(work_func_t func)
 	if (!works)
 		return -ENOMEM;
 
+	get_online_cpus();
+
 	/*
-	 * when running in keventd don't schedule a work item on itself.
-	 * Can just call directly because the work queue is already bound.
-	 * This also is faster.
-	 * Make this a generic parameter for other workqueues?
+	 * When running in keventd don't schedule a work item on
+	 * itself. Can just call directly because the work queue is
+	 * already bound. This also is faster.
 	 */
-	if (current_is_keventd()) {
+	if (current_is_keventd())
 		orig = raw_smp_processor_id();
-		INIT_WORK(per_cpu_ptr(works, orig), func);
-		func(per_cpu_ptr(works, orig));
-	}
 
-	get_online_cpus();
 	for_each_online_cpu(cpu) {
 		struct work_struct *work = per_cpu_ptr(works, cpu);
 
-		if (cpu == orig)
-			continue;
 		INIT_WORK(work, func);
-		schedule_work_on(cpu, work);
-	}
-	for_each_online_cpu(cpu) {
 		if (cpu != orig)
-			flush_work(per_cpu_ptr(works, cpu));
+			schedule_work_on(cpu, work);
 	}
+	if (orig >= 0)
+		func(per_cpu_ptr(works, orig));
+
+	for_each_online_cpu(cpu)
+		flush_work(per_cpu_ptr(works, cpu));
+
 	put_online_cpus();
 	free_percpu(works);
 	return 0;
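The schedule_on_each_cpu() rework changes three things: get_online_cpus() is taken before the keventd check, so the whole operation runs under CPU-hotplug protection; when called from keventd, the local CPU's callback is no longer run inline up front but only after the other CPUs' work items have been queued, so it executes in parallel with them (orig stays negative outside keventd, per the if (orig >= 0) guard); and a single flush loop at the end waits for every queued item. A userspace analogue of the resulting flow, a sketch only: threads stand in for schedule_work_on(), join for flush_work().

#include <pthread.h>
#include <stdio.h>

#define NCPUS 4

static void *work_fn(void *arg)
{
	printf("work on cpu %d\n", (int)(long)arg);
	return NULL;
}

int main(void)
{
	pthread_t tid[NCPUS];
	int orig = 0;			/* pretend we are "cpu 0" */
	int cpu;

	for (cpu = 0; cpu < NCPUS; cpu++)	/* ~ schedule_work_on() */
		if (cpu != orig)
			pthread_create(&tid[cpu], NULL, work_fn,
				       (void *)(long)cpu);

	work_fn((void *)(long)orig);	/* direct local call overlaps with
					 * the already-queued remote work */

	for (cpu = 0; cpu < NCPUS; cpu++)	/* ~ flush_work() */
		if (cpu != orig)
			pthread_join(tid[cpu], NULL);

	puts("all cpus done");
	return 0;
}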