about · summary · refs · log · tree · commit · diff · stats
diff options
context:
space:
mode:
authorShi, Alex <alex.shi@intel.com>2011-07-28 02:56:12 -0400
committerPaul E. McKenney <paulmck@linux.vnet.ibm.com>2011-09-29 00:38:29 -0400
commitfc0763f53e3ff6a6bfa66934662a3446b9ca6f16 (patch)
tree2671d3f6275c9707a620032658ecb05025e4e9fa
parentd7bd2d68aa2ee2738a10c8ad9346b805e4ab2e1c (diff)
nohz: Remove nohz_cpu_mask
RCU no longer uses this global variable, nor does anyone else. This commit therefore removes this variable. This reduces memory footprint and also removes some atomic instructions and memory barriers from the dyntick-idle path.

Signed-off-by: Alex Shi <alex.shi@intel.com>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
-rw-r--r--include/linux/sched.h1
-rw-r--r--kernel/sched.c11
-rw-r--r--kernel/time/tick-sched.c6
3 files changed, 0 insertions, 18 deletions
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 4ac2c0578e0..6ee91e20353 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -270,7 +270,6 @@ extern void init_idle_bootup_task(struct task_struct *idle);
 
 extern int runqueue_is_locked(int cpu);
 
-extern cpumask_var_t nohz_cpu_mask;
 #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ)
 extern void select_nohz_load_balancer(int stop_tick);
 extern int get_nohz_timer_target(void);
diff --git a/kernel/sched.c b/kernel/sched.c
index e24cebe0e6c..3e552563045 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -5980,15 +5980,6 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
 }
 
 /*
- * In a system that switches off the HZ timer nohz_cpu_mask
- * indicates which cpus entered this state. This is used
- * in the rcu update to wait only for active cpus. For system
- * which do not switch off the HZ timer nohz_cpu_mask should
- * always be CPU_BITS_NONE.
- */
-cpumask_var_t nohz_cpu_mask;
-
-/*
  * Increase the granularity value when there are more CPUs,
  * because with more CPUs the 'effective latency' as visible
  * to users decreases. But the relationship is not linear,
@@ -8200,8 +8191,6 @@ void __init sched_init(void)
 	 */
 	current->sched_class = &fair_sched_class;
 
-	/* Allocate the nohz_cpu_mask if CONFIG_CPUMASK_OFFSTACK */
-	zalloc_cpumask_var(&nohz_cpu_mask, GFP_NOWAIT);
 #ifdef CONFIG_SMP
 	zalloc_cpumask_var(&sched_domains_tmpmask, GFP_NOWAIT);
 #ifdef CONFIG_NO_HZ
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index d5097c44b40..eb98e55196b 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -139,7 +139,6 @@ static void tick_nohz_update_jiffies(ktime_t now)
 	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
 	unsigned long flags;
 
-	cpumask_clear_cpu(cpu, nohz_cpu_mask);
 	ts->idle_waketime = now;
 
 	local_irq_save(flags);
@@ -389,9 +388,6 @@ void tick_nohz_stop_sched_tick(int inidle)
 	else
 		expires.tv64 = KTIME_MAX;
 
-	if (delta_jiffies > 1)
-		cpumask_set_cpu(cpu, nohz_cpu_mask);
-
 	/* Skip reprogram of event if its not changed */
 	if (ts->tick_stopped && ktime_equal(expires, dev->next_event))
 		goto out;
@@ -441,7 +437,6 @@ void tick_nohz_stop_sched_tick(int inidle)
 		 * softirq.
 		 */
 		tick_do_update_jiffies64(ktime_get());
-		cpumask_clear_cpu(cpu, nohz_cpu_mask);
 	}
 	raise_softirq_irqoff(TIMER_SOFTIRQ);
 out:
@@ -524,7 +519,6 @@ void tick_nohz_restart_sched_tick(void)
 	/* Update jiffies first */
 	select_nohz_load_balancer(0);
 	tick_do_update_jiffies64(now);
-	cpumask_clear_cpu(cpu, nohz_cpu_mask);
 
 #ifndef CONFIG_VIRT_CPU_ACCOUNTING
 	/*