path: root/kernel
author     Ingo Molnar <mingo@elte.hu>   2008-09-23 10:19:26 -0400
committer  Ingo Molnar <mingo@elte.hu>   2008-09-23 10:19:26 -0400
commit     9b9b181ce53ef387dfe3df9316bbc641fca13d51 (patch)
tree       acc34cf3d5172536c37de61eafbda399163e4265 /kernel
parent     fb71e45338453698bd7460f7e8f171ea0304d218 (diff)
parent     72d31053f62c4bc464c2783974926969614a8649 (diff)
Merge commit 'v2.6.27-rc7' into core/locking
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/cpuset.c               37
-rw-r--r--  kernel/sched.c                 6
-rw-r--r--  kernel/sched_rt.c              1
-rw-r--r--  kernel/time/clockevents.c     12
-rw-r--r--  kernel/time/tick-broadcast.c   9
-rw-r--r--  kernel/time/tick-common.c      4
-rw-r--r--  kernel/time/tick-internal.h    2
7 files changed, 41 insertions, 30 deletions
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index f227bc172690..827cd9adccb2 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -843,37 +843,25 @@ static void cpuset_change_cpumask(struct task_struct *tsk,
 /**
  * update_tasks_cpumask - Update the cpumasks of tasks in the cpuset.
  * @cs: the cpuset in which each task's cpus_allowed mask needs to be changed
+ * @heap: if NULL, defer allocating heap memory to cgroup_scan_tasks()
  *
  * Called with cgroup_mutex held
  *
  * The cgroup_scan_tasks() function will scan all the tasks in a cgroup,
  * calling callback functions for each.
  *
- * Return 0 if successful, -errno if not.
+ * No return value. It's guaranteed that cgroup_scan_tasks() always returns 0
+ * if @heap != NULL.
  */
-static int update_tasks_cpumask(struct cpuset *cs)
+static void update_tasks_cpumask(struct cpuset *cs, struct ptr_heap *heap)
 {
         struct cgroup_scanner scan;
-        struct ptr_heap heap;
-        int retval;
-
-        /*
-         * cgroup_scan_tasks() will initialize heap->gt for us.
-         * heap_init() is still needed here for we should not change
-         * cs->cpus_allowed when heap_init() fails.
-         */
-        retval = heap_init(&heap, PAGE_SIZE, GFP_KERNEL, NULL);
-        if (retval)
-                return retval;

         scan.cg = cs->css.cgroup;
         scan.test_task = cpuset_test_cpumask;
         scan.process_task = cpuset_change_cpumask;
-        scan.heap = &heap;
-        retval = cgroup_scan_tasks(&scan);
-
-        heap_free(&heap);
-        return retval;
+        scan.heap = heap;
+        cgroup_scan_tasks(&scan);
 }

 /**
@@ -883,6 +871,7 @@ static int update_tasks_cpumask(struct cpuset *cs)
  */
 static int update_cpumask(struct cpuset *cs, const char *buf)
 {
+        struct ptr_heap heap;
         struct cpuset trialcs;
         int retval;
         int is_load_balanced;
@@ -917,6 +906,10 @@ static int update_cpumask(struct cpuset *cs, const char *buf)
         if (cpus_equal(cs->cpus_allowed, trialcs.cpus_allowed))
                 return 0;

+        retval = heap_init(&heap, PAGE_SIZE, GFP_KERNEL, NULL);
+        if (retval)
+                return retval;
+
         is_load_balanced = is_sched_load_balance(&trialcs);

         mutex_lock(&callback_mutex);
@@ -927,9 +920,9 @@ static int update_cpumask(struct cpuset *cs, const char *buf)
          * Scan tasks in the cpuset, and update the cpumasks of any
          * that need an update.
          */
-        retval = update_tasks_cpumask(cs);
-        if (retval < 0)
-                return retval;
+        update_tasks_cpumask(cs, &heap);
+
+        heap_free(&heap);

         if (is_load_balanced)
                 async_rebuild_sched_domains();
@@ -1965,7 +1958,7 @@ static void scan_for_empty_cpusets(const struct cpuset *root)
                     nodes_empty(cp->mems_allowed))
                         remove_tasks_in_empty_cpuset(cp);
                 else {
-                        update_tasks_cpumask(cp);
+                        update_tasks_cpumask(cp, NULL);
                         update_tasks_nodemask(cp, &oldmems);
                 }
         }
diff --git a/kernel/sched.c b/kernel/sched.c
index cc1f81b50b82..98890807375b 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -8909,6 +8909,9 @@ static int sched_rt_global_constraints(void)
         u64 rt_runtime, rt_period;
         int ret = 0;

+        if (sysctl_sched_rt_period <= 0)
+                return -EINVAL;
+
         rt_period = ktime_to_ns(tg->rt_bandwidth.rt_period);
         rt_runtime = tg->rt_bandwidth.rt_runtime;

@@ -8925,6 +8928,9 @@ static int sched_rt_global_constraints(void)
         unsigned long flags;
         int i;

+        if (sysctl_sched_rt_period <= 0)
+                return -EINVAL;
+
         spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
         for_each_possible_cpu(i) {
                 struct rt_rq *rt_rq = &cpu_rq(i)->rt;
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 552310798dad..1113157b2058 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -350,6 +350,7 @@ static void __enable_runtime(struct rq *rq)
                 spin_lock(&rt_rq->rt_runtime_lock);
                 rt_rq->rt_runtime = rt_b->rt_runtime;
                 rt_rq->rt_time = 0;
+                rt_rq->rt_throttled = 0;
                 spin_unlock(&rt_rq->rt_runtime_lock);
                 spin_unlock(&rt_b->rt_runtime_lock);
         }
diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
index 1876b526c778..f8d968063cea 100644
--- a/kernel/time/clockevents.c
+++ b/kernel/time/clockevents.c
@@ -72,6 +72,16 @@ void clockevents_set_mode(struct clock_event_device *dev,
 }

 /**
+ * clockevents_shutdown - shutdown the device and clear next_event
+ * @dev: device to shutdown
+ */
+void clockevents_shutdown(struct clock_event_device *dev)
+{
+        clockevents_set_mode(dev, CLOCK_EVT_MODE_SHUTDOWN);
+        dev->next_event.tv64 = KTIME_MAX;
+}
+
+/**
  * clockevents_program_event - Reprogram the clock event device.
  * @expires: absolute expiry time (monotonic clock)
  *
@@ -206,7 +216,7 @@ void clockevents_exchange_device(struct clock_event_device *old,

         if (new) {
                 BUG_ON(new->mode != CLOCK_EVT_MODE_UNUSED);
-                clockevents_set_mode(new, CLOCK_EVT_MODE_SHUTDOWN);
+                clockevents_shutdown(new);
         }
         local_irq_restore(flags);
 }
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index 2f5a38294bf9..f1f3eee28113 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -236,8 +236,7 @@ static void tick_do_broadcast_on_off(void *why)
         if (!cpu_isset(cpu, tick_broadcast_mask)) {
                 cpu_set(cpu, tick_broadcast_mask);
                 if (td->mode == TICKDEV_MODE_PERIODIC)
-                        clockevents_set_mode(dev,
-                                             CLOCK_EVT_MODE_SHUTDOWN);
+                        clockevents_shutdown(dev);
         }
         if (*reason == CLOCK_EVT_NOTIFY_BROADCAST_FORCE)
                 tick_broadcast_force = 1;
@@ -254,7 +253,7 @@ static void tick_do_broadcast_on_off(void *why)

         if (cpus_empty(tick_broadcast_mask)) {
                 if (!bc_stopped)
-                        clockevents_set_mode(bc, CLOCK_EVT_MODE_SHUTDOWN);
+                        clockevents_shutdown(bc);
         } else if (bc_stopped) {
                 if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
                         tick_broadcast_start_periodic(bc);
@@ -306,7 +305,7 @@ void tick_shutdown_broadcast(unsigned int *cpup)

         if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) {
                 if (bc && cpus_empty(tick_broadcast_mask))
-                        clockevents_set_mode(bc, CLOCK_EVT_MODE_SHUTDOWN);
+                        clockevents_shutdown(bc);
         }

         spin_unlock_irqrestore(&tick_broadcast_lock, flags);
@@ -321,7 +320,7 @@ void tick_suspend_broadcast(void)

         bc = tick_broadcast_device.evtdev;
         if (bc)
-                clockevents_set_mode(bc, CLOCK_EVT_MODE_SHUTDOWN);
+                clockevents_shutdown(bc);

         spin_unlock_irqrestore(&tick_broadcast_lock, flags);
 }
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index c4777193d567..019315ebf9de 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -249,7 +249,7 @@ static int tick_check_new_device(struct clock_event_device *newdev)
          * not give it back to the clockevents layer !
          */
         if (tick_is_broadcast_device(curdev)) {
-                clockevents_set_mode(curdev, CLOCK_EVT_MODE_SHUTDOWN);
+                clockevents_shutdown(curdev);
                 curdev = NULL;
         }
         clockevents_exchange_device(curdev, newdev);
@@ -311,7 +311,7 @@ static void tick_suspend(void)
         unsigned long flags;

         spin_lock_irqsave(&tick_device_lock, flags);
-        clockevents_set_mode(td->evtdev, CLOCK_EVT_MODE_SHUTDOWN);
+        clockevents_shutdown(td->evtdev);
         spin_unlock_irqrestore(&tick_device_lock, flags);
 }

diff --git a/kernel/time/tick-internal.h b/kernel/time/tick-internal.h
index 0ffc2918ea6f..6e9db9734aa6 100644
--- a/kernel/time/tick-internal.h
+++ b/kernel/time/tick-internal.h
@@ -10,6 +10,8 @@ extern int tick_do_timer_cpu __read_mostly;
 extern void tick_setup_periodic(struct clock_event_device *dev, int broadcast);
 extern void tick_handle_periodic(struct clock_event_device *dev);

+extern void clockevents_shutdown(struct clock_event_device *dev);
+
 /*
  * NO_HZ / high resolution timer shared code
  */