author     Ingo Molnar <mingo@elte.hu>    2008-10-03 04:34:36 -0400
committer  Ingo Molnar <mingo@elte.hu>    2008-10-03 04:34:36 -0400
commit     b5259d944279d0b7e78a83849a352d8ba0447c4c (patch)
tree       42f0e7dc404bc776f9a736c17f52c121da166fc1 /kernel
parent     1c50b728c3e734150b8a4a8310ce3e01bc5c70be (diff)
parent     94aca1dac6f6d21f4b07e4864baf7768cabcc6e7 (diff)
Merge commit 'v2.6.27-rc8' into core/rcu
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/cgroup.c                5
-rw-r--r--  kernel/cpuset.c               37
-rw-r--r--  kernel/exit.c                 12
-rw-r--r--  kernel/kexec.c                 8
-rw-r--r--  kernel/kgdb.c                 10
-rw-r--r--  kernel/sched.c                 8
-rw-r--r--  kernel/sched_rt.c              1
-rw-r--r--  kernel/time/clockevents.c     12
-rw-r--r--  kernel/time/tick-broadcast.c  21
-rw-r--r--  kernel/time/tick-common.c     14
-rw-r--r--  kernel/time/tick-internal.h    9
-rw-r--r--  kernel/time/tick-sched.c      11
12 files changed, 97 insertions, 51 deletions
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 13932abde159..a0123d75ec9a 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -2738,14 +2738,15 @@ void cgroup_fork_callbacks(struct task_struct *child)
  */
 void cgroup_mm_owner_callbacks(struct task_struct *old, struct task_struct *new)
 {
-        struct cgroup *oldcgrp, *newcgrp;
+        struct cgroup *oldcgrp, *newcgrp = NULL;
 
         if (need_mm_owner_callback) {
                 int i;
                 for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
                         struct cgroup_subsys *ss = subsys[i];
                         oldcgrp = task_cgroup(old, ss->subsys_id);
-                        newcgrp = task_cgroup(new, ss->subsys_id);
+                        if (new)
+                                newcgrp = task_cgroup(new, ss->subsys_id);
                         if (oldcgrp == newcgrp)
                                 continue;
                         if (ss->mm_owner_changed)
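
Note: the hunk above initializes newcgrp to NULL and skips the task_cgroup()
lookup when no new owner exists, so subsystem callbacks now see a NULL new
cgroup rather than a dereference through a NULL task. A minimal userspace
sketch of the same NULL-guard pattern (all names here are illustrative, not
kernel API):

    #include <stdio.h>
    #include <stddef.h>

    struct group { const char *name; };

    /* stand-in for task_cgroup(); hypothetical helper */
    static struct group *lookup_group(struct group *g) { return g; }

    static void owner_changed(struct group *oldg, struct group *newg)
    {
            struct group *o = lookup_group(oldg);
            struct group *n = NULL;         /* mirrors "newcgrp = NULL" */

            if (newg)                       /* mirrors "if (new)" */
                    n = lookup_group(newg);
            if (o == n)
                    return;                 /* nothing changed */
            printf("owner moved: %s -> %s\n",
                   o ? o->name : "(none)", n ? n->name : "(none)");
    }

    int main(void)
    {
            struct group a = { "A" };
            owner_changed(&a, NULL);        /* exiting owner, no successor */
            return 0;
    }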
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index f227bc172690..827cd9adccb2 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -843,37 +843,25 @@ static void cpuset_change_cpumask(struct task_struct *tsk,
 /**
  * update_tasks_cpumask - Update the cpumasks of tasks in the cpuset.
  * @cs: the cpuset in which each task's cpus_allowed mask needs to be changed
+ * @heap: if NULL, defer allocating heap memory to cgroup_scan_tasks()
  *
  * Called with cgroup_mutex held
  *
  * The cgroup_scan_tasks() function will scan all the tasks in a cgroup,
  * calling callback functions for each.
  *
- * Return 0 if successful, -errno if not.
+ * No return value. It's guaranteed that cgroup_scan_tasks() always returns 0
+ * if @heap != NULL.
  */
-static int update_tasks_cpumask(struct cpuset *cs)
+static void update_tasks_cpumask(struct cpuset *cs, struct ptr_heap *heap)
 {
         struct cgroup_scanner scan;
-        struct ptr_heap heap;
-        int retval;
-
-        /*
-         * cgroup_scan_tasks() will initialize heap->gt for us.
-         * heap_init() is still needed here for we should not change
-         * cs->cpus_allowed when heap_init() fails.
-         */
-        retval = heap_init(&heap, PAGE_SIZE, GFP_KERNEL, NULL);
-        if (retval)
-                return retval;
 
         scan.cg = cs->css.cgroup;
         scan.test_task = cpuset_test_cpumask;
         scan.process_task = cpuset_change_cpumask;
-        scan.heap = &heap;
-        retval = cgroup_scan_tasks(&scan);
-
-        heap_free(&heap);
-        return retval;
+        scan.heap = heap;
+        cgroup_scan_tasks(&scan);
 }
 
 /**
@@ -883,6 +871,7 @@ static int update_tasks_cpumask(struct cpuset *cs)
  */
 static int update_cpumask(struct cpuset *cs, const char *buf)
 {
+        struct ptr_heap heap;
         struct cpuset trialcs;
         int retval;
         int is_load_balanced;
@@ -917,6 +906,10 @@ static int update_cpumask(struct cpuset *cs, const char *buf)
         if (cpus_equal(cs->cpus_allowed, trialcs.cpus_allowed))
                 return 0;
 
+        retval = heap_init(&heap, PAGE_SIZE, GFP_KERNEL, NULL);
+        if (retval)
+                return retval;
+
         is_load_balanced = is_sched_load_balance(&trialcs);
 
         mutex_lock(&callback_mutex);
@@ -927,9 +920,9 @@ static int update_cpumask(struct cpuset *cs, const char *buf)
          * Scan tasks in the cpuset, and update the cpumasks of any
          * that need an update.
          */
-        retval = update_tasks_cpumask(cs);
-        if (retval < 0)
-                return retval;
+        update_tasks_cpumask(cs, &heap);
+
+        heap_free(&heap);
 
         if (is_load_balanced)
                 async_rebuild_sched_domains();
@@ -1965,7 +1958,7 @@ static void scan_for_empty_cpusets(const struct cpuset *root)
                     nodes_empty(cp->mems_allowed))
                         remove_tasks_in_empty_cpuset(cp);
                 else {
-                        update_tasks_cpumask(cp);
+                        update_tasks_cpumask(cp, NULL);
                         update_tasks_nodemask(cp, &oldmems);
                 }
         }
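
Note: this refactor moves the heap allocation to the caller, so the scan
helper itself can no longer fail, and a caller that cannot pre-allocate
(scan_for_empty_cpusets) may pass NULL to defer allocation. A compilable
sketch of that caller-owned-scratch pattern, with hypothetical names:

    #include <stdio.h>
    #include <stdlib.h>

    /* Illustrative scratch buffer the caller may pre-allocate; passing
     * NULL asks the worker to fall back to an internal path, mirroring
     * update_tasks_cpumask(cs, NULL) above. */
    struct scratch { int *slots; size_t n; };

    static int scratch_init(struct scratch *s, size_t n)
    {
            s->slots = malloc(n * sizeof(*s->slots));
            s->n = s->slots ? n : 0;
            return s->slots ? 0 : -1;   /* like heap_init() returning -errno */
    }

    static void scratch_free(struct scratch *s) { free(s->slots); }

    static void scan_tasks(struct scratch *s)   /* cannot fail */
    {
            if (s)
                    printf("fast path: %zu preallocated slots\n", s->n);
            else
                    printf("slow path: worker allocates internally\n");
    }

    int main(void)
    {
            struct scratch heap;

            if (scratch_init(&heap, 128))   /* fail before changing state */
                    return 1;
            scan_tasks(&heap);
            scratch_free(&heap);
            scan_tasks(NULL);               /* deferred-allocation caller */
            return 0;
    }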
diff --git a/kernel/exit.c b/kernel/exit.c
index 16395644a98f..85a83c831856 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -583,8 +583,6 @@ mm_need_new_owner(struct mm_struct *mm, struct task_struct *p)
          * If there are other users of the mm and the owner (us) is exiting
          * we need to find a new owner to take on the responsibility.
          */
-        if (!mm)
-                return 0;
         if (atomic_read(&mm->mm_users) <= 1)
                 return 0;
         if (mm->owner != p)
@@ -627,6 +625,16 @@ retry:
         } while_each_thread(g, c);
 
         read_unlock(&tasklist_lock);
+        /*
+         * We found no owner yet mm_users > 1: this implies that we are
+         * most likely racing with swapoff (try_to_unuse()) or /proc or
+         * ptrace or page migration (get_task_mm()). Mark owner as NULL,
+         * so that subsystems can understand the callback and take action.
+         */
+        down_write(&mm->mmap_sem);
+        cgroup_mm_owner_callbacks(mm->owner, NULL);
+        mm->owner = NULL;
+        up_write(&mm->mmap_sem);
         return;
 
 assign_new_owner:
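
Note: when no successor task is found, the hunk publishes the "no owner"
state under the write side of mmap_sem and fires the callback first, so
readers never observe the NULL owner before subsystems were told. A small
sketch of that publish-under-write-lock ordering (names illustrative):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_rwlock_t lock = PTHREAD_RWLOCK_INITIALIZER;
    static const char *owner = "task-A";

    static void notify(const char *o, const char *n)
    {
            printf("owner callback: %s -> %s\n", o, n ? n : "(null)");
    }

    static void drop_owner(void)
    {
            pthread_rwlock_wrlock(&lock);   /* mirrors down_write(&mm->mmap_sem) */
            notify(owner, NULL);            /* callback before the field changes */
            owner = NULL;
            pthread_rwlock_unlock(&lock);
    }

    int main(void)
    {
            drop_owner();
            return 0;
    }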
diff --git a/kernel/kexec.c b/kernel/kexec.c
index 59f3f0df35d4..aef265325cd3 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -753,8 +753,14 @@ static struct page *kimage_alloc_page(struct kimage *image,
                 *old = addr | (*old & ~PAGE_MASK);
 
                 /* The old page I have found cannot be a
-                 * destination page, so return it.
+                 * destination page, so return it if it's
+                 * gfp_flags honor the ones passed in.
                  */
+                if (!(gfp_mask & __GFP_HIGHMEM) &&
+                    PageHighMem(old_page)) {
+                        kimage_free_pages(old_page);
+                        continue;
+                }
                 addr = old_addr;
                 page = old_page;
                 break;
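
Note: the fix stops kimage_alloc_page() from handing back a recycled page
that violates the caller's gfp constraints: if the caller did not ask for
highmem, a highmem page is freed and the search continues. A compilable
sketch of that accept-only-if-constraints-hold check (flags hypothetical):

    #include <stdbool.h>
    #include <stdio.h>

    /* Illustrative flag; the kernel uses gfp_mask bits like __GFP_HIGHMEM. */
    #define F_HIGHMEM 0x1

    struct page_desc { bool highmem; };

    /* Only hand back a recycled page if it honors the caller's mask,
     * mirroring the PageHighMem()/__GFP_HIGHMEM test above. */
    static bool page_acceptable(unsigned flags, const struct page_desc *p)
    {
            if (!(flags & F_HIGHMEM) && p->highmem)
                    return false;   /* caller can't address it: free and retry */
            return true;
    }

    int main(void)
    {
            struct page_desc hi = { .highmem = true };

            printf("%d\n", page_acceptable(0, &hi));         /* 0: rejected */
            printf("%d\n", page_acceptable(F_HIGHMEM, &hi)); /* 1: accepted */
            return 0;
    }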
diff --git a/kernel/kgdb.c b/kernel/kgdb.c
index eaa21fc9ad1d..25d955dbb989 100644
--- a/kernel/kgdb.c
+++ b/kernel/kgdb.c
@@ -488,7 +488,7 @@ static int write_mem_msg(int binary)
         if (err)
                 return err;
         if (CACHE_FLUSH_IS_SAFE)
-                flush_icache_range(addr, addr + length + 1);
+                flush_icache_range(addr, addr + length);
         return 0;
 }
 
@@ -1462,7 +1462,7 @@ acquirelock:
          * Get the passive CPU lock which will hold all the non-primary
          * CPU in a spin state while the debugger is active
          */
-        if (!kgdb_single_step || !kgdb_contthread) {
+        if (!kgdb_single_step) {
                 for (i = 0; i < NR_CPUS; i++)
                         atomic_set(&passive_cpu_wait[i], 1);
         }
@@ -1475,7 +1475,7 @@ acquirelock:
 
 #ifdef CONFIG_SMP
         /* Signal the other CPUs to enter kgdb_wait() */
-        if ((!kgdb_single_step || !kgdb_contthread) && kgdb_do_roundup)
+        if ((!kgdb_single_step) && kgdb_do_roundup)
                 kgdb_roundup_cpus(flags);
 #endif
 
@@ -1494,7 +1494,7 @@ acquirelock:
         kgdb_post_primary_code(ks->linux_regs, ks->ex_vector, ks->err_code);
         kgdb_deactivate_sw_breakpoints();
         kgdb_single_step = 0;
-        kgdb_contthread = NULL;
+        kgdb_contthread = current;
         exception_level = 0;
 
         /* Talk to debugger with gdbserial protocol */
@@ -1508,7 +1508,7 @@ acquirelock:
         kgdb_info[ks->cpu].task = NULL;
         atomic_set(&cpu_in_kgdb[ks->cpu], 0);
 
-        if (!kgdb_single_step) {
+        if (!kgdb_single_step) {
                 for (i = NR_CPUS-1; i >= 0; i--)
                         atomic_set(&passive_cpu_wait[i], 0);
                 /*
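
Note: with kgdb_contthread now always set to current while the debugger is
active, the "park the other CPUs?" tests above collapse to the single-step
flag alone. A trivial sketch of the simplified predicate (illustrative):

    #include <stdio.h>

    /* Hypothetical model: only single-stepping leaves the other CPUs
     * running; any other entry parks them. */
    static int should_park_cpus(int single_step)
    {
            return !single_step;    /* was: !single_step || !contthread */
    }

    int main(void)
    {
            printf("continue: park=%d\n", should_park_cpus(0));
            printf("step:     park=%d\n", should_park_cpus(1));
            return 0;
    }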
diff --git a/kernel/sched.c b/kernel/sched.c
index cc1f81b50b82..13dd2db9fb2d 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1087,7 +1087,7 @@ hotplug_hrtick(struct notifier_block *nfb, unsigned long action, void *hcpu)
         return NOTIFY_DONE;
 }
 
-static void init_hrtick(void)
+static __init void init_hrtick(void)
 {
         hotcpu_notifier(hotplug_hrtick, 0);
 }
@@ -8909,6 +8909,9 @@ static int sched_rt_global_constraints(void)
         u64 rt_runtime, rt_period;
         int ret = 0;
 
+        if (sysctl_sched_rt_period <= 0)
+                return -EINVAL;
+
         rt_period = ktime_to_ns(tg->rt_bandwidth.rt_period);
         rt_runtime = tg->rt_bandwidth.rt_runtime;
 
@@ -8925,6 +8928,9 @@ static int sched_rt_global_constraints(void)
         unsigned long flags;
         int i;
 
+        if (sysctl_sched_rt_period <= 0)
+                return -EINVAL;
+
         spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
         for_each_possible_cpu(i) {
                 struct rt_rq *rt_rq = &cpu_rq(i)->rt;
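
Note: both variants of sched_rt_global_constraints() now reject a
non-positive sysctl_sched_rt_period up front, before any bandwidth values
are derived from it. A sketch of validating such a sysctl before use
(constraints here are illustrative, not the kernel's exact checks):

    #include <errno.h>
    #include <stdio.h>

    /* Hypothetical validator: a zero or negative period would later be
     * used as a divisor, so refuse it with -EINVAL immediately. */
    static int check_rt_period(long period_us, long runtime_us)
    {
            if (period_us <= 0)
                    return -EINVAL;
            if (runtime_us > period_us)
                    return -EINVAL;   /* can't run longer than the period */
            return 0;
    }

    int main(void)
    {
            printf("%d\n", check_rt_period(0, 0));            /* -EINVAL */
            printf("%d\n", check_rt_period(1000000, 950000)); /* 0 */
            return 0;
    }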
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 552310798dad..1113157b2058 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -350,6 +350,7 @@ static void __enable_runtime(struct rq *rq)
                 spin_lock(&rt_rq->rt_runtime_lock);
                 rt_rq->rt_runtime = rt_b->rt_runtime;
                 rt_rq->rt_time = 0;
+                rt_rq->rt_throttled = 0;
                 spin_unlock(&rt_rq->rt_runtime_lock);
                 spin_unlock(&rt_b->rt_runtime_lock);
         }
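
Note: the one-line fix clears the throttled flag when runtime is handed
back, so a queue that was throttled before the reset doesn't stay parked
despite having fresh budget. A compilable sketch (types illustrative):

    #include <stdio.h>

    struct rt_queue { long runtime; long time; int throttled; };

    /* Replenishing budget must also clear the throttle state, or the
     * queue would wait for the next replenishment tick anyway. */
    static void enable_runtime(struct rt_queue *q, long budget)
    {
            q->runtime = budget;
            q->time = 0;
            q->throttled = 0;   /* the added line above */
    }

    int main(void)
    {
            struct rt_queue q = { .runtime = 0, .time = 9, .throttled = 1 };
            enable_runtime(&q, 950000);
            printf("throttled=%d runtime=%ld\n", q.throttled, q.runtime);
            return 0;
    }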
diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
index 1876b526c778..f8d968063cea 100644
--- a/kernel/time/clockevents.c
+++ b/kernel/time/clockevents.c
@@ -72,6 +72,16 @@ void clockevents_set_mode(struct clock_event_device *dev,
 }
 
 /**
+ * clockevents_shutdown - shutdown the device and clear next_event
+ * @dev: device to shutdown
+ */
+void clockevents_shutdown(struct clock_event_device *dev)
+{
+        clockevents_set_mode(dev, CLOCK_EVT_MODE_SHUTDOWN);
+        dev->next_event.tv64 = KTIME_MAX;
+}
+
+/**
  * clockevents_program_event - Reprogram the clock event device.
  * @expires: absolute expiry time (monotonic clock)
  *
@@ -206,7 +216,7 @@ void clockevents_exchange_device(struct clock_event_device *old,
 
         if (new) {
                 BUG_ON(new->mode != CLOCK_EVT_MODE_UNUSED);
-                clockevents_set_mode(new, CLOCK_EVT_MODE_SHUTDOWN);
+                clockevents_shutdown(new);
         }
         local_irq_restore(flags);
 }
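
Note: the new helper pairs the mode switch with clearing the pending
expiry, so every shutdown call site gets both steps instead of each caller
remembering them. A sketch of the pattern on a toy device model
(illustrative, not the kernel struct):

    #include <limits.h>
    #include <stdio.h>

    enum mode { MODE_PERIODIC, MODE_SHUTDOWN };

    struct event_dev { enum mode mode; long long next_event; };

    /* One helper guarantees both steps at every call site. */
    static void dev_shutdown(struct event_dev *dev)
    {
            dev->mode = MODE_SHUTDOWN;
            dev->next_event = LLONG_MAX;  /* mirrors next_event.tv64 = KTIME_MAX */
    }

    int main(void)
    {
            struct event_dev d = { MODE_PERIODIC, 12345 };
            dev_shutdown(&d);
            printf("mode=%d next=%lld\n", d.mode, d.next_event);
            return 0;
    }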
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index 2f5a38294bf9..bd7034542399 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -235,9 +235,8 @@ static void tick_do_broadcast_on_off(void *why)
         case CLOCK_EVT_NOTIFY_BROADCAST_FORCE:
                 if (!cpu_isset(cpu, tick_broadcast_mask)) {
                         cpu_set(cpu, tick_broadcast_mask);
-                        if (td->mode == TICKDEV_MODE_PERIODIC)
-                                clockevents_set_mode(dev,
-                                                     CLOCK_EVT_MODE_SHUTDOWN);
+                        if (bc->mode == TICKDEV_MODE_PERIODIC)
+                                clockevents_shutdown(dev);
                 }
                 if (*reason == CLOCK_EVT_NOTIFY_BROADCAST_FORCE)
                         tick_broadcast_force = 1;
@@ -246,7 +245,7 @@ static void tick_do_broadcast_on_off(void *why)
                 if (!tick_broadcast_force &&
                     cpu_isset(cpu, tick_broadcast_mask)) {
                         cpu_clear(cpu, tick_broadcast_mask);
-                        if (td->mode == TICKDEV_MODE_PERIODIC)
+                        if (bc->mode == TICKDEV_MODE_PERIODIC)
                                 tick_setup_periodic(dev, 0);
                 }
                 break;
@@ -254,7 +253,7 @@ static void tick_do_broadcast_on_off(void *why)
 
         if (cpus_empty(tick_broadcast_mask)) {
                 if (!bc_stopped)
-                        clockevents_set_mode(bc, CLOCK_EVT_MODE_SHUTDOWN);
+                        clockevents_shutdown(bc);
         } else if (bc_stopped) {
                 if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
                         tick_broadcast_start_periodic(bc);
@@ -306,7 +305,7 @@ void tick_shutdown_broadcast(unsigned int *cpup)
 
         if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) {
                 if (bc && cpus_empty(tick_broadcast_mask))
-                        clockevents_set_mode(bc, CLOCK_EVT_MODE_SHUTDOWN);
+                        clockevents_shutdown(bc);
         }
 
         spin_unlock_irqrestore(&tick_broadcast_lock, flags);
@@ -321,7 +320,7 @@ void tick_suspend_broadcast(void)
 
         bc = tick_broadcast_device.evtdev;
         if (bc)
-                clockevents_set_mode(bc, CLOCK_EVT_MODE_SHUTDOWN);
+                clockevents_shutdown(bc);
 
         spin_unlock_irqrestore(&tick_broadcast_lock, flags);
 }
@@ -576,4 +575,12 @@ void tick_shutdown_broadcast_oneshot(unsigned int *cpup)
         spin_unlock_irqrestore(&tick_broadcast_lock, flags);
 }
 
+/*
+ * Check, whether the broadcast device is in one shot mode
+ */
+int tick_broadcast_oneshot_active(void)
+{
+        return tick_broadcast_device.mode == TICKDEV_MODE_ONESHOT;
+}
+
 #endif
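
Note: tick_broadcast_oneshot_active() exports the mode test as a small
predicate so other files (tick-common.c, next) can ask the question
without reaching into the broadcast device's internals. A sketch of the
query-function pattern (names illustrative):

    #include <stdio.h>

    enum tickdev_mode { TICKDEV_PERIODIC, TICKDEV_ONESHOT };

    static enum tickdev_mode broadcast_mode = TICKDEV_PERIODIC;

    /* One place owns the state; everyone else asks through this. */
    static int broadcast_oneshot_active(void)
    {
            return broadcast_mode == TICKDEV_ONESHOT;
    }

    int main(void)
    {
            printf("%d\n", broadcast_oneshot_active());  /* 0 */
            broadcast_mode = TICKDEV_ONESHOT;
            printf("%d\n", broadcast_oneshot_active());  /* 1 */
            return 0;
    }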
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index c4777193d567..df12434b43ca 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -33,7 +33,7 @@ DEFINE_PER_CPU(struct tick_device, tick_cpu_device);
  */
 ktime_t tick_next_period;
 ktime_t tick_period;
-int tick_do_timer_cpu __read_mostly = -1;
+int tick_do_timer_cpu __read_mostly = TICK_DO_TIMER_BOOT;
 DEFINE_SPINLOCK(tick_device_lock);
 
 /*
@@ -109,7 +109,8 @@ void tick_setup_periodic(struct clock_event_device *dev, int broadcast)
         if (!tick_device_is_functional(dev))
                 return;
 
-        if (dev->features & CLOCK_EVT_FEAT_PERIODIC) {
+        if ((dev->features & CLOCK_EVT_FEAT_PERIODIC) &&
+            !tick_broadcast_oneshot_active()) {
                 clockevents_set_mode(dev, CLOCK_EVT_MODE_PERIODIC);
         } else {
                 unsigned long seq;
@@ -148,7 +149,7 @@ static void tick_setup_device(struct tick_device *td,
          * If no cpu took the do_timer update, assign it to
          * this cpu:
          */
-        if (tick_do_timer_cpu == -1) {
+        if (tick_do_timer_cpu == TICK_DO_TIMER_BOOT) {
                 tick_do_timer_cpu = cpu;
                 tick_next_period = ktime_get();
                 tick_period = ktime_set(0, NSEC_PER_SEC / HZ);
@@ -249,7 +250,7 @@ static int tick_check_new_device(struct clock_event_device *newdev)
          * not give it back to the clockevents layer !
          */
         if (tick_is_broadcast_device(curdev)) {
-                clockevents_set_mode(curdev, CLOCK_EVT_MODE_SHUTDOWN);
+                clockevents_shutdown(curdev);
                 curdev = NULL;
         }
         clockevents_exchange_device(curdev, newdev);
@@ -300,7 +301,8 @@ static void tick_shutdown(unsigned int *cpup)
         if (*cpup == tick_do_timer_cpu) {
                 int cpu = first_cpu(cpu_online_map);
 
-                tick_do_timer_cpu = (cpu != NR_CPUS) ? cpu : -1;
+                tick_do_timer_cpu = (cpu != NR_CPUS) ? cpu :
+                        TICK_DO_TIMER_NONE;
         }
         spin_unlock_irqrestore(&tick_device_lock, flags);
 }
@@ -311,7 +313,7 @@ static void tick_suspend(void)
         unsigned long flags;
 
         spin_lock_irqsave(&tick_device_lock, flags);
-        clockevents_set_mode(td->evtdev, CLOCK_EVT_MODE_SHUTDOWN);
+        clockevents_shutdown(td->evtdev);
         spin_unlock_irqrestore(&tick_device_lock, flags);
 }
 
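
Note: the magic -1 is split into two named sentinels, distinguishing "no
CPU has claimed the do_timer duty since boot" from "the holder went away
via hotplug". A sketch of why the distinction matters at the assignment
sites (constants illustrative):

    #include <stdio.h>

    #define DO_TIMER_NONE  -1   /* duty vacated (hotplug) */
    #define DO_TIMER_BOOT  -2   /* duty never yet claimed */

    static int do_timer_cpu = DO_TIMER_BOOT;

    static void setup_cpu(int cpu)
    {
            if (do_timer_cpu == DO_TIMER_BOOT)  /* only first boot CPU claims */
                    do_timer_cpu = cpu;
    }

    int main(void)
    {
            setup_cpu(0);
            setup_cpu(1);                       /* no effect: already claimed */
            printf("do_timer_cpu=%d\n", do_timer_cpu);
            return 0;
    }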
diff --git a/kernel/time/tick-internal.h b/kernel/time/tick-internal.h
index 0ffc2918ea6f..469248782c23 100644
--- a/kernel/time/tick-internal.h
+++ b/kernel/time/tick-internal.h
@@ -1,6 +1,10 @@
 /*
  * tick internal variable and functions used by low/high res code
  */
+
+#define TICK_DO_TIMER_NONE -1
+#define TICK_DO_TIMER_BOOT -2
+
 DECLARE_PER_CPU(struct tick_device, tick_cpu_device);
 extern spinlock_t tick_device_lock;
 extern ktime_t tick_next_period;
@@ -10,6 +14,8 @@ extern int tick_do_timer_cpu __read_mostly;
 extern void tick_setup_periodic(struct clock_event_device *dev, int broadcast);
 extern void tick_handle_periodic(struct clock_event_device *dev);
 
+extern void clockevents_shutdown(struct clock_event_device *dev);
+
 /*
  * NO_HZ / high resolution timer shared code
  */
@@ -29,6 +35,7 @@ extern void tick_broadcast_oneshot_control(unsigned long reason);
 extern void tick_broadcast_switch_to_oneshot(void);
 extern void tick_shutdown_broadcast_oneshot(unsigned int *cpup);
 extern int tick_resume_broadcast_oneshot(struct clock_event_device *bc);
+extern int tick_broadcast_oneshot_active(void);
 # else /* BROADCAST */
 static inline void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
 {
@@ -37,6 +44,7 @@ static inline void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
 static inline void tick_broadcast_oneshot_control(unsigned long reason) { }
 static inline void tick_broadcast_switch_to_oneshot(void) { }
 static inline void tick_shutdown_broadcast_oneshot(unsigned int *cpup) { }
+static inline int tick_broadcast_oneshot_active(void) { return 0; }
 # endif /* !BROADCAST */
 
 #else /* !ONESHOT */
@@ -66,6 +74,7 @@ static inline int tick_resume_broadcast_oneshot(struct clock_event_device *bc)
 {
         return 0;
 }
+static inline int tick_broadcast_oneshot_active(void) { return 0; }
#endif /* !TICK_ONESHOT */
 
 /*
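
Note: each configuration branch of the header grows a matching stub
returning 0, so callers of tick_broadcast_oneshot_active() need no #ifdef
guards of their own. A self-contained sketch of that real-declaration-or-
inline-stub pattern (FEATURE_ONESHOT is a hypothetical toggle):

    #include <stdio.h>

    #define FEATURE_ONESHOT 0

    #if FEATURE_ONESHOT
    extern int oneshot_active(void);        /* real version elsewhere */
    #else
    static inline int oneshot_active(void) { return 0; }   /* stub */
    #endif

    int main(void)
    {
            if (!oneshot_active())          /* no #ifdef at the call site */
                    printf("periodic path\n");
            return 0;
    }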
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index a87b0468568b..39019b3f7621 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -75,6 +75,9 @@ static void tick_do_update_jiffies64(ktime_t now)
                                                    incr * ticks);
                 }
                 do_timer(++ticks);
+
+                /* Keep the tick_next_period variable up to date */
+                tick_next_period = ktime_add(last_jiffies_update, tick_period);
         }
         write_sequnlock(&xtime_lock);
 }
@@ -221,7 +224,7 @@ void tick_nohz_stop_sched_tick(int inidle)
          */
         if (unlikely(!cpu_online(cpu))) {
                 if (cpu == tick_do_timer_cpu)
-                        tick_do_timer_cpu = -1;
+                        tick_do_timer_cpu = TICK_DO_TIMER_NONE;
         }
 
         if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE))
@@ -303,7 +306,7 @@ void tick_nohz_stop_sched_tick(int inidle)
          * invoked.
          */
         if (cpu == tick_do_timer_cpu)
-                tick_do_timer_cpu = -1;
+                tick_do_timer_cpu = TICK_DO_TIMER_NONE;
 
         ts->idle_sleeps++;
 
@@ -468,7 +471,7 @@ static void tick_nohz_handler(struct clock_event_device *dev)
          * this duty, then the jiffies update is still serialized by
          * xtime_lock.
          */
-        if (unlikely(tick_do_timer_cpu == -1))
+        if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_NONE))
                 tick_do_timer_cpu = cpu;
 
         /* Check, if the jiffies need an update */
@@ -570,7 +573,7 @@ static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer)
          * this duty, then the jiffies update is still serialized by
          * xtime_lock.
          */
-        if (unlikely(tick_do_timer_cpu == -1))
+        if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_NONE))
                 tick_do_timer_cpu = cpu;
 #endif
 
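
Note: the first hunk recomputes tick_next_period from the same base that
just advanced jiffies, so the two never drift apart after a long nohz
sleep. A compilable sketch of keeping a derived deadline in sync with the
base it is derived from (values illustrative):

    #include <stdio.h>

    static long long last_update_ns;
    static long long period_ns = 1000000;   /* 1 ms tick, i.e. HZ=1000 */
    static long long next_period_ns;

    /* Advance the base over any missed periods, then recompute the next
     * edge from it, mirroring
     * tick_next_period = ktime_add(last_jiffies_update, tick_period). */
    static void update_jiffies(long long now_ns)
    {
            while (now_ns - last_update_ns >= period_ns)
                    last_update_ns += period_ns;
            next_period_ns = last_update_ns + period_ns;
    }

    int main(void)
    {
            update_jiffies(3500000);
            printf("next period at %lld ns\n", next_period_ns);  /* 4000000 */
            return 0;
    }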