diff options
author | Andrea Bastoni <bastoni@cs.unc.edu> | 2010-10-23 01:01:49 -0400 |
---|---|---|
committer | Andrea Bastoni <bastoni@cs.unc.edu> | 2010-10-23 01:01:49 -0400 |
commit | 3dd41424090a0ca3a660218d06afe6ff4441bad3 (patch) | |
tree | 511ef1bb1799027fc5aad574adce49120ecadd87 /kernel/hrtimer.c | |
parent | 5c5456402d467969b217d7fdd6670f8c8600f5a8 (diff) | |
parent | f6f94e2ab1b33f0082ac22d71f66385a60d8157f (diff) |
Merge commit 'v2.6.36' into wip-merge-2.6.36
Conflicts:
Makefile
arch/x86/include/asm/unistd_32.h
arch/x86/kernel/syscall_table_32.S
kernel/sched.c
kernel/time/tick-sched.c
Relevant API and function changes (resolved in this commit):
- (API) .enqueue_task() (enqueue_task_litmus),
dequeue_task() (dequeue_task_litmus),
[litmus/sched_litmus.c]
- (API) .select_task_rq() (select_task_rq_litmus)
[litmus/sched_litmus.c]
- (API) sysrq_dump_trace_buffer() and sysrq_handle_kill_rt_tasks()
[litmus/sched_trace.c]
- struct kfifo internal buffer name changed (buffer -> buf)
[litmus/sched_trace.c]
- add_wait_queue_exclusive_locked -> __add_wait_queue_tail_exclusive
[litmus/fmlp.c]
- syscall numbers for both x86_32 and x86_64
Diffstat (limited to 'kernel/hrtimer.c')
-rw-r--r-- | kernel/hrtimer.c | 102 |
1 file changed, 60 insertions(+), 42 deletions(-)
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c index fdf95968e517..cb49883b64e5 100644 --- a/kernel/hrtimer.c +++ b/kernel/hrtimer.c | |||
@@ -91,8 +91,8 @@ static void hrtimer_get_softirq_time(struct hrtimer_cpu_base *base) | |||
91 | 91 | ||
92 | do { | 92 | do { |
93 | seq = read_seqbegin(&xtime_lock); | 93 | seq = read_seqbegin(&xtime_lock); |
94 | xts = current_kernel_time(); | 94 | xts = __current_kernel_time(); |
95 | tom = wall_to_monotonic; | 95 | tom = __get_wall_to_monotonic(); |
96 | } while (read_seqretry(&xtime_lock, seq)); | 96 | } while (read_seqretry(&xtime_lock, seq)); |
97 | 97 | ||
98 | xtim = timespec_to_ktime(xts); | 98 | xtim = timespec_to_ktime(xts); |
@@ -146,12 +146,8 @@ struct hrtimer_clock_base *lock_hrtimer_base(const struct hrtimer *timer, | |||
146 | static int hrtimer_get_target(int this_cpu, int pinned) | 146 | static int hrtimer_get_target(int this_cpu, int pinned) |
147 | { | 147 | { |
148 | #ifdef CONFIG_NO_HZ | 148 | #ifdef CONFIG_NO_HZ |
149 | if (!pinned && get_sysctl_timer_migration() && idle_cpu(this_cpu)) { | 149 | if (!pinned && get_sysctl_timer_migration() && idle_cpu(this_cpu)) |
150 | int preferred_cpu = get_nohz_load_balancer(); | 150 | return get_nohz_timer_target(); |
151 | |||
152 | if (preferred_cpu >= 0) | ||
153 | return preferred_cpu; | ||
154 | } | ||
155 | #endif | 151 | #endif |
156 | return this_cpu; | 152 | return this_cpu; |
157 | } | 153 | } |
@@ -614,7 +610,7 @@ static int hrtimer_reprogram(struct hrtimer *timer, | |||
614 | static void retrigger_next_event(void *arg) | 610 | static void retrigger_next_event(void *arg) |
615 | { | 611 | { |
616 | struct hrtimer_cpu_base *base; | 612 | struct hrtimer_cpu_base *base; |
617 | struct timespec realtime_offset; | 613 | struct timespec realtime_offset, wtm; |
618 | unsigned long seq; | 614 | unsigned long seq; |
619 | 615 | ||
620 | if (!hrtimer_hres_active()) | 616 | if (!hrtimer_hres_active()) |
@@ -622,10 +618,9 @@ static void retrigger_next_event(void *arg) | |||
622 | 618 | ||
623 | do { | 619 | do { |
624 | seq = read_seqbegin(&xtime_lock); | 620 | seq = read_seqbegin(&xtime_lock); |
625 | set_normalized_timespec(&realtime_offset, | 621 | wtm = __get_wall_to_monotonic(); |
626 | -wall_to_monotonic.tv_sec, | ||
627 | -wall_to_monotonic.tv_nsec); | ||
628 | } while (read_seqretry(&xtime_lock, seq)); | 622 | } while (read_seqretry(&xtime_lock, seq)); |
623 | set_normalized_timespec(&realtime_offset, -wtm.tv_sec, -wtm.tv_nsec); | ||
629 | 624 | ||
630 | base = &__get_cpu_var(hrtimer_bases); | 625 | base = &__get_cpu_var(hrtimer_bases); |
631 | 626 | ||
@@ -938,6 +933,7 @@ static inline int | |||
938 | remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base) | 933 | remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base) |
939 | { | 934 | { |
940 | if (hrtimer_is_queued(timer)) { | 935 | if (hrtimer_is_queued(timer)) { |
936 | unsigned long state; | ||
941 | int reprogram; | 937 | int reprogram; |
942 | 938 | ||
943 | /* | 939 | /* |
@@ -951,8 +947,13 @@ remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base) | |||
951 | debug_deactivate(timer); | 947 | debug_deactivate(timer); |
952 | timer_stats_hrtimer_clear_start_info(timer); | 948 | timer_stats_hrtimer_clear_start_info(timer); |
953 | reprogram = base->cpu_base == &__get_cpu_var(hrtimer_bases); | 949 | reprogram = base->cpu_base == &__get_cpu_var(hrtimer_bases); |
954 | __remove_hrtimer(timer, base, HRTIMER_STATE_INACTIVE, | 950 | /* |
955 | reprogram); | 951 | * We must preserve the CALLBACK state flag here, |
952 | * otherwise we could move the timer base in | ||
953 | * switch_hrtimer_base. | ||
954 | */ | ||
955 | state = timer->state & HRTIMER_STATE_CALLBACK; | ||
956 | __remove_hrtimer(timer, base, state, reprogram); | ||
956 | return 1; | 957 | return 1; |
957 | } | 958 | } |
958 | return 0; | 959 | return 0; |
@@ -1190,11 +1191,10 @@ EXPORT_SYMBOL_GPL(hrtimer_cancel); | |||
1190 | */ | 1191 | */ |
1191 | ktime_t hrtimer_get_remaining(const struct hrtimer *timer) | 1192 | ktime_t hrtimer_get_remaining(const struct hrtimer *timer) |
1192 | { | 1193 | { |
1193 | struct hrtimer_clock_base *base; | ||
1194 | unsigned long flags; | 1194 | unsigned long flags; |
1195 | ktime_t rem; | 1195 | ktime_t rem; |
1196 | 1196 | ||
1197 | base = lock_hrtimer_base(timer, &flags); | 1197 | lock_hrtimer_base(timer, &flags); |
1198 | rem = hrtimer_expires_remaining(timer); | 1198 | rem = hrtimer_expires_remaining(timer); |
1199 | unlock_hrtimer_base(timer, &flags); | 1199 | unlock_hrtimer_base(timer, &flags); |
1200 | 1200 | ||
@@ -1331,6 +1331,9 @@ static void __run_hrtimer(struct hrtimer *timer, ktime_t *now) | |||
1331 | BUG_ON(timer->state != HRTIMER_STATE_CALLBACK); | 1331 | BUG_ON(timer->state != HRTIMER_STATE_CALLBACK); |
1332 | enqueue_hrtimer(timer, base); | 1332 | enqueue_hrtimer(timer, base); |
1333 | } | 1333 | } |
1334 | |||
1335 | WARN_ON_ONCE(!(timer->state & HRTIMER_STATE_CALLBACK)); | ||
1336 | |||
1334 | timer->state &= ~HRTIMER_STATE_CALLBACK; | 1337 | timer->state &= ~HRTIMER_STATE_CALLBACK; |
1335 | } | 1338 | } |
1336 | 1339 | ||
@@ -1844,35 +1847,15 @@ void __init hrtimers_init(void) | |||
1844 | } | 1847 | } |
1845 | 1848 | ||
1846 | /** | 1849 | /** |
1847 | * schedule_hrtimeout_range - sleep until timeout | 1850 | * schedule_hrtimeout_range_clock - sleep until timeout |
1848 | * @expires: timeout value (ktime_t) | 1851 | * @expires: timeout value (ktime_t) |
1849 | * @delta: slack in expires timeout (ktime_t) | 1852 | * @delta: slack in expires timeout (ktime_t) |
1850 | * @mode: timer mode, HRTIMER_MODE_ABS or HRTIMER_MODE_REL | 1853 | * @mode: timer mode, HRTIMER_MODE_ABS or HRTIMER_MODE_REL |
1851 | * | 1854 | * @clock: timer clock, CLOCK_MONOTONIC or CLOCK_REALTIME |
1852 | * Make the current task sleep until the given expiry time has | ||
1853 | * elapsed. The routine will return immediately unless | ||
1854 | * the current task state has been set (see set_current_state()). | ||
1855 | * | ||
1856 | * The @delta argument gives the kernel the freedom to schedule the | ||
1857 | * actual wakeup to a time that is both power and performance friendly. | ||
1858 | * The kernel give the normal best effort behavior for "@expires+@delta", | ||
1859 | * but may decide to fire the timer earlier, but no earlier than @expires. | ||
1860 | * | ||
1861 | * You can set the task state as follows - | ||
1862 | * | ||
1863 | * %TASK_UNINTERRUPTIBLE - at least @timeout time is guaranteed to | ||
1864 | * pass before the routine returns. | ||
1865 | * | ||
1866 | * %TASK_INTERRUPTIBLE - the routine may return early if a signal is | ||
1867 | * delivered to the current task. | ||
1868 | * | ||
1869 | * The current task state is guaranteed to be TASK_RUNNING when this | ||
1870 | * routine returns. | ||
1871 | * | ||
1872 | * Returns 0 when the timer has expired otherwise -EINTR | ||
1873 | */ | 1855 | */ |
1874 | int __sched schedule_hrtimeout_range(ktime_t *expires, unsigned long delta, | 1856 | int __sched |
1875 | const enum hrtimer_mode mode) | 1857 | schedule_hrtimeout_range_clock(ktime_t *expires, unsigned long delta, |
1858 | const enum hrtimer_mode mode, int clock) | ||
1876 | { | 1859 | { |
1877 | struct hrtimer_sleeper t; | 1860 | struct hrtimer_sleeper t; |
1878 | 1861 | ||
@@ -1894,7 +1877,7 @@ int __sched schedule_hrtimeout_range(ktime_t *expires, unsigned long delta, | |||
1894 | return -EINTR; | 1877 | return -EINTR; |
1895 | } | 1878 | } |
1896 | 1879 | ||
1897 | hrtimer_init_on_stack(&t.timer, CLOCK_MONOTONIC, mode); | 1880 | hrtimer_init_on_stack(&t.timer, clock, mode); |
1898 | hrtimer_set_expires_range_ns(&t.timer, *expires, delta); | 1881 | hrtimer_set_expires_range_ns(&t.timer, *expires, delta); |
1899 | 1882 | ||
1900 | hrtimer_init_sleeper(&t, current); | 1883 | hrtimer_init_sleeper(&t, current); |
@@ -1913,6 +1896,41 @@ int __sched schedule_hrtimeout_range(ktime_t *expires, unsigned long delta, | |||
1913 | 1896 | ||
1914 | return !t.task ? 0 : -EINTR; | 1897 | return !t.task ? 0 : -EINTR; |
1915 | } | 1898 | } |
1899 | |||
1900 | /** | ||
1901 | * schedule_hrtimeout_range - sleep until timeout | ||
1902 | * @expires: timeout value (ktime_t) | ||
1903 | * @delta: slack in expires timeout (ktime_t) | ||
1904 | * @mode: timer mode, HRTIMER_MODE_ABS or HRTIMER_MODE_REL | ||
1905 | * | ||
1906 | * Make the current task sleep until the given expiry time has | ||
1907 | * elapsed. The routine will return immediately unless | ||
1908 | * the current task state has been set (see set_current_state()). | ||
1909 | * | ||
1910 | * The @delta argument gives the kernel the freedom to schedule the | ||
1911 | * actual wakeup to a time that is both power and performance friendly. | ||
1912 | * The kernel give the normal best effort behavior for "@expires+@delta", | ||
1913 | * but may decide to fire the timer earlier, but no earlier than @expires. | ||
1914 | * | ||
1915 | * You can set the task state as follows - | ||
1916 | * | ||
1917 | * %TASK_UNINTERRUPTIBLE - at least @timeout time is guaranteed to | ||
1918 | * pass before the routine returns. | ||
1919 | * | ||
1920 | * %TASK_INTERRUPTIBLE - the routine may return early if a signal is | ||
1921 | * delivered to the current task. | ||
1922 | * | ||
1923 | * The current task state is guaranteed to be TASK_RUNNING when this | ||
1924 | * routine returns. | ||
1925 | * | ||
1926 | * Returns 0 when the timer has expired otherwise -EINTR | ||
1927 | */ | ||
1928 | int __sched schedule_hrtimeout_range(ktime_t *expires, unsigned long delta, | ||
1929 | const enum hrtimer_mode mode) | ||
1930 | { | ||
1931 | return schedule_hrtimeout_range_clock(expires, delta, mode, | ||
1932 | CLOCK_MONOTONIC); | ||
1933 | } | ||
1916 | EXPORT_SYMBOL_GPL(schedule_hrtimeout_range); | 1934 | EXPORT_SYMBOL_GPL(schedule_hrtimeout_range); |
1917 | 1935 | ||
1918 | /** | 1936 | /** |