| author | Linus Torvalds <torvalds@linux-foundation.org> | 2011-10-26 11:15:03 -0400 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2011-10-26 11:15:03 -0400 |
| commit | 39adff5f69d6849ca22353a88058c9f8630528c0 | |
| tree | b0c2d2de77ebc5c97fd19c29b81eeb03549553f8 | |
| parent | 8a4a8918ed6e4a361f4df19f199bbc2d0a89a46c | |
| parent | e35f95b36e43f67a6f806172555a152c11ea0a78 | |
Merge branch 'timers-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
* 'timers-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (23 commits)
time, s390: Get rid of compile warning
dw_apb_timer: constify clocksource name
time: Cleanup old CONFIG_GENERIC_TIME references that snuck in
time: Change jiffies_to_clock_t() argument type to unsigned long
alarmtimers: Fix error handling
clocksource: Make watchdog reset lockless
posix-cpu-timers: Cure SMP accounting oddities
s390: Use direct ktime path for s390 clockevent device
clockevents: Add direct ktime programming function
clockevents: Make minimum delay adjustments configurable
nohz: Remove "Switched to NOHz mode" debugging messages
proc: Consider NO_HZ when printing idle and iowait times
nohz: Make idle/iowait counter update conditional
nohz: Fix update_ts_time_stat idle accounting
cputime: Clean up cputime_to_usecs and usecs_to_cputime macros
alarmtimers: Rework RTC device selection using class interface
alarmtimers: Add try_to_cancel functionality
alarmtimers: Add more refined alarm state tracking
alarmtimers: Remove period from alarm structure
alarmtimers: Remove interval cap limit hack
...
30 files changed, 477 insertions, 258 deletions
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 795126ea4935..8090cad0dd52 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
| @@ -346,7 +346,6 @@ config ARCH_GEMINI | |||
| 346 | config ARCH_PRIMA2 | 346 | config ARCH_PRIMA2 |
| 347 | bool "CSR SiRFSoC PRIMA2 ARM Cortex A9 Platform" | 347 | bool "CSR SiRFSoC PRIMA2 ARM Cortex A9 Platform" |
| 348 | select CPU_V7 | 348 | select CPU_V7 |
| 349 | select GENERIC_TIME | ||
| 350 | select NO_IOPORT | 349 | select NO_IOPORT |
| 351 | select GENERIC_CLOCKEVENTS | 350 | select GENERIC_CLOCKEVENTS |
| 352 | select CLKDEV_LOOKUP | 351 | select CLKDEV_LOOKUP |
| @@ -520,7 +519,6 @@ config ARCH_LPC32XX | |||
| 520 | select ARM_AMBA | 519 | select ARM_AMBA |
| 521 | select USB_ARCH_HAS_OHCI | 520 | select USB_ARCH_HAS_OHCI |
| 522 | select CLKDEV_LOOKUP | 521 | select CLKDEV_LOOKUP |
| 523 | select GENERIC_TIME | ||
| 524 | select GENERIC_CLOCKEVENTS | 522 | select GENERIC_CLOCKEVENTS |
| 525 | help | 523 | help |
| 526 | Support for the NXP LPC32XX family of processors | 524 | Support for the NXP LPC32XX family of processors |
| @@ -599,7 +597,6 @@ config ARCH_TEGRA | |||
| 599 | bool "NVIDIA Tegra" | 597 | bool "NVIDIA Tegra" |
| 600 | select CLKDEV_LOOKUP | 598 | select CLKDEV_LOOKUP |
| 601 | select CLKSRC_MMIO | 599 | select CLKSRC_MMIO |
| 602 | select GENERIC_TIME | ||
| 603 | select GENERIC_CLOCKEVENTS | 600 | select GENERIC_CLOCKEVENTS |
| 604 | select GENERIC_GPIO | 601 | select GENERIC_GPIO |
| 605 | select HAVE_CLK | 602 | select HAVE_CLK |
| @@ -914,7 +911,6 @@ config ARCH_VT8500 | |||
| 914 | config ARCH_ZYNQ | 911 | config ARCH_ZYNQ |
| 915 | bool "Xilinx Zynq ARM Cortex A9 Platform" | 912 | bool "Xilinx Zynq ARM Cortex A9 Platform" |
| 916 | select CPU_V7 | 913 | select CPU_V7 |
| 917 | select GENERIC_TIME | ||
| 918 | select GENERIC_CLOCKEVENTS | 914 | select GENERIC_CLOCKEVENTS |
| 919 | select CLKDEV_LOOKUP | 915 | select CLKDEV_LOOKUP |
| 920 | select ARM_GIC | 916 | select ARM_GIC |
diff --git a/arch/mn10300/Kconfig b/arch/mn10300/Kconfig
index f093b3a8a4a1..438db84a1f7c 100644
--- a/arch/mn10300/Kconfig
+++ b/arch/mn10300/Kconfig
| @@ -47,9 +47,6 @@ config GENERIC_CMOS_UPDATE | |||
| 47 | config GENERIC_HWEIGHT | 47 | config GENERIC_HWEIGHT |
| 48 | def_bool y | 48 | def_bool y |
| 49 | 49 | ||
| 50 | config GENERIC_TIME | ||
| 51 | def_bool y | ||
| 52 | |||
| 53 | config GENERIC_CLOCKEVENTS | 50 | config GENERIC_CLOCKEVENTS |
| 54 | def_bool y | 51 | def_bool y |
| 55 | 52 | ||
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index dff933065ab6..8d65bd0383fc 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
| @@ -109,10 +109,14 @@ static void fixup_clock_comparator(unsigned long long delta) | |||
| 109 | set_clock_comparator(S390_lowcore.clock_comparator); | 109 | set_clock_comparator(S390_lowcore.clock_comparator); |
| 110 | } | 110 | } |
| 111 | 111 | ||
| 112 | static int s390_next_event(unsigned long delta, | 112 | static int s390_next_ktime(ktime_t expires, |
| 113 | struct clock_event_device *evt) | 113 | struct clock_event_device *evt) |
| 114 | { | 114 | { |
| 115 | S390_lowcore.clock_comparator = get_clock() + delta; | 115 | u64 nsecs; |
| 116 | |||
| 117 | nsecs = ktime_to_ns(ktime_sub(expires, ktime_get_monotonic_offset())); | ||
| 118 | do_div(nsecs, 125); | ||
| 119 | S390_lowcore.clock_comparator = TOD_UNIX_EPOCH + (nsecs << 9); | ||
| 116 | set_clock_comparator(S390_lowcore.clock_comparator); | 120 | set_clock_comparator(S390_lowcore.clock_comparator); |
| 117 | return 0; | 121 | return 0; |
| 118 | } | 122 | } |
| @@ -137,14 +141,15 @@ void init_cpu_timer(void) | |||
| 137 | cpu = smp_processor_id(); | 141 | cpu = smp_processor_id(); |
| 138 | cd = &per_cpu(comparators, cpu); | 142 | cd = &per_cpu(comparators, cpu); |
| 139 | cd->name = "comparator"; | 143 | cd->name = "comparator"; |
| 140 | cd->features = CLOCK_EVT_FEAT_ONESHOT; | 144 | cd->features = CLOCK_EVT_FEAT_ONESHOT | |
| 145 | CLOCK_EVT_FEAT_KTIME; | ||
| 141 | cd->mult = 16777; | 146 | cd->mult = 16777; |
| 142 | cd->shift = 12; | 147 | cd->shift = 12; |
| 143 | cd->min_delta_ns = 1; | 148 | cd->min_delta_ns = 1; |
| 144 | cd->max_delta_ns = LONG_MAX; | 149 | cd->max_delta_ns = LONG_MAX; |
| 145 | cd->rating = 400; | 150 | cd->rating = 400; |
| 146 | cd->cpumask = cpumask_of(cpu); | 151 | cd->cpumask = cpumask_of(cpu); |
| 147 | cd->set_next_event = s390_next_event; | 152 | cd->set_next_ktime = s390_next_ktime; |
| 148 | cd->set_mode = s390_set_mode; | 153 | cd->set_mode = s390_set_mode; |
| 149 | 154 | ||
| 150 | clockevents_register_device(cd); | 155 | clockevents_register_device(cd); |
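The constants in s390_next_ktime() above follow from the z/Architecture TOD clock format: one TOD unit is 2^-12 microseconds, i.e. 512/125 units per nanosecond, so `(nsecs / 125) << 9` converts nanoseconds to TOD units with integer math only. A minimal userspace sketch of that arithmetic (the helper name is ours, not kernel code):

```c
#include <stdio.h>
#include <stdint.h>

/* One s390 TOD clock unit is 2^-12 microseconds (4096 units per
 * microsecond), which is 512/125 units per nanosecond.  Dividing by
 * 125 and shifting left by 9 is the integer-only equivalent.        */
static uint64_t ns_to_tod_units(uint64_t ns)
{
	return (ns / 125) << 9;        /* == ns * 512 / 125, truncated */
}

int main(void)
{
	/* 1 ms -> 4096 * 1000 = 4096000 TOD units */
	printf("%llu\n", (unsigned long long)ns_to_tod_units(1000000));
	return 0;
}
```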
diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig
index b30f71ac0d06..70a0de46cd1b 100644
--- a/arch/tile/Kconfig
+++ b/arch/tile/Kconfig
| @@ -46,9 +46,6 @@ config NEED_PER_CPU_PAGE_FIRST_CHUNK | |||
| 46 | config SYS_SUPPORTS_HUGETLBFS | 46 | config SYS_SUPPORTS_HUGETLBFS |
| 47 | def_bool y | 47 | def_bool y |
| 48 | 48 | ||
| 49 | config GENERIC_TIME | ||
| 50 | def_bool y | ||
| 51 | |||
| 52 | config GENERIC_CLOCKEVENTS | 49 | config GENERIC_CLOCKEVENTS |
| 53 | def_bool y | 50 | def_bool y |
| 54 | 51 | ||
diff --git a/arch/tile/configs/tilegx_defconfig b/arch/tile/configs/tilegx_defconfig
index 2ad73fb707b9..dafdbbae1124 100644
--- a/arch/tile/configs/tilegx_defconfig
+++ b/arch/tile/configs/tilegx_defconfig
| @@ -11,7 +11,6 @@ CONFIG_HAVE_ARCH_ALLOC_REMAP=y | |||
| 11 | CONFIG_HAVE_SETUP_PER_CPU_AREA=y | 11 | CONFIG_HAVE_SETUP_PER_CPU_AREA=y |
| 12 | CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK=y | 12 | CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK=y |
| 13 | CONFIG_SYS_SUPPORTS_HUGETLBFS=y | 13 | CONFIG_SYS_SUPPORTS_HUGETLBFS=y |
| 14 | CONFIG_GENERIC_TIME=y | ||
| 15 | CONFIG_GENERIC_CLOCKEVENTS=y | 14 | CONFIG_GENERIC_CLOCKEVENTS=y |
| 16 | CONFIG_RWSEM_GENERIC_SPINLOCK=y | 15 | CONFIG_RWSEM_GENERIC_SPINLOCK=y |
| 17 | CONFIG_DEFAULT_MIGRATION_COST=10000000 | 16 | CONFIG_DEFAULT_MIGRATION_COST=10000000 |
diff --git a/arch/tile/configs/tilepro_defconfig b/arch/tile/configs/tilepro_defconfig
index f58dc362b944..6f05f969b564 100644
--- a/arch/tile/configs/tilepro_defconfig
+++ b/arch/tile/configs/tilepro_defconfig
| @@ -11,7 +11,6 @@ CONFIG_HAVE_ARCH_ALLOC_REMAP=y | |||
| 11 | CONFIG_HAVE_SETUP_PER_CPU_AREA=y | 11 | CONFIG_HAVE_SETUP_PER_CPU_AREA=y |
| 12 | CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK=y | 12 | CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK=y |
| 13 | CONFIG_SYS_SUPPORTS_HUGETLBFS=y | 13 | CONFIG_SYS_SUPPORTS_HUGETLBFS=y |
| 14 | CONFIG_GENERIC_TIME=y | ||
| 15 | CONFIG_GENERIC_CLOCKEVENTS=y | 14 | CONFIG_GENERIC_CLOCKEVENTS=y |
| 16 | CONFIG_RWSEM_GENERIC_SPINLOCK=y | 15 | CONFIG_RWSEM_GENERIC_SPINLOCK=y |
| 17 | CONFIG_DEFAULT_MIGRATION_COST=10000000 | 16 | CONFIG_DEFAULT_MIGRATION_COST=10000000 |
diff --git a/arch/um/defconfig b/arch/um/defconfig
index 9f7634f08cf3..761f5e1a657e 100644
--- a/arch/um/defconfig
+++ b/arch/um/defconfig
| @@ -13,7 +13,6 @@ CONFIG_LOCKDEP_SUPPORT=y | |||
| 13 | # CONFIG_STACKTRACE_SUPPORT is not set | 13 | # CONFIG_STACKTRACE_SUPPORT is not set |
| 14 | CONFIG_GENERIC_CALIBRATE_DELAY=y | 14 | CONFIG_GENERIC_CALIBRATE_DELAY=y |
| 15 | CONFIG_GENERIC_BUG=y | 15 | CONFIG_GENERIC_BUG=y |
| 16 | CONFIG_GENERIC_TIME=y | ||
| 17 | CONFIG_GENERIC_CLOCKEVENTS=y | 16 | CONFIG_GENERIC_CLOCKEVENTS=y |
| 18 | CONFIG_IRQ_RELEASE_METHOD=y | 17 | CONFIG_IRQ_RELEASE_METHOD=y |
| 19 | CONFIG_HZ=100 | 18 | CONFIG_HZ=100 |
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 9037289617ac..e022f82cd0d0 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
| @@ -68,6 +68,7 @@ config X86 | |||
| 68 | select GENERIC_IRQ_PROBE | 68 | select GENERIC_IRQ_PROBE |
| 69 | select GENERIC_PENDING_IRQ if SMP | 69 | select GENERIC_PENDING_IRQ if SMP |
| 70 | select GENERIC_IRQ_SHOW | 70 | select GENERIC_IRQ_SHOW |
| 71 | select GENERIC_CLOCKEVENTS_MIN_ADJUST | ||
| 71 | select IRQ_FORCED_THREADING | 72 | select IRQ_FORCED_THREADING |
| 72 | select USE_GENERIC_SMP_HELPERS if SMP | 73 | select USE_GENERIC_SMP_HELPERS if SMP |
| 73 | select HAVE_BPF_JIT if (X86_64 && NET) | 74 | select HAVE_BPF_JIT if (X86_64 && NET) |
diff --git a/arch/xtensa/configs/iss_defconfig b/arch/xtensa/configs/iss_defconfig
index 0234cd198c54..f932b30b47fb 100644
--- a/arch/xtensa/configs/iss_defconfig
+++ b/arch/xtensa/configs/iss_defconfig
| @@ -15,7 +15,6 @@ CONFIG_GENERIC_GPIO=y | |||
| 15 | # CONFIG_ARCH_HAS_ILOG2_U64 is not set | 15 | # CONFIG_ARCH_HAS_ILOG2_U64 is not set |
| 16 | CONFIG_NO_IOPORT=y | 16 | CONFIG_NO_IOPORT=y |
| 17 | CONFIG_HZ=100 | 17 | CONFIG_HZ=100 |
| 18 | CONFIG_GENERIC_TIME=y | ||
| 19 | CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" | 18 | CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" |
| 20 | CONFIG_CONSTRUCTORS=y | 19 | CONFIG_CONSTRUCTORS=y |
| 21 | 20 | ||
diff --git a/arch/xtensa/configs/s6105_defconfig b/arch/xtensa/configs/s6105_defconfig
index 4891abbf16bc..550e8ed5b5c6 100644
--- a/arch/xtensa/configs/s6105_defconfig
+++ b/arch/xtensa/configs/s6105_defconfig
| @@ -15,7 +15,6 @@ CONFIG_GENERIC_GPIO=y | |||
| 15 | # CONFIG_ARCH_HAS_ILOG2_U64 is not set | 15 | # CONFIG_ARCH_HAS_ILOG2_U64 is not set |
| 16 | CONFIG_NO_IOPORT=y | 16 | CONFIG_NO_IOPORT=y |
| 17 | CONFIG_HZ=100 | 17 | CONFIG_HZ=100 |
| 18 | CONFIG_GENERIC_TIME=y | ||
| 19 | CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" | 18 | CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" |
| 20 | 19 | ||
| 21 | # | 20 | # |
diff --git a/drivers/clocksource/dw_apb_timer.c b/drivers/clocksource/dw_apb_timer.c
index 580f870541a3..8c2a35f26d9b 100644
--- a/drivers/clocksource/dw_apb_timer.c
+++ b/drivers/clocksource/dw_apb_timer.c
| @@ -348,7 +348,7 @@ static void apbt_restart_clocksource(struct clocksource *cs) | |||
| 348 | * dw_apb_clocksource_register() as the next step. | 348 | * dw_apb_clocksource_register() as the next step. |
| 349 | */ | 349 | */ |
| 350 | struct dw_apb_clocksource * | 350 | struct dw_apb_clocksource * |
| 351 | dw_apb_clocksource_init(unsigned rating, char *name, void __iomem *base, | 351 | dw_apb_clocksource_init(unsigned rating, const char *name, void __iomem *base, |
| 352 | unsigned long freq) | 352 | unsigned long freq) |
| 353 | { | 353 | { |
| 354 | struct dw_apb_clocksource *dw_cs = kzalloc(sizeof(*dw_cs), GFP_KERNEL); | 354 | struct dw_apb_clocksource *dw_cs = kzalloc(sizeof(*dw_cs), GFP_KERNEL); |
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index 33b56e5c5c14..c97b468ee9f7 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
| @@ -120,10 +120,12 @@ static inline cputime64_t get_cpu_idle_time_jiffy(unsigned int cpu, | |||
| 120 | 120 | ||
| 121 | static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall) | 121 | static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall) |
| 122 | { | 122 | { |
| 123 | u64 idle_time = get_cpu_idle_time_us(cpu, wall); | 123 | u64 idle_time = get_cpu_idle_time_us(cpu, NULL); |
| 124 | 124 | ||
| 125 | if (idle_time == -1ULL) | 125 | if (idle_time == -1ULL) |
| 126 | return get_cpu_idle_time_jiffy(cpu, wall); | 126 | return get_cpu_idle_time_jiffy(cpu, wall); |
| 127 | else | ||
| 128 | idle_time += get_cpu_iowait_time_us(cpu, wall); | ||
| 127 | 129 | ||
| 128 | return idle_time; | 130 | return idle_time; |
| 129 | } | 131 | } |
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 629b3ec698e2..fa8af4ebb1d6 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
| @@ -144,10 +144,12 @@ static inline cputime64_t get_cpu_idle_time_jiffy(unsigned int cpu, | |||
| 144 | 144 | ||
| 145 | static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall) | 145 | static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall) |
| 146 | { | 146 | { |
| 147 | u64 idle_time = get_cpu_idle_time_us(cpu, wall); | 147 | u64 idle_time = get_cpu_idle_time_us(cpu, NULL); |
| 148 | 148 | ||
| 149 | if (idle_time == -1ULL) | 149 | if (idle_time == -1ULL) |
| 150 | return get_cpu_idle_time_jiffy(cpu, wall); | 150 | return get_cpu_idle_time_jiffy(cpu, wall); |
| 151 | else | ||
| 152 | idle_time += get_cpu_iowait_time_us(cpu, wall); | ||
| 151 | 153 | ||
| 152 | return idle_time; | 154 | return idle_time; |
| 153 | } | 155 | } |
diff --git a/fs/proc/stat.c b/fs/proc/stat.c
index 9758b654a1bc..42b274da92c3 100644
--- a/fs/proc/stat.c
+++ b/fs/proc/stat.c
| @@ -10,6 +10,7 @@ | |||
| 10 | #include <linux/time.h> | 10 | #include <linux/time.h> |
| 11 | #include <linux/irqnr.h> | 11 | #include <linux/irqnr.h> |
| 12 | #include <asm/cputime.h> | 12 | #include <asm/cputime.h> |
| 13 | #include <linux/tick.h> | ||
| 13 | 14 | ||
| 14 | #ifndef arch_irq_stat_cpu | 15 | #ifndef arch_irq_stat_cpu |
| 15 | #define arch_irq_stat_cpu(cpu) 0 | 16 | #define arch_irq_stat_cpu(cpu) 0 |
| @@ -21,6 +22,35 @@ | |||
| 21 | #define arch_idle_time(cpu) 0 | 22 | #define arch_idle_time(cpu) 0 |
| 22 | #endif | 23 | #endif |
| 23 | 24 | ||
| 25 | static cputime64_t get_idle_time(int cpu) | ||
| 26 | { | ||
| 27 | u64 idle_time = get_cpu_idle_time_us(cpu, NULL); | ||
| 28 | cputime64_t idle; | ||
| 29 | |||
| 30 | if (idle_time == -1ULL) { | ||
| 31 | /* !NO_HZ so we can rely on cpustat.idle */ | ||
| 32 | idle = kstat_cpu(cpu).cpustat.idle; | ||
| 33 | idle = cputime64_add(idle, arch_idle_time(cpu)); | ||
| 34 | } else | ||
| 35 | idle = usecs_to_cputime(idle_time); | ||
| 36 | |||
| 37 | return idle; | ||
| 38 | } | ||
| 39 | |||
| 40 | static cputime64_t get_iowait_time(int cpu) | ||
| 41 | { | ||
| 42 | u64 iowait_time = get_cpu_iowait_time_us(cpu, NULL); | ||
| 43 | cputime64_t iowait; | ||
| 44 | |||
| 45 | if (iowait_time == -1ULL) | ||
| 46 | /* !NO_HZ so we can rely on cpustat.iowait */ | ||
| 47 | iowait = kstat_cpu(cpu).cpustat.iowait; | ||
| 48 | else | ||
| 49 | iowait = usecs_to_cputime(iowait_time); | ||
| 50 | |||
| 51 | return iowait; | ||
| 52 | } | ||
| 53 | |||
| 24 | static int show_stat(struct seq_file *p, void *v) | 54 | static int show_stat(struct seq_file *p, void *v) |
| 25 | { | 55 | { |
| 26 | int i, j; | 56 | int i, j; |
| @@ -42,9 +72,8 @@ static int show_stat(struct seq_file *p, void *v) | |||
| 42 | user = cputime64_add(user, kstat_cpu(i).cpustat.user); | 72 | user = cputime64_add(user, kstat_cpu(i).cpustat.user); |
| 43 | nice = cputime64_add(nice, kstat_cpu(i).cpustat.nice); | 73 | nice = cputime64_add(nice, kstat_cpu(i).cpustat.nice); |
| 44 | system = cputime64_add(system, kstat_cpu(i).cpustat.system); | 74 | system = cputime64_add(system, kstat_cpu(i).cpustat.system); |
| 45 | idle = cputime64_add(idle, kstat_cpu(i).cpustat.idle); | 75 | idle = cputime64_add(idle, get_idle_time(i)); |
| 46 | idle = cputime64_add(idle, arch_idle_time(i)); | 76 | iowait = cputime64_add(iowait, get_iowait_time(i)); |
| 47 | iowait = cputime64_add(iowait, kstat_cpu(i).cpustat.iowait); | ||
| 48 | irq = cputime64_add(irq, kstat_cpu(i).cpustat.irq); | 77 | irq = cputime64_add(irq, kstat_cpu(i).cpustat.irq); |
| 49 | softirq = cputime64_add(softirq, kstat_cpu(i).cpustat.softirq); | 78 | softirq = cputime64_add(softirq, kstat_cpu(i).cpustat.softirq); |
| 50 | steal = cputime64_add(steal, kstat_cpu(i).cpustat.steal); | 79 | steal = cputime64_add(steal, kstat_cpu(i).cpustat.steal); |
| @@ -76,14 +105,12 @@ static int show_stat(struct seq_file *p, void *v) | |||
| 76 | (unsigned long long)cputime64_to_clock_t(guest), | 105 | (unsigned long long)cputime64_to_clock_t(guest), |
| 77 | (unsigned long long)cputime64_to_clock_t(guest_nice)); | 106 | (unsigned long long)cputime64_to_clock_t(guest_nice)); |
| 78 | for_each_online_cpu(i) { | 107 | for_each_online_cpu(i) { |
| 79 | |||
| 80 | /* Copy values here to work around gcc-2.95.3, gcc-2.96 */ | 108 | /* Copy values here to work around gcc-2.95.3, gcc-2.96 */ |
| 81 | user = kstat_cpu(i).cpustat.user; | 109 | user = kstat_cpu(i).cpustat.user; |
| 82 | nice = kstat_cpu(i).cpustat.nice; | 110 | nice = kstat_cpu(i).cpustat.nice; |
| 83 | system = kstat_cpu(i).cpustat.system; | 111 | system = kstat_cpu(i).cpustat.system; |
| 84 | idle = kstat_cpu(i).cpustat.idle; | 112 | idle = get_idle_time(i); |
| 85 | idle = cputime64_add(idle, arch_idle_time(i)); | 113 | iowait = get_iowait_time(i); |
| 86 | iowait = kstat_cpu(i).cpustat.iowait; | ||
| 87 | irq = kstat_cpu(i).cpustat.irq; | 114 | irq = kstat_cpu(i).cpustat.irq; |
| 88 | softirq = kstat_cpu(i).cpustat.softirq; | 115 | softirq = kstat_cpu(i).cpustat.softirq; |
| 89 | steal = kstat_cpu(i).cpustat.steal; | 116 | steal = kstat_cpu(i).cpustat.steal; |
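Since this changes what the idle and iowait columns of /proc/stat report under NO_HZ, here is a small userspace sketch (not part of the patch) that reads those two fields from the aggregate `cpu` line; the values are in USER_HZ ticks, typically 100 per second:

```c
#include <stdio.h>

int main(void)
{
	/* /proc/stat line: cpu user nice system idle iowait irq softirq ... */
	unsigned long long user, nice, system, idle, iowait;
	FILE *f = fopen("/proc/stat", "r");

	if (!f)
		return 1;
	if (fscanf(f, "cpu %llu %llu %llu %llu %llu",
		   &user, &nice, &system, &idle, &iowait) == 5)
		printf("idle=%llu iowait=%llu (USER_HZ ticks)\n",
		       idle, iowait);
	fclose(f);
	return 0;
}
```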
diff --git a/include/asm-generic/cputime.h b/include/asm-generic/cputime.h
index 61e03dd7939e..62ce6823c0f2 100644
--- a/include/asm-generic/cputime.h
+++ b/include/asm-generic/cputime.h
| @@ -38,8 +38,8 @@ typedef u64 cputime64_t; | |||
| 38 | /* | 38 | /* |
| 39 | * Convert cputime to microseconds and back. | 39 | * Convert cputime to microseconds and back. |
| 40 | */ | 40 | */ |
| 41 | #define cputime_to_usecs(__ct) jiffies_to_usecs(__ct); | 41 | #define cputime_to_usecs(__ct) jiffies_to_usecs(__ct) |
| 42 | #define usecs_to_cputime(__msecs) usecs_to_jiffies(__msecs); | 42 | #define usecs_to_cputime(__msecs) usecs_to_jiffies(__msecs) |
| 43 | 43 | ||
| 44 | /* | 44 | /* |
| 45 | * Convert cputime to seconds and back. | 45 | * Convert cputime to seconds and back. |
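The two semicolons dropped here are not cosmetic: a function-like macro that expands to a trailing `;` cannot be used inside an expression. A tiny standalone illustration (the macro names are made up for the example):

```c
#include <stdio.h>

#define DOUBLE_BAD(x)  ((x) * 2);   /* trailing ';' turns every use into a statement */
#define DOUBLE_OK(x)   ((x) * 2)

int main(void)
{
	int a = DOUBLE_OK(3) + 1;           /* fine: 7 */
	/* int b = DOUBLE_BAD(3) + 1; */    /* would not compile: the expanded ';'
	                                       ends the initializer early          */
	printf("%d\n", a);
	return 0;
}
```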
diff --git a/include/linux/alarmtimer.h b/include/linux/alarmtimer.h
index c5d6095b46f8..975009e1cbe6 100644
--- a/include/linux/alarmtimer.h
+++ b/include/linux/alarmtimer.h
| @@ -13,6 +13,16 @@ enum alarmtimer_type { | |||
| 13 | ALARM_NUMTYPE, | 13 | ALARM_NUMTYPE, |
| 14 | }; | 14 | }; |
| 15 | 15 | ||
| 16 | enum alarmtimer_restart { | ||
| 17 | ALARMTIMER_NORESTART, | ||
| 18 | ALARMTIMER_RESTART, | ||
| 19 | }; | ||
| 20 | |||
| 21 | |||
| 22 | #define ALARMTIMER_STATE_INACTIVE 0x00 | ||
| 23 | #define ALARMTIMER_STATE_ENQUEUED 0x01 | ||
| 24 | #define ALARMTIMER_STATE_CALLBACK 0x02 | ||
| 25 | |||
| 16 | /** | 26 | /** |
| 17 | * struct alarm - Alarm timer structure | 27 | * struct alarm - Alarm timer structure |
| 18 | * @node: timerqueue node for adding to the event list this value | 28 | * @node: timerqueue node for adding to the event list this value |
| @@ -25,16 +35,45 @@ enum alarmtimer_type { | |||
| 25 | */ | 35 | */ |
| 26 | struct alarm { | 36 | struct alarm { |
| 27 | struct timerqueue_node node; | 37 | struct timerqueue_node node; |
| 28 | ktime_t period; | 38 | enum alarmtimer_restart (*function)(struct alarm *, ktime_t now); |
| 29 | void (*function)(struct alarm *); | ||
| 30 | enum alarmtimer_type type; | 39 | enum alarmtimer_type type; |
| 31 | bool enabled; | 40 | int state; |
| 32 | void *data; | 41 | void *data; |
| 33 | }; | 42 | }; |
| 34 | 43 | ||
| 35 | void alarm_init(struct alarm *alarm, enum alarmtimer_type type, | 44 | void alarm_init(struct alarm *alarm, enum alarmtimer_type type, |
| 36 | void (*function)(struct alarm *)); | 45 | enum alarmtimer_restart (*function)(struct alarm *, ktime_t)); |
| 37 | void alarm_start(struct alarm *alarm, ktime_t start, ktime_t period); | 46 | void alarm_start(struct alarm *alarm, ktime_t start); |
| 38 | void alarm_cancel(struct alarm *alarm); | 47 | int alarm_try_to_cancel(struct alarm *alarm); |
| 48 | int alarm_cancel(struct alarm *alarm); | ||
| 49 | |||
| 50 | u64 alarm_forward(struct alarm *alarm, ktime_t now, ktime_t interval); | ||
| 51 | |||
| 52 | /* | ||
| 53 | * A alarmtimer is active, when it is enqueued into timerqueue or the | ||
| 54 | * callback function is running. | ||
| 55 | */ | ||
| 56 | static inline int alarmtimer_active(const struct alarm *timer) | ||
| 57 | { | ||
| 58 | return timer->state != ALARMTIMER_STATE_INACTIVE; | ||
| 59 | } | ||
| 60 | |||
| 61 | /* | ||
| 62 | * Helper function to check, whether the timer is on one of the queues | ||
| 63 | */ | ||
| 64 | static inline int alarmtimer_is_queued(struct alarm *timer) | ||
| 65 | { | ||
| 66 | return timer->state & ALARMTIMER_STATE_ENQUEUED; | ||
| 67 | } | ||
| 68 | |||
| 69 | /* | ||
| 70 | * Helper function to check, whether the timer is running the callback | ||
| 71 | * function | ||
| 72 | */ | ||
| 73 | static inline int alarmtimer_callback_running(struct alarm *timer) | ||
| 74 | { | ||
| 75 | return timer->state & ALARMTIMER_STATE_CALLBACK; | ||
| 76 | } | ||
| 77 | |||
| 39 | 78 | ||
| 40 | #endif | 79 | #endif |
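To make the reworked alarm interface above concrete, here is an illustrative, untested kernel-style sketch of a periodic user of the new API; the `my_` names are invented for the example. The callback now receives the current time and re-arms itself via alarm_forward() instead of relying on the removed period field:

```c
#include <linux/alarmtimer.h>
#include <linux/ktime.h>

static struct alarm my_alarm;
static ktime_t my_period;

static enum alarmtimer_restart my_alarm_fn(struct alarm *a, ktime_t now)
{
	/* ... do the periodic work ... */

	/* Push node.expires forward in whole periods past 'now';
	 * returning ALARMTIMER_RESTART re-enqueues the alarm.     */
	alarm_forward(a, now, my_period);
	return ALARMTIMER_RESTART;
}

static void my_alarm_setup(void)
{
	my_period = ktime_set(1, 0);                 /* one second */
	alarm_init(&my_alarm, ALARM_REALTIME, my_alarm_fn);
	alarm_start(&my_alarm, ktime_add(ktime_get_real(), my_period));
}

static void my_alarm_teardown(void)
{
	/* alarm_cancel() spins until a running callback finishes;
	 * alarm_try_to_cancel() returns -1 instead of waiting.    */
	alarm_cancel(&my_alarm);
}
```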
diff --git a/include/linux/clockchips.h b/include/linux/clockchips.h
index d6733e27af34..81e803e90aa4 100644
--- a/include/linux/clockchips.h
+++ b/include/linux/clockchips.h
| @@ -45,20 +45,22 @@ enum clock_event_nofitiers { | |||
| 45 | */ | 45 | */ |
| 46 | #define CLOCK_EVT_FEAT_PERIODIC 0x000001 | 46 | #define CLOCK_EVT_FEAT_PERIODIC 0x000001 |
| 47 | #define CLOCK_EVT_FEAT_ONESHOT 0x000002 | 47 | #define CLOCK_EVT_FEAT_ONESHOT 0x000002 |
| 48 | #define CLOCK_EVT_FEAT_KTIME 0x000004 | ||
| 48 | /* | 49 | /* |
| 49 | * x86(64) specific misfeatures: | 50 | * x86(64) specific misfeatures: |
| 50 | * | 51 | * |
| 51 | * - Clockevent source stops in C3 State and needs broadcast support. | 52 | * - Clockevent source stops in C3 State and needs broadcast support. |
| 52 | * - Local APIC timer is used as a dummy device. | 53 | * - Local APIC timer is used as a dummy device. |
| 53 | */ | 54 | */ |
| 54 | #define CLOCK_EVT_FEAT_C3STOP 0x000004 | 55 | #define CLOCK_EVT_FEAT_C3STOP 0x000008 |
| 55 | #define CLOCK_EVT_FEAT_DUMMY 0x000008 | 56 | #define CLOCK_EVT_FEAT_DUMMY 0x000010 |
| 56 | 57 | ||
| 57 | /** | 58 | /** |
| 58 | * struct clock_event_device - clock event device descriptor | 59 | * struct clock_event_device - clock event device descriptor |
| 59 | * @event_handler: Assigned by the framework to be called by the low | 60 | * @event_handler: Assigned by the framework to be called by the low |
| 60 | * level handler of the event source | 61 | * level handler of the event source |
| 61 | * @set_next_event: set next event function | 62 | * @set_next_event: set next event function using a clocksource delta |
| 63 | * @set_next_ktime: set next event function using a direct ktime value | ||
| 62 | * @next_event: local storage for the next event in oneshot mode | 64 | * @next_event: local storage for the next event in oneshot mode |
| 63 | * @max_delta_ns: maximum delta value in ns | 65 | * @max_delta_ns: maximum delta value in ns |
| 64 | * @min_delta_ns: minimum delta value in ns | 66 | * @min_delta_ns: minimum delta value in ns |
| @@ -81,6 +83,8 @@ struct clock_event_device { | |||
| 81 | void (*event_handler)(struct clock_event_device *); | 83 | void (*event_handler)(struct clock_event_device *); |
| 82 | int (*set_next_event)(unsigned long evt, | 84 | int (*set_next_event)(unsigned long evt, |
| 83 | struct clock_event_device *); | 85 | struct clock_event_device *); |
| 86 | int (*set_next_ktime)(ktime_t expires, | ||
| 87 | struct clock_event_device *); | ||
| 84 | ktime_t next_event; | 88 | ktime_t next_event; |
| 85 | u64 max_delta_ns; | 89 | u64 max_delta_ns; |
| 86 | u64 min_delta_ns; | 90 | u64 min_delta_ns; |
| @@ -140,7 +144,7 @@ extern void clockevents_set_mode(struct clock_event_device *dev, | |||
| 140 | enum clock_event_mode mode); | 144 | enum clock_event_mode mode); |
| 141 | extern int clockevents_register_notifier(struct notifier_block *nb); | 145 | extern int clockevents_register_notifier(struct notifier_block *nb); |
| 142 | extern int clockevents_program_event(struct clock_event_device *dev, | 146 | extern int clockevents_program_event(struct clock_event_device *dev, |
| 143 | ktime_t expires, ktime_t now); | 147 | ktime_t expires, bool force); |
| 144 | 148 | ||
| 145 | extern void clockevents_handle_noop(struct clock_event_device *dev); | 149 | extern void clockevents_handle_noop(struct clock_event_device *dev); |
| 146 | 150 | ||
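A condensed sketch of what a CLOCK_EVT_FEAT_KTIME device looks like, modeled on the s390 conversion earlier in this merge; the `my_` names are invented and only the fields relevant to the new callback are shown:

```c
/* Device that wants the absolute ktime expiry rather than a
 * clocksource delta (compare the s390 comparator change above). */
static int my_set_next_ktime(ktime_t expires, struct clock_event_device *evt)
{
	/* program the hardware with an absolute, monotonic-based time */
	return 0;
}

static struct clock_event_device my_clockevent = {
	.name		= "my-comparator",
	.features	= CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_KTIME,
	.rating		= 300,
	.set_next_ktime	= my_set_next_ktime,
	/* .set_mode, .cpumask, .min_delta_ns, .max_delta_ns as usual */
};
```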
diff --git a/include/linux/dw_apb_timer.h b/include/linux/dw_apb_timer.h
index 49638ea3b776..07261d52a6df 100644
--- a/include/linux/dw_apb_timer.h
+++ b/include/linux/dw_apb_timer.h
| @@ -46,7 +46,7 @@ struct dw_apb_clock_event_device * | |||
| 46 | dw_apb_clockevent_init(int cpu, const char *name, unsigned rating, | 46 | dw_apb_clockevent_init(int cpu, const char *name, unsigned rating, |
| 47 | void __iomem *base, int irq, unsigned long freq); | 47 | void __iomem *base, int irq, unsigned long freq); |
| 48 | struct dw_apb_clocksource * | 48 | struct dw_apb_clocksource * |
| 49 | dw_apb_clocksource_init(unsigned rating, char *name, void __iomem *base, | 49 | dw_apb_clocksource_init(unsigned rating, const char *name, void __iomem *base, |
| 50 | unsigned long freq); | 50 | unsigned long freq); |
| 51 | void dw_apb_clocksource_register(struct dw_apb_clocksource *dw_cs); | 51 | void dw_apb_clocksource_register(struct dw_apb_clocksource *dw_cs); |
| 52 | void dw_apb_clocksource_start(struct dw_apb_clocksource *dw_cs); | 52 | void dw_apb_clocksource_start(struct dw_apb_clocksource *dw_cs); |
diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h
index f97672a36fa8..265e2c3cbd1c 100644
--- a/include/linux/jiffies.h
+++ b/include/linux/jiffies.h
| @@ -303,7 +303,7 @@ extern void jiffies_to_timespec(const unsigned long jiffies, | |||
| 303 | extern unsigned long timeval_to_jiffies(const struct timeval *value); | 303 | extern unsigned long timeval_to_jiffies(const struct timeval *value); |
| 304 | extern void jiffies_to_timeval(const unsigned long jiffies, | 304 | extern void jiffies_to_timeval(const unsigned long jiffies, |
| 305 | struct timeval *value); | 305 | struct timeval *value); |
| 306 | extern clock_t jiffies_to_clock_t(long x); | 306 | extern clock_t jiffies_to_clock_t(unsigned long x); |
| 307 | extern unsigned long clock_t_to_jiffies(unsigned long x); | 307 | extern unsigned long clock_t_to_jiffies(unsigned long x); |
| 308 | extern u64 jiffies_64_to_clock_t(u64 x); | 308 | extern u64 jiffies_64_to_clock_t(u64 x); |
| 309 | extern u64 nsec_to_clock_t(u64 x); | 309 | extern u64 nsec_to_clock_t(u64 x); |
diff --git a/include/linux/posix-timers.h b/include/linux/posix-timers.h
index 959c14132f46..042058fdb0af 100644
--- a/include/linux/posix-timers.h
+++ b/include/linux/posix-timers.h
| @@ -81,7 +81,10 @@ struct k_itimer { | |||
| 81 | unsigned long incr; | 81 | unsigned long incr; |
| 82 | unsigned long expires; | 82 | unsigned long expires; |
| 83 | } mmtimer; | 83 | } mmtimer; |
| 84 | struct alarm alarmtimer; | 84 | struct { |
| 85 | struct alarm alarmtimer; | ||
| 86 | ktime_t interval; | ||
| 87 | } alarm; | ||
| 85 | struct rcu_head rcu; | 88 | struct rcu_head rcu; |
| 86 | } it; | 89 | } it; |
| 87 | }; | 90 | }; |
diff --git a/kernel/time.c b/kernel/time.c
index 8e8dc6d705c9..d77606214529 100644
--- a/kernel/time.c
+++ b/kernel/time.c
| @@ -575,7 +575,7 @@ EXPORT_SYMBOL(jiffies_to_timeval); | |||
| 575 | /* | 575 | /* |
| 576 | * Convert jiffies/jiffies_64 to clock_t and back. | 576 | * Convert jiffies/jiffies_64 to clock_t and back. |
| 577 | */ | 577 | */ |
| 578 | clock_t jiffies_to_clock_t(long x) | 578 | clock_t jiffies_to_clock_t(unsigned long x) |
| 579 | { | 579 | { |
| 580 | #if (TICK_NSEC % (NSEC_PER_SEC / USER_HZ)) == 0 | 580 | #if (TICK_NSEC % (NSEC_PER_SEC / USER_HZ)) == 0 |
| 581 | # if HZ < USER_HZ | 581 | # if HZ < USER_HZ |
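The switch from long to unsigned long matters on 32-bit systems: with HZ=1000 the jiffies count exceeds LONG_MAX after roughly 25 days of uptime, and the old signed parameter could then go negative before the divide. A standalone demonstration of the sign problem, using int32_t to model a 32-bit long and assuming the common HZ/USER_HZ = 1000/100 case:

```c
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* 25 days of HZ=1000 ticks: 2,160,000,000 > INT32_MAX */
	uint32_t jiffies = 1000u * 60 * 60 * 24 * 25;

	int32_t as_long = (int32_t)jiffies;       /* old 'long x' on 32-bit */

	/* jiffies_to_clock_t() divides by HZ / USER_HZ == 10 here */
	printf("signed:   %d\n", as_long / 10);   /* negative, bogus    */
	printf("unsigned: %u\n", jiffies / 10);   /* 216000000, correct */
	return 0;
}
```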
diff --git a/kernel/time/Kconfig b/kernel/time/Kconfig
index f06a8a365648..b26c2228fe92 100644
--- a/kernel/time/Kconfig
+++ b/kernel/time/Kconfig
| @@ -27,3 +27,5 @@ config GENERIC_CLOCKEVENTS_BUILD | |||
| 27 | default y | 27 | default y |
| 28 | depends on GENERIC_CLOCKEVENTS || GENERIC_CLOCKEVENTS_MIGR | 28 | depends on GENERIC_CLOCKEVENTS || GENERIC_CLOCKEVENTS_MIGR |
| 29 | 29 | ||
| 30 | config GENERIC_CLOCKEVENTS_MIN_ADJUST | ||
| 31 | bool | ||
diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
index ea5e1a928d5b..c436e790b21b 100644
--- a/kernel/time/alarmtimer.c
+++ b/kernel/time/alarmtimer.c
| @@ -53,27 +53,6 @@ static struct rtc_device *rtcdev; | |||
| 53 | static DEFINE_SPINLOCK(rtcdev_lock); | 53 | static DEFINE_SPINLOCK(rtcdev_lock); |
| 54 | 54 | ||
| 55 | /** | 55 | /** |
| 56 | * has_wakealarm - check rtc device has wakealarm ability | ||
| 57 | * @dev: current device | ||
| 58 | * @name_ptr: name to be returned | ||
| 59 | * | ||
| 60 | * This helper function checks to see if the rtc device can wake | ||
| 61 | * from suspend. | ||
| 62 | */ | ||
| 63 | static int has_wakealarm(struct device *dev, void *name_ptr) | ||
| 64 | { | ||
| 65 | struct rtc_device *candidate = to_rtc_device(dev); | ||
| 66 | |||
| 67 | if (!candidate->ops->set_alarm) | ||
| 68 | return 0; | ||
| 69 | if (!device_may_wakeup(candidate->dev.parent)) | ||
| 70 | return 0; | ||
| 71 | |||
| 72 | *(const char **)name_ptr = dev_name(dev); | ||
| 73 | return 1; | ||
| 74 | } | ||
| 75 | |||
| 76 | /** | ||
| 77 | * alarmtimer_get_rtcdev - Return selected rtcdevice | 56 | * alarmtimer_get_rtcdev - Return selected rtcdevice |
| 78 | * | 57 | * |
| 79 | * This function returns the rtc device to use for wakealarms. | 58 | * This function returns the rtc device to use for wakealarms. |
| @@ -82,37 +61,64 @@ static int has_wakealarm(struct device *dev, void *name_ptr) | |||
| 82 | */ | 61 | */ |
| 83 | static struct rtc_device *alarmtimer_get_rtcdev(void) | 62 | static struct rtc_device *alarmtimer_get_rtcdev(void) |
| 84 | { | 63 | { |
| 85 | struct device *dev; | ||
| 86 | char *str; | ||
| 87 | unsigned long flags; | 64 | unsigned long flags; |
| 88 | struct rtc_device *ret; | 65 | struct rtc_device *ret; |
| 89 | 66 | ||
| 90 | spin_lock_irqsave(&rtcdev_lock, flags); | 67 | spin_lock_irqsave(&rtcdev_lock, flags); |
| 91 | if (!rtcdev) { | ||
| 92 | /* Find an rtc device and init the rtc_timer */ | ||
| 93 | dev = class_find_device(rtc_class, NULL, &str, has_wakealarm); | ||
| 94 | /* If we have a device then str is valid. See has_wakealarm() */ | ||
| 95 | if (dev) { | ||
| 96 | rtcdev = rtc_class_open(str); | ||
| 97 | /* | ||
| 98 | * Drop the reference we got in class_find_device, | ||
| 99 | * rtc_open takes its own. | ||
| 100 | */ | ||
| 101 | put_device(dev); | ||
| 102 | rtc_timer_init(&rtctimer, NULL, NULL); | ||
| 103 | } | ||
| 104 | } | ||
| 105 | ret = rtcdev; | 68 | ret = rtcdev; |
| 106 | spin_unlock_irqrestore(&rtcdev_lock, flags); | 69 | spin_unlock_irqrestore(&rtcdev_lock, flags); |
| 107 | 70 | ||
| 108 | return ret; | 71 | return ret; |
| 109 | } | 72 | } |
| 73 | |||
| 74 | |||
| 75 | static int alarmtimer_rtc_add_device(struct device *dev, | ||
| 76 | struct class_interface *class_intf) | ||
| 77 | { | ||
| 78 | unsigned long flags; | ||
| 79 | struct rtc_device *rtc = to_rtc_device(dev); | ||
| 80 | |||
| 81 | if (rtcdev) | ||
| 82 | return -EBUSY; | ||
| 83 | |||
| 84 | if (!rtc->ops->set_alarm) | ||
| 85 | return -1; | ||
| 86 | if (!device_may_wakeup(rtc->dev.parent)) | ||
| 87 | return -1; | ||
| 88 | |||
| 89 | spin_lock_irqsave(&rtcdev_lock, flags); | ||
| 90 | if (!rtcdev) { | ||
| 91 | rtcdev = rtc; | ||
| 92 | /* hold a reference so it doesn't go away */ | ||
| 93 | get_device(dev); | ||
| 94 | } | ||
| 95 | spin_unlock_irqrestore(&rtcdev_lock, flags); | ||
| 96 | return 0; | ||
| 97 | } | ||
| 98 | |||
| 99 | static struct class_interface alarmtimer_rtc_interface = { | ||
| 100 | .add_dev = &alarmtimer_rtc_add_device, | ||
| 101 | }; | ||
| 102 | |||
| 103 | static int alarmtimer_rtc_interface_setup(void) | ||
| 104 | { | ||
| 105 | alarmtimer_rtc_interface.class = rtc_class; | ||
| 106 | return class_interface_register(&alarmtimer_rtc_interface); | ||
| 107 | } | ||
| 108 | static void alarmtimer_rtc_interface_remove(void) | ||
| 109 | { | ||
| 110 | class_interface_unregister(&alarmtimer_rtc_interface); | ||
| 111 | } | ||
| 110 | #else | 112 | #else |
| 111 | #define alarmtimer_get_rtcdev() (0) | 113 | static inline struct rtc_device *alarmtimer_get_rtcdev(void) |
| 112 | #define rtcdev (0) | 114 | { |
| 115 | return NULL; | ||
| 116 | } | ||
| 117 | #define rtcdev (NULL) | ||
| 118 | static inline int alarmtimer_rtc_interface_setup(void) { return 0; } | ||
| 119 | static inline void alarmtimer_rtc_interface_remove(void) { } | ||
| 113 | #endif | 120 | #endif |
| 114 | 121 | ||
| 115 | |||
| 116 | /** | 122 | /** |
| 117 | * alarmtimer_enqueue - Adds an alarm timer to an alarm_base timerqueue | 123 | * alarmtimer_enqueue - Adds an alarm timer to an alarm_base timerqueue |
| 118 | * @base: pointer to the base where the timer is being run | 124 | * @base: pointer to the base where the timer is being run |
| @@ -126,6 +132,8 @@ static struct rtc_device *alarmtimer_get_rtcdev(void) | |||
| 126 | static void alarmtimer_enqueue(struct alarm_base *base, struct alarm *alarm) | 132 | static void alarmtimer_enqueue(struct alarm_base *base, struct alarm *alarm) |
| 127 | { | 133 | { |
| 128 | timerqueue_add(&base->timerqueue, &alarm->node); | 134 | timerqueue_add(&base->timerqueue, &alarm->node); |
| 135 | alarm->state |= ALARMTIMER_STATE_ENQUEUED; | ||
| 136 | |||
| 129 | if (&alarm->node == timerqueue_getnext(&base->timerqueue)) { | 137 | if (&alarm->node == timerqueue_getnext(&base->timerqueue)) { |
| 130 | hrtimer_try_to_cancel(&base->timer); | 138 | hrtimer_try_to_cancel(&base->timer); |
| 131 | hrtimer_start(&base->timer, alarm->node.expires, | 139 | hrtimer_start(&base->timer, alarm->node.expires, |
| @@ -147,7 +155,12 @@ static void alarmtimer_remove(struct alarm_base *base, struct alarm *alarm) | |||
| 147 | { | 155 | { |
| 148 | struct timerqueue_node *next = timerqueue_getnext(&base->timerqueue); | 156 | struct timerqueue_node *next = timerqueue_getnext(&base->timerqueue); |
| 149 | 157 | ||
| 158 | if (!(alarm->state & ALARMTIMER_STATE_ENQUEUED)) | ||
| 159 | return; | ||
| 160 | |||
| 150 | timerqueue_del(&base->timerqueue, &alarm->node); | 161 | timerqueue_del(&base->timerqueue, &alarm->node); |
| 162 | alarm->state &= ~ALARMTIMER_STATE_ENQUEUED; | ||
| 163 | |||
| 151 | if (next == &alarm->node) { | 164 | if (next == &alarm->node) { |
| 152 | hrtimer_try_to_cancel(&base->timer); | 165 | hrtimer_try_to_cancel(&base->timer); |
| 153 | next = timerqueue_getnext(&base->timerqueue); | 166 | next = timerqueue_getnext(&base->timerqueue); |
| @@ -174,6 +187,7 @@ static enum hrtimer_restart alarmtimer_fired(struct hrtimer *timer) | |||
| 174 | unsigned long flags; | 187 | unsigned long flags; |
| 175 | ktime_t now; | 188 | ktime_t now; |
| 176 | int ret = HRTIMER_NORESTART; | 189 | int ret = HRTIMER_NORESTART; |
| 190 | int restart = ALARMTIMER_NORESTART; | ||
| 177 | 191 | ||
| 178 | spin_lock_irqsave(&base->lock, flags); | 192 | spin_lock_irqsave(&base->lock, flags); |
| 179 | now = base->gettime(); | 193 | now = base->gettime(); |
| @@ -187,17 +201,19 @@ static enum hrtimer_restart alarmtimer_fired(struct hrtimer *timer) | |||
| 187 | alarm = container_of(next, struct alarm, node); | 201 | alarm = container_of(next, struct alarm, node); |
| 188 | 202 | ||
| 189 | timerqueue_del(&base->timerqueue, &alarm->node); | 203 | timerqueue_del(&base->timerqueue, &alarm->node); |
| 190 | alarm->enabled = 0; | 204 | alarm->state &= ~ALARMTIMER_STATE_ENQUEUED; |
| 191 | /* Re-add periodic timers */ | 205 | |
| 192 | if (alarm->period.tv64) { | 206 | alarm->state |= ALARMTIMER_STATE_CALLBACK; |
| 193 | alarm->node.expires = ktime_add(expired, alarm->period); | ||
| 194 | timerqueue_add(&base->timerqueue, &alarm->node); | ||
| 195 | alarm->enabled = 1; | ||
| 196 | } | ||
| 197 | spin_unlock_irqrestore(&base->lock, flags); | 207 | spin_unlock_irqrestore(&base->lock, flags); |
| 198 | if (alarm->function) | 208 | if (alarm->function) |
| 199 | alarm->function(alarm); | 209 | restart = alarm->function(alarm, now); |
| 200 | spin_lock_irqsave(&base->lock, flags); | 210 | spin_lock_irqsave(&base->lock, flags); |
| 211 | alarm->state &= ~ALARMTIMER_STATE_CALLBACK; | ||
| 212 | |||
| 213 | if (restart != ALARMTIMER_NORESTART) { | ||
| 214 | timerqueue_add(&base->timerqueue, &alarm->node); | ||
| 215 | alarm->state |= ALARMTIMER_STATE_ENQUEUED; | ||
| 216 | } | ||
| 201 | } | 217 | } |
| 202 | 218 | ||
| 203 | if (next) { | 219 | if (next) { |
| @@ -234,7 +250,7 @@ static int alarmtimer_suspend(struct device *dev) | |||
| 234 | freezer_delta = ktime_set(0, 0); | 250 | freezer_delta = ktime_set(0, 0); |
| 235 | spin_unlock_irqrestore(&freezer_delta_lock, flags); | 251 | spin_unlock_irqrestore(&freezer_delta_lock, flags); |
| 236 | 252 | ||
| 237 | rtc = rtcdev; | 253 | rtc = alarmtimer_get_rtcdev(); |
| 238 | /* If we have no rtcdev, just return */ | 254 | /* If we have no rtcdev, just return */ |
| 239 | if (!rtc) | 255 | if (!rtc) |
| 240 | return 0; | 256 | return 0; |
| @@ -299,53 +315,111 @@ static void alarmtimer_freezerset(ktime_t absexp, enum alarmtimer_type type) | |||
| 299 | * @function: callback that is run when the alarm fires | 315 | * @function: callback that is run when the alarm fires |
| 300 | */ | 316 | */ |
| 301 | void alarm_init(struct alarm *alarm, enum alarmtimer_type type, | 317 | void alarm_init(struct alarm *alarm, enum alarmtimer_type type, |
| 302 | void (*function)(struct alarm *)) | 318 | enum alarmtimer_restart (*function)(struct alarm *, ktime_t)) |
| 303 | { | 319 | { |
| 304 | timerqueue_init(&alarm->node); | 320 | timerqueue_init(&alarm->node); |
| 305 | alarm->period = ktime_set(0, 0); | ||
| 306 | alarm->function = function; | 321 | alarm->function = function; |
| 307 | alarm->type = type; | 322 | alarm->type = type; |
| 308 | alarm->enabled = 0; | 323 | alarm->state = ALARMTIMER_STATE_INACTIVE; |
| 309 | } | 324 | } |
| 310 | 325 | ||
| 311 | /** | 326 | /** |
| 312 | * alarm_start - Sets an alarm to fire | 327 | * alarm_start - Sets an alarm to fire |
| 313 | * @alarm: ptr to alarm to set | 328 | * @alarm: ptr to alarm to set |
| 314 | * @start: time to run the alarm | 329 | * @start: time to run the alarm |
| 315 | * @period: period at which the alarm will recur | ||
| 316 | */ | 330 | */ |
| 317 | void alarm_start(struct alarm *alarm, ktime_t start, ktime_t period) | 331 | void alarm_start(struct alarm *alarm, ktime_t start) |
| 318 | { | 332 | { |
| 319 | struct alarm_base *base = &alarm_bases[alarm->type]; | 333 | struct alarm_base *base = &alarm_bases[alarm->type]; |
| 320 | unsigned long flags; | 334 | unsigned long flags; |
| 321 | 335 | ||
| 322 | spin_lock_irqsave(&base->lock, flags); | 336 | spin_lock_irqsave(&base->lock, flags); |
| 323 | if (alarm->enabled) | 337 | if (alarmtimer_active(alarm)) |
| 324 | alarmtimer_remove(base, alarm); | 338 | alarmtimer_remove(base, alarm); |
| 325 | alarm->node.expires = start; | 339 | alarm->node.expires = start; |
| 326 | alarm->period = period; | ||
| 327 | alarmtimer_enqueue(base, alarm); | 340 | alarmtimer_enqueue(base, alarm); |
| 328 | alarm->enabled = 1; | ||
| 329 | spin_unlock_irqrestore(&base->lock, flags); | 341 | spin_unlock_irqrestore(&base->lock, flags); |
| 330 | } | 342 | } |
| 331 | 343 | ||
| 332 | /** | 344 | /** |
| 333 | * alarm_cancel - Tries to cancel an alarm timer | 345 | * alarm_try_to_cancel - Tries to cancel an alarm timer |
| 334 | * @alarm: ptr to alarm to be canceled | 346 | * @alarm: ptr to alarm to be canceled |
| 347 | * | ||
| 348 | * Returns 1 if the timer was canceled, 0 if it was not running, | ||
| 349 | * and -1 if the callback was running | ||
| 335 | */ | 350 | */ |
| 336 | void alarm_cancel(struct alarm *alarm) | 351 | int alarm_try_to_cancel(struct alarm *alarm) |
| 337 | { | 352 | { |
| 338 | struct alarm_base *base = &alarm_bases[alarm->type]; | 353 | struct alarm_base *base = &alarm_bases[alarm->type]; |
| 339 | unsigned long flags; | 354 | unsigned long flags; |
| 340 | 355 | int ret = -1; | |
| 341 | spin_lock_irqsave(&base->lock, flags); | 356 | spin_lock_irqsave(&base->lock, flags); |
| 342 | if (alarm->enabled) | 357 | |
| 358 | if (alarmtimer_callback_running(alarm)) | ||
| 359 | goto out; | ||
| 360 | |||
| 361 | if (alarmtimer_is_queued(alarm)) { | ||
| 343 | alarmtimer_remove(base, alarm); | 362 | alarmtimer_remove(base, alarm); |
| 344 | alarm->enabled = 0; | 363 | ret = 1; |
| 364 | } else | ||
| 365 | ret = 0; | ||
| 366 | out: | ||
| 345 | spin_unlock_irqrestore(&base->lock, flags); | 367 | spin_unlock_irqrestore(&base->lock, flags); |
| 368 | return ret; | ||
| 369 | } | ||
| 370 | |||
| 371 | |||
| 372 | /** | ||
| 373 | * alarm_cancel - Spins trying to cancel an alarm timer until it is done | ||
| 374 | * @alarm: ptr to alarm to be canceled | ||
| 375 | * | ||
| 376 | * Returns 1 if the timer was canceled, 0 if it was not active. | ||
| 377 | */ | ||
| 378 | int alarm_cancel(struct alarm *alarm) | ||
| 379 | { | ||
| 380 | for (;;) { | ||
| 381 | int ret = alarm_try_to_cancel(alarm); | ||
| 382 | if (ret >= 0) | ||
| 383 | return ret; | ||
| 384 | cpu_relax(); | ||
| 385 | } | ||
| 386 | } | ||
| 387 | |||
| 388 | |||
| 389 | u64 alarm_forward(struct alarm *alarm, ktime_t now, ktime_t interval) | ||
| 390 | { | ||
| 391 | u64 overrun = 1; | ||
| 392 | ktime_t delta; | ||
| 393 | |||
| 394 | delta = ktime_sub(now, alarm->node.expires); | ||
| 395 | |||
| 396 | if (delta.tv64 < 0) | ||
| 397 | return 0; | ||
| 398 | |||
| 399 | if (unlikely(delta.tv64 >= interval.tv64)) { | ||
| 400 | s64 incr = ktime_to_ns(interval); | ||
| 401 | |||
| 402 | overrun = ktime_divns(delta, incr); | ||
| 403 | |||
| 404 | alarm->node.expires = ktime_add_ns(alarm->node.expires, | ||
| 405 | incr*overrun); | ||
| 406 | |||
| 407 | if (alarm->node.expires.tv64 > now.tv64) | ||
| 408 | return overrun; | ||
| 409 | /* | ||
| 410 | * This (and the ktime_add() below) is the | ||
| 411 | * correction for exact: | ||
| 412 | */ | ||
| 413 | overrun++; | ||
| 414 | } | ||
| 415 | |||
| 416 | alarm->node.expires = ktime_add(alarm->node.expires, interval); | ||
| 417 | return overrun; | ||
| 346 | } | 418 | } |
| 347 | 419 | ||
| 348 | 420 | ||
| 421 | |||
| 422 | |||
| 349 | /** | 423 | /** |
| 350 | * clock2alarm - helper that converts from clockid to alarmtypes | 424 | * clock2alarm - helper that converts from clockid to alarmtypes |
| 351 | * @clockid: clockid. | 425 | * @clockid: clockid. |
| @@ -365,12 +439,21 @@ static enum alarmtimer_type clock2alarm(clockid_t clockid) | |||
| 365 | * | 439 | * |
| 366 | * Posix timer callback for expired alarm timers. | 440 | * Posix timer callback for expired alarm timers. |
| 367 | */ | 441 | */ |
| 368 | static void alarm_handle_timer(struct alarm *alarm) | 442 | static enum alarmtimer_restart alarm_handle_timer(struct alarm *alarm, |
| 443 | ktime_t now) | ||
| 369 | { | 444 | { |
| 370 | struct k_itimer *ptr = container_of(alarm, struct k_itimer, | 445 | struct k_itimer *ptr = container_of(alarm, struct k_itimer, |
| 371 | it.alarmtimer); | 446 | it.alarm.alarmtimer); |
| 372 | if (posix_timer_event(ptr, 0) != 0) | 447 | if (posix_timer_event(ptr, 0) != 0) |
| 373 | ptr->it_overrun++; | 448 | ptr->it_overrun++; |
| 449 | |||
| 450 | /* Re-add periodic timers */ | ||
| 451 | if (ptr->it.alarm.interval.tv64) { | ||
| 452 | ptr->it_overrun += alarm_forward(alarm, now, | ||
| 453 | ptr->it.alarm.interval); | ||
| 454 | return ALARMTIMER_RESTART; | ||
| 455 | } | ||
| 456 | return ALARMTIMER_NORESTART; | ||
| 374 | } | 457 | } |
| 375 | 458 | ||
| 376 | /** | 459 | /** |
| @@ -427,7 +510,7 @@ static int alarm_timer_create(struct k_itimer *new_timer) | |||
| 427 | 510 | ||
| 428 | type = clock2alarm(new_timer->it_clock); | 511 | type = clock2alarm(new_timer->it_clock); |
| 429 | base = &alarm_bases[type]; | 512 | base = &alarm_bases[type]; |
| 430 | alarm_init(&new_timer->it.alarmtimer, type, alarm_handle_timer); | 513 | alarm_init(&new_timer->it.alarm.alarmtimer, type, alarm_handle_timer); |
| 431 | return 0; | 514 | return 0; |
| 432 | } | 515 | } |
| 433 | 516 | ||
| @@ -444,9 +527,9 @@ static void alarm_timer_get(struct k_itimer *timr, | |||
| 444 | memset(cur_setting, 0, sizeof(struct itimerspec)); | 527 | memset(cur_setting, 0, sizeof(struct itimerspec)); |
| 445 | 528 | ||
| 446 | cur_setting->it_interval = | 529 | cur_setting->it_interval = |
| 447 | ktime_to_timespec(timr->it.alarmtimer.period); | 530 | ktime_to_timespec(timr->it.alarm.interval); |
| 448 | cur_setting->it_value = | 531 | cur_setting->it_value = |
| 449 | ktime_to_timespec(timr->it.alarmtimer.node.expires); | 532 | ktime_to_timespec(timr->it.alarm.alarmtimer.node.expires); |
| 450 | return; | 533 | return; |
| 451 | } | 534 | } |
| 452 | 535 | ||
| @@ -461,7 +544,9 @@ static int alarm_timer_del(struct k_itimer *timr) | |||
| 461 | if (!rtcdev) | 544 | if (!rtcdev) |
| 462 | return -ENOTSUPP; | 545 | return -ENOTSUPP; |
| 463 | 546 | ||
| 464 | alarm_cancel(&timr->it.alarmtimer); | 547 | if (alarm_try_to_cancel(&timr->it.alarm.alarmtimer) < 0) |
| 548 | return TIMER_RETRY; | ||
| 549 | |||
| 465 | return 0; | 550 | return 0; |
| 466 | } | 551 | } |
| 467 | 552 | ||
| @@ -481,25 +566,17 @@ static int alarm_timer_set(struct k_itimer *timr, int flags, | |||
| 481 | if (!rtcdev) | 566 | if (!rtcdev) |
| 482 | return -ENOTSUPP; | 567 | return -ENOTSUPP; |
| 483 | 568 | ||
| 484 | /* | ||
| 485 | * XXX HACK! Currently we can DOS a system if the interval | ||
| 486 | * period on alarmtimers is too small. Cap the interval here | ||
| 487 | * to 100us and solve this properly in a future patch! -jstultz | ||
| 488 | */ | ||
| 489 | if ((new_setting->it_interval.tv_sec == 0) && | ||
| 490 | (new_setting->it_interval.tv_nsec < 100000)) | ||
| 491 | new_setting->it_interval.tv_nsec = 100000; | ||
| 492 | |||
| 493 | if (old_setting) | 569 | if (old_setting) |
| 494 | alarm_timer_get(timr, old_setting); | 570 | alarm_timer_get(timr, old_setting); |
| 495 | 571 | ||
| 496 | /* If the timer was already set, cancel it */ | 572 | /* If the timer was already set, cancel it */ |
| 497 | alarm_cancel(&timr->it.alarmtimer); | 573 | if (alarm_try_to_cancel(&timr->it.alarm.alarmtimer) < 0) |
| 574 | return TIMER_RETRY; | ||
| 498 | 575 | ||
| 499 | /* start the timer */ | 576 | /* start the timer */ |
| 500 | alarm_start(&timr->it.alarmtimer, | 577 | timr->it.alarm.interval = timespec_to_ktime(new_setting->it_interval); |
| 501 | timespec_to_ktime(new_setting->it_value), | 578 | alarm_start(&timr->it.alarm.alarmtimer, |
| 502 | timespec_to_ktime(new_setting->it_interval)); | 579 | timespec_to_ktime(new_setting->it_value)); |
| 503 | return 0; | 580 | return 0; |
| 504 | } | 581 | } |
| 505 | 582 | ||
| @@ -509,13 +586,15 @@ static int alarm_timer_set(struct k_itimer *timr, int flags, | |||
| 509 | * | 586 | * |
| 510 | * Wakes up the task that set the alarmtimer | 587 | * Wakes up the task that set the alarmtimer |
| 511 | */ | 588 | */ |
| 512 | static void alarmtimer_nsleep_wakeup(struct alarm *alarm) | 589 | static enum alarmtimer_restart alarmtimer_nsleep_wakeup(struct alarm *alarm, |
| 590 | ktime_t now) | ||
| 513 | { | 591 | { |
| 514 | struct task_struct *task = (struct task_struct *)alarm->data; | 592 | struct task_struct *task = (struct task_struct *)alarm->data; |
| 515 | 593 | ||
| 516 | alarm->data = NULL; | 594 | alarm->data = NULL; |
| 517 | if (task) | 595 | if (task) |
| 518 | wake_up_process(task); | 596 | wake_up_process(task); |
| 597 | return ALARMTIMER_NORESTART; | ||
| 519 | } | 598 | } |
| 520 | 599 | ||
| 521 | /** | 600 | /** |
| @@ -530,7 +609,7 @@ static int alarmtimer_do_nsleep(struct alarm *alarm, ktime_t absexp) | |||
| 530 | alarm->data = (void *)current; | 609 | alarm->data = (void *)current; |
| 531 | do { | 610 | do { |
| 532 | set_current_state(TASK_INTERRUPTIBLE); | 611 | set_current_state(TASK_INTERRUPTIBLE); |
| 533 | alarm_start(alarm, absexp, ktime_set(0, 0)); | 612 | alarm_start(alarm, absexp); |
| 534 | if (likely(alarm->data)) | 613 | if (likely(alarm->data)) |
| 535 | schedule(); | 614 | schedule(); |
| 536 | 615 | ||
| @@ -691,6 +770,7 @@ static struct platform_driver alarmtimer_driver = { | |||
| 691 | */ | 770 | */ |
| 692 | static int __init alarmtimer_init(void) | 771 | static int __init alarmtimer_init(void) |
| 693 | { | 772 | { |
| 773 | struct platform_device *pdev; | ||
| 694 | int error = 0; | 774 | int error = 0; |
| 695 | int i; | 775 | int i; |
| 696 | struct k_clock alarm_clock = { | 776 | struct k_clock alarm_clock = { |
| @@ -719,10 +799,26 @@ static int __init alarmtimer_init(void) | |||
| 719 | HRTIMER_MODE_ABS); | 799 | HRTIMER_MODE_ABS); |
| 720 | alarm_bases[i].timer.function = alarmtimer_fired; | 800 | alarm_bases[i].timer.function = alarmtimer_fired; |
| 721 | } | 801 | } |
| 802 | |||
| 803 | error = alarmtimer_rtc_interface_setup(); | ||
| 804 | if (error) | ||
| 805 | return error; | ||
| 806 | |||
| 722 | error = platform_driver_register(&alarmtimer_driver); | 807 | error = platform_driver_register(&alarmtimer_driver); |
| 723 | platform_device_register_simple("alarmtimer", -1, NULL, 0); | 808 | if (error) |
| 809 | goto out_if; | ||
| 724 | 810 | ||
| 811 | pdev = platform_device_register_simple("alarmtimer", -1, NULL, 0); | ||
| 812 | if (IS_ERR(pdev)) { | ||
| 813 | error = PTR_ERR(pdev); | ||
| 814 | goto out_drv; | ||
| 815 | } | ||
| 816 | return 0; | ||
| 817 | |||
| 818 | out_drv: | ||
| 819 | platform_driver_unregister(&alarmtimer_driver); | ||
| 820 | out_if: | ||
| 821 | alarmtimer_rtc_interface_remove(); | ||
| 725 | return error; | 822 | return error; |
| 726 | } | 823 | } |
| 727 | device_initcall(alarmtimer_init); | 824 | device_initcall(alarmtimer_init); |
| 728 | |||
diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
index e4c699dfa4e8..1ecd6ba36d6c 100644
--- a/kernel/time/clockevents.c
+++ b/kernel/time/clockevents.c
| @@ -94,42 +94,143 @@ void clockevents_shutdown(struct clock_event_device *dev) | |||
| 94 | dev->next_event.tv64 = KTIME_MAX; | 94 | dev->next_event.tv64 = KTIME_MAX; |
| 95 | } | 95 | } |
| 96 | 96 | ||
| 97 | #ifdef CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST | ||
| 98 | |||
| 99 | /* Limit min_delta to a jiffie */ | ||
| 100 | #define MIN_DELTA_LIMIT (NSEC_PER_SEC / HZ) | ||
| 101 | |||
| 102 | /** | ||
| 103 | * clockevents_increase_min_delta - raise minimum delta of a clock event device | ||
| 104 | * @dev: device to increase the minimum delta | ||
| 105 | * | ||
| 106 | * Returns 0 on success, -ETIME when the minimum delta reached the limit. | ||
| 107 | */ | ||
| 108 | static int clockevents_increase_min_delta(struct clock_event_device *dev) | ||
| 109 | { | ||
| 110 | /* Nothing to do if we already reached the limit */ | ||
| 111 | if (dev->min_delta_ns >= MIN_DELTA_LIMIT) { | ||
| 112 | printk(KERN_WARNING "CE: Reprogramming failure. Giving up\n"); | ||
| 113 | dev->next_event.tv64 = KTIME_MAX; | ||
| 114 | return -ETIME; | ||
| 115 | } | ||
| 116 | |||
| 117 | if (dev->min_delta_ns < 5000) | ||
| 118 | dev->min_delta_ns = 5000; | ||
| 119 | else | ||
| 120 | dev->min_delta_ns += dev->min_delta_ns >> 1; | ||
| 121 | |||
| 122 | if (dev->min_delta_ns > MIN_DELTA_LIMIT) | ||
| 123 | dev->min_delta_ns = MIN_DELTA_LIMIT; | ||
| 124 | |||
| 125 | printk(KERN_WARNING "CE: %s increased min_delta_ns to %llu nsec\n", | ||
| 126 | dev->name ? dev->name : "?", | ||
| 127 | (unsigned long long) dev->min_delta_ns); | ||
| 128 | return 0; | ||
| 129 | } | ||
| 130 | |||
| 131 | /** | ||
| 132 | * clockevents_program_min_delta - Set clock event device to the minimum delay. | ||
| 133 | * @dev: device to program | ||
| 134 | * | ||
| 135 | * Returns 0 on success, -ETIME when the retry loop failed. | ||
| 136 | */ | ||
| 137 | static int clockevents_program_min_delta(struct clock_event_device *dev) | ||
| 138 | { | ||
| 139 | unsigned long long clc; | ||
| 140 | int64_t delta; | ||
| 141 | int i; | ||
| 142 | |||
| 143 | for (i = 0;;) { | ||
| 144 | delta = dev->min_delta_ns; | ||
| 145 | dev->next_event = ktime_add_ns(ktime_get(), delta); | ||
| 146 | |||
| 147 | if (dev->mode == CLOCK_EVT_MODE_SHUTDOWN) | ||
| 148 | return 0; | ||
| 149 | |||
| 150 | dev->retries++; | ||
| 151 | clc = ((unsigned long long) delta * dev->mult) >> dev->shift; | ||
| 152 | if (dev->set_next_event((unsigned long) clc, dev) == 0) | ||
| 153 | return 0; | ||
| 154 | |||
| 155 | if (++i > 2) { | ||
| 156 | /* | ||
| 157 | * We tried 3 times to program the device with the | ||
| 158 | * given min_delta_ns. Try to increase the minimum | ||
| 159 | * delta, if that fails as well get out of here. | ||
| 160 | */ | ||
| 161 | if (clockevents_increase_min_delta(dev)) | ||
| 162 | return -ETIME; | ||
| 163 | i = 0; | ||
| 164 | } | ||
| 165 | } | ||
| 166 | } | ||
| 167 | |||
| 168 | #else /* CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST */ | ||
| 169 | |||
| 170 | /** | ||
| 171 | * clockevents_program_min_delta - Set clock event device to the minimum delay. | ||
| 172 | * @dev: device to program | ||
| 173 | * | ||
| 174 | * Returns 0 on success, -ETIME when the retry loop failed. | ||
| 175 | */ | ||
| 176 | static int clockevents_program_min_delta(struct clock_event_device *dev) | ||
| 177 | { | ||
| 178 | unsigned long long clc; | ||
| 179 | int64_t delta; | ||
| 180 | |||
| 181 | delta = dev->min_delta_ns; | ||
| 182 | dev->next_event = ktime_add_ns(ktime_get(), delta); | ||
| 183 | |||
| 184 | if (dev->mode == CLOCK_EVT_MODE_SHUTDOWN) | ||
| 185 | return 0; | ||
| 186 | |||
| 187 | dev->retries++; | ||
| 188 | clc = ((unsigned long long) delta * dev->mult) >> dev->shift; | ||
| 189 | return dev->set_next_event((unsigned long) clc, dev); | ||
| 190 | } | ||
| 191 | |||
| 192 | #endif /* CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST */ | ||
| 193 | |||
| 97 | /** | 194 | /** |
| 98 | * clockevents_program_event - Reprogram the clock event device. | 195 | * clockevents_program_event - Reprogram the clock event device. |
| 196 | * @dev: device to program | ||
| 99 | * @expires: absolute expiry time (monotonic clock) | 197 | * @expires: absolute expiry time (monotonic clock) |
| 198 | * @force: program minimum delay if expires can not be set | ||
| 100 | * | 199 | * |
| 101 | * Returns 0 on success, -ETIME when the event is in the past. | 200 | * Returns 0 on success, -ETIME when the event is in the past. |
| 102 | */ | 201 | */ |
| 103 | int clockevents_program_event(struct clock_event_device *dev, ktime_t expires, | 202 | int clockevents_program_event(struct clock_event_device *dev, ktime_t expires, |
| 104 | ktime_t now) | 203 | bool force) |
| 105 | { | 204 | { |
| 106 | unsigned long long clc; | 205 | unsigned long long clc; |
| 107 | int64_t delta; | 206 | int64_t delta; |
| 207 | int rc; | ||
| 108 | 208 | ||
| 109 | if (unlikely(expires.tv64 < 0)) { | 209 | if (unlikely(expires.tv64 < 0)) { |
| 110 | WARN_ON_ONCE(1); | 210 | WARN_ON_ONCE(1); |
| 111 | return -ETIME; | 211 | return -ETIME; |
| 112 | } | 212 | } |
| 113 | 213 | ||
| 114 | delta = ktime_to_ns(ktime_sub(expires, now)); | ||
| 115 | |||
| 116 | if (delta <= 0) | ||
| 117 | return -ETIME; | ||
| 118 | |||
| 119 | dev->next_event = expires; | 214 | dev->next_event = expires; |
| 120 | 215 | ||
| 121 | if (dev->mode == CLOCK_EVT_MODE_SHUTDOWN) | 216 | if (dev->mode == CLOCK_EVT_MODE_SHUTDOWN) |
| 122 | return 0; | 217 | return 0; |
| 123 | 218 | ||
| 124 | if (delta > dev->max_delta_ns) | 219 | /* Shortcut for clockevent devices that can deal with ktime. */ |
| 125 | delta = dev->max_delta_ns; | 220 | if (dev->features & CLOCK_EVT_FEAT_KTIME) |
| 126 | if (delta < dev->min_delta_ns) | 221 | return dev->set_next_ktime(expires, dev); |
| 127 | delta = dev->min_delta_ns; | 222 | |
| 223 | delta = ktime_to_ns(ktime_sub(expires, ktime_get())); | ||
| 224 | if (delta <= 0) | ||
| 225 | return force ? clockevents_program_min_delta(dev) : -ETIME; | ||
| 128 | 226 | ||
| 129 | clc = delta * dev->mult; | 227 | delta = min(delta, (int64_t) dev->max_delta_ns); |
| 130 | clc >>= dev->shift; | 228 | delta = max(delta, (int64_t) dev->min_delta_ns); |
| 131 | 229 | ||
| 132 | return dev->set_next_event((unsigned long) clc, dev); | 230 | clc = ((unsigned long long) delta * dev->mult) >> dev->shift; |
| 231 | rc = dev->set_next_event((unsigned long) clc, dev); | ||
| 232 | |||
| 233 | return (rc && force) ? clockevents_program_min_delta(dev) : rc; | ||
| 133 | } | 234 | } |
| 134 | 235 | ||
| 135 | /** | 236 | /** |
| @@ -258,7 +359,7 @@ int clockevents_update_freq(struct clock_event_device *dev, u32 freq) | |||
| 258 | if (dev->mode != CLOCK_EVT_MODE_ONESHOT) | 359 | if (dev->mode != CLOCK_EVT_MODE_ONESHOT) |
| 259 | return 0; | 360 | return 0; |
| 260 | 361 | ||
| 261 | return clockevents_program_event(dev, dev->next_event, ktime_get()); | 362 | return clockevents_program_event(dev, dev->next_event, false); |
| 262 | } | 363 | } |
| 263 | 364 | ||
| 264 | /* | 365 | /* |
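Note: clockevents_program_event() now takes a force flag instead of a now timestamp. It reads ktime_get() itself, programs CLOCK_EVT_FEAT_KTIME devices directly through set_next_ktime(), clamps the delta with min()/max(), and only when force is set falls back to clockevents_program_min_delta() above (which either retries with an increasing minimum delta or, without CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST, programs min_delta_ns once). A minimal caller sketch, not part of the patch, mirroring the periodic-tick users converted below:

	/* force == false: an expiry already in the past just returns
	 * -ETIME, so the loop advances to the next period and retries.
	 */
	for (;;) {
		if (!clockevents_program_event(dev, next, false))
			break;
		next = ktime_add(next, tick_period);
	}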
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c index e0980f0d9a0a..cf52fda2e096 100644 --- a/kernel/time/clocksource.c +++ b/kernel/time/clocksource.c | |||
| @@ -186,6 +186,7 @@ static struct timer_list watchdog_timer; | |||
| 186 | static DECLARE_WORK(watchdog_work, clocksource_watchdog_work); | 186 | static DECLARE_WORK(watchdog_work, clocksource_watchdog_work); |
| 187 | static DEFINE_SPINLOCK(watchdog_lock); | 187 | static DEFINE_SPINLOCK(watchdog_lock); |
| 188 | static int watchdog_running; | 188 | static int watchdog_running; |
| 189 | static atomic_t watchdog_reset_pending; | ||
| 189 | 190 | ||
| 190 | static int clocksource_watchdog_kthread(void *data); | 191 | static int clocksource_watchdog_kthread(void *data); |
| 191 | static void __clocksource_change_rating(struct clocksource *cs, int rating); | 192 | static void __clocksource_change_rating(struct clocksource *cs, int rating); |
| @@ -247,12 +248,14 @@ static void clocksource_watchdog(unsigned long data) | |||
| 247 | struct clocksource *cs; | 248 | struct clocksource *cs; |
| 248 | cycle_t csnow, wdnow; | 249 | cycle_t csnow, wdnow; |
| 249 | int64_t wd_nsec, cs_nsec; | 250 | int64_t wd_nsec, cs_nsec; |
| 250 | int next_cpu; | 251 | int next_cpu, reset_pending; |
| 251 | 252 | ||
| 252 | spin_lock(&watchdog_lock); | 253 | spin_lock(&watchdog_lock); |
| 253 | if (!watchdog_running) | 254 | if (!watchdog_running) |
| 254 | goto out; | 255 | goto out; |
| 255 | 256 | ||
| 257 | reset_pending = atomic_read(&watchdog_reset_pending); | ||
| 258 | |||
| 256 | list_for_each_entry(cs, &watchdog_list, wd_list) { | 259 | list_for_each_entry(cs, &watchdog_list, wd_list) { |
| 257 | 260 | ||
| 258 | /* Clocksource already marked unstable? */ | 261 | /* Clocksource already marked unstable? */ |
| @@ -268,7 +271,8 @@ static void clocksource_watchdog(unsigned long data) | |||
| 268 | local_irq_enable(); | 271 | local_irq_enable(); |
| 269 | 272 | ||
| 270 | /* Clocksource initialized ? */ | 273 | /* Clocksource initialized ? */ |
| 271 | if (!(cs->flags & CLOCK_SOURCE_WATCHDOG)) { | 274 | if (!(cs->flags & CLOCK_SOURCE_WATCHDOG) || |
| 275 | atomic_read(&watchdog_reset_pending)) { | ||
| 272 | cs->flags |= CLOCK_SOURCE_WATCHDOG; | 276 | cs->flags |= CLOCK_SOURCE_WATCHDOG; |
| 273 | cs->wd_last = wdnow; | 277 | cs->wd_last = wdnow; |
| 274 | cs->cs_last = csnow; | 278 | cs->cs_last = csnow; |
| @@ -283,8 +287,11 @@ static void clocksource_watchdog(unsigned long data) | |||
| 283 | cs->cs_last = csnow; | 287 | cs->cs_last = csnow; |
| 284 | cs->wd_last = wdnow; | 288 | cs->wd_last = wdnow; |
| 285 | 289 | ||
| 290 | if (atomic_read(&watchdog_reset_pending)) | ||
| 291 | continue; | ||
| 292 | |||
| 286 | /* Check the deviation from the watchdog clocksource. */ | 293 | /* Check the deviation from the watchdog clocksource. */ |
| 287 | if (abs(cs_nsec - wd_nsec) > WATCHDOG_THRESHOLD) { | 294 | if ((abs(cs_nsec - wd_nsec) > WATCHDOG_THRESHOLD)) { |
| 288 | clocksource_unstable(cs, cs_nsec - wd_nsec); | 295 | clocksource_unstable(cs, cs_nsec - wd_nsec); |
| 289 | continue; | 296 | continue; |
| 290 | } | 297 | } |
| @@ -303,6 +310,13 @@ static void clocksource_watchdog(unsigned long data) | |||
| 303 | } | 310 | } |
| 304 | 311 | ||
| 305 | /* | 312 | /* |
| 313 | * We only clear the watchdog_reset_pending, when we did a | ||
| 314 | * full cycle through all clocksources. | ||
| 315 | */ | ||
| 316 | if (reset_pending) | ||
| 317 | atomic_dec(&watchdog_reset_pending); | ||
| 318 | |||
| 319 | /* | ||
| 306 | * Cycle through CPUs to check if the CPUs stay synchronized | 320 | * Cycle through CPUs to check if the CPUs stay synchronized |
| 307 | * to each other. | 321 | * to each other. |
| 308 | */ | 322 | */ |
| @@ -344,23 +358,7 @@ static inline void clocksource_reset_watchdog(void) | |||
| 344 | 358 | ||
| 345 | static void clocksource_resume_watchdog(void) | 359 | static void clocksource_resume_watchdog(void) |
| 346 | { | 360 | { |
| 347 | unsigned long flags; | 361 | atomic_inc(&watchdog_reset_pending); |
| 348 | |||
| 349 | /* | ||
| 350 | * We use trylock here to avoid a potential dead lock when | ||
| 351 | * kgdb calls this code after the kernel has been stopped with | ||
| 352 | * watchdog_lock held. When watchdog_lock is held we just | ||
| 353 | * return and accept, that the watchdog might trigger and mark | ||
| 354 | * the monitored clock source (usually TSC) unstable. | ||
| 355 | * | ||
| 356 | * This does not affect the other caller clocksource_resume() | ||
| 357 | * because at this point the kernel is UP, interrupts are | ||
| 358 | * disabled and nothing can hold watchdog_lock. | ||
| 359 | */ | ||
| 360 | if (!spin_trylock_irqsave(&watchdog_lock, flags)) | ||
| 361 | return; | ||
| 362 | clocksource_reset_watchdog(); | ||
| 363 | spin_unlock_irqrestore(&watchdog_lock, flags); | ||
| 364 | } | 362 | } |
| 365 | 363 | ||
| 366 | static void clocksource_enqueue_watchdog(struct clocksource *cs) | 364 | static void clocksource_enqueue_watchdog(struct clocksource *cs) |
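Note: the watchdog reset is now lockless. Resume paths only bump an atomic counter, so kgdb and suspend/resume no longer risk deadlocking on watchdog_lock; the request side after the patch is just:

	/* Requester side: no lock needed, safe from any context. */
	static void clocksource_resume_watchdog(void)
	{
		atomic_inc(&watchdog_reset_pending);
	}

On the consumer side, clocksource_watchdog() samples the counter once per timer run, re-arms the per-clocksource state and skips the deviation check while a reset is pending, and decrements the counter only after walking the whole list, so a request always covers one complete cycle.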
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c index c7218d132738..f954282d9a82 100644 --- a/kernel/time/tick-broadcast.c +++ b/kernel/time/tick-broadcast.c | |||
| @@ -194,7 +194,7 @@ static void tick_handle_periodic_broadcast(struct clock_event_device *dev) | |||
| 194 | for (next = dev->next_event; ;) { | 194 | for (next = dev->next_event; ;) { |
| 195 | next = ktime_add(next, tick_period); | 195 | next = ktime_add(next, tick_period); |
| 196 | 196 | ||
| 197 | if (!clockevents_program_event(dev, next, ktime_get())) | 197 | if (!clockevents_program_event(dev, next, false)) |
| 198 | return; | 198 | return; |
| 199 | tick_do_periodic_broadcast(); | 199 | tick_do_periodic_broadcast(); |
| 200 | } | 200 | } |
| @@ -373,7 +373,7 @@ static int tick_broadcast_set_event(ktime_t expires, int force) | |||
| 373 | { | 373 | { |
| 374 | struct clock_event_device *bc = tick_broadcast_device.evtdev; | 374 | struct clock_event_device *bc = tick_broadcast_device.evtdev; |
| 375 | 375 | ||
| 376 | return tick_dev_program_event(bc, expires, force); | 376 | return clockevents_program_event(bc, expires, force); |
| 377 | } | 377 | } |
| 378 | 378 | ||
| 379 | int tick_resume_broadcast_oneshot(struct clock_event_device *bc) | 379 | int tick_resume_broadcast_oneshot(struct clock_event_device *bc) |
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c index 119528de8235..da6c9ecad4e4 100644 --- a/kernel/time/tick-common.c +++ b/kernel/time/tick-common.c | |||
| @@ -94,7 +94,7 @@ void tick_handle_periodic(struct clock_event_device *dev) | |||
| 94 | */ | 94 | */ |
| 95 | next = ktime_add(dev->next_event, tick_period); | 95 | next = ktime_add(dev->next_event, tick_period); |
| 96 | for (;;) { | 96 | for (;;) { |
| 97 | if (!clockevents_program_event(dev, next, ktime_get())) | 97 | if (!clockevents_program_event(dev, next, false)) |
| 98 | return; | 98 | return; |
| 99 | /* | 99 | /* |
| 100 | * Have to be careful here. If we're in oneshot mode, | 100 | * Have to be careful here. If we're in oneshot mode, |
| @@ -137,7 +137,7 @@ void tick_setup_periodic(struct clock_event_device *dev, int broadcast) | |||
| 137 | clockevents_set_mode(dev, CLOCK_EVT_MODE_ONESHOT); | 137 | clockevents_set_mode(dev, CLOCK_EVT_MODE_ONESHOT); |
| 138 | 138 | ||
| 139 | for (;;) { | 139 | for (;;) { |
| 140 | if (!clockevents_program_event(dev, next, ktime_get())) | 140 | if (!clockevents_program_event(dev, next, false)) |
| 141 | return; | 141 | return; |
| 142 | next = ktime_add(next, tick_period); | 142 | next = ktime_add(next, tick_period); |
| 143 | } | 143 | } |
diff --git a/kernel/time/tick-internal.h b/kernel/time/tick-internal.h index 1009b06d6f89..4e265b901fed 100644 --- a/kernel/time/tick-internal.h +++ b/kernel/time/tick-internal.h | |||
| @@ -26,8 +26,6 @@ extern void clockevents_shutdown(struct clock_event_device *dev); | |||
| 26 | extern void tick_setup_oneshot(struct clock_event_device *newdev, | 26 | extern void tick_setup_oneshot(struct clock_event_device *newdev, |
| 27 | void (*handler)(struct clock_event_device *), | 27 | void (*handler)(struct clock_event_device *), |
| 28 | ktime_t nextevt); | 28 | ktime_t nextevt); |
| 29 | extern int tick_dev_program_event(struct clock_event_device *dev, | ||
| 30 | ktime_t expires, int force); | ||
| 31 | extern int tick_program_event(ktime_t expires, int force); | 29 | extern int tick_program_event(ktime_t expires, int force); |
| 32 | extern void tick_oneshot_notify(void); | 30 | extern void tick_oneshot_notify(void); |
| 33 | extern int tick_switch_to_oneshot(void (*handler)(struct clock_event_device *)); | 31 | extern int tick_switch_to_oneshot(void (*handler)(struct clock_event_device *)); |
diff --git a/kernel/time/tick-oneshot.c b/kernel/time/tick-oneshot.c index 2d04411a5f05..824109060a33 100644 --- a/kernel/time/tick-oneshot.c +++ b/kernel/time/tick-oneshot.c | |||
| @@ -21,74 +21,6 @@ | |||
| 21 | 21 | ||
| 22 | #include "tick-internal.h" | 22 | #include "tick-internal.h" |
| 23 | 23 | ||
| 24 | /* Limit min_delta to a jiffie */ | ||
| 25 | #define MIN_DELTA_LIMIT (NSEC_PER_SEC / HZ) | ||
| 26 | |||
| 27 | static int tick_increase_min_delta(struct clock_event_device *dev) | ||
| 28 | { | ||
| 29 | /* Nothing to do if we already reached the limit */ | ||
| 30 | if (dev->min_delta_ns >= MIN_DELTA_LIMIT) | ||
| 31 | return -ETIME; | ||
| 32 | |||
| 33 | if (dev->min_delta_ns < 5000) | ||
| 34 | dev->min_delta_ns = 5000; | ||
| 35 | else | ||
| 36 | dev->min_delta_ns += dev->min_delta_ns >> 1; | ||
| 37 | |||
| 38 | if (dev->min_delta_ns > MIN_DELTA_LIMIT) | ||
| 39 | dev->min_delta_ns = MIN_DELTA_LIMIT; | ||
| 40 | |||
| 41 | printk(KERN_WARNING "CE: %s increased min_delta_ns to %llu nsec\n", | ||
| 42 | dev->name ? dev->name : "?", | ||
| 43 | (unsigned long long) dev->min_delta_ns); | ||
| 44 | return 0; | ||
| 45 | } | ||
| 46 | |||
| 47 | /** | ||
| 48 | * tick_program_event internal worker function | ||
| 49 | */ | ||
| 50 | int tick_dev_program_event(struct clock_event_device *dev, ktime_t expires, | ||
| 51 | int force) | ||
| 52 | { | ||
| 53 | ktime_t now = ktime_get(); | ||
| 54 | int i; | ||
| 55 | |||
| 56 | for (i = 0;;) { | ||
| 57 | int ret = clockevents_program_event(dev, expires, now); | ||
| 58 | |||
| 59 | if (!ret || !force) | ||
| 60 | return ret; | ||
| 61 | |||
| 62 | dev->retries++; | ||
| 63 | /* | ||
| 64 | * We tried 3 times to program the device with the given | ||
| 65 | * min_delta_ns. If that's not working then we increase it | ||
| 66 | * and emit a warning. | ||
| 67 | */ | ||
| 68 | if (++i > 2) { | ||
| 69 | /* Increase the min. delta and try again */ | ||
| 70 | if (tick_increase_min_delta(dev)) { | ||
| 71 | /* | ||
| 72 | * Get out of the loop if min_delta_ns | ||
| 73 | * hit the limit already. That's | ||
| 74 | * better than staying here forever. | ||
| 75 | * | ||
| 76 | * We clear next_event so we have a | ||
| 77 | * chance that the box survives. | ||
| 78 | */ | ||
| 79 | printk(KERN_WARNING | ||
| 80 | "CE: Reprogramming failure. Giving up\n"); | ||
| 81 | dev->next_event.tv64 = KTIME_MAX; | ||
| 82 | return -ETIME; | ||
| 83 | } | ||
| 84 | i = 0; | ||
| 85 | } | ||
| 86 | |||
| 87 | now = ktime_get(); | ||
| 88 | expires = ktime_add_ns(now, dev->min_delta_ns); | ||
| 89 | } | ||
| 90 | } | ||
| 91 | |||
| 92 | /** | 24 | /** |
| 93 | * tick_program_event | 25 | * tick_program_event |
| 94 | */ | 26 | */ |
| @@ -96,7 +28,7 @@ int tick_program_event(ktime_t expires, int force) | |||
| 96 | { | 28 | { |
| 97 | struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev); | 29 | struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev); |
| 98 | 30 | ||
| 99 | return tick_dev_program_event(dev, expires, force); | 31 | return clockevents_program_event(dev, expires, force); |
| 100 | } | 32 | } |
| 101 | 33 | ||
| 102 | /** | 34 | /** |
| @@ -104,11 +36,10 @@ int tick_program_event(ktime_t expires, int force) | |||
| 104 | */ | 36 | */ |
| 105 | void tick_resume_oneshot(void) | 37 | void tick_resume_oneshot(void) |
| 106 | { | 38 | { |
| 107 | struct tick_device *td = &__get_cpu_var(tick_cpu_device); | 39 | struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev); |
| 108 | struct clock_event_device *dev = td->evtdev; | ||
| 109 | 40 | ||
| 110 | clockevents_set_mode(dev, CLOCK_EVT_MODE_ONESHOT); | 41 | clockevents_set_mode(dev, CLOCK_EVT_MODE_ONESHOT); |
| 111 | tick_program_event(ktime_get(), 1); | 42 | clockevents_program_event(dev, ktime_get(), true); |
| 112 | } | 43 | } |
| 113 | 44 | ||
| 114 | /** | 45 | /** |
| @@ -120,7 +51,7 @@ void tick_setup_oneshot(struct clock_event_device *newdev, | |||
| 120 | { | 51 | { |
| 121 | newdev->event_handler = handler; | 52 | newdev->event_handler = handler; |
| 122 | clockevents_set_mode(newdev, CLOCK_EVT_MODE_ONESHOT); | 53 | clockevents_set_mode(newdev, CLOCK_EVT_MODE_ONESHOT); |
| 123 | tick_dev_program_event(newdev, next_event, 1); | 54 | clockevents_program_event(newdev, next_event, true); |
| 124 | } | 55 | } |
| 125 | 56 | ||
| 126 | /** | 57 | /** |
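Note: tick-oneshot.c loses its private retry machinery. tick_increase_min_delta() and tick_dev_program_event() are removed, and tick_program_event(), tick_resume_oneshot() and tick_setup_oneshot() call clockevents_program_event() directly, passing the force flag through. A hedged usage sketch of the new semantics (the printk is illustrative, not from the patch):

	/* A forced program retries via the min-delta path in the
	 * clockevents core; with CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST it
	 * can still fail once min_delta_ns has been raised to its limit.
	 */
	if (clockevents_program_event(dev, expires, true) == -ETIME)
		printk(KERN_WARNING "CE: reprogramming %s failed\n", dev->name);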
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c index eb98e55196b9..40420644d0ba 100644 --- a/kernel/time/tick-sched.c +++ b/kernel/time/tick-sched.c | |||
| @@ -158,9 +158,10 @@ update_ts_time_stats(int cpu, struct tick_sched *ts, ktime_t now, u64 *last_upda | |||
| 158 | 158 | ||
| 159 | if (ts->idle_active) { | 159 | if (ts->idle_active) { |
| 160 | delta = ktime_sub(now, ts->idle_entrytime); | 160 | delta = ktime_sub(now, ts->idle_entrytime); |
| 161 | ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta); | ||
| 162 | if (nr_iowait_cpu(cpu) > 0) | 161 | if (nr_iowait_cpu(cpu) > 0) |
| 163 | ts->iowait_sleeptime = ktime_add(ts->iowait_sleeptime, delta); | 162 | ts->iowait_sleeptime = ktime_add(ts->iowait_sleeptime, delta); |
| 163 | else | ||
| 164 | ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta); | ||
| 164 | ts->idle_entrytime = now; | 165 | ts->idle_entrytime = now; |
| 165 | } | 166 | } |
| 166 | 167 | ||
| @@ -196,11 +197,11 @@ static ktime_t tick_nohz_start_idle(int cpu, struct tick_sched *ts) | |||
| 196 | /** | 197 | /** |
| 197 | * get_cpu_idle_time_us - get the total idle time of a cpu | 198 | * get_cpu_idle_time_us - get the total idle time of a cpu |
| 198 | * @cpu: CPU number to query | 199 | * @cpu: CPU number to query |
| 199 | * @last_update_time: variable to store update time in | 200 | * @last_update_time: variable to store update time in. Do not update |
| 201 | * counters if NULL. | ||
| 200 | * | 202 | * |
| 201 | * Return the cummulative idle time (since boot) for a given | 203 | * Return the cummulative idle time (since boot) for a given |
| 202 | * CPU, in microseconds. The idle time returned includes | 204 | * CPU, in microseconds. |
| 203 | * the iowait time (unlike what "top" and co report). | ||
| 204 | * | 205 | * |
| 205 | * This time is measured via accounting rather than sampling, | 206 | * This time is measured via accounting rather than sampling, |
| 206 | * and is as accurate as ktime_get() is. | 207 | * and is as accurate as ktime_get() is. |
| @@ -210,20 +211,35 @@ static ktime_t tick_nohz_start_idle(int cpu, struct tick_sched *ts) | |||
| 210 | u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time) | 211 | u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time) |
| 211 | { | 212 | { |
| 212 | struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu); | 213 | struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu); |
| 214 | ktime_t now, idle; | ||
| 213 | 215 | ||
| 214 | if (!tick_nohz_enabled) | 216 | if (!tick_nohz_enabled) |
| 215 | return -1; | 217 | return -1; |
| 216 | 218 | ||
| 217 | update_ts_time_stats(cpu, ts, ktime_get(), last_update_time); | 219 | now = ktime_get(); |
| 220 | if (last_update_time) { | ||
| 221 | update_ts_time_stats(cpu, ts, now, last_update_time); | ||
| 222 | idle = ts->idle_sleeptime; | ||
| 223 | } else { | ||
| 224 | if (ts->idle_active && !nr_iowait_cpu(cpu)) { | ||
| 225 | ktime_t delta = ktime_sub(now, ts->idle_entrytime); | ||
| 226 | |||
| 227 | idle = ktime_add(ts->idle_sleeptime, delta); | ||
| 228 | } else { | ||
| 229 | idle = ts->idle_sleeptime; | ||
| 230 | } | ||
| 231 | } | ||
| 232 | |||
| 233 | return ktime_to_us(idle); | ||
| 218 | 234 | ||
| 219 | return ktime_to_us(ts->idle_sleeptime); | ||
| 220 | } | 235 | } |
| 221 | EXPORT_SYMBOL_GPL(get_cpu_idle_time_us); | 236 | EXPORT_SYMBOL_GPL(get_cpu_idle_time_us); |
| 222 | 237 | ||
| 223 | /* | 238 | /** |
| 224 | * get_cpu_iowait_time_us - get the total iowait time of a cpu | 239 | * get_cpu_iowait_time_us - get the total iowait time of a cpu |
| 225 | * @cpu: CPU number to query | 240 | * @cpu: CPU number to query |
| 226 | * @last_update_time: variable to store update time in | 241 | * @last_update_time: variable to store update time in. Do not update |
| 242 | * counters if NULL. | ||
| 227 | * | 243 | * |
| 228 | * Return the cummulative iowait time (since boot) for a given | 244 | * Return the cummulative iowait time (since boot) for a given |
| 229 | * CPU, in microseconds. | 245 | * CPU, in microseconds. |
| @@ -236,13 +252,26 @@ EXPORT_SYMBOL_GPL(get_cpu_idle_time_us); | |||
| 236 | u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time) | 252 | u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time) |
| 237 | { | 253 | { |
| 238 | struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu); | 254 | struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu); |
| 255 | ktime_t now, iowait; | ||
| 239 | 256 | ||
| 240 | if (!tick_nohz_enabled) | 257 | if (!tick_nohz_enabled) |
| 241 | return -1; | 258 | return -1; |
| 242 | 259 | ||
| 243 | update_ts_time_stats(cpu, ts, ktime_get(), last_update_time); | 260 | now = ktime_get(); |
| 261 | if (last_update_time) { | ||
| 262 | update_ts_time_stats(cpu, ts, now, last_update_time); | ||
| 263 | iowait = ts->iowait_sleeptime; | ||
| 264 | } else { | ||
| 265 | if (ts->idle_active && nr_iowait_cpu(cpu) > 0) { | ||
| 266 | ktime_t delta = ktime_sub(now, ts->idle_entrytime); | ||
| 244 | 267 | ||
| 245 | return ktime_to_us(ts->iowait_sleeptime); | 268 | iowait = ktime_add(ts->iowait_sleeptime, delta); |
| 269 | } else { | ||
| 270 | iowait = ts->iowait_sleeptime; | ||
| 271 | } | ||
| 272 | } | ||
| 273 | |||
| 274 | return ktime_to_us(iowait); | ||
| 246 | } | 275 | } |
| 247 | EXPORT_SYMBOL_GPL(get_cpu_iowait_time_us); | 276 | EXPORT_SYMBOL_GPL(get_cpu_iowait_time_us); |
| 248 | 277 | ||
| @@ -634,8 +663,6 @@ static void tick_nohz_switch_to_nohz(void) | |||
| 634 | next = ktime_add(next, tick_period); | 663 | next = ktime_add(next, tick_period); |
| 635 | } | 664 | } |
| 636 | local_irq_enable(); | 665 | local_irq_enable(); |
| 637 | |||
| 638 | printk(KERN_INFO "Switched to NOHz mode on CPU #%d\n", smp_processor_id()); | ||
| 639 | } | 666 | } |
| 640 | 667 | ||
| 641 | /* | 668 | /* |
| @@ -787,10 +814,8 @@ void tick_setup_sched_timer(void) | |||
| 787 | } | 814 | } |
| 788 | 815 | ||
| 789 | #ifdef CONFIG_NO_HZ | 816 | #ifdef CONFIG_NO_HZ |
| 790 | if (tick_nohz_enabled) { | 817 | if (tick_nohz_enabled) |
| 791 | ts->nohz_mode = NOHZ_MODE_HIGHRES; | 818 | ts->nohz_mode = NOHZ_MODE_HIGHRES; |
| 792 | printk(KERN_INFO "Switched to NOHz mode on CPU #%d\n", smp_processor_id()); | ||
| 793 | } | ||
| 794 | #endif | 819 | #endif |
| 795 | } | 820 | } |
| 796 | #endif /* HIGH_RES_TIMERS */ | 821 | #endif /* HIGH_RES_TIMERS */ |
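Note: the NO_HZ accounting fixes make idle and iowait time mutually exclusive in update_ts_time_stats() (a sleeping interval is charged to iowait_sleeptime or to idle_sleeptime, never both), and get_cpu_idle_time_us()/get_cpu_iowait_time_us() only update the counters when the caller passes a last_update_time pointer; with NULL they return a snapshot extrapolated to now. A hedged reader-side example (variable names are illustrative):

	/* Snapshot only: passing NULL leaves the per-cpu counters alone,
	 * which is what a pure reporting path wants.
	 */
	u64 idle_us   = get_cpu_idle_time_us(cpu, NULL);
	u64 iowait_us = get_cpu_iowait_time_us(cpu, NULL);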
