-rw-r--r--	arch/s390/Kconfig            |  24
-rw-r--r--	arch/s390/kernel/process.c   |   4
-rw-r--r--	arch/s390/kernel/s390_ext.c  |   9
-rw-r--r--	arch/s390/kernel/setup.c     |   2
-rw-r--r--	arch/s390/kernel/time.c      | 256
-rw-r--r--	arch/s390/lib/delay.c        |  14
-rw-r--r--	drivers/s390/cio/cio.c       |   9
-rw-r--r--	include/asm-s390/hardirq.h   |   2
-rw-r--r--	include/asm-s390/lowcore.h   |   6
9 files changed, 98 insertions(+), 228 deletions(-)
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index da6ea64cc34d..f6a68e178fc5 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -43,6 +43,9 @@ config GENERIC_HWEIGHT
 config GENERIC_TIME
 	def_bool y
 
+config GENERIC_CLOCKEVENTS
+	def_bool y
+
 config GENERIC_BUG
 	bool
 	depends on BUG
@@ -73,6 +76,8 @@ menu "Base setup"
 
 comment "Processor type and features"
 
+source "kernel/time/Kconfig"
+
 config 64BIT
 	bool "64 bit kernel"
 	help
@@ -487,25 +492,6 @@ config APPLDATA_NET_SUM
 
 source kernel/Kconfig.hz
 
-config NO_IDLE_HZ
-	bool "No HZ timer ticks in idle"
-	help
-	  Switches the regular HZ timer off when the system is going idle.
-	  This helps z/VM to detect that the Linux system is idle. VM can
-	  then "swap-out" this guest which reduces memory usage. It also
-	  reduces the overhead of idle systems.
-
-	  The HZ timer can be switched on/off via /proc/sys/kernel/hz_timer.
-	  hz_timer=0 means HZ timer is disabled. hz_timer=1 means HZ
-	  timer is active.
-
-config NO_IDLE_HZ_INIT
-	bool "HZ timer in idle off by default"
-	depends on NO_IDLE_HZ
-	help
-	  The HZ timer is switched off in idle by default. That means the
-	  HZ timer is already disabled at boot time.
-
 config S390_HYPFS_FS
 	bool "s390 hypervisor file system support"
 	select SYS_HYPERVISOR
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index eb768ce88672..df033249f6b1 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -36,6 +36,7 @@
 #include <linux/module.h>
 #include <linux/notifier.h>
 #include <linux/utsname.h>
+#include <linux/tick.h>
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>
 #include <asm/system.h>
@@ -167,9 +168,10 @@ static void default_idle(void)
 void cpu_idle(void)
 {
 	for (;;) {
+		tick_nohz_stop_sched_tick();
 		while (!need_resched())
 			default_idle();
-
+		tick_nohz_restart_sched_tick();
 		preempt_enable_no_resched();
 		schedule();
 		preempt_disable();
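
Note (not part of the patch): the idle loop now leans on the generic dynticks code that GENERIC_CLOCKEVENTS makes available. With CONFIG_NO_HZ disabled the two new calls should compile away to empty inline stubs, so the change is harmless there; a rough sketch of the <linux/tick.h> fallback of that kernel generation, quoted from memory rather than from this patch:

	#ifndef CONFIG_NO_HZ
	static inline void tick_nohz_stop_sched_tick(void) { }
	static inline void tick_nohz_restart_sched_tick(void) { }
	#endif

With CONFIG_NO_HZ enabled, tick_nohz_stop_sched_tick() asks the clockevents layer to program the next event for the next pending timer instead of the next HZ tick, which on s390 ends up in s390_next_event() further down in this patch.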
diff --git a/arch/s390/kernel/s390_ext.c b/arch/s390/kernel/s390_ext.c
index 3a8772d3baea..947d8c74403b 100644
--- a/arch/s390/kernel/s390_ext.c
+++ b/arch/s390/kernel/s390_ext.c
@@ -120,12 +120,9 @@ void do_extint(struct pt_regs *regs, unsigned short code)
 	old_regs = set_irq_regs(regs);
 	irq_enter();
 	s390_idle_check();
-	if (S390_lowcore.int_clock >= S390_lowcore.jiffy_timer)
-		/**
-		 * Make sure that the i/o interrupt did not "overtake"
-		 * the last HZ timer interrupt.
-		 */
-		account_ticks(S390_lowcore.int_clock);
+	if (S390_lowcore.int_clock >= S390_lowcore.clock_comparator)
+		/* Serve timer interrupts first. */
+		clock_comparator_work();
 	kstat_cpu(smp_processor_id()).irqs[EXTERNAL_INTERRUPT]++;
 	index = ext_hash(code);
 	for (p = ext_int_hash[index]; p; p = p->next) {
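
Note (not part of the patch): S390_lowcore.int_clock is the TOD value saved at interrupt entry. The check above keeps timer handling ahead of other interrupt work: if an external interrupt (or, in drivers/s390/cio/cio.c below, an I/O interrupt) arrives after the programmed comparator value has already passed, the due clockevent is served first. A minimal sketch of the shared pattern, using only names that appear in the patch:

	/* illustrative helper, not added by the patch */
	static inline void serve_pending_timer(void)
	{
		if (S390_lowcore.int_clock >= S390_lowcore.clock_comparator)
			clock_comparator_work();	/* run the clockevent handler first */
	}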
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 22040d087d8a..7141147e6b63 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -428,7 +428,7 @@ setup_lowcore(void)
 	lc->io_new_psw.mask = psw_kernel_bits;
 	lc->io_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) io_int_handler;
 	lc->ipl_device = S390_lowcore.ipl_device;
-	lc->jiffy_timer = -1LL;
+	lc->clock_comparator = -1ULL;
 	lc->kernel_stack = ((unsigned long) &init_thread_union) + THREAD_SIZE;
 	lc->async_stack = (unsigned long)
 		__alloc_bootmem(ASYNC_SIZE, ASYNC_SIZE, 0) + ASYNC_SIZE;
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index 925f9dc0b0a0..17c4de9e1b6b 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -30,7 +30,7 @@
 #include <linux/timex.h>
 #include <linux/notifier.h>
 #include <linux/clocksource.h>
-
+#include <linux/clockchips.h>
 #include <asm/uaccess.h>
 #include <asm/delay.h>
 #include <asm/s390_ext.h>
@@ -57,9 +57,9 @@
 
 static ext_int_info_t ext_int_info_cc;
 static ext_int_info_t ext_int_etr_cc;
-static u64 init_timer_cc;
 static u64 jiffies_timer_cc;
-static u64 xtime_cc;
+
+static DEFINE_PER_CPU(struct clock_event_device, comparators);
 
 /*
  * Scheduler clock - returns current time in nanosec units.
@@ -95,162 +95,40 @@ void tod_to_timeval(__u64 todval, struct timespec *xtime)
 #define s390_do_profile()	do { ; } while(0)
 #endif /* CONFIG_PROFILING */
 
-/*
- * Advance the per cpu tick counter up to the time given with the
- * "time" argument. The per cpu update consists of accounting
- * the virtual cpu time, calling update_process_times and calling
- * the profiling hook. If xtime is before time it is advanced as well.
- */
-void account_ticks(u64 time)
+void clock_comparator_work(void)
 {
-	__u32 ticks;
-	__u64 tmp;
-
-	/* Calculate how many ticks have passed. */
-	if (time < S390_lowcore.jiffy_timer)
-		return;
-	tmp = time - S390_lowcore.jiffy_timer;
-	if (tmp >= 2*CLK_TICKS_PER_JIFFY) {  /* more than two ticks ? */
-		ticks = __div(tmp, CLK_TICKS_PER_JIFFY) + 1;
-		S390_lowcore.jiffy_timer +=
-			CLK_TICKS_PER_JIFFY * (__u64) ticks;
-	} else if (tmp >= CLK_TICKS_PER_JIFFY) {
-		ticks = 2;
-		S390_lowcore.jiffy_timer += 2*CLK_TICKS_PER_JIFFY;
-	} else {
-		ticks = 1;
-		S390_lowcore.jiffy_timer += CLK_TICKS_PER_JIFFY;
-	}
-
-#ifdef CONFIG_SMP
-	/*
-	 * Do not rely on the boot cpu to do the calls to do_timer.
-	 * Spread it over all cpus instead.
-	 */
-	write_seqlock(&xtime_lock);
-	if (S390_lowcore.jiffy_timer > xtime_cc) {
-		__u32 xticks;
-		tmp = S390_lowcore.jiffy_timer - xtime_cc;
-		if (tmp >= 2*CLK_TICKS_PER_JIFFY) {
-			xticks = __div(tmp, CLK_TICKS_PER_JIFFY);
-			xtime_cc += (__u64) xticks * CLK_TICKS_PER_JIFFY;
-		} else {
-			xticks = 1;
-			xtime_cc += CLK_TICKS_PER_JIFFY;
-		}
-		do_timer(xticks);
-	}
-	write_sequnlock(&xtime_lock);
-#else
-	do_timer(ticks);
-#endif
-
-	while (ticks--)
-		update_process_times(user_mode(get_irq_regs()));
+	struct clock_event_device *cd;
 
+	S390_lowcore.clock_comparator = -1ULL;
+	set_clock_comparator(S390_lowcore.clock_comparator);
+	cd = &__get_cpu_var(comparators);
+	cd->event_handler(cd);
 	s390_do_profile();
 }
 
-#ifdef CONFIG_NO_IDLE_HZ
-
-#ifdef CONFIG_NO_IDLE_HZ_INIT
-int sysctl_hz_timer = 0;
-#else
-int sysctl_hz_timer = 1;
-#endif
-
-/*
- * Stop the HZ tick on the current CPU.
- * Only cpu_idle may call this function.
- */
-static void stop_hz_timer(void)
-{
-	unsigned long flags;
-	unsigned long seq, next;
-	__u64 timer, todval;
-	int cpu = smp_processor_id();
-
-	if (sysctl_hz_timer != 0)
-		return;
-
-	cpu_set(cpu, nohz_cpu_mask);
-
-	/*
-	 * Leave the clock comparator set up for the next timer
-	 * tick if either rcu or a softirq is pending.
-	 */
-	if (rcu_needs_cpu(cpu) || local_softirq_pending()) {
-		cpu_clear(cpu, nohz_cpu_mask);
-		return;
-	}
-
-	/*
-	 * This cpu is going really idle. Set up the clock comparator
-	 * for the next event.
-	 */
-	next = next_timer_interrupt();
-	do {
-		seq = read_seqbegin_irqsave(&xtime_lock, flags);
-		timer = ((__u64) next) - ((__u64) jiffies) + jiffies_64;
-	} while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
-	todval = -1ULL;
-	/* Be careful about overflows. */
-	if (timer < (-1ULL / CLK_TICKS_PER_JIFFY)) {
-		timer = jiffies_timer_cc + timer * CLK_TICKS_PER_JIFFY;
-		if (timer >= jiffies_timer_cc)
-			todval = timer;
-	}
-	set_clock_comparator(todval);
-}
-
 /*
- * Start the HZ tick on the current CPU.
- * Only cpu_idle may call this function.
+ * Fixup the clock comparator.
  */
-static void start_hz_timer(void)
+static void fixup_clock_comparator(unsigned long long delta)
 {
-	if (!cpu_isset(smp_processor_id(), nohz_cpu_mask))
+	/* If nobody is waiting there's nothing to fix. */
+	if (S390_lowcore.clock_comparator == -1ULL)
 		return;
-	account_ticks(get_clock());
-	set_clock_comparator(S390_lowcore.jiffy_timer + CPU_DEVIATION);
-	cpu_clear(smp_processor_id(), nohz_cpu_mask);
-}
-
-static int nohz_idle_notify(struct notifier_block *self,
-			    unsigned long action, void *hcpu)
-{
-	switch (action) {
-	case S390_CPU_IDLE:
-		stop_hz_timer();
-		break;
-	case S390_CPU_NOT_IDLE:
-		start_hz_timer();
-		break;
-	}
-	return NOTIFY_OK;
+	S390_lowcore.clock_comparator += delta;
+	set_clock_comparator(S390_lowcore.clock_comparator);
 }
 
-static struct notifier_block nohz_idle_nb = {
-	.notifier_call = nohz_idle_notify,
-};
-
-static void __init nohz_init(void)
+static int s390_next_event(unsigned long delta,
+			   struct clock_event_device *evt)
 {
-	if (register_idle_notifier(&nohz_idle_nb))
-		panic("Couldn't register idle notifier");
+	S390_lowcore.clock_comparator = get_clock() + delta;
+	set_clock_comparator(S390_lowcore.clock_comparator);
+	return 0;
 }
 
-#endif
-
-/*
- * Set up per cpu jiffy timer and set the clock comparator.
- */
-static void setup_jiffy_timer(void)
+static void s390_set_mode(enum clock_event_mode mode,
+			  struct clock_event_device *evt)
 {
-	/* Set up clock comparator to next jiffy. */
-	S390_lowcore.jiffy_timer =
-		jiffies_timer_cc + (jiffies_64 + 1) * CLK_TICKS_PER_JIFFY;
-	set_clock_comparator(S390_lowcore.jiffy_timer + CPU_DEVIATION);
 }
 
 /*
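
Note (not part of the patch): clock_comparator_work() first parks the comparator at -1ULL, the largest possible TOD value, so no further comparator interrupt can fire while the current event is handled, then calls the handler that the clockevents core installed for the per-cpu device. In oneshot mode that handler re-arms the comparator through s390_next_event(). A condensed sketch of the round trip, assuming the device is used in oneshot mode:

	/* illustrative only, not added by the patch */
	static void comparator_round_trip(void)
	{
		/* 1. an interrupt path notices the event is due: */
		clock_comparator_work();	/* comparator := -1ULL, then cd->event_handler(cd) */
		/* 2. the tick code expires timers and re-arms the device:
		 *    s390_next_event(delta, cd)
		 *        calls set_clock_comparator(get_clock() + delta);
		 */
	}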
@@ -259,7 +137,26 @@ static void setup_jiffy_timer(void)
  */
 void init_cpu_timer(void)
 {
-	setup_jiffy_timer();
+	struct clock_event_device *cd;
+	int cpu;
+
+	S390_lowcore.clock_comparator = -1ULL;
+	set_clock_comparator(S390_lowcore.clock_comparator);
+
+	cpu = smp_processor_id();
+	cd = &per_cpu(comparators, cpu);
+	cd->name = "comparator";
+	cd->features = CLOCK_EVT_FEAT_ONESHOT;
+	cd->mult = 16777;
+	cd->shift = 12;
+	cd->min_delta_ns = 1;
+	cd->max_delta_ns = LONG_MAX;
+	cd->rating = 400;
+	cd->cpumask = cpumask_of_cpu(cpu);
+	cd->set_next_event = s390_next_event;
+	cd->set_mode = s390_set_mode;
+
+	clockevents_register_device(cd);
 
 	/* Enable clock comparator timer interrupt. */
 	__ctl_set_bit(0,11);
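
Note (not part of the patch): the conversion factors set in init_cpu_timer() encode the TOD clock rate. The TOD clock advances by 4096 units per microsecond, i.e. 4.096 units per nanosecond, and the clockevents core turns a nanosecond delta into device units roughly as (ns * mult) >> shift. With mult = 16777 and shift = 12 that is 16777 / 4096 = 4.0959..., a close approximation of 4.096:

	/* illustrative helper, not added by the patch */
	static unsigned long long ns_to_tod(unsigned long long ns)
	{
		return (ns * 16777) >> 12;	/* roughly ns * 4.096 TOD units */
	}

For example, a 1 ms delta maps to (1000000 * 16777) >> 12 = 4095947 TOD units, about 53 units (13 ns) short of the exact 4096000.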
@@ -270,8 +167,6 @@ void init_cpu_timer(void)
 
 static void clock_comparator_interrupt(__u16 code)
 {
-	/* set clock comparator for next tick */
-	set_clock_comparator(S390_lowcore.jiffy_timer + CPU_DEVIATION);
 }
 
 static void etr_reset(void);
@@ -316,8 +211,9 @@ static struct clocksource clocksource_tod = {
  */
 void __init time_init(void)
 {
+	u64 init_timer_cc;
+
 	init_timer_cc = reset_tod_clock();
-	xtime_cc = init_timer_cc + CLK_TICKS_PER_JIFFY;
 	jiffies_timer_cc = init_timer_cc - jiffies_64 * CLK_TICKS_PER_JIFFY;
 
 	/* set xtime */
@@ -342,10 +238,6 @@ void __init time_init(void)
 	/* Enable TOD clock interrupts on the boot cpu. */
 	init_cpu_timer();
 
-#ifdef CONFIG_NO_IDLE_HZ
-	nohz_init();
-#endif
-
 #ifdef CONFIG_VIRT_TIMER
 	vtime_init();
 #endif
@@ -699,53 +591,49 @@ static int etr_aib_follows(struct etr_aib *a1, struct etr_aib *a2, int p)
 }
 
 /*
- * The time is "clock". xtime is what we think the time is.
+ * The time is "clock". old is what we think the time is.
  * Adjust the value by a multiple of jiffies and add the delta to ntp.
  * "delay" is an approximation how long the synchronization took. If
  * the time correction is positive, then "delay" is subtracted from
  * the time difference and only the remaining part is passed to ntp.
  */
-static void etr_adjust_time(unsigned long long clock, unsigned long long delay)
+static unsigned long long etr_adjust_time(unsigned long long old,
+					  unsigned long long clock,
+					  unsigned long long delay)
 {
 	unsigned long long delta, ticks;
 	struct timex adjust;
 
-	/*
-	 * We don't have to take the xtime lock because the cpu
-	 * executing etr_adjust_time is running disabled in
-	 * tasklet context and all other cpus are looping in
-	 * etr_sync_cpu_start.
-	 */
-	if (clock > xtime_cc) {
+	if (clock > old) {
 		/* It is later than we thought. */
-		delta = ticks = clock - xtime_cc;
+		delta = ticks = clock - old;
 		delta = ticks = (delta < delay) ? 0 : delta - delay;
 		delta -= do_div(ticks, CLK_TICKS_PER_JIFFY);
-		init_timer_cc = init_timer_cc + delta;
-		jiffies_timer_cc = jiffies_timer_cc + delta;
-		xtime_cc = xtime_cc + delta;
 		adjust.offset = ticks * (1000000 / HZ);
 	} else {
 		/* It is earlier than we thought. */
-		delta = ticks = xtime_cc - clock;
+		delta = ticks = old - clock;
 		delta -= do_div(ticks, CLK_TICKS_PER_JIFFY);
-		init_timer_cc = init_timer_cc - delta;
-		jiffies_timer_cc = jiffies_timer_cc - delta;
-		xtime_cc = xtime_cc - delta;
+		delta = -delta;
 		adjust.offset = -ticks * (1000000 / HZ);
 	}
+	jiffies_timer_cc += delta;
 	if (adjust.offset != 0) {
 		printk(KERN_NOTICE "etr: time adjusted by %li micro-seconds\n",
 		       adjust.offset);
 		adjust.modes = ADJ_OFFSET_SINGLESHOT;
 		do_adjtimex(&adjust);
 	}
+	return delta;
 }
 
+static struct {
+	int in_sync;
+	unsigned long long fixup_cc;
+} etr_sync;
+
 static void etr_sync_cpu_start(void *dummy)
 {
-	int *in_sync = dummy;
-
 	etr_enable_sync_clock();
 	/*
 	 * This looks like a busy wait loop but it isn't. etr_sync_cpus
@@ -753,7 +641,7 @@ static void etr_sync_cpu_start(void *dummy)
 	 * __udelay will stop the cpu on an enabled wait psw until the
 	 * TOD is running again.
 	 */
-	while (*in_sync == 0) {
+	while (etr_sync.in_sync == 0) {
 		__udelay(1);
 		/*
 		 * A different cpu changes *in_sync. Therefore use
@@ -761,14 +649,14 @@ static void etr_sync_cpu_start(void *dummy)
 		 */
 		barrier();
 	}
-	if (*in_sync != 1)
+	if (etr_sync.in_sync != 1)
 		/* Didn't work. Clear per-cpu in sync bit again. */
 		etr_disable_sync_clock(NULL);
 	/*
 	 * This round of TOD syncing is done. Set the clock comparator
 	 * to the next tick and let the processor continue.
 	 */
-	setup_jiffy_timer();
+	fixup_clock_comparator(etr_sync.fixup_cc);
 }
 
 static void etr_sync_cpu_end(void *dummy)
@@ -783,8 +671,8 @@ static void etr_sync_cpu_end(void *dummy)
 static int etr_sync_clock(struct etr_aib *aib, int port)
 {
 	struct etr_aib *sync_port;
-	unsigned long long clock, delay;
-	int in_sync, follows;
+	unsigned long long clock, old_clock, delay, delta;
+	int follows;
 	int rc;
 
 	/* Check if the current aib is adjacent to the sync port aib. */
@@ -799,9 +687,9 @@ static int etr_sync_clock(struct etr_aib *aib, int port)
 	 * successfully synced the clock. smp_call_function will
 	 * return after all other cpus are in etr_sync_cpu_start.
 	 */
-	in_sync = 0;
+	memset(&etr_sync, 0, sizeof(etr_sync));
 	preempt_disable();
-	smp_call_function(etr_sync_cpu_start,&in_sync,0,0);
+	smp_call_function(etr_sync_cpu_start, NULL, 0, 0);
 	local_irq_disable();
 	etr_enable_sync_clock();
 
@@ -809,6 +697,7 @@ static int etr_sync_clock(struct etr_aib *aib, int port)
 	__ctl_set_bit(14, 21);
 	__ctl_set_bit(0, 29);
 	clock = ((unsigned long long) (aib->edf2.etv + 1)) << 32;
+	old_clock = get_clock();
 	if (set_clock(clock) == 0) {
 		__udelay(1);	/* Wait for the clock to start. */
 		__ctl_clear_bit(0, 29);
@@ -817,16 +706,17 @@ static int etr_sync_clock(struct etr_aib *aib, int port)
 		/* Adjust Linux timing variables. */
 		delay = (unsigned long long)
 			(aib->edf2.etv - sync_port->edf2.etv) << 32;
-		etr_adjust_time(clock, delay);
-		setup_jiffy_timer();
+		delta = etr_adjust_time(old_clock, clock, delay);
+		etr_sync.fixup_cc = delta;
+		fixup_clock_comparator(delta);
 		/* Verify that the clock is properly set. */
 		if (!etr_aib_follows(sync_port, aib, port)) {
 			/* Didn't work. */
 			etr_disable_sync_clock(NULL);
-			in_sync = -EAGAIN;
+			etr_sync.in_sync = -EAGAIN;
 			rc = -EAGAIN;
 		} else {
-			in_sync = 1;
+			etr_sync.in_sync = 1;
 			rc = 0;
 		}
 	} else {
@@ -834,7 +724,7 @@ static int etr_sync_clock(struct etr_aib *aib, int port)
 		__ctl_clear_bit(0, 29);
 		__ctl_clear_bit(14, 21);
 		etr_disable_sync_clock(NULL);
-		in_sync = -EAGAIN;
+		etr_sync.in_sync = -EAGAIN;
 		rc = -EAGAIN;
 	}
 	local_irq_enable();
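
Note (not part of the patch): etr_adjust_time() now returns how far the TOD clock was moved, and that delta has to reach every cpu's pending wakeup. The cpu performing the sync applies it directly with fixup_clock_comparator(delta); the other cpus, which wait in etr_sync_cpu_start() while the TOD clock is stopped, apply etr_sync.fixup_cc once in_sync is set. Roughly:

	/* illustrative only, not added by the patch */
	static void propagate_etr_delta(unsigned long long delta)
	{
		etr_sync.fixup_cc = delta;	/* cpus waiting in etr_sync_cpu_start() */
		fixup_clock_comparator(delta);	/* the cpu doing the sync */
	}

fixup_clock_comparator() skips cpus whose comparator is parked at -1ULL, since they have no pending event to shift.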
diff --git a/arch/s390/lib/delay.c b/arch/s390/lib/delay.c
index 70f2a862b670..eae21a8ac72d 100644
--- a/arch/s390/lib/delay.c
+++ b/arch/s390/lib/delay.c
@@ -34,7 +34,7 @@ void __delay(unsigned long loops)
  */
 void __udelay(unsigned long usecs)
 {
-	u64 end, time, jiffy_timer = 0;
+	u64 end, time, old_cc = 0;
 	unsigned long flags, cr0, mask, dummy;
 	int irq_context;
 
@@ -43,8 +43,8 @@ void __udelay(unsigned long usecs)
 		local_bh_disable();
 	local_irq_save(flags);
 	if (raw_irqs_disabled_flags(flags)) {
-		jiffy_timer = S390_lowcore.jiffy_timer;
-		S390_lowcore.jiffy_timer = -1ULL - (4096 << 12);
+		old_cc = S390_lowcore.clock_comparator;
+		S390_lowcore.clock_comparator = -1ULL;
 		__ctl_store(cr0, 0, 0);
 		dummy = (cr0 & 0xffff00e0) | 0x00000800;
 		__ctl_load(dummy , 0, 0);
@@ -55,8 +55,8 @@ void __udelay(unsigned long usecs)
 
 	end = get_clock() + ((u64) usecs << 12);
 	do {
-		time = end < S390_lowcore.jiffy_timer ?
-			end : S390_lowcore.jiffy_timer;
+		time = end < S390_lowcore.clock_comparator ?
+			end : S390_lowcore.clock_comparator;
 		set_clock_comparator(time);
 		trace_hardirqs_on();
 		__load_psw_mask(mask);
@@ -65,10 +65,10 @@ void __udelay(unsigned long usecs)
 
 	if (raw_irqs_disabled_flags(flags)) {
 		__ctl_load(cr0, 0, 0);
-		S390_lowcore.jiffy_timer = jiffy_timer;
+		S390_lowcore.clock_comparator = old_cc;
 	}
 	if (!irq_context)
 		_local_bh_enable();
-	set_clock_comparator(S390_lowcore.jiffy_timer);
+	set_clock_comparator(S390_lowcore.clock_comparator);
 	local_irq_restore(flags);
 }
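
Note (not part of the patch): __udelay() programs the clock comparator and waits with interrupts enabled until it fires. Each loop iteration arms the comparator for whichever comes first, the end of the delay or the value already stored in the lowcore; when __udelay() is entered with interrupts disabled the saved comparator is parked at -1ULL, so the minimum always picks the delay end. What the ternary computes:

	/* illustrative helper, not added by the patch */
	static u64 next_udelay_wakeup(u64 end)
	{
		return end < S390_lowcore.clock_comparator ?
			end : S390_lowcore.clock_comparator;	/* min(end, pending comparator) */
	}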
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index 6dbe9488d3f9..41db3cc653f5 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -651,12 +651,9 @@ do_IRQ (struct pt_regs *regs)
 	old_regs = set_irq_regs(regs);
 	irq_enter();
 	s390_idle_check();
-	if (S390_lowcore.int_clock >= S390_lowcore.jiffy_timer)
-		/**
-		 * Make sure that the i/o interrupt did not "overtake"
-		 * the last HZ timer interrupt.
-		 */
-		account_ticks(S390_lowcore.int_clock);
+	if (S390_lowcore.int_clock >= S390_lowcore.clock_comparator)
+		/* Serve timer interrupts first. */
+		clock_comparator_work();
 	/*
 	 * Get interrupt information from lowcore
 	 */
diff --git a/include/asm-s390/hardirq.h b/include/asm-s390/hardirq.h
index 31beb18cb3d1..4b7cb964ff35 100644
--- a/include/asm-s390/hardirq.h
+++ b/include/asm-s390/hardirq.h
@@ -32,6 +32,6 @@ typedef struct {
 
 #define HARDIRQ_BITS	8
 
-extern void account_ticks(u64 time);
+void clock_comparator_work(void);
 
 #endif /* __ASM_HARDIRQ_H */
diff --git a/include/asm-s390/lowcore.h b/include/asm-s390/lowcore.h
index 801a6fd35b5b..6baa51b8683b 100644
--- a/include/asm-s390/lowcore.h
+++ b/include/asm-s390/lowcore.h
@@ -80,7 +80,6 @@
 #define __LC_CPUID			0xC60
 #define __LC_CPUADDR			0xC68
 #define __LC_IPLDEV			0xC7C
-#define __LC_JIFFY_TIMER		0xC80
 #define __LC_CURRENT			0xC90
 #define __LC_INT_CLOCK			0xC98
 #else /* __s390x__ */
@@ -103,7 +102,6 @@
 #define __LC_CPUID			0xD80
 #define __LC_CPUADDR			0xD88
 #define __LC_IPLDEV			0xDB8
-#define __LC_JIFFY_TIMER		0xDC0
 #define __LC_CURRENT			0xDD8
 #define __LC_INT_CLOCK			0xDE8
 #endif /* __s390x__ */
@@ -276,7 +274,7 @@ struct _lowcore
 	/* entry.S sensitive area end */
 
 	/* SMP info area: defined by DJB */
-	__u64	jiffy_timer;		/* 0xc80 */
+	__u64	clock_comparator;	/* 0xc80 */
 	__u32	ext_call_fast;		/* 0xc88 */
 	__u32	percpu_offset;		/* 0xc8c */
 	__u32	current_task;		/* 0xc90 */
@@ -368,7 +366,7 @@ struct _lowcore
 	/* entry.S sensitive area end */
 
 	/* SMP info area: defined by DJB */
-	__u64	jiffy_timer;		/* 0xdc0 */
+	__u64	clock_comparator;	/* 0xdc0 */
 	__u64	ext_call_fast;		/* 0xdc8 */
 	__u64	percpu_offset;		/* 0xdd0 */
 	__u64	current_task;		/* 0xdd8 */
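
Note (not part of the patch): the rename keeps the lowcore layout intact. The field stays at its documented offset, 0xc80 on 31 bit and 0xdc0 on 64 bit, and the __LC_JIFFY_TIMER asm offset goes away with the old name. A hypothetical compile-time check, purely for illustration:

	#include <linux/kernel.h>
	#include <linux/stddef.h>

	static inline void check_clock_comparator_offset(void)
	{
	#ifndef __s390x__
		BUILD_BUG_ON(offsetof(struct _lowcore, clock_comparator) != 0xc80);
	#else
		BUILD_BUG_ON(offsetof(struct _lowcore, clock_comparator) != 0xdc0);
	#endif
	}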
