 arch/um/Kconfig                         |  12
 arch/um/configs/i386_defconfig          |   1
 arch/um/configs/x86_64_defconfig        |   1
 arch/um/drivers/chan_kern.c             |  52
 arch/um/drivers/ssl.c                   |   1
 arch/um/drivers/ssl.h                   |  13
 arch/um/include/asm/mmu_context.h       |   2
 arch/um/include/shared/os.h             |  10
 arch/um/include/shared/timer-internal.h |  48
 arch/um/kernel/irq.c                    |   9
 arch/um/kernel/process.c                |  42
 arch/um/kernel/skas/Makefile            |   2
 arch/um/kernel/skas/syscall.c           |  11
 arch/um/kernel/time.c                   | 131
 arch/um/os-Linux/time.c                 | 127
 15 files changed, 320 insertions(+), 142 deletions(-)
diff --git a/arch/um/Kconfig b/arch/um/Kconfig
index 6b6eb938fcc1..3c3adfc486f2 100644
--- a/arch/um/Kconfig
+++ b/arch/um/Kconfig
@@ -184,6 +184,18 @@ config SECCOMP
 
 	  If unsure, say Y.
 
+config UML_TIME_TRAVEL_SUPPORT
+	bool
+	prompt "Support time-travel mode (e.g. for test execution)"
+	help
+	  Enable this option to support time travel inside the UML instance.
+
+	  After enabling this option, two modes are accessible at runtime
+	  (selected by the kernel command line), see the kernel's command-
+	  line help for more details.
+
+	  It is safe to say Y, but you probably don't need this.
+
 endmenu
 
 source "arch/um/drivers/Kconfig"
diff --git a/arch/um/configs/i386_defconfig b/arch/um/configs/i386_defconfig
index 8f114e3b0a7a..73e98bb57bf5 100644
--- a/arch/um/configs/i386_defconfig
+++ b/arch/um/configs/i386_defconfig
@@ -36,7 +36,6 @@ CONFIG_XTERM_CHAN=y
 CONFIG_CON_CHAN="pts"
 CONFIG_SSL_CHAN="pts"
 CONFIG_UML_SOUND=m
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
 CONFIG_DEVTMPFS=y
 CONFIG_DEVTMPFS_MOUNT=y
 CONFIG_BLK_DEV_UBD=y
diff --git a/arch/um/configs/x86_64_defconfig b/arch/um/configs/x86_64_defconfig
index 5d0875fc0db2..3281d7600225 100644
--- a/arch/um/configs/x86_64_defconfig
+++ b/arch/um/configs/x86_64_defconfig
@@ -34,7 +34,6 @@ CONFIG_XTERM_CHAN=y
 CONFIG_CON_CHAN="pts"
 CONFIG_SSL_CHAN="pts"
 CONFIG_UML_SOUND=m
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
 CONFIG_DEVTMPFS=y
 CONFIG_DEVTMPFS_MOUNT=y
 CONFIG_BLK_DEV_UBD=y
diff --git a/arch/um/drivers/chan_kern.c b/arch/um/drivers/chan_kern.c
index a4e64edb8f38..749d2bf59599 100644
--- a/arch/um/drivers/chan_kern.c
+++ b/arch/um/drivers/chan_kern.c
@@ -171,19 +171,55 @@ int enable_chan(struct line *line)
 	return err;
 }
 
+/* Items are added in IRQ context, when free_irq can't be called, and
+ * removed in process context, when it can.
+ * This handles interrupt sources which disappear, and which need to
+ * be permanently disabled. This is discovered in IRQ context, but
+ * the freeing of the IRQ must be done later.
+ */
+static DEFINE_SPINLOCK(irqs_to_free_lock);
+static LIST_HEAD(irqs_to_free);
+
+void free_irqs(void)
+{
+	struct chan *chan;
+	LIST_HEAD(list);
+	struct list_head *ele;
+	unsigned long flags;
+
+	spin_lock_irqsave(&irqs_to_free_lock, flags);
+	list_splice_init(&irqs_to_free, &list);
+	spin_unlock_irqrestore(&irqs_to_free_lock, flags);
+
+	list_for_each(ele, &list) {
+		chan = list_entry(ele, struct chan, free_list);
+
+		if (chan->input && chan->enabled)
+			um_free_irq(chan->line->driver->read_irq, chan);
+		if (chan->output && chan->enabled)
+			um_free_irq(chan->line->driver->write_irq, chan);
+		chan->enabled = 0;
+	}
+}
+
 static void close_one_chan(struct chan *chan, int delay_free_irq)
 {
+	unsigned long flags;
+
 	if (!chan->opened)
 		return;
 
-	/* we can safely call free now - it will be marked
-	 * as free and freed once the IRQ stopped processing
-	 */
-	if (chan->input && chan->enabled)
-		um_free_irq(chan->line->driver->read_irq, chan);
-	if (chan->output && chan->enabled)
-		um_free_irq(chan->line->driver->write_irq, chan);
-	chan->enabled = 0;
+	if (delay_free_irq) {
+		spin_lock_irqsave(&irqs_to_free_lock, flags);
+		list_add(&chan->free_list, &irqs_to_free);
+		spin_unlock_irqrestore(&irqs_to_free_lock, flags);
+	} else {
+		if (chan->input && chan->enabled)
+			um_free_irq(chan->line->driver->read_irq, chan);
+		if (chan->output && chan->enabled)
+			um_free_irq(chan->line->driver->write_irq, chan);
+		chan->enabled = 0;
+	}
 	if (chan->ops->close != NULL)
 		(*chan->ops->close)(chan->fd, chan->data);
 
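
The close path above can no longer call um_free_irq() directly when it runs in IRQ context, so it queues the channel on a spinlock-protected list that free_irqs() later drains from process context. A minimal sketch of the same defer-and-splice pattern; the names deferred_item, defer_free and drain_deferred are hypothetical and not part of this patch:

	#include <linux/list.h>
	#include <linux/spinlock.h>

	struct deferred_item {
		struct list_head list;
		/* resources that must be released in process context */
	};

	static DEFINE_SPINLOCK(deferred_lock);
	static LIST_HEAD(deferred_items);

	/* IRQ context: only queue the item; releasing it here would be unsafe */
	static void defer_free(struct deferred_item *item)
	{
		unsigned long flags;

		spin_lock_irqsave(&deferred_lock, flags);
		list_add(&item->list, &deferred_items);
		spin_unlock_irqrestore(&deferred_lock, flags);
	}

	/* process context: take the whole list in one step, then work unlocked */
	static void drain_deferred(void)
	{
		struct deferred_item *item, *tmp;
		unsigned long flags;
		LIST_HEAD(local);

		spin_lock_irqsave(&deferred_lock, flags);
		list_splice_init(&deferred_items, &local);
		spin_unlock_irqrestore(&deferred_lock, flags);

		list_for_each_entry_safe(item, tmp, &local, list) {
			list_del(&item->list);
			/* e.g. um_free_irq()/kfree() would go here */
		}
	}

Splicing under the lock keeps the critical section tiny; the potentially slow frees then run without the lock held, which is why close_one_chan() only queues the channel when delay_free_irq is set.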
diff --git a/arch/um/drivers/ssl.c b/arch/um/drivers/ssl.c
index b8d14fa52059..7ae407d5337e 100644
--- a/arch/um/drivers/ssl.c
+++ b/arch/um/drivers/ssl.c
@@ -12,7 +12,6 @@
 #include <linux/console.h>
 #include <asm/termbits.h>
 #include <asm/irq.h>
-#include "ssl.h"
 #include "chan.h"
 #include <init.h>
 #include <irq_user.h>
diff --git a/arch/um/drivers/ssl.h b/arch/um/drivers/ssl.h
deleted file mode 100644
index 314d17725ce6..000000000000
--- a/arch/um/drivers/ssl.h
+++ /dev/null
@@ -1,13 +0,0 @@
-/*
- * Copyright (C) 2000 Jeff Dike (jdike@karaya.com)
- * Licensed under the GPL
- */
-
-#ifndef __SSL_H__
-#define __SSL_H__
-
-extern int ssl_read(int fd, int line);
-extern void ssl_receive_char(int line, char ch);
-
-#endif
-
diff --git a/arch/um/include/asm/mmu_context.h b/arch/um/include/asm/mmu_context.h
index 9f4b4bb78120..00cefd33afdd 100644
--- a/arch/um/include/asm/mmu_context.h
+++ b/arch/um/include/asm/mmu_context.h
@@ -52,7 +52,7 @@ static inline void activate_mm(struct mm_struct *old, struct mm_struct *new)
 	 * when the new ->mm is used for the first time.
 	 */
 	__switch_mm(&new->context.id);
-	down_write(&new->mmap_sem);
+	down_write_nested(&new->mmap_sem, 1);
 	uml_setup_stubs(new);
 	up_write(&new->mmap_sem);
 }
diff --git a/arch/um/include/shared/os.h b/arch/um/include/shared/os.h
index ebf23012a59b..4a62ac4251a5 100644
--- a/arch/um/include/shared/os.h
+++ b/arch/um/include/shared/os.h
@@ -250,15 +250,13 @@ extern void os_warn(const char *fmt, ...)
 
 /* time.c */
 extern void os_idle_sleep(unsigned long long nsecs);
-extern int os_timer_create(void* timer);
-extern int os_timer_set_interval(void* timer, void* its);
-extern int os_timer_one_shot(int ticks);
-extern long long os_timer_disable(void);
-extern long os_timer_remain(void* timer);
+extern int os_timer_create(void);
+extern int os_timer_set_interval(unsigned long long nsecs);
+extern int os_timer_one_shot(unsigned long long nsecs);
+extern void os_timer_disable(void);
 extern void uml_idle_timer(void);
 extern long long os_persistent_clock_emulation(void);
 extern long long os_nsecs(void);
-extern long long os_vnsecs(void);
 
 /* skas/mem.c */
 extern long run_syscall_stub(struct mm_id * mm_idp,
diff --git a/arch/um/include/shared/timer-internal.h b/arch/um/include/shared/timer-internal.h
index 03e6f217f807..8574338bf23b 100644
--- a/arch/um/include/shared/timer-internal.h
+++ b/arch/um/include/shared/timer-internal.h
@@ -10,4 +10,52 @@
 #define TIMER_MULTIPLIER 256
 #define TIMER_MIN_DELTA 500
 
+enum time_travel_mode {
+	TT_MODE_OFF,
+	TT_MODE_BASIC,
+	TT_MODE_INFCPU,
+};
+
+enum time_travel_timer_mode {
+	TT_TMR_DISABLED,
+	TT_TMR_ONESHOT,
+	TT_TMR_PERIODIC,
+};
+
+#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
+extern enum time_travel_mode time_travel_mode;
+extern unsigned long long time_travel_time;
+extern enum time_travel_timer_mode time_travel_timer_mode;
+extern unsigned long long time_travel_timer_expiry;
+extern unsigned long long time_travel_timer_interval;
+
+static inline void time_travel_set_time(unsigned long long ns)
+{
+	time_travel_time = ns;
+}
+
+static inline void time_travel_set_timer(enum time_travel_timer_mode mode,
+					 unsigned long long expiry)
+{
+	time_travel_timer_mode = mode;
+	time_travel_timer_expiry = expiry;
+}
+#else
+#define time_travel_mode TT_MODE_OFF
+#define time_travel_time 0
+#define time_travel_timer_expiry 0
+#define time_travel_timer_interval 0
+
+static inline void time_travel_set_time(unsigned long long ns)
+{
+}
+
+static inline void time_travel_set_timer(enum time_travel_timer_mode mode,
+					 unsigned long long expiry)
+{
+}
+
+#define time_travel_timer_mode TT_TMR_DISABLED
+#endif
+
 #endif
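
With CONFIG_UML_TIME_TRAVEL_SUPPORT disabled, time_travel_mode is the literal constant TT_MODE_OFF and the setters are empty inlines, so callers can test the mode unconditionally and the compiler discards the dead branch. A hedged sketch of such a caller; example_shutdown() is hypothetical and simply mirrors itimer_shutdown() later in this patch:

	static int example_shutdown(void)
	{
		/* constant-false when the Kconfig option is off, so this folds away */
		if (time_travel_mode != TT_MODE_OFF)
			time_travel_set_timer(TT_TMR_DISABLED, 0);

		/* only touch the host timer when not simulating infinite CPU */
		if (time_travel_mode != TT_MODE_INFCPU)
			os_timer_disable();

		return 0;
	}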
diff --git a/arch/um/kernel/irq.c b/arch/um/kernel/irq.c
index 598d7b3d9355..efde1f16c603 100644
--- a/arch/um/kernel/irq.c
+++ b/arch/um/kernel/irq.c
@@ -21,6 +21,8 @@
 #include <irq_user.h>
 
 
+extern void free_irqs(void);
+
 /* When epoll triggers we do not know why it did so
  * we can also have different IRQs for read and write.
  * This is why we keep a small irq_fd array for each fd -
@@ -100,6 +102,8 @@ void sigio_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs)
 			}
 		}
 	}
+
+	free_irqs();
 }
 
 static int assign_epoll_events_to_irq(struct irq_entry *irq_entry)
@@ -380,10 +384,8 @@ EXPORT_SYMBOL(deactivate_fd);
  */
 int deactivate_all_fds(void)
 {
-	unsigned long flags;
 	struct irq_entry *to_free;
 
-	spin_lock_irqsave(&irq_lock, flags);
 	/* Stop IO. The IRQ loop has no lock so this is our
 	 * only way of making sure we are safe to dispose
 	 * of all IRQ handlers
@@ -399,8 +401,7 @@ int deactivate_all_fds(void)
 		);
 		to_free = to_free->next;
 	}
-	garbage_collect_irq_entries();
-	spin_unlock_irqrestore(&irq_lock, flags);
+	/* don't garbage collect - we can no longer call kfree() here */
 	os_close_epoll_fd();
 	return 0;
 }
diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
index 691b83b10649..67c0d1a860e9 100644
--- a/arch/um/kernel/process.c
+++ b/arch/um/kernel/process.c
@@ -203,10 +203,50 @@ void initial_thread_cb(void (*proc)(void *), void *arg)
 	kmalloc_ok = save_kmalloc_ok;
 }
 
+static void time_travel_sleep(unsigned long long duration)
+{
+	unsigned long long next = time_travel_time + duration;
+
+	if (time_travel_mode != TT_MODE_INFCPU)
+		os_timer_disable();
+
+	if (time_travel_timer_mode != TT_TMR_DISABLED ||
+	    time_travel_timer_expiry < next) {
+		if (time_travel_timer_mode == TT_TMR_ONESHOT)
+			time_travel_set_timer(TT_TMR_DISABLED, 0);
+		/*
+		 * time_travel_time will be adjusted in the timer
+		 * IRQ handler so it works even when the signal
+		 * comes from the OS timer
+		 */
+		deliver_alarm();
+	} else {
+		time_travel_set_time(next);
+	}
+
+	if (time_travel_mode != TT_MODE_INFCPU) {
+		if (time_travel_timer_mode == TT_TMR_PERIODIC)
+			os_timer_set_interval(time_travel_timer_interval);
+		else if (time_travel_timer_mode == TT_TMR_ONESHOT)
+			os_timer_one_shot(time_travel_timer_expiry - next);
+	}
+}
+
+static void um_idle_sleep(void)
+{
+	unsigned long long duration = UM_NSEC_PER_SEC;
+
+	if (time_travel_mode != TT_MODE_OFF) {
+		time_travel_sleep(duration);
+	} else {
+		os_idle_sleep(duration);
+	}
+}
+
 void arch_cpu_idle(void)
 {
 	cpu_tasks[current_thread_info()->cpu].pid = os_getpid();
-	os_idle_sleep(UM_NSEC_PER_SEC);
+	um_idle_sleep();
 	local_irq_enable();
 }
 
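
For intuition, a standalone sketch of the decision um_idle_sleep()/time_travel_sleep() make above, with illustrative numbers and the timer-mode bookkeeping left out: if a timer is due before the requested wake-up, the alarm fires and the virtual clock only advances to the expiry; otherwise it jumps the whole duration at once.

	#include <stdio.h>

	int main(void)
	{
		unsigned long long now = 1000000ULL;          /* virtual time, ns */
		unsigned long long duration = 1000000000ULL;  /* 1 s idle request */
		unsigned long long expiry = now + 3000000ULL; /* one-shot timer in 3 ms */
		unsigned long long next = now + duration;

		if (expiry < next)
			/* deliver_alarm(); the timer IRQ sets the clock to 'expiry' */
			printf("alarm fires, clock jumps to %llu ns\n", expiry);
		else
			/* nothing due: skip straight to the end of the sleep */
			printf("clock jumps to %llu ns\n", next);
		return 0;
	}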
diff --git a/arch/um/kernel/skas/Makefile b/arch/um/kernel/skas/Makefile
index 0b76d8869c94..5bd3edfcfedf 100644
--- a/arch/um/kernel/skas/Makefile
+++ b/arch/um/kernel/skas/Makefile
@@ -12,4 +12,6 @@ obj-y := clone.o mmu.o process.o syscall.o uaccess.o
 CFLAGS_clone.o := $(CFLAGS_NO_HARDENING)
 UNPROFILE_OBJS := clone.o
 
+KCOV_INSTRUMENT := n
+
 include arch/um/scripts/Makefile.rules
diff --git a/arch/um/kernel/skas/syscall.c b/arch/um/kernel/skas/syscall.c
index b783ac87d98a..44bb10785075 100644
--- a/arch/um/kernel/skas/syscall.c
+++ b/arch/um/kernel/skas/syscall.c
@@ -10,12 +10,23 @@
 #include <sysdep/ptrace.h>
 #include <sysdep/ptrace_user.h>
 #include <sysdep/syscalls.h>
+#include <shared/timer-internal.h>
 
 void handle_syscall(struct uml_pt_regs *r)
 {
 	struct pt_regs *regs = container_of(r, struct pt_regs, regs);
 	int syscall;
 
+	/*
+	 * If we have infinite CPU resources, then make every syscall also a
+	 * preemption point, since we don't have any other preemption in this
+	 * case, and kernel threads would basically never run until userspace
+	 * went to sleep, even if said userspace interacts with the kernel in
+	 * various ways.
+	 */
+	if (time_travel_mode == TT_MODE_INFCPU)
+		schedule();
+
 	/* Initialize the syscall number and default return value. */
 	UPT_SYSCALL_NR(r) = PT_SYSCALL_NR(r->gp);
 	PT_REGS_SET_SYSCALL_RETURN(regs, -ENOSYS);
diff --git a/arch/um/kernel/time.c b/arch/um/kernel/time.c
index 0c572a48158e..6a051b078359 100644
--- a/arch/um/kernel/time.c
+++ b/arch/um/kernel/time.c
@@ -19,11 +19,29 @@
 #include <kern_util.h>
 #include <os.h>
 #include <timer-internal.h>
+#include <shared/init.h>
+
+#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
+enum time_travel_mode time_travel_mode;
+unsigned long long time_travel_time;
+enum time_travel_timer_mode time_travel_timer_mode;
+unsigned long long time_travel_timer_expiry;
+unsigned long long time_travel_timer_interval;
+
+static bool time_travel_start_set;
+static unsigned long long time_travel_start;
+#else
+#define time_travel_start_set 0
+#define time_travel_start 0
+#endif
 
 void timer_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs)
 {
 	unsigned long flags;
 
+	if (time_travel_mode != TT_MODE_OFF)
+		time_travel_set_time(time_travel_timer_expiry);
+
 	local_irq_save(flags);
 	do_IRQ(TIMER_IRQ, regs);
 	local_irq_restore(flags);
@@ -31,26 +49,47 @@ void timer_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs)
 
 static int itimer_shutdown(struct clock_event_device *evt)
 {
-	os_timer_disable();
+	if (time_travel_mode != TT_MODE_OFF)
+		time_travel_set_timer(TT_TMR_DISABLED, 0);
+
+	if (time_travel_mode != TT_MODE_INFCPU)
+		os_timer_disable();
+
 	return 0;
 }
 
 static int itimer_set_periodic(struct clock_event_device *evt)
 {
-	os_timer_set_interval(NULL, NULL);
+	unsigned long long interval = NSEC_PER_SEC / HZ;
+
+	if (time_travel_mode != TT_MODE_OFF)
+		time_travel_set_timer(TT_TMR_PERIODIC,
+				      time_travel_time + interval);
+
+	if (time_travel_mode != TT_MODE_INFCPU)
+		os_timer_set_interval(interval);
+
 	return 0;
 }
 
 static int itimer_next_event(unsigned long delta,
 			     struct clock_event_device *evt)
 {
-	return os_timer_one_shot(delta);
+	delta += 1;
+
+	if (time_travel_mode != TT_MODE_OFF)
+		time_travel_set_timer(TT_TMR_ONESHOT,
+				      time_travel_time + delta);
+
+	if (time_travel_mode != TT_MODE_INFCPU)
+		return os_timer_one_shot(delta);
+
+	return 0;
 }
 
 static int itimer_one_shot(struct clock_event_device *evt)
 {
-	os_timer_one_shot(1);
-	return 0;
+	return itimer_next_event(0, evt);
 }
 
 static struct clock_event_device timer_clockevent = {
@@ -87,6 +126,17 @@ static irqreturn_t um_timer(int irq, void *dev)
 
 static u64 timer_read(struct clocksource *cs)
 {
+	if (time_travel_mode != TT_MODE_OFF) {
+		/*
+		 * We make reading the timer cost a bit so that we don't get
+		 * stuck in loops that expect time to move more than the
+		 * exact requested sleep amount, e.g. python's socket server,
+		 * see https://bugs.python.org/issue37026.
+		 */
+		time_travel_set_time(time_travel_time + TIMER_MULTIPLIER);
+		return time_travel_time / TIMER_MULTIPLIER;
+	}
+
 	return os_nsecs() / TIMER_MULTIPLIER;
 }
 
@@ -107,7 +157,7 @@ static void __init um_timer_setup(void)
 		printk(KERN_ERR "register_timer : request_irq failed - "
 		       "errno = %d\n", -err);
 
-	err = os_timer_create(NULL);
+	err = os_timer_create();
 	if (err != 0) {
 		printk(KERN_ERR "creation of timer failed - errno = %d\n", -err);
 		return;
@@ -123,7 +173,12 @@ static void __init um_timer_setup(void)
 
 void read_persistent_clock64(struct timespec64 *ts)
 {
-	long long nsecs = os_persistent_clock_emulation();
+	long long nsecs;
+
+	if (time_travel_start_set)
+		nsecs = time_travel_start + time_travel_time;
+	else
+		nsecs = os_persistent_clock_emulation();
 
 	set_normalized_timespec64(ts, nsecs / NSEC_PER_SEC,
 				  nsecs % NSEC_PER_SEC);
@@ -134,3 +189,65 @@ void __init time_init(void)
 	timer_set_signal_handler();
 	late_time_init = um_timer_setup;
 }
+
+#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
+unsigned long calibrate_delay_is_known(void)
+{
+	if (time_travel_mode == TT_MODE_INFCPU)
+		return 1;
+	return 0;
+}
+
+int setup_time_travel(char *str)
+{
+	if (strcmp(str, "=inf-cpu") == 0) {
+		time_travel_mode = TT_MODE_INFCPU;
+		timer_clockevent.name = "time-travel-timer-infcpu";
+		timer_clocksource.name = "time-travel-clock";
+		return 1;
+	}
+
+	if (!*str) {
+		time_travel_mode = TT_MODE_BASIC;
+		timer_clockevent.name = "time-travel-timer";
+		timer_clocksource.name = "time-travel-clock";
+		return 1;
+	}
+
+	return -EINVAL;
+}
+
+__setup("time-travel", setup_time_travel);
+__uml_help(setup_time_travel,
+"time-travel\n"
+"This option just enables basic time travel mode, in which the clock/timers\n"
+"inside the UML instance skip forward when there's nothing to do, rather than\n"
+"waiting for real time to elapse. However, instance CPU speed is limited by\n"
+"the real CPU speed, so e.g. a 10ms timer will always fire after ~10ms wall\n"
+"clock (but quicker when there's nothing to do).\n"
+"\n"
+"time-travel=inf-cpu\n"
+"This enables time travel mode with infinite processing power, in which there\n"
+"are no wall clock timers, and any CPU processing happens - as seen from the\n"
+"guest - instantly. This can be useful for accurate simulation regardless of\n"
+"debug overhead, physical CPU speed, etc. but is somewhat dangerous as it can\n"
+"easily lead to getting stuck (e.g. if anything in the system busy loops).\n");
+
+int setup_time_travel_start(char *str)
+{
+	int err;
+
+	err = kstrtoull(str, 0, &time_travel_start);
+	if (err)
+		return err;
+
+	time_travel_start_set = 1;
+	return 1;
+}
+
+__setup("time-travel-start", setup_time_travel_start);
+__uml_help(setup_time_travel_start,
+"time-travel-start=<seconds>\n"
+"Configure the UML instance's wall clock to start at this value rather than\n"
+"the host's wall clock at the time of UML boot.\n");
+#endif
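
As a usage illustration of the options documented above (the ubd0 root filesystem argument is only a placeholder, not part of this patch), a UML instance might be launched as:

	./linux ubd0=root_fs time-travel
	./linux ubd0=root_fs time-travel=inf-cpu time-travel-start=1577836800

Per the help text, time-travel-start sets the instance's initial wall clock; setup_time_travel_start() parses the value with kstrtoull(), and read_persistent_clock64() then reports time_travel_start plus the elapsed virtual time instead of reading the host clock.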
diff --git a/arch/um/os-Linux/time.c b/arch/um/os-Linux/time.c
index 0e39b9978729..6d94ff52362c 100644
--- a/arch/um/os-Linux/time.c
+++ b/arch/um/os-Linux/time.c
@@ -26,11 +26,11 @@ static inline long long timeval_to_ns(const struct timeval *tv)
 
 static inline long long timespec_to_ns(const struct timespec *ts)
 {
-	return ((long long) ts->tv_sec * UM_NSEC_PER_SEC) +
-		ts->tv_nsec;
+	return ((long long) ts->tv_sec * UM_NSEC_PER_SEC) + ts->tv_nsec;
 }
 
-long long os_persistent_clock_emulation (void) {
+long long os_persistent_clock_emulation(void)
+{
 	struct timespec realtime_tp;
 
 	clock_gettime(CLOCK_REALTIME, &realtime_tp);
@@ -40,94 +40,41 @@ long long os_persistent_clock_emulation (void) {
 /**
  * os_timer_create() - create an new posix (interval) timer
  */
-int os_timer_create(void* timer) {
-
-	timer_t* t = timer;
-
-	if(t == NULL) {
-		t = &event_high_res_timer;
-	}
+int os_timer_create(void)
+{
+	timer_t *t = &event_high_res_timer;
 
-	if (timer_create(
-		CLOCK_MONOTONIC,
-		NULL,
-		t) == -1) {
+	if (timer_create(CLOCK_MONOTONIC, NULL, t) == -1)
 		return -1;
-	}
+
 	return 0;
 }
 
-int os_timer_set_interval(void* timer, void* i)
+int os_timer_set_interval(unsigned long long nsecs)
 {
 	struct itimerspec its;
-	unsigned long long nsec;
-	timer_t* t = timer;
-	struct itimerspec* its_in = i;
-
-	if(t == NULL) {
-		t = &event_high_res_timer;
-	}
 
-	nsec = UM_NSEC_PER_SEC / UM_HZ;
+	its.it_value.tv_sec = nsecs / UM_NSEC_PER_SEC;
+	its.it_value.tv_nsec = nsecs % UM_NSEC_PER_SEC;
 
-	if(its_in != NULL) {
-		its.it_value.tv_sec = its_in->it_value.tv_sec;
-		its.it_value.tv_nsec = its_in->it_value.tv_nsec;
-	} else {
-		its.it_value.tv_sec = 0;
-		its.it_value.tv_nsec = nsec;
-	}
+	its.it_interval.tv_sec = nsecs / UM_NSEC_PER_SEC;
+	its.it_interval.tv_nsec = nsecs % UM_NSEC_PER_SEC;
 
-	its.it_interval.tv_sec = 0;
-	its.it_interval.tv_nsec = nsec;
-
-	if(timer_settime(*t, 0, &its, NULL) == -1) {
+	if (timer_settime(event_high_res_timer, 0, &its, NULL) == -1)
 		return -errno;
-	}
 
 	return 0;
 }
 
-/**
- * os_timer_remain() - returns the remaining nano seconds of the given interval
- * timer
- * Because this is the remaining time of an interval timer, which correspondends
- * to HZ, this value can never be bigger than one second. Just
- * the nanosecond part of the timer is returned.
- * The returned time is relative to the start time of the interval timer.
- * Return an negative value in an error case.
- */
-long os_timer_remain(void* timer)
+int os_timer_one_shot(unsigned long long nsecs)
 {
-	struct itimerspec its;
-	timer_t* t = timer;
-
-	if(t == NULL) {
-		t = &event_high_res_timer;
-	}
-
-	if(timer_gettime(t, &its) == -1) {
-		return -errno;
-	}
+	struct itimerspec its = {
+		.it_value.tv_sec = nsecs / UM_NSEC_PER_SEC,
+		.it_value.tv_nsec = nsecs % UM_NSEC_PER_SEC,
 
-	return its.it_value.tv_nsec;
-}
-
-int os_timer_one_shot(int ticks)
-{
-	struct itimerspec its;
-	unsigned long long nsec;
-	unsigned long sec;
-
-	nsec = (ticks + 1);
-	sec = nsec / UM_NSEC_PER_SEC;
-	nsec = nsec % UM_NSEC_PER_SEC;
-
-	its.it_value.tv_sec = nsec / UM_NSEC_PER_SEC;
-	its.it_value.tv_nsec = nsec;
-
-	its.it_interval.tv_sec = 0;
-	its.it_interval.tv_nsec = 0; // we cheat here
+		.it_interval.tv_sec = 0,
+		.it_interval.tv_nsec = 0, // we cheat here
+	};
 
 	timer_settime(event_high_res_timer, 0, &its, NULL);
 	return 0;
@@ -135,24 +82,13 @@ int os_timer_one_shot(int ticks)
 
 /**
  * os_timer_disable() - disable the posix (interval) timer
- * Returns the remaining interval timer time in nanoseconds
  */
-long long os_timer_disable(void)
+void os_timer_disable(void)
 {
 	struct itimerspec its;
 
 	memset(&its, 0, sizeof(struct itimerspec));
-	timer_settime(event_high_res_timer, 0, &its, &its);
-
-	return its.it_value.tv_sec * UM_NSEC_PER_SEC + its.it_value.tv_nsec;
-}
-
-long long os_vnsecs(void)
-{
-	struct timespec ts;
-
-	clock_gettime(CLOCK_PROCESS_CPUTIME_ID,&ts);
-	return timespec_to_ns(&ts);
+	timer_settime(event_high_res_timer, 0, &its, NULL);
 }
 
 long long os_nsecs(void)
@@ -169,21 +105,14 @@ long long os_nsecs(void)
  */
 void os_idle_sleep(unsigned long long nsecs)
 {
-	struct timespec ts;
-
-	if (nsecs <= 0) {
-		return;
-	}
-
-	ts = ((struct timespec) {
-		.tv_sec = nsecs / UM_NSEC_PER_SEC,
-		.tv_nsec = nsecs % UM_NSEC_PER_SEC
-	});
+	struct timespec ts = {
+		.tv_sec = nsecs / UM_NSEC_PER_SEC,
+		.tv_nsec = nsecs % UM_NSEC_PER_SEC
+	};
 
 	/*
 	 * Relay the signal if clock_nanosleep is interrupted.
 	 */
-	if (clock_nanosleep(CLOCK_MONOTONIC, 0, &ts, NULL)) {
+	if (clock_nanosleep(CLOCK_MONOTONIC, 0, &ts, NULL))
 		deliver_alarm();
-	}
 }