author     Jeremy Erickson <jerickso@cs.unc.edu>    2012-08-30 21:01:47 -0400
committer  Jeremy Erickson <jerickso@cs.unc.edu>    2012-08-30 21:01:47 -0400
commit     b1e1fea67bca3796d5f9133a92c300ec4fa93a4f
tree       5cc1336e1fe1d6f93b1067e73e43381dd20db690 /arch/x86
parent     f6f94e2ab1b33f0082ac22d71f66385a60d8157f
branch     wip-splitting-omlp-jerickso

Bjoern's Dissertation Code with Priority Donation
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/Kconfig                        |   8
-rw-r--r--  arch/x86/include/asm/entry_arch.h       |   1
-rw-r--r--  arch/x86/include/asm/feather_trace.h    |  17
-rw-r--r--  arch/x86/include/asm/feather_trace_32.h |  79
-rw-r--r--  arch/x86/include/asm/feather_trace_64.h |  67
-rw-r--r--  arch/x86/include/asm/hw_irq.h           |   3
-rw-r--r--  arch/x86/include/asm/irq_vectors.h      |   5
-rw-r--r--  arch/x86/include/asm/processor.h        |   4
-rw-r--r--  arch/x86/include/asm/unistd_32.h        |   6
-rw-r--r--  arch/x86/include/asm/unistd_64.h        |   4
-rw-r--r--  arch/x86/kernel/Makefile                |   2
-rw-r--r--  arch/x86/kernel/cpu/intel_cacheinfo.c   |  17
-rw-r--r--  arch/x86/kernel/entry_64.S              |   2
-rw-r--r--  arch/x86/kernel/ft_event.c              | 118
-rw-r--r--  arch/x86/kernel/irqinit.c               |   3
-rw-r--r--  arch/x86/kernel/smp.c                   |  27
-rw-r--r--  arch/x86/kernel/syscall_table_32.S      |  12

17 files changed, 374 insertions(+), 1 deletion(-)
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index cea0cd9a316f..5181ed3a211a 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -2142,3 +2142,11 @@ source "crypto/Kconfig"
 source "arch/x86/kvm/Kconfig"
 
 source "lib/Kconfig"
+
+config ARCH_HAS_FEATHER_TRACE
+    def_bool y
+
+config ARCH_HAS_SEND_PULL_TIMERS
+    def_bool y
+
+source "litmus/Kconfig"
diff --git a/arch/x86/include/asm/entry_arch.h b/arch/x86/include/asm/entry_arch.h
index 8e8ec663a98f..5d07dea2ebb8 100644
--- a/arch/x86/include/asm/entry_arch.h
+++ b/arch/x86/include/asm/entry_arch.h
@@ -13,6 +13,7 @@
 BUILD_INTERRUPT(reschedule_interrupt,RESCHEDULE_VECTOR)
 BUILD_INTERRUPT(call_function_interrupt,CALL_FUNCTION_VECTOR)
 BUILD_INTERRUPT(call_function_single_interrupt,CALL_FUNCTION_SINGLE_VECTOR)
+BUILD_INTERRUPT(pull_timers_interrupt,PULL_TIMERS_VECTOR)
 BUILD_INTERRUPT(irq_move_cleanup_interrupt,IRQ_MOVE_CLEANUP_VECTOR)
 BUILD_INTERRUPT(reboot_interrupt,REBOOT_VECTOR)
 
diff --git a/arch/x86/include/asm/feather_trace.h b/arch/x86/include/asm/feather_trace.h
new file mode 100644
index 000000000000..4fd31633405d
--- /dev/null
+++ b/arch/x86/include/asm/feather_trace.h
@@ -0,0 +1,17 @@
+#ifndef _ARCH_FEATHER_TRACE_H
+#define _ARCH_FEATHER_TRACE_H
+
+#include <asm/msr.h>
+
+static inline unsigned long long ft_timestamp(void)
+{
+    return __native_read_tsc();
+}
+
+#ifdef CONFIG_X86_32
+#include "feather_trace_32.h"
+#else
+#include "feather_trace_64.h"
+#endif
+
+#endif
diff --git a/arch/x86/include/asm/feather_trace_32.h b/arch/x86/include/asm/feather_trace_32.h
new file mode 100644
index 000000000000..70202f90f169
--- /dev/null
+++ b/arch/x86/include/asm/feather_trace_32.h
@@ -0,0 +1,79 @@
+/* Do not directly include this file. Include feather_trace.h instead */
+
+#define feather_callback __attribute__((regparm(0)))
+
+/*
+ * make the compiler reload any register that is not saved in
+ * a cdecl function call
+ */
+#define CLOBBER_LIST "memory", "cc", "eax", "ecx", "edx"
+
+#define ft_event(id, callback) \
+    __asm__ __volatile__( \
+        "1: jmp 2f \n\t" \
+        " call " #callback " \n\t" \
+        ".section __event_table, \"aw\" \n\t" \
+        ".long " #id ", 0, 1b, 2f \n\t" \
+        ".previous \n\t" \
+        "2: \n\t" \
+        : : : CLOBBER_LIST)
+
+#define ft_event0(id, callback) \
+    __asm__ __volatile__( \
+        "1: jmp 2f \n\t" \
+        " subl $4, %%esp \n\t" \
+        " movl $" #id ", (%%esp) \n\t" \
+        " call " #callback " \n\t" \
+        " addl $4, %%esp \n\t" \
+        ".section __event_table, \"aw\" \n\t" \
+        ".long " #id ", 0, 1b, 2f \n\t" \
+        ".previous \n\t" \
+        "2: \n\t" \
+        : : : CLOBBER_LIST)
+
+#define ft_event1(id, callback, param) \
+    __asm__ __volatile__( \
+        "1: jmp 2f \n\t" \
+        " subl $8, %%esp \n\t" \
+        " movl %0, 4(%%esp) \n\t" \
+        " movl $" #id ", (%%esp) \n\t" \
+        " call " #callback " \n\t" \
+        " addl $8, %%esp \n\t" \
+        ".section __event_table, \"aw\" \n\t" \
+        ".long " #id ", 0, 1b, 2f \n\t" \
+        ".previous \n\t" \
+        "2: \n\t" \
+        : : "r" (param) : CLOBBER_LIST)
+
+#define ft_event2(id, callback, param, param2) \
+    __asm__ __volatile__( \
+        "1: jmp 2f \n\t" \
+        " subl $12, %%esp \n\t" \
+        " movl %1, 8(%%esp) \n\t" \
+        " movl %0, 4(%%esp) \n\t" \
+        " movl $" #id ", (%%esp) \n\t" \
+        " call " #callback " \n\t" \
+        " addl $12, %%esp \n\t" \
+        ".section __event_table, \"aw\" \n\t" \
+        ".long " #id ", 0, 1b, 2f \n\t" \
+        ".previous \n\t" \
+        "2: \n\t" \
+        : : "r" (param), "r" (param2) : CLOBBER_LIST)
+
+
+#define ft_event3(id, callback, p, p2, p3) \
+    __asm__ __volatile__( \
+        "1: jmp 2f \n\t" \
+        " subl $16, %%esp \n\t" \
+        " movl %2, 12(%%esp) \n\t" \
+        " movl %1, 8(%%esp) \n\t" \
+        " movl %0, 4(%%esp) \n\t" \
+        " movl $" #id ", (%%esp) \n\t" \
+        " call " #callback " \n\t" \
+        " addl $16, %%esp \n\t" \
+        ".section __event_table, \"aw\" \n\t" \
+        ".long " #id ", 0, 1b, 2f \n\t" \
+        ".previous \n\t" \
+        "2: \n\t" \
+        : : "r" (p), "r" (p2), "r" (p3) : CLOBBER_LIST)
+
diff --git a/arch/x86/include/asm/feather_trace_64.h b/arch/x86/include/asm/feather_trace_64.h
new file mode 100644
index 000000000000..54ac2aeb3a28
--- /dev/null
+++ b/arch/x86/include/asm/feather_trace_64.h
@@ -0,0 +1,67 @@
+/* Do not directly include this file. Include feather_trace.h instead */
+
+/* regparm is the default on x86_64 */
+#define feather_callback
+
+# define _EVENT_TABLE(id,from,to) \
+    ".section __event_table, \"aw\"\n\t" \
+    ".balign 8\n\t" \
+    ".quad " #id ", 0, " #from ", " #to " \n\t" \
+    ".previous \n\t"
+
+/*
+ * x86_64 callee only owns rbp, rbx, r12 -> r15
+ * the called can freely modify the others
+ */
+#define CLOBBER_LIST "memory", "cc", "rdi", "rsi", "rdx", "rcx", \
+    "r8", "r9", "r10", "r11", "rax"
+
+#define ft_event(id, callback) \
+    __asm__ __volatile__( \
+        "1: jmp 2f \n\t" \
+        " call " #callback " \n\t" \
+        _EVENT_TABLE(id,1b,2f) \
+        "2: \n\t" \
+        : : : CLOBBER_LIST)
+
+#define ft_event0(id, callback) \
+    __asm__ __volatile__( \
+        "1: jmp 2f \n\t" \
+        " movq $" #id ", %%rdi \n\t" \
+        " call " #callback " \n\t" \
+        _EVENT_TABLE(id,1b,2f) \
+        "2: \n\t" \
+        : : : CLOBBER_LIST)
+
+#define ft_event1(id, callback, param) \
+    __asm__ __volatile__( \
+        "1: jmp 2f \n\t" \
+        " movq %0, %%rsi \n\t" \
+        " movq $" #id ", %%rdi \n\t" \
+        " call " #callback " \n\t" \
+        _EVENT_TABLE(id,1b,2f) \
+        "2: \n\t" \
+        : : "r" (param) : CLOBBER_LIST)
+
+#define ft_event2(id, callback, param, param2) \
+    __asm__ __volatile__( \
+        "1: jmp 2f \n\t" \
+        " movq %1, %%rdx \n\t" \
+        " movq %0, %%rsi \n\t" \
+        " movq $" #id ", %%rdi \n\t" \
+        " call " #callback " \n\t" \
+        _EVENT_TABLE(id,1b,2f) \
+        "2: \n\t" \
+        : : "r" (param), "r" (param2) : CLOBBER_LIST)
+
+#define ft_event3(id, callback, p, p2, p3) \
+    __asm__ __volatile__( \
+        "1: jmp 2f \n\t" \
+        " movq %2, %%rcx \n\t" \
+        " movq %1, %%rdx \n\t" \
+        " movq %0, %%rsi \n\t" \
+        " movq $" #id ", %%rdi \n\t" \
+        " call " #callback " \n\t" \
+        _EVENT_TABLE(id,1b,2f) \
+        "2: \n\t" \
+        : : "r" (p), "r" (p2), "r" (p3) : CLOBBER_LIST)
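
Usage note (editorial, not part of the commit): a trace point emitted by these macros begins as a two-byte short jump over the call, so it costs almost nothing until the event is enabled by ft_enable_event() in arch/x86/kernel/ft_event.c further below. A minimal sketch of how a call site and its handler might be paired; the event id 501, the handler, and the caller are hypothetical names chosen here for illustration only:

    /* sketch only -- demo_callback, demo_path, and event id 501 are hypothetical */
    #include <asm/feather_trace.h>

    /* On x86_64 the id arrives in %rdi and the parameter in %rsi; on x86_32
     * both are pushed on the stack and feather_callback forces regparm(0). */
    feather_callback void demo_callback(unsigned long id, unsigned long param)
    {
        /* e.g., store param together with ft_timestamp() for this event */
    }

    static void demo_path(unsigned long job_no)
    {
        /* compiles to: 1: jmp 2f; <set up args>; call demo_callback; 2: ...
         * the jump offset is patched to 0 once event 501 is enabled */
        ft_event1(501, demo_callback, job_no);
    }

Note that the event id is passed as a literal so that the stringified "#id" in the macros above expands to a plain number in the generated assembly.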
diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
index 46c0fe05f230..c17411503f28 100644
--- a/arch/x86/include/asm/hw_irq.h
+++ b/arch/x86/include/asm/hw_irq.h
@@ -53,6 +53,8 @@ extern void threshold_interrupt(void);
 extern void call_function_interrupt(void);
 extern void call_function_single_interrupt(void);
 
+extern void pull_timers_interrupt(void);
+
 /* IOAPIC */
 #define IO_APIC_IRQ(x)    (((x) >= NR_IRQS_LEGACY) || ((1<<(x)) & io_apic_irqs))
 extern unsigned long io_apic_irqs;
@@ -122,6 +124,7 @@ extern asmlinkage void smp_irq_move_cleanup_interrupt(void);
 extern void smp_reschedule_interrupt(struct pt_regs *);
 extern void smp_call_function_interrupt(struct pt_regs *);
 extern void smp_call_function_single_interrupt(struct pt_regs *);
+extern void smp_pull_timers_interrupt(struct pt_regs *);
 #ifdef CONFIG_X86_32
 extern void smp_invalidate_interrupt(struct pt_regs *);
 #else
diff --git a/arch/x86/include/asm/irq_vectors.h b/arch/x86/include/asm/irq_vectors.h
index e2ca30092557..6143ebeeebfa 100644
--- a/arch/x86/include/asm/irq_vectors.h
+++ b/arch/x86/include/asm/irq_vectors.h
@@ -109,6 +109,11 @@
 #define LOCAL_TIMER_VECTOR    0xef
 
 /*
+ * LITMUS^RT pull timers IRQ vector
+ */
+#define PULL_TIMERS_VECTOR    0xee
+
+/*
  * Generic system vector for platform specific use
  */
 #define X86_PLATFORM_IPI_VECTOR    0xed
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 325b7bdbebaa..ebaa04a8d3af 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -169,6 +169,10 @@ extern void print_cpu_info(struct cpuinfo_x86 *);
 extern void init_scattered_cpuid_features(struct cpuinfo_x86 *c);
 extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
 extern unsigned short num_cache_leaves;
+#ifdef CONFIG_SYSFS
+extern int get_shared_cpu_map(cpumask_var_t mask,
+                              unsigned int cpu, int index);
+#endif
 
 extern void detect_extended_topology(struct cpuinfo_x86 *c);
 extern void detect_ht(struct cpuinfo_x86 *c);
diff --git a/arch/x86/include/asm/unistd_32.h b/arch/x86/include/asm/unistd_32.h
index b766a5e8ba0e..b7ba19acd3f8 100644
--- a/arch/x86/include/asm/unistd_32.h
+++ b/arch/x86/include/asm/unistd_32.h
@@ -347,9 +347,13 @@
 #define __NR_fanotify_mark    339
 #define __NR_prlimit64        340
 
+#define __NR_LITMUS           341
+
+#include "litmus/unistd_32.h"
+
 #ifdef __KERNEL__
 
-#define NR_syscalls 341
+#define NR_syscalls 341 + NR_litmus_syscalls
 
 #define __ARCH_WANT_IPC_PARSE_VERSION
 #define __ARCH_WANT_OLD_READDIR
diff --git a/arch/x86/include/asm/unistd_64.h b/arch/x86/include/asm/unistd_64.h
index 363e9b8a715b..332bf3c9c84c 100644
--- a/arch/x86/include/asm/unistd_64.h
+++ b/arch/x86/include/asm/unistd_64.h
@@ -670,6 +670,10 @@ __SYSCALL(__NR_fanotify_mark, sys_fanotify_mark)
 #define __NR_prlimit64        302
 __SYSCALL(__NR_prlimit64, sys_prlimit64)
 
+#define __NR_LITMUS           303
+
+#include "litmus/unistd_64.h"
+
 #ifndef __NO_STUBS
 #define __ARCH_WANT_OLD_READDIR
 #define __ARCH_WANT_OLD_STAT
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index fedf32a8c3ec..6890dbb9ac15 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -118,6 +118,8 @@ obj-$(CONFIG_X86_CHECK_BIOS_CORRUPTION) += check.o
 
 obj-$(CONFIG_SWIOTLB)           += pci-swiotlb.o
 
+obj-$(CONFIG_FEATHER_TRACE)     += ft_event.o
+
 ###
 # 64 bit specific files
 ifeq ($(CONFIG_X86_64),y)
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index 898c2f4eab88..3fec7d9bfd62 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -758,6 +758,23 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
 static DEFINE_PER_CPU(struct _cpuid4_info *, ici_cpuid4_info);
 #define CPUID4_INFO_IDX(x, y)    (&((per_cpu(ici_cpuid4_info, x))[y]))
 
+/* returns CPUs that share the index cache with cpu */
+int get_shared_cpu_map(cpumask_var_t mask, unsigned int cpu, int index)
+{
+    int ret = 0;
+    struct _cpuid4_info *this_leaf;
+
+    if (index >= num_cache_leaves) {
+        index = num_cache_leaves - 1;
+        ret = index;
+    }
+
+    this_leaf = CPUID4_INFO_IDX(cpu,index);
+    cpumask_copy(mask, to_cpumask(this_leaf->shared_cpu_map));
+
+    return ret;
+}
+
 #ifdef CONFIG_SMP
 static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
 {
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index 17be5ec7cbba..115e8951e8c8 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -1016,6 +1016,8 @@ apicinterrupt CALL_FUNCTION_VECTOR \
     call_function_interrupt smp_call_function_interrupt
 apicinterrupt RESCHEDULE_VECTOR \
     reschedule_interrupt smp_reschedule_interrupt
+apicinterrupt PULL_TIMERS_VECTOR \
+    pull_timers_interrupt smp_pull_timers_interrupt
 #endif
 
 apicinterrupt ERROR_APIC_VECTOR \
diff --git a/arch/x86/kernel/ft_event.c b/arch/x86/kernel/ft_event.c
new file mode 100644
index 000000000000..37cc33252713
--- /dev/null
+++ b/arch/x86/kernel/ft_event.c
@@ -0,0 +1,118 @@
+#include <linux/types.h>
+
+#include <litmus/feather_trace.h>
+
+/* the feather trace management functions assume
+ * exclusive access to the event table
+ */
+
+#ifndef CONFIG_DEBUG_RODATA
+
+#define BYTE_JUMP      0xeb
+#define BYTE_JUMP_LEN  0x02
+
+/* for each event, there is an entry in the event table */
+struct trace_event {
+    long id;
+    long count;
+    long start_addr;
+    long end_addr;
+};
+
+extern struct trace_event __start___event_table[];
+extern struct trace_event __stop___event_table[];
+
+/* Workaround: if no events are defined, then the event_table section does not
+ * exist and the above references cause linker errors. This could probably be
+ * fixed by adjusting the linker script, but it is easier to maintain for us if
+ * we simply create a dummy symbol in the event table section.
+ */
+int __event_table_dummy[0] __attribute__ ((section("__event_table")));
+
+int ft_enable_event(unsigned long id)
+{
+    struct trace_event* te = __start___event_table;
+    int count = 0;
+    char* delta;
+    unsigned char* instr;
+
+    while (te < __stop___event_table) {
+        if (te->id == id && ++te->count == 1) {
+            instr = (unsigned char*) te->start_addr;
+            /* make sure we don't clobber something wrong */
+            if (*instr == BYTE_JUMP) {
+                delta = (((unsigned char*) te->start_addr) + 1);
+                *delta = 0;
+            }
+        }
+        if (te->id == id)
+            count++;
+        te++;
+    }
+
+    printk(KERN_DEBUG "ft_enable_event: enabled %d events\n", count);
+    return count;
+}
+
+int ft_disable_event(unsigned long id)
+{
+    struct trace_event* te = __start___event_table;
+    int count = 0;
+    char* delta;
+    unsigned char* instr;
+
+    while (te < __stop___event_table) {
+        if (te->id == id && --te->count == 0) {
+            instr = (unsigned char*) te->start_addr;
+            if (*instr == BYTE_JUMP) {
+                delta = (((unsigned char*) te->start_addr) + 1);
+                *delta = te->end_addr - te->start_addr -
+                    BYTE_JUMP_LEN;
+            }
+        }
+        if (te->id == id)
+            count++;
+        te++;
+    }
+
+    printk(KERN_DEBUG "ft_disable_event: disabled %d events\n", count);
+    return count;
+}
+
+int ft_disable_all_events(void)
+{
+    struct trace_event* te = __start___event_table;
+    int count = 0;
+    char* delta;
+    unsigned char* instr;
+
+    while (te < __stop___event_table) {
+        if (te->count) {
+            instr = (unsigned char*) te->start_addr;
+            if (*instr == BYTE_JUMP) {
+                delta = (((unsigned char*) te->start_addr)
+                    + 1);
+                *delta = te->end_addr - te->start_addr -
+                    BYTE_JUMP_LEN;
+                te->count = 0;
+                count++;
+            }
+        }
+        te++;
+    }
+    return count;
+}
+
+int ft_is_event_enabled(unsigned long id)
+{
+    struct trace_event* te = __start___event_table;
+
+    while (te < __stop___event_table) {
+        if (te->id == id)
+            return te->count;
+        te++;
+    }
+    return 0;
+}
+
+#endif
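
Editorial note (not part of the commit): ft_enable_event() and ft_disable_event() above work by patching the rel8 offset of the two-byte short jump (0xeb) that each ft_event*() macro places in front of the callback invocation. An offset of 0 falls through into the call; an offset of end_addr - start_addr - BYTE_JUMP_LEN jumps past it. The stand-alone model below reproduces just that arithmetic on a fake byte buffer; it is illustrative user-space C only, with made-up names:

    /* stand-alone model of the jump patching in ft_event.c -- illustrative only */
    #include <stdio.h>

    #define BYTE_JUMP      0xeb
    #define BYTE_JUMP_LEN  0x02

    static void patch_site(unsigned char *start, unsigned char *end, int enable)
    {
        if (start[0] == BYTE_JUMP)    /* same sanity check as ft_event.c */
            start[1] = enable ? 0
                : (unsigned char) (end - start - BYTE_JUMP_LEN);
    }

    int main(void)
    {
        /* fake trace point: jmp +5 over a 5-byte call instruction */
        unsigned char site[] = { BYTE_JUMP, 0x05, 0xe8, 0x00, 0x00, 0x00, 0x00 };

        patch_site(site, site + sizeof(site), 1);
        printf("enabled:  rel8 = %d (falls through to the call)\n", site[1]);
        patch_site(site, site + sizeof(site), 0);
        printf("disabled: rel8 = %d (skips the call)\n", site[1]);
        return 0;
    }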
diff --git a/arch/x86/kernel/irqinit.c b/arch/x86/kernel/irqinit.c
index 990ae7cfc578..9772b1a0f9a4 100644
--- a/arch/x86/kernel/irqinit.c
+++ b/arch/x86/kernel/irqinit.c
@@ -189,6 +189,9 @@ static void __init smp_intr_init(void)
     alloc_intr_gate(CALL_FUNCTION_SINGLE_VECTOR,
             call_function_single_interrupt);
 
+    /* IPI for hrtimer pulling on remote cpus */
+    alloc_intr_gate(PULL_TIMERS_VECTOR, pull_timers_interrupt);
+
     /* Low priority IPI to cleanup after moving an irq */
     set_intr_gate(IRQ_MOVE_CLEANUP_VECTOR, irq_move_cleanup_interrupt);
     set_bit(IRQ_MOVE_CLEANUP_VECTOR, used_vectors);
diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
index d801210945d6..74cca6014c0e 100644
--- a/arch/x86/kernel/smp.c
+++ b/arch/x86/kernel/smp.c
@@ -23,6 +23,10 @@
 #include <linux/cpu.h>
 #include <linux/gfp.h>
 
+#include <litmus/preempt.h>
+#include <litmus/debug_trace.h>
+#include <litmus/trace.h>
+
 #include <asm/mtrr.h>
 #include <asm/tlbflush.h>
 #include <asm/mmu_context.h>
@@ -118,6 +122,7 @@ static void native_smp_send_reschedule(int cpu)
         WARN_ON(1);
         return;
     }
+    TS_SEND_RESCHED_START(cpu);
     apic->send_IPI_mask(cpumask_of(cpu), RESCHEDULE_VECTOR);
 }
 
@@ -147,6 +152,16 @@ void native_send_call_func_ipi(const struct cpumask *mask)
     free_cpumask_var(allbutself);
 }
 
+/* trigger timers on remote cpu */
+void smp_send_pull_timers(int cpu)
+{
+    if (unlikely(cpu_is_offline(cpu))) {
+        WARN_ON(1);
+        return;
+    }
+    apic->send_IPI_mask(cpumask_of(cpu), PULL_TIMERS_VECTOR);
+}
+
 /*
  * this function calls the 'stop' function on all other CPUs in the system.
  */
@@ -198,7 +213,10 @@ static void native_smp_send_stop(void)
 void smp_reschedule_interrupt(struct pt_regs *regs)
 {
     ack_APIC_irq();
+    /* LITMUS^RT: this IPI might need to trigger the sched state machine. */
+    sched_state_ipi();
     inc_irq_stat(irq_resched_count);
+    TS_SEND_RESCHED_END;
     /*
      * KVM uses this interrupt to force a cpu out of guest mode
      */
@@ -222,6 +240,15 @@ void smp_call_function_single_interrupt(struct pt_regs *regs)
     irq_exit();
 }
 
+extern void hrtimer_pull(void);
+
+void smp_pull_timers_interrupt(struct pt_regs *regs)
+{
+    ack_APIC_irq();
+    TRACE("pull timer interrupt\n");
+    hrtimer_pull();
+}
+
 struct smp_ops smp_ops = {
     .smp_prepare_boot_cpu    = native_smp_prepare_boot_cpu,
     .smp_prepare_cpus        = native_smp_prepare_cpus,
diff --git a/arch/x86/kernel/syscall_table_32.S b/arch/x86/kernel/syscall_table_32.S
index b35786dc9b8f..37702905f658 100644
--- a/arch/x86/kernel/syscall_table_32.S
+++ b/arch/x86/kernel/syscall_table_32.S
@@ -340,3 +340,15 @@ ENTRY(sys_call_table)
     .long sys_fanotify_init
     .long sys_fanotify_mark
     .long sys_prlimit64            /* 340 */
+    .long sys_set_rt_task_param    /* LITMUS^RT 341 */
+    .long sys_get_rt_task_param
+    .long sys_complete_job
+    .long sys_od_open
+    .long sys_od_close
+    .long sys_litmus_lock
+    .long sys_litmus_unlock
+    .long sys_query_job_no
+    .long sys_wait_for_job_release
+    .long sys_wait_for_ts_release
+    .long sys_release_ts
+    .long sys_null_call