author		Jonathan Herman <hermanjl@cs.unc.edu>	2013-04-19 17:31:52 -0400
committer	Jonathan Herman <hermanjl@cs.unc.edu>	2013-04-19 17:31:52 -0400
commit		f70a290e8a889caa905ab7650c696f2bb299be1a
tree		56f0886d839499e9f522f189999024b3e86f9be2 /arch/x86/kernel
parent		fcc9d2e5a6c89d22b8b773a64fb4ad21ac318446
parent		7ef4a793a624c6e66c16ca1051847f75161f5bec
Merge branch 'wip-nested-locking' into tegra-nested-locking
Conflicts:
	Makefile
	include/linux/fs.h
Diffstat (limited to 'arch/x86/kernel')

-rw-r--r--	arch/x86/kernel/Makefile               |   2 +
-rw-r--r--	arch/x86/kernel/cpu/intel_cacheinfo.c  |  17 ++
-rw-r--r--	arch/x86/kernel/entry_64.S             |   2 +
-rw-r--r--	arch/x86/kernel/ft_event.c             | 118 +++++++++++
-rw-r--r--	arch/x86/kernel/irqinit.c              |   3 +
-rw-r--r--	arch/x86/kernel/smp.c                  |  29 +++
-rw-r--r--	arch/x86/kernel/syscall_table_32.S     |  14 ++

7 files changed, 185 insertions(+), 0 deletions(-)
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 82f2912155a..c84954ad12f 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -99,6 +99,8 @@ obj-$(CONFIG_X86_CHECK_BIOS_CORRUPTION) += check.o
 obj-$(CONFIG_SWIOTLB)			+= pci-swiotlb.o
 obj-$(CONFIG_OF)			+= devicetree.o
 
+obj-$(CONFIG_FEATHER_TRACE)		+= ft_event.o
+
 ###
 # 64 bit specific files
 ifeq ($(CONFIG_X86_64),y)
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index c105c533ed9..0bf12644aa7 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -747,6 +747,23 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
 static DEFINE_PER_CPU(struct _cpuid4_info *, ici_cpuid4_info);
 #define CPUID4_INFO_IDX(x, y)	(&((per_cpu(ici_cpuid4_info, x))[y]))
 
+/* returns CPUs that share the index cache with cpu */
+int get_shared_cpu_map(cpumask_var_t mask, unsigned int cpu, int index)
+{
+	int ret = 0;
+	struct _cpuid4_info *this_leaf;
+
+	if (index >= num_cache_leaves) {
+		index = num_cache_leaves - 1;
+		ret = index;
+	}
+
+	this_leaf = CPUID4_INFO_IDX(cpu,index);
+	cpumask_copy(mask, to_cpumask(this_leaf->shared_cpu_map));
+
+	return ret;
+}
+
 #ifdef CONFIG_SMP
 static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
 {
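
The new helper's contract is visible in the hunk above: it copies the shared-CPU mask for cache leaf `index` of `cpu` into `mask`, returning 0 unless the requested leaf was out of range, in which case the index is clamped to the last leaf and the clamped value is returned. A minimal caller might look like the following sketch (not part of this commit; the leaf choice and the reporting are hypothetical):

	#include <linux/cpumask.h>
	#include <linux/gfp.h>
	#include <linux/kernel.h>

	extern int get_shared_cpu_map(cpumask_var_t mask, unsigned int cpu,
				      int index);

	/* sketch: print the CPUs sharing cache leaf 2 (often the LLC
	 * on Intel parts of this era) with `cpu` */
	static void report_shared_cache(unsigned int cpu)
	{
		cpumask_var_t shared;
		int other, leaf = 2;	/* hypothetical leaf index */

		if (!zalloc_cpumask_var(&shared, GFP_KERNEL))
			return;

		if (get_shared_cpu_map(shared, cpu, leaf))
			pr_warn("leaf index clamped to last cache level\n");

		for_each_cpu(other, shared)
			pr_info("CPU %u shares cache leaf %d with CPU %d\n",
				cpu, leaf, other);

		free_cpumask_var(shared);
	}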
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index 6419bb05ecd..e5d2d3fa7a0 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -993,6 +993,8 @@ apicinterrupt CALL_FUNCTION_VECTOR \
 	call_function_interrupt smp_call_function_interrupt
 apicinterrupt RESCHEDULE_VECTOR \
 	reschedule_interrupt smp_reschedule_interrupt
+apicinterrupt PULL_TIMERS_VECTOR \
+	pull_timers_interrupt smp_pull_timers_interrupt
 #endif
 
 apicinterrupt ERROR_APIC_VECTOR \
diff --git a/arch/x86/kernel/ft_event.c b/arch/x86/kernel/ft_event.c
new file mode 100644
index 00000000000..37cc3325271
--- /dev/null
+++ b/arch/x86/kernel/ft_event.c
@@ -0,0 +1,118 @@
+#include <linux/types.h>
+
+#include <litmus/feather_trace.h>
+
+/* the feather trace management functions assume
+ * exclusive access to the event table
+ */
+
+#ifndef CONFIG_DEBUG_RODATA
+
+#define BYTE_JUMP      0xeb
+#define BYTE_JUMP_LEN  0x02
+
+/* for each event, there is an entry in the event table */
+struct trace_event {
+	long	id;
+	long	count;
+	long	start_addr;
+	long	end_addr;
+};
+
+extern struct trace_event __start___event_table[];
+extern struct trace_event __stop___event_table[];
+
+/* Workaround: if no events are defined, then the event_table section does not
+ * exist and the above references cause linker errors. This could probably be
+ * fixed by adjusting the linker script, but it is easier to maintain for us if
+ * we simply create a dummy symbol in the event table section.
+ */
+int __event_table_dummy[0] __attribute__ ((section("__event_table")));
+
+int ft_enable_event(unsigned long id)
+{
+	struct trace_event* te = __start___event_table;
+	int count = 0;
+	char* delta;
+	unsigned char* instr;
+
+	while (te < __stop___event_table) {
+		if (te->id == id && ++te->count == 1) {
+			instr = (unsigned char*) te->start_addr;
+			/* make sure we don't clobber something wrong */
+			if (*instr == BYTE_JUMP) {
+				delta = (((unsigned char*) te->start_addr) + 1);
+				*delta = 0;
+			}
+		}
+		if (te->id == id)
+			count++;
+		te++;
+	}
+
+	printk(KERN_DEBUG "ft_enable_event: enabled %d events\n", count);
+	return count;
+}
+
+int ft_disable_event(unsigned long id)
+{
+	struct trace_event* te = __start___event_table;
+	int count = 0;
+	char* delta;
+	unsigned char* instr;
+
+	while (te < __stop___event_table) {
+		if (te->id == id && --te->count == 0) {
+			instr = (unsigned char*) te->start_addr;
+			if (*instr == BYTE_JUMP) {
+				delta = (((unsigned char*) te->start_addr) + 1);
+				*delta = te->end_addr - te->start_addr -
+					BYTE_JUMP_LEN;
+			}
+		}
+		if (te->id == id)
+			count++;
+		te++;
+	}
+
+	printk(KERN_DEBUG "ft_disable_event: disabled %d events\n", count);
+	return count;
+}
+
+int ft_disable_all_events(void)
+{
+	struct trace_event* te = __start___event_table;
+	int count = 0;
+	char* delta;
+	unsigned char* instr;
+
+	while (te < __stop___event_table) {
+		if (te->count) {
+			instr = (unsigned char*) te->start_addr;
+			if (*instr == BYTE_JUMP) {
+				delta = (((unsigned char*) te->start_addr)
+					+ 1);
+				*delta = te->end_addr - te->start_addr -
+					BYTE_JUMP_LEN;
+				te->count = 0;
+				count++;
+			}
+		}
+		te++;
+	}
+	return count;
+}
+
+int ft_is_event_enabled(unsigned long id)
+{
+	struct trace_event* te = __start___event_table;
+
+	while (te < __stop___event_table) {
+		if (te->id == id)
+			return te->count;
+		te++;
+	}
+	return 0;
+}
+
+#endif
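
Mechanically, each Feather-Trace event site begins with a two-byte short jump (opcode 0xeb, i.e. BYTE_JUMP, followed by an 8-bit displacement). ft_enable_event() rewrites the displacement to 0 so execution falls through into the tracing code; ft_disable_event() restores it to end_addr - start_addr - BYTE_JUMP_LEN so the tracing code is jumped over again. This is also why the whole file sits under #ifndef CONFIG_DEBUG_RODATA: the patching writes into kernel text, which must be writable. An event site is emitted by a macro of roughly the following shape (a sketch adapted from LITMUS^RT's x86 feather_trace header; clobber-list details are omitted):

	/* sketch: the .long entry populates one struct trace_event
	 * {id, count, start_addr, end_addr} in the __event_table
	 * section that ft_enable_event() walks; when the jmp's
	 * displacement is zeroed, the call becomes reachable */
	#define ft_event(id, callback)				\
		__asm__ __volatile__(				\
			"1: jmp 2f\n\t"				\
			"   call " #callback "\n\t"		\
			".section __event_table, \"aw\"\n\t"	\
			".long " #id ", 0, 1b, 2f\n\t"		\
			".previous\n\t"				\
			"2:\n\t"				\
			: : : "memory")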
diff --git a/arch/x86/kernel/irqinit.c b/arch/x86/kernel/irqinit.c
index b3300e6bace..f3a90e926f5 100644
--- a/arch/x86/kernel/irqinit.c
+++ b/arch/x86/kernel/irqinit.c
@@ -252,6 +252,9 @@ static void __init smp_intr_init(void)
 	alloc_intr_gate(CALL_FUNCTION_SINGLE_VECTOR,
 			call_function_single_interrupt);
 
+	/* IPI for hrtimer pulling on remote cpus */
+	alloc_intr_gate(PULL_TIMERS_VECTOR, pull_timers_interrupt);
+
 	/* Low priority IPI to cleanup after moving an irq */
 	set_intr_gate(IRQ_MOVE_CLEANUP_VECTOR, irq_move_cleanup_interrupt);
 	set_bit(IRQ_MOVE_CLEANUP_VECTOR, used_vectors);
diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
index 013e7eba83b..7539d84628f 100644
--- a/arch/x86/kernel/smp.c
+++ b/arch/x86/kernel/smp.c
@@ -23,6 +23,9 @@
 #include <linux/cpu.h>
 #include <linux/gfp.h>
 
+#include <litmus/preempt.h>
+#include <litmus/debug_trace.h>
+
 #include <asm/mtrr.h>
 #include <asm/tlbflush.h>
 #include <asm/mmu_context.h>
@@ -147,6 +150,16 @@ void native_send_call_func_ipi(const struct cpumask *mask)
 	free_cpumask_var(allbutself);
 }
 
+/* trigger timers on remote cpu */
+void smp_send_pull_timers(int cpu)
+{
+	if (unlikely(cpu_is_offline(cpu))) {
+		WARN_ON(1);
+		return;
+	}
+	apic->send_IPI_mask(cpumask_of(cpu), PULL_TIMERS_VECTOR);
+}
+
 /*
  * this function calls the 'stop' function on all other CPUs in the system.
  */
@@ -204,6 +217,11 @@ void smp_reschedule_interrupt(struct pt_regs *regs)
 	/*
 	 * KVM uses this interrupt to force a cpu out of guest mode
 	 */
+
+	/* LITMUS^RT: this IPI might need to trigger the sched state machine.
+	 * Starting from 3.0 schedule_ipi() actually does something. This may
+	 * increase IPI latencies compared with previous versions. */
+	sched_state_ipi();
 }
 
 void smp_call_function_interrupt(struct pt_regs *regs)
@@ -224,6 +242,17 @@ void smp_call_function_single_interrupt(struct pt_regs *regs)
 	irq_exit();
 }
 
+extern void hrtimer_pull(void);
+
+void smp_pull_timers_interrupt(struct pt_regs *regs)
+{
+	ack_APIC_irq();
+	irq_enter();
+	TRACE("pull timer interrupt\n");
+	hrtimer_pull();
+	irq_exit();
+}
+
 struct smp_ops smp_ops = {
 	.smp_prepare_boot_cpu	= native_smp_prepare_boot_cpu,
 	.smp_prepare_cpus	= native_smp_prepare_cpus,
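
Taken together with the irqinit.c and entry_64.S hunks above, the pull-timers IPI is wired like the existing SMP IPIs: irqinit.c allocates the interrupt gate for PULL_TIMERS_VECTOR, entry_64.S generates the low-level pull_timers_interrupt stub via the apicinterrupt macro, and smp_pull_timers_interrupt() acknowledges the APIC and calls into hrtimer_pull() (defined in LITMUS^RT's hrtimer changes, outside this diff). A sender-side sketch (not in this commit) of how code that queued an hrtimer on a remote CPU might kick that CPU:

	#include <linux/preempt.h>
	#include <linux/smp.h>

	extern void smp_send_pull_timers(int cpu);	/* added above */

	/* sketch: after queueing an hrtimer for `target`, make that
	 * CPU program it; smp_send_pull_timers() itself WARNs and
	 * bails out if the target is offline */
	static void kick_pull_timers(int target)
	{
		preempt_disable();
		if (target != smp_processor_id())
			smp_send_pull_timers(target);
		preempt_enable();
	}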
diff --git a/arch/x86/kernel/syscall_table_32.S b/arch/x86/kernel/syscall_table_32.S
index bc19be332bc..058cac30916 100644
--- a/arch/x86/kernel/syscall_table_32.S
+++ b/arch/x86/kernel/syscall_table_32.S
@@ -346,3 +346,17 @@ ENTRY(sys_call_table)
 	.long sys_syncfs
 	.long sys_sendmmsg		/* 345 */
 	.long sys_setns
+	.long sys_set_rt_task_param	/* LITMUS^RT 347 */
+	.long sys_get_rt_task_param
+	.long sys_complete_job
+	.long sys_od_open
+	.long sys_od_close
+	.long sys_litmus_lock		/* +5 */
+	.long sys_litmus_unlock
+	.long sys_query_job_no
+	.long sys_wait_for_job_release
+	.long sys_wait_for_ts_release
+	.long sys_release_ts		/* +10 */
+	.long sys_null_call
+	.long sys_dynamic_group_lock
+	.long sys_dynamic_group_unlock
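
With these entries, 32-bit userspace reaches LITMUS^RT at syscall numbers 347 (sys_set_rt_task_param) through 360 (sys_dynamic_group_unlock); the /* +5 */ and /* +10 */ comments are offset markers from 347. Normally liblitmus wraps these calls, but a raw invocation against just this table would look like the sketch below; treating sys_null_call as accepting an optional timestamp pointer is an assumption from LITMUS^RT convention, not something this diff shows:

	#include <stdio.h>
	#include <unistd.h>
	#include <sys/syscall.h>

	/* numbers read off the table above (x86-32 only) */
	#define NR_litmus_complete_job	349
	#define NR_litmus_null_call	358

	int main(void)
	{
		/* sys_null_call does no scheduling work; it exists to
		 * measure bare syscall overhead (the NULL timestamp
		 * argument is an assumption, see lead-in) */
		long ret = syscall(NR_litmus_null_call, NULL);

		printf("sys_null_call returned %ld\n", ret);
		return 0;
	}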