author		Jonathan Herman <hermanjl@cs.unc.edu>	2013-01-22 15:30:43 -0500
committer	Jonathan Herman <hermanjl@cs.unc.edu>	2013-01-22 15:30:43 -0500
commit		7806057274c493d53a214232d4df6f96aadc7547 (patch)
tree		b3aa02ee438a0c5f592e125f4657a2f1e5c225f5 /arch
parent		a5d1599facc1b934e0b8d68e360dadd66c1df730 (diff)
Merge with branch wip-mc.
Diffstat (limited to 'arch')
-rw-r--r--	arch/arm/Kconfig				|   8
-rw-r--r--	arch/arm/include/asm/spinlock.h			|  76
-rw-r--r--	arch/arm/include/asm/spinlock_types.h		|  17
-rw-r--r--	arch/arm/include/asm/timex.h			|   4
-rw-r--r--	arch/arm/include/asm/unistd.h			|   3
-rw-r--r--	arch/arm/kernel/calls.S				|  14
-rw-r--r--	arch/arm/kernel/smp.c				|   7
-rw-r--r--	arch/arm/mach-realview/include/mach/timex.h	|  27
-rw-r--r--	arch/arm/mm/cache-l2x0.c			|  22
-rw-r--r--	arch/sparc/Kconfig				|   9
-rw-r--r--	arch/sparc/include/asm/spinlock_64.h		|  92
-rw-r--r--	arch/sparc/include/asm/spinlock_types.h		|   5
-rw-r--r--	arch/sparc/include/asm/unistd.h			|   6
-rw-r--r--	arch/sparc/kernel/systbls_32.S			|   3
-rw-r--r--	arch/sparc/kernel/systbls_64.S			|   8
-rw-r--r--	arch/x86/Kconfig				|   8
-rw-r--r--	arch/x86/include/asm/entry_arch.h		|   1
-rw-r--r--	arch/x86/include/asm/feather_trace.h		|  17
-rw-r--r--	arch/x86/include/asm/feather_trace_32.h		| 115
-rw-r--r--	arch/x86/include/asm/feather_trace_64.h		| 124
-rw-r--r--	arch/x86/include/asm/hw_irq.h			|   3
-rw-r--r--	arch/x86/include/asm/irq_vectors.h		|   7
-rw-r--r--	arch/x86/include/asm/processor.h		|   4
-rw-r--r--	arch/x86/include/asm/unistd_32.h		|   6
-rw-r--r--	arch/x86/include/asm/unistd_64.h		|   8
-rw-r--r--	arch/x86/kernel/Makefile			|   2
-rw-r--r--	arch/x86/kernel/cpu/intel_cacheinfo.c		|  33
-rw-r--r--	arch/x86/kernel/entry_64.S			|   2
-rw-r--r--	arch/x86/kernel/ft_event.c			| 118
-rw-r--r--	arch/x86/kernel/irqinit.c			|   3
-rw-r--r--	arch/x86/kernel/smp.c				|  31
-rw-r--r--	arch/x86/kernel/syscall_table_32.S		|  14
32 files changed, 701 insertions(+), 96 deletions(-)
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 3c3b868948a..6dc9a2f42ab 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -2214,3 +2214,11 @@ source "security/Kconfig"
 source "crypto/Kconfig"
 
 source "lib/Kconfig"
+
+config ARCH_HAS_SEND_PULL_TIMERS
+	def_bool n
+
+config ARCH_HAS_FEATHER_TRACE
+	def_bool n
+
+source "litmus/Kconfig"
diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h
index 65fa3c88095..b4ca707d0a6 100644
--- a/arch/arm/include/asm/spinlock.h
+++ b/arch/arm/include/asm/spinlock.h
@@ -59,18 +59,13 @@ static inline void dsb_sev(void)
 }
 
 /*
- * ARMv6 Spin-locking.
+ * ARMv6 ticket-based spin-locking.
  *
- * We exclusively read the old value. If it is zero, we may have
- * won the lock, so we try exclusively storing it. A memory barrier
- * is required after we get a lock, and before we release it, because
- * V6 CPUs are assumed to have weakly ordered memory.
- *
- * Unlocked value: 0
- * Locked value: 1
+ * A memory barrier is required after we get a lock, and before we
+ * release it, because V6 CPUs are assumed to have weakly ordered
+ * memory.
  */
 
-#define arch_spin_is_locked(x)		((x)->lock != 0)
 #define arch_spin_unlock_wait(lock) \
 	do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)
 
@@ -79,31 +74,39 @@ static inline void dsb_sev(void)
 static inline void arch_spin_lock(arch_spinlock_t *lock)
 {
 	unsigned long tmp;
+	u32 newval;
+	arch_spinlock_t lockval;
 
 	__asm__ __volatile__(
-"1:	ldrex	%0, [%1]\n"
-"	teq	%0, #0\n"
-	WFE("ne")
-"	strexeq	%0, %2, [%1]\n"
-"	teqeq	%0, #0\n"
+"1:	ldrex	%0, [%3]\n"
+"	add	%1, %0, %4\n"
+"	strex	%2, %1, [%3]\n"
+"	teq	%2, #0\n"
 "	bne	1b"
-	: "=&r" (tmp)
-	: "r" (&lock->lock), "r" (1)
+	: "=&r" (lockval), "=&r" (newval), "=&r" (tmp)
+	: "r" (&lock->slock), "I" (1 << TICKET_SHIFT)
 	: "cc");
 
+	while (lockval.tickets.next != lockval.tickets.owner) {
+		wfe();
+		lockval.tickets.owner = ACCESS_ONCE(lock->tickets.owner);
+	}
+
 	smp_mb();
 }
 
 static inline int arch_spin_trylock(arch_spinlock_t *lock)
 {
 	unsigned long tmp;
+	u32 slock;
 
 	__asm__ __volatile__(
-"	ldrex	%0, [%1]\n"
-"	teq	%0, #0\n"
-"	strexeq	%0, %2, [%1]"
-	: "=&r" (tmp)
-	: "r" (&lock->lock), "r" (1)
+"	ldrex	%0, [%2]\n"
+"	subs	%1, %0, %0, ror #16\n"
+"	addeq	%0, %0, %3\n"
+"	strexeq	%1, %0, [%2]"
+	: "=&r" (slock), "=&r" (tmp)
+	: "r" (&lock->slock), "I" (1 << TICKET_SHIFT)
 	: "cc");
 
 	if (tmp == 0) {
@@ -116,17 +119,38 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
 
 static inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
+	unsigned long tmp;
+	u32 slock;
+
 	smp_mb();
 
 	__asm__ __volatile__(
-"	str	%1, [%0]\n"
-	:
-	: "r" (&lock->lock), "r" (0)
+"	mov	%1, #1\n"
+"1:	ldrex	%0, [%2]\n"
+"	uadd16	%0, %0, %1\n"
+"	strex	%1, %0, [%2]\n"
+"	teq	%1, #0\n"
+"	bne	1b"
+	: "=&r" (slock), "=&r" (tmp)
+	: "r" (&lock->slock)
 	: "cc");
 
 	dsb_sev();
 }
 
+static inline int arch_spin_is_locked(arch_spinlock_t *lock)
+{
+	struct __raw_tickets tickets = ACCESS_ONCE(lock->tickets);
+	return tickets.owner != tickets.next;
+}
+
+static inline int arch_spin_is_contended(arch_spinlock_t *lock)
+{
+	struct __raw_tickets tickets = ACCESS_ONCE(lock->tickets);
+	return (tickets.next - tickets.owner) > 1;
+}
+#define arch_spin_is_contended	arch_spin_is_contended
+
 /*
  * RWLOCKS
  *
@@ -158,7 +182,7 @@ static inline int arch_write_trylock(arch_rwlock_t *rw)
 	unsigned long tmp;
 
 	__asm__ __volatile__(
-"1:	ldrex	%0, [%1]\n"
+"	ldrex	%0, [%1]\n"
 "	teq	%0, #0\n"
 "	strexeq	%0, %2, [%1]"
 	: "=&r" (tmp)
@@ -244,7 +268,7 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
 	unsigned long tmp, tmp2 = 1;
 
 	__asm__ __volatile__(
-"1:	ldrex	%0, [%2]\n"
+"	ldrex	%0, [%2]\n"
 "	adds	%0, %0, #1\n"
 "	strexpl	%1, %0, [%2]\n"
 	: "=&r" (tmp), "+r" (tmp2)
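
For readers less fluent in ARM assembly: the new lock is a standard ticket lock, and the trylock compares the two 16-bit halves by subtracting the word from itself rotated by 16 bits. Below is a minimal C sketch of the protocol the ldrex/strex sequences above implement, written with GCC __atomic builtins purely for illustration; it is not part of the patch, and the barrier placement is only approximate (the kernel version uses smp_mb() and wfe/sev explicitly).

#include <stdint.h>

/* Mirrors the new arch_spinlock_t layout (little-endian case). */
typedef union {
	uint32_t slock;
	struct {
		uint16_t owner;	/* ticket now being served */
		uint16_t next;	/* next ticket to hand out */
	} tickets;
} ticket_lock_t;

static void ticket_lock(ticket_lock_t *lock)
{
	/* the ldrex/add/strex loop: atomically take a ticket */
	uint32_t old = __atomic_fetch_add(&lock->slock, 1 << 16,
					  __ATOMIC_ACQUIRE);
	uint16_t my_ticket = old >> 16;

	/* spin until served; the kernel version idles in wfe() */
	while (__atomic_load_n(&lock->tickets.owner,
			       __ATOMIC_ACQUIRE) != my_ticket)
		;
}

static void ticket_unlock(ticket_lock_t *lock)
{
	/* the uadd16-based unlock: advance owner by one */
	__atomic_fetch_add(&lock->tickets.owner, 1, __ATOMIC_RELEASE);
}
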
diff --git a/arch/arm/include/asm/spinlock_types.h b/arch/arm/include/asm/spinlock_types.h
index d14d197ae04..b262d2f8b47 100644
--- a/arch/arm/include/asm/spinlock_types.h
+++ b/arch/arm/include/asm/spinlock_types.h
@@ -5,11 +5,24 @@
 # error "please don't include this file directly"
 #endif
 
+#define TICKET_SHIFT	16
+
 typedef struct {
-	volatile unsigned int lock;
+	union {
+		u32 slock;
+		struct __raw_tickets {
+#ifdef __ARMEB__
+			u16 next;
+			u16 owner;
+#else
+			u16 owner;
+			u16 next;
+#endif
+		} tickets;
+	};
 } arch_spinlock_t;
 
-#define __ARCH_SPIN_LOCK_UNLOCKED	{ 0 }
+#define __ARCH_SPIN_LOCK_UNLOCKED	{ { 0 } }
 
 typedef struct {
 	volatile unsigned int lock;
diff --git a/arch/arm/include/asm/timex.h b/arch/arm/include/asm/timex.h
index 3be8de3adab..7f75e9a0cd3 100644
--- a/arch/arm/include/asm/timex.h
+++ b/arch/arm/include/asm/timex.h
@@ -14,11 +14,13 @@
 
 #include <mach/timex.h>
 
-typedef unsigned long cycles_t;
+typedef u64 cycles_t;
 
+#ifndef get_cycles
 static inline cycles_t get_cycles (void)
 {
 	return 0;
 }
+#endif
 
 #endif
diff --git a/arch/arm/include/asm/unistd.h b/arch/arm/include/asm/unistd.h
index c60a2944f95..23ae09ffc49 100644
--- a/arch/arm/include/asm/unistd.h
+++ b/arch/arm/include/asm/unistd.h
@@ -403,6 +403,9 @@
 #define __NR_sendmmsg			(__NR_SYSCALL_BASE+374)
 #define __NR_setns			(__NR_SYSCALL_BASE+375)
 
+#define __NR_LITMUS			(__NR_SYSCALL_BASE+376)
+#include <litmus/unistd_32.h>
+
 /*
  * The following SWIs are ARM private.
  */
diff --git a/arch/arm/kernel/calls.S b/arch/arm/kernel/calls.S
index 9943e9e74a1..c431abd8765 100644
--- a/arch/arm/kernel/calls.S
+++ b/arch/arm/kernel/calls.S
@@ -385,6 +385,20 @@
 		CALL(sys_syncfs)
 		CALL(sys_sendmmsg)
 /* 375 */	CALL(sys_setns)
+		CALL(sys_set_rt_task_param)
+		CALL(sys_get_rt_task_param)
+		CALL(sys_complete_job)
+		CALL(sys_od_open)
+/* 380 */	CALL(sys_od_close)
+		CALL(sys_litmus_lock)
+		CALL(sys_litmus_unlock)
+		CALL(sys_query_job_no)
+		CALL(sys_wait_for_job_release)
+/* 385 */	CALL(sys_wait_for_ts_release)
+		CALL(sys_release_ts)
+		CALL(sys_null_call)
+		CALL(sys_set_rt_task_mc_param)
+		CALL(sys_set_color_page_info)
 #ifndef syscalls_counted
 .equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls
 #define syscalls_counted
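
As a usage illustration (not part of the patch): on an EABI kernel, where __NR_SYSCALL_BASE is 0, user space reaches the new entries through the ordinary syscall mechanism. The proper __NR_* constants come from litmus/unistd_32.h, which is outside this diff; the raw number below is derived from the table above (base 376, so sys_null_call lands at 387) and assumes the LITMUS^RT convention that sys_null_call treats a NULL timestamp pointer as "don't record".

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

int main(void)
{
	/* 376 + 11 = 387: sys_null_call, per the table above */
	long ret = syscall(387, NULL);
	printf("sys_null_call returned %ld\n", ret);
	return 0;
}
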
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 14d06f50d16..c9f64693fd1 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -40,6 +40,9 @@
 #include <asm/ptrace.h>
 #include <asm/localtimer.h>
 
+#include <litmus/trace.h>
+#include <litmus/preempt.h>
+
 /*
  * as from 2.5, kernels no longer have an init_tasks structure
  * so we need some other way of telling a new secondary core
@@ -629,6 +632,9 @@ asmlinkage void __exception_irq_entry do_IPI(int ipinr, struct pt_regs *regs)
 		break;
 
 	case IPI_RESCHEDULE:
+		/* LITMUS^RT: take action based on scheduler state */
+		TS_SEND_RESCHED_END;
+		sched_state_ipi();
 		scheduler_ipi();
 		break;
 
@@ -664,6 +670,7 @@ asmlinkage void __exception_irq_entry do_IPI(int ipinr, struct pt_regs *regs)
 
 void smp_send_reschedule(int cpu)
 {
+	TS_SEND_RESCHED_START(cpu);
 	smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
 }
 
diff --git a/arch/arm/mach-realview/include/mach/timex.h b/arch/arm/mach-realview/include/mach/timex.h
index 4eeb069373c..e8bcc40d1f0 100644
--- a/arch/arm/mach-realview/include/mach/timex.h
+++ b/arch/arm/mach-realview/include/mach/timex.h
@@ -21,3 +21,30 @@
  */
 
 #define CLOCK_TICK_RATE		(50000000 / 16)
+
+#if defined(CONFIG_MACH_REALVIEW_PB11MP) || defined(CONFIG_MACH_REALVIEW_PB1176)
+
+static inline unsigned long realview_get_arm11_cp15_ccnt(void)
+{
+	unsigned long cycles;
+	/* Read CP15 CCNT register. */
+	asm volatile ("mrc p15, 0, %0, c15, c12, 1" : "=r" (cycles));
+	return cycles;
+}
+
+#define get_cycles realview_get_arm11_cp15_ccnt
+
+#elif defined(CONFIG_MACH_REALVIEW_PBA8)
+
+
+static inline unsigned long realview_get_a8_cp15_ccnt(void)
+{
+	unsigned long cycles;
+	/* Read CP15 CCNT register. */
+	asm volatile ("mrc p15, 0, %0, c9, c13, 0" : "=r" (cycles));
+	return cycles;
+}
+
+#define get_cycles realview_get_a8_cp15_ccnt
+
+#endif
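
The #ifndef get_cycles guard added to asm/timex.h above is what lets a machine header like this one substitute a real cycle counter for the default stub. A minimal sketch of how tracing code might use it (illustrative only; the CCNT must already have been enabled, and these 32-bit counters wrap quickly):

#include <asm/timex.h>	/* cycles_t, get_cycles() */

/* Illustrative only: measure a code section in CPU cycles. */
static cycles_t time_section(void (*fn)(void))
{
	cycles_t start = get_cycles();
	fn();
	return get_cycles() - start;
}
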
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index 0dddb54ea98..899c6650ae7 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -23,6 +23,8 @@
 #include <asm/cacheflush.h>
 #include <asm/hardware/cache-l2x0.h>
 
+#include <litmus/color.h>
+
 #define CACHE_LINE_SIZE		32
 
 static void __iomem *l2x0_base;
@@ -33,13 +35,6 @@ static u32 l2x0_cache_id;
 static unsigned int l2x0_sets;
 static unsigned int l2x0_ways;
 
-static inline bool is_pl310_rev(int rev)
-{
-	return (l2x0_cache_id &
-		(L2X0_CACHE_ID_PART_MASK | L2X0_CACHE_ID_REV_MASK)) ==
-			(L2X0_CACHE_ID_PART_L310 | rev);
-}
-
 static inline void cache_wait_way(void __iomem *reg, unsigned long mask)
 {
 	/* wait for cache operation by line or way to complete */
@@ -349,7 +344,7 @@ static void __init l2x0_unlock(__u32 cache_id)
 	}
 }
 
-void l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
+void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
 {
 	__u32 aux;
 	__u32 way_size = 0;
@@ -420,7 +415,14 @@ void l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
 	outer_cache.disable = l2x0_disable;
 	outer_cache.set_debug = l2x0_set_debug;
 
-	pr_info_once("%s cache controller enabled\n", type);
-	pr_info_once("l2x0: %d ways, CACHE_ID 0x%08x, AUX_CTRL 0x%08x, Cache size: %d B\n",
+	color_cache_info.size = l2x0_size;
+	color_cache_info.line_size = CACHE_LINE_SIZE;
+	color_cache_info.ways = l2x0_ways;
+	color_cache_info.sets = l2x0_sets;
+
+	printk(KERN_INFO "%s cache controller enabled\n", type);
+	printk(KERN_INFO "l2x0: %d ways, CACHE_ID 0x%08x, AUX_CTRL 0x%08x, Cache size: %d B\n",
 			l2x0_ways, l2x0_cache_id, aux, l2x0_size);
+
+	litmus_setup_lockdown(l2x0_base, l2x0_cache_id);
 }
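
The four color_cache_info fields recorded here are exactly what page coloring needs: a physical page's color is determined by which cache sets it maps to, so the number of distinct colors is way_size / PAGE_SIZE = (sets * line_size) / PAGE_SIZE. A worked sketch of that arithmetic (color_cache_info itself is declared in litmus/color.h, outside this diff):

/* Example for a 1 MiB, 16-way PL310 with 32-byte lines:
 * sets     = 1 MiB / (16 * 32 B) = 2048
 * way size = 2048 * 32 B         = 64 KiB
 * colors   = 64 KiB / 4 KiB      = 16
 */
static unsigned long num_page_colors(unsigned long sets,
				     unsigned long line_size,
				     unsigned long page_size)
{
	return (sets * line_size) / page_size;
}
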
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 1a6f20d4e7e..30910bb5c95 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -605,3 +605,12 @@ source "security/Kconfig"
 source "crypto/Kconfig"
 
 source "lib/Kconfig"
+
+config ARCH_HAS_FEATHER_TRACE
+	def_bool n
+
+# Probably add these later
+config ARCH_HAS_SEND_PULL_TIMERS
+	def_bool n
+
+source "litmus/Kconfig"
diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
index 96891769497..862ee5c7dad 100644
--- a/arch/sparc/include/asm/spinlock_64.h
+++ b/arch/sparc/include/asm/spinlock_64.h
@@ -13,82 +13,72 @@
  * and rebuild your kernel.
  */
 
-/* Because we play games to save cycles in the non-contention case, we
- * need to be extra careful about branch targets into the "spinning"
- * code.  They live in their own section, but the newer V9 branches
- * have a shorter range than the traditional 32-bit sparc branch
- * variants.  The rule is that the branches that go into and out of
- * the spinner sections must be pre-V9 branches.
- */
-
-#define arch_spin_is_locked(lp)	((lp)->lock != 0)
+#define arch_spin_is_locked(lp)	((lp)->tail != (lp)->head)
 
 #define arch_spin_unlock_wait(lp)	\
 	do {	rmb();			\
-	} while((lp)->lock)
+	} while((lp)->tail != (lp)->head)
 
 static inline void arch_spin_lock(arch_spinlock_t *lock)
 {
-	unsigned long tmp;
+	int ticket, tmp;
 
 	__asm__ __volatile__(
-"1:	ldstub		[%1], %0\n"
-"	brnz,pn		%0, 2f\n"
-"	 nop\n"
-"	.subsection	2\n"
-"2:	ldub		[%1], %0\n"
-"	brnz,pt		%0, 2b\n"
-"	 nop\n"
-"	ba,a,pt		%%xcc, 1b\n"
-"	.previous"
-	: "=&r" (tmp)
-	: "r" (lock)
+"1:	lduw		[%2], %0	\n"	/* read ticket */
+"	add		%0, 1, %1	\n"
+"	cas		[%2], %0, %1	\n"
+"	cmp		%0, %1		\n"
+"	be,a,pt		%%icc, 2f	\n"
+"	 nop				\n"
+"	ba		1b\n"
+"	 nop				\n"
+"2:	lduw		[%3], %1	\n"
+"	cmp		%0, %1		\n"
+"	be,a,pt		%%icc, 3f	\n"
+"	 nop				\n"
+"	ba		2b\n"
+"3:	nop"
+	: "=&r" (ticket), "=&r" (tmp)
+	: "r" (&lock->tail), "r" (&lock->head)
 	: "memory");
 }
 
 static inline int arch_spin_trylock(arch_spinlock_t *lock)
 {
-	unsigned long result;
-
+	int tail, head;
 	__asm__ __volatile__(
-"	ldstub		[%1], %0\n"
-	: "=r" (result)
-	: "r" (lock)
+"	lduw		[%2], %0	\n"	/* read tail */
+"	lduw		[%3], %1	\n"	/* read head */
+"	cmp		%0, %1		\n"
+"	bne,a,pn	%%icc, 1f	\n"
+"	 nop				\n"
+"	inc		%1		\n"
+"	cas		[%2], %0, %1	\n"	/* try to inc ticket */
+"1:	"
+	: "=&r" (tail), "=&r" (head)
+	: "r" (&lock->tail), "r" (&lock->head)
 	: "memory");
 
-	return (result == 0UL);
+	return (tail == head);
 }
 
 static inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
+	int tmp;
 	__asm__ __volatile__(
-"	stb		%%g0, [%0]"
-	: /* No outputs */
-	: "r" (lock)
+"	lduw		[%1], %0	\n"
+"	inc		%0		\n"
+"	st		%0, [%1]	\n"
+	: "=&r" (tmp)
+	: "r" (&lock->head)
 	: "memory");
 }
 
-static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
-{
-	unsigned long tmp1, tmp2;
+/* We don't handle this yet, but it looks like not re-enabling the interrupts
+ * works fine, too. For example, lockdep also does it like this.
+ */
+#define arch_spin_lock_flags(l, f) arch_spin_lock(l)
 
-	__asm__ __volatile__(
-"1:	ldstub		[%2], %0\n"
-"	brnz,pn		%0, 2f\n"
-"	 nop\n"
-"	.subsection	2\n"
-"2:	rdpr		%%pil, %1\n"
-"	wrpr		%3, %%pil\n"
-"3:	ldub		[%2], %0\n"
-"	brnz,pt		%0, 3b\n"
-"	 nop\n"
-"	ba,pt		%%xcc, 1b\n"
-"	 wrpr		%1, %%pil\n"
-"	.previous"
-	: "=&r" (tmp1), "=&r" (tmp2)
-	: "r"(lock), "r"(flags)
-	: "memory");
-}
 
 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
 
diff --git a/arch/sparc/include/asm/spinlock_types.h b/arch/sparc/include/asm/spinlock_types.h
index 9c454fdeaad..49b89fe2ccf 100644
--- a/arch/sparc/include/asm/spinlock_types.h
+++ b/arch/sparc/include/asm/spinlock_types.h
@@ -6,10 +6,11 @@
 #endif
 
 typedef struct {
-	volatile unsigned char lock;
+	volatile int tail;
+	volatile int head;
 } arch_spinlock_t;
 
-#define __ARCH_SPIN_LOCK_UNLOCKED	{ 0 }
+#define __ARCH_SPIN_LOCK_UNLOCKED	{ 0, 0 }
 
 typedef struct {
 	volatile unsigned int lock;
diff --git a/arch/sparc/include/asm/unistd.h b/arch/sparc/include/asm/unistd.h
index 6260d5deeab..54c43b6bc1d 100644
--- a/arch/sparc/include/asm/unistd.h
+++ b/arch/sparc/include/asm/unistd.h
@@ -407,7 +407,11 @@
 #define __NR_sendmmsg		336
 #define __NR_setns		337
 
-#define NR_syscalls		338
+#define __NR_LITMUS		338
+
+#include "litmus/unistd_32.h"
+
+#define NR_syscalls		338 + NR_litmus_syscalls
 
 #ifdef __32bit_syscall_numbers__
 /* Sparc 32-bit only has the "setresuid32", "getresuid32" variants,
diff --git a/arch/sparc/kernel/systbls_32.S b/arch/sparc/kernel/systbls_32.S
index 09d8ec45445..699bcc165d2 100644
--- a/arch/sparc/kernel/systbls_32.S
+++ b/arch/sparc/kernel/systbls_32.S
@@ -85,3 +85,6 @@ sys_call_table:
 /*325*/	.long sys_pwritev, sys_rt_tgsigqueueinfo, sys_perf_event_open, sys_recvmmsg, sys_fanotify_init
 /*330*/	.long sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, sys_open_by_handle_at, sys_clock_adjtime
 /*335*/	.long sys_syncfs, sys_sendmmsg, sys_setns
+/*338*/	.long sys_set_rt_task_param, sys_get_rt_task_param
+/*340*/	.long sys_complete_job, sys_od_open, sys_od_close, sys_litmus_lock, sys_litmus_unlock
+/*345*/	.long sys_query_job_no, sys_wait_for_job_release, sys_wait_for_ts_release, sys_release_ts, sys_null_call
diff --git a/arch/sparc/kernel/systbls_64.S b/arch/sparc/kernel/systbls_64.S
index edbec45d468..0c1e6434803 100644
--- a/arch/sparc/kernel/systbls_64.S
+++ b/arch/sparc/kernel/systbls_64.S
@@ -85,7 +85,9 @@ sys_call_table32:
 /*320*/	.word sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, compat_sys_preadv
 	.word compat_sys_pwritev, compat_sys_rt_tgsigqueueinfo, sys_perf_event_open, compat_sys_recvmmsg, sys_fanotify_init
 /*330*/	.word sys32_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, compat_sys_open_by_handle_at, compat_sys_clock_adjtime
-	.word sys_syncfs, compat_sys_sendmmsg, sys_setns
+	.word sys_syncfs, sys_sendmmsg, sys_setns, sys_set_rt_task_param, sys_get_rt_task_param
+/*340*/	.word sys_complete_job, sys_od_open, sys_od_close, sys_litmus_lock, sys_litmus_unlock
+	.word sys_query_job_no, sys_wait_for_job_release, sys_wait_for_ts_release, sys_release_ts, sys_null_call
 
 #endif /* CONFIG_COMPAT */
 
@@ -162,4 +164,6 @@ sys_call_table:
 /*320*/	.word sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, sys_preadv
 	.word sys_pwritev, sys_rt_tgsigqueueinfo, sys_perf_event_open, sys_recvmmsg, sys_fanotify_init
 /*330*/	.word sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, sys_open_by_handle_at, sys_clock_adjtime
-	.word sys_syncfs, sys_sendmmsg, sys_setns
+	.word sys_syncfs, sys_sendmmsg, sys_setns, sys_set_rt_task_param, sys_get_rt_task_param
+/*340*/	.word sys_complete_job, sys_od_open, sys_od_close, sys_litmus_lock, sys_litmus_unlock
+	.word sys_query_job_no, sys_wait_for_job_release, sys_wait_for_ts_release, sys_release_ts, sys_null_call
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index b8cd5448b0e..4ff921c9f84 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -2151,3 +2151,11 @@ source "crypto/Kconfig"
 source "arch/x86/kvm/Kconfig"
 
 source "lib/Kconfig"
+
+config ARCH_HAS_FEATHER_TRACE
+	def_bool y
+
+config ARCH_HAS_SEND_PULL_TIMERS
+	def_bool y
+
+source "litmus/Kconfig"
diff --git a/arch/x86/include/asm/entry_arch.h b/arch/x86/include/asm/entry_arch.h
index 0baa628e330..e2c555f2191 100644
--- a/arch/x86/include/asm/entry_arch.h
+++ b/arch/x86/include/asm/entry_arch.h
@@ -13,6 +13,7 @@
 BUILD_INTERRUPT(reschedule_interrupt,RESCHEDULE_VECTOR)
 BUILD_INTERRUPT(call_function_interrupt,CALL_FUNCTION_VECTOR)
 BUILD_INTERRUPT(call_function_single_interrupt,CALL_FUNCTION_SINGLE_VECTOR)
+BUILD_INTERRUPT(pull_timers_interrupt,PULL_TIMERS_VECTOR)
 BUILD_INTERRUPT(irq_move_cleanup_interrupt,IRQ_MOVE_CLEANUP_VECTOR)
 BUILD_INTERRUPT(reboot_interrupt,REBOOT_VECTOR)
 
diff --git a/arch/x86/include/asm/feather_trace.h b/arch/x86/include/asm/feather_trace.h
new file mode 100644
index 00000000000..4fd31633405
--- /dev/null
+++ b/arch/x86/include/asm/feather_trace.h
@@ -0,0 +1,17 @@
+#ifndef _ARCH_FEATHER_TRACE_H
+#define _ARCH_FEATHER_TRACE_H
+
+#include <asm/msr.h>
+
+static inline unsigned long long ft_timestamp(void)
+{
+	return __native_read_tsc();
+}
+
+#ifdef CONFIG_X86_32
+#include "feather_trace_32.h"
+#else
+#include "feather_trace_64.h"
+#endif
+
+#endif
diff --git a/arch/x86/include/asm/feather_trace_32.h b/arch/x86/include/asm/feather_trace_32.h
new file mode 100644
index 00000000000..75e81a9f938
--- /dev/null
+++ b/arch/x86/include/asm/feather_trace_32.h
@@ -0,0 +1,115 @@
+/* Copyright (c) 2007-2012 Björn Brandenburg, <bbb@mpi-sws.org>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/* Do not directly include this file. Include feather_trace.h instead */
+
+#define feather_callback __attribute__((regparm(3))) __attribute__((used))
+
+/*
+ * Make the compiler reload any register that is not saved in a cdecl function
+ * call (minus the registers that we explicitly clobber as output registers).
+ */
+#define __FT_CLOBBER_LIST0 "memory", "cc", "eax", "edx", "ecx"
+#define __FT_CLOBBER_LIST1 "memory", "cc", "eax", "ecx"
+#define __FT_CLOBBER_LIST2 "memory", "cc", "eax"
+#define __FT_CLOBBER_LIST3 "memory", "cc", "eax"
+
+#define __FT_TMP1(x) "=d" (x)
+#define __FT_ARG1(x) "0" ((long) (x))
+#define __FT_TMP2(x) "=c" (x)
+#define __FT_ARG2(x) "1" ((long) (x))
+
+#define __FT_ARG3(x) "r" ((long) (x))
+
+#define ft_event(id, callback) \
+	__asm__ __volatile__( \
+	    "1: jmp 2f \n\t" \
+	    " call " #callback " \n\t" \
+	    ".section __event_table, \"aw\" \n\t" \
+	    ".long " #id ", 0, 1b, 2f \n\t" \
+	    ".previous \n\t" \
+	    "2: \n\t" \
+	: : : __FT_CLOBBER_LIST0)
+
+#define ft_event0(id, callback) \
+	__asm__ __volatile__( \
+	    "1: jmp 2f \n\t" \
+	    " movl $" #id ", %%eax \n\t" \
+	    " call " #callback " \n\t" \
+	    ".section __event_table, \"aw\" \n\t" \
+	    ".long " #id ", 0, 1b, 2f \n\t" \
+	    ".previous \n\t" \
+	    "2: \n\t" \
+	: : : __FT_CLOBBER_LIST0)
+
+#define ft_event1(id, callback, param) \
+	do { \
+		long __ft_tmp1; \
+		__asm__ __volatile__( \
+		    "1: jmp 2f \n\t" \
+		    " movl $" #id ", %%eax \n\t" \
+		    " call " #callback " \n\t" \
+		    ".section __event_table, \"aw\" \n\t" \
+		    ".long " #id ", 0, 1b, 2f \n\t" \
+		    ".previous \n\t" \
+		    "2: \n\t" \
+		    : __FT_TMP1(__ft_tmp1) \
+		    : __FT_ARG1(param) \
+		    : __FT_CLOBBER_LIST1); \
+	} while (0);
+
+#define ft_event2(id, callback, param, param2) \
+	do { \
+		long __ft_tmp1, __ft_tmp2; \
+		__asm__ __volatile__( \
+		    "1: jmp 2f \n\t" \
+		    " movl $" #id ", %%eax \n\t" \
+		    " call " #callback " \n\t" \
+		    ".section __event_table, \"aw\" \n\t" \
+		    ".long " #id ", 0, 1b, 2f \n\t" \
+		    ".previous \n\t" \
+		    "2: \n\t" \
+		    : __FT_TMP1(__ft_tmp1), __FT_TMP2(__ft_tmp2) \
+		    : __FT_ARG1(param), __FT_ARG2(param2) \
+		    : __FT_CLOBBER_LIST2); \
+	} while (0);
+
+
+#define ft_event3(id, callback, param, param2, param3) \
+	do { \
+		long __ft_tmp1, __ft_tmp2; \
+		__asm__ __volatile__( \
+		    "1: jmp 2f \n\t" \
+		    " subl $4, %%esp \n\t" \
+		    " movl $" #id ", %%eax \n\t" \
+		    " movl %2, (%%esp) \n\t" \
+		    " call " #callback " \n\t" \
+		    " addl $4, %%esp \n\t" \
+		    ".section __event_table, \"aw\" \n\t" \
+		    ".long " #id ", 0, 1b, 2f \n\t" \
+		    ".previous \n\t" \
+		    "2: \n\t" \
+		    : __FT_TMP1(__ft_tmp1), __FT_TMP2(__ft_tmp2) \
+		    : __FT_ARG1(param), __FT_ARG2(param2), __FT_ARG3(param3) \
+		    : __FT_CLOBBER_LIST3); \
+	} while (0);
diff --git a/arch/x86/include/asm/feather_trace_64.h b/arch/x86/include/asm/feather_trace_64.h
new file mode 100644
index 00000000000..5ce49e2eebb
--- /dev/null
+++ b/arch/x86/include/asm/feather_trace_64.h
@@ -0,0 +1,124 @@
+/* Copyright (c) 2010 Andrea Bastoni, <bastoni@cs.unc.edu>
+ * Copyright (c) 2012 Björn Brandenburg, <bbb@mpi-sws.org>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/* Do not directly include this file. Include feather_trace.h instead */
+
+/* regparm is the default on x86_64 */
+#define feather_callback __attribute__((used))
+
+#define __FT_EVENT_TABLE(id,from,to) \
+	".section __event_table, \"aw\"\n\t" \
+	".balign 8\n\t" \
+	".quad " #id ", 0, " #from ", " #to " \n\t" \
+	".previous \n\t"
+
+/*
+ * x86_64 caller only owns rbp, rbx, r12-r15;
+ * the callee can freely modify the others.
+ */
+#define __FT_CLOBBER_LIST0 "memory", "cc", "rdi", "rsi", "rdx", "rcx", \
+	"r8", "r9", "r10", "r11", "rax"
+
+#define __FT_CLOBBER_LIST1 "memory", "cc", "rdi", "rdx", "rcx", \
+	"r8", "r9", "r10", "r11", "rax"
+
+#define __FT_CLOBBER_LIST2 "memory", "cc", "rdi", "rcx", \
+	"r8", "r9", "r10", "r11", "rax"
+
+#define __FT_CLOBBER_LIST3 "memory", "cc", "rdi", \
+	"r8", "r9", "r10", "r11", "rax"
+
+/* The registers RDI, RSI, RDX, RCX, R8 and R9 are used for integer and pointer
+ * arguments. */
+
+/* RSI */
+#define __FT_TMP1(x) "=S" (x)
+#define __FT_ARG1(x) "0" ((long) (x))
+
+/* RDX */
+#define __FT_TMP2(x) "=d" (x)
+#define __FT_ARG2(x) "1" ((long) (x))
+
+/* RCX */
+#define __FT_TMP3(x) "=c" (x)
+#define __FT_ARG3(x) "2" ((long) (x))
+
+#define ft_event(id, callback) \
+	__asm__ __volatile__( \
+	    "1: jmp 2f \n\t" \
+	    " call " #callback " \n\t" \
+	    __FT_EVENT_TABLE(id,1b,2f) \
+	    "2: \n\t" \
+	: : : __FT_CLOBBER_LIST0)
+
+#define ft_event0(id, callback) \
+	__asm__ __volatile__( \
+	    "1: jmp 2f \n\t" \
+	    " movq $" #id ", %%rdi \n\t" \
+	    " call " #callback " \n\t" \
+	    __FT_EVENT_TABLE(id,1b,2f) \
+	    "2: \n\t" \
+	: : : __FT_CLOBBER_LIST0)
+
+#define ft_event1(id, callback, param) \
+	do { \
+		long __ft_tmp1; \
+		__asm__ __volatile__( \
+		    "1: jmp 2f \n\t" \
+		    " movq $" #id ", %%rdi \n\t" \
+		    " call " #callback " \n\t" \
+		    __FT_EVENT_TABLE(id,1b,2f) \
+		    "2: \n\t" \
+		    : __FT_TMP1(__ft_tmp1) \
+		    : __FT_ARG1(param) \
+		    : __FT_CLOBBER_LIST1); \
+	} while (0);
+
+#define ft_event2(id, callback, param, param2) \
+	do { \
+		long __ft_tmp1, __ft_tmp2; \
+		__asm__ __volatile__( \
+		    "1: jmp 2f \n\t" \
+		    " movq $" #id ", %%rdi \n\t" \
+		    " call " #callback " \n\t" \
+		    __FT_EVENT_TABLE(id,1b,2f) \
+		    "2: \n\t" \
+		    : __FT_TMP1(__ft_tmp1), __FT_TMP2(__ft_tmp2) \
+		    : __FT_ARG1(param), __FT_ARG2(param2) \
+		    : __FT_CLOBBER_LIST2); \
+	} while (0);
+
+#define ft_event3(id, callback, param, param2, param3) \
+	do { \
+		long __ft_tmp1, __ft_tmp2, __ft_tmp3; \
+		__asm__ __volatile__( \
+		    "1: jmp 2f \n\t" \
+		    " movq $" #id ", %%rdi \n\t" \
+		    " call " #callback " \n\t" \
+		    __FT_EVENT_TABLE(id,1b,2f) \
+		    "2: \n\t" \
+		    : __FT_TMP1(__ft_tmp1), __FT_TMP2(__ft_tmp2), __FT_TMP3(__ft_tmp3) \
+		    : __FT_ARG1(param), __FT_ARG2(param2), __FT_ARG3(param3) \
+		    : __FT_CLOBBER_LIST3); \
+	} while (0);
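
To make the mechanism concrete: an instrumentation site expands to a two-byte short jump that normally skips the call, and a feather_callback handler receives the event id in %rdi plus any parameters in %rsi/%rdx/%rcx, per the constraints above. A hypothetical example under those assumptions; the event id 501 and both function names are invented for illustration:

#include <litmus/feather_trace.h>

/* Hypothetical handler: id arrives in %rdi, the parameter in %rsi. */
feather_callback void trace_lock_acquired(unsigned long id,
					  unsigned long cpu)
{
	/* e.g. record ft_timestamp() into a per-cpu buffer */
}

static void lock_path(int cpu)
{
	/* Emits "1: jmp 2f; call trace_lock_acquired; 2:" plus an
	 * __event_table entry; the jmp is patched to fall through
	 * only while event 501 is enabled (see ft_enable_event()). */
	ft_event1(501, trace_lock_acquired, cpu);
}
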
diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
index eb92a6ed2be..8f1e5445d37 100644
--- a/arch/x86/include/asm/hw_irq.h
+++ b/arch/x86/include/asm/hw_irq.h
@@ -76,6 +76,8 @@ extern void threshold_interrupt(void);
 extern void call_function_interrupt(void);
 extern void call_function_single_interrupt(void);
 
+extern void pull_timers_interrupt(void);
+
 /* IOAPIC */
 #define IO_APIC_IRQ(x) (((x) >= NR_IRQS_LEGACY) || ((1<<(x)) & io_apic_irqs))
 extern unsigned long io_apic_irqs;
@@ -154,6 +156,7 @@ extern asmlinkage void smp_irq_move_cleanup_interrupt(void);
 extern void smp_reschedule_interrupt(struct pt_regs *);
 extern void smp_call_function_interrupt(struct pt_regs *);
 extern void smp_call_function_single_interrupt(struct pt_regs *);
+extern void smp_pull_timers_interrupt(struct pt_regs *);
 #ifdef CONFIG_X86_32
 extern void smp_invalidate_interrupt(struct pt_regs *);
 #else
diff --git a/arch/x86/include/asm/irq_vectors.h b/arch/x86/include/asm/irq_vectors.h
index 7e50f06393a..7de6ad70365 100644
--- a/arch/x86/include/asm/irq_vectors.h
+++ b/arch/x86/include/asm/irq_vectors.h
@@ -130,6 +130,13 @@
 #define INVALIDATE_TLB_VECTOR_START	\
 	(INVALIDATE_TLB_VECTOR_END-NUM_INVALIDATE_TLB_VECTORS+1)
 
+/*
+ * LITMUS^RT pull timers IRQ vector
+ * Make sure it's below the above max 32 vectors.
+ */
+#define PULL_TIMERS_VECTOR		0xce
+
+
 #define NR_VECTORS			 256
 
 #define FPU_IRQ				  13
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 0d1171c9772..7e6a7b66203 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -166,6 +166,10 @@ extern void print_cpu_info(struct cpuinfo_x86 *);
 extern void init_scattered_cpuid_features(struct cpuinfo_x86 *c);
 extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
 extern unsigned short num_cache_leaves;
+#ifdef CONFIG_SYSFS
+extern int get_shared_cpu_map(cpumask_var_t mask,
+			      unsigned int cpu, int index);
+#endif
 
 extern void detect_extended_topology(struct cpuinfo_x86 *c);
 extern void detect_ht(struct cpuinfo_x86 *c);
diff --git a/arch/x86/include/asm/unistd_32.h b/arch/x86/include/asm/unistd_32.h
index 593485b38ab..2f6e127db30 100644
--- a/arch/x86/include/asm/unistd_32.h
+++ b/arch/x86/include/asm/unistd_32.h
@@ -353,9 +353,13 @@
 #define __NR_sendmmsg		345
 #define __NR_setns		346
 
+#define __NR_LITMUS		347
+
+#include "litmus/unistd_32.h"
+
 #ifdef __KERNEL__
 
-#define NR_syscalls 347
+#define NR_syscalls 347 + NR_litmus_syscalls
 
 #define __ARCH_WANT_IPC_PARSE_VERSION
 #define __ARCH_WANT_OLD_READDIR
diff --git a/arch/x86/include/asm/unistd_64.h b/arch/x86/include/asm/unistd_64.h
index 20104057344..b2836462d2f 100644
--- a/arch/x86/include/asm/unistd_64.h
+++ b/arch/x86/include/asm/unistd_64.h
@@ -684,6 +684,14 @@ __SYSCALL(__NR_setns, sys_setns)
 #define __NR_getcpu				309
 __SYSCALL(__NR_getcpu, sys_getcpu)
 
+#define __NR_LITMUS				309
+
+#ifdef __KERNEL__
+#include "litmus/unistd_64.h"
+#else
+#include "../../../../include/litmus/unistd_64.h"
+#endif
+
 #ifndef __NO_STUBS
 #define __ARCH_WANT_OLD_READDIR
 #define __ARCH_WANT_OLD_STAT
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 82f2912155a..c84954ad12f 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -99,6 +99,8 @@ obj-$(CONFIG_X86_CHECK_BIOS_CORRUPTION) += check.o
 obj-$(CONFIG_SWIOTLB)			+= pci-swiotlb.o
 obj-$(CONFIG_OF)			+= devicetree.o
 
+obj-$(CONFIG_FEATHER_TRACE)		+= ft_event.o
+
 ###
 # 64 bit specific files
 ifeq ($(CONFIG_X86_64),y)
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index c105c533ed9..76c5cc82163 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -20,6 +20,8 @@
 #include <asm/amd_nb.h>
 #include <asm/smp.h>
 
+#include <litmus/color.h>
+
 #define LVL_1_INST	1
 #define LVL_1_DATA	2
 #define LVL_2		3
@@ -747,6 +749,23 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
 static DEFINE_PER_CPU(struct _cpuid4_info *, ici_cpuid4_info);
 #define CPUID4_INFO_IDX(x, y)	(&((per_cpu(ici_cpuid4_info, x))[y]))
 
+/* returns CPUs that share the index cache with cpu */
+int get_shared_cpu_map(cpumask_var_t mask, unsigned int cpu, int index)
+{
+	int ret = 0;
+	struct _cpuid4_info *this_leaf;
+
+	if (index >= num_cache_leaves) {
+		index = num_cache_leaves - 1;
+		ret = index;
+	}
+
+	this_leaf = CPUID4_INFO_IDX(cpu,index);
+	cpumask_copy(mask, to_cpumask(this_leaf->shared_cpu_map));
+
+	return ret;
+}
+
 #ifdef CONFIG_SMP
 static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
 {
@@ -1198,6 +1217,19 @@ static struct notifier_block __cpuinitdata cacheinfo_cpu_notifier = {
 	.notifier_call = cacheinfo_cpu_callback,
 };
 
+static void set_color_vars(void)
+{
+	struct _cpuid4_info *leaf = CPUID4_INFO_IDX(
+			smp_processor_id(), num_cache_leaves - 1);
+	color_cache_info.size = leaf->size;
+	color_cache_info.line_size =
+		(unsigned long)leaf->ebx.split.coherency_line_size + 1;
+	color_cache_info.ways =
+		(unsigned long)leaf->ebx.split.ways_of_associativity + 1;
+	color_cache_info.sets =
+		(unsigned long)leaf->ecx.split.number_of_sets + 1;
+}
+
 static int __cpuinit cache_sysfs_init(void)
 {
 	int i;
@@ -1214,6 +1246,7 @@ static int __cpuinit cache_sysfs_init(void)
 		return err;
 	}
 	register_hotcpu_notifier(&cacheinfo_cpu_notifier);
+	set_color_vars();
 	return 0;
 }
 
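
A short sketch of how a caller might use the new get_shared_cpu_map() helper; the surrounding function name is hypothetical. Passing an out-of-range index deliberately clamps to the last cache leaf, i.e. the last-level cache:

#include <linux/cpumask.h>
#include <linux/gfp.h>
#include <linux/kernel.h>

/* Illustrative: log which CPUs share CPU 0's last-level cache. */
static void show_llc_sharers(void)
{
	cpumask_var_t mask;
	char buf[128];

	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
		return;
	/* index 1000 is out of range on purpose: clamps to the LLC */
	get_shared_cpu_map(mask, 0, 1000);
	cpumask_scnprintf(buf, sizeof(buf), mask);
	printk(KERN_INFO "CPUs sharing CPU0's LLC: %s\n", buf);
	free_cpumask_var(mask);
}
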
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index 6419bb05ecd..e5d2d3fa7a0 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -993,6 +993,8 @@ apicinterrupt CALL_FUNCTION_VECTOR \
 	call_function_interrupt smp_call_function_interrupt
 apicinterrupt RESCHEDULE_VECTOR \
 	reschedule_interrupt smp_reschedule_interrupt
+apicinterrupt PULL_TIMERS_VECTOR \
+	pull_timers_interrupt smp_pull_timers_interrupt
 #endif
 
 apicinterrupt ERROR_APIC_VECTOR \
diff --git a/arch/x86/kernel/ft_event.c b/arch/x86/kernel/ft_event.c
new file mode 100644
index 00000000000..37cc3325271
--- /dev/null
+++ b/arch/x86/kernel/ft_event.c
@@ -0,0 +1,118 @@
+#include <linux/types.h>
+
+#include <litmus/feather_trace.h>
+
+/* the feather trace management functions assume
+ * exclusive access to the event table
+ */
+
+#ifndef CONFIG_DEBUG_RODATA
+
+#define BYTE_JUMP	0xeb
+#define BYTE_JUMP_LEN	0x02
+
+/* for each event, there is an entry in the event table */
+struct trace_event {
+	long	id;
+	long	count;
+	long	start_addr;
+	long	end_addr;
+};
+
+extern struct trace_event __start___event_table[];
+extern struct trace_event __stop___event_table[];
+
+/* Workaround: if no events are defined, then the event_table section does not
+ * exist and the above references cause linker errors. This could probably be
+ * fixed by adjusting the linker script, but it is easier to maintain for us if
+ * we simply create a dummy symbol in the event table section.
+ */
+int __event_table_dummy[0] __attribute__ ((section("__event_table")));
+
+int ft_enable_event(unsigned long id)
+{
+	struct trace_event* te = __start___event_table;
+	int count = 0;
+	char* delta;
+	unsigned char* instr;
+
+	while (te < __stop___event_table) {
+		if (te->id == id && ++te->count == 1) {
+			instr = (unsigned char*) te->start_addr;
+			/* make sure we don't clobber something wrong */
+			if (*instr == BYTE_JUMP) {
+				delta = (((unsigned char*) te->start_addr) + 1);
+				*delta = 0;
+			}
+		}
+		if (te->id == id)
+			count++;
+		te++;
+	}
+
+	printk(KERN_DEBUG "ft_enable_event: enabled %d events\n", count);
+	return count;
+}
+
+int ft_disable_event(unsigned long id)
+{
+	struct trace_event* te = __start___event_table;
+	int count = 0;
+	char* delta;
+	unsigned char* instr;
+
+	while (te < __stop___event_table) {
+		if (te->id == id && --te->count == 0) {
+			instr = (unsigned char*) te->start_addr;
+			if (*instr == BYTE_JUMP) {
+				delta = (((unsigned char*) te->start_addr) + 1);
+				*delta = te->end_addr - te->start_addr -
+					BYTE_JUMP_LEN;
+			}
+		}
+		if (te->id == id)
+			count++;
+		te++;
+	}
+
+	printk(KERN_DEBUG "ft_disable_event: disabled %d events\n", count);
+	return count;
+}
+
+int ft_disable_all_events(void)
+{
+	struct trace_event* te = __start___event_table;
+	int count = 0;
+	char* delta;
+	unsigned char* instr;
+
+	while (te < __stop___event_table) {
+		if (te->count) {
+			instr = (unsigned char*) te->start_addr;
+			if (*instr == BYTE_JUMP) {
+				delta = (((unsigned char*) te->start_addr)
+					 + 1);
+				*delta = te->end_addr - te->start_addr -
+					BYTE_JUMP_LEN;
+				te->count = 0;
+				count++;
+			}
+		}
+		te++;
+	}
+	return count;
+}
+
+int ft_is_event_enabled(unsigned long id)
+{
+	struct trace_event* te = __start___event_table;
+
+	while (te < __stop___event_table) {
+		if (te->id == id)
+			return te->count;
+		te++;
+	}
+	return 0;
+}
+
+#endif
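
The patching above relies on every ft_event* site beginning with a two-byte short jump, eb XX. ft_enable_event() zeroes the displacement so execution falls through into the call; ft_disable_event() restores end_addr - start_addr - BYTE_JUMP_LEN so the jump skips the event block again. A worked example of that arithmetic, with hypothetical addresses (user-space C, for illustration only):

#include <stdio.h>

int main(void)
{
	/* Hypothetical 7-byte event site:
	 *   0x1000: eb 05            jmp +5 (event disabled)
	 *   0x1002: e8 xx xx xx xx   call callback
	 *   0x1007: ...              end_addr
	 */
	unsigned long start_addr = 0x1000, end_addr = 0x1007;

	/* displacement restored by ft_disable_event(): 7 - 2 = 5 */
	printf("disabled: eb %02lx\n", end_addr - start_addr - 2);
	/* displacement written by ft_enable_event(): fall through */
	printf("enabled:  eb 00\n");
	return 0;
}
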
diff --git a/arch/x86/kernel/irqinit.c b/arch/x86/kernel/irqinit.c
index b3300e6bace..f3a90e926f5 100644
--- a/arch/x86/kernel/irqinit.c
+++ b/arch/x86/kernel/irqinit.c
@@ -252,6 +252,9 @@ static void __init smp_intr_init(void)
 	alloc_intr_gate(CALL_FUNCTION_SINGLE_VECTOR,
 			call_function_single_interrupt);
 
+	/* IPI for hrtimer pulling on remote cpus */
+	alloc_intr_gate(PULL_TIMERS_VECTOR, pull_timers_interrupt);
+
 	/* Low priority IPI to cleanup after moving an irq */
 	set_intr_gate(IRQ_MOVE_CLEANUP_VECTOR, irq_move_cleanup_interrupt);
 	set_bit(IRQ_MOVE_CLEANUP_VECTOR, used_vectors);
diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
index 013e7eba83b..ed4c4f54e2a 100644
--- a/arch/x86/kernel/smp.c
+++ b/arch/x86/kernel/smp.c
@@ -23,6 +23,10 @@
 #include <linux/cpu.h>
 #include <linux/gfp.h>
 
+#include <litmus/preempt.h>
+#include <litmus/debug_trace.h>
+#include <litmus/trace.h>
+
 #include <asm/mtrr.h>
 #include <asm/tlbflush.h>
 #include <asm/mmu_context.h>
@@ -118,6 +122,7 @@ static void native_smp_send_reschedule(int cpu)
 		WARN_ON(1);
 		return;
 	}
+	TS_SEND_RESCHED_START(cpu);
 	apic->send_IPI_mask(cpumask_of(cpu), RESCHEDULE_VECTOR);
 }
 
@@ -147,6 +152,16 @@ void native_send_call_func_ipi(const struct cpumask *mask)
 	free_cpumask_var(allbutself);
 }
 
+/* trigger timers on remote cpu */
+void smp_send_pull_timers(int cpu)
+{
+	if (unlikely(cpu_is_offline(cpu))) {
+		WARN_ON(1);
+		return;
+	}
+	apic->send_IPI_mask(cpumask_of(cpu), PULL_TIMERS_VECTOR);
+}
+
 /*
  * this function calls the 'stop' function on all other CPUs in the system.
  */
@@ -199,8 +214,15 @@ static void native_stop_other_cpus(int wait)
 void smp_reschedule_interrupt(struct pt_regs *regs)
 {
 	ack_APIC_irq();
+	/* LITMUS^RT: this IPI might need to trigger the sched state machine. */
+	sched_state_ipi();
 	inc_irq_stat(irq_resched_count);
+	/*
+	 * LITMUS^RT: starting from 3.0 schedule_ipi() actually does something.
+	 * This may increase IPI latencies compared with previous versions.
+	 */
 	scheduler_ipi();
+	TS_SEND_RESCHED_END;
 	/*
 	 * KVM uses this interrupt to force a cpu out of guest mode
 	 */
@@ -224,6 +246,15 @@ void smp_call_function_single_interrupt(struct pt_regs *regs)
 	irq_exit();
 }
 
+extern void hrtimer_pull(void);
+
+void smp_pull_timers_interrupt(struct pt_regs *regs)
+{
+	ack_APIC_irq();
+	TRACE("pull timer interrupt\n");
+	hrtimer_pull();
+}
+
 struct smp_ops smp_ops = {
 	.smp_prepare_boot_cpu	= native_smp_prepare_boot_cpu,
 	.smp_prepare_cpus	= native_smp_prepare_cpus,
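
Pulling the pieces together: the timer-pull IPI added across entry_arch.h, irq_vectors.h, entry_64.S, irqinit.c, and this file forms a single path. A sketch of the end-to-end flow; hrtimer_pull() lives in the LITMUS^RT core, outside this diff, and the caller below is hypothetical:

/* Illustrative flow, e.g. after the global scheduler migrates a job
 * away from CPU `target` and its armed hrtimers must follow:
 *
 *   smp_send_pull_timers(target)          -- this file
 *   -> IPI on PULL_TIMERS_VECTOR (0xce)   -- irq_vectors.h
 *   -> pull_timers_interrupt entry stub   -- entry_64.S / irqinit.c
 *   -> smp_pull_timers_interrupt(regs)    -- this file
 *   -> hrtimer_pull()                     -- LITMUS^RT core
 */
static inline void migrate_timers_to(int target)
{
	smp_send_pull_timers(target);	/* hypothetical caller */
}
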
diff --git a/arch/x86/kernel/syscall_table_32.S b/arch/x86/kernel/syscall_table_32.S
index bc19be332bc..5ffb20e6ba4 100644
--- a/arch/x86/kernel/syscall_table_32.S
+++ b/arch/x86/kernel/syscall_table_32.S
@@ -346,3 +346,17 @@ ENTRY(sys_call_table)
 	.long sys_syncfs
 	.long sys_sendmmsg		/* 345 */
 	.long sys_setns
+	.long sys_set_rt_task_param	/* LITMUS^RT 347 */
+	.long sys_get_rt_task_param
+	.long sys_complete_job
+	.long sys_od_open
+	.long sys_od_close
+	.long sys_litmus_lock		/* +5 */
+	.long sys_litmus_unlock
+	.long sys_query_job_no
+	.long sys_wait_for_job_release
+	.long sys_wait_for_ts_release
+	.long sys_release_ts		/* +10 */
+	.long sys_null_call
+	.long sys_set_rt_task_mc_param
+	.long sys_set_color_page_info