path: root/arch/tile/include/asm/system.h
author    Glenn Elliott <gelliott@cs.unc.edu>    2012-03-04 19:47:13 -0500
committer Glenn Elliott <gelliott@cs.unc.edu>    2012-03-04 19:47:13 -0500
commit    c71c03bda1e86c9d5198c5d83f712e695c4f2a1e (patch)
tree      ecb166cb3e2b7e2adb3b5e292245fefd23381ac8 /arch/tile/include/asm/system.h
parent    ea53c912f8a86a8567697115b6a0d8152beee5c8 (diff)
parent    6a00f206debf8a5c8899055726ad127dbeeed098 (diff)
Merge branch 'mpi-master' into wip-k-fmlp
Conflicts: litmus/sched_cedf.c
Diffstat (limited to 'arch/tile/include/asm/system.h')
-rw-r--r--  arch/tile/include/asm/system.h  31
1 file changed, 22 insertions(+), 9 deletions(-)
diff --git a/arch/tile/include/asm/system.h b/arch/tile/include/asm/system.h
index f749be327ce0..23d1842f4839 100644
--- a/arch/tile/include/asm/system.h
+++ b/arch/tile/include/asm/system.h
@@ -89,6 +89,27 @@
 #define get_cycles_low() __insn_mfspr(SPR_CYCLE)	/* just get all 64 bits */
 #endif
 
+#if !CHIP_HAS_MF_WAITS_FOR_VICTIMS()
+#include <hv/syscall_public.h>
+/*
+ * Issue an uncacheable load to each memory controller, then
+ * wait until those loads have completed.
+ */
+static inline void __mb_incoherent(void)
+{
+	long clobber_r10;
+	asm volatile("swint2"
+		     : "=R10" (clobber_r10)
+		     : "R10" (HV_SYS_fence_incoherent)
+		     : "r0", "r1", "r2", "r3", "r4",
+		       "r5", "r6", "r7", "r8", "r9",
+		       "r11", "r12", "r13", "r14",
+		       "r15", "r16", "r17", "r18", "r19",
+		       "r20", "r21", "r22", "r23", "r24",
+		       "r25", "r26", "r27", "r28", "r29");
+}
+#endif
+
 /* Fence to guarantee visibility of stores to incoherent memory. */
 static inline void
 mb_incoherent(void)
@@ -97,7 +118,6 @@ mb_incoherent(void)
 
 #if !CHIP_HAS_MF_WAITS_FOR_VICTIMS()
 	{
-		int __mb_incoherent(void);
 #if CHIP_HAS_TILE_WRITE_PENDING()
 		const unsigned long WRITE_TIMEOUT_CYCLES = 400;
 		unsigned long start = get_cycles_low();
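The local declaration removed in this hunk is no longer needed because the first hunk defines __mb_incoherent() as a static inline earlier in the header. A minimal sketch of how the resulting mb_incoherent() is expected to read after the patch; the __insn_mf() call and the body of the write-pending timeout loop are assumptions based on the surrounding context lines, which the diff does not show in full:

/* Sketch only: assumed shape of mb_incoherent() once the patch is applied. */
static inline void
mb_incoherent(void)
{
	__insn_mf();	/* ordinary memory fence first (assumed unchanged) */

#if !CHIP_HAS_MF_WAITS_FOR_VICTIMS()
	{
#if CHIP_HAS_TILE_WRITE_PENDING()
		/* Give pending writes a bounded window to drain on their own. */
		const unsigned long WRITE_TIMEOUT_CYCLES = 400;
		unsigned long start = get_cycles_low();
		do {
			if (__insn_mfspr(SPR_TILE_WRITE_PENDING) == 0)
				return;
		} while ((get_cycles_low() - start) < WRITE_TIMEOUT_CYCLES);
#endif
		/* Otherwise fall back to the hypervisor fence added above. */
		__mb_incoherent();
	}
#endif
}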
@@ -161,7 +181,7 @@ extern struct task_struct *_switch_to(struct task_struct *prev,
 /* Helper function for _switch_to(). */
 extern struct task_struct *__switch_to(struct task_struct *prev,
 				       struct task_struct *next,
-				       unsigned long new_system_save_1_0);
+				       unsigned long new_system_save_k_0);
 
 /* Address that switched-away from tasks are at. */
 extern unsigned long get_switch_to_pc(void);
@@ -214,13 +234,6 @@ int hardwall_deactivate(struct task_struct *task);
 } while (0)
 #endif
 
-/* Invoke the simulator "syscall" mechanism (see arch/tile/kernel/entry.S). */
-extern int _sim_syscall(int syscall_num, ...);
-#define sim_syscall(syscall_num, ...) \
-	_sim_syscall(SIM_CONTROL_SYSCALL + \
-		     ((syscall_num) << _SIM_CONTROL_OPERATOR_BITS), \
-		     ## __VA_ARGS__)
-
 /*
  * Kernel threads can check to see if they need to migrate their
  * stack whenever they return from a context switch; for user