Diffstat (limited to 'arch/tile/include/asm/system.h')
-rw-r--r--   arch/tile/include/asm/system.h | 220
1 file changed, 220 insertions, 0 deletions
diff --git a/arch/tile/include/asm/system.h b/arch/tile/include/asm/system.h
new file mode 100644
index 000000000000..d6ca7f816c87
--- /dev/null
+++ b/arch/tile/include/asm/system.h
@@ -0,0 +1,220 @@
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 */

#ifndef _ASM_TILE_SYSTEM_H
#define _ASM_TILE_SYSTEM_H

#ifndef __ASSEMBLY__

#include <linux/types.h>
#include <linux/irqflags.h>

/* NOTE: we can't include <linux/ptrace.h> due to #include dependencies. */
#include <asm/ptrace.h>

#include <arch/chip.h>
#include <arch/sim_def.h>
#include <arch/spr_def.h>
/*
 * read_barrier_depends - Flush all pending reads that subsequent reads
 * depend on.
 *
 * No data-dependent reads from memory-like regions are ever reordered
 * over this barrier.  All reads preceding this primitive are guaranteed
 * to access memory (but not necessarily other CPUs' caches) before any
 * reads following this primitive that depend on the data returned by
 * any of the preceding reads.  This primitive is much lighter weight than
 * rmb() on most CPUs, and is never heavier weight than rmb().
 *
 * These ordering constraints are respected by both the local CPU
 * and the compiler.
 *
 * Ordering is not guaranteed by anything other than these primitives,
 * not even by data dependencies.  See the documentation for
 * memory_barrier() for examples and URLs to more information.
 *
 * For example, the following code would force ordering (the initial
 * value of "a" is zero, "b" is one, and "p" is "&a"):
 *
 * <programlisting>
 *      CPU 0                           CPU 1
 *
 *      b = 2;
 *      memory_barrier();
 *      p = &b;                         q = p;
 *                                      read_barrier_depends();
 *                                      d = *q;
 * </programlisting>
 *
 * because the read of "*q" depends on the read of "p" and these
 * two reads are separated by a read_barrier_depends().  However,
 * the following code, with the same initial values for "a" and "b":
 *
 * <programlisting>
 *      CPU 0                           CPU 1
 *
 *      a = 2;
 *      memory_barrier();
 *      b = 3;                          y = b;
 *                                      read_barrier_depends();
 *                                      x = a;
 * </programlisting>
 *
 * does not enforce ordering, since there is no data dependency between
 * the read of "a" and the read of "b".  Therefore, on some CPUs, such
 * as Alpha, "y" could be set to 3 and "x" to 0.  Use rmb()
 * in cases like this where there are no data dependencies.
 */

#define read_barrier_depends()  do { } while (0)
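
/*
 * A minimal illustrative sketch (not from the original file) of the
 * pointer-publish pattern described in the comment above, assuming
 * hypothetical variables "shared_ptr" and "data".  The reader's
 * dependent load needs only read_barrier_depends(), which is free on
 * tile; real code would also want ACCESS_ONCE()-style accessors.
 */
#if 0   /* example only */
struct shared_data { int value; };
static struct shared_data data;
static struct shared_data *shared_ptr;

static void publisher(void)             /* runs on CPU 0 */
{
        data.value = 42;
        mb();                           /* order the store to data before publishing */
        shared_ptr = &data;
}

static int subscriber(void)             /* runs on CPU 1 */
{
        struct shared_data *p = shared_ptr;
        read_barrier_depends();         /* order the dependent load of p->value */
        return p ? p->value : -1;
}
#endif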

#define __sync()        __insn_mf()

#if CHIP_HAS_SPLIT_CYCLE()
#define get_cycles_low() __insn_mfspr(SPR_CYCLE_LOW)
#else
#define get_cycles_low() __insn_mfspr(SPR_CYCLE)   /* just get all 64 bits */
#endif

/* Fence to guarantee visibility of stores to incoherent memory. */
static inline void
mb_incoherent(void)
{
        __insn_mf();

#if !CHIP_HAS_MF_WAITS_FOR_VICTIMS()
        {
                int __mb_incoherent(void);
#if CHIP_HAS_TILE_WRITE_PENDING()
                const unsigned long WRITE_TIMEOUT_CYCLES = 400;
                unsigned long start = get_cycles_low();
                do {
                        if (__insn_mfspr(SPR_TILE_WRITE_PENDING) == 0)
                                return;
                } while ((get_cycles_low() - start) < WRITE_TIMEOUT_CYCLES);
#endif /* CHIP_HAS_TILE_WRITE_PENDING() */
                (void) __mb_incoherent();
        }
#endif /* CHIP_HAS_MF_WAITS_FOR_VICTIMS() */
}
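
/*
 * A minimal illustrative sketch (not from the original file) of where
 * mb_incoherent() fits: between payload stores that may target
 * incoherent memory and the store that tells another agent to read
 * them.  The buffer and doorbell names are hypothetical.
 */
#if 0   /* example only */
static void post_to_device(volatile u32 *buf, volatile u32 *doorbell, u32 val)
{
        buf[0] = val;           /* payload store, possibly incoherent */
        mb_incoherent();        /* wait for the store to become visible */
        *doorbell = 1;          /* now signal the consumer */
}
#endif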

#define fast_wmb()      __sync()
#define fast_rmb()      __sync()
#define fast_mb()       __sync()
#define fast_iob()      mb_incoherent()

#define wmb()           fast_wmb()
#define rmb()           fast_rmb()
#define mb()            fast_mb()
#define iob()           fast_iob()

#ifdef CONFIG_SMP
#define smp_mb()        mb()
#define smp_rmb()       rmb()
#define smp_wmb()       wmb()
#define smp_read_barrier_depends()      read_barrier_depends()
#else
#define smp_mb()        barrier()
#define smp_rmb()       barrier()
#define smp_wmb()       barrier()
#define smp_read_barrier_depends()      do { } while (0)
#endif
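
/*
 * A minimal illustrative sketch (not from the original file) of the
 * usual smp_wmb()/smp_rmb() pairing; on !CONFIG_SMP builds the macros
 * above degrade to plain compiler barriers, so the same code stays
 * correct.  "msg" and "msg_ready" are hypothetical.
 */
#if 0   /* example only */
static int msg;
static int msg_ready;

static void producer(void)
{
        msg = 99;
        smp_wmb();              /* publish msg before raising the flag */
        msg_ready = 1;
}

static int consumer(void)
{
        if (!msg_ready)
                return -1;      /* not ready yet */
        smp_rmb();              /* see the flag before reading msg */
        return msg;
}
#endif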

#define set_mb(var, value) \
        do { var = value; mb(); } while (0)
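
/*
 * A minimal illustrative sketch (not from the original file) of the
 * classic set_mb() idiom: make a task-state store globally visible
 * before re-checking a wakeup condition.  "condition" is hypothetical.
 */
#if 0   /* example only */
static void wait_for_condition(void)
{
        for (;;) {
                set_mb(current->state, TASK_UNINTERRUPTIBLE);
                if (condition)
                        break;
                schedule();
        }
        current->state = TASK_RUNNING;
}
#endif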

/*
 * Pause the DMA engine and static network before task switching.
 */
#define prepare_arch_switch(next) _prepare_arch_switch(next)
void _prepare_arch_switch(struct task_struct *next);

/*
 * switch_to(n) should switch tasks to task nr n, first
 * checking that n isn't the current task, in which case it does nothing.
 * The number of callee-saved registers saved on the kernel stack
 * is defined here for use in copy_thread() and must agree with __switch_to().
 */
#endif /* !__ASSEMBLY__ */
#define CALLEE_SAVED_FIRST_REG  30
#define CALLEE_SAVED_REGS_COUNT 24   /* r30 to r52, plus an empty to align */
#ifndef __ASSEMBLY__
struct task_struct;
#define switch_to(prev, next, last) ((last) = _switch_to((prev), (next)))
extern struct task_struct *_switch_to(struct task_struct *prev,
                                      struct task_struct *next);
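
/*
 * Illustrative arithmetic (not from the original file): with
 * CALLEE_SAVED_FIRST_REG == 30 and CALLEE_SAVED_REGS_COUNT == 24, the
 * save area covers r30..r52 (23 registers) plus one pad word, so
 * copy_thread()-style code would reserve 24 longs on the kernel stack.
 * The macro name below is hypothetical.
 */
#if 0   /* example only */
#define CALLEE_SAVE_AREA_BYTES  (CALLEE_SAVED_REGS_COUNT * sizeof(long))
#endif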

/*
 * On SMP systems, when the scheduler does migration-cost autodetection,
 * it needs a way to flush as much of the CPU's caches as possible:
 *
 * TODO: fill this in!
 */
static inline void sched_cacheflush(void)
{
}

#define arch_align_stack(x) (x)

/*
 * Is the kernel doing fixups of unaligned accesses?  If <0, no kernel
 * intervention occurs and SIGBUS is delivered with no data address
 * info.  If 0, the kernel single-steps the instruction to discover
 * the data address to provide with the SIGBUS.  If 1, the kernel does
 * a fixup.
 */
extern int unaligned_fixup;

/* Is the kernel printing on each unaligned fixup? */
extern int unaligned_printk;

/* Number of unaligned fixups performed */
extern unsigned int unaligned_fixup_count;
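
/*
 * A minimal illustrative sketch (not from the original file) decoding
 * the tri-state unaligned_fixup knob per the comment above; the
 * function name and printk text are hypothetical.
 */
#if 0   /* example only */
static void describe_unaligned_policy(void)
{
        if (unaligned_fixup < 0)
                printk("unaligned: SIGBUS, no data address info\n");
        else if (unaligned_fixup == 0)
                printk("unaligned: single-step to find the address, then SIGBUS\n");
        else
                printk("unaligned: kernel performs the fixup\n");
}
#endif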

/* User-level DMA management functions */
void grant_dma_mpls(void);
void restrict_dma_mpls(void);

/* Invoke the simulator "syscall" mechanism (see arch/tile/kernel/entry.S). */
extern int _sim_syscall(int syscall_num, ...);
#define sim_syscall(syscall_num, ...) \
        _sim_syscall(SIM_CONTROL_SYSCALL + \
                ((syscall_num) << _SIM_CONTROL_OPERATOR_BITS), \
                ## __VA_ARGS__)
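
/*
 * A minimal illustrative sketch (not from the original file) of a
 * sim_syscall() call site.  The syscall number here is made up; real
 * operators come from <arch/sim_def.h>.
 */
#if 0   /* example only */
static void notify_simulator(void)
{
        int rc = sim_syscall(1 /* hypothetical number */, 0, 0);
        (void)rc;
}
#endif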

/*
 * Kernel threads can check to see if they need to migrate their
 * stack whenever they return from a context switch; for user
 * threads, we defer until they are returning to user-space.
 */
#define finish_arch_switch(prev) do {                                     \
        if (unlikely((prev)->state == TASK_DEAD))                         \
                __insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_OS_EXIT |       \
                        ((prev)->pid << _SIM_CONTROL_OPERATOR_BITS));     \
        __insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_OS_SWITCH |             \
                (current->pid << _SIM_CONTROL_OPERATOR_BITS));            \
        if (current->mm == NULL && !kstack_hash &&                        \
            current_thread_info()->homecache_cpu != smp_processor_id())   \
                homecache_migrate_kthread();                              \
} while (0)

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_TILE_SYSTEM_H */