author		Linus Torvalds <torvalds@linux-foundation.org>	2012-03-28 18:58:21 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-03-28 18:58:21 -0400
commit		0195c00244dc2e9f522475868fa278c473ba7339 (patch)
tree		f97ca98ae64ede2c33ad3de05ed7bbfa4f4495ed /arch/tile/include
parent		f21ce8f8447c8be8847dadcfdbcc76b0d7365fa5 (diff)
parent		141124c02059eee9dbc5c86ea797b1ca888e77f7 (diff)
Merge tag 'split-asm_system_h-for-linus-20120328' of git://git.kernel.org/pub/scm/linux/kernel/git/dhowells/linux-asm_system
Pull "Disintegrate and delete asm/system.h" from David Howells:
"Here are a bunch of patches to disintegrate asm/system.h into a set of
separate bits to relieve the problem of circular inclusion
dependencies.
I've built all the working defconfigs from all the arches that I can
and made sure that they don't break.
The reason for these patches is that I recently encountered a circular
dependency problem that came about when I produced some patches to
optimise get_order() by rewriting it to use ilog2().
This uses bitops - and on the SH arch asm/bitops.h drags in
asm-generic/get_order.h by a circuitous route involving asm/system.h.
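To illustrate the optimisation being described, here is a minimal sketch of computing get_order() with a single find-last-set bitop instead of a shift-and-count loop. This is the idea only, not the actual patch from the series; the helper name is hypothetical, and the real code also handles 64-bit sizes with fls64():

	#include <linux/bitops.h>	/* fls(); this dependency is the whole point */

	/*
	 * Sketch only: get_order(size) is the smallest n such that
	 * (PAGE_SIZE << n) >= size.  Caller must pass size >= 1,
	 * as with the real helper.
	 */
	static inline int sketch_get_order(unsigned long size)
	{
		size--;
		size >>= PAGE_SHIFT;
		return fls(size);	/* fls(0) == 0, so exact powers of two work */
	}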
The main difficulty seems to be asm/system.h. It holds a number of
low level bits with no/few dependencies that are commonly used (eg.
memory barriers) and a number of bits with more dependencies that
aren't used in many places (eg. switch_to()).
These patches break asm/system.h up into the following core pieces:
(1) asm/barrier.h
Move memory barriers here. This is already done for MIPS and Alpha.
(2) asm/switch_to.h
Move switch_to() and related stuff here.
(3) asm/exec.h
Move arch_align_stack() here. Other process execution related bits
could perhaps go here from asm/processor.h.
(4) asm/cmpxchg.h
Move xchg() and cmpxchg() here as they're full word atomic ops and
frequently used by atomic_xchg() and atomic_cmpxchg().
(5) asm/bug.h
Move die() and related bits.
(6) asm/auxvec.h
Move AT_VECTOR_SIZE_ARCH here.
Other arch headers are created as needed on a per-arch basis."
Fixed up some conflicts from other header file cleanups and code movement
that happened in the meantime, so David's testing is somewhat weakened by
that. We'll find out anything that got broken and fix it.
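To make the shape of piece (1) concrete: the series creates an asm-generic/barrier.h with default definitions an arch can override. Roughly like the following sketch, which is illustrative rather than verbatim from the tree:

	#ifndef __ASM_GENERIC_BARRIER_H
	#define __ASM_GENERIC_BARRIER_H

	#ifndef __ASSEMBLY__

	#include <linux/compiler.h>

	/* Default to a plain compiler barrier unless the arch overrides. */
	#ifndef mb
	#define mb()	barrier()
	#endif
	#ifndef rmb
	#define rmb()	mb()
	#endif
	#ifndef wmb
	#define wmb()	mb()
	#endif

	#ifdef CONFIG_SMP
	#define smp_mb()	mb()
	#define smp_rmb()	rmb()
	#define smp_wmb()	wmb()
	#else
	#define smp_mb()	barrier()
	#define smp_rmb()	barrier()
	#define smp_wmb()	barrier()
	#endif

	#endif /* !__ASSEMBLY__ */
	#endif /* __ASM_GENERIC_BARRIER_H */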
* tag 'split-asm_system_h-for-linus-20120328' of git://git.kernel.org/pub/scm/linux/kernel/git/dhowells/linux-asm_system: (38 commits)
Delete all instances of asm/system.h
Remove all #inclusions of asm/system.h
Add #includes needed to permit the removal of asm/system.h
Move all declarations of free_initmem() to linux/mm.h
Disintegrate asm/system.h for OpenRISC
Split arch_align_stack() out from asm-generic/system.h
Split the switch_to() wrapper out of asm-generic/system.h
Move the asm-generic/system.h xchg() implementation to asm-generic/cmpxchg.h
Create asm-generic/barrier.h
Make asm-generic/cmpxchg.h #include asm-generic/cmpxchg-local.h
Disintegrate asm/system.h for Xtensa
Disintegrate asm/system.h for Unicore32 [based on ver #3, changed by gxt]
Disintegrate asm/system.h for Tile
Disintegrate asm/system.h for Sparc
Disintegrate asm/system.h for SH
Disintegrate asm/system.h for Score
Disintegrate asm/system.h for S390
Disintegrate asm/system.h for PowerPC
Disintegrate asm/system.h for PA-RISC
Disintegrate asm/system.h for MN10300
...
Diffstat (limited to 'arch/tile/include')
-rw-r--r--  arch/tile/include/asm/atomic.h                                         2
-rw-r--r--  arch/tile/include/asm/atomic_32.h                                      1
-rw-r--r--  arch/tile/include/asm/atomic_64.h                                      1
-rw-r--r--  arch/tile/include/asm/barrier.h (renamed from arch/tile/include/asm/system.h)  121
-rw-r--r--  arch/tile/include/asm/bitops_32.h                                      1
-rw-r--r--  arch/tile/include/asm/bitops_64.h                                      1
-rw-r--r--  arch/tile/include/asm/cacheflush.h                                    11
-rw-r--r--  arch/tile/include/asm/exec.h                                          20
-rw-r--r--  arch/tile/include/asm/pgtable.h                                        1
-rw-r--r--  arch/tile/include/asm/setup.h                                         22
-rw-r--r--  arch/tile/include/asm/spinlock_32.h                                    1
-rw-r--r--  arch/tile/include/asm/switch_to.h                                     76
-rw-r--r--  arch/tile/include/asm/timex.h                                          2
-rw-r--r--  arch/tile/include/asm/unaligned.h                                     15
14 files changed, 152 insertions, 123 deletions
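Every per-file change below follows the same consumer-side pattern: drop the catch-all include and name only the split header that is actually needed. Schematically (a sketch using the headers named in the series):

	/* Before: everything came in via the catch-all header. */
	#include <asm/system.h>

	/* After: each user includes only the piece it needs. */
	#include <asm/barrier.h>	/* memory barriers (mb, rmb, wmb, ...) */
	#include <asm/switch_to.h>	/* switch_to() and context-switch hooks */
	#include <asm/cmpxchg.h>	/* xchg()/cmpxchg() word atomics */
	#include <asm/exec.h>		/* arch_align_stack() */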
diff --git a/arch/tile/include/asm/atomic.h b/arch/tile/include/asm/atomic.h
index 921dbeb8a70c..bb696da5d7cd 100644
--- a/arch/tile/include/asm/atomic.h
+++ b/arch/tile/include/asm/atomic.h
@@ -20,7 +20,7 @@
 #ifndef __ASSEMBLY__
 
 #include <linux/compiler.h>
-#include <asm/system.h>
+#include <linux/types.h>
 
 #define ATOMIC_INIT(i)	{ (i) }
 
diff --git a/arch/tile/include/asm/atomic_32.h b/arch/tile/include/asm/atomic_32.h
index c03349e0ca9f..466dc4a39a4f 100644
--- a/arch/tile/include/asm/atomic_32.h
+++ b/arch/tile/include/asm/atomic_32.h
@@ -17,6 +17,7 @@
 #ifndef _ASM_TILE_ATOMIC_32_H
 #define _ASM_TILE_ATOMIC_32_H
 
+#include <asm/barrier.h>
 #include <arch/chip.h>
 
 #ifndef __ASSEMBLY__
diff --git a/arch/tile/include/asm/atomic_64.h b/arch/tile/include/asm/atomic_64.h
index 27fe667fddfe..f4500c688ffa 100644
--- a/arch/tile/include/asm/atomic_64.h
+++ b/arch/tile/include/asm/atomic_64.h
@@ -19,6 +19,7 @@
 
 #ifndef __ASSEMBLY__
 
+#include <asm/barrier.h>
 #include <arch/spr_def.h>
 
 /* First, the 32-bit atomic ops that are "real" on our 64-bit platform. */
diff --git a/arch/tile/include/asm/system.h b/arch/tile/include/asm/barrier.h
index 23d1842f4839..990a217a0b72 100644
--- a/arch/tile/include/asm/system.h
+++ b/arch/tile/include/asm/barrier.h
@@ -12,20 +12,15 @@
  * more details.
  */
 
-#ifndef _ASM_TILE_SYSTEM_H
-#define _ASM_TILE_SYSTEM_H
+#ifndef _ASM_TILE_BARRIER_H
+#define _ASM_TILE_BARRIER_H
 
 #ifndef __ASSEMBLY__
 
 #include <linux/types.h>
-#include <linux/irqflags.h>
-
-/* NOTE: we can't include <linux/ptrace.h> due to #include dependencies. */
-#include <asm/ptrace.h>
-
 #include <arch/chip.h>
-#include <arch/sim_def.h>
 #include <arch/spr_def.h>
+#include <asm/timex.h>
 
 /*
  * read_barrier_depends - Flush all pending reads that subsequents reads
@@ -78,17 +73,10 @@
  * as Alpha, "y" could be set to 3 and "x" to 0.  Use rmb()
  * in cases like this where there are no data dependencies.
  */
-
 #define read_barrier_depends()	do { } while (0)
 
 #define __sync()	__insn_mf()
 
-#if CHIP_HAS_SPLIT_CYCLE()
-#define get_cycles_low() __insn_mfspr(SPR_CYCLE_LOW)
-#else
-#define get_cycles_low() __insn_mfspr(SPR_CYCLE)   /* just get all 64 bits */
-#endif
-
 #if !CHIP_HAS_MF_WAITS_FOR_VICTIMS()
 #include <hv/syscall_public.h>
 /*
@@ -156,106 +144,5 @@ mb_incoherent(void)
 #define set_mb(var, value) \
 	do { var = value; mb(); } while (0)
 
-/*
- * Pause the DMA engine and static network before task switching.
- */
-#define prepare_arch_switch(next) _prepare_arch_switch(next)
-void _prepare_arch_switch(struct task_struct *next);
-
-
-/*
- * switch_to(n) should switch tasks to task nr n, first
- * checking that n isn't the current task, in which case it does nothing.
- * The number of callee-saved registers saved on the kernel stack
- * is defined here for use in copy_thread() and must agree with __switch_to().
- */
-#endif /* !__ASSEMBLY__ */
-#define CALLEE_SAVED_FIRST_REG	30
-#define CALLEE_SAVED_REGS_COUNT	24   /* r30 to r52, plus an empty to align */
-#ifndef __ASSEMBLY__
-struct task_struct;
-#define switch_to(prev, next, last) ((last) = _switch_to((prev), (next)))
-extern struct task_struct *_switch_to(struct task_struct *prev,
-				      struct task_struct *next);
-
-/* Helper function for _switch_to(). */
-extern struct task_struct *__switch_to(struct task_struct *prev,
-				       struct task_struct *next,
-				       unsigned long new_system_save_k_0);
-
-/* Address that switched-away from tasks are at. */
-extern unsigned long get_switch_to_pc(void);
-
-/*
- * On SMP systems, when the scheduler does migration-cost autodetection,
- * it needs a way to flush as much of the CPU's caches as possible:
- *
- * TODO: fill this in!
- */
-static inline void sched_cacheflush(void)
-{
-}
-
-#define arch_align_stack(x) (x)
-
-/*
- * Is the kernel doing fixups of unaligned accesses?  If <0, no kernel
- * intervention occurs and SIGBUS is delivered with no data address
- * info.  If 0, the kernel single-steps the instruction to discover
- * the data address to provide with the SIGBUS.  If 1, the kernel does
- * a fixup.
- */
-extern int unaligned_fixup;
-
-/* Is the kernel printing on each unaligned fixup? */
-extern int unaligned_printk;
-
-/* Number of unaligned fixups performed */
-extern unsigned int unaligned_fixup_count;
-
-/* Init-time routine to do tile-specific per-cpu setup. */
-void setup_cpu(int boot);
-
-/* User-level DMA management functions */
-void grant_dma_mpls(void);
-void restrict_dma_mpls(void);
-
-#ifdef CONFIG_HARDWALL
-/* User-level network management functions */
-void reset_network_state(void);
-void grant_network_mpls(void);
-void restrict_network_mpls(void);
-int hardwall_deactivate(struct task_struct *task);
-
-/* Hook hardwall code into changes in affinity. */
-#define arch_set_cpus_allowed(p, new_mask) do { \
-	if (p->thread.hardwall && !cpumask_equal(&p->cpus_allowed, new_mask)) \
-		hardwall_deactivate(p); \
-} while (0)
-#endif
-
-/*
- * Kernel threads can check to see if they need to migrate their
- * stack whenever they return from a context switch; for user
- * threads, we defer until they are returning to user-space.
- */
-#define finish_arch_switch(prev) do {					\
-	if (unlikely((prev)->state == TASK_DEAD))			\
-		__insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_OS_EXIT |	\
-			((prev)->pid << _SIM_CONTROL_OPERATOR_BITS));	\
-	__insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_OS_SWITCH |		\
-		(current->pid << _SIM_CONTROL_OPERATOR_BITS));		\
-	if (current->mm == NULL && !kstack_hash &&			\
-	    current_thread_info()->homecache_cpu != smp_processor_id())	\
-		homecache_migrate_kthread();				\
-} while (0)
-
-/* Support function for forking a new task. */
-void ret_from_fork(void);
-
-/* Called from ret_from_fork() when a new process starts up. */
-struct task_struct *sim_notify_fork(struct task_struct *prev);
-
 #endif /* !__ASSEMBLY__ */
-
-#endif /* _ASM_TILE_SYSTEM_H */
+#endif /* _ASM_TILE_BARRIER_H */
diff --git a/arch/tile/include/asm/bitops_32.h b/arch/tile/include/asm/bitops_32.h
index 571b118bfd9b..ddc4c1efde43 100644
--- a/arch/tile/include/asm/bitops_32.h
+++ b/arch/tile/include/asm/bitops_32.h
@@ -17,7 +17,6 @@
 
 #include <linux/compiler.h>
 #include <linux/atomic.h>
-#include <asm/system.h>
 
 /* Tile-specific routines to support <asm/bitops.h>. */
 unsigned long _atomic_or(volatile unsigned long *p, unsigned long mask);
diff --git a/arch/tile/include/asm/bitops_64.h b/arch/tile/include/asm/bitops_64.h
index e9c8e381ee0e..58d021a9834f 100644
--- a/arch/tile/include/asm/bitops_64.h
+++ b/arch/tile/include/asm/bitops_64.h
@@ -17,7 +17,6 @@
 
 #include <linux/compiler.h>
 #include <linux/atomic.h>
-#include <asm/system.h>
 
 /* See <asm/bitops.h> for API comments. */
 
diff --git a/arch/tile/include/asm/cacheflush.h b/arch/tile/include/asm/cacheflush.h
index e925f4bb498f..0fc63c488edf 100644
--- a/arch/tile/include/asm/cacheflush.h
+++ b/arch/tile/include/asm/cacheflush.h
@@ -20,7 +20,6 @@
 /* Keep includes the same across arches.  */
 #include <linux/mm.h>
 #include <linux/cache.h>
-#include <asm/system.h>
 #include <arch/icache.h>
 
 /* Caches are physically-indexed and so don't need special treatment */
@@ -152,4 +151,14 @@ static inline void finv_buffer_local(void *buffer, size_t size)
  */
 void finv_buffer_remote(void *buffer, size_t size, int hfh);
 
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible:
+ *
+ * TODO: fill this in!
+ */
+static inline void sched_cacheflush(void)
+{
+}
+
 #endif /* _ASM_TILE_CACHEFLUSH_H */
diff --git a/arch/tile/include/asm/exec.h b/arch/tile/include/asm/exec.h
new file mode 100644
index 000000000000..a714e1950867
--- /dev/null
+++ b/arch/tile/include/asm/exec.h
@@ -0,0 +1,20 @@
+/*
+ * Copyright 2010 Tilera Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT.  See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _ASM_TILE_EXEC_H
+#define _ASM_TILE_EXEC_H
+
+#define arch_align_stack(x) (x)
+
+#endif /* _ASM_TILE_EXEC_H */
diff --git a/arch/tile/include/asm/pgtable.h b/arch/tile/include/asm/pgtable.h
index 1a20b7ef8ea2..67490910774d 100644
--- a/arch/tile/include/asm/pgtable.h
+++ b/arch/tile/include/asm/pgtable.h
@@ -29,7 +29,6 @@
 #include <linux/spinlock.h>
 #include <asm/processor.h>
 #include <asm/fixmap.h>
-#include <asm/system.h>
 
 struct mm_struct;
 struct vm_area_struct;
diff --git a/arch/tile/include/asm/setup.h b/arch/tile/include/asm/setup.h
index 7caf0f36b030..e58613e0752f 100644
--- a/arch/tile/include/asm/setup.h
+++ b/arch/tile/include/asm/setup.h
@@ -31,6 +31,28 @@ void early_panic(const char *fmt, ...);
 void warn_early_printk(void);
 void __init disable_early_printk(void);
 
+/* Init-time routine to do tile-specific per-cpu setup. */
+void setup_cpu(int boot);
+
+/* User-level DMA management functions */
+void grant_dma_mpls(void);
+void restrict_dma_mpls(void);
+
+#ifdef CONFIG_HARDWALL
+/* User-level network management functions */
+void reset_network_state(void);
+void grant_network_mpls(void);
+void restrict_network_mpls(void);
+struct task_struct;
+int hardwall_deactivate(struct task_struct *task);
+
+/* Hook hardwall code into changes in affinity. */
+#define arch_set_cpus_allowed(p, new_mask) do { \
+	if (p->thread.hardwall && !cpumask_equal(&p->cpus_allowed, new_mask)) \
+		hardwall_deactivate(p); \
+} while (0)
+#endif
+
 #endif /* __KERNEL__ */
 
 #endif /* _ASM_TILE_SETUP_H */
diff --git a/arch/tile/include/asm/spinlock_32.h b/arch/tile/include/asm/spinlock_32.h
index a5e4208d34f9..c0a77b38d39a 100644
--- a/arch/tile/include/asm/spinlock_32.h
+++ b/arch/tile/include/asm/spinlock_32.h
@@ -19,7 +19,6 @@
 
 #include <linux/atomic.h>
 #include <asm/page.h>
-#include <asm/system.h>
 #include <linux/compiler.h>
 
 /*
diff --git a/arch/tile/include/asm/switch_to.h b/arch/tile/include/asm/switch_to.h
new file mode 100644
index 000000000000..1d48c5fee8b7
--- /dev/null
+++ b/arch/tile/include/asm/switch_to.h
@@ -0,0 +1,76 @@
+/*
+ * Copyright 2010 Tilera Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT.  See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _ASM_TILE_SWITCH_TO_H
+#define _ASM_TILE_SWITCH_TO_H
+
+#include <arch/sim_def.h>
+
+/*
+ * switch_to(n) should switch tasks to task nr n, first
+ * checking that n isn't the current task, in which case it does nothing.
+ * The number of callee-saved registers saved on the kernel stack
+ * is defined here for use in copy_thread() and must agree with __switch_to().
+ */
+#define CALLEE_SAVED_FIRST_REG	30
+#define CALLEE_SAVED_REGS_COUNT	24   /* r30 to r52, plus an empty to align */
+
+#ifndef __ASSEMBLY__
+
+struct task_struct;
+
+/*
+ * Pause the DMA engine and static network before task switching.
+ */
+#define prepare_arch_switch(next) _prepare_arch_switch(next)
+void _prepare_arch_switch(struct task_struct *next);
+
+struct task_struct;
+#define switch_to(prev, next, last) ((last) = _switch_to((prev), (next)))
+extern struct task_struct *_switch_to(struct task_struct *prev,
+				      struct task_struct *next);
+
+/* Helper function for _switch_to(). */
+extern struct task_struct *__switch_to(struct task_struct *prev,
+				       struct task_struct *next,
+				       unsigned long new_system_save_k_0);
+
+/* Address that switched-away from tasks are at. */
+extern unsigned long get_switch_to_pc(void);
+
+/*
+ * Kernel threads can check to see if they need to migrate their
+ * stack whenever they return from a context switch; for user
+ * threads, we defer until they are returning to user-space.
+ */
+#define finish_arch_switch(prev) do {					\
+	if (unlikely((prev)->state == TASK_DEAD))			\
+		__insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_OS_EXIT |	\
+			((prev)->pid << _SIM_CONTROL_OPERATOR_BITS));	\
+	__insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_OS_SWITCH |		\
+		(current->pid << _SIM_CONTROL_OPERATOR_BITS));		\
+	if (current->mm == NULL && !kstack_hash &&			\
+	    current_thread_info()->homecache_cpu != smp_processor_id())	\
+		homecache_migrate_kthread();				\
+} while (0)
+
+/* Support function for forking a new task. */
+void ret_from_fork(void);
+
+/* Called from ret_from_fork() when a new process starts up. */
+struct task_struct *sim_notify_fork(struct task_struct *prev);
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* _ASM_TILE_SWITCH_TO_H */
diff --git a/arch/tile/include/asm/timex.h b/arch/tile/include/asm/timex.h
index 29921f0b86da..dc987d53e2a9 100644
--- a/arch/tile/include/asm/timex.h
+++ b/arch/tile/include/asm/timex.h
@@ -29,11 +29,13 @@ typedef unsigned long long cycles_t;
 
 #if CHIP_HAS_SPLIT_CYCLE()
 cycles_t get_cycles(void);
+#define get_cycles_low() __insn_mfspr(SPR_CYCLE_LOW)
 #else
 static inline cycles_t get_cycles(void)
 {
 	return __insn_mfspr(SPR_CYCLE);
 }
+#define get_cycles_low() __insn_mfspr(SPR_CYCLE)   /* just get all 64 bits */
 #endif
 
 cycles_t get_clock_rate(void);
diff --git a/arch/tile/include/asm/unaligned.h b/arch/tile/include/asm/unaligned.h
index 137e2de5b102..37dfbe598872 100644
--- a/arch/tile/include/asm/unaligned.h
+++ b/arch/tile/include/asm/unaligned.h
@@ -21,4 +21,19 @@
 #define get_unaligned	__get_unaligned_le
 #define put_unaligned	__put_unaligned_le
 
+/*
+ * Is the kernel doing fixups of unaligned accesses?  If <0, no kernel
+ * intervention occurs and SIGBUS is delivered with no data address
+ * info.  If 0, the kernel single-steps the instruction to discover
+ * the data address to provide with the SIGBUS.  If 1, the kernel does
+ * a fixup.
+ */
+extern int unaligned_fixup;
+
+/* Is the kernel printing on each unaligned fixup? */
+extern int unaligned_printk;
+
+/* Number of unaligned fixups performed */
+extern unsigned int unaligned_fixup_count;
+
 #endif /* _ASM_TILE_UNALIGNED_H */