Diffstat (limited to 'arch/tile/include/asm')
 arch/tile/include/asm/atomic.h                |   2 +-
 arch/tile/include/asm/atomic_32.h             |   1 +
 arch/tile/include/asm/atomic_64.h             |   1 +
 arch/tile/include/asm/{system.h => barrier.h} | 121 +-----------------------
 arch/tile/include/asm/bitops_32.h             |   1 -
 arch/tile/include/asm/bitops_64.h             |   1 -
 arch/tile/include/asm/cacheflush.h            |  11 ++-
 arch/tile/include/asm/exec.h                  |  20 +++++
 arch/tile/include/asm/pgtable.h               |   1 -
 arch/tile/include/asm/setup.h                 |  22 +++++
 arch/tile/include/asm/spinlock_32.h           |   1 -
 arch/tile/include/asm/switch_to.h             |  76 ++++++++++++++
 arch/tile/include/asm/timex.h                 |   2 ++
 arch/tile/include/asm/unaligned.h             |  15 ++++
 14 files changed, 152 insertions(+), 123 deletions(-)
diff --git a/arch/tile/include/asm/atomic.h b/arch/tile/include/asm/atomic.h
index 921dbeb8a70c..bb696da5d7cd 100644
--- a/arch/tile/include/asm/atomic.h
+++ b/arch/tile/include/asm/atomic.h
@@ -20,7 +20,7 @@
 #ifndef __ASSEMBLY__
 
 #include <linux/compiler.h>
-#include <asm/system.h>
+#include <linux/types.h>
 
 #define ATOMIC_INIT(i) { (i) }
 
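For context, <linux/types.h> is what actually defines atomic_t, so atomic.h now pulls in exactly what it uses. A minimal sketch of the generic atomic API this header backs (illustrative only; "pkt_count" and "count_packet" are hypothetical names):

    #include <linux/atomic.h>

    static atomic_t pkt_count = ATOMIC_INIT(0);	/* compile-time init */

    static void count_packet(void)
    {
    	atomic_inc(&pkt_count);			/* atomic read-modify-write */
    	if (atomic_read(&pkt_count) >= 1000)	/* plain atomic load */
    		atomic_set(&pkt_count, 0);
    }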
diff --git a/arch/tile/include/asm/atomic_32.h b/arch/tile/include/asm/atomic_32.h
index c03349e0ca9f..466dc4a39a4f 100644
--- a/arch/tile/include/asm/atomic_32.h
+++ b/arch/tile/include/asm/atomic_32.h
@@ -17,6 +17,7 @@
 #ifndef _ASM_TILE_ATOMIC_32_H
 #define _ASM_TILE_ATOMIC_32_H
 
+#include <asm/barrier.h>
 #include <arch/chip.h>
 
 #ifndef __ASSEMBLY__
diff --git a/arch/tile/include/asm/atomic_64.h b/arch/tile/include/asm/atomic_64.h
index 27fe667fddfe..f4500c688ffa 100644
--- a/arch/tile/include/asm/atomic_64.h
+++ b/arch/tile/include/asm/atomic_64.h
@@ -19,6 +19,7 @@
 
 #ifndef __ASSEMBLY__
 
+#include <asm/barrier.h>
 #include <arch/spr_def.h>
 
 /* First, the 32-bit atomic ops that are "real" on our 64-bit platform. */
diff --git a/arch/tile/include/asm/system.h b/arch/tile/include/asm/barrier.h
index 23d1842f4839..990a217a0b72 100644
--- a/arch/tile/include/asm/system.h
+++ b/arch/tile/include/asm/barrier.h
@@ -12,20 +12,15 @@
  * more details.
  */
 
-#ifndef _ASM_TILE_SYSTEM_H
-#define _ASM_TILE_SYSTEM_H
+#ifndef _ASM_TILE_BARRIER_H
+#define _ASM_TILE_BARRIER_H
 
 #ifndef __ASSEMBLY__
 
 #include <linux/types.h>
-#include <linux/irqflags.h>
-
-/* NOTE: we can't include <linux/ptrace.h> due to #include dependencies. */
-#include <asm/ptrace.h>
-
 #include <arch/chip.h>
-#include <arch/sim_def.h>
 #include <arch/spr_def.h>
+#include <asm/timex.h>
 
 /*
  * read_barrier_depends - Flush all pending reads that subsequents reads
@@ -78,17 +73,10 @@
  * as Alpha, "y" could be set to 3 and "x" to 0.  Use rmb()
  * in cases like this where there are no data dependencies.
  */
-
 #define read_barrier_depends()	do { } while (0)
 
 #define __sync()	__insn_mf()
 
-#if CHIP_HAS_SPLIT_CYCLE()
-#define get_cycles_low() __insn_mfspr(SPR_CYCLE_LOW)
-#else
-#define get_cycles_low() __insn_mfspr(SPR_CYCLE)   /* just get all 64 bits */
-#endif
-
 #if !CHIP_HAS_MF_WAITS_FOR_VICTIMS()
 #include <hv/syscall_public.h>
 /*
@@ -156,106 +144,5 @@ mb_incoherent(void)
 #define set_mb(var, value) \
 	do { var = value; mb(); } while (0)
 
-/*
- * Pause the DMA engine and static network before task switching.
- */
-#define prepare_arch_switch(next) _prepare_arch_switch(next)
-void _prepare_arch_switch(struct task_struct *next);
-
-
-/*
- * switch_to(n) should switch tasks to task nr n, first
- * checking that n isn't the current task, in which case it does nothing.
- * The number of callee-saved registers saved on the kernel stack
- * is defined here for use in copy_thread() and must agree with __switch_to().
- */
-#endif /* !__ASSEMBLY__ */
-#define CALLEE_SAVED_FIRST_REG	30
-#define CALLEE_SAVED_REGS_COUNT	24	/* r30 to r52, plus an empty to align */
-#ifndef __ASSEMBLY__
-struct task_struct;
-#define switch_to(prev, next, last) ((last) = _switch_to((prev), (next)))
-extern struct task_struct *_switch_to(struct task_struct *prev,
-				      struct task_struct *next);
-
-/* Helper function for _switch_to(). */
-extern struct task_struct *__switch_to(struct task_struct *prev,
-				       struct task_struct *next,
-				       unsigned long new_system_save_k_0);
-
-/* Address that switched-away from tasks are at. */
-extern unsigned long get_switch_to_pc(void);
-
-/*
- * On SMP systems, when the scheduler does migration-cost autodetection,
- * it needs a way to flush as much of the CPU's caches as possible:
- *
- * TODO: fill this in!
- */
-static inline void sched_cacheflush(void)
-{
-}
-
-#define arch_align_stack(x) (x)
-
-/*
- * Is the kernel doing fixups of unaligned accesses?  If <0, no kernel
- * intervention occurs and SIGBUS is delivered with no data address
- * info.  If 0, the kernel single-steps the instruction to discover
- * the data address to provide with the SIGBUS.  If 1, the kernel does
- * a fixup.
- */
-extern int unaligned_fixup;
-
-/* Is the kernel printing on each unaligned fixup? */
-extern int unaligned_printk;
-
-/* Number of unaligned fixups performed */
-extern unsigned int unaligned_fixup_count;
-
-/* Init-time routine to do tile-specific per-cpu setup. */
-void setup_cpu(int boot);
-
-/* User-level DMA management functions */
-void grant_dma_mpls(void);
-void restrict_dma_mpls(void);
-
-#ifdef CONFIG_HARDWALL
-/* User-level network management functions */
-void reset_network_state(void);
-void grant_network_mpls(void);
-void restrict_network_mpls(void);
-int hardwall_deactivate(struct task_struct *task);
-
-/* Hook hardwall code into changes in affinity. */
-#define arch_set_cpus_allowed(p, new_mask) do { \
-	if (p->thread.hardwall && !cpumask_equal(&p->cpus_allowed, new_mask)) \
-		hardwall_deactivate(p); \
-} while (0)
-#endif
-
-/*
- * Kernel threads can check to see if they need to migrate their
- * stack whenever they return from a context switch; for user
- * threads, we defer until they are returning to user-space.
- */
-#define finish_arch_switch(prev) do {                                     \
-	if (unlikely((prev)->state == TASK_DEAD))                         \
-		__insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_OS_EXIT |       \
-			((prev)->pid << _SIM_CONTROL_OPERATOR_BITS));     \
-	__insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_OS_SWITCH |             \
-		(current->pid << _SIM_CONTROL_OPERATOR_BITS));            \
-	if (current->mm == NULL && !kstack_hash &&                        \
-	    current_thread_info()->homecache_cpu != smp_processor_id())   \
-		homecache_migrate_kthread();                              \
-} while (0)
-
-/* Support function for forking a new task. */
-void ret_from_fork(void);
-
-/* Called from ret_from_fork() when a new process starts up. */
-struct task_struct *sim_notify_fork(struct task_struct *prev);
-
 #endif /* !__ASSEMBLY__ */
-
-#endif /* _ASM_TILE_SYSTEM_H */
+#endif /* _ASM_TILE_BARRIER_H */
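The rmb()/wmb() discussion preserved above maps onto the classic flag-and-data pattern. A minimal sketch, assuming a producer and consumer on different CPUs (names are hypothetical; a real kernel user would also want ACCESS_ONCE() on the flag):

    static int data;
    static volatile int flag;

    static void producer(void)
    {
    	data = 42;
    	wmb();			/* publish data before the flag */
    	flag = 1;
    }

    static int consumer(void)
    {
    	while (!flag)
    		cpu_relax();	/* spin until the flag is visible */
    	rmb();			/* order the flag read before the data read */
    	return data;
    }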
diff --git a/arch/tile/include/asm/bitops_32.h b/arch/tile/include/asm/bitops_32.h
index 571b118bfd9b..ddc4c1efde43 100644
--- a/arch/tile/include/asm/bitops_32.h
+++ b/arch/tile/include/asm/bitops_32.h
@@ -17,7 +17,6 @@
 
 #include <linux/compiler.h>
 #include <linux/atomic.h>
-#include <asm/system.h>
 
 /* Tile-specific routines to support <asm/bitops.h>. */
 unsigned long _atomic_or(volatile unsigned long *p, unsigned long mask);
diff --git a/arch/tile/include/asm/bitops_64.h b/arch/tile/include/asm/bitops_64.h
index e9c8e381ee0e..58d021a9834f 100644
--- a/arch/tile/include/asm/bitops_64.h
+++ b/arch/tile/include/asm/bitops_64.h
@@ -17,7 +17,6 @@
 
 #include <linux/compiler.h>
 #include <linux/atomic.h>
-#include <asm/system.h>
 
 /* See <asm/bitops.h> for API comments. */
 
diff --git a/arch/tile/include/asm/cacheflush.h b/arch/tile/include/asm/cacheflush.h
index e925f4bb498f..0fc63c488edf 100644
--- a/arch/tile/include/asm/cacheflush.h
+++ b/arch/tile/include/asm/cacheflush.h
@@ -20,7 +20,6 @@
 /* Keep includes the same across arches. */
 #include <linux/mm.h>
 #include <linux/cache.h>
-#include <asm/system.h>
 #include <arch/icache.h>
 
 /* Caches are physically-indexed and so don't need special treatment */
@@ -152,4 +151,14 @@ static inline void finv_buffer_local(void *buffer, size_t size)
  */
 void finv_buffer_remote(void *buffer, size_t size, int hfh);
 
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible:
+ *
+ * TODO: fill this in!
+ */
+static inline void sched_cacheflush(void)
+{
+}
+
 #endif /* _ASM_TILE_CACHEFLUSH_H */
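The finv_buffer_remote() declaration kept above is the flush-and-invalidate primitive used before another tile or I/O agent touches a buffer. A hedged sketch of a call site (the buffer name and hfh value are hypothetical; the function's definition is the authority on the hash-for-home flag):

    static char dma_buf[4096];

    static void hand_off_buffer(void)
    {
    	/* Flush and invalidate our cached copy before remote access. */
    	finv_buffer_remote(dma_buf, sizeof(dma_buf), 0);
    }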
diff --git a/arch/tile/include/asm/exec.h b/arch/tile/include/asm/exec.h
new file mode 100644
index 000000000000..a714e1950867
--- /dev/null
+++ b/arch/tile/include/asm/exec.h
@@ -0,0 +1,20 @@
+/*
+ * Copyright 2010 Tilera Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT.  See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _ASM_TILE_EXEC_H
+#define _ASM_TILE_EXEC_H
+
+#define arch_align_stack(x) (x)
+
+#endif /* _ASM_TILE_EXEC_H */
diff --git a/arch/tile/include/asm/pgtable.h b/arch/tile/include/asm/pgtable.h
index 1a20b7ef8ea2..67490910774d 100644
--- a/arch/tile/include/asm/pgtable.h
+++ b/arch/tile/include/asm/pgtable.h
@@ -29,7 +29,6 @@
 #include <linux/spinlock.h>
 #include <asm/processor.h>
 #include <asm/fixmap.h>
-#include <asm/system.h>
 
 struct mm_struct;
 struct vm_area_struct;
diff --git a/arch/tile/include/asm/setup.h b/arch/tile/include/asm/setup.h
index 7caf0f36b030..e58613e0752f 100644
--- a/arch/tile/include/asm/setup.h
+++ b/arch/tile/include/asm/setup.h
@@ -31,6 +31,28 @@ void early_panic(const char *fmt, ...);
 void warn_early_printk(void);
 void __init disable_early_printk(void);
 
+/* Init-time routine to do tile-specific per-cpu setup. */
+void setup_cpu(int boot);
+
+/* User-level DMA management functions */
+void grant_dma_mpls(void);
+void restrict_dma_mpls(void);
+
+#ifdef CONFIG_HARDWALL
+/* User-level network management functions */
+void reset_network_state(void);
+void grant_network_mpls(void);
+void restrict_network_mpls(void);
+struct task_struct;
+int hardwall_deactivate(struct task_struct *task);
+
+/* Hook hardwall code into changes in affinity. */
+#define arch_set_cpus_allowed(p, new_mask) do { \
+	if (p->thread.hardwall && !cpumask_equal(&p->cpus_allowed, new_mask)) \
+		hardwall_deactivate(p); \
+} while (0)
+#endif
+
 #endif /* __KERNEL__ */
 
 #endif /* _ASM_TILE_SETUP_H */
diff --git a/arch/tile/include/asm/spinlock_32.h b/arch/tile/include/asm/spinlock_32.h
index a5e4208d34f9..c0a77b38d39a 100644
--- a/arch/tile/include/asm/spinlock_32.h
+++ b/arch/tile/include/asm/spinlock_32.h
@@ -19,7 +19,6 @@
 
 #include <linux/atomic.h>
 #include <asm/page.h>
-#include <asm/system.h>
 #include <linux/compiler.h>
 
 /*
diff --git a/arch/tile/include/asm/switch_to.h b/arch/tile/include/asm/switch_to.h
new file mode 100644
index 000000000000..1d48c5fee8b7
--- /dev/null
+++ b/arch/tile/include/asm/switch_to.h
@@ -0,0 +1,76 @@
+/*
+ * Copyright 2010 Tilera Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT.  See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _ASM_TILE_SWITCH_TO_H
+#define _ASM_TILE_SWITCH_TO_H
+
+#include <arch/sim_def.h>
+
+/*
+ * switch_to(n) should switch tasks to task nr n, first
+ * checking that n isn't the current task, in which case it does nothing.
+ * The number of callee-saved registers saved on the kernel stack
+ * is defined here for use in copy_thread() and must agree with __switch_to().
+ */
+#define CALLEE_SAVED_FIRST_REG	30
+#define CALLEE_SAVED_REGS_COUNT	24	/* r30 to r52, plus an empty to align */
+
+#ifndef __ASSEMBLY__
+
+struct task_struct;
+
+/*
+ * Pause the DMA engine and static network before task switching.
+ */
+#define prepare_arch_switch(next) _prepare_arch_switch(next)
+void _prepare_arch_switch(struct task_struct *next);
+
+struct task_struct;
+#define switch_to(prev, next, last) ((last) = _switch_to((prev), (next)))
+extern struct task_struct *_switch_to(struct task_struct *prev,
+				      struct task_struct *next);
+
+/* Helper function for _switch_to(). */
+extern struct task_struct *__switch_to(struct task_struct *prev,
+				       struct task_struct *next,
+				       unsigned long new_system_save_k_0);
+
+/* Address that switched-away from tasks are at. */
+extern unsigned long get_switch_to_pc(void);
+
+/*
+ * Kernel threads can check to see if they need to migrate their
+ * stack whenever they return from a context switch; for user
+ * threads, we defer until they are returning to user-space.
+ */
+#define finish_arch_switch(prev) do {                                     \
+	if (unlikely((prev)->state == TASK_DEAD))                         \
+		__insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_OS_EXIT |       \
+			((prev)->pid << _SIM_CONTROL_OPERATOR_BITS));     \
+	__insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_OS_SWITCH |             \
+		(current->pid << _SIM_CONTROL_OPERATOR_BITS));            \
+	if (current->mm == NULL && !kstack_hash &&                        \
+	    current_thread_info()->homecache_cpu != smp_processor_id())   \
+		homecache_migrate_kthread();                              \
+} while (0)
+
+/* Support function for forking a new task. */
+void ret_from_fork(void);
+
+/* Called from ret_from_fork() when a new process starts up. */
+struct task_struct *sim_notify_fork(struct task_struct *prev);
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* _ASM_TILE_SWITCH_TO_H */
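The CALLEE_SAVED_* constants exist so copy_thread() can size the register save area consistently with __switch_to(). A hypothetical sketch of that arithmetic (not the actual copy_thread() from arch/tile/kernel/process.c):

    /* r30..r52 plus one pad word, each one machine word wide. */
    static unsigned long callee_save_bytes(void)
    {
    	return CALLEE_SAVED_REGS_COUNT * sizeof(unsigned long);
    }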
diff --git a/arch/tile/include/asm/timex.h b/arch/tile/include/asm/timex.h
index 29921f0b86da..dc987d53e2a9 100644
--- a/arch/tile/include/asm/timex.h
+++ b/arch/tile/include/asm/timex.h
@@ -29,11 +29,13 @@ typedef unsigned long long cycles_t;
 
 #if CHIP_HAS_SPLIT_CYCLE()
 cycles_t get_cycles(void);
+#define get_cycles_low() __insn_mfspr(SPR_CYCLE_LOW)
 #else
 static inline cycles_t get_cycles(void)
 {
 	return __insn_mfspr(SPR_CYCLE);
 }
+#define get_cycles_low() __insn_mfspr(SPR_CYCLE)   /* just get all 64 bits */
 #endif
 
 cycles_t get_clock_rate(void);
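With get_cycles() declared for both chip variants, a caller can time a region without caring whether the cycle counter is split. A minimal sketch ("expensive_op" is a hypothetical function to measure):

    static void time_region(void)
    {
    	cycles_t start = get_cycles();

    	expensive_op();
    	pr_info("took %llu cycles\n",
    		(unsigned long long)(get_cycles() - start));
    }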
diff --git a/arch/tile/include/asm/unaligned.h b/arch/tile/include/asm/unaligned.h
index 137e2de5b102..37dfbe598872 100644
--- a/arch/tile/include/asm/unaligned.h
+++ b/arch/tile/include/asm/unaligned.h
@@ -21,4 +21,19 @@
 #define get_unaligned	__get_unaligned_le
 #define put_unaligned	__put_unaligned_le
 
+/*
+ * Is the kernel doing fixups of unaligned accesses?  If <0, no kernel
+ * intervention occurs and SIGBUS is delivered with no data address
+ * info.  If 0, the kernel single-steps the instruction to discover
+ * the data address to provide with the SIGBUS.  If 1, the kernel does
+ * a fixup.
+ */
+extern int unaligned_fixup;
+
+/* Is the kernel printing on each unaligned fixup? */
+extern int unaligned_printk;
+
+/* Number of unaligned fixups performed */
+extern unsigned int unaligned_fixup_count;
+
 #endif /* _ASM_TILE_UNALIGNED_H */
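The get_unaligned()/put_unaligned() accessors selected at the top of this header make misaligned loads and stores safe regardless of unaligned_fixup's setting. A minimal sketch, assuming a little-endian wire format ("read_le32_field" is a hypothetical helper):

    #include <asm/unaligned.h>

    static u32 read_le32_field(const u8 *pkt, size_t off)
    {
    	/* Safe even when pkt + off is not 4-byte aligned. */
    	return get_unaligned((const u32 *)(pkt + off));
    }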