Diffstat (limited to 'arch/tile/include')
-rw-r--r--  arch/tile/include/arch/spr_def.h      4
-rw-r--r--  arch/tile/include/asm/atomic.h       50
-rw-r--r--  arch/tile/include/asm/atomic_32.h     2
-rw-r--r--  arch/tile/include/asm/bitops_64.h     8
-rw-r--r--  arch/tile/include/asm/cmpxchg.h      73
-rw-r--r--  arch/tile/include/asm/irq.h           2
-rw-r--r--  arch/tile/include/asm/spinlock_64.h   2
-rw-r--r--  arch/tile/include/asm/stack.h         1
-rw-r--r--  arch/tile/include/asm/traps.h         6
9 files changed, 89 insertions, 59 deletions
diff --git a/arch/tile/include/arch/spr_def.h b/arch/tile/include/arch/spr_def.h
index f548efeb2de3..d6ba449b5363 100644
--- a/arch/tile/include/arch/spr_def.h
+++ b/arch/tile/include/arch/spr_def.h
@@ -60,8 +60,8 @@
 	_concat4(SPR_IPI_EVENT_, CONFIG_KERNEL_PL,,)
 #define SPR_IPI_EVENT_RESET_K \
 	_concat4(SPR_IPI_EVENT_RESET_, CONFIG_KERNEL_PL,,)
-#define SPR_IPI_MASK_SET_K \
-	_concat4(SPR_IPI_MASK_SET_, CONFIG_KERNEL_PL,,)
+#define SPR_IPI_EVENT_SET_K \
+	_concat4(SPR_IPI_EVENT_SET_, CONFIG_KERNEL_PL,,)
 #define INT_IPI_K \
 	_concat4(INT_IPI_, CONFIG_KERNEL_PL,,)
 
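This hunk renames the kernel-PL IPI "set" accessor from the stale SPR_IPI_MASK_SET_ prefix to SPR_IPI_EVENT_SET_, matching the underlying SPR names. For readers unfamiliar with the _concat4() trick, here is a minimal sketch of the token pasting, assuming CONFIG_KERNEL_PL is 1 (the protection level is configurable, so that value is purely illustrative):

/* Sketch of the token-pasting pattern; _concat4() is defined
 * earlier in spr_def.h along these lines. */
#define __concat4(a, b, c, d)	a ## b ## c ## d
#define _concat4(a, b, c, d)	__concat4(a, b, c, d)

#define CONFIG_KERNEL_PL 1	/* assumed value, for illustration only */

#define SPR_IPI_EVENT_SET_K \
	_concat4(SPR_IPI_EVENT_SET_, CONFIG_KERNEL_PL,,)

/* SPR_IPI_EVENT_SET_K now expands to SPR_IPI_EVENT_SET_1, i.e. the
 * register for whatever protection level the kernel was built for. */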
diff --git a/arch/tile/include/asm/atomic.h b/arch/tile/include/asm/atomic.h
index bb696da5d7cd..f2461429a4a4 100644
--- a/arch/tile/include/asm/atomic.h
+++ b/arch/tile/include/asm/atomic.h
@@ -17,6 +17,8 @@
 #ifndef _ASM_TILE_ATOMIC_H
 #define _ASM_TILE_ATOMIC_H
 
+#include <asm/cmpxchg.h>
+
 #ifndef __ASSEMBLY__
 
 #include <linux/compiler.h>
@@ -121,54 +123,6 @@ static inline int atomic_read(const atomic_t *v)
  */
 #define atomic_add_negative(i, v)	(atomic_add_return((i), (v)) < 0)
 
-/* Nonexistent functions intended to cause link errors. */
-extern unsigned long __xchg_called_with_bad_pointer(void);
-extern unsigned long __cmpxchg_called_with_bad_pointer(void);
-
-#define xchg(ptr, x)						\
-	({							\
-		typeof(*(ptr)) __x;				\
-		switch (sizeof(*(ptr))) {			\
-		case 4:						\
-			__x = (typeof(__x))(typeof(__x-__x))atomic_xchg( \
-				(atomic_t *)(ptr),		\
-				(u32)(typeof((x)-(x)))(x));	\
-			break;					\
-		case 8:						\
-			__x = (typeof(__x))(typeof(__x-__x))atomic64_xchg( \
-				(atomic64_t *)(ptr),		\
-				(u64)(typeof((x)-(x)))(x));	\
-			break;					\
-		default:					\
-			__xchg_called_with_bad_pointer();	\
-		}						\
-		__x;						\
-	})
-
-#define cmpxchg(ptr, o, n)					\
-	({							\
-		typeof(*(ptr)) __x;				\
-		switch (sizeof(*(ptr))) {			\
-		case 4:						\
-			__x = (typeof(__x))(typeof(__x-__x))atomic_cmpxchg( \
-				(atomic_t *)(ptr),		\
-				(u32)(typeof((o)-(o)))(o),	\
-				(u32)(typeof((n)-(n)))(n));	\
-			break;					\
-		case 8:						\
-			__x = (typeof(__x))(typeof(__x-__x))atomic64_cmpxchg( \
-				(atomic64_t *)(ptr),		\
-				(u64)(typeof((o)-(o)))(o),	\
-				(u64)(typeof((n)-(n)))(n));	\
-			break;					\
-		default:					\
-			__cmpxchg_called_with_bad_pointer();	\
-		}						\
-		__x;						\
-	})
-
-#define tas(ptr) (xchg((ptr), 1))
-
 #endif /* __ASSEMBLY__ */
 
 #ifndef __tilegx__
diff --git a/arch/tile/include/asm/atomic_32.h b/arch/tile/include/asm/atomic_32.h
index 466dc4a39a4f..54d1da826f93 100644
--- a/arch/tile/include/asm/atomic_32.h
+++ b/arch/tile/include/asm/atomic_32.h
@@ -200,7 +200,7 @@ static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
  * @u: ...unless v is equal to u.
  *
  * Atomically adds @a to @v, so long as @v was not already @u.
- * Returns the old value of @v.
+ * Returns non-zero if @v was not @u, and zero otherwise.
  */
 static inline u64 atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
 {
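Worth noting for callers: the old comment over-promised. Like the generic kernel atomic64_add_unless(), the tile version reports whether the add happened, not the old value. A hedged caller sketch (the helper name is hypothetical, shown only to illustrate the contract):

/* Take a reference only if the count has not already hit zero. */
static inline int get_ref_unless_zero(atomic64_t *refcount)
{
	/* Non-zero: old value was not 0 and the increment happened.
	 * Zero: the count was already 0, no increment performed. */
	return atomic64_add_unless(refcount, 1, 0);
}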
diff --git a/arch/tile/include/asm/bitops_64.h b/arch/tile/include/asm/bitops_64.h
index 58d021a9834f..60b87ee54fb8 100644
--- a/arch/tile/include/asm/bitops_64.h
+++ b/arch/tile/include/asm/bitops_64.h
@@ -38,10 +38,10 @@ static inline void clear_bit(unsigned nr, volatile unsigned long *addr)
 
 static inline void change_bit(unsigned nr, volatile unsigned long *addr)
 {
-	unsigned long old, mask = (1UL << (nr % BITS_PER_LONG));
-	long guess, oldval;
+	unsigned long mask = (1UL << (nr % BITS_PER_LONG));
+	unsigned long guess, oldval;
 	addr += nr / BITS_PER_LONG;
-	old = *addr;
+	oldval = *addr;
 	do {
 		guess = oldval;
 		oldval = atomic64_cmpxchg((atomic64_t *)addr,
@@ -85,7 +85,7 @@ static inline int test_and_change_bit(unsigned nr,
 				       volatile unsigned long *addr)
 {
 	unsigned long mask = (1UL << (nr % BITS_PER_LONG));
-	long guess, oldval = *addr;
+	unsigned long guess, oldval;
 	addr += nr / BITS_PER_LONG;
 	oldval = *addr;
 	do {
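Both hunks fix the same class of bug in the compare-exchange retry loop: change_bit() seeded the loop from an uninitialized oldval (it read into the dead variable old), and test_and_change_bit() carried a redundant initializer that read *addr before addr had been advanced to the right word. The corrected pattern, consolidated as a standalone sketch:

/* The cmpxchg retry loop as fixed above: flip one bit atomically,
 * retrying for as long as another CPU races with the update. */
static inline void change_bit_sketch(unsigned nr,
				     volatile unsigned long *addr)
{
	unsigned long mask = (1UL << (nr % BITS_PER_LONG));
	unsigned long guess, oldval;

	addr += nr / BITS_PER_LONG;	/* pick the word first... */
	oldval = *addr;			/* ...then take the initial snapshot */
	do {
		guess = oldval;
		oldval = atomic64_cmpxchg((atomic64_t *)addr,
					  guess, guess ^ mask);
	} while (guess != oldval);	/* mismatch: lost a race, retry */
}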
diff --git a/arch/tile/include/asm/cmpxchg.h b/arch/tile/include/asm/cmpxchg.h
new file mode 100644
index 000000000000..276f067e3640
--- /dev/null
+++ b/arch/tile/include/asm/cmpxchg.h
@@ -0,0 +1,73 @@
+/*
+ * cmpxchg.h -- forked from asm/atomic.h with this copyright:
+ *
+ * Copyright 2010 Tilera Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT.  See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#ifndef _ASM_TILE_CMPXCHG_H
+#define _ASM_TILE_CMPXCHG_H
+
+#ifndef __ASSEMBLY__
+
+/* Nonexistent functions intended to cause link errors. */
+extern unsigned long __xchg_called_with_bad_pointer(void);
+extern unsigned long __cmpxchg_called_with_bad_pointer(void);
+
+#define xchg(ptr, x)						\
+	({							\
+		typeof(*(ptr)) __x;				\
+		switch (sizeof(*(ptr))) {			\
+		case 4:						\
+			__x = (typeof(__x))(typeof(__x-__x))atomic_xchg( \
+				(atomic_t *)(ptr),		\
+				(u32)(typeof((x)-(x)))(x));	\
+			break;					\
+		case 8:						\
+			__x = (typeof(__x))(typeof(__x-__x))atomic64_xchg( \
+				(atomic64_t *)(ptr),		\
+				(u64)(typeof((x)-(x)))(x));	\
+			break;					\
+		default:					\
+			__xchg_called_with_bad_pointer();	\
+		}						\
+		__x;						\
+	})
+
+#define cmpxchg(ptr, o, n)					\
+	({							\
+		typeof(*(ptr)) __x;				\
+		switch (sizeof(*(ptr))) {			\
+		case 4:						\
+			__x = (typeof(__x))(typeof(__x-__x))atomic_cmpxchg( \
+				(atomic_t *)(ptr),		\
+				(u32)(typeof((o)-(o)))(o),	\
+				(u32)(typeof((n)-(n)))(n));	\
+			break;					\
+		case 8:						\
+			__x = (typeof(__x))(typeof(__x-__x))atomic64_cmpxchg( \
+				(atomic64_t *)(ptr),		\
+				(u64)(typeof((o)-(o)))(o),	\
+				(u64)(typeof((n)-(n)))(n));	\
+			break;					\
+		default:					\
+			__cmpxchg_called_with_bad_pointer();	\
+		}						\
+		__x;						\
+	})
+
+#define tas(ptr) (xchg((ptr), 1))
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_TILE_CMPXCHG_H */
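The new header keeps exactly the semantics the macros had in asm/atomic.h: dispatch on sizeof(*ptr), with 4-byte objects routed to atomic_cmpxchg()/atomic_xchg(), 8-byte objects to the atomic64 variants, and any other size caught at link time by the deliberately undefined __*_called_with_bad_pointer() helpers. A brief caller sketch (the variables are illustrative):

/* Illustrative callers of the generic macros above. */
static int flag;	/* sizeof == 4: dispatches to atomic_cmpxchg() */
static u64 counter;	/* sizeof == 8: dispatches to atomic64_xchg() */

static void cmpxchg_example(void)
{
	int old = cmpxchg(&flag, 0, 1);	/* set flag iff it was 0 */
	u64 prev = xchg(&counter, 0);	/* reset, returning old value */

	/* An object of any other size would instead fail to link
	 * against __cmpxchg_called_with_bad_pointer(). */
	(void)old;
	(void)prev;
}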
diff --git a/arch/tile/include/asm/irq.h b/arch/tile/include/asm/irq.h
index f80f8ceabc67..33cff9a3058b 100644
--- a/arch/tile/include/asm/irq.h
+++ b/arch/tile/include/asm/irq.h
@@ -21,7 +21,7 @@
 #define NR_IRQS 32
 
 /* IRQ numbers used for linux IPIs. */
-#define IRQ_RESCHEDULE 1
+#define IRQ_RESCHEDULE 0
 
 #define irq_canonicalize(irq)   (irq)
 
diff --git a/arch/tile/include/asm/spinlock_64.h b/arch/tile/include/asm/spinlock_64.h
index 72be5904e020..5f8b6a095fd8 100644
--- a/arch/tile/include/asm/spinlock_64.h
+++ b/arch/tile/include/asm/spinlock_64.h
@@ -137,7 +137,7 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
 static inline void arch_write_unlock(arch_rwlock_t *rw)
 {
 	__insn_mf();
-	rw->lock = 0;
+	__insn_exch4(&rw->lock, 0);  /* Avoid waiting in the write buffer. */
 }
 
 static inline int arch_read_trylock(arch_rwlock_t *rw)
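This one-liner deserves a gloss: a plain store releasing the lock can sit in the CPU's write buffer for a while, delaying the moment contending readers and writers observe the release. Replacing it with the __insn_exch4() 4-byte atomic-exchange intrinsic pushes the release out promptly. A side-by-side sketch of the two forms, using only the intrinsics the hunk itself uses:

/* Before: release via plain store; may linger in the write buffer. */
static inline void write_unlock_store(arch_rwlock_t *rw)
{
	__insn_mf();		/* fence: order the critical section first */
	rw->lock = 0;
}

/* After: release via atomic exchange; made visible promptly. */
static inline void write_unlock_exch(arch_rwlock_t *rw)
{
	__insn_mf();
	__insn_exch4(&rw->lock, 0);
}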
diff --git a/arch/tile/include/asm/stack.h b/arch/tile/include/asm/stack.h
index 4d97a2db932e..0e9d382a2d45 100644
--- a/arch/tile/include/asm/stack.h
+++ b/arch/tile/include/asm/stack.h
@@ -25,7 +25,6 @@
 struct KBacktraceIterator {
 	BacktraceIterator it;
 	struct task_struct *task;	/* task we are backtracing */
-	pte_t *pgtable;		/* page table for user space access */
 	int end;		/* iteration complete. */
 	int new_context;	/* new context is starting */
 	int profile;		/* profiling, so stop on async intrpt */
diff --git a/arch/tile/include/asm/traps.h b/arch/tile/include/asm/traps.h
index 5f20f920f932..e28c3df4176a 100644
--- a/arch/tile/include/asm/traps.h
+++ b/arch/tile/include/asm/traps.h
@@ -64,7 +64,11 @@ void do_breakpoint(struct pt_regs *, int fault_num);
 
 
 #ifdef __tilegx__
+/* kernel/single_step.c */
 void gx_singlestep_handle(struct pt_regs *, int fault_num);
+
+/* kernel/intvec_64.S */
+void fill_ra_stack(void);
 #endif
 
-#endif /* _ASM_TILE_SYSCALLS_H */
+#endif /* _ASM_TILE_TRAPS_H */