author     Linus Torvalds <torvalds@linux-foundation.org>  2013-10-07 12:30:02 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2013-10-07 12:30:02 -0400
commit     4f97c9b206c4bf6ed5ad31091ec2eb9c2204e44c (patch)
tree       fe5e840595303db13e742f968d6ee8682cb97fc2 /arch
parent     162bdafa46bfbc10c564de3c1aca2708440181e8 (diff)
parent     0cc96a745059e8daf1d7d6a26672f0ad9056e989 (diff)
Merge branch 'stable' of git://git.kernel.org/pub/scm/linux/kernel/git/cmetcalf/linux-tile
Pull Tile bugfixes from Chris Metcalf:
"This fixes some serious issues with PREEMPT support, and a couple of
smaller corner-case issues fixed in the last couple of weeks"
* 'stable' of git://git.kernel.org/pub/scm/linux/kernel/git/cmetcalf/linux-tile:
arch: tile: re-use kbasename() helper
tile: use a more conservative __my_cpu_offset in CONFIG_PREEMPT
tile: ensure interrupts disabled for preempt_schedule_irq()
      tile: change lock initialization in hardwall
tile: include: asm: use 'long long' instead of 'u64' for atomic64_t and its related functions
Diffstat (limited to 'arch')
 arch/tile/include/asm/atomic.h    |  5
 arch/tile/include/asm/atomic_32.h | 27
 arch/tile/include/asm/cmpxchg.h   | 28
 arch/tile/include/asm/percpu.h    | 34
 arch/tile/kernel/hardwall.c       |  6
 arch/tile/kernel/intvec_32.S      |  3
 arch/tile/kernel/intvec_64.S      |  3
 arch/tile/kernel/stack.c          | 12
 arch/tile/lib/atomic_32.c         |  8
 9 files changed, 84 insertions(+), 42 deletions(-)
diff --git a/arch/tile/include/asm/atomic.h b/arch/tile/include/asm/atomic.h
index d385eaadece7..709798460763 100644
--- a/arch/tile/include/asm/atomic.h
+++ b/arch/tile/include/asm/atomic.h
@@ -166,7 +166,7 @@ static inline int atomic_cmpxchg(atomic_t *v, int o, int n)
  *
  * Atomically sets @v to @i and returns old @v
  */
-static inline u64 atomic64_xchg(atomic64_t *v, u64 n)
+static inline long long atomic64_xchg(atomic64_t *v, long long n)
 {
 	return xchg64(&v->counter, n);
 }
@@ -180,7 +180,8 @@ static inline u64 atomic64_xchg(atomic64_t *v, u64 n)
  * Atomically checks if @v holds @o and replaces it with @n if so.
  * Returns the old value at @v.
  */
-static inline u64 atomic64_cmpxchg(atomic64_t *v, u64 o, u64 n)
+static inline long long atomic64_cmpxchg(atomic64_t *v, long long o,
+					  long long n)
 {
 	return cmpxchg64(&v->counter, o, n);
 }
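
The two prototypes above now take and return long long rather than u64, matching how callers of the kernel-wide atomic64_t API are written. As a minimal sketch (not part of this patch; the counter name is hypothetical), a typical atomic64_cmpxchg() retry loop looks like this:

#include <linux/atomic.h>

static atomic64_t next_seq = ATOMIC64_INIT(0);  /* hypothetical counter */

static long long claim_seq(void)
{
        long long old, new;

        /* Classic compare-and-swap retry loop over a 64-bit counter. */
        do {
                old = atomic64_read(&next_seq);
                new = old + 1;
        } while (atomic64_cmpxchg(&next_seq, old, new) != old);

        return new;
}
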
diff --git a/arch/tile/include/asm/atomic_32.h b/arch/tile/include/asm/atomic_32.h
index 0d0395b1b152..1ad4a1f7d42b 100644
--- a/arch/tile/include/asm/atomic_32.h
+++ b/arch/tile/include/asm/atomic_32.h
@@ -80,7 +80,7 @@ static inline void atomic_set(atomic_t *v, int n)
 /* A 64bit atomic type */
 
 typedef struct {
-	u64 __aligned(8) counter;
+	long long counter;
 } atomic64_t;
 
 #define ATOMIC64_INIT(val) { (val) }
@@ -91,14 +91,14 @@ typedef struct {
  *
  * Atomically reads the value of @v.
  */
-static inline u64 atomic64_read(const atomic64_t *v)
+static inline long long atomic64_read(const atomic64_t *v)
 {
 	/*
 	 * Requires an atomic op to read both 32-bit parts consistently.
 	 * Casting away const is safe since the atomic support routines
 	 * do not write to memory if the value has not been modified.
 	 */
-	return _atomic64_xchg_add((u64 *)&v->counter, 0);
+	return _atomic64_xchg_add((long long *)&v->counter, 0);
 }
 
 /**
@@ -108,7 +108,7 @@ static inline u64 atomic64_read(const atomic64_t *v)
  *
  * Atomically adds @i to @v.
  */
-static inline void atomic64_add(u64 i, atomic64_t *v)
+static inline void atomic64_add(long long i, atomic64_t *v)
 {
 	_atomic64_xchg_add(&v->counter, i);
 }
@@ -120,7 +120,7 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
  *
  * Atomically adds @i to @v and returns @i + @v
  */
-static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
+static inline long long atomic64_add_return(long long i, atomic64_t *v)
 {
 	smp_mb(); /* barrier for proper semantics */
 	return _atomic64_xchg_add(&v->counter, i) + i;
@@ -135,7 +135,8 @@ static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
  * Atomically adds @a to @v, so long as @v was not already @u.
  * Returns non-zero if @v was not @u, and zero otherwise.
  */
-static inline u64 atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
+static inline long long atomic64_add_unless(atomic64_t *v, long long a,
+					    long long u)
 {
 	smp_mb(); /* barrier for proper semantics */
 	return _atomic64_xchg_add_unless(&v->counter, a, u) != u;
@@ -151,7 +152,7 @@ static inline u64 atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
 * atomic64_set() can't be just a raw store, since it would be lost if it
 * fell between the load and store of one of the other atomic ops.
 */
-static inline void atomic64_set(atomic64_t *v, u64 n)
+static inline void atomic64_set(atomic64_t *v, long long n)
 {
 	_atomic64_xchg(&v->counter, n);
 }
@@ -236,11 +237,13 @@ extern struct __get_user __atomic_xchg_add_unless(volatile int *p,
 extern struct __get_user __atomic_or(volatile int *p, int *lock, int n);
 extern struct __get_user __atomic_andn(volatile int *p, int *lock, int n);
 extern struct __get_user __atomic_xor(volatile int *p, int *lock, int n);
-extern u64 __atomic64_cmpxchg(volatile u64 *p, int *lock, u64 o, u64 n);
-extern u64 __atomic64_xchg(volatile u64 *p, int *lock, u64 n);
-extern u64 __atomic64_xchg_add(volatile u64 *p, int *lock, u64 n);
-extern u64 __atomic64_xchg_add_unless(volatile u64 *p,
-				      int *lock, u64 o, u64 n);
+extern long long __atomic64_cmpxchg(volatile long long *p, int *lock,
+				    long long o, long long n);
+extern long long __atomic64_xchg(volatile long long *p, int *lock, long long n);
+extern long long __atomic64_xchg_add(volatile long long *p, int *lock,
+				     long long n);
+extern long long __atomic64_xchg_add_unless(volatile long long *p,
+					    int *lock, long long o, long long n);
 
 /* Return failure from the atomic wrappers. */
 struct __get_user __atomic_bad_address(int __user *addr);
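
A usage note (an illustration, not code from this merge): atomic64_add_unless() returns non-zero when the add actually happened, which is what makes the common "take a reference unless it already hit zero" pattern work.

#include <linux/atomic.h>
#include <linux/types.h>

/* Illustrative helper; the refcount parameter is hypothetical. */
static bool get_ref64(atomic64_t *refs)
{
        /* Add 1 unless the count is already 0; non-zero means we got it. */
        return atomic64_add_unless(refs, 1, 0) != 0;
}
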
diff --git a/arch/tile/include/asm/cmpxchg.h b/arch/tile/include/asm/cmpxchg.h
index 4001d5eab4bb..0ccda3c425be 100644
--- a/arch/tile/include/asm/cmpxchg.h
+++ b/arch/tile/include/asm/cmpxchg.h
@@ -35,10 +35,10 @@ int _atomic_xchg(int *ptr, int n);
 int _atomic_xchg_add(int *v, int i);
 int _atomic_xchg_add_unless(int *v, int a, int u);
 int _atomic_cmpxchg(int *ptr, int o, int n);
-u64 _atomic64_xchg(u64 *v, u64 n);
-u64 _atomic64_xchg_add(u64 *v, u64 i);
-u64 _atomic64_xchg_add_unless(u64 *v, u64 a, u64 u);
-u64 _atomic64_cmpxchg(u64 *v, u64 o, u64 n);
+long long _atomic64_xchg(long long *v, long long n);
+long long _atomic64_xchg_add(long long *v, long long i);
+long long _atomic64_xchg_add_unless(long long *v, long long a, long long u);
+long long _atomic64_cmpxchg(long long *v, long long o, long long n);
 
 #define xchg(ptr, n) \
 ({ \
@@ -53,7 +53,8 @@ u64 _atomic64_cmpxchg(u64 *v, u64 o, u64 n);
 	if (sizeof(*(ptr)) != 4) \
 		__cmpxchg_called_with_bad_pointer(); \
 	smp_mb(); \
-	(typeof(*(ptr)))_atomic_cmpxchg((int *)ptr, (int)o, (int)n); \
+	(typeof(*(ptr)))_atomic_cmpxchg((int *)ptr, (int)o, \
+					(int)n); \
 })
 
 #define xchg64(ptr, n) \
@@ -61,7 +62,8 @@ u64 _atomic64_cmpxchg(u64 *v, u64 o, u64 n);
 	if (sizeof(*(ptr)) != 8) \
 		__xchg_called_with_bad_pointer(); \
 	smp_mb(); \
-	(typeof(*(ptr)))_atomic64_xchg((u64 *)(ptr), (u64)(n)); \
+	(typeof(*(ptr)))_atomic64_xchg((long long *)(ptr), \
+				       (long long)(n)); \
 })
 
 #define cmpxchg64(ptr, o, n) \
@@ -69,7 +71,8 @@ u64 _atomic64_cmpxchg(u64 *v, u64 o, u64 n);
 	if (sizeof(*(ptr)) != 8) \
 		__cmpxchg_called_with_bad_pointer(); \
 	smp_mb(); \
-	(typeof(*(ptr)))_atomic64_cmpxchg((u64 *)ptr, (u64)o, (u64)n); \
+	(typeof(*(ptr)))_atomic64_cmpxchg((long long *)ptr, \
+					  (long long)o, (long long)n); \
 })
 
 #else
@@ -81,10 +84,11 @@ u64 _atomic64_cmpxchg(u64 *v, u64 o, u64 n);
 	switch (sizeof(*(ptr))) { \
 	case 4: \
 		__x = (typeof(__x))(unsigned long) \
-			__insn_exch4((ptr), (u32)(unsigned long)(n)); \
+			__insn_exch4((ptr), \
+				     (u32)(unsigned long)(n)); \
 		break; \
 	case 8: \
 		__x = (typeof(__x)) \
 			__insn_exch((ptr), (unsigned long)(n)); \
 		break; \
 	default: \
@@ -103,10 +107,12 @@ u64 _atomic64_cmpxchg(u64 *v, u64 o, u64 n);
 	switch (sizeof(*(ptr))) { \
 	case 4: \
 		__x = (typeof(__x))(unsigned long) \
-			__insn_cmpexch4((ptr), (u32)(unsigned long)(n)); \
+			__insn_cmpexch4((ptr), \
+					(u32)(unsigned long)(n)); \
 		break; \
 	case 8: \
-		__x = (typeof(__x))__insn_cmpexch((ptr), (u64)(n)); \
+		__x = (typeof(__x))__insn_cmpexch((ptr), \
+						  (long long)(n)); \
 		break; \
 	default: \
 		__cmpxchg_called_with_bad_pointer(); \
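
For orientation, a hedged sketch of how the xchg64()/cmpxchg64() macros above are used (the variable and helper are made up, and this assumes a configuration that provides cmpxchg64(), as the tile code above does): the macros dispatch on sizeof(*(ptr)), so the pointed-to object must be a 64-bit type.

#include <linux/atomic.h>
#include <linux/types.h>

static long long ring_tail;     /* hypothetical 64-bit slot */

/* Try to move the tail from 'old' to 'new'; true on success. */
static bool advance_tail(long long old, long long new)
{
        return cmpxchg64(&ring_tail, old, new) == old;
}
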
diff --git a/arch/tile/include/asm/percpu.h b/arch/tile/include/asm/percpu.h
index 63294f5a8efb..4f7ae39fa202 100644
--- a/arch/tile/include/asm/percpu.h
+++ b/arch/tile/include/asm/percpu.h
@@ -15,9 +15,37 @@
 #ifndef _ASM_TILE_PERCPU_H
 #define _ASM_TILE_PERCPU_H
 
-register unsigned long __my_cpu_offset __asm__("tp");
-#define __my_cpu_offset __my_cpu_offset
-#define set_my_cpu_offset(tp) (__my_cpu_offset = (tp))
+register unsigned long my_cpu_offset_reg asm("tp");
+
+#ifdef CONFIG_PREEMPT
+/*
+ * For full preemption, we can't just use the register variable
+ * directly, since we need barrier() to hazard against it, causing the
+ * compiler to reload anything computed from a previous "tp" value.
+ * But we also don't want to use volatile asm, since we'd like the
+ * compiler to be able to cache the value across multiple percpu reads.
+ * So we use a fake stack read as a hazard against barrier().
+ * The 'U' constraint is like 'm' but disallows postincrement.
+ */
+static inline unsigned long __my_cpu_offset(void)
+{
+	unsigned long tp;
+	register unsigned long *sp asm("sp");
+	asm("move %0, tp" : "=r" (tp) : "U" (*sp));
+	return tp;
+}
+#define __my_cpu_offset __my_cpu_offset()
+#else
+/*
+ * We don't need to hazard against barrier() since "tp" doesn't ever
+ * change with PREEMPT_NONE, and with PREEMPT_VOLUNTARY it only
+ * changes at function call points, at which we are already re-reading
+ * the value of "tp" due to "my_cpu_offset_reg" being a global variable.
+ */
+#define __my_cpu_offset my_cpu_offset_reg
+#endif
+
+#define set_my_cpu_offset(tp) (my_cpu_offset_reg = (tp))
 
 #include <asm-generic/percpu.h>
 
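
The comment block added above explains the trick: under CONFIG_PREEMPT the per-cpu base held in "tp" must be recomputed after every barrier(), because the task may have migrated to another CPU. A minimal sketch of the caller-side pattern this protects (the per-cpu variable and helper are made up for illustration):

#include <linux/percpu.h>
#include <linux/preempt.h>

DEFINE_PER_CPU(unsigned long, demo_hits);       /* hypothetical counter */

static void bump_demo_hits(void)
{
        /*
         * get_cpu_var()/put_cpu_var() wrap the access in
         * preempt_disable()/preempt_enable(), each of which contains a
         * barrier().  The fake stack read in __my_cpu_offset() is what
         * forces the compiler to recompute the "tp"-based address inside
         * this region instead of reusing one cached from before it.
         */
        get_cpu_var(demo_hits)++;
        put_cpu_var(demo_hits);
}
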
diff --git a/arch/tile/kernel/hardwall.c b/arch/tile/kernel/hardwall.c
index df27a1fd94a3..531f4c365351 100644
--- a/arch/tile/kernel/hardwall.c
+++ b/arch/tile/kernel/hardwall.c
@@ -66,7 +66,7 @@ static struct hardwall_type hardwall_types[] = {
 		0,
 		"udn",
 		LIST_HEAD_INIT(hardwall_types[HARDWALL_UDN].list),
-		__SPIN_LOCK_INITIALIZER(hardwall_types[HARDWALL_UDN].lock),
+		__SPIN_LOCK_UNLOCKED(hardwall_types[HARDWALL_UDN].lock),
 		NULL
 	},
 #ifndef __tilepro__
@@ -77,7 +77,7 @@ static struct hardwall_type hardwall_types[] = {
 		1, /* disabled pending hypervisor support */
 		"idn",
 		LIST_HEAD_INIT(hardwall_types[HARDWALL_IDN].list),
-		__SPIN_LOCK_INITIALIZER(hardwall_types[HARDWALL_IDN].lock),
+		__SPIN_LOCK_UNLOCKED(hardwall_types[HARDWALL_IDN].lock),
 		NULL
 	},
 	{ /* access to user-space IPI */
@@ -87,7 +87,7 @@ static struct hardwall_type hardwall_types[] = {
 		0,
 		"ipi",
 		LIST_HEAD_INIT(hardwall_types[HARDWALL_IPI].list),
-		__SPIN_LOCK_INITIALIZER(hardwall_types[HARDWALL_IPI].lock),
+		__SPIN_LOCK_UNLOCKED(hardwall_types[HARDWALL_IPI].lock),
 		NULL
 	},
 #endif
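
A note on the one-line change repeated three times above: __SPIN_LOCK_UNLOCKED() is the initializer intended for a spinlock_t (with DEFINE_SPINLOCK() for a standalone lock), while __SPIN_LOCK_INITIALIZER() is a lower-level helper used to build it. A generic sketch of the usual static-initialization forms (the struct and names are illustrative only, not from this commit):

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);              /* standalone static lock */

struct demo_channel {
        const char *name;
        spinlock_t lock;
};

static struct demo_channel demo = {
        .name = "demo",
        /* Initializer form used when the lock is embedded in a struct. */
        .lock = __SPIN_LOCK_UNLOCKED(demo.lock),
};
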
diff --git a/arch/tile/kernel/intvec_32.S b/arch/tile/kernel/intvec_32.S
index 088d5c141e68..2cbe6d5dd6b0 100644
--- a/arch/tile/kernel/intvec_32.S
+++ b/arch/tile/kernel/intvec_32.S
@@ -815,6 +815,9 @@ STD_ENTRY(interrupt_return)
 	}
 	bzt     r28, 1f
 	bnz     r29, 1f
+	/* Disable interrupts explicitly for preemption. */
+	IRQ_DISABLE(r20,r21)
+	TRACE_IRQS_OFF
 	jal     preempt_schedule_irq
 	FEEDBACK_REENTER(interrupt_return)
 1:
diff --git a/arch/tile/kernel/intvec_64.S b/arch/tile/kernel/intvec_64.S
index ec755d3f3734..b8fc497f2437 100644
--- a/arch/tile/kernel/intvec_64.S
+++ b/arch/tile/kernel/intvec_64.S
@@ -841,6 +841,9 @@ STD_ENTRY(interrupt_return)
 	}
 	beqzt   r28, 1f
 	bnez    r29, 1f
+	/* Disable interrupts explicitly for preemption. */
+	IRQ_DISABLE(r20,r21)
+	TRACE_IRQS_OFF
 	jal     preempt_schedule_irq
 	FEEDBACK_REENTER(interrupt_return)
 1:
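
Both interrupt-return paths above gain the same fix: preempt_schedule_irq() must be entered with interrupts disabled. A rough, hypothetical C rendering of what the assembly is doing around the added lines (simplified; the function name and structure are not from the kernel exit path itself):

#include <linux/preempt.h>
#include <linux/sched.h>
#include <linux/irqflags.h>
#include <asm/ptrace.h>

/* Simplified sketch of the preemption check on return from an interrupt. */
static void maybe_preempt_on_irq_exit(struct pt_regs *regs)
{
        if (user_mode(regs) || preempt_count() != 0 ||
            !test_thread_flag(TIF_NEED_RESCHED))
                return;

        local_irq_disable();            /* what the added IRQ_DISABLE(r20,r21) does */
        trace_hardirqs_off();           /* what the added TRACE_IRQS_OFF does */
        preempt_schedule_irq();         /* must be entered with IRQs disabled */
}
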
diff --git a/arch/tile/kernel/stack.c b/arch/tile/kernel/stack.c
index 362284af3afd..c93977a62116 100644
--- a/arch/tile/kernel/stack.c
+++ b/arch/tile/kernel/stack.c
@@ -23,6 +23,7 @@
 #include <linux/mmzone.h>
 #include <linux/dcache.h>
 #include <linux/fs.h>
+#include <linux/string.h>
 #include <asm/backtrace.h>
 #include <asm/page.h>
 #include <asm/ucontext.h>
@@ -332,21 +333,18 @@ static void describe_addr(struct KBacktraceIterator *kbt,
 	}
 
 	if (vma->vm_file) {
-		char *s;
 		p = d_path(&vma->vm_file->f_path, buf, bufsize);
 		if (IS_ERR(p))
 			p = "?";
-		s = strrchr(p, '/');
-		if (s)
-			p = s+1;
+		name = kbasename(p);
 	} else {
-		p = "anon";
+		name = "anon";
 	}
 
 	/* Generate a string description of the vma info. */
-	namelen = strlen(p);
+	namelen = strlen(name);
 	remaining = (bufsize - 1) - namelen;
-	memmove(buf, p, namelen);
+	memmove(buf, name, namelen);
 	snprintf(buf + namelen, remaining, "[%lx+%lx] ",
 		 vma->vm_start, vma->vm_end - vma->vm_start);
 }
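
The stack.c change swaps an open-coded strrchr()+1 for kbasename() from <linux/string.h> (hence the added include). A small illustration of the equivalence (the path and helper are examples only):

#include <linux/string.h>

/* kbasename() returns the component after the last '/', or the whole
 * string when there is no '/' -- the same result the removed
 * strrchr(p, '/') + 1 sequence produced. */
static const char *demo_basename(void)
{
        const char *p = "/usr/lib/libdemo.so";

        return kbasename(p);    /* "libdemo.so" */
}
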
diff --git a/arch/tile/lib/atomic_32.c b/arch/tile/lib/atomic_32.c
index 759efa337be8..c89b211fd9e7 100644
--- a/arch/tile/lib/atomic_32.c
+++ b/arch/tile/lib/atomic_32.c
@@ -107,19 +107,19 @@ unsigned long _atomic_xor(volatile unsigned long *p, unsigned long mask)
 EXPORT_SYMBOL(_atomic_xor);
 
 
-u64 _atomic64_xchg(u64 *v, u64 n)
+long long _atomic64_xchg(long long *v, long long n)
 {
 	return __atomic64_xchg(v, __atomic_setup(v), n);
 }
 EXPORT_SYMBOL(_atomic64_xchg);
 
-u64 _atomic64_xchg_add(u64 *v, u64 i)
+long long _atomic64_xchg_add(long long *v, long long i)
 {
 	return __atomic64_xchg_add(v, __atomic_setup(v), i);
 }
 EXPORT_SYMBOL(_atomic64_xchg_add);
 
-u64 _atomic64_xchg_add_unless(u64 *v, u64 a, u64 u)
+long long _atomic64_xchg_add_unless(long long *v, long long a, long long u)
 {
 	/*
 	 * Note: argument order is switched here since it is easier
@@ -130,7 +130,7 @@ u64 _atomic64_xchg_add_unless(u64 *v, u64 a, u64 u)
 }
 EXPORT_SYMBOL(_atomic64_xchg_add_unless);
 
-u64 _atomic64_cmpxchg(u64 *v, u64 o, u64 n)
+long long _atomic64_cmpxchg(long long *v, long long o, long long n)
 {
 	return __atomic64_cmpxchg(v, __atomic_setup(v), o, n);
 }
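
For orientation (a conceptual sketch only, not tile's implementation, which selects a hashed lock via __atomic_setup() and does the update in assembly): a 32-bit platform without a native 64-bit compare-and-swap can emulate one under a spinlock roughly like this.

#include <linux/spinlock.h>

/* Emulated 64-bit cmpxchg guarded by a caller-supplied lock. */
static long long demo_cmpxchg64(long long *p, spinlock_t *lock,
                                long long o, long long n)
{
        long long old;
        unsigned long flags;

        spin_lock_irqsave(lock, flags);
        old = *p;
        if (old == o)
                *p = n;
        spin_unlock_irqrestore(lock, flags);

        return old;
}
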