author    Linus Torvalds <torvalds@linux-foundation.org>  2012-03-28 18:58:21 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2012-03-28 18:58:21 -0400
commit    0195c00244dc2e9f522475868fa278c473ba7339 (patch)
tree      f97ca98ae64ede2c33ad3de05ed7bbfa4f4495ed /arch/mips/include/asm
parent    f21ce8f8447c8be8847dadcfdbcc76b0d7365fa5 (diff)
parent    141124c02059eee9dbc5c86ea797b1ca888e77f7 (diff)
Merge tag 'split-asm_system_h-for-linus-20120328' of git://git.kernel.org/pub/scm/linux/kernel/git/dhowells/linux-asm_system
Pull "Disintegrate and delete asm/system.h" from David Howells:
 "Here are a bunch of patches to disintegrate asm/system.h into a set of
  separate bits to relieve the problem of circular inclusion dependencies.

  I've built all the working defconfigs from all the arches that I can and
  made sure that they don't break.

  The reason for these patches is that I recently encountered a circular
  dependency problem that came about when I produced some patches to
  optimise get_order() by rewriting it to use ilog2().  This uses bitops -
  and on the SH arch asm/bitops.h drags in asm-generic/get_order.h by a
  circuitous route involving asm/system.h.

  The main difficulty seems to be asm/system.h.  It holds a number of low
  level bits with no/few dependencies that are commonly used (eg. memory
  barriers) and a number of bits with more dependencies that aren't used
  in many places (eg. switch_to()).

  These patches break asm/system.h up into the following core pieces:

    (1) asm/barrier.h

        Move memory barriers here.  This is already done for MIPS and
        Alpha.

    (2) asm/switch_to.h

        Move switch_to() and related stuff here.

    (3) asm/exec.h

        Move arch_align_stack() here.  Other process execution related
        bits could perhaps go here from asm/processor.h.

    (4) asm/cmpxchg.h

        Move xchg() and cmpxchg() here as they're full word atomic ops
        and frequently used by atomic_xchg() and atomic_cmpxchg().

    (5) asm/bug.h

        Move die() and related bits.

    (6) asm/auxvec.h

        Move AT_VECTOR_SIZE_ARCH here.

  Other arch headers are created as needed on a per-arch basis."

Fixed up some conflicts from other header file cleanups and moving code
around that has happened in the meantime, so David's testing is somewhat
weakened by that.  We'll find out anything that got broken and fix it.

* tag 'split-asm_system_h-for-linus-20120328' of git://git.kernel.org/pub/scm/linux/kernel/git/dhowells/linux-asm_system: (38 commits)
  Delete all instances of asm/system.h
  Remove all #inclusions of asm/system.h
  Add #includes needed to permit the removal of asm/system.h
  Move all declarations of free_initmem() to linux/mm.h
  Disintegrate asm/system.h for OpenRISC
  Split arch_align_stack() out from asm-generic/system.h
  Split the switch_to() wrapper out of asm-generic/system.h
  Move the asm-generic/system.h xchg() implementation to asm-generic/cmpxchg.h
  Create asm-generic/barrier.h
  Make asm-generic/cmpxchg.h #include asm-generic/cmpxchg-local.h
  Disintegrate asm/system.h for Xtensa
  Disintegrate asm/system.h for Unicore32 [based on ver #3, changed by gxt]
  Disintegrate asm/system.h for Tile
  Disintegrate asm/system.h for Sparc
  Disintegrate asm/system.h for SH
  Disintegrate asm/system.h for Score
  Disintegrate asm/system.h for S390
  Disintegrate asm/system.h for PowerPC
  Disintegrate asm/system.h for PA-RISC
  Disintegrate asm/system.h for MN10300
  ...
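As background for the get_order() rework mentioned above: the idea is to compute an allocation order from a byte count with an integer log2 instead of a shift loop. A minimal standalone sketch follows - illustrative only, not the kernel's exact code; PAGE_SHIFT_SKETCH, fls_ul and get_order_sketch are made-up names, and 4 KiB pages are assumed.

/* Illustrative sketch only - not the kernel's code.  Assumes 4 KiB pages. */
#include <assert.h>
#include <limits.h>

#define PAGE_SHIFT_SKETCH 12

/* find-last-set: 1-based index of the highest set bit, 0 for n == 0 */
static int fls_ul(unsigned long n)
{
	return n ? (int)(sizeof(n) * CHAR_BIT) - __builtin_clzl(n) : 0;
}

/* get_order()-style computation: smallest x with 2^x pages >= size bytes */
static int get_order_sketch(unsigned long size)
{
	size--;				/* exact page multiples round down */
	size >>= PAGE_SHIFT_SKETCH;	/* whole pages needed, minus one */
	return fls_ul(size);		/* == ilog2(pages - 1) + 1 */
}

int main(void)
{
	assert(get_order_sketch(4096) == 0);	/* one page */
	assert(get_order_sketch(4097) == 1);	/* needs a 2-page block */
	assert(get_order_sketch(20480) == 3);	/* 5 pages -> 8-page block */
	return 0;
}

Because this leans on bitops, any header tangle between asm/bitops.h and asm/system.h becomes a circular-inclusion problem - hence the split below.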
Diffstat (limited to 'arch/mips/include/asm')
-rw-r--r--  arch/mips/include/asm/atomic.h                  |   2
-rw-r--r--  arch/mips/include/asm/barrier.h                 |   2
-rw-r--r--  arch/mips/include/asm/cmpxchg.h                 | 124
-rw-r--r--  arch/mips/include/asm/dma.h                     |   1
-rw-r--r--  arch/mips/include/asm/exec.h                    |  17
-rw-r--r--  arch/mips/include/asm/mach-au1x00/au1000_dma.h  |   1
-rw-r--r--  arch/mips/include/asm/processor.h               |   7
-rw-r--r--  arch/mips/include/asm/setup.h                   |  11
-rw-r--r--  arch/mips/include/asm/switch_to.h               |  85
-rw-r--r--  arch/mips/include/asm/system.h                  | 235
-rw-r--r--  arch/mips/include/asm/txx9/jmr3927.h            |   1
11 files changed, 246 insertions(+), 240 deletions(-)
diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
index 1d93f81d57e7..3f4c5cb6433e 100644
--- a/arch/mips/include/asm/atomic.h
+++ b/arch/mips/include/asm/atomic.h
@@ -18,8 +18,8 @@
 #include <linux/types.h>
 #include <asm/barrier.h>
 #include <asm/cpu-features.h>
+#include <asm/cmpxchg.h>
 #include <asm/war.h>
-#include <asm/system.h>
 
 #define ATOMIC_INIT(i)	{ (i) }
 
diff --git a/arch/mips/include/asm/barrier.h b/arch/mips/include/asm/barrier.h
index c0884f02d3a6..f7fdc24e972d 100644
--- a/arch/mips/include/asm/barrier.h
+++ b/arch/mips/include/asm/barrier.h
@@ -8,6 +8,8 @@
 #ifndef __ASM_BARRIER_H
 #define __ASM_BARRIER_H
 
+#include <asm/addrspace.h>
+
 /*
  * read_barrier_depends - Flush all pending reads that subsequents reads
  * depend on.
diff --git a/arch/mips/include/asm/cmpxchg.h b/arch/mips/include/asm/cmpxchg.h
index d8d1c2805ac7..285a41fa0b18 100644
--- a/arch/mips/include/asm/cmpxchg.h
+++ b/arch/mips/include/asm/cmpxchg.h
@@ -9,6 +9,130 @@
 #define __ASM_CMPXCHG_H
 
 #include <linux/irqflags.h>
+#include <asm/war.h>
+
+static inline unsigned long __xchg_u32(volatile int * m, unsigned int val)
+{
+	__u32 retval;
+
+	smp_mb__before_llsc();
+
+	if (kernel_uses_llsc && R10000_LLSC_WAR) {
+		unsigned long dummy;
+
+		__asm__ __volatile__(
+		"	.set	mips3					\n"
+		"1:	ll	%0, %3			# xchg_u32	\n"
+		"	.set	mips0					\n"
+		"	move	%2, %z4					\n"
+		"	.set	mips3					\n"
+		"	sc	%2, %1					\n"
+		"	beqzl	%2, 1b					\n"
+		"	.set	mips0					\n"
+		: "=&r" (retval), "=m" (*m), "=&r" (dummy)
+		: "R" (*m), "Jr" (val)
+		: "memory");
+	} else if (kernel_uses_llsc) {
+		unsigned long dummy;
+
+		do {
+			__asm__ __volatile__(
+			"	.set	mips3				\n"
+			"	ll	%0, %3		# xchg_u32	\n"
+			"	.set	mips0				\n"
+			"	move	%2, %z4				\n"
+			"	.set	mips3				\n"
+			"	sc	%2, %1				\n"
+			"	.set	mips0				\n"
+			: "=&r" (retval), "=m" (*m), "=&r" (dummy)
+			: "R" (*m), "Jr" (val)
+			: "memory");
+		} while (unlikely(!dummy));
+	} else {
+		unsigned long flags;
+
+		raw_local_irq_save(flags);
+		retval = *m;
+		*m = val;
+		raw_local_irq_restore(flags);	/* implies memory barrier */
+	}
+
+	smp_llsc_mb();
+
+	return retval;
+}
+
+#ifdef CONFIG_64BIT
+static inline __u64 __xchg_u64(volatile __u64 * m, __u64 val)
+{
+	__u64 retval;
+
+	smp_mb__before_llsc();
+
+	if (kernel_uses_llsc && R10000_LLSC_WAR) {
+		unsigned long dummy;
+
+		__asm__ __volatile__(
+		"	.set	mips3					\n"
+		"1:	lld	%0, %3			# xchg_u64	\n"
+		"	move	%2, %z4					\n"
+		"	scd	%2, %1					\n"
+		"	beqzl	%2, 1b					\n"
+		"	.set	mips0					\n"
+		: "=&r" (retval), "=m" (*m), "=&r" (dummy)
+		: "R" (*m), "Jr" (val)
+		: "memory");
+	} else if (kernel_uses_llsc) {
+		unsigned long dummy;
+
+		do {
+			__asm__ __volatile__(
+			"	.set	mips3				\n"
+			"	lld	%0, %3		# xchg_u64	\n"
+			"	move	%2, %z4				\n"
+			"	scd	%2, %1				\n"
+			"	.set	mips0				\n"
+			: "=&r" (retval), "=m" (*m), "=&r" (dummy)
+			: "R" (*m), "Jr" (val)
+			: "memory");
+		} while (unlikely(!dummy));
+	} else {
+		unsigned long flags;
+
+		raw_local_irq_save(flags);
+		retval = *m;
+		*m = val;
+		raw_local_irq_restore(flags);	/* implies memory barrier */
+	}
+
+	smp_llsc_mb();
+
+	return retval;
+}
+#else
+extern __u64 __xchg_u64_unsupported_on_32bit_kernels(volatile __u64 * m, __u64 val);
+#define __xchg_u64 __xchg_u64_unsupported_on_32bit_kernels
+#endif
+
+static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
+{
+	switch (size) {
+	case 4:
+		return __xchg_u32(ptr, x);
+	case 8:
+		return __xchg_u64(ptr, x);
+	}
+
+	return x;
+}
+
+#define xchg(ptr, x)							\
+({									\
+	BUILD_BUG_ON(sizeof(*(ptr)) & ~0xc);				\
+									\
+	((__typeof__(*(ptr)))						\
+		__xchg((unsigned long)(x), (ptr), sizeof(*(ptr))));	\
+})
 
 #define __HAVE_ARCH_CMPXCHG 1
 
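As context for the code moved in above: xchg() atomically stores a new value into a 32- or 64-bit word (the BUILD_BUG_ON admits only sizes 4 and 8) and returns the previous contents. A hedged usage sketch; kick_once() and do_one_time_setup() are hypothetical names, not anything in this patch.

/* Hypothetical caller of the xchg() defined above (kernel context). */
static int pending;	/* an int is 4 bytes, a size xchg() accepts */

static void kick_once(void)
{
	/* xchg() returns the old value; only the caller that flips
	 * 0 -> 1 performs the one-time work. */
	if (xchg(&pending, 1) == 0)
		do_one_time_setup();	/* hypothetical helper */
}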
diff --git a/arch/mips/include/asm/dma.h b/arch/mips/include/asm/dma.h
index 2d47da62d5a7..f5097f65a8ab 100644
--- a/arch/mips/include/asm/dma.h
+++ b/arch/mips/include/asm/dma.h
@@ -15,7 +15,6 @@
 #include <asm/io.h>			/* need byte IO */
 #include <linux/spinlock.h>		/* And spinlocks */
 #include <linux/delay.h>
-#include <asm/system.h>
 
 
 #ifdef HAVE_REALLY_SLOW_DMA_CONTROLLER
diff --git a/arch/mips/include/asm/exec.h b/arch/mips/include/asm/exec.h
new file mode 100644
index 000000000000..c1f6afa4bc4f
--- /dev/null
+++ b/arch/mips/include/asm/exec.h
@@ -0,0 +1,17 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1994, 95, 96, 97, 98, 99, 2003, 06 by Ralf Baechle
+ * Copyright (C) 1996 by Paul M. Antoine
+ * Copyright (C) 1999 Silicon Graphics
+ * Kevin D. Kissell, kevink@mips.org and Carsten Langgaard, carstenl@mips.com
+ * Copyright (C) 2000 MIPS Technologies, Inc.
+ */
+#ifndef _ASM_EXEC_H
+#define _ASM_EXEC_H
+
+extern unsigned long arch_align_stack(unsigned long sp);
+
+#endif /* _ASM_EXEC_H */
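arch_align_stack() is only declared here; the MIPS definition lives under arch/mips/kernel/. As a sketch of the usual shape of this hook - assuming the common pattern, not quoting the MIPS implementation - it jitters the initial user stack pointer and then aligns it for the ABI:

/* Sketch of a typical arch_align_stack(); the alignment mask is an
 * assumption for illustration, not the MIPS value. */
unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() & ~PAGE_MASK;	/* sub-page jitter */

	return sp & ~0xfUL;	/* 16-byte alignment, assumed for the sketch */
}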
diff --git a/arch/mips/include/asm/mach-au1x00/au1000_dma.h b/arch/mips/include/asm/mach-au1x00/au1000_dma.h
index 59f5b55b2200..ba4cf0e91c8b 100644
--- a/arch/mips/include/asm/mach-au1x00/au1000_dma.h
+++ b/arch/mips/include/asm/mach-au1x00/au1000_dma.h
@@ -33,7 +33,6 @@
 #include <linux/io.h>		/* need byte IO */
 #include <linux/spinlock.h>	/* And spinlocks */
 #include <linux/delay.h>
-#include <asm/system.h>
 
 #define NUM_AU1000_DMA_CHANNELS	8
 
diff --git a/arch/mips/include/asm/processor.h b/arch/mips/include/asm/processor.h
index c104f1039a69..20e9dcf42b27 100644
--- a/arch/mips/include/asm/processor.h
+++ b/arch/mips/include/asm/processor.h
@@ -19,7 +19,6 @@
 #include <asm/cpu-info.h>
 #include <asm/mipsregs.h>
 #include <asm/prefetch.h>
-#include <asm/system.h>
 
 /*
  * Return current * instruction pointer ("program counter").
@@ -356,6 +355,12 @@ unsigned long get_wchan(struct task_struct *p);
 #define ARCH_HAS_PREFETCHW
 #define prefetchw(x) __builtin_prefetch((x), 1, 1)
 
+/*
+ * See Documentation/scheduler/sched-arch.txt; prevents deadlock on SMP
+ * systems.
+ */
+#define __ARCH_WANT_UNLOCKED_CTXSW
+
 #endif
 
 #endif /* _ASM_PROCESSOR_H */
diff --git a/arch/mips/include/asm/setup.h b/arch/mips/include/asm/setup.h
index 50511aac04e9..6dce6d8d09ab 100644
--- a/arch/mips/include/asm/setup.h
+++ b/arch/mips/include/asm/setup.h
@@ -5,6 +5,17 @@
 
 #ifdef __KERNEL__
 extern void setup_early_printk(void);
+
+extern void set_handler(unsigned long offset, void *addr, unsigned long len);
+extern void set_uncached_handler(unsigned long offset, void *addr, unsigned long len);
+
+typedef void (*vi_handler_t)(void);
+extern void *set_vi_handler(int n, vi_handler_t addr);
+
+extern void *set_except_vector(int n, void *addr);
+extern unsigned long ebase;
+extern void per_cpu_trap_init(void);
+
 #endif /* __KERNEL__ */
 
 #endif /* __SETUP_H */
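The declarations moved into setup.h above form the MIPS exception/vector setup API. A hypothetical caller, for illustration only (my_vi_dispatch and platform_irq_setup are made-up names):

/* Hypothetical platform code using the API declared above. */
extern void my_vi_dispatch(void);	/* made-up low-level handler */

static void __init platform_irq_setup(void)
{
	/* Route vectored interrupt 2 to our dispatcher; the previous
	 * handler pointer is returned and ignored here. */
	set_vi_handler(2, my_vi_dispatch);
}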
diff --git a/arch/mips/include/asm/switch_to.h b/arch/mips/include/asm/switch_to.h
new file mode 100644
index 000000000000..5d33621b5658
--- /dev/null
+++ b/arch/mips/include/asm/switch_to.h
@@ -0,0 +1,85 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1994, 95, 96, 97, 98, 99, 2003, 06 by Ralf Baechle
+ * Copyright (C) 1996 by Paul M. Antoine
+ * Copyright (C) 1999 Silicon Graphics
+ * Kevin D. Kissell, kevink@mips.org and Carsten Langgaard, carstenl@mips.com
+ * Copyright (C) 2000 MIPS Technologies, Inc.
+ */
+#ifndef _ASM_SWITCH_TO_H
+#define _ASM_SWITCH_TO_H
+
+#include <asm/cpu-features.h>
+#include <asm/watch.h>
+#include <asm/dsp.h>
+
+struct task_struct;
+
+/*
+ * switch_to(n) should switch tasks to task nr n, first
+ * checking that n isn't the current task, in which case it does nothing.
+ */
+extern asmlinkage void *resume(void *last, void *next, void *next_ti);
+
+extern unsigned int ll_bit;
+extern struct task_struct *ll_task;
+
+#ifdef CONFIG_MIPS_MT_FPAFF
+
+/*
+ * Handle the scheduler resume end of FPU affinity management.  We do this
+ * inline to try to keep the overhead down. If we have been forced to run on
+ * a "CPU" with an FPU because of a previous high level of FP computation,
+ * but did not actually use the FPU during the most recent time-slice (CU1
+ * isn't set), we undo the restriction on cpus_allowed.
+ *
+ * We're not calling set_cpus_allowed() here, because we have no need to
+ * force prompt migration - we're already switching the current CPU to a
+ * different thread.
+ */
+
+#define __mips_mt_fpaff_switch_to(prev)					\
+do {									\
+	struct thread_info *__prev_ti = task_thread_info(prev);		\
+									\
+	if (cpu_has_fpu &&						\
+	    test_ti_thread_flag(__prev_ti, TIF_FPUBOUND) &&		\
+	    (!(KSTK_STATUS(prev) & ST0_CU1))) {				\
+		clear_ti_thread_flag(__prev_ti, TIF_FPUBOUND);		\
+		prev->cpus_allowed = prev->thread.user_cpus_allowed;	\
+	}								\
+	next->thread.emulated_fp = 0;					\
+} while(0)
+
+#else
+#define __mips_mt_fpaff_switch_to(prev) do { (void) (prev); } while (0)
+#endif
+
+#define __clear_software_ll_bit()					\
+do {									\
+	if (!__builtin_constant_p(cpu_has_llsc) || !cpu_has_llsc)	\
+		ll_bit = 0;						\
+} while (0)
+
+#define switch_to(prev, next, last)					\
+do {									\
+	__mips_mt_fpaff_switch_to(prev);				\
+	if (cpu_has_dsp)						\
+		__save_dsp(prev);					\
+	__clear_software_ll_bit();					\
+	(last) = resume(prev, next, task_thread_info(next));		\
+} while (0)
+
+#define finish_arch_switch(prev)					\
+do {									\
+	if (cpu_has_dsp)						\
+		__restore_dsp(current);					\
+	if (cpu_has_userlocal)						\
+		write_c0_userlocal(current_thread_info()->tp_value);	\
+	__restore_watch();						\
+} while (0)
+
+#endif /* _ASM_SWITCH_TO_H */
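For orientation, the macros above are consumed by the core scheduler during a context switch. A simplified sketch of that call pattern - not a quote of kernel/sched code, and context_switch_sketch is a made-up name:

/* Simplified sketch of how the scheduler core uses switch_to(). */
static void context_switch_sketch(struct task_struct *prev,
				  struct task_struct *next)
{
	/* ... mm switch and runqueue bookkeeping elided ... */
	switch_to(prev, next, prev);	/* execution resumes as 'next';
					 * 'prev' is rewritten to the task
					 * we actually switched away from */
	/* finish_arch_switch(prev) then runs on the new task's stack */
}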
diff --git a/arch/mips/include/asm/system.h b/arch/mips/include/asm/system.h
deleted file mode 100644
index 6018c80ce37a..000000000000
--- a/arch/mips/include/asm/system.h
+++ /dev/null
@@ -1,235 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1994, 95, 96, 97, 98, 99, 2003, 06 by Ralf Baechle
- * Copyright (C) 1996 by Paul M. Antoine
- * Copyright (C) 1999 Silicon Graphics
- * Kevin D. Kissell, kevink@mips.org and Carsten Langgaard, carstenl@mips.com
- * Copyright (C) 2000 MIPS Technologies, Inc.
- */
-#ifndef _ASM_SYSTEM_H
-#define _ASM_SYSTEM_H
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/irqflags.h>
-
-#include <asm/addrspace.h>
-#include <asm/barrier.h>
-#include <asm/cmpxchg.h>
-#include <asm/cpu-features.h>
-#include <asm/dsp.h>
-#include <asm/watch.h>
-#include <asm/war.h>
-
-
-/*
- * switch_to(n) should switch tasks to task nr n, first
- * checking that n isn't the current task, in which case it does nothing.
- */
-extern asmlinkage void *resume(void *last, void *next, void *next_ti);
-
-struct task_struct;
-
-extern unsigned int ll_bit;
-extern struct task_struct *ll_task;
-
-#ifdef CONFIG_MIPS_MT_FPAFF
-
-/*
- * Handle the scheduler resume end of FPU affinity management.  We do this
- * inline to try to keep the overhead down. If we have been forced to run on
- * a "CPU" with an FPU because of a previous high level of FP computation,
- * but did not actually use the FPU during the most recent time-slice (CU1
- * isn't set), we undo the restriction on cpus_allowed.
- *
- * We're not calling set_cpus_allowed() here, because we have no need to
- * force prompt migration - we're already switching the current CPU to a
- * different thread.
- */
-
-#define __mips_mt_fpaff_switch_to(prev)					\
-do {									\
-	struct thread_info *__prev_ti = task_thread_info(prev);		\
-									\
-	if (cpu_has_fpu &&						\
-	    test_ti_thread_flag(__prev_ti, TIF_FPUBOUND) &&		\
-	    (!(KSTK_STATUS(prev) & ST0_CU1))) {				\
-		clear_ti_thread_flag(__prev_ti, TIF_FPUBOUND);		\
-		prev->cpus_allowed = prev->thread.user_cpus_allowed;	\
-	}								\
-	next->thread.emulated_fp = 0;					\
-} while(0)
-
-#else
-#define __mips_mt_fpaff_switch_to(prev) do { (void) (prev); } while (0)
-#endif
-
-#define __clear_software_ll_bit()					\
-do {									\
-	if (!__builtin_constant_p(cpu_has_llsc) || !cpu_has_llsc)	\
-		ll_bit = 0;						\
-} while (0)
-
-#define switch_to(prev, next, last)					\
-do {									\
-	__mips_mt_fpaff_switch_to(prev);				\
-	if (cpu_has_dsp)						\
-		__save_dsp(prev);					\
-	__clear_software_ll_bit();					\
-	(last) = resume(prev, next, task_thread_info(next));		\
-} while (0)
-
-#define finish_arch_switch(prev)					\
-do {									\
-	if (cpu_has_dsp)						\
-		__restore_dsp(current);					\
-	if (cpu_has_userlocal)						\
-		write_c0_userlocal(current_thread_info()->tp_value);	\
-	__restore_watch();						\
-} while (0)
-
-static inline unsigned long __xchg_u32(volatile int * m, unsigned int val)
-{
-	__u32 retval;
-
-	smp_mb__before_llsc();
-
-	if (kernel_uses_llsc && R10000_LLSC_WAR) {
-		unsigned long dummy;
-
-		__asm__ __volatile__(
-		"	.set	mips3					\n"
-		"1:	ll	%0, %3			# xchg_u32	\n"
-		"	.set	mips0					\n"
-		"	move	%2, %z4					\n"
-		"	.set	mips3					\n"
-		"	sc	%2, %1					\n"
-		"	beqzl	%2, 1b					\n"
-		"	.set	mips0					\n"
-		: "=&r" (retval), "=m" (*m), "=&r" (dummy)
-		: "R" (*m), "Jr" (val)
-		: "memory");
-	} else if (kernel_uses_llsc) {
-		unsigned long dummy;
-
-		do {
-			__asm__ __volatile__(
-			"	.set	mips3				\n"
-			"	ll	%0, %3		# xchg_u32	\n"
-			"	.set	mips0				\n"
-			"	move	%2, %z4				\n"
-			"	.set	mips3				\n"
-			"	sc	%2, %1				\n"
-			"	.set	mips0				\n"
-			: "=&r" (retval), "=m" (*m), "=&r" (dummy)
-			: "R" (*m), "Jr" (val)
-			: "memory");
-		} while (unlikely(!dummy));
-	} else {
-		unsigned long flags;
-
-		raw_local_irq_save(flags);
-		retval = *m;
-		*m = val;
-		raw_local_irq_restore(flags);	/* implies memory barrier */
-	}
-
-	smp_llsc_mb();
-
-	return retval;
-}
-
-#ifdef CONFIG_64BIT
-static inline __u64 __xchg_u64(volatile __u64 * m, __u64 val)
-{
-	__u64 retval;
-
-	smp_mb__before_llsc();
-
-	if (kernel_uses_llsc && R10000_LLSC_WAR) {
-		unsigned long dummy;
-
-		__asm__ __volatile__(
-		"	.set	mips3					\n"
-		"1:	lld	%0, %3			# xchg_u64	\n"
-		"	move	%2, %z4					\n"
-		"	scd	%2, %1					\n"
-		"	beqzl	%2, 1b					\n"
-		"	.set	mips0					\n"
-		: "=&r" (retval), "=m" (*m), "=&r" (dummy)
-		: "R" (*m), "Jr" (val)
-		: "memory");
-	} else if (kernel_uses_llsc) {
-		unsigned long dummy;
-
-		do {
-			__asm__ __volatile__(
-			"	.set	mips3				\n"
-			"	lld	%0, %3		# xchg_u64	\n"
-			"	move	%2, %z4				\n"
-			"	scd	%2, %1				\n"
-			"	.set	mips0				\n"
-			: "=&r" (retval), "=m" (*m), "=&r" (dummy)
-			: "R" (*m), "Jr" (val)
-			: "memory");
-		} while (unlikely(!dummy));
-	} else {
-		unsigned long flags;
-
-		raw_local_irq_save(flags);
-		retval = *m;
-		*m = val;
-		raw_local_irq_restore(flags);	/* implies memory barrier */
-	}
-
-	smp_llsc_mb();
-
-	return retval;
-}
-#else
-extern __u64 __xchg_u64_unsupported_on_32bit_kernels(volatile __u64 * m, __u64 val);
-#define __xchg_u64 __xchg_u64_unsupported_on_32bit_kernels
-#endif
-
-static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
-{
-	switch (size) {
-	case 4:
-		return __xchg_u32(ptr, x);
-	case 8:
-		return __xchg_u64(ptr, x);
-	}
-
-	return x;
-}
-
-#define xchg(ptr, x)							\
-({									\
-	BUILD_BUG_ON(sizeof(*(ptr)) & ~0xc);				\
-									\
-	((__typeof__(*(ptr)))						\
-		__xchg((unsigned long)(x), (ptr), sizeof(*(ptr))));	\
-})
-
-extern void set_handler(unsigned long offset, void *addr, unsigned long len);
-extern void set_uncached_handler(unsigned long offset, void *addr, unsigned long len);
-
-typedef void (*vi_handler_t)(void);
-extern void *set_vi_handler(int n, vi_handler_t addr);
-
-extern void *set_except_vector(int n, void *addr);
-extern unsigned long ebase;
-extern void per_cpu_trap_init(void);
-
-/*
- * See include/asm-ia64/system.h; prevents deadlock on SMP
- * systems.
- */
-#define __ARCH_WANT_UNLOCKED_CTXSW
-
-extern unsigned long arch_align_stack(unsigned long sp);
-
-#endif /* _ASM_SYSTEM_H */
diff --git a/arch/mips/include/asm/txx9/jmr3927.h b/arch/mips/include/asm/txx9/jmr3927.h
index a409c446bf18..8808d7f82da0 100644
--- a/arch/mips/include/asm/txx9/jmr3927.h
+++ b/arch/mips/include/asm/txx9/jmr3927.h
@@ -12,7 +12,6 @@
 
 #include <asm/txx9/tx3927.h>
 #include <asm/addrspace.h>
-#include <asm/system.h>
 #include <asm/txx9irq.h>
 
 /* CS */