Diffstat (limited to 'arch/tile/include')
-rw-r--r--  arch/tile/include/asm/Kbuild      1
-rw-r--r--  arch/tile/include/asm/barrier.h  68
-rw-r--r--  arch/tile/include/asm/compat.h    1
-rw-r--r--  arch/tile/include/asm/fixmap.h   33
4 files changed, 3 insertions, 100 deletions
diff --git a/arch/tile/include/asm/Kbuild b/arch/tile/include/asm/Kbuild
index 22f3bd147fa7..3793c75e45d9 100644
--- a/arch/tile/include/asm/Kbuild
+++ b/arch/tile/include/asm/Kbuild
@@ -39,3 +39,4 @@ generic-y += trace_clock.h
 generic-y += types.h
 generic-y += xor.h
 generic-y += preempt.h
+generic-y += hash.h
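
Each "generic-y" entry asks kbuild to emit a small wrapper header under the arch's generated include directory instead of keeping an arch-private copy. A minimal sketch of the wrapper the build would produce for the new hash.h line, assuming the conventional generated-header layout (the generated file itself is not part of this diff):

	/* arch/tile/include/generated/asm/hash.h -- kbuild-generated wrapper (sketch) */
	#include <asm-generic/hash.h>
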
diff --git a/arch/tile/include/asm/barrier.h b/arch/tile/include/asm/barrier.h
index a9a73da5865d..b5a05d050a8f 100644
--- a/arch/tile/include/asm/barrier.h
+++ b/arch/tile/include/asm/barrier.h
@@ -22,59 +22,6 @@
 #include <arch/spr_def.h>
 #include <asm/timex.h>
 
-/*
- * read_barrier_depends - Flush all pending reads that subsequents reads
- * depend on.
- *
- * No data-dependent reads from memory-like regions are ever reordered
- * over this barrier.  All reads preceding this primitive are guaranteed
- * to access memory (but not necessarily other CPUs' caches) before any
- * reads following this primitive that depend on the data return by
- * any of the preceding reads.  This primitive is much lighter weight than
- * rmb() on most CPUs, and is never heavier weight than is
- * rmb().
- *
- * These ordering constraints are respected by both the local CPU
- * and the compiler.
- *
- * Ordering is not guaranteed by anything other than these primitives,
- * not even by data dependencies.  See the documentation for
- * memory_barrier() for examples and URLs to more information.
- *
- * For example, the following code would force ordering (the initial
- * value of "a" is zero, "b" is one, and "p" is "&a"):
- *
- * <programlisting>
- *	CPU 0				CPU 1
- *
- *	b = 2;
- *	memory_barrier();
- *	p = &b;				q = p;
- *					read_barrier_depends();
- *					d = *q;
- * </programlisting>
- *
- * because the read of "*q" depends on the read of "p" and these
- * two reads are separated by a read_barrier_depends().  However,
- * the following code, with the same initial values for "a" and "b":
- *
- * <programlisting>
- *	CPU 0				CPU 1
- *
- *	a = 2;
- *	memory_barrier();
- *	b = 3;				y = b;
- *					read_barrier_depends();
- *					x = a;
- * </programlisting>
- *
- * does not enforce ordering, since there is no data dependency between
- * the read of "a" and the read of "b".  Therefore, on some CPUs, such
- * as Alpha, "y" could be set to 3 and "x" to 0.  Use rmb()
- * in cases like this where there are no data dependencies.
- */
-#define read_barrier_depends()	do { } while (0)
-
 #define __sync()	__insn_mf()
 
 #include <hv/syscall_public.h>
@@ -125,20 +72,7 @@ mb_incoherent(void)
 #define mb()		fast_mb()
 #define iob()		fast_iob()
 
-#ifdef CONFIG_SMP
-#define smp_mb()	mb()
-#define smp_rmb()	rmb()
-#define smp_wmb()	wmb()
-#define smp_read_barrier_depends()	read_barrier_depends()
-#else
-#define smp_mb()	barrier()
-#define smp_rmb()	barrier()
-#define smp_wmb()	barrier()
-#define smp_read_barrier_depends()	do { } while (0)
-#endif
-
-#define set_mb(var, value) \
-	do { var = value; mb(); } while (0)
+#include <asm-generic/barrier.h>
 
 #endif /* !__ASSEMBLY__ */
 #endif /* _ASM_TILE_BARRIER_H */
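
With the CONFIG_SMP block and set_mb() gone, those definitions are now inherited from asm-generic/barrier.h. A hedged sketch of the generic defaults being relied on, paraphrased from the asm-generic header of this era rather than quoted from this diff:

	/* Paraphrased from asm-generic/barrier.h; shown for context, not part of the patch. */
	#ifndef read_barrier_depends
	#define read_barrier_depends()	do { } while (0)
	#endif

	#ifdef CONFIG_SMP
	#define smp_mb()	mb()
	#define smp_rmb()	rmb()
	#define smp_wmb()	wmb()
	#define smp_read_barrier_depends()	read_barrier_depends()
	#else
	#define smp_mb()	barrier()
	#define smp_rmb()	barrier()
	#define smp_wmb()	barrier()
	#define smp_read_barrier_depends()	do { } while (0)
	#endif

	#ifndef set_mb
	#define set_mb(var, value)	do { (var) = (value); mb(); } while (0)
	#endif

Because tile defines mb() before including the generic header, its stronger fast_mb()-based barrier is preserved; the generic header only fills in the definitions the arch does not provide.
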
diff --git a/arch/tile/include/asm/compat.h b/arch/tile/include/asm/compat.h
index 78f1f2ded86c..ffd4493efc78 100644
--- a/arch/tile/include/asm/compat.h
+++ b/arch/tile/include/asm/compat.h
@@ -281,7 +281,6 @@ long compat_sys_pread64(unsigned int fd, char __user *ubuf, size_t count,
                        u32 dummy, u32 low, u32 high);
 long compat_sys_pwrite64(unsigned int fd, char __user *ubuf, size_t count,
                         u32 dummy, u32 low, u32 high);
-long compat_sys_lookup_dcookie(u32 low, u32 high, char __user *buf, size_t len);
 long compat_sys_sync_file_range2(int fd, unsigned int flags,
                                 u32 offset_lo, u32 offset_hi,
                                 u32 nbytes_lo, u32 nbytes_hi);
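
The arch-private declaration of compat_sys_lookup_dcookie() can go because the syscall is declared once in common code. Assuming the usual counterpart in include/linux/compat.h (not shown in this diff), the shared declaration is roughly:

	/* include/linux/compat.h -- assumed shared declaration, not part of this diff */
	asmlinkage long compat_sys_lookup_dcookie(u32, u32, char __user *, compat_size_t);
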
diff --git a/arch/tile/include/asm/fixmap.h b/arch/tile/include/asm/fixmap.h
index c6b9c1b38fd1..ffe2637aeb31 100644
--- a/arch/tile/include/asm/fixmap.h
+++ b/arch/tile/include/asm/fixmap.h
@@ -25,9 +25,6 @@
 #include <asm/kmap_types.h>
 #endif
 
-#define __fix_to_virt(x)	(FIXADDR_TOP - ((x) << PAGE_SHIFT))
-#define __virt_to_fix(x)	((FIXADDR_TOP - ((x)&PAGE_MASK)) >> PAGE_SHIFT)
-
 /*
  * Here we define all the compile-time 'special' virtual
  * addresses. The point is to have a constant address at
@@ -83,35 +80,7 @@ enum fixed_addresses {
 #define FIXADDR_START		(FIXADDR_TOP + PAGE_SIZE - __FIXADDR_SIZE)
 #define FIXADDR_BOOT_START	(FIXADDR_TOP + PAGE_SIZE - __FIXADDR_BOOT_SIZE)
 
-extern void __this_fixmap_does_not_exist(void);
-
-/*
- * 'index to address' translation. If anyone tries to use the idx
- * directly without tranlation, we catch the bug with a NULL-deference
- * kernel oops. Illegal ranges of incoming indices are caught too.
- */
-static __always_inline unsigned long fix_to_virt(const unsigned int idx)
-{
-	/*
-	 * this branch gets completely eliminated after inlining,
-	 * except when someone tries to use fixaddr indices in an
-	 * illegal way. (such as mixing up address types or using
-	 * out-of-range indices).
-	 *
-	 * If it doesn't get removed, the linker will complain
-	 * loudly with a reasonably clear error message..
-	 */
-	if (idx >= __end_of_fixed_addresses)
-		__this_fixmap_does_not_exist();
-
-	return __fix_to_virt(idx);
-}
-
-static inline unsigned long virt_to_fix(const unsigned long vaddr)
-{
-	BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START);
-	return __virt_to_fix(vaddr);
-}
+#include <asm-generic/fixmap.h>
 
 #endif /* !__ASSEMBLY__ */
 
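
The helpers removed above are provided in equivalent form by asm-generic/fixmap.h; the visible difference is that the out-of-range index check becomes a compile-time BUILD_BUG_ON() rather than a deliberate link error via __this_fixmap_does_not_exist(). A sketch of the generic replacements, paraphrased from asm-generic/fixmap.h (not quoted from this diff):

	/* Paraphrased from asm-generic/fixmap.h; shown for context only. */
	#define __fix_to_virt(x)	(FIXADDR_TOP - ((x) << PAGE_SHIFT))
	#define __virt_to_fix(x)	((FIXADDR_TOP - ((x) & PAGE_MASK)) >> PAGE_SHIFT)

	static __always_inline unsigned long fix_to_virt(const unsigned int idx)
	{
		/* Out-of-range indices now fail at compile time. */
		BUILD_BUG_ON(idx >= __end_of_fixed_addresses);
		return __fix_to_virt(idx);
	}

	static inline unsigned long virt_to_fix(const unsigned long vaddr)
	{
		BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START);
		return __virt_to_fix(vaddr);
	}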