Diffstat (limited to 'arch/um/sys-x86/asm/system.h')
-rw-r--r--  arch/um/sys-x86/asm/system.h  133
1 file changed, 133 insertions(+), 0 deletions(-)
diff --git a/arch/um/sys-x86/asm/system.h b/arch/um/sys-x86/asm/system.h
new file mode 100644
index 000000000000..a89113bc74f2
--- /dev/null
+++ b/arch/um/sys-x86/asm/system.h
@@ -0,0 +1,133 @@
#ifndef _ASM_X86_SYSTEM_H_
#define _ASM_X86_SYSTEM_H_

#include <asm/asm.h>
#include <asm/segment.h>
#include <asm/cpufeature.h>
#include <asm/cmpxchg.h>
#include <asm/nops.h>
#include <asm/system-um.h>

#include <linux/kernel.h>
#include <linux/irqflags.h>

/* entries in ARCH_DLINFO: */
#ifdef CONFIG_IA32_EMULATION
# define AT_VECTOR_SIZE_ARCH 2
#else
# define AT_VECTOR_SIZE_ARCH 1
#endif

extern unsigned long arch_align_stack(unsigned long sp);

void default_idle(void);

/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 */
#ifdef CONFIG_X86_32
/*
 * Some non-Intel clones support out-of-order stores. wmb() ceases to be
 * a nop for these.
 */
#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
#else
#define mb() asm volatile("mfence" ::: "memory")
#define rmb() asm volatile("lfence" ::: "memory")
#define wmb() asm volatile("sfence" ::: "memory")
#endif
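
/*
 * A minimal usage sketch (illustrative only; this helper is
 * hypothetical and not part of the original header): a writer fills in
 * a payload and then sets a ready flag.  The wmb() keeps the payload
 * store ordered before the flag store even on CPUs that reorder
 * stores, so a reader that observes the flag also observes the payload.
 */
static inline void example_publish(int *payload, int *ready)
{
	*payload = 42;		/* write the data first */
	wmb();			/* order data store before flag store */
	*ready = 1;		/* signal that the data is valid */
}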

/**
 * read_barrier_depends - Flush all pending reads that subsequent reads
 * depend on.
 *
 * No data-dependent reads from memory-like regions are ever reordered
 * over this barrier. All reads preceding this primitive are guaranteed
 * to access memory (but not necessarily other CPUs' caches) before any
 * reads following this primitive that depend on the data returned by
 * any of the preceding reads. This primitive is much lighter weight than
 * rmb() on most CPUs, and is never heavier weight than rmb().
 *
 * These ordering constraints are respected by both the local CPU
 * and the compiler.
 *
 * Ordering is not guaranteed by anything other than these primitives,
 * not even by data dependencies. See the documentation for
 * memory_barrier() for examples and URLs to more information.
 *
 * For example, the following code would force ordering (the initial
 * value of "a" is zero, "b" is one, and "p" is "&a"):
 *
 * <programlisting>
 *	CPU 0				CPU 1
 *
 *	b = 2;
 *	memory_barrier();
 *	p = &b;				q = p;
 *					read_barrier_depends();
 *					d = *q;
 * </programlisting>
 *
 * because the read of "*q" depends on the read of "p" and these
 * two reads are separated by a read_barrier_depends(). However,
 * the following code, with the same initial values for "a" and "b":
 *
 * <programlisting>
 *	CPU 0				CPU 1
 *
 *	a = 2;
 *	memory_barrier();
 *	b = 3;				y = b;
 *					read_barrier_depends();
 *					x = a;
 * </programlisting>
 *
 * does not enforce ordering, since there is no data dependency between
 * the read of "a" and the read of "b". Therefore, on some CPUs, such
 * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb()
 * in cases like this where there are no data dependencies.
 **/

#define read_barrier_depends()	do { } while (0)

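/*
 * A sketch of the consumer side of the first example above (the helper
 * name is hypothetical): after loading a pointer published by another
 * CPU, only read_barrier_depends() is needed before the dependent
 * dereference, not the heavier rmb(), because the second read depends
 * on the value returned by the first.
 */
static inline int example_dependent_read(int **pp)
{
	int *q = *pp;			/* load the published pointer */
	read_barrier_depends();		/* order the dependent read */
	return *q;			/* load the data it points to */
}
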
#ifdef CONFIG_SMP
#define smp_mb()	mb()
#ifdef CONFIG_X86_PPRO_FENCE
# define smp_rmb()	rmb()
#else
# define smp_rmb()	barrier()
#endif
#ifdef CONFIG_X86_OOSTORE
# define smp_wmb()	wmb()
#else
# define smp_wmb()	barrier()
#endif
#define smp_read_barrier_depends()	read_barrier_depends()
#define set_mb(var, value) do { (void)xchg(&var, value); } while (0)
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_read_barrier_depends()	do { } while (0)
#define set_mb(var, value) do { var = value; barrier(); } while (0)
#endif
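
/*
 * set_mb() usage sketch (hypothetical helper): store a new value and
 * guarantee the store is ordered before any subsequent memory access,
 * e.g. setting a state flag before testing a condition another CPU may
 * change.  On SMP this expands to an xchg, which implies a full
 * barrier; on UP a compiler barrier suffices.
 */
static inline void example_set_state(unsigned long *state)
{
	set_mb(*state, 1);	/* store 1, then full memory barrier */
}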

/*
 * Stop RDTSC speculation. This is needed when you need to use RDTSC
 * (or get_cycles or vread that possibly accesses the TSC) in a defined
 * code region.
 *
 * (Could use a three-way alternative() for this if there were one.)
 */
static inline void rdtsc_barrier(void)
{
	alternative(ASM_NOP3, "mfence", X86_FEATURE_MFENCE_RDTSC);
	alternative(ASM_NOP3, "lfence", X86_FEATURE_LFENCE_RDTSC);
}
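
/*
 * Timing sketch (hypothetical helper, assuming get_cycles() from
 * <linux/timex.h> is available in this configuration): fencing before
 * and after each TSC read keeps instructions from the measured region
 * from being speculated across the timestamp reads.
 */
static inline unsigned long long example_timed(void (*fn)(void))
{
	unsigned long long t0, t1;

	rdtsc_barrier();
	t0 = get_cycles();	/* timestamp before the region */
	rdtsc_barrier();
	fn();			/* the code being measured */
	rdtsc_barrier();
	t1 = get_cycles();	/* timestamp after the region */
	rdtsc_barrier();
	return t1 - t0;
}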

#endif /* _ASM_X86_SYSTEM_H_ */