Diffstat (limited to 'arch/arm/include')
-rw-r--r--  arch/arm/include/asm/tls.h                | 62
-rw-r--r--  arch/arm/include/asm/uaccess.h            | 48
-rw-r--r--  arch/arm/include/asm/xen/page-coherent.h  | 25
-rw-r--r--  arch/arm/include/asm/xen/page.h           |  9
4 files changed, 108 insertions, 36 deletions
diff --git a/arch/arm/include/asm/tls.h b/arch/arm/include/asm/tls.h
index 83259b873333..36172adda9d0 100644
--- a/arch/arm/include/asm/tls.h
+++ b/arch/arm/include/asm/tls.h
@@ -1,6 +1,9 @@
 #ifndef __ASMARM_TLS_H
 #define __ASMARM_TLS_H
 
+#include <linux/compiler.h>
+#include <asm/thread_info.h>
+
 #ifdef __ASSEMBLY__
 #include <asm/asm-offsets.h>
 	.macro switch_tls_none, base, tp, tpuser, tmp1, tmp2
@@ -50,6 +53,47 @@
 #endif
 
 #ifndef __ASSEMBLY__
+
+static inline void set_tls(unsigned long val)
+{
+	struct thread_info *thread;
+
+	thread = current_thread_info();
+
+	thread->tp_value[0] = val;
+
+	/*
+	 * This code runs with preemption enabled and therefore must
+	 * be reentrant with respect to switch_tls.
+	 *
+	 * We need to ensure ordering between the shadow state and the
+	 * hardware state, so that we don't corrupt the hardware state
+	 * with a stale shadow state during context switch.
+	 *
+	 * If we're preempted here, switch_tls will load TPIDRURO from
+	 * thread_info upon resuming execution and the following mcr
+	 * is merely redundant.
+	 */
+	barrier();
+
+	if (!tls_emu) {
+		if (has_tls_reg) {
+			asm("mcr p15, 0, %0, c13, c0, 3"
+			    : : "r" (val));
+		} else {
+			/*
+			 * User space must never try to access this
+			 * directly. Expect your app to break
+			 * eventually if you do so. The user helper
+			 * at 0xffff0fe0 must be used instead. (see
+			 * entry-armv.S for details)
+			 */
+			*((unsigned int *)0xffff0ff0) = val;
+		}
+
+	}
+}
+
 static inline unsigned long get_tpuser(void)
 {
 	unsigned long reg = 0;
@@ -59,5 +103,23 @@ static inline unsigned long get_tpuser(void)
 
 	return reg;
 }
+
+static inline void set_tpuser(unsigned long val)
+{
+	/* Since TPIDRURW is fully context-switched (unlike TPIDRURO),
+	 * we need not update thread_info.
+	 */
+	if (has_tls_reg && !tls_emu) {
+		asm("mcr p15, 0, %0, c13, c0, 2"
+		    : : "r" (val));
+	}
+}
+
+static inline void flush_tls(void)
+{
+	set_tls(0);
+	set_tpuser(0);
+}
+
 #endif
 #endif /* __ASMARM_TLS_H */
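
The new helpers above only define the accessors; this diff does not show their callers. A minimal sketch of how they could be used, assuming call sites in the TLS syscall and exec paths (the function names below are illustrative, not part of this patch):

#include <asm/tls.h>

/* Hypothetical caller: a set_tls-style syscall handler. */
long example_set_tls_syscall(unsigned long val)
{
	set_tls(val);	/* updates tp_value[0], then TPIDRURO (or the 0xffff0ff0 helper slot) */
	return 0;
}

/* Hypothetical caller: clearing TLS state when a task execs a new image. */
void example_flush_thread_tls(void)
{
	flush_tls();	/* set_tls(0) + set_tpuser(0): both TLS values zeroed */
}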
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
index a4cd7af475e9..4767eb9caa78 100644
--- a/arch/arm/include/asm/uaccess.h
+++ b/arch/arm/include/asm/uaccess.h
@@ -107,8 +107,11 @@ static inline void set_fs(mm_segment_t fs)
 extern int __get_user_1(void *);
 extern int __get_user_2(void *);
 extern int __get_user_4(void *);
-extern int __get_user_lo8(void *);
+extern int __get_user_32t_8(void *);
 extern int __get_user_8(void *);
+extern int __get_user_64t_1(void *);
+extern int __get_user_64t_2(void *);
+extern int __get_user_64t_4(void *);
 
 #define __GUP_CLOBBER_1	"lr", "cc"
 #ifdef CONFIG_CPU_USE_DOMAINS
@@ -117,7 +120,7 @@ extern int __get_user_8(void *);
 #define __GUP_CLOBBER_2	"lr", "cc"
 #endif
 #define __GUP_CLOBBER_4	"lr", "cc"
-#define __GUP_CLOBBER_lo8 "lr", "cc"
+#define __GUP_CLOBBER_32t_8 "lr", "cc"
 #define __GUP_CLOBBER_8	"lr", "cc"
 
 #define __get_user_x(__r2,__p,__e,__l,__s)				\
@@ -131,12 +134,30 @@ extern int __get_user_8(void *);
 
 /* narrowing a double-word get into a single 32bit word register: */
 #ifdef __ARMEB__
-#define __get_user_xb(__r2, __p, __e, __l, __s)				\
-	__get_user_x(__r2, __p, __e, __l, lo8)
+#define __get_user_x_32t(__r2, __p, __e, __l, __s)			\
+	__get_user_x(__r2, __p, __e, __l, 32t_8)
 #else
-#define __get_user_xb __get_user_x
+#define __get_user_x_32t __get_user_x
 #endif
 
+/*
+ * storing result into proper least significant word of 64bit target var,
+ * different only for big endian case where 64 bit __r2 lsw is r3:
+ */
+#ifdef __ARMEB__
+#define __get_user_x_64t(__r2, __p, __e, __l, __s)			\
+	   __asm__ __volatile__ (					\
+		__asmeq("%0", "r0") __asmeq("%1", "r2")			\
+		__asmeq("%3", "r1")					\
+		"bl	__get_user_64t_" #__s				\
+		: "=&r" (__e), "=r" (__r2)				\
+		: "0" (__p), "r" (__l)					\
+		: __GUP_CLOBBER_##__s)
+#else
+#define __get_user_x_64t __get_user_x
+#endif
+
+
 #define __get_user_check(x,p)						\
 	({								\
 		unsigned long __limit = current_thread_info()->addr_limit - 1; \
@@ -146,17 +167,26 @@ extern int __get_user_8(void *);
 		register int __e asm("r0");				\
 		switch (sizeof(*(__p))) {				\
 		case 1:							\
-			__get_user_x(__r2, __p, __e, __l, 1);		\
+			if (sizeof((x)) >= 8)				\
+				__get_user_x_64t(__r2, __p, __e, __l, 1); \
+			else						\
+				__get_user_x(__r2, __p, __e, __l, 1);	\
 			break;						\
 		case 2:							\
-			__get_user_x(__r2, __p, __e, __l, 2);		\
+			if (sizeof((x)) >= 8)				\
+				__get_user_x_64t(__r2, __p, __e, __l, 2); \
+			else						\
+				__get_user_x(__r2, __p, __e, __l, 2);	\
 			break;						\
 		case 4:							\
-			__get_user_x(__r2, __p, __e, __l, 4);		\
+			if (sizeof((x)) >= 8)				\
+				__get_user_x_64t(__r2, __p, __e, __l, 4); \
+			else						\
+				__get_user_x(__r2, __p, __e, __l, 4);	\
 			break;						\
 		case 8:							\
 			if (sizeof((x)) < 8)				\
-				__get_user_xb(__r2, __p, __e, __l, 4);	\
+				__get_user_x_32t(__r2, __p, __e, __l, 4); \
 			else						\
 				__get_user_x(__r2, __p, __e, __l, 8);	\
 			break;						\
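
The new __get_user_64t_* helpers and the sizeof((x)) >= 8 branches above matter when the destination variable is wider than the user object being read: the 64-bit result then needs its upper register cleared rather than left with stale contents. An illustrative caller, not part of this diff, assuming get_user() expands to __get_user_check() as elsewhere in this header:

#include <linux/types.h>
#include <linux/uaccess.h>

/* Illustrative only: 32-bit user value read into a 64-bit kernel variable. */
static int example_read_u32_into_u64(u32 __user *p, u64 *out)
{
	u64 val;			/* sizeof(val) == 8, sizeof(*p) == 4 */

	if (get_user(val, p))		/* dispatches to __get_user_64t_4 here */
		return -EFAULT;

	*out = val;
	return 0;
}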
diff --git a/arch/arm/include/asm/xen/page-coherent.h b/arch/arm/include/asm/xen/page-coherent.h
index 1109017499e5..e8275ea88e88 100644
--- a/arch/arm/include/asm/xen/page-coherent.h
+++ b/arch/arm/include/asm/xen/page-coherent.h
@@ -26,25 +26,14 @@ static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
 	__generic_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs);
 }
 
-static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
+void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
 		size_t size, enum dma_data_direction dir,
-		struct dma_attrs *attrs)
-{
-	if (__generic_dma_ops(hwdev)->unmap_page)
-		__generic_dma_ops(hwdev)->unmap_page(hwdev, handle, size, dir, attrs);
-}
+		struct dma_attrs *attrs);
 
-static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
-		dma_addr_t handle, size_t size, enum dma_data_direction dir)
-{
-	if (__generic_dma_ops(hwdev)->sync_single_for_cpu)
-		__generic_dma_ops(hwdev)->sync_single_for_cpu(hwdev, handle, size, dir);
-}
+void xen_dma_sync_single_for_cpu(struct device *hwdev,
+		dma_addr_t handle, size_t size, enum dma_data_direction dir);
+
+void xen_dma_sync_single_for_device(struct device *hwdev,
+		dma_addr_t handle, size_t size, enum dma_data_direction dir);
 
-static inline void xen_dma_sync_single_for_device(struct device *hwdev,
-		dma_addr_t handle, size_t size, enum dma_data_direction dir)
-{
-	if (__generic_dma_ops(hwdev)->sync_single_for_device)
-		__generic_dma_ops(hwdev)->sync_single_for_device(hwdev, handle, size, dir);
-}
 #endif /* _ASM_ARM_XEN_PAGE_COHERENT_H */
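
The unmap and sync helpers above are demoted from static inlines to plain declarations, so their bodies must now live in a C file that this diff does not include. A sketch of the out-of-line form for one of them, reusing the body removed above (the file placement is an assumption):

/* e.g. in an arch/arm/xen/*.c file — location assumed, not shown by this diff */
#include <asm/xen/page-coherent.h>

void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir,
		struct dma_attrs *attrs)
{
	/* Same logic the header used to carry: defer to the generic dma_map_ops hook if present. */
	if (__generic_dma_ops(hwdev)->unmap_page)
		__generic_dma_ops(hwdev)->unmap_page(hwdev, handle, size, dir, attrs);
}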
diff --git a/arch/arm/include/asm/xen/page.h b/arch/arm/include/asm/xen/page.h
index ded062f9b358..135c24a5ba26 100644
--- a/arch/arm/include/asm/xen/page.h
+++ b/arch/arm/include/asm/xen/page.h
@@ -33,7 +33,6 @@ typedef struct xpaddr {
 #define INVALID_P2M_ENTRY	(~0UL)
 
 unsigned long __pfn_to_mfn(unsigned long pfn);
-unsigned long __mfn_to_pfn(unsigned long mfn);
 extern struct rb_root phys_to_mach;
 
 static inline unsigned long pfn_to_mfn(unsigned long pfn)
@@ -51,14 +50,6 @@ static inline unsigned long pfn_to_mfn(unsigned long pfn)
 
 static inline unsigned long mfn_to_pfn(unsigned long mfn)
 {
-	unsigned long pfn;
-
-	if (phys_to_mach.rb_node != NULL) {
-		pfn = __mfn_to_pfn(mfn);
-		if (pfn != INVALID_P2M_ENTRY)
-			return pfn;
-	}
-
 	return mfn;
 }
 