Diffstat (limited to 'arch/tile/include/asm')
26 files changed, 669 insertions(+), 294 deletions(-)
diff --git a/arch/tile/include/asm/Kbuild b/arch/tile/include/asm/Kbuild
index 0bb42642343a..143473e3a0bb 100644
--- a/arch/tile/include/asm/Kbuild
+++ b/arch/tile/include/asm/Kbuild
@@ -2,6 +2,7 @@ include include/asm-generic/Kbuild.asm
 
 header-y += ../arch/
 
+header-y += cachectl.h
 header-y += ucontext.h
 header-y += hardwall.h
 
@@ -21,7 +22,6 @@ generic-y += ipcbuf.h
 generic-y += irq_regs.h
 generic-y += kdebug.h
 generic-y += local.h
-generic-y += module.h
 generic-y += msgbuf.h
 generic-y += mutex.h
 generic-y += param.h
diff --git a/arch/tile/include/asm/atomic_32.h b/arch/tile/include/asm/atomic_32.h
index 54d1da826f93..e7fb5cfb9597 100644
--- a/arch/tile/include/asm/atomic_32.h
+++ b/arch/tile/include/asm/atomic_32.h
@@ -303,7 +303,14 @@ void __init_atomic_per_cpu(void);
 void __atomic_fault_unlock(int *lock_ptr);
 #endif
 
+/* Return a pointer to the lock for the given address. */
+int *__atomic_hashed_lock(volatile void *v);
+
 /* Private helper routines in lib/atomic_asm_32.S */
+struct __get_user {
+        unsigned long val;
+        int err;
+};
 extern struct __get_user __atomic_cmpxchg(volatile int *p,
                                           int *lock, int o, int n);
 extern struct __get_user __atomic_xchg(volatile int *p, int *lock, int n);
@@ -319,6 +326,9 @@ extern u64 __atomic64_xchg_add(volatile u64 *p, int *lock, u64 n);
 extern u64 __atomic64_xchg_add_unless(volatile u64 *p,
                                       int *lock, u64 o, u64 n);
 
+/* Return failure from the atomic wrappers. */
+struct __get_user __atomic_bad_address(int __user *addr);
+
 #endif /* !__ASSEMBLY__ */
 
 #endif /* _ASM_TILE_ATOMIC_32_H */
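
The struct __get_user type moved into this header packs a value and an error code into one return. For illustration only (the caller, the local variables, and the lock setup shown here are assumptions, not part of the patch), a 32-bit atomic wrapper is consumed like this:

    int *lock = __atomic_hashed_lock((volatile void *)p);
    struct __get_user gu = __atomic_xchg(p, lock, n);
    if (gu.err)
            return gu.err;   /* e.g. -EFAULT via __atomic_bad_address() */
    oldval = gu.val;         /* the value previously stored at *p */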
diff --git a/arch/tile/include/asm/bitops.h b/arch/tile/include/asm/bitops.h
index 16f1fa51fea1..bd186c4eaa50 100644
--- a/arch/tile/include/asm/bitops.h
+++ b/arch/tile/include/asm/bitops.h
@@ -77,6 +77,11 @@ static inline int ffs(int x)
 	return __builtin_ffs(x);
 }
 
+static inline int fls64(__u64 w)
+{
+	return (sizeof(__u64) * 8) - __builtin_clzll(w);
+}
+
 /**
  * fls - find last set bit in word
  * @x: the word to search
@@ -90,12 +95,7 @@ static inline int ffs(int x)
  */
 static inline int fls(int x)
 {
-	return (sizeof(int) * 8) - __builtin_clz(x);
-}
-
-static inline int fls64(__u64 w)
-{
-	return (sizeof(__u64) * 8) - __builtin_clzll(w);
+	return fls64((unsigned int) x);
 }
 
 static inline unsigned int __arch_hweight32(unsigned int w)
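
For reference, fls() returns the 1-based position of the most significant set bit, and the change routes the 32-bit case through the new fls64() so both share one implementation; the (unsigned int) cast matters because it zero-extends rather than sign-extends a negative int. A quick check of the arithmetic (illustrative, not from the patch):

    fls(0x80000000)  ->  fls64(0x80000000ULL)  ==  64 - __builtin_clzll(...)  ==  64 - 32  ==  32
    fls64(1ULL << 40)  ==  64 - 23  ==  41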
diff --git a/arch/tile/include/asm/byteorder.h b/arch/tile/include/asm/byteorder.h
index 9558416d578b..fb72ecf49218 100644
--- a/arch/tile/include/asm/byteorder.h
+++ b/arch/tile/include/asm/byteorder.h
@@ -1 +1,21 @@
+/*
+ * Copyright 2011 Tilera Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for
+ * more details.
+ */
+
+#if defined (__BIG_ENDIAN__)
+#include <linux/byteorder/big_endian.h>
+#elif defined (__LITTLE_ENDIAN__)
 #include <linux/byteorder/little_endian.h>
+#else
+#error "__BIG_ENDIAN__ or __LITTLE_ENDIAN__ must be defined."
+#endif
diff --git a/arch/tile/include/asm/cachectl.h b/arch/tile/include/asm/cachectl.h
new file mode 100644
index 000000000000..af4c9f9154d1
--- /dev/null
+++ b/arch/tile/include/asm/cachectl.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright 2011 Tilera Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _ASM_TILE_CACHECTL_H
+#define _ASM_TILE_CACHECTL_H
+
+/*
+ * Options for cacheflush system call.
+ *
+ * The ICACHE flush is performed on all cores currently running the
+ * current process's address space. The intent is for user
+ * applications to be able to modify code, invoke the system call,
+ * then allow arbitrary other threads in the same address space to see
+ * the newly-modified code. Passing a length of CHIP_L1I_CACHE_SIZE()
+ * or more invalidates the entire icache on all cores in the address
+ * spaces. (Note: currently this option invalidates the entire icache
+ * regardless of the requested address and length, but we may choose
+ * to honor the arguments at some point.)
+ *
+ * Flush and invalidation of memory can normally be performed with the
+ * __insn_flush(), __insn_inv(), and __insn_finv() instructions from
+ * userspace. The DCACHE option to the system call allows userspace
+ * to flush the entire L1+L2 data cache from the core. In this case,
+ * the address and length arguments are not used. The DCACHE flush is
+ * restricted to the current core, not all cores in the address space.
+ */
+#define ICACHE  (1<<0)          /* invalidate L1 instruction cache */
+#define DCACHE  (1<<1)          /* flush and invalidate data cache */
+#define BCACHE  (ICACHE|DCACHE) /* flush both caches */
+
+#endif /* _ASM_TILE_CACHECTL_H */
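
To make the header comment concrete: a user-space code patcher would pair these flags with the cacheflush system call this header supports. A minimal sketch, assuming the syscall number __NR_cacheflush is wired up elsewhere in the series and that emit_code() stands in for whatever writes the new instructions:

    #include <unistd.h>
    #include <sys/syscall.h>
    #include <asm/cachectl.h>

    emit_code(buf, len);                        /* hypothetical JIT step */
    /* make the new code visible to every thread in this address space */
    syscall(__NR_cacheflush, buf, len, ICACHE);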
diff --git a/arch/tile/include/asm/compat.h b/arch/tile/include/asm/compat.h
index 4b4b28969a65..69adc08d36a5 100644
--- a/arch/tile/include/asm/compat.h
+++ b/arch/tile/include/asm/compat.h
@@ -242,9 +242,6 @@ long compat_sys_fallocate(int fd, int mode,
 long compat_sys_sched_rr_get_interval(compat_pid_t pid,
                                       struct compat_timespec __user *interval);
 
-/* Tilera Linux syscalls that don't have "compat" versions. */
-#define compat_sys_flush_cache sys_flush_cache
-
 /* These are the intvec_64.S trampolines. */
 long _compat_sys_execve(const char __user *path,
                         const compat_uptr_t __user *argv,
diff --git a/arch/tile/include/asm/elf.h b/arch/tile/include/asm/elf.h
index 623a6bb741c1..d16d006d660e 100644
--- a/arch/tile/include/asm/elf.h
+++ b/arch/tile/include/asm/elf.h
@@ -44,7 +44,11 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
 #else
 #define ELF_CLASS	ELFCLASS32
 #endif
+#ifdef __BIG_ENDIAN__
+#define ELF_DATA	ELFDATA2MSB
+#else
 #define ELF_DATA	ELFDATA2LSB
+#endif
 
 /*
  * There seems to be a bug in how compat_binfmt_elf.c works: it
@@ -59,6 +63,7 @@ enum { ELF_ARCH = CHIP_ELF_TYPE() };
  */
 #define elf_check_arch(x)  \
 	((x)->e_ident[EI_CLASS] == ELF_CLASS && \
+	 (x)->e_ident[EI_DATA] == ELF_DATA && \
 	 (x)->e_machine == CHIP_ELF_TYPE())
 
 /* The module loader only handles a few relocation types. */
diff --git a/arch/tile/include/asm/futex.h b/arch/tile/include/asm/futex.h
index d03ec124a598..5909ac3d7218 100644
--- a/arch/tile/include/asm/futex.h
+++ b/arch/tile/include/asm/futex.h
@@ -28,29 +28,81 @@
 #include <linux/futex.h>
 #include <linux/uaccess.h>
 #include <linux/errno.h>
+#include <asm/atomic.h>
 
-extern struct __get_user futex_set(u32 __user *v, int i);
-extern struct __get_user futex_add(u32 __user *v, int n);
-extern struct __get_user futex_or(u32 __user *v, int n);
-extern struct __get_user futex_andn(u32 __user *v, int n);
-extern struct __get_user futex_cmpxchg(u32 __user *v, int o, int n);
+/*
+ * Support macros for futex operations. Do not use these macros directly.
+ * They assume "ret", "val", "oparg", and "uaddr" in the lexical context.
+ * __futex_cmpxchg() additionally assumes "oldval".
+ */
+
+#ifdef __tilegx__
+
+#define __futex_asm(OP) \
+	asm("1: {" #OP " %1, %3, %4; movei %0, 0 }\n" \
+	    ".pushsection .fixup,\"ax\"\n" \
+	    "0: { movei %0, %5; j 9f }\n" \
+	    ".section __ex_table,\"a\"\n" \
+	    ".quad 1b, 0b\n" \
+	    ".popsection\n" \
+	    "9:" \
+	    : "=r" (ret), "=r" (val), "+m" (*(uaddr)) \
+	    : "r" (uaddr), "r" (oparg), "i" (-EFAULT))
+
+#define __futex_set() __futex_asm(exch4)
+#define __futex_add() __futex_asm(fetchadd4)
+#define __futex_or() __futex_asm(fetchor4)
+#define __futex_andn() ({ oparg = ~oparg; __futex_asm(fetchand4); })
+#define __futex_cmpxchg() \
+	({ __insn_mtspr(SPR_CMPEXCH_VALUE, oldval); __futex_asm(cmpexch4); })
+
+#define __futex_xor() \
+	({ \
+		u32 oldval, n = oparg; \
+		if ((ret = __get_user(oldval, uaddr)) == 0) { \
+			do { \
+				oparg = oldval ^ n; \
+				__futex_cmpxchg(); \
+			} while (ret == 0 && oldval != val); \
+		} \
+	})
+
+/* No need to prefetch, since the atomic ops go to the home cache anyway. */
+#define __futex_prolog()
 
-#ifndef __tilegx__
-extern struct __get_user futex_xor(u32 __user *v, int n);
 #else
-static inline struct __get_user futex_xor(u32 __user *uaddr, int n)
-{
-	struct __get_user asm_ret = __get_user_4(uaddr);
-	if (!asm_ret.err) {
-		int oldval, newval;
-		do {
-			oldval = asm_ret.val;
-			newval = oldval ^ n;
-			asm_ret = futex_cmpxchg(uaddr, oldval, newval);
-		} while (asm_ret.err == 0 && oldval != asm_ret.val);
-	}
-	return asm_ret;
-}
+
+#define __futex_call(FN) \
+	{ \
+		struct __get_user gu = FN((u32 __force *)uaddr, lock, oparg); \
+		val = gu.val; \
+		ret = gu.err; \
+	}
+
+#define __futex_set() __futex_call(__atomic_xchg)
+#define __futex_add() __futex_call(__atomic_xchg_add)
+#define __futex_or() __futex_call(__atomic_or)
+#define __futex_andn() __futex_call(__atomic_andn)
+#define __futex_xor() __futex_call(__atomic_xor)
+
+#define __futex_cmpxchg() \
+	{ \
+		struct __get_user gu = __atomic_cmpxchg((u32 __force *)uaddr, \
+							lock, oldval, oparg); \
+		val = gu.val; \
+		ret = gu.err; \
+	}
+
+/*
+ * Find the lock pointer for the atomic calls to use, and issue a
+ * prefetch to the user address to bring it into cache. Similar to
+ * __atomic_setup(), but we can't do a read into the L1 since it might
+ * fault; instead we do a prefetch into the L2.
+ */
+#define __futex_prolog() \
+	int *lock; \
+	__insn_prefetch(uaddr); \
+	lock = __atomic_hashed_lock((int __force *)uaddr)
 #endif
 
 static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
@@ -59,8 +111,12 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
 	int cmp = (encoded_op >> 24) & 15;
 	int oparg = (encoded_op << 8) >> 20;
 	int cmparg = (encoded_op << 20) >> 20;
-	int ret;
-	struct __get_user asm_ret;
+	int uninitialized_var(val), ret;
+
+	__futex_prolog();
+
+	/* The 32-bit futex code makes this assumption, so validate it here. */
+	BUILD_BUG_ON(sizeof(atomic_t) != sizeof(int));
 
 	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
 		oparg = 1 << oparg;
@@ -71,46 +127,45 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
 	pagefault_disable();
 	switch (op) {
 	case FUTEX_OP_SET:
-		asm_ret = futex_set(uaddr, oparg);
+		__futex_set();
 		break;
 	case FUTEX_OP_ADD:
-		asm_ret = futex_add(uaddr, oparg);
+		__futex_add();
 		break;
 	case FUTEX_OP_OR:
-		asm_ret = futex_or(uaddr, oparg);
+		__futex_or();
 		break;
 	case FUTEX_OP_ANDN:
-		asm_ret = futex_andn(uaddr, oparg);
+		__futex_andn();
 		break;
 	case FUTEX_OP_XOR:
-		asm_ret = futex_xor(uaddr, oparg);
+		__futex_xor();
 		break;
 	default:
-		asm_ret.err = -ENOSYS;
+		ret = -ENOSYS;
+		break;
 	}
 	pagefault_enable();
 
-	ret = asm_ret.err;
-
 	if (!ret) {
 		switch (cmp) {
 		case FUTEX_OP_CMP_EQ:
-			ret = (asm_ret.val == cmparg);
+			ret = (val == cmparg);
 			break;
 		case FUTEX_OP_CMP_NE:
-			ret = (asm_ret.val != cmparg);
+			ret = (val != cmparg);
 			break;
 		case FUTEX_OP_CMP_LT:
-			ret = (asm_ret.val < cmparg);
+			ret = (val < cmparg);
 			break;
 		case FUTEX_OP_CMP_GE:
-			ret = (asm_ret.val >= cmparg);
+			ret = (val >= cmparg);
 			break;
 		case FUTEX_OP_CMP_LE:
-			ret = (asm_ret.val <= cmparg);
+			ret = (val <= cmparg);
 			break;
 		case FUTEX_OP_CMP_GT:
-			ret = (asm_ret.val > cmparg);
+			ret = (val > cmparg);
 			break;
 		default:
 			ret = -ENOSYS;
@@ -120,22 +175,20 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
 }
 
 static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
-						u32 oldval, u32 newval)
+						u32 oldval, u32 oparg)
 {
-	struct __get_user asm_ret;
+	int ret, val;
+
+	__futex_prolog();
 
 	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
 		return -EFAULT;
 
-	asm_ret = futex_cmpxchg(uaddr, oldval, newval);
-	*uval = asm_ret.val;
-	return asm_ret.err;
-}
+	__futex_cmpxchg();
 
-#ifndef __tilegx__
-/* Return failure from the atomic wrappers. */
-struct __get_user __atomic_bad_address(int __user *addr);
-#endif
+	*uval = val;
+	return ret;
+}
 
 #endif /* !__ASSEMBLY__ */
 
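For reference, the shifts decoded at the top of futex_atomic_op_inuser() undo the standard FUTEX_OP() packing from <linux/futex.h>:

    encoded_op = (op << 28) | (cmp << 24)
               | ((oparg & 0xfff) << 12) | (cmparg & 0xfff);

so (encoded_op << 8) >> 20 and (encoded_op << 20) >> 20 recover oparg and cmparg with sign-extension.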
diff --git a/arch/tile/include/asm/hardwall.h b/arch/tile/include/asm/hardwall.h
index 2ac422848c7d..47514a58d685 100644
--- a/arch/tile/include/asm/hardwall.h
+++ b/arch/tile/include/asm/hardwall.h
@@ -11,12 +11,14 @@
  * NON INFRINGEMENT. See the GNU General Public License for
  * more details.
  *
- * Provide methods for the HARDWALL_FILE for accessing the UDN.
+ * Provide methods for access control of per-cpu resources like
+ * UDN, IDN, or IPI.
  */
 
 #ifndef _ASM_TILE_HARDWALL_H
 #define _ASM_TILE_HARDWALL_H
 
+#include <arch/chip.h>
 #include <linux/ioctl.h>
 
 #define HARDWALL_IOCTL_BASE 0xa2
@@ -24,8 +26,9 @@
 /*
  * The HARDWALL_CREATE() ioctl is a macro with a "size" argument.
  * The resulting ioctl value is passed to the kernel in conjunction
- * with a pointer to a little-endian bitmask of cpus, which must be
- * physically in a rectangular configuration on the chip.
+ * with a pointer to a standard kernel bitmask of cpus.
+ * For network resources (UDN or IDN) the bitmask must physically
+ * represent a rectangular configuration on the chip.
  * The "size" is the number of bytes of cpu mask data.
  */
 #define _HARDWALL_CREATE 1
@@ -44,13 +47,7 @@
 #define HARDWALL_GET_ID \
 	_IO(HARDWALL_IOCTL_BASE, _HARDWALL_GET_ID)
 
-#ifndef __KERNEL__
-
-/* This is the canonical name expected by userspace. */
-#define HARDWALL_FILE "/dev/hardwall"
-
-#else
-
+#ifdef __KERNEL__
 /* /proc hooks for hardwall. */
 struct proc_dir_entry;
 #ifdef CONFIG_HARDWALL
@@ -59,7 +56,6 @@ int proc_pid_hardwall(struct task_struct *task, char *buffer);
 #else
 static inline void proc_tile_hardwall_init(struct proc_dir_entry *root) {}
 #endif
-
 #endif
 
 #endif /* _ASM_TILE_HARDWALL_H */
diff --git a/arch/tile/include/asm/hugetlb.h b/arch/tile/include/asm/hugetlb.h
index d396d1805163..b2042380a5aa 100644
--- a/arch/tile/include/asm/hugetlb.h
+++ b/arch/tile/include/asm/hugetlb.h
@@ -106,4 +106,25 @@ static inline void arch_release_hugepage(struct page *page)
 {
 }
 
+#ifdef CONFIG_HUGETLB_SUPER_PAGES
+static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
+				       struct page *page, int writable)
+{
+	size_t pagesize = huge_page_size(hstate_vma(vma));
+	if (pagesize != PUD_SIZE && pagesize != PMD_SIZE)
+		entry = pte_mksuper(entry);
+	return entry;
+}
+#define arch_make_huge_pte arch_make_huge_pte
+
+/* Sizes to scale up page size for PTEs with HV_PTE_SUPER bit. */
+enum {
+	HUGE_SHIFT_PGDIR = 0,
+	HUGE_SHIFT_PMD = 1,
+	HUGE_SHIFT_PAGE = 2,
+	HUGE_SHIFT_ENTRIES
+};
+extern int huge_shift[HUGE_SHIFT_ENTRIES];
+#endif
+
 #endif /* _ASM_TILE_HUGETLB_H */
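
In arch_make_huge_pte() above, a mapping whose huge page size is exactly PMD_SIZE or PUD_SIZE maps at an ordinary page-table level, while any additional size enabled by CONFIG_HUGETLB_SUPER_PAGES gets the HV_PTE_SUPER bit; huge_shift[] then records how far each level's base size is scaled up. An illustrative call, with the sizes assumed rather than taken from the patch:

    /* a VMA whose hstate size is, say, 64 * PAGE_SIZE (neither PMD_SIZE
     * nor PUD_SIZE) comes back with pte_super(entry) set */
    entry = arch_make_huge_pte(entry, vma, page, writable);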
diff --git a/arch/tile/include/asm/irqflags.h b/arch/tile/include/asm/irqflags.h
index 5db0ce54284d..b4e96fef2cf8 100644
--- a/arch/tile/include/asm/irqflags.h
+++ b/arch/tile/include/asm/irqflags.h
@@ -28,10 +28,10 @@
  */
 #if CHIP_HAS_AUX_PERF_COUNTERS()
 #define LINUX_MASKABLE_INTERRUPTS_HI \
-	(~(INT_MASK_HI(INT_PERF_COUNT) | INT_MASK_HI(INT_AUX_PERF_COUNT)))
+	(~(INT_MASK_HI(INT_PERF_COUNT) | INT_MASK_HI(INT_AUX_PERF_COUNT)))
 #else
 #define LINUX_MASKABLE_INTERRUPTS_HI \
-	(~(INT_MASK_HI(INT_PERF_COUNT)))
+	(~(INT_MASK_HI(INT_PERF_COUNT)))
 #endif
 
 #else
@@ -90,6 +90,14 @@
 	__insn_mtspr(SPR_INTERRUPT_MASK_RESET_K_0, (unsigned long)(__m)); \
 	__insn_mtspr(SPR_INTERRUPT_MASK_RESET_K_1, (unsigned long)(__m>>32)); \
 } while (0)
+#define interrupt_mask_save_mask() \
+	(__insn_mfspr(SPR_INTERRUPT_MASK_SET_K_0) | \
+	 (((unsigned long long)__insn_mfspr(SPR_INTERRUPT_MASK_SET_K_1))<<32))
+#define interrupt_mask_restore_mask(mask) do { \
+	unsigned long long __m = (mask); \
+	__insn_mtspr(SPR_INTERRUPT_MASK_K_0, (unsigned long)(__m)); \
+	__insn_mtspr(SPR_INTERRUPT_MASK_K_1, (unsigned long)(__m>>32)); \
+} while (0)
 #else
 #define interrupt_mask_set(n) \
 	__insn_mtspr(SPR_INTERRUPT_MASK_SET_K, (1UL << (n)))
@@ -101,6 +109,10 @@
 	__insn_mtspr(SPR_INTERRUPT_MASK_SET_K, (mask))
 #define interrupt_mask_reset_mask(mask) \
 	__insn_mtspr(SPR_INTERRUPT_MASK_RESET_K, (mask))
+#define interrupt_mask_save_mask() \
+	__insn_mfspr(SPR_INTERRUPT_MASK_K)
+#define interrupt_mask_restore_mask(mask) \
+	__insn_mtspr(SPR_INTERRUPT_MASK_K, (mask))
 #endif
 
 /*
@@ -122,7 +134,7 @@ DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);
 
 /* Disable all interrupts, including NMIs. */
 #define arch_local_irq_disable_all() \
-	interrupt_mask_set_mask(-1UL)
+	interrupt_mask_set_mask(-1ULL)
 
 /* Re-enable all maskable interrupts. */
 #define arch_local_irq_enable() \
@@ -179,7 +191,7 @@ DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);
 #ifdef __tilegx__
 
 #if INT_MEM_ERROR != 0
-# error Fix IRQ_DISABLED() macro
+# error Fix IRQS_DISABLED() macro
 #endif
 
 /* Return 0 or 1 to indicate whether interrupts are currently disabled. */
@@ -207,9 +219,10 @@ DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);
 	mtspr   SPR_INTERRUPT_MASK_SET_K, tmp
 
 /* Enable interrupts. */
-#define IRQ_ENABLE(tmp0, tmp1) \
+#define IRQ_ENABLE_LOAD(tmp0, tmp1) \
 	GET_INTERRUPTS_ENABLED_MASK_PTR(tmp0); \
-	ld      tmp0, tmp0; \
+	ld      tmp0, tmp0
+#define IRQ_ENABLE_APPLY(tmp0, tmp1) \
 	mtspr   SPR_INTERRUPT_MASK_RESET_K, tmp0
 
 #else /* !__tilegx__ */
@@ -253,17 +266,22 @@ DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);
 	mtspr   SPR_INTERRUPT_MASK_SET_K_1, tmp
 
 /* Enable interrupts. */
-#define IRQ_ENABLE(tmp0, tmp1) \
+#define IRQ_ENABLE_LOAD(tmp0, tmp1) \
 	GET_INTERRUPTS_ENABLED_MASK_PTR(tmp0); \
 	{ \
	 lw     tmp0, tmp0; \
	 addi   tmp1, tmp0, 4 \
 	}; \
-	lw      tmp1, tmp1; \
+	lw      tmp1, tmp1
+#define IRQ_ENABLE_APPLY(tmp0, tmp1) \
 	mtspr   SPR_INTERRUPT_MASK_RESET_K_0, tmp0; \
 	mtspr   SPR_INTERRUPT_MASK_RESET_K_1, tmp1
 #endif
 
+#define IRQ_ENABLE(tmp0, tmp1) \
+	IRQ_ENABLE_LOAD(tmp0, tmp1); \
+	IRQ_ENABLE_APPLY(tmp0, tmp1)
+
 /*
  * Do the CPU's IRQ-state tracing from assembly code. We call a
  * C function, but almost everywhere we do, we don't mind clobbering
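
The new interrupt_mask_save_mask()/interrupt_mask_restore_mask() pair rounds out the existing set/reset accessors by snapshotting and reinstating the whole mask SPR (split across two SPRs on 32-bit tile). A minimal usage sketch, with the surrounding context assumed:

    unsigned long long mask = interrupt_mask_save_mask();
    /* ... temporarily run with a modified interrupt mask ... */
    interrupt_mask_restore_mask(mask);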
diff --git a/arch/tile/include/asm/kexec.h b/arch/tile/include/asm/kexec.h
index c11a6cc73bb8..fc98ccfc98ac 100644
--- a/arch/tile/include/asm/kexec.h
+++ b/arch/tile/include/asm/kexec.h
@@ -19,12 +19,24 @@
 
 #include <asm/page.h>
 
+#ifndef __tilegx__
 /* Maximum physical address we can use pages from. */
 #define KEXEC_SOURCE_MEMORY_LIMIT TASK_SIZE
 /* Maximum address we can reach in physical address mode. */
 #define KEXEC_DESTINATION_MEMORY_LIMIT TASK_SIZE
 /* Maximum address we can use for the control code buffer. */
 #define KEXEC_CONTROL_MEMORY_LIMIT TASK_SIZE
+#else
+/* We need to limit the memory below PGDIR_SIZE since
+ * we only setup page table for [0, PGDIR_SIZE) before final kexec.
+ */
+/* Maximum physical address we can use pages from. */
+#define KEXEC_SOURCE_MEMORY_LIMIT PGDIR_SIZE
+/* Maximum address we can reach in physical address mode. */
+#define KEXEC_DESTINATION_MEMORY_LIMIT PGDIR_SIZE
+/* Maximum address we can use for the control code buffer. */
+#define KEXEC_CONTROL_MEMORY_LIMIT PGDIR_SIZE
+#endif
 
 #define KEXEC_CONTROL_PAGE_SIZE	PAGE_SIZE
 
diff --git a/arch/tile/include/asm/mmu.h b/arch/tile/include/asm/mmu.h
index 92f94c77b6e4..e2c789096795 100644
--- a/arch/tile/include/asm/mmu.h
+++ b/arch/tile/include/asm/mmu.h
@@ -21,7 +21,7 @@ struct mm_context {
 	 * Written under the mmap_sem semaphore; read without the
 	 * semaphore but atomically, but it is conservatively set.
 	 */
-	unsigned int priority_cached;
+	unsigned long priority_cached;
 };
 
 typedef struct mm_context mm_context_t;
diff --git a/arch/tile/include/asm/mmu_context.h b/arch/tile/include/asm/mmu_context.h
index 15fb24641120..37f0b741dee7 100644
--- a/arch/tile/include/asm/mmu_context.h
+++ b/arch/tile/include/asm/mmu_context.h
@@ -30,11 +30,15 @@ init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 	return 0;
 }
 
-/* Note that arch/tile/kernel/head.S also calls hv_install_context() */
+/*
+ * Note that arch/tile/kernel/head_NN.S and arch/tile/mm/migrate_NN.S
+ * also call hv_install_context().
+ */
 static inline void __install_page_table(pgd_t *pgdir, int asid, pgprot_t prot)
 {
 	/* FIXME: DIRECTIO should not always be set. FIXME. */
-	int rc = hv_install_context(__pa(pgdir), prot, asid, HV_CTX_DIRECTIO);
+	int rc = hv_install_context(__pa(pgdir), prot, asid,
+				    HV_CTX_DIRECTIO | CTX_PAGE_FLAG);
 	if (rc < 0)
 		panic("hv_install_context failed: %d", rc);
 }
diff --git a/arch/tile/include/asm/module.h b/arch/tile/include/asm/module.h
new file mode 100644
index 000000000000..44ed07ccd3d2
--- /dev/null
+++ b/arch/tile/include/asm/module.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2011 Tilera Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _ASM_TILE_MODULE_H
+#define _ASM_TILE_MODULE_H
+
+#include <arch/chip.h>
+
+#include <asm-generic/module.h>
+
+/* We can't use modules built with different page sizes. */
+#if defined(CONFIG_PAGE_SIZE_16KB)
+# define MODULE_PGSZ " 16KB"
+#elif defined(CONFIG_PAGE_SIZE_64KB)
+# define MODULE_PGSZ " 64KB"
+#else
+# define MODULE_PGSZ ""
+#endif
+
+/* We don't really support no-SMP so tag if someone tries. */
+#ifdef CONFIG_SMP
+#define MODULE_NOSMP ""
+#else
+#define MODULE_NOSMP " nosmp"
+#endif
+
+#define MODULE_ARCH_VERMAGIC CHIP_ARCH_NAME MODULE_PGSZ MODULE_NOSMP
+
+#endif /* _ASM_TILE_MODULE_H */
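
The vermagic string is easiest to see by expansion. Assuming CHIP_ARCH_NAME expands to "tilegx" (the actual value comes from <arch/chip.h>), a CONFIG_PAGE_SIZE_64KB SMP kernel yields:

    MODULE_ARCH_VERMAGIC  =>  "tilegx" " 64KB" ""  =>  "tilegx 64KB"

so a module built against a 16KB-page kernel fails the vermagic check at load time instead of corrupting memory.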
diff --git a/arch/tile/include/asm/page.h b/arch/tile/include/asm/page.h
index db93518fac03..9d9131e5c552 100644
--- a/arch/tile/include/asm/page.h
+++ b/arch/tile/include/asm/page.h
@@ -20,8 +20,17 @@
 #include <arch/chip.h>
 
 /* PAGE_SHIFT and HPAGE_SHIFT determine the page sizes. */
-#define PAGE_SHIFT	HV_LOG2_PAGE_SIZE_SMALL
-#define HPAGE_SHIFT	HV_LOG2_PAGE_SIZE_LARGE
+#if defined(CONFIG_PAGE_SIZE_16KB)
+#define PAGE_SHIFT 14
+#define CTX_PAGE_FLAG HV_CTX_PG_SM_16K
+#elif defined(CONFIG_PAGE_SIZE_64KB)
+#define PAGE_SHIFT 16
+#define CTX_PAGE_FLAG HV_CTX_PG_SM_64K
+#else
+#define PAGE_SHIFT HV_LOG2_DEFAULT_PAGE_SIZE_SMALL
+#define CTX_PAGE_FLAG 0
+#endif
+#define HPAGE_SHIFT HV_LOG2_DEFAULT_PAGE_SIZE_LARGE
 
 #define PAGE_SIZE	(_AC(1, UL) << PAGE_SHIFT)
 #define HPAGE_SIZE	(_AC(1, UL) << HPAGE_SHIFT)
@@ -78,8 +87,7 @@ typedef HV_PTE pgprot_t;
 /*
  * User L2 page tables are managed as one L2 page table per page,
  * because we use the page allocator for them. This keeps the allocation
- * simple and makes it potentially useful to implement HIGHPTE at some point.
- * However, it's also inefficient, since L2 page tables are much smaller
+ * simple, but it's also inefficient, since L2 page tables are much smaller
  * than pages (currently 2KB vs 64KB). So we should revisit this.
  */
 typedef struct page *pgtable_t;
@@ -128,7 +136,7 @@ static inline __attribute_const__ int get_order(unsigned long size)
 
 #define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)
 
-#define HUGE_MAX_HSTATE		2
+#define HUGE_MAX_HSTATE		6
 
 #ifdef CONFIG_HUGETLB_PAGE
 #define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
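
The new PAGE_SHIFT selection is the usual power-of-two relation; for example, under CONFIG_PAGE_SIZE_16KB:

    PAGE_SHIFT == 14
    PAGE_SIZE  == 1UL << 14 == 16384        /* 16KB */
    HUGETLB_PAGE_ORDER == HPAGE_SHIFT - 14

and CTX_PAGE_FLAG (HV_CTX_PG_SM_16K here) is what __install_page_table() in mmu_context.h passes down so the hypervisor knows which small-page size is in use.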
diff --git a/arch/tile/include/asm/pgalloc.h b/arch/tile/include/asm/pgalloc.h
index e919c0bdc22d..1b902508b664 100644
--- a/arch/tile/include/asm/pgalloc.h
+++ b/arch/tile/include/asm/pgalloc.h
@@ -19,24 +19,24 @@
 #include <linux/mm.h>
 #include <linux/mmzone.h>
 #include <asm/fixmap.h>
+#include <asm/page.h>
 #include <hv/hypervisor.h>
 
 /* Bits for the size of the second-level page table. */
-#define L2_KERNEL_PGTABLE_SHIFT \
-  (HV_LOG2_PAGE_SIZE_LARGE - HV_LOG2_PAGE_SIZE_SMALL + HV_LOG2_PTE_SIZE)
+#define L2_KERNEL_PGTABLE_SHIFT _HV_LOG2_L2_SIZE(HPAGE_SHIFT, PAGE_SHIFT)
+
+/* How big is a kernel L2 page table? */
+#define L2_KERNEL_PGTABLE_SIZE (1UL << L2_KERNEL_PGTABLE_SHIFT)
 
 /* We currently allocate user L2 page tables by page (unlike kernel L2s). */
-#if L2_KERNEL_PGTABLE_SHIFT < HV_LOG2_PAGE_SIZE_SMALL
-#define L2_USER_PGTABLE_SHIFT HV_LOG2_PAGE_SIZE_SMALL
+#if L2_KERNEL_PGTABLE_SHIFT < PAGE_SHIFT
+#define L2_USER_PGTABLE_SHIFT PAGE_SHIFT
 #else
 #define L2_USER_PGTABLE_SHIFT L2_KERNEL_PGTABLE_SHIFT
 #endif
 
 /* How many pages do we need, as an "order", for a user L2 page table? */
-#define L2_USER_PGTABLE_ORDER (L2_USER_PGTABLE_SHIFT - HV_LOG2_PAGE_SIZE_SMALL)
-
-/* How big is a kernel L2 page table? */
-#define L2_KERNEL_PGTABLE_SIZE (1 << L2_KERNEL_PGTABLE_SHIFT)
+#define L2_USER_PGTABLE_ORDER (L2_USER_PGTABLE_SHIFT - PAGE_SHIFT)
 
 static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
 {
@@ -50,14 +50,14 @@ static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
 static inline void pmd_populate_kernel(struct mm_struct *mm,
                                        pmd_t *pmd, pte_t *ptep)
 {
-	set_pmd(pmd, ptfn_pmd(__pa(ptep) >> HV_LOG2_PAGE_TABLE_ALIGN,
+	set_pmd(pmd, ptfn_pmd(HV_CPA_TO_PTFN(__pa(ptep)),
 			      __pgprot(_PAGE_PRESENT)));
 }
 
 static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
                                 pgtable_t page)
 {
-	set_pmd(pmd, ptfn_pmd(HV_PFN_TO_PTFN(page_to_pfn(page)),
+	set_pmd(pmd, ptfn_pmd(HV_CPA_TO_PTFN(PFN_PHYS(page_to_pfn(page))),
 			      __pgprot(_PAGE_PRESENT)));
 }
 
@@ -68,8 +68,20 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
 extern pgd_t *pgd_alloc(struct mm_struct *mm);
 extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
 
-extern pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address);
-extern void pte_free(struct mm_struct *mm, struct page *pte);
+extern pgtable_t pgtable_alloc_one(struct mm_struct *mm, unsigned long address,
+				   int order);
+extern void pgtable_free(struct mm_struct *mm, struct page *pte, int order);
+
+static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
+				      unsigned long address)
+{
+	return pgtable_alloc_one(mm, address, L2_USER_PGTABLE_ORDER);
+}
+
+static inline void pte_free(struct mm_struct *mm, struct page *pte)
+{
+	pgtable_free(mm, pte, L2_USER_PGTABLE_ORDER);
+}
 
 #define pmd_pgtable(pmd) pmd_page(pmd)
 
@@ -85,8 +97,13 @@ static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
 	pte_free(mm, virt_to_page(pte));
 }
 
-extern void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte,
-			   unsigned long address);
+extern void __pgtable_free_tlb(struct mmu_gather *tlb, struct page *pte,
+			       unsigned long address, int order);
+static inline void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte,
+				  unsigned long address)
+{
+	__pgtable_free_tlb(tlb, pte, address, L2_USER_PGTABLE_ORDER);
+}
 
 #define check_pgt_cache()	do { } while (0)
 
@@ -104,19 +121,44 @@ void shatter_pmd(pmd_t *pmd);
 void shatter_huge_page(unsigned long addr);
 
 #ifdef __tilegx__
-/* We share a single page allocator for both L1 and L2 page tables. */
-#if HV_L1_SIZE != HV_L2_SIZE
-# error Rework assumption that L1 and L2 page tables are same size.
-#endif
-#define L1_USER_PGTABLE_ORDER L2_USER_PGTABLE_ORDER
+
 #define pud_populate(mm, pud, pmd) \
   pmd_populate_kernel((mm), (pmd_t *)(pud), (pte_t *)(pmd))
-#define pmd_alloc_one(mm, addr) \
-  ((pmd_t *)page_to_virt(pte_alloc_one((mm), (addr))))
-#define pmd_free(mm, pmdp) \
-  pte_free((mm), virt_to_page(pmdp))
-#define __pmd_free_tlb(tlb, pmdp, address) \
-  __pte_free_tlb((tlb), virt_to_page(pmdp), (address))
+
+/* Bits for the size of the L1 (intermediate) page table. */
+#define L1_KERNEL_PGTABLE_SHIFT _HV_LOG2_L1_SIZE(HPAGE_SHIFT)
+
+/* How big is a kernel L2 page table? */
+#define L1_KERNEL_PGTABLE_SIZE (1UL << L1_KERNEL_PGTABLE_SHIFT)
+
+/* We currently allocate L1 page tables by page. */
+#if L1_KERNEL_PGTABLE_SHIFT < PAGE_SHIFT
+#define L1_USER_PGTABLE_SHIFT PAGE_SHIFT
+#else
+#define L1_USER_PGTABLE_SHIFT L1_KERNEL_PGTABLE_SHIFT
 #endif
 
+/* How many pages do we need, as an "order", for an L1 page table? */
+#define L1_USER_PGTABLE_ORDER (L1_USER_PGTABLE_SHIFT - PAGE_SHIFT)
+
+static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
+{
+	struct page *p = pgtable_alloc_one(mm, address, L1_USER_PGTABLE_ORDER);
+	return (pmd_t *)page_to_virt(p);
+}
+
+static inline void pmd_free(struct mm_struct *mm, pmd_t *pmdp)
+{
+	pgtable_free(mm, virt_to_page(pmdp), L1_USER_PGTABLE_ORDER);
+}
+
+static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp,
+				  unsigned long address)
+{
+	__pgtable_free_tlb(tlb, virt_to_page(pmdp), address,
+			   L1_USER_PGTABLE_ORDER);
+}
+
+#endif /* __tilegx__ */
+
 #endif /* _ASM_TILE_PGALLOC_H */
diff --git a/arch/tile/include/asm/pgtable.h b/arch/tile/include/asm/pgtable.h
index 67490910774d..73b1a4c9ad03 100644
--- a/arch/tile/include/asm/pgtable.h
+++ b/arch/tile/include/asm/pgtable.h
@@ -27,8 +27,10 @@
 #include <linux/slab.h>
 #include <linux/list.h>
 #include <linux/spinlock.h>
+#include <linux/pfn.h>
 #include <asm/processor.h>
 #include <asm/fixmap.h>
+#include <asm/page.h>
 
 struct mm_struct;
 struct vm_area_struct;
@@ -69,6 +71,7 @@ extern void set_page_homes(void);
 
 #define _PAGE_PRESENT           HV_PTE_PRESENT
 #define _PAGE_HUGE_PAGE         HV_PTE_PAGE
+#define _PAGE_SUPER_PAGE        HV_PTE_SUPER
 #define _PAGE_READABLE          HV_PTE_READABLE
 #define _PAGE_WRITABLE          HV_PTE_WRITABLE
 #define _PAGE_EXECUTABLE        HV_PTE_EXECUTABLE
@@ -85,6 +88,7 @@ extern void set_page_homes(void);
 #define _PAGE_ALL (\
   _PAGE_PRESENT | \
   _PAGE_HUGE_PAGE | \
+  _PAGE_SUPER_PAGE | \
  _PAGE_READABLE | \
  _PAGE_WRITABLE | \
  _PAGE_EXECUTABLE | \
@@ -162,7 +166,7 @@ extern void set_page_homes(void);
 	(pgprot_t) { ((oldprot).val & ~_PAGE_ALL) | (newprot).val }
 
 /* Just setting the PFN to zero suffices. */
-#define pte_pgprot(x) hv_pte_set_pfn((x), 0)
+#define pte_pgprot(x) hv_pte_set_pa((x), 0)
 
 /*
  * For PTEs and PDEs, we must clear the Present bit first when
@@ -187,6 +191,7 @@ static inline void __pte_clear(pte_t *ptep)
  * Undefined behaviour if not..
  */
 #define pte_present hv_pte_get_present
+#define pte_mknotpresent hv_pte_clear_present
 #define pte_user hv_pte_get_user
 #define pte_read hv_pte_get_readable
 #define pte_dirty hv_pte_get_dirty
@@ -194,6 +199,7 @@ static inline void __pte_clear(pte_t *ptep)
 #define pte_write hv_pte_get_writable
 #define pte_exec hv_pte_get_executable
 #define pte_huge hv_pte_get_page
+#define pte_super hv_pte_get_super
 #define pte_rdprotect hv_pte_clear_readable
 #define pte_exprotect hv_pte_clear_executable
 #define pte_mkclean hv_pte_clear_dirty
@@ -206,6 +212,7 @@ static inline void __pte_clear(pte_t *ptep)
 #define pte_mkyoung hv_pte_set_accessed
 #define pte_mkwrite hv_pte_set_writable
 #define pte_mkhuge hv_pte_set_page
+#define pte_mksuper hv_pte_set_super
 
 #define pte_special(pte) 0
 #define pte_mkspecial(pte) (pte)
@@ -261,7 +268,7 @@ static inline int pte_none(pte_t pte)
 
 static inline unsigned long pte_pfn(pte_t pte)
 {
-	return hv_pte_get_pfn(pte);
+	return PFN_DOWN(hv_pte_get_pa(pte));
 }
 
 /* Set or get the remote cache cpu in a pgprot with remote caching. */
@@ -270,7 +277,7 @@ extern int get_remote_cache_cpu(pgprot_t prot);
 
 static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
 {
-	return hv_pte_set_pfn(prot, pfn);
+	return hv_pte_set_pa(prot, PFN_PHYS(pfn));
 }
 
 /* Support for priority mappings. */
@@ -312,7 +319,7 @@ extern void check_mm_caching(struct mm_struct *prev, struct mm_struct *next);
  */
 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 {
-	return pfn_pte(hv_pte_get_pfn(pte), newprot);
+	return pfn_pte(pte_pfn(pte), newprot);
 }
 
 /*
@@ -335,13 +342,8 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
  */
 #define pgd_offset_k(address) pgd_offset(&init_mm, address)
 
-#if defined(CONFIG_HIGHPTE)
-extern pte_t *pte_offset_map(pmd_t *, unsigned long address);
-#define pte_unmap(pte) kunmap_atomic(pte)
-#else
 #define pte_offset_map(dir, address) pte_offset_kernel(dir, address)
 #define pte_unmap(pte) do { } while (0)
-#endif
 
 /* Clear a non-executable kernel PTE and flush it from the TLB. */
 #define kpte_clear_flush(ptep, vaddr)		\
@@ -410,6 +412,46 @@ static inline unsigned long pmd_index(unsigned long address)
 	return (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
 }
 
+#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
+static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
+					    unsigned long address,
+					    pmd_t *pmdp)
+{
+	return ptep_test_and_clear_young(vma, address, pmdp_ptep(pmdp));
+}
+
+#define __HAVE_ARCH_PMDP_SET_WRPROTECT
+static inline void pmdp_set_wrprotect(struct mm_struct *mm,
+				      unsigned long address, pmd_t *pmdp)
+{
+	ptep_set_wrprotect(mm, address, pmdp_ptep(pmdp));
+}
+
+
+#define __HAVE_ARCH_PMDP_GET_AND_CLEAR
+static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm,
+				       unsigned long address,
+				       pmd_t *pmdp)
+{
+	return pte_pmd(ptep_get_and_clear(mm, address, pmdp_ptep(pmdp)));
+}
+
+static inline void __set_pmd(pmd_t *pmdp, pmd_t pmdval)
+{
+	set_pte(pmdp_ptep(pmdp), pmd_pte(pmdval));
+}
+
+#define set_pmd_at(mm, addr, pmdp, pmdval) __set_pmd(pmdp, pmdval)
+
+/* Create a pmd from a PTFN. */
+static inline pmd_t ptfn_pmd(unsigned long ptfn, pgprot_t prot)
+{
+	return pte_pmd(hv_pte_set_ptfn(prot, ptfn));
+}
+
+/* Return the page-table frame number (ptfn) that a pmd_t points at. */
+#define pmd_ptfn(pmd) hv_pte_get_ptfn(pmd_pte(pmd))
+
 /*
  * A given kernel pmd_t maps to a specific virtual address (either a
  * kernel huge page or a kernel pte_t table). Since kernel pte_t
@@ -430,7 +472,48 @@ static inline unsigned long pmd_page_vaddr(pmd_t pmd)
 * OK for pte_lockptr(), since we just end up with potentially one
 * lock being used for several pte_t arrays.
 */
-#define pmd_page(pmd) pfn_to_page(HV_PTFN_TO_PFN(pmd_ptfn(pmd)))
+#define pmd_page(pmd) pfn_to_page(PFN_DOWN(HV_PTFN_TO_CPA(pmd_ptfn(pmd))))
+
+static inline void pmd_clear(pmd_t *pmdp)
+{
+	__pte_clear(pmdp_ptep(pmdp));
+}
+
+#define pmd_mknotpresent(pmd)	pte_pmd(pte_mknotpresent(pmd_pte(pmd)))
+#define pmd_young(pmd)		pte_young(pmd_pte(pmd))
+#define pmd_mkyoung(pmd)	pte_pmd(pte_mkyoung(pmd_pte(pmd)))
+#define pmd_mkold(pmd)		pte_pmd(pte_mkold(pmd_pte(pmd)))
+#define pmd_mkwrite(pmd)	pte_pmd(pte_mkwrite(pmd_pte(pmd)))
+#define pmd_write(pmd)		pte_write(pmd_pte(pmd))
+#define pmd_wrprotect(pmd)	pte_pmd(pte_wrprotect(pmd_pte(pmd)))
+#define pmd_mkdirty(pmd)	pte_pmd(pte_mkdirty(pmd_pte(pmd)))
+#define pmd_huge_page(pmd)	pte_huge(pmd_pte(pmd))
+#define pmd_mkhuge(pmd)		pte_pmd(pte_mkhuge(pmd_pte(pmd)))
+#define __HAVE_ARCH_PMD_WRITE
+
+#define pfn_pmd(pfn, pgprot)	pte_pmd(pfn_pte((pfn), (pgprot)))
+#define pmd_pfn(pmd)		pte_pfn(pmd_pte(pmd))
+#define mk_pmd(page, pgprot)	pfn_pmd(page_to_pfn(page), (pgprot))
+
+static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
+{
+	return pfn_pmd(pmd_pfn(pmd), newprot);
+}
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#define has_transparent_hugepage() 1
+#define pmd_trans_huge pmd_huge_page
+
+static inline pmd_t pmd_mksplitting(pmd_t pmd)
+{
+	return pte_pmd(hv_pte_set_client2(pmd_pte(pmd)));
+}
+
+static inline int pmd_trans_splitting(pmd_t pmd)
+{
+	return hv_pte_get_client2(pmd_pte(pmd));
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 
 /*
  * The pte page can be thought of an array like this: pte_t[PTRS_PER_PTE]
@@ -448,17 +531,13 @@ static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address)
 	return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(address);
 }
 
-static inline int pmd_huge_page(pmd_t pmd)
-{
-	return pmd_val(pmd) & _PAGE_HUGE_PAGE;
-}
-
 #include <asm-generic/pgtable.h>
 
 /* Support /proc/NN/pgtable API. */
 struct seq_file;
 int arch_proc_pgtable_show(struct seq_file *m, struct mm_struct *mm,
-			   unsigned long vaddr, pte_t *ptep, void **datap);
+			   unsigned long vaddr, unsigned long pagesize,
+			   pte_t *ptep, void **datap);
 
 #endif /* !__ASSEMBLY__ */
 
diff --git a/arch/tile/include/asm/pgtable_32.h b/arch/tile/include/asm/pgtable_32.h index 9f98529761fd..4ce4a7a99c24 100644 --- a/arch/tile/include/asm/pgtable_32.h +++ b/arch/tile/include/asm/pgtable_32.h | |||
@@ -20,11 +20,12 @@ | |||
20 | * The level-1 index is defined by the huge page size. A PGD is composed | 20 | * The level-1 index is defined by the huge page size. A PGD is composed |
21 | * of PTRS_PER_PGD pgd_t's and is the top level of the page table. | 21 | * of PTRS_PER_PGD pgd_t's and is the top level of the page table. |
22 | */ | 22 | */ |
23 | #define PGDIR_SHIFT HV_LOG2_PAGE_SIZE_LARGE | 23 | #define PGDIR_SHIFT HPAGE_SHIFT |
24 | #define PGDIR_SIZE HV_PAGE_SIZE_LARGE | 24 | #define PGDIR_SIZE HPAGE_SIZE |
25 | #define PGDIR_MASK (~(PGDIR_SIZE-1)) | 25 | #define PGDIR_MASK (~(PGDIR_SIZE-1)) |
26 | #define PTRS_PER_PGD (1 << (32 - PGDIR_SHIFT)) | 26 | #define PTRS_PER_PGD _HV_L1_ENTRIES(HPAGE_SHIFT) |
27 | #define SIZEOF_PGD (PTRS_PER_PGD * sizeof(pgd_t)) | 27 | #define PGD_INDEX(va) _HV_L1_INDEX(va, HPAGE_SHIFT) |
28 | #define SIZEOF_PGD _HV_L1_SIZE(HPAGE_SHIFT) | ||
28 | 29 | ||
29 | /* | 30 | /* |
30 | * The level-2 index is defined by the difference between the huge | 31 | * The level-2 index is defined by the difference between the huge |
@@ -33,8 +34,9 @@ | |||
33 | * Note that the hypervisor docs use PTE for what we call pte_t, so | 34 | * Note that the hypervisor docs use PTE for what we call pte_t, so |
34 | * this nomenclature is somewhat confusing. | 35 | * this nomenclature is somewhat confusing. |
35 | */ | 36 | */ |
36 | #define PTRS_PER_PTE (1 << (HV_LOG2_PAGE_SIZE_LARGE - HV_LOG2_PAGE_SIZE_SMALL)) | 37 | #define PTRS_PER_PTE _HV_L2_ENTRIES(HPAGE_SHIFT, PAGE_SHIFT) |
37 | #define SIZEOF_PTE (PTRS_PER_PTE * sizeof(pte_t)) | 38 | #define PTE_INDEX(va) _HV_L2_INDEX(va, HPAGE_SHIFT, PAGE_SHIFT) |
39 | #define SIZEOF_PTE _HV_L2_SIZE(HPAGE_SHIFT, PAGE_SHIFT) | ||
38 | 40 | ||
39 | #ifndef __ASSEMBLY__ | 41 | #ifndef __ASSEMBLY__ |
40 | 42 | ||
@@ -111,24 +113,14 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm, | |||
111 | return pte; | 113 | return pte; |
112 | } | 114 | } |
113 | 115 | ||
114 | static inline void __set_pmd(pmd_t *pmdp, pmd_t pmdval) | 116 | /* |
115 | { | 117 | * pmds are wrappers around pgds, which are the same as ptes. |
116 | set_pte(&pmdp->pud.pgd, pmdval.pud.pgd); | 118 | * It's often convenient to "cast" back and forth and use the pte methods, |
117 | } | 119 | * which are the methods supplied by the hypervisor. |
118 | 120 | */ | |
119 | /* Create a pmd from a PTFN. */ | 121 | #define pmd_pte(pmd) ((pmd).pud.pgd) |
120 | static inline pmd_t ptfn_pmd(unsigned long ptfn, pgprot_t prot) | 122 | #define pmdp_ptep(pmdp) (&(pmdp)->pud.pgd) |
121 | { | 123 | #define pte_pmd(pte) ((pmd_t){ { (pte) } }) |
122 | return (pmd_t){ { hv_pte_set_ptfn(prot, ptfn) } }; | ||
123 | } | ||
124 | |||
125 | /* Return the page-table frame number (ptfn) that a pmd_t points at. */ | ||
126 | #define pmd_ptfn(pmd) hv_pte_get_ptfn((pmd).pud.pgd) | ||
127 | |||
128 | static inline void pmd_clear(pmd_t *pmdp) | ||
129 | { | ||
130 | __pte_clear(&pmdp->pud.pgd); | ||
131 | } | ||
132 | 124 | ||
133 | #endif /* __ASSEMBLY__ */ | 125 | #endif /* __ASSEMBLY__ */ |
134 | 126 | ||
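[Editor's note] The new _HV_L1_*/_HV_L2_* macros parameterize the old shift arithmetic by the huge-page shift, presumably so the table geometry can follow a configurable HPAGE_SHIFT rather than a fixed hypervisor constant. A worked sketch, assuming the macros reduce to the previous definitions and a common tile configuration of 64KB base pages (PAGE_SHIFT = 16) and 16MB huge pages (HPAGE_SHIFT = 24):

	/* Sketch only: assumes the _HV_* macros reduce to the old arithmetic. */
	#define SK_PTRS_PER_PGD	(1 << (32 - 24))	/* 256 level-1 entries */
	#define SK_PTRS_PER_PTE	(1 << (24 - 16))	/* 256 level-2 entries */
	#define SK_SIZEOF_PGD	(SK_PTRS_PER_PGD * sizeof(pgd_t))
	#define SK_SIZEOF_PTE	(SK_PTRS_PER_PTE * sizeof(pte_t))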
diff --git a/arch/tile/include/asm/pgtable_64.h b/arch/tile/include/asm/pgtable_64.h index fd80328523b4..2492fa5478e7 100644 --- a/arch/tile/include/asm/pgtable_64.h +++ b/arch/tile/include/asm/pgtable_64.h | |||
@@ -21,17 +21,19 @@ | |||
21 | #define PGDIR_SIZE HV_L1_SPAN | 21 | #define PGDIR_SIZE HV_L1_SPAN |
22 | #define PGDIR_MASK (~(PGDIR_SIZE-1)) | 22 | #define PGDIR_MASK (~(PGDIR_SIZE-1)) |
23 | #define PTRS_PER_PGD HV_L0_ENTRIES | 23 | #define PTRS_PER_PGD HV_L0_ENTRIES |
24 | #define SIZEOF_PGD (PTRS_PER_PGD * sizeof(pgd_t)) | 24 | #define PGD_INDEX(va) HV_L0_INDEX(va) |
25 | #define SIZEOF_PGD HV_L0_SIZE | ||
25 | 26 | ||
26 | /* | 27 | /* |
27 | * The level-1 index is defined by the huge page size. A PMD is composed | 28 | * The level-1 index is defined by the huge page size. A PMD is composed |
28 | * of PTRS_PER_PMD pgd_t's and is the middle level of the page table. | 29 | * of PTRS_PER_PMD pgd_t's and is the middle level of the page table. |
29 | */ | 30 | */ |
30 | #define PMD_SHIFT HV_LOG2_PAGE_SIZE_LARGE | 31 | #define PMD_SHIFT HPAGE_SHIFT |
31 | #define PMD_SIZE HV_PAGE_SIZE_LARGE | 32 | #define PMD_SIZE HPAGE_SIZE |
32 | #define PMD_MASK (~(PMD_SIZE-1)) | 33 | #define PMD_MASK (~(PMD_SIZE-1)) |
33 | #define PTRS_PER_PMD (1 << (PGDIR_SHIFT - PMD_SHIFT)) | 34 | #define PTRS_PER_PMD _HV_L1_ENTRIES(HPAGE_SHIFT) |
34 | #define SIZEOF_PMD (PTRS_PER_PMD * sizeof(pmd_t)) | 35 | #define PMD_INDEX(va) _HV_L1_INDEX(va, HPAGE_SHIFT) |
36 | #define SIZEOF_PMD _HV_L1_SIZE(HPAGE_SHIFT) | ||
35 | 37 | ||
36 | /* | 38 | /* |
37 | * The level-2 index is defined by the difference between the huge | 39 | * The level-2 index is defined by the difference between the huge |
@@ -40,17 +42,19 @@ | |||
40 | * Note that the hypervisor docs use PTE for what we call pte_t, so | 42 | * Note that the hypervisor docs use PTE for what we call pte_t, so |
41 | * this nomenclature is somewhat confusing. | 43 | * this nomenclature is somewhat confusing. |
42 | */ | 44 | */ |
43 | #define PTRS_PER_PTE (1 << (HV_LOG2_PAGE_SIZE_LARGE - HV_LOG2_PAGE_SIZE_SMALL)) | 45 | #define PTRS_PER_PTE _HV_L2_ENTRIES(HPAGE_SHIFT, PAGE_SHIFT) |
44 | #define SIZEOF_PTE (PTRS_PER_PTE * sizeof(pte_t)) | 46 | #define PTE_INDEX(va) _HV_L2_INDEX(va, HPAGE_SHIFT, PAGE_SHIFT) |
47 | #define SIZEOF_PTE _HV_L2_SIZE(HPAGE_SHIFT, PAGE_SHIFT) | ||
45 | 48 | ||
46 | /* | 49 | /* |
47 | * Align the vmalloc area to an L2 page table, and leave a guard page | 50 | * Align the vmalloc area to an L2 page table. Omit guard pages at |
48 | * at the beginning and end. The vmalloc code also puts in an internal | 51 | * the beginning and end for simplicity (particularly in the per-cpu |
52 | * memory allocation code). The vmalloc code puts in an internal | ||
49 | * guard page between each allocation. | 53 | * guard page between each allocation. |
50 | */ | 54 | */ |
51 | #define _VMALLOC_END HUGE_VMAP_BASE | 55 | #define _VMALLOC_END HUGE_VMAP_BASE |
52 | #define VMALLOC_END (_VMALLOC_END - PAGE_SIZE) | 56 | #define VMALLOC_END _VMALLOC_END |
53 | #define VMALLOC_START (_VMALLOC_START + PAGE_SIZE) | 57 | #define VMALLOC_START _VMALLOC_START |
54 | 58 | ||
55 | #define HUGE_VMAP_END (HUGE_VMAP_BASE + PGDIR_SIZE) | 59 | #define HUGE_VMAP_END (HUGE_VMAP_BASE + PGDIR_SIZE) |
56 | 60 | ||
@@ -98,7 +102,7 @@ static inline int pud_bad(pud_t pud) | |||
98 | * A pud_t points to a pmd_t array. Since we can have multiple per | 102 | * A pud_t points to a pmd_t array. Since we can have multiple per |
99 | * page, we don't have a one-to-one mapping of pud_t's to pages. | 103 | * page, we don't have a one-to-one mapping of pud_t's to pages. |
100 | */ | 104 | */ |
101 | #define pud_page(pud) pfn_to_page(HV_PTFN_TO_PFN(pud_ptfn(pud))) | 105 | #define pud_page(pud) pfn_to_page(PFN_DOWN(HV_PTFN_TO_CPA(pud_ptfn(pud)))) |
102 | 106 | ||
103 | static inline unsigned long pud_index(unsigned long address) | 107 | static inline unsigned long pud_index(unsigned long address) |
104 | { | 108 | { |
@@ -108,28 +112,6 @@ static inline unsigned long pud_index(unsigned long address) | |||
108 | #define pmd_offset(pud, address) \ | 112 | #define pmd_offset(pud, address) \ |
109 | ((pmd_t *)pud_page_vaddr(*(pud)) + pmd_index(address)) | 113 | ((pmd_t *)pud_page_vaddr(*(pud)) + pmd_index(address)) |
110 | 114 | ||
111 | static inline void __set_pmd(pmd_t *pmdp, pmd_t pmdval) | ||
112 | { | ||
113 | set_pte(pmdp, pmdval); | ||
114 | } | ||
115 | |||
116 | /* Create a pmd from a PTFN and pgprot. */ | ||
117 | static inline pmd_t ptfn_pmd(unsigned long ptfn, pgprot_t prot) | ||
118 | { | ||
119 | return hv_pte_set_ptfn(prot, ptfn); | ||
120 | } | ||
121 | |||
122 | /* Return the page-table frame number (ptfn) that a pmd_t points at. */ | ||
123 | static inline unsigned long pmd_ptfn(pmd_t pmd) | ||
124 | { | ||
125 | return hv_pte_get_ptfn(pmd); | ||
126 | } | ||
127 | |||
128 | static inline void pmd_clear(pmd_t *pmdp) | ||
129 | { | ||
130 | __pte_clear(pmdp); | ||
131 | } | ||
132 | |||
133 | /* Normalize an address to having the correct high bits set. */ | 115 | /* Normalize an address to having the correct high bits set. */ |
134 | #define pgd_addr_normalize pgd_addr_normalize | 116 | #define pgd_addr_normalize pgd_addr_normalize |
135 | static inline unsigned long pgd_addr_normalize(unsigned long addr) | 117 | static inline unsigned long pgd_addr_normalize(unsigned long addr) |
@@ -170,6 +152,13 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm, | |||
170 | return hv_pte(__insn_exch(&ptep->val, 0UL)); | 152 | return hv_pte(__insn_exch(&ptep->val, 0UL)); |
171 | } | 153 | } |
172 | 154 | ||
155 | /* | ||
156 | * pmds are the same as pgds and ptes, so converting is a no-op. | ||
157 | */ | ||
158 | #define pmd_pte(pmd) (pmd) | ||
159 | #define pmdp_ptep(pmdp) (pmdp) | ||
160 | #define pte_pmd(pte) (pte) | ||
161 | |||
173 | #endif /* __ASSEMBLY__ */ | 162 | #endif /* __ASSEMBLY__ */ |
174 | 163 | ||
175 | #endif /* _ASM_TILE_PGTABLE_64_H */ | 164 | #endif /* _ASM_TILE_PGTABLE_64_H */ |
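[Editor's note] Two details worth unpacking here: on tilegx a pmd_t, pgd_t, and pte_t share one representation, so the pmd_pte()/pte_pmd() conversions are literal no-ops; and pud_page() now converts in three explicit steps. A sketch of that chain (PFN_DOWN(x) is the generic (x) >> PAGE_SHIFT helper; HV_PTFN_TO_CPA is the hypervisor's ptfn-to-client-physical-address conversion):

	/* Sketch: the pud_page() conversion chain, step by step. */
	static inline struct page *pud_page_sketch(pud_t pud)
	{
		unsigned long ptfn = pud_ptfn(pud);		/* page-table frame number */
		unsigned long cpa  = HV_PTFN_TO_CPA(ptfn);	/* client physical address */
		return pfn_to_page(PFN_DOWN(cpa));		/* cpa >> PAGE_SHIFT */
	}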
diff --git a/arch/tile/include/asm/processor.h b/arch/tile/include/asm/processor.h index 15cd8a4a06ce..8c4dd9ff91eb 100644 --- a/arch/tile/include/asm/processor.h +++ b/arch/tile/include/asm/processor.h | |||
@@ -76,6 +76,17 @@ struct async_tlb { | |||
76 | 76 | ||
77 | #ifdef CONFIG_HARDWALL | 77 | #ifdef CONFIG_HARDWALL |
78 | struct hardwall_info; | 78 | struct hardwall_info; |
79 | struct hardwall_task { | ||
80 | /* Which hardwall is this task tied to? (or NULL if none) */ | ||
81 | struct hardwall_info *info; | ||
82 | /* Chains this task into the list at info->task_head. */ | ||
83 | struct list_head list; | ||
84 | }; | ||
85 | #ifdef __tilepro__ | ||
86 | #define HARDWALL_TYPES 1 /* udn */ | ||
87 | #else | ||
88 | #define HARDWALL_TYPES 3 /* udn, idn, and ipi */ | ||
89 | #endif | ||
79 | #endif | 90 | #endif |
80 | 91 | ||
81 | struct thread_struct { | 92 | struct thread_struct { |
@@ -116,10 +127,8 @@ struct thread_struct { | |||
116 | unsigned long dstream_pf; | 127 | unsigned long dstream_pf; |
117 | #endif | 128 | #endif |
118 | #ifdef CONFIG_HARDWALL | 129 | #ifdef CONFIG_HARDWALL |
119 | /* Is this task tied to an activated hardwall? */ | 130 | /* Hardwall information for various resources. */ |
120 | struct hardwall_info *hardwall; | 131 | struct hardwall_task hardwall[HARDWALL_TYPES]; |
121 | /* Chains this task into the list at hardwall->list. */ | ||
122 | struct list_head hardwall_list; | ||
123 | #endif | 132 | #endif |
124 | #if CHIP_HAS_TILE_DMA() | 133 | #if CHIP_HAS_TILE_DMA() |
125 | /* Async DMA TLB fault information */ | 134 | /* Async DMA TLB fault information */ |
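[Editor's note] With hardwall state now an array indexed by resource type, per-task teardown naturally becomes a loop. A sketch of the shape hardwall_deactivate_all() presumably takes (the real body lives in the hardwall driver, not this header):

	/* Sketch: loop over every hardwall type a task may hold. */
	static inline void sketch_deactivate_all(struct task_struct *task)
	{
		int i;

		for (i = 0; i < HARDWALL_TYPES; i++)
			if (task->thread.hardwall[i].info != NULL)
				; /* detach task from hardwall type i */
	}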
diff --git a/arch/tile/include/asm/setup.h b/arch/tile/include/asm/setup.h index e58613e0752f..c67eb70ea78e 100644 --- a/arch/tile/include/asm/setup.h +++ b/arch/tile/include/asm/setup.h | |||
@@ -41,15 +41,15 @@ void restrict_dma_mpls(void); | |||
41 | #ifdef CONFIG_HARDWALL | 41 | #ifdef CONFIG_HARDWALL |
42 | /* User-level network management functions */ | 42 | /* User-level network management functions */ |
43 | void reset_network_state(void); | 43 | void reset_network_state(void); |
44 | void grant_network_mpls(void); | ||
45 | void restrict_network_mpls(void); | ||
46 | struct task_struct; | 44 | struct task_struct; |
47 | int hardwall_deactivate(struct task_struct *task); | 45 | void hardwall_switch_tasks(struct task_struct *prev, struct task_struct *next); |
46 | void hardwall_deactivate_all(struct task_struct *task); | ||
47 | int hardwall_ipi_valid(int cpu); | ||
48 | 48 | ||
49 | /* Hook hardwall code into changes in affinity. */ | 49 | /* Hook hardwall code into changes in affinity. */ |
50 | #define arch_set_cpus_allowed(p, new_mask) do { \ | 50 | #define arch_set_cpus_allowed(p, new_mask) do { \ |
51 | if (p->thread.hardwall && !cpumask_equal(&p->cpus_allowed, new_mask)) \ | 51 | if (!cpumask_equal(&p->cpus_allowed, new_mask)) \ |
52 | hardwall_deactivate(p); \ | 52 | hardwall_deactivate_all(p); \ |
53 | } while (0) | 53 | } while (0) |
54 | #endif | 54 | #endif |
55 | 55 | ||
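[Editor's note] Note the hook no longer tests a thread-local pointer before acting; with multiple hardwall types that check moves into hardwall_deactivate_all() itself, which must therefore be safe to call on a task holding no hardwalls. A purely illustrative sketch of a hypothetical scheduler-side caller:

	/* Sketch (hypothetical caller): the hook fires on affinity changes,
	 * e.g. from the sched_setaffinity() path. */
	static void sketch_set_allowed(struct task_struct *p,
				       const struct cpumask *new_mask)
	{
		arch_set_cpus_allowed(p, new_mask); /* may drop all of p's hardwalls */
		cpumask_copy(&p->cpus_allowed, new_mask);
	}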
diff --git a/arch/tile/include/asm/syscalls.h b/arch/tile/include/asm/syscalls.h index 3b5507c31eae..06f0464cfed9 100644 --- a/arch/tile/include/asm/syscalls.h +++ b/arch/tile/include/asm/syscalls.h | |||
@@ -43,7 +43,8 @@ long sys32_fadvise64(int fd, u32 offset_lo, u32 offset_hi, | |||
43 | u32 len, int advice); | 43 | u32 len, int advice); |
44 | int sys32_fadvise64_64(int fd, u32 offset_lo, u32 offset_hi, | 44 | int sys32_fadvise64_64(int fd, u32 offset_lo, u32 offset_hi, |
45 | u32 len_lo, u32 len_hi, int advice); | 45 | u32 len_lo, u32 len_hi, int advice); |
46 | long sys_flush_cache(void); | 46 | long sys_cacheflush(unsigned long addr, unsigned long len, |
47 | unsigned long flags); | ||
47 | #ifndef __tilegx__ /* No mmap() in the 32-bit kernel. */ | 48 | #ifndef __tilegx__ /* No mmap() in the 32-bit kernel. */ |
48 | #define sys_mmap sys_mmap | 49 | #define sys_mmap sys_mmap |
49 | #endif | 50 | #endif |
diff --git a/arch/tile/include/asm/tlbflush.h b/arch/tile/include/asm/tlbflush.h index 96199d214fb8..dcf91b25a1e5 100644 --- a/arch/tile/include/asm/tlbflush.h +++ b/arch/tile/include/asm/tlbflush.h | |||
@@ -38,16 +38,11 @@ DECLARE_PER_CPU(int, current_asid); | |||
38 | /* The hypervisor tells us what ASIDs are available to us. */ | 38 | /* The hypervisor tells us what ASIDs are available to us. */ |
39 | extern int min_asid, max_asid; | 39 | extern int min_asid, max_asid; |
40 | 40 | ||
41 | static inline unsigned long hv_page_size(const struct vm_area_struct *vma) | ||
42 | { | ||
43 | return (vma->vm_flags & VM_HUGETLB) ? HPAGE_SIZE : PAGE_SIZE; | ||
44 | } | ||
45 | |||
46 | /* Pass as vma pointer for non-executable mapping, if no vma available. */ | 41 | /* Pass as vma pointer for non-executable mapping, if no vma available. */ |
47 | #define FLUSH_NONEXEC ((const struct vm_area_struct *)-1UL) | 42 | #define FLUSH_NONEXEC ((struct vm_area_struct *)-1UL) |
48 | 43 | ||
49 | /* Flush a single user page on this cpu. */ | 44 | /* Flush a single user page on this cpu. */ |
50 | static inline void local_flush_tlb_page(const struct vm_area_struct *vma, | 45 | static inline void local_flush_tlb_page(struct vm_area_struct *vma, |
51 | unsigned long addr, | 46 | unsigned long addr, |
52 | unsigned long page_size) | 47 | unsigned long page_size) |
53 | { | 48 | { |
@@ -60,7 +55,7 @@ static inline void local_flush_tlb_page(const struct vm_area_struct *vma, | |||
60 | } | 55 | } |
61 | 56 | ||
62 | /* Flush range of user pages on this cpu. */ | 57 | /* Flush range of user pages on this cpu. */ |
63 | static inline void local_flush_tlb_pages(const struct vm_area_struct *vma, | 58 | static inline void local_flush_tlb_pages(struct vm_area_struct *vma, |
64 | unsigned long addr, | 59 | unsigned long addr, |
65 | unsigned long page_size, | 60 | unsigned long page_size, |
66 | unsigned long len) | 61 | unsigned long len) |
@@ -117,10 +112,10 @@ extern void flush_tlb_all(void); | |||
117 | extern void flush_tlb_kernel_range(unsigned long start, unsigned long end); | 112 | extern void flush_tlb_kernel_range(unsigned long start, unsigned long end); |
118 | extern void flush_tlb_current_task(void); | 113 | extern void flush_tlb_current_task(void); |
119 | extern void flush_tlb_mm(struct mm_struct *); | 114 | extern void flush_tlb_mm(struct mm_struct *); |
120 | extern void flush_tlb_page(const struct vm_area_struct *, unsigned long); | 115 | extern void flush_tlb_page(struct vm_area_struct *, unsigned long); |
121 | extern void flush_tlb_page_mm(const struct vm_area_struct *, | 116 | extern void flush_tlb_page_mm(struct vm_area_struct *, |
122 | struct mm_struct *, unsigned long); | 117 | struct mm_struct *, unsigned long); |
123 | extern void flush_tlb_range(const struct vm_area_struct *, | 118 | extern void flush_tlb_range(struct vm_area_struct *, |
124 | unsigned long start, unsigned long end); | 119 | unsigned long start, unsigned long end); |
125 | 120 | ||
126 | #define flush_tlb() flush_tlb_current_task() | 121 | #define flush_tlb() flush_tlb_current_task() |
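[Editor's note] The removed hv_page_size() helper is not dead logic; callers must now supply the page size explicitly, presumably so a smarter helper elsewhere can handle huge-page VMAs of more than one size. A sketch of the old behavior for a caller that still only distinguishes normal from single-size huge mappings:

	/* Sketch: the logic of the removed helper, reproduced caller-side. */
	static inline unsigned long sketch_vma_page_size(struct vm_area_struct *vma)
	{
		return (vma->vm_flags & VM_HUGETLB) ? HPAGE_SIZE : PAGE_SIZE;
	}

	local_flush_tlb_page(vma, addr, sketch_vma_page_size(vma));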
diff --git a/arch/tile/include/asm/uaccess.h b/arch/tile/include/asm/uaccess.h index ef34d2caa5b1..c3dd275f25e2 100644 --- a/arch/tile/include/asm/uaccess.h +++ b/arch/tile/include/asm/uaccess.h | |||
@@ -114,45 +114,75 @@ struct exception_table_entry { | |||
114 | extern int fixup_exception(struct pt_regs *regs); | 114 | extern int fixup_exception(struct pt_regs *regs); |
115 | 115 | ||
116 | /* | 116 | /* |
117 | * We return the __get_user_N function results in a structure, | 117 | * Support macros for __get_user(). |
118 | * thus in r0 and r1. If "err" is zero, "val" is the result | 118 | * |
119 | * of the read; otherwise, "err" is -EFAULT. | 119 | * Implementation note: The "case 8" logic of casting to the type of |
120 | * | 120 | * the result of subtracting the value from itself is basically a way |
121 | * We rarely need 8-byte values on a 32-bit architecture, but | 121 | * of keeping all integer types the same, but casting any pointers to |
122 | * we size the structure to accommodate. In practice, for | 122 | * of keeping all integer types the same, but casting any pointers to
123 | * the smaller reads, we can zero the high word for free, and | 123 | * questionable casts seen by the compiler on an ILP32 platform. |
124 | * the caller will ignore it by virtue of casting anyway. | 124 | * |
125 | * Note that __get_user() and __put_user() assume proper alignment. | ||
125 | */ | 126 | */ |
126 | struct __get_user { | ||
127 | unsigned long long val; | ||
128 | int err; | ||
129 | }; | ||
130 | 127 | ||
131 | /* | 128 | #ifdef __LP64__ |
132 | * FIXME: we should express these as inline extended assembler, since | 129 | #define _ASM_PTR ".quad" |
133 | * they're fundamentally just a variable dereference and some | 130 | #else |
134 | * supporting exception_table gunk. Note that (a la i386) we can | 131 | #define _ASM_PTR ".long" |
135 | * extend the copy_to_user and copy_from_user routines to call into | 132 | #endif |
136 | * such extended assembler routines, though we will have to use a | 133 | |
137 | * different return code in that case (1, 2, or 4, rather than -EFAULT). | 134 | #define __get_user_asm(OP, x, ptr, ret) \ |
138 | */ | 135 | asm volatile("1: {" #OP " %1, %2; movei %0, 0 }\n" \ |
139 | extern struct __get_user __get_user_1(const void __user *); | 136 | ".pushsection .fixup,\"ax\"\n" \ |
140 | extern struct __get_user __get_user_2(const void __user *); | 137 | "0: { movei %1, 0; movei %0, %3 }\n" \ |
141 | extern struct __get_user __get_user_4(const void __user *); | 138 | "j 9f\n" \ |
142 | extern struct __get_user __get_user_8(const void __user *); | 139 | ".section __ex_table,\"a\"\n" \ |
143 | extern int __put_user_1(long, void __user *); | 140 | _ASM_PTR " 1b, 0b\n" \ |
144 | extern int __put_user_2(long, void __user *); | 141 | ".popsection\n" \ |
145 | extern int __put_user_4(long, void __user *); | 142 | "9:" \ |
146 | extern int __put_user_8(long long, void __user *); | 143 | : "=r" (ret), "=r" (x) \ |
147 | 144 | : "r" (ptr), "i" (-EFAULT)) | |
148 | /* Unimplemented routines to cause linker failures */ | 145 | |
149 | extern struct __get_user __get_user_bad(void); | 146 | #ifdef __tilegx__ |
150 | extern int __put_user_bad(void); | 147 | #define __get_user_1(x, ptr, ret) __get_user_asm(ld1u, x, ptr, ret) |
148 | #define __get_user_2(x, ptr, ret) __get_user_asm(ld2u, x, ptr, ret) | ||
149 | #define __get_user_4(x, ptr, ret) __get_user_asm(ld4u, x, ptr, ret) | ||
150 | #define __get_user_8(x, ptr, ret) __get_user_asm(ld, x, ptr, ret) | ||
151 | #else | ||
152 | #define __get_user_1(x, ptr, ret) __get_user_asm(lb_u, x, ptr, ret) | ||
153 | #define __get_user_2(x, ptr, ret) __get_user_asm(lh_u, x, ptr, ret) | ||
154 | #define __get_user_4(x, ptr, ret) __get_user_asm(lw, x, ptr, ret) | ||
155 | #ifdef __LITTLE_ENDIAN | ||
156 | #define __lo32(a, b) a | ||
157 | #define __hi32(a, b) b | ||
158 | #else | ||
159 | #define __lo32(a, b) b | ||
160 | #define __hi32(a, b) a | ||
161 | #endif | ||
162 | #define __get_user_8(x, ptr, ret) \ | ||
163 | ({ \ | ||
164 | unsigned int __a, __b; \ | ||
165 | asm volatile("1: { lw %1, %3; addi %2, %3, 4 }\n" \ | ||
166 | "2: { lw %2, %2; movei %0, 0 }\n" \ | ||
167 | ".pushsection .fixup,\"ax\"\n" \ | ||
168 | "0: { movei %1, 0; movei %2, 0 }\n" \ | ||
169 | "{ movei %0, %4; j 9f }\n" \ | ||
170 | ".section __ex_table,\"a\"\n" \ | ||
171 | ".word 1b, 0b\n" \ | ||
172 | ".word 2b, 0b\n" \ | ||
173 | ".popsection\n" \ | ||
174 | "9:" \ | ||
175 | : "=r" (ret), "=r" (__a), "=&r" (__b) \ | ||
176 | : "r" (ptr), "i" (-EFAULT)); \ | ||
177 | (x) = (__typeof(x))(__typeof((x)-(x))) \ | ||
178 | (((u64)__hi32(__a, __b) << 32) | \ | ||
179 | __lo32(__a, __b)); \ | ||
180 | }) | ||
181 | #endif | ||
182 | |||
183 | extern int __get_user_bad(void) | ||
184 | __attribute__((warning("sizeof __get_user argument not 1, 2, 4 or 8"))); | ||
151 | 185 | ||
152 | /* | ||
153 | * Careful: we have to cast the result to the type of the pointer | ||
154 | * for sign reasons. | ||
155 | */ | ||
156 | /** | 186 | /** |
157 | * __get_user: - Get a simple variable from user space, with less checking. | 187 | * __get_user: - Get a simple variable from user space, with less checking. |
158 | * @x: Variable to store result. | 188 | * @x: Variable to store result. |
@@ -174,30 +204,62 @@ extern int __put_user_bad(void); | |||
174 | * function. | 204 | * function. |
175 | */ | 205 | */ |
176 | #define __get_user(x, ptr) \ | 206 | #define __get_user(x, ptr) \ |
177 | ({ struct __get_user __ret; \ | 207 | ({ \ |
178 | __typeof__(*(ptr)) const __user *__gu_addr = (ptr); \ | 208 | int __ret; \ |
179 | __chk_user_ptr(__gu_addr); \ | 209 | __chk_user_ptr(ptr); \ |
180 | switch (sizeof(*(__gu_addr))) { \ | 210 | switch (sizeof(*(ptr))) { \ |
181 | case 1: \ | 211 | case 1: __get_user_1(x, ptr, __ret); break; \ |
182 | __ret = __get_user_1(__gu_addr); \ | 212 | case 2: __get_user_2(x, ptr, __ret); break; \ |
183 | break; \ | 213 | case 4: __get_user_4(x, ptr, __ret); break; \ |
184 | case 2: \ | 214 | case 8: __get_user_8(x, ptr, __ret); break; \ |
185 | __ret = __get_user_2(__gu_addr); \ | 215 | default: __ret = __get_user_bad(); break; \ |
186 | break; \ | 216 | } \ |
187 | case 4: \ | 217 | __ret; \ |
188 | __ret = __get_user_4(__gu_addr); \ | 218 | }) |
189 | break; \ | 219 | |
190 | case 8: \ | 220 | /* Support macros for __put_user(). */ |
191 | __ret = __get_user_8(__gu_addr); \ | 221 | |
192 | break; \ | 222 | #define __put_user_asm(OP, x, ptr, ret) \ |
193 | default: \ | 223 | asm volatile("1: {" #OP " %1, %2; movei %0, 0 }\n" \ |
194 | __ret = __get_user_bad(); \ | 224 | ".pushsection .fixup,\"ax\"\n" \ |
195 | break; \ | 225 | "0: { movei %0, %3; j 9f }\n" \ |
196 | } \ | 226 | ".section __ex_table,\"a\"\n" \ |
197 | (x) = (__typeof__(*__gu_addr)) (__typeof__(*__gu_addr - *__gu_addr)) \ | 227 | _ASM_PTR " 1b, 0b\n" \ |
198 | __ret.val; \ | 228 | ".popsection\n" \ |
199 | __ret.err; \ | 229 | "9:" \ |
200 | }) | 230 | : "=r" (ret) \ |
231 | : "r" (ptr), "r" (x), "i" (-EFAULT)) | ||
232 | |||
233 | #ifdef __tilegx__ | ||
234 | #define __put_user_1(x, ptr, ret) __put_user_asm(st1, x, ptr, ret) | ||
235 | #define __put_user_2(x, ptr, ret) __put_user_asm(st2, x, ptr, ret) | ||
236 | #define __put_user_4(x, ptr, ret) __put_user_asm(st4, x, ptr, ret) | ||
237 | #define __put_user_8(x, ptr, ret) __put_user_asm(st, x, ptr, ret) | ||
238 | #else | ||
239 | #define __put_user_1(x, ptr, ret) __put_user_asm(sb, x, ptr, ret) | ||
240 | #define __put_user_2(x, ptr, ret) __put_user_asm(sh, x, ptr, ret) | ||
241 | #define __put_user_4(x, ptr, ret) __put_user_asm(sw, x, ptr, ret) | ||
242 | #define __put_user_8(x, ptr, ret) \ | ||
243 | ({ \ | ||
244 | u64 __x = (__typeof((x)-(x)))(x); \ | ||
245 | int __lo = (int) __x, __hi = (int) (__x >> 32); \ | ||
246 | asm volatile("1: { sw %1, %2; addi %0, %1, 4 }\n" \ | ||
247 | "2: { sw %0, %3; movei %0, 0 }\n" \ | ||
248 | ".pushsection .fixup,\"ax\"\n" \ | ||
249 | "0: { movei %0, %4; j 9f }\n" \ | ||
250 | ".section __ex_table,\"a\"\n" \ | ||
251 | ".word 1b, 0b\n" \ | ||
252 | ".word 2b, 0b\n" \ | ||
253 | ".popsection\n" \ | ||
254 | "9:" \ | ||
255 | : "=&r" (ret) \ | ||
256 | : "r" (ptr), "r" (__lo32(__lo, __hi)), \ | ||
257 | "r" (__hi32(__lo, __hi)), "i" (-EFAULT)); \ | ||
258 | }) | ||
259 | #endif | ||
260 | |||
261 | extern int __put_user_bad(void) | ||
262 | __attribute__((warning("sizeof __put_user argument not 1, 2, 4 or 8"))); | ||
201 | 263 | ||
202 | /** | 264 | /** |
203 | * __put_user: - Write a simple value into user space, with less checking. | 265 | * __put_user: - Write a simple value into user space, with less checking. |
@@ -217,39 +279,19 @@ extern int __put_user_bad(void); | |||
217 | * function. | 279 | * function. |
218 | * | 280 | * |
219 | * Returns zero on success, or -EFAULT on error. | 281 | * Returns zero on success, or -EFAULT on error. |
220 | * | ||
221 | * Implementation note: The "case 8" logic of casting to the type of | ||
222 | * the result of subtracting the value from itself is basically a way | ||
223 | * of keeping all integer types the same, but casting any pointers to | ||
224 | * ptrdiff_t, i.e. also an integer type. This way there are no | ||
225 | * questionable casts seen by the compiler on an ILP32 platform. | ||
226 | */ | 282 | */ |
227 | #define __put_user(x, ptr) \ | 283 | #define __put_user(x, ptr) \ |
228 | ({ \ | 284 | ({ \ |
229 | int __pu_err = 0; \ | 285 | int __ret; \ |
230 | __typeof__(*(ptr)) __user *__pu_addr = (ptr); \ | 286 | __chk_user_ptr(ptr); \ |
231 | typeof(*__pu_addr) __pu_val = (x); \ | 287 | switch (sizeof(*(ptr))) { \ |
232 | __chk_user_ptr(__pu_addr); \ | 288 | case 1: __put_user_1(x, ptr, __ret); break; \ |
233 | switch (sizeof(__pu_val)) { \ | 289 | case 2: __put_user_2(x, ptr, __ret); break; \ |
234 | case 1: \ | 290 | case 4: __put_user_4(x, ptr, __ret); break; \ |
235 | __pu_err = __put_user_1((long)__pu_val, __pu_addr); \ | 291 | case 8: __put_user_8(x, ptr, __ret); break; \ |
236 | break; \ | 292 | default: __ret = __put_user_bad(); break; \ |
237 | case 2: \ | ||
238 | __pu_err = __put_user_2((long)__pu_val, __pu_addr); \ | ||
239 | break; \ | ||
240 | case 4: \ | ||
241 | __pu_err = __put_user_4((long)__pu_val, __pu_addr); \ | ||
242 | break; \ | ||
243 | case 8: \ | ||
244 | __pu_err = \ | ||
245 | __put_user_8((__typeof__(__pu_val - __pu_val))__pu_val,\ | ||
246 | __pu_addr); \ | ||
247 | break; \ | ||
248 | default: \ | ||
249 | __pu_err = __put_user_bad(); \ | ||
250 | break; \ | ||
251 | } \ | 293 | } \ |
252 | __pu_err; \ | 294 | __ret; \ |
253 | }) | 295 | }) |
254 | 296 | ||
255 | /* | 297 | /* |
@@ -378,7 +420,7 @@ static inline unsigned long __must_check copy_from_user(void *to, | |||
378 | /** | 420 | /** |
379 | * __copy_in_user() - copy data within user space, with less checking. | 421 | * __copy_in_user() - copy data within user space, with less checking. |
380 | * @to: Destination address, in user space. | 422 | * @to: Destination address, in user space. |
381 | * @from: Source address, in kernel space. | 423 | * @from: Source address, in user space. |
382 | * @n: Number of bytes to copy. | 424 | * @n: Number of bytes to copy. |
383 | * | 425 | * |
384 | * Context: User context only. This function may sleep. | 426 | * Context: User context only. This function may sleep. |
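[Editor's note] Two notes on the rewritten macros. First, the "(x)-(x)" typing trick: for an integer x, __typeof((x)-(x)) is just x's own type, while for a pointer x it is ptrdiff_t, so the intermediate cast is always through an integer type and ILP32 builds never see a u64-to-pointer cast. Second, on a fault the fixup code zeroes the destination and returns -EFAULT, so the loaded value is only meaningful after checking the return. A usage sketch, assuming uptr has already been validated with access_ok():

	u32 val;

	if (__get_user(val, uptr))	/* on fault: val == 0, returns -EFAULT */
		return -EFAULT;
	if (__put_user(val + 1, uptr))
		return -EFAULT;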
diff --git a/arch/tile/include/asm/unistd.h b/arch/tile/include/asm/unistd.h index f70bf1c541f1..a017246ca0ce 100644 --- a/arch/tile/include/asm/unistd.h +++ b/arch/tile/include/asm/unistd.h | |||
@@ -24,8 +24,8 @@ | |||
24 | #include <asm-generic/unistd.h> | 24 | #include <asm-generic/unistd.h> |
25 | 25 | ||
26 | /* Additional Tilera-specific syscalls. */ | 26 | /* Additional Tilera-specific syscalls. */ |
27 | #define __NR_flush_cache (__NR_arch_specific_syscall + 1) | 27 | #define __NR_cacheflush (__NR_arch_specific_syscall + 1) |
28 | __SYSCALL(__NR_flush_cache, sys_flush_cache) | 28 | __SYSCALL(__NR_cacheflush, sys_cacheflush) |
29 | 29 | ||
30 | #ifndef __tilegx__ | 30 | #ifndef __tilegx__ |
31 | /* "Fast" syscalls provide atomic support for 32-bit chips. */ | 31 | /* "Fast" syscalls provide atomic support for 32-bit chips. */ |