author     David S. Miller <davem@davemloft.net>   2015-04-02 16:16:53 -0400
committer  David S. Miller <davem@davemloft.net>   2015-04-02 16:16:53 -0400
commit     9f0d34bc344889c2e6c593bd949d7ab821f0f4a5 (patch)
tree       e5bfc776a09315afa4dbcae97ac04f2cca239c96 /arch/arm64
parent     e4a924f5768c55002c02ceba9b9f86824c35f956 (diff)
parent     0a4812798fae4f6bfcaab51e31b3898ff5ea3108 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Conflicts:
drivers/net/usb/asix_common.c
drivers/net/usb/sr9800.c
drivers/net/usb/usbnet.c
include/linux/usb/usbnet.h
net/ipv4/tcp_ipv4.c
net/ipv6/tcp_ipv6.c
The TCP conflicts were overlapping changes. In 'net' we added a
READ_ONCE() to the read of the socket's cached RX route (illustrated in
the sketch below), whilst in 'net-next' Eric Dumazet touched the
surrounding code dealing with how mini sockets are handled.

With USB, it's a case of the same bug fix going into 'net-next' first,
which I then cherry-picked back into 'net'.
Signed-off-by: David S. Miller <davem@davemloft.net>
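The conflict note above refers to reading the socket's cached RX route with READ_ONCE(). As a rough illustration only (this is not the conflicting tcp_ipv4.c/tcp_ipv6.c code, and the helper name peek_cached_rx_route() is made up for the example), the pattern looks like this:

#include <linux/compiler.h>
#include <net/sock.h>
#include <net/dst.h>

/*
 * Illustrative sketch, not the code from net/ipv4/tcp_ipv4.c.  READ_ONCE()
 * forces a single, tear-free load of the cached RX route pointer, so the
 * value that is tested is the value that is used even if another CPU
 * updates sk->sk_rx_dst concurrently.
 */
static struct dst_entry *peek_cached_rx_route(struct sock *sk)
{
        struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);

        return dst;     /* caller validates and takes references as needed */
}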
Diffstat (limited to 'arch/arm64')
-rw-r--r--  arch/arm64/boot/dts/arm/juno-clocks.dtsi  |  2
-rw-r--r--  arch/arm64/include/asm/cmpxchg.h          | 32
-rw-r--r--  arch/arm64/include/asm/mmu_context.h      |  9
-rw-r--r--  arch/arm64/include/asm/percpu.h           | 44
4 files changed, 67 insertions, 20 deletions
diff --git a/arch/arm64/boot/dts/arm/juno-clocks.dtsi b/arch/arm64/boot/dts/arm/juno-clocks.dtsi
index ea2b5666a16f..c9b89efe0f56 100644
--- a/arch/arm64/boot/dts/arm/juno-clocks.dtsi
+++ b/arch/arm64/boot/dts/arm/juno-clocks.dtsi
@@ -8,7 +8,7 @@
  */
 
         /* SoC fixed clocks */
-        soc_uartclk: refclk72738khz {
+        soc_uartclk: refclk7273800hz {
                 compatible = "fixed-clock";
                 #clock-cells = <0>;
                 clock-frequency = <7273800>;
diff --git a/arch/arm64/include/asm/cmpxchg.h b/arch/arm64/include/asm/cmpxchg.h
index cb9593079f29..d8c25b7b18fb 100644
--- a/arch/arm64/include/asm/cmpxchg.h
+++ b/arch/arm64/include/asm/cmpxchg.h
@@ -246,14 +246,30 @@ static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old,
         __ret; \
 })
 
-#define this_cpu_cmpxchg_1(ptr, o, n) cmpxchg_local(raw_cpu_ptr(&(ptr)), o, n)
-#define this_cpu_cmpxchg_2(ptr, o, n) cmpxchg_local(raw_cpu_ptr(&(ptr)), o, n)
-#define this_cpu_cmpxchg_4(ptr, o, n) cmpxchg_local(raw_cpu_ptr(&(ptr)), o, n)
-#define this_cpu_cmpxchg_8(ptr, o, n) cmpxchg_local(raw_cpu_ptr(&(ptr)), o, n)
-
-#define this_cpu_cmpxchg_double_8(ptr1, ptr2, o1, o2, n1, n2) \
-        cmpxchg_double_local(raw_cpu_ptr(&(ptr1)), raw_cpu_ptr(&(ptr2)), \
-                                o1, o2, n1, n2)
+#define _protect_cmpxchg_local(pcp, o, n) \
+({ \
+        typeof(*raw_cpu_ptr(&(pcp))) __ret; \
+        preempt_disable(); \
+        __ret = cmpxchg_local(raw_cpu_ptr(&(pcp)), o, n); \
+        preempt_enable(); \
+        __ret; \
+})
+
+#define this_cpu_cmpxchg_1(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
+#define this_cpu_cmpxchg_2(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
+#define this_cpu_cmpxchg_4(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
+#define this_cpu_cmpxchg_8(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
+
+#define this_cpu_cmpxchg_double_8(ptr1, ptr2, o1, o2, n1, n2) \
+({ \
+        int __ret; \
+        preempt_disable(); \
+        __ret = cmpxchg_double_local( raw_cpu_ptr(&(ptr1)), \
+                                        raw_cpu_ptr(&(ptr2)), \
+                                        o1, o2, n1, n2); \
+        preempt_enable(); \
+        __ret; \
+})
 
 #define cmpxchg64(ptr,o,n)              cmpxchg((ptr),(o),(n))
 #define cmpxchg64_local(ptr,o,n)        cmpxchg_local((ptr),(o),(n))
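The cmpxchg.h hunk above exists because this_cpu_cmpxchg_*() must behave atomically with respect to the local CPU: if the task could be preempted and migrated between evaluating raw_cpu_ptr(&(ptr)) and executing cmpxchg_local(), it would end up operating on the per-CPU copy of a CPU it is no longer running on. Wrapping the whole sequence in preempt_disable()/preempt_enable() closes that window. A minimal usage sketch follows, assuming a hypothetical per-CPU variable slot_state that is not part of the patch:

#include <linux/percpu.h>
#include <linux/types.h>

/* Hypothetical per-CPU variable, for illustration only. */
static DEFINE_PER_CPU(int, slot_state);

/*
 * Atomically move this CPU's slot from 0 (free) to 1 (claimed).  With the
 * new _protect_cmpxchg_local() wrapper, the address computation and the
 * cmpxchg_local() are guaranteed to happen on the same CPU, so this is
 * safe to call from preemptible context.
 */
static bool try_claim_local_slot(void)
{
        return this_cpu_cmpxchg(slot_state, 0, 1) == 0;
}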
diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
index a9eee33dfa62..101a42bde728 100644
--- a/arch/arm64/include/asm/mmu_context.h
+++ b/arch/arm64/include/asm/mmu_context.h
@@ -151,6 +151,15 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
 {
         unsigned int cpu = smp_processor_id();
 
+        /*
+         * init_mm.pgd does not contain any user mappings and it is always
+         * active for kernel addresses in TTBR1. Just set the reserved TTBR0.
+         */
+        if (next == &init_mm) {
+                cpu_set_reserved_ttbr0();
+                return;
+        }
+
         if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next)
                 check_and_switch_context(next, tsk);
 }
diff --git a/arch/arm64/include/asm/percpu.h b/arch/arm64/include/asm/percpu.h
index 09da25bc596f..4fde8c1df97f 100644
--- a/arch/arm64/include/asm/percpu.h
+++ b/arch/arm64/include/asm/percpu.h
@@ -204,25 +204,47 @@ static inline unsigned long __percpu_xchg(void *ptr, unsigned long val,
         return ret;
 }
 
+#define _percpu_read(pcp) \
+({ \
+        typeof(pcp) __retval; \
+        preempt_disable(); \
+        __retval = (typeof(pcp))__percpu_read(raw_cpu_ptr(&(pcp)), \
+                                              sizeof(pcp)); \
+        preempt_enable(); \
+        __retval; \
+})
+
+#define _percpu_write(pcp, val) \
+do { \
+        preempt_disable(); \
+        __percpu_write(raw_cpu_ptr(&(pcp)), (unsigned long)(val), \
+                                sizeof(pcp)); \
+        preempt_enable(); \
+} while(0) \
+
+#define _pcp_protect(operation, pcp, val) \
+({ \
+        typeof(pcp) __retval; \
+        preempt_disable(); \
+        __retval = (typeof(pcp))operation(raw_cpu_ptr(&(pcp)), \
+                                          (val), sizeof(pcp)); \
+        preempt_enable(); \
+        __retval; \
+})
+
 #define _percpu_add(pcp, val) \
-        __percpu_add(raw_cpu_ptr(&(pcp)), val, sizeof(pcp))
+        _pcp_protect(__percpu_add, pcp, val)
 
-#define _percpu_add_return(pcp, val) (typeof(pcp)) (_percpu_add(pcp, val))
+#define _percpu_add_return(pcp, val) _percpu_add(pcp, val)
 
 #define _percpu_and(pcp, val) \
-        __percpu_and(raw_cpu_ptr(&(pcp)), val, sizeof(pcp))
+        _pcp_protect(__percpu_and, pcp, val)
 
 #define _percpu_or(pcp, val) \
-        __percpu_or(raw_cpu_ptr(&(pcp)), val, sizeof(pcp))
-
-#define _percpu_read(pcp) (typeof(pcp)) \
-        (__percpu_read(raw_cpu_ptr(&(pcp)), sizeof(pcp)))
-
-#define _percpu_write(pcp, val) \
-        __percpu_write(raw_cpu_ptr(&(pcp)), (unsigned long)(val), sizeof(pcp))
+        _pcp_protect(__percpu_or, pcp, val)
 
 #define _percpu_xchg(pcp, val) (typeof(pcp)) \
-        (__percpu_xchg(raw_cpu_ptr(&(pcp)), (unsigned long)(val), sizeof(pcp)))
+        _pcp_protect(__percpu_xchg, pcp, (unsigned long)(val))
 
 #define this_cpu_add_1(pcp, val) _percpu_add(pcp, val)
 #define this_cpu_add_2(pcp, val) _percpu_add(pcp, val)
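The percpu.h rework follows the same reasoning: previously _percpu_add(), _percpu_read(), _percpu_write() and friends evaluated raw_cpu_ptr() with preemption still enabled, so a task migrated mid-operation could update the per-CPU copy of the CPU it had just left. Routing the accessors through _pcp_protect() (and the dedicated _percpu_read()/_percpu_write() wrappers) keeps preemption off for the whole access. A short usage sketch with a hypothetical counter (pkt_count is not from the patch):

#include <linux/percpu.h>

/* Hypothetical per-CPU counter, for illustration only. */
static DEFINE_PER_CPU(unsigned long, pkt_count);

static void account_packet(void)
{
        /*
         * With the preempt-safe accessors, this increments the counter of
         * the CPU the task is actually running on, even when called from
         * preemptible context.
         */
        this_cpu_add(pkt_count, 1);
}

static unsigned long read_local_packet_count(void)
{
        /* Single, preempt-protected read of this CPU's counter. */
        return this_cpu_read(pkt_count);
}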