-rw-r--r--   arch/s390/include/asm/percpu.h      |  44
-rw-r--r--   arch/x86/include/asm/percpu.h       |  28
-rw-r--r--   include/linux/netdevice.h           |   4
-rw-r--r--   include/linux/netfilter/x_tables.h  |   4
-rw-r--r--   include/linux/percpu.h              | 190
-rw-r--r--   include/net/snmp.h                  |  14
-rw-r--r--   mm/slub.c                           |   6
-rw-r--r--   net/caif/caif_dev.c                 |   4
-rw-r--r--   net/caif/cffrml.c                   |   4

9 files changed, 62 insertions, 236 deletions
diff --git a/arch/s390/include/asm/percpu.h b/arch/s390/include/asm/percpu.h
index 5325c89a5843..0fbd1899c7b0 100644
--- a/arch/s390/include/asm/percpu.h
+++ b/arch/s390/include/asm/percpu.h
@@ -19,7 +19,7 @@
 #define ARCH_NEEDS_WEAK_PER_CPU
 #endif
 
-#define arch_irqsafe_cpu_to_op(pcp, val, op)			\
+#define arch_this_cpu_to_op(pcp, val, op)			\
 do {								\
 	typedef typeof(pcp) pcp_op_T__;				\
 	pcp_op_T__ old__, new__, prev__;			\
@@ -41,27 +41,27 @@ do {								\
 	preempt_enable();					\
 } while (0)
 
-#define irqsafe_cpu_add_1(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, +)
-#define irqsafe_cpu_add_2(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, +)
-#define irqsafe_cpu_add_4(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, +)
-#define irqsafe_cpu_add_8(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, +)
+#define this_cpu_add_1(pcp, val) arch_this_cpu_to_op(pcp, val, +)
+#define this_cpu_add_2(pcp, val) arch_this_cpu_to_op(pcp, val, +)
+#define this_cpu_add_4(pcp, val) arch_this_cpu_to_op(pcp, val, +)
+#define this_cpu_add_8(pcp, val) arch_this_cpu_to_op(pcp, val, +)
 
-#define irqsafe_cpu_and_1(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, &)
-#define irqsafe_cpu_and_2(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, &)
-#define irqsafe_cpu_and_4(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, &)
-#define irqsafe_cpu_and_8(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, &)
+#define this_cpu_and_1(pcp, val) arch_this_cpu_to_op(pcp, val, &)
+#define this_cpu_and_2(pcp, val) arch_this_cpu_to_op(pcp, val, &)
+#define this_cpu_and_4(pcp, val) arch_this_cpu_to_op(pcp, val, &)
+#define this_cpu_and_8(pcp, val) arch_this_cpu_to_op(pcp, val, &)
 
-#define irqsafe_cpu_or_1(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, |)
-#define irqsafe_cpu_or_2(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, |)
-#define irqsafe_cpu_or_4(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, |)
-#define irqsafe_cpu_or_8(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, |)
+#define this_cpu_or_1(pcp, val) arch_this_cpu_to_op(pcp, val, |)
+#define this_cpu_or_2(pcp, val) arch_this_cpu_to_op(pcp, val, |)
+#define this_cpu_or_4(pcp, val) arch_this_cpu_to_op(pcp, val, |)
+#define this_cpu_or_8(pcp, val) arch_this_cpu_to_op(pcp, val, |)
 
-#define irqsafe_cpu_xor_1(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, ^)
-#define irqsafe_cpu_xor_2(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, ^)
-#define irqsafe_cpu_xor_4(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, ^)
-#define irqsafe_cpu_xor_8(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, ^)
+#define this_cpu_xor_1(pcp, val) arch_this_cpu_to_op(pcp, val, ^)
+#define this_cpu_xor_2(pcp, val) arch_this_cpu_to_op(pcp, val, ^)
+#define this_cpu_xor_4(pcp, val) arch_this_cpu_to_op(pcp, val, ^)
+#define this_cpu_xor_8(pcp, val) arch_this_cpu_to_op(pcp, val, ^)
 
-#define arch_irqsafe_cpu_cmpxchg(pcp, oval, nval)		\
+#define arch_this_cpu_cmpxchg(pcp, oval, nval)			\
 ({								\
 	typedef typeof(pcp) pcp_op_T__;				\
 	pcp_op_T__ ret__;					\
@@ -79,10 +79,10 @@ do {								\
 	ret__;							\
 })
 
-#define irqsafe_cpu_cmpxchg_1(pcp, oval, nval)	arch_irqsafe_cpu_cmpxchg(pcp, oval, nval)
-#define irqsafe_cpu_cmpxchg_2(pcp, oval, nval)	arch_irqsafe_cpu_cmpxchg(pcp, oval, nval)
-#define irqsafe_cpu_cmpxchg_4(pcp, oval, nval)	arch_irqsafe_cpu_cmpxchg(pcp, oval, nval)
-#define irqsafe_cpu_cmpxchg_8(pcp, oval, nval)	arch_irqsafe_cpu_cmpxchg(pcp, oval, nval)
+#define this_cpu_cmpxchg_1(pcp, oval, nval)	arch_this_cpu_cmpxchg(pcp, oval, nval)
+#define this_cpu_cmpxchg_2(pcp, oval, nval)	arch_this_cpu_cmpxchg(pcp, oval, nval)
+#define this_cpu_cmpxchg_4(pcp, oval, nval)	arch_this_cpu_cmpxchg(pcp, oval, nval)
+#define this_cpu_cmpxchg_8(pcp, oval, nval)	arch_this_cpu_cmpxchg(pcp, oval, nval)
 
 #include <asm-generic/percpu.h>
 
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index 3470c9d0ebba..562ccb5323de 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -414,22 +414,6 @@ do {							\
 #define this_cpu_xchg_2(pcp, nval)	percpu_xchg_op(pcp, nval)
 #define this_cpu_xchg_4(pcp, nval)	percpu_xchg_op(pcp, nval)
 
-#define irqsafe_cpu_add_1(pcp, val)		percpu_add_op((pcp), val)
-#define irqsafe_cpu_add_2(pcp, val)		percpu_add_op((pcp), val)
-#define irqsafe_cpu_add_4(pcp, val)		percpu_add_op((pcp), val)
-#define irqsafe_cpu_and_1(pcp, val)		percpu_to_op("and", (pcp), val)
-#define irqsafe_cpu_and_2(pcp, val)		percpu_to_op("and", (pcp), val)
-#define irqsafe_cpu_and_4(pcp, val)		percpu_to_op("and", (pcp), val)
-#define irqsafe_cpu_or_1(pcp, val)		percpu_to_op("or", (pcp), val)
-#define irqsafe_cpu_or_2(pcp, val)		percpu_to_op("or", (pcp), val)
-#define irqsafe_cpu_or_4(pcp, val)		percpu_to_op("or", (pcp), val)
-#define irqsafe_cpu_xor_1(pcp, val)		percpu_to_op("xor", (pcp), val)
-#define irqsafe_cpu_xor_2(pcp, val)		percpu_to_op("xor", (pcp), val)
-#define irqsafe_cpu_xor_4(pcp, val)		percpu_to_op("xor", (pcp), val)
-#define irqsafe_cpu_xchg_1(pcp, nval)		percpu_xchg_op(pcp, nval)
-#define irqsafe_cpu_xchg_2(pcp, nval)		percpu_xchg_op(pcp, nval)
-#define irqsafe_cpu_xchg_4(pcp, nval)		percpu_xchg_op(pcp, nval)
-
 #ifndef CONFIG_M386
 #define __this_cpu_add_return_1(pcp, val)	percpu_add_return_op(pcp, val)
 #define __this_cpu_add_return_2(pcp, val)	percpu_add_return_op(pcp, val)
@@ -445,9 +429,6 @@ do {							\
 #define this_cpu_cmpxchg_2(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
 #define this_cpu_cmpxchg_4(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
 
-#define irqsafe_cpu_cmpxchg_1(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
-#define irqsafe_cpu_cmpxchg_2(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
-#define irqsafe_cpu_cmpxchg_4(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
 #endif /* !CONFIG_M386 */
 
 #ifdef CONFIG_X86_CMPXCHG64
@@ -467,7 +448,6 @@ do {							\
 
 #define __this_cpu_cmpxchg_double_4(pcp1, pcp2, o1, o2, n1, n2)	percpu_cmpxchg8b_double(pcp1, o1, o2, n1, n2)
 #define this_cpu_cmpxchg_double_4(pcp1, pcp2, o1, o2, n1, n2)	percpu_cmpxchg8b_double(pcp1, o1, o2, n1, n2)
-#define irqsafe_cpu_cmpxchg_double_4(pcp1, pcp2, o1, o2, n1, n2)	percpu_cmpxchg8b_double(pcp1, o1, o2, n1, n2)
 #endif /* CONFIG_X86_CMPXCHG64 */
 
 /*
@@ -495,13 +475,6 @@ do {							\
 #define this_cpu_xchg_8(pcp, nval)	percpu_xchg_op(pcp, nval)
 #define this_cpu_cmpxchg_8(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
 
-#define irqsafe_cpu_add_8(pcp, val)		percpu_add_op((pcp), val)
-#define irqsafe_cpu_and_8(pcp, val)		percpu_to_op("and", (pcp), val)
-#define irqsafe_cpu_or_8(pcp, val)		percpu_to_op("or", (pcp), val)
-#define irqsafe_cpu_xor_8(pcp, val)		percpu_to_op("xor", (pcp), val)
-#define irqsafe_cpu_xchg_8(pcp, nval)		percpu_xchg_op(pcp, nval)
-#define irqsafe_cpu_cmpxchg_8(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
-
 /*
  * Pretty complex macro to generate cmpxchg16 instruction.  The instruction
  * is not supported on early AMD64 processors so we must be able to emulate
@@ -532,7 +505,6 @@ do {							\
 
 #define __this_cpu_cmpxchg_double_8(pcp1, pcp2, o1, o2, n1, n2)	percpu_cmpxchg16b_double(pcp1, o1, o2, n1, n2)
 #define this_cpu_cmpxchg_double_8(pcp1, pcp2, o1, o2, n1, n2)	percpu_cmpxchg16b_double(pcp1, o1, o2, n1, n2)
-#define irqsafe_cpu_cmpxchg_double_8(pcp1, pcp2, o1, o2, n1, n2)	percpu_cmpxchg16b_double(pcp1, o1, o2, n1, n2)
 
 #endif
 
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index a82ad4dd306a..ca8d9bc4e502 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -2115,7 +2115,7 @@ extern void netdev_run_todo(void);
  */
 static inline void dev_put(struct net_device *dev)
 {
-	irqsafe_cpu_dec(*dev->pcpu_refcnt);
+	this_cpu_dec(*dev->pcpu_refcnt);
 }
 
 /**
@@ -2126,7 +2126,7 @@ static inline void dev_put(struct net_device *dev)
  */
 static inline void dev_hold(struct net_device *dev)
 {
-	irqsafe_cpu_inc(*dev->pcpu_refcnt);
+	this_cpu_inc(*dev->pcpu_refcnt);
 }
 
 /* Carrier loss detection, dial on demand. The functions netif_carrier_on
diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h
index 32cddf78b13e..8d674a786744 100644
--- a/include/linux/netfilter/x_tables.h
+++ b/include/linux/netfilter/x_tables.h
@@ -471,7 +471,7 @@ DECLARE_PER_CPU(seqcount_t, xt_recseq);
  *
  * Begin packet processing : all readers must wait the end
  * 1) Must be called with preemption disabled
- * 2) softirqs must be disabled too (or we should use irqsafe_cpu_add())
+ * 2) softirqs must be disabled too (or we should use this_cpu_add())
  * Returns :
  *  1 if no recursion on this cpu
  *  0 if recursion detected
@@ -503,7 +503,7 @@ static inline unsigned int xt_write_recseq_begin(void)
  *
  * End packet processing : all readers can proceed
  * 1) Must be called with preemption disabled
- * 2) softirqs must be disabled too (or we should use irqsafe_cpu_add())
+ * 2) softirqs must be disabled too (or we should use this_cpu_add())
  */
 static inline void xt_write_recseq_end(unsigned int addend)
 {
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index 9ca008f0c542..32cd1f67462e 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -172,10 +172,10 @@ extern phys_addr_t per_cpu_ptr_to_phys(void *addr);
  * equal char, int or long. percpu_read() evaluates to a lvalue and
  * all others to void.
  *
- * These operations are guaranteed to be atomic w.r.t. preemption.
- * The generic versions use plain get/put_cpu_var().  Archs are
+ * These operations are guaranteed to be atomic.
+ * The generic versions disable interrupts.  Archs are
  * encouraged to implement single-instruction alternatives which don't
- * require preemption protection.
+ * require protection.
  */
 #ifndef percpu_read
 # define percpu_read(var)					\
@@ -347,9 +347,10 @@ do {							\
 
 #define _this_cpu_generic_to_op(pcp, val, op)			\
 do {								\
-	preempt_disable();					\
+	unsigned long flags;					\
+	local_irq_save(flags);					\
 	*__this_cpu_ptr(&(pcp)) op val;				\
-	preempt_enable();					\
+	local_irq_restore(flags);				\
 } while (0)
 
 #ifndef this_cpu_write
@@ -447,10 +448,11 @@ do {							\
 #define _this_cpu_generic_add_return(pcp, val)			\
 ({								\
 	typeof(pcp) ret__;					\
-	preempt_disable();					\
+	unsigned long flags;					\
+	local_irq_save(flags);					\
 	__this_cpu_add(pcp, val);				\
 	ret__ = __this_cpu_read(pcp);				\
-	preempt_enable();					\
+	local_irq_restore(flags);				\
 	ret__;							\
 })
 
@@ -476,10 +478,11 @@ do {							\
 
 #define _this_cpu_generic_xchg(pcp, nval)			\
 ({	typeof(pcp) ret__;					\
-	preempt_disable();					\
+	unsigned long flags;					\
+	local_irq_save(flags);					\
 	ret__ = __this_cpu_read(pcp);				\
 	__this_cpu_write(pcp, nval);				\
-	preempt_enable();					\
+	local_irq_restore(flags);				\
 	ret__;							\
 })
 
@@ -501,12 +504,14 @@ do {							\
 #endif
 
 #define _this_cpu_generic_cmpxchg(pcp, oval, nval)		\
-({	typeof(pcp) ret__;					\
-	preempt_disable();					\
+({								\
+	typeof(pcp) ret__;					\
+	unsigned long flags;					\
+	local_irq_save(flags);					\
 	ret__ = __this_cpu_read(pcp);				\
 	if (ret__ == (oval))					\
 		__this_cpu_write(pcp, nval);			\
-	preempt_enable();					\
+	local_irq_restore(flags);				\
 	ret__;							\
 })
 
@@ -538,10 +543,11 @@ do {							\
 #define _this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
 ({									\
 	int ret__;							\
-	preempt_disable();						\
+	unsigned long flags;						\
+	local_irq_save(flags);						\
 	ret__ = __this_cpu_generic_cmpxchg_double(pcp1, pcp2,		\
 			oval1, oval2, nval1, nval2);			\
-	preempt_enable();						\
+	local_irq_restore(flags);					\
 	ret__;								\
 })
 
@@ -567,9 +573,9 @@ do {							\
 #endif
 
 /*
- * Generic percpu operations that do not require preemption handling.
+ * Generic percpu operations for context that are safe from preemption/interrupts.
  * Either we do not care about races or the caller has the
- * responsibility of handling preemptions issues. Arch code can still
+ * responsibility of handling preemption/interrupt issues. Arch code can still
  * override these instructions since the arch per cpu code may be more
  * efficient and may actually get race freeness for free (that is the
  * case for x86 for example).
@@ -802,156 +808,4 @@ do {							\
 	__pcpu_double_call_return_bool(__this_cpu_cmpxchg_double_, (pcp1), (pcp2), (oval1), (oval2), (nval1), (nval2))
 #endif
 
-/*
- * IRQ safe versions of the per cpu RMW operations. Note that these operations
- * are *not* safe against modification of the same variable from another
- * processors (which one gets when using regular atomic operations)
- * They are guaranteed to be atomic vs. local interrupts and
- * preemption only.
- */
-#define irqsafe_cpu_generic_to_op(pcp, val, op)			\
-do {								\
-	unsigned long flags;					\
-	local_irq_save(flags);					\
-	*__this_cpu_ptr(&(pcp)) op val;				\
-	local_irq_restore(flags);				\
-} while (0)
-
-#ifndef irqsafe_cpu_add
-# ifndef irqsafe_cpu_add_1
-#  define irqsafe_cpu_add_1(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), +=)
-# endif
-# ifndef irqsafe_cpu_add_2
-#  define irqsafe_cpu_add_2(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), +=)
-# endif
-# ifndef irqsafe_cpu_add_4
-#  define irqsafe_cpu_add_4(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), +=)
-# endif
-# ifndef irqsafe_cpu_add_8
-#  define irqsafe_cpu_add_8(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), +=)
-# endif
-# define irqsafe_cpu_add(pcp, val) __pcpu_size_call(irqsafe_cpu_add_, (pcp), (val))
-#endif
-
-#ifndef irqsafe_cpu_sub
-# define irqsafe_cpu_sub(pcp, val)	irqsafe_cpu_add((pcp), -(val))
-#endif
-
-#ifndef irqsafe_cpu_inc
-# define irqsafe_cpu_inc(pcp)	irqsafe_cpu_add((pcp), 1)
-#endif
-
-#ifndef irqsafe_cpu_dec
-# define irqsafe_cpu_dec(pcp)	irqsafe_cpu_sub((pcp), 1)
-#endif
-
-#ifndef irqsafe_cpu_and
-# ifndef irqsafe_cpu_and_1
-#  define irqsafe_cpu_and_1(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), &=)
-# endif
-# ifndef irqsafe_cpu_and_2
-#  define irqsafe_cpu_and_2(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), &=)
-# endif
-# ifndef irqsafe_cpu_and_4
-#  define irqsafe_cpu_and_4(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), &=)
-# endif
-# ifndef irqsafe_cpu_and_8
-#  define irqsafe_cpu_and_8(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), &=)
-# endif
-# define irqsafe_cpu_and(pcp, val) __pcpu_size_call(irqsafe_cpu_and_, (val))
-#endif
-
-#ifndef irqsafe_cpu_or
-# ifndef irqsafe_cpu_or_1
-#  define irqsafe_cpu_or_1(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), |=)
-# endif
-# ifndef irqsafe_cpu_or_2
-#  define irqsafe_cpu_or_2(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), |=)
-# endif
-# ifndef irqsafe_cpu_or_4
-#  define irqsafe_cpu_or_4(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), |=)
-# endif
-# ifndef irqsafe_cpu_or_8
-#  define irqsafe_cpu_or_8(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), |=)
-# endif
-# define irqsafe_cpu_or(pcp, val) __pcpu_size_call(irqsafe_cpu_or_, (val))
-#endif
-
-#ifndef irqsafe_cpu_xor
-# ifndef irqsafe_cpu_xor_1
-#  define irqsafe_cpu_xor_1(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), ^=)
-# endif
-# ifndef irqsafe_cpu_xor_2
-#  define irqsafe_cpu_xor_2(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), ^=)
-# endif
-# ifndef irqsafe_cpu_xor_4
-#  define irqsafe_cpu_xor_4(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), ^=)
-# endif
-# ifndef irqsafe_cpu_xor_8
-#  define irqsafe_cpu_xor_8(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), ^=)
-# endif
-# define irqsafe_cpu_xor(pcp, val) __pcpu_size_call(irqsafe_cpu_xor_, (val))
-#endif
-
-#define irqsafe_cpu_generic_cmpxchg(pcp, oval, nval)		\
-({								\
-	typeof(pcp) ret__;					\
-	unsigned long flags;					\
-	local_irq_save(flags);					\
-	ret__ = __this_cpu_read(pcp);				\
-	if (ret__ == (oval))					\
-		__this_cpu_write(pcp, nval);			\
-	local_irq_restore(flags);				\
-	ret__;							\
-})
-
-#ifndef irqsafe_cpu_cmpxchg
-# ifndef irqsafe_cpu_cmpxchg_1
-#  define irqsafe_cpu_cmpxchg_1(pcp, oval, nval) irqsafe_cpu_generic_cmpxchg(pcp, oval, nval)
-# endif
-# ifndef irqsafe_cpu_cmpxchg_2
-#  define irqsafe_cpu_cmpxchg_2(pcp, oval, nval) irqsafe_cpu_generic_cmpxchg(pcp, oval, nval)
-# endif
-# ifndef irqsafe_cpu_cmpxchg_4
-#  define irqsafe_cpu_cmpxchg_4(pcp, oval, nval) irqsafe_cpu_generic_cmpxchg(pcp, oval, nval)
-# endif
-# ifndef irqsafe_cpu_cmpxchg_8
-#  define irqsafe_cpu_cmpxchg_8(pcp, oval, nval) irqsafe_cpu_generic_cmpxchg(pcp, oval, nval)
-# endif
-# define irqsafe_cpu_cmpxchg(pcp, oval, nval)		\
-	__pcpu_size_call_return2(irqsafe_cpu_cmpxchg_, (pcp), oval, nval)
-#endif
-
-#define irqsafe_generic_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
-({									\
-	int ret__;							\
-	unsigned long flags;						\
-	local_irq_save(flags);						\
-	ret__ = __this_cpu_generic_cmpxchg_double(pcp1, pcp2,		\
-			oval1, oval2, nval1, nval2);			\
-	local_irq_restore(flags);					\
-	ret__;								\
-})
-
-#ifndef irqsafe_cpu_cmpxchg_double
-# ifndef irqsafe_cpu_cmpxchg_double_1
-#  define irqsafe_cpu_cmpxchg_double_1(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
-	irqsafe_generic_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
-# endif
-# ifndef irqsafe_cpu_cmpxchg_double_2
-#  define irqsafe_cpu_cmpxchg_double_2(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
-	irqsafe_generic_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
-# endif
-# ifndef irqsafe_cpu_cmpxchg_double_4
-#  define irqsafe_cpu_cmpxchg_double_4(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
-	irqsafe_generic_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
-# endif
-# ifndef irqsafe_cpu_cmpxchg_double_8
-#  define irqsafe_cpu_cmpxchg_double_8(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
-	irqsafe_generic_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
-# endif
-# define irqsafe_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
-	__pcpu_double_call_return_bool(irqsafe_cpu_cmpxchg_double_, (pcp1), (pcp2), (oval1), (oval2), (nval1), (nval2))
-#endif
-
 #endif /* __LINUX_PERCPU_H */
diff --git a/include/net/snmp.h b/include/net/snmp.h
index 8f0f9ac0307f..e067aed7e378 100644
--- a/include/net/snmp.h
+++ b/include/net/snmp.h
@@ -129,33 +129,33 @@ struct linux_xfrm_mib {
 			__this_cpu_inc(mib[0]->mibs[field])
 
 #define SNMP_INC_STATS_USER(mib, field)	\
-			irqsafe_cpu_inc(mib[0]->mibs[field])
+			this_cpu_inc(mib[0]->mibs[field])
 
 #define SNMP_INC_STATS_ATOMIC_LONG(mib, field)	\
 			atomic_long_inc(&mib->mibs[field])
 
 #define SNMP_INC_STATS(mib, field)	\
-			irqsafe_cpu_inc(mib[0]->mibs[field])
+			this_cpu_inc(mib[0]->mibs[field])
 
 #define SNMP_DEC_STATS(mib, field)	\
-			irqsafe_cpu_dec(mib[0]->mibs[field])
+			this_cpu_dec(mib[0]->mibs[field])
 
 #define SNMP_ADD_STATS_BH(mib, field, addend)	\
 			__this_cpu_add(mib[0]->mibs[field], addend)
 
 #define SNMP_ADD_STATS_USER(mib, field, addend)	\
-			irqsafe_cpu_add(mib[0]->mibs[field], addend)
+			this_cpu_add(mib[0]->mibs[field], addend)
 
 #define SNMP_ADD_STATS(mib, field, addend)	\
-			irqsafe_cpu_add(mib[0]->mibs[field], addend)
+			this_cpu_add(mib[0]->mibs[field], addend)
 /*
  * Use "__typeof__(*mib[0]) *ptr" instead of "__typeof__(mib[0]) ptr"
  * to make @ptr a non-percpu pointer.
  */
 #define SNMP_UPD_PO_STATS(mib, basefield, addend)	\
 	do { \
-		irqsafe_cpu_inc(mib[0]->mibs[basefield##PKTS]);		\
-		irqsafe_cpu_add(mib[0]->mibs[basefield##OCTETS], addend);	\
+		this_cpu_inc(mib[0]->mibs[basefield##PKTS]);		\
+		this_cpu_add(mib[0]->mibs[basefield##OCTETS], addend);	\
 	} while (0)
 #define SNMP_UPD_PO_STATS_BH(mib, basefield, addend)	\
 	do { \
diff --git a/mm/slub.c b/mm/slub.c
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1978,7 +1978,7 @@ int put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
 		page->pobjects = pobjects;
 		page->next = oldpage;
 
-	} while (irqsafe_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page) != oldpage);
+	} while (this_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page) != oldpage);
 	stat(s, CPU_PARTIAL_FREE);
 	return pobjects;
 }
@@ -2304,7 +2304,7 @@ redo:
 	 * Since this is without lock semantics the protection is only against
 	 * code executing on this cpu *not* from access by other cpus.
 	 */
-	if (unlikely(!irqsafe_cpu_cmpxchg_double(
+	if (unlikely(!this_cpu_cmpxchg_double(
 			s->cpu_slab->freelist, s->cpu_slab->tid,
 			object, tid,
 			get_freepointer_safe(s, object), next_tid(tid)))) {
@@ -2534,7 +2534,7 @@ redo:
 	if (likely(page == c->page)) {
 		set_freepointer(s, object, c->freelist);
 
-		if (unlikely(!irqsafe_cpu_cmpxchg_double(
+		if (unlikely(!this_cpu_cmpxchg_double(
 			s->cpu_slab->freelist, s->cpu_slab->tid,
 			c->freelist, tid,
 			object, next_tid(tid)))) {
diff --git a/net/caif/caif_dev.c b/net/caif/caif_dev.c
index f1fa1f6e658d..64930cc2746a 100644
--- a/net/caif/caif_dev.c
+++ b/net/caif/caif_dev.c
@@ -69,12 +69,12 @@ static struct caif_device_entry_list *caif_device_list(struct net *net)
 
 static void caifd_put(struct caif_device_entry *e)
 {
-	irqsafe_cpu_dec(*e->pcpu_refcnt);
+	this_cpu_dec(*e->pcpu_refcnt);
 }
 
 static void caifd_hold(struct caif_device_entry *e)
 {
-	irqsafe_cpu_inc(*e->pcpu_refcnt);
+	this_cpu_inc(*e->pcpu_refcnt);
 }
 
 static int caifd_refcnt_read(struct caif_device_entry *e)
diff --git a/net/caif/cffrml.c b/net/caif/cffrml.c
index d3ca87bf23b7..0a7df7ef062d 100644
--- a/net/caif/cffrml.c
+++ b/net/caif/cffrml.c
@@ -177,14 +177,14 @@ void cffrml_put(struct cflayer *layr)
 {
 	struct cffrml *this = container_obj(layr);
 	if (layr != NULL && this->pcpu_refcnt != NULL)
-		irqsafe_cpu_dec(*this->pcpu_refcnt);
+		this_cpu_dec(*this->pcpu_refcnt);
 }
 
 void cffrml_hold(struct cflayer *layr)
 {
 	struct cffrml *this = container_obj(layr);
 	if (layr != NULL && this->pcpu_refcnt != NULL)
-		irqsafe_cpu_inc(*this->pcpu_refcnt);
+		this_cpu_inc(*this->pcpu_refcnt);
 }
 
 int cffrml_refcnt_read(struct cflayer *layr)