Diffstat (limited to 'include')
-rw-r--r--  include/linux/netdevice.h             4
-rw-r--r--  include/linux/netfilter/x_tables.h    4
-rw-r--r--  include/linux/percpu.h              190
-rw-r--r--  include/net/snmp.h                   14
4 files changed, 33 insertions, 179 deletions
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index a82ad4dd306a..ca8d9bc4e502 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -2115,7 +2115,7 @@ extern void netdev_run_todo(void);
  */
 static inline void dev_put(struct net_device *dev)
 {
-	irqsafe_cpu_dec(*dev->pcpu_refcnt);
+	this_cpu_dec(*dev->pcpu_refcnt);
 }
 
 /**
@@ -2126,7 +2126,7 @@ static inline void dev_put(struct net_device *dev)
  */
 static inline void dev_hold(struct net_device *dev)
 {
-	irqsafe_cpu_inc(*dev->pcpu_refcnt);
+	this_cpu_inc(*dev->pcpu_refcnt);
 }
 
 /* Carrier loss detection, dial on demand. The functions netif_carrier_on
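
Editor's note, not part of the patch: dev_hold()/dev_put() keep their per-CPU reference count; only the accessor changes, because this_cpu_inc()/this_cpu_dec() become IRQ-safe in the percpu.h hunks below. A minimal sketch of the same pattern, with hypothetical names (my_obj, my_hold, my_put, my_refcnt_read are illustrative, not from the tree):

#include <linux/percpu.h>
#include <linux/cpumask.h>

struct my_obj {
	int __percpu *pcpu_refcnt;		/* from alloc_percpu(int) */
};

static inline void my_hold(struct my_obj *obj)
{
	this_cpu_inc(*obj->pcpu_refcnt);	/* no shared cacheline bouncing */
}

static inline void my_put(struct my_obj *obj)
{
	this_cpu_dec(*obj->pcpu_refcnt);
}

/* The true count only exists as the sum of all per-CPU contributions. */
static int my_refcnt_read(const struct my_obj *obj)
{
	int cpu, refcnt = 0;

	for_each_possible_cpu(cpu)
		refcnt += *per_cpu_ptr(obj->pcpu_refcnt, cpu);
	return refcnt;
}
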
diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h
index 32cddf78b13e..8d674a786744 100644
--- a/include/linux/netfilter/x_tables.h
+++ b/include/linux/netfilter/x_tables.h
@@ -471,7 +471,7 @@ DECLARE_PER_CPU(seqcount_t, xt_recseq);
  *
  * Begin packet processing : all readers must wait the end
  * 1) Must be called with preemption disabled
- * 2) softirqs must be disabled too (or we should use irqsafe_cpu_add())
+ * 2) softirqs must be disabled too (or we should use this_cpu_add())
  * Returns :
  *  1 if no recursion on this cpu
  *  0 if recursion detected
@@ -503,7 +503,7 @@ static inline unsigned int xt_write_recseq_begin(void)
  *
  * End packet processing : all readers can proceed
  * 1) Must be called with preemption disabled
- * 2) softirqs must be disabled too (or we should use irqsafe_cpu_add())
+ * 2) softirqs must be disabled too (or we should use this_cpu_add())
  */
 static inline void xt_write_recseq_end(unsigned int addend)
 {
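
As a hedged usage sketch (simplified, not from this patch): the recseq helpers above are meant to be bracketed by BH-disable, which satisfies both requirements listed in the comments. The function name is illustrative:

#include <linux/netfilter/x_tables.h>
#include <linux/bottom_half.h>

static void example_walk_ruleset(void)		/* illustrative only */
{
	unsigned int addend;

	local_bh_disable();			/* preemption + softirqs off */
	addend = xt_write_recseq_begin();	/* 1 = no recursion, 0 = nested */

	/* ... traverse rules and update per-cpu xt_counters here ... */

	xt_write_recseq_end(addend);
	local_bh_enable();
}
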
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index 9ca008f0c542..32cd1f67462e 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -172,10 +172,10 @@ extern phys_addr_t per_cpu_ptr_to_phys(void *addr);
  * equal char, int or long. percpu_read() evaluates to a lvalue and
  * all others to void.
  *
- * These operations are guaranteed to be atomic w.r.t. preemption.
- * The generic versions use plain get/put_cpu_var(). Archs are
+ * These operations are guaranteed to be atomic.
+ * The generic versions disable interrupts. Archs are
  * encouraged to implement single-instruction alternatives which don't
- * require preemption protection.
+ * require protection.
  */
 #ifndef percpu_read
 # define percpu_read(var)						\
@@ -347,9 +347,10 @@ do { \
 
 #define _this_cpu_generic_to_op(pcp, val, op)				\
 do {									\
-	preempt_disable();						\
+	unsigned long flags;						\
+	local_irq_save(flags);						\
 	*__this_cpu_ptr(&(pcp)) op val;					\
-	preempt_enable();						\
+	local_irq_restore(flags);					\
 } while (0)
 
 #ifndef this_cpu_write
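
The switch from preempt_disable() to local_irq_save() is the core of this series: preemption protection keeps the task on one CPU but still lets an interrupt handler on that CPU slip in between the load and the store of the read-modify-write. A hypothetical illustration (my_stat and the function names are not from the tree):

#include <linux/percpu.h>

DEFINE_PER_CPU(unsigned long, my_stat);		/* hypothetical counter */

static void process_side(void)
{
	/* safe even if my_irq_side() runs between the load and the store */
	this_cpu_add(my_stat, 16);
}

static void my_irq_side(void)			/* called from a hardirq handler */
{
	this_cpu_inc(my_stat);
}
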
@@ -447,10 +448,11 @@ do { \
 #define _this_cpu_generic_add_return(pcp, val)				\
 ({									\
 	typeof(pcp) ret__;						\
-	preempt_disable();						\
+	unsigned long flags;						\
+	local_irq_save(flags);						\
 	__this_cpu_add(pcp, val);					\
 	ret__ = __this_cpu_read(pcp);					\
-	preempt_enable();						\
+	local_irq_restore(flags);					\
 	ret__;								\
 })
 
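
A hedged sketch of what the add_return flavour is good for (my_seq and my_next_seq are illustrative): a per-CPU sequence generator that stays correct even when callers run with interrupts enabled.

#include <linux/percpu.h>

DEFINE_PER_CPU(unsigned long, my_seq);

static unsigned long my_next_seq(void)
{
	/* returns the incremented value, atomically w.r.t. this CPU's IRQs */
	return this_cpu_add_return(my_seq, 1);
}
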
@@ -476,10 +478,11 @@ do { \
 
 #define _this_cpu_generic_xchg(pcp, nval)				\
 ({	typeof(pcp) ret__;						\
-	preempt_disable();						\
+	unsigned long flags;						\
+	local_irq_save(flags);						\
 	ret__ = __this_cpu_read(pcp);					\
 	__this_cpu_write(pcp, nval);					\
-	preempt_enable();						\
+	local_irq_restore(flags);					\
 	ret__;								\
 })
 
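
Likewise for xchg; a hypothetical sketch (my_work, my_pending and my_grab_pending are illustrative) of atomically taking over a per-CPU slot:

#include <linux/percpu.h>

struct my_work;					/* opaque, hypothetical */

DEFINE_PER_CPU(struct my_work *, my_pending);

static struct my_work *my_grab_pending(void)
{
	/* read the old pointer and clear the slot in one IRQ-safe step */
	return this_cpu_xchg(my_pending, NULL);
}
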
@@ -501,12 +504,14 @@ do { \
 #endif
 
 #define _this_cpu_generic_cmpxchg(pcp, oval, nval)			\
-({	typeof(pcp) ret__;						\
-	preempt_disable();						\
+({									\
+	typeof(pcp) ret__;						\
+	unsigned long flags;						\
+	local_irq_save(flags);						\
 	ret__ = __this_cpu_read(pcp);					\
 	if (ret__ == (oval))						\
 		__this_cpu_write(pcp, nval);				\
-	preempt_enable();						\
+	local_irq_restore(flags);					\
 	ret__;								\
 })
 
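
And for cmpxchg, a hedged sketch (my_token and my_try_claim are illustrative) of claiming a per-CPU token without locks:

#include <linux/types.h>
#include <linux/percpu.h>

DEFINE_PER_CPU(int, my_token);			/* 0 = free, 1 = claimed */

static bool my_try_claim(void)
{
	/* the old value is returned; success iff it was still 0 */
	return this_cpu_cmpxchg(my_token, 0, 1) == 0;
}
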
@@ -538,10 +543,11 @@ do { \
 #define _this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
 ({									\
 	int ret__;							\
-	preempt_disable();						\
+	unsigned long flags;						\
+	local_irq_save(flags);						\
 	ret__ = __this_cpu_generic_cmpxchg_double(pcp1, pcp2,		\
 			oval1, oval2, nval1, nval2);			\
-	preempt_enable();						\
+	local_irq_restore(flags);					\
 	ret__;								\
 })
 
@@ -567,9 +573,9 @@ do { \
 #endif
 
 /*
- * Generic percpu operations that do not require preemption handling.
+ * Generic percpu operations for context that are safe from preemption/interrupts.
  * Either we do not care about races or the caller has the
- * responsibility of handling preemptions issues. Arch code can still
+ * responsibility of handling preemption/interrupt issues. Arch code can still
  * override these instructions since the arch per cpu code may be more
  * efficient and may actually get race freeness for free (that is the
  * case for x86 for example).
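
The __this_cpu_*() contract described above, as a hedged sketch (my_rx_bytes and the function name are illustrative): the caller already excludes whatever can race, so the raw ops are enough. This is exactly how the *_BH statistics macros in the snmp.h hunk below use them.

#include <linux/types.h>
#include <linux/percpu.h>

DEFINE_PER_CPU(u64, my_rx_bytes);

static void my_softirq_path(unsigned int len)
{
	/* BH context: no migration, and no hardirq touches this counter */
	__this_cpu_add(my_rx_bytes, len);
}
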
@@ -802,156 +808,4 @@ do { \
 	__pcpu_double_call_return_bool(__this_cpu_cmpxchg_double_, (pcp1), (pcp2), (oval1), (oval2), (nval1), (nval2))
 #endif
 
-/*
- * IRQ safe versions of the per cpu RMW operations. Note that these operations
- * are *not* safe against modification of the same variable from another
- * processors (which one gets when using regular atomic operations)
- * They are guaranteed to be atomic vs. local interrupts and
- * preemption only.
- */
-#define irqsafe_cpu_generic_to_op(pcp, val, op)			\
-do {									\
-	unsigned long flags;						\
-	local_irq_save(flags);						\
-	*__this_cpu_ptr(&(pcp)) op val;					\
-	local_irq_restore(flags);					\
-} while (0)
-
-#ifndef irqsafe_cpu_add
-# ifndef irqsafe_cpu_add_1
-#  define irqsafe_cpu_add_1(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), +=)
-# endif
-# ifndef irqsafe_cpu_add_2
-#  define irqsafe_cpu_add_2(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), +=)
-# endif
-# ifndef irqsafe_cpu_add_4
-#  define irqsafe_cpu_add_4(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), +=)
-# endif
-# ifndef irqsafe_cpu_add_8
-#  define irqsafe_cpu_add_8(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), +=)
-# endif
-# define irqsafe_cpu_add(pcp, val) __pcpu_size_call(irqsafe_cpu_add_, (pcp), (val))
-#endif
-
-#ifndef irqsafe_cpu_sub
-# define irqsafe_cpu_sub(pcp, val)	irqsafe_cpu_add((pcp), -(val))
-#endif
-
-#ifndef irqsafe_cpu_inc
-# define irqsafe_cpu_inc(pcp)	irqsafe_cpu_add((pcp), 1)
-#endif
-
-#ifndef irqsafe_cpu_dec
-# define irqsafe_cpu_dec(pcp)	irqsafe_cpu_sub((pcp), 1)
-#endif
-
-#ifndef irqsafe_cpu_and
-# ifndef irqsafe_cpu_and_1
-#  define irqsafe_cpu_and_1(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), &=)
-# endif
-# ifndef irqsafe_cpu_and_2
-#  define irqsafe_cpu_and_2(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), &=)
-# endif
-# ifndef irqsafe_cpu_and_4
-#  define irqsafe_cpu_and_4(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), &=)
-# endif
-# ifndef irqsafe_cpu_and_8
-#  define irqsafe_cpu_and_8(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), &=)
-# endif
-# define irqsafe_cpu_and(pcp, val) __pcpu_size_call(irqsafe_cpu_and_, (val))
-#endif
-
-#ifndef irqsafe_cpu_or
-# ifndef irqsafe_cpu_or_1
-#  define irqsafe_cpu_or_1(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), |=)
-# endif
-# ifndef irqsafe_cpu_or_2
-#  define irqsafe_cpu_or_2(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), |=)
-# endif
-# ifndef irqsafe_cpu_or_4
-#  define irqsafe_cpu_or_4(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), |=)
-# endif
-# ifndef irqsafe_cpu_or_8
-#  define irqsafe_cpu_or_8(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), |=)
-# endif
-# define irqsafe_cpu_or(pcp, val) __pcpu_size_call(irqsafe_cpu_or_, (val))
-#endif
-
-#ifndef irqsafe_cpu_xor
-# ifndef irqsafe_cpu_xor_1
-#  define irqsafe_cpu_xor_1(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), ^=)
-# endif
-# ifndef irqsafe_cpu_xor_2
-#  define irqsafe_cpu_xor_2(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), ^=)
-# endif
-# ifndef irqsafe_cpu_xor_4
-#  define irqsafe_cpu_xor_4(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), ^=)
-# endif
-# ifndef irqsafe_cpu_xor_8
-#  define irqsafe_cpu_xor_8(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), ^=)
-# endif
-# define irqsafe_cpu_xor(pcp, val) __pcpu_size_call(irqsafe_cpu_xor_, (val))
-#endif
-
-#define irqsafe_cpu_generic_cmpxchg(pcp, oval, nval)			\
-({									\
-	typeof(pcp) ret__;						\
-	unsigned long flags;						\
-	local_irq_save(flags);						\
-	ret__ = __this_cpu_read(pcp);					\
-	if (ret__ == (oval))						\
-		__this_cpu_write(pcp, nval);				\
-	local_irq_restore(flags);					\
-	ret__;								\
-})
-
-#ifndef irqsafe_cpu_cmpxchg
-# ifndef irqsafe_cpu_cmpxchg_1
-#  define irqsafe_cpu_cmpxchg_1(pcp, oval, nval) irqsafe_cpu_generic_cmpxchg(pcp, oval, nval)
-# endif
-# ifndef irqsafe_cpu_cmpxchg_2
-#  define irqsafe_cpu_cmpxchg_2(pcp, oval, nval) irqsafe_cpu_generic_cmpxchg(pcp, oval, nval)
-# endif
-# ifndef irqsafe_cpu_cmpxchg_4
-#  define irqsafe_cpu_cmpxchg_4(pcp, oval, nval) irqsafe_cpu_generic_cmpxchg(pcp, oval, nval)
-# endif
-# ifndef irqsafe_cpu_cmpxchg_8
-#  define irqsafe_cpu_cmpxchg_8(pcp, oval, nval) irqsafe_cpu_generic_cmpxchg(pcp, oval, nval)
-# endif
-# define irqsafe_cpu_cmpxchg(pcp, oval, nval)				\
-	__pcpu_size_call_return2(irqsafe_cpu_cmpxchg_, (pcp), oval, nval)
-#endif
-
-#define irqsafe_generic_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
-({									\
-	int ret__;							\
-	unsigned long flags;						\
-	local_irq_save(flags);						\
-	ret__ = __this_cpu_generic_cmpxchg_double(pcp1, pcp2,		\
-			oval1, oval2, nval1, nval2);			\
-	local_irq_restore(flags);					\
-	ret__;								\
-})
-
-#ifndef irqsafe_cpu_cmpxchg_double
-# ifndef irqsafe_cpu_cmpxchg_double_1
-#  define irqsafe_cpu_cmpxchg_double_1(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
-	irqsafe_generic_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
-# endif
-# ifndef irqsafe_cpu_cmpxchg_double_2
-#  define irqsafe_cpu_cmpxchg_double_2(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
-	irqsafe_generic_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
-# endif
-# ifndef irqsafe_cpu_cmpxchg_double_4
-#  define irqsafe_cpu_cmpxchg_double_4(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
-	irqsafe_generic_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
-# endif
-# ifndef irqsafe_cpu_cmpxchg_double_8
-#  define irqsafe_cpu_cmpxchg_double_8(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
-	irqsafe_generic_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
-# endif
-# define irqsafe_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
-	__pcpu_double_call_return_bool(irqsafe_cpu_cmpxchg_double_, (pcp1), (pcp2), (oval1), (oval2), (nval1), (nval2))
-#endif
-
 #endif /* __LINUX_PERCPU_H */
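
For callers of the removed interface the conversion is mechanical, since this_cpu_*() now gives the same IRQ-safety. A hypothetical before/after (my_counter and my_event are illustrative):

#include <linux/percpu.h>

DEFINE_PER_CPU(unsigned long, my_counter);

static void my_event(void)
{
	/* before this series: irqsafe_cpu_inc(my_counter); */
	this_cpu_inc(my_counter);	/* after: the plain op is already IRQ-safe */
}
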
diff --git a/include/net/snmp.h b/include/net/snmp.h
index 8f0f9ac0307f..e067aed7e378 100644
--- a/include/net/snmp.h
+++ b/include/net/snmp.h
@@ -129,33 +129,33 @@ struct linux_xfrm_mib {
 			__this_cpu_inc(mib[0]->mibs[field])
 
 #define SNMP_INC_STATS_USER(mib, field)	\
-			irqsafe_cpu_inc(mib[0]->mibs[field])
+			this_cpu_inc(mib[0]->mibs[field])
 
 #define SNMP_INC_STATS_ATOMIC_LONG(mib, field)	\
 			atomic_long_inc(&mib->mibs[field])
 
 #define SNMP_INC_STATS(mib, field)	\
-			irqsafe_cpu_inc(mib[0]->mibs[field])
+			this_cpu_inc(mib[0]->mibs[field])
 
 #define SNMP_DEC_STATS(mib, field)	\
-			irqsafe_cpu_dec(mib[0]->mibs[field])
+			this_cpu_dec(mib[0]->mibs[field])
 
 #define SNMP_ADD_STATS_BH(mib, field, addend)	\
 			__this_cpu_add(mib[0]->mibs[field], addend)
 
 #define SNMP_ADD_STATS_USER(mib, field, addend)	\
-			irqsafe_cpu_add(mib[0]->mibs[field], addend)
+			this_cpu_add(mib[0]->mibs[field], addend)
 
 #define SNMP_ADD_STATS(mib, field, addend)	\
-			irqsafe_cpu_add(mib[0]->mibs[field], addend)
+			this_cpu_add(mib[0]->mibs[field], addend)
 /*
  * Use "__typeof__(*mib[0]) *ptr" instead of "__typeof__(mib[0]) ptr"
  * to make @ptr a non-percpu pointer.
  */
 #define SNMP_UPD_PO_STATS(mib, basefield, addend)	\
 	do { \
-		irqsafe_cpu_inc(mib[0]->mibs[basefield##PKTS]);		\
-		irqsafe_cpu_add(mib[0]->mibs[basefield##OCTETS], addend);	\
+		this_cpu_inc(mib[0]->mibs[basefield##PKTS]);		\
+		this_cpu_add(mib[0]->mibs[basefield##OCTETS], addend);	\
 	} while (0)
 #define SNMP_UPD_PO_STATS_BH(mib, basefield, addend)	\
 	do { \
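
A hedged sketch of how the two macro flavours divide the work after this change (all identifiers below are illustrative, not from the tree): the *_BH variants stay on the raw __this_cpu ops because the caller is in softirq context, while the plain/USER variants may be reached with interrupts enabled and therefore now lean on the IRQ-safe this_cpu ops.

#include <linux/percpu.h>
#include <net/snmp.h>

enum { MY_MIB_INPKTS, MY_MIB_INOCTETS, MY_MIB_MAX };

struct my_mib {
	unsigned long mibs[MY_MIB_MAX];
};

/* softirq receive path: BHs already disabled, cheap __this_cpu ops suffice */
static void my_rx_bh(struct my_mib __percpu *mib[], unsigned int len)
{
	SNMP_INC_STATS_BH(mib, MY_MIB_INPKTS);
	SNMP_ADD_STATS_BH(mib, MY_MIB_INOCTETS, len);
}

/* process-context path: interrupts may be on, IRQ-safe this_cpu ops needed */
static void my_tx_user(struct my_mib __percpu *mib[], unsigned int len)
{
	SNMP_INC_STATS_USER(mib, MY_MIB_INPKTS);
	SNMP_ADD_STATS_USER(mib, MY_MIB_INOCTETS, len);
}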