Diffstat (limited to 'include')

-rw-r--r--  include/linux/interrupt.h   |  1
-rw-r--r--  include/linux/rcupdate.h    | 70
-rw-r--r--  include/linux/rcutiny.h     |  8
-rw-r--r--  include/linux/rcutree.h     | 13
-rw-r--r--  include/net/sctp/sctp.h     |  1
-rw-r--r--  include/trace/events/irq.h  |  3

6 files changed, 91 insertions, 5 deletions
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index bea0ac750712..6c12989839d9 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -414,7 +414,6 @@ enum
 	TASKLET_SOFTIRQ,
 	SCHED_SOFTIRQ,
 	HRTIMER_SOFTIRQ,
-	RCU_SOFTIRQ,	/* Preferable RCU should always be the last softirq */
 
 	NR_SOFTIRQS
 };
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index ff422d2b7f90..99f9aa7c2804 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -47,6 +47,18 @@
 extern int rcutorture_runnable; /* for sysctl */
 #endif /* #ifdef CONFIG_RCU_TORTURE_TEST */
 
+#if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU)
+extern void rcutorture_record_test_transition(void);
+extern void rcutorture_record_progress(unsigned long vernum);
+#else
+static inline void rcutorture_record_test_transition(void)
+{
+}
+static inline void rcutorture_record_progress(unsigned long vernum)
+{
+}
+#endif
+
 #define UINT_CMP_GE(a, b)	(UINT_MAX / 2 >= (a) - (b))
 #define UINT_CMP_LT(a, b)	(UINT_MAX / 2 < (a) - (b))
 #define ULONG_CMP_GE(a, b)	(ULONG_MAX / 2 >= (a) - (b))
@@ -68,7 +80,6 @@ extern void call_rcu_sched(struct rcu_head *head,
 extern void synchronize_sched(void);
 extern void rcu_barrier_bh(void);
 extern void rcu_barrier_sched(void);
-extern int sched_expedited_torture_stats(char *page);
 
 static inline void __rcu_read_lock_bh(void)
 {
@@ -774,6 +785,7 @@ extern struct debug_obj_descr rcuhead_debug_descr;
 
 static inline void debug_rcu_head_queue(struct rcu_head *head)
 {
+	WARN_ON_ONCE((unsigned long)head & 0x3);
 	debug_object_activate(head, &rcuhead_debug_descr);
 	debug_object_active_state(head, &rcuhead_debug_descr,
 				  STATE_RCU_HEAD_READY,
@@ -797,4 +809,60 @@ static inline void debug_rcu_head_unqueue(struct rcu_head *head)
 }
 #endif	/* #else !CONFIG_DEBUG_OBJECTS_RCU_HEAD */
 
+static __always_inline bool __is_kfree_rcu_offset(unsigned long offset)
+{
+	return offset < 4096;
+}
+
+static __always_inline
+void __kfree_rcu(struct rcu_head *head, unsigned long offset)
+{
+	typedef void (*rcu_callback)(struct rcu_head *);
+
+	BUILD_BUG_ON(!__builtin_constant_p(offset));
+
+	/* See the kfree_rcu() header comment. */
+	BUILD_BUG_ON(!__is_kfree_rcu_offset(offset));
+
+	call_rcu(head, (rcu_callback)offset);
+}
+
+extern void kfree(const void *);
+
+static inline void __rcu_reclaim(struct rcu_head *head)
+{
+	unsigned long offset = (unsigned long)head->func;
+
+	if (__is_kfree_rcu_offset(offset))
+		kfree((void *)head - offset);
+	else
+		head->func(head);
+}
+
+/**
+ * kfree_rcu() - kfree an object after a grace period.
+ * @ptr:	pointer to kfree
+ * @rcu_head:	the name of the struct rcu_head within the type of @ptr.
+ *
+ * Many rcu callbacks functions just call kfree() on the base structure.
+ * These functions are trivial, but their size adds up, and furthermore
+ * when they are used in a kernel module, that module must invoke the
+ * high-latency rcu_barrier() function at module-unload time.
+ *
+ * The kfree_rcu() function handles this issue.  Rather than encoding a
+ * function address in the embedded rcu_head structure, kfree_rcu() instead
+ * encodes the offset of the rcu_head structure within the base structure.
+ * Because the functions are not allowed in the low-order 4096 bytes of
+ * kernel virtual memory, offsets up to 4095 bytes can be accommodated.
+ * If the offset is larger than 4095 bytes, a compile-time error will
+ * be generated in __kfree_rcu().  If this error is triggered, you can
+ * either fall back to use of call_rcu() or rearrange the structure to
+ * position the rcu_head structure into the first 4096 bytes.
+ *
+ * Note that the allowable offset might decrease in the future, for example,
+ * to allow something like kmem_cache_free_rcu().
+ */
+#define kfree_rcu(ptr, rcu_head)					\
+	__kfree_rcu(&((ptr)->rcu_head), offsetof(typeof(*(ptr)), rcu_head))
+
 #endif /* __LINUX_RCUPDATE_H */
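The kernel-doc comment above explains the offset-encoding trick that kfree_rcu() relies on, but the patch itself contains no caller. Below is a minimal sketch of the intended usage, assuming a hypothetical RCU-protected structure struct foo with an embedded rcu_head; the structure, its fields, and the free_foo_rcu()/old_style_free()/new_style_free() helpers are illustrative only and are not part of this change.

/* Hypothetical RCU-protected object; names are illustrative only. */
struct foo {
	int data;
	struct rcu_head rcu;	/* must lie within the first 4096 bytes */
};

/* Before kfree_rcu(): each user needed a trivial callback like this,
 * and a module containing it had to run rcu_barrier() on unload. */
static void free_foo_rcu(struct rcu_head *head)
{
	kfree(container_of(head, struct foo, rcu));
}

static void old_style_free(struct foo *fp)
{
	call_rcu(&fp->rcu, free_foo_rcu);
}

/* With this patch: no callback at all.  kfree_rcu() passes
 * offsetof(struct foo, rcu) in place of a function pointer, and
 * __rcu_reclaim() later turns that offset back into kfree(fp). */
static void new_style_free(struct foo *fp)
{
	kfree_rcu(fp, rcu);
}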
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index 30ebd7c8d874..52b3e0281fd0 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -100,6 +100,14 @@ static inline void rcu_note_context_switch(int cpu)
 }
 
 /*
+ * Take advantage of the fact that there is only one CPU, which
+ * allows us to ignore virtualization-based context switches.
+ */
+static inline void rcu_virt_note_context_switch(int cpu)
+{
+}
+
+/*
  * Return the number of grace periods.
  */
 static inline long rcu_batches_completed(void)
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index 3a933482734a..e65d06634dd8 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -35,6 +35,16 @@ extern void rcu_note_context_switch(int cpu);
 extern int rcu_needs_cpu(int cpu);
 extern void rcu_cpu_stall_reset(void);
 
+/*
+ * Note a virtualization-based context switch.  This is simply a
+ * wrapper around rcu_note_context_switch(), which allows TINY_RCU
+ * to save a few bytes.
+ */
+static inline void rcu_virt_note_context_switch(int cpu)
+{
+	rcu_note_context_switch(cpu);
+}
+
 #ifdef CONFIG_TREE_PREEMPT_RCU
 
 extern void exit_rcu(void);
@@ -58,9 +68,12 @@ static inline void synchronize_rcu_bh_expedited(void)
 
 extern void rcu_barrier(void);
 
+extern unsigned long rcutorture_testseq;
+extern unsigned long rcutorture_vernum;
 extern long rcu_batches_completed(void);
 extern long rcu_batches_completed_bh(void);
 extern long rcu_batches_completed_sched(void);
+
 extern void rcu_force_quiescent_state(void);
 extern void rcu_bh_force_quiescent_state(void);
 extern void rcu_sched_force_quiescent_state(void);
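The comments added to rcutiny.h and rcutree.h above describe what rcu_virt_note_context_switch() does, but not where it is meant to be called. The sketch below shows the expected call-site pattern on a hypervisor's guest-entry path; the surrounding function is hypothetical and only the rcu_virt_note_context_switch() call itself comes from this patch.

/* Hypothetical hypervisor guest-entry path (not part of this patch). */
static void example_enter_guest(void)
{
	/*
	 * Entering a guest is a context switch as far as RCU is
	 * concerned: with TREE_RCU this notes the switch so grace
	 * periods can make progress, with TINY_RCU it compiles away.
	 */
	rcu_virt_note_context_switch(smp_processor_id());

	/* ... the actual switch into guest mode would happen here ... */
}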
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
index 505845ddb0be..01e094c6d0ae 100644
--- a/include/net/sctp/sctp.h
+++ b/include/net/sctp/sctp.h
@@ -115,7 +115,6 @@
  * sctp/protocol.c
  */
 extern struct sock *sctp_get_ctl_sock(void);
-extern void sctp_local_addr_free(struct rcu_head *head);
 extern int sctp_copy_local_addr_list(struct sctp_bind_addr *,
 				     sctp_scope_t, gfp_t gfp,
 				     int flags);
diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
index 1c09820df585..ae045ca7d356 100644
--- a/include/trace/events/irq.h
+++ b/include/trace/events/irq.h
@@ -20,8 +20,7 @@ struct softirq_action;
 			 softirq_name(BLOCK_IOPOLL),	\
 			 softirq_name(TASKLET),		\
 			 softirq_name(SCHED),		\
-			 softirq_name(HRTIMER),		\
-			 softirq_name(RCU))
+			 softirq_name(HRTIMER))
 
 /**
  * irq_handler_entry - called immediately before the irq action handler