author      Shlomo Pongratz <shlomop@mellanox.com>    2014-02-02 08:42:10 -0500
committer   David S. Miller <davem@davemloft.net>     2014-02-04 23:01:55 -0500
commit      a664a4f7aa4f01ca8728e3ec43618327416fc8ff (patch)
tree        62866bf915a2074380ec6c066ee7330804b39cda /net/ipv4
parent      9330ed16da840b1b68cd99600d403b6f3e9fa4e2 (diff)
net/ipv4: Use proper RCU APIs for writer-side in udp_offload.c
RCU writer side should use rcu_dereference_protected() and not
rcu_dereference(), fix that. This also removes the "suspicious RCU usage"
warning seen when running with CONFIG_PROVE_RCU.
Also, don't use rcu_assign_pointer/rcu_dereference for pointers
which are invisible beyond the udp offload code.
Fixes: b582ef0 ('net: Add GRO support for UDP encapsulating protocols')
Reported-by: Eric Dumazet <edumazet@google.com>
Cc: Eric Dumazet <edumazet@google.com>
Signed-off-by: Or Gerlitz <ogerlitz@mellanox.com>
Signed-off-by: Shlomo Pongratz <shlomop@mellanox.com>
Acked-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
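
For context on the API split the message above refers to, here is a minimal, self-contained sketch of the pattern (the names my_lock, my_base, my_node, my_add() and my_find() are purely illustrative, not kernel symbols): readers walk the list inside rcu_read_lock()/rcu_read_unlock() with rcu_dereference(), while a writer that already holds the spinlock dereferences with rcu_dereference_protected() plus a matching lockdep expression; a pointer in a node that has not been published yet only needs a plain store, and rcu_assign_pointer() is reserved for the store that makes the node visible to readers. This is the same division the diff below establishes with the udp_deref_protected() helper.

#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/errno.h>

struct my_node {
	int val;
	struct my_node __rcu *next;
	struct rcu_head rcu;
};

static DEFINE_SPINLOCK(my_lock);
static struct my_node __rcu *my_base;

/* Writer-side dereference: caller must hold my_lock. */
#define my_deref_protected(p) \
	rcu_dereference_protected(p, lockdep_is_held(&my_lock))

static int my_add(int val)
{
	struct my_node *n = kzalloc(sizeof(*n), GFP_ATOMIC);

	if (!n)
		return -ENOMEM;
	n->val = val;

	spin_lock(&my_lock);
	/* n is not yet visible to readers, so a plain store is enough here ... */
	n->next = my_base;
	/* ... and only the publishing store needs rcu_assign_pointer(). */
	rcu_assign_pointer(my_base, n);
	spin_unlock(&my_lock);
	return 0;
}

/* Reader side: runs under rcu_read_lock(), so rcu_dereference() is correct. */
static bool my_find(int val)
{
	struct my_node *n;
	bool found = false;

	rcu_read_lock();
	for (n = rcu_dereference(my_base); n; n = rcu_dereference(n->next)) {
		if (n->val == val) {
			found = true;
			break;
		}
	}
	rcu_read_unlock();
	return found;
}

With CONFIG_PROVE_RCU enabled, lockdep checks the condition passed to rcu_dereference_protected(), which is why switching the writer away from bare rcu_dereference() also silences the "suspicious RCU usage" splat mentioned above.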
Diffstat (limited to 'net/ipv4')
-rw-r--r--  net/ipv4/udp_offload.c | 15
1 file changed, 8 insertions(+), 7 deletions(-)
diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
index 2ffea6f31efc..88b4023ecfcf 100644
--- a/net/ipv4/udp_offload.c
+++ b/net/ipv4/udp_offload.c
@@ -17,6 +17,8 @@
 static DEFINE_SPINLOCK(udp_offload_lock);
 static struct udp_offload_priv __rcu *udp_offload_base __read_mostly;
 
+#define udp_deref_protected(X) rcu_dereference_protected(X, lockdep_is_held(&udp_offload_lock))
+
 struct udp_offload_priv {
 	struct udp_offload	*offload;
 	struct rcu_head		rcu;
@@ -100,7 +102,6 @@ out:
 
 int udp_add_offload(struct udp_offload *uo)
 {
-	struct udp_offload_priv __rcu **head = &udp_offload_base;
 	struct udp_offload_priv *new_offload = kzalloc(sizeof(*new_offload), GFP_ATOMIC);
 
 	if (!new_offload)
@@ -109,8 +110,8 @@ int udp_add_offload(struct udp_offload *uo)
 	new_offload->offload = uo;
 
 	spin_lock(&udp_offload_lock);
-	rcu_assign_pointer(new_offload->next, rcu_dereference(*head));
-	rcu_assign_pointer(*head, new_offload);
+	new_offload->next = udp_offload_base;
+	rcu_assign_pointer(udp_offload_base, new_offload);
 	spin_unlock(&udp_offload_lock);
 
 	return 0;
@@ -130,12 +131,12 @@ void udp_del_offload(struct udp_offload *uo)
 
 	spin_lock(&udp_offload_lock);
 
-	uo_priv = rcu_dereference(*head);
+	uo_priv = udp_deref_protected(*head);
 	for (; uo_priv != NULL;
-	     uo_priv = rcu_dereference(*head)) {
-
+	     uo_priv = udp_deref_protected(*head)) {
 		if (uo_priv->offload == uo) {
-			rcu_assign_pointer(*head, rcu_dereference(uo_priv->next));
+			rcu_assign_pointer(*head,
+					   udp_deref_protected(uo_priv->next));
 			goto unlock;
 		}
 		head = &uo_priv->next;
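
For completeness, the removal path follows the same discipline as the patched udp_del_offload() above: every link is read through the protected helper while the lock is held, the unlink itself is the only rcu_assign_pointer(), and the node is freed only after a grace period. A sketch continuing the illustrative my_* names from the earlier example (the in-tree code defers the free with call_rcu(); kfree_rcu() is used here only to keep the sketch short):

static void my_del(int val)
{
	struct my_node __rcu **pp = &my_base;
	struct my_node *n;

	spin_lock(&my_lock);
	for (n = my_deref_protected(*pp); n; n = my_deref_protected(*pp)) {
		if (n->val == val) {
			/* Unlink: concurrent readers either still see n or already skip it. */
			rcu_assign_pointer(*pp, my_deref_protected(n->next));
			spin_unlock(&my_lock);
			/* Free only after all pre-existing readers are done. */
			kfree_rcu(n, rcu);
			return;
		}
		pp = &n->next;
	}
	spin_unlock(&my_lock);
}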