about summary refs log tree commit diff stats
path: root/net
diff options
context:
space:
mode:
authorEric Dumazet <edumazet@google.com>2018-11-08 20:34:27 -0500
committerDavid S. Miller <davem@davemloft.net>2018-11-08 21:40:30 -0500
commit0d5b9311baf27bb545f187f12ecfd558220c607d (patch)
tree73bc70b75089cf84c4d473506bfa59db36a08baa /net
parente12c225258f2584906765234ca6db4ad4c618192 (diff)
inet: frags: better deal with smp races
Multiple cpus might attempt to insert a new fragment in rhashtable, if for example RPS is buggy, as reported by 배석진 in https://patchwork.ozlabs.org/patch/994601/ We use rhashtable_lookup_get_insert_key() instead of rhashtable_insert_fast() to let cpus losing the race free their own inet_frag_queue and use the one that was inserted by another cpu. Fixes: 648700f76b03 ("inet: frags: use rhashtables for reassembly units") Signed-off-by: Eric Dumazet <edumazet@google.com> Reported-by: 배석진 <soukjin.bae@samsung.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net')
-rw-r--r--net/ipv4/inet_fragment.c29
1 file changed, 15 insertions(+), 14 deletions(-)
diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
index bcb11f3a27c0..760a9e52e02b 100644
--- a/net/ipv4/inet_fragment.c
+++ b/net/ipv4/inet_fragment.c
@@ -178,21 +178,22 @@ static struct inet_frag_queue *inet_frag_alloc(struct netns_frags *nf,
178} 178}
179 179
180static struct inet_frag_queue *inet_frag_create(struct netns_frags *nf, 180static struct inet_frag_queue *inet_frag_create(struct netns_frags *nf,
181 void *arg) 181 void *arg,
182 struct inet_frag_queue **prev)
182{ 183{
183 struct inet_frags *f = nf->f; 184 struct inet_frags *f = nf->f;
184 struct inet_frag_queue *q; 185 struct inet_frag_queue *q;
185 int err;
186 186
187 q = inet_frag_alloc(nf, f, arg); 187 q = inet_frag_alloc(nf, f, arg);
188 if (!q) 188 if (!q) {
189 *prev = ERR_PTR(-ENOMEM);
189 return NULL; 190 return NULL;
190 191 }
191 mod_timer(&q->timer, jiffies + nf->timeout); 192 mod_timer(&q->timer, jiffies + nf->timeout);
192 193
193 err = rhashtable_insert_fast(&nf->rhashtable, &q->node, 194 *prev = rhashtable_lookup_get_insert_key(&nf->rhashtable, &q->key,
194 f->rhash_params); 195 &q->node, f->rhash_params);
195 if (err < 0) { 196 if (*prev) {
196 q->flags |= INET_FRAG_COMPLETE; 197 q->flags |= INET_FRAG_COMPLETE;
197 inet_frag_kill(q); 198 inet_frag_kill(q);
198 inet_frag_destroy(q); 199 inet_frag_destroy(q);
@@ -204,22 +205,22 @@ static struct inet_frag_queue *inet_frag_create(struct netns_frags *nf,
204/* TODO : call from rcu_read_lock() and no longer use refcount_inc_not_zero() */ 205/* TODO : call from rcu_read_lock() and no longer use refcount_inc_not_zero() */
205struct inet_frag_queue *inet_frag_find(struct netns_frags *nf, void *key) 206struct inet_frag_queue *inet_frag_find(struct netns_frags *nf, void *key)
206{ 207{
207 struct inet_frag_queue *fq; 208 struct inet_frag_queue *fq = NULL, *prev;
208 209
209 if (!nf->high_thresh || frag_mem_limit(nf) > nf->high_thresh) 210 if (!nf->high_thresh || frag_mem_limit(nf) > nf->high_thresh)
210 return NULL; 211 return NULL;
211 212
212 rcu_read_lock(); 213 rcu_read_lock();
213 214
214 fq = rhashtable_lookup(&nf->rhashtable, key, nf->f->rhash_params); 215 prev = rhashtable_lookup(&nf->rhashtable, key, nf->f->rhash_params);
215 if (fq) { 216 if (!prev)
217 fq = inet_frag_create(nf, key, &prev);
218 if (prev && !IS_ERR(prev)) {
219 fq = prev;
216 if (!refcount_inc_not_zero(&fq->refcnt)) 220 if (!refcount_inc_not_zero(&fq->refcnt))
217 fq = NULL; 221 fq = NULL;
218 rcu_read_unlock();
219 return fq;
220 } 222 }
221 rcu_read_unlock(); 223 rcu_read_unlock();
222 224 return fq;
223 return inet_frag_create(nf, key);
224} 225}
225EXPORT_SYMBOL(inet_frag_find); 226EXPORT_SYMBOL(inet_frag_find);