Diffstat (limited to 'net')
-rw-r--r--	net/ipv4/inet_fragment.c	57
1 file changed, 44 insertions, 13 deletions
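The patch replaces the single writer-side use of f->lock around every chain operation with a per-bucket spinlock: f->lock is now only write-locked by the secret-rebuild timer, while the normal add/remove/lookup paths take it for reading and serialise on the bucket's own lock. The new bucket type itself is not visible here because the diffstat is limited to 'net' (the header change lives under include/), but from the hb->chain and hb->chain_lock accesses below its shape is presumably:

struct inet_frag_bucket {
	struct hlist_head	chain;
	spinlock_t		chain_lock;
};

with f->hash[] becoming an array of these buckets rather than bare hlist_heads.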
diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
index 1206ca64b0ea..e97d66a1fdde 100644
--- a/net/ipv4/inet_fragment.c
+++ b/net/ipv4/inet_fragment.c
@@ -52,20 +52,27 @@ static void inet_frag_secret_rebuild(unsigned long dummy)
 	unsigned long now = jiffies;
 	int i;
 
+	/* Per bucket lock NOT needed here, due to write lock protection */
 	write_lock(&f->lock);
+
 	get_random_bytes(&f->rnd, sizeof(u32));
 	for (i = 0; i < INETFRAGS_HASHSZ; i++) {
+		struct inet_frag_bucket *hb;
 		struct inet_frag_queue *q;
 		struct hlist_node *n;
 
-		hlist_for_each_entry_safe(q, n, &f->hash[i], list) {
+		hb = &f->hash[i];
+		hlist_for_each_entry_safe(q, n, &hb->chain, list) {
 			unsigned int hval = f->hashfn(q);
 
 			if (hval != i) {
+				struct inet_frag_bucket *hb_dest;
+
 				hlist_del(&q->list);
 
 				/* Relink to new hash chain. */
-				hlist_add_head(&q->list, &f->hash[hval]);
+				hb_dest = &f->hash[hval];
+				hlist_add_head(&q->list, &hb_dest->chain);
 			}
 		}
 	}
@@ -78,9 +85,12 @@ void inet_frags_init(struct inet_frags *f)
 {
 	int i;
 
-	for (i = 0; i < INETFRAGS_HASHSZ; i++)
-		INIT_HLIST_HEAD(&f->hash[i]);
+	for (i = 0; i < INETFRAGS_HASHSZ; i++) {
+		struct inet_frag_bucket *hb = &f->hash[i];
 
+		spin_lock_init(&hb->chain_lock);
+		INIT_HLIST_HEAD(&hb->chain);
+	}
 	rwlock_init(&f->lock);
 
 	f->rnd = (u32) ((num_physpages ^ (num_physpages>>7)) ^
@@ -122,9 +132,18 @@ EXPORT_SYMBOL(inet_frags_exit_net);
 
 static inline void fq_unlink(struct inet_frag_queue *fq, struct inet_frags *f)
 {
-	write_lock(&f->lock);
+	struct inet_frag_bucket *hb;
+	unsigned int hash;
+
+	read_lock(&f->lock);
+	hash = f->hashfn(fq);
+	hb = &f->hash[hash];
+
+	spin_lock(&hb->chain_lock);
 	hlist_del(&fq->list);
-	write_unlock(&f->lock);
+	spin_unlock(&hb->chain_lock);
+
+	read_unlock(&f->lock);
 	inet_frag_lru_del(fq);
 }
 
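As a rough illustration of the locking scheme fq_unlink() now follows (read-lock the table-wide lock so the hash seed stays stable, then take only the affected bucket's lock), here is a minimal userspace sketch. It is not part of the patch; pthreads stand in for the kernel primitives, and the names (struct bucket, struct frag, seed_lock, hashfn) are made up for the example.

#include <pthread.h>
#include <stddef.h>

#define HASHSZ 64

struct frag {
	struct frag *next;
	unsigned int key;
};

struct bucket {
	pthread_spinlock_t chain_lock;	/* plays the role of hb->chain_lock */
	struct frag *chain;		/* plays the role of hb->chain */
};

static struct bucket hash[HASHSZ];
static pthread_rwlock_t seed_lock = PTHREAD_RWLOCK_INITIALIZER; /* role of f->lock */
static unsigned int seed;		/* role of f->rnd */

static unsigned int hashfn(unsigned int key)
{
	return (key ^ seed) % HASHSZ;
}

static void hash_init(void)		/* counterpart of inet_frags_init() */
{
	for (int i = 0; i < HASHSZ; i++) {
		pthread_spin_init(&hash[i].chain_lock, PTHREAD_PROCESS_PRIVATE);
		hash[i].chain = NULL;
	}
}

static void unlink_frag(struct frag *q)	/* counterpart of fq_unlink() */
{
	struct bucket *hb;
	struct frag **pp;

	pthread_rwlock_rdlock(&seed_lock);	/* seed cannot change under us */
	hb = &hash[hashfn(q->key)];

	pthread_spin_lock(&hb->chain_lock);	/* only this bucket is serialised */
	for (pp = &hb->chain; *pp; pp = &(*pp)->next) {
		if (*pp == q) {
			*pp = q->next;
			break;
		}
	}
	pthread_spin_unlock(&hb->chain_lock);

	pthread_rwlock_unlock(&seed_lock);
}

A seed rebuild would take seed_lock for writing, which excludes every read-side path at once and therefore needs no per-bucket locking, as the comment added to inet_frag_secret_rebuild() above notes.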
@@ -226,27 +245,32 @@ static struct inet_frag_queue *inet_frag_intern(struct netns_frags *nf,
 		struct inet_frag_queue *qp_in, struct inet_frags *f,
 		void *arg)
 {
+	struct inet_frag_bucket *hb;
 	struct inet_frag_queue *qp;
 #ifdef CONFIG_SMP
 #endif
 	unsigned int hash;
 
-	write_lock(&f->lock);
+	read_lock(&f->lock); /* Protects against hash rebuild */
 	/*
 	 * While we stayed w/o the lock other CPU could update
 	 * the rnd seed, so we need to re-calculate the hash
 	 * chain. Fortunatelly the qp_in can be used to get one.
 	 */
 	hash = f->hashfn(qp_in);
+	hb = &f->hash[hash];
+	spin_lock(&hb->chain_lock);
+
 #ifdef CONFIG_SMP
 	/* With SMP race we have to recheck hash table, because
 	 * such entry could be created on other cpu, while we
-	 * promoted read lock to write lock.
+	 * released the hash bucket lock.
 	 */
-	hlist_for_each_entry(qp, &f->hash[hash], list) {
+	hlist_for_each_entry(qp, &hb->chain, list) {
 		if (qp->net == nf && f->match(qp, arg)) {
 			atomic_inc(&qp->refcnt);
-			write_unlock(&f->lock);
+			spin_unlock(&hb->chain_lock);
+			read_unlock(&f->lock);
 			qp_in->last_in |= INET_FRAG_COMPLETE;
 			inet_frag_put(qp_in, f);
 			return qp;
@@ -258,8 +282,9 @@ static struct inet_frag_queue *inet_frag_intern(struct netns_frags *nf,
 	atomic_inc(&qp->refcnt);
 
 	atomic_inc(&qp->refcnt);
-	hlist_add_head(&qp->list, &f->hash[hash]);
-	write_unlock(&f->lock);
+	hlist_add_head(&qp->list, &hb->chain);
+	spin_unlock(&hb->chain_lock);
+	read_unlock(&f->lock);
 	inet_frag_lru_add(nf, qp);
 	return qp;
 }
@@ -300,17 +325,23 @@ struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
 		struct inet_frags *f, void *key, unsigned int hash)
 	__releases(&f->lock)
 {
+	struct inet_frag_bucket *hb;
 	struct inet_frag_queue *q;
 	int depth = 0;
 
-	hlist_for_each_entry(q, &f->hash[hash], list) {
+	hb = &f->hash[hash];
+
+	spin_lock(&hb->chain_lock);
+	hlist_for_each_entry(q, &hb->chain, list) {
 		if (q->net == nf && f->match(q, key)) {
 			atomic_inc(&q->refcnt);
+			spin_unlock(&hb->chain_lock);
 			read_unlock(&f->lock);
 			return q;
 		}
 		depth++;
 	}
+	spin_unlock(&hb->chain_lock);
 	read_unlock(&f->lock);
 
 	if (depth <= INETFRAGS_MAXDEPTH)