diff options
| author | Yasuyuki Kozakai <yasuyuki.kozakai@toshiba.co.jp> | 2005-11-14 18:28:18 -0500 |
|---|---|---|
| committer | David S. Miller <davem@davemloft.net> | 2005-11-14 18:28:18 -0500 |
| commit | 1ba430bc3e243d38c0bb2b185bea664b04fc59df (patch) | |
| tree | f675bb89f3ac344ddf25ecbdf3a89484a7f3ac4f | |
| parent | 7686a02c0ebc11e4f881fe14db3df18569b7dbc1 (diff) | |
[NETFILTER] nf_conntrack: fix possibility of infinite loop while evicting nf_ct_frag6_queue
This synchronizes nf_ct_reasm with ipv6 reassembly, and fixes a possibility
of an infinite loop if CPUs evict and create nf_ct_frag6_queue in parallel.
Signed-off-by: Yasuyuki Kozakai <yasuyuki.kozakai@toshiba.co.jp>
Signed-off-by: Harald Welte <laforge@netfilter.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
| -rw-r--r-- | net/ipv6/netfilter/nf_conntrack_reasm.c | 42 |
1 file changed, 26 insertions(+), 16 deletions(-)
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c index ed7603fe5fe3..1b68d714c0a4 100644 --- a/net/ipv6/netfilter/nf_conntrack_reasm.c +++ b/net/ipv6/netfilter/nf_conntrack_reasm.c | |||
| @@ -190,8 +190,10 @@ static void nf_ct_frag6_secret_rebuild(unsigned long dummy) | |||
| 190 | atomic_t nf_ct_frag6_mem = ATOMIC_INIT(0); | 190 | atomic_t nf_ct_frag6_mem = ATOMIC_INIT(0); |
| 191 | 191 | ||
| 192 | /* Memory Tracking Functions. */ | 192 | /* Memory Tracking Functions. */ |
| 193 | static inline void frag_kfree_skb(struct sk_buff *skb) | 193 | static inline void frag_kfree_skb(struct sk_buff *skb, unsigned int *work) |
| 194 | { | 194 | { |
| 195 | if (work) | ||
| 196 | *work -= skb->truesize; | ||
| 195 | atomic_sub(skb->truesize, &nf_ct_frag6_mem); | 197 | atomic_sub(skb->truesize, &nf_ct_frag6_mem); |
| 196 | if (NFCT_FRAG6_CB(skb)->orig) | 198 | if (NFCT_FRAG6_CB(skb)->orig) |
| 197 | kfree_skb(NFCT_FRAG6_CB(skb)->orig); | 199 | kfree_skb(NFCT_FRAG6_CB(skb)->orig); |
| @@ -199,8 +201,11 @@ static inline void frag_kfree_skb(struct sk_buff *skb) | |||
| 199 | kfree_skb(skb); | 201 | kfree_skb(skb); |
| 200 | } | 202 | } |
| 201 | 203 | ||
| 202 | static inline void frag_free_queue(struct nf_ct_frag6_queue *fq) | 204 | static inline void frag_free_queue(struct nf_ct_frag6_queue *fq, |
| 205 | unsigned int *work) | ||
| 203 | { | 206 | { |
| 207 | if (work) | ||
| 208 | *work -= sizeof(struct nf_ct_frag6_queue); | ||
| 204 | atomic_sub(sizeof(struct nf_ct_frag6_queue), &nf_ct_frag6_mem); | 209 | atomic_sub(sizeof(struct nf_ct_frag6_queue), &nf_ct_frag6_mem); |
| 205 | kfree(fq); | 210 | kfree(fq); |
| 206 | } | 211 | } |
| @@ -218,7 +223,8 @@ static inline struct nf_ct_frag6_queue *frag_alloc_queue(void) | |||
| 218 | /* Destruction primitives. */ | 223 | /* Destruction primitives. */ |
| 219 | 224 | ||
| 220 | /* Complete destruction of fq. */ | 225 | /* Complete destruction of fq. */ |
| 221 | static void nf_ct_frag6_destroy(struct nf_ct_frag6_queue *fq) | 226 | static void nf_ct_frag6_destroy(struct nf_ct_frag6_queue *fq, |
| 227 | unsigned int *work) | ||
| 222 | { | 228 | { |
| 223 | struct sk_buff *fp; | 229 | struct sk_buff *fp; |
| 224 | 230 | ||
| @@ -230,17 +236,17 @@ static void nf_ct_frag6_destroy(struct nf_ct_frag6_queue *fq) | |||
| 230 | while (fp) { | 236 | while (fp) { |
| 231 | struct sk_buff *xp = fp->next; | 237 | struct sk_buff *xp = fp->next; |
| 232 | 238 | ||
| 233 | frag_kfree_skb(fp); | 239 | frag_kfree_skb(fp, work); |
| 234 | fp = xp; | 240 | fp = xp; |
| 235 | } | 241 | } |
| 236 | 242 | ||
| 237 | frag_free_queue(fq); | 243 | frag_free_queue(fq, work); |
| 238 | } | 244 | } |
| 239 | 245 | ||
| 240 | static __inline__ void fq_put(struct nf_ct_frag6_queue *fq) | 246 | static __inline__ void fq_put(struct nf_ct_frag6_queue *fq, unsigned int *work) |
| 241 | { | 247 | { |
| 242 | if (atomic_dec_and_test(&fq->refcnt)) | 248 | if (atomic_dec_and_test(&fq->refcnt)) |
| 243 | nf_ct_frag6_destroy(fq); | 249 | nf_ct_frag6_destroy(fq, work); |
| 244 | } | 250 | } |
| 245 | 251 | ||
| 246 | /* Kill fq entry. It is not destroyed immediately, | 252 | /* Kill fq entry. It is not destroyed immediately, |
| @@ -262,10 +268,14 @@ static void nf_ct_frag6_evictor(void) | |||
| 262 | { | 268 | { |
| 263 | struct nf_ct_frag6_queue *fq; | 269 | struct nf_ct_frag6_queue *fq; |
| 264 | struct list_head *tmp; | 270 | struct list_head *tmp; |
| 271 | unsigned int work; | ||
| 265 | 272 | ||
| 266 | for (;;) { | 273 | work = atomic_read(&nf_ct_frag6_mem); |
| 267 | if (atomic_read(&nf_ct_frag6_mem) <= nf_ct_frag6_low_thresh) | 274 | if (work <= nf_ct_frag6_low_thresh) |
| 268 | return; | 275 | return; |
| 276 | |||
| 277 | work -= nf_ct_frag6_low_thresh; | ||
| 278 | while (work > 0) { | ||
| 269 | read_lock(&nf_ct_frag6_lock); | 279 | read_lock(&nf_ct_frag6_lock); |
| 270 | if (list_empty(&nf_ct_frag6_lru_list)) { | 280 | if (list_empty(&nf_ct_frag6_lru_list)) { |
| 271 | read_unlock(&nf_ct_frag6_lock); | 281 | read_unlock(&nf_ct_frag6_lock); |
| @@ -281,7 +291,7 @@ static void nf_ct_frag6_evictor(void) | |||
| 281 | fq_kill(fq); | 291 | fq_kill(fq); |
| 282 | spin_unlock(&fq->lock); | 292 | spin_unlock(&fq->lock); |
| 283 | 293 | ||
| 284 | fq_put(fq); | 294 | fq_put(fq, &work); |
| 285 | } | 295 | } |
| 286 | } | 296 | } |
| 287 | 297 | ||
| @@ -298,7 +308,7 @@ static void nf_ct_frag6_expire(unsigned long data) | |||
| 298 | 308 | ||
| 299 | out: | 309 | out: |
| 300 | spin_unlock(&fq->lock); | 310 | spin_unlock(&fq->lock); |
| 301 | fq_put(fq); | 311 | fq_put(fq, NULL); |
| 302 | } | 312 | } |
| 303 | 313 | ||
| 304 | /* Creation primitives. */ | 314 | /* Creation primitives. */ |
| @@ -318,7 +328,7 @@ static struct nf_ct_frag6_queue *nf_ct_frag6_intern(unsigned int hash, | |||
| 318 | atomic_inc(&fq->refcnt); | 328 | atomic_inc(&fq->refcnt); |
| 319 | write_unlock(&nf_ct_frag6_lock); | 329 | write_unlock(&nf_ct_frag6_lock); |
| 320 | fq_in->last_in |= COMPLETE; | 330 | fq_in->last_in |= COMPLETE; |
| 321 | fq_put(fq_in); | 331 | fq_put(fq_in, NULL); |
| 322 | return fq; | 332 | return fq; |
| 323 | } | 333 | } |
| 324 | } | 334 | } |
| @@ -535,7 +545,7 @@ static int nf_ct_frag6_queue(struct nf_ct_frag6_queue *fq, struct sk_buff *skb, | |||
| 535 | fq->fragments = next; | 545 | fq->fragments = next; |
| 536 | 546 | ||
| 537 | fq->meat -= free_it->len; | 547 | fq->meat -= free_it->len; |
| 538 | frag_kfree_skb(free_it); | 548 | frag_kfree_skb(free_it, NULL); |
| 539 | } | 549 | } |
| 540 | } | 550 | } |
| 541 | 551 | ||
| @@ -811,7 +821,7 @@ struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb) | |||
| 811 | if (nf_ct_frag6_queue(fq, clone, fhdr, nhoff) < 0) { | 821 | if (nf_ct_frag6_queue(fq, clone, fhdr, nhoff) < 0) { |
| 812 | spin_unlock(&fq->lock); | 822 | spin_unlock(&fq->lock); |
| 813 | DEBUGP("Can't insert skb to queue\n"); | 823 | DEBUGP("Can't insert skb to queue\n"); |
| 814 | fq_put(fq); | 824 | fq_put(fq, NULL); |
| 815 | goto ret_orig; | 825 | goto ret_orig; |
| 816 | } | 826 | } |
| 817 | 827 | ||
| @@ -822,7 +832,7 @@ struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb) | |||
| 822 | } | 832 | } |
| 823 | spin_unlock(&fq->lock); | 833 | spin_unlock(&fq->lock); |
| 824 | 834 | ||
| 825 | fq_put(fq); | 835 | fq_put(fq, NULL); |
| 826 | return ret_skb; | 836 | return ret_skb; |
| 827 | 837 | ||
| 828 | ret_orig: | 838 | ret_orig: |
