| author | Pavel Emelyanov <xemul@openvz.org> | 2007-10-15 05:24:19 -0400 |
|---|---|---|
| committer | David S. Miller <davem@sunset.davemloft.net> | 2007-10-15 15:26:38 -0400 |
| commit | 5ab11c98d3a950faf6922b6166e5f8fc874590e7 | |
| tree | ef9ab897361f106309df37b6d4f2e95fdecdb240 | |
| parent | 114342f2d38439cb1a54f1f724fa38729b093c48 | |
[INET]: Move common fields from frag_queues in one place.
Introduce struct inet_frag_queue in a new include/net/inet_frag.h file
and move into it the fields common to these three structs:
* struct ipq in ipv4/ip_fragment.c
* struct nf_ct_frag6_queue in nf_conntrack_reasm.c
* struct frag_queue in ipv6/reassembly.c
After this, replace those fields in each of these structures with an
instance of the new struct and fix the users to use the new names,
i.e. hunks like
- atomic_dec(&fq->refcnt);
+ atomic_dec(&fq->q.refcnt);
(such hunks make up most of the patch)
Signed-off-by: Pavel Emelyanov <xemul@openvz.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
| -rw-r--r-- | include/net/inet_frag.h | 21 |
| -rw-r--r-- | net/ipv4/ip_fragment.c | 177 |
| -rw-r--r-- | net/ipv6/netfilter/nf_conntrack_reasm.c | 137 |
| -rw-r--r-- | net/ipv6/reassembly.c | 153 |
4 files changed, 238 insertions, 250 deletions
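The core of the change is a plain embedding refactor, which is easiest to see in isolation. Below is a minimal, userspace-compilable sketch of the pattern (a simplified illustration, not the kernel code itself): the fields shared by the three reassembly queues are collected into one base struct, each protocol-specific queue embeds it as a member named q, and every access to a shared field gains a ".q." hop, exactly the shape of the hunks quoted in the commit message.

```c
#include <stdio.h>

/* Shared part of a reassembly queue, in the spirit of include/net/inet_frag.h. */
struct inet_frag_queue {
	int refcnt;            /* stand-in for atomic_t refcnt */
	int len;               /* total length of the original datagram */
	int meat;              /* bytes received so far */
	unsigned char last_in; /* FIRST_IN / LAST_IN / COMPLETE flags */
};

/* Protocol-specific queue: only its own keys remain, common state lives in q. */
struct ipq {
	struct inet_frag_queue q;
	unsigned int saddr, daddr; /* hypothetical, simplified lookup keys */
};

int main(void)
{
	struct ipq qp = { .q = { .refcnt = 1 } };

	/* old code: atomic_dec(&qp->refcnt);  new code: atomic_dec(&qp->q.refcnt); */
	qp.q.refcnt--;
	qp.q.meat += 1480;

	printf("refcnt=%d meat=%d\n", qp.q.refcnt, qp.q.meat);
	return 0;
}
```

Because the embedded member is simply named q, the conversion in the patch is mechanical: every qp->field or fq->field that referred to a shared member becomes qp->q.field or fq->q.field, with no change in behaviour.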
diff --git a/include/net/inet_frag.h b/include/net/inet_frag.h
new file mode 100644
index 000000000000..74e9cb9b6943
--- /dev/null
+++ b/include/net/inet_frag.h
| @@ -0,0 +1,21 @@ | |||
| 1 | #ifndef __NET_FRAG_H__ | ||
| 2 | #define __NET_FRAG_H__ | ||
| 3 | |||
| 4 | struct inet_frag_queue { | ||
| 5 | struct hlist_node list; | ||
| 6 | struct list_head lru_list; /* lru list member */ | ||
| 7 | spinlock_t lock; | ||
| 8 | atomic_t refcnt; | ||
| 9 | struct timer_list timer; /* when will this queue expire? */ | ||
| 10 | struct sk_buff *fragments; /* list of received fragments */ | ||
| 11 | ktime_t stamp; | ||
| 12 | int len; /* total length of orig datagram */ | ||
| 13 | int meat; | ||
| 14 | __u8 last_in; /* first/last segment arrived? */ | ||
| 15 | |||
| 16 | #define COMPLETE 4 | ||
| 17 | #define FIRST_IN 2 | ||
| 18 | #define LAST_IN 1 | ||
| 19 | }; | ||
| 20 | |||
| 21 | #endif | ||
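The flag bits and the len/meat counters defined above drive the "ready to reassemble" decision in all three users. The standalone sketch below (an assumed simplification, not kernel code) shows that condition: a datagram can be rebuilt once both the first and the last fragment have been seen and the bytes queued (meat) cover the whole original length (len); this is the test ip_frag_queue(), nf_ct_frag6_gather() and ip6_frag_queue() apply to the embedded q further down in the patch.

```c
#include <stdio.h>

#define COMPLETE 4   /* queue has been reassembled or killed */
#define FIRST_IN 2   /* fragment with offset 0 has arrived */
#define LAST_IN  1   /* fragment without the "more fragments" flag has arrived */

struct inet_frag_queue {
	int len;               /* total length of the original datagram */
	int meat;              /* bytes queued so far */
	unsigned char last_in;
};

/* Ready for reassembly when both ends have been seen and no bytes are missing. */
static int frag_queue_ready(const struct inet_frag_queue *q)
{
	return q->last_in == (FIRST_IN | LAST_IN) && q->meat == q->len;
}

int main(void)
{
	struct inet_frag_queue q = { .len = 3000, .meat = 1480, .last_in = FIRST_IN };

	printf("ready=%d\n", frag_queue_ready(&q)); /* 0: last fragment still missing */
	q.last_in |= LAST_IN;
	q.meat = 3000;
	printf("ready=%d\n", frag_queue_ready(&q)); /* 1: reassemble now */
	return 0;
}
```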
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index 32108cf2a784..428eaa502ec2 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
| @@ -39,6 +39,7 @@ | |||
| 39 | #include <net/icmp.h> | 39 | #include <net/icmp.h> |
| 40 | #include <net/checksum.h> | 40 | #include <net/checksum.h> |
| 41 | #include <net/inetpeer.h> | 41 | #include <net/inetpeer.h> |
| 42 | #include <net/inet_frag.h> | ||
| 42 | #include <linux/tcp.h> | 43 | #include <linux/tcp.h> |
| 43 | #include <linux/udp.h> | 44 | #include <linux/udp.h> |
| 44 | #include <linux/inet.h> | 45 | #include <linux/inet.h> |
| @@ -74,25 +75,13 @@ struct ipfrag_skb_cb | |||
| 74 | 75 | ||
| 75 | /* Describe an entry in the "incomplete datagrams" queue. */ | 76 | /* Describe an entry in the "incomplete datagrams" queue. */ |
| 76 | struct ipq { | 77 | struct ipq { |
| 77 | struct hlist_node list; | 78 | struct inet_frag_queue q; |
| 78 | struct list_head lru_list; /* lru list member */ | 79 | |
| 79 | u32 user; | 80 | u32 user; |
| 80 | __be32 saddr; | 81 | __be32 saddr; |
| 81 | __be32 daddr; | 82 | __be32 daddr; |
| 82 | __be16 id; | 83 | __be16 id; |
| 83 | u8 protocol; | 84 | u8 protocol; |
| 84 | u8 last_in; | ||
| 85 | #define COMPLETE 4 | ||
| 86 | #define FIRST_IN 2 | ||
| 87 | #define LAST_IN 1 | ||
| 88 | |||
| 89 | struct sk_buff *fragments; /* linked list of received fragments */ | ||
| 90 | int len; /* total length of original datagram */ | ||
| 91 | int meat; | ||
| 92 | spinlock_t lock; | ||
| 93 | atomic_t refcnt; | ||
| 94 | struct timer_list timer; /* when will this queue expire? */ | ||
| 95 | ktime_t stamp; | ||
| 96 | int iif; | 85 | int iif; |
| 97 | unsigned int rid; | 86 | unsigned int rid; |
| 98 | struct inet_peer *peer; | 87 | struct inet_peer *peer; |
| @@ -114,8 +103,8 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev, | |||
| 114 | 103 | ||
| 115 | static __inline__ void __ipq_unlink(struct ipq *qp) | 104 | static __inline__ void __ipq_unlink(struct ipq *qp) |
| 116 | { | 105 | { |
| 117 | hlist_del(&qp->list); | 106 | hlist_del(&qp->q.list); |
| 118 | list_del(&qp->lru_list); | 107 | list_del(&qp->q.lru_list); |
| 119 | ip_frag_nqueues--; | 108 | ip_frag_nqueues--; |
| 120 | } | 109 | } |
| 121 | 110 | ||
| @@ -147,15 +136,15 @@ static void ipfrag_secret_rebuild(unsigned long dummy) | |||
| 147 | struct ipq *q; | 136 | struct ipq *q; |
| 148 | struct hlist_node *p, *n; | 137 | struct hlist_node *p, *n; |
| 149 | 138 | ||
| 150 | hlist_for_each_entry_safe(q, p, n, &ipq_hash[i], list) { | 139 | hlist_for_each_entry_safe(q, p, n, &ipq_hash[i], q.list) { |
| 151 | unsigned int hval = ipqhashfn(q->id, q->saddr, | 140 | unsigned int hval = ipqhashfn(q->id, q->saddr, |
| 152 | q->daddr, q->protocol); | 141 | q->daddr, q->protocol); |
| 153 | 142 | ||
| 154 | if (hval != i) { | 143 | if (hval != i) { |
| 155 | hlist_del(&q->list); | 144 | hlist_del(&q->q.list); |
| 156 | 145 | ||
| 157 | /* Relink to new hash chain. */ | 146 | /* Relink to new hash chain. */ |
| 158 | hlist_add_head(&q->list, &ipq_hash[hval]); | 147 | hlist_add_head(&q->q.list, &ipq_hash[hval]); |
| 159 | } | 148 | } |
| 160 | } | 149 | } |
| 161 | } | 150 | } |
| @@ -201,14 +190,14 @@ static void ip_frag_destroy(struct ipq *qp, int *work) | |||
| 201 | { | 190 | { |
| 202 | struct sk_buff *fp; | 191 | struct sk_buff *fp; |
| 203 | 192 | ||
| 204 | BUG_TRAP(qp->last_in&COMPLETE); | 193 | BUG_TRAP(qp->q.last_in&COMPLETE); |
| 205 | BUG_TRAP(del_timer(&qp->timer) == 0); | 194 | BUG_TRAP(del_timer(&qp->q.timer) == 0); |
| 206 | 195 | ||
| 207 | if (qp->peer) | 196 | if (qp->peer) |
| 208 | inet_putpeer(qp->peer); | 197 | inet_putpeer(qp->peer); |
| 209 | 198 | ||
| 210 | /* Release all fragment data. */ | 199 | /* Release all fragment data. */ |
| 211 | fp = qp->fragments; | 200 | fp = qp->q.fragments; |
| 212 | while (fp) { | 201 | while (fp) { |
| 213 | struct sk_buff *xp = fp->next; | 202 | struct sk_buff *xp = fp->next; |
| 214 | 203 | ||
| @@ -222,7 +211,7 @@ static void ip_frag_destroy(struct ipq *qp, int *work) | |||
| 222 | 211 | ||
| 223 | static __inline__ void ipq_put(struct ipq *ipq, int *work) | 212 | static __inline__ void ipq_put(struct ipq *ipq, int *work) |
| 224 | { | 213 | { |
| 225 | if (atomic_dec_and_test(&ipq->refcnt)) | 214 | if (atomic_dec_and_test(&ipq->q.refcnt)) |
| 226 | ip_frag_destroy(ipq, work); | 215 | ip_frag_destroy(ipq, work); |
| 227 | } | 216 | } |
| 228 | 217 | ||
| @@ -231,13 +220,13 @@ static __inline__ void ipq_put(struct ipq *ipq, int *work) | |||
| 231 | */ | 220 | */ |
| 232 | static void ipq_kill(struct ipq *ipq) | 221 | static void ipq_kill(struct ipq *ipq) |
| 233 | { | 222 | { |
| 234 | if (del_timer(&ipq->timer)) | 223 | if (del_timer(&ipq->q.timer)) |
| 235 | atomic_dec(&ipq->refcnt); | 224 | atomic_dec(&ipq->q.refcnt); |
| 236 | 225 | ||
| 237 | if (!(ipq->last_in & COMPLETE)) { | 226 | if (!(ipq->q.last_in & COMPLETE)) { |
| 238 | ipq_unlink(ipq); | 227 | ipq_unlink(ipq); |
| 239 | atomic_dec(&ipq->refcnt); | 228 | atomic_dec(&ipq->q.refcnt); |
| 240 | ipq->last_in |= COMPLETE; | 229 | ipq->q.last_in |= COMPLETE; |
| 241 | } | 230 | } |
| 242 | } | 231 | } |
| 243 | 232 | ||
| @@ -261,14 +250,14 @@ static void ip_evictor(void) | |||
| 261 | return; | 250 | return; |
| 262 | } | 251 | } |
| 263 | tmp = ipq_lru_list.next; | 252 | tmp = ipq_lru_list.next; |
| 264 | qp = list_entry(tmp, struct ipq, lru_list); | 253 | qp = list_entry(tmp, struct ipq, q.lru_list); |
| 265 | atomic_inc(&qp->refcnt); | 254 | atomic_inc(&qp->q.refcnt); |
| 266 | read_unlock(&ipfrag_lock); | 255 | read_unlock(&ipfrag_lock); |
| 267 | 256 | ||
| 268 | spin_lock(&qp->lock); | 257 | spin_lock(&qp->q.lock); |
| 269 | if (!(qp->last_in&COMPLETE)) | 258 | if (!(qp->q.last_in&COMPLETE)) |
| 270 | ipq_kill(qp); | 259 | ipq_kill(qp); |
| 271 | spin_unlock(&qp->lock); | 260 | spin_unlock(&qp->q.lock); |
| 272 | 261 | ||
| 273 | ipq_put(qp, &work); | 262 | ipq_put(qp, &work); |
| 274 | IP_INC_STATS_BH(IPSTATS_MIB_REASMFAILS); | 263 | IP_INC_STATS_BH(IPSTATS_MIB_REASMFAILS); |
| @@ -282,9 +271,9 @@ static void ip_expire(unsigned long arg) | |||
| 282 | { | 271 | { |
| 283 | struct ipq *qp = (struct ipq *) arg; | 272 | struct ipq *qp = (struct ipq *) arg; |
| 284 | 273 | ||
| 285 | spin_lock(&qp->lock); | 274 | spin_lock(&qp->q.lock); |
| 286 | 275 | ||
| 287 | if (qp->last_in & COMPLETE) | 276 | if (qp->q.last_in & COMPLETE) |
| 288 | goto out; | 277 | goto out; |
| 289 | 278 | ||
| 290 | ipq_kill(qp); | 279 | ipq_kill(qp); |
| @@ -292,8 +281,8 @@ static void ip_expire(unsigned long arg) | |||
| 292 | IP_INC_STATS_BH(IPSTATS_MIB_REASMTIMEOUT); | 281 | IP_INC_STATS_BH(IPSTATS_MIB_REASMTIMEOUT); |
| 293 | IP_INC_STATS_BH(IPSTATS_MIB_REASMFAILS); | 282 | IP_INC_STATS_BH(IPSTATS_MIB_REASMFAILS); |
| 294 | 283 | ||
| 295 | if ((qp->last_in&FIRST_IN) && qp->fragments != NULL) { | 284 | if ((qp->q.last_in&FIRST_IN) && qp->q.fragments != NULL) { |
| 296 | struct sk_buff *head = qp->fragments; | 285 | struct sk_buff *head = qp->q.fragments; |
| 297 | /* Send an ICMP "Fragment Reassembly Timeout" message. */ | 286 | /* Send an ICMP "Fragment Reassembly Timeout" message. */ |
| 298 | if ((head->dev = dev_get_by_index(&init_net, qp->iif)) != NULL) { | 287 | if ((head->dev = dev_get_by_index(&init_net, qp->iif)) != NULL) { |
| 299 | icmp_send(head, ICMP_TIME_EXCEEDED, ICMP_EXC_FRAGTIME, 0); | 288 | icmp_send(head, ICMP_TIME_EXCEEDED, ICMP_EXC_FRAGTIME, 0); |
| @@ -301,7 +290,7 @@ static void ip_expire(unsigned long arg) | |||
| 301 | } | 290 | } |
| 302 | } | 291 | } |
| 303 | out: | 292 | out: |
| 304 | spin_unlock(&qp->lock); | 293 | spin_unlock(&qp->q.lock); |
| 305 | ipq_put(qp, NULL); | 294 | ipq_put(qp, NULL); |
| 306 | } | 295 | } |
| 307 | 296 | ||
| @@ -323,15 +312,15 @@ static struct ipq *ip_frag_intern(struct ipq *qp_in) | |||
| 323 | * such entry could be created on other cpu, while we | 312 | * such entry could be created on other cpu, while we |
| 324 | * promoted read lock to write lock. | 313 | * promoted read lock to write lock. |
| 325 | */ | 314 | */ |
| 326 | hlist_for_each_entry(qp, n, &ipq_hash[hash], list) { | 315 | hlist_for_each_entry(qp, n, &ipq_hash[hash], q.list) { |
| 327 | if (qp->id == qp_in->id && | 316 | if (qp->id == qp_in->id && |
| 328 | qp->saddr == qp_in->saddr && | 317 | qp->saddr == qp_in->saddr && |
| 329 | qp->daddr == qp_in->daddr && | 318 | qp->daddr == qp_in->daddr && |
| 330 | qp->protocol == qp_in->protocol && | 319 | qp->protocol == qp_in->protocol && |
| 331 | qp->user == qp_in->user) { | 320 | qp->user == qp_in->user) { |
| 332 | atomic_inc(&qp->refcnt); | 321 | atomic_inc(&qp->q.refcnt); |
| 333 | write_unlock(&ipfrag_lock); | 322 | write_unlock(&ipfrag_lock); |
| 334 | qp_in->last_in |= COMPLETE; | 323 | qp_in->q.last_in |= COMPLETE; |
| 335 | ipq_put(qp_in, NULL); | 324 | ipq_put(qp_in, NULL); |
| 336 | return qp; | 325 | return qp; |
| 337 | } | 326 | } |
| @@ -339,13 +328,13 @@ static struct ipq *ip_frag_intern(struct ipq *qp_in) | |||
| 339 | #endif | 328 | #endif |
| 340 | qp = qp_in; | 329 | qp = qp_in; |
| 341 | 330 | ||
| 342 | if (!mod_timer(&qp->timer, jiffies + sysctl_ipfrag_time)) | 331 | if (!mod_timer(&qp->q.timer, jiffies + sysctl_ipfrag_time)) |
| 343 | atomic_inc(&qp->refcnt); | 332 | atomic_inc(&qp->q.refcnt); |
| 344 | 333 | ||
| 345 | atomic_inc(&qp->refcnt); | 334 | atomic_inc(&qp->q.refcnt); |
| 346 | hlist_add_head(&qp->list, &ipq_hash[hash]); | 335 | hlist_add_head(&qp->q.list, &ipq_hash[hash]); |
| 347 | INIT_LIST_HEAD(&qp->lru_list); | 336 | INIT_LIST_HEAD(&qp->q.lru_list); |
| 348 | list_add_tail(&qp->lru_list, &ipq_lru_list); | 337 | list_add_tail(&qp->q.lru_list, &ipq_lru_list); |
| 349 | ip_frag_nqueues++; | 338 | ip_frag_nqueues++; |
| 350 | write_unlock(&ipfrag_lock); | 339 | write_unlock(&ipfrag_lock); |
| 351 | return qp; | 340 | return qp; |
| @@ -360,23 +349,23 @@ static struct ipq *ip_frag_create(struct iphdr *iph, u32 user) | |||
| 360 | goto out_nomem; | 349 | goto out_nomem; |
| 361 | 350 | ||
| 362 | qp->protocol = iph->protocol; | 351 | qp->protocol = iph->protocol; |
| 363 | qp->last_in = 0; | 352 | qp->q.last_in = 0; |
| 364 | qp->id = iph->id; | 353 | qp->id = iph->id; |
| 365 | qp->saddr = iph->saddr; | 354 | qp->saddr = iph->saddr; |
| 366 | qp->daddr = iph->daddr; | 355 | qp->daddr = iph->daddr; |
| 367 | qp->user = user; | 356 | qp->user = user; |
| 368 | qp->len = 0; | 357 | qp->q.len = 0; |
| 369 | qp->meat = 0; | 358 | qp->q.meat = 0; |
| 370 | qp->fragments = NULL; | 359 | qp->q.fragments = NULL; |
| 371 | qp->iif = 0; | 360 | qp->iif = 0; |
| 372 | qp->peer = sysctl_ipfrag_max_dist ? inet_getpeer(iph->saddr, 1) : NULL; | 361 | qp->peer = sysctl_ipfrag_max_dist ? inet_getpeer(iph->saddr, 1) : NULL; |
| 373 | 362 | ||
| 374 | /* Initialize a timer for this entry. */ | 363 | /* Initialize a timer for this entry. */ |
| 375 | init_timer(&qp->timer); | 364 | init_timer(&qp->q.timer); |
| 376 | qp->timer.data = (unsigned long) qp; /* pointer to queue */ | 365 | qp->q.timer.data = (unsigned long) qp; /* pointer to queue */ |
| 377 | qp->timer.function = ip_expire; /* expire function */ | 366 | qp->q.timer.function = ip_expire; /* expire function */ |
| 378 | spin_lock_init(&qp->lock); | 367 | spin_lock_init(&qp->q.lock); |
| 379 | atomic_set(&qp->refcnt, 1); | 368 | atomic_set(&qp->q.refcnt, 1); |
| 380 | 369 | ||
| 381 | return ip_frag_intern(qp); | 370 | return ip_frag_intern(qp); |
| 382 | 371 | ||
| @@ -400,13 +389,13 @@ static inline struct ipq *ip_find(struct iphdr *iph, u32 user) | |||
| 400 | 389 | ||
| 401 | read_lock(&ipfrag_lock); | 390 | read_lock(&ipfrag_lock); |
| 402 | hash = ipqhashfn(id, saddr, daddr, protocol); | 391 | hash = ipqhashfn(id, saddr, daddr, protocol); |
| 403 | hlist_for_each_entry(qp, n, &ipq_hash[hash], list) { | 392 | hlist_for_each_entry(qp, n, &ipq_hash[hash], q.list) { |
| 404 | if (qp->id == id && | 393 | if (qp->id == id && |
| 405 | qp->saddr == saddr && | 394 | qp->saddr == saddr && |
| 406 | qp->daddr == daddr && | 395 | qp->daddr == daddr && |
| 407 | qp->protocol == protocol && | 396 | qp->protocol == protocol && |
| 408 | qp->user == user) { | 397 | qp->user == user) { |
| 409 | atomic_inc(&qp->refcnt); | 398 | atomic_inc(&qp->q.refcnt); |
| 410 | read_unlock(&ipfrag_lock); | 399 | read_unlock(&ipfrag_lock); |
| 411 | return qp; | 400 | return qp; |
| 412 | } | 401 | } |
| @@ -432,7 +421,7 @@ static inline int ip_frag_too_far(struct ipq *qp) | |||
| 432 | end = atomic_inc_return(&peer->rid); | 421 | end = atomic_inc_return(&peer->rid); |
| 433 | qp->rid = end; | 422 | qp->rid = end; |
| 434 | 423 | ||
| 435 | rc = qp->fragments && (end - start) > max; | 424 | rc = qp->q.fragments && (end - start) > max; |
| 436 | 425 | ||
| 437 | if (rc) { | 426 | if (rc) { |
| 438 | IP_INC_STATS_BH(IPSTATS_MIB_REASMFAILS); | 427 | IP_INC_STATS_BH(IPSTATS_MIB_REASMFAILS); |
| @@ -445,22 +434,22 @@ static int ip_frag_reinit(struct ipq *qp) | |||
| 445 | { | 434 | { |
| 446 | struct sk_buff *fp; | 435 | struct sk_buff *fp; |
| 447 | 436 | ||
| 448 | if (!mod_timer(&qp->timer, jiffies + sysctl_ipfrag_time)) { | 437 | if (!mod_timer(&qp->q.timer, jiffies + sysctl_ipfrag_time)) { |
| 449 | atomic_inc(&qp->refcnt); | 438 | atomic_inc(&qp->q.refcnt); |
| 450 | return -ETIMEDOUT; | 439 | return -ETIMEDOUT; |
| 451 | } | 440 | } |
| 452 | 441 | ||
| 453 | fp = qp->fragments; | 442 | fp = qp->q.fragments; |
| 454 | do { | 443 | do { |
| 455 | struct sk_buff *xp = fp->next; | 444 | struct sk_buff *xp = fp->next; |
| 456 | frag_kfree_skb(fp, NULL); | 445 | frag_kfree_skb(fp, NULL); |
| 457 | fp = xp; | 446 | fp = xp; |
| 458 | } while (fp); | 447 | } while (fp); |
| 459 | 448 | ||
| 460 | qp->last_in = 0; | 449 | qp->q.last_in = 0; |
| 461 | qp->len = 0; | 450 | qp->q.len = 0; |
| 462 | qp->meat = 0; | 451 | qp->q.meat = 0; |
| 463 | qp->fragments = NULL; | 452 | qp->q.fragments = NULL; |
| 464 | qp->iif = 0; | 453 | qp->iif = 0; |
| 465 | 454 | ||
| 466 | return 0; | 455 | return 0; |
| @@ -475,7 +464,7 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb) | |||
| 475 | int ihl, end; | 464 | int ihl, end; |
| 476 | int err = -ENOENT; | 465 | int err = -ENOENT; |
| 477 | 466 | ||
| 478 | if (qp->last_in & COMPLETE) | 467 | if (qp->q.last_in & COMPLETE) |
| 479 | goto err; | 468 | goto err; |
| 480 | 469 | ||
| 481 | if (!(IPCB(skb)->flags & IPSKB_FRAG_COMPLETE) && | 470 | if (!(IPCB(skb)->flags & IPSKB_FRAG_COMPLETE) && |
| @@ -500,22 +489,22 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb) | |||
| 500 | /* If we already have some bits beyond end | 489 | /* If we already have some bits beyond end |
| 501 | * or have different end, the segment is corrrupted. | 490 | * or have different end, the segment is corrrupted. |
| 502 | */ | 491 | */ |
| 503 | if (end < qp->len || | 492 | if (end < qp->q.len || |
| 504 | ((qp->last_in & LAST_IN) && end != qp->len)) | 493 | ((qp->q.last_in & LAST_IN) && end != qp->q.len)) |
| 505 | goto err; | 494 | goto err; |
| 506 | qp->last_in |= LAST_IN; | 495 | qp->q.last_in |= LAST_IN; |
| 507 | qp->len = end; | 496 | qp->q.len = end; |
| 508 | } else { | 497 | } else { |
| 509 | if (end&7) { | 498 | if (end&7) { |
| 510 | end &= ~7; | 499 | end &= ~7; |
| 511 | if (skb->ip_summed != CHECKSUM_UNNECESSARY) | 500 | if (skb->ip_summed != CHECKSUM_UNNECESSARY) |
| 512 | skb->ip_summed = CHECKSUM_NONE; | 501 | skb->ip_summed = CHECKSUM_NONE; |
| 513 | } | 502 | } |
| 514 | if (end > qp->len) { | 503 | if (end > qp->q.len) { |
| 515 | /* Some bits beyond end -> corruption. */ | 504 | /* Some bits beyond end -> corruption. */ |
| 516 | if (qp->last_in & LAST_IN) | 505 | if (qp->q.last_in & LAST_IN) |
| 517 | goto err; | 506 | goto err; |
| 518 | qp->len = end; | 507 | qp->q.len = end; |
| 519 | } | 508 | } |
| 520 | } | 509 | } |
| 521 | if (end == offset) | 510 | if (end == offset) |
| @@ -534,7 +523,7 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb) | |||
| 534 | * this fragment, right? | 523 | * this fragment, right? |
| 535 | */ | 524 | */ |
| 536 | prev = NULL; | 525 | prev = NULL; |
| 537 | for (next = qp->fragments; next != NULL; next = next->next) { | 526 | for (next = qp->q.fragments; next != NULL; next = next->next) { |
| 538 | if (FRAG_CB(next)->offset >= offset) | 527 | if (FRAG_CB(next)->offset >= offset) |
| 539 | break; /* bingo! */ | 528 | break; /* bingo! */ |
| 540 | prev = next; | 529 | prev = next; |
| @@ -572,7 +561,7 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb) | |||
| 572 | if (!pskb_pull(next, i)) | 561 | if (!pskb_pull(next, i)) |
| 573 | goto err; | 562 | goto err; |
| 574 | FRAG_CB(next)->offset += i; | 563 | FRAG_CB(next)->offset += i; |
| 575 | qp->meat -= i; | 564 | qp->q.meat -= i; |
| 576 | if (next->ip_summed != CHECKSUM_UNNECESSARY) | 565 | if (next->ip_summed != CHECKSUM_UNNECESSARY) |
| 577 | next->ip_summed = CHECKSUM_NONE; | 566 | next->ip_summed = CHECKSUM_NONE; |
| 578 | break; | 567 | break; |
| @@ -587,9 +576,9 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb) | |||
| 587 | if (prev) | 576 | if (prev) |
| 588 | prev->next = next; | 577 | prev->next = next; |
| 589 | else | 578 | else |
| 590 | qp->fragments = next; | 579 | qp->q.fragments = next; |
| 591 | 580 | ||
| 592 | qp->meat -= free_it->len; | 581 | qp->q.meat -= free_it->len; |
| 593 | frag_kfree_skb(free_it, NULL); | 582 | frag_kfree_skb(free_it, NULL); |
| 594 | } | 583 | } |
| 595 | } | 584 | } |
| @@ -601,24 +590,24 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb) | |||
| 601 | if (prev) | 590 | if (prev) |
| 602 | prev->next = skb; | 591 | prev->next = skb; |
| 603 | else | 592 | else |
| 604 | qp->fragments = skb; | 593 | qp->q.fragments = skb; |
| 605 | 594 | ||
| 606 | dev = skb->dev; | 595 | dev = skb->dev; |
| 607 | if (dev) { | 596 | if (dev) { |
| 608 | qp->iif = dev->ifindex; | 597 | qp->iif = dev->ifindex; |
| 609 | skb->dev = NULL; | 598 | skb->dev = NULL; |
| 610 | } | 599 | } |
| 611 | qp->stamp = skb->tstamp; | 600 | qp->q.stamp = skb->tstamp; |
| 612 | qp->meat += skb->len; | 601 | qp->q.meat += skb->len; |
| 613 | atomic_add(skb->truesize, &ip_frag_mem); | 602 | atomic_add(skb->truesize, &ip_frag_mem); |
| 614 | if (offset == 0) | 603 | if (offset == 0) |
| 615 | qp->last_in |= FIRST_IN; | 604 | qp->q.last_in |= FIRST_IN; |
| 616 | 605 | ||
| 617 | if (qp->last_in == (FIRST_IN | LAST_IN) && qp->meat == qp->len) | 606 | if (qp->q.last_in == (FIRST_IN | LAST_IN) && qp->q.meat == qp->q.len) |
| 618 | return ip_frag_reasm(qp, prev, dev); | 607 | return ip_frag_reasm(qp, prev, dev); |
| 619 | 608 | ||
| 620 | write_lock(&ipfrag_lock); | 609 | write_lock(&ipfrag_lock); |
| 621 | list_move_tail(&qp->lru_list, &ipq_lru_list); | 610 | list_move_tail(&qp->q.lru_list, &ipq_lru_list); |
| 622 | write_unlock(&ipfrag_lock); | 611 | write_unlock(&ipfrag_lock); |
| 623 | return -EINPROGRESS; | 612 | return -EINPROGRESS; |
| 624 | 613 | ||
| @@ -634,7 +623,7 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev, | |||
| 634 | struct net_device *dev) | 623 | struct net_device *dev) |
| 635 | { | 624 | { |
| 636 | struct iphdr *iph; | 625 | struct iphdr *iph; |
| 637 | struct sk_buff *fp, *head = qp->fragments; | 626 | struct sk_buff *fp, *head = qp->q.fragments; |
| 638 | int len; | 627 | int len; |
| 639 | int ihlen; | 628 | int ihlen; |
| 640 | int err; | 629 | int err; |
| @@ -652,11 +641,11 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev, | |||
| 652 | fp->next = head->next; | 641 | fp->next = head->next; |
| 653 | prev->next = fp; | 642 | prev->next = fp; |
| 654 | 643 | ||
| 655 | skb_morph(head, qp->fragments); | 644 | skb_morph(head, qp->q.fragments); |
| 656 | head->next = qp->fragments->next; | 645 | head->next = qp->q.fragments->next; |
| 657 | 646 | ||
| 658 | kfree_skb(qp->fragments); | 647 | kfree_skb(qp->q.fragments); |
| 659 | qp->fragments = head; | 648 | qp->q.fragments = head; |
| 660 | } | 649 | } |
| 661 | 650 | ||
| 662 | BUG_TRAP(head != NULL); | 651 | BUG_TRAP(head != NULL); |
| @@ -664,7 +653,7 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev, | |||
| 664 | 653 | ||
| 665 | /* Allocate a new buffer for the datagram. */ | 654 | /* Allocate a new buffer for the datagram. */ |
| 666 | ihlen = ip_hdrlen(head); | 655 | ihlen = ip_hdrlen(head); |
| 667 | len = ihlen + qp->len; | 656 | len = ihlen + qp->q.len; |
| 668 | 657 | ||
| 669 | err = -E2BIG; | 658 | err = -E2BIG; |
| 670 | if (len > 65535) | 659 | if (len > 65535) |
| @@ -715,13 +704,13 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev, | |||
| 715 | 704 | ||
| 716 | head->next = NULL; | 705 | head->next = NULL; |
| 717 | head->dev = dev; | 706 | head->dev = dev; |
| 718 | head->tstamp = qp->stamp; | 707 | head->tstamp = qp->q.stamp; |
| 719 | 708 | ||
| 720 | iph = ip_hdr(head); | 709 | iph = ip_hdr(head); |
| 721 | iph->frag_off = 0; | 710 | iph->frag_off = 0; |
| 722 | iph->tot_len = htons(len); | 711 | iph->tot_len = htons(len); |
| 723 | IP_INC_STATS_BH(IPSTATS_MIB_REASMOKS); | 712 | IP_INC_STATS_BH(IPSTATS_MIB_REASMOKS); |
| 724 | qp->fragments = NULL; | 713 | qp->q.fragments = NULL; |
| 725 | return 0; | 714 | return 0; |
| 726 | 715 | ||
| 727 | out_nomem: | 716 | out_nomem: |
| @@ -753,11 +742,11 @@ int ip_defrag(struct sk_buff *skb, u32 user) | |||
| 753 | if ((qp = ip_find(ip_hdr(skb), user)) != NULL) { | 742 | if ((qp = ip_find(ip_hdr(skb), user)) != NULL) { |
| 754 | int ret; | 743 | int ret; |
| 755 | 744 | ||
| 756 | spin_lock(&qp->lock); | 745 | spin_lock(&qp->q.lock); |
| 757 | 746 | ||
| 758 | ret = ip_frag_queue(qp, skb); | 747 | ret = ip_frag_queue(qp, skb); |
| 759 | 748 | ||
| 760 | spin_unlock(&qp->lock); | 749 | spin_unlock(&qp->q.lock); |
| 761 | ipq_put(qp, NULL); | 750 | ipq_put(qp, NULL); |
| 762 | return ret; | 751 | return ret; |
| 763 | } | 752 | } |
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index 25442a8c1ba8..52e9f6a3995d 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
| @@ -31,6 +31,7 @@ | |||
| 31 | 31 | ||
| 32 | #include <net/sock.h> | 32 | #include <net/sock.h> |
| 33 | #include <net/snmp.h> | 33 | #include <net/snmp.h> |
| 34 | #include <net/inet_frag.h> | ||
| 34 | 35 | ||
| 35 | #include <net/ipv6.h> | 36 | #include <net/ipv6.h> |
| 36 | #include <net/protocol.h> | 37 | #include <net/protocol.h> |
| @@ -63,25 +64,13 @@ struct nf_ct_frag6_skb_cb | |||
| 63 | 64 | ||
| 64 | struct nf_ct_frag6_queue | 65 | struct nf_ct_frag6_queue |
| 65 | { | 66 | { |
| 66 | struct hlist_node list; | 67 | struct inet_frag_queue q; |
| 67 | struct list_head lru_list; /* lru list member */ | ||
| 68 | 68 | ||
| 69 | __be32 id; /* fragment id */ | 69 | __be32 id; /* fragment id */ |
| 70 | struct in6_addr saddr; | 70 | struct in6_addr saddr; |
| 71 | struct in6_addr daddr; | 71 | struct in6_addr daddr; |
| 72 | 72 | ||
| 73 | spinlock_t lock; | ||
| 74 | atomic_t refcnt; | ||
| 75 | struct timer_list timer; /* expire timer */ | ||
| 76 | struct sk_buff *fragments; | ||
| 77 | int len; | ||
| 78 | int meat; | ||
| 79 | ktime_t stamp; | ||
| 80 | unsigned int csum; | 73 | unsigned int csum; |
| 81 | __u8 last_in; /* has first/last segment arrived? */ | ||
| 82 | #define COMPLETE 4 | ||
| 83 | #define FIRST_IN 2 | ||
| 84 | #define LAST_IN 1 | ||
| 85 | __u16 nhoffset; | 74 | __u16 nhoffset; |
| 86 | }; | 75 | }; |
| 87 | 76 | ||
| @@ -97,8 +86,8 @@ int nf_ct_frag6_nqueues = 0; | |||
| 97 | 86 | ||
| 98 | static __inline__ void __fq_unlink(struct nf_ct_frag6_queue *fq) | 87 | static __inline__ void __fq_unlink(struct nf_ct_frag6_queue *fq) |
| 99 | { | 88 | { |
| 100 | hlist_del(&fq->list); | 89 | hlist_del(&fq->q.list); |
| 101 | list_del(&fq->lru_list); | 90 | list_del(&fq->q.lru_list); |
| 102 | nf_ct_frag6_nqueues--; | 91 | nf_ct_frag6_nqueues--; |
| 103 | } | 92 | } |
| 104 | 93 | ||
| @@ -150,14 +139,14 @@ static void nf_ct_frag6_secret_rebuild(unsigned long dummy) | |||
| 150 | struct nf_ct_frag6_queue *q; | 139 | struct nf_ct_frag6_queue *q; |
| 151 | struct hlist_node *p, *n; | 140 | struct hlist_node *p, *n; |
| 152 | 141 | ||
| 153 | hlist_for_each_entry_safe(q, p, n, &nf_ct_frag6_hash[i], list) { | 142 | hlist_for_each_entry_safe(q, p, n, &nf_ct_frag6_hash[i], q.list) { |
| 154 | unsigned int hval = ip6qhashfn(q->id, | 143 | unsigned int hval = ip6qhashfn(q->id, |
| 155 | &q->saddr, | 144 | &q->saddr, |
| 156 | &q->daddr); | 145 | &q->daddr); |
| 157 | if (hval != i) { | 146 | if (hval != i) { |
| 158 | hlist_del(&q->list); | 147 | hlist_del(&q->q.list); |
| 159 | /* Relink to new hash chain. */ | 148 | /* Relink to new hash chain. */ |
| 160 | hlist_add_head(&q->list, | 149 | hlist_add_head(&q->q.list, |
| 161 | &nf_ct_frag6_hash[hval]); | 150 | &nf_ct_frag6_hash[hval]); |
| 162 | } | 151 | } |
| 163 | } | 152 | } |
| @@ -208,11 +197,11 @@ static void nf_ct_frag6_destroy(struct nf_ct_frag6_queue *fq, | |||
| 208 | { | 197 | { |
| 209 | struct sk_buff *fp; | 198 | struct sk_buff *fp; |
| 210 | 199 | ||
| 211 | BUG_TRAP(fq->last_in&COMPLETE); | 200 | BUG_TRAP(fq->q.last_in&COMPLETE); |
| 212 | BUG_TRAP(del_timer(&fq->timer) == 0); | 201 | BUG_TRAP(del_timer(&fq->q.timer) == 0); |
| 213 | 202 | ||
| 214 | /* Release all fragment data. */ | 203 | /* Release all fragment data. */ |
| 215 | fp = fq->fragments; | 204 | fp = fq->q.fragments; |
| 216 | while (fp) { | 205 | while (fp) { |
| 217 | struct sk_buff *xp = fp->next; | 206 | struct sk_buff *xp = fp->next; |
| 218 | 207 | ||
| @@ -225,7 +214,7 @@ static void nf_ct_frag6_destroy(struct nf_ct_frag6_queue *fq, | |||
| 225 | 214 | ||
| 226 | static __inline__ void fq_put(struct nf_ct_frag6_queue *fq, unsigned int *work) | 215 | static __inline__ void fq_put(struct nf_ct_frag6_queue *fq, unsigned int *work) |
| 227 | { | 216 | { |
| 228 | if (atomic_dec_and_test(&fq->refcnt)) | 217 | if (atomic_dec_and_test(&fq->q.refcnt)) |
| 229 | nf_ct_frag6_destroy(fq, work); | 218 | nf_ct_frag6_destroy(fq, work); |
| 230 | } | 219 | } |
| 231 | 220 | ||
| @@ -234,13 +223,13 @@ static __inline__ void fq_put(struct nf_ct_frag6_queue *fq, unsigned int *work) | |||
| 234 | */ | 223 | */ |
| 235 | static __inline__ void fq_kill(struct nf_ct_frag6_queue *fq) | 224 | static __inline__ void fq_kill(struct nf_ct_frag6_queue *fq) |
| 236 | { | 225 | { |
| 237 | if (del_timer(&fq->timer)) | 226 | if (del_timer(&fq->q.timer)) |
| 238 | atomic_dec(&fq->refcnt); | 227 | atomic_dec(&fq->q.refcnt); |
| 239 | 228 | ||
| 240 | if (!(fq->last_in & COMPLETE)) { | 229 | if (!(fq->q.last_in & COMPLETE)) { |
| 241 | fq_unlink(fq); | 230 | fq_unlink(fq); |
| 242 | atomic_dec(&fq->refcnt); | 231 | atomic_dec(&fq->q.refcnt); |
| 243 | fq->last_in |= COMPLETE; | 232 | fq->q.last_in |= COMPLETE; |
| 244 | } | 233 | } |
| 245 | } | 234 | } |
| 246 | 235 | ||
| @@ -263,14 +252,14 @@ static void nf_ct_frag6_evictor(void) | |||
| 263 | } | 252 | } |
| 264 | tmp = nf_ct_frag6_lru_list.next; | 253 | tmp = nf_ct_frag6_lru_list.next; |
| 265 | BUG_ON(tmp == NULL); | 254 | BUG_ON(tmp == NULL); |
| 266 | fq = list_entry(tmp, struct nf_ct_frag6_queue, lru_list); | 255 | fq = list_entry(tmp, struct nf_ct_frag6_queue, q.lru_list); |
| 267 | atomic_inc(&fq->refcnt); | 256 | atomic_inc(&fq->q.refcnt); |
| 268 | read_unlock(&nf_ct_frag6_lock); | 257 | read_unlock(&nf_ct_frag6_lock); |
| 269 | 258 | ||
| 270 | spin_lock(&fq->lock); | 259 | spin_lock(&fq->q.lock); |
| 271 | if (!(fq->last_in&COMPLETE)) | 260 | if (!(fq->q.last_in&COMPLETE)) |
| 272 | fq_kill(fq); | 261 | fq_kill(fq); |
| 273 | spin_unlock(&fq->lock); | 262 | spin_unlock(&fq->q.lock); |
| 274 | 263 | ||
| 275 | fq_put(fq, &work); | 264 | fq_put(fq, &work); |
| 276 | } | 265 | } |
| @@ -280,15 +269,15 @@ static void nf_ct_frag6_expire(unsigned long data) | |||
| 280 | { | 269 | { |
| 281 | struct nf_ct_frag6_queue *fq = (struct nf_ct_frag6_queue *) data; | 270 | struct nf_ct_frag6_queue *fq = (struct nf_ct_frag6_queue *) data; |
| 282 | 271 | ||
| 283 | spin_lock(&fq->lock); | 272 | spin_lock(&fq->q.lock); |
| 284 | 273 | ||
| 285 | if (fq->last_in & COMPLETE) | 274 | if (fq->q.last_in & COMPLETE) |
| 286 | goto out; | 275 | goto out; |
| 287 | 276 | ||
| 288 | fq_kill(fq); | 277 | fq_kill(fq); |
| 289 | 278 | ||
| 290 | out: | 279 | out: |
| 291 | spin_unlock(&fq->lock); | 280 | spin_unlock(&fq->q.lock); |
| 292 | fq_put(fq, NULL); | 281 | fq_put(fq, NULL); |
| 293 | } | 282 | } |
| 294 | 283 | ||
| @@ -304,13 +293,13 @@ static struct nf_ct_frag6_queue *nf_ct_frag6_intern(unsigned int hash, | |||
| 304 | 293 | ||
| 305 | write_lock(&nf_ct_frag6_lock); | 294 | write_lock(&nf_ct_frag6_lock); |
| 306 | #ifdef CONFIG_SMP | 295 | #ifdef CONFIG_SMP |
| 307 | hlist_for_each_entry(fq, n, &nf_ct_frag6_hash[hash], list) { | 296 | hlist_for_each_entry(fq, n, &nf_ct_frag6_hash[hash], q.list) { |
| 308 | if (fq->id == fq_in->id && | 297 | if (fq->id == fq_in->id && |
| 309 | ipv6_addr_equal(&fq_in->saddr, &fq->saddr) && | 298 | ipv6_addr_equal(&fq_in->saddr, &fq->saddr) && |
| 310 | ipv6_addr_equal(&fq_in->daddr, &fq->daddr)) { | 299 | ipv6_addr_equal(&fq_in->daddr, &fq->daddr)) { |
| 311 | atomic_inc(&fq->refcnt); | 300 | atomic_inc(&fq->q.refcnt); |
| 312 | write_unlock(&nf_ct_frag6_lock); | 301 | write_unlock(&nf_ct_frag6_lock); |
| 313 | fq_in->last_in |= COMPLETE; | 302 | fq_in->q.last_in |= COMPLETE; |
| 314 | fq_put(fq_in, NULL); | 303 | fq_put(fq_in, NULL); |
| 315 | return fq; | 304 | return fq; |
| 316 | } | 305 | } |
| @@ -318,13 +307,13 @@ static struct nf_ct_frag6_queue *nf_ct_frag6_intern(unsigned int hash, | |||
| 318 | #endif | 307 | #endif |
| 319 | fq = fq_in; | 308 | fq = fq_in; |
| 320 | 309 | ||
| 321 | if (!mod_timer(&fq->timer, jiffies + nf_ct_frag6_timeout)) | 310 | if (!mod_timer(&fq->q.timer, jiffies + nf_ct_frag6_timeout)) |
| 322 | atomic_inc(&fq->refcnt); | 311 | atomic_inc(&fq->q.refcnt); |
| 323 | 312 | ||
| 324 | atomic_inc(&fq->refcnt); | 313 | atomic_inc(&fq->q.refcnt); |
| 325 | hlist_add_head(&fq->list, &nf_ct_frag6_hash[hash]); | 314 | hlist_add_head(&fq->q.list, &nf_ct_frag6_hash[hash]); |
| 326 | INIT_LIST_HEAD(&fq->lru_list); | 315 | INIT_LIST_HEAD(&fq->q.lru_list); |
| 327 | list_add_tail(&fq->lru_list, &nf_ct_frag6_lru_list); | 316 | list_add_tail(&fq->q.lru_list, &nf_ct_frag6_lru_list); |
| 328 | nf_ct_frag6_nqueues++; | 317 | nf_ct_frag6_nqueues++; |
| 329 | write_unlock(&nf_ct_frag6_lock); | 318 | write_unlock(&nf_ct_frag6_lock); |
| 330 | return fq; | 319 | return fq; |
| @@ -347,9 +336,9 @@ nf_ct_frag6_create(unsigned int hash, __be32 id, struct in6_addr *src, str | |||
| 347 | ipv6_addr_copy(&fq->saddr, src); | 336 | ipv6_addr_copy(&fq->saddr, src); |
| 348 | ipv6_addr_copy(&fq->daddr, dst); | 337 | ipv6_addr_copy(&fq->daddr, dst); |
| 349 | 338 | ||
| 350 | setup_timer(&fq->timer, nf_ct_frag6_expire, (unsigned long)fq); | 339 | setup_timer(&fq->q.timer, nf_ct_frag6_expire, (unsigned long)fq); |
| 351 | spin_lock_init(&fq->lock); | 340 | spin_lock_init(&fq->q.lock); |
| 352 | atomic_set(&fq->refcnt, 1); | 341 | atomic_set(&fq->q.refcnt, 1); |
| 353 | 342 | ||
| 354 | return nf_ct_frag6_intern(hash, fq); | 343 | return nf_ct_frag6_intern(hash, fq); |
| 355 | 344 | ||
| @@ -365,11 +354,11 @@ fq_find(__be32 id, struct in6_addr *src, struct in6_addr *dst) | |||
| 365 | unsigned int hash = ip6qhashfn(id, src, dst); | 354 | unsigned int hash = ip6qhashfn(id, src, dst); |
| 366 | 355 | ||
| 367 | read_lock(&nf_ct_frag6_lock); | 356 | read_lock(&nf_ct_frag6_lock); |
| 368 | hlist_for_each_entry(fq, n, &nf_ct_frag6_hash[hash], list) { | 357 | hlist_for_each_entry(fq, n, &nf_ct_frag6_hash[hash], q.list) { |
| 369 | if (fq->id == id && | 358 | if (fq->id == id && |
| 370 | ipv6_addr_equal(src, &fq->saddr) && | 359 | ipv6_addr_equal(src, &fq->saddr) && |
| 371 | ipv6_addr_equal(dst, &fq->daddr)) { | 360 | ipv6_addr_equal(dst, &fq->daddr)) { |
| 372 | atomic_inc(&fq->refcnt); | 361 | atomic_inc(&fq->q.refcnt); |
| 373 | read_unlock(&nf_ct_frag6_lock); | 362 | read_unlock(&nf_ct_frag6_lock); |
| 374 | return fq; | 363 | return fq; |
| 375 | } | 364 | } |
| @@ -386,7 +375,7 @@ static int nf_ct_frag6_queue(struct nf_ct_frag6_queue *fq, struct sk_buff *skb, | |||
| 386 | struct sk_buff *prev, *next; | 375 | struct sk_buff *prev, *next; |
| 387 | int offset, end; | 376 | int offset, end; |
| 388 | 377 | ||
| 389 | if (fq->last_in & COMPLETE) { | 378 | if (fq->q.last_in & COMPLETE) { |
| 390 | pr_debug("Allready completed\n"); | 379 | pr_debug("Allready completed\n"); |
| 391 | goto err; | 380 | goto err; |
| 392 | } | 381 | } |
| @@ -412,13 +401,13 @@ static int nf_ct_frag6_queue(struct nf_ct_frag6_queue *fq, struct sk_buff *skb, | |||
| 412 | /* If we already have some bits beyond end | 401 | /* If we already have some bits beyond end |
| 413 | * or have different end, the segment is corrupted. | 402 | * or have different end, the segment is corrupted. |
| 414 | */ | 403 | */ |
| 415 | if (end < fq->len || | 404 | if (end < fq->q.len || |
| 416 | ((fq->last_in & LAST_IN) && end != fq->len)) { | 405 | ((fq->q.last_in & LAST_IN) && end != fq->q.len)) { |
| 417 | pr_debug("already received last fragment\n"); | 406 | pr_debug("already received last fragment\n"); |
| 418 | goto err; | 407 | goto err; |
| 419 | } | 408 | } |
| 420 | fq->last_in |= LAST_IN; | 409 | fq->q.last_in |= LAST_IN; |
| 421 | fq->len = end; | 410 | fq->q.len = end; |
| 422 | } else { | 411 | } else { |
| 423 | /* Check if the fragment is rounded to 8 bytes. | 412 | /* Check if the fragment is rounded to 8 bytes. |
| 424 | * Required by the RFC. | 413 | * Required by the RFC. |
| @@ -430,13 +419,13 @@ static int nf_ct_frag6_queue(struct nf_ct_frag6_queue *fq, struct sk_buff *skb, | |||
| 430 | pr_debug("end of fragment not rounded to 8 bytes.\n"); | 419 | pr_debug("end of fragment not rounded to 8 bytes.\n"); |
| 431 | return -1; | 420 | return -1; |
| 432 | } | 421 | } |
| 433 | if (end > fq->len) { | 422 | if (end > fq->q.len) { |
| 434 | /* Some bits beyond end -> corruption. */ | 423 | /* Some bits beyond end -> corruption. */ |
| 435 | if (fq->last_in & LAST_IN) { | 424 | if (fq->q.last_in & LAST_IN) { |
| 436 | pr_debug("last packet already reached.\n"); | 425 | pr_debug("last packet already reached.\n"); |
| 437 | goto err; | 426 | goto err; |
| 438 | } | 427 | } |
| 439 | fq->len = end; | 428 | fq->q.len = end; |
| 440 | } | 429 | } |
| 441 | } | 430 | } |
| 442 | 431 | ||
| @@ -458,7 +447,7 @@ static int nf_ct_frag6_queue(struct nf_ct_frag6_queue *fq, struct sk_buff *skb, | |||
| 458 | * this fragment, right? | 447 | * this fragment, right? |
| 459 | */ | 448 | */ |
| 460 | prev = NULL; | 449 | prev = NULL; |
| 461 | for (next = fq->fragments; next != NULL; next = next->next) { | 450 | for (next = fq->q.fragments; next != NULL; next = next->next) { |
| 462 | if (NFCT_FRAG6_CB(next)->offset >= offset) | 451 | if (NFCT_FRAG6_CB(next)->offset >= offset) |
| 463 | break; /* bingo! */ | 452 | break; /* bingo! */ |
| 464 | prev = next; | 453 | prev = next; |
| @@ -503,7 +492,7 @@ static int nf_ct_frag6_queue(struct nf_ct_frag6_queue *fq, struct sk_buff *skb, | |||
| 503 | 492 | ||
| 504 | /* next fragment */ | 493 | /* next fragment */ |
| 505 | NFCT_FRAG6_CB(next)->offset += i; | 494 | NFCT_FRAG6_CB(next)->offset += i; |
| 506 | fq->meat -= i; | 495 | fq->q.meat -= i; |
| 507 | if (next->ip_summed != CHECKSUM_UNNECESSARY) | 496 | if (next->ip_summed != CHECKSUM_UNNECESSARY) |
| 508 | next->ip_summed = CHECKSUM_NONE; | 497 | next->ip_summed = CHECKSUM_NONE; |
| 509 | break; | 498 | break; |
| @@ -518,9 +507,9 @@ static int nf_ct_frag6_queue(struct nf_ct_frag6_queue *fq, struct sk_buff *skb, | |||
| 518 | if (prev) | 507 | if (prev) |
| 519 | prev->next = next; | 508 | prev->next = next; |
| 520 | else | 509 | else |
| 521 | fq->fragments = next; | 510 | fq->q.fragments = next; |
| 522 | 511 | ||
| 523 | fq->meat -= free_it->len; | 512 | fq->q.meat -= free_it->len; |
| 524 | frag_kfree_skb(free_it, NULL); | 513 | frag_kfree_skb(free_it, NULL); |
| 525 | } | 514 | } |
| 526 | } | 515 | } |
| @@ -532,11 +521,11 @@ static int nf_ct_frag6_queue(struct nf_ct_frag6_queue *fq, struct sk_buff *skb, | |||
| 532 | if (prev) | 521 | if (prev) |
| 533 | prev->next = skb; | 522 | prev->next = skb; |
| 534 | else | 523 | else |
| 535 | fq->fragments = skb; | 524 | fq->q.fragments = skb; |
| 536 | 525 | ||
| 537 | skb->dev = NULL; | 526 | skb->dev = NULL; |
| 538 | fq->stamp = skb->tstamp; | 527 | fq->q.stamp = skb->tstamp; |
| 539 | fq->meat += skb->len; | 528 | fq->q.meat += skb->len; |
| 540 | atomic_add(skb->truesize, &nf_ct_frag6_mem); | 529 | atomic_add(skb->truesize, &nf_ct_frag6_mem); |
| 541 | 530 | ||
| 542 | /* The first fragment. | 531 | /* The first fragment. |
| @@ -544,10 +533,10 @@ static int nf_ct_frag6_queue(struct nf_ct_frag6_queue *fq, struct sk_buff *skb, | |||
| 544 | */ | 533 | */ |
| 545 | if (offset == 0) { | 534 | if (offset == 0) { |
| 546 | fq->nhoffset = nhoff; | 535 | fq->nhoffset = nhoff; |
| 547 | fq->last_in |= FIRST_IN; | 536 | fq->q.last_in |= FIRST_IN; |
| 548 | } | 537 | } |
| 549 | write_lock(&nf_ct_frag6_lock); | 538 | write_lock(&nf_ct_frag6_lock); |
| 550 | list_move_tail(&fq->lru_list, &nf_ct_frag6_lru_list); | 539 | list_move_tail(&fq->q.lru_list, &nf_ct_frag6_lru_list); |
| 551 | write_unlock(&nf_ct_frag6_lock); | 540 | write_unlock(&nf_ct_frag6_lock); |
| 552 | return 0; | 541 | return 0; |
| 553 | 542 | ||
| @@ -567,7 +556,7 @@ err: | |||
| 567 | static struct sk_buff * | 556 | static struct sk_buff * |
| 568 | nf_ct_frag6_reasm(struct nf_ct_frag6_queue *fq, struct net_device *dev) | 557 | nf_ct_frag6_reasm(struct nf_ct_frag6_queue *fq, struct net_device *dev) |
| 569 | { | 558 | { |
| 570 | struct sk_buff *fp, *op, *head = fq->fragments; | 559 | struct sk_buff *fp, *op, *head = fq->q.fragments; |
| 571 | int payload_len; | 560 | int payload_len; |
| 572 | 561 | ||
| 573 | fq_kill(fq); | 562 | fq_kill(fq); |
| @@ -577,7 +566,7 @@ nf_ct_frag6_reasm(struct nf_ct_frag6_queue *fq, struct net_device *dev) | |||
| 577 | 566 | ||
| 578 | /* Unfragmented part is taken from the first segment. */ | 567 | /* Unfragmented part is taken from the first segment. */ |
| 579 | payload_len = ((head->data - skb_network_header(head)) - | 568 | payload_len = ((head->data - skb_network_header(head)) - |
| 580 | sizeof(struct ipv6hdr) + fq->len - | 569 | sizeof(struct ipv6hdr) + fq->q.len - |
| 581 | sizeof(struct frag_hdr)); | 570 | sizeof(struct frag_hdr)); |
| 582 | if (payload_len > IPV6_MAXPLEN) { | 571 | if (payload_len > IPV6_MAXPLEN) { |
| 583 | pr_debug("payload len is too large.\n"); | 572 | pr_debug("payload len is too large.\n"); |
| @@ -643,7 +632,7 @@ nf_ct_frag6_reasm(struct nf_ct_frag6_queue *fq, struct net_device *dev) | |||
| 643 | 632 | ||
| 644 | head->next = NULL; | 633 | head->next = NULL; |
| 645 | head->dev = dev; | 634 | head->dev = dev; |
| 646 | head->tstamp = fq->stamp; | 635 | head->tstamp = fq->q.stamp; |
| 647 | ipv6_hdr(head)->payload_len = htons(payload_len); | 636 | ipv6_hdr(head)->payload_len = htons(payload_len); |
| 648 | 637 | ||
| 649 | /* Yes, and fold redundant checksum back. 8) */ | 638 | /* Yes, and fold redundant checksum back. 8) */ |
| @@ -652,7 +641,7 @@ nf_ct_frag6_reasm(struct nf_ct_frag6_queue *fq, struct net_device *dev) | |||
| 652 | skb_network_header_len(head), | 641 | skb_network_header_len(head), |
| 653 | head->csum); | 642 | head->csum); |
| 654 | 643 | ||
| 655 | fq->fragments = NULL; | 644 | fq->q.fragments = NULL; |
| 656 | 645 | ||
| 657 | /* all original skbs are linked into the NFCT_FRAG6_CB(head).orig */ | 646 | /* all original skbs are linked into the NFCT_FRAG6_CB(head).orig */ |
| 658 | fp = skb_shinfo(head)->frag_list; | 647 | fp = skb_shinfo(head)->frag_list; |
| @@ -797,21 +786,21 @@ struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb) | |||
| 797 | goto ret_orig; | 786 | goto ret_orig; |
| 798 | } | 787 | } |
| 799 | 788 | ||
| 800 | spin_lock(&fq->lock); | 789 | spin_lock(&fq->q.lock); |
| 801 | 790 | ||
| 802 | if (nf_ct_frag6_queue(fq, clone, fhdr, nhoff) < 0) { | 791 | if (nf_ct_frag6_queue(fq, clone, fhdr, nhoff) < 0) { |
| 803 | spin_unlock(&fq->lock); | 792 | spin_unlock(&fq->q.lock); |
| 804 | pr_debug("Can't insert skb to queue\n"); | 793 | pr_debug("Can't insert skb to queue\n"); |
| 805 | fq_put(fq, NULL); | 794 | fq_put(fq, NULL); |
| 806 | goto ret_orig; | 795 | goto ret_orig; |
| 807 | } | 796 | } |
| 808 | 797 | ||
| 809 | if (fq->last_in == (FIRST_IN|LAST_IN) && fq->meat == fq->len) { | 798 | if (fq->q.last_in == (FIRST_IN|LAST_IN) && fq->q.meat == fq->q.len) { |
| 810 | ret_skb = nf_ct_frag6_reasm(fq, dev); | 799 | ret_skb = nf_ct_frag6_reasm(fq, dev); |
| 811 | if (ret_skb == NULL) | 800 | if (ret_skb == NULL) |
| 812 | pr_debug("Can't reassemble fragmented packets\n"); | 801 | pr_debug("Can't reassemble fragmented packets\n"); |
| 813 | } | 802 | } |
| 814 | spin_unlock(&fq->lock); | 803 | spin_unlock(&fq->q.lock); |
| 815 | 804 | ||
| 816 | fq_put(fq, NULL); | 805 | fq_put(fq, NULL); |
| 817 | return ret_skb; | 806 | return ret_skb; |
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index fa1055b669d1..aef5dd1ebc8a 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
| @@ -54,6 +54,7 @@ | |||
| 54 | #include <net/rawv6.h> | 54 | #include <net/rawv6.h> |
| 55 | #include <net/ndisc.h> | 55 | #include <net/ndisc.h> |
| 56 | #include <net/addrconf.h> | 56 | #include <net/addrconf.h> |
| 57 | #include <net/inet_frag.h> | ||
| 57 | 58 | ||
| 58 | int sysctl_ip6frag_high_thresh __read_mostly = 256*1024; | 59 | int sysctl_ip6frag_high_thresh __read_mostly = 256*1024; |
| 59 | int sysctl_ip6frag_low_thresh __read_mostly = 192*1024; | 60 | int sysctl_ip6frag_low_thresh __read_mostly = 192*1024; |
| @@ -75,26 +76,14 @@ struct ip6frag_skb_cb | |||
| 75 | 76 | ||
| 76 | struct frag_queue | 77 | struct frag_queue |
| 77 | { | 78 | { |
| 78 | struct hlist_node list; | 79 | struct inet_frag_queue q; |
| 79 | struct list_head lru_list; /* lru list member */ | ||
| 80 | 80 | ||
| 81 | __be32 id; /* fragment id */ | 81 | __be32 id; /* fragment id */ |
| 82 | struct in6_addr saddr; | 82 | struct in6_addr saddr; |
| 83 | struct in6_addr daddr; | 83 | struct in6_addr daddr; |
| 84 | 84 | ||
| 85 | spinlock_t lock; | ||
| 86 | atomic_t refcnt; | ||
| 87 | struct timer_list timer; /* expire timer */ | ||
| 88 | struct sk_buff *fragments; | ||
| 89 | int len; | ||
| 90 | int meat; | ||
| 91 | int iif; | 85 | int iif; |
| 92 | ktime_t stamp; | ||
| 93 | unsigned int csum; | 86 | unsigned int csum; |
| 94 | __u8 last_in; /* has first/last segment arrived? */ | ||
| 95 | #define COMPLETE 4 | ||
| 96 | #define FIRST_IN 2 | ||
| 97 | #define LAST_IN 1 | ||
| 98 | __u16 nhoffset; | 87 | __u16 nhoffset; |
| 99 | }; | 88 | }; |
| 100 | 89 | ||
| @@ -113,8 +102,8 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev, | |||
| 113 | 102 | ||
| 114 | static __inline__ void __fq_unlink(struct frag_queue *fq) | 103 | static __inline__ void __fq_unlink(struct frag_queue *fq) |
| 115 | { | 104 | { |
| 116 | hlist_del(&fq->list); | 105 | hlist_del(&fq->q.list); |
| 117 | list_del(&fq->lru_list); | 106 | list_del(&fq->q.lru_list); |
| 118 | ip6_frag_nqueues--; | 107 | ip6_frag_nqueues--; |
| 119 | } | 108 | } |
| 120 | 109 | ||
| @@ -170,16 +159,16 @@ static void ip6_frag_secret_rebuild(unsigned long dummy) | |||
| 170 | struct frag_queue *q; | 159 | struct frag_queue *q; |
| 171 | struct hlist_node *p, *n; | 160 | struct hlist_node *p, *n; |
| 172 | 161 | ||
| 173 | hlist_for_each_entry_safe(q, p, n, &ip6_frag_hash[i], list) { | 162 | hlist_for_each_entry_safe(q, p, n, &ip6_frag_hash[i], q.list) { |
| 174 | unsigned int hval = ip6qhashfn(q->id, | 163 | unsigned int hval = ip6qhashfn(q->id, |
| 175 | &q->saddr, | 164 | &q->saddr, |
| 176 | &q->daddr); | 165 | &q->daddr); |
| 177 | 166 | ||
| 178 | if (hval != i) { | 167 | if (hval != i) { |
| 179 | hlist_del(&q->list); | 168 | hlist_del(&q->q.list); |
| 180 | 169 | ||
| 181 | /* Relink to new hash chain. */ | 170 | /* Relink to new hash chain. */ |
| 182 | hlist_add_head(&q->list, | 171 | hlist_add_head(&q->q.list, |
| 183 | &ip6_frag_hash[hval]); | 172 | &ip6_frag_hash[hval]); |
| 184 | 173 | ||
| 185 | } | 174 | } |
| @@ -226,11 +215,11 @@ static void ip6_frag_destroy(struct frag_queue *fq, int *work) | |||
| 226 | { | 215 | { |
| 227 | struct sk_buff *fp; | 216 | struct sk_buff *fp; |
| 228 | 217 | ||
| 229 | BUG_TRAP(fq->last_in&COMPLETE); | 218 | BUG_TRAP(fq->q.last_in&COMPLETE); |
| 230 | BUG_TRAP(del_timer(&fq->timer) == 0); | 219 | BUG_TRAP(del_timer(&fq->q.timer) == 0); |
| 231 | 220 | ||
| 232 | /* Release all fragment data. */ | 221 | /* Release all fragment data. */ |
| 233 | fp = fq->fragments; | 222 | fp = fq->q.fragments; |
| 234 | while (fp) { | 223 | while (fp) { |
| 235 | struct sk_buff *xp = fp->next; | 224 | struct sk_buff *xp = fp->next; |
| 236 | 225 | ||
| @@ -243,7 +232,7 @@ static void ip6_frag_destroy(struct frag_queue *fq, int *work) | |||
| 243 | 232 | ||
| 244 | static __inline__ void fq_put(struct frag_queue *fq, int *work) | 233 | static __inline__ void fq_put(struct frag_queue *fq, int *work) |
| 245 | { | 234 | { |
| 246 | if (atomic_dec_and_test(&fq->refcnt)) | 235 | if (atomic_dec_and_test(&fq->q.refcnt)) |
| 247 | ip6_frag_destroy(fq, work); | 236 | ip6_frag_destroy(fq, work); |
| 248 | } | 237 | } |
| 249 | 238 | ||
| @@ -252,13 +241,13 @@ static __inline__ void fq_put(struct frag_queue *fq, int *work) | |||
| 252 | */ | 241 | */ |
| 253 | static __inline__ void fq_kill(struct frag_queue *fq) | 242 | static __inline__ void fq_kill(struct frag_queue *fq) |
| 254 | { | 243 | { |
| 255 | if (del_timer(&fq->timer)) | 244 | if (del_timer(&fq->q.timer)) |
| 256 | atomic_dec(&fq->refcnt); | 245 | atomic_dec(&fq->q.refcnt); |
| 257 | 246 | ||
| 258 | if (!(fq->last_in & COMPLETE)) { | 247 | if (!(fq->q.last_in & COMPLETE)) { |
| 259 | fq_unlink(fq); | 248 | fq_unlink(fq); |
| 260 | atomic_dec(&fq->refcnt); | 249 | atomic_dec(&fq->q.refcnt); |
| 261 | fq->last_in |= COMPLETE; | 250 | fq->q.last_in |= COMPLETE; |
| 262 | } | 251 | } |
| 263 | } | 252 | } |
| 264 | 253 | ||
| @@ -279,14 +268,14 @@ static void ip6_evictor(struct inet6_dev *idev) | |||
| 279 | return; | 268 | return; |
| 280 | } | 269 | } |
| 281 | tmp = ip6_frag_lru_list.next; | 270 | tmp = ip6_frag_lru_list.next; |
| 282 | fq = list_entry(tmp, struct frag_queue, lru_list); | 271 | fq = list_entry(tmp, struct frag_queue, q.lru_list); |
| 283 | atomic_inc(&fq->refcnt); | 272 | atomic_inc(&fq->q.refcnt); |
| 284 | read_unlock(&ip6_frag_lock); | 273 | read_unlock(&ip6_frag_lock); |
| 285 | 274 | ||
| 286 | spin_lock(&fq->lock); | 275 | spin_lock(&fq->q.lock); |
| 287 | if (!(fq->last_in&COMPLETE)) | 276 | if (!(fq->q.last_in&COMPLETE)) |
| 288 | fq_kill(fq); | 277 | fq_kill(fq); |
| 289 | spin_unlock(&fq->lock); | 278 | spin_unlock(&fq->q.lock); |
| 290 | 279 | ||
| 291 | fq_put(fq, &work); | 280 | fq_put(fq, &work); |
| 292 | IP6_INC_STATS_BH(idev, IPSTATS_MIB_REASMFAILS); | 281 | IP6_INC_STATS_BH(idev, IPSTATS_MIB_REASMFAILS); |
| @@ -298,9 +287,9 @@ static void ip6_frag_expire(unsigned long data) | |||
| 298 | struct frag_queue *fq = (struct frag_queue *) data; | 287 | struct frag_queue *fq = (struct frag_queue *) data; |
| 299 | struct net_device *dev = NULL; | 288 | struct net_device *dev = NULL; |
| 300 | 289 | ||
| 301 | spin_lock(&fq->lock); | 290 | spin_lock(&fq->q.lock); |
| 302 | 291 | ||
| 303 | if (fq->last_in & COMPLETE) | 292 | if (fq->q.last_in & COMPLETE) |
| 304 | goto out; | 293 | goto out; |
| 305 | 294 | ||
| 306 | fq_kill(fq); | 295 | fq_kill(fq); |
| @@ -315,7 +304,7 @@ static void ip6_frag_expire(unsigned long data) | |||
| 315 | rcu_read_unlock(); | 304 | rcu_read_unlock(); |
| 316 | 305 | ||
| 317 | /* Don't send error if the first segment did not arrive. */ | 306 | /* Don't send error if the first segment did not arrive. */ |
| 318 | if (!(fq->last_in&FIRST_IN) || !fq->fragments) | 307 | if (!(fq->q.last_in&FIRST_IN) || !fq->q.fragments) |
| 319 | goto out; | 308 | goto out; |
| 320 | 309 | ||
| 321 | /* | 310 | /* |
| @@ -323,12 +312,12 @@ static void ip6_frag_expire(unsigned long data) | |||
| 323 | segment was received. And do not use fq->dev | 312 | segment was received. And do not use fq->dev |
| 324 | pointer directly, device might already disappeared. | 313 | pointer directly, device might already disappeared. |
| 325 | */ | 314 | */ |
| 326 | fq->fragments->dev = dev; | 315 | fq->q.fragments->dev = dev; |
| 327 | icmpv6_send(fq->fragments, ICMPV6_TIME_EXCEED, ICMPV6_EXC_FRAGTIME, 0, dev); | 316 | icmpv6_send(fq->q.fragments, ICMPV6_TIME_EXCEED, ICMPV6_EXC_FRAGTIME, 0, dev); |
| 328 | out: | 317 | out: |
| 329 | if (dev) | 318 | if (dev) |
| 330 | dev_put(dev); | 319 | dev_put(dev); |
| 331 | spin_unlock(&fq->lock); | 320 | spin_unlock(&fq->q.lock); |
| 332 | fq_put(fq, NULL); | 321 | fq_put(fq, NULL); |
| 333 | } | 322 | } |
| 334 | 323 | ||
| @@ -346,13 +335,13 @@ static struct frag_queue *ip6_frag_intern(struct frag_queue *fq_in) | |||
| 346 | write_lock(&ip6_frag_lock); | 335 | write_lock(&ip6_frag_lock); |
| 347 | hash = ip6qhashfn(fq_in->id, &fq_in->saddr, &fq_in->daddr); | 336 | hash = ip6qhashfn(fq_in->id, &fq_in->saddr, &fq_in->daddr); |
| 348 | #ifdef CONFIG_SMP | 337 | #ifdef CONFIG_SMP |
| 349 | hlist_for_each_entry(fq, n, &ip6_frag_hash[hash], list) { | 338 | hlist_for_each_entry(fq, n, &ip6_frag_hash[hash], q.list) { |
| 350 | if (fq->id == fq_in->id && | 339 | if (fq->id == fq_in->id && |
| 351 | ipv6_addr_equal(&fq_in->saddr, &fq->saddr) && | 340 | ipv6_addr_equal(&fq_in->saddr, &fq->saddr) && |
| 352 | ipv6_addr_equal(&fq_in->daddr, &fq->daddr)) { | 341 | ipv6_addr_equal(&fq_in->daddr, &fq->daddr)) { |
| 353 | atomic_inc(&fq->refcnt); | 342 | atomic_inc(&fq->q.refcnt); |
| 354 | write_unlock(&ip6_frag_lock); | 343 | write_unlock(&ip6_frag_lock); |
| 355 | fq_in->last_in |= COMPLETE; | 344 | fq_in->q.last_in |= COMPLETE; |
| 356 | fq_put(fq_in, NULL); | 345 | fq_put(fq_in, NULL); |
| 357 | return fq; | 346 | return fq; |
| 358 | } | 347 | } |
| @@ -360,13 +349,13 @@ static struct frag_queue *ip6_frag_intern(struct frag_queue *fq_in) | |||
| 360 | #endif | 349 | #endif |
| 361 | fq = fq_in; | 350 | fq = fq_in; |
| 362 | 351 | ||
| 363 | if (!mod_timer(&fq->timer, jiffies + sysctl_ip6frag_time)) | 352 | if (!mod_timer(&fq->q.timer, jiffies + sysctl_ip6frag_time)) |
| 364 | atomic_inc(&fq->refcnt); | 353 | atomic_inc(&fq->q.refcnt); |
| 365 | 354 | ||
| 366 | atomic_inc(&fq->refcnt); | 355 | atomic_inc(&fq->q.refcnt); |
| 367 | hlist_add_head(&fq->list, &ip6_frag_hash[hash]); | 356 | hlist_add_head(&fq->q.list, &ip6_frag_hash[hash]); |
| 368 | INIT_LIST_HEAD(&fq->lru_list); | 357 | INIT_LIST_HEAD(&fq->q.lru_list); |
| 369 | list_add_tail(&fq->lru_list, &ip6_frag_lru_list); | 358 | list_add_tail(&fq->q.lru_list, &ip6_frag_lru_list); |
| 370 | ip6_frag_nqueues++; | 359 | ip6_frag_nqueues++; |
| 371 | write_unlock(&ip6_frag_lock); | 360 | write_unlock(&ip6_frag_lock); |
| 372 | return fq; | 361 | return fq; |
| @@ -386,11 +375,11 @@ ip6_frag_create(__be32 id, struct in6_addr *src, struct in6_addr *dst, | |||
| 386 | ipv6_addr_copy(&fq->saddr, src); | 375 | ipv6_addr_copy(&fq->saddr, src); |
| 387 | ipv6_addr_copy(&fq->daddr, dst); | 376 | ipv6_addr_copy(&fq->daddr, dst); |
| 388 | 377 | ||
| 389 | init_timer(&fq->timer); | 378 | init_timer(&fq->q.timer); |
| 390 | fq->timer.function = ip6_frag_expire; | 379 | fq->q.timer.function = ip6_frag_expire; |
| 391 | fq->timer.data = (long) fq; | 380 | fq->q.timer.data = (long) fq; |
| 392 | spin_lock_init(&fq->lock); | 381 | spin_lock_init(&fq->q.lock); |
| 393 | atomic_set(&fq->refcnt, 1); | 382 | atomic_set(&fq->q.refcnt, 1); |
| 394 | 383 | ||
| 395 | return ip6_frag_intern(fq); | 384 | return ip6_frag_intern(fq); |
| 396 | 385 | ||
| @@ -409,11 +398,11 @@ fq_find(__be32 id, struct in6_addr *src, struct in6_addr *dst, | |||
| 409 | 398 | ||
| 410 | read_lock(&ip6_frag_lock); | 399 | read_lock(&ip6_frag_lock); |
| 411 | hash = ip6qhashfn(id, src, dst); | 400 | hash = ip6qhashfn(id, src, dst); |
| 412 | hlist_for_each_entry(fq, n, &ip6_frag_hash[hash], list) { | 401 | hlist_for_each_entry(fq, n, &ip6_frag_hash[hash], q.list) { |
| 413 | if (fq->id == id && | 402 | if (fq->id == id && |
| 414 | ipv6_addr_equal(src, &fq->saddr) && | 403 | ipv6_addr_equal(src, &fq->saddr) && |
| 415 | ipv6_addr_equal(dst, &fq->daddr)) { | 404 | ipv6_addr_equal(dst, &fq->daddr)) { |
| 416 | atomic_inc(&fq->refcnt); | 405 | atomic_inc(&fq->q.refcnt); |
| 417 | read_unlock(&ip6_frag_lock); | 406 | read_unlock(&ip6_frag_lock); |
| 418 | return fq; | 407 | return fq; |
| 419 | } | 408 | } |
| @@ -431,7 +420,7 @@ static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb, | |||
| 431 | struct net_device *dev; | 420 | struct net_device *dev; |
| 432 | int offset, end; | 421 | int offset, end; |
| 433 | 422 | ||
| 434 | if (fq->last_in & COMPLETE) | 423 | if (fq->q.last_in & COMPLETE) |
| 435 | goto err; | 424 | goto err; |
| 436 | 425 | ||
| 437 | offset = ntohs(fhdr->frag_off) & ~0x7; | 426 | offset = ntohs(fhdr->frag_off) & ~0x7; |
| @@ -459,11 +448,11 @@ static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb, | |||
| 459 | /* If we already have some bits beyond end | 448 | /* If we already have some bits beyond end |
| 460 | * or have different end, the segment is corrupted. | 449 | * or have different end, the segment is corrupted. |
| 461 | */ | 450 | */ |
| 462 | if (end < fq->len || | 451 | if (end < fq->q.len || |
| 463 | ((fq->last_in & LAST_IN) && end != fq->len)) | 452 | ((fq->q.last_in & LAST_IN) && end != fq->q.len)) |
| 464 | goto err; | 453 | goto err; |
| 465 | fq->last_in |= LAST_IN; | 454 | fq->q.last_in |= LAST_IN; |
| 466 | fq->len = end; | 455 | fq->q.len = end; |
| 467 | } else { | 456 | } else { |
| 468 | /* Check if the fragment is rounded to 8 bytes. | 457 | /* Check if the fragment is rounded to 8 bytes. |
| 469 | * Required by the RFC. | 458 | * Required by the RFC. |
| @@ -478,11 +467,11 @@ static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb, | |||
| 478 | offsetof(struct ipv6hdr, payload_len)); | 467 | offsetof(struct ipv6hdr, payload_len)); |
| 479 | return -1; | 468 | return -1; |
| 480 | } | 469 | } |
| 481 | if (end > fq->len) { | 470 | if (end > fq->q.len) { |
| 482 | /* Some bits beyond end -> corruption. */ | 471 | /* Some bits beyond end -> corruption. */ |
| 483 | if (fq->last_in & LAST_IN) | 472 | if (fq->q.last_in & LAST_IN) |
| 484 | goto err; | 473 | goto err; |
| 485 | fq->len = end; | 474 | fq->q.len = end; |
| 486 | } | 475 | } |
| 487 | } | 476 | } |
| 488 | 477 | ||
| @@ -501,7 +490,7 @@ static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb, | |||
| 501 | * this fragment, right? | 490 | * this fragment, right? |
| 502 | */ | 491 | */ |
| 503 | prev = NULL; | 492 | prev = NULL; |
| 504 | for(next = fq->fragments; next != NULL; next = next->next) { | 493 | for(next = fq->q.fragments; next != NULL; next = next->next) { |
| 505 | if (FRAG6_CB(next)->offset >= offset) | 494 | if (FRAG6_CB(next)->offset >= offset) |
| 506 | break; /* bingo! */ | 495 | break; /* bingo! */ |
| 507 | prev = next; | 496 | prev = next; |
| @@ -538,7 +527,7 @@ static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb, | |||
| 538 | if (!pskb_pull(next, i)) | 527 | if (!pskb_pull(next, i)) |
| 539 | goto err; | 528 | goto err; |
| 540 | FRAG6_CB(next)->offset += i; /* next fragment */ | 529 | FRAG6_CB(next)->offset += i; /* next fragment */ |
| 541 | fq->meat -= i; | 530 | fq->q.meat -= i; |
| 542 | if (next->ip_summed != CHECKSUM_UNNECESSARY) | 531 | if (next->ip_summed != CHECKSUM_UNNECESSARY) |
| 543 | next->ip_summed = CHECKSUM_NONE; | 532 | next->ip_summed = CHECKSUM_NONE; |
| 544 | break; | 533 | break; |
| @@ -553,9 +542,9 @@ static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb, | |||
| 553 | if (prev) | 542 | if (prev) |
| 554 | prev->next = next; | 543 | prev->next = next; |
| 555 | else | 544 | else |
| 556 | fq->fragments = next; | 545 | fq->q.fragments = next; |
| 557 | 546 | ||
| 558 | fq->meat -= free_it->len; | 547 | fq->q.meat -= free_it->len; |
| 559 | frag_kfree_skb(free_it, NULL); | 548 | frag_kfree_skb(free_it, NULL); |
| 560 | } | 549 | } |
| 561 | } | 550 | } |
| @@ -567,15 +556,15 @@ static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb, | |||
| 567 | if (prev) | 556 | if (prev) |
| 568 | prev->next = skb; | 557 | prev->next = skb; |
| 569 | else | 558 | else |
| 570 | fq->fragments = skb; | 559 | fq->q.fragments = skb; |
| 571 | 560 | ||
| 572 | dev = skb->dev; | 561 | dev = skb->dev; |
| 573 | if (dev) { | 562 | if (dev) { |
| 574 | fq->iif = dev->ifindex; | 563 | fq->iif = dev->ifindex; |
| 575 | skb->dev = NULL; | 564 | skb->dev = NULL; |
| 576 | } | 565 | } |
| 577 | fq->stamp = skb->tstamp; | 566 | fq->q.stamp = skb->tstamp; |
| 578 | fq->meat += skb->len; | 567 | fq->q.meat += skb->len; |
| 579 | atomic_add(skb->truesize, &ip6_frag_mem); | 568 | atomic_add(skb->truesize, &ip6_frag_mem); |
| 580 | 569 | ||
| 581 | /* The first fragment. | 570 | /* The first fragment. |
| @@ -583,14 +572,14 @@ static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb, | |||
| 583 | */ | 572 | */ |
| 584 | if (offset == 0) { | 573 | if (offset == 0) { |
| 585 | fq->nhoffset = nhoff; | 574 | fq->nhoffset = nhoff; |
| 586 | fq->last_in |= FIRST_IN; | 575 | fq->q.last_in |= FIRST_IN; |
| 587 | } | 576 | } |
| 588 | 577 | ||
| 589 | if (fq->last_in == (FIRST_IN | LAST_IN) && fq->meat == fq->len) | 578 | if (fq->q.last_in == (FIRST_IN | LAST_IN) && fq->q.meat == fq->q.len) |
| 590 | return ip6_frag_reasm(fq, prev, dev); | 579 | return ip6_frag_reasm(fq, prev, dev); |
| 591 | 580 | ||
| 592 | write_lock(&ip6_frag_lock); | 581 | write_lock(&ip6_frag_lock); |
| 593 | list_move_tail(&fq->lru_list, &ip6_frag_lru_list); | 582 | list_move_tail(&fq->q.lru_list, &ip6_frag_lru_list); |
| 594 | write_unlock(&ip6_frag_lock); | 583 | write_unlock(&ip6_frag_lock); |
| 595 | return -1; | 584 | return -1; |
| 596 | 585 | ||
| @@ -612,7 +601,7 @@ err: | |||
| 612 | static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev, | 601 | static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev, |
| 613 | struct net_device *dev) | 602 | struct net_device *dev) |
| 614 | { | 603 | { |
| 615 | struct sk_buff *fp, *head = fq->fragments; | 604 | struct sk_buff *fp, *head = fq->q.fragments; |
| 616 | int payload_len; | 605 | int payload_len; |
| 617 | unsigned int nhoff; | 606 | unsigned int nhoff; |
| 618 | 607 | ||
| @@ -629,11 +618,11 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev, | |||
| 629 | fp->next = head->next; | 618 | fp->next = head->next; |
| 630 | prev->next = fp; | 619 | prev->next = fp; |
| 631 | 620 | ||
| 632 | skb_morph(head, fq->fragments); | 621 | skb_morph(head, fq->q.fragments); |
| 633 | head->next = fq->fragments->next; | 622 | head->next = fq->q.fragments->next; |
| 634 | 623 | ||
| 635 | kfree_skb(fq->fragments); | 624 | kfree_skb(fq->q.fragments); |
| 636 | fq->fragments = head; | 625 | fq->q.fragments = head; |
| 637 | } | 626 | } |
| 638 | 627 | ||
| 639 | BUG_TRAP(head != NULL); | 628 | BUG_TRAP(head != NULL); |
| @@ -641,7 +630,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev, | |||
| 641 | 630 | ||
| 642 | /* Unfragmented part is taken from the first segment. */ | 631 | /* Unfragmented part is taken from the first segment. */ |
| 643 | payload_len = ((head->data - skb_network_header(head)) - | 632 | payload_len = ((head->data - skb_network_header(head)) - |
| 644 | sizeof(struct ipv6hdr) + fq->len - | 633 | sizeof(struct ipv6hdr) + fq->q.len - |
| 645 | sizeof(struct frag_hdr)); | 634 | sizeof(struct frag_hdr)); |
| 646 | if (payload_len > IPV6_MAXPLEN) | 635 | if (payload_len > IPV6_MAXPLEN) |
| 647 | goto out_oversize; | 636 | goto out_oversize; |
| @@ -700,7 +689,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev, | |||
| 700 | 689 | ||
| 701 | head->next = NULL; | 690 | head->next = NULL; |
| 702 | head->dev = dev; | 691 | head->dev = dev; |
| 703 | head->tstamp = fq->stamp; | 692 | head->tstamp = fq->q.stamp; |
| 704 | ipv6_hdr(head)->payload_len = htons(payload_len); | 693 | ipv6_hdr(head)->payload_len = htons(payload_len); |
| 705 | IP6CB(head)->nhoff = nhoff; | 694 | IP6CB(head)->nhoff = nhoff; |
| 706 | 695 | ||
| @@ -713,7 +702,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev, | |||
| 713 | rcu_read_lock(); | 702 | rcu_read_lock(); |
| 714 | IP6_INC_STATS_BH(__in6_dev_get(dev), IPSTATS_MIB_REASMOKS); | 703 | IP6_INC_STATS_BH(__in6_dev_get(dev), IPSTATS_MIB_REASMOKS); |
| 715 | rcu_read_unlock(); | 704 | rcu_read_unlock(); |
| 716 | fq->fragments = NULL; | 705 | fq->q.fragments = NULL; |
| 717 | return 1; | 706 | return 1; |
| 718 | 707 | ||
| 719 | out_oversize: | 708 | out_oversize: |
| @@ -773,11 +762,11 @@ static int ipv6_frag_rcv(struct sk_buff **skbp) | |||
| 773 | ip6_dst_idev(skb->dst))) != NULL) { | 762 | ip6_dst_idev(skb->dst))) != NULL) { |
| 774 | int ret; | 763 | int ret; |
| 775 | 764 | ||
| 776 | spin_lock(&fq->lock); | 765 | spin_lock(&fq->q.lock); |
| 777 | 766 | ||
| 778 | ret = ip6_frag_queue(fq, skb, fhdr, IP6CB(skb)->nhoff); | 767 | ret = ip6_frag_queue(fq, skb, fhdr, IP6CB(skb)->nhoff); |
| 779 | 768 | ||
| 780 | spin_unlock(&fq->lock); | 769 | spin_unlock(&fq->q.lock); |
| 781 | fq_put(fq, NULL); | 770 | fq_put(fq, NULL); |
| 782 | return ret; | 771 | return ret; |
| 783 | } | 772 | } |
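
The ipv6/reassembly.c hunks above are purely mechanical: the shared state now lives in the embedded `q` member, so `fq->refcnt`, `fq->len`, `fq->meat`, `fq->last_in` and friends become `fq->q.refcnt`, `fq->q.len`, `fq->q.meat`, `fq->q.last_in`. A minimal user-space sketch of the same embedding pattern follows; every name in it (`inet_frag_queue_sketch`, `frag_queue_sketch`, `sketch_is_complete`, the `SKETCH_*` flags) is illustrative only and not taken from the patch.

```c
#include <stdio.h>

/* Cut-down stand-in for the shared reassembly state. */
struct inet_frag_queue_sketch {
	int len;                /* total length of the original datagram */
	int meat;               /* bytes received so far */
	unsigned char last_in;  /* FIRST_IN / LAST_IN style flags */
};

#define SKETCH_LAST_IN  1
#define SKETCH_FIRST_IN 2

/*
 * The protocol-specific queue embeds the common part as a named member,
 * so code that used to write fq->meat now writes fq->q.meat, exactly the
 * shape of the hunks above.
 */
struct frag_queue_sketch {
	struct inet_frag_queue_sketch q;
	unsigned int id;        /* protocol-private key material */
};

static int sketch_is_complete(const struct frag_queue_sketch *fq)
{
	return (fq->q.last_in & (SKETCH_FIRST_IN | SKETCH_LAST_IN)) ==
	       (SKETCH_FIRST_IN | SKETCH_LAST_IN) &&
	       fq->q.meat == fq->q.len;
}

int main(void)
{
	struct frag_queue_sketch fq = { .q = { .len = 1500 } };

	/* Pretend both the first and the last fragment arrived. */
	fq.q.meat = 1500;
	fq.q.last_in = SKETCH_FIRST_IN | SKETCH_LAST_IN;

	printf("reassembly complete: %d\n", sketch_is_complete(&fq));
	return 0;
}
```

The point of embedding a struct rather than copying its fields into each queue type is that the timer, lock, refcount and LRU handling can later be written once against the embedded member, while the protocol-specific key fields (addresses, id, user) stay in the outer struct.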
