author     Pavel Emelyanov <xemul@openvz.org>           2007-10-15 05:39:14 -0400
committer  David S. Miller <davem@sunset.davemloft.net>  2007-10-15 15:26:42 -0400
commit     1e4b82873af0f21002e37a81ef063d2e5410deb3 (patch)
tree       9c4054c8393f03bae9565f98a109cc5721cf490f
parent     321a3a99e4717b960e21c62fc6a140d21453df7f (diff)
[INET]: Consolidate the xxx_frag_destroy
To make it possible we need to know the exact frag queue
size for inet_frags->mem management and two callbacks:
 * to destroy the skb (optional, used by conntrack only)
 * to free the queue itself (mandatory, but later I plan to
   move the allocation and the destruction of frag_queues
   into a common place, so this callback will most likely
   become optional too).
Signed-off-by: Pavel Emelyanov <xemul@openvz.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
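
Illustrative sketch only, not part of this patch: how a hypothetical protocol "my" might fill in the new inet_frags fields introduced here. The struct my_frag_queue, my_hashfn and my_frag_init names are invented for the example; the real registrations for IPv4, IPv6 and nf_conntrack are in the per-file hunks below.

/* Sketch: wiring a hypothetical protocol into the consolidated inet_frags API. */
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <net/inet_frag.h>

struct my_frag_queue {
	struct inet_frag_queue	q;	/* generic part, embedded so container_of() works */
	/* ... protocol-private reassembly state ... */
};

static struct inet_frags my_frags;

static unsigned int my_hashfn(struct inet_frag_queue *q)
{
	return 0;	/* placeholder hash for the sketch */
}

/* Mandatory callback: free the protocol's queue descriptor itself. */
static void my_frag_free(struct inet_frag_queue *q)
{
	kfree(container_of(q, struct my_frag_queue, q));
}

static void __init my_frag_init(void)
{
	my_frags.hashfn     = my_hashfn;
	my_frags.destructor = my_frag_free;			/* frees the queue */
	my_frags.skb_free   = NULL;				/* only conntrack needs this hook */
	my_frags.qsize      = sizeof(struct my_frag_queue);	/* charged against my_frags.mem */
	inet_frags_init(&my_frags);
}

With this in place, dropping the last reference would call inet_frag_destroy(&fq->q, &my_frags, work): it frees every skb on the fragment list (invoking skb_free first when set), subtracts qsize from my_frags.mem, and finally hands the queue to the destructor callback.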
-rw-r--r--  include/net/inet_frag.h                 |  5
-rw-r--r--  net/ipv4/inet_fragment.c                | 40
-rw-r--r--  net/ipv4/ip_fragment.c                  | 39
-rw-r--r--  net/ipv6/netfilter/nf_conntrack_reasm.c | 44
-rw-r--r--  net/ipv6/reassembly.c                   | 32
5 files changed, 74 insertions(+), 86 deletions(-)
diff --git a/include/net/inet_frag.h b/include/net/inet_frag.h
index e374412ff42b..2dd1cd4e7f44 100644
--- a/include/net/inet_frag.h
+++ b/include/net/inet_frag.h
@@ -33,16 +33,21 @@ struct inet_frags {
 	rwlock_t		lock;
 	u32			rnd;
 	int			nqueues;
+	int			qsize;
 	atomic_t		mem;
 	struct timer_list	secret_timer;
 	struct inet_frags_ctl	*ctl;
 
 	unsigned int		(*hashfn)(struct inet_frag_queue *);
+	void			(*destructor)(struct inet_frag_queue *);
+	void			(*skb_free)(struct sk_buff *);
 };
 
 void inet_frags_init(struct inet_frags *);
 void inet_frags_fini(struct inet_frags *);
 
 void inet_frag_kill(struct inet_frag_queue *q, struct inet_frags *f);
+void inet_frag_destroy(struct inet_frag_queue *q,
+					struct inet_frags *f, int *work);
 
 #endif
diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
index ec10e05c6666..15fb2c4a36a7 100644
--- a/net/ipv4/inet_fragment.c
+++ b/net/ipv4/inet_fragment.c
@@ -17,6 +17,8 @@
 #include <linux/timer.h>
 #include <linux/mm.h>
 #include <linux/random.h>
+#include <linux/skbuff.h>
+#include <linux/rtnetlink.h>
 
 #include <net/inet_frag.h>
 
@@ -100,3 +102,41 @@ void inet_frag_kill(struct inet_frag_queue *fq, struct inet_frags *f)
 }
 
 EXPORT_SYMBOL(inet_frag_kill);
+
+static inline void frag_kfree_skb(struct inet_frags *f, struct sk_buff *skb,
+					int *work)
+{
+	if (work)
+		*work -= skb->truesize;
+
+	atomic_sub(skb->truesize, &f->mem);
+	if (f->skb_free)
+		f->skb_free(skb);
+	kfree_skb(skb);
+}
+
+void inet_frag_destroy(struct inet_frag_queue *q, struct inet_frags *f,
+					int *work)
+{
+	struct sk_buff *fp;
+
+	BUG_TRAP(q->last_in & COMPLETE);
+	BUG_TRAP(del_timer(&q->timer) == 0);
+
+	/* Release all fragment data. */
+	fp = q->fragments;
+	while (fp) {
+		struct sk_buff *xp = fp->next;
+
+		frag_kfree_skb(f, fp, work);
+		fp = xp;
+	}
+
+	if (work)
+		*work -= f->qsize;
+	atomic_sub(f->qsize, &f->mem);
+
+	f->destructor(q);
+
+}
+EXPORT_SYMBOL(inet_frag_destroy);
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index e231c248aea7..e8736632094a 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -132,11 +132,13 @@ static __inline__ void frag_kfree_skb(struct sk_buff *skb, int *work)
 	kfree_skb(skb);
 }
 
-static __inline__ void frag_free_queue(struct ipq *qp, int *work)
+static __inline__ void ip4_frag_free(struct inet_frag_queue *q)
 {
-	if (work)
-		*work -= sizeof(struct ipq);
-	atomic_sub(sizeof(struct ipq), &ip4_frags.mem);
+	struct ipq *qp;
+
+	qp = container_of(q, struct ipq, q);
+	if (qp->peer)
+		inet_putpeer(qp->peer);
 	kfree(qp);
 }
 
@@ -153,34 +155,10 @@ static __inline__ struct ipq *frag_alloc_queue(void)
 
 /* Destruction primitives. */
 
-/* Complete destruction of ipq. */
-static void ip_frag_destroy(struct ipq *qp, int *work)
-{
-	struct sk_buff *fp;
-
-	BUG_TRAP(qp->q.last_in&COMPLETE);
-	BUG_TRAP(del_timer(&qp->q.timer) == 0);
-
-	if (qp->peer)
-		inet_putpeer(qp->peer);
-
-	/* Release all fragment data. */
-	fp = qp->q.fragments;
-	while (fp) {
-		struct sk_buff *xp = fp->next;
-
-		frag_kfree_skb(fp, work);
-		fp = xp;
-	}
-
-	/* Finally, release the queue descriptor itself. */
-	frag_free_queue(qp, work);
-}
-
 static __inline__ void ipq_put(struct ipq *ipq, int *work)
 {
 	if (atomic_dec_and_test(&ipq->q.refcnt))
-		ip_frag_destroy(ipq, work);
+		inet_frag_destroy(&ipq->q, &ip4_frags, work);
 }
 
 /* Kill ipq entry. It is not destroyed immediately,
@@ -721,6 +699,9 @@ void __init ipfrag_init(void)
 {
 	ip4_frags.ctl = &ip4_frags_ctl;
 	ip4_frags.hashfn = ip4_hashfn;
+	ip4_frags.destructor = ip4_frag_free;
+	ip4_frags.skb_free = NULL;
+	ip4_frags.qsize = sizeof(struct ipq);
 	inet_frags_init(&ip4_frags);
 }
 
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index a3aef387bcfb..785f5cda188e 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -114,25 +114,25 @@ static unsigned int nf_hashfn(struct inet_frag_queue *q)
 	return ip6qhashfn(nq->id, &nq->saddr, &nq->daddr);
 }
 
+static void nf_skb_free(struct sk_buff *skb)
+{
+	if (NFCT_FRAG6_CB(skb)->orig)
+		kfree_skb(NFCT_FRAG6_CB(skb)->orig);
+}
+
 /* Memory Tracking Functions. */
 static inline void frag_kfree_skb(struct sk_buff *skb, unsigned int *work)
 {
 	if (work)
 		*work -= skb->truesize;
 	atomic_sub(skb->truesize, &nf_frags.mem);
-	if (NFCT_FRAG6_CB(skb)->orig)
-		kfree_skb(NFCT_FRAG6_CB(skb)->orig);
-
+	nf_skb_free(skb);
 	kfree_skb(skb);
 }
 
-static inline void frag_free_queue(struct nf_ct_frag6_queue *fq,
-				   unsigned int *work)
+static void nf_frag_free(struct inet_frag_queue *q)
 {
-	if (work)
-		*work -= sizeof(struct nf_ct_frag6_queue);
-	atomic_sub(sizeof(struct nf_ct_frag6_queue), &nf_frags.mem);
-	kfree(fq);
+	kfree(container_of(q, struct nf_ct_frag6_queue, q));
 }
 
 static inline struct nf_ct_frag6_queue *frag_alloc_queue(void)
@@ -147,31 +147,10 @@ static inline struct nf_ct_frag6_queue *frag_alloc_queue(void)
 
 /* Destruction primitives. */
 
-/* Complete destruction of fq. */
-static void nf_ct_frag6_destroy(struct nf_ct_frag6_queue *fq,
-				unsigned int *work)
-{
-	struct sk_buff *fp;
-
-	BUG_TRAP(fq->q.last_in&COMPLETE);
-	BUG_TRAP(del_timer(&fq->q.timer) == 0);
-
-	/* Release all fragment data. */
-	fp = fq->q.fragments;
-	while (fp) {
-		struct sk_buff *xp = fp->next;
-
-		frag_kfree_skb(fp, work);
-		fp = xp;
-	}
-
-	frag_free_queue(fq, work);
-}
-
 static __inline__ void fq_put(struct nf_ct_frag6_queue *fq, unsigned int *work)
 {
 	if (atomic_dec_and_test(&fq->q.refcnt))
-		nf_ct_frag6_destroy(fq, work);
+		inet_frag_destroy(&fq->q, &nf_frags, work);
 }
 
 /* Kill fq entry. It is not destroyed immediately,
@@ -799,6 +778,9 @@ int nf_ct_frag6_init(void)
 {
 	nf_frags.ctl = &nf_frags_ctl;
 	nf_frags.hashfn = nf_hashfn;
+	nf_frags.destructor = nf_frag_free;
+	nf_frags.skb_free = nf_skb_free;
+	nf_frags.qsize = sizeof(struct nf_ct_frag6_queue);
 	inet_frags_init(&nf_frags);
 
 	return 0;
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index c7d4961bbcf7..940b7d2383ec 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -152,12 +152,9 @@ static inline void frag_kfree_skb(struct sk_buff *skb, int *work)
 	kfree_skb(skb);
 }
 
-static inline void frag_free_queue(struct frag_queue *fq, int *work)
+static void ip6_frag_free(struct inet_frag_queue *fq)
 {
-	if (work)
-		*work -= sizeof(struct frag_queue);
-	atomic_sub(sizeof(struct frag_queue), &ip6_frags.mem);
-	kfree(fq);
+	kfree(container_of(fq, struct frag_queue, q));
 }
 
 static inline struct frag_queue *frag_alloc_queue(void)
@@ -172,30 +169,10 @@ static inline struct frag_queue *frag_alloc_queue(void)
 
 /* Destruction primitives. */
 
-/* Complete destruction of fq. */
-static void ip6_frag_destroy(struct frag_queue *fq, int *work)
-{
-	struct sk_buff *fp;
-
-	BUG_TRAP(fq->q.last_in&COMPLETE);
-	BUG_TRAP(del_timer(&fq->q.timer) == 0);
-
-	/* Release all fragment data. */
-	fp = fq->q.fragments;
-	while (fp) {
-		struct sk_buff *xp = fp->next;
-
-		frag_kfree_skb(fp, work);
-		fp = xp;
-	}
-
-	frag_free_queue(fq, work);
-}
-
 static __inline__ void fq_put(struct frag_queue *fq, int *work)
 {
 	if (atomic_dec_and_test(&fq->q.refcnt))
-		ip6_frag_destroy(fq, work);
+		inet_frag_destroy(&fq->q, &ip6_frags, work);
 }
 
 /* Kill fq entry. It is not destroyed immediately,
@@ -744,5 +721,8 @@ void __init ipv6_frag_init(void)
 
 	ip6_frags.ctl = &ip6_frags_ctl;
 	ip6_frags.hashfn = ip6_hashfn;
+	ip6_frags.destructor = ip6_frag_free;
+	ip6_frags.skb_free = NULL;
+	ip6_frags.qsize = sizeof(struct frag_queue);
 	inet_frags_init(&ip6_frags);
 }