Diffstat (limited to 'net/ipv4/inet_fragment.c')
-rw-r--r--	net/ipv4/inet_fragment.c	| 89
1 file changed, 88 insertions(+), 1 deletion(-)
diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
index 484cf512858f..e15e04fc6661 100644
--- a/net/ipv4/inet_fragment.c
+++ b/net/ipv4/inet_fragment.c
@@ -136,7 +136,9 @@ void inet_frag_destroy(struct inet_frag_queue *q, struct inet_frags *f,
 		*work -= f->qsize;
 	atomic_sub(f->qsize, &f->mem);
 
-	f->destructor(q);
+	if (f->destructor)
+		f->destructor(q);
+	kfree(q);
 
 }
 EXPORT_SYMBOL(inet_frag_destroy);
@@ -172,3 +174,88 @@ int inet_frag_evictor(struct inet_frags *f)
 	return evicted;
 }
 EXPORT_SYMBOL(inet_frag_evictor);
+
+static struct inet_frag_queue *inet_frag_intern(struct inet_frag_queue *qp_in,
+		struct inet_frags *f, unsigned int hash, void *arg)
+{
+	struct inet_frag_queue *qp;
+#ifdef CONFIG_SMP
+	struct hlist_node *n;
+#endif
+
+	write_lock(&f->lock);
+#ifdef CONFIG_SMP
+	/* With SMP race we have to recheck hash table, because
+	 * such entry could be created on other cpu, while we
+	 * promoted read lock to write lock.
+	 */
+	hlist_for_each_entry(qp, n, &f->hash[hash], list) {
+		if (f->match(qp, arg)) {
+			atomic_inc(&qp->refcnt);
+			write_unlock(&f->lock);
+			qp_in->last_in |= COMPLETE;
+			inet_frag_put(qp_in, f);
+			return qp;
+		}
+	}
+#endif
+	qp = qp_in;
+	if (!mod_timer(&qp->timer, jiffies + f->ctl->timeout))
+		atomic_inc(&qp->refcnt);
+
+	atomic_inc(&qp->refcnt);
+	hlist_add_head(&qp->list, &f->hash[hash]);
+	list_add_tail(&qp->lru_list, &f->lru_list);
+	f->nqueues++;
+	write_unlock(&f->lock);
+	return qp;
+}
+
+static struct inet_frag_queue *inet_frag_alloc(struct inet_frags *f, void *arg)
+{
+	struct inet_frag_queue *q;
+
+	q = kzalloc(f->qsize, GFP_ATOMIC);
+	if (q == NULL)
+		return NULL;
+
+	f->constructor(q, arg);
+	atomic_add(f->qsize, &f->mem);
+	setup_timer(&q->timer, f->frag_expire, (unsigned long)q);
+	spin_lock_init(&q->lock);
+	atomic_set(&q->refcnt, 1);
+
+	return q;
+}
+
+static struct inet_frag_queue *inet_frag_create(struct inet_frags *f,
+		void *arg, unsigned int hash)
+{
+	struct inet_frag_queue *q;
+
+	q = inet_frag_alloc(f, arg);
+	if (q == NULL)
+		return NULL;
+
+	return inet_frag_intern(q, f, hash, arg);
+}
+
+struct inet_frag_queue *inet_frag_find(struct inet_frags *f, void *key,
+		unsigned int hash)
+{
+	struct inet_frag_queue *q;
+	struct hlist_node *n;
+
+	read_lock(&f->lock);
+	hlist_for_each_entry(q, n, &f->hash[hash], list) {
+		if (f->match(q, key)) {
+			atomic_inc(&q->refcnt);
+			read_unlock(&f->lock);
+			return q;
+		}
+	}
+	read_unlock(&f->lock);
+
+	return inet_frag_create(f, key, hash);
+}
+EXPORT_SYMBOL(inet_frag_find);
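
Note: the helpers added above give every fragment-reassembly user a single lookup-or-create entry point. Below is a minimal sketch, not part of this commit, of how a protocol might sit on top of inet_frag_find(). All my_*-prefixed names (my_queue, my_frags, my_match, my_constructor, my_find) are hypothetical; the sketch assumes the protocol embeds struct inet_frag_queue at the start of its own queue structure, registers sizeof() that structure as f->qsize, and fills in the struct inet_frags callbacks at init time.

#include <linux/types.h>
#include <net/inet_frag.h>	/* struct inet_frags, struct inet_frag_queue, inet_frag_find() */

struct my_queue {			/* hypothetical per-protocol queue */
	struct inet_frag_queue q;	/* must come first: f->qsize covers the whole struct */
	__be16 id;			/* protocol-specific lookup key */
};

/* hypothetical registry; .match, .constructor, .qsize, .ctl etc.
 * are assumed to be filled in at init time (not shown) */
static struct inet_frags my_frags;

static int my_match(struct inet_frag_queue *q, void *key)
{
	/* called under f->lock by inet_frag_find()/inet_frag_intern() */
	return container_of(q, struct my_queue, q)->id == *(__be16 *)key;
}

static void my_constructor(struct inet_frag_queue *q, void *key)
{
	/* called by inet_frag_alloc() right after kzalloc() */
	container_of(q, struct my_queue, q)->id = *(__be16 *)key;
}

static struct my_queue *my_find(__be16 id, unsigned int hash)
{
	struct inet_frag_queue *q;

	/* Returns an existing queue or a freshly interned one, in both
	 * cases with a reference already taken for the caller; NULL only
	 * if the GFP_ATOMIC allocation in inet_frag_alloc() failed. */
	q = inet_frag_find(&my_frags, &id, hash);
	if (q == NULL)
		return NULL;
	return container_of(q, struct my_queue, q);
}

The locking and refcounting in the new helpers fit together as follows: inet_frag_find() scans the chain under the read lock, and only when nothing matches does it allocate and call inet_frag_intern(), which takes the write lock. Because the read lock was dropped in between, the chain is re-scanned under the write lock on SMP; if another CPU interned an equal queue first, the freshly allocated one is marked COMPLETE and released via inet_frag_put(), which undoes its timer and memory accounting. The reference counts line up as: one reference from inet_frag_alloc() handed back to the caller, one taken for the armed timer, and one for the hash table insertion.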