Diffstat (limited to 'net/ipv4/ip_fragment.c')
-rw-r--r--  net/ipv4/ip_fragment.c | 54
1 file changed, 18 insertions(+), 36 deletions(-)
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index d12a18b8f568..4b1bbbee22c5 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -123,6 +123,20 @@ static unsigned int ip4_hashfn(struct inet_frag_queue *q)
 	return ipqhashfn(ipq->id, ipq->saddr, ipq->daddr, ipq->protocol);
 }
 
+static int ip4_frag_equal(struct inet_frag_queue *q1,
+			  struct inet_frag_queue *q2)
+{
+	struct ipq *qp1, *qp2;
+
+	qp1 = container_of(q1, struct ipq, q);
+	qp2 = container_of(q2, struct ipq, q);
+	return (qp1->id == qp2->id &&
+		qp1->saddr == qp2->saddr &&
+		qp1->daddr == qp2->daddr &&
+		qp1->protocol == qp2->protocol &&
+		qp1->user == qp2->user);
+}
+
 /* Memory Tracking Functions. */
 static __inline__ void frag_kfree_skb(struct sk_buff *skb, int *work)
 {
@@ -214,43 +228,10 @@ out:
 
 static struct ipq *ip_frag_intern(struct ipq *qp_in, unsigned int hash)
 {
-	struct ipq *qp;
-#ifdef CONFIG_SMP
-	struct hlist_node *n;
-#endif
+	struct inet_frag_queue *q;
 
-	write_lock(&ip4_frags.lock);
-#ifdef CONFIG_SMP
-	/* With SMP race we have to recheck hash table, because
-	 * such entry could be created on other cpu, while we
-	 * promoted read lock to write lock.
-	 */
-	hlist_for_each_entry(qp, n, &ip4_frags.hash[hash], q.list) {
-		if (qp->id == qp_in->id &&
-		    qp->saddr == qp_in->saddr &&
-		    qp->daddr == qp_in->daddr &&
-		    qp->protocol == qp_in->protocol &&
-		    qp->user == qp_in->user) {
-			atomic_inc(&qp->q.refcnt);
-			write_unlock(&ip4_frags.lock);
-			qp_in->q.last_in |= COMPLETE;
-			ipq_put(qp_in);
-			return qp;
-		}
-	}
-#endif
-	qp = qp_in;
-
-	if (!mod_timer(&qp->q.timer, jiffies + ip4_frags_ctl.timeout))
-		atomic_inc(&qp->q.refcnt);
-
-	atomic_inc(&qp->q.refcnt);
-	hlist_add_head(&qp->q.list, &ip4_frags.hash[hash]);
-	INIT_LIST_HEAD(&qp->q.lru_list);
-	list_add_tail(&qp->q.lru_list, &ip4_frags.lru_list);
-	ip4_frags.nqueues++;
-	write_unlock(&ip4_frags.lock);
-	return qp;
+	q = inet_frag_intern(&qp_in->q, &ip4_frags, hash);
+	return container_of(q, struct ipq, q);
 }
 
 /* Add an entry to the 'ipq' queue for a newly received IP datagram. */
@@ -671,6 +652,7 @@ void __init ipfrag_init(void)
 	ip4_frags.destructor = ip4_frag_free;
 	ip4_frags.skb_free = NULL;
 	ip4_frags.qsize = sizeof(struct ipq);
+	ip4_frags.equal = ip4_frag_equal;
 	inet_frags_init(&ip4_frags);
 }
 