author     Stephen Hemminger <shemminger@linux-foundation.org>  2007-03-08 23:44:43 -0500
committer  David S. Miller <davem@sunset.davemloft.net>         2007-04-26 01:24:11 -0400
commit     132adf54639cf7dd9315e8df89c2faa59f6e46d9
tree       256e3e30e843e6144bdae68e4aad181db5819d0e /net/ipv4/ip_fragment.c
parent     1ac58ee37f439044eb09381f33c97ce0e7f2643b
[IPV4]: cleanup
Add whitespace around keywords.
Signed-off-by: Stephen Hemminger <shemminger@linux-foundation.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
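The rule being applied here comes from the kernel's Documentation/CodingStyle: use a space after most control keywords (if, switch, case, for, do, while), but none after function-like keywords such as sizeof, and none between a function name and its opening parenthesis. A minimal compilable sketch of the convention, using a hypothetical struct frag in place of the kernel's struct sk_buff chain:

#include <stdlib.h>

/* Hypothetical fragment node, defined only so this sketch compiles on
 * its own; the kernel code walks struct sk_buff chains off struct ipq. */
struct frag {
	int offset;
	struct frag *next;
};

/* Insertion-point search shaped like the loop in ip_frag_queue() below. */
static struct frag *frag_before(struct frag *head, int offset)
{
	struct frag *prev = NULL;
	struct frag *next;

	/* Space after control keywords such as 'for' and 'if'... */
	for (next = head; next != NULL; next = next->next) {
		if (next->offset >= offset)
			break;
		prev = next;
	}
	return prev;
}

/* ...but no space after sizeof, which reads like a function call. */
static struct frag *frag_alloc(void)
{
	return malloc(sizeof(struct frag));
}

Note that widening "if(" to "if (" shifts the aligned continuation lines of the multi-line conditions by one column, so those lines change as well; that is why the diffstat below counts 13 insertions and 13 deletions rather than 5.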
Diffstat (limited to 'net/ipv4/ip_fragment.c')
 net/ipv4/ip_fragment.c | 26 +++++++++++++-------------
 1 file changed, 13 insertions(+), 13 deletions(-)
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index e10be7d7752d..3dfd7581cfc6 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -184,7 +184,7 @@ static __inline__ struct ipq *frag_alloc_queue(void)
 {
 	struct ipq *qp = kmalloc(sizeof(struct ipq), GFP_ATOMIC);
 
-	if(!qp)
+	if (!qp)
 		return NULL;
 	atomic_add(sizeof(struct ipq), &ip_frag_mem);
 	return qp;
@@ -321,11 +321,11 @@ static struct ipq *ip_frag_intern(struct ipq *qp_in)
 	 * promoted read lock to write lock.
 	 */
 	hlist_for_each_entry(qp, n, &ipq_hash[hash], list) {
-		if(qp->id == qp_in->id &&
-		   qp->saddr == qp_in->saddr &&
-		   qp->daddr == qp_in->daddr &&
-		   qp->protocol == qp_in->protocol &&
-		   qp->user == qp_in->user) {
+		if (qp->id == qp_in->id &&
+		    qp->saddr == qp_in->saddr &&
+		    qp->daddr == qp_in->daddr &&
+		    qp->protocol == qp_in->protocol &&
+		    qp->user == qp_in->user) {
 			atomic_inc(&qp->refcnt);
 			write_unlock(&ipfrag_lock);
 			qp_in->last_in |= COMPLETE;
@@ -398,11 +398,11 @@ static inline struct ipq *ip_find(struct iphdr *iph, u32 user)
 	read_lock(&ipfrag_lock);
 	hash = ipqhashfn(id, saddr, daddr, protocol);
 	hlist_for_each_entry(qp, n, &ipq_hash[hash], list) {
-		if(qp->id == id &&
-		   qp->saddr == saddr &&
-		   qp->daddr == daddr &&
-		   qp->protocol == protocol &&
-		   qp->user == user) {
+		if (qp->id == id &&
+		    qp->saddr == saddr &&
+		    qp->daddr == daddr &&
+		    qp->protocol == protocol &&
+		    qp->user == user) {
 			atomic_inc(&qp->refcnt);
 			read_unlock(&ipfrag_lock);
 			return qp;
@@ -524,7 +524,7 @@ static void ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
 	 * this fragment, right?
 	 */
 	prev = NULL;
-	for(next = qp->fragments; next != NULL; next = next->next) {
+	for (next = qp->fragments; next != NULL; next = next->next) {
 		if (FRAG_CB(next)->offset >= offset)
 			break; /* bingo! */
 		prev = next;
@@ -627,7 +627,7 @@ static struct sk_buff *ip_frag_reasm(struct ipq *qp, struct net_device *dev)
 	ihlen = head->nh.iph->ihl*4;
 	len = ihlen + qp->len;
 
-	if(len > 65535)
+	if (len > 65535)
 		goto out_oversize;
 
 	/* Head of list must not be cloned. */