path: root/net/ipv4/esp4.c
author		Herbert Xu <herbert@gondor.apana.org.au>	2007-10-09 16:33:35 -0400
committer	David S. Miller <davem@sunset.davemloft.net>	2007-10-10 19:55:03 -0400
commit		b7c6538cd84f8072fad43bfce530f5bf695edbba (patch)
tree		e0ba79ffe7b79355985a45de9961b17a0462764f /net/ipv4/esp4.c
parent		050f009e16f908932070313c1745d09dc69fd62b (diff)
[IPSEC]: Move state lock into x->type->output
This patch releases the lock on the state before calling x->type->output.
It also adds the lock to the spots where it is currently needed.  Most of
those places (all except mip6) are expected to disappear with async crypto.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: David S. Miller <davem@davemloft.net>
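For context, the caller-side pattern this change implies in the generic xfrm output path looks roughly like the sketch below. This is an illustration assembled from the description above, not part of this diff; the exact code in net/xfrm/xfrm_output.c may differ.

	/* Sketch only: per-state bookkeeping stays under x->lock, but the
	 * lock is dropped before the type handler runs, so esp_output()
	 * below has to take x->lock itself around the sections that still
	 * need it (x->encap, the IV, the cipher state).
	 */
	spin_lock_bh(&x->lock);
	x->curlft.bytes += skb->len;	/* lifetime accounting */
	x->curlft.packets++;
	spin_unlock_bh(&x->lock);

	err = x->type->output(x, skb);	/* e.g. esp_output(), called unlocked */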
Diffstat (limited to 'net/ipv4/esp4.c')
-rw-r--r--  net/ipv4/esp4.c | 10 ++++++++--
1 file changed, 8 insertions(+), 2 deletions(-)
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index ffd565350411..452910dae89f 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -8,6 +8,7 @@
 #include <linux/kernel.h>
 #include <linux/pfkeyv2.h>
 #include <linux/random.h>
+#include <linux/spinlock.h>
 #include <net/icmp.h>
 #include <net/protocol.h>
 #include <net/udp.h>
@@ -66,6 +67,8 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
 	top_iph->tot_len = htons(skb->len + alen);
 	*(skb_tail_pointer(trailer) - 1) = top_iph->protocol;
 
+	spin_lock_bh(&x->lock);
+
 	/* this is non-NULL only with UDP Encapsulation */
 	if (x->encap) {
 		struct xfrm_encap_tmpl *encap = x->encap;
@@ -111,7 +114,7 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
 	if (unlikely(nfrags > ESP_NUM_FAST_SG)) {
 		sg = kmalloc(sizeof(struct scatterlist)*nfrags, GFP_ATOMIC);
 		if (!sg)
-			goto error;
+			goto unlock;
 	}
 	skb_to_sgvec(skb, sg, esph->enc_data+esp->conf.ivlen-skb->data, clen);
 	err = crypto_blkcipher_encrypt(&desc, sg, sg, clen);
@@ -120,7 +123,7 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
 	} while (0);
 
 	if (unlikely(err))
-		goto error;
+		goto unlock;
 
 	if (esp->conf.ivlen) {
 		memcpy(esph->enc_data, esp->conf.ivec, esp->conf.ivlen);
@@ -133,6 +136,9 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
 		memcpy(pskb_put(skb, trailer, alen), esp->auth.work_icv, alen);
 	}
 
+unlock:
+	spin_unlock_bh(&x->lock);
+
 	ip_send_check(top_iph);
 
 error:
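The new error handling follows the usual goto-unlock idiom: failures inside the locked region jump to the unlock label so that every path drops the lock before the trailing ip_send_check()/error code runs. A small self-contained userspace analogue of that idiom, with hypothetical names and a pthread mutex standing in for x->lock:

#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;

/* Hypothetical example of the goto-unlock pattern used above: work done
 * under the lock bails out to "unlock" on failure, and both the success
 * and failure paths release the lock before the trailing cleanup. */
static int output_locked(size_t nfrags)
{
	void *sg = NULL;
	int err = 0;

	pthread_mutex_lock(&state_lock);

	sg = calloc(nfrags, sizeof(void *));
	if (!sg) {
		err = -1;
		goto unlock;		/* error inside the locked region */
	}

	/* ... transform the data while the lock is held ... */

unlock:
	pthread_mutex_unlock(&state_lock);	/* released on every path */

	free(sg);				/* cleanup that does not need the lock */
	return err;
}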