author     Martin Willi <martin@strongswan.org>    2010-12-07 23:37:50 -0500
committer  David S. Miller <davem@davemloft.net>   2010-12-10 17:43:59 -0500
commit     d979e20f2b9f8a50c8d5f889e0b5d78580440d1f
tree       fed3b0c657cfb62d0e2f3df25fbc4cad058059b2 /net
parent     35d2856b4693e8de5d616307b56cef296b839157
xfrm: Traffic Flow Confidentiality for IPv4 ESP
Add TFC padding to all packets smaller than the boundary configured on the
xfrm state. If the boundary is larger than the PMTU, limit padding to the
PMTU.

Signed-off-by: Martin Willi <martin@strongswan.org>
Acked-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: David S. Miller <davem@davemloft.net>
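For illustration only: the padding-length rule described above reduces to the
small userspace C sketch below. The helper tfc_pad_len() and its parameters are
hypothetical stand-ins for x->tfcpad, skb->len and the ESP payload MTU that
esp_output() uses; in the kernel, the usual ESP alignment padding (plen) is
then added on top of this TFC padding.

/*
 * Standalone sketch of the TFC padding rule added by this patch:
 * pad the payload up to the configured boundary, but never beyond
 * the path MTU. Not kernel code; names are illustrative.
 */
#include <stdio.h>

static unsigned int tfc_pad_len(unsigned int payload_len,
                                unsigned int tfcpad,
                                unsigned int pmtu)
{
        unsigned int padto;

        if (!tfcpad)                            /* TFC padding disabled */
                return 0;

        padto = tfcpad < pmtu ? tfcpad : pmtu;  /* cap boundary at the PMTU */
        return payload_len < padto ? padto - payload_len : 0;
}

int main(void)
{
        /* 100-byte payload, 512-byte boundary, 1440-byte ESP MTU -> 412 */
        printf("tfclen = %u\n", tfc_pad_len(100, 512, 1440));
        /* boundary larger than the PMTU: padding is limited to the PMTU -> 1340 */
        printf("tfclen = %u\n", tfc_pad_len(100, 9000, 1440));
        /* packet already at or above the boundary: no TFC padding -> 0 */
        printf("tfclen = %u\n", tfc_pad_len(600, 512, 1440));
        return 0;
}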
Diffstat (limited to 'net')
-rw-r--r--  net/ipv4/esp4.c | 32
1 file changed, 24 insertions(+), 8 deletions(-)
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index 14ca1f1c3fb0..e42a905180f0 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -23,6 +23,8 @@ struct esp_skb_cb {
 
 #define ESP_SKB_CB(__skb) ((struct esp_skb_cb *)&((__skb)->cb[0]))
 
+static u32 esp4_get_mtu(struct xfrm_state *x, int mtu);
+
 /*
  * Allocate an AEAD request structure with extra space for SG and IV.
  *
@@ -117,25 +119,35 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
         int blksize;
         int clen;
         int alen;
+        int plen;
+        int tfclen;
         int nfrags;
 
         /* skb is pure payload to encrypt */
 
         err = -ENOMEM;
 
-        /* Round to block size */
-        clen = skb->len;
-
         esp = x->data;
         aead = esp->aead;
         alen = crypto_aead_authsize(aead);
 
+        tfclen = 0;
+        if (x->tfcpad) {
+                struct xfrm_dst *dst = (struct xfrm_dst *)skb_dst(skb);
+                u32 padto;
+
+                padto = min(x->tfcpad, esp4_get_mtu(x, dst->child_mtu_cached));
+                if (skb->len < padto)
+                        tfclen = padto - skb->len;
+        }
         blksize = ALIGN(crypto_aead_blocksize(aead), 4);
-        clen = ALIGN(clen + 2, blksize);
+        clen = ALIGN(skb->len + 2 + tfclen, blksize);
         if (esp->padlen)
                 clen = ALIGN(clen, esp->padlen);
+        plen = clen - skb->len - tfclen;
 
-        if ((err = skb_cow_data(skb, clen - skb->len + alen, &trailer)) < 0)
+        err = skb_cow_data(skb, tfclen + plen + alen, &trailer);
+        if (err < 0)
                 goto error;
         nfrags = err;
 
@@ -150,13 +162,17 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
 
         /* Fill padding... */
         tail = skb_tail_pointer(trailer);
+        if (tfclen) {
+                memset(tail, 0, tfclen);
+                tail += tfclen;
+        }
         do {
                 int i;
-                for (i=0; i<clen-skb->len - 2; i++)
+                for (i = 0; i < plen - 2; i++)
                         tail[i] = i + 1;
         } while (0);
-        tail[clen - skb->len - 2] = (clen - skb->len) - 2;
-        tail[clen - skb->len - 1] = *skb_mac_header(skb);
+        tail[plen - 2] = plen - 2;
+        tail[plen - 1] = *skb_mac_header(skb);
         pskb_put(skb, trailer, clen - skb->len + alen);
 
         skb_push(skb, -skb_network_offset(skb));