diff options
author | Glenn Griffin <ggriffin.kernel@gmail.com> | 2008-02-08 00:49:26 -0500 |
---|---|---|
committer | YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org> | 2008-03-04 01:18:21 -0500 |
commit | c6aefafb7ec620911d46174eed514f9df639e5a4 (patch) | |
tree | 626e3d47a7bb31f586935c480bed09f342f2fbca /net/ipv6/tcp_ipv6.c | |
parent | 11baab7ac34723ad481e0f97fca733272ef364d4 (diff) |
[TCP]: Add IPv6 support to TCP SYN cookies
Updated to incorporate Eric's suggestion of using a per cpu buffer
rather than allocating on the stack. Just a two-line change, but will
resend in its entirety.
Signed-off-by: Glenn Griffin <ggriffin.kernel@gmail.com>
Signed-off-by: YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org>
Diffstat (limited to 'net/ipv6/tcp_ipv6.c')
-rw-r--r-- | net/ipv6/tcp_ipv6.c | 77 |
1 files changed, 55 insertions, 22 deletions
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 1cbbb87dbad2..fd773ac7531a 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c | |||
@@ -512,6 +512,20 @@ done: | |||
512 | return err; | 512 | return err; |
513 | } | 513 | } |
514 | 514 | ||
515 | static inline void syn_flood_warning(struct sk_buff *skb) | ||
516 | { | ||
517 | #ifdef CONFIG_SYN_COOKIES | ||
518 | if (sysctl_tcp_syncookies) | ||
519 | printk(KERN_INFO | ||
520 | "TCPv6: Possible SYN flooding on port %d. " | ||
521 | "Sending cookies.\n", ntohs(tcp_hdr(skb)->dest)); | ||
522 | else | ||
523 | #endif | ||
524 | printk(KERN_INFO | ||
525 | "TCPv6: Possible SYN flooding on port %d. " | ||
526 | "Dropping request.\n", ntohs(tcp_hdr(skb)->dest)); | ||
527 | } | ||
528 | |||
515 | static void tcp_v6_reqsk_destructor(struct request_sock *req) | 529 | static void tcp_v6_reqsk_destructor(struct request_sock *req) |
516 | { | 530 | { |
517 | if (inet6_rsk(req)->pktopts) | 531 | if (inet6_rsk(req)->pktopts) |
@@ -915,7 +929,7 @@ done_opts: | |||
915 | } | 929 | } |
916 | #endif | 930 | #endif |
917 | 931 | ||
918 | static struct request_sock_ops tcp6_request_sock_ops __read_mostly = { | 932 | struct request_sock_ops tcp6_request_sock_ops __read_mostly = { |
919 | .family = AF_INET6, | 933 | .family = AF_INET6, |
920 | .obj_size = sizeof(struct tcp6_request_sock), | 934 | .obj_size = sizeof(struct tcp6_request_sock), |
921 | .rtx_syn_ack = tcp_v6_send_synack, | 935 | .rtx_syn_ack = tcp_v6_send_synack, |
@@ -1213,9 +1227,9 @@ static struct sock *tcp_v6_hnd_req(struct sock *sk,struct sk_buff *skb) | |||
1213 | return NULL; | 1227 | return NULL; |
1214 | } | 1228 | } |
1215 | 1229 | ||
1216 | #if 0 /*def CONFIG_SYN_COOKIES*/ | 1230 | #ifdef CONFIG_SYN_COOKIES |
1217 | if (!th->rst && !th->syn && th->ack) | 1231 | if (!th->rst && !th->syn && th->ack) |
1218 | sk = cookie_v6_check(sk, skb, &(IPCB(skb)->opt)); | 1232 | sk = cookie_v6_check(sk, skb); |
1219 | #endif | 1233 | #endif |
1220 | return sk; | 1234 | return sk; |
1221 | } | 1235 | } |
@@ -1231,6 +1245,11 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb) | |||
1231 | struct tcp_sock *tp = tcp_sk(sk); | 1245 | struct tcp_sock *tp = tcp_sk(sk); |
1232 | struct request_sock *req = NULL; | 1246 | struct request_sock *req = NULL; |
1233 | __u32 isn = TCP_SKB_CB(skb)->when; | 1247 | __u32 isn = TCP_SKB_CB(skb)->when; |
1248 | #ifdef CONFIG_SYN_COOKIES | ||
1249 | int want_cookie = 0; | ||
1250 | #else | ||
1251 | #define want_cookie 0 | ||
1252 | #endif | ||
1234 | 1253 | ||
1235 | if (skb->protocol == htons(ETH_P_IP)) | 1254 | if (skb->protocol == htons(ETH_P_IP)) |
1236 | return tcp_v4_conn_request(sk, skb); | 1255 | return tcp_v4_conn_request(sk, skb); |
@@ -1238,12 +1257,14 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb) | |||
1238 | if (!ipv6_unicast_destination(skb)) | 1257 | if (!ipv6_unicast_destination(skb)) |
1239 | goto drop; | 1258 | goto drop; |
1240 | 1259 | ||
1241 | /* | ||
1242 | * There are no SYN attacks on IPv6, yet... | ||
1243 | */ | ||
1244 | if (inet_csk_reqsk_queue_is_full(sk) && !isn) { | 1260 | if (inet_csk_reqsk_queue_is_full(sk) && !isn) { |
1245 | if (net_ratelimit()) | 1261 | if (net_ratelimit()) |
1246 | printk(KERN_INFO "TCPv6: dropping request, synflood is possible\n"); | 1262 | syn_flood_warning(skb); |
1263 | #ifdef CONFIG_SYN_COOKIES | ||
1264 | if (sysctl_tcp_syncookies) | ||
1265 | want_cookie = 1; | ||
1266 | else | ||
1267 | #endif | ||
1247 | goto drop; | 1268 | goto drop; |
1248 | } | 1269 | } |
1249 | 1270 | ||
@@ -1264,29 +1285,39 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb) | |||
1264 | 1285 | ||
1265 | tcp_parse_options(skb, &tmp_opt, 0); | 1286 | tcp_parse_options(skb, &tmp_opt, 0); |
1266 | 1287 | ||
1288 | if (want_cookie) { | ||
1289 | tcp_clear_options(&tmp_opt); | ||
1290 | tmp_opt.saw_tstamp = 0; | ||
1291 | } | ||
1292 | |||
1267 | tmp_opt.tstamp_ok = tmp_opt.saw_tstamp; | 1293 | tmp_opt.tstamp_ok = tmp_opt.saw_tstamp; |
1268 | tcp_openreq_init(req, &tmp_opt, skb); | 1294 | tcp_openreq_init(req, &tmp_opt, skb); |
1269 | 1295 | ||
1270 | treq = inet6_rsk(req); | 1296 | treq = inet6_rsk(req); |
1271 | ipv6_addr_copy(&treq->rmt_addr, &ipv6_hdr(skb)->saddr); | 1297 | ipv6_addr_copy(&treq->rmt_addr, &ipv6_hdr(skb)->saddr); |
1272 | ipv6_addr_copy(&treq->loc_addr, &ipv6_hdr(skb)->daddr); | 1298 | ipv6_addr_copy(&treq->loc_addr, &ipv6_hdr(skb)->daddr); |
1273 | TCP_ECN_create_request(req, tcp_hdr(skb)); | ||
1274 | treq->pktopts = NULL; | 1299 | treq->pktopts = NULL; |
1275 | if (ipv6_opt_accepted(sk, skb) || | 1300 | if (!want_cookie) |
1276 | np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo || | 1301 | TCP_ECN_create_request(req, tcp_hdr(skb)); |
1277 | np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) { | 1302 | |
1278 | atomic_inc(&skb->users); | 1303 | if (want_cookie) { |
1279 | treq->pktopts = skb; | 1304 | isn = cookie_v6_init_sequence(sk, skb, &req->mss); |
1280 | } | 1305 | } else if (!isn) { |
1281 | treq->iif = sk->sk_bound_dev_if; | 1306 | if (ipv6_opt_accepted(sk, skb) || |
1307 | np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo || | ||
1308 | np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) { | ||
1309 | atomic_inc(&skb->users); | ||
1310 | treq->pktopts = skb; | ||
1311 | } | ||
1312 | treq->iif = sk->sk_bound_dev_if; | ||
1282 | 1313 | ||
1283 | /* So that link locals have meaning */ | 1314 | /* So that link locals have meaning */ |
1284 | if (!sk->sk_bound_dev_if && | 1315 | if (!sk->sk_bound_dev_if && |
1285 | ipv6_addr_type(&treq->rmt_addr) & IPV6_ADDR_LINKLOCAL) | 1316 | ipv6_addr_type(&treq->rmt_addr) & IPV6_ADDR_LINKLOCAL) |
1286 | treq->iif = inet6_iif(skb); | 1317 | treq->iif = inet6_iif(skb); |
1287 | 1318 | ||
1288 | if (isn == 0) | ||
1289 | isn = tcp_v6_init_sequence(skb); | 1319 | isn = tcp_v6_init_sequence(skb); |
1320 | } | ||
1290 | 1321 | ||
1291 | tcp_rsk(req)->snt_isn = isn; | 1322 | tcp_rsk(req)->snt_isn = isn; |
1292 | 1323 | ||
@@ -1295,8 +1326,10 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb) | |||
1295 | if (tcp_v6_send_synack(sk, req)) | 1326 | if (tcp_v6_send_synack(sk, req)) |
1296 | goto drop; | 1327 | goto drop; |
1297 | 1328 | ||
1298 | inet6_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT); | 1329 | if (!want_cookie) { |
1299 | return 0; | 1330 | inet6_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT); |
1331 | return 0; | ||
1332 | } | ||
1300 | 1333 | ||
1301 | drop: | 1334 | drop: |
1302 | if (req) | 1335 | if (req) |