author	Ilpo Järvinen <ilpo.jarvinen@helsinki.fi>	2008-10-09 17:42:40 -0400
committer	David S. Miller <davem@davemloft.net>	2008-10-09 17:42:40 -0400
commit	626e264dd1989bdc98a5eaf2e059af4dba07ac4f (patch)
tree	200fe958b32d662a686ff597a9a3260af827767a /net
parent	81ada62d70060023923f46ab666cdc2970e1e0ce (diff)
tcpv6: combine tcp_v6_send_(reset|ack)
$ codiff tcp_ipv6.o.old tcp_ipv6.o.new
net/ipv6/tcp_ipv6.c:
  tcp_v6_md5_hash_hdr |  -144
  tcp_v6_send_ack     |  -585
  tcp_v6_send_reset   |  -540
 3 functions changed, 1269 bytes removed, diff: -1269

net/ipv6/tcp_ipv6.c:
  tcp_v6_send_response |  +791
 1 function changed, 791 bytes added, diff: +791

tcp_ipv6.o.new:
 4 functions changed, 791 bytes added, 1269 bytes removed, diff: -478

I chose to leave the reset-related netns comment in place (not the one that is
killed), as I cannot understand its English, so it is a bit hard for me to
evaluate its usefulness :-).

Signed-off-by: Ilpo Järvinen <ilpo.jarvinen@helsinki.fi>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net')
-rw-r--r--	net/ipv6/tcp_ipv6.c	139
1 file changed, 40 insertions(+), 99 deletions(-)
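In outline, the patch reduces tcp_v6_send_reset() and tcp_v6_send_ack() to thin wrappers around the new shared tcp_v6_send_response() helper. The sketch below is condensed from the diff that follows (the reset path's bodies are abbreviated), not a complete listing:

	/* Condensed from the patch: both paths delegate to the shared helper,
	 * differing only in the rst flag and the ack/window/timestamp values. */
	static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
	{
		/* ... derive seq/ack_seq and the MD5 key from the incoming skb ... */
		tcp_v6_send_response(skb, seq, ack_seq, 0, 0, key, 1);	/* rst = 1 */
	}

	static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
				    u32 ts, struct tcp_md5sig_key *key)
	{
		tcp_v6_send_response(skb, seq, ack, win, ts, key, 0);	/* rst = 0 */
	}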
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 1941c5c888d..13c65144a00 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -942,7 +942,8 @@ static int tcp_v6_gso_send_check(struct sk_buff *skb)
 	return 0;
 }
 
-static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
+static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
+				 u32 ts, struct tcp_md5sig_key *key, int rst)
 {
 	struct tcphdr *th = tcp_hdr(skb), *t1;
 	struct sk_buff *buff;
@@ -951,31 +952,14 @@ static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
 	struct sock *ctl_sk = net->ipv6.tcp_sk;
 	unsigned int tot_len = sizeof(struct tcphdr);
 	__be32 *topt;
-#ifdef CONFIG_TCP_MD5SIG
-	struct tcp_md5sig_key *key;
-#endif
-
-	if (th->rst)
-		return;
-
-	if (!ipv6_unicast_destination(skb))
-		return;
 
+	if (ts)
+		tot_len += TCPOLEN_TSTAMP_ALIGNED;
 #ifdef CONFIG_TCP_MD5SIG
-	if (sk)
-		key = tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr);
-	else
-		key = NULL;
-
 	if (key)
 		tot_len += TCPOLEN_MD5SIG_ALIGNED;
 #endif
 
-	/*
-	 * We need to grab some memory, and put together an RST,
-	 * and then put it into the queue to be sent.
-	 */
-
 	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
 			 GFP_ATOMIC);
 	if (buff == NULL)
@@ -990,18 +974,21 @@ static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
 	t1->dest = th->source;
 	t1->source = th->dest;
 	t1->doff = tot_len / 4;
-	t1->rst = 1;
-
-	if(th->ack) {
-		t1->seq = th->ack_seq;
-	} else {
-		t1->ack = 1;
-		t1->ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin
-				    + skb->len - (th->doff<<2));
-	}
+	t1->seq = htonl(seq);
+	t1->ack_seq = htonl(ack);
+	t1->ack = !rst || !th->ack;
+	t1->rst = rst;
+	t1->window = htons(win);
 
 	topt = (__be32 *)(t1 + 1);
 
+	if (ts) {
+		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
+				(TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
+		*topt++ = htonl(tcp_time_stamp);
+		*topt++ = htonl(ts);
+	}
+
 #ifdef CONFIG_TCP_MD5SIG
 	if (key) {
 		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
@@ -1036,7 +1023,8 @@ static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
 		if (xfrm_lookup(&buff->dst, &fl, NULL, 0) >= 0) {
 			ip6_xmit(ctl_sk, buff, &fl, NULL, 0);
 			TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
-			TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
+			if (rst)
+				TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
 			return;
 		}
 	}
@@ -1044,87 +1032,40 @@ static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
 	kfree_skb(buff);
 }
 
-static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 ts,
-			    struct tcp_md5sig_key *key)
+static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
 {
-	struct tcphdr *th = tcp_hdr(skb), *t1;
-	struct sk_buff *buff;
-	struct flowi fl;
-	struct net *net = dev_net(skb->dst->dev);
-	struct sock *ctl_sk = net->ipv6.tcp_sk;
-	unsigned int tot_len = sizeof(struct tcphdr);
-	__be32 *topt;
-
-	if (ts)
-		tot_len += TCPOLEN_TSTAMP_ALIGNED;
+	struct tcphdr *th = tcp_hdr(skb);
+	u32 seq = 0, ack_seq = 0;
 #ifdef CONFIG_TCP_MD5SIG
-	if (key)
-		tot_len += TCPOLEN_MD5SIG_ALIGNED;
+	struct tcp_md5sig_key *key;
 #endif
 
-	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
-			 GFP_ATOMIC);
-	if (buff == NULL)
+	if (th->rst)
 		return;
 
-	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);
-
-	t1 = (struct tcphdr *) skb_push(buff, tot_len);
-
-	/* Swap the send and the receive. */
-	memset(t1, 0, sizeof(*t1));
-	t1->dest = th->source;
-	t1->source = th->dest;
-	t1->doff = tot_len / 4;
-	t1->seq = htonl(seq);
-	t1->ack_seq = htonl(ack);
-	t1->ack = 1;
-	t1->window = htons(win);
-
-	topt = (__be32 *)(t1 + 1);
-
-	if (ts) {
-		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
-				(TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
-		*topt++ = htonl(tcp_time_stamp);
-		*topt++ = htonl(ts);
-	}
+	if (!ipv6_unicast_destination(skb))
+		return;
 
 #ifdef CONFIG_TCP_MD5SIG
-	if (key) {
-		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
-				(TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
-		tcp_v6_md5_hash_hdr((__u8 *)topt, key,
-				    &ipv6_hdr(skb)->saddr,
-				    &ipv6_hdr(skb)->daddr, t1);
-	}
+	if (sk)
+		key = tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr);
+	else
+		key = NULL;
 #endif
 
-	buff->csum = csum_partial((char *)t1, tot_len, 0);
-
-	memset(&fl, 0, sizeof(fl));
-	ipv6_addr_copy(&fl.fl6_dst, &ipv6_hdr(skb)->saddr);
-	ipv6_addr_copy(&fl.fl6_src, &ipv6_hdr(skb)->daddr);
-
-	t1->check = csum_ipv6_magic(&fl.fl6_src, &fl.fl6_dst,
-				    tot_len, IPPROTO_TCP,
-				    buff->csum);
-
-	fl.proto = IPPROTO_TCP;
-	fl.oif = inet6_iif(skb);
-	fl.fl_ip_dport = t1->dest;
-	fl.fl_ip_sport = t1->source;
-	security_skb_classify_flow(skb, &fl);
+	if (th->ack)
+		seq = ntohl(th->ack_seq);
+	else
+		ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
+			  (th->doff << 2);
 
-	if (!ip6_dst_lookup(ctl_sk, &buff->dst, &fl)) {
-		if (xfrm_lookup(&buff->dst, &fl, NULL, 0) >= 0) {
-			ip6_xmit(ctl_sk, buff, &fl, NULL, 0);
-			TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
-			return;
-		}
-	}
+	tcp_v6_send_response(skb, seq, ack_seq, 0, 0, key, 1);
+}
 
-	kfree_skb(buff);
+static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 ts,
+			    struct tcp_md5sig_key *key)
+{
+	tcp_v6_send_response(skb, seq, ack, win, ts, key, 0);
 }
 
 static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)