aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorOctavian Purdila <octavian.purdila@intel.com>2014-06-25 10:09:55 -0400
committerDavid S. Miller <davem@davemloft.net>2014-06-27 18:53:36 -0400
commitd94e0417ad8d96d7d96b69335338ad942eaeecf1 (patch)
tree0e4b8e9ef731f478d7748d1f701b66edc2acbeb3
parentfb7b37a7f3d6f7b7ba05ee526fee96810d5b92a8 (diff)
tcp: add route_req method to tcp_request_sock_ops
Create wrappers with same signature for the IPv4/IPv6 request routing calls and use these wrappers (via route_req method from tcp_request_sock_ops) in tcp_v4_conn_request and tcp_v6_conn_request with the purpose of unifying the two functions in a later patch. We can later drop the wrapper functions and modify inet_csk_route_req and inet6_csk_route_req to use the same signature. Signed-off-by: Octavian Purdila <octavian.purdila@intel.com> Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--include/net/tcp.h3
-rw-r--r--net/ipv4/tcp_ipv4.c36
-rw-r--r--net/ipv6/tcp_ipv6.c26
3 files changed, 52 insertions, 13 deletions
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 086d00ec6d8b..59fcc5934c79 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -1605,6 +1605,9 @@ struct tcp_request_sock_ops {
1605 __u32 (*cookie_init_seq)(struct sock *sk, const struct sk_buff *skb, 1605 __u32 (*cookie_init_seq)(struct sock *sk, const struct sk_buff *skb,
1606 __u16 *mss); 1606 __u16 *mss);
1607#endif 1607#endif
1608 struct dst_entry *(*route_req)(struct sock *sk, struct flowi *fl,
1609 const struct request_sock *req,
1610 bool *strict);
1608}; 1611};
1609 1612
1610#ifdef CONFIG_SYN_COOKIES 1613#ifdef CONFIG_SYN_COOKIES
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 8c69e44c287b..54fbbd8b4fcd 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1248,6 +1248,22 @@ static void tcp_v4_init_req(struct request_sock *req, struct sock *sk,
1248 ireq->opt = tcp_v4_save_options(skb); 1248 ireq->opt = tcp_v4_save_options(skb);
1249} 1249}
1250 1250
1251static struct dst_entry *tcp_v4_route_req(struct sock *sk, struct flowi *fl,
1252 const struct request_sock *req,
1253 bool *strict)
1254{
1255 struct dst_entry *dst = inet_csk_route_req(sk, &fl->u.ip4, req);
1256
1257 if (strict) {
1258 if (fl->u.ip4.daddr == inet_rsk(req)->ir_rmt_addr)
1259 *strict = true;
1260 else
1261 *strict = false;
1262 }
1263
1264 return dst;
1265}
1266
1251struct request_sock_ops tcp_request_sock_ops __read_mostly = { 1267struct request_sock_ops tcp_request_sock_ops __read_mostly = {
1252 .family = PF_INET, 1268 .family = PF_INET,
1253 .obj_size = sizeof(struct tcp_request_sock), 1269 .obj_size = sizeof(struct tcp_request_sock),
@@ -1267,6 +1283,7 @@ static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
1267#ifdef CONFIG_SYN_COOKIES 1283#ifdef CONFIG_SYN_COOKIES
1268 .cookie_init_seq = cookie_v4_init_sequence, 1284 .cookie_init_seq = cookie_v4_init_sequence,
1269#endif 1285#endif
1286 .route_req = tcp_v4_route_req,
1270}; 1287};
1271 1288
1272int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb) 1289int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
@@ -1346,11 +1363,13 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1346 * timewait bucket, so that all the necessary checks 1363 * timewait bucket, so that all the necessary checks
1347 * are made in the function processing timewait state. 1364 * are made in the function processing timewait state.
1348 */ 1365 */
1349 if (tmp_opt.saw_tstamp && 1366 if (tmp_opt.saw_tstamp && tcp_death_row.sysctl_tw_recycle) {
1350 tcp_death_row.sysctl_tw_recycle && 1367 bool strict;
1351 (dst = inet_csk_route_req(sk, &fl4, req)) != NULL && 1368
1352 fl4.daddr == saddr) { 1369 dst = af_ops->route_req(sk, (struct flowi *)&fl4, req,
1353 if (!tcp_peer_is_proven(req, dst, true)) { 1370 &strict);
1371 if (dst && strict &&
1372 !tcp_peer_is_proven(req, dst, true)) {
1354 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED); 1373 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
1355 goto drop_and_release; 1374 goto drop_and_release;
1356 } 1375 }
@@ -1374,8 +1393,11 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1374 1393
1375 isn = tcp_v4_init_sequence(skb); 1394 isn = tcp_v4_init_sequence(skb);
1376 } 1395 }
1377 if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL) 1396 if (!dst) {
1378 goto drop_and_free; 1397 dst = af_ops->route_req(sk, (struct flowi *)&fl4, req, NULL);
1398 if (!dst)
1399 goto drop_and_free;
1400 }
1379 1401
1380 tcp_rsk(req)->snt_isn = isn; 1402 tcp_rsk(req)->snt_isn = isn;
1381 tcp_openreq_init_rwin(req, sk, dst); 1403 tcp_openreq_init_rwin(req, sk, dst);
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 17710cffddaa..d780d8808566 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -745,6 +745,16 @@ static void tcp_v6_init_req(struct request_sock *req, struct sock *sk,
745 } 745 }
746} 746}
747 747
748static struct dst_entry *tcp_v6_route_req(struct sock *sk, struct flowi *fl,
749 const struct request_sock *req,
750 bool *strict)
751{
752 if (strict)
753 *strict = true;
754 return inet6_csk_route_req(sk, &fl->u.ip6, req);
755}
756
757
748struct request_sock_ops tcp6_request_sock_ops __read_mostly = { 758struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
749 .family = AF_INET6, 759 .family = AF_INET6,
750 .obj_size = sizeof(struct tcp6_request_sock), 760 .obj_size = sizeof(struct tcp6_request_sock),
@@ -764,6 +774,7 @@ static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
764#ifdef CONFIG_SYN_COOKIES 774#ifdef CONFIG_SYN_COOKIES
765 .cookie_init_seq = cookie_v6_init_sequence, 775 .cookie_init_seq = cookie_v6_init_sequence,
766#endif 776#endif
777 .route_req = tcp_v6_route_req,
767}; 778};
768 779
769static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win, 780static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
@@ -1078,10 +1089,10 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
1078 * timewait bucket, so that all the necessary checks 1089 * timewait bucket, so that all the necessary checks
1079 * are made in the function processing timewait state. 1090 * are made in the function processing timewait state.
1080 */ 1091 */
1081 if (tmp_opt.saw_tstamp && 1092 if (tmp_opt.saw_tstamp && tcp_death_row.sysctl_tw_recycle) {
1082 tcp_death_row.sysctl_tw_recycle && 1093 dst = af_ops->route_req(sk, (struct flowi *)&fl6, req,
1083 (dst = inet6_csk_route_req(sk, &fl6, req)) != NULL) { 1094 NULL);
1084 if (!tcp_peer_is_proven(req, dst, true)) { 1095 if (dst && !tcp_peer_is_proven(req, dst, true)) {
1085 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED); 1096 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
1086 goto drop_and_release; 1097 goto drop_and_release;
1087 } 1098 }
@@ -1110,8 +1121,11 @@ have_isn:
1110 if (security_inet_conn_request(sk, skb, req)) 1121 if (security_inet_conn_request(sk, skb, req))
1111 goto drop_and_release; 1122 goto drop_and_release;
1112 1123
1113 if (!dst && (dst = inet6_csk_route_req(sk, &fl6, req)) == NULL) 1124 if (!dst) {
1114 goto drop_and_free; 1125 dst = af_ops->route_req(sk, (struct flowi *)&fl6, req, NULL);
1126 if (!dst)
1127 goto drop_and_free;
1128 }
1115 1129
1116 tcp_rsk(req)->snt_isn = isn; 1130 tcp_rsk(req)->snt_isn = isn;
1117 tcp_openreq_init_rwin(req, sk, dst); 1131 tcp_openreq_init_rwin(req, sk, dst);