author     Herbert Xu <herbert@gondor.apana.org.au>      2007-03-25 23:10:56 -0400
committer  David S. Miller <davem@sunset.davemloft.net>  2007-04-26 01:23:51 -0400
commit     759e5d006462d53fb708daa8284b4ad909415da1 (patch)
tree       edcc4e9d975199b3fe5e2aadc3d1e06824755e75 /net/ipv4
parent     1ab6eb62b02e0949a392fb19bf31ba59ae1022b1 (diff)
[UDP]: Clean up UDP-Lite receive checksum
This patch eliminates some duplicate code for the verification of receive checksums between UDP-Lite and UDP. It does this by introducing __skb_checksum_complete_head, which is identical to __skb_checksum_complete apart from the fact that it takes a length parameter rather than always checksumming the first skb->len bytes.

As a result, UDP-Lite will be able to use hardware checksum offload for packets which do not use partial coverage checksums. It also means that UDP-Lite loopback no longer does unnecessary checksum verification. If any NICs start supporting UDP-Lite, this would also start working automatically.

This patch also removes the assumption that msg_flags has MSG_TRUNC clear upon entry to recvmsg.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: David S. Miller <davem@davemloft.net>
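Editorial illustration, not part of the patch: the user-space sketch below models the receive-path decision described above, where full checksum verification runs before the copy only when the read is truncated or UDP-Lite partial coverage is in use. The struct rx_datagram type and the checksum_ok() and receive() helpers are hypothetical stand-ins for the kernel's sk_buff, udp_lib_checksum_complete() and udp_recvmsg().

/*
 * Editorial sketch (not kernel code): models the decision introduced by
 * this patch.  All types and helpers here are hypothetical stand-ins.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct rx_datagram {
	size_t ulen;        /* payload length on the wire (skb->len minus UDP header) */
	bool   partial_cov; /* UDP-Lite partial checksum coverage in effect?          */
	bool   hw_verified; /* already verified, i.e. CHECKSUM_UNNECESSARY            */
};

/* Stand-in for udp_lib_checksum_complete(): returns true if the packet
 * passes verification (or was already verified, e.g. by the NIC). */
static bool checksum_ok(const struct rx_datagram *dg)
{
	if (dg->hw_verified)
		return true;
	/* a real implementation would checksum the covered bytes here */
	return true;
}

/* Stand-in for the new udp_recvmsg() flow: verify before copying only when
 * the read is truncated or partial coverage is in use; otherwise the
 * checksum can be folded into the copy (or skipped when already verified). */
static int receive(const struct rx_datagram *dg, size_t buflen, bool *truncated)
{
	size_t copied = buflen < dg->ulen ? buflen : dg->ulen;

	*truncated = copied < dg->ulen;

	if ((copied < dg->ulen || dg->partial_cov) && !checksum_ok(dg))
		return -1;	/* corresponds to the csum_copy_err path */

	/* copy 'copied' bytes to the caller, checksumming on the fly if needed */
	return (int)copied;
}

int main(void)
{
	struct rx_datagram dg = { .ulen = 1200, .partial_cov = false,
				  .hw_verified = true };
	bool trunc;
	int n = receive(&dg, 512, &trunc);

	printf("copied %d byte(s), truncated: %s\n", n, trunc ? "yes" : "no");
	return 0;
}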
Diffstat (limited to 'net/ipv4')
-rw-r--r--  net/ipv4/udp.c      | 96
-rw-r--r--  net/ipv4/udplite.c  |  2
2 files changed, 50 insertions, 48 deletions
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index fc620a7c1db4..86368832d481 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -810,7 +810,9 @@ int udp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 	struct inet_sock *inet = inet_sk(sk);
 	struct sockaddr_in *sin = (struct sockaddr_in *)msg->msg_name;
 	struct sk_buff *skb;
-	int copied, err, copy_only, is_udplite = IS_UDPLITE(sk);
+	unsigned int ulen, copied;
+	int err;
+	int is_udplite = IS_UDPLITE(sk);
 
 	/*
 	 *	Check any passed addresses
@@ -826,28 +828,25 @@ try_again:
 	if (!skb)
 		goto out;
 
-	copied = skb->len - sizeof(struct udphdr);
-	if (copied > len) {
-		copied = len;
+	ulen = skb->len - sizeof(struct udphdr);
+	copied = len;
+	if (copied > ulen)
+		copied = ulen;
+	else if (copied < ulen)
 		msg->msg_flags |= MSG_TRUNC;
-	}
 
 	/*
-	 *	Decide whether to checksum and/or copy data.
-	 *
-	 *	UDP:      checksum may have been computed in HW,
-	 *		  (re-)compute it if message is truncated.
-	 *	UDP-Lite: always needs to checksum, no HW support.
+	 * If checksum is needed at all, try to do it while copying the
+	 * data.  If the data is truncated, or if we only want a partial
+	 * coverage checksum (UDP-Lite), do it before the copy.
 	 */
-	copy_only = (skb->ip_summed==CHECKSUM_UNNECESSARY);
 
-	if (is_udplite || (!copy_only && msg->msg_flags&MSG_TRUNC)) {
-		if (__udp_lib_checksum_complete(skb))
+	if (copied < ulen || UDP_SKB_CB(skb)->partial_cov) {
+		if (udp_lib_checksum_complete(skb))
 			goto csum_copy_err;
-		copy_only = 1;
 	}
 
-	if (copy_only)
+	if (skb->ip_summed == CHECKSUM_UNNECESSARY)
 		err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr),
 					      msg->msg_iov, copied       );
 	else {
@@ -875,7 +874,7 @@ try_again:
 
 	err = copied;
 	if (flags & MSG_TRUNC)
-		err = skb->len - sizeof(struct udphdr);
+		err = ulen;
 
 out_free:
 	skb_free_datagram(sk, skb);
@@ -1095,10 +1094,9 @@ int udp_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
 		}
 	}
 
-	if (sk->sk_filter && skb->ip_summed != CHECKSUM_UNNECESSARY) {
-		if (__udp_lib_checksum_complete(skb))
+	if (sk->sk_filter) {
+		if (udp_lib_checksum_complete(skb))
 			goto drop;
-		skb->ip_summed = CHECKSUM_UNNECESSARY;
 	}
 
 	if ((rc = sock_queue_rcv_skb(sk,skb)) < 0) {
@@ -1166,25 +1164,36 @@ static int __udp4_lib_mcast_deliver(struct sk_buff *skb,
  * Otherwise, csum completion requires chacksumming packet body,
  * including udp header and folding it to skb->csum.
  */
-static inline void udp4_csum_init(struct sk_buff *skb, struct udphdr *uh)
+static inline int udp4_csum_init(struct sk_buff *skb, struct udphdr *uh,
+				 int proto)
 {
+	int err;
+
+	UDP_SKB_CB(skb)->partial_cov = 0;
+	UDP_SKB_CB(skb)->cscov = skb->len;
+
+	if (proto == IPPROTO_UDPLITE) {
+		err = udplite_checksum_init(skb, uh);
+		if (err)
+			return err;
+	}
+
 	if (uh->check == 0) {
 		skb->ip_summed = CHECKSUM_UNNECESSARY;
 	} else if (skb->ip_summed == CHECKSUM_COMPLETE) {
-	       if (!csum_tcpudp_magic(skb->nh.iph->saddr, skb->nh.iph->daddr,
-				      skb->len, IPPROTO_UDP, skb->csum ))
+		if (!csum_tcpudp_magic(skb->nh.iph->saddr, skb->nh.iph->daddr,
+				      skb->len, proto, skb->csum))
 			skb->ip_summed = CHECKSUM_UNNECESSARY;
 	}
 	if (skb->ip_summed != CHECKSUM_UNNECESSARY)
 		skb->csum = csum_tcpudp_nofold(skb->nh.iph->saddr,
 					       skb->nh.iph->daddr,
-					       skb->len, IPPROTO_UDP, 0);
+					       skb->len, proto, 0);
 	/* Probably, we should checksum udp header (it should be in cache
 	 * in any case) and data in tiny packets (< rx copybreak).
 	 */
 
-	/* UDP = UDP-Lite with a non-partial checksum coverage */
-	UDP_SKB_CB(skb)->partial_cov = 0;
+	return 0;
 }
 
 /*
@@ -1192,7 +1201,7 @@ static inline void udp4_csum_init(struct sk_buff *skb, struct udphdr *uh)
  */
 
 int __udp4_lib_rcv(struct sk_buff *skb, struct hlist_head udptable[],
-		   int is_udplite)
+		   int proto)
 {
 	struct sock *sk;
 	struct udphdr *uh = skb->h.uh;
@@ -1211,19 +1220,16 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct hlist_head udptable[],
 	if (ulen > skb->len)
 		goto short_packet;
 
-	if(! is_udplite ) {		/* UDP validates ulen. */
-
+	if (proto == IPPROTO_UDP) {
+		/* UDP validates ulen. */
 		if (ulen < sizeof(*uh) || pskb_trim_rcsum(skb, ulen))
 			goto short_packet;
 		uh = skb->h.uh;
-
-		udp4_csum_init(skb, uh);
-
-	} else 	{			/* UDP-Lite validates cscov. */
-		if (udplite4_csum_init(skb, uh))
-			goto csum_error;
 	}
 
+	if (udp4_csum_init(skb, uh, proto))
+		goto csum_error;
+
 	if(rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST))
 		return __udp4_lib_mcast_deliver(skb, uh, saddr, daddr, udptable);
 
@@ -1250,7 +1256,7 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct hlist_head udptable[],
 	if (udp_lib_checksum_complete(skb))
 		goto csum_error;
 
-	UDP_INC_STATS_BH(UDP_MIB_NOPORTS, is_udplite);
+	UDP_INC_STATS_BH(UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
 	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
 
 	/*
@@ -1262,7 +1268,7 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct hlist_head udptable[],
 
 short_packet:
 	LIMIT_NETDEBUG(KERN_DEBUG "UDP%s: short packet: From %u.%u.%u.%u:%u %d/%d to %u.%u.%u.%u:%u\n",
-		       is_udplite? "-Lite" : "",
+		       proto == IPPROTO_UDPLITE ? "-Lite" : "",
 		       NIPQUAD(saddr),
 		       ntohs(uh->source),
 		       ulen,
@@ -1277,21 +1283,21 @@ csum_error:
 	 * the network is concerned, anyway) as per 4.1.3.4 (MUST).
 	 */
 	LIMIT_NETDEBUG(KERN_DEBUG "UDP%s: bad checksum. From %d.%d.%d.%d:%d to %d.%d.%d.%d:%d ulen %d\n",
-		       is_udplite? "-Lite" : "",
+		       proto == IPPROTO_UDPLITE ? "-Lite" : "",
 		       NIPQUAD(saddr),
 		       ntohs(uh->source),
 		       NIPQUAD(daddr),
 		       ntohs(uh->dest),
 		       ulen);
 drop:
-	UDP_INC_STATS_BH(UDP_MIB_INERRORS, is_udplite);
+	UDP_INC_STATS_BH(UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE);
 	kfree_skb(skb);
 	return(0);
 }
 
 __inline__ int udp_rcv(struct sk_buff *skb)
 {
-	return __udp4_lib_rcv(skb, udp_hash, 0);
+	return __udp4_lib_rcv(skb, udp_hash, IPPROTO_UDP);
 }
 
 int udp_destroy_sock(struct sock *sk)
@@ -1486,15 +1492,11 @@ unsigned int udp_poll(struct file *file, struct socket *sock, poll_table *wait)
 	struct sk_buff *skb;
 
 	spin_lock_bh(&rcvq->lock);
-	while ((skb = skb_peek(rcvq)) != NULL) {
-		if (udp_lib_checksum_complete(skb)) {
-			UDP_INC_STATS_BH(UDP_MIB_INERRORS, is_lite);
-			__skb_unlink(skb, rcvq);
-			kfree_skb(skb);
-		} else {
-			skb->ip_summed = CHECKSUM_UNNECESSARY;
-			break;
-		}
+	while ((skb = skb_peek(rcvq)) != NULL &&
+	       udp_lib_checksum_complete(skb)) {
+		UDP_INC_STATS_BH(UDP_MIB_INERRORS, is_lite);
+		__skb_unlink(skb, rcvq);
+		kfree_skb(skb);
 	}
 	spin_unlock_bh(&rcvq->lock);
 
diff --git a/net/ipv4/udplite.c b/net/ipv4/udplite.c
index b28fe1edf98b..f34fd686a8f1 100644
--- a/net/ipv4/udplite.c
+++ b/net/ipv4/udplite.c
@@ -31,7 +31,7 @@ static int udplite_v4_get_port(struct sock *sk, unsigned short snum)
 
 static int udplite_rcv(struct sk_buff *skb)
 {
-	return __udp4_lib_rcv(skb, udplite_hash, 1);
+	return __udp4_lib_rcv(skb, udplite_hash, IPPROTO_UDPLITE);
 }
 
 static void udplite_err(struct sk_buff *skb, u32 info)
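
Closing editorial aside, also not part of the patch: the MSG_TRUNC handling touched in udp_recvmsg() above is what a user-space caller observes in msg_flags after recvmsg(2). A minimal Linux example, with an arbitrary port number and a deliberately small buffer:

#include <netinet/in.h>
#include <stdio.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <unistd.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	struct sockaddr_in addr = {
		.sin_family      = AF_INET,
		.sin_port        = htons(9999),		/* arbitrary port */
		.sin_addr.s_addr = htonl(INADDR_ANY),
	};
	char buf[64];					/* deliberately small buffer */
	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
	struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1 };
	ssize_t n;

	if (fd < 0 || bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
		perror("socket/bind");
		return 1;
	}

	n = recvmsg(fd, &msg, 0);	/* blocks until a datagram arrives */
	if (n < 0)
		perror("recvmsg");
	else if (msg.msg_flags & MSG_TRUNC)
		printf("got %zd bytes, datagram was truncated\n", n);
	else
		printf("got %zd bytes\n", n);

	close(fd);
	return 0;
}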