author     Patrick McHardy <kaber@trash.net>   2010-05-10 12:39:28 -0400
committer  Patrick McHardy <kaber@trash.net>   2010-05-10 12:39:28 -0400
commit     1e4b1057121bc756b91758a434b504d2010f6088 (patch)
tree       b016cf2c728289c7e36d9e4e488f30ab0bd0ae6e /net/ipv4/af_inet.c
parent     3b254c54ec46eb022cb26ee6ab37fae23f5f7d6a (diff)
parent     3ee943728fff536edaf8f59faa58aaa1aa7366e3 (diff)
Merge branch 'master' of /repos/git/net-next-2.6
Conflicts:
	net/bridge/br_device.c
	net/bridge/br_forward.c
Signed-off-by: Patrick McHardy <kaber@trash.net>
Diffstat (limited to 'net/ipv4/af_inet.c')
-rw-r--r--   net/ipv4/af_inet.c   22
1 files changed, 11 insertions, 11 deletions
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index c5376c725503..c6c43bcd1c6f 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -419,7 +419,7 @@ int inet_release(struct socket *sock)
 	if (sk) {
 		long timeout;
 
-		inet_rps_reset_flow(sk);
+		sock_rps_reset_flow(sk);
 
 		/* Applications forget to leave groups before exiting */
 		ip_mc_drop_socket(sk);
@@ -548,7 +548,7 @@ static long inet_wait_for_connect(struct sock *sk, long timeo)
 {
 	DEFINE_WAIT(wait);
 
-	prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
+	prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
 
 	/* Basic assumption: if someone sets sk->sk_err, he _must_
 	 * change state of the socket from TCP_SYN_*.
@@ -561,9 +561,9 @@ static long inet_wait_for_connect(struct sock *sk, long timeo)
 		lock_sock(sk);
 		if (signal_pending(current) || !timeo)
 			break;
-		prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
+		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
 	}
-	finish_wait(sk->sk_sleep, &wait);
+	finish_wait(sk_sleep(sk), &wait);
 	return timeo;
 }
 
@@ -722,7 +722,7 @@ int inet_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
 {
 	struct sock *sk = sock->sk;
 
-	inet_rps_record_flow(sk);
+	sock_rps_record_flow(sk);
 
 	/* We may need to bind the socket. */
 	if (!inet_sk(sk)->inet_num && inet_autobind(sk))
@@ -737,7 +737,7 @@ static ssize_t inet_sendpage(struct socket *sock, struct page *page, int offset,
 {
 	struct sock *sk = sock->sk;
 
-	inet_rps_record_flow(sk);
+	sock_rps_record_flow(sk);
 
 	/* We may need to bind the socket. */
 	if (!inet_sk(sk)->inet_num && inet_autobind(sk))
@@ -755,7 +755,7 @@ int inet_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
 	int addr_len = 0;
 	int err;
 
-	inet_rps_record_flow(sk);
+	sock_rps_record_flow(sk);
 
 	err = sk->sk_prot->recvmsg(iocb, sk, msg, size, flags & MSG_DONTWAIT,
 				   flags & ~MSG_DONTWAIT, &addr_len);
@@ -1323,8 +1323,8 @@ static struct sk_buff **inet_gro_receive(struct sk_buff **head,
 	if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl)))
 		goto out_unlock;
 
-	id = ntohl(*(u32 *)&iph->id);
-	flush = (u16)((ntohl(*(u32 *)iph) ^ skb_gro_len(skb)) | (id ^ IP_DF));
+	id = ntohl(*(__be32 *)&iph->id);
+	flush = (u16)((ntohl(*(__be32 *)iph) ^ skb_gro_len(skb)) | (id ^ IP_DF));
 	id >>= 16;
 
 	for (p = *head; p; p = p->next) {
@@ -1337,8 +1337,8 @@ static struct sk_buff **inet_gro_receive(struct sk_buff **head,
 
 		if ((iph->protocol ^ iph2->protocol) |
 		    (iph->tos ^ iph2->tos) |
-		    (iph->saddr ^ iph2->saddr) |
-		    (iph->daddr ^ iph2->daddr)) {
+		    ((__force u32)iph->saddr ^ (__force u32)iph2->saddr) |
+		    ((__force u32)iph->daddr ^ (__force u32)iph2->daddr)) {
 			NAPI_GRO_CB(p)->same_flow = 0;
 			continue;
 		}
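
The hunks above track three cleanups merged from net-next: direct sk->sk_sleep dereferences become sk_sleep(sk) calls, the inet-specific inet_rps_*_flow() helpers become the socket-generic sock_rps_*_flow() ones, and the GRO flow comparison gains __be32/__force casts so sparse accepts the mixed-endianness arithmetic. A minimal user-space sketch of that last idiom follows; the names are hypothetical and plain uint32_t stands in for the kernel's __be32 and u32 types, so this is illustrative only, not the kernel code.

/* Sketch of the endianness-annotation idiom in the GRO hunks above:
 * two big-endian fields are compared by XOR and only the zero/non-zero
 * result matters, so no byte swapping is needed.  In the kernel the
 * cast is written as (__force u32) to tell sparse the conversion from
 * __be32 is intentional; here uint32_t stands in for both types. */
#include <stdint.h>

typedef uint32_t be32_sketch;	/* stand-in for the kernel's __be32 */

static int addrs_differ(be32_sketch saddr1, be32_sketch saddr2,
			be32_sketch daddr1, be32_sketch daddr2)
{
	/* Non-zero iff either address pair differs, regardless of byte order. */
	return ((uint32_t)saddr1 ^ (uint32_t)saddr2) |
	       ((uint32_t)daddr1 ^ (uint32_t)daddr2);
}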