author     Paul Mundt <lethal@linux-sh.org>    2011-01-12 00:47:21 -0500
committer  Paul Mundt <lethal@linux-sh.org>    2011-01-12 00:47:21 -0500
commit     53582c4c508a95ece9f7c907edef9911c7eb79eb (patch)
tree       c099e9880f7bf7d60e87dfa6a19cb4e8780ec860 /net/ipv4
parent     6ae1e19dbb43ff0d429a9f45087c513212686836 (diff)
parent     da97da73d418533187a8390cf6541f48bed653e8 (diff)
Merge branch 'rmobile/sdio' into rmobile-latest
Diffstat (limited to 'net/ipv4')
 net/ipv4/ah4.c                  |  7
 net/ipv4/arp.c                  | 29
 net/ipv4/inet_connection_sock.c |  5
 net/ipv4/inet_diag.c            |  2
 net/ipv4/netfilter/arp_tables.c | 45
 net/ipv4/netfilter/ip_tables.c  | 45
 6 files changed, 54 insertions(+), 79 deletions(-)
diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c
index 880a5ec6dce0..86961bec70ab 100644
--- a/net/ipv4/ah4.c
+++ b/net/ipv4/ah4.c
@@ -314,14 +314,15 @@ static int ah_input(struct xfrm_state *x, struct sk_buff *skb)
 
 	skb->ip_summed = CHECKSUM_NONE;
 
-	ah = (struct ip_auth_hdr *)skb->data;
-	iph = ip_hdr(skb);
-	ihl = ip_hdrlen(skb);
 
 	if ((err = skb_cow_data(skb, 0, &trailer)) < 0)
 		goto out;
 	nfrags = err;
 
+	ah = (struct ip_auth_hdr *)skb->data;
+	iph = ip_hdr(skb);
+	ihl = ip_hdrlen(skb);
+
 	work_iph = ah_alloc_tmp(ahash, nfrags, ihl + ahp->icv_trunc_len);
 	if (!work_iph)
 		goto out;
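
The ah4.c hunk moves the ah/iph/ihl assignments below skb_cow_data(): that helper may copy or rearrange the skb's data area, so pointers taken from skb->data before the call can be left dangling. A minimal standalone sketch of the same ordering rule, using illustrative names (grow_buffer, parse_header) rather than kernel APIs:

#include <stdlib.h>
#include <string.h>

struct buffer {
	unsigned char *data;
	size_t len;
};

/* May relocate b->data, the way skb_cow_data() may rearrange skb data. */
static int grow_buffer(struct buffer *b, size_t extra)
{
	unsigned char *p = realloc(b->data, b->len + extra);

	if (!p)
		return -1;
	b->data = p;
	b->len += extra;
	return 0;
}

static int parse_header(struct buffer *b)
{
	unsigned char *hdr;

	/* Reallocate first ... */
	if (grow_buffer(b, 16) < 0)
		return -1;

	/* ... and only then take pointers into the (possibly moved) storage.
	 * Taking hdr before grow_buffer() is the bug the hunk above fixes. */
	hdr = b->data;
	memset(hdr + b->len - 16, 0, 16);
	return 0;
}

int main(void)
{
	struct buffer b = { .data = malloc(4), .len = 4 };

	return b.data ? parse_header(&b) : 1;
}
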
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index a2fc7b961dbc..04c8b69fd426 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -1143,6 +1143,23 @@ static int arp_req_get(struct arpreq *r, struct net_device *dev)
 	return err;
 }
 
+int arp_invalidate(struct net_device *dev, __be32 ip)
+{
+	struct neighbour *neigh = neigh_lookup(&arp_tbl, &ip, dev);
+	int err = -ENXIO;
+
+	if (neigh) {
+		if (neigh->nud_state & ~NUD_NOARP)
+			err = neigh_update(neigh, NULL, NUD_FAILED,
+					   NEIGH_UPDATE_F_OVERRIDE|
+					   NEIGH_UPDATE_F_ADMIN);
+		neigh_release(neigh);
+	}
+
+	return err;
+}
+EXPORT_SYMBOL(arp_invalidate);
+
 static int arp_req_delete_public(struct net *net, struct arpreq *r,
 				 struct net_device *dev)
 {
@@ -1163,7 +1180,6 @@ static int arp_req_delete(struct net *net, struct arpreq *r,
 {
 	int err;
 	__be32 ip;
-	struct neighbour *neigh;
 
 	if (r->arp_flags & ATF_PUBL)
 		return arp_req_delete_public(net, r, dev);
@@ -1181,16 +1197,7 @@ static int arp_req_delete(struct net *net, struct arpreq *r,
 		if (!dev)
 			return -EINVAL;
 	}
-	err = -ENXIO;
-	neigh = neigh_lookup(&arp_tbl, &ip, dev);
-	if (neigh) {
-		if (neigh->nud_state & ~NUD_NOARP)
-			err = neigh_update(neigh, NULL, NUD_FAILED,
-					   NEIGH_UPDATE_F_OVERRIDE|
-					   NEIGH_UPDATE_F_ADMIN);
-		neigh_release(neigh);
-	}
-	return err;
+	return arp_invalidate(dev, ip);
 }
 
 /*
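
The arp.c hunks factor the neighbour-invalidation logic out of arp_req_delete() into an exported arp_invalidate(), so other kernel code can push the ARP entry for a given device and address into NUD_FAILED. A hedged usage fragment: the caller below is hypothetical, only arp_invalidate() itself comes from the patch.

/* Hypothetical caller: force re-resolution of a peer whose MAC address
 * is known to have changed.  arp_invalidate() returns -ENXIO when no
 * neighbour entry exists for that address on the device. */
static void forget_peer(struct net_device *dev, __be32 peer_ip)
{
	if (arp_invalidate(dev, peer_ip) == -ENXIO)
		pr_debug("no ARP entry for peer on %s\n", dev->name);
}
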
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 25e318153f14..97e5fb765265 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -73,7 +73,7 @@ int inet_csk_bind_conflict(const struct sock *sk,
 		     !sk2->sk_bound_dev_if ||
 		     sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) {
 			if (!reuse || !sk2->sk_reuse ||
-			    sk2->sk_state == TCP_LISTEN) {
+			    ((1 << sk2->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))) {
 				const __be32 sk2_rcv_saddr = sk_rcv_saddr(sk2);
 				if (!sk2_rcv_saddr || !sk_rcv_saddr(sk) ||
 				    sk2_rcv_saddr == sk_rcv_saddr(sk))
@@ -122,7 +122,8 @@ again:
 					    (tb->num_owners < smallest_size || smallest_size == -1)) {
 						smallest_size = tb->num_owners;
 						smallest_rover = rover;
-						if (atomic_read(&hashinfo->bsockets) > (high - low) + 1) {
+						if (atomic_read(&hashinfo->bsockets) > (high - low) + 1 &&
+						    !inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb)) {
 							spin_unlock(&head->lock);
 							snum = smallest_rover;
 							goto have_snum;
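
The first hunk widens the bind-conflict test from a single state (TCP_LISTEN) to a set of states, relying on the TCPF_* convention that each flag is 1 shifted by the corresponding TCP_* state number, so membership in a set becomes one AND against a mask. A small standalone sketch of that idiom; the state values here are illustrative, the kernel's TCP_* numbering differs.

#include <stdio.h>

/* Illustrative state numbering, not the kernel's. */
enum state { ST_ESTABLISHED = 1, ST_LISTEN = 2, ST_CLOSE = 3 };

#define STF(s)	(1 << (s))	/* same construction as TCPF_* */

static int conflicts(enum state s)
{
	/* One comparison covers the whole set, like
	 * (1 << sk2->sk_state) & (TCPF_LISTEN | TCPF_CLOSE) above. */
	return (STF(s) & (STF(ST_LISTEN) | STF(ST_CLOSE))) != 0;
}

int main(void)
{
	printf("%d %d %d\n", conflicts(ST_ESTABLISHED),
	       conflicts(ST_LISTEN), conflicts(ST_CLOSE));	/* prints 0 1 1 */
	return 0;
}

The second hunk is a separate tightening in inet_csk_get_port(): once the bind hash is saturated, the fast path now also asks icsk_af_ops->bind_conflict() before settling on the smallest bucket, instead of taking it unconditionally.
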
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index 2ada17129fce..2746c1fa6417 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -858,7 +858,7 @@ static int inet_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
 	    nlmsg_len(nlh) < hdrlen)
 		return -EINVAL;
 
-	if (nlh->nlmsg_flags & NLM_F_DUMP) {
+	if ((nlh->nlmsg_flags & NLM_F_DUMP) == NLM_F_DUMP) {
 		if (nlmsg_attrlen(nlh, hdrlen)) {
 			struct nlattr *attr;
 
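
NLM_F_DUMP is a composite flag, defined in linux/netlink.h as NLM_F_ROOT | NLM_F_MATCH, so a plain bitwise AND also fires when only one of the two bits is set; comparing the masked value against NLM_F_DUMP requires both. A standalone sketch of the difference, with the flag values copied from the netlink header:

#include <stdio.h>

/* Values as defined in linux/netlink.h */
#define NLM_F_ROOT	0x100
#define NLM_F_MATCH	0x200
#define NLM_F_DUMP	(NLM_F_ROOT | NLM_F_MATCH)

int main(void)
{
	unsigned int flags = NLM_F_ROOT;	/* only half of NLM_F_DUMP set */

	/* Old test: true, although this is not a real dump request. */
	printf("& test:  %d\n", (flags & NLM_F_DUMP) != 0);

	/* New test from the hunk above: false unless both bits are set. */
	printf("== test: %d\n", (flags & NLM_F_DUMP) == NLM_F_DUMP);
	return 0;
}
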
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index 3fac340a28d5..e855fffaed95 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -710,42 +710,25 @@ static void get_counters(const struct xt_table_info *t,
 	struct arpt_entry *iter;
 	unsigned int cpu;
 	unsigned int i;
-	unsigned int curcpu = get_cpu();
-
-	/* Instead of clearing (by a previous call to memset())
-	 * the counters and using adds, we set the counters
-	 * with data used by 'current' CPU
-	 *
-	 * Bottom half has to be disabled to prevent deadlock
-	 * if new softirq were to run and call ipt_do_table
-	 */
-	local_bh_disable();
-	i = 0;
-	xt_entry_foreach(iter, t->entries[curcpu], t->size) {
-		SET_COUNTER(counters[i], iter->counters.bcnt,
-			    iter->counters.pcnt);
-		++i;
-	}
-	local_bh_enable();
-	/* Processing counters from other cpus, we can let bottom half enabled,
-	 * (preemption is disabled)
-	 */
 
 	for_each_possible_cpu(cpu) {
-		if (cpu == curcpu)
-			continue;
+		seqlock_t *lock = &per_cpu(xt_info_locks, cpu).lock;
+
 		i = 0;
-		local_bh_disable();
-		xt_info_wrlock(cpu);
 		xt_entry_foreach(iter, t->entries[cpu], t->size) {
-			ADD_COUNTER(counters[i], iter->counters.bcnt,
-				    iter->counters.pcnt);
+			u64 bcnt, pcnt;
+			unsigned int start;
+
+			do {
+				start = read_seqbegin(lock);
+				bcnt = iter->counters.bcnt;
+				pcnt = iter->counters.pcnt;
+			} while (read_seqretry(lock, start));
+
+			ADD_COUNTER(counters[i], bcnt, pcnt);
 			++i;
 		}
-		xt_info_wrunlock(cpu);
-		local_bh_enable();
 	}
-	put_cpu();
 }
 
 static struct xt_counters *alloc_counters(const struct xt_table *table)
@@ -759,7 +742,7 @@ static struct xt_counters *alloc_counters(const struct xt_table *table)
 	 * about).
 	 */
 	countersize = sizeof(struct xt_counters) * private->number;
-	counters = vmalloc(countersize);
+	counters = vzalloc(countersize);
 
 	if (counters == NULL)
 		return ERR_PTR(-ENOMEM);
@@ -1007,7 +990,7 @@ static int __do_replace(struct net *net, const char *name,
 	struct arpt_entry *iter;
 
 	ret = 0;
-	counters = vmalloc(num_counters * sizeof(struct xt_counters));
+	counters = vzalloc(num_counters * sizeof(struct xt_counters));
 	if (!counters) {
 		ret = -ENOMEM;
 		goto out;
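
The get_counters() rewrite stops disabling bottom halves and taking the per-cpu write lock; instead it reads each entry's bcnt/pcnt pair inside a seqlock retry loop, which only loops again if a writer (the packet path) updated the counters mid-read. A generic sketch of that reader pattern: the stats structure below is illustrative, only read_seqbegin()/read_seqretry() are the real seqlock API.

#include <linux/seqlock.h>
#include <linux/types.h>

/* Illustrative record protected by a seqlock, standing in for the
 * per-entry counters guarded by per_cpu(xt_info_locks, cpu).lock. */
struct stats {
	seqlock_t lock;
	u64 bytes;
	u64 packets;
};

static void stats_read(const struct stats *s, u64 *bytes, u64 *packets)
{
	unsigned int start;

	do {
		start = read_seqbegin(&s->lock);	/* snapshot the sequence */
		*bytes = s->bytes;
		*packets = s->packets;
	} while (read_seqretry(&s->lock, start));	/* retry if a writer ran */
}

The trade-off is a rare extra read on the reader side in exchange for never blocking the packet path or local softirqs while counters are collected.
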
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index a846d633b3b6..652efea013dc 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -884,42 +884,25 @@ get_counters(const struct xt_table_info *t,
 	struct ipt_entry *iter;
 	unsigned int cpu;
 	unsigned int i;
-	unsigned int curcpu = get_cpu();
-
-	/* Instead of clearing (by a previous call to memset())
-	 * the counters and using adds, we set the counters
-	 * with data used by 'current' CPU.
-	 *
-	 * Bottom half has to be disabled to prevent deadlock
-	 * if new softirq were to run and call ipt_do_table
-	 */
-	local_bh_disable();
-	i = 0;
-	xt_entry_foreach(iter, t->entries[curcpu], t->size) {
-		SET_COUNTER(counters[i], iter->counters.bcnt,
-			    iter->counters.pcnt);
-		++i;
-	}
-	local_bh_enable();
-	/* Processing counters from other cpus, we can let bottom half enabled,
-	 * (preemption is disabled)
-	 */
 
 	for_each_possible_cpu(cpu) {
-		if (cpu == curcpu)
-			continue;
+		seqlock_t *lock = &per_cpu(xt_info_locks, cpu).lock;
+
 		i = 0;
-		local_bh_disable();
-		xt_info_wrlock(cpu);
 		xt_entry_foreach(iter, t->entries[cpu], t->size) {
-			ADD_COUNTER(counters[i], iter->counters.bcnt,
-				    iter->counters.pcnt);
+			u64 bcnt, pcnt;
+			unsigned int start;
+
+			do {
+				start = read_seqbegin(lock);
+				bcnt = iter->counters.bcnt;
+				pcnt = iter->counters.pcnt;
+			} while (read_seqretry(lock, start));
+
+			ADD_COUNTER(counters[i], bcnt, pcnt);
 			++i; /* macro does multi eval of i */
 		}
-		xt_info_wrunlock(cpu);
-		local_bh_enable();
 	}
-	put_cpu();
 }
 
 static struct xt_counters *alloc_counters(const struct xt_table *table)
@@ -932,7 +915,7 @@ static struct xt_counters *alloc_counters(const struct xt_table *table)
 	   (other than comefrom, which userspace doesn't care
 	   about). */
 	countersize = sizeof(struct xt_counters) * private->number;
-	counters = vmalloc(countersize);
+	counters = vzalloc(countersize);
 
 	if (counters == NULL)
 		return ERR_PTR(-ENOMEM);
@@ -1203,7 +1186,7 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
 	struct ipt_entry *iter;
 
 	ret = 0;
-	counters = vmalloc(num_counters * sizeof(struct xt_counters));
+	counters = vzalloc(num_counters * sizeof(struct xt_counters));
 	if (!counters) {
 		ret = -ENOMEM;
 		goto out;
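
Both files also switch the counter allocations from vmalloc() to vzalloc(). Because the array now starts out zeroed, get_counters() can accumulate with ADD_COUNTER() for every CPU, including the local one, instead of first seeding the array with SET_COUNTER() as the deleted code did. A small kernel-style sketch of the allocation side; alloc_zeroed_counters() is an illustrative name, not something the patch adds.

#include <linux/err.h>
#include <linux/vmalloc.h>
#include <linux/netfilter/x_tables.h>

static struct xt_counters *alloc_zeroed_counters(unsigned int number)
{
	/* vzalloc() is vmalloc() plus zero fill, so every xt_counters entry
	 * begins at 0 and pure ADD_COUNTER() accumulation is correct. */
	struct xt_counters *counters = vzalloc(number * sizeof(struct xt_counters));

	return counters ? counters : ERR_PTR(-ENOMEM);
}
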