author     Linus Torvalds <torvalds@linux-foundation.org>  2014-01-18 01:19:28 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>  2014-01-18 01:19:28 -0500
commit     7d0d46da750a252371cb747b48ddda27d1047881 (patch)
tree       db6ac506c54775047278332e1cd3e42aad2aacb9 /net
parent     48ba620aab90f4c7e9bb002e2f30863a4ea0f915 (diff)
parent     3af57f78c38131b7a66e2b01e06fdacae01992a3 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:

 1) The value chosen for the new SO_MAX_PACING_RATE socket option on
    parisc was very poorly chosen, let's fix it while we still can.
    From Eric Dumazet.

 2) Our generic reciprocal divide was found to handle some edge cases
    incorrectly, part of this is encoded into the BPF as deep as the JIT
    engines themselves.  Just use a real divide throughout for now.
    From Eric Dumazet.  (A sketch of the failing arithmetic follows this
    message.)

 3) Because the initial lookup is lockless, the TCP metrics engine can
    end up creating two entries for the same lookup key.  Fix this by
    doing a second lookup under the lock before we actually create the
    new entry.  From Christoph Paasch.

 4) Fix scatter-gather list init in usbnet driver, from Bjørn Mork.

 5) Fix unintended 32-bit truncation in cxgb4 driver's bit shifting.
    From Dan Carpenter.

 6) Netlink socket dumping uses the wrong socket state for timewait
    sockets.  Fix from Neal Cardwell.

 7) Fix netlink memory leak in ieee802154_add_iface(), from Christian
    Engelmayer.

 8) Multicast forwarding in ipv4 can overflow the per-rule reference
    counts, causing all multicast traffic to cease.  Fix from Hannes
    Frederic Sowa.

 9) via-rhine needs to stop all TX queues when it resets the device,
    from Richard Weinberger.

10) Fix RDS per-cpu accesses broken by the this_cpu_* conversions.
    From Gerald Schaefer.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net:
  s390/bpf,jit: fix 32 bit divisions, use unsigned divide instructions
  parisc: fix SO_MAX_PACING_RATE typo
  ipv6: simplify detection of first operational link-local address on interface
  tcp: metrics: Avoid duplicate entries with the same destination-IP
  net: rds: fix per-cpu helper usage
  e1000e: Fix compilation warning when !CONFIG_PM_SLEEP
  bpf: do not use reciprocal divide
  be2net: add dma_mapping_error() check for dma_map_page()
  bnx2x: Don't release PCI bars on shutdown
  net,via-rhine: Fix tx_timeout handling
  batman-adv: fix batman-adv header overhead calculation
  qlge: Fix vlan netdev features.
  net: avoid reference counter overflows on fib_rules in multicast forwarding
  dm9601: add USB IDs for new dm96xx variants
  MAINTAINERS: add virtio-dev ML for virtio
  ieee802154: Fix memory leak in ieee802154_add_iface()
  net: usbnet: fix SG initialisation
  inet_diag: fix inet_diag_dump_icsk() to use correct state for timewait sockets
  cxgb4: silence shift wrapping static checker warning
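The reciprocal-divide problem in (2) comes down to the old reciprocal_value()/reciprocal_divide() pair approximating A / K with a 32-bit multiply and shift, and that approximation is not exact for every divisor; the checker, the interpreter and the JITs all baked it in, which is why the series simply switches back to a real divide. Below is a minimal user-space sketch of the old arithmetic and its best-known failure mode; the two helpers are written to mirror the semantics of the old include/linux/reciprocal_div.h, while the main() harness and the chosen values are illustrative only.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Mirrors the old reciprocal_value(): R = ceil(2^32 / k), truncated to u32. */
static uint32_t reciprocal_value(uint32_t k)
{
	return (uint32_t)((((uint64_t)1 << 32) + k - 1) / k);
}

/* Mirrors the old reciprocal_divide(): a / k approximated as (a * R) >> 32. */
static uint32_t reciprocal_divide(uint32_t a, uint32_t r)
{
	return (uint32_t)(((uint64_t)a * r) >> 32);
}

int main(void)
{
	uint32_t a = 42;

	/* k == 1 is the classic edge case: ceil(2^32 / 1) == 2^32 truncates
	 * to 0 in a u32, so the multiply-and-shift "divide by 1" yields 0
	 * for every dividend instead of the dividend itself. */
	uint32_t r = reciprocal_value(1);

	printf("exact      : %" PRIu32 "\n", a / 1);			/* 42 */
	printf("reciprocal : %" PRIu32 "\n", reciprocal_divide(a, r));	/* 0  */
	return 0;
}

This is the same special case the comment removed from sk_decode_filter() in net/core/filter.c below alludes to ("X == 1 translates into R == 0"); using a true division removes the whole class of problems at once.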
Diffstat (limited to 'net')
-rw-r--r--  net/batman-adv/main.c     2
-rw-r--r--  net/core/filter.c        30
-rw-r--r--  net/ieee802154/nl-phy.c   6
-rw-r--r--  net/ipv4/inet_diag.c      5
-rw-r--r--  net/ipv4/ipmr.c           7
-rw-r--r--  net/ipv4/tcp_metrics.c   51
-rw-r--r--  net/ipv6/addrconf.c      38
-rw-r--r--  net/ipv6/ip6mr.c          7
-rw-r--r--  net/rds/ib_recv.c         7
9 files changed, 73 insertions(+), 80 deletions(-)
diff --git a/net/batman-adv/main.c b/net/batman-adv/main.c
index 1511f64a6cea..faba0f61ad53 100644
--- a/net/batman-adv/main.c
+++ b/net/batman-adv/main.c
@@ -277,7 +277,7 @@ int batadv_max_header_len(void)
 			   sizeof(struct batadv_coded_packet));
 #endif
 
-	return header_len;
+	return header_len + ETH_HLEN;
 }
 
 /**
diff --git a/net/core/filter.c b/net/core/filter.c
index 01b780856db2..ad30d626a5bd 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -36,7 +36,6 @@
 #include <asm/uaccess.h>
 #include <asm/unaligned.h>
 #include <linux/filter.h>
-#include <linux/reciprocal_div.h>
 #include <linux/ratelimit.h>
 #include <linux/seccomp.h>
 #include <linux/if_vlan.h>
@@ -166,7 +165,7 @@ unsigned int sk_run_filter(const struct sk_buff *skb,
 			A /= X;
 			continue;
 		case BPF_S_ALU_DIV_K:
-			A = reciprocal_divide(A, K);
+			A /= K;
 			continue;
 		case BPF_S_ALU_MOD_X:
 			if (X == 0)
@@ -553,11 +552,6 @@ int sk_chk_filter(struct sock_filter *filter, unsigned int flen)
 		/* Some instructions need special checks */
 		switch (code) {
 		case BPF_S_ALU_DIV_K:
-			/* check for division by zero */
-			if (ftest->k == 0)
-				return -EINVAL;
-			ftest->k = reciprocal_value(ftest->k);
-			break;
 		case BPF_S_ALU_MOD_K:
 			/* check for division by zero */
 			if (ftest->k == 0)
@@ -853,27 +847,7 @@ void sk_decode_filter(struct sock_filter *filt, struct sock_filter *to)
 	to->code = decodes[code];
 	to->jt = filt->jt;
 	to->jf = filt->jf;
-
-	if (code == BPF_S_ALU_DIV_K) {
-		/*
-		 * When loaded this rule user gave us X, which was
-		 * translated into R = r(X). Now we calculate the
-		 * RR = r(R) and report it back. If next time this
-		 * value is loaded and RRR = r(RR) is calculated
-		 * then the R == RRR will be true.
-		 *
-		 * One exception. X == 1 translates into R == 0 and
-		 * we can't calculate RR out of it with r().
-		 */
-
-		if (filt->k == 0)
-			to->k = 1;
-		else
-			to->k = reciprocal_value(filt->k);
-
-		BUG_ON(reciprocal_value(to->k) != filt->k);
-	} else
-		to->k = filt->k;
+	to->k = filt->k;
 }
 
 int sk_get_filter(struct sock *sk, struct sock_filter __user *ubuf, unsigned int len)
diff --git a/net/ieee802154/nl-phy.c b/net/ieee802154/nl-phy.c
index d08c7a43dcd1..89b265aea151 100644
--- a/net/ieee802154/nl-phy.c
+++ b/net/ieee802154/nl-phy.c
@@ -221,8 +221,10 @@ int ieee802154_add_iface(struct sk_buff *skb, struct genl_info *info)
 
 	if (info->attrs[IEEE802154_ATTR_DEV_TYPE]) {
 		type = nla_get_u8(info->attrs[IEEE802154_ATTR_DEV_TYPE]);
-		if (type >= __IEEE802154_DEV_MAX)
-			return -EINVAL;
+		if (type >= __IEEE802154_DEV_MAX) {
+			rc = -EINVAL;
+			goto nla_put_failure;
+		}
 	}
 
 	dev = phy->add_iface(phy, devname, type);
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index a0f52dac8940..e34dccbc4d70 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -930,12 +930,15 @@ skip_listen_ht:
 		spin_lock_bh(lock);
 		sk_nulls_for_each(sk, node, &head->chain) {
 			int res;
+			int state;
 
 			if (!net_eq(sock_net(sk), net))
 				continue;
 			if (num < s_num)
 				goto next_normal;
-			if (!(r->idiag_states & (1 << sk->sk_state)))
+			state = (sk->sk_state == TCP_TIME_WAIT) ?
+				inet_twsk(sk)->tw_substate : sk->sk_state;
+			if (!(r->idiag_states & (1 << state)))
 				goto next_normal;
 			if (r->sdiag_family != AF_UNSPEC &&
 			    sk->sk_family != r->sdiag_family)
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 62212c772a4b..1672409f5ba5 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -157,9 +157,12 @@ static struct mr_table *ipmr_get_table(struct net *net, u32 id)
 static int ipmr_fib_lookup(struct net *net, struct flowi4 *flp4,
 			   struct mr_table **mrt)
 {
-	struct ipmr_result res;
-	struct fib_lookup_arg arg = { .result = &res, };
 	int err;
+	struct ipmr_result res;
+	struct fib_lookup_arg arg = {
+		.result = &res,
+		.flags = FIB_LOOKUP_NOREF,
+	};
 
 	err = fib_rules_lookup(net->ipv4.mr_rules_ops,
 			       flowi4_to_flowi(flp4), 0, &arg);
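The new .flags = FIB_LOOKUP_NOREF above (mirrored for IPv6 in net/ipv6/ip6mr.c below) is the change item (8) of the pull message describes: without it, every forwarded multicast packet's rule lookup took a reference on the matching FIB rule that the multicast path never dropped, so the 32-bit reference count eventually overflowed and multicast forwarding stopped. A rough, purely illustrative calculation of how fast a leak of one reference per packet gets there (the packet rate is an assumption, not a measurement):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* One leaked rule reference per forwarded packet: a 32-bit counter
	 * wraps after 2^32 increments.  100 kpps is an assumed rate. */
	const uint64_t wrap_after = UINT64_C(1) << 32;
	const uint64_t pkts_per_sec = 100000;
	double hours = (double)wrap_after / (double)pkts_per_sec / 3600.0;

	printf("wraps after %" PRIu64 " packets, ~%.1f hours at %" PRIu64 " pps\n",
	       wrap_after, hours, pkts_per_sec);
	return 0;
}

With FIB_LOOKUP_NOREF the lookup takes no rule reference in the first place, so there is nothing left to leak.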
diff --git a/net/ipv4/tcp_metrics.c b/net/ipv4/tcp_metrics.c
index 06493736fbc8..098b3a29f6f3 100644
--- a/net/ipv4/tcp_metrics.c
+++ b/net/ipv4/tcp_metrics.c
@@ -22,6 +22,9 @@
 
 int sysctl_tcp_nometrics_save __read_mostly;
 
+static struct tcp_metrics_block *__tcp_get_metrics(const struct inetpeer_addr *addr,
+						   struct net *net, unsigned int hash);
+
 struct tcp_fastopen_metrics {
 	u16	mss;
 	u16	syn_loss:10;		/* Recurring Fast Open SYN losses */
@@ -130,16 +133,41 @@ static void tcpm_suck_dst(struct tcp_metrics_block *tm, struct dst_entry *dst,
 	}
 }
 
+#define TCP_METRICS_TIMEOUT		(60 * 60 * HZ)
+
+static void tcpm_check_stamp(struct tcp_metrics_block *tm, struct dst_entry *dst)
+{
+	if (tm && unlikely(time_after(jiffies, tm->tcpm_stamp + TCP_METRICS_TIMEOUT)))
+		tcpm_suck_dst(tm, dst, false);
+}
+
+#define TCP_METRICS_RECLAIM_DEPTH	5
+#define TCP_METRICS_RECLAIM_PTR	(struct tcp_metrics_block *) 0x1UL
+
 static struct tcp_metrics_block *tcpm_new(struct dst_entry *dst,
 					  struct inetpeer_addr *addr,
-					  unsigned int hash,
-					  bool reclaim)
+					  unsigned int hash)
 {
 	struct tcp_metrics_block *tm;
 	struct net *net;
+	bool reclaim = false;
 
 	spin_lock_bh(&tcp_metrics_lock);
 	net = dev_net(dst->dev);
+
+	/* While waiting for the spin-lock the cache might have been populated
+	 * with this entry and so we have to check again.
+	 */
+	tm = __tcp_get_metrics(addr, net, hash);
+	if (tm == TCP_METRICS_RECLAIM_PTR) {
+		reclaim = true;
+		tm = NULL;
+	}
+	if (tm) {
+		tcpm_check_stamp(tm, dst);
+		goto out_unlock;
+	}
+
 	if (unlikely(reclaim)) {
 		struct tcp_metrics_block *oldest;
 
@@ -169,17 +197,6 @@ out_unlock:
 	return tm;
 }
 
-#define TCP_METRICS_TIMEOUT		(60 * 60 * HZ)
-
-static void tcpm_check_stamp(struct tcp_metrics_block *tm, struct dst_entry *dst)
-{
-	if (tm && unlikely(time_after(jiffies, tm->tcpm_stamp + TCP_METRICS_TIMEOUT)))
-		tcpm_suck_dst(tm, dst, false);
-}
-
-#define TCP_METRICS_RECLAIM_DEPTH	5
-#define TCP_METRICS_RECLAIM_PTR	(struct tcp_metrics_block *) 0x1UL
-
 static struct tcp_metrics_block *tcp_get_encode(struct tcp_metrics_block *tm, int depth)
 {
 	if (tm)
@@ -282,7 +299,6 @@ static struct tcp_metrics_block *tcp_get_metrics(struct sock *sk,
 	struct inetpeer_addr addr;
 	unsigned int hash;
 	struct net *net;
-	bool reclaim;
 
 	addr.family = sk->sk_family;
 	switch (addr.family) {
@@ -304,13 +320,10 @@ static struct tcp_metrics_block *tcp_get_metrics(struct sock *sk,
 	hash = hash_32(hash, net->ipv4.tcp_metrics_hash_log);
 
 	tm = __tcp_get_metrics(&addr, net, hash);
-	reclaim = false;
-	if (tm == TCP_METRICS_RECLAIM_PTR) {
-		reclaim = true;
+	if (tm == TCP_METRICS_RECLAIM_PTR)
 		tm = NULL;
-	}
 	if (!tm && create)
-		tm = tcpm_new(dst, &addr, hash, reclaim);
+		tm = tcpm_new(dst, &addr, hash);
 	else
 		tcpm_check_stamp(tm, dst);
 
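For context on the tcp_metrics hunks above: the first __tcp_get_metrics() lookup runs lockless, so two CPUs can both miss the cache for the same destination and both end up in tcpm_new(); the fix repeats the lookup once tcp_metrics_lock is held and bails out if the entry appeared in the meantime. The sketch below shows that double-checked create pattern in self-contained user-space C; the table, types and names are hypothetical, and the kernel's lockless read side additionally depends on RCU, which the sketch does not model.

#include <pthread.h>
#include <stdlib.h>
#include <string.h>

struct entry {
	struct entry *next;
	char key[32];
};

static struct entry *buckets[256];
static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

static unsigned int hash_key(const char *key)
{
	unsigned int h = 5381;

	while (*key)
		h = h * 33 + (unsigned char)*key++;
	return h & 255;
}

/* Lockless read side (the kernel does this under RCU): it may legitimately
 * miss an entry that another CPU is inserting at the same moment. */
static struct entry *lookup(const char *key, unsigned int h)
{
	struct entry *e;

	for (e = buckets[h]; e; e = e->next)
		if (!strcmp(e->key, key))
			return e;
	return NULL;
}

/* Create path: look up a second time under the lock so two racing callers
 * that both missed the lockless lookup cannot insert the same key twice. */
static struct entry *lookup_or_create(const char *key)
{
	unsigned int h = hash_key(key);
	struct entry *e = lookup(key, h);

	if (e)
		return e;

	pthread_mutex_lock(&table_lock);
	e = lookup(key, h);			/* re-check, now serialized */
	if (!e) {
		e = calloc(1, sizeof(*e));
		if (e) {
			strncpy(e->key, key, sizeof(e->key) - 1);
			e->next = buckets[h];
			buckets[h] = e;
		}
	}
	pthread_mutex_unlock(&table_lock);
	return e;
}

int main(void)
{
	struct entry *a = lookup_or_create("192.0.2.1");
	struct entry *b = lookup_or_create("192.0.2.1");

	return (a && a == b) ? 0 : 1;		/* one entry per key */
}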
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index abe46a4228ce..4b6b720971b9 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -3189,6 +3189,22 @@ out:
 	in6_ifa_put(ifp);
 }
 
+/* ifp->idev must be at least read locked */
+static bool ipv6_lonely_lladdr(struct inet6_ifaddr *ifp)
+{
+	struct inet6_ifaddr *ifpiter;
+	struct inet6_dev *idev = ifp->idev;
+
+	list_for_each_entry(ifpiter, &idev->addr_list, if_list) {
+		if (ifp != ifpiter && ifpiter->scope == IFA_LINK &&
+		    (ifpiter->flags & (IFA_F_PERMANENT|IFA_F_TENTATIVE|
+				       IFA_F_OPTIMISTIC|IFA_F_DADFAILED)) ==
+		    IFA_F_PERMANENT)
+			return false;
+	}
+	return true;
+}
+
 static void addrconf_dad_completed(struct inet6_ifaddr *ifp)
 {
 	struct net_device *dev = ifp->idev->dev;
@@ -3208,14 +3224,11 @@ static void addrconf_dad_completed(struct inet6_ifaddr *ifp)
 	 */
 
 	read_lock_bh(&ifp->idev->lock);
-	spin_lock(&ifp->lock);
-	send_mld = ipv6_addr_type(&ifp->addr) & IPV6_ADDR_LINKLOCAL &&
-		   ifp->idev->valid_ll_addr_cnt == 1;
+	send_mld = ifp->scope == IFA_LINK && ipv6_lonely_lladdr(ifp);
 	send_rs = send_mld &&
 		  ipv6_accept_ra(ifp->idev) &&
 		  ifp->idev->cnf.rtr_solicits > 0 &&
 		  (dev->flags&IFF_LOOPBACK) == 0;
-	spin_unlock(&ifp->lock);
 	read_unlock_bh(&ifp->idev->lock);
 
 	/* While dad is in progress mld report's source address is in6_addrany.
@@ -4512,19 +4525,6 @@ errout:
 	rtnl_set_sk_err(net, RTNLGRP_IPV6_PREFIX, err);
 }
 
-static void update_valid_ll_addr_cnt(struct inet6_ifaddr *ifp, int count)
-{
-	write_lock_bh(&ifp->idev->lock);
-	spin_lock(&ifp->lock);
-	if (((ifp->flags & (IFA_F_PERMANENT|IFA_F_TENTATIVE|IFA_F_OPTIMISTIC|
-			    IFA_F_DADFAILED)) == IFA_F_PERMANENT) &&
-	    (ipv6_addr_type(&ifp->addr) & IPV6_ADDR_LINKLOCAL))
-		ifp->idev->valid_ll_addr_cnt += count;
-	WARN_ON(ifp->idev->valid_ll_addr_cnt < 0);
-	spin_unlock(&ifp->lock);
-	write_unlock_bh(&ifp->idev->lock);
-}
-
 static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
 {
 	struct net *net = dev_net(ifp->idev->dev);
@@ -4533,8 +4533,6 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
 
 	switch (event) {
 	case RTM_NEWADDR:
-		update_valid_ll_addr_cnt(ifp, 1);
-
 		/*
 		 * If the address was optimistic
 		 * we inserted the route at the start of
@@ -4550,8 +4548,6 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
 					      ifp->idev->dev, 0, 0);
 		break;
 	case RTM_DELADDR:
-		update_valid_ll_addr_cnt(ifp, -1);
-
 		if (ifp->idev->cnf.forwarding)
 			addrconf_leave_anycast(ifp);
 		addrconf_leave_solict(ifp->idev, &ifp->addr);
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index f365310bfcca..0eb4038a4d63 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -141,9 +141,12 @@ static struct mr6_table *ip6mr_get_table(struct net *net, u32 id)
 static int ip6mr_fib_lookup(struct net *net, struct flowi6 *flp6,
 			    struct mr6_table **mrt)
 {
-	struct ip6mr_result res;
-	struct fib_lookup_arg arg = { .result = &res, };
 	int err;
+	struct ip6mr_result res;
+	struct fib_lookup_arg arg = {
+		.result = &res,
+		.flags = FIB_LOOKUP_NOREF,
+	};
 
 	err = fib_rules_lookup(net->ipv6.mr6_rules_ops,
 			       flowi6_to_flowi(flp6), 0, &arg);
diff --git a/net/rds/ib_recv.c b/net/rds/ib_recv.c
index 8eb9501e3d60..b7ebe23cdedf 100644
--- a/net/rds/ib_recv.c
+++ b/net/rds/ib_recv.c
@@ -421,8 +421,7 @@ static void rds_ib_recv_cache_put(struct list_head *new_item,
 				     struct rds_ib_refill_cache *cache)
 {
 	unsigned long flags;
-	struct list_head *old;
-	struct list_head __percpu *chpfirst;
+	struct list_head *old, *chpfirst;
 
 	local_irq_save(flags);
 
@@ -432,7 +431,7 @@ static void rds_ib_recv_cache_put(struct list_head *new_item,
 	else /* put on front */
 		list_add_tail(new_item, chpfirst);
 
-	__this_cpu_write(chpfirst, new_item);
+	__this_cpu_write(cache->percpu->first, new_item);
 	__this_cpu_inc(cache->percpu->count);
 
 	if (__this_cpu_read(cache->percpu->count) < RDS_IB_RECYCLE_BATCH_COUNT)
@@ -452,7 +451,7 @@ static void rds_ib_recv_cache_put(struct list_head *new_item,
 	} while (old);
 
 
-	__this_cpu_write(chpfirst, NULL);
+	__this_cpu_write(cache->percpu->first, NULL);
 	__this_cpu_write(cache->percpu->count, 0);
 end:
 	local_irq_restore(flags);
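The two __this_cpu_write() changes above are what item (10) of the pull message refers to: chpfirst holds a value that was already read out of the per-cpu area, i.e. an ordinary pointer, whereas __this_cpu_write() has to be handed the per-cpu lvalue itself (cache->percpu->first). A loose user-space analogy, with per-thread storage standing in for per-CPU data and purely illustrative names:

#include <stdio.h>

struct cache_head {
	void *first;
	unsigned long count;
};

/* Per-thread stand-in for the kernel's per-CPU cache head. */
static __thread struct cache_head my_cache;

/* Broken shape: the caller passes a *copy* of the slot's current value,
 * so assigning to it cannot update the per-thread slot -- the same kind
 * of mistake as handing a plain local to __this_cpu_write(). */
static void put_item_via_copy(void *slot_value, void *item)
{
	slot_value = item;
	(void)slot_value;		/* the update is lost */
}

/* Fixed shape: name the per-thread lvalue itself, just as the patch does
 * with __this_cpu_write(cache->percpu->first, new_item). */
static void put_item(void *item)
{
	my_cache.first = item;
	my_cache.count++;
}

int main(void)
{
	int a;

	put_item_via_copy(my_cache.first, &a);
	printf("via copy: first=%p count=%lu\n", my_cache.first, my_cache.count);
	put_item(&a);
	printf("direct  : first=%p count=%lu\n", my_cache.first, my_cache.count);
	return 0;
}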