author		Linus Torvalds <torvalds@linux-foundation.org>	2011-12-23 17:57:55 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2011-12-23 17:57:55 -0500
commit		155d4551bd0ab04367f4ca4e0a229774497da4d8 (patch)
tree		e5ea2f802e778c4ad6e343ea344992a20076dd70
parent		ad1fca2003822ff2f24c88ad68a29970c5e0d0a7 (diff)
parent		6350323ad8def2ac00d77cdee3b79c9b9fba75c4 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net:
  netfilter: xt_connbytes: handle negation correctly
  net: relax rcvbuf limits
  rps: fix insufficient bounds checking in store_rps_dev_flow_table_cnt()
  net: introduce DST_NOPEER dst flag
  mqprio: Avoid panic if no options are provided
  bridge: provide a mtu() method for fake_dst_ops
-rw-r--r--	include/net/dst.h	1
-rw-r--r--	include/net/sock.h	4
-rw-r--r--	net/bridge/br_netfilter.c	8
-rw-r--r--	net/core/net-sysfs.c	7
-rw-r--r--	net/core/sock.c	6
-rw-r--r--	net/ipv4/route.c	4
-rw-r--r--	net/ipv6/ip6_output.c	2
-rw-r--r--	net/netfilter/xt_connbytes.c	6
-rw-r--r--	net/packet/af_packet.c	6
-rw-r--r--	net/sched/sch_mqprio.c	2
10 files changed, 26 insertions, 20 deletions
diff --git a/include/net/dst.h b/include/net/dst.h
index 6faec1a60216..75766b42660e 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -53,6 +53,7 @@ struct dst_entry {
 #define DST_NOHASH		0x0008
 #define DST_NOCACHE		0x0010
 #define DST_NOCOUNT		0x0020
+#define DST_NOPEER		0x0040
 
 	short			error;
 	short			obsolete;
diff --git a/include/net/sock.h b/include/net/sock.h
index abb6e0f0c3c3..32e39371fba6 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -637,12 +637,14 @@ static inline void __sk_add_backlog(struct sock *sk, struct sk_buff *skb)
 
 /*
  * Take into account size of receive queue and backlog queue
+ * Do not take into account this skb truesize,
+ * to allow even a single big packet to come.
  */
 static inline bool sk_rcvqueues_full(const struct sock *sk, const struct sk_buff *skb)
 {
 	unsigned int qsize = sk->sk_backlog.len + atomic_read(&sk->sk_rmem_alloc);
 
-	return qsize + skb->truesize > sk->sk_rcvbuf;
+	return qsize > sk->sk_rcvbuf;
 }
 
 /* The per-socket spinlock must be held here. */
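The sk_rcvqueues_full() change above is the core of the "net: relax rcvbuf limits" patch: the truesize of the skb being admitted is no longer charged up front, so even a single packet larger than the whole receive buffer can still be queued. As a rough, hypothetical illustration (plain userspace C; fake_sock and the helper names are stand-ins, not kernel API):

/* Userspace sketch only: fake_sock mimics the few struct sock fields
 * involved in the check; it is not the kernel data structure. */
#include <stdbool.h>
#include <stdio.h>

struct fake_sock {
	unsigned int rmem_alloc;   /* bytes already charged to the receive queue */
	unsigned int backlog_len;  /* bytes sitting in the backlog */
	int rcvbuf;                /* SO_RCVBUF limit */
};

/* Old test: the incoming packet's truesize counts against rcvbuf. */
static bool rcvqueues_full_old(const struct fake_sock *sk, unsigned int truesize)
{
	unsigned int qsize = sk->backlog_len + sk->rmem_alloc;
	return qsize + truesize > (unsigned int)sk->rcvbuf;
}

/* New test: only memory already queued counts; the skb in flight does not. */
static bool rcvqueues_full_new(const struct fake_sock *sk, unsigned int truesize)
{
	unsigned int qsize = sk->backlog_len + sk->rmem_alloc;
	(void)truesize;
	return qsize > (unsigned int)sk->rcvbuf;
}

int main(void)
{
	struct fake_sock sk = { .rmem_alloc = 0, .backlog_len = 0, .rcvbuf = 2048 };
	unsigned int big_skb = 4096;	/* truesize larger than the whole rcvbuf */

	/* Old: dropped even though the queue is empty; new: accepted. */
	printf("old drop=%d, new drop=%d\n",
	       rcvqueues_full_old(&sk, big_skb),
	       rcvqueues_full_new(&sk, big_skb));
	return 0;
}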
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index d6ec3720c77e..fa8b8f763580 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -114,12 +114,18 @@ static struct neighbour *fake_neigh_lookup(const struct dst_entry *dst, const vo
 	return NULL;
 }
 
+static unsigned int fake_mtu(const struct dst_entry *dst)
+{
+	return dst->dev->mtu;
+}
+
 static struct dst_ops fake_dst_ops = {
 	.family =		AF_INET,
 	.protocol =		cpu_to_be16(ETH_P_IP),
 	.update_pmtu =		fake_update_pmtu,
 	.cow_metrics =		fake_cow_metrics,
 	.neigh_lookup =		fake_neigh_lookup,
+	.mtu =			fake_mtu,
 };
 
 /*
@@ -141,7 +147,7 @@ void br_netfilter_rtable_init(struct net_bridge *br)
 	rt->dst.dev = br->dev;
 	rt->dst.path = &rt->dst;
 	dst_init_metrics(&rt->dst, br_dst_default_metrics, true);
-	rt->dst.flags	= DST_NOXFRM;
+	rt->dst.flags	= DST_NOXFRM | DST_NOPEER;
 	rt->dst.ops = &fake_dst_ops;
 }
 
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index c71c434a4c05..385aefe53648 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -665,11 +665,14 @@ static ssize_t store_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue,
 	if (count) {
 		int i;
 
-		if (count > 1<<30) {
+		if (count > INT_MAX)
+			return -EINVAL;
+		count = roundup_pow_of_two(count);
+		if (count > (ULONG_MAX - sizeof(struct rps_dev_flow_table))
+		    / sizeof(struct rps_dev_flow)) {
 			/* Enforce a limit to prevent overflow */
 			return -EINVAL;
 		}
-		count = roundup_pow_of_two(count);
 		table = vmalloc(RPS_DEV_FLOW_TABLE_SIZE(count));
 		if (!table)
 			return -ENOMEM;
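The two-step bounds check added above can be hard to read in diff form. The sketch below is a hypothetical userspace rendering of the same logic (flow/flow_table are placeholder structs, not the kernel's rps types): cap count before rounding it up to a power of two, then make sure the rounded count cannot overflow the size computation handed to the allocator.

#include <errno.h>
#include <limits.h>
#include <stdio.h>

struct flow { unsigned int head; unsigned int qtail; };	/* placeholder */
struct flow_table { unsigned int mask; };			/* placeholder */

static unsigned long roundup_pow_of_two_ul(unsigned long n)
{
	unsigned long p = 1;
	while (p < n)
		p <<= 1;
	return p;
}

/* Mirrors the validation added in the hunk above; returns 0 or -EINVAL. */
static int validated_table_size(unsigned long count, unsigned long *size)
{
	if (count > INT_MAX)			/* step 1: keep the roundup well-defined */
		return -EINVAL;
	count = roundup_pow_of_two_ul(count);
	if (count > (ULONG_MAX - sizeof(struct flow_table))
	    / sizeof(struct flow))		/* step 2: the size math must not wrap */
		return -EINVAL;
	*size = sizeof(struct flow_table) + count * sizeof(struct flow);
	return 0;
}

int main(void)
{
	unsigned long size;

	if (validated_table_size(1000, &size) == 0)
		printf("1000 entries -> %lu bytes\n", size);	/* accepted */
	if (validated_table_size(3UL << 30, &size) != 0)
		printf("3<<30 entries rejected (> INT_MAX)\n");	/* rejected */
	return 0;
}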
diff --git a/net/core/sock.c b/net/core/sock.c
index 4ed7b1d12f5e..b23f174ab84c 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -288,11 +288,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 	unsigned long flags;
 	struct sk_buff_head *list = &sk->sk_receive_queue;
 
-	/* Cast sk->rcvbuf to unsigned... It's pointless, but reduces
-	   number of warnings when compiling with -W --ANK
-	 */
-	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
-	    (unsigned)sk->sk_rcvbuf) {
+	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
 		atomic_inc(&sk->sk_drops);
 		trace_sock_rcvqueue_full(sk, skb);
 		return -ENOMEM;
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 85cc053d9d6e..94cdbc55ca7e 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1367,7 +1367,7 @@ void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more)
 {
 	struct rtable *rt = (struct rtable *) dst;
 
-	if (rt) {
+	if (rt && !(rt->dst.flags & DST_NOPEER)) {
 		if (rt->peer == NULL)
 			rt_bind_peer(rt, rt->rt_dst, 1);
 
@@ -1378,7 +1378,7 @@ void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more)
 			iph->id = htons(inet_getid(rt->peer, more));
 			return;
 		}
-	} else
+	} else if (!rt)
 		printk(KERN_DEBUG "rt_bind_peer(0) @%p\n",
 		       __builtin_return_address(0));
 
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 84d0bd5cac93..ec562713db9b 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -603,7 +603,7 @@ void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt)
 	static atomic_t ipv6_fragmentation_id;
 	int old, new;
 
-	if (rt) {
+	if (rt && !(rt->dst.flags & DST_NOPEER)) {
 		struct inet_peer *peer;
 
 		if (!rt->rt6i_peer)
diff --git a/net/netfilter/xt_connbytes.c b/net/netfilter/xt_connbytes.c
index 5b138506690e..9ddf1c3bfb39 100644
--- a/net/netfilter/xt_connbytes.c
+++ b/net/netfilter/xt_connbytes.c
@@ -87,10 +87,10 @@ connbytes_mt(const struct sk_buff *skb, struct xt_action_param *par)
 		break;
 	}
 
-	if (sinfo->count.to)
+	if (sinfo->count.to >= sinfo->count.from)
 		return what <= sinfo->count.to && what >= sinfo->count.from;
-	else
-		return what >= sinfo->count.from;
+	else /* inverted */
+		return what < sinfo->count.to || what > sinfo->count.from;
 }
 
 static int connbytes_mt_check(const struct xt_mtchk_param *par)
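For the connbytes fix above: xt_connbytes has no explicit invert flag, and (as the changed test implies) a negated range arrives with its bounds swapped, so to < from marks the inverted case and the match should hit counts outside the band. A hypothetical standalone sketch of the corrected comparison (connbytes_in_range is illustrative, not the kernel function):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* An ordinary range matches counts inside [from, to]; a swapped range
 * (to < from) matches counts outside (to, from). */
static bool connbytes_in_range(uint64_t what, uint64_t from, uint64_t to)
{
	if (to >= from)
		return what <= to && what >= from;
	else /* inverted */
		return what < to || what > from;
}

int main(void)
{
	/* A negated 100:200 range would arrive as from=200, to=100. */
	printf("%d %d %d\n",
	       connbytes_in_range(50,  200, 100),	/* 1: below the excluded band */
	       connbytes_in_range(150, 200, 100),	/* 0: inside the excluded band */
	       connbytes_in_range(250, 200, 100));	/* 1: above the excluded band */
	return 0;
}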
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 82a6f34d39d0..3891702b81df 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -1630,8 +1630,7 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
 	if (snaplen > res)
 		snaplen = res;
 
-	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
-	    (unsigned)sk->sk_rcvbuf)
+	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
 		goto drop_n_acct;
 
 	if (skb_shared(skb)) {
@@ -1762,8 +1761,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
 	if (po->tp_version <= TPACKET_V2) {
 		if (macoff + snaplen > po->rx_ring.frame_size) {
 			if (po->copy_thresh &&
-			    atomic_read(&sk->sk_rmem_alloc) + skb->truesize
-			    < (unsigned)sk->sk_rcvbuf) {
+			    atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
 				if (skb_shared(skb)) {
 					copy_skb = skb_clone(skb, GFP_ATOMIC);
 				} else {
diff --git a/net/sched/sch_mqprio.c b/net/sched/sch_mqprio.c
index f88256cbacbf..28de43092330 100644
--- a/net/sched/sch_mqprio.c
+++ b/net/sched/sch_mqprio.c
@@ -107,7 +107,7 @@ static int mqprio_init(struct Qdisc *sch, struct nlattr *opt)
 	if (!netif_is_multiqueue(dev))
 		return -EOPNOTSUPP;
 
-	if (nla_len(opt) < sizeof(*qopt))
+	if (!opt || nla_len(opt) < sizeof(*qopt))
 		return -EINVAL;
 
 	qopt = nla_data(opt);