Diffstat (limited to 'net')
 net/core/dev.c                    | 58
 net/core/dev_addr_lists.c         |  3
 net/core/filter.c                 |  9
 net/core/skbuff.c                 |  4
 net/ipv4/tcp.c                    |  2
 net/ipv6/mcast.c                  |  2
 net/netfilter/nf_conntrack_core.c |  1
 net/netfilter/xt_CT.c             | 28
 net/netlink/af_netlink.c          | 24
 net/phonet/pep.c                  |  3
 net/sctp/socket.c                 |  5
 net/socket.c                      |  6
 12 files changed, 71 insertions(+), 74 deletions(-)
diff --git a/net/core/dev.c b/net/core/dev.c
index 6c7dc9d78e10..c25d453b2803 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4028,54 +4028,41 @@ static int dev_ifconf(struct net *net, char __user *arg)
 
 #ifdef CONFIG_PROC_FS
 
-#define BUCKET_SPACE (32 - NETDEV_HASHBITS)
-
-struct dev_iter_state {
-	struct seq_net_private p;
-	unsigned int pos; /* bucket << BUCKET_SPACE + offset */
-};
+#define BUCKET_SPACE (32 - NETDEV_HASHBITS - 1)
 
 #define get_bucket(x) ((x) >> BUCKET_SPACE)
 #define get_offset(x) ((x) & ((1 << BUCKET_SPACE) - 1))
 #define set_bucket_offset(b, o) ((b) << BUCKET_SPACE | (o))
 
-static inline struct net_device *dev_from_same_bucket(struct seq_file *seq)
+static inline struct net_device *dev_from_same_bucket(struct seq_file *seq, loff_t *pos)
 {
-	struct dev_iter_state *state = seq->private;
 	struct net *net = seq_file_net(seq);
 	struct net_device *dev;
 	struct hlist_node *p;
 	struct hlist_head *h;
-	unsigned int count, bucket, offset;
+	unsigned int count = 0, offset = get_offset(*pos);
 
-	bucket = get_bucket(state->pos);
-	offset = get_offset(state->pos);
-	h = &net->dev_name_head[bucket];
-	count = 0;
+	h = &net->dev_name_head[get_bucket(*pos)];
 	hlist_for_each_entry_rcu(dev, p, h, name_hlist) {
-		if (count++ == offset) {
-			state->pos = set_bucket_offset(bucket, count);
+		if (++count == offset)
 			return dev;
-		}
 	}
 
 	return NULL;
 }
 
-static inline struct net_device *dev_from_new_bucket(struct seq_file *seq)
+static inline struct net_device *dev_from_bucket(struct seq_file *seq, loff_t *pos)
 {
-	struct dev_iter_state *state = seq->private;
 	struct net_device *dev;
 	unsigned int bucket;
 
-	bucket = get_bucket(state->pos);
 	do {
-		dev = dev_from_same_bucket(seq);
+		dev = dev_from_same_bucket(seq, pos);
 		if (dev)
 			return dev;
 
-		bucket++;
-		state->pos = set_bucket_offset(bucket, 0);
+		bucket = get_bucket(*pos) + 1;
+		*pos = set_bucket_offset(bucket, 1);
 	} while (bucket < NETDEV_HASHENTRIES);
 
 	return NULL;
@@ -4088,33 +4075,20 @@ static inline struct net_device *dev_from_new_bucket(struct seq_file *seq)
 void *dev_seq_start(struct seq_file *seq, loff_t *pos)
 	__acquires(RCU)
 {
-	struct dev_iter_state *state = seq->private;
-
 	rcu_read_lock();
 	if (!*pos)
 		return SEQ_START_TOKEN;
 
-	/* check for end of the hash */
-	if (state->pos == 0 && *pos > 1)
+	if (get_bucket(*pos) >= NETDEV_HASHENTRIES)
 		return NULL;
 
-	return dev_from_new_bucket(seq);
+	return dev_from_bucket(seq, pos);
 }
 
 void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 {
-	struct net_device *dev;
-
 	++*pos;
-
-	if (v == SEQ_START_TOKEN)
-		return dev_from_new_bucket(seq);
-
-	dev = dev_from_same_bucket(seq);
-	if (dev)
-		return dev;
-
-	return dev_from_new_bucket(seq);
+	return dev_from_bucket(seq, pos);
 }
 
 void dev_seq_stop(struct seq_file *seq, void *v)
@@ -4213,13 +4187,7 @@ static const struct seq_operations dev_seq_ops = {
 static int dev_seq_open(struct inode *inode, struct file *file)
 {
 	return seq_open_net(inode, file, &dev_seq_ops,
-			    sizeof(struct dev_iter_state));
-}
-
-int dev_seq_open_ops(struct inode *inode, struct file *file,
-		     const struct seq_operations *ops)
-{
-	return seq_open_net(inode, file, ops, sizeof(struct dev_iter_state));
+			    sizeof(struct seq_net_private));
 }
 
 static const struct file_operations dev_seq_fops = {
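
Note: with dev_iter_state gone, the seq_file position itself now encodes the name-hash bucket in its high bits and a 1-based offset within that bucket in its low bits. Below is a minimal userspace sketch of that packing, assuming NETDEV_HASHBITS is 8 as in mainline of this era; the macros simply mirror the ones in the hunk above, and the extra "- 1" in BUCKET_SPACE keeps bucket + offset within 31 bits so the position stays positive. Illustration only, not kernel code.

#include <assert.h>
#include <stdio.h>

/* Illustration only: mirrors the macros from net/core/dev.c after this patch. */
#define NETDEV_HASHBITS    8			/* assumed value */
#define NETDEV_HASHENTRIES (1 << NETDEV_HASHBITS)
#define BUCKET_SPACE       (32 - NETDEV_HASHBITS - 1)

#define get_bucket(x) ((x) >> BUCKET_SPACE)
#define get_offset(x) ((x) & ((1 << BUCKET_SPACE) - 1))
#define set_bucket_offset(b, o) ((b) << BUCKET_SPACE | (o))

int main(void)
{
	unsigned int pos = set_bucket_offset(5, 3);	/* bucket 5, 3rd device */

	assert(get_bucket(pos) == 5);
	assert(get_offset(pos) == 3);

	/* dev_from_bucket() advances like this once a bucket is exhausted. */
	pos = set_bucket_offset(get_bucket(pos) + 1, 1);
	printf("bucket=%u offset=%u (of %d buckets)\n",
	       get_bucket(pos), get_offset(pos), NETDEV_HASHENTRIES);
	return 0;
}
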
diff --git a/net/core/dev_addr_lists.c b/net/core/dev_addr_lists.c
index 29c07fef9228..626698f0db8b 100644
--- a/net/core/dev_addr_lists.c
+++ b/net/core/dev_addr_lists.c
@@ -696,7 +696,8 @@ static const struct seq_operations dev_mc_seq_ops = {
 
 static int dev_mc_seq_open(struct inode *inode, struct file *file)
 {
-	return dev_seq_open_ops(inode, file, &dev_mc_seq_ops);
+	return seq_open_net(inode, file, &dev_mc_seq_ops,
+			    sizeof(struct seq_net_private));
 }
 
 static const struct file_operations dev_mc_seq_fops = {
diff --git a/net/core/filter.c b/net/core/filter.c
index cf4989ac503b..6f755cca4520 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -39,8 +39,11 @@
 #include <linux/reciprocal_div.h>
 #include <linux/ratelimit.h>
 
-/* No hurry in this branch */
-static void *__load_pointer(const struct sk_buff *skb, int k, unsigned int size)
+/* No hurry in this branch
+ *
+ * Exported for the bpf jit load helper.
+ */
+void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, unsigned int size)
 {
 	u8 *ptr = NULL;
 
@@ -59,7 +62,7 @@ static inline void *load_pointer(const struct sk_buff *skb, int k,
 {
 	if (k >= 0)
 		return skb_header_pointer(skb, k, size, buffer);
-	return __load_pointer(skb, k, size);
+	return bpf_internal_load_pointer_neg_helper(skb, k, size);
 }
 
 /**
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index f223cdc75da6..baf8d281152c 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -3161,6 +3161,8 @@ static void sock_rmem_free(struct sk_buff *skb)
  */
 int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
 {
+	int len = skb->len;
+
 	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
 	    (unsigned)sk->sk_rcvbuf)
 		return -ENOMEM;
@@ -3175,7 +3177,7 @@ int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
 
 	skb_queue_tail(&sk->sk_error_queue, skb);
 	if (!sock_flag(sk, SOCK_DEAD))
-		sk->sk_data_ready(sk, skb->len);
+		sk->sk_data_ready(sk, len);
 	return 0;
 }
 EXPORT_SYMBOL(sock_queue_err_skb);
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index cfd7edda0a8e..5d54ed30e821 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -860,7 +860,7 @@ wait_for_memory:
 	}
 
 out:
-	if (copied)
+	if (copied && !(flags & MSG_SENDPAGE_NOTLAST))
 		tcp_push(sk, flags, mss_now, tp->nonagle);
 	return copied;
 
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index 16c33e308121..b2869cab2092 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -2044,7 +2044,7 @@ static int ip6_mc_add_src(struct inet6_dev *idev, const struct in6_addr *pmca,
 		if (!delta)
 			pmc->mca_sfcount[sfmode]--;
 		for (j=0; j<i; j++)
-			(void) ip6_mc_del1_src(pmc, sfmode, &psfsrc[i]);
+			ip6_mc_del1_src(pmc, sfmode, &psfsrc[j]);
 	} else if (isexclude != (pmc->mca_sfcount[MCAST_EXCLUDE] != 0)) {
 		struct ip6_sf_list *psf;
 
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index cbdb754dbb10..3cc4487ac349 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -735,6 +735,7 @@ __nf_conntrack_alloc(struct net *net, u16 zone,
 
 #ifdef CONFIG_NF_CONNTRACK_ZONES
 out_free:
+	atomic_dec(&net->ct.count);
 	kmem_cache_free(net->ct.nf_conntrack_cachep, ct);
 	return ERR_PTR(-ENOMEM);
 #endif
diff --git a/net/netfilter/xt_CT.c b/net/netfilter/xt_CT.c
index 0c8e43810ce3..59530e93fa58 100644
--- a/net/netfilter/xt_CT.c
+++ b/net/netfilter/xt_CT.c
@@ -150,6 +150,17 @@ err1:
 	return ret;
 }
 
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
+static void __xt_ct_tg_timeout_put(struct ctnl_timeout *timeout)
+{
+	typeof(nf_ct_timeout_put_hook) timeout_put;
+
+	timeout_put = rcu_dereference(nf_ct_timeout_put_hook);
+	if (timeout_put)
+		timeout_put(timeout);
+}
+#endif
+
 static int xt_ct_tg_check_v1(const struct xt_tgchk_param *par)
 {
 	struct xt_ct_target_info_v1 *info = par->targinfo;
@@ -158,7 +169,9 @@ static int xt_ct_tg_check_v1(const struct xt_tgchk_param *par)
 	struct nf_conn *ct;
 	int ret = 0;
 	u8 proto;
-
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
+	struct ctnl_timeout *timeout;
+#endif
 	if (info->flags & ~XT_CT_NOTRACK)
 		return -EINVAL;
 
@@ -216,7 +229,6 @@ static int xt_ct_tg_check_v1(const struct xt_tgchk_param *par)
 #ifdef CONFIG_NF_CONNTRACK_TIMEOUT
 	if (info->timeout) {
 		typeof(nf_ct_timeout_find_get_hook) timeout_find_get;
-		struct ctnl_timeout *timeout;
 		struct nf_conn_timeout *timeout_ext;
 
 		rcu_read_lock();
@@ -245,7 +257,7 @@ static int xt_ct_tg_check_v1(const struct xt_tgchk_param *par)
 				pr_info("Timeout policy `%s' can only be "
 					"used by L3 protocol number %d\n",
 					info->timeout, timeout->l3num);
-				goto err4;
+				goto err5;
 			}
 			/* Make sure the timeout policy matches any existing
 			 * protocol tracker, otherwise default to generic.
@@ -258,13 +270,13 @@ static int xt_ct_tg_check_v1(const struct xt_tgchk_param *par)
 					"used by L4 protocol number %d\n",
 					info->timeout,
 					timeout->l4proto->l4proto);
-				goto err4;
+				goto err5;
 			}
 			timeout_ext = nf_ct_timeout_ext_add(ct, timeout,
-							    GFP_KERNEL);
+							    GFP_ATOMIC);
 			if (timeout_ext == NULL) {
 				ret = -ENOMEM;
-				goto err4;
+				goto err5;
 			}
 		} else {
 			ret = -ENOENT;
@@ -281,8 +293,12 @@ out:
 	info->ct = ct;
 	return 0;
 
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
+err5:
+	__xt_ct_tg_timeout_put(timeout);
 err4:
 	rcu_read_unlock();
+#endif
 err3:
 	nf_conntrack_free(ct);
 err2:
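
Note: the new err5 label follows the usual kernel pattern of cascading error labels, where each label releases the most recently acquired resource and falls through to the labels below it, so the timeout reference taken under rcu_read_lock() is dropped before the read lock itself. A stand-alone sketch of that unwinding pattern follows; names here are hypothetical, not taken from xt_CT.c.

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical illustration of cascading error labels, as in the hunk above. */
static int setup_three(void **a, void **b, void **c)
{
	*a = malloc(16);
	if (!*a)
		goto err1;
	*b = malloc(16);
	if (!*b)
		goto err2;
	*c = malloc(16);
	if (!*c)
		goto err3;
	return 0;

err3:			/* undo the newest resource first, then fall through */
	free(*b);
err2:
	free(*a);
err1:
	return -1;
}

int main(void)
{
	void *a, *b, *c;

	if (setup_three(&a, &b, &c) == 0) {
		puts("all three acquired");
		free(c);
		free(b);
		free(a);
	}
	return 0;
}
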
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 32bb75324e76..faa48f70b7c9 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -829,12 +829,19 @@ int netlink_attachskb(struct sock *sk, struct sk_buff *skb,
 	return 0;
 }
 
-int netlink_sendskb(struct sock *sk, struct sk_buff *skb)
+static int __netlink_sendskb(struct sock *sk, struct sk_buff *skb)
 {
 	int len = skb->len;
 
 	skb_queue_tail(&sk->sk_receive_queue, skb);
 	sk->sk_data_ready(sk, len);
+	return len;
+}
+
+int netlink_sendskb(struct sock *sk, struct sk_buff *skb)
+{
+	int len = __netlink_sendskb(sk, skb);
+
 	sock_put(sk);
 	return len;
 }
@@ -957,8 +964,7 @@ static int netlink_broadcast_deliver(struct sock *sk, struct sk_buff *skb)
 	if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
 	    !test_bit(0, &nlk->state)) {
 		skb_set_owner_r(skb, sk);
-		skb_queue_tail(&sk->sk_receive_queue, skb);
-		sk->sk_data_ready(sk, skb->len);
+		__netlink_sendskb(sk, skb);
 		return atomic_read(&sk->sk_rmem_alloc) > (sk->sk_rcvbuf >> 1);
 	}
 	return -1;
@@ -1698,10 +1704,8 @@ static int netlink_dump(struct sock *sk)
 
 		if (sk_filter(sk, skb))
 			kfree_skb(skb);
-		else {
-			skb_queue_tail(&sk->sk_receive_queue, skb);
-			sk->sk_data_ready(sk, skb->len);
-		}
+		else
+			__netlink_sendskb(sk, skb);
 		return 0;
 	}
 
@@ -1715,10 +1719,8 @@ static int netlink_dump(struct sock *sk)
 
 	if (sk_filter(sk, skb))
 		kfree_skb(skb);
-	else {
-		skb_queue_tail(&sk->sk_receive_queue, skb);
-		sk->sk_data_ready(sk, skb->len);
-	}
+	else
+		__netlink_sendskb(sk, skb);
 
 	if (cb->done)
 		cb->done(cb);
diff --git a/net/phonet/pep.c b/net/phonet/pep.c
index 9f60008740e3..9726fe684ab8 100644
--- a/net/phonet/pep.c
+++ b/net/phonet/pep.c
@@ -1130,6 +1130,9 @@ static int pep_sendmsg(struct kiocb *iocb, struct sock *sk,
 	int flags = msg->msg_flags;
 	int err, done;
 
+	if (len > USHRT_MAX)
+		return -EMSGSIZE;
+
 	if ((msg->msg_flags & ~(MSG_DONTWAIT|MSG_EOR|MSG_NOSIGNAL|
 				MSG_CMSG_COMPAT)) ||
 	    !(msg->msg_flags & MSG_EOR))
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 06b42b7f5a02..92ba71dfe080 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -4133,9 +4133,10 @@ static int sctp_getsockopt_disable_fragments(struct sock *sk, int len,
 static int sctp_getsockopt_events(struct sock *sk, int len, char __user *optval,
 				  int __user *optlen)
 {
-	if (len < sizeof(struct sctp_event_subscribe))
+	if (len <= 0)
 		return -EINVAL;
-	len = sizeof(struct sctp_event_subscribe);
+	if (len > sizeof(struct sctp_event_subscribe))
+		len = sizeof(struct sctp_event_subscribe);
 	if (put_user(len, optlen))
 		return -EFAULT;
 	if (copy_to_user(optval, &sctp_sk(sk)->subscribe, len))
diff --git a/net/socket.c b/net/socket.c
index 484cc6953fc6..851edcd6b098 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -811,9 +811,9 @@ static ssize_t sock_sendpage(struct file *file, struct page *page,
 
 	sock = file->private_data;
 
-	flags = !(file->f_flags & O_NONBLOCK) ? 0 : MSG_DONTWAIT;
-	if (more)
-		flags |= MSG_MORE;
+	flags = (file->f_flags & O_NONBLOCK) ? MSG_DONTWAIT : 0;
+	/* more is a combination of MSG_MORE and MSG_SENDPAGE_NOTLAST */
+	flags |= more;
 
 	return kernel_sendpage(sock, page, offset, size, flags);
 }
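
Note: read together with the tcp.c hunk above, sock_sendpage() now passes its "more" argument straight through, so MSG_SENDPAGE_NOTLAST set by callers such as the splice path reaches the TCP sendpage code instead of being collapsed into MSG_MORE, and tcp_push() is only called for the last page. A small userspace sketch of the flag handling before and after; the flag values are restated locally for illustration, and MSG_SENDPAGE_NOTLAST is kernel-internal, so treat the numbers as assumptions.

#include <stdio.h>

/* Assumed values, mirroring include/linux/socket.h of this era. */
#define MSG_DONTWAIT         0x40
#define MSG_MORE             0x8000
#define MSG_SENDPAGE_NOTLAST 0x20000	/* kernel-internal flag */

static int old_sendpage_flags(int f_nonblock, int more)
{
	int flags = f_nonblock ? MSG_DONTWAIT : 0;

	if (more)			/* any non-zero "more" became MSG_MORE */
		flags |= MSG_MORE;
	return flags;
}

static int new_sendpage_flags(int f_nonblock, int more)
{
	int flags = f_nonblock ? MSG_DONTWAIT : 0;

	flags |= more;			/* MSG_MORE and MSG_SENDPAGE_NOTLAST both survive */
	return flags;
}

int main(void)
{
	int more = MSG_SENDPAGE_NOTLAST;	/* e.g. a middle page of a splice */

	printf("old flags: %#x\nnew flags: %#x\n",
	       old_sendpage_flags(0, more), new_sendpage_flags(0, more));
	/* Only the new variant lets the TCP sendpage path see NOTLAST and defer tcp_push(). */
	return 0;
}
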