Diffstat (limited to 'net/core')
-rw-r--r--  net/core/dev.c            133
-rw-r--r--  net/core/filter.c          23
-rw-r--r--  net/core/neighbour.c        4
-rw-r--r--  net/core/net_namespace.c    2
-rw-r--r--  net/core/rtnetlink.c       86
-rw-r--r--  net/core/skbuff.c           4
-rw-r--r--  net/core/sock.c            49
-rw-r--r--  net/core/sock_diag.c        4
-rw-r--r--  net/core/utils.c            8
9 files changed, 251 insertions(+), 62 deletions(-)
diff --git a/net/core/dev.c b/net/core/dev.c
index d2c8a06b3a98..fb8b0546485b 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2283,8 +2283,8 @@ EXPORT_SYMBOL(skb_checksum_help);
 
 __be16 skb_network_protocol(struct sk_buff *skb, int *depth)
 {
+	unsigned int vlan_depth = skb->mac_len;
 	__be16 type = skb->protocol;
-	int vlan_depth = skb->mac_len;
 
 	/* Tunnel gso handlers can set protocol to ethernet. */
 	if (type == htons(ETH_P_TEB)) {
@@ -2297,15 +2297,30 @@ __be16 skb_network_protocol(struct sk_buff *skb, int *depth)
 		type = eth->h_proto;
 	}
 
-	while (type == htons(ETH_P_8021Q) || type == htons(ETH_P_8021AD)) {
-		struct vlan_hdr *vh;
+	/* if skb->protocol is 802.1Q/AD then the header should already be
+	 * present at mac_len - VLAN_HLEN (if mac_len > 0), or at
+	 * ETH_HLEN otherwise
+	 */
+	if (type == htons(ETH_P_8021Q) || type == htons(ETH_P_8021AD)) {
+		if (vlan_depth) {
+			if (unlikely(WARN_ON(vlan_depth < VLAN_HLEN)))
+				return 0;
+			vlan_depth -= VLAN_HLEN;
+		} else {
+			vlan_depth = ETH_HLEN;
+		}
+		do {
+			struct vlan_hdr *vh;
 
-		if (unlikely(!pskb_may_pull(skb, vlan_depth + VLAN_HLEN)))
-			return 0;
+			if (unlikely(!pskb_may_pull(skb,
+						    vlan_depth + VLAN_HLEN)))
+				return 0;
 
 			vh = (struct vlan_hdr *)(skb->data + vlan_depth);
 			type = vh->h_vlan_encapsulated_proto;
 			vlan_depth += VLAN_HLEN;
+		} while (type == htons(ETH_P_8021Q) ||
+			 type == htons(ETH_P_8021AD));
 	}
 
 	*depth = vlan_depth;
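
Note how the rewrite trusts mac_len to locate the outermost VLAN tag instead of always starting at ETH_HLEN. As a minimal userspace sketch of the same stacked-tag walk (hypothetical raw frame buffer, no skb machinery; 0x8100/0x88A8 are the 802.1Q/802.1ad TPIDs):

	#include <stdint.h>
	#include <stddef.h>
	#include <string.h>
	#include <arpa/inet.h>

	#define ETH_HLEN  14
	#define VLAN_HLEN 4

	/* Return the innermost EtherType in host order, or 0 if truncated. */
	static uint16_t inner_protocol(const uint8_t *frame, size_t len)
	{
		size_t depth = ETH_HLEN;
		uint16_t type;

		if (len < ETH_HLEN)
			return 0;
		memcpy(&type, frame + 12, sizeof(type));	/* outer EtherType */
		while (type == htons(0x8100) || type == htons(0x88A8)) {
			if (len < depth + VLAN_HLEN)
				return 0;
			/* the encapsulated type sits 2 bytes into each tag */
			memcpy(&type, frame + depth + 2, sizeof(type));
			depth += VLAN_HLEN;
		}
		return ntohs(type);
	}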
@@ -2418,7 +2433,7 @@ EXPORT_SYMBOL(netdev_rx_csum_fault);
  *	2. No high memory really exists on this machine.
  */
 
-static int illegal_highdma(const struct net_device *dev, struct sk_buff *skb)
+static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
 {
 #ifdef CONFIG_HIGHMEM
 	int i;
@@ -2493,38 +2508,36 @@ static int dev_gso_segment(struct sk_buff *skb, netdev_features_t features)
 }
 
 static netdev_features_t harmonize_features(struct sk_buff *skb,
-					    const struct net_device *dev,
 					    netdev_features_t features)
 {
 	int tmp;
 
 	if (skb->ip_summed != CHECKSUM_NONE &&
 	    !can_checksum_protocol(features, skb_network_protocol(skb, &tmp))) {
 		features &= ~NETIF_F_ALL_CSUM;
-	} else if (illegal_highdma(dev, skb)) {
+	} else if (illegal_highdma(skb->dev, skb)) {
 		features &= ~NETIF_F_SG;
 	}
 
 	return features;
 }
 
-netdev_features_t netif_skb_dev_features(struct sk_buff *skb,
-					 const struct net_device *dev)
+netdev_features_t netif_skb_features(struct sk_buff *skb)
 {
 	__be16 protocol = skb->protocol;
-	netdev_features_t features = dev->features;
+	netdev_features_t features = skb->dev->features;
 
-	if (skb_shinfo(skb)->gso_segs > dev->gso_max_segs)
+	if (skb_shinfo(skb)->gso_segs > skb->dev->gso_max_segs)
 		features &= ~NETIF_F_GSO_MASK;
 
 	if (protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD)) {
 		struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
 		protocol = veh->h_vlan_encapsulated_proto;
 	} else if (!vlan_tx_tag_present(skb)) {
-		return harmonize_features(skb, dev, features);
+		return harmonize_features(skb, features);
 	}
 
-	features &= (dev->vlan_features | NETIF_F_HW_VLAN_CTAG_TX |
+	features &= (skb->dev->vlan_features | NETIF_F_HW_VLAN_CTAG_TX |
 		     NETIF_F_HW_VLAN_STAG_TX);
 
 	if (protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD))
@@ -2532,9 +2545,9 @@ netdev_features_t netif_skb_dev_features(struct sk_buff *skb,
 				NETIF_F_GEN_CSUM | NETIF_F_HW_VLAN_CTAG_TX |
 				NETIF_F_HW_VLAN_STAG_TX;
 
-	return harmonize_features(skb, dev, features);
+	return harmonize_features(skb, features);
 }
-EXPORT_SYMBOL(netif_skb_dev_features);
+EXPORT_SYMBOL(netif_skb_features);
 
 int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
 			struct netdev_queue *txq)
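
With the device argument gone, callers derive everything from skb->dev. A condensed, hedged sketch of the usual transmit-path call site (details elided):

	netdev_features_t features = netif_skb_features(skb);

	if (netif_needs_gso(skb, features)) {
		/* segment now if the device can't GSO this skb */
		if (unlikely(dev_gso_segment(skb, features)))
			goto out_kfree_skb;
	}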
@@ -3953,6 +3966,7 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff
 	}
 	NAPI_GRO_CB(skb)->count = 1;
 	NAPI_GRO_CB(skb)->age = jiffies;
+	NAPI_GRO_CB(skb)->last = skb;
 	skb_shinfo(skb)->gso_size = skb_gro_len(skb);
 	skb->next = napi->gro_list;
 	napi->gro_list = skb;
@@ -4543,6 +4557,32 @@ void *netdev_adjacent_get_private(struct list_head *adj_list)
 EXPORT_SYMBOL(netdev_adjacent_get_private);
 
 /**
+ * netdev_upper_get_next_dev_rcu - Get the next dev from upper list
+ * @dev: device
+ * @iter: list_head ** of the current position
+ *
+ * Gets the next device from the dev's upper list, starting from iter
+ * position. The caller must hold RCU read lock.
+ */
+struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
+						 struct list_head **iter)
+{
+	struct netdev_adjacent *upper;
+
+	WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());
+
+	upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
+
+	if (&upper->list == &dev->adj_list.upper)
+		return NULL;
+
+	*iter = &upper->list;
+
+	return upper->dev;
+}
+EXPORT_SYMBOL(netdev_upper_get_next_dev_rcu);
+
+/**
  * netdev_all_upper_get_next_dev_rcu - Get the next dev from upper list
  * @dev: device
  * @iter: list_head ** of the current position
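
A hedged usage sketch of the new iterator (illustrative only; the caller supplies the cursor and must hold the RCU read lock):

	struct net_device *upper;
	struct list_head *iter;

	rcu_read_lock();
	iter = &dev->adj_list.upper;
	while ((upper = netdev_upper_get_next_dev_rcu(dev, &iter)) != NULL)
		pr_info("upper dev: %s\n", upper->name);
	rcu_read_unlock();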
@@ -4624,6 +4664,32 @@ void *netdev_lower_get_next_private_rcu(struct net_device *dev,
 EXPORT_SYMBOL(netdev_lower_get_next_private_rcu);
 
 /**
+ * netdev_lower_get_next - Get the next device from the lower neighbour
+ * list
+ * @dev: device
+ * @iter: list_head ** of the current position
+ *
+ * Gets the next netdev_adjacent from the dev's lower neighbour
+ * list, starting from iter position. The caller must hold RTNL lock or
+ * its own locking that guarantees that the neighbour lower
+ * list will remain unchanged.
+ */
+void *netdev_lower_get_next(struct net_device *dev, struct list_head **iter)
+{
+	struct netdev_adjacent *lower;
+
+	lower = list_entry((*iter)->next, struct netdev_adjacent, list);
+
+	if (&lower->list == &dev->adj_list.lower)
+		return NULL;
+
+	*iter = &lower->list;
+
+	return lower->dev;
+}
+EXPORT_SYMBOL(netdev_lower_get_next);
+
+/**
  * netdev_lower_get_first_private_rcu - Get the first ->private from the
  *					lower neighbour list, RCU
  *					variant
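
The RTNL-side counterpart is driven the same way; a hedged sketch (illustrative names):

	struct net_device *lower;
	struct list_head *iter;

	ASSERT_RTNL();
	iter = &dev->adj_list.lower;
	while ((lower = netdev_lower_get_next(dev, &iter)) != NULL)
		pr_info("lower dev: %s\n", lower->name);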
@@ -5073,6 +5139,30 @@ void *netdev_lower_dev_get_private(struct net_device *dev,
 }
 EXPORT_SYMBOL(netdev_lower_dev_get_private);
 
+
+int dev_get_nest_level(struct net_device *dev,
+		       bool (*type_check)(struct net_device *dev))
+{
+	struct net_device *lower = NULL;
+	struct list_head *iter;
+	int max_nest = -1;
+	int nest;
+
+	ASSERT_RTNL();
+
+	netdev_for_each_lower_dev(dev, lower, iter) {
+		nest = dev_get_nest_level(lower, type_check);
+		if (max_nest < nest)
+			max_nest = nest;
+	}
+
+	if (type_check(dev))
+		max_nest++;
+
+	return max_nest;
+}
+EXPORT_SYMBOL(dev_get_nest_level);
+
 static void dev_change_rx_flags(struct net_device *dev, int flags)
 {
 	const struct net_device_ops *ops = dev->netdev_ops;
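
By its recursion, dev_get_nest_level() returns one less than the number of type-matching devices along the deepest lower-dev chain (so -1 when nothing matches). A hedged sketch with a hypothetical predicate:

	/* hypothetical predicate: is this a VLAN device? */
	static bool my_is_vlan(struct net_device *dev)
	{
		return dev->priv_flags & IFF_802_1Q_VLAN;
	}

	/* under RTNL: eth0 -> -1, eth0.100 -> 0, eth0.100.200 -> 1 */
	int nest = dev_get_nest_level(dev, my_is_vlan);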
@@ -5238,7 +5328,6 @@ void __dev_set_rx_mode(struct net_device *dev)
 	if (ops->ndo_set_rx_mode)
 		ops->ndo_set_rx_mode(dev);
 }
-EXPORT_SYMBOL(__dev_set_rx_mode);
 
 void dev_set_rx_mode(struct net_device *dev)
 {
@@ -5543,7 +5632,7 @@ static int dev_new_index(struct net *net)
 
 /* Delayed registration/unregisteration */
 static LIST_HEAD(net_todo_list);
-static DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq);
+DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq);
 
 static void net_set_todo(struct net_device *dev)
 {
diff --git a/net/core/filter.c b/net/core/filter.c
index cd58614660cf..4aec7b93f1a9 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -122,6 +122,13 @@ noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
 	return 0;
 }
 
+/* Register mappings for user programs. */
+#define A_REG		0
+#define X_REG		7
+#define TMP_REG		8
+#define ARG2_REG	2
+#define ARG3_REG	3
+
 /**
  *	__sk_run_filter - run a filter on a given context
  *	@ctx: buffer to run the filter on
@@ -242,6 +249,8 @@ unsigned int __sk_run_filter(void *ctx, const struct sock_filter_int *insn)
 
 	regs[FP_REG]  = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)];
 	regs[ARG1_REG] = (u64) (unsigned long) ctx;
+	regs[A_REG] = 0;
+	regs[X_REG] = 0;
 
 select_insn:
 	goto *jumptable[insn->code];
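
Clearing A and X up front preserves classic BPF's documented initial state, since a filter may legally read them before any store. A hedged sketch of such a program (using the classic uapi macros):

	#include <linux/filter.h>

	/* "A += X; return A" -- without the clears above, this would
	 * return whatever happened to be in the register file */
	static const struct sock_filter prog[] = {
		BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
		BPF_STMT(BPF_RET | BPF_A, 0),
	};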
@@ -643,13 +652,6 @@ static u64 __get_raw_cpu_id(u64 ctx, u64 A, u64 X, u64 r4, u64 r5)
 	return raw_smp_processor_id();
 }
 
-/* Register mappings for user programs. */
-#define A_REG		0
-#define X_REG		7
-#define TMP_REG		8
-#define ARG2_REG	2
-#define ARG3_REG	3
-
 static bool convert_bpf_extensions(struct sock_filter *fp,
 				   struct sock_filter_int **insnp)
 {
@@ -1557,8 +1559,13 @@ static struct sk_filter *__sk_prepare_filter(struct sk_filter *fp,
 	fp->jited = 0;
 
 	err = sk_chk_filter(fp->insns, fp->len);
-	if (err)
+	if (err) {
+		if (sk != NULL)
+			sk_filter_uncharge(sk, fp);
+		else
+			kfree(fp);
 		return ERR_PTR(err);
+	}
 
 	/* Probe if we can JIT compile the filter and if so, do
 	 * the compilation of the filter.
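
The fix makes the error path honor the function's ownership contract: fp is consumed on failure too. A hedged caller-side sketch:

	/* ownership of fp passes in; on error it has already been freed
	 * (and uncharged against sk when one was supplied) */
	filter = __sk_prepare_filter(fp, sk);
	if (IS_ERR(filter))
		return PTR_ERR(filter);	/* must not touch fp here */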
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 8f8a96ef9f3f..32d872eec7f5 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -1248,8 +1248,8 @@ void __neigh_set_probe_once(struct neighbour *neigh)
 	neigh->updated = jiffies;
 	if (!(neigh->nud_state & NUD_FAILED))
 		return;
-	neigh->nud_state = NUD_PROBE;
-	atomic_set(&neigh->probes, NEIGH_VAR(neigh->parms, UCAST_PROBES));
+	neigh->nud_state = NUD_INCOMPLETE;
+	atomic_set(&neigh->probes, neigh_max_probes(neigh));
 	neigh_add_timer(neigh,
 			jiffies + NEIGH_VAR(neigh->parms, RETRANS_TIME));
 }
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index 81d3a9a08453..7c8ffd974961 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -24,7 +24,7 @@
 
 static LIST_HEAD(pernet_list);
 static struct list_head *first_device = &pernet_list;
-static DEFINE_MUTEX(net_mutex);
+DEFINE_MUTEX(net_mutex);
 
 LIST_HEAD(net_namespace_list);
 EXPORT_SYMBOL_GPL(net_namespace_list);
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index d4ff41739b0f..2d8d8fcfa060 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -353,15 +353,46 @@ void __rtnl_link_unregister(struct rtnl_link_ops *ops)
 }
 EXPORT_SYMBOL_GPL(__rtnl_link_unregister);
 
+/* Return with the rtnl_lock held when there are no network
+ * devices unregistering in any network namespace.
+ */
+static void rtnl_lock_unregistering_all(void)
+{
+	struct net *net;
+	bool unregistering;
+	DEFINE_WAIT(wait);
+
+	for (;;) {
+		prepare_to_wait(&netdev_unregistering_wq, &wait,
+				TASK_UNINTERRUPTIBLE);
+		unregistering = false;
+		rtnl_lock();
+		for_each_net(net) {
+			if (net->dev_unreg_count > 0) {
+				unregistering = true;
+				break;
+			}
+		}
+		if (!unregistering)
+			break;
+		__rtnl_unlock();
+		schedule();
+	}
+	finish_wait(&netdev_unregistering_wq, &wait);
+}
+
 /**
  * rtnl_link_unregister - Unregister rtnl_link_ops from rtnetlink.
  * @ops: struct rtnl_link_ops * to unregister
  */
 void rtnl_link_unregister(struct rtnl_link_ops *ops)
 {
-	rtnl_lock();
+	/* Close the race with cleanup_net() */
+	mutex_lock(&net_mutex);
+	rtnl_lock_unregistering_all();
 	__rtnl_link_unregister(ops);
 	rtnl_unlock();
+	mutex_unlock(&net_mutex);
 }
 EXPORT_SYMBOL_GPL(rtnl_link_unregister);
 
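A hedged sketch of the caller side this protects (hypothetical rtnl_link-based driver teardown):

	static void __exit foo_cleanup_module(void)
	{
		/* now serialized against cleanup_net(), so the ops cannot
		 * be torn down while a namespace is dying */
		rtnl_link_unregister(&foo_link_ops);
	}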
@@ -774,7 +805,8 @@ static inline int rtnl_vfinfo_size(const struct net_device *dev,
 		return 0;
 }
 
-static size_t rtnl_port_size(const struct net_device *dev)
+static size_t rtnl_port_size(const struct net_device *dev,
+			     u32 ext_filter_mask)
 {
 	size_t port_size = nla_total_size(4)	/* PORT_VF */
 		+ nla_total_size(PORT_PROFILE_MAX)	/* PORT_PROFILE */
@@ -790,7 +822,8 @@ static size_t rtnl_port_size(const struct net_device *dev)
 	size_t port_self_size = nla_total_size(sizeof(struct nlattr))
 		+ port_size;
 
-	if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent)
+	if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent ||
+	    !(ext_filter_mask & RTEXT_FILTER_VF))
 		return 0;
 	if (dev_num_vf(dev->dev.parent))
 		return port_self_size + vf_ports_size +
@@ -826,7 +859,7 @@ static noinline size_t if_nlmsg_size(const struct net_device *dev,
 	       + nla_total_size(ext_filter_mask
 			        & RTEXT_FILTER_VF ? 4 : 0) /* IFLA_NUM_VF */
 	       + rtnl_vfinfo_size(dev, ext_filter_mask) /* IFLA_VFINFO_LIST */
-	       + rtnl_port_size(dev) /* IFLA_VF_PORTS + IFLA_PORT_SELF */
+	       + rtnl_port_size(dev, ext_filter_mask) /* IFLA_VF_PORTS + IFLA_PORT_SELF */
 	       + rtnl_link_get_size(dev) /* IFLA_LINKINFO */
 	       + rtnl_link_get_af_size(dev) /* IFLA_AF_SPEC */
 	       + nla_total_size(MAX_PHYS_PORT_ID_LEN); /* IFLA_PHYS_PORT_ID */
@@ -888,11 +921,13 @@ static int rtnl_port_self_fill(struct sk_buff *skb, struct net_device *dev)
 	return 0;
 }
 
-static int rtnl_port_fill(struct sk_buff *skb, struct net_device *dev)
+static int rtnl_port_fill(struct sk_buff *skb, struct net_device *dev,
+			  u32 ext_filter_mask)
 {
 	int err;
 
-	if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent)
+	if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent ||
+	    !(ext_filter_mask & RTEXT_FILTER_VF))
 		return 0;
 
 	err = rtnl_port_self_fill(skb, dev);
@@ -1079,7 +1114,7 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
 		nla_nest_end(skb, vfinfo);
 	}
 
-	if (rtnl_port_fill(skb, dev))
+	if (rtnl_port_fill(skb, dev, ext_filter_mask))
 		goto nla_put_failure;
 
 	if (dev->rtnl_link_ops || rtnl_have_link_slave_info(dev)) {
@@ -1198,6 +1233,7 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
 	struct hlist_head *head;
 	struct nlattr *tb[IFLA_MAX+1];
 	u32 ext_filter_mask = 0;
+	int err;
 
 	s_h = cb->args[0];
 	s_idx = cb->args[1];
@@ -1218,11 +1254,17 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
 		hlist_for_each_entry_rcu(dev, head, index_hlist) {
 			if (idx < s_idx)
 				goto cont;
-			if (rtnl_fill_ifinfo(skb, dev, RTM_NEWLINK,
-					     NETLINK_CB(cb->skb).portid,
-					     cb->nlh->nlmsg_seq, 0,
-					     NLM_F_MULTI,
-					     ext_filter_mask) <= 0)
+			err = rtnl_fill_ifinfo(skb, dev, RTM_NEWLINK,
+					       NETLINK_CB(cb->skb).portid,
+					       cb->nlh->nlmsg_seq, 0,
+					       NLM_F_MULTI,
+					       ext_filter_mask);
+			/* If we ran out of room on the first message,
+			 * we're in trouble
+			 */
+			WARN_ON((err == -EMSGSIZE) && (skb->len == 0));
+
+			if (err <= 0)
 				goto out;
 
 			nl_dump_check_consistent(cb, nlmsg_hdr(skb));
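
The err <= 0 check leans on the netlink dump convention: a fill function signals a full skb with -EMSGSIZE and the core retries into a fresh one, so only -EMSGSIZE on an empty skb is unrecoverable. A simplified, hedged sketch of that convention inside a fill callback:

	nlh = nlmsg_put(skb, portid, seq, type, payload_len, flags);
	if (!nlh)
		return -EMSGSIZE;	/* skb full: dump resumes in a new skb */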
@@ -1395,7 +1437,8 @@ static int do_set_master(struct net_device *dev, int ifindex)
 	return 0;
 }
 
-static int do_setlink(struct net_device *dev, struct ifinfomsg *ifm,
+static int do_setlink(const struct sk_buff *skb,
+		      struct net_device *dev, struct ifinfomsg *ifm,
 		      struct nlattr **tb, char *ifname, int modified)
 {
 	const struct net_device_ops *ops = dev->netdev_ops;
@@ -1407,7 +1450,7 @@ static int do_setlink(struct net_device *dev, struct ifinfomsg *ifm,
 			err = PTR_ERR(net);
 			goto errout;
 		}
-		if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) {
+		if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN)) {
 			err = -EPERM;
 			goto errout;
 		}
@@ -1661,7 +1704,7 @@ static int rtnl_setlink(struct sk_buff *skb, struct nlmsghdr *nlh)
 	if (err < 0)
 		goto errout;
 
-	err = do_setlink(dev, ifm, tb, ifname, 0);
+	err = do_setlink(skb, dev, ifm, tb, ifname, 0);
 errout:
 	return err;
 }
@@ -1778,7 +1821,8 @@ err:
 }
 EXPORT_SYMBOL(rtnl_create_link);
 
-static int rtnl_group_changelink(struct net *net, int group,
+static int rtnl_group_changelink(const struct sk_buff *skb,
+		struct net *net, int group,
 		struct ifinfomsg *ifm,
 		struct nlattr **tb)
 {
@@ -1787,7 +1831,7 @@ static int rtnl_group_changelink(struct net *net, int group,
 
 	for_each_netdev(net, dev) {
 		if (dev->group == group) {
-			err = do_setlink(dev, ifm, tb, NULL, 0);
+			err = do_setlink(skb, dev, ifm, tb, NULL, 0);
 			if (err < 0)
 				return err;
 		}
@@ -1929,12 +1973,12 @@ replay:
 			modified = 1;
 		}
 
-		return do_setlink(dev, ifm, tb, ifname, modified);
+		return do_setlink(skb, dev, ifm, tb, ifname, modified);
 	}
 
 	if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
 		if (ifm->ifi_index == 0 && tb[IFLA_GROUP])
-			return rtnl_group_changelink(net,
+			return rtnl_group_changelink(skb, net,
 					nla_get_u32(tb[IFLA_GROUP]),
 					ifm, tb);
 		return -ENODEV;
@@ -2321,7 +2365,7 @@ static int rtnl_fdb_del(struct sk_buff *skb, struct nlmsghdr *nlh)
 	int err = -EINVAL;
 	__u8 *addr;
 
-	if (!capable(CAP_NET_ADMIN))
+	if (!netlink_capable(skb, CAP_NET_ADMIN))
 		return -EPERM;
 
 	err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL);
@@ -2773,7 +2817,7 @@ static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
 	sz_idx = type>>2;
 	kind = type&3;
 
-	if (kind != 2 && !ns_capable(net->user_ns, CAP_NET_ADMIN))
+	if (kind != 2 && !netlink_net_capable(skb, CAP_NET_ADMIN))
 		return -EPERM;
 
 	if (kind == 2 && nlh->nlmsg_flags&NLM_F_DUMP) {
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 1b62343f5837..8383b2bddeb9 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -3076,7 +3076,7 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
 	if (unlikely(p->len + len >= 65536))
 		return -E2BIG;
 
-	lp = NAPI_GRO_CB(p)->last ?: p;
+	lp = NAPI_GRO_CB(p)->last;
 	pinfo = skb_shinfo(lp);
 
 	if (headlen <= offset) {
@@ -3192,7 +3192,7 @@ merge:
 
 	__skb_pull(skb, offset);
 
-	if (!NAPI_GRO_CB(p)->last)
+	if (NAPI_GRO_CB(p)->last == p)
 		skb_shinfo(p)->frag_list = skb;
 	else
 		NAPI_GRO_CB(p)->last->next = skb;
diff --git a/net/core/sock.c b/net/core/sock.c
index b4fff008136f..664ee4295b6f 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -145,6 +145,55 @@
 static DEFINE_MUTEX(proto_list_mutex);
 static LIST_HEAD(proto_list);
 
+/**
+ * sk_ns_capable - General socket capability test
+ * @sk: Socket to use a capability on or through
+ * @user_ns: The user namespace of the capability to use
+ * @cap: The capability to use
+ *
+ * Test to see if the opener of the socket had the capability @cap when
+ * the socket was created and the current process has it in the user
+ * namespace @user_ns.
+ */
+bool sk_ns_capable(const struct sock *sk,
+		   struct user_namespace *user_ns, int cap)
+{
+	return file_ns_capable(sk->sk_socket->file, user_ns, cap) &&
+		ns_capable(user_ns, cap);
+}
+EXPORT_SYMBOL(sk_ns_capable);
+
+/**
+ * sk_capable - Socket global capability test
+ * @sk: Socket to use a capability on or through
+ * @cap: The global capability to use
+ *
+ * Test to see if the opener of the socket had the capability @cap when
+ * the socket was created and the current process has it in all user
+ * namespaces.
+ */
+bool sk_capable(const struct sock *sk, int cap)
+{
+	return sk_ns_capable(sk, &init_user_ns, cap);
+}
+EXPORT_SYMBOL(sk_capable);
+
+/**
+ * sk_net_capable - Network namespace socket capability test
+ * @sk: Socket to use a capability on or through
+ * @cap: The capability to use
+ *
+ * Test to see if the opener of the socket had the capability @cap when
+ * the socket was created and the current process has it over the network
+ * namespace the socket is a member of.
+ */
+bool sk_net_capable(const struct sock *sk, int cap)
+{
+	return sk_ns_capable(sk, sock_net(sk)->user_ns, cap);
+}
+EXPORT_SYMBOL(sk_net_capable);
+
+
 #ifdef CONFIG_MEMCG_KMEM
 int mem_cgroup_sockets_init(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
 {
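
A hedged usage sketch: gating a privileged operation on what the socket's opener was allowed to do, not merely on the current task:

	if (!sk_net_capable(sk, CAP_NET_ADMIN))
		return -EPERM;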
diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
index d7af18859322..a4216a4c9572 100644
--- a/net/core/sock_diag.c
+++ b/net/core/sock_diag.c
@@ -49,7 +49,7 @@ int sock_diag_put_meminfo(struct sock *sk, struct sk_buff *skb, int attrtype)
 }
 EXPORT_SYMBOL_GPL(sock_diag_put_meminfo);
 
-int sock_diag_put_filterinfo(struct user_namespace *user_ns, struct sock *sk,
+int sock_diag_put_filterinfo(bool may_report_filterinfo, struct sock *sk,
 			     struct sk_buff *skb, int attrtype)
 {
 	struct sock_fprog_kern *fprog;
@@ -58,7 +58,7 @@ int sock_diag_put_filterinfo(struct user_namespace *user_ns, struct sock *sk,
 	unsigned int flen;
 	int err = 0;
 
-	if (!ns_capable(user_ns, CAP_NET_ADMIN)) {
+	if (!may_report_filterinfo) {
 		nla_reserve(skb, attrtype, 0);
 		return 0;
 	}
diff --git a/net/core/utils.c b/net/core/utils.c
index 2f737bf90b3f..eed34338736c 100644
--- a/net/core/utils.c
+++ b/net/core/utils.c
@@ -348,8 +348,8 @@ static void __net_random_once_deferred(struct work_struct *w)
 {
 	struct __net_random_once_work *work =
 		container_of(w, struct __net_random_once_work, work);
-	if (!static_key_enabled(work->key))
-		static_key_slow_inc(work->key);
+	BUG_ON(!static_key_enabled(work->key));
+	static_key_slow_dec(work->key);
 	kfree(work);
 }
 
@@ -367,7 +367,7 @@ static void __net_random_once_disable_jump(struct static_key *key)
 }
 
 bool __net_get_random_once(void *buf, int nbytes, bool *done,
-			   struct static_key *done_key)
+			   struct static_key *once_key)
 {
 	static DEFINE_SPINLOCK(lock);
 	unsigned long flags;
@@ -382,7 +382,7 @@ bool __net_get_random_once(void *buf, int nbytes, bool *done,
 	*done = true;
 	spin_unlock_irqrestore(&lock, flags);
 
-	__net_random_once_disable_jump(done_key);
+	__net_random_once_disable_jump(once_key);
 
 	return true;
 }
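
Callers reach this through the net_get_random_once() macro, which supplies the per-call-site static key; a hedged sketch (cf. the syncookie secret):

	/* lazily seed a per-boot secret exactly once */
	static u32 my_secret[4];

	static void my_get_secret(void)
	{
		net_get_random_once(my_secret, sizeof(my_secret));
	}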